diff --git a/platypush/backend/camera/pi.py b/platypush/backend/camera/pi.py index 8ae9f58e..e6ec3b74 100644 --- a/platypush/backend/camera/pi.py +++ b/platypush/backend/camera/pi.py @@ -19,6 +19,10 @@ class CameraPiBackend(Backend): * **picamera** (``pip install picamera``) * **redis** (``pip install redis``) for inter-process communication with the camera process + + This backend is **DEPRECATED**. Use the plugin :class:`platypush.plugins.camera.pi.CameraPiPlugin` instead to run + Pi camera actions. If you want to start streaming the camera on application start then simply create an event hook + on :class:`platypush.message.event.application.ApplicationStartedEvent` that runs ``camera.pi.start_streaming``. """ class CameraAction(Enum): diff --git a/platypush/backend/http/app/routes/plugins/camera/__init__.py b/platypush/backend/http/app/routes/plugins/camera/__init__.py index 7eca4014..dfea77d6 100644 --- a/platypush/backend/http/app/routes/plugins/camera/__init__.py +++ b/platypush/backend/http/app/routes/plugins/camera/__init__.py @@ -1,9 +1,12 @@ -from flask import Response, Blueprint -from platypush.plugins.camera import CameraPlugin +import json +from typing import Optional + +from flask import Response, Blueprint, request -from platypush import Config from platypush.backend.http.app import template_folder -from platypush.backend.http.app.utils import authenticate, send_request +from platypush.backend.http.app.utils import authenticate +from platypush.context import get_plugin +from platypush.plugins.camera import CameraPlugin, Camera, StreamWriter camera = Blueprint('camera', __name__, template_folder=template_folder) @@ -13,75 +16,95 @@ __routes__ = [ ] -def get_device_id(device_id=None): - if device_id is None: - device_id = int(send_request(action='camera.get_default_device_id').output) - return device_id +def get_camera(plugin: str) -> CameraPlugin: + return get_plugin('camera.' 
+ plugin) -def get_camera(device_id=None): - device_id = get_device_id(device_id) - camera_conf = Config.get('camera') or {} - camera_conf['device_id'] = device_id - return CameraPlugin(**camera_conf) +def get_frame(session: Camera, timeout: Optional[float] = None) -> bytes: + with session.stream.ready: + session.stream.ready.wait(timeout=timeout) + return session.stream.frame -def get_frame(device_id=None): - cam = get_camera(device_id) - with cam: - frame = None - - for _ in range(cam.warmup_frames): - output = cam.get_stream() - - with output.ready: - output.ready.wait() - frame = output.frame - - return frame - - -def video_feed(device_id=None): - cam = get_camera(device_id) - - with cam: +def feed(plugin: str, **kwargs): + plugin = get_camera(plugin) + with plugin.open(stream=True, **kwargs) as session: + plugin.start_camera(session) while True: - output = cam.get_stream() - with output.ready: - output.ready.wait() - frame = output.frame - - if frame and len(frame): - yield (b'--frame\r\n' - b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') + frame = get_frame(session, timeout=5.0) + if frame: + yield frame -@camera.route('/camera//frame', methods=['GET']) +def get_args(kwargs): + kwargs = kwargs.copy() + if 't' in kwargs: + del kwargs['t'] + + for k, v in kwargs.items(): + if k == 'resolution': + v = json.loads('[{}]'.format(v)) + else: + # noinspection PyBroadException + try: + v = int(v) + except: + # noinspection PyBroadException + try: + v = float(v) + except: + pass + + kwargs[k] = v + + return kwargs + + +@camera.route('/camera//photo.', methods=['GET']) @authenticate() -def get_camera_frame(device_id): - frame = get_frame(device_id) - return Response(frame, mimetype='image/jpeg') +def get_photo(plugin, extension): + plugin = get_camera(plugin) + extension = 'jpeg' if extension in ('jpg', 'jpeg') else extension + + with plugin.open(stream=True, stream_format=extension, frames_dir=None, **get_args(request.args)) as session: + 
plugin.start_camera(session) + frame = None + for _ in range(session.info.warmup_frames): + frame = get_frame(session) + + return Response(frame, mimetype=session.stream.mimetype) -@camera.route('/camera/frame', methods=['GET']) +@camera.route('/camera//video.', methods=['GET']) @authenticate() -def get_default_camera_frame(): - frame = get_frame() - return Response(frame, mimetype='image/jpeg') +def get_video(plugin, extension): + stream_class = StreamWriter.get_class_by_name(extension) + return Response(feed(plugin, stream_format=extension, frames_dir=None, **get_args(request.args)), + mimetype=stream_class.mimetype) -@camera.route('/camera//stream', methods=['GET']) +@camera.route('/camera//photo', methods=['GET']) @authenticate() -def get_stream_feed(device_id): - return Response(video_feed(device_id), - mimetype='multipart/x-mixed-replace; boundary=frame') +def get_photo_default(plugin): + return get_photo(plugin, 'jpeg') -@camera.route('/camera/stream', methods=['GET']) +@camera.route('/camera//video', methods=['GET']) @authenticate() -def get_default_stream_feed(): - return Response(video_feed(), - mimetype='multipart/x-mixed-replace; boundary=frame') +def get_video_default(plugin): + return get_video(plugin, 'mjpeg') + + +@camera.route('/camera//frame', methods=['GET']) +@authenticate() +def get_photo_deprecated(plugin): + return get_photo_default(plugin) + + +@camera.route('/camera//feed', methods=['GET']) +@authenticate() +def get_video_deprecated(plugin): + return get_video_default(plugin) # vim:sw=4:ts=4:et: diff --git a/platypush/backend/http/app/routes/plugins/camera/ir/mlx90640.py b/platypush/backend/http/app/routes/plugins/camera/ir/mlx90640.py index 439feec0..9dce0a44 100644 --- a/platypush/backend/http/app/routes/plugins/camera/ir/mlx90640.py +++ b/platypush/backend/http/app/routes/plugins/camera/ir/mlx90640.py @@ -1,12 +1,8 @@ -import os -import tempfile +from flask import Blueprint -from flask import Response, request, Blueprint, 
send_from_directory - -from platypush import Config from platypush.backend.http.app import template_folder -from platypush.backend.http.app.utils import authenticate, send_request -from platypush.plugins.camera.ir.mlx90640 import CameraIrMlx90640Plugin +from platypush.backend.http.app.routes.plugins.camera import get_photo, get_video +from platypush.backend.http.app.utils import authenticate camera_ir_mlx90640 = Blueprint('camera.ir.mlx90640', __name__, template_folder=template_folder) @@ -16,50 +12,40 @@ __routes__ = [ ] -def get_feed(**_): - camera_conf = Config.get('camera.ir.mlx90640') or {} - camera = CameraIrMlx90640Plugin(**camera_conf) +@camera_ir_mlx90640.route('/camera/ir/mlx90640/photo.', methods=['GET']) +@authenticate() +def get_photo_route(extension): + return get_photo('ir.mlx90640', extension) - with camera: - while True: - output = camera.get_stream() - with output.ready: - output.ready.wait() - frame = output.frame +@camera_ir_mlx90640.route('/camera/ir/mlx90640/video.', methods=['GET']) +@authenticate() +def get_video_route(extension): + return get_video('ir.mlx90640', extension) - if frame and len(frame): - yield (b'--frame\r\n' - b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') + +@camera_ir_mlx90640.route('/camera/ir/mlx90640/photo', methods=['GET']) +@authenticate() +def get_photo_route_default(): + return get_photo_route('jpeg') + + +@camera_ir_mlx90640.route('/camera/ir/mlx90640/video', methods=['GET']) +@authenticate() +def get_video_route_default(): + return get_video_route('mjpeg') @camera_ir_mlx90640.route('/camera/ir/mlx90640/frame', methods=['GET']) @authenticate() -def get_frame_route(): - f = tempfile.NamedTemporaryFile(prefix='ir_camera_frame_', suffix='.jpg', delete=False) - args = { - 'grayscale': bool(int(request.args.get('grayscale', 0))), - 'scale_factor': int(request.args.get('scale_factor', 1)), - 'rotate': int(request.args.get('rotate', 0)), - 'output_file': f.name, - } - - 
send_request(action='camera.ir.mlx90640.capture', **args) - return send_from_directory(os.path.dirname(f.name), - os.path.basename(f.name)) +def get_photo_route_deprecated(): + return get_photo_route_default() -@camera_ir_mlx90640.route('/camera/ir/mlx90640/stream', methods=['GET']) +@camera_ir_mlx90640.route('/camera/ir/mlx90640/feed', methods=['GET']) @authenticate() -def get_feed_route(): - args = { - 'grayscale': bool(int(request.args.get('grayscale', 0))), - 'scale_factor': int(request.args.get('scale_factor', 1)), - 'rotate': int(request.args.get('rotate', 0)), - 'format': 'jpeg', - } +def get_video_route_deprecated(): + return get_video_route_default() - return Response(get_feed(**args), - mimetype='multipart/x-mixed-replace; boundary=frame') # vim:sw=4:ts=4:et: diff --git a/platypush/backend/http/static/css/dist/webpanel/plugins/camera.android.ipcam.css b/platypush/backend/http/static/css/dist/webpanel/plugins/camera.android.ipcam.css index 5037c711..1b515f92 100644 --- a/platypush/backend/http/static/css/dist/webpanel/plugins/camera.android.ipcam.css +++ b/platypush/backend/http/static/css/dist/webpanel/plugins/camera.android.ipcam.css @@ -1 +1 @@ -.camera{min-height:90%;margin-top:4%;overflow:auto;display:flex;flex-direction:column;align-items:center}.camera .camera-container{min-width:640px;min-height:480px;position:relative;background:#000;margin-bottom:1em}.camera .camera-container .frame,.camera .camera-container .no-frame{position:absolute;top:0;width:100%;height:100%}.camera .camera-container .frame{z-index:1}.camera .camera-container .no-frame{display:flex;background:rgba(0,0,0,0.1);color:#fff;align-items:center;justify-content:center;z-index:2} +.camera{min-height:90%;margin-top:4%;overflow:auto;display:flex;flex-direction:column;align-items:center}.camera .camera-container{position:relative;background:#000;margin-bottom:1em}.camera .camera-container .frame,.camera .camera-container .no-frame{position:absolute;top:0;width:100%;height:100%}.camera 
.camera-container .frame{z-index:1}.camera .camera-container .no-frame{display:flex;background:rgba(0,0,0,0.1);color:#fff;align-items:center;justify-content:center;z-index:2}.camera .url{width:640px;display:flex;margin:1em}.camera .url .row{width:100%;display:flex;align-items:center}.camera .url .name{width:140px}.camera .url input{width:500px;font-weight:normal}.camera .params{margin-top:1em;padding:1em;width:640px;display:flex;flex-direction:column;border:1px solid #ccc;border-radius:1em}.camera .params label{font-weight:normal}.camera .params .head{display:flex;justify-content:center}.camera .params .head label{width:100%;display:flex;justify-content:right}.camera .params .head label .name{margin-right:1em}.camera .params .body{display:flex;flex-direction:column;margin:0 0 0 -1em}.camera .params .body .row{width:100%;display:flex;align-items:center;padding:.5em}.camera .params .body .row .name{width:30%}.camera .params .body .row input{width:70%}.camera .params .body .row:nth-child(even){background:#e4e4e4}.camera .params .body .row:hover{background:#def6ea} diff --git a/platypush/backend/http/static/css/dist/webpanel/plugins/camera.ir.mlx90640.css b/platypush/backend/http/static/css/dist/webpanel/plugins/camera.ir.mlx90640.css index 5037c711..1b515f92 100644 --- a/platypush/backend/http/static/css/dist/webpanel/plugins/camera.ir.mlx90640.css +++ b/platypush/backend/http/static/css/dist/webpanel/plugins/camera.ir.mlx90640.css @@ -1 +1 @@ -.camera{min-height:90%;margin-top:4%;overflow:auto;display:flex;flex-direction:column;align-items:center}.camera .camera-container{min-width:640px;min-height:480px;position:relative;background:#000;margin-bottom:1em}.camera .camera-container .frame,.camera .camera-container .no-frame{position:absolute;top:0;width:100%;height:100%}.camera .camera-container .frame{z-index:1}.camera .camera-container .no-frame{display:flex;background:rgba(0,0,0,0.1);color:#fff;align-items:center;justify-content:center;z-index:2} 
+.camera{min-height:90%;margin-top:4%;overflow:auto;display:flex;flex-direction:column;align-items:center}.camera .camera-container{position:relative;background:#000;margin-bottom:1em}.camera .camera-container .frame,.camera .camera-container .no-frame{position:absolute;top:0;width:100%;height:100%}.camera .camera-container .frame{z-index:1}.camera .camera-container .no-frame{display:flex;background:rgba(0,0,0,0.1);color:#fff;align-items:center;justify-content:center;z-index:2}.camera .url{width:640px;display:flex;margin:1em}.camera .url .row{width:100%;display:flex;align-items:center}.camera .url .name{width:140px}.camera .url input{width:500px;font-weight:normal}.camera .params{margin-top:1em;padding:1em;width:640px;display:flex;flex-direction:column;border:1px solid #ccc;border-radius:1em}.camera .params label{font-weight:normal}.camera .params .head{display:flex;justify-content:center}.camera .params .head label{width:100%;display:flex;justify-content:right}.camera .params .head label .name{margin-right:1em}.camera .params .body{display:flex;flex-direction:column;margin:0 0 0 -1em}.camera .params .body .row{width:100%;display:flex;align-items:center;padding:.5em}.camera .params .body .row .name{width:30%}.camera .params .body .row input{width:70%}.camera .params .body .row:nth-child(even){background:#e4e4e4}.camera .params .body .row:hover{background:#def6ea} diff --git a/platypush/backend/http/static/css/dist/webpanel/plugins/camera.pi.css b/platypush/backend/http/static/css/dist/webpanel/plugins/camera.pi.css index 5037c711..1b515f92 100644 --- a/platypush/backend/http/static/css/dist/webpanel/plugins/camera.pi.css +++ b/platypush/backend/http/static/css/dist/webpanel/plugins/camera.pi.css @@ -1 +1 @@ -.camera{min-height:90%;margin-top:4%;overflow:auto;display:flex;flex-direction:column;align-items:center}.camera .camera-container{min-width:640px;min-height:480px;position:relative;background:#000;margin-bottom:1em}.camera .camera-container .frame,.camera 
.camera-container .no-frame{position:absolute;top:0;width:100%;height:100%}.camera .camera-container .frame{z-index:1}.camera .camera-container .no-frame{display:flex;background:rgba(0,0,0,0.1);color:#fff;align-items:center;justify-content:center;z-index:2} +.camera{min-height:90%;margin-top:4%;overflow:auto;display:flex;flex-direction:column;align-items:center}.camera .camera-container{position:relative;background:#000;margin-bottom:1em}.camera .camera-container .frame,.camera .camera-container .no-frame{position:absolute;top:0;width:100%;height:100%}.camera .camera-container .frame{z-index:1}.camera .camera-container .no-frame{display:flex;background:rgba(0,0,0,0.1);color:#fff;align-items:center;justify-content:center;z-index:2}.camera .url{width:640px;display:flex;margin:1em}.camera .url .row{width:100%;display:flex;align-items:center}.camera .url .name{width:140px}.camera .url input{width:500px;font-weight:normal}.camera .params{margin-top:1em;padding:1em;width:640px;display:flex;flex-direction:column;border:1px solid #ccc;border-radius:1em}.camera .params label{font-weight:normal}.camera .params .head{display:flex;justify-content:center}.camera .params .head label{width:100%;display:flex;justify-content:right}.camera .params .head label .name{margin-right:1em}.camera .params .body{display:flex;flex-direction:column;margin:0 0 0 -1em}.camera .params .body .row{width:100%;display:flex;align-items:center;padding:.5em}.camera .params .body .row .name{width:30%}.camera .params .body .row input{width:70%}.camera .params .body .row:nth-child(even){background:#e4e4e4}.camera .params .body .row:hover{background:#def6ea} diff --git a/platypush/backend/http/static/css/source/common/vars.scss b/platypush/backend/http/static/css/source/common/vars.scss index 1f29e375..3ff63302 100644 --- a/platypush/backend/http/static/css/source/common/vars.scss +++ b/platypush/backend/http/static/css/source/common/vars.scss @@ -4,6 +4,7 @@ $default-bg-2: #f4f5f6 !default; $default-bg-3: 
#f1f3f2 !default; $default-bg-4: #edf0ee !default; $default-bg-5: #f8f8f8 !default; +$default-bg-6: #e4e4e4 !default; $default-fg: black !default; $default-fg-2: #333333 !default; $default-fg-3: #888888 !default; diff --git a/platypush/backend/http/static/css/source/webpanel/plugins/camera.android.ipcam b/platypush/backend/http/static/css/source/webpanel/plugins/camera.android.ipcam index 5f839334..74de6c32 120000 --- a/platypush/backend/http/static/css/source/webpanel/plugins/camera.android.ipcam +++ b/platypush/backend/http/static/css/source/webpanel/plugins/camera.android.ipcam @@ -1 +1 @@ -camera \ No newline at end of file +camera.cv \ No newline at end of file diff --git a/platypush/backend/http/static/css/source/webpanel/plugins/camera.cv/index.scss b/platypush/backend/http/static/css/source/webpanel/plugins/camera.cv/index.scss new file mode 100644 index 00000000..f8fc3bcc --- /dev/null +++ b/platypush/backend/http/static/css/source/webpanel/plugins/camera.cv/index.scss @@ -0,0 +1,116 @@ +@import 'common/vars'; + +.camera { + min-height: 90%; + margin-top: 4%; + overflow: auto; + display: flex; + flex-direction: column; + align-items: center; + + .camera-container { + position: relative; + background: black; + margin-bottom: 1em; + + .frame, .no-frame { + position: absolute; + top: 0; + width: 100%; + height: 100%; + } + + .frame { + z-index: 1; + } + + .no-frame { + display: flex; + background: rgba(0, 0, 0, 0.1); + color: white; + align-items: center; + justify-content: center; + z-index: 2; + } + } + + .url { + width: 640px; + display: flex; + margin: 1em; + + .row { + width: 100%; + display: flex; + align-items: center; + } + + .name { + width: 140px; + } + + input { + width: 500px; + font-weight: normal; + } + } + + .params { + margin-top: 1em; + padding: 1em; + width: 640px; + display: flex; + flex-direction: column; + border: $default-border-3; + border-radius: 1em; + + label { + font-weight: normal; + } + + .head { + display: flex; + 
justify-content: center; + + label { + width: 100%; + display: flex; + justify-content: right; + + .name { + margin-right: 1em; + } + } + } + + .body { + display: flex; + flex-direction: column; + margin: 0 0 0 -1em; + + .row { + width: 100%; + display: flex; + align-items: center; + padding: 0.5em; + + .name { + width: 30%; + } + + input { + width: 70%; + } + + &:nth-child(even) { + background: $default-bg-6; + } + + &:hover { + background: $hover-bg; + } + } + } + } +} + diff --git a/platypush/backend/http/static/css/source/webpanel/plugins/camera.ir.mlx90640 b/platypush/backend/http/static/css/source/webpanel/plugins/camera.ir.mlx90640 index 5f839334..74de6c32 120000 --- a/platypush/backend/http/static/css/source/webpanel/plugins/camera.ir.mlx90640 +++ b/platypush/backend/http/static/css/source/webpanel/plugins/camera.ir.mlx90640 @@ -1 +1 @@ -camera \ No newline at end of file +camera.cv \ No newline at end of file diff --git a/platypush/backend/http/static/css/source/webpanel/plugins/camera.pi b/platypush/backend/http/static/css/source/webpanel/plugins/camera.pi index 5f839334..74de6c32 120000 --- a/platypush/backend/http/static/css/source/webpanel/plugins/camera.pi +++ b/platypush/backend/http/static/css/source/webpanel/plugins/camera.pi @@ -1 +1 @@ -camera \ No newline at end of file +camera.cv \ No newline at end of file diff --git a/platypush/backend/http/static/css/source/webpanel/plugins/camera/index.scss b/platypush/backend/http/static/css/source/webpanel/plugins/camera/index.scss deleted file mode 100644 index 43387330..00000000 --- a/platypush/backend/http/static/css/source/webpanel/plugins/camera/index.scss +++ /dev/null @@ -1,39 +0,0 @@ -@import 'common/vars'; - -.camera { - min-height: 90%; - margin-top: 4%; - overflow: auto; - display: flex; - flex-direction: column; - align-items: center; - - .camera-container { - min-width: 640px; - min-height: 480px; - position: relative; - background: black; - margin-bottom: 1em; - - .frame, .no-frame { - 
position: absolute; - top: 0; - width: 100%; - height: 100%; - } - - .frame { - z-index: 1; - } - - .no-frame { - display: flex; - background: rgba(0, 0, 0, 0.1); - color: white; - align-items: center; - justify-content: center; - z-index: 2; - } - } -} - diff --git a/platypush/backend/http/static/js/plugins/camera.cv/index.js b/platypush/backend/http/static/js/plugins/camera.cv/index.js new file mode 100644 index 00000000..4674c92b --- /dev/null +++ b/platypush/backend/http/static/js/plugins/camera.cv/index.js @@ -0,0 +1,15 @@ +Vue.component('camera-cv', { + template: '#tmpl-camera-cv', + mixins: [cameraMixin], + + methods: { + startStreaming: function() { + this._startStreaming('cv'); + }, + + capture: function() { + this._capture('cv'); + }, + }, +}); + diff --git a/platypush/backend/http/static/js/plugins/camera.ir.mlx90640/index.js b/platypush/backend/http/static/js/plugins/camera.ir.mlx90640/index.js index f26fe528..2bfc460a 100644 --- a/platypush/backend/http/static/js/plugins/camera.ir.mlx90640/index.js +++ b/platypush/backend/http/static/js/plugins/camera.ir.mlx90640/index.js @@ -1,59 +1,19 @@ Vue.component('camera-ir-mlx90640', { template: '#tmpl-camera-ir-mlx90640', - props: ['config'], - - data: function() { - return { - bus: new Vue({}), - capturing: false, - rotate: this.config.rotate || 0, - grayscale: false, - }; - }, + mixins: [cameraMixin], methods: { - startStreaming: async function() { - if (this.capturing) - return; - - this.capturing = true; - this.$refs.frame.setAttribute('src', '/camera/ir/mlx90640/stream?rotate=' - + this.rotate + '&grayscale=' + (this.grayscale ? 
1 : 0) + '&t=' - + (new Date()).getTime()); + startStreaming: function() { + this._startStreaming('ir.mlx90640'); }, - stopStreaming: async function() { - await request('camera.ir.mlx90640.stop'); - this.$refs.frame.removeAttribute('src'); - this.capturing = false; - }, - - onRotationChange: function() { - this.rotate = parseInt(this.$refs.rotate.value); - const cameraContainer = this.$el.querySelector('.camera-container'); - - switch (this.rotate) { - case 0: - case 180: - cameraContainer.style.width = '640px'; - cameraContainer.style.minWidth = '640px'; - cameraContainer.style.height = '480px'; - cameraContainer.style.minHeight = '480px'; - break; - - case 90: - case 270: - cameraContainer.style.width = '480px'; - cameraContainer.style.minWidth = '480px'; - cameraContainer.style.height = '640px'; - cameraContainer.style.minHeight = '640px'; - break; - } + capture: function() { + this._capture('ir.mlx90640'); }, }, mounted: function() { - this.onRotationChange(); - }, + this.attrs.resolution = [32, 24]; + } }); diff --git a/platypush/backend/http/static/js/plugins/camera/index.js b/platypush/backend/http/static/js/plugins/camera/index.js index 4f34e9a5..b8d871e5 100644 --- a/platypush/backend/http/static/js/plugins/camera/index.js +++ b/platypush/backend/http/static/js/plugins/camera/index.js @@ -1,5 +1,4 @@ -Vue.component('camera', { - template: '#tmpl-camera', +var cameraMixin = { props: ['config'], data: function() { @@ -7,23 +6,57 @@ Vue.component('camera', { bus: new Vue({}), streaming: false, capturing: false, + showParams: false, + url: null, + attrs: { + resolution: this.config.resolution || [640, 480], + device: this.config.device, + horizontal_flip: this.config.horizontal_flip || 0, + vertical_flip: this.config.vertical_flip || 0, + rotate: this.config.rotate || 0, + scale_x: this.config.scale_x || 1.0, + scale_y: this.config.scale_y || 1.0, + fps: this.config.fps || 16.0, + grayscale: this.config.grayscale || 0, + stream_format: 
this.config.stream_format || 'mjpeg', + }, }; }, computed: { - deviceId: function() { - return this.config.device_id || 0; + params: function() { + return { + resolution: this.attrs.resolution, + device: this.attrs.device != null && ('' + this.attrs.device).length > 0 ? this.attrs.device : null, + horizontal_flip: parseInt(0 + this.attrs.horizontal_flip), + vertical_flip: parseInt(0 + this.attrs.vertical_flip), + rotate: parseFloat(this.attrs.rotate), + scale_x: parseFloat(this.attrs.scale_x), + scale_y: parseFloat(this.attrs.scale_y), + fps: parseFloat(this.attrs.fps), + grayscale: parseInt(0 + this.attrs.grayscale), + }; + }, + + window: function() { + return window; }, }, methods: { - startStreaming: function() { + getUrl: function(plugin, action) { + return '/camera/' + plugin + '/' + action + '?' + + Object.entries(this.params).filter(([k, v]) => v != null && ('' + v).length > 0) + .map(([k, v]) => k + '=' + v).join('&'); + }, + + _startStreaming: function(plugin) { if (this.streaming) return; this.streaming = true; this.capturing = false; - this.$refs.frame.setAttribute('src', '/camera/' + this.deviceId + '/stream'); + this.url = this.getUrl(plugin, 'video.' 
+ this.attrs.stream_format); }, stopStreaming: function() { @@ -32,16 +65,16 @@ Vue.component('camera', { this.streaming = false; this.capturing = false; - this.$refs.frame.removeAttribute('src'); + this.url = null; }, - capture: function() { + _capture: function(plugin) { if (this.capturing) return; this.streaming = false; this.capturing = true; - this.$refs.frame.setAttribute('src', '/camera/' + this.deviceId + '/frame?t=' + (new Date()).getTime()); + this.url = this.getUrl(plugin, 'photo.jpg') + '&t=' + (new Date()).getTime(); }, onFrameLoaded: function(event) { @@ -49,10 +82,22 @@ Vue.component('camera', { this.capturing = false; } }, + + onDeviceChanged: function(event) {}, + onFlipChanged: function(event) {}, + onSizeChanged: function(event) { + const degToRad = (deg) => (deg * Math.PI)/180; + const rot = degToRad(this.params.rotate); + this.$refs.frameContainer.style.width = Math.round(this.params.scale_x * Math.abs(this.params.resolution[0] * Math.cos(rot) + this.params.resolution[1] * Math.sin(rot))) + 'px'; + this.$refs.frameContainer.style.height = Math.round(this.params.scale_y * Math.abs(this.params.resolution[0] * Math.sin(rot) + this.params.resolution[1] * Math.cos(rot))) + 'px'; + }, + + onFpsChanged: function(event) {}, + onGrayscaleChanged: function(event) {}, }, mounted: function() { this.$refs.frame.addEventListener('load', this.onFrameLoaded); + this.onSizeChanged(); }, -}); - +}; diff --git a/platypush/backend/http/templates/nav.html b/platypush/backend/http/templates/nav.html index 9d6845a0..01fa4024 100644 --- a/platypush/backend/http/templates/nav.html +++ b/platypush/backend/http/templates/nav.html @@ -2,6 +2,7 @@ with pluginIcons = { 'camera': 'fas fa-camera', 'camera.android.ipcam': 'fab fa-android', + 'camera.cv': 'fas fa-camera', 'camera.pi': 'fab fa-raspberry-pi', 'camera.ir.mlx90640': 'fas fa-sun', 'execute': 'fas fa-play', diff --git a/platypush/backend/http/templates/plugins/camera.cv/index.html 
b/platypush/backend/http/templates/plugins/camera.cv/index.html new file mode 100644 index 00000000..988bcd66 --- /dev/null +++ b/platypush/backend/http/templates/plugins/camera.cv/index.html @@ -0,0 +1,6 @@ + + + + diff --git a/platypush/backend/http/templates/plugins/camera.ir.mlx90640/index.html b/platypush/backend/http/templates/plugins/camera.ir.mlx90640/index.html index d09e0ba7..943cfb02 100644 --- a/platypush/backend/http/templates/plugins/camera.ir.mlx90640/index.html +++ b/platypush/backend/http/templates/plugins/camera.ir.mlx90640/index.html @@ -1,31 +1,6 @@ + + diff --git a/platypush/backend/http/templates/plugins/camera/index.html b/platypush/backend/http/templates/plugins/camera/index.html index 18d23b73..1a05b3d8 100644 --- a/platypush/backend/http/templates/plugins/camera/index.html +++ b/platypush/backend/http/templates/plugins/camera/index.html @@ -1,23 +1,90 @@ - + diff --git a/platypush/message/event/camera.py b/platypush/message/event/camera.py index 4476cc14..31186b45 100644 --- a/platypush/message/event/camera.py +++ b/platypush/message/event/camera.py @@ -13,8 +13,8 @@ class CameraRecordingStartedEvent(CameraEvent): Event triggered when a new recording starts """ - def __init__(self, device_id, filename=None, *args, **kwargs): - super().__init__(*args, device_id=device_id, filename=filename, **kwargs) + def __init__(self, device, filename=None, *args, **kwargs): + super().__init__(*args, device=device, filename=filename, **kwargs) class CameraRecordingStoppedEvent(CameraEvent): @@ -22,8 +22,8 @@ class CameraRecordingStoppedEvent(CameraEvent): Event triggered when a recording stops """ - def __init__(self, device_id, *args, **kwargs): - super().__init__(*args, device_id=device_id, **kwargs) + def __init__(self, device, *args, **kwargs): + super().__init__(*args, device=device, **kwargs) class CameraVideoRenderedEvent(CameraEvent): diff --git a/platypush/message/response/__init__.py b/platypush/message/response/__init__.py index 
588e45db..acb44d1a 100644 --- a/platypush/message/response/__init__.py +++ b/platypush/message/response/__init__.py @@ -15,9 +15,7 @@ class Response(Message): :param origin: Origin :type origin: str :param output: Output - :type output: str :param errors: Errors - :type errors: list :param id: Message ID this response refers to :type id: str :param timestamp: Message timestamp diff --git a/platypush/plugins/camera/__init__.py b/platypush/plugins/camera/__init__.py index dd9152c0..9c48924d 100644 --- a/platypush/plugins/camera/__init__.py +++ b/platypush/plugins/camera/__init__.py @@ -1,64 +1,53 @@ import io import os -import re -import shutil +import pathlib +import socket import threading import time +from abc import ABC, abstractmethod +from contextlib import contextmanager from datetime import datetime -from typing import Optional +from multiprocessing import Process +from queue import Queue +from typing import Optional, Union, Dict, Tuple, IO from platypush.config import Config -from platypush.message import Mapping -from platypush.message.response import Response -from platypush.message.event.camera import CameraRecordingStartedEvent, \ - CameraRecordingStoppedEvent, CameraVideoRenderedEvent, \ - CameraPictureTakenEvent, CameraFrameCapturedEvent - +from platypush.message.event.camera import CameraRecordingStartedEvent, CameraPictureTakenEvent, \ + CameraRecordingStoppedEvent, CameraVideoRenderedEvent from platypush.plugins import Plugin, action +from platypush.plugins.camera.model.camera import CameraInfo, Camera +from platypush.plugins.camera.model.exceptions import CameraException, CaptureSessionAlreadyRunningException +from platypush.plugins.camera.model.writer import VideoWriter, StreamWriter +from platypush.plugins.camera.model.writer.ffmpeg import FFmpegFileWriter +from platypush.plugins.camera.model.writer.preview import PreviewWriter, PreviewWriterFactory +from platypush.utils import get_plugin_name_by_class + +__all__ = ['Camera', 'CameraInfo', 
'CameraException', 'CameraPlugin', 'CaptureSessionAlreadyRunningException', + 'StreamWriter'] -class StreamingOutput: - def __init__(self, raw=False): - self.frame = None - self.raw_frame = None - self.raw = raw - self.buffer = io.BytesIO() - self.ready = threading.Condition() - - def is_new_frame(self, buf): - if self.raw: - return True - - # JPEG header begin - return buf.startswith(b'\xff\xd8') - - def write(self, buf): - if not self.is_new_frame(buf): - return - - if self.raw: - with self.ready: - self.raw_frame = buf - self.ready.notify_all() - return - - # New frame, copy the existing buffer's content and notify all clients that it's available - self.buffer.truncate() - with self.ready: - self.frame = self.buffer.getvalue() - self.ready.notify_all() - - self.buffer.seek(0) - return self.buffer.write(buf) - - def close(self): - self.buffer.close() - - -class CameraPlugin(Plugin): +class CameraPlugin(Plugin, ABC): """ - Plugin to control generic cameras over OpenCV. + Abstract plugin to control camera devices. + + If the :class:`platypush.backend.http.HttpBackend` is enabled then the plugins that implement this class can + expose two endpoints: + + - ``http://host:8008/camera//photo<.extension>`` to capture a photo from the camera, where + ``.extension`` can be ``.jpg``, ``.png`` or ``.bmp``. + - ``http://host:8008/camera//video<.extension>`` to get a live feed from the camera, where + ``.extension`` can be ``.mjpeg``, ``.mkv``/``.webm``, ``.mp4``/``.h264`` or ``.h265``. + + Both the endpoints support the same parameters of the constructor of this class (e.g. ``device``, ``warmup_frames``, + ``duration`` etc.) as ``GET`` parameters. + + Requires: + + * **Pillow** (``pip install Pillow``) [optional] default handler for image transformations. + * **wxPython** (``pip install wxPython``) [optional] default handler for camera previews (``ffplay`` will be + used as a fallback if ``wxPython`` is not installed). 
+ * **ffmpeg** (see installation instructions for your OS) for rendering/streaming videos. Triggers: @@ -71,547 +60,681 @@ class CameraPlugin(Plugin): * :class:`platypush.message.event.camera.CameraPictureTakenEvent` when a snapshot is captured and stored to an image file - Requires: - - * **opencv** (``pip install opencv-python``) - """ - _default_warmup_frames = 5 - _default_sleep_between_frames = 0 - _default_color_transform = 'COLOR_BGR2BGRA' - _default_frames_dir = None + _camera_class = Camera + _camera_info_class = CameraInfo + _video_writer_class = FFmpegFileWriter - _max_stored_frames = 100 - _frame_filename_regex = re.compile('(\d+)-(\d+)-(\d+)_(\d+)-(\d+)-(\d+)-(\d+).jpe?g$') - - def __init__(self, device_id=0, frames_dir=None, - warmup_frames=_default_warmup_frames, video_type=0, - sleep_between_frames=_default_sleep_between_frames, - max_stored_frames=_max_stored_frames, - color_transform=_default_color_transform, - scale_x=None, scale_y=None, rotate=None, flip=None, stream_raw_frames=False, **kwargs): + def __init__(self, device: Optional[Union[int, str]] = None, resolution: Tuple[int, int] = (640, 480), + frames_dir: Optional[str] = None, warmup_frames: int = 5, warmup_seconds: Optional[float] = 0., + capture_timeout: Optional[float] = 20.0, scale_x: Optional[float] = None, + scale_y: Optional[float] = None, rotate: Optional[float] = None, grayscale: Optional[bool] = None, + color_transform: Optional[Union[int, str]] = None, fps: float = 16, horizontal_flip: bool = False, + vertical_flip: bool = False, video_type: Optional[str] = None, stream_format: str = 'mjpeg', + listen_port: Optional[int] = 5000, bind_address: str = '0.0.0.0', **kwargs): """ - :param device_id: Index of the default video device to be used for - capturing (default: 0) - :type device_id: int - - :param frames_dir: Directory where the camera frames will be stored - (default: ``~/.local/share/platypush/camera/frames``) - :type frames_dir: str - + :param device: Identifier of the 
default capturing device. + :param resolution: Default resolution, as a tuple of two integers. + :param frames_dir: Directory where the camera frames will be stored (default: + ``~/.local/share/platypush//frames``) :param warmup_frames: Cameras usually take a while to adapt their luminosity and focus to the environment when taking a picture. This parameter allows you to specify the number of "warmup" frames to capture upon picture command before actually capturing a frame (default: 5 but you may want to calibrate this parameter for your camera) - :type warmup_frames: int + :param warmup_seconds: Number of seconds to wait before a picture is taken or the first frame of a + video/sequence is captured (default: 0). + :param capture_timeout: Maximum number of seconds to wait between the programmed termination of a capture + session and the moment the device is released. + :param scale_x: If set, the images will be scaled along the x axis by the specified factor + :param scale_y: If set, the images will be scaled along the y axis by the specified factor + :param color_transform: Color transformation to apply to the images. + :param grayscale: Whether the output should be converted to grayscale. + :param rotate: If set, the images will be rotated by the specified number of degrees + :param fps: Frames per second (default: 16). + :param horizontal_flip: If set, the images will be flipped on the horizontal axis. + :param vertical_flip: If set, the images will be flipped on the vertical axis. + :param video_type: Plugin-specific format/type for the output videos. + :param listen_port: Default port to be used for streaming over TCP (default: 5000). + :param bind_address: Default bind address for TCP streaming (default: 0.0.0.0, accept any connections). + :param stream_format: Default format for the output when streamed to a network device. 
Available: - :param video_type: Default video type to use when exporting captured - frames to camera (default: 0, infers the type from the video file - extension). See - `here `_ - for a reference on the supported types (e.g. 'MJPEG', 'XVID', 'H264' etc') - :type video_type: str or int + - ``MJPEG`` (default) + - ``H264`` (over ``ffmpeg``) + - ``H265`` (over ``ffmpeg``) + - ``MKV`` (over ``ffmpeg``) + - ``MP4`` (over ``ffmpeg``) - :param sleep_between_frames: If set, the process will sleep for the - specified amount of seconds between two frames when recording - (default: 0) - :type sleep_between_frames: float - - :param max_stored_frames: Maximum number of frames to store in - ``frames_dir`` when recording with no persistence (e.g. streaming - over HTTP) (default: 100) - :type max_stored_frames: int - - :param color_transform: Color transformation to apply to the captured - frames. See https://docs.opencv.org/3.2.0/d7/d1b/group__imgproc__misc.html - for a full list of supported color transformations. - (default: "``COLOR_BGR2BGRA``") - :type color_transform: str - - :param scale_x: If set, the images will be scaled along the x axis by the - specified factor - :type scale_x: float - - :param scale_y: If set, the images will be scaled along the y axis by the - specified factor - :type scale_y: float - - :param rotate: If set, the images will be rotated by the specified - number of degrees - :type rotate: float - - :param flip: If set, the images will be flipped around the specified - axis. 
Possible values:: - - - ``0`` - flip along the x axis - - ``1`` - flip along the y axis - - ``-1`` - flip along both the axis - - :type flip: int """ - super().__init__(**kwargs) - self._default_frames_dir = os.path.join(Config.get('workdir'), 'camera', 'frames') - self.default_device_id = device_id - self.frames_dir = os.path.abspath(os.path.expanduser(frames_dir or self._default_frames_dir)) - self.warmup_frames = warmup_frames - self.video_type = video_type - self.stream_raw_frames = stream_raw_frames + _default_frames_dir = os.path.join(Config.get('workdir'), get_plugin_name_by_class(self), 'frames') + # noinspection PyArgumentList + self.camera_info = self._camera_info_class(device, color_transform=color_transform, warmup_frames=warmup_frames, + warmup_seconds=warmup_seconds, rotate=rotate, scale_x=scale_x, + scale_y=scale_y, capture_timeout=capture_timeout, + video_type=video_type, fps=fps, stream_format=stream_format, + resolution=resolution, grayscale=grayscale, listen_port=listen_port, + horizontal_flip=horizontal_flip, vertical_flip=vertical_flip, + bind_address=bind_address, frames_dir=os.path.abspath( + os.path.expanduser(frames_dir or _default_frames_dir))) - if isinstance(video_type, str): - import cv2 - self.video_type = cv2.VideoWriter_fourcc(*video_type.upper()) + self._devices: Dict[Union[int, str], Camera] = {} + self._streams: Dict[Union[int, str], Camera] = {} - self.sleep_between_frames = sleep_between_frames - self.max_stored_frames = max_stored_frames - self.color_transform = color_transform - self.scale_x = scale_x - self.scale_y = scale_y - self.rotate = rotate - self.flip = flip + def _merge_info(self, **info) -> CameraInfo: + merged_info = self.camera_info.clone() + for k, v in info.items(): + if hasattr(merged_info, k): + setattr(merged_info, k, v) - self._is_recording = {} # device_id => Event map - self._devices = {} # device_id => VideoCapture map - self._recording_threads = {} # device_id => Thread map - self._recording_info = {} # 
device_id => recording info map - self._output = None + return merged_info - def _init_device(self, device_id, frames_dir=None, **info): - import cv2 - self._release_device(device_id) + def open_device(self, device: Optional[Union[int, str]] = None, stream: bool = False, **params) -> Camera: + """ + Initialize and open a device. - if device_id not in self._devices: - self._devices[device_id] = cv2.VideoCapture(device_id) - - if device_id not in self._is_recording: - self._is_recording[device_id] = threading.Event() - - self._recording_info[device_id] = info - - if frames_dir: - os.makedirs(frames_dir, exist_ok=True) - self._recording_info[device_id]['frames_dir'] = frames_dir - - return self._devices[device_id] - - def _release_device(self, device_id, wait_thread_termination=True): - if device_id in self._is_recording: - self._is_recording[device_id].clear() - - if device_id in self._recording_threads: - if wait_thread_termination: - self.logger.info('A recording thread is running, waiting for termination') - if self._recording_threads[device_id].is_alive(): - self._recording_threads[device_id].join() - del self._recording_threads[device_id] - - if device_id in self._devices: - self._devices[device_id].release() - del self._devices[device_id] - self.fire_event(CameraRecordingStoppedEvent(device_id=device_id)) - self.logger.info("Device {} released".format(device_id)) - - if device_id in self._recording_info: - del self._recording_info[device_id] - - @staticmethod - def _store_frame_to_file(frame, frames_dir, image_file): - import cv2 - - if image_file: - filepath = image_file + :return: The initialized camera device. 
+ :raises: :class:`platypush.plugins.camera.CaptureSessionAlreadyRunningException` + """ + if device is None: + info = self.camera_info.clone() + device = info.device + elif device not in self._devices: + info = self._merge_info(**params) + info.device = device else: - filepath = os.path.join( - frames_dir, datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f.jpg')) + info = self._devices[device].info.clone() - cv2.imwrite(filepath, frame) + assert device is not None, 'No device specified/configured' + if device in self._devices: + camera = self._devices[device] + if camera.capture_thread and camera.capture_thread.is_alive() and camera.start_event.is_set(): + raise CaptureSessionAlreadyRunningException(device) + + camera.start_event.clear() + camera.capture_thread = None + else: + # noinspection PyArgumentList + camera = self._camera_class(info=info) + + camera.info.set(**params) + camera.object = self.prepare_device(camera) + + if stream: + writer_class = StreamWriter.get_class_by_name(camera.info.stream_format) + camera.stream = writer_class(camera=camera, plugin=self) + + if camera.info.frames_dir: + pathlib.Path(os.path.abspath(os.path.expanduser(camera.info.frames_dir))).mkdir( + mode=0o755, exist_ok=True, parents=True) + + self._devices[device] = camera + return camera + + def close_device(self, camera: Camera, wait_capture: bool = True) -> None: + """ + Close and release a device. + """ + name = camera.info.device + self.stop_preview(camera) + camera.start_event.clear() + + if wait_capture: + self.wait_capture(camera) + + self.release_device(camera) + if name in self._devices: + del self._devices[name] + + def wait_capture(self, camera: Camera) -> None: + """ + Wait until a capture session terminates. + + :param camera: Camera object. ``camera.info.capture_timeout`` is used as a capture thread termination timeout + if set. 
+ """ + if camera.capture_thread and camera.capture_thread.is_alive() and \ + threading.get_ident() != camera.capture_thread.ident: + try: + camera.capture_thread.join(timeout=camera.info.capture_timeout) + except Exception as e: + self.logger.warning('Error on FFmpeg capture wait: {}'.format(str(e))) + + @contextmanager + def open(self, device: Optional[Union[int, str]] = None, stream: bool = None, **info) -> Camera: + """ + Initialize and open a device using a context manager pattern. + + :param device: Capture device by name, path or ID. + :param stream: If set, the frames will be streamed to ``camera.stream``. + :param info: Camera parameters override - see constructors parameters. + :return: The initialized :class:`platypush.plugins.camera.Camera` object. + """ + camera = None + try: + camera = self.open_device(device, stream=stream, **info) + yield camera + finally: + self.close_device(camera) + + @abstractmethod + def prepare_device(self, device: Camera): + """ + Prepare a device using the plugin-specific logic - to be implemented by the derived classes. + + :param device: An initialized :class:`platypush.plugins.camera.Camera` object. + """ + raise NotImplementedError() + + @abstractmethod + def release_device(self, device: Camera): + """ + Release a device using the plugin-specific logic - to be implemented by the derived classes. + + :param device: An initialized :class:`platypush.plugins.camera.Camera` object. + """ + raise NotImplementedError() + + @abstractmethod + def capture_frame(self, device: Camera, *args, **kwargs): + """ + Capture a frame from a device using the plugin-specific logic - to be implemented by the derived classes. + + :param device: An initialized :class:`platypush.plugins.camera.Camera` object. 
+ """ + raise NotImplementedError() + + # noinspection PyShadowingBuiltins + @staticmethod + def store_frame(frame, filepath: str, format: Optional[str] = None): + """ + Capture a frame to the filesystem using the ``PIL`` library - it can be overridden by derived classes. + + :param frame: Frame object (default: a byte-encoded object or a ``PIL.Image`` object). + :param filepath: Destination file. + :param format: Output format. + """ + from PIL import Image + if isinstance(frame, bytes): + frame = list(frame) + elif not isinstance(frame, Image.Image): + frame = Image.fromarray(frame) + + save_args = {} + if format: + save_args['format'] = format + + frame.save(filepath, **save_args) + + def _store_frame(self, frame, frames_dir: Optional[str] = None, image_file: Optional[str] = None, + *args, **kwargs) -> str: + """ + :meth:`.store_frame` wrapper. + """ + if image_file: + filepath = os.path.abspath(os.path.expanduser(image_file)) + else: + filepath = os.path.abspath(os.path.expanduser( + os.path.join(frames_dir or '', datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f.jpg')))) + + pathlib.Path(filepath).parent.mkdir(mode=0o755, exist_ok=True, parents=True) + self.store_frame(frame, filepath, *args, **kwargs) return filepath - def _get_stored_frames_files(self, frames_dir): - ret = sorted([ - os.path.join(frames_dir, f) for f in os.listdir(frames_dir) - if os.path.isfile(os.path.join(frames_dir, f)) and - re.search(self._frame_filename_regex, f) - ]) - return ret - - def _get_avg_fps(self, frames_dir): - files = self._get_stored_frames_files(frames_dir) - frame_time_diff = 0.0 - n_frames = 0 - - for i in range(1, len(files)): - m1 = re.search(self._frame_filename_regex, files[i - 1]) - m2 = re.search(self._frame_filename_regex, files[i]) - - if not m1 or not m2: - continue - - t1 = datetime.timestamp(datetime(*map(int, m1.groups()))) - t2 = datetime.timestamp(datetime(*map(int, m2.groups()))) - frame_time_diff += (t2 - t1) - n_frames += 1 - - return n_frames / 
frame_time_diff if n_frames and frame_time_diff else 0 - - def _remove_expired_frames(self, frames_dir, max_stored_frames): - files = self._get_stored_frames_files(frames_dir) - for f in files[:len(files) - max_stored_frames]: - os.unlink(f) - - def _make_video_file(self, frames_dir, video_file, video_type): - import cv2 - - files = self._get_stored_frames_files(frames_dir) - if not files: - self.logger.warning('No frames found in {}'.format(frames_dir)) + def start_preview(self, camera: Camera): + if camera.preview and not camera.preview.closed: + self.logger.info('A preview window is already active on device {}'.format(camera.info.device)) return - frame = cv2.imread(files[0]) - height, width, layers = frame.shape - fps = self._get_avg_fps(frames_dir) - video = cv2.VideoWriter(video_file, video_type, fps, (width, height)) + camera.preview = PreviewWriterFactory.get(camera, self) + if isinstance(camera.preview, Process): + camera.preview.start() - for f in files: - video.write(cv2.imread(f)) - video.release() + def stop_preview(self, camera: Camera): + if camera.preview and not camera.preview.closed: + camera.preview.close() - self.fire_event(CameraVideoRenderedEvent(filename=video_file)) - shutil.rmtree(frames_dir, ignore_errors=True) + if isinstance(camera.preview, Process) and camera.preview.is_alive(): + camera.preview.terminate() + camera.preview.join(timeout=5.0) - def _recording_thread(self): - def thread(duration, video_file, image_file, device_id, - frames_dir, n_frames, sleep_between_frames, - max_stored_frames, color_transform, video_type, - scale_x, scale_y, rotate, flip): - import cv2 - device = self._devices[device_id] - color_transform = getattr(cv2, color_transform or self.color_transform) - rotation_matrix = None - self._is_recording[device_id].wait() - self.logger.info('Starting recording from video device {}'.format(device_id)) - recording_started_time = time.time() - captured_frames = 0 + if isinstance(camera.preview, Process) and 
camera.preview.is_alive(): + camera.preview.kill() - evt_args = { - 'device_id': device_id, - } + camera.preview = None - if video_file or image_file: - evt_args['filename'] = video_file or image_file - if frames_dir: - evt_args['frames_dir'] = frames_dir + def frame_processor(self, frame_queue: Queue, camera: Camera, image_file: Optional[str] = None): + while True: + frame = frame_queue.get() + if frame is None: + break - self.fire_event(CameraRecordingStartedEvent(**evt_args)) + frame = self.transform_frame(frame, camera.info.color_transform) + if camera.info.grayscale: + frame = self.to_grayscale(frame) - while device_id in self._is_recording and self._is_recording[device_id].is_set(): - if duration and time.time() - recording_started_time >= duration \ - or n_frames and captured_frames >= n_frames: + frame = self.rotate_frame(frame, camera.info.rotate) + frame = self.flip_frame(frame, camera.info.horizontal_flip, camera.info.vertical_flip) + frame = self.scale_frame(frame, camera.info.scale_x, camera.info.scale_y) + + for output in camera.get_outputs(): + output.write(frame) + + if camera.info.frames_dir or image_file: + self._store_frame(frame=frame, frames_dir=camera.info.frames_dir, image_file=image_file) + + def capturing_thread(self, camera: Camera, duration: Optional[float] = None, video_file: Optional[str] = None, + image_file: Optional[str] = None, n_frames: Optional[int] = None, preview: bool = False, + **kwargs): + """ + Camera capturing thread. + + :param camera: An initialized :class:`platypush.plugins.camera.Camera` object. + :param duration: Capturing session duration in seconds (default: until :meth:`.stop_capture` is called). + :param video_file: If set, the session will be recorded to this output video file (video capture mode). + :param image_file: If set, the output of the session will be a single image file (photo mode). + :param n_frames: Number of frames to be captured (default: until :meth:`.stop_capture` is called). 
+ :param preview: Start a preview window. + :param kwargs: Extra arguments to be passed to :meth:`.capture_frame`. + """ + camera.start_event.wait() + recording_started_time = time.time() + captured_frames = 0 + + evt_args = { + 'device': camera.info.device, + } + + if video_file or image_file: + evt_args['filename'] = video_file or image_file + if camera.info.frames_dir: + evt_args['frames_dir'] = camera.info.frames_dir + if preview: + self.start_preview(camera) + if duration and camera.info.warmup_seconds: + duration = duration + camera.info.warmup_seconds + if video_file: + camera.file_writer = self._video_writer_class(camera=camera, video_file=video_file, plugin=self) + + frame_queue = Queue() + frame_processor = threading.Thread(target=self.frame_processor, + kwargs=dict(frame_queue=frame_queue, camera=camera, image_file=image_file)) + frame_processor.start() + self.fire_event(CameraRecordingStartedEvent(**evt_args)) + + try: + while camera.start_event.is_set(): + if (duration and time.time() - recording_started_time >= duration) \ + or (n_frames and captured_frames >= n_frames): break - ret, frame = device.read() - if not ret: - self.logger.warning('Error while retrieving video frame') + frame_capture_start = time.time() + try: + frame = self.capture_frame(camera, **kwargs) + frame_queue.put(frame) + except AssertionError as e: + self.logger.warning(str(e)) continue - frame = cv2.cvtColor(frame, color_transform) + if not n_frames or not camera.info.warmup_seconds or \ + (time.time() - recording_started_time >= camera.info.warmup_seconds): + captured_frames += 1 - if rotate: - rows, cols = frame.shape - if not rotation_matrix: - rotation_matrix = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotate, 1) + if camera.info.fps: + wait_time = (1. 
/ camera.info.fps) - (time.time() - frame_capture_start) + if wait_time > 0: + time.sleep(wait_time) + finally: + frame_queue.put(None) + self.stop_preview(camera) + for output in camera.get_outputs(): + # noinspection PyBroadException + try: + output.close() + except: + pass - frame = cv2.warpAffine(frame, rotation_matrix, (cols, rows)) + self.close_device(camera, wait_capture=False) + frame_processor.join(timeout=5.0) + self.fire_event(CameraRecordingStoppedEvent(**evt_args)) - if flip is not None: - frame = cv2.flip(frame, flip) + if image_file: + self.fire_event(CameraPictureTakenEvent(filename=image_file)) - if scale_x or scale_y: - scale_x = scale_x or 1 - scale_y = scale_y or 1 - frame = cv2.resize(frame, None, fx=scale_x, fy=scale_y, - interpolation=cv2.INTER_CUBIC) + if video_file: + self.fire_event(CameraVideoRenderedEvent(filename=video_file)) - if self._output: - if not self.stream_raw_frames: - result, frame = cv2.imencode('.jpg', frame) - if not result: - self.logger.warning('Unable to convert frame to JPEG') - continue + def start_camera(self, camera: Camera, preview: bool = False, *args, **kwargs): + """ + Start a camera capture session. - self._output.write(frame.tobytes()) - else: - self._output.write(frame) - elif frames_dir: - self._store_frame_to_file(frame=frame, frames_dir=frames_dir, image_file=image_file) + :param camera: An initialized :class:`platypush.plugins.camera.Camera` object. + :param preview: Show a preview of the camera frames. 
+ """ + assert not (camera.capture_thread and camera.capture_thread.is_alive()), \ + 'A capture session is already in progress' - captured_frames += 1 - self.fire_event(CameraFrameCapturedEvent(filename=image_file)) - - if max_stored_frames and not video_file: - self._remove_expired_frames( - frames_dir=frames_dir, - max_stored_frames=max_stored_frames) - - if sleep_between_frames: - time.sleep(sleep_between_frames) - - self._release_device(device_id, wait_thread_termination=False) - - if image_file: - self.fire_event(CameraPictureTakenEvent(filename=image_file)) - - self.logger.info('Recording terminated') - - if video_file: - self.logger.info('Writing frames to video file {}'. - format(video_file)) - self._make_video_file(frames_dir=frames_dir, - video_file=video_file, - video_type=video_type) - self.logger.info('Video file {}: rendering completed'. - format(video_file)) - - return thread + camera.capture_thread = threading.Thread(target=self.capturing_thread, args=(camera, *args), + kwargs={'preview': preview, **kwargs}) + camera.capture_thread.start() + camera.start_event.set() @action - def start_recording(self, duration: Optional[float] = None, video_file: Optional[str] = None, - video_type: Optional[str] = None, device_id: Optional[int] = None, - frames_dir: Optional[str] = None, sleep_between_frames: Optional[float] = None, - max_stored_frames: Optional[int] = None, color_transform: Optional[str] = None, - scale_x: Optional[float] = None, scale_y: Optional[float] = None, - rotate: Optional[float] = None, flip: Optional[int] = None): + def capture_video(self, duration: Optional[float] = None, video_file: Optional[str] = None, preview: bool = False, + **camera) -> Union[str, dict]: """ - Start recording + Capture a video. 
- :param duration: Record duration in seconds (default: None, record until - ``stop_recording``) - :param video_file: If set, the stream will be recorded to the specified - video file (default: None) - :param video_type: Overrides the default configured ``video_type`` - - :param device_id: Override default device_id - :param frames_dir: Override default frames_dir - :param sleep_between_frames: Override default sleep_between_frames - :param max_stored_frames: Override default max_stored_frames - :param color_transform: Override default color_transform - :param scale_x: Override default scale_x - :param scale_y: Override default scale_y - :param rotate: Override default rotate - :param flip: Override default flip + :param duration: Record duration in seconds (default: None, record until ``stop_capture``). + :param video_file: If set, the stream will be recorded to the specified video file (default: None). + :param camera: Camera parameters override - see constructors parameters. + :param preview: Show a preview of the camera frames. + :return: If duration is specified, the method will wait until the recording is done and return the local path + to the recorded resource. Otherwise, it will return the status of the camera device after starting it. """ + camera = self.open_device(**camera) + self.start_camera(camera, duration=duration, video_file=video_file, frames_dir=None, image_file=None, + preview=preview) - device_id = device_id if device_id is not None else self.default_device_id - if device_id in self._is_recording and \ - self._is_recording[device_id].is_set(): - self.logger.info('A recording on device {} is already in progress'. 
- format(device_id)) - return self.status(device_id=device_id) + if duration: + self.wait_capture(camera) + return video_file - recording_started = threading.Event() - - # noinspection PyUnusedLocal - def on_recording_started(event): - recording_started.set() - - attrs = self._get_attributes(frames_dir=frames_dir, sleep_between_frames=sleep_between_frames, - max_stored_frames=max_stored_frames, color_transform=color_transform, - scale_x=scale_x, scale_y=scale_y, rotate=rotate, flip=flip, video_type=video_type) - - # noinspection PyUnresolvedReferences - if attrs.frames_dir: - # noinspection PyUnresolvedReferences - attrs.frames_dir = os.path.join(attrs.frames_dir, str(device_id)) - if video_file: - video_file = os.path.abspath(os.path.expanduser(video_file)) - attrs.frames_dir = os.path.join(attrs.frames_dir, 'recording_{}'.format( - datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f'))) - - # noinspection PyUnresolvedReferences - self._init_device(device_id, - video_file=video_file, - video_type=attrs.video_type, - frames_dir=attrs.frames_dir, - sleep_between_frames=attrs.sleep_between_frames, - max_stored_frames=attrs.max_stored_frames, - color_transform=attrs.color_transform, - scale_x=attrs.scale_x, - scale_y=attrs.scale_y, - rotate=attrs.rotate, - flip=attrs.flip) - - self.register_handler(CameraRecordingStartedEvent, on_recording_started) - - # noinspection PyUnresolvedReferences - self._recording_threads[device_id] = threading.Thread( - target=self._recording_thread(), kwargs={ - 'duration': duration, - 'video_file': video_file, - 'video_type': attrs.video_type, - 'image_file': None, - 'device_id': device_id, - 'frames_dir': attrs.frames_dir, - 'n_frames': None, - 'sleep_between_frames': attrs.sleep_between_frames, - 'max_stored_frames': attrs.max_stored_frames, - 'color_transform': attrs.color_transform, - 'scale_x': attrs.scale_x, - 'scale_y': attrs.scale_y, - 'rotate': attrs.rotate, - 'flip': attrs.flip, - }) - - self._recording_threads[device_id].start() - 
self._is_recording[device_id].set() - - recording_started.wait() - self.unregister_handler(CameraRecordingStartedEvent, on_recording_started) - return self.status(device_id=device_id) + return self.status(camera.info.device) @action - def stop_recording(self, device_id=None): + def stop_capture(self, device: Optional[Union[int, str]] = None): """ - Stop recording + Stop any capturing session on the specified device. + + :param device: Name/path/ID of the device to stop (default: all the active devices). """ + devices = self._devices.copy() + stop_devices = list(devices.values())[:] + if device: + stop_devices = [self._devices[device]] if device in self._devices else [] - device_id = device_id if device_id is not None else self.default_device_id - frames_dir = self._recording_info.get(device_id, {}).get('frames_dir') - self._release_device(device_id) - shutil.rmtree(frames_dir, ignore_errors=True) - - def _get_attributes(self, frames_dir=None, warmup_frames=None, - color_transform=None, scale_x=None, scale_y=None, - rotate=None, flip=None, sleep_between_frames=None, - max_stored_frames=None, video_type=None) -> Mapping: - import cv2 - - warmup_frames = warmup_frames if warmup_frames is not None else self.warmup_frames - frames_dir = os.path.abspath(os.path.expanduser(frames_dir)) if frames_dir is not None else self.frames_dir - sleep_between_frames = sleep_between_frames if sleep_between_frames is not None else self.sleep_between_frames - max_stored_frames = max_stored_frames if max_stored_frames is not None else self.max_stored_frames - color_transform = color_transform if color_transform is not None else self.color_transform - scale_x = scale_x if scale_x is not None else self.scale_x - scale_y = scale_y if scale_y is not None else self.scale_y - rotate = rotate if rotate is not None else self.rotate - flip = flip if flip is not None else self.flip - if video_type is not None: - video_type = cv2.VideoWriter_fourcc(*video_type.upper()) if isinstance(video_type, 
str) else video_type - else: - video_type = self.video_type - - return Mapping(warmup_frames=warmup_frames, frames_dir=frames_dir, sleep_between_frames=sleep_between_frames, - max_stored_frames=max_stored_frames, color_transform=color_transform, scale_x=scale_x, - scale_y=scale_y, rotate=rotate, flip=flip, video_type=video_type) + for device in stop_devices: + self.close_device(device) @action - def take_picture(self, image_file: str, device_id: Optional[int] = None, warmup_frames: Optional[int] = None, - color_transform: Optional[str] = None, scale_x: Optional[float] = None, - scale_y: Optional[float] = None, rotate: Optional[float] = None, flip: Optional[int] = None): + def capture_image(self, image_file: str, preview: bool = False, **camera) -> str: """ - Take a picture. + Capture an image. :param image_file: Path where the output image will be stored. - :param device_id: Override default device_id - :param warmup_frames: Override default warmup_frames - :param color_transform: Override default color_transform - :param scale_x: Override default scale_x - :param scale_y: Override default scale_y - :param rotate: Override default rotate - :param flip: Override default flip + :param camera: Camera parameters override - see constructors parameters. + :param preview: Show a preview of the camera frames. + :return: The local path to the saved image. 
""" - device_id = device_id if device_id is not None else self.default_device_id - image_file = os.path.abspath(os.path.expanduser(image_file)) - picture_taken = threading.Event() + with self.open(**camera) as camera: + warmup_frames = camera.info.warmup_frames if camera.info.warmup_frames else 1 + self.start_camera(camera, image_file=image_file, n_frames=warmup_frames, preview=preview) + self.wait_capture(camera) - # noinspection PyUnusedLocal - def on_picture_taken(event): - picture_taken.set() - - if device_id in self._is_recording and \ - self._is_recording[device_id].is_set(): - self.logger.info('A recording on device {} is already in progress'. - format(device_id)) - - status = self.status(device_id=device_id).output.get(device_id) - if 'image_file' in status: - shutil.copyfile(status['image_file'], image_file) - return {'path': image_file} - - raise RuntimeError('Recording already in progress and no images ' + - 'have been captured yet') - - attrs = self._get_attributes(warmup_frames=warmup_frames, color_transform=color_transform, scale_x=scale_x, - scale_y=scale_y, rotate=rotate, flip=flip) - - # noinspection PyUnresolvedReferences - self._init_device(device_id, image_file=image_file, warmup_frames=attrs.warmup_frames, - color_transform=attrs.color_transform, scale_x=attrs.scale_x, scale_y=attrs.scale_y, - rotate=attrs.rotate, flip=attrs.flip) - - self.register_handler(CameraPictureTakenEvent, on_picture_taken) - self._recording_threads[device_id] = threading.Thread( - target=self._recording_thread(), kwargs={ - 'duration': None, 'video_file': None, - 'image_file': image_file, 'video_type': None, - 'device_id': device_id, 'frames_dir': None, - 'n_frames': warmup_frames, - 'sleep_between_frames': None, - 'max_stored_frames': None, - 'color_transform': color_transform, - 'scale_x': scale_x, 'scale_y': scale_y, - 'rotate': rotate, 'flip': flip - }) - - self._recording_threads[device_id].start() - self._is_recording[device_id].set() - - picture_taken.wait() - 
self.unregister_handler(CameraPictureTakenEvent, on_picture_taken) - return {'path': image_file} + return image_file @action - def status(self, device_id=None): - """ - Returns the status of the specified device_id or all the device in a - ``{ device_id => device_info }`` map format. Device info includes - ``video_file``, ``image_file``, ``frames_dir`` and additional video info + def take_picture(self, image_file: str, **camera) -> str: """ + Alias for :meth:`.capture_image`. - resp = Response(output={ - id: { - 'image_file': self._get_stored_frames_files(info['frames_dir'])[-2] - if 'frames_dir' in info - and len(self._get_stored_frames_files(info['frames_dir'])) > 1 - and 'image_file' not in info else info.get('image_file'), **info - } - for id, info in self._recording_info.items() - if device_id is None or id == device_id - }, disable_logging=True) - return resp + :param image_file: Path where the output image will be stored. + :param camera: Camera parameters override - see constructors parameters. + :return: The local path to the saved image. + """ + return self.capture_image(image_file, **camera) @action - def get_default_device_id(self): - return self.default_device_id + def capture_sequence(self, duration: Optional[float] = None, n_frames: Optional[int] = None, preview: bool = False, + **camera) -> str: + """ + Capture a sequence of frames from a camera and store them to a directory. - def get_stream(self): - return self._output + :param duration: Duration of the sequence in seconds (default: until :meth:`.stop_capture` is called). + :param n_frames: Number of images to be captured (default: until :meth:`.stop_capture` is called). + :param camera: Camera parameters override - see constructors parameters. ``frames_dir`` and ``fps`` in + particular can be specifically tuned for ``capture_sequence``. + :param preview: Show a preview of the camera frames. + :return: The directory where the image files have been stored. 
+ """ + with self.open(**camera) as camera: + self.start_camera(camera, duration=duration, n_frames=n_frames, preview=preview) + self.wait_capture(camera) + return camera.info.frames_dir - def __enter__(self): - device_id = self.default_device_id - self._output = StreamingOutput(raw=self.stream_raw_frames) - self._init_device(device_id=device_id) - self.start_recording(device_id=device_id) + @action + def capture_preview(self, duration: Optional[float] = None, n_frames: Optional[int] = None, **camera) -> dict: + """ + Start a camera preview session. - def __exit__(self, exc_type, exc_val, exc_tb): - self.stop_recording(self.default_device_id) - if self._output: - self._output.close() - self._output = None + :param duration: Preview duration (default: until :meth:`.stop_capture` is called). + :param n_frames: Number of frames to display before closing (default: until :meth:`.stop_capture` is called). + :param camera: Camera object properties. + :return: The status of the device. + """ + camera = self.open_device(frames_dir=None, **camera) + self.start_camera(camera, duration=duration, n_frames=n_frames, preview=True) + return self.status(camera.info.device) + + @staticmethod + def _prepare_server_socket(camera: Camera) -> socket.socket: + server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + server_socket.bind((camera.info.bind_address or '0.0.0.0', camera.info.listen_port)) + server_socket.listen(1) + server_socket.settimeout(1) + return server_socket + + def _accept_client(self, server_socket: socket.socket) -> Optional[IO]: + try: + sock = server_socket.accept()[0] + self.logger.info('Accepted client connection from {}'.format(sock.getpeername())) + return sock.makefile('wb') + except socket.timeout: + return + + def streaming_thread(self, camera: Camera, stream_format: str, duration: Optional[float] = None): + streaming_started_time = time.time() + server_socket = 
self._prepare_server_socket(camera) + sock = None + self.logger.info('Starting streaming on port {}'.format(camera.info.listen_port)) + + try: + while camera.stream_event.is_set(): + if duration and time.time() - streaming_started_time >= duration: + break + + sock = self._accept_client(server_socket) + if not sock: + continue + + if camera.info.device not in self._devices: + info = camera.info.to_dict() + info['stream_format'] = stream_format + camera = self.open_device(stream=True, **info) + + camera.stream.sock = sock + self.start_camera(camera, duration=duration, frames_dir=None, image_file=None) + finally: + self._cleanup_stream(camera, server_socket, sock) + self.logger.info('Stopped camera stream') + + def _cleanup_stream(self, camera: Camera, server_socket: socket.socket, client: IO): + if client: + try: + client.close() + except Exception as e: + self.logger.warning('Error on client socket close: {}'.format(str(e))) + + try: + server_socket.close() + except Exception as e: + self.logger.warning('Error on server socket close: {}'.format(str(e))) + + if camera.stream: + try: + camera.stream.close() + except Exception as e: + self.logger.warning('Error while closing the encoding stream: {}'.format(str(e))) + + @action + def start_streaming(self, duration: Optional[float] = None, stream_format: str = 'mkv', **camera) -> dict: + """ + Expose the video stream of a camera over a TCP connection. + + :param duration: Streaming thread duration (default: until :meth:`.stop_streaming` is called). + :param stream_format: Format of the output stream - e.g. ``h264``, ``mjpeg``, ``mkv`` etc. (default: ``mkv``). + :param camera: Camera object properties - see constructor parameters. + :return: The status of the device. 
+ """ + camera = self.open_device(stream=True, stream_format=stream_format, **camera) + return self._start_streaming(camera, duration, stream_format) + + def _start_streaming(self, camera: Camera, duration: Optional[float], stream_format: str): + assert camera.info.listen_port, 'No listen_port specified/configured' + assert not camera.stream_event.is_set() and camera.info.device not in self._streams, \ + 'A streaming session is already running for device {}'.format(camera.info.device) + + camera.stream_thread = threading.Thread(target=self.streaming_thread, kwargs=dict( + camera=camera, duration=duration, stream_format=stream_format)) + self._streams[camera.info.device] = camera + + camera.stream_event.set() + camera.stream_thread.start() + return self.status(camera.info.device) + + @action + def stop_streaming(self, device: Optional[Union[int, str]] = None): + """ + Stop a camera over TCP session. + + :param device: Name/path/ID of the device to stop (default: all the active devices). + """ + streams = self._streams.copy() + stop_devices = list(streams.values())[:] + if device: + stop_devices = [self._streams[device]] if device in self._streams else [] + + for device in stop_devices: + self._stop_streaming(device) + + def _stop_streaming(self, camera: Camera): + camera.stream_event.clear() + if camera.stream_thread.is_alive(): + camera.stream_thread.join(timeout=5.0) + + if camera.info.device in self._streams: + del self._streams[camera.info.device] + + def _status(self, device: Union[int, str]) -> dict: + camera = self._devices.get(device, self._streams.get(device)) + if not camera: + return {} + + return { + **camera.info.to_dict(), + 'active': True if camera.capture_thread and camera.capture_thread.is_alive() else False, + 'capturing': True if camera.capture_thread and camera.capture_thread.is_alive() and camera.start_event.is_set() else False, + 'streaming': camera.stream_thread and camera.stream_thread.is_alive() and camera.stream_event.is_set(), + } + + 
@action + def status(self, device: Optional[Union[int, str]] = None): + """ + Returns the status of the specified camera or all the active cameras if ``device`` is ``None``. + """ + + if device: + return self._status(device) + + return { + id: self._status(device) + for id, camera in self._devices.items() + } + + @staticmethod + def transform_frame(frame, color_transform): + """ + Frame color space (e.g. ``RGB24``, ``YUV`` etc.) transform logic. Does nothing unless implemented by a + derived plugin. + """ + return frame.convert(color_transform) + + def to_grayscale(self, frame): + """ + Convert a frame to grayscale. The default implementation assumes that frame is a ``PIL.Image`` object. + + :param frame: Image frame (default: a ``PIL.Image`` object). + """ + from PIL import ImageOps + return ImageOps.grayscale(frame) + + @staticmethod + def rotate_frame(frame, rotation: Optional[Union[float, int]] = None): + """ + Frame rotation logic. The default implementation assumes that frame is a ``PIL.Image`` object. + + :param frame: Image frame (default: a ``PIL.Image`` object). + :param rotation: Rotation angle in degrees. + """ + if not rotation: + return frame + + return frame.rotate(rotation, expand=True) + + @staticmethod + def flip_frame(frame, horizontal_flip: bool = False, vertical_flip: bool = False): + """ + Frame flip logic. Does nothing unless implemented by a derived plugin. + + :param frame: Image frame (default: a ``PIL.Image`` object). + :param horizontal_flip: Flip along the horizontal axis. + :param vertical_flip: Flip along the vertical axis. + """ + from PIL import Image + + if horizontal_flip: + frame = frame.transpose(Image.FLIP_TOP_BOTTOM) + if vertical_flip: + frame = frame.transpose(Image.FLIP_LEFT_RIGHT) + + return frame + + @staticmethod + def scale_frame(frame, scale_x: Optional[float] = None, scale_y: Optional[float] = None): + """ + Frame scaling logic. The default implementation assumes that frame is a ``PIL.Image`` object. 
+ + :param frame: Image frame (default: a ``PIL.Image`` object). + :param scale_x: X-scale factor. + :param scale_y: Y-scale factor. + """ + from PIL import Image + if not (scale_x and scale_y) or (scale_x == 1 and scale_y == 1): + return frame + + size = (int(frame.size[0] * scale_x), int(frame.size[1] * scale_y)) + return frame.resize(size, Image.ANTIALIAS) + + @staticmethod + def encode_frame(frame, encoding: str = 'jpeg') -> bytes: + """ + Encode a frame to a target type. The default implementation assumes that frame is a ``PIL.Image`` object. + + :param frame: Image frame (default: a ``PIL.Image`` object). + :param encoding: Image encoding (e.g. ``jpeg``). + """ + if not encoding: + return frame + + with io.BytesIO() as buf: + frame.save(buf, format=encoding) + return buf.getvalue() # vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/cv.py b/platypush/plugins/camera/cv.py new file mode 100644 index 00000000..6ed063b6 --- /dev/null +++ b/platypush/plugins/camera/cv.py @@ -0,0 +1,79 @@ +from typing import Optional, Union + +from platypush.plugins.camera import CameraPlugin, Camera +from platypush.plugins.camera.model.writer.cv import CvFileWriter + + +class CameraCvPlugin(CameraPlugin): + """ + Plugin to control generic cameras over OpenCV. + + Requires: + + * **opencv** (``pip install opencv-python``) + * **Pillow** (``pip install Pillow``) + + """ + + def __init__(self, color_transform: Optional[str] = 'COLOR_BGR2RGB', video_type: str = 'XVID', + video_writer: str = 'ffmpeg', **kwargs): + """ + :param device: Device ID (0 for the first camera, 1 for the second etc.) or path (e.g. ``/dev/video0``). + :param video_type: Default video type to use when exporting captured frames to camera (default: 0, infers the + type from the video file extension). See + `here `_ + for a reference on the supported types (e.g. 'MJPEG', 'XVID', 'H264', 'X264', 'AVC1' etc.) + + :param color_transform: Color transformation to apply to the captured frames. 
See + https://docs.opencv.org/3.2.0/d7/d1b/group__imgproc__misc.html for a full list of supported color + transformations (default: "``COLOR_RGB2BGR``") + + :param video_writer: Class to be used to write frames to a video file. Supported values: + + - ``ffmpeg``: Use the FFmpeg writer (default, and usually more reliable - it requires ``ffmpeg`` + installed). + - ``cv``: Use the native OpenCV writer. + + The FFmpeg video writer requires ``scikit-video`` (``pip install scikit-video``) and ``ffmpeg``. + + :param kwargs: Extra arguments to be passed up to :class:`platypush.plugins.camera.CameraPlugin`. + """ + super().__init__(color_transform=color_transform, video_type=video_type, **kwargs) + if video_writer == 'cv': + self._video_writer_class = CvFileWriter + + def prepare_device(self, device: Camera): + import cv2 + + cam = cv2.VideoCapture(device.info.device) + if device.info.resolution and device.info.resolution[0]: + cam.set(cv2.CAP_PROP_FRAME_WIDTH, device.info.resolution[0]) + cam.set(cv2.CAP_PROP_FRAME_HEIGHT, device.info.resolution[1]) + + return cam + + def release_device(self, device: Camera): + if device.object: + device.object.release() + device.object = None + + def capture_frame(self, camera: Camera, *args, **kwargs): + import cv2 + from PIL import Image + ret, frame = camera.object.read() + assert ret, 'Cannot retrieve frame from {}'.format(camera.info.device) + + color_transform = camera.info.color_transform + if isinstance(color_transform, str): + color_transform = getattr(cv2, color_transform or self.camera_info.color_transform) + if color_transform: + frame = cv2.cvtColor(frame, color_transform) + + return Image.fromarray(frame) + + @staticmethod + def transform_frame(frame, color_transform: Union[str, int]): + return frame + + +# vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/ir/mlx90640/__init__.py b/platypush/plugins/camera/ir/mlx90640/__init__.py index ade96a3f..7a2c1ce4 100644 --- a/platypush/plugins/camera/ir/mlx90640/__init__.py +++ 
b/platypush/plugins/camera/ir/mlx90640/__init__.py @@ -1,12 +1,9 @@ -import base64 -import io import os import subprocess -import threading -import time +from typing import Optional, Tuple from platypush.plugins import action -from platypush.plugins.camera import CameraPlugin, StreamingOutput +from platypush.plugins.camera import CameraPlugin, Camera class CameraIrMlx90640Plugin(CameraPlugin): @@ -32,189 +29,65 @@ class CameraIrMlx90640Plugin(CameraPlugin): * **mlx90640-library** installation (see instructions above) * **PIL** image library (``pip install Pillow``) + """ - _img_size = (32, 24) - _rotate_values = {} - - def __init__(self, fps=16, skip_frames=2, scale_factor=1, rotate=0, grayscale=False, rawrgb_path=None, **kwargs): + def __init__(self, rawrgb_path: Optional[str] = None, resolution: Tuple[int, int] = (32, 24), + warmup_frames: Optional[int] = 5, **kwargs): """ - :param fps: Frames per seconds (default: 16) - :param skip_frames: Number of frames to be skipped on sensor initialization/warmup (default: 2) - :param scale_factor: The camera outputs 24x32 pixels artifacts. Use scale_factor to scale them up to a larger - image (default: 1) - :param rotate: Rotation angle in degrees (default: 0) - :param grayscale: Save the image as grayscale - black pixels will be colder, white pixels warmer - (default: False = use false colors) :param rawrgb_path: Specify it if the rawrgb executable compiled from https://github.com/pimoroni/mlx90640-library is in another folder than `/lib/examples`. + :param resolution: Device resolution (default: 32x24). + :param warmup_frames: Number of frames to be skipped on sensor initialization/warmup (default: 2). + :param kwargs: Extra parameters to be passed to :class:`platypush.plugins.camera.CameraPlugin`. 
""" - from PIL import Image - super().__init__(**kwargs) - - self._rotate_values = { - 90: Image.ROTATE_90, - 180: Image.ROTATE_180, - 270: Image.ROTATE_270, - } + super().__init__(device='mlx90640', resolution=resolution, warmup_frames=warmup_frames, **kwargs) if not rawrgb_path: rawrgb_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib', 'examples', 'rawrgb') rawrgb_path = os.path.abspath(os.path.expanduser(rawrgb_path)) - assert fps > 0 - assert skip_frames >= 0 - assert os.path.isfile(rawrgb_path) + assert os.path.isfile(rawrgb_path),\ + 'rawrgb executable not found. Please follow the documentation of this plugin to build it' - self.fps = fps - self.rotate = rotate - self.skip_frames = skip_frames - self.scale_factor = scale_factor self.rawrgb_path = rawrgb_path - self.grayscale = grayscale self._capture_proc = None - def _is_capture_proc_running(self): + def _is_capture_running(self): return self._capture_proc is not None and self._capture_proc.poll() is None - def _get_capture_proc(self, fps): - if not self._is_capture_proc_running(): - fps = fps or self.fps - self._capture_proc = subprocess.Popen([self.rawrgb_path, '{}'.format(fps)], stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + def prepare_device(self, device: Camera): + if not self._is_capture_running(): + self._capture_proc = subprocess.Popen([self.rawrgb_path, '{}'.format(device.info.fps)], + stdin=subprocess.PIPE, stdout=subprocess.PIPE) return self._capture_proc - def _raw_capture(self): + def release_device(self, device: Camera): + if not self._is_capture_running(): + return + + self._capture_proc.terminate() + self._capture_proc.kill() + self._capture_proc.wait() + self._capture_proc = None + + def capture_frame(self, device: Camera, *args, **kwargs): from PIL import Image - camera = self._get_capture_proc(fps=self.fps) - size = self._img_size + camera = self.prepare_device(device) + frame = camera.stdout.read(device.info.resolution[0] * 
device.info.resolution[1] * 3) + return Image.frombytes('RGB', device.info.resolution, frame) - while self._is_capture_proc_running(): - frame = camera.stdout.read(size[0] * size[1] * 3) - image = Image.frombytes('RGB', size, frame) - self._output.write(frame) - - if self.grayscale: - image = self._convert_to_grayscale(image) - if self.scale_factor != 1: - size = tuple(i * self.scale_factor for i in size) - image = image.resize(size, Image.ANTIALIAS) - if self.rotate: - rotate = self._rotate_values.get(int(self.rotate), 0) - image = image.transpose(rotate) - - temp = io.BytesIO() - image.save(temp, format='jpeg') - self._output.write(temp.getvalue()) - - def __enter__(self): - self._output = StreamingOutput(raw=False) - self._capturing_thread = threading.Thread(target=self._raw_capture) - self._capturing_thread.start() - - def __exit__(self, exc_type, exc_val, exc_tb): - self.stop() - - # noinspection PyShadowingBuiltins - @action - def capture(self, output_file=None, frames=1, grayscale=None, fps=None, skip_frames=None, scale_factor=None, - rotate=None, format='jpeg'): - """ - Capture one or multiple frames and return them as raw RGB - - :param output_file: Can be either the path to a single image file or a format string - (e.g. 'snapshots/image-{:04d}') in case of multiple frames. If not set the function will return a list of - base64 encoded representations of the raw RGB frames, otherwise the list of captured files. - :type output_file: str - - :param frames: Number of frames to be captured (default: 1). If None the capture process will proceed until - `stop` is called. - :type frames: int - - :param grayscale: Override the default ``grayscale`` parameter. 
- :type grayscale: bool - - :param fps: If set it overrides the fps parameter specified on the object (default: None) - :type fps: int - - :param skip_frames: If set it overrides the skip_frames parameter specified on the object (default: None) - :type skip_frames: int - - :param scale_factor: If set it overrides the scale_factor parameter specified on the object (default: None) - :type scale_factor: float - - :param rotate: If set it overrides the rotate parameter specified on the object (default: None) - :type rotate: int - - :param format: Output image format if output_file is not specified (default: jpeg). - It can be jpg, png, gif or any format supported by PIL - :type format: str - - :returns: list[str]. Each item is a base64 encoded representation of a frame in the specified format if - output_file is not set, otherwise a list with the captured image files will be returned. - """ - - from PIL import Image - fps = self.fps if fps is None else fps - skip_frames = self.skip_frames if skip_frames is None else skip_frames - scale_factor = self.scale_factor if scale_factor is None else scale_factor - rotate = self._rotate_values.get(self.rotate if rotate is None else rotate, 0) - grayscale = self.grayscale if grayscale is None else grayscale - - size = self._img_size - sleep_time = 1.0 / fps - captured_frames = [] - n_captured_frames = 0 - files = set() - camera = self._get_capture_proc(fps) - - while (frames is not None and n_captured_frames < frames) or ( - frames is None and self._is_capture_proc_running()): - frame = camera.stdout.read(size[0] * size[1] * 3) - - if skip_frames > 0: - time.sleep(sleep_time) - skip_frames -= 1 - continue - - image = Image.frombytes('RGB', size, frame) - - if grayscale: - image = self._convert_to_grayscale(image) - if scale_factor != 1: - size = tuple(i * scale_factor for i in size) - image = image.resize(size, Image.ANTIALIAS) - if rotate: - image = image.transpose(rotate) - - if not output_file: - temp = io.BytesIO() - 
image.save(temp, format=format) - frame = base64.encodebytes(temp.getvalue()).decode() - captured_frames.append(frame) - else: - image_file = os.path.abspath(os.path.expanduser(output_file.format(n_captured_frames))) - image.save(image_file) - files.add(image_file) - - n_captured_frames += 1 - time.sleep(sleep_time) - - self.stop() - return sorted([f for f in files]) if output_file else captured_frames - - @staticmethod - def _convert_to_grayscale(image): + def to_grayscale(self, image): from PIL import Image new_image = Image.new('L', image.size) for i in range(0, image.size[0]): for j in range(0, image.size[1]): r, g, b = image.getpixel((i, j)) - value = int(2.0 * r - 0.5 * g - 1.5 * b) + value = int(2.0 * r - 1.125 * g - 1.75 * b) if value > 255: value = 255 @@ -226,16 +99,11 @@ class CameraIrMlx90640Plugin(CameraPlugin): return new_image @action - def stop(self): + def capture(self, output_file=None, *args, **kwargs): """ - Stop an ongoing capture session + Back-compatibility alias for :meth:`.capture_image`. 
""" - if not self._is_capture_proc_running(): - return + return self.capture_image(image_file=output_file, *args, **kwargs) - self._capture_proc.terminate() - self._capture_proc.kill() - self._capture_proc.wait() - self._capture_proc = None # vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/model/__init__.py b/platypush/plugins/camera/model/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/platypush/plugins/camera/model/camera.py b/platypush/plugins/camera/model/camera.py new file mode 100644 index 00000000..40038bab --- /dev/null +++ b/platypush/plugins/camera/model/camera.py @@ -0,0 +1,101 @@ +import math +import threading +from dataclasses import dataclass +from typing import Optional, Union, Tuple, Set + +import numpy as np + +from platypush.plugins.camera.model.writer import StreamWriter, VideoWriter, FileVideoWriter +from platypush.plugins.camera.model.writer.preview import PreviewWriter + + +@dataclass +class CameraInfo: + device: Optional[Union[int, str]] + resolution: Optional[Tuple[int, int]] = None + color_transform: Optional[str] = None + frames_dir: Optional[str] = None + rotate: Optional[float] = None + horizontal_flip: bool = False + vertical_flip: bool = False + scale_x: Optional[float] = None + scale_y: Optional[float] = None + warmup_frames: int = 0 + warmup_seconds: float = 0. 
+ capture_timeout: float = 20.0 + fps: Optional[float] = None + grayscale: Optional[bool] = None + video_type: Optional[str] = None + stream_format: str = 'mjpeg' + listen_port: Optional[int] = None + bind_address: Optional[str] = None + + def set(self, **kwargs): + for k, v in kwargs.items(): + setattr(self, k, v) + + def to_dict(self) -> dict: + return { + 'device': self.device, + 'color_transform': self.color_transform, + 'frames_dir': self.frames_dir, + 'rotate': self.rotate, + 'horizontal_flip': self.horizontal_flip, + 'vertical_flip': self.vertical_flip, + 'scale_x': self.scale_x, + 'scale_y': self.scale_y, + 'warmup_frames': self.warmup_frames, + 'warmup_seconds': self.warmup_seconds, + 'capture_timeout': self.capture_timeout, + 'fps': self.fps, + 'grayscale': self.grayscale, + 'resolution': list(self.resolution or ()), + 'video_type': self.video_type, + 'stream_format': self.stream_format, + 'listen_port': self.listen_port, + 'bind_address': self.bind_address, + } + + def clone(self): + # noinspection PyArgumentList + return self.__class__(**self.to_dict()) + + +@dataclass +class Camera: + info: CameraInfo + start_event: threading.Event = threading.Event() + stream_event: threading.Event = threading.Event() + capture_thread: Optional[threading.Thread] = None + stream_thread: Optional[threading.Thread] = None + object = None + stream: Optional[StreamWriter] = None + preview: Optional[PreviewWriter] = None + file_writer: Optional[FileVideoWriter] = None + + def get_outputs(self) -> Set[VideoWriter]: + writers = set() + # if self.preview and self.preview.is_alive(): + if self.preview and not self.preview.closed: + writers.add(self.preview) + + if self.stream and not self.stream.closed: + writers.add(self.stream) + + if self.file_writer and not self.file_writer.closed: + writers.add(self.file_writer) + + return writers + + def effective_resolution(self) -> Tuple[int, int]: + rot = (self.info.rotate or 0) * math.pi / 180 + sin = math.sin(rot) + cos = 
math.cos(rot) + scale = np.array([[self.info.scale_x or 1., self.info.scale_y or 1.]]) + resolution = np.array([[self.info.resolution[0], self.info.resolution[1]]]) + rot_matrix = np.array([[sin, cos], [cos, sin]]) + resolution = (scale * abs(np.cross(rot_matrix, resolution)))[0] + return int(round(resolution[0])), int(round(resolution[1])) + + +# vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/model/exceptions.py b/platypush/plugins/camera/model/exceptions.py new file mode 100644 index 00000000..2045d3be --- /dev/null +++ b/platypush/plugins/camera/model/exceptions.py @@ -0,0 +1,10 @@ +class CameraException(RuntimeError): + pass + + +class CaptureSessionAlreadyRunningException(CameraException): + def __init__(self, device): + super().__init__('A capturing session on the device {} is already running'.format(device)) + + +# vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/model/writer/__init__.py b/platypush/plugins/camera/model/writer/__init__.py new file mode 100644 index 00000000..5f8713ab --- /dev/null +++ b/platypush/plugins/camera/model/writer/__init__.py @@ -0,0 +1,135 @@ +import io +import logging +import os +import threading +import time + +from abc import ABC, abstractmethod +from typing import Optional, IO + +from PIL.Image import Image + +logger = logging.getLogger('video-writer') + + +class VideoWriter(ABC): + """ + Generic class interface for handling frames-to-video operations. + """ + + mimetype: Optional[str] = None + + def __init__(self, camera, plugin, *_, **__): + from platypush.plugins.camera import Camera, CameraPlugin + self.camera: Camera = camera + self.plugin: CameraPlugin = plugin + self.closed = False + + @abstractmethod + def write(self, img: Image): + """ + Write an image to the channel. + + :param img: PIL Image instance. + """ + raise NotImplementedError() + + @abstractmethod + def close(self): + """ + Close the channel. 
+ """ + if self.camera: + self.plugin.close_device(self.camera) + self.closed = True + + def __enter__(self): + """ + Context manager-based interface. + """ + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """ + Context manager-based interface. + """ + self.close() + + +class FileVideoWriter(VideoWriter, ABC): + """ + Abstract class to handle frames-to-video file operations. + """ + def __init__(self, *args, video_file: str, **kwargs): + VideoWriter.__init__(self, *args, **kwargs) + self.video_file = os.path.abspath(os.path.expanduser(video_file)) + + +class StreamWriter(VideoWriter, ABC): + """ + Abstract class for camera streaming operations. + """ + def __init__(self, *args, sock: Optional[IO] = None, **kwargs): + VideoWriter.__init__(self, *args, **kwargs) + self.frame: Optional[bytes] = None + self.frame_time: Optional[float] = None + self.buffer = io.BytesIO() + self.ready = threading.Condition() + self.sock = sock + + def write(self, image: Image): + data = self.encode(image) + with self.ready: + if self.buffer.closed: + return + + self.buffer.truncate() + self.frame = self.buffer.getvalue() + self.frame_time = time.time() + self.ready.notify_all() + + self._sock_send(self.frame) + if not self.buffer.closed: + self.buffer.seek(0) + return self.buffer.write(data) + + def _sock_send(self, data): + if self.sock and data: + try: + self.sock.write(data) + except ConnectionError: + logger.warning('Client connection closed') + self.close() + + @abstractmethod + def encode(self, image: Image) -> bytes: + """ + Encode an image before sending it to the channel. + + :param image: PIL Image object. + :return: The bytes-encoded representation of the frame. 
+ """ + raise NotImplementedError() + + def close(self): + self.buffer.close() + if self.sock: + # noinspection PyBroadException + try: + self.sock.close() + except: + pass + + super().close() + + @staticmethod + def get_class_by_name(name: str): + from platypush.plugins.camera.model.writer.index import StreamHandlers + name = name.upper() + assert hasattr(StreamHandlers, name), 'No such stream handler: {}. Supported types: {}'.format( + name, [hndl.name for hndl in list(StreamHandlers)]) + + return getattr(StreamHandlers, name).value + + +# vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/model/writer/cv.py b/platypush/plugins/camera/model/writer/cv.py new file mode 100644 index 00000000..fa886c64 --- /dev/null +++ b/platypush/plugins/camera/model/writer/cv.py @@ -0,0 +1,46 @@ +import numpy as np +from PIL.Image import Image as ImageType + +from platypush.plugins.camera.model.writer import FileVideoWriter + + +class CvFileWriter(FileVideoWriter): + """ + Write camera frames to a file using OpenCV. 
+ """ + def __init__(self, *args, **kwargs): + import cv2 + super(CvFileWriter, self).__init__(*args, **kwargs) + + video_type = cv2.VideoWriter_fourcc(*(self.camera.info.video_type or 'xvid').upper()) + resolution = ( + int(self.camera.info.resolution[0] * (self.camera.info.scale_x or 1.)), + int(self.camera.info.resolution[1] * (self.camera.info.scale_y or 1.)), + ) + + self.writer = cv2.VideoWriter(self.video_file, video_type, self.camera.info.fps, resolution, False) + + def write(self, img): + if not self.writer: + return + + # noinspection PyBroadException + try: + if isinstance(img, ImageType): + # noinspection PyTypeChecker + img = np.array(img) + except: + pass + + self.writer.write(img) + + def close(self): + if not self.writer: + return + + self.writer.release() + self.writer = None + super().close() + + +# vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/model/writer/ffmpeg.py b/platypush/plugins/camera/model/writer/ffmpeg.py new file mode 100644 index 00000000..5aee1ae9 --- /dev/null +++ b/platypush/plugins/camera/model/writer/ffmpeg.py @@ -0,0 +1,183 @@ +import logging +import subprocess +import threading +import time + +from abc import ABC +from typing import Optional, List + +from PIL.Image import Image + +from platypush.plugins.camera.model.writer import VideoWriter, FileVideoWriter, StreamWriter + + +logger = logging.getLogger('ffmpeg-writer') + + +class FFmpegWriter(VideoWriter, ABC): + """ + Generic FFmpeg encoder for camera frames. 
+ """ + + def __init__(self, *args, input_file: str = '-', input_format: str = 'rawvideo', input_codec: Optional[str] = None, + output_file: str = '-', output_format: Optional[str] = None, output_codec: Optional[str] = None, + pix_fmt: Optional[str] = None, output_opts: Optional[List[str]] = None, **kwargs): + super().__init__(*args, **kwargs) + + self.input_file = input_file + self.input_format = input_format + self.input_codec = input_codec + self.output_file = output_file + self.output_format = output_format + self.output_codec = output_codec + self.width, self.height = self.camera.effective_resolution() + self.pix_fmt = pix_fmt + self.output_opts = output_opts or [] + + logger.info('Starting FFmpeg. Command: {}'.format(' '.join(self.ffmpeg_args))) + self.ffmpeg = subprocess.Popen(self.ffmpeg_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE) + + @property + def ffmpeg_args(self): + return ['ffmpeg', '-y', + '-f', self.input_format, + *(('-vcodec', self.input_codec) if self.input_codec else ()), + *(('-pix_fmt', self.pix_fmt) if self.pix_fmt else ()), + '-s', '{}x{}'.format(self.width, self.height), + '-r', str(self.camera.info.fps), + '-i', self.input_file, + *(('-f', self.output_format) if self.output_format else ()), + *self.output_opts, + *(('-vcodec', self.output_codec) if self.output_codec else ()), + self.output_file] + + def is_closed(self): + return self.closed or not self.ffmpeg or self.ffmpeg.poll() is not None + + def write(self, image: Image): + if self.is_closed(): + return + + try: + self.ffmpeg.stdin.write(image.convert('RGB').tobytes()) + except Exception as e: + logger.warning('FFmpeg send error: {}'.format(str(e))) + self.close() + + def close(self): + if not self.is_closed(): + if self.ffmpeg and self.ffmpeg.stdin: + try: + self.ffmpeg.stdin.close() + except (IOError, OSError): + pass + + if self.ffmpeg: + self.ffmpeg.terminate() + try: + self.ffmpeg.wait(timeout=5.0) + except subprocess.TimeoutExpired: + logger.warning('FFmpeg has not 
returned - killing it') + self.ffmpeg.kill() + + if self.ffmpeg and self.ffmpeg.stdout: + try: + self.ffmpeg.stdout.close() + except (IOError, OSError): + pass + + self.ffmpeg = None + super().close() + + +class FFmpegFileWriter(FileVideoWriter, FFmpegWriter): + """ + Write camera frames to a file using FFmpeg. + """ + + def __init__(self, *args, video_file: str, **kwargs): + FileVideoWriter.__init__(self, *args, video_file=video_file, **kwargs) + FFmpegWriter.__init__(self, *args, pix_fmt='rgb24', output_file=self.video_file, **kwargs) + + +class FFmpegStreamWriter(StreamWriter, FFmpegWriter, ABC): + """ + Stream camera frames using FFmpeg. + """ + + def __init__(self, *args, output_format: str, **kwargs): + StreamWriter.__init__(self, *args, **kwargs) + FFmpegWriter.__init__(self, *args, pix_fmt='rgb24', output_format=output_format, + output_opts=[ + '-tune', '-zerolatency', '-preset', 'superfast', '-trellis', '0', + '-fflags', 'nobuffer'], **kwargs) + self._reader = threading.Thread(target=self._reader_thread) + self._reader.start() + + def encode(self, image: Image) -> bytes: + return image.convert('RGB').tobytes() + + def _reader_thread(self): + start_time = time.time() + + while not self.is_closed(): + try: + data = self.ffmpeg.stdout.read(1 << 15) + except Exception as e: + logger.warning('FFmpeg reader error: {}'.format(str(e))) + break + + if not data: + continue + + if self.frame is None: + latency = time.time() - start_time + logger.info('FFmpeg stream latency: {} secs'.format(latency)) + + with self.ready: + self.frame = data + self.frame_time = time.time() + self.ready.notify_all() + + self._sock_send(self.frame) + + def write(self, image: Image): + if self.is_closed(): + return + + data = self.encode(image) + try: + self.ffmpeg.stdin.write(data) + except Exception as e: + logger.warning('FFmpeg send error: {}'.format(str(e))) + self.close() + + def close(self): + super().close() + if self._reader and self._reader.is_alive() and threading.get_ident() 
!= self._reader.ident: + self._reader.join(timeout=5.0) + self._reader = None + + +class MKVStreamWriter(FFmpegStreamWriter): + mimetype = 'video/webm' + + def __init__(self, *args, **kwargs): + super().__init__(*args, output_format='matroska', **kwargs) + + +class H264StreamWriter(FFmpegStreamWriter): + mimetype = 'video/h264' + + def __init__(self, *args, **kwargs): + super().__init__(*args, output_format='h264', **kwargs) + + +class H265StreamWriter(FFmpegStreamWriter): + mimetype = 'video/h265' + + def __init__(self, *args, **kwargs): + super().__init__(*args, output_format='h265', **kwargs) + + +# vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/model/writer/image.py b/platypush/plugins/camera/model/writer/image.py new file mode 100644 index 00000000..3b100c4f --- /dev/null +++ b/platypush/plugins/camera/model/writer/image.py @@ -0,0 +1,67 @@ +import io +from abc import ABC + +from PIL.Image import Image + +from platypush.plugins.camera.model.writer import StreamWriter + + +class ImageStreamWriter(StreamWriter, ABC): + """ + Write camera frames to a stream as single JPEG items. + """ + + @staticmethod + def _encode(image: Image, encoding: str, **kwargs) -> bytes: + with io.BytesIO() as buf: + image.save(buf, format=encoding, **kwargs) + return buf.getvalue() + + +class JPEGStreamWriter(ImageStreamWriter): + """ + Write camera frames to a stream as single JPEG items. + """ + mimetype = 'image/jpeg' + + def __init__(self, *args, quality: int = 90, **kwargs): + super().__init__(*args, **kwargs) + assert 0 < quality <= 100, 'JPEG quality should be between 0 and 100' + self.quality = quality + + def encode(self, image: Image) -> bytes: + return self._encode(image, 'jpeg', quality=self.quality) + + +class PNGStreamWriter(ImageStreamWriter): + """ + Write camera frames to a stream as single PNG items. 
+ """ + mimetype = 'image/png' + + def encode(self, image: Image) -> bytes: + return self._encode(image, 'png') + + +class BMPStreamWriter(ImageStreamWriter): + """ + Write camera frames to a stream as single BMP items. + """ + mimetype = 'image/bmp' + + def encode(self, image: Image) -> bytes: + return self._encode(image, 'bmp') + + +class MJPEGStreamWriter(JPEGStreamWriter): + """ + Write camera frames to a stream as an MJPEG feed. + """ + mimetype = 'multipart/x-mixed-replace; boundary=frame' + + def encode(self, image: Image) -> bytes: + return (b'--frame\r\n' + b'Content-Type: image/jpeg\r\n\r\n' + super().encode(image) + b'\r\n') + + +# vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/model/writer/index.py b/platypush/plugins/camera/model/writer/index.py new file mode 100644 index 00000000..dc6691b1 --- /dev/null +++ b/platypush/plugins/camera/model/writer/index.py @@ -0,0 +1,22 @@ +from enum import Enum + +from platypush.plugins.camera.model.writer.ffmpeg import MKVStreamWriter, H264StreamWriter, H265StreamWriter +from platypush.plugins.camera.model.writer.image import JPEGStreamWriter, PNGStreamWriter, BMPStreamWriter, \ + MJPEGStreamWriter + + +class StreamHandlers(Enum): + JPG = JPEGStreamWriter + JPEG = JPEGStreamWriter + PNG = PNGStreamWriter + BMP = BMPStreamWriter + MJPEG = MJPEGStreamWriter + MJPG = MJPEGStreamWriter + MKV = MKVStreamWriter + WEBM = MKVStreamWriter + H264 = H264StreamWriter + H265 = H265StreamWriter + MP4 = H264StreamWriter + + +# vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/model/writer/preview/__init__.py b/platypush/plugins/camera/model/writer/preview/__init__.py new file mode 100644 index 00000000..6039b45c --- /dev/null +++ b/platypush/plugins/camera/model/writer/preview/__init__.py @@ -0,0 +1,31 @@ +import logging + +from abc import ABC + +from platypush.plugins.camera.model.writer import VideoWriter + +logger = logging.getLogger('cam-preview') + + +class PreviewWriter(VideoWriter, ABC): + """ + Abstract class 
for camera previews. + """ + + +class PreviewWriterFactory: + @staticmethod + def get(*args, **kwargs) -> PreviewWriter: + try: + import wx + # noinspection PyUnresolvedReferences + from platypush.plugins.camera.model.writer.preview.wx import WxPreviewWriter + return WxPreviewWriter(*args, **kwargs) + except ImportError: + logger.warning('wxPython not available, using ffplay as a fallback for camera previews') + + from platypush.plugins.camera.model.writer.preview.ffplay import FFplayPreviewWriter + return FFplayPreviewWriter(*args, **kwargs) + + +# vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/model/writer/preview/ffplay.py b/platypush/plugins/camera/model/writer/preview/ffplay.py new file mode 100644 index 00000000..c1a3ce61 --- /dev/null +++ b/platypush/plugins/camera/model/writer/preview/ffplay.py @@ -0,0 +1,47 @@ +import logging +import subprocess +import threading + +from platypush.plugins.camera.model.writer.image import MJPEGStreamWriter +from platypush.plugins.camera.model.writer.preview import PreviewWriter + +logger = logging.getLogger('cam-preview') + + +class FFplayPreviewWriter(PreviewWriter, MJPEGStreamWriter): + """ + General class for managing previews from camera devices or generic sources of images over ffplay. + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.ffplay = subprocess.Popen(['ffplay', '-'], stdin=subprocess.PIPE) + self._preview_thread = threading.Thread(target=self._ffplay_thread) + self._preview_thread.start() + + def _ffplay_thread(self): + while not self.closed and self.ffplay.poll() is None: + with self.ready: + self.ready.wait(1.) 
+ if not self.frame: + continue + + try: + self.ffplay.stdin.write(self.frame) + except Exception as e: + logger.warning('ffplay write error: {}'.format(str(e))) + self.close() + break + + def close(self): + if self.ffplay and self.ffplay.poll() is None: + self.ffplay.terminate() + + self.camera = None + super().close() + if self._preview_thread and self._preview_thread.is_alive() and \ + threading.get_ident() != self._preview_thread.ident: + self._preview_thread.join(timeout=5.0) + self._preview_thread = None + + +# vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/model/writer/preview/ui.py b/platypush/plugins/camera/model/writer/preview/ui.py new file mode 100644 index 00000000..bce6553e --- /dev/null +++ b/platypush/plugins/camera/model/writer/preview/ui.py @@ -0,0 +1,68 @@ +from queue import Empty +import wx + +from platypush.plugins.camera.model.writer.preview import PreviewWriter + + +class Panel(wx.Panel): + def __init__(self, parent, process, width: int, height: int): + import wx + super().__init__(parent, -1) + + self.process: PreviewWriter = process + self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) + self.SetSize(width, height) + self.Bind(wx.EVT_PAINT, self.on_paint) + self.update() + + @staticmethod + def img_to_bitmap(image) -> wx.Bitmap: + import wx + return wx.Bitmap.FromBuffer(image.width, image.height, image.tobytes()) + + def get_bitmap(self): + try: + return self.process.bitmap_queue.get(block=True, timeout=1.0) + except Empty: + return None + + def update(self): + import wx + self.Refresh() + self.Update() + wx.CallLater(15, self.update) + + def create_bitmap(self): + image = self.get_bitmap() + if image is None: + return + + return self.img_to_bitmap(image) + + def on_paint(self, *_, **__): + import wx + bitmap = self.create_bitmap() + if not bitmap: + return + + dc = wx.AutoBufferedPaintDC(self) + dc.DrawBitmap(bitmap, 0, 0) + + +class Frame(wx.Frame): + def __init__(self, process): + import wx + style = wx.DEFAULT_FRAME_STYLE & 
~wx.RESIZE_BORDER & ~wx.MAXIMIZE_BOX + self.process = process + image = self.process.bitmap_queue.get() + + super().__init__(None, -1, process.camera.info.device or 'Camera Preview', style=style) + self.Bind(wx.EVT_WINDOW_DESTROY, self.on_close) + self.panel = Panel(self, process, width=image.width, height=image.height) + self.Fit() + + def on_close(self, *_, **__): + self.process.close() + + +# vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/model/writer/preview/wx/__init__.py b/platypush/plugins/camera/model/writer/preview/wx/__init__.py new file mode 100644 index 00000000..c0b60115 --- /dev/null +++ b/platypush/plugins/camera/model/writer/preview/wx/__init__.py @@ -0,0 +1,53 @@ +import logging +from multiprocessing import Process, Queue, Event + +from platypush.plugins.camera.model.writer import VideoWriter +from platypush.plugins.camera.model.writer.preview import PreviewWriter + +logger = logging.getLogger('cam-preview') + + +class WxPreviewWriter(PreviewWriter, Process): + """ + General class for managing previews from camera devices or sources of images. 
+ """ + + def __init__(self, camera, plugin, *args, **kwargs): + Process.__init__(self, *args, **kwargs) + VideoWriter.__init__(self, camera=camera, plugin=plugin) + self.app = None + self.bitmap_queue = Queue() + self.stopped_event = Event() + + def run(self) -> None: + import wx + from platypush.plugins.camera.model.writer.preview.wx.ui import Frame + + self.app = wx.App() + frame = Frame(self) + frame.Center() + frame.Show() + self.app.MainLoop() + + def close(self): + if not self.app: + return + + self.app.ExitMainLoop() + self.app = None + self.camera.preview = None + self.bitmap_queue.close() + self.bitmap_queue = None + self.stopped_event.set() + + def write(self, image): + if self.stopped_event.is_set(): + return + + try: + self.bitmap_queue.put(image) + except Exception as e: + logger.warning('Could not add an image to the preview queue: {}'.format(str(e))) + + +# vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/model/writer/preview/wx/ui.py b/platypush/plugins/camera/model/writer/preview/wx/ui.py new file mode 100644 index 00000000..dd2a901d --- /dev/null +++ b/platypush/plugins/camera/model/writer/preview/wx/ui.py @@ -0,0 +1,68 @@ +from queue import Empty +import wx + +from platypush.plugins.camera.model.writer.preview.wx import WxPreviewWriter + + +class Panel(wx.Panel): + def __init__(self, parent, process, width: int, height: int): + import wx + super().__init__(parent, -1) + + self.process: WxPreviewWriter = process + self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) + self.SetSize(width, height) + self.Bind(wx.EVT_PAINT, self.on_paint) + self.update() + + @staticmethod + def img_to_bitmap(image) -> wx.Bitmap: + import wx + return wx.Bitmap.FromBuffer(image.width, image.height, image.tobytes()) + + def get_bitmap(self): + try: + return self.process.bitmap_queue.get(block=True, timeout=1.0) + except Empty: + return None + + def update(self): + import wx + self.Refresh() + self.Update() + wx.CallLater(15, self.update) + + def create_bitmap(self): + 
image = self.get_bitmap() + if image is None: + return + + return self.img_to_bitmap(image) + + def on_paint(self, *_, **__): + import wx + bitmap = self.create_bitmap() + if not bitmap: + return + + dc = wx.AutoBufferedPaintDC(self) + dc.DrawBitmap(bitmap, 0, 0) + + +class Frame(wx.Frame): + def __init__(self, process): + import wx + style = wx.DEFAULT_FRAME_STYLE & ~wx.RESIZE_BORDER & ~wx.MAXIMIZE_BOX + self.process = process + image = self.process.bitmap_queue.get() + + super().__init__(None, -1, process.camera.info.device or 'Camera Preview', style=style) + self.Bind(wx.EVT_WINDOW_DESTROY, self.on_close) + self.panel = Panel(self, process, width=image.width, height=image.height) + self.Fit() + + def on_close(self, *_, **__): + self.process.close() + + +# vim:sw=4:ts=4:et: diff --git a/platypush/plugins/camera/pi.py b/platypush/plugins/camera/pi.py deleted file mode 100644 index de96bdb9..00000000 --- a/platypush/plugins/camera/pi.py +++ /dev/null @@ -1,579 +0,0 @@ -""" -.. moduleauthor:: Fabio Manganiello -""" - -import os -import socket -import threading -import time - -from typing import Optional - -from platypush.plugins import action -from platypush.plugins.camera import CameraPlugin, StreamingOutput - - -class CameraPiPlugin(CameraPlugin): - """ - Plugin to control a Pi camera. 
- - Requires: - - * **picamera** (``pip install picamera``) - * **numpy** (``pip install numpy``) - - """ - - _default_resolution = (800, 600) - _default_listen_port = 5000 - - def __init__(self, resolution=(_default_resolution[0], _default_resolution[1]), framerate=24, - hflip=False, vflip=False, sharpness=0, contrast=0, brightness=50, video_stabilization=False, iso=0, - exposure_compensation=0, exposure_mode='auto', meter_mode='average', awb_mode='auto', - image_effect='none', color_effects=None, rotation=0, zoom=(0.0, 0.0, 1.0, 1.0), - listen_port: int = _default_listen_port, **kwargs): - """ - See https://www.raspberrypi.org/documentation/usage/camera/python/README.md - for a detailed reference about the Pi camera options. - - :param listen_port: Default port that will be used for streaming the feed (default: 5000) - """ - super().__init__(**kwargs) - - self.camera_args = { - 'resolution': tuple(resolution), - 'framerate': framerate, - 'hflip': hflip, - 'vflip': vflip, - 'sharpness': sharpness, - 'contrast': contrast, - 'brightness': brightness, - 'video_stabilization': video_stabilization, - 'iso': iso, - 'exposure_compensation': exposure_compensation, - 'exposure_mode': exposure_mode, - 'meter_mode': meter_mode, - 'awb_mode': awb_mode, - 'image_effect': image_effect, - 'color_effects': color_effects, - 'rotation': rotation, - 'zoom': tuple(zoom), - } - - self._camera = None - self.listen_port = listen_port - - self._time_lapse_thread = None - self._recording_thread = None - self._streaming_thread = None - self._capturing_thread = None - self._time_lapse_stop_condition = threading.Condition() - self._recording_stop_condition = threading.Condition() - self._can_stream = False - self._can_capture = False - - # noinspection PyUnresolvedReferences,PyPackageRequirements - def _get_camera(self, **opts): - if self._camera and not self._camera.closed: - return self._camera - - import picamera - self._camera = picamera.PiCamera() - - for (attr, value) in 
self.camera_args.items(): - setattr(self._camera, attr, value) - for (attr, value) in opts.items(): - setattr(self._camera, attr, value) - - return self._camera - - @action - def close(self): - """ - Close an active connection to the camera. - """ - import picamera - - if self._output and self._camera: - try: - self._camera.stop_recording() - except picamera.PiCameraNotRecording: - pass - - if self._camera and not self._camera.closed: - try: - self._camera.close() - except picamera.PiCameraClosed: - pass - - self._camera = None - - @action - def start_preview(self, **opts): - """ - Start camera preview. - - :param opts: Extra options to pass to the camera (see - https://www.raspberrypi.org/documentation/usage/camera/python/README.md) - """ - camera = self._get_camera(**opts) - camera.start_preview() - - @action - def stop_preview(self): - """ - Stop camera preview. - """ - camera = self._get_camera() - try: - camera.stop_preview() - except Exception as e: - self.logger.warning(str(e)) - - @action - def take_picture(self, image_file, preview=False, warmup_time=2, resize=None, close=True, **opts): - """ - Take a picture. - - :param image_file: Path where the output image will be stored. - :type image_file: str - - :param preview: Show a preview before taking the picture (default: False) - :type preview: bool - - :param warmup_time: Time before taking the picture (default: 2 seconds) - :type warmup_time: float - - :param resize: Set if you want to resize the picture to a new format - :type resize: list or tuple (with two elements) - - :param opts: Extra options to pass to the camera (see - https://www.raspberrypi.org/documentation/usage/camera/python/README.md) - - :param close: If True (default) close the connection to the camera after capturing, - otherwise keep the connection open (e.g. if you want to take a sequence of pictures). - If you set close=False you should remember to call ``close`` when you don't need - the connection anymore. 
- - :return: dict:: - - {"image_file": path_to_the_image} - - """ - - camera = None - - try: - camera = self._get_camera(**opts) - image_file = os.path.abspath(os.path.expanduser(image_file)) - - if preview: - camera.start_preview() - - if warmup_time: - time.sleep(warmup_time) - - capture_opts = {} - if resize: - capture_opts['resize'] = tuple(resize) - - camera.capture(image_file, **capture_opts) - - if preview: - camera.stop_preview() - - return {'image_file': image_file} - finally: - if camera and close: - self.close() - - def _raw_capture(self): - import numpy as np - resolution = self.camera_args['resolution'] - camera = self._get_camera() - - while self._can_capture: - shape = (resolution[1] + (resolution[1]%16), - resolution[0] + (resolution[0]%32), - 3) - - frame = np.empty(shape, dtype=np.uint8) - camera.capture(frame, 'bgr') - frame.reshape((shape[0], shape[1], 3)) - self._output.write(frame) - - def __enter__(self): - camera = self._get_camera() - self._output = StreamingOutput(raw=self.stream_raw_frames) - self._can_capture = True - - if self.stream_raw_frames: - self._capturing_thread = threading.Thread(target=self._raw_capture) - self._capturing_thread.start() - else: - camera.start_recording(self._output, format='mjpeg') - - def __exit__(self, exc_type, exc_val, exc_tb): - self._can_capture = False - if self._capturing_thread: - self._capturing_thread.join() - self._capturing_thread = None - - self.close() - - @action - def capture_sequence(self, n_images, directory, name_format='image_%04d.jpg', preview=False, warmup_time=2, - resize=None, **opts): - """ - Capture a sequence of images - - :param n_images: Number of images to capture - :type n_images: int - - :param directory: Path where the images will be stored - :type directory: str - - :param name_format: Format for the name of the stored images. 
Use %d or any other format string for representing - the image index (default: image_%04d.jpg) - :type name_format: str - - :param preview: Show a preview before taking the picture (default: False) - :type preview: bool - - :param warmup_time: Time before taking the picture (default: 2 seconds) - :type warmup_time: float - - :param resize: Set if you want to resize the picture to a new format - :type resize: list or tuple (with two elements) - - :param opts: Extra options to pass to the camera (see - https://www.raspberrypi.org/documentation/usage/camera/python/README.md) - - :return: dict:: - - {"image_files": [list of captured images]} - - """ - - try: - camera = self._get_camera(**opts) - directory = os.path.abspath(os.path.expanduser(directory)) - - if preview: - camera.start_preview() - - if warmup_time: - time.sleep(warmup_time) - camera.exposure_mode = 'off' - - camera.shutter_speed = camera.exposure_speed - g = camera.awb_gains - camera.awb_mode = 'off' - camera.awb_gains = g - capture_opts = {} - - if resize: - capture_opts['resize'] = tuple(resize) - - images = [os.path.join(directory, name_format % (i+1)) for i in range(0, n_images)] - camera.capture_sequence(images, **capture_opts) - - if preview: - camera.stop_preview() - - return {'image_files': images} - finally: - self.close() - - @action - def start_time_lapse(self, directory, n_images=None, interval=0, warmup_time=2, - resize=None, **opts): - """ - Start a time lapse capture - - :param directory: Path where the images will be stored - :type directory: str - - :param n_images: Number of images to capture (default: None, capture until stop_time_lapse) - :type n_images: int - - :param interval: Interval in seconds between two pictures (default: 0) - :type interval: float - - :param warmup_time: Time before taking the picture (default: 2 seconds) - :type warmup_time: float - - :param resize: Set if you want to resize the picture to a new format - :type resize: list or tuple (with two elements) - - 
:param opts: Extra options to pass to the camera (see - https://www.raspberrypi.org/documentation/usage/camera/python/README.md) - """ - - if self._time_lapse_thread: - return None, 'A time lapse thread is already running' - - camera = self._get_camera(**opts) - directory = os.path.abspath(os.path.expanduser(directory)) - - if warmup_time: - time.sleep(warmup_time) - - capture_opts = {} - if resize: - capture_opts['resize'] = tuple(resize) - - def capture_thread(): - try: - self.logger.info('Starting time lapse recording to directory {}'.format(directory)) - i = 0 - - for filename in camera.capture_continuous(os.path.join(directory, 'image_{counter:04d}.jpg')): - i += 1 - self.logger.info('Captured {}'.format(filename)) - - if n_images and i >= n_images: - break - - self._time_lapse_stop_condition.acquire() - should_stop = self._time_lapse_stop_condition.wait(timeout=interval) - self._time_lapse_stop_condition.release() - - if should_stop: - break - finally: - self._time_lapse_thread = None - self.logger.info('Stopped time lapse recording') - - self._time_lapse_thread = threading.Thread(target=capture_thread) - self._time_lapse_thread.start() - - @action - def stop_time_lapse(self): - """ - Stop a time lapse sequence if it's running - """ - - if not self._time_lapse_thread: - self.logger.info('No time lapse thread is running') - return - - self._time_lapse_stop_condition.acquire() - self._time_lapse_stop_condition.notify_all() - self._time_lapse_stop_condition.release() - - if self._time_lapse_thread: - self._time_lapse_thread.join() - - # noinspection PyMethodOverriding - @action - def start_recording(self, video_file=None, directory=None, name_format='video_%04d.h264', duration=None, - split_duration=None, **opts): - """ - Start recording to a video file or to multiple video files - - :param video_file: Path of the video file, if you want to keep the recording all in one file - :type video_file: str - - :param directory: Path of the directory that will store the 
video files, if you want to split the recording - on multiple files. Note that you need to specify either video_file (to save the recording to one single - file) or directory (to split the recording on multiple files) - :type directory: str - - :param name_format: If you're splitting the recording to multiple files, then you can specify the name format - for those files (default: 'video_%04d.h264') - on multiple files. Note that you need to specify either video_file (to save the recording to one single - file) or directory (to split the recording on multiple files) - :type name_format: str - - :param duration: Video duration in seconds (default: None, record until stop_recording is called) - :type duration: float - - :param split_duration: If you're splitting the recording to multiple files, then you should specify how long - each video should be in seconds - :type split_duration: float - - :param opts: Extra options to pass to the camera (see - https://www.raspberrypi.org/documentation/usage/camera/python/README.md) - """ - - if self._recording_thread: - return None, 'A recording thread is already running' - - multifile = not video_file - if multifile and not (directory and split_duration): - return None, 'No video_file specified for single file capture and no directory/split_duration ' + \ - 'specified for multi-file split' - - camera = self._get_camera(**opts) - video_file = os.path.abspath(os.path.expanduser(video_file)) - - def recording_thread(): - try: - if not multifile: - self.logger.info('Starting recording to video file {}'.format(video_file)) - camera.start_recording(video_file, format='h264') - self._recording_stop_condition.acquire() - self._recording_stop_condition.wait(timeout=duration) - self._recording_stop_condition.release() - self.logger.info('Video recorded to {}'.format(video_file)) - return - - self.logger.info('Starting recording video files to directory {}'.format(directory)) - i = 1 - end_time = None - timeout = split_duration - - if 
duration is not None: - end_time = time.time() + duration - timeout = min(split_duration, duration) - - camera.start_recording(name_format % i, format='h264') - self._recording_stop_condition.acquire() - self._recording_stop_condition.wait(timeout=timeout) - self._recording_stop_condition.release() - self.logger.info('Video file {} saved'.format(name_format % i)) - - while True: - i += 1 - timeout = None - - if end_time: - remaining_duration = end_time - time.time() - timeout = min(split_duration, remaining_duration) - if remaining_duration <= 0: - break - - camera.split_recording(name_format % i) - self._recording_stop_condition.acquire() - should_stop = self._recording_stop_condition.wait(timeout=timeout) - self._recording_stop_condition.release() - self.logger.info('Video file {} saved'.format(name_format % i)) - - if should_stop: - break - finally: - try: - camera.stop_recording() - except Exception as e: - self.logger.exception(e) - - self._recording_thread = None - self.logger.info('Stopped camera recording') - - self._recording_thread = threading.Thread(target=recording_thread) - self._recording_thread.start() - - @action - def stop_recording(self, **kwargs): - """ - Stop a camera recording - """ - - if not self._recording_thread: - self.logger.info('No recording thread is running') - return - - self._recording_stop_condition.acquire() - self._recording_stop_condition.notify_all() - self._recording_stop_condition.release() - - if self._recording_thread: - self._recording_thread.join() - - # noinspection PyShadowingBuiltins - @action - def start_streaming(self, listen_port: Optional[int] = None, format='h264', **opts): - """ - Start recording to a network stream - - :param listen_port: TCP listen port (default: `listen_port` configured value or 5000) - :type listen_port: int - - :param format: Video stream format (default: h264) - :type format: str - - :param opts: Extra options to pass to the camera (see - 
https://www.raspberrypi.org/documentation/usage/camera/python/README.md) - """ - - if self._streaming_thread: - return None, 'A streaming thread is already running' - - if not listen_port: - listen_port = self.listen_port - - camera = self._get_camera(**opts) - server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - server_socket.bind(('0.0.0.0', listen_port)) - server_socket.listen(1) - server_socket.settimeout(1) - - # noinspection PyBroadException - def streaming_thread(): - try: - self.logger.info('Starting streaming on port {}'.format(listen_port)) - - while self._can_stream: - try: - sock = server_socket.accept()[0] - stream = sock.makefile('wb') - self.logger.info('Accepted client connection from {}'.format(sock.getpeername())) - except socket.timeout: - continue - - try: - if stream: - camera.start_recording(stream, format=format) - while True: - camera.wait_recording(1) - except ConnectionError: - self.logger.info('Client closed connection') - finally: - if sock: - sock.close() - finally: - try: - server_socket.close() - camera.stop_recording() - except: - pass - - try: - camera.close() - except: - pass - - self._streaming_thread = None - self.logger.info('Stopped camera stream') - - self._can_stream = True - self._streaming_thread = threading.Thread(target=streaming_thread) - self._streaming_thread.start() - - @action - def stop_streaming(self): - """ - Stop a camera streaming session - """ - - if not self._streaming_thread: - self.logger.info('No recording thread is running') - return - - self._can_stream = False - - if self._streaming_thread: - self._streaming_thread.join() - - @action - def is_streaming(self): - """ - :return: True if the Pi Camera network streaming thread is running, - False otherwise. 
# --- platypush/plugins/camera/pi/__init__.py -------------------------------
import threading
import time

from typing import Optional, List, Tuple, Union

from platypush.plugins import action
from platypush.plugins.camera import CameraPlugin, Camera
from platypush.plugins.camera.pi.model import PiCameraInfo, PiCamera


class CameraPiPlugin(CameraPlugin):
    """
    Plugin to control a Pi camera.

    Requires:

        * **picamera** (``pip install picamera``)
        * **numpy** (``pip install numpy``)
        * **Pillow** (``pip install Pillow``)

    """

    _camera_class = PiCamera
    _camera_info_class = PiCameraInfo

    def __init__(self, device: int = 0, fps: float = 30., warmup_seconds: float = 2., sharpness: int = 0,
                 contrast: int = 0, brightness: int = 50, video_stabilization: bool = False, iso: int = 0,
                 exposure_compensation: int = 0, exposure_mode: str = 'auto', meter_mode: str = 'average',
                 awb_mode: str = 'auto', image_effect: str = 'none', led_pin: Optional[int] = None,
                 color_effects: Optional[Union[str, List[str]]] = None,
                 zoom: Tuple[float, float, float, float] = (0.0, 0.0, 1.0, 1.0), **camera):
        """
        See https://www.raspberrypi.org/documentation/usage/camera/python/README.md
        for a detailed reference about the Pi camera options.

        :param camera: Options for the base camera plugin (see
            :class:`platypush.plugins.camera.CameraPlugin`).
        """
        super().__init__(device=device, fps=fps, warmup_seconds=warmup_seconds, **camera)

        # Pi-specific settings are stored on the camera_info object and applied
        # to the picamera device in prepare_device().
        self.camera_info.sharpness = sharpness
        self.camera_info.contrast = contrast
        self.camera_info.brightness = brightness
        self.camera_info.video_stabilization = video_stabilization
        self.camera_info.iso = iso
        self.camera_info.exposure_compensation = exposure_compensation
        self.camera_info.meter_mode = meter_mode
        self.camera_info.exposure_mode = exposure_mode
        self.camera_info.awb_mode = awb_mode
        self.camera_info.image_effect = image_effect
        self.camera_info.color_effects = color_effects
        self.camera_info.zoom = zoom
        self.camera_info.led_pin = led_pin

    # noinspection DuplicatedCode
    def prepare_device(self, device: PiCamera):
        """
        Create and configure a ``picamera.PiCamera`` instance from the settings
        stored on ``device.info``.
        """
        # noinspection PyUnresolvedReferences
        import picamera

        camera = picamera.PiCamera(camera_num=device.info.device, resolution=device.info.resolution,
                                   framerate=device.info.fps, led_pin=device.info.led_pin)

        camera.hflip = device.info.horizontal_flip
        camera.vflip = device.info.vertical_flip
        camera.sharpness = device.info.sharpness
        camera.contrast = device.info.contrast
        camera.brightness = device.info.brightness
        camera.video_stabilization = device.info.video_stabilization
        camera.iso = device.info.iso
        camera.exposure_compensation = device.info.exposure_compensation
        camera.exposure_mode = device.info.exposure_mode
        camera.meter_mode = device.info.meter_mode
        camera.awb_mode = device.info.awb_mode
        camera.image_effect = device.info.image_effect
        camera.color_effects = device.info.color_effects
        camera.rotation = device.info.rotate or 0
        camera.zoom = device.info.zoom

        return camera

    def release_device(self, device: PiCamera):
        """
        Stop any active recording and close the underlying picamera device,
        tolerating the "already stopped"/"already closed" picamera exceptions.
        """
        # noinspection PyUnresolvedReferences
        import picamera

        if device.object:
            try:
                device.object.stop_recording()
            except picamera.PiCameraNotRecording:
                pass

        if device.object and not device.object.closed:
            try:
                device.object.close()
            except picamera.PiCameraClosed:
                pass

    def capture_frame(self, camera: Camera, *args, **kwargs):
        """
        Capture a single RGB frame into a numpy buffer and return it as a
        ``PIL.Image``.
        """
        import numpy as np
        from PIL import Image

        # picamera requires the capture buffer to be rounded UP to the nearest
        # multiple of 16 (height) and 32 (width). The previous formula
        # ``x + (x % 16)`` is not a correct round-up for all resolutions
        # (e.g. 1070 -> 1084, which is not a multiple of 16).
        height = (camera.info.resolution[1] + 15) // 16 * 16
        width = (camera.info.resolution[0] + 31) // 32 * 32

        frame = np.empty((height, width, 3), dtype=np.uint8)
        camera.object.capture(frame, 'rgb')
        return Image.fromarray(frame)

    def start_preview(self, camera: Camera):
        """
        Start camera preview.
        """
        camera.object.start_preview()

    def stop_preview(self, camera: Camera):
        """
        Stop camera preview.
        """
        try:
            camera.object.stop_preview()
        except Exception as e:
            # Stopping a preview that is not running raises; log and move on.
            self.logger.warning(str(e))

    @action
    def capture_preview(self, duration: Optional[float] = None, n_frames: Optional[int] = None, **camera) -> dict:
        """
        Start the camera preview, optionally stopping it automatically.

        :param duration: Preview duration in seconds.
        :param n_frames: Alternatively, number of frames to preview; converted
            to a duration using the camera frame rate.
        :param camera: Overrides for the camera options.
        :return: The camera status (see :meth:`.status`).
        """
        camera = self.open_device(**camera)
        self.start_preview(camera)

        if n_frames:
            # n_frames at ``fps`` frames/second last n_frames / fps seconds.
            # (The previous code multiplied by fps, producing a duration that
            # was fps^2 times too long.)
            duration = n_frames / camera.info.fps if camera.info.fps else None
        if duration:
            # The timer must be started explicitly, otherwise the preview
            # would never be stopped.
            threading.Timer(duration, lambda: self.stop_preview(camera)).start()

        return self.status()

    def streaming_thread(self, camera: Camera, stream_format: str, duration: Optional[float] = None):
        """
        Serve the camera stream to one TCP client at a time until the stream
        event is cleared or ``duration`` expires.
        """
        server_socket = self._prepare_server_socket(camera)
        sock = None
        streaming_started_time = time.time()
        self.logger.info('Starting streaming on port {}'.format(camera.info.listen_port))

        try:
            while camera.stream_event.is_set():
                if duration and time.time() - streaming_started_time >= duration:
                    break

                sock = self._accept_client(server_socket)
                if not sock:
                    continue

                try:
                    # Let picamera encode and write directly to the socket.
                    camera.object.start_recording(sock, format=stream_format)
                    while camera.stream_event.is_set():
                        camera.object.wait_recording(1)
                except ConnectionError:
                    self.logger.info('Client closed connection')
                finally:
                    if sock:
                        sock.close()
        finally:
            self._cleanup_stream(camera, server_socket, sock)

            try:
                camera.object.stop_recording()
            except Exception as e:
                self.logger.warning('Error while stopping camera recording: {}'.format(str(e)))

            try:
                camera.object.close()
            except Exception as e:
                self.logger.warning('Error while closing camera: {}'.format(str(e)))

        self.logger.info('Stopped camera stream')

    @action
    def start_streaming(self, duration: Optional[float] = None, stream_format: str = 'h264', **camera) -> dict:
        """
        Start a TCP camera stream.

        :param duration: Streaming duration in seconds (default: until stopped).
        :param stream_format: Recording format passed to picamera (default: ``h264``).
        :param camera: Overrides for the camera options.
        :return: The camera status.
        """
        camera = self.open_device(stream_format=stream_format, **camera)
        return self._start_streaming(camera, duration, stream_format)


# vim:sw=4:ts=4:et:


# --- platypush/plugins/camera/pi/model.py ----------------------------------
from dataclasses import dataclass


@dataclass
class PiCameraInfo(CameraInfo):
    """Pi-camera-specific settings on top of the generic camera info."""
    sharpness: int = 0
    contrast: int = 0
    brightness: int = 50
    video_stabilization: bool = False
    iso: int = 0
    exposure_compensation: int = 0
    exposure_mode: str = 'auto'
    meter_mode: str = 'average'
    awb_mode: str = 'auto'
    image_effect: str = 'none'
    color_effects: Optional[Union[str, List[str]]] = None
    zoom: Tuple[float, float, float, float] = (0.0, 0.0, 1.0, 1.0)
    led_pin: Optional[int] = None

    def to_dict(self) -> dict:
        # Merge the base camera info last so shared keys keep the base values.
        return {
            'sharpness': self.sharpness,
            'contrast': self.contrast,
            'brightness': self.brightness,
            'video_stabilization': self.video_stabilization,
            'iso': self.iso,
            'exposure_compensation': self.exposure_compensation,
            'exposure_mode': self.exposure_mode,
            'meter_mode': self.meter_mode,
            'awb_mode': self.awb_mode,
            'image_effect': self.image_effect,
            'color_effects': self.color_effects,
            'zoom': self.zoom,
            'led_pin': self.led_pin,
            **super().to_dict()
        }


class PiCamera(Camera):
    # Narrow the info attribute to the Pi-specific camera info type.
    info: PiCameraInfo


# vim:sw=4:ts=4:et:
b/platypush/plugins/qrcode.py @@ -30,7 +30,7 @@ class QrcodePlugin(Plugin): def __init__(self, camera_plugin: Optional[str] = None, **kwargs): """ :param camera_plugin: Name of the plugin that will be used as a camera to capture images (e.g. - ``camera`` or ``camera.pi``). + ``camera.cv`` or ``camera.pi``). """ super().__init__(**kwargs) self.camera_plugin = camera_plugin @@ -104,6 +104,8 @@ class QrcodePlugin(Plugin): def _convert_frame(self, frame): import numpy as np + from PIL import Image + assert isinstance(frame, np.ndarray), \ 'Image conversion only works with numpy arrays for now (got {})'.format(type(frame)) mode = 'RGB' diff --git a/platypush/utils/__init__.py b/platypush/utils/__init__.py index 1a11a2d4..3bfe60fe 100644 --- a/platypush/utils/__init__.py +++ b/platypush/utils/__init__.py @@ -76,6 +76,23 @@ def get_plugin_class_by_name(plugin_name): return None +def get_plugin_name_by_class(plugin) -> str: + """Gets the common name of a plugin (e.g. "music.mpd" or "media.vlc") given its class. """ + + from platypush.plugins import Plugin + + if isinstance(plugin, Plugin): + plugin = plugin.__class__ + + class_name = plugin.__name__ + class_tokens = [ + token.lower() for token in re.sub(r'([A-Z])', r' \1', class_name).split(' ') + if token.strip() and token != 'Plugin' + ] + + return '.'.join(class_tokens) + + def set_timeout(seconds, on_timeout): """ Set a function to be called if timeout expires without being cleared. 
diff --git a/requirements.txt b/requirements.txt index a9f9bdb6..e44b8b86 100644 --- a/requirements.txt +++ b/requirements.txt @@ -81,6 +81,7 @@ zeroconf # Support for the RaspberryPi camera module # picamera +# Pillow # Support for torrents download # python-libtorrent @@ -298,3 +299,9 @@ croniter # Support for NextCloud integration # git+https://github.com/EnterpriseyIntranet/nextcloud-API.git + +# Support for FFmpeg integration +# ffmpeg-python + +# Generic support for cameras +# Pillow diff --git a/setup.py b/setup.py index 107df102..1b8fb04d 100755 --- a/setup.py +++ b/setup.py @@ -194,8 +194,10 @@ setup( 'youtube': ['youtube-dl'], # Support for torrents download 'torrent': ['python-libtorrent'], + # Generic support for cameras + 'camera': ['numpy', 'Pillow'], # Support for RaspberryPi camera - 'picamera': ['picamera', 'numpy'], + 'picamera': ['picamera', 'numpy', 'Pillow'], # Support for inotify file monitors 'inotify': ['inotify'], # Support for Google Assistant @@ -264,8 +266,8 @@ setup( 'pwm3901': ['pwm3901'], # Support for MLX90640 thermal camera 'mlx90640': ['Pillow'], - # Support for machine learning and CV plugin - 'cv': ['cv2', 'numpy'], + # Support for machine learning models and cameras over OpenCV + 'cv': ['cv2', 'numpy', 'Pillow'], # Support for the generation of HTML documentation from docstring 'htmldoc': ['docutils'], # Support for Node-RED integration @@ -334,5 +336,7 @@ setup( 'imap': ['imapclient'], # Support for NextCloud integration 'nextcloud': ['nextcloud-API @ git+https://github.com/EnterpriseyIntranet/nextcloud-API.git'], + # Support for FFmpeg integration + 'ffmpeg': ['ffmpeg-python'], }, )