Camera plugins refactor

Fabio Manganiello 2020-09-19 00:50:22 +02:00
parent c0f7cc0782
commit 09f9e974b1
44 changed files with 2251 additions and 1516 deletions

View file

@@ -19,6 +19,10 @@ class CameraPiBackend(Backend):
* **picamera** (``pip install picamera``)
* **redis** (``pip install redis``) for inter-process communication with the camera process
This backend is **DEPRECATED**. Use the plugin :class:`platypush.plugins.camera.pi.CameraPiPlugin` instead to run
Pi camera actions. If you want to start streaming the camera on application start, simply create an event hook
on :class:`platypush.message.event.application.ApplicationStartedEvent` that runs ``camera.pi.start_streaming``.
"""
class CameraAction(Enum):

View file

@@ -1,9 +1,12 @@
from flask import Response, Blueprint
from platypush.plugins.camera import CameraPlugin
import json
from typing import Optional
from flask import Response, Blueprint, request
from platypush import Config
from platypush.backend.http.app import template_folder
from platypush.backend.http.app.utils import authenticate, send_request
from platypush.backend.http.app.utils import authenticate
from platypush.context import get_plugin
from platypush.plugins.camera import CameraPlugin, Camera, StreamWriter
camera = Blueprint('camera', __name__, template_folder=template_folder)
@@ -13,75 +16,95 @@ __routes__ = [
]
def get_device_id(device_id=None):
if device_id is None:
device_id = int(send_request(action='camera.get_default_device_id').output)
return device_id
def get_camera(plugin: str) -> CameraPlugin:
return get_plugin('camera.' + plugin)
def get_camera(device_id=None):
device_id = get_device_id(device_id)
camera_conf = Config.get('camera') or {}
camera_conf['device_id'] = device_id
return CameraPlugin(**camera_conf)
def get_frame(session: Camera, timeout: Optional[float] = None) -> bytes:
with session.stream.ready:
session.stream.ready.wait(timeout=timeout)
return session.stream.frame
def get_frame(device_id=None):
cam = get_camera(device_id)
with cam:
frame = None
for _ in range(cam.warmup_frames):
output = cam.get_stream()
with output.ready:
output.ready.wait()
frame = output.frame
return frame
def video_feed(device_id=None):
cam = get_camera(device_id)
with cam:
def feed(plugin: str, **kwargs):
plugin = get_camera(plugin)
with plugin.open(stream=True, **kwargs) as session:
plugin.start_camera(session)
while True:
output = cam.get_stream()
with output.ready:
output.ready.wait()
frame = output.frame
if frame and len(frame):
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
frame = get_frame(session, timeout=5.0)
if frame:
yield frame
@camera.route('/camera/<device_id>/frame', methods=['GET'])
def get_args(kwargs):
kwargs = kwargs.copy()
if 't' in kwargs:
del kwargs['t']
for k, v in kwargs.items():
if k == 'resolution':
v = json.loads('[{}]'.format(v))
else:
# noinspection PyBroadException
try:
v = int(v)
except:
# noinspection PyBroadException
try:
v = float(v)
except:
pass
kwargs[k] = v
return kwargs
@camera.route('/camera/<plugin>/photo.<extension>', methods=['GET'])
@authenticate()
def get_camera_frame(device_id):
frame = get_frame(device_id)
return Response(frame, mimetype='image/jpeg')
def get_photo(plugin, extension):
plugin = get_camera(plugin)
extension = 'jpeg' if extension in ('jpg', 'jpeg') else extension
with plugin.open(stream=True, stream_format=extension, frames_dir=None, **get_args(request.args)) as session:
plugin.start_camera(session)
frame = None
for _ in range(session.info.warmup_frames):
frame = get_frame(session)
return Response(frame, mimetype=session.stream.mimetype)
@camera.route('/camera/frame', methods=['GET'])
@camera.route('/camera/<plugin>/video.<extension>', methods=['GET'])
@authenticate()
def get_default_camera_frame():
frame = get_frame()
return Response(frame, mimetype='image/jpeg')
def get_video(plugin, extension):
stream_class = StreamWriter.get_class_by_name(extension)
return Response(feed(plugin, stream_format=extension, frames_dir=None, **get_args(request.args)),
mimetype=stream_class.mimetype)
@camera.route('/camera/<device_id>/stream', methods=['GET'])
@camera.route('/camera/<plugin>/photo', methods=['GET'])
@authenticate()
def get_stream_feed(device_id):
return Response(video_feed(device_id),
mimetype='multipart/x-mixed-replace; boundary=frame')
def get_photo_default(plugin):
return get_photo(plugin, 'jpeg')
@camera.route('/camera/stream', methods=['GET'])
@camera.route('/camera/<plugin>/video', methods=['GET'])
@authenticate()
def get_default_stream_feed():
return Response(video_feed(),
mimetype='multipart/x-mixed-replace; boundary=frame')
def get_video_default(plugin):
return get_video(plugin, 'mjpeg')
@camera.route('/camera/<plugin>/frame', methods=['GET'])
@authenticate()
def get_photo_deprecated(plugin):
return get_photo_default(plugin)
@camera.route('/camera/<plugin>/feed', methods=['GET'])
@authenticate()
def get_video_deprecated(plugin):
return get_video_default(plugin)
# vim:sw=4:ts=4:et:
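The routes above expose one photo and one video endpoint per camera plugin. A client-side sketch (host, port and a configured ``camera.cv`` plugin are assumptions; authentication is omitted):

    import requests

    # Grab a single 640x480 JPEG frame; get_args() parses 'resolution=640,480'
    # into [640, 480] through json.loads.
    resp = requests.get('http://localhost:8008/camera/cv/photo.jpg',
                        params={'resolution': '640,480'})
    with open('photo.jpg', 'wb') as f:
        f.write(resp.content)

    # The MJPEG feed at http://localhost:8008/camera/cv/video.mjpeg can be
    # opened directly in a browser or media player.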

View file

@@ -1,12 +1,8 @@
import os
import tempfile
from flask import Blueprint
from flask import Response, request, Blueprint, send_from_directory
from platypush import Config
from platypush.backend.http.app import template_folder
from platypush.backend.http.app.utils import authenticate, send_request
from platypush.plugins.camera.ir.mlx90640 import CameraIrMlx90640Plugin
from platypush.backend.http.app.routes.plugins.camera import get_photo, get_video
from platypush.backend.http.app.utils import authenticate
camera_ir_mlx90640 = Blueprint('camera.ir.mlx90640', __name__, template_folder=template_folder)
@@ -16,50 +12,40 @@ __routes__ = [
]
def get_feed(**_):
camera_conf = Config.get('camera.ir.mlx90640') or {}
camera = CameraIrMlx90640Plugin(**camera_conf)
@camera_ir_mlx90640.route('/camera/ir/mlx90640/photo.<extension>', methods=['GET'])
@authenticate()
def get_photo_route(extension):
return get_photo('ir.mlx90640', extension)
with camera:
while True:
output = camera.get_stream()
with output.ready:
output.ready.wait()
frame = output.frame
@camera_ir_mlx90640.route('/camera/ir/mlx90640/video.<extension>', methods=['GET'])
@authenticate()
def get_video_route(extension):
return get_video('ir.mlx90640', extension)
if frame and len(frame):
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@camera_ir_mlx90640.route('/camera/ir/mlx90640/photo', methods=['GET'])
@authenticate()
def get_photo_route_default():
return get_photo_route('jpeg')
@camera_ir_mlx90640.route('/camera/ir/mlx90640/video', methods=['GET'])
@authenticate()
def get_video_route_default():
return get_video_route('mjpeg')
@camera_ir_mlx90640.route('/camera/ir/mlx90640/frame', methods=['GET'])
@authenticate()
def get_frame_route():
f = tempfile.NamedTemporaryFile(prefix='ir_camera_frame_', suffix='.jpg', delete=False)
args = {
'grayscale': bool(int(request.args.get('grayscale', 0))),
'scale_factor': int(request.args.get('scale_factor', 1)),
'rotate': int(request.args.get('rotate', 0)),
'output_file': f.name,
}
send_request(action='camera.ir.mlx90640.capture', **args)
return send_from_directory(os.path.dirname(f.name),
os.path.basename(f.name))
def get_photo_route_deprecated():
return get_photo_route_default()
@camera_ir_mlx90640.route('/camera/ir/mlx90640/stream', methods=['GET'])
@camera_ir_mlx90640.route('/camera/ir/mlx90640/feed', methods=['GET'])
@authenticate()
def get_feed_route():
args = {
'grayscale': bool(int(request.args.get('grayscale', 0))),
'scale_factor': int(request.args.get('scale_factor', 1)),
'rotate': int(request.args.get('rotate', 0)),
'format': 'jpeg',
}
def get_video_route_deprecated():
return get_video_route_default()
return Response(get_feed(**args),
mimetype='multipart/x-mixed-replace; boundary=frame')
# vim:sw=4:ts=4:et:

View file

@@ -1 +1 @@
.camera{min-height:90%;margin-top:4%;overflow:auto;display:flex;flex-direction:column;align-items:center}.camera .camera-container{min-width:640px;min-height:480px;position:relative;background:#000;margin-bottom:1em}.camera .camera-container .frame,.camera .camera-container .no-frame{position:absolute;top:0;width:100%;height:100%}.camera .camera-container .frame{z-index:1}.camera .camera-container .no-frame{display:flex;background:rgba(0,0,0,0.1);color:#fff;align-items:center;justify-content:center;z-index:2}
.camera{min-height:90%;margin-top:4%;overflow:auto;display:flex;flex-direction:column;align-items:center}.camera .camera-container{position:relative;background:#000;margin-bottom:1em}.camera .camera-container .frame,.camera .camera-container .no-frame{position:absolute;top:0;width:100%;height:100%}.camera .camera-container .frame{z-index:1}.camera .camera-container .no-frame{display:flex;background:rgba(0,0,0,0.1);color:#fff;align-items:center;justify-content:center;z-index:2}.camera .url{width:640px;display:flex;margin:1em}.camera .url .row{width:100%;display:flex;align-items:center}.camera .url .name{width:140px}.camera .url input{width:500px;font-weight:normal}.camera .params{margin-top:1em;padding:1em;width:640px;display:flex;flex-direction:column;border:1px solid #ccc;border-radius:1em}.camera .params label{font-weight:normal}.camera .params .head{display:flex;justify-content:center}.camera .params .head label{width:100%;display:flex;justify-content:right}.camera .params .head label .name{margin-right:1em}.camera .params .body{display:flex;flex-direction:column;margin:0 0 0 -1em}.camera .params .body .row{width:100%;display:flex;align-items:center;padding:.5em}.camera .params .body .row .name{width:30%}.camera .params .body .row input{width:70%}.camera .params .body .row:nth-child(even){background:#e4e4e4}.camera .params .body .row:hover{background:#def6ea}

View file

@@ -1 +1 @@
.camera{min-height:90%;margin-top:4%;overflow:auto;display:flex;flex-direction:column;align-items:center}.camera .camera-container{min-width:640px;min-height:480px;position:relative;background:#000;margin-bottom:1em}.camera .camera-container .frame,.camera .camera-container .no-frame{position:absolute;top:0;width:100%;height:100%}.camera .camera-container .frame{z-index:1}.camera .camera-container .no-frame{display:flex;background:rgba(0,0,0,0.1);color:#fff;align-items:center;justify-content:center;z-index:2}
.camera{min-height:90%;margin-top:4%;overflow:auto;display:flex;flex-direction:column;align-items:center}.camera .camera-container{position:relative;background:#000;margin-bottom:1em}.camera .camera-container .frame,.camera .camera-container .no-frame{position:absolute;top:0;width:100%;height:100%}.camera .camera-container .frame{z-index:1}.camera .camera-container .no-frame{display:flex;background:rgba(0,0,0,0.1);color:#fff;align-items:center;justify-content:center;z-index:2}.camera .url{width:640px;display:flex;margin:1em}.camera .url .row{width:100%;display:flex;align-items:center}.camera .url .name{width:140px}.camera .url input{width:500px;font-weight:normal}.camera .params{margin-top:1em;padding:1em;width:640px;display:flex;flex-direction:column;border:1px solid #ccc;border-radius:1em}.camera .params label{font-weight:normal}.camera .params .head{display:flex;justify-content:center}.camera .params .head label{width:100%;display:flex;justify-content:right}.camera .params .head label .name{margin-right:1em}.camera .params .body{display:flex;flex-direction:column;margin:0 0 0 -1em}.camera .params .body .row{width:100%;display:flex;align-items:center;padding:.5em}.camera .params .body .row .name{width:30%}.camera .params .body .row input{width:70%}.camera .params .body .row:nth-child(even){background:#e4e4e4}.camera .params .body .row:hover{background:#def6ea}

View file

@@ -1 +1 @@
.camera{min-height:90%;margin-top:4%;overflow:auto;display:flex;flex-direction:column;align-items:center}.camera .camera-container{min-width:640px;min-height:480px;position:relative;background:#000;margin-bottom:1em}.camera .camera-container .frame,.camera .camera-container .no-frame{position:absolute;top:0;width:100%;height:100%}.camera .camera-container .frame{z-index:1}.camera .camera-container .no-frame{display:flex;background:rgba(0,0,0,0.1);color:#fff;align-items:center;justify-content:center;z-index:2}
.camera{min-height:90%;margin-top:4%;overflow:auto;display:flex;flex-direction:column;align-items:center}.camera .camera-container{position:relative;background:#000;margin-bottom:1em}.camera .camera-container .frame,.camera .camera-container .no-frame{position:absolute;top:0;width:100%;height:100%}.camera .camera-container .frame{z-index:1}.camera .camera-container .no-frame{display:flex;background:rgba(0,0,0,0.1);color:#fff;align-items:center;justify-content:center;z-index:2}.camera .url{width:640px;display:flex;margin:1em}.camera .url .row{width:100%;display:flex;align-items:center}.camera .url .name{width:140px}.camera .url input{width:500px;font-weight:normal}.camera .params{margin-top:1em;padding:1em;width:640px;display:flex;flex-direction:column;border:1px solid #ccc;border-radius:1em}.camera .params label{font-weight:normal}.camera .params .head{display:flex;justify-content:center}.camera .params .head label{width:100%;display:flex;justify-content:right}.camera .params .head label .name{margin-right:1em}.camera .params .body{display:flex;flex-direction:column;margin:0 0 0 -1em}.camera .params .body .row{width:100%;display:flex;align-items:center;padding:.5em}.camera .params .body .row .name{width:30%}.camera .params .body .row input{width:70%}.camera .params .body .row:nth-child(even){background:#e4e4e4}.camera .params .body .row:hover{background:#def6ea}

View file

@@ -4,6 +4,7 @@ $default-bg-2: #f4f5f6 !default;
$default-bg-3: #f1f3f2 !default;
$default-bg-4: #edf0ee !default;
$default-bg-5: #f8f8f8 !default;
$default-bg-6: #e4e4e4 !default;
$default-fg: black !default;
$default-fg-2: #333333 !default;
$default-fg-3: #888888 !default;

View file

@@ -0,0 +1,116 @@
@import 'common/vars';
.camera {
min-height: 90%;
margin-top: 4%;
overflow: auto;
display: flex;
flex-direction: column;
align-items: center;
.camera-container {
position: relative;
background: black;
margin-bottom: 1em;
.frame, .no-frame {
position: absolute;
top: 0;
width: 100%;
height: 100%;
}
.frame {
z-index: 1;
}
.no-frame {
display: flex;
background: rgba(0, 0, 0, 0.1);
color: white;
align-items: center;
justify-content: center;
z-index: 2;
}
}
.url {
width: 640px;
display: flex;
margin: 1em;
.row {
width: 100%;
display: flex;
align-items: center;
}
.name {
width: 140px;
}
input {
width: 500px;
font-weight: normal;
}
}
.params {
margin-top: 1em;
padding: 1em;
width: 640px;
display: flex;
flex-direction: column;
border: $default-border-3;
border-radius: 1em;
label {
font-weight: normal;
}
.head {
display: flex;
justify-content: center;
label {
width: 100%;
display: flex;
justify-content: right;
.name {
margin-right: 1em;
}
}
}
.body {
display: flex;
flex-direction: column;
margin: 0 0 0 -1em;
.row {
width: 100%;
display: flex;
align-items: center;
padding: 0.5em;
.name {
width: 30%;
}
input {
width: 70%;
}
&:nth-child(even) {
background: $default-bg-6;
}
&:hover {
background: $hover-bg;
}
}
}
}
}

View file

@@ -1,39 +0,0 @@
@import 'common/vars';
.camera {
min-height: 90%;
margin-top: 4%;
overflow: auto;
display: flex;
flex-direction: column;
align-items: center;
.camera-container {
min-width: 640px;
min-height: 480px;
position: relative;
background: black;
margin-bottom: 1em;
.frame, .no-frame {
position: absolute;
top: 0;
width: 100%;
height: 100%;
}
.frame {
z-index: 1;
}
.no-frame {
display: flex;
background: rgba(0, 0, 0, 0.1);
color: white;
align-items: center;
justify-content: center;
z-index: 2;
}
}
}

View file

@@ -0,0 +1,15 @@
Vue.component('camera-cv', {
template: '#tmpl-camera-cv',
mixins: [cameraMixin],
methods: {
startStreaming: function() {
this._startStreaming('cv');
},
capture: function() {
this._capture('cv');
},
},
});

View file

@@ -1,59 +1,19 @@
Vue.component('camera-ir-mlx90640', {
template: '#tmpl-camera-ir-mlx90640',
props: ['config'],
data: function() {
return {
bus: new Vue({}),
capturing: false,
rotate: this.config.rotate || 0,
grayscale: false,
};
},
mixins: [cameraMixin],
methods: {
startStreaming: async function() {
if (this.capturing)
return;
this.capturing = true;
this.$refs.frame.setAttribute('src', '/camera/ir/mlx90640/stream?rotate='
+ this.rotate + '&grayscale=' + (this.grayscale ? 1 : 0) + '&t='
+ (new Date()).getTime());
startStreaming: function() {
this._startStreaming('ir.mlx90640');
},
stopStreaming: async function() {
await request('camera.ir.mlx90640.stop');
this.$refs.frame.removeAttribute('src');
this.capturing = false;
},
onRotationChange: function() {
this.rotate = parseInt(this.$refs.rotate.value);
const cameraContainer = this.$el.querySelector('.camera-container');
switch (this.rotate) {
case 0:
case 180:
cameraContainer.style.width = '640px';
cameraContainer.style.minWidth = '640px';
cameraContainer.style.height = '480px';
cameraContainer.style.minHeight = '480px';
break;
case 90:
case 270:
cameraContainer.style.width = '480px';
cameraContainer.style.minWidth = '480px';
cameraContainer.style.height = '640px';
cameraContainer.style.minHeight = '640px';
break;
}
capture: function() {
this._capture('ir.mlx90640');
},
},
mounted: function() {
this.onRotationChange();
},
this.attrs.resolution = [32, 24];
}
});

View file

@@ -1,5 +1,4 @@
Vue.component('camera', {
template: '#tmpl-camera',
var cameraMixin = {
props: ['config'],
data: function() {
@@ -7,23 +6,57 @@ Vue.component('camera', {
bus: new Vue({}),
streaming: false,
capturing: false,
showParams: false,
url: null,
attrs: {
resolution: this.config.resolution || [640, 480],
device: this.config.device,
horizontal_flip: this.config.horizontal_flip || 0,
vertical_flip: this.config.vertical_flip || 0,
rotate: this.config.rotate || 0,
scale_x: this.config.scale_x || 1.0,
scale_y: this.config.scale_y || 1.0,
fps: this.config.fps || 16.0,
grayscale: this.config.grayscale || 0,
stream_format: this.config.stream_format || 'mjpeg',
},
};
},
computed: {
deviceId: function() {
return this.config.device_id || 0;
params: function() {
return {
resolution: this.attrs.resolution,
device: this.attrs.device != null && ('' + this.attrs.device).length > 0 ? this.attrs.device : null,
horizontal_flip: parseInt(0 + this.attrs.horizontal_flip),
vertical_flip: parseInt(0 + this.attrs.vertical_flip),
rotate: parseFloat(this.attrs.rotate),
scale_x: parseFloat(this.attrs.scale_x),
scale_y: parseFloat(this.attrs.scale_y),
fps: parseFloat(this.attrs.fps),
grayscale: parseInt(0 + this.attrs.grayscale),
};
},
window: function() {
return window;
},
},
methods: {
startStreaming: function() {
getUrl: function(plugin, action) {
return '/camera/' + plugin + '/' + action + '?' +
Object.entries(this.params).filter(([k, v]) => v != null && ('' + v).length > 0)
.map(([k, v]) => k + '=' + v).join('&');
},
_startStreaming: function(plugin) {
if (this.streaming)
return;
this.streaming = true;
this.capturing = false;
this.$refs.frame.setAttribute('src', '/camera/' + this.deviceId + '/stream');
this.url = this.getUrl(plugin, 'video.' + this.attrs.stream_format);
},
stopStreaming: function() {
@@ -32,16 +65,16 @@ Vue.component('camera', {
this.streaming = false;
this.capturing = false;
this.$refs.frame.removeAttribute('src');
this.url = null;
},
capture: function() {
_capture: function(plugin) {
if (this.capturing)
return;
this.streaming = false;
this.capturing = true;
this.$refs.frame.setAttribute('src', '/camera/' + this.deviceId + '/frame?t=' + (new Date()).getTime());
this.url = this.getUrl(plugin, 'photo.jpg') + '&t=' + (new Date()).getTime();
},
onFrameLoaded: function(event) {
@@ -49,10 +82,22 @@ Vue.component('camera', {
this.capturing = false;
}
},
onDeviceChanged: function(event) {},
onFlipChanged: function(event) {},
onSizeChanged: function(event) {
const degToRad = (deg) => (deg * Math.PI)/180;
const rot = degToRad(this.params.rotate);
this.$refs.frameContainer.style.width = Math.round(this.params.scale_x * Math.abs(this.params.resolution[0] * Math.cos(rot) + this.params.resolution[1] * Math.sin(rot))) + 'px';
this.$refs.frameContainer.style.height = Math.round(this.params.scale_y * Math.abs(this.params.resolution[0] * Math.sin(rot) + this.params.resolution[1] * Math.cos(rot))) + 'px';
},
onFpsChanged: function(event) {},
onGrayscaleChanged: function(event) {},
},
mounted: function() {
this.$refs.frame.addEventListener('load', this.onFrameLoaded);
this.onSizeChanged();
},
});
};
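With the default ``attrs`` above, ``getUrl()`` yields a stream URL like ``/camera/cv/video.mjpeg?resolution=640,480&horizontal_flip=0&vertical_flip=0&rotate=0&scale_x=1&scale_y=1&fps=16&grayscale=0`` (``device`` is dropped because it is empty), which the server-side ``get_args()`` parses back into typed camera parameters.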

View file

@@ -2,6 +2,7 @@
with pluginIcons = {
'camera': 'fas fa-camera',
'camera.android.ipcam': 'fab fa-android',
'camera.cv': 'fas fa-camera',
'camera.pi': 'fab fa-raspberry-pi',
'camera.ir.mlx90640': 'fas fa-sun',
'execute': 'fas fa-play',

View file

@@ -0,0 +1,6 @@
<script type="text/javascript" src="{{ url_for('static', filename='js/plugins/camera/index.js') }}"></script>
<script type="text/x-template" id="tmpl-camera-cv">
{% include 'plugins/camera/index.html' %}
</script>

View file

@@ -1,31 +1,6 @@
<script type="text/javascript" src="{{ url_for('static', filename='js/plugins/camera/index.js') }}"></script>
<script type="text/x-template" id="tmpl-camera-ir-mlx90640">
<div class="camera">
<div class="camera-container">
<div class="no-frame" v-if="!capturing">The camera is not active</div>
<img class="frame" ref="frame">
</div>
<div class="controls">
<button type="button" @click="startStreaming" v-if="!capturing">
<i class="fa fa-play"></i>&nbsp; Start streaming
</button>
<button type="button" @click="stopStreaming" v-else>
<i class="fa fa-stop"></i>&nbsp; Stop streaming
</button>
<select ref="rotate" @change="onRotationChange" :disabled="capturing">
<option value="0" :selected="rotate == 0">0 degrees</option>
<option value="90" :selected="rotate == 90">90 degrees</option>
<option value="180" :selected="rotate == 180">180 degrees</option>
<option value="270" :selected="rotate == 270">270 degrees</option>
</select>
<input type="checkbox" :checked="grayscale" :disabled="capturing"
@change="grayscale = $event.target.checked">
Grayscale
</input>
</div>
</div>
{% include 'plugins/camera/index.html' %}
</script>

View file

@@ -1,23 +1,90 @@
<script type="text/x-template" id="tmpl-camera">
<div class="camera">
<div class="camera-container">
<div class="no-frame" v-if="!streaming && !capturing">The camera is not active</div>
<img class="frame" ref="frame">
<div class="camera">
<div class="camera-container" ref="frameContainer">
<div class="no-frame" v-if="!streaming && !capturing">The camera is not active</div>
<img class="frame" :src="url" ref="frame">
</div>
<div class="controls">
<button type="button" @click="startStreaming" :disabled="capturing" v-if="!streaming">
<i class="fa fa-play"></i>&nbsp; Start streaming
</button>
<button type="button" @click="stopStreaming" :disabled="capturing" v-else>
<i class="fa fa-stop"></i>&nbsp; Stop streaming
</button>
<button type="button" @click="capture" :disabled="streaming || capturing">
<i class="fas fa-camera"></i>&nbsp; Take snapshot
</button>
</div>
<div class="url" v-if="url && url.length">
<label class="row">
<span class="name">Stream URL</span>
<input name="url" type="text" :value="window.location.protocol + '//' + window.location.host + url"
disabled="disabled"/>
</label>
</div>
<div class="params">
<div class="head">
<label class="row">
<span class="name">Show parameters</span>
<input name="toggleParams" type="checkbox" v-model="showParams"/>
</label>
</div>
<div class="controls">
<button type="button" @click="startStreaming" :disabled="capturing" v-if="!streaming">
<i class="fa fa-play"></i>&nbsp; Start streaming
</button>
<div class="body" :class="{ hidden: !showParams }">
<label class="row">
<span class="name">Device</span>
<input name="device" type="text" v-model="attrs.device" @change="onDeviceChanged"/>
</label>
<button type="button" @click="stopStreaming" :disabled="capturing" v-else>
<i class="fa fa-stop"></i>&nbsp; Stop streaming
</button>
<label class="row">
<span class="name">Width</span>
<input name="width" type="text" v-model="attrs.resolution[0]" @change="onSizeChanged"/>
</label>
<button type="button" @click="capture" :disabled="streaming || capturing">
<i class="fas fa-camera"></i>&nbsp; Take snapshot
</button>
<label class="row">
<span class="name">Height</span>
<input name="height" type="text" v-model="attrs.resolution[1]" @change="onSizeChanged"/>
</label>
<label class="row">
<span class="name">Horizontal Flip</span>
<input name="horizontal_flip" type="checkbox" v-model="attrs.horizontal_flip" @change="onFlipChanged"/>
</label>
<label class="row">
<span class="name">Vertical Flip</span>
<input name="vertical_flip" type="checkbox" v-model="attrs.vertical_flip" @change="onFlipChanged"/>
</label>
<label class="row">
<span class="name">Rotate</span>
<input name="rotate" type="text" v-model="attrs.rotate" @change="onSizeChanged"/>
</label>
<label class="row">
<span class="name">Scale-X</span>
<input name="scale_x" type="text" v-model="attrs.scale_x" @change="onSizeChanged"/>
</label>
<label class="row">
<span class="name">Scale-Y</span>
<input name="scale_y" type="text" v-model="attrs.scale_y" @change="onSizeChanged"/>
</label>
<label class="row">
<span class="name">Frames per second</span>
<input name="fps" type="text" v-model="attrs.fps" @change="onFpsChanged"/>
</label>
<label class="row">
<span class="name">Grayscale</span>
<input name="grayscale" type="checkbox" v-model="attrs.grayscale" @change="onGrayscaleChanged"/>
</label>
</div>
</div>
</script>
</div>

View file

@@ -13,8 +13,8 @@ class CameraRecordingStartedEvent(CameraEvent):
Event triggered when a new recording starts
"""
def __init__(self, device_id, filename=None, *args, **kwargs):
super().__init__(*args, device_id=device_id, filename=filename, **kwargs)
def __init__(self, device, filename=None, *args, **kwargs):
super().__init__(*args, device=device, filename=filename, **kwargs)
class CameraRecordingStoppedEvent(CameraEvent):
@@ -22,8 +22,8 @@ class CameraRecordingStoppedEvent(CameraEvent):
Event triggered when a recording stops
"""
def __init__(self, device_id, *args, **kwargs):
super().__init__(*args, device_id=device_id, **kwargs)
def __init__(self, device, *args, **kwargs):
super().__init__(*args, device=device, **kwargs)
class CameraVideoRenderedEvent(CameraEvent):

View file

@@ -15,9 +15,7 @@ class Response(Message):
:param origin: Origin
:type origin: str
:param output: Output
:type output: str
:param errors: Errors
:type errors: list
:param id: Message ID this response refers to
:type id: str
:param timestamp: Message timestamp

File diff suppressed because it is too large

View file

@@ -0,0 +1,79 @@
from typing import Optional, Union
from platypush.plugins.camera import CameraPlugin, Camera
from platypush.plugins.camera.model.writer.cv import CvFileWriter
class CameraCvPlugin(CameraPlugin):
"""
Plugin to control generic cameras over OpenCV.
Requires:
* **opencv** (``pip install opencv-python``)
* **Pillow** (``pip install Pillow``)
"""
def __init__(self, color_transform: Optional[str] = 'COLOR_BGR2RGB', video_type: str = 'XVID',
video_writer: str = 'ffmpeg', **kwargs):
"""
:param device: Device ID (0 for the first camera, 1 for the second etc.) or path (e.g. ``/dev/video0``).
:param video_type: Default video type to use when exporting captured frames to a video file (default:
``XVID``; use ``0`` to infer the type from the video file extension). See
`here <https://docs.opencv.org/4.0.1/dd/d9e/classcv_1_1VideoWriter.html#afec93f94dc6c0b3e28f4dd153bc5a7f0>`_
for a reference on the supported types (e.g. 'MJPEG', 'XVID', 'H264', 'X264', 'AVC1' etc.)
:param color_transform: Color transformation to apply to the captured frames. See
https://docs.opencv.org/3.2.0/d7/d1b/group__imgproc__misc.html for a full list of supported color
transformations (default: "``COLOR_BGR2RGB``")
:param video_writer: Class to be used to write frames to a video file. Supported values:
- ``ffmpeg``: Use the FFmpeg writer (default, and usually more reliable - it requires ``ffmpeg``
installed).
- ``cv``: Use the native OpenCV writer.
The FFmpeg video writer requires ``scikit-video`` (``pip install scikit-video``) and ``ffmpeg``.
:param kwargs: Extra arguments to be passed up to :class:`platypush.plugins.camera.CameraPlugin`.
"""
super().__init__(color_transform=color_transform, video_type=video_type, **kwargs)
if video_writer == 'cv':
self._video_writer_class = CvFileWriter
def prepare_device(self, device: Camera):
import cv2
cam = cv2.VideoCapture(device.info.device)
if device.info.resolution and device.info.resolution[0]:
cam.set(cv2.CAP_PROP_FRAME_WIDTH, device.info.resolution[0])
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, device.info.resolution[1])
return cam
def release_device(self, device: Camera):
if device.object:
device.object.release()
device.object = None
def capture_frame(self, camera: Camera, *args, **kwargs):
import cv2
from PIL import Image
ret, frame = camera.object.read()
assert ret, 'Cannot retrieve frame from {}'.format(camera.info.device)
color_transform = camera.info.color_transform
if isinstance(color_transform, str):
color_transform = getattr(cv2, color_transform or self.camera_info.color_transform)
if color_transform:
frame = cv2.cvtColor(frame, color_transform)
return Image.fromarray(frame)
@staticmethod
def transform_frame(frame, color_transform: Union[str, int]):
return frame
# vim:sw=4:ts=4:et:
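A short usage sketch of the refactored capture API, mirroring what the HTTP routes above do (the plugin name and timeout are illustrative):

    from platypush.context import get_plugin

    plugin = get_plugin('camera.cv')
    with plugin.open(stream=True, stream_format='jpeg', frames_dir=None) as session:
        plugin.start_camera(session)
        # Wait for the stream writer to publish a frame, as in get_frame() above
        with session.stream.ready:
            session.stream.ready.wait(timeout=5.0)
        jpeg_bytes = session.stream.frame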

View file

@@ -1,12 +1,9 @@
import base64
import io
import os
import subprocess
import threading
import time
from typing import Optional, Tuple
from platypush.plugins import action
from platypush.plugins.camera import CameraPlugin, StreamingOutput
from platypush.plugins.camera import CameraPlugin, Camera
class CameraIrMlx90640Plugin(CameraPlugin):
@@ -32,189 +29,65 @@ class CameraIrMlx90640Plugin(CameraPlugin):
* **mlx90640-library** installation (see instructions above)
* **PIL** image library (``pip install Pillow``)
"""
_img_size = (32, 24)
_rotate_values = {}
def __init__(self, fps=16, skip_frames=2, scale_factor=1, rotate=0, grayscale=False, rawrgb_path=None, **kwargs):
def __init__(self, rawrgb_path: Optional[str] = None, resolution: Tuple[int, int] = (32, 24),
warmup_frames: Optional[int] = 5, **kwargs):
"""
:param fps: Frames per seconds (default: 16)
:param skip_frames: Number of frames to be skipped on sensor initialization/warmup (default: 2)
:param scale_factor: The camera outputs 32x24-pixel frames. Use scale_factor to scale them up to a larger
image (default: 1)
:param rotate: Rotation angle in degrees (default: 0)
:param grayscale: Save the image as grayscale - black pixels will be colder, white pixels warmer
(default: False = use false colors)
:param rawrgb_path: Specify it if the rawrgb executable compiled from
https://github.com/pimoroni/mlx90640-library is in another folder than
`<directory of this file>/lib/examples`.
:param resolution: Device resolution (default: 32x24).
:param warmup_frames: Number of frames to be skipped on sensor initialization/warmup (default: 5).
:param kwargs: Extra parameters to be passed to :class:`platypush.plugins.camera.CameraPlugin`.
"""
from PIL import Image
super().__init__(**kwargs)
self._rotate_values = {
90: Image.ROTATE_90,
180: Image.ROTATE_180,
270: Image.ROTATE_270,
}
super().__init__(device='mlx90640', resolution=resolution, warmup_frames=warmup_frames, **kwargs)
if not rawrgb_path:
rawrgb_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib', 'examples', 'rawrgb')
rawrgb_path = os.path.abspath(os.path.expanduser(rawrgb_path))
assert fps > 0
assert skip_frames >= 0
assert os.path.isfile(rawrgb_path)
assert os.path.isfile(rawrgb_path),\
'rawrgb executable not found. Please follow the documentation of this plugin to build it'
self.fps = fps
self.rotate = rotate
self.skip_frames = skip_frames
self.scale_factor = scale_factor
self.rawrgb_path = rawrgb_path
self.grayscale = grayscale
self._capture_proc = None
def _is_capture_proc_running(self):
def _is_capture_running(self):
return self._capture_proc is not None and self._capture_proc.poll() is None
def _get_capture_proc(self, fps):
if not self._is_capture_proc_running():
fps = fps or self.fps
self._capture_proc = subprocess.Popen([self.rawrgb_path, '{}'.format(fps)], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def prepare_device(self, device: Camera):
if not self._is_capture_running():
self._capture_proc = subprocess.Popen([self.rawrgb_path, '{}'.format(device.info.fps)],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
return self._capture_proc
def _raw_capture(self):
def release_device(self, device: Camera):
if not self._is_capture_running():
return
self._capture_proc.terminate()
self._capture_proc.kill()
self._capture_proc.wait()
self._capture_proc = None
def capture_frame(self, device: Camera, *args, **kwargs):
from PIL import Image
camera = self._get_capture_proc(fps=self.fps)
size = self._img_size
camera = self.prepare_device(device)
frame = camera.stdout.read(device.info.resolution[0] * device.info.resolution[1] * 3)
return Image.frombytes('RGB', device.info.resolution, frame)
while self._is_capture_proc_running():
frame = camera.stdout.read(size[0] * size[1] * 3)
image = Image.frombytes('RGB', size, frame)
self._output.write(frame)
if self.grayscale:
image = self._convert_to_grayscale(image)
if self.scale_factor != 1:
size = tuple(i * self.scale_factor for i in size)
image = image.resize(size, Image.ANTIALIAS)
if self.rotate:
rotate = self._rotate_values.get(int(self.rotate), 0)
image = image.transpose(rotate)
temp = io.BytesIO()
image.save(temp, format='jpeg')
self._output.write(temp.getvalue())
def __enter__(self):
self._output = StreamingOutput(raw=False)
self._capturing_thread = threading.Thread(target=self._raw_capture)
self._capturing_thread.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
# noinspection PyShadowingBuiltins
@action
def capture(self, output_file=None, frames=1, grayscale=None, fps=None, skip_frames=None, scale_factor=None,
rotate=None, format='jpeg'):
"""
Capture one or multiple frames and return them as raw RGB
:param output_file: Can be either the path to a single image file or a format string
(e.g. 'snapshots/image-{:04d}') in case of multiple frames. If not set the function will return a list of
base64 encoded representations of the raw RGB frames, otherwise the list of captured files.
:type output_file: str
:param frames: Number of frames to be captured (default: 1). If None the capture process will proceed until
`stop` is called.
:type frames: int
:param grayscale: Override the default ``grayscale`` parameter.
:type grayscale: bool
:param fps: If set it overrides the fps parameter specified on the object (default: None)
:type fps: int
:param skip_frames: If set it overrides the skip_frames parameter specified on the object (default: None)
:type skip_frames: int
:param scale_factor: If set it overrides the scale_factor parameter specified on the object (default: None)
:type scale_factor: float
:param rotate: If set it overrides the rotate parameter specified on the object (default: None)
:type rotate: int
:param format: Output image format if output_file is not specified (default: jpeg).
It can be jpg, png, gif or any format supported by PIL
:type format: str
:returns: list[str]. Each item is a base64 encoded representation of a frame in the specified format if
output_file is not set, otherwise a list with the captured image files will be returned.
"""
from PIL import Image
fps = self.fps if fps is None else fps
skip_frames = self.skip_frames if skip_frames is None else skip_frames
scale_factor = self.scale_factor if scale_factor is None else scale_factor
rotate = self._rotate_values.get(self.rotate if rotate is None else rotate, 0)
grayscale = self.grayscale if grayscale is None else grayscale
size = self._img_size
sleep_time = 1.0 / fps
captured_frames = []
n_captured_frames = 0
files = set()
camera = self._get_capture_proc(fps)
while (frames is not None and n_captured_frames < frames) or (
frames is None and self._is_capture_proc_running()):
frame = camera.stdout.read(size[0] * size[1] * 3)
if skip_frames > 0:
time.sleep(sleep_time)
skip_frames -= 1
continue
image = Image.frombytes('RGB', size, frame)
if grayscale:
image = self._convert_to_grayscale(image)
if scale_factor != 1:
size = tuple(i * scale_factor for i in size)
image = image.resize(size, Image.ANTIALIAS)
if rotate:
image = image.transpose(rotate)
if not output_file:
temp = io.BytesIO()
image.save(temp, format=format)
frame = base64.encodebytes(temp.getvalue()).decode()
captured_frames.append(frame)
else:
image_file = os.path.abspath(os.path.expanduser(output_file.format(n_captured_frames)))
image.save(image_file)
files.add(image_file)
n_captured_frames += 1
time.sleep(sleep_time)
self.stop()
return sorted([f for f in files]) if output_file else captured_frames
@staticmethod
def _convert_to_grayscale(image):
def to_grayscale(self, image):
from PIL import Image
new_image = Image.new('L', image.size)
for i in range(0, image.size[0]):
for j in range(0, image.size[1]):
r, g, b = image.getpixel((i, j))
value = int(2.0 * r - 0.5 * g - 1.5 * b)
value = int(2.0 * r - 1.125 * g - 1.75 * b)
if value > 255:
value = 255
@@ -226,16 +99,11 @@ class CameraIrMlx90640Plugin(CameraPlugin):
return new_image
@action
def stop(self):
def capture(self, output_file=None, *args, **kwargs):
"""
Stop an ongoing capture session
Back-compatibility alias for :meth:`.capture_image`.
"""
if not self._is_capture_proc_running():
return
return self.capture_image(image_file=output_file, *args, **kwargs)
self._capture_proc.terminate()
self._capture_proc.kill()
self._capture_proc.wait()
self._capture_proc = None
# vim:sw=4:ts=4:et:
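A one-shot capture through the back-compatibility action (the output path is illustrative):

    from platypush.context import get_plugin

    ir = get_plugin('camera.ir.mlx90640')
    # Delegates to capture_image(); the plugin pins the resolution to 32x24
    ir.capture(output_file='/tmp/ir-frame.jpg')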

View file

@@ -0,0 +1,101 @@
import math
import threading
from dataclasses import dataclass
from typing import Optional, Union, Tuple, Set
import numpy as np
from platypush.plugins.camera.model.writer import StreamWriter, VideoWriter, FileVideoWriter
from platypush.plugins.camera.model.writer.preview import PreviewWriter
@dataclass
class CameraInfo:
device: Optional[Union[int, str]]
resolution: Optional[Tuple[int, int]] = None
color_transform: Optional[str] = None
frames_dir: Optional[str] = None
rotate: Optional[float] = None
horizontal_flip: bool = False
vertical_flip: bool = False
scale_x: Optional[float] = None
scale_y: Optional[float] = None
warmup_frames: int = 0
warmup_seconds: float = 0.
capture_timeout: float = 20.0
fps: Optional[float] = None
grayscale: Optional[bool] = None
video_type: Optional[str] = None
stream_format: str = 'mjpeg'
listen_port: Optional[int] = None
bind_address: Optional[str] = None
def set(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def to_dict(self) -> dict:
return {
'device': self.device,
'color_transform': self.color_transform,
'frames_dir': self.frames_dir,
'rotate': self.rotate,
'horizontal_flip': self.horizontal_flip,
'vertical_flip': self.vertical_flip,
'scale_x': self.scale_x,
'scale_y': self.scale_y,
'warmup_frames': self.warmup_frames,
'warmup_seconds': self.warmup_seconds,
'capture_timeout': self.capture_timeout,
'fps': self.fps,
'grayscale': self.grayscale,
'resolution': list(self.resolution or ()),
'video_type': self.video_type,
'stream_format': self.stream_format,
'listen_port': self.listen_port,
'bind_address': self.bind_address,
}
def clone(self):
# noinspection PyArgumentList
return self.__class__(**self.to_dict())
@dataclass
class Camera:
info: CameraInfo
start_event: threading.Event = threading.Event()
stream_event: threading.Event = threading.Event()
capture_thread: Optional[threading.Thread] = None
stream_thread: Optional[threading.Thread] = None
object = None
stream: Optional[StreamWriter] = None
preview: Optional[PreviewWriter] = None
file_writer: Optional[FileVideoWriter] = None
def get_outputs(self) -> Set[VideoWriter]:
writers = set()
if self.preview and not self.preview.closed:
writers.add(self.preview)
if self.stream and not self.stream.closed:
writers.add(self.stream)
if self.file_writer and not self.file_writer.closed:
writers.add(self.file_writer)
return writers
def effective_resolution(self) -> Tuple[int, int]:
rot = (self.info.rotate or 0) * math.pi / 180
sin = math.sin(rot)
cos = math.cos(rot)
scale = np.array([[self.info.scale_x or 1., self.info.scale_y or 1.]])
resolution = np.array([[self.info.resolution[0], self.info.resolution[1]]])
rot_matrix = np.array([[sin, cos], [cos, sin]])
resolution = (scale * abs(np.cross(rot_matrix, resolution)))[0]
return int(round(resolution[0])), int(round(resolution[1]))
# vim:sw=4:ts=4:et:
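A worked example of ``effective_resolution()``, following the code above:

    # resolution=(640, 480), rotate=90 -> sin=1, cos=0
    # np.cross([[1, 0], [0, 1]], [[640, 480]]) = [480, -640] -> abs -> (480, 640)
    info = CameraInfo(device=0, resolution=(640, 480), rotate=90, scale_x=1., scale_y=1.)
    assert Camera(info=info).effective_resolution() == (480, 640)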

View file

@@ -0,0 +1,10 @@
class CameraException(RuntimeError):
pass
class CaptureSessionAlreadyRunningException(CameraException):
def __init__(self, device):
super().__init__('A capturing session on the device {} is already running'.format(device))
# vim:sw=4:ts=4:et:

View file

@@ -0,0 +1,135 @@
import io
import logging
import os
import threading
import time
from abc import ABC, abstractmethod
from typing import Optional, IO
from PIL.Image import Image
logger = logging.getLogger('video-writer')
class VideoWriter(ABC):
"""
Generic class interface for handling frames-to-video operations.
"""
mimetype: Optional[str] = None
def __init__(self, camera, plugin, *_, **__):
from platypush.plugins.camera import Camera, CameraPlugin
self.camera: Camera = camera
self.plugin: CameraPlugin = plugin
self.closed = False
@abstractmethod
def write(self, img: Image):
"""
Write an image to the channel.
:param img: PIL Image instance.
"""
raise NotImplementedError()
@abstractmethod
def close(self):
"""
Close the channel.
"""
if self.camera:
self.plugin.close_device(self.camera)
self.closed = True
def __enter__(self):
"""
Context manager-based interface.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Context manager-based interface.
"""
self.close()
class FileVideoWriter(VideoWriter, ABC):
"""
Abstract class to handle frames-to-video file operations.
"""
def __init__(self, *args, video_file: str, **kwargs):
VideoWriter.__init__(self, *args, **kwargs)
self.video_file = os.path.abspath(os.path.expanduser(video_file))
class StreamWriter(VideoWriter, ABC):
"""
Abstract class for camera streaming operations.
"""
def __init__(self, *args, sock: Optional[IO] = None, **kwargs):
VideoWriter.__init__(self, *args, **kwargs)
self.frame: Optional[bytes] = None
self.frame_time: Optional[float] = None
self.buffer = io.BytesIO()
self.ready = threading.Condition()
self.sock = sock
def write(self, image: Image):
data = self.encode(image)
with self.ready:
if self.buffer.closed:
return
self.buffer.truncate()
self.frame = self.buffer.getvalue()
self.frame_time = time.time()
self.ready.notify_all()
self._sock_send(self.frame)
if not self.buffer.closed:
self.buffer.seek(0)
return self.buffer.write(data)
def _sock_send(self, data):
if self.sock and data:
try:
self.sock.write(data)
except ConnectionError:
logger.warning('Client connection closed')
self.close()
@abstractmethod
def encode(self, image: Image) -> bytes:
"""
Encode an image before sending it to the channel.
:param image: PIL Image object.
:return: The bytes-encoded representation of the frame.
"""
raise NotImplementedError()
def close(self):
self.buffer.close()
if self.sock:
# noinspection PyBroadException
try:
self.sock.close()
except:
pass
super().close()
@staticmethod
def get_class_by_name(name: str):
from platypush.plugins.camera.model.writer.index import StreamHandlers
name = name.upper()
assert hasattr(StreamHandlers, name), 'No such stream handler: {}. Supported types: {}'.format(
name, [hndl.name for hndl in list(StreamHandlers)])
return getattr(StreamHandlers, name).value
# vim:sw=4:ts=4:et:
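This is the extension-to-writer lookup used by the ``/camera/<plugin>/video.<extension>`` route; for reference:

    from platypush.plugins.camera.model.writer import StreamWriter

    writer_class = StreamWriter.get_class_by_name('mjpeg')
    assert writer_class.mimetype == 'multipart/x-mixed-replace; boundary=frame'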

View file

@@ -0,0 +1,46 @@
import numpy as np
from PIL.Image import Image as ImageType
from platypush.plugins.camera.model.writer import FileVideoWriter
class CvFileWriter(FileVideoWriter):
"""
Write camera frames to a file using OpenCV.
"""
def __init__(self, *args, **kwargs):
import cv2
super(CvFileWriter, self).__init__(*args, **kwargs)
video_type = cv2.VideoWriter_fourcc(*(self.camera.info.video_type or 'xvid').upper())
resolution = (
int(self.camera.info.resolution[0] * (self.camera.info.scale_x or 1.)),
int(self.camera.info.resolution[1] * (self.camera.info.scale_y or 1.)),
)
self.writer = cv2.VideoWriter(self.video_file, video_type, self.camera.info.fps, resolution, False)
def write(self, img):
if not self.writer:
return
# noinspection PyBroadException
try:
if isinstance(img, ImageType):
# noinspection PyTypeChecker
img = np.array(img)
except:
pass
self.writer.write(img)
def close(self):
if not self.writer:
return
self.writer.release()
self.writer = None
super().close()
# vim:sw=4:ts=4:et:

View file

@@ -0,0 +1,183 @@
import logging
import subprocess
import threading
import time
from abc import ABC
from typing import Optional, List
from PIL.Image import Image
from platypush.plugins.camera.model.writer import VideoWriter, FileVideoWriter, StreamWriter
logger = logging.getLogger('ffmpeg-writer')
class FFmpegWriter(VideoWriter, ABC):
"""
Generic FFmpeg encoder for camera frames.
"""
def __init__(self, *args, input_file: str = '-', input_format: str = 'rawvideo', input_codec: Optional[str] = None,
output_file: str = '-', output_format: Optional[str] = None, output_codec: Optional[str] = None,
pix_fmt: Optional[str] = None, output_opts: Optional[List[str]] = None, **kwargs):
super().__init__(*args, **kwargs)
self.input_file = input_file
self.input_format = input_format
self.input_codec = input_codec
self.output_file = output_file
self.output_format = output_format
self.output_codec = output_codec
self.width, self.height = self.camera.effective_resolution()
self.pix_fmt = pix_fmt
self.output_opts = output_opts or []
logger.info('Starting FFmpeg. Command: {}'.format(' '.join(self.ffmpeg_args)))
self.ffmpeg = subprocess.Popen(self.ffmpeg_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
@property
def ffmpeg_args(self):
return ['ffmpeg', '-y',
'-f', self.input_format,
*(('-vcodec', self.input_codec) if self.input_codec else ()),
*(('-pix_fmt', self.pix_fmt) if self.pix_fmt else ()),
'-s', '{}x{}'.format(self.width, self.height),
'-r', str(self.camera.info.fps),
'-i', self.input_file,
*(('-f', self.output_format) if self.output_format else ()),
*self.output_opts,
*(('-vcodec', self.output_codec) if self.output_codec else ()),
self.output_file]
def is_closed(self):
return self.closed or not self.ffmpeg or self.ffmpeg.poll() is not None
def write(self, image: Image):
if self.is_closed():
return
try:
self.ffmpeg.stdin.write(image.convert('RGB').tobytes())
except Exception as e:
logger.warning('FFmpeg send error: {}'.format(str(e)))
self.close()
def close(self):
if not self.is_closed():
if self.ffmpeg and self.ffmpeg.stdin:
try:
self.ffmpeg.stdin.close()
except (IOError, OSError):
pass
if self.ffmpeg:
self.ffmpeg.terminate()
try:
self.ffmpeg.wait(timeout=5.0)
except subprocess.TimeoutExpired:
logger.warning('FFmpeg has not returned - killing it')
self.ffmpeg.kill()
if self.ffmpeg and self.ffmpeg.stdout:
try:
self.ffmpeg.stdout.close()
except (IOError, OSError):
pass
self.ffmpeg = None
super().close()
class FFmpegFileWriter(FileVideoWriter, FFmpegWriter):
"""
Write camera frames to a file using FFmpeg.
"""
def __init__(self, *args, video_file: str, **kwargs):
FileVideoWriter.__init__(self, *args, video_file=video_file, **kwargs)
FFmpegWriter.__init__(self, *args, pix_fmt='rgb24', output_file=self.video_file, **kwargs)
class FFmpegStreamWriter(StreamWriter, FFmpegWriter, ABC):
"""
Stream camera frames using FFmpeg.
"""
def __init__(self, *args, output_format: str, **kwargs):
StreamWriter.__init__(self, *args, **kwargs)
FFmpegWriter.__init__(self, *args, pix_fmt='rgb24', output_format=output_format,
output_opts=[
'-tune', 'zerolatency', '-preset', 'superfast', '-trellis', '0',
'-fflags', 'nobuffer'], **kwargs)
self._reader = threading.Thread(target=self._reader_thread)
self._reader.start()
def encode(self, image: Image) -> bytes:
return image.convert('RGB').tobytes()
def _reader_thread(self):
start_time = time.time()
while not self.is_closed():
try:
data = self.ffmpeg.stdout.read(1 << 15)
except Exception as e:
logger.warning('FFmpeg reader error: {}'.format(str(e)))
break
if not data:
continue
if self.frame is None:
latency = time.time() - start_time
logger.info('FFmpeg stream latency: {} secs'.format(latency))
with self.ready:
self.frame = data
self.frame_time = time.time()
self.ready.notify_all()
self._sock_send(self.frame)
def write(self, image: Image):
if self.is_closed():
return
data = self.encode(image)
try:
self.ffmpeg.stdin.write(data)
except Exception as e:
logger.warning('FFmpeg send error: {}'.format(str(e)))
self.close()
def close(self):
super().close()
if self._reader and self._reader.is_alive() and threading.get_ident() != self._reader.ident:
self._reader.join(timeout=5.0)
self._reader = None
class MKVStreamWriter(FFmpegStreamWriter):
mimetype = 'video/webm'
def __init__(self, *args, **kwargs):
super().__init__(*args, output_format='matroska', **kwargs)
class H264StreamWriter(FFmpegStreamWriter):
mimetype = 'video/h264'
def __init__(self, *args, **kwargs):
super().__init__(*args, output_format='h264', **kwargs)
class H265StreamWriter(FFmpegStreamWriter):
mimetype = 'video/h265'
def __init__(self, *args, **kwargs):
super().__init__(*args, output_format='h265', **kwargs)
# vim:sw=4:ts=4:et:
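For reference, ``ffmpeg_args`` for an ``MKVStreamWriter`` attached to a 640x480 camera at 16 fps expands to a command line like (values depend on the camera configuration):

    ffmpeg -y -f rawvideo -pix_fmt rgb24 -s 640x480 -r 16.0 -i - \
           -f matroska -tune zerolatency -preset superfast -trellis 0 \
           -fflags nobuffer -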

View file

@@ -0,0 +1,67 @@
import io
from abc import ABC
from PIL.Image import Image
from platypush.plugins.camera.model.writer import StreamWriter
class ImageStreamWriter(StreamWriter, ABC):
"""
Write camera frames to a stream as single JPEG items.
"""
@staticmethod
def _encode(image: Image, encoding: str, **kwargs) -> bytes:
with io.BytesIO() as buf:
image.save(buf, format=encoding, **kwargs)
return buf.getvalue()
class JPEGStreamWriter(ImageStreamWriter):
"""
Write camera frames to a stream as single JPEG items.
"""
mimetype = 'image/jpeg'
def __init__(self, *args, quality: int = 90, **kwargs):
super().__init__(*args, **kwargs)
assert 0 < quality <= 100, 'JPEG quality should be between 1 and 100'
self.quality = quality
def encode(self, image: Image) -> bytes:
return self._encode(image, 'jpeg', quality=self.quality)
class PNGStreamWriter(ImageStreamWriter):
"""
Write camera frames to a stream as single PNG items.
"""
mimetype = 'image/png'
def encode(self, image: Image) -> bytes:
return self._encode(image, 'png')
class BMPStreamWriter(ImageStreamWriter):
"""
Write camera frames to a stream as single BMP items.
"""
mimetype = 'image/bmp'
def encode(self, image: Image) -> bytes:
return self._encode(image, 'bmp')
class MJPEGStreamWriter(JPEGStreamWriter):
"""
Write camera frames to a stream as an MJPEG feed.
"""
mimetype = 'multipart/x-mixed-replace; boundary=frame'
def encode(self, image: Image) -> bytes:
return (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + super().encode(image) + b'\r\n')
# vim:sw=4:ts=4:et:

View file

@@ -0,0 +1,22 @@
from enum import Enum
from platypush.plugins.camera.model.writer.ffmpeg import MKVStreamWriter, H264StreamWriter, H265StreamWriter
from platypush.plugins.camera.model.writer.image import JPEGStreamWriter, PNGStreamWriter, BMPStreamWriter, \
MJPEGStreamWriter
class StreamHandlers(Enum):
JPG = JPEGStreamWriter
JPEG = JPEGStreamWriter
PNG = PNGStreamWriter
BMP = BMPStreamWriter
MJPEG = MJPEGStreamWriter
MJPG = MJPEGStreamWriter
MKV = MKVStreamWriter
WEBM = MKVStreamWriter
H264 = H264StreamWriter
H265 = H265StreamWriter
MP4 = H264StreamWriter
# vim:sw=4:ts=4:et:

View file

@@ -0,0 +1,31 @@
import logging
from abc import ABC
from platypush.plugins.camera.model.writer import VideoWriter
logger = logging.getLogger('cam-preview')
class PreviewWriter(VideoWriter, ABC):
"""
Abstract class for camera previews.
"""
class PreviewWriterFactory:
@staticmethod
def get(*args, **kwargs) -> PreviewWriter:
try:
import wx
# noinspection PyUnresolvedReferences
from platypush.plugins.camera.model.writer.preview.wx import WxPreviewWriter
return WxPreviewWriter(*args, **kwargs)
except ImportError:
logger.warning('wxPython not available, using ffplay as a fallback for camera previews')
from platypush.plugins.camera.model.writer.preview.ffplay import FFplayPreviewWriter
return FFplayPreviewWriter(*args, **kwargs)
# vim:sw=4:ts=4:et:

View file

@@ -0,0 +1,47 @@
import logging
import subprocess
import threading
from platypush.plugins.camera.model.writer.image import MJPEGStreamWriter
from platypush.plugins.camera.model.writer.preview import PreviewWriter
logger = logging.getLogger('cam-preview')
class FFplayPreviewWriter(PreviewWriter, MJPEGStreamWriter):
"""
General class for managing previews from camera devices or generic sources of images over ffplay.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ffplay = subprocess.Popen(['ffplay', '-'], stdin=subprocess.PIPE)
self._preview_thread = threading.Thread(target=self._ffplay_thread)
self._preview_thread.start()
def _ffplay_thread(self):
while not self.closed and self.ffplay.poll() is None:
with self.ready:
self.ready.wait(1.)
if not self.frame:
continue
try:
self.ffplay.stdin.write(self.frame)
except Exception as e:
logger.warning('ffplay write error: {}'.format(str(e)))
self.close()
break
def close(self):
if self.ffplay and self.ffplay.poll() is None:
self.ffplay.terminate()
self.camera = None
super().close()
if self._preview_thread and self._preview_thread.is_alive() and \
threading.get_ident() != self._preview_thread.ident:
self._preview_thread.join(timeout=5.0)
self._preview_thread = None
# vim:sw=4:ts=4:et:

View file

@@ -0,0 +1,68 @@
from queue import Empty
import wx
from platypush.plugins.camera.model.writer.preview import PreviewWriter
class Panel(wx.Panel):
def __init__(self, parent, process, width: int, height: int):
import wx
super().__init__(parent, -1)
self.process: PreviewWriter = process
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.SetSize(width, height)
self.Bind(wx.EVT_PAINT, self.on_paint)
self.update()
@staticmethod
def img_to_bitmap(image) -> wx.Bitmap:
import wx
return wx.Bitmap.FromBuffer(image.width, image.height, image.tobytes())
def get_bitmap(self):
try:
return self.process.bitmap_queue.get(block=True, timeout=1.0)
except Empty:
return None
def update(self):
import wx
self.Refresh()
self.Update()
wx.CallLater(15, self.update)
def create_bitmap(self):
image = self.get_bitmap()
if image is None:
return
return self.img_to_bitmap(image)
def on_paint(self, *_, **__):
import wx
bitmap = self.create_bitmap()
if not bitmap:
return
dc = wx.AutoBufferedPaintDC(self)
dc.DrawBitmap(bitmap, 0, 0)
class Frame(wx.Frame):
def __init__(self, process):
import wx
style = wx.DEFAULT_FRAME_STYLE & ~wx.RESIZE_BORDER & ~wx.MAXIMIZE_BOX
self.process = process
image = self.process.bitmap_queue.get()
super().__init__(None, -1, process.camera.info.device or 'Camera Preview', style=style)
self.Bind(wx.EVT_WINDOW_DESTROY, self.on_close)
self.panel = Panel(self, process, width=image.width, height=image.height)
self.Fit()
def on_close(self, *_, **__):
self.process.close()
# vim:sw=4:ts=4:et:

View file

@@ -0,0 +1,53 @@
import logging
from multiprocessing import Process, Queue, Event
from platypush.plugins.camera.model.writer import VideoWriter
from platypush.plugins.camera.model.writer.preview import PreviewWriter
logger = logging.getLogger('cam-preview')
class WxPreviewWriter(PreviewWriter, Process):
"""
General class for managing previews from camera devices or sources of images.
"""
def __init__(self, camera, plugin, *args, **kwargs):
Process.__init__(self, *args, **kwargs)
VideoWriter.__init__(self, camera=camera, plugin=plugin)
self.app = None
self.bitmap_queue = Queue()
self.stopped_event = Event()
def run(self) -> None:
import wx
from platypush.plugins.camera.model.writer.preview.wx.ui import Frame
self.app = wx.App()
frame = Frame(self)
frame.Center()
frame.Show()
self.app.MainLoop()
def close(self):
if not self.app:
return
self.app.ExitMainLoop()
self.app = None
self.camera.preview = None
self.bitmap_queue.close()
self.bitmap_queue = None
self.stopped_event.set()
def write(self, image):
if self.stopped_event.is_set():
return
try:
self.bitmap_queue.put(image)
except Exception as e:
logger.warning('Could not add an image to the preview queue: {}'.format(str(e)))
# vim:sw=4:ts=4:et:

View file

@@ -0,0 +1,68 @@
from queue import Empty
import wx
from platypush.plugins.camera.model.writer.preview.wx import WxPreviewWriter
class Panel(wx.Panel):
def __init__(self, parent, process, width: int, height: int):
import wx
super().__init__(parent, -1)
self.process: WxPreviewWriter = process
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.SetSize(width, height)
self.Bind(wx.EVT_PAINT, self.on_paint)
self.update()
@staticmethod
def img_to_bitmap(image) -> wx.Bitmap:
import wx
return wx.Bitmap.FromBuffer(image.width, image.height, image.tobytes())
def get_bitmap(self):
try:
return self.process.bitmap_queue.get(block=True, timeout=1.0)
except Empty:
return None
def update(self):
self.Refresh()
self.Update()
wx.CallLater(15, self.update)
def create_bitmap(self):
image = self.get_bitmap()
if image is None:
return
return self.img_to_bitmap(image)
def on_paint(self, *_, **__):
bitmap = self.create_bitmap()
if not bitmap:
return
dc = wx.AutoBufferedPaintDC(self)
dc.DrawBitmap(bitmap, 0, 0)
class Frame(wx.Frame):
def __init__(self, process):
style = wx.DEFAULT_FRAME_STYLE & ~wx.RESIZE_BORDER & ~wx.MAXIMIZE_BOX
self.process = process
image = self.process.bitmap_queue.get()
super().__init__(None, -1, str(process.camera.info.device or 'Camera Preview'), style=style)  # wx.Frame titles must be strings
self.Bind(wx.EVT_WINDOW_DESTROY, self.on_close)
self.panel = Panel(self, process, width=image.width, height=image.height)
self.Fit()
def on_close(self, *_, **__):
self.process.close()
# vim:sw=4:ts=4:et:
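Because Frame blocks on the first queue item to size the window, the UI can be exercised without a camera at all: any object exposing bitmap_queue, camera.info.device and close() will do. A hypothetical test harness (every name below is a stand-in):

from queue import Queue  # same get()/Empty semantics as multiprocessing.Queue
from types import SimpleNamespace
from PIL import Image
import wx

stub = SimpleNamespace(
    bitmap_queue=Queue(),
    camera=SimpleNamespace(info=SimpleNamespace(device='Test preview')),
    close=lambda *_: None,
)
stub.bitmap_queue.put(Image.new('RGB', (640, 480), 'black'))  # consumed by Frame to size the window
stub.bitmap_queue.put(Image.new('RGB', (640, 480), 'gray'))   # first frame painted by Panel

app = wx.App()
frame = Frame(stub)
frame.Center()
frame.Show()
app.MainLoop()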

View file

@ -1,579 +0,0 @@
"""
.. moduleauthor:: Fabio Manganiello <blacklight86@gmail.com>
"""
import os
import socket
import threading
import time
from typing import Optional
from platypush.plugins import action
from platypush.plugins.camera import CameraPlugin, StreamingOutput
class CameraPiPlugin(CameraPlugin):
"""
Plugin to control a Pi camera.
Requires:
* **picamera** (``pip install picamera``)
* **numpy** (``pip install numpy``)
"""
_default_resolution = (800, 600)
_default_listen_port = 5000
def __init__(self, resolution=(_default_resolution[0], _default_resolution[1]), framerate=24,
hflip=False, vflip=False, sharpness=0, contrast=0, brightness=50, video_stabilization=False, iso=0,
exposure_compensation=0, exposure_mode='auto', meter_mode='average', awb_mode='auto',
image_effect='none', color_effects=None, rotation=0, zoom=(0.0, 0.0, 1.0, 1.0),
listen_port: int = _default_listen_port, **kwargs):
"""
See https://www.raspberrypi.org/documentation/usage/camera/python/README.md
for a detailed reference about the Pi camera options.
:param listen_port: Default port that will be used for streaming the feed (default: 5000)
"""
super().__init__(**kwargs)
self.camera_args = {
'resolution': tuple(resolution),
'framerate': framerate,
'hflip': hflip,
'vflip': vflip,
'sharpness': sharpness,
'contrast': contrast,
'brightness': brightness,
'video_stabilization': video_stabilization,
'iso': iso,
'exposure_compensation': exposure_compensation,
'exposure_mode': exposure_mode,
'meter_mode': meter_mode,
'awb_mode': awb_mode,
'image_effect': image_effect,
'color_effects': color_effects,
'rotation': rotation,
'zoom': tuple(zoom),
}
self._camera = None
self.listen_port = listen_port
self._time_lapse_thread = None
self._recording_thread = None
self._streaming_thread = None
self._capturing_thread = None
self._time_lapse_stop_condition = threading.Condition()
self._recording_stop_condition = threading.Condition()
self._can_stream = False
self._can_capture = False
# noinspection PyUnresolvedReferences,PyPackageRequirements
def _get_camera(self, **opts):
if self._camera and not self._camera.closed:
return self._camera
import picamera
self._camera = picamera.PiCamera()
for (attr, value) in self.camera_args.items():
setattr(self._camera, attr, value)
for (attr, value) in opts.items():
setattr(self._camera, attr, value)
return self._camera
@action
def close(self):
"""
Close an active connection to the camera.
"""
import picamera
if self._output and self._camera:
try:
self._camera.stop_recording()
except picamera.PiCameraNotRecording:
pass
if self._camera and not self._camera.closed:
try:
self._camera.close()
except picamera.PiCameraClosed:
pass
self._camera = None
@action
def start_preview(self, **opts):
"""
Start camera preview.
:param opts: Extra options to pass to the camera (see
https://www.raspberrypi.org/documentation/usage/camera/python/README.md)
"""
camera = self._get_camera(**opts)
camera.start_preview()
@action
def stop_preview(self):
"""
Stop camera preview.
"""
camera = self._get_camera()
try:
camera.stop_preview()
except Exception as e:
self.logger.warning(str(e))
@action
def take_picture(self, image_file, preview=False, warmup_time=2, resize=None, close=True, **opts):
"""
Take a picture.
:param image_file: Path where the output image will be stored.
:type image_file: str
:param preview: Show a preview before taking the picture (default: False)
:type preview: bool
:param warmup_time: Time before taking the picture (default: 2 seconds)
:type warmup_time: float
:param resize: Set if you want to resize the picture to a new format
:type resize: list or tuple (with two elements)
:param opts: Extra options to pass to the camera (see
https://www.raspberrypi.org/documentation/usage/camera/python/README.md)
:param close: If True (default) close the connection to the camera after capturing,
otherwise keep the connection open (e.g. if you want to take a sequence of pictures).
If you set close=False you should remember to call ``close`` when you don't need
the connection anymore.
:return: dict::
{"image_file": path_to_the_image}
"""
camera = None
try:
camera = self._get_camera(**opts)
image_file = os.path.abspath(os.path.expanduser(image_file))
if preview:
camera.start_preview()
if warmup_time:
time.sleep(warmup_time)
capture_opts = {}
if resize:
capture_opts['resize'] = tuple(resize)
camera.capture(image_file, **capture_opts)
if preview:
camera.stop_preview()
return {'image_file': image_file}
finally:
if camera and close:
self.close()
def _raw_capture(self):
import numpy as np
resolution = self.camera_args['resolution']
camera = self._get_camera()
while self._can_capture:
# Pad the buffer up to the nearest multiples of 16 (height) and 32 (width), as picamera requires for raw captures
shape = ((resolution[1] + 15) // 16 * 16,
(resolution[0] + 31) // 32 * 32,
3)
frame = np.empty(shape, dtype=np.uint8)
camera.capture(frame, 'bgr')
self._output.write(frame)
def __enter__(self):
camera = self._get_camera()
self._output = StreamingOutput(raw=self.stream_raw_frames)
self._can_capture = True
if self.stream_raw_frames:
self._capturing_thread = threading.Thread(target=self._raw_capture)
self._capturing_thread.start()
else:
camera.start_recording(self._output, format='mjpeg')
def __exit__(self, exc_type, exc_val, exc_tb):
self._can_capture = False
if self._capturing_thread:
self._capturing_thread.join()
self._capturing_thread = None
self.close()
@action
def capture_sequence(self, n_images, directory, name_format='image_%04d.jpg', preview=False, warmup_time=2,
resize=None, **opts):
"""
Capture a sequence of images
:param n_images: Number of images to capture
:type n_images: int
:param directory: Path where the images will be stored
:type directory: str
:param name_format: Format for the name of the stored images. Use %d or any other format string for representing
the image index (default: image_%04d.jpg)
:type name_format: str
:param preview: Show a preview before taking the picture (default: False)
:type preview: bool
:param warmup_time: Time before taking the picture (default: 2 seconds)
:type warmup_time: float
:param resize: Set if you want to resize the picture to a new format
:type resize: list or tuple (with two elements)
:param opts: Extra options to pass to the camera (see
https://www.raspberrypi.org/documentation/usage/camera/python/README.md)
:return: dict::
{"image_files": [list of captured images]}
"""
try:
camera = self._get_camera(**opts)
directory = os.path.abspath(os.path.expanduser(directory))
if preview:
camera.start_preview()
if warmup_time:
time.sleep(warmup_time)
camera.exposure_mode = 'off'
camera.shutter_speed = camera.exposure_speed
g = camera.awb_gains
camera.awb_mode = 'off'
camera.awb_gains = g
capture_opts = {}
if resize:
capture_opts['resize'] = tuple(resize)
images = [os.path.join(directory, name_format % (i+1)) for i in range(0, n_images)]
camera.capture_sequence(images, **capture_opts)
if preview:
camera.stop_preview()
return {'image_files': images}
finally:
self.close()
@action
def start_time_lapse(self, directory, n_images=None, interval=0, warmup_time=2,
resize=None, **opts):
"""
Start a time lapse capture
:param directory: Path where the images will be stored
:type directory: str
:param n_images: Number of images to capture (default: None, capture until stop_time_lapse)
:type n_images: int
:param interval: Interval in seconds between two pictures (default: 0)
:type interval: float
:param warmup_time: Time before taking the picture (default: 2 seconds)
:type warmup_time: float
:param resize: Set if you want to resize the picture to a new format
:type resize: list or tuple (with two elements)
:param opts: Extra options to pass to the camera (see
https://www.raspberrypi.org/documentation/usage/camera/python/README.md)
"""
if self._time_lapse_thread:
return None, 'A time lapse thread is already running'
camera = self._get_camera(**opts)
directory = os.path.abspath(os.path.expanduser(directory))
if warmup_time:
time.sleep(warmup_time)
capture_opts = {}
if resize:
capture_opts['resize'] = tuple(resize)
def capture_thread():
try:
self.logger.info('Starting time lapse recording to directory {}'.format(directory))
i = 0
for filename in camera.capture_continuous(os.path.join(directory, 'image_{counter:04d}.jpg')):
i += 1
self.logger.info('Captured {}'.format(filename))
if n_images and i >= n_images:
break
self._time_lapse_stop_condition.acquire()
should_stop = self._time_lapse_stop_condition.wait(timeout=interval)
self._time_lapse_stop_condition.release()
if should_stop:
break
finally:
self._time_lapse_thread = None
self.logger.info('Stopped time lapse recording')
self._time_lapse_thread = threading.Thread(target=capture_thread)
self._time_lapse_thread.start()
@action
def stop_time_lapse(self):
"""
Stop a time lapse sequence if it's running
"""
if not self._time_lapse_thread:
self.logger.info('No time lapse thread is running')
return
self._time_lapse_stop_condition.acquire()
self._time_lapse_stop_condition.notify_all()
self._time_lapse_stop_condition.release()
if self._time_lapse_thread:
self._time_lapse_thread.join()
# noinspection PyMethodOverriding
@action
def start_recording(self, video_file=None, directory=None, name_format='video_%04d.h264', duration=None,
split_duration=None, **opts):
"""
Start recording to a video file or to multiple video files
:param video_file: Path of the video file, if you want to keep the recording all in one file
:type video_file: str
:param directory: Path of the directory that will store the video files, if you want to split the recording
on multiple files. Note that you need to specify either video_file (to save the recording to one single
file) or directory (to split the recording on multiple files)
:type directory: str
:param name_format: If you're splitting the recording to multiple files, then you can specify the name format
for those files (default: 'video_%04d.h264')
:type name_format: str
:param duration: Video duration in seconds (default: None, record until stop_recording is called)
:type duration: float
:param split_duration: If you're splitting the recording to multiple files, then you should specify how long
each video should be in seconds
:type split_duration: float
:param opts: Extra options to pass to the camera (see
https://www.raspberrypi.org/documentation/usage/camera/python/README.md)
"""
if self._recording_thread:
return None, 'A recording thread is already running'
multifile = not video_file
if multifile and not (directory and split_duration):
return None, 'No video_file specified for single file capture and no directory/split_duration ' + \
'specified for multi-file split'
camera = self._get_camera(**opts)
if video_file:
video_file = os.path.abspath(os.path.expanduser(video_file))
def recording_thread():
try:
if not multifile:
self.logger.info('Starting recording to video file {}'.format(video_file))
camera.start_recording(video_file, format='h264')
self._recording_stop_condition.acquire()
self._recording_stop_condition.wait(timeout=duration)
self._recording_stop_condition.release()
self.logger.info('Video recorded to {}'.format(video_file))
return
self.logger.info('Starting recording video files to directory {}'.format(directory))
i = 1
end_time = None
timeout = split_duration
if duration is not None:
end_time = time.time() + duration
timeout = min(split_duration, duration)
camera.start_recording(name_format % i, format='h264')
self._recording_stop_condition.acquire()
self._recording_stop_condition.wait(timeout=timeout)
self._recording_stop_condition.release()
self.logger.info('Video file {} saved'.format(name_format % i))
while True:
i += 1
timeout = None
if end_time:
remaining_duration = end_time - time.time()
timeout = min(split_duration, remaining_duration)
if remaining_duration <= 0:
break
camera.split_recording(name_format % i)
self._recording_stop_condition.acquire()
should_stop = self._recording_stop_condition.wait(timeout=timeout)
self._recording_stop_condition.release()
self.logger.info('Video file {} saved'.format(name_format % i))
if should_stop:
break
finally:
try:
camera.stop_recording()
except Exception as e:
self.logger.exception(e)
self._recording_thread = None
self.logger.info('Stopped camera recording')
self._recording_thread = threading.Thread(target=recording_thread)
self._recording_thread.start()
@action
def stop_recording(self, **kwargs):
"""
Stop a camera recording
"""
if not self._recording_thread:
self.logger.info('No recording thread is running')
return
self._recording_stop_condition.acquire()
self._recording_stop_condition.notify_all()
self._recording_stop_condition.release()
if self._recording_thread:
self._recording_thread.join()
# noinspection PyShadowingBuiltins
@action
def start_streaming(self, listen_port: Optional[int] = None, format='h264', **opts):
"""
Start recording to a network stream
:param listen_port: TCP listen port (default: `listen_port` configured value or 5000)
:type listen_port: int
:param format: Video stream format (default: h264)
:type format: str
:param opts: Extra options to pass to the camera (see
https://www.raspberrypi.org/documentation/usage/camera/python/README.md)
"""
if self._streaming_thread:
return None, 'A streaming thread is already running'
if not listen_port:
listen_port = self.listen_port
camera = self._get_camera(**opts)
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(('0.0.0.0', listen_port))
server_socket.listen(1)
server_socket.settimeout(1)
# noinspection PyBroadException
def streaming_thread():
try:
self.logger.info('Starting streaming on port {}'.format(listen_port))
while self._can_stream:
try:
sock = server_socket.accept()[0]
stream = sock.makefile('wb')
self.logger.info('Accepted client connection from {}'.format(sock.getpeername()))
except socket.timeout:
continue
try:
if stream:
camera.start_recording(stream, format=format)
while True:
camera.wait_recording(1)
except ConnectionError:
self.logger.info('Client closed connection')
finally:
if sock:
sock.close()
finally:
try:
server_socket.close()
camera.stop_recording()
except:
pass
try:
camera.close()
except:
pass
self._streaming_thread = None
self.logger.info('Stopped camera stream')
self._can_stream = True
self._streaming_thread = threading.Thread(target=streaming_thread)
self._streaming_thread.start()
@action
def stop_streaming(self):
"""
Stop a camera streaming session
"""
if not self._streaming_thread:
self.logger.info('No streaming thread is running')
return
self._can_stream = False
if self._streaming_thread:
self._streaming_thread.join()
@action
def is_streaming(self):
"""
:return: True if the Pi Camera network streaming thread is running,
False otherwise.
"""
return self._streaming_thread is not None
# vim:sw=4:ts=4:et:
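For reference, the removed start_streaming action served raw H.264 over a plain TCP socket, one client at a time. A minimal client could therefore be as simple as the following sketch (the host name is a placeholder; the dumped file is playable with e.g. VLC):

import socket

with socket.create_connection(('raspberrypi.local', 5000)) as sock:
    with open('stream.h264', 'wb') as f:
        while True:
            chunk = sock.recv(4096)  # raw H.264 bytes from the camera
            if not chunk:            # server closed the connection
                break
            f.write(chunk)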

View file

@ -0,0 +1,179 @@
import threading
import time
from typing import Optional, List, Tuple, Union
from platypush.plugins import action
from platypush.plugins.camera import CameraPlugin, Camera
from platypush.plugins.camera.pi.model import PiCameraInfo, PiCamera
class CameraPiPlugin(CameraPlugin):
"""
Plugin to control a Pi camera.
Requires:
* **picamera** (``pip install picamera``)
* **numpy** (``pip install numpy``)
* **Pillow** (``pip install Pillow``)
"""
_camera_class = PiCamera
_camera_info_class = PiCameraInfo
def __init__(self, device: int = 0, fps: float = 30., warmup_seconds: float = 2., sharpness: int = 0,
contrast: int = 0, brightness: int = 50, video_stabilization: bool = False, iso: int = 0,
exposure_compensation: int = 0, exposure_mode: str = 'auto', meter_mode: str = 'average',
awb_mode: str = 'auto', image_effect: str = 'none', led_pin: Optional[int] = None,
color_effects: Optional[Union[str, List[str]]] = None,
zoom: Tuple[float, float, float, float] = (0.0, 0.0, 1.0, 1.0), **camera):
"""
See https://www.raspberrypi.org/documentation/usage/camera/python/README.md
for a detailed reference about the Pi camera options.
:param camera: Options for the base camera plugin (see :class:`platypush.plugins.camera.CameraPlugin`).
"""
super().__init__(device=device, fps=fps, warmup_seconds=warmup_seconds, **camera)
self.camera_info.sharpness = sharpness
self.camera_info.contrast = contrast
self.camera_info.brightness = brightness
self.camera_info.video_stabilization = video_stabilization
self.camera_info.iso = iso
self.camera_info.exposure_compensation = exposure_compensation
self.camera_info.meter_mode = meter_mode
self.camera_info.exposure_mode = exposure_mode
self.camera_info.awb_mode = awb_mode
self.camera_info.image_effect = image_effect
self.camera_info.color_effects = color_effects
self.camera_info.zoom = zoom
self.camera_info.led_pin = led_pin
# noinspection DuplicatedCode
def prepare_device(self, device: PiCamera):
# noinspection PyUnresolvedReferences
import picamera
camera = picamera.PiCamera(camera_num=device.info.device, resolution=device.info.resolution,
framerate=device.info.fps, led_pin=device.info.led_pin)
camera.hflip = device.info.horizontal_flip
camera.vflip = device.info.vertical_flip
camera.sharpness = device.info.sharpness
camera.contrast = device.info.contrast
camera.brightness = device.info.brightness
camera.video_stabilization = device.info.video_stabilization
camera.iso = device.info.iso
camera.exposure_compensation = device.info.exposure_compensation
camera.exposure_mode = device.info.exposure_mode
camera.meter_mode = device.info.meter_mode
camera.awb_mode = device.info.awb_mode
camera.image_effect = device.info.image_effect
camera.color_effects = device.info.color_effects
camera.rotation = device.info.rotate or 0
camera.zoom = device.info.zoom
return camera
def release_device(self, device: PiCamera):
# noinspection PyUnresolvedReferences
import picamera
if device.object:
try:
device.object.stop_recording()
except picamera.PiCameraNotRecording:
pass
if device.object and not device.object.closed:
try:
device.object.close()
except picamera.PiCameraClosed:
pass
def capture_frame(self, camera: Camera, *args, **kwargs):
import numpy as np
from PIL import Image
# Pad the buffer up to the nearest multiples of 16 (height) and 32 (width), as picamera requires for raw captures
shape = ((camera.info.resolution[1] + 15) // 16 * 16,
(camera.info.resolution[0] + 31) // 32 * 32,
3)
frame = np.empty(shape, dtype=np.uint8)
camera.object.capture(frame, 'rgb')
return Image.fromarray(frame)
def start_preview(self, camera: Camera):
"""
Start camera preview.
"""
camera.object.start_preview()
def stop_preview(self, camera: Camera):
"""
Stop camera preview.
"""
try:
camera.object.stop_preview()
except Exception as e:
self.logger.warning(str(e))
@action
def capture_preview(self, duration: Optional[float] = None, n_frames: Optional[int] = None, **camera) -> dict:
camera = self.open_device(**camera)
self.start_preview(camera)
if n_frames:
# Duration in seconds is the frame count divided by the frame rate
duration = n_frames / camera.info.fps if camera.info.fps else None
if duration:
threading.Timer(duration, lambda: self.stop_preview(camera)).start()
return self.status()
def streaming_thread(self, camera: Camera, stream_format: str, duration: Optional[float] = None):
server_socket = self._prepare_server_socket(camera)
sock = None
streaming_started_time = time.time()
self.logger.info('Starting streaming on port {}'.format(camera.info.listen_port))
try:
while camera.stream_event.is_set():
if duration and time.time() - streaming_started_time >= duration:
break
sock = self._accept_client(server_socket)
if not sock:
continue
try:
camera.object.start_recording(sock, format=stream_format)
while camera.stream_event.is_set():
camera.object.wait_recording(1)
except ConnectionError:
self.logger.info('Client closed connection')
finally:
if sock:
sock.close()
finally:
self._cleanup_stream(camera, server_socket, sock)
try:
camera.object.stop_recording()
except Exception as e:
self.logger.warning('Error while stopping camera recording: {}'.format(str(e)))
try:
camera.object.close()
except Exception as e:
self.logger.warning('Error while closing camera: {}'.format(str(e)))
self.logger.info('Stopped camera stream')
@action
def start_streaming(self, duration: Optional[float] = None, stream_format: str = 'h264', **camera) -> dict:
camera = self.open_device(stream_format=stream_format, **camera)
return self._start_streaming(camera, duration, stream_format)
# vim:sw=4:ts=4:et:
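A hypothetical interactive session against the refactored plugin, using the action names and parameters defined above (get_plugin resolves the configured plugin instance):

from platypush.context import get_plugin

camera = get_plugin('camera.pi')
camera.capture_preview(duration=10)           # on-device preview for ~10 seconds
camera.start_streaming(stream_format='h264')  # H.264 feed over TCP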

View file

@ -0,0 +1,46 @@
from dataclasses import dataclass
from typing import Optional, Union, List, Tuple
from platypush.plugins.camera import CameraInfo, Camera
@dataclass
class PiCameraInfo(CameraInfo):
sharpness: int = 0
contrast: int = 0
brightness: int = 50
video_stabilization: bool = False
iso: int = 0
exposure_compensation: int = 0
exposure_mode: str = 'auto'
meter_mode: str = 'average'
awb_mode: str = 'auto'
image_effect: str = 'none'
color_effects: Optional[Union[str, List[str]]] = None
zoom: Tuple[float, float, float, float] = (0.0, 0.0, 1.0, 1.0)
led_pin: Optional[int] = None
def to_dict(self) -> dict:
return {
'sharpness': self.sharpness,
'contrast': self.contrast,
'brightness': self.brightness,
'video_stabilization': self.video_stabilization,
'iso': self.iso,
'exposure_compensation': self.exposure_compensation,
'exposure_mode': self.exposure_mode,
'meter_mode': self.meter_mode,
'awb_mode': self.awb_mode,
'image_effect': self.image_effect,
'color_effects': self.color_effects,
'zoom': self.zoom,
'led_pin': self.led_pin,
**super().to_dict()
}
class PiCamera(Camera):
info: PiCameraInfo
# vim:sw=4:ts=4:et:
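Assuming the inherited CameraInfo fields are all defaulted, as the Pi-specific ones above are, the merge performed by to_dict() can be sanity-checked like this:

info = PiCameraInfo(brightness=60, iso=400, zoom=(0.0, 0.0, 0.5, 0.5))
status = info.to_dict()
assert status['brightness'] == 60 and status['iso'] == 400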

View file

@ -30,7 +30,7 @@ class QrcodePlugin(Plugin):
def __init__(self, camera_plugin: Optional[str] = None, **kwargs):
"""
:param camera_plugin: Name of the plugin that will be used as a camera to capture images (e.g.
``camera`` or ``camera.pi``).
``camera.cv`` or ``camera.pi``).
"""
super().__init__(**kwargs)
self.camera_plugin = camera_plugin
@ -104,6 +104,8 @@ class QrcodePlugin(Plugin):
def _convert_frame(self, frame):
import numpy as np
from PIL import Image
assert isinstance(frame, np.ndarray), \
'Image conversion only works with numpy arrays for now (got {})'.format(type(frame))
mode = 'RGB'
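The conversion that _convert_frame performs boils down to wrapping the numpy array in a PIL image; a self-contained sketch (the zero array stands in for a captured frame):

import numpy as np
from PIL import Image

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a camera frame
image = Image.fromarray(frame, mode='RGB')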

View file

@ -76,6 +76,23 @@ def get_plugin_class_by_name(plugin_name):
return None
def get_plugin_name_by_class(plugin) -> str:
"""Gets the common name of a plugin (e.g. "music.mpd" or "media.vlc") given its class. """
from platypush.plugins import Plugin
if isinstance(plugin, Plugin):
plugin = plugin.__class__
class_name = plugin.__name__
class_tokens = [
token.lower() for token in re.sub(r'([A-Z])', r' \1', class_name).split(' ')
if token.strip() and token != 'Plugin'
]
return '.'.join(class_tokens)
def set_timeout(seconds, on_timeout):
"""
Set a function to be called if timeout expires without being cleared.
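For instance, get_plugin_name_by_class above splits the CamelCase class name on uppercase letters, lowercases the tokens and drops the trailing "Plugin":

from platypush.plugins.camera.pi import CameraPiPlugin

# 'CameraPiPlugin' -> ['camera', 'pi'] -> 'camera.pi'
assert get_plugin_name_by_class(CameraPiPlugin) == 'camera.pi'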

View file

@ -81,6 +81,7 @@ zeroconf
# Support for the RaspberryPi camera module
# picamera
# Pillow
# Support for torrents download
# python-libtorrent
@ -298,3 +299,9 @@ croniter
# Support for NextCloud integration
# git+https://github.com/EnterpriseyIntranet/nextcloud-API.git
# Support for FFmpeg integration
# ffmpeg-python
# Generic support for cameras
# Pillow

View file

@ -194,8 +194,10 @@ setup(
'youtube': ['youtube-dl'],
# Support for torrents download
'torrent': ['python-libtorrent'],
# Generic support for cameras
'camera': ['numpy', 'Pillow'],
# Support for RaspberryPi camera
'picamera': ['picamera', 'numpy'],
'picamera': ['picamera', 'numpy', 'Pillow'],
# Support for inotify file monitors
'inotify': ['inotify'],
# Support for Google Assistant
@ -264,8 +266,8 @@ setup(
'pwm3901': ['pwm3901'],
# Support for MLX90640 thermal camera
'mlx90640': ['Pillow'],
# Support for machine learning and CV plugin
'cv': ['cv2', 'numpy'],
# Support for machine learning models and cameras over OpenCV
'cv': ['cv2', 'numpy', 'Pillow'],
# Support for the generation of HTML documentation from docstring
'htmldoc': ['docutils'],
# Support for Node-RED integration
@ -334,5 +336,7 @@ setup(
'imap': ['imapclient'],
# Support for NextCloud integration
'nextcloud': ['nextcloud-API @ git+https://github.com/EnterpriseyIntranet/nextcloud-API.git'],
# Support for FFmpeg integration
'ffmpeg': ['ffmpeg-python'],
},
)