forked from platypush/platypush
[assistant.picovoice] Implemented mic mute/unmute handling.
parent 9de49c71a1
commit 2c197c275e
4 changed files with 139 additions and 7 deletions
@@ -69,7 +69,6 @@ class ConversationEndEvent(AssistantEvent):
         :param with_follow_on_turn: Set to true if the conversation expects a
             user follow-up, false otherwise
         """
 
         super().__init__(*args, with_follow_on_turn=with_follow_on_turn, **kwargs)
 
 
@@ -82,17 +81,25 @@ class ConversationTimeoutEvent(ConversationEndEvent):
         super().__init__(*args, **kwargs)
 
 
-class ResponseEvent(ConversationEndEvent):
+class ResponseEvent(AssistantEvent):
     """
     Event triggered when a response is processed by the assistant
     """
 
-    def __init__(self, *args, response_text: str, **kwargs):
+    def __init__(
+        self, *args, response_text: str, with_follow_on_turn: bool = False, **kwargs
+    ):
         """
         :param response_text: Response text processed by the assistant
+        :param with_follow_on_turn: Set to true if the conversation expects a
+            user follow-up, false otherwise
         """
 
-        super().__init__(*args, response_text=response_text, **kwargs)
+        super().__init__(
+            *args,
+            response_text=response_text,
+            with_follow_on_turn=with_follow_on_turn,
+            **kwargs,
+        )
 
 
 class NoResponseEvent(ConversationEndEvent):
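With this change, ResponseEvent no longer ends the conversation by inheritance: it extends AssistantEvent directly and carries its own with_follow_on_turn flag. A minimal user-script sketch of how a hook could consume it (the @when/run helpers and the hook signature follow the general Platypush hook API and are assumptions, not part of this diff):

# Hypothetical hook: log assistant responses and whether a follow-up is expected.
from platypush import run, when
from platypush.message.event.assistant import ResponseEvent


@when(ResponseEvent)
def on_assistant_response(event, **_):
    # `response_text` and `with_follow_on_turn` mirror the event arguments
    # handled by this commit.
    run('logger.info', msg=f"Assistant response: {event.args.get('response_text')}")
    if event.args.get('with_follow_on_turn'):
        run('logger.info', msg='The assistant expects a user follow-up')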
@@ -2,6 +2,7 @@ import os
 from typing import Optional, Sequence
 
 from platypush.context import get_plugin
+from platypush.message.event.assistant import MicMutedEvent, MicUnmutedEvent
 from platypush.plugins import RunnablePlugin, action
 from platypush.plugins.assistant import AssistantPlugin
 from platypush.plugins.tts.picovoice import TtsPicovoicePlugin
@@ -64,6 +65,7 @@ class AssistantPicovoicePlugin(AssistantPlugin, RunnablePlugin):
         start_conversation_on_hotword: bool = True,
         audio_queue_size: int = 100,
         conversation_timeout: Optional[float] = 7.5,
+        muted: bool = False,
         **kwargs,
     ):
         """
@@ -129,6 +131,10 @@ class AssistantPicovoicePlugin(AssistantPlugin, RunnablePlugin):
             within this time, the conversation will time out and the plugin will
             go back into hotword detection mode, if the mode is enabled. Default:
             7.5 seconds.
+        :param muted: Set to True to start the assistant in a muted state. You will
+            need to call the :meth:`.unmute` method to start the assistant listening
+            for commands, or programmatically call the :meth:`.start_conversation`
+            to start a conversation.
         """
         super().__init__(**kwargs)
         self._assistant = None
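The new muted option lets the plugin start with the microphone paused, as described in the docstring above: unmute it later to resume hotword detection, or open a conversation programmatically. A small sketch of that workflow from a user script (the run helper is the standard Platypush action runner; the function names below are illustrative, not part of the commit):

# Hypothetical helpers built on top of the actions touched by this commit.
from platypush import run


def resume_listening():
    # Resume hotword detection on an assistant configured with muted=True.
    run('assistant.picovoice.unmute')


def ask_now():
    # Skip the hotword entirely and open a conversation right away,
    # as suggested by the `muted` parameter documentation.
    run('assistant.picovoice.start_conversation')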
@@ -147,6 +153,7 @@ class AssistantPicovoicePlugin(AssistantPlugin, RunnablePlugin):
             'start_conversation_on_hotword': start_conversation_on_hotword,
             'audio_queue_size': audio_queue_size,
             'conversation_timeout': conversation_timeout,
+            'muted': muted,
             'on_conversation_start': self._on_conversation_start,
             'on_conversation_end': self._on_conversation_end,
             'on_conversation_timeout': self._on_conversation_timeout,
@@ -215,6 +222,7 @@ class AssistantPicovoicePlugin(AssistantPlugin, RunnablePlugin):
         Mute the microphone. Alias for :meth:`.set_mic_mute` with
         ``muted=True``.
         """
         return self.set_mic_mute(muted=True)
 
     @action
     def unmute(self, *_, **__):
@@ -222,6 +230,7 @@ class AssistantPicovoicePlugin(AssistantPlugin, RunnablePlugin):
         Unmute the microphone. Alias for :meth:`.set_mic_mute` with
         ``muted=False``.
         """
         return self.set_mic_mute(muted=False)
 
     @action
     def set_mic_mute(self, muted: bool):
@@ -230,12 +239,18 @@ class AssistantPicovoicePlugin(AssistantPlugin, RunnablePlugin):
 
         :param muted: Set to True or False.
         """
+        self._is_muted = muted
         if self._assistant:
             self._assistant.set_mic_mute(muted)
 
+        self._send_event(MicMutedEvent if muted else MicUnmutedEvent)
+
+    @action
+    def toggle_mute(self, *_, **__):
+        """
+        Toggle the mic mute state.
+        """
+        return self.set_mic_mute(not self._is_muted)
+
     @action
     def send_text_query(self, *_, query: str, **__):
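Since set_mic_mute now also triggers MicMutedEvent/MicUnmutedEvent, other automation can react to mute-state changes. A hook sketch (again assuming the standard @when/run user-script API; light.hue.on/off are placeholder actions for whatever indicator you use):

# Hypothetical hooks: mirror the assistant's microphone state on an indicator light.
from platypush import run, when
from platypush.message.event.assistant import MicMutedEvent, MicUnmutedEvent


@when(MicMutedEvent)
def on_mic_muted(**_):
    run('logger.info', msg='Assistant microphone muted')
    run('light.hue.on')   # placeholder "muted" indicator


@when(MicUnmutedEvent)
def on_mic_unmuted(**_):
    run('logger.info', msg='Assistant microphone unmuted')
    run('light.hue.off')  # indicator off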
@@ -47,6 +47,7 @@ class Assistant:
         enable_automatic_punctuation: bool = False,
         start_conversation_on_hotword: bool = False,
         audio_queue_size: int = 100,
+        muted: bool = False,
         conversation_timeout: Optional[float] = None,
         on_conversation_start=_default_callback,
         on_conversation_end=_default_callback,
@@ -69,6 +70,7 @@ class Assistant:
         self.enable_automatic_punctuation = enable_automatic_punctuation
         self.start_conversation_on_hotword = start_conversation_on_hotword
         self.audio_queue_size = audio_queue_size
+        self._muted = muted
         self._speech_model_path = speech_model_path
         self._speech_model_path_override = None
@@ -221,6 +223,7 @@ class Assistant:
             sample_rate=sample_rate,
             frame_size=frame_length,
             queue_size=self.audio_queue_size,
+            paused=self._muted,
             channels=1,
         )
@@ -296,6 +299,28 @@ class Assistant:
 
         raise StopIteration
 
+    def mute(self):
+        self._muted = True
+        if self._recorder:
+            self._recorder.pause()
+
+    def unmute(self):
+        self._muted = False
+        if self._recorder:
+            self._recorder.resume()
+
+    def set_mic_mute(self, mute: bool):
+        if mute:
+            self.mute()
+        else:
+            self.unmute()
+
+    def toggle_mic_mute(self):
+        if self._muted:
+            self.unmute()
+        else:
+            self.mute()
+
     def _process_hotword(self, frame):
         if not self.porcupine:
             return None
@@ -1,7 +1,8 @@
 from collections import namedtuple
+from dataclasses import dataclass, field
 from logging import getLogger
 from queue import Full, Queue
-from threading import Event
+from threading import Event, RLock
 from time import time
 from typing import Optional
@@ -13,6 +14,61 @@ from platypush.utils import wait_for_either
 AudioFrame = namedtuple('AudioFrame', ['data', 'timestamp'])
 
 
+@dataclass
+class PauseState:
+    """
+    Data class to hold the boilerplate (state + synchronization events) for the
+    audio recorder pause API.
+    """
+
+    _paused_event: Event = field(default_factory=Event)
+    _recording_event: Event = field(default_factory=Event)
+    _state_lock: RLock = field(default_factory=RLock)
+
+    @property
+    def paused(self):
+        with self._state_lock:
+            return self._paused_event.is_set()
+
+    def pause(self):
+        """
+        Pause the audio recorder.
+        """
+        with self._state_lock:
+            self._paused_event.set()
+            self._recording_event.clear()
+
+    def resume(self):
+        """
+        Resume the audio recorder.
+        """
+        with self._state_lock:
+            self._paused_event.clear()
+            self._recording_event.set()
+
+    def toggle(self):
+        """
+        Toggle the audio recorder pause state.
+        """
+        with self._state_lock:
+            if self.paused:
+                self.resume()
+            else:
+                self.pause()
+
+    def wait_paused(self, timeout: Optional[float] = None):
+        """
+        Wait until the audio recorder is paused.
+        """
+        self._paused_event.wait(timeout=timeout)
+
+    def wait_recording(self, timeout: Optional[float] = None):
+        """
+        Wait until the audio recorder is resumed.
+        """
+        self._recording_event.wait(timeout=timeout)
+
+
 class AudioRecorder:
     """
     Audio recorder component that uses the sounddevice library to record audio
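PauseState wraps two complementary events (paused/recording) behind a re-entrant lock, so one thread can flip the state while another blocks until recording is active again. A standalone sketch of that interaction, assuming the PauseState class from the hunk above is in scope (pasted into the same file or imported from the recorder module):

# Illustration only: a fake "recorder" thread that honours PauseState,
# and a controller that pauses/resumes it. Not part of the commit.
import threading
import time


def fake_recorder(pause_state: 'PauseState', stop: threading.Event):
    while not stop.is_set():
        # Block while paused; resume() sets the recording event and wakes us up.
        pause_state.wait_recording(timeout=1)
        if pause_state.paused:
            continue
        print('capturing an audio frame...')
        time.sleep(0.2)


pause_state = PauseState()
pause_state.resume()                   # start in the recording state
stop = threading.Event()
worker = threading.Thread(target=fake_recorder, args=(pause_state, stop))
worker.start()

time.sleep(1)
pause_state.pause()                    # frames stop being captured
time.sleep(1)
pause_state.toggle()                   # back to recording
time.sleep(1)
stop.set()
worker.join()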
@@ -25,6 +81,7 @@ class AudioRecorder:
         sample_rate: int,
         frame_size: int,
         channels: int,
+        paused: bool = False,
         dtype: str = 'int16',
         queue_size: int = 100,
     ):
@@ -33,6 +90,12 @@ class AudioRecorder:
         self.frame_size = frame_size
         self._stop_event = Event()
         self._upstream_stop_event = stop_event
+        self._paused_state = PauseState()
+        if paused:
+            self._paused_state.pause()
+        else:
+            self._paused_state.resume()
+
         self.stream = sd.InputStream(
             samplerate=sample_rate,
             channels=channels,
@@ -41,6 +104,10 @@ class AudioRecorder:
             callback=self._audio_callback,
         )
 
+    @property
+    def paused(self):
+        return self._paused_state.paused
+
     def __enter__(self):
         """
         Start the audio stream.
@@ -56,7 +123,7 @@ class AudioRecorder:
         self.stop()
 
     def _audio_callback(self, indata, *_):
-        if self.should_stop():
+        if self.should_stop() or self.paused:
             return
 
         try:
@@ -85,6 +152,24 @@ class AudioRecorder:
         self._stop_event.set()
         self.stream.stop()
 
+    def pause(self):
+        """
+        Pause the audio stream.
+        """
+        self._paused_state.pause()
+
+    def resume(self):
+        """
+        Resume the audio stream.
+        """
+        self._paused_state.resume()
+
+    def toggle(self):
+        """
+        Toggle the audio stream pause state.
+        """
+        self._paused_state.toggle()
+
     def should_stop(self):
         return self._stop_event.is_set() or self._upstream_stop_event.is_set()
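Note the design choice visible in _audio_callback: pausing never closes the sounddevice stream; the callback simply drops incoming frames while the recorder is paused, so resuming is instantaneous and no device re-initialization is needed. A dependency-free sketch of the same pattern with a fake frame source (illustrative names, not the plugin's API):

# Minimal "keep the stream open, drop frames while paused" demo.
import queue
import threading
import time


class TinyRecorder:
    def __init__(self):
        self._queue = queue.Queue(maxsize=100)
        self._paused = threading.Event()

    def _audio_callback(self, frame):
        # Same early return as AudioRecorder._audio_callback above:
        # while paused, frames are discarded instead of being queued.
        if self._paused.is_set():
            return
        try:
            self._queue.put_nowait(frame)
        except queue.Full:
            pass

    def pause(self):
        self._paused.set()

    def resume(self):
        self._paused.clear()


rec = TinyRecorder()


def feed():
    # Stand-in for the audio driver invoking the callback at a fixed rate.
    for i in range(20):
        rec._audio_callback(f'frame-{i}')
        time.sleep(0.05)


feeder = threading.Thread(target=feed)
feeder.start()
time.sleep(0.2)
rec.pause()     # frames produced from now on are dropped
time.sleep(0.3)
rec.resume()    # frames are queued again
feeder.join()
print('frames captured:', rec._queue.qsize())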