Removed the deprecated `assistant.google.pushtotalk` plugin.

It only existed as a backwards-compatibility layer for armv6, since there
was no build of the Assistant library that worked on the Raspberry Pi Zero.

However, that API has since been discontinued by Google and is no longer
functional, so only the `assistant.google` integration (on x86_64 and
armv7) is currently supported.
Author: Fabio Manganiello, 2023-10-22 19:57:55 +02:00
parent cce6c4c5ad
commit b46c00f015
Signed by: blacklight
GPG Key ID: D90FBA7F76362774
5 changed files with 0 additions and 600 deletions


@@ -1,6 +0,0 @@
``assistant.google.pushtotalk``
===============================
.. automodule:: platypush.plugins.assistant.google.pushtotalk
:members:


@@ -12,7 +12,6 @@ Plugins
platypush/plugins/arduino.rst
platypush/plugins/assistant.echo.rst
platypush/plugins/assistant.google.rst
platypush/plugins/assistant.google.pushtotalk.rst
platypush/plugins/autoremote.rst
platypush/plugins/bluetooth.rst
platypush/plugins/calendar.rst


@@ -1,236 +0,0 @@
"""Based on Google pushtotalk.py sample."""
import concurrent.futures
import json
import logging
import grpc
from google.assistant.embedded.v1alpha2 import (
embedded_assistant_pb2,
embedded_assistant_pb2_grpc
)
from tenacity import retry, stop_after_attempt, retry_if_exception
try:
from googlesamples.assistant.grpc import (
assistant_helpers,
audio_helpers,
browser_helpers,
device_helpers
)
except (SystemError, ImportError):
import assistant_helpers
import audio_helpers
import browser_helpers
import device_helpers
ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
END_OF_UTTERANCE = embedded_assistant_pb2.AssistResponse.END_OF_UTTERANCE
DIALOG_FOLLOW_ON = embedded_assistant_pb2.DialogStateOut.DIALOG_FOLLOW_ON
CLOSE_MICROPHONE = embedded_assistant_pb2.DialogStateOut.CLOSE_MICROPHONE
PLAYING = embedded_assistant_pb2.ScreenOutConfig.PLAYING
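# gRPC deadline for Assist() calls: 3 minutes plus a 5-second grace period.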
DEFAULT_GRPC_DEADLINE = 60 * 3 + 5
class SampleAssistant(object):
"""Sample Assistant that supports conversations and device actions.
Args:
device_model_id: identifier of the device model.
device_id: identifier of the registered device instance.
conversation_stream(ConversationStream): audio stream
for recording query and playing back assistant answer.
channel: authorized gRPC channel for connection to the
Google Assistant API.
deadline_sec: gRPC deadline in seconds for Google Assistant API call.
device_handler: callback for device actions.
"""
def __init__(self, language_code, device_model_id, device_id,
conversation_stream, display,
channel, deadline_sec, device_handler, play_response=True,
on_conversation_start=None, on_conversation_end=None,
on_speech_recognized=None, on_volume_changed=None,
on_response=None):
self.language_code = language_code
self.device_model_id = device_model_id
self.device_id = device_id
self.conversation_stream = conversation_stream
self.display = display
self.play_response = play_response
# Opaque blob provided in AssistResponse that,
# when provided in a follow-up AssistRequest,
# gives the Assistant a context marker within the current state
# of the multi-Assist()-RPC "conversation".
# This value, along with MicrophoneMode, supports a more natural
# "conversation" with the Assistant.
self.conversation_state = None
# Force reset of first conversation.
self.is_new_conversation = True
# Create Google Assistant API gRPC client.
self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(channel)
self.deadline = deadline_sec
self.device_handler = device_handler
self.detected_speech = None
self.on_conversation_start = on_conversation_start
self.on_conversation_end = on_conversation_end
self.on_speech_recognized = on_speech_recognized
self.on_volume_changed = on_volume_changed
self.on_response = on_response
def __enter__(self):
return self
def __exit__(self, etype, e, traceback):
if e:
return False
self.conversation_stream.close()
@staticmethod
def is_grpc_error_unavailable(e):
is_grpc_error = isinstance(e, grpc.RpcError)
if is_grpc_error and (e.code() == grpc.StatusCode.UNAVAILABLE):
logging.error('grpc unavailable error: %s', e)
return True
return False
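    # Retry the whole Assist() request up to 3 times on transient gRPC
    # UNAVAILABLE errors; reraise=True re-raises the last error if all
    # attempts fail.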
@retry(reraise=True, stop=stop_after_attempt(3),
retry=retry_if_exception(is_grpc_error_unavailable))
def assist(self):
"""Send a voice request to the Assistant and playback the response.
Returns: True if conversation should continue.
"""
continue_conversation = False
device_actions_futures = []
self.conversation_stream.start_recording()
if self.on_conversation_start:
self.on_conversation_start()
logging.info('Recording audio request.')
def iter_log_assist_requests():
for c in self.gen_assist_requests():
assistant_helpers.log_assist_request_without_audio(c)
yield c
logging.debug('Reached end of AssistRequest iteration.')
# This generator yields AssistResponse proto messages
# received from the gRPC Google Assistant API.
for resp in self.assistant.Assist(iter_log_assist_requests(), self.deadline):
assistant_helpers.log_assist_response_without_audio(resp)
if resp.event_type == END_OF_UTTERANCE:
logging.info('End of audio request detected.')
logging.info('Stopping recording.')
self.conversation_stream.stop_recording()
if self.detected_speech and self.on_speech_recognized:
self.on_speech_recognized(self.detected_speech)
if resp.speech_results:
self.detected_speech = ' '.join(
r.transcript.strip() for r in resp.speech_results
if len(r.transcript.strip())).strip()
logging.info('Transcript of user request: "%s".', self.detected_speech)
if len(resp.audio_out.audio_data) > 0:
if not self.conversation_stream.playing:
self.conversation_stream.stop_recording()
if self.play_response:
self.conversation_stream.start_playback()
logging.info('Playing assistant response.')
if self.play_response and self.conversation_stream.playing:
self.conversation_stream.write(resp.audio_out.audio_data)
elif self.conversation_stream.playing:
self.conversation_stream.stop_playback()
if resp.dialog_state_out.conversation_state:
conversation_state = resp.dialog_state_out.conversation_state
logging.debug('Updating conversation state.')
self.conversation_state = conversation_state
if resp.dialog_state_out.volume_percentage != 0:
volume_percentage = resp.dialog_state_out.volume_percentage
logging.info('Setting volume to %s%%', volume_percentage)
self.conversation_stream.volume_percentage = volume_percentage
if self.on_volume_changed:
self.on_volume_changed(volume_percentage)
if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
continue_conversation = True
logging.info('Expecting follow-on query from user.')
elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
continue_conversation = False
if resp.device_action.device_request_json:
device_request = json.loads(
resp.device_action.device_request_json
)
fs = self.device_handler(device_request)
if fs:
device_actions_futures.extend(fs)
if self.display and resp.screen_out.data:
system_browser = browser_helpers.system_browser
system_browser.display(resp.screen_out.data)
if resp.dialog_state_out.supplemental_display_text and self.on_response:
self.on_response(resp.dialog_state_out.supplemental_display_text)
if len(device_actions_futures):
logging.info('Waiting for device executions to complete.')
concurrent.futures.wait(device_actions_futures)
logging.info('Finished playing assistant response.')
self.conversation_stream.stop_playback()
if self.on_conversation_end:
self.on_conversation_end(continue_conversation)
return continue_conversation
def gen_assist_requests(self):
"""Yields: AssistRequest messages to send to the API."""
config = embedded_assistant_pb2.AssistConfig(
audio_in_config=embedded_assistant_pb2.AudioInConfig(
encoding='LINEAR16',
sample_rate_hertz=self.conversation_stream.sample_rate,
),
audio_out_config=embedded_assistant_pb2.AudioOutConfig(
encoding='LINEAR16',
sample_rate_hertz=self.conversation_stream.sample_rate,
volume_percentage=self.conversation_stream.volume_percentage,
),
dialog_state_in=embedded_assistant_pb2.DialogStateIn(
language_code=self.language_code,
conversation_state=self.conversation_state,
is_new_conversation=self.is_new_conversation,
),
device_config=embedded_assistant_pb2.DeviceConfig(
device_id=self.device_id,
device_model_id=self.device_model_id,
)
)
if self.display:
config.screen_out_config.screen_mode = PLAYING
# Continue current conversation with later requests.
self.is_new_conversation = False
# The first AssistRequest must contain the AssistConfig
# and no audio data.
yield embedded_assistant_pb2.AssistRequest(config=config)
for data in self.conversation_stream:
# Subsequent requests need audio data, but not config.
yield embedded_assistant_pb2.AssistRequest(audio_in=data)


@@ -1,330 +0,0 @@
import json
import os
from typing import Optional, Dict, Any
from platypush.context import get_bus, get_plugin
from platypush.message.event.assistant import (
ConversationStartEvent,
ConversationEndEvent,
SpeechRecognizedEvent,
VolumeChangedEvent,
ResponseEvent,
)
from platypush.message.event.google import GoogleDeviceOnOffEvent
from platypush.plugins import action
from platypush.plugins.assistant import AssistantPlugin
class AssistantGooglePushtotalkPlugin(AssistantPlugin):
"""
Plugin for the Google Assistant push-to-talk API.
"""
api_endpoint = 'embeddedassistant.googleapis.com'
grpc_deadline = 60 * 3 + 5
device_handler = None
_default_credentials_file = os.path.join(
os.path.expanduser('~'),
'.config',
'google-oauthlib-tool',
'credentials.json',
)
_default_device_config = os.path.join(
os.path.expanduser('~'),
'.config',
'googlesamples-assistant',
'device_config.json',
)
def __init__(
self,
credentials_file=_default_credentials_file,
device_config=_default_device_config,
language='en-US',
play_response=True,
tts_plugin=None,
tts_args=None,
**kwargs
):
"""
:param credentials_file: Path to the Google OAuth credentials file
(default: ~/.config/google-oauthlib-tool/credentials.json).
See
https://developers.google.com/assistant/sdk/guides/library/python/embed/install-sample#generate_credentials
for instructions to get your own credentials file.
:type credentials_file: str
:param device_config: Path to device_config.json. Register your device
(see https://developers.google.com/assistant/sdk/guides/library/python/embed/register-device)
and create a project, then run the pushtotalk.py script from
googlesamples to create your device_config.json
:type device_config: str
:param language: Assistant language (default: en-US)
:type language: str
:param play_response: If True (default) then the plugin will play the assistant response upon processed
response. Otherwise nothing will be played - but you may want to handle the ``ResponseEvent`` manually.
:type play_response: bool
:param tts_plugin: Optional text-to-speech plugin to be used to process response text.
:type tts_plugin: str
:param tts_args: Optional arguments for the TTS plugin ``say`` method.
:type tts_args: dict
"""
import googlesamples.assistant.grpc.audio_helpers as audio_helpers
super().__init__(**kwargs)
self.audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE
self.audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH
self.audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE
self.audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
self.audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
self.language = language
self.credentials_file = credentials_file
self.device_config = device_config
self.play_response = play_response
self.tts_plugin = tts_plugin
self.tts_args = tts_args or {}
self.assistant = None
self.interactions = []
with open(self.device_config) as f:
device = json.load(f)
self.device_id = device['id']
self.device_model_id = device['model_id']
# Load OAuth 2.0 credentials.
try:
from google.oauth2.credentials import Credentials
from google.auth.transport.requests import Request
with open(self.credentials_file, 'r') as f:
self.credentials = Credentials(token=None, **json.load(f))
self.http_request = Request()
self.credentials.refresh(self.http_request)
except Exception as ex:
self.logger.error('Error loading credentials: %s', str(ex))
            self.logger.error(
                'Run google-oauthlib-tool to initialize new OAuth 2.0 credentials.'
            )
raise
self.grpc_channel = None
self.conversation_stream = None
def _init_assistant(self):
import googlesamples.assistant.grpc.audio_helpers as audio_helpers
from google.auth.transport.grpc import secure_authorized_channel
self.interactions = []
# Create an authorized gRPC channel.
self.grpc_channel = secure_authorized_channel(
self.credentials, self.http_request, self.api_endpoint
)
self.logger.info('Connecting to {}'.format(self.api_endpoint))
        # Configure the audio source and sink. The same sound device stream
        # is used both to record the query and to play back the response.
        audio_device = audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size,
        )
        audio_source = audio_sink = audio_device
# Create conversation stream with the given audio source and sink.
self.conversation_stream = audio_helpers.ConversationStream(
source=audio_source,
sink=audio_sink,
iter_size=self.audio_iter_size,
sample_width=self.audio_sample_width,
)
self._install_device_handlers()
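    # The on_* methods below are factories: each returns a closure that posts
    # the corresponding platypush event on the bus. The closures are passed
    # as callbacks to SampleAssistant in start_conversation().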
def on_conversation_start(self):
"""Conversation start handler"""
def handler():
get_bus().post(ConversationStartEvent(assistant=self))
return handler
def on_conversation_end(self):
"""Conversation end handler"""
def handler(with_follow_on_turn):
get_bus().post(
ConversationEndEvent(
assistant=self, with_follow_on_turn=with_follow_on_turn
)
)
return handler
def on_speech_recognized(self):
"""Speech recognized handler"""
def handler(phrase):
get_bus().post(SpeechRecognizedEvent(assistant=self, phrase=phrase))
self.interactions.append({'request': phrase})
return handler
def on_volume_changed(self):
"""Volume changed event"""
def handler(volume):
get_bus().post(VolumeChangedEvent(assistant=self, volume=volume))
return handler
def on_response(self):
"""Response handler"""
def handler(response):
get_bus().post(ResponseEvent(assistant=self, response_text=response))
if not self.interactions:
self.interactions.append({'response': response})
else:
self.interactions[-1]['response'] = response
if self.tts_plugin:
tts = get_plugin(self.tts_plugin)
tts.say(response, **self.tts_args)
return handler
@action
def start_conversation(
self,
*_,
language: Optional[str] = None,
tts_plugin: Optional[str] = None,
tts_args: Optional[Dict[str, Any]] = None,
**__
):
"""
Start a conversation
:param language: Language code override (default: default configured language).
:param tts_plugin: Optional text-to-speech plugin to be used for rendering text.
:param tts_args: Optional arguments for the TTS plugin say method.
:returns: A list of the interactions that happen within the conversation.
.. code-block:: json
[
{
"request": "request 1",
"response": "response 1"
},
{
"request": "request 2",
"response": "response 2"
}
]
"""
from platypush.plugins.assistant.google.lib import SampleAssistant
        # Per-call overrides shouldn't clear the plugin-level TTS settings.
        self.tts_plugin = tts_plugin or self.tts_plugin
        self.tts_args = tts_args or self.tts_args or {}
language = language or self.language
play_response = False if self.tts_plugin else self.play_response
self._init_assistant()
with SampleAssistant(
language_code=language,
device_model_id=self.device_model_id,
device_id=self.device_id,
conversation_stream=self.conversation_stream,
display=None,
channel=self.grpc_channel,
deadline_sec=self.grpc_deadline,
play_response=play_response,
device_handler=self.device_handler,
on_conversation_start=self.on_conversation_start(),
on_conversation_end=self.on_conversation_end(),
on_volume_changed=self.on_volume_changed(),
on_response=self.on_response(),
on_speech_recognized=self.on_speech_recognized(),
) as self.assistant:
continue_conversation = True
while continue_conversation:
try:
continue_conversation = self.assistant.assist()
except Exception as e:
self.logger.warning(
'Unhandled assistant exception: {}'.format(str(e))
)
self.logger.exception(e)
self._init_assistant()
return self.interactions
@action
def stop_conversation(self):
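        """Stop the current conversation and any response playback."""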
if self.assistant:
self.assistant.play_response = False
if self.conversation_stream:
self.conversation_stream.stop_playback()
self.conversation_stream.stop_recording()
get_bus().post(ConversationEndEvent(assistant=self))
@action
def set_mic_mute(self, muted: bool = True):
"""
Programmatically mute/unmute the microphone.
:param muted: Set to True or False.
"""
if not self.conversation_stream:
self.logger.warning('The assistant is not running')
return
if muted:
self.conversation_stream.stop_recording()
else:
self.conversation_stream.start_recording()
def _install_device_handlers(self):
import googlesamples.assistant.grpc.device_helpers as device_helpers
self.device_handler = device_helpers.DeviceRequestHandler(self.device_id)
@self.device_handler.command('action.devices.commands.OnOff')
def handler(on): # type: ignore
get_bus().post(
GoogleDeviceOnOffEvent(
device_id=self.device_id,
device_model_id=self.device_model_id,
on=on,
)
)
# vim:sw=4:ts=4:et:


@@ -1,27 +0,0 @@
manifest:
events:
    platypush.message.event.assistant.ConversationEndEvent: when a conversation ends
    platypush.message.event.assistant.ConversationStartEvent: when a new conversation starts
    platypush.message.event.assistant.SpeechRecognizedEvent: when a voice command is recognized
install:
apk:
- py3-tenacity
- py3-google-auth
apt:
- python3-tenacity
- python3-google-auth
dnf:
- python-tenacity
- python-google-auth
pacman:
- python-tenacity
- python-google-auth
pip:
- tenacity
- google-assistant-sdk
- google-auth
package: platypush.plugins.assistant.google.pushtotalk
type: plugin