AI Voice Assistant
AI Voice Assistant
Input:-
Host device:-
.
link:-
Finally I am able to build my AI Voice Assistant on Raspberry Pi. I used Raspberry Pi 4 (2GB)
along with Re-Speaker 2-Mic PiHAT and an external speaker. You can find the source code and
step by step process in my github page.
This is an open-source startup project; you can contribute your ideas to help the project grow.
Source Code:
https://github.com/avseng/ai_voice_as…
2. Install git
git clone
https://github.com/avseng/ai_voice_assistant.git
cd ai_voice_assistant/
sudo chmod u+x setup.sh
sudo bash setup.sh
cd ai_voice_assistant/
python3 check_device_id.py
Note down the input device index number from your sound card.
This code can be used to check any input device index number.
sudo nano main.py
"""
Getting weather report from weather.com
"""
def weatherReport(city):
weatherDetails = weathercom.getCityWeatherDetails(city)
humidity =json.loads(weatherDetails)["vt1observation"]
["humidity"]
temp = json.loads(weatherDetails)["vt1observation"]
["temperature"]
phrase = json.loads(weatherDetails)["vt1observation"]["phrase"]
return humidity, temp, phrase
"""
Perform search operation
if the content reffer any person or group, the it will check in
wikipedia
otherwise it will search in google.
"""
def google_search(search_text):
translator = Translator()
result = ''
search_data = search_text
logger.info("google_search : "+search_data)
if "who is" in search_data or "who are" in search_data:
search_data = search_data.split(" ")[2:]
search_data = " ".join(search_data)
try:
result = wikipedia.summary(search_data, sentences=2)
except Exception as e:
pass
else:
url = "https://www.google.co.in/search?q="+search_data
logger.info("google_search : URL : "+url)
try:
search_result = requests.get(url).text
soup = BeautifulSoup(search_result, 'html.parser')
if "born" in search_data:
for i in result_div:
s = translator.translate(dest='en', text = i.text)
a = str(s).split("=")[3].split(",")
b = a[:len(a)-1]
b = " ".join(b)
if "Born" in b:
result = b.split(":")[1:].__str__().replace("['
","").replace("']","")
#print(result)
break
else:
for i in result_div:
s = translator.translate(dest='en', text=i.text)
a = str(s).split("=")[3].split(",")
b = a[:len(a) - 1]
result = " ".join(b)
#print(result)
break
except Exception as e:
pass
logger.info("google_search : Search Result ::"+result)
return result
"""
get the current date and time.
"""
def current_datetime(type):
returndata = ''
timeData =
urllib.request.urlopen("http://worldtimeapi.org/api/ip").read()
datetime = json.loads(timeData)["datetime"]
date = datetime.split("T")[0]
time = datetime.split("T")[1]
if type == "time":
time = time.split(".")[0]
hr = int(time.split(":")[0])
min = time.split(":")[1]
suffix = ''
if hr >12:
hr = hr - 12
suffix="PM"
else:
suffix="AM"
if hr == 0:
hr=12
suffix="AM"
if type == "date":
year = date.split("-")[0]
month_int=int(date.split("-")[1])
day = date.split("-")[2]
month = ''
if month_int == 1:
month = 'Janiary'
elif month_int == 2:
month = "February"
elif month_int == 3:
month = "March"
elif month_int == 4:
month = "April"
elif month_int == 5:
month = "May"
elif month_int == 6:
month = "June"
elif month_int == 7:
month = "July"
elif month_int == 8:
month = "August"
elif month_int == 9:
month = "September"
elif month_int == 10:
month = "October"
elif month_int == 11:
month = "Novenber"
elif month_int == 12:
month = "December"
"""
Reboot raspberry pi.
"""
def reboot_server():
command = "/usr/bin/sudo /sbin/shutdown -r now"
process = subprocess.Popen(command.split(),
stdout=subprocess.PIPE)
3)Apa102.py
"""
The code is based on https://github.com/tinue/APA102_Pi
This is the main driver module for APA102 LEDs
License: GPL V2
"""
import spidev
from math import ceil
RGB_MAP = { 'rgb': [3, 2, 1], 'rbg': [3, 1, 2], 'grb': [2, 3, 1],
'gbr': [2, 1, 3], 'brg': [1, 3, 2], 'bgr': [1, 2, 3] }
class APA102:
"""
Driver for APA102 LEDS (aka "DotStar").
(c) Martin Erzberger 2016-2017
The rest of the methods are used internally and should not be
used by the
user of the library.
After having received the 32 bit color frame, the LED changes
color ,
and then resumes to just copying data-in to data-out.
The really clever bit is this: While receiving the 32 bit LED
frame,
the LED sends zeroes on its data-out line. Because a color
frame is
32 bits, the LED sends 32 bits of zeroes to the next LED.
As we have seen above, this means that the next LED is now
ready
to accept a color frame and update its color.
def clock_start_frame(self):
"""Sends a start frame to the LED strip.
def clock_end_frame(self):
"""Sends an end frame to the LED strip.
As explained above, dummy data must be sent after the last
real colour
information so that all of the data can reach its destination
down the line.
The delay is not as bad as with the human example above.
It is only 1/2 bit per LED. This is because the SPI clock line
needs to be inverted.
After one LED the clock is inverted, and after two LEDs it is
in sync
again, but one cycle behind. Therefore, for every two LEDs,
one bit
of delay gets accumulated. For 300 LEDs, 150 additional bits
must be fed to
the input of LED one so that the data can reach the last LED.
self.spi.xfer2([0xFF] * 4)
def clear_strip(self):
""" Turns off the strip and shows the result right away."""
start_index = 4 * led_num
self.leds[start_index] = ledstart
self.leds[start_index + self.rgb[0]] = red
self.leds[start_index + self.rgb[1]] = green
self.leds[start_index + self.rgb[2]] = blue
def show(self):
    """Sends the content of the pixel buffer to the strip.

    Todo: More than 1024 LEDs requires more than one xfer operation.
    """
    self.clock_start_frame()
    # xfer2 kills the list, unfortunately. So it must be copied first.
    # SPI takes up to 4096 integers, so we are fine for up to 1024 LEDs.
    data = list(self.leds)
    while data:
        # Push the buffer out in 32-byte chunks until it is drained.
        self.spi.xfer2(data[:32])
        data = data[32:]
    self.clock_end_frame()
def cleanup(self):
    """Release the SPI device; Call this method at the end"""
    # NOTE(review): the method body appears truncated in this copy --
    # upstream closes the SPI connection here; confirm against the repo.
@staticmethod
def combine_color(red, green, blue):
"""Make one 3*8 byte color value."""
def dump_array(self):
    """For debug purposes: Dump the LED array onto the console."""
    print(self.leds)
4)check_device_id.py
import pyaudio

# Enumerate the capture-capable devices on host API 0 so the user can
# note the input device index for main.py (RESPEAKER_INDEX).
p = pyaudio.PyAudio()
info = p.get_host_api_info_by_index(0)
numdevices = info.get('deviceCount')
for i in range(numdevices):
    # Fetch the device info once per device (the original queried it twice).
    device = p.get_device_info_by_host_api_device_index(0, i)
    if device.get('maxInputChannels') > 0:
        print("Input Device id: " + str(i) + " - " + str(device.get('name')))
5)launcher.sh
#!/bin/sh
# Give the system a moment to settle after boot, then restore the ALSA
# mixer state saved for the sound card.
sleep 3
sudo alsactl restore -f /etc/asound.state
# Wait for the audio/network stack to come up, then start the assistant.
# Output goes to the ramdisk so logging does not wear the SD card.
# (The command below was split across two lines in the extracted copy,
# which would break the shell; it must be a single line.)
sleep 10
python3 /home/pi/ai_voice_assistant/main.py > /mnt/ramdisk/voice.txt 2>&1
6)main.py
import speech_recognition as sr
import pyaudio
import time
import wave
import threading
import os
from pixels import Pixels
import valib
import response
import glob
import logging
r = sr.Recognizer()
class voice:
    """Microphone capture and speech-to-text helper.

    Holds a single PyAudio input stream open for the whole session and
    turns fixed-length recordings into text via Google speech recognition.
    Relies on module-level constants (RESPEAKER_RATE, RESPEAKER_INDEX,
    RESPEAKER_CHANNELS, RESPEAKER_WIDTH, CHUNK, WAVE_OUTPUT_FILEPATH)
    defined elsewhere in this file -- confirm they are present above.
    """

    def __init__(self):
        # One stream for the entire session; reused for every capture.
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(
            rate=RESPEAKER_RATE,
            format=pyaudio.paInt16,
            input_device_index=RESPEAKER_INDEX,
            channels=RESPEAKER_CHANNELS,
            input=True,
            frames_per_buffer=CHUNK)

    def process(self, RECORD_SECONDS):
        """Record RECORD_SECONDS of audio and save it to a new .wav file.

        Returns the generated file name (timestamped, so unique per call).
        """
        frames = []
        for _ in range(int(RESPEAKER_RATE / CHUNK * RECORD_SECONDS)):
            data = self.stream.read(CHUNK, exception_on_overflow=False)
            frames.append(data)
        out_filename = WAVE_OUTPUT_FILEPATH + str(time.time()) + ".wav"
        wf = wave.open(out_filename, 'wb')
        try:
            wf.setnchannels(RESPEAKER_CHANNELS)
            wf.setsampwidth(self.p.get_sample_size(
                self.p.get_format_from_width(RESPEAKER_WIDTH)))
            wf.setframerate(RESPEAKER_RATE)
            wf.writeframes(b''.join(frames))
        finally:
            # BUGFIX: close the file even if a setter/write raises, so the
            # partially written wav is not left open.
            wf.close()
        return out_filename

    def voice_command_processor(self, filename):
        """Convert the speech in *filename* to text and delete the file.

        Returns the recognized text. On recognition failure the previous
        value of the module-global `recognized_text` is returned, matching
        the wake-word loop's expectations.
        """
        global recognized_text
        # BUGFIX: if the very first recognition fails, the global was never
        # assigned and the original raised NameError on `return`.
        if 'recognized_text' not in globals():
            recognized_text = ''
        with sr.AudioFile(filename) as source:
            # r.adjust_for_ambient_noise(source=source, duration=0.5)
            wait_time = 3
            while True:
                audio = r.record(source, duration=3)
                if audio:
                    break
                time.sleep(1)
                wait_time = wait_time - 1
                if wait_time == 0:
                    break
        try:
            recognized_text = r.recognize_google(audio)
        except sr.UnknownValueError:
            # Speech was unintelligible; keep the previous text.
            pass
        except sr.RequestError:
            logger.error("service is down")
        os.remove(filename)
        return recognized_text
"""
Infinite loop:
1. Reading microphone for 3 sec and generation .wav file.
2. Creating thread with voice_command_processor() method
for converting speech to text.
3. IF wake word is detected (in my case Gideon):
files = glob.glob(os.path.join(WAVE_OUTPUT_FILEPATH
+ '*.wav'))
for file in files:
os.remove(file)
recognized_text = ''
px.off()
else:
t1 =
threading.Thread(target=a.voice_command_processor,
args=(file_name,))
t1.start()
7)pixels.py
"""
LED light pattern like Google Home
"""
import apa102
import time
import threading
try:
import queue as Queue
except ImportError:
import Queue as Queue
class Pixels:
    """LED ring animations (wakeup/listen/think/speak/off), Google-Home style.

    All animation work runs on one daemon thread; public methods only
    interrupt the current animation (via self.next) and enqueue the next
    one, so they are safe to call from the main loop.
    """
    PIXELS_N = 3

    def __init__(self):
        # Per-pixel RGB weights giving each LED a slightly different hue.
        self.basis = [0] * 3 * self.PIXELS_N
        self.basis[0] = 2
        self.basis[3] = 1
        self.basis[4] = 1
        self.basis[7] = 2

        # BUGFIX: initialize colors so _think()/_speak() do not raise
        # AttributeError if called before a wakeup/listen animation ran.
        self.colors = [0] * 3 * self.PIXELS_N
        # NOTE(review): the driver creation below was missing from this
        # copy although _off() uses self.dev; restored from the upstream
        # ReSpeaker pixels.py -- confirm against the repository.
        self.dev = apa102.APA102(num_led=self.PIXELS_N)

        self.next = threading.Event()
        self.queue = Queue.Queue()
        self.thread = threading.Thread(target=self._run)
        self.thread.daemon = True
        self.thread.start()

    def wakeup(self, direction=0):
        # NOTE(review): this method's header was lost in the extracted copy
        # (only its body remained); restored from upstream pixels.py.
        def f():
            self._wakeup(direction)
        self.next.set()
        self.queue.put(f)

    def listen(self):
        self.next.set()
        self.queue.put(self._listen)

    def think(self):
        self.next.set()
        self.queue.put(self._think)

    def speak(self):
        self.next.set()
        self.queue.put(self._speak)

    def off(self):
        self.next.set()
        self.queue.put(self._off)

    def _run(self):
        # Worker loop: execute queued animation callables forever.
        while True:
            func = self.queue.get()
            func()

    def _wakeup(self, direction=0):
        # Fade the basis colors in over ~0.25 s.
        for i in range(1, 25):
            colors = [i * v for v in self.basis]
            self.write(colors)
            time.sleep(0.01)
        self.colors = colors

    def _listen(self):
        # Same fade-in as wakeup; kept separate so patterns can diverge.
        for i in range(1, 25):
            colors = [i * v for v in self.basis]
            self.write(colors)
            time.sleep(0.01)
        self.colors = colors

    def _think(self):
        colors = self.colors
        self.next.clear()
        # Rotate the pattern one pixel at a time until interrupted.
        while not self.next.is_set():
            colors = colors[3:] + colors[:3]
            self.write(colors)
            time.sleep(0.2)
        # Fade out in five accelerating steps.
        t = 0.1
        for i in range(0, 5):
            colors = colors[3:] + colors[:3]
            self.write([(v * (4 - i) / 4) for v in colors])
            time.sleep(t)
            t /= 2
        self.colors = colors

    def _speak(self):
        colors = self.colors
        gradient = -1
        position = 24
        self.next.clear()
        # Pulse brightness between 4/24 and 24/24 until interrupted.
        while not self.next.is_set():
            position += gradient
            self.write([(v * position / 24) for v in colors])
            if position == 24 or position == 4:
                gradient = -gradient
                time.sleep(0.2)
            else:
                time.sleep(0.01)

    def _off(self):
        self.write([0] * 3 * self.PIXELS_N)
        self.dev.show()

    def write(self, colors):
        # NOTE(review): restored from upstream -- pushes one RGB triple per
        # pixel to the APA102 driver and latches the frame; confirm.
        for i in range(self.PIXELS_N):
            self.dev.set_pixel(i, int(colors[3 * i]),
                               int(colors[3 * i + 1]), int(colors[3 * i + 2]))
        self.dev.show()
8)response.py
"""
This file process converted text and perform actions accordingly.
This file can be extended with more action.
"""
import valib as va
import action as a
import time
import logging
"""
asking who are you?
"""
if "who are you" in text:
va.audio_playback("i am a i voice assistant system")
"""
asking about weather information.
"""
if "weather" in text:
va.audio_playback("which city")
time.sleep(0.5)
file_name = pa.process(3)
city = pa.voice_command_processor(file_name)
logger.info("process_text : City :: " + city)
try:
humidity, temp, phrase = a.weatherReport(city)
va.audio_playback(
"currently in " + city + " temperature is " + str(temp) + "
degree celsius, " + "humidity is " + str(
humidity) + " percent and sky is " + phrase)
logger.info("currently in " + city + " temperature is " +
str(temp) + "degree celsius, " + "humidity is " + str(
humidity) + " percent and sky is " + phrase)
except KeyError as e:
va.audio_playback("sorry, i couldn't get the location")
"""
asking for search somthing like:
what is raspberry pi
who is isac newton etc.
"""
if "search" in text or "Search" in text:
va.audio_playback("tell me what to search")
time.sleep(0.5)
file_name = pa.process(5)
search_data = pa.voice_command_processor(file_name)
try:
result = a.google_search(search_data)
if result:
va.audio_playback(result)
else:
va.audio_playback("sorry, i couldn't find any result for "
+ search_data)
except KeyError as e:
va.audio_playback("sorry, i couldn't find any result for " +
search_data)
pass
"""
asking aboout current time.
"""
if "time" in text or "Time" in text:
current_time = a.current_datetime("time")
va.audio_playback("right now it is " + current_time)
"""
asking about today's date.
"""
if "date" in text or "Date" in text:
date = a.current_datetime("date")
va.audio_playback("today it is " + date)
"""
asking for rebooting the voice assistant system.
"""
if "reboot" in text or "Reboot" in text:
va.audio_playback("ok.. rebooting the server")
a.reboot_server()
return "done"
9)setup.sh
#!/bin/bash
10)valib.py
from gtts import gTTS
from pygame import mixer
import os
AUDIO_PLAYBACK_FILENAME = "/mnt/ramdisk/audio_play_back.mp3"

def audio_playback(text):
    """Speak *text* aloud: synthesize with gTTS, play via pygame mixer.

    Blocks until playback finishes, then removes the temporary mp3
    (stored on the ramdisk to avoid SD-card wear).
    """
    import time  # local import: this module does not import time at top level
    tts = gTTS(text=text, lang='en-us')
    tts.save(AUDIO_PLAYBACK_FILENAME)
    mixer.init()
    mixer.music.load(AUDIO_PLAYBACK_FILENAME)
    mixer.music.play()
    # BUGFIX: the original busy-waited with `pass`, pinning a CPU core for
    # the entire playback; poll at 10 Hz instead.
    while mixer.music.get_busy():
        time.sleep(0.1)
    os.remove(AUDIO_PLAYBACK_FILENAME)
Voice recognition using raspberry Pi
https://www.instructables.com/Voice-Recognition-Raspberry-Pi/
16GB Micro SD – https://goo.gl/FDqZal
RPI 3 – https://goo.gl/CdVNoH
Materials required:-
Raspberry pi 3B+ :- https://amzn.to/2EvusgE
USB Microphone:- https://amzn.to/2VZzZH3
Speakers:- https://amzn.to/2EIvwOr
SD Card 32GB:- https://amzn.to/2HBTCfh
Card Reader:- https://amzn.to/2JD1f7C
HDMI Cable:- https://amzn.to/2M8VZdO
5v Power Adapter:- https://amzn.to/2ExV7JF
how to create a raspberry pi voice chat bot using chat gpt - YouTube : Search Link
Text b
How to Use ChatGPT Voice Assistant raspberry pi 4 | install ChatGPT on raspberry pi 4 - YouTube
Unit Use/
SL No Components Quantity Price Total Link Comment
Main
1 Raspberry Pi 4 4GB 1 4,617.93 4617.93 Buy Now computer
Micro SD card 64GB Buy
2 class 10 1 439 439 Now
Power Bank Buy
3 10000mAh 1 1249 1249 Now
4 USB Microphone compact 1 1621 1621 Buy Now
Compact Speakers for
5 Rasp PI 1 349 349 Buy Now
6 Card reader 1 399 399 Buy Now
7 USB C Cable 1 529 529 Buy Now
8 Optional humanoid face 0 0 0
Mycroft link
Introduction video: https://www.youtube.com/watch?v=tcI8ibjUOzg
26 Sep
Mycroft: NLP
Mycroft is more than a stand alone device. It is an open platform that will allow developers to add
natural language processing to anything. Here we show what Mycroft will be capable of in 2018.
Mycroft will span all of your devices and provide seamless interaction on your desktop, mobile
device, embedded speaker or automobile. It is more than a voice interface or simply a voice control
system; Mycroft is an AI for everyone.
Servo Smoothing
https://github.com/XRobots/ServoSmoothing
Chat bot using Chat GPT api
https://youtu.be/4qNwoAAfnk4 : Madhusudhan watch this
● 18 dollar free credit
Important: If you still have a command prompt (or any type of terminal) open that was
opened before you installed Node.js, close and re-open your terminal.
After you have successfully installed Node.js, run the following command in your
terminal to check that Node.js is install properly:
node -v
v16.13.2
Note: If you get the error, The term 'node' is not recognized as the name, ensure that
you have closed and re-opened all terminals or text editors. A system restart may be
beneficial at this point if you are running into a node error
If the issue still isn't resolved you will need to add Node.js to your system path.
The first step is to find the path to your Node.js installation. Depending on how you
installed Node.js, the location of the installation files may vary. If you used the official
Node.js installer, the default installation directory should be C:\Program Files\
nodejs.
If you installed Node.js in a different location, you'll need to locate the installation
directory manually. You can do this by searching your computer for the node.exe file.
Next, you need to open the Environment Variables settings on your computer. To do
this in Windows 10, follow these steps:
In the Environment Variables window, locate the "System Variables" section and scroll
down until you find the "Path" variable. Select the "Path" variable and click on the
"Edit" button.
In the Edit Environment Variable window that appears, click on the "New" button and
enter the path to your Node.js installation directory. Make sure to separate the path
from any existing paths in the variable with a semicolon (;).
For example, if your Node.js installation directory is C:\Program Files\nodejs, you
would add the following path to the System Path variable:
C:\Program Files\nodejs;
Once you've added the Node.js installation directory to your system path, click on the
"OK" button to save the changes. You may need to restart your command prompt or
terminal for the changes to take effect.
To test that Node.js is properly added to your system path, open a new command
prompt or terminal and run the following command:
node -v
This should output the current version of Node.js that is installed on your computer.
And that's it! You've successfully added Node.js to your system path. Now you can
start using Node.js commands in the terminal or command prompt without having to
specify the full path to the node.exe file every time.
key : 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 (WARNING: this OpenAI API key is committed in plain text — revoke it immediately and load keys from an environment variable instead)
C:\Users\User\Desktop>python open_ai.py
Traceback (most recent call last):
File "C:\Users\User\Desktop\open_ai.py", line 20, in <module>
inputs = gr.inputs.Textbox(lines=7, label="Chat with AI")
^^^^^^^^^
AttributeError: module 'gradio' has no attribute 'inputs'
import openai
import gradio as gr
openai.api_key = "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"
# Shared conversation history; every call to chatbot() appends to it so
# context accumulates across turns.
messages = [
    {"role": "system", "content": "You are a helpful and kind AI Assistant."},
]

def chatbot(input):
    """Send *input* to gpt-3.5-turbo with the running conversation history.

    Appends both the user message and the assistant reply to the shared
    `messages` list. Returns the reply text; returns '' for empty input
    (the original fell through and returned None).

    NOTE: the parameter name `input` shadows the builtin but is kept --
    it is part of this function's public signature.
    """
    if not input:
        return ''
    messages.append({"role": "user", "content": input})
    chat = openai.ChatCompletion.create(
        model="gpt-3.5-turbo", messages=messages
    )
    reply = chat.choices[0].message['content']
    messages.append({"role": "assistant", "content": reply})
    return reply
C:\Users\User\Desktop>python open_ai2.py
C:\Python312\Lib\site-packages\gradio\blocks.py:528: UserWarning: Cannot load compact.
Caught Exception: The space compact does not exist
warnings.warn(f"Cannot load {theme}. Caught Exception: {str(e)}")
Running on local URL: http://127.0.0.1:7860
Running on public URL: https://0be27924034504f356.gradio.live
This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run
`gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)
Additional resources for gradio
Gradio tutorial (Build machine learning applications) (machinelearningnuggets.com)
1/12/2023
RASPBERRY PI GOOGLE ASSISTANT
https://techiesms.com/google-assistant-on-raspberry-pi/ code
https://youtu.be/eg_V56BUmh0
Spare
https://youtu.be/4luTnRpfgbs