
Detect and record audio in Python




As a follow-up to Nick Fortescue's answer, here's a more complete example of how to record from the microphone and process the resulting data:

from sys import byteorder
from array import array
from struct import pack

import pyaudio
import wave

THRESHOLD = 500
CHUNK_SIZE = 1024
FORMAT = pyaudio.paInt16
RATE = 44100

def is_silent(snd_data):
    "Returns 'True' if below the 'silent' threshold"
    return max(snd_data) < THRESHOLD

def normalize(snd_data):
    "Average the volume out"
    MAXIMUM = 16384
    times = float(MAXIMUM)/max(abs(i) for i in snd_data)

    r = array('h')
    for i in snd_data:
        r.append(int(i*times))
    return r

def trim(snd_data):
    "Trim the blank spots at the start and end"
    def _trim(snd_data):
        snd_started = False
        r = array('h')

        for i in snd_data:
            if not snd_started and abs(i) > THRESHOLD:
                snd_started = True
                r.append(i)
            elif snd_started:
                r.append(i)
        return r

    # Trim to the left
    snd_data = _trim(snd_data)

    # Trim to the right
    snd_data.reverse()
    snd_data = _trim(snd_data)
    snd_data.reverse()
    return snd_data

def add_silence(snd_data, seconds):
    "Add silence to the start and end of 'snd_data' of length 'seconds' (float)"
    silence = [0] * int(seconds * RATE)
    r = array('h', silence)
    r.extend(snd_data)
    r.extend(silence)
    return r

def record():
    """
    Record a word or words from the microphone and
    return the data as an array of signed shorts.

    Normalizes the audio, trims silence from the
    start and end, and pads with 0.5 seconds of
    blank sound to make sure VLC et al can play
    it without getting chopped off.
    """
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT, channels=1, rate=RATE,
                    input=True, output=True,
                    frames_per_buffer=CHUNK_SIZE)

    num_silent = 0
    snd_started = False

    r = array('h')

    while 1:
        # little endian, signed short
        snd_data = array('h', stream.read(CHUNK_SIZE))
        if byteorder == 'big':
            snd_data.byteswap()
        r.extend(snd_data)

        silent = is_silent(snd_data)

        if silent and snd_started:
            num_silent += 1
        elif not silent and not snd_started:
            snd_started = True

        if snd_started and num_silent > 30:
            break

    sample_width = p.get_sample_size(FORMAT)
    stream.stop_stream()
    stream.close()
    p.terminate()

    r = normalize(r)
    r = trim(r)
    r = add_silence(r, 0.5)
    return sample_width, r

def record_to_file(path):
    "Records from the microphone and outputs the resulting data to 'path'"
    sample_width, data = record()
    data = pack('<' + ('h'*len(data)), *data)

    wf = wave.open(path, 'wb')
    wf.setnchannels(1)
    wf.setsampwidth(sample_width)
    wf.setframerate(RATE)
    wf.writeframes(data)
    wf.close()

if __name__ == '__main__':
    print("please speak a word into the microphone")
    record_to_file('demo.wav')
    print("done - result written to demo.wav")

I need to capture audio clips as WAV files that I can then pass to another bit of Python for processing. The problem is that I need to determine when there is audio present and then record it, stop once it goes silent, and then pass that file on to the processing module.

I'm thinking it should be possible with the wave module to detect when there is pure silence and discard it, then, as soon as something other than silence is detected, start recording, and once the line goes silent again, stop the recording.

I just can't quite get my head around it; can anyone get me started with a basic example?


I believe the WAVE module does not support recording, just processing of existing files. You might want to look at PyAudio for actually doing the recording. WAV is about the world's simplest file format. With paInt16 you just get a signed integer representing a level, and closer to 0 means quieter. I can't remember whether WAV files are high byte first or low byte first, but something like this should work (sorry, I'm not really a Python programmer):

from array import array

# you'll probably want to experiment on threshold
# (depends how noisy the signal is)
threshold = 10
as_ints = array('h', data)   # 'data' is one chunk of raw bytes read from the stream
max_value = max(as_ints)
if max_value > threshold:
    pass  # not silence

PyAudio code for recording, kept for reference:

import pyaudio
import sys

chunk = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
RECORD_SECONDS = 5

p = pyaudio.PyAudio()

stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                output=True,
                frames_per_buffer=chunk)

print("* recording")
for i in range(0, 44100 // chunk * RECORD_SECONDS):
    data = stream.read(chunk)
    # check for silence here by comparing the level with 0 (or some threshold) for
    # the contents of data.
    # then write data or not to a file

print("* done")

stream.stop_stream()
stream.close()
p.terminate()
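For example, wiring the threshold check from the first snippet into that loop could look roughly like this (only a sketch: the threshold of 500 and the output name 'output.wav' are arbitrary choices I made here, and silent chunks are simply dropped):

import wave
from array import array

import pyaudio

chunk = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
RECORD_SECONDS = 5
THRESHOLD = 500  # arbitrary; experiment with your microphone

p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE,
                input=True, frames_per_buffer=chunk)

frames = []
for _ in range(0, RATE // chunk * RECORD_SECONDS):
    data = stream.read(chunk)
    if max(array('h', data)) > THRESHOLD:
        frames.append(data)  # keep only chunks that are louder than the threshold

stream.stop_stream()
stream.close()

# write whatever was kept to a WAV file ('output.wav' is just an example name)
wf = wave.open('output.wav', 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()

p.terminate()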


The PyAudio website has many examples that are pretty short and clear: PyAudio
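For instance, playing back a finished recording takes only a few lines, along the lines of the short examples on that site (a sketch, assuming a 'demo.wav' file like the one written by the code above):

import wave
import pyaudio

CHUNK = 1024

wf = wave.open('demo.wav', 'rb')   # assumed to exist, e.g. produced by record_to_file()
p = pyaudio.PyAudio()

stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(),
                rate=wf.getframerate(),
                output=True)

data = wf.readframes(CHUNK)
while data:
    stream.write(data)             # push the file to the speakers chunk by chunk
    data = wf.readframes(CHUNK)

stream.stop_stream()
stream.close()
p.terminate()
wf.close()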


You might want to look at csounds, too. It has several APIs, including Python. It may be able to interact with an A-D interface and gather sound samples.


Thanks to cryo for the improved version, on which I based my tested code below:

# Instead of adding silence at start and end of recording (values=0) I add the original audio.
# This makes audio sound more natural as volume is >0. See trim()
# I also fixed an issue with the previous code - the accumulated silence counter needs to be
# cleared once recording is resumed.

from array import array
from struct import pack
from sys import byteorder
import copy
import pyaudio
import wave

THRESHOLD = 500  # audio levels not normalised.
CHUNK_SIZE = 1024
SILENT_CHUNKS = 3 * 44100 // 1024  # about 3sec
FORMAT = pyaudio.paInt16
FRAME_MAX_VALUE = 2 ** 15 - 1
NORMALIZE_MINUS_ONE_dB = 10 ** (-1.0 / 20)
RATE = 44100
CHANNELS = 1
TRIM_APPEND = RATE // 4

def is_silent(data_chunk):
    """Returns 'True' if below the 'silent' threshold"""
    return max(data_chunk) < THRESHOLD

def normalize(data_all):
    """Amplify the volume out to max -1dB"""
    # MAXIMUM = 16384
    normalize_factor = (float(NORMALIZE_MINUS_ONE_dB * FRAME_MAX_VALUE)
                        / max(abs(i) for i in data_all))

    r = array('h')
    for i in data_all:
        r.append(int(i * normalize_factor))
    return r

def trim(data_all):
    _from = 0
    _to = len(data_all) - 1
    for i, b in enumerate(data_all):
        if abs(b) > THRESHOLD:
            _from = max(0, i - TRIM_APPEND)
            break

    for i, b in enumerate(reversed(data_all)):
        if abs(b) > THRESHOLD:
            _to = min(len(data_all) - 1, len(data_all) - 1 - i + TRIM_APPEND)
            break

    return copy.deepcopy(data_all[_from:(_to + 1)])

def record():
    """Record a word or words from the microphone and
    return the data as an array of signed shorts."""

    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE,
                    input=True, output=True,
                    frames_per_buffer=CHUNK_SIZE)

    silent_chunks = 0
    audio_started = False
    data_all = array('h')

    while True:
        # little endian, signed short
        data_chunk = array('h', stream.read(CHUNK_SIZE))
        if byteorder == 'big':
            data_chunk.byteswap()
        data_all.extend(data_chunk)

        silent = is_silent(data_chunk)

        if audio_started:
            if silent:
                silent_chunks += 1
                if silent_chunks > SILENT_CHUNKS:
                    break
            else:
                silent_chunks = 0
        elif not silent:
            audio_started = True

    sample_width = p.get_sample_size(FORMAT)
    stream.stop_stream()
    stream.close()
    p.terminate()

    # we trim before normalize, as the threshold applies to the un-normalized wave
    # (as does the is_silent() function)
    data_all = trim(data_all)
    data_all = normalize(data_all)
    return sample_width, data_all

def record_to_file(path):
    "Records from the microphone and outputs the resulting data to 'path'"
    sample_width, data = record()
    data = pack('<' + ('h' * len(data)), *data)

    wave_file = wave.open(path, 'wb')
    wave_file.setnchannels(CHANNELS)
    wave_file.setsampwidth(sample_width)
    wave_file.setframerate(RATE)
    wave_file.writeframes(data)
    wave_file.close()

if __name__ == '__main__':
    print("Wait in silence to begin recording; wait in silence to terminate")
    record_to_file('demo.wav')
    print("done - result written to demo.wav")


import pyaudio
import wave
from array import array

FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
CHUNK = 1024
RECORD_SECONDS = 15
FILE_NAME = "RECORDING.wav"

audio = pyaudio.PyAudio()  # instantiate pyaudio

# recording prerequisites
stream = audio.open(format=FORMAT, channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)

# start recording
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    data_chunk = array('h', data)
    vol = max(data_chunk)
    if vol >= 500:
        print("something said")
        frames.append(data)
    else:
        print("nothing")
    print("\n")

# end of recording
stream.stop_stream()
stream.close()
audio.terminate()

# writing to file
wavfile = wave.open(FILE_NAME, 'wb')
wavfile.setnchannels(CHANNELS)
wavfile.setsampwidth(audio.get_sample_size(FORMAT))
wavfile.setframerate(RATE)
wavfile.writeframes(b''.join(frames))  # append frames recorded to file
wavfile.close()

I think this will help. The script above simply checks whether there is silence or not: if silence is detected it does not record anything, otherwise it records.