
videocapture - How to capture video (and audio) in Python, from a camera (or webcam)




I'm looking for a solution, on either Linux or Windows, that lets me

  • record video (+ audio) from my webcam and microphone, simultaneously.
  • save it as an .AVI file (or mpg or whatever)
  • display the video on screen while it is being recorded.

Compression is NOT an issue in my case, and I actually prefer to capture RAW and compress it later.

So far I have done this with an ActiveX component in VB that took care of everything, and I would like to move on with Python (the VB solution is unstable and unreliable).

So far I have only found code that captures VIDEO alone, or individual frames...

I have looked at, so far:

  • OpenCV - couldn't find audio capture there
  • PyGame - no simultaneous audio capture (AFAIK)
  • VideoCapture - provides single frames only
  • SimpleCV - no audio
  • VLC - binding to the VideoLAN program in wxPython - hopefully it will do it (still investigating this option)
  • kivy - just heard about it, haven't managed to get it working under Windows so far.

The question: is there a video and audio capture library for Python?

or - what are the other options, if any?


To the questions asked above: Yes, the code should also work with Python 3. I adjusted it a bit and it now works for both Python 2 and Python 3 (tested on Windows 7 with 2.7 and 3.6), although you need ffmpeg installed, or at least the ffmpeg.exe executable in the same directory; you can get it here: https://www.ffmpeg.org/download.html . Of course you also need all the other libraries, cv2, numpy and pyaudio, installed like this:

pip install opencv-python numpy pyaudio
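Before running it, it may be worth confirming that the ffmpeg executable is actually reachable. A minimal sketch of such a check (my addition, not part of the script; it assumes Python 3 for shutil.which):

import os
import shutil

# Assumption: ffmpeg is either on the PATH or ffmpeg.exe sits next to the script.
if shutil.which("ffmpeg") is None and not os.path.exists("ffmpeg.exe"):
    raise RuntimeError("ffmpeg not found - install it or place ffmpeg.exe in this directory")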

Now you can run the code directly:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# VideoRecorder.py
from __future__ import print_function, division
import numpy as np
import cv2
import pyaudio
import wave
import threading
import time
import subprocess
import os

class VideoRecorder():
    "Video class based on openCV"
    def __init__(self, name="temp_video.avi", fourcc="MJPG", sizex=640, sizey=480, camindex=0, fps=30):
        self.open = True
        self.device_index = camindex
        self.fps = fps                  # fps should be the minimum constant rate at which the camera can
        self.fourcc = fourcc            # capture images (with no decrease in speed over time; testing is required)
        self.frameSize = (sizex, sizey) # video formats and sizes also depend and vary according to the camera used
        self.video_filename = name
        self.video_cap = cv2.VideoCapture(self.device_index)
        self.video_writer = cv2.VideoWriter_fourcc(*self.fourcc)
        self.video_out = cv2.VideoWriter(self.video_filename, self.video_writer, self.fps, self.frameSize)
        self.frame_counts = 1
        self.start_time = time.time()

    def record(self):
        "Video starts being recorded"
        # counter = 1
        timer_start = time.time()
        timer_current = 0
        while self.open:
            ret, video_frame = self.video_cap.read()
            if ret:
                self.video_out.write(video_frame)
                # print(str(counter) + " " + str(self.frame_counts) + " frames written " + str(timer_current))
                self.frame_counts += 1
                # counter += 1
                # timer_current = time.time() - timer_start
                time.sleep(1/self.fps)
                # gray = cv2.cvtColor(video_frame, cv2.COLOR_BGR2GRAY)
                # cv2.imshow('video_frame', gray)
                # cv2.waitKey(1)
            else:
                break

    def stop(self):
        "Finishes the video recording therefore the thread too"
        if self.open:
            self.open = False
            self.video_out.release()
            self.video_cap.release()
            cv2.destroyAllWindows()

    def start(self):
        "Launches the video recording function using a thread"
        video_thread = threading.Thread(target=self.record)
        video_thread.start()

class AudioRecorder():
    "Audio class based on pyAudio and Wave"
    def __init__(self, filename="temp_audio.wav", rate=44100, fpb=1024, channels=2):
        self.open = True
        self.rate = rate
        self.frames_per_buffer = fpb
        self.channels = channels
        self.format = pyaudio.paInt16
        self.audio_filename = filename
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(format=self.format,
                                      channels=self.channels,
                                      rate=self.rate,
                                      input=True,
                                      frames_per_buffer=self.frames_per_buffer)
        self.audio_frames = []

    def record(self):
        "Audio starts being recorded"
        self.stream.start_stream()
        while self.open:
            data = self.stream.read(self.frames_per_buffer)
            self.audio_frames.append(data)
            if not self.open:
                break

    def stop(self):
        "Finishes the audio recording therefore the thread too"
        if self.open:
            self.open = False
            self.stream.stop_stream()
            self.stream.close()
            self.audio.terminate()
            waveFile = wave.open(self.audio_filename, 'wb')
            waveFile.setnchannels(self.channels)
            waveFile.setsampwidth(self.audio.get_sample_size(self.format))
            waveFile.setframerate(self.rate)
            waveFile.writeframes(b''.join(self.audio_frames))
            waveFile.close()

    def start(self):
        "Launches the audio recording function using a thread"
        audio_thread = threading.Thread(target=self.record)
        audio_thread.start()

def start_AVrecording(filename="test"):
    global video_thread
    global audio_thread
    video_thread = VideoRecorder()
    audio_thread = AudioRecorder()
    audio_thread.start()
    video_thread.start()
    return filename

def start_video_recording(filename="test"):
    global video_thread
    video_thread = VideoRecorder()
    video_thread.start()
    return filename

def start_audio_recording(filename="test"):
    global audio_thread
    audio_thread = AudioRecorder()
    audio_thread.start()
    return filename

def stop_AVrecording(filename="test"):
    audio_thread.stop()
    frame_counts = video_thread.frame_counts
    elapsed_time = time.time() - video_thread.start_time
    recorded_fps = frame_counts / elapsed_time
    print("total frames " + str(frame_counts))
    print("elapsed time " + str(elapsed_time))
    print("recorded fps " + str(recorded_fps))
    video_thread.stop()

    # Makes sure the threads have finished
    while threading.active_count() > 1:
        time.sleep(1)

    # Merging audio and video signal
    if abs(recorded_fps - 6) >= 0.01:    # If the fps rate was higher/lower than expected, re-encode it to the expected
        print("Re-encoding")
        cmd = "ffmpeg -r " + str(recorded_fps) + " -i temp_video.avi -pix_fmt yuv420p -r 6 temp_video2.avi"
        subprocess.call(cmd, shell=True)
        print("Muxing")
        cmd = "ffmpeg -y -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video2.avi -pix_fmt yuv420p " + filename + ".avi"
        subprocess.call(cmd, shell=True)
    else:
        print("Normal recording\nMuxing")
        cmd = "ffmpeg -y -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video.avi -pix_fmt yuv420p " + filename + ".avi"
        subprocess.call(cmd, shell=True)
    print("..")

def file_manager(filename="test"):
    "Required and wanted processing of final files"
    local_path = os.getcwd()
    if os.path.exists(str(local_path) + "/temp_audio.wav"):
        os.remove(str(local_path) + "/temp_audio.wav")
    if os.path.exists(str(local_path) + "/temp_video.avi"):
        os.remove(str(local_path) + "/temp_video.avi")
    if os.path.exists(str(local_path) + "/temp_video2.avi"):
        os.remove(str(local_path) + "/temp_video2.avi")
    # if os.path.exists(str(local_path) + "/" + filename + ".avi"):
    #     os.remove(str(local_path) + "/" + filename + ".avi")

if __name__ == '__main__':
    start_AVrecording()
    time.sleep(5)
    stop_AVrecording()
    file_manager()
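Since the updated constructors take keyword arguments, you can also import the module and pass your own filename and parameters. A small usage sketch (my addition), assuming the code above is saved as VideoRecorder.py, as its header comment suggests:

# Record roughly 10 seconds into my_clip.avi, then remove the temporary files.
import time
from VideoRecorder import start_AVrecording, stop_AVrecording, file_manager

start_AVrecording(filename="my_clip")
time.sleep(10)
stop_AVrecording(filename="my_clip")
file_manager(filename="my_clip")   # in this version it only deletes the temp_* files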


I have been looking for a good answer to this, and I think it is GStreamer...

The documentation for the Python bindings is extremely light, and most of it seems focused on the old 0.10 version of GStreamer rather than the new 1.x releases, but GStreamer is an extremely powerful, cross-platform multimedia framework that can stream, mux, transcode, and display just about anything.
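For illustration, with the GStreamer 1.x Python bindings (PyGObject) a combined webcam + microphone recording can be expressed as a single pipeline. The sketch below is my own addition, not from the original answer; the element choices (v4l2src, autoaudiosrc, x264enc, avenc_aac, matroskamux) are Linux-oriented assumptions and may need tweaking for your plugins and platform:

# Sketch: record webcam + microphone to out.mkv with GStreamer 1.x via PyGObject.
import time
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)
pipeline = Gst.parse_launch(
    "v4l2src ! videoconvert ! x264enc tune=zerolatency ! queue ! mux. "
    "autoaudiosrc ! audioconvert ! avenc_aac ! queue ! mux. "
    "matroskamux name=mux ! filesink location=out.mkv"
)
pipeline.set_state(Gst.State.PLAYING)

time.sleep(10)                              # record for ~10 seconds

pipeline.send_event(Gst.Event.new_eos())    # send EOS so the muxer can finalize the file
bus = pipeline.get_bus()
bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE,
                       Gst.MessageType.EOS | Gst.MessageType.ERROR)
pipeline.set_state(Gst.State.NULL)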


Answer: No. There is no single library/solution in Python for recording video and audio simultaneously. You have to implement both separately and merge the audio and video signals in a smart way to end up with a single video/audio file.

I have a solution for the problem you present. My code addresses your three issues:

  • Records video + audio from the webcam and microphone simultaneously.
  • Saves the final video/audio file as .AVI
  • Un-commenting lines 76, 77 and 78 (the cv2.cvtColor, cv2.imshow and cv2.waitKey calls in the repository version) will display the video on screen while it is being recorded.

My solution uses pyaudio for the audio recording, opencv for the video recording, and ffmpeg for muxing the two signals. To record both simultaneously, I use multithreading: one thread records the video, and a second one the audio. I have uploaded my code to GitHub and have also included all the essential parts here.

https://github.com/JRodrigoF/AVrecordeR

Note: opencv cannot control the fps at which the webcam actually records. It can only specify the desired final fps in the file encoding, but the webcam usually behaves differently depending on its specs and the light conditions (I found). So the fps has to be controlled at the level of the code.
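One practical consequence is that it is worth measuring the rate your camera actually delivers before trusting a hard-coded value. A small sketch of that idea (my addition, not part of the answer's code; device index 0 is an assumption):

import time
import cv2

def measure_fps(device_index=0, n_frames=60):
    """Grab n_frames from the camera and return the frame rate actually achieved."""
    cap = cv2.VideoCapture(device_index)
    # Warm up: the first frames are often slower while the driver adjusts exposure.
    for _ in range(10):
        cap.read()
    start = time.time()
    grabbed = 0
    for _ in range(n_frames):
        ret, _frame = cap.read()
        if ret:
            grabbed += 1
    elapsed = time.time() - start
    cap.release()
    return grabbed / elapsed

# print(measure_fps())   # e.g. feed the result into the VideoWriter fps setting

The complete recorder code follows: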

import cv2
import pyaudio
import wave
import threading
import time
import subprocess
import os

class VideoRecorder():

    # Video class based on openCV
    def __init__(self):
        self.open = True
        self.device_index = 0
        self.fps = 6                # fps should be the minimum constant rate at which the camera can
        self.fourcc = "MJPG"        # capture images (with no decrease in speed over time; testing is required)
        self.frameSize = (640, 480) # video formats and sizes also depend and vary according to the camera used
        self.video_filename = "temp_video.avi"
        self.video_cap = cv2.VideoCapture(self.device_index)
        self.video_writer = cv2.VideoWriter_fourcc(*self.fourcc)
        self.video_out = cv2.VideoWriter(self.video_filename, self.video_writer, self.fps, self.frameSize)
        self.frame_counts = 1
        self.start_time = time.time()

    # Video starts being recorded
    def record(self):
        # counter = 1
        timer_start = time.time()
        timer_current = 0
        while(self.open==True):
            ret, video_frame = self.video_cap.read()
            if (ret==True):
                self.video_out.write(video_frame)
                # print str(counter) + " " + str(self.frame_counts) + " frames written " + str(timer_current)
                self.frame_counts += 1
                # counter += 1
                # timer_current = time.time() - timer_start
                time.sleep(0.16)    # 0.16 delay -> 6 fps
                # gray = cv2.cvtColor(video_frame, cv2.COLOR_BGR2GRAY)
                # cv2.imshow('video_frame', gray)
                # cv2.waitKey(1)
            else:
                break

    # Finishes the video recording therefore the thread too
    def stop(self):
        if self.open==True:
            self.open=False
            self.video_out.release()
            self.video_cap.release()
            cv2.destroyAllWindows()
        else:
            pass

    # Launches the video recording function using a thread
    def start(self):
        video_thread = threading.Thread(target=self.record)
        video_thread.start()


class AudioRecorder():

    # Audio class based on pyAudio and Wave
    def __init__(self):
        self.open = True
        self.rate = 44100
        self.frames_per_buffer = 1024
        self.channels = 2
        self.format = pyaudio.paInt16
        self.audio_filename = "temp_audio.wav"
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(format=self.format,
                                      channels=self.channels,
                                      rate=self.rate,
                                      input=True,
                                      frames_per_buffer=self.frames_per_buffer)
        self.audio_frames = []

    # Audio starts being recorded
    def record(self):
        self.stream.start_stream()
        while(self.open == True):
            data = self.stream.read(self.frames_per_buffer)
            self.audio_frames.append(data)
            if self.open==False:
                break

    # Finishes the audio recording therefore the thread too
    def stop(self):
        if self.open==True:
            self.open = False
            self.stream.stop_stream()
            self.stream.close()
            self.audio.terminate()
            waveFile = wave.open(self.audio_filename, 'wb')
            waveFile.setnchannels(self.channels)
            waveFile.setsampwidth(self.audio.get_sample_size(self.format))
            waveFile.setframerate(self.rate)
            waveFile.writeframes(b''.join(self.audio_frames))
            waveFile.close()
        pass

    # Launches the audio recording function using a thread
    def start(self):
        audio_thread = threading.Thread(target=self.record)
        audio_thread.start()


def start_AVrecording(filename):
    global video_thread
    global audio_thread
    video_thread = VideoRecorder()
    audio_thread = AudioRecorder()
    audio_thread.start()
    video_thread.start()
    return filename


def start_video_recording(filename):
    global video_thread
    video_thread = VideoRecorder()
    video_thread.start()
    return filename


def start_audio_recording(filename):
    global audio_thread
    audio_thread = AudioRecorder()
    audio_thread.start()
    return filename


def stop_AVrecording(filename):
    audio_thread.stop()
    frame_counts = video_thread.frame_counts
    elapsed_time = time.time() - video_thread.start_time
    recorded_fps = frame_counts / elapsed_time
    print "total frames " + str(frame_counts)
    print "elapsed time " + str(elapsed_time)
    print "recorded fps " + str(recorded_fps)
    video_thread.stop()

    # Makes sure the threads have finished
    while threading.active_count() > 1:
        time.sleep(1)

    # Merging audio and video signal
    if abs(recorded_fps - 6) >= 0.01:    # If the fps rate was higher/lower than expected, re-encode it to the expected
        print "Re-encoding"
        cmd = "ffmpeg -r " + str(recorded_fps) + " -i temp_video.avi -pix_fmt yuv420p -r 6 temp_video2.avi"
        subprocess.call(cmd, shell=True)
        print "Muxing"
        cmd = "ffmpeg -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video2.avi -pix_fmt yuv420p " + filename + ".avi"
        subprocess.call(cmd, shell=True)
    else:
        print "Normal recording\nMuxing"
        cmd = "ffmpeg -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video.avi -pix_fmt yuv420p " + filename + ".avi"
        subprocess.call(cmd, shell=True)
    print ".."


# Required and wanted processing of final files
def file_manager(filename):
    local_path = os.getcwd()
    if os.path.exists(str(local_path) + "/temp_audio.wav"):
        os.remove(str(local_path) + "/temp_audio.wav")
    if os.path.exists(str(local_path) + "/temp_video.avi"):
        os.remove(str(local_path) + "/temp_video.avi")
    if os.path.exists(str(local_path) + "/temp_video2.avi"):
        os.remove(str(local_path) + "/temp_video2.avi")
    if os.path.exists(str(local_path) + "/" + filename + ".avi"):
        os.remove(str(local_path) + "/" + filename + ".avi")