add video record

parent e9faa50b9e
commit baf8270fc5
app.py (+17)
@@ -163,6 +163,22 @@ async def set_audiotype(request):
         ),
     )
 
+async def record(request):
+    params = await request.json()
+
+    sessionid = params.get('sessionid',0)
+    if params['type']=='start_record':
+        # nerfreals[sessionid].put_msg_txt(params['text'])
+        nerfreals[sessionid].start_recording()
+    elif params['type']=='end_record':
+        nerfreals[sessionid].stop_recording()
+    return web.Response(
+        content_type="application/json",
+        text=json.dumps(
+            {"code": 0, "data":"ok"}
+        ),
+    )
+
 async def on_shutdown(app):
     # close peer connections
     coros = [pc.close() for pc in pcs]

@@ -421,6 +437,7 @@ if __name__ == '__main__':
     appasync.router.add_post("/offer", offer)
     appasync.router.add_post("/human", human)
     appasync.router.add_post("/set_audiotype", set_audiotype)
+    appasync.router.add_post("/record", record)
     appasync.router.add_static('/',path='web')
 
     # Configure default CORS settings.

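With the /record route registered alongside the existing endpoints, a client can toggle recording with a plain JSON POST. Below is a hedged usage sketch, assuming the server is reachable at http://localhost:8010 (adjust host and port to however app.py is launched); the 5-second duration and the use of urllib are illustrative and not part of the commit.

# Hedged sketch (not part of the commit): drive the new /record endpoint from
# Python. Host, port and the 5-second duration are illustrative; sessionid
# must refer to an active session created via /offer.
import json
import time
from urllib.request import Request, urlopen

BASE = "http://localhost:8010"   # adjust to wherever app.py is listening

def post_record(record_type, sessionid=0):
    payload = json.dumps({"type": record_type, "sessionid": sessionid}).encode()
    req = Request(BASE + "/record", data=payload,
                  headers={"Content-Type": "application/json"}, method="POST")
    with urlopen(req) as resp:
        return json.loads(resp.read())

print(post_record("start_record"))   # expected: {"code": 0, "data": "ok"}
time.sleep(5)                        # record ~5 seconds of the live stream
print(post_record("end_record"))     # output lands in data/record_lasted.mp4
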
basereal.py (+61)
@@ -15,6 +15,9 @@ from threading import Thread, Event
 from io import BytesIO
 import soundfile as sf
 
+import av
+from fractions import Fraction
+
 from ttsreal import EdgeTTS,VoitsTTS,XTTS
 
 from tqdm import tqdm

@@ -39,6 +42,10 @@ class BaseReal:
         elif opt.tts == "xtts":
             self.tts = XTTS(opt,self)
 
+        self.recording = False
+        self.recordq_video = Queue()
+        self.recordq_audio = Queue()
+
         self.curr_state=0
         self.custom_img_cycle = {}
         self.custom_audio_cycle = {}

@@ -65,6 +72,60 @@ class BaseReal:
         for key in self.custom_index:
             self.custom_index[key]=0
 
+    def start_recording(self):
+        """Start video recording"""
+        if self.recording:
+            return
+        self.recording = True
+        self.recordq_video.queue.clear()
+        self.recordq_audio.queue.clear()
+        self.container = av.open("data/record_lasted.mp4", mode="w")
+
+        process_thread = Thread(target=self.record_frame, args=())
+        process_thread.start()
+
+    def record_frame(self):
+        videostream = self.container.add_stream("libx264", rate=25)
+        videostream.codec_context.time_base = Fraction(1, 25)
+        audiostream = self.container.add_stream("aac")
+        audiostream.codec_context.time_base = Fraction(1, 16000)
+        init = True
+        framenum = 0
+        while self.recording:
+            try:
+                videoframe = self.recordq_video.get(block=True, timeout=1)
+                videoframe.pts = framenum #int(round(framenum*0.04 / videostream.codec_context.time_base))
+                videoframe.dts = videoframe.pts
+                if init:
+                    videostream.width = videoframe.width
+                    videostream.height = videoframe.height
+                    init = False
+                for packet in videostream.encode(videoframe):
+                    self.container.mux(packet)
+                for k in range(2):
+                    audioframe = self.recordq_audio.get(block=True, timeout=1)
+                    audioframe.pts = int(round((framenum*2+k)*0.02 / audiostream.codec_context.time_base))
+                    audioframe.dts = audioframe.pts
+                    for packet in audiostream.encode(audioframe):
+                        self.container.mux(packet)
+                framenum += 1
+            except queue.Empty:
+                print('record queue empty,')
+                continue
+            except Exception as e:
+                print(e)
+                #break
+        self.container.close()
+        self.recordq_video.queue.clear()
+        self.recordq_audio.queue.clear()
+        print('record thread stop')
+
+    def stop_recording(self):
+        """Stop video recording"""
+        if not self.recording:
+            return
+        self.recording = False
+
     def mirror_index(self,size, index):
         #size = len(self.coord_list_cycle)
         turn = index // size

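record_frame stamps each video frame with pts equal to the frame index against a 1/25 s time base (25 fps), and for every video frame it pulls two audio frames from the queue: two 20 ms chunks of 16 kHz audio per 40 ms of video, with audio pts expressed in samples. The following standalone sketch reproduces that PyAV muxing pattern with synthetic frames so it can be run outside the project; the output path, resolution and 2-second duration are illustrative, and unlike the committed record_frame it also flushes both encoders before closing the container.

# Hedged, standalone sketch of the PyAV muxing pattern used by record_frame,
# fed with synthetic frames instead of recordq_video/recordq_audio.
# Output name, resolution and duration are illustrative.
import av
import numpy as np
from fractions import Fraction
from av import AudioFrame, VideoFrame

container = av.open("demo_record.mp4", mode="w")

video = container.add_stream("libx264", rate=25)
video.width, video.height = 640, 480
video.pix_fmt = "yuv420p"
video.codec_context.time_base = Fraction(1, 25)      # 1 tick = one video frame

audio = container.add_stream("aac", rate=16000)
audio.codec_context.time_base = Fraction(1, 16000)   # 1 tick = one audio sample

for framenum in range(50):                            # 50 frames = 2 s at 25 fps
    img = np.zeros((480, 640, 3), dtype=np.uint8)     # black frame stand-in
    vframe = VideoFrame.from_ndarray(img, format="bgr24")
    vframe.pts = framenum                             # frame index in 1/25 s ticks
    for packet in video.encode(vframe):
        container.mux(packet)

    for k in range(2):                                # two 20 ms audio chunks per video frame
        samples = np.zeros((1, 320), dtype=np.int16)  # 320 samples = 20 ms at 16 kHz
        aframe = AudioFrame.from_ndarray(samples, format="s16", layout="mono")
        aframe.sample_rate = 16000
        aframe.pts = (framenum * 2 + k) * 320         # pts counted in samples
        for packet in audio.encode(aframe):
            container.mux(packet)

# flush both encoders so buffered packets reach the file, then close
for packet in video.encode():
    container.mux(packet)
for packet in audio.encode():
    container.mux(packet)
container.close()
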
@@ -26,6 +26,8 @@ from av import AudioFrame, VideoFrame
 from wav2lip.models import Wav2Lip
 from basereal import BaseReal
 
+#from imgcache import ImgCache
+
 from tqdm import tqdm
 
 device = 'cuda' if torch.cuda.is_available() else 'cpu'

@@ -188,6 +190,7 @@ class LipReal(BaseReal):
         input_img_list = glob.glob(os.path.join(self.full_imgs_path, '*.[jpJP][pnPN]*[gG]'))
         input_img_list = sorted(input_img_list, key=lambda x: int(os.path.splitext(os.path.basename(x))[0]))
         self.frame_list_cycle = read_imgs(input_img_list)
+        #self.imagecache = ImgCache(len(self.coord_list_cycle),self.full_imgs_path,1000)
 
 
     def put_msg_txt(self,msg):

@@ -218,9 +221,11 @@ class LipReal(BaseReal):
                     # self.curr_state = 1 # the current video does not loop; switch to the silent state
                 else:
                     combine_frame = self.frame_list_cycle[idx]
+                    #combine_frame = self.imagecache.get_img(idx)
             else:
                 bbox = self.coord_list_cycle[idx]
                 combine_frame = copy.deepcopy(self.frame_list_cycle[idx])
+                #combine_frame = copy.deepcopy(self.imagecache.get_img(idx))
                 y1, y2, x1, x2 = bbox
                 try:
                     res_frame = cv2.resize(res_frame.astype(np.uint8),(x2-x1,y2-y1))

@@ -234,6 +239,8 @@ class LipReal(BaseReal):
             image = combine_frame #(outputs['image'] * 255).astype(np.uint8)
             new_frame = VideoFrame.from_ndarray(image, format="bgr24")
             asyncio.run_coroutine_threadsafe(video_track._queue.put(new_frame), loop)
+            if self.recording:
+                self.recordq_video.put(new_frame)
 
             for audio_frame in audio_frames:
                 frame,type = audio_frame

@@ -244,6 +251,8 @@ class LipReal(BaseReal):
                 # if audio_track._queue.qsize()>10:
                 #     time.sleep(0.1)
                 asyncio.run_coroutine_threadsafe(audio_track._queue.put(new_frame), loop)
+                if self.recording:
+                    self.recordq_audio.put(new_frame)
         print('musereal process_frames thread stop')
 
     def render(self,quit_event,loop=None,audio_track=None,video_track=None):

@@ -270,6 +270,8 @@ class MuseReal(BaseReal):
             image = combine_frame #(outputs['image'] * 255).astype(np.uint8)
             new_frame = VideoFrame.from_ndarray(image, format="bgr24")
             asyncio.run_coroutine_threadsafe(video_track._queue.put(new_frame), loop)
+            if self.recording:
+                self.recordq_video.put(new_frame)
 
             for audio_frame in audio_frames:
                 frame,type = audio_frame

@@ -280,6 +282,8 @@ class MuseReal(BaseReal):
                 # if audio_track._queue.qsize()>10:
                 #     time.sleep(0.1)
                 asyncio.run_coroutine_threadsafe(audio_track._queue.put(new_frame), loop)
+                if self.recording:
+                    self.recordq_audio.put(new_frame)
         print('musereal process_frames thread stop')
 
     def render(self,quit_event,loop=None,audio_track=None,video_track=None):

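On the producer side, LipReal and MuseReal duplicate every frame they hand to the WebRTC track queues into the record queues while self.recording is set; queue.Queue carries the frames across the thread boundary to the record_frame consumer in BaseReal. A condensed, hedged sketch of that tee follows (class and parameter names are illustrative, not the committed code).

# Hedged sketch of the producer-side tee used by LipReal/MuseReal: a frame is
# always sent to the streaming track queue, and additionally pushed into the
# record queue while recording is active. Names here are illustrative.
import asyncio
from queue import Queue

from av import VideoFrame

class RecordingTee:
    def __init__(self):
        self.recording = False
        self.recordq_video = Queue()   # drained by the record thread (record_frame)

    def push_video(self, image, track_queue, loop):
        new_frame = VideoFrame.from_ndarray(image, format="bgr24")
        # streaming path: hand the frame to the asyncio side of the WebRTC track
        asyncio.run_coroutine_threadsafe(track_queue.put(new_frame), loop)
        # recording path: tee the same frame to the recorder, only when enabled
        if self.recording:
            self.recordq_video.put(new_frame)
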
@@ -30,6 +30,9 @@
     </div>
     <button id="start" onclick="start()">Start</button>
     <button id="stop" style="display: none" onclick="stop()">Stop</button>
+    <button class="btn btn-primary" id="btn_start_record">Start Recording</button>
+    <button class="btn btn-primary" id="btn_stop_record" disabled>Stop Recording</button>
+    <!-- <button class="btn btn-primary" id="btn_download">Download Video</button> -->
     <input type="hidden" id="sessionid" value="0">
     <form class="form-inline" id="echo-form">
         <div class="form-group">

@@ -92,6 +95,90 @@
                 //ws.send(message);
                 $('#message').val('');
             });
 
+            $('#btn_start_record').click(function() {
+                // start recording
+                console.log('Starting recording...');
+                fetch('/record', {
+                    body: JSON.stringify({
+                        type: 'start_record',
+                    }),
+                    headers: {
+                        'Content-Type': 'application/json'
+                    },
+                    method: 'POST'
+                }).then(function(response) {
+                    if (response.ok) {
+                        console.log('Recording started.');
+                        $('#btn_start_record').prop('disabled', true);
+                        $('#btn_stop_record').prop('disabled', false);
+                        // $('#btn_download').prop('disabled', true);
+                    } else {
+                        console.error('Failed to start recording.');
+                    }
+                }).catch(function(error) {
+                    console.error('Error:', error);
+                });
+            });
+
+            $('#btn_stop_record').click(function() {
+                // stop recording
+                console.log('Stopping recording...');
+                fetch('/record', {
+                    body: JSON.stringify({
+                        type: 'end_record',
+                    }),
+                    headers: {
+                        'Content-Type': 'application/json'
+                    },
+                    method: 'POST'
+                }).then(function(response) {
+                    if (response.ok) {
+                        console.log('Recording stopped.');
+                        $('#btn_start_record').prop('disabled', false);
+                        $('#btn_stop_record').prop('disabled', true);
+                        // $('#btn_download').prop('disabled', false);
+                    } else {
+                        console.error('Failed to stop recording.');
+                    }
+                }).catch(function(error) {
+                    console.error('Error:', error);
+                });
+            });
+
+            // $('#btn_download').click(function() {
+            //     // download the video file
+            //     console.log('Downloading video...');
+            //     fetch('/record_lasted.mp4', {
+            //         method: 'GET'
+            //     }).then(function(response) {
+            //         if (response.ok) {
+            //             return response.blob();
+            //         } else {
+            //             throw new Error('Failed to download the video.');
+            //         }
+            //     }).then(function(blob) {
+            //         // create a Blob object
+            //         const url = window.URL.createObjectURL(blob);
+            //         // create a hidden downloadable link
+            //         const a = document.createElement('a');
+            //         a.style.display = 'none';
+            //         a.href = url;
+            //         a.download = 'record_lasted.mp4';
+            //         document.body.appendChild(a);
+            //         // trigger the download
+            //         a.click();
+            //         // clean up
+            //         window.URL.revokeObjectURL(url);
+            //         document.body.removeChild(a);
+            //         console.log('Video downloaded successfully.');
+            //     }).catch(function(error) {
+            //         console.error('Error:', error);
+            //     });
+            // });
+
         });
 
 
     </script>
 </html>