improve nerf audio video sync
parent dc94e87620
commit 677227145e

 app.py | 10 +++++-----
app.py
@@ -20,10 +20,6 @@ from aiortc import RTCPeerConnection, RTCSessionDescription
 from webrtc import HumanPlayer
 
 import argparse
-from ernerf.nerf_triplane.provider import NeRFDataset_Test
-from ernerf.nerf_triplane.utils import *
-from ernerf.nerf_triplane.network import NeRFNetwork
-from nerfreal import NeRFReal
 
 import shutil
 import asyncio
@@ -437,7 +433,7 @@ if __name__ == '__main__':
     #musetalk opt
     parser.add_argument('--avatar_id', type=str, default='avator_1')
     parser.add_argument('--bbox_shift', type=int, default=5)
-    parser.add_argument('--batch_size', type=int, default=4)
+    parser.add_argument('--batch_size', type=int, default=16)
 
     parser.add_argument('--customvideo', action='store_true', help="custom video")
     parser.add_argument('--customvideo_img', type=str, default='data/customvideo/img')
@@ -466,6 +462,10 @@ if __name__ == '__main__':
         gspeaker = get_speaker(opt.REF_FILE, opt.TTS_SERVER)
 
     if opt.model == 'ernerf':
+        from ernerf.nerf_triplane.provider import NeRFDataset_Test
+        from ernerf.nerf_triplane.utils import *
+        from ernerf.nerf_triplane.network import NeRFNetwork
+        from nerfreal import NeRFReal
         # assert test mode
         opt.test = True
         opt.test_train = False
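Taken together with the import hunk at the top of app.py, this makes the ernerf modules lazy: they are imported only when the ernerf backend is actually selected, so the other pipelines can start without the ernerf package and its GPU extensions installed. A minimal sketch of that deferred-import pattern, assuming hypothetical run_ernerf/run_other entry points that stand in for the real startup code:

# Deferred-import sketch; run_ernerf/run_other and the flag default are
# illustrative, not the project's real startup code.
import argparse

def run_ernerf(opt):
    # Heavy, GPU-dependent modules load only when this backend is chosen.
    from nerfreal import NeRFReal  # noqa: F401
    print("ernerf backend selected")

def run_other(opt):
    print("non-NeRF backend selected; ernerf was never imported")

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='other')
    opt = parser.parse_args()
    (run_ernerf if opt.model == 'ernerf' else run_other)(opt)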
musereal.py
@@ -26,6 +26,7 @@ from museasr import MuseASR
 import asyncio
 from av import AudioFrame, VideoFrame
 
+@torch.no_grad()
 class MuseReal:
     def __init__(self, opt):
         self.opt = opt # shared with the trainer's opt to support in-place modification of rendering parameters.
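torch.no_grad() is documented both as a context manager and as a decorator: the decorated callable runs with gradient tracking disabled, which avoids building autograd graphs during pure inference. A small sketch of the common per-method form, with a made-up Lipsync class standing in for MuseReal:

# Illustrative only: Lipsync and its tensor shapes are placeholders, not MuseReal's API.
import torch

class Lipsync:
    def __init__(self):
        self.net = torch.nn.Linear(64, 64)

    @torch.no_grad()  # the forward below builds no autograd graph
    def infer(self, feat: torch.Tensor) -> torch.Tensor:
        out = self.net(feat)
        assert not out.requires_grad  # would require grad without the decorator (net has parameters)
        return out

if __name__ == '__main__':
    print(Lipsync().infer(torch.randn(1, 64)).shape)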
nerfreal.py
@@ -262,7 +262,10 @@ class NeRFReal:
                 print(f"------actual avg infer fps:{count/totaltime:.4f}")
                 count=0
                 totaltime=0
-            delay = _starttime+_totalframe*0.04-time.perf_counter() #40ms
-            if delay > 0:
-                time.sleep(delay)
+            if video_track._queue.qsize()>=5:
+                #print('sleep qsize=',video_track._queue.qsize())
+                time.sleep(0.1)
+            # delay = _starttime+_totalframe*0.04-time.perf_counter() #40ms
+            # if delay > 0:
+            #     time.sleep(delay)
 
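This hunk is the sync change itself: the old pacing slept for a wall-clock delay computed from the frame index (_starttime + _totalframe*0.04, i.e. 40 ms per frame), which drifts once inference runs faster or slower than 25 fps. The new code instead checks the video track's outgoing queue and backs off for 100 ms whenever five or more frames are already buffered, so the consumer's playback rate bounds how far video can run ahead of the audio. A self-contained backpressure sketch under those assumptions, using a plain queue.Queue and a fake consumer rather than the project's aiortc track:

# Backpressure sketch: the producer throttles on queue depth, mirroring the
# qsize()>=5 / sleep(0.1) change above. Queue, rates and threshold are
# illustrative, not taken from nerfreal.py.
import queue
import threading
import time

frames = queue.Queue()

def consumer(fps=25):
    while True:
        frames.get()             # "play" one frame
        time.sleep(1.0 / fps)    # drain at ~25 fps (40 ms per frame)

def producer(total=100):
    for i in range(total):
        while frames.qsize() >= 5:   # buffer is running ahead of playback
            time.sleep(0.1)          # back off instead of timing against a start clock
        frames.put(i)                # "rendering" is instant here; real inference is not

threading.Thread(target=consumer, daemon=True).start()
producer()
print("produced 100 frames; residual queue depth:", frames.qsize())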