fix webrtc audio problem
parent f2d81f88d3
commit 027e15201a

nerfreal.py | 13
@@ -167,14 +167,21 @@ class NeRFReal:
 #print(f'[INFO] get_audio_out shape ',frame.shape)
 self.streamer.stream_frame_audio(frame)
 else:
-frame1 = self.asr.get_audio_out()
-frame2 = self.asr.get_audio_out()
-frame = np.concatenate((frame1,frame2))
+for _ in range(2):
+frame = self.asr.get_audio_out()
 frame = (frame * 32767).astype(np.int16)
 new_frame = AudioFrame(format='s16', layout='mono', samples=frame.shape[0])
 new_frame.planes[0].update(frame.tobytes())
 new_frame.sample_rate=16000
 asyncio.run_coroutine_threadsafe(audio_track._queue.put(new_frame), loop)
+# frame1 = self.asr.get_audio_out()
+# frame2 = self.asr.get_audio_out()
+# frame = np.concatenate((frame1,frame2))
+# frame = (frame * 32767).astype(np.int16)
+# new_frame = AudioFrame(format='s16', layout='mono', samples=frame.shape[0])
+# new_frame.planes[0].update(frame.tobytes())
+# new_frame.sample_rate=16000
+# asyncio.run_coroutine_threadsafe(audio_track._queue.put(new_frame), loop)
 # frame = (frame * 32767).astype(np.int16).tobytes()
 # self.fifo_audio.write(frame)
 else:
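For reference, the per-chunk path the new side introduces amounts to the sketch below: instead of concatenating two ASR chunks into one oversized frame, each chunk from get_audio_out() becomes its own mono s16 AudioFrame at 16 kHz and is handed to the track's asyncio queue from the render thread. This is a minimal sketch, assuming the chunk is float32 PCM in [-1, 1] and that audio_track exposes the _queue used in the diff; push_audio_chunk is a hypothetical helper name, not code from the repo.

import asyncio
import numpy as np
from av import AudioFrame

SAMPLE_RATE = 16000  # the diff hard-codes new_frame.sample_rate = 16000

def push_audio_chunk(chunk, audio_track, loop):
    """Sketch: convert one float32 PCM chunk into a mono s16 AudioFrame
    and enqueue it on the WebRTC track from a non-event-loop thread."""
    pcm16 = (chunk * 32767).astype(np.int16)      # float [-1, 1] -> signed 16-bit
    frame = AudioFrame(format='s16', layout='mono', samples=pcm16.shape[0])
    frame.planes[0].update(pcm16.tobytes())       # raw little-endian samples
    frame.sample_rate = SAMPLE_RATE
    # The render loop runs outside the asyncio event loop, so the queue put
    # must be scheduled thread-safely, exactly as the diff does.
    asyncio.run_coroutine_threadsafe(audio_track._queue.put(frame), loop)

Because the loop in the diff runs twice per video tick, every get_audio_out() chunk now produces its own AudioFrame rather than two chunks being merged into one, which keeps the audio queue fed at a steady frame size.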
@@ -46,8 +46,8 @@ class PlayerStreamTrack(MediaStreamTrack):
 
 if self.kind == 'video':
 if hasattr(self, "_timestamp"):
-self._timestamp = (time.time()-self._start) * VIDEO_CLOCK_RATE
-# self._timestamp += int(VIDEO_PTIME * VIDEO_CLOCK_RATE)
+# self._timestamp = (time.time()-self._start) * VIDEO_CLOCK_RATE
+self._timestamp += int(VIDEO_PTIME * VIDEO_CLOCK_RATE)
 # wait = self._start + (self._timestamp / VIDEO_CLOCK_RATE) - time.time()
 # if wait>0:
 # await asyncio.sleep(wait)
@@ -57,8 +57,8 @@ class PlayerStreamTrack(MediaStreamTrack):
 return self._timestamp, VIDEO_TIME_BASE
 else: #audio
 if hasattr(self, "_timestamp"):
-self._timestamp = (time.time()-self._start) * SAMPLE_RATE
-# self._timestamp += int(AUDIO_PTIME * SAMPLE_RATE)
+# self._timestamp = (time.time()-self._start) * SAMPLE_RATE
+self._timestamp += int(AUDIO_PTIME * SAMPLE_RATE)
 # wait = self._start + (self._timestamp / SAMPLE_RATE) - time.time()
 # if wait>0:
 # await asyncio.sleep(wait)
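The last two hunks switch PlayerStreamTrack from deriving _timestamp off the wall clock to advancing it by a fixed step per frame. A minimal sketch of that fixed-increment pacing is below; only the constant names appear in the diff, so the values here are assumptions (20 ms audio frames, 25 fps video, the standard 90 kHz RTP video clock), and TimestampPacer is a hypothetical stand-in for the real track class.

import time

# Assumed values; only the names come from the diff.
AUDIO_PTIME = 0.020        # 20 ms of audio per frame
SAMPLE_RATE = 16000        # matches new_frame.sample_rate in nerfreal.py
VIDEO_PTIME = 1 / 25       # one video frame every 40 ms
VIDEO_CLOCK_RATE = 90000   # standard RTP video clock

class TimestampPacer:
    """Sketch of the pacing the commit switches to: each frame advances the
    RTP timestamp by a constant step instead of recomputing it from time.time()."""
    def __init__(self, kind):
        self.kind = kind

    def next_timestamp(self):
        if hasattr(self, "_timestamp"):
            if self.kind == 'video':
                self._timestamp += int(VIDEO_PTIME * VIDEO_CLOCK_RATE)  # +3600 ticks
            else:
                self._timestamp += int(AUDIO_PTIME * SAMPLE_RATE)       # +320 samples
        else:
            self._start = time.time()
            self._timestamp = 0
        return self._timestamp

With fixed increments, the timestamps stay monotonic and evenly spaced even when frame production jitters, which is what the wall-clock version (now commented out) did not guarantee.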