Reducing updates on processing memories
beastoin committed Sep 12, 2024
1 parent 3b78f2b commit abf9734
Showing 2 changed files with 13 additions and 5 deletions.
7 changes: 7 additions & 0 deletions backend/database/processing_memories.py
```diff
@@ -45,6 +45,13 @@ def update_basic(uid: str, id: str, geolocation: dict, emotional_feedback: bool):
         'geolocation': geolocation,
     })
 
+def update_audio_url(uid: str, id: str, audio_url: str):
+    user_ref = db.collection('users').document(uid)
+    memory_ref = user_ref.collection('processing_memories').document(id)
+    memory_ref.update({
+        'audio_url': audio_url,
+    })
+
 def get_last(uid: str):
     processing_memories_ref = (
         db.collection('users').document(uid).collection('processing_memories')
```
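
Note that the companion helper `processing_memories_db.update_processing_memory_segments`, called from the transcribe.py hunks below, is not part of this diff and presumably predates the commit. A minimal sketch of what it plausibly looks like, assuming the same Firestore layout as `update_audio_url` (the field name `transcript_segments` is inferred from the call sites, not confirmed here):

```python
# Hypothetical sketch, not code from this commit: a per-field updater for
# transcript segments, mirroring the pattern of update_audio_url above.
def update_processing_memory_segments(uid: str, id: str, segments: list):
    user_ref = db.collection('users').document(uid)
    memory_ref = user_ref.collection('processing_memories').document(id)
    memory_ref.update({
        'transcript_segments': segments,
    })
```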
11 changes: 6 additions & 5 deletions backend/routers/transcribe.py
```diff
@@ -88,7 +88,6 @@ def _combine_segments(segments: [], new_segments: [], delta_seconds: int = 0):
         else:
             joined_similar_segments.append(new_segment)
 
-
     if (segments and
             (segments[-1].speaker == joined_similar_segments[0].speaker or
              (segments[-1].is_user and joined_similar_segments[0].is_user)) and
```
```diff
@@ -160,10 +159,11 @@ def stream_transcript(segments, stream_id):
         memory_transcript_segements = _combine_segments(memory_transcript_segements, list(map(lambda m: TranscriptSegment(**m), segments)), delta_seconds)
 
         # Sync processing transcript, periodically
-        if processing_memory and len(memory_transcript_segements) % 2 == 0:
+        if processing_memory and int(time.time()) % 3 == 0:
             processing_memory_synced = len(memory_transcript_segements)
             processing_memory.transcript_segments = memory_transcript_segements
-            processing_memories_db.update_processing_memory(uid, processing_memory.id, processing_memory.dict())
+            processing_memories_db.update_processing_memory_segments(uid, processing_memory.id,
+                                                                     list(map(lambda m: m.dict(), processing_memory.transcript_segments)))
 
     def stream_audio(audio_buffer):
         if not new_memory_watch:
```
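
For reference, the old and new gating conditions extracted as standalone predicates (the function names here are ours, for illustration only):

```python
import time

def should_sync_by_count(segment_count: int) -> bool:
    # Old condition: fires whenever the combined segment list has an even
    # length, so write volume grows with transcription activity.
    return segment_count % 2 == 0

def should_sync_by_time() -> bool:
    # New condition: fires only when the call lands on an epoch second
    # divisible by 3, decoupling write frequency from segment volume.
    return int(time.time()) % 3 == 0
```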
```diff
@@ -384,7 +384,7 @@ async def _post_process_memory(memory: Memory):
         # Store postprocessing audio file
         signed_url = upload_postprocessing_audio(file_path)
         processing_memory.audio_url = signed_url
-        processing_memories_db.update_processing_memory(uid, processing_memory.id, processing_memory.dict())
+        processing_memories_db.update_audio_url(uid, processing_memory.id, processing_memory.audio_url)
 
         os.remove(file_path)
```
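Both this hunk and the sync hunk above replace `update_processing_memory`, which rewrote the entire `processing_memory.dict()` on every call, with targeted per-field helpers. Writing only the field that changed keeps each Firestore update small; in particular, updating the audio URL no longer resends the full transcript.
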
```diff
@@ -412,7 +412,8 @@ async def _create_memory():
 
         processing_memory_synced = len(memory_transcript_segements)
         processing_memory.transcript_segments = memory_transcript_segements[:processing_memory_synced]
-        processing_memories_db.update_processing_memory(uid, processing_memory.id, processing_memory.dict())
+        processing_memories_db.update_processing_memory_segments(uid, processing_memory.id,
+                                                                 list(map(lambda m: m.dict(), processing_memory.transcript_segments)))
 
         # Message: creating
         ok = await _send_message_event(MessageEvent(event_type="new_memory_creating"))
```
