Skip to content

Commit

Permalink
Set pre-segments for capturing memory
Browse files Browse the repository at this point in the history
  • Loading branch information
beastoin committed Sep 27, 2024
1 parent 6e6e88e commit 4304ee4
Show file tree
Hide file tree
Showing 5 changed files with 44 additions and 35 deletions.
6 changes: 6 additions & 0 deletions app/lib/backend/schema/memory.dart
Original file line number Diff line number Diff line change
Expand Up @@ -80,13 +80,15 @@ class ServerProcessingMemory {
final DateTime? startedAt;
final DateTime? capturingTo;
final ServerProcessingMemoryStatus? status;
final List<TranscriptSegment> transcriptSegments;

// Creates a processing-memory snapshot received from the server.
//
// [id] and [createdAt] are always present; [startedAt], [capturingTo] and
// [status] are optional server-side fields. [transcriptSegments] defaults to
// an immutable empty list so callers that predate this field keep working.
ServerProcessingMemory({
required this.id,
required this.createdAt,
this.startedAt,
this.capturingTo,
this.status,
this.transcriptSegments = const [],
});

factory ServerProcessingMemory.fromJson(Map<String, dynamic> json) {
Expand All @@ -96,6 +98,9 @@ class ServerProcessingMemory {
startedAt: json['started_at'] != null ? DateTime.parse(json['started_at']).toLocal() : null,
capturingTo: json['capturing_to'] != null ? DateTime.parse(json['capturing_to']).toLocal() : null,
status: json['status'] != null ? ServerProcessingMemoryStatus.valuesFromString(json['status']) : null,
transcriptSegments: ((json['transcript_segments'] ?? []) as List<dynamic>)
.map((segment) => TranscriptSegment.fromJson(segment))
.toList(),
);
}

Expand All @@ -106,6 +111,7 @@ class ServerProcessingMemory {
'started_at': startedAt?.toUtc().toIso8601String(),
'capturing_to': capturingTo?.toUtc().toIso8601String(),
'status': status.toString(),
'transcript_segments': transcriptSegments.map((segment) => segment.toJson()).toList(),
};
}

Expand Down
46 changes: 17 additions & 29 deletions app/lib/providers/capture_provider.dart
Original file line number Diff line number Diff line change
Expand Up @@ -79,10 +79,6 @@ class CaptureProvider extends ChangeNotifier

// -----------------------
// Memory creation variables
double? streamStartedAtSecond;
DateTime? firstStreamReceivedAt;
int? secondsMissedOnReconnect;
WavBytesUtil? audioStorage;
String conversationId = const Uuid().v4();
int elapsedSeconds = 0;
List<int> currentStorageFiles = <int>[];
Expand Down Expand Up @@ -163,19 +159,32 @@ class CaptureProvider extends ChangeNotifier
}
_setCapturingProcessingMemory(result?.result);

// Set pre-segments
if (capturingProcessingMemory != null && (capturingProcessingMemory?.transcriptSegments ?? []).isNotEmpty) {
List<TranscriptSegment> transcriptSegments = capturingProcessingMemory!.transcriptSegments;
if (segments.isNotEmpty) {
int idx = transcriptSegments.indexWhere((e) => e.end > segments.first.start) - 1;
if (idx >= 0) {
transcriptSegments = transcriptSegments.sublist(0, idx);
}
transcriptSegments.addAll(segments);
}
segments = transcriptSegments;
setHasTranscripts(segments.isNotEmpty);
}

// Update processing memory
_updateProcessingMemory();
}

void _setCapturingProcessingMemory(ServerProcessingMemory? pm) {
var now = DateTime.now();
debugPrint("${pm?.toJson()}");
if (pm != null &&
pm.status == ServerProcessingMemoryStatus.capturing &&
pm.capturingTo != null &&
pm.capturingTo!.isAfter(DateTime.now())) {
pm.capturingTo!.isAfter(now)) {
capturingProcessingMemory = pm;

// TODO: thinh, might set pre-segments
} else {
capturingProcessingMemory = null;
}
Expand All @@ -189,7 +198,7 @@ class CaptureProvider extends ChangeNotifier
// Or watch
var id = capturingProcessingMemory!.id;
var delayMs = capturingProcessingMemory?.capturingTo != null
? capturingProcessingMemory!.capturingTo!.millisecondsSinceEpoch - DateTime.now().millisecondsSinceEpoch
? capturingProcessingMemory!.capturingTo!.millisecondsSinceEpoch - now.millisecondsSinceEpoch
: 2 * 60 * 1000; // 2m
if (delayMs > 0) {
_processingMemoryWatchTimer?.cancel();
Expand Down Expand Up @@ -354,13 +363,8 @@ class CaptureProvider extends ChangeNotifier
Future _clean() async {
segments = [];

audioStorage?.clearAudioBytes();

elapsedSeconds = 0;

streamStartedAtSecond = null;
firstStreamReceivedAt = null;
secondsMissedOnReconnect = null;
photos = [];
conversationId = const Uuid().v4();

Expand Down Expand Up @@ -431,26 +435,17 @@ class CaptureProvider extends ChangeNotifier
}
_socket?.subscribe(this, this);
_transcriptServiceReady = true;

if (segments.isNotEmpty) {
// means that it was a reconnection, so we need to reset
streamStartedAtSecond = null;
secondsMissedOnReconnect = (DateTime.now().difference(firstStreamReceivedAt!).inSeconds);
}
}

Future streamAudioToWs(String id, BleAudioCodec codec) async {
debugPrint('streamAudioToWs in capture_provider');
audioStorage = WavBytesUtil(codec: codec);
if (_bleBytesStream != null) {
_bleBytesStream?.cancel();
}
_bleBytesStream = await _getBleAudioBytesListener(
id,
onAudioBytesReceived: (List<int> value) {
if (value.isEmpty) return;
// audioStorage!.storeFramePacket(value);
// print('audioStorage: ${audioStorage!.frames.length} ${audioStorage!.rawPackets.length}');

final trimmedValue = value.sublist(3);

Expand Down Expand Up @@ -861,18 +856,11 @@ class CaptureProvider extends ChangeNotifier
// TODO: small bug -> when memory A creates, and memory B starts, memory B will clean a lot more seconds than available,
// losing from the audio the first part of the recording. All other parts are fine.
FlutterForegroundTask.sendDataToTask(jsonEncode({'location': true}));
var currentSeconds = (audioStorage?.frames.length ?? 0) ~/ 100;
var removeUpToSecond = newSegments[0].start.toInt();
audioStorage?.removeFramesRange(fromSecond: 0, toSecond: min(max(currentSeconds - 5, 0), removeUpToSecond));
firstStreamReceivedAt = DateTime.now();
}

streamStartedAtSecond ??= newSegments[0].start;
TranscriptSegment.combineSegments(
segments,
newSegments,
toRemoveSeconds: streamStartedAtSecond ?? 0,
toAddSeconds: secondsMissedOnReconnect ?? 0,
);
triggerTranscriptSegmentReceivedEvents(newSegments, conversationId, sendMessageToChat: (v) {
messageProvider?.addMessage(v);
Expand Down
15 changes: 15 additions & 0 deletions backend/models/processing_memory.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,17 @@ class BasicProcessingMemory(BaseModel):

memory_id: Optional[str] = None

class DetailProcessingMemory(BaseModel):
    """Detailed view of a processing memory.

    Mirrors the fields of ``BasicProcessingMemory`` and additionally exposes
    ``transcript_segments`` so the client can restore previously captured
    segments (returned by the GET /v1/processing-memories/{id} endpoint).
    """

    id: str
    timer_start: float
    created_at: datetime
    # Deadline until which the memory is considered to be actively capturing.
    capturing_to: Optional[datetime] = None
    status: Optional[ProcessingMemoryStatus] = None
    geolocation: Optional[Geolocation] = None
    emotional_feedback: Optional[bool] = False
    # Segments captured so far; pydantic copies the default per instance,
    # so the shared `[]` literal is safe here.
    transcript_segments: List[TranscriptSegment] = []
    # Set once the processing memory has been materialized into a memory.
    memory_id: Optional[str] = None


class UpdateProcessingMemory(BaseModel):
id: Optional[str] = None
Expand All @@ -56,6 +67,10 @@ class UpdateProcessingMemory(BaseModel):
class UpdateProcessingMemoryResponse(BaseModel):
result: BasicProcessingMemory

class DetailProcessingMemoryResponse(BaseModel):
    """API envelope wrapping a single ``DetailProcessingMemory`` under ``result``."""

    result: DetailProcessingMemory


class BasicProcessingMemoryResponse(BaseModel):
result: BasicProcessingMemory

Expand Down
6 changes: 3 additions & 3 deletions backend/routers/processing_memories.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from fastapi import APIRouter, Depends, HTTPException

import utils.processing_memories as processing_memory_utils
from models.processing_memory import UpdateProcessingMemoryResponse, UpdateProcessingMemory, BasicProcessingMemoryResponse, BasicProcessingMemoriesResponse
from models.processing_memory import UpdateProcessingMemoryResponse, UpdateProcessingMemory, BasicProcessingMemoryResponse, BasicProcessingMemoriesResponse, DetailProcessingMemoryResponse
from utils.other import endpoints as auth

router = APIRouter()
Expand Down Expand Up @@ -31,7 +31,7 @@ def update_processing_memory(

return UpdateProcessingMemoryResponse(result=processing_memory)

@router.get("/v1/processing-memories/{processing_memory_id}", response_model=BasicProcessingMemoryResponse,
@router.get("/v1/processing-memories/{processing_memory_id}", response_model=DetailProcessingMemoryResponse,
tags=['processing_memories'])
def get_processing_memory(
processing_memory_id: str,
Expand All @@ -49,7 +49,7 @@ def get_processing_memory(
if not processing_memory:
raise HTTPException(status_code=404, detail="Processing memory not found")

return BasicProcessingMemoryResponse(result=processing_memory)
return DetailProcessingMemoryResponse(result=processing_memory)

@router.get("/v1/processing-memories", response_model=BasicProcessingMemoriesResponse,
tags=['processing_memories'])
Expand Down
6 changes: 3 additions & 3 deletions backend/utils/processing_memories.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import database.memories as memories_db
import database.processing_memories as processing_memories_db
from models.memory import CreateMemory, PostProcessingModel, PostProcessingStatus, MemoryPostProcessing
from models.processing_memory import ProcessingMemory, UpdateProcessingMemory, BasicProcessingMemory, ProcessingMemoryStatus
from models.processing_memory import ProcessingMemory, UpdateProcessingMemory, BasicProcessingMemory, ProcessingMemoryStatus, DetailProcessingMemory
from utils.memories.location import get_google_maps_location
from utils.memories.process_memory import process_memory
from utils.plugins import trigger_external_integrations
Expand Down Expand Up @@ -55,12 +55,12 @@ async def create_memory_by_processing_memory(uid: str, processing_memory_id: str

return memory, messages, processing_memory

def get_processing_memory(uid: str, id: str, ) -> BasicProcessingMemory:
def get_processing_memory(uid: str, id: str, ) -> DetailProcessingMemory:
processing_memory = processing_memories_db.get_processing_memory_by_id(uid, id)
if not processing_memory:
print("processing memory is not found")
return
processing_memory = BasicProcessingMemory(**processing_memory)
processing_memory = DetailProcessingMemory(**processing_memory)

return processing_memory

Expand Down

0 comments on commit 4304ee4

Please sign in to comment.