-
Notifications
You must be signed in to change notification settings - Fork 8
/
extract_frame_landmarks.py
95 lines (75 loc) · 3.67 KB
/
extract_frame_landmarks.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
import argparse
import torchlm
import torch
import cv2
from torchlm.tools import faceboxesv2
from torchlm.models import pipnet
from tqdm import tqdm
import os
import numpy as np
def save_lmds(dict_item, txt_path):
    """Write per-frame landmarks to a text file.

    Each line has the form ``<frame_name> <x>_<y> <x>_<y> ... `` (note the
    trailing space before the newline); coordinates are truncated to ints.
    """
    out_lines = []
    for frame_name, points in dict_item.items():
        coords = "".join(f"{int(px)}_{int(py)} " for px, py in points)
        out_lines.append(f"{frame_name} {coords}\n")
    with open(txt_path, 'w') as handle:
        handle.writelines(out_lines)
def main(from_dir, lmd_output_dir, skip_existing, check_and_padding):
    """Extract 68-point facial landmarks for every frame of every clip.

    Args:
        from_dir: directory containing one sub-directory of frames per clip.
        lmd_output_dir: directory where one '<clip>.txt' landmark file is
            written per clip (one line per frame, via ``save_lmds``).
        skip_existing: skip clips whose landmark file already exists.
        check_and_padding: re-validate existing landmark files; when frame and
            landmark-line counts disagree, reprocess the clip, padding frames
            with no detected face using the previous frame's landmarks.
    """
    os.makedirs(lmd_output_dir, exist_ok=True)
    # Fall back to CPU so the script still runs on machines without CUDA
    # (the original hard-coded "cuda:0" and crashed on CPU-only hosts).
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    torchlm.runtime.bind(faceboxesv2(device=device))
    torchlm.runtime.bind(
        pipnet(backbone="resnet18", pretrained=True,
               num_nb=10, num_lms=68, net_stride=32, input_size=256,
               meanface_type="300w", map_location=device, checkpoint=None)
    )
    clip_dirs = os.listdir(from_dir)
    # Shuffle so multiple concurrent workers are unlikely to collide on clips.
    np.random.shuffle(clip_dirs)
    for clip_dir in tqdm(clip_dirs, desc="Processing clips"):
        lmd_path = os.path.join(lmd_output_dir, f'{clip_dir}.txt')
        frames_path = os.path.join(from_dir, clip_dir)
        # Count only actual image files: stray files in the frame directory
        # would otherwise inflate this list and make the frame-vs-landmark
        # count comparison below report false mismatches.
        img_lists = sorted(
            name for name in os.listdir(frames_path)
            if name.endswith(('.png', '.jpg', '.jpeg'))
        )
        if check_and_padding and os.path.exists(lmd_path):
            with open(lmd_path, 'r') as file:
                lines = file.readlines()
            if len(img_lists) == len(lines):
                continue  # file already complete for this clip
            print(f'{lmd_path} has not aligned landmark size.{len(img_lists)}!={len(lines)} checking....')
        elif skip_existing and os.path.exists(lmd_path):
            continue
        current_dict = {}
        last_landmarks = None
        for image_name in tqdm(img_lists):
            frame = cv2.imread(os.path.join(frames_path, image_name))
            if frame is None:
                # Unreadable frame: abandon the rest of the clip
                # (whatever was collected so far is still saved below).
                break
            landmarks, bboxes = torchlm.runtime.forward(frame)
            if len(bboxes) == 0:
                # No face detected in this frame.
                if check_and_padding:
                    if last_landmarks is None:
                        print(f"{clip_dir}'s {image_name} does not have first frame. Passing ...")
                        break
                    print(f"{clip_dir}'s {image_name} padds the missing landmarks using last frames.")
                    landmarks = last_landmarks
                else:
                    print(f"{clip_dir}'s {image_name} is missing, later frames will not be processed!")
                    break
            # Keep only the first detected face's 68 points.
            current_dict[image_name] = [(x, y) for x, y in landmarks[0][:68]]
            last_landmarks = landmarks
        save_lmds(current_dict, lmd_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Extract frame landmarks.')
parser.add_argument('--from_dir', type=str, default='./data_processing/specified_formats/videos/video_frames/',
help='Directory where video frames are stored')
parser.add_argument('--lmd_output_dir', type=str, default='./data_processing/specified_formats/videos/landmarks/',
help='Directory where landmarks will be saved')
parser.add_argument('--skip_existing', action='store_true',
help='Skip processing if landmarks file already exists')
parser.add_argument('--check_and_padding', action='store_true',
help='Check and pad frames.')
args = parser.parse_args()
main(args.from_dir, args.lmd_output_dir, args.skip_existing, args.check_and_padding)