diff --git a/src/python/application/__init__.py b/src/python/application/__init__.py index 857d432..34103ba 100644 --- a/src/python/application/__init__.py +++ b/src/python/application/__init__.py @@ -8,6 +8,7 @@ import src.python.facenet.utils as face_utils from src.python.insight_face.mtcnn import MTCNN from src.python.application.insight_face import Insight_Face +from PIL import Image cap = cv2.VideoCapture(0) emb_train = [] @@ -16,7 +17,8 @@ is_use_windows_message_box = False if __name__ == "__main__": - while True: + count = 0 + while count < 10: frame = li.load_from_camera(cap) cv2.imshow('frame', frame) faces_coord = face_utils.get_face_from_image(frame) @@ -29,7 +31,7 @@ frame[y_face:y_face + h_face, x_face:x_face + w_face] )) y_train.append(1) - break + count += 1 if is_use_insight_face: insight_face = Insight_Face() @@ -85,13 +87,22 @@ frame[y_face:y_face + h_face, x_face:x_face + w_face] ) res = fs.predict_embedded_euclidean_distance(np.array([emb_test]), np.array(emb_train), y_train) + cv2.putText( + frame, + "FaceNet", + (x_face + 10, y_face - 10), + cv2.FONT_HERSHEY_SIMPLEX, 1, (34, 139, 34), 2 + ) cv2.rectangle( frame, (face_coord[0], face_coord[1]), (face_coord[0] + face_coord[2], face_coord[1] + face_coord[3]), - (0, 0, 255) if res[0] is None else (255, 0, 0), + (34, 139, 34) if res[0] else (0, 69, 255), 6 ) + # show_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + # show_img = Image.fromarray(show_img) + # show_img.show() cv2.imshow('frame', frame) cap.release() diff --git a/src/python/application/insight_face.py b/src/python/application/insight_face.py index c110f37..13c21d4 100644 --- a/src/python/application/insight_face.py +++ b/src/python/application/insight_face.py @@ -65,7 +65,7 @@ def train_mvp(self): embs = [] embeddings = [] cap = cv2.VideoCapture(0) - while len(embs) == 0: + while len(embs) < 10: frame = li.load_from_camera(cap) emb = get_emb_from_frame(self.conf, self.learner.model, self.mtcnn, frame, tta=False) if emb is not None: @@ -94,7 +94,7 @@ def infer(self, embeddings, names, boxes, faces, frame): cv2.putText( frame, - predictions[ind], + "ArcFace+ResNet50", (x_l_face_mtcnn + 10, y_up_face_mtcnn - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (34, 139, 34), 2 ) diff --git a/src/python/insight_face/Learner.py b/src/python/insight_face/Learner.py index e2f04b0..b956c7c 100644 --- a/src/python/insight_face/Learner.py +++ b/src/python/insight_face/Learner.py @@ -2,7 +2,10 @@ import torch from torchvision import transforms as trans from pathlib import Path - +from tqdm import tqdm +import numpy as np +from .verifacation import evaluate +from .utils import get_time class face_learner(object): def __init__(self, conf): @@ -22,6 +25,92 @@ def load_state(self, conf, fixed_str, from_save_folder=False, model_only=False): self.head.load_state_dict(torch.load(save_path / 'head_{}'.format(fixed_str))) self.optimizer.load_state_dict(torch.load(save_path / 'optimizer_{}'.format(fixed_str))) + def evaluate(self, conf, carray, issame, nrof_folds=5, tta=False): + self.model.eval() + idx = 0 + embeddings = np.zeros([len(carray), conf.embedding_size]) + with torch.no_grad(): + while idx + conf.batch_size <= len(carray): + batch = torch.tensor(carray[idx:idx + conf.batch_size]) + embeddings[idx:idx + conf.batch_size] = self.model(batch.to(conf.device)).cpu() + idx += conf.batch_size + if idx < len(carray): + batch = torch.tensor(carray[idx:]) + embeddings[idx:] = self.model(batch.to(conf.device)).cpu() + tpr, fpr, accuracy, best_thresholds, tp, fp, tn, fn = evaluate(embeddings, issame, 
+                                                                 nrof_folds)
+        # buf = gen_plot(fpr, tpr)
+        # roc_curve = Image.open(buf)
+        # roc_curve_tensor = trans.ToTensor()(roc_curve)
+        roc_curve_tensor = ""
+        return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor, tp, fp, tn, fn
+
+    def board_val(self, db_name, accuracy, best_threshold, roc_curve_tensor):
+        self.writer.add_scalar('{}_accuracy'.format(db_name), accuracy, self.step)
+        self.writer.add_scalar('{}_best_threshold'.format(db_name), best_threshold, self.step)
+        self.writer.add_image('{}_roc_curve'.format(db_name), roc_curve_tensor, self.step)
+
+    def train(self, conf, epochs):
+        self.model.train()
+        running_loss = 0.
+        for e in range(epochs):
+            print('epoch {} started'.format(e))
+            if e == self.milestones[0]:
+                self.schedule_lr()
+            if e == self.milestones[1]:
+                self.schedule_lr()
+            if e == self.milestones[2]:
+                self.schedule_lr()
+            for imgs, labels in tqdm(iter(self.loader)):
+                imgs = imgs.to(conf.device)
+                labels = labels.to(conf.device)
+                self.optimizer.zero_grad()
+                embeddings = self.model(imgs)
+                thetas = self.head(embeddings, labels)
+                loss = conf.ce_loss(thetas, labels)
+                loss.backward()
+                running_loss += loss.item()
+                self.optimizer.step()
+
+                if self.step % self.board_loss_every == 0 and self.step != 0:
+                    loss_board = running_loss / self.board_loss_every
+                    self.writer.add_scalar('train_loss', loss_board, self.step)
+                    running_loss = 0.
+
+                if self.step % self.evaluate_every == 0 and self.step != 0:
+                    accuracy, best_threshold, roc_curve_tensor, *_ = self.evaluate(conf, self.agedb_30,
+                                                                                   self.agedb_30_issame)
+                    self.board_val('agedb_30', accuracy, best_threshold, roc_curve_tensor)
+                    self.model.train()
+                if self.step % self.save_every == 0 and self.step != 0:
+                    self.save_state(conf, accuracy)
+
+                self.step += 1
+
+        self.save_state(conf, accuracy, to_save_folder=True, extra='final')
+
+    def save_state(self, conf, accuracy, to_save_folder=False, extra=None, model_only=False):
+        if to_save_folder:
+            save_path = conf.save_path
+        else:
+            save_path = conf.model_path
+        torch.save(
+            self.model.state_dict(), save_path /
+            ('model_{}_accuracy:{}_step:{}_{}.pth'.format(get_time(), accuracy, self.step,
+                                                          extra)))
+        if not model_only:
+            torch.save(
+                self.head.state_dict(), save_path /
+                ('head_{}_accuracy:{}_step:{}_{}.pth'.format(get_time(), accuracy, self.step,
+                                                             extra)))
+            torch.save(
+                self.optimizer.state_dict(), save_path /
+                ('optimizer_{}_accuracy:{}_step:{}_{}.pth'.format(get_time(), accuracy,
+                                                                  self.step, extra)))
+    def schedule_lr(self):
+        for params in self.optimizer.param_groups:
+            params['lr'] /= 10
+        print(self.optimizer)
+
     def infer(self, conf, faces, target_embs, tta=False):
         '''
         faces : list of PIL Image
diff --git a/src/python/insight_face/config.py b/src/python/insight_face/config.py
index bd56035..1c10ac8 100644
--- a/src/python/insight_face/config.py
+++ b/src/python/insight_face/config.py
@@ -43,8 +43,8 @@ def get_config(training=True):
         conf.ce_loss = CrossEntropyLoss()
     # --------------------Inference Config ------------------------
     else:
-        conf.facebank_path = conf.data_path / 'facebank'
-        conf.threshold = 1.5
+        conf.facebank_path = conf.data_path
+        conf.threshold = 0.8
         conf.face_limit = 10  # when inference, at maximum detect 10 faces in one image, my laptop is slow
         conf.min_face_size = 30
diff --git a/src/python/insight_face/face_verify.py b/src/python/insight_face/face_verify.py
new file mode 100644
index 0000000..01c43c8
--- /dev/null
+++ b/src/python/insight_face/face_verify.py
@@ -0,0 +1,282 @@
+from PIL import Image
+import argparse
+from pathlib import Path
+from src.python.insight_face.config import get_config
+from src.python.insight_face.mtcnn import MTCNN
+from src.python.insight_face.Learner import face_learner
+from src.python.insight_face.utils import load_facebank, prepare_facebank
+import numpy as np
+import os
+import time
+import shutil
+import cv2
+
+
+# Meaning of the network's prediction result:
+# predict - the class the photo is assumed to belong to
+# dist - the Euclidean distance to that predicted class
+# (inside the network the class is chosen by the minimum distance)
+# threshold - the distance cut-off that we set ourselves
+# actual - the true class of the image
+def save_photos(img, boxes, path):
+    open_cv_image = np.array(img)
+    show_img = open_cv_image[:, :, ::-1].copy()  # Convert RGB to BGR
+    # for point in facial5points:
+    #     show_img = cv2.circle(show_img, (point[0], point[1]), radius, (0, 0, 255), 3)
+    for box in boxes:
+        x_l_face_mtcnn, y_up_face_mtcnn, x_r_face_mtcnn, y_down_face_mtcnn = round(box[0]), round(
+            box[1]), round(box[2]), round(box[3])
+        cv2.rectangle(
+            show_img,
+            (x_l_face_mtcnn, y_up_face_mtcnn),
+            (x_r_face_mtcnn, y_down_face_mtcnn),
+            (34, 139, 34),
+            6
+        )
+    cv2.imwrite(path, show_img)
+    # show_img = cv2.cvtColor(show_img, cv2.COLOR_BGR2RGB)
+    # show_img = Image.fromarray(show_img)
+    # show_img.show()
+
+
+def get_tp_fp_tn_fn(dist: float, actual, predict, path_to_file: Path):
+    tp, fp, tn, fn = 0, 0, 0, 0
+    # threshold = 0.8, actual = 103, dist = 0.6024671792984009, predict = 103
+    # threshold = 0.8, actual = 103, dist = 0.8871978521347046, predict = None
+    predict_is_same = (predict is not None) and (predict != '0')  # 1 if a known person was predicted
+    actual_is_same = (actual is not None) and (actual != '0')  # 1 if the photo really shows an enrolled person
+
+    # CSV columns: threshold,actual,dist,predict,path
+    if ((predict is None) and (actual is not None)) or ((predict is not None) and (actual is None)):
+        with open('mistakes.csv', 'a') as file:
+            file.write(f'{learner.threshold},{actual},{dist},{predict},{path_to_file}\n')
+    if predict_is_same and actual_is_same:  # distance to the class is small and the photos really match
+        tp += 1
+        return tp, fp, tn, fn
+    if predict_is_same and not actual_is_same:  # distance to the class is small although the photos differ
+        fp += 1
+        print(f'FP threshold={learner.threshold}, dist={dist}, actual_person={actual},'
+              f' predict_person={predict}, path_to_file={path_to_file}')
+        return tp, fp, tn, fn
+    if not predict_is_same and not actual_is_same:  # distance to the class is large and the photos really differ
+        tn += 1
+        return tp, fp, tn, fn
+    if not predict_is_same and actual_is_same:  # distance to the class is large although the photos match
+        fn += 1
+        print(f'FN threshold={learner.threshold}, dist={dist}, actual_person={actual},'
+              f' predict_person={predict}, path_to_file={path_to_file}')
+        return tp, fp, tn, fn
+    print('Condition specified incorrectly')
+    return tp, fp, tn, fn
+
+
+def get_face(file_path, path_del_files):
+    if file_path.name != '.DS_Store':
+        try:
+            img = Image.open(file_path)
+            if img.mode == 'RGBA':
+                img.load()  # required for png.split()
+                background = Image.new("RGB", img.size, (255, 255, 255))
+                background.paste(img, mask=img.split()[3])  # 3 is the alpha channel
+                img = background
+            if img.mode == 'SRGB':
+                img = img.convert('RGB')
+        except Exception:
+            print(f"\nRemove file {file_path}\n")
+            with open(path_del_files, 'a') as f:
+                f.write(f'{file_path},cant_open\n')
+            shutil.move(str(file_path), f'data/removed/{file_path.name}')
+            return None
+        if img.size != (112, 112):
+            face_to_ret, box = mtcnn.align(img) or (None, None)  # align() returns None when no face is found
+
if not face_to_ret: + print(f"\nRemove file {file_path}\n") + with open(path_del_files, 'a') as f: + f.write(f'{file_path},mtccn_err\n') + shutil.move(str(file_path), f'data/removed/{file_path.name}') + return None + return face_to_ret, img, box + else: + return None + + +if __name__ == '__main__': + + # mtcnn = MTCNN() + # tim_arr = [] + # for file in Path('data/not_show').iterdir(): + # if not file.is_file(): + # continue + # try: + # img = Image.open(file) + # if img.mode == 'RGBA': + # img.load() # required for png.split() + # background = Image.new("RGB", img.size, (255, 255, 255)) + # background.paste(img, mask=img.split()[3]) # 3 is the alpha channel + # img = background + # if img.mode == 'SRGB': + # img = img.convert('RGB') + # except Exception: + # print(f"\nRemove file {file}\n") + # if img.size != (112, 112): + # tim = time.time() + # face_to_ret = mtcnn.align_multi(img) + # if face_to_ret: + # end = time.time() - tim + # print(end) + # tim_arr.append(end) + # print(len(tim_arr)) + # print(max(tim_arr), sum(tim_arr) / len(tim_arr)) + + conf = get_config(False) + + mtcnn = MTCNN() + print('mtcnn loaded') + with open('mistakes.csv', 'w') as file: + file.write('threshold,actual,dist,predict,path\n') + learner = face_learner(conf) + # learner.threshold = 0.8 + learner.load_state(conf, 'cpu_final.pth', True, True) + learner.model.eval() + + if False: + targets, names = prepare_facebank(conf, learner.model, mtcnn, tta=False) + print('facebank updated') + else: + targets, names = load_facebank(conf) + + print('databank is ready') + tp_all, fp_all, tn_all, fn_all = 0, 0, 0, 0 + paths_test = list((Path(conf.data_path) / 'test').iterdir()) + counter = 1 + len_paths = len(paths_test) + path_del_files = conf.data_path / 'deleted_files.txt' + + # create True faces: + faces_true = [] + faces_false = [] + test_y_true = [] + test_y_false = [] + path_file_true = [] + path_file_false = [] + for path in paths_test: + if path.is_file(): + continue + actual_person = str(path).split('/')[-1] # Path.name() + for file in path.iterdir(): + if not file.is_file(): + continue + face, img, boxes = get_face(file, path_del_files) + if not face: + if actual_person == '0': + path_to_save = f"data_with_face/tn/{file.name}" + else: + path_to_save = f"data_with_face/fn/{file.name}" + save_photos(img, boxes, path_to_save) + continue + continue + results, dists = learner.infer(conf, [face], targets, False) + predict_temp = names[results[0] + 1] if results[0] + 1 != 0 else None + tp_temp, fp_temp, tn_temp, fn_temp = get_tp_fp_tn_fn( + dist=dists[0], actual=actual_person, + predict=predict_temp, path_to_file=file) + if tp_temp: + path_to_save = f"data_with_face/tp/{file.name}" + elif fp_temp: + path_to_save = f"data_with_face/fp/{file.name}" + elif tn_temp: + path_to_save = f"data_with_face/tn/{file.name}" + elif fn_temp: + path_to_save = f"data_with_face/fn/{file.name}" + + save_photos(img, boxes, path_to_save) + if actual_person != '0': + faces_true.append(face) + test_y_true.append(actual_person) + path_file_true.append(file) + else: + faces_false.append(face) + test_y_false.append(None) + path_file_false.append(file) + + test_y_predict_true = [] + test_y_predict_false = [] + for indx in range(len(faces_true)): + print(f'{counter}/{len(faces_true)} verify data') + counter += 1 + # begin = time.time() + results, dists = learner.infer(conf, [faces_true[indx]], targets, False) + # end = time.time() + # print(f'time={end - begin}') + + predict_temp = names[results[0] + 1] if results[0] + 1 != 0 else None + 
test_y_predict_true.append(predict_temp) + tp_temp, fp_temp, tn_temp, fn_temp = get_tp_fp_tn_fn( + dist=dists[0], actual=test_y_true[indx], + predict=predict_temp, path_to_file=path_file_true[indx]) + tp_all += tp_temp + fp_all += fp_temp + tn_all += tn_temp + fn_all += fn_temp + + counter = 0 + for indx in range(len(faces_false)): + print(f'{counter}/{len(faces_false)} verify data') + counter += 1 + results, dists = learner.infer(conf, [faces_false[indx]], targets, False) + predict_temp = names[results[0] + 1] if results[0] + 1 != 0 else None + test_y_predict_false.append(predict_temp) + tp_temp, fp_temp, tn_temp, fn_temp = get_tp_fp_tn_fn( + dist=dists[0], actual=test_y_false[indx], + predict=predict_temp, path_to_file=path_file_false[indx]) + tp_all += tp_temp + fp_all += fp_temp + tn_all += tn_temp + fn_all += fn_temp + + true_positive = np.mean( + [test_y_true[indx] == test_y_predict_true[indx] for indx in range(len(test_y_predict_true))]) + false_negative = np.mean([test_y_predict_true[indx] == None for indx in range(len(test_y_predict_true))]) + true_negative = np.mean( + [test_y_false[indx] == test_y_predict_false[indx] for indx in range(len(test_y_predict_false))]) + false_positive = np.mean([test_y_predict_false[indx] != None for indx in range(len(test_y_predict_false))]) + + tpr, fpr, tnr, fnr = 0, 0, 0, 0 + tpr = tp_all / (tp_all + fn_all) if (tp_all + fn_all) != 0 else 0 + fpr = fp_all / (fp_all + tn_all) if (fp_all + tn_all) != 0 else 0 + tnr = tn_all / (fp_all + tn_all) if (fp_all + tn_all) != 0 else 0 + fnr = fn_all / (tp_all + fn_all) if (tp_all + fn_all) != 0 else 0 + _sum = tp_all + fn_all + fp_all + tn_all + tpr_to_sum = tp_all / _sum + fpr_to_sum = fp_all / _sum + tnr_to_sum = tn_all / _sum + fnr_to_sum = fn_all / _sum + accuracy = (tp_all + tn_all) / _sum # 0.93 + + print(f'faces_true={faces_true}') + print(f'faces_false={faces_false}') + print(f'test_y_true={test_y_true}') + print(f'test_y_false={test_y_false}') + print(f'test_y_predict_true={test_y_predict_true}') + print(f'test_y_predict_false={test_y_predict_false}') + + print(f"TP={true_positive}") + print(f"FP={false_positive}") + print(f"TN={true_negative}") + print(f"FN={false_negative}") + + print(f'tp_all = {tp_all}') + print(f'fp_all = {fp_all}') + print(f'tn_all = {tn_all}') + print(f'fn_all = {fn_all}') + + print(f'threshold = {learner.threshold}') + print(f'tpr = {tpr}') + print(f'fpr = {fpr}') + print(f'tnr = {tnr}') + print(f'fnr = {fnr}') + print(f'tpr_to_sum = {tpr_to_sum}') + print(f'fpr_to_sum = {fpr_to_sum}') + print(f'tnr_to_sum = {tnr_to_sum}') + print(f'fnr_to_sum = {fnr_to_sum}') + print(f'accuracy = {accuracy}') diff --git a/src/python/insight_face/face_verify_binary.py b/src/python/insight_face/face_verify_binary.py index 5defd88..fc2416b 100644 --- a/src/python/insight_face/face_verify_binary.py +++ b/src/python/insight_face/face_verify_binary.py @@ -10,7 +10,7 @@ import time -# Результат прогназа сетки: +# Результат прогноза сетки: # predict - предположительный класс, к которому отн-ся фото # dist - евклидово расстояние до предположительного класса # соотв-но в самой сетке выбор класса происходит по минимальному расстоянию diff --git a/src/python/insight_face/mtcnn.py b/src/python/insight_face/mtcnn.py index 1b58772..ea19997 100644 --- a/src/python/insight_face/mtcnn.py +++ b/src/python/insight_face/mtcnn.py @@ -17,11 +17,21 @@ def image_process_from_cv2(frame): return gray -def show_facial_points(img, facial5points, radius=30): +def show_facial_points(img, boxes, facial5points, 
                         radius=20):
     open_cv_image = np.array(img)
     show_img = open_cv_image[:, :, ::-1].copy()  # Convert RGB to BGR
-    for point in facial5points:
-        show_img = cv2.circle(show_img, (point[0], point[1]), radius, (0, 0, 255), 1)
+    # for point in facial5points:
+    #     show_img = cv2.circle(show_img, (point[0], point[1]), radius, (0, 0, 255), 3)
+    for box in boxes:
+        x_l_face_mtcnn, y_up_face_mtcnn, x_r_face_mtcnn, y_down_face_mtcnn = round(box[0]), round(
+            box[1]), round(box[2]), round(box[3])
+        cv2.rectangle(
+            show_img,
+            (x_l_face_mtcnn, y_up_face_mtcnn),
+            (x_r_face_mtcnn, y_down_face_mtcnn),
+            (34, 139, 34),
+            6
+        )
     show_img = cv2.cvtColor(show_img, cv2.COLOR_BGR2RGB)
     show_img = Image.fromarray(show_img)
     show_img.show()
@@ -40,20 +50,18 @@ def __init__(self):
 
     def align(self, img, landmarks=None):
         if landmarks is None:
-            _, landmarks = self.detect_faces(img)
+            box, landmarks = self.detect_faces(img)
         if len(landmarks):
             facial5points = [[landmarks[0][j], landmarks[0][j + 5]] for j in range(5)]
-            # show_facial_points(img, facial5points, 20)
-            #
-            # # reference_points = get_reference_facial_points((1280, 720), 0.7, (128, 72), default_square=False)
             # warped_face = warp_and_crop_face(np.array(img), facial5points, self.refrence, crop_size=(1280, 720))
-            # show_img = Image.fromarray(warped_face)
+            # show_facial_points(img, [box], facial5points, 10)
+            # show_img = Image.fromarray(np.array(img))
             # show_img.show()
             warped_face = warp_and_crop_face(np.array(img), facial5points, self.refrence, crop_size=(112, 112))
             # show_img = Image.fromarray(warped_face)
             # show_img.show()
-            return Image.fromarray(warped_face)
+            return Image.fromarray(warped_face), box
         else:
             return None
@@ -65,15 +73,19 @@ def align_multi(self, img, limit=None, min_face_size=30.0, boxes=None, landmarks
             boxes = boxes[:limit]
             landmarks = landmarks[:limit]
         faces = []
+        # if len(boxes):
+        #     show_facial_points(img, boxes, None, 10)
         for landmark in landmarks:
             facial5points = [[landmark[j], landmark[j + 5]] for j in range(5)]
             warped_face = warp_and_crop_face(np.array(img), facial5points, self.refrence, crop_size=(112, 112))
+            # show_img = Image.fromarray(warped_face)
+            # show_img.show()
             faces.append(Image.fromarray(warped_face))
         # return boxes, faces
         return faces
 
     def detect_faces(self, image, min_face_size=20.0,
-                     thresholds=[0.6, 0.7, 0.8],
+                     thresholds=[0.4, 0.7, 0.8],
                      nms_thresholds=[0.7, 0.7, 0.7]):
         """
         Arguments:
diff --git a/src/python/insight_face/utils.py b/src/python/insight_face/utils.py
index 90b3614..a0a576d 100644
--- a/src/python/insight_face/utils.py
+++ b/src/python/insight_face/utils.py
@@ -6,6 +6,8 @@
 from src.python.insight_face.model import l2_norm
 import cv2
 import os
+import shutil
+from datetime import datetime
 
 
 def prepare_facebank(conf, model, mtcnn, tta=True):
@@ -27,19 +29,31 @@ def prepare_facebank(conf, model, mtcnn, tta=True):
                 continue
             try:
                 img = Image.open(file)
-            except:
+                if img.mode == 'RGBA':
+                    img.load()  # required for png.split()
+                    background = Image.new("RGB", img.size, (255, 255, 255))
+                    background.paste(img, mask=img.split()[3])  # 3 is the alpha channel
+                    img = background
+                if img.mode == 'SRGB':
+                    img = img.convert('RGB')
+                print(file)
+                if img.size != (112, 112):
+                    aligned = mtcnn.align(img)
+                    if aligned is None:
+                        print(f"\nRemove file {file}\n")
+                        with open(path_del_files, 'a') as f:
+                            f.write(f'{file},mtccn_err\n')
+                        # os.remove(file)
+                        shutil.move(str(file), f'data/removed/{file.name}')
+                        continue
+                    img, _ = aligned  # align() now returns (face, box); keep only the cropped face
+
+            except Exception:
                 print(f"\nRemove file {file}\n")
                 with open(path_del_files, 'a') as f:
                     f.write(f'{file},cant_open\n')
                 os.remove(file)
-            if img.size != (112, 112):
-                img = mtcnn.align(img)
-                if img == False:
-                    print(f"\nRemove file {file}\n")
-                    with open(path_del_files, 'a') as f:
-                        f.write(f'{file},mtccn_err\n')
-                    os.remove(file)
-                    continue
+
             with torch.no_grad():
                 if tta:
                     mirror = trans.functional.hflip(img)
@@ -49,10 +62,9 @@
                 else:
                     embs.append(model(conf.test_transform(img).to(conf.device).unsqueeze(0)))
         if len(embs) == 0:
-            print(f"\nRemove file {file}\n")
+            print(f"\nRemove cls {path.name}\n")
             with open(path_del_files, 'a') as f:
-                f.write(f'{file},no_embs\n')
-            os.remove(file)
+                f.write(f'{path.name},no_embs!!!\n')
             continue
         embedding = torch.cat(embs).mean(0, keepdim=True)
         embeddings.append(embedding)
@@ -81,7 +93,7 @@ def get_face(mtcnn, frame):
 def get_emb_from_frame(conf, model, mtcnn, frame, tta=True):
     img_temp = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
     img = Image.fromarray(img_temp)
-    detected_img = mtcnn.align(img)
+    detected_img, _ = mtcnn.align(img) or (None, None)  # align() returns None when no face is detected
     if detected_img:
         with torch.no_grad():
             if tta:
@@ -153,3 +165,6 @@ def hflip_batch(imgs_tensor):
     for i, img_ten in enumerate(imgs_tensor):
         hfliped_imgs[i] = hflip(img_ten)
     return hfliped_imgs
+
+def get_time():
+    return (str(datetime.now())[:-10]).replace(' ', '-').replace(':', '-')
\ No newline at end of file
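
A note on the decision rule that face_verify.py exercises: the script treats results[0] + 1 == 0 as an open-set reject (predict = None) and otherwise looks the index up in names, whose first entry is reserved for the unknown class. The sketch below restates that rule in isolation so the TP/FP/TN/FN bookkeeping in get_tp_fp_tn_fn is easier to follow. It is only an illustrative sketch, not code from this patch: match_face and facebank_embs are placeholder names, and it assumes the learner picks the facebank entry with the smallest squared Euclidean distance and rejects the match once that distance exceeds conf.threshold (0.8 in this patch), which is what the results[0] + 1 indexing implies.

    import numpy as np

    def match_face(emb, facebank_embs, names, threshold=0.8):
        """Return (predicted_name_or_None, distance) for one face embedding.

        emb           : (embedding_size,) embedding of the query face
        facebank_embs : (n_classes, embedding_size) mean embedding per enrolled person
        names         : list of n_classes + 1 labels, names[0] is the unknown placeholder
        threshold     : squared-distance cut-off (conf.threshold = 0.8 in this patch)
        """
        dists = np.sum((facebank_embs - emb) ** 2, axis=1)  # squared Euclidean distance to every class
        idx = int(np.argmin(dists))
        if dists[idx] > threshold:
            return None, float(dists[idx])        # open-set reject -> counted as TN or FN above
        return names[idx + 1], float(dists[idx])  # accepted match  -> counted as TP or FP above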