Spaces:
Runtime error
Runtime error
Commit
·
1b3fb15
0
Parent(s):
Add deepface app
Browse files- README.md +6 -0
- app.py +208 -0
- db.py +78 -0
- deeperface.py +158 -0
- examples/log.csv +6 -0
- examples/ospan-ali-5zigAp0ng4s-unsplash.jpg +0 -0
- examples/ospan-ali-GUmRXc-vOxw-unsplash.jpg +0 -0
- examples/ospan-ali-_ZXdnKTHPGs-unsplash.jpg +0 -0
- examples/ospan-ali-t1JfI6SyEhM-unsplash.jpg +0 -0
- examples/ospan-ali-ufgR_EHJOUw-unsplash.jpg +0 -0
- requirements.txt +3 -0
README.md
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
emoji: 🤗
|
| 3 |
+
sdk: gradio
|
| 4 |
+
sdk_version: 4.13.0
|
| 5 |
+
title: Deepface
|
| 6 |
+
---
|
app.py
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from gradio import AnnotatedImage, Blocks, Button, Column, Dropdown, Examples, Gallery, HTML, Image, Row, SelectData, Tab, Textbox
|
| 2 |
+
|
| 3 |
+
import db
|
| 4 |
+
import deeperface
|
| 5 |
+
import os.path
|
| 6 |
+
|
| 7 |
+
################################################################################
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def delete(id):
    """Remove the stored gallery entry identified by *id*."""
    db.delete_by_id(id)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def emotions():
    """Return the emotion labels exposed by the analysis backend.

    Note: returns the backend's own label list (not a copy), matching
    how callers use it read-only to build the dropdown choices.
    """
    return deeperface.Emotion.labels
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def process(path):
    """Analyze the image at *path*.

    Returns a 3-tuple:
      - the gallery id under which the image is stored,
      - ``(path, annotations)`` suitable for a Gradio AnnotatedImage,
      - a list of ``(path, caption)`` pairs for stored images whose
        faces match one of this image's faces.
    """
    img = deeperface.Image.read(path)

    ############################################################################

    # Pixelate in place (overwriting the file on disk) when the image
    # is flagged as NSFW.
    if img.nsfw():
        img.pixelate().write(path)

    ############################################################################

    # Reuse the cached analysis when this path is already stored;
    # otherwise analyze now and persist the result.
    if db.exists(path):
        image_id, metadata = db.get(path)
    else:
        metadata = deeperface.Metadata(img)

        image_id = db.update(path, metadata)

    ############################################################################

    # One pair of overlay masks (face box + emotion label) per face.
    annotations = []
    for face in metadata:
        annotations.extend(img.annotate(face, metadata[face]['emotion']))

    ############################################################################

    # Every other stored image with at least one matching face.
    verified_paths = [(other_path, os.path.basename(other_id))
                      for other_id, other_path, other_metadata in db.tuples()
                      if other_path != path and deeperface.verify(
                          metadata.representations(),
                          other_metadata.representations())]

    ############################################################################

    return image_id, (path, annotations), verified_paths
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def search(filter='All'):
    """Return ``(path, caption)`` pairs for stored images.

    When *filter* is ``'All'`` every image matches; otherwise an image
    matches when any of its faces carries the given emotion label.
    The caption is the basename of the gallery id.
    """
    return [(path, os.path.basename(id))
            for id, path, metadata in db.tuples()
            if filter == 'All' or filter in metadata.emotions()]
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
################################################################################
|
| 65 |
+
|
| 66 |
+
# Top-level Gradio UI.  Left pane: analysis of the currently selected
# image (annotated faces, delete button, similar-image gallery).  Right
# pane: an Upload/Camera tab and a browsable Gallery tab filtered by
# emotion.
with Blocks(title='Face recognition and facial attribute analysis') as blocks:
    HTML(
        '<h1><p align="center">Face recognition and facial attribute analysis</p></h1>'
    )
    with Row():
        with Column(scale=3):
            with Row():
                with Column():
                    # Hidden textbox holding the gallery id of the
                    # currently displayed image; consumed by the
                    # Delete button handler.
                    textbox = Textbox(visible=False)
                    annotated_image = AnnotatedImage(color_map={
                        'face': '#f97316',
                        'emotion': '#f3f4f6'
                    },
                                                     show_legend=False,
                                                     visible=False)
                    button = Button('Delete', visible=False)
                with Column():
                    # Gallery of stored images similar to the selected
                    # one, with an HTML caption beneath it.
                    gallery_1 = Gallery(columns=3,
                                        container=False,
                                        show_download_button=False,
                                        show_share_button=False,
                                        visible=False)
                    html = HTML(visible=False)
        with Column(scale=2):
            with Tab(label='Upload / Camera'):
                # type='filepath' so handlers receive a path string.
                image = Image(container=False,
                              sources=['upload', 'webcam'],
                              type='filepath')
                Examples('examples', image)
            with Tab(label='Gallery') as tab:
                # NOTE(review): value=0 is not one of the declared
                # choices (['All'] + emotions()); presumably
                # value='All' was intended — confirm against the
                # handlers' `if not dropdown` guards, which treat the
                # falsy 0 as "no selection".
                dropdown = Dropdown(['All'] + emotions(),
                                    container=False,
                                    filterable=False,
                                    value=0)
                gallery_2 = Gallery(allow_preview=False,
                                    columns=3,
                                    container=False,
                                    show_share_button=False)

    ############################################################################

    def on_button_click(textbox, dropdown):
        # Delete the image whose id is held by the hidden textbox, hide
        # the analysis pane and refresh the browse gallery.
        if not textbox or not dropdown:
            return AnnotatedImage(), Button(), Gallery(), HTML(), Gallery()

        delete(textbox)
        gallery_2 = search(dropdown)

        return AnnotatedImage(visible=False), Button(visible=False), Gallery(
            visible=False), HTML(visible=False), Gallery(gallery_2,
                                                         selected_index=None)

    button.click(on_button_click, [textbox, dropdown],
                 [annotated_image, button, gallery_1, html, gallery_2],
                 show_progress='hidden')

    ############################################################################

    def on_image_change_or_select(image, dropdown):
        # Analyze the chosen image and refresh every output component.
        # Three return shapes: >1 similar images, exactly 1, or none.
        if not image or not dropdown:
            return Textbox(), AnnotatedImage(), Button(), Gallery(), HTML(
            ), Gallery()

        textbox, annotated_image, gallery_1 = process(image)
        gallery_2 = search(dropdown)

        if len(gallery_1) > 1:
            return textbox, AnnotatedImage(
                annotated_image, label=textbox, visible=True
            ), Button(visible=True), Gallery(gallery_1, visible=True), HTML(
                f'<i><p align="center">{len(gallery_1)} Similar Images in Gallery</p></i>',
                visible=True), Gallery(gallery_2, selected_index=None)
        elif len(gallery_1) > 0:
            return textbox, AnnotatedImage(
                annotated_image, label=textbox, visible=True
            ), Button(visible=True), Gallery(gallery_1, visible=True), HTML(
                '<i><p align="center">1 Similar Image in Gallery</p></i>',
                visible=True), Gallery(gallery_2, selected_index=None)
        else:
            return textbox, AnnotatedImage(
                annotated_image, label=textbox,
                visible=True), Button(visible=True), Gallery(
                    visible=False), HTML(visible=False), Gallery(
                        gallery_2, selected_index=None)

    image.change(
        on_image_change_or_select, [image, dropdown],
        [textbox, annotated_image, button, gallery_1, html, gallery_2],
        show_progress='hidden')

    image.select(
        on_image_change_or_select, [image, dropdown],
        [textbox, annotated_image, button, gallery_1, html, gallery_2],
        show_progress='hidden')

    ############################################################################

    def on_tab_select(dropdown):
        # Refresh the browse gallery when the Gallery tab is opened.
        if not dropdown:
            return Gallery()

        gallery_2 = search(dropdown)

        return Gallery(gallery_2, selected_index=None)

    tab.select(on_tab_select, dropdown, gallery_2, show_progress='hidden')

    ############################################################################

    def on_dropdown_select_event(event: SelectData):
        # The selected choice arrives on the event, not as an input.
        dropdown = event.value

        gallery_2 = on_tab_select(dropdown)

        return gallery_2

    dropdown.select(on_dropdown_select_event,
                    outputs=gallery_2,
                    show_progress='hidden')

    ############################################################################

    def on_gallery_2_select_event(event: SelectData, dropdown):
        # Clicking a gallery thumbnail re-runs the full analysis on the
        # clicked image's file path.
        image = event.value['image']['path']

        textbox, annotated_image, button, gallery_1, html, gallery_2 = on_image_change_or_select(
            image, dropdown)

        return textbox, annotated_image, button, gallery_1, html, gallery_2

    gallery_2.select(
        on_gallery_2_select_event,
        dropdown,
        [textbox, annotated_image, button, gallery_1, html, gallery_2],
        show_progress='hidden')

    ############################################################################

blocks.launch(show_api=False)
|
db.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from peewee import BlobField, CharField, IntegrityError, Model, SqliteDatabase
|
| 2 |
+
|
| 3 |
+
import datetime
|
| 4 |
+
import os.path
|
| 5 |
+
import pickle
|
| 6 |
+
import tempfile
|
| 7 |
+
|
| 8 |
+
################################################################################
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Gallery(Model):
    """Peewee model persisting one analyzed gallery image per row."""

    # Virtual 'DCIM/...' path used as the stable identifier (see
    # update() below, which derives it from the source filename).
    id = CharField(primary_key=True)

    # Filesystem path of the uploaded/captured image.
    path = CharField(unique=True)

    # pickle.dumps of a deeperface.Metadata instance.
    metadata = BlobField()

    class Meta:

        # The database file lives in the temp dir, so stored images do
        # not survive a reboot/space restart.
        database = SqliteDatabase(f'{tempfile.gettempdir()}/.db')


# Create the table at import time.  NOTE(review): assumes peewee's
# create_table defaults to safe=True (no-op when the table exists) —
# confirm against the peewee docs for the pinned version.
Gallery.create_table()
|
| 25 |
+
|
| 26 |
+
################################################################################
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def delete(path):
    """Delete the row stored under *path* (no-op when absent)."""
    query = Gallery.delete().where(Gallery.path == path)
    query.execute()
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def delete_by_id(id):
    """Delete the row with the given primary key (no-op when absent)."""
    query = Gallery.delete().where(Gallery.id == id)
    query.execute()
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def exists(path):
    """Return True when a row is stored under *path*."""
    query = Gallery.select().where(Gallery.path == path)
    return query.exists()
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def exists_by_id(id):
    """Return True when a row with the given primary key exists."""
    query = Gallery.select().where(Gallery.id == id)
    return query.exists()
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def get(path):
    """Return ``(id, metadata)`` for the row stored under *path*.

    The metadata blob is unpickled before returning.  Raises
    ``Gallery.DoesNotExist`` when no row matches.
    """
    row = Gallery.get(Gallery.path == path)
    return row.id, pickle.loads(row.metadata)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def get_by_id(id):
    """Return ``(path, metadata)`` for the row with the given id.

    The metadata blob is unpickled before returning.  Raises
    ``Gallery.DoesNotExist`` when no row matches.
    """
    row = Gallery.get(Gallery.id == id)
    return row.path, pickle.loads(row.metadata)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def tuples():
    """Return every row as ``(id, path, metadata)``, ordered by id.

    Metadata blobs are unpickled; the whole table is materialized.
    """
    rows = Gallery.select().order_by(Gallery.id).tuples()
    return [(id, path, pickle.loads(blob)) for id, path, blob in rows]
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def update(path, metadata):
    """Insert or replace the row for *path*; return its gallery id.

    The id is a virtual 'DCIM/...' path: webcam captures (always named
    ``webcam.png``) are keyed by the file's creation timestamp, uploads
    by their original basename.
    """
    blob = pickle.dumps(metadata)
    filename = os.path.basename(path)

    if filename == 'webcam.png':
        # Derive a unique name from the capture time, since every
        # webcam file arrives with the same basename.
        created = datetime.datetime.fromtimestamp(os.path.getctime(path))
        timestamp = created.strftime('%Y%m%d_%H%M%S')

        id = f'DCIM/Camera/{timestamp}.png'
    else:
        id = f'DCIM/Upload/{filename}'

    try:
        Gallery.create(id=id, path=path, metadata=blob)
    except IntegrityError:
        # Row already exists (duplicate id or path): overwrite it.
        Gallery.set_by_id(id, dict(path=path, metadata=blob))

    return id
|
deeperface.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from deepface import DeepFace
|
| 2 |
+
from deepface.detectors import FaceDetector, OpenCvWrapper
|
| 3 |
+
from deepface.extendedmodels import Emotion
|
| 4 |
+
|
| 5 |
+
import cv2
|
| 6 |
+
import deepface.commons.functions
|
| 7 |
+
import numpy
|
| 8 |
+
import opennsfw2
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Emotion:
    """Shared emotion model and its (capitalized) display labels."""

    # NOTE: this class deliberately shadows the `Emotion` imported from
    # deepface.extendedmodels above.  Inside the class body the name
    # still resolves to the imported module, because the class name is
    # not bound until the whole class statement completes — fragile,
    # but correct.
    labels = [emotion.capitalize() for emotion in Emotion.labels]
    model = DeepFace.build_model('Emotion')
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class NSFW:
    """Shared OpenNSFW2 model and its boolean labels."""

    # Indexed by argmax of the model's two output scores; assumes the
    # score order is [safe, nsfw] — confirm against the opennsfw2
    # preprocessing/inference documentation.
    labels = [False, True]
    model = opennsfw2.make_open_nsfw_model()
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
################################################################################
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class Pixels(numpy.ndarray):
    """ndarray subclass adding OpenCV-backed file I/O helpers."""

    @classmethod
    def read(cls, path):
        """Load *path* from disk and view it as this subclass."""
        pixels = cv2.imread(path)
        return pixels.view(type=cls)

    def write(self, path):
        """Write the pixel data to *path* via OpenCV."""
        cv2.imwrite(path, self)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class FaceImage(Pixels):
    """A single extracted face crop, prepared for the deepface models."""

    def analyze(face_img):
        """Return the dominant emotion label for this face."""
        # The emotion model consumes a 48x48 grayscale batch of one.
        gray = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
        resized = cv2.resize(gray, (48, 48))
        batch = numpy.expand_dims(resized, axis=0)

        scores = Emotion.model.predict(batch).ravel()

        return Emotion.labels[numpy.argmax(scores)]

    def represent(face_img):
        """Return the VGG-Face embedding vector for this face."""
        # detector_backend='skip': the crop is already a detected face.
        batch = numpy.expand_dims(face_img, axis=0)
        result = DeepFace.represent(batch,
                                    'VGG-Face',
                                    detector_backend='skip')
        return result[0]['embedding']
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class Image(Pixels):
    """A full photograph with detection, annotation and NSFW helpers."""

    def annotate(img, face, emotion):
        """Build AnnotatedImage overlay masks for one detected face.

        Returns ``[(mask, 'face'), (mask, 'emotion')]`` where each mask
        is a grayscale image the size of *img* with the annotation
        drawn in it.
        """
        face_annotation = numpy.zeros_like(img)
        face_annotation = cv2.cvtColor(face_annotation,
                                       cv2.COLOR_BGR2GRAY).view(type=Pixels)
        x, y, w, h = face
        # One quarter-ellipse in each corner of the bounding box
        # (viewfinder-style brackets); axes are 10% of the box size.
        axes = (int(0.1 * w), int(0.1 * h))
        cv2.ellipse(face_annotation, (x + axes[0], y + axes[1]), axes, 180, 0,
                    90, (1, 0, 0), 2)
        cv2.ellipse(face_annotation, (x + w - axes[0], y + axes[1]), axes, 270,
                    0, 90, (1, 0, 0), 2)
        cv2.ellipse(face_annotation, (x + axes[0], y + h - axes[1]), axes, 90,
                    0, 90, (1, 0, 0), 2)
        cv2.ellipse(face_annotation, (x + w - axes[0], y + h - axes[1]), axes,
                    0, 0, 90, (1, 0, 0), 2)

        emotion_annotation = numpy.zeros_like(img)
        emotion_annotation = cv2.cvtColor(emotion_annotation,
                                          cv2.COLOR_BGR2GRAY).view(type=Pixels)
        # Shrink the font until the label fits in 60% of the box width.
        for fontScale in numpy.arange(10, 0, -0.1):
            textSize, _ = cv2.getTextSize(emotion, cv2.FONT_HERSHEY_SIMPLEX,
                                          fontScale, 2)
            if textSize[0] <= int(0.6 * w):
                break
        # Centered horizontally, straddling the top edge of the box.
        cv2.putText(emotion_annotation, emotion,
                    (int(x + (w - textSize[0]) / 2), int(y + textSize[1] / 2)),
                    cv2.FONT_HERSHEY_SIMPLEX, fontScale, (1, 0, 0), 2)

        return [(face_annotation, 'face'), (emotion_annotation, 'emotion')]

    def detect_faces(img):
        """Return an ``(x, y, w, h)`` int tuple per detected face."""
        face_detector = FaceDetector.build_model('opencv')
        faces = []
        for _, face, _ in FaceDetector.detect_faces(face_detector, 'opencv',
                                                    img, False):
            # Tuples (not lists) so the boxes can be used as dict keys.
            face = (int(face[0]), int(face[1]), int(face[2]), int(face[3]))
            faces.append(face)
        return faces

    def extract_face(img, face):
        """Crop, eye-align and resize one face box to VGG-Face input."""
        face_detector = FaceDetector.build_model('opencv')
        x, y, w, h = face
        img = img[y:y + h, x:x + w]
        img = OpenCvWrapper.align_face(face_detector['eye_detector'], img)
        target_size = deepface.commons.functions.find_target_size('VGG-Face')
        # 'skip' backend: the crop is already a face, just normalize it.
        face_img, _, _ = deepface.commons.functions.extract_faces(
            img, target_size, 'skip')[0]
        face_img = numpy.squeeze(face_img, axis=0)
        return face_img.view(type=FaceImage)

    def nsfw(img):
        """Return True when the OpenNSFW2 model flags the image."""
        # 224x224, per-channel mean subtraction, batch of one — assumes
        # these match opennsfw2's expected preprocessing; confirm
        # against that package's docs.
        img = cv2.resize(img, (224, 224))
        img = img - numpy.array([104, 117, 123], numpy.float32)
        img = numpy.expand_dims(img, axis=0)

        predictions = NSFW.model.predict(img).ravel()

        return NSFW.labels[numpy.argmax(predictions)]

    def pixelate(img):
        """Return a heavily pixelated copy (16x16 blocks, upscaled)."""
        h, w, _ = img.shape
        img = cv2.resize(img, (16, 16))
        return cv2.resize(img, (w, h),
                          interpolation=cv2.INTER_NEAREST).view(type=Pixels)
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
################################################################################
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
class Metadata(dict):
    """Per-face analysis results for one image.

    Maps each face bounding box to a dict with keys ``'emotion'``
    (label string) and ``'representation'`` (embedding vector).
    """

    def __init__(self, img):
        """Detect every face in *img* and record its analysis."""
        entries = {}
        for face in img.detect_faces():
            face_img = img.extract_face(face)

            entries[face] = {
                'emotion': face_img.analyze(),
                'representation': face_img.represent(),
            }

        super(Metadata, self).__init__(entries)

    def emotions(self):
        """Return all emotion labels, in face order."""
        return [entry['emotion'] for entry in self.values()]

    def representations(self):
        """Return all face embeddings, in face order."""
        return [entry['representation'] for entry in self.values()]
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
################################################################################
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def verify(source_representations, test_representations):
    """Return True when any source/test embedding pair matches.

    Two embeddings match when their cosine distance is below deepface's
    VGG-Face cosine threshold.

    Args:
        source_representations: iterable of embedding vectors.
        test_representations: iterable of embedding vectors.
    """
    # Explicit import: the module top only imports
    # deepface.commons.functions, so `deepface.commons.distance` is
    # otherwise resolvable only if deepface itself happens to have
    # loaded that submodule as a side effect.
    from deepface.commons import distance as distance_module

    # Hoist the threshold lookup out of the O(n*m) loop.
    threshold = distance_module.findThreshold('VGG-Face', 'cosine')

    for source_representation in source_representations:
        for test_representation in test_representations:
            if distance_module.findCosineDistance(
                    source_representation, test_representation) < threshold:
                return True
    return False
|
examples/log.csv
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
image
|
| 2 |
+
ospan-ali-5zigAp0ng4s-unsplash.jpg
|
| 3 |
+
ospan-ali-GUmRXc-vOxw-unsplash.jpg
|
| 4 |
+
ospan-ali-t1JfI6SyEhM-unsplash.jpg
|
| 5 |
+
ospan-ali-ufgR_EHJOUw-unsplash.jpg
|
| 6 |
+
ospan-ali-_ZXdnKTHPGs-unsplash.jpg
|
examples/ospan-ali-5zigAp0ng4s-unsplash.jpg
ADDED
|
examples/ospan-ali-GUmRXc-vOxw-unsplash.jpg
ADDED
|
examples/ospan-ali-_ZXdnKTHPGs-unsplash.jpg
ADDED
|
examples/ospan-ali-t1JfI6SyEhM-unsplash.jpg
ADDED
|
examples/ospan-ali-ufgR_EHJOUw-unsplash.jpg
ADDED
|
requirements.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
deepface==0.0.81
|
| 2 |
+
opennsfw2==0.13.7
|
| 3 |
+
peewee==3.17.0
|