Commit 36368e6
Parent(s): ccb670c
update: description
app.py CHANGED

@@ -28,15 +28,48 @@ def predict(img):
     return [emotions, sentiments] #{**emotions, **sentiments}
 
 # Gradio
-title = "Facial
-
-
-
-
-
-
-
-
+title = "Facial Emotion and Sentiment Detector"
+
+description = gr.Markdown(
+    """Ever wondered what a person might be feeling looking at their picture?
+    Well, now you can! Try this fun app. Just upload a facial image in JPG or
+    PNG format. Voila! you can now see what they might have felt when the picture
+    was taken.
+
+    **Tip**: Be sure to only include face to get best results. Check some sample images
+    below for inspiration!""").value
+
+article = gr.Markdown(
+    """**DISCLAIMER:** This model does not reveal the actual emotional state of a person. Use and
+    interpret results at your own risk! It was built as a demo for AI course. Samples images
+    were downloaded from VG & AftenPosten news webpages. Copyrights belong to respective
+    brands. All rights reserved.
+
+    **PREMISE:** The idea is to determine an overall sentiment of a news site on a daily basis
+    based on the pictures. We are restricting pictures to only include close-up facial
+    images.
+
+    **DATA:** FER2013 dataset consists of 48x48 pixel grayscale images of faces. There are 28,709
+    images in the training set and 3,589 images in the test set. However, for this demo all
+    pictures were combined into a single dataset and 80:20 split was used for training. Images
+    are assigned one of the 7 emotions: Angry, Disgust, Fear, Happy, Sad, Surprise, and Neutral.
+    In addition to these 7 classes, images were re-classified into 3 sentiment categories based
+    on emotions:
+
+    Positive (Happy, Surprise)
+
+    Negative (Angry, Disgust, Fear, Sad)
+
+    Neutral (Neutral)
+
+    FER2013 (preliminary version) dataset can be downloaded at:
+    https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge/data
+
+    **MODEL:** VGG19 was used as the base model and trained on FER2013 dataset. Model was trained
+    using PyTorch and FastAI. Two models were trained, one for detecting emotion and the other
+    for detecting sentiment. Although, this could have been done with just one model, here two
+    models were trained for the demo.""").value
+
 enable_queue=True
 
 examples = ['happy1.jpg', 'happy2.jpg', 'angry1.png', 'angry2.jpg', 'neutral1.jpg', 'neutral2.jpg']
@@ -47,4 +80,5 @@ gr.Interface(fn = predict,
              title = title,
              examples = examples,
              description = description,
-             article=article
+             article=article,
+             allow_flagging='never').launch(enable_queue=enable_queue)
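For context, the sketch below shows one plausible way the pieces touched by this commit fit together in app.py. Only title, description, article, examples, enable_queue, allow_flagging and the launch() call are visible in the diff; the inputs/outputs components and the body of predict() are assumptions added for illustration. It also assumes the older Gradio API in which launch() still accepts enable_queue (newer releases configure queuing via Interface.queue() instead).

# Minimal sketch of the surrounding app.py, assuming an older Gradio (2.x/3.x) API.
# Only the names visible in the diff above come from the commit; the inputs/outputs
# components and the predict() body are illustrative placeholders.
import gradio as gr

def predict(img):
    # Placeholder: the real app scores the uploaded face image with two classifiers
    # and returns one label->probability dict per output component.
    emotions = {"Happy": 0.8, "Neutral": 0.2}        # hypothetical scores
    sentiments = {"Positive": 0.8, "Neutral": 0.2}   # hypothetical scores
    return [emotions, sentiments]  # {**emotions, **sentiments}

title = "Facial Emotion and Sentiment Detector"
description = gr.Markdown("""Ever wondered what a person might be feeling ...""").value
article = gr.Markdown("""**DISCLAIMER:** ... **PREMISE:** ... **DATA:** ... **MODEL:** ...""").value

enable_queue = True
examples = ['happy1.jpg', 'happy2.jpg', 'angry1.png',
            'angry2.jpg', 'neutral1.jpg', 'neutral2.jpg']

gr.Interface(fn=predict,
             inputs=gr.Image(type='pil'),             # assumed input component
             outputs=[gr.Label(num_top_classes=7),    # assumed: 7 emotion classes
                      gr.Label(num_top_classes=3)],   # assumed: 3 sentiment classes
             title=title,
             examples=examples,
             description=description,
             article=article,
             allow_flagging='never').launch(enable_queue=enable_queue)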