app.py
ADDED
@@ -0,0 +1,315 @@
# -*- coding: utf-8 -*-
"""Spamd_SpamDetector_Turkish_BERT_22.09.2022.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1QuorqAuLsmomesZHsaQHEZgzbPEM8YTH
"""

import streamlit as st

# The summarization pipeline and TextBlob were imported in the notebook but are never
# used by the spam detector, so they are left commented out to avoid downloading an
# unrelated model at startup:
# from transformers import pipeline
# from textblob import TextBlob
# pipe = pipeline('summarization')

st.title("Spamd: Turkish Spam Detector")

# CUDA and PyTorch versions must match: https://pytorch.org/get-started/locally/

import csv

import pandas as pd

# The notebook's original csv-module loader, kept for reference:
# data = []
# with open('TurkishSMSCollection.csv', "rt", encoding="utf-8") as csvfile:
#     reader = csv.reader(csvfile, skipinitialspace=True)
#     data.append(tuple(next(reader)))
#     for Message, Group in reader:
#         data.append((int(Group), Message))

df = pd.read_csv('TurkishSMSCollection.csv', encoding='utf-8', on_bad_lines='skip',
                 usecols=['Group', 'Message'], sep=';')
# Map label 2 (normal message) to 0 so the labels are binary: 1 = spam, 0 = normal.
df['Group'] = df['Group'].replace(2, 0)

print(df)

# Message bodies and their labels as NumPy arrays.
text = df.Message.values
labels = df.Group.values
print(len(text), 'messages,', len(labels), 'labels')

from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-base-turkish-uncased")

import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"

import torch
token_id = []
attention_masks = []

def preprocessing(input_text, tokenizer):
    '''
    Returns <class transformers.tokenization_utils_base.BatchEncoding> with the following fields:
      - input_ids: list of token ids
      - token_type_ids: list of token type ids
      - attention_mask: list of indices (0, 1) specifying which tokens should be considered by the model (return_attention_mask = True).
    '''
    return tokenizer.encode_plus(
        input_text,
        add_special_tokens = True,
        max_length = 32,
        padding = 'max_length',       # pad_to_max_length is deprecated
        truncation = True,
        return_attention_mask = True,
        return_tensors = 'pt'
    )
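
# Illustrative check (added; not part of the original notebook): what a single encoded
# sample looks like. The Turkish sentence below is a made-up placeholder.
_demo = preprocessing("Kampanyayı kaçırmayın, hemen tıklayın", tokenizer)
print(_demo['input_ids'].shape)        # torch.Size([1, 32]) because max_length = 32
print(_demo['attention_mask'].shape)   # torch.Size([1, 32])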

for sample in text:
    encoding_dict = preprocessing(sample, tokenizer)
    token_id.append(encoding_dict['input_ids'])
    attention_masks.append(encoding_dict['attention_mask'])

token_id = torch.cat(token_id, dim = 0)
attention_masks = torch.cat(attention_masks, dim = 0)
labels = torch.tensor(labels)

import random
import numpy as np
from tabulate import tabulate

def print_rand_sentence_encoding():
    '''Displays tokens, token IDs and attention mask of a random text sample'''
    index = random.randint(0, len(text) - 1)
    tokens = tokenizer.tokenize(tokenizer.decode(token_id[index]))
    token_ids = [i.numpy() for i in token_id[index]]
    attention = [i.numpy() for i in attention_masks[index]]

    table = np.array([tokens, token_ids, attention]).T
    print(tabulate(table,
                   headers = ['Tokens', 'Token IDs', 'Attention Mask'],
                   tablefmt = 'fancy_grid'))

print_rand_sentence_encoding()

from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler

val_ratio = 0.2
# Recommended batch size: 16, 32. See: https://arxiv.org/pdf/1810.04805.pdf
batch_size = 32

# Indices of the train and validation splits stratified by labels
train_idx, val_idx = train_test_split(
    np.arange(len(labels)),
    test_size = val_ratio,
    shuffle = True,
    stratify = labels)

# Train and validation sets
train_set = TensorDataset(token_id[train_idx],
                          attention_masks[train_idx],
                          labels[train_idx])

val_set = TensorDataset(token_id[val_idx],
                        attention_masks[val_idx],
                        labels[val_idx])

# Prepare DataLoaders
train_dataloader = DataLoader(
    train_set,
    sampler = RandomSampler(train_set),
    batch_size = batch_size
)

validation_dataloader = DataLoader(
    val_set,
    sampler = SequentialSampler(val_set),
    batch_size = batch_size
)
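
# Illustrative check (added; not part of the original notebook): each batch from the
# DataLoader is a (token IDs, attention masks, labels) triple.
_ids, _masks, _lbls = next(iter(train_dataloader))
print(_ids.shape, _masks.shape, _lbls.shape)   # e.g. torch.Size([32, 32]) torch.Size([32, 32]) torch.Size([32])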

def b_tp(preds, labels):
    '''Returns True Positives (TP): predictions of class 1 that are actually class 1'''
    return sum([p == l and p == 1 for p, l in zip(preds, labels)])

def b_fp(preds, labels):
    '''Returns False Positives (FP): predictions of class 1 that are actually class 0'''
    return sum([p != l and p == 1 for p, l in zip(preds, labels)])

def b_tn(preds, labels):
    '''Returns True Negatives (TN): predictions of class 0 that are actually class 0'''
    return sum([p == l and p == 0 for p, l in zip(preds, labels)])

def b_fn(preds, labels):
    '''Returns False Negatives (FN): predictions of class 0 that are actually class 1'''
    return sum([p != l and p == 0 for p, l in zip(preds, labels)])

def b_metrics(preds, labels):
    '''
    Returns the following metrics:
      - accuracy    = (TP + TN) / N
      - precision   = TP / (TP + FP)
      - recall      = TP / (TP + FN)
      - specificity = TN / (TN + FP)
    '''
    preds = np.argmax(preds, axis = 1).flatten()
    labels = labels.flatten()
    tp = b_tp(preds, labels)
    tn = b_tn(preds, labels)
    fp = b_fp(preds, labels)
    fn = b_fn(preds, labels)
    b_accuracy = (tp + tn) / len(labels)
    b_precision = tp / (tp + fp) if (tp + fp) > 0 else 'nan'
    b_recall = tp / (tp + fn) if (tp + fn) > 0 else 'nan'
    b_specificity = tn / (tn + fp) if (tn + fp) > 0 else 'nan'
    return b_accuracy, b_precision, b_recall, b_specificity
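
# Illustrative sanity check (added; not part of the original notebook). Toy logits for
# 4 samples give predicted classes [1, 0, 1, 0] against true labels [1, 0, 0, 0], i.e.
# TP=1, TN=2, FP=1, FN=0 -> accuracy 0.75, precision 0.5, recall 1.0, specificity ~0.67.
_toy_logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7], [0.6, 0.4]])
_toy_labels = np.array([1, 0, 0, 0])
print(b_metrics(_toy_logits, _toy_labels))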

from transformers import BertForSequenceClassification   # AutoModel, AdamW and BertConfig were imported in the notebook but are not used

model = BertForSequenceClassification.from_pretrained(
    "dbmdz/bert-base-turkish-uncased",
    num_labels = 2,
    output_attentions = False,
    output_hidden_states = False)

optimizer = torch.optim.AdamW(model.parameters(),
                              lr = 5e-5,
                              eps = 1e-08
                              )

from tqdm import trange
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Run on the GPU when one is available (model.cuda() would crash on CPU-only machines).
model.to(device)
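
# Illustrative check (added; not part of the original notebook): rough size of the model
# being fine-tuned (a BERT-base checkpoint is on the order of 110M parameters).
print('Trainable parameters:', sum(p.numel() for p in model.parameters() if p.requires_grad))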

# Recommended number of epochs: 2, 3, 4. See: https://arxiv.org/pdf/1810.04805.pdf
epochs = 5

for _ in trange(epochs, desc = 'Epoch'):

    # ========== Training ==========

    # Set model to training mode
    model.train()

    # Tracking variables
    tr_loss = 0
    nb_tr_examples, nb_tr_steps = 0, 0

    for step, batch in enumerate(train_dataloader):
        batch = tuple(t.to(device) for t in batch)
        b_input_ids, b_input_mask, b_labels = batch
        optimizer.zero_grad()
        # Forward pass
        train_output = model(b_input_ids,
                             token_type_ids = None,
                             attention_mask = b_input_mask,
                             labels = b_labels)
        # Backward pass
        train_output.loss.backward()
        optimizer.step()
        # Update tracking variables
        tr_loss += train_output.loss.item()
        nb_tr_examples += b_input_ids.size(0)
        nb_tr_steps += 1

    # ========== Validation ==========

    # Set model to evaluation mode
    model.eval()

    # Tracking variables
    val_accuracy = []
    val_precision = []
    val_recall = []
    val_specificity = []

    for batch in validation_dataloader:
        batch = tuple(t.to(device) for t in batch)
        b_input_ids, b_input_mask, b_labels = batch
        with torch.no_grad():
            # Forward pass
            eval_output = model(b_input_ids,
                                token_type_ids = None,
                                attention_mask = b_input_mask)
        logits = eval_output.logits.detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()
        # Calculate validation metrics
        b_accuracy, b_precision, b_recall, b_specificity = b_metrics(logits, label_ids)
        val_accuracy.append(b_accuracy)
        # Update precision only when (tp + fp) != 0; ignore nan
        if b_precision != 'nan': val_precision.append(b_precision)
        # Update recall only when (tp + fn) != 0; ignore nan
        if b_recall != 'nan': val_recall.append(b_recall)
        # Update specificity only when (tn + fp) != 0; ignore nan
        if b_specificity != 'nan': val_specificity.append(b_specificity)

    print('\n\t - Train loss: {:.4f}'.format(tr_loss / nb_tr_steps))
    print('\t - Validation Accuracy: {:.4f}'.format(sum(val_accuracy)/len(val_accuracy)))
    print('\t - Validation Precision: {:.4f}'.format(sum(val_precision)/len(val_precision)) if len(val_precision)>0 else '\t - Validation Precision: NaN')
    print('\t - Validation Recall: {:.4f}'.format(sum(val_recall)/len(val_recall)) if len(val_recall)>0 else '\t - Validation Recall: NaN')
    print('\t - Validation Specificity: {:.4f}\n'.format(sum(val_specificity)/len(val_specificity)) if len(val_specificity)>0 else '\t - Validation Specificity: NaN')
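
# Optional sketch (added; not part of the original notebook): persist the fine-tuned
# weights so the app could load them instead of retraining on every Streamlit run.
# The directory name below is just a placeholder.
# model.save_pretrained('spamd-berturk-finetuned')
# tokenizer.save_pretrained('spamd-berturk-finetuned')
# ...and in a later run:
# model = BertForSequenceClassification.from_pretrained('spamd-berturk-finetuned').to(device)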

# Used for printing the name of the variables. Removing it will not interrupt the project.
def namestr(obj, namespace):
    return [name for name in namespace if namespace[name] is obj]

def predict(new_sentence):
    # We need Token IDs and Attention Mask for inference on the new sentence
    test_ids = []
    test_attention_mask = []

    # Apply the tokenizer
    encoding = preprocessing(new_sentence, tokenizer)

    # Extract IDs and Attention Mask
    test_ids.append(encoding['input_ids'])
    test_attention_mask.append(encoding['attention_mask'])
    test_ids = torch.cat(test_ids, dim = 0)
    test_attention_mask = torch.cat(test_attention_mask, dim = 0)

    # Forward pass, calculate logit predictions
    with torch.no_grad():
        output = model(test_ids.to(device), token_type_ids = None, attention_mask = test_attention_mask.to(device))

    prediction = 'Spam' if np.argmax(output.logits.cpu().numpy()).flatten().item() == 1 else 'Normal'

    print('Input', namestr(new_sentence, globals()), ': \n', new_sentence)
    # Remove the namestr(new_sentence, globals()) in case of an error
    print('Predicted Class: ', prediction, '\n----------------------------------\n')
    # Show the result in the Streamlit UI as well (the prints above only reach the logs).
    st.write('Predicted Class: ', prediction)
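
# Illustrative usage (added; not part of the original notebook). The Turkish sentences
# are made-up placeholders; a fine-tuned model would be expected to label the first as
# spam and the second as normal.
# predict("Tebrikler, büyük ödülü kazandınız! Hemen linke tıklayın")
# predict("Toplantı yarın saat 10'da, ofiste görüşürüz")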

# Textbox for the text the user is entering
st.subheader("Enter the text you'd like to analyze for spam.")
user_text = st.text_input('Enter text')   # the user's input is stored in this variable

# Only run inference once the user has actually entered something.
if user_text:
    predict(user_text)
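
# Optional sketch (added; not part of the original notebook): in Streamlit, heavyweight
# setup is usually wrapped in a cached factory so it runs once per process instead of on
# every interaction. The directory name matches the placeholder used above.
# @st.cache_resource
# def load_finetuned_model():
#     return BertForSequenceClassification.from_pretrained('spamd-berturk-finetuned').to(device)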

'''
@software{stefan_schweter_2020_3770924,
  author    = {Stefan Schweter},
  title     = {BERTurk - BERT models for Turkish},
  month     = apr,
  year      = 2020,
  publisher = {Zenodo},
  version   = {1.0.0},
  doi       = {10.5281/zenodo.3770924},
  url       = {https://doi.org/10.5281/zenodo.3770924}
}
'''