(NLP) Spam Mail Classification with a CNN

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

plt.style.use('seaborn-white')  # renamed to 'seaborn-v0_8-white' in matplotlib >= 3.6

from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import urllib.request

urllib.request.urlretrieve("https://raw.githubusercontent.com/mohitgupta-omg/Kaggle-SMS-spam-Collection-Dataset-/master/spam.csv", filename='spam.csv')

data = pd.read_csv('spam.csv', encoding='latin-1')
print(len(data))
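
A quick peek at the raw frame clarifies the layout before preprocessing: v1 holds the ham/spam label and v2 the message text (the Kaggle CSV also ships a few empty "Unnamed" columns, which are simply ignored below).

# v1 = label ('ham'/'spam'), v2 = message text; extra unnamed columns are unused.
print(data.head())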

data['v1'] = data['v1'].replace(['ham', 'spam'], [0, 1])
data.drop_duplicates(subset=['v2'], inplace=True)
data['v1'].value_counts().plot(kind='bar')

data.groupby('v1').size().reset_index(name='count')
X_data = data['v2']
y_data = data['v1']
vocab_size = 1000
tokenizer = Tokenizer(num_words=vocab_size)
tokenizer.fit_on_texts(X_data)
sequences = tokenizer.texts_to_sequences(X_data)
X_data = sequences
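
At this point each mail is a list of integer word indices: index 1 is the most frequent word, and because num_words is set, only the top vocab_size most-frequent words survive texts_to_sequences (rarer words are silently dropped). A quick check, using the names above:

# word_index ranks every word by frequency; only indices below num_words are emitted.
print(list(tokenizer.word_index.items())[:5])
print(X_data[0])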
print('Maximum mail length: {}'.format(max(len(l) for l in X_data)))
print('Average mail length: {}'.format(sum(map(len, X_data)) / len(X_data)))
plt.hist([len(s) for s in X_data], bins=50)
plt.xlabel('Length of Samples')
plt.ylabel('Number of Samples')
plt.show()

max_len = 60
data = pad_sequences(X_data, maxlen=max_len)  # note: rebinds 'data' from the DataFrame to a padded 2-D array
print(data.shape)
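
By default pad_sequences pads with zeros and truncates on the left ('pre'), so every row comes out exactly max_len long. A toy illustration, independent of the dataset:

# Shorter sequences are zero-padded in front; longer ones lose their leading tokens.
print(pad_sequences([[1, 2, 3]], maxlen=5))           # [[0 0 1 2 3]]
print(pad_sequences([[1, 2, 3, 4, 5, 6]], maxlen=5))  # [[2 3 4 5 6]]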
n_train = int(len(sequences) * 0.8)
n_test = len(sequences) - n_train
X_train = data[:n_train]
y_train = np.array(y_data[:n_train])
X_test = data[n_train:]
y_test = np.array(y_data[n_train:])
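
The split is positional rather than shuffled: the first 80% of rows go to training and the rest to testing. A quick shape check confirms features and labels stay aligned:

# X arrays are (n, max_len) integer matrices; y arrays are matching 0/1 label vectors.
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)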
from tensorflow.keras.layers import Dense, Conv1D, GlobalMaxPooling1D, Embedding, Dropout, MaxPooling1D
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

model = Sequential()
model.add(Embedding(vocab_size, 32))
model.add(Dropout(0.2))
model.add(Conv1D(32, 5, strides=1, padding='valid', activation='relu'))
model.add(GlobalMaxPooling1D())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()
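
In words: each of the 60 word indices is embedded as a 32-dimensional vector, 32 convolution filters of width 5 slide over the sequence, global max pooling keeps each filter's strongest activation, and a 64-unit dense layer feeds a single sigmoid unit for the binary spam/ham decision.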

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['acc'])

es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3)
mc = ModelCheckpoint('best_model.h5', monitor='val_acc', mode='max', save_best_only=True)
history = model.fit(X_train, y_train,
                    epochs=10,
                    batch_size=64,
                    validation_split=0.2,
                    callbacks=[es, mc])
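
Once training stops, the checkpoint written by ModelCheckpoint holds the weights with the best validation accuracy. A minimal sketch for scoring it on the held-out split (assuming best_model.h5 was written by the callback above):

from tensorflow.keras.models import load_model

# Reload the best checkpoint; evaluate() returns [loss, accuracy] here.
best_model = load_model('best_model.h5')
test_loss, test_acc = best_model.evaluate(X_test, y_test)
print('test accuracy: {:.4f}'.format(test_acc))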

loss = history.history['loss']
val_loss = history.history['val_loss']
acc = history.history['acc']
val_acc = history.history['val_acc']

epochs = range(1, len(loss)+1)

plt.plot(epochs, loss, 'b--', label='training loss')
plt.plot(epochs, val_loss, 'r', label='validation loss')
plt.legend()
plt.show()