# 인공지능 공부/딥러닝 논문읽기 (AI study / deep-learning paper reading)
# 파이썬 딥러닝 텐서플로 기초부터 (Python deep learning with TensorFlow, from the basics)
# 앨런튜링_ (author: "Alan Turing")
# Posted: 2021. 9. 6. 10:04
import tensorflow as tf

# Load the MNIST handwritten-digit dataset, pre-split into a
# training set (60k images) and a validation set (10k images).
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_valid, y_valid) = mnist.load_data()

print(x_train.shape, y_train.shape)
print(x_valid.shape, y_valid.shape)
# Expected output:
# (60000, 28, 28) (60000,)
# (10000, 28, 28) (10000,)
# Use the pyplot API directly; matplotlib.pylab is discouraged.
import matplotlib.pyplot as plt


def plot_image(data, idx):
    """Display one grayscale image from an image array.

    Args:
        data: array of images indexed along the first axis
              (e.g. MNIST x_train with shape (N, 28, 28)).
        idx: integer index of the image to display.
    """
    plt.figure(figsize=(5, 5))
    plt.imshow(data[idx], cmap='gray')
    plt.axis('off')  # hide tick marks/labels for a clean image view
    plt.show()


plot_image(x_train, 0)
# Check the raw pixel-value range before normalization.
print(x_train.min(), x_train.max())
print(x_valid.min(), x_valid.max())
# Expected output:
# 0 255
# 0 255
# Normalize pixel values from [0, 255] to [0, 1] for stable
# deep-learning training.
x_train = x_train / 255.0
x_valid = x_valid / 255.0

print(x_train.min(), x_train.max())
print(x_valid.min(), x_valid.max())
# Expected output:
# 0.0 1.0
# 0.0 1.0
# Conv2D expects a trailing color-channel axis, so add one:
# (60000, 28, 28) -> (60000, 28, 28, 1)
print(x_train.shape, x_valid.shape)

x_train_in = x_train[..., tf.newaxis]
x_valid_in = x_valid[..., tf.newaxis]

print(x_train_in.shape, x_valid_in.shape)
# Expected output:
# (60000, 28, 28) (10000, 28, 28)
# (60000, 28, 28, 1) (10000, 28, 28, 1)
# Build a minimal sample CNN with the Keras Sequential API:
# one conv layer, one max-pool, then a flatten + softmax classifier.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu',
                                 input_shape=(28, 28, 1), name='conv'))
model.add(tf.keras.layers.MaxPooling2D((2, 2), name='pool'))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(10, activation='softmax'))

# Labels are plain integers (0-9), so use sparse categorical cross-entropy.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Train for 10 epochs, tracking validation metrics after each epoch;
# the returned History object is used later for the learning curves.
history = model.fit(x_train_in, y_train,
                    validation_data=(x_valid_in, y_valid),
                    epochs=10)

model.evaluate(x_valid_in, y_valid)
# Expected output (approximately):
# 313/313 [==============================] - 1s 4ms/step - loss: 0.0626 - accuracy: 0.9825
# [0.06260814517736435, 0.9825000166893005]
def plot_loss_acc(history, epoch):
    """Plot training/validation loss and accuracy curves side by side.

    Args:
        history: Keras History object returned by model.fit(); its
                 .history dict must contain 'loss', 'val_loss',
                 'accuracy' and 'val_accuracy'.
        epoch: number of trained epochs (length of the x-axis).
    """
    loss, val_loss = history.history['loss'], history.history['val_loss']
    acc, val_acc = history.history['accuracy'], history.history['val_accuracy']

    fig, axes = plt.subplots(1, 2, figsize=(12, 4))

    axes[0].plot(range(1, epoch + 1), loss, label='Training')
    axes[0].plot(range(1, epoch + 1), val_loss, label="Validation")
    axes[0].legend(loc='best')
    axes[0].set_title('Loss')

    axes[1].plot(range(1, epoch + 1), acc, label='Training')
    axes[1].plot(range(1, epoch + 1), val_acc, label="Validation")
    axes[1].legend(loc='best')
    axes[1].set_title('Accuracy')

    plt.show()


plot_loss_acc(history, 10)
# Inspect the model's structure, tensors, layers and weights.
# The commented lines below preserve the values shown in the original
# interactive session (bare expressions only display in a notebook).
model.summary()

model.input
# <KerasTensor: shape=(None, 28, 28, 1) dtype=float32 (created by layer 'conv_input')>

model.output
# <KerasTensor: shape=(None, 10) dtype=float32 (created by layer 'dense')>

model.layers
# [<keras.layers.convolutional.Conv2D at 0x158e629b4f0>,
#  <keras.layers.pooling.MaxPooling2D at 0x158e6234d60>,
#  <keras.layers.core.Flatten at 0x158e62d10d0>,
#  <keras.layers.core.Dense at 0x158e62d1820>]

model.layers[0]
# <keras.layers.convolutional.Conv2D at 0x158e629b4f0>

model.layers[0].input
# <KerasTensor: shape=(None, 28, 28, 1) dtype=float32 (created by layer 'conv_input')>

model.layers[0].output
# <KerasTensor: shape=(None, 26, 26, 32) dtype=float32 (created by layer 'conv')>

model.layers[0].weights
model.layers[0].bias

# Layers can also be fetched by the name given at construction time.
model.get_layer('conv')
# <keras.layers.convolutional.Conv2D at 0x158e629b4f0>
# Build an "activation model" mapping the model input to the outputs
# of the first two layers (conv and pool), so the intermediate
# feature maps can be visualized.
activator = tf.keras.Model(inputs=model.input,
                           outputs=[layer.output for layer in model.layers[:2]])

# Feed one training image through it; [tf.newaxis, ...] adds the
# batch axis Keras expects: (28, 28, 1) -> (1, 28, 28, 1).
activations = activator.predict(x_train_in[0][tf.newaxis, ...])

len(activations)
# 2

conv_activation = activations[0]
conv_activation.shape
# (1, 26, 26, 32)
# Visualize all 32 feature maps produced by the conv layer in a
# 4 x 8 grid of subplots.
fig, axes = plt.subplots(4, 8)
fig.set_size_inches(10, 5)
for i in range(32):
    ax = axes[i // 8, i % 8]
    ax.matshow(conv_activation[0, :, :, i], cmap='viridis')
    ax.set_title('kernel %s' % str(i), fontsize=10)
    # Hide tick labels so the grid stays readable.
    plt.setp(ax.get_xticklabels(), visible=False)
    plt.setp(ax.get_yticklabels(), visible=False)

plt.tight_layout()
plt.show()
# The (2, 2) max-pool halves each spatial dimension: 26x26 -> 13x13.
pooling_activation = activations[1]
print(pooling_activation.shape)
# Expected output:
# (1, 13, 13, 32)
# Visualize the same 32 channels after max-pooling in a 4 x 8 grid.
fig, axes = plt.subplots(4, 8)
fig.set_size_inches(10, 5)
for i in range(32):
    ax = axes[i // 8, i % 8]
    ax.matshow(pooling_activation[0, :, :, i], cmap='viridis')
    ax.set_title('kernel %s' % str(i), fontsize=10)
    # Hide tick labels so the grid stays readable.
    plt.setp(ax.get_xticklabels(), visible=False)
    plt.setp(ax.get_yticklabels(), visible=False)

plt.tight_layout()
plt.show()