100-Class Classification
With 100 classes, one-hot encoding turns every label into a vector of 99 zeros and a single 1, which wastes memory and adds unnecessary overhead.
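A quick back-of-the-envelope sketch of that overhead (my addition, not from the lecture), using keras.utils.to_categorical on fake labels:

import numpy as np
from keras.utils import to_categorical

y = np.random.randint(0, 100, size=(50000, 1))  # integer labels, like CIFAR-100's
y_onehot = to_categorical(y, num_classes=100)   # (50000, 100) float32, mostly zeros
print(y.nbytes)                                 # roughly 400 KB of integers
print(y_onehot.nbytes)                          # roughly 20 MB of one-hot vectors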
1. Loading and Splitting the Data
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets.cifar100 import load_data
(train_x, train_y), (test_x, test_y) = load_data()
# Split the training data into train and validation sets
from sklearn.model_selection import train_test_split
train_x, val_x, train_y, val_y = train_test_split(train_x,train_y, test_size=.2, random_state=2024)
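A quick shape check confirms the 40,000/10,000 split (CIFAR-100 images are 32x32 RGB, and the labels load as (N, 1) integer arrays):

print(train_x.shape, train_y.shape)  # (40000, 32, 32, 3) (40000, 1)
print(val_x.shape, val_y.shape)      # (10000, 32, 32, 3) (10000, 1)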
2. Modeling
Use sparse_categorical_crossentropy when compiling, so the integer labels can be fed in directly without one-hot encoding.
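As a sanity check (my addition, not from the lecture), the sparse loss on an integer label gives the same value as categorical_crossentropy on its one-hot version:

import numpy as np
from keras.losses import sparse_categorical_crossentropy, categorical_crossentropy
from keras.utils import to_categorical

y_true = np.array([3])                     # one integer label
p = np.random.rand(1, 100)
y_pred = p / p.sum(axis=1, keepdims=True)  # fake softmax output
print(sparse_categorical_crossentropy(y_true, y_pred))
print(categorical_crossentropy(to_categorical(y_true, 100), y_pred))  # same value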
- Libraries
from keras.utils import clear_session, plot_model
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Flatten, BatchNormalization, Dropout
from keras.layers import Conv2D, MaxPool2D
from keras.callbacks import EarlyStopping
from keras.layers import RandomRotation, RandomTranslation, RandomFlip, RandomZoom
- Sequential API
clear_session()

model = Sequential([
    Input(shape=(32, 32, 3)),
    #########################
    # Augmentation layers
    RandomRotation(0.1),
    RandomTranslation(0.1, 0.1),
    RandomZoom(0.1),
    RandomFlip('horizontal'),
    #########################
    Conv2D(64, 3, 1, 'same', activation='relu'),
    Conv2D(64, 3, 1, 'same', activation='relu'),
    MaxPool2D(2),
    BatchNormalization(),
    Dropout(0.4),
    Conv2D(128, 3, 1, 'same', activation='relu'),
    Conv2D(128, 3, 1, 'same', activation='relu'),
    MaxPool2D(2),
    BatchNormalization(),
    Dropout(0.4),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(100, activation='softmax'),
])

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
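plot_model is imported above but never called; if pydot and graphviz are installed, it can render the layer stack as a diagram:

plot_model(model, show_shapes=True)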
- Functional API
clear_session()

il = Input(shape=(32, 32, 3))

# Augmentation layers
al = RandomTranslation(height_factor=(-0.3, 0.3), width_factor=(-0.3, 0.3))(il)
al = RandomZoom(height_factor=(-0.2, 0.2), width_factor=(-0.2, 0.2))(al)

# Conv block 1
hl = Conv2D(64, 3, 1, 'same', activation='relu')(al)
hl = Conv2D(64, 3, 1, 'same', activation='relu')(hl)
hl = MaxPool2D(2, 2)(hl)
hl = BatchNormalization()(hl)
hl = Dropout(0.25)(hl)

# Conv block 2
hl = Conv2D(128, 3, 1, 'same', activation='relu')(hl)
hl = Conv2D(128, 3, 1, 'same', activation='relu')(hl)
hl = MaxPool2D(2, 2)(hl)
hl = BatchNormalization()(hl)

# Conv block 3
hl = Conv2D(256, 3, 1, 'same', activation='relu')(hl)
hl = Conv2D(256, 3, 1, 'same', activation='relu')(hl)
hl = MaxPool2D(2, 2)(hl)
hl = BatchNormalization()(hl)
hl = Dropout(0.25)(hl)

# Classifier head
hl = Flatten()(hl)
hl = Dense(4096, activation='relu')(hl)
hl = BatchNormalization()(hl)
hl = Dropout(0.25)(hl)
hl = Dense(1024, activation='relu')(hl)
hl = BatchNormalization()(hl)
hl = Dropout(0.25)(hl)
ol = Dense(100, activation='softmax')(hl)

model = Model(il, ol)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
3. Training
Set validation_data to the validation split.
es = EarlyStopping(monitor='val_loss', patience=5, verbose=1, restore_best_weights=True)
history = model.fit(train_x, train_y, epochs=10000, validation_data=(val_x,val_y), callbacks=[es])
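matplotlib is imported at the top but otherwise unused; a typical learning-curve plot of the returned history (the key names match the loss and metrics set in compile):

plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()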
4. Evaluation
Reshape test_y to 1-D for the sklearn metrics (it loads with shape (10000, 1)).
# Evaluate
model_test = model.evaluate(test_x, test_y)

# Predict
y_pred = model.predict(test_x)
y_pred_arg = np.argmax(y_pred, axis=1)
test_y_arg = test_y.reshape(-1)
from sklearn.metrics import accuracy_score, classification_report
accuracy_score(test_y_arg, y_pred_arg)
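classification_report is imported above but not used; it prints per-class precision, recall, and F1 for all 100 classes (long output):

print(classification_report(test_y_arg, y_pred_arg))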