Single-layer Perceptron (SLP) Coding Practice
- Import
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib_inline.backend_inline
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_digits
import tensorflow
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.datasets import mnist
import random
- Load data
digits = load_digits()
digits
- Check data info
digits.keys()
- Set x and y
x_data = digits["images"]
y_data = digits["target"]
- Train/test data split
x_train, x_test, y_train, y_test = train_test_split(x_data,
                                                    y_data,
                                                    test_size = 0.20,
                                                    random_state = 42,
                                                    stratify = y_data)
- Scaling
x_train, x_test = x_train / x_train.max(), x_test / x_train.max()
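Both splits are divided by the training-set maximum (the raw 8×8 digits have pixel values from 0 to 16), so the test data is scaled with a statistic taken from the training data only. A quick sanity check, using nothing beyond the arrays defined above:
# After scaling, the training pixels span exactly [0, 1]; the test pixels stay in roughly the same range
print(x_train.min(), x_train.max())
print(x_test.min(), x_test.max())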
print("x_train: ", x_train.shape)
print("y_train: ", y_train.shape)
print("x_test: ", x_test.shape)
print("y_test: ", y_test.shape)
x_train: (1437, 8, 8)
y_train: (1437,)
x_test: (360, 8, 8)
y_test: (360,)
# After checking the shape, flatten the 8x8 images to 2-D arrays for the dense network
x_train_re = np.reshape(x_train, (x_train.shape[0], 64))
x_test_re = np.reshape(x_test, (x_test.shape[0], 64))
x_train_re.shape
(1437, 64)
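As a quick check that the flattening only rearranges pixels, the first row of the flattened array can be compared with the first raw 8×8 image; the index 0 below is just an arbitrary example:
# Row 0 of x_train_re is the first 8x8 image laid out as 64 consecutive values
print(np.array_equal(x_train_re[0], x_train[0].ravel()))
plt.imshow(x_train[0], cmap = "gray")
plt.title(f"label: {y_train[0]}")
plt.show()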
- One-hot encoding
y_train = to_categorical(y_train)
y_train
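A shape check after encoding: to_categorical turns the 1437 integer labels into a matrix with one column per digit class. Note that y_test is left as integer labels, which the accuracy computation at the end relies on.
print(y_train.shape)   # (1437, 10) after one-hot encoding
print(y_test.shape)    # (360,) - y_test stays as integer labels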
- Modeling
random.seed(209)   # seeds Python's random module only; TensorFlow's weight initialization is not affected
# Number of input nodes = number of features
input_node = 8*8
# Number of output nodes
output_node = 10
# Model
model_digit = Sequential()
model_digit.add(Dense(output_node,
                      activation = "softmax",
                      input_shape = (input_node,)))
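Before compiling, summary() gives a quick confirmation that this really is a single layer: 64 inputs × 10 outputs plus 10 biases = 650 trainable parameters.
model_digit.summary()   # one Dense layer, 64*10 + 10 = 650 parameters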
- Compiling
model_digit.compile(loss = 'categorical_crossentropy',
                    optimizer = 'adam',
                    metrics = ['accuracy'])
- Set hyperparameters
model_digit_hist = model_digit.fit(x_train_re,
                                   y_train,
                                   epochs = 100,
                                   batch_size = 10,
                                   validation_split = 0.2)
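EarlyStopping and ModelCheckpoint are imported above but never used; the sketch below shows one way they could be attached to the same fit call. The history name, the patience value, and the checkpoint filename are illustrative choices, not part of the original notebook.
# Optional: stop when val_loss stops improving and keep the best weights on disk
early_stop = EarlyStopping(monitor = "val_loss", patience = 10, restore_best_weights = True)
checkpoint = ModelCheckpoint("slp_digits.h5", monitor = "val_loss", save_best_only = True)
model_digit_hist_es = model_digit.fit(x_train_re,
                                      y_train,
                                      epochs = 100,
                                      batch_size = 10,
                                      validation_split = 0.2,
                                      callbacks = [early_stop, checkpoint])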
- Visualize the training curves
fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize = (8,3))
ax_loss.plot(model_digit_hist.history["loss"], label = "train_loss")
ax_loss.plot(model_digit_hist.history["val_loss"], label = "val_loss")
ax_loss.set(xlabel = "epoch", ylabel = "loss")
ax_loss.legend()
ax_acc.plot(model_digit_hist.history["accuracy"], label = "train_acc")
ax_acc.plot(model_digit_hist.history["val_accuracy"], label = "val_acc")
ax_acc.set(xlabel = "epoch", ylabel = "accuracy")
ax_acc.legend();
- Check accuracy
# Predicted class = argmax of the softmax outputs; y_test was kept as integer labels
pred_digit = np.argmax(model_digit.predict(x_test_re), axis = 1)
metrics.accuracy_score(pred_digit, y_test)
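As a cross-check, Keras' evaluate reports the same test accuracy directly; the to_categorical call on y_test below is only needed here because the targets were one-hot encoded for training.
test_loss, test_acc = model_digit.evaluate(x_test_re, to_categorical(y_test), verbose = 0)
print("test accuracy:", test_acc)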
Multi-layer Perceptron (MLP) Coding Practice
- Load data
cancer = load_breast_cancer()
- Set x and y
x_data = cancer["data"]
y_data = cancer["target"]
- Train/test data split
x_train, x_test, y_train, y_test = train_test_split(x_data,
                                                    y_data,
                                                    test_size = 0.20,
                                                    random_state = 42,
                                                    stratify = y_data)
- Scaling
minmax = MinMaxScaler()
x_train_scaled = minmax.fit_transform(x_train)
x_test_scaled = minmax.transform(x_test)
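The scaler is fitted on the training split only and reused on the test split, so both are mapped with the same per-feature minimum and maximum. A small sanity check using standard MinMaxScaler attributes:
# Per-feature ranges learned from the training data
print(minmax.data_min_[:3], minmax.data_max_[:3])
# Training features now lie in [0, 1]; test features can fall slightly outside that range
print(x_train_scaled.min(), x_train_scaled.max())
print(x_test_scaled.min(), x_test_scaled.max())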
print("x_train: ", x_train.shape)print("y_train: ", y_train.shape)print("x_test: ", x_test.shape)print("y_test: ", y_test.shape) - Modeling
random.seed(42)
# Number of input nodes = number of features
input_node = 30
# Number of output nodes
output_node = 1
# Number of hidden-layer nodes
first_hidden_node = 10
# Model
model_cancer = Sequential(name = "cancer_classifier")
model_cancer.add(Dense(first_hidden_node,
                       # kernel_initializer = 'he_normal',
                       activation = "relu",
                       input_shape = (input_node,)))
model_cancer.add(Dense(output_node,
                       # kernel_initializer = 'he_normal',
                       activation = "sigmoid"))
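A summary() call confirms the two-layer structure: 30 × 10 + 10 = 310 parameters in the hidden layer and 10 × 1 + 1 = 11 in the output layer, 321 in total.
model_cancer.summary()   # expect 310 + 11 = 321 trainable parameters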
- Compiling
adam = Adam(learning_rate=0.01)
model_cancer.compile(loss = 'binary_crossentropy',
                     optimizer = adam,
                     metrics = ['accuracy'])
- Set hyperparameters
model_cancer_hist = model_cancer.fit(x_train_scaled,
                                     y_train,
                                     epochs = 10,
                                     batch_size = 10,
                                     validation_split = 0.2,
                                     # verbose = 1
                                     )
- Visualize the learning curves
fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize = (8,3))
ax_loss.plot(model_cancer_hist.history["loss"], label = "train_loss")
ax_loss.plot(model_cancer_hist.history["val_loss"], label = "val_loss")
ax_loss.set(xlabel = "epoch", ylabel = "loss")
ax_loss.legend()
ax_acc.plot(model_cancer_hist.history["accuracy"], label = "train_acc")
ax_acc.plot(model_cancer_hist.history["val_accuracy"], label = "val_acc")
ax_acc.set(xlabel = "epoch", ylabel = "accuracy")
ax_acc.legend();
- Model evaluation
# Threshold the sigmoid outputs at 0.5 to get class labels (0 = malignant, 1 = benign)
pred_cancer = model_cancer.predict(x_test_scaled).flatten() >= 0.5
- Check accuracy
metrics.accuracy_score(pred_cancer, y_test)
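Beyond a single accuracy number, scikit-learn's confusion matrix and classification report show how the two classes are handled individually; a short sketch using the predictions computed above:
# Per-class breakdown on the test set (0 = malignant, 1 = benign)
print(metrics.confusion_matrix(y_test, pred_cancer.astype(int)))
print(metrics.classification_report(y_test, pred_cancer.astype(int), target_names = cancer["target_names"]))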