import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
# Accuracy, confusion matrix, and classification report metrics
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
# Standardization (feature scaling) library
from sklearn.preprocessing import StandardScaler
# Load the 13-feature wine classification dataset (3 classes, 178 samples).
wine = datasets.load_wine()
X = wine.data
y = wine.target

# Hold out 30% of the samples for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=123
)

# Standardize features: fit the scaler's statistics on the training set only,
# then apply the same transformation to the test set. Do not call fit on
# X_test — that would re-learn the statistics from (and leak) the test data.
sc = StandardScaler()
X_train_sc = sc.fit_transform(X_train)
X_test_sc = sc.transform(X_test)

# Shallow random forest (max_depth=2) on the standardized features.
# NOTE(review): tree-based models are scale-invariant, so standardization is
# not strictly needed here — kept to preserve the original pipeline.
model = RandomForestClassifier(max_depth=2, random_state=124)
model.fit(X_train_sc, y_train)

y_pred = model.predict(X_test_sc)

# Report results. The original computed accuracy and the confusion matrix
# without printing or assigning them, so both values were silently discarded.
print("Accuracy:", accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
Expanded output:
precision recall f1-score support
0 1.00 1.00 1.00 14
1 0.86 1.00 0.92 18
2 1.00 0.86 0.93 22
accuracy 0.94 54
macro avg 0.95 0.95 0.95 54
weighted avg 0.95 0.94 0.94 54
Other posts in the 'Colab > Machine Learning' category
12. 부스팅(Boosting) 01 (0) | 2023.03.09 |
---|---|
11. 랜덤 포레스트 (random forest) 03 (0) | 2023.03.09 |
09. 랜덤 포레스트 (random forest) 01 (0) | 2023.03.09 |
08. 결정 트리 (Decision Tree) 02 (0) | 2023.03.08 |
07. 결정 트리 (Decision Tree) 01 (0) | 2023.03.08 |