73 KiB
73 KiB
In [17]:
import pandas as pd

# Source CSVs use ';' as the field separator and ',' as the decimal mark
# (European-style export), so both must be passed to read_csv.
train = pd.read_csv("data/density_train.csv", sep=";", decimal=",")
test = pd.read_csv("data/density_test.csv", sep=";", decimal=",")

# NOTE(review): the original cell contained no-op self-assignments
# (train["Density"] = train["Density"]); removed as dead code.
display(train.head())
display(test.head())
In [18]:
import matplotlib.pyplot as plt

# Quick look at the distribution of every numeric column in the training set.
train.hist(bins=30, figsize=(10, 10))
plt.show()
In [19]:
# The temperature column "T" is the regression target; all other columns
# are used as features.
target_col = "T"

y_train = train[target_col]
X_train = train.drop(columns=[target_col])
display(X_train.head())
display(y_train.head())

y_test = test[target_col]
X_test = test.drop(columns=[target_col])
display(X_test.head())
display(y_test.head())
In [20]:
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model, tree, neighbors, ensemble

random_state = 9


def _poly_ols(poly_features):
    """Polynomial feature expansion followed by intercept-free OLS."""
    return make_pipeline(
        poly_features,
        linear_model.LinearRegression(fit_intercept=False, n_jobs=-1),
    )


# Candidate regressors, keyed by a short name; each entry holds the
# unfitted estimator under "model" (fit results are added later).
models = {
    "linear": {"model": linear_model.LinearRegression(n_jobs=-1)},
    "linear_poly": {"model": _poly_ols(PolynomialFeatures(degree=2))},
    "linear_interact": {"model": _poly_ols(PolynomialFeatures(interaction_only=True))},
    "ridge": {"model": linear_model.RidgeCV()},
    "decision_tree": {
        "model": tree.DecisionTreeRegressor(
            random_state=random_state, max_depth=6, criterion="absolute_error"
        )
    },
    "knn": {"model": neighbors.KNeighborsRegressor(n_neighbors=7, n_jobs=-1)},
    "random_forest": {
        "model": ensemble.RandomForestRegressor(
            max_depth=7, random_state=random_state, n_jobs=-1
        )
    },
}
In [21]:
import math
from sklearn import metrics

# Fit every candidate model and record its scores in the `models` dict.
for name, entry in models.items():
    print(f"Model: {name}")
    estimator = entry["model"].fit(X_train.values, y_train.values.ravel())
    pred_train = estimator.predict(X_train.values)
    pred_test = estimator.predict(X_test.values)

    entry["fitted"] = estimator
    # RMSE on both splits to expose over-/under-fitting.
    entry["RMSE_train"] = math.sqrt(metrics.mean_squared_error(y_train, pred_train))
    entry["RMSE_test"] = math.sqrt(metrics.mean_squared_error(y_test, pred_test))
    # "RMAE" here is the square root of the mean absolute error
    # (notebook-specific metric, kept for consistency with later cells).
    entry["RMAE_test"] = math.sqrt(metrics.mean_absolute_error(y_test, pred_test))
    entry["R2_test"] = metrics.r2_score(y_test, pred_test)
In [22]:
score_cols = ["RMSE_train", "RMSE_test", "RMAE_test", "R2_test"]
reg_metrics = pd.DataFrame.from_dict(models, "index")[score_cols]

# Rank models best-to-worst by test RMSE; shade error columns with viridis
# and the remaining scores with plasma for quick visual comparison.
(
    reg_metrics.sort_values(by="RMSE_test")
    .style.background_gradient(
        cmap="viridis", low=1, high=0.3, subset=["RMSE_train", "RMSE_test"]
    )
    .background_gradient(
        cmap="plasma", low=0.3, high=1, subset=["RMAE_test", "R2_test"]
    )
)
Out[22]:
In [53]:
import numpy as np
from sklearn import model_selection

# Hyperparameter grid for the decision tree.
parameters = {
    "criterion": ["squared_error", "absolute_error", "friedman_mse", "poisson"],
    "max_depth": np.arange(1, 21).tolist()[0::2],  # odd depths 1, 3, ..., 19
    "min_samples_split": np.arange(2, 11).tolist()[0::2],  # 2, 4, 6, 8, 10
}
grid = model_selection.GridSearchCV(
    tree.DecisionTreeRegressor(random_state=random_state),
    parameters,
    cv=4,
    n_jobs=-1,
    scoring="r2",
)
# BUG FIX: the search was previously fitted on the *test* split
# (grid.fit(X_test, y_test)), leaking it into model selection.
# Hyperparameters must be tuned on the training data only.
grid.fit(X_train, y_train)
grid.best_params_
Out[53]:
In [54]:
model = grid.best_estimator_
y_pred = model.predict(X_test)

# Baseline: the un-tuned decision tree scored in the earlier loop.
old_metrics = {
    key: models["decision_tree"][key]
    for key in ("RMSE_test", "RMAE_test", "R2_test")
}

# Same metrics for the grid-search-tuned tree (plus plain MAE).
new_metrics = {
    "RMSE_test": math.sqrt(metrics.mean_squared_error(y_test, y_pred)),
    "RMAE_test": math.sqrt(metrics.mean_absolute_error(y_test, y_pred)),
    "MAE_test": float(metrics.mean_absolute_error(y_test, y_pred)),
    "R2_test": metrics.r2_score(y_test, y_pred),
}
display(old_metrics)
display(new_metrics)
In [55]:
# Human-readable decision rules of the baseline (un-tuned) tree.
feature_names = X_train.columns.values.tolist()
rules = tree.export_text(models["decision_tree"]["fitted"], feature_names=feature_names)
print(rules)
In [59]:
import pickle

# Persist the baseline (un-tuned) tree. Use a context manager so the file
# handle is closed; the original passed a bare open() and leaked it.
with open("data/temp_density_tree.model.sav", "wb") as f:
    pickle.dump(models["decision_tree"]["fitted"], f)
In [57]:
# Decision rules of the grid-search-tuned tree, for comparison with the baseline.
rules2 = tree.export_text(model, feature_names=list(X_train.columns))
print(rules2)
In [58]:
import pickle

# Persist the grid-search-tuned tree. Context manager closes the file handle;
# the original passed a bare open() and leaked it.
with open("data/temp_density_tree-gs.model.sav", "wb") as f:
    pickle.dump(model, f)