import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
# References:
#   https://towardsdatascience.com/linear-regression-in-6-lines-of-python-5e1d0cd05b8d
#   https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html
# Read data
df_data = pd.read_csv('Trips_Full_Data.csv', dtype={'Trips 1-25 Miles': 'float64',
                                                    'Trips 1-3 Miles': 'float64',
                                                    'Trips 10-25 Miles': 'float64',
                                                    'Trips 100-250 Miles': 'float64',
                                                    'Trips 25-50 Miles': 'float64',
                                                    'Trips 250-500 Miles': 'float64',
                                                    'Trips 3-5 Miles': 'float64',
                                                    'Trips 5-10 Miles': 'float64',
                                                    'Trips 50-100 Miles': 'float64',
                                                    'Trips <1 Mile': 'float64',
                                                    'Trips 500+ Miles': 'float64',
                                                    'Population Not Staying at Home': 'float64',
                                                    'Population Staying at Home': 'float64',
                                                    'Week': 'float64'})
df_distance = pd.read_csv('Trips_by_Distance.csv', dtype={'County Name': 'object',
                                                          'Number of Trips': 'float64',
                                                          'Number of Trips 1-3': 'float64',
                                                          'Number of Trips 10-25': 'float64',
                                                          'Number of Trips 100-250': 'float64',
                                                          'Number of Trips 25-50': 'float64',
                                                          'Number of Trips 250-500': 'float64',
                                                          'Number of Trips 3-5': 'float64',
                                                          'Number of Trips 5-10': 'float64',
                                                          'Number of Trips 50-100': 'float64',
                                                          'Number of Trips <1': 'float64',
                                                          'Number of Trips >=500': 'float64',
                                                          'Population Not Staying at Home': 'float64',
                                                          'Population Staying at Home': 'float64',
                                                          'Week': 'float64'})
# Data cleaning: drop rows with missing values
# (note: fillna(0) is effectively a no-op here, since dropna() has already removed every NaN)
df_data.dropna(inplace=True)
df_data.fillna(0, inplace=True)
# People vs Trips: total trips per distance band in Trips_Full_Data.csv
Trips = ['Trips 1-25 Miles', 'Trips 1-3 Miles', 'Trips 10-25 Miles', 'Trips 100-250 Miles',
         'Trips 25-50 Miles', 'Trips 250-500 Miles', 'Trips 3-5 Miles', 'Trips 5-10 Miles',
         'Trips 50-100 Miles', 'Trips <1 Mile', 'Trips 500+ Miles']
# Sum each distance column over all rows
peoplevtrips = df_data[Trips].sum()
print("People vs Trips:", peoplevtrips)
# People vs Distance: total trips per distance band in Trips_by_Distance.csv
distance_trips = ['Number of Trips <1', 'Number of Trips 1-3', 'Number of Trips 3-5', 'Number of Trips 5-10',
                  'Number of Trips 10-25', 'Number of Trips 25-50', 'Number of Trips 50-100', 'Number of Trips 100-250',
                  'Number of Trips 250-500', 'Number of Trips >=500']
# Sum each distance column over all rows
peoplevdistance = df_distance[distance_trips].sum()
print("People vs Distance:", peoplevdistance)
# Reshape the summed totals for scikit-learn.
# The last element of peoplevtrips is dropped because it has 11 distance bins
# while peoplevdistance has only 10, and the regression needs equal-length arrays.
X = peoplevtrips[:-1].values.reshape(-1, 1)
y = peoplevdistance.values
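# Hypothetical sanity check (not part of the original script): show which bins end up
# paired element-by-element after the slice above. The two column lists are ordered
# differently (e.g. 'Trips 1-25 Miles' is paired with 'Number of Trips <1'), so this
# print makes the positional pairing that the regression relies on explicit.
for trips_label, distance_label in zip(peoplevtrips.index[:-1], peoplevdistance.index):
    print(trips_label, "<->", distance_label)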
# Linear model
model = LinearRegression()
model.fit(X, y)
print("Linear model:")
r_sq = model.score(X, y)
print("Coefficient of determination (R^2):", r_sq)
print("Intercept:", model.intercept_)
print("Coefficient(slope):", model.coef_)
y_pred = model.predict(X)
print("Predicted response:\n", y_pred)
# Results:
# Linear model:
#   Coefficient of determination (R^2): 0.2815917064454585
#   Intercept: 146845315292.5589
#   Coefficient (slope): [63.90150459]
#   Predicted response:
#     [6.01113971e+11, 3.12116115e+11, 2.50209167e+11, 1.49909451e+11,
#      1.77780923e+11, 1.47663555e+11, 2.28057152e+11, 2.51267483e+11,
#      1.55289788e+11, 2.92657976e+11]
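# Optional illustration (assumes matplotlib is available; not part of the original script):
# scatter the paired bin totals and overlay the fitted line from the linear model above.
import matplotlib.pyplot as plt
plt.scatter(X, y, label='observed bin totals')
plt.plot(X, y_pred, color='red', label='linear fit')
plt.xlabel('Trips_Full_Data.csv bin totals')
plt.ylabel('Trips_by_Distance.csv bin totals')
plt.legend()
plt.show()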
print("Training linear model")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
linear_model = LinearRegression()
linear_model.fit(X_train, y_train)
y_pred = linear_model.predict(X_test)
r_squared = r2_score(y_test, y_pred)
mse = mean_squared_error(y_test, y_pred)
print("Trained linear model R-squared value (R^2):", r_squared)
print("Mean Squared Error (MSE):", mse)
# Results:
# Training linear model
#   Trained linear model R-squared value (R^2): 0.24003353124357718
#   Mean Squared Error (MSE): 7.723222074854258e+22
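# Added check (not in the original script): with only 10 paired totals, test_size=0.2
# leaves a 2-row test set, so the R^2 and MSE reported above are very sensitive to
# the choice of random_state.
print("Train rows:", len(X_train), "| Test rows:", len(X_test))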
poly = PolynomialFeatures(degree=2)
X_poly = poly.fit_transform(X)
poly_model = LinearRegression()
poly_model.fit(X_poly, y)
print("Polynomial model:")
y_pred = poly_model.predict(X_poly)
print("Predicted response:\n", y_pred)
print("Intercept:", poly_model.intercept_)
print("Coefficients:", poly_model.coef_)
r_sq = poly_model.score(X_poly, y)
print("Coefficient of determination (R^2):", r_sq)
# Results:
# Polynomial model:
#   Predicted response:
#     [6.48303516e+11, 2.62015333e+11, 2.21370704e+11, 1.87051715e+11,
#      1.92675848e+11, 1.86729617e+11, 2.10435900e+11, 2.21940724e+11,
#      1.87902869e+11, 2.47639354e+11]
#   Intercept: 186617127217.98172
#   Coefficients: [0.00000000e+00, 8.68369156e+00, 7.91421328e-09]
#   Coefficient of determination (R^2): 0.30388237649541094
# Train and evaluate the polynomial model on a train/test split
print("Training polynomial model")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
poly = PolynomialFeatures(degree=2)
X_train_poly = poly.fit_transform(X_train)
X_test_poly = poly.transform(X_test)
# Fit a linear regression on the degree-2 polynomial features
poly_model = LinearRegression()
poly_model.fit(X_train_poly, y_train)
y_pred = poly_model.predict(X_test_poly)
# Evaluate the model
r_squared = r2_score(y_test, y_pred)
mse = mean_squared_error(y_test, y_pred)
print("Trained model R value (R^2):", r_squared)
print("Mean squared error:", mse)
# Results:
# Training polynomial model
#   Trained polynomial model R-squared value (R^2): -1.0532504215161564
#   Mean squared error: 2.086632717704806e+23
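# A minimal sketch of leave-one-out cross-validation as an alternative to the single
# train/test split used above (assumption: X and y are the same summed totals as before).
# Every sample serves as the test point exactly once, which gives a less split-dependent
# error estimate than a 2-row test set; neg_mean_squared_error is used because R^2 is
# undefined on a single test sample.
from sklearn.model_selection import cross_val_score, LeaveOneOut
loo_mse = -cross_val_score(LinearRegression(), X, y,
                           cv=LeaveOneOut(), scoring='neg_mean_squared_error')
print("Leave-one-out MSE per fold:", loo_mse)
print("Mean leave-one-out MSE:", loo_mse.mean())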