# Keras regression on the California housing dataset (housing.csv)
"""Train a small Keras regression model on the California housing dataset.

Loads ``housing.csv``, min-max normalizes the numeric features, trains a
two-layer dense network with MAE loss, plots the training/validation loss
curves, and reports the final test loss.
"""
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Load dataset
ds = pd.read_csv("housing.csv")

# Drop the categorical column and the one with missing values so the
# remaining frame is purely numeric and NaN-free.
ds.pop("ocean_proximity")
ds.pop("total_bedrooms")

# Extract the target BEFORE normalizing the features.  The original code
# popped it afterwards, so the (normalized) label leaked into the feature
# matrix and the model was trained on its own answer.
target = ds.pop("median_house_value")
y = target.values

# Min-max normalize features to [0, 1].
# (The original had a stray "- 1" in the numerator, shifting every feature
# out of range: correct form is (x - min) / (max - min).)
hnorm = (ds - ds.min()) / (ds.max() - ds.min())
x = hnorm.values

# Split the data into training and testing sets.
# random_state makes the split (and hence the reported metrics) reproducible.
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.3, random_state=42
)

# Build the model.  The input width is taken from the data (7 features
# remain after dropping two columns and the target from the original 10)
# instead of being hard-coded.
model = keras.Sequential([
    keras.layers.Dense(16, activation="relu", input_shape=(x.shape[1],)),
    keras.layers.Dense(1),
])
model.compile(loss="mae", optimizer="sgd")

# Train the model and capture the history for plotting.
history = model.fit(
    x_train, y_train,
    epochs=10,
    verbose=1,
    validation_data=(x_test, y_test),
)

# Plot training loss and validation loss per epoch.
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Training and Validation Loss vs Epoch')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()

# Evaluate the model on the held-out test set.
evaluation_result = model.evaluate(x_test, y_test)
print("Test Loss:", evaluation_result)
# (end of script)