Training a Variational Autoencoder (VAE) on Images with TensorFlow/Keras
import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense, Flatten, Lambda, Reshape
from tensorflow.keras.optimizers import Adam

# Load dataset: each subfolder of data_path is treated as a class label
def load_data(data_path, img_size=(128, 128)):
    images = []
    labels = []
    for label_folder in os.listdir(data_path):
        folder_path = os.path.join(data_path, label_folder)
        if os.path.isdir(folder_path):
            for img_file in os.listdir(folder_path):
                img_path = os.path.join(folder_path, img_file)
                img = cv2.imread(img_path)  # note: OpenCV loads images in BGR order
                if img is not None:
                    img = cv2.resize(img, img_size)
                    images.append(img / 255.0)  # normalize pixels to [0, 1]
                    labels.append(label_folder)
    return np.array(images), labels

# Load the data (train, validation, test); labels are unused by the autoencoder
train_images, _ = load_data(r'D:\Lastyear\CI\archive (2)\train')
val_images, _ = load_data(r'D:\Lastyear\CI\archive (2)\val')
test_images, _ = load_data(r'D:\Lastyear\CI\archive (2)\test')

# Size of the latent space
latent_dim = 128

# Reparameterization trick: z = mu + sigma * epsilon, with epsilon ~ N(0, I)
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=K.shape(z_mean))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon

# Encoder: flattens the image and maps it to the latent distribution parameters
encoder_input = Input(shape=(128, 128, 3), name="Encoder_Input")
x = Flatten()(encoder_input)
x = Dense(512, activation="relu")(x)
x = Dense(256, activation="relu")(x)
z_mean = Dense(latent_dim, name="z_mean")(x)
z_log_var = Dense(latent_dim, name="z_log_var")(x)
z = Lambda(sampling, name="Latent_Space")([z_mean, z_log_var])

encoder = Model(encoder_input, [z_mean, z_log_var, z], name="Encoder")
encoder.summary()

# Decoder: maps a latent vector back to a flattened image, then reshapes it
decoder_input = Input(shape=(latent_dim,), name="Decoder_Input")
x = Dense(256, activation="relu")(decoder_input)
x = Dense(512, activation="relu")(x)
x = Dense(128 * 128 * 3, activation="sigmoid")(x)  # match the flattened input size
decoder_output = Reshape((128, 128, 3), name="Decoder_Output")(x)

decoder = Model(decoder_input, decoder_output, name="Decoder")
decoder.summary()

# VAE model: encode, sample a latent vector, decode
vae_output = decoder(z)
vae = Model(encoder_input, vae_output, name="Variational_Autoencoder")

# VAE loss = reconstruction term + KL divergence. The KL term (between the
# approximate posterior N(z_mean, exp(z_log_var)) and the standard normal
# prior) is attached with add_loss; the reconstruction term is the pixel-wise
# MSE passed to compile(). Accuracy is not a meaningful metric for pixel
# regression, so no metrics are used.
kl_loss = -0.5 * tf.reduce_mean(
    tf.reduce_sum(1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var), axis=-1)
)
vae.add_loss(kl_loss)
vae.compile(optimizer=Adam(), loss="mse")

# Summary of the VAE model
vae.summary()

# Train the VAE: images serve as both input and reconstruction target
history = vae.fit(
    train_images, train_images,
    validation_data=(val_images, val_images),
    epochs=20,
    batch_size=64,
)

# Plot training and validation loss over epochs
loss = history.history['loss']
val_loss = history.history['val_loss']

plt.figure(figsize=(10, 6))
plt.plot(loss, label='Training Loss', marker='o')
plt.plot(val_loss, label='Validation Loss', marker='x')
plt.title('Variational Autoencoder Loss Over Epochs')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.grid(True)
plt.show()
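The script loads test_images but never uses them. A natural follow-up is a qualitative reconstruction check after training and, since the decoder accepts any latent vector, a demonstration of sampling from the prior. The sketch below assumes the variables defined in the script above (vae, decoder, test_images, latent_dim); n_show is a hypothetical display count, not part of the original code.

# Quick qualitative check: test originals vs. their VAE reconstructions.
# A minimal sketch, assuming the script above has already run to completion.
n_show = 5  # hypothetical number of examples to display
reconstructions = vae.predict(test_images[:n_show])

plt.figure(figsize=(2 * n_show, 4))
for i in range(n_show):
    plt.subplot(2, n_show, i + 1)           # top row: original test image
    plt.imshow(test_images[i][..., ::-1])   # BGR (OpenCV) -> RGB (matplotlib)
    plt.axis('off')
    plt.subplot(2, n_show, n_show + i + 1)  # bottom row: reconstruction
    plt.imshow(reconstructions[i][..., ::-1])
    plt.axis('off')
plt.suptitle('Test images (top) and VAE reconstructions (bottom)')
plt.show()

# Because the decoder maps any latent vector to an image, novel samples can be
# generated by decoding draws from the standard normal prior:
z_random = np.random.normal(size=(n_show, latent_dim))
generated = decoder.predict(z_random)

If the reconstructions are blurry, that is expected for a fully connected VAE with an MSE reconstruction term; a convolutional encoder/decoder is the usual next step.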