import numpy as np

class DenseLayer:
    def __init__(self, input_size, output_size):
        self.weights = np.random.randn(input_size, output_size) * 0.01
        self.bias = np.zeros((1, output_size))

    def forward(self, input):
        self.input = input
        return np.dot(input, self.weights) + self.bias

    def backward(self, output_gradient, learning_rate):
        # Compute the gradient w.r.t. the input *before* updating the
        # weights; otherwise the returned gradient is taken through the
        # already-updated weights.
        input_gradient = np.dot(output_gradient, self.weights.T)
        weights_gradient = np.dot(self.input.T, output_gradient)
        self.weights -= learning_rate * weights_gradient
        self.bias -= learning_rate * np.sum(output_gradient, axis=0)
        return input_gradient

    def set_weights(self, weights, bias):
        self.weights = weights
        self.bias = bias
class ReLU:
    def forward(self, input):
        self.input = input
        return np.maximum(0, input)

    def backward(self, output_gradient, learning_rate):
        return output_gradient * (self.input > 0)
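
# Sketch (added): quick shape-contract check for the layers above.
# backward() takes dLoss/dOutput and must return dLoss/dInput, so the
# gradient should come back shaped like the layer's input. Toy sizes,
# learning_rate=0.0 so the check leaves the throwaway weights untouched.
_dense, _relu = DenseLayer(2, 3), ReLU()
_out = _relu.forward(_dense.forward(np.ones((4, 2))))
assert _out.shape == (4, 3)
assert _dense.backward(_relu.backward(np.ones((4, 3)), 0.0), 0.0).shape == (4, 2)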
class BatchNormalization:
    def __init__(self, size):
        self.gamma = np.ones((1, size))
        self.beta = np.zeros((1, size))
        # Initialize moving mean and variance (used at inference time)
        self.moving_mean = np.zeros((1, size))
        self.moving_variance = np.ones((1, size))
        # Keras BatchNormalization defaults to epsilon=1e-3; use the same
        # value so outputs match the Keras model loaded below.
        self.epsilon = 1e-3

    def set_weights(self, gamma, beta, moving_mean, moving_variance):
        self.gamma = gamma
        self.beta = beta
        self.moving_mean = moving_mean
        self.moving_variance = moving_variance

    def forward(self, input, training=False):
        # Default to inference mode: SequentialModel.forward never passes a
        # training flag, and batch statistics are meaningless for the
        # single-sample inference at the bottom of this file.
        if training:
            self.mean = np.mean(input, axis=0)
            self.variance = np.var(input, axis=0)
            self.normalized = (input - self.mean) / np.sqrt(self.variance + self.epsilon)
        else:
            # Use moving mean and variance for inference
            self.normalized = (input - self.moving_mean) / np.sqrt(self.moving_variance + self.epsilon)
        return self.gamma * self.normalized + self.beta
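
    # Sketch (added): a backward pass so SequentialModel.backward does not
    # crash on this layer if the model is ever trained. This is the standard
    # batch-norm gradient; it assumes forward() was last called with
    # training=True, so self.variance and self.normalized hold the current
    # batch statistics. Moving-average updates are still not implemented.
    def backward(self, output_gradient, learning_rate):
        batch_size = output_gradient.shape[0]
        dgamma = np.sum(output_gradient * self.normalized, axis=0)
        dbeta = np.sum(output_gradient, axis=0)
        dnorm = output_gradient * self.gamma
        std_inv = 1.0 / np.sqrt(self.variance + self.epsilon)
        input_gradient = std_inv / batch_size * (
            batch_size * dnorm
            - np.sum(dnorm, axis=0)
            - self.normalized * np.sum(dnorm * self.normalized, axis=0)
        )
        self.gamma -= learning_rate * dgamma
        self.beta -= learning_rate * dbeta
        return input_gradient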
class SequentialModel:
    def __init__(self, layers):
        self.layers = layers

    def forward(self, input):
        for layer in self.layers:
            input = layer.forward(input)
        return input

    def backward(self, output_gradient, learning_rate):
        for layer in reversed(self.layers):
            output_gradient = layer.backward(output_gradient, learning_rate)
model = SequentialModel([
    DenseLayer(2, 32),
    ReLU(),
    BatchNormalization(32),
    DenseLayer(32, 16),
    ReLU(),
    BatchNormalization(16),
    DenseLayer(16, 32),
    ReLU(),
    BatchNormalization(32),
    DenseLayer(32, 1),
])
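
# Sketch (added): shape check before loading weights; a (batch, 2) input
# should come out of the full stack as (batch, 1).
assert model.forward(np.zeros((4, 2))).shape == (4, 1)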
from tensorflow import keras

# Define the same architecture as the custom model above. Keras applies the
# ReLU activation inside Dense, before the following BatchNormalization, so
# the layer order matches Dense -> ReLU -> BatchNormalization.
keras_model = keras.Sequential([
    keras.layers.Dense(32, input_shape=(2,), activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.Dense(16, activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.Dense(32, activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.Dense(1),
])

# Load the trained weights
keras_model.load_weights('/home/gyulii/development/Python/test/NeuralNetwork_1521_28561.keras')

# Collect the parameters of every layer that has any: [kernel, bias] for
# Dense, [gamma, beta, moving_mean, moving_variance] for BatchNormalization.
weights_and_biases = [layer.get_weights() for layer in keras_model.layers if len(layer.get_weights()) > 0]
# Copy the Keras parameters into the corresponding custom layers. ReLU has
# no parameters and no Keras counterpart here, so it does not advance i.
i = 0
for layer in model.layers:
    if isinstance(layer, DenseLayer):
        # Dense layers carry 2 parameter arrays: kernel and bias
        if len(weights_and_biases[i]) == 2:
            weights, biases = weights_and_biases[i]
            layer.set_weights(weights, biases)
        i += 1
    elif isinstance(layer, BatchNormalization):
        # Batch normalization layers carry 4 parameter arrays
        if len(weights_and_biases[i]) == 4:
            gamma, beta, moving_mean, moving_variance = weights_and_biases[i]
            layer.set_weights(gamma, beta, moving_mean, moving_variance)
        i += 1
    elif isinstance(layer, ReLU):
        pass
# Example input: a single sample with an explicit batch dimension, shape (1, 2)
X = np.array([[0.0001, 0.01]])

# Forward pass (inference: BatchNormalization uses its moving statistics)
output = model.forward(X)
print(output)
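
# Sketch (added): optional parity check against Keras inference; assumes the
# weight file above loaded successfully. The two outputs should agree to
# within floating-point tolerance.
keras_output = keras_model.predict(X, verbose=0)
print(keras_output)
print('match:', np.allclose(output, keras_output, atol=1e-4))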