# A small fully connected neural network built from scratch with NumPy:
# dense layers with tanh activations, MSE loss, and plain gradient descent.
import numpy as np
import matplotlib.pyplot as plt  # used by the demo at the bottom of the file

class Network:
    def __init__(self, rate, function, epochs):
        self.layers = []
        self.learning_rate = rate
        # Stored for reference only: train() currently hard-codes MSE below.
        self.loss_function = function
        self.epochs = epochs

    def add_layer(self, layer):
        self.layers.append(layer)

    def train(self, train_x, train_y):
        for epoch in range(self.epochs):
            error_value = 0
            for j in range(len(train_x)):
                # forward pass: linear step, then activation, layer by layer
                predicted_value = train_x[j]
                for layer in self.layers:
                    predicted_value = layer.forward_prop(predicted_value)
                    predicted_value = layer.activation(predicted_value)

                # accumulate the loss and seed the backward pass with its gradient
                error_value += self.mse_error(predicted_value, train_y[j])
                error = self.mse_error_prime(predicted_value, train_y[j])

                # backward pass: undo the activation first, then the linear step
                for layer in reversed(self.layers):
                    error = layer.backward_activation(error, self.learning_rate)
                    error = layer.backward_prop(self.learning_rate, error)

            print(f"epoch {epoch + 1}/{self.epochs}  error={error_value / len(train_x):.6f}")

    def predict(self, input_values):
        samples = len(input_values)
        result = []

        for i in range(samples):
            # forward propagation only; no weight updates
            output = input_values[i]
            for layer in self.layers:
                output = layer.forward_prop(output)
                output = layer.activation(output)
            result.append(output)

        return result

    def mse_error(self, prediction, actual):
        # mean squared error over all output components
        return np.mean(np.power(actual - prediction, 2))

    def mse_error_prime(self, prediction, actual):
        # derivative of the MSE with respect to the prediction
        return 2 * (prediction - actual) / actual.size
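
# Backpropagation recap (added note, not from the original paste): each layer
# computes y = tanh(z) with z = x @ W + b. Given the upstream gradient dL/dy,
# the Layer methods below apply
#   dL/dz = dL/dy * (1 - tanh(z)**2)       (backward_activation)
#   dL/dW = x.T @ dL/dz,  dL/db = dL/dz    (backward_prop, gradient-descent step)
#   dL/dx = dL/dz @ W.T                    (returned to the previous layer)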

class Layer:
    def __init__(self, input_neurons, output_neurons, activation):
        # weights connecting every input to every output, initialised in [-0.5, 0.5)
        self.weights = np.random.rand(input_neurons, output_neurons) - 0.5
        self.biases = np.random.rand(1, output_neurons) - 0.5
        # Stored for reference only: activation()/backward_activation() hard-code tanh.
        self.activation_function = activation

    def forward_prop(self, input_values):
        # cache the input; backward_prop needs it for the weight gradient
        self.layer_values = input_values
        self.output = np.dot(self.layer_values, self.weights) + self.biases
        return self.output

    def backward_prop(self, learning_rate, gradient_value):
        # gradient with respect to this layer's input, handed to the previous layer
        next_step_error = np.dot(gradient_value, self.weights.T)
        # gradient with respect to the weights, then a plain gradient-descent step
        weights_error = np.dot(self.layer_values.T, gradient_value)
        self.weights -= weights_error * learning_rate
        self.biases -= gradient_value * learning_rate

        return next_step_error

    def activation(self, input_values):
        # cache the pre-activation values for the tanh derivative in the backward pass
        self.input_activation = input_values
        self.output = np.tanh(self.input_activation)
        return self.output

    def backward_activation(self, error_value, learning_rate):
        # tanh'(z) = 1 - tanh(z)**2; learning_rate is unused here but kept
        # so both backward methods share the same call shape
        return (1 - np.tanh(self.input_activation) ** 2) * error_value