import numpy as np

# Softmax function
def softmax(z):
    # Shift by the row-wise max before exponentiating for numerical
    # stability; this does not change the result.
    exp_scores = np.exp(z - np.max(z, axis=1, keepdims=True))
    return exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
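
# Quick sanity check (added here, not in the original paste): each row of a
# softmax output is a probability distribution and sums to 1.
assert np.allclose(softmax(np.array([[0.0, 1.0], [2.0, -1.0]])).sum(axis=1), 1.0)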

# Cross-entropy loss function
def cross_entropy_loss(probs, y):
    batch_size = len(y)
    correct_logprobs = -np.log(probs[range(batch_size), y])
    return np.mean(correct_logprobs)
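
# Baseline check (an addition, not in the original): with K classes and
# uniform predictions the cross-entropy is ln(K); for K = 3 that is about
# 1.0986, so a trained 3-class model should drive the loss below this value.
uniform_probs = np.full((4, 3), 1.0 / 3.0)
assert np.isclose(cross_entropy_loss(uniform_probs, np.array([0, 1, 2, 0])), np.log(3))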

class NeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # Initialize weights and biases
        self.W1 = np.random.randn(self.input_size, self.hidden_size)
        self.b1 = np.zeros((1, self.hidden_size))
        self.W2 = np.random.randn(self.hidden_size, self.output_size)
        self.b2 = np.zeros((1, self.output_size))
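
    # Note on initialization (an added aside, not part of the original):
    # unscaled randn weights can saturate the sigmoid as input_size grows; a
    # common alternative is Xavier-style scaling, e.g.
    #   self.W1 = np.random.randn(input_size, hidden_size) / np.sqrt(input_size)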

    def forward(self, X):
        # Forward propagation
        self.z1 = np.dot(X, self.W1) + self.b1
        self.a1 = self.sigmoid(self.z1)
        self.z2 = np.dot(self.a1, self.W2) + self.b2
        # Apply softmax activation function
        self.probs = softmax(self.z2)
        return self.probs
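
    # Shape reference (added comment): for a batch of N samples, z1 and a1
    # are (N, hidden_size); z2 and probs are (N, output_size), with each row
    # of probs summing to 1.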

    def backward(self, X, y, learning_rate=0.01):
        # Backpropagation
        batch_size = len(X)
        # Gradient of the loss w.r.t. z2 is probs - one_hot(y). Copy first so
        # we do not mutate self.probs in place: the training loop still holds
        # a reference to it for computing the printed loss.
        delta3 = self.probs.copy()
        delta3[range(batch_size), y] -= 1
        dW2 = np.dot(self.a1.T, delta3)
        db2 = np.sum(delta3, axis=0, keepdims=True)
        # Backprop through the sigmoid: sigmoid'(z1) = a1 * (1 - a1)
        delta2 = np.dot(delta3, self.W2.T) * (self.a1 * (1 - self.a1))
        dW1 = np.dot(X.T, delta2)
        db1 = np.sum(delta2, axis=0, keepdims=True)
        # Update weights and biases. The gradients are summed over the batch,
        # so the effective step size scales with batch size; dividing the
        # deltas by batch_size would match the mean loss printed above.
        self.W1 -= learning_rate * dW1
        self.b1 -= learning_rate * db1
        self.W2 -= learning_rate * dW2
        self.b2 -= learning_rate * db2

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def predict(self, X):
        # Make predictions
        return np.argmax(self.forward(X), axis=1)
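
# Optional numerical gradient check (a debugging sketch added here, not part
# of the original): compare the analytic softmax/cross-entropy gradient for
# one entry of W2 against a centered finite difference. The helper name and
# the choice of entry (0, 0) are illustrative.
def grad_check_W2(model, X, y, eps=1e-5):
    probs = model.forward(X)
    batch_size = len(X)
    # Analytic gradient of the summed loss w.r.t. W2, as in backward()
    delta3 = probs.copy()
    delta3[range(batch_size), y] -= 1
    analytic = np.dot(model.a1.T, delta3)[0, 0]
    # Centered finite difference on the same entry of W2
    original = model.W2[0, 0]
    model.W2[0, 0] = original + eps
    loss_plus = cross_entropy_loss(model.forward(X), y) * batch_size
    model.W2[0, 0] = original - eps
    loss_minus = cross_entropy_loss(model.forward(X), y) * batch_size
    model.W2[0, 0] = original
    numerical = (loss_plus - loss_minus) / (2 * eps)
    return analytic, numerical
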
# Generate some random data for testing
np.random.seed(0)
X = np.random.randn(300, 10) # 300 samples, 10 features
y = np.random.randint(0, 3, 300) # 3 classes
# Initialize and train the neural network
input_size = 10
hidden_size = 5
output_size = 3
model = NeuralNetwork(input_size, hidden_size, output_size)
# Training loop
num_epochs = 1000
for epoch in range(num_epochs):
    # Forward pass
    probs = model.forward(X)
    # Backward pass
    model.backward(X, y)
    # Print the loss
    if epoch % 100 == 0:
        loss = cross_entropy_loss(probs, y)
        print(f'Epoch {epoch}, Loss: {loss:.4f}')

# Evaluate the trained model (note: on the same data it was trained on)
predictions = model.predict(X)
accuracy = np.mean(predictions == y)
print(f'Accuracy: {accuracy}')
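
# Note: X and y above are pure noise and accuracy is measured on the training
# set, so it reflects memorization rather than generalization. A sketch of a
# held-out evaluation (an addition; the split point and variable names are
# illustrative):
split = 240
X_train, X_test = X[:split], X[split:]
y_train, y_test = y[:split], y[split:]
holdout_model = NeuralNetwork(input_size, hidden_size, output_size)
for _ in range(num_epochs):
    holdout_model.forward(X_train)
    holdout_model.backward(X_train, y_train)
test_accuracy = np.mean(holdout_model.predict(X_test) == y_test)
print(f'Held-out accuracy: {test_accuracy:.3f} (chance on random labels is about 0.33)')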