1
import numpy as np 
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split  
from sklearn import metrics

dataset = load_iris()
X = dataset.data
y = dataset.target
# Hold out 30% of the samples for testing
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3)
classifier = KNeighborsClassifier(n_neighbors=5).fit(Xtrain, ytrain)
ypred = classifier.predict(Xtest)
print('%-25s %-25s %-25s' % ('Original Label', 'Predicted Label', 'Correct/Wrong'))
for label, pred in zip(ytest, ypred):
    result = 'Correct' if label == pred else 'Wrong'
    print('%-25s %-25s %-25s' % (label, pred, result))
print ("-------------------------------------------------------------------------")
print("\nConfusion Matrix:\n",metrics.confusion_matrix(ytest, ypred))  
print ("-------------------------------------------------------------------------")
print("\nClassification Report:\n",metrics.classification_report(ytest, ypred)) 
print ("-------------------------------------------------------------------------")
print('Accuracy of the classifier is %0.2f' % metrics.accuracy_score(ytest, ypred))
print ("-------------------------------------------------------------------------")

2
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
import sklearn.metrics as metrics
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

names = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width', 'Class']

dataset = pd.read_csv("data.csv", names=names)

X = dataset.iloc[:, :-1]
label = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
y = [label[c] for c in dataset['Class']]
#print(y)
plt.figure(figsize=(14,7))
colormap=np.array(['red','lime','black'])

plt.subplot(1,3,1)
plt.title('Real')
plt.scatter(X.Petal_Length,X.Petal_Width,c=colormap[y])

model=KMeans(n_clusters=3, random_state=0).fit(X)
plt.subplot(1,3,2)
plt.title('KMeans')
plt.scatter(X.Petal_Length,X.Petal_Width,c=colormap[model.labels_])
# Note: KMeans labels are arbitrary cluster ids, so this accuracy is only
# meaningful when the cluster numbering happens to line up with the classes.
print('The accuracy score of K-Means: ', metrics.accuracy_score(y, model.labels_))
print('The Confusion matrix of K-Means:\n', metrics.confusion_matrix(y, model.labels_))

gmm=GaussianMixture(n_components=3, random_state=0).fit(X)
y_cluster_gmm=gmm.predict(X)
plt.subplot(1,3,3)
plt.title('GMM Classification')
plt.scatter(X.Petal_Length,X.Petal_Width,c=colormap[y_cluster_gmm])

print('The accuracy score of EM: ', metrics.accuracy_score(y, y_cluster_gmm))
print('The Confusion matrix of EM:\n', metrics.confusion_matrix(y, y_cluster_gmm))
print('Observation: The GMM using EM algorithm based clustering matched the true labels more closely than KMeans.')
plt.show()
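# Because cluster ids are arbitrary, a permutation-invariant score such as the
# adjusted Rand index is a fairer comparison (a sketch, not in the original
# program):
print('ARI KMeans:', metrics.adjusted_rand_score(y, model.labels_))
print('ARI GMM:   ', metrics.adjusted_rand_score(y, y_cluster_gmm))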

3
import numpy as np
import matplotlib.pyplot as plt

def local_regression(x0, X, Y, tau):
    # Add a bias term to the query point and the design matrix
    x0 = np.array([1, x0])
    X = np.asarray([[1, i] for i in X])
    # Weight each training point with a Gaussian kernel centred at x0
    xw = X.T * np.exp(np.sum((X - x0) ** 2, axis=1) / (-2 * tau * tau))
    # Weighted least squares fit, then predict at x0
    beta = np.linalg.pinv(xw @ X) @ xw @ Y
    return x0 @ beta

def draw(tau):
    prediction = [local_regression(x0, X, Y, tau) for x0 in domain]
    plt.plot(X, Y, 'o', color='black')
    plt.plot(domain, prediction, color='red')
    plt.show()

X = np.linspace(-3, 3, num=1000)
domain = X
Y = np.log(np.abs(X ** 2 - 1) + .5)

draw(10)
draw(0.1)
draw(0.01)
draw(0.001)
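# The same fit written with an explicit diagonal weight matrix, which makes
# the weighted least squares form beta = (X^T W X)^+ X^T W y easier to see
# (an illustrative sketch; local_regression_explicit is not part of the
# original program):
def local_regression_explicit(x0, X, Y, tau):
    x0v = np.array([1, x0])
    Xm = np.column_stack([np.ones_like(X), X])
    W = np.diag(np.exp(np.sum((Xm - x0v) ** 2, axis=1) / (-2 * tau * tau)))
    beta = np.linalg.pinv(Xm.T @ W @ Xm) @ Xm.T @ W @ Y
    return x0v @ beta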

4
import numpy as np

sigmoid = lambda x: 1 / (1 + np.exp(-x))
X, y = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]), np.array([[0], [1], [1], [0]])
wh, bh, wo, bo = np.random.randn(2, 2), np.zeros((1, 2)), np.random.randn(2, 1), np.zeros((1, 1))

def forward(x): return sigmoid(np.dot(sigmoid(np.dot(x, wh) + bh), wo) + bo)

print("Before:", forward(X))
for _ in range(10000):
    h = sigmoid(np.dot(X, wh) + bh)       # hidden activations
    o = sigmoid(np.dot(h, wo) + bo)       # output activations
    eo = y - o                            # output error
    do = eo * o * (1 - o)                 # output delta
    dh = np.dot(do, wo.T) * h * (1 - h)   # hidden delta
    wo += h.T.dot(do) * 0.1
    bo += np.sum(do, 0, keepdims=True) * 0.1
    wh += X.T.dot(dh) * 0.1
    bh += np.sum(dh, 0, keepdims=True) * 0.1

print("After:", forward(X))


import numpy as np
X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
y = np.array(([92], [86], [89]), dtype=float)
X = X/np.amax(X, axis=0)  # normalize each feature by its column-wise maximum
y = y/100

# Sigmoid activation
def sigmoid(x):
    return 1/(1 + np.exp(-x))

# Derivative of the sigmoid, assuming x is already a sigmoid output
def derivatives_sigmoid(x):
    return x * (1 - x)


epoch = 5000               # number of training iterations
lr = 0.1                   # learning rate
inputlayer_neurons = 2     # number of input features
hiddenlayer_neurons = 3    # hidden layer size
output_neurons = 1         # output layer size

# Weight and bias initialization: uniform random draws of the given shapes
wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))
bh = np.random.uniform(size=(1, hiddenlayer_neurons))
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))
bout = np.random.uniform(size=(1, output_neurons))


for i in range(epoch):
    # Forward propagation
    hinp = np.dot(X, wh) + bh
    hlayer_act = sigmoid(hinp)
    outinp = np.dot(hlayer_act, wout) + bout
    output = sigmoid(outinp)

    # Backpropagation
    EO = y - output
    outgrad = derivatives_sigmoid(output)
    d_output = EO * outgrad
    EH = d_output.dot(wout.T)
    hiddengrad = derivatives_sigmoid(hlayer_act)
    d_hiddenlayer = EH * hiddengrad

    # Update weights and biases
    wout += hlayer_act.T.dot(d_output) * lr
    bout += np.sum(d_output, axis=0, keepdims=True) * lr
    wh += X.T.dot(d_hiddenlayer) * lr
    bh += np.sum(d_hiddenlayer, axis=0, keepdims=True) * lr
    
print("Input: \n" + str(X))
print("Actual Output: \n" + str(y))
print("Predicted Output: \n" ,output)

5
import random
import string


target_string = "GENETIC ALGORITHM"
target_length = len(target_string)


population_size = 100
mutation_rate = 0.01


# Random candidate drawn from uppercase letters, space, and comma
def generate_random_string(length):
    return ''.join(random.choice(string.ascii_uppercase + ' ,') for _ in range(length))

def initial_population():
    return [generate_random_string(target_length) for _ in range(population_size)]

# Fitness = number of characters matching the target at the same position
def calculate_fitness(individual):
    return sum(1 for i in range(target_length) if individual[i] == target_string[i])

# Fitness-proportional (roulette wheel) selection; assumes at least one
# individual has non-zero fitness, which is virtually certain at this
# population size
def select_parents(population):
    return random.choices(population, weights=[calculate_fitness(individual) for individual in population], k=2)


def crossover(parent1, parent2):
    crossover_point = random.randint(1, target_length - 1)
    child = parent1[:crossover_point] + parent2[crossover_point:]
    return child


def mutate(individual):
    return ''.join(random.choice(string.ascii_uppercase + ' ,') if random.random() < mutation_rate else char for char in individual)


population = initial_population()
generation = 1

while True:
    population = sorted(population, key=calculate_fitness, reverse=True)
    best_individual = population[0]
    print(f"Generation {generation}: {best_individual} (Fitness: {calculate_fitness(best_individual)})")

    if best_individual == target_string:
        break

    new_population = [best_individual]  # elitism: carry the best string forward unchanged

    for _ in range(population_size - 1):
        parent1, parent2 = select_parents(population)
        child = crossover(parent1, parent2)
        child = mutate(child)
        new_population.append(child)

    population = new_population
    generation += 1
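# An alternative selection scheme, sketched for comparison only: k-way
# tournament selection, which avoids the zero-total-fitness edge case of
# roulette selection (select_parents_tournament is not part of the program
# above):
def select_parents_tournament(population, k=5):
    return [max(random.sample(population, k), key=calculate_fitness) for _ in range(2)]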

6
import random


# A 4-state chain; actions move one step left (0) or right (1)
states = [0, 1, 2, 3]

# One value per state (a state-value table rather than a state-action Q-table)
q_table = {}
for state in states:
    q_table[state] = 0

alpha = 0.5  # learning rate
gamma = 0.9  # discount factor

# Q-learning style updates from randomly sampled states and actions
for i in range(100):
    state = random.choice(states)
    action = random.choice([0, 1])

    # Deterministic transition: move left or right, clipped to the chain ends
    if action == 0:
        new_state = max(state - 1, 0)
    else:
        new_state = min(state + 1, 3)

    reward = 1  # constant reward for every transition

    # Temporal-difference update of the state value
    q_table[state] += alpha * (reward + gamma * q_table[new_state] - q_table[state])

print(q_table)
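# With a state-value table like this one, a greedy move can be read off by
# comparing the values of the two reachable neighbors (a sketch under the
# same two-action chain as above; greedy_action is a hypothetical helper):
def greedy_action(state):
    left, right = max(state - 1, 0), min(state + 1, 3)
    return 0 if q_table[left] >= q_table[right] else 1

print([greedy_action(s) for s in states])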