SVM and Logistic Regression Classification with scikit-learn
# Code 1: Linear SVM on the banknote authentication dataset
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

# Load dataset
ds = pd.read_csv("bill_authentication.csv")

# Separate the target from the features before normalizing,
# so the class label does not leak into the feature matrix
y = ds.pop("Class").values

# Min-max normalize the feature columns to the [0, 1] range
bnorm = (ds - ds.min()) / (ds.max() - ds.min())
x = bnorm.values

# Split the data into training and testing sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

# Build the model
svmclassifier = SVC(kernel='linear')

# Train the model
svmclassifier.fit(x_train, y_train)

# Evaluate the model on the test set
y_pred = svmclassifier.predict(x_test)
print("Test accuracy:", accuracy_score(y_test, y_pred))


# Code 2: Logistic regression on the Iris dataset
from sklearn import datasets
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics

# Load the dataset; iris.keys() returns
# dict_keys(['data', 'target', 'frame', 'target_names', 'DESCR',
#            'feature_names', 'filename', 'data_module'])
iris = datasets.load_iris()

# Combine the feature matrix and the numeric target into one DataFrame
iris = pd.DataFrame(
    data=np.c_[iris['data'], iris['target']],
    columns=iris['feature_names'] + ['target']
)
iris.head(10)

# Map the numeric target (0, 1, 2) to species names
species = []
for i in range(len(iris['target'])):
    if iris['target'][i] == 0:
        species.append("setosa")
    elif iris['target'][i] == 1:
        species.append("versicolor")
    else:
        species.append("virginica")
iris['species'] = species

iris.groupby('species').size()
iris.describe()

# Subset the data by species for plotting
setosa = iris[iris.species == "setosa"]
versicolor = iris[iris.species == "versicolor"]
virginica = iris[iris.species == "virginica"]

fig, ax = plt.subplots()
fig.set_size_inches(13, 7)  # adjusting the length and width of the plot

# labels and scatter points
ax.scatter(setosa['petal length (cm)'], setosa['petal width (cm)'],
           label="Setosa", facecolor="blue")
ax.scatter(versicolor['petal length (cm)'], versicolor['petal width (cm)'],
           label="Versicolor", facecolor="green")
ax.scatter(virginica['petal length (cm)'], virginica['petal width (cm)'],
           label="Virginica", facecolor="red")
ax.set_xlabel("petal length (cm)")
ax.set_ylabel("petal width (cm)")
ax.grid()
ax.set_title("Iris petals")
ax.legend()

# Drop the target and species columns since we only need the measurements
X = iris.drop(['target', 'species'], axis=1)

# Convert to a NumPy array, keeping only petal length and petal width
X = X.to_numpy()[:, (2, 3)]
y = iris['target']

# Split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
                                                    random_state=42)

log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)

training_prediction = log_reg.predict(X_train)
test_prediction = log_reg.predict(X_test)

print("Precision, Recall, Confusion matrix, in training\n")
# Precision and recall scores
print(metrics.classification_report(y_train, training_prediction, digits=3))
# Confusion matrix
print(metrics.confusion_matrix(y_train, training_prediction))

print("Precision, Recall, Confusion matrix, in testing\n")
# Precision and recall scores
print(metrics.classification_report(y_test, test_prediction, digits=3))
# Confusion matrix
print(metrics.confusion_matrix(y_test, test_prediction))
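If you want to go one step beyond the train/test reports above and use the fitted logistic regression for inference, a minimal sketch is shown below. The new_flower variable name and the petal measurements (4.8 cm length, 1.6 cm width) are made-up values of my own for illustration, not part of the original scripts; the sketch assumes log_reg from Code 2 has already been trained.

# Follow-up sketch: classifying a new, hypothetical flower with the fitted model
import numpy as np

new_flower = np.array([[4.8, 1.6]])  # [petal length (cm), petal width (cm)], assumed values
predicted_class = log_reg.predict(new_flower)            # numeric class label (0, 1, or 2)
class_probabilities = log_reg.predict_proba(new_flower)  # probability for each class

# Same class ordering used when the species column was built above
target_names = ['setosa', 'versicolor', 'virginica']
print("Predicted species:", target_names[int(predicted_class[0])])
print("Class probabilities:", class_probabilities)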