from sklearn.metrics import roc_curve, roc_auc_score, auc
import numpy as np
import matplotlib.pyplot as plt

# Placeholder variables for the predicted probabilities of Diabetes from both models
# Replace these with the actual probabilities from your models
# model1_probs = model1.predict_proba(X_test)[:,1]
# model2_probs = model2.predict_proba(X_test)[:,1]

# As an example, generate random labels and probabilities purely for demonstration;
# in practice, y_test comes from your train/test split and the probabilities from your models
np.random.seed(42)  # Seed for reproducibility
y_test = np.random.randint(0, 2, size=200)  # Synthetic binary labels standing in for real test labels
model1_probs = np.random.rand(y_test.shape[0])
model2_probs = np.random.rand(y_test.shape[0])

# Calculating ROC AUC scores
model1_auc = roc_auc_score(y_test, model1_probs)
model2_auc = roc_auc_score(y_test, model2_probs)

# Generating ROC curves
model1_fpr, model1_tpr, _ = roc_curve(y_test, model1_probs)
model2_fpr, model2_tpr, _ = roc_curve(y_test, model2_probs)
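
# Optional sanity check (added illustration): the trapezoidal area under the computed
# ROC curve, via the auc helper imported above, should match roc_auc_score
assert np.isclose(auc(model1_fpr, model1_tpr), model1_auc)
assert np.isclose(auc(model2_fpr, model2_tpr), model2_auc)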

# Plotting the ROC curves
plt.figure(figsize=(10, 8))
plt.plot(model1_fpr, model1_tpr, color='orange', label=f'Model 1 AUC = {model1_auc:.2f}')
plt.plot(model2_fpr, model2_tpr, color='blue', label=f'Model 2 AUC = {model2_auc:.2f}')
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend()
plt.show()

print(f"Model 1 AUC: {model1_auc:.3f}, Model 2 AUC: {model2_auc:.3f}")
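
# Minimal self-contained sketch (an assumption, not part of the original paste) of how the
# random placeholders above would be replaced with real model outputs. The synthetic
# dataset from make_classification and the two classifiers (LogisticRegression and
# RandomForestClassifier) are stand-ins; swap in your Diabetes features and fitted models.
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=500, n_features=8, random_state=42)  # Stand-in data
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42, stratify=y
)

model1 = LogisticRegression(max_iter=1000).fit(X_train, y_train)
model2 = RandomForestClassifier(random_state=42).fit(X_train, y_train)

# Probability of the positive class, as in the commented-out lines near the top
model1_probs = model1.predict_proba(X_test)[:, 1]
model2_probs = model2.predict_proba(X_test)[:, 1]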