get_results.py
import itertools
import sys

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, auc, confusion_matrix


def plot_roc_auc(y_true, y_pred):
    """
    This function plots the ROC curves and provides the scores.
    """
    # initialize dictionaries and array
    fpr = dict()
    tpr = dict()
    roc_auc = np.zeros(3)

    # prepare for figure
    plt.figure()
    colors = ['aqua', 'cornflowerblue']

    # for both classification tasks (categories 1 and 2)
    for i in range(2):
        # obtain ROC curve
        fpr[i], tpr[i], _ = roc_curve(y_true[:, i], y_pred[:, i])
        # obtain ROC AUC
        roc_auc[i] = auc(fpr[i], tpr[i])
        # plot ROC curve
        plt.plot(fpr[i], tpr[i], color=colors[i], lw=2,
                 label='ROC curve for task {d} (area = {f:.2f})'.format(d=i + 1, f=roc_auc[i]))

    # get score for category 3 (average of the two task scores)
    roc_auc[2] = np.average(roc_auc[:2])

    # format figure
    plt.plot([0, 1], [0, 1], 'k--', lw=2)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC curves')
    plt.legend(loc="lower right")
    plt.show()

    # print scores
    for i in range(3):
        print('Category {d} Score: {f:.3f}'.format(d=i + 1, f=roc_auc[i]))
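
# Illustrative call, kept as comments so the module still runs exactly as before.
# The arrays below are made-up examples and are not part of the original script:
# each column holds the binary labels / predicted probabilities for one task.
#
#   y_true_demo = np.array([[0, 1], [1, 0], [1, 1], [0, 0]])
#   y_pred_demo = np.array([[0.1, 0.9], [0.8, 0.2], [0.7, 0.6], [0.3, 0.4]])
#   plot_roc_auc(y_true_demo, y_pred_demo)
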
def plot_confusion_matrix(y_true, y_pred, thresh, classes):
    """
    This function plots the (normalized) confusion matrix.
    """
    # obtain class predictions from probabilities
    y_pred = (y_pred >= thresh) * 1
    # obtain (unnormalized) confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # normalize confusion matrix by the number of true samples in each class
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title('Confusion matrix')
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # pick a cutoff so the cell annotations stay readable against the colormap
    text_thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], '.2f'),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > text_thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
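
# Illustrative call, also left as comments. The labels and probabilities below are
# hypothetical single-task examples; 0.5 mirrors the script's default threshold.
#
#   y_true_demo = np.array([0, 0, 1, 1])
#   y_pred_demo = np.array([0.2, 0.6, 0.8, 0.4])
#   plot_confusion_matrix(y_true_demo, y_pred_demo, 0.5, ['benign', 'malignant'])
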
if __name__ == "__main__":
    # first argument: path to the predictions CSV; optional second argument: threshold
    preds_path = sys.argv[1]
    if len(sys.argv) == 3:
        thresh = float(sys.argv[2])
    else:
        thresh = 0.5

    # get ground truth labels for test dataset
    truth = pd.read_csv('ground_truth.csv')
    y_true = truth[["task_1", "task_2"]].to_numpy()

    # get model predictions for test dataset
    y_pred = pd.read_csv(preds_path)
    y_pred = y_pred[["task_1", "task_2"]].to_numpy()

    # plot ROC curves and print scores
    plot_roc_auc(y_true, y_pred)

    # plot confusion matrix for task 1
    classes = ['benign', 'malignant']
    plot_confusion_matrix(y_true[:, 0], y_pred[:, 0], thresh, classes)
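
# Usage sketch (assumptions: ground_truth.csv sits in the working directory and the
# predictions CSV has columns task_1 and task_2 holding probabilities in [0, 1];
# my_predictions.csv is a placeholder name, not a file shipped with the script):
#
#   python get_results.py my_predictions.csv        # default confusion-matrix threshold of 0.5
#   python get_results.py my_predictions.csv 0.3    # custom threshold as the second argument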