# Per-method evaluation bundle for the wet/dry model intercomparison.
# Each top-level key is a detection method; each value collects the raw
# predictions, the binarized wet/dry flags, the decision threshold, and
# classification metrics computed against the reference mask `ref_wd`.
# NOTE(review): assumes ref_wd, cml, metrics, skl (sklearn.metrics as skl),
# and cnn_torch_threshold are defined in earlier notebook cells — confirm.
compare_methods_dict = {
    'CNN Pycomlink Torch': {
        'prediction': cml.cnn_torch_pred.values,
        'wet_dry': cml.cnn_torch_wd.values,
        # FIX: trailing comma was missing here, which made the dict literal
        # a syntax error.
        'threshold': cnn_torch_threshold,
        # FIX: typo `cmlcml` -> `cml` (would have raised NameError).
        'roc': metrics.calculate_roc_curve(cml.cnn_torch_pred.values, ref_wd, 0, 1),
        # labels=[1, 0] puts the "wet" class first; normalize='true' yields
        # row-wise rates (TPR/FNR on the first row, FPR/TNR on the second).
        'confusion_matrix': skl.confusion_matrix(
            ref_wd, cml.cnn_torch_wd.values, labels=[1, 0], normalize='true'
        ).round(decimals=4),
        'ACC': np.round(skl.accuracy_score(ref_wd, cml.cnn_torch_wd), decimals=4),
        'MCC': np.round(skl.matthews_corrcoef(ref_wd, cml.cnn_torch_wd), decimals=4),
        'F1': np.round(skl.f1_score(ref_wd, cml.cnn_torch_wd), decimals=4),
    },
    'STD': {
        # ... same as above
    },
    # ... other methods
}
# TODO: Resolve the minor TODOs scattered throughout the
# Model_Intercomparison_Example notebook.