import numpy as np
from sklearn.metrics import (
    accuracy_score,
    auc,
    classification_report,
    cohen_kappa_score,
    confusion_matrix,
    f1_score,
    precision_score,
    recall_score,
    roc_curve,
)

def CalculateClsScore(prediction, label):
    """Print Cohen's kappa, the confusion matrix, macro P/R/F1, and a full
    per-class classification report; return the kappa value."""
    kappa_val = cohen_kappa_score(label, prediction)
    conf_matrix = confusion_matrix(label, prediction)
    print('kappa val is', kappa_val)
    print('conf_matrix')
    print(conf_matrix)

    # Macro averaging weights every class equally, regardless of class support.
    f1 = f1_score(label, prediction, average='macro')
    p = precision_score(label, prediction, average='macro')
    r = recall_score(label, prediction, average='macro')

    print(classification_report(label, prediction))
    print('f1 and p and r is', f1, p, r)
    return kappa_val
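

# A minimal sanity check for CalculateClsScore on a tiny hand-made 3-class
# problem. The labels and predictions below are illustrative assumptions only,
# not from any real dataset; the demo is invoked from the __main__ block at
# the bottom of the file.
def _demo_cls_score():
    toy_label = [0, 0, 1, 1, 2, 2]
    toy_pred = [0, 1, 1, 1, 2, 0]
    CalculateClsScore(toy_pred, toy_label)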

def CalculateAuc(probs, label):
    """Compute ROC AUC from binary labels and predicted probabilities."""
    fpr, tpr, thresholds = roc_curve(label, probs)
    auc_value = auc(fpr, tpr)
    print('auc_value', auc_value)
    return auc_value
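

# A quick check for CalculateAuc: binary labels paired with illustrative
# probability scores (assumed values, not from a real model); invoked from
# the __main__ block at the bottom of the file. Expected AUC here is 0.75.
def _demo_auc():
    toy_label = [0, 0, 1, 1]
    toy_probs = [0.1, 0.4, 0.35, 0.8]
    CalculateAuc(toy_probs, toy_label)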


def CalculateClsScoreByTh(probs, label, th=0.05, acc_flag=False):
    """Sweep decision thresholds 0, th, 2*th, ... up to (but not including) 1,
    binarize the probabilities at each threshold, and report the metrics."""
    nums = int(1 / th)
    for idx in range(nums):
        print('=' * 60)
        current_th = th * idx
        print('Current th value is', current_th)
        # Binarize: predict the positive class when the score exceeds the threshold.
        current_pred = [int(val > current_th) for val in probs]
        CalculateClsScore(current_pred, label)
        if acc_flag:
            # Scale to a percentage before rounding so the printed value is exact.
            acc_value = accuracy_score(np.squeeze(current_pred), np.squeeze(label))
            print('*' * 30 + ' acc val is ' + str(round(acc_value * 100, 2)) + '% ' + '*' * 30)
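

# Minimal driver tying the sketches above together. The synthetic labels and
# probabilities here are assumptions for illustration, not outputs of any
# real model.
if __name__ == '__main__':
    _demo_cls_score()
    _demo_auc()

    # Synthetic binary problem: scores loosely correlated with the labels so
    # that the threshold sweep produces non-trivial metrics at each step.
    rng = np.random.default_rng(0)
    label = rng.integers(0, 2, size=100)
    probs = label * 0.5 + rng.random(100) * 0.5
    CalculateClsScoreByTh(probs, label, th=0.25, acc_flag=True)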