# -*- coding: utf-8 -*-
"""some measures for evaluation of prediction, tests and model selection
Created on Tue Nov 08 15:23:20 2011
Author: Josef Perktold
License: BSD-3
"""
import numpy as np


def mse(x1, x2, axis=0):
    """mean squared error

    Parameters
    ----------
    x1, x2 : array_like
       The performance measure depends on the difference between these two
       arrays.
    axis : int
       axis along which the summary statistic is calculated

    Returns
    -------
    mse : ndarray or float
       mean squared error along given axis.

    Notes
    -----
    If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
    This uses ``numpy.asanyarray`` to convert the input. Whether this is the
    desired result or not depends on the array subclass, for example
    numpy matrices will silently produce an incorrect result.
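
    Examples
    --------
    A small worked example:

    >>> float(mse([1., 2., 3., 4.], [1., 2., 3., 8.]))
    4.0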
    """
    x1 = np.asanyarray(x1)
    x2 = np.asanyarray(x2)
    return np.mean((x1-x2)**2, axis=axis) 


def rmse(x1, x2, axis=0):
    """root mean squared error

    Parameters
    ----------
    x1, x2 : array_like
       The performance measure depends on the difference between these two
       arrays.
    axis : int
       axis along which the summary statistic is calculated

    Returns
    -------
    rmse : ndarray or float
       root mean squared error along given axis.

    Notes
    -----
    If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
    This uses ``numpy.asanyarray`` to convert the input. Whether this is the
    desired result or not depends on the array subclass, for example
    numpy matrices will silently produce an incorrect result.
    """
    x1 = np.asanyarray(x1)
    x2 = np.asanyarray(x2)
    return np.sqrt(mse(x1, x2, axis=axis)) 


def maxabs(x1, x2, axis=0):
    """maximum absolute error

    Parameters
    ----------
    x1, x2 : array_like
       The performance measure depends on the difference between these two
       arrays.
    axis : int
       axis along which the summary statistic is calculated

    Returns
    -------
    maxabs : ndarray or float
       maximum absolute difference along given axis.

    Notes
    -----
    If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
    This uses ``numpy.asanyarray`` to convert the input. Whether this is the
    desired result or not depends on the array subclass.
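
    Examples
    --------
    A small worked example:

    >>> float(maxabs([1., 2., 3.], [2., 2., 0.]))
    3.0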
    """
    x1 = np.asanyarray(x1)
    x2 = np.asanyarray(x2)
    return np.max(np.abs(x1-x2), axis=axis) 


def meanabs(x1, x2, axis=0):
    """mean absolute error

    Parameters
    ----------
    x1, x2 : array_like
       The performance measure depends on the difference between these two
       arrays.
    axis : int
       axis along which the summary statistic is calculated

    Returns
    -------
    meanabs : ndarray or float
       mean absolute difference along given axis.

    Notes
    -----
    If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
    This uses ``numpy.asanyarray`` to convert the input. Whether this is the
    desired result or not depends on the array subclass.
    """
    x1 = np.asanyarray(x1)
    x2 = np.asanyarray(x2)
    return np.mean(np.abs(x1-x2), axis=axis) 


def bias(x1, x2, axis=0):
    """bias, mean error

    Parameters
    ----------
    x1, x2 : array_like
       The performance measure depends on the difference between these two
       arrays.
    axis : int
       axis along which the summary statistic is calculated

    Returns
    -------
    bias : ndarray or float
       bias, or mean difference along given axis.

    Notes
    -----
    If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
    This uses ``numpy.asanyarray`` to convert the input. Whether this is the
    desired result or not depends on the array subclass.
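
    Examples
    --------
    The bias can be zero even when the individual errors are not:

    >>> float(bias([1., 2., 3.], [2., 2., 2.]))
    0.0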
    """
    x1 = np.asanyarray(x1)
    x2 = np.asanyarray(x2)
    return np.mean(x1-x2, axis=axis) 


def vare(x1, x2, ddof=0, axis=0):
    """variance of error

    Parameters
    ----------
    x1, x2 : array_like
       The performance measure depends on the difference between these two
       arrays.
    ddof : int
       delta degrees of freedom, passed to ``numpy.var``
    axis : int
       axis along which the summary statistic is calculated

    Returns
    -------
    vare : ndarray or float
       variance of difference along given axis.

    Notes
    -----
    If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
    This uses ``numpy.asanyarray`` to convert the input. Whether this is the
    desired result or not depends on the array subclass.
    """
    x1 = np.asanyarray(x1)
    x2 = np.asanyarray(x2)
    return np.var(x1-x2, ddof=ddof, axis=axis) 


def stde(x1, x2, ddof=0, axis=0):
    """standard deviation of error

    Parameters
    ----------
    x1, x2 : array_like
       The performance measure depends on the difference between these two
       arrays.
    ddof : int
       delta degrees of freedom, passed to ``numpy.std``
    axis : int
       axis along which the summary statistic is calculated

    Returns
    -------
    stde : ndarray or float
       standard deviation of difference along given axis.

    Notes
    -----
    If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
    This uses ``numpy.asanyarray`` to convert the input. Whether this is the
    desired result or not depends on the array subclass.
    """
    x1 = np.asanyarray(x1)
    x2 = np.asanyarray(x2)
    return np.std(x1-x2, ddof=ddof, axis=axis) 


def iqr(x1, x2, axis=0):
    """interquartile range of error

    Uses a rounded index without interpolation; ``numpy.percentile`` could be
    used instead.

    Parameters
    ----------
    x1, x2 : array_like
       The performance measure depends on the difference between these two
       arrays.
    axis : int
       axis along which the summary statistic is calculated

    Returns
    -------
    iqr : ndarray or float
       interquartile range of the difference along given axis.

    Notes
    -----
    If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
    This uses ``numpy.asarray`` to convert the input, in contrast to the other
    functions in this category.
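
    Examples
    --------
    For five differences the rounded 25% and 75% positions are indices 1
    and 3:

    >>> float(iqr([0., 1., 2., 3., 4.], [0., 0., 0., 0., 0.]))
    2.0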
    """
    x1 = np.asarray(x1)
    x2 = np.asarray(x2)
    if axis is None:
        x1 = np.ravel(x1)
        x2 = np.ravel(x2)
        axis = 0
    xdiff = np.sort(x1 - x2, axis=axis)  # sort differences along the requested axis
    nobs = x1.shape[axis]
    idx = np.round((nobs-1) * np.array([0.25, 0.75])).astype(int)
    sl = [slice(None)] * xdiff.ndim
    sl[axis] = idx
    iqr = np.diff(xdiff[tuple(sl)], axis=axis)  # tuple index; list indexing is deprecated in numpy
    iqr = np.squeeze(iqr)  # drop reduced dimension
    return iqr 


# Information Criteria
# ---------------------


def aic(llf, nobs, df_modelwc):
    """Akaike information criterion

    Parameters
    ----------
    llf : float
        value of the loglikelihood
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant

    Returns
    -------
    aic : float
        information criterion

    References
    ----------
    http://en.wikipedia.org/wiki/Akaike_information_criterion
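
    Examples
    --------
    For a loglikelihood of -100 with 50 observations and 3 parameters:

    >>> aic(-100., 50, 3)
    206.0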
    """
    return -2. * llf + 2. * df_modelwc 


def aicc(llf, nobs, df_modelwc):
    """Akaike information criterion (AIC) with small sample correction

    Parameters
    ----------
    llf : float
        value of the loglikelihood
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant

    Returns
    -------
    aicc : float
        information criterion

    References
    ----------
    http://en.wikipedia.org/wiki/Akaike_information_criterion#AICc
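
    Examples
    --------
    With the same toy numbers as for ``aic`` the penalty is slightly larger:

    >>> round(float(aicc(-100., 50, 3)), 4)
    206.5217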
    """
    return -2. * llf + 2. * df_modelwc * nobs / (nobs - df_modelwc - 1.) 


def bic(llf, nobs, df_modelwc):
    """Bayesian information criterion (BIC) or Schwarz criterion

    Parameters
    ----------
    llf : float
        value of the loglikelihood
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant

    Returns
    -------
    bic : float
        information criterion

    References
    ----------
    http://en.wikipedia.org/wiki/Bayesian_information_criterion
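
    Examples
    --------
    With the same toy numbers as for ``aic``:

    >>> round(float(bic(-100., 50, 3)), 4)
    211.7361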
    """
    return -2. * llf + np.log(nobs) * df_modelwc 


def hqic(llf, nobs, df_modelwc):
    """Hannan-Quinn information criterion (HQC)

    Parameters
    ----------
    llf : float
        value of the loglikelihood
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant

    Returns
    -------
    hqic : float
        information criterion

    References
    ----------
    Wikipedia doesn't say much
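
    Examples
    --------
    With the same toy numbers as for ``aic``:

    >>> round(float(hqic(-100., 50, 3)), 4)
    208.1843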
    """
    return -2. * llf + 2 * np.log(np.log(nobs)) * df_modelwc 


# IC based on residual sigma


def aic_sigma(sigma2, nobs, df_modelwc, islog=False):
    r"""Akaike information criterion

    Parameters
    ----------
    sigma2 : float
        estimate of the residual variance or determinant of Sigma_hat in the
        multivariate case. If islog is true, then it is assumed that sigma2
        is already the log, for example logdetSigma.
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant
    islog : bool
        If True, then ``sigma2`` is interpreted as the log of the variance
        (or of the determinant in the multivariate case).

    Returns
    -------
    aic : float
        information criterion

    Notes
    -----
    A constant has been dropped in comparison to the loglikelihood based
    information criteria. The information criteria should only be used to
    compare comparable models.

    For example, the AIC is defined in terms of the loglikelihood as

    :math:`-2 llf + 2 k`

    in terms of :math:`\hat{\sigma}^2` as

    :math:`\log(\hat{\sigma}^2) + 2 k / n`

    and in terms of the determinant of :math:`\hat{\Sigma}` as

    :math:`\log(|\hat{\Sigma}|) + 2 k / n`

    Note: In our definition we do not divide by n in the loglikelihood
    version.

    See, for example, the lecture notes by Herman Bierens.

    References
    ----------
    http://en.wikipedia.org/wiki/Akaike_information_criterion
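
    Examples
    --------
    The ``islog`` option only changes whether the log is taken inside the
    function:

    >>> import numpy as np
    >>> round(float(aic_sigma(2.0, 50, 3)), 4)
    0.8131
    >>> round(float(aic_sigma(np.log(2.0), 50, 3, islog=True)), 4)
    0.8131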
    """
    if not islog:
        sigma2 = np.log(sigma2)
    return sigma2 + aic(0, nobs, df_modelwc) / nobs 


def aicc_sigma(sigma2, nobs, df_modelwc, islog=False):
    """Akaike information criterion (AIC) with small sample correction

    Parameters
    ----------
    sigma2 : float
        estimate of the residual variance or determinant of Sigma_hat in the
        multivariate case. If islog is true, then it is assumed that sigma2
        is already the log, for example logdetSigma.
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant
    islog : bool
        If True, then ``sigma2`` is interpreted as the log of the variance
        (or of the determinant in the multivariate case).

    Returns
    -------
    aicc : float
        information criterion

    Notes
    -----
    A constant has been dropped in comparison to the loglikelihood based
    information criteria. These should only be used to compare comparable
    models.

    References
    ----------
    http://en.wikipedia.org/wiki/Akaike_information_criterion#AICc
    """
    if not islog:
        sigma2 = np.log(sigma2)
    return sigma2 + aicc(0, nobs, df_modelwc) / nobs 


def bic_sigma(sigma2, nobs, df_modelwc, islog=False):
    """Bayesian information criterion (BIC) or Schwarz criterion

    Parameters
    ----------
    sigma2 : float
        estimate of the residual variance or determinant of Sigma_hat in the
        multivariate case. If islog is true, then it is assumed that sigma2
        is already the log, for example logdetSigma.
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant
    islog : bool
        If True, then ``sigma2`` is interpreted as the log of the variance
        (or of the determinant in the multivariate case).

    Returns
    -------
    bic : float
        information criterion

    Notes
    -----
    A constant has been dropped in comparison to the loglikelihood based
    information criteria. These should only be used to compare comparable
    models.

    References
    ----------
    http://en.wikipedia.org/wiki/Bayesian_information_criterion
    """
    if not islog:
        sigma2 = np.log(sigma2)
    return sigma2 + bic(0, nobs, df_modelwc) / nobs 


def hqic_sigma(sigma2, nobs, df_modelwc, islog=False):
    """Hannan-Quinn information criterion (HQC)

    Parameters
    ----------
    sigma2 : float
        estimate of the residual variance or determinant of Sigma_hat in the
        multivariate case. If islog is true, then it is assumed that sigma2
        is already the log, for example logdetSigma.
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant
    islog : bool
        If True, then ``sigma2`` is interpreted as the log of the variance
        (or of the determinant in the multivariate case).

    Returns
    -------
    hqic : float
        information criterion

    Notes
    -----
    A constant has been dropped in comparison to the loglikelihood based
    information criteria. These should only be used to compare comparable
    models.

    References
    ----------
    xxx
    """
    if not islog:
        sigma2 = np.log(sigma2)
    return sigma2 + hqic(0, nobs, df_modelwc) / nobs 


# from var_model.py, VAR only? separates neqs and k_vars per equation
# def fpe_sigma():
#     ((nobs + self.df_model) / self.df_resid) ** neqs * np.exp(ld)

__all__ = ['maxabs', 'meanabs', 'medianabs', 'medianbias', 'mse', 'rmse',
           'stde', 'vare', 'aic', 'aic_sigma', 'aicc', 'aicc_sigma', 'bias',
           'bic', 'bic_sigma', 'hqic', 'hqic_sigma', 'iqr']
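

if __name__ == '__main__':
    # Illustrative sketch, not part of the module's API: the data generating
    # process and the use of np.polyfit as candidate models are assumptions
    # made only for this demo.  It compares polynomial fits of increasing
    # order using the error measures and sigma-based criteria defined above.
    np.random.seed(12345)
    nobs_demo = 100
    x = np.linspace(0, 1, nobs_demo)
    y = 1.0 + 2.0 * x + 0.1 * np.random.randn(nobs_demo)

    for deg in (1, 2, 3):
        coeffs = np.polyfit(x, y, deg)
        fitted = np.polyval(coeffs, x)
        sigma2_hat = (y - fitted).var()   # ML estimate of residual variance
        k_params = deg + 1                # coefficients including constant
        print('degree %d: rmse=%.4f aic_sigma=%.4f bic_sigma=%.4f' %
              (deg, rmse(y, fitted),
               aic_sigma(sigma2_hat, nobs_demo, k_params),
               bic_sigma(sigma2_hat, nobs_demo, k_params)))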