from collections import namedtuple

import numpy as np

from . import distributions

__all__ = ['_find_repeats', 'linregress', 'theilslopes', 'siegelslopes']

LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
                                                   'rvalue', 'pvalue',
                                                   'stderr'))


def linregress(x, y=None):
    """
    Calculate a linear least-squares regression for two sets of measurements.

    Parameters
    ----------
    x, y : array_like
        Two sets of measurements. Both arrays should have the same length.
        If only `x` is given (and ``y=None``), then it must be a
        two-dimensional array where one dimension has length 2. The two
        sets of measurements are then found by splitting the array along
        the length-2 dimension.

    Returns
    -------
    slope : float
        Slope of the regression line.
    intercept : float
        Intercept of the regression line.
    rvalue : float
        Correlation coefficient.
    pvalue : float
        Two-sided p-value for a hypothesis test whose null hypothesis is
        that the slope is zero, using the Wald test with a t-distribution
        of the test statistic.
    stderr : float
        Standard error of the estimated slope (gradient).

    See also
    --------
    :func:`scipy.optimize.curve_fit` : Use non-linear
     least squares to fit a function to data.
    :func:`scipy.optimize.leastsq` : Minimize the sum of
     squares of a set of equations.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy import stats

    Generate some data:

    >>> np.random.seed(12345678)
    >>> x = np.random.random(10)
    >>> y = 1.6*x + np.random.random(10)

    Perform the linear regression:

    >>> slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
    >>> print("slope: %f intercept: %f" % (slope, intercept))
    slope: 1.944864 intercept: 0.268578

    To get the coefficient of determination (r_squared):

    >>> print("r-squared: %f" % r_value**2)
    r-squared: 0.735498

    Plot the data along with the fitted line:

    >>> plt.plot(x, y, 'o', label='original data')
    >>> plt.plot(x, intercept + slope*x, 'r', label='fitted line')
    >>> plt.legend()
    >>> plt.show()

    """
    TINY = 1.0e-20
    if y is None:  # x is a (2, N) or (N, 2) shaped array_like
        x = np.asarray(x)
        if x.shape[0] == 2:
            x, y = x
        elif x.shape[1] == 2:
            x, y = x.T
        else:
            msg = ("If only `x` is given as input, it has to be of shape "
                   "(2, N) or (N, 2), provided shape was %s" % str(x.shape))
            raise ValueError(msg)
    else:
        x = np.asarray(x)
        y = np.asarray(y)

    if x.size == 0 or y.size == 0:
        raise ValueError("Inputs must not be empty.")

    n = len(x)
    xmean = np.mean(x, None)
    ymean = np.mean(y, None)

    # average sum of squares:
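    # np.cov(x, y, bias=1) returns the biased (divide-by-n) 2x2 covariance
    # matrix, so the flattened entries are var(x), cov(x, y), cov(y, x) and
    # var(y), unpacked here as ssxm, ssxym, ssyxm and ssym respectively.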
    ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
    r_num = ssxym
    r_den = np.sqrt(ssxm * ssym)
    if r_den == 0.0:
        r = 0.0
    else:
        r = r_num / r_den
        # test for numerical error propagation
        if r > 1.0:
            r = 1.0
        elif r < -1.0:
            r = -1.0

    df = n - 2
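    # Least-squares estimates: slope = cov(x, y) / var(x), and the intercept
    # follows from the fitted line passing through (xmean, ymean).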
    slope = r_num / ssxm
    intercept = ymean - slope*xmean
    if n == 2:
        # handle case when only two points are passed in
        if y[0] == y[1]:
            prob = 1.0
        else:
            prob = 0.0
        sterrest = 0.0
    else:
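        # t statistic for H0: slope == 0, t = r * sqrt(df / (1 - r**2)); TINY
        # keeps the denominator nonzero when r is exactly +/-1.  The standard
        # error of the slope is sqrt((1 - r**2) * var(y) / var(x) / df).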
        t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
        prob = 2 * distributions.t.sf(np.abs(t), df)
        sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)

    return LinregressResult(slope, intercept, r, prob, sterrest)


def theilslopes(y, x=None, alpha=0.95):
    r"""
    Computes the Theil-Sen estimator for a set of points (x, y).

    `theilslopes` implements a method for robust linear regression.  It
    computes the slope as the median of all slopes between paired values.

    Parameters
    ----------
    y : array_like
        Dependent variable.
    x : array_like or None, optional
        Independent variable. If None, use ``arange(len(y))`` instead.
    alpha : float, optional
        Confidence degree between 0 and 1. Default is 95% confidence.
        Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
        interpreted as "find the 90% confidence interval".

    Returns
    -------
    medslope : float
        Theil slope.
    medintercept : float
        Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
    lo_slope : float
        Lower bound of the confidence interval on `medslope`.
    up_slope : float
        Upper bound of the confidence interval on `medslope`.

    See also
    --------
    siegelslopes : a similar technique using repeated medians

    Notes
    -----
    The implementation of `theilslopes` follows [1]_. The intercept is
    not defined in [1]_, and here it is defined as ``median(y) -
    medslope*median(x)``, which is given in [3]_. Other definitions of
    the intercept exist in the literature. A confidence interval for
    the intercept is not given as this question is not addressed in
    [1]_.

    References
    ----------
    .. [1] P.K. Sen, "Estimates of the regression coefficient based on
           Kendall's tau", J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
    .. [2] H. Theil, "A rank-invariant method of linear and polynomial
           regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
           53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
    .. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
           John Wiley and Sons, New York, pp. 493.

    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt

    >>> x = np.linspace(-5, 5, num=150)
    >>> y = x + np.random.normal(size=x.size)
    >>> y[11:15] += 10  # add outliers
    >>> y[-5:] -= 7

    Compute the slope, intercept and 90% confidence interval.  For comparison,
    also compute the least-squares fit with `linregress`:

    >>> res = stats.theilslopes(y, x, 0.90)
    >>> lsq_res = stats.linregress(x, y)

    Plot the results. The Theil-Sen regression line is shown in red, with the
    dashed red lines illustrating the confidence interval of the slope (note
    that the dashed red lines are not the confidence interval of the regression
    as the confidence interval of the intercept is not included). The green
    line shows the least-squares fit for comparison.

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(x, y, 'b.')
    >>> ax.plot(x, res[1] + res[0] * x, 'r-')
    >>> ax.plot(x, res[1] + res[2] * x, 'r--')
    >>> ax.plot(x, res[1] + res[3] * x, 'r--')
    >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
    >>> plt.show()

    """
    # We copy both x and y so we can use _find_repeats.
    y = np.array(y).flatten()
    if x is None:
        x = np.arange(len(y), dtype=float)
    else:
        x = np.array(x, dtype=float).flatten()
        if len(x) != len(y):
            raise ValueError("Incompatible lengths ! (%s<>%s)" %
                             (len(y), len(x)))

    # Compute sorted slopes only when deltax > 0
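    # Selecting deltax > 0 keeps exactly one orientation of each pair of
    # points and drops pairs with tied x values, whose slope is undefined.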
    deltax = x[:, np.newaxis] - x
    deltay = y[:, np.newaxis] - y
    slopes = deltay[deltax > 0] / deltax[deltax > 0]
    slopes.sort()
    medslope = np.median(slopes)
    medinter = np.median(y) - medslope * np.median(x)
    # Now compute confidence intervals
    if alpha > 0.5:
        alpha = 1. - alpha

    z = distributions.norm.ppf(alpha / 2.)
    # This implements (2.6) from Sen (1968)
    _, nxreps = _find_repeats(x)
    _, nyreps = _find_repeats(y)
    nt = len(slopes)  # N in Sen (1968)
    ny = len(y)       # n in Sen (1968)
    # Equation 2.6 in Sen (1968):
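    # Per Sen (1968), sigsq is the variance of Kendall's score statistic; the
    # two sums correct for groups of tied values in x and in y.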
    sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
                     sum(k * (k-1) * (2*k + 5) for k in nxreps) -
                     sum(k * (k-1) * (2*k + 5) for k in nyreps))
    # Find the confidence interval indices in `slopes`
    sigma = np.sqrt(sigsq)
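    # Ru and Rl are the ranks (0-based indices into the sorted slopes) of the
    # upper and lower confidence limits, clipped to stay within valid range.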
    Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
    Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
    delta = slopes[[Rl, Ru]]

    return medslope, medinter, delta[0], delta[1]


def _find_repeats(arr):
    # This function assumes it may clobber its input.
    if len(arr) == 0:
        return np.array(0, np.float64), np.array(0, np.intp)

    # XXX This cast was previously needed for the Fortran implementation,
    # should we ditch it?
    arr = np.asarray(arr, np.float64).ravel()
    arr.sort()

    # Taken from NumPy 1.9's np.unique.
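    # `change` marks where a new value starts in the sorted array; `freq` is
    # the run length of each distinct value.  Only values that occur at least
    # twice (i.e. actual repeats) are returned, along with their counts.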
    change = np.concatenate(([True], arr[1:] != arr[:-1]))
    unique = arr[change]
    change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
    freq = np.diff(change_idx)
    atleast2 = freq > 1
    return unique[atleast2], freq[atleast2]


def siegelslopes(y, x=None, method="hierarchical"):
    r"""
    Computes the Siegel estimator for a set of points (x, y).

    `siegelslopes` implements a method for robust linear regression
    using repeated medians (see [1]_) to fit a line to the points (x, y).
    The method is robust to outliers with an asymptotic breakdown point
    of 50%.

    Parameters
    ----------
    y : array_like
        Dependent variable.
    x : array_like or None, optional
        Independent variable. If None, use ``arange(len(y))`` instead.
    method : {'hierarchical', 'separate'}
        If 'hierarchical', estimate the intercept using the estimated
        slope ``medslope`` (default option).
        If 'separate', estimate the intercept independent of the estimated
        slope. See Notes for details.

    Returns
    -------
    medslope : float
        Estimate of the slope of the regression line.
    medintercept : float
        Estimate of the intercept of the regression line.

    See also
    --------
    theilslopes : a similar technique without repeated medians

    Notes
    -----
    With ``n = len(y)``, compute ``m_j`` as the median of
    the slopes from the point ``(x[j], y[j])`` to all other `n-1` points.
    ``medslope`` is then the median of all slopes ``m_j``.

    Two ways are given to estimate the intercept in [1]_ which can be chosen
    via the parameter ``method``.
    The hierarchical approach uses the estimated slope ``medslope``
    and computes ``medintercept`` as the median of ``y - medslope*x``.
    The other approach estimates the intercept separately as follows: for
    each point ``(x[j], y[j])``, compute the intercepts of all the `n-1`
    lines through the remaining points and take the median ``i_j``.
    ``medintercept`` is the median of the ``i_j``.

    The implementation computes `n` times the median of a vector of size `n`
    which can be slow for large vectors. There are more efficient algorithms
    (see [2]_) which are not implemented here.

    References
    ----------
    .. [1] A. Siegel, "Robust Regression Using Repeated Medians",
           Biometrika, Vol. 69, pp. 242-244, 1982.
    .. [2] A. Stein and M. Werman, "Finding the repeated median regression
           line", Proceedings of the Third Annual ACM-SIAM Symposium on
           Discrete Algorithms, pp. 409-413, 1992.

    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt

    >>> x = np.linspace(-5, 5, num=150)
    >>> y = x + np.random.normal(size=x.size)
    >>> y[11:15] += 10  # add outliers
    >>> y[-5:] -= 7

    Compute the slope and intercept.  For comparison, also compute the
    least-squares fit with `linregress`:

    >>> res = stats.siegelslopes(y, x)
    >>> lsq_res = stats.linregress(x, y)

    Plot the results. The Siegel regression line is shown in red. The green
    line shows the least-squares fit for comparison.

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(x, y, 'b.')
    >>> ax.plot(x, res[1] + res[0] * x, 'r-')
    >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
    >>> plt.show()

    """
    if method not in ['hierarchical', 'separate']:
        raise ValueError("method can only be 'hierarchical' or 'separate'")
    y = np.asarray(y).ravel()
    if x is None:
        x = np.arange(len(y), dtype=float)
    else:
        x = np.asarray(x, dtype=float).ravel()
        if len(x) != len(y):
            raise ValueError("Incompatible lengths ! (%s<>%s)" %
                             (len(y), len(x)))

    deltax = x[:, np.newaxis] - x
    deltay = y[:, np.newaxis] - y
    slopes, intercepts = [], []
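    # For each point j, m_j (stored in `slopes`) is the median slope of the
    # lines through (x[j], y[j]) and every other point with a distinct x.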
    for j in range(len(x)):
        id_nonzero = deltax[j, :] != 0
        slopes_j = deltay[j, id_nonzero] / deltax[j, id_nonzero]
        medslope_j = np.median(slopes_j)
        slopes.append(medslope_j)
        if method == 'separate':
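            # z[i] / deltax[j, i] = (y[i]*x[j] - y[j]*x[i]) / (x[j] - x[i]),
            # the y-intercept of the line through points i and j.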
            z = y*x[j] - y[j]*x
            medintercept_j = np.median(z[id_nonzero] / deltax[j, id_nonzero])
            intercepts.append(medintercept_j)

    medslope = np.median(np.asarray(slopes))
    if method == "separate":
        medinter = np.median(np.asarray(intercepts))
    else:
        medinter = np.median(y - medslope*x)

    return medslope, medinter