o
    \i@                     @   s
  d Z ddlZddlmZ ddlmZmZ ddlZddl	m
Z
 ddlmZ ddlmZ ddlmZ d	d
lmZmZ d	dlmZ d	dlmZ d	dlmZmZmZ d	dlmZmZ d	dlm Z  ddl!m"Z" e#ej$j%Z&dd Z'dddZ(dd Z)dd Z*G dd dee"Z+dS )z<
A Theil-Sen Estimator for Multiple Linear Regression Model
    N)combinations)IntegralReal)effective_n_jobs)linalg)get_lapack_funcs)binom   )RegressorMixin_fit_context)ConvergenceWarning)check_random_state)HiddenInterval
StrOptions)Paralleldelayed)validate_data   )LinearModelc                 C   s   | | }t t j|d dd}|tk}t| | jd k }|| }|| ddt jf }tt j|| dd}|tkrWt j| |ddf | ddt jd| dd }nd}d}t	dd||  | t
d|| |  S )u	  Modified Weiszfeld step.

    This function defines one iteration step in order to approximate the
    spatial median (L1 median). It is a form of an iteratively re-weighted
    least squares method.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    x_old : ndarray of shape = (n_features,)
        Current start vector.

    Returns
    -------
    x_new : ndarray of shape (n_features,)
        New iteration step.

    References
    ----------
    - On Computation of Spatial Median for Robust Data Mining, 2005
      T. Kärkkäinen and S. Äyrämö
      http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
    r	   r   axisr   Ng      ?        )npsqrtsum_EPSILONintshapenewaxisr   normmaxmin)Xx_olddiff	diff_normmaskis_x_old_in_Xquotient_normnew_direction r+   /var/www/www-root/data/www/176.119.141.140/sports-predictor/venv/lib/python3.10/site-packages/sklearn/linear_model/_theil_sen.py_modified_weiszfeld_step   s"    
r-   ,  MbP?c                 C   s   | j d dkrdtj|  ddfS |dC }tj| dd}t|D ]}t| |}t|| d |k r8 ||fS |}q!t	dj
|dt ||fS )	u	  Spatial median (L1 median).

    The spatial median is member of a class of so-called M-estimators which
    are defined by an optimization problem. Given a number of p points in an
    n-dimensional space, the point x minimizing the sum of all distances to the
    p other points is called spatial median.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    max_iter : int, default=300
        Maximum number of iterations.

    tol : float, default=1.e-3
        Stop the algorithm if spatial_median has converged.

    Returns
    -------
    spatial_median : ndarray of shape = (n_features,)
        Spatial median.

    n_iter : int
        Number of iterations needed.

    References
    ----------
    - On Computation of Spatial Median for Robust Data Mining, 2005
      T. Kärkkäinen and S. Äyrämö
      http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
    r   T)keepdimsr	   r   r   zYMaximum number of iterations {max_iter} reached in spatial median for TheilSen regressor.)max_iter)r   r   medianravelmeanranger-   r   warningswarnformatr   )r#   r1   tolspatial_median_oldn_iterspatial_medianr+   r+   r,   _spatial_medianP   s"   "

r=   c                 C   s(   ddd|  | | d  | d |   S )a  Approximation of the breakdown point.

    Parameters
    ----------
    n_samples : int
        Number of samples.

    n_subsamples : int
        Number of subsamples to consider.

    Returns
    -------
    breakdown_point : float
        Approximation of breakdown point.
    r   g      ?r+   )	n_samplesn_subsamplesr+   r+   r,   _breakdown_point   s   r@   c                 C   s   t |}| jd | }|jd }t|jd |f}t||f}tt||}td||f\}	t|D ])\}
}| |ddf |dd|df< || |d|< |	||d d| ||
< q5|S )a  Least Squares Estimator for TheilSenRegressor class.

    This function calculates the least squares method on a subset of rows of X
    and y defined by the indices array. Optionally, an intercept column is
    added if intercept is set to true.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Design matrix, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : ndarray of shape (n_samples,)
        Target vector, where `n_samples` is the number of samples.

    indices : ndarray of shape (n_subpopulation, n_subsamples)
        Indices of all subsamples with respect to the chosen subpopulation.

    fit_intercept : bool
        Fit intercept or not.

    Returns
    -------
    weights : ndarray of shape (n_subpopulation, n_features + intercept)
        Solution matrix of n_subpopulation solved least square problems.
    r   r   )gelssN)	r   r   r   emptyoneszerosr!   r   	enumerate)r#   yindicesfit_intercept
n_featuresr?   weightsX_subpopulationy_subpopulationlstsqindexsubsetr+   r+   r,   _lstsq   s   
 rP   c                   @   s   e Zd ZU dZdgdeedhgeeddddgdegeeddddgeed	dddgd
gdegdgd	Z	e
ed< dddddddddd	ddZdd Zedddd ZdS )TheilSenRegressora  Theil-Sen Estimator: robust multivariate regression model.

    The algorithm calculates least square solutions on subsets with size
    n_subsamples of the samples in X. Any value of n_subsamples between the
    number of features and samples leads to an estimator with a compromise
    between robustness and efficiency. Since the number of least square
    solutions is "n_samples choose n_subsamples", it can be extremely large
    and can therefore be limited with max_subpopulation. If this limit is
    reached, the subsets are chosen randomly. In a final step, the spatial
    median (or L1 median) is calculated of all least square solutions.

    Read more in the :ref:`User Guide <theil_sen_regression>`.

    Parameters
    ----------
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations.

    copy_X : bool, default=True
        If True, X will be copied; else, it may be overwritten.

        .. deprecated:: 1.6
            `copy_X` was deprecated in 1.6 and will be removed in 1.8.
            It has no effect as a copy is always made.

    max_subpopulation : int, default=1e4
        Instead of computing with a set of cardinality 'n choose k', where n is
        the number of samples and k is the number of subsamples (at least
        number of features), consider only a stochastic subpopulation of a
        given maximal size if 'n choose k' is larger than max_subpopulation.
        For other than small problem sizes this parameter will determine
        memory usage and runtime if n_subsamples is not changed. Note that the
        data type should be int but floats such as 1e4 can be accepted too.

    n_subsamples : int, default=None
        Number of samples to calculate the parameters. This is at least the
        number of features (plus 1 if fit_intercept=True) and the number of
        samples as a maximum. A lower number leads to a higher breakdown
        point and a low efficiency while a high number leads to a low
        breakdown point and a high efficiency. If None, take the
        minimum number of subsamples leading to maximal robustness.
        If n_subsamples is set to n_samples, Theil-Sen is identical to least
        squares.

    max_iter : int, default=300
        Maximum number of iterations for the calculation of spatial median.

    tol : float, default=1e-3
        Tolerance when calculating spatial median.

    random_state : int, RandomState instance or None, default=None
        A random number generator instance to define the state of the random
        permutations generator. Pass an int for reproducible output across
        multiple function calls.
        See :term:`Glossary <random_state>`.

    n_jobs : int, default=None
        Number of CPUs to use during the cross validation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    verbose : bool, default=False
        Verbose mode when fitting the model.

    Attributes
    ----------
    coef_ : ndarray of shape (n_features,)
        Coefficients of the regression model (median of distribution).

    intercept_ : float
        Estimated intercept of regression model.

    breakdown_ : float
        Approximated breakdown point.

    n_iter_ : int
        Number of iterations needed for the spatial median.

    n_subpopulation_ : int
        Number of combinations taken into account from 'n choose k', where n is
        the number of samples and k is the number of subsamples.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    HuberRegressor : Linear regression model that is robust to outliers.
    RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm.
    SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD.

    References
    ----------
    - Theil-Sen Estimators in a Multiple Linear Regression Model, 2009
      Xin Dang, Hanxiang Peng, Xueqin Wang and Heping Zhang
      http://home.olemiss.edu/~xdang/papers/MTSE.pdf

    Examples
    --------
    >>> from sklearn.linear_model import TheilSenRegressor
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(
    ...     n_samples=200, n_features=2, noise=4.0, random_state=0)
    >>> reg = TheilSenRegressor(random_state=0).fit(X, y)
    >>> reg.score(X, y)
    0.9884
    >>> reg.predict(X[:1,])
    array([-31.5871])
    boolean
deprecatedr   Nleft)closedr   r   random_stateverbose	rH   copy_Xmax_subpopulationr?   r1   r9   rV   n_jobsrW   _parameter_constraintsTg     @r.   r/   Fc       	   
      C   s:   || _ || _|| _|| _|| _|| _|| _|| _|	| _d S NrX   )
selfrH   rY   rZ   r?   r1   r9   rV   r[   rW   r+   r+   r,   __init__U  s   
zTheilSenRegressor.__init__c                 C   s   | j }| jr|d }n|}|d urC||krtd||||kr6||kr5| jr*dnd}td|||n||krBtd||nt||}tdtt||}t	t| j
|}||fS )Nr   z=Invalid parameter since n_subsamples > n_samples ({0} > {1}).z+1 zAInvalid parameter since n_features{0} > n_subsamples ({1} > {2}).z\Invalid parameter since n_subsamples != n_samples ({0} != {1}) while n_samples < n_features.)r?   rH   
ValueErrorr8   r"   r!   r   rintr   r   rZ   )r^   r>   rI   r?   n_dimplus_1all_combinationsn_subpopulationr+   r+   r,   _check_subparamsl  s8   


z"TheilSenRegressor._check_subparams)prefer_skip_nested_validationc           	         s  j dkrtdt tjt dd\  j\}|\_	t
_jrVtdj td tj }td| tdj	 ttjkrkttt}nfd	d
tj	D }tj}t||t|jd fddt|D }t|}t|jjd\_}j r|d _!|dd _"S d_!|_"S )aU  Fit linear model.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Training data.
        y : ndarray of shape (n_samples,)
            Target values.

        Returns
        -------
        self : returns an instance of self.
            Fitted `TheilSenRegressor` estimator.
        rS   z`copy_X` was deprecated in 1.6 and will be removed in 1.8 since it has no effect internally. Simply leave this parameter to its default value to avoid this warning.T)	y_numericzBreakdown point: {0}zNumber of samples: {0}zTolerable outliers: {0}zNumber of subpopulations: {0}c                    s   g | ]
}j  d dqS )F)sizereplace)choice).0_)r>   r?   rV   r+   r,   
<listcomp>  s    z)TheilSenRegressor.fit.<locals>.<listcomp>)r[   rW   c                 3   s(    | ]}t t | jV  qd S r]   )r   rP   rH   )rm   job)r#   
index_listr^   rF   r+   r,   	<genexpr>  s
    
z(TheilSenRegressor.fit.<locals>.<genexpr>)r1   r9   r   r   Nr   )#rY   r6   r7   FutureWarningr   rV   r   r   rg   n_subpopulation_r@   
breakdown_rW   printr8   r   r   rb   r   rZ   listr   r5   r   r[   array_splitr   vstackr=   r1   r9   n_iter_rH   
intercept_coef_)	r^   r#   rF   rI   tol_outliersrG   r[   rJ   coefsr+   )r#   rq   r>   r?   rV   r^   rF   r,   fit  sN   







zTheilSenRegressor.fit)__name__
__module____qualname____doc__r   r   r   r   r   r\   dict__annotations__r_   rg   r   r   r+   r+   r+   r,   rQ      s2   
 y%rQ   )r.   r/   ),r   r6   	itertoolsr   numbersr   r   numpyr   joblibr   scipyr   scipy.linalg.lapackr   scipy.specialr   baser
   r   
exceptionsr   utilsr   utils._param_validationr   r   r   utils.parallelr   r   utils.validationr   _baser   finfodoubleepsr   r-   r=   r@   rP   rQ   r+   r+   r+   r,   <module>   s,    
38,