"""
Tests for LinearModelLoss

Note that correctness of losses (which compose LinearModelLoss) is already well
covered in the _loss module.
"""

import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy import linalg, optimize

from sklearn._loss.loss import (
    HalfBinomialLoss,
    HalfMultinomialLoss,
    HalfPoissonLoss,
)
from sklearn.datasets import make_low_rank_matrix
from sklearn.linear_model._linear_loss import LinearModelLoss
from sklearn.utils.extmath import squared_norm
from sklearn.utils.fixes import CSR_CONTAINERS

LOSSES = [HalfBinomialLoss, HalfMultinomialLoss, HalfPoissonLoss]


def random_X_y_coef(
    linear_model_loss, n_samples, n_features, coef_bound=(-2, 2), seed=42
):
    """Random generate y, X and coef in valid range."""
    rng = np.random.RandomState(seed)
    n_dof = n_features + linear_model_loss.fit_intercept
    X = make_low_rank_matrix(
        n_samples=n_samples, n_features=n_features, random_state=rng
    )
    coef = linear_model_loss.init_zero_coef(X)

    if linear_model_loss.base_loss.is_multiclass:
        n_classes = linear_model_loss.base_loss.n_classes
        coef.flat[:] = rng.uniform(
            low=coef_bound[0], high=coef_bound[1], size=n_classes * n_dof
        )
        if linear_model_loss.fit_intercept:
            raw_prediction = X @ coef[:, :-1].T + coef[:, -1]
        else:
            raw_prediction = X @ coef.T
        proba = linear_model_loss.base_loss.link.inverse(raw_prediction)
dd< | jrK||ddddf j |dddf  }	n||j }	| jj|	}
 fdd}|t ||
d	t j}n4 j	|d |d |d|j
dd< | jr||dd  |d  }	n|| }	| jj|	 j	dd|d }|||fS )


@pytest.mark.parametrize("base_loss", LOSSES)
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("n_features", [0, 1, 10])
@pytest.mark.parametrize("dtype", [None, np.float32, np.float64, np.int64])
def test_init_zero_coef(
    base_loss, fit_intercept, n_features, dtype, global_random_seed
):
    """Test that init_zero_coef initializes coef correctly."""
    loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=fit_intercept)
    rng = np.random.RandomState(global_random_seed)
    X = rng.normal(size=(5, n_features))
    coef = loss.init_zero_coef(X, dtype=dtype)

    if loss.base_loss.is_multiclass:
        n_classes = loss.base_loss.n_classes
        assert coef.shape == (n_classes, n_features + fit_intercept)
        assert coef.flags["F_CONTIGUOUS"]
    else:
        assert coef.shape == (n_features + fit_intercept,)

    if dtype is None:
        assert coef.dtype == X.dtype
    else:
        assert coef.dtype == dtype

    assert np.count_nonzero(coef) == 0
}}|dkr2tjd|jd |jd d}|j|	||||d	}|j|	||||d	}|j|	||||d	\}}|j	|	||||d	\}}|j
|	||||d	\}}}t|| t|| t|| t|| t||jd
d ||jd
d t|	}tj|	|	j|	jfd}|j
|	||||||d\}}}t||sJ t||sJ t|| t|| t|| t|| ||}|j|	||||d	}|j|	||||d	}|j|	||||d	\}}|j	|	||||d	\}} |j
|	||||d	\}!}"}t|| t|| t|| t|| t|| t||| | t||! t||" t||
 t| |
 t|| t|	| dS )zDTest that loss and gradient are the same across different functions.rD   rB   rE   r8   r   r   r:   rN   r   r   numrM   rO   Forder)r   )rM   rO   gradient_outhessian_outN)r	   rA   copyr(   linspacer   rK   gradientloss_gradientgradient_hessian_productgradient_hessianr   ravel
empty_liker   shares_memorytoarray)#r-   r+   rM   rO   rP   rJ   rK   r<   r@   r=   X_oldy_oldcoef_oldl1g1l2g2g3h3g4h4_g_outh_outg5h5Xsl1_spg1_spl2_spg2_spg3_sph3_spg4_sph4_spr%   r%   r&    test_loss_grad_hess_are_the_samek   s   
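
    # gradient_hessian can also write into preallocated buffers passed via
    # gradient_out and hessian_out; the returned arrays must then alias those
    # buffers instead of being freshly allocated.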
    g_out = np.empty_like(coef)
    h_out = np.empty_like(coef, shape=(coef.size, coef.size))
    g5, h5, _ = loss.gradient_hessian(
        coef,
        X,
        y,
        sample_weight=sample_weight,
        l2_reg_strength=l2_reg_strength,
        gradient_out=g_out,
        hessian_out=h_out,
    )
    assert np.shares_memory(g5, g_out)
    assert np.shares_memory(h5, h_out)
    assert_allclose(g5, g4)
    assert_allclose(h5, h4)

    # Same checks again with sparse X.
    Xs = csr_container(X)
    l1_sp = loss.loss(
        coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    g1_sp = loss.gradient(
        coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    l2_sp, g2_sp = loss.loss_gradient(
        coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    g3_sp, h3_sp = loss.gradient_hessian_product(
        coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    g4_sp, h4_sp, _ = loss.gradient_hessian(
        coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )

    assert_allclose(l1, l1_sp)
    assert_allclose(l1, l2_sp)
    assert_allclose(g1, g1_sp)
    assert_allclose(g1, g2_sp)
    assert_allclose(g1, g3_sp)
    assert_allclose(h3(g1), h3_sp(g1_sp))
    assert_allclose(g1, g4_sp)
    assert_allclose(h4, h4_sp)

    # X, y and coef must not have been modified.
    assert_allclose(X, X_old)
    assert_allclose(Xs.toarray(), X_old)
    assert_allclose(y, y_old)
    assert_allclose(coef, coef_old)
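

# The next test emulates an intercept by appending a constant column of ones
# to X with fit_intercept=False and compares against fit_intercept=True on X
# without that column. Both parameterizations must agree up to the L2 term,
# since the true intercept is not penalized.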
}d|	dddf< |	ddddf }|dur7||	}	|d	krItjd|
jd
 |
jd
 d}|j||	|
||d\}}|j||	|
||d\}}|j|||
||d\}}|j|||
||d\}}|t|d| t	|j
d   ksJ |}|j
d  ||j
d  7  < t|| tj|j|j }||}||}|}|j
d  ||j
d  7  < t|| dS )z7Test that loss and gradient handle intercept correctly.FrD   TrB   rE   rQ   r   Nr   rN   r   rR   rT   g      ?)r	   rA   r(   r[   r   r]   r^   pytestapproxr


@pytest.mark.parametrize("base_loss", LOSSES)
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("sample_weight", [None, "range"])
@pytest.mark.parametrize("l2_reg_strength", [0, 1])
def test_gradients_hessians_numerically(
    base_loss, fit_intercept, sample_weight, l2_reg_strength, global_random_seed
):
    """Test gradients and hessians with numerical derivatives.

    Gradient should equal the numerical derivatives of the loss function.
    Hessians should equal the numerical derivatives of gradients.
    """
    loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=fit_intercept)
    n_samples, n_features = 10, 5
    X, y, coef = random_X_y_coef(
        linear_model_loss=loss,
        n_samples=n_samples,
        n_features=n_features,
        seed=global_random_seed,
    )
    coef = coef.ravel(order="F")  # this is in line with the implementation

    if sample_weight == "range":
        sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])

    # 1. Check the gradient numerically.
    eps = 1e-6
    g, hessp = loss.gradient_hessian_product(
        coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    # Use a trick to get a central finite difference of accuracy 4 (five-point
    # stencil):
    #   approx_g1 = (f(x + eps) - f(x - eps)) / (2 * eps)
    #   approx_g2 = (f(x + 2 * eps) - f(x - 2 * eps)) / (4 * eps)
    #   approx_g = (4 * approx_g1 - approx_g2) / 3
    # which cancels the O(eps**2) error terms of the two central differences.
    approx_g1 = optimize.approx_fprime(
        coef - eps,
        lambda coef: loss.loss(
            coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
        ),
        2 * eps,
    )
    approx_g2 = optimize.approx_fprime(
        coef - 2 * eps,
        lambda coef: loss.loss(
            coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
        ),
        4 * eps,
    )
    approx_g = (4 * approx_g1 - approx_g2) / 3
    assert_allclose(g, approx_g, rtol=1e-2, atol=1e-8)

    # 2. Check hessp numerically along the second direction of the gradient.
    vector = np.zeros_like(g)
    vector[1] = 1
    hess_col = hessp(vector)
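    # A one-shot finite difference of the gradient is numerically fragile for
    # the Hessian. Instead, evaluate the gradient at several points along
    # `vector` and estimate the Hessian column as the slope of a least-squares
    # line fit through those gradients.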
    eps = 1e-3
    d_x = np.linspace(-eps, eps, 30)
    d_grad = np.array(
        [
            loss.gradient(
                coef + t * vector,
                X,
                y,
                sample_weight=sample_weight,
                l2_reg_strength=l2_reg_strength,
            )
            for t in d_x
        ]
    )
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_allclose(approx_hess_col, hess_col, rtol=1e-3)


@pytest.mark.parametrize("fit_intercept", [False, True])
def test_multinomial_coef_shape(fit_intercept, global_random_seed):
    """Test that multinomial LinearModelLoss respects shape of coef."""
    loss = LinearModelLoss(
        base_loss=HalfMultinomialLoss(), fit_intercept=fit_intercept
    )
    n_samples, n_features = 10, 5
    X, y, coef = random_X_y_coef(
        linear_model_loss=loss,
        n_samples=n_samples,
        n_features=n_features,
        seed=global_random_seed,
    )
    s = np.random.RandomState(42).randn(*coef.shape)

    l, g = loss.loss_gradient(coef, X, y)
    g1 = loss.gradient(coef, X, y)
    g2, hessp = loss.gradient_hessian_product(coef, X, y)
    h = hessp(s)
    assert g.shape == coef.shape
    assert h.shape == coef.shape
    assert_allclose(g, g1)
    assert_allclose(g, g2)

    g3, hess, _ = loss.gradient_hessian(coef, X, y)
    assert g3.shape == coef.shape
    assert hess.shape == (coef.size, coef.size)

    # The same computations with coef raveled in F-order must agree.
    coef_r = coef.ravel(order="F")
    s_r = s.ravel(order="F")
    l_r, g_r = loss.loss_gradient(coef_r, X, y)
    g1_r = loss.gradient(coef_r, X, y)
    g2_r, hessp_r = loss.gradient_hessian_product(coef_r, X, y)
    h_r = hessp_r(s_r)
    assert g_r.shape == coef_r.shape
    assert h_r.shape == coef_r.shape
    assert_allclose(g_r, g1_r)
    assert_allclose(g_r, g2_r)

    assert_allclose(g, g_r.reshape(loss.base_loss.n_classes, -1, order="F"))
    assert_allclose(h, h_r.reshape(loss.base_loss.n_classes, -1, order="F"))
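

# For the multinomial (softmax) loss, the per-sample Hessian w.r.t. the raw
# predictions is diag(p) - p p', with p the vector of predicted
# probabilities. The test below spells this out for n_classes=3 as a block
# matrix with 2 x 2 diagonal subblocks and checks X' H X against
# LinearModelLoss.gradient_hessian.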
 |jd
 d} |j|||| d
d\}	}
}t|
|
j	 |
||\}}}|jj||| d\}}t|ddd
f t|ddd	f t|dddf ttdf\}}}}t|||  | | | | g| | |||  | | g| | | | |||  gg}|||||f}| du r|| }n	|| t|  9 }td|||}t|dd}|j|| || dd}t||j	 t|
| dS )a  Test multinomial hessian for 3 classes and 2 points.

    For n_classes = 3 and n_samples = 2, we have
      p0 = [p0_0, p0_1]
      p1 = [p1_0, p1_1]
      p2 = [p2_0, p2_1]
    and with 2 x 2 diagonal subblocks
      H = [p0 * (1-p0),    -p0 * p1,    -p0 * p2]
          [   -p0 * p1, p1 * (1-p1),    -p1 * p2]
          [   -p0 * p2,    -p1 * p2, p2 * (1-p2)]
      hess = X' H X
    """
    n_samples, n_features, n_classes = 2, 5, 3
    loss = LinearModelLoss(
        base_loss=HalfMultinomialLoss(n_classes=n_classes), fit_intercept=False
    )
    X, y, coef = random_X_y_coef(
        linear_model_loss=loss,
        n_samples=n_samples,
        n_features=n_features,
        seed=global_random_seed,
    )
    coef = coef.ravel(order="F")  # this is in line with the implementation

    if sample_weight == "range":
        sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])

    grad, hess, _ = loss.gradient_hessian(coef, X, y, sample_weight=sample_weight)
    # The Hessian must be a symmetric matrix.
    assert_allclose(hess, hess.T)

    weights, intercept, raw_prediction = loss.weight_intercept_raw(coef, X)
    grad_pointwise, proba = loss.base_loss.gradient_proba(
        y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight
    )
    p0d, p1d, p2d, oned = (
        np.diag(proba[:, 0]),
        np.diag(proba[:, 1]),
        np.diag(proba[:, 2]),
        np.diag(np.ones(n_samples)),
    )
    h = np.block(
        [
            [p0d * (oned - p0d), -p0d * p1d, -p0d * p2d],
            [-p0d * p1d, p1d * (oned - p1d), -p1d * p2d],
            [-p0d * p2d, -p1d * p2d, p2d * (oned - p2d)],
        ]
    )
    h = h.reshape((n_classes, n_samples, n_classes, n_samples))
    if sample_weight is None:
        h /= n_samples
    else:
        h *= sample_weight / np.sum(sample_weight)
    # hess_expected.shape = (n_features, n_classes, n_classes, n_features)
    hess_expected = np.einsum("ij, mini, ik->jmnk", X, h, X)
    hess_expected = np.moveaxis(hess_expected, 2, 3)
    hess_expected = hess_expected.reshape(
        n_classes * n_features, n_classes * n_features, order="C"
    )
    assert_allclose(hess_expected, hess_expected.T)
    assert_allclose(hess, hess_expected)
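

# The [::2] slices below create non-contiguous arrays, exercising the
# contiguity checks on the gradient_out and hessian_out buffers.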
def test_linear_loss_gradient_hessian_raises_wrong_out_parameters():
    """Test that wrong gradient_out and hessian_out raises errors."""
    n_samples, n_features, n_classes = 5, 2, 3
    loss = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=False)
    X = np.ones((n_samples, n_features))
    y = np.ones(n_samples)
    coef = loss.init_zero_coef(X)

    gradient_out = np.zeros(1)
    with pytest.raises(
        ValueError, match="gradient_out is required to have shape coef.shape"
    ):
        loss.gradient_hessian(
            coef=coef, X=X, y=y, gradient_out=gradient_out, hessian_out=None
        )

    hessian_out = np.zeros(1)
    with pytest.raises(ValueError, match="hessian_out is required to have shape"):
        loss.gradient_hessian(
            coef=coef, X=X, y=y, gradient_out=None, hessian_out=hessian_out
        )

    loss = LinearModelLoss(base_loss=HalfMultinomialLoss(), fit_intercept=False)
    coef = loss.init_zero_coef(X)

    gradient_out = np.zeros((2 * n_classes, n_features))[::2]
    with pytest.raises(ValueError, match="gradient_out must be F-contiguous"):
        loss.gradient_hessian(coef=coef, X=X, y=y, gradient_out=gradient_out)

    hessian_out = np.zeros((2 * n_classes * n_features, n_classes * n_features))[::2]
    with pytest.raises(ValueError, match="hessian_out must be contiguous"):
        loss.gradient_hessian(
            coef=coef, X=X, y=y, gradient_out=None, hessian_out=hessian_out
        )