@@ -234,6 +234,8 @@ def map_visibilities(self, u, v, V, weights, frequencies=None, geometry=None):
             'j': js[0],
             'null_likelihood': H0,
             'hash': [False, self._DHT, geometry, self._vis_model, self._scale_height],
+            'V': Vs,
+            'W': ws,
         }
 
     def check_hash(self, hash, multi_freq=False, geometry=None):
@@ -628,9 +630,12 @@ class GaussianModel:
628630 """
629631
630632 def __init__ (self , DHT , M , j , p = None , scale = None , guess = None ,
631- Nfields = None , noise_likelihood = 0 ):
633+ Nfields = None , noise_likelihood = 0 ,
634+ Wvalues = None , V = None ):
632635
633636 self ._DHT = DHT
637+ self ._Wvalues = Wvalues
638+ self ._V = V
634639
635640 # Correct shape of design matrix etc.
636641 if len (M .shape ) == 2 :
@@ -687,7 +692,19 @@ def __init__(self, DHT, M, j, p=None, scale=None, guess=None,
 
                 self._Sinv[sn:en, sn:en] += Sj[n]
         else:
-            self._Sinv = None
+            q_array = self._DHT.q
+
+            def true_squared_exponential_kernel(q, p, l):
+                # Squared-exponential kernel in visibility space:
+                # K_ij = sqrt(p_i * p_j) * exp(-(q_i - q_j)^2 / (2 * l^2))
+                q1, q2 = np.meshgrid(q, q)
+                p1, p2 = np.meshgrid(p, p)
+                SE_Kernel = np.sqrt(p1 * p2) * np.exp(-0.5 * (q1 - q2)**2 / l**2)
+                return SE_Kernel
+
+            Ykm = self._DHT.coefficients(direction="backward")
+            # The prior matrix S is built only after the design matrix M is
+            # set below, since M is needed to find the best-fit
+            # hyperparameters for S.
 
         # Compute the design matrix
         self._M = np.zeros([Nr*Nfields, Nr*Nfields], dtype='f8')
@@ -707,7 +724,89 @@ def __init__(self, DHT, M, j, p=None, scale=None, guess=None,
 
         self._like_noise = noise_likelihood
 
+        # The design matrix M is now defined, so the best-fit hyperparameters
+        # for S can be found (see minimizeS below) and S assembled from them.
+        # This only applies when no power spectrum p was supplied.
+        if p is None:
+            m, c, l = self.minimizeS()
+            pI = np.exp(m * np.log(q_array) + c)
+            S_fspace = true_squared_exponential_kernel(q_array, pI, l)
+            S_real = np.dot(np.transpose(Ykm), np.dot(S_fspace, Ykm))
+            S_real_inv = np.linalg.inv(S_real)
+            self._Sinv = S_real_inv
+
         self._fit()
+
+    def minimizeS(self):
+        """Find the hyperparameters (m, c, l) of the squared-exponential
+        power-spectrum prior by maximizing the marginal likelihood."""
+        from scipy.optimize import minimize
+        from scipy.special import gamma
+
+        V = self._V
+
+        def calculate_S(m, c, l):
+            # Build S in real space from a power-law power spectrum,
+            # p(q) = exp(m*log(q) + c), consistent with the reconstruction
+            # of pI in __init__.
+            q_array = self._DHT.q
+            p_array = np.exp(m * np.log(q_array) + c)
+
+            def true_squared_exponential_kernel(q, p, l):
+                q1, q2 = np.meshgrid(q, q)
+                p1, p2 = np.meshgrid(p, p)
+                SE_Kernel = np.sqrt(p1 * p2) * np.exp(-0.5 * (q1 - q2)**2 / l**2)
+                return SE_Kernel
+
+            Ykm = self._DHT.coefficients(direction="backward")
+            S_fspace = true_squared_exponential_kernel(q_array, p_array, l)
+            S_real = np.dot(np.transpose(Ykm), np.dot(S_fspace, Ykm))
+            return S_real
+
+        def calculate_D(S):
+            # D = (M + S^-1)^-1 is the posterior covariance.
+            S_real_inv = np.linalg.inv(S)
+            Dinv = self._M + S_real_inv
+            D = np.linalg.inv(Dinv)
+            return [Dinv, D]
+
+        def calculate_mu(Dinv):
+            # Posterior mean, mu = D j; fall back to an SVD-based
+            # pseudo-inverse if the Cholesky factorization fails.
+            try:
+                Dchol = scipy.linalg.cho_factor(Dinv)
+                mu = scipy.linalg.cho_solve(Dchol, self._j)
+            except np.linalg.LinAlgError:
+                U, s, Vh = scipy.linalg.svd(Dinv, full_matrices=False)
+                s1 = np.where(s > 0, 1. / s, 0)
+                mu = np.dot(Vh.T, np.multiply(np.dot(U.T, self._j), s1))
+            return mu
+
+        def likelihood(param, data):
+            m, c, l = param
+            Wvalues = self._Wvalues
+            N = np.diag(1 / Wvalues)
+
+            # Hyper-prior parameters of the inverse-Gamma prior on l
+            alpha = 1.3
+            l0 = 1e7
+
+            # Inverse-Gamma probability density function
+            def inv_gamma_function(l, alpha, beta):
+                return ((gamma(alpha)*beta)**(-1)) * ((beta/l)**(alpha+1)) * np.exp(-beta/l)
+
+            S = calculate_S(m, c, l)
+            [Dinv, D] = calculate_D(S)
+            mu = calculate_mu(Dinv)
+            logdetS = np.linalg.slogdet(S)[1]
+            logdetD = np.linalg.slogdet(D)[1]
+            logdetN = np.linalg.slogdet(N)[1]
+            factor = np.log(2 * np.pi)
+
+            log_likelihood = 2 * np.log(np.abs((1/m) * (1/c))) \
+                + 2 * np.log(inv_gamma_function(l, alpha, l0)) \
+                - 0.5 * (factor + logdetN) \
+                - 0.5 * (factor + logdetS) \
+                + 0.5 * (factor + logdetD) \
+                + 0.5 * np.dot(np.transpose(self._j), mu) \
+                - 0.5 * np.dot(np.transpose(data), np.dot(np.diag(Wvalues), data))
+            return -log_likelihood
+
+        result = minimize(likelihood, x0=np.array([-5, 60, 1e5]), args=(V,),
+                          bounds=[(-6, 6), (1, 70), (1e4, 1e6)],
+                          method="Nelder-Mead", tol=1e-7,
+                          )
+        m, c, l = result.x
+        print("Result: ", "m: ", m, "c: ", c, "l: ", "{:e}".format(l))
+        return [m, c, l]
 
     def _fit(self):
         """Compute the mean and variance"""