#!/usr/bin/python

"""
#DESCRIPTION
# Calcul du coefficient de correlation de pearson entre differentes variables physiques issues de series temporelles (issues de fichiers netcdf) et 
une serie temporelle de donnees in situ 

REFERENCES
    
BUGS 
    
REVISIONS

"""

import numpy as np
import Scientific.IO.NetCDF as nc

__author__="olga"
__date__ = "Mai 2011"

def read_grid_netcdf(grid_name):
    """
    Read the grid variables: longitude, latitude and mask.
    """
    nc_grid=nc.NetCDFFile(grid_name)
    grid_data = {}.fromkeys( ['longitude', 'latitude', 'mask'] )
    for key in grid_data.keys():
        grid_data[key] = (np.array(nc_grid.variables[key][:]))
    nc_grid.close()
    return grid_data

def reading_csv_data_IG(filename):
    """
    Read the CSV file of the gonadosomatic index (GSI).
    """
    annee=[]
    mois=[]
    bioannee=[]
    biomois=[]
    gsi=[]
    data_out = {}.fromkeys( ['Year','BiolYear','Month','BiolMonth','GSI'] )

    FileIn = open(filename,'rb')
    titles = FileIn.next().strip().split(';')
    #print titles
    for row in FileIn:
        values = row.strip().split(';' )
        data = dict(zip(titles, values))
        y = np.int(data['Year'])
        by = np.int(data['BiolYear'])
        m= np.int(data['Month'])
        bm =np.int(data['BiolMonth'])
        ig= np.float(data['GSI'])

        if (y >= 1992 ):
            annee.append(y)
            bioannee.append(by)
            mois.append(m)
            biomois.append(bm)
            gsi.append(ig)

    data_out['Year']=annee
    data_out['BiolYear']=bioannee
    data_out['Month']=mois
    data_out['BiolMonth']=biomois
    data_out['GSI']=np.array(gsi)

    return data_out


def time_series_mean(data_in,mask,data_gsi):
    """
    Compute, for each time step, the spatial mean over the region defined by the mask.
    """
    dim_gsi=len(data_gsi['GSI']) # from 1992 to 1999
    dim_lon=len(data_in['pp'][0,0,:])
    dim_lat=len(data_in['pp'][0,:,0])
    dim_series=len(data_in['pp'][:,0,0]) # from 1992 to 2000

    # Initialise the output vectors
    output={}.fromkeys(data_in)
    for key in output.keys():
        output[key]=np.zeros(dim_gsi)

    # Compute the regional mean (one value per month from 1992 to 1999) for each variable:
    for key in output.keys():
        for t in xrange(dim_gsi):
            count=0
            for i in xrange(dim_lon):
                for j in xrange(dim_lat):
                    #if (mask['mask'][j,i]<10): #1) or (mask['mask'][j,i]==2)  ): 
                    if ((mask['mask'][j,i]==1) or (mask['mask'][j,i]==2)  ):
                    #if ( (mask['mask'][j,i]==3) or (mask['mask'][j,i]==4)  or  (mask['mask'][j,i]==5)):                     
                        if data_in[key][t,j,i]<30000:
                            output[key][t] +=  data_in[key][t,j,i]
                            count +=1
            if count > 0:
                output[key][t] /= count
            else:
                output[key][t] = np.nan

    counter_NAN=0
    for key in output.keys():
        for t in xrange(dim_gsi):
            if data_gsi['GSI'][t]>900:
                counter_NAN +=1
                output[key][t] = np.nan

    for i in xrange(dim_gsi):
        if (data_gsi['GSI'][i]>900.0):
            data_gsi['GSI'][i]=np.nan

    # plot(data_gsi['GSI'],'r')
    # show()
    # Remove the NaNs from the vectors (data are not available everywhere)
    gsi=data_gsi['GSI'][~np.isnan(data_gsi['GSI'])]
    output_final={}.fromkeys(output.keys())

    for key in output_final.keys():
        output_final[key]=output[key][~np.isnan(output[key])]

    return output_final,gsi
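
# Hedged alternative (not part of the original script): the same regional mean
# computed with numpy boolean indexing instead of the explicit loops in
# time_series_mean above.  It assumes the same mask codes (1 or 2) and the same
# fill-value threshold (values >= 30000 treated as missing), and does not apply
# the GSI-based NaN filtering.
def time_series_mean_vectorized(data_in,mask,data_gsi):
    """
    For each variable, return a 1-D array with one spatial mean per month
    (length = number of GSI records), averaging only over the valid points of
    the masked region.
    """
    dim_gsi=len(data_gsi['GSI'])
    region = (mask['mask']==1) | (mask['mask']==2)
    output={}
    for key in data_in.keys():
        series = np.zeros(dim_gsi)
        for t in xrange(dim_gsi):
            field = data_in[key][t,:,:]
            valid = region & (field<30000)
            series[t] = field[valid].mean() if valid.any() else np.nan
        output[key] = series
    return output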

def main():

    # Mask file
    filename_mask='mask_opti_runinter.nc'

    # Files containing the variables we will work with
    filename1='ROMS_Perou_monthly_92-2000-BGCH.nc'
    filename2='ROMS_Perou_monthly_92-2000-PHYS.nc'
    filename3='ROMS_Perou_monthly_92-2000-FRG.nc'

    # Read the mask
    mask=read_grid_netcdf(filename_mask)

    nc1=nc.NetCDFFile(filename1)
    nc2=nc.NetCDFFile(filename2)
    nc3=nc.NetCDFFile(filename3)

    # Put everything in a dictionary so the data are easier to handle later.  # data from 1992 to 2000
    data_in={}.fromkeys(['pp','O2','zoo_model','temp','frg','salt'])

    data_in['O2']=np.array(nc1.variables['O2'][:,0,:,:] )
    data_in['pp']=np.array(nc1.variables['pp'][:] )
    data_in['zoo_model']=np.array(nc1.variables['zoo_tot'][:] )
    data_in['temp']=np.array(nc2.variables['temperature_clm'][:,0,:,:] )
    data_in['salt']=np.array(nc2.variables['salinity'][:,0,:,:] )
    data_in['frg']=np.array(nc3.variables['epi_ltl_pb'][:] )

    nc1.close()
    nc2.close()
    nc3.close()

    # IGS file (observed data)  # note: data from 1992 to 1999
    filename='IGS1990_1999.csv'
    data_gsi=reading_csv_data_IG(filename)

    # Compute the mean of the time series over the chosen mask region
    output_final,gsi=time_series_mean(data_in,mask,data_gsi)

    # TIME SERIES CORRELATION - PEARSON CORRELATION (assumption: normal distribution; otherwise use stats.spearmanr or stats.kendalltau!)
    # stats library
    from scipy import stats

    # Compute the correlation coefficient (value, pvalue) with stats.pearsonr
    output_correlation={}.fromkeys(output_final.keys())
    for key in output_final.keys():
        output_correlation[key]= stats.pearsonr(output_final[key],gsi)
        print key, "    ", "Pearson Correlation value,pvalue" , output_correlation[key]

        # Some summary statistics on our vectors
        print key, " (size of the data,  (min, max),  arithmetic mean,  unbiased variance, biased skewness,  biased kurtosis)"
        print key, "    " , stats.describe(output_final[key])
        # Returns (size of the data, (min, max), arithmetic mean, unbiased variance, biased skewness, biased kurtosis = Pearson's kurtosis coefficient)
        print " "
        n, (smin, smax), sm, sv, ss, sk = stats.describe(output_final[key])
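
    # Hedged addition (not in the original script): if the normality assumption
    # behind Pearson's r is doubtful, the rank-based alternatives mentioned
    # above can be checked on the same vectors.
    for key in output_final.keys():
        rho, p_s = stats.spearmanr(output_final[key], gsi)
        tau, p_k = stats.kendalltau(output_final[key], gsi)
        print key, "    ", "Spearman rho,pvalue", (rho, p_s), "  Kendall tau,pvalue", (tau, p_k)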

    # Check the significance of the correlation value with the p-value.

    # Then compute the critical correlation threshold from the number of degrees of freedom of the system (Student's t test if the distribution is normal).
    # Still to be coded: compute the number of degrees of freedom of the system, then the critical correlation threshold.
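
    # Hedged sketch (an assumption, not from the original script): one way to
    # finish that step is a two-sided Student t test on r.  With n paired
    # samples the degrees of freedom are n - 2, and the critical Pearson r at
    # level alpha is t_crit / sqrt(dof + t_crit**2).  The alpha value below is
    # an illustrative choice.
    alpha = 0.05
    dof = len(gsi) - 2
    t_crit = stats.t.ppf(1.0 - alpha/2.0, dof)
    r_crit = t_crit / np.sqrt(dof + t_crit**2)
    print "Critical Pearson correlation (alpha=%g, dof=%d): %.3f" % (alpha, dof, r_crit)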


if __name__ == '__main__':
    main()
    print 'FIN'