• R/O
  • SSH

Tags
No Tags

Frequently used words (click to add to your profile)

java, c++, android, linux, c#, windows, objective-c, cocoa, 誰得, qt, python, php, ruby, game, gui, bathyscaphe, c, 計画中(planning stage), 翻訳, omegat, framework, twitter, dom, test, vb.net, directx, ゲームエンジン, btron, arduino, previewer

File Info

Rev. 9c25f3895466837d55cf2f13aefe813063fd1df7
Tamanho 31,315 bytes
Hora 2008-01-29 19:22:18
Autor iselllo
Mensagem de Log

I corrected a bug in plot_statistics.py: y_pos2 and z_pos2 were defined
in terms of x_arr instead of y_arr and z_arr respectively!

Content

#! /usr/bin/env python

import scipy as s
import numpy as n
import pylab as p
from rpy import r
import distance_calc as d_calc



n_part=500
density=0.01
#Again, I prefer to give the number of particles and the density as input parameters
# and work out the box size accordingly


ini_config=0
fin_config=1000 #for large data post-processing I need to declare an initial and final
               #configuration I want to read and post-process


by=50 #stride between stored configurations: only one configuration every `by`
      #is read and post-processed (see my_selection below)

n_config=(fin_config-ini_config)/by # total number of configurations, which are numbered from 0 to n_config-1
                                    # (integer division -- Python 2 semantics)

my_selection=s.arange(ini_config,fin_config,by)

box_size=(n_part/density)**(1./3.) #cubic box side consistent with n_part and density
print "the box size is, ", box_size

threshold=1.06 #threshold to consider two particles as directly connected




t_step=1. #here meant as the time-separation between adjacent configurations




fold=0 #whether to fold particle positions back into the box (folding code is commented out below)

gyration = 1 #parameter controlling whether I want to calculate the radius of gyration

#time=s.linspace(0.,((n_config-1)*t_step),n_config) #I define the time along the simulation
time=p.load("time.dat")

time=time[my_selection] #I adapt the time to the initial and final configuration



#some physical parameters of the system
T=0.1  #temperature
beta=0.1 #damping coefficient
v_0=0.  #initial particle mean velocity


n_s_ini=2  #element in the time-sequence corresponding
 # to the chosen reference time for calculating the velocity autocorrelation
 #function

s_ini=(n_s_ini)*t_step #I define a specific time I will need for the calculation
# of the autocorrelation function

print 'the time chosen for the velocity correlation function is, ', s_ini
print 'and the corresponding time on the array is, ', time[n_s_ini]



Len=box_size #box side, used as the period in the minimum-image calculations below
print 'Len is, ', Len



calculate=1 #1: full analysis of the raw configurations; 0: compare saved results with analytics

if (calculate == 1):
	#Full-analysis branch: load the raw configurations, build a periodic
	#distance matrix per stored configuration, and count/measure the clusters.
	

# 	energy_1000=p.load("tot_energy.dat")

# 	p.plot(energy_1000[:,0], energy_1000[:,1] ,linewidth=2.)
# 	p.xlabel('time')
# 	p.ylabel('energy')
# 	#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# 	p.title('energy evolution')
# 	p.grid(True)
# 	p.savefig('energy_2000_particles.pdf')
# 	p.hold(False)

# 	print "the std of the energy for a system with 2000 particles is, ", s.std(energy_1000[config_ini:n_config,1])
# 	print "the mean of the energy for a system  with 2000 particles is, ", s.mean(energy_1000[config_ini:n_config,1])

# 	print "the ratio of the two is,", s.std(energy_1000[config_ini:n_config,1])\
# 	/s.mean(energy_1000[config_ini:n_config,1])
# 	print "and the theoretical value is, ", s.sqrt(2./3.)*(1./s.sqrt(n_part))

	#Flat dump of all the saved configurations: 3*n_part floats per configuration
	tot_config=p.load("total_configuration.dat")
#	tot_config=tot_config[(ini_config*n_part*3):(fin_config*n_part*3)]
 #I cut the array with the total number
	                                               #of configurations for large arrays
	
	 
	  
	   
	print "the length of tot_config is, ", len(tot_config)
	#one row per stored configuration: (n_config, 3*n_part), ordered x,y,z per particle
	tot_config=s.reshape(tot_config,(n_config,3*n_part))

# 	print "tot_config at line 10 is, ", tot_config[10,:]


# 	#Now I save some "raw" data for detailed analysis
# 	i=71
#        	test_arr=tot_config[i,:]
#        	test_arr=s.reshape(test_arr,(n_part,3))
# 	p.save("test_71_raw.dat",test_arr)

	

	#Now I want to "fold" particle positions in order to be sure that they are inside
	# my box.

	#f[:,:,k]=where((Vsum<=V[k+1]) & (Vsum>=V[k]), (V[k+1]-Vsum)/(V[k+1]-V[k]),\
		#f[:,:,k] )
		#f[:,:,k]=where((Vsum<=V[k]) & (Vsum>=V[k-1]),(Vsum-V[k-1])/(V[k]-V[k-1]),\
		#f[:,:,k])



# 	if (fold ==1):
# 		#In the case commented below, the box goes from [-L/2,L/2] but that is proba
# 		#bly not the case in Espresso

# 		#Len=box_size/2.
		 

# 		# and then I do the following

# 		#tot_config=s.where((tot_config>Len) & (s.remainder(tot_config,(2.*Len))<Len),\
# 		#s.remainder(tot_config,Len),tot_config)


# 		#tot_config=s.where((tot_config>Len) & (s.remainder(tot_config,(2.*Len))>=Len),\
# 		#(s.remainder(tot_config,Len)-Len),tot_config)

# 		#tot_config=s.where((tot_config< -Len) &(s.remainder(tot_config,(-2.*Len))< -Len)\
# 		#(s.remainder(tot_config,-Len)+Len),tot_config)

# 		#tot_config=s.where((tot_config< -Len) &(s.remainder(tot_config,(-2.*Len))>=-Len)\
# 		#s.remainder(tot_config,-Len),tot_config)


# #Now the case when the box is [0,L], so Len is actually the box size

# 		p.save("tot_config_unfolded.dat",tot_config)

# 		Len=box_size
	
# 		tot_config=s.where(tot_config>Len,s.remainder(tot_config,Len),tot_config)
# 		tot_config=s.where(tot_config<0.,(s.remainder(tot_config,-Len)+Len),tot_config)

# 		print "the max of tot_config is", tot_config.max()
# 		print "the min of tot_config is", tot_config.min()
	
# 		p.save("tot_config_my_folding.dat",tot_config)

# 	x_0=tot_config[0,0] #initial x position of all my particles



# 	x_col=s.arange(n_part)*3

# 	print "x_col is, ", x_col


# 	#Now I plot the motion of particle number 1; I follow the three components

# 	p.plot(time,tot_config[:,0]  ,linewidth=2.)
# 	p.xlabel('time')
# 	p.ylabel('x position particle 1 ')
# 	#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# 	p.title('x_1 vs time')
# 	p.grid(True)
# 	p.savefig('x_position_particle_1.pdf')
# 	p.hold(False)

# #	p.save("mean_square_disp.dat", var_x_arr)


# 	p.plot(time,tot_config[:,1]  ,linewidth=2.)
# 	p.xlabel('time')
# 	p.ylabel('y position particle 1 ')
# 	#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# 	p.title('y_1 vs time')
# 	p.grid(True)
# 	p.savefig('y_position_particle_1.pdf')
# 	p.hold(False)

# #	p.save("mean_square_disp.dat", var_x_arr)


# 	p.plot(time,tot_config[:,2]  ,linewidth=2.)
# 	p.xlabel('time')
# 	p.ylabel('z position particle 1 ')
# 	#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# 	p.title('z_1 vs time')
# 	p.grid(True)
# 	p.savefig('z_position_particle_1.pdf')
# 	p.hold(False)

# #	p.save("mean_square_disp.dat", var_x_arr)




	#Unpack the flat configuration array into one (n_config, n_part) array per
	#Cartesian coordinate: columns 3*i, 3*i+1, 3*i+2 hold x, y, z of particle i.
	x_arr=s.zeros((n_config,n_part))
	y_arr=s.zeros((n_config,n_part))
       	z_arr=s.zeros((n_config,n_part))

	

        for i in xrange(0,n_part):
		x_arr[:,i]=tot_config[:,3*i]
		y_arr[:,i]=tot_config[:,(3*i+1)]
       		z_arr[:,i]=tot_config[:,(3*i+2)]


#	print "the x coordinates are, ", x_arr

# 	#Now a test: I try to calculate the radius of gyration
# 	#with a particular distance (suggestion by Yannis)
		
#  	mean_x_arr=s.zeros(n_config)

# 	mean_x_arr=x_arr.mean(axis=1)

	
#  	mean_y_arr=s.zeros(n_config)

# 	mean_y_arr=y_arr.mean(axis=1)


#  	mean_z_arr=s.zeros(n_config)

# 	mean_z_arr=z_arr.mean(axis=1)

# 	var_x_arr2=s.zeros(n_config)
# 	var_y_arr2=s.zeros(n_config)
# 	var_y_arr2=s.zeros(n_config)



# 	for i in xrange(0,n_config):
# 		for j in xrange(0,n_part):
# 			var_x_arr2[i]=var_x_arr2[i]+((x_arr[i,j]-mean_x_arr[i])- \
# 				       Len*n.round((x_arr[i,j]-mean_x_arr[i])/Len))**2.
	

# 	var_x_arr2=var_x_arr2/n_part
# 	print 'var_x_arr2 is, ', var_x_arr2
# 	p.save( "test_config_before_manipulation.dat",x_arr[600,:])

	

# 	max_x_dist=s.zeros(n_config)

	
# 	#for i in xrange(0, n_config):
# 		#mean_x_arr[i]=s.mean(x_arr[i,:])
# 		#var_x_arr[i]=s.var(x_arr[i,:])

# 	mean_x_arr[:]=s.mean(x_arr,axis=1)
# 	var_x_arr[:]=s.var(x_arr,axis=1)
# 	max_x_dist[:]=x_arr.max(axis=1)-x_arr.min(axis=1)

# 	print "mean_x_arr is, ", mean_x_arr
# 	print "var_x_arr is, ", var_x_arr


# 	if (fold==1):
		
# 		p.plot(time, max_x_dist ,linewidth=2.)
# 		p.xlabel('time')
# 		p.ylabel('x-dimension of aggregate ')
# 		p.title('x-maximum stretch vs time [folded]')
# 		p.grid(True)
# 		p.savefig('max_x_distance_folded.pdf')
# 		p.hold(False)

# 		p.save("max_x_distance_folded.dat", max_x_dist)


# 	elif (fold==0):
# 		p.plot(time, max_x_dist ,linewidth=2.)
# 		p.xlabel('time')
# 		p.ylabel('x-dimension of aggregate ')
# 		p.title('x-maximum stretch vs time[unfolded]')
# 		p.grid(True)
# 		p.savefig('max_x_distance_unfolded.pdf')
# 		p.hold(False)

# 		p.save("max_x_distance_unfolded.dat", max_x_dist)
		


	

# 	p.plot(time, var_x_arr ,linewidth=2.)
# 	p.xlabel('time')
# 	p.ylabel('mean square displacement ')
# 	#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# 	p.title('VAR(x) vs time')
# 	p.grid(True)
# 	p.savefig('mean_square_disp.pdf')
# 	p.hold(False)

# 	p.save("mean_square_disp.dat", var_x_arr)



# 	p.plot(time, s.sqrt(var_x_arr) ,linewidth=2.)
# 	p.xlabel('time')
# 	p.ylabel('mean square displacement ')
# 	#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# 	p.title('std(x) vs time')
# 	p.grid(True)
# 	p.savefig('std_of_x_position.pdf')
# 	p.hold(False)

# 	p.save("std_of_x_position.dat", s.sqrt(var_x_arr))




# 	p.plot(time, mean_x_arr ,linewidth=2.)
# 	p.xlabel('time')
# 	p.ylabel('mean x position ')
# 	#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# 	p.title('mean(x) vs time')
# 	p.grid(True)
# 	p.savefig('mean_x_position.pdf')
# 	p.hold(False)

# 	p.save("mean_x_position.dat", mean_x_arr)


# 	#I now calculate the radius of gyration

# 	if (gyration ==1):

# 		mean_y_arr=s.zeros(n_config)
# 		mean_z_arr=s.zeros(n_config)


		

# 		var_y_arr[:]=s.var(y_arr,axis=1)
# 		var_z_arr[:]=s.var(z_arr,axis=1)

# 		mean_y_arr[:]=s.mean(y_arr,axis=1)
# 		mean_z_arr[:]=s.mean(z_arr,axis=1)


# 		r_gyr=s.sqrt(var_x_arr+var_y_arr+var_z_arr)

# 		p.save("r_gyr_my_post_processing.dat",r_gyr)
		
# 		p.plot(time, r_gyr ,linewidth=2.)
# 		p.xlabel('time')
# 		p.ylabel('Radius of gyration')
# 		p.title('Radius of gyration vs time')
# 		p.grid(True)
# 		p.savefig('radius of gyration.pdf')
# 		p.hold(False)

# 		#Now I want to calculate the distance of the centre of
# 		#mass of my aggregate from the origin
# 		dist_origin=s.sqrt(mean_x_arr**2.+mean_y_arr**2.+mean_z_arr**2.)

# 		p.plot(time, dist_origin ,linewidth=2.)
# 		p.xlabel('time')
# 		p.ylabel('Mean distance from origin')
# 		p.title('Mean distance vs time')
# 		p.grid(True)
# 		p.savefig('mean_distance_from_origin.pdf')
# 		p.hold(False)

		

# #Now some plots of the std's along the other 2 directions
# 		p.plot(time, s.sqrt(var_y_arr) ,linewidth=2.)
# 		p.xlabel('time')
# 		p.ylabel('mean square displacement ')
# 		p.title('std(y) vs time')
# 		p.grid(True)
# 		p.savefig('std_of_y_position.pdf')
# 		p.hold(False)

# 		p.save("std_of_y_position.dat", s.sqrt(var_y_arr))


# 		p.plot(time, s.sqrt(var_z_arr) ,linewidth=2.)
# 		p.xlabel('time')
# 		p.ylabel('mean square displacement ')
# 		p.title('std(z) vs time')
# 		p.grid(True)
# 		p.savefig('std_of_z_position.pdf')
# 		p.hold(False)

# 		p.save("std_of_z_position.dat", s.sqrt(var_z_arr))



# 		p.plot(time, mean_y_arr ,linewidth=2.)
# 		p.xlabel('time')
# 		p.ylabel('mean y position ')
# 		p.title('mean(y) vs time')
# 		p.grid(True)
# 		p.savefig('mean_y_position.pdf')
# 		p.hold(False)

# 		p.save("mean_y_position.dat", mean_y_arr)




# 		p.plot(time, mean_z_arr ,linewidth=2.)
# 		p.xlabel('time')
# 		p.ylabel('mean z position ')
# 		p.title('mean(z) vs time')
# 		p.grid(True)
# 		p.savefig('mean_z_position.pdf')
# 		p.hold(False)

# 		p.save("mean_z_position.dat", mean_z_arr)


# #Now some comparative plots:


# 		p.plot(time, mean_x_arr,time, mean_y_arr,time, mean_z_arr,linewidth=2.)
# 		p.xlabel('time')
# 		p.ylabel('mean position')
# 		p.legend(('mean x','mean y','mean z',))
# 		p.title('Evolution mean position')
# 		p.grid(True)
# 		p.savefig('mean_position_comparison.pdf')
# 		p.hold(False)


# 		p.plot(time, var_x_arr,time, var_y_arr,time, var_z_arr,linewidth=2.)
# 		p.xlabel('time')
# 		p.ylabel('mean square displacement')
# 		p.legend(('var x','var y','var z',))
# 		p.title('Evolution mean square displacement')
# 		p.grid(True)
# 		p.savefig('mean_square_displacement_comparison.pdf')
# 		p.hold(False)



# 		p.plot(time, s.sqrt(var_x_arr),time, s.sqrt(var_y_arr),time,\
# 		       s.sqrt(var_z_arr),linewidth=2.)
# 		p.xlabel('time')
# 		p.ylabel('std of displacement')
# 		p.legend(('std x','std y','std z',))
# 		p.title('Evolution std of displacement')
# 		p.grid(True)
# 		p.savefig('std_displacement_comparison.pdf')
# 		p.hold(False)





# # 		Now I compare my calculations with the one made
# # 		by espresso:

# 		r_gyr_esp=p.load("rgyr.dat")
		
# 		p.plot(time, r_gyr, r_gyr_esp[:,0], r_gyr_esp[:,1],linewidth=2.)
# 		p.xlabel('time')
# 		p.ylabel('Radius of gyration')
# 		p.legend(('my_postprocessing','espresso'))
# 		p.title('Radius of gyration vs time')
# 		p.grid(True)
# 		p.savefig('radius_of_gyration_comparison.pdf')
# 		p.hold(False)

		



# # Now I do pretty much the same thing with the velocity

# 	tot_config_vel=p.load("total_configuration_vel.dat")

	
# 	if (gyration ==1):
# 		r_gyr=s.zeros(n_config)
# 		y_arr=s.zeros((n_config,n_part))
# 		z_arr=s.zeros((n_config,n_part))

# 		var_y_arr=s.zeros(n_config)
# 		var_z_arr=s.zeros(n_config)
		
# 		for i in xrange(0,n_part):
# 			y_arr[:,i]=tot_config[:,(3*i+1)]
# 			z_arr[:,i]=tot_config[:,(3*i+2)]

# 		var_y_arr[:]=s.var(y_arr,axis=1)
# 		var_z_arr[:]=s.var(z_arr,axis=1)
	 
	  
	   
# 	print "the length of tot_config is, ", len(tot_config)
# 	tot_config_vel=s.reshape(tot_config_vel,(n_config,3*n_part))

# 	#print "tot_config at line 10 is, ", tot_config[10,:]


# 	v_0=tot_config_vel[0,0] #initial x position of all my particles
# 	v_arr=s.zeros((n_config,n_part))
# 	print 'OK creating v_arr'
# 	v_col=s.arange(n_part)*3

# 	#print "x_col is, ", x_col

# 	for i in xrange(0,n_part):
# 		v_arr[:,i]=tot_config_vel[:,3*i]

# 	mean_v_arr=s.zeros(n_config)
# 	var_v_arr=s.zeros(n_config)

# #	for i in xrange(0, n_config):
# 	#	mean_v_arr[i]=s.mean(v_arr[i,:])
# 		#var_v_arr[i]=s.var(v_arr[i,:])

# 	mean_v_arr[:]=s.mean(v_arr,axis=1)
# 	var_v_arr[:]=s.var(v_arr,axis=1)




# 	#print "mean_v_arr is, ", mean_v_arr
# 	#print "var_v_arr is, ", var_v_arr

# 	#time=s.linspace(0.,(n_config*t_step),n_config)

# 	p.plot(time, var_v_arr ,linewidth=2.)
# 	p.xlabel('time')
# 	p.ylabel('velocity variance')
# 	#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# 	p.title('VAR(v) vs time')
# 	p.grid(True)
# 	p.savefig('velocity_variance.pdf')
# 	p.hold(False)

# 	p.save("velocity_variance.dat", var_v_arr)

# 	#Now I want to calculate the autocorrelation function.
# 	#First, I need to fix an intial time. I choose something which is well past the
# 	#ballistic regime
	


# 	#Now I start calculating the velocity autocorrelation function
	
# 	vel_autocor=s.zeros(n_config)

	
	
#        	for i in xrange(0, n_config):
# 		vel_autocor[i]=s.mean(v_arr[i]*v_arr[n_s_ini])


# 	p.plot(time, vel_autocor ,linewidth=2.)
# 	p.xlabel('time')
# 	p.ylabel('<v(s)v(t)> ')
# 	#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# 	p.title('Velocity correlation')
# 	p.grid(True)
# 	p.savefig('velocity_correlation_numerics.pdf')
# 	p.hold(False)


# 	p.save("velocity_autocorrelation.dat", vel_autocor)






# 	#Now I try reconstructing the structure of the aggregate.

# 	#first a test case

# 	my_conf=tot_config[919,:]

# 	print 'my_conf is, ', my_conf


# 	#Now I want to reconstruct the structure of the aggregate in 3D
# 	# I choose particle zero as a reference

# 	x_test=s.zeros(3)
# 	y_test=s.zeros(3)
# 	z_test=s.zeros(3)
	
# 	for i in xrange(0,3):
# 		x_test[i]=my_conf[3*i]
# 		y_test[i]=my_conf[3*i+1]
# 		z_test[i]=my_conf[3*i+2]


# 	print "x_test is, ", x_test
# 	print "y_test is, ", y_test
# 	print "z_test is, ", z_test

# 	#OK reading the files

	

# 	pos_0=s.zeros(3) #I initialized the positions of the three particles
# 	pos_1=s.zeros(3)
# 	pos_2=s.zeros(3)

# 	pos_1[0]=r_ij[0] #Now I have given the x-position of particles 1 and 2 wrt particle 0
# 	pos_2[0]=r_ij[1]


# 	#Now I do the same for the y coord!

# 	count_int=0
	
#  	for i in xrange(0,(n_part-1)):
#  		for j in xrange((i+1),n_part):
# 			r_ij[count_int]=y_test[i]-y_test[j]
# 			r_ij[count_int]=r_ij[count_int]-Len*n.round(r_ij[count_int]/Len)
# 			r_ij[count_int]=-r_ij[count_int] #I have better reverse the signs now.
# 			print 'i and j are, ', (i+1), (j+1)
			
# 			count_int=count_int+1

# 	print 'r_ij is, ', r_ij


# 	pos_1[1]=r_ij[0] #Now I have given the x-position of particles 1 and 2 wrt particle 0
# 	pos_2[1]=r_ij[1]
	
	
			
# 	#Now I do the same for the z coord!

# 	count_int=0
	
#  	for i in xrange(0,(n_part-1)):
#  		for j in xrange((i+1),n_part):
# 			r_ij[count_int]=z_test[i]-z_test[j]
# 			r_ij[count_int]=r_ij[count_int]-Len*n.round(r_ij[count_int]/Len)
# 			r_ij[count_int]=-r_ij[count_int] #I have better reverse the signs now.
# 			print 'i and j are, ', (i+1), (j+1)
			
# 			count_int=count_int+1

# 	print 'r_ij is, ', r_ij


# 	pos_1[2]=r_ij[0] #Now I have given the x-position of particles 1 and 2 wrt particle 0
# 	pos_2[2]=r_ij[1]

# 	print 'pos_0 is,', pos_0
# 	print 'pos_1 is,', pos_1
# 	print 'pos_2 is,', pos_2
	
# 	#now I can calculate the radius of gyration!

# 	x_test[0]=pos_0[0]
# 	x_test[1]=pos_1[0]
# 	x_test[2]=pos_2[0]


# 	y_test[0]=pos_0[1]
# 	y_test[1]=pos_1[1]
# 	y_test[2]=pos_2[1]


# 	z_test[0]=pos_0[2]
# 	z_test[1]=pos_1[2]
# 	z_test[2]=pos_2[2]

	
# 	R_g=s.sqrt(s.var(x_test)+s.var(y_test)+s.var(z_test))

# 	print "the correct radius of gyration is, ", R_g

#############################################################
#############################################################
        #Now I start counting the number of aggregates in each saved configuration
	#First I re-build the configuration of the system with the "correct" x_arr,y_arr,z_arr

	# I turned the following bit into a comment since I am using the original coord as
	#returned by Espresso to start with



	
# 	#I re-use the tot_config array for this purpose

# 	for i in xrange(0,n_part):
# 		tot_config[:,3*i]=x_arr[:,i]
# 		tot_config[:,(3*i+1)]=y_arr[:,i]
# 		tot_config[:,(3*i+2)]=z_arr[:,i]

	#p.save("test_calculating_graph.dat",tot_config[71:73,:])

        #Function returning the radius of gyration of one cluster of particles.
        def calc_radius(x_arr, y_arr, z_arr, Len):
            """Radius of gyration of the particles whose coordinates are given
            by the 1D arrays x_arr, y_arr, z_arr, in a periodic cubic box of
            side Len.

            Particle 0 is the reference: every other particle is mapped to its
            minimum-image position relative to particle 0, so clusters wrapped
            across the periodic boundary are measured correctly.  The result is
            sqrt(var(x) + var(y) + var(z)) of the re-mapped coordinates.

            Unlike the previous version, the inputs are NOT modified in place,
            and the cluster size is taken from the arrays themselves instead of
            the global n_part (the old x_arr[1:n_part] assignment only worked
            for clusters smaller than n_part thanks to numpy slice clamping).
            """
            def _unfold(coord):
                #minimum-image displacement of every particle from particle 0
                delta = coord[1:] - coord[0]
                delta = delta - Len*n.round(delta/Len)
                #particle 0 sits at the origin of the unfolded frame
                return n.concatenate(([0.], delta))

            radius = n.sqrt(n.var(_unfold(x_arr))
                            + n.var(_unfold(y_arr))
                            + n.var(_unfold(z_arr)))
            return radius






	#Now I try loading the R script
	
        #Load the R helper functions (mycluster2 etc.) used below through rpy
        r.source("cluster_functions.R")

	#I now calculate the number of clusters in each configuration

	n_clusters=s.zeros(n_config)
#	min_dist=s.zeros(n_config)
	dist_mat=s.zeros((n_part,n_part))
	#Loop over the stored configurations: one distance matrix and one
	#cluster decomposition per configuration.
	for i in xrange(0,n_config):
		test_arr=tot_config[i,:]
		test_arr=s.reshape(test_arr,(n_part,3))
# 		if (i==14):
# 			p.save("test_14.dat",test_arr)
		#dist_mat=r.distance(test_arr)
		x_pos=x_arr[i,:]
		y_pos=y_arr[i,:]
		z_pos=z_arr[i,:]
		#periodic (minimum-image) particle-particle distance matrix
		dist_mat=d_calc.distance_calc(x_pos,y_pos,z_pos, box_size)
# 		if (i==71):
# 			p.save("distance_matrix_71.dat",dist_mat)
# 			p.save("x_pos_71.dat",x_pos)


		
                #Cluster decomposition of this configuration at the given
                #threshold (the R helper presumably also writes the temp files
                #loaded below -- TODO confirm against cluster_functions.R)
                clust_struc= (r.mycluster2(dist_mat,threshold)) #a cumbersome
		    #way to make sure I have an array even when clust_struct is a single
		    #number (i.e. I have only a cluster)
#                print'clust_struct is, ', clust_struc
# 		n_clusters[i]=r.mycluster(dist_mat,threshold)


                #number of clusters found for configuration i (read back from
                #nc_temp.dat, presumably written by the mycluster2 call above)
                n_clusters[i]=p.load("nc_temp.dat")
#		print 'the shape of cluster_struct is, ',s.shape(clust_struc)
                #dist_mat=s.where(dist_mat==0.,1000.,dist_mat)
#		min_dist[i]=(s.ravel(dist_mat)).min()


                #print 'The cluster distribution is, ', clust_struc[1:,0]
                #save the cluster size
		#Save the cluster-size distribution under a name tagged with the
		#configuration number, and plot its histogram.
		cluster_name="cluster_dist%05d"%my_selection[i]
                my_cluster=p.load("cluster_dist_temp.dat")
		p.save(cluster_name,my_cluster)

                p.hist(my_cluster)
                cluster_name="hist_number_cluster%05d"%my_selection[i]
                p.savefig(cluster_name)
                p.hold(False)

                #Now I re-organize the particles in my configuration
                #by putting together those which are in the same
                #cluster

                #Group the particle indices cluster by cluster: argsort of the
                #cluster labels puts members of the same cluster side by side,
                #and my_lim holds the boundaries of each group.
                part_in_clust=s.argsort(clust_struc)
                r_gyr_dist=s.zeros(len(my_cluster)) #this will contain the distribution of
                #the calculated radii of gyration

                my_sum=s.cumsum(my_cluster)
                f=s.arange(1) #simply 0 but as an array!
                my_lim=s.concatenate((f,my_sum))
                for m in xrange(0,len(my_cluster)):
                    x_pos2=x_arr[i,part_in_clust[my_lim[m]:my_lim[m+1]]]
                    y_pos2=y_arr[i,part_in_clust[my_lim[m]:my_lim[m+1]]]
                    z_pos2=z_arr[i,part_in_clust[my_lim[m]:my_lim[m+1]]]
                    r_gyr_dist[m]=calc_radius(x_pos2,y_pos2,z_pos2,box_size)


		#Save and histogram the distribution of radii of gyration.
		cluster_name="R_gyr_dist%05d"%my_selection[i]
                p.save(cluster_name,r_gyr_dist)
                #print 'the distribution of Rg is, ', r_gyr_dist
                p.hist(r_gyr_dist)
                cluster_name="hist_radius_gyration%05d"%my_selection[i]
                p.savefig(cluster_name)
                p.hold(False)
                
# 	print 'the evolution of the number of clusters is, ', n_clusters

	p.save("number_cluster.dat",n_clusters)
		
	#Time evolution of the number of clusters.
	p.plot(time, n_clusters,linewidth=2.)
	p.xlabel('time')
	p.ylabel('number of clusters ')
	#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
	p.title('Evolution Number of Clusters')
	p.grid(True)
	p.savefig('number_clusters_vs_time.pdf')
	p.hold(False)
	

# 	p.plot(time, min_dist,linewidth=2.)
# 	p.xlabel('time')
# 	p.ylabel('Minimum distance ')
# 	#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# 	p.title('Evolution Mininum Distance')
# 	p.grid(True)
# 	p.savefig('minimum_distance_vs_time.pdf')
# 	p.hold(False)

	


	
#I comment the following lines which are a test I performed in order to test the procedure
#for counting the clusters on a single configuration
# 	test_arr=tot_config[19,:]
# 	print "test_arr now is, ", test_arr
# 	#Now I work out a distance between particles 0 and 2 just to make sure...
# 	d_02=s.sqrt((test_arr[0]-test_arr[6])**2.+(test_arr[1]-test_arr[7])**2.+\
# 		    (test_arr[2]-test_arr[8])**2.)
# 	print "d_02 is, ", d_02
# 	#important!!!! First I have to change the shape of the array!
# 	test_arr=s.reshape(test_arr,(n_part,3))

# 	dist_mat=r.distance(test_arr)

# 	print 'd_02 calculated via dist_mat is [2 results], ',dist_mat[0,2],dist_mat[2,0]

# 	print 'dist_mat is, ', dist_mat
# 	print 'the 1st column of dist_mat is, ', dist_mat[:,0]
# 	print 'and the 1st row is, ', dist_mat[0,:]
# 	print 'the dimensions of dist_mat are, ', shape(dist_mat)
# 	print 'the mean separation distance is, ', s.mean(s.ravel(dist_mat))
# 	threshold=1.2
# 	print "the shape of test_arr is, ", shape(test_arr)
# 	n_cluster=r.mycluster(dist_mat,threshold)
# 	print 'the number of clusters is, ', n_cluster
	




#######################################################################
#######################################################################
#######################################################################
#######################################################################

#Now I deal with the calculation of the radius of gyration


#I change the following part into a comment since I am not sure that is the right way to
# re-organize the "raw" data from Espresso


       	r_gyr=s.zeros(n_config)


	#Arrays meant to hold the per-configuration position variances; the code
	#that filled them is commented out below, so they currently stay at zero.
	var_x_arr=s.zeros(n_config)
       	var_y_arr=s.zeros(n_config)
	var_z_arr=s.zeros(n_config)



# # 	len_int=(n_part*n_part-n_part)/2
# # 	print 'the number of interactions to be counted is, ', len_int

# 	r_0j=s.zeros((n_config,(n_part-1)))

# 	count_int=0

# 	#The following commented loop is what I would need to evaluate all the interaction
# 	#distances among the particles and it goes like n_part**2. I cannot run it as it is
# 	#since the array x_arr are 2D, but it gives an idea of the mechanism.
# 	#If I want to reconstruct the position of the particles wrt particle zero, then I
# 	#need only a loop on n-1 particles for each coord (and each stored configuration)

	
# #  	for i in xrange(0,(n_part-1)):
# #  		for j in xrange((i+1),n_part):
# # 			r_ij[count_int]=x_arr[i]-x_arr[j]
# # 			r_ij[count_int]=r_ij[count_int]-Len*n.round(r_ij[count_int]/Len)
# # 			r_ij[count_int]=-r_ij[count_int] #I have better reverse the signs now.
# # 			print 'i and j are, ', (i+1), (j+1)
			
# # 			count_int=count_int+1

# # 	print 'r_ij is, ', r_ij




# #First I am going to calculate the correct variance of x

#  	for i in xrange(0,n_config):
#  		for j in xrange(1,n_part): #so, particle zero is now the reference particle
# 			r_0j[i,j-1]=x_arr[i,0]-x_arr[i,j]
# 			r_0j[i,j-1]=-(r_0j[i,j-1]-Len*n.round(r_0j[i,j-1]/Len))
# 			#r_ij[count_int]=-r_ij[count_int] #I have better reverse the signs now.
# 			#print 'i and j are, ', (i+1), (j+1)
	

# 	#Now I re-define the x_arr in order to be able to take tha variance correctly
# 	x_arr[:,0]=0.
# 	x_arr[:,1:n_part]=r_0j

# 	#var_x_arr[:]=s.var(r_0j, axis=1)
# 	var_x_arr[:]=s.var(x_arr, axis=1)


# #	p.save("test_config_after_manipulation.dat",r_0j[600,:])

# #Now I am going to do the same along y and z


#  	for i in xrange(0,n_config):
#  		for j in xrange(1,n_part): #so, particle zero is now the reference particle
# 			r_0j[i,j-1]=y_arr[i,0]-y_arr[i,j]
# 			r_0j[i,j-1]=-(r_0j[i,j-1]-Len*n.round(r_0j[i,j-1]/Len))
# 			#r_ij[count_int]=-r_ij[count_int] #I have better reverse the signs now.
# 			#print 'i and j are, ', (i+1), (j+1)

# 	y_arr[:,0]=0.
# 	y_arr[:,1:n_part]=r_0j

# 	#var_x_arr[:]=s.var(r_0j, axis=1)
# 	var_y_arr[:]=s.var(y_arr, axis=1)


	
			


#  	for i in xrange(0,n_config):
#  		for j in xrange(1,n_part): #so, particle zero is now the reference particle
# 			r_0j[i,j-1]=z_arr[i,0]-z_arr[i,j]
# 			r_0j[i,j-1]=-(r_0j[i,j-1]-Len*n.round(r_0j[i,j-1]/Len))
# 			#r_ij[count_int]=-r_ij[count_int] #I have better reverse the signs now.
# 			#print 'i and j are, ', (i+1), (j+1)
	

# 	#print "r_0j is, ", r_0j


# 	z_arr[:,0]=0.
# 	z_arr[:,1:n_part]=r_0j

# 	#var_x_arr[:]=s.var(r_0j, axis=1)
# 	var_z_arr[:]=s.var(z_arr, axis=1)

# #	print 'z_arr is,', z_arr


#         r_gyr=s.sqrt(var_x_arr+var_y_arr+var_z_arr)

# 	p.save("r_gyr_correct.dat",r_gyr)
		
#        	p.plot(time, r_gyr ,linewidth=2.)
#        	p.xlabel('time')
#        	p.ylabel('Radius of gyration')
#        	p.title('Radius of gyration vs time')
#        	p.grid(True)
#        	p.savefig('radius_gyration_correct.pdf')
#        	p.hold(False)






#####################################################################################
#####################################################################################
#####################################################################################
#####################################################################################
#### End of the part I had changed into a comment






elif (calculate ==0):
	
	#Analytics-only branch: reload the mean-square displacement saved by a
	#previous run with calculate == 1 and compare it with closed-form results.
	var_x_arr=p.load("mean_square_disp.dat")
	
	#now some analytics of the mean-square displacement
	def x_sq(T, beta, v_0, t):
		"""Analytic mean-square displacement of a free Langevin particle.

		T is the temperature, beta the damping coefficient, v_0 the initial
		mean velocity, t the time (scalar or array); units with m = k_B = 1,
		consistent with the rest of this script.
		"""
		decay = n.exp(-beta*t)
		#thermal (diffusive) part: (2T/beta)t - (T/beta^2)(3 - 4 e^-bt + e^-2bt)
		thermal = 2.*T/beta*t - T/beta**2.*(3. - 4.*decay + decay**2.)
		#contribution of the initial mean velocity.  NOTE: the original code
		#used (1 - e^-bt) without the square; the Ornstein-Uhlenbeck result
		#requires (1 - e^-bt)**2 (harmless here since this script sets v_0=0).
		#Also n.exp replaces s.exp: the scipy top-level alias was removed.
		ballistic = v_0**2./beta**2.*(1. - decay)**2.
		return thermal + ballistic
	
#	time=s.linspace(0.,(n_config*t_step),n_config)
	
	
	#Analytic mean-square displacement evaluated on the simulation times,
	#compared against the saved numerical result.
	x_sq_book=x_sq(T,beta,v_0,time)
	
  	p.plot(time, var_x_arr,time, x_sq_book ,linewidth=2.)
	p.xlabel('time')
	p.ylabel('mean square displacement ')
	#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
	p.title('VAR(x) vs time')
	p.grid(True)
	p.savefig('mean_square_disp_comparison_analytics.pdf')
	p.hold(False)
	
	#now a similar thing for the velocity
	
	def v_sq(T, beta, v_0, Gamma, t):
		"""Analytic velocity variance of a Langevin particle: relaxes from
		v_0**2 towards the stationary value Gamma**2/(2*beta) on a
		1/(2*beta) time scale.  T is unused (kept for interface
		compatibility with the call sites).
		"""
		#n.exp replaces the removed scipy top-level alias s.exp (same function)
		stationary = Gamma**2./(2.*beta)
		return stationary + (v_0**2. - stationary)*n.exp(-2.*beta*t)
	
	Gamma=s.sqrt(2.*beta*T)
	
	#NB: Gamma (NOT the damping parameter which I call beta here!!!) is related to the noise strength i.e.
	# Gamma**2./(2*beta)=k_BT/m i.e. Gamma=sqrt(2*beta*k_b*T/m), but I work in units where
	#Gamma=sqrt(2*beta*T)
	
	
	v_sq_book=v_sq(T,beta,v_0,Gamma,time)
	
	var_v_arr=p.load("velocity_variance.dat")
	
	
	#Compare the measured velocity variance with the analytic prediction.
	#NOTE(review): the title string says 'VAR(x)' but this plots the velocity variance.
	p.plot(time, var_v_arr,time, v_sq_book ,linewidth=2.)
	p.xlabel('time')
	p.ylabel('velocity variance ')
	#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
	p.title('VAR(x) vs time')
	p.grid(True)
	p.savefig('velocity_variance_comparison_analytics.pdf')
	p.hold(False)

	def vel_corr(t, s_ini, v_0, beta, T):
		"""Analytic velocity autocorrelation <v(t) v(s_ini)> of a Langevin
		particle: a transient term decaying with t + s_ini plus the
		stationary contribution T*exp(-beta*|t - s_ini|).
		"""
		#n.exp replaces the removed scipy top-level alias s.exp (same function)
		transient = (v_0**2. - T)*n.exp(-beta*(t + s_ini))
		stationary = T*n.exp(-beta*abs(t - s_ini))
		return transient + stationary

#	def vel_corr_asym(t,s,beta,T):
	#	z=T*s.exp(-beta*abs(t-s))
		#return z




	

	print 'the initial time for the velocity correlation is',s_ini

	print 'T is,', T
	print 'beta is, ', beta
	print 'time is, ', time
	print 'v_0 is, ', v_0
	#Analytic velocity autocorrelation against the fixed reference time s_ini.
	my_vel_corr= vel_corr(time,s_ini,v_0,beta,T)
	print 'my_vel_corr is, ', my_vel_corr




	

	print 'OK before plot'
	
	p.plot(time, my_vel_corr ,linewidth=2.)
	p.xlabel('time')
	p.ylabel('<v(s)v(t)> ')
	#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
	p.title('Velocity correlation')
	p.grid(True)
	p.savefig('velocity_correlation_analytics.pdf')
	p.hold(False)

	#Now I read the velocity autocorrelation function I calculated numerically
	# and try seeing how it compares with the analytics

	vel_autocor_numerics=p.load("velocity_autocorrelation.dat")
	

       	p.plot(time, vel_autocor_numerics,time, my_vel_corr ,linewidth=2.)
	p.xlabel('time')
	p.ylabel('velocity autocorrelation ')
	p.legend(('Numerical Result','Analytical Result'))
	p.title('Velocity Autocorrelation')
	p.grid(True)
	p.savefig('velocity_autocorrelation_comparison_numerics_analytics.pdf')
	p.hold(False)


print "So far so good"