Rev. | 89465b6a444b6333459f9faa9faba64546caaf9e |
---|---|
Size | 5,751 bytes |
Time | 2008-07-30 00:56:48 |
Author | iselllo |
Log message | I added a script which evaluates the kernel elements from a system |
#! /usr/bin/env python
import scipy as s
import numpy as n
import pylab as p
A=p.load("k_vec_binary00004.dat")
d = {}
for r in A:
    t = tuple(r)
    d[t] = d.get(t,0) + 1
# The dict d now has the counts of the unique rows of A.
B = n.array(d.keys()) # The unique rows of A
C = n.array(d.values()) # The counts of the unique rows
def kernel_calc(A):
    d = {}
    for r in A:
        t = tuple(r)
        d[t] = d.get(t,0) + 1
    # The dict d now has the counts of the unique rows of A.
    B = n.array(d.keys())   # The unique rows of A
    C = n.array(d.values()) # The counts of the unique rows
    return B,C
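# A minimal sanity check of kernel_calc on a small, hand-made array (hypothetical
# data, not read from the simulation): two identical rows and one distinct row,
# so the returned counts should be 2 and 1.
A_test=n.array([[1., 2.],[1., 2.],[3., 4.]])
B_test, C_test=kernel_calc(A_test)
print "B_test is, ", B_test
print "C_test is, ", C_test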
print "A is, ", A
print "B is, ", B
print "C is, ", C
mycalc=kernel_calc(A)
print "mycalc is, ", mycalc
print "mycalc[0] is, ", mycalc[0]
print "mycalc[1] is, ", mycalc[1]
B2=mycalc[0]
C2=mycalc[1]
print "B is, ", B2
print "C is, ", C2
print "B-B2 is, ", B-B2
print "C-C2 is, ", C-C2
#Now proper evaluation of the kernel!
dist=p.load("cluster_dist00003")
print "dist is,", dist
delta_t=2. # in the complete code, make this automatic by reading it from the file time.dat
# I need to build the n_i and n_j arrays from the (i,j) collision-pair matrix (B2)
dim=s.shape(B2)
n_i=s.zeros(dim[0])
n_j=s.zeros(dim[0])
print "n_i is, ", n_i
for i in xrange(len(B2[:,0])):
    print "B2[i,0] is, ", B2[i,0]
    n_i[i]=len(s.where(dist==B2[i,0])[0])
    n_j[i]=len(s.where(dist==B2[i,1])[0])
print "n_i, n_j now are, ", n_i, n_j
kernel_list=C2/(n_i*n_j)
print "kernel_list is, ", kernel_list
#Now I define a function to do the same as above
def kernel_entries(B2, dist, C2):
    dim=s.shape(B2)
    print "dim is, ", dim
    n_i=s.zeros(dim[0])
    n_j=s.zeros(dim[0])
    for i in xrange(len(B2[:,0])):
        n_i[i]=len(s.where(dist==B2[i,0])[0])
        n_j[i]=len(s.where(dist==B2[i,1])[0])
    kernel_list=C2/(n_i*n_j) # I do not get the whole kernel matrix,
    # but only the matrix elements for the collisions which actually took place
    return kernel_list
kernel_list2=kernel_entries(B2, dist, C2)
print "kernel_list2 is, ", kernel_list2
print "kernel_list-kernel_list2 is, ", kernel_list-kernel_list2
#Now the "correct" function to evaluate the kernel where n_i and n_j are expressed as concentrations
#i.e. number of clusters with i and j monomers divided by the box volume
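# The estimator used below follows from the Smoluchowski collision rate:
# (number of i-j collisions per unit time) = K_ij*c_i*c_j*box_vol, with
# c_i=n_i/box_vol and c_j=n_j/box_vol, so with C2 collisions recorded over
# delta_t the kernel element is K_ij ~ C2/(c_i*c_j*delta_t*box_vol).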
def kernel_entries_normalized(B2, dist, C2, box_vol, delta_t):
    dim=s.shape(B2)
    print "dim is, ", dim
    n_i=s.zeros(dim[0])
    n_j=s.zeros(dim[0])
    for i in xrange(len(B2[:,0])):
        n_i[i]=len(s.where(dist==B2[i,0])[0])
        n_j[i]=len(s.where(dist==B2[i,1])[0])
    n_i=n_i/box_vol
    n_j=n_j/box_vol
    kernel_list=C2/(n_i*n_j*delta_t*box_vol) # I do not get the whole kernel matrix,
    # but only the matrix elements for the collisions which actually took place
    return kernel_list
box_vol=5000./0.01
kernel_corr=kernel_entries_normalized(B2, dist, C2, box_vol,delta_t)
print "the kernel elements are, ", kernel_corr
# Now a calculation of the kernel from continuum theory, using two fractal-dimension regimes and the friction coefficient beta
a_small=0.36
df_small=2.20
a_large=0.24
df_large=1.62
T_0=0.5
r_mon=0.5
v_mono=4./3.*s.pi*r_mon**3.
print "the monodisperse volume is, ", v_mono
x=s.linspace(v_mono, v_mono*5.,5) # volume grid
n_mon=x/v_mono # solid volume of each cluster expressed as a number of monomers
print "n_mon is, ", n_mon
beta=1. #cluster/monomer 1/tau
k_B=1. #in these units
T_0=0.5 #temperature of the system
m_mon=1. #monomer mass in these units
sigma=1. #monomer diameter
mu=(m_mon*beta)/(3.*s.pi*sigma) # fluid viscosity, from the Stokes drag on a monomer: beta=3*pi*mu*sigma/m_mon
threshold=15. # I define the threshold between small and large clusters, to be used where it matters.
small=s.where(n_mon<=threshold)
large=s.where(n_mon>threshold)
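# Quick check of the size split (with this 5-point grid every cluster has at most
# 5 monomers, so for threshold=15 the "large" set should be empty):
print "small indices are, ", small[0]
print "large indices are, ", large[0]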
def Brow_ker_cont_optim_diffusion_adjusted_and_monomer_beta_fuchs(Vlist):
    # monomer volume
    # r_mon=0.5 # monomer radius
    # v_mon=4./3.*s.pi*r_mon**3.
    # v_mono already defined as a global variable
    # n_mon=Vlist/v_mono # number of monomers in each aggregate
    # print "n_mon is, ", n_mon
    # NB: n_mon, small and large are the global arrays defined above.
    Diff=k_B*T_0/(n_mon*m_mon*beta) # vector with the cluster diffusion coefficients,
    # which just depend on the cluster size.
    R_list=s.zeros(len(Vlist)) # I initialize the array which will contain the
    # radii of gyration
    # threshold=15.
    # small=s.where(n_mon<=threshold)
    # large=s.where(n_mon>threshold)
    # a_small=0.36
    # df_small=2.19
    # a_large=0.241
    # df_large=1.62
    R_list[small]=a_small*n_mon[small]**(1./df_small)
    R_list[large]=a_large*n_mon[large]**(1./df_large)
    # R_list[0]=0.5 # special case for the monomer radius
    # m=rho_p*Vlist # this holds in general (i.e. for Vlist !=3.)
    ## as long as Vlist is the volume of solid
    ## and not the space occupied by the agglomerate structure
    m=Vlist # since rho = 1.
    c=(8.*k_B*T_0/(s.pi*m))**0.5 # mean thermal speed of each cluster
    # print 'c is', c
    l=8.*Diff/(s.pi*c) # cluster "mean free path" entering the Fuchs correction
    # print 'l is', l
    diam_seq=2.*R_list
    g=1./(3.*diam_seq*l)*((diam_seq+l)**3.-(diam_seq**2.+l**2.)**1.5)-diam_seq
    # Fuchs correction factor, interpolating between the continuum and
    # free-molecular collision regimes
    beta_fuchs=(
        (diam_seq[:,s.newaxis]+diam_seq[s.newaxis,:])
        /(diam_seq[:,s.newaxis]+diam_seq[s.newaxis,:]
          +2.*(g[:,s.newaxis]**2.+g[s.newaxis,:]**2.)**0.5)
        + 8.*(Diff[:,s.newaxis]+Diff[s.newaxis,:])/
        ((c[:,s.newaxis]**2.+c[s.newaxis,:]**2.)**0.5*
         (diam_seq[:,s.newaxis]+diam_seq[s.newaxis,:]))
        )**(-1.)
    ## now I have all the bits for the kernel matrix
    # kern_mat=Brow_ker_cont_optim(Vlist)*beta
    kern_mat=4.*s.pi*(Diff[:,s.newaxis]+Diff[s.newaxis,:])* \
        (R_list[:,s.newaxis]+R_list[s.newaxis,:])*beta_fuchs
    return kern_mat
kernel_smolu=Brow_ker_cont_optim_diffusion_adjusted_and_monomer_beta_fuchs(x)
print "smoluchowski kernel is, ", kernel_smolu
print "So far so good"