[cleanup] Great purge of obsolete scripts
Some of the purged scripts might be desirable to reimplement using the new API, especially those related to hexagonal lattices.
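For orientation if these are ever reimplemented: the hexagonal-lattice dispersion scripts below all start by building a Γ–M–K–Γ–K'–M path through the Brillouin zone before scanning k-points. A minimal numpy-only sketch of that construction, extracted from the purged code below (the concrete hexside and kdensity values are placeholders; the scripts took them from --hexside and --kdensity, and nothing here assumes the new API):

import numpy as np

s3 = np.sqrt(3)
hexside = 375e-9   # example value; the old scripts read this from --hexside
kdensity = 66      # points per path segment, the old --kdensity default

# High-symmetry points of the hexagonal-lattice Brillouin zone
bz_0  = np.array((0., 0., 0.))
bz_K1 = np.array((1., 0, 0)) * 4*np.pi/3/hexside/s3
bz_K2 = np.array((1./2., s3/2, 0)) * 4*np.pi/3/hexside/s3
bz_M  = np.array((3./4, s3/4, 0)) * 4*np.pi/3/hexside/s3

# Γ-M-K-Γ-K'-M path, one linear segment per pair of points
segments = [(bz_0, bz_M), (bz_M, bz_K1), (bz_K1, bz_0), (bz_0, bz_K2), (bz_K2, bz_M)]
klist = np.concatenate([a + (b - a) * np.linspace(0, 1, kdensity)[:, None]
                        for a, b in segments], axis=0)
# Cumulative path length, used as the x axis of the dispersion plots
kxmaplist = np.concatenate((np.array([0]),
                            np.cumsum(np.linalg.norm(np.diff(klist, axis=0), axis=-1))))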
This commit is contained in:
parent fc27320988
commit 93118cfc07
@@ -1,547 +0,0 @@
#!/usr/bin/env python3


import argparse, re, random, string
import subprocess
from scipy.constants import hbar, e as eV, pi, c

def make_action_sharedlist(opname, listname):
    class opAction(argparse.Action):
        def __call__(self, parser, args, values, option_string=None):
            if (not hasattr(args, listname)) or getattr(args, listname) is None:
                setattr(args, listname, list())
            getattr(args,listname).append((opname, values))
    return opAction

parser = argparse.ArgumentParser()
#TODO? use type=argparse.FileType('r') ?
parser.add_argument('--TMatrix', action='store', required=True, help='Path to TMatrix file')
parser.add_argument('--griddir', action='store', required=True, help='Path to the directory with precalculated translation operators')
#sizepar = parser.add_mutually_exclusive_group(required=True)
parser.add_argument('--hexside', action='store', type=float, required=True, help='Lattice hexagon size length')
parser.add_argument('--output', action='store', help='Path to output PDF')
parser.add_argument('--store_SVD', action='store_true', help='If specified without --SVD_output, it will save the data in a file named as the PDF output, but with .npz extension instead')
#parser.add_argument('--SVD_output', action='store', help='Path to output singular value decomposition result')
parser.add_argument('--nSV', action='store', metavar='N', type=int, default=1, help='Store and draw N minimum singular values')
parser.add_argument('--scp_to', action='store', metavar='N', type=str, help='SCP the output files to a given destination')
parser.add_argument('--background_permittivity', action='store', type=float, default=1., help='Background medium relative permittivity (default 1)')
parser.add_argument('--sparse', action='store', type=int, help='Skip frequencies for preview')
parser.add_argument('--eVmax', action='store', type=float, help='Skip frequencies above this value')
parser.add_argument('--eVmin', action='store', type=float, help='Skip frequencies below this value')
parser.add_argument('--kdensity', action='store', type=int, default=66, help='Number of k-points per x-axis segment')
parser.add_argument('--lMax', action='store', type=int, help='Override lMax from the TMatrix file')
#TODO some more sophisticated x axis definitions
parser.add_argument('--gaussian', action='store', type=float, metavar='σ', help='Use a gaussian envelope for weighting the interaction matrix contributions (depending on the distance), measured in unit cell lengths (?) FIXME.')
popgrp=parser.add_argument_group(title='Operations')
popgrp.add_argument('--tr', dest='ops', action=make_action_sharedlist('tr', 'ops'), default=list()) # the default value for dest can be set once
popgrp.add_argument('--tr0', dest='ops', action=make_action_sharedlist('tr0', 'ops'))
popgrp.add_argument('--tr1', dest='ops', action=make_action_sharedlist('tr1', 'ops'))
popgrp.add_argument('--sym', dest='ops', action=make_action_sharedlist('sym', 'ops'))
popgrp.add_argument('--sym0', dest='ops', action=make_action_sharedlist('sym0', 'ops'))
popgrp.add_argument('--sym1', dest='ops', action=make_action_sharedlist('sym1', 'ops'))
#popgrp.add_argument('--mult', dest='ops', nargs=3, metavar=('INCSPEC', 'SCATSPEC', 'MULTIPLIER'), action=make_action_sharedlist('mult', 'ops'))
#popgrp.add_argument('--mult0', dest='ops', nargs=3, metavar=('INCSPEC', 'SCATSPEC', 'MULTIPLIER'), action=make_action_sharedlist('mult0', 'ops'))
#popgrp.add_argument('--mult1', dest='ops', nargs=3, metavar=('INCSPEC', 'SCATSPEC', 'MULTIPLIER'), action=make_action_sharedlist('mult1', 'ops'))
popgrp.add_argument('--multl', dest='ops', nargs=3, metavar=('INCL[,INCL,...]', 'SCATL[,SCATL,...]', 'MULTIPLIER'), action=make_action_sharedlist('multl', 'ops'))
popgrp.add_argument('--multl0', dest='ops', nargs=3, metavar=('INCL[,INCL,...]', 'SCATL[,SCATL,...]', 'MULTIPLIER'), action=make_action_sharedlist('multl0', 'ops'))
popgrp.add_argument('--multl1', dest='ops', nargs=3, metavar=('INCL[,INCL,...]', 'SCATL[,SCATL,...]', 'MULTIPLIER'), action=make_action_sharedlist('multl1', 'ops'))
parser.add_argument('--frequency_multiplier', action='store', type=float, default=1., help='Multiplies the frequencies in the TMatrix file by a given factor.')
# TODO enable more flexible per-sublattice specification
pargs=parser.parse_args()
print(pargs)

translations_dir = pargs.griddir
TMatrix_file = pargs.TMatrix
pdfout = pargs.output if pargs.output else (''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)) + '.pdf')
print(pdfout)
if(pargs.store_SVD):
    if re.search('.pdf$', pdfout):
        svdout = re.sub('.pdf$', r'.npz', pdfout)
    else:
        svdout = pdfout + '.npz'
else:
    svdout = None

hexside = pargs.hexside #375e-9
epsilon_b = pargs.background_permittivity #2.3104
gaussianSigma = pargs.gaussian if pargs.gaussian else None # hexside * 222 / 7
interpfreqfactor = pargs.frequency_multiplier
scp_dest = pargs.scp_to if pargs.scp_to else None
kdensity = pargs.kdensity
minfreq = pargs.eVmin*eV/hbar if pargs.eVmin else None
maxfreq = pargs.eVmax*eV/hbar if pargs.eVmax else None
skipfreq = pargs.sparse if pargs.sparse else None
svn = pargs.nSV

# TODO multiplier operation definitions and parsing
#factor13inc = 10
#factor13scat=10

ops = list()
opre = re.compile('(tr|sym|copy|multl|mult)(\d*)')
for oparg in pargs.ops:
    opm = opre.match(oparg[0])
    if opm:
        ops.append(((opm.group(2),) if opm.group(2) else (0,1), opm.group(1), oparg[1]))
    else:
        raise # should not happen
print(ops)

#ops = (
#    # target, operation type (symmetrization / transformation / copy), specification (operation or source),
#    # target: 0, 1, (0,1), (0,), (1,), #NI: 'all'
#    # operation type: sym, tr, copy
#    # specification:
#    #     sym, tr: 'σ_z', 'σ_y', 'C2'; sym: 'C3',
#    #     copy: 0, 1 (source)
#    ((0,1), 'sym', 'σ_z'),
#    #((0,1), 'sym', 'σ_x'),
#    #((0,1), 'sym', 'σ_y'),
#    ((0,1), 'sym', 'C3'),
#    ((1), 'tr', 'C2'),
#
#)

# ----------------- finished basic CLI parsing (except for op arguments) ------------------
import time
begtime=time.time()

from matplotlib.path import Path
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import qpms
import numpy as np
import os, sys, warnings, math
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from scipy import interpolate
nx = None
s3 = math.sqrt(3)


pdf = PdfPages(pdfout)

# In[3]:

# T-matrix specification goes here
cdn = c/ math.sqrt(epsilon_b)
TMatrices_orig, freqs_orig, freqs_weirdunits_orig, lMaxTM = qpms.loadScuffTMatrices(TMatrix_file)
if pargs.lMax:
    lMax = pargs.lMax if pargs.lMax else lMaxTM
my, ny = qpms.get_mn_y(lMax)
nelem = len(my)
if pargs.lMax: #force commandline specified lMax
    TMatrices_orig = TMatrices_orig[...,0:nelem,:,0:nelem]

ž = np.arange(2*nelem)
tž = ž // nelem
mž = my[ž%nelem]
nž = ny[ž%nelem]
TEž = ž[(mž+nž+tž) % 2 == 0]
TMž = ž[(mž+nž+tž) % 2 == 1]

č = np.arange(2*2*nelem)
žč = č % (2* nelem)
tč = tž[žč]
mč = mž[žč]
nč = nž[žč]
TEč = č[(mč+nč+tč) % 2 == 0]
TMč = č[(mč+nč+tč) % 2 == 1]

TMatrices = np.array(np.broadcast_to(TMatrices_orig[:,nx,:,:,:,:],(len(freqs_orig),2,2,nelem,2,nelem)) )

#TMatrices[:,:,:,:,:,ny==3] *= factor13inc
#TMatrices[:,:,:,ny==3,:,:] *= factor13scat
xfl = qpms.xflip_tyty(lMax)
yfl = qpms.yflip_tyty(lMax)
zfl = qpms.zflip_tyty(lMax)
c2rot = qpms.apply_matrix_left(qpms.yflip_yy(3),qpms.xflip_yy(3),-1)

reCN = re.compile('(\d*)C(\d+)')
#TODO C infinity

for op in ops:
    if op[0] == 'all':
        targets = (0,1)
    elif isinstance(op[0],int):
        targets = (op[0],)
    else:
        targets = op[0]

    if op[1] == 'sym':
        mCN = reCN.match(op[2]) # Fuck van Rossum for not having assignments inside expressions
        if op[2] == 'σ_z':
            for t in targets:
                TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(zfl,qpms.apply_ndmatrix_left(zfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
        elif op[2] == 'σ_y':
            for t in targets:
                TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(yfl,qpms.apply_ndmatrix_left(yfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
        elif op[2] == 'σ_x':
            for t in targets:
                TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(xfl,qpms.apply_ndmatrix_left(xfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
        elif op[2] == 'C2': # special case of the latter
            for t in targets:
                TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_matrix_left(c2rot,qpms.apply_matrix_left(c2rot, TMatrices[:,t], -3),-1))/2
        elif mCN:
            rotN = int(mCN.group(2))
            TMatrix_contribs = np.empty((rotN,TMatrices.shape[0],2,nelem,2,nelem), dtype=np.complex_)
            for t in targets:
                for i in range(rotN):
                    rotangle = 2*np.pi*i / rotN
                    rot = qpms.WignerD_yy_fromvector(lMax,np.array([0,0,rotangle]))
                    rotinv = qpms.WignerD_yy_fromvector(lMax,np.array([0,0,-rotangle]))
                    TMatrix_contribs[i] = qpms.apply_matrix_left(rot,qpms.apply_matrix_left(rotinv, TMatrices[:,t], -3),-1)
                TMatrices[:,t] = np.sum(TMatrix_contribs, axis=0) / rotN
        else:
            raise
    elif op[1] == 'tr':
        mCN = reCN.match(op[2]) # Fuck van Rossum for not having assignments inside expressions
        if op[2] == 'σ_z':
            for t in targets:
                TMatrices[:,t] = qpms.apply_ndmatrix_left(zfl,qpms.apply_ndmatrix_left(zfl, TMatrices[:,t], (-4,-3)),(-2,-1))
        elif op[2] == 'σ_y':
            for t in targets:
                TMatrices[:,t] = qpms.apply_ndmatrix_left(yfl,qpms.apply_ndmatrix_left(yfl, TMatrices[:,t], (-4,-3)),(-2,-1))
        elif op[2] == 'σ_x':
            for t in targets:
                TMatrices[:,t] = qpms.apply_ndmatrix_left(xfl,qpms.apply_ndmatrix_left(xfl, TMatrices[:,t], (-4,-3)),(-2,-1))
        elif op[2] == 'C2':
            for t in targets:
                TMatrices[:,t] = qpms.apply_matrix_left(c2rot,qpms.apply_matrix_left(c2rot, TMatrices[:,t], -3),-1)
        elif mCN:
            rotN = int(mCN.group(2))
            power = int(mCN.group(1)) if mCN.group(1) else 1
            TMatrix_contribs = np.empty((rotN,TMatrices.shape[0],2,nelem,2,nelem), dtype=np.complex_)
            for t in targets:
                rotangle = 2*np.pi*power/rotN
                rot = qpms.WignerD_yy_fromvector(lMax, np.array([0,0,rotangle]))
                rotinv = qpms.WignerD_yy_fromvector(lMax, np.array([0,0,-rotangle]))
                TMatrices[:,t] = qpms.apply_matrix_left(rot, qpms.apply_matrix_left(rotinv, TMatrices[:,t], -3),-1)
        else:
            raise
    elif op[1] == 'copy':
        raise # not implemented
    elif op[1] == 'mult':
        raise # not implemented
    elif op[1] == 'multl':
        incy = np.full((nelem,), False, dtype=bool)
        for incl in op[2][0].split(','):
            l = int(incl)
            incy += (l == ny)
        scaty = np.full((nelem,), False, dtype=bool)
        for scatl in op[2][1].split(','):
            l = int(scatl)
            scaty += (l == ny)
        for t in targets:
            TMatrices[np.ix_(np.arange(TMatrices.shape[0]),np.array([t]),np.array([0,1]),scaty,np.array([0,1]),incy)] *= float(op[2][2])
    else:
        raise #unknown operation; should not happen

TMatrices_interp = interpolate.interp1d(freqs_orig*interpfreqfactor, TMatrices, axis=0, kind='linear',fill_value="extrapolate")


# In[4]:

om = np.linspace(np.min(freqs_orig), np.max(freqs_orig),100)
TMatrix0ip = np.reshape(TMatrices_interp(om)[:,0], (len(om), 2*nelem*2*nelem))
f, axa = plt.subplots(2, 2, figsize=(15,15))

#print(TMatrices.shape)
#plt.plot(om, TMatrices[:,0,0,0,0].imag,'r',om, TMatrices[:,0,0,0,0].real,'r--',om, TMatrices[:,0,2,0,2].imag,'b',om, TMatrices[:,0,2,0,2].real,'b--'))

ax = axa[0,0]
ax2 = ax.twiny()
ax2.set_xlim([ax.get_xlim()[0]/eV*hbar,ax.get_xlim()[1]/eV*hbar])
ax.plot(
    om, TMatrix0ip[:,:].imag,'-',om, TMatrix0ip[:,:].real,'--',
)
ax = axa[0,1]
ax2 = ax.twiny()
ax2.set_xlim([ax.get_xlim()[0]/eV*hbar,ax.get_xlim()[1]/eV*hbar])
ax.plot(
    om, abs(TMatrix0ip[:,:]),'-'
)
ax.set_yscale('log')

ax = axa[1,1]
ax2 = ax.twiny()
ax2.set_xlim([ax.get_xlim()[0]/eV*hbar,ax.get_xlim()[1]/eV*hbar])
ax.plot(
    om, np.unwrap(np.angle(TMatrix0ip[:,:]),axis=0),'-'
)

ax = axa[1,0]
ax.text(0.5,0.5,str(pargs).replace(',',',\n'),horizontalalignment='center',verticalalignment='center',transform=ax.transAxes)
pdf.savefig(f)


# In[ ]:

#kdensity = 66 #defined from cl arguments
bz_0 = np.array((0,0,0.,))
bz_K1 = np.array((1.,0,0))*4*np.pi/3/hexside/s3
bz_K2 = np.array((1./2.,s3/2,0))*4*np.pi/3/hexside/s3
bz_M = np.array((3./4, s3/4,0))*4*np.pi/3/hexside/s3
k0Mlist = bz_0 + (bz_M-bz_0) * np.linspace(0,1,kdensity)[:,nx]
kMK1list = bz_M + (bz_K1-bz_M) * np.linspace(0,1,kdensity)[:,nx]
kK10list = bz_K1 + (bz_0-bz_K1) * np.linspace(0,1,kdensity)[:,nx]
k0K2list = bz_0 + (bz_K2-bz_0) * np.linspace(0,1,kdensity)[:,nx]
kK2Mlist = bz_K2 + (bz_M-bz_K2) * np.linspace(0,1,kdensity)[:,nx]
B1 = 2* bz_K1 - bz_K2
B2 = 2* bz_K2 - bz_K1
klist = np.concatenate((k0Mlist,kMK1list,kK10list,k0K2list,kK2Mlist), axis=0)
kxmaplist = np.concatenate((np.array([0]),np.cumsum(np.linalg.norm(np.diff(klist, axis=0), axis=-1))))


# In[ ]:

n2id = np.identity(2*nelem)
n2id.shape = (2,nelem,2,nelem)
extlistlist = list()
leftmatrixlistlist = list()
minsvTElistlist=list()
minsvTMlistlist=list()
if svdout:
    svUfullTElistlist = list()
    svVfullTElistlist = list()
    svSfullTElistlist = list()
    svUfullTMlistlist = list()
    svVfullTMlistlist = list()
    svSfullTMlistlist = list()

nan = float('nan')
omegalist = list()
filecount = 0
for trfile in os.scandir(translations_dir):
    filecount += 1
    if (skipfreq and filecount % skipfreq):
        continue
    try:
        npz = np.load(trfile.path, mmap_mode='r')
        k_0 = npz['precalc_params'][()]['k_hexside'] / hexside
        omega = k_0 * c / math.sqrt(epsilon_b)
        if((minfreq and omega < minfreq) or (maxfreq and omega > maxfreq)):
            continue
    except:
        print ("Unexpected error, trying to continue with another file:", sys.exc_info()[0])
        continue
    try:
        tdic = qpms.hexlattice_precalc_AB_loadunwrap(trfile.path, return_points=True)
    except:
        print ("Unexpected error, trying to continue with another file:", sys.exc_info()[0])
        continue
    k_0 = tdic['k_hexside'] / hexside
    omega = k_0 * c / math.sqrt(epsilon_b)
    omegalist.append(omega)
    print(filecount, omega/eV*hbar)
    sys.stdout.flush()
    a_self = tdic['a_self'][:,:nelem,:nelem]
    b_self = tdic['b_self'][:,:nelem,:nelem]
    a_u2d = tdic['a_u2d'][:,:nelem,:nelem]
    b_u2d = tdic['b_u2d'][:,:nelem,:nelem]
    a_d2u = tdic['a_d2u'][:,:nelem,:nelem]
    b_d2u = tdic['b_d2u'][:,:nelem,:nelem]
    unitcell_translations = tdic['self_tr']*hexside*s3
    u2d_translations = tdic['u2d_tr']*hexside*s3
    d2u_translations = tdic['d2u_tr']*hexside*s3
    if gaussianSigma:
        unitcell_envelope = np.exp(-np.sum(tdic['self_tr']**2,axis=-1)/(2*gaussianSigma**2))
        u2d_envelope = np.exp(-np.sum(tdic['u2d_tr']**2,axis=-1)/(2*gaussianSigma**2))
        d2u_envelope = np.exp(-np.sum(tdic['d2u_tr']**2,axis=-1)/(2*gaussianSigma**2))

    TMatrices_om = TMatrices_interp(omega)
    if svdout:
        svUfullTElist = np.full((klist.shape[0], 2*nelem, 2*nelem), np.nan, dtype=complex)
        svVfullTElist = np.full((klist.shape[0], 2*nelem, 2*nelem), np.nan, dtype=complex)
        svSfullTElist = np.full((klist.shape[0], 2*nelem), np.nan, dtype=complex)
        svUfullTMlist = np.full((klist.shape[0], 2*nelem, 2*nelem), np.nan, dtype=complex)
        svVfullTMlist = np.full((klist.shape[0], 2*nelem, 2*nelem), np.nan, dtype=complex)
        svSfullTMlist = np.full((klist.shape[0], 2*nelem), np.nan, dtype=complex)

    minsvTElist = np.full((klist.shape[0], svn),np.nan)
    minsvTMlist = np.full((klist.shape[0], svn),np.nan)
    leftmatrixlist = np.full((klist.shape[0],2,2,nelem,2,2,nelem),np.nan,dtype=complex)
    isNaNlist = np.zeros((klist.shape[0]), dtype=bool)

    # some reasonable loop goes here
    for ki in range(klist.shape[0]):
        k = klist[ki]
        if (k_0*k_0 - k[0]*k[0] - k[1]*k[1] < 0):
            isNaNlist[ki] = True
            continue

        phases_self = np.exp(1j*np.tensordot(k,unitcell_translations,axes=(0,-1)))
        phases_u2d = np.exp(1j*np.tensordot(k,u2d_translations,axes=(0,-1)))
        phases_d2u = np.exp(1j*np.tensordot(k,d2u_translations,axes=(0,-1)))
        if gaussianSigma:
            phases_self *= unitcell_envelope
            phases_u2d *= u2d_envelope
            phases_d2u *= d2u_envelope
        leftmatrix = np.zeros((2,2,nelem, 2,2,nelem), dtype=complex)

        leftmatrix[0,0,:,0,0,:] = np.tensordot(a_self,phases_self, axes=(0,-1)) # u2u, E2E
        leftmatrix[1,0,:,1,0,:] = leftmatrix[0,0,:,0,0,:] # d2d, E2E
        leftmatrix[0,1,:,0,1,:] = leftmatrix[0,0,:,0,0,:] # u2u, M2M
        leftmatrix[1,1,:,1,1,:] = leftmatrix[0,0,:,0,0,:] # d2d, M2M
        leftmatrix[0,0,:,0,1,:] = np.tensordot(b_self,phases_self, axes=(0,-1)) # u2u, M2E
        leftmatrix[0,1,:,0,0,:] = leftmatrix[0,0,:,0,1,:] # u2u, E2M
        leftmatrix[1,1,:,1,0,:] = leftmatrix[0,0,:,0,1,:] # d2d, E2M
        leftmatrix[1,0,:,1,1,:] = leftmatrix[0,0,:,0,1,:] # d2d, M2E
        leftmatrix[0,0,:,1,0,:] = np.tensordot(a_d2u, phases_d2u,axes=(0,-1)) #d2u,E2E
        leftmatrix[0,1,:,1,1,:] = leftmatrix[0,0,:,1,0,:] #d2u, M2M
        leftmatrix[1,0,:,0,0,:] = np.tensordot(a_u2d, phases_u2d,axes=(0,-1)) #u2d,E2E
        leftmatrix[1,1,:,0,1,:] = leftmatrix[1,0,:,0,0,:] #u2d, M2M
        leftmatrix[0,0,:,1,1,:] = np.tensordot(b_d2u, phases_d2u,axes=(0,-1)) #d2u,M2E
        leftmatrix[0,1,:,1,0,:] = leftmatrix[0,0,:,1,1,:] #d2u, E2M
        leftmatrix[1,0,:,0,1,:] = np.tensordot(b_u2d, phases_u2d,axes=(0,-1)) #u2d,M2E
        leftmatrix[1,1,:,0,0,:] = leftmatrix[1,0,:,0,1,:] #u2d, E2M
        #leftmatrix is now the translation matrix T
        for j in range(2):
            leftmatrix[j] = -np.tensordot(TMatrices_om[j], leftmatrix[j], axes=([-2,-1],[0,1]))
            # at this point, jth row of leftmatrix is that of -MT
            leftmatrix[j,:,:,j,:,:] += n2id
        #now we are done, 1-MT

        leftmatrixlist[ki] = leftmatrix

    nnlist = np.logical_not(isNaNlist)
    leftmatrixlist_s = np.reshape(leftmatrixlist,(klist.shape[0], 2*2*nelem,2*2*nelem))[nnlist]
    leftmatrixlist_TE = leftmatrixlist_s[np.ix_(np.arange(leftmatrixlist_s.shape[0]),TEč,TEč)]
    leftmatrixlist_TM = leftmatrixlist_s[np.ix_(np.arange(leftmatrixlist_s.shape[0]),TMč,TMč)]
    #svarr = np.linalg.svd(leftmatrixlist_TE, compute_uv=False)
    #argsortlist = np.argsort(svarr, axis=-1)[...,:svn]
    #minsvTElist[nnlist] = svarr[...,argsortlist]
    #minsvTElist[nnlist] = np.amin(np.linalg.svd(leftmatrixlist_TE, compute_uv=False), axis=-1)
    if svdout:
        svUfullTElist[nnlist], svSfullTElist[nnlist], svVfullTElist[nnlist] = np.linalg.svd(leftmatrixlist_TE, compute_uv=True)
        svUfullTMlist[nnlist], svSfullTMlist[nnlist], svVfullTMlist[nnlist] = np.linalg.svd(leftmatrixlist_TM, compute_uv=True)
        svUfullTElistlist.append(svUfullTElist)
        svVfullTElistlist.append(svVfullTElist)
        svSfullTElistlist.append(svSfullTElist)
        svUfullTMlistlist.append(svUfullTMlist)
        svVfullTMlistlist.append(svVfullTMlist)
        svSfullTMlistlist.append(svSfullTMlist)
    minsvTElist[nnlist] = np.linalg.svd(leftmatrixlist_TE, compute_uv=False)[...,-svn:]
    #svarr = np.linalg.svd(leftmatrixlist_TM, compute_uv=False)
    #argsortlist = np.argsort(svarr, axis=-1)[...,:svn]
    #minsvTMlist[nnlist] = svarr[...,argsortlist]
    #minsvTMlist[nnlist] = np.amin(np.linalg.svd(leftmatrixlist_TM, compute_uv=False), axis=-1)
    minsvTMlist[nnlist] = np.linalg.svd(leftmatrixlist_TM, compute_uv=False)[...,-svn:]
    minsvTMlistlist.append(minsvTMlist)
    minsvTElistlist.append(minsvTElist)

minsvTElistarr = np.array(minsvTElistlist)
minsvTMlistarr = np.array(minsvTMlistlist)
del minsvTElistlist, minsvTMlistlist
if svdout:
    svUfullTElistarr = np.array(svUfullTElistlist)
    svVfullTElistarr = np.array(svVfullTElistlist)
    svSfullTElistarr = np.array(svSfullTElistlist)
    del svUfullTElistlist, svVfullTElistlist, svSfullTElistlist
    svUfullTMlistarr = np.array(svUfullTMlistlist)
    svVfullTMlistarr = np.array(svVfullTMlistlist)
    svSfullTMlistarr = np.array(svSfullTMlistlist)
    del svUfullTMlistlist, svVfullTMlistlist, svSfullTMlistlist
omegalist = np.array(omegalist)
# order to make the scatter plots "nice"
omegaorder = np.argsort(omegalist)
omegalist = omegalist[omegaorder]
minsvTElistarr = minsvTElistarr[omegaorder]
minsvTMlistarr = minsvTMlistarr[omegaorder]
if svdout:
    svUfullTElistarr = svUfullTElistarr[omegaorder]
    svVfullTElistarr = svVfullTElistarr[omegaorder]
    svSfullTElistarr = svSfullTElistarr[omegaorder]
    svUfullTMlistarr = svUfullTMlistarr[omegaorder]
    svVfullTMlistarr = svVfullTMlistarr[omegaorder]
    svSfullTMlistarr = svSfullTMlistarr[omegaorder]
    np.savez(svdout, omega = omegalist, klist = klist, bzpoints = np.array([bz_0, bz_K1, bz_K2, bz_M, B1, B2]),
             uTE = svUfullTElistarr,
             vTE = svVfullTElistarr,
             sTE = svSfullTElistarr,
             uTM = svUfullTMlistarr,
             vTM = svVfullTMlistarr,
             sTM = svSfullTMlistarr,
    )

omlist = np.broadcast_to(omegalist[:,nx], minsvTElistarr[...,0].shape)
kxmlarr = np.broadcast_to(kxmaplist[nx,:], minsvTElistarr[...,0].shape)
klist = np.concatenate((k0Mlist,kMK1list,kK10list,k0K2list,kK2Mlist), axis=0)


# In[ ]:
for minN in reversed(range(svn)):
    f, ax = plt.subplots(1, figsize=(20,15))
    sc = ax.scatter(kxmlarr, omlist/eV*hbar, c = np.sqrt(minsvTMlistarr[...,minN]), s =40, lw=0)
    ax.plot(kxmaplist, np.linalg.norm(klist,axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist+B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist+B2, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-B2, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist+B2-B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-B2+B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-B2-B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist+B2+B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-2*B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-2*B2, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-2*B2-B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-2*B1-B2, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-2*B1-2*B2, axis=-1)*cdn/eV*hbar, '-',
            # kxmaplist, np.linalg.norm(klist+2*B2-B1, axis=-1)*cdn, '-',
            # kxmaplist, np.linalg.norm(klist+2*B1-B2, axis=-1)*cdn, '-',
    )
    ax.set_xlim([np.min(kxmlarr),np.max(kxmlarr)])
    #ax.set_ylim([2.15,2.30])
    ax.set_ylim([np.min(omlist/eV*hbar),np.max(omlist/eV*hbar)])
    ax.set_xticks([0, kxmaplist[len(k0Mlist)-1], kxmaplist[len(k0Mlist)+len(kMK1list)-1], kxmaplist[len(k0Mlist)+len(kMK1list)+len(kK10list)-1], kxmaplist[len(k0Mlist)+len(kMK1list)+len(kK10list)+len(k0K2list)-1], kxmaplist[len(k0Mlist)+len(kMK1list)+len(kK10list)+len(k0K2list)+len(kK2Mlist)-1]])
    ax.set_xticklabels(['Γ', 'M', 'K', 'Γ', 'K\'','M'])
    f.colorbar(sc)

    pdf.savefig(f)


    # In[ ]:

    f, ax = plt.subplots(1, figsize=(20,15))
    sc = ax.scatter(kxmlarr, omlist/eV*hbar, c = np.sqrt(minsvTElistarr[...,minN]), s =40, lw=0)
    ax.plot(kxmaplist, np.linalg.norm(klist,axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist+B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist+B2, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-B2, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist+B2-B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-B2+B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-B2-B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist+B2+B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-2*B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-2*B2, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-2*B2-B1, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-2*B1-B2, axis=-1)*cdn/eV*hbar, '-',
            kxmaplist, np.linalg.norm(klist-2*B1-2*B2, axis=-1)*cdn/eV*hbar, '-',
            # kxmaplist, np.linalg.norm(klist+2*B2-B1, axis=-1)*cdn, '-',
            # kxmaplist, np.linalg.norm(klist+2*B1-B2, axis=-1)*cdn, '-',
    )
    ax.set_xlim([np.min(kxmlarr),np.max(kxmlarr)])
    #ax.set_ylim([2.15,2.30])
    ax.set_ylim([np.min(omlist/eV*hbar),np.max(omlist/eV*hbar)])
    ax.set_xticks([0, kxmaplist[len(k0Mlist)-1], kxmaplist[len(k0Mlist)+len(kMK1list)-1], kxmaplist[len(k0Mlist)+len(kMK1list)+len(kK10list)-1], kxmaplist[len(k0Mlist)+len(kMK1list)+len(kK10list)+len(k0K2list)-1], kxmaplist[len(k0Mlist)+len(kMK1list)+len(kK10list)+len(k0K2list)+len(kK2Mlist)-1]])
    ax.set_xticklabels(['Γ', 'M', 'K', 'Γ', 'K\'','M'])
    f.colorbar(sc)

    pdf.savefig(f)
pdf.close()

if scp_dest:
    subprocess.run(['scp', pdfout, scp_dest])
    if svdout:
        subprocess.run(['scp', svdout, scp_dest])

print(time.strftime("%H.%M:%S",time.gmtime(time.time()-begtime)))
@@ -1,403 +0,0 @@
#!/usr/bin/env python3


import argparse, re, random, string
import subprocess
from scipy.constants import hbar, e as eV, pi, c

def make_action_sharedlist(opname, listname):
    class opAction(argparse.Action):
        def __call__(self, parser, args, values, option_string=None):
            if (not hasattr(args, listname)) or getattr(args, listname) is None:
                setattr(args, listname, list())
            getattr(args,listname).append((opname, values))
    return opAction

parser = argparse.ArgumentParser()
#TODO? use type=argparse.FileType('r') ?
parser.add_argument('--TMatrix', action='store', required=True, help='Path to TMatrix file')
#parser.add_argument('--griddir', action='store', required=True, help='Path to the directory with precalculated translation operators')
parser.add_argument('--output_prefix', action='store', required=True, help='Prefix to the pdf and/or npz output (will be appended frequency and hexside)')
#sizepar = parser.add_mutually_exclusive_group(required=True)
parser.add_argument('--hexside', action='store', type=float, required=True, help='Lattice hexagon size length')
parser.add_argument('--output', action='store', help='Path to output PDF')
parser.add_argument('--store_SVD', action='store_false', help='If specified without --SVD_output, it will save the data in a file named as the PDF output, but with .npz extension instead')
parser.add_argument('--plot_TMatrix', action='store_true', help='Visualise TMatrix on the first page of the output')
#parser.add_argument('--SVD_output', action='store', help='Path to output singular value decomposition result')
parser.add_argument('--nSV', action='store', metavar='N', type=int, default=1, help='Store and draw N minimum singular values')
parser.add_argument('--maxlayer', action='store', type=int, default=100, help='How far to sum the lattice points to obtain the dispersion')
parser.add_argument('--scp_to', action='store', metavar='N', type=str, help='SCP the output files to a given destination')
parser.add_argument('--background_permittivity', action='store', type=float, default=1., help='Background medium relative permittivity (default 1)')
parser.add_argument('--eVfreq', action='store', required=True, type=float, help='Frequency in eV')
parser.add_argument('--kdensity', action='store', type=int, default=33, help='Number of k-points per x-axis segment')
parser.add_argument('--lMax', action='store', type=int, help='Override lMax from the TMatrix file')
#TODO some more sophisticated x axis definitions
parser.add_argument('--gaussian', action='store', type=float, metavar='σ', help='Use a gaussian envelope for weighting the interaction matrix contributions (depending on the distance), measured in unit cell lengths (?) FIXME.')
popgrp=parser.add_argument_group(title='Operations')
popgrp.add_argument('--tr', dest='ops', action=make_action_sharedlist('tr', 'ops'), default=list()) # the default value for dest can be set once
popgrp.add_argument('--tr0', dest='ops', action=make_action_sharedlist('tr0', 'ops'))
popgrp.add_argument('--tr1', dest='ops', action=make_action_sharedlist('tr1', 'ops'))
popgrp.add_argument('--sym', dest='ops', action=make_action_sharedlist('sym', 'ops'))
popgrp.add_argument('--sym0', dest='ops', action=make_action_sharedlist('sym0', 'ops'))
popgrp.add_argument('--sym1', dest='ops', action=make_action_sharedlist('sym1', 'ops'))
#popgrp.add_argument('--mult', dest='ops', nargs=3, metavar=('INCSPEC', 'SCATSPEC', 'MULTIPLIER'), action=make_action_sharedlist('mult', 'ops'))
#popgrp.add_argument('--mult0', dest='ops', nargs=3, metavar=('INCSPEC', 'SCATSPEC', 'MULTIPLIER'), action=make_action_sharedlist('mult0', 'ops'))
#popgrp.add_argument('--mult1', dest='ops', nargs=3, metavar=('INCSPEC', 'SCATSPEC', 'MULTIPLIER'), action=make_action_sharedlist('mult1', 'ops'))
popgrp.add_argument('--multl', dest='ops', nargs=3, metavar=('INCL[,INCL,...]', 'SCATL[,SCATL,...]', 'MULTIPLIER'), action=make_action_sharedlist('multl', 'ops'))
popgrp.add_argument('--multl0', dest='ops', nargs=3, metavar=('INCL[,INCL,...]', 'SCATL[,SCATL,...]', 'MULTIPLIER'), action=make_action_sharedlist('multl0', 'ops'))
popgrp.add_argument('--multl1', dest='ops', nargs=3, metavar=('INCL[,INCL,...]', 'SCATL[,SCATL,...]', 'MULTIPLIER'), action=make_action_sharedlist('multl1', 'ops'))
parser.add_argument('--frequency_multiplier', action='store', type=float, default=1., help='Multiplies the frequencies in the TMatrix file by a given factor.')
# TODO enable more flexible per-sublattice specification
pargs=parser.parse_args()
print(pargs)

maxlayer=pargs.maxlayer
hexside=pargs.hexside
eVfreq = pargs.eVfreq
freq = eVfreq*eV/hbar

TMatrix_file = pargs.TMatrix
pdfout = pargs.output if pargs.output else (
    '%s_%dnm_%.4f.pdf' % (pargs.output_prefix,hexside/1e-9,eVfreq) if pargs.output_prefix else
    (''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)) + '.pdf'))
print(pdfout)
if(pargs.store_SVD):
    if re.search('.pdf$', pdfout):
        svdout = re.sub('.pdf$', r'.npz', pdfout)
    else:
        svdout = pdfout + '.npz'
else:
    svdout = None


epsilon_b = pargs.background_permittivity #2.3104
gaussianSigma = pargs.gaussian if pargs.gaussian else None # hexside * 222 / 7
interpfreqfactor = pargs.frequency_multiplier
scp_dest = pargs.scp_to if pargs.scp_to else None
kdensity = pargs.kdensity
svn = pargs.nSV

# TODO multiplier operation definitions and parsing
#factor13inc = 10
#factor13scat=10

ops = list()
opre = re.compile('(tr|sym|copy|multl|mult)(\d*)')
for oparg in pargs.ops:
    opm = opre.match(oparg[0])
    if opm:
        ops.append(((opm.group(2),) if opm.group(2) else (0,1), opm.group(1), oparg[1]))
    else:
        raise # should not happen
print(ops)

#ops = (
#    # target, operation type (symmetrization / transformation / copy), specification (operation or source),
#    # target: 0, 1, (0,1), (0,), (1,), #NI: 'all'
#    # operation type: sym, tr, copy
#    # specification:
#    #     sym, tr: 'σ_z', 'σ_y', 'C2'; sym: 'C3',
#    #     copy: 0, 1 (source)
#    ((0,1), 'sym', 'σ_z'),
#    #((0,1), 'sym', 'σ_x'),
#    #((0,1), 'sym', 'σ_y'),
#    ((0,1), 'sym', 'C3'),
#    ((1), 'tr', 'C2'),
#
#)

# ----------------- finished basic CLI parsing (except for op arguments) ------------------
import time
begtime=time.time()

from matplotlib.path import Path
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import qpms
import numpy as np
import os, sys, warnings, math
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from scipy import interpolate
nx = None
s3 = math.sqrt(3)


pdf = PdfPages(pdfout)

# In[3]:

# T-matrix specification goes here
cdn = c/ math.sqrt(epsilon_b)
TMatrices_orig, freqs_orig, freqs_weirdunits_orig, lMaxTM = qpms.loadScuffTMatrices(TMatrix_file)
lMax = lMaxTM
if pargs.lMax:
    lMax = pargs.lMax if pargs.lMax else lMaxTM
my, ny = qpms.get_mn_y(lMax)
nelem = len(my)
if pargs.lMax: #force commandline specified lMax
    TMatrices_orig = TMatrices_orig[...,0:nelem,:,0:nelem]

TMatrices = np.array(np.broadcast_to(TMatrices_orig[:,nx,:,:,:,:],(len(freqs_orig),2,2,nelem,2,nelem)) )

#TMatrices[:,:,:,:,:,ny==3] *= factor13inc
#TMatrices[:,:,:,ny==3,:,:] *= factor13scat
xfl = qpms.xflip_tyty(lMax)
yfl = qpms.yflip_tyty(lMax)
zfl = qpms.zflip_tyty(lMax)
c2rot = qpms.apply_matrix_left(qpms.yflip_yy(3),qpms.xflip_yy(3),-1)

reCN = re.compile('(\d*)C(\d+)')
#TODO C infinity

for op in ops:
    if op[0] == 'all':
        targets = (0,1)
    elif isinstance(op[0],int):
        targets = (op[0],)
    else:
        targets = op[0]

    if op[1] == 'sym':
        mCN = reCN.match(op[2]) # Fuck van Rossum for not having assignments inside expressions
        if op[2] == 'σ_z':
            for t in targets:
                TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(zfl,qpms.apply_ndmatrix_left(zfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
        elif op[2] == 'σ_y':
            for t in targets:
                TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(yfl,qpms.apply_ndmatrix_left(yfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
        elif op[2] == 'σ_x':
            for t in targets:
                TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(xfl,qpms.apply_ndmatrix_left(xfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
        elif op[2] == 'C2': # special case of the latter
            for t in targets:
                TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_matrix_left(c2rot,qpms.apply_matrix_left(c2rot, TMatrices[:,t], -3),-1))/2
        elif mCN:
            rotN = int(mCN.group(2))
            TMatrix_contribs = np.empty((rotN,TMatrices.shape[0],2,nelem,2,nelem), dtype=np.complex_)
            for t in targets:
                for i in range(rotN):
                    rotangle = 2*np.pi*i / rotN
                    rot = qpms.WignerD_yy_fromvector(lMax,np.array([0,0,rotangle]))
                    rotinv = qpms.WignerD_yy_fromvector(lMax,np.array([0,0,-rotangle]))
                    TMatrix_contribs[i] = qpms.apply_matrix_left(rot,qpms.apply_matrix_left(rotinv, TMatrices[:,t], -3),-1)
                TMatrices[:,t] = np.sum(TMatrix_contribs, axis=0) / rotN
        else:
            raise
    elif op[1] == 'tr':
        mCN = reCN.match(op[2]) # Fuck van Rossum for not having assignments inside expressions
        if op[2] == 'σ_z':
            for t in targets:
                TMatrices[:,t] = qpms.apply_ndmatrix_left(zfl,qpms.apply_ndmatrix_left(zfl, TMatrices[:,t], (-4,-3)),(-2,-1))
        elif op[2] == 'σ_y':
            for t in targets:
                TMatrices[:,t] = qpms.apply_ndmatrix_left(yfl,qpms.apply_ndmatrix_left(yfl, TMatrices[:,t], (-4,-3)),(-2,-1))
        elif op[2] == 'σ_x':
            for t in targets:
                TMatrices[:,t] = qpms.apply_ndmatrix_left(xfl,qpms.apply_ndmatrix_left(xfl, TMatrices[:,t], (-4,-3)),(-2,-1))
        elif op[2] == 'C2':
            for t in targets:
                TMatrices[:,t] = qpms.apply_matrix_left(c2rot,qpms.apply_matrix_left(c2rot, TMatrices[:,t], -3),-1)
        elif mCN:
            rotN = int(mCN.group(2))
            power = int(mCN.group(1)) if mCN.group(1) else 1
            TMatrix_contribs = np.empty((rotN,TMatrices.shape[0],2,nelem,2,nelem), dtype=np.complex_)
            for t in targets:
                rotangle = 2*np.pi*power/rotN
                rot = qpms.WignerD_yy_fromvector(lMax, np.array([0,0,rotangle]))
                rotinv = qpms.WignerD_yy_fromvector(lMax, np.array([0,0,-rotangle]))
                TMatrices[:,t] = qpms.apply_matrix_left(rot, qpms.apply_matrix_left(rotinv, TMatrices[:,t], -3),-1)
        else:
            raise
    elif op[1] == 'copy':
        raise # not implemented
    elif op[1] == 'mult':
        raise # not implemented
    elif op[1] == 'multl':
        incy = np.full((nelem,), False, dtype=bool)
        for incl in op[2][0].split(','):
            l = int(incl)
            incy += (l == ny)
        scaty = np.full((nelem,), False, dtype=bool)
        for scatl in op[2][1].split(','):
            l = int(scatl)
            scaty += (l == ny)
        for t in targets:
            TMatrices[np.ix_(np.arange(TMatrices.shape[0]),np.array([t]),np.array([0,1]),scaty,np.array([0,1]),incy)] *= float(op[2][2])
    else:
        raise #unknown operation; should not happen

TMatrices_interp = interpolate.interp1d(freqs_orig*interpfreqfactor, TMatrices, axis=0, kind='linear',fill_value="extrapolate")


# In[4]:
if(pargs.plot_TMatrix):
    om = np.linspace(np.min(freqs_orig), np.max(freqs_orig),100)
    TMatrix0ip = np.reshape(TMatrices_interp(om)[:,0], (len(om), 2*nelem*2*nelem))
    f, axa = plt.subplots(2, 2, figsize=(15,15))

    #print(TMatrices.shape)
    #plt.plot(om, TMatrices[:,0,0,0,0].imag,'r',om, TMatrices[:,0,0,0,0].real,'r--',om, TMatrices[:,0,2,0,2].imag,'b',om, TMatrices[:,0,2,0,2].real,'b--'))

    ax = axa[0,0]
    ax2 = ax.twiny()
    ax2.set_xlim([ax.get_xlim()[0]/eV*hbar,ax.get_xlim()[1]/eV*hbar])
    ax.plot(
        om, TMatrix0ip[:,:].imag,'-',om, TMatrix0ip[:,:].real,'--',
    )
    ax = axa[0,1]
    ax2 = ax.twiny()
    ax2.set_xlim([ax.get_xlim()[0]/eV*hbar,ax.get_xlim()[1]/eV*hbar])
    ax.plot(
        om, abs(TMatrix0ip[:,:]),'-'
    )
    ax.set_yscale('log')

    ax = axa[1,1]
    ax2 = ax.twiny()
    ax2.set_xlim([ax.get_xlim()[0]/eV*hbar,ax.get_xlim()[1]/eV*hbar])
    ax.plot(
        om, np.unwrap(np.angle(TMatrix0ip[:,:]),axis=0),'-'
    )

    ax = axa[1,0]
    ax.text(0.5,0.5,str(pargs).replace(',',',\n'),horizontalalignment='center',verticalalignment='center',transform=ax.transAxes)
    pdf.savefig(f)


# In[ ]:

'''
#kdensity = 66 #defined from cl arguments
bz_0 = np.array((0,0,0.,))
bz_K1 = np.array((1.,0,0))*4*np.pi/3/hexside/s3
bz_K2 = np.array((1./2.,s3/2,0))*4*np.pi/3/hexside/s3
bz_M = np.array((3./4, s3/4,0))*4*np.pi/3/hexside/s3
k0Mlist = bz_0 + (bz_M-bz_0) * np.linspace(0,1,kdensity)[:,nx]
kMK1list = bz_M + (bz_K1-bz_M) * np.linspace(0,1,kdensity)[:,nx]
kK10list = bz_K1 + (bz_0-bz_K1) * np.linspace(0,1,kdensity)[:,nx]
k0K2list = bz_0 + (bz_K2-bz_0) * np.linspace(0,1,kdensity)[:,nx]
kK2Mlist = bz_K2 + (bz_M-bz_K2) * np.linspace(0,1,kdensity)[:,nx]
B1 = 2* bz_K1 - bz_K2
B2 = 2* bz_K2 - bz_K1
klist = np.concatenate((k0Mlist,kMK1list,kK10list,k0K2list,kK2Mlist), axis=0)
kxmaplist = np.concatenate((np.array([0]),np.cumsum(np.linalg.norm(np.diff(klist, axis=0), axis=-1))))
'''
klist = qpms.generate_trianglepoints(kdensity, v3d=True, include_origin=True)*3*math.pi/(3*kdensity*hexside)
TMatrices_om = TMatrices_interp(freq)

svdres = qpms.hexlattice_zsym_getSVD(lMax=lMax, TMatrices_om=TMatrices_om, epsilon_b=epsilon_b, hexside=hexside, maxlayer=maxlayer,
        omega=freq, klist=klist, gaussianSigma=gaussianSigma, onlyNmin=(0 if svdout else svn))
if svdout:
    ((svUfullTElist, svSfullTElist, svVfullTElist), (svUfullTMlist, svSfullTMlist, svVfullTMlist)) = svdres
    (minsvElist, minsvTMlist) = (svSfullTElist[...,-svn:], svSfullTMlist[...,-svn:])
else:
    minsvTElist, minsvTMlist = svdres


''' The new pretty diffracted order drawing '''
maxlayer_reciprocal=4
cdn = c/ math.sqrt(epsilon_b)
bz_0 = np.array((0,0,))
bz_K1 = np.array((1.,0))*4*np.pi/3/hexside/s3
bz_K2 = np.array((1./2.,s3/2))*4*np.pi/3/hexside/s3
bz_M = np.array((3./4, s3/4))*4*np.pi/3/hexside/s3

# reciprocal lattice basis
B1 = 2* bz_K1 - bz_K2
B2 = 2* bz_K2 - bz_K1

if svdout:
    np.savez(svdout, omega = freq, klist = klist, bzpoints = np.array([bz_0, bz_K1, bz_K2, bz_M, B1, B2]),
             uTE = svUfullTElist,
             vTE = svVfullTElist,
             sTE = svSfullTElist,
             uTM = svUfullTMlist,
             vTM = svVfullTMlist,
             sTM = svSfullTMlist,
    )

k2density = 100
k0Mlist = bz_0 + (bz_M-bz_0) * np.linspace(0,1,k2density)[:,nx]
kMK1list = bz_M + (bz_K1-bz_M) * np.linspace(0,1,k2density)[:,nx]
kK10list = bz_K1 + (bz_0-bz_K1) * np.linspace(0,1,k2density)[:,nx]
k0K2list = bz_0 + (bz_K2-bz_0) * np.linspace(0,1,k2density)[:,nx]
kK2Mlist = bz_K2 + (bz_M-bz_K2) * np.linspace(0,1,k2density)[:,nx]
k2list = np.concatenate((k0Mlist,kMK1list,kK10list,k0K2list,kK2Mlist), axis=0)
kxmaplist = np.concatenate((np.array([0]),np.cumsum(np.linalg.norm(np.diff(k2list, axis=0), axis=-1))))

centers2=qpms.generate_trianglepoints(maxlayer_reciprocal, v3d = False, include_origin= True)*4*np.pi/3/hexside
rot90 = np.array([[0,-1],[1,0]])
centers2=np.dot(centers2,rot90)

import matplotlib.pyplot as plt
import matplotlib
from matplotlib.path import Path
import matplotlib.patches as patches
cmap = matplotlib.cm.prism
colormax = np.amax(np.linalg.norm(centers2,axis=0))


# In[ ]:
for minN in reversed(range(svn)):
    f, axes = plt.subplots(1,3, figsize=(20,4.8))
    ax = axes[0]
    sc = ax.scatter(klist[:,0], klist[:,1], c = np.clip(np.abs(minsvTElist[:,minN]),0,1), lw=0)
    for center in centers2:
        circle=plt.Circle((center[0],center[1]),omega/cdn, facecolor='none', edgecolor=cmap(np.linalg.norm(center)/colormax),lw=0.5)
        ax.add_artist(circle)
    verts = [(math.cos(math.pi*i/3)*4*np.pi/3/hexside/s3,math.sin(math.pi*i/3)*4*np.pi/3/hexside/s3) for i in range(6 +1)]
    codes = [Path.MOVETO,Path.LINETO,Path.LINETO,Path.LINETO,Path.LINETO,Path.LINETO,Path.CLOSEPOLY,]
    path = Path(verts, codes)
    patch = patches.PathPatch(path, facecolor='none', edgecolor='black', lw=1)
    ax.add_patch(patch)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.title.set_text('E in-plane ("TE")')
    f.colorbar(sc,ax=ax)

    ax = axes[1]
    sc = ax.scatter(klist[:,0], klist[:,1], c = np.clip(np.abs(minsvTMlist[:,minN]),0,1), lw=0)
    for center in centers2:
        circle=plt.Circle((center[0],center[1]),omega/cdn, facecolor='none', edgecolor=cmap(np.linalg.norm(center)/colormax),lw=0.5)
        ax.add_artist(circle)
    verts = [(math.cos(math.pi*i/3)*4*np.pi/3/hexside/s3,math.sin(math.pi*i/3)*4*np.pi/3/hexside/s3) for i in range(6 +1)]
    codes = [Path.MOVETO,Path.LINETO,Path.LINETO,Path.LINETO,Path.LINETO,Path.LINETO,Path.CLOSEPOLY,]
    path = Path(verts, codes)
    patch = patches.PathPatch(path, facecolor='none', edgecolor='black', lw=1)
    ax.add_patch(patch)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.title.set_text('E perpendicular ("TM")')
    f.colorbar(sc,ax=ax)

    ax = axes[2]
    for center in centers2:
        ax.plot(kxmaplist, np.linalg.norm(k2list-center,axis=-1)*cdn, '-', color=cmap(np.linalg.norm(center)/colormax))

    #ax.set_xlim([np.min(kxmlarr),np.max(kxmlarr)])
    #ax.set_ylim([np.min(omegalist),np.max(omegalist)])
    xticklist = [0, kxmaplist[len(k0Mlist)-1], kxmaplist[len(k0Mlist)+len(kMK1list)-1], kxmaplist[len(k0Mlist)+len(kMK1list)+len(kK10list)-1], kxmaplist[len(k0Mlist)+len(kMK1list)+len(kK10list)+len(k0K2list)-1], kxmaplist[len(k0Mlist)+len(kMK1list)+len(kK10list)+len(k0K2list)+len(kK2Mlist)-1]]
    ax.set_xticks(xticklist)
    for xt in xticklist:
        ax.axvline(xt, ls='dotted', lw=0.3,c='k')
    ax.set_xticklabels(['Γ', 'M', 'K', 'Γ', 'K\'','M'])
    ax.axhline(omega, c='black')
    ax.set_ylim([0,5e15])
    ax2 = ax.twinx()
    ax2.set_ylim([ax.get_ylim()[0]/eV*hbar,ax.get_ylim()[1]/eV*hbar])

    pdf.savefig(f)

pdf.close()

if scp_dest:
    subprocess.run(['scp', pdfout, scp_dest])
    if svdout:
        subprocess.run(['scp', svdout, scp_dest])

print(time.strftime("%H.%M:%S",time.gmtime(time.time()-begtime)))
@@ -1,239 +0,0 @@
#!/usr/bin/env python3

import argparse, re, random, string
import subprocess
from scipy.constants import hbar, e as eV, pi, c

def make_action_sharedlist(opname, listname):
    class opAction(argparse.Action):
        def __call__(self, parser, args, values, option_string=None):
            if (not hasattr(args, listname)) or getattr(args, listname) is None:
                setattr(args, listname, list())
            getattr(args,listname).append((opname, values))
    return opAction

parser = argparse.ArgumentParser()
#TODO? use type=argparse.FileType('r') ?
parser.add_argument('--TMatrix', action='store', required=True, help='Path to TMatrix file')
#parser.add_argument('--griddir', action='store', required=True, help='Path to the directory with precalculated translation operators')
parser.add_argument('--output_prefix', action='store', required=True, help='Prefix to the npz output (will be appended frequency, hexside and chunkno)')
#sizepar = parser.add_mutually_exclusive_group(required=True)
parser.add_argument('--hexside', action='store', type=float, required=True, help='Lattice hexagon size length')
parser.add_argument('--plot_TMatrix', action='store_true', help='Visualise TMatrix on the first page of the output')
#parser.add_argument('--SVD_output', action='store', help='Path to output singular value decomposition result')
parser.add_argument('--maxlayer', action='store', type=int, default=100, help='How far to sum the lattice points to obtain the dispersion')
parser.add_argument('--scp_to', action='store', metavar='N', type=str, help='SCP the output files to a given destination')
parser.add_argument('--background_permittivity', action='store', type=float, default=1., help='Background medium relative permittivity (default 1)')
parser.add_argument('--eVfreq', action='store', required=True, type=float, help='Frequency in eV')
parser.add_argument('--kdensity', action='store', type=int, default=33, help='Number of k-points per x-axis segment')
parser.add_argument('--chunklen', action='store', type=int, default=1000, help='Number of k-points per output file (default 1000)')
parser.add_argument('--lMax', action='store', type=int, help='Override lMax from the TMatrix file')
#TODO some more sophisticated x axis definitions
parser.add_argument('--gaussian', action='store', type=float, metavar='σ', help='Use a gaussian envelope for weighting the interaction matrix contributions (depending on the distance), measured in unit cell lengths (?) FIXME.')
parser.add_argument('--verbose', '-v', action='count', help='Be verbose (about computation times, mostly)')
popgrp=parser.add_argument_group(title='Operations')
popgrp.add_argument('--tr', dest='ops', action=make_action_sharedlist('tr', 'ops'), default=list()) # the default value for dest can be set once
popgrp.add_argument('--tr0', dest='ops', action=make_action_sharedlist('tr0', 'ops'))
popgrp.add_argument('--tr1', dest='ops', action=make_action_sharedlist('tr1', 'ops'))
popgrp.add_argument('--sym', dest='ops', action=make_action_sharedlist('sym', 'ops'))
popgrp.add_argument('--sym0', dest='ops', action=make_action_sharedlist('sym0', 'ops'))
popgrp.add_argument('--sym1', dest='ops', action=make_action_sharedlist('sym1', 'ops'))
#popgrp.add_argument('--mult', dest='ops', nargs=3, metavar=('INCSPEC', 'SCATSPEC', 'MULTIPLIER'), action=make_action_sharedlist('mult', 'ops'))
#popgrp.add_argument('--mult0', dest='ops', nargs=3, metavar=('INCSPEC', 'SCATSPEC', 'MULTIPLIER'), action=make_action_sharedlist('mult0', 'ops'))
#popgrp.add_argument('--mult1', dest='ops', nargs=3, metavar=('INCSPEC', 'SCATSPEC', 'MULTIPLIER'), action=make_action_sharedlist('mult1', 'ops'))
popgrp.add_argument('--multl', dest='ops', nargs=3, metavar=('INCL[,INCL,...]', 'SCATL[,SCATL,...]', 'MULTIPLIER'), action=make_action_sharedlist('multl', 'ops'))
popgrp.add_argument('--multl0', dest='ops', nargs=3, metavar=('INCL[,INCL,...]', 'SCATL[,SCATL,...]', 'MULTIPLIER'), action=make_action_sharedlist('multl0', 'ops'))
popgrp.add_argument('--multl1', dest='ops', nargs=3, metavar=('INCL[,INCL,...]', 'SCATL[,SCATL,...]', 'MULTIPLIER'), action=make_action_sharedlist('multl1', 'ops'))
parser.add_argument('--frequency_multiplier', action='store', type=float, default=1., help='Multiplies the frequencies in the TMatrix file by a given factor.')
# TODO enable more flexible per-sublattice specification
pargs=parser.parse_args()
print(pargs)

maxlayer=pargs.maxlayer
|
|
||||||
hexside=pargs.hexside
|
|
||||||
eVfreq = pargs.eVfreq
|
|
||||||
freq = eVfreq*eV/hbar
|
|
||||||
verbose=pargs.verbose
|
|
||||||
|
|
||||||
TMatrix_file = pargs.TMatrix
|
|
||||||
|
|
||||||
epsilon_b = pargs.background_permittivity #2.3104
|
|
||||||
gaussianSigma = pargs.gaussian if pargs.gaussian else None # hexside * 222 / 7
|
|
||||||
interpfreqfactor = pargs.frequency_multiplier
|
|
||||||
scp_dest = pargs.scp_to if pargs.scp_to else None
|
|
||||||
kdensity = pargs.kdensity
|
|
||||||
chunklen = pargs.chunklen
|
|
||||||
|
|
||||||
ops = list()
|
|
||||||
opre = re.compile('(tr|sym|copy|multl|mult)(\d*)')
|
|
||||||
for oparg in pargs.ops:
|
|
||||||
opm = opre.match(oparg[0])
|
|
||||||
if opm:
|
|
||||||
ops.append(((int(opm.group(2)),) if opm.group(2) else (0,1), opm.group(1), oparg[1]))
|
|
||||||
else:
|
|
||||||
raise # should not happen
|
|
||||||
print(ops)
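# Each entry of `ops` is a triple (target T-matrix indices, operation name, operation argument);
# an illustrative example: '--sym σ_z' on the command line yields ops == [((0, 1), 'sym', 'σ_z')].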
|
|
||||||
|
|
||||||
|
|
||||||
# -----------------finished basic CLI parsing (except for op arguments) ------------------
|
|
||||||
from qpms.timetrack import _time_b, _time_e
|
|
||||||
btime=_time_b(verbose)
|
|
||||||
|
|
||||||
import qpms
|
|
||||||
import numpy as np
|
|
||||||
import os, sys, warnings, math
|
|
||||||
from scipy import interpolate
|
|
||||||
nx = None
|
|
||||||
s3 = math.sqrt(3)
|
|
||||||
|
|
||||||
|
|
||||||
# T-matrix specification here
|
|
||||||
cdn = c/ math.sqrt(epsilon_b)
|
|
||||||
TMatrices_orig, freqs_orig, freqs_weirdunits_orig, lMaxTM = qpms.loadScuffTMatrices(TMatrix_file)
|
|
||||||
lMax = lMaxTM
|
|
||||||
if pargs.lMax:
|
|
||||||
lMax = pargs.lMax if pargs.lMax else lMaxTM
|
|
||||||
my, ny = qpms.get_mn_y(lMax)
|
|
||||||
nelem = len(my)
|
|
||||||
if pargs.lMax: #force commandline specified lMax
|
|
||||||
TMatrices_orig = TMatrices_orig[...,0:nelem,:,0:nelem]
|
|
||||||
|
|
||||||
TMatrices = np.array(np.broadcast_to(TMatrices_orig[:,nx,:,:,:,:],(len(freqs_orig),2,2,nelem,2,nelem)) )
|
|
||||||
|
|
||||||
#TMatrices[:,:,:,:,:,ny==3] *= factor13inc
|
|
||||||
#TMatrices[:,:,:,ny==3,:,:] *= factor13scat
|
|
||||||
xfl = qpms.xflip_tyty(lMax)
|
|
||||||
yfl = qpms.yflip_tyty(lMax)
|
|
||||||
zfl = qpms.zflip_tyty(lMax)
|
|
||||||
c2rot = qpms.apply_matrix_left(qpms.yflip_yy(3),qpms.xflip_yy(3),-1)
|
|
||||||
|
|
||||||
reCN = re.compile(r'(\d*)C(\d+)')
|
|
||||||
#TODO C infinity
|
|
||||||
|
|
||||||
for op in ops:
|
|
||||||
if op[0] == 'all':
|
|
||||||
targets = (0,1)
|
|
||||||
elif isinstance(op[0],int):
|
|
||||||
targets = (op[0],)
|
|
||||||
else:
|
|
||||||
targets = op[0]
|
|
||||||
|
|
||||||
if op[1] == 'sym':
|
|
||||||
mCN = reCN.match(op[2]) # Fuck van Rossum for not having assignments inside expressions
|
|
||||||
if op[2] == 'σ_z':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(zfl,qpms.apply_ndmatrix_left(zfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
|
|
||||||
elif op[2] == 'σ_y':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(yfl,qpms.apply_ndmatrix_left(yfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
|
|
||||||
elif op[2] == 'σ_x':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(xfl,qpms.apply_ndmatrix_left(xfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
|
|
||||||
elif op[2] == 'C2': # special case of the latter
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_matrix_left(c2rot,qpms.apply_matrix_left(c2rot, TMatrices[:,t], -3),-1))/2
|
|
||||||
elif mCN:
|
|
||||||
rotN = int(mCN.group(2))
|
|
||||||
TMatrix_contribs = np.empty((rotN,TMatrices.shape[0],2,nelem,2,nelem), dtype=np.complex_)
|
|
||||||
for t in targets:
|
|
||||||
for i in range(rotN):
|
|
||||||
rotangle = 2*np.pi*i / rotN
|
|
||||||
rot = qpms.WignerD_yy_fromvector(lMax,np.array([0,0,rotangle]))
|
|
||||||
rotinv = qpms.WignerD_yy_fromvector(lMax,np.array([0,0,-rotangle]))
|
|
||||||
TMatrix_contribs[i] = qpms.apply_matrix_left(rot,qpms.apply_matrix_left(rotinv, TMatrices[:,t], -3),-1)
|
|
||||||
TMatrices[:,t] = np.sum(TMatrix_contribs, axis=0) / rotN
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
elif op[1] == 'tr':
|
|
||||||
mCN = reCN.match(op[2]) # Fuck van Rossum for not having assignments inside expressions
|
|
||||||
if op[2] == 'σ_z':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = qpms.apply_ndmatrix_left(zfl,qpms.apply_ndmatrix_left(zfl, TMatrices[:,t], (-4,-3)),(-2,-1))
|
|
||||||
elif op[2] == 'σ_y':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = qpms.apply_ndmatrix_left(yfl,qpms.apply_ndmatrix_left(yfl, TMatrices[:,t], (-4,-3)),(-2,-1))
|
|
||||||
elif op[2] == 'σ_x':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = qpms.apply_ndmatrix_left(xfl,qpms.apply_ndmatrix_left(xfl, TMatrices[:,t], (-4,-3)),(-2,-1))
|
|
||||||
elif op[2] == 'C2':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = qpms.apply_matrix_left(c2rot,qpms.apply_matrix_left(c2rot, TMatrices[:,t], -3),-1)
|
|
||||||
elif mCN:
|
|
||||||
rotN = int(mCN.group(2))
|
|
||||||
power = int(mCN.group(1)) if mCN.group(1) else 1
|
|
||||||
TMatrix_contribs = np.empty((rotN,TMatrices.shape[0],2,nelem,2,nelem), dtype=np.complex_)
|
|
||||||
for t in targets:
|
|
||||||
rotangle = 2*np.pi*power/rotN
|
|
||||||
rot = qpms.WignerD_yy_fromvector(lMax, np.array([0,0,rotangle]))
|
|
||||||
rotinv = qpms.WignerD_yy_fromvector(lMax, np.array([0,0,-rotangle]))
|
|
||||||
TMatrices[:,t] = qpms.apply_matrix_left(rot, qpms.apply_matrix_left(rotinv, TMatrices[:,t], -3),-1)
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
elif op[1] == 'copy':
|
|
||||||
raise # not implemented
|
|
||||||
elif op[1] == 'mult':
|
|
||||||
raise # not implemented
|
|
||||||
elif op[1] == 'multl':
|
|
||||||
incy = np.full((nelem,), False, dtype=bool)
|
|
||||||
for incl in op[2][0].split(','):
|
|
||||||
l = int(incl)
|
|
||||||
incy += (l == ny)
|
|
||||||
scaty = np.full((nelem,), False, dtype=bool)
|
|
||||||
for scatl in op[2][1].split(','):
|
|
||||||
l = int(scatl)
|
|
||||||
scaty += (l == ny)
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[np.ix_(np.arange(TMatrices.shape[0]),np.array([t]),np.array([0,1]),scaty,np.array([0,1]),incy)] *= float(op[2][2])
|
|
||||||
else:
|
|
||||||
raise #unknown operation; should not happen
|
|
||||||
|
|
||||||
TMatrices_interp = interpolate.interp1d(freqs_orig*interpfreqfactor, TMatrices, axis=0, kind='linear',fill_value="extrapolate")
|
|
||||||
|
|
||||||
klist_full = qpms.generate_trianglepoints(kdensity, v3d=True, include_origin=True)*3*math.pi/(3*kdensity*hexside)
|
|
||||||
TMatrices_om = TMatrices_interp(freq)
|
|
||||||
|
|
||||||
chunkn = math.ceil(klist_full.shape[0] / chunklen)
|
|
||||||
|
|
||||||
if verbose:
|
|
||||||
print('Evaluating %d k-points in %d chunks' % (klist_full.shape[0], chunkn), file = sys.stderr)
|
|
||||||
sys.stderr.flush()
|
|
||||||
|
|
||||||
metadata = np.array({
|
|
||||||
'lMax' : lMax,
|
|
||||||
'maxlayer' : maxlayer,
|
|
||||||
'gaussianSigma' : gaussianSigma,
|
|
||||||
'epsilon_b' : epsilon_b,
|
|
||||||
'hexside' : hexside,
|
|
||||||
'chunkn' : chunkn,
|
|
||||||
'TMatrix_file' : TMatrix_file,
|
|
||||||
'ops' : ops,
|
|
||||||
})
|
|
||||||
|
|
||||||
for chunki in range(chunkn):
|
|
||||||
svdout = '%s_%dnm_%.4f_c%03d.npz' % (pargs.output_prefix, hexside/1e-9, eVfreq, chunki)
|
|
||||||
|
|
||||||
klist = klist_full[chunki * chunklen : (chunki + 1) * chunklen]
|
|
||||||
|
|
||||||
svdres = qpms.hexlattice_zsym_getSVD(lMax=lMax, TMatrices_om=TMatrices_om, epsilon_b=epsilon_b, hexside=hexside, maxlayer=maxlayer,
|
|
||||||
omega=freq, klist=klist, gaussianSigma=gaussianSigma, onlyNmin=False, verbose=verbose)
|
|
||||||
|
|
||||||
#((svUfullTElist, svSfullTElist, svVfullTElist), (svUfullTMlist, svSfullTMlist, svVfullTMlist)) = svdres
|
|
||||||
|
|
||||||
np.savez(svdout, omega = freq, klist = klist,
|
|
||||||
metadata=metadata,
|
|
||||||
uTE = svdres[0][0],
|
|
||||||
vTE = svdres[0][2],
|
|
||||||
sTE = svdres[0][1],
|
|
||||||
uTM = svdres[1][0],
|
|
||||||
vTM = svdres[1][2],
|
|
||||||
sTM = svdres[1][1],
|
|
||||||
|
|
||||||
)
|
|
||||||
svdres=None
|
|
||||||
|
|
||||||
if scp_dest:
|
|
||||||
if svdout:
|
|
||||||
subprocess.run(['scp', svdout, scp_dest])
|
|
||||||
|
|
||||||
_time_e(btime, verbose)
|
|
||||||
#print(time.strftime("%H.%M:%S",time.gmtime(time.time()-begtime)))
|
|
|
@ -1,376 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
|
|
||||||
import argparse, re, random, string, sys
|
|
||||||
import subprocess
|
|
||||||
import warnings
|
|
||||||
from scipy.constants import hbar, e as eV, pi, c
|
|
||||||
|
|
||||||
unitcell_size = 1 # rectangular lattice
|
|
||||||
unitcell_indices = tuple(range(unitcell_size))
|
|
||||||
|
|
||||||
def make_action_sharedlist(opname, listname):
|
|
||||||
class opAction(argparse.Action):
|
|
||||||
def __call__(self, parser, args, values, option_string=None):
|
|
||||||
if (not hasattr(args, listname)) or getattr(args, listname) is None:
|
|
||||||
setattr(args, listname, list())
|
|
||||||
getattr(args,listname).append((opname, values))
|
|
||||||
return opAction
|
|
||||||
|
|
||||||
parser = argparse.ArgumentParser()
|
|
||||||
#TODO? use type=argparse.FileType('r') ?
|
|
||||||
parser.add_argument('--TMatrix', action='store', required=True, help='Path to TMatrix file')
|
|
||||||
#parser.add_argument('--griddir', action='store', required=True, help='Path to the directory with precalculated translation operators')
|
|
||||||
parser.add_argument('--output_prefix', '-p', '-o', action='store', required=True, help='Prefix for the npz output (lattice size, frequency and chunk number will be appended)')
|
|
||||||
parser.add_argument('--nosuffix', action='store_true', help='Do not add dimension metadata to the output filenames')
|
|
||||||
#sizepar = parser.add_mutually_exclusive_group(required=True)
|
|
||||||
#parser.add_argument('--hexside', action='store', type=float, required=True, help='Lattice hexagon size length')
|
|
||||||
parser.add_argument('--dx', action='store', type=float, required=True, help='x-direction lattice constant')
|
|
||||||
parser.add_argument('--dy', action='store', type=float, required=True, help='y-direction lattice constant')
|
|
||||||
|
|
||||||
parser.add_argument('--Nx', '--nx', action='store', type=int, required=True, help='Lattice points in the x-direction')
|
|
||||||
parser.add_argument('--Ny', '--ny', action='store', type=int, required=True, help='Lattice points in the y-direction')
|
|
||||||
|
|
||||||
# In these default settings, the area is 2x2 times larger than first BZ
|
|
||||||
parser.add_argument('--kxmin', action='store', type=float, default=-1., help='TODO')
|
|
||||||
parser.add_argument('--kxmax', action='store', type=float, default=1., help='TODO')
|
|
||||||
parser.add_argument('--kymin', action='store', type=float, default=-1., help='TODO')
|
|
||||||
parser.add_argument('--kymax', action='store', type=float, default=1., help='TODO')
|
|
||||||
|
|
||||||
#parser.add_argument('--kdensity', action='store', type=int, default=33, help='Number of k-points per x-axis segment')
|
|
||||||
parser.add_argument('--kxdensity', action='store', type=int, default=51, help='k-space resolution in the x-direction')
|
|
||||||
parser.add_argument('--kydensity', action='store', type=int, default=51, help='k-space resolution in the y-direction')
|
|
||||||
|
|
||||||
partgrp = parser.add_mutually_exclusive_group()
|
|
||||||
partgrp.add_argument('--only_TE', action='store_true', help='Calculate only the projection on the E⟂z modes')
|
|
||||||
partgrp.add_argument('--only_TM', action='store_true', help='Calculate only the projection on the E∥z modes')
|
|
||||||
partgrp.add_argument('--serial', action='store_true', help='Calculate the TE and TM parts separately to save memory')
|
|
||||||
|
|
||||||
parser.add_argument('--nocentre', action='store_true', help='Place the coordinate origin at the bottom left corner rather than at the centre of the array')
|
|
||||||
|
|
||||||
parser.add_argument('--plot_TMatrix', action='store_true', help='Visualise TMatrix on the first page of the output')
|
|
||||||
#parser.add_argument('--SVD_output', action='store', help='Path to output singular value decomposition result')
|
|
||||||
parser.add_argument('--maxlayer', action='store', type=int, default=100, help='How far to sum the lattice points to obtain the dispersion')
|
|
||||||
parser.add_argument('--scp_to', action='store', metavar='N', type=str, help='SCP the output files to a given destination')
|
|
||||||
parser.add_argument('--background_permittivity', action='store', type=float, default=1., help='Background medium relative permittivity (default 1)')
|
|
||||||
parser.add_argument('--eVfreq', action='store', required=True, type=float, help='Frequency in eV')
|
|
||||||
|
|
||||||
parser.add_argument('--chunklen', action='store', type=int, default=3000, help='Number of k-points per output file (default 3000)')
|
|
||||||
parser.add_argument('--lMax', action='store', type=int, help='Override lMax from the TMatrix file')
|
|
||||||
#TODO some more sophisticated x axis definitions
|
|
||||||
#parser.add_argument('--gaussian', action='store', type=float, metavar='σ', help='Use a gaussian envelope for weighting the interaction matrix contributions (depending on the distance), measured in unit cell lengths (?) FIxME).')
|
|
||||||
parser.add_argument('--verbose', '-v', action='count', help='Be verbose (about computation times, mostly)')
|
|
||||||
popgrp=parser.add_argument_group(title='Operations')
|
|
||||||
popgrp.add_argument('--tr', dest='ops', action=make_action_sharedlist('tr', 'ops'), default=list()) # the default value for dest can be set once
|
|
||||||
for i in unitcell_indices:
|
|
||||||
popgrp.add_argument('--tr%d'%i, dest='ops', action=make_action_sharedlist('tr%d'%i, 'ops'))
|
|
||||||
popgrp.add_argument('--sym', dest='ops', action=make_action_sharedlist('sym', 'ops'))
|
|
||||||
for i in unitcell_indices:
|
|
||||||
popgrp.add_argument('--sym%d'%i, dest='ops', action=make_action_sharedlist('sym%d'%i, 'ops'))
|
|
||||||
#popgrp.add_argument('--mult', dest='ops', nargs=3, metavar=('INCSPEC', 'SCATSPEC', 'MULTIPLIER'), action=make_action_sharedlist('mult', 'ops'))
|
|
||||||
#popgrp.add_argument('--mult0', dest='ops', nargs=3, metavar=('INCSPEC', 'SCATSPEC', 'MULTIPLIER'), action=make_action_sharedlist('mult0', 'ops'))
|
|
||||||
#popgrp.add_argument('--mult1', dest='ops', nargs=3, metavar=('INCSPEC', 'SCATSPEC', 'MULTIPLIER'), action=make_action_sharedlist('mult1', 'ops'))
|
|
||||||
popgrp.add_argument('--multl', dest='ops', nargs=3, metavar=('INCL[,INCL,...]', 'SCATL[,SCATL,...]', 'MULTIPLIER'), action=make_action_sharedlist('multl', 'ops'))
|
|
||||||
for i in unitcell_indices:
|
|
||||||
popgrp.add_argument('--multl%d'%i, dest='ops', nargs=3, metavar=('INCL[,INCL,...]', 'SCATL[,SCATL,...]', 'MULTIPLIER'), action=make_action_sharedlist('multl%d'%i, 'ops'))
|
|
||||||
#popgrp.add_argument('--multl1', dest='ops', nargs=3, metavar=('INCL[,INCL,...]', 'SCATL[,SCATL,...]', 'MULTIPLIER'), action=make_action_sharedlist('multl1', 'ops'))
|
|
||||||
parser.add_argument('--frequency_multiplier', action='store', type=float, default=1., help='Multiplies the frequencies in the TMatrix file by a given factor.')
|
|
||||||
# TODO enable more flexible per-sublattice specification
|
|
||||||
pargs=parser.parse_args()
|
|
||||||
if pargs.verbose:
|
|
||||||
print(pargs, file = sys.stderr)
|
|
||||||
|
|
||||||
maxlayer=pargs.maxlayer
|
|
||||||
eVfreq = pargs.eVfreq
|
|
||||||
freq = eVfreq*eV/hbar
|
|
||||||
verbose=pargs.verbose
|
|
||||||
dy = pargs.dy
|
|
||||||
dx = pargs.dx
|
|
||||||
Ny = pargs.Ny
|
|
||||||
Nx = pargs.Nx
|
|
||||||
|
|
||||||
TMatrix_file = pargs.TMatrix
|
|
||||||
|
|
||||||
epsilon_b = pargs.background_permittivity #2.3104
|
|
||||||
#gaussianSigma = pargs.gaussian if pargs.gaussian else None # hexside * 222 / 7
|
|
||||||
interpfreqfactor = pargs.frequency_multiplier
|
|
||||||
scp_dest = pargs.scp_to if pargs.scp_to else None
|
|
||||||
kxdensity = pargs.kxdensity
|
|
||||||
kydensity = pargs.kydensity
|
|
||||||
chunklen = pargs.chunklen
|
|
||||||
|
|
||||||
ops = list()
|
|
||||||
opre = re.compile(r'(tr|sym|copy|multl|mult)(\d*)')
|
|
||||||
for oparg in pargs.ops:
|
|
||||||
opm = opre.match(oparg[0])
|
|
||||||
if opm:
|
|
||||||
ops.append(((int(opm.group(2)),) if opm.group(2) else unitcell_indices, opm.group(1), oparg[1]))
|
|
||||||
else:
|
|
||||||
raise # should not happen
|
|
||||||
if(verbose):
|
|
||||||
print(ops, file = sys.stderr)
|
|
||||||
|
|
||||||
|
|
||||||
# -----------------finished basic CLI parsing (except for op arguments) ------------------
|
|
||||||
from qpms.timetrack import _time_b, _time_e
|
|
||||||
btime=_time_b(verbose)
|
|
||||||
|
|
||||||
import qpms
|
|
||||||
import numpy as np
|
|
||||||
import os, warnings, math
|
|
||||||
from scipy import interpolate
|
|
||||||
nx = None
|
|
||||||
s3 = math.sqrt(3)
|
|
||||||
|
|
||||||
|
|
||||||
# T-matrix specification here
|
|
||||||
refind = math.sqrt(epsilon_b)
|
|
||||||
cdn = c / refind
|
|
||||||
k_0 = freq * refind / c # = freq / cdn
|
|
||||||
TMatrices_orig, freqs_orig, freqs_weirdunits_orig, lMaxTM = qpms.loadScuffTMatrices(TMatrix_file)
|
|
||||||
lMax = lMaxTM
|
|
||||||
if pargs.lMax:
|
|
||||||
lMax = pargs.lMax if pargs.lMax else lMaxTM
|
|
||||||
my, ny = qpms.get_mn_y(lMax)
|
|
||||||
nelem = len(my)
|
|
||||||
if pargs.lMax: #force commandline specified lMax
|
|
||||||
TMatrices_orig = TMatrices_orig[...,0:nelem,:,0:nelem]
|
|
||||||
TMatrices = np.array(np.broadcast_to(TMatrices_orig[:,nx,:,:,:,:],(len(freqs_orig),unitcell_size,2,nelem,2,nelem)) )
|
|
||||||
xfl = qpms.xflip_tyty(lMax)
|
|
||||||
yfl = qpms.yflip_tyty(lMax)
|
|
||||||
zfl = qpms.zflip_tyty(lMax)
|
|
||||||
c2rot = qpms.apply_matrix_left(qpms.yflip_yy(3),qpms.xflip_yy(3),-1)
|
|
||||||
|
|
||||||
reCN = re.compile(r'(\d*)C(\d+)')
|
|
||||||
#TODO C infinity
|
|
||||||
|
|
||||||
for op in ops:
|
|
||||||
if op[0] == 'all':
|
|
||||||
#targets = (0,1)
|
|
||||||
targets = unitcell_indices
|
|
||||||
elif isinstance(op[0],int):
|
|
||||||
targets = (op[0],)
|
|
||||||
else:
|
|
||||||
targets = op[0]
|
|
||||||
|
|
||||||
if op[1] == 'sym':
|
|
||||||
mCN = reCN.match(op[2]) # Fuck van Rossum for not having assignments inside expressions
|
|
||||||
if op[2] == 'σ_z':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(zfl,qpms.apply_ndmatrix_left(zfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
|
|
||||||
elif op[2] == 'σ_y':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(yfl,qpms.apply_ndmatrix_left(yfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
|
|
||||||
elif op[2] == 'σ_x':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(xfl,qpms.apply_ndmatrix_left(xfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
|
|
||||||
elif op[2] == 'C2': # special case of the latter
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_matrix_left(c2rot,qpms.apply_matrix_left(c2rot, TMatrices[:,t], -3),-1))/2
|
|
||||||
elif mCN:
|
|
||||||
rotN = int(mCN.group(2))
|
|
||||||
TMatrix_contribs = np.empty((rotN,TMatrices.shape[0],2,nelem,2,nelem), dtype=np.complex_)
|
|
||||||
for t in targets:
|
|
||||||
for i in range(rotN):
|
|
||||||
rotangle = 2*np.pi*i / rotN
|
|
||||||
rot = qpms.WignerD_yy_fromvector(lMax,np.array([0,0,rotangle]))
|
|
||||||
rotinv = qpms.WignerD_yy_fromvector(lMax,np.array([0,0,-rotangle]))
|
|
||||||
TMatrix_contribs[i] = qpms.apply_matrix_left(rot,qpms.apply_matrix_left(rotinv, TMatrices[:,t], -3),-1)
|
|
||||||
TMatrices[:,t] = np.sum(TMatrix_contribs, axis=0) / rotN
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
elif op[1] == 'tr':
|
|
||||||
mCN = reCN.match(op[2]) # Fuck van Rossum for not having assignments inside expressions
|
|
||||||
if op[2] == 'σ_z':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = qpms.apply_ndmatrix_left(zfl,qpms.apply_ndmatrix_left(zfl, TMatrices[:,t], (-4,-3)),(-2,-1))
|
|
||||||
elif op[2] == 'σ_y':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = qpms.apply_ndmatrix_left(yfl,qpms.apply_ndmatrix_left(yfl, TMatrices[:,t], (-4,-3)),(-2,-1))
|
|
||||||
elif op[2] == 'σ_x':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = qpms.apply_ndmatrix_left(xfl,qpms.apply_ndmatrix_left(xfl, TMatrices[:,t], (-4,-3)),(-2,-1))
|
|
||||||
elif op[2] == 'C2':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = qpms.apply_matrix_left(c2rot,qpms.apply_matrix_left(c2rot, TMatrices[:,t], -3),-1)
|
|
||||||
elif mCN:
|
|
||||||
rotN = int(mCN.group(2))
|
|
||||||
power = int(mCN.group(1)) if mCN.group(1) else 1
|
|
||||||
TMatrix_contribs = np.empty((rotN,TMatrices.shape[0],2,nelem,2,nelem), dtype=np.complex_)
|
|
||||||
for t in targets:
|
|
||||||
rotangle = 2*np.pi*power/rotN
|
|
||||||
rot = qpms.WignerD_yy_fromvector(lMax, np.array([0,0,rotangle]))
|
|
||||||
rotinv = qpms.WignerD_yy_fromvector(lMax, np.array([0,0,-rotangle]))
|
|
||||||
TMatrices[:,t] = qpms.apply_matrix_left(rot, qpms.apply_matrix_left(rotinv, TMatrices[:,t], -3),-1)
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
elif op[1] == 'copy':
|
|
||||||
raise # not implemented
|
|
||||||
elif op[1] == 'mult':
|
|
||||||
raise # not implemented
|
|
||||||
elif op[1] == 'multl':
|
|
||||||
incy = np.full((nelem,), False, dtype=bool)
|
|
||||||
for incl in op[2][0].split(','):
|
|
||||||
l = int(incl)
|
|
||||||
incy += (l == ny)
|
|
||||||
scaty = np.full((nelem,), False, dtype=bool)
|
|
||||||
for scatl in op[2][1].split(','):
|
|
||||||
l = int(scatl)
|
|
||||||
scaty += (l == ny)
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[np.ix_(np.arange(TMatrices.shape[0]),np.array([t]),np.array([0,1]),scaty,np.array([0,1]),incy)] *= float(op[2][2])
|
|
||||||
else:
|
|
||||||
raise #unknown operation; should not happen
|
|
||||||
|
|
||||||
TMatrices_interp = interpolate.interp1d(freqs_orig*interpfreqfactor, TMatrices, axis=0, kind='linear',fill_value="extrapolate")
|
|
||||||
|
|
||||||
|
|
||||||
xpositions = np.arange(Nx) * dx
|
|
||||||
ypositions = np.arange(Ny) * dy
|
|
||||||
if not pargs.nocentre:
|
|
||||||
xpositions -= Nx * dx / 2
|
|
||||||
ypositions -= Ny * dy / 2
|
|
||||||
xpositions, ypositions = np.meshgrid(xpositions, ypositions, indexing='ij', copy=False)
|
|
||||||
positions=np.stack((xpositions.ravel(),ypositions.ravel()), axis=-1)
|
|
||||||
positions=positions[np.random.permutation(len(positions))]
|
|
||||||
N = positions.shape[0]
|
|
||||||
|
|
||||||
kx = np.linspace(pargs.kxmin, pargs.kxmax, num=pargs.kxdensity, endpoint=True) * 2*np.pi / dx
|
|
||||||
ky = np.linspace(pargs.kymin, pargs.kymax, num=pargs.kydensity, endpoint=True) * 2*np.pi / dy
|
|
||||||
kx, ky = np.meshgrid(kx, ky, indexing='ij', copy=False)
|
|
||||||
kz = np.sqrt(k_0**2 - (kx ** 2 + ky ** 2))
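# k-points outside the light cone (kx**2 + ky**2 > k_0**2) get kz = nan here (numpy merely
# warns about the negative sqrt argument); such points are skipped in the per-k-point loop below.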
|
|
||||||
|
|
||||||
klist_full = np.stack((kx,ky,kz), axis=-1).reshape((-1,3))
|
|
||||||
TMatrices_om = TMatrices_interp(freq)
|
|
||||||
|
|
||||||
chunkn = math.ceil(klist_full.size / 3 / chunklen)
|
|
||||||
|
|
||||||
if verbose:
|
|
||||||
print('Evaluating %d k-points' % klist_full.shape[0] + (' in %d chunks' % chunkn if chunkn > 1 else ''), file = sys.stderr)
|
|
||||||
sys.stderr.flush()
|
|
||||||
|
|
||||||
try:
|
|
||||||
version = qpms.__version__
|
|
||||||
except NameError:
|
|
||||||
version = None
|
|
||||||
|
|
||||||
metadata = np.array({
|
|
||||||
'script': os.path.basename(__file__),
|
|
||||||
'version': version,
|
|
||||||
'type' : 'Plane wave scattering on a finite rectangular lattice',
|
|
||||||
'lMax' : lMax,
|
|
||||||
'dx' : dx,
|
|
||||||
'dy' : dy,
|
|
||||||
'Nx' : Nx,
|
|
||||||
'Ny' : Ny,
|
|
||||||
#'maxlayer' : maxlayer,
|
|
||||||
#'gaussianSigma' : gaussianSigma,
|
|
||||||
'epsilon_b' : epsilon_b,
|
|
||||||
#'hexside' : hexside,
|
|
||||||
'chunkn' : chunkn,
|
|
||||||
'chunki' : 0,
|
|
||||||
'TMatrix_file' : TMatrix_file,
|
|
||||||
'ops' : ops,
|
|
||||||
'centred' : not pargs.nocentre
|
|
||||||
})
|
|
||||||
|
|
||||||
|
|
||||||
scat = qpms.Scattering_2D_zsym(positions, TMatrices_om, k_0, verbose=verbose)
|
|
||||||
|
|
||||||
if pargs.only_TE:
|
|
||||||
actions = (0,)
|
|
||||||
elif pargs.only_TM:
|
|
||||||
actions = (1,)
|
|
||||||
elif pargs.serial:
|
|
||||||
actions = (0,1)
|
|
||||||
else:
|
|
||||||
actions = (None,)
|
|
||||||
|
|
||||||
xu = np.array((1,0,0))
|
|
||||||
yu = np.array((0,1,0))
|
|
||||||
zu = np.array((0,0,1))
|
|
||||||
TEč, TMč = qpms.symz_indexarrays(lMax)
|
|
||||||
|
|
||||||
klist_full_2D = klist_full[...,:2]
|
|
||||||
klist_full_dir = klist_full/np.linalg.norm(klist_full, axis=-1, keepdims=True)
|
|
||||||
for action in actions:
|
|
||||||
if action is None:
|
|
||||||
scat.prepare(verbose=verbose)
|
|
||||||
actionstring = ''
|
|
||||||
else:
|
|
||||||
scat.prepare_partial(action, verbose=verbose)
|
|
||||||
actionstring = '.TM' if action else '.TE'
|
|
||||||
for chunki in range(chunkn):
|
|
||||||
sbtime = _time_b(verbose, step='Solving the scattering problem, chunk %d'%chunki+actionstring)
|
|
||||||
if pargs.nosuffix:
|
|
||||||
outfile = pargs.output_prefix + actionstring + (
|
|
||||||
('.%03d' % chunki) if chunkn > 1 else '')
|
|
||||||
else:
|
|
||||||
outfile = '%s_%dx%d_%.0fnmx%.0fnm_%.4f%s%s.npz' % (
|
|
||||||
pargs.output_prefix, Nx, Ny, dx/1e-9, dy/1e-9,
|
|
||||||
eVfreq, actionstring,
|
|
||||||
(".%03d" % chunki) if chunkn > 1 else '')
|
|
||||||
|
|
||||||
klist = klist_full[chunki * chunklen : (chunki + 1) * chunklen]
|
|
||||||
klist2d = klist_full_2D[chunki * chunklen : (chunki + 1) * chunklen]
|
|
||||||
klistdir = klist_full_dir[chunki * chunklen : (chunki + 1) * chunklen]
|
|
||||||
|
|
||||||
'''
|
|
||||||
The following loop is a workaround that has its roots in the fact that
|
|
||||||
the function qpms.get_π̃τ̃_y1 in qpms_p.py is not vectorized
|
|
||||||
(and consequently, neither is plane_pq_y.)
|
|
||||||
|
|
||||||
And Scattering_2D_zsym.scatter_partial is not vectorized, either.
|
|
||||||
'''
|
|
||||||
if action == 0 or action is None:
|
|
||||||
xresult = np.full((klist.shape[0], N, nelem), np.nan, dtype=complex)
|
|
||||||
yresult = np.full((klist.shape[0], N, nelem), np.nan, dtype=complex)
|
|
||||||
if action == 1 or action is None:
|
|
||||||
zresult = np.full((klist.shape[0], N, nelem), np.nan, dtype=complex)
|
|
||||||
for i in range(klist.shape[0]):
|
|
||||||
if math.isnan(klist[i,2]):
|
|
||||||
if(verbose):
|
|
||||||
print("%d. momentum %s invalid (k_0=%f), skipping" % (i, str(klist[i]),k_0))
|
|
||||||
continue
|
|
||||||
kdir = klistdir[i]
|
|
||||||
phases = np.exp(-1j*np.sum(klist2d[i] * positions, axis=-1))
|
|
||||||
if action == 0 or action is None:
|
|
||||||
pq = np.array(qpms.plane_pq_y(lMax, kdir, xu)).ravel()[TEč] * phases[:, nx]
|
|
||||||
xresult[i] = scat.scatter_partial(0, pq)
|
|
||||||
pq = np.array(qpms.plane_pq_y(lMax, kdir, yu)).ravel()[TEč] * phases[:, nx]
|
|
||||||
yresult[i] = scat.scatter_partial(0, pq)
|
|
||||||
if action == 1 or action is None:
|
|
||||||
pq = np.array(qpms.plane_pq_y(lMax, kdir, zu)).ravel()[TMč] * phases[:, nx]
|
|
||||||
zresult[i] = scat.scatter_partial(1, pq)
|
|
||||||
_time_e(sbtime, verbose, step='Solving the scattering problem, chunk %d'%chunki+actionstring)
|
|
||||||
|
|
||||||
metadata[()]['chunki'] = chunki
|
|
||||||
if action is None:
|
|
||||||
np.savez(outfile, omega = freq, klist = klist,
|
|
||||||
metadata=metadata,
|
|
||||||
positions=positions,
|
|
||||||
ab_x=xresult,
|
|
||||||
ab_y=yresult,
|
|
||||||
ab_z=zresult
|
|
||||||
)
|
|
||||||
elif action == 0:
|
|
||||||
np.savez(outfile, omega = freq, klist = klist,
|
|
||||||
metadata=metadata,
|
|
||||||
positions=positions,
|
|
||||||
ab_x=xresult,
|
|
||||||
ab_y=yresult,
|
|
||||||
)
|
|
||||||
elif action == 1:
|
|
||||||
np.savez(outfile, omega = freq, klist = klist,
|
|
||||||
metadata=metadata,
|
|
||||||
positions=positions,
|
|
||||||
ab_z=zresult
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
|
|
||||||
if scp_dest:
|
|
||||||
if outfile:
|
|
||||||
subprocess.run(['scp', outfile, scp_dest])
|
|
||||||
scat.forget_matrices() # free memory in case --serial was used
|
|
||||||
|
|
||||||
_time_e(btime, verbose)
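# Sketch of reading one saved chunk back (illustrative only; 'some_chunk.npz' is a placeholder
# for a file written by the np.savez calls above):
#
#   data = np.load('some_chunk.npz', allow_pickle=True)  # metadata is a 0-d object array
#   meta = data['metadata'][()]                          # -> the metadata dict
#   omega, klist, positions = data['omega'], data['klist'], data['positions']
#   ab_x = data['ab_x']  # shape (len(klist), N, nelem); ab_y / ab_z analogously, depending
#                        # on which of --only_TE / --only_TM / --serial was used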
|
|
|
@ -1,340 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
'''
|
|
||||||
Bulk SVD mode computation for compact scatterer 2D lattices
|
|
||||||
'''
|
|
||||||
|
|
||||||
__TODOs__ = '''
|
|
||||||
BIG TODO: Use a more efficient way to calculate the interaction sums: perhaps some customized Ewald-type summation?
|
|
||||||
|
|
||||||
Small TODOs:
|
|
||||||
- Implement a more user-friendly way to define the lattice base vectors and positions of the particles.
|
|
||||||
cf. https://stackoverflow.com/questions/2371436/evaluating-a-mathematical-expression-in-a-string/2371789
|
|
||||||
- low priority: allow to perform some more custom operations on T-Matrix, using some kind of parsing from the previous point
|
|
||||||
- Autodetect symmetries
|
|
||||||
|
|
||||||
'''
|
|
||||||
import argparse, re, random, string, os, sys
|
|
||||||
import subprocess
|
|
||||||
from scipy.constants import hbar, e as eV, pi, c
|
|
||||||
import warnings
|
|
||||||
|
|
||||||
def make_action_sharedlist(opname, listname):
|
|
||||||
class opAction(argparse.Action):
|
|
||||||
def __call__(self, parser, args, values, option_string=None):
|
|
||||||
if (not hasattr(args, listname)) or getattr(args, listname) is None:
|
|
||||||
setattr(args, listname, list())
|
|
||||||
getattr(args,listname).append((opname, values))
|
|
||||||
return opAction
|
|
||||||
|
|
||||||
parser = argparse.ArgumentParser()
|
|
||||||
#TODO? use type=argparse.FileType('r') ?
|
|
||||||
#TODO create some user-friendlier way to define lattice vectors, cf. https://stackoverflow.com/questions/2371436/evaluating-a-mathematical-expression-in-a-string/2371789
|
|
||||||
parser.add_argument('--lattice_base', nargs=4, action='store', type=float, required=True, help='Lattice basis vectors x1, y1, x2, y2')
|
|
||||||
parser.add_argument('--particle', '-p', nargs='+', action=make_action_sharedlist('particle', 'particlespec'), help='Particle label, coordinates x,y, and (optionally) path to the T-Matrix.')
|
|
||||||
parser.add_argument('--TMatrix', '-t', nargs='+', action=make_action_sharedlist('TMatrix_path', 'particlespec'), help='Path to TMatrix file')
|
|
||||||
#parser.add_argument('--griddir', action='store', required=True, help='Path to the directory with precalculated translation operators')
|
|
||||||
parser.add_argument('--output_prefix', action='store', required=True, help='Prefix for the npz output (frequency, hexside and chunk number will be appended)')
|
|
||||||
#sizepar = parser.add_mutually_exclusive_group(required=True)
|
|
||||||
#DEL parser.add_argument('--hexside', action='store', type=float, required=True, help='Lattice hexagon size length')
|
|
||||||
parser.add_argument('--plot_TMatrix', action='store_true', help='Visualise TMatrix on the first page of the output')
|
|
||||||
#parser.add_argument('--SVD_output', action='store', help='Path to output singular value decomposition result')
|
|
||||||
parser.add_argument('--maxlayer', action='store', type=int, default=100, help='How far to sum the lattice points to obtain the dispersion')
|
|
||||||
parser.add_argument('--scp_to', action='store', metavar='N', type=str, help='SCP the output files to a given destination')
|
|
||||||
parser.add_argument('--background_permittivity', action='store', type=float, default=1., help='Background medium relative permittivity (default 1)')
|
|
||||||
parser.add_argument('--eVfreq', action='store', required=True, type=float, help='Frequency in eV')
|
|
||||||
parser.add_argument('--kdensity', '--k_density', action='store', type=int, default=33, help='Number of k-points per x-axis segment FIXME DESCRIPTION')
|
|
||||||
parser.add_argument('--bz_coverage', action='store', type=float, default=1., help='Brillouin zone coverage in relative length (default 1 for whole 1. BZ)')
|
|
||||||
parser.add_argument('--bz_edge_width', action='store', type=float, default=0., help='Width of the more densely covered belt along the 1. BZ edge in relative lengths')
|
|
||||||
parser.add_argument('--bz_edge_factor', action='store', type=float, default=8., help='Relative density of the belt along the 1. BZ edge w.r.t. k_density (default==8)')
|
|
||||||
parser.add_argument('--bz_edge_twoside', action='store_true', help='Compute also the parts of the densely covered edge belt outside the 1. BZ')
|
|
||||||
parser.add_argument('--bz_corner_width', action='store', type=float, default=0., help='Size of the more densely covered subcell along the 1. BZ corners in relative lengths')
|
|
||||||
parser.add_argument('--bz_corner_factor', action='store', type=float, default=16., help='Relative density of the subcell along the 1. BZ corner w.r.t. k_density (default==16)')
|
|
||||||
parser.add_argument('--bz_corner_twoside', action='store_true', help='Compute also the parts of the densely covered subcell outside the 1. BZ')
|
|
||||||
|
|
||||||
parser.add_argument('--chunklen', action='store', type=int, default=1000, help='Number of k-points per output file (default 1000)')
|
|
||||||
parser.add_argument('--lMax', action=make_action_sharedlist('lMax', 'particlespec'), nargs='+', help='Override lMax from the TMatrix file')
|
|
||||||
#TODO some more sophisticated x axis definitions
|
|
||||||
parser.add_argument('--gaussian', action='store', type=float, metavar='σ', help='Use a gaussian envelope for weighting the interaction matrix contributions (depending on the distance), measured in unit cell lengths (FIXME: check the unit).')
|
|
||||||
parser.add_argument('--verbose', '-v', action='count', help='Be verbose (about computation times, mostly)')
|
|
||||||
popgrp=parser.add_argument_group(title='Operations')
|
|
||||||
popgrp.add_argument('--tr', dest='ops', nargs='+', action=make_action_sharedlist('tr', 'ops'), default=list()) # the default value for dest can be set once
|
|
||||||
popgrp.add_argument('--sym', dest='ops', nargs='+', action=make_action_sharedlist('sym', 'ops'))
|
|
||||||
#popgrp.add_argument('--mult', dest='ops', nargs=3, metavar=('INCSPEC', 'SCATSPEC', 'MULTIPLIER'), action=make_action_sharedlist('mult', 'ops'))
|
|
||||||
#popgrp.add_argument('--multl', dest='ops', nargs=3, metavar=('INCL[,INCL,...]', 'SCATL[,SCATL,...]', 'MULTIPLIER'), action=make_action_sharedlist('multl', 'ops'))
|
|
||||||
parser.add_argument('--frequency_multiplier', action='store', type=float, default=1., help='Multiplies the frequencies in the TMatrix file by a given factor.')
|
|
||||||
pargs=parser.parse_args()
|
|
||||||
print(pargs)
|
|
||||||
|
|
||||||
exit(0) ###
|
|
||||||
|
|
||||||
maxlayer=pargs.maxlayer
|
|
||||||
#DEL hexside=pargs.hexside
|
|
||||||
eVfreq = pargs.eVfreq
|
|
||||||
freq = eVfreq*eV/hbar
|
|
||||||
verbose=pargs.verbose
|
|
||||||
|
|
||||||
#DEL TMatrix_file = pargs.TMatrix
|
|
||||||
|
|
||||||
epsilon_b = pargs.background_permittivity #2.3104
|
|
||||||
gaussianSigma = pargs.gaussian if pargs.gaussian else None # hexside * 222 / 7
|
|
||||||
interpfreqfactor = pargs.frequency_multiplier
|
|
||||||
scp_dest = pargs.scp_to if pargs.scp_to else None
|
|
||||||
kdensity = pargs.kdensity
|
|
||||||
chunklen = pargs.chunklen
|
|
||||||
|
|
||||||
#### Nanoparticle position and T-matrix path parsing ####
|
|
||||||
TMatrix_paths = dict()
|
|
||||||
lMax_overrides = dict()
|
|
||||||
default_TMatrix_path = None
|
|
||||||
default_lMax_override = None
|
|
||||||
if not any(arg_type == 'particle' for (arg_type, arg_content) in pargs.particlespec):
|
|
||||||
# no particles positions given: suppose only one per unit cell, in the cell origin
|
|
||||||
positions = {None: (0.0, 0.0)}
|
|
||||||
else:
|
|
||||||
positions = dict()
|
|
||||||
for arg_type, arg_content in pargs.particlespec:
|
|
||||||
if arg_type == 'particle': # --particle option
|
|
||||||
if 3 <= len(arg_content) <= 4:
|
|
||||||
try:
|
|
||||||
positions[arg_content[0]] = (float(arg_content[1]), float(arg_content[2]))
|
|
||||||
except ValueError as e:
|
|
||||||
e.args += ("second and third argument of --particle must be valid floats, given: ", arg_content)
|
|
||||||
raise
|
|
||||||
if len(arg_content) == 4:
|
|
||||||
if arg_content[0] in TMatrix_paths:
|
|
||||||
warnings.warn('T-matrix path for particle \'%s\' already specified.'
|
|
||||||
'Overriding with the last value.' % arg_content[0], SyntaxWarning)
|
|
||||||
TMatrix_paths[arg_content[0]] = arg_content[3]
|
|
||||||
|
|
||||||
else:
|
|
||||||
raise ValueError("--particle expects 3 or 4 arguments, %d given: " % len(arg_content), arg_content)
|
|
||||||
elif arg_type == 'TMatrix_path': # --TMatrix option
|
|
||||||
if len(arg_content) == 1: # --TMatrix default_path
|
|
||||||
if default_TMatrix_path is not None:
|
|
||||||
warnings.warn('Default T-matrix path already specified. Overriding with the last value.', SyntaxWarning)
|
|
||||||
default_TMatrix_path = arg_content[0]
|
|
||||||
elif len(arg_content) > 1: # --TMatrix label [label2 [...]] path
|
|
||||||
for label in arg_content[:-1]:
|
|
||||||
if label in TMatrix_paths.keys():
|
|
||||||
warnings.warn('T-matrix path for particle \'%s\' already specified.'
|
|
||||||
'Overriding with the last value.' % label, SyntaxWarning)
|
|
||||||
TMatrix_paths[label] = arg_content[-1]
|
|
||||||
elif arg_type == 'lMax': # --lMax option
|
|
||||||
if len(arg_content) == 1: # --lMax default_lmax_override
|
|
||||||
if default_lMax_override is not None:
|
|
||||||
warnings.warn('Default lMax override value already specified. Overriding with the last value.', SyntaxWarning)
|
|
||||||
default_lMax_override = int(arg_content[-1])
|
|
||||||
else:
|
|
||||||
for label in arg_content[:-1]:
|
|
||||||
if label in lMax_overrides.keys():
|
|
||||||
warnings.warn('lMax override for particle \'%s\' already specified.'
|
|
||||||
' Overriding with the last value.' % label, SyntaxWarning)
|
|
||||||
lMax_overrides[label] = int(arg_content[-1])
|
|
||||||
else: assert False, 'unknown option type'
|
|
||||||
# Check the info from positions and TMatrix_paths and lMax_overrides
|
|
||||||
if not set(TMatrix_paths.keys()) <= set(positions.keys()):
|
|
||||||
raise ValueError("T-Matrix path(s) for particle(s) labeled %s was given, but not their positions"
|
|
||||||
% str(set(TMatrix_paths.keys()) - set(positions.keys())))
|
|
||||||
if not set(lMax_overrides.keys()) <= set(positions.keys()):
|
|
||||||
raise ValueError("lMax override(s) for particle(s) labeled %s was given, but not their positions"
|
|
||||||
%str(set(lMax_overrides.keys()) - set(positions.keys())))
|
|
||||||
if (set(TMatrix_paths.keys()) != set(positions.keys())) and default_TMatrix_path is None:
|
|
||||||
raise ValueError("Position(s) of particles(s) labeled %s was given without their T-matrix"
|
|
||||||
" and no default T-matrix was specified"
|
|
||||||
% str(set(positions.keys()) - set(TMatrix_paths.keys())))
|
|
||||||
for path in TMatrix_paths.values():
|
|
||||||
if not os.path.exists(path):
|
|
||||||
raise ValueError("Cannot access T-matrix file %s. Does it exist?" % path)
|
|
||||||
|
|
||||||
# Assign (pre-parse) the T-matrix operations to individual particles
|
|
||||||
ops = dict()
|
|
||||||
for label in positions.keys(): ops[label] = list()
|
|
||||||
for optype, arg_content in pargs.ops:
|
|
||||||
# if no label is given, apply to all, otherwise to the specified particles
|
|
||||||
for label in (positions.keys() if len(arg_content) == 1 else arg_content[:-1]):
|
|
||||||
try:
|
|
||||||
ops[label].append((optype, arg_content[-1]))
|
|
||||||
except KeyError as e:
|
|
||||||
e.args += ('Specified operation on undefined particle labeled \'%s\'' % label,)
|
|
||||||
raise
|
|
||||||
|
|
||||||
print("ops: ", ops, file=sys.stderr) #DEBUG
|
|
||||||
|
|
||||||
#### Collect all the info about the particles / their T-matrices into one list ####
|
|
||||||
# Enumerate and assign all the _different_ T-matrices (without any intelligent group-theory checking, though)
|
|
||||||
TMatrix_specs = dict((spec, number)
|
|
||||||
for (number, spec) in enumerate(set(
|
|
||||||
(lMax_overrides[label] if label in lMax_overrides.keys() else None,
|
|
||||||
TMatrix_paths[label],
|
|
||||||
tuple(ops[label]))
|
|
||||||
for label in positions.keys()
|
|
||||||
)))
|
|
||||||
# particles_specs contains (label, (xpos, ypos), tmspec_index per element)
|
|
||||||
particles_specs = [(label, positions[label],
|
|
||||||
TMatrix_specs[(lMax_overrides[label] if label in lMax_overrides.keys() else None,
|
|
||||||
TMatrix_paths[label],
|
|
||||||
tuple(ops[label]))]
|
|
||||||
) for label in positions.keys()]
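# Illustrative example (hypothetical labels): two particles 'A' and 'B' sharing one T-matrix spec
# would give particles_specs == [('A', (0.0, 0.0), 0), ('B', (200e-9, 0.0), 0)].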
|
|
||||||
|
|
||||||
# -----------------finished basic CLI parsing (except for op arguments) ------------------
|
|
||||||
from qpms.timetrack import _time_b, _time_e
|
|
||||||
btime=_time_b(verbose)
|
|
||||||
|
|
||||||
import qpms
|
|
||||||
import numpy as np
|
|
||||||
import os, sys, warnings, math
|
|
||||||
from scipy import interpolate
|
|
||||||
nx = None
|
|
||||||
s3 = math.sqrt(3)
|
|
||||||
|
|
||||||
|
|
||||||
# T-matrix specification here
|
|
||||||
cdn = c/ math.sqrt(epsilon_b)
|
|
||||||
TMatrices_orig, freqs_orig, freqs_weirdunits_orig, lMaxTM = qpms.loadScuffTMatrices(TMatrix_file)
|
|
||||||
lMax = lMaxTM
|
|
||||||
if pargs.lMax:
|
|
||||||
lMax = pargs.lMax if pargs.lMax else lMaxTM
|
|
||||||
my, ny = qpms.get_mn_y(lMax)
|
|
||||||
nelem = len(my)
|
|
||||||
if pargs.lMax: #force commandline specified lMax
|
|
||||||
TMatrices_orig = TMatrices_orig[...,0:nelem,:,0:nelem]
|
|
||||||
|
|
||||||
TMatrices = np.array(np.broadcast_to(TMatrices_orig[:,nx,:,:,:,:],(len(freqs_orig),2,2,nelem,2,nelem)) )
|
|
||||||
|
|
||||||
#TMatrices[:,:,:,:,:,ny==3] *= factor13inc
|
|
||||||
#TMatrices[:,:,:,ny==3,:,:] *= factor13scat
|
|
||||||
xfl = qpms.xflip_tyty(lMax)
|
|
||||||
yfl = qpms.yflip_tyty(lMax)
|
|
||||||
zfl = qpms.zflip_tyty(lMax)
|
|
||||||
c2rot = qpms.apply_matrix_left(qpms.yflip_yy(3),qpms.xflip_yy(3),-1)
|
|
||||||
|
|
||||||
reCN = re.compile(r'(\d*)C(\d+)')
|
|
||||||
#TODO C infinity
|
|
||||||
|
|
||||||
for op in ops:
|
|
||||||
if op[0] == 'all':
|
|
||||||
targets = (0,1)
|
|
||||||
elif isinstance(op[0],int):
|
|
||||||
targets = (op[0],)
|
|
||||||
else:
|
|
||||||
targets = op[0]
|
|
||||||
|
|
||||||
if op[1] == 'sym':
|
|
||||||
mCN = reCN.match(op[2]) # Fuck van Rossum for not having assignments inside expressions
|
|
||||||
if op[2] == 'σ_z':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(zfl,qpms.apply_ndmatrix_left(zfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
|
|
||||||
elif op[2] == 'σ_y':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(yfl,qpms.apply_ndmatrix_left(yfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
|
|
||||||
elif op[2] == 'σ_x':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(xfl,qpms.apply_ndmatrix_left(xfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
|
|
||||||
elif op[2] == 'C2': # special case of the latter
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_matrix_left(c2rot,qpms.apply_matrix_left(c2rot, TMatrices[:,t], -3),-1))/2
|
|
||||||
elif mCN:
|
|
||||||
rotN = int(mCN.group(2))
|
|
||||||
TMatrix_contribs = np.empty((rotN,TMatrices.shape[0],2,nelem,2,nelem), dtype=np.complex_)
|
|
||||||
for t in targets:
|
|
||||||
for i in range(rotN):
|
|
||||||
rotangle = 2*np.pi*i / rotN
|
|
||||||
rot = qpms.WignerD_yy_fromvector(lMax,np.array([0,0,rotangle]))
|
|
||||||
rotinv = qpms.WignerD_yy_fromvector(lMax,np.array([0,0,-rotangle]))
|
|
||||||
TMatrix_contribs[i] = qpms.apply_matrix_left(rot,qpms.apply_matrix_left(rotinv, TMatrices[:,t], -3),-1)
|
|
||||||
TMatrices[:,t] = np.sum(TMatrix_contribs, axis=0) / rotN
|
|
||||||
else:
|
|
||||||
raise ValueError('\'%s\' is not an implemented symmetry operation' % op[2])
|
|
||||||
elif op[1] == 'tr':
|
|
||||||
mCN = reCN.match(op[2]) # Fuck van Rossum for not having assignments inside expressions
|
|
||||||
if op[2] == 'σ_z':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = qpms.apply_ndmatrix_left(zfl,qpms.apply_ndmatrix_left(zfl, TMatrices[:,t], (-4,-3)),(-2,-1))
|
|
||||||
elif op[2] == 'σ_y':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = qpms.apply_ndmatrix_left(yfl,qpms.apply_ndmatrix_left(yfl, TMatrices[:,t], (-4,-3)),(-2,-1))
|
|
||||||
elif op[2] == 'σ_x':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = qpms.apply_ndmatrix_left(xfl,qpms.apply_ndmatrix_left(xfl, TMatrices[:,t], (-4,-3)),(-2,-1))
|
|
||||||
elif op[2] == 'C2':
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[:,t] = qpms.apply_matrix_left(c2rot,qpms.apply_matrix_left(c2rot, TMatrices[:,t], -3),-1)
|
|
||||||
elif mCN:
|
|
||||||
rotN = int(mCN.group(2))
|
|
||||||
power = int(mCN.group(1)) if mCN.group(1) else 1
|
|
||||||
TMatrix_contribs = np.empty((rotN,TMatrices.shape[0],2,nelem,2,nelem), dtype=np.complex_)
|
|
||||||
for t in targets:
|
|
||||||
rotangle = 2*np.pi*power/rotN
|
|
||||||
rot = qpms.WignerD_yy_fromvector(lMax, np.array([0,0,rotangle]))
|
|
||||||
rotinv = qpms.WignerD_yy_fromvector(lMax, np.array([0,0,-rotangle]))
|
|
||||||
TMatrices[:,t] = qpms.apply_matrix_left(rot, qpms.apply_matrix_left(rotinv, TMatrices[:,t], -3),-1)
|
|
||||||
else:
|
|
||||||
raise ValueError('\'%s\' is not an implemented T-matrix transformation operation' % op[2])
|
|
||||||
elif op[1] == 'copy':
|
|
||||||
raise # not implemented
|
|
||||||
elif op[1] == 'mult':
|
|
||||||
raise # not implemented
|
|
||||||
elif op[1] == 'multl':
|
|
||||||
incy = np.full((nelem,), False, dtype=bool)
|
|
||||||
for incl in op[2][0].split(','):
|
|
||||||
l = int(incl)
|
|
||||||
incy += (l == ny)
|
|
||||||
scaty = np.full((nelem,), False, dtype=bool)
|
|
||||||
for scatl in op[2][1].split(','):
|
|
||||||
l = int(scatl)
|
|
||||||
scaty += (l == ny)
|
|
||||||
for t in targets:
|
|
||||||
TMatrices[np.ix_(np.arange(TMatrices.shape[0]),np.array([t]),np.array([0,1]),scaty,np.array([0,1]),incy)] *= float(op[2][2])
|
|
||||||
else:
|
|
||||||
raise #unknown operation; should not happen
|
|
||||||
|
|
||||||
TMatrices_interp = interpolate.interp1d(freqs_orig*interpfreqfactor, TMatrices, axis=0, kind='linear',fill_value="extrapolate")
|
|
||||||
|
|
||||||
klist_full = qpms.generate_trianglepoints(kdensity, v3d=True, include_origin=True)*3*math.pi/(3*kdensity*hexside)
|
|
||||||
TMatrices_om = TMatrices_interp(freq)
|
|
||||||
|
|
||||||
chunkn = math.ceil(klist_full.shape[0] / chunklen)
|
|
||||||
|
|
||||||
if verbose:
|
|
||||||
print('Evaluating %d k-points in %d chunks' % (klist_full.shape[0], chunkn), file = sys.stderr)
|
|
||||||
sys.stderr.flush()
|
|
||||||
|
|
||||||
metadata = np.array({
|
|
||||||
'lMax' : lMax,
|
|
||||||
'maxlayer' : maxlayer,
|
|
||||||
'gaussianSigma' : gaussianSigma,
|
|
||||||
'epsilon_b' : epsilon_b,
|
|
||||||
'hexside' : hexside,
|
|
||||||
'chunkn' : chunkn,
|
|
||||||
'TMatrix_file' : TMatrix_file,
|
|
||||||
'ops' : ops,
|
|
||||||
})
|
|
||||||
|
|
||||||
for chunki in range(chunkn):
|
|
||||||
svdout = '%s_%dnm_%.4f_c%03d.npz' % (pargs.output_prefix, hexside/1e-9, eVfreq, chunki)
|
|
||||||
|
|
||||||
klist = klist_full[chunki * chunklen : (chunki + 1) * chunklen]
|
|
||||||
|
|
||||||
svdres = qpms.hexlattice_zsym_getSVD(lMax=lMax, TMatrices_om=TMatrices_om, epsilon_b=epsilon_b, hexside=hexside, maxlayer=maxlayer,
|
|
||||||
omega=freq, klist=klist, gaussianSigma=gaussianSigma, onlyNmin=False, verbose=verbose)
|
|
||||||
|
|
||||||
#((svUfullTElist, svSfullTElist, svVfullTElist), (svUfullTMlist, svSfullTMlist, svVfullTMlist)) = svdres
|
|
||||||
|
|
||||||
np.savez(svdout, omega = freq, klist = klist,
|
|
||||||
metadata=metadata,
|
|
||||||
uTE = svdres[0][0],
|
|
||||||
vTE = svdres[0][2],
|
|
||||||
sTE = svdres[0][1],
|
|
||||||
uTM = svdres[1][0],
|
|
||||||
vTM = svdres[1][2],
|
|
||||||
sTM = svdres[1][1],
|
|
||||||
|
|
||||||
)
|
|
||||||
svdres=None
|
|
||||||
|
|
||||||
if scp_dest:
|
|
||||||
if svdout:
|
|
||||||
subprocess.run(['scp', svdout, scp_dest])
|
|
||||||
|
|
||||||
_time_e(btime, verbose)
|
|
||||||
#print(time.strftime("%H.%M:%S",time.gmtime(time.time()-begtime)))
|
|
|
@ -1,35 +0,0 @@
import qpms
import numpy as np
from numpy import newaxis as nx
import math
import cmath
import os
from scipy.constants import c, e as eV, hbar
s3 = math.sqrt(3)

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("omega")
#parser.add_argument("maxlayer")
args = parser.parse_args()
omega_eV = float(args.omega)

print(omega_eV)

epsilon_b = 2.3104
hexside = 375e-9
lMax = 3
maxlayer = 222
my, ny = qpms.get_mn_y(lMax)
nelem = len(my)

omega = omega_eV * eV / hbar

k_0 = omega * math.sqrt(epsilon_b) / c

output_prefix = '/tmp/diracpoints-newdata2/%d/' % maxlayer

os.makedirs(output_prefix, exist_ok=True)
qpms.hexlattice_precalc_AB_save(file=output_prefix+str(omega_eV), lMax=lMax, k_hexside=k_0*hexside,
        maxlayer=maxlayer, savepointinfo=True)

@ -1,11 +0,0 @@
#!/usr/bin/env python3

import sys
from qpms import processWfiles_sameKs

npart = int(sys.argv[1])
dest = sys.argv[2]
srcs = sys.argv[3:]

processWfiles_sameKs(srcs, dest, f='d', nparticles=npart)

@ -1,10 +0,0 @@
#!/usr/bin/env python3

import sys
from qpms import processWfiles_sameKs

dest = sys.argv[1]
srcs = sys.argv[2:]

processWfiles_sameKs(srcs, dest, f='d')

@ -1,13 +0,0 @@
#!/usr/bin/env python3

import sys
from qpms import processWfiles_sameKs

npart = int(sys.argv[1])
dest = sys.argv[2]
srcs = sys.argv[3:]

srcs_sorted = sorted(srcs, key=float)

processWfiles_sameKs(srcs_sorted, dest, f='d', nparticles=npart)

@ -1,31 +0,0 @@
'''INCOMPLETE! This will read the refractiveindex.info yaml files
and transform the database into a C source.'''

import re
import os
try:
    from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
    from yaml import Loader, Dumper

# Right now, we can process only the 'tabulated nk' data
searchfor = '- type: tabulated nk'
searchfor = re.compile(searchfor)

ridatadir = "/u/46/necadam1/unix/repo/refractiveindex.info-database/database/data"

nktables = dict()

def find_files_by_pattern(pattern, dir):
    r = re.compile(pattern)
    for parent, dnames, fnames in os.walk(ridatadir):
        for fname in fnames:
            filename = os.path.join(parent, fname)
            if os.path.isfile(filename):
                with open(filename) as f:
                    text = f.read()
                    if r.search(text):
                        yield (

@ -1,85 +0,0 @@
ricbessel # Riccati-Bessel psi
richankel # Riccati-Hankel xi
cricbessel
crichankel # (same with complex argument)
cspherebessel # spherical Bessel jn(z), yn(z)
vcfunc # vector coupling coefficients C(m,n|k,l|m+k,w), w = |n-l|,...n+l
normalizedlegendre # normalized associated Legendre functions
rotcoef # Generalized spherical functions
taufunc # vector spherical harmonics, normalized
pifunc # vector spherical harmonics, ?
planewavecoef # regular vswf expansion coefficients for a plane wave
gaussianbeamcoef # regular vswf expansion for a gaussian beam
sphereplanewavecoef # plane wave expansion coefficients at sphere origins
axialtrancoefrecurrence # axial translation coefficients
axialtrancoefinit
tranordertest # test to determine convergence of regular vswf addition theorem
atcadd
atcdim
moffset
gentrancoef # calculates the vwh translation coefficients for a general translation from one origin to another
cartosphere # cartesian to spherical coordinates
eulerrotation # euler rotation of a point specified in cartesian coords
ephicoef
planewavetruncationorder # test to determine max order of vswf expansion of a plane wave at distance r
vwhcalc # calculates the cartesian components of the vswf at position rpos
vwhaxialcalc # svwf calculation for an axial translation
twobytwoinverse # inverse of a 2x2 matrix
transfer

mpisetup

module spheredata (lots of declarations!, read all of them)
(...)

module miecoefdata
miecoefcalc # calculation of the max order of sphere expansions and storage of mie coefficients
readtmatrix # reads and stores a PARTICLE T matrix
lrmodetran # transformation between lr and te/tm basis
mieoa # optically active lorenz/mie coefficients
getmiedataall # retrieve the array of mie data
getmiedataone # retrieve mie data for a single sphere
onemiecoeffmult # multiplies coefficients for sphere i by appropriate lm coefficient
multmiecoeffmult # generalized mie coefficient mult
dotproduct # vector product for each rhs element of coefficient array

module translation
hostconfiguration # calculates lists for identifying host and interior sphere
rottranmtrxsetup # sets up the stored translation matrices and sets other constants
rottranmtrxclear # clear the stored translation matrices
sphereinteraction # the general sphere interaction driver
external_to_external_expansion # outgoing translation operator: a(i) = H(i-j) a(j)
external_to_internal_expansion #
m1_to_the_n # sign flipped for odd degrees
rottranfarfield # far field formula for outgoing vswf translation
farfieldtranslationerror # correction term for hybrid bcgm solution
rottran # the vectorized rotation translation-rotation operation !!!!
spheregaussianbeamcoef # GB coefficients for sphere-centered expansion, obtained via translation
rotvec # rotation of expansion coefficients amn by euler angles

module scatprops
tranorders # determination of maximum orders for target-based expansions
amncommonorigin # translation of sphere-based expansions to common target origin
lrsphereqeff # general efficiency factor calculation
qefficiencyfactors # calling routine for efficiency calculation
scatteringmatrix # scattering amplitude sa and matrix sm calculation
s11expansion
fosmcalc # azimuth-averaged scattering matrix
formexpansion # determine the generalized sf expansion for the azimuth-averaged scatt. matrix
ranorientscatmatrix
ranorientscatmatrixcalc

module nearfield
packcoefficient
unpackcoefficient
nearfieldspherepart # the field at point xg generated by the spheres
nearfieldincidentpart # the incident field at point xg using a regular vswh expansion
nearfieldincidentcoef # reshaped array of incident field coefficients
nearfieldpointcalc # !
nearfieldgridcalc

module solver
tmatrixsoln # calculation of T-mat. via solution of interaction eqs for a generalized plane wave expansion
fixedorsoln # solution of interaction equations for a fixed orientation
cbicgff # hybrid bcgm, using far field translation
cbicg # bcgm iteration solver

@ -1,5 +1,5 @@
 '''
-Common snippets for argument processing in command line scripts; legacy scripts use scripts_common.py instead.
+Common snippets for argument processing in command line scripts.
 '''

 import argparse

@ -1,10 +0,0 @@
scattering.py: Scattering in finite lattices.
modes2d.py TODO: Modes in infinite lattices.
tmatrices.py TODO: T-matrix creation (loading), (symmetry) operations etc. Perhaps move here some content of qpms_p.py.
lattices2d.py IN PROGRESS: Various 2D lattice generation, lattice type recognition and related functions.
hexpoints.py: To be obsoleted by the more general lattices2d.py.
scripts_common.py: Argument parsing common to various scripts.
timetrack.py: Auxiliary module for measuring elapsed time.
qpms_c.pyx: Cython wrapper for the C code and some miscellaneous functions.
qpms_p.py: Miscellaneous functions that have not been moved elsewhere.
legacy.py: Unused code moved from old versions; should not be imported with qpms. To be removed in the end.

@ -1,375 +0,0 @@
'''
Mostly legacy code; new scripts mostly use argproc.py instead.
'''
import warnings
import argparse
#import sys # for debugging purposes, TODO remove in production
import os # needed for path checks
from .types import TMatrixOp, TMatrixSpec, ParticleSpec, LatticeSpec
import collections

''' # REMOVE IN PRODUCTION
ParticleSpec = collections.namedtuple('ParticleSpec', ['label', 'position', 'tmatrix_spec'])
TMatrixOp = collections.namedtuple('TMatrixOp',
        ['optype', 'content'])
TMatrixSpec = collections.namedtuple('TMatrixSpec',
        ['lMax_override', 'tmatrix_path', 'ops'])
'''

__TODOs__ = '''
- Check the validity of the T-matrix ops (the arguments of --tr, --sym or similar) against what is implemented
  in tmatrices.py.
- Implement a more user-friendly way to define the lattice base vectors and the positions of the particles,
  cf. https://stackoverflow.com/questions/2371436/evaluating-a-mathematical-expression-in-a-string/2371789
- Low priority: allow some more custom operations on the T-matrix, using some kind of parsing from the previous point.
- Autodetect symmetries.
'''

def make_action_sharedlist(opname, listname):
    class opAction(argparse.Action):
        def __call__(self, parser, args, values, option_string=None):
            if (not hasattr(args, listname)) or getattr(args, listname) is None:
                setattr(args, listname, list())
            getattr(args, listname).append((opname, values))
    return opAction
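
For reference, a minimal usage sketch of this shared-list action (the operation names and values below are only illustrative): several different options append to one common destination list in command-line order, which is what later lets the T-matrix operations be applied in the order the user wrote them.

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--tr', dest='ops', nargs='+',
                        action=make_action_sharedlist('tr', 'ops'), default=list())
    parser.add_argument('--sym', dest='ops', nargs='+',
                        action=make_action_sharedlist('sym', 'ops'))
    args = parser.parse_args(['--tr', 'A', '--sym', 'B', '--tr', 'C'])
    print(args.ops)  # [('tr', ['A']), ('sym', ['B']), ('tr', ['C'])]
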
def add_argparse_k_output_options(parser):
    parser.add_argument('--kdensity', '--k_density', action='store', type=int, nargs='+', default=33, help='Number of k-points per x-axis segment FIXME DESCRIPTION')
    parser.add_argument('--bz_coverage', action='store', type=float, default=1., help='Brillouin zone coverage in relative length (default 1 for the whole 1st BZ)')
    parser.add_argument('--bz_edge_width', action='store', type=float, default=0., help='Width of the more densely covered belt along the 1st BZ edge, in relative lengths')
    parser.add_argument('--bz_edge_factor', action='store', type=float, default=8., help='Relative density of the belt along the 1st BZ edge w.r.t. k_density (default 8)')
    parser.add_argument('--bz_edge_twoside', action='store_true', help='Compute also the parts of the densely covered edge belt outside the 1st BZ')
    parser.add_argument('--bz_corner_width', action='store', type=float, default=0., help='Size of the more densely covered subcell around the 1st BZ corners, in relative lengths')
    parser.add_argument('--bz_corner_factor', action='store', type=float, default=16., help='Relative density of the subcell around the 1st BZ corners w.r.t. k_density (default 16)')
    parser.add_argument('--bz_corner_twoside', action='store_true', help='Compute also the parts of the densely covered subcell outside the 1st BZ')
    return

def add_argparse_unitcell_definitions(parser):
    #TODO create some user-friendlier way to define lattice vectors, cf. https://stackoverflow.com/questions/2371436/evaluating-a-mathematical-expression-in-a-string/2371789
    parser.add_argument('--lattice_base', nargs=4, action='store', type=float, required=True, help='Lattice basis vectors x1, y1, x2, y2')
    parser.add_argument('--particle', '-p', nargs='+', action=make_action_sharedlist('particle', 'particlespec'), help='Particle label, coordinates x, y, and (optionally) path to the T-matrix.')
    parser.add_argument('--TMatrix', '-t', nargs='+', action=make_action_sharedlist('TMatrix_path', 'particlespec'), help='Path to the T-matrix file')
    parser.add_argument('--background_permittivity', action='store', type=float, default=1., help='Background medium relative permittivity (default 1)')
    parser.add_argument('--lMax', action=make_action_sharedlist('lMax', 'particlespec'), nargs='+', help='Override lMax from the T-matrix file')
    popgrp = parser.add_argument_group(title='Operations')
    popgrp.add_argument('--tr', dest='ops', nargs='+', action=make_action_sharedlist('tr', 'ops'), default=list()) # the default value for dest can be set once
    popgrp.add_argument('--sym', dest='ops', nargs='+', action=make_action_sharedlist('sym', 'ops'))
    #popgrp.add_argument('--mult', dest='ops', nargs=3, metavar=('INCSPEC', 'SCATSPEC', 'MULTIPLIER'), action=make_action_sharedlist('mult', 'ops'))
    #popgrp.add_argument('--multl', dest='ops', nargs=3, metavar=('INCL[,INCL,...]', 'SCATL[,SCATL,...]', 'MULTIPLIER'), action=make_action_sharedlist('multl', 'ops'))
    return

def add_argparse_infinite_lattice_options(parser):
    parser.add_argument('--maxlayer', action='store', type=int, default=100, help='How far to sum the lattice points to obtain the dispersion')
    parser.add_argument('--gaussian', action='store', type=float, metavar='σ', help='Use a gaussian envelope for weighting the interaction matrix contributions (depending on the distance), measured in unit cell lengths (FIXME: check the unit).')
    return

def add_argparse_output_options(parser):
    parser.add_argument('--output_prefix', action='store', required=True, help='Prefix for the npz output (frequency, hexside and chunk number will be appended)')
    parser.add_argument('--plot_TMatrix', action='store_true', help='Visualise the T-matrix on the first page of the output')
    parser.add_argument('--scp_to', action='store', metavar='N', type=str, help='SCP the output files to a given destination')
    parser.add_argument('--chunklen', action='store', type=int, default=1000, help='Number of k-points per output file (default 1000)')
    return

def add_argparse_common_options(parser):
    parser.add_argument('--eVfreq', action='store', required=True, type=float, help='Frequency in eV')
    parser.add_argument('--verbose', '-v', action='count', help='Be verbose (about computation times, mostly)')
    parser.add_argument('--frequency_multiplier', action='store', type=float, default=1., help='Multiplies the frequencies in the T-matrix file by the given factor.')
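
Taken together, these helpers compose a script's command line. A minimal sketch of the script-side wiring (not part of this module) might look like the following; pargs then carries the shared particlespec and ops lists that arg_preprocess_particles below consumes, provided at least one --particle or --TMatrix option was given.

    import argparse

    parser = argparse.ArgumentParser()
    add_argparse_unitcell_definitions(parser)
    add_argparse_infinite_lattice_options(parser)
    add_argparse_k_output_options(parser)
    add_argparse_output_options(parser)
    add_argparse_common_options(parser)
    pargs = parser.parse_args()
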
def arg_preprocess_particles(pargs, d=None, return_tuple=False):
    '''
    Nanoparticle position and T-matrix path parsing.

    pargs: the namespace returned by parse_args() of an ArgumentParser
    on which add_argparse_unitcell_definitions() has been called.

    Returns a list of ParticleSpec objects.
    '''
    TMatrix_paths = dict()
    lMax_overrides = dict()
    default_TMatrix_path = None
    default_lMax_override = None
    if not any((arg_type == 'particle') for (arg_type, arg_content) in pargs.particlespec):
        # no particle positions given: suppose only one per unit cell, in the cell origin
        positions = {None: (0.0, 0.0)}
    else:
        positions = dict()
    for arg_type, arg_content in pargs.particlespec:
        if arg_type == 'particle': # --particle option
            if 3 <= len(arg_content) <= 4:
                try:
                    positions[arg_content[0]] = (float(arg_content[1]), float(arg_content[2]))
                except ValueError as e:
                    e.args += ("second and third argument of --particle must be valid floats, given: ", arg_content)
                    raise
                if len(arg_content) == 4:
                    if arg_content[0] in TMatrix_paths:
                        warnings.warn('T-matrix path for particle \'%s\' already specified.'
                                ' Overriding with the last value.' % arg_content[0], SyntaxWarning)
                    TMatrix_paths[arg_content[0]] = arg_content[3]
            else:
                raise ValueError("--particle expects 3 or 4 arguments, %d given: " % len(arg_content), arg_content)
        elif arg_type == 'TMatrix_path': # --TMatrix option
            if len(arg_content) == 1: # --TMatrix default_path
                if default_TMatrix_path is not None:
                    warnings.warn('Default T-matrix path already specified. Overriding with the last value.', SyntaxWarning)
                default_TMatrix_path = arg_content[0]
            elif len(arg_content) > 1: # --TMatrix label [label2 [...]] path
                for label in arg_content[:-1]:
                    if label in TMatrix_paths.keys():
                        warnings.warn('T-matrix path for particle \'%s\' already specified.'
                                ' Overriding with the last value.' % label, SyntaxWarning)
                    TMatrix_paths[label] = arg_content[-1]
        elif arg_type == 'lMax': # --lMax option
            if len(arg_content) == 1: # --lMax default_lmax_override
                if default_lMax_override is not None:
                    warnings.warn('Default lMax override value already specified. Overriding with the last value.', SyntaxWarning)
                default_lMax_override = int(arg_content[-1])
            else:
                for label in arg_content[:-1]:
                    if label in lMax_overrides.keys():
                        warnings.warn('lMax override for particle \'%s\' already specified.'
                                ' Overriding with the last value.' % label, SyntaxWarning)
                    lMax_overrides[label] = int(arg_content[-1])
        else: assert False, 'unknown option type'
    # Check the info from positions, TMatrix_paths and lMax_overrides
    if not set(TMatrix_paths.keys()) <= set(positions.keys()):
        raise ValueError("T-matrix path(s) for particle(s) labeled %s were given, but not their positions"
                % str(set(TMatrix_paths.keys()) - set(positions.keys())))
    if not set(lMax_overrides.keys()) <= set(positions.keys()):
        raise ValueError("lMax override(s) for particle(s) labeled %s were given, but not their positions"
                % str(set(lMax_overrides.keys()) - set(positions.keys())))
    if (set(TMatrix_paths.keys()) != set(positions.keys())) and default_TMatrix_path is None:
        raise ValueError("Position(s) of particle(s) labeled %s were given without their T-matrix"
                " and no default T-matrix was specified"
                % str(set(positions.keys()) - set(TMatrix_paths.keys())))
    # Fill in default_TMatrix_path for those particles that don't have their own
    for label in (set(positions.keys()) - set(TMatrix_paths.keys())):
        TMatrix_paths[label] = default_TMatrix_path
    for path in TMatrix_paths.values():
        if not os.path.exists(path):
            raise ValueError("Cannot access T-matrix file %s. Does it exist?" % path)

    # Assign (pre-parse) the T-matrix operations to the individual particles
    ops = dict()
    for label in positions.keys(): ops[label] = list()
    for optype, arg_content in pargs.ops:
        # if no label is given, apply to all; otherwise only to the specified particles
        for label in (positions.keys() if len(arg_content) == 1 else arg_content[:-1]):
            try:
                ops[label].append(TMatrixOp(optype, arg_content[-1]))
            except KeyError as e:
                e.args += ('Specified operation on undefined particle labeled \'%s\'' % label,)
                raise

    #### Collect all the info about the particles / their T-matrices into one list ####
    # get rid of the non-unique T-matrix specs (so that only one instance
    # of each different TMatrixSpec lives, possibly with multiple references to it)
    TMatrix_specs = dict((spec, spec)
            for spec in (TMatrixSpec(
                lMax_overrides[label] if label in lMax_overrides.keys() else None,
                TMatrix_paths[label],
                tuple(ops[label]))
                for label in positions.keys())
            )
    # particles_specs contains (label, (xpos, ypos), tmspec) per element
    particles_specs = [ParticleSpec(label, positions[label],
            TMatrix_specs[(lMax_overrides[label] if label in lMax_overrides.keys() else None,
                TMatrix_paths[label],
                tuple(ops[label]))]
            ) for label in positions.keys()]

    return particles_specs

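
Continuing the parser sketch above, a hypothetical invocation (the label, coordinates and T-matrix path are placeholders; the path must point to an existing file, and the ParticleSpec/TMatrixSpec field names are assumed to match the commented-out namedtuples earlier in this file):

    pargs = parser.parse_args([
        '--lattice_base', '1', '0', '0', '1',
        '--eVfreq', '1.5',
        '--output_prefix', 'out',
        '--particle', 'A', '0.0', '0.0',
        '--TMatrix', 'A', 'tmatrix_A.TMatrix',  # placeholder path, must exist on disk
    ])
    particle_specs = arg_preprocess_particles(pargs)
    for spec in particle_specs:
        print(spec.label, spec.position, spec.tmatrix_spec.tmatrix_path)
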
'''
import argparse, re, random, string
import subprocess
from scipy.constants import hbar, e as eV, pi, c
import warnings

parser = argparse.ArgumentParser()
pargs = parser.parse_args()
print(pargs)

exit(0) ###

maxlayer = pargs.maxlayer
#DEL hexside = pargs.hexside
eVfreq = pargs.eVfreq
freq = eVfreq*eV/hbar
verbose = pargs.verbose

#DEL TMatrix_file = pargs.TMatrix

epsilon_b = pargs.background_permittivity #2.3104
gaussianSigma = pargs.gaussian if pargs.gaussian else None # hexside * 222 / 7
interpfreqfactor = pargs.frequency_multiplier
scp_dest = pargs.scp_to if pargs.scp_to else None
kdensity = pargs.kdensity
chunklen = pargs.chunklen

# ----------------- finished basic CLI parsing (except for op arguments) ------------------
from qpms.timetrack import _time_b, _time_e
btime = _time_b(verbose)

import qpms
import numpy as np
import os, sys, warnings, math
from scipy import interpolate
nx = None
s3 = math.sqrt(3)


# T-matrix specification goes here
cdn = c / math.sqrt(epsilon_b)
TMatrices_orig, freqs_orig, freqs_weirdunits_orig, lMaxTM = qpms.loadScuffTMatrices(TMatrix_file)
lMax = lMaxTM
if pargs.lMax:
    lMax = pargs.lMax if pargs.lMax else lMaxTM
my, ny = qpms.get_mn_y(lMax)
nelem = len(my)
if pargs.lMax: # force the command-line specified lMax
    TMatrices_orig = TMatrices_orig[...,0:nelem,:,0:nelem]

TMatrices = np.array(np.broadcast_to(TMatrices_orig[:,nx,:,:,:,:],(len(freqs_orig),2,2,nelem,2,nelem)))

#TMatrices[:,:,:,:,:,ny==3] *= factor13inc
#TMatrices[:,:,:,ny==3,:,:] *= factor13scat
xfl = qpms.xflip_tyty(lMax)
yfl = qpms.yflip_tyty(lMax)
zfl = qpms.zflip_tyty(lMax)
c2rot = qpms.apply_matrix_left(qpms.yflip_yy(3),qpms.xflip_yy(3),-1)

reCN = re.compile(r'(\d*)C(\d+)')
#TODO C infinity (continuous rotational symmetry)

for op in ops:
    if op[0] == 'all':
        targets = (0,1)
    elif isinstance(op[0],int):
        targets = (op[0],)
    else:
        targets = op[0]

    if op[1] == 'sym':
        mCN = reCN.match(op[2]) # match the C_N pattern up front (no assignment inside expressions)
        if op[2] == 'σ_z':
            for t in targets:
                TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(zfl,qpms.apply_ndmatrix_left(zfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
        elif op[2] == 'σ_y':
            for t in targets:
                TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(yfl,qpms.apply_ndmatrix_left(yfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
        elif op[2] == 'σ_x':
            for t in targets:
                TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_ndmatrix_left(xfl,qpms.apply_ndmatrix_left(xfl, TMatrices[:,t], (-4,-3)),(-2,-1)))/2
        elif op[2] == 'C2': # special case of the latter
            for t in targets:
                TMatrices[:,t] = (TMatrices[:,t] + qpms.apply_matrix_left(c2rot,qpms.apply_matrix_left(c2rot, TMatrices[:,t], -3),-1))/2
        elif mCN:
            rotN = int(mCN.group(2))
            TMatrix_contribs = np.empty((rotN,TMatrices.shape[0],2,nelem,2,nelem), dtype=np.complex_)
            for t in targets:
                for i in range(rotN):
                    rotangle = 2*np.pi*i / rotN
                    rot = qpms.WignerD_yy_fromvector(lMax,np.array([0,0,rotangle]))
                    rotinv = qpms.WignerD_yy_fromvector(lMax,np.array([0,0,-rotangle]))
                    TMatrix_contribs[i] = qpms.apply_matrix_left(rot,qpms.apply_matrix_left(rotinv, TMatrices[:,t], -3),-1)
                TMatrices[:,t] = np.sum(TMatrix_contribs, axis=0) / rotN
        else:
            raise ValueError('\'%s\' is not an implemented symmetry operation' % op[2])
    elif op[1] == 'tr':
        mCN = reCN.match(op[2]) # match the C_N pattern up front (no assignment inside expressions)
        if op[2] == 'σ_z':
            for t in targets:
                TMatrices[:,t] = qpms.apply_ndmatrix_left(zfl,qpms.apply_ndmatrix_left(zfl, TMatrices[:,t], (-4,-3)),(-2,-1))
        elif op[2] == 'σ_y':
            for t in targets:
                TMatrices[:,t] = qpms.apply_ndmatrix_left(yfl,qpms.apply_ndmatrix_left(yfl, TMatrices[:,t], (-4,-3)),(-2,-1))
        elif op[2] == 'σ_x':
            for t in targets:
                TMatrices[:,t] = qpms.apply_ndmatrix_left(xfl,qpms.apply_ndmatrix_left(xfl, TMatrices[:,t], (-4,-3)),(-2,-1))
        elif op[2] == 'C2':
            for t in targets:
                TMatrices[:,t] = qpms.apply_matrix_left(c2rot,qpms.apply_matrix_left(c2rot, TMatrices[:,t], -3),-1)
        elif mCN:
            rotN = int(mCN.group(2))
            power = int(mCN.group(1)) if mCN.group(1) else 1
            TMatrix_contribs = np.empty((rotN,TMatrices.shape[0],2,nelem,2,nelem), dtype=np.complex_)
            for t in targets:
                rotangle = 2*np.pi*power/rotN
                rot = qpms.WignerD_yy_fromvector(lMax, np.array([0,0,rotangle]))
                rotinv = qpms.WignerD_yy_fromvector(lMax, np.array([0,0,-rotangle]))
                TMatrices[:,t] = qpms.apply_matrix_left(rot, qpms.apply_matrix_left(rotinv, TMatrices[:,t], -3),-1)
        else:
            raise ValueError('\'%s\' is not an implemented T-matrix transformation operation' % op[2])
    elif op[1] == 'copy':
        raise NotImplementedError('copy operation not implemented')
    elif op[1] == 'mult':
        raise NotImplementedError('mult operation not implemented')
    elif op[1] == 'multl':
        incy = np.full((nelem,), False, dtype=bool)
        for incl in op[2][0].split(','):
            l = int(incl)
            incy += (l == ny)
        scaty = np.full((nelem,), False, dtype=bool)
        for scatl in op[2][1].split(','):
            l = int(scatl)
            scaty += (l == ny)
        for t in targets:
            TMatrices[np.ix_(np.arange(TMatrices.shape[0]),np.array([t]),np.array([0,1]),scaty,np.array([0,1]),incy)] *= float(op[2][2])
    else:
        raise ValueError('unknown operation; should not happen')

TMatrices_interp = interpolate.interp1d(freqs_orig*interpfreqfactor, TMatrices, axis=0, kind='linear', fill_value="extrapolate")

klist_full = qpms.generate_trianglepoints(kdensity, v3d=True, include_origin=True)*3*math.pi/(3*kdensity*hexside)
TMatrices_om = TMatrices_interp(freq)

chunkn = math.ceil(klist_full.shape[0] / chunklen)

if verbose:
    print('Evaluating %d k-points in %d chunks' % (klist_full.shape[0], chunkn), file=sys.stderr)
    sys.stderr.flush()

metadata = np.array({
    'lMax' : lMax,
    'maxlayer' : maxlayer,
    'gaussianSigma' : gaussianSigma,
    'epsilon_b' : epsilon_b,
    'hexside' : hexside,
    'chunkn' : chunkn,
    'TMatrix_file' : TMatrix_file,
    'ops' : ops,
    })

for chunki in range(chunkn):
    svdout = '%s_%dnm_%.4f_c%03d.npz' % (pargs.output_prefix, hexside/1e-9, eVfreq, chunki)

    klist = klist_full[chunki * chunklen : (chunki + 1) * chunklen]

    svdres = qpms.hexlattice_zsym_getSVD(lMax=lMax, TMatrices_om=TMatrices_om, epsilon_b=epsilon_b, hexside=hexside, maxlayer=maxlayer,
            omega=freq, klist=klist, gaussianSigma=gaussianSigma, onlyNmin=False, verbose=verbose)

    #((svUfullTElist, svSfullTElist, svVfullTElist), (svUfullTMlist, svSfullTMlist, svVfullTMlist)) = svdres

    np.savez(svdout, omega = freq, klist = klist,
        metadata=metadata,
        uTE = svdres[0][0],
        vTE = svdres[0][2],
        sTE = svdres[0][1],
        uTM = svdres[1][0],
        vTM = svdres[1][2],
        sTM = svdres[1][1],
    )
    svdres = None

    if scp_dest:
        if svdout:
            subprocess.run(['scp', svdout, scp_dest])

_time_e(btime, verbose)
#print(time.strftime("%H.%M:%S",time.gmtime(time.time()-begtime)))
'''
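
The C_N symmetrization in the commented-out block above is just a group average over the cyclic rotation group. A generic sketch of the same idea in plain numpy (this is not the qpms API; D_of_angle stands for whatever unitary representation of the rotation acts on the expansion coefficients):

    import numpy as np

    def symmetrize_CN(T, D_of_angle, N):
        # Average D(phi) T D(phi)^{-1} over phi = 2*pi*i/N, i = 0, ..., N-1;
        # the result commutes with the C_N generator, i.e. it is C_N-symmetric.
        acc = np.zeros_like(T, dtype=complex)
        for i in range(N):
            D = D_of_angle(2 * np.pi * i / N)
            acc += D @ T @ np.linalg.inv(D)
        return acc / N
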
@ -1,35 +0,0 @@
"""
Legacy code, used only in scripts_common.py. To be removed in future versions.
"""
import time
import sys


def _time_b(active=True, name=None, step=None):
    '''
    Auxiliary function for keeping track of elapsed time.
    Returns current time (to be used by _time_e).
    '''
    now = time.time()
    if active:
        if not name:
            name = sys._getframe(1).f_code.co_name
        if step:
            print('%.4f: %s in function %s started.' % (now, step, name), file=sys.stderr)
        else:
            print('%.4f: Function %s started.' % (now, name), file=sys.stderr)
        sys.stderr.flush()
    return now


def _time_e(start_time, active=True, name=None, step=None):
    now = time.time()
    if active:
        if not name:
            name = sys._getframe(1).f_code.co_name
        if step:
            print('%.4f: %s in function %s finished (elapsed %.2f s).'
                  % (now, step, name, now - start_time), file=sys.stderr)
        else:
            print('%.4f: Function %s finished (elapsed %.2f s).'
                  % (now, name, now - start_time), file=sys.stderr)
        sys.stderr.flush()
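
Typical usage of these helpers, for reference (the step label is arbitrary):

    verbose = True
    btime = _time_b(verbose, step='lattice sum')
    # ... the timed computation goes here ...
    _time_e(btime, verbose, step='lattice sum')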