WALL-E: Clean up once and back!
@@ -144,7 +144,7 @@ def autopickstation(wfstream, pickparam, verbose=False):
    Sflag = 0
    Pmarker = []
    Ao = None # Wood-Anderson peak-to-peak amplitude
    picker = 'autoPyLoT' # name of the picking programm

    # split components
    zdat = wfstream.select(component="Z")
@@ -867,19 +867,19 @@ def iteratepicker(wf, NLLocfile, picks, badpicks, pickparameter):
    pickparameter.setParam(noisefactor=1.0)
    pickparameter.setParam(zfac=1.0)
    print(
        "iteratepicker: The following picking parameters have been modified for iterative picking:")
    print(
        "pstart: %fs => %fs" % (pstart_old, pickparameter.getParam('pstart')))
    print(
        "pstop: %fs => %fs" % (pstop_old, pickparameter.getParam('pstop')))
    print(
        "sstop: %fs => %fs" % (sstop_old, pickparameter.getParam('sstop')))
    print("pickwinP: %fs => %fs" % (
        pickwinP_old, pickparameter.getParam('pickwinP')))
    print("Precalcwin: %fs => %fs" % (
        Precalcwin_old, pickparameter.getParam('Precalcwin')))
    print("noisefactor: %f => %f" % (
        noisefactor_old, pickparameter.getParam('noisefactor')))
    print("zfac: %f => %f" % (zfac_old, pickparameter.getParam('zfac')))

    # repick station
@@ -21,10 +21,12 @@ import matplotlib.pyplot as plt
import numpy as np
from obspy.core import Stream


class CharacteristicFunction(object):
    '''
    SuperClass for different types of characteristic functions.
    '''

    def __init__(self, data, cut, t2=None, order=None, t1=None, fnoise=None, stealthMode=False):
        '''
        Initialize data type object with information from the original
@@ -103,9 +105,9 @@ class CharacteristicFunction(object):

    def setARdetStep(self, t1):
        if t1:
            self.ARdetStep = []
            self.ARdetStep.append(t1 / 4)
            self.ARdetStep.append(int(np.ceil(self.getTime2() / self.getIncrement()) / 4))

    def getOrder(self):
        return self.order
@@ -150,14 +152,14 @@ class CharacteristicFunction(object):
        if cut is not None:
            if len(self.orig_data) == 1:
                if self.cut[0] == 0 and self.cut[1] == 0:
                    start = 0
                    stop = len(self.orig_data[0])
                elif self.cut[0] == 0 and self.cut[1] is not 0:
                    start = 0
                    stop = self.cut[1] / self.dt
                else:
                    start = self.cut[0] / self.dt
                    stop = self.cut[1] / self.dt
                zz = self.orig_data.copy()
                z1 = zz[0].copy()
                zz[0].data = z1.data[int(start):int(stop)]
@@ -165,16 +167,16 @@ class CharacteristicFunction(object):
                return data
            elif len(self.orig_data) == 2:
                if self.cut[0] == 0 and self.cut[1] == 0:
                    start = 0
                    stop = min([len(self.orig_data[0]), len(self.orig_data[1])])
                elif self.cut[0] == 0 and self.cut[1] is not 0:
                    start = 0
                    stop = min([self.cut[1] / self.dt, len(self.orig_data[0]),
                                len(self.orig_data[1])])
                else:
                    start = max([0, self.cut[0] / self.dt])
                    stop = min([self.cut[1] / self.dt, len(self.orig_data[0]),
                                len(self.orig_data[1])])
                hh = self.orig_data.copy()
                h1 = hh[0].copy()
                h2 = hh[1].copy()
@@ -184,16 +186,16 @@ class CharacteristicFunction(object):
                return data
            elif len(self.orig_data) == 3:
                if self.cut[0] == 0 and self.cut[1] == 0:
                    start = 0
                    stop = min([self.cut[1] / self.dt, len(self.orig_data[0]),
                                len(self.orig_data[1]), len(self.orig_data[2])])
                elif self.cut[0] == 0 and self.cut[1] is not 0:
                    start = 0
                    stop = self.cut[1] / self.dt
                else:
                    start = max([0, self.cut[0] / self.dt])
                    stop = min([self.cut[1] / self.dt, len(self.orig_data[0]),
                                len(self.orig_data[1]), len(self.orig_data[2])])
                hh = self.orig_data.copy()
                h1 = hh[0].copy()
                h2 = hh[1].copy()
@@ -223,13 +225,13 @@ class AICcf(CharacteristicFunction):

    def calcCF(self, data):

-        #if self._getStealthMode() is False:
+        # if self._getStealthMode() is False:
        # print 'Calculating AIC ...'
        x = self.getDataArray()
        xnp = x[0].data
        nn = np.isnan(xnp)
        if len(nn) > 1:
            xnp[nn] = 0
        datlen = len(xnp)
        k = np.arange(1, datlen)
        cf = np.zeros(datlen)
@@ -247,6 +249,7 @@ class AICcf(CharacteristicFunction):
        self.cf = cf - np.mean(cf)
        self.xcf = x


class HOScf(CharacteristicFunction):
    '''
    Function to calculate skewness (statistics of order 3) or kurtosis
@@ -257,38 +260,38 @@ class HOScf(CharacteristicFunction):
    def calcCF(self, data):

        x = self.getDataArray(self.getCut())
-        xnp =x[0].data
+        xnp = x[0].data
        nn = np.isnan(xnp)
        if len(nn) > 1:
            xnp[nn] = 0
        if self.getOrder() == 3: # this is skewness
-            #if self._getStealthMode() is False:
+            # if self._getStealthMode() is False:
            # print 'Calculating skewness ...'
            y = np.power(xnp, 3)
            y1 = np.power(xnp, 2)
        elif self.getOrder() == 4: # this is kurtosis
-            #if self._getStealthMode() is False:
+            # if self._getStealthMode() is False:
            # print 'Calculating kurtosis ...'
            y = np.power(xnp, 4)
            y1 = np.power(xnp, 2)

-        #Initialisation
-        #t2: long term moving window
+        # Initialisation
+        # t2: long term moving window
        ilta = int(round(self.getTime2() / self.getIncrement()))
        lta = y[0]
        lta1 = y1[0]
-        #moving windows
+        # moving windows
        LTA = np.zeros(len(xnp))
        for j in range(0, len(xnp)):
            if j < 4:
                LTA[j] = 0
            elif j <= ilta:
-                lta = (y[j] + lta * (j-1)) / j
-                lta1 = (y1[j] + lta1 * (j-1)) / j
+                lta = (y[j] + lta * (j - 1)) / j
+                lta1 = (y1[j] + lta1 * (j - 1)) / j
            else:
                lta = (y[j] - y[j - ilta]) / ilta + lta
                lta1 = (y1[j] - y1[j - ilta]) / ilta + lta1
-            #define LTA
+            # define LTA
            if self.getOrder() == 3:
                LTA[j] = lta / np.power(lta1, 1.5)
            elif self.getOrder() == 4:
@@ -296,13 +299,12 @@ class HOScf(CharacteristicFunction):

        nn = np.isnan(LTA)
        if len(nn) > 1:
            LTA[nn] = 0
        self.cf = LTA
        self.xcf = x


class ARZcf(CharacteristicFunction):

    def calcCF(self, data):

        print 'Calculating AR-prediction error from single trace ...'
@@ -310,33 +312,33 @@ class ARZcf(CharacteristicFunction):
        xnp = x[0].data
        nn = np.isnan(xnp)
        if len(nn) > 1:
            xnp[nn] = 0
-        #some parameters needed
-        #add noise to time series
+        # some parameters needed
+        # add noise to time series
        xnoise = xnp + np.random.normal(0.0, 1.0, len(xnp)) * self.getFnoise() * max(abs(xnp))
        tend = len(xnp)
-        #Time1: length of AR-determination window [sec]
-        #Time2: length of AR-prediction window [sec]
-        ldet = int(round(self.getTime1() / self.getIncrement())) #length of AR-determination window [samples]
-        lpred = int(np.ceil(self.getTime2() / self.getIncrement())) #length of AR-prediction window [samples]
+        # Time1: length of AR-determination window [sec]
+        # Time2: length of AR-prediction window [sec]
+        ldet = int(round(self.getTime1() / self.getIncrement()))  # length of AR-determination window [samples]
+        lpred = int(np.ceil(self.getTime2() / self.getIncrement()))  # length of AR-prediction window [samples]

        cf = np.zeros(len(xnp))
        loopstep = self.getARdetStep()
-        arcalci = ldet + self.getOrder() #AR-calculation index
+        arcalci = ldet + self.getOrder()  # AR-calculation index
        for i in range(ldet + self.getOrder(), tend - lpred - 1):
            if i == arcalci:
-                #determination of AR coefficients
-                #to speed up calculation, AR-coefficients are calculated only every i+loopstep[1]!
-                self.arDetZ(xnoise, self.getOrder(), i-ldet, i)
+                # determination of AR coefficients
+                # to speed up calculation, AR-coefficients are calculated only every i+loopstep[1]!
+                self.arDetZ(xnoise, self.getOrder(), i - ldet, i)
                arcalci = arcalci + loopstep[1]
-            #AR prediction of waveform using calculated AR coefficients
+            # AR prediction of waveform using calculated AR coefficients
            self.arPredZ(xnp, self.arpara, i + 1, lpred)
-            #prediction error = CF
-            cf[i + lpred-1] = np.sqrt(np.sum(np.power(self.xpred[i:i + lpred-1] - xnp[i:i + lpred-1], 2)) / lpred)
+            # prediction error = CF
+            cf[i + lpred - 1] = np.sqrt(np.sum(np.power(self.xpred[i:i + lpred - 1] - xnp[i:i + lpred - 1], 2)) / lpred)
        nn = np.isnan(cf)
        if len(nn) > 1:
            cf[nn] = 0
-        #remove zeros and artefacts
+        # remove zeros and artefacts
        tap = np.hanning(len(cf))
        cf = tap * cf
        io = np.where(cf == 0)
@@ -366,25 +368,25 @@ class ARZcf(CharacteristicFunction):
        Output: AR parameters arpara
        '''

-        #recursive calculation of data vector (right part of eq. 6.5 in Kueperkoch et al. (2012)
+        # recursive calculation of data vector (right part of eq. 6.5 in Kueperkoch et al. (2012)
        rhs = np.zeros(self.getOrder())
        for k in range(0, self.getOrder()):
-            for i in range(rind, ldet+1):
+            for i in range(rind, ldet + 1):
                ki = k + 1
                rhs[k] = rhs[k] + data[i] * data[i - ki]

-        #recursive calculation of data array (second sum at left part of eq. 6.5 in Kueperkoch et al. 2012)
-        A = np.zeros((self.getOrder(),self.getOrder()))
+        # recursive calculation of data array (second sum at left part of eq. 6.5 in Kueperkoch et al. 2012)
+        A = np.zeros((self.getOrder(), self.getOrder()))
        for k in range(1, self.getOrder() + 1):
            for j in range(1, k + 1):
-                for i in range(rind, ldet+1):
+                for i in range(rind, ldet + 1):
                    ki = k - 1
                    ji = j - 1
-                    A[ki,ji] = A[ki,ji] + data[i - j] * data[i - k]
+                    A[ki, ji] = A[ki, ji] + data[i - j] * data[i - k]

-                A[ji,ki] = A[ki,ji]
+                A[ji, ki] = A[ki, ji]

-        #apply Moore-Penrose inverse for SVD yielding the AR-parameters
+        # apply Moore-Penrose inverse for SVD yielding the AR-parameters
        self.arpara = np.dot(np.linalg.pinv(A), rhs)

    def arPredZ(self, data, arpara, rind, lpred):
@@ -406,10 +408,10 @@ class ARZcf(CharacteristicFunction):

        Output: predicted waveform z
        '''
-        #be sure of the summation indeces
+        # be sure of the summation indeces
        if rind < len(arpara):
            rind = len(arpara)
-        if rind > len(data) - lpred :
+        if rind > len(data) - lpred:
            rind = len(data) - lpred
        if lpred < 1:
            lpred = 1
@@ -426,7 +428,6 @@ class ARZcf(CharacteristicFunction):


class ARHcf(CharacteristicFunction):

    def calcCF(self, data):

        print 'Calculating AR-prediction error from both horizontal traces ...'
@@ -434,41 +435,42 @@ class ARHcf(CharacteristicFunction):
        xnp = self.getDataArray(self.getCut())
        n0 = np.isnan(xnp[0].data)
        if len(n0) > 1:
            xnp[0].data[n0] = 0
        n1 = np.isnan(xnp[1].data)
        if len(n1) > 1:
            xnp[1].data[n1] = 0

-        #some parameters needed
-        #add noise to time series
+        # some parameters needed
+        # add noise to time series
        xenoise = xnp[0].data + np.random.normal(0.0, 1.0, len(xnp[0].data)) * self.getFnoise() * max(abs(xnp[0].data))
        xnnoise = xnp[1].data + np.random.normal(0.0, 1.0, len(xnp[1].data)) * self.getFnoise() * max(abs(xnp[1].data))
-        Xnoise = np.array( [xenoise.tolist(), xnnoise.tolist()] )
+        Xnoise = np.array([xenoise.tolist(), xnnoise.tolist()])
        tend = len(xnp[0].data)
-        #Time1: length of AR-determination window [sec]
-        #Time2: length of AR-prediction window [sec]
-        ldet = int(round(self.getTime1() / self.getIncrement())) #length of AR-determination window [samples]
-        lpred = int(np.ceil(self.getTime2() / self.getIncrement())) #length of AR-prediction window [samples]
+        # Time1: length of AR-determination window [sec]
+        # Time2: length of AR-prediction window [sec]
+        ldet = int(round(self.getTime1() / self.getIncrement()))  # length of AR-determination window [samples]
+        lpred = int(np.ceil(self.getTime2() / self.getIncrement()))  # length of AR-prediction window [samples]

        cf = np.zeros(len(xenoise))
        loopstep = self.getARdetStep()
-        arcalci = lpred + self.getOrder() - 1 #AR-calculation index
-        #arcalci = ldet + self.getOrder() - 1 #AR-calculation index
+        arcalci = lpred + self.getOrder() - 1  # AR-calculation index
+        # arcalci = ldet + self.getOrder() - 1 #AR-calculation index
        for i in range(lpred + self.getOrder() - 1, tend - 2 * lpred + 1):
            if i == arcalci:
-                #determination of AR coefficients
-                #to speed up calculation, AR-coefficients are calculated only every i+loopstep[1]!
-                self.arDetH(Xnoise, self.getOrder(), i-ldet, i)
+                # determination of AR coefficients
+                # to speed up calculation, AR-coefficients are calculated only every i+loopstep[1]!
+                self.arDetH(Xnoise, self.getOrder(), i - ldet, i)
                arcalci = arcalci + loopstep[1]
-            #AR prediction of waveform using calculated AR coefficients
+            # AR prediction of waveform using calculated AR coefficients
            self.arPredH(xnp, self.arpara, i + 1, lpred)
-            #prediction error = CF
+            # prediction error = CF
            cf[i + lpred] = np.sqrt(np.sum(np.power(self.xpred[0][i:i + lpred] - xnp[0][i:i + lpred], 2) \
-                                           + np.power(self.xpred[1][i:i + lpred] - xnp[1][i:i + lpred], 2)) / (2 * lpred))
+                                           + np.power(self.xpred[1][i:i + lpred] - xnp[1][i:i + lpred], 2)) / (
+                                              2 * lpred))
        nn = np.isnan(cf)
        if len(nn) > 1:
            cf[nn] = 0
-        #remove zeros and artefacts
+        # remove zeros and artefacts
        tap = np.hanning(len(cf))
        cf = tap * cf
        io = np.where(cf == 0)
@@ -500,24 +502,24 @@ class ARHcf(CharacteristicFunction):
        Output: AR parameters arpara
        '''

-        #recursive calculation of data vector (right part of eq. 6.5 in Kueperkoch et al. (2012)
+        # recursive calculation of data vector (right part of eq. 6.5 in Kueperkoch et al. (2012)
        rhs = np.zeros(self.getOrder())
        for k in range(0, self.getOrder()):
            for i in range(rind, ldet):
-                rhs[k] = rhs[k] + data[0,i] * data[0,i - k] + data[1,i] * data[1,i - k]
+                rhs[k] = rhs[k] + data[0, i] * data[0, i - k] + data[1, i] * data[1, i - k]

-        #recursive calculation of data array (second sum at left part of eq. 6.5 in Kueperkoch et al. 2012)
-        A = np.zeros((4,4))
+        # recursive calculation of data array (second sum at left part of eq. 6.5 in Kueperkoch et al. 2012)
+        A = np.zeros((4, 4))
        for k in range(1, self.getOrder() + 1):
            for j in range(1, k + 1):
                for i in range(rind, ldet):
                    ki = k - 1
                    ji = j - 1
-                    A[ki,ji] = A[ki,ji] + data[0,i - ji] * data[0,i - ki] + data[1,i - ji] *data[1,i - ki]
+                    A[ki, ji] = A[ki, ji] + data[0, i - ji] * data[0, i - ki] + data[1, i - ji] * data[1, i - ki]

-                A[ji,ki] = A[ki,ji]
+                A[ji, ki] = A[ki, ji]

-        #apply Moore-Penrose inverse for SVD yielding the AR-parameters
+        # apply Moore-Penrose inverse for SVD yielding the AR-parameters
        self.arpara = np.dot(np.linalg.pinv(A), rhs)

    def arPredH(self, data, arpara, rind, lpred):
@@ -540,7 +542,7 @@ class ARHcf(CharacteristicFunction):
        Output: predicted waveform z
        :type: structured array
        '''
-        #be sure of the summation indeces
+        # be sure of the summation indeces
        if rind < len(arpara) + 1:
            rind = len(arpara) + 1
        if rind > len(data[0]) - lpred + 1:
@@ -558,11 +560,11 @@ class ARHcf(CharacteristicFunction):
            z1[i] = z1[i] + arpara[ji] * z1[i - ji]
            z2[i] = z2[i] + arpara[ji] * z2[i - ji]

-        z = np.array( [z1.tolist(), z2.tolist()] )
+        z = np.array([z1.tolist(), z2.tolist()])
        self.xpred = z


class AR3Ccf(CharacteristicFunction):

    def calcCF(self, data):

        print 'Calculating AR-prediction error from all 3 components ...'
@@ -570,46 +572,47 @@ class AR3Ccf(CharacteristicFunction):
        xnp = self.getDataArray(self.getCut())
        n0 = np.isnan(xnp[0].data)
        if len(n0) > 1:
            xnp[0].data[n0] = 0
        n1 = np.isnan(xnp[1].data)
        if len(n1) > 1:
            xnp[1].data[n1] = 0
        n2 = np.isnan(xnp[2].data)
        if len(n2) > 1:
            xnp[2].data[n2] = 0

-        #some parameters needed
-        #add noise to time series
+        # some parameters needed
+        # add noise to time series
        xenoise = xnp[0].data + np.random.normal(0.0, 1.0, len(xnp[0].data)) * self.getFnoise() * max(abs(xnp[0].data))
        xnnoise = xnp[1].data + np.random.normal(0.0, 1.0, len(xnp[1].data)) * self.getFnoise() * max(abs(xnp[1].data))
        xznoise = xnp[2].data + np.random.normal(0.0, 1.0, len(xnp[2].data)) * self.getFnoise() * max(abs(xnp[2].data))
-        Xnoise = np.array( [xenoise.tolist(), xnnoise.tolist(), xznoise.tolist()] )
+        Xnoise = np.array([xenoise.tolist(), xnnoise.tolist(), xznoise.tolist()])
        tend = len(xnp[0].data)
-        #Time1: length of AR-determination window [sec]
-        #Time2: length of AR-prediction window [sec]
-        ldet = int(round(self.getTime1() / self.getIncrement())) #length of AR-determination window [samples]
-        lpred = int(np.ceil(self.getTime2() / self.getIncrement())) #length of AR-prediction window [samples]
+        # Time1: length of AR-determination window [sec]
+        # Time2: length of AR-prediction window [sec]
+        ldet = int(round(self.getTime1() / self.getIncrement()))  # length of AR-determination window [samples]
+        lpred = int(np.ceil(self.getTime2() / self.getIncrement()))  # length of AR-prediction window [samples]

        cf = np.zeros(len(xenoise))
        loopstep = self.getARdetStep()
-        arcalci = ldet + self.getOrder() - 1 #AR-calculation index
+        arcalci = ldet + self.getOrder() - 1  # AR-calculation index
        for i in range(ldet + self.getOrder() - 1, tend - 2 * lpred + 1):
            if i == arcalci:
-                #determination of AR coefficients
-                #to speed up calculation, AR-coefficients are calculated only every i+loopstep[1]!
-                self.arDet3C(Xnoise, self.getOrder(), i-ldet, i)
+                # determination of AR coefficients
+                # to speed up calculation, AR-coefficients are calculated only every i+loopstep[1]!
+                self.arDet3C(Xnoise, self.getOrder(), i - ldet, i)
                arcalci = arcalci + loopstep[1]

-            #AR prediction of waveform using calculated AR coefficients
+            # AR prediction of waveform using calculated AR coefficients
            self.arPred3C(xnp, self.arpara, i + 1, lpred)
-            #prediction error = CF
+            # prediction error = CF
            cf[i + lpred] = np.sqrt(np.sum(np.power(self.xpred[0][i:i + lpred] - xnp[0][i:i + lpred], 2) \
-                                           + np.power(self.xpred[1][i:i + lpred] - xnp[1][i:i + lpred], 2) \
-                                           + np.power(self.xpred[2][i:i + lpred] - xnp[2][i:i + lpred], 2)) / (3 * lpred))
+                                           + np.power(self.xpred[1][i:i + lpred] - xnp[1][i:i + lpred], 2) \
+                                           + np.power(self.xpred[2][i:i + lpred] - xnp[2][i:i + lpred], 2)) / (
+                                              3 * lpred))
        nn = np.isnan(cf)
        if len(nn) > 1:
            cf[nn] = 0
-        #remove zeros and artefacts
+        # remove zeros and artefacts
        tap = np.hanning(len(cf))
        cf = tap * cf
        io = np.where(cf == 0)
@@ -641,26 +644,26 @@ class AR3Ccf(CharacteristicFunction):
        Output: AR parameters arpara
        '''

-        #recursive calculation of data vector (right part of eq. 6.5 in Kueperkoch et al. (2012)
+        # recursive calculation of data vector (right part of eq. 6.5 in Kueperkoch et al. (2012)
        rhs = np.zeros(self.getOrder())
        for k in range(0, self.getOrder()):
            for i in range(rind, ldet):
-                rhs[k] = rhs[k] + data[0,i] * data[0,i - k] + data[1,i] * data[1,i - k] \
-                         + data[2,i] * data[2,i - k]
+                rhs[k] = rhs[k] + data[0, i] * data[0, i - k] + data[1, i] * data[1, i - k] \
+                         + data[2, i] * data[2, i - k]

-        #recursive calculation of data array (second sum at left part of eq. 6.5 in Kueperkoch et al. 2012)
-        A = np.zeros((4,4))
+        # recursive calculation of data array (second sum at left part of eq. 6.5 in Kueperkoch et al. 2012)
+        A = np.zeros((4, 4))
        for k in range(1, self.getOrder() + 1):
            for j in range(1, k + 1):
                for i in range(rind, ldet):
                    ki = k - 1
                    ji = j - 1
-                    A[ki,ji] = A[ki,ji] + data[0,i - ji] * data[0,i - ki] + data[1,i - ji] *data[1,i - ki] \
-                               + data[2,i - ji] *data[2,i - ki]
+                    A[ki, ji] = A[ki, ji] + data[0, i - ji] * data[0, i - ki] + data[1, i - ji] * data[1, i - ki] \
+                                + data[2, i - ji] * data[2, i - ki]

-                A[ji,ki] = A[ki,ji]
+                A[ji, ki] = A[ki, ji]

-        #apply Moore-Penrose inverse for SVD yielding the AR-parameters
+        # apply Moore-Penrose inverse for SVD yielding the AR-parameters
        self.arpara = np.dot(np.linalg.pinv(A), rhs)

    def arPred3C(self, data, arpara, rind, lpred):
@@ -683,7 +686,7 @@ class AR3Ccf(CharacteristicFunction):
        Output: predicted waveform z
        :type: structured array
        '''
-        #be sure of the summation indeces
+        # be sure of the summation indeces
        if rind < len(arpara) + 1:
            rind = len(arpara) + 1
        if rind > len(data[0]) - lpred + 1:
@@ -703,5 +706,5 @@ class AR3Ccf(CharacteristicFunction):
            z2[i] = z2[i] + arpara[ji] * z2[i - ji]
            z3[i] = z3[i] + arpara[ji] * z3[i - ji]

-        z = np.array( [z1.tolist(), z2.tolist(), z3.tolist()] )
+        z = np.array([z1.tolist(), z2.tolist(), z3.tolist()])
        self.xpred = z
@@ -25,6 +25,7 @@ from pylot.core.pick.utils import getnoisewin, getsignalwin
from pylot.core.pick.charfuns import CharacteristicFunction
import warnings


class AutoPicker(object):
    '''
    Superclass of different, automated picking algorithms applied on a CF determined
@@ -87,7 +88,6 @@ class AutoPicker(object):
                      Tsmooth=self.getTsmooth(),
                      Pick1=self.getpick1())

    def getTSNR(self):
        return self.TSNR
@@ -152,14 +152,14 @@ class AICPicker(AutoPicker):
        self.Pick = None
        self.slope = None
        self.SNR = None
-        #find NaN's
+        # find NaN's
        nn = np.isnan(self.cf)
        if len(nn) > 1:
            self.cf[nn] = 0
-        #taper AIC-CF to get rid off side maxima
+        # taper AIC-CF to get rid off side maxima
        tap = np.hanning(len(self.cf))
        aic = tap * self.cf + max(abs(self.cf))
-        #smooth AIC-CF
+        # smooth AIC-CF
        ismooth = int(round(self.Tsmooth / self.dt))
        aicsmooth = np.zeros(len(aic))
        if len(aic) < ismooth:
@@ -171,32 +171,32 @@ class AICPicker(AutoPicker):
                    ii1 = i - ismooth
                    aicsmooth[i] = aicsmooth[i - 1] + (aic[i] - aic[ii1]) / ismooth
                else:
-                    aicsmooth[i] = np.mean(aic[1 : i])
-        #remove offset
+                    aicsmooth[i] = np.mean(aic[1: i])
+        # remove offset
        offset = abs(min(aic) - min(aicsmooth))
        aicsmooth = aicsmooth - offset
-        #get maximum of 1st derivative of AIC-CF (more stable!) as starting point
+        # get maximum of 1st derivative of AIC-CF (more stable!) as starting point
        diffcf = np.diff(aicsmooth)
-        #find NaN's
+        # find NaN's
        nn = np.isnan(diffcf)
        if len(nn) > 1:
            diffcf[nn] = 0
-        #taper CF to get rid off side maxima
+        # taper CF to get rid off side maxima
        tap = np.hanning(len(diffcf))
        diffcf = tap * diffcf * max(abs(aicsmooth))
        icfmax = np.argmax(diffcf)

-        #find minimum in AIC-CF front of maximum
+        # find minimum in AIC-CF front of maximum
        lpickwindow = int(round(self.PickWindow / self.dt))
        for i in range(icfmax - 1, max([icfmax - lpickwindow, 2]), -1):
            if aicsmooth[i - 1] >= aicsmooth[i]:
                self.Pick = self.Tcf[i]
                break
-        #if no minimum could be found:
-        #search in 1st derivative of AIC-CF
+        # if no minimum could be found:
+        # search in 1st derivative of AIC-CF
        if self.Pick is None:
-            for i in range(icfmax -1, max([icfmax -lpickwindow, 2]), -1):
-                if diffcf[i -1] >= diffcf[i]:
+            for i in range(icfmax - 1, max([icfmax - lpickwindow, 2]), -1):
+                if diffcf[i - 1] >= diffcf[i]:
                    self.Pick = self.Tcf[i]
                    break
@@ -215,7 +215,7 @@ class AICPicker(AutoPicker):
                   max(abs(aic[inoise] - np.mean(aic[inoise])))
        # calculate slope from CF after initial pick
        # get slope window
-        tslope = self.TSNR[3] #slope determination window
+        tslope = self.TSNR[3]  # slope determination window
        islope = np.where((self.Tcf <= min([self.Pick + tslope, len(self.Data[0].data)])) \
                          & (self.Tcf >= self.Pick))
        # find maximum within slope determination window
@@ -237,7 +237,7 @@ class AICPicker(AutoPicker):
                raw_input()
                plt.close(p)
                return
-        islope = islope[0][0 :imax]
+        islope = islope[0][0:imax]
        dataslope = self.Data[0].data[islope]
        # calculate slope as polynomal fit of order 1
        xslope = np.arange(0, len(dataslope), 1)
@@ -258,7 +258,7 @@ class AICPicker(AutoPicker):
            p1, = plt.plot(self.Tcf, x / max(x), 'k')
            p2, = plt.plot(self.Tcf, aicsmooth / max(aicsmooth), 'r')
            if self.Pick is not None:
-                p3, = plt.plot([self.Pick, self.Pick], [-0.1 , 0.5], 'b', linewidth=2)
+                p3, = plt.plot([self.Pick, self.Pick], [-0.1, 0.5], 'b', linewidth=2)
                plt.legend([p1, p2, p3], ['(HOS-/AR-) Data', 'Smoothed AIC-CF', 'AIC-Pick'])
            else:
                plt.legend([p1, p2], ['(HOS-/AR-) Data', 'Smoothed AIC-CF'])
@@ -273,7 +273,8 @@ class AICPicker(AutoPicker):
                p13, = plt.plot(self.Tcf[isignal], self.Data[0].data[isignal], 'r')
                p14, = plt.plot(self.Tcf[islope], dataslope, 'g--')
                p15, = plt.plot(self.Tcf[islope], datafit, 'g', linewidth=2)
-                plt.legend([p11, p12, p13, p14, p15], ['Data', 'Noise Window', 'Signal Window', 'Slope Window', 'Slope'],
+                plt.legend([p11, p12, p13, p14, p15],
+                           ['Data', 'Noise Window', 'Signal Window', 'Slope Window', 'Slope'],
                           loc='best')
                plt.title('Station %s, SNR=%7.2f, Slope= %12.2f counts/s' % (self.Data[0].stats.station,
                                                                             self.SNR, self.slope))
@@ -303,7 +304,7 @@ class PragPicker(AutoPicker):
        self.SNR = None
        self.slope = None
        pickflag = 0
-        #smooth CF
+        # smooth CF
        ismooth = int(round(self.Tsmooth / self.dt))
        cfsmooth = np.zeros(len(self.cf))
        if len(self.cf) < ismooth:
@@ -315,28 +316,28 @@ class PragPicker(AutoPicker):
                    ii1 = i - ismooth
                    cfsmooth[i] = cfsmooth[i - 1] + (self.cf[i] - self.cf[ii1]) / ismooth
                else:
-                    cfsmooth[i] = np.mean(self.cf[1 : i])
+                    cfsmooth[i] = np.mean(self.cf[1: i])

-        #select picking window
-        #which is centered around tpick1
+        # select picking window
+        # which is centered around tpick1
        ipick = np.where((self.Tcf >= self.getpick1() - self.PickWindow / 2) \
                         & (self.Tcf <= self.getpick1() + self.PickWindow / 2))
        cfipick = self.cf[ipick] - np.mean(self.cf[ipick])
        Tcfpick = self.Tcf[ipick]
-        cfsmoothipick = cfsmooth[ipick]- np.mean(self.cf[ipick])
+        cfsmoothipick = cfsmooth[ipick] - np.mean(self.cf[ipick])
        ipick1 = np.argmin(abs(self.Tcf - self.getpick1()))
        cfpick1 = 2 * self.cf[ipick1]

-        #check trend of CF, i.e. differences of CF and adjust aus regarding this trend
-        #prominent trend: decrease aus
-        #flat: use given aus
+        # check trend of CF, i.e. differences of CF and adjust aus regarding this trend
+        # prominent trend: decrease aus
+        # flat: use given aus
        cfdiff = np.diff(cfipick)
        i0diff = np.where(cfdiff > 0)
        cfdiff = cfdiff[i0diff]
        minaus = min(cfdiff * (1 + self.aus))
        aus1 = max([minaus, self.aus])

-        #at first we look to the right until the end of the pick window is reached
+        # at first we look to the right until the end of the pick window is reached
        flagpick_r = 0
        flagpick_l = 0
        cfpick_r = 0
@@ -380,8 +381,8 @@ class PragPicker(AutoPicker):

        if self.getiplot() > 1:
            p = plt.figure(self.getiplot())
-            p1, = plt.plot(Tcfpick,cfipick, 'k')
-            p2, = plt.plot(Tcfpick,cfsmoothipick, 'r')
+            p1, = plt.plot(Tcfpick, cfipick, 'k')
+            p2, = plt.plot(Tcfpick, cfsmoothipick, 'r')
            if pickflag > 0:
                p3, = plt.plot([self.Pick, self.Pick], [min(cfipick), max(cfipick)], 'b', linewidth=2)
                plt.legend([p1, p2, p3], ['CF', 'Smoothed CF', 'Pick'])
@@ -1,307 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
Script to run autoPyLoT-script "run_makeCF.py".
Only for test purposes!
"""

from obspy.core import read
import matplotlib.pyplot as plt
import numpy as np
from pylot.core.pick.charfuns import CharacteristicFunction
from pylot.core.pick.picker import AutoPicker
from pylot.core.pick.utils import *
import glob
import argparse

def run_makeCF(project, database, event, iplot, station=None):
    #parameters for CF calculation
    t2 = 7 #length of moving window for HOS calculation [sec]
    p = 4 #order of HOS
    cuttimes = [10, 50] #start and end time for CF calculation
    bpz = [2, 30] #corner frequencies of bandpass filter, vertical component
    bph = [2, 15] #corner frequencies of bandpass filter, horizontal components
    tdetz= 1.2 #length of AR-determination window [sec], vertical component
    tdeth= 0.8 #length of AR-determination window [sec], horizontal components
    tpredz = 0.4 #length of AR-prediction window [sec], vertical component
    tpredh = 0.4 #length of AR-prediction window [sec], horizontal components
    addnoise = 0.001 #add noise to seismogram for stable AR prediction
    arzorder = 2 #chosen order of AR process, vertical component
    arhorder = 4 #chosen order of AR process, horizontal components
    TSNRhos = [5, 0.5, 1, .6] #window lengths [s] for calculating SNR for earliest/latest pick and quality assessment
    #from HOS-CF [noise window, safety gap, signal window, slope determination window]
    TSNRarz = [5, 0.5, 1, 1.0] #window lengths [s] for calculating SNR for earliest/lates pick and quality assessment
    #from ARZ-CF
    #get waveform data
    if station:
        dpz = '/DATA/%s/EVENT_DATA/LOCAL/%s/%s/%s*HZ.msd' % (project, database, event, station)
        dpe = '/DATA/%s/EVENT_DATA/LOCAL/%s/%s/%s*HE.msd' % (project, database, event, station)
        dpn = '/DATA/%s/EVENT_DATA/LOCAL/%s/%s/%s*HN.msd' % (project, database, event, station)
        #dpz = '/DATA/%s/EVENT_DATA/LOCAL/%s/%s/%s*_z.gse' % (project, database, event, station)
        #dpe = '/DATA/%s/EVENT_DATA/LOCAL/%s/%s/%s*_e.gse' % (project, database, event, station)
        #dpn = '/DATA/%s/EVENT_DATA/LOCAL/%s/%s/%s*_n.gse' % (project, database, event, station)
    else:
        dpz = '/DATA/%s/EVENT_DATA/LOCAL/%s/%s/*HZ.msd' % (project, database, event)
        dpe = '/DATA/%s/EVENT_DATA/LOCAL/%s/%s/*HE.msd' % (project, database, event)
        dpn = '/DATA/%s/EVENT_DATA/LOCAL/%s/%s/*HN.msd' % (project, database, event)
    wfzfiles = glob.glob(dpz)
    wfefiles = glob.glob(dpe)
    wfnfiles = glob.glob(dpn)
    if wfzfiles:
        for i in range(len(wfzfiles)):
            print 'Vertical component data found ...'
            print wfzfiles[i]
            st = read('%s' % wfzfiles[i])
            st_copy = st.copy()
            #filter and taper data
            tr_filt = st[0].copy()
            tr_filt.filter('bandpass', freqmin=bpz[0], freqmax=bpz[1], zerophase=False)
            tr_filt.taper(max_percentage=0.05, type='hann')
            st_copy[0].data = tr_filt.data
            ##############################################################
            #calculate HOS-CF using subclass HOScf of class CharacteristicFunction
            hoscf = HOScf(st_copy, cuttimes, t2, p) #instance of HOScf
            ##############################################################
            #calculate AIC-HOS-CF using subclass AICcf of class CharacteristicFunction
            #class needs stream object => build it
            tr_aic = tr_filt.copy()
            tr_aic.data = hoscf.getCF()
            st_copy[0].data = tr_aic.data
            aiccf = AICcf(st_copy, cuttimes) #instance of AICcf
            ##############################################################
            #get prelimenary onset time from AIC-HOS-CF using subclass AICPicker of class AutoPicking
            aicpick = AICPicker(aiccf, TSNRhos, 3, 10, None, 0.1)
            ##############################################################
            #get refined onset time from HOS-CF using class Picker
            hospick = PragPicker(hoscf, TSNRhos, 2, 10, 0.001, 0.2, aicpick.getpick())
            #############################################################
            #get earliest and latest possible picks
            st_copy[0].data = tr_filt.data
            [lpickhos, epickhos, pickerrhos] = earllatepicker(st_copy, 1.5, TSNRhos, hospick.getpick(), 10)
            #############################################################
            #get SNR
            [SNR, SNRdB] = getSNR(st_copy, TSNRhos, hospick.getpick())
            print 'SNR:', SNR, 'SNR[dB]:', SNRdB
            ##########################################################
            #get first motion of onset
            hosfm = fmpicker(st, st_copy, 0.2, hospick.getpick(), 11)
            ##############################################################
            #calculate ARZ-CF using subclass ARZcf of class CharcteristicFunction
            arzcf = ARZcf(st, cuttimes, tpredz, arzorder, tdetz, addnoise) #instance of ARZcf
            ##############################################################
            #calculate AIC-ARZ-CF using subclass AICcf of class CharacteristicFunction
            #class needs stream object => build it
            tr_arzaic = tr_filt.copy()
            tr_arzaic.data = arzcf.getCF()
            st_copy[0].data = tr_arzaic.data
            araiccf = AICcf(st_copy, cuttimes, tpredz, 0, tdetz) #instance of AICcf
            ##############################################################
            #get onset time from AIC-ARZ-CF using subclass AICPicker of class AutoPicking
            aicarzpick = AICPicker(araiccf, TSNRarz, 2, 10, None, 0.1)
            ##############################################################
            #get refined onset time from ARZ-CF using class Picker
            arzpick = PragPicker(arzcf, TSNRarz, 2.0, 10, 0.1, 0.05, aicarzpick.getpick())
            #get earliest and latest possible picks
            st_copy[0].data = tr_filt.data
            [lpickarz, epickarz, pickerrarz] = earllatepicker(st_copy, 1.5, TSNRarz, arzpick.getpick(), 10)
    elif not wfzfiles:
        print 'No vertical component data found!'

    if wfefiles and wfnfiles:
        for i in range(len(wfefiles)):
            print 'Horizontal component data found ...'
            print wfefiles[i]
            print wfnfiles[i]
            #merge streams
            H = read('%s' % wfefiles[i])
            H += read('%s' % wfnfiles[i])
            H_copy = H.copy()
            #filter and taper data
            trH1_filt = H[0].copy()
            trH2_filt = H[1].copy()
            trH1_filt.filter('bandpass', freqmin=bph[0], freqmax=bph[1], zerophase=False)
            trH2_filt.filter('bandpass', freqmin=bph[0], freqmax=bph[1], zerophase=False)
            trH1_filt.taper(max_percentage=0.05, type='hann')
            trH2_filt.taper(max_percentage=0.05, type='hann')
            H_copy[0].data = trH1_filt.data
            H_copy[1].data = trH2_filt.data

            ##############################################################
            #calculate ARH-CF using subclass ARHcf of class CharcteristicFunction
            arhcf = ARHcf(H_copy, cuttimes, tpredh, arhorder, tdeth, addnoise) #instance of ARHcf
            ##############################################################
            #calculate AIC-ARH-CF using subclass AICcf of class CharacteristicFunction
            #class needs stream object => build it
            tr_arhaic = trH1_filt.copy()
            tr_arhaic.data = arhcf.getCF()
            H_copy[0].data = tr_arhaic.data
            #calculate ARH-AIC-CF
            arhaiccf = AICcf(H_copy, cuttimes, tpredh, 0, tdeth) #instance of AICcf
            ##############################################################
            #get onset time from AIC-ARH-CF using subclass AICPicker of class AutoPicking
            aicarhpick = AICPicker(arhaiccf, TSNRarz, 4, 10, None, 0.1)
            ###############################################################
            #get refined onset time from ARH-CF using class Picker
            arhpick = PragPicker(arhcf, TSNRarz, 2.5, 10, 0.1, 0.05, aicarhpick.getpick())
            #get earliest and latest possible picks
            H_copy[0].data = trH1_filt.data
            [lpickarh1, epickarh1, pickerrarh1] = earllatepicker(H_copy, 1.5, TSNRarz, arhpick.getpick(), 10)
            H_copy[0].data = trH2_filt.data
            [lpickarh2, epickarh2, pickerrarh2] = earllatepicker(H_copy, 1.5, TSNRarz, arhpick.getpick(), 10)
            #get earliest pick of both earliest possible picks
            epick = [epickarh1, epickarh2]
            lpick = [lpickarh1, lpickarh2]
            pickerr = [pickerrarh1, pickerrarh2]
            ipick =np.argmin([epickarh1, epickarh2])
            epickarh = epick[ipick]
            lpickarh = lpick[ipick]
            pickerrarh = pickerr[ipick]

            #create stream with 3 traces
            #merge streams
            AllC = read('%s' % wfefiles[i])
            AllC += read('%s' % wfnfiles[i])
            AllC += read('%s' % wfzfiles[i])
            #filter and taper data
            All1_filt = AllC[0].copy()
            All2_filt = AllC[1].copy()
            All3_filt = AllC[2].copy()
            All1_filt.filter('bandpass', freqmin=bph[0], freqmax=bph[1], zerophase=False)
            All2_filt.filter('bandpass', freqmin=bph[0], freqmax=bph[1], zerophase=False)
            All3_filt.filter('bandpass', freqmin=bpz[0], freqmax=bpz[1], zerophase=False)
            All1_filt.taper(max_percentage=0.05, type='hann')
            All2_filt.taper(max_percentage=0.05, type='hann')
            All3_filt.taper(max_percentage=0.05, type='hann')
            AllC[0].data = All1_filt.data
            AllC[1].data = All2_filt.data
            AllC[2].data = All3_filt.data
            #calculate AR3C-CF using subclass AR3Ccf of class CharacteristicFunction
            ar3ccf = AR3Ccf(AllC, cuttimes, tpredz, arhorder, tdetz, addnoise) #instance of AR3Ccf
            ##############################################################
            if iplot:
                #plot vertical trace
                plt.figure()
                tr = st[0]
                tdata = np.arange(0, tr.stats.npts / tr.stats.sampling_rate, tr.stats.delta)
                p1, = plt.plot(tdata, tr_filt.data/max(tr_filt.data), 'k')
                p2, = plt.plot(hoscf.getTimeArray(), hoscf.getCF() / max(hoscf.getCF()), 'r')
                p3, = plt.plot(aiccf.getTimeArray(), aiccf.getCF()/max(aiccf.getCF()), 'b')
                p4, = plt.plot(arzcf.getTimeArray(), arzcf.getCF()/max(arzcf.getCF()), 'g')
                p5, = plt.plot(araiccf.getTimeArray(), araiccf.getCF()/max(araiccf.getCF()), 'y')
                plt.plot([aicpick.getpick(), aicpick.getpick()], [-1, 1], 'b--')
                plt.plot([aicpick.getpick()-0.5, aicpick.getpick()+0.5], [1, 1], 'b')
                plt.plot([aicpick.getpick()-0.5, aicpick.getpick()+0.5], [-1, -1], 'b')
                plt.plot([hospick.getpick(), hospick.getpick()], [-1.3, 1.3], 'r', linewidth=2)
                plt.plot([hospick.getpick()-0.5, hospick.getpick()+0.5], [1.3, 1.3], 'r')
                plt.plot([hospick.getpick()-0.5, hospick.getpick()+0.5], [-1.3, -1.3], 'r')
                plt.plot([lpickhos, lpickhos], [-1.1, 1.1], 'r--')
                plt.plot([epickhos, epickhos], [-1.1, 1.1], 'r--')
                plt.plot([aicarzpick.getpick(), aicarzpick.getpick()], [-1.2, 1.2], 'y', linewidth=2)
                plt.plot([aicarzpick.getpick()-0.5, aicarzpick.getpick()+0.5], [1.2, 1.2], 'y')
                plt.plot([aicarzpick.getpick()-0.5, aicarzpick.getpick()+0.5], [-1.2, -1.2], 'y')
                plt.plot([arzpick.getpick(), arzpick.getpick()], [-1.4, 1.4], 'g', linewidth=2)
                plt.plot([arzpick.getpick()-0.5, arzpick.getpick()+0.5], [1.4, 1.4], 'g')
                plt.plot([arzpick.getpick()-0.5, arzpick.getpick()+0.5], [-1.4, -1.4], 'g')
                plt.plot([lpickarz, lpickarz], [-1.2, 1.2], 'g--')
                plt.plot([epickarz, epickarz], [-1.2, 1.2], 'g--')
                plt.yticks([])
                plt.ylim([-1.5, 1.5])
                plt.xlabel('Time [s]')
                plt.ylabel('Normalized Counts')
                plt.title('%s, %s, CF-SNR=%7.2f, CF-Slope=%12.2f' % (tr.stats.station,
                                                                     tr.stats.channel, aicpick.getSNR(), aicpick.getSlope()))
                plt.suptitle(tr.stats.starttime)
                plt.legend([p1, p2, p3, p4, p5], ['Data', 'HOS-CF', 'HOSAIC-CF', 'ARZ-CF', 'ARZAIC-CF'])
                #plot horizontal traces
                plt.figure(2)
                plt.subplot(2,1,1)
                tsteph = tpredh / 4
                th1data = np.arange(0, trH1_filt.stats.npts / trH1_filt.stats.sampling_rate, trH1_filt.stats.delta)
                th2data = np.arange(0, trH2_filt.stats.npts / trH2_filt.stats.sampling_rate, trH2_filt.stats.delta)
                tarhcf = np.arange(0, len(arhcf.getCF()) * tsteph, tsteph) + cuttimes[0] + tdeth +tpredh
                p21, = plt.plot(th1data, trH1_filt.data/max(trH1_filt.data), 'k')
                p22, = plt.plot(arhcf.getTimeArray(), arhcf.getCF()/max(arhcf.getCF()), 'r')
                p23, = plt.plot(arhaiccf.getTimeArray(), arhaiccf.getCF()/max(arhaiccf.getCF()))
                plt.plot([aicarhpick.getpick(), aicarhpick.getpick()], [-1, 1], 'b')
                plt.plot([aicarhpick.getpick()-0.5, aicarhpick.getpick()+0.5], [1, 1], 'b')
                plt.plot([aicarhpick.getpick()-0.5, aicarhpick.getpick()+0.5], [-1, -1], 'b')
                plt.plot([arhpick.getpick(), arhpick.getpick()], [-1, 1], 'r')
                plt.plot([arhpick.getpick()-0.5, arhpick.getpick()+0.5], [1, 1], 'r')
                plt.plot([arhpick.getpick()-0.5, arhpick.getpick()+0.5], [-1, -1], 'r')
                plt.plot([lpickarh, lpickarh], [-0.8, 0.8], 'r--')
                plt.plot([epickarh, epickarh], [-0.8, 0.8], 'r--')
                plt.plot([arhpick.getpick() + pickerrarh, arhpick.getpick() + pickerrarh], [-0.2, 0.2], 'r--')
                plt.plot([arhpick.getpick() - pickerrarh, arhpick.getpick() - pickerrarh], [-0.2, 0.2], 'r--')
                plt.yticks([])
                plt.ylim([-1.5, 1.5])
                plt.ylabel('Normalized Counts')
                plt.title([trH1_filt.stats.station, trH1_filt.stats.channel])
                plt.suptitle(trH1_filt.stats.starttime)
                plt.legend([p21, p22, p23], ['Data', 'ARH-CF', 'ARHAIC-CF'])
                plt.subplot(2,1,2)
                plt.plot(th2data, trH2_filt.data/max(trH2_filt.data), 'k')
                plt.plot(arhcf.getTimeArray(), arhcf.getCF()/max(arhcf.getCF()), 'r')
                plt.plot(arhaiccf.getTimeArray(), arhaiccf.getCF()/max(arhaiccf.getCF()))
                plt.plot([aicarhpick.getpick(), aicarhpick.getpick()], [-1, 1], 'b')
                plt.plot([aicarhpick.getpick()-0.5, aicarhpick.getpick()+0.5], [1, 1], 'b')
                plt.plot([aicarhpick.getpick()-0.5, aicarhpick.getpick()+0.5], [-1, -1], 'b')
                plt.plot([arhpick.getpick(), arhpick.getpick()], [-1, 1], 'r')
                plt.plot([arhpick.getpick()-0.5, arhpick.getpick()+0.5], [1, 1], 'r')
                plt.plot([arhpick.getpick()-0.5, arhpick.getpick()+0.5], [-1, -1], 'r')
                plt.plot([lpickarh, lpickarh], [-0.8, 0.8], 'r--')
                plt.plot([epickarh, epickarh], [-0.8, 0.8], 'r--')
                plt.plot([arhpick.getpick() + pickerrarh, arhpick.getpick() + pickerrarh], [-0.2, 0.2], 'r--')
                plt.plot([arhpick.getpick() - pickerrarh, arhpick.getpick() - pickerrarh], [-0.2, 0.2], 'r--')
                plt.title([trH2_filt.stats.station, trH2_filt.stats.channel])
                plt.yticks([])
                plt.ylim([-1.5, 1.5])
                plt.xlabel('Time [s]')
                plt.ylabel('Normalized Counts')
                #plot 3-component window
                plt.figure(3)
                plt.subplot(3,1,1)
                p31, = plt.plot(tdata, tr_filt.data/max(tr_filt.data), 'k')
                p32, = plt.plot(ar3ccf.getTimeArray(), ar3ccf.getCF()/max(ar3ccf.getCF()), 'r')
                plt.plot([arhpick.getpick(), arhpick.getpick()], [-1, 1], 'b')
                plt.plot([arhpick.getpick()-0.5, arhpick.getpick()+0.5], [-1, -1], 'b')
                plt.plot([arhpick.getpick()-0.5, arhpick.getpick()+0.5], [1, 1], 'b')
                plt.yticks([])
                plt.xticks([])
                plt.ylabel('Normalized Counts')
                plt.title([tr.stats.station, tr.stats.channel])
                plt.suptitle(trH1_filt.stats.starttime)
                plt.legend([p31, p32], ['Data', 'AR3C-CF'])
                plt.subplot(3,1,2)
                plt.plot(th1data, trH1_filt.data/max(trH1_filt.data), 'k')
                plt.plot(ar3ccf.getTimeArray(), ar3ccf.getCF()/max(ar3ccf.getCF()), 'r')
                plt.plot([arhpick.getpick(), arhpick.getpick()], [-1, 1], 'b')
                plt.plot([arhpick.getpick()-0.5, arhpick.getpick()+0.5], [-1, -1], 'b')
                plt.plot([arhpick.getpick()-0.5, arhpick.getpick()+0.5], [1, 1], 'b')
                plt.yticks([])
                plt.xticks([])
                plt.ylabel('Normalized Counts')
                plt.title([trH1_filt.stats.station, trH1_filt.stats.channel])
                plt.subplot(3,1,3)
                plt.plot(th2data, trH2_filt.data/max(trH2_filt.data), 'k')
                plt.plot(ar3ccf.getTimeArray(), ar3ccf.getCF()/max(ar3ccf.getCF()), 'r')
                plt.plot([arhpick.getpick(), arhpick.getpick()], [-1, 1], 'b')
                plt.plot([arhpick.getpick()-0.5, arhpick.getpick()+0.5], [-1, -1], 'b')
                plt.plot([arhpick.getpick()-0.5, arhpick.getpick()+0.5], [1, 1], 'b')
                plt.yticks([])
                plt.ylabel('Normalized Counts')
                plt.title([trH2_filt.stats.station, trH2_filt.stats.channel])
                plt.xlabel('Time [s]')
                plt.show()
                raw_input()
                plt.close()

parser = argparse.ArgumentParser()
parser.add_argument('--project', type=str, help='project name (e.g. Insheim)')
parser.add_argument('--database', type=str, help='event data base (e.g. 2014.09_Insheim)')
parser.add_argument('--event', type=str, help='event ID (e.g. e0010.015.14)')
parser.add_argument('--iplot', help='anything, if set, figure occurs')
parser.add_argument('--station', type=str, help='Station ID (e.g. INS3) (optional)')
args = parser.parse_args()

run_makeCF(args.project, args.database, args.event, args.iplot, args.station)
@@ -15,7 +15,7 @@ from obspy.core import Stream, UTCDateTime
import warnings


-def earllatepicker(X, nfac, TSNR, Pick1, iplot=None, stealthMode = False):
+def earllatepicker(X, nfac, TSNR, Pick1, iplot=None, stealthMode=False):
    '''
    Function to derive earliest and latest possible pick after Diehl & Kissling (2009)
    as reasonable uncertainties. Latest possible pick is based on noise level,
@@ -70,7 +70,8 @@ def earllatepicker(X, nfac, TSNR, Pick1, iplot=None, stealthMode=False):

    # get earliest possible pick

-    EPick = np.nan; count = 0
+    EPick = np.nan;
+    count = 0
    pis = isignal

    # if EPick stays NaN the signal window size will be doubled
@@ -78,10 +79,10 @@ def earllatepicker(X, nfac, TSNR, Pick1, iplot=None, stealthMode=False):
        if count > 0:
            if stealthMode is False:
                print("\nearllatepicker: Doubled signal window size %s time(s) "
-                      "because of NaN for earliest pick." %count)
+                      "because of NaN for earliest pick." % count)
            isigDoubleWinStart = pis[-1] + 1
            isignalDoubleWin = np.arange(isigDoubleWinStart,
                                         isigDoubleWinStart + len(pis))
            if (isigDoubleWinStart + len(pis)) < X[0].data.size:
                pis = np.concatenate((pis, isignalDoubleWin))
            else:
@@ -92,8 +93,7 @@ def earllatepicker(X, nfac, TSNR, Pick1, iplot=None, stealthMode=False):
        zc = crossings_nonzero_all(x[pis] - x[pis].mean())
        # calculate mean half period T0 of signal as the average of the
        T0 = np.mean(np.diff(zc)) * X[0].stats.delta # this is half wave length!
-        EPick = Pick1 - T0 # half wavelength as suggested by Diehl et al.
-
+        EPick = Pick1 - T0  # half wavelength as suggested by Diehl et al.

    # get symmetric pick error as mean from earliest and latest possible pick
    # by weighting latest possible pick two times earliest possible pick
@@ -395,7 +395,7 @@ def getnoisewin(t, t1, tnoise, tgap):

    # get noise window
    inoise, = np.where((t <= max([t1 - tgap, 0])) \
                       & (t >= max([t1 - tnoise - tgap, 0])))
    if np.size(inoise) < 1:
        print ("getnoisewin: Empty array inoise, check noise window!")
@@ -419,7 +419,7 @@ def getsignalwin(t, t1, tsignal):

    # get signal window
    isignal, = np.where((t <= min([t1 + tsignal, len(t)])) \
                        & (t >= t1))
    if np.size(isignal) < 1:
        print ("getsignalwin: Empty array isignal, check signal window!")
@@ -460,7 +460,7 @@ def getResolutionWindow(snr):
    else:
        time_resolution = res_wins['HRW']

-    return time_resolution/2
+    return time_resolution / 2


def wadaticheck(pickdic, dttolerance, iplot):
@@ -488,17 +488,16 @@ def wadaticheck(pickdic, dttolerance, iplot):
    SPtimes = []
    for key in pickdic:
        if pickdic[key]['P']['weight'] < 4 and pickdic[key]['S']['weight'] < 4:
            # calculate S-P time
            spt = pickdic[key]['S']['mpp'] - pickdic[key]['P']['mpp']
            # add S-P time to dictionary
            pickdic[key]['SPt'] = spt
            # add P onsets and corresponding S-P times to list
            UTCPpick = UTCDateTime(pickdic[key]['P']['mpp'])
            UTCSpick = UTCDateTime(pickdic[key]['S']['mpp'])
            Ppicks.append(UTCPpick.timestamp)
            Spicks.append(UTCSpick.timestamp)
            SPtimes.append(spt)

    if len(SPtimes) >= 3:
        # calculate slope
@@ -530,7 +529,7 @@ def wadaticheck(pickdic, dttolerance, iplot):
                ibad += 1
            else:
                marker = 'goodWadatiCheck'
                checkedPpick = UTCDateTime(pickdic[key]['P']['mpp'])
                checkedPpicks.append(checkedPpick.timestamp)
                checkedSpick = UTCDateTime(pickdic[key]['S']['mpp'])
                checkedSpicks.append(checkedSpick.timestamp)
@@ -642,7 +641,7 @@ def checksignallength(X, pick, TSNR, minsiglength, nfac, minpercent, iplot):
    # calculate minimum adjusted signal level
    minsiglevel = max(rms[inoise]) * nfac
    # minimum adjusted number of samples over minimum signal level
-    minnum = len(isignal) * minpercent/100
+    minnum = len(isignal) * minpercent / 100
    # get number of samples above minimum adjusted signal level
    numoverthr = len(np.where(rms[isignal] >= minsiglevel)[0])
@@ -657,10 +656,10 @@ def checksignallength(X, pick, TSNR, minsiglength, nfac, minpercent, iplot):

    if iplot == 2:
        plt.figure(iplot)
-        p1, = plt.plot(t,rms, 'k')
+        p1, = plt.plot(t, rms, 'k')
        p2, = plt.plot(t[inoise], rms[inoise], 'c')
-        p3, = plt.plot(t[isignal],rms[isignal], 'r')
-        p4, = plt.plot([t[isignal[0]], t[isignal[len(isignal)-1]]],
+        p3, = plt.plot(t[isignal], rms[isignal], 'r')
+        p4, = plt.plot([t[isignal[0]], t[isignal[len(isignal) - 1]]],
                       [minsiglevel, minsiglevel], 'g', linewidth=2)
        p5, = plt.plot([pick, pick], [min(rms), max(rms)], 'b', linewidth=2)
        plt.legend([p1, p2, p3, p4, p5], ['RMS Data', 'RMS Noise Window',
@@ -701,15 +700,15 @@ def checkPonsets(pickdic, dttolerance, iplot):
    stations = []
    for key in pickdic:
        if pickdic[key]['P']['weight'] < 4:
            # add P onsets to list
            UTCPpick = UTCDateTime(pickdic[key]['P']['mpp'])
            Ppicks.append(UTCPpick.timestamp)
            stations.append(key)

    # apply jackknife bootstrapping on variance of P onsets
    print ("###############################################")
    print ("checkPonsets: Apply jackknife bootstrapping on P-onset times ...")
-    [xjack,PHI_pseudo,PHI_sub] = jackknife(Ppicks, 'VAR', 1)
+    [xjack, PHI_pseudo, PHI_sub] = jackknife(Ppicks, 'VAR', 1)
    # get pseudo variances smaller than average variances
    # (times safety factor), these picks passed jackknife test
    ij = np.where(PHI_pseudo <= 2 * xjack)
@@ -730,7 +729,7 @@ def checkPonsets(pickdic, dttolerance, iplot):

    print ("checkPonsets: %d pick(s) deviate too much from median!" % len(ibad))
    print ("checkPonsets: Skipped %d P pick(s) out of %d" % (len(badstations) \
                                                             + len(badjkstations), len(stations)))

    goodmarker = 'goodPonsetcheck'
    badmarker = 'badPonsetcheck'
@@ -881,10 +880,9 @@ def checkZ4S(X, pick, zfac, checkwin, iplot):
    if len(ndat) == 0: # check for other components
        ndat = X.select(component="1")

    z = zdat[0].data
    tz = np.arange(0, zdat[0].stats.npts / zdat[0].stats.sampling_rate,
                   zdat[0].stats.delta)

    # calculate RMS trace from vertical component
    absz = np.sqrt(np.power(z, 2))
@@ -916,9 +914,9 @@ def checkZ4S(X, pick, zfac, checkwin, iplot):

    if iplot > 1:
        te = np.arange(0, edat[0].stats.npts / edat[0].stats.sampling_rate,
                       edat[0].stats.delta)
        tn = np.arange(0, ndat[0].stats.npts / ndat[0].stats.sampling_rate,
                       ndat[0].stats.delta)
        plt.plot(tz, z / max(z), 'k')
        plt.plot(tz[isignal], z[isignal] / max(z), 'r')
        plt.plot(te, edat[0].data / max(edat[0].data) + 1, 'k')
@@ -960,65 +958,64 @@ def writephases(arrivals, fformat, filename):
    :type: string
    '''

    if fformat == 'NLLoc':
        print ("Writing phases to %s for NLLoc" % filename)
        fid = open("%s" % filename, 'w')
        # write header
        fid.write('# EQEVENT: Label: EQ001 Loc: X 0.00 Y 0.00 Z 10.00 OT 0.00 \n')
        for key in arrivals:
            # P onsets
            if arrivals[key]['P']:
                fm = arrivals[key]['P']['fm']
                if fm == None:
                    fm = '?'
                onset = arrivals[key]['P']['mpp']
                year = onset.year
                month = onset.month
                day = onset.day
                hh = onset.hour
                mm = onset.minute
                ss = onset.second
                ms = onset.microsecond
                ss_ms = ss + ms / 1000000.0
                if arrivals[key]['P']['weight'] < 4:
                    pweight = 1 # use pick
                else:
                    pweight = 0 # do not use pick
                fid.write('%s ? ? ? P %s %d%02d%02d %02d%02d %7.4f GAU 0 0 0 0 %d \n' % (key,
                                                                                         fm,
                                                                                         year,
                                                                                         month,
                                                                                         day,
                                                                                         hh,
                                                                                         mm,
                                                                                         ss_ms,
                                                                                         pweight))
            # S onsets
            if arrivals[key]['S']:
                fm = '?'
                onset = arrivals[key]['S']['mpp']
                year = onset.year
                month = onset.month
                day = onset.day
                hh = onset.hour
                mm = onset.minute
                ss = onset.second
                ms = onset.microsecond
                ss_ms = ss + ms / 1000000.0
                if arrivals[key]['S']['weight'] < 4:
                    sweight = 1 # use pick
                else:
                    sweight = 0 # do not use pick
                fid.write('%s ? ? ? S %s %d%02d%02d %02d%02d %7.4f GAU 0 0 0 0 %d \n' % (key,
                                                                                         fm,
                                                                                         year,
                                                                                         month,
                                                                                         day,
                                                                                         hh,
                                                                                         mm,
                                                                                         ss_ms,
                                                                                         sweight))

        fid.close()
@@ -1043,9 +1040,9 @@ def writephases(arrivals, fformat, filename):
            Ao = str('%7.2f' % Ao)
            year = Ponset.year
            if year >= 2000:
-                year = year -2000
+                year = year - 2000
            else:
                year = year - 1900
            month = Ponset.month
            day = Ponset.day
            hh = Ponset.hour
@@ -1054,9 +1051,9 @@ def writephases(arrivals, fformat, filename):
            ms = Ponset.microsecond
            ss_ms = ss + ms / 1000000.0
            if pweight < 2:
                pstr = 'I'
            elif pweight >= 2:
                pstr = 'E'
            if arrivals[key]['S']['weight'] < 4:
                Sss = Sonset.second
                Sms = Sonset.microsecond
@@ -1067,35 +1064,36 @@ def writephases(arrivals, fformat, filename):
                elif sweight >= 2:
                    sstr = 'E'
                fid.write('%s%sP%s%d %02d%02d%02d%02d%02d%5.2f %s%sS %d %s\n' % (key,
                                                                                 pstr,
                                                                                 fm,
                                                                                 pweight,
                                                                                 year,
                                                                                 month,
                                                                                 day,
                                                                                 hh,
                                                                                 mm,
                                                                                 ss_ms,
                                                                                 Sss_ms,
                                                                                 sstr,
                                                                                 sweight,
                                                                                 Ao))
            else:
                fid.write('%s%sP%s%d %02d%02d%02d%02d%02d%5.2f %s\n' % (key,
                                                                        pstr,
                                                                        fm,
                                                                        pweight,
                                                                        year,
                                                                        month,
                                                                        day,
                                                                        hh,
                                                                        mm,
                                                                        ss_ms,
                                                                        Ao))

        fid.close()


if __name__ == '__main__':
    import doctest

    doctest.testmod()