Merge branch 'develop' of ariadne.geophysik.rub.de:/data/git/pylot into develop

Conflicts:
	autoPyLoT.py
	pylot/core/analysis/magnitude.py
	pylot/core/pick/utils.py
Committed by Ludger Küperkoch on 2015-10-27 09:25:29 +01:00
35 changed files with 1132 additions and 1331 deletions

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created Oct/Nov 2014
@@ -24,7 +25,7 @@ class CharacteristicFunction(object):
'''
SuperClass for different types of characteristic functions.
'''
- def __init__(self, data, cut, t2=None, order=None, t1=None, fnoise=None):
+ def __init__(self, data, cut, t2=None, order=None, t1=None, fnoise=None, stealthMode=False):
'''
Initialize data type object with information from the original
Seismogram.
@@ -61,6 +62,7 @@ class CharacteristicFunction(object):
self.calcCF(self.getDataArray())
self.arpara = np.array([])
self.xpred = np.array([])
self._stealthMode = stealthMode
def __str__(self):
return '''\n\t{name} object:\n
@@ -119,7 +121,7 @@ class CharacteristicFunction(object):
def getTimeArray(self):
incr = self.getIncrement()
self.TimeArray = np.arange(0, len(self.getCF()) * incr, incr) + self.getCut()[0]
return self.TimeArray
def getFnoise(self):
@@ -134,6 +136,9 @@ class CharacteristicFunction(object):
def getXCF(self):
return self.xcf
def _getStealthMode(self):
return self._stealthMode
def getDataArray(self, cut=None):
'''
If cut times are given, time series is cut from cut[0] (start time)
@@ -164,31 +169,31 @@ class CharacteristicFunction(object):
stop = min([len(self.orig_data[0]), len(self.orig_data[1])])
elif self.cut[0] == 0 and self.cut[1] != 0:
start = 0
- stop = min([self.cut[1] / self.dt, len(self.orig_data[0]), \
-             len(self.orig_data[1])])
+ stop = min([self.cut[1] / self.dt, len(self.orig_data[0]),
+             len(self.orig_data[1])])
else:
start = max([0, self.cut[0] / self.dt])
- stop = min([self.cut[1] / self.dt, len(self.orig_data[0]), \
-             len(self.orig_data[1])])
+ stop = min([self.cut[1] / self.dt, len(self.orig_data[0]),
+             len(self.orig_data[1])])
hh = self.orig_data.copy()
h1 = hh[0].copy()
h2 = hh[1].copy()
hh[0].data = h1.data[int(start):int(stop)]
hh[1].data = h2.data[int(start):int(stop)]
data = hh
return data
elif len(self.orig_data) == 3:
if self.cut[0] == 0 and self.cut[1] == 0:
start = 0
- stop = min([self.cut[1] / self.dt, len(self.orig_data[0]), \
-             len(self.orig_data[1]), len(self.orig_data[2])])
+ stop = min([self.cut[1] / self.dt, len(self.orig_data[0]),
+             len(self.orig_data[1]), len(self.orig_data[2])])
elif self.cut[0] == 0 and self.cut[1] != 0:
start = 0
stop = self.cut[1] / self.dt
else:
start = max([0, self.cut[0] / self.dt])
- stop = min([self.cut[1] / self.dt, len(self.orig_data[0]), \
-             len(self.orig_data[1]), len(self.orig_data[2])])
+ stop = min([self.cut[1] / self.dt, len(self.orig_data[0]),
+             len(self.orig_data[1]), len(self.orig_data[2])])
hh = self.orig_data.copy()
h1 = hh[0].copy()
h2 = hh[1].copy()
@@ -196,12 +201,12 @@ class CharacteristicFunction(object):
hh[0].data = h1.data[int(start):int(stop)]
hh[1].data = h2.data[int(start):int(stop)]
hh[2].data = h3.data[int(start):int(stop)]
data = hh
return data
else:
data = self.orig_data.copy()
return data
def calcCF(self, data=None):
self.cf = data
@@ -218,7 +223,8 @@ class AICcf(CharacteristicFunction):
def calcCF(self, data):
- #print 'Calculating AIC ...' ## MP MP output suppressed
+ #if self._getStealthMode() is False:
+ # print 'Calculating AIC ...'
x = self.getDataArray()
xnp = x[0].data
nn = np.isnan(xnp)
@@ -230,7 +236,7 @@ class AICcf(CharacteristicFunction):
cumsumcf = np.cumsum(np.power(xnp, 2))
i = np.where(cumsumcf == 0)
cumsumcf[i] = np.finfo(np.float64).eps
- cf[k] = ((k - 1) * np.log(cumsumcf[k] / k) + (datlen - k + 1) * \
+ cf[k] = ((k - 1) * np.log(cumsumcf[k] / k) + (datlen - k + 1) *
np.log((cumsumcf[datlen - 1] - cumsumcf[k - 1]) / (datlen - k + 1)))
cf[0] = cf[1]
inf = np.isinf(cf)
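For readers following this hunk: a standalone sketch of the same characteristic function, assuming the Maeda-style AIC on cumulative signal energy that the loop above implements (function name and test signal are illustrative, not part of the commit):

```python
import numpy as np

def aic_cf(x):
    """Maeda-type AIC on a seismogram; its minimum marks the likely onset."""
    n = len(x)
    csum = np.cumsum(np.power(x, 2))
    csum[csum == 0] = np.finfo(np.float64).eps   # avoid log(0), as above
    cf = np.zeros(n)
    for k in range(1, n - 1):
        cf[k] = ((k - 1) * np.log(csum[k] / k)
                 + (n - k + 1) * np.log((csum[n - 1] - csum[k - 1]) / (n - k + 1)))
    cf[0] = cf[1]
    cf[np.isinf(cf)] = 0                         # mirror the isinf cleanup above
    return cf

# Toy usage: noise followed by signal; the AIC minimum sits near sample 200.
x = np.concatenate([0.1 * np.random.randn(200), np.random.randn(200)])
print(np.argmin(aic_cf(x)[10:-10]) + 10)
```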
@@ -256,11 +262,13 @@ class HOScf(CharacteristicFunction):
if len(nn) > 1:
xnp[nn] = 0
if self.getOrder() == 3: # this is skewness
- print 'Calculating skewness ...'
+ #if self._getStealthMode() is False:
+ # print 'Calculating skewness ...'
y = np.power(xnp, 3)
y1 = np.power(xnp, 2)
elif self.getOrder() == 4: # this is kurtosis
- #print 'Calculating kurtosis ...' ## MP MP output suppressed
+ #if self._getStealthMode() is False:
+ # print 'Calculating kurtosis ...'
y = np.power(xnp, 4)
y1 = np.power(xnp, 2)
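The skewness/kurtosis branches above build the CF from running sums of x**order and x**2. As a hedged stand-in, the same quantity can be read off a sliding window with scipy (window length and data are made up; the class itself uses a faster recursive update):

```python
import numpy as np
from scipy.stats import kurtosis, skew

def hos_cf(x, nwin, order=4):
    """Sliding higher-order statistic: order 3 = skewness, order 4 = kurtosis."""
    stat = skew if order == 3 else kurtosis
    cf = np.zeros(len(x))
    for j in range(nwin, len(x)):
        cf[j] = stat(x[j - nwin:j])      # statistic over the trailing window
    return np.nan_to_num(cf)             # NaNs zeroed, like the cleanup above

x = np.concatenate([0.1 * np.random.randn(500), np.random.randn(500)])
cf = hos_cf(x, nwin=100, order=4)        # kurtosis jumps at the onset
```

This O(n·nwin) loop trades speed for clarity; the recursion in the class achieves the same in O(n).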
@@ -285,7 +293,7 @@ class HOScf(CharacteristicFunction):
LTA[j] = lta / np.power(lta1, 1.5)
elif self.getOrder() == 4:
LTA[j] = lta / np.power(lta1, 2)
nn = np.isnan(LTA)
if len(nn) > 1:
LTA[nn] = 0
@@ -315,7 +323,7 @@ class ARZcf(CharacteristicFunction):
cf = np.zeros(len(xnp))
loopstep = self.getARdetStep()
arcalci = ldet + self.getOrder() #AR-calculation index
for i in range(ldet + self.getOrder(), tend - lpred - 1):
if i == arcalci:
#determination of AR coefficients
#to speed up calculation, AR-coefficients are calculated only every i+loopstep[1]!
@@ -362,7 +370,7 @@ class ARZcf(CharacteristicFunction):
rhs = np.zeros(self.getOrder())
for k in range(0, self.getOrder()):
for i in range(rind, ldet+1):
ki = k + 1
rhs[k] = rhs[k] + data[i] * data[i - ki]
#recursive calculation of data array (second sum at left part of eq. 6.5 in Kueperkoch et al. 2012)
@@ -382,7 +390,7 @@ class ARZcf(CharacteristicFunction):
def arPredZ(self, data, arpara, rind, lpred):
'''
Function to predict waveform, assuming an autoregressive process of order
p (=size(arpara)), with AR parameters arpara calculated in arDet. After
Thomas Meier (CAU), published in Kueperkoch et al. (2012).
:param: data, time series to be predicted
:type: array
@@ -400,9 +408,9 @@ class ARZcf(CharacteristicFunction):
'''
#be sure of the summation indices
if rind < len(arpara):
rind = len(arpara)
if rind > len(data) - lpred :
rind = len(data) - lpred
if lpred < 1:
lpred = 1
if lpred > len(data) - 2:
@@ -422,7 +430,7 @@ class ARHcf(CharacteristicFunction):
def calcCF(self, data):
print 'Calculating AR-prediction error from both horizontal traces ...'
xnp = self.getDataArray(self.getCut())
n0 = np.isnan(xnp[0].data)
if len(n0) > 1:
@@ -430,7 +438,7 @@ class ARHcf(CharacteristicFunction):
n1 = np.isnan(xnp[1].data)
if len(n1) > 1:
xnp[1].data[n1] = 0
#some parameters needed
#add noise to time series
xenoise = xnp[0].data + np.random.normal(0.0, 1.0, len(xnp[0].data)) * self.getFnoise() * max(abs(xnp[0].data))
@@ -441,7 +449,7 @@ class ARHcf(CharacteristicFunction):
#Time2: length of AR-prediction window [sec]
ldet = int(round(self.getTime1() / self.getIncrement())) #length of AR-determination window [samples]
lpred = int(np.ceil(self.getTime2() / self.getIncrement())) #length of AR-prediction window [samples]
cf = np.zeros(len(xenoise))
loopstep = self.getARdetStep()
arcalci = lpred + self.getOrder() - 1 #AR-calculation index
@@ -515,7 +523,7 @@ class ARHcf(CharacteristicFunction):
def arPredH(self, data, arpara, rind, lpred):
'''
Function to predict waveform, assuming an autoregressive process of order
p (=size(arpara)), with AR parameters arpara calculated in arDet. After
Thomas Meier (CAU), published in Kueperkoch et al. (2012).
:param: data, horizontal component seismograms to be predicted
:type: structured array
@@ -558,7 +566,7 @@ class AR3Ccf(CharacteristicFunction):
def calcCF(self, data):
print 'Calculating AR-prediction error from all 3 components ...'
xnp = self.getDataArray(self.getCut())
n0 = np.isnan(xnp[0].data)
if len(n0) > 1:
@@ -569,7 +577,7 @@ class AR3Ccf(CharacteristicFunction):
n2 = np.isnan(xnp[2].data)
if len(n2) > 1:
xnp[2].data[n2] = 0
#some parameters needed
#add noise to time series
xenoise = xnp[0].data + np.random.normal(0.0, 1.0, len(xnp[0].data)) * self.getFnoise() * max(abs(xnp[0].data))
@@ -581,7 +589,7 @@ class AR3Ccf(CharacteristicFunction):
#Time2: length of AR-prediction window [sec]
ldet = int(round(self.getTime1() / self.getIncrement())) #length of AR-determination window [samples]
lpred = int(np.ceil(self.getTime2() / self.getIncrement())) #length of AR-prediction window [samples]
cf = np.zeros(len(xenoise))
loopstep = self.getARdetStep()
arcalci = ldet + self.getOrder() - 1 #AR-calculation index
@@ -616,7 +624,7 @@ class AR3Ccf(CharacteristicFunction):
Function to calculate AR parameters arpara after Thomas Meier (CAU), published
in Kueperkoch et al. (2012). This function solves SLE using the Moore-
Penrose inverse, i.e. the least-squares approach. "data" is a structured array.
AR parameters are calculated based on both horizontal components and the vertical
component.
:param: data, horizontal component seismograms to calculate AR parameters from
:type: structured array
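A hedged single-trace sketch of the least-squares AR fit named above: build the lagged design matrix and solve it with the Moore-Penrose pseudoinverse; the 3C variant stacks equations from all three components. Function name and window indices are illustrative:

```python
import numpy as np

def ar_det(data, order, rind, ldet):
    """Least-squares AR coefficients from the determination window [rind, ldet]."""
    data = np.asarray(data, dtype=float)
    # one equation per sample: data[i] ~ sum_k a_k * data[i - k]
    A = np.array([data[i - order:i][::-1] for i in range(rind, ldet + 1)])
    b = data[rind:ldet + 1]
    return np.linalg.pinv(A) @ b          # Moore-Penrose solution, as in the text

x = np.sin(np.linspace(0, 30, 300)) + 0.05 * np.random.randn(300)
print(ar_det(x, order=2, rind=10, ldet=200))
```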
@@ -658,7 +666,7 @@ class AR3Ccf(CharacteristicFunction):
def arPred3C(self, data, arpara, rind, lpred):
'''
Function to predict waveform, assuming an autoregressive process of order
p (=size(arpara)), with AR parameters arpara calculated in arDet3C. After
Thomas Meier (CAU), published in Kueperkoch et al. (2012).
:param: data, horizontal and vertical component seismograms to be predicted
:type: structured array

View File

@@ -312,7 +312,7 @@ class PragPicker(AutoPicking):
else:
for i in range(1, len(self.cf)):
if i > ismooth:
- ii1 = i - ismooth;
+ ii1 = i - ismooth
cfsmooth[i] = cfsmooth[i - 1] + (self.cf[i] - self.cf[ii1]) / ismooth
else:
cfsmooth[i] = np.mean(self.cf[1 : i])
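The recursion above is an O(n) running mean: each step adds the newest CF sample and drops the one ismooth samples back. A hedged numpy equivalent (edge handling differs slightly from the recursive form):

```python
import numpy as np

cf = np.abs(np.random.randn(1000)).cumsum()        # stand-in for self.cf
ismooth = 20
cfsmooth = np.convolve(cf, np.ones(ismooth) / ismooth, mode='same')
```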

View File

@@ -1 +1,2 @@
- #
+ # -*- coding: utf-8 -*-
+ #

View File

@@ -204,27 +204,27 @@ def autopickstation(wfstream, pickparam):
if len(ndat) == 0 or len(edat) == 0:
print ("One or more horizontal components missing!")
print ("Signal length only checked on vertical component!")
print ("Decreasing minsiglengh from %f to %f" \
% (minsiglength, minsiglength / 2))
print ("Decreasing minsiglengh from %f to %f"
% (minsiglength, minsiglength / 2))
Pflag = checksignallength(zne, aicpick.getpick(), tsnrz,
- minsiglength / 2, \
+ minsiglength / 2,
nfacsl, minpercent, iplot)
else:
# filter and taper horizontal traces
trH1_filt = edat.copy()
trH2_filt = ndat.copy()
trH1_filt.filter('bandpass', freqmin=bph1[0],
- freqmax=bph1[1], \
- zerophase=False)
+ freqmax=bph1[1],
+ zerophase=False)
trH2_filt.filter('bandpass', freqmin=bph1[0],
- freqmax=bph1[1], \
- zerophase=False)
+ freqmax=bph1[1],
+ zerophase=False)
trH1_filt.taper(max_percentage=0.05, type='hann')
trH2_filt.taper(max_percentage=0.05, type='hann')
zne += trH1_filt
zne += trH2_filt
Pflag = checksignallength(zne, aicpick.getpick(), tsnrz,
- minsiglength, \
+ minsiglength,
nfacsl, minpercent, iplot)
if Pflag == 1:
@@ -234,7 +234,7 @@ def autopickstation(wfstream, pickparam):
print 'One or more horizontal components missing!'
print 'Skipping control function checkZ4S.'
else:
- Pflag = checkZ4S(zne, aicpick.getpick(), zfac, \
+ Pflag = checkZ4S(zne, aicpick.getpick(), zfac,
tsnrz[3], iplot)
if Pflag == 0:
Pmarker = 'SinsteadP'
@@ -317,31 +317,31 @@ def autopickstation(wfstream, pickparam):
data = Data()
[corzdat, restflag] = data.restituteWFData(invdir, zdat)
if restflag == 1:
# integrate to displacement
corintzdat = integrate.cumtrapz(corzdat[0], None, corzdat[0].stats.delta)
# class needs stream object => build it
z_copy = zdat.copy()
z_copy[0].data = corintzdat
# largest detectable period == window length
# after P pulse for calculating source spectrum
winzc = (1 / bpz2[0]) * z_copy[0].stats.sampling_rate
impickP = mpickP * z_copy[0].stats.sampling_rate
wfzc = z_copy[0].data[impickP : impickP + winzc]
# calculate spectrum using only first cycles of
# waveform after P onset!
zc = crossings_nonzero_all(wfzc)
if np.size(zc) == 0:
print ("Something is wrong with the waveform, "
"no zero crossings derived!")
print ("Cannot calculate source spectrum!")
else:
calcwin = (zc[3] - zc[0]) * z_copy[0].stats.delta
# calculate source spectrum and get w0 and fc
specpara = DCfc(z_copy, mpickP, calcwin, iplot)
w0 = specpara.getw0()
fc = specpara.getfc()
print ("autopickstation: P-weight: %d, SNR: %f, SNR[dB]: %f, " \
print ("autopickstation: P-weight: %d, SNR: %f, SNR[dB]: %f, "
"Polarity: %s" % (Pweight, SNRP, SNRPdB, FM))
Sflag = 1
@@ -352,7 +352,7 @@ def autopickstation(wfstream, pickparam):
Sflag = 0
else:
print ("autopickstation: No vertical component data available!, " \
print ("autopickstation: No vertical component data available!, "
"Skipping station!")
if edat is not None and ndat is not None and len(edat) > 0 and len(
@@ -560,7 +560,7 @@ def autopickstation(wfstream, pickparam):
hdat += ndat
h_copy = hdat.copy()
[cordat, restflag] = data.restituteWFData(invdir, h_copy)
# calculate WA-peak-to-peak amplitude
# using subclass WApp of superclass Magnitude
if restflag == 1:
if Sweight < 4:
@@ -591,10 +591,10 @@ def autopickstation(wfstream, pickparam):
h_copy = hdat.copy()
[cordat, restflag] = data.restituteWFData(invdir, h_copy)
if restflag == 1:
# calculate WA-peak-to-peak amplitude
# using subclass WApp of superclass Magnitude
- wapp = WApp(cordat, mpickP, mpickP + sstop + (0.5 * (mpickP \
-        + sstop)), iplot)
+ wapp = WApp(cordat, mpickP, mpickP + sstop + (0.5 * (mpickP
+        + sstop)), iplot)
Ao = wapp.getwapp()
else:
@@ -771,14 +771,14 @@ def autopickstation(wfstream, pickparam):
# create dictionary
# for P phase
phase = 'P'
- phasepick = {'lpp': lpickP, 'epp': epickP, 'mpp': mpickP, 'spe': Perror, \
+ phasepick = {'lpp': lpickP, 'epp': epickP, 'mpp': mpickP, 'spe': Perror,
'snr': SNRP, 'snrdb': SNRPdB, 'weight': Pweight, 'fm': FM}
picks = {phase: phasepick}
# add P marker
picks[phase]['marked'] = Pmarker
# add S phase
phase = 'S'
- phasepick = {'lpp': lpickS, 'epp': epickS, 'mpp': mpickS, 'spe': Serror, \
+ phasepick = {'lpp': lpickS, 'epp': epickS, 'mpp': mpickS, 'spe': Serror,
'snr': SNRS, 'snrdb': SNRSdB, 'weight': Sweight, 'fm': None}
picks[phase] = phasepick
# add Wood-Anderson amplitude

View File

@@ -6,8 +6,8 @@
Only for test purposes!
"""
from obspy.core import read
import matplotlib.pyplot as plt
import numpy as np
from pylot.core.pick.CharFuns import CharacteristicFunction
from pylot.core.pick.Picker import AutoPicking
@@ -56,7 +56,7 @@ def run_makeCF(project, database, event, iplot, station=None):
st_copy = st.copy()
#filter and taper data
tr_filt = st[0].copy()
tr_filt.filter('bandpass', freqmin=bpz[0], freqmax=bpz[1], zerophase=False)
tr_filt.taper(max_percentage=0.05, type='hann')
st_copy[0].data = tr_filt.data
##############################################################
@@ -120,8 +120,8 @@ def run_makeCF(project, database, event, iplot, station=None):
#filter and taper data
trH1_filt = H[0].copy()
trH2_filt = H[1].copy()
trH1_filt.filter('bandpass', freqmin=bph[0], freqmax=bph[1], zerophase=False)
trH2_filt.filter('bandpass', freqmin=bph[0], freqmax=bph[1], zerophase=False)
trH1_filt.taper(max_percentage=0.05, type='hann')
trH2_filt.taper(max_percentage=0.05, type='hann')
H_copy[0].data = trH1_filt.data
@@ -167,9 +167,9 @@ def run_makeCF(project, database, event, iplot, station=None):
All1_filt = AllC[0].copy()
All2_filt = AllC[1].copy()
All3_filt = AllC[2].copy()
All1_filt.filter('bandpass', freqmin=bph[0], freqmax=bph[1], zerophase=False)
All2_filt.filter('bandpass', freqmin=bph[0], freqmax=bph[1], zerophase=False)
All3_filt.filter('bandpass', freqmin=bpz[0], freqmax=bpz[1], zerophase=False)
All1_filt.taper(max_percentage=0.05, type='hann')
All2_filt.taper(max_percentage=0.05, type='hann')
All3_filt.taper(max_percentage=0.05, type='hann')
@@ -209,19 +209,19 @@ def run_makeCF(project, database, event, iplot, station=None):
plt.ylim([-1.5, 1.5])
plt.xlabel('Time [s]')
plt.ylabel('Normalized Counts')
- plt.title('%s, %s, CF-SNR=%7.2f, CF-Slope=%12.2f' % (tr.stats.station, \
-           tr.stats.channel, aicpick.getSNR(), aicpick.getSlope()))
+ plt.title('%s, %s, CF-SNR=%7.2f, CF-Slope=%12.2f' % (tr.stats.station,
+           tr.stats.channel, aicpick.getSNR(), aicpick.getSlope()))
plt.suptitle(tr.stats.starttime)
plt.legend([p1, p2, p3, p4, p5], ['Data', 'HOS-CF', 'HOSAIC-CF', 'ARZ-CF', 'ARZAIC-CF'])
#plot horizontal traces
plt.figure(2)
plt.subplot(2,1,1)
tsteph = tpredh / 4
th1data = np.arange(0, trH1_filt.stats.npts / trH1_filt.stats.sampling_rate, trH1_filt.stats.delta)
th2data = np.arange(0, trH2_filt.stats.npts / trH2_filt.stats.sampling_rate, trH2_filt.stats.delta)
tarhcf = np.arange(0, len(arhcf.getCF()) * tsteph, tsteph) + cuttimes[0] + tdeth +tpredh
p21, = plt.plot(th1data, trH1_filt.data/max(trH1_filt.data), 'k')
p22, = plt.plot(arhcf.getTimeArray(), arhcf.getCF()/max(arhcf.getCF()), 'r')
p23, = plt.plot(arhaiccf.getTimeArray(), arhaiccf.getCF()/max(arhaiccf.getCF()))
plt.plot([aicarhpick.getpick(), aicarhpick.getpick()], [-1, 1], 'b')
plt.plot([aicarhpick.getpick()-0.5, aicarhpick.getpick()+0.5], [1, 1], 'b')
@@ -238,10 +238,10 @@ def run_makeCF(project, database, event, iplot, station=None):
plt.ylabel('Normalized Counts')
plt.title([trH1_filt.stats.station, trH1_filt.stats.channel])
plt.suptitle(trH1_filt.stats.starttime)
plt.legend([p21, p22, p23], ['Data', 'ARH-CF', 'ARHAIC-CF'])
plt.subplot(2,1,2)
plt.plot(th2data, trH2_filt.data/max(trH2_filt.data), 'k')
plt.plot(arhcf.getTimeArray(), arhcf.getCF()/max(arhcf.getCF()), 'r')
plt.plot(arhaiccf.getTimeArray(), arhaiccf.getCF()/max(arhaiccf.getCF()))
plt.plot([aicarhpick.getpick(), aicarhpick.getpick()], [-1, 1], 'b')
plt.plot([aicarhpick.getpick()-0.5, aicarhpick.getpick()+0.5], [1, 1], 'b')
@@ -271,7 +271,7 @@ def run_makeCF(project, database, event, iplot, station=None):
plt.ylabel('Normalized Counts')
plt.title([tr.stats.station, tr.stats.channel])
plt.suptitle(trH1_filt.stats.starttime)
plt.legend([p31, p32], ['Data', 'AR3C-CF'])
plt.subplot(3,1,2)
plt.plot(th1data, trH1_filt.data/max(trH1_filt.data), 'k')
plt.plot(ar3ccf.getTimeArray(), ar3ccf.getCF()/max(ar3ccf.getCF()), 'r')

View File

@@ -1,4 +1,5 @@
- #
- # -*- coding: utf-8 -*-
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
"""
@@ -14,7 +15,7 @@ from obspy.core import Stream, UTCDateTime
import warnings
- def earllatepicker(X, nfac, TSNR, Pick1, iplot=None):
+ def earllatepicker(X, nfac, TSNR, Pick1, iplot=None, stealthMode = False):
'''
Function to derive earliest and latest possible pick after Diehl & Kissling (2009)
as reasonable uncertainties. Latest possible pick is based on noise level,
@@ -43,7 +44,8 @@ def earllatepicker(X, nfac, TSNR, Pick1, iplot=None):
LPick = None
EPick = None
PickError = None
- #print 'earllatepicker: Get earliest and latest possible pick relative to most likely pick ...'
+ if stealthMode is False:
+     print 'earllatepicker: Get earliest and latest possible pick relative to most likely pick ...'
x = X[0].data
t = np.arange(0, X[0].stats.npts / X[0].stats.sampling_rate,
@@ -74,8 +76,9 @@ def earllatepicker(X, nfac, TSNR, Pick1, iplot=None):
# if EPick stays NaN the signal window size will be doubled
while np.isnan(EPick):
if count > 0:
print("earllatepicker: Doubled signal window size %s time(s) "
"because of NaN for earliest pick." %count)
if stealthMode is False:
print("\nearllatepicker: Doubled signal window size %s time(s) "
"because of NaN for earliest pick." %count)
isigDoubleWinStart = pis[-1] + 1
isignalDoubleWin = np.arange(isigDoubleWinStart,
isigDoubleWinStart + len(pis))
@@ -91,7 +94,7 @@ def earllatepicker(X, nfac, TSNR, Pick1, iplot=None):
T0 = np.mean(np.diff(zc)) * X[0].stats.delta # this is half wave length
# T0/2, i.e. a quarter wavelength, is assumed as time difference between most likely and earliest possible pick!
EPick = Pick1 - T0 / 2
# get symmetric pick error as mean from earliest and latest possible pick
# by weighting latest possible pick two times earliest possible pick
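Numerically, the rule spelled out in the two comments above works like this (all values illustrative, not from the commit); the latest possible pick enters the error with twice the weight of the earliest one:

```python
Pick1 = 10.00                  # most likely onset [s]
LPick = 10.30                  # latest possible pick [s]
T0 = 0.16                      # half wavelength from the zero crossings [s]
EPick = Pick1 - T0 / 2         # earliest possible pick, a quarter wavelength early
# weighted mean of the two one-sided differences, latest counted twice:
PickError = ((Pick1 - EPick) + 2 * (LPick - Pick1)) / 3
print(EPick, round(PickError, 3))   # 9.92 0.227
```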
@@ -109,7 +112,7 @@ def earllatepicker(X, nfac, TSNR, Pick1, iplot=None):
markersize=14)
plt.legend([p1, p2, p3, p4, p5],
['Data', 'Noise Window', 'Signal Window', 'Noise Level',
- 'Zero Crossings'], \
+ 'Zero Crossings'],
loc='best')
plt.plot([t[0], t[int(len(t)) - 1]], [-nlevel, -nlevel], '--k')
plt.plot([Pick1, Pick1], [max(x), -max(x)], 'b', linewidth=2)
@@ -182,10 +185,10 @@ def fmpicker(Xraw, Xfilt, pickwin, Pick, iplot=None):
i = 0
for j in range(ipick[0][1], ipick[0][len(t[ipick]) - 1]):
i = i + 1
- if xraw[j - 1] <= 0 and xraw[j] >= 0:
+ if xraw[j - 1] <= 0 <= xraw[j]:
zc1.append(t[ipick][i])
index1.append(i)
- elif xraw[j - 1] > 0 and xraw[j] <= 0:
+ elif xraw[j - 1] > 0 >= xraw[j]:
zc1.append(t[ipick][i])
index1.append(i)
if len(zc1) == 3:
@@ -224,10 +227,10 @@ def fmpicker(Xraw, Xfilt, pickwin, Pick, iplot=None):
i = 0
for j in range(ipick[0][1], ipick[0][len(t[ipick]) - 1]):
i = i + 1
- if xfilt[j - 1] <= 0 and xfilt[j] >= 0:
+ if xfilt[j - 1] <= 0 <= xfilt[j]:
zc2.append(t[ipick][i])
index2.append(i)
- elif xfilt[j - 1] > 0 and xfilt[j] <= 0:
+ elif xfilt[j - 1] > 0 >= xfilt[j]:
zc2.append(t[ipick][i])
index2.append(i)
if len(zc2) == 3:
@@ -262,15 +265,15 @@ def fmpicker(Xraw, Xfilt, pickwin, Pick, iplot=None):
if P1 is not None and P2 is not None:
if P1[0] < 0 and P2[0] < 0:
FM = 'D'
- elif P1[0] >= 0 and P2[0] < 0:
+ elif P1[0] >= 0 > P2[0]:
FM = '-'
- elif P1[0] < 0 and P2[0] >= 0:
+ elif P1[0] < 0 <= P2[0]:
FM = '-'
elif P1[0] > 0 and P2[0] > 0:
FM = 'U'
- elif P1[0] <= 0 and P2[0] > 0:
+ elif P1[0] <= 0 < P2[0]:
FM = '+'
- elif P1[0] > 0 and P2[0] <= 0:
+ elif P1[0] > 0 >= P2[0]:
FM = '+'
print ("fmpicker: Found polarity %s" % FM)
@@ -285,7 +288,7 @@ def fmpicker(Xraw, Xfilt, pickwin, Pick, iplot=None):
p3, = plt.plot(zc1, np.zeros(len(zc1)), '*g', markersize=14)
p4, = plt.plot(t[islope1], datafit1, '--g', linewidth=2)
plt.legend([p1, p2, p3, p4],
- ['Pick', 'Slope Window', 'Zero Crossings', 'Slope'], \
+ ['Pick', 'Slope Window', 'Zero Crossings', 'Slope'],
loc='best')
plt.text(Pick + 0.02, max(xraw) / 2, '%s' % FM, fontsize=14)
ax = plt.gca()
@@ -493,9 +496,9 @@ def wadaticheck(pickdic, dttolerance, iplot):
if len(SPtimes) >= 3:
# calculate slope
p1 = np.polyfit(Ppicks, SPtimes, 1)
wdfit = np.polyval(p1, Ppicks)
wfitflag = 0
# calculate vp/vs ratio before check
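For context on the regression being set up here: in a Wadati diagram the S-P differential times grow linearly with P onset time, and the fitted slope m yields Vp/Vs = m + 1 (the cvpvsr line further down does exactly this). A hedged toy example with invented picks:

```python
import numpy as np

Ppicks = np.array([12.1, 14.8, 17.3, 21.0])    # P onset times [s]
SPtimes = np.array([8.9, 10.9, 12.7, 15.4])    # S-P times [s]
p1 = np.polyfit(Ppicks, SPtimes, 1)            # slope and intercept
vpvsr = p1[0] + 1                              # Vp/Vs ratio from the slope
print('Vp/Vs = %.2f' % vpvsr)                  # ~1.73
```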
@@ -532,40 +535,40 @@ def wadaticheck(pickdic, dttolerance, iplot):
pickdic[key]['S']['marked'] = marker
if len(checkedPpicks) >= 3:
# calculate new slope
p2 = np.polyfit(checkedPpicks, checkedSPtimes, 1)
wdfit2 = np.polyval(p2, checkedPpicks)
# calculate vp/vs ratio after check
cvpvsr = p2[0] + 1
print ("wadaticheck: Average Vp/Vs ratio after check: %f" % cvpvsr)
print ("wadatacheck: Skipped %d S pick(s)" % ibad)
else:
print ("###############################################")
print ("wadatacheck: Not enough checked S-P times available!")
print ("Skip Wadati check!")
print ("###############################################")
print ("wadatacheck: Not enough checked S-P times available!")
print ("Skip Wadati check!")
checkedonsets = pickdic
else:
print ("wadaticheck: Not enough S-P times available for reliable regression!")
print ("wadaticheck: Not enough S-P times available for reliable regression!")
print ("Skip wadati check!")
wfitflag = 1
# plot results
if iplot > 1:
plt.figure(iplot)
f1, = plt.plot(Ppicks, SPtimes, 'ro')
if wfitflag == 0:
f2, = plt.plot(Ppicks, wdfit, 'k')
f3, = plt.plot(checkedPpicks, checkedSPtimes, 'ko')
f4, = plt.plot(checkedPpicks, wdfit2, 'g')
plt.title('Wadati-Diagram, %d S-P Times, Vp/Vs(raw)=%5.2f,' \
'Vp/Vs(checked)=%5.2f' % (len(SPtimes), vpvsr, cvpvsr))
plt.legend([f1, f2, f3, f4], ['Skipped S-Picks', 'Wadati 1',
'Reliable S-Picks', 'Wadati 2'], loc='best')
else:
plt.title('Wadati-Diagram, %d S-P Times' % len(SPtimes))
plt.ylabel('S-P Times [s]')
plt.xlabel('P Times [s]')
@@ -579,8 +582,8 @@ def wadaticheck(pickdic, dttolerance, iplot):
def checksignallength(X, pick, TSNR, minsiglength, nfac, minpercent, iplot):
'''
Function to detect spuriously picked noise peaks.
Uses RMS trace of all 3 components (if available) to determine
how many samples [per cent] after P onset are below a certain
threshold, calculated from noise level times noise factor.
: param: X, time series (seismogram)
@@ -612,7 +615,7 @@ def checksignallength(X, pick, TSNR, minsiglength, nfac, minpercent, iplot):
print ("Checking signal length ...")
if len(X) > 1:
# all three components available
# make sure, all components have equal lengths
ilen = min([len(X[0].data), len(X[1].data), len(X[2].data)])
x1 = X[0][0:ilen]
@@ -639,7 +642,7 @@ def checksignallength(X, pick, TSNR, minsiglength, nfac, minpercent, iplot):
numoverthr = len(np.where(rms[isignal] >= minsiglevel)[0])
if numoverthr >= minnum:
print ("checksignallength: Signal reached required length.")
print ("checksignallength: Signal reached required length.")
returnflag = 1
else:
print ("checksignallength: Signal shorter than required minimum signal length!")
@@ -649,15 +652,15 @@ def checksignallength(X, pick, TSNR, minsiglength, nfac, minpercent, iplot):
if iplot == 2:
plt.figure(iplot)
p1, = plt.plot(t,rms, 'k')
p2, = plt.plot(t[inoise], rms[inoise], 'c')
p3, = plt.plot(t[isignal],rms[isignal], 'r')
- p4, = plt.plot([t[isignal[0]], t[isignal[len(isignal)-1]]], \
-       [minsiglevel, minsiglevel], 'g', linewidth=2)
+ p4, = plt.plot([t[isignal[0]], t[isignal[len(isignal)-1]]],
+       [minsiglevel, minsiglevel], 'g', linewidth=2)
p5, = plt.plot([pick, pick], [min(rms), max(rms)], 'b', linewidth=2)
- plt.legend([p1, p2, p3, p4, p5], ['RMS Data', 'RMS Noise Window', \
-            'RMS Signal Window', 'Minimum Signal Level', \
-            'Onset'], loc='best')
+ plt.legend([p1, p2, p3, p4, p5], ['RMS Data', 'RMS Noise Window',
+            'RMS Signal Window', 'Minimum Signal Level',
+            'Onset'], loc='best')
plt.xlabel('Time [s] since %s' % X[0].stats.starttime)
plt.ylabel('Counts')
plt.title('Check for Signal Length, Station %s' % X[0].stats.station)
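A compact sketch of the test this plot illustrates, assuming the threshold is the noise-window RMS level scaled by nfac (the exact noise statistic is the code's choice and not shown in this hunk; helper name is invented):

```python
import numpy as np

def signal_long_enough(rms, inoise, isignal, nfac, minpercent):
    """True if enough of the signal window stays above the scaled noise level."""
    minsiglevel = np.mean(rms[inoise]) * nfac            # assumed noise statistic
    minnum = len(isignal) * minpercent / 100.0
    return len(np.where(rms[isignal] >= minsiglevel)[0]) >= minnum
```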
@@ -729,32 +732,32 @@ def checkPonsets(pickdic, dttolerance, iplot):
badjkmarker = 'badjkcheck'
for i in range(0, len(goodstations)):
# mark P onset as checked and keep P weight
pickdic[goodstations[i]]['P']['marked'] = goodmarker
for i in range(0, len(badstations)):
# mark P onset and downgrade P weight to 9
# (not used anymore)
pickdic[badstations[i]]['P']['marked'] = badmarker
pickdic[badstations[i]]['P']['weight'] = 9
for i in range(0, len(badjkstations)):
# mark P onset and downgrade P weight to 9
# (not used anymore)
pickdic[badjkstations[i]]['P']['marked'] = badjkmarker
pickdic[badjkstations[i]]['P']['weight'] = 9
checkedonsets = pickdic
if iplot > 1:
p1, = plt.plot(np.arange(0, len(Ppicks)), Ppicks, 'r+', markersize=14)
p2, = plt.plot(igood, np.array(Ppicks)[igood], 'g*', markersize=14)
- p3, = plt.plot([0, len(Ppicks) - 1], [pmedian, pmedian], 'g', \
-      linewidth=2)
+ p3, = plt.plot([0, len(Ppicks) - 1], [pmedian, pmedian], 'g',
+      linewidth=2)
for i in range(0, len(Ppicks)):
plt.text(i, Ppicks[i] + 0.2, stations[i])
plt.xlabel('Number of P Picks')
plt.ylabel('Onset Time [s] from 1.1.1970')
- plt.legend([p1, p2, p3], ['Skipped P Picks', 'Good P Picks', 'Median'], \
-            loc='best')
+ plt.legend([p1, p2, p3], ['Skipped P Picks', 'Good P Picks', 'Median'],
+            loc='best')
plt.title('Check P Onsets')
plt.show()
raw_input()
@@ -789,37 +792,37 @@ def jackknife(X, phi, h):
g = len(X) / h
if type(g) is not int:
print ("jackknife: Cannot divide quantity X in equal sized subgroups!")
print ("jackknife: Cannot divide quantity X in equal sized subgroups!")
print ("Choose another size for subgroups!")
return PHI_jack, PHI_pseudo, PHI_sub
else:
# estimator of undisturbed spot check
if phi == 'MEA':
phi_sc = np.mean(X)
elif phi == 'VAR':
phi_sc = np.var(X)
elif phi == 'MED':
phi_sc = np.median(X)
# estimators of subgroups
PHI_pseudo = []
PHI_sub = []
for i in range(0, g - 1):
# subgroup i, remove i-th sample
xx = X[:]
del xx[i]
# calculate estimators of disturbed spot check
if phi == 'MEA':
phi_sub = np.mean(xx)
elif phi == 'VAR':
phi_sub = np.var(xx)
elif phi == 'MED':
phi_sub = np.median(xx)
PHI_sub.append(phi_sub)
# pseudo values
phi_pseudo = g * phi_sc - ((g - 1) * phi_sub)
PHI_pseudo.append(phi_pseudo)
# jackknife estimator
PHI_jack = np.mean(PHI_pseudo)
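A delete-one special case (h = 1) of the jackknife coded above, as a hedged standalone sketch: pseudo values g*phi_all - (g-1)*phi_(-i) are averaged into the jackknife estimator, here with the median as the statistic phi:

```python
import numpy as np

def jackknife_median(x):
    x = np.asarray(x, dtype=float)
    g = len(x)
    phi_all = np.median(x)                 # undisturbed spot check
    pseudo = [g * phi_all - (g - 1) * np.median(np.delete(x, i))
              for i in range(g)]           # one pseudo value per left-out sample
    return np.mean(pseudo)                 # jackknife estimator

print(jackknife_median([1.2, 0.9, 1.1, 1.4, 5.0]))
```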
@@ -899,29 +902,29 @@ def checkZ4S(X, pick, zfac, checkwin, iplot):
# vertical P-coda level must exceed horizontal P-coda level
# zfac times the EN coda level
if zcodalevel < minsiglevel:
print ("checkZ4S: Maybe S onset? Skip this P pick!")
print ("checkZ4S: Maybe S onset? Skip this P pick!")
else:
print ("checkZ4S: P onset passes checkZ4S test!")
returnflag = 1
if iplot > 1:
te = np.arange(0, edat[0].stats.npts / edat[0].stats.sampling_rate,
edat[0].stats.delta)
tn = np.arange(0, ndat[0].stats.npts / ndat[0].stats.sampling_rate,
ndat[0].stats.delta)
plt.plot(tz, z / max(z), 'k')
plt.plot(tz[isignal], z[isignal] / max(z), 'r')
plt.plot(te, edat[0].data / max(edat[0].data) + 1, 'k')
plt.plot(te[isignal], edat[0].data[isignal] / max(edat[0].data) + 1, 'r')
plt.plot(tn, ndat[0].data / max(ndat[0].data) + 2, 'k')
plt.plot(tn[isignal], ndat[0].data[isignal] / max(ndat[0].data) + 2, 'r')
- plt.plot([tz[isignal[0]], tz[isignal[len(isignal) - 1]]], \
-          [minsiglevel / max(z), minsiglevel / max(z)], 'g', \
-          linewidth=2)
+ plt.plot([tz[isignal[0]], tz[isignal[len(isignal) - 1]]],
+          [minsiglevel / max(z), minsiglevel / max(z)], 'g',
+          linewidth=2)
plt.xlabel('Time [s] since %s' % zdat[0].stats.starttime)
plt.ylabel('Normalized Counts')
- plt.yticks([0, 1, 2], [zdat[0].stats.channel, edat[0].stats.channel, \
-            ndat[0].stats.channel])
+ plt.yticks([0, 1, 2], [zdat[0].stats.channel, edat[0].stats.channel,
+            ndat[0].stats.channel])
plt.title('CheckZ4S, Station %s' % zdat[0].stats.station)
plt.show()
raw_input()
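Finally, a hedged sketch of the criterion checkZ4S plots above: within the P-coda window the vertical RMS must exceed the averaged horizontal coda level scaled by zfac, otherwise the pick is more plausibly an S onset (helper name and the exact averaging are assumptions, not the committed code):

```python
import numpy as np

def z4s_passes(z, e, n, isignal, zfac):
    """True if the vertical P-coda dominates the horizontals by factor zfac."""
    zcoda = np.sqrt(np.mean(z[isignal] ** 2))                 # vertical coda RMS
    encoda = np.sqrt(np.mean((e[isignal] ** 2 + n[isignal] ** 2) / 2))
    return zcoda >= zfac * encoda                             # keep P pick if True
```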