merged 3 files

Merge branch 'develop' of ariadne.geophysik.ruhr-uni-bochum.de:/data/git/pylot into develop

Conflicts:
	pylot/core/active/activeSeismoPick.py
	pylot/core/active/seismicshot.py
	pylot/core/active/surveyPlotTools.py
Committed by Marcel Paffrath on 2015-10-19 13:15:28 +02:00
31 changed files with 454 additions and 289 deletions

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created Oct/Nov 2014
@@ -120,7 +121,7 @@ class CharacteristicFunction(object):
def getTimeArray(self):
incr = self.getIncrement()
self.TimeArray = np.arange(0, len(self.getCF()) * incr, incr) + self.getCut()[0]
return self.TimeArray
def getFnoise(self):
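As an aside: getTimeArray simply maps CF samples onto absolute times inside the cut window. A minimal, float-safe sketch of the same mapping (function name illustrative, not part of PyLoT):

import numpy as np

def cf_time_axis(n_samples, dt, t_start):
    # one time stamp per CF sample, offset to the start of the cut window;
    # np.arange(n_samples) * dt avoids the float-endpoint pitfalls of arange(0, n*dt, dt)
    return np.arange(n_samples) * dt + t_start

# e.g. 1000 CF samples at 100 Hz, cut window starting 10 s into the trace
t = cf_time_axis(1000, 0.01, 10.0)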
@@ -176,7 +177,7 @@ class CharacteristicFunction(object):
h2 = hh[1].copy()
hh[0].data = h1.data[int(start):int(stop)]
hh[1].data = h2.data[int(start):int(stop)]
data = hh
return data
elif len(self.orig_data) == 3:
if self.cut[0] == 0 and self.cut[1] == 0:
@@ -197,12 +198,12 @@ class CharacteristicFunction(object):
hh[0].data = h1.data[int(start):int(stop)]
hh[1].data = h2.data[int(start):int(stop)]
hh[2].data = h3.data[int(start):int(stop)]
data = hh
return data
else:
data = self.orig_data.copy()
return data
def calcCF(self, data=None):
self.cf = data
@@ -289,7 +290,7 @@ class HOScf(CharacteristicFunction):
LTA[j] = lta / np.power(lta1, 1.5)
elif self.getOrder() == 4:
LTA[j] = lta / np.power(lta1, 2)
nn = np.isnan(LTA)
if len(nn) > 1:
LTA[nn] = 0
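For context: dividing the accumulated third moment by lta1**1.5 (order 3) or the fourth moment by lta1**2 (order 4) is the standard skewness/kurtosis normalization. A simplified, self-contained sketch of such a higher-order-statistics characteristic function (not the class above):

import numpy as np

def hos_cf(x, nwin, order=4):
    # sliding skewness (order 3) or kurtosis (order 4) of a seismic trace
    cf = np.zeros(len(x))
    for j in range(nwin, len(x)):
        w = x[j - nwin:j] - np.mean(x[j - nwin:j])
        var = np.mean(w ** 2)
        if var > 0:
            cf[j] = np.mean(w ** order) / var ** (order / 2.0)
    return cf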
@@ -319,7 +320,7 @@ class ARZcf(CharacteristicFunction):
cf = np.zeros(len(xnp))
loopstep = self.getARdetStep()
arcalci = ldet + self.getOrder() #AR-calculation index
for i in range(ldet + self.getOrder(), tend - lpred - 1):
if i == arcalci:
#determination of AR coefficients
#to speed up calculation, AR-coefficients are calculated only every i+loopstep[1]!
@@ -366,7 +367,7 @@ class ARZcf(CharacteristicFunction):
rhs = np.zeros(self.getOrder())
for k in range(0, self.getOrder()):
for i in range(rind, ldet+1):
ki = k + 1
rhs[k] = rhs[k] + data[i] * data[i - ki]
#recursive calculation of data array (second sum at left part of eq. 6.5 in Kueperkoch et al. 2012)
@@ -386,7 +387,7 @@ class ARZcf(CharacteristicFunction):
def arPredZ(self, data, arpara, rind, lpred):
'''
Function to predict waveform, assuming an autoregressive process of order
p (=size(arpara)), with AR parameters arpara calculated in arDet. After
Thomas Meier (CAU), published in Kueperkoch et al. (2012).
:param: data, time series to be predicted
:type: array
@@ -404,9 +405,9 @@ class ARZcf(CharacteristicFunction):
'''
#be sure of the summation indices
if rind < len(arpara):
rind = len(arpara)
if rind > len(data) - lpred:
rind = len(data) - lpred
if lpred < 1:
lpred = 1
if lpred > len(data) - 2:
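The clamping above keeps the prediction loop inside the data array. For orientation, the prediction itself is the usual AR recursion, where each new sample is the weighted sum of the p preceding ones; a condensed sketch (simplified relative to arPredZ):

import numpy as np

def ar_predict(data, arpara, rind, lpred):
    # predict lpred samples from index rind on, assuming an AR(p) process
    p = len(arpara)
    z = np.array(data, dtype=float)
    for i in range(rind, min(rind + lpred, len(z))):
        z[i] = sum(arpara[k] * z[i - k - 1] for k in range(p))
    return z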
@@ -426,7 +427,7 @@ class ARHcf(CharacteristicFunction):
def calcCF(self, data):
print 'Calculating AR-prediction error from both horizontal traces ...'
xnp = self.getDataArray(self.getCut())
n0 = np.isnan(xnp[0].data)
if len(n0) > 1:
@@ -434,7 +435,7 @@ class ARHcf(CharacteristicFunction):
n1 = np.isnan(xnp[1].data)
if len(n1) > 1:
xnp[1].data[n1] = 0
#some parameters needed
#add noise to time series
xenoise = xnp[0].data + np.random.normal(0.0, 1.0, len(xnp[0].data)) * self.getFnoise() * max(abs(xnp[0].data))
@@ -445,7 +446,7 @@ class ARHcf(CharacteristicFunction):
#Time2: length of AR-prediction window [sec]
ldet = int(round(self.getTime1() / self.getIncrement())) #length of AR-determination window [samples]
lpred = int(np.ceil(self.getTime2() / self.getIncrement())) #length of AR-prediction window [samples]
cf = np.zeros(len(xenoise))
loopstep = self.getARdetStep()
arcalci = lpred + self.getOrder() - 1 #AR-calculation index
@@ -519,7 +520,7 @@ class ARHcf(CharacteristicFunction):
def arPredH(self, data, arpara, rind, lpred):
'''
Function to predict waveform, assuming an autoregressive process of order
p (=size(arpara)), with AR parameters arpara calculated in arDet. After
Thomas Meier (CAU), published in Kueperkoch et al. (2012).
:param: data, horizontal component seismograms to be predicted
:type: structured array
@@ -562,7 +563,7 @@ class AR3Ccf(CharacteristicFunction):
def calcCF(self, data):
print 'Calculating AR-prediction error from all 3 components ...'
xnp = self.getDataArray(self.getCut())
n0 = np.isnan(xnp[0].data)
if len(n0) > 1:
@@ -573,7 +574,7 @@ class AR3Ccf(CharacteristicFunction):
n2 = np.isnan(xnp[2].data)
if len(n2) > 1:
xnp[2].data[n2] = 0
#some parameters needed
#add noise to time series
xenoise = xnp[0].data + np.random.normal(0.0, 1.0, len(xnp[0].data)) * self.getFnoise() * max(abs(xnp[0].data))
@@ -585,7 +586,7 @@ class AR3Ccf(CharacteristicFunction):
#Time2: length of AR-prediction window [sec]
ldet = int(round(self.getTime1() / self.getIncrement())) #length of AR-determination window [samples]
lpred = int(np.ceil(self.getTime2() / self.getIncrement())) #length of AR-prediction window [samples]
cf = np.zeros(len(xenoise))
loopstep = self.getARdetStep()
arcalci = ldet + self.getOrder() - 1 #AR-calculation index
@@ -620,7 +621,7 @@ class AR3Ccf(CharacteristicFunction):
Function to calculate AR parameters arpara after Thomas Meier (CAU), published
in Kueperkoch et al. (2012). This function solves SLE using the Moore-
Penrose inverse, i.e. the least-squares approach. "data" is a structured array.
AR parameters are calculated based on both horizontal components and the vertical
component.
:param: data, horizontal component seismograms to calculate AR parameters from
:type: structured array
@@ -662,7 +663,7 @@ class AR3Ccf(CharacteristicFunction):
def arPred3C(self, data, arpara, rind, lpred):
'''
Function to predict waveform, assuming an autoregressive process of order
p (=size(arpara)), with AR parameters arpara calculated in arDet3C. After
Thomas Meier (CAU), published in Kueperkoch et al. (2012).
:param: data, horizontal and vertical component seismograms to be predicted
:type: structured array

View File

@@ -312,7 +312,7 @@ class PragPicker(AutoPicking):
else:
for i in range(1, len(self.cf)):
if i > ismooth:
ii1 = i - ismooth
cfsmooth[i] = cfsmooth[i - 1] + (self.cf[i] - self.cf[ii1]) / ismooth
else:
cfsmooth[i] = np.mean(self.cf[1 : i])
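The recursion above is an incremental moving average of width ismooth: each step adds the newest sample and drops the one leaving the window. The same idea as a standalone sketch:

import numpy as np

def running_mean(cf, ismooth):
    # incremental running mean; O(n) instead of re-summing the window each step
    cfsmooth = np.zeros(len(cf))
    cfsmooth[0] = cf[0]
    for i in range(1, len(cf)):
        if i > ismooth:
            cfsmooth[i] = cfsmooth[i - 1] + (cf[i] - cf[i - ismooth]) / ismooth
        else:
            cfsmooth[i] = np.mean(cf[0:i + 1])  # window not yet filled
    return cfsmooth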

View File

@@ -1 +1,2 @@
# -*- coding: utf-8 -*-
#

View File

@@ -317,29 +317,29 @@ def autopickstation(wfstream, pickparam):
data = Data()
[corzdat, restflag] = data.restituteWFData(invdir, zdat)
if restflag == 1:
# integrate to displacement
corintzdat = integrate.cumtrapz(corzdat[0], None, corzdat[0].stats.delta)
# class needs stream object => build it
z_copy = zdat.copy()
z_copy[0].data = corintzdat
# largest detectable period == window length
# after P pulse for calculating source spectrum
winzc = (1 / bpz2[0]) * z_copy[0].stats.sampling_rate
impickP = mpickP * z_copy[0].stats.sampling_rate
wfzc = z_copy[0].data[impickP : impickP + winzc]
# calculate spectrum using only first cycles of
# waveform after P onset!
zc = crossings_nonzero_all(wfzc)
if np.size(zc) == 0:
print ("Something is wrong with the waveform, " \
"no zero crossings derived!")
print ("Cannot calculate source spectrum!")
else:
calcwin = (zc[3] - zc[0]) * z_copy[0].stats.delta
# calculate source spectrum and get w0 and fc
specpara = DCfc(z_copy, mpickP, calcwin, iplot)
w0 = specpara.getw0()
fc = specpara.getfc()
print ("autopickstation: P-weight: %d, SNR: %f, SNR[dB]: %f, " \
"Polarity: %s" % (Pweight, SNRP, SNRPdB, FM))
@@ -560,7 +560,7 @@ def autopickstation(wfstream, pickparam):
hdat += ndat
h_copy = hdat.copy()
[cordat, restflag] = data.restituteWFData(invdir, h_copy)
# calculate WA-peak-to-peak amplitude
# using subclass WApp of superclass Magnitude
if restflag == 1:
if Sweight < 4:
@@ -591,7 +591,7 @@ def autopickstation(wfstream, pickparam):
h_copy = hdat.copy()
[cordat, restflag] = data.restituteWFData(invdir, h_copy)
if restflag == 1:
# calculate WA-peak-to-peak amplitude
# using subclass WApp of superclass Magnitude
wapp = WApp(cordat, mpickP, mpickP + sstop + (0.5 * (mpickP \
+ sstop)), iplot)
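The amplitude measurement itself boils down to a peak-to-peak reading on the Wood-Anderson-simulated trace in the window after the S onset; a minimal sketch, assuming restitution and WA simulation have already produced wa_trace:

import numpy as np

def wa_peak_to_peak(wa_trace, dt, t_onset, t_win):
    # peak-to-peak amplitude in the window following the S onset
    i0 = int(round(t_onset / dt))
    i1 = int(round((t_onset + t_win) / dt))
    seg = np.asarray(wa_trace[i0:i1])
    return seg.max() - seg.min()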

View File

@@ -1,4 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# -*- coding: utf-8 -*-
"""
@@ -93,7 +94,7 @@ def earllatepicker(X, nfac, TSNR, Pick1, iplot=None, stealthMode = False):
T0 = np.mean(np.diff(zc)) * X[0].stats.delta # this is half wave length
# T0/2 (a quarter of the dominant period) is assumed as time difference between most likely and earliest possible pick!
EPick = Pick1 - T0 / 2
# get symmetric pick error as mean from earliest and latest possible pick
# by weighting latest possible pick two times earliest possible pick
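The reasoning: the mean spacing of zero crossings near the onset approximates half the dominant period, so Pick1 - T0/2 places the earliest possible pick a quarter period before the most likely one. A condensed sketch (the 200-sample window is purely illustrative):

import numpy as np

def earliest_pick(x, dt, ipick, nwin=200):
    seg = np.asarray(x[int(ipick):int(ipick) + nwin])
    zc = np.where(np.diff(np.sign(seg)) != 0)[0]  # zero crossings
    if zc.size < 2:
        return None
    T0 = np.mean(np.diff(zc)) * dt  # ~ half the dominant period
    return ipick * dt - T0 / 2      # a quarter period before the most likely pick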
@@ -495,9 +496,9 @@ def wadaticheck(pickdic, dttolerance, iplot):
if len(SPtimes) >= 3:
# calculate slope
p1 = np.polyfit(Ppicks, SPtimes, 1)
wdfit = np.polyval(p1, Ppicks)
wfitflag = 0
# calculate vp/vs ratio before check
@@ -534,40 +535,40 @@ def wadaticheck(pickdic, dttolerance, iplot):
pickdic[key]['S']['marked'] = marker
if len(checkedPpicks) >= 3:
# calculate new slope
p2 = np.polyfit(checkedPpicks, checkedSPtimes, 1)
wdfit2 = np.polyval(p2, checkedPpicks)
# calculate vp/vs ratio after check
cvpvsr = p2[0] + 1
print ("wadaticheck: Average Vp/Vs ratio after check: %f" % cvpvsr)
print ("wadaticheck: Skipped %d S pick(s)" % ibad)
else:
print ("###############################################")
print ("wadatacheck: Not enough checked S-P times available!")
print ("Skip Wadati check!")
print ("###############################################")
print ("wadatacheck: Not enough checked S-P times available!")
print ("Skip Wadati check!")
checkedonsets = pickdic
else:
print ("wadaticheck: Not enough S-P times available for reliable regression!")
print ("wadaticheck: Not enough S-P times available for reliable regression!")
print ("Skip wadati check!")
wfitflag = 1
# plot results
if iplot > 1:
plt.figure(iplot)
f1, = plt.plot(Ppicks, SPtimes, 'ro')
if wfitflag == 0:
f2, = plt.plot(Ppicks, wdfit, 'k')
f3, = plt.plot(checkedPpicks, checkedSPtimes, 'ko')
f4, = plt.plot(checkedPpicks, wdfit2, 'g')
plt.title('Wadati-Diagram, %d S-P Times, Vp/Vs(raw)=%5.2f, ' \
'Vp/Vs(checked)=%5.2f' % (len(SPtimes), vpvsr, cvpvsr))
plt.legend([f1, f2, f3, f4], ['Skipped S-Picks', 'Wadati 1', \
'Reliable S-Picks', 'Wadati 2'], loc='best')
else:
plt.title('Wadati-Diagram, %d S-P Times' % len(SPtimes))
plt.ylabel('S-P Times [s]')
plt.xlabel('P Times [s]')
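Background for the slope arithmetic: on a Wadati diagram the S-P time grows linearly with the P onset time, ts - tp = (vp/vs - 1)(tp - t0), so the fitted slope plus one is the Vp/Vs ratio (hence cvpvsr = p2[0] + 1 above). The regression step in isolation:

import numpy as np

def vpvs_from_picks(p_times, sp_times):
    # linear fit of S-P times against P onset times (Wadati diagram)
    slope, intercept = np.polyfit(p_times, sp_times, 1)
    return slope + 1  # slope = vp/vs - 1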
@@ -581,8 +582,8 @@ def wadaticheck(pickdic, dttolerance, iplot):
def checksignallength(X, pick, TSNR, minsiglength, nfac, minpercent, iplot):
'''
Function to detect spuriously picked noise peaks.
Uses the RMS trace of all 3 components (if available) to determine
how many samples [per cent] after P onset are below a certain
threshold, calculated from noise level times noise factor.
: param: X, time series (seismogram)
@@ -614,7 +615,7 @@ def checksignallength(X, pick, TSNR, minsiglength, nfac, minpercent, iplot):
print ("Checking signal length ...")
if len(X) > 1:
# all three components available
# make sure, all components have equal lengths
ilen = min([len(X[0].data), len(X[1].data), len(X[2].data)])
x1 = X[0][0:ilen]
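The RMS trace the docstring refers to combines the three components sample by sample; a minimal sketch, assuming equal-length arrays x1, x2, x3:

import numpy as np

def rms_trace(x1, x2, x3):
    # component-wise root mean square of the three traces
    return np.sqrt((np.asarray(x1) ** 2 + np.asarray(x2) ** 2 +
                    np.asarray(x3) ** 2) / 3)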
@@ -641,7 +642,7 @@ def checksignallength(X, pick, TSNR, minsiglength, nfac, minpercent, iplot):
numoverthr = len(np.where(rms[isignal] >= minsiglevel)[0])
if numoverthr >= minnum:
print ("checksignallength: Signal reached required length.")
print ("checksignallength: Signal reached required length.")
returnflag = 1
else:
print ("checksignallength: Signal shorter than required minimum signal length!")
@@ -651,7 +652,7 @@ def checksignallength(X, pick, TSNR, minsiglength, nfac, minpercent, iplot):
if iplot == 2:
plt.figure(iplot)
p1, = plt.plot(t,rms, 'k')
p2, = plt.plot(t[inoise], rms[inoise], 'c')
p3, = plt.plot(t[isignal],rms[isignal], 'r')
p4, = plt.plot([t[isignal[0]], t[isignal[len(isignal)-1]]], \
@@ -731,27 +732,27 @@ def checkPonsets(pickdic, dttolerance, iplot):
badjkmarker = 'badjkcheck'
for i in range(0, len(goodstations)):
# mark P onset as checked and keep P weight
pickdic[goodstations[i]]['P']['marked'] = goodmarker
for i in range(0, len(badstations)):
# mark P onset and downgrade P weight to 9
# (not used anymore)
pickdic[badstations[i]]['P']['marked'] = badmarker
pickdic[badstations[i]]['P']['weight'] = 9
for i in range(0, len(badjkstations)):
# mark P onset and downgrade P weight to 9
# (not used anymore)
pickdic[badjkstations[i]]['P']['marked'] = badjkmarker
pickdic[badjkstations[i]]['P']['weight'] = 9
checkedonsets = pickdic
if iplot > 1:
p1, = plt.plot(np.arange(0, len(Ppicks)), Ppicks, 'r+', markersize=14)
p2, = plt.plot(igood, np.array(Ppicks)[igood], 'g*', markersize=14)
p3, = plt.plot([0, len(Ppicks) - 1], [pmedian, pmedian], 'g', \
linewidth=2)
for i in range(0, len(Ppicks)):
plt.text(i, Ppicks[i] + 0.2, stations[i])
plt.xlabel('Number of P Picks')
plt.ylabel('Onset Time [s] from 1.1.1970')
@@ -791,37 +792,37 @@ def jackknife(X, phi, h):
g = len(X) / h
if type(g) is not int:
print ("jackknife: Cannot divide quantity X in equal sized subgroups!")
print ("jackknife: Cannot divide quantity X in equal sized subgroups!")
print ("Choose another size for subgroups!")
return PHI_jack, PHI_pseudo, PHI_sub
else:
# estimator of undisturbed spot check
if phi == 'MEA':
phi_sc = np.mean(X)
elif phi == 'VAR':
phi_sc = np.var(X)
elif phi == 'MED':
phi_sc = np.median(X)
# estimators of subgroups
PHI_pseudo = []
PHI_sub = []
for i in range(0, g - 1):
# subgroup i, remove i-th sample
xx = X[:]
del xx[i]
# calculate estimators of disturbed spot check
if phi == 'MEA':
phi_sub = np.mean(xx)
elif phi == 'VAR':
phi_sub = np.var(xx)
elif phi == 'MED':
phi_sub = np.median(xx)
PHI_sub.append(phi_sub)
# pseudo values
phi_pseudo = g * phi_sc - ((g - 1) * phi_sub)
PHI_pseudo.append(phi_pseudo)
# jackknife estimator
PHI_jack = np.mean(PHI_pseudo)
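For reference, the pseudo-value construction above in its delete-one form (h = 1 assumed); note that a divisibility test such as len(X) % h == 0 is the robust way to check for equal-sized subgroups:

import numpy as np

def jackknife_mean(X):
    # delete-one jackknife of the sample mean via pseudo values
    X = list(X)
    g = len(X)
    phi_sc = np.mean(X)  # estimator on the undisturbed sample
    pseudo = []
    for i in range(g):
        phi_sub = np.mean(X[:i] + X[i + 1:])           # leave sample i out
        pseudo.append(g * phi_sc - (g - 1) * phi_sub)  # pseudo value
    return np.mean(pseudo)  # jackknife estimator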
@@ -901,17 +902,17 @@ def checkZ4S(X, pick, zfac, checkwin, iplot):
# vertical P-coda level must exceed horizontal P-coda level
# zfac times encodalevel
if zcodalevel < minsiglevel:
print ("checkZ4S: Maybe S onset? Skip this P pick!")
print ("checkZ4S: Maybe S onset? Skip this P pick!")
else:
print ("checkZ4S: P onset passes checkZ4S test!")
returnflag = 1
if iplot > 1:
te = np.arange(0, edat[0].stats.npts / edat[0].stats.sampling_rate,
edat[0].stats.delta)
tn = np.arange(0, ndat[0].stats.npts / ndat[0].stats.sampling_rate,
ndat[0].stats.delta)
plt.plot(tz, z / max(z), 'k')
plt.plot(tz[isignal], z[isignal] / max(z), 'r')
plt.plot(te, edat[0].data / max(edat[0].data) + 1, 'k')
plt.plot(te[isignal], edat[0].data[isignal] / max(edat[0].data) + 1, 'r')