Merge branch 'develop' of 134.147.164.251:/data/git/pylot into develop

Sebastian Wehling-Benatelli, 2015-06-28 19:35:08 +02:00
commit a46fb88282
5 changed files with 189 additions and 31 deletions

Changed file 1 of 5:

@@ -12,7 +12,7 @@ from pylot.core.util import _getVersionString
 from pylot.core.read import Data, AutoPickParameter
 from pylot.core.pick.run_autopicking import run_autopicking
 from pylot.core.util.structure import DATASTRUCTURE
-from pylot.core.pick.utils import wadaticheck
+from pylot.core.pick.utils import wadaticheck, checkPonsets
 import pdb
 __version__ = _getVersionString()
@@ -51,6 +51,7 @@ def autoPyLoT(inputfile):
 # get some parameters for quality control from
 # parameter input file (usually autoPyLoT.in).
 wdttolerance = parameter.getParam('wdttolerance')
+mdttolerance = parameter.getParam('mdttolerance')
 iplot = parameter.getParam('iplot')
 data = Data()
@@ -105,10 +106,11 @@ def autoPyLoT(inputfile):
 allonsets[station] = picks
 # quality control
-# jackknife on P onset times
+# median check and jackknife on P onset times
+checkedonsetsjk = checkPonsets(allonsets, mdttolerance, iplot)
 # check S-P times (Wadati)
-checkedonsets = wadaticheck(allonsets, wdttolerance, iplot)
-# jackknife on S onset times
+checkedonsetwd = wadaticheck(checkedonsetsjk, wdttolerance, iplot)
 print '------------------------------------------'
 print '-----Finished event %s!-----' % event
 print '------------------------------------------'
@@ -128,11 +130,12 @@ def autoPyLoT(inputfile):
 station = wfdat[0].stats.station
 allonsets = {station: picks}
 for i in range(len(wfdat)):
+#for i in range(0,10):
 stationID = wfdat[i].stats.station
 #check if station has already been processed
 if stationID not in procstats:
 procstats.append(stationID)
-#find corresponding streams
+# find corresponding streams
 statdat = wfdat.select(station=stationID)
 ######################################################
 # get onset times and corresponding picking parameters
@@ -142,11 +145,12 @@ def autoPyLoT(inputfile):
 station = stationID
 allonsets[station] = picks
-#quality control
-#jackknife on P onset times
-#check S-P times (Wadati)
-checkedonsets = wadaticheck(allonsets, wdttolerance, iplot)
-#jackknife on S onset times
+# quality control
+# median check and jackknife on P onset times
+checkedonsetsjk = checkPonsets(allonsets, mdttolerance, iplot)
+# check S-P times (Wadati)
+checkedonsetswd = wadaticheck(checkedonsetsjk, wdttolerance, iplot)
 print '------------------------------------------'
 print '-------Finished event %s!-------' % parameter.getParam('eventID')
 print '------------------------------------------'
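In both branches of autoPyLoT the quality control is now a two-stage chain: checkPonsets first screens the P onsets against their median and a jackknife test, and wadaticheck then validates the S-P times of the surviving picks. A minimal sketch of the new call order (parameter values taken from the updated .in files, the allonsets dictionary abbreviated; not part of the commit):

    # sketch only: the reworked QC chain in autoPyLoT
    mdttolerance = 2.5   # [s] max. deviation of a P pick from the median
    wdttolerance = 0.5   # [s] max. deviation from the Wadati fit
    iplot = 0            # suppress control plots

    checkedonsetsjk = checkPonsets(allonsets, mdttolerance, iplot)
    checkedonsetswd = wadaticheck(checkedonsetsjk, wdttolerance, iplot)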

Changed file 2 of 5:

@@ -90,10 +90,8 @@ ARH #algoS# %choose algorithm for S-onset
 70 #minpercent# %required percentage of samples higher than threshold
 #check for spuriously picked S-onsets#
 3.0 #zfac# %P-amplitude must exceed zfac times RMS-S amplitude
-#jackknife-processing for P-picks#
-3 #thresholdweight# %minimum required weight of picks
-3 #dttolerance# %maximum allowed deviation of P picks from median [s]
-4 #minstats# %minimum number of stations with reliable P picks
+#check statistics of P onsets#
+2.5 #mdttolerance# %maximum allowed deviation of P picks from median [s]
 #wadati check#
-0.8 #wdttolerance# %maximum allowed deviation from Wadati-diagram
+0.5 #wdttolerance# %maximum allowed deviation from Wadati-diagram
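The single mdttolerance value replaces the three former jackknife settings (thresholdweight, dttolerance, minstats): a P pick now simply has to lie within mdttolerance seconds of the median of all P onsets, as implemented in checkPonsets further below. Schematically, with invented toy onset times:

    import numpy as np

    Ppicks = np.array([12.1, 12.3, 11.9, 12.2, 19.4])  # toy P onsets [s]
    mdttolerance = 2.5                                 # as set above
    pmedian = np.median(Ppicks)
    good = np.abs(Ppicks - pmedian) <= mdttolerance    # last pick fails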

Changed file 3 of 5:

@@ -36,7 +36,7 @@ HYPOSAT #locrt# %location routine used ("HYPO
 3 10 #bpz1# %lower/upper corner freq. of first band pass filter Z-comp. [Hz]
 3 12 #bpz2# %lower/upper corner freq. of second band pass filter Z-comp. [Hz]
 3 8 #bph1# %lower/upper corner freq. of first band pass filter H-comp. [Hz]
-3 6 #bph2# %lower/upper corner freq. of second band pass filter z-comp. [Hz]
+3 6 #bph2# %lower/upper corner freq. of second band pass filter H-comp. [Hz]
 #special settings for calculating CF#
 %!!Be careful when editing the following!!
 #Z-component#
@@ -49,10 +49,10 @@ HOS #algoP# %choose algorithm for P-onset
 0.6 #tdet2z# %for AR-picker, length of AR determination window [s] for Z-component, 2nd pick
 0.2 #tpred2z# %for AR-picker, length of AR prediction window [s] for Z-component, 2nd pick
 0.001 #addnoise# %add noise to seismogram for stable AR prediction
-4 0.2 2.0 1.5 #tsnrz# %for HOS/AR, window lengths for SNR and slope estimation [tnoise,tsafety,tsignal,tslope] [s]
-4 #pickwinP# %for initial AIC and refined pick, length of P-pick window [s]
+5 0.2 3.0 1.5 #tsnrz# %for HOS/AR, window lengths for SNR and slope estimation [tnoise,tsafety,tsignal,tslope] [s]
+3 #pickwinP# %for initial AIC and refined pick, length of P-pick window [s]
 8 #Precalcwin# %for HOS/AR, window length [s] for recalculation of CF (relative to 1st pick)
-3.0 #aictsmooth# %for HOS/AR, take average of samples for smoothing of AIC-function [s]
+1.0 #aictsmooth# %for HOS/AR, take average of samples for smoothing of AIC-function [s]
 0.3 #tsmoothP# %for HOS/AR, take average of samples for smoothing CF [s]
 0.3 #ausP# %for HOS/AR, artificial uplift of samples (aus) of CF (P)
 1.3 #nfacP# %for HOS/AR, noise factor for noise level determination (P)
@@ -88,10 +88,8 @@ ARH #algoS# %choose algorithm for S-onset
 60 #minpercent# %required percentage of samples higher than threshold
 #check for spuriously picked S-onsets#
 3.0 #zfac# %P-amplitude must exceed zfac times RMS-S amplitude
-#jackknife-processing for P-picks#
-3 #thresholdweight# %minimum required weight of picks
-3 #dttolerance# %maximum allowed deviation of P picks from median [s]
-4 #minstats# %minimum number of stations with reliable P picks
+#check statistics of P onsets#
+35 #mdttolerance# %maximum allowed deviation of P picks from median [s]
 #wadati check#
-1.5 #wdttolerance# %maximum allowed deviation from Wadati-diagram
+2.0 #wdttolerance# %maximum allowed deviation from Wadati-diagram

Changed file 4 of 5:

@@ -1 +1 @@
-694a-dirty
+1abc-dirty

Changed file 5 of 5:

@@ -13,7 +13,7 @@ import scipy as sc
 import matplotlib.pyplot as plt
 from obspy.core import Stream, UTCDateTime
 import warnings
+import pdb
 def earllatepicker(X, nfac, TSNR, Pick1, iplot=None):
 '''
 Function to derive earliest and latest possible pick after Diehl & Kissling (2009)
@@ -155,7 +155,7 @@ def fmpicker(Xraw, Xfilt, pickwin, Pick, iplot=None):
 xraw[ipick] = xraw[ipick] - np.mean(xraw[ipick])
 xfilt[ipick] = xfilt[ipick] - np.mean(xfilt[ipick])
-# get next zero crossing after most likely pick
+# get zero crossings after most likely pick
 # initial onset is assumed to be the first zero crossing
 # first from unfiltered trace
 zc1 = []
@@ -199,7 +199,7 @@ def fmpicker(Xraw, Xfilt, pickwin, Pick, iplot=None):
 datafit1 = np.polyval(P1, xslope1)
 # now using filtered trace
-# next zero crossing after most likely pick
+# next zero crossings after most likely pick
 zc2 = []
 zc2.append(Pick)
 index2 = []
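The updated comments reflect that fmpicker collects all zero crossings in the pick window (the initial onset counting as the first one), not just the single next one. A compact way to locate sign flips, sketched on an invented array (illustration only, not the committed implementation):

    import numpy as np

    x = np.array([0.02, -0.01, -0.2, 0.4, 1.1, 0.7, -0.3])  # hypothetical trace window
    # indices where consecutive samples change sign, i.e. zero crossings
    zc = np.where(np.diff(np.sign(x)) != 0)[0]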
@@ -492,7 +492,7 @@ def wadaticheck(pickdic, dttolerance, iplot):
 wddiff = abs(pickdic[key]['SPt'] - wdfit[ii])
 ii += 1
 # check if deviation is larger than adjusted tolerance
-if wddiff >= dttolerance:
+if wddiff > dttolerance:
 # mark onset and downgrade S-weight to 9
 # (not used anymore)
 marker = 'badWadatiCheck'
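For context, wadaticheck fits a straight line to the Wadati diagram (S-P time over P onset time); wdfit above holds the fitted values, and stations whose S-P time deviates from the fit by more than wdttolerance are downgraded. The screening step, sketched with invented times and the new tolerance of 2.0 s:

    import numpy as np

    p_times = np.array([10.0, 12.0, 14.0, 16.0, 18.0, 20.0])  # hypothetical P onsets [s]
    sp_times = np.array([2.5, 3.0, 3.5, 4.0, 4.5, 10.0])      # hypothetical S-P times [s]
    wdfit = np.polyval(np.polyfit(p_times, sp_times, 1), p_times)
    bad = np.abs(sp_times - wdfit) > 2.0   # only the last S-P time is flagged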
@@ -526,7 +526,7 @@ def wadaticheck(pickdic, dttolerance, iplot):
 print 'wadaticheck: Not enough S-P times available for reliable regression!'
 print 'Skip wadati check!'
 wfitflag = 1
-iplot=2
 # plot results
 if iplot > 1:
 plt.figure(iplot)
@@ -615,11 +615,13 @@ def checksignallength(X, pick, TSNR, minsiglength, nfac, minpercent, iplot):
 if iplot == 2:
 plt.figure(iplot)
 p1, = plt.plot(t, x, 'k')
+p2, = plt.plot(t[inoise], e[inoise], 'c')
+p3, = plt.plot(t[isignal], e[isignal], 'r')
 p2, = plt.plot(t[inoise], e[inoise])
 p3, = plt.plot(t[isignal], e[isignal], 'r')
 p4, = plt.plot([t[isignal[0]], t[isignal[len(isignal)-1]]], \
 [minsiglevel, minsiglevel], 'g')
-p5, = plt.plot([pick, pick], [min(x), max(x)], 'c')
+p5, = plt.plot([pick, pick], [min(x), max(x)], 'b', linewidth=2)
 plt.legend([p1, p2, p3, p4, p5], ['Data', 'Envelope Noise Window', \
 'Envelope Signal Window', 'Minimum Signal Level', \
 'Onset'], loc='best')
@@ -633,6 +635,162 @@ def checksignallength(X, pick, TSNR, minsiglength, nfac, minpercent, iplot):
 return returnflag
+
+
+def checkPonsets(pickdic, dttolerance, iplot):
+    '''
+    Function to check statistics of P-onset times: controls deviation from
+    the median (maximum allowed deviation = dttolerance) and applies a
+    pseudo-bootstrapping jackknife.
+
+    : param: pickdic, dictionary containing picks and quality parameters
+    : type:  dictionary
+
+    : param: dttolerance, maximum allowed deviation of a P-onset time from
+             the median of all P onsets
+    : type:  float
+
+    : param: iplot, if iplot > 1, the control plot is shown
+    : type:  int
+    '''
+
+    checkedonsets = pickdic
+
+    # search for good quality P picks
+    Ppicks = []
+    stations = []
+    for key in pickdic:
+        if pickdic[key]['P']['weight'] < 4:
+            # add P onsets to list
+            UTCPpick = UTCDateTime(pickdic[key]['P']['mpp'])
+            Ppicks.append(UTCPpick.timestamp)
+            stations.append(key)
+
+    # apply jackknife bootstrapping on variance of P onsets
+    print 'checkPonsets: Apply jackknife bootstrapping on P-onset times ...'
+    [xjack, PHI_pseudo, PHI_sub] = jackknife(Ppicks, 'VAR', 1)
+    # get pseudo variances smaller than average variances;
+    # these picks passed the jackknife test
+    ij = np.where(PHI_pseudo <= xjack)
+    # these picks did not pass the jackknife test
+    badjk = np.where(PHI_pseudo > xjack)
+    badjkstations = np.array(stations)[badjk]
+
+    # calculate median from these picks
+    pmedian = np.median(np.array(Ppicks)[ij])
+    # find picks that deviate less than dttolerance from the median
+    ii = np.where(abs(np.array(Ppicks)[ij] - pmedian) <= dttolerance)
+    jj = np.where(abs(np.array(Ppicks)[ij] - pmedian) > dttolerance)
+    igood = ij[0][ii]
+    ibad = ij[0][jj]
+    goodstations = np.array(stations)[igood]
+    badstations = np.array(stations)[ibad]
+
+    print 'checkPonsets: Skipped %d P onsets out of %d' % (len(badstations) \
+          + len(badjkstations), len(stations))
+
+    goodmarker = 'goodPonsetcheck'
+    badmarker = 'badPonsetcheck'
+    badjkmarker = 'badjkcheck'
+    for i in range(0, len(goodstations)):
+        # mark P onset as checked and keep P weight
+        pickdic[goodstations[i]]['P']['marked'] = goodmarker
+    for i in range(0, len(badstations)):
+        # mark P onset and downgrade P weight to 9
+        # (not used anymore)
+        pickdic[badstations[i]]['P']['marked'] = badmarker
+        pickdic[badstations[i]]['P']['weight'] = 9
+    for i in range(0, len(badjkstations)):
+        # mark P onset and downgrade P weight to 9
+        # (not used anymore)
+        pickdic[badjkstations[i]]['P']['marked'] = badjkmarker
+        pickdic[badjkstations[i]]['P']['weight'] = 9
+
+    checkedonsets = pickdic
+
+    iplot = 2
+    if iplot > 1:
+        p1, = plt.plot(np.arange(0, len(Ppicks)), Ppicks, 'r+', markersize=14)
+        p2, = plt.plot(igood, np.array(Ppicks)[igood], 'g*', markersize=14)
+        p3, = plt.plot([0, len(Ppicks) - 1], [pmedian, pmedian], 'g', \
+                       linewidth=2)
+        for i in range(0, len(Ppicks)):
+            plt.text(i, Ppicks[i] + 0.2, stations[i])
+        plt.xlabel('Number of P Picks')
+        plt.ylabel('Onset Time [s] from 1.1.1970')
+        plt.legend([p1, p2, p3], ['Skipped P Picks', 'Good P Picks', 'Median'], \
+                   loc='best')
+        plt.title('Check P Onsets')
+        plt.show()
+        raw_input()
+
+    return checkedonsets
+
+
+def jackknife(X, phi, h):
+    '''
+    Function to calculate the jackknife estimator for a given quantity,
+    a special type of bootstrapping. Returns the jackknife estimator
+    PHI_jack, the pseudo values PHI_pseudo and the subgroup parameters
+    PHI_sub.
+
+    : param: X, given quantity
+    : type:  list
+
+    : param: phi, chosen estimator, choose between:
+             "MED" for median
+             "MEA" for arithmetic mean
+             "VAR" for variance
+    : type:  string
+
+    : param: h, size of subgroups, optional, default = 1
+    : type:  integer
+    '''
+
+    PHI_jack = None
+    PHI_pseudo = None
+    PHI_sub = None
+
+    # determine number of subgroups
+    g = len(X) / h
+
+    if type(g) is not int:
+        print 'jackknife: Cannot divide quantity X in equal sized subgroups!'
+        print 'Choose another size for subgroups!'
+        return PHI_jack, PHI_pseudo, PHI_sub
+    else:
+        # estimator of undisturbed spot check
+        if phi == 'MEA':
+            phi_sc = np.mean(X)
+        elif phi == 'VAR':
+            phi_sc = np.var(X)
+        elif phi == 'MED':
+            phi_sc = np.median(X)
+
+        # estimators of subgroups
+        PHI_pseudo = []
+        PHI_sub = []
+        for i in range(0, g - 1):
+            # subgroup i, remove i-th sample
+            xx = X[:]
+            del xx[i]
+            # calculate estimators of disturbed spot check
+            if phi == 'MEA':
+                phi_sub = np.mean(xx)
+            elif phi == 'VAR':
+                phi_sub = np.var(xx)
+            elif phi == 'MED':
+                phi_sub = np.median(xx)
+
+            PHI_sub.append(phi_sub)
+            # pseudo values
+            phi_pseudo = g * phi_sc - ((g - 1) * phi_sub)
+            PHI_pseudo.append(phi_pseudo)
+
+        # jackknife estimator
+        PHI_jack = np.mean(PHI_pseudo)
+
+    return PHI_jack, PHI_pseudo, PHI_sub
+
+
 if __name__ == '__main__':
     import doctest
     doctest.testmod()
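To make the pseudo-value logic of jackknife concrete, here is a standalone sketch (toy numbers, not from the commit) of the h = 1 case used by checkPonsets, where each subgroup leaves one sample out and the variance is the chosen estimator:

    import numpy as np

    X = [10.1, 10.3, 10.2, 10.25, 14.0]   # toy onsets; the last one is an outlier
    g = len(X)
    phi_sc = np.var(X)                    # estimator on the undisturbed sample
    PHI_pseudo = []
    for i in range(g):
        xx = X[:i] + X[i + 1:]            # leave-one-out subgroup
        PHI_pseudo.append(g * phi_sc - (g - 1) * np.var(xx))
    PHI_jack = np.mean(PHI_pseudo)        # jackknife estimator

    # Removing the outlier collapses the subgroup variance, so its pseudo
    # value is by far the largest; checkPonsets flags exactly those picks
    # whose pseudo variance exceeds the jackknife estimator.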