Compare commits


No commits in common. "d0fbb91ffef042479033237ce2fc18d22bca4e6a" and "e6a4ba7ee2a226b7d2a36c4e2907b2c6ffc72df3" have entirely different histories.

13 changed files with 22 additions and 65934 deletions

.gitignore

@@ -2,4 +2,3 @@
*~
.idea
pylot/RELEASE-VERSION
/tests/test_autopicker/dmt_database_test/


@@ -1031,23 +1031,22 @@ class MainWindow(QMainWindow):
if not fname:
    return
qmb = QMessageBox(self, icon=QMessageBox.Question,
                  text='Do you want to overwrite this data?',)
overwrite_button = qmb.addButton('Overwrite', QMessageBox.YesRole)
merge_button = qmb.addButton('Merge', QMessageBox.NoRole)
qmb.exec_()
if qmb.clickedButton() == overwrite_button:
    merge_strategy = 'Overwrite'
elif qmb.clickedButton() == merge_button:
    merge_strategy = 'Merge'
else:
    return
if not event:
    event = self.get_current_event()
if event.picks:
    qmb = QMessageBox(self, icon=QMessageBox.Question,
                      text='Do you want to overwrite the data?',)
    overwrite_button = qmb.addButton('Overwrite', QMessageBox.YesRole)
    merge_button = qmb.addButton('Merge', QMessageBox.NoRole)
    qmb.exec_()
    if qmb.clickedButton() == overwrite_button:
        merge_strategy = 'Overwrite'
    elif qmb.clickedButton() == merge_button:
        merge_strategy = 'Merge'
    else:
        return
data = Data(self, event)
try:
    data_new = Data(self, evtdata=str(fname))
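
The two variants above differ in when the overwrite/merge prompt appears: one asks unconditionally right after the file name check, the other only when the current event already carries picks (if event.picks:). Since the dialog itself is identical in both, it could live in a small helper. A minimal sketch, assuming PyQt5 and a running Qt application; the name ask_merge_strategy is hypothetical and not part of PyLoT, and the setter calls replace the keyword-argument constructor used in the hunk:

from PyQt5.QtWidgets import QMessageBox

def ask_merge_strategy(parent):
    """Ask whether existing event data should be overwritten or merged.

    Returns 'Overwrite', 'Merge', or None if the dialog was dismissed.
    """
    qmb = QMessageBox(parent)
    qmb.setIcon(QMessageBox.Question)
    qmb.setText('Do you want to overwrite the data?')
    overwrite_button = qmb.addButton('Overwrite', QMessageBox.YesRole)
    merge_button = qmb.addButton('Merge', QMessageBox.NoRole)
    qmb.exec_()
    if qmb.clickedButton() == overwrite_button:
        return 'Overwrite'
    if qmb.clickedButton() == merge_button:
        return 'Merge'
    return None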


@@ -184,15 +184,15 @@ def autoPyLoT(input_dict=None, parameter=None, inputfile=None, fnames=None, even
if not input_dict:
    # started in production mode
    datapath = datastructure.expandDataPath()
    if fnames in [None, 'None'] and parameter['eventID'] == '*':
    if fnames == 'None' and parameter['eventID'] == '*':
        # multiple event processing
        # read each event in database
        events = [event for event in glob.glob(os.path.join(datapath, '*')) if
                  (os.path.isdir(event) and not event.endswith('EVENTS-INFO'))]
    elif fnames in [None, 'None'] and parameter['eventID'] != '*' and not type(parameter['eventID']) == list:
    elif fnames == 'None' and parameter['eventID'] != '*' and not type(parameter['eventID']) == list:
        # single event processing
        events = glob.glob(os.path.join(datapath, parameter['eventID']))
    elif fnames in [None, 'None'] and type(parameter['eventID']) == list:
    elif fnames == 'None' and type(parameter['eventID']) == list:
        # multiple event processing
        events = []
        for eventID in parameter['eventID']:
@@ -234,15 +234,12 @@
    data.get_evt_data().path = eventpath
    print('Reading event data from filename {}...'.format(filename))
except Exception as e:
    if type(e) == FileNotFoundError:
        print('Creating new event file.')
    else:
        print('Could not read event from file {}: {}'.format(filename, e))
    print('Could not read event from file {}: {}'.format(filename, e))
    data = Data()
    pylot_event = Event(eventpath) # event should be path to event directory
    data.setEvtData(pylot_event)
if fnames in [None, 'None']:
    data.setWFData(glob.glob(os.path.join(event_datapath, '*')))
if fnames == 'None':
    data.setWFData(glob.glob(os.path.join(datapath, event_datapath, '*')))
# the following is necessary because within
# multiple event processing no event ID is provided
# in autopylot.in
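
One variant of these checks accepts both a real None and the string 'None' for fnames, the other only the literal string. Accepting both is the safer test when the value may come from a parameter file or command line, where unset options are often serialized as the string 'None'. A minimal sketch of such a guard; the helper name is_unset is hypothetical and not part of autoPyLoT:

def is_unset(value):
    """Treat both a missing value and its string serialization as 'not set'."""
    return value in (None, 'None')

# usage mirroring the hunk above
for fnames in (None, 'None', ['trace1.mseed']):
    print(fnames, '->', 'scan datapath' if is_unset(fnames) else 'use given file names')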


@@ -17,10 +17,7 @@ autoregressive prediction: application ot local and regional distances, Geophys.
:author: MAGS2 EP3 working group
"""
import numpy as np
try:
    from scipy.signal import tukey
except ImportError:
    from scipy.signal.windows import tukey
from scipy import signal
from obspy.core import Stream
from pylot.core.pick.utils import PickingFailedException
@@ -230,7 +227,7 @@ class AICcf(CharacteristicFunction):
ind = np.where(~np.isnan(xnp))[0]
if ind.size:
    xnp[:ind[0]] = xnp[ind[0]]
xnp = tukey(len(xnp), alpha=0.05) * xnp
xnp = signal.tukey(len(xnp), alpha=0.05) * xnp
xnp = xnp - np.mean(xnp)
datlen = len(xnp)
k = np.arange(1, datlen)
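
The try/except import in this file exists because the Tukey window moved within SciPy: scipy.signal.tukey was deprecated and is gone in recent releases, while scipy.signal.windows.tukey is the current location, so the variant that imports from scipy import signal and calls signal.tukey(...) breaks on up-to-date SciPy. A minimal sketch of the version-tolerant import together with the taper step from the AICcf hunk above:

import numpy as np

# version-tolerant import, mirroring the try/except shown in this file
try:
    from scipy.signal import tukey            # older SciPy still exposes it here
except ImportError:
    from scipy.signal.windows import tukey    # current location of the window functions

# taper the edges of the characteristic function as in the AICcf hunk
xnp = np.random.default_rng(0).normal(size=1024)
xnp = tukey(len(xnp), alpha=0.05) * xnp
xnp = xnp - np.mean(xnp)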


@@ -27,10 +27,6 @@ class Metadata(object):
# saves which metadata files are from obspy dmt
self.obspy_dmt_invs = []
if inventory:
    # make sure that no accidental backslashes mess up the path
    if isinstance(inventory, str):
        inventory = inventory.replace('\\', '/')
    inventory = os.path.abspath(inventory)
    if os.path.isdir(inventory):
        self.add_inventory(inventory)
    if os.path.isfile(inventory):
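
The lines present only in the longer version of this hunk normalize a user-supplied inventory path before the isdir/isfile checks: accidental backslashes (typically from a Windows-style path) are converted to forward slashes and the path is made absolute. A standalone sketch of that normalization; normalize_inventory_path is a hypothetical name, not PyLoT API:

import os

def normalize_inventory_path(inventory):
    """Convert accidental backslashes and return an absolute path."""
    if isinstance(inventory, str):
        inventory = inventory.replace('\\', '/')
    return os.path.abspath(inventory)

print(normalize_inventory_path('metadata\\inventory'))  # e.g. /current/dir/metadata/inventory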

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,99 +0,0 @@
%This is a parameter input file for PyLoT/autoPyLoT.
%All main and special settings regarding data handling
%and picking are to be set here!
%Parameters are optimized for %extent data sets!
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#main settings#
dmt_database_test #datapath# %data path
20171010_063224.a #eventID# %event ID for single event processing (* for all events found in database)
#invdir# %full path to inventory or dataless-seed file
PILOT #datastructure# %choose data structure
True #apverbose# %choose 'True' or 'False' for terminal output
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#NLLoc settings#
None #nllocbin# %path to NLLoc executable
None #nllocroot# %root of NLLoc-processing directory
None #phasefile# %name of autoPyLoT-output phase file for NLLoc
None #ctrfile# %name of autoPyLoT-output control file for NLLoc
ttime #ttpatter# %pattern of NLLoc ttimes from grid
AUTOLOC_nlloc #outpatter# %pattern of NLLoc-output file
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#parameters for seismic moment estimation#
3530.0 #vp# %average P-wave velocity
2500.0 #rho# %average rock density [kg/m^3]
300.0 0.8 #Qp# %quality factor for P waves (Qp*f^a); list(Qp, a)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#settings local magnitude#
1.0 1.0 1.0 #WAscaling# %Scaling relation (log(Ao)+Alog(r)+Br+C) of Wood-Anderson amplitude Ao [nm] If zeros are set, original Richter magnitude is calculated!
1.0 1.0 #magscaling# %Scaling relation for derived local magnitude [a*Ml+b]. If zeros are set, no scaling of network magnitude is applied!
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#filter settings#
0.03 0.03 #minfreq# %Lower filter frequency [P, S]
0.5 0.5 #maxfreq# %Upper filter frequency [P, S]
4 4 #filter_order# %filter order [P, S]
bandpass bandpass #filter_type# %filter type (bandpass, bandstop, lowpass, highpass) [P, S]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#common settings picker#
global #extent# %extent of array ("local", "regional" or "global")
-100.0 #pstart# %start time [s] for calculating CF for P-picking (if TauPy: seconds relative to estimated onset)
50.0 #pstop# %end time [s] for calculating CF for P-picking (if TauPy: seconds relative to estimated onset)
-50.0 #sstart# %start time [s] relative to P-onset for calculating CF for S-picking
50.0 #sstop# %end time [s] after P-onset for calculating CF for S-picking
True #use_taup# %use estimated traveltimes from TauPy for calculating windows for CF
ak135 #taup_model# %Define TauPy model for traveltime estimation. Possible values: 1066a, 1066b, ak135, ak135f, herrin, iasp91, jb, prem, pwdk, sp6
P,Pdiff,S,SKS #taup_phases# %Specify possible phases for TauPy (comma separated). See Obspy TauPy documentation for possible values.
0.03 0.5 #bpz1# %lower/upper corner freq. of first band pass filter Z-comp. [Hz]
0.01 0.5 #bpz2# %lower/upper corner freq. of second band pass filter Z-comp. [Hz]
0.03 0.5 #bph1# %lower/upper corner freq. of first band pass filter H-comp. [Hz]
0.01 0.5 #bph2# %lower/upper corner freq. of second band pass filter z-comp. [Hz]
#special settings for calculating CF#
%!!Edit the following only if you know what you are doing!!%
#Z-component#
HOS #algoP# %choose algorithm for P-onset determination (HOS, ARZ, or AR3)
300.0 #tlta# %for HOS-/AR-AIC-picker, length of LTA window [s]
4 #hosorder# %for HOS-picker, order of Higher Order Statistics
2 #Parorder# %for AR-picker, order of AR process of Z-component
16.0 #tdet1z# %for AR-picker, length of AR determination window [s] for Z-component, 1st pick
10.0 #tpred1z# %for AR-picker, length of AR prediction window [s] for Z-component, 1st pick
12.0 #tdet2z# %for AR-picker, length of AR determination window [s] for Z-component, 2nd pick
6.0 #tpred2z# %for AR-picker, length of AR prediction window [s] for Z-component, 2nd pick
0.001 #addnoise# %add noise to seismogram for stable AR prediction
60.0 5.0 20.0 12.0 #tsnrz# %for HOS/AR, window lengths for SNR-and slope estimation [tnoise, tsafetey, tsignal, tslope] [s]
50.0 #pickwinP# %for initial AIC pick, length of P-pick window [s]
30.0 #Precalcwin# %for HOS/AR, window length [s] for recalculation of CF (relative to 1st pick)
2.0 #aictsmooth# %for HOS/AR, take average of samples for smoothing of AIC-function [s]
2.0 #tsmoothP# %for HOS/AR, take average of samples in this time window for smoothing CF [s]
0.006 #ausP# %for HOS/AR, artificial uplift of samples (aus) of CF (P)
2.0 #nfacP# %for HOS/AR, noise factor for noise level determination (P)
#H-components#
ARH #algoS# %choose algorithm for S-onset determination (ARH or AR3)
12.0 #tdet1h# %for HOS/AR, length of AR-determination window [s], H-components, 1st pick
6.0 #tpred1h# %for HOS/AR, length of AR-prediction window [s], H-components, 1st pick
8.0 #tdet2h# %for HOS/AR, length of AR-determinaton window [s], H-components, 2nd pick
4.0 #tpred2h# %for HOS/AR, length of AR-prediction window [s], H-components, 2nd pick
4 #Sarorder# %for AR-picker, order of AR process of H-components
100.0 #Srecalcwin# %for AR-picker, window length [s] for recalculation of CF (2nd pick) (H)
195.0 #pickwinS# %for initial AIC pick, length of S-pick window [s]
60.0 10.0 30.0 12.0 #tsnrh# %for ARH/AR3, window lengths for SNR-and slope estimation [tnoise, tsafetey, tsignal, tslope] [s]
22.0 #aictsmoothS# %for AIC-picker, take average of samples in this time window for smoothing of AIC-function [s]
20.0 #tsmoothS# %for AR-picker, take average of samples for smoothing CF [s] (S)
0.001 #ausS# %for HOS/AR, artificial uplift of samples (aus) of CF (S)
2.0 #nfacS# %for AR-picker, noise factor for noise level determination (S)
#first-motion picker#
1 #minfmweight# %minimum required P weight for first-motion determination
3.0 #minFMSNR# %miniumum required SNR for first-motion determination
10.0 #fmpickwin# %pick window [s] around P onset for calculating zero crossings
#quality assessment#
0.1 0.2 0.4 0.8 #timeerrorsP# %discrete time errors [s] corresponding to picking weights [0 1 2 3] for P
4.0 8.0 16.0 32.0 #timeerrorsS# %discrete time errors [s] corresponding to picking weights [0 1 2 3] for S
0.005 #minAICPslope# %below this slope [counts/s] the initial P pick is rejected
1.1 #minAICPSNR# %below this SNR the initial P pick is rejected
0.002 #minAICSslope# %below this slope [counts/s] the initial S pick is rejected
1.3 #minAICSSNR# %below this SNR the initial S pick is rejected
20.0 #minsiglength# %length of signal part for which amplitudes must exceed noiselevel [s]
1.0 #noisefactor# %noiselevel*noisefactor=threshold
10.0 #minpercent# %required percentage of amplitudes exceeding threshold
0.1 #zfac# %P-amplitude must exceed at least zfac times RMS-S amplitude
100.0 #mdttolerance# %maximum allowed deviation of P picks from median [s]
50.0 #wdttolerance# %maximum allowed deviation from Wadati-diagram
25.0 #jackfactor# %pick is removed if the variance of the subgroup with the pick removed is larger than the mean variance of all subgroups times safety factor
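
Every parameter line in the deleted file above follows the same layout: one or more whitespace-separated values, a #key# token, and a % description. A minimal illustration of how such a line can be split; this is an assumption-laden sketch for readability, not PyLoT's actual parameter reader:

def parse_param_line(line):
    """Split 'value(s) #key# %description' into (key, list of value strings).

    Returns None for separator lines, header comments and section titles.
    Illustrative only; PyLoT ships its own parameter parser.
    """
    line = line.strip()
    if not line or line.startswith('%') or line.startswith('#') or '#' not in line:
        return None
    values, _, rest = line.partition('#')
    key, _, _description = rest.partition('#')
    return key.strip(), values.split()

print(parse_param_line('0.03 0.5                   #bpz1#  %lower/upper corner freq.'))
# -> ('bpz1', ['0.03', '0.5'])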


@@ -1,67 +0,0 @@
import os

import pytest
from obspy import read_events

from autoPyLoT import autoPyLoT


class TestAutopickerGlobal():
    def init(self):
        self.params_infile = 'pylot_alparray_mantle_corr_stack_0.03-0.5.in'
        self.test_event_dir = 'dmt_database_test'
        self.fname_outfile_xml = os.path.join(
            self.test_event_dir, '20171010_063224.a', 'PyLoT_20171010_063224.a_autopylot.xml'
        )

        # check if the input files exist
        if not os.path.isfile(self.params_infile):
            print(f'Test input file {os.path.abspath(self.params_infile)} not found.')
            return False

        if not os.path.exists(self.test_event_dir):
            print(
                f'Test event directory not found at location "{os.path.abspath(self.test_event_dir)}". '
                f'Make sure to load it from the website first.'
            )
            return False

        return True

    def test_autopicker(self):
        assert self.init(), 'Initialization failed due to missing input files.'

        # check for output file in test directory and remove it if necessary
        if os.path.isfile(self.fname_outfile_xml):
            os.remove(self.fname_outfile_xml)

        autoPyLoT(inputfile=self.params_infile, eventid='20171010_063224.a', obspyDMT_wfpath='processed')

        # test for different known output files if they are identical or not
        compare_pickfiles(self.fname_outfile_xml, 'PyLoT_20171010_063224.a_autopylot.xml', True)
        compare_pickfiles(self.fname_outfile_xml, 'PyLoT_20171010_063224.a_saved_from_GUI.xml', True)
        compare_pickfiles(self.fname_outfile_xml, 'PyLoT_20171010_063224.a_corrected_taup_times_0.03-0.5_P.xml', False)


def compare_pickfiles(pickfile1: str, pickfile2: str, samefile: bool = True) -> None:
    """
    Compare the pick times and errors from two pick files.

    Parameters:
        pickfile1 (str): The path to the first pick file.
        pickfile2 (str): The path to the second pick file.
        samefile (bool): A flag indicating whether the two files are expected to be the same. Defaults to True.

    Returns:
        None
    """
    cat1 = read_events(pickfile1)
    cat2 = read_events(pickfile2)
    picks1 = sorted(cat1[0].picks, key=lambda pick: str(pick.waveform_id))
    picks2 = sorted(cat2[0].picks, key=lambda pick: str(pick.waveform_id))
    pick_times1 = [pick.time for pick in picks1]
    pick_times2 = [pick.time for pick in picks2]
    pick_terrs1 = [pick.time_errors for pick in picks1]
    pick_terrs2 = [pick.time_errors for pick in picks2]

    # check if times and errors are identical or not depending on the samefile flag
    assert (pick_times1 == pick_times2) is samefile, 'Pick times error'
    assert (pick_terrs1 == pick_terrs2) is samefile, 'Pick time errors errors'