12 Commits

SHA1 Message Date
8a1da72d1c [update] increased robustness of correlation picker: if autoPyLoT fails on the stacked trace, it tries to pick other stacked traces. Ignoring failed autoPyLoT picks can be enabled manually if absolute onsets are not important (e.g. when only pick differences matter) 2025-04-03 11:23:17 +02:00
c989b2abc9 [update] re-implemented code that was lost when corr_pick was integrated into pylot: use all reference-pick-corrected theoretical picks as the correlation reference times instead of only the reference picks 2025-03-19 15:43:24 +01:00
2dc27013b2 Update README.md 2025-03-06 12:18:41 +01:00
4bd2e78259 [bugfix] explicitly pass parameters to "picksdict_from_picks" to calculate pick weights. Otherwise no weights could be calculated. Closes #40 2024-11-20 17:16:15 +01:00
468a7721c8 [bugfix] changed default behavior of PylotParameter class to use default Parameter if called without input parameters. Related to #40 2024-11-20 17:01:53 +01:00
555fb8a719 [minor] small code fixes 2024-11-20 16:57:27 +01:00
5a2a1fe990 [bugfix] flawed logic after parameter renaming corrected 2024-11-20 11:14:27 +01:00
64b719fd54 [minor] increased robustness of correlation algorithm for unknown exceptions... 2024-11-20 11:13:15 +01:00
71d4269a4f [bugfix] reverting code from commit 3069e7d5. Checking for coordinates in dataless Parser IS necessary to make sure correct Metadata were found. Fixes #37.
[minor] Commented out search for network name in metadata filename, which was considered unsafe
2024-10-09 17:07:22 +02:00
81e34875b9 [update] small changes increasing code robustness 2024-10-09 16:59:12 +02:00
d7ee820de3 [minor] adding missing image to doc 2024-09-30 16:40:41 +02:00
621cbbfbda [minor] modify README 2024-09-18 16:59:34 +02:00
12 changed files with 165 additions and 85 deletions

PyLoT.py (Normal file → Executable file)

@@ -1011,7 +1011,7 @@ class MainWindow(QMainWindow):
         for event in events:
             for filename in filenames:
                 if os.path.isfile(filename) and event.pylot_id in filename:
-                    self.load_data(filename, draw=False, event=event, ask_user=True, merge_strategy=sld.merge_strategy)
+                    self.load_data(filename, draw=False, event=event, ask_user=False, merge_strategy=sld.merge_strategy)
                     refresh = True
         if not refresh:
             return
@@ -1020,8 +1020,8 @@
         self.fill_eventbox()
         self.setDirty(True)

-    def load_data(self, fname=None, loc=False, draw=True, event=None, ask_user=False, merge_strategy='Overwrite'):
-        if not ask_user:
+    def load_data(self, fname=None, loc=False, draw=True, event=None, ask_user=True, merge_strategy='Overwrite',):
+        if ask_user:
             if not self.okToContinue():
                 return
         if fname is None:
@@ -1034,7 +1034,7 @@
         if not event:
             event = self.get_current_event()
-        if event.picks:
+        if event.picks and ask_user:
             qmb = QMessageBox(self, icon=QMessageBox.Question,
                               text='Do you want to overwrite the data?',)
             overwrite_button = qmb.addButton('Overwrite', QMessageBox.YesRole)
@@ -3590,7 +3590,7 @@
     def calc_magnitude(self):
         self.init_metadata()
         if not self.metadata:
-            return None
+            return []

         wf_copy = self.get_data().getWFData().copy()
@@ -3599,6 +3599,10 @@
         for station in np.unique(list(self.getPicks('manual').keys()) + list(self.getPicks('auto').keys())):
             wf_select += wf_copy.select(station=station)
+        if not wf_select:
+            logging.warning('Empty Stream in calc_magnitude. Return.')
+            return []
         corr_wf = restitute_data(wf_select, self.metadata)
         # calculate moment magnitude
         moment_mag = MomentMagnitude(corr_wf, self.get_data().get_evt_data(), self.inputs.get('vp'),
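Editor's note: the changes above make load_data() ask for confirmation by default (ask_user=True), while the batch import loop explicitly passes ask_user=False so a dialog is not raised once per file. A minimal sketch of this gating pattern, using a plain input() prompt as a stand-in for PyLoT's QMessageBox (function and names are illustrative, not the repo's API):

def load_data(fname, existing_picks=None, ask_user=True):
    """Stand-in for MainWindow.load_data: confirm before overwriting picks."""
    if ask_user and existing_picks:
        answer = input(f'Overwrite picks with data from {fname}? [y/N] ')
        if answer.lower() != 'y':
            return existing_picks  # user declined: keep the old picks
    return {'source': fname}  # pretend these are the freshly loaded picks

# batch import: one decision for the whole loop instead of one dialog per file
for fname in ('event_1.xml', 'event_2.xml'):
    picks = load_data(fname, existing_picks={'old': True}, ask_user=False)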

README.md

@@ -63,6 +63,9 @@ You may need to do some modifications to these files. Especially folder names sh
 PyLoT has been tested on Mac OSX (10.11), Debian Linux 8 and on Windows 10/11.

+## Example Dataset
+An example dataset with waveform data, metadata and automatic picks in the obspy-dmt dataset format for testing the teleseismic picking can be found at https://zenodo.org/doi/10.5281/zenodo.13759803
+
 ## Release notes

 #### Features:
@@ -83,13 +86,13 @@ Current release is still in development progress and has several issues. We are
 ## Staff

-Original author(s): M. Rische, S. Wehling-Benatelli, L. Kueperkoch, M. Bischoff (PILOT)
-Developer(s): S. Wehling-Benatelli, M. Paffrath, L. Kueperkoch, K. Olbert, M. Bischoff, C. Wollin, M. Rische, D. Arnold, K. Cökerim, S. Zimmermann
+Developer(s): M. Paffrath, S. Wehling-Benatelli, L. Kueperkoch, D. Arnold, K. Cökerim, K. Olbert, M. Bischoff, C. Wollin, M. Rische, S. Zimmermann
+Original author(s): M. Rische, S. Wehling-Benatelli, L. Kueperkoch, M. Bischoff (PILOT)

 Others: A. Bruestle, T. Meier, W. Friederich

 [ObsPy]: http://github.com/obspy/obspy/wiki

-September 2024
+March 2025

Binary file not shown (image added to docs, 64 KiB).


@@ -36,8 +36,17 @@ class Data(object):
         loaded event. Container object holding, e.g. phase arrivals, etc.
     """

-    def __init__(self, parent=None, evtdata=None):
+    def __init__(self, parent=None, evtdata=None, picking_parameter=None):
         self._parent = parent
+        if not picking_parameter:
+            if hasattr(parent, '_inputs'):
+                picking_parameter = parent._inputs
+            else:
+                logging.warning('No picking parameters found! Using default input parameters!!!')
+                picking_parameter = PylotParameter()
+        self.picking_parameter = picking_parameter
         if self.getParent():
             self.comp = parent.getComponent()
         else:
@@ -403,23 +412,19 @@ class Data(object):
                 not implemented: {1}'''.format(evtformat, e))
         if fnext == '.cnv':
             try:
-                velest.export(picks_copy, fnout + fnext, eventinfo=self.get_evt_data())
+                velest.export(picks_copy, fnout + fnext, self.picking_parameter, eventinfo=self.get_evt_data())
             except KeyError as e:
                 raise KeyError('''{0} export format
                 not implemented: {1}'''.format(evtformat, e))
         if fnext == '_focmec.in':
             try:
-                parameter = PylotParameter()
-                logging.warning('Using default input parameter')
-                focmec.export(picks_copy, fnout + fnext, parameter, eventinfo=self.get_evt_data())
+                focmec.export(picks_copy, fnout + fnext, self.picking_parameter, eventinfo=self.get_evt_data())
             except KeyError as e:
                 raise KeyError('''{0} export format
                 not implemented: {1}'''.format(evtformat, e))
         if fnext == '.pha':
             try:
-                parameter = PylotParameter()
-                logging.warning('Using default input parameter')
-                hypodd.export(picks_copy, fnout + fnext, parameter, eventinfo=self.get_evt_data())
+                hypodd.export(picks_copy, fnout + fnext, self.picking_parameter, eventinfo=self.get_evt_data())
             except KeyError as e:
                 raise KeyError('''{0} export format
                 not implemented: {1}'''.format(evtformat, e))
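Editor's note: the constructor change above establishes a resolution order for picking parameters that the export branches then reuse via self.picking_parameter, instead of silently falling back to fresh defaults per export. A compact sketch of that precedence, with stand-in names (not PyLoT's actual Data/PylotParameter implementations):

import logging

def resolve_picking_parameter(explicit=None, parent=None, default_factory=dict):
    """Prefer an explicit parameter object, then the parent's _inputs, then defaults."""
    if explicit:
        return explicit
    if hasattr(parent, '_inputs'):
        return parent._inputs  # reuse the parameters the GUI already holds
    logging.warning('No picking parameters found! Using default input parameters!')
    return default_factory()

class Parent:
    _inputs = {'from': 'parent'}

print(resolve_picking_parameter(parent=Parent()))  # -> {'from': 'parent'}
print(resolve_picking_parameter())                 # warns, -> {}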


@@ -53,10 +53,16 @@ class PylotParameter(object):
         self.__parameter = {}
         self._verbosity = verbosity
         self._parFileCont = {}
         # io from parsed arguments alternatively
         for key, val in kwargs.items():
             self._parFileCont[key] = val
         self.from_file()
+        # if no filename or kwargs given, use default values
+        if not fnin and not kwargs:
+            self.reset_defaults()
         if fnout:
             self.export2File(fnout)
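Editor's note: with this guard, constructing the parameter object without a file or keyword arguments should now populate the defaults itself, which is what allows the explicit reset_defaults() call in readDefaultFilterInformation() below to be dropped. Hedged usage sketch, assuming PyLoT is importable and that PylotParameter lives in pylot.core.io.inputs (module path is an assumption; the parameter name 'vp' is taken from the PyLoT.py diff above):

from pylot.core.io.inputs import PylotParameter  # assumed module path

params = PylotParameter()  # no fnin, no kwargs -> reset_defaults() runs internally
print(params.get('vp'))    # default values are immediately readable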


@@ -278,7 +278,6 @@ def picksdict_from_picks(evt, parameter=None):
             weight = phase.get('weight')
             if not weight:
                 if not parameter:
-                    logging.warning('Using ')
                     logging.warning('Using default input parameter')
                     parameter = PylotParameter()
             pick.phase_hint = identifyPhase(pick.phase_hint)
@@ -513,7 +512,7 @@ def writephases(arrivals, fformat, filename, parameter=None, eventinfo=None):
         # write header
         fid.write('# EQEVENT: %s Label: EQ%s Loc: X 0.00 Y 0.00 Z 10.00 OT 0.00 \n' %
                   (parameter.get('datapath'), parameter.get('eventID')))
-        arrivals = chooseArrivals(arrivals)  # MP MP what is chooseArrivals? It is not defined anywhere
+        arrivals = chooseArrivals(arrivals)
         for key in arrivals:
             # P onsets
             if 'P' in arrivals[key]:
@@ -666,7 +665,7 @@ def writephases(arrivals, fformat, filename, parameter=None, eventinfo=None):
         fid = open("%s" % filename, 'w')
         # write header
         fid.write('%s, event %s \n' % (parameter.get('datapath'), parameter.get('eventID')))
-        arrivals = chooseArrivals(arrivals)  # MP MP what is chooseArrivals? It is not defined anywhere
+        arrivals = chooseArrivals(arrivals)
         for key in arrivals:
             # P onsets
             if 'P' in arrivals[key] and arrivals[key]['P']['mpp'] is not None:
@@ -757,11 +756,11 @@ def writephases(arrivals, fformat, filename, parameter=None, eventinfo=None):
                       cns, eventsource['longitude'], cew, eventsource['depth'], eventinfo.magnitudes[0]['mag'], ifx))
         n = 0
         # check whether arrivals are dictionaries (autoPyLoT) or pick object (PyLoT)
-        if isinstance(arrivals, dict) == False:
+        if isinstance(arrivals, dict) is False:
             # convert pick object (PyLoT) into dictionary
             evt = ope.Event(resource_id=eventinfo['resource_id'])
             evt.picks = arrivals
-            arrivals = picksdict_from_picks(evt)
+            arrivals = picksdict_from_picks(evt, parameter=parameter)
         # check for automatic and manual picks
         # prefer manual picks
         usedarrivals = chooseArrivals(arrivals)
@@ -822,7 +821,7 @@ def writephases(arrivals, fformat, filename, parameter=None, eventinfo=None):
             # convert pick object (PyLoT) into dictionary
             evt = ope.Event(resource_id=eventinfo['resource_id'])
             evt.picks = arrivals
-            arrivals = picksdict_from_picks(evt)
+            arrivals = picksdict_from_picks(evt, parameter=parameter)
             # check for automatic and manual picks
             # prefer manual picks
             usedarrivals = chooseArrivals(arrivals)
@@ -873,7 +872,7 @@ def writephases(arrivals, fformat, filename, parameter=None, eventinfo=None):
             # convert pick object (PyLoT) into dictionary
             evt = ope.Event(resource_id=eventinfo['resource_id'])
             evt.picks = arrivals
-            arrivals = picksdict_from_picks(evt)
+            arrivals = picksdict_from_picks(evt, parameter=parameter)
             # check for automatic and manual picks
             # prefer manual picks
             usedarrivals = chooseArrivals(arrivals)
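Editor's note: the fix from commit 4bd2e78259 ("explicitly pass parameters to picksdict_from_picks") matters because pick weights are derived by comparing a pick's time uncertainty against error-class bounds defined in the input parameters; without a parameter object the weight computation has no thresholds to work with. An illustrative sketch of such a class-based weighting scheme (the boundary values below are made up for the example, not PyLoT's defaults):

def weight_from_uncertainty(uncertainty, error_classes=(0.02, 0.04, 0.08, 0.16)):
    """Map an onset-time uncertainty in seconds to an integer weight, 0 = best."""
    for weight, upper_bound in enumerate(error_classes):
        if uncertainty <= upper_bound:
            return weight
    return len(error_classes)  # worse than all classes -> lowest quality

print(weight_from_uncertainty(0.03))  # -> 1
print(weight_from_uncertainty(0.50))  # -> 4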


@@ -220,9 +220,9 @@ class Metadata(object):
                 network_name = network.code
                 if not station_name in self.stations_dict.keys():
                     st_id = '{}.{}'.format(network_name, station_name)
-                    self.stations_dict[st_id] = {'latitude': station[0].latitude,
-                                                 'longitude': station[0].longitude,
-                                                 'elevation': station[0].elevation}
+                    self.stations_dict[st_id] = {'latitude': station.latitude,
+                                                 'longitude': station.longitude,
+                                                 'elevation': station.elevation}

         read_stat = {'xml': stat_info_from_inventory,
                      'dless': stat_info_from_parser}
@@ -268,9 +268,6 @@ class Metadata(object):
         if not fnames:
             # search for station name in filename
             fnames = glob.glob(os.path.join(path_to_inventory, '*' + station + '*'))
-        if not fnames:
-            # search for network name in filename
-            fnames = glob.glob(os.path.join(path_to_inventory, '*' + network + '*'))
         if not fnames:
             if self.verbosity:
                 print('Could not find filenames matching station name, network name or seed id')
@@ -282,7 +279,7 @@ class Metadata(object):
                 continue
             invtype, robj = self._read_metadata_file(os.path.join(path_to_inventory, fname))
             try:
-                # robj.get_coordinates(station_seed_id)  # TODO: Commented out, failed with Parser, is this needed?
+                robj.get_coordinates(station_seed_id)
                 self.inventory_files[fname] = {'invtype': invtype,
                                                'data': robj}
                 if station_seed_id in self.seed_ids.keys():
@@ -290,6 +287,7 @@ class Metadata(object):
                     self.seed_ids[station_seed_id] = fname
                 return True
             except Exception as e:
+                logging.warning(e)
                 continue
         print('Could not find metadata for station_seed_id {} in path {}'.format(station_seed_id, path_to_inventory))
@@ -654,6 +652,8 @@ def restitute_data(data, metadata, unit='VEL', force=False, ncores=0):
     """
     # data = remove_underscores(data)
+    if not data:
+        return

     # loop over traces
     input_tuples = []
@@ -661,9 +661,14 @@ def restitute_data(data, metadata, unit='VEL', force=False, ncores=0):
             input_tuples.append((tr, metadata, unit, force))
             data.remove(tr)

-    pool = gen_Pool(ncores)
-    result = pool.imap_unordered(restitute_trace, input_tuples)
-    pool.close()
+    if ncores == 0:
+        result = []
+        for input_tuple in input_tuples:
+            result.append(restitute_trace(input_tuple))
+    else:
+        pool = gen_Pool(ncores)
+        result = pool.imap_unordered(restitute_trace, input_tuples)
+        pool.close()

     for tr, remove_trace in result:
         if not remove_trace:
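Editor's note: the ncores == 0 branch above gives restitute_data a serial path, which avoids pool startup overhead for small jobs and keeps tracebacks from restitute_trace readable. The same dispatch pattern in isolation (work() is a stand-in for restitute_trace, and the plain multiprocessing.Pool stands in for PyLoT's gen_Pool helper):

import multiprocessing

def work(item):
    return item * item  # stand-in for restitute_trace(input_tuple)

def run_all(items, ncores=0):
    if ncores == 0:
        return [work(item) for item in items]  # serial: no pool involved
    with multiprocessing.Pool(ncores) as pool:
        return list(pool.imap_unordered(work, items))  # parallel, unordered

if __name__ == '__main__':
    print(run_all([1, 2, 3], ncores=0))  # -> [1, 4, 9]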


@@ -51,7 +51,6 @@ def readDefaultFilterInformation():
     :rtype: dict
     """
     pparam = PylotParameter()
-    pparam.reset_defaults()
     return readFilterInformation(pparam)


@@ -59,6 +59,9 @@ P:
   filter_type: bandpass
   sampfreq: 20.0

+  # ignore if autopylot fails to pick master-trace (not recommended if absolute onset times matter)
+  ignore_autopylot_fail_on_master: True
+
 # S-phase
 S:
   min_corr_stacking: 0.7
@@ -93,3 +96,6 @@ S:
   filter_type: bandpass
   sampfreq: 20.0
+
+  # ignore if autopylot fails to pick master-trace (not recommended if absolute onset times matter)
+  ignore_autopylot_fail_on_master: True
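Editor's note: the new per-phase flag can be read like any other entry of the correlation parameter file. Self-contained sketch using an inline document (the surrounding file layout is abbreviated; only the keys added by this diff are shown):

import yaml

snippet = """
P:
  ignore_autopylot_fail_on_master: True
S:
  ignore_autopylot_fail_on_master: True
"""

params = yaml.safe_load(snippet)
for phase_type in ('P', 'S'):
    if params[phase_type]['ignore_autopylot_fail_on_master']:
        print(f'{phase_type}: fall back to reference pick if autoPyLoT fails')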

pylot/correlation/pick_correlation_correction.py

@@ -436,6 +436,7 @@ def correlation_main(database_path_dmt: str, pylot_infile_path: str, params: dic
     # iterate over all events in "database_path_dmt"
     for eventindex, eventdir in enumerate(eventdirs):
+        eventindex += 1
         if not istart <= eventindex < istop:
             continue
@@ -443,12 +444,16 @@
             continue

         logging.info('\n' + 100 * '#')
-        logging.info('Working on event {} ({}/{})'.format(eventdir, eventindex + 1, len(eventdirs)))
+        logging.info('Working on event {} ({}/{})'.format(eventdir, eventindex, len(eventdirs)))

         if event_blacklist and get_event_id(eventdir) in event_blacklist:
             logging.info('Event on blacklist. Continue')

-        correlate_event(eventdir, pylot_parameter, params=params, channel_config=channel_config,
-                        update=update)
+        try:
+            correlate_event(eventdir, pylot_parameter, params=params, channel_config=channel_config,
+                            update=update)
+        except Exception as e:
+            logging.error(f'Could not correlate event {eventindex}: {e}')
+            continue

     logging.info('Finished script after {} at {}'.format(datetime.now() - tstart, datetime.now()))
@@ -522,7 +527,7 @@ def cut_stream_to_same_length(wfdata: Stream) -> None:
         st.trim(tstart, tend, pad=True, fill_value=0.)
         # check for stream length
         if len(st) < 3:
-            logging.info('Not enough traces in stream, remove it from dataset:', str(st))
+            logging.info(f'Not enough traces in stream, remove it from dataset: {st}')
             remove(wfdata, st)
             continue
         if st[0].stats.starttime != st[1].stats.starttime or st[1].stats.starttime != st[2].stats.starttime:
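Editor's note on the logging fix above: unlike print(), logging.info() interprets extra positional arguments as %-style format arguments, so the old comma-separated call never produced the intended message (it triggers an internal formatting error instead). Minimal demonstration of the two working forms:

import logging
logging.basicConfig(level=logging.INFO)

st = '3 Trace(s) in Stream'  # stand-in for an obspy Stream's repr
logging.info('Not enough traces in stream, remove it from dataset: %s', st)  # lazy %-form
logging.info(f'Not enough traces in stream, remove it from dataset: {st}')   # the diff's f-string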
@@ -635,10 +640,19 @@ def correlate_event(eventdir: str, pylot_parameter: PylotParameter, params: dict
         if len(picks) < params[phase_type]['min_picks_autopylot']:
             logging.info('Not enough automatic picks for correlation. Continue!')
             continue

-        # calculate corrected taupy picks and remove strong outliers
-        taupypicks_corr_initial, median_diff = get_corrected_taupy_picks(picks, taupypicks_orig)
-        picks = remove_outliers(picks, taupypicks_corr_initial,
-                                params[phase_type]['initial_pick_outlier_threshold'])
+        ## calculate corrected taupy picks and remove strong outliers - part commented out for now. Maybe make this
+        ## another option but I think it is not worth it
+        # taupypicks_corr_initial, median_diff = get_corrected_taupy_picks(picks, taupypicks_orig)
+        # picks = remove_outliers(picks, taupypicks_corr_initial,
+        #                         params[phase_type]['initial_pick_outlier_threshold'])
+
+        # use mean/median corrected picks from taupy as the new reference picks. This largely increases the yield
+        # of final picks since every station has a decent reference onset now for a correlation
+        taupypicks, time_shift = get_corrected_taupy_picks(picks, taupypicks_orig, all_available=True)
+        picks = copy.deepcopy(taupypicks)
+        for pick in picks:
+            pick.method_id.id = 'auto'

         if phase_type == 'S':
             # check whether rotation to ZNE is possible (to get rid of horizontal channel 1,2,3)
@@ -671,19 +685,56 @@ def correlate_event(eventdir: str, pylot_parameter: PylotParameter, params: dict
         if not params[phase_type]['use_stacked_trace']:
             # first stack mastertrace by using stations with high correlation on that station in station list with the
             # highest correlation coefficient
-            stack_result = stack_mastertrace(wfdata_lowf, wfdata_highf, wfdata, picks, params=params[phase_type],
-                                             channels=channels_list, method=method, fig_dir=fig_dir)
-        else:
-            stack_result = load_stacked_trace(eventdir, params[phase_type]['min_corr_stacking'])
+            logging.info('Searching for master trace. ' + 20 * '-')
+            best_result = None
+            for stack_result in stack_mastertrace(wfdata_lowf, wfdata_highf, wfdata, picks, params=params[phase_type],
+                                                  channels=channels_list, method=method, fig_dir=fig_dir):
+                if not stack_result:
+                    continue
+                # save best result in case it is still needed
+                if not best_result:
+                    best_result = stack_result
+                # extract stack result
+                correlations_dict, nwst_id, trace_master, nstack = stack_result
+                # now pick stacked trace with PyLoT for a more precise pick (use raw trace, gets filtered by autoPyLoT)
+                pick_stacked = repick_master_trace(wfdata_lowf, trace_master, pylot_parameter, event, event_id, metadata,
+                                                   phase_type, correlation_out_dir)
+                if not pick_stacked or not pick_stacked.time_errors.uncertainty:
+                    logging.info(f'Invalid autoPyLoT pick on master trace. Try next one.')
+                    continue
+                else:
+                    break
+            else:
+                logging.info('Did not find autoPyLoT pick for any stacked trace.')
+                if params[phase_type]['ignore_autopylot_fail_on_master'] is True:
+                    # Fallback in case autopylot failed
+                    logging.warning(f'Could not pick stacked trace. Using reference pick instead. If you need '
+                                    f'absolute onsets you need to account for a time-shift.')
+                    correlations_dict, nwst_id, trace_master, nstack = best_result
+                    pick_stacked = get_pick4station(picks, network_code=trace_master.stats.network,
+                                                    station_code=trace_master.stats.station, method='auto')
+                else:
+                    pick_stacked = None
+                if not pick_stacked:
+                    logging.info('Could not find reference pick also. Continue with next phase.')
+                    continue
+        else:
+            raise NotImplementedError('Loading stacked trace currently not implemented')
+            # stack_result = load_stacked_trace(eventdir, params[phase_type]['min_corr_stacking'])

         if not stack_result:
             logging.info('No stack result. Continue.')
             continue

         #############################################
         # NOW DO THE FINAL CORRELATION
-        # extract stack result
-        correlations_dict, nwst_id, trace_master, nstack = stack_result

         if params[phase_type]['plot']:
             # plot correlations of traces used to generate stacked trace
@@ -700,11 +751,6 @@ def correlate_event(eventdir: str, pylot_parameter: PylotParameter, params: dict
             # write unfiltered trace
             trace_master.write(os.path.join(correlation_out_dir, '{}_stacked.mseed'.format(trace_master.id)))

-        # now pick stacked trace with PyLoT for a more precise pick (use raw trace, gets filtered by autoPyLoT)
-        pick_stacked = repick_master_trace(wfdata_lowf, trace_master, pylot_parameter, event, event_id, metadata,
-                                           phase_type, correlation_out_dir)
-        if not pick_stacked:
-            continue

         # correlate stations with repicked and stacked master trace
         fig_dir_traces = make_figure_dirs(fig_dir, trace_master.id)
@@ -826,7 +872,7 @@ def get_picks_mean(picks: list) -> UTCDateTime:
 def get_corrected_taupy_picks(picks: list, taupypicks: list, all_available: bool = False) -> tuple:
-    """ get mean/median from picks taupy picks, correct latter for the difference """
+    """ get mean/median from picks relative to taupy picks, correct latter for the difference """

     def nwst_id_from_wfid(wfid):
         return '{}.{}'.format(wfid.network_code if wfid.network_code else '',
@@ -1165,38 +1211,38 @@ def stack_mastertrace(wfdata_lowf: Stream, wfdata_highf: Stream, wfdata_raw: Str
     A master trace will be created by stacking well correlating traces onto this station.
     """

-    def get_best_station4stack(sta_result):
-        """ return station with maximum mean_ccc"""
+    def get_best_stations4stack(sta_result, n_max=4):
+        """ return stations sorted after maximum mean_ccc"""
         ccc_means = {nwst_id: value['mean_ccc'] for nwst_id, value in sta_result.items() if
                      not np.isnan(value['mean_ccc'])}
         if len(ccc_means) < 1:
             logging.warning('No valid station found for stacking! Return.')
             return
-        best_station_id = max(ccc_means, key=ccc_means.get)
-        logging.info(
-            'Found highest mean correlation for station {} ({})'.format(best_station_id, max(ccc_means.values())))
-        return best_station_id
+        best_station_ids = sorted(ccc_means, key=ccc_means.get, reverse=True)[:n_max]
+        logging.info(f'Found mean correlations for stations: {ccc_means}')
+        return best_station_ids, ccc_means

     station_results = iterate_correlation(wfdata_lowf, wfdata_highf, channels, picks, method, params, fig_dir=fig_dir)
-    nwst_id_master = get_best_station4stack(station_results)
+    nwst_ids_master, ccc_means = get_best_stations4stack(station_results)

     # in case no stream with a valid pick is found
-    if not nwst_id_master:
-        logging.info('No mastertrace found! Will skip this event.')
+    if not nwst_ids_master:
+        logging.info('No mastertrace found! Continue with next.')
         return None

-    trace_master = station_results[nwst_id_master]['trace']
-    stations4stack = station_results[nwst_id_master]['stations4stack']
-    correlations_dict = station_results[nwst_id_master]['correlations_dict']
+    for nwst_id_master in nwst_ids_master:
+        trace_master = station_results[nwst_id_master]['trace']
+        stations4stack = station_results[nwst_id_master]['stations4stack']
+        correlations_dict = station_results[nwst_id_master]['correlations_dict']

-    wfdata_highf += trace_master
+        wfdata_highf += trace_master

-    dt_pre, dt_post = params['dt_stacking']
-    trace_master, nstack = apply_stacking(trace_master, stations4stack, wfdata_raw, picks, method=method,
-                                          do_rms_check=params['check_RMS'], plot=params['plot'], fig_dir=fig_dir,
-                                          dt_pre=dt_pre, dt_post=dt_post)
+        dt_pre, dt_post = params['dt_stacking']
+        trace_master, nstack = apply_stacking(trace_master, stations4stack, wfdata_raw, picks, method=method,
+                                              do_rms_check=params['check_RMS'], plot=params['plot'], fig_dir=fig_dir,
+                                              dt_pre=dt_pre, dt_post=dt_post)

-    return correlations_dict, nwst_id_master, trace_master, nstack
+        yield correlations_dict, nwst_id_master, trace_master, nstack


 def iterate_correlation(wfdata_lowf: Stream, wfdata_highf: Stream, channels: list, picks: list, method: str,
@@ -1984,4 +2030,5 @@ if __name__ == "__main__":
 # MAIN +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
     correlation_main(ARGS.dmt_path, ARGS.pylot_infile, params=CORR_PARAMS, istart=int(ARGS.istart),
                      istop=int(ARGS.istop),
-                     channel_config=CHANNELS, update=ARGS.update, event_blacklist=ARGS.blacklist)
+                     channel_config=CHANNELS, update=ARGS.update, event_blacklist=ARGS.blacklist,
+                     select_events=None)  # can add a list of event ids if only specific events shall be picked
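Editor's note: the refactor above turns stack_mastertrace() into a generator yielding stacking candidates in descending correlation quality, and the caller's for/else retries autoPyLoT on each candidate, falling back to the best one when every pick fails. That control flow reduced to its skeleton (all names below are stand-ins, not the repo's functions):

def stack_candidates():
    """Stand-in generator: yields candidates ordered from best to worst."""
    for quality in (0.9, 0.8, 0.7):
        yield quality

def try_autopick(candidate):
    return None  # simulate autoPyLoT failing on every stacked trace

best_result = None
for candidate in stack_candidates():
    if best_result is None:
        best_result = candidate  # remember the best in case all picks fail
    if try_autopick(candidate):
        break                    # success: leave the loop, skipping else
else:
    # only reached when the loop was exhausted without a break
    print(f'No autoPyLoT pick on any candidate; falling back to {best_result}')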

submit_pick_corr_correction.sh

@@ -32,9 +32,10 @@ export PYTHONPATH="$PYTHONPATH:/home/marcel/git/pylot/"
 #python pick_correlation_correction.py '/data/AlpArray_Data/dmt_database_mantle_M5.8-6.0' '/home/marcel/.pylot/pylot_alparray_mantle_corr_stack_0.03-0.5.in' -pd -n ${NSLOTS:=1} -istart 100 -istop 200
 #python pick_correlation_correction.py 'H:\sciebo\dmt_database' 'H:\Sciebo\dmt_database\pylot_alparray_mantle_corr_S_0.01-0.2.in' -pd -n 4 -t

-pylot_infile='/home/marcel/.pylot/pylot_alparray_syn_fwi_mk6_it3.in'
-#pylot_infile='/home/marcel/.pylot/pylot_adriaarray_corr_P_and_S.in'
+#pylot_infile='/home/marcel/.pylot/pylot_alparray_syn_fwi_mk6_it3.in'
+pylot_infile='/home/marcel/.pylot/pylot_adriaarray_corr_P_and_S.in'

 # THIS SCRIPT SHOLD BE CALLED BY "submit_to_grid_engine.py" using the following line:
-python pick_correlation_correction.py $1 $pylot_infile -pd -n ${NSLOTS:=1} -istart $2 --params 'parameters_fwi_mk6_it3.yaml'
+# use -pd for detailed plots in eventdir/correlation_XX_XX/figures
+python pick_correlation_correction.py $1 $pylot_infile -n ${NSLOTS:=1} -istart $2 --params 'parameters_adriaarray.yaml' # -pd

 #--event_blacklist eventlist.txt

submit_to_grid_engine.py

@@ -3,21 +3,26 @@
 import subprocess

 fnames = [
-    ('/data/AlpArray_Data/dmt_database_synth_model_mk6_it3_no_rotation', 0),
+    ('/data/AdriaArray_Data/dmt_database_mantle_M5.0-5.4', 0),
+    ('/data/AdriaArray_Data/dmt_database_mantle_M5.4-5.7', 0),
+    ('/data/AdriaArray_Data/dmt_database_mantle_M5.7-6.0', 0),
+    ('/data/AdriaArray_Data/dmt_database_mantle_M6.0-6.3', 0),
+    ('/data/AdriaArray_Data/dmt_database_mantle_M6.3-10.0', 0),
+    # ('/data/AdriaArray_Data/dmt_database_ISC_mantle_M5.0-5.4', 0),
+    # ('/data/AdriaArray_Data/dmt_database_ISC_mantle_M5.4-5.7', 0),
+    # ('/data/AdriaArray_Data/dmt_database_ISC_mantle_M5.7-6.0', 0),
+    # ('/data/AdriaArray_Data/dmt_database_ISC_mantle_M6.0-10.0', 0),
 ]

 #fnames = [('/data/AlpArray_Data/dmt_database_mantle_0.01-0.2_SKS-phase', 0),
 #          ('/data/AlpArray_Data/dmt_database_mantle_0.01-0.2_S-phase', 0),]

 ####
-script_location = '/home/marcel/VersionCtrl/git/code_base/correlation_picker/submit_pick_corr_correction.sh'
+script_location = '/home/marcel/VersionCtrl/git/pylot/pylot/correlation/submit_pick_corr_correction.sh'
 ####

 for fnin, istart in fnames:
     input_cmds = f'qsub -q low.q@minos15,low.q@minos14,low.q@minos13,low.q@minos12,low.q@minos11 {script_location} {fnin} {istart}'

     print(input_cmds)
     print(subprocess.check_output(input_cmds.split()))