
docs error chapter

Branch: master
Author: asteinbe, 2 years ago
Commit: 4e1da5609c
7 changed files with 122 additions and 85 deletions
  1. docs/errors.rst (+19, -0)
  2. docs/index.rst (+1, -0)
  3. docs/overview.rst (+1, -1)
  4. docs/processing.rst (+5, -2)
  5. src/apps/plot.py (+95, -62)
  6. src/process/main.py (+1, -0)
  7. src/process/sembCalc.py (+0, -20)

docs/errors.rst (+19, -0)

@@ -0,0 +1,19 @@
Common errors and fixes
=======================
A list of common errors and how to avoid/fix them.
1. Time window too short
------------------------
If the time window given by duration and forerun is too short in comparison to the
step and window size, no semblance can be reliably calculated.
The following error will appear:
.. highlight:: console
::
$ ValueError: zero-size array to reduction operation maximum which has no identity
A simple fix is to choose a forerun+duration length at least equal to the step size.
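As a quick sanity check, the relation between the window length and the step size can be verified before processing. The following is only an illustrative sketch; ``forerun``, ``duration`` and ``step`` stand for the corresponding values from the event configuration and are not an actual Palantiri API:

.. code-block:: python

    # Illustrative values (seconds), taken from the event configuration.
    forerun = 10.0    # window length before the origin time
    duration = 50.0   # window length after the origin time
    step = 20.0       # step size of the sliding semblance window

    # The fix described above: the total time window should be at least one step long.
    if forerun + duration < step:
        raise ValueError('time window (forerun + duration) is shorter than the step size')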

docs/index.rst (+1, -0)

@@ -36,6 +36,7 @@ Contents
setup
configuration/index
processing
errors
Indices and tables
------------------


docs/overview.rst (+1, -1)

@@ -3,7 +3,7 @@ Overview
========
Palantiri is an open source seismology toolbox for backprojection using a multi-array approach.
Seismic backprojection is a method where the reciprocity theorem is applied to seismic to waveforms to image the time
Seismic backprojection is a method where the reciprocity theorem is applied to seismic waveforms to image the time-dependent evolution of a coseismic rupture.
.. raw:: html


docs/processing.rst (+5, -2)

@@ -104,12 +104,12 @@ The command to cluster the stations into virtual arrays is:
.. highlight:: console
::
$ cd events
$ palantiri_cluster
Usage: palantiri_cluster eventname_date
The command needs to be executed in the events folder.
The desired stations in each array can be given/modified in the eventname_date.config file, also allowing for manual array creation.
As input, a comma-separated list of station names is expected in the format::
@@ -127,6 +127,9 @@ The last step is the actual processing.
This chapter describes the main processing. After the pre-processing you will have a folder named after the specific event in the events subfolder and your eventname_date.config file contains a list of arrays.
The eventfolder will contain all work and data specific to this event. If you reprocess a certain event the results will be overwritten.
For beamforming, several methods are incorporated, including linear, phase-weighted and coherence-based stacking.
In the Palantiri code we also implemented the possibility to weight each individual trace's contribution to a virtual array's semblance by the variance of the pre-event noise.
Furthermore, our code also supports linear and coherency stacking. In the coherency stacking approach we stack according to the coherence of all stations in an array with the reference station of that array. We then stack the coherences of all arrays together.
This only works given that all stations in an array have the same polarity.
The MUSIC algorithm is at this stage partly supported but will be fully implemented in later versions.
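A minimal sketch of the two weighting/stacking options described above (illustrative only, not the actual Palantiri implementation; it assumes the traces of one virtual array are already aligned in a 2-D array):

.. code-block:: python

    import numpy as np

    def variance_weighted_stack(traces, noise_windows):
        # Weight each trace by the inverse variance of its pre-event noise,
        # so noisy traces contribute less to the array's stack.
        weights = 1.0 / np.var(noise_windows, axis=1)
        weights /= weights.sum()
        return np.sum(weights[:, None] * traces, axis=0)

    def coherency_stack(traces, reference_index=0):
        # Zero-lag correlation of every station with the array's reference
        # station, averaged over the array; assumes all stations in the
        # array have the same polarity.
        ref = traces[reference_index]
        coherences = [np.corrcoef(tr, ref)[0, 1] for tr in traces]
        return np.mean(coherences)

The per-array coherences obtained this way can then be stacked (e.g. averaged) over all virtual arrays.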


src/apps/plot.py (+95, -62)

@@ -431,11 +431,12 @@ def distance_time():
for path in sorted(pathlist):
path_in_str = str(path)
data = num.loadtxt(path_in_str, delimiter=' ', skiprows=5)
max = np.max(data[:, 2])
if maxs < max:
maxs = max
datamax = np.max(data[:, 2])
if path_in_str[-14] is not "o":
data = num.loadtxt(path_in_str, delimiter=' ', skiprows=5)
max = np.max(data[:, 2])
if maxs < max:
maxs = max
datamax = np.max(data[:, 2])
pathlist = Path(rel).glob('0-*.ASC')
maxs = 0.
@@ -446,12 +447,13 @@ def distance_time():
distances = []
times = []
for path in sorted(pathlist):
path_in_str = str(path)
data = num.loadtxt(path_in_str, delimiter=' ', skiprows=5)
data_int = data_int + data[:,2]
for i in range(0, len(data[:, 2])):
if data_int[i] > datamax*0.1:
time_grid[i] = float(path_in_str[-8:-6]) * step
if path_in_str[-14] is not "o":
path_in_str = str(path)
data = num.loadtxt(path_in_str, delimiter=' ', skiprows=5)
data_int = data_int + data[:,2]
for i in range(0, len(data[:, 2])):
if data_int[i] > datamax*0.1:
time_grid[i] = float(path_in_str[-8:-6]) * step
for i in range(0, len(data[:, 2])):
if data_int[i] > datamax*0.1:
lats = data[i, 1]
@@ -502,9 +504,12 @@ def distance_time():
datamax = []
for path in sorted(pathlist):
path_in_str = str(path)
data = num.loadtxt(path_in_str, delimiter=' ', skiprows=5)
max = np.max(data[:, 2])
datamax.append(np.max(data[:, 2]))
if path_in_str[-14] is not "o":
data = num.loadtxt(path_in_str, delimiter=' ', skiprows=5)
max = np.max(data[:, 2])
datamax.append(np.max(data[:, 2]))
rel = 'events/' + str(sys.argv[1]) + '/work/semblance/'
pathlist = Path(rel).glob('0-*.ASC')
maxs = 0.
@@ -514,31 +519,57 @@ def distance_time():
times = []
k = 0
for path in sorted(pathlist):
counter = 0
path_in_str = str(path)
data = num.loadtxt(path_in_str, delimiter=' ', skiprows=5)
for i in range(0, len(data[:, 2])):
if data[i, 2] == datamax[k]:
lats = data[i, 1]
lons = data[i, 0]
datas.append(data[i, 2])
dist = orthodrome.distance_accurate50m(lats, lons,
if path_in_str[-14] is not "o":
data = num.loadtxt(path_in_str, delimiter=' ', skiprows=5)
for i in range(0, len(data[:, 2])):
for kl in data[:, 2]:
if kl == datamax[k]:
counter = counter+1
if data[i, 2] > datamax[k]*0.6:
lats_list = []
lons_list = []
times_list = []
if counter == 0:
lats = data[i, 1]
lons = data[i, 0]
datas.append(data[i, 2])
dist = orthodrome.distance_accurate50m(lats, lons,
lat_ev,
lon_ev)
azis.append(toAzimuth(lat_ev, lon_ev,
lats, lons))
distances.append(dist)
time = float(path_in_str[-8:-6]) * step
times.append(time)
else:
lats_list.append(data[i, 1])
lons_list.append(data[i, 0])
if counter != 0:
dist = orthodrome.distance_accurate50m(num.mean(lats_list),
num.mean(lons_list),
lat_ev,
lon_ev)
azis.append(toAzimuth(lat_ev, lon_ev,
lats, lons))
distances.append(dist)
time = float(path_in_str[-8:-6]) * step
times.append(time)
k = k+1
print(((distances[0]+distances[2])/2)/num.mean(time))
k = k+1
print(num.mean(distances)/num.mean(time))
if sys.argv[3] == 'stepwise_max':
datamax = []
for path in sorted(pathlist):
path_in_str = str(path)
data = num.loadtxt(path_in_str, delimiter=' ', skiprows=5)
max = np.max(data[:, 2])
datamax.append(np.max(data[:, 2]))
if path_in_str[-14] is not "o":
data = num.loadtxt(path_in_str, delimiter=' ', skiprows=5)
max = np.max(data[:, 2])
datamax.append(np.max(data[:, 2]))
rel = 'events/' + str(sys.argv[1]) + '/work/semblance/'
pathlist = Path(rel).glob('0-*.ASC')
maxs = 0.
@@ -550,41 +581,43 @@ def distance_time():
for path in sorted(pathlist):
counter = 0
path_in_str = str(path)
data = num.loadtxt(path_in_str, delimiter=' ', skiprows=5)
for i in range(0, len(data[:, 2])):
for kl in data[:, 2]:
if kl == datamax[k]:
counter = counter+1
if data[i, 2] == datamax[k]:
lats_list = []
lons_list = []
times_list = []
if counter == 0:
lats = data[i, 1]
lons = data[i, 0]
datas.append(data[i, 2])
dist = orthodrome.distance_accurate50m(lats, lons,
lat_ev,
lon_ev)
azis.append(toAzimuth(lat_ev, lon_ev,
lats, lons))
distances.append(dist)
time = float(path_in_str[-8:-6]) * step
times.append(time)
else:
lats_list.append(data[i, 1])
lons_list.append(data[i, 0])
if path_in_str[-14] is not "o":
if counter != 0:
dist = orthodrome.distance_accurate50m(num.mean(lats_list),
num.mean(lons_list),
lat_ev,
lon_ev)
distances.append(dist)
data = num.loadtxt(path_in_str, delimiter=' ', skiprows=5)
for i in range(0, len(data[:, 2])):
for kl in data[:, 2]:
if kl == datamax[k]:
counter = counter+1
if data[i, 2] == datamax[k]:
lats_list = []
lons_list = []
times_list = []
if counter == 0:
lats = data[i, 1]
lons = data[i, 0]
datas.append(data[i, 2])
dist = orthodrome.distance_accurate50m(lats, lons,
lat_ev,
lon_ev)
azis.append(toAzimuth(lat_ev, lon_ev,
lats, lons))
distances.append(dist)
time = float(path_in_str[-8:-6]) * step
times.append(time)
else:
lats_list.append(data[i, 1])
lons_list.append(data[i, 0])
if counter != 0:
dist = orthodrome.distance_accurate50m(num.mean(lats_list),
num.mean(lons_list),
lat_ev,
lon_ev)
distances.append(dist)
time = float(path_in_str[-8:-6]) * step
times.append(time)
k = k+1
time = float(path_in_str[-8:-6]) * step
times.append(time)
k = k+1
print(num.mean(distances)/num.mean(time))
plt.figure()
@@ -2780,7 +2813,7 @@ def main():
plot_semblance_timestep()
elif sys.argv[2] == 'distance_time':
distance_time()
elif sys.argv[2] == 'semblance_movie':
elif sys.argv[2] == 'semblance_map_movie':
plot_semblance_movie()
elif sys.argv[2] == 'timeshifts':
empiricial_timeshifts()


src/process/main.py (+1, -0)

@@ -364,6 +364,7 @@ def processLoop(traces=None, stations=None, cluster=None):
desired = 'Z'
if phase is 'S':
desired = 'T'
rpe = rpe +'_S'
# ==================================loop over filter setups=====
for filterindex in xrange(0, filters):
# ==================================loop over depth=======


src/process/sembCalc.py (+0, -20)

@@ -758,26 +758,6 @@ def collectSemb(SembList, Config, Origin, Folder, ntimes, arrays, switch,
if semb > sembmax:
sembmax = semb
# delta_min = None
# for js in range(num.shape(latv)[0]):
# xs = latv[js]
# ys = lonv[js]
# delta = orthodrome.distance_accurate50m_numpy(xs, ys, x_shift,
# y_shift)
#
# if delta_min is None:
# delta_min = delta
# x_use = xs
# y_use = ys
# if delta < delta_min:
# delta_min = delta
# x_use = xs
# y_use = ys
# x = x_use
# y = y_use
sembmaxX = x
sembmaxY = y

