Tools, scripts, apps and functions to run fast source and ground shaking estimation
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2049 lines
58 KiB

# GPLv3
#
# The Developers, 21st Century
import logging
import re
import os
import numpy as num
from pyrocko import orthodrome as pod, util as putil, moment_tensor as pmt
from pyrocko.cake import earthradius
from pyrocko.guts import Float, Int, List, Object, StringChoice, Tuple
from pyrocko.guts_array import Array
from pyrocko.model import GNSSCampaign, GNSSComponent, GNSSStation, Station
from pyrocko.trace import Trace
from pyrocko.gf.seismosizer import map_anchor, Location
from ewrica.sources import (EwricaIDSSource, EwricaIDSPatch, EwricaIDSSTF,
EwricaIDSLog)
op = os.path
logger = logging.getLogger('ewrica.io.ids')
km = 1e3
d2r = num.pi / 180.
class WaveformResult(Location):
    '''Pair of observed and synthetic waveform traces at one location.

    Extends :py:class:`pyrocko.gf.seismosizer.Location` with the traces,
    their channel codes and a scalar misfit value.
    '''

    # (network, station, location, channel) codes of the trace pair
    nslc = Tuple.T(optional=True)
    # recorded waveform
    observed = Trace.T(optional=True)
    # modelled waveform
    synthetic = Trace.T(optional=True)
    # scalar misfit (normalized residual variance) of the pair
    misfit = Float.T(optional=True)
class WaveformResultList(Object):
    '''Collection of :py:class:`WaveformResult` objects with filter and
    grouping utilities.
    '''

    results = List.T(
        WaveformResult.T(),
        default=[],
        optional=True)

    @classmethod
    def primitive_merge(cls, lists):
        '''Concatenate the results of several lists into one new list.

        :param lists: result lists to merge
        :type lists: iterable of :py:class:`WaveformResultList`

        :returns: merged result list
        :rtype: :py:class:`WaveformResultList`
        '''
        result = cls()
        for lst in lists:
            result.results += lst.results
        return result

    def get_results(self, subset=None):
        '''Return stored results, optionally filtered.

        :param subset: ``'with_traces'`` keeps only results where both
            observed and synthetic traces exist, ``'without_traces'`` keeps
            results missing at least one trace, ``None`` returns everything
        :type subset: str, optional

        :returns: (filtered) results
        :rtype: list of :py:class:`WaveformResult`
        '''
        results = self.results
        if subset == 'with_traces':
            results = [r for r in self.results
                       if r.observed is not None and r.synthetic is not None]
        elif subset == 'without_traces':
            results = [r for r in self.results
                       if r.observed is None or r.synthetic is None]
        return results

    def iter_traces(self, *args, **kwargs):
        '''Iterate observed and synthetic traces of the (filtered) results.

        Arguments are forwarded to :py:meth:`get_results`. For each result
        the observed trace is yielded first, then the synthetic one.
        '''
        for res in self.get_results(*args, **kwargs):
            for tr in (res.observed, res.synthetic):
                yield tr

    def grouped_results(self, gather, *args, **kwargs):
        '''Yield result lists grouped by a gather function.

        If *gather* returns a boolean for every result, a single list with
        all results where *gather* is ``True`` is yielded (boolean mask).
        Otherwise one list per distinct *gather* return value is yielded.

        :param gather: callable mapping a result to its group key
        :type gather: callable
        '''
        results = self.get_results(*args, **kwargs)
        sort = list(map(gather, results))
        if all(map(lambda x: isinstance(x, bool), sort)):
            # boolean mask selection via numpy fancy indexing
            idx = num.arange(0, len(sort))[sort]
            res = [results[i] for i in idx]
            yield WaveformResultList(results=res)
        else:
            sort_set = set(sort)
            for it in sort_set:
                idx = [i for i, s in enumerate(sort) if s == it]
                res = [results[i] for i in idx]
                yield WaveformResultList(results=res)

    def grouped_traces(self, gather, *args, **kwargs):
        '''Yield lists of traces grouped by a gather function.

        Same grouping semantics as :py:meth:`grouped_results`, but acting on
        the traces from :py:meth:`iter_traces`.

        :param gather: callable mapping a trace to its group key
        :type gather: callable
        '''
        traces = list(self.iter_traces(*args, **kwargs))
        sort = list(map(gather, traces))
        if all(map(lambda x: isinstance(x, bool), sort)):
            idx = num.arange(0, len(sort))[sort]
            yield [traces[i] for i in idx]
        else:
            sort_set = set(sort)
            for it in sort_set:
                idx = [i for i, s in enumerate(sort) if s == it]
                yield [traces[i] for i in idx]
# Expected header columns of the IDS output files. Each list is compared
# verbatim against the first line of the corresponding file before parsing
# (see _validate_ids_file below).

# Snapshot / SnapshotR files: patch-wise moment release per time frame
snapshot_parameters = [
    'Latitude[deg]',
    'Longitude[deg]',
    'Depth[km]',
    'X_strike[km]',
    'Y_downdip[km]',
    'Slip[m]',
    'Moment[Nm]']

# SlipModel.dat: final patch geometry, slip and moment release
slipmodel_parameters = [
    'Lat[deg]',
    'Lon[deg]',
    'Depth[km]',
    'X_strike[km]',
    'Y_downdip[km]',
    'Length[km]',
    'Width[km]',
    'Strike[deg]',
    'Dip[deg]',
    'Rake[deg]',
    'Slip[m]',
    'Moment[Nm]']

# EQ_STF.dat: source time function of the whole event
earthquake_stf_parameters = [
    'Time',
    'Moment_Rate',
    'Accu_Moment',
    'Mw']

# Rup_Front.dat: rupture arrival time per patch
rupture_front_parameters = [
    'Lat[deg]',
    'Lon[deg]',
    'Depth[km]',
    'X_strike[km]',
    'Y_downdip[km]',
    'Ruptime[s]']

# <net.sta.loc>_ObsSyn.dat (nearfield variant)
obssyn_nearfield_parameters = [
    'TimeH',
    'ObsUe',
    'ObsUn',
    'SynUe',
    'SynUn',
    'TimeZ',
    'ObsUz',
    'SynUz']

# <net.sta.loc>_ObsSyn.dat (farfield variant)
obssyn_farfield_parameters = [
    'TimeSH',
    'ObsUe',
    'ObsUn',
    'SynUe',
    'SynUn',
    'TimeP',
    'ObsUz',
    'SynUz']

# GNSS fit file: observed and synthetic static displacements
obssyn_gnss_parameters = [
    'Lat',
    'Lon',
    'Ue_Obs',
    'Un_obs',
    'Up_Obs',
    'Ue_Syn',
    'Un_Syn',
    'Up_Syn']

# WFMisfitvar.dat: per-station waveform misfit variances
wfmisfitvar_parameters = [
    'Station',
    'Lat[deg]',
    'Lon[deg]',
    'nrv_dsp_e',
    'nrv_dsp_n',
    'nrv_dsp_z']
class IDSUnpackError(Exception):
    '''Raised when an IDS input/output file cannot be parsed as expected.'''
    pass
def _validate_ids_file(fn, parameter):
with open(fn) as f:
header = f.readline().split()
if header != parameter:
raise IDSUnpackError(
'Parameter order in IDS file "{}" differs from known '
'one "{}".'.format(fn, parameter))
# Thin wrappers binding _validate_ids_file to the known header layout of
# each IDS file type.


def _validate_snapshot_file(fn):
    _validate_ids_file(fn, snapshot_parameters)


def _validate_slipmodel_file(fn):
    _validate_ids_file(fn, slipmodel_parameters)


def _validate_earthquake_stf_file(fn):
    _validate_ids_file(fn, earthquake_stf_parameters)


def _validate_rupture_front_file(fn):
    _validate_ids_file(fn, rupture_front_parameters)


def _validate_obssyn_nearfield_file(fn):
    _validate_ids_file(fn, obssyn_nearfield_parameters)


def _validate_obssyn_farfield_file(fn):
    _validate_ids_file(fn, obssyn_farfield_parameters)


def _validate_obssyn_gnss_file(fn):
    _validate_ids_file(fn, obssyn_gnss_parameters)


def _validate_wf_misfit_file(fn):
    _validate_ids_file(fn, wfmisfitvar_parameters)
def load_stationinfo_file(fn):
    '''Load all information from IDS StationInfo.dat input file

    Returns all information on input stations.
    All output is given in standard units.

    :param fn: filename of the StationInfo file
    :type fn: str

    :returns: list of stations
    :rtype: list of :py:class:`pyrocko.model.station.Station`
    '''
    with open(fn) as f:
        content = [ln for ln in f if not ln.startswith('#')]

    # The station count may be followed by an inline '!' comment.
    count_field = content[2].split('!')
    if len(count_field) > 1:
        nstations = int(count_field[1].strip())
    else:
        nstations = int(content[2].strip())

    stations = []
    for row in content[4:]:
        fields = row.strip().split()
        # Station code is '<sta>', '<net>.<sta>' or '<net>.<sta>.<loc>'.
        codes = tuple(fields[0].split('.'))
        if len(codes) == 1:
            net, sta, loc = '', codes[0], ''
        else:
            net, sta = codes[0], codes[1]
            loc = codes[2] if len(codes) > 2 else ''
        stations.append(
            Station(
                network=net,
                station=sta,
                location=loc,
                lat=float(fields[1]),
                lon=float(fields[2])))

    if len(stations) != nstations:
        raise IDSUnpackError(
            'Expected and found number of stations from {} differ.'.format(fn))
    return stations
def load_snapshot_file(fn):
    '''Load all information from IDSSnapshot or SnapshotR file

    Returns all information on the patch wise moment release within the time
    frame of the Snapshotfile.
    All output is given in standard units.

    :param fn: filename of the snapshot file
    :type fn: str

    :returns: patch center latitude in [deg],
        patch center longitude in [deg],
        patch center depth in [m],
        patch center x coordinate along strike in [m] relative to hypocenter,
        patch center y coordinate along downdip in [m] relative to hypocenter,
        patch slip vector length in [m],
        patch moment release in [Nm]
    :rtype: tuple of 7 :py:class:`numpy.ndarray`
    '''
    _validate_snapshot_file(fn)
    lats, lons, depth, x, y, slip, moment = num.genfromtxt(
        fn, unpack=True, skip_header=1)
    # File stores km; convert to m in place.
    for arr in (depth, x, y):
        arr *= km
    return lats, lons, depth, x, y, slip, moment
def load_slipmodel_file(fn):
    '''Load all information from IDSSlipModel.dat file

    That includes extension and orientation of the patches. Slip vector
    length and orientation are given as well as the final patch moment
    release. All output is given in standard units.

    :param fn: filename of the slipmodel file
    :type fn: str

    :returns: patch center latitude in [deg],
        patch center longitude in [deg],
        patch center depth in [m],
        patch center x coordinate along strike in [m] relative to hypocenter,
        patch center y coordinate along downdip in [m] relative to hypocenter,
        patch length in [m],
        patch width in [m],
        patch strike in [deg],
        patch dip in [deg],
        patch rake in [deg],
        patch slip vector length in [m],
        patch moment release in [Nm]
    :rtype: tuple of 12 :py:class:`numpy.ndarray`
    '''
    _validate_slipmodel_file(fn)
    data = num.genfromtxt(fn, unpack=True, skip_header=1)
    (lats, lons, depth, x, y, ln, wd,
     strike, dip, rake, slip, moment) = data
    # File stores km; convert lengths to m in place.
    for arr in (depth, x, y, ln, wd):
        arr *= km
    return lats, lons, depth, x, y, ln, wd, strike, dip, rake, slip, moment
def load_patch_stf_file(fn):
    '''Load all information from PT_STF.dat patch source time function file

    The patch order is the same as in the corresponding slipmodel.dat file
    (see :py:func:`ewrica.io.ids.load_slipmodel_file`). Here only the
    relative time after event origin time and the corresponding patch moment
    rates are given.

    :param fn: filename of the patch source time function file
    :type fn: str

    :returns: time after event origin in [s],
        patch moment rates (source time functions) in [Nm/s]
    :rtype: :py:class:`numpy.ndarray` ``(ntimes,)``,
        :py:class:`numpy.ndarray` ``(ntimes, nsubfaults)``
    '''
    table = num.genfromtxt(fn, skip_header=1)
    # First column holds the time axis, remaining columns one STF per patch.
    times = table[:, 0]
    moment_rates = table[:, 1:]
    return times, moment_rates
def load_earthquake_stf_file(fn):
    '''Load all information from EQ_STF.dat source time function file

    Here only the relative time after event origin time and the corresponding
    source moment rates are given.

    :param fn: filename of the earthquake source time function file
    :type fn: str

    :returns: time after event origin in [s],
        source moment rate (source time function) in [Nm/s],
        source cumulative moment in [Nm]
    :rtype: :py:class:`numpy.ndarray` ``(ntimes,)``,
        :py:class:`numpy.ndarray` ``(ntimes,)``,
        :py:class:`numpy.ndarray` ``(ntimes,)``

    :raises IDSUnpackError: if magnitudes/moments or moment rates/moments in
        the file are mutually inconsistent
    '''
    _validate_earthquake_stf_file(fn)
    time, moment_rate, moment = num.genfromtxt(
        fn, unpack=True, skip_header=1, usecols=[0, 1, 2])
    # The Mw column only holds valid values once moment release has started,
    # so skip all leading rows with zero cumulative moment (plus the header).
    magnitude = num.genfromtxt(
        fn, unpack=True, skip_header=(moment > 0).argmax() + 1, usecols=[3])

    # Consistency check: Mw column must match magnitudes derived from the
    # cumulative moment column.
    atol = 0.01
    try:
        num.testing.assert_allclose(
            pmt.moment_to_magnitude(moment[1:]), magnitude, atol=atol)
    except AssertionError:
        # BUG FIX: the exception was instantiated but never raised before.
        raise IDSUnpackError(
            'Read magnitude and moments converted to magnitude differ by more '
            'than {}'.format(atol))

    # Consistency check: integrated moment rates must match the cumulative
    # moment column.
    rtol = 0.001
    dt = num.mean(num.diff(time))
    try:
        num.testing.assert_allclose(
            num.cumsum(moment_rate)[:-1] * dt, moment[1:], rtol=rtol)
    except AssertionError:
        # BUG FIX: the exception was instantiated but never raised before;
        # also '%%' (a %-format escape) rendered literally under str.format.
        raise IDSUnpackError(
            'Read moments and moment rates converted to moment differ by more '
            'than {}%'.format(rtol*100))
    return time, moment_rate, moment
def load_rupture_front_file(fn):
    '''Load all information from Rup_Front.dat rupture front file

    Here only the relative time after event origin time is given. The order
    of patches is not the same as in
    :py:func:`ewrica.io.ids.load_patch_stf_file` or
    :py:func:`ewrica.io.ids.load_slipmodel_file`.

    :param fn: filename of the rupture front file
    :type fn: str

    :returns: patch center latitude in [deg],
        patch center longitude in [deg],
        patch center depth in [m],
        patch center x coordinate along strike in [m] relative to hypocenter,
        patch center y coordinate along downdip in [m] relative to hypocenter,
        patch rupture arrival time in [s]
    :rtype: tuple of 6 :py:class:`numpy.ndarray`
    '''
    _validate_rupture_front_file(fn)
    data = num.genfromtxt(fn, unpack=True, skip_header=1)
    lat, lon, depth, x, y, time = data
    # File stores km; convert to m in place.
    for arr in (depth, x, y):
        arr *= km
    return lat, lon, depth, x, y, time
def load_idslog_file(fn):
    '''Load final misfit only from idslog_iteration.dat log file

    The misfit is given as normalized residual variance.

    :param fn: filename of the idslog file
    :type fn: str

    :returns: Detailed IDS Log information
    :rtype: :py:class:`ewrica.sources.EwricaIDSLog`
    '''
    # Fixed layout of the log file: 5 header rows above the iteration table,
    # 3 footer rows below it (the termination message being the first of
    # the footer rows).
    rows_header = 5
    rows_footer = 3
    data = num.genfromtxt(
        fn,
        unpack=True,
        skip_header=rows_header,
        skip_footer=rows_footer,
        names=True)
    names = data.dtype.names
    # Map IDS misfit column names to EwricaIDSLog misfit subset keys.
    subset_dict = dict(
        nrv_swf='waveform',
        nrv_osm='sm_offset',
        nrv_gns='gnss',
        nrv_sar='insar')
    log = EwricaIDSLog(
        misfit_all=num.array([i[names.index('nrv_all')] for i in data]),
        misfit_subsets={
            subset_dict[name]: num.array([i[names.index(name)] for i in data])
            for name in subset_dict.keys() if name in names},
        smoothing=num.array(
            [i[names.index('smoothing')] for i in data]).astype(num.int64),
        max_slip=num.array([i[names.index('max_slip')] for i in data]),
        magnitude=num.array([i[names.index('Mw')] for i in data]))
    msg = ('IDS file "{}" format differs from known one with {} header and {} '
           'footer rows.'.format(fn, rows_header, rows_footer))
    # Sanity check: iteration column must run from 1 to n_iterations,
    # otherwise the assumed header/footer layout is wrong.
    if int(data[0][names.index('iteration')]) != 1:
        raise IDSUnpackError(msg)
    if int(data[-1][names.index('iteration')]) != log.n_iterations:
        raise IDSUnpackError(msg)
    # The stop cause is parsed from the first footer line, which must read
    # "iteration terminated: <cause>."; the trailing '.' is stripped below.
    with open(fn) as f:
        line = f.readlines()[-rows_footer].strip()
    if not line.startswith('iteration terminated:'):
        raise IDSUnpackError(msg)
    log.stop_cause = line.split(':')[1].strip().replace(' ', '_')[:-1]
    # log.validate()
    return log
def load_earthquake_mp_file(fn):
    '''Load selected information from EQ_MP.dat information file

    :param fn: filename of the earthquake mp file
    :type fn: str

    :returns: source cumulative moment in [Nm],
        moment magnitude,
        number of subfaults,
        moment centroid latitude in [deg],
        moment centroid longitude in [deg],
        moment centroid depth in [m],
        mean rupture velocity (based on first 10% of moment release) in [m/s]
    :rtype: float,
        float,
        int,
        float,
        float,
        float,
        float

    :raises IDSUnpackError: if the read magnitude is inconsistent with the
        read seismic moment
    '''
    # One named-group pattern per line of interest; the alternation below
    # lets a single re.match pull whichever quantity a line contains.
    pattern_moment = (r'Seismic moment:\s{1,}(?P<moment>\d+\.\d+)[eE]'
                      r'(?P<moment_exp>[+-]\d+) Nm')
    pattern_magnitude = r'Moment magnitude:\s{1,}(?P<mag>\d+\.\d+)'
    pattern_n_subfaults = r'Number of sub-faults:\s{1,}(?P<n_subfaults>\d+)'
    pattern_centroid = (
        r'Centroid of moment distribution: \(\s+(?P<clat>[-]{0,1}\d+\.\d+) '
        r'deg_N,\s+(?P<clon>[-]{0,1}\d+\.\d+) deg_E,\s+'
        r'(?P<cdepth>[-]{0,1}\d+\.\d+) km\)')
    pattern_velocity = (
        r'Average rupture velocity \(based on the first \d+\% of local moment '
        r'release\):\s+(?P<vrup>\d+\.\d+) km\/s')

    pattern = '|'.join([
        pattern_moment,
        pattern_magnitude,
        pattern_n_subfaults,
        pattern_centroid,
        pattern_velocity])

    matches = []
    with open(fn) as f:
        for line in f:
            line = line.strip()
            m = re.match(pattern, line)
            if m is None:
                continue
            matches.append(m)

    # Collect all matched named groups; unmatched groups are None and are
    # dropped.
    d = {k: float(v) for m in matches for k, v in m.groupdict().items()
         if v is not None}

    # Convert to standard units.
    d['cdepth'] *= km
    d['vrup'] *= km
    d['moment'] *= 10**(d['moment_exp'])

    # Consistency check: read magnitude must match the magnitude derived
    # from the read moment.
    atol = 0.01
    try:
        num.testing.assert_allclose(
            pmt.moment_to_magnitude(d['moment']),
            d['mag'],
            atol=atol)
    except AssertionError:
        # BUG FIX: the exception was instantiated but never raised before.
        raise IDSUnpackError(
            'Read magnitude and moment converted to magnitude differ by more '
            'than {}'.format(atol))

    return (d['moment'], d['mag'], int(d['n_subfaults']), d['clat'], d['clon'],
            d['cdepth'], d['vrup'])
def load_ids_config_file(fn):
    '''Load all information from ids config file

    :param fn: filename of the ids config file
    :type fn: str

    :returns: dictionary of all settings, files, paths and parameters set in
        the given config
    :rtype: dict
    '''
    def string_or_float(string):
        # Return int if the token parses as one, else float, else the raw
        # string with single quotes stripped.
        try:
            a = float(string)
            try:
                return int(string)
            except ValueError:
                return a
        except (ValueError, TypeError):
            return string.replace("'", '')

    data = []
    with open(fn) as f:
        for line in f:
            if line.startswith('#'):
                continue
            data.append([string_or_float(d) for d in line.strip().split()])

    # The config is a fixed-order, line-based format: each row below is
    # addressed by its position among the non-comment lines.
    d = {}
    d['time'] = putil.stt('{}-{}-{} {}:{}:{}'.format(*data[0]))
    d['hypocenter_lat'] = data[1][0]
    d['hypocenter_lon'] = data[1][1]
    d['hypocenter_depth'] = data[1][2] * km  # km -> m
    d['magnitude'] = data[2][0]
    d['wf_path'] = data[3][0]
    d['wf_distance_range'] = tuple(d * km for d in data[4])  # km -> m
    d['wf_static_weight'] = data[5][0]
    d['gf_dyn_path'] = data[6][0]
    d['gf_dyn_synthetic_filter'] = tuple(data[7])
    d['gnss_sites'] = data[8][0]
    d['gnss_path'] = data[8][1]
    d['gnss_weight'] = data[9][0]
    d['insar_grids'] = data[10][0]
    d['insar_path'] = data[10][1]
    d['insar_weight'] = data[11][0]
    d['gf_geodetic_path'] = data[12][0]
    d['gf_geodetic_filenames'] = tuple(data[13])
    d['idisc'] = data[14][0]
    # idisc == 1: fault plane given by strike/dip/rake;
    # idisc == 0: fault geometry read from a sub fault file (not supported).
    if d['idisc'] == 1:
        d['strike'], d['dip'], d['rake'] = data[15]
    elif d['idisc'] == 0:
        # logger.warn is deprecated; use logger.warning
        logger.warning('Loading from sub fault file not implemented.')
        d['subfault_file'], d['n_subfaults'], d['iref'] = data[15]
    d['niter_max'] = data[16][0]
    d['rundir'] = data[17][0]
    d['o2sdir'] = data[18][0]
    d['ebcdir'] = data[19][0]
    d['eqmp_file'] = data[20][0]
    d['eqstf_file'] = data[21][0]
    d['ptstf_file'] = data[22][0]
    d['rupfront_file'] = data[23][0]
    d['slipmodel_file'] = data[24][0]
    d['wfvariance_file'] = data[25][0]
    d['smstaticoffset_file'] = data[26][0]
    d['gnssfit_file'] = data[27][0]
    d['insarfit_file'] = data[28][0]
    d['nsnapshots'] = data[29][0]

    # All remaining rows must be snapshot file entries.
    if len(data[30:]) != d['nsnapshots']:
        raise IDSUnpackError(
            'Mismatching number of snapshots and given snapshots in config')
    d['snapshot_files'] = [i[0] for i in data[30:]]
    d['snapshot_times'] = num.array([i[1:] for i in data[30:]])
    return d
def load_rectangular_source(
        config,
        lat,
        lon,
        lats,
        lons,
        depths,
        slips,
        strikes,
        dips,
        rakes,
        xs,
        ys,
        patch_length,
        patch_width,
        nx,
        ny,
        mp_n_subfaults,
        mp_vrup,
        deltat,
        patch_stfs,
        patch_stf_times,
        moments,
        rundir,
        *args, **kwargs):
    '''Assemble an :py:class:`ewrica.sources.EwricaIDSSource` for a planar
    (rectangular) fault from the parsed IDS result arrays.

    All patch arrays (``lats``, ``lons``, ``depths``, ``slips``, ``strikes``,
    ``dips``, ``rakes``, ``xs``, ``ys``, ``moments``) are ordered as in the
    slip model file; ``patch_stfs`` has one column per patch. ``config`` is
    the dict from :py:func:`load_ids_config_file`, ``rundir`` the IDS run
    directory used to locate the iteration log.

    :returns: source with one patch per subfault
    :rtype: :py:class:`ewrica.sources.EwricaIDSSource`

    :raises IDSUnpackError: if the nx * ny patch grid does not match the
        subfault count from the EQ_MP file
    '''
    # Uniform patch size: the arrays hold one unique value each.
    patch_length, patch_width = patch_length[0], patch_width[0]
    if nx * ny != mp_n_subfaults:
        raise IDSUnpackError('Patch grid has different size than expected.')
    source_length = nx * patch_length
    source_width = ny * patch_width

    # Hypocenter position on the fault plane in normalized [-1, 1] coords.
    nucleation_x = 2 * (num.abs(
        xs.min() - 0.5 * patch_length) / source_length) - 1
    nucleation_y = 2 * (num.abs(
        ys.min() - 0.5 * patch_width) / source_width) - 1

    # Distance to new source reference point (at fault top center)
    anchor = 'top'
    strike, dip = config['strike'], config['dip']
    anch_x, anch_y = map_anchor[anchor]
    dx = (anch_x - nucleation_x) * 0.5 * source_length  # along strike shift
    dy = (anch_y - nucleation_y) * 0.5 * source_width  # downdip shift
    anch_depth = config['hypocenter_depth'] + num.sin(d2r*dip) * dy
    anch_northshift = num.cos(d2r*strike) * dx - \
        num.sin(d2r*strike) * num.cos(d2r*dip) * dy
    anch_eastshift = num.sin(d2r*strike) * dx + \
        num.cos(d2r*strike) * num.cos(d2r*dip) * dy

    anch_lat, anch_lon = pod.ne_to_latlon(
        lat, lon, anch_northshift, anch_eastshift)
    norths, easts = pod.latlon_to_ne_numpy(anch_lat, anch_lon, lats, lons)

    patches = []
    for ip in range(lats.shape[0]):
        stf = EwricaIDSSTF(deltat=deltat, amplitudes=patch_stfs[:, ip])
        stf_moment = stf.moment
        time = None
        if moments[ip] > 0.:
            # Rupture onset of the patch = first sample with moment release.
            time = patch_stf_times[patch_stfs[:, ip] > 0.][0] + config['time']

            # Based on maximum decimal digit of pt_stf file
            atol = 2. * 10**(int(num.floor(num.log10(moments[ip]))) - 3)
            try:
                num.testing.assert_allclose(
                    moments[ip], stf_moment, atol=atol)
            except AssertionError:
                # logger.warn is deprecated; use logger.warning
                logger.warning(
                    'Summed patch source time function (moment={:e} Nm) '
                    'differs more than the maximum absolut tolerance of {:e} '
                    'Nm from the moment given in the slip model file ({:e} '
                    'Nm). The source time function is rescaled to slip model '
                    'moment.'.format(stf_moment, moments[ip], atol))
            # Rescale STF so its integral matches the slip model moment.
            stf.amplitudes *= moments[ip] / stf_moment

        patches.append(EwricaIDSPatch(
            lat=anch_lat,
            lon=anch_lon,
            time=time,
            depth=depths[ip],
            north_shift=norths[ip],
            east_shift=easts[ip],
            x_coordinate=xs[ip] - dx,
            y_coordinate=ys[ip] - dy,
            length=patch_length,
            width=patch_width,
            slip=slips[ip],
            strike=strikes[ip],
            dip=dips[ip],
            rake=rakes[ip],
            anchor='center',
            stf=stf))

    source = EwricaIDSSource(
        lat=anch_lat,
        lon=anch_lon,
        depth=anch_depth,
        anchor=anchor,
        length=source_length,
        width=source_width,
        time=config['time'],
        strike=config['strike'],
        dip=config['dip'],
        rake=config['rake'],
        nucleation_x=nucleation_x,
        nucleation_y=nucleation_y,
        patches=patches,
        nx=nx,
        ny=ny,
        velocity=mp_vrup,
        misfit_log=load_idslog_file(op.join(rundir, 'idslog_iteration.dat')))
    return source
def load_curved_source(
        config,
        lats,
        lons,
        depths,
        slips,
        strikes,
        dips,
        rakes,
        xs,
        ys,
        patch_length,
        patch_width,
        mp_vrup,
        deltat,
        patch_stfs,
        patch_stf_times,
        moments,
        rundir,
        *args, **kwargs):
    '''Assemble an :py:class:`ewrica.sources.EwricaIDSSource` for a curved
    fault (variable patch geometry) from the parsed IDS result arrays.

    Unlike :py:func:`load_rectangular_source`, per-patch length, width,
    strike, dip and rake are kept; the source reference point is the
    geographic midpoint of all patches and no overall fault plane is
    defined (``strike``/``dip``/``rake``/``nucleation_*`` are ``None``).

    :returns: source with one patch per subfault
    :rtype: :py:class:`ewrica.sources.EwricaIDSSource`
    '''
    anch_lat, anch_lon = pod.geographic_midpoint(lats, lons)
    norths, easts = pod.latlon_to_ne_numpy(anch_lat, anch_lon, lats, lons)

    patches = []
    for ip in range(lats.shape[0]):
        stf = EwricaIDSSTF(deltat=deltat, amplitudes=patch_stfs[:, ip])
        stf_moment = stf.moment
        time = None
        if moments[ip] > 0.:
            # Rupture onset of the patch = first sample with moment release.
            time = patch_stf_times[patch_stfs[:, ip] > 0.][0] + config['time']

            # Based on maximum decimal digit of pt_stf file
            atol = 2. * 10**(int(num.floor(num.log10(moments[ip]))) - 3)
            try:
                num.testing.assert_allclose(
                    moments[ip], stf_moment, atol=atol)
            except AssertionError:
                # logger.warn is deprecated; use logger.warning
                logger.warning(
                    'Summed patch source time function (moment={:e} Nm) '
                    'differs more than the maximum absolut tolerance of {:e} '
                    'Nm from the moment given in the slip model file ({:e} '
                    'Nm). The source time function is rescaled to slip model '
                    'moment.'.format(stf_moment, moments[ip], atol))
            # Rescale STF so its integral matches the slip model moment.
            stf.amplitudes *= moments[ip] / stf_moment

        patches.append(EwricaIDSPatch(
            lat=anch_lat,
            lon=anch_lon,
            time=time,
            depth=depths[ip],
            north_shift=norths[ip],
            east_shift=easts[ip],
            x_coordinate=xs[ip],
            y_coordinate=ys[ip],
            length=patch_length[ip],
            width=patch_width[ip],
            slip=slips[ip],
            strike=strikes[ip],
            dip=dips[ip],
            rake=rakes[ip],
            anchor='center',
            stf=stf))

    source = EwricaIDSSource(
        curved=True,
        lat=anch_lat,
        lon=anch_lon,
        depth=0.,
        anchor='center',
        length=0.,
        width=0.,
        time=config['time'],
        strike=None,
        dip=None,
        rake=None,
        nucleation_x=None,
        nucleation_y=None,
        patches=patches,
        nx=None,
        ny=None,
        velocity=mp_vrup,
        misfit_log=load_idslog_file(op.join(rundir, 'idslog_iteration.dat')))
    return source
def load_ids_source(config_fn):
    '''Load all information/results from ids file and run dir into source

    :param config_fn: filename of the ids config file
    :type config_fn: str

    :returns: Ewrica IDS Source object with as much information converted as
        possible
    :rtype: :py:class:`ewrica.sources.EwricaIDSSource`

    :raises IDSUnpackError: if the different IDS output files are mutually
        inconsistent
    '''
    config = load_ids_config_file(config_fn)
    rundir = op.join(op.dirname(config_fn), config['rundir'])
    lat = config['hypocenter_lat']
    lon = config['hypocenter_lon']

    slipmodel = load_slipmodel_file(op.join(rundir, config['slipmodel_file']))
    lats = slipmodel[0]
    lons = slipmodel[1]
    depths = slipmodel[2]
    xs = slipmodel[3]
    ys = slipmodel[4]
    lns = slipmodel[5]
    wds = slipmodel[6]
    strikes = slipmodel[7]
    dips = slipmodel[8]
    rakes = slipmodel[9]
    slips = slipmodel[10]
    moments = slipmodel[11]

    patch_stf_times, patch_stfs = load_patch_stf_file(
        op.join(rundir, config['ptstf_file']))
    mp_moment, mp_mag, mp_n_subfaults, mp_clat, mp_clon, mp_cdepth, mp_vrup = \
        load_earthquake_mp_file(op.join(rundir, config['eqmp_file']))

    if (lats.shape[0] != patch_stfs.shape[1] or
            lats.shape[0] != mp_n_subfaults):
        raise IDSUnpackError(
            'Mismatching number of patches between output files.')

    # The patch STF sampling must be uniform (within tolerance).
    deltat = num.unique(num.diff(patch_stf_times))
    if deltat.shape[0] > 1:
        deltat_mean = num.diff(patch_stf_times).mean()
        try:
            atol = 0.002  # Based on maximum decimal digit of pt_stf file
            num.testing.assert_allclose(deltat_mean, deltat, atol=atol)
            deltat = deltat_mean
            # logger.warn is deprecated; use logger.warning
            logger.warning(
                'patch source time function deltat is not uniform, '
                'but within tolerance of {} s.'.format(atol))
        except AssertionError:
            raise IDSUnpackError('Sampling rate of the patch source time '
                                 'function is not uniform.')

    nx, ny = num.unique(xs).shape[0], num.unique(ys).shape[0]

    kwargs_source = dict(
        config=config,
        lat=lat,
        lon=lon,
        lats=lats,
        lons=lons,
        depths=depths,
        slips=slips,
        strikes=strikes,
        dips=dips,
        rakes=rakes,
        xs=xs,
        ys=ys,
        patch_length=lns,
        patch_width=wds,
        nx=nx,
        ny=ny,
        mp_n_subfaults=mp_n_subfaults,
        mp_vrup=mp_vrup,
        deltat=deltat,
        patch_stfs=patch_stfs,
        patch_stf_times=patch_stf_times,
        moments=moments,
        rundir=rundir)

    # The fault is handled as curved as soon as any patch geometry parameter
    # varies between patches.
    curved = False
    # BUG FIX: was `len(set(rakes))` (always >= 1, i.e. always truthy), which
    # made every source load as curved; must be `len(set(rakes)) > 1`.
    if (len(set(strikes)) > 1 or len(set(dips)) > 1 or
            len(set(rakes)) > 1 or
            len(set(lns)) > 1 or len(set(wds)) > 1):
        curved = True

    if curved:
        source = load_curved_source(**kwargs_source)
    else:
        kwargs_source.update(dict(
            patch_length=num.unique(lns),
            patch_width=num.unique(wds)))
        source = load_rectangular_source(**kwargs_source)

    # Based on maximum decimal digit of eq_mp file moment
    atol = 2. * 10**(int(num.floor(num.log10(mp_moment))) - 3)
    try:
        num.testing.assert_allclose(source.moment, mp_moment, atol=atol)
    except AssertionError:
        raise IDSUnpackError(
            'Cumulative source moment (moment={:e} Nm) differs more than the '
            'maximum absolut tolerance of {:e} Nm from the moment given in '
            'the eq_mp file ({:e} Nm). The source '
            'time function is rescaled to slip model moment.'.format(
                source.moment, mp_moment, atol))

    times, moment_rates, moments = load_earthquake_stf_file(
        op.join(rundir, config['eqstf_file']))
    if times.shape[0] != source.stf.n_samples:
        raise IDSUnpackError(
            'Number of samples in EQ_STF and PT_STF files differs.')

    # Based on maximum decimal digit of eq_stf file times
    atol = 0.001
    try:
        num.testing.assert_allclose(times, source.stf.times, atol=atol)
    except AssertionError:
        raise IDSUnpackError('Sampling times of the patch and EQ_STF source '
                             'time function differ.')

    # Based on maximum decimal digit of eq_stf file moments
    stf_moment_rates = source.stf.amplitudes
    stf_cum_moment = source.stf.cumulative_moment_function

    # Cross-check the summed patch STFs against the event STF file,
    # sample by sample (skipping samples without moment release).
    for i in range(times.shape[0]):
        if moment_rates[i] == 0.:
            continue
        atol = 1. * 10**(int(num.floor(num.log10(moment_rates[i]))) - 2)
        try:
            num.testing.assert_allclose(
                moment_rates[i], stf_moment_rates[i], atol=atol)
        except AssertionError:
            raise IDSUnpackError(
                'EQ STF and calculated source time function differ by more '
                'than {:e} Nm/s at {} s'.format(atol, times[i]))

    for i in range(times.shape[0] - 1):
        if moments[i+1] == 0.:
            continue
        atol = 2. * 10**(int(num.floor(num.log10(moments[i+1]))) - 3)
        try:
            num.testing.assert_allclose(
                moments[i+1], stf_cum_moment[i], atol=atol)
        except AssertionError:
            raise IDSUnpackError(
                'EQ STF and calculated cumulative moment function differ by '
                'more than {:e} Nm/s at {} s'.format(atol, times[i]))

    return source
def load_wfmisfit_file(fn):
    '''Load waveform variance misfit file

    :param fn: filename of the WFMisfitvar.dat file
    :type fn: str

    :returns: station nsl codes, lats, lons, misfit variances of the velocity
        in E, N and Z
    :rtype: str, float, float, float, float, float
    '''
    _validate_wf_misfit_file(fn)
    # Station codes are strings, so read them separately from the numbers.
    codes = num.genfromtxt(
        fn,
        unpack=True,
        skip_header=1,
        usecols=(0),
        dtype=str)
    numeric = num.genfromtxt(
        fn,
        unpack=True,
        skip_header=1,
        usecols=(1, 2, 3, 4, 5))
    lat, lon, mf_e, mf_n, mf_z = numeric
    return codes, lat, lon, mf_e, mf_n, mf_z
def load_obssyn_file(fn, config_fn):
    '''Load one ObsSyn.dat file

    :param fn: filename of the obssyn file
    :type fn: str
    :param config_fn: filename of the ids config file
    :type config_fn: str

    :returns: observed and synthetic traces for the E, N and Z channels of
        the station the file belongs to
    :rtype: :py:class:`ewrica.io.ids.WaveformResultList`
    '''
    # Near- and farfield files share the column layout but differ in the
    # header names; try the nearfield header first.
    try:
        _validate_obssyn_nearfield_file(fn)
        nearfield = True
    except IDSUnpackError:
        _validate_obssyn_farfield_file(fn)
        nearfield = False
    config = load_ids_config_file(config_fn)
    time_h, obs_e, obs_n, syn_e, syn_n, time_z, obs_z, syn_z = num.genfromtxt(
        fn, unpack=True, skip_header=1)
    # Station codes are encoded in the filename as '<net.sta.loc>_ObsSyn.dat'
    try:
        station = re.match(
            r'(?P<sta>[\w\.]+)_ObsSyn.dat',
            op.basename(fn)).group('sta')
    except AttributeError:
        raise IDSUnpackError('No dat file {} found.'.format(fn))
    net, sta, loc = station.split('.')
    if nearfield:
        # Nearfield files must share one common time vector for horizontal
        # and vertical components.
        if (time_h != time_z).any():
            raise IDSUnpackError(
                'Time vectors need to be equal in {}'.format(fn))
    # Trace start times are relative to the event origin time.
    time = config['time']
    tmin_h = time + time_h[0]
    tmin_z = time + time_z[0]
    deltat = num.mean(num.diff(time_h))
    stations = load_stationinfo_file(
        fn=op.join(
            op.dirname(config_fn),
            config['wf_path'],
            'StationInfo.dat'))
    # Match the station coordinates from the StationInfo file.
    station = [s for s in stations
               if s.nsl() == (net, sta, loc)][0]

    def generate_traces(obs, syn, channel, tmin):
        # Wrap one observed/synthetic sample pair into a WaveformResult at
        # the station location.
        return WaveformResult(
            lat=station.lat,
            lon=station.lon,
            north_shift=station.north_shift,
            east_shift=station.east_shift,
            depth=station.depth,
            elevation=station.elevation,
            observed=Trace(
                tmin=tmin,
                deltat=deltat,
                ydata=obs,
                network=net,
                station=sta,
                location=loc,
                channel=channel),
            synthetic=Trace(
                tmin=tmin,
                deltat=deltat,
                ydata=syn,
                network=net,
                station=sta,
                location=loc,
                channel=channel),
            nslc=(net, sta, loc, channel))

    results = []
    results.append(generate_traces(obs_e, syn_e, 'E', tmin_h))
    results.append(generate_traces(obs_n, syn_n, 'N', tmin_h))
    results.append(generate_traces(obs_z, syn_z, 'Z', tmin_z))
    return WaveformResultList(results=results)
def load_obssyn(config_fn):
    '''Load all ObsSyn.dat files and return them as waveform results

    Stations listed in the StationInfo file without an ObsSyn file are
    included as results without traces. Per-channel misfits from the
    waveform variance file are attached where available.

    :param config_fn: filename of the ids config file
    :type config_fn: str

    :returns: waveform results for all stations
    :rtype: :py:class:`ewrica.io.ids.WaveformResultList`
    '''
    config = load_ids_config_file(config_fn)
    rundir = op.join(op.dirname(config_fn), config['rundir'])
    wfmisfit_fn = op.join(rundir, config['wfvariance_file'])
    stations_mf, _, _, mf_e, mf_n, mf_z = load_wfmisfit_file(wfmisfit_fn)
    misfits = dict(
        E=mf_e,
        N=mf_n,
        Z=mf_z)

    results = []
    for fn in os.listdir(op.join(rundir, config['o2sdir'])):
        results.append(load_obssyn_file(
            op.join(rundir, config['o2sdir'], fn), config_fn))

    # (net, sta, loc) codes already covered by loaded ObsSyn files
    # (was a dict mapping each key to itself; a set expresses the intent).
    nsl_loaded = {
        res.nslc[:3]
        for res_list in results for res in res_list.results}

    stations_all = load_stationinfo_file(
        fn=op.join(
            op.dirname(config_fn),
            config['wf_path'],
            'StationInfo.dat'))
    stations_missing = [s for s in stations_all if s.nsl() not in nsl_loaded]
    for station in stations_missing:
        results.append(
            WaveformResultList(
                results=[WaveformResult(
                    lat=station.lat,
                    lon=station.lon,
                    north_shift=station.north_shift,
                    east_shift=station.east_shift,
                    depth=station.depth,
                    elevation=station.elevation,
                    observed=None,
                    synthetic=None,
                    nslc=(
                        station.network,
                        station.station,
                        station.location,
                        channel)) for channel in 'E N Z'.split()]))

    waveform_result = WaveformResultList.primitive_merge(results)
    for result in waveform_result.results:
        idx = num.where(stations_mf == '.'.join(result.nslc[:3]))[0]
        if idx.size != 0:
            # BUG FIX: num.asscalar was removed in NumPy >= 1.23;
            # take the first matching index explicitly instead.
            result.misfit = misfits[result.nslc[3]][int(idx[0])]

    return waveform_result
def load_obssyn_gnss(config_fn):
    '''Load GNSS fit.dat file and return them as GNSScampaign

    :param config_fn: filename of the ids config file
    :type config_fn: str

    :returns: GNSScampaign with observed data, GNSScampaign with synthetic
        data
    :rtype: :py:class:`pyrocko.model.gnss.GNSSCampaign`,
        :py:class:`pyrocko.model.gnss.GNSSCampaign`
    '''
    config = load_ids_config_file(config_fn)
    rundir = op.join(op.dirname(config_fn), config['rundir'])
    fn = op.join(rundir, config['gnssfit_file'])
    _validate_obssyn_gnss_file(fn)

    camp_obs = GNSSCampaign(name='observation')
    camp_syn = GNSSCampaign(name='synthetic')

    def make_station(lat, lon, east, north, up):
        # One GNSS station with static displacement components and no
        # uncertainties (sigma=0).
        return GNSSStation(
            code='',
            lat=lat,
            lon=lon,
            north=GNSSComponent(
                shift=north,
                sigma=0.),
            east=GNSSComponent(
                shift=east,
                sigma=0.),
            up=GNSSComponent(
                shift=up,
                sigma=0.))

    with open(fn) as f:
        next(f)  # skip the header line
        for line in f:
            vals = [float(v) for v in line.strip().split()]
            # Columns: lat, lon, Ue/Un/Up observed, Ue/Un/Up synthetic.
            camp_obs.add_station(
                make_station(vals[0], vals[1], vals[2], vals[3], vals[4]))
            camp_syn.add_station(
                make_station(vals[0], vals[1], vals[5], vals[6], vals[7]))
    return camp_obs, camp_syn
def fdip(dipup, x, z):
    '''
    Calculate dip angle [deg] at the bottom edge of a curved fault.

    It assumes that the dip varies linearly with depth.

    Last modified: Potsdam, March, 2013, by R. Wang.
    Translated to python: Potsdam, Feb. 2021, by M. Metz.

    :param dipup: Dip at upper edge [deg].
    :type dipup: float
    :param x: Horizontal length of the fault in [m].
    :type x: float
    :param z: Depth difference between bottom and top edge of the fault in
        [m].
    :type z: float

    :returns: Dip angle at the bottom edge in [deg].
    :rtype: float
    '''
    max_iter = 10000
    eps = 1e-6
    rad = num.pi / 180.

    if x <= 0.:
        raise ValueError('x is not > 0.')
    if z <= 0.:
        raise ValueError('z is not > 0.')

    # Dip of the equivalent planar fault and dip at the top edge [rad].
    di_planar = num.arctan(z / x)
    di_top = dipup * rad
    if num.abs(di_top - di_planar) <= eps:
        return di_top / rad

    def residual(di):
        # Root of this function is the bottom-edge dip angle.
        return di - di_top - (z / x) * num.log(
            num.sin(di) / num.sin(di_top))

    if di_top >= di_planar:
        # listric fault: bottom dip bracketed in (0, planar dip)
        left, right = 0., di_planar
        f_left = 1.e6
        f_right = residual(right)
    else:
        # opposite listric fault: bottom dip bracketed in (planar dip, 90 deg)
        left, right = di_planar, 2. * num.arctan(1.)
        f_left = residual(left)
        f_right = residual(right)

    if f_left * f_right > 0.:
        raise ValueError('Error in fdip: a problem appears.')

    # Bisection on the residual function.
    mid = 0.5 * (left + right)
    f_mid = residual(mid)
    it = 0
    while num.abs(f_mid) > eps and it < max_iter:
        it += 1
        if f_mid * f_left <= 0.:
            right = mid
            f_right = f_mid
        else:
            left = mid
            f_left = f_mid
        mid = 0.5 * (left + right)
        f_mid = residual(mid)

    return mid / rad
def reweft(
        n_wefts,
        ndgrid,
        top_lat,
        top_lon,
        bottom_lat,
        bottom_lon,
        dip,
        patch_size,
        delta_depth):
    '''
    Recalculate number of wefts for faults.

    The top and bottom edge polylines are densified at one tenth of the
    wished patch size and then resampled to wefts which are (roughly)
    equally spaced along strike.

    Last modified: Potsdam, March, 2013, by R. Wang.
    Translated to python: Potsdam, Feb. 2021, by M. Metz.

    :param n_wefts: Number of wefts.
    :type n_wefts: int

    :param ndgrid: If set to 1, possible uniform patch size, else possibly
        uniform number of patches downdip.
    :type ndgrid: float

    :param top_lat: Top edge node latitudes in [deg].
    :type top_lat: :py:class:`numpy.ndarray`

    :param top_lon: Top edge node longitudes in [deg].
    :type top_lon: :py:class:`numpy.ndarray`

    :param bottom_lat: Bottom edge node latitudes in [deg].
    :type bottom_lat: :py:class:`numpy.ndarray`

    :param bottom_lon: Bottom edge node longitudes in [deg].
    :type bottom_lon: :py:class:`numpy.ndarray`

    :param dip: Dip angles of the wefts in [deg].
    :type dip: :py:class:`numpy.ndarray`

    :param patch_size: Wished patch size in [m].
    :type patch_size: float

    :param delta_depth: Depth difference between bottom and top edge in [m].
    :type delta_depth: float

    :returns: Number of wefts, ND grid, top edge latitudes in [deg], top edge
        longitudes in [deg], bottom edge latitudes in [deg], bottom edge
        longitudes in [deg], dip angles in [deg].
    :rtype: int, float, :py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`,
        :py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`,
        :py:class:`numpy.ndarray`
    '''
    n = n_wefts
    nd = ndgrid

    # Work on copies so the caller's arrays stay untouched.
    lat1 = num.copy(top_lat)
    lon1 = num.copy(top_lon)
    lat2 = num.copy(bottom_lat)
    lon2 = num.copy(bottom_lon)

    step = patch_size
    z = delta_depth

    # Densification step: one tenth of the wished patch size.
    delta = 0.1 * step

    k = 0
    x1 = [lat1[0]]
    y1 = [lon1[0]]
    x2 = [lat2[0]]
    y2 = [lon2[0]]
    d = [dip[0]]
    lenk = [0.]  # cumulative along-strike length in [m]

    # Linearly interpolate coordinates and dip between neighbouring input
    # wefts so that samples are at most ``delta`` apart along strike.
    for i in range(1, n):
        x, y = disazi(lat1[i - 1], lon1[i - 1], lat1[i], lon1[i])
        ln = num.sqrt(x**2 + y**2)
        nl = 1 + int(ln / delta)

        for il in range(nl):
            k = k + 1
            x1.append(lat1[i - 1] + (lat1[i] - lat1[i - 1]) * (il + 1.) / nl)
            y1.append(lon1[i - 1] + (lon1[i] - lon1[i - 1]) * (il + 1.) / nl)
            x2.append(lat2[i - 1] + (lat2[i] - lat2[i - 1]) * (il + 1.) / nl)
            y2.append(lon2[i - 1] + (lon2[i] - lon2[i - 1]) * (il + 1.) / nl)
            d.append(dip[i - 1] + (dip[i] - dip[i - 1]) * (il + 1.) / nl)
            # lenk[k - 1] is the previously appended cumulative length.
            lenk.append(lenk[k - 1] + ln / nl)

    # New number of wefts, spaced at roughly the wished patch size.
    n = 2 + int(lenk[-1] / step)
    delta = lenk[-1] / float(n - 1)

    lat1 = num.ones(n) * lat1[0]
    lon1 = num.ones(n) * lon1[0]
    lat2 = num.ones(n) * lat2[0]
    lon2 = num.ones(n) * lon2[0]
    dip = num.ones(n) * dip[0]

    # Nearest-sample resampling: for each new weft take the first
    # densified sample at or beyond the wanted along-strike distance.
    for i in range(1, n - 1):
        for j in range(1, k):
            if lenk[j] >= i * delta:
                lat1[i] = x1[j]
                lon1[i] = y1[j]
                lat2[i] = x2[j]
                lon2[i] = y2[j]
                dip[i] = d[j]
                break

    # Pin the last weft to the last densified sample.
    lat1[-1] = x1[k]
    lon1[-1] = y1[k]
    lat2[-1] = x2[k]
    lon2[-1] = y2[k]
    dip[-1] = d[k]

    if nd == 1:
        # Uniform patch size: derive the downdip patch count from the
        # maximum downdip width over all wefts.
        widmax = 0.

        for i in range(n):
            x, y = disazi(lat1[i], lon1[i], lat2[i], lon2[i])
            x = num.sqrt(x**2 + y**2)

            diptop = dip[i]
            dipbtm = fdip(dip[i], x, z)

            # Downdip width for a linearly depth-dependent dip.
            wid = (z / ((dipbtm - diptop) * d2r)) * \
                num.log(
                    num.tan(0.5 * dipbtm * d2r) /
                    num.tan(0.5 * diptop * d2r))

            widmax = num.nanmax((widmax, wid))  # TODO check NAN case

        nd = 1 + int(widmax / step)
    else:
        nd = 0

    return n, nd, lat1, lon1, lat2, lon2, dip
def disazi(lat1, lon1, lat2, lon2):
    '''
    North/east cartesian offsets in [m] of point 2 relative to point 1.

    Solves the spherical triangle pole - point 1 - point 2 on a sphere
    of radius :py:data:`pyrocko.cake.earthradius`; the sign of the
    longitude difference decides which interior angle is reflexed.

    :param lat1: Reference point latitude in [deg].
    :param lon1: Reference point longitude in [deg].
    :param lat2: Target point latitude in [deg].
    :param lon2: Target point longitude in [deg].

    :returns: Northward and eastward offsets in [m].
    :rtype: float, float
    '''
    lon_ref = lon1 * d2r
    lon_tgt = lon2 * d2r

    # Map longitudes into [0, 2*pi).
    if lon_ref < 0.:
        lon_ref += num.pi * 2.
    if lon_tgt < 0.:
        lon_tgt += num.pi * 2.

    # Triangle sides adjacent to the pole are the colatitudes.
    side_b = 0.5 * num.pi - lat1 * d2r
    side_c = 0.5 * num.pi - lat2 * d2r

    # Angle at the pole and handedness of the station azimuth.
    # NOTE(review): the two wrap branches are asymmetric
    # (pi - dlon vs. 2*pi - dlon) -- kept exactly as in the original
    # translation.
    if lon_tgt > lon_ref:
        dlon = lon_tgt - lon_ref
        if dlon <= num.pi:
            hand = 1
        else:
            dlon = num.pi - dlon
            hand = -1
    else:
        dlon = lon_ref - lon_tgt
        if dlon <= num.pi:
            hand = -1
        else:
            dlon = num.pi * 2. - dlon
            hand = 1

    # Angular distance via the spherical law of cosines, clamped to
    # avoid arccos domain errors from rounding.
    cos_a = num.cos(side_b) * num.cos(side_c) + \
        num.sin(side_b) * num.sin(side_c) * num.cos(dlon)
    side_a = num.arccos(num.sign(cos_a) * num.min([num.abs(cos_a), 1.]))
    dist = side_a * earthradius

    if side_a * side_b * side_c == 0.:
        angle_b = 0.
        angle_c = 0.
    else:
        # Half-angle formulas with the semi-perimeter; sides are clamped
        # to it for numerical safety.
        half = 0.5 * (side_a + side_b + side_c)
        side_a = num.min([side_a, half])
        side_b = num.min([side_b, half])
        side_c = num.min([side_c, half])

        angle_c = 2. * num.arcsin(num.min([
            1., num.sqrt(
                num.sin(half - side_a) * num.sin(half - side_b) /
                (num.sin(side_a) * num.sin(side_b)))]))
        angle_b = 2. * num.arcsin(num.min([
            1., num.sqrt(
                num.sin(half - side_a) * num.sin(half - side_c) /
                (num.sin(side_a) * num.sin(side_c)))]))

    if hand == 1:
        angle_b = num.pi * 2. - angle_b
    else:
        angle_c = num.pi * 2. - angle_c

    # Cartesian coordinates of the station.
    xnorth = dist * num.cos(angle_c)
    yeast = dist * num.sin(angle_c)

    return xnorth, yeast
class FaultSegment(Object):
    '''
    Curved fault segment defined by top and bottom edge node polylines.

    Top and bottom edges are given as node coordinates at constant
    ``top_depth`` and ``bottom_depth``. The dip can be a single value
    (constant along strike) or one value per edge node.
    :py:meth:`reweft` resamples the edges to equally spaced wefts.
    '''

    top_depth = Float.T(
        default=0.,
        help='top edge depth in [m]')

    # dtype is the builtin float: the ``num.float`` alias was deprecated
    # in NumPy 1.20 and removed in 1.24.
    top_lat = Array.T(
        default=num.array([]),
        shape=(None,),
        dtype=float,
        serialize_as='list')

    top_lon = Array.T(
        default=num.array([]),
        shape=(None,),
        dtype=float,
        serialize_as='list')

    bottom_depth = Float.T(
        default=0.,
        help='bottom edge depth in [m]')

    bottom_lat = Array.T(
        default=num.array([]),
        shape=(None,),
        dtype=float,
        serialize_as='list')

    bottom_lon = Array.T(
        default=num.array([]),
        shape=(None,),
        dtype=float,
        serialize_as='list')

    patch_size = Float.T(
        default=10000.,
        help='approximate patch size in [m]')

    dip__ = Array.T(
        default=num.array([]),
        shape=(None,),
        dtype=float,
        serialize_as='list')

    rake = Float.T(
        default=0.,
        help='rake on the subfaults')

    n_wefts__ = Int.T(
        optional=True,
        help='number (2 <= nwft <= nwft_max) of reference wefts defining the '
             'curved fault surface, which are not necessarily equidistant but '
             'approximately perpendicular to and arranged along the strike.')

    gridding = StringChoice.T(
        default='uniform_size',
        choices=('uniform_size', 'uniform_number'),
        help='selection of grid mesh rule: "uniform_size" - possibly uniform '
             'patch size, "uniform_number" - uniform number of patches along '
             'dip')

    def __init__(self, *args, **kwargs):
        Object.__init__(self, *args, **kwargs)

        self._ndgrid = None

    def check(self):
        '''
        Validate consistency of edge coordinates, dips and depths.

        :returns: ``True`` if the segment is consistent.
        :rtype: bool

        :raises ValueError: if the coordinate arrays differ in shape, if
            a per-node dip array does not match the coordinate shape, or
            if the top edge lies below the bottom edge.
        '''
        unique_shapes = set([
            self.top_lat.shape,
            self.top_lon.shape,
            self.bottom_lat.shape,
            self.bottom_lon.shape])

        if len(unique_shapes) > 1:
            raise ValueError(
                'Top and bottom coordinates do not have identical shape')

        if self.dip.shape[0] > 1 and self.dip.shape != self.top_lat.shape:
            raise ValueError(
                'Dips and coordinates do not have identical shape')

        if self.top_depth > self.bottom_depth:
            raise ValueError(
                'Top depth larger than bottom depth')

        return True

    @property
    def ndgrid(self):
        # Numeric flag derived from ``gridding``: 1 - uniform patch size,
        # 0 - uniform number of patches downdip.
        if self._ndgrid is None:
            self._ndgrid = {
                'uniform_size': 1, 'uniform_number': 0}[self.gridding]

        return self._ndgrid

    @ndgrid.setter
    def ndgrid(self, ndgrid):
        self._ndgrid = ndgrid

    @property
    def npoints(self):
        # Number of edge nodes (checked to be equal for all four arrays).
        if self.check():
            return self.top_lat.shape[0]

    @property
    def n_wefts(self):
        n_wefts = self.n_wefts__

        if n_wefts is None:
            n_wefts = self.npoints

        if n_wefts < 2 or n_wefts > self.npoints:
            raise ValueError(
                'Invalid number of wefts: {}. Should be between 2 and '
                '{}.'.format(n_wefts, self.npoints))

        return n_wefts

    @n_wefts.setter
    def n_wefts(self, n_wefts):
        self.n_wefts__ = n_wefts

    @property
    def dip(self):
        return self.dip__

    @dip.setter
    def dip(self, dip):
        # Scalars are wrapped into a one-element array; sequences are
        # converted to 1d float arrays. The previous double-bracket
        # wrapping of lists produced an invalid 2d (1, n) array which
        # slipped through check() and broke broadcasting in reweft().
        if isinstance(dip, float):
            dip = num.array([dip])
        elif isinstance(dip, list):
            dip = num.asarray(dip, dtype=float)

        self.dip__ = dip

    def reweft(self):
        '''
        Resample the segment edges to equally spaced wefts in place.
        '''
        new_data = reweft(
            self.n_wefts,
            self.ndgrid,
            self.top_lat,
            self.top_lon,
            self.bottom_lat,
            self.bottom_lon,
            num.ones(self.npoints) * self.dip,
            self.patch_size,
            self.bottom_depth - self.top_depth)

        self.n_wefts = new_data[0]
        self.ndgrid = new_data[1]
        self.top_lat = new_data[2]
        self.top_lon = new_data[3]
        self.bottom_lat = new_data[4]
        self.bottom_lon = new_data[5]
        self.dip = new_data[6]
class Fault(Object):
    '''
    Container for a set of independent curved fault segments.
    '''

    segments = List.T(
        FaultSegment.T(),
        default=[],
        help='List of independent fault segments')

    @property
    def n_subfaults(self):
        # One subfault per independent segment.
        return len(self.segments)
class DiscretizedPatch(Object):
    '''
    Rectangular patch resulting from fault discretization.

    Lengths are stored in [m]; :py:meth:`DiscretizedFault.dump_ids`
    converts them to [km] on export (it divides depth, length and width
    by ``km``).
    '''

    lat = Float.T(
        default=0.,
        help='center latitude in [degree]')

    lon = Float.T(
        default=0.,
        help='center longitude in [degree]')

    depth = Float.T(
        default=0.,
        help='center depth in [m]')

    strike = Float.T(
        default=0.,
        help='strike in [degree]')

    dip = Float.T(
        default=0.,
        help='dip in [degree]')

    rake = Float.T(
        default=0.,
        help='rake in [degree]')

    length = Float.T(
        default=0.,
        help='length (along strike) in [m]')

    width = Float.T(
        default=0.,
        help='width (downdip) in [m]')
# Column headers written by DiscretizedFault.dump_ids; the order must
# match the value order formatted there.
header_patch_file = (
    'lat[deg] lon[deg] depth[km] length[km] width[km] '
    'strike[deg] dip[deg] rake[deg]').split()
class DiscretizedFault(Object):
    '''
    Collection of rectangular patches approximating a (curved) fault.
    '''

    patches = List.T(
        DiscretizedPatch.T(),
        default=[],
        help='list of discretized patches')

    @property
    def n_patches(self):
        # Number of discretized patches.
        return len(self.patches)

    def dump_ids(self, filename=None):
        '''
        Format the patches as a fixed-width IDS patch table.

        One header row (``header_patch_file``) followed by one row per
        patch; depth, length and width are converted from [m] to [km].

        :param filename: If given, write the table to this file instead
            of returning it.
        :type filename: str, optional

        :returns: The formatted table when no filename is given,
            ``None`` otherwise.
        :rtype: str or None
        '''
        ncols = len(header_patch_file)
        head_fmt = '{:>12s}' * ncols
        row_fmt = '{:>12.4f}' * ncols

        lines = [head_fmt.format(*header_patch_file)]
        for patch in self.patches:
            lines.append(row_fmt.format(
                patch.lat,
                patch.lon,
                patch.depth / km,
                patch.length / km,
                patch.width / km,
                patch.strike,
                patch.dip,
                patch.rake))

        table = '\n'.join(lines)

        if filename is None:
            return table

        with open(filename, 'w') as f:
            f.write(table)
def discretize_curved_fault(fault):
    '''
    Translate fault attributes to sdm2013 convention

    Each segment is resampled (:py:meth:`FaultSegment.reweft`) and cut
    into quadrilateral patches between neighbouring wefts; per patch the
    center coordinates, strike, dip, rake, length and width are derived
    from the four corner points.

    Based on SDM2013 code:
    Last modified: Potsdam, Oct, 2008, by R. Wang
    Converted to python: Potsdam, Feb, 2021, by M. Metz

    :param fault: Fault to be discretized.
    :type fault: :py:class:`Fault`

    :returns: The discretized fault.
    :rtype: :py:class:`DiscretizedFault`
    '''
    ns = fault.n_subfaults

    # Resample all segments to equally spaced wefts first.
    for s in fault.segments:
        s.reweft()

    topdep = [s.top_depth for s in fault.segments]
    btmdep = [s.bottom_depth for s in fault.segments]
    patchsize = [s.patch_size for s in fault.segments]
    nwft = [s.n_wefts for s in fault.segments]
    ndgrid = [s.ndgrid for s in fault.segments]

    # Per-segment first/last patch indices (bookkeeping kept from the
    # Fortran original; not used for the returned patches).
    nps1 = num.zeros(ns, dtype=num.int64)
    nps2 = num.zeros(ns, dtype=num.int64)
    nps1[0] = 1
    nps2[0] = 0

    nps = -1  # CHECK

    # Per-patch result accumulators.
    plat = []
    plon = []
    pz = []
    dlen = []
    dwid = []
    parea = []
    strike = []
    dip = []
    rake = []

    for isrc in range(ns):
        # NOTE(review): 'isrc > 1' looks like a leftover of the 1-based
        # Fortran indexing ('isrc > 0' would also chain segment index 1).
        # Harmless for the returned patches since nps1/nps2 are not used
        # below -- confirm before relying on them.
        if isrc > 1:
            nps1[isrc] = nps2[isrc - 1] + 1
            nps2[isrc] = nps1[isrc] - 1

        # Walk along strike over pairs of neighbouring wefts.
        for iwft in range(nwft[isrc] - 1):
            ditop = num.zeros(2)  # top dips of the weft pair [rad]
            xptop = num.zeros(2)  # top edge north offsets [m]
            yptop = num.zeros(2)  # top edge east offsets [m]
            xpbtm = num.zeros(2)
            ypbtm = num.zeros(2)
            lenp = num.zeros(2)   # horizontal top-to-bottom distance [m]
            widp = num.zeros(2)   # downdip widths of the weft pair [m]
            depp = num.zeros(2)   # depth extents [m]
            dibtm = num.zeros(2)  # bottom dips of the weft pair [rad]
            # NOTE(review): redundant re-initialization of xpbtm/ypbtm
            # (already created above).
            xpbtm = num.zeros(2)
            ypbtm = num.zeros(2)

            seg = fault.segments[isrc]
            toplat = seg.top_lat
            toplon = seg.top_lon
            btmlat = seg.bottom_lat
            btmlon = seg.bottom_lon

            # Broadcast a constant dip to all nodes, keep column shape.
            topdip = num.ones((seg.npoints, 1)) * seg.dip \
                if seg.dip.shape[0] == 1 else seg.dip[:, num.newaxis]

            # Local cartesian origin: top node of the first weft.
            lat0 = toplat[iwft]
            lon0 = toplon[iwft]

            # Dips > 90 deg are mirrored for the width/depth math and
            # flipped back when the patch dip is assembled below.
            if topdip[iwft] > 90. or topdip[iwft + 1] > 90.:
                ditop[0] = (180. - topdip[iwft]) * d2r
                ditop[1] = (180. - topdip[iwft + 1]) * d2r
                bigdip = True
            else:
                ditop[0] = topdip[iwft] * d2r
                ditop[1] = topdip[iwft + 1] * d2r
                bigdip = False

            # Corner offsets of the weft pair relative to the origin.
            xptop[0] = 0.
            yptop[0] = 0.
            xptop[1], yptop[1] = disazi(
                lat0, lon0,
                toplat[iwft + 1], toplon[iwft + 1])
            xpbtm[0], ypbtm[0] = disazi(
                lat0, lon0,
                btmlat[iwft], btmlon[iwft])
            xpbtm[1], ypbtm[1] = disazi(
                lat0, lon0,
                btmlat[iwft + 1], btmlon[iwft + 1])

            for i in range(2):
                lenp[i] = num.sqrt(
                    (xpbtm[i] - xptop[i])**2 +
                    (ypbtm[i] - yptop[i])**2)
                depp[i] = btmdep[isrc] - topdep[isrc]
                dibtm[i] = fdip(ditop[i] / d2r, lenp[i], depp[i]) * d2r

                # TODO Potential pitfall
                # (workaround for equal dipvalues not fully tested!)
                if dibtm[i] == ditop[i]:
                    dibtm[i] += 1e-10 * ditop[i]

                # Downdip width for a linearly depth-dependent dip.
                widp[i] = (
                    depp[i] / (dibtm[i] - ditop[i])) * \
                    num.log(num.tan(0.5 * dibtm[i]) / num.tan(0.5 * ditop[i]))

            # Number of corner rows downdip (nwid - 1 patch rows).
            if ndgrid[isrc] <= 0:
                nwid = 2 + int(num.max([widp[0], widp[1]]) / patchsize[isrc])
            else:
                nwid = 1 + ndgrid[isrc]

            # Corner point coordinates on both wefts.
            x = num.zeros((2, nwid))
            y = num.zeros((2, nwid))
            z = num.zeros((2, nwid))

            for i in range(2):
                dw = widp[i] / float(nwid - 1)

                for iw in range(nwid):
                    wid = iw * dw

                    # Depth below top edge at downdip distance wid.
                    zp = \
                        (2. * num.arctan(
                            num.exp(wid * (dibtm[i] - ditop[i]) / depp[i]) *
                            num.tan(0.5 * ditop[i])) - ditop[i]
                         ) * depp[i] / (dibtm[i] - ditop[i])

                    # Horizontal along-width offset at depth zp.
                    lens = num.log(
                        num.sin(
                            ditop[i] + zp * (dibtm[i] - ditop[i]) / depp[i]) /
                        num.sin(ditop[i])) * depp[i] / (dibtm[i] - ditop[i])
                    # print(dibtm[i] - ditop[i])

                    x[i, iw] = xptop[i] + (
                        xpbtm[i] - xptop[i]) * lens / lenp[i]
                    y[i, iw] = yptop[i] + (
                        ypbtm[i] - yptop[i]) * lens / lenp[i]
                    z[i, iw] = topdep[isrc] + zp

            # determine patch parameters
            for iw in range(1, nwid):
                nps += 1
                nps2[isrc] += 1

                # Patch center as mean of the four corner points.
                pn = 0.25 * (x[1, iw] + x[0, iw] + x[1, iw - 1] + x[0, iw - 1])
                pe = 0.25 * (y[1, iw] + y[0, iw] + y[1, iw - 1] + y[0, iw - 1])
                pz.append(0.25 * (
                    z[1, iw] + z[0, iw] + z[1, iw - 1] + z[0, iw - 1]))

                # Strike as mean azimuth of the upper and lower edges.
                strike.append(0.5 * (
                    num.arctan2(
                        y[1, iw - 1] - y[0, iw - 1],
                        x[1, iw - 1] - x[0, iw - 1]) +
                    num.arctan2(
                        y[1, iw] - y[0, iw],
                        x[1, iw] - x[0, iw])) / d2r)

                if strike[nps] < 0.:
                    strike[nps] = strike[nps] + 360.

                # determine two diagonal vectors
                dx1 = x[1, iw - 1] - x[0, iw]
                dy1 = y[1, iw - 1] - y[0, iw]
                dz1 = z[1, iw - 1] - z[0, iw]

                dx2 = x[1, iw] - x[0, iw - 1]
                dy2 = y[1, iw] - y[0, iw - 1]
                dz2 = z[1, iw] - z[0, iw - 1]

                # calculate cross product of the two diagonal vectors
                # amplitude = 2 times the area
                dnx = dy1 * dz2 - dy2 * dz1
                dny = dx2 * dz1 - dx1 * dz2
                dnz = dx1 * dy2 - dx2 * dy1

                parea.append(0.5 * num.sqrt(dnx**2 + dny**2 + dnz**2))

                # Dip from the vertical component of the patch normal.
                dip.append(num.arccos(0.5 * dnz / parea[nps]) / d2r)

                if bigdip:
                    dip[nps] = 180. - dip[nps]

                # rake.append(fault.segments[isrc].rake)
                rake.append(seg.rake)

                # Patch length as mean of upper and lower edge lengths.
                dlen.append(0.5 * (
                    num.sqrt(
                        (x[1, iw] - x[0, iw])**2 +
                        (y[1, iw] - y[0, iw])**2) +
                    num.sqrt(
                        (x[1, iw - 1] - x[0, iw - 1])**2 +
                        (y[1, iw - 1] - y[0, iw - 1])**2)))

                # Width from area and length (quadrilateral, not exactly
                # rectangular).
                dwid.append(parea[nps] / dlen[nps])

                # convert to geographic coordinates
                #
                # spherical triangle:
                # A = pole, B = source position, C = reference position
                sma = num.sqrt(pn**2 + pe**2) / earthradius
                smb = 0.5 * num.pi - lat0 * d2r
                bgc = num.arctan2(pe, pn)

                smc = num.arccos(
                    num.cos(sma) * num.cos(smb) +
                    num.sin(sma) * num.sin(smb) * num.cos(bgc))
                bga = num.arcsin(num.sin(sma) * num.sin(bgc) / num.sin(smc))

                # geographic coordinate of the equivalent point source
                plat.append(90. - smc / d2r)
                plon.append((lon0 + bga / d2r) % 360.)

                di = dip[nps] * d2r

                # upper patch edge exceeds the surface
                if (pz[nps] - 0.5 * dwid[nps] * num.sin(di)) <= 0.:
                    pz[nps] = 0.5001 * dwid[nps] * num.sin(di)

    # Wrap angles from [0, 360) into (-180, 180].
    plon = [lo if (lo % 360) <= 180. else ((lo % 360.) - 360.) for lo in plon]
    strike = [s if (s % 360) <= 180. else ((s % 360.) - 360.) for s in strike]
    rake = [r if (r % 360) <= 180. else ((r % 360.) - 360.) for r in rake]

    return DiscretizedFault(
        patches=[
            DiscretizedPatch(
                lat=plat[i], lon=plon[i],
                depth=pz[i], length=dlen[i], width=dwid[i],
                strike=strike[i],
                dip=dip[i],
                rake=rake[i])
            for i in range(nps + 1)])