
Commit 366f9b3
Merge pull request #563 from SasView/pr-simplified-Iq-interface
Simplified interface for computing I(q,dq)
caitwolf authored Jun 27, 2023
2 parents 460b026 + 67770f9 commit 366f9b3
Showing 4 changed files with 201 additions and 70 deletions.
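The core of the change is three one-shot helpers added to sasmodels/direct_model.py (Iq, Iqxy and Gxi) that load the model, wrap the q values and resolution in a data object, and evaluate in a single call. A minimal sketch of the intended usage, based on the docstrings and tests in the diff below (the parameter values are illustrative only):

    import numpy as np
    from sasmodels.direct_model import Iq, Iqxy, Gxi

    q = np.logspace(-3, -1, 50)
    iq_pinhole = Iq("sphere", q, dq=0.05*q, radius=200)   # 5% dq/q pinhole resolution
    iq_slit = Iq("sphere", q, ql=1.0, radius=200)         # slit length only; scalars broadcast
    iqxy = Iqxy("sphere", [0.1], [0.1], dqx=[0.005], dqy=[0.005], radius=200)
    gxi = Gxi("sphere", np.logspace(1, 3, 40), radius=200)  # SESANS G(xi) - G(0)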
18 changes: 11 additions & 7 deletions sasmodels/data.py
@@ -134,6 +134,9 @@ class Source:
 class Sample:
     ...
 
+def _as_numpy(data):
+    return None if data is None else np.asarray(data)
+
 class Data1D(object):
     """
     1D data object.
@@ -163,11 +166,12 @@ class Data1D(object):
     """
     def __init__(self, x=None, y=None, dx=None, dy=None):
         # type: (OptArray, OptArray, OptArray, OptArray) -> None
-        self.x, self.y, self.dx, self.dy = x, y, dx, dy
+        self.x, self.dx = _as_numpy(x), _as_numpy(dx)
+        self.y, self.dy = _as_numpy(y), _as_numpy(dy)
         self.dxl = None
         self.filename = None
-        self.qmin = x.min() if x is not None else np.NaN
-        self.qmax = x.max() if x is not None else np.NaN
+        self.qmin = self.x.min() if self.x is not None else np.NaN
+        self.qmax = self.x.max() if self.x is not None else np.NaN
         # TODO: why is 1D mask False and 2D mask True?
         self.mask = (np.isnan(y) if y is not None
                      else np.zeros_like(x, 'b') if x is not None
@@ -240,13 +244,13 @@ class Data2D(object):
     """
     def __init__(self, x=None, y=None, z=None, dx=None, dy=None, dz=None):
         # type: (OptArray, OptArray, OptArray, OptArray, OptArray, OptArray) -> None
-        self.qx_data, self.dqx_data = x, dx
-        self.qy_data, self.dqy_data = y, dy
-        self.data, self.err_data = z, dz
+        self.qx_data, self.dqx_data = _as_numpy(x), _as_numpy(dx)
+        self.qy_data, self.dqy_data = _as_numpy(y), _as_numpy(dy)
+        self.data, self.err_data = _as_numpy(z), _as_numpy(dz)
         self.mask = (np.isnan(z) if z is not None
                      else np.zeros_like(x, dtype='bool') if x is not None
                      else None)
-        self.q_data = np.sqrt(x**2 + y**2)
+        self.q_data = np.sqrt(self.qx_data**2 + self.qy_data**2)
         self.qmin = 1e-16
         self.qmax = np.inf
         self.detector = []
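Because of the _as_numpy coercion above, Data1D and Data2D now accept plain Python lists as well as arrays, and qmin/qmax are computed from the coerced array rather than the raw argument. A small sketch of what this enables (values are illustrative):

    from sasmodels.data import Data1D

    # x and dx may be lists; they are converted with np.asarray on entry.
    data = Data1D(x=[0.001, 0.01, 0.1], dx=[5e-5, 5e-4, 5e-3])
    print(data.qmin, data.qmax)   # 0.001 0.1, from the coerced x array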
189 changes: 143 additions & 46 deletions sasmodels/direct_model.py
@@ -118,12 +118,15 @@ def get_mesh(model_info, values, dim='1d', mono=False):
         active = lambda name: True
 
     #print("in get_mesh: pars:",[p.id for p in parameters.call_parameters])
-    mesh = [_get_par_weights(p, values, active(p.name))
+    values = values.copy()
+    mesh = [_pop_par_weights(p, values, active(p.name))
             for p in parameters.call_parameters]
+    if values:
+        raise TypeError(f"Unused parameters in call: {', '.join(values.keys())}")
     return mesh
 
 
-def _get_par_weights(parameter, values, active=True):
+def _pop_par_weights(parameter, values, active=True):
     # type: (Parameter, Dict[str, float], bool) -> Tuple[float, np.ndarray, np.ndarray]
     """
     Generate the distribution for parameter *name* given the parameter values
@@ -132,34 +135,27 @@ def _get_par_weights(parameter, values, active=True):
     Uses "name", "name_pd", "name_pd_type", "name_pd_n", "name_pd_sigma"
     from the *pars* dictionary for parameter value and parameter dispersion.
     """
-    value = float(values.get(parameter.name, parameter.default))
-    npts = values.get(parameter.name+'_pd_n', 0)
-    width = values.get(parameter.name+'_pd', 0.0)
-    relative = parameter.relative_pd
-    if npts == 0 or width == 0.0 or not active:
-        # Note: orientation parameters have the viewing angle as the parameter
-        # value and the jitter in the distribution, so be sure to set the
-        # empty pd for orientation parameters to 0.
-        pd = [value if relative or not parameter.polydisperse else 0.0], [1.0]
+    value = float(values.pop(parameter.name, parameter.default))
+    if parameter.polydisperse:
+        npts = values.pop(parameter.name+'_pd_n', 0)
+        width = values.pop(parameter.name+'_pd', 0.0)
+        nsigma = values.pop(parameter.name+'_pd_nsigma', 3.0)
+        distribution = values.pop(parameter.name+'_pd_type', 'gaussian')
+        relative = parameter.relative_pd
+        if npts == 0 or width == 0.0 or not active:
+            # Note: orientation parameters have the viewing angle as the parameter
+            # value and the jitter in the distribution, so be sure to set the
+            # empty pd for orientation parameters to 0.
+            pd = [value if relative else 0.0], [1.0]
+        else:
+            limits = parameter.limits
+            pd = weights.get_weights(distribution, npts, width, nsigma,
+                                     value, limits, relative)
     else:
-        limits = parameter.limits
-        disperser = values.get(parameter.name+'_pd_type', 'gaussian')
-        nsigma = values.get(parameter.name+'_pd_nsigma', 3.0)
-        pd = weights.get_weights(disperser, npts, width, nsigma,
-                                 value, limits, relative)
+        pd = [value], [1.0]
     return value, pd[0], pd[1]
 
 
-def _vol_pars(model_info, values):
-    # type: (ModelInfo, ParameterSet) -> Tuple[np.ndarray, np.ndarray]
-    vol_pars = [_get_par_weights(p, values)
-                for p in model_info.parameters.call_parameters
-                if p.type == 'volume']
-    #import pylab; pylab.plot(vol_pars[0][0],vol_pars[0][1]); pylab.show()
-    dispersity, weight = dispersion_mesh(model_info, vol_pars)
-    return dispersity, weight
-
-
 def _make_sesans_transform(data):
     # Pre-compute the Hankel matrix (H)
     SElength, SEunits = data.x, data._xunit
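Switching from dict.get to dict.pop (on a copy of the caller's dict) is what lets get_mesh detect typos: each parameter consumes its own entries, so anything left over was never recognized and the new TypeError above reports it. A toy sketch of the pattern, separate from the sasmodels API (the function and names here are hypothetical):

    def evaluate(**pars):
        # **pars is already a fresh dict, so popping does not affect the caller.
        radius = float(pars.pop("radius", 120.0))
        width = float(pars.pop("radius_pd", 0.0))
        npts = int(pars.pop("radius_pd_n", 0))
        if pars:   # leftovers were never consumed: misspelled or unsupported
            raise TypeError(f"Unused parameters in call: {', '.join(pars.keys())}")
        return radius, width, npts

    evaluate(radius=200, radius_pd=0.1, radius_pd_n=15)   # ok
    #evaluate(radius=200, radii_pd=0.1)  # raises TypeError for 'radii_pd'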
@@ -260,10 +256,11 @@ def _interpret_data(self, data: Data, model: KernelModel) -> None:
                 else:
                     res = resolution.Perfect1D(q)
             elif (getattr(data, 'dxl', None) is not None
-                  and getattr(data, 'dxw', None) is not None):
-                res = resolution.Slit1D(data.x[index],
-                                        qx_width=data.dxl[index],
-                                        qy_width=data.dxw[index])
+                  or getattr(data, 'dxw', None) is not None):
+                res = resolution.Slit1D(
+                    data.x[index],
+                    q_length=None if data.dxl is None else data.dxl[index],
+                    q_width=None if data.dxw is None else data.dxw[index])
             else:
                 res = resolution.Perfect1D(data.x[index])
         elif self.data_type == 'Iq-oriented':
@@ -278,9 +275,10 @@ def _interpret_data(self, data: Data, model: KernelModel) -> None:
                     or getattr(data, 'dxw', None) is None):
                 raise ValueError("oriented sample with 1D data needs slit resolution")
 
-            res = resolution2d.Slit2D(data.x[index],
-                                      qx_width=data.dxw[index],
-                                      qy_width=data.dxl[index])
+            res = resolution2d.Slit2D(
+                data.x[index],
+                qx_width=data.dxw[index],
+                qy_width=data.dxl[index])
         else:
             raise ValueError("Unknown data type")  # never gets here
 
@@ -456,43 +454,142 @@ def test_reparameterize():
     except Exception:
         pass
 
+def _direct_calculate(model, data, pars):
+    from .core import load_model_info, build_model
+    model_info = load_model_info(model)
+    kernel = build_model(model_info)
+    calculator = DirectModel(data, kernel)
+    return calculator(**pars)
+
+def Iq(model, q, dq=None, ql=None, qw=None, **pars):
+    """
+    Compute I(q) for *model*. Resolution is *dq* for pinhole or *ql* and *qw*
+    for slit geometry. Use 0 or None for infinite slits.
+
+    *model* is the name of a built-in or custom model, or a model expression,
+    such as sphere+sphere for a mixture of spheres of different radii, or
+    sphere@hardsphere for concentrated solutions where the dilute
+    approximation no longer applies.
+
+    Use additional keywords for model parameters, tagged with *_pd*, *_pd_n*,
+    *_pd_nsigma*, *_pd_type* to set polydispersity parameters, or *_M0*,
+    *_mphi*, *_mtheta* for magnetic parameters.
+
+    This is not intended for use when the same I(q) is evaluated many times
+    with different parameter values. For that you should set up the model
+    with `model = build_model(load_model_info(model_name))`, set up a data
+    object to define the q values and resolution, then use
+    `calculator = DirectModel(data, model)` to set up a calculator, or
+    `problem = bumps.FitProblem(sasmodels.bumps_model.Experiment(data, model))`
+    to define a fit problem for use with the bumps optimizer. Data can be
+    loaded using the `sasdata` package, or use one of the empty data
+    generators from `sasmodels.data`.
+
+    Models are cached. Custom models will not be reloaded even if the
+    underlying files have changed. If you are using this in a long-running
+    application then you will need to call
+    `sasmodels.direct_model._model_cache.clear()` to reset the cache and
+    force a custom model reload.
+    """
+    from .data import Data1D, _as_numpy
+    data = Data1D(x=q, dx=dq)
+    def broadcast(v):
+        return (
+            None if v is None
+            else np.full(len(q), v) if np.isscalar(v)
+            else _as_numpy(v))
+    data.dxl, data.dxw = broadcast(ql), broadcast(qw)
+    return _direct_calculate(model, data, pars)
+
+def Iqxy(model, qx, qy, dqx=None, dqy=None, **pars):
+    """
+    Compute I(qx, qy) for *model*. Resolution is *dqx* and *dqy*.
+
+    See :func:`Iq` for details on the model and parameters.
+    """
+    from .data import Data2D
+    data = Data2D(x=qx, y=qy, dx=dqx, dy=dqy)
+    return _direct_calculate(model, data, pars)
+
+def Gxi(model, xi, **pars):
+    """
+    Compute the SESANS correlation G' = G(xi) - G(0) for *model*.
+
+    See :func:`Iq` for details on the model and parameters.
+    """
+    from .data import empty_sesans
+    data = empty_sesans(z=xi)
+    return _direct_calculate(model, data, pars)
+
 def main():
     # type: () -> None
     """
     Program to evaluate a particular model at a set of q values.
     """
     import sys
-    from .data import empty_data1D, empty_data2D
-    from .core import load_model_info, build_model
 
     if len(sys.argv) < 3:
         print("usage: python -m sasmodels.direct_model modelname (q|qx,qy) par=val ...")
         sys.exit(1)
-    model_name = sys.argv[1]
+    model = sys.argv[1]
     call = sys.argv[2].upper()
+    pars = dict((k, (float(v) if not k.endswith("_pd_type") else v))
+                for pair in sys.argv[3:]
+                for k, v in [pair.split('=')])
     try:
         values = [float(v) for v in call.split(',')]
     except ValueError:
         values = []
     if len(values) == 1:
         q, = values
-        data = empty_data1D([q])
+        dq = dqw = dql = None
+        #dq = [q*0.05]  # 5% pinhole resolution
+        #dqw, dql = [q*0.05], [1.0]  # 5% horizontal slit resolution
+        print(Iq(model, [q], dq=dq, qw=dqw, ql=dql, **pars)[0])
+        #print(Gxi(model, [q], **pars)[0])
     elif len(values) == 2:
         qx, qy = values
-        data = empty_data2D([qx], [qy])
+        dq = None
+        #dq = [0.005]  # 5% pinhole resolution at q = 0.1
+        print(Iqxy(model, [qx], [qy], dqx=dq, dqy=dq, **pars)[0])
     else:
         print("use q or qx,qy")
         sys.exit(1)
-
-    model_info = load_model_info(model_name)
-    model = build_model(model_info)
-    calculator = DirectModel(data, model)
-    pars = dict((k, (float(v) if not k.endswith("_pd_type") else v))
-                for pair in sys.argv[3:]
-                for k, v in [pair.split('=')])
-    Iq = calculator(**pars)
-    print(Iq[0])
 
+def test_simple_interface():
+    def near(value, target):
+        """Close enough in single precision"""
+        #print(f"value: {value}, target: {target}")
+        return np.allclose(value, target, rtol=1e-6, atol=0, equal_nan=True)
+    # Note: target values taken from running main() on the same parameters.
+    # Resolution was 5% dq/q.
+    pars = dict(radius=200)
+    # simple sphere in 1D (perfect, pinhole, slit)
+    assert near(Iq('sphere', [0.1], **pars), [0.6200146273894904])
+    assert near(Iq('sphere', [0.1], dq=[0.005], **pars), [2.3019224683980215])
+    assert near(Iq('sphere', [0.1], qw=[0.005], ql=[1.0], **pars),
+                [0.3673431784535172])
+    # simple sphere in 2D (perfect, pinhole)
+    assert near(Iqxy('sphere', [0.1], [0.1], **pars), [1.1781532874802199])
+    assert near(Iqxy('sphere', [0.1], [0.1], dqx=[0.005], dqy=[0.005], **pars),
+                [0.8177780778578667])
+    # sesans
+    assert near(Gxi('sphere', [100], **pars), [-0.19146959126623486])
+    # Check that a single-point sesans value matches the value in an array
+    xi = np.logspace(1, 3, 100)
+    y = Gxi('sphere', xi, **pars)
+    for k in (0, len(xi)//5, len(xi)//2, len(xi)-1):
+        ysingle = Gxi('sphere', [xi[k]], **pars)[0]
+        print(f"SESANS point check {k}: xi={xi[k]:.1f} single={ysingle:.4f} vector={y[k]:.4f}")
+        assert abs((ysingle-y[k])/y[k]) < 0.1, \
+            "SESANS point value not matching vector value within 10%"
+    # magnetic 2D
+    pars = dict(radius=200, sld_M0=3, sld_mtheta=30)
+    assert near(Iqxy('sphere', [0.1], [0.1], **pars), [1.5577852226925908])
+    # polydisperse 1D
+    pars = dict(
+        radius=200, radius_pd=0.1, radius_pd_n=15, radius_pd_nsigma=2.5,
+        radius_pd_type="uniform")
+    assert near(Iq('sphere', [0.1], **pars), [2.703169824954617])
+
 if __name__ == "__main__":
+    import logging
+    logging.disable(logging.ERROR)
     main()
+    #test_simple_interface()
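As the Iq docstring notes, these helpers rebuild the calculator on every call. For repeated evaluation, set the model and data up once and reuse the calculator; a sketch following the docstring's recipe (q range and parameters are illustrative):

    import numpy as np
    from sasmodels.core import load_model_info, build_model
    from sasmodels.data import empty_data1D
    from sasmodels.direct_model import DirectModel

    q = np.logspace(-3, -1, 200)
    data = empty_data1D(q)   # perfect resolution; supply dx for pinhole smearing
    kernel = build_model(load_model_info("sphere"))
    calculator = DirectModel(data, kernel)
    for radius in (50, 100, 200):
        iq = calculator(radius=radius)   # reuses the compiled kernel

The rewritten main() exercises the same helpers from the command line, for example: python -m sasmodels.direct_model sphere 0.1 radius=200 radius_pd=0.1 radius_pd_n=15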
40 changes: 27 additions & 13 deletions sasmodels/resolution.py
@@ -124,34 +124,41 @@ class Slit1D(Resolution):
     """
 
-    def __init__(self, q, q_length, q_width=0., q_calc=None):
+    def __init__(self, q, q_length=None, q_width=None, q_calc=None):
         # Remember what width/dqy was used even though we won't need them
         # after the weight matrix is constructed
         self.q_length, self.q_width = q_length, q_width
 
         # Allow independent resolution on each point even though it is not
         # needed in practice.
-        if np.isscalar(q_width):
+        if q_width is None:
+            q_width = np.zeros(len(q))
+        elif np.isscalar(q_width):
             q_width = np.ones(len(q))*q_width
         else:
             q_width = np.asarray(q_width)
-        if np.isscalar(q_length):
+        if q_length is None:
+            q_length = np.zeros(len(q))
+        elif np.isscalar(q_length):
             q_length = np.ones(len(q))*q_length
         else:
             q_length = np.asarray(q_length)
 
         self.q = q.flatten()
-        self.q_calc = slit_extend_q(q, q_width, q_length) \
-            if q_calc is None else np.sort(q_calc)
+        self.q_calc = (
+            slit_extend_q(q, q_width, q_length)
+            if q_calc is None else np.sort(q_calc)
+        )
 
         # Protect against models which are not defined for very low q. Limit
         # the smallest q value evaluated (in absolute) to 0.02*min
         cutoff = MINIMUM_ABSOLUTE_Q*np.min(self.q)
         self.q_calc = self.q_calc[abs(self.q_calc) >= cutoff]
 
         # Build weight matrix from calculated q values
-        self.weight_matrix = \
-            slit_resolution(self.q_calc, self.q, q_length, q_width)
+        self.weight_matrix = (
+            slit_resolution(self.q_calc, self.q, q_length, q_width)
+        )
         self.q_calc = abs(self.q_calc)
 
     def apply(self, theory):
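With q_length and q_width both defaulting to None, Slit1D can now be built with resolution in only one slit dimension. A sketch of direct use (the theory values are dummies standing in for I(q) evaluated at res.q_calc):

    import numpy as np
    from sasmodels.resolution import Slit1D

    q = np.linspace(0.001, 0.5, 200)
    res = Slit1D(q, q_length=0.05)        # q_width=None becomes zeros: no width smearing
    theory = np.ones_like(res.q_calc)     # dummy I(q) on the extended grid
    smeared = res.apply(theory)           # resolution-weighted I at the original q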
@@ -451,12 +458,14 @@ def linear_extrapolation(q, q_min, q_max):
     """
     q = np.sort(q)
     if q_min + 2*MINIMUM_RESOLUTION < q[0]:
-        n_low = int(np.ceil((q[0]-q_min) / (q[1]-q[0]))) if q[1] > q[0] else 15
+        delta = q[1] - q[0] if len(q) > 1 else 0
+        n_low = int(np.ceil((q[0]-q_min) / delta)) if delta > 0 else 15
         q_low = np.linspace(q_min, q[0], n_low+1)[:-1]
     else:
         q_low = []
     if q_max - 2*MINIMUM_RESOLUTION > q[-1]:
-        n_high = int(np.ceil((q_max-q[-1]) / (q[-1]-q[-2]))) if q[-1] > q[-2] else 15
+        delta = q[-1] - q[-2] if len(q) > 1 else 0
+        n_high = int(np.ceil((q_max-q[-1]) / delta)) if delta > 0 else 15
         q_high = np.linspace(q[-1], q_max, n_high+1)[1:]
     else:
         q_high = []
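The delta guard means a single-point data set no longer indexes q[1] or q[-2]; it simply falls back to 15 extrapolation points per side. For example (values are illustrative):

    import numpy as np
    from sasmodels.resolution import linear_extrapolation

    q_ext = linear_extrapolation(np.array([0.1]), 0.01, 0.3)  # 15 points padded on each side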
@@ -496,21 +505,26 @@ def geometric_extrapolation(q, q_min, q_max, points_per_decade=None):
         n_\text{extend} = (n-1) (\log q_\text{max} - \log q_n)
             / (\log q_n - \log q_1)
     """
+    DEFAULT_POINTS_PER_DECADE = 10
     q = np.sort(q)
+    data_min, data_max = q[0], q[-1]
     if points_per_decade is None:
-        log_delta_q = (len(q) - 1) / (log(q[-1]) - log(q[0]))
+        if data_max > data_min:
+            log_delta_q = (len(q) - 1) / (log(data_max) - log(data_min))
+        else:
+            log_delta_q = log(10.) / DEFAULT_POINTS_PER_DECADE
     else:
         log_delta_q = log(10.) / points_per_decade
-    if q_min < q[0]:
+    if q_min < data_min:
         if q_min < 0:
-            q_min = q[0]*MINIMUM_ABSOLUTE_Q
+            q_min = data_min*MINIMUM_ABSOLUTE_Q
         n_low = int(np.ceil(log_delta_q * (log(q[0])-log(q_min))))
         q_low = np.logspace(log10(q_min), log10(q[0]), n_low+1)[:-1]
     else:
         q_low = []
-    if q_max > q[-1]:
-        n_high = int(np.ceil(log_delta_q * (log(q_max)-log(q[-1]))))
-        q_high = np.logspace(log10(q[-1]), log10(q_max), n_high+1)[1:]
+    if q_max > data_max:
+        n_high = int(np.ceil(log_delta_q * (log(q_max)-log(data_max))))
+        q_high = np.logspace(log10(data_max), log10(q_max), n_high+1)[1:]
     else:
         q_high = []
     return np.concatenate([q_low, q, q_high])
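Similarly, the data_max > data_min guard means data whose q values are all identical fall back to DEFAULT_POINTS_PER_DECADE instead of dividing by log(data_max) - log(data_min) = 0. Typical use (values are illustrative):

    import numpy as np
    from sasmodels.resolution import geometric_extrapolation

    q = np.logspace(-2, -1, 50)
    q_ext = geometric_extrapolation(q, 1e-3, 1.0)  # extends at the data's own point density
    q_deg = geometric_extrapolation(np.array([0.1, 0.1]), 1e-3, 1.0)  # degenerate data takes the guard path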