Merge pull request #24 from mschauer/new2024

New 2024

mschauer committed Mar 19, 2024
2 parents 8019c85 + 4ea2062 commit 4fa0f64
Showing 10 changed files with 113 additions and 52 deletions.
4 changes: 1 addition & 3 deletions Project.toml
@@ -5,16 +5,14 @@ version = "0.2.1"

 [deps]
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
-MeasureTheory = "eadaa1a4-d27c-401d-8699-e962e1bbc33b"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 UnPack = "3a884ed6-31ef-47d7-9d2a-63182c4928ed"

 [compat]
-MeasureTheory = "0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8"
-StatsBase = "0.33"
+StatsBase = "0.33, 0.34"
 UnPack = "1.0"
 julia = "1"
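The widened `[compat]` entry follows Pkg's caret semantics: `"0.33, 0.34"` is the union of the ranges `[0.33.0, 0.34.0)` and `[0.34.0, 0.35.0)`, so any StatsBase 0.33.x or 0.34.x release is now accepted. A quick check using Pkg's (internal) `semver_spec` helper, shown for illustration only:

```julia
using Pkg

# semver_spec parses a compat string into a version set; `in` tests membership.
spec = Pkg.Types.semver_spec("0.33, 0.34")
v"0.33.21" in spec, v"0.34.2" in spec, v"0.35.0" in spec  # (true, true, false)
```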
59 changes: 59 additions & 0 deletions README.md
@@ -19,6 +19,65 @@ This package will contain the general infrastructure and the rules for non-linea

 In parallel, rules for stochastic differential equations are developed in [MitosisStochasticDiffEq.jl](https://github.com/mschauer/MitosisStochasticDiffEq.jl)

+## Show reel
+
+### Bayesian regression on the drift parameter of an SDE
+```julia
+using StochasticDiffEq
+using Random
+using MitosisStochasticDiffEq
+import MitosisStochasticDiffEq as MSDE
+using LinearAlgebra, Statistics
+
+# Model and sensitivity
+function f(du, u, θ, t)
+    c = 0.2 * θ
+    du[1] = -0.1 * u[1] + c * u[2]
+    du[2] = - c * u[1] - 0.1 * u[2]
+    return
+end
+function g(du, u, θ, t)
+    fill!(du, 0.15)
+    return
+end
+
+# b is linear in the parameter with Jacobian
+function b_jac(J, x, θ, t)
+    J .= false
+    J[1, 1] = 0.2 * x[2]
+    J[2, 1] = - 0.2 * x[1]
+    nothing
+end
+# and intercept
+function b_icpt(dx, x, θ, t)
+    dx .= false
+    dx[1] = -0.1 * x[1]
+    dx[2] = -0.1 * x[2]
+    nothing
+end
+
+# Simulate path ensemble
+x0 = [1.0, 1.0]
+tspan = (0.0, 20.0)
+θ0 = 1.0
+dt = 0.05
+t = range(tspan...; step=dt)
+
+prob = SDEProblem{true}(f, g, x0, tspan, θ0)
+ensembleprob = EnsembleProblem(prob)
+ensemblesol = solve(
+    ensembleprob, EM(), EnsembleThreads(); dt=dt, saveat=t, trajectories=1000
+)
+
+# Inference on drift parameters
+sdekernel = MSDE.SDEKernel(f, g, t, 0*θ0)
+ϕprototype = zeros((length(x0), length(θ0))) # prototypes for vectors
+yprototype = zeros((length(x0),))
+R = MSDE.Regression!(sdekernel, yprototype, paramjac_prototype=ϕprototype, paramjac=b_jac, intercept=b_icpt)
+prior_precision = 0.1I(1)
+posterior = MSDE.conjugate(R, ensemblesol, prior_precision)
+print(mean(posterior)[], " ± ", sqrt(cov(posterior)[]))
+```


 ## References
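The `conjugate` call in the README example above works because the drift is linear in `θ`, so under the Euler (Girsanov-type) likelihood a Gaussian prior on `θ` gives a Gaussian posterior. A minimal standalone sketch of that update, assuming a zero prior mean and the constant diffusion `σ = 0.15` from `g`; `conjugate_sketch` and `paths` are illustrative names, not MSDE API:

```julia
using LinearAlgebra

# Posterior for dX = (φ(X)θ + ν(X))dt + σ dW with scalar θ:
# precision Λ = Λ0 + ∫ φ'φ/σ² dt, mean m = Λ⁻¹ ∫ φ'(dX - ν dt)/σ²
function conjugate_sketch(paths, t, Λ0; σ2 = 0.15^2)
    Λ, z = Λ0, 0.0
    for x in paths                               # x[i] is the state at time t[i]
        for i in 1:length(t)-1
            dt = t[i+1] - t[i]
            φ = [0.2 * x[i][2], -0.2 * x[i][1]]  # ∂b/∂θ, matching b_jac
            ν = -0.1 .* x[i]                     # intercept, matching b_icpt
            Λ += dot(φ, φ) / σ2 * dt
            z += dot(φ, x[i+1] - x[i] - ν .* dt) / σ2
        end
    end
    z / Λ, 1 / Λ                                 # posterior mean and variance
end
```

Applied to the ensemble above, `conjugate_sketch([ensemblesol[i].u for i in 1:1000], t, 0.1)` should land close to the `mean(posterior)` and `cov(posterior)` printed by the README example.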
1 change: 0 additions & 1 deletion docs/Project.toml
@@ -1,4 +1,3 @@
 [deps]
 Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
-MeasureTheory = "eadaa1a4-d27c-401d-8699-e962e1bbc33b"
 Mitosis = "e74b8d68-74d0-42b9-99ba-91d43d3ab0e8"
2 changes: 1 addition & 1 deletion example/bffg.jl
@@ -110,7 +110,7 @@ lls = []
 a = rand(forward(BFFG(), k_0a, m0, weighted(ξ0)))
 a1, a2 = rand(forward(BFFG(), cp2, ma, a))
 b = rand(forward(BFFG(), k_ab, ma1, a1))
-c = rand(forward(BFFG(), (k_ac, k_ac_approx), ma2, a2)) # forward model with nonlinear transition
+c = rand(forward(BFFG(), k_ac, ma2, a2)) # forward model with nonlinear transition
 c1, c2 = rand(forward(BFFG(), cp2, mc, c))
 d = rand(forward(BFFG(), k_cd, mc1, c1))
 e = rand(forward(BFFG(), k_ce, mc2, c2))
13 changes: 8 additions & 5 deletions src/Mitosis.jl
@@ -4,8 +4,9 @@ using UnPack
 using Statistics, StatsBase
 using LinearAlgebra
 using Random
-using MeasureTheory
-import MeasureTheory: kernel
+# using MeasureTheory
+# import MeasureTheory: kernel, mapcall
+# import MeasureTheory.Kernel, MeasureTheory.kernel

 """
     kernel(f, M)
@@ -25,17 +26,19 @@ If the argument is a named tuple `(;a=f1, b=f1)`, `κ(x)` is defined as
 """
 kernel

-using MeasureTheory: AbstractMeasure
-import MeasureTheory: density, logdensity
+#using MeasureTheory: AbstractMeasure
+#import MeasureTheory: density, logdensity
 import Base: iterate, length
 import Random.rand
 import StatsBase.sample

+include("mt.jl")
+

 export Gaussian, Copy, fuse, weighted
 export Traced, BFFG, left′, right′, forward, backward, backwardfilter, forwardsampler
 export BF, density, logdensity, ⊕, kernel, correct, Kernel, WGaussian, Gaussian, ConstantMap, AffineMap, LinearMap, GaussKernel


 function independent_sum
 end
 const ⊕ = independent_sum
12 changes: 6 additions & 6 deletions src/gauss.jl
@@ -6,7 +6,7 @@ import LinearAlgebra.logdet
     Gaussian{(:μ,:Σ)}
     Gaussian{(:F,:Γ)}

-Mitosis provides the measure `Gaussian` based on MeasureTheory.jl,
+Mitosis provides the measure `Gaussian`,
 with a mean `μ` and covariance `Σ` parametrization,
 or parametrised by natural parameters `F = Γ μ`, `Γ = Σ⁻¹`.
@@ -25,8 +25,8 @@ struct Gaussian{P,T} <: AbstractMeasure
 end
 Gaussian{P}(nt::NamedTuple{P,T}) where {P,T} = Gaussian{P,T}(nt)
 Gaussian{P}(args...) where {P} = Gaussian(NamedTuple{P}(args))
-Gaussian(;args...) = Gaussian(args.data)
-Gaussian{P}(;args...) where {P} = Gaussian{P}(args.data)
+Gaussian(;args...) = Gaussian(values(args))
+Gaussian{P}(;args...) where {P} = Gaussian{P}(values(args))

 # the following propagates uncertainty if `μ` is `Gaussian`
 Gaussian(par::NamedTuple{(:μ,:Σ),Tuple{T,S}}) where {T<:Gaussian,S} = Gaussian((;μ = mean(par.μ),Σ=par.Σ +cov(par.μ)))
@@ -81,9 +81,9 @@ rand(rng::AbstractRNG, p::Gaussian) = unwhiten(p, randwn(rng, mean(p)))

 _logdet(p::Gaussian{(:μ,:Σ)}) = _logdet(p.Σ, dim(p))
 _logdet(p::Gaussian{(:Σ,)}) = logdet(p.Σ)
-MeasureTheory.logdensity(p::Gaussian, x) = -(sqmahal(p,x) + _logdet(p) + dim(p)*log(2pi))/2
-MeasureTheory.density(p::Gaussian, x) = exp(logdensity(p, x))
-function MeasureTheory.logdensity(p::Gaussian{(:F,:Γ)}, x)
+logdensity(p::Gaussian, x) = -(sqmahal(p,x) + _logdet(p) + dim(p)*log(2pi))/2
+density(p::Gaussian, x) = exp(logdensity(p, x))
+function logdensity(p::Gaussian{(:F,:Γ)}, x)
     C = cholesky(sym(p.Γ))
     -x'*p.Γ*x/2 + x'*p.F - p.F'*(C\p.F)/2 + logdet(C)/2 - dim(p)*log(2pi)/2
 end
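The change from `args.data` to `values(args)` in the keyword constructors swaps a reach into internals for the public accessor: slurped keyword arguments arrive as a `Base.Pairs` view, and `values` on it returns the underlying `NamedTuple`. A small standalone check of the pattern (names illustrative):

```julia
f(; kw...) = values(kw)          # same pattern as Gaussian(;args...)
nt = f(μ = [0.0], Σ = ones(1, 1))
nt isa NamedTuple{(:μ, :Σ)}      # true, so Gaussian{P}(nt) can dispatch on the names P
```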
7 changes: 2 additions & 5 deletions src/markov.jl
@@ -1,10 +1,7 @@
-import MeasureTheory.Kernel, MeasureTheory.kernel

-MeasureTheory.mapcall(t) = map(func -> func(), t)
-(k::Kernel{M,<:NamedTuple})() where {M} = M(; MeasureTheory.mapcall(k.ops)...)

-const GaussKernel = MeasureTheory.Kernel{<:Gaussian}
-#const Copy = MeasureTheory.Kernel{<:Dirac}
+const GaussKernel = Kernel{Gaussian}
+#const Copy = Kernel{Dirac}

 """
     AffineMap(B, β)
18 changes: 18 additions & 0 deletions src/mt.jl
@@ -0,0 +1,18 @@
+abstract type AbstractMeasure end
+
+struct Kernel{T, NT}
+    ops::NT
+end
+mapcall(t) = map(func -> func(), t)
+mapcall(t, arg) = map(func -> func(arg), t)
+(k::Kernel{M,<:NamedTuple})() where {M} = M(mapcall(k.ops))
+(k::Kernel{M,NT})(arg) where {M,NT<:NamedTuple} = M(mapcall(k.ops, arg))
+
+
+Kernel{T}(args::NT) where {T,NT} = Kernel{T,NT}(args)
+kernel(::Type{T}; kwargs...) where {T} = Kernel{T}(NamedTuple(kwargs))
+struct Dirac{T} <: AbstractMeasure
+    x::T
+end
+rand(d::Dirac) = d.x
+rand(_::AbstractRNG, d::Dirac) = d.x
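This vendored file replaces the pieces previously imported from MeasureTheory: a `Kernel` stores a `NamedTuple` of functions `ops`, and calling the kernel maps its argument through every op to build the parameters of the target measure. A usage sketch, assuming the `Gaussian` type from `src/gauss.jl` accepts the resulting `NamedTuple`:

```julia
k = kernel(Gaussian; μ = x -> 2 .* x, Σ = x -> [1.0 0.0; 0.0 1.0])
p = k([1.0, 0.0])   # mapcall applies each op to the argument, giving
                    # Gaussian((μ = [2.0, 0.0], Σ = [1.0 0.0; 0.0 1.0]))
k isa GaussKernel   # true: the alias Kernel{Gaussian} in src/markov.jl
                    # matches any ops type
```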
19 changes: 15 additions & 4 deletions src/rules.jl
@@ -56,9 +56,9 @@ function backward(::BFFG, k::Union{AffineGaussianKernel,LinearGaussianKernel}, q
     B, β, Q = params(k)
     Σ = inv(Γ) # requires invertibility of Γ
     K = B'/(Σ + Q)
-    ν̃ = Σ*F - β
-    Fp = K*ν̃
-    Γp = K*B
+    ν̃ = Σ*F - β
+    Fp = K*ν̃ # B'F - B'Γ β #
+    Γp = K*B # B'Γ B
     # Corollary 7.2 [Automatic BFFG]
     if !unfused
         cp = c - logdet(B)
@@ -153,6 +153,17 @@ function forward_(::BFFG, k::GaussKernel, m::Message{<:WGaussian{(:F,:Γ,:c)}},
     WGaussian{(:μ,:Σ,:c)}(μᵒ, Qᵒ, c)
 end

+function backward(::BF, ::Copy, args::Union{Leaf{<:Gaussian{(:μ,:Σ)}},Gaussian{(:μ,:Σ)}}...; unfused=true)
+    unfused = false
+    F, H = params(convert(Gaussian{(:F,:Γ)}, args[1]))
+    for b in args[2:end]
+        F2, H2 = params(convert(Gaussian{(:F,:Γ)}, b))
+        F += F2
+        H += H2
+    end
+    message(), convert(Gaussian{(:μ,:Σ)}, Gaussian{(:F,:Γ)}(F, H))
+end
+

 function backward(::BFFG, ::Copy, args::Union{Leaf{<:WGaussian{(:μ,:Σ,:c)}},WGaussian{(:μ,:Σ,:c)}}...; unfused=true)
     unfused = false
@@ -201,5 +212,5 @@ function forward(::BFFG, k::Union{AffineGaussianKernel,LinearGaussianKernel}, m:
     Dirac(weighted(y[], x.ll))
 end
 function forward(::BFFG, ::Copy{2}, _, x::Weighted)
-    MeasureTheory.Dirac((x, weighted(x[])))
+    Dirac((x, weighted(x[])))
 end
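The new `backward(::BF, ::Copy, ...)` method fuses Gaussian messages in natural parameters: multiplying Gaussian densities adds the precisions `Γ` and the precision-weighted means `F`. A scalar sanity check of that identity:

```julia
Γ1, μ1 = 1/2, 1.0       # precision and mean of the first message
Γ2, μ2 = 1.0, -1.0      # ... and of the second
Γ = Γ1 + Γ2             # fused precision (1.5), as accumulated in the loop above
F = Γ1 * μ1 + Γ2 * μ2   # fused F = Γ1*μ1 + Γ2*μ2 (-0.5)
μ, Σ = F / Γ, 1 / Γ     # back to moment form: μ = -1/3, Σ = 2/3
```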
30 changes: 3 additions & 27 deletions src/wgaussian.jl
@@ -5,8 +5,8 @@ struct WGaussian{P,T} <: AbstractMeasure
 end
 WGaussian{P}(nt::NamedTuple{P,T}) where {P,T} = WGaussian{P,T}(nt)
 WGaussian{P}(args...) where {P} = WGaussian(NamedTuple{P}(args))
-WGaussian(;args...) = WGaussian(args.data)
-WGaussian{P}(;args...) where {P} = WGaussian{P}(args.data)
+WGaussian(;args...) = WGaussian(values(args))
+WGaussian{P}(;args...) where {P} = WGaussian{P}(values(args))

 # the following propagates uncertainty if `μ` is `Gaussian`
 WGaussian(par::NamedTuple{(:μ,:Σ,:c),Tuple{T,S,U}}) where {T<:WGaussian,S,U} = WGaussian((;μ=mean(par.μ), Σ=par.Σ +cov(par.μ), c=par.c + par.μ.c))
@@ -31,7 +31,7 @@ moment1(p::WGaussian{(:F, :Γ, :c)}) = p.c*p.Γ\p.F
 Base.isapprox(p1::WGaussian, p2::WGaussian; kwargs...) =
     all(isapprox.(Tuple(params(p1)), Tuple(params(p2)); kwargs...))

-function MeasureTheory.logdensity(p::WGaussian{(:F,:Γ,:c)}, x)
+function logdensity(p::WGaussian{(:F,:Γ,:c)}, x)
     C = cholesky_(sym(p.Γ))
     p.c - x'*p.Γ*x/2 + x'*p.F - p.F'*(C\p.F)/2 + logdet(C)/2 - dim(p)*log(2pi)/2
 end
@@ -52,30 +52,6 @@ function Base.convert(::Type{WGaussian{(:μ,:Σ,:c)}}, p::WGaussian{(:F,:Γ,:c)}
 end
 Base.convert(::Type{WGaussian{(:μ,:Σ,:c)}}, p::Leaf) = convert(WGaussian{(:μ,:Σ,:c)}, p[])
 Base.convert(::Type{WGaussian{(:F,:Γ,:c)}}, p::Leaf) = convert(WGaussian{(:F,:Γ,:c)}, p[])
-#=
-Base.:(==)(p1::WGaussian, p2::WGaussian) = mean(p1) == mean(p2) && cov(p1) == cov(p2)
-Base.isapprox(p1::WGaussian, p2::WGaussian; kwargs...) =
-    isapprox(mean(p1), mean(p2); kwargs...) && isapprox(cov(p1), cov(p2); kwargs...)
-mean(p::WGaussian{(:μ, :Σ)}) = p.μ
-cov(p::WGaussian{(:μ, :Σ)}) = p.Σ
-meancov(p) = mean(p), cov(p)
-precision(p::WGaussian{(:μ, :Σ)}) = inv(p.Σ)
-mean(p::WGaussian{(:F, :Γ)}) = Γ\p.F
-cov(p::WGaussian{(:F, :Γ)}) = inv(p.Γ)
-precision(p::WGaussian{(:F, :Γ)}) = p.Γ
-dim(p::WGaussian) = length(mean(p))
-whiten(p::WGaussian{(:μ, :Σ)}, x) = cholesky(p.Σ).U'\(x - p.μ)
-unwhiten(p::WGaussian{(:μ, :Σ)}, z) = cholesky(p.Σ).U'*z + p.μ
-sqmahal(p::WGaussian, x) = norm_sqr(whiten(p, x))
-
-rand(p::WGaussian) = rand(Random.GLOBAL_RNG, p)
-rand(RNG::AbstractRNG, p::WGaussian) = unwhiten(p, randn!(RNG, zero(mean(p))))
-
-MeasureTheory.logdensity(p::WGaussian, x) = -(sqmahal(p,x) + _logdet(p, dim(p)) + dim(p)*log(2pi))/2
-MeasureTheory.density(p::WGaussian, x) = exp(logpdf(p, x))
-
-=#