From bdda17547e4e2e70b8d1898eb01cc03907cfb821 Mon Sep 17 00:00:00 2001
From: Brian DePasquale
Date: Mon, 26 Aug 2024 10:42:14 -0400
Subject: [PATCH] removed 'not needed for tests' dir and moved to private

---
 .devcontainer.json                             |  20 --
 .gitignore                                     |   3 +-
 .../neural-choice_model-negBin.jl              | 133 ---------
 .../not_needed_for_tests/process_data.jl       | 186 -------------
 .../not_needed_for_tests/filter/filtered.jl    | 259 ------------------
 .../not_needed_for_tests/load_and_optimize.jl  | 238 ----------------
 .../not_needed_for_tests/neural_model-th.jl    | 192 -------------
 src/neural_model/not_needed_for_tests/null.jl  | 139 ----------
 .../polynomial/neural_poly_model.jl            | 245 -----------------
 .../polynomial/sample_model-Copy1.jl           | 209 --------------
 .../sample_model_functions_FP.jl               | 165 ----------
 11 files changed, 2 insertions(+), 1787 deletions(-)
 delete mode 100644 .devcontainer.json
 delete mode 100644 src/neural-choice_model/not_needed_for_tests/neural-choice_model-negBin.jl
 delete mode 100644 src/neural-choice_model/not_needed_for_tests/process_data.jl
 delete mode 100644 src/neural_model/not_needed_for_tests/filter/filtered.jl
 delete mode 100644 src/neural_model/not_needed_for_tests/load_and_optimize.jl
 delete mode 100644 src/neural_model/not_needed_for_tests/neural_model-th.jl
 delete mode 100644 src/neural_model/not_needed_for_tests/null.jl
 delete mode 100644 src/neural_model/not_needed_for_tests/polynomial/neural_poly_model.jl
 delete mode 100644 src/neural_model/not_needed_for_tests/polynomial/sample_model-Copy1.jl
 delete mode 100644 src/neural_model/not_needed_for_tests/sample_model_functions_FP.jl

diff --git a/.devcontainer.json b/.devcontainer.json
deleted file mode 100644
index cc8ca967..00000000
--- a/.devcontainer.json
+++ /dev/null
@@ -1,20 +0,0 @@
-// See https://github.com/julia-vscode/julia-devcontainer/blob/master/Dockerfile for image contents
-{
-    "name": "Julia (Community)",
-    "image": "ghcr.io/julia-vscode/julia-devcontainer",
-
-    // Configure tool-specific properties.
-    "customizations": {
-        // Configure properties specific to VS Code.
-        "vscode": {
-            // Add the IDs of extensions you want installed when the container is created.
-            "extensions": [
-                "julialang.language-julia"
-            ]
-        }
-    },
-
-    "postCreateCommand": "/julia-devcontainer-scripts/postcreate.jl",
-
-    "remoteUser": "vscode"
-}
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 2d425fd5..faccac34 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,4 +7,5 @@ examples/joint model/.ipynb_checkpoints
 examples/choice model/.ipynb_checkpoints
 examples/neural model/.ipynb_checkpoints
 .DS_STORE
-.vscode
\ No newline at end of file
+.vscode
+.devcontainer.json
diff --git a/src/neural-choice_model/not_needed_for_tests/neural-choice_model-negBin.jl b/src/neural-choice_model/not_needed_for_tests/neural-choice_model-negBin.jl
deleted file mode 100644
index f094e3bb..00000000
--- a/src/neural-choice_model/not_needed_for_tests/neural-choice_model-negBin.jl
+++ /dev/null
@@ -1,133 +0,0 @@
-
-@with_kw struct Softplus_negbin{T1} <: DDMf
-    r::T1 = 1e6
-    c::T1 = 5.0*rand([-1,1])
-end
-
-
-# should eventually do what I did here for softplus and then can reuse the likelihood function
-function (θ::Softplus_negbin)(x::Union{U,Vector{U}}, λ0::Union{T,Vector{T}}, dt) where {U,T <: Real}
-
-    @unpack r, c = θ
-
-    μ = softplus.(c*x .+ softplusinv.(λ0))
-    p = r/(μ*dt + r)
-    p = min(1. - eps(), p)
-    #sig2 = μ*dt + r*(μ*dt)^2
-    #p = μ*dt/sig2
-    #p = exp(-log((μ*dt/r) + 1.))
-    #p = 1. /((μ*dt)/r + 1.)
- - NegativeBinomial(r, p) - -end - - -""" - all_Softplus_negbin(data) - -Returns: `array` of `array` of `string`, of all Softplus_negbin -""" -function all_Softplus_negbin(data) - - ncells = getfield.(first.(data), :ncells) - f = repeat(["Softplus_negbin"], sum(ncells)) - borg = vcat(0,cumsum(ncells)) - f = [f[i] for i in [borg[i-1]+1:borg[i] for i in 2:length(borg)]] - -end - - -#= -function likelihood(θz, θy::Vector{Softplus_negbin{T2}}, data::neuraldata, - P::Vector{T1}, M::Array{T1,2}, - xc::Vector{T1}, dx::T3, n, cross) where {T1,T2,T3 <: Real} - - @unpack λ, σ2_a, σ2_s, ϕ, τ_ϕ = θz - @unpack spikes, input_data = data - @unpack binned_clicks, clicks, dt, λ0, centered, delay, pad = input_data - @unpack nT, nL, nR = binned_clicks - @unpack L, R = clicks - - #adapt magnitude of the click inputs - La, Ra = adapt_clicks(ϕ,τ_ϕ,L,R;cross=cross) - - F = zeros(T1,n,n) #empty transition matrix for time bins with clicks - - time_bin = (-(pad-1):nT+pad) .- delay - - alpha = log.(P) - - @inbounds for t = 1:length(time_bin) - - #mm = maximum(alpha) - py = vcat(map(xc-> sum(map((k,θy,λ0)-> logpdf(θy(xc, λ0[t], dt), k[t]), spikes, θy, λ0)), xc)...) - - if time_bin[t] >= 1 - - alpha, F = latent_one_step_alt!(alpha, F, λ, σ2_a, σ2_s, time_bin[t], nL, nR, La, Ra, M, dx, xc, n, dt) - - #= - any(t .== nL) ? sL = sum(La[t .== nL]) : sL = zero(T1) - any(t .== nR) ? sR = sum(Ra[t .== nR]) : sR = zero(T1) - σ2 = σ2_s * (sL + sR); μ = -sL + sR - - if (sL + sR) > zero(T1) - transition_M!(F,σ2+σ2_a*dt,λ, μ, dx, xc, n, dt) - alpha = log.((exp.(alpha .- mm)' * F')') .+ mm .+ py - else - alpha = log.((exp.(alpha .- mm)' * M')') .+ mm .+ py - end - - - else - alpha = alpha .+ py - end - =# - end - - alpha = alpha .+ py - - end - - return exp(logsumexp(alpha)), exp.(alpha)/sum(exp.(alpha)) - -end -=# - - -function likelihood(θz, θy::Vector{Softplus_negbin{T2}}, data::neuraldata, - P::Vector{T1}, M::Array{T1,2}, - xc::Vector{T1}, dx::T3, n, cross) where {T1,T2,T3 <: Real} - - @unpack λ, σ2_a, σ2_s, ϕ, τ_ϕ = θz - @unpack spikes, input_data = data - @unpack binned_clicks, clicks, dt, λ0, centered, delay, pad = input_data - @unpack nT, nL, nR = binned_clicks - @unpack L, R = clicks - - #adapt magnitude of the click inputs - La, Ra = adapt_clicks(ϕ,τ_ϕ,L,R;cross=cross) - - F = zeros(T1,n,n) #empty transition matrix for time bins with clicks - - time_bin = (-(pad-1):nT+pad) .- delay - - c = Vector{T1}(undef, length(time_bin)) - - @inbounds for t = 1:length(time_bin) - - if time_bin[t] >= 1 - P, F = latent_one_step!(P, F, λ, σ2_a, σ2_s, time_bin[t], nL, nR, La, Ra, M, dx, xc, n, dt) - end - - P = P .* (vcat(map(xc-> exp(sum(map((k,θy,λ0)-> logpdf(θy(xc, λ0[t], dt), k[t]), spikes, θy, λ0))), xc)...)) - - c[t] = sum(P) - P /= c[t] - - end - - return c, P - -end \ No newline at end of file diff --git a/src/neural-choice_model/not_needed_for_tests/process_data.jl b/src/neural-choice_model/not_needed_for_tests/process_data.jl deleted file mode 100644 index 1652a30e..00000000 --- a/src/neural-choice_model/not_needed_for_tests/process_data.jl +++ /dev/null @@ -1,186 +0,0 @@ -""" - reload_joint_model(file) - -`reload_joint_model` will bring back the parameters from your fit, some details about the optimization (such as the `fit` and bounds vectors) and some details about how you filtered the data. 
The data are not saved in the format in which `load_neural_data` loads them, because that format is too cumbersome to serialize, so you have to load the data again, as above, to re-build the `neuralDDM`; you can, however, use some of what `reload_neural_data` returns (such as `pad` and `dt`) to reload the data in the same way.
-
-Returns:
-
-- `θneural`
-- `neural_options`
-- `n`
-- `cross`
-- `dt`
-- `delay`
-- `pad`
-
-See also: [`save_neural_model`](@ref)
-
-"""
-function reload_joint_model(file)
-
-    xf = read(matopen(file), "ML_params")
-    f = string.(read(matopen(file), "f"))
-    ncells = collect(read(matopen(file), "ncells"))
-    nparams = read(matopen(file), "nparams")
-
-    borg = vcat(0,cumsum(ncells, dims=1))
-    nparams = [nparams[i] for i in [borg[i-1]+1:borg[i] for i in 2:length(borg)]]
-    f = [f[i] for i in [borg[i-1]+1:borg[i] for i in 2:length(borg)]]
-
-    lb = read(matopen(file), "lb")
-    ub = read(matopen(file), "ub")
-    fit = read(matopen(file), "fit")
-
-    n = read(matopen(file), "n")
-    cross = read(matopen(file), "cross")
-    dt = read(matopen(file), "dt")
-    delay = read(matopen(file), "delay")
-    pad = read(matopen(file), "pad")
-
-    θneural_choice(xf, f), neural_choice_options(lb=lb, ub=ub, fit=fit), n, cross, dt, delay, pad
-
-end
-
-
-"""
-    load_neural_choice(file, break_sim_data)
-
-Load a neural data .MAT file and return the binned spike data (with choices, if `include_choices=true`), along with the filtered single-trial and trial-averaged firing rates.
-"""
-function load_neural_choice(file::String, break_sim_data::Bool, centered::Bool=true;
-    dt::Float64=1e-2, delay::Int=0, pad::Int=20, filtSD::Int=5,
-    extra_pad::Int=10, cut::Int=10, pcut::Float64=0.01, include_choices::Bool=true)
-
-    data = read(matopen(file), "rawdata")
-
-    T = vec(data["T"])
-    L = vec(map(x-> vec(collect(x)), data[collect(keys(data))[occursin.("left", collect(keys(data)))][1]]))
-    R = vec(map(x-> vec(collect(x)), data[collect(keys(data))[occursin.("right", collect(keys(data)))][1]]))
-    choices = vec(convert(BitArray, data["pokedR"]))
-
-    click_times = clicks.(L, R, T)
-    binned_clicks = bin_clicks(click_times, centered=centered, dt=dt)
-    nT = map(x-> x.nT, binned_clicks)
-
-    ncells = size(data["spike_times"][1], 2)
-
-    spikes = vec(map(x-> vec(vec.(collect.(x))), data["spike_times"]))
-
-    output = map((spikes, nT)-> bin_spikes(spikes, dt, nT; pad=0), spikes, nT)
-
-    spikes = getindex.(output, 1)
-    FR = map(i-> map((x,T)-> sum(x[i])/T, spikes, T), 1:ncells)
-    choice = vec(convert(BitArray, data["pokedR"]))
-    pval = map(x-> pvalue(EqualVarianceTTest(x[choice], x[.!choice])), FR)
-    ptest = pval .< pcut
-
-    if any(ptest)
-
-        ncells = sum(ptest)
-
-        if break_sim_data
-
-            spike_data = Vector{Vector{neuraldata}}(undef, ncells)
-            μ_rnt = Vector(undef, ncells)
-            μ_t = Vector(undef, ncells)
-
-            for n = 1:ncells
-
-                spikes = vec(map(x-> [vec(collect(x[findall(ptest)][n]))], data["spike_times"]))
-
-                output = map((spikes, nT)-> bin_spikes(spikes, dt, nT; pad=pad), spikes, nT)
-
-                spikes = getindex.(output, 1)
-                padded = getindex.(output, 2)
-
-                μ_rnt[n] = filtered_rate.(padded, dt; filtSD=filtSD, cut=cut)
-
-                μ_t[n] = map(n-> [max(0., mean([μ_rnt[n][i][1][t]
-                    for i in findall(nT .+ 2*pad .>= t)]))
-                    for t in 1:(maximum(nT) .+ 2*pad)], n:n)
-
-                λ0 = map(nT-> bin_λ0(μ_t[n], nT+2*pad), nT)
-                #λ0 = map(nT-> map(μ_t-> zeros(nT), μ_t), nT)
-
-                input_data = neuralinputs(click_times, binned_clicks, λ0, dt, centered, delay, pad)
-                spike_data[n] = neuraldata(input_data, spikes, 1)
-
-                nRBFs=6
-                model, = optimize([spike_data[n]], μ_RBF_options(ncells=[1], nRBFs=nRBFs); show_trace=false)
-                maxnT = maximum(nT)
-                x = 1:maxnT+2*pad
-                rbf = UniformRBFE(x, nRBFs, normalize=true)
-
μ_t[n] = [rbf(x) * model.θ.θμ[1][1]] - - #model, = optimize([spike_data[n]], μ_poly_options(ncells=[1]); show_trace=false) - #μ_t[n] = [model.θ.θμ[1][1](1:length(μ_t[n][1]))] - - λ0 = map(nT-> bin_λ0(μ_t[n], nT+2*pad), nT) - input_data = neuralinputs(click_times, binned_clicks, λ0, dt, centered, delay, pad) - - if include_choices - spike_data[n] = neural_choice_data(input_data, choices, spikes, 1) - else - spike_data[n] = neuraldata(input_data, spikes, 1) - end - - end - - else - - spikes = vec(map(x-> vec(vec.(collect.(x))), data["spike_times"])) - - output = map((spikes, nT)-> bin_spikes(spikes, dt, nT; pad=pad), spikes, nT) - - spikes = getindex.(output, 1) - padded = getindex.(output, 2) - - spikes = map(spikes-> spikes[ptest], spikes) - padded = map(padded-> padded[ptest], padded) - - μ_rnt = filtered_rate.(padded, dt; filtSD=filtSD, cut=cut) - - μ_t = map(n-> [max(0., mean([μ_rnt[i][n][t] - for i in findall(nT .+ 2*pad .>= t)])) - for t in 1:(maximum(nT) .+ 2*pad)], 1:ncells) - - #μ_t = map(n-> [max(0., mean([spikes[i][n][t]/dt - # for i in findall(nT .>= t)])) - # for t in 1:(maximum(nT))], 1:ncells) - - λ0 = map(nT-> bin_λ0(μ_t, nT+2*pad), nT) - #λ0 = map(nT-> map(μ_t-> zeros(nT), μ_t), nT) - - input_data = neuralinputs(click_times, binned_clicks, λ0, dt, centered, delay, pad) - spike_data = neuraldata(input_data, spikes, ncells) - - nRBFs=6 - model, = optimize([spike_data], μ_RBF_options(ncells=[ncells], nRBFs=nRBFs); show_trace=false) - maxnT = maximum(nT) - x = 1:maxnT+2*pad - rbf = UniformRBFE(x, nRBFs, normalize=true) - μ_t = map(n-> rbf(x) * model.θ.θμ[1][n], 1:ncells) - - #model, = optimize([spike_data], μ_poly_options(ncells=[ncells]); show_trace=false) - #μ_t = map(n-> model.θ.θμ[1][n](1:length(μ_t[n])), 1:ncells) - - λ0 = map(nT-> bin_λ0(μ_t, nT+2*pad), nT) - input_data = neuralinputs(click_times, binned_clicks, λ0, dt, centered, delay, pad) - if include_choices - spike_data = neural_choice_data(input_data, choices, spikes, ncells) - else - spike_data = neuraldata(input_data, spikes, ncells) - end - - - end - - return spike_data, μ_rnt, μ_t - - else - - return nothing - - end - -end \ No newline at end of file diff --git a/src/neural_model/not_needed_for_tests/filter/filtered.jl b/src/neural_model/not_needed_for_tests/filter/filtered.jl deleted file mode 100644 index c883c56a..00000000 --- a/src/neural_model/not_needed_for_tests/filter/filtered.jl +++ /dev/null @@ -1,259 +0,0 @@ -""" -""" -@with_kw struct filtinputs{T1,T2,T3} - clicks::T1 - binned_clicks::T2 - λ0::Vector{Vector{Float64}} - LR::T3 - dt::Float64 - centered::Bool - delay::Int - pad::Int -end - - -""" -""" -@with_kw struct filtdata <: DDMdata - input_data::filtinputs - spikes::Vector{Vector{Int}} - ncells::Int - choice::Bool -end - - -""" -""" -@with_kw struct filtoptions - ncells::Vector{Int} - nparams::Union{Vector{Int}, Vector{Vector{Int}}} - filt_len::Int = 50 - half_len::Int=0 - f::Union{Vector{String}, Vector{Vector{String}}} - fit::Vector{Bool} - ub::Vector{Float64} - x0::Vector{Float64} - lb::Vector{Float64} -end - - -""" -""" -@with_kw struct θneural_filt{T1, T2, T3} <: DDMθ - w::Vector{T1} - B::T3 - θy::T2 - ncells::Vector{Int} - nparams::Union{Vector{Int}, Vector{Vector{Int}}} - f::Union{Vector{String}, Vector{Vector{String}}} - filt_len::Int -end - - -""" -""" -function make_filt_data(data, filt_len; half_len=0) - - @unpack input_data, spikes, ncells, choice = data - @unpack binned_clicks, clicks, dt, centered, λ0, delay, pad = input_data - - # time_bin = (-(pad-1):nT+pad) .- delay - - L,R = 
binLR(binned_clicks, clicks, dt) - #LR = -L + R - LR = vcat(zeros(Int, pad), -L + R, zeros(Int, pad)); - #LRX = map(i-> vcat(missings(Int, max(0, filt_len - i)), LR[max(1,i-filt_len+1):i]), 1:length(LR)) - #LRX = map(i-> vcat(missings(Int, max(0, filt_len - (i+shift))), - # LR[max(1,(i+shift)-filt_len+1): min(length(LR), i+shift)], - # missings(Int, max(0, -(length(LR) - (i+shift))))), 1:length(LR)) - - #LRX = map(i-> vcat(missings(Int, max(0, filt_len - (i-delay+half_len))), - # LR[max(1, i-delay+half_len-filt_len+1): min(length(LR), i-delay+half_len)], - # missings(Int, max(0, -(length(LR) - (i-delay+half_len))))), 1:length(LR)) - - LRX = map(i-> vcat(missings(Int, max(0, half_len+1 - (i-delay))), - LR[max(1, 1+i-delay-(half_len+1)): min(length(LR), i-delay+half_len)], - missings(Int, max(0, -(length(LR) - (i-delay+half_len))))), 1:length(LR)); - - filtdata(filtinputs(clicks, binned_clicks, λ0, LRX, dt, centered, delay, pad), spikes, ncells, choice) - -end - - -function prior(x::Vector{T1}, filt_len; sig_σ::Float64=1.) where {T1 <: Real} - - - sig_σ * sum(diff(x[1:filt_len]).^2) - -end - - -function optimize(data, options::filtoptions; - x_tol::Float64=1e-10, f_tol::Float64=1e-9, g_tol::Float64=1e-3, - iterations::Int=Int(2e3), show_trace::Bool=true, - outer_iterations::Int=Int(1e1), scaled::Bool=false, - extended_trace::Bool=false, sig_σ::Float64=1.) - - @unpack fit, lb, ub, x0, ncells, f, nparams, filt_len, half_len = options - - filt_data = map(data-> make_filt_data.(data, Ref(filt_len); half_len=half_len), data) - θ = θneural_filt(x0, ncells, nparams, f, filt_len) - - lb, = unstack(lb, fit) - ub, = unstack(ub, fit) - x0,c = unstack(x0, fit) - - ℓℓ(x) = -(loglikelihood(stack(x,c,fit), filt_data, θ) + prior(stack(x,c,fit), filt_len; sig_σ=sig_σ)) - - output = optimize(x0, ℓℓ, lb, ub; g_tol=g_tol, x_tol=x_tol, - f_tol=f_tol, iterations=iterations, show_trace=show_trace, - outer_iterations=outer_iterations, scaled=scaled, - extended_trace=extended_trace) - - x = Optim.minimizer(output) - x = stack(x,c,fit) - θ = θneural_filt(x, ncells, nparams, f, filt_len) - model = neuralDDM(θ, data) - converged = Optim.converged(output) - - return model, output - -end - - -""" - loglikelihood(x, data, ncells) - -A wrapper function that accepts a vector of mixed parameters, splits the vector -into two vectors based on the parameter mapping function provided as an input. Used -in optimization, Hessian and gradient computation. 
-""" -function loglikelihood(x::Vector{T1}, data, θ::θneural_filt) where {T1 <: Real} - - @unpack ncells, nparams, f, filt_len = θ - θ = θneural_filt(x, ncells, nparams, f, filt_len) - loglikelihood(θ, data) - -end - - -""" -""" -function θneural_filt(x::Vector{T}, ncells::Vector{Int}, nparams::Vector{Vector{Int}}, - f::Vector{Vector{String}}, filt_len::Int) where {T <: Real} - - borg = vcat(filt_len+1, filt_len +1 .+cumsum(vcat(nparams...))) - blah = [x[i] for i in [borg[i-1]+1:borg[i] for i in 2:length(borg)]] - - blah = map((f,x) -> f(x...), getfield.(Ref(@__MODULE__), Symbol.(vcat(f...))), blah) - - borg = vcat(0,cumsum(ncells)) - θy = [blah[i] for i in [borg[i-1]+1:borg[i] for i in 2:length(borg)]] - - θneural_filt(x[1:filt_len], x[filt_len+1], θy, ncells, nparams, f, filt_len) - -end - - -""" -""" -function flatten(θ::θneural_filt) - - @unpack w, θy, B = θ - vcat(w, B, vcat(collect.(Flatten.flatten.(vcat(θy...)))...)) - -end - - -""" -""" -function loglikelihood(θ::θneural_filt, data) - - @unpack w, θy, B = θ - - #sum(map((θy, data) -> loglikelihood(w, θy, data), θy, data)) - sum(loglikelihood.(Ref(w), B, θy, data)) - -end - - -""" -""" -function loglikelihood(w, B, θy, data) - - #@unpack spikes, input_data = data - #@unpack dt = input_data - dt = data[1].input_data.dt - - LR = vcat(map(x-> x.input_data.LR, data)...) - spikes = map(i-> vcat(map(x-> x.spikes[i], data)...), 1:data[1].ncells) - λ0 = map(i-> vcat(map(x-> x.input_data.λ0[i], data)...), 1:data[1].ncells) - - #a = pmap(LR-> afilt(w, LR), LR) - a = afilt.(Ref(w),B, LR) - λ = map((θy, λ0)-> θy(a, λ0), θy, λ0) - - sum(logpdf.(Poisson.(vcat(λ...)*dt), vcat(spikes...))) - -end - - -#= -""" -""" -function loglikelihood(w, θy, input_data::filtinputs) - - @unpack λ0 = input_data - - a = rand(w, input_data) - λ = map((θy, λ0)-> θy(a, λ0), θy, λ0) - - return λ, a - -end - - -""" -""" -function rand(w, inputs) - - @unpack LR = inputs - - afilt.(Ref(w), LR) - -end -=# - - -""" -""" -afilt(w, B, LR) = max(-B, min(B, sum(skipmissing(w .* LR)))) - -#= - - -""" - Sample rates from latent model with multiple rngs, to average over -""" -function synthetic_λ(θ::θfilt, data; nconds::Int=2) - - @unpack θτ,θμ,θy,ncells = θ - - μ_λ = rand.(Ref(θτ), θμ, θy, data) - - μ_c_λ = cond_mean.(μ_λ, data, ncells; nconds=nconds) - - return μ_λ, μ_c_λ - -end - - -""" - Sample all trials over one session -""" -function rand(θτ::θτ, θμ, θy, data) - - pmap(data -> loglikelihood(θτ,θμ,θy,data.input_data)[1], data) - -end - -=# \ No newline at end of file diff --git a/src/neural_model/not_needed_for_tests/load_and_optimize.jl b/src/neural_model/not_needed_for_tests/load_and_optimize.jl deleted file mode 100644 index cddba7d7..00000000 --- a/src/neural_model/not_needed_for_tests/load_and_optimize.jl +++ /dev/null @@ -1,238 +0,0 @@ - -function generate_syn_data_fit_CI(pz::Dict{}, py::Dict{}, ntrials_per_sess; - n::Int=203, dt=1e-3, f_str::String="softplus", rng::Int=0,use_bin_center::Bool=true) - - data = sample_input_and_spikes_multiple_sessions(pz["generative"], py["generative"], ntrials_per_sess; f_str=f_str, - rng=rng,use_bin_center=false) - - data = map(x->bin_clicks_spikes_and_λ0!(x,use_bin_center;dt=dt), data) - - pz["initial"][pz["fit"] .== false] = pz["generative"][pz["fit"] .== false] - #py["initial"][py["fit"] .== false] = py["generative"][py["fit"] .== false] - - if n == 0 - pz, py, = optimize_model(pz, py, data, f_str, show_trace=true) - pz, py = compute_H_CI!(pz, py, data, f_str) - else - pz, py, = optimize_model(pz, py, data, f_str, n, 
show_trace=true) - pz, py = compute_H_CI!(pz, py, data, f_str, n) - end - -end - -function generate_syn_data_fit_CI(nsessions, N_per_sess, ntrials_per_sess; - n::Int=53, dt=1e-2, dimy::Int=3, f_str::String="softplus", - pz::Dict = Dict("generative" => [0., 15., -1., 100., 0., 1., 0.02], - "name" => ["σ_i","B", "λ", "σ_a","σ_s","ϕ","τ_ϕ"], - "fit" => vcat(falses(1),trues(3),falses(3)), - "initial" => [2.,20.,-3,100.,2.,0.2,0.005], - "lb" => [-eps(), 4., -5., -eps(), -eps(), eps(), eps()], - "ub" => [10., 100, 5., 800., 40., 2., 10.]), - use_bin_center::Bool=true) - - pz["initial"][pz["fit"] .== false] = pz["generative"][pz["fit"] .== false] - #pz["initial"] = pz["generative"] - - #py = Dict("generative" => [[[1e-6, 10., 1e-6] for n in 1:N] for N in N_per_sess], - # "fit" => map(N-> repeat([falses(3)],outer=N), N_per_sess), - # "initial" => [[[Vector{Float64}(undef,dimy)] for n in 1:N] for N in N_per_sess], - # "dimy"=> dimy, - # "N"=> N_per_sess, - # "nsessions"=> nsessions) - - py = Dict("generative" => [[[10., 10., -10.] for n in 1:N] for N in N_per_sess], - "fit" => map(N-> repeat([falses(1),trues(1),falses(1)],outer=N), N_per_sess), - "initial" => [[[Vector{Float64}(undef,dimy)] for n in 1:N] for N in N_per_sess], - "dimy"=> dimy, - "N"=> N_per_sess, - "nsessions"=> nsessions) - - data = sample_input_and_spikes_multiple_sessions(pz["generative"], py["generative"], ntrials_per_sess; - use_bin_center=false) - - data = map(x->bin_clicks_spikes_and_λ0!(x,use_bin_center;dt=dt), data) - - #py["initial"] = map(data-> regress_init(data, f_str), data) - #py["initial"] = map(data -> optimize_model(data, f_str, show_trace=false), data) - py["initial"] = py["generative"] - - if n == 0 - pz, py, = optimize_model(pz, py, data, f_str, show_trace=false) - pz, py = compute_H_CI!(pz, py, data, f_str) - else - pz, py, = optimize_model(pz, py, data, f_str, n, show_trace=true) - pz, py = compute_H_CI!(pz, py, data, f_str, n) - end - -end - -function load_and_optimize(path::String, sessids, ratnames, f_str, n::Int; - dt::Float64=1e-3, delay::Float64=0., - pz::Dict = Dict("name" => ["σ_i","B", "λ", "σ_a","σ_s","ϕ","τ_ϕ"], - "fit" => vcat(falses(1),trues(2),falses(4)), - "initial" => [2*eps(), 10., -0.1, 2*eps(), 2*eps(), 1.0, 0.005], - "lb" => [eps(), 8., -5., eps(), eps(), eps(), eps()], - "ub" => [10., 40, 5., 100., 2.5, 2., 10.]), - show_trace::Bool=true, iterations::Int=Int(2e3), - use_bin_center::Bool=true) - - data = aggregate_spiking_data(path,sessids,ratnames) - data = map(x->bin_clicks_spikes_and_λ0!(x,use_bin_center; dt=dt,delay=delay), data) - - pz, py = load_and_optimize(data, f_str, n; pz=pz, - show_trace=show_trace, iterations=iterations) - - return pz, py, data - -end - -function do_all_CV(data::Vector{Dict{Any,Any}}, f_str, n; frac = 0.9) - - train_data, test_data = train_test_divide(data[1],frac) - pz,py = init_pz_py([train_data], f_str) - pz, py, H = optimize_and_errorbars(pz, py, [train_data], f_str, n) - ΔLL = compute_ΔLL(pz, py, [test_data], n, f_str) - - return pz, py, H, ΔLL - -end - -function compute_ΔLL(pz, py, data, n, f_str) - - LL_ML = compute_LL(pz["final"], py["final"], data, n, f_str) - - #f_py(0.,d["λ0"][r][n],py["final"],f_str) + art - - #LL_null = mapreduce(d-> mapreduce(r-> mapreduce(n-> - # neural_null(d["spike_counts"][r][n], d["λ0"][r][n], d["dt"]), - # +, 1:d["N"]), +, 1:d["ntrials"]), +, data) - - LL_null = mapreduce(d-> mapreduce(r-> mapreduce(n-> - neural_null(d["spike_counts"][r][n], map(λ-> f_py(0.,λ,py["final"][1][n],f_str), d["λ0"][r][n]), d["dt"]), - +, 
1:d["N"]), +, 1:d["ntrials"]), +, data) - - #ΔLL = (LL_ML - LL_null)/data[1]["ntrials"] - #ΔLL = 1. - (LL_ML/LL_null) - - #return ΔLL, LL_ML, LL_null - LL_ML - LL_null - -end - -function optimize_and_errorbars(pz, py, data, f_str, n) - - #@time pz, py, opt_output, = optimize_model(pz, py, data, f_str, n, show_trace=true, iterations=500) - print("trying constrained opt \n") - @time pz, py, opt_output, = optimize_model_con(pz, py, data, f_str, n, show_trace=true) - print("optimization complete \n") - print("converged: $(Optim.converged(opt_output)) \n") - - if Optim.converged(opt_output) - print("computing Hessian \n") - #@time pz, py, H = compute_H_CI!(pz, py, data, f_str, n) - @time pz, py, H = compute_H_CI_con!(pz, py, data, f_str, n) - else - print("not computing Hessian \n") - H = [] - end - - return pz, py, H - -end - -function init_pz_py(data::Vector{Dict{Any,Any}}, f_str) - - nsessions = length(data) - N_per_sess = map(data-> data["N"], data) - - if f_str == "softplus" - - dimy = 3 - - py = Dict("fit" => map(N-> repeat([trues(dimy)],outer=N), N_per_sess), - "initial" => [[[Vector{Float64}(undef,dimy)] for n in 1:N] for N in N_per_sess], - "dimy"=> dimy, - "N"=> N_per_sess, - "nsessions"=> nsessions, - "lb" => [[[2*eps(),-Inf,-Inf] for n in 1:N] for N in N_per_sess], - "ub" => [[[Inf,Inf,Inf] for n in 1:N] for N in N_per_sess]) - - elseif f_str == "sig" - - dimy = 4 - - py = Dict("fit" => map(N-> repeat([trues(dimy)],outer=N), N_per_sess), - "initial" => [[[Vector{Float64}(undef,dimy)] for n in 1:N] for N in N_per_sess], - "dimy"=> dimy, - "N"=> N_per_sess, - "nsessions"=> nsessions, - "lb" => [[[-100.,0.,-10.,-10.] for n in 1:N] for N in N_per_sess], - "ub" => [[[100.,100.,10.,10.] for n in 1:N] for N in N_per_sess]) - end - - pz::Dict = Dict("name" => ["σ_i","B", "λ", "σ_a","σ_s","ϕ","τ_ϕ"], - "fit" => vcat(falses(1),trues(2),falses(4)), - "initial" => [2*eps(), 10., -0.1, 2*eps(), 2*eps(), 1., 0.01], - "lb" => [eps(), 8., -5., eps(), eps(), 0.01, 0.005], - "ub" => [100., 100., 5., 100., 2.5, 1.2, 1.5]) - - py["initial"] = map(data-> regress_init(data, f_str), data) - pz, py = optimize_model_con(pz, py, data, f_str, show_trace=false) - - pz["initial"] = vcat(1.,10.,-0.1,20.,2*eps(),1.,0.01) - pz["state"][pz["fit"] .== false] = pz["initial"][pz["fit"] .== false] - pz["fit"] = vcat(trues(4),falses(3)) - - return pz, py - -end - -function load_and_optimize(data::Vector{Dict{Any,Any}}, f_str, n::Int; - pz::Dict = Dict("name" => ["σ_i","B", "λ", "σ_a","σ_s","ϕ","τ_ϕ"], - "fit" => vcat(falses(1),trues(2),falses(4)), - "initial" => [2*eps(), 10., -0.1, 2*eps(), 2*eps(), 1.0, 0.005], - "lb" => [eps(), 8., -5., eps(), eps(), eps(), eps()], - "ub" => [10., 40, 5., 100., 2.5, 2., 10.]), - show_trace::Bool=true, iterations::Int=Int(2e3),CI::Bool=false) - - nsessions = length(data) - N_per_sess = map(data-> data["N"], data) - - if f_str == "softplus" - dimy = 3 - elseif f_str == "sig" - dimy = 4 - end - - #I should map over this, no map within this.... 
- #parameters for the neural observation model - py = Dict("fit" => map(N-> repeat([trues(dimy)],outer=N), N_per_sess), - "initial" => [[[Vector{Float64}(undef,dimy)] for n in 1:N] for N in N_per_sess], - "dimy"=> dimy, - "N"=> N_per_sess, - "nsessions"=> nsessions) - - py["initial"] = map(data-> regress_init(data, f_str), data) - pz, py = optimize_model(pz, py, data, f_str, show_trace=show_trace, iterations=200) - - pz["initial"] = vcat(1.,10.,-0.1,20.,0.5,1.0,0.005) - pz["state"][pz["fit"] .== false] = pz["initial"][pz["fit"] .== false] - pz["fit"] = vcat(trues(7)) - - pz, py, = optimize_model(pz, py, data, f_str, n, show_trace=show_trace, iterations=iterations) - - if CI - pz, py = compute_H_CI!(pz, py, data, f_str, n) - end - - LL_ML = compute_LL(pz["final"], py["final"], data, n, f_str) - - LL_null = mapreduce(d-> mapreduce(r-> mapreduce(n-> - neural_null(d["spike_counts"][r][n], d["λ0"][r][n], d["dt"]), - +, 1:d["N"]), +, 1:d["ntrials"]), +, data) - - ΔLL = LL_ML - LL_null - - return pz, py, ΔLL - -end diff --git a/src/neural_model/not_needed_for_tests/neural_model-th.jl b/src/neural_model/not_needed_for_tests/neural_model-th.jl deleted file mode 100644 index 4325f3f7..00000000 --- a/src/neural_model/not_needed_for_tests/neural_model-th.jl +++ /dev/null @@ -1,192 +0,0 @@ -const dimz_th = 8 - -""" -""" -@with_kw struct th_options - ncells::Vector{Int} - nparams::Union{Vector{Int}, Vector{Vector{Int}}} - f::Union{Vector{String}, Vector{Vector{String}}} - fit::Vector{Bool} - ub::Vector{Float64} - x0::Vector{Float64} - lb::Vector{Float64} -end - - -""" -""" -@with_kw struct θneural_th{T1, T2, T3} <: DDMθ - θz::T1 - μ0::T3 - θy::T2 - ncells::Vector{Int} - nparams::Union{Vector{Int}, Vector{Vector{Int}}} - f::Union{Vector{String}, Vector{Vector{String}}} -end - - - -""" -""" -function θneural_th(x::Vector{T}, ncells::Vector{Int}, nparams::Vector{Vector{Int}}, - f::Vector{Vector{String}}) where {T <: Real} - - borg = vcat(dimz_th, dimz_th.+cumsum(vcat(nparams...))) - blah = [x[i] for i in [borg[i-1]+1:borg[i] for i in 2:length(borg)]] - - blah = map((f,x) -> f(x...), getfield.(Ref(@__MODULE__), Symbol.(vcat(f...))), blah) - - borg = vcat(0,cumsum(ncells)) - θy = [blah[i] for i in [borg[i-1]+1:borg[i] for i in 2:length(borg)]] - - θneural_th(θz(x[1:dimz]...), x[dimz_th], θy, ncells, nparams, f) - -end - - - -""" - flatten(θ) - -Extract parameters related to the choice model from a struct and returns an ordered vector -``` -""" -function flatten(θ::θneural_th) - - @unpack θy, θz, μ0 = θ - @unpack σ2_i, B, λ, σ2_a, σ2_s, ϕ, τ_ϕ = θz - vcat(σ2_i, B, λ, σ2_a, σ2_s, ϕ, τ_ϕ, μ0, - vcat(collect.(Flatten.flatten.(vcat(θy...)))...)) - -end - - -function optimize(data, options::T1; n::Int=53, - x_tol::Float64=1e-10, f_tol::Float64=1e-9, g_tol::Float64=1e-3, - iterations::Int=Int(2e3), show_trace::Bool=true, - outer_iterations::Int=Int(1e1), scaled::Bool=false, - extended_trace::Bool=false, σ::Vector{Float64}=[0.], - μ::Vector{Float64}=[0.], do_prior::Bool=false, cross::Bool=false, - sig_σ::Float64=1.) 
where T1 <: th_options - - @unpack fit, lb, ub, x0, ncells, f, nparams = options - - θ = θneural_th(x0, ncells, nparams, f) - - lb, = unstack(lb, fit) - ub, = unstack(ub, fit) - x0,c = unstack(x0, fit) - - ℓℓ(x) = -(loglikelihood(stack(x,c,fit), data, θ; n=n, cross=cross) + sigmoid_prior(stack(x,c,fit), data, θ; sig_σ=sig_σ)) - - output = optimize(x0, ℓℓ, lb, ub; g_tol=g_tol, x_tol=x_tol, - f_tol=f_tol, iterations=iterations, show_trace=show_trace, - outer_iterations=outer_iterations, scaled=scaled, - extended_trace=extended_trace) - - x = Optim.minimizer(output) - x = stack(x,c,fit) - θ = θneural_th(x, ncells, nparams, f) - model = neuralDDM(θ, data) - converged = Optim.converged(output) - - return model, output - -end - - -function sigmoid_prior(x::Vector{T1}, data::Union{Vector{Vector{T2}}, Vector{Any}}, - θ::θneural_th; sig_σ::Float64=1.) where {T1 <: Real, T2 <: neuraldata} - - @unpack ncells, nparams, f = θ - θ = θneural_th(x, ncells, nparams, f) - - if typeof(f) == String - if f == "Sigmoid" - sum(map(x-> sum(logpdf.(Normal(0., sig_σ), map(x-> x.c, x))), θ.θy)) - else - 0. - end - else - sum(map(x-> sum(logpdf.(Normal(0., sig_σ), x.c)), vcat(θ.θy...)[vcat(f...) .== "Sigmoid"])) - end - -end - - -""" - loglikelihood(x, data; n=53) - -A wrapper function that accepts a vector of mixed parameters, splits the vector -into two vectors based on the parameter mapping function provided as an input. Used -in optimization, Hessian and gradient computation. -""" -function loglikelihood(x::Vector{T}, data::Vector{Vector{T2}}, - θ::θneural_th; n::Int=53, - cross::Bool=false) where {T <: Real, T2 <: neuraldata} - - @unpack ncells, nparams, f = θ - θ = θneural_th(x, ncells, nparams, f) - loglikelihood(θ, data; n=n, cross=cross) - -end - - -""" -""" -function initialize_latent_model(σ2_i::TT, B::TT, λ::TT, σ2_a::TT, - μ0::VV, n::Int, dt::Float64; lapse::UU=0.) where {TT,UU,VV <: Any} - - xc,dx = bins(B,n) - P = P0(σ2_i,μ0,n,dx,xc,dt; lapse=lapse) - M = transition_M(σ2_a*dt,λ,zero(TT),dx,xc,n,dt) - - return P, M, xc, dx - -end - - -""" - P0(σ2_i, n dx, xc, dt; lapse=0.) - -""" -function P0(σ2_i::TT, μ0::WW, n::Int, dx::VV, xc::Vector{TT}, dt::Float64; - lapse::UU=0.) where {TT,UU,VV,WW <: Any} - - P = zeros(TT,n) - P[ceil(Int,n/2)] = one(TT) - lapse - P[1], P[n] = lapse/2., lapse/2. - M = transition_M(σ2_i,zero(TT),μ0,dx,xc,n,dt) - P = M * P - -end - - -""" - LL_all_trials(pz, py, data; n=53) - -Computes the log likelihood for a set of trials consistent with the observed neural activity on each trial. 
-"""
-function loglikelihood(θ::θneural_th,
-        data::Vector{Vector{T1}}; n::Int=53, cross::Bool=false) where {T1 <: neuraldata}
-
-    @unpack θz, θy, μ0 = θ
-    @unpack σ2_i, B, λ, σ2_a = θz
-    @unpack dt = data[1][1].input_data
-
-    choice = map(x-> map(x-> x.choice, x), data)
-
-    P0,M,xc,dx = initialize_latent_model(σ2_i, B, λ, σ2_a, zero(μ0), n, dt)
-    P0_L, = initialize_latent_model(σ2_i, B, λ, σ2_a, -μ0, n, dt)
-    P0_R, = initialize_latent_model(σ2_i, B, λ, σ2_a, μ0, n, dt)
-
-    sum(map((data, θy, choice) -> sum(pmap(data ->
-        loglikelihood(θz,θy,data, P0_R, M, xc, dx; n=n, cross=cross),
-        data[2:end][choice[1:end-1]])), data, θy, choice)) +
-    sum(map((data, θy, choice) -> sum(pmap(data ->
-        loglikelihood(θz,θy,data, P0_L, M, xc, dx; n=n, cross=cross),
-        data[2:end][.!choice[1:end-1]])), data, θy, choice)) +
-    sum(map((data, θy) -> sum(pmap(data ->
-        loglikelihood(θz,θy,data, P0, M, xc, dx; n=n, cross=cross), [data[1]])), data, θy))
-
-end
\ No newline at end of file
diff --git a/src/neural_model/not_needed_for_tests/null.jl b/src/neural_model/not_needed_for_tests/null.jl
deleted file mode 100644
index 1206d9bf..00000000
--- a/src/neural_model/not_needed_for_tests/null.jl
+++ /dev/null
@@ -1,139 +0,0 @@
-"""
-"""
-@with_kw struct null_options
-    ncells::Vector{Int}
-    nparams::Int = 4
-    f::String = "Sigmoid"
-    fit::Vector{Bool} = repeat([true,false,false,false], sum(ncells))
-    lb::Vector{Float64} = repeat([-100.,0.,-10.,-10.], sum(ncells))
-    ub::Vector{Float64} = repeat([100.,100.,10.,10.], sum(ncells))
-    x0::Vector{Float64} = repeat([1e-1,eps(),0.,0.], sum(ncells))
-end
-
-
-"""
-"""
-function θneural_null(x::Vector{T}, ncells::Vector{Int}, nparams::Int, f::String) where {T <: Real}
-
-    dims2 = vcat(0,cumsum(ncells))
-
-    blah = Tuple.(collect(partition(x[1:nparams*sum(ncells)], nparams)))
-
-    if f == "Sigmoid"
-        blah2 = map(x-> Sigmoid(x...), blah)
-    elseif f == "Softplus"
-        blah2 = map(x-> Softplus(x...), blah)
-    end
-
-    θy = map(idx-> blah2[idx], [dims2[i]+1:dims2[i+1] for i in 1:length(dims2)-1])
-
-    θneural_null(θy, ncells, nparams, f)
-
-end
-
-
-"""
-"""
-@with_kw struct θneural_null{T1} <: DDMθ
-    θy::T1
-    ncells::Vector{Int}
-    nparams::Int
-    f::String
-end
-
-
-function optimize(data, options::null_options;
-        x_tol::Float64=1e-10, f_tol::Float64=1e-6, g_tol::Float64=1e-3,
-        iterations::Int=Int(2e3), show_trace::Bool=false,
-        outer_iterations::Int=Int(1e1))
-
-    @unpack fit, lb, ub, x0, ncells, f, nparams = options
-
-    θ = θneural_null(x0, ncells, nparams, f)
-
-    lb, = unstack(lb, fit)
-    ub, = unstack(ub, fit)
-    x0,c = unstack(x0, fit)
-    ℓℓ(x) = -loglikelihood(stack(x,c,fit), data, θ)
-
-    output = optimize(x0, ℓℓ, lb, ub; g_tol=g_tol, x_tol=x_tol,
-        f_tol=f_tol, iterations=iterations, show_trace=show_trace,
-        outer_iterations=outer_iterations)
-
-    x = Optim.minimizer(output)
-    x = stack(x,c,fit)
-    θ = θneural_null(x, ncells, nparams, f)
-    model = neuralDDM(θ, data)
-    converged = Optim.converged(output)
-
-    return model, output
-
-end
-
-
-
-
-"""
-    flatten(θ)
-
-Extract parameters related to the model from a defined abstract type and return an ordered vector.
-"""
-function flatten(θ::θneural_null)
-
-    @unpack θy = θ
-    vcat(collect.(Flatten.flatten.(vcat(θy...)))...)
-
-end
-
-
-"""
-    loglikelihood(x, data, θ)
-
-A wrapper function that accepts a vector of mixed parameters and packs the parameters into an appropriate model structure.
-Used in optimization, Hessian and gradient computation.
-""" -function loglikelihood(x::Vector{T1}, data::Union{Vector{Vector{T2}}, Vector{Any}}, - θ::θneural_null) where {T1 <: Real, T2 <: neuraldata} - - @unpack ncells, nparams, f = θ - θ = θneural_null(x, ncells, nparams, f) - loglikelihood(θ, data) - -end - - -""" -""" -function loglikelihood(θ::θneural_null, - data::Union{Vector{Vector{T1}}, Vector{Any}}) where T1 <: neuraldata - - @unpack θy = θ - - sum(map((θy, data) -> sum(pmap(data-> loglikelihood(θy, data), data, - batch_size=length(data))), θy, data)) - -end - - -""" -""" -function loglikelihood(θy::Vector{T1}, data::neuraldata) where T1 <: DDMf - - @unpack spikes, input_data = data - @unpack dt = input_data - λ = loglikelihood(θy,input_data) - sum(logpdf.(Poisson.(vcat(λ...)*dt), vcat(spikes...))) - -end - - -""" -""" -function loglikelihood(θy::Vector{T1}, input_data::neuralinputs) where T1 <: DDMf - - @unpack λ0, dt = input_data - - λ = map((θy,λ0)-> θy(zeros(length(λ0)), λ0), θy, λ0) - -end \ No newline at end of file diff --git a/src/neural_model/not_needed_for_tests/polynomial/neural_poly_model.jl b/src/neural_model/not_needed_for_tests/polynomial/neural_poly_model.jl deleted file mode 100644 index 39c45b19..00000000 --- a/src/neural_model/not_needed_for_tests/polynomial/neural_poly_model.jl +++ /dev/null @@ -1,245 +0,0 @@ -abstract type neural_poly_options end - - -""" -""" -@with_kw struct Sigmoid_poly_options <: neural_poly_options - ncells::Vector{Int} - nparams::Int = 4 - npolys::Int = 4 - f::String = "Sigmoid" - fit::Vector{Bool} = vcat(trues(dimz + sum(ncells)*npolys + sum(ncells)*nparams)) - lb::Vector{Float64} = vcat([0., 8., -5., 0., 0., 0.01, 0.005], - repeat(-Inf * ones(npolys), sum(ncells)), - repeat([-100.,0.,-10.,-10.], sum(ncells))) - ub::Vector{Float64} = vcat([Inf, Inf, 10., Inf, Inf, 1.2, 1.], - repeat(Inf * ones(npolys), sum(ncells)), - repeat([100.,100.,10.,10.], sum(ncells))) - x0::Vector{Float64} = vcat([0.1, 15., -0.1, 20., 0.5, 0.8, 0.008], - repeat(zeros(npolys), sum(ncells)), - repeat([10.,10.,1.,0.], sum(ncells))) -end - - -""" -""" -@with_kw struct Softplus_poly_options <: neural_poly_options - ncells::Vector{Int} - nparams::Int = 3 - npolys::Int = 4 - f::String = "Softplus" - fit::Vector{Bool} = vcat(trues(dimz + sum(ncells)*npolys + sum(ncells)*nparams)) - lb::Vector{Float64} = vcat([0., 8., -10., 0., 0., 0., 0.005], - repeat(-Inf * ones(npolys), sum(ncells)), - repeat([1e-12, -10., -10.], sum(ncells))) - ub::Vector{Float64} = vcat([Inf, 200., 10., Inf, Inf, 1.2, 1.], - repeat(Inf * ones(npolys), sum(ncells)), - repeat([100., 10., 10.], sum(ncells))) - x0::Vector{Float64} = vcat([0.1, 15., -0.1, 20., 0.5, 0.8, 0.008], - repeat(zeros(npolys), sum(ncells)), - repeat([10.,1.,0.], sum(ncells))) -end - - -""" -""" -mutable struct θneural_poly{T1, T2, T3} <: DDMθ - θz::T1 - θμ::T2 - θy::T3 - ncells::Vector{Int} - nparams::Int - f::String - npolys::Int -end - - -""" -""" -@with_kw struct θμ{T1} - θ::T1 -end - - -""" -""" -function θneural_poly(x::Vector{T}, ncells::Vector{Int}, nparams::Int, f::String, npolys::Int) where {T <: Real} - - dims2 = vcat(0,cumsum(ncells)) - - blah = Tuple.(collect(partition(x[dimz + npolys*sum(ncells) + 1:dimz + npolys*sum(ncells) + nparams*sum(ncells)], nparams))) - - if f == "Sigmoid" - blah2 = map(x-> Sigmoid(x...), blah) - elseif f == "Softplus" - blah2 = map(x-> Softplus(x...), blah) - end - - θy = map(idx-> blah2[idx], [dims2[i]+1:dims2[i+1] for i in 1:length(dims2)-1]) - - blah = Tuple.(collect(partition(x[dimz+1:dimz+npolys*sum(ncells)], npolys))) - - blah2 = map(x-> 
Poly(collect(x)), blah)
-
-    θμ = map(idx-> blah2[idx], [dims2[i]+1:dims2[i+1] for i in 1:length(dims2)-1])
-
-    θneural_poly(θz(Tuple(x[1:dimz])...), θμ, θy, ncells, nparams, f, npolys)
-
-end
-
-
-"""
-    loglikelihood(x, data, θ; n=53)
-
-A wrapper function that accepts a vector of mixed parameters, splits the vector
-into its component parameter groups based on the parameter mapping provided as an input. Used
-in optimization, Hessian and gradient computation.
-"""
-function loglikelihood(x::Vector{T}, data, θ::θneural_poly; n::Int=53) where {T <: Real}
-
-    @unpack ncells, nparams, f, npolys = θ
-    θ = θneural_poly(x, ncells, nparams, f, npolys)
-    loglikelihood(θ, data; n=n)
-
-end
-
-
-"""
-    gradient(model, n)
-"""
-function gradient(model::neural_poly_DDM, n::Int)
-
-    @unpack θ, data = model
-    @unpack ncells, nparams, f, npolys = θ
-    x = flatten(θ)
-    #x = [flatten(θ)...]
-    ℓℓ(x) = -loglikelihood(x, data, ncells, nparams, f, npolys, n)
-
-    ForwardDiff.gradient(ℓℓ, x)::Vector{Float64}
-
-end
-
-
-"""
-    Hessian(model, n; chunk_size=4)
-"""
-function Hessian(model::neural_poly_DDM, n::Int; chunk_size::Int=4)
-
-    @unpack θ, data = model
-    @unpack ncells, nparams, f, npolys = θ
-    x = flatten(θ)
-    ℓℓ(x) = -loglikelihood(x, data, ncells, nparams, f, npolys, n)
-
-    cfg = ForwardDiff.HessianConfig(ℓℓ, x, ForwardDiff.Chunk{chunk_size}())
-    ForwardDiff.hessian(ℓℓ, x, cfg)
-
-end
-
-
-"""
-    flatten(θ)
-
-Extract parameters related to the choice model from a struct and return an ordered vector.
-"""
-function flatten(θ::θneural_poly)
-
-    @unpack θy, θz, θμ = θ
-    @unpack σ2_i, B, λ, σ2_a, σ2_s, ϕ, τ_ϕ = θz
-    vcat(σ2_i, B, λ, σ2_a, σ2_s, ϕ, τ_ϕ,
-        vcat(coeffs.(vcat(θμ...))...),
-        vcat(collect.(Flatten.flatten.(vcat(θy...)))...))
-
-end
-
-
-function optimize(data, options::neural_poly_options; n::Int=53,
-        x_tol::Float64=1e-10, f_tol::Float64=1e-9, g_tol::Float64=1e-3,
-        iterations::Int=Int(2e3), show_trace::Bool=true,
-        outer_iterations::Int=Int(1e1), scaled::Bool=false,
-        extended_trace::Bool=false, α1::Float64=10. .^0)
-
-    @unpack fit, lb, ub, x0, ncells, f, nparams, npolys = options
-    θ = θneural_poly(x0, ncells, nparams, f, npolys)
-
-    lb, = unstack(lb, fit)
-    ub, = unstack(ub, fit)
-    x0,c = unstack(x0, fit)
-    #ℓℓ(x) = -loglikelihood(stack(x,c,fit), data, ncells, nparams, f, npolys, n)
-    ℓℓ(x) = -(loglikelihood(stack(x,c,fit), data, θ; n=n) -
-        α1 * (x[2] - lb[2]).^2)
-
-    output = optimize(x0, ℓℓ, lb, ub; g_tol=g_tol, x_tol=x_tol,
-        f_tol=f_tol, iterations=iterations, show_trace=show_trace,
-        outer_iterations=outer_iterations, scaled=scaled,
-        extended_trace=extended_trace)
-
-    x = Optim.minimizer(output)
-    x = stack(x,c,fit)
-    θ = θneural_poly(x, ncells, nparams, f, npolys)
-    model = neural_poly_DDM(θ, data)
-    converged = Optim.converged(output)
-
-    return model, output
-
-end
-
-
-"""
-    loglikelihood(θ, data; n=53)
-
-Computes the log likelihood for a set of trials consistent with the observed neural activity on each trial.
-"""
-""" -function loglikelihood(θ::θneural_poly, data; n::Int=53) - - @unpack θz, θμ, θy = θ - @unpack σ2_i, B, λ, σ2_a = θz - @unpack dt = data[1][1].input_data - - P,M,xc,dx = initialize_latent_model(σ2_i, B, λ, σ2_a, n, dt) - - sum(map((data, θμ, θy) -> - sum(pmap(data -> loglikelihood(θz,θμ,θy,data,P, M, xc, dx, n=n), data)), data, θμ, θy)) - #sum(pmap((data, θy) -> loglikelihood(θz,θy,data,P, M, xc, dx, n), - # vcat(data...), vcat(map((x,y)-> repeat([x],y), θy, length.(data))...))) - -end - - -""" -""" -function loglikelihood(θz,θμ,θy,data, - P::Vector{TT}, M::Array{TT,2}, - xc::Vector{TT}, dx::VV; n::Int=53) where {TT,UU,VV <: Any} - - @unpack λ, σ2_a, σ2_s, ϕ, τ_ϕ = θz - @unpack spikes, input_data = data - @unpack binned_clicks, clicks, dt, centered = input_data - @unpack nT, nL, nR = binned_clicks - @unpack L, R = clicks - - #adapt magnitude of the click inputs - La, Ra = adapt_clicks(ϕ,τ_ϕ,L,R) - - c = Vector{TT}(undef,nT) - F = zeros(TT,n,n) #empty transition matrix for time bins with clicks - - @inbounds for t = 1:nT - - if centered && t == 1 - P,F = latent_one_step!(P,F,λ,σ2_a,σ2_s,t,nL,nR,La,Ra,M,dx,xc,n,dt/2) - else - P,F = latent_one_step!(P,F,λ,σ2_a,σ2_s,t,nL,nR,La,Ra,M,dx,xc,n,dt) - end - - P .*= vcat(map(xc-> exp(sum(map((k,θy,θμ)-> logpdf(Poisson(θy(xc,θμ(t)) * dt), - k[t]), spikes, θy, θμ))), xc)...) - - c[t] = sum(P) - P /= c[t] - - end - - return sum(log.(c)) - -end diff --git a/src/neural_model/not_needed_for_tests/polynomial/sample_model-Copy1.jl b/src/neural_model/not_needed_for_tests/polynomial/sample_model-Copy1.jl deleted file mode 100644 index 7bde96f2..00000000 --- a/src/neural_model/not_needed_for_tests/polynomial/sample_model-Copy1.jl +++ /dev/null @@ -1,209 +0,0 @@ -#= -""" -""" -function boot_LL(pz,py,data,f_str,i,n) - dcopy = deepcopy(data) - dcopy["spike_counts"] = sample_spikes_multiple_sessions(pz, py, [dcopy], f_str; rng=i)[1] - - LL_ML = compute_LL(pz, py, [dcopy], n, f_str) - - #LL_null = mapreduce(d-> mapreduce(r-> mapreduce(n-> - # neural_null(d["spike_counts"][r][n], d["λ0"][r][n], d["dt"]), - # +, 1:d["N"]), +, 1:d["ntrials"]), +, [data]) - - #(LL_ML - LL_null) / dcopy["ntrials"] - - LL_null = mapreduce(d-> mapreduce(r-> mapreduce(n-> - neural_null(d["spike_counts"][r][n], map(λ-> f_py(0.,λ, py[1][n],f_str), d["λ0"][r][n]), d["dt"]), - +, 1:d["N"]), +, 1:d["ntrials"]), +, [dcopy]) - - #return 1. 
- (LL_ML/LL_null), LL_ML, LL_null - LL_ML - LL_null - -end -=# - -""" - Sample rates from latent model with multiple rngs, to average over -""" -function synthetic_λ(θ::θneural, data; num_samples::Int=100, nconds::Int=2) - - @unpack θz,θy,ncells = θ - - λ = map(rng-> rand.(Ref(θz), θy, data, Ref(rng)), 1:num_samples) - μ_λ = mean(λ) - - μ_c_λ = cond_mean.(μ_λ, data, ncells; nconds=nconds) - - return μ_λ, μ_c_λ - -end - - -""" - Sample rates from latent model with multiple rngs, to average over -""" -function synthetic_λ(θ::θneural_poly, data; num_samples::Int=100, nconds::Int=2) - - @unpack θz,θμ,θy,ncells = θ - - λ = map(rng-> rand.(Ref(θz), θμ, θy, data, Ref(rng)), 1:num_samples) - μ_λ = mean(λ) - - μ_c_λ = cond_mean.(μ_λ, data, ncells; nconds=nconds) - - return μ_λ, μ_c_λ - -end - - -""" - Sample all trials over one session -""" -function rand(θz::θz, θy::θy, data, rng) - - ntrials = length(data) - rng = sample(Random.seed!(rng), 1:ntrials, ntrials; replace=false) - - pmap((data,rng) -> rand(θz,θy,data.input_data; rng=rng)[1], data, rng) - -end - - -""" - Sample all trials over one session -""" -function rand(θz::θz, θμ, θy::θy, data, rng) - - ntrials = length(data) - rng = sample(Random.seed!(rng), 1:ntrials, ntrials; replace=false) - - pmap((data,rng) -> rand(θz,θμ,θy,data.input_data; rng=rng)[1], data, rng) - -end - - -""" -""" -function cond_mean(μ_λ, data, ncells; nconds=2) - - nT = map(x-> x.input_data.binned_clicks.nT, data) - ΔLRT = last.(diffLR.(data)) - conds = encode(LinearDiscretizer(binedges(DiscretizeUniformWidth(nconds), ΔLRT)), ΔLRT) - - map(n-> map(c-> [mean([μ_λ[conds .== c][k][n][t] - for k in findall(nT[conds .== c] .>= t)]) - for t in 1:(maximum(nT[conds .== c]))], - 1:nconds), 1:ncells) - -end - - -""" -""" -function synthetic_data(θ::θneural, - ntrials::Vector{Int}; centered::Bool=true, - dt::Float64=1e-2, rng::Int=1, dt_synthetic::Float64=1e-4, pad::Int=10) - - nsess = length(ntrials) - rng = sample(Random.seed!(rng), 1:nsess, nsess; replace=false) - - @unpack θz,θμ,θy,ncells = θ - - output = rand.(Ref(θz), θμ, θy, ntrials, ncells, rng) - - spikes = getindex.(output, 1) - clicks = getindex.(output, 2) - - output = bin_clicks_spikes_λ0.(spikes, clicks; - centered=centered, dt=dt, dt_synthetic=dt_synthetic, synthetic=true) - - spikes = getindex.(output, 1) - binned_clicks = getindex.(output, 2) - - input_data = neuralinputs.(clicks, binned_clicks, dt, centered) - - padded = map(spikes-> map(spikes-> map(SCn-> vcat(rand.(Poisson.((sum(SCn[1:10])/(10*dt))*ones(pad)*dt)), - SCn, rand.(Poisson.((sum(SCn[end-9:end])/(10*dt))*ones(pad)*dt))), spikes), spikes), spikes) - - μ_rnt = map(padded-> filtered_rate.(padded, dt), padded) - - nT = map(x-> map(x-> x.nT, x), binned_clicks) - - μ_t = map((μ_rnt, ncells, nT)-> map(n-> [max(0., mean([μ_rnt[i][n][t] - for i in findall(nT .>= t)])) - for t in 1:(maximum(nT))], 1:ncells), μ_rnt, ncells, nT) - - neuraldata.(input_data, spikes, ncells), μ_rnt, μ_t - -end - - -""" -""" -function rand(θz::θz, θy::θy, ntrials, ncells, rng; centered::Bool=false, dt::Float64=1e-4, pos_ramp::Bool=false) - - clicks = synthetic_clicks.(ntrials, rng) - binned_clicks = bin_clicks.(clicks, centered=centered, dt=dt) - λ0 = synthetic_λ0.(clicks, ncells; dt=dt, pos_ramp=pos_ramp) - input_data = neuralinputs.(clicks, binned_clicks, λ0, dt, centered) - - rng = sample(Random.seed!(rng), 1:ntrials, ntrials; replace=false) - - spikes = pmap((input_data,rng) -> rand(θz,θy,input_data; rng=rng)[3], input_data, rng) - - return spikes, λ0, clicks - -end - - - -""" 
-""" -function rand(θz::θz, θμ, θy::θy, ntrials, ncells, rng; centered::Bool=false, dt::Float64=1e-4) - - clicks = synthetic_clicks.(ntrials, rng) - binned_clicks = bin_clicks.(clicks, centered=centered, dt=dt) - input_data = neuralinputs.(clicks, binned_clicks, dt, centered) - - rng = sample(Random.seed!(rng), 1:ntrials, ntrials; replace=false) - - spikes = pmap((input_data,rng) -> rand(θz,θμ,θy,input_data; rng=rng)[3], input_data, rng) - - return spikes, clicks - -end - - -""" -""" -function rand(θz::θz, θy::θy, input_data::neuralinputs; rng::Int=1) - - @unpack λ0, dt = input_data - - Random.seed!(rng) - a = rand(θz,input_data) - λ = map((θy,λ0)-> θy(a, λ0), θy, λ0) - spikes = map(λ-> rand.(Poisson.(λ*dt)), λ) - - return λ, a, spikes - -end - - - -""" -""" -function rand(θz::θz, θμ, θy::θy, input_data::neuralinputs; rng::Int=1) - - @unpack binned_clicks, dt = input_data - @unpack nT = binned_clicks - - Random.seed!(rng) - a = rand(θz,input_data) - λ = map((θy,θμ)-> θy(a, θμ(1:nT)), θy, θμ) - spikes = map(λ-> rand.(Poisson.(λ*dt)), λ) - - return λ, a, spikes - -end diff --git a/src/neural_model/not_needed_for_tests/sample_model_functions_FP.jl b/src/neural_model/not_needed_for_tests/sample_model_functions_FP.jl deleted file mode 100644 index 3ca5068a..00000000 --- a/src/neural_model/not_needed_for_tests/sample_model_functions_FP.jl +++ /dev/null @@ -1,165 +0,0 @@ -#################################### generate data with FP ######################### - -function sample_input_and_spikes_multiple_sessions(n::Int, pz::Vector{Float64}, py::Vector{Vector{Vector{Float64}}}, - ntrials_per_sess::Vector{Int}; f_str::String="softplus", dtMC::Float64=1e-4, use_bin_center::Bool=false) - - nsessions = length(ntrials_per_sess) - - data = map((py,ntrials,rng)-> sample_inputs_and_spikes_single_session(n, pz, py, ntrials; rng=rng, f_str=f_str, - dtMC=dtMC, use_bin_center=use_bin_center), - py, ntrials_per_sess, 1:nsessions) - - return data - -end - -function sample_inputs_and_spikes_single_session(n::Int, pz::Vector{Float64}, py::Vector{Vector{Float64}}, ntrials::Int; - dtMC::Float64=1e-2, rng::Int=1, f_str::String="softplus", use_bin_center::Bool=false) - - data = sample_clicks(ntrials; rng=rng) - data["dtMC"] = dtMC - data["N"] = length(py) - - data["λ0_MC"] = [repeat([zeros(Int(ceil(T./dtMC)))], outer=length(py)) for T in data["T"]] - #data["spike_times"] = sample_spikes_single_session(data, pz, py; dtMC=dtMC, rng=rng, f_str=f_str) - data["spike_counts_MC"] = sample_spikes_single_session(n, data, pz, py; dtMC=dtMC, rng=rng, f_str=f_str, - use_bin_center=use_bin_center) - - return data - -end - -function sample_spikes_single_session(n::Int, data::Dict, pz::Vector{Float64}, py::Vector{Vector{Float64}}; - f_str::String="softplus", dtMC::Float64=1e-2, rng::Int=1, use_bin_center::Bool=false) - - Random.seed!(rng) - nT,nL,nR = bin_clicks(data["T"],data["leftbups"],data["rightbups"],dtMC,use_bin_center) - P,M,xc,dx, = initialize_latent_model(pz,n,dtMC) - - spike_counts = pmap((λ0,nT,L,R,nL,nR,rng) -> sample_spikes_single_trial(n,pz,py,λ0,nT,L,R,nL,nR,P,M,dx,xc; - f_str=f_str, rng=rng, dtMC=dtMC, use_bin_center=use_bin_center), - data["λ0_MC"], nT, data["leftbups"], data["rightbups"], nL, nR, shuffle(1:length(data["T"]))) - - return spike_counts - -end - -function sample_spikes_single_trial(n::Int, pz::Vector{Float64}, py::Vector{Vector{Float64}}, λ0::Vector{Vector{Float64}}, - nT::Int, L::Vector{Float64}, R::Vector{Float64}, nL::Vector{Int}, nR::Vector{Int}, - P, M, dx, xc; - f_str::String="softplus", 
dtMC::Float64=1e-4, rng::Int=1, use_bin_center::Bool=false) - - Random.seed!(rng) - a = sample_latent_FP(pz, P, M, dx, xc, L, R, nT, nL, nR, dtMC, n, use_bin_center) - - Y = map((py,λ0)-> poisson_noise!.(map((a, λ0)-> f_py!(a, λ0, py, f_str), a, λ0), dtMC), py, λ0) - -end - - -#################################### Expected rates, FP ######################### - -function sample_expected_rates_single_session(n::Int, data::Dict, pz::Vector{Float64}, py::Vector{Vector{Float64}}; - f_str::String="softplus", rng::Int=1) - - Random.seed!(rng) - dt = data["dt"] - use_bin_center = data["use_bin_center"] - P,M,xc,dx, = initialize_latent_model(pz,n,dt) - - output = pmap((λ0,nT,L,R,nL,nR,rng) -> sample_expected_rates_single_trial(n,pz,py,λ0,nT,L,R,nL,nR,P,M, - dx,xc,dt,use_bin_center; - f_str=f_str, rng=rng), - data["λ0"], data["nT"], data["leftbups"], data["rightbups"], data["binned_leftbups"], - data["binned_rightbups"], shuffle(1:length(data["T"]))) - - λ = map(x-> x[1], output) - a = map(x-> x[2], output) - - return λ,a - - -end - -function sample_expected_rates_single_trial(n::Int, pz::Vector{Float64}, py::Vector{Vector{Float64}}, - λ0::Vector{Vector{Float64}}, - nT::Int, L::Vector{Float64}, R::Vector{Float64}, nL::Vector{Int}, nR::Vector{Int}, - P, M, dx, xc, - dt::Float64, use_bin_center::Bool; f_str::String="softplus", rng::Int=1) - - Random.seed!(rng) - a = sample_latent_FP(pz, P, M, dx, xc, L, R, nT, nL, nR, dt, n, use_bin_center) - - λ = map((py,λ0)-> map((a, λ0)-> f_py!(a, λ0, py, f_str), a, λ0), py, λ0) - - return λ, a - -end - -#################################### Pa, FP ######################### - -function Pa_single_session(n::Int, data::Dict, pz::Vector{Float64}, py::Vector{Vector{Float64}}; - f_str::String="softplus") - - dt = data["dt"] - use_bin_center = data["use_bin_center"] - P,M,xc,dx, = initialize_latent_model(pz,n,dt) - - Pa = pmap((λ0,nT,L,R,nL,nR,rng) -> Pa_single_trial(n,pz,py,λ0,nT,L,R,nL,nR,P,M,dx,xc,dt,use_bin_center; - f_str=f_str), - data["λ0"], data["nT"], data["leftbups"], data["rightbups"], data["binned_leftbups"], - data["binned_rightbups"], shuffle(1:length(data["T"]))) - - #λ = map(x-> x[1], output) - #a = map(x-> x[2], output) - - #return λ,a - - return Pa - - -end - -function Pa_single_trial(n::Int, pz::Vector{Float64}, py::Vector{Vector{Float64}}, - λ0::Vector{Vector{Float64}}, - nT::Int, L::Vector{Float64}, R::Vector{Float64}, nL::Vector{Int}, nR::Vector{Int}, - P, M, dx, xc, - dt::Float64,use_bin_center::Bool; f_str::String="softplus") - - Pa = Pa_FP(pz, P, M, dx, xc, L, R, nT, nL, nR, dt, n, use_bin_center) - - #λ = map((py,λ0)-> map((a, λ0)-> f_py!(a, λ0, py, f_str=f_str), a, λ0), py, λ0) - - #return λ, a - - return Pa - -end - -function Pa_FP(pz::Vector{TT}, P::Vector{TT}, M::Array{TT,2}, dx::VV, - xc::Vector{WW},L::Vector{Float64}, R::Vector{Float64}, T::Int, - nL::Vector{Int}, nR::Vector{Int}, - dt::Float64,n::Int,use_bin_center::Bool) where {UU,TT,VV,WW <: Any} - - #adapt magnitude of the click inputs - La, Ra = make_adapted_clicks(pz,L,R) - - Pa = Array{TT,2}(undef,n,T) - F = zeros(TT,n,n) #empty transition matrix for time bins with clicks - - @inbounds for t = 1:T - - if use_bin_center && t == 1 - P, = latent_one_step!(P,F,pz,t,nL,nR,La,Ra,M,dx,xc,n,dt/2) - else - P, = latent_one_step!(P,F,pz,t,nL,nR,La,Ra,M,dx,xc,n,dt) - end - - Pa[:,t] = P - P /= sum(P) - - end - - return Pa - -end
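---

For reference, the negative-binomial observation model deleted above is small and self-contained. Below is a minimal sketch of the same computation outside the package, assuming only Distributions.jl and StatsFuns.jl; `negbin_bin` and this `softplusinv` are illustrative stand-ins (not package API), with `r` the dispersion and `c` the gain on the latent variable, matching the defaults of the removed `Softplus_negbin`:

using Distributions   # NegativeBinomial, logpdf
using StatsFuns       # softplus

# inverse of softplus, used to map the baseline rate λ0 back into the
# softplus domain before adding the latent modulation c*x
softplusinv(y) = log(expm1(y))

# spike-count distribution for one time bin of width dt: the latent value x
# is scaled by c, shifted by the transformed baseline, passed through a
# softplus to get a rate μ, and μ sets the NB success probability
function negbin_bin(x, λ0, dt; r=1e6, c=5.0)
    μ = softplus(c * x + softplusinv(λ0))
    p = min(1.0 - eps(), r / (μ * dt + r))   # clamp away from 1, as in the deleted code
    NegativeBinomial(r, p)
end

# example: log-likelihood of 2 spikes in a 10 ms bin with a 20 Hz baseline
logpdf(negbin_bin(0.5, 20.0, 0.01), 2)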