julianlsolvers / NLSolvers.jl
No bells and whistles foundation of Optim.jl
Home Page: https://julianlsolvers.github.io/NLSolvers.jl/dev/
License: MIT License
Just allow the user to pass it in via the caches and update it according to x0. If x0 and the cache array are the same, then it's just a simple in-place update.
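A minimal sketch of that update, assuming a user-supplied cache array (init_cache! is an illustrative name, not NLSolvers API):

# Illustrative sketch: refresh a user-supplied cache from x0.
function init_cache!(cache::AbstractVector, x0::AbstractVector)
    cache === x0 && return cache   # same array: the update is a no-op
    return copyto!(cache, x0)      # otherwise copy x0 into the cache in place
end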
Adding iter = options.maxiter before the return solves the problem. Also, there is a hardcoded B0 at the start of the solve.
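A minimal reproduction of the failure mode, with placeholder names (demo is not the actual solver code):

# With maxiter = 0 the loop body never runs, so the loop variable is
# undefined at the return site; assigning it unconditionally fixes that.
function demo(maxiter)
    for iter in 1:maxiter
        # ... solver work; may return early on convergence ...
    end
    iter = maxiter   # the proposed one-line fix before the return
    return iter
end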
#38 made sure that edges were evaluated. It might be better to use fa, fx, fb to set (v, fv), (w, fw), and fx instead of only x and fx; a sketch follows below.
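A hedged sketch of that bookkeeping, assuming a Brent-style bracket state where a and b are the evaluated edges (the variable roles here are illustrative):

# Seed the bracket state from the already-evaluated endpoints instead of
# initializing v and w to the same point as x.
if fa <= fb
    w, fw = a, fa   # second-best candidate
    v, fv = b, fb   # worst candidate
else
    w, fw = b, fb
    v, fv = a, fa
end
# (x, fx) keeps the best evaluated point, as before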
This issue is used to trigger TagBot; feel free to unsubscribe. If you haven't already, you should update your TagBot.yml to include issue comment triggers. Please see this post on Discourse for instructions and more details. If you'd like for me to do this for you, comment TagBot fix on this issue. I'll open a PR within a few hours, please be patient!
I'm reviewing some code that uses Optim, and it uses the x_converged, g_converged, and f_converged fields of OptimizationResults. At the moment, there are functions with the same names, but they are not defined on the ConvergenceInfo object. A mockup (based on show(io, mime, ci::ConvergenceInfo)) would be the following:
using NLSolvers
using LinearAlgebra: norm

x_converged(ci::NLSolvers.ConvergenceInfo) = false
f_converged(ci::NLSolvers.ConvergenceInfo) = false
g_converged(ci::NLSolvers.ConvergenceInfo) = false

function x_converged(ci::NLSolvers.ConvergenceInfo{<:NLSolvers.NelderMead,<:Any,<:NLSolvers.OptimizationOptions})
    return ci.info.ρs <= ci.options.x_abstol
end

function x_converged(ci::NLSolvers.ConvergenceInfo{<:NLSolvers.SimulatedAnnealing,<:Any,<:NLSolvers.OptimizationOptions})
    # don't know what to do here; simulated annealing does not "converge" in the typical sense
    return true
end

function x_converged(ci::NLSolvers.ConvergenceInfo{<:Any,<:Any,<:NLSolvers.OptimizationOptions})
    info = ci.info
    opt = ci.options
    x_abstol = haskey(info, :ρs) && (info.ρs <= opt.x_abstol)
    x_reltol = haskey(info, :ρs) && haskey(info, :ρx) && (info.ρs / info.ρx <= opt.x_reltol)
    return x_abstol || x_reltol
end

function f_converged(ci::NLSolvers.ConvergenceInfo{<:Any,<:Any,<:NLSolvers.OptimizationOptions})
    info = ci.info
    opt = ci.options
    # only trigger on f_limit when a finite limit was actually set
    f_limit = isfinite(opt.f_limit) && (info.minimum <= opt.f_limit)
    f_abstol = haskey(info, :fx) && (abs(info.fx - info.minimum) <= opt.f_abstol)
    f_reltol = haskey(info, :fx) && (abs((info.fx - info.minimum) / info.fx) <= opt.f_reltol)
    return f_limit || f_abstol || f_reltol
end

function f_converged(ci::NLSolvers.ConvergenceInfo{<:Any,<:Any,<:NLSolvers.NEqOptions})
    opt = ci.options
    ρF = norm(ci.info.best_residual, Inf)
    #ρFz = norm(ci.info.solution, 2)
    f_abstol = ρF <= opt.f_abstol
    return f_abstol
end

function g_converged(ci::NLSolvers.ConvergenceInfo{<:Any,<:Any,<:NLSolvers.OptimizationOptions})
    info = ci.info
    opt = ci.options
    if haskey(info, :∇fz)
        ρ∇f = opt.g_norm(info.∇fz)
        g_abstol = ρ∇f <= opt.g_abstol
        g_reltol = ρ∇f / info.∇f0 <= opt.g_reltol
        if haskey(info, :prob) && hasbounds(info.prob)
            # projected gradient norm for box-constrained problems
            ρP = opt.g_norm(
                info.solution .- clamp.(info.solution .- info.∇fz, info.prob.bounds...),
            )
            gp_abstol = ρP <= opt.g_abstol
        else
            gp_abstol = false
        end
    else
        g_abstol = false
        g_reltol = false
        gp_abstol = false
    end
    return g_abstol || g_reltol || gp_abstol
end

function converged(ci::NLSolvers.ConvergenceInfo)
    info = ci.info
    if haskey(info, :Δ)
        Δmin = ci.solver.Δupdate.Δmin isa Nothing ? 0 : ci.solver.Δupdate.Δmin
        Δ = info.Δ <= Δmin
        Δ_finite = isfinite(info.Δ)
    else
        Δ = false
        Δ_finite = true
    end
    x = x_converged(ci)
    f = f_converged(ci)
    g = g_converged(ci)
    # finite flags
    x_finite = all(isfinite, NLSolvers.solution(ci))
    conv_flags = f || x || g || Δ
    finite_flags = x_finite && Δ_finite # && g_finite && f_finite
    return conv_flags && finite_flags
end
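For context, a usage sketch, assuming res is the ConvergenceInfo returned by NLSolvers.solve:

# Query the mockup predicates on a finished solve.
if converged(res)
    @info "stopped because a tolerance was met" x = NLSolvers.solution(res)
else
    @warn "no convergence flag set: hit maxiter or a non-finite value"
end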
What is missing is a general way to calculate the finiteness of the other stopping criteria, to address JuliaNLSolvers/Optim.jl#997.
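One possible shape for such a check, sketched under the assumption that ci.info is a NamedTuple holding a mix of scalars, arrays, and non-numeric entries (info_finite is a made-up name):

# Generic finiteness sweep over whatever diagnostics the solver recorded.
function info_finite(info::NamedTuple)
    for v in values(info)
        if v isa Number
            isfinite(v) || return false
        elseif v isa AbstractArray{<:Number}
            all(isfinite, v) || return false
        end
        # non-numeric entries (e.g. the problem object) are skipped
    end
    return true
end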
Some docs are already done. While some methods are documented in comments, transforming those into proper documentation and putting it online would help.
Something like this?
function NLSolvers.upto_gradient(meritobj::NLSolvers.MeritObjective, ∇f, x)
    neq = meritobj.prob
    G = neq.R.F(∇f, x)
    F = (norm(G)^2) / 2
    return F, G
end
With that, NLSolvers.solve(prob, x0, LineSearch(Newton(), HZAW())) seems to work.
EDIT: no it doesn't, but at least it now hits an error in HZAW instead:
MethodError: no method matching isfinite(::NamedTuple{(:ϕ, :Fx), Tuple{Float64, StaticArrays.MVector{2, Float64}}})
Because eigen is not defined for BigFloat.
On some testing:
sol = Results of solving non-linear equations
* Algorithm:
Newton's method with default linsolve with backtracking (no interp)
* Candidate solution:
Final residual 2-norm: 2.09e-09
Final residual Inf-norm: 1.29e-09
Initial residual 2-norm: 7.91e-05
Initial residual Inf-norm: 4.92e-05
* Convergence measures
|F(x')| = 1.29e-09 <= 0.00e+00 (false)
* Work counters
Seconds run: 1.41e-01
Iterations: 1
I know that the nonlinear solve converged via f_abstol, but that is not shown in the convergence measures. It seems that the displayed keys don't correspond to the actual keys used in nonlinear solving (ρF0, ρ2F0, ρs)?
I'm trying to get more libraries using the LinearSolve.jl interface:
http://linearsolve.sciml.ai/dev/
It's pretty complete now. The suggested form is to use the caching interface so that it can store factorizations and iterative solver inits (http://linearsolve.sciml.ai/dev/tutorials/caching_interface/). The suggested algorithm form is to just have the user pass the type, like Newton(; linsolve = KLU()), which would then switch over to using SuiteSparse.KLU and accelerate sparse matrix factorizations for unstructured cases over UMFPACK: http://linearsolve.sciml.ai/dev/solvers/solvers/#SuiteSparse.jl
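A minimal sketch of that caching interface, assuming the current LinearSolve.jl API (KLUFactorization is LinearSolve's name for the KLU algorithm):

using LinearSolve, SparseArrays

# Build the cache once; the factorization is stored inside it.
A = sparse([1.0 2.0; 3.0 4.0])
b = [1.0, 2.0]
linsolve = init(LinearProblem(A, b), KLUFactorization())
sol1 = solve!(linsolve)

# Re-solve with a new right-hand side; the KLU factorization is reused.
linsolve.b = [5.0, 6.0]
sol2 = solve!(linsolve)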
NLSolvers.jl/src/nlsolve/linesearch/newton.jl, lines 25 to 29 in 486e562
This question follows from this discussion:
https://discourse.julialang.org/t/modify-custom-solver-to-deal-with-multidimensional-arrays/74845
Would it be possible to allow for this? Apparently the routines in NLSolvers only accept AbstractVectors. NLsolve already accepts systems given as multidimensional arrays, so it would be kind of a regression to not allow for that. A possible workaround is sketched below.
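A hedged workaround in the meantime: wrap an N-dimensional in-place residual so the solver only ever sees flat vectors (vec_wrap and the F!(FX, X) signature are assumptions, not NLSolvers API):

# Adapt F!(FX, X), defined on N-d arrays of shape dims, to vector arguments.
function vec_wrap(F!, dims)
    function Fvec!(Fx::AbstractVector, x::AbstractVector)
        X = reshape(x, dims)    # shares memory with x
        FX = reshape(Fx, dims)  # shares memory with Fx
        F!(FX, X)               # filling FX fills Fx in place
        return Fx
    end
    return Fvec!
end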
I was looking at and comparing the code with Optim.jl, and one of the only missing features is the ability to provide a callback function to short-circuit the procedure.
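For reference, a minimal sketch of the Optim.jl pattern being requested, assuming Optim's documented callback option (a callback returning true halts the run; the state argument follows Optim's OptimizationState):

using Optim

# Stop after 10 iterations regardless of convergence.
cb = state -> state.iteration >= 10
res = optimize(x -> sum(abs2, x), randn(3), GradientDescent(),
               Optim.Options(callback = cb))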
On NLSolvers 0.3, when calling nlsolve(prob, Anderson, NEqOptions(maxiter = 0)), the package complains about t0 not being defined. It seems to be fixed on the master branch.
Big oops: B0 is hardcoded to be [1.0 0.0; 0.0 1.0]. It should be possible to input B0.
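A sketch of the requested change under assumed internals (initial_B is a hypothetical helper, not the actual code):

using LinearAlgebra: I

# Default to an identity of matching size and eltype instead of a
# hardcoded 2x2, and let the caller override it with their own B0.
function initial_B(x0::AbstractVector, B0 = nothing)
    n = length(x0)
    return B0 === nothing ? Matrix{eltype(x0)}(I, n, n) : B0
end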
Hi,
It would be great if we incorporated a scout option into the PSO algorithm, as proposed by https://link.springer.com/chapter/10.1007/978-3-319-11128-5_21 and https://academic.oup.com/jcde/article/6/2/129/5732336
The scout option helps the algorithm regenerate particles that are stuck (unable to update their Pbest for some iterations), and hence allows deeper exploration of the space; see the sketch after this note.
This issue is mainly a reminder for me, as I intend to eventually code it myself and prepare a PR ;)
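A hedged sketch of that scout step, with all names (scout!, stall_counts, limit) made up for illustration:

# Re-draw particles whose personal best hasn't improved for `limit` iterations.
# X is the d-by-n particle matrix; lower and upper bound the search box.
function scout!(X, stall_counts, lower, upper; limit = 20)
    for i in axes(X, 2)
        if stall_counts[i] >= limit
            X[:, i] .= lower .+ rand(length(lower)) .* (upper .- lower)
            stall_counts[i] = 0   # reset the stall counter for the fresh particle
        end
    end
    return X
end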
When testing for Cholesky in #9, I found that it was easier to supply the linsolve via Newton(; linsolve) than to modify the code each time. But for a custom linsolve, the summary prints just the type instead of "Newton with user supplied Linsolve". The error is that the type parameter is written Any instead of <:Any, so a custom linsolve doesn't match. I have this:
using LinearAlgebra
using PositiveFactorizations

function cholesky_linsolve(d, B, ∇f)
    cholesky!(Positive, B)
    Bchol = Cholesky(B, 'L', 0)
    d .= Bchol \ ∇f
end

function cholesky_linsolve(B, ∇f)
    Bchol = cholesky(Positive, B)
    Bchol \ ∇f
end
Base.summary(::NLSolvers.Newton{<:Direct, typeof(cholesky_linsolve)}) = "Newton's method with Cholesky linsolve"
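Usage is then as described above, passing the custom solver through the keyword:

# The summary method above makes this print a descriptive name.
method = NLSolvers.Newton(; linsolve = cholesky_linsolve)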
The only downside is that it does require a dependency on PositiveFactorizations.
The code is already in the main branch, so this can be closed on a new release.
Should be combined with updates of B in the non-Newton case.
Seems that beta is set to nothing instead of being beta = method.beta?
NLSolvers.jl/src/fixedpoints/anderson.jl, line 89 in 1ade351