Source for file LevenbergMarquardt.php
Documentation is available at LevenbergMarquardt.php
// Levenberg-Marquardt in PHP
// http://www.idiom.com/~zilla/Computer/Javanumeric/LM.java
* Calculate the current sum-squared-error.
* (Chi-squared is the distribution of squared Gaussian errors, so this
* quantity follows a chi-squared distribution when the residuals are
* Gaussian — hence minimizing it is a maximum-likelihood fit.)
for($i = 0; $i < $npts; $i++ ) {
$d = $y[$i] - $f->val($x[$i], $a);
* Minimize E = sum {(y[k] - f(x[k],a)) / s[k]}^2
* Each residual (y[k] - f(x[k],a)) is optionally down-weighted by its
* per-point standard deviation s[k] before squaring.
* Note that LMfunc implements the value and gradient of f(x,a),
* NOT the value and gradient of E with respect to a!
* @param x array of domain points, each may be multidimensional
* @param y corresponding array of values
* @param a the parameters/state of the model
* @param vary false to indicate the corresponding a[k] is to be held fixed
* @param s2 sigma^2 for point i
* @param lambda blend between steepest descent (lambda high) and
* jump to bottom of quadratic (lambda zero).
* @param termepsilon termination accuracy (0.01)
* @param maxiter stop and return after this many iterations if not done
* @param verbose set to zero (no prints), 1, 2
* @return the new lambda for future iterations.
* Can use this and maxiter to interleave the LM descent with some other
* task, setting maxiter to something small.
function solve($x, $a, $y, $s, $vary, $f, $lambda, $termepsilon, $maxiter, $verbose) {
print ("solve x[". count($x). "][". count($x[0]). "]");
print (" a[". count($a). "]");
println(" y[". count(length). "]");
// g = gradient, H = hessian, d = step to minimum
//double[] d = new double[nparm];
for($i = 0; $i < $npts; $i++ ) $oos2[$i] = 1./ ($s[$i]* $s[$i]);
$term = 0; // termination count test
for( $r = 0; $r < $nparm; $r++ ) {
for( $c = 0; $c < $nparm; $c++ ) {
for( $i = 0; $i < $npts; $i++ ) {
if ($i == 0) $H[$r][$c] = 0.;
$H[$r][$c] += ($oos2[$i] * $f->grad($xi, $a, $r) * $f->grad($xi, $a, $c));
// boost diagonal towards gradient descent
for( $r = 0; $r < $nparm; $r++ )
$H[$r][$r] *= (1. + $lambda);
for( $r = 0; $r < $nparm; $r++ ) {
for( $i = 0; $i < $npts; $i++ ) {
if ($i == 0) $g[$r] = 0.;
$g[$r] += ($oos2[$i] * ($y[$i]- $f->val($xi,$a)) * $f->grad($xi, $a, $r));
// scale (for consistency with NR, not necessary)
for( $r = 0; $r < $nparm; $r++ ) {
for( $c = 0; $c < $nparm; $c++ ) {
// solve H d = -g, evaluate error at new location
//double[] d = DoubleMatrix.solve(H, g);
//double[] na = DoubleVector.add(a, d);
double[] na = (new Matrix(a, nparm)). plus(new Matrix(d, nparm)). getRowPackedCopy();
System. out. println("\n\niteration "+ iter+ " lambda = "+ lambda);
System. out.print ("a = ");
(new Matrix(a, nparm)).print (10, 2);
System. out.print ("H = ");
System. out.print ("g = ");
(new Matrix(g, nparm)).print (10, 2);
System. out.print ("d = ");
(new Matrix(d, nparm)).print (10, 2);
System. out.print ("e0 = " + e0 + ": ");
System. out.print ("moved from ");
(new Matrix(a, nparm)).print (10, 2);
System. out.print ("e1 = " + e1 + ": ");
(new Matrix(na, nparm)).print (10, 2);
System. out. println("move rejected");
// termination test (slightly different than NR)
if (Math. abs(e1- e0) > termepsilon) {
System. out. println("terminating after " + iter + " iterations");
if (iter >= maxiter) done = true;
// in the C++ version, found that changing this to e1 >= e0
// was not a good idea. See comment there.
if (e1 > e0 || Double. isNaN(e1)) { // new location worse than before
} else { // new location better, accept new parameters
// simply assigning a = na will not get results copied back to caller
for( int i = 0; i < nparm; i++ ) {
if (vary[i]) a[i] = na[i];
|