by Harald Hofstätter, 25.10.2015
In this Julia notebook we calculate the Feigenbaum constants $\alpha=-2.502907875\dots$ and $\delta=4.669201609\dots$ to high precision. The notebook can be downloaded here.
We use the method given in
Keith Briggs, A precise calculation of the Feigenbaum constants, Math. Comp. 57 (1991), 435-439.
The following subroutine evaluates $$g(x)=1+\sum_{j=1}^{n}g_jx^{2j}$$ for given $x$ and $g_j$ using Horner's method:
# Evaluate g(x) = 1 + sum_{j=1}^{n} g_j * x^(2j) by Horner's method
# applied in the variable x^2.
#
# Arguments:
#   x - evaluation point
#   g - coefficient vector g_1, ..., g_n
#   n - number of coefficients used
# Returns the value g(x).
function eval_g(x, g::Vector, n::Int)
    s = x * x
    acc = g[n]
    k = n - 1
    while k >= 1
        acc = acc * s + g[k]
        k -= 1
    end
    return acc * s + 1
end
The following subroutine evaluates the derivative $$g'(x)=\sum_{j=1}^{n}2jg_jx^{2j-1}$$ for given $x$ and $g_j$ using Horner's method:
# Evaluate the derivative g'(x) = sum_{j=1}^{n} 2j * g_j * x^(2j-1)
# by Horner's method in x^2, with a final multiplication by x.
#
# Arguments:
#   x - evaluation point
#   g - coefficient vector g_1, ..., g_n
#   n - number of coefficients used
# Returns the value g'(x).
function eval_diff_g(x, g::Vector, n::Int)
    s = x * x
    acc = 2n * g[n]
    for k in n-1:-1:1
        acc = acc * s + 2k * g[k]
    end
    return acc * x
end
For a given set of collocation points $\{x_1,\dots,x_n\}\subset(0,1]$, the following system of $n$ nonlinear equations has to be solved for the coefficients $g_1,\dots,g_n$ of $g(x)=1+\sum_{j=1}^{n}g_jx^{2j}$:
$$F_i(g_1,\dots,g_n)=g(1)g(x_i)-g(g(g(1)g(x_i)))=0,\quad i=1,\dots,n$$
For Newton's method we also need the Jacobian $J$ of $F$:
$$J_{ij}=\frac{\partial F_i}{\partial g_j} = g(x_i)+g(1)x_i^{2j}-g(g(1)x_i)^{2j}-g'(g(g(1)x_i))\cdot\big(g(1)^{2j}x_i^{2j}+g'(g(1)x_i)x_i\big)$$
The following subroutine computes $F$ and $J$ for given $g_j$ and $x_i$:
# Compute the nonlinear system F and its Jacobian J for Newton's method:
#   F_i = g(1) g(x_i) - g(g(g(1) x_i)),
#   J_ij = g(x_i) + g(1) x_i^{2j} - g(g(1)x_i)^{2j}
#          - g'(g(g(1)x_i)) * ( g(1)^{2j} x_i^{2j} + g'(g(1)x_i) x_i ).
# Results are written in place into F and J.
#
# Arguments:
#   F - length-n output vector (overwritten)
#   J - n-by-n output matrix (overwritten)
#   g - current coefficient vector g_1, ..., g_n
#   x - collocation points
#   n - number of coefficients / collocation points
function generate_F_J(F::Vector, J::Matrix, g::Vector, x::Vector, n::Int)
    g1 = eval_g(1, g, n)                            # g(1)
    gx = [eval_g(x[j], g, n) for j=1:n]             # g(x_i)
    gdax = [eval_diff_g(g1*x[j], g, n) for j=1:n]   # g'(g(1) x_i)
    gax = [eval_g(g1*x[j], g, n) for j=1:n]         # g(g(1) x_i)
    gdgax = [eval_diff_g(gax[j], g, n) for j=1:n]   # g'(g(g(1) x_i))
    ggax = [eval_g(gax[j], g, n) for j=1:n]         # g(g(g(1) x_i))
    g12 = g1*g1
    x2 = x.*x
    gax2 = gax.*gax
    F[:] = g1*gx - ggax
    # Running powers x_i^{2j}, g(g(1)x_i)^{2j}, g(1)^{2j}, starting at j=1.
    # Use fresh arrays and non-mutating products: the previous code did
    # `x2j = x2` followed by `x2j .*= x2`, which under in-place broadcast
    # semantics (Julia >= 0.7) aliases and corrupts x2/gax2 and computes
    # wrong powers; `g12j .*= g12` on a scalar even errors there.
    x2j = copy(x2)
    gax2j = copy(gax2)
    g12j = g12
    for j=1:n
        if j > 1
            x2j = x2j .* x2
            gax2j = gax2j .* gax2
            g12j = g12j * g12
        end
        J[:,j] = gx + g1*x2j - gax2j - gdgax.*(g12j*x2j + gdax.*x)
    end
end
At each step of the Newton iteration the approximation for the Feigenbaum constant $\alpha=1/g(1)=1/(1+\sum_{j=1}^{n}g_j)$ together with the residuum $\max_i|F_i(g_1,\dots,g_n)|$ for the current values of $g_j$ is printed. The iteration is terminated if the residuum is less than 100 times the machine epsilon. A heuristic is used to determine the number of valid digits of $\alpha$.
# Newton iteration for the coefficients g_1, ..., g_n of the Feigenbaum
# function g and the constant alpha = 1/g(1).
#
# Arguments:
#   g     - starting coefficients; overwritten in place with the solution
#   x     - collocation points
#   n     - number of coefficients / collocation points
#   maxit - maximal number of Newton steps
# Returns the final approximation of alpha.
#
# NOTE(review): uses pre-1.0 Julia idioms: vectorized `abs(F)` (now
# `abs.(F)`), `int(...)` (now `Int(floor(...))`), and `@sprintf` without
# `using Printf` — this code runs as-is only on old Julia versions.
function newton_iteration_for_alpha_g(g::Vector, x::Vector, n::Int, maxit::Int)
# element type of the computation (typically BigFloat)
T=typeof(g[1])
J = zeros(T,n,n)
F = zeros(T,n)
# current approximation alpha = 1/g(1)
alpha = 1/eval_g(1, g, n)
alphaold = alpha
# diff = change of alpha in the last step (large dummy start values)
diff = 1000
diffold = diff
for k=1:maxit
generate_F_J(F, J, g, x, n)
g[:] = g - J\F # Newton iteration
# residuum of the iterate BEFORE the update (F was built from the old g)
res = maximum(abs(F))
alpha = 1/eval_g(1, g, n)
diff = abs(alpha-alphaold)
println(alpha ," ",@sprintf(" %.3e %.3e",res, diff))
alphaold = alpha
# stop when the residuum reaches ~100*eps, or when convergence stagnates
# (the change of alpha no longer shrinks by at least a factor of 10)
if res<100*eps(T) || diff>.1*diffold
break
end
diffold = diff
end
# heuristic digit count: convert the binary exponent of the last change
# of alpha into a number of correct decimal digits
digits = int(floor(-exponent(diff)*log(2)/log(10)))
println(digits, " digits seem to be correct:")
println("α=", string(alpha)[1:digits+3])
return alpha
end
We use $x_i=\sqrt{i/n},\ i=1,\dots,n$ as collocation points and (the coefficients of)
$$g(x) = 1-1.5218x^2+1.0481\cdot 10^{-1}x^4+2.67076\cdot 10^{-2}x^6$$
as the starting approximation for Newton's method:
# --- Setup and computation of the Feigenbaum constant alpha ---
# NOTE(review): pre-1.0 Julia constructs: set_bigfloat_precision /
# get_bigfloat_precision (now setprecision / precision), int(...) (now
# Int(...)), the range literal [1:n] (now collect(1:n)), and zeros(x)
# (now zero(x)).
MyType=BigFloat
set_bigfloat_precision(1024)
n=int(get_bigfloat_precision()/5) # For this relation between the bigfloat precision and n the heuristics
# in newton_iteration_for_alpha_g and power_method_for_delta_h seem to work.
maxit=30
x = sqrt([1:n]/MyType(n)) # collocation points x_i = sqrt(i/n)
# coefficient vector (same element type/length as x), zero-initialized
g = zeros(x)
# starting approximation for Newton's method (first three coefficients)
g[1:3]=[-1.5218 ,1.0481e-1, 2.67076e-2]
alpha=newton_iteration_for_alpha_g(g, x, n, maxit)
Here the value from http://www.plouffe.fr/simon/constants/feigenbaum.txt for comparison:
-2.50290787509589282228390287321821578638127137672714997733619205677923546317959020670329964974643383412959523186999585472394218237 77854451792728633149933725781121635948795037447812609973805986712397117373289276654044010306698313834600094139322364490657889951220 58431725078733774630878534242853519885875000423582469187408204281700901714823051821621619413199856066129382742649709844084470100805 45496779367608881264464068851815527093240075425064971570470475419932831783645332562415378693957125097066387979492654623137
The Feigenbaum constant $\delta=4.6692016091\dots$ is calculated as the largest eigenvalue of $V^{-T}L$, where $V$ is the Vandermonde matrix with respect to $x_1^2,\dots,x_n^2$ (the squares of the collocation points from above), and $L$ is defined as
$$L_{ij}=\alpha\big(g(x_i/\alpha)^{2(j-1)}+g'(g(x_i/\alpha))(x_i/\alpha)^{2(j-1)}\big),$$
with the polynomial $g(x)=1+\sum_{j=1}^{n}g_jx^{2j}$ calculated above and $\alpha=1/g(1)$.
The following subroutine calculates the matrix $L$ for given $g_j$ and $x_i$:
# Fill the n-by-n matrix L (in place) with
#   L_ij = alpha * ( g(x_i/alpha)^{2(j-1)}
#                    + g'(g(x_i/alpha)) * (x_i/alpha)^{2(j-1)} ),
# where alpha = 1/g(1), so x_i/alpha = g(1)*x_i.
#
# Arguments:
#   L - n-by-n output matrix (overwritten)
#   g - coefficient vector of g computed by the Newton iteration
#   x - collocation points
#   n - number of coefficients / collocation points
function generate_L(L::Matrix, g::Vector , x::Vector, n::Int)
    g1 = eval_g(1, g, n)                           # g(1)
    ax = g1*x                                      # x_i/alpha = g(1)*x_i
    gax = [eval_g(ax[k], g, n) for k=1:n]          # g(x_i/alpha)
    gdgax = [eval_diff_g(gax[k], g, n) for k=1:n]  # g'(g(x_i/alpha))
    ax2 = ax.^2
    gax2 = gax.^2
    alpha = 1/g1
    # Running powers (x_i/alpha)^{2(j-1)} and g(x_i/alpha)^{2(j-1)},
    # starting at j=1 with ones-vectors of the right element type.
    # The previous code started with the scalar 1 and used `.*=`, which
    # under in-place broadcast semantics (Julia >= 0.7) errors on the
    # scalar and aliases/corrupts ax2 and gax2; non-mutating products
    # on fresh arrays give identical results on all versions.
    ax2j = ax2 .^ 0
    gax2j = gax2 .^ 0
    for j=1:n
        if j > 1
            ax2j = ax2j .* ax2
            gax2j = gax2j .* gax2
        end
        L[:,j] = alpha*(gax2j + gdgax.*ax2j)
    end
end
For the solution of the Vandermonde system $V^T a=f$ we use Algorithm 4.6.1 from
Gene H. Golub and Charles F. van Loan, Matrix Computations, 3rd edition, John Hopkins University Press.
Note that the input vector $f$ is overwritten by the solution vector $a$.
# Solve the Vandermonde system V(x)^T a = f in place (Golub/Van Loan,
# Matrix Computations, Algorithm 4.6.1): on return, f holds the monomial
# coefficients a of the polynomial p(t) = a_1 + a_2 t + ... + a_n t^{n-1}
# interpolating p(x_i) = f_i.
#
# Arguments:
#   x - pairwise distinct nodes
#   f - right-hand side, overwritten with the solution a
#   n - system size
function solve_vander(x::Vector, f::Vector, n::Int)
    # Phase 1: build Newton divided-difference coefficients in f.
    for step in 1:n-1
        for row in n:-1:step+1
            f[row] = (f[row] - f[row-1]) / (x[row] - x[row-step])
        end
    end
    # Phase 2: expand the Newton form into monomial coefficients.
    for step in n-1:-1:1
        for row in step:n-1
            f[row] -= f[row+1] * x[step]
        end
    end
end
The largest eigenvalue $\delta$ and the corresponding eigenvector $h$ of the eigenvalue problem $$V^{-T}Lh=\delta h$$ are computed using the power method, cf. Golub/van Loan, section 7.3. A heuristic is used to terminate the iteration and to determine the number of valid digits of $\delta$.
# Power method for the largest eigenvalue delta of V^{-T} L and the
# corresponding eigenvector h (cf. Golub/Van Loan, section 7.3).
#
# Arguments:
#   h     - starting vector for the iteration (used only as the initial
#           value; the local name is rebound, so the caller's vector is
#           NOT updated)
#   g     - coefficients of g from the Newton iteration
#   x     - collocation points
#   n     - problem size
#   maxit - maximal number of power iterations
# Returns the final approximation of delta.
#
# NOTE(review): uses pre-1.0 Julia `int(...)`; `@sprintf` needs
# `using Printf` on modern versions.
function power_method_for_delta_h(h::Vector, g::Vector, x::Vector, n::Int, maxit::Int)
L=zeros(typeof(g[1]),n,n)
generate_L(L, g, x, n)
# squared collocation points = nodes of the Vandermonde matrix
x2=x.^2
# rough initial guess for delta
delta=4.669
deltaold=delta
diff=100
diffold=diff
for k=1:maxit
h = L*h
# apply V^{-T}: h := V^{-T} (L h), overwriting h
solve_vander(x2, h, n)
# eigenvalue estimate: first component before renormalization
delta = h[1]
# convergence is checked only every 20th step
if mod(k,20)==0
diff = abs(delta-deltaold)
println(k, " ",delta, @sprintf(" %.3e", diff))
deltaold = delta
# stop when the change of delta no longer decreases markedly
if diff>.9*diffold
break
end
diffold=diff
end
# renormalize so that h[1] = 1
h = h/delta
end
# heuristic digit count from the binary exponent of the last change
digits = int(floor(-exponent(diff)*log(2)/log(10)))
println(digits, " digits seem to be correct:")
println("δ=", string(delta)[1:digits+2])
return delta
end
# --- Computation of the Feigenbaum constant delta ---
# starting vector for the power method (same type/length as x)
h=zeros(x)
# first three components of the starting eigenvector approximation
h[1:3]=[1.0, -3.2565e-1, -5.05539e-2]
maxit=1000
delta = power_method_for_delta_h(h, g, x, n, maxit)
Here the value from http://www.plouffe.fr/simon/constants/feigenbaum.txt for comparison:
4.669201609102990671853203820466201617258185577475768632745651343004134330211314737138689744023948013817165984855189815134408627142 02793252231244298889089085994493546323671341153248171421994745564436582379320200956105833057545861765222207038541064674949428498145 33917262005687556659523398756038256372256480040951071283890611844702775854285419801113440175002428585382498335715522052236087250291 6788603626745272133990571316068753450834339344461037063094520191158769724322735898389037949462572512890979489867683346116