Hi All,
I want to feed the output "u = u + alpha*d_k" computed at the very end of this code back in as an input, i.e., as the argument of the function "f(u)" defined at the beginning of the code, on every iteration. Note that I have posted only the part of the code needed to explain the problem.
Does anyone have an idea of how to do this?
""" """
from numpy import *
from math import *
from scitools.std import *
# --- Problem parameters (module-level globals read by all routines below) ---
N = 1001          # number of forward/backward time steps
M = 200           # number of spatial grid points on [0, 2*pi]
dx = 2*pi/M       # spatial mesh width
a = 1.0           # speed parameter of the relaxation scheme
dt = 0.0005       # time step
Eps = 10e-8       # relaxation parameter / stopping tolerance; NOTE(review): 10e-8 == 1e-7 -- confirm 1e-8 was not intended
alpha = 0.25      # step length for the gradient update at the bottom of the file
U = zeros(M); w = zeros(M); z = zeros(M)    # work arrays (appear unused in this excerpt)
# NOTE(review): u_initial_function is not defined anywhere in this excerpt --
# it must be supplied by the omitted part of the file before this line runs.
u = fromfunction(u_initial_function,(M,))
def f(u):
    """Identity hook for post-processing the state u.

    Currently a pass-through: the argument is returned unchanged.  Exists so
    a transformation of u can later be inserted in one place.
    """
    return u
# Initial auxiliary variable: v = 0.5*f(u)**2 (the Burgers flux of the state u).
v = 0.5*(f(u))**2
def Initialize(v):
    """Identity hook for the auxiliary variable v; returns it unmodified."""
    return v
# Solve the forward problem
def Evolve_vs(u,v):
    """Relaxation half-step for the auxiliary variable.

    Interior points get vs = Eps/(Eps - dt) * (v - (dt/Eps)*0.5*u**2);
    each boundary cell copies its interior neighbour (zero-gradient
    boundaries).  Reads the module globals M, Eps, dt.
    """
    vs = zeros((M,), float32)
    vs[1:M-1] = Eps/(Eps - dt)*(v[1:M-1] - (dt/Eps)*0.5*(u[1:M-1])**2)
    vs[0] = vs[1]
    vs[M-1] = vs[M-2]
    return vs
def Evolve_u1(u,v):
    """One forward time step for the state u.

    Interior update:
        u1[k] = u[k] - 0.5*(dt/dx)*(vs[k+1] - vs[k-1])
                     + 0.5*(dt/dx)*sqrt(a)*(u[k+1] - 2*u[k] + u[k-1])
    where vs comes from Evolve_vs; boundary cells copy their interior
    neighbour.  Reads the module globals M, dt, dx, a.
    """
    vs = Evolve_vs(u,v)
    u1 = zeros((M,), float32)
    c = 0.5*(dt/dx)
    u1[1:M-1] = (u[1:M-1] - c*(vs[2:M] - vs[0:M-2])
                 + c*sqrt(a)*(u[2:M] - 2*u[1:M-1] + u[0:M-2]))
    u1[0] = u1[1]
    u1[M-1] = u1[M-2]
    return u1
def Evolve_v1(u,v):
    """One forward time step for the auxiliary variable v.

    Interior update:
        v1[k] = vs[k] - 0.5*(dt/dx)*a*(u[k+1] - u[k-1])
                      + 0.5*(dt/dx)*(a/sqrt(a))*(vs[k+1] - 2*vs[k] + vs[k-1])
    where vs comes from Evolve_vs; boundary cells copy their interior
    neighbour.  Reads the module globals M, dt, dx, a.
    """
    vs = Evolve_vs(u,v)
    v1 = zeros((M,), float32)
    c = 0.5*(dt/dx)
    v1[1:M-1] = (vs[1:M-1] - c*a*(u[2:M] - u[0:M-2])
                 + c*(a/sqrt(a))*(vs[2:M] - 2*vs[1:M-1] + vs[0:M-2]))
    v1[0] = v1[1]
    v1[M-1] = v1[M-2]
    return v1
# Forward sweep: advance the state u and auxiliary variable v through N steps.
# NOTE(review): u is overwritten first, so Evolve_v1 on the next line sees the
# *updated* u rather than the value from the same time level -- confirm this
# sequential (Gauss-Seidel-like) update order is intended.
for n in xrange(N):
    u = f(Evolve_u1(u,v))
    v = Initialize(Evolve_v1(u,v))
#Solve backward problem
# Terminal conditions for the adjoint (backward) problem: the adjoint
# variables start from the final forward state.
q_T = v
p_T = u
def V_T(q_T):
    """Identity hook for the adjoint variable q_T; returns it unchanged."""
    return q_T
def U_T(p_T):
    """Identity hook for the adjoint variable p_T; returns it unchanged."""
    return p_T
def EvolveV_T(q_T):
    """One backward time step for the adjoint variable q_T.

    Interior update:
        q_ns[k] = (Eps/(Eps+dt)) * q_T[k]
                  + (Eps/(Eps+dt)) * (dt/(2*dx)) * (p_T[k+1] - p_T[k-1])
                  + (Eps/(Eps+dt)) * (dt*a/(2*dx)) * (q_T[k+1] - 2*q_T[k] + q_T[k-1])
    with copy boundaries.  The traversal order of the original loop is
    irrelevant -- each entry depends only on the input arrays -- so a
    vectorised form is equivalent.
    NOTE(review): reads the module global p_T in addition to the argument.
    """
    q_ns = zeros((M,), float32)
    r = Eps/(Eps+dt)
    q_ns[1:M-1] = (r*q_T[1:M-1]
                   + r*(dt/(2.*dx))*(p_T[2:M] - p_T[0:M-2])
                   + r*(dt*a/(2.*dx))*(q_T[2:M] - 2.*q_T[1:M-1] + q_T[0:M-2]))
    q_ns[0] = q_ns[1]
    q_ns[M-1] = q_ns[M-2]
    return q_ns
def EvolveU_T(p_T):
    """One backward time step for the adjoint variable p_T.

    NOTE(review): this routine mixes its argument with module globals -- q_s
    is built from the *global* q_T (the parameter p_T shadows the global of
    the same name), and q_T[k+1]/q_T[k-1] in the update below also read the
    global.  Confirm that passing q_T in explicitly was not intended.
    """
    q_s = EvolveV_T(q_T)
    p_ns = zeros((M,), float32)
    # Traversal order is irrelevant: p_ns[k] depends only on the input arrays.
    for k in range (M-2, 0,-1):
        # Source term (dt/Eps)*q_s*p_T plus centred first-difference in q_T
        # and centred second-difference in p_T.
        p_ns[k] = p_T[k] + (dt/Eps)*q_s[k]*(p_T[k]) + 0.5*(a**2)*(dt/dx)*(q_T[k+1] - q_T[k-1]) + 0.5*a*(dt/dx)*(p_T[k+1] - 2.*p_T[k] + p_T[k-1])
    # Zero-gradient (copy) boundary conditions.
    p_ns[0] = p_ns[1]
    p_ns[M-1] = p_ns[M-2]
    return p_ns
# Backward sweep: march the adjoint variables from t = T down to t = 0.
# NOTE(review): p_T is overwritten first, so EvolveV_T on the next line
# (which reads the global p_T) sees the *updated* adjoint state rather than
# the one from the same time level -- confirm this ordering is intended.
for n in xrange(N-1,-1, -1):
    p_T = U_T(EvolveU_T(p_T))
    q_T = V_T(EvolveV_T(q_T))
#plot(linspace(0.0,2*pi,M),p_T,'r0.2')
#Solve the optimal value by perturbing u
def gradient():
    """Return the scalar functional sum(dx * |u - target| * p_T).

    Reads the module globals u, dx, p_T and target -- it takes no arguments.
    NOTE(review): `target` is not defined anywhere in this excerpt; it must
    be supplied by the omitted part of the file.
    """
    I = sum(dx*(abs(u - target))*p_T)
    return I
if __name__=="__main__":
    # Gradient-descent update of the control u.
    # NOTE(review): this loop only re-reads the globals u, p_T and target;
    # the forward solve (Evolve_u1/Evolve_v1) and the backward adjoint solve
    # are NOT re-run after u changes.  To feed the updated u back in -- the
    # question posed at the top of the file -- wrap the forward and backward
    # sweeps in functions and call them here on each iteration before
    # recomputing gradient().  Also note gradient() is evaluated twice per
    # iteration (once in the test, once for d_k).
    while abs(gradient()) >= Eps:
        d_k = -gradient()
        u = u + alpha*d_k