import numpy as np
import matplotlib.pyplot as plt
#from examples import *
#from time_integrators import *
def Lorenz63(state, *args):  # Lorenz 63 model
    sigma = args[0]
    beta = args[1]
    rho = args[2]
    x, y, z = state  # Unpack the state vector
    f = np.zeros(3)  # Derivatives
    f[0] = sigma * (y - x)
    f[1] = x * (rho - z) - y
    f[2] = x * y - beta * z
    return f
def RK4(rhs, state, dt, *args):
    k1 = rhs(state, *args)
    k2 = rhs(state + k1*dt/2, *args)
    k3 = rhs(state + k2*dt/2, *args)
    k4 = rhs(state + k3*dt, *args)
    new_state = state + (dt/6)*(k1 + 2*k2 + 2*k3 + k4)
    return new_state
def Lin3dvar(ub, w, H, R, B, opt):
    # The solution of the 3DVAR problem in the linear case requires
    # the solution of a linear system of equations.
    # Here we utilize the built-in numpy function to do this.
    # Other schemes can be used instead.

    if opt == 1:  # model-space approach
        Bi = np.linalg.inv(B)
        Ri = np.linalg.inv(R)
        A = Bi + (H.T) @ Ri @ H
        b = Bi @ ub + (H.T) @ Ri @ w
        ua = np.linalg.solve(A, b)  # solve a linear system

    elif opt == 2:  # model-space incremental approach
        Bi = np.linalg.inv(B)
        Ri = np.linalg.inv(R)
        A = Bi + (H.T) @ Ri @ H
        b = (H.T) @ Ri @ (w - H @ ub)
        ua = ub + np.linalg.solve(A, b)  # solve a linear system

    elif opt == 3:  # observation-space incremental approach
        A = R + H @ B @ (H.T)
        b = w - H @ ub
        ua = ub + B @ (H.T) @ np.linalg.solve(A, b)  # solve a linear system

    return ua
3D-Var for Lorenz63 System
We analyze the Lorenz63 system of nonlinear ODEs
\[ \begin{align} \frac{\mathrm{d} x}{\mathrm{d} t} &= -\sigma(x-y), \nonumber \\ \frac{\mathrm{d} y}{\mathrm{d} t} &= \rho x-y-xz, \label{eq:lorenz} \\ \frac{\mathrm{d} z}{\mathrm{d} t} &= xy -\beta z, \nonumber \end{align} \]
where \(x=x(t)\), \(y=y(t)\), \(z=z(t)\), and \(\sigma\) (the ratio of kinematic viscosity to thermal diffusivity), \(\rho\) (a measure of stability), and \(\beta\) (related to the wave number) are parameters. Chaotic behavior is obtained when the parameters are chosen as
\[ \sigma = 10,\quad \rho=28,\quad \beta = 8/3. \]
This system is a simplified model for atmospheric convection and an excellent example of the lack of predictability: although the initial-value problem is well posed in the sense of Hadamard, small perturbations of the initial state grow exponentially, so long-range prediction is effectively impossible.
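To make the lack of predictability concrete, the following minimal sketch (not part of the original notebook; the variable names are illustrative) integrates two initial conditions that differ by \(10^{-6}\) using the Lorenz63 and RK4 functions defined above; after ten time units the separation has typically grown by several orders of magnitude.

# A minimal illustration (not part of the original notebook): two trajectories
# started 1e-6 apart, integrated with the RK4 and Lorenz63 helpers defined above.
sigma_c, beta_c, rho_c = 10.0, 8.0/3.0, 28.0   # standard chaotic parameters
dt_c, nsteps = 0.01, 1000                      # integrate up to t = 10

u1 = np.array([1.0, 1.0, 1.0])                 # reference initial condition
u2 = u1 + np.array([1e-6, 0.0, 0.0])           # slightly perturbed initial condition

for _ in range(nsteps):
    u1 = RK4(Lorenz63, u1, dt_c, sigma_c, beta_c, rho_c)
    u2 = RK4(Lorenz63, u2, dt_c, sigma_c, beta_c, rho_c)

print(np.linalg.norm(u1 - u2))  # the 1e-6 perturbation has grown by orders of magnitude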
Problem setup
We set up a twin experiment with the following parameters:
- true initial condition \(u_t(0) = [1,1,1]^T\) and initial guess \(u(0) = [2,3,4]^T\)
- observations every \(0.2\) time units, for a duration of \(2\) units
- complete measurement of system state, i.e. \(H(u)=u.\)
- measurement noise is Gaussian with zero mean and equal standard deviations \(\sigma_1 = \sigma_2 = \sigma_3 = \sigma = 0.15\), hence \(R = \sigma^2 I\)
- fixed background covariance matrix \(B = \mathrm{diag} (0.01,\ 0.01,\ 0.01)\)
Procedure
- Background state values are computed at \(t = 0.2\) by time integration of the Lorenz63 system, starting from the initial guess.
- Observations at \(t = 0.2\) are assimilated to provide the analysis at \(t = 0.2\) (the analysis equations used by Lin3dvar are recalled after this list).
- After that, background state values are computed at \(t = 0.4\) by time integration, starting from the analysis at \(t = 0.2,\) and so on.
The prediction is made up to time \(t=10.\)
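For reference, the Lin3dvar routine defined at the top of this notebook computes the minimizer of the linear 3D-Var cost function; the three opt branches are algebraically equivalent forms of the same analysis (this summary is added for orientation and follows directly from the code):
\[ J(u) = \frac{1}{2}(u-u_b)^T B^{-1}(u-u_b) + \frac{1}{2}(w-Hu)^T R^{-1}(w-Hu), \]
\[ \left(B^{-1}+H^T R^{-1}H\right) u_a = B^{-1}u_b + H^T R^{-1}w \quad\Longleftrightarrow\quad u_a = u_b + BH^T\left(HBH^T+R\right)^{-1}\left(w-Hu_b\right). \]
The first form corresponds to opt=1 (and, rewritten for the increment \(u_a - u_b\), to opt=2); the second, observation-space form is the one used with opt=3 in the experiment below.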
#Application: Lorenz 63
# parameters
sigma = 10.0
beta = 8.0/3.0
rho = 28.0
dt = 0.01
tm = 10
nt = int(tm/dt)
t = np.linspace(0, tm, nt+1)

############################ Twin experiment ##################################
u0True = np.array([1,1,1])  # True initial conditions
np.random.seed(seed=1)
sig_m = 0.15  # standard deviation for measurement noise
R = sig_m**2*np.eye(3)  # covariance matrix for measurement noise
H = np.eye(3)  # linear observation operator

dt_m = 0.2  # time period between observations
tm_m = 2    # maximum time for observations
nt_m = int(tm_m/dt_m)  # number of observation instants

#t_m = np.linspace(dt_m,tm_m,nt_m) #np.where( (t<=2) & (t%0.1==0) )[0]
ind_m = (np.linspace(int(dt_m/dt), int(tm_m/dt), nt_m)).astype(int)
t_m = t[ind_m]

#time integration
uTrue = np.zeros([3,nt+1])
uTrue[:,0] = u0True
km = 0
w = np.zeros([3,nt_m])
for k in range(nt):
    uTrue[:,k+1] = RK4(Lorenz63, uTrue[:,k], dt, sigma, beta, rho)
    if (km < nt_m) and (k+1 == ind_m[km]):
        w[:,km] = H @ uTrue[:,k+1] + np.random.normal(0, sig_m, [3,])
        km = km + 1
Plot the ground truth and the observations. Note what you observe. Why might this be a difficult problem?
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10,8))

ax.plot(t, uTrue[1,:], label=r'\bf{True}', linewidth=3)
ax.plot(t[ind_m], w[1,:], 'o', fillstyle='none', \
        label=r'\bf{Observation}', markersize=8, markeredgewidth=2)
ax.set_xlabel(r'$t$', fontsize=22)
ax.set_ylabel(r'$y(t)$')
ax.axvspan(0, tm_m, color='y', alpha=0.4, lw=0)  # shade the observation window

ax.legend(loc="center", bbox_to_anchor=(0.5,1.25), ncol=4, fontsize=15)
########################### Data Assimilation ################################
u0b = np.array([2.0,3.0,4.0])

sig_b = 0.1
B = sig_b**2*np.eye(3)

#time integration
ub = np.zeros([3,nt+1])
ub[:,0] = u0b
ua = np.zeros([3,nt+1])
ua[:,0] = u0b
km = 0
for k in range(nt):
    ub[:,k+1] = RK4(Lorenz63, ub[:,k], dt, sigma, beta, rho)
    ua[:,k+1] = RK4(Lorenz63, ua[:,k], dt, sigma, beta, rho)

    if (km < nt_m) and (k+1 == ind_m[km]):
        ua[:,k+1] = Lin3dvar(ua[:,k+1], w[:,km], H, R, B, 3)
        km = km + 1
Plot the results and write down your conclusions (a quantitative RMSE check is sketched after the plotting code below).
############################### Plotting ######################################
fig, ax = plt.subplots(nrows=3, ncols=1, figsize=(10,8))
ax = ax.flat

for k in range(3):
    ax[k].plot(t, uTrue[k,:], label=r'\bf{True}', linewidth=3)
    ax[k].plot(t, ub[k,:], ':', label=r'\bf{Background}', linewidth=3)
    ax[k].plot(t[ind_m], w[k,:], 'o', fillstyle='none', \
               label=r'\bf{Observation}', markersize=8, markeredgewidth=2)
    ax[k].plot(t, ua[k,:], '--', label=r'\bf{Analysis}', linewidth=3)
    ax[k].set_xlabel(r'$t$', fontsize=22)
    ax[k].axvspan(0, tm_m, color='y', alpha=0.4, lw=0)

ax[0].legend(loc="center", bbox_to_anchor=(0.5,1.25), ncol=4, fontsize=15)

ax[0].set_ylabel(r'$x(t)$', labelpad=5)
ax[1].set_ylabel(r'$y(t)$', labelpad=-12)
ax[2].set_ylabel(r'$z(t)$')
fig.subplots_adjust(hspace=0.5)
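As a quantitative complement to the figures, here is a minimal sketch (not part of the original notebook, reusing the variables defined above) that compares the root-mean-square error of the background and analysis trajectories against the truth, inside and after the observation window:

# A minimal sketch: RMSE of background (ub) and analysis (ua) against the truth (uTrue),
# split into the assimilation window [0, tm_m] and the free forecast that follows it.
def rmse(u, uref):
    return np.sqrt(np.mean((u - uref)**2))

obs_window = t <= tm_m   # boolean mask over the time grid
for name, traj in [('background', ub), ('analysis', ua)]:
    print(name,
          'RMSE in window:', rmse(traj[:, obs_window], uTrue[:, obs_window]),
          'RMSE after window:', rmse(traj[:, ~obs_window], uTrue[:, ~obs_window]))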