Spatial Lag - Fixed Effects Panel Model
This post introduces the Spatial Lag model for Fixed Effects Panel data. It is based on the estimation procedure outlined in:
- Elhorst (2003). Specification and Estimation of Spatial Panel Data Models. International Regional Science Review (page 250).
If you want to replicate this code, please use the Panel branch from my forked repository of spreg.
Imports
import spreg
import numpy as np
import numpy.linalg as la
from scipy import sparse as sp
from scipy.sparse.linalg import splu as SuperLU
from spreg.utils import inverse_prod
from spreg.sputils import spdot, spfill_diagonal, spinv
from libpysal import weights
try:
    from scipy.optimize import minimize_scalar
    minimize_scalar_available = True
except ImportError:
    minimize_scalar_available = False
from spreg.panel_utils import check_panel, demean_panel
Step 0: Read data
# First import libpysal to load the dataset.
import libpysal
from libpysal.examples import load_example
from libpysal.weights import Queen
# Open data on NCOVR US County Homicides (3085 areas).
nat = load_example('Natregimes')
db = libpysal.io.open(nat.get_path('natregimes.dbf'),'r')
nat_shp = libpysal.examples.get_path("natregimes.shp")
w = Queen.from_shapefile(nat_shp)
w.transform = 'r'
name_y = ['HR70','HR80','HR90']
y = np.array([db.by_col(name) for name in name_y]).T
name_x = ['RD70','RD80','RD90','PS70','PS80','PS90']
x = np.array([db.by_col(name) for name in name_x]).T
method = "full"
epsilon = 0.0000001
Step 1: Transform variables
I need to transform the variables $X$ and $y$ using the demeaning procedure.
# Check the data structure and convert from wide to long if needed.
bigy, bigx, name_y, name_x = check_panel(y, x, w, name_y, name_x)
Warning: Assuming panel is in wide format, i.e. y[:, 0] refers to T0, y[:, 1] refers to T1, etc.
Similarly, assuming x[:, 0:T] refers to T periods of k1, x[:, T+1:2T] refers to k2, etc.
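A quick shape check makes the wide-to-long conversion concrete. The shapes in the comments are what I expect for this dataset, with $n = 3085$ counties and $t = 3$ periods:
# Wide inputs: one row per county, one column per period (y) or
# per variable-period combination (x)
print(y.shape, x.shape)        # expected: (3085, 3) (3085, 6)
# Long outputs: the t periods stacked into n*t rows, one column per variable
print(bigy.shape, bigx.shape)  # expected: (9255, 1) (9255, 2)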
Demeaning the variables using $$ y^\ast = Q_0 y $$
where $Q_0 = J_T \otimes I_N$, $J_T = I_T - \iota_T \iota_T' / T$, and $\iota_T$ is a $T \times 1$ vector of ones.
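As a sanity check, here is a toy-sized sketch (the sizes are illustrative, not the NAT data) confirming that $Q_0 y$ reproduces the familiar time-demeaning:
# Toy check: Q_0 = J_T kron I_N subtracts each unit's average over time
n_toy, t_toy = 4, 3
rng = np.random.default_rng(0)
y_toy = rng.normal(size=(n_toy * t_toy, 1))  # stacked by period: t blocks of n rows
J_T = np.identity(t_toy) - np.ones((t_toy, t_toy)) / t_toy
Q0 = np.kron(J_T, np.identity(n_toy))
y_mat = y_toy.reshape(t_toy, n_toy)          # one row per period
y_dem = (y_mat - y_mat.mean(axis=0)).reshape(-1, 1)
assert np.allclose(Q0 @ y_toy, y_dem)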
n = w.n
t = bigy.shape[0] // n
k = bigx.shape[1]
# Demeaned variables
y = demean_panel(bigy, n, t)
x = demean_panel(bigx, n, t)
# Big W matrix
W = w.full()[0]
W_nt = np.kron(np.identity(t), W)
Wsp = w.sparse
Wsp_nt = sp.kron(sp.identity(t), Wsp)
# Lag dependent variable
ylag = spdot(W_nt, y)
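For a panel of this size the dense $W_{NT}$ is costly to hold in memory; the sparse Kronecker product built above yields the same spatial lag, as this quick check shows:
# Same spatial lag from the sparse kron product (much cheaper in memory)
ylag_sp = Wsp_nt @ y
assert np.allclose(ylag, ylag_sp)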
Step 2: Estimation
First, I’ll compute the residuals of these two regressions: $$ y = X\beta_0 + e_0 $$ and $$ Wy = X\beta_1 + e_1 $$
# b0, b1, e0 and e1
xtx = spdot(x.T, x)
xtxi = la.inv(xtx)
xty = spdot(x.T, y)
xtyl = spdot(x.T, ylag)
b0 = spdot(xtxi, xty)
b1 = spdot(xtxi, xtyl)
e0 = y - spdot(x, b0)
e1 = ylag - spdot(x, b1)
Then, maximize the concentrated log-likelihood function with respect to $\rho$: $$ L = -\frac{NT}{2} \ln (e_r' e_r) + T \ln | I_N - \rho W | $$
where $e_r = e_0 - \rho e_1$.
def lag_c_loglik(rho, n, t, e0, e1, W):
    # concentrated log-lik for lag model, no constants, brute force
    er = e0 - rho * e1
    sig2 = spdot(er.T, er) / (n*t)
    nlsig2 = (n*t / 2.0) * np.log(sig2)
    a = -rho * W
    spfill_diagonal(a, 1.0)
    jacob = t * np.log(np.linalg.det(a))
    # this is the negative of the concentrated log lik for minimization
    clik = nlsig2 - jacob
    return clik
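The Jacobian above is computed with a dense determinant, which is what method = "full" refers to. The splu import hints at the sparse alternative that spreg's ml_lag uses for large samples; here is a hedged sketch of that variant (the function name and signature are mine, and it assumes a sparse identity matrix is passed in as I):
def lag_c_loglik_sp(rho, n, t, e0, e1, I, Wsp):
    # same concentrated log-lik, but ln|I_N - rho*W| comes from a sparse LU
    # factorization: it equals the sum of ln|u_ii| over the U factor's diagonal
    er = e0 - rho * e1
    sig2 = spdot(er.T, er) / (n * t)
    nlsig2 = (n * t / 2.0) * np.log(sig2)
    a = I - rho * Wsp
    LU = SuperLU(a.tocsc())
    jacob = t * np.sum(np.log(np.abs(LU.U.diagonal())))
    # negative of the concentrated log-lik, as before
    return nlsig2 - jacob
It would drop into the same minimize_scalar call below, with args=(n, t, e0, e1, sp.identity(n), Wsp).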
methodML = method.upper()
res = minimize_scalar(lag_c_loglik, 0.0, bounds=(-1.0, 1.0),
                      args=(n, t, e0, e1, W), method='bounded',
                      tol=epsilon)
rho = res.x[0][0]
rho
0.1903042364374238
Calculate betas as: $$ \beta = \beta_0 - \rho \beta_1 $$
# b, residuals and predicted values
b = b0 - rho * b1
betas = np.vstack((b, rho)) # rho added as last coefficient
betas
array([[ 0.80058859],
[-2.60035236],
[ 0.19030424]])
Calculate $\sigma^2$ as: $$ \sigma^2 = (e_0 - \rho \cdot e_1)' (e_0 - \rho \cdot e_1) $$
# compute full log-likelihood, including constants
ln2pi = np.log(2.0 * np.pi)
llik = -res.fun - (n*t) / 2.0 * ln2pi - (n*t) / 2.0
logll = llik[0][0]
# Calculate sigma2
u = e0 - rho * e1
sig2 = spdot(u.T, u)
Step 3: Variance matrix
The variance matrix is obtained by inverting the information matrix (only the lower triangle of the symmetric matrix is shown):
$$ Var[\beta, \rho, \sigma^2]^{-1} = \begin{pmatrix} \frac{X'X}{\sigma^2} & & \\ \frac{X' (I_T \otimes \tilde{W}) X \beta}{\sigma^2} & T \cdot tr(\tilde{W}^2 + \tilde{W}'\tilde{W}) + \frac{\beta' X' (I_T \otimes \tilde{W}'\tilde{W}) X \beta}{\sigma^2} & \\ 0 & \frac{T}{\sigma^2} tr(\tilde{W}) & \frac{NT}{2 \sigma^4} \end{pmatrix} $$
where $\tilde{W} = W (I_N - \rho W)^{-1}$
predy = y - u
xb = spdot(x, b)
predy_e = inverse_prod(
    Wsp_nt, xb, rho, inv_method="power_exp", threshold=epsilon)
e_pred = y - predy_e
# information matrix
a = -rho * W
spfill_diagonal(a, 1.0)
ai = spinv(a)
wai = spdot(W, ai)
tr1 = wai.diagonal().sum() #same for sparse and dense
wai2 = spdot(wai, wai)
tr2 = wai2.diagonal().sum()
waiTwai = spdot(wai.T, wai)
tr3 = waiTwai.diagonal().sum()
wai_nt = np.kron(np.identity(t), wai)
wpredy = spdot(wai_nt, xb)
xTwpy = spdot(x.T, wpredy)
waiTwai_nt = np.kron(np.identity(t), waiTwai)
wTwpredy = spdot(waiTwai_nt, xb)
wpyTwpy = spdot(xb.T, wTwpredy)
# order of variables is beta, rho, sigma2
v1 = np.vstack(
    (xtx / sig2, xTwpy.T / sig2, np.zeros((1, k))))
v2 = np.vstack(
    (xTwpy / sig2, t*(tr2 + tr3) + wpyTwpy / sig2, t * tr1 / sig2))
v3 = np.vstack(
    (np.zeros((k, 1)), t * tr1 / sig2, n * t / (2.0 * sig2 ** 2)))
v = np.hstack((v1, v2, v3))
vm1 = la.inv(v) # vm1 includes variance for sigma2
vm = vm1[:-1, :-1] # vm is for coefficients only
vm
array([[ 2.41034357e+02, 2.02069853e+02, -7.48911836e-05],
[ 2.02069853e+02, 2.24784043e+03, 4.30066912e-04],
[-7.48911836e-05, 4.30066912e-04, 2.57894113e-04]])
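From vm and betas, standard errors and z-statistics follow in the usual way; a short sketch:
# Standard errors from the diagonal of vm; coefficient order is RD, PS, rho
std_err = np.sqrt(vm.diagonal()).reshape(-1, 1)
z_stat = betas / std_err
print(np.hstack((betas, std_err, z_stat)))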
Hint: To use a customized module, run the command conda-develop /path/to/module/ in the Anaconda prompt.