%2multibyte Version: 5.50.0.2960 CodePage: 936 %\usepackage{pdffig} %\usepackage{amstex} %\usepackage{enumerate} %\setlength{\textwidth}{6.4 in} %\newtheorem{assumption}{Assumption}[section] %\newtheorem{subassumption}{}[assumption] %\newcommand{\disp}{\displaystyle} %\newcommand{\btau}{\mbox{\boldmath{$\tau$}}} %\newcommand{\bbmu}{\mbox{\boldmath{$\mu$}}} % the symbol P for probability used the sans serif letter % the symbol E for expectation used the sans serif letter % the symbol Cov for covariance used the sans serif letter % the symbol Var for covariance used the sans serif letter % bold Greek letter % bold Greek letter used for script %\usepackage{subcaption} %\usepackage{pdffig} %\usepackage{amstex} \documentclass[11pt]{article} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \usepackage{float} \usepackage{amssymb} \usepackage{graphicx,color} \usepackage{subfig} \usepackage{caption} \usepackage{amsmath} \usepackage{amsmath,bm} \usepackage{amsbsy} \usepackage{epsfig} \setcounter{MaxMatrixCols}{10} %TCIDATA{OutputFilter=LATEX.DLL} %TCIDATA{Version=5.50.0.2960} %TCIDATA{Codepage=936} %TCIDATA{} %TCIDATA{BibliographyScheme=Manual} %TCIDATA{LastRevised=Tuesday, May 10, 2016 22:49:43} %TCIDATA{} %TCIDATA{Language=American English} \topmargin -1.5truecm \rightmargin -0.5truein \leftmargin -0.5truein \oddsidemargin 22pt \evensidemargin 22pt \newtheorem{lemma}{Lemma}[section] \newtheorem{proposition}{Proposition}[section] \newtheorem{theorem}{Theorem}[section] \newtheorem{corollary}{Corollary}[section] \newtheorem{remark}{Remark}[section] \renewcommand{\thefootnote}{\fnsymbol{footnote}} \def\proclaim#1{\par \bigskip\noindent {\bf #1}\bgroup\it\ } \def\endproclaim{\egroup\par\bigskip} \def\proof{\par\noindent{\bf Proof.} \;} \setlength{\textwidth}{15.3 truecm} \setlength{\textheight}{23.9 truecm} \newbox\TempBox \newbox\TempBoxA \newcommand{\non}{\nonumber \\} \def\pr{\textsf{P}} \def\ep{\textsf{E}} \def\Cov{\textsf{Cov}} \def\Var{\textsf{Var}} \def\Cal#1{{\mathcal #1}} \def\bk#1{{\mathbf #1}} \def\bkg#1{\mbox{\boldmath{$#1$}}} \def\smallbkg#1{\mbox{\scriptsize \boldmath{$#1$}}} \def\text#1{\mbox{\rm #1}} \def\overset#1#2{\stackrel{#1}{#2} } \def\mb{\mathbf} \def\mr{\mathrm} \def\dsum{\displaystyle\sum} \def\dint{\displaystyle\int} \def\dfrac{\displaystyle\frac} \renewcommand{\theequation}{\thesection.\arabic{equation}} \def\underwiggle 1{ \ifmmode\setbox\TempBox=\hbox{$ 1$}\else\setbox\TempBox=\hbox{ 1}\fi \setbox\TempBoxA=\hbox to \wd\TempBox{\hss\char'176\hss} \rlap{\copy\TempBox}\smash{\lower9pt\hbox{\copy\TempBoxA}} } \renewcommand{\baselinestretch}{1.5} \newcommand{\be}{\begin{eqnarray}} \newcommand{\ee}{\end{eqnarray}} \newcommand{\by}{\begin{eqnarray*}} \newcommand{\ey}{\end{eqnarray*}} \newcommand{\bn}{\begin{enumerate}} \newcommand{\en}{\end{enumerate}} \newcommand{\bi}{\begin{itemize}} \newcommand{\ei}{\end{itemize}} \newcommand{\bds}{\begin{description}} \newcommand{\eds}{\end{description}} \newcommand{\bcen}{\begin{center}} \newcommand{\ecen}{\end{center}} \def\diag{\mbox{diag}} \def\wconv{\stackrel{\mbox{w}}{\longrightarrow}} \def\pconv{\stackrel{\mbox{p}}{\longrightarrow}} \def\I{\mbox{I}} \def\dsp{\displaystyle} %\input{tcilatex} \allowdisplaybreaks[2] \begin{document} \title{\textbf{Online supplementary material to ``Structural change in non-stationary AR(1) 
models"}\textbf{\thanks{Tianxiao Pang and Yanling Liang's research was supported by the Department of Education of Zhejiang Province in China (N20140202). \newline \hspace*{5mm}E-mail addresses: txpang@zju.edu.cn, chong2064@cuhk.edu.hk, zhangdanna0507@gmail.com and 1040857986@qq.com.}}\\ } \date{\today } \author{} \maketitle \begin{center} \vskip-1cm {{\small Tianxiao Pang$^{1,}$\footnote[2]{Corresponding author.}, Terence Tai-Leung Chong$^{2,3}$, Danna Zhang$^{4}$ and Yanling Liang$^{1}$, }} {\small \centerline{$^{1}$School of Mathematical Sciences, Yuquan Campus, Zhejiang University, Hangzhou 310027, P.R.China}} {\small \centerline{$^{2}$Department of Economics and Lau Chor Tak Institute of Global Economics and Finance, }} {\small \centerline{The Chinese University of Hong Kong, Hong Kong}} {\small \centerline{$^{3}$Department of International Economics and Trade, Nanjing University, Nanjing 210093, P.R.China}} {\small \centerline{$^{4}$Department of Statistics, University of Chicago, 5734 S. University Ave., Chicago, Illinois 60637, USA}} \end{center} \bigskip \renewcommand{\thesection}{S} \renewcommand\theequation{S.\arabic{equation}} \setcounter{equation}{0} \noindent \textbf{Proof of Lemma A.1}. \ The proofs of (a) and (b) can be completed by following the proof of Theorem 3.2 in Phillips and Magdalinos (2007a) and by using the truncation technique as shown in (A.1). The details are omitted. To prove part (c), it suffices to note that \begin{eqnarray*} \frac{y_{[rT]}}{\sqrt{k_{T}l(\eta _{T})}} &=&\frac{1}{\sqrt{k_{T}l(\eta_{T})}}\Big((1-\frac{c}{k_{T}})^{[rT]}y_{0}+\sum_{i=0}^{[rT]-1}(1-\frac{c}{k_{T}})^{i}\varepsilon_{\lbrack rT]-i}\Big) \\ &=&\sum_{i=0}^{[rT]-1}\big((1-\frac{c}{k_{T}})^{k_{T}}\big)^{i/k_{T}}\frac{\varepsilon_{\lbrack rT]-i}}{\sqrt{k_{T}l(\eta _{T})}}+o_{p}(1) \\ &\Rightarrow &\int_{0}^{\infty}\exp {(-cs)}dW(s) \end{eqnarray*}% by Theorem 1 in Cs\"{o}rg\H{o} et al. (2003), (3.4) in Gin\'{e} et al. (1997) and assumptions C2 and C3. $\hfill \Box $\newline \noindent \textbf{Proof of Lemma A.2}. \ To prove part (a), we first note that \begin{equation*} y_{t-1}\varepsilon _{t}=\frac{1}{2}(y_{t}^{2}-y_{t-1}^{2}-\varepsilon_{t}^{2}),~~~t=[\tau _{0}T]+1,\cdots ,T, \end{equation*} which implies that \begin{equation}\label{1.1} \frac{1}{Tl(\eta_{T})}\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}\varepsilon_{t}=\frac{1}{2Tl(\eta_{T})}\Big(y_{T}^{2}-y_{[\tau_{0}T]}^{2}-\sum_{t=[\tau_{0}T]+1}^{T}\varepsilon _{t}^{2}\Big). \end{equation} Note also that \begin{equation}\label{1.3} \frac{y_{[\tau_{0}T]}}{\sqrt{Tl(\eta _{T})}}=o_{p}(1) \end{equation} by part (c) of Lemma A.1. We let \begin{equation*} S_{T}(\tau _{0},s):=\frac{1}{\sqrt{Tl(\eta _{T})}}\sum_{t=[\tau_{0}T]+1}^{[sT]}\varepsilon _{t}. \end{equation*} Then \begin{equation} \label{1.2} S_{T}(\tau _{0},s)\Rightarrow \overline{W}(s)-\overline{W}(\tau _{0}) \end{equation} for any $\tau _{0}\leq s\leq 1$ by the functional central limit theorem for the i.i.d. random variables from the DAN (see Theorem 1 in Cs\"{o}rg\H{o} et al. (2003) and (3.4) in Gin\'{e} et al. (1997)). Combining this result with (\ref{1.3}) immediately leads to \begin{equation} \label{1.12} \frac{y_{T}^{2}-y_{[\tau _{0}T]}^{2}}{2Tl(\eta _{T})}=\frac{(y_{[\tau_{0}T]}+\sum_{i=[\tau _{0}T]+1}^{T}\varepsilon _{i})^{2}-y_{[\tau _{0}T]}^{2}}{2Tl(\eta _{T})}\Rightarrow \frac{1}{2}(\overline{W}(1)-\overline{W}(\tau_{0}))^{2}. 
\end{equation} In addition, it is trivial that \begin{equation} \label{1.11} \frac{\sum_{t=[\tau_{0}T]+1}^{T}\varepsilon _{t}^{2}}{2Tl(\eta_{T})}\overset{p}{\rightarrow} \frac{1-\tau_{0}}{2} \end{equation} when $\varepsilon_{t}^{\prime }s$ are i.i.d. random variables in the DAN. Now, it follows from (\ref{1.1}), (\ref{1.12}) and (\ref{1.11}) that part (a) holds. To prove part (b), using (\ref{1.3}) and (\ref{1.2}), we have \begin{eqnarray} \label{1.9} &&\frac{1}{T^{2}l(\eta _{T})}\sum_{t=[\tau _{0}T]+1}^{T}y_{t-1}^{2} \notag\\ &=&\frac{1}{T^{2}l(\eta _{T})}\sum_{t=[\tau _{0}T]+1}^{T}\Big(y_{[\tau_{0}T]}+\sum_{i=[\tau _{0}T]+1}^{t-1}\varepsilon _{i}\Big)^{2} \notag \\ &=&\frac{1}{T}\sum_{t=[\tau _{0}T]+1}^{T}\Big(\frac{y_{[\tau _{0}T]}}{\sqrt{Tl(\eta _{T})}}+S_{T}(\tau _{0},\frac{t-1}{T})\Big)^{2} \notag \\ &=&\sum_{t=[\tau _{0}T]+1}^{T}\int_{\frac{t-1}{T}}^{\frac{t}{T}}\Big(\frac{y_{[\tau _{0}T]}}{\sqrt{Tl(\eta _{T})}}+S_{T}(\tau _{0},\frac{t-1}{T})\Big)^{2}ds \notag \\ &=&\sum_{t=[\tau _{0}T]+1}^{T}\int_{\frac{t-1}{T}}^{\frac{t}{T}}\Big(\frac{y_{[\tau _{0}T]}}{\sqrt{Tl(\eta _{T})}}+S_{T}(\tau _{0},s)\Big)^{2}ds\cdot (1+o_{p}(1)) \notag \\ &=&\int_{\tau _{0}}^{1}\Big(\frac{y_{[\tau _{0}T]}}{\sqrt{Tl(\eta _{T})}}+S_{T}(\tau _{0},s)\Big)^{2}ds\cdot (1+o_{p}(1)) \notag \\ &\Rightarrow &\int_{\tau _{0}}^{1}(\overline{W}(s)-\overline{W}(\tau_{0}))^{2}ds. \end{eqnarray} Finally, since part (a) and part (b) hold jointly, this completes our proof. $\hfill \Box $\newline \noindent \textbf{Proof of Lemma A.3}. \ Noting that the fact $|\hat{\tau}_{T}-\tau _{0}|=O_{p}(k_{T}/T)$ is proved in the proof of part (a) of Theorem 1.1 and the limiting distribution in Lemma A.1(c) is irrespective of the constant $r$, it suffices to study the magnitude of $\sum_{t=[\tau_{0}T-k_{T}]+1}^{[\tau _{0}T]}y_{t-1}^{2}$ in order to obtain the magnitude of $\sum_{t=[\hat{\tau}_{T}T]+1}^{[\tau _{0}T]}y_{t-1}^{2}$. Therefore, when $\hat{\tau}_{T}\leq \tau _{0}$, Lemma A.1(c) implies that \begin{equation} \label{A.6} \sum_{t=[\tau_{0}T-k_{T}]+1}^{[\tau_{0}T]}y_{t-1}^{2}=O_{p}(k_{T}l(\eta_{T}))\cdot k_{T}=O_{p}(k_{T}^{2}l(\eta _{T})), \end{equation} which yields \begin{equation} \label{A.1} \sum_{t=[\hat{\tau}_{T}T]+1}^{[\tau_{0}T]}y_{t-1}^{2}=O_{p}(k_{T}^{2}l(\eta_{T})). \end{equation} In addition, to obtain the magnitude of $\sum_{t=[\hat{\tau}_{T}T]+1}^{[\tau_{0}T]}y_{t-1}\varepsilon_{t}$, it suffices to study the magnitude of $\sum_{t=[\tau_{0}T-k_{T}]}^{[\tau_{0}T]}y_{t-1}\varepsilon_{t}$. By squaring $y_{t}=\beta _{1T}y_{t-1}+\varepsilon_{t}$ and summing over $t\in\{[\tau _{0}T-k_{T}]+1,\cdots ,[\tau_{0}T]\}$ we obtain \begin{eqnarray}\label{A.0} \sum_{t=[\tau _{0}T-k_{T}]+1}^{[\tau _{0}T]}y_{t-1}\varepsilon _{t} &=&\frac{1-\beta _{1T}^{2}}{2\beta _{1T}}\sum_{t=[\tau _{0}T-k_{T}]+1}^{[\tau_{0}T]}y_{t-1}^{2}-\frac{\beta _{1T}}{2}(y_{[\tau _{0}T-k_{T}]}^{2}-y_{[\tau_{0}T]}^{2}) \notag \\ &&-\frac{1}{2\beta _{1T}}\sum_{t=[\tau _{0}T-k_{T}]+1}^{[\tau_{0}T]}\varepsilon _{t}^{2}. \end{eqnarray} It follows from (\ref{A.6}) and Lemma A.1(c) respectively that\label{A.2} \begin{eqnarray} \frac{1-\beta _{1T}^{2}}{2\beta _{1T}}\sum_{t=[\tau_{0}T-k_{T}]+1}^{[\tau _{0}T]}y_{t-1}^{2}=O_{p}(k_{T}l(\eta _{T})) \end{eqnarray} and \begin{eqnarray} \label{A.3} \frac{\beta _{1T}}{2}(y_{[\tau _{0}T-k_{T}]}^{2}-y_{[\tau_{0}T]}^{2})=\frac{\beta _{1T}}{2}(y_{[\tau _{0}T-k_{T}]}-y_{[\tau _{0}T]})(y_{[\tau _{0}T-k_{T}]}+y_{[\tau_{0}T]})=O_{p}(k_{T}l(\eta _{T})). 
\end{eqnarray} Moreover, by applying the Law of Large Numbers, we have \begin{equation} \label{A.4} \frac{1}{2\beta _{1T}}\sum_{t=[\tau _{0}T-k_{T}]+1}^{[\tau_{0}T]}\varepsilon _{t}^{2}=O_{p}(k_{T}l(\eta _{T})). \end{equation} Combining (\ref{A.0})-(\ref{A.4}) leads to \begin{equation*} \sum_{t=[\tau _{0}T-k_{T}]+1}^{[\tau _{0}T]}y_{t-1}\varepsilon_{t}=O_{p}(k_{T}l(\eta _{T})), \end{equation*} yielding \begin{equation} \sum_{t=[\hat{\tau}_{T}T]+1}^{[\tau _{0}T]}y_{t-1}\varepsilon_{t}=O_{p}(k_{T}l(\eta _{T})). \label{1.14} \end{equation} Similarly, if $\hat{\tau}_{T}>\tau _{0}$, we have \begin{eqnarray*} \sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T+k_{T}]}y_{t-1}^{2} &=&\sum_{t=[\tau_{0}T]+1}^{[\tau_{0}T+k_{T}]}\Big(y_{[\tau_{0}T]}+\sum_{i=1}^{t-1-[\tau_{0}T]}\varepsilon _{\lbrack \tau _{0}T]+i}\Big)^{2} \\ &\leq &2\sum_{t=[\tau_{0}T]+1}^{[\tau_{0}T+k_{T}]}y_{[\tau_{0}T]}^{2}+2\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T+k_{T}]}\Big(\sum_{i=1}^{t-1-[\tau _{0}T]}\varepsilon _{\lbrack \tau_{0}T]+i}\Big)^{2} \\ &=&O_{p}(k_{T}^{2}l(\eta_{T}))+O_{p}(k_{T}^{2}l(\eta_{T})) \\ &=&O_{p}(k_{T}^{2}l(\eta_{T})), \end{eqnarray*} leading to \begin{equation} \label{A.5} \sum_{t=[\tau_{0}T]+1}^{[\hat{\tau}_{T}T]}y_{t-1}^{2}=O_{p}(k_{T}^{2}l(\eta_{T})). \end{equation} In addition, by squaring $y_{t}=y_{t-1}+\varepsilon_{t}$ and summing over $t\in \{[\tau _{0}T]+1,\cdots ,[\tau _{0}T+k_{T}]\}$ we obtain \begin{eqnarray*} \sum_{t=[\tau_{0}T]+1}^{[\tau_{0}T+k_{T}]}y_{t-1}\varepsilon_{t} &=&\frac{1}{2}\left( y_{[\tau_{0}T+k_{T}]}^{2}-y_{[\tau_{0}T]}^{2}\right) -\frac{1}{2}\sum_{t=[\tau_{0}T]+1}^{[\tau_{0}T+k_{T}]}\varepsilon_{t}^{2} \\ &=&\frac{1}{2}\left( 2y_{[\tau_{0}T]}\sum_{t=[\tau _{0}T]+1}^{[\tau_{0}T+k_{T}]}\varepsilon_{t}+\Big(\sum_{t=[\tau_{0}T]+1}^{[\tau_{0}T+k_{T}]}\varepsilon _{t}\Big)^{2}\right)-\frac{1}{2}\sum_{t=[\tau_{0}T]+1}^{[\tau _{0}T+k_{T}]}\varepsilon_{t}^{2}. \end{eqnarray*} Similarly to the derivation of (\ref{1.14}), we have \begin{equation} \label{1.16} \sum_{t=[\tau _{0}T]+1}^{[\hat{\tau}_{T}T]}y_{t-1}\varepsilon_{t}=O_{p}(k_{T}l(\eta _{T})). \end{equation} $\hfill \Box $\newline \bigskip \noindent \textbf{Proof of Lemma B.1}. \ This lemma can easily be proved by using the standard arguments in the unit root model with finite variance and by applying the truncation technique (A.1). The details are omitted. $\hfill \Box $\newline \noindent \textbf{Proof of Lemma B.2.} \ The proof can be completed by following the proof of part (c) of Lemma A.1. The details are therefore omitted. 
$\hfill \Box $\newline \noindent \textbf{Proof of Lemma B.3.} \ To prove part (a), we note that the following decomposition holds, \begin{eqnarray}\label{2.12} &&\frac{1}{\sqrt{Tk_{T}}l(\eta _{T})}\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}\varepsilon _{t} \notag \\ &=&\frac{1}{\sqrt{Tk_{T}}l(\eta _{T})}\sum_{t=[\tau _{0}T]+1}^{T}\Big(\beta_{2T}^{t-[\tau _{0}T]-1}y_{[\tau _{0}T]}+\sum_{i=[\tau _{0}T]+1}^{t-1}\beta _{2T}^{t-i-1}\varepsilon _{i}\Big)\varepsilon _{t} \notag \\ &=&\frac{1}{\sqrt{Tk_{T}}l(\eta _{T})}\Big(\Big(y_{0}+\sum_{j=1}^{[\tau_{0}T]}\varepsilon _{j}\Big)\sum_{t=[\tau _{0}T]+1}^{T}\beta _{2T}^{t-[\tau _{0}T]-1}\varepsilon _{t}+\sum_{t=[\tau _{0}T]+1}^{T}\varepsilon_{t}\sum_{i=[\tau _{0}T]+1}^{t-1}\beta _{2T}^{t-i-1}\varepsilon _{i}\Big)\notag \\ &:=&I_{1}+I_{2}, \end{eqnarray} where \begin{eqnarray*} I_{1} &=&\frac{1}{\sqrt{Tk_{T}}l(\eta _{T})}\Big(y_{0}+\sum_{j=1}^{[\tau_{0}T]}\varepsilon _{j}\Big)\sum_{t=[\tau _{0}T]+1}^{T}\beta_{2T}^{t-[\tau _{0}T]-1}\varepsilon _{t} \\ &=&\frac{1+o_{p}(1)}{\sqrt{Tk_{T}}l(\eta _{T})}\sum_{j=1}^{[\tau_{0}T]}\varepsilon _{j}\sum_{t=[\tau _{0}T]+1}^{T}\beta _{2T}^{t-[\tau_{0}T]-1}\varepsilon _{t} \\ &=&\frac{1+o_{p}(1)}{\sqrt{Tk_{T}}l(\eta _{T})}\sum_{j=1}^{[\tau_{0}T]}\varepsilon _{j}^{(1)}\sum_{t=[\tau _{0}T]+1}^{T}\beta _{2T}^{t-[\tau_{0}T]-1}\varepsilon _{t}^{(1)} \end{eqnarray*} and \begin{eqnarray*} I_{2} &=&\frac{1}{\sqrt{Tk_{T}}l(\eta _{T})}\sum_{t=[\tau_{0}T]+1}^{T}\varepsilon _{t}\sum_{i=[\tau _{0}T]+1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon _{i} \\ &=&\frac{1+o_{p}(1)}{\sqrt{Tk_{T}}l(\eta _{T})}\sum_{t=[\tau_{0}T]+1}^{T}\varepsilon _{t}^{(1)}\sum_{i=[\tau _{0}T]+1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon_{i}^{(1)} \end{eqnarray*} by assumptions C1-C3 and Lemma 1 in Cs\"{o}rg\H{o} et al. (2003). For the term $I_{1}$, it is obvious that \begin{equation} \label{2.5} \phi_j=\frac{1}{\sqrt{Tk_T}}\frac{\varepsilon _{j}^{(1)}}{\sqrt{l(\eta _{T})}}\sum_{t=[\tau_{0}T]+1}^{T}\beta _{2T}^{t-[\tau _{0}T]-1}\frac{\varepsilon _{t}^{(1)}}{\sqrt{l(\eta _{T})}},~~j=1, \cdots, [\tau _{0}T] \end{equation} is a sequence of martingale difference with respect to the filtration $\mathfrak{F}_j=\sigma(\varepsilon_1,\cdots,\varepsilon_j)$ with \begin{equation*} \sum_{j=1}^{[\tau_0 T]}E(\phi_j^2|\mathfrak{F}_{j-1})=\frac{1}{Tk_T}\sum_{t=[\tau_{0}T]+1}^{T}\beta _{2T}^{2(t-[\tau _{0}T]-1)}\cdot (1+o(1))=\frac{\tau_0}{2c}\cdot (1+o(1)). \end{equation*} In addition, for any $\delta>0$, we have \begin{eqnarray*} \sum_{j=1}^{[\tau_0 T]}E(\phi_j^2I\{|\phi_j|>\delta\}|\mathfrak{F}_{j-1})&=&[\tau_0 T]E(\phi_1^2I\{|\phi_1|>\delta\})\non &=&\frac{[\tau_0 T]}{T}E\left((\sqrt{T}\phi_1)^2I\{|\sqrt{T}\phi_1|>\delta\sqrt{T}\}\right)\non &\rightarrow&0 \end{eqnarray*} since $E(\sqrt{T}\phi_1)^2<\infty$. Hence, the Lindeberg condition is verified. Then, applying the central limit theorem for martingale difference sequences, we have \begin{equation} \label{2.11} I_{1}\Rightarrow N(0,\frac{\tau_{0}}{2c}). 
\end{equation} For the term $I_{2}$, it is easy to see that
\begin{equation}\label{2.8}
\varphi_t=\frac{1}{\sqrt{Tk_T}}\frac{\varepsilon _{t}^{(1)}}{\sqrt{l(\eta _{T})}}\sum_{i=[\tau_{0}T]+1}^{t-1}\beta _{2T}^{t-i-1}\frac{\varepsilon _{i}^{(1)}}{\sqrt{l(\eta_{T})}},~~t=[\tau _{0}T]+1,\cdots, T
\end{equation}
is a sequence of martingale differences with respect to the filtration $\mathfrak{F}_{t}=\sigma (\varepsilon _{\lbrack \tau _{0}T]},\cdots,\varepsilon_{t})$ with
\begin{equation*}
\sum_{t=[\tau _{0}T]+1}^{T}E(\varphi_t^2|\mathfrak{F}_{t-1})=\frac{1-\tau _{0}}{2c}(1+o_p(1)).
\end{equation*}
In addition, by arguments similar to those on pages 203-204 of Huang et al. (2014), it is easy to verify that the Lindeberg condition
\begin{eqnarray*}
\sum_{t=[\tau _{0}T]+1}^{T}E(\varphi_t^2I\{|\varphi_t|>\delta\}|\mathfrak{F}_{t-1})=o_p(1),~~{\rm for~ any}~ \delta>0
\end{eqnarray*}
holds. Hence, applying the central limit theorem for martingale difference sequences leads to
\begin{equation}\label{2.6}
I_{2}\Rightarrow N(0,\frac{1-\tau _{0}}{2c}).
\end{equation}
Note that the two sequences (\ref{2.5}) and (\ref{2.8}) are independent. The proof of part (a) is then completed by combining (\ref{2.12}), (\ref{2.11}) and (\ref{2.6}).
To prove part (b), by squaring $y_{t}=(1-c/k_{T})y_{t-1}+\varepsilon_{t}$ and summing over $t\in \{[\tau _{0}T]+1,\cdots ,T\}$, we obtain
\begin{equation*}
\sum_{t=[\tau_{0}T]+1}^{T}y_{t}^{2}=(1-\frac{c}{k_{T}})^{2}\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}^{2}+\sum_{t=[\tau _{0}T]+1}^{T}\varepsilon _{t}^{2}+2(1-\frac{c}{k_{T}})\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}\varepsilon_{t}.
\end{equation*}
This implies that
\begin{equation}
(\frac{2c}{k_{T}}-\frac{c^{2}}{k_{T}^{2}})\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}^{2}=y_{[\tau _{0}T]}^{2}-y_{T}^{2}+\sum_{t=[\tau_{0}T]+1}^{T}\varepsilon _{t}^{2}+2(1-\frac{c}{k_{T}})\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}\varepsilon_{t}. \label{2.16}
\end{equation}
Note that
\begin{equation}\label{2.18}
\frac{y_{[\tau _{0}T]}}{\sqrt{Tl(\eta _{T})}}\Rightarrow W(\tau _{0})
\end{equation}
holds since $\beta_{1}=1$. It then follows from Lemma B.2 and assumption C2 that
\begin{equation}\label{2.17}
\frac{y_{T}}{\sqrt{Tl(\eta _{T})}}=\frac{\beta _{2T}^{T-[\tau_{0}T]}y_{[\tau _{0}T]}+\sum_{i=[\tau _{0}T]+1}^{T}\beta_{2T}^{T-i}\varepsilon _{i}}{\sqrt{Tl(\eta _{T})}}=o_{p}(1).
\end{equation}
In addition,
\begin{equation}\label{2.19}
2(1-\frac{c}{k_{T}})\sum_{t=[\tau _{0}T]+1}^{T}y_{t-1}\varepsilon_{t}=o_{p}(Tl(\eta _{T}))
\end{equation}
by part (a). Then, combining (\ref{2.16})-(\ref{2.19}) with (\ref{1.11}) yields
\begin{equation*}
\frac{1}{Tk_{T}l(\eta _{T})}\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}^{2}\Rightarrow \frac{1}{2c}(W^{2}(\tau _{0})+1-\tau_{0}).
\end{equation*} Note that the random part (i.e., $W^{2}(\tau _{0})$) of the limiting distribution of $\frac{1}{Tk_{T}l(\eta _{T})}\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}^{2}$ is due to the limiting behavior of $(\sum_{i=1}^{[\tau _{0}T]}\varepsilon _{i})^{2}$, while the limiting distribution of $\frac{1}{\sqrt{Tk_{T}}l(\eta _{T})}\sum_{t=[% \tau_{0}T]+1}^{T}y_{t-1}\varepsilon _{t}$ is determined by the limiting behavior of \begin{equation*} \sum_{j=1}^{[\tau_{0}T]}\varepsilon_{j}\sum_{t=[\tau_{0}T]+1}^{T}\beta_{2T}^{t-[\tau_{0}T]-1}\varepsilon_{t}~~\mathrm{and}~~\sum_{t=[\tau_{0}T]+1}^{T}\varepsilon_{t}\sum_{i=[\tau_{0}T]+1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon_{i}. \end{equation*} Note also that $\sum_{i=1}^{[\tau _{0}T]}\varepsilon _{i}$ and $\sum_{t=[\tau_{0}T]+1}^{T}\varepsilon_{t}\sum_{i=[\tau_{0}T]+1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon _{i}$ are independent and $\sum_{i=1}^{[\tau _{0}T]}\varepsilon_{i}$ and $\sum_{j=1}^{[\tau_{0}T]}\varepsilon _{j}\sum_{t=[\tau _{0}T]+1}^{T}\beta_{2T}^{t-[\tau_{0}T]-1}\varepsilon _{t}$ are uncorrelated. Hence, the two limiting distributions in part (a) and part (b) are independent. Therefore, part (a) and part (b) hold jointly. $\hfill \Box $\newline \noindent \textbf{Proof of Lemma B.4.} \ By applying Lemmas B.1 and B.3, it is trivial that $A_{1}=O_{p}(1/\sqrt{Tk_{T}})=o_{p}(1/k_{T})$ and $A_{4}=O_{p}(1/T)=o_{p}(1/k_{T})$. To find the order of the term $A_{2}$, note that since $\beta _{1}=1$, it is not difficult to see that \begin{eqnarray*} \sum_{t=m+1}^{[\tau_{0}T]}y_{t-1}\varepsilon_{t}&=&y_{0}\sum_{t=m+1}^{[\tau_{0}T]}\varepsilon_{t}+\sum_{t=m+1}^{[\tau _{0}T]}\left( \sum_{i=1}^{t-1}\varepsilon _{i}\right) \varepsilon _{t} \\ &=&\left( y_{0}\sum_{t=m+1}^{[\tau_{0}T]}\varepsilon_{t}^{(1)}+\sum_{t=m+1}^{[\tau_{0}T]}\left( \sum_{i=1}^{t-1}\varepsilon _{i}^{(1)}\right) \varepsilon_{t}^{(1)}\right) \cdot (1+o_{p}(1)) \\ &=&o_{p}\left( \sqrt{T([\tau_{0}T]-m)l(\eta_{T})}\right) +O_{p}\left(\sqrt{\sum_{t=m+1}^{[\tau_{0}T]}E\left( \sum_{i=1}^{t-1}\varepsilon _{i}^{(1)}\right) ^{2}l(\eta _{T})}\right) \\ &=&o_{p}\left( \sqrt{T([\tau_{0}T]-m)l(\eta_{T})}\right) +O_{p}\left(\sqrt{([\tau _{0}T]-m)([\tau_{0}T]+m)}l(\eta_{T})\right) \\ &=&O_{p}\left( \sqrt{([\tau_{0}T]-m)([\tau_{0}T]+m)}l(\eta_{T})\right) \end{eqnarray*} and \begin{eqnarray*} \sum_{t=m+1}^{[\tau_{0}T]}y_{t-1}^{2} &=&T([\tau_{0}T]-m)l(\eta_{T})\cdot\frac{1}{[\tau _{0}T]-m}\sum_{t=m+1}^{[\tau _{0}T]}\left( \frac{y_{t-1}}{\sqrt{Tl(\eta_{T})}}\right) ^{2} \\ &=&O_{p}(T([\tau_{0}T]-m)l(\eta_{T})). \end{eqnarray*} Consequently, we have \begin{equation}\label{B.4} A_{2}=O_{p}\left( \sup_{m\in D_{1T}}\frac{\sqrt{([\tau_{0}T]-m)([\tau_{0}T]+m)}}{T([\tau _{0}T]-m)}\right) \leq O_{p}\left( \frac{1}{\sqrt{TM_{T}}}\right)=o_{p}(1/k_{T}). \end{equation} To find the order of the term $A_{5}$, note that for any small $0<\delta <1$, there exist two large constants $N_{1}=N_{1}(\delta )$ such that $\beta _{2T}^{k_{T}/N_{1}}\rightarrow e^{-c/N_{1}}>1-\delta $ and $N_{2}=N_{2}(\delta )$ such that $\beta _{2T}^{N_{2}k_{T}}\rightarrow e^{-cN_{2}}<\delta $. We then divide the set $\{M_{T}+1,\cdots,T-[\tau_{0}T]\}$, which is the domain of $m-[\tau _{0}T]$ when $m\in D_{2T}$, into three subsets: $\{M_{T}+1,\cdots ,[k_{T}/N_{1}]\}$, $\{[k_{T}/N_{1}]+1,\cdots ,[N_{2}k_{T}]\}$ and $\{[N_{2}k_{T}]+1,\cdots,T-[\tau _{0}T]\}$. 
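For instance, with $\beta _{2T}=1-c/k_{T}$ and $k_{T}\rightarrow \infty $, as in the present regime, the two limits used to choose $N_{1}$ and $N_{2}$ follow from the elementary calculation sketched below:
\begin{equation*}
\beta _{2T}^{k_{T}/N_{1}}=\Big(\big(1-\frac{c}{k_{T}}\big)^{k_{T}}\Big)^{1/N_{1}}\rightarrow e^{-c/N_{1}}~~\mathrm{and}~~\beta _{2T}^{N_{2}k_{T}}=\Big(\big(1-\frac{c}{k_{T}}\big)^{k_{T}}\Big)^{N_{2}}\rightarrow e^{-cN_{2}},
\end{equation*}
so that it suffices to take any $N_{1}>c/\log \frac{1}{1-\delta }$ and $N_{2}>\frac{1}{c}\log \frac{1}{\delta }$.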
Note that
\begin{equation*}
\sum_{t=[\tau _{0}T]+1}^{m}y_{t-1}\varepsilon _{t}=\sum_{t=[\tau_{0}T]+1}^{m}\left( \beta _{2T}^{t-1-[\tau _{0}T]}y_{[\tau _{0}T]}+\sum_{i=[\tau _{0}T]+1}^{t-1}\beta _{2T}^{t-1-i}\varepsilon_{i}\right) \varepsilon _{t},
\end{equation*}
\begin{eqnarray*}
&&y_{[\tau _{0}T]}\sum_{t=[\tau _{0}T]+1}^{m}\beta _{2T}^{t-1-[\tau_{0}T]}\varepsilon _{t} \\
&=&O_{p}(\sqrt{Tl(\eta _{T})})\cdot O_{p}\left( \sqrt{k_{T}(1-\beta_{2T}^{2(m-[\tau _{0}T])})l(\eta _{T})}\right) \\
&=&O_{p}\left( \sqrt{Tk_{T}(1-\beta _{2T}^{2(m-[\tau _{0}T])})}l(\eta_{T})\right) \\
&=&\left\{
\begin{array}{ll}
O_{p}\left( \sqrt{T(m-[\tau _{0}T])}l(\eta _{T})\right) ,~ & \mbox{if}~M_{T}<m-[\tau _{0}T]\leq \lbrack k_{T}/N_{1}], \\
O_{p}\left( \sqrt{Tk_{T}}l(\eta _{T})\right) ,~ & \mbox{if}~[k_{T}/N_{1}]<m-[\tau _{0}T]\leq T-[\tau _{0}T].
\end{array}
\right.
\end{eqnarray*}
Bounding the remaining component of the above decomposition and the corresponding sums $\sum_{t=[\tau _{0}T]+1}^{m}y_{t-1}^{2}$ in the same manner on each of the three subsets, one obtains $A_{5}=o_{p}(1/k_{T})$.
When $\hat{\tau}_{T}>\tau _{0}$, it follows from Lemma B.2 that
\begin{eqnarray}\label{B.2}
\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T+k_{T}^{2}/T]}y_{t-1}^{2} &=&\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T+k_{T}^{2}/T]}\Big(\beta_{2T}^{t-1-[\tau _{0}T]}y_{[\tau _{0}T]}+\sum_{i=[\tau _{0}T]+1}^{t-1}\beta_{2T}^{t-1-i}\varepsilon _{i}\Big)^{2} \notag \\
&=&\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T+k_{T}^{2}/T]}\beta_{2T}^{2(t-1-[\tau _{0}T])}y_{[\tau _{0}T]}^{2}\cdot (1+o_{p}(1)) \notag \\
&=&k_{T}^{2}/T\cdot O_{p}(Tl(\eta_{T})) \notag \\
&=&O_{p}(k_{T}^{2}l(\eta _{T})),
\end{eqnarray}
implying
\begin{equation}\label{B.13}
\sum_{t=[\tau _{0}T]+1}^{[\hat{\tau}_{T}T]}y_{t-1}^{2}=O_{p}(k_{T}^{2}l(\eta_{T})).
\end{equation}
By squaring $y_{t}=\beta _{2T}y_{t-1}+\varepsilon _{t}$ and summing over $t\in \{[\tau _{0}T]+1,\cdots ,[\tau _{0}T+k_{T}^{2}/T]\}$, we obtain
\begin{equation}
\sum_{t=[\tau _{0}T]+1}^{[\tau_{0}T+k_{T}^{2}/T]}y_{t-1}\varepsilon_{t}=\frac{1-\beta _{2T}^{2}}{2\beta _{2T}}\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T+k_{T}^{2}/T]}y_{t-1}^{2}+\frac{y_{[\tau_{0}T+k_{T}^{2}/T]}^{2}-y_{[\tau _{0}T]}^{2}}{2\beta _{2T}}-\frac{1}{2\beta_{2T}}\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T+k_{T}^{2}/T]}\varepsilon_{t}^{2}. \label{B.14}
\end{equation}
First, applying (\ref{B.2}) immediately yields
\begin{equation}\label{B.15}
\frac{1-\beta_{2T}^{2}}{2\beta_{2T}}\sum_{t=[\tau _{0}T]+1}^{[\tau_{0}T+k_{T}^{2}/T]}y_{t-1}^{2}=O_{p}(k_{T}l(\eta _{T})).
\end{equation}
Secondly, since $\sqrt{T}=o(k_{T})$, we have
\begin{equation}\label{B.16}
\frac{1}{2\beta _{2T}}\sum_{t=[\tau _{0}T]+1}^{[\tau_{0}T+k_{T}^{2}/T]}\varepsilon _{t}^{2}=O_{p}(k_{T}^{2}l(\eta _{T})/T).
\end{equation}%
Thirdly, since $\left\vert \beta _{2T}\right\vert <1$, we have
\begin{equation*}
\sum_{i=0}^{[k_{T}^{2}/T]-1}\beta _{2T}^{2i}=O(k_{T}^{2}/T)
\end{equation*}
which implies that
\begin{equation*}
\sum_{i=0}^{[k_{T}^{2}/T]-1}\beta _{2T}^{i}\varepsilon _{\lbrack \tau_{0}T+k_{T}^{2}/T]-i}=O_{p}(k_{T}\sqrt{l(\eta _{T})}/\sqrt{T}).
\end{equation*}% Then, we have \begin{eqnarray}\label{B.17} &&y_{[\tau_{0}T+k_{T}^{2}/T]}^{2}-y_{[\tau _{0}T]}^{2} \notag\\ &=&\left( y_{[\tau _{0}T+k_{T}^{2}/T]}+y_{[\tau _{0}T]}\right) \left(y_{[\tau _{0}T+k_{T}^{2}/T]}-y_{[\tau _{0}T]}\right) \notag \\ &=&\left( \sum_{i=0}^{[k_{T}^{2}/T]-1}\beta _{2T}^{i}\varepsilon _{\lbrack\tau _{0}T+k_{T}^{2}/T]-i}+(\beta _{2T}^{[k_{T}^{2}/T]}+1)y_{[\tau _{0}T]}\right) \notag \\ &&\cdot \left( \sum_{i=0}^{[k_{T}^{2}/T]-1}\beta _{2T}^{i}\varepsilon_{\lbrack \tau _{0}T+k_{T}^{2}/T]-i}+(\beta _{2T}^{[k_{T}^{2}/T]}-1)y_{[\tau _{0}T]}\right) \notag \\ &=&\left( O_{p}(k_{T}\sqrt{l(\eta _{T})}/\sqrt{T})+O_{p}(\sqrt{Tl(\eta _{T})})\right) \left( O_{p}(k_{T}\sqrt{l(\eta _{T})}/\sqrt{T})+\frac{k_{T}^{2}/T}{k_{T}}O_{p}(\sqrt{Tl(\eta _{T})}\right) \notag \\ &=&O_{p}(\sqrt{Tl(\eta _{T})})O_{p}(k_{T}\sqrt{l(\eta _{T})}/\sqrt{T})\notag \\ &=&O_{p}(k_{T}l(\eta _{T})). \end{eqnarray}% Combining (\ref{B.14})-(\ref{B.17}) together leads to \begin{equation}\label{B.1} \sum_{t=[\tau_{0}T]+1}^{[\tau _{0}T+k_{T}^{2}/T]}y_{t-1}\varepsilon_{t}=O_{p}(k_{T}l(\eta _{T})), \end{equation} which implies \begin{equation}\label{B.18} \sum_{t=[\tau_{0}T]+1}^{[\hat{\tau}_{T}T]}y_{t-1}\varepsilon_{t}=O_{p}(k_{T}l(\eta _{T})). \end{equation} $\hfill \Box $\newline \noindent\textbf{Proof of Lemma C.1.} \; The proof can be completed by following the proof of Lemma 4.2 in Phillips and Magdalinos (2007a) and the truncation technique (A.1). Thus the details are omitted. $\hfill \Box $\newline \noindent\textbf{Proof of Lemma C.2.} \; To prove part (a), we first note that the following decomposition holds: \begin{eqnarray} \label{C.23} &&\frac{1}{\beta_{2T}^{T-[\tau_0 T]}\sqrt{T k_T} l(\eta_T)}\sum_{t=[\tau_0 T]+1}^{T}y_{t-1}\varepsilon_t \notag \\ &=&\frac{1}{\beta_{2T}^{T-[\tau_0 T]}\sqrt{T k_T} l(\eta_T)}\sum_{t=[\tau_0 T] +1}^{T}\Big(\beta_{2T}^{t-[\tau_0 T] -1}y_{[\tau_0 T] }+\sum_{i=[\tau_0 T] +1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon_i\Big)\varepsilon_t \notag \\ &=&\frac{1}{\beta_{2T}^{T-[\tau_0 T]}\sqrt{T k_T} l(\eta_T)}\left(\Big(y_0+\sum_{j=1}^{[\tau_0 T] }\varepsilon_j\Big)\sum_{t=[\tau_0 T] +1}^{T}\beta_{2T}^{t-[\tau_0 T] -1}\varepsilon_t+ \sum_{t=[\tau_0 T]+1}^{T}\varepsilon_t\sum_{i=[\tau_0 T] +1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon_i\right) \notag \\ &:=&II_1+II_2, \end{eqnarray} where \begin{eqnarray*} II_1&=&\frac{1}{\beta_{2T}^{T-[\tau_0 T]}\sqrt{T k_T} l(\eta_T)}\Big(y_0+\sum_{j=1}^{[\tau_0 T] }\varepsilon_j\Big)\sum_{t=[\tau_0 T] +1}^{T}\beta_{2T}^{t-[\tau_0 T] -1}\varepsilon_t \notag \\ &=&\frac{1}{\beta_{2T}^{T-[\tau_0 T]}\sqrt{T k_T} l(\eta_T)}\sum_{j=1}^{[\tau_0 T] }\varepsilon_j\sum_{t=[\tau_0 T]+1}^{T}\beta_{2T}^{t-[\tau_0 T] -1}\varepsilon_t\cdot (1+o_p(1)) \end{eqnarray*} and \begin{eqnarray*} II_2&=&\frac{1}{\beta_{2T}^{T-[\tau_0 T]}\sqrt{T k_T} l(\eta_T)}\sum_{t=[\tau_0 T] +1}^{T}\varepsilon_t\sum_{i=[\tau_0 T] +1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon_i \notag \\ &=&\frac{1}{\beta_{2T}^{T-[\tau_0 T]}\sqrt{T k_T} l(\eta_T)}\sum_{t=[\tau_0 T] +1}^{T}\varepsilon_t^{(1)}\sum_{i=[\tau_0 T] +1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon_i^{(1)}\cdot (1+o_p(1)) \end{eqnarray*} by the truncation technique (A.1) and some simple calculus. 
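The representation of $y_{t-1}$ used in the second equality of (\ref{C.23}) is, as elsewhere in this supplement, just the post-break recursion $y_{t}=\beta _{2T}y_{t-1}+\varepsilon _{t}$ iterated back to the change point; a quick check reads
\begin{equation*}
y_{t-1}=\beta _{2T}y_{t-2}+\varepsilon _{t-1}=\cdots =\beta _{2T}^{t-1-[\tau _{0}T]}y_{[\tau _{0}T]}+\sum_{i=[\tau _{0}T]+1}^{t-1}\beta _{2T}^{t-1-i}\varepsilon _{i},~~t=[\tau _{0}T]+2,\cdots ,T,
\end{equation*}
while $y_{t-1}=y_{[\tau _{0}T]}$ trivially when $t=[\tau _{0}T]+1$.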
For the term $II_{1}$, applying part (a) of Lemma C.1 and observing the fact $\frac{\sum_{t=1}^{[\tau _{0}T]}\varepsilon _{t}}{\sqrt{Tl(\eta _{T})}}\Rightarrow W(\tau _{0})$, it can be shown that \begin{equation} \label{C.21} II_{1}\Rightarrow XW(\tau _{0}) \end{equation} by noting that \begin{eqnarray*} &&\frac{1}{\beta _{2T}^{T-[\tau _{0}T]}\sqrt{k_{T}l(\eta _{T})}}\sum_{t=[\tau _{0}T]+1}^{T}\beta _{2T}^{t-[\tau _{0}T]-1}\varepsilon _{t} \\ &=&\frac{1}{\beta _{2T}^{T-[\tau _{0}T]}\sqrt{k_{T}l(\eta _{T})}}\sum_{s=1}^{T-[\tau _{0}T]}\beta _{2T}^{s-1}\varepsilon _{\lbrack \tau_{0}T]+s} \\ &=&\frac{1}{\sqrt{k_{T}l(\eta _{T})}}\sum_{s=1}^{[(1-\tau _{0})T]}\beta_{2T}^{s-1-[(1-\tau _{0})T]}\varepsilon _{\lbrack \tau _{0}T]+s}\cdot(1+o_{p}(1)) \end{eqnarray*} and \begin{equation*} \frac{1}{\sqrt{k_{T}l(\eta _{T})}}\sum_{s=1}^{[(1-\tau _{0})T]}\beta_{2T}^{s-1-[(1-\tau _{0})T]}\varepsilon _{\lbrack \tau _{0}T]+s}\overset{d}{=} \frac{1}{\sqrt{k_{T}l(\eta _{T})}}\sum_{s=1}^{[(1-\tau _{0})T]}\beta_{2T}^{s-1-[(1-\tau _{0})T]}\varepsilon _{s}\Rightarrow X. \end{equation*} For the term $II_2$, it is trivial that \begin{eqnarray*} E\Big(\frac{1}{\beta_{2T}^{T-[\tau_0 T]}\sqrt{T k_T} l(\eta_T)}\sum_{t=[\tau_0 T] +1}^{T}\varepsilon_t^{(1)}\sum_{i=[\tau_0 T]+1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon_i^{(1)}\Big)=0 \end{eqnarray*} and \begin{eqnarray} \label{C.14} &&Var\Big(\frac{1}{\beta_{2T}^{T-[\tau_0 T]}\sqrt{T k_T} l(\eta_T)}\sum_{t=[\tau_0 T] +1}^{T}\varepsilon_t^{(1)}\sum_{i=[\tau_0 T] +1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon_i^{(1)}\Big) \notag \\ &=&\frac{1}{\beta_{2T}^{2(T-[\tau_0 T])}T k_T}\sum_{t=[\tau_0 T]+1}^{T}\sum_{i=[\tau_0 T] +1}^{t-1}\beta_{2T}^{2(t-i-1)}\cdot (1+o(1)) \notag \\ &=&\frac{1}{\beta_{2T}^{2(T-[\tau_0 T])}T }\cdot \frac{1}{k_T(\beta_{2T}^2-1)}\cdot \Big(\frac{\beta_{2T}^{2(T-[\tau_0 T])}-1}{\beta_{2T}^2-1}-(T-[\tau_0 T])\Big)\cdot (1+o(1)) \notag \\ &=&o(1) \end{eqnarray} by $k_T=o(T)$. Consequently, \begin{eqnarray} \label{C.22} II_2=o_p(1). \end{eqnarray} Obviously, combining (\ref{C.23}), (\ref{C.21}) and (\ref{C.22}) yields part (a). 
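For completeness, the third equality in (\ref{C.14}) rests on the following routine evaluation of the geometric double sum, which is also what is behind the bound for the term $III_{2}$ below:
\begin{equation*}
\sum_{t=[\tau _{0}T]+1}^{T}\sum_{i=[\tau _{0}T]+1}^{t-1}\beta _{2T}^{2(t-i-1)}=\sum_{s=0}^{T-[\tau _{0}T]-1}\frac{\beta _{2T}^{2s}-1}{\beta _{2T}^{2}-1}=\frac{1}{\beta _{2T}^{2}-1}\Big(\frac{\beta _{2T}^{2(T-[\tau _{0}T])}-1}{\beta _{2T}^{2}-1}-(T-[\tau _{0}T])\Big).
\end{equation*}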
To prove part (b), we write \begin{eqnarray} \label{C.18} &&\frac{1}{\beta_{2T}^{2(T-[\tau_0 T])}T k_T l(\eta_T)}\sum_{t=[\tau_0 T]+1}^{T}y_{t-1}^2 \notag \\ &=&\frac{1}{\beta_{2T}^{2(T-[\tau_0 T])}T k_T l(\eta_T)}\sum_{t=[\tau_0 T]+1}^{T}\Big(\beta_{2T}^{t-[\tau_0 T] -1}y_{[\tau_0 T] }+\sum_{i=[\tau_0 T] +1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon_i\Big)^2 \notag \\ &=&\frac{y_{[\tau_0 T]}^2}{\beta_{2T}^{2(T-[\tau_0 T])}T k_T l(\eta_T)}\sum_{t=[\tau_0 T]+1}^{T}\beta_{2T}^{2(t-[\tau_0 T]-1)}+\frac{1}{% \beta_{2T}^{2(T-[\tau_0 T])}T k_T l(\eta_T)}\sum_{t=[\tau_0 T]+1}^{T}\Big(\sum_{i=[\tau_0 T] +1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon_i\Big)^2 \notag \\ &&+\frac{2 y_{[\tau_0 T]}}{\beta_{2T}^{2(T-[\tau_0 T])}T k_T l(\eta_T)}\sum_{t=[\tau_0 T]+1}^{T}\Big(\beta_{2T}^{t-[\tau_0 T] -1}\sum_{i=[\tau_0 T] +1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon_i\Big) \notag \\ &:=&III_1+III_2+III_3, \end{eqnarray} where \begin{eqnarray*} III_1=\frac{y_{[\tau_0 T]}^2}{\beta_{2T}^{2(T-[\tau_0 T])}T k_T l(\eta_T)}\sum_{t=[\tau_0 T]+1}^{T}\beta_{2T}^{2(t-[\tau_0 T]-1)}, \end{eqnarray*} \begin{eqnarray*} III_2=\frac{1}{\beta_{2T}^{2(T-[\tau_0 T])}T k_T l(\eta_T)}\sum_{t=[\tau_0 T]+1}^{T}\Big(\sum_{i=[\tau_0 T] +1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon_i% \Big)^2 \end{eqnarray*} and \begin{eqnarray*} III_3=\frac{2 y_{[\tau_0 T]}}{\beta_{2T}^{2(T-[\tau_0 T])}T k_T l(\eta_T)}\sum_{t=[\tau_0 T]+1}^{T}\Big(\beta_{2T}^{t-[\tau_0 T] -1}\sum_{i=[\tau_0 T] +1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon_i\Big). \end{eqnarray*} Obviously, \begin{equation} \label{C.15} III_{1}\Rightarrow \frac{1}{2c}W^{2}(\tau _{0}). \end{equation} For the term $III_{2}$, it is true that \begin{equation*} III_{2}=\frac{1}{\beta _{2T}^{2(T-[\tau _{0}T])}Tk_{T}l(\eta _{T})}\sum_{t=[\tau _{0}T]+1}^{T}\Big(\sum_{i=[\tau _{0}T]+1}^{t-1}\beta_{2T}^{t-i-1}\varepsilon _{i}^{(1)}\Big)^{2}\cdot (1+o_{p}(1)) \end{equation*} and \begin{eqnarray*} &&E\left( \frac{1}{\beta _{2T}^{2(T-[\tau _{0}T])}Tk_{T}l(\eta _{T})}\sum_{t=[\tau _{0}T]+1}^{T}\Big(\sum_{i=[\tau _{0}T]+1}^{t-1}\beta _{2T}^{t-i-1}\varepsilon _{i}^{(1)}\Big)^{2}\right) \\ &=&\frac{1}{\beta _{2T}^{2(T-[\tau _{0}T])}Tk_{T}}\sum_{t=[\tau_{0}T]+1}^{T}\sum_{i=[\tau _{0}T]+1}^{t-1}\beta _{2T}^{2(t-i-1)}\cdot (1+o(1)) \\ &=&o(1) \end{eqnarray*}% by similar arguments as in (\ref{C.14}). Hence, we have \begin{equation} \label{C.16} III_{2}=o_{p}(1). \end{equation} For the term $III_{3}$, using (\ref{C.15}) and (\ref{C.16}) and applying Cauchy-Schwarz inequality immediately leads to \begin{equation} \label{C.17} III_{3}=o_{p}(1). \end{equation} Combining (\ref{C.18})-(\ref{C.17}), we show part (b). Moreover, by checking the above proofs carefully, one can find that parts (a) and (b) hold jointly and $X$ and $W(\tau _{0})$ are mutually independent. $\hfill \Box $\newline \noindent \textbf{Proof of Lemma C.3.} \ First, it is trivial that \begin{equation}\label{C.2} A_{1}=o_{p}(1/k_{T}),\quad A_{4}=o_{p}(1/k_{T}) \end{equation} by Lemma B.1 and Lemma C.2. Secondly, note that the same reasoning in (\ref{B.4}) also implies \begin{equation} \label{C.3} A_{2}=o_{p}(1/k_{T}). 
\end{equation} For the term $A_{5}$, following the proof of Theorem 1.2, we have
\begin{eqnarray*}
&&y_{[\tau_{0}T]}\sum_{t=[\tau _{0}T]+1}^{m}\beta _{2T}^{t-1-[\tau_{0}T]}\varepsilon _{t} \\
&=&O_{p}\left( \sqrt{Tk_{T}(\beta _{2T}^{2(m-[\tau _{0}T])}-1)}l(\eta_{T})\right) \\
&=&\left\{
\begin{array}{ll}
O_{p}\left( \sqrt{T(m-[\tau _{0}T])}l(\eta _{T})\right) ,~ & \mbox{if}~M_{T}<m-[\tau _{0}T]\leq k_{T}, \\
O_{p}\left( \sqrt{Tk_{T}}\beta _{2T}^{m-[\tau _{0}T]}l(\eta _{T})\right) ,~ & \mbox{if}~k_{T}<m-[\tau _{0}T]\leq T-[\tau _{0}T].
\end{array}
\right.
\end{eqnarray*}
Bounding the remaining component of $\sum_{t=[\tau _{0}T]+1}^{m}y_{t-1}\varepsilon _{t}$ and the corresponding sums $\sum_{t=[\tau _{0}T]+1}^{m}y_{t-1}^{2}$ in the same way then gives $A_{5}=o_{p}(1/k_{T})$.
When $\hat{\tau}_{T}>\tau _{0}$, it follows from arguments similar to those in the proofs of (\ref{B.2}) and (\ref{B.1}), respectively, that
\begin{eqnarray*}
\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T+k_{T}^{2}/T]}y_{t-1}^{2}&=&\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T+k_{T}^{2}/T]}\Big(\beta_{2T}^{t-1-[\tau _{0}T]}y_{[\tau _{0}T]}+\sum_{i=[\tau _{0}T]+1}^{t-1}\beta_{2T}^{t-1-i}\varepsilon _{i}\Big)^{2} \\
&=&\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T+k_{T}^{2}/T]}\Big(\beta_{2T}^{t-1-[\tau _{0}T]}y_{[\tau _{0}T]}+O_{p}(\sqrt{k_{T}\beta_{2T}^{t-1-[\tau _{0}T]}l(\eta _{T})})\Big)^{2} \\
&=&\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T+k_{T}^{2}/T]}\beta_{2T}^{2(t-1-[\tau _{0}T])}y_{[\tau _{0}T]}^{2}\cdot (1+o_{p}(1)) \\
&=&k_{T}^{2}/T\cdot O_{p}(Tl(\eta _{T})) \\
&=&O_{p}(k_{T}^{2}l(\eta _{T})),
\end{eqnarray*}
yielding
\begin{equation}\label{C.9}
\sum_{t=[\tau _{0}T]+1}^{[\hat{\tau}_{T}T]}y_{t-1}^{2}=O_{p}(k_{T}^{2}l(\eta_{T}))
\end{equation}
and
\begin{eqnarray*}
\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T+k_{T}^{2}/T]}y_{t-1}\varepsilon _{t} &=&\frac{1-\beta _{2T}^{2}}{2\beta _{2T}}\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T+k_{T}^{2}/T]}y_{t-1}^{2}+\frac{y_{[\tau_{0}T+k_{T}^{2}/T]}^{2}-y_{[\tau _{0}T]}^{2}}{2\beta _{2T}}-\frac{1}{2\beta_{2T}}\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T+k_{T}^{2}/T]}\varepsilon _{t}^{2}\\
&=&O_{p}(k_{T}l(\eta _{T}))+O_{p}(\sqrt{Tl(\eta _{T})})\cdot O_{p}(k_{T}\sqrt{l(\eta _{T})}/\sqrt{T})+O_{p}(k_{T}^{2}l(\eta _{T})/T) \\
&=&O_{p}(k_{T}l(\eta _{T})),
\end{eqnarray*}
yielding
\begin{equation} \label{C.10}
\sum_{t=[\tau _{0}T]+1}^{[\hat{\tau}_{T}T]}y_{t-1}\varepsilon_{t}=O_{p}(k_{T}l(\eta _{T})).
\end{equation}
$\hfill \Box $\newline
\noindent \textbf{Proof of Lemma C.5.} \ The proof is similar to that of Lemma B.5, with the use of the result in (C.1). The details are omitted. $\hfill \Box $\newline
\noindent \textbf{Proof of Lemma D.1.} \ The results can be proved by following the proof of Theorem 4.3 in Phillips and Magdalinos (2007a) and applying the truncation technique as shown in (A.1). The details are omitted. $\hfill \Box $\newline
\noindent\textbf{Proof of Lemma D.2.} \; To prove part (a), we write
\begin{eqnarray} \label{D.0}
&&\frac{\beta_{1T}^{-[\tau_0 T]}}{\sqrt{T k_T} l(\eta_T)}\sum_{t=[\tau_0 T]+1}^{T}y_{t-1}\varepsilon_t \notag \\
&=&\frac{\beta_{1T}^{-[\tau_0 T]}}{\sqrt{T k_T} l(\eta_T)}\sum_{t=[\tau_0 T]+1}^{T}\Big(y_{[\tau_0 T] }+\sum_{i=[\tau_0 T] +1}^{t-1}\varepsilon_i\Big)\varepsilon_t \notag \\
&=&\frac{\beta_{1T}^{-[\tau_0 T]}}{\sqrt{T k_T} l(\eta_T)}\left(\Big(\beta_{1T}^{[\tau_0 T] }y_0+\sum_{j=1}^{[\tau_0 T] }\beta_{1T}^{[\tau_0 T] -j}\varepsilon_j\Big)\sum_{t=[\tau_0 T] +1}^{T}\varepsilon_t+\sum_{t=[\tau_0 T] +1}^{T}\varepsilon_t\sum_{i=[\tau_0 T]+1}^{t-1}\varepsilon_i\right) \notag \\
&:=&IV_1+IV_2,
\end{eqnarray}
where
\begin{eqnarray*}
IV_1=\frac{\beta_{1T}^{-[\tau_0 T]}}{\sqrt{T k_T} l(\eta_T)}\Big(\beta_{1T}^{[\tau_0 T] }y_0+\sum_{j=1}^{[\tau_0 T] }\beta_{1T}^{[\tau_0 T]-j}\varepsilon_j\Big)\sum_{t=[\tau_0 T] +1}^{T}\varepsilon_t
\end{eqnarray*}
and
\begin{eqnarray*}
IV_2=\frac{\beta_{1T}^{-[\tau_0 T]}}{\sqrt{T k_T} l(\eta_T)}\sum_{t=[\tau_0 T] +1}^{T}\varepsilon_t\sum_{i=[\tau_0 T] +1}^{t-1}\varepsilon_i.
\end{eqnarray*}
Applying part (b) of Lemma C.1 and assumption C3, it can be shown that
\begin{equation} \label{D.1}
IV_{1}=\frac{1}{\sqrt{k_{T}l(\eta_{T})}}\sum_{j=1}^{[\tau_{0}T]}\beta_{1T}^{-j}\varepsilon_{j}\cdot \frac{1}{\sqrt{Tl(\eta_{T})}}\sum_{t=[\tau_{0}T]+1}^{T}\varepsilon_{t}+o_{p}(1)\Rightarrow Y(\overline{W}(1)-\overline{W}(\tau_{0})).
\end{equation}
In addition, it is not difficult to show that
\begin{equation} \label{D.2}
IV_{2}=O_{p}\Big(\frac{\beta_{1T}^{-[\tau_{0}T]}}{\sqrt{Tk_{T}}l(\eta_{T})}\cdot Tl(\eta_{T})\Big)=o_{p}(1)
\end{equation}
by (C.1). Combining (\ref{D.0}), (\ref{D.1}) and (\ref{D.2}) leads to part (a).
To prove part (b), we write
\begin{eqnarray}
&&\frac{\beta _{1T}^{-2[\tau _{0}T]}}{Tk_{T}l(\eta _{T})}\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}^{2} \notag \label{D.4} \\
&=&\frac{\beta _{1T}^{-2[\tau _{0}T]}}{Tk_{T}l(\eta _{T})}\sum_{t=[\tau_{0}T]+1}^{T}\Big(y_{[\tau _{0}T]}+\sum_{i=[\tau _{0}T]+1}^{t-1}\varepsilon _{i}\Big)^{2} \notag \\
&=&\frac{\beta _{1T}^{-2[\tau _{0}T]}}{Tk_{T}l(\eta _{T})}\sum_{t=[\tau_{0}T]+1}^{T}\Big(\sum_{j=1}^{[\tau _{0}T]}\beta _{1T}^{[\tau _{0}T]-j}\varepsilon _{j}+\sum_{i=[\tau _{0}T]+1}^{t-1}\varepsilon _{i}\Big)^{2}\cdot (1+o_{p}(1)) \notag \\
&:=&(V_{1}+V_{2}+V_{3})\cdot (1+o_{p}(1)),
\end{eqnarray}
where
\begin{equation*}
V_{1}=\frac{1}{Tk_{T}l(\eta _{T})}\sum_{t=[\tau _{0}T]+1}^{T}\Big(\sum_{j=1}^{[\tau _{0}T]}\beta _{1T}^{-j}\varepsilon _{j}\Big)^{2},
\end{equation*}
\begin{equation*}
V_{2}=\frac{\beta _{1T}^{-2[\tau _{0}T]}}{Tk_{T}l(\eta _{T})}\sum_{t=[\tau_{0}T]+1}^{T}\Big(\sum_{i=[\tau _{0}T]+1}^{t-1}\varepsilon _{i}\Big)^{2}
\end{equation*}
and
\begin{equation*}
V_{3}=\frac{2\beta _{1T}^{-2[\tau _{0}T]}}{Tk_{T}l(\eta _{T})}\cdot\sum_{j=1}^{[\tau _{0}T]}\beta _{1T}^{[\tau _{0}T]-j}\varepsilon _{j}\cdot \sum_{t=[\tau _{0}T]+1}^{T}\sum_{i=[\tau _{0}T]+1}^{t-1}\varepsilon_{i}.
\end{equation*}
Obviously,
\begin{eqnarray} \label{D.5}
V_1\Rightarrow (1-\tau_0)Y^2
\end{eqnarray}
by applying part (b) of Lemma C.1. For the term $V_{2}$, we have
\begin{equation} \label{D.6}
V_{2}=O_{p}\Big(\frac{\beta _{1T}^{-2[\tau _{0}T]}}{Tk_{T}l(\eta _{T})}\cdot T^{2}l(\eta _{T})\Big)=o_{p}(1)
\end{equation}
by making use of (C.1). Finally, applying Cauchy-Schwarz inequality, it follows from (\ref{D.5}) and (\ref{D.6}) that
\begin{eqnarray} \label{D.7}
V_3=o_p(1).
\end{eqnarray}
Part (b) is then proved by combining (\ref{D.4})-(\ref{D.7}). One can also verify that parts (a) and (b) hold jointly and that $Y$ and $\overline{W}(1)-\overline{W}(\tau _{0})$ are mutually independent. $\hfill \Box $\newline
\noindent \textbf{Proof of Lemma D.3.} \ First, by applying Lemmas D.1 and D.2, it can be shown that
\begin{equation*}
A_{1}=o_{p}(1/k_{T})~~\mbox{and}~~A_{4}=o_{p}(1/k_{T}).
\end{equation*}
Secondly, we investigate the magnitude of the terms $A_{2}$ and $A_{5}$ in probability. For $A_{2}$, note that Cauchy-Schwarz inequality implies
\begin{equation*}
\sup_{m\in D_{1T}}\left\vert \frac{\sum_{t=m+1}^{[\tau_{0}T]}y_{t-1}\varepsilon _{t}}{\sum_{t=m+1}^{[\tau _{0}T]}y_{t-1}^{2}}\right\vert \leq \sup_{m\in D_{1T}}\sqrt{\frac{\sum_{t=m+1}^{[\tau_{0}T]}\varepsilon _{t}^{2}}{\sum_{t=m+1}^{[\tau _{0}T]}y_{t-1}^{2}}},
\end{equation*}
so it suffices to prove that the magnitude of the right hand side of the above inequality is $o_{p}(1/k_{T})$.
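The inequality just used for $A_{2}$ is simply the Cauchy-Schwarz bound applied to the numerator; for completeness,
\begin{equation*}
\Big|\sum_{t=m+1}^{[\tau _{0}T]}y_{t-1}\varepsilon _{t}\Big|\leq \Big(\sum_{t=m+1}^{[\tau _{0}T]}y_{t-1}^{2}\Big)^{1/2}\Big(\sum_{t=m+1}^{[\tau _{0}T]}\varepsilon _{t}^{2}\Big)^{1/2},
\end{equation*}
and dividing both sides by $\sum_{t=m+1}^{[\tau _{0}T]}y_{t-1}^{2}$ yields the displayed bound for every $m\in D_{1T}$.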
Applying Lemma C.1, we immediately have \begin{equation*} \sup_{m\in D_{1T}}\sqrt{\frac{\sum_{t=m+1}^{[\tau _{0}T]}\varepsilon _{t}^{2}}{\sum_{t=1}^{[\tau _{0}T]}y_{t-1}^{2}}}\leq \sqrt{\frac{\sum_{t=1}^{[\tau_{0}T]}\varepsilon _{t}^{2}}{y_{[\tau _{0}T]-1}^{2}}}\leq O_{p}\left(\sqrt{\frac{Tl(\eta_{T})}{k_{T}\beta_{1T}^{2[\tau_{0}T]}l(\eta _{T})}}\right) =o_{p}(1/k_{T}) \end{equation*} by (C.1), which implies that \begin{equation*} A_{2}\leq \sup_{m\in D_{1T}}\sqrt{\frac{\sum_{t=m+1}^{[\tau_{0}T]}\varepsilon _{t}^{2}}{\sum_{t=1}^{[\tau _{0}T]}y_{t-1}^{2}}}=o_{p}(1/k_{T}). \end{equation*} Similarly, for the term $A_{5}$, we have \begin{eqnarray*} \sup_{m\in D_{2T}}\left\vert \frac{\sum_{t=[\tau_{0}T]+1}^{m}y_{t-1}\varepsilon _{t}}{\sum_{t=[\tau _{0}T]+1}^{m}y_{t-1}^{2}}\right\vert &\leq &\sup_{m\in D_{2T}}\sqrt{\frac{\sum_{t=[\tau_{0}T]+1}^{m}\varepsilon _{t}^{2}}{\sum_{t=[\tau _{0}T]+1}^{m}y_{t-1}^{2}}}\\ &\leq &\sqrt{\frac{\sum_{t=[\tau _{0}T]+1}^{T}\varepsilon _{t}^{2}}{y_{[\tau_{0}T]}^{2}}} \\ &=&O_{p}\left( \sqrt{\frac{Tl(\eta _{T})}{k_{T}\beta _{1T}^{2[\tau_{0}T]}l(\eta _{T})}}\right) \\ &=&o_{p}(1/k_{T}), \end{eqnarray*} which implies that \begin{equation*} A_{5}=o_{p}(1/k_{T}). \end{equation*} Thirdly, note also that \begin{eqnarray*} A_{3} &=&\sup_{m\in D_{1T}}\Big|\frac{\sum_{t=m+1}^{T}y_{t-1}^{2}}{\sum_{t=[\tau _{0}T]+1}^{T}y_{t-1}^{2}\sum_{t=m+1}^{[\tau _{0}T]}y_{t-1}^{2}}\Lambda _{T}(\frac{m}{T})\Big| \\ &\leq &\bigg(\frac{1}{\sum_{t=[\tau _{0}T]+1}^{T}y_{t-1}^{2}}+\frac{1}{\sum_{t=[\tau _{0}T]-M_{T}}^{[\tau _{0}T]}y_{t-1}^{2}}\bigg)\bigg(\sup_{m\in D_{1T}}\Big|\frac{(\sum_{t=1}^{[\tau _{0}T]}y_{t-1}\varepsilon _{t})^{2}}{\sum_{t=1}^{[\tau _{0}T]}y_{t-1}^{2}}-\frac{(\sum_{t=1}^{m}y_{t-1}\varepsilon _{t})^{2}}{\sum_{t=1}^{m}y_{t-1}^{2}}\Big| \\ &&~~~~+\sup_{m\in D_{1T}}\Big|\frac{(\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}\varepsilon _{t})^{2}}{\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}^{2}}-\frac{(\sum_{t=m+1}^{T}y_{t-1}\varepsilon_{t})^{2}}{\sum_{t=m+1}^{T}y_{t-1}^{2}}\Big|\bigg) \\ &\leq &\Big(O_{p}\big(\frac{1}{Tk_{T}\beta _{1T}^{2[\tau _{0}T]}l(\eta_{T})}\big)+O_{p}\big(\frac{1}{k_{T}\beta _{1T}^{2[\tau _{0}T]}l(\eta _{T})}\big)\Big)\cdot O_{p}(l(\eta_{T})) \\ &=&o_{p}(1/k_{T}^{2}), \end{eqnarray*} and \begin{eqnarray*} A_{6} &=&\sup_{m\in D_{2T}}\Big|\frac{\sum_{t=1}^{m}y_{t-1}^{2}}{\sum_{t=[\tau _{0}T]+1}^{m}y_{t-1}^{2}\sum_{t=1}^{[\tau _{0}T]}y_{t-1}^{2}}\Lambda _{T}(\frac{m}{T})\Big| \\ &\leq &\bigg(\frac{1}{\sum_{t=1}^{[\tau _{0}T]}y_{t-1}^{2}}+\frac{1}{\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T]+M_{T}}y_{t-1}^{2}}\bigg)\bigg(\sup_{m\in D_{2T}}\Big|\frac{(\sum_{t=1}^{[\tau _{0}T]}y_{t-1}\varepsilon_{t})^{2}}{\sum_{t=1}^{[\tau _{0}T]}y_{t-1}^{2}}-\frac{(\sum_{t=1}^{m}y_{t-1}\varepsilon _{t})^{2}}{\sum_{t=1}^{m}y_{t-1}^{2}}\Big|\\ &&~~~~+\sup_{m\in D_{2T}}\Big|\frac{(\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}\varepsilon _{t})^{2}}{\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}^{2}}-\frac{(\sum_{t=m+1}^{T}y_{t-1}\varepsilon _{t})^{2}}{\sum_{t=m+1}^{T}y_{t-1}^{2}}\Big|\bigg) \\ &\leq &\Big(O_{p}(\frac{1}{k_{T}^{2}\beta _{1T}^{2[\tau _{0}T]}l(\eta _{T})})+O_{p}(\frac{1}{k_{T}\beta _{1T}^{2[\tau _{0}T]}l(\eta _{T})})\Big)\cdot O_{p}(l(\eta _{T})) \\ &=&o_{p}(1/k_{T}^{2}). \end{eqnarray*} These complete the proofs. 
$\hfill \Box $\newline \noindent \textbf{Proof of Lemma D.4.} \ To prove part (a), applying equation (B.2) in Chong (2001), we have \begin{eqnarray*} &&RSS_{T}(\tau _{0}-\frac{m}{T})-RSS_{T}(\tau _{0}) \\ &=&2(\beta _{2}-\beta _{1T})\bigg(\frac{\sum_{t=[\tau _{0}T]-m+1}^{[\tau_{0}T]}y_{t-1}^{2}\sum_{t=[\tau _{0}T]+1}^{T}y_{t-1}\varepsilon_{t}}{\sum_{t=[\tau _{0}T]-m+1}^{T}y_{t-1}^{2}}-\frac{\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}^{2}\sum_{t=[\tau _{0}T]-m+1}^{[\tau_{0}T]}y_{t-1}\varepsilon _{t}}{\sum_{t=[\tau _{0}T]-m+1}^{T}y_{t-1}^{2}}\bigg) \\ &&~+(\beta _{2}-\beta _{1T})^{2}\frac{\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}^{2}\sum_{t=[\tau _{0}T]-m+1}^{[\tau _{0}T]}y_{t-1}^{2}}{\sum_{t=[\tau_{0}T]-m+1}^{T}y_{t-1}^{2}}+\Lambda _{T}(\tau _{0}-\frac{m}{T}), \end{eqnarray*} where \begin{eqnarray*} &&\Lambda _{T}(\tau _{0}-\frac{m}{T}) \\ &=&\frac{\Big(\sum_{t=1}^{[\tau _{0}T]}y_{t-1}\varepsilon _{t}\Big)^{2}}{\sum_{t=1}^{[\tau _{0}T]}y_{t-1}^{2}}-\frac{\Big(\sum_{t=1}^{[\tau_{0}T]-m}y_{t-1}\varepsilon _{t}\Big)^{2}}{\sum_{t=1}^{[\tau_{0}T]-m}y_{t-1}^{2}}+\frac{\Big(\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}\varepsilon _{t}\Big)^{2}}{\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}^{2}}-\frac{\Big(\sum_{t=[\tau _{0}T]-m+1}^{T}y_{t-1}\varepsilon _{t}\Big)^{2}}{\sum_{t=[\tau_{0}T]-m+1}^{T}y_{t-1}^{2}}. \end{eqnarray*} Obviously, $\Lambda_{T}(\tau _{0}-\frac{m}{T})=O_{p}(l(\eta_{T}))$. It follows that \begin{eqnarray*} &&\frac{k_{T}}{\beta_{1T}^{2[\tau _{0}T]}l(\eta_{T})}\Big(RSS_{T}(\tau_{0}-\frac{m}{T})-RSS_{T}(\tau _{0})\Big) \\ &=&\frac{k_{T}}{\beta_{1T}^{2[\tau _{0}T]}l(\eta_{T})}\cdot \Big(-\frac{2c}{k_{T}}\Big)\bigg(\frac{O_{p}(mk_{T}\beta _{1T}^{2[\tau _{0}T]}l(\eta _{T}))O_{p}(\sqrt{Tk_{T}}\beta_{1T}^{[\tau_{0}T]}l(\eta _{T}))}{O_{p}(mk_{T}\beta _{1T}^{2[\tau_{0}T]}l(\eta _{T}))+O_{p}(Tk_{T}\beta_{1T}^{2[\tau _{0}T]}l(\eta _{T}))}+\frac{O_{p}(Tk_{T}\beta_{1T}^{2[\tau_{0}T]}l(\eta _{T}))}{O_{p}(\sqrt{k_{T}}\beta_{1T}^{[\tau_{0}T]})}\bigg) \\ &&+\frac{c^{2}}{k_{T}\beta_{1T}^{2[\tau _{0}T]}l(\eta _{T})}\cdot \frac{O_{p}(Tk_{T}\beta _{1T}^{2[\tau _{0}T]}l(\eta _{T}))}{O_{p}(mk_{T}\beta_{1T}^{2[\tau _{0}T]}l(\eta _{T}))+O_{p}(Tk_{T}\beta_{1T}^{2[\tau_{0}T]}l(\eta _{T}))}\sum_{[\tau_{0}T]-m+1}^{[\tau_{0}T]}y_{t-1}^{2}+o_{p}(1) \\ &=&\frac{c^{2}(1+o_{p}(1))}{k_{T}\beta _{1T}^{2[\tau _{0}T]}l(\eta _{T})}\sum_{[\tau _{0}T]-m+1}^{[\tau _{0}T]}y_{t-1}^{2}+o_{p}(1) \\ &\Rightarrow &c^{2}mY^{2} \end{eqnarray*}% by Lemmas C.1 and D.2 and the observations \begin{equation*} \bigg|\frac{\sum_{t=[\tau _{0}T]-m+1}^{[\tau _{0}T]}y_{t-1}\varepsilon_{t}}{\sum_{t=[\tau _{0}T]-m+1}^{T}y_{t-1}^{2}}\bigg|\leq \frac{\sqrt{\sum_{t=[\tau _{0}T]-m+1}^{[\tau _{0}T]}\varepsilon _{t}^{2}}}{\sqrt{\sum_{t=[\tau _{0}T]-m+1}^{T}y_{t-1}^{2}}}=O_{p}(\frac{1}{\sqrt{k_{T}}\beta_{1T}^{[\tau_{0}T]}}) \end{equation*} and \begin{equation*} \frac{\sum_{t=[\tau _{0}T]-m+1}^{[\tau _{0}T]}y_{t-1}^{2}}{my_{[\tau_{0}T]}^{2}}\overset{p}{\rightarrow } 1. 
\end{equation*} To prove part (b), applying equation (B.4) in Chong (2001), we have \begin{eqnarray*} &&RSS_{T}(\tau _{0}+\frac{m}{T})-RSS_{T}(\tau_{0}) \\ &=&2(\beta _{2}-\beta _{1T})\bigg(\frac{\sum_{t=1}^{[\tau_{0}T]}y_{t-1}^{2}\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T]+m}y_{t-1}\varepsilon _{t}}{\sum_{t=1}^{[\tau _{0}T]+m}y_{t-1}^{2}}-\frac{\sum_{t=[\tau_{0}T]+1}^{[\tau _{0}T]+m}y_{t-1}^{2}\sum_{t=1}^{[\tau_{0}T]}y_{t-1}\varepsilon _{t}}{\sum_{t=1}^{[\tau _{0}T]+m}y_{t-1}^{2}}\bigg)\\ &&~+(\beta_{2}-\beta_{1T})^{2}\frac{\sum_{t=[\tau_{0}T]+1}^{[\tau_{0}T]+m}y_{t-1}^{2}\sum_{t=1}^{[\tau_{0}T]}y_{t-1}^{2}}{\sum_{t=1}^{[\tau_{0}T]+m}y_{t-1}^{2}}+\Lambda _{T}(\tau _{0}+\frac{m}{T}) \end{eqnarray*} where \begin{eqnarray*} &&\Lambda _{T}(\tau _{0}+\frac{m}{T}) \\ &=&\frac{\Big(\sum_{t=1}^{[\tau _{0}T]}y_{t-1}\varepsilon _{t}\Big)^{2}}{\sum_{t=1}^{[\tau _{0}T]}y_{t-1}^{2}}-\frac{\Big(\sum_{t=1}^{[\tau _{0}T]+m}y_{t-1}\varepsilon _{t}\Big)^{2}}{\sum_{t=1}^{[\tau_{0}T]+m}y_{t-1}^{2}}+\frac{\Big(\sum_{t=[\tau _{0}T]+1}^{T}y_{t-1}\varepsilon _{t}\Big)^{2}}{\sum_{t=[\tau_{0}T]+1}^{T}y_{t-1}^{2}}-\frac{\Big(\sum_{t=[\tau _{0}T]+m+1}^{T}y_{t-1}\varepsilon _{t}\Big)^{2}}{\sum_{t=[\tau_{0}T]+m+1}^{T}y_{t-1}^{2}}. \end{eqnarray*} Then, using the facts $\Lambda _{T}(\tau _{0}+\frac{m}{T})=O_{p}(l(\eta_{T}))$, \begin{equation*} \bigg|\frac{\sum_{t=[\tau_{0}T]+1}^{[\tau_{0}T]+m}y_{t-1}\varepsilon_{t}}{\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T]+m}y_{t-1}^{2}}\bigg|\leq \frac{\sqrt{\sum_{t=[\tau_{0}T]+1}^{[\tau_{0}T]+m}\varepsilon_{t}^{2}}}{\sqrt{\sum_{t=[\tau_{0}T]+1}^{[\tau_{0}T]+m}y_{t-1}^{2}}}=O_{p}(\frac{1}{\sqrt{k_{T}}\beta_{1T}^{[\tau_{0}T]}}), \end{equation*} and \begin{equation*} \frac{\sum_{t=[\tau _{0}T]+1}^{[\tau _{0}T]+m}y_{t-1}^{2}}{my_{[\tau_{0}T]}^{2}}\overset{p}{\rightarrow } 1, \end{equation*} we have \begin{eqnarray*} &&\frac{k_{T}}{\beta_{1T}^{2[\tau _{0}T]}l(\eta_{T})}\Big(RSS_{T}(\tau_{0}+\frac{m}{T})-RSS_{T}(\tau _{0})\Big) \\ &=&\frac{k_{T}}{\beta_{1T}^{2[\tau _{0}T]}l(\eta_{T})}\cdot \Big(-\frac{2c}{k_{T}}\Big)\bigg(\frac{O_{p}(k_{T}^{2}\beta_{1T}^{2[\tau_{0}T]}l(\eta _{T}))}{\sqrt{k_{T}}\beta_{1T}^{[\tau _{0}T]}}+\frac{O_{p}(mk_{T}\beta_{1T}^{2[\tau _{0}T]}l(\eta _{T}))O_{p}(k_{T}\beta_{1T}^{[\tau _{0}T]}l(\eta _{T}))}{O_{p}(k_{T}^{2}\beta_{1T}^{2[\tau_{0}T]}l(\eta_{T}))+O_{p}(mk_{T}\beta _{1T}^{2[\tau _{0}T]}l(\eta_{T}))}\bigg) \\ &&+\frac{c^{2}}{k_{T}\beta_{1T}^{2[\tau_{0}T]}l(\eta_{T})}\cdot \frac{O_{p}(k_{T}^{2}\beta _{1T}^{2[\tau _{0}T]}l(\eta_{T}))}{O_{p}(k_{T}^{2}\beta _{1T}^{2[\tau _{0}T]}l(\eta_{T}))+O_{p}(mk_{T}\beta_{1T}^{2[\tau_{0}T]}l(\eta _{T}))}\sum_{t=[\tau_{0}T]+1}^{[\tau_{0}T]+m}y_{t-1}^{2}+o_{p}(1) \\ &=&\frac{c^{2}(1+o_{p}(1))}{k_{T}\beta_{1T}^{2[\tau_{0}T]}l(\eta_{T})}\sum_{t=[\tau _{0}T]+1}^{[\tau_{0}T]+m}y_{t-1}^{2}+o_{p}(1) \\ &\Rightarrow &c^{2}mY^{2} \end{eqnarray*} by Lemmas C.1 and D.1. $\hfill \Box $\newline \bigskip \begin{thebibliography}{99} \bibitem{} {\footnotesize \textsc{Cs\"{o}rg\H{o}, M., Szyszkowicz, B. and Wang, Q. Y.} (2003). Donsker's theorem for self-normalized partial sums processes. \emph{Annals of Probability} \textbf{31} (3): 1228-1240. } \bibitem{} {\footnotesize \textsc{Gin\'{e}, E., G\"{o}tze, F. and Mason, D. M.} (1997). When is the Student $t$-statistic asymptotically standard normal? \emph{Annals of Probability} \textbf{25} (3): 1514--1531. } \bibitem{} {\footnotesize \textsc{Huang S. H., Pang, T. X. and Weng, C. G.} (2014). Limit theory for moderate deviations from a unit root under innovations with a possibly infinite variance. 
\emph{Methodology and Computing in Applied Probability } \textbf{16} (1): 187-206. } \bibitem{} {\footnotesize \textsc{Phillips, P. C. B. and Magdalinos, T.} (2007a). Limit theory for moderate deviations from a unit root. \emph{Journal of Econometrics} \textbf{136}: 115--130. } \end{thebibliography} \end{document}