\begin{document}
{
\begin{center} \Large\bf Devinatz's moment problem: a description of all solutions. \end{center} \begin{center} \bf S.M. Zagorodnyuk \end{center}
\section{Introduction.} We shall study the following problem: to find a non-negative Borel measure $\mu$ in a strip $$ \Pi = \{ (x,\varphi):\ x\in \mathbb{R},\ -\pi\leq \varphi < \pi \}, $$ such that \begin{equation} \label{f1_1} \int_\Pi x^m e^{in\varphi} d\mu = s_{m,n},\qquad m\in \mathbb{Z}_+, n\in \mathbb{Z}, \end{equation} where $\{ s_{m,n} \}_{m\in \mathbb{Z}_+, n\in \mathbb{Z}}$ is a given sequence of complex numbers. We shall refer to this problem as {\bf the Devinatz moment problem}.
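{\bf Example. } For a simple illustration, let $\mu$ be the Lebesgue measure $dx\, d\varphi$ restricted to the rectangle $[0,1]\times [-\pi,\pi)\subset \Pi$. Then $$ s_{m,n} = \int_0^1 x^m dx \int_{-\pi}^{\pi} e^{in\varphi} d\varphi = \left\{ \begin{array}{cc} \frac{2\pi}{m+1}, & n=0\\ 0, & n\not=0 \end{array}\right.,\qquad m\in \mathbb{Z}_+,\ n\in \mathbb{Z}. $$ Of course, in the moment problem only the numbers $s_{m,n}$ are prescribed and the measure is unknown; this particular measure is chosen here only to illustrate the data of the problem.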
\noindent A.~Devinatz was the first to introduce and study this moment problem~\cite{cit_1000_D}. He obtained necessary and sufficient conditions for the solvability of the moment problem~(\ref{f1_1}) and gave a sufficient condition for the moment problem to be determinate~\cite[Theorem 4]{cit_1000_D}.
\noindent Our aim here is threefold. Firstly, we present a new proof of the Devinatz solvability criterion. Secondly, we describe canonical solutions of the Devinatz moment problem (see the definition below). Finally, we describe all solutions of the Devinatz moment problem. We shall use an abstract operator approach~\cite{cit_1500_Z} and results of Godi\v{c}, Lucenko and Shtraus~\cite{cit_2000_GL},\cite[Theorem 1]{cit_3000_GP},\cite{cit_4000_S}.
{\bf Notations. } As usual, we denote by $\mathbb{R},\mathbb{C},\mathbb{N},\mathbb{Z},\mathbb{Z}_+$ the sets of real numbers, complex numbers, positive integers, integers and non-negative integers, respectively. For a subset $S$ of the complex plane or of the real plane $\mathbb{R}^2$ we denote by $\mathfrak{B}(S)$ the set of all Borel subsets of $S$. Everywhere in this paper, all Hilbert spaces are assumed to be separable. By
$(\cdot,\cdot)_H$ and $\| \cdot \|_H$ we denote the scalar product and the norm in a Hilbert space $H$, respectively. The indices may be omitted in obvious cases. For a set $M$ in $H$, by $\overline{M}$ we mean the closure of $M$ in the norm $\| \cdot \|_H$. For $\{ x_k \}_{k\in T}$, $x_k\in H$, we write $\mathop{\rm Lin}\nolimits \{ x_k \}_{k\in T}$ for the set of linear combinations of vectors $\{ x_k \}_{k\in T}$ and $\mathop{\rm span}\nolimits \{ x_k \}_{k\in T} = \overline{ \mathop{\rm Lin}\nolimits \{ x_k \}_{k\in T} }$. Here $T := \mathbb{Z}_+ \times \mathbb{Z}$, i.e. $T$ consists of pairs $(m,n)$, $m\in \mathbb{Z}_+$, $n\in\mathbb{Z}$. The identity operator in $H$ is denoted by $E$. For an arbitrary linear operator $A$ in $H$, the operators $A^*$,$\overline{A}$,$A^{-1}$ mean its adjoint operator, its closure and its inverse
(if they exist). By $D(A)$ and $R(A)$ we mean the domain and the range of the operator $A$. By $\sigma(A)$, $\rho(A)$ we denote the spectrum of $A$ and the resolvent set of $A$, respectively. We denote by $R_z (A)$ the resolvent function of $A$, $z\in \rho(A)$. The norm of a bounded operator $A$ is denoted by $\| A \|$. By $P^H_{H_1} = P_{H_1}$ we mean the operator of orthogonal projection in $H$ on a subspace $H_1$ in $H$. By $\mathbf{B}(H)$ we denote the set of all bounded operators in $H$.
\section{Solvability.} Let a moment problem~(\ref{f1_1}) be given. Suppose that the moment problem has a solution $\mu$. Choose an arbitrary power-trigonometric polynomial $p(x,\varphi)$ of the following form: \begin{equation} \label{f1_2} \sum_{m=0}^\infty \sum_{n=-\infty}^\infty \alpha_{m,n} x^m e^{in\varphi},\qquad \alpha_{m,n}\in \mathbb{C}, \end{equation} where all but finitely many coefficients $\alpha_{m,n}$ are zero. We can write
$$ 0 \leq \int_\Pi |p(x,\varphi)|^2 d\mu = \int_\Pi \sum_{m=0}^\infty \sum_{n=-\infty}^\infty \alpha_{m,n} x^m e^{in\varphi} \overline{ \sum_{k=0}^\infty \sum_{l=-\infty}^\infty \alpha_{k,l} x^k e^{il\varphi} } d\mu $$ $$ = \sum_{m,n,k,l} \alpha_{m,n}\overline{\alpha_{k,l}} \int_\Pi x^{m+k} e^{i(n-l)\varphi} d\mu = \sum_{m,n,k,l} \alpha_{m,n}\overline{\alpha_{k,l}} s_{m+k,n-l}. $$ Thus, for arbitrary complex numbers $\alpha_{m,n}$ (all but finitely many of which are zero) we have \begin{equation} \label{f2_1} \sum_{m,k=0}^\infty \sum_{n,l=-\infty}^\infty \alpha_{m,n}\overline{\alpha_{k,l}} s_{m+k,n-l} \geq 0. \end{equation} Recall that $T = \mathbb{Z}_+\times \mathbb{Z}$, and for $t,r\in T$, $t=(m,n)$, $r=(k,l)$, we set \begin{equation} \label{f2_2} K(t,r) = K((m,n),(k,l)) = s_{m+k,n-l}. \end{equation} Thus, for arbitrary elements $t_1,t_2,...,t_n$ of $T$ and arbitrary complex numbers $\alpha_1,\alpha_2,...,\alpha_n$, with $n\in \mathbb{N}$, the following inequality holds: \begin{equation} \label{f2_3} \sum_{i,j=1}^n K(t_i,t_j) \alpha_{i} \overline{\alpha_j} \geq 0. \end{equation} The latter means that $K(t,r)$ is a positive matrix in the sense of E.H.~Moore \cite[p.344]{cit_5000_A}.
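This positivity can be seen concretely in the illustrative example above: there the kernel~(\ref{f2_2}) is $K((m,n),(k,l)) = \frac{2\pi}{m+k+1}$ for $n=l$ and $0$ for $n\not=l$, and, using $\frac{1}{m+k+1} = \int_0^1 x^{m+k} dx$, the form in~(\ref{f2_1}) becomes $$ \sum_{m,k=0}^\infty \sum_{n,l=-\infty}^\infty \alpha_{m,n}\overline{\alpha_{k,l}} s_{m+k,n-l} = 2\pi \sum_{n=-\infty}^\infty \int_0^1 \left| \sum_{m=0}^\infty \alpha_{m,n} x^m \right|^2 dx \geq 0. $$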
Suppose now that a Devinatz moment problem is given and conditions~(\ref{f2_1}) (or, what is the same, conditions~(\ref{f2_3})) hold. Let us show that the moment problem has a solution. We shall use the following important fact (e.g.~\cite[pp.361-363]{cit_6000_AG}). \begin{thm} \label{t2_1} Let $K = K(t,r)$ be a positive matrix on $T=\mathbb{Z}_+\times \mathbb{Z}$. Then there exist a separable Hilbert space $H$ with a scalar product $(\cdot,\cdot)$ and a sequence $\{ x_t \}_{t\in T}$ in $H$, such that \begin{equation} \label{f2_4} K(t,r) = (x_t,x_r),\qquad t,r\in T, \end{equation} and $\mathop{\rm span}\nolimits\{ x_t \}_{t\in T} = H$. \end{thm} {\bf Proof. } Consider an arbitrary infinite-dimensional linear vector space $V$ (for example, we can choose a space of complex sequences $(u_n)_{n\in \mathbb{N}}$, $u_n\in \mathbb{C}$). Let $X = \{ x_t \}_{t\in T}$ be an arbitrary infinite sequence of linearly independent elements in $V$ which is indexed by elements of $T$. Set $L_X = \mathop{\rm Lin}\nolimits\{ x_t \}_{t\in T}$. Introduce the following functional: \begin{equation} \label{f2_5} [x,y] = \sum_{t,r\in T} K(t,r) a_t\overline{b_r}, \end{equation} for $x,y\in L_X$, $$ x=\sum_{t\in T} a_t x_t,\quad y=\sum_{r\in T} b_r x_r,\quad a_t,b_r\in \mathbb{C}. $$ Here all but finitely many of the coefficients $a_t,b_r$ are zero.
\noindent The set $L_X$ with $[\cdot,\cdot]$ is a pre-Hilbert space. Factorizing by the set of elements of zero seminorm and taking the completion, we obtain the required space $H$ (\cite[p. 10-11]{cit_7000_B}). $\Box$
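{\bf Remark. } If the kernel~(\ref{f2_2}) comes from a solution $\mu$ of the moment problem, a concrete realization of the pair $(H,\{ x_t \}_{t\in T})$ in Theorem~\ref{t2_1} may be obtained by taking for $H$ the closure in $L^2_\mu$ of the linear span of the functions $x^m e^{in\varphi}$ and setting $x_{m,n} := x^m e^{in\varphi}$; indeed, $$ (x_{m,n},x_{k,l})_{L^2_\mu} = \int_\Pi x^{m+k} e^{i(n-l)\varphi} d\mu = s_{m+k,n-l} = K((m,n),(k,l)). $$ A realization of this kind appears, via the map $V$, in the proof of Theorem~\ref{t3_1} below.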
By applying this theorem we get that there exist a Hilbert space $H$ and a sequence $\{ x_{m,n} \}_{m\in \mathbb{Z}_+, n\in \mathbb{Z}}$, $x_{m,n}\in H$, such that \begin{equation} \label{f2_6} (x_{m,n}, x_{k,l})_H = K((m,n),(k,l)),\qquad m,k\in \mathbb{Z}_+,\ n,l\in \mathbb{Z}. \end{equation} Set $L = \mathop{\rm Lin}\nolimits\{ x_{m,n} \}_{(m,n)\in T}$. We introduce the following operators \begin{equation} \label{f2_7} A_0 x = \sum_{(m,n)\in T} \alpha_{m,n} x_{m+1,n}, \end{equation} \begin{equation} \label{f2_8} B_0 x = \sum_{(m,n)\in T} \alpha_{m,n} x_{m,n+1}, \end{equation} where \begin{equation} \label{f2_9} x = \sum_{(m,n)\in T} \alpha_{m,n} x_{m,n} \in L. \end{equation} We should show that these definitions are correct. Indeed, suppose that the element $x$ in~(\ref{f2_9}) has another representation: \begin{equation} \label{f2_10} x = \sum_{(k,l)\in T} \beta_{k,l} x_{k,l}. \end{equation} We can write $$ \left( \sum_{(m,n)\in T} \alpha_{m,n} x_{m+1,n}, x_{a,b} \right) = \sum_{(m,n)\in T} \alpha_{m,n} K((m+1,n),(a,b)) $$ $$= \sum_{(m,n)\in T} \alpha_{m,n} s_{m+1+a,n-b} = \sum_{(m,n)\in T} \alpha_{m,n} K((m,n),(a+1,b)) $$ $$ = \left(\sum_{(m,n)\in T} \alpha_{m,n} x_{m,n}, x_{a+1,b} \right) = (x,x_{a+1,b}), $$ for arbitrary $(a,b)\in T$. In the same manner we get $$ \left(\sum_{(k,l)\in T} \beta_{k,l} x_{k+1,l}, x_{a,b} \right) = (x,x_{a+1,b}). $$ Since $\mathop{\rm span}\nolimits\{ x_{a,b} \}_{(a,b)\in T} = H$, we get $$ \sum_{(m,n)\in T} \alpha_{m,n} x_{m+1,n} = \sum_{(k,l)\in T} \beta_{k,l} x_{k+1,l}. $$ Thus, the operator $A_0$ is defined correctly.
\noindent We can write
$$ \left\| \sum_{(m,n)\in T} (\alpha_{m,n}-\beta_{m,n}) x_{m,n+1} \right\|^2 $$ $$= \left( \sum_{(m,n)\in T} (\alpha_{m,n}-\beta_{m,n}) x_{m,n+1}, \sum_{(k,l)\in T} (\alpha_{k,l}-\beta_{k,l}) x_{k,l+1} \right) $$ $$ = \sum_{(m,n),(k,l)\in T} (\alpha_{m,n}-\beta_{m,n}) \overline{(\alpha_{k,l}-\beta_{k,l})} K((m,n+1),(k,l+1)) $$ $$= \sum_{(m,n),(k,l)\in T} (\alpha_{m,n}-\beta_{m,n}) \overline{(\alpha_{k,l}-\beta_{k,l})} K((m,n),(k,l)) $$ $$= \left( \sum_{(m,n)\in T} (\alpha_{m,n}-\beta_{m,n}) x_{m,n}, \sum_{(k,l)\in T} (\alpha_{k,l}-\beta_{k,l}) x_{k,l} \right) = 0. $$ Consequently, the operator $B_0$ is defined correctly, as well.
Choose an arbitrary $y = \sum_{(a,b)\in T} \gamma_{a,b} x_{a,b} \in L$. We have $$ (A_0 x,y) = \sum_{m,n,a,b} \alpha_{m,n}\overline{\gamma_{a,b}} (x_{m+1,n},x_{a,b}) = \sum_{m,n,a,b} \alpha_{m,n}\overline{\gamma_{a,b}} K((m+1,n),(a,b)) $$ $$ = \sum_{m,n,a,b} \alpha_{m,n}\overline{\gamma_{a,b}} K((m,n),(a+1,b)) =
\sum_{m,n,a,b} \alpha_{m,n}\overline{\gamma_{a,b}} (x_{m,n},x_{a+1,b}) = (x,A_0 y). $$ Thus, $A_0$ is a symmetric operator. We denote its closure by $A$. On the other hand, we have $$ (B_0 x,B_0 y) = \sum_{m,n,a,b} \alpha_{m,n}\overline{\gamma_{a,b}} (x_{m,n+1},x_{a,b+1}) = \sum_{m,n,a,b} \alpha_{m,n}\overline{\gamma_{a,b}} K((m,n+1),(a,b+1)) $$ $$ = \sum_{m,n,a,b} \alpha_{m,n}\overline{\gamma_{a,b}} K((m,n),(a,b)) =
\sum_{m,n,a,b} \alpha_{m,n}\overline{\gamma_{a,b}} (x_{m,n},x_{a,b}) = (x,y). $$ In particular, this means that $B_0$ is bounded. By continuity we extend $B_0$ to a bounded operator $B$ such that $$ (Bx,By) = (x,y),\qquad x,y\in H. $$ Since $R(B_0)=L$ and $B_0$ has a bounded inverse, we have $R(B)=H$. Thus, $B$ is a unitary operator in $H$.
Notice that operators $A_0$ and $B_0$ commute. It is straightforward to check that $A$ and $B$ commute: \begin{equation} \label{f2_11} AB x = BA x,\qquad x\in D(A). \end{equation} Consider the following operator: \begin{equation} \label{f2_12} J_0 x = \sum_{(m,n)\in T} \overline{\alpha_{m,n}} x_{m,-n}, \end{equation} where \begin{equation} \label{f2_13} x = \sum_{(m,n)\in T} \alpha_{m,n} x_{m,n} \in L. \end{equation} Let us check that this definition is correct. Consider another representation for $x$ as in~(\ref{f2_10}). Then
$$ \left\| \sum_{(m,n)\in T} (\overline{\alpha_{m,n}} - \overline{\beta_{m,n}}) x_{m,-n} \right\|^2 $$ $$= \left( \sum_{(m,n)\in T} \overline{ (\alpha_{m,n}-\beta_{m,n}) } x_{m,-n}, \sum_{(k,l)\in T} \overline{ (\alpha_{k,l}-\beta_{k,l}) } x_{k,-l} \right) $$ $$ = \sum_{(m,n),(k,l)\in T} \overline{(\alpha_{m,n}-\beta_{m,n})} (\alpha_{k,l}-\beta_{k,l}) K((m,-n),(k,-l)) $$ $$= \overline{ \sum_{(m,n),(k,l)\in T} (\alpha_{m,n}-\beta_{m,n}) \overline{(\alpha_{k,l}-\beta_{k,l})} K((m,n),(k,l)) } $$ $$= \overline{ \left( \sum_{(m,n)\in T} (\alpha_{m,n}-\beta_{m,n}) x_{m,n}, \sum_{(k,l)\in T} (\alpha_{k,l}-\beta_{k,l}) x_{k,l} \right) } = 0. $$ Thus, the definition of $J_0$ is correct. For an arbitrary $y = \sum_{(a,b)\in T} \gamma_{a,b} x_{a,b} \in L$ we can write $$ (J_0 x,J_0 y) = \sum_{m,n,a,b} \overline{\alpha_{m,n}}\gamma_{a,b} (x_{m,-n},x_{a,-b}) = \sum_{m,n,a,b} \overline{\alpha_{m,n}}\gamma_{a,b} K((m,-n),(a,-b)) $$ $$ = \sum_{m,n,a,b} \overline{\alpha_{m,n}} \gamma_{a,b} K((a,b),(m,n)) =
\sum_{m,n,a,b} \overline{\alpha_{m,n}}\gamma_{a,b} (x_{a,b},x_{m,n}) = (y,x). $$ In particular, this implies that $J_0$ is bounded. By continuity we extend $J_0$ to a bounded antilinear operator $J$ such that $$ (Jx,Jy) = (y,x),\qquad x,y\in H. $$ Moreover, we get $J^2 = E_H$. Consequently, $J$ is a conjugation in $H$ (\cite{cit_8000_S}).
\noindent Notice that $J_0$ commutes with $A_0$. It is easy to check that \begin{equation} \label{f2_14} AJ x = JA x,\qquad x\in D(A). \end{equation} On the other hand, we have $J_0 B_0 = B_0^{-1} J_0$. By continuity we get \begin{equation} \label{f2_15} JB = B^{-1}J. \end{equation} Consider the Cayley transformation of the operator $A$: \begin{equation} \label{f2_16} V_A := (A+iE_H)(A-iE_H)^{-1}, \end{equation} and set \begin{equation} \label{f2_17} H_1 := \Delta_A(i),\ H_2 := H\ominus H_1,\ H_3:= \Delta_A(-i),\ H_4 := H\ominus H_3. \end{equation} Here $\Delta_A(z) := (A-zE_H) D(A)$, $z\in \mathbb{C}\backslash \mathbb{R}$. \begin{prop} \label{p2_1} The operator $B$ reduces the subspaces $H_i$, $1\leq i\leq 4$: \begin{equation} \label{f2_18} BH_i = H_i,\qquad 1\leq i\leq 4. \end{equation} Moreover, the following equality holds: \begin{equation} \label{f2_19} BV_Ax = V_ABx,\qquad x\in H_1. \end{equation} \end{prop} {\bf Proof. } Choose an arbitrary $x\in \Delta_A(z)$, $x=(A-zE_H)f_A$, $f_A\in D(A)$, $z\in \mathbb{C}\backslash \mathbb{R}$. By~(\ref{f2_11}) we get $$ Bx = BAf_A - zBf_A = ABf_A - zBf_A = (A-zE_H)Bf_A\in \Delta_A(z). $$ In particular, we have $BH_1\subseteq H_1$, $BH_3\subseteq H_3$. Notice that $B_0^{-1}A_0 = A_0 B_0^{-1}$. It is a straightforward calculation to check that \begin{equation} \label{f2_20} AB^{-1} x = B^{-1}A x,\qquad x\in D(A). \end{equation} Repeating the above argument with $B^{-1}$ instead of $B$ we get $B^{-1}H_1\subseteq H_1$, $B^{-1}H_3\subseteq H_3$, and therefore $H_1\subseteq BH_1$, $H_3\subseteq BH_3$. Consequently, the operator $B$ reduces the subspaces $H_1$ and $H_3$. It follows directly that $B$ reduces $H_2$ and $H_4$, as well.
\noindent Since $$ (A-iE_H) Bx = B(A-iE_H)x,\qquad x\in D(A), $$ for arbitrary $y\in H_1$, $y = (A-iE_H)x_A$, $x_A\in D(A)$, we have $$ (A-iE_H) B (A-iE_H)^{-1} y = B y; $$ $$ B (A-iE_H)^{-1} y = (A-iE_H)^{-1} B y,\qquad y\in H_1, $$ and~(\ref{f2_19}) follows. $\Box$
Our aim here is to construct a unitary operator $U$ in $H$, $U\supset V_A$, which commutes with $B$. Choose an arbitrary $x\in H$, $x= x_{H_1} + x_{H_2}$. For an operator $U$ of the required type by~Proposition~\ref{p2_1} we could write: $$ BU x = BV_Ax_{H_1} + BU x_{H_2} = V_ABx_{H_1} + BU x_{H_2}, $$ $$ UB x = UB x_{H_1} + UB x_{H_2} = V_ABx_{H_1} + UB x_{H_2}. $$ So, it is enough to find an isometric operator $U_{2,4}$ which maps $H_2$ onto $H_4$, and commutes with $B$: \begin{equation} \label{f2_21} B U_{2,4} x = U_{2,4}B x,\qquad x\in H_2. \end{equation} Moreover, all operators $U$ of the required type have the following form: \begin{equation} \label{f2_22} U = V_A \oplus U_{2,4}, \end{equation} where $U_{2,4}$ is an isometric operator which maps $H_2$ onto $H_4$, and commutes with $B$.
\noindent We shall denote the operator $B$ restricted to $H_i$ by $B_{H_i}$, $1\leq i\leq 4$. Notice that \begin{equation} \label{f2_23} A^* J x= JA^* x,\qquad x\in D(A^*). \end{equation} Indeed, for arbitrary $f_A\in D(A)$ and $g_{A^*}\in D(A^*)$ we can write $$ \overline{ (Af_A,Jg_{A^*}) } = (JAf_A, g_{A^*}) = (AJf_A, g_{A^*}) = (Jf_A, A^*g_{A^*}) $$ $$ = \overline{ (f_A,JA^*g_{A^*}) }, $$ and~(\ref{f2_23}) follows.
\noindent Choose an arbitrary $x\in H_2$. We have $$ A^* x = -i x, $$ and therefore $$ A^* Jx = JA^* x = ix. $$ Thus, we have $$ JH_2 \subseteq H_4. $$ In a similar manner we get $$ JH_4 \subseteq H_2, $$ and therefore \begin{equation} \label{f2_24} JH_2 = H_4,\quad JH_4 = H_2. \end{equation} By the Godi\v{c}-Lucenko Theorem (\cite{cit_2000_GL},\cite[Theorem 1]{cit_3000_GP}) we have a representation: \begin{equation} \label{f2_25} B_{H_2} = KL, \end{equation} where $K$ and $L$ are some conjugations in $H_2$. We set \begin{equation} \label{f2_26} U_{2,4} := JK. \end{equation} From~(\ref{f2_24}) it follows that $U_{2,4}$ maps isometrically $H_2$ onto $H_4$. Notice that \begin{equation} \label{f2_27} U_{2,4}^{-1} := KJ. \end{equation} Using relation~(\ref{f2_15}) we get $$ U_{2,4} B_{H_2} U_{2,4}^{-1} x = JK KL KJ x = J LK J x = J B_{H_2}^{-1} J x $$ $$ = JB^{-1}J x = B x = B_{H_4} x,\qquad x\in H_4. $$ Therefore relation~(\ref{f2_21}) is true.
We define an operator $U$ by~(\ref{f2_22}) and define \begin{equation} \label{f2_28} A_U := i(U+E_H)(U-E_H)^{-1} = iE_H + 2i(U-E_H)^{-1}. \end{equation} The inverse Cayley transformation $A_U$ is correctly defined since $1$ is not in the point spectrum of $U$. Indeed, $V_A$ is the Cayley transformation of a symmetric operator, while the deficiency subspaces $H_2$ and $H_4$ have trivial intersection. Let \begin{equation} \label{f2_29} A_U = \int_\mathbb{R} s dE(s),\quad B = \int_{ [-\pi,\pi) } e^{i\varphi} dF(\varphi), \end{equation} where $E(s)$ and $F(\varphi)$ are the spectral measures of $A_U$ and $B$, respectively. These measures are defined on $\mathfrak{B}(\mathbb{R})$ and $\mathfrak{B}([-\pi,\pi))$, respectively (\cite{cit_9000_BS}). Since $U$ and $B$ commute, we get that $E(s)$ and $F(\varphi)$ commute, as well. By an induction argument we have $$ x_{m,n} = A^m x_{0,n},\qquad m\in \mathbb{Z}_+,\ n\in \mathbb{Z}, $$ and $$ x_{0,n} = B^n x_{0,0},\qquad n\in \mathbb{Z}. $$ Therefore we have \begin{equation} \label{f2_30} x_{m,n} = A^m B^n x_{0,0},\qquad m\in \mathbb{Z}_+,\ n\in \mathbb{Z}. \end{equation} We can write $$ x_{m,n} = \int_\mathbb{R} s^m dE(s) \int_{ [-\pi,\pi) } e^{in\varphi} dF(\varphi) x_{0,0} = \int_\Pi s^m e^{in\varphi} d(E\times F) x_{0,0}, $$ where $E\times F$ is the product spectral measure on $\mathfrak{B}(\Pi)$. Then \begin{equation} \label{f2_31} s_{m,n} = (x_{m,n},x_{0,0})_H = \int_\Pi s^m e^{in\varphi} d((E\times F) x_{0,0}, x_{0,0})_H,\quad (m,n)\in T. \end{equation} The measure $\mu := ((E\times F) x_{0,0}, x_{0,0})_H$ is a non-negative Borel measure on $\Pi$ and relation~(\ref{f2_31}) shows that $\mu$ is a solution of the Devinatz moment problem.
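{\bf Remark. } In the computation between~(\ref{f2_30}) and~(\ref{f2_31}) it is used that $A_U\supseteq A$. This is verified by a routine calculation, which we include for convenience: for $f\in D(A)$ set $g := (A-iE_H)f\in H_1$; then $Ug = V_A g = (A+iE_H)f$, hence $(U-E_H)g = 2if$ and $(U+E_H)g = 2Af$, so that, by~(\ref{f2_28}), $$ A_U f = i(U+E_H)(U-E_H)^{-1} f = \frac{1}{2}(U+E_H)g = Af. $$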
Thus, we have obtained a new proof of the following criterion. \begin{thm} \label{t2_2} Let a Devinatz moment problem~(\ref{f1_1}) be given. This problem has a solution if and only if conditions~(\ref{f2_1}) hold for arbitrary complex numbers $\alpha_{m,n}$ such that all but finitely many of them are zero. \end{thm} {\bf Remark. } The original proof of Devinatz used the theory of reproducing kernel Hilbert spaces (RKHS). In particular, he used properties of the RKHS corresponding to the product of two positive matrices and the inner structure of the RKHS corresponding to the moment problem. We have used an abstract approach with the Godi\v{c}-Lucenko Theorem and basic facts of standard operator theory.
\section{Canonical solutions. A set of all solutions.} Let a moment problem~(\ref{f1_1}) be given. Construct a Hilbert space $H$ and operators $A,B,J$ as in the previous Section. Let $\widetilde A\supseteq A$ be a self-adjoint extension of $A$ in a Hilbert space $\widetilde H\supseteq H$. Let $R_z(\widetilde A)$, $z\in \mathbb{C}\backslash \mathbb{R}$, be the resolvent function of $\widetilde A$, and $E_{\widetilde A}$ be its spectral measure. Recall that the function \begin{equation} \label{f3_1} \mathbf{R}_z(A) := P^{\widetilde H}_H R_z(\widetilde A),\qquad z\in \mathbb{C}\backslash \mathbb{R}, \end{equation} is said to be a generalized resolvent of $A$. The function \begin{equation} \label{f3_2} \mathbf{E}_A (\delta) := P^{\widetilde H}_H E_{\widetilde A} (\delta),\qquad \delta\in \mathfrak{B}(\mathbb{R}), \end{equation} is said to be a spectral measure of $A$. There exists a one-to-one correspondence between generalized resolvents and spectral measures established by the following relation~\cite{cit_6000_AG}: \begin{equation} \label{f3_3} (\mathbf{R}_z(A) x,y)_H = \int_{\mathbb{R}} \frac{1}{t-z} d(\mathbf{E}_A x,y)_H,\qquad x,y\in H. \end{equation} We shall reduce the Devinatz moment problem to the problem of finding generalized resolvents of a certain class. \begin{thm} \label{t3_1} Let a Devinatz moment problem~(\ref{f1_1}) be given and conditions~(\ref{f2_1}) hold. Consider a Hilbert space $H$ and a sequence $\{ x_{m,n} \}_{m\in \mathbb{Z}_+, n\in \mathbb{Z}}$, $x_{m,n}\in H$, such that relation~(\ref{f2_6}) holds where $K$ is defined by~(\ref{f2_2}). Consider the operators $A_0$,$B_0$ defined by~(\ref{f2_7}),(\ref{f2_8}) on $L = \mathop{\rm Lin}\nolimits\{ x_{m,n} \}_{(m,n)\in T}$. Let $A=\overline{A_0}$, $B=\overline{B_0}$. Let $\mu$ be an arbitrary solution of the moment problem. Then it has the following form: \begin{equation} \label{f3_4} \mu (\delta)= ((\mathbf{E}\times F)(\delta) x_{0,0}, x_{0,0})_H,\qquad \delta\in \mathfrak{B}(\Pi), \end{equation} where $F$ is the spectral measure of $B$, $\mathbf{E}$ is a spectral measure of $A$ which commutes with $F$. By $((\mathbf{E}\times F)(\delta) x_{0,0}, x_{0,0})_H$ we mean the non-negative Borel measure on $\Pi$ which is obtained by the Lebesgue continuation procedure from the following non-negative measure on rectangles \begin{equation} \label{f3_5} ((\mathbf{E}\times F)(I_x\times I_\varphi) x_{0,0}, x_{0,0})_H := ( \mathbf{E}(I_x) F(I_\varphi) x_{0,0}, x_{0,0})_H, \end{equation} where $I_x\subset \mathbb{R}$, $I_\varphi\subseteq [-\pi,\pi)$ are arbitrary intervals.
\noindent On the other hand, for an arbitrary spectral measure $\mathbf{E}$ of $A$ which commutes with the spectral measure $F$ of $B$, relation~(\ref{f3_4}) defines a solution of the moment problem~(\ref{f1_1}).
\noindent Moreover, the correspondence between the spectral measures of $A$ which commute with the spectral measure of $B$ and solutions of the Devinatz moment problem is bijective. \end{thm} {\bf Remark. } The measure in~(\ref{f3_5}) is non-negative. Indeed, for arbitrary intervals $I_x\subset \mathbb{R}$, $I_\varphi\subseteq [-\pi,\pi)$, we can write $$ \left( \mathbf{E}(I_x) F(I_\varphi) x_{0,0}, x_{0,0} \right)_H = \left( F(I_\varphi) \mathbf{E}(I_x) F(I_\varphi) x_{0,0}, x_{0,0} \right)_H $$ $$ = \left( \mathbf{E}(I_x) F(I_\varphi) x_{0,0}, F(I_\varphi) x_{0,0} \right)_H = \left( \widehat E(I_x) F(I_\varphi) x_{0,0}, \widehat E(I_x) F(I_\varphi) x_{0,0} \right)_{\widehat H} \geq 0, $$ where $\widehat E$ is the spectral function of a self-adjoint extension $\widehat A\supseteq A$ in a Hilbert space $\widehat H\supseteq H$ such that $\mathbf{E} = P^{\widehat H}_H \widehat E$. The measure in~(\ref{f3_5}) is additive. If $I_\varphi = I_{1,\varphi}\cup I_{2,\varphi}$, $I_{1,\varphi}\cap I_{2,\varphi} = \emptyset$, then $$ \left( \mathbf{E}(I_x) F(I_\varphi) x_{0,0}, x_{0,0} \right)_H = \left( F( I_{1,\varphi}\cup I_{2,\varphi} )\mathbf{E}(I_x) x_{0,0}, x_{0,0} \right)_H $$ $$ = \left( F(I_{1,\varphi})\mathbf{E}(I_x) x_{0,0}, x_{0,0} \right)_H + \left( F(I_{2,\varphi})\mathbf{E}(I_x) x_{0,0}, x_{0,0} \right)_H. $$ The case $I_x = I_{1,x}\cup I_{2,x}$ is analogous. Moreover, repeating the standard arguments~\cite[Chapter 5, Theorem 2, p. 254-255]{cit_9500_KF} we conclude that the measure in~(\ref{f3_5}) is $\sigma$-additive. Thus, it possesses the (unique) Lebesgue continuation to a (finite) non-negative Borel measure on $\Pi$.
{\bf Proof. } Consider a Hilbert space $H$ and operators $A$,$B$ as in the statement of the Theorem. Let $F$ be the spectral measure of $B$. Let $\mu$ be an arbitrary solution of the moment problem~(\ref{f1_1}). Consider the space $L^2_\mu$ of complex functions on $\Pi$ which are square integrable with respect to the measure $\mu$. The scalar product and the norm are given by $$ (f,g)_\mu = \int_\Pi f(x,\varphi) \overline{ g(x,\varphi) } d\mu,\quad
\|f\|_\mu = \left( (f,f)_\mu \right)^{ \frac{1}{2} },\quad f,g\in L^2_\mu. $$ Consider the following operators: \begin{equation} \label{f3_6} A_\mu f(x,\varphi) = xf(x,\varphi),\qquad D(A_\mu) = \{ f\in L^2_\mu:\ xf(x,\varphi)\in L^2_\mu \}, \end{equation} \begin{equation} \label{f3_7} B_\mu f(x,\varphi) = e^{i\varphi} f(x,\varphi),\qquad D(B_\mu) = L^2_\mu. \end{equation} The operator $A_\mu$ is self-adjoint and the operator $B_\mu$ is unitary. Moreover, these operators commute and therefore the spectral measure $E_\mu$ of $A_\mu$ and the spectral measure $F_\mu$ of $B_\mu$ commute, as well.
\noindent Let $p(x,\varphi)$ be a (power-trigonometric) polynomial of the form~(\ref{f1_2}) and $q(x,\varphi)$ be a (power-trigonometric) polynomial of the form~(\ref{f1_2}) with $\beta_{m,n}\in \mathbb{C}$ instead of $\alpha_{m,n}$. Then $$ (p,q)_\mu = \sum_{(m,n)\in T, (k,l)\in T} \alpha_{m,n}\overline{ \beta_{k,l} } \int_\Pi x^{m+k} e^{i(n-l)\varphi} d\mu $$ $$ = \sum_{(m,n)\in T, (k,l)\in T} \alpha_{m,n}\overline{ \beta_{k,l} } s_{m+k,n-l}. $$ On the other hand, we can write $$ \left( \sum_{(m,n)\in T} \alpha_{m,n} x_{m,n}, \sum_{(k,l)\in T} \beta_{k,l} x_{k,l} \right)_H = \sum_{(m,n)\in T, (k,l)\in T} \alpha_{m,n}\overline{ \beta_{k,l} } (x_{m,n},x_{k,l})_H $$ $$ = \sum_{(m,n)\in T, (k,l)\in T} \alpha_{m,n}\overline{ \beta_{k,l} } K((m,n),(k,l)) = \sum_{(m,n)\in T, (k,l)\in T} \alpha_{m,n}\overline{ \beta_{k,l} } s_{m+k,n-l}. $$ Therefore \begin{equation} \label{f3_8} (p,q)_\mu = \left( \sum_{(m,n)\in T} \alpha_{m,n} x_{m,n}, \sum_{(k,l)\in T} \beta_{k,l} x_{k,l} \right)_H. \end{equation} Consider the following operator: \begin{equation} \label{f3_9} V[p] = \sum_{(m,n)\in T} \alpha_{m,n} x_{m,n},\quad p=\sum_{(m,n)\in T} \alpha_{m,n} x^m e^{in\varphi}. \end{equation} Here by $[p]$ we mean the equivalence class in $L^2_\mu$ defined by $p$. If two different polynomials $p$ and $q$ belong to the same equivalence class then by~(\ref{f3_8}) we get
$$ 0 = \| p-q \|_\mu^2 = (p-q,p-q)_\mu = \left( \sum_{(m,n)\in T} (\alpha_{m,n}-\beta_{m,n}) x_{m,n}, \sum_{(k,l)\in T} (\alpha_{k,l}-\beta_{k,l}) x_{k,l} \right) $$
$$ = \left\| \sum_{(m,n)\in T} \alpha_{m,n} x_{m,n} - \sum_{(m,n)\in T} \beta_{m,n} x_{m,n} \right\|_H^2. $$ Thus, the definition of $V$ is correct. It is not hard to see that $V$ maps the set $P^2_{0,\mu}$ of (classes of) polynomials in $L^2_\mu$ onto $L$. By continuity we extend $V$ to an isometric transformation of the closure of polynomials $P^2_\mu = \overline{P^2_{0,\mu}}$ onto $H$.
\noindent Set $H_0 := L^2_\mu \ominus P^2_\mu$. Introduce the following operator: \begin{equation} \label{f3_10} U := V \oplus E_{H_0}, \end{equation} which maps isometrically $L^2_\mu$ onto $\widetilde H := H\oplus H_0$. Set \begin{equation} \label{f3_11} \widetilde A := UA_\mu U^{-1},\quad \widetilde B := UB_\mu U^{-1}. \end{equation} Notice that $$ \widetilde A x_{m,n} = UA_\mu U^{-1} x_{m,n} = UA_\mu x^m e^{in\varphi} = Ux^{m+1} e^{in\varphi} = x_{m+1,n}, $$ $$ \widetilde B x_{m,n} = UB_\mu U^{-1} x_{m,n} = UB_\mu x^m e^{in\varphi} = Ux^{m} e^{i(n+1)\varphi} = x_{m,n+1}. $$ Therefore $\widetilde A\supseteq A$ and $\widetilde B\supseteq B$. Let \begin{equation} \label{f3_12} \widetilde A = \int_\mathbb{R} s d\widetilde E(s),\quad \widetilde B = \int_{ [-\pi,\pi) } e^{i\varphi} d \widetilde F(\varphi), \end{equation} where $\widetilde E(s)$ and $\widetilde F(\varphi)$ are the spectral measures of $\widetilde A$ and $\widetilde B$, respectively. Repeating arguments after relation~(\ref{f2_29}) we obtain that \begin{equation} \label{f3_13} x_{m,n} = \widetilde A^m \widetilde B^n x_{0,0},\qquad m\in \mathbb{Z}_+,\ n\in \mathbb{Z}, \end{equation} \begin{equation} \label{f3_14} s_{m,n} = \int_\Pi s^m e^{in\varphi} d((\widetilde E\times \widetilde F) x_{0,0}, x_{0,0})_{\widetilde H},\quad (m,n)\in T, \end{equation} where $(\widetilde E\times \widetilde F)$ is the product measure of $\widetilde E$ and $\widetilde F$. Thus, the measure $\widetilde \mu := ((\widetilde E\times \widetilde F) x_{0,0}, x_{0,0})_{\widetilde H}$ is a solution of the Devinatz moment problem.
\noindent Let $I_x\subset \mathbb{R}$, $I_\varphi\subseteq [-\pi,\pi)$ be arbitrary intervals. Then $$ \widetilde \mu (I_x \times I_\varphi) = ((\widetilde E\times \widetilde F) (I_x \times I_\varphi) x_{0,0}, x_{0,0})_{\widetilde H} $$ $$ = ( \widetilde E(I_x) \widetilde F(I_\varphi) x_{0,0}, x_{0,0})_{\widetilde H} = ( P^{\widetilde H}_H \widetilde E(I_x) \widetilde F(I_\varphi) x_{0,0}, x_{0,0})_{\widetilde H} $$ $$ = ( \mathbf{E}(I_x) F(I_\varphi) x_{0,0}, x_{0,0})_{H}, $$ where $\mathbf{E}$ is the corresponding spectral measure of $A$ and $F$ is the spectral measure of $B$. Thus, the measure $\widetilde \mu$ has the form~(\ref{f3_4}) since the Lebesgue continuation is unique.
\noindent Let us show that $\widetilde \mu = \mu$. Consider the following transformation: \begin{equation} \label{f3_15} S:\ (x,\varphi) \in \Pi \mapsto \left( \mathop{\rm Arg }\nolimits \frac{x-i}{x+i}, \varphi \right) \in \Pi_0, \end{equation} where $\Pi_0 = [-\pi,\pi) \times [-\pi,\pi)$ and $\mathop{\rm Arg }\nolimits e^{iy} = y\in [-\pi,\pi)$. By virtue of the transformation $S$ we define the following measures: \begin{equation} \label{f3_16} \mu_0 (SG) := \mu (G),\quad \widetilde\mu_0 (SG) := \widetilde\mu (G),\qquad G\in \mathfrak{B}(\Pi). \end{equation} It is not hard to see that $\mu_0$ and $\widetilde\mu_0$ are non-negative measures on $\mathfrak{B}(\Pi_0)$. Then \begin{equation} \label{f3_17} \int_\Pi \left( \frac{x-i}{x+i} \right)^m e^{in\varphi} d\mu = \int_{\Pi_0} e^{im\psi} e^{in\varphi} d\mu_0, \end{equation} \begin{equation} \label{f3_18} \int_\Pi \left( \frac{x-i}{x+i} \right)^m e^{in\varphi} d\widetilde\mu = \int_{\Pi_0} e^{im\psi} e^{in\varphi} d\widetilde\mu_0,\qquad m,n\in \mathbb{Z}; \end{equation} and $$ \int_\Pi \left( \frac{x-i}{x+i} \right)^m e^{in\varphi} d\widetilde\mu = \int_\Pi \left( \frac{x-i}{x+i} \right)^m e^{in\varphi} d((\widetilde E\times \widetilde F) x_{0,0}, x_{0,0})_{\widetilde H} $$ $$ = \left( \int_\Pi \left( \frac{x-i}{x+i} \right)^m e^{in\varphi} d(\widetilde E\times \widetilde F) x_{0,0}, x_{0,0} \right)_{\widetilde H} $$ $$ = \left( \int_\mathbb{R} \left( \frac{x-i}{x+i} \right)^m d\widetilde E \int_{[-\pi,\pi)} e^{in\varphi} d\widetilde F x_{0,0}, x_{0,0} \right)_{\widetilde H} $$ $$ = \left( \left( (\widetilde A - iE_{\widetilde H})(\widetilde A + iE_{\widetilde H})^{-1} \right)^m \widetilde B^n x_{0,0}, x_{0,0} \right)_{\widetilde H} $$ $$ = \left( U^{-1}\left( (\widetilde A - iE_{\widetilde H})(\widetilde A + iE_{\widetilde H})^{-1} \right)^m \widetilde B^n U 1, 1 \right)_\mu $$ $$ = \left( \left( (A_\mu - iE_{L^2_\mu})(A_\mu + iE_{L^2_\mu})^{-1} \right)^m B_\mu^n 1, 1 \right)_\mu $$ \begin{equation} \label{f3_19}
= \int_\Pi \left( \frac{x-i}{x+i} \right)^m e^{in\varphi} d\mu,\qquad m,n\in \mathbb{Z}. \end{equation} By virtue of relations~(\ref{f3_17}),(\ref{f3_18}) and~(\ref{f3_19}) we get \begin{equation} \label{f3_20} \int_{\Pi_0} e^{im\psi} e^{in\varphi} d\mu_0 = \int_{\Pi_0} e^{im\psi} e^{in\varphi} d\widetilde\mu_0,\qquad m,n\in \mathbb{Z}. \end{equation} By the Weierstrass theorem we can approximate any continuous function by exponentials and therefore \begin{equation} \label{f3_21} \int_{\Pi_0} f(\psi) g(\varphi) d\mu_0 = \int_{\Pi_0} f(\psi) g(\varphi) d\widetilde\mu_0, \end{equation} for arbitrary continuous functions $f$ and $g$ on $[-\pi,\pi)$. In particular, we have \begin{equation} \label{f3_22} \int_{\Pi_0} \psi^n \varphi^m d\mu_0 = \int_{\Pi_0} \psi^n \varphi^m d\widetilde\mu_0,\qquad n,m\in \mathbb{Z}_+. \end{equation} However, the two-dimensional Hausdorff moment problem is determinate (\cite{cit_10000_ST}) and therefore we get $\mu_0 = \widetilde\mu_0$ and $\mu=\widetilde\mu$. Thus, we have proved that an arbitrary solution $\mu$ of the Devinatz moment problem can be represented in the form~(\ref{f3_4}).
Let us check the second assertion of the Theorem. For an arbitrary spectral measure $\mathbf{E}$ of $A$ which commutes with the spectral measure $F$ of $B$, by relation~(\ref{f3_4}) we define a non-negative Borel measure $\mu$ on $\Pi$. Let us show that the measure $\mu$ is a solution of the moment problem~(\ref{f1_1}).
\noindent Let $\widehat A$ be a self-adjoint extension of the operator $A$ in a Hilbert space $\widehat H\supseteq H$, such that $$ \mathbf{E} = P^{\widehat H}_H \widehat E, $$ where $\widehat E$ is the spectral measure of $\widehat A$. By~(\ref{f2_30}) we get $$ x_{m,n} = A^m B^n x_{0,0} = \widehat A^m B^n x_{0,0} = P^{\widehat H}_H \widehat A^m B^n x_{0,0} $$ $$ = P^{\widehat H}_H \left( \lim_{a\to +\infty} \int_{[-a,a)} x^m d\widehat E \right) \int_{[-\pi,\pi)} e^{in\varphi} dF x_{0,0} = \left( \lim_{a\to +\infty} \int_{[-a,a)} x^m d\mathbf{E} \right) $$ $$ * \int_{[-\pi,\pi)} e^{in\varphi} dF x_{0,0} = \left( \lim_{a\to +\infty} \left( \int_{[-a,a)} x^m d\mathbf{E} \int_{[-\pi,\pi)} e^{in\varphi} dF \right) \right) x_{0,0}, $$ \begin{equation} \label{f3_23} \qquad m\in \mathbb{Z}_+,\ n\in \mathbb{Z}, \end{equation} where the limits are understood in the weak operator topology. Then we choose arbitrary points $$ -a = x_0 < x_1 < ... < x_{N}=a; $$ \begin{equation} \label{f3_24}
\max_{1\leq i\leq N}|x_{i}-x_{i-1}| =: d,\quad N\in \mathbb{N}; \end{equation} $$ -\pi = \varphi_0 < \varphi_1 < ... < \varphi_{M}=\pi; $$ \begin{equation} \label{f3_25}
\max_{1\leq j\leq M}|\varphi_{j}-\varphi_{j-1}| =: r;\quad M\in \mathbb{N}. \end{equation} Set $$ C_a := \int_{[-a,a)} x^m d\mathbf{E} \int_{[-\pi,\pi)} e^{in\varphi} dF = \lim_{d\rightarrow 0} \sum_{i=1}^N x_{i-1}^m \mathbf{E}([x_{i-1},x_i)) $$ $$ * \lim_{r\rightarrow 0} \sum_{j=1}^M e^{in\varphi_{j-1}} F([\varphi_{j-1},\varphi_j)), $$ where the integral sums converge in the strong operator topology. Then $$ C_a = \lim_{d\rightarrow 0} \lim_{r\rightarrow 0} \sum_{i=1}^N x_{i-1}^m \mathbf{E}([x_{i-1},x_i)) \sum_{j=1}^M e^{in\varphi_{j-1}} F([\varphi_{j-1},\varphi_j)) $$ $$ = \lim_{d\rightarrow 0} \lim_{r\rightarrow 0} \sum_{i=1}^N \sum_{j=1}^M x_{i-1}^m e^{in\varphi_{j-1}} \mathbf{E}([x_{i-1},x_i)) F([\varphi_{j-1},\varphi_j)), $$ where the limits are understood in the strong operator topology. Then $$ (C_a x_{0,0}, x_{0,0})_H = \left( \lim_{d\rightarrow 0} \lim_{r\rightarrow 0} \sum_{i=1}^N \sum_{j=1}^M x_{i-1}^m e^{in\varphi_{j-1}} \mathbf{E}([x_{i-1},x_i)) F([\varphi_{j-1},\varphi_j)) x_{0,0}, x_{0,0} \right)_H $$ $$ = \lim_{d\rightarrow 0} \lim_{r\rightarrow 0} \sum_{i=1}^N \sum_{j=1}^M x_{i-1}^m e^{in\varphi_{j-1}} \left( \mathbf{E}([x_{i-1},x_i)) F([\varphi_{j-1},\varphi_j)) x_{0,0}, x_{0,0} \right)_H $$ $$ = \lim_{d\rightarrow 0} \lim_{r\rightarrow 0} \sum_{i=1}^N \sum_{j=1}^M x_{i-1}^m e^{in\varphi_{j-1}} \left( (\mathbf{E}\times F) ( [x_{i-1},x_i)\times [\varphi_{j-1},\varphi_j) ) x_{0,0}, x_{0,0} \right)_H $$ $$ = \lim_{d\rightarrow 0} \lim_{r\rightarrow 0} \sum_{i=1}^N \sum_{j=1}^M x_{i-1}^m e^{in\varphi_{j-1}} \mu ( [x_{i-1},x_i)\times [\varphi_{j-1},\varphi_j) ). $$
Therefore $$ (C_a x_{0,0}, x_{0,0})_H = \lim_{d\rightarrow 0} \lim_{r\rightarrow 0} \int_{[-a,a)\times[-\pi,\pi)} f_{d,r} (x,\varphi) d\mu, $$ where $f_{d,r}$ is equal to $x_{i-1}^m e^{in\varphi_{j-1}}$ on the rectangle $[x_{i-1},x_i) \times [\varphi_{j-1},\varphi_j)$, $1\leq i\leq N$, $1\leq j\leq M$.
\noindent As $r\rightarrow 0$, the simple function $f_{d,r}$ converges uniformly to the function $f_d$ which is equal to $x_{i-1}^m e^{in\varphi}$ on the rectangle $[x_{i-1},x_i) \times [\varphi_{j-1},\varphi_j)$, $1\leq i\leq N$, $1\leq j\leq M$. Then $$ (C_a x_{0,0}, x_{0,0})_H = \lim_{d\rightarrow 0} \int_{[-a,a)\times[-\pi,\pi)} f_{d} (x,\varphi) d\mu. $$ As $d\rightarrow 0$, the function $f_{d}$ converges uniformly to the function $x^m e^{in\varphi}$. Since
$|f_d|\leq a^m$, by the Lebesgue dominated convergence theorem we get \begin{equation} \label{f3_26} (C_a x_{0,0}, x_{0,0})_H = \int_{[-a,a)\times[-\pi,\pi)} x^m e^{in\varphi} d\mu. \end{equation} By virtue of relations~(\ref{f3_23}) and~(\ref{f3_26}) we get $$ s_{m,n} = (x_{m,n},x_{0,0})_H = \lim_{a\to +\infty} (C_a x_{0,0},x_{0,0})_H $$ \begin{equation} \label{f3_27} = \lim_{a\to+\infty} \int_{[-a,a)\times[-\pi,\pi)} x^m e^{in\varphi} d\mu = \int_\Pi x^m e^{in\varphi} d\mu. \end{equation} Thus, the measure $\mu$ is a solution of the Devinatz moment problem.
Let us prove the last assertion of the Theorem. Suppose to the contrary that two different spectral measures $\mathbf{E}_1$ and $\mathbf{E}_2$ of $A$ commute with the spectral measure $F$ of $B$ and produce by relation~(\ref{f3_4}) the same solution $\mu$ of the Devinatz moment problem. Choose an arbitrary $z\in \mathbb{C}\backslash \mathbb{R}$. Then $$ \int_\Pi \frac{x^m}{x-z} e^{in\varphi} d\mu = \int_\Pi \frac{x^m}{x-z} e^{in\varphi} d((\mathbf{E}_k\times F) x_{0,0}, x_{0,0})_H $$ \begin{equation} \label{f3_28} = \lim_{a\to +\infty} \int_{[-a,a)\times [-\pi,\pi)} \frac{x^m}{x-z} e^{in\varphi} d((\mathbf{E}_k\times F) x_{0,0}, x_{0,0})_H,\quad k=1,2. \end{equation} Consider arbitrary partitions of the type~(\ref{f3_24}),(\ref{f3_25}). Then $$ D_a := \int_{[-a,a)\times [-\pi,\pi)} \frac{x^m}{x-z} e^{in\varphi} d((\mathbf{E}_k\times F) x_{0,0}, x_{0,0})_H $$ $$ = \lim_{d\to 0} \lim_{r\to 0} \int_{[-a,a)\times [-\pi,\pi)} g_{z;d,r}(x,\varphi) d((\mathbf{E}_k\times F) x_{0,0}, x_{0,0})_H. $$ Here the function $g_{z;d,r}(x,\varphi)$ is equal to $\frac{x_{i-1}^m}{x_{i-1}-z} e^{in\varphi_{j-1}}$ on the rectangle $[x_{i-1},x_i) \times [\varphi_{j-1},\varphi_j)$, $1\leq i\leq N$, $1\leq j\leq M$. Then $$ D_a = \lim_{d\to 0} \lim_{r\to 0} \sum_{i=1}^N \sum_{j=1}^M \frac{ x_{i-1}^m }{ x_{i-1}-z } e^{in\varphi_{j-1}} \left( \mathbf{E}_k ([x_{i-1},x_i)) F([\varphi_{j-1},\varphi_j)) x_{0,0}, x_{0,0} \right)_H $$ $$ = \lim_{d\to 0} \lim_{r\to 0} \left( \sum_{i=1}^N \frac{ x_{i-1}^m }{ x_{i-1}-z } \mathbf{E}_k ([x_{i-1},x_i)) \sum_{j=1}^M e^{in\varphi_{j-1}} F([\varphi_{j-1},\varphi_j)) x_{0,0}, x_{0,0} \right)_H $$ $$ = \left( \int_{[-a,a)} \frac{ x^m }{ x-z } d\mathbf{E}_k \int_{[-\pi,\pi)} e^{in\varphi} dF x_{0,0}, x_{0,0} \right)_H. $$ Let $n = n_1+n_2$, $n_1,n_2\in \mathbb{Z}$. Then we can write: $$ D_a = \left( B^{n_1} \int_{[-a,a)} \frac{ x^m }{ x-z } d\mathbf{E}_k B^{n_2} x_{0,0}, x_{0,0} \right)_H $$ $$ = \left( \int_{[-a,a)} \frac{ x^m }{ x-z } d\mathbf{E}_k x_{0,n_2}, x_{0,-n_1} \right)_H. $$ By~(\ref{f3_28}) we get $$ \int_\Pi \frac{x^m}{x-z} e^{in\varphi} d\mu = \lim_{a\to +\infty} D_a = \lim_{a\to +\infty}\left( \int_{[-a,a)} \frac{ x^m }{ x-z } d \widehat{E}_k x_{0,n_2}, x_{0,-n_1} \right)_{\widehat H_k} $$ $$ = \left( \int_\mathbb{R} \frac{ x^m }{ x-z } d\widehat{E}_k x_{0,n_2}, x_{0,-n_1} \right)_{\widehat H_k} = \left( \widehat{A}_k^{m_2} R_z(\widehat{A}_k) \widehat{A}_k^{m_1} x_{0,n_2}, x_{0,-n_1} \right)_{\widehat H_k} $$ \begin{equation} \label{f3_29} = \left( R_z(\widehat{A}_k) x_{m_1,n_2}, x_{m_2,-n_1} \right)_H, \end{equation} where $m_1,m_2\in \mathbb{Z}_+:\ m_1+m_2 = m$, and $\widehat A_k$ is a self-adjoint extension of $A$ in a Hilbert space $\widehat H_k\supseteq H$ such that its spectral measure $\widehat E_k$ generates $\mathbf{E}_k$: $\mathbf{E}_k = P^{\widehat H_k}_H \widehat E_k$; $k=1,2$.
\noindent Relation~(\ref{f3_29}) shows that the generalized resolvents corresponding to $\mathbf{E}_k$, $k=1,2$, coincide. That means that the spectral measures $\mathbf{E}_1$ and $\mathbf{E}_2$ coincide. We obtained a contradiction. This completes the proof. $\Box$
\begin{dfn} \label{d3_1} We shall call a solution $\mu$ of the Devinatz moment problem~(\ref{f1_1}) {\bf canonical} if it is generated by relation~(\ref{f3_4}) where $\mathbf{E}$ is an {\bf orthogonal} spectral measure of $A$ which commutes with the spectral measure of $B$. Orthogonal spectral measures are those measures which are the spectral measures of self-adjoint extensions of $A$ inside $H$. \end{dfn} Let a moment problem~(\ref{f1_1}) be given and conditions~(\ref{f2_1}) hold. Let us describe canonical solutions of the Devinatz moment problem. In the proof of Theorem~\ref{t2_2} we have constructed one canonical solution, see relation~(\ref{f2_31}). Let $\mu$ be an arbitrary canonical solution and $\mathbf{E}$ be the corresponding orthogonal spectral measure of $A$. Let $\widetilde A$ be the self-adjoint operator in $H$ which corresponds to $\mathbf{E}$. Consider the Cayley transformation of $\widetilde A$: \begin{equation} \label{f3_30} U_{\widetilde A} = (\widetilde A + iE_H)(\widetilde A - iE_H)^{-1} \supseteq V_A, \end{equation} where $V_A$ is defined by~(\ref{f2_16}). Since $\mathbf{E}$ commutes with the spectral measure $F$ of $B$, the operator $U_{\widetilde A}$ commutes with $B$. By relation~(\ref{f2_22}) the operator $U_{\widetilde A}$ has the following form: \begin{equation} \label{f3_31} U_{\widetilde A} = V_A \oplus \widetilde U_{2,4}, \end{equation} where $\widetilde U_{2,4}$ is an isometric operator which maps $H_2$ onto $H_4$, and commutes with $B$. Let the operator $U_{2,4}$ be defined by~(\ref{f2_26}). Then the following operator \begin{equation} \label{f3_32} U_2 = U_{2,4}^{-1} \widetilde U_{2,4}, \end{equation} is a unitary operator in $H_2$ which commutes with $B_{H_2}$.
Denote by $\mathbf{S}(B;H_2)$ the set of all unitary operators in $H_2$ which commute with $B_{H_2}$. Choose an arbitrary operator $\widehat U_2\in \mathbf{S}(B;H_2)$. Define $\widehat U_{2,4}$ by the following relation: \begin{equation} \label{f3_33} \widehat U_{2,4} = U_{2,4} \widehat U_2. \end{equation} Notice that $\widehat U_{2,4}$ commutes with $B$ in the sense of~(\ref{f2_21}). Then we define the unitary operator $U = V_A \oplus \widehat U_{2,4}$ and its inverse Cayley transformation $\widehat A$, both of which commute with the operator $B$. Repeating the arguments before~(\ref{f2_31}), we get a canonical solution of the Devinatz moment problem.
\noindent Thus, all canonical solutions of the Devinatz moment problem are generated by operators $\widehat U_2\in \mathbf{S}(B;H_2)$. Notice that different operators $U',U''\in \mathbf{S}(B;H_2)$ produce different orthogonal spectral measures $\mathbf{E}',\mathbf{E}''$. By Theorem~\ref{t3_1}, these spectral measures produce different solutions of the moment problem.
Recall some definitions from~\cite{cit_9000_BS}. A pair $(Y,\mathfrak{A})$, where $Y$ is an arbitrary set and $\mathfrak{A}$ is a fixed $\sigma$-algebra of subsets of $Y$, is said to be a {\it measurable space}. A triple $(Y,\mathfrak{A},\mu)$, where $(Y,\mathfrak{A})$ is a measurable space and $\mu$ is a measure on $\mathfrak{A}$, is said to be a {\it space with a measure}.
Let $(Y,\mathfrak{A})$ be a measurable space, $\mathbf{H}$ be a Hilbert space and $\mathcal{P}=\mathcal{P}(\mathbf{H})$ be the set of all orthogonal projectors in $\mathbf{H}$. A countably additive mapping $E:\ \mathfrak{A}\rightarrow \mathcal{P}$, $E(Y) = E_{\mathbf{H}}$, is said to be a {\it spectral measure} in $\mathbf{H}$. A collection $(Y,\mathfrak{A},\mathbf{H},E)$ is said to be a {\it space with a spectral measure}. By $S(Y,E)$ one means the set of all $E$-measurable, $E$-a.e. finite complex-valued functions on $Y$.
Let $(Y,\mathfrak{A},\mu)$ be a separable space with a $\sigma$-finite measure, and suppose that to $\mu$-almost every $y\in Y$ there corresponds a Hilbert space $G(y)$. The function $N(y) = \dim G(y)$ is called the {\it dimension function}; it is supposed to be $\mu$-measurable. Let $\Omega$ be a set of vector-valued functions $g(y)$ with values in $G(y)$ which are defined $\mu$-almost everywhere and are measurable with respect to some base of measurability. The set of (equivalence classes of) such functions with the finite norm \begin{equation} \label{f3_34}
\| g \|^2_{\mathcal{H}} = \int |g(y)|^2_{G(y)} d\mu(y) <\infty \end{equation} forms a Hilbert space $\mathcal{H}$ with the scalar product given by \begin{equation} \label{f3_35} ( g_1,g_2 )_{\mathcal{H}} = \int (g_1,g_2)_{G(y)} d\mu(y). \end{equation} The space $\mathcal{H}= \mathcal{H}_{\mu,N} = \int_Y \oplus G(y) d\mu(y)$ is said to be a {\it direct integral of Hilbert spaces}. Consider the following operator \begin{equation} \label{f3_36} \mathbf{X}(\delta) g = \chi_\delta g,\qquad g\in \mathcal{H},\ \delta\in \mathfrak{A}, \end{equation} where $\chi_\delta$ is the characteristic function of the set $\delta$. The operator $\mathbf{X}$ is a spectral measure in $\mathcal{H}$.
Let $t(y)$ be a measurable operator-valued function with values in $\mathbf{B}(G(y))$ which is
defined $\mu$-a.e. and satisfies $\mu\mbox{-}\sup \|t(y)\|_{G(y)} < \infty$ (the $\mu$-essential supremum). The operator \begin{equation} \label{f3_37} T:\ g(y) \mapsto t(y)g(y), \end{equation} is said to be {\it decomposable}. It is a bounded operator in $\mathcal{H}$ which commutes with $\mathbf{X}(\delta)$, $\forall\delta\in \mathfrak{A}$. Moreover, every bounded operator in $\mathcal{H}$ which commutes with $\mathbf{X}(\delta)$, $\forall\delta\in \mathfrak{A}$, is decomposable~\cite{cit_9000_BS}. In the case $t(y) = \varphi(y)E_{G(y)}$, where $\varphi\in S(Y,\mu)$, we set $T =: Q_\varphi$. A decomposable operator is unitary if and only if the operator $t(y)$ is unitary $\mu$-a.e.
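For instance, if $Y = \{ 1,\dots,k \}$, $\mathfrak{A} = 2^Y$, $\mu$ is the counting measure and $G(y) = \mathbb{C}^{n_y}$, then $\mathcal{H} = \bigoplus_{y=1}^k \mathbb{C}^{n_y}$, the spectral measure~(\ref{f3_36}) acts by $(\mathbf{X}(\delta) g)(y) = \chi_\delta (y) g(y)$, and a bounded operator in $\mathcal{H}$ commutes with all $\mathbf{X}(\delta)$ if and only if it is block diagonal, $(Tg)(y) = t(y) g(y)$ with $t(y)\in \mathbf{B}(\mathbb{C}^{n_y})$, i.e. decomposable; such a $T$ is unitary (a contraction) if and only if every block $t(y)$ is unitary (a contraction). This elementary finite-dimensional picture serves only as an illustration of the general definitions above.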
Let us return to the study of canonical solutions. Consider the spectral measure $F_2$ of the operator $B_{H_2}$ in $H_2$. There exists an element $h\in H_2$ of the maximal type, i.e. the non-negative Borel measure \begin{equation} \label{f3_38} \mu(\delta) := (F_2(\delta)h,h),\qquad \delta\in \mathfrak{B}([-\pi,\pi)), \end{equation} has the maximal type among all such measures (generated by other elements of $H_2$). This type is said to be the {\it spectral type} of the measure $F_2$. Let $N_2$ be the multiplicity function of the measure $F_2$. Then there exists a unitary transformation $W$ of the space $H_2$ onto $\mathcal{H}=\mathcal{H}_{\mu,N_2}$ such that \begin{equation} \label{f3_39} W B_{H_2} W^{-1} = Q_{e^{iy}},\qquad W F_2(\delta) W^{-1} = \mathbf{X}(\delta). \end{equation} Notice that $\widehat U_2\in \mathbf{S}(B;H_2)$ if and only if the operator \begin{equation} \label{f3_40} V_2 := W \widehat U_2 W^{-1}, \end{equation} is unitary and commutes with $\mathbf{X}(\delta)$, $\forall\delta\in \mathfrak{B}([-\pi,\pi))$. The latter is equivalent to the condition that $V_2$ is decomposable and the values of the corresponding operator-valued function $t(y)$ are $\mu$-a.e. unitary operators. The set of all decomposable operators in $\mathcal{H}$ such that the values of the corresponding operator-valued function $t(y)$ are $\mu$-a.e. unitary operators we denote by $\mathbf{D}(B;H_2)$.
\begin{thm} \label{t3_2} Let a Devinatz moment problem~(\ref{f1_1}) be given. Under the conditions of Theorem~\ref{t3_1} all canonical solutions of the moment problem have the form~(\ref{f3_4}) where the spectral measures $\mathbf{E}$ of the operator $A$ are constructed from operators in $\mathbf{D}(B;H_2)$. Namely, for an arbitrary $V_2\in \mathbf{D}(B;H_2)$ we set $\widehat U_2 = W^{-1} V_2 W$, $\widehat U_{2,4} = U_{2,4} \widehat U_2$, $U = V_A \oplus \widehat U_{2,4}$, $\widehat A = i(U+E_H)(U-E_H)^{-1}$, and then $\mathbf{E}$ is the spectral measure of $\widehat A$.
\noindent Moreover, the correspondence between $\mathbf{D}(B;H_2)$ and a set of all canonical solutions of the Devinatz moment problem is bijective. \end{thm} {\bf Proof. } The proof follows directly from the previous considerations. $\Box$
Consider a Devinatz moment problem~(\ref{f1_1}) and suppose that conditions~(\ref{f2_1}) hold. Let us turn to a parameterization of all solutions of the moment problem. We shall use Theorem~\ref{t3_1}. Consider relation~(\ref{f3_4}). The spectral measure $\mathbf{E}$ commutes with the operator $B$. Choose an arbitrary $z\in \mathbb{C}\backslash \mathbb{R}$. By virtue of relation~(\ref{f3_3}) we can write: $$ (B\mathbf{R}_z(A) x,y)_H = (\mathbf{R}_z(A) x,B^*y)_H = \int_{\mathbb{R}} \frac{1}{t-z} d(\mathbf{E}(t) x,B^*y)_H $$ \begin{equation} \label{f3_41} = \int_{\mathbb{R}} \frac{1}{t-z} d(B\mathbf{E}(t) x,y)_H = \int_{\mathbb{R}} \frac{1}{t-z} d(\mathbf{E}(t)B x,y)_H,\qquad x,y\in H; \end{equation} \begin{equation} \label{f3_42} (\mathbf{R}_z(A) Bx,y)_H = \int_{\mathbb{R}} \frac{1}{t-z} d(\mathbf{E}(t) Bx,y)_H,\qquad x,y\in H, \end{equation} where $\mathbf{R}_z(A)$ is the generalized resolvent which corresponds to $\mathbf{E}$. Therefore we get \begin{equation} \label{f3_43} \mathbf{R}_z(A) B = B \mathbf{R}_z(A),\qquad z\in \mathbb{C}\backslash \mathbb{R}. \end{equation} On the other hand, if relation~(\ref{f3_43}) holds, then \begin{equation} \label{f3_44} \int_{\mathbb{R}} \frac{1}{t-z} d(\mathbf{E} Bx,y)_H = \int_{\mathbb{R}} \frac{1}{t-z} d(B\mathbf{E} x,y)_H,\quad x,y\in H,\ z\in \mathbb{C}\backslash \mathbb{R}. \end{equation} By the Stieltjes inversion formula~\cite{cit_10000_ST}, we obtain that $\mathbf{E}$ commutes with $B$.
\noindent We denote by $\mathbf{M}(A,B)$ a set of all generalized resolvents $\mathbf{R}_z(A)$ of $A$ which satisfy relation~(\ref{f3_43}).
Recall some known facts from~\cite{cit_4000_S} which we shall need here. Let $K$ be a closed symmetric operator in a Hilbert space $\mathbf{H}$, with the domain $D(K)$, $\overline{D(K)} = \mathbf{H}$. Set $N_\lambda = N_\lambda(K) = \mathbf{H} \ominus \Delta_K(\lambda)$, $\lambda\in \mathbb{C}\backslash \mathbb{R}$.
Consider an arbitrary bounded linear operator $C$, which maps $N_i$ into $N_{-i}$. For \begin{equation} \label{f3_45} g = f + C\psi - \psi,\qquad f\in D(K),\ \psi\in N_i, \end{equation} we set \begin{equation} \label{f3_46} K_C g = Kf + i C \psi + i \psi. \end{equation} Since the intersection of $D(K)$, $N_i$ and $N_{-i}$ consists only of the zero element, this definition is correct. Notice that $K_C$ is a part of the operator $K^*$. The operator $K_C$ is said to be a {\it quasiself-adjoint extension of the operator $K$, defined by the operator $C$}.
The following theorem can be found in~\cite[Theorem 7]{cit_4000_S}: \begin{thm} \label{t3_3} Let $K$ be a closed symmetric operator in a Hilbert space $\mathbf{H}$ with the domain $D(K)$, $\overline{D(K)} = \mathbf{H}$. All generalized resolvents of the operator $K$ have the following form: \begin{equation} \label{f3_47} \mathbf R_\lambda (K) = \left\{ \begin{array}{cc} (K_{F(\lambda)} - \lambda E_\mathbf{H})^{-1}, & \mathop{\rm Im}\nolimits\lambda > 0\\ (K_{F^*(\overline{\lambda}) } - \lambda E_\mathbf{H})^{-1}, & \mathop{\rm Im}\nolimits\lambda < 0 \end{array}\right., \end{equation}
where $F(\lambda)$ is an operator-valued function, analytic in $\mathbb{C}_+$, whose values are contractions which map $N_i(K)$ into $N_{-i}(K)$ ($\| F(\lambda) \|\leq 1$), and $K_{F(\lambda)}$ is the quasiself-adjoint extension of $K$ defined by $F(\lambda)$.
On the other hand, to any operator function $F(\lambda)$ having the above properties there corresponds, by relation~(\ref{f3_47}), a generalized resolvent of $K$. \end{thm} Notice that the correspondence between all generalized resolvents and functions $F(\lambda)$ in Theorem~\ref{t3_3} is bijective~\cite{cit_4000_S}.
Return to the study of the Devinatz moment problem. Let us describe the set $\mathbf{M}(A,B)$. Choose an arbitrary $\mathbf{R}_\lambda\in \mathbf{M}(A,B)$. By~(\ref{f3_47}) we get \begin{equation} \label{f3_48} \mathbf{R}_\lambda = (A_{F(\lambda)} - \lambda E_H)^{-1},\qquad \mathop{\rm Im}\nolimits\lambda > 0, \end{equation} where $F(\lambda)$ is an operator-valued function, analytic in $\mathbb{C}_+$, whose values are contractions mapping $H_2$ into $H_4$, and $A_{F(\lambda)}$ is the quasiself-adjoint extension of $A$ defined by $F(\lambda)$. Then $$ A_{F(\lambda)} = \mathbf{R}_\lambda^{-1} + \lambda E_H,\qquad \mathop{\rm Im}\nolimits\lambda > 0. $$ By virtue of relation~(\ref{f3_43}) we obtain \begin{equation} \label{f3_49} BA_{F(\lambda)} h = A_{F(\lambda)} B h,\qquad h\in D(A_{F(\lambda)}),\ \lambda\in \mathbb{C}_+. \end{equation} Consider the following operators \begin{equation} \label{f3_50} W_{\lambda} := (A_{F(\lambda)} + iE_H)(A_{F(\lambda)} - iE_H)^{-1} = E_H + 2i(A_{F(\lambda)} - iE_H)^{-1}, \end{equation} \begin{equation} \label{f3_51} V_A = (A +iE_H)(A - iE_H)^{-1} = E_H + 2i(A - iE_H)^{-1}, \end{equation} where $\lambda\in \mathbb{C}_+$. Notice that (\cite{cit_4000_S}) \begin{equation} \label{f3_52} W_{\lambda} = V_A \oplus F(\lambda),\qquad \lambda\in \mathbb{C}_+. \end{equation} The operator $(A_{F(\lambda)} - iE_H)^{-1}$ is defined on the whole $H$, see~\cite[p.79]{cit_4000_S}. By relation~(\ref{f3_49}) we obtain \begin{equation} \label{f3_53} B (A_{F(\lambda)} - iE_H)^{-1} h = (A_{F(\lambda)} - iE_H)^{-1} B h,\qquad h\in H,\ \lambda\in \mathbb{C}_+. \end{equation} Then \begin{equation} \label{f3_54} B W_\lambda = W_\lambda B,\qquad \lambda\in \mathbb{C}_+. \end{equation} Recall that by Proposition~\ref{p2_1} the operator $B$ reduces the subspaces $H_j$, $1\leq j\leq 4$, and $BV_A = V_A B$. If we choose an arbitrary $h\in H_2$ and apply relations~(\ref{f3_54}),(\ref{f3_52}), we get \begin{equation} \label{f3_55} B F(\lambda) = F(\lambda) B,\qquad \lambda\in \mathbb{C}_+. \end{equation} Denote by $\mathbf{F}(A,B)$ the set of all operator-valued functions, analytic in $\mathbb{C}_+$, whose values are contractions mapping $H_2$ into $H_4$ and which satisfy relation~(\ref{f3_55}). Thus, for an arbitrary $\mathbf{R}_\lambda\in \mathbf{M}(A,B)$ the corresponding function $F(\lambda)\in \mathbf{F}(A,B)$. On the other hand, choose an arbitrary $F(\lambda)\in \mathbf{F}(A,B)$. Then we derive~(\ref{f3_54}) with $W_\lambda$ defined by~(\ref{f3_50}). Hence we get~(\ref{f3_53}),(\ref{f3_49}) and therefore \begin{equation} \label{f3_56} B \mathbf{R}_\lambda = \mathbf{R}_\lambda B,\qquad \lambda\in \mathbb{C}_+. \end{equation} Taking the adjoints of both sides of the last equality, we conclude that this relation holds for all $\lambda\in \mathbb{C}\backslash \mathbb{R}$.
\noindent Consider the spectral measure $F_2$ of the operator $B_{H_2}$ in $H_2$. We have obtained relation~(\ref{f3_39}) which we shall use one more time. Notice that $F(\lambda)\in \mathbf{F}(A,B)$ if and only if the operator-valued function \begin{equation} \label{f3_57} G(\lambda) := W U_{2,4}^{-1} F(\lambda) W^{-1},\qquad \lambda\in \mathbb{C}_+, \end{equation} is analytic in $\mathbb{C}_+$ and has values which are contractions in $\mathcal{H}$ commuting with $\mathbf{X}(\delta)$, $\forall\delta\in \mathfrak{B}([-\pi,\pi))$.
This means that for an arbitrary $\lambda\in \mathbb{C}_+$ the operator $G(\lambda)$ is decomposable and the values of the corresponding operator-valued function $t(y)$ are $\mu$-a.e. contractions. A set of all decomposable operators in $\mathcal{H}$ such that the values of the corresponding operator-valued function $t(y)$ are $\mu$-a.e. contractions we denote by $\mathrm{T}(B;H_2)$. A set of all analytic in $\mathbb{C}_+$ operator-valued functions $G(\lambda)$ with values in $\mathrm{T}(B;H_2)$ we denote by $\mathbf{G}(A,B)$.
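For instance, the constant function $F(\lambda) \equiv U_{2,4}$, $\lambda\in \mathbb{C}_+$, with $U_{2,4}$ defined by~(\ref{f2_26}), belongs to $\mathbf{F}(A,B)$: its values are isometric operators which map $H_2$ onto $H_4$ (in particular, contractions), and relation~(\ref{f2_21}) gives~(\ref{f3_55}). By~(\ref{f3_57}), the corresponding function is $G(\lambda) \equiv W U_{2,4}^{-1} U_{2,4} W^{-1} = E_{\mathcal{H}}$, which is decomposable with unitary values $t(y) = E_{G(y)}$; thus the constant function $G(\lambda)\equiv E_{\mathcal{H}}$ belongs to $\mathbf{G}(A,B)$. This simple example is given only to illustrate the parameterization below.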
\begin{thm} \label{t3_4} Let a Devinatz moment problem~(\ref{f1_1}) be given. Under the conditions of Theorem~\ref{t3_1} all solutions of the moment problem have the form~(\ref{f3_4}) where the spectral measures $\mathbf{E}$ of the operator $A$ are defined by the corresponding generalized resolvents $\mathbf{R}_\lambda$ which are constructed by the following relation: \begin{equation} \label{f3_58} \mathbf{R}_\lambda = (A_{F(\lambda)} - \lambda E_H)^{-1},\qquad \mathop{\rm Im}\nolimits\lambda > 0, \end{equation} where $F(\lambda) = U_{2,4} W^{-1} G(\lambda) W$, $G(\lambda)\in \mathbf{G}(A,B)$.
\noindent Moreover, the correspondence between $\mathbf{G}(A,B)$ and a set of all solutions of the Devinatz moment problem is bijective. \end{thm} {\bf Proof. } The proof follows from the previous considerations. $\Box$
Consider an arbitrary non-negative Borel measure $\mu$ in the strip $\Pi$ which has all finite moments~(\ref{f1_1}). What can be said about the density of power-trigonometric polynomials~(\ref{f1_2}) in the corresponding space $L^2_\mu$? The measure $\mu$ is a solution of the corresponding moment problem~(\ref{f1_1}). Thus, $\mu$ admits a representation~(\ref{f3_4}) where $F$ is the spectral measure of $B$ and $\mathbf{E}$ is a spectral measure of $A$ which commutes with $F$ (the operators $A$ and $B$ in a Hilbert space $H$ are defined as above).
Suppose that (power-trigonometric) polynomials are dense in $L^2_\mu$. Repeating arguments from the beginning of the Proof of Theorem~\ref{t3_1} we see that in our case $H_0 = \{ 0 \}$ and $\widetilde A$, $\widetilde B$ are operators in $H$. Moreover, we have $\mu = ((\widetilde E\times \widetilde F) x_{0,0}, x_{0,0})_{H}$, where $\widetilde E$ is the spectral measure of $\widetilde A$, $\widetilde F = F$. Consequently, $\mu$ is a canonical solution of the Devinatz moment problem.
\noindent The converse assertion is more complicated and will be studied elsewhere.
Sergey M. Zagorodnyuk
School of Mathematics and Mechanics
Karazin Kharkiv National University
Kharkiv, 61077
Ukraine
\begin{center} \bf Devinatz's moment problem: a description of all solutions. \end{center}
\begin{center} \bf S.M. Zagorodnyuk \end{center}
In this paper we study Devinatz's moment problem: to find a non-negative Borel measure $\mu$ in a strip $\Pi = \{ (x,\varphi):\ x\in \mathbb{R},\ -\pi\leq \varphi < \pi \},$ such that $\int_\Pi x^m e^{in\varphi} d\mu = s_{m,n}$, $m\in \mathbb{Z}_+$, $n\in \mathbb{Z}$, where $\{ s_{m,n} \}_{m\in \mathbb{Z}_+, n\in \mathbb{Z}}$ is a given sequence of complex numbers. We present a new proof of the Devinatz solvability criterion for this moment problem. We obtain a parameterization of all solutions of Devinatz's moment problem. We use an abstract operator approach and results of Godi\v{c}, Lucenko and Shtraus.
Key words: moment problem, measure, generalized resolvent.
MSC 2000: 44A60, 30E05.
}
\end{document}
\begin{document}
\title{Complete Test Sets And Their Approximations}
\author{ \IEEEauthorblockN{Eugene Goldberg} \IEEEauthorblockN{\emph{[email protected]}}}
\maketitle
\begin{abstract}
We use testing to check if a combinational circuit $N$ always
evaluates to 0 (written as $N \equiv 0$). We call a set of tests
proving $N \equiv 0$ a complete test set (CTS). The conventional
point of view is that to prove $N \equiv 0$ one has to generate a
\ti{trivial} CTS. It consists of all $2^{|X|}$ input assignments
where $X$ is the set of input variables of $N$. We use the notion of
a Stable Set of Assignments (SSA) to show that one can build a
\ti{non-trivial} CTS consisting of less than $2^{|X|}$ tests. Given
an unsatisfiable CNF formula $H(W)$, an SSA of $H$ is a set of
assignments to $W$ that proves unsatisfiability of $H$. A trivial
SSA is the set of all $2^{|W|}$ assignments to $W$. Importantly,
real-life formulas can have non-trivial SSAs that are much smaller
than $2^{|W|}$. In general, construction of even non-trivial CTSs
is inefficient. We describe a much more efficient approach where
tests are extracted from an SSA built for a ``projection'' of $N$ on
a subset of variables of $N$. These tests can be viewed as an
approximation of a CTS for $N$. We give experimental results and
describe potential applications of this approach.
\end{abstract}
\section{Introduction}
Testing is an important part of verification flows. For that reason,
any progress in understanding testing and improving its quality is
of great importance. In this paper, we consider the following
problem. Given a single-output combinational circuit $N$, find a set
of input assignments (tests) proving that $N$ evaluates to 0 for
every test (written as $N \equiv 0$) or find a counterexample. We
will call a set of input assignments proving $N \equiv 0$ a
\ti{complete test set} (\ti{CTS})\footnote{\input{f7ootnote}}. We
will call the set of all possible tests a \ti{trivial
CTS}. Typically, one assumes that proving $N \equiv 0$ involves
derivation of the trivial CTS, which is infeasible in practice.
Thus, testing is used only for finding an input assignment refuting
$N \equiv 0$. We present an approach for building a non-trivial CTS
consisting only of a subset of all possible tests. In general,
finding even a non-trivial CTS for a large circuit is
impractical. We describe a much more efficient approach where an
\ti{approximation} of a CTS is generated.
The circuit $N$ above usually describes a property $\xi$ of a
multi-output combinational circuit $M$, the latter being the
\ti{real object of testing}. For instance, $\xi$ may state that $M$
never produces some output assignments. To differentiate CTSs and
their approximations from conventional test sets verifying $M$ ``as
a whole'', we will refer to the former as \ti{property-checking test
sets}. Let $\Xi :=\s{\xi_1,\dots,\xi_k}$ be the set of properties
of $M$ formulated by a designer. Assume that every property of $\Xi$
holds and $T_i$ is a test set generated to check property $\xi_i \in
\Xi$. There are at least two reasons why applying $T_i$ to $M$
makes sense. First, if $\Xi$ is \ti{incomplete}\footnote{That is $M$
can be incorrect even if all properties of $\Xi$ hold.}, a test of
$T_i$ can expose a bug, if any, breaking a property of $M$ that is
not in $\Xi$. Second, if property $\xi_i$ is defined
\ti{incorrectly}, a test of $T_i$ may expose a bug breaking the
correct version of $\xi_i$. On the other hand, if $M$ produces
proper output assignments for all tests of $T_1 \cup \dots \cup
T_k$, one gets an extra guarantee that $M$ is correct. In
Section~\ref{sec:appl}, we list some other applications of
property-checking test sets such as verification of design changes,
hitting corner cases and testing sequential circuits.
Let $N(X,Y,z)$ be a single-output combinational circuit where $X$
and $Y$ specify the sets of input and internal variables of $N$
respectively and $z$ specifies the output variable of $N$. Let
$F_N(X,Y,z)$ be a formula defining the functionality of $N$ (see
Section~\ref{sec:cts}). We will denote the set of variables of
circuit $N$ (respectively formula $H$) as \V{N} (respectively
\V{H}). Every assignment\footnote{\input{f1ootnote}} to \V{F_N}
satisfying $F_N$ corresponds to a consistent
assignment\footnote{\input{f2ootnote}} to \V{N} and vice versa. Then
the problem of proving $N \equiv 0$ reduces to showing that formula
$F_N \wedge z$ is unsatisfiable. From now on, we assume that all
formulas mentioned in this paper are \ti{propositional}. Besides, we
will assume that every formula is represented in CNF i.e. as a
conjunction of disjunctions of literals.
Our approach is based on the notion of a Stable Set of Assignments
(SSA) introduced in~\cite{ssp}. Given formula $H(W)$, an SSA of $H$
is a set $P$ of assignments to the variables of $W$ that has two
properties. First, every assignment of $P$ falsifies $H$. Second,
$P$ is a transitive closure of some neighborhood relation between
assignments (see Section~\ref{sec:ssa}). The fact that $H$ has an
SSA means that the former is unsatisfiable. Otherwise, an assignment
satisfying $H$ is generated when building its SSA. If $H$ is
unsatisfiable, the set of all $2^{|W|}$ assignments is always an SSA
of $H$. We will refer to it as \ti{trivial}. Importantly, a
real-life formula $H$ can have a lot of SSAs whose size is much less
than $2^{|W|}$. We will refer to them as \ti{non-trivial}. As we
show in Section~\ref{sec:ssa}, the fact that $P$ is an SSA of $H$ is
a \ti{structural} property of the latter. That is this property
cannot be expressed in terms of the truth table of $H$ (as opposed
to a \ti{semantic} property of $H$). For that reason, if $P$ is an
SSA for $H$, it may not be an SSA for some other formula $H'$ that
is logically equivalent to $H$. In other words, a structural
property is \ti{formula-specific}.
We show that a CTS for $N$ can be easily extracted from an SSA of
formula $F_N \wedge z$. This makes a non-trivial CTS a structural
property of circuit $N$ that cannot be expressed in terms of its
truth table. Building an SSA for a large formula is inefficient.
So, we present a procedure constructing a simpler formula $H(V)$
implied by $F_N \wedge z$ $($where $V \subseteq \V{F_N \wedge z})$ and
building an SSA of $H$. The existence of such an SSA means that $H$
(and hence $F_N \wedge z$) is unsatisfiable. So, $N \equiv 0$ holds.
A test set extracted from an SSA of $H$ can be viewed as a way to
verify a ``projection'' of $N$ on variables of $V$. On the other
hand, one can consider this set as an approximation of a CTS for
$N$.
We will refer to the procedure above as \mbox{$\mi{SemStr}$}\xspace (``\ti{Sem}antics and
\ti{Str}ucture''). \mbox{$\mi{SemStr}$}\xspace combines semantic and structural
derivations, hence the name. The semantic part of \mbox{$\mi{SemStr}$}\xspace
is\footnote{\input{f6ootnote}} to derive $H$. Its structural part
consists of constructing an SSA of $H$ thus proving that $H$ is
unsatisfiable.
The contribution of this paper is fourfold. First, we introduce the
notion of non-trivial CTSs (Section~\ref{sec:cts}). Second, we
present a method for efficient construction of property-checking
tests that are approximations of CTSs (Sections~\ref{sec:algor}
and~\ref{sec:app_cts}). Third, we describe applications of such
tests (Section~\ref{sec:appl}). Fourth, we give experimental
results showing the effectiveness of property-checking tests
(Section~\ref{sec:exper}).
\section{Stable Set Of Assignments} \label{sec:ssa}
\subsection{Definitions} We will refer to a disjunction of literals as a \ti{clause}. Let \pnt{p}\, be an assignment to a set of variables $V$. Let \pnt{p}\, falsify a clause $C$. Denote by {\boldmath \nbhd{p}{C}} the set of assignments to $V$ satisfying $C$ that are at Hamming distance 1 from \pnt{p}. (Here \ti{Nbhd} stands for ``Neighborhood''). Thus, the number of assignments in \nbhd{p}{C} is equal to that of literals in $C$. Let \pnt{q}\, be another assignment to $V$ (that may be equal to \pnt{p}). Denote by {\boldmath \Nbh{q}{p}{C}} the subset of \nbhd{p}{C} consisting only of assignments that are farther away from \pnt{q} than \pnt{p} (in terms of the Hamming distance).
\begin{example}
Let $V=\s{v_1,v_2,v_3,v_4}$ and \pnt{p}=0110. We assume that the
values are listed in \pnt{p} in the order the corresponding
variables are numbered i.e. \mbox{$v_1=0$}, $v_2=1,v_3=1,v_4=0$. Let $C= v_1
\vee \overline{v_3}$. (Note that \pnt{p} falsifies $C$.) Then
\nbhd{p}{C}=\s{\ppnt{p}{1},\ppnt{p}{2}} where \ppnt{p}{1} = 1110 and
\ppnt{p}{2}=0100. Let \pnt{q} = 0000. Note that \ppnt{p}{2} is
actually closer to \pnt{q} than \pnt{p}. So
\Nbh{q}{p}{C}=\s{\ppnt{p}{1}}. \end{example}
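For concreteness, the two neighbourhood operators can be computed directly. The following is a minimal Python sketch, assuming assignments are encoded as 0/1 tuples and clauses as lists of signed integers (so that {\tt [1, -3]} stands for $v_1 \vee \overline{v}_3$); it reproduces the example above:
\begin{verbatim}
def falsifies(p, clause):
    # p falsifies the clause iff it falsifies every literal of it
    return all(p[abs(l) - 1] != (1 if l > 0 else 0) for l in clause)

def nbhd(p, clause):
    # assignments at Hamming distance 1 from p satisfying the clause:
    # flip, for each literal, the variable of that literal
    result = []
    for l in clause:
        q = list(p)
        q[abs(l) - 1] ^= 1
        result.append(tuple(q))
    return result

def nbhd_centered(q, p, clause):
    # keep only the neighbours farther away from the center q than p
    dist = lambda a, b: sum(x != y for x, y in zip(a, b))
    return [r for r in nbhd(p, clause) if dist(q, r) > dist(q, p)]

# p = 0110, C = v1 OR (NOT v3), q = 0000
p, q, C = (0, 1, 1, 0), (0, 0, 0, 0), [1, -3]
print(falsifies(p, C), nbhd(p, C))  # True [(1, 1, 1, 0), (0, 1, 0, 0)]
print(nbhd_centered(q, p, C))       # [(1, 1, 1, 0)]
\end{verbatim}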
\begin{definition}
\label{def:ac_fun}
Let $H$ be a formula\footnote{\input{f3ootnote}} specified by a set
of clauses \s{C_1,\dots,C_k}. Let $P$ =
\s{\ppnt{p}{1},\dots,\ppnt{p}{m}} be a set of assignments to \V{H}
such that every $\ppnt{p}{i} \in P$ falsifies $H$. Let \mbox{$\Phi$}\xspace denote a
mapping $P \rightarrow H$ where \ac{\ppnt{p}{i}} is a clause $C$ of
$H$ falsified by \ppnt{p}{i}. We will call \mbox{$\Phi$}\xspace an \tb{AC-mapping}
where ``AC'' stands for ``Assignment-to-Clause''. We will denote the
range of \mbox{$\Phi$}\xspace as \ac{P}. (So, a clause $C$ of $H$ is in \ac{P} iff
there is an assignment $\ppnt{p}{i} \in P$ such that $C =
\mbox{$\Phi$}\xspace(\ppnt{p}{i})$.) \end{definition}
\begin{definition}
\label{def:ssa} Let $H$ be a formula specified by a set of clauses \s{C_1,\dots,C_k}. Let $P$ = \s{\ppnt{p}{1},\dots,\ppnt{p}{m}} be a set of assignments to \V{H}. $P$ is called a \tb{Stable Set of
Assignments}\footnote{\input{f5ootnote}} (\tb{SSA}) of $H$ with \tb{center} $\sub{p}{init} \in P$ if there is an AC-mapping \mbox{$\Phi$}\xspace such that for every $\ppnt{p}{i}\in P$, $\NNbhd{p}{p}{i}{C} \subseteq P$ holds where $C = \ac{\ppnt{p}{i}}$. \end{definition}
\begin{example}
\label{exmp:ssa}
Let $H$ consist of four clauses: $C_1 = v_1 \vee v_2 \vee v_3$, $C_2
= \overline{v}_1$, $C_3 = \overline{v}_2$, $C_4 = \overline{v}_3$.
Let $P =\s{\ppnt{p}{1},\ppnt{p}{2},\ppnt{p}{3},\ppnt{p}{4}}$ where
$\ppnt{p}{1} = 000$, $\ppnt{p}{2} = 100$, $\ppnt{p}{3} = 010$,
$\ppnt{p}{4}=001$. Let \mbox{$\Phi$}\xspace be an AC-mapping specified as
$\ac{\ppnt{p}{i}} = C_i, i = 1,\dots,4$. Since $\ppnt{p}{i}$
falsifies $C_i$, $i=1,\dots,4$,~~\mbox{$\Phi$}\xspace is a correct AC-mapping. $P$ is
an SSA of $H$ with respect to \mbox{$\Phi$}\xspace and center
\sub{p}{init}=\ppnt{p}{1}. Indeed,
\NNbhd{p}{p}{1}{C_1}=\s{\ppnt{p}{2},\ppnt{p}{3},\ppnt{p}{4}} where
$C_1 = \ac{\ppnt{p}{1}}$ and \NNbhd{p}{p}{i}{C_i} = $\emptyset$,
where $C_i = \ac{\ppnt{p}{i}}$, $i=2,3,4$. Thus,
$\mi{Nbhd}(\sub{p}{init},\ppnt{p}{i},\ac{\ppnt{p}{i}}) \subseteq P$,
$i=1,\dots,4$. \end{example}
\subsection{SSAs and satisfiability of a formula} \label{ssec:ssa_sat} \begin{proposition} \label{prop:ssa}
Formula $H$ is unsatisfiable iff it has an SSA. \end{proposition}
The proof\footnote{The proof of Proposition~\ref{prop:ssa} presented
in report~\cite{cmpl_tst} is inaccurate.} is given in Appendix~\ref{app:proofs}. A similar proposition is proved in~\cite{ssp} for ``uncentered'' SSAs (see Footnote~\ref{ftn:ssa}).
\input{b0uild_path.fig}
The set of all assignments to \V{H} forms the \ti{trivial} uncentered SSA of $H$. Example~\ref{exmp:ssa} shows a \ti{non-trivial} SSA. The fact that formula $H$ has a non-trivial SSA $P$ is its \ti{structural} property. That is one cannot check whether $P$ is an SSA of $H$ if only the truth table of $H$ is known. In particular, $P$ may not be an SSA of a formula $H'$ logically equivalent to $H$.
\input{b1uild_ssa.fig}
The relation between SSAs and satisfiability can be explained as follows. Suppose that formula $H$ is satisfiable. Let \sub{p}{init} be an arbitrary assignment to \V{H} and \pnt{s} be a satisfying assignment that is the closest to \sub{p}{init} in terms of the Hamming distance. Let $P$ be the set of all assignments to \V{H} that falsify $H$ and \mbox{$\Phi$}\xspace be an AC-mapping from $P$ to $H$. Then \pnt{s} can be reached from \sub{p}{init} by procedure \ti{BuildPath} shown in Figure~\ref{fig:bld_path}. It generates a sequence of assignments $\ppnt{p}{1},\dots,\ppnt{p}{i}$ where \ppnt{p}{1} = \sub{p}{init} and \ppnt{p}{i}=\pnt{s}. First, \ti{BuildPath} checks if the current assignment \ppnt{p}{i} equals \pnt{s}. If so, then \pnt{s} has been reached. Otherwise, \ti{BuildPath} uses clause $C=\ac{\ppnt{p}{i}}$ to generate the next assignment. Since \pnt{s} satisfies $C$, there is a variable $v \in \V{C}$ that is assigned differently in \ppnt{p}{i} and \pnt{s}. \ti{BuildPath} generates a new assignment \ppnt{p}{i+1} obtained from \ppnt{p}{i} by flipping the value of $v$.
\ti{BuildPath} reaches \pnt{s} in $k$ steps where $k$ is the Hamming distance between \sub{p}{init} and \pnt{s}. Importantly, \ti{BuildPath} reaches \pnt{s} for \ti{any} AC-mapping. Let $P$ be an SSA of $H$ with respect to center \sub{p}{init} and AC-mapping \mbox{$\Phi$}\xspace. Then if \ti{BuildPath} starts with \sub{p}{init} and uses \mbox{$\Phi$}\xspace as an AC-mapping, it can reach only assignments of $P$. Since every assignment of $P$ falsifies $H$, no satisfying assignment can be reached.
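The walk itself is a few lines of code. Below is a minimal Python sketch of \ti{BuildPath}, assuming the same toy encoding as above and an arbitrary AC-mapping passed in as a function:
\begin{verbatim}
def build_path(p_init, s, ac_map):
    # walk from the center p_init to the satisfying assignment s; every
    # step flips a variable of the clause ac_map(p) on which the current
    # point p still disagrees with s
    path, p = [p_init], p_init
    while p != s:
        clause = ac_map(p)                       # clause falsified by p
        v = next(abs(l) - 1 for l in clause
                 if p[abs(l) - 1] != s[abs(l) - 1])
        p = tuple(b ^ 1 if i == v else b for i, b in enumerate(p))
        path.append(p)
    return path

# toy run: H = (v1 OR v2), center 00, closest satisfying assignment 10
print(build_path((0, 0), (1, 0), lambda p: [1, 2]))  # [(0, 0), (1, 0)]
\end{verbatim}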
A procedure for generation of SSAs called \ti{BuildSSA} is shown in Figure~\ref{fig:bld_ssa}. It accepts formula $H$ and outputs either a satisfying assignment or an SSA of $H$, center \sub{p}{init} and AC-mapping \mbox{$\Phi$}\xspace. \ti{BuildSSA} maintains two sets of assignments denoted as $E$ and $Q$. Set $E$ contains the examined assignments i.e. those whose neighborhood is already explored. Set $Q$ specifies assignments that are queued to be examined. $Q$ is initialized with an assignment \sub{p}{init} and $E$ is originally empty. \ti{BuildSSA} updates $E$ and $Q$ in a \ti{while} loop. First, \ti{BuildSSA} picks an assignment \pnt{p} of $Q$ and checks if it satisfies $H$. If so, \pnt{p} is returned as a satisfying assignment. Otherwise, \ti{BuildSSA} removes \pnt{p}~\,from $Q$ and picks a clause $C$ of $H$ falsified by \pnt{p}. The assignments of $\Nbhd{p}{p}{C}$ that are not in $E$ are added to $Q$. After that, \pnt{p} is added to $E$ as an examined assignment, pair $(\pnt{p},C)$ is added to \mbox{$\Phi$}\xspace and a new iteration begins. If $Q$ is empty, $E$ is an SSA with center \sub{p}{init} and AC-mapping \mbox{$\Phi$}\xspace.
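Below is a self-contained Python sketch of \ti{BuildSSA} under the same encoding (a sketch only, not the actual implementation); running it on the formula of the example above returns the four-element SSA found there:
\begin{verbatim}
def build_ssa(clauses, n, p_init):
    # clauses: lists of signed ints; assignments: 0/1 tuples of length n
    def falsified_clause(p):
        for c in clauses:
            if all(p[abs(l) - 1] != (1 if l > 0 else 0) for l in c):
                return c
        return None                              # p satisfies the formula

    def dist(a, b):
        return sum(x != y for x, y in zip(a, b))

    examined, ac_map, queue = set(), {}, [p_init]
    while queue:
        p = queue.pop()
        if p in examined:
            continue
        c = falsified_clause(p)
        if c is None:
            return ('sat', p)
        ac_map[p] = tuple(c)
        for l in c:                              # Nbhd(p_init, p, C)
            q = list(p); q[abs(l) - 1] ^= 1; q = tuple(q)
            if dist(p_init, q) > dist(p_init, p) and q not in examined:
                queue.append(q)
        examined.add(p)
    return ('ssa', examined, ac_map)

# C1 = v1+v2+v3, C2 = -v1, C3 = -v2, C4 = -v3
print(build_ssa([[1, 2, 3], [-1], [-2], [-3]], 3, (0, 0, 0))[1])
# {(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)}
\end{verbatim}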
\section{Complete Test Sets} \label{sec:cts}
\input{m0iter.fig}
Let $N(X,Y,z)$ be a single-output combinational circuit where $X$ and $Y$ specify the input and internal variables of $N$ respectively and $z$ specifies the output variable of $N$. Let $N$ consist of gates $G_1,\dots,G_k$. Then $N$ can be represented as $F_N = F_{G_1} \wedge \dots \wedge F_{G_k}$ where $F_{G_i},i=1,\dots,k$ is a CNF formula specifying the consistent assignments of gate $G_i$. Proving $N \equiv 0$ reduces to showing that formula $F_N \wedge z$ is unsatisfiable.
\begin{example}
\label{exmp:circ}
Circuit $N$ shown in Figure~\ref{fig:miter} represents equivalence
checking of expressions $(x_1 \vee x_2) \wedge x_3$ and $(x_1 \wedge
x_3) \vee (x_2 \wedge x_3)$ specified by gates $G_1,G_2$ and
$G_3,G_4,G_5$ respectively. Formula $F_N$ is equal to $F_{G_1} \wedge
\dots \wedge F_{G_6}$ where, for instance, $F_{G_1} = C_1 \wedge C_2
\wedge C_3$, $C_1 = x_1 \vee x_2 \vee \overline{y}_1$, $C_2 =
\overline{x}_1 \vee y_1$, $C_3 = \overline{x}_2 \vee y_1$. Every
satisfying assignment to \V{F_{G_1}} corresponds to a consistent
assignment to gate $G_1$ and vice versa. For instance,
$(x_1=0,x_2=0,y_1=0)$ satisfies $F_{G_1}$ and is a consistent
assignment to $G_1$ since the latter is an OR gate. Formula $F_N
\wedge z$ is unsatisfiable due to functional equivalence of
expressions $(x_1 \vee x_2) \wedge x_3$ and $(x_1 \wedge x_3) \vee
(x_2 \wedge x_3)$. Thus, $N \equiv 0$. \end{example}
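For illustration, the clauses of $F_{G_1}$ from the example above can be produced by the standard gate encodings sketched below in Python; the variable numbering $x_1\!=\!1$, $x_2\!=\!2$, $x_3\!=\!3$, $y_1\!=\!4$ is an assumption made only for this sketch:
\begin{verbatim}
def cnf_or(a, b, o):
    # o <-> (a OR b):  (a v b v -o), (-a v o), (-b v o)
    return [[a, b, -o], [-a, o], [-b, o]]

def cnf_and(a, b, o):
    # o <-> (a AND b): (-a v -b v o), (a v -o), (b v -o)
    return [[-a, -b, o], [a, -o], [b, -o]]

# gate G1: y1 = x1 OR x2, with x1=1, x2=2, y1=4
print(cnf_or(1, 2, 4))  # [[1, 2, -4], [-1, 4], [-2, 4]]  (= C1, C2, C3)
\end{verbatim}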
Let \pnt{x} be a test i.e. an assignment to $X$. The set of assignments to \V{N} sharing the same assignment \pnt{x} to $X$ forms a cube of $2^{|Y|+1}$ assignments. $($Recall that $\V{N} = X \cup Y \cup \s{z}).$ Denote this set as \cube{x}. Only one assignment of \cube{x} specifies the correct execution trace produced by $N$ under \pnt{x}. All other assignments can be viewed as ``erroneous'' traces under test \pnt{x}.
\input{s2em_str.fig}
\begin{definition} \label{def:cts}
Let $T$ be a set of tests \s{\ppnt{x}{1},\dots,\ppnt{x}{k}} where $k
\leq 2^{|X|}$. We will say that $T$ is a \tb{Complete Test Set
(CTS)} for $N$ if $\Cube{x}{1} \cup \dots \cup \Cube{x}{k}$
contains an SSA for formula $F_N \wedge z$. \end{definition}
If $T$ satisfies Definition~\ref{def:cts}, set $\Cube{x}{1} \cup \dots \cup \Cube{x}{k}$ ``contains'' a proof that $N \equiv 0$ and so $T$
can be viewed as complete. If $k = 2^{|X|}$, $T$ is the \ti{trivial} CTS. In this case, $\Cube{x}{1} \cup \dots \cup \Cube{x}{k}$ contains the trivial SSA consisting of all assignments to \V{F_N \wedge
z}. Given an SSA $P$ of $F_N \wedge z$, one can easily generate a CTS by extracting all different assignments to $X$ that are present in the assignments of $P$.
\begin{example}
Formula $F_N \wedge z$ of Example~\ref{exmp:circ} has an~SSA of 21
assignments to \V{F_N\!\wedge\!z}. They have only~5 different
assignments to\,\,$X\!=\!\s{x_1,\!x_2,\!x_3}$. The set
$\{101,\!100,\!011,\!010,\!000\}$ of\,\,those assignments\,is a CTS for $N$. \end{example}
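The extraction of a test set from an SSA described above is a simple projection onto $X$. A Python sketch follows; the assignments in $P$ below are made up purely for illustration:
\begin{verbatim}
def extract_cts(P, x_indices):
    # keep the distinct projections of the SSA assignments onto X
    return {tuple(p[i] for i in x_indices) for p in P}

# hypothetical SSA fragment over (x1, x2, x3, y1, ..., z), X = first 3 vars
P = [(0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
     (1, 0, 0, 1, 0, 1, 0, 0, 0, 0),
     (1, 0, 0, 1, 1, 1, 0, 0, 0, 0)]
print(extract_cts(P, [0, 1, 2]))   # {(0, 0, 0), (1, 0, 0)}
\end{verbatim}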
Definition~\ref{def:cts} is meant for circuits that are not ``too redundant''. Highly-redundant circuits are discussed in report \cite{cmpl_tst} and Appendix~\ref{app:red}.
\section{\mbox{$\mi{SemStr}$}\xspace Procedure} \label{sec:algor}
\subsection{Motivation}
Building an SSA for a large formula is inefficient. So, constructing
a CTS of $N$ from an SSA of $F_N \wedge z$ is impractical. To
address this problem, we introduce a procedure called \mbox{$\mi{SemStr}$}\xspace (short
for ``Semantics and Structure''). Given formula $F_N \wedge z$ and a
set of variables $V \subseteq \V{F_N \wedge z}$, \mbox{$\mi{SemStr}$}\xspace generates a
simpler formula $H(V)$ implied by $F_N \wedge z$ at the same time
trying to build an SSA for $H$. If \mbox{$\mi{SemStr}$}\xspace succeeds in constructing
such an SSA, formula $H$ is unsatisfiable and so is $F_N \wedge z$.
Then a set of tests $T$ is extracted from this SSA. As we show in
Subsection~\ref{ssec:approx}, one can view $T$ as an approximation
of a CTS for $N$ (if $X \subseteq V$) or an ``approximation of
approximation'' of a CTS (if $X \not\subseteq V$).
\begin{example}
Consider the circuit $N$ of Figure~\ref{fig:miter} where
$X=\s{x_1,x_2,x_3}$. Assume that $V = X$. Application of \mbox{$\mi{SemStr}$}\xspace to
$F_N \wedge z$ produces $H(X)= (\overline{x}_1 \vee \overline{x}_3)
\wedge (\overline{x}_2 \vee \overline{x}_3) \wedge (x_1 \vee x_2)
\wedge x_3$. \mbox{$\mi{SemStr}$}\xspace also generates an SSA of $H$ of four assignments
to $X$: \s{000,001,011,101} with center \sub{p}{init}=000. (We omit
the AC-mapping here.) These assignments form an approximation of CTS
for $N$. \end{example}
\subsection{Description of \mbox{$\mi{SemStr}$}\xspace} The pseudocode of \mbox{$\mi{SemStr}$}\xspace is shown in Figure~\ref{fig:sem_str}. \mbox{$\mi{SemStr}$}\xspace accepts formula $G$ (in our case, $G := F_N \wedge z$) and a set of variables $V \subseteq \V{G}$. \mbox{$\mi{SemStr}$}\xspace outputs an assignment satisfying $G$ or formula $H(V)$ implied by $G$ and an SSA of $H$. Originally, the set of clauses $H$ is empty. $H$ is computed in a \ti{while} loop. First, \mbox{$\mi{SemStr}$}\xspace tries to build an SSA for the current formula $H$ by calling \ti{BuildSSA} (line 3). If $H$ is unsatisfiable, \ti{BuildSSA} computes an SSA $P$ returned by \mbox{$\mi{SemStr}$}\xspace (line 5). Otherwise, \ti{BuildSSA} returns an assignment \pnt{v} satisfying $H$. In this case, \mbox{$\mi{SemStr}$}\xspace calls procedure \ti{GenCls} to build a clause $C$ falsified by \pnt{v}. Clause $C$ is obtained by resolving clauses of $G$ on the variables of $\V{G} \setminus V$. (Hence $C$ is implied by $G$.) If \pnt{v} can be extended to an assignment \pnt{s} satisfying $G$, \mbox{$\mi{SemStr}$}\xspace terminates (lines 7-8). Otherwise, $C$ is added to $H$ and a new iteration begins.
\input{g3en_clause.fig}
Procedure \ti{GenCls} is shown in Figure~\ref{fig:gen_cls}. First, \ti{GenCls} generates formula \cof{G}{v} obtained from $G$ by discarding clauses satisfied by \pnt{v} and removing literals falsified by \pnt{v}. Then \ti{GenCls} checks if there is an assignment \pnt{s} satisfying \cof{G}{v}. If so, $\pnt{s} \cup \pnt{v}$ is returned as an assignment satisfying $G$. Otherwise, a proof $R$ of unsatisfiability of \cof{G}{v} is produced. Then \ti{GenCls} forms a set $V' \subseteq V$. A variable $w$ is in $V'$ iff a clause of \cof{G}{v} is used in proof $R$ and its parent clause from $G$ has a literal of $w$ falsified by \pnt{v}. Finally, clause $C$ is generated as a disjunction of literals of $V'$ falsified by \pnt{v}. By construction, clause $C$ is implied by $G$ and falsified by \pnt{v}.
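To make the overall control flow concrete, below is a self-contained toy Python sketch of the \mbox{$\mi{SemStr}$}\xspace loop. Two deliberate simplifications are flagged in the comments: exhaustive enumeration of the assignments to $V$ stands in for \ti{BuildSSA}, and the \ti{GenCls} step returns the full clause over $V$ falsified by \pnt{v} instead of the smaller clause over $V'$ extracted from an unsatisfiability proof. The sketch only illustrates the loop, not the actual implementation:
\begin{verbatim}
from itertools import product

def satisfies(bits, clauses):
    # bits: full 0/1 tuple indexed by (variable - 1)
    return all(any(bits[abs(l) - 1] == (1 if l > 0 else 0) for l in c)
               for c in clauses)

def sat_on_v(assign, clauses):
    # assign: dict {variable: value}; clauses mention only variables of V
    return all(any(assign[abs(l)] == (1 if l > 0 else 0) for l in c)
               for c in clauses)

def sem_str(G, n, V):
    H = []                                   # clauses over V implied by G
    while True:
        # stand-in for BuildSSA: find an assignment to V satisfying H
        v = None
        for vals in product((0, 1), repeat=len(V)):
            cand = dict(zip(V, vals))
            if sat_on_v(cand, H):
                v = cand
                break
        if v is None:                        # H (hence G) is unsatisfiable;
            return 'unsat', H                # an SSA of H can now be built
        # try to extend v to an assignment satisfying G
        for bits in product((0, 1), repeat=n):
            if all(bits[w - 1] == v[w] for w in V) and satisfies(bits, G):
                return 'sat', bits
        # naive GenCls: block v by the full clause over V falsified by it
        H.append([-w if v[w] == 1 else w for w in V])

# tiny demo: an unsatisfiable G over two variables, projected on V = [1, 2]
G = [[1, 2], [-1, 2], [1, -2], [-1, -2]]
print(sem_str(G, 2, [1, 2])[0])   # unsat
\end{verbatim}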
\section{Building Approximations Of CTS} \label{sec:app_cts}
\subsection{Two kinds of approximations of CTSs} \label{ssec:approx} \input{g4en_tests.fig}
As before, let $H(V)$ denote a formula implied by $F_N \wedge z$ that is generated by \mbox{$\mi{SemStr}$}\xspace and $P$ denote an SSA for $H$. Projections of $N$ can be of two kinds depending on whether $X \subseteq V$ holds. Let $X \subseteq V$ hold and $T$ be the test set extracted from $P$ as described in Section~\ref{sec:cts}. That is $T$ consists of all different assignments to $X$ present in the assignments of $P$. On one hand, using the reasoning of Section~\ref{sec:cts} one can show that $T$ is a CTS for projection of $N$ on $V$. On the other hand, since $H(V)$ is essentially an abstraction of $F_N \wedge z$, set $T$ is an approximation of a CTS for $N$. For that reason, we will refer to $T$ as a \tb{CTS\textsuperscript{a}} of $N$ where superscript ``a'' stands for ``approximation''.
Now assume that $X \not\subseteq V$ holds. Generation of a test set $T$ from $P$ for this case is described in the next subsection. The set $T$ can be viewed as an approximation of a set $T'$ built for projection of $N$ on set $V \cup X$. Since $T'$ is a CTS\textsuperscript{a}\xspace for $N$, we will refer to $T$ as a \tb{CTS\textsuperscript{aa}} where ``aa'' stands for ``approximation of approximation''.
\subsection{Construction of CTS\textsuperscript{aa}\xspace}
Consider extraction of a test set $T$ from SSA $P$ of formula $H(V)$ when $X \not\subseteq V$. Since $V$, in general, contains internal variables\footnote{If the special case $V \subset X$ holds, every
assignment of $P$ can be easily turned into a test by assigning
values to variables of $X \setminus V$ (e.g. randomly).}
of $N$, translation of $P$ to a test set $T$ needs a special procedure \ti{GenTests} shown in Figure~\ref{fig:gen_tests}. For every assignment \pnt{v} of $P$, \ti{GenTests} checks if formula $F_N$ is satisfiable under assignment \pnt{v} (i.e. if there exists a test under which $N$ assigns \pnt{v} to $V$). If so, an assignment \pnt{x} to $X$ is extracted from the satisfying assignment and added to $T$ as a test. Otherwise, \ti{GenTests} runs a \ti{for} loop (lines 8-13) of $\mi{Tries}$ iterations. In every iteration, \ti{GenTests} relaxes $F_N$ by removing the clauses specifying a small subset of gates picked randomly. If the relaxed version of $F_N$ is satisfiable, a test is extracted from the satisfying assignment and added to $T$.
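A toy Python sketch of \ti{GenTests} is given below; the SAT checks are brute force, and the relaxation of lines 8-13 is reduced to dropping the clauses of one randomly chosen gate, so this is an illustration of the idea rather than the actual procedure:
\begin{verbatim}
import random
from itertools import product

def find_model(clauses, n, fixed):
    # brute force: a 0/1 tuple satisfying `clauses` and agreeing with
    # `fixed` (a dict {variable: value}), or None
    for bits in product((0, 1), repeat=n):
        if all(bits[w - 1] == b for w, b in fixed.items()) and \
           all(any(bits[abs(l) - 1] == (1 if l > 0 else 0) for l in c)
               for c in clauses):
            return bits
    return None

def gen_tests(P, gate_cnfs, n, X, tries=5):
    # P: assignments to V as dicts; gate_cnfs: one clause list per gate
    T = set()
    for v in P:
        F = [c for g in gate_cnfs for c in g]
        s = find_model(F, n, v)
        for _ in range(tries):
            if s is not None:
                break
            drop = random.randrange(len(gate_cnfs))   # relax one gate
            relaxed = [c for i, g in enumerate(gate_cnfs) if i != drop
                       for c in g]
            s = find_model(relaxed, n, v)
        if s is not None:
            T.add(tuple(s[x - 1] for x in X))         # the extracted test
    return T

# demo on the single OR gate y1 = x1 OR x2 (variables x1=1, x2=2, y1=3)
or_gate = [[1, 2, -3], [-1, 3], [-2, 3]]
print(gen_tests([{3: 1}], [or_gate], 3, [1, 2]))  # e.g. {(0, 1)}
\end{verbatim}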
\subsection{Finding a set of variables to project on} \label{ssec:int_cut}
\input{i3nt_cut.fig}
Intuitively, a good choice of the set $V$ to project $N$ on is a (small) coherent subset of variables of $N$ reflecting its structure and/or semantics. One obvious choice of $V$ is the set $X$ of input variables of $N$. In this section, we describe generation of a set $V$ whose variables form an internal cut of $N$ denoted as \ti{Cut}. Procedure \ti{GenCut} for generation of set \ti{Cut} consisting of \ti{Size} gates is shown in Figure~\ref{fig:int_cut}. Set $V$ is formed from output variables of the cut gates.
The current cut is specified by $\mi{Gts} \cup \mi{Inps}$. Set \ti{Gts} is initialized with the output gate \Sub{G}{out} of circuit $N$ and \ti{Inps} is originally empty. \ti{GenCut} computes the \ti{depth} of every gate of \ti{Gts}. The depth of \Sub{G}{out} is set to 0. Set \ti{Gts} is processed in a \ti{while} loop (lines 5-15). In every iteration, a gate of the smallest depth is picked from \ti{Gts}. Then \ti{GenCut} removes gate $G$ from \ti{Gts} and examines the fan-in gates of $G$ (lines 9-15). Let $G'$ be a fan-in gate of $G$ that has not been seen yet and is not a primary input of $N$. Then the depth of $G'$ is set to that of $G$ plus 1 and $G'$ is added to \ti{Gts}. If $G'$ is a primary input of $N$ it is added to \ti{Inps}.
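The following Python sketch of \ti{GenCut} operates on a netlist given as a fan-in map. The netlist in the demo is a reconstruction of the circuit of Figure~\ref{fig:miter} (the gate names and fan-ins are assumptions), and the stopping rule, growing the cut until it contains \ti{Size} nodes, is also an assumption:
\begin{verbatim}
import heapq

def gen_cut(fanins, inputs, out_gate, size):
    gts = [(0, out_gate)]                    # (depth, gate), depth(out)=0
    inps, seen = set(), {out_gate}
    while gts and len(gts) + len(inps) < size:
        depth, g = heapq.heappop(gts)        # gate of smallest depth
        for g2 in fanins[g]:
            if g2 in seen:
                continue
            seen.add(g2)
            if g2 in inputs:                 # fan-in is a primary input
                inps.add(g2)
            else:                            # fan-in is an internal gate
                heapq.heappush(gts, (depth + 1, g2))
    return [g for _, g in gts] + sorted(inps)

fanins = {"G1": ["x1", "x2"], "G2": ["G1", "x3"], "G3": ["x1", "x3"],
          "G4": ["x2", "x3"], "G5": ["G3", "G4"], "G6": ["G2", "G5"]}
print(gen_cut(fanins, {"x1", "x2", "x3"}, "G6", 3))  # ['G5', 'G1', 'x3']
\end{verbatim}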
\section{Applications Of Property-Checking Tests} \label{sec:appl} Given a multi-output circuit $M$, traditional testing is used to verify $M$ ``as a whole''. In this paper, we describe generation of a test set meant for checking a \ti{particular property} of $M$ specified by a single-output circuit $N$. In this section, we present some applications of property-checking test sets.
\subsection{Testing properties specified by similar circuits} \label{ssec:des_changes}
Let $N$ be a single-output circuit and $T$ be a test set generated when proving $N\equiv0$. Let $N^*$ be a circuit that is similar to $N$. (For instance, $N$ can specify a property of a circuit $M$ whereas $N^*$ specifies the same property after a modification of $M$.) Then one can use $T$ to verify if $N^* \equiv 0$. Since $T$ is generated for a similar circuit $N$, there is a good chance that it contains a counterexample to $N^* \equiv 0$, if any. (Of course, the fact that $N^*$ evaluates to 0 for all tests of $T$ does not mean that $N^* \equiv 0$ even if $T$ is a CTS for $N$). In Subsection~\ref{ssec:bug}, we give experimental evidence supporting the observation above.
Assuming that $N \equiv 0$ was proved formally, whether $N^* \equiv 0$ holds can be verified formally too. So applying tests of $T$ to $N^*$ can be viewed as a ``light'' verification procedure for exposing bugs. On the other hand, one can re-use the test set $T$ in situations where the necessity to apply a formal tool is overlooked or formal methods are not powerful enough. Let $N$ specify a property $\xi$ of a \ti{component} of a design $D$. Suppose that this component is modified under the assumption that preserving $\xi$ is not necessary any more. By applying $T$ to $D$ one can invoke behaviors that break $\xi$ and expose a bug in $D$, if any, caused by ignoring $\xi$. If $D$ is a large design, finding such a bug by formal verification may not be possible.
\subsection{Verification of corner cases} \label{ssec:corners}
\input{s3ubcirc.fig}
Let $K$ be a single-output subcircuit of circuit $M$ as shown in Figure~\ref{fig:subcirc}. For the sake of simplicity we consider here the case where the set $X_K$ of input variables of $K$ is a subset of the set $X$ of input variables of $M$. (The technique below can also be applied when input variables of $K$ are \ti{internal} variables of $M$.) Suppose $K$ evaluates, say, to value 0 much more frequently than to 1. Then one can view an input assignment of $M$ for which $K$ evaluates to 1 as specifying a ``corner case'' i.e. a rare event. Hitting such a corner case by a random test can be very hard. This issue can be addressed by using a coverage metric that \ti{requires} setting the value of $K$ to both 0 and 1. (The task of finding a test for which $K$ evaluates to 1 can be solved, for instance, by a SAT-solver.) The problem, however, is that hitting a corner case only once may be insufficient.
One can increase the frequency of hitting the corner case above as follows. Let $N$ be a miter of circuits $K'$ and $K''$ (see Figure~\ref{fig:gen_miter}) i.e. a circuit that evaluates to 1 iff $K'$ and $K''$ are functionally inequivalent. Let $K'$ and $K''$ be two copies of circuit $K$. So $N \equiv 0$ holds. Let test set $T_K$ be extracted from an SSA built for a projection of $N$ on a set $V \subseteq \V{N}$. Set $T_K$ can be viewed as a result of ``squeezing'' the truth table of $K$. Since this truth table is dominated by input assignments for which $K$ evaluates to 0, this part of the truth table is \ti{reduced the most}. So, one can expect that the ratio of tests of $T_K$ for which $K$ evaluates to 1 is higher than in the truth table of $K$. In Subsection~\ref{ssec:ecorners}, we substantiate this intuition experimentally. One can easily extend an assignment \ppnt{x}{K} of $T_K$ to an assignment \pnt{x} to $X$ e.g. by randomly assigning values to the variables of $X \setminus X_K$.
\subsection{Dealing with incomplete specifications} \label{ssec:incomp_spec} One can use property-checking tests to mitigate the problem of incomplete specifications. By running tests generated for an incomplete set of properties of $M$, one can expose bugs overlooked due to missing some properties. An important special case of this problem is as follows. Let $\xi$ be a property of $M$ that holds. Assume that the correctness of $M$ requires proving a slightly \ti{different} property $\xi'$ that is not true. By running a test set $T$ built for property $\xi$, one may expose a bug overlooked in formal verification due to proving $\xi$ instead of $\xi'$. In Subsection~\ref{ssec:missed_props}, we illustrate the idea above experimentally.
\subsection{Testing sequential circuits} \label{ssec:seq_circ} There are a few ways to apply property-checking tests meant for combinational circuits to verification of \ti{sequential} circuits. Here is one of them based on bounded model checking~\cite{bmc}. Let $M$ be a sequential circuit and $\xi$ be a property of $M$. Let $N(X,Y,z)$ be a circuit such that $N \equiv 0$ holds iff $\xi$ is true for $k$ time frames. Circuit $N$ is obtained by unrolling $M$ $k$ times and adding logic specifying property $\xi$. Set $X$ consists of the subset $X'$ specifying the state variables of $M$ in the first time frame and subset $X''$ specifying the combinational input variables of $M$ in $k$ time frames.
\input{g1en_miter.fig}
Having constructed $N$, one can build CTSs, CTS\textsuperscript{a}\xspace{s} and CTS\textsuperscript{aa}\xspace{s} for testing property $\xi$ of $M$. The only difference here from the problem we have considered so far is as follows. Circuit $M$ starts in a state satisfying some formula $I(X')$ that specifies the initial states. So, one needs to check if $N \equiv 0$ holds only for the assignments to $X$ satisfying $I(X')$. A test here is an assignment $(\ppnt{x'}{1},\ppnt{x''}{1},\dots,\ppnt{x''}{k})$ where \ppnt{x'}{1} is an initial state and \ppnt{x''}{i}, $1 \leq i \leq k$ is an assignment to the combinational input variables of the $i$-th time frame. Given a test, one can easily compute the corresponding sequence of states $(\ppnt{x'}{1},\dots,\ppnt{x'}{k})$ of $M$. In Subsection~\ref{ssec:missed_props}, we give an example of building a CTS\textsuperscript{aa}\xspace for a sequential circuit.
\section{Experiments} \label{sec:exper} \input{g5en_pc_tests}
In this section, we describe experiments with property-checking tests (PCT) generated by procedure \ti{GenPCT} shown in Figure~\ref{fig:gen_pct}. \ti{GenPCT} accepts a single-output circuit $N$ and outputs a set of tests $T$. (For the sake of simplicity, we assume here that $N \equiv 0$ holds.) \ti{GenPCT} starts with generating formula $F_N \wedge z$ and a set of variables $V \subseteq \V{F_N \wedge z}$. Then it calls \mbox{$\mi{SemStr}$}\xspace (see Fig.~\ref{fig:sem_str}) to compute an SSA $P$ of formula $H(V)$ describing a projection of circuit $N$ on $V$\!. If $H(V)$ does not depend on a variable $w \in V$, all assignments of $P$ have the same value of $w$. Procedure \ti{Diversify} randomizes the value of $w$ in the assignments of $P$.
Finally, \ti{BldTests} uses $P$ to extract a test set for circuit $N$. If $X \subseteq V$ holds (where $X$ is the set of input variables of $N$), \ti{BldTests} outputs all the different assignments to $X$ present in assignments of $P$. Otherwise, \ti{BldTests} calls procedure \ti{GenTests} (see Fig.~\ref{fig:gen_tests}).
If $V = \V{F_N \wedge z}$, then $H(V)$ is $F_N \wedge z$ itself and \ti{GenPCT} produces a CTS of $N$. Otherwise, according to definitions of Subsection~\ref{ssec:approx}, \ti{GenPCT} generates a CTS\textsuperscript{a}\xspace (if $X \subseteq V$) or CTS\textsuperscript{aa}\xspace (if $X \not\subseteq V$).
In the following subsections, we describe results of four experiments. In the first three experiments we used circuits specifying next state functions of latches of HWMCC-10 benchmarks. (The motivation was to use realistic circuits.) In our implementation of \mbox{$\mi{SemStr}$}\xspace, as a SAT-solver, we used Minisat 2.0~\cite{minisat,minisat2.0}. We also employed Minisat to run simulation. To compute the output value of $N$ under test \pnt{x}, we added unit clauses specifying \pnt{x} to formula $F_N \wedge z$ and checked its satisfiability.
\subsection{Comparing CTSs, CTS\textsuperscript{a}\xspace{s} and CTS\textsuperscript{aa}\xspace{s}} \label{ssec:cts}
\input{c5ts.tbl}
The objective of the first experiment was to give examples of circuits with non-trivial CTSs and compare the efficiency of computing CTSs, CTS\textsuperscript{a}\xspace{s} and CTS\textsuperscript{aa}\xspace{s}. In this experiment, $N$ was a miter specifying equivalence checking of circuits $M'$ and $M''$ (see Figure~\ref{fig:gen_miter}). $M''$ was obtained from $M'$ by optimizing the latter with ABC~\cite{abc}.
The results of the first experiment are shown in Table~\ref{tbl:cts}. The first two columns specify an HWMCC-10 benchmark and its latch whose next state function was used as $M'$. The next two columns give the number of input variables and that of gates in the miter $N$. The following pair of columns describe computing a CTS for $N$. The first column of this pair gives the size of the SSA $P$ found by \ti{GenPCT} in thousands. The number of tests in the set $T$ extracted from $P$ is shown in the parentheses in thousands. The second column of this pair gives the run time of \ti{GenPCT} in seconds.
The last four columns of Table~\ref{tbl:cts} describe results of computing test sets for a projection of $N$ on a set of variables $V$. The first column of this group shows if CTS\textsuperscript{a}\xspace or CTS\textsuperscript{aa}\xspace was computed whereas the next column gives the size of $V$. The third column of this group provides the size of SSA $P$ and the test set $T$ extracted from $P$ (in parentheses). Both sizes are given in thousands. The last column shows the run time of \ti{GenPCT}. For the first five examples, we used a projection of $N$ on $X$, thus constructing a CTS\textsuperscript{a}\xspace of $N$. For the last four examples we computed a projection of $N$ on an internal cut (see Subsection~\ref{ssec:int_cut}) thus generating a CTS\textsuperscript{aa}\xspace of $N$. \ti{GenPCT}\xspace was called with parameter $\mi{Tries}$ set to 5 (see Fig.~\ref{fig:gen_tests} and~\ref{fig:gen_pct}).
For the first three examples, \ti{GenPCT}\xspace managed to build non-trivial CTSs that are smaller than $2^{|X|}$. For instance, the trivial CTS for example \ti{bob3} consists of $2^{14}$=16,384 tests, whereas \ti{GenPCT}\xspace found a CTS of 2,004 tests. (So, to prove $M'$ and $M''$ equivalent it suffices to run 2,004 out of 16,384 tests.) For the other examples, \ti{GenPCT}\xspace failed to build a non-trivial CTS due to exceeding the memory limit (1.5 Gbytes). On the other hand, \ti{GenPCT}\xspace built a CTS\textsuperscript{a}\xspace or CTS\textsuperscript{aa}\xspace for all nine examples of Table~\ref{tbl:cts}. Note, however, that CTS\textsuperscript{a}\xspace{s} give only a moderate improvement over CTSs. For the last four examples \ti{GenPCT}\xspace failed to compute a CTS\textsuperscript{a}\xspace of $N$ due to memory overflow, whereas it had no problem computing a CTS\textsuperscript{aa}\xspace of $N$. So CTS\textsuperscript{aa}\xspace{s} can be computed efficiently even for large circuits. Further, we show that CTS\textsuperscript{aa}\xspace{s} are also very effective.
\input{e7xper_bug_hunting} \input{e8xper_corner_cases} \input{e9xper_missed_props}
\section{Background}
As we mentioned earlier, traditional testing checks if a circuit $M$ is correct as a whole. This notion of correctness means satisfying a conjunction of \ti{many} properties of $M$. For this reason, one tries to spray tests uniformly in the space of all input assignments. To improve the effectiveness of testing, one can try to run many tests at once as it is done in symbolic simulation~\cite{SymbolSim}. To avoid generation of tests that for some reason should be or can be excluded, a set of constraints can be used~\cite{cnst_rand}. Another method of making testing more reliable is to generate tests exciting a particular set of events specified by a coverage metric~\cite{cov_metr}. Our approach is different from those above in that it is aimed at testing a particular property of $M$.
The method of testing introduced in~\cite{bridging} is based on the idea that tests should be treated as a ``proof encoding'' rather than a sample of the search space. (The relation between tests and proofs has also been studied in software verification, e.g. in~\cite{UnitTests,godefroid,Beckman}). In this paper, we take a different point of view where testing becomes a \ti{part} of a formal proof, namely the part that performs structural derivations.
Reasoning about SAT in terms of random walks was pioneered in~\cite{rand_walk}. The centered SSAs we introduce in this paper bear some similarity to sets of assignments generated in de-randomization of Sch\"oning's algorithm~\cite{balls}. Typically, centered SSAs are much smaller than uncentered SSAs of~\cite{ssp}.
The first version of the \mbox{$\mi{SemStr}$}\xspace procedure is presented in report~\cite{cmpl_tst}. That version has a much tighter integration between the structural part (computation of SSAs) and the semantic part (derivation of a formula $H$ implied by the original formula). The advantage of the new version of \mbox{$\mi{SemStr}$}\xspace described in this paper is twofold. First, it is much simpler than \mbox{$\mi{SemStr}$}\xspace of~\cite{cmpl_tst}. In particular, any resolution-based SAT-solver that generates proofs can be used to implement the new \mbox{$\mi{SemStr}$}\xspace. Second, the simplicity of the new version makes it much easier to achieve the level of scalability where \mbox{$\mi{SemStr}$}\xspace becomes practical.
\section{Conclusion} We consider the problem of finding a Complete Test Set (CTS) for a combinational circuit $N$, that is, a test set proving $N \equiv 0$. We use the machinery of stable sets of assignments to derive non-trivial CTSs i.e. those that do not include all possible input assignments. Computing a CTS for a large circuit $N$ is inefficient. So, we present a procedure that generates a test set for a ``projection'' of $N$ on a subset $V$ of variables of $N$. Depending on the choice of $V$\!, this procedure generates a test set CTS\textsuperscript{a}\xspace that is an approximation of a CTS or a test set CTS\textsuperscript{aa}\xspace that is an approximation of a CTS\textsuperscript{a}\xspace. We give experimental results showing that CTS\textsuperscript{aa}\xspace{s} can be efficiently computed even for large circuits and are effective in solving verification problems.
\appendices
\section{Proofs} \label{app:proofs} \setcounter{proposition}{0} \begin{proposition}
Formula $H$ is unsatisfiable iff it has an SSA. \end{proposition} \begin{proof} \tb{If part.} Assume the contrary i.e. $P$ is an SSA of $H$ with center \sub{p}{init} and AC-mapping \mbox{$\Phi$}\xspace and $H$ is satisfiable. Let \pnt{s}~\,be an assignment satisfying $H$ that is the closest to \sub{p}{init} in terms of the Hamming distance. Then procedure \ti{BuildPath} (see Fig.~\ref{fig:bld_path}) can build a sequence of assignments $\ppnt{p}{1},\dots,\ppnt{p}{i}$ such that \begin{itemize} \item $i = \mi{Hamming\_distance}(\sub{p}{init},\pnt{s})+1$ \item $\ppnt{p}{1} = \sub{p}{init}$ and $\ppnt{p}{i} = \pnt{s}$ \end{itemize} By definition of \ti{BuildPath}, assignment \ppnt{p}{j+1} is closer to \pnt{s} and farther away from \sub{p}{init} than \ppnt{p}{j} where $1 \leq j \leq i-1$. This means that \ppnt{p}{j+1} is in $\mi{Nbhd}(\sub{p}{init},\ppnt{p}{j},C)$ where $C = \mbox{$\Phi$}\xspace(\ppnt{p}{j})$. In particular, \pnt{s} is in $\mi{Nbhd}(\sub{p}{init},\ppnt{p}{i-1},C)$ and so \pnt{s} is in $P$. However, by definition of an SSA, $P$ consists only of assignments falsifying $H$. Thus, we have a contradiction.
\noindent\tb{Only if part}. Assume that formula $H$ is unsatisfiable. By applying \ti{BuildSSA} (see Fig.~\ref{fig:bld_ssa}) to $H$, one generates a set $P$ that is an SSA of $H$ with respect to some center \sub{p}{init} and AC-mapping \mbox{$\Phi$}\xspace. \end{proof}
\section{CTSs And Circuit Redundancy} \label{app:red} Let $N \equiv 0$ hold. Let $R$ be a cut of circuit $N$. We will denote the circuit between this cut and the output of $N$ as $N_R$ (see Figure~\ref{fig:cut}). We will say that $N$ is \tb{non-redundant} if $N_R \not\equiv 0$ for any cut $R$ other than the cut specified by primary inputs of $N$. Note that if $N_{R} \not\equiv 0$ for some cut $R$, then $N_{R'} \not\equiv 0$ for \ti{every} cut $R'$ located above $R$.
Definition~\ref{def:cts} of a CTS may not work well if $N$ is highly redundant. Assume, for instance, that $N_R \equiv 0$ holds for a cut $R$. This means that the clauses specifying gates of $N$ below $R$ (i.e. those that are not in $N_R$) are redundant in $F_N \wedge z$. Then one can build an SSA $P$ for $F_N \wedge z$ as follows. Let $P_R$ be an SSA for $F_{N_R} \wedge z$. Let \pnt{v} be an arbitrary assignment to the variables of $\V{N} \setminus \V{N_R}$. Then by adding \pnt{v} to every assignment of $P_R$ one obtains an SSA for $F_N \wedge z$. This means that for any test \pnt{x}, \cube{x} contains an SSA of $F_N \wedge z$. Therefore, according to Definition~\ref{def:cts}, circuit $N$ has a CTS consisting of just one test.
\input{c2ut.fig}
The problem above can be solved using the following observation. Let $T$ be a set of tests \s{\ppnt{x}{1},\dots,\ppnt{x}{k}} for $N$ where
$k \leq 2^{|X|}$. Denote by \ppnt{r}{i} the assignment to the variables of cut $R$ produced by $N$ under input \ppnt{x}{i}. Let $T_R$ denote \s{\ppnt{r}{1},\dots, \ppnt{r}{k}}. Denote by $T^*_R$ the set of assignments to variables of $R$ that cannot be produced in $N$ by any input assignment. Now assume that $T$ is constructed so that $T_R \cup T^*_R$ is a CTS for circuit $N_R$. This does not change anything if $N_R$ is itself redundant (i.e. if $N_{R'} \equiv 0$ for some cut $R'$ that is closer to the output of $N$ than $R$). In this case, it is still sufficient to use $T$ of one test because $N_R$ has a CTS of one assignment (in terms of cut $R$). Assume, however, that
$N_R$ is non-redundant. In this case, there is no ``degenerate'' CTS for $N_R$ and $T$ has to contain at least $|T_R|$ tests. Assuming that $T^*_R$ alone is far from being a CTS for $N_R$, a CTS $T$ for $N$ will consist of many tests.
So, one can modify the definition of CTS for a redundant circuit $N$ as follows. A test set $T$ is a CTS for $N$ if there is a cut $R$ such that \begin{itemize} \item circuit $N_R$ is non-redundant i.e.
\begin{itemize}
\item[$\bullet$] $N_R \equiv0$ holds
\item[$\bullet$] $N_{R'} \not\equiv 0$ for every cut $R'$ above $R$
\end{itemize} \item set $T_R \cup T^*_R$ is a CTS for $N_R$. \end{itemize}
\end{document}
\begin{document}
\begin{center}
{\huge{\bf On the conjugacy class of the Fibonacci \\[.3cm]dynamical system}}
{\large{Michel Dekking (Delft University of Technology)\\ and\\ Mike Keane (Delft University of Technology and University of Leiden)}}
{\large{{Version: August 16, 2016}}}
\end{center}
\section{Introduction}\label{sec:intro}
We study the Fibonacci substitution $\varphi$ given by $$\varphi:\quad 0\rightarrow\,01,\;1\rightarrow 0.$$ The infinite Fibonacci word $w_{\rm F}$ is the unique one-sided sequence (to the right) which is a fixed point of $\varphi$: $$w_{\rm F}=0100101001\dots.$$ We also consider one of the two two-sided fixed points $x_{\rm F}$ of $\varphi^2$: $$x_{\rm F}=\dots01001001\!\cdot\!0100101001\dots.$$ The dynamical system generated by taking the orbit closure of $x_{\rm F}$ under the shift map $\sigma$ is denoted by $(X_\varphi,\sigma)$.
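Throughout the paper it is convenient to have a long prefix of $w_{\rm F}$ at hand. The following small Python sketch (an illustration only) generates such a prefix and checks the fixed-point property:
\begin{verbatim}
phi = {"0": "01", "1": "0"}

def apply(sub, word):
    return "".join(sub[c] for c in word)

w = "0"
for _ in range(10):
    w = apply(phi, w)
print(w[:20])                       # 01001010010010100101
print(apply(phi, w).startswith(w))  # True: w is a prefix of phi(w)
\end{verbatim}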
The question we will be concerned with is: what are the substitutions $\eta$ which generate a symbolical dynamical system topologically isomorphic to the Fibonacci dynamical system? Here topologically isomorphic means that there exists a homeomorphism $\psi: X_\varphi\rightarrow X_\eta$, such that $\psi\sigma=\sigma\psi$, where we denote the shift on $X_\eta$ also by $\sigma$. In this case $(X_\eta, \sigma)$ is said to be conjugate to $(X_\varphi,\sigma)$.
This question has been completely answered for the case of constant length substitutions in the paper \cite{CDK}. It is remarkable that there are only finitely many injective primitive substitutions of length $L$ which generate a system conjugate to a given substitution of length $L$. Here a substitution $\alpha$ is called \emph{injective} if $\alpha(a)\ne \alpha(b)$ for all letters $a$ and $b$ from the alphabet with $a\ne b$. When we extend to the class of all substitutions, replacing $L$ by the Perron-Frobenius eigenvalue of the incidence matrix of the substitution, then the conjugacy class can be infinite in general. See \cite{Dekking-TCS} for the case of the Thue-Morse substitution. In the present paper we will prove that there are infinitely many injective primitive substitutions with Perron-Frobenius eigenvalue $\Phi=(1+\sqrt{5})/2$ which generate a system conjugate to the Fibonacci system---see Theorem~\ref{th:inf}.
In the non-constant length case some new phenomena appear. If one has an injective substitution $\alpha$ of constant length $L$, then all its powers $\alpha^n$ will also be injective. This is no longer true in the general case. For example, consider the injective substitution $\zeta$ on the alphabet $\{1,2,3,4,5\}$ given by
$$\zeta: \qquad 1\rightarrow 12,\;
2\rightarrow 3,\;
3\rightarrow 45,\;
4\rightarrow 1,\;
5\rightarrow 23.$$
An application of Theorem~\ref{th:Nblock} followed by a partition reshaping (see Section~\ref{sec:reshaping}) shows that the system $(X_\zeta,\sigma)$ is conjugate to the Fibonacci system.
However, the square of $\zeta$ is given by
$$\zeta^2: \qquad 1\rightarrow 123,\;
2\rightarrow 45,\;
3\rightarrow 123,\;
4\rightarrow 12,\;
5\rightarrow 345, $$
which is \emph{not} injective. To deal with this undesirable phenomenon we introduce the following notion. A substitution $\alpha$ is called a \emph{full rank} substitution if its incidence matrix has full rank (non-zero determinant). This is a strengthening of injectivity, because obviously a substitution which is not injective can not have full rank. Moreover, if the substitution $\alpha$ has full rank, then all its powers $\alpha^n$ will also have full rank, and thus will be injective.
Another phenomenon, which does not exist in the constant length case, is that non-primitive substitutions $\zeta$ may generate uniquely defined minimal systems
conjugate to a given system. For example, consider the injective substitution $\zeta$ on the alphabet $\{1,2,3,4\}$ given by
$$\zeta:\qquad 1\rightarrow 12,\quad
2\rightarrow 31,\quad
3\rightarrow 4,\quad
4\rightarrow 3. $$ With the partition reshaping technique from Section~\ref{sec:reshaping} one can show that the system $(X_\zeta,\sigma)$ is conjugate to the Fibonacci system (ignoring the system on two points generated by $\zeta$). In the remainder of this paper we concentrate on primitive substitutions.
The structure of the paper is as follows. In Section~\ref{sec:Nblock} we show that all systems in the conjugacy class of the Fibonacci substitution can be obtained by letter-to-letter projections of the systems generated by so-called $N$-block substitutions. In Section~\ref{sec:C3} we give a very general characterization of symbolical dynamical systems in the Fibonacci conjugacy class, in the spirit of a similar result on the Toeplitz dynamical system in \cite{CKL08}. In Section~\ref{sec:reshaping} we introduce a tool which admits to turn non-injective substitutions into injective substitutions. This is used in Section~\ref{sec:C1} to show that the Fibonacci class has infinitely many primitive injective substitutions as members. In Section~\ref{sec:two} we quickly analyse the case of a 2-symbol alphabet. Sections \ref{sec:equi} and \ref{sec:mat} give properties of maximal equicontinuous factors and incidence matrices, which are used to analyse the 3-symbol case in Section \ref{sec:C2}. In the final Section \ref{sec:L2L} we show that the system obtained by doubling the 0's in the infinite Fibonacci word is conjugate to the Fibonacci dynamical system, but can not be generated by a substitution.
\section{$N$-block systems and $N$-block substitutions}\label{sec:Nblock}
For any $N$ the $N$-block substitution $\hat{\theta}_N$ of a substitution $\theta$ is defined on an alphabet of $p_\theta(N)$ symbols, where $p_\theta(\cdot)$ is the complexity function of the language ${\cal L}_\theta$ of $\theta$ (cf.\ \cite[p.~95]{Queff}). What is \emph{not} in \cite{Queff} is that this $N$-block substitution generates the $N$-block presentation of the system $(X_\theta,\sigma)$.
We denote the letters of the alphabet of the $N$-block presentation by $[a_1a_2\dots a_N]$, where $a_1a_2\dots a_N$ is an element from ${\cal L}_\theta^N$, the set of words of length $N$ in the language of $\theta$. The $N$-block presentation $(X^{[N]}_\theta,\sigma)$ emerges by applying a sliding block code $\Psi$ to the sequences of $X_\theta$, so $\Psi$ is the map \\[-.3cm]
$$\Psi(a_1a_2\dots a_N)=[a_1a_2\dots a_N].$$
We denote by $\psi$ the induced map from $X_\theta$ to $X^{[N]}_\theta$:
$$\psi(x)=\dots\Psi(x_{-N},\dots,x_{-1})\Psi(x_{-N+1},\dots,x_{0})\dots.$$
It is easy to see that $\psi$ is a conjugacy; its inverse is the map induced by the 1-block map (also denoted $\pi_0$) given by $\pi_0([a_1a_2\dots a_N])=a_1$.
The $N$-block substitution $\hat{\theta}_N$ is defined by requiring that for each word $a_1a_2\dots a_N$ the length of $\hat{\theta}_N([a_1a_2\dots a_N])$ is equal to the length $L_1$ of $\theta(a_1)$, and the letters of $\hat{\theta}_N([a_1a_2\dots a_N])$ are the $\Psi$-codings of the first $L_1$ consecutive $N$-blocks in $\theta(a_1a_2\dots a_N)$.
\begin{theorem}\label{th:Nblock} Let $\hat{\theta}_N$ be the $N$-block substitution of a primitive substitution $\theta$. Let $(X^{[N]}_\theta,\sigma)$ be the $N$-block presentation of the system $(X_\theta,\sigma)$. Then the set $X^{[N]}_\theta$ equals $X_{\hat{\theta}_N}$. \end{theorem}
{\em Proof:} Let $x$ be a fixed point of $\theta$, and let $y=\psi(x)$, where $\psi$ is the $N$-block conjugacy, with inverse $\pi_0$. The key equation is $\pi_0\,\hat{\theta}_N=\theta\,\pi_0$.
This implies\\[-.3cm]
$$\pi_0\,\hat{\theta}_N(y)=\theta\,\pi_0(y)=\theta\,\pi_0(\psi(x))=\theta(x)=x.$$
Applying $\psi$ on both sides gives $\hat{\theta}_N(y)=\psi(x)=y$, i.e., $y$ is a fixed point of $\hat{\theta}_N$. But then $X^{[N]}_\theta=X_{\hat{\theta}_N}$, by minimality of $X^{[N]}_\theta$. $\Box$
It is well known (see, e.g., \cite[p.~105]{Queff}) that $p_\varphi(N)=N+1$, so for the Fibonacci substitution $\varphi$ the $N$-block substitution $\hat{\varphi}_N$ is a substitution on an alphabet of $N+1$ symbols.
We describe how one obtains $\hat{\varphi}_2$. We have ${\cal L}_\varphi^2=\{00, 01, 10\}$. Since 00 and 01 start with 0, and 10 with 1, we obtain $$\hat{\varphi}_2:\quad [00]\mapsto [01][10],\;[01]\mapsto [01][10],\; [10]\mapsto [00],$$ reading off the consecutive 2-blocks from $\varphi(00)=0101,\, \varphi(01)=010$ and $\varphi(10)=001$. It is useful to recode the alphabet $\{[00],[01],[10]\}$ to the standard alphabet $\{1,2,3\}$. We do this in the order in which they appear for the first time in the infinite Fibonacci word $w_{\rm F}$--- we call this the \emph{canonical coding}, and will use the same principle for all $N$. For $N=2$ this gives
$[01]\rightarrow 1,\; [10]\rightarrow 2,\; [00]\rightarrow 3$. Still using the notation $\hat{\varphi}_2$ for the
substitution on this new alphabet, we obtain $$\hat{\varphi}_2(1)=12 \quad \hat{\varphi}_2(2)=3, \quad \hat{\varphi}_2(3)=12.$$ In this way the substitution is in standard form (cf.~\cite{CDK} and \cite{Dekking-2016}).
\section{The Fibonacci conjugacy class}\label{sec:C3}
Let $F_n$ for $n=1,2,\dots$ be the Fibonacci numbers $$F_1=1,\, F_2=1,\, F_3=2,\, F_4=3,\, F_5=5, \dots.$$
\begin{theorem} Let $(Y,\sigma)$ be any subshift. Then $(Y,\sigma)$ is topologically conjugate to the Fibonacci system $(X_\varphi,\sigma)$ if and only if there exist $n\ge 3 $ and two words $B_0$ and $B_1$ of length $F_n$ and $F_{n-1}$, such that any $y$ from $Y$ is a concatenation of $B_0$ and $B_1$, and moreover, if\, $\cdots B_{x_{-1}} B_{x_0} B_{x_1}\cdots B_{x_k}\cdots$ is such a concatenation, then $x=(x_k)$ is a sequence from the Fibonacci system. \end{theorem}
\noindent \emph{Proof:} First let us suppose that $(Y,\sigma)$ is topologically isomorphic to the Fibonacci system. By the Curtis-Hedlund-Lyndon theorem, there exists an integer $N$ such that $Y$ is obtained by a letter-to-letter projection $\pi$ from the $N$-block presentation $(X^{[N]}_\varphi, \sigma)$ of the Fibonacci system. Now if $B_0$ and $B_1$ are two decomposition blocks of sequences from $X^{[N]}_\varphi$ of length $F_n$ and $F_{n-1}$, then $\pi(B_0)$ and $\pi(B_1)$ are decomposition blocks of sequences from $Y$ with lengths $F_n$ and $F_{n-1}$, again satisfying the concatenation property.
So it suffices to prove the result for $X^{[N]}_\varphi$. Note that we may suppose that the integers $N$ pass through an infinite subsequence; we will use $N=F_n$, where $n=3,4,\dots$. Useful to us are the \emph{singular words} $w_n$ introduced in \cite{WenWen}. The $w_n$ are the unique words of length $F_{n+1}$ having a different Parikh vector from all the other words of length $F_{n+1}$ from the language of $\varphi$. Here $w_1=1, w_2=00$, $w_3=101$, and for $n\ge4$
$$w_n=w_{n-2}w_{n-3}w_{n-2}.$$
The set of return words of $w_n$ has only two elements which are $u_n=w_nw_{n+1}$ and $v_n=w_nw_{n-1}$ (see page 108 in \cite{HuangWen}).
The lengths of these words are $|u_n|=F_{n+3}$ and $|v_n|=F_{n+2}$. Let $w_n^-$ be $w_n$ with the last letter deleted.
Define for $n\ge5$
$$B_0=\Psi(u_{n-3}w_{n-3}^-), \quad B_1=\Psi(v_{n-3}w_{n-3}^-),$$
where $\Psi$ is the $N$-block code from ${\cal L}_\varphi^N$ to ${\cal L}_{\varphi^{[N]}}$, with $N=F_{n-2}$.
Then these blocks have the right lengths, and by Theorem 2.11 in \cite{HuangWen}, the two return words partition the infinite Fibonacci word $w_{\rm F}$ according to the infinite Fibonacci word---except for a prefix $r_{n,0}$:
$$w_{\rm F}=r_{n,0}u_nv_nu_nu_nv_nu_n\dots.$$
By minimality this property carries over to all two-sided sequences in the Fibonacci dynamical system.
For the converse, let $Y$ be a Fibonacci concatenation system as above. Let $C_0=\varphi^{n-2}(0)$ and $C_1=\varphi^{n-2}(1)$. We define a map $g$ from $(Y,\sigma)$ to a subshift of $\{0,1\}^{\mathbb{Z}}$ by $$g:\quad \cdots B_{x_{-1}} B_{x_0} B_{x_1}\cdots B_{x_k}\cdots\; \mapsto \; \cdots C_{x_{-1}} C_{x_0} C_{x_1}\cdots C_{x_k}\cdots,$$
respecting the position of the $0^{\rm th}$ coordinate. Since $|C_0|=|B_0|$ and $|C_1|=|B_1|$, $g$ commutes with the shift. Also, $g$ is obviously continuous. Moreover, since for any sequence $x$ in the Fibonacci system $\varphi^{n-2}(x)$ is again a sequence in the Fibonacci system, $g(Y)\subseteq X_\varphi$. So, by minimality, $(X_\varphi,\sigma)$ is a factor of $(Y,\sigma)$. Since $g$ is invertible, with continuous inverse, $(Y,\sigma)$ is in the conjugacy class of the Fibonacci system.
$\Box$
\noindent {\bf Example}\; The case $(F_n,F_{n-1})=(13,8)$. Then $n=7$, so we have to consider the singular word $w_4=00100$ of length 5.
\noindent The set of $5$-blocks is $\{01001,\,10010,\,00101,\,01010,\,10100,\,00100\}.$\\ These will be coded by the canonical coding $\Psi$ to the standard alphabet $\{1,2,3,4,5,6\}$. Note that $\Psi(w_4)=6$. Further, $w_3=101$ and $w_5=10100101$. So $u_4=w_4w_5=0010010100101$ and $v_4=w_4w_3=00100101$. Applying $\Psi$ gives the two decomposition blocks $B_0 = 6123451234512$ and $B_1 = 61234512$.
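For readers who wish to recompute this example, the following Python sketch rebuilds the coding $\Psi$ and the two decomposition blocks; it assumes (as a convention of the sketch) that the canonical coding numbers the $5$-blocks in the order of their first occurrence in $w_{\rm F}$, which reproduces the coding used above.
\begin{verbatim}
# Recompute B_0 and B_1 of the example (n = 7, N = F_5 = 5).
# Assumption: the canonical coding Psi numbers the N-blocks in the
# order of their first occurrence in the Fibonacci word.

def fib_word(length):                     # prefix of the Fibonacci word
    w = "0"
    while len(w) < length:
        w = "".join("01" if c == "0" else "0" for c in w)
    return w[:length]

w, N = fib_word(200), 5
coding = {}
for i in range(len(w) - N + 1):
    b = w[i:i + N]
    if b not in coding:
        coding[b] = str(len(coding) + 1)  # 01001->1, ..., 00100->6

def Psi(word):                            # N-block coding of a 0-1 word
    return "".join(coding[word[i:i + N]] for i in range(len(word) - N + 1))

w3, w4, w5 = "101", "00100", "10100101"   # singular words
u4, v4 = w4 + w5, w4 + w3                 # return words of w4
print(Psi(u4 + w4[:-1]), Psi(v4 + w4[:-1]))   # 6123451234512 61234512
\end{verbatim}
The printed words agree with $B_0$ and $B_1$ above.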
\section{Reshaping substitutions}\label{sec:reshaping}
We call a language preserving transformation of a substitution a reshaping. An example is the prefix-suffix change used in \cite{Dekking-TCS}. Here we consider a variation which we call a \emph{partition reshaping}.
We give an example of this technique. Take the $N$-block representation of the Fibonacci system for $N=4$. All five 4-blocks occur consecutively at the beginning of the Fibonacci word $w_{\rm F}$ as $\{0100,\,1001,\,0010,\, 0101,\, 1010\}.$
The canonical coding to $\{1,2,3,4,5\}$ gives the 4-block substitution $\hat{\varphi}_4$: $$\hat{\varphi}_4:\qquad 1\rightarrow 12,\;
2\rightarrow 3,\;
3\rightarrow 45, \;
4\rightarrow 12,\;
5\rightarrow 3.$$
\noindent Its square is equal to $$\hat{\varphi}_4^2: \qquad 1\rightarrow 123,\;
2\rightarrow 45,\;
3\rightarrow 123,\;
4\rightarrow 123,\;
5\rightarrow 45. $$
Since the two blocks $B_0=123$ and $B_1=45$ have no letters in common, we can perform a partition reshaping. Symbolically this can be represented by
\begin{table}[h!]
\centering
\caption{\small Partition reshaping.}
\label{tab:table1}
\begin{tabular}{ccccccccc}\\[.005cm]
1 & \; & 2 & 3 & & \qquad\qquad 4 & \; & 5 & \\
$\downarrow$ & \; & $\downarrow$ & $\downarrow$& & \qquad\qquad $\downarrow$ & \; & $\downarrow$ & \\
1 & 2 & 3 & 4 & 5 & \qquad\qquad 1 & 2 & 3 & \\
1 & \,\; 2 $\|$& 3 &\,\; 4 $\|$ & 5 & \qquad\qquad \,\; 1 $\|$ & 2 & \,\; 3 $\|$ &
\end{tabular} \end{table}
Here the third line gives the images $\hat{\varphi}_4(B_0)=\hat{\varphi}_4(123)=12345$ and $\hat{\varphi}_4(B_1)=\hat{\varphi}_4(45)=123$; the fourth line gives \emph{another} partition of these two words into three and two subwords, respectively, from which the new substitution $\eta$ can be read off: $$\eta: \qquad 1\rightarrow 12,\;
2\rightarrow 34,\;
3\rightarrow 5 ,\;
4\rightarrow 1,\;
5\rightarrow 23. $$
What we gain is that the partition-reshaped substitution $\eta$ generates the same language as $\hat{\varphi}_4$, while in addition $\eta$ is injective---it even has full rank.
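Both claims are easy to test by machine. The following Python sketch is a finite sanity check (not a proof): it compares the factors, up to length $8$, of long prefixes of the fixed points of $\hat{\varphi}_4$ and $\eta$, and computes the determinant of the incidence matrix of $\eta$; the number of iterations and the factor length are ad hoc choices of the sketch.
\begin{verbatim}
# Finite sanity check: eta and the 4-block substitution have the same
# factors (up to length 8, on long prefixes of their fixed points),
# and the incidence matrix of eta has nonzero determinant (full rank).
import numpy as np

phi4 = {"1": "12", "2": "3", "3": "45", "4": "12", "5": "3"}
eta  = {"1": "12", "2": "34", "3": "5",  "4": "1",  "5": "23"}

def iterate(sub, steps, seed="1"):
    w = seed
    for _ in range(steps):
        w = "".join(sub[c] for c in w)
    return w

def factors(w, k):
    return {w[i:i + k] for i in range(len(w) - k + 1)}

wa, wb = iterate(phi4, 14), iterate(eta, 14)
print(all(factors(wa, k) == factors(wb, k) for k in range(1, 9)))

M = np.array([[eta[a].count(b) for b in "12345"] for a in "12345"])
print(round(np.linalg.det(M)))     # nonzero, so eta has full rank
\end{verbatim}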
\section{The Fibonacci class has infinite cardinality}\label{sec:C1}
\begin{theorem}\label{th:inf} There are infinitely many primitive injective substitutions with Perron-Frobenius eigenvalue the golden mean that generate dynamical systems topologically isomorphic to the Fibonacci system.
\end{theorem}
We will explicitly construct infinitely many primitive injective substitutions whose systems are topologically conjugate to the Fibonacci system. The topological conjugacy will follow from the fact that the systems are $N$-block codings of the Fibonacci system, where $N$ will run through the numbers $F_n-1$. As an introduction we look at $n=5$, i.e., we consider the blocks of length $N=F_5-1=4$. With the canonical coding of the $N$-blocks we obtain the 4-block substitution $\hat{\varphi}_4$---see Section~\ref{sec:reshaping}:
$$\hat{\varphi}_4:\qquad 1\rightarrow 12,\,
2\rightarrow 3,\,
3\rightarrow 45, \,
4\rightarrow 12,\,
5\rightarrow 3.$$
\noindent An \emph{interval} $I$ starting with $a\in A$ is a word of length $L$ of the form $$I=a,a+1,...,a+L-1.$$
\noindent Note that $\hat{\varphi}_4(123)=12345$, and $\hat{\varphi}_4(45)=123$, and these four words are intervals.
This is a property that holds in general. First we need the fact that the first $F_n$ words of length $F_n-1$ in the fixed point of $\varphi$ are all different. This result is given by Theorem 2.8 in \cite{Chuan-Ho}. We code these $N+1$ words by the canonical coding to the letters $1,2,\dots,F_n$. We then have \begin{equation}\label{eq:Fib}\hat{\varphi}_N(12\dots F_{n-1})=12\dots F_{n}, \qquad \hat{\varphi}_N(F_{n-1}\!+1,\dots F_n)=12\dots F_{n-1}.\end{equation} This can be seen by noting that $\pi_0 \hat{\varphi}_N^k=\varphi^k \pi_0$ for all $k$, and that the fixed point of $\varphi$ starts with $\varphi^{n-2}(0)\varphi^{n-3}(0)$.
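Equation~\eqref{eq:Fib} can also be checked by machine for small $n$. The Python sketch below assumes the standard definition of the $N$-block substitution via the fixed point, together with the canonical coding by order of first occurrence; both conventions are assumptions of the sketch, chosen to match the discussion above.
\begin{verbatim}
# Check equation (eq:Fib) for n = 4,...,8 by building the N-block
# substitution from a long prefix of the Fibonacci word.
def fib(n):
    a, b = 1, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a

def fib_word(length):
    w = "0"
    while len(w) < length:
        w = "".join("01" if c == "0" else "0" for c in w)
    return w

phi = {"0": "01", "1": "0"}

def block_substitution(N, w):
    L = len(w) - N + 1
    code = {}
    for i in range(L):                       # canonical coding
        code.setdefault(w[i:i + N], len(code) + 1)
    hat, f = {}, 0                           # f = |phi(w[0:i])|
    for i in range(L):
        if len(hat) == N + 1:                # all blocks handled
            break
        b = code[w[i:i + N]]
        if b not in hat:
            hat[b] = [code[w[f + j:f + j + N]]
                      for j in range(len(phi[w[i]]))]
        f += len(phi[w[i]])
    return hat

for n in range(4, 9):
    N = fib(n) - 1
    hat = block_substitution(N, fib_word(20 * fib(n)))
    left  = [x for a in range(1, fib(n - 1) + 1) for x in hat[a]]
    right = [x for a in range(fib(n - 1) + 1, fib(n) + 1) for x in hat[a]]
    print(n, left == list(range(1, fib(n) + 1)),
             right == list(range(1, fib(n - 1) + 1)))
\end{verbatim}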
We continue for $n \ge 5$ with the construction of a substitution $\eta=\eta_n$ which is a partition reshaping of $\hat{\varphi}_N$. The $F_n$ letters in the alphabet $A^{[N]}$ are divided into three species, S, M and L (for Small, Medium and Large): $${\rm S}:=\{1,\dots,F_{n-3}\}, \quad {\rm M}:=\{F_{n-3}+1,\dots,F_{n-1}\},\quad {\rm L}:=\{F_{n-1}\!+1,\dots,F_n\}.$$ Note that ${\rm Card}\, {\rm M}=F_{n-1}-F_{n-3}=F_{n-2}=F_{n}-F_{n-1}={\rm Card}\, {\rm L}.$
An important role is played by $a_{ \rm M}:=F_{n-3}+1$, the smallest letter in M, and $a_{ \rm L}:=F_{n-1}+1$, the smallest letter in L.
For the letters in M (except for $a_{\rm M}$) the rules are very simple: $$\eta(a)= a+F_{n-2}$$ (i.e., a single letter obtained by adding the two integers). The first letter in M has the rule $$\eta(a_{\rm M})=\eta(F_{n-3}\!+1)= F_{n-1}, F_{n-1}\!+1= F_{n-1},a_{\rm L}.$$ The images of the letters in L are intervals of length 1 or 2, obtained from a partition of the word $12\dots F_{n-1}$. Their lengths come from $\varphi^{n-4}(0)$, rotated once (the leading 0 is moved to the back). This word is denoted $\rho(\varphi^{n-4}(0))$. The choice of this word is somewhat arbitrary; other choices would work. The properties of $v:=\rho(\varphi^{n-4}(0))$ which matter to us are
(V1) $\ell:=|v|=F_{n-2}$.
(V2) $v_1=1$, $v_\ell=0$.
(V3) $v$ does not contain any 11.
\noindent Now the images of the letters in L are determined by $v$ according to the following rule: $|\eta(a_{ \rm L}+k-1)|=2-v_k$, for all $k=1,\dots,F_{n-2}$. Note that this implies in particular that for all $n\ge 5$ one has by property (V2) $$\eta(a_{\rm L})=\eta(F_{n-1}\!+1)= 1, \;\qquad \eta(F_n)= F_{n-1}-1,F_{n-1}.$$
The images of the letters in S are then obtained by choosing the lengths of the $\eta(a)$ in such a way that the largest common refinement of the induced partitions of the images of S and L is the singleton partition.
\noindent{\bf Example} The case $n=7$, so $ F_n=13$, $ F_{n-1}=8$, and $ F_{n-2}=5$.
\noindent Then ${\rm S}=\{1,2,3\},\, {\rm M}=\{4,5,6,7,8\},\, {\rm L}=\{9,10,11,12,13\}.$
\noindent Rules for M: \quad $4\rightarrow 89,\;5\rightarrow 10, \;6\rightarrow11, \;7\rightarrow 12, \;8\rightarrow13.$
Now $$\varphi^3(0)=01001\; \Rightarrow\; v= 10010\; \Rightarrow\; {\rm the\, partition\, is}\; 1|23|45|6|78.$$ This partition gives the following rules for L: $$9\rightarrow1,\; 10\rightarrow23,\; 11\rightarrow45,\; 12\rightarrow6,\; 13\rightarrow 78.$$
The induced partition for the images of the letters in S is $|12|34|567|8$, yielding rules
$$1\rightarrow12,\; 2\rightarrow34,\; 3\rightarrow567.$$
\noindent In summary we obtain the substitution $\eta=\eta_7$ given by: \vspace*{-0.1cm}
\begin{align*} {\rm S}: \begin{cases} 1& \rightarrow 1,2\\ 2& \rightarrow 3,4\\ 3& \rightarrow 5,6,7 \end{cases} \qquad {\rm M}: \begin{cases} 4& \rightarrow 8,9\\ 5& \rightarrow 10\\ 6& \rightarrow 11\\ 7& \rightarrow 12\\ 8& \rightarrow 13 \end{cases} \qquad {\rm L}: \begin{cases} \,\,9\!\!& \rightarrow 1\\ 10& \rightarrow 2,3\\ 11& \rightarrow 4,5\\ 12& \rightarrow 6\\ 13& \rightarrow 7,8. \end{cases} \end{align*}
The substitution $\eta$ is primitive because one can reach any letter from the letter 1 and the letter 1 from any letter; this gives irreducibility, and primitivity follows since the first rule $1\rightarrow 1,2$ rules out periodicity.
\noindent The substitution $\eta$ has full rank because any unit vector $$e_a=(0,\dots,0,1,0,\dots,0)$$ is a linear combination of rows of the incidence matrix $M_\eta$ of $\eta$. For $a\in {\rm L}\setminus\{9\}$ this combination is trivial, and for the other letters this is exactly forced by the choice of lengths in such a way that the largest common refinement of the induced partitions of the images of S and L is the singleton partition. In more detail: denote the $a^{\rm th}$ row of $M_\eta$ by $R_a$. Then $e_1=R_9$, and thus $e_2=R_1-R_9$, $e_3=R_{10}-e_2=R_{10}-R_1+R_9$, etc.
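The construction can be automated. The following Python sketch builds $\eta_n$ from the rules above, with the convention (matching the example) that the images of the letters in S are cut exactly at those positions that are not cut by the images of the L-letters, and then checks primitivity and full rank numerically for the first few values of $n$.
\begin{verbatim}
# Build eta_n for n = 5,...,10 and check primitivity and full rank.
import numpy as np

def fib(n):
    a, b = 1, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a

def build_eta(n):
    Fn, Fn1, Fn2, Fn3 = fib(n), fib(n - 1), fib(n - 2), fib(n - 3)
    aM, aL = Fn3 + 1, Fn1 + 1
    eta = {aM: [Fn1, aL]}
    for a in range(aM + 1, Fn1 + 1):          # M-letters except a_M
        eta[a] = [a + Fn2]
    v = "0"
    for _ in range(n - 4):                    # v = rho(phi^{n-4}(0))
        v = "".join("01" if c == "0" else "0" for c in v)
    v = v[1:] + v[0]
    pos, Lcuts = 1, set()
    for k in range(Fn2):                      # L-letters
        length = 2 - int(v[k])
        eta[aL + k] = list(range(pos, pos + length))
        pos += length
        Lcuts.add(pos - 1)
    Scuts = [p for p in range(1, Fn1) if p not in Lcuts]
    start = 1
    for a, cut in enumerate(Scuts, start=1):  # S-letters
        eta[a] = list(range(start, cut + 1))
        start = cut + 1
    return eta, Fn

def is_primitive(M):
    d = len(M)
    A = (M > 0).astype(int)
    B = A.copy()
    for _ in range((d - 1) ** 2):             # Wielandt's exponent bound
        if B.all():
            return True
        B = ((B @ A) > 0).astype(int)
    return bool(B.all())

for n in range(5, 11):
    eta, Fn = build_eta(n)
    M = np.zeros((Fn, Fn), dtype=int)
    for a, image in eta.items():
        for b in image:
            M[a - 1, b - 1] += 1
    print(n, Fn, is_primitive(M), round(abs(np.linalg.det(M))))
\end{verbatim}
For $n=5$ and $n=7$ the construction reproduces the substitutions displayed above.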
The argument yielding the property of full rank will hold in general for all $n\ge5$. To prove primitivity for all $n$ we need some more details.
\begin{proposition} The substitution $\eta=\eta_n$ is primitive for all $n \ge 5$.\end{proposition}
\noindent \emph{Proof:} The proposition will be proved if we show that for all $a\in A$ the letter $a$ will occur in some iteration $\eta^k(1)$, and conversely, that for all $a\in A$ the letter $1$ will occur in some iteration $\eta^k(a)$. The first part is easy to see from the fact that $\eta(1)=1,2$ and that $\eta^2(1,\dots,F_{n-3})=1,\dots,F_n-1$, plus $\eta^2(a_{\rm M})=F_n,1$. For the second part, we show A) that for any $a\in{\rm M}\cup{\rm L}$ a letter from S will occur in $\eta^k(a)$ in $k\le {\rm Card}\,({\rm M}\cup{\rm L})$ steps (see Lemma~\ref{lem:dec}), and B) that for any $a\in{\rm S}$ the letter 1 will occur in $\eta^k(a)$ in $k\le 2\,{\rm Card}\, A$ steps (see Lemma~\ref{lem:occ1}).
$\Box$
\begin{lemma}\label{lem:dec} Let $f:A\rightarrow A$ be the map that assigns the first letter of $\eta^2(a)$ to $a$. Then $f$ is strictly decreasing on L $\cup$ M$\backslash \{a_{\rm M}\}$.\end{lemma}
\noindent \emph{Proof:} First we consider $f$ on ${\rm L}$. We have $$\eta^2(a_{\rm L}\dots F_n)=\eta(1,\dots, F_{n-1}-1, F_{n-1})=1\dots F_n.$$ Since $$\eta^2(F_n)=\eta( F_{n-1}-1,F_{n-1})=F_{n-1}-1+F_{n-2},F_{n-1}+F_{n-2}=F_n-1, F_n,$$ we obtain $f(F_n)=F_n-1<F_n$. This implies that also the previous letters in ${\rm L}$ are mapped by $f$ to a smaller letter.
\noindent Next we consider $f$ on M$\backslash \{a_{\rm M}\}$. Here $$\eta^2(a_{\rm M}+1,\dots, F_{n-1})=\eta(a_{\rm L}+1,\dots, F_{n})=2,3,\dots, F_{n-1}.$$ Now $$\eta^2(F_{n-1})=\eta( F_{n})=F_{n-1}-1,F_{n-1}.$$ So we obtain $f(F_{n-1})=F_{n-1}-1<F_{n-1}$. This implies that also the previous letters in ${\rm M}$ are mapped by $f$ to a smaller letter.
$\Box$
\begin{lemma}\label{lem:occ1} For all $a\in S$ there exists $k \le 2\,{\rm Card}\, A$ such that the letter 1 occurs in $\eta^{k}(a)$. \end{lemma}
\noindent \emph{Proof:} The substitution $\eta^2$ maps intervals $I$ to intervals $\eta^2(I)$, provided $I$ does not contain $a_{\rm M}$ or $a_{\rm L}$. By construction, since the $\eta(b)$ for $b\in {\rm L}$ have length 1 or 2, the length of $\eta(a)$ for $a\in {\rm S}$ is 2 or 3, and so $\eta(a)$ contains a word $c, c+1$ for some $c\in A$. Since $\rho(\varphi^{n-4}(0))$ does not contain two consecutive 1's (property (V3)), the image $\eta^2(c,c+1)$ has length at least 3. Since\footnote{This follows from the fact that any word in the language of $\eta$ occurs in some concatenation of the two words $12\dots F_{n}$ and $12\dots F_{n-1}$.} any word of length at least 3 in the language of $\eta$ contains an interval of length 2, the length increases by at least 1 each time $\eta^2$ is applied. It follows that for all $k\ge 1$ and all $a\in {\mathrm S}$ one has $|\eta^{2k+1}(a)| \ge k+2$. But then for some $k$ less than ${\rm Card}\, A$ a letter $a_{\rm M}$ or a letter $a_{\rm L}$ must occur in $\eta^{2k+1}(a)$. This implies that the letter 1 occurs in $\eta^{2k+3}(a)$, since both $\eta^2(a_{\rm M})$ and $\eta^2(a_{\rm L})$ contain a 1.
$\Box$
\section{The 2-symbol case}\label{sec:two}
The eigenvalue group of the Fibonacci system is the rotation over the small golden mean $\gamma=(\sqrt{5}-1)/2$ on the unit circle, and any system topologically isomorphic to the Fibonacci system must have an incidence matrix with Perron-Frobenius eigenvalue the golden mean or a power of the golden mean (cf.~\cite[Section 7.3.2]{Pytheas}). Thus, modulo a permutation of the symbols, on an alphabet of two symbols the incidence matrix with Perron-Frobenius eigenvalue the golden mean has to be $\left( \begin{smallmatrix} 1 \, 1\\ 1\, 0 \end{smallmatrix} \right).$ There are two substitutions with this incidence matrix: Fibonacci $\varphi$, and reverse Fibonacci ${\varphi_{\textsc{\tiny R}}}$, defined by $${\varphi_{\textsc{\tiny R}}}: \qquad 0\rightarrow\,10,\;1\rightarrow 0.$$ These two substitutions are essentially different, as they have different standard forms (see \cite{Dekking-2016} for the definition of standard form).
However, it follows directly from Tan Bo's criterion in his paper \cite{Tan}
that ${\varphi_{\textsc{\tiny R}}}$ and $\varphi$ have the same language\footnote{This follows also directly from the well-known formula ${\varphi_{\textsc{\tiny R}}}^{\!2n}(0)\,10=01\,\varphi^{2n}(0)$ for all $n\ge1$ (see \cite[p.17]{Berstel}).}, but then they also generate the same system. Conclusion: the conjugacy class of Fibonacci with Perron-Frobenius eigenvalue the golden mean restricted to two symbols consists of Fibonacci and reverse Fibonacci.
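The identity quoted in the footnote is easy to verify by machine for small $n$; the following Python sketch performs this (finite) check.
\begin{verbatim}
# Check phi_R^{2n}(0) 10 = 01 phi^{2n}(0) for n = 1,...,7.
phi   = {"0": "01", "1": "0"}
phi_R = {"0": "10", "1": "0"}

def iterate(sub, k, word="0"):
    for _ in range(k):
        word = "".join(sub[c] for c in word)
    return word

for n in range(1, 8):
    print(n, iterate(phi_R, 2 * n) + "10" == "01" + iterate(phi, 2 * n))
\end{verbatim}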
\section{Maximal equicontinuous factors}\label{sec:equi}
Let $T$ be the mapping from the unit circle $Z$ to itself defined by $Tz=z+\gamma \mod 1$, where $\gamma$ is the small golden mean. This, being an irrational rotation, is indeed an equicontinuous dynamical system -- the usual metric on the circle is invariant under the mapping.
The factor map from the Fibonacci dynamical system $(X_\varphi,\sigma)$ to $(Z,T)$ is given by requiring that the cylinder sets $\{x:x_0=0\}$ and $\{x:x_0=1\}$ are mapped to the intervals $[0,\gamma]$ and $[\gamma,1]$ respectively, and requiring equivariance.
If we take any point of $Z$ not of the form $n\gamma \mod 1$ ($n$ any integer), then the corresponding sequence is unique. If, however, we use an element in the orbit of $\gamma$, then for this point there will be two codes, a ``left'' one and a ``right'' one.
We want to understand more generally why two or more points map to a single point. Suppose $x$ and $y$ are two
points of a system $(X,\sigma)$ that map to two points $x'$ and $y'$ in an equicontinuous factor. Then for any power of $T$ (the map of the factor system) the distance between $T^n(x')$ and $T^n(y')$ is just equal to the distance between $x'$ and $y'$. So $x$ and $y$ map to the same point $x'$ if either all $x_n$ and $y_n$ are equal for sufficiently large $n$, or all $x_n$ and $y_n$ are equal for sufficiently large $-n$. We say that $x$ and $y$ are respectively \emph{right asymptotic} or \emph{left asymptotic}.
A pair of letters $(b,a)$ is called a \emph{cyclic pair} of a substitution $\alpha$ if $ba$ is an element of the language of $\alpha$, and for some integer $m$ $$\alpha^m(b)=\dots b \quad{\rm and}\quad \alpha^m(a)=a\dots. $$ Such a pair gives an infinite sequence of words $\alpha^{mk}(ba)$ in the language of $\alpha$, which---if properly centered---converge to an infinite word which is a fixed point of $\alpha^m$. With a slight abuse of notation we denote this word by $\alpha^{\infty}(b)\cdot \alpha^{\infty}(a)$.
For the Fibonacci substitution $\varphi$, $(0,0)$ and $(1,0)$ are cyclic pairs, and the two synchronized points $\varphi^\infty(0)\cdot\varphi^\infty(0)$ and $\varphi^\infty(1)\cdot\varphi^\infty(0)$ are right asymptotic, so they map to the same point in the equicontinuous factor.
Because of these considerations we now define $Z$-triples. Let $\eta$ be a primitive substitution. Call three points $x$, $y$, and $z$ in $X_\eta$ a $Z$-\emph{triple} if they are generated by three cyclic pairs
of the form $(b,a),\, (b,d)$ and $(c,d)$, where $a,b,c,d \in A$. Then $x$, $y$, and $z$ are mapped to the same point in the maximal equicontinuous factor.
\begin{theorem}\label{th:Zth} \; Let $(X_\eta,\sigma)$ be any substitution dynamical system topologically isomorphic to the Fibonacci dynamical system. Then there do not exist $Z$-triples in $X_\eta$. \end{theorem}
\noindent \emph{Proof:} Since $(X_\eta,\sigma)$ is topologically isomorphic to $(X_\varphi,\sigma)$, its maximal equicontinuous factor is $(Z,T)$, and the factor map is at most 2-to-1. Suppose $(b,a),\, (b,d)$ and $(c,d)$ give a $Z$-triple $x,y,z$ in $X_\eta$. Since $$x=\eta^\infty(b)\cdot\eta^\infty(a), \quad y=\eta^\infty(b)\cdot \eta^\infty(d)$$ are left asymptotic, and $y=\eta^\infty(b)\cdot \eta^\infty(d)$ and $z=\eta^\infty(c)\cdot\eta^\infty(d)$ are right asymptotic, the three points $x$, $y$ and $z$ would all be mapped to the same point of $(Z,T)$, contradicting that the factor map is at most 2-to-1. $\Box$
\noindent {\bf Example} Let $\eta$ be the substitution given by $$\eta:\qquad 1\rightarrow 12,\,
2\rightarrow 34,\,
3\rightarrow 5 ,\,
4\rightarrow 1,\,
5\rightarrow 23. $$
Then $\eta$ generates a system that is topologically isomorphic to the Fibonacci system ($\eta$ is the substitution at the end of Section~\ref{sec:reshaping}). Quite remarkably, $\eta^6$ admits 5 fixed points generated by the cyclic pairs $(1, 2),\, (2, 3),\, (3, 1),\, (4, 5)$ and $(5, 1)$.
Note however, that no three of these form a $Z$-triple.
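The five cyclic pairs can be found mechanically. The following Python sketch searches the length-two words of the language (read off from a long prefix of the fixed point, which suffices for this linearly recurrent word) for pairs $(b,a)$ with $\eta^6(b)$ ending in $b$ and $\eta^6(a)$ starting with $a$.
\begin{verbatim}
# Recover the cyclic pairs of eta with m = 6.
eta = {"1": "12", "2": "34", "3": "5", "4": "1", "5": "23"}

def iterate(word, k):
    for _ in range(k):
        word = "".join(eta[c] for c in word)
    return word

w = iterate("1", 15)                      # long prefix of the fixed point
two_factors = {w[i:i + 2] for i in range(len(w) - 1)}

good_b = {a for a in "12345" if iterate(a, 6).endswith(a)}
good_a = {a for a in "12345" if iterate(a, 6).startswith(a)}
print(sorted(ba for ba in two_factors
             if ba[0] in good_b and ba[1] in good_a))
# ['12', '23', '31', '45', '51']
\end{verbatim}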
\section{Fibonacci matrices}\label{sec:mat}
Let $\mathcal{F}_r$ be the set of all non-negative primitive $r\times r$ integer matrices, with Perron-Frobenius eigenvalue the golden mean $\Phi = (1+\sqrt{5})/2$.\\
We have seen already that $\mathcal{F}_2$ essentially consists of the single matrix $\left( \begin{smallmatrix} 1 \, 1\\ 1\, 0 \end{smallmatrix} \right).$
\begin{theorem}\label{th:F3} The class $\mathcal{F}_3$ essentially consists of the three matrices
\qquad $ \left( \begin{smallmatrix} 0\, 1\, 0\\ 1\, 0\, 1\\ 1\, 1\, 0 \end{smallmatrix} \right),\;
\left( \begin{smallmatrix} 0\, 1\, 0\\ 0\, 0 \,1 \\ 1\, 2\,0 \end{smallmatrix} \right),\;
\left( \begin{smallmatrix} 0\, 1\, 0\\ 1 \,0\, 1\\ 1\, 0\, 1 \end{smallmatrix} \right).$ \end{theorem}
Here essentially means that in each class of 6 matrices corresponding to the permutations of the $r=3$ symbols, one representing member has been chosen (actually corresponding to the smallest standard form of the substitutions having that matrix).
\emph{Proof:} Let $M$ be a non-negative primitive $3\times 3$ integer matrix, with Perron-Frobenius eigenvalue the golden mean $\Phi = (1+\sqrt{5})/2$. We write\\ [-0.7cm]
$$ M= \left( \begin{matrix} a\; b\; c\\ d\; e\; f\\ g\; h\; i \end{matrix} \right).$$ The characteristic polynomial of $M$ is $\chi_M(u)=u^3-Tu^2+Fu-D,$ where $T=a+e+i$ is the trace of $M$, and \begin{equation}\label{eq:FandD} F=ae+ai+ei-bd-cg-fh,\quad D=aei+bfg+cdh-afh-bdi-ceg.\quad \end{equation} Of course $D$ is the determinant of $M$. Since $\Phi$ is an eigenvalue of $M$, and we consider matrices over the integers, $u^2-u-1$ has to be a factor of $\chi_M$. Performing the division we obtain $$\chi_M(u)=\big(u-(T-1)\big)\big(u^2-u-1\big),$$ and requiring that the remainder vanishes, yields
\begin{equation}\label{eq:DF} F=T-2,\quad D=1-T. \end{equation} Note that the third eigenvalue equals $\lambda_3=T-1$. From the Perron-Frobenius theorem it follows that this has to be smaller than $\Phi$ in absolute value, and since it is an integer, only $\lambda_3=-1, 0, 1$ are possible. Thus there are only 3 possible values for the trace of $M$: $T=0,\, T=1$ and $T=2$.
The smallest row sum of $M$ has to be smaller than the PF-eigenvalue $\Phi$ (a well-known property of primitive non-negative matrices). Therefore $M$ has to have one of the rows $(1,0,0)$, $(0,1,0)$ or $(0,0,1)$. Also, because of primitivity of $M$, the 1 in this row cannot be on the diagonal. By performing permutation conjugacies of the matrix we may then assume that $M$ has the form
$$ M= \left( \begin{matrix} 0\;\; 1\;\; 0\\ d\;\; e\;\; f\\ g\;\; h\;\; i \end{matrix} \right).$$
The equation \eqref{eq:FandD} combined with \eqref{eq:DF} then simplifies to
\begin{equation}\label{eq:DF2} T-2=F=ei-d-fh, \quad 1-T=D=fg-di. \end{equation}
\noindent{\bf Case $\mathbf{{\emph T}=0}$}
\noindent In this case $e=i=0$, so \eqref{eq:DF2} simplifies to
\begin{equation}\label{eq:T0F} -2=F=-d-fh, \quad 1=D=fg. \end{equation} Then $f=g=1$, and so $d+h=2$. This gives three possibilities leading to the matrices $ \left( \begin{smallmatrix} 0\, 1\, 0\\ 1\, 0\, 1\\ 1\, 1\, 0 \end{smallmatrix} \right),\;
\left( \begin{smallmatrix} 0\, 1\, 0\\ 0\, 0 \,1 \\ 1\, 2\,0 \end{smallmatrix} \right),\;
\left( \begin{smallmatrix} 0\, 1\, 0\\ 2 \,0\, 1\\ 1\, 0\, 0 \end{smallmatrix} \right).$
\noindent Here the third matrix is permutation conjugate to the second one.
\noindent{\bf Case $\mathbf{{\emph T}=1}$}
\noindent In this case $e=1, i=0$, or $e=0, i=1$.
\noindent First case: $e=1, i=0$. Now \eqref{eq:DF2} simplifies to
\begin{equation}\label{eq:T1F} -1=F=-d-fh, \quad 0=D=fg. \end{equation} Then $g=0$, since $f=0$ is not possible because of primitivity. But $g=0$ also contradicts primitivity, as $d+fh=1$, gives either $d=0$ or $h=0$.
\noindent Second case: $e=0, i=1$. Now \eqref{eq:DF2} simplifies to \begin{equation}\label{eq:T1F2} -1=F=-d-fh, \quad 0=D=fg-d. \end{equation} Then $d=0$ would imply that $f=h=1$. But, as $g>0$ because of primitivity, we get a contradiction with $fg=d=0$.
On the other hand, if $d>0$, then $d=1$ and $f=0$ or $h=0$. But $fg=d=1$ gives $f=g=1$, so $h=0$, and we obtain the matrix
$ \left( \begin{smallmatrix} 0\, 1\, 0\\ 1\, 0\, 1\\ 1\, 0\, 1 \end{smallmatrix} \right).$
\noindent{\bf Case $\mathbf{{\emph T}=2}$}
\noindent In this case \eqref{eq:DF2} becomes
\begin{equation}\label{eq:T2F} 0=F=ei-d-fh, \quad -1=D=fg-di. \end{equation} Since $ei=0$ would force $d=fh=0$ by the first equation, and then $D=fg\geq 0$ would contradict $D=-1$, what remains is $e=i=1$. Then, substituting $d=fg+1$ in the first equation gives $0=f(g+h)$. But both $f=0$ and $g=h=0$ contradict primitivity.
Final conclusion: there are three matrices in $\mathcal{F}_3$, modulo permutation conjugacies.
$\Box$
\noindent {\bf Remark} It is well-known that the PF-eigenvalue lies between the smallest and the largest row sum of the matrix. One might wonder whether this largest row sum is bounded for the class $\mathcal{F}=\cup_r\mathcal{F}_r$. Actually the class $\mathcal{F}_r$ contains matrices with some row sum equal to $r-1$ for all $r\ge 3$:
take the matrix $M$ with $M_{1,j}=1$ for $j=2,\dots,r$, $M_{2,2}=1$ and
$M_{i,{i+1}}=1$, for $i=2,\dots,r-1$, $M_{r,1}=1$ and all other entries 0.
Now note that $(1, \Phi,\dots,\Phi)$ is a left eigenvector of $M$ with eigenvalue $\Phi$ (since $\Phi^2=1+\Phi$). Since the eigenvector has all entries positive, it must be a PF-eigenvector (a well-known property of primitive non-negative matrices), and hence $M$ is in $\mathcal{F}_r$.
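Both the theorem and the remark are easy to confirm numerically. The following Python sketch checks that the three matrices of Theorem~\ref{th:F3} and the matrices described in the remark have Perron-Frobenius eigenvalue $\Phi$ (a numerical check, not a proof; the matrix of the remark is reconstructed here from its verbal description).
\begin{verbatim}
# Numerical check of the PF-eigenvalue for the matrices of the theorem
# and for the r x r matrices described in the remark.
import numpy as np

Phi = (1 + 5 ** 0.5) / 2
F3 = [np.array([[0, 1, 0], [1, 0, 1], [1, 1, 0]]),
      np.array([[0, 1, 0], [0, 0, 1], [1, 2, 0]]),
      np.array([[0, 1, 0], [1, 0, 1], [1, 0, 1]])]
print([bool(np.isclose(max(abs(np.linalg.eigvals(M))), Phi)) for M in F3])

def remark_matrix(r):                 # row sums: r-1, 2, 1, ..., 1
    M = np.zeros((r, r), dtype=int)
    M[0, 1:] = 1                      # M_{1,j} = 1 for j = 2, ..., r
    M[1, 1] = 1                       # M_{2,2} = 1
    for i in range(1, r - 1):         # M_{i,i+1} = 1 for i = 2, ..., r-1
        M[i, i + 1] = 1
    M[r - 1, 0] = 1                   # M_{r,1} = 1
    return M

for r in range(3, 9):
    lam = max(abs(np.linalg.eigvals(remark_matrix(r))))
    print(r, bool(np.isclose(lam, Phi)))
\end{verbatim}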
\section{The 3-symbol case}\label{sec:C2}
\begin{theorem} There are two primitive injective substitutions
$\eta$ and $\zeta$ on a three letter alphabet $\{a,b,c\}$ that generate dynamical systems topologically isomorphic to the Fibonacci system. These are given\footnote{Standard forms: replace $a,b,c$ by $1,2,3$.} by\\[-.4cm]
$$\eta(a)=b,\,\eta(b)=ca,\, \eta(c)=ba,\quad \zeta(a)=b,\,\zeta(b)=ac,\, \zeta(c)=ab.$$ \end{theorem}
\noindent\emph{Proof:} The possible matrices for candidate substitutions are given in Theorem~\ref{th:F3}. Let us consider the first matrix $ \left( \begin{smallmatrix} 0\, 1\, 0\\ 1\, 0\, 1\\ 1\, 1\, 0 \end{smallmatrix} \right)$.
\noindent There are four substitutions with this matrix as incidence matrix: \begin{align*} \eta_1: \; a& \rightarrow b,\, b\rightarrow ca,\,c\rightarrow ba,& \eta_2: \; a \rightarrow b,\, b\rightarrow ca,\,c\rightarrow ab,\\ \eta_3: \; a& \rightarrow b,\, b\rightarrow ac,\,c\rightarrow ba,& \eta_4: \; a \rightarrow b,\, b\rightarrow ac,\,c\rightarrow ab, \end{align*} Here $\eta_1=\eta$. To prove that the system of $\eta$ is conjugate to the Fibonacci system consider the letter-to-letter map $\pi$ given by $$\pi(a)=1,\quad \pi(b)=\pi(c)=0.$$ Then $\pi$ maps $X_\eta$ onto $X_\varphi$, because $\pi\eta=\varphi\pi$. Moreover, $\pi$ is a conjugacy, since if $x\ne y$ and $\pi(x)=\pi(y)$, then there is a $k$ such that $x_k=b$ and $y_k=c$. But the words of length 2 in the language of $\eta$ are $ab, ba, bc$ and $ca$, implying that $x_{k-1}=a$ and $y_{k-1}=b$, contradicting $\pi(x)=\pi(y)$.
Since $\zeta$ is the time reversal of $\eta$, and we know already that the system of ${\varphi_{\textsc{\tiny R}}}$ is conjugate to the Fibonacci system, the system generated by $\eta_4=\zeta={\eta_{\textsc{\tiny R}}}$ is conjugate to the Fibonacci system.
It remains to prove that $\eta_2$ and $\eta_3$ generate systems that are \emph{not} conjugate to the Fibonacci system. Again, since $\eta_3$ is the time reversal of $\eta_2$, it suffices to do this for $\eta_2$.
The language of $\eta_2$ contains the words $ab, bb$ and $bc$. These words generate fixed points of $\eta_2^6$ in the usual way. But these three fixed points form a $Z$-triple, implying that the system of $\eta_2$ can not be topologically isomorphic to the Fibonacci system
(see Theorem~\ref{th:Zth}).
The next matrix we have to consider is $\left( \begin{smallmatrix} 0\, 1\, 0\\ 0\, 0 \,1 \\ 1\, 2\,0 \end{smallmatrix} \right).$
There are three substitutions with this matrix as incidence matrix: \begin{align*} \eta_1: \; a& \rightarrow b,\, b\rightarrow c,\,c\rightarrow abb,& \eta_2: \; a \rightarrow b,\, b\rightarrow c,\,c\rightarrow bab,\\ \eta_3: \; a& \rightarrow b,\, b\rightarrow c,\,c\rightarrow bba. \end{align*} Again, the system of $\eta_1$ contains a $Z$-triple generated by $ab, bb$ and $bc$. So this system is not conjugate to the Fibonacci system, and neither is the one generated by $\eta_3$ (time reversal of $\eta_1$). The system generated by $\eta_2$ behaves similarly to the Fibonacci system, \emph{but} it has an eigenvalue $-1$ (it has a two-point factor via the projection $a,c\rightarrow 0$, $b\rightarrow 1$).
Finally, we have to consider the matrix $\left( \begin{smallmatrix} 0\, 1\, 0\\ 1 \,0\, 1\\ 1\, 0\, 1 \end{smallmatrix} \right).$
There are four substitutions with this matrix as incidence matrix:
\begin{align*} \eta_1: \; a& \rightarrow b,\, b\rightarrow ac,\,c\rightarrow ac,& \eta_2: \; a \rightarrow b,\, b\rightarrow ac,\,c\rightarrow ca,\\ \eta_3: \; a& \rightarrow b,\, b\rightarrow ca,\,c\rightarrow ac,& \eta_4: \; a \rightarrow b,\, b\rightarrow ca,\,c\rightarrow ca. \end{align*} Here $\eta_1$ and $\eta_4$ generate systems conjugate to the Fibonacci system, but the substitutions are not injective. The substitution $\eta_2$ has all 9 words of length 2 in its language, and all of these generate fixed points of $\eta_2^6$. So the system of $\eta_2$ is certainly not topologically isomorphic to the Fibonacci system. The proof is finished, since $\eta_3$ is the time reversal of $\eta_2$. $\Box$
\section{Letter-to-letter maps}\label{sec:L2L}
By the Curtis-Hedlund-Lyndon theorem all members in the conjugacy class of the Fibonacci system can be obtained by applying letter-to-letter maps $\pi$ to $N$-block presentations $(X^{[N]},\sigma)$. Here we analyse the case $N=2$. The 2-block presentation of the Fibonacci system is generated by (see Section~\ref{sec:Nblock}) the 2-block substitution $$\hat{\varphi}_2(1)=12 \quad \hat{\varphi}_2(2)=3, \quad \hat{\varphi}_2(3)=12.$$ There are (modulo permutations of the symbols) three letter-to-letter maps from $\{1,2,3\}$ to $\{0,1\}$. Two of these project onto the Fibonacci system, as they are projections on the first respectively the second letter of the 2-blocks. The third is $\pi$ given by $$\pi(1)=0,\quad \pi(2)=0, \quad \pi(3)=1.$$ What is the system $(Y,\sigma)$ with $Y=\pi\big(X^{[2]}\big)$?
First note that $(Y,\sigma)$ is conjugate to the Fibonacci system since $\pi$ is clearly invertible. Secondly, we note that the points in $Y$ can be obtained by doubling the 0's in the points of the Fibonacci system. This holds because $\pi(12)=00,\, \pi(3)=1$, but also $$\pi(\hat{\varphi}_2(12))=\pi(123)=001,\;\pi(\hat{\varphi}_2(3))=\pi(12)=00.$$ Thirdly, we claim that the system $(Y,\sigma)$ cannot be generated by a substitution. This follows from the fact that the sequences in $Y$ contain the word 0000, but no other fourth powers. This is implied by the $4^{\rm th}$ power freeness of the Fibonacci word, proved in \cite{Karhumaki}. (Indeed, a substitution generating $Y$ would map the fourth power 0000 to fourth powers of unbounded length in the language.)
A fourth property is that the sequence $y^+$ obtained by doubling the 0's in the infinite Fibonacci word $w_{\rm F}$ is given by $$y^+_n=[(n+2)\Phi]-[n\Phi]-[2\Phi], \qquad {\rm for\;} n\ge 1,$$ according to \cite{Wolfdieter} and \cite{OEIS-Fib} (here $[\cdot]$ denotes the floor function).
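The following Python sketch compares the first few hundred values of this formula with the sequence obtained by doubling the 0's of $w_{\rm F}$ (a finite check only).
\begin{verbatim}
# Compare y^+ (0's of the Fibonacci word doubled) with the floor formula.
import math

Phi = (1 + math.sqrt(5)) / 2
w = "0"
for _ in range(20):                            # long prefix of w_F
    w = "".join("01" if c == "0" else "0" for c in w)
y = w.replace("0", "00")                       # double every 0

ok = all(int(y[n - 1]) ==
         math.floor((n + 2) * Phi) - math.floor(n * Phi)
         - math.floor(2 * Phi)
         for n in range(1, 500))
print(ok)                                      # True
\end{verbatim}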
Finally we remark that Durand shows in the paper \cite{Durand} that the Fibonacci system is prime \emph{modulo topological isomorphism}, and ignoring finite factors and rotation factors. This implies that all the projections are automatically invertible, if the projected system is not finite.
\end{document}
\begin{document}
\title{Equidistribution estimates for eigenfunctions and eigenvalue bounds for random operators}
\author{D. Borisov}
\address{Institute of Mathematics USC RAS, Chernyshevskii str., 112,\\ Ufa, 450000, Russia\\ \& Bashkir State Pedagogical University, October rev. st., 3a, \\ Ufa, 450008, Russia,\\ E-mail: [email protected]\\ matem.anrb.ru \& www.bspu.ru}
\author{M.~Tautenhahn}
\address{Fakult\"at f\"ur Mathematik, Reichenhainer Str. 41,\\ Chemnitz, D-09126, Germany,\\
www.tu-chemnitz.de/\~{}mtau}
\author{I.~Veseli\'c}
\address{Fakult\"at f\"ur Mathematik, Reichenhainer Str. 41,\\ Chemnitz, D-09126, Germany,\\ www.tu-chemnitz.de/stochastik/}
\begin{abstract} We discuss properties of $L^2$-eigenfunctions of Schr\"odinger operators and elliptic partial differential operators. The focus is set on unique continuation principles and equidistribution properties. We review recent results and announce new ones. \end{abstract}
\keywords{scale-free unique continuation property, equidistribution property, observability estimate, uncertainty relation, Carleman estimate, Schr\"odinger operator, elliptic differential equation}
\bodymatter \section{Introduction} In this note we present recent results in Harmonic Analysis for solutions of (time-independent) Schr{\"o}dinger equations and other partial differential equations. They are motivated by interest in techniques relevant for proving localization for random Schr{\"o}dinger operators. The Harmonic Analysis results which we present are a quantitative unique continuation principle and a scale-uniform equidistribution property for eigenfunctions. These results, and variants thereof, go under various names, depending on the particular field of mathematics: They are called observability estimate, uncertainty relation, scale-free unique continuation principle, or local positive definiteness. The latter term signifies that a self-adjoint operator is (strictly) positive definite when restricted to a relevant subspace, while it is not so on the whole Hilbert space. For the purpose of motivation we discuss this property in the next section. \par The term \emph{localization} refers to the phenomenon that quantum Hamiltonians describing the movement of electrons in certain disordered media exhibit pure point spectrum in appropriately specified energy regions. The corresponding eigenfunctions decay exponentially in space. The (time-dependent) wavepackets describing electrons stay localized essentially in a compact region of space for all times. Nota bene, all mentioned properties hold \emph{almost surely}. This is natural in the context of random operators. \par Important partial results for deriving localization are Wegner estimates. These are bounds on the expected number of eigenvalues in a bounded
energy interval of a random Schr{\"o}dinger operator restricted to a box. \par The localization problem has been studied for other classes of random operators beyond those of Schr{\"o}dinger type. Examples are random divergence type operators, see e.g.\ Refs.~\citenum{FigotinK-96} and \citenum{Stollmann-98}. These are partial differential operators with randomness in the coefficients of the higher order terms. In particular, the second order term is no longer the Laplacian, but a variable coefficient operator. In this context one is again led to consider the above mentioned questions of Harmonic Analysis for eigenfunctions of differential operators. In this note we present an exposition of recently published results, and an announcement of a quantitative unique continuation principle and an equidistribution estimate for eigenfunctions for a class of elliptic operators with variable coefficients.
\subsection{Motivation: Moving and lifting of eigenvalues} \label{ss:motivation}
Here we discuss some aspects of eigenvalue perturbation theory. It will provide an accessible explanation of why one is interested in the results presented in Sections~\ref{sec:schroedinger} and \ref{sec:elliptic} below in the context of random Schr{\"o}dinger operators and elliptic differential operators, respectively. In fact, to illustrate the main questions it will be for the moment completely sufficient to restrict our attention to the finite dimensional situation, i.e.\ to perturbation theory for finite symmetric matrices. The focus will be on how (local) positive definiteness of the perturbation relates to lifting of eigenvalues. \par Let $A$ and $B$ be symmetric $n \times n$ matrices, with $B\geq b>0$ positive definite. The variational min-max principle for eigenvalues shows that for any $k \in \{1,\dots, n\}$ and $t\geq 0$ \begin{equation} \label{eq:positive_definite_perturbation} \lambda_k(A+tB) \geq \lambda_k(A) + b \, t \end{equation} where $\lambda_k(M)$ denotes the $k$th lowest eigenvalue, counting multiplicities, of a symmetric matrix $M$. Note that the dimension $n$ does not enter in the bound \eqref{eq:positive_definite_perturbation}. Without the positive definiteness assumption on $B$ this universal bound will fail, most blatantly if \begin{equation*} A =\begin{pmatrix} A_1 & \ 0 \\ 0 & A_2 \end{pmatrix} \quad \text{and} \quad B =\begin{pmatrix} \operatorname{Id} & \ 0 \\ 0 & -\operatorname{Id} \end{pmatrix} . \end{equation*} In this case, all eigenvalues $\lambda_k(A+tB)$ \emph{will move}, even with constant speed w.r.t.\ the variable $t$, albeit in different directions. If $B$ is singular, some eigenvalues may not move at all. However, for appropriate classes of symmetric matrices $A$, and of positive semidefinite matrices $B$, one may still aim to prove \begin{equation} \label{eq:positive_semidefinite_perturbation} \forall \, t\geq 0, k \in \{1,\dots, n\} \, \exists \, \kappa >0 \text{ such that } \lambda_k(A+tB) \geq \lambda_k(A) + \kappa t \end{equation} Note, however, that $\kappa $ is now not a uniform bound but depends on \begin{itemlist}
\item the class of symmetric matrices from which $A$ is chosen,
\item the class of semidefinite matrices from which $B$ is chosen,
\item the range from which the coupling $t$ is chosen, and
\item the range from which the index $k \in \{1,\dots, n\}$ is chosen. \end{itemlist}
In the case of random operators or matrices one is interested in the situation where \begin{equation} \label{eq:multiparameter_family} A(\omega) =A_0+\sum_{j \in Q} \omega_j B_j =\Big(A_0+\sum_{j \in Q, j\neq 0} \omega_j B_j \Big)+ \omega_0 B_0 \end{equation} is a multi-parameter pencil. Here $Q$ is some subset of $\mathbb{Z}^d$ containing $0$. The real variables $\omega_j$ model random coupling constants determining the strength of the perturbation $B_j$ in each configuration $\omega=(\omega_j)_{j \in Q}$. Now, \eqref{eq:multiparameter_family} already suggests writing $A(\omega)$ as \[ A(\omega_0^\perp)+tB \quad\text{where} \quad t=\omega_0,\ B=B_0,\ \text{and} \ \omega_0^\perp=(\omega_j)_{j \in Q, j\neq0} . \] This highlights that if we consider $A(\omega)$ as a function of the single variable $t=\omega_0$, it is clearly a one-parameter family of operators, albeit the ``unperturbed part'' $A(\omega_0^\perp)$ of $A(\omega)=A(\omega_0^\perp)+tB$ is not a single operator, but varies over the ensemble $(A(\omega_0^\perp))_{\omega_0^\perp}$. To have a useful version of \eqref{eq:positive_semidefinite_perturbation} in this situation, the constant $\kappa $ needs to have a uniform lower bound $\inf_{A} \kappa $ where $A=A(\omega_0^\perp)$ varies over all matrices in the ensemble. \par In what follows we present rigorous results of the type \eqref{eq:positive_semidefinite_perturbation}, where $A$ and $B$ are no longer finite matrices, but differential and multiplication operators. The relevant operators all have compact resolvent, ensuring that the entire spectrum consists of eigenvalues.
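For a concrete and entirely elementary illustration of \eqref{eq:positive_definite_perturbation}, one can test the bound numerically; the following Python sketch does so for an arbitrary choice of symmetric $A$ and positive definite $B$ (test data only, not part of the results discussed here).
\begin{verbatim}
# Numerical illustration of lambda_k(A + tB) >= lambda_k(A) + b t
# for symmetric A and positive definite B >= b > 0.
import numpy as np

rng = np.random.default_rng(0)
n = 6
A = rng.standard_normal((n, n)); A = (A + A.T) / 2       # symmetric
C = rng.standard_normal((n, n)); B = C @ C.T + 0.5 * np.eye(n)

b = min(np.linalg.eigvalsh(B))          # B >= b > 0
for t in [0.0, 0.5, 1.0, 2.0]:
    lhs = np.linalg.eigvalsh(A + t * B) # eigenvalues, increasing order
    rhs = np.linalg.eigvalsh(A) + b * t
    print(t, bool(np.all(lhs >= rhs - 1e-10)))
\end{verbatim}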
\section{Equidistribution property of Schr\"odinger eigenfunctions} \label{sec:schroedinger} The following result is taken from Ref.~\citenum{Rojas-MolinaV-13}. It is an equidistribution estimate for Schr{\"o}dinger eigenfunctions, which is uniform w.r.t.\ the naturally arising length scales, and has strong implications for the spectral theory of random Schr\"odinger operators. \par We fix some notation. For $L>0$ we denote by $\Lambda_L = (-L/2 , L/2)^d$ a cube in $\mathbb{R}^d$. For $\delta>0$ the open ball centered at $x\in \mathbb{R}^d$ with radius $\delta$ is denoted by $B(x, \delta)$. For a sequence of points $(x_j)_j$ indexed by $j \in \mathbb{Z}^d$ we denote the collection of balls $\cup_{j \in \mathbb{Z}^d} B(x_j , \delta) $ by $S$ and its intersection with $\Lambda_L$ by $S_L$.
We will be dealing with certain subspaces of the standard second order Sobolev space $W^{2,2}(\Lambda_L)$ on the cube. Let $\Delta$ be the $d$-dimensional Laplacian. Its restriction to the cube $\Lambda=\Lambda_L$ needs boundary conditions to be self-adjoint. The domain of the Dirichlet Laplacian will be denoted by $\mathcal{D}(\Delta_{\Lambda,0})$ and the domain of the Laplacian with periodic boundary conditions by $\mathcal{D}(\Delta_{\Lambda,\mathrm{per}})$. Let $V \colon \mathbb{R}^d\to \mathbb{R}$ be a bounded measurable function, and $H_L = (-\Delta + V)_{\Lambda_L} $ a Schr\"odinger operator on the cube $\Lambda_L$ with Dirichlet or periodic boundary conditions. The corresponding domains are still $\mathcal{D}(\Delta_{\Lambda,0})$ and $ \mathcal{D}(\Delta_{\Lambda,\mathrm{per}})$, respectively. Note that we denote a multiplication operator by the same symbol as the corresponding function. \par The following theorem was proven in Ref.~\citenum{Rojas-MolinaV-13}.
\begin{theorem}[Scale-free unique continuation principle] \label{thm:RojasVeselic}
Let $\delta, K_{V} > 0$. Then there exists $C_{\rm sfUC} \in (0,\infty)$ such that for all
$L \in 2\mathbb{N}+1 $, all measurable $V : \mathbb{R}^d \to [-K_{V} , K_{V}]$, all real-valued $\psi \in
\mathcal{D}(\Delta_{\Lambda,0}) \cup \mathcal{D}(\Delta_{\Lambda,\mathrm{per}})$
with $(-\Delta + V)\psi = 0$ almost everywhere on $\Lambda_L$, and all sequences $(x_j)_{j \in \mathbb{Z}^d} \subset \mathbb{R}^d$,
such that for all $j \in \mathbb{Z}^d$ the ball $B(x_j , \delta) \subset \Lambda_1 + j$, we have \begin{equation} \label{eq:observability} \int_{S_L} \psi^2 \geq C_{\rm sfUC} \int_{\Lambda_L} \psi^2 . \end{equation} \end{theorem}
\begin{figure}
\caption{Examples of collections of balls $S_L$ within region $\Lambda_L\subset \mathbb{R}^2$.}
\label{fig:equidistributed}
\end{figure}
The value of the result is not in the \emph{existence} of the constant $C_{\rm sfUC}$, but in the \emph{quantitative control} of the dependence of $C_{\rm sfUC}$ on parameters entering the model. The very formulation of the theorem states that $C_{\rm sfUC}$ is independent of the position of the balls $B(x_j,\delta)$ within $\Lambda_1 +j$, and independent of the scale $L\in2\mathbb{N} +1$. From the estimates given in Section~2 of Ref.~\citenum{Rojas-MolinaV-13} one infers that $C_{\rm sfUC}$ depends on the potential $V$ only through the norm $\lVert V \rVert_\infty$ (on an exponential scale), and it depends on the small radius $\delta>0$ polynomially, i.e.\ $C_{\rm sfUC}\gtrsim \delta^N$, for some $N\in\mathbb{N}$ which depends on the dimension $d$ and $\lVert V \rVert_\infty$. \par The theorem states a property of functions in the kernel of the operator. It is easily applied to eigenfunctions corresponding to other eigenvalues since \[
H_L\psi=E\psi \Leftrightarrow (H_L-E)\psi=0 . \] As a consequence of the energy shift the constant $K_{V}$ has to be replaced with $K_{V-E}$, which may be larger than $K_{V}$. It may always be estimated by
$K_{V-E}\leq K_V+|E|$. \par There is a very natural question, supported by earlier results and spelled out in Ref.~\citenum{Rojas-MolinaV-13}, namely whether the following generalisation of Theorem~\ref{thm:RojasVeselic} holds:
Given $\delta >0$, $K\geq0$ and $E\in\mathbb{R}$ there is a constant $C>0$ such that for all measurable $ V\colon \mathbb{R}^d \rightarrow [-K,K] $, all $L \in 2\mathbb{N}+1$, and all sequences $(x_j)_{j\in\mathbb{Z}^d} \subset \mathbb{R}^d$ with $B(x_j,\delta) \subset\Lambda_1 +j$ for all $j \in \mathbb{Z}^d$ we have \begin{equation} \label{eq:uncertainty}
\chi_{(-\infty,E]} (H_L) \, W_L \, \chi_{(-\infty,E]} (H_L) \geq C~ \chi_{(-\infty,E]} (H_L) , \end{equation} where $W_L=\chi_{S_L}$ is the indicator function of $S_L$ and $\chi_{I} (H_L)$ denotes the spectral projector of $H_L$ onto the interval $I$. Here $C=C_{\delta, K, E}$ is determined by $\delta, K, E$ alone. \par Klein obtained a positive answer to the question for sufficiently short subintervals of $(-\infty,E]$. \begin{theorem}[Ref.~\citenum{Klein-13}] \label{thm:Klein-13} Let $d \in \mathbb{N}$, $E\in \mathbb{R}$, $\delta\in (0,1/2]$ and $V:\mathbb{R}^d \to \mathbb{R}$ be measurable and bounded. There is a constant $M_d>0$ such that if we set \[
\gamma = \frac{1}{2} \delta^{M_d \bigl(1 + (2\lVert V \rVert_\infty + E)^{2/3}\bigr)} , \] then for all energy intervals $I\subset (-\infty, E]$ with length bounded by $2\gamma$, all $L \in 2\mathbb{N}+1$, $L\geq 72 \sqrt{d}$ and all sequences $(x_j)_{j\in\mathbb{Z}^d} \subset \mathbb{R}^d$ with $B(x_j,\delta) \subset\Lambda_1 +j$ for all $j \in \mathbb{Z}^d$ \begin{equation}
\chi_{I} (H_L) \, W_L \, \chi_{I} (H_L) \geq \gamma^2\chi_{I} (H_L) . \end{equation} \end{theorem}
This does not answer the above posed question completely due to the restriction
$|I| \leq 2\gamma$. However, the result is sufficient for many questions in spectral theory of random Schr\"odinger operators. For a history of the questions discussed here and earlier results we refer to Ref.~\citenum{Rojas-MolinaV-13}.
\subsection{Random Schr{\"o}dinger operators}\label{ss:rSo}
Let ${\Lambda_L}$ be a cube of side $L\in2\mathbb{N}+1$, $(\Omega, \mathbb{P})$ a probability space, $V_0 \colon {\Lambda_L}\to \mathbb{R}$ a bounded, measurable deterministic potential, $V_\omega \colon {\Lambda_L}\to \mathbb{R}$ a bounded random potential and $H_{\omega,L}= (-\Delta + V_0+V_\omega)_{\Lambda_L}$ a random Schr\"odinger operator on $L^2({\Lambda_L})$ with Dirichlet or periodic boundary conditions. We assume that the random potential is of Delone-Anderson form \begin{equation*}
V_\omega(x):= \sum_{j \in{\mathbb{Z}^d}} \ \omega_j u_j(x) . \end{equation*} The random variables $\omega_j, j\in {\mathbb{Z}^d},$ are independent with probability distributions $\mu_j$, such that for some $m>0$ and all $j\in {\mathbb{Z}^d}$ we have $\supp \mu_j \subset [-m, m]$. Fix $0 < \delta_- < \delta_+<\infty$ and $0 < C_- \leq C_+ <\infty$. The sequence of measurable functions $u_j \colon \mathbb{R}^d \to \mathbb{R}$, $j \in {\mathbb{Z}^d}$, is such that \begin{align*} \forall j \in {\mathbb{Z}^d}:
\quad C_- \chi_{B(z_j,\delta_-)} \leq u_j \leq C_+ \chi_{B(z_j,\delta_+)}, \ \text{and} \ B(z_j,\delta_-) \subset \Lambda_1 + j .
\end{align*}
\subsection{Lifting of eigenvalues} \label{ss:lifting}
Let $\lambda_k^L(\omega)$ denote the eigenvalues of $H_{\omega,L}$ enumerated in non-decreasing order and counting multiplicities and $\psi_k=\psi_k^L(\omega)$ the normalised eigenvectors corresponding to $\lambda_k^L(\omega)$. While we suppress the dependence of $\psi_k$ on $L$ and $\omega$ in the notation, it should be kept in mind. Then \[ \lambda_k^L(\omega) = \langle \psi_k, H_{\omega,L} \psi_k\rangle = \int_{\Lambda_L} \overline{\psi_k} ( H_{\omega,L} \psi_k ) . \] Define the vector $ e=(e_j)_{j\in{\mathbb{Z}^d}}$ by $e_j=1$ for $j\in{\mathbb{Z}^d}$. Consider the monotone shift of $V_\omega$ \[
V_{\omega+ {t} \cdot e} = \sum_{j \in{\mathbb{Z}^d}} (\omega_j+ {t} ) u_j \] and set $Q=Q_L= \Lambda_L \cap {\mathbb{Z}^d}$. By first order perturbation theory we have \[
\frac{\rm d}{{\rm d}{\tau}} \lambda_k^L(\omega+ {\tau} \cdot e) |_{\tau=t} = \langle \psi_k, \sum_{j \in Q} u_j \, \psi_k \rangle. \] Note that the right hand side depends on $t$ implicitly through the eigenfunction $\psi_k$. Let us fix some $E_0\in\mathbb{R}$ and restrict our attention only to those eigenvalues satisfying $\lambda_k^L(\omega) \leq E_0$. By Theorem~\ref{thm:RojasVeselic} there exists a constant $C_{\rm sfUC}$ depending on the energy $E_0$, $\delta_-$ and the overall supremum
\label{eq:Vsupremum}
\sup_{|s|\leq m} \ \sup_{|\omega_j|\leq m} \ \sup_{x\in\mathbb{R}^d}
\big|V_{0}(x) +V_\omega(x) +s \sum_{j\in Q} u_j(x) \big| \end{equation*} of the potential, such that \begin{equation*} \sum_{j \in Q} \langle \psi_k, u_j \, \psi_k \rangle \geq C_- \sum_{j \in Q}\langle \psi_k, \chi_{B(z_j,\delta_-)}\psi_k \rangle \geq C_-\cdot C_{\rm sfUC} =: \kappa . \end{equation*}
Here we used that $\|\psi_k\|_{L^2 (\Lambda_L)}=1$. (Note that the quantity $\kappa$ depends a priori on the model parameters.) Integrating the derivative gives \begin{align}
\nonumber \lambda_k^L(\omega+ {t} \cdot e) &=
\lambda_k^L(\omega) + \int_0^{t} \frac{\mathrm{d} \lambda_k^L(\omega+ \tau \cdot e) }{\mathrm{d} \tau}|_{\tau=s} \, \mathrm{d} s \\
& \geq \lambda_k^L(\omega) + \int_0^{t} \kappa \, \mathrm{d} s = \lambda_k^L(\omega) + t \kappa . \label{eq:lifting} \end{align} This is the lifting estimate for eigenvalues of random (Schr\"odinger) operators alluded to in \S \ref{ss:motivation}. It should be compared with \eqref{eq:positive_semidefinite_perturbation} there. Indeed, due to the uniform nature of the estimate in Theorem~\ref{thm:RojasVeselic} we have \begin{equation} \label{eq:uniform_kappa} \inf_{ L \in 2\mathbb{N}+1}
\ \inf_{\omega \text{ s.t. } \forall \, j : |\omega_j|\leq m}
\ \inf_{ |{t}|\leq m} \ \inf_{n \text{ s.t. } \lambda_n^L(\omega)\leq E_0} \kappa >0 . \end{equation}
Thus the eigenvalue lifting estimate is almost as uniform as \eqref{eq:positive_definite_perturbation}. A parameter with respect to which the lifting estimate is \emph{not} uniform is the cut-off energy $E_0$. Indeed, if we add in \eqref{eq:uniform_kappa} an infimum over $E_0>0$ on the left hand side, it becomes zero, unless $\sum_k\chi_{B(z_k,\delta_-)}\geq 1$ almost everywhere on $\mathbb{R}^d$.
\subsection{Wegner estimates} Here we present a Wegner estimate. Such estimates play an important role in the proof of localization via the multiscale analysis. The latter is an induction argument over increasing length scales. The Wegner bound is used to prove the induction step. \par Let $ s\colon [0,\infty) \to[0,1]$ be the global modulus of continuity of the family $\{\mu_j\}_{j\in {\mathbb{Z}^d}}$, that is, \begin{equation*} \label{definition-s-mu-epsilon}
s(\epsilon):= \sup_{j \in {\mathbb{Z}^d}} \sup_{a \in \mathbb{R}} \, \mu_j\Big(\Big[a-\frac{\epsilon}{2},a+\frac{\epsilon}{2}\Big]\Big) \end{equation*} The main result of Ref.~\citenum{Rojas-MolinaV-13} on the model described in the last paragraph is a Wegner estimate which is valid for all compact energy intervals. \begin{theorem}[Ref.~\citenum{Rojas-MolinaV-13}] \label{t:Wegner} Let $H_{\omega,L}$ be a random Schr\"odinger operator as in \S \ref{ss:rSo}. Then for each $E_0\in \mathbb{R}$ there exists a constant $C_W$, such that for all $E\le E_0$, $\epsilon \le 1/3$, and all $L\in 2\mathbb{N}+1$ we have \begin{equation*} \label{eq:WE} \mathbb{E}\{{\mathop{\mathrm{Tr} \,}} [ \chi_{[E-\epsilon,E+\epsilon]}(H_{\omega, L}) ]\} \le C_W \ s(\epsilon) \, \lvert \ln \, \epsilon \rvert^d \ \lvert \Lambda_L \rvert . \end{equation*}
\end{theorem}
The Wegner constant $C_W$ depends only on $E_0$, $\|V_0\|_\infty$, $m$, $C_-$, $C_+$, $\delta_-$, and $\delta_+$.
Klein\cite{Klein-13} obtains an improvement over this result based on his above quoted Theorem~\ref{thm:Klein-13}. There are many earlier, related Wegner estimates. For an overview we refer to Ref.~\citenum{Rojas-MolinaV-13}.
\subsection{Comparison of local $L^2$-norms} An important step in the proof of Theorem~\ref{thm:RojasVeselic} is the following result which compares $L^2$-norms of the restrictions of a PDE-solution to two distinct subsets. In our applications the solution will be an eigenfunction of the Schr\"odinger operator. Various estimates of this type have been given in Refs.~\citenum{GerminetK-13}, \citenum{BourgainK-13} and \citenum{Rojas-MolinaV-13}. We quote here the version from the last mentioned paper.
\begin{theorem} \label{thm:quantitative-UCP} Let $K, R, \beta\in [0, \infty), \delta \in (0,1]$. There exists a constant $C_{\rm qUC}=C_{\rm qUC}(d,K, R,\delta, \beta) >0$ such that, for any $G\subset \mathbb{R}^d$ open, any $\Theta\subset G$ measurable, satisfying the geometric conditions \[ \operatorname{diam} \Theta + \operatorname{dist} (0 , \Theta) \leq 2R \leq 2 \operatorname{dist} (0 , \Theta), \quad \delta < 4R, \quad B(0, 14R ) \subset G, \] and any measurable $V\colon G \to [-K,K]$ and real-valued $\psi\in W^{2,2}(G)$ satisfying the differential inequality \begin{equation*} \label{eq:subsolution} \lvert \Delta \psi \rvert \leq \lvert V\psi \rvert \quad \text{a.e.\ on } G \quad \text{ as well as } \quad \int_{G} \lvert \psi \rvert^2 \leq \beta \int_{\Theta} \lvert \psi \rvert^2 , \end{equation*}
we have \begin{equation} \label{eq:aim} \int_{B(0,\delta)} \lvert \psi \rvert^2 \geq
C_{\rm qUC} \int_{\Theta} \lvert \psi\rvert^2 . \end{equation} \end{theorem}
\begin{figure}
\caption{Assumptions in Theorem~\ref{thm:quantitative-UCP} on the geometric constellation of $G$, $\Theta$, and $B(0,\delta)$}
\end{figure}
\section{Equidistribution property of eigenfunctions of second order elliptic operators}\label{sec:elliptic}
\subsection{Notation} Let $\mathcal{L}$ be the second order partial differential operator \[
\mathcal{L} u = -\sum_{i,j=1}^d \partial_i \left( a^{ij} \partial_j u \right) \] acting on functions $u$ on $\mathbb{R}^d$. Here $\partial_i$ denotes the $i$th weak derivative. Moreover, we introduce the following assumption on the coefficient functions $a^{ij}$. \begin{assumption}\label{ass:elliptic+} Let $r,\vartheta_1 , \vartheta_2 > 0$. The operator $\mathcal{L} $ satisfies $A(r,\vartheta_1 , \vartheta_2)$, if and only if $a^{ij} = a^{ji}$ for all $i,j \in \{1,\ldots , d\}$ and for almost all $x,y \in B(0,r)$ and all $\xi \in \mathbb{R}^d$ we have \begin{equation*} \label{eq:elliptic} \vartheta_1^{-1} \lvert \xi \rvert^2 \leq \sum_{i,j=1}^d a^{ij} (x) \xi_i \xi_j \leq \vartheta_1 \lvert \xi \rvert^2 \quad\text{and}\quad \sum_{i,j=1}^d \lvert a^{ij} (x) - a^{ij} (y) \rvert \leq \vartheta_2 \lvert x-y \rvert . \end{equation*} \end{assumption}
\subsection{A quantitative unique continuation principle} We first present an extension of the quantitative continuation principle, formulated for Schr\"odinger operators in Theorem~\ref{thm:quantitative-UCP}, to elliptic operators with variable coefficients.
\begin{theorem}[Ref.~\citenum{BorisovTV}] \label{thm:qUC-elliptic} Let $R\in (0,\infty)$, $K_V, \beta \in [0,\infty)$ and $\delta \in (0, 4 R]$. There is an $\epsilon> 0$, such that if $ A(14R, 1+\epsilon, \epsilon)$ holds then there is a constant $C_{\rm qUC} > 0$, such that for any open $G\subset \mathbb{R}^d$ containing the origin and $\Theta \subset G$ measurable satisfying
\[
\operatorname{diam} \Theta + \dist (0 , \Theta) \leq 2R \leq 2 \dist (0 , \Theta) \quad
\text{and} \quad B(0,14R) \subset G, \] any measurable $V : G \to [-K_V , K_V]$ and real-valued $\psi \in W^{2,2} (G)$ satisfying the differential inequality \begin{equation*} \label{eq:psi}
\lvert \mathcal{L} \psi \rvert \leq \lvert V\psi \rvert \quad \text{a.e.\ on $G$} \quad \text{as well as} \quad \frac{\lVert \psi \rVert_G^2}{\lVert \psi \rVert_\Theta^2} \leq \beta , \end{equation*} we have \begin{equation}
\lVert \psi \rVert_{B(0,\delta)}^2 \geq C_{\rm qUC} \lVert \psi \rVert_{\Theta}^2 . \end{equation} \end{theorem}
\subsection{Scale-free unique continuation principle} We move on to discuss the equidistribution property or scale-free unique continuation principle for eigenfunctions. The aim is to formulate an analog of Theorem~\ref{thm:RojasVeselic} for variable coefficient elliptic operators. As presented below, for the moment we have solved only the situation where the second order term is sufficiently close to the Laplacian. \par As before, we denote by $\Lambda_L$ a box of side $L\in \mathbb{N}$. By $V$ we indicate a bounded measurable potential on $\mathbb{R}^d$ taking values in $[-K_V,K_V]$, where $K_V$ is a positive constant. We restrict the operator $\mathcal{L} $ to $\Lambda_L(0)$ and add either Dirichlet or periodic boundary conditions. In the former case we denote such an operator by $\mathcal{L} _{L,0}$, and its domain $\mathcal{D}(\mathcal{L} _{L,0})$ is the subspace of $W^{2,2}(\Lambda_L)$ consisting of functions vanishing on $\partial \Lambda_L$. The notation for the operator with periodic boundary conditions is $\mathcal{L} _{L,\mathrm{per}}$ and its domain $\mathcal{D}(\mathcal{L} _{L,\mathrm{per}})$ consists of the functions in $W^{2,2}(\Lambda_L)$ satisfying periodic boundary conditions.
\begin{assumption}\label{ass:periodicCoefficients} For each pair $i,j$ the function $a^{ij}\colon \mathbb{R}^d \to \mathbb{R}$ is $\mathbb{Z}^d$-periodic. \par Assume that in the case of the operator $\mathcal{L} _{L,0}$ its coefficients $a^{ij}$, $i\not= j$, vanish on the sides of the box $\Lambda_L$, while the coefficients $a^{ii}$ satisfy periodic boundary conditions on the sides of the box $\Lambda_L$. In the case of the operator $\mathcal{L} _{L,\mathrm{per}}$ suppose that all its coefficients satisfy periodic boundary conditions on the sides of the box $\Lambda_L$. \end{assumption}
\begin{theorem}\label{thm:equidistribution-elliptic} Fix $K_V\in [0,+\infty)$, $\delta\in(0,1]$. Assume $A(\sqrt{d},1+\epsilon,\epsilon)$ with $\epsilon>0$ as in Theorem \ref{thm:qUC-elliptic} . Assume \ref{ass:periodicCoefficients}. \par Then there exists a constant $C_{sfUC}>0$ such that for any $L\in 2\mathbb{N}+1$, any sequence \begin{equation*}\label{d1.1} Z:=\{z_k\}_{k\in\mathbb{Z}^d} \ \text{ in }\ \mathbb{R}^d \quad \text{such that} \ B(z_k,\delta)\subset \Lambda_1(k) \text{ for each } k\in\mathbb{Z}^d, \end{equation*} any measurable $V: \Lambda_L\mapsto [-K_V,K_V]$ and any real-valued $\psi\in\mathcal{D}(\mathcal{L} _{L,0})$, respectively $\psi\in \mathcal{D}(\mathcal{L} _{L,\mathrm{per}})$ satisfying \begin{equation*}\label{d1.2}
|\mathcal{L}\psi|\leqslant |V\psi|\quad \text{a.e.\ on}\quad \Lambda_L \end{equation*} we have \begin{equation}\label{d1.3}
\int\limits_{S_L} |\psi(x)|^2 dx=\sum\limits_{k\in Q_L} \|\psi\|_{L_2(B(z_k,\delta))}^2\geqslant C_{sfUC} \|\psi\|_{L_2(\Lambda_L)}^2, \end{equation} where $S_L:=S\cap\Lambda_L=\cup_{k\in Q_L} B(z_k,\delta)$, $Q_L=\Lambda_L\cap \mathbb{Z}^d$, and $S:=\cup_{k\in \mathbb{Z}^d} B(z_k,\delta)$. \end{theorem}
As a \emph{Corollary} we obtain immediately an eigenvalue lifting estimate analogous to \eqref{eq:lifting}, where $\kappa$ is again uniform w.r.t.\ many parameters, as spelled out in subsection \ref{ss:lifting} explicitly. \par The proof of Theorem~\ref{thm:equidistribution-elliptic} is based on the strategy implemented in Ref.~\citenum{Rojas-MolinaV-13}. First one uses the conditions on the coefficients $a^{ij}$ described in Assumption \ref{ass:periodicCoefficients} to extend $\psi$ as well as the differential expression $\mathcal{L}$ to the whole of $\mathbb{R}^d$ while keeping the $W^{2,2}$-regularity and the differential inequality originally satisfied by $\psi$. Then one uses the comparison Theorem~\ref{thm:qUC-elliptic} for local $L^2$-norms. Note that now the condition concerning the minimal distance to the boundary of $G$ plays no role, since $\psi$ has been extended to the whole of $\mathbb{R}^d$. From this point the combinatorial and geometric arguments of Ref.~\citenum{Rojas-MolinaV-13} take over. In fact, one can prove an abstract meta-theorem: Once the comparison of local $L^2$-norms of $\psi$ holds up to the boundary, an equidistribution property for $\psi$ follows. Interestingly, such an argument no longer uses the fact that $\psi$ is a solution of a differential equation or inequality.
\section*{Acknowledgments}
D.B. was partially supported by RFBR, the grant of the President of Russia for young scientists - doctors of science (MD-183.2014.1), and the fellowship of Dynasty foundation for young mathematicians. \par M.T. and I.V. have been partially supported by the DAAD and the Croatian Ministry of Science, Education and Sports through the PPP-grant ``Scale-uniform controllability of partial differential equations''. M.T. and I.V. have been partially supported by the DFG.
\end{document}
\begin{document}
\title{Isomorphism of Hilbert modules over stably finite C$^*$-algebras}
\author{Nathanial P. Brown} \address{Department of Mathematics, Penn State University, State College, PA, 16802, USA} \email{[email protected]} \author{Alin Ciuperca} \address{Fields Institute, 222 College Street, Toronto, Ontario, Canada, M5T 3J1} \email{[email protected]} \keywords{$C^*$-algebras, Hilbert modules, Cuntz semigroup, compact} \subjclass[2000]{Primary 46L08, Secondary 46L80}
\thanks{N.B. was partially supported by DMS-0554870; A.C. was partially supported by Fields Institute.}
\begin{abstract} It is shown that if $A$ is a stably finite C$^*$-algebra and $E$ is a countably generated Hilbert $A$-module, then $E$ gives rise to a compact element of the Cuntz semigroup if and only if $E$ is algebraically finitely generated and projective. It follows that if $E$ and $F$ are equivalent in the sense of Coward, Elliott and Ivanescu (CEI) and $E$ is algebraically finitely generated and projective, then $E$ and $F$ are isomorphic. In contrast to this, we exhibit two CEI-equivalent Hilbert modules over a stably finite C$^*$-algebra that are not isomorphic. \end{abstract}
\maketitle
\section{Introduction}
In \cite{cowelliottiv} a new equivalence relation -- we'll call it \emph{CEI equivalence} -- on Hilbert modules was introduced. In general CEI equivalence is weaker than isomorphism, but it was shown that if $A$ has stable rank one, then it is the same as isomorphism (\cite[Theorem 3]{cowelliottiv}). Quite naturally, the authors wondered whether their result could be extended to the stably finite case. Unfortunately, it can't. In Section \ref{sec:counterexample}, we give examples of Hilbert modules over a stably finite C$^*$-algebra which are CEI-equivalent, but not isomorphic. On the other hand, we show in Section \ref{sec:main} that CEI equivalence amounts to isomorphism when restricted to ``compact" elements of the Cuntz semigroup, in the stably finite case.
\noindent\textbf{Acknowledgments:} We thank George Elliott, Francesc Perera, Leonel Robert, Luis Santiago, Andrew Toms and Wilhelm Winter for valuable conversations on topics related to this work.
\section{Definitions and Preliminaries}
Throughout this note all C$^*$-algebras are assumed to be separable and all Hilbert modules are assumed to be right modules and countably generated. We will follow standard terminology and notation in the theory of Hilbert modules (see, for example, \cite{lance}). In particular, $\mathcal{K}$ denotes the compact operators on $\ell^2(\mathbb{N})$, while $\mathcal{K}(E)$ will denote the ``compact" operators on a Hilbert module $E$.
For the reader's convenience, we recall a few definitions that are scattered throughout \cite{cowelliottiv}.
\begin{defn} \label{defn:compactcontain} If $E \subset F$ are Hilbert $A$-modules, we say $E$ is \emph{compactly contained in} $F$
if there exists a self-adjoint $T \in \mathcal{K}(F)$ such that $T|_E = \operatorname{id}_E$. In this situation we write $E \subset \subset F$. \end{defn}
Note that $E \subset \subset E$ if and only if $\mathcal{K}(E)$ is unital; it can be shown that this is also equivalent to $E$ being algebraically finitely generated and projective (in the purely algebraic category of right $A$-modules) -- see the proof of \cite[Corollary 5]{cowelliottiv} (this part of the proof does not require the assumption of stable rank one).
\begin{defn} We say a Hilbert $A$-module $E$ is \emph{CEI subequivalent} to another Hilbert $A$-module $F$ if every compactly contained submodule of $E$ is isomorphic to a compactly contained submodule of $F$.
We say $E$ and $F$ are \emph{CEI equivalent} if they are CEI subequivalent to each other -- i.e., a third Hilbert $A$-module $X$ is isomorphic to a compactly contained submodule of $E$ if and only if $X$ is isomorphic to a compactly contained submodule of $F$. \end{defn}
\begin{defn} We let $Cu(A)$ denote the set of Hilbert $A$-modules, modulo CEI equivalence. The class of a module $E$ in $Cu(A)$ will be denoted $[E]$. \end{defn}
It turns out that $Cu(A)$ is an abelian semigroup with $[E] + [F] := [E\oplus F]$. (Note: it isn't even obvious that this is well defined!) Moreover $Cu(A)$ is partially ordered -- $[E] \leq [F] \Longleftrightarrow$ $E$ is CEI subequivalent to $F$ -- and every increasing sequence has a supremum (i.e., least upper bound). See \cite[Theorem 1]{cowelliottiv} for proofs of these facts.
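For orientation, we recall a standard example which is not needed in what follows. Hilbert modules over $\mathbb{C}$ are just Hilbert spaces, a compactly contained submodule is the same thing as a finite-dimensional subspace, and the CEI class of a (separable) Hilbert space is determined by its dimension. Hence
$$Cu(\mathbb{C}) \cong \overline{\mathbb{N}} = \{0,1,2,\dots\} \cup \{\infty\}, \qquad [\mathcal{H}] \mapsto \dim \mathcal{H},$$
with the usual addition and order; the compact elements are exactly the finite ones. This is consistent with the results of Section \ref{sec:main} below, since $\mathbb{C}$ is stably finite and the finite-dimensional Hilbert spaces are precisely the algebraically finitely generated projective $\mathbb{C}$-modules.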
\begin{defn} An element $x \in Cu(A)$ is \emph{compact} (in the order-theoretic sense) if for every increasing sequence $\{ x_n \} \subset Cu(A)$ with $x \leq \sup_n x_n$ there exists $n_0 \in \mathbb{N}$ such that $x \leq x_{n_0}$. \end{defn}
For a unital C$^*$-algebra $A$, \emph{stable finiteness} means that for every $n \in \mathbb{N}$, $M_n(A)$ contains no infinite projections. In the nonunital case there are competing definitions, but it seems most popular to say $A$ is stably finite if the unitization $\tilde{A}$ is stably finite, so this is the definition we will use.
\section{Main Results} \label{sec:main}
The proof of our first lemma is essentially contained in the proof of \cite[Corollary 5]{cowelliottiv}.
\begin{lem} \label{lem:equality} Assume $E\subset \subset F$ is a compact inclusion of Hilbert $A$-modules. If $E \cong F$ then either $E = F$ or $A\otimes \mathcal{K}$ contains a scaling element (in the sense of \cite{BC}). If $A$ is stably finite, then $A\otimes \mathcal{K}$ cannot contain a scaling element; hence, in this case, $E \cong F$ if and only if $E = F$. \end{lem}
\begin{proof} Assume $E$ is properly contained in $F$; we'll show $A\otimes \mathcal{K}$ contains a scaling element. Let $v\colon F \to E$ be an isomorphism and $T \in \mathcal{K}(F)$ be a positive operator such that $T|_E = \operatorname{id}_E$. As observed in \cite{cowelliottiv}, the map $vT$ is adjointable -- i.e.\ defines an element of $\mathcal{L}(F)$ -- and, in fact, is compact. (This assertion is readily checked whenever $T$ is a ``finite-rank" operator). Moreover, a calculation shows that $(vT)^*|_E = Tv^{-1}$. It is also worth noting that $T(vT) = vT$, since $T|_E = \operatorname{id}_E$ and $vT(F) \subset E$.
The scaling element we are after is $x = vT$. Indeed, one checks that $x^* x = T^2$; hence, $(x^* x)(xx^*) = T^2(vT)(vT)^* = (vT)(vT)^* = xx^*$. Finally, we must see why $xx^* \neq x^* x$. But if $xx^* = x^* x$, then $T^2 = (vT)(vT)^*$ and thus $T^2(F) \subset vT(F) \subset E$. It follows that $T^2$ is a self-adjoint projection onto $E$ (since $T^2|_E = \operatorname{id}_E$, too), and hence $x = vT$ is a partial isometry whose support and range coincide with $E$. But this is impossible because $T = T^2$ (since $T \geq 0$), so $vT(F) \subsetneqq E$ (since $T(F) = E \subsetneqq F$).
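To spell out the first of these computations (our elaboration, using the two observations recorded in the previous paragraph): since $vT(F) \subset E$ and $(vT)^*|_E = Tv^{-1}$, for every $\xi \in F$ we have
$$x^*x\,\xi = (vT)^*\bigl(vT\xi\bigr) = Tv^{-1}\bigl(vT\xi\bigr) = T^2\xi,$$
which is the identity $x^*x = T^2$ used above.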
We've shown that if $E \subsetneqq F$, then $\mathcal{K}(F)$ contains a scaling element. But Kasparov's stabilization theorem provides us with an inclusion $\mathcal{K}(F) \subset A\otimes \mathcal{K}$, so the proof of the first part is complete.
In the case that $A$ is stably finite, it is well known to the experts that $A\otimes \mathcal{K}$ cannot contain a scaling element. Indeed, if it did, then \cite[Corollary 4.4]{BC} implies that $M_n(A)$ contains a scaling element, for some $n \in \mathbb{N}$. But it was shown in \cite{BC} that the unitization $\widetilde{M_n(A)}$ would then have an infinite projection. However, there is a natural embedding $\widetilde{M_n(A)} \subset M_n(\tilde{A})$, which contradicts the assumption of stable finiteness. \end{proof}
Note that the canonical Hilbert module $\ell^2(A)$ is isomorphic to lots of (non-compactly contained) proper submodules.
\begin{prop} \label{prop} Let $E$ be a Hilbert $A$-module such that $[E]$ is compact in $Cu(A)$. Then either $E \subset \subset E$ or $A\otimes \mathcal{K}$ contains a scaling element. \end{prop}
\begin{proof} Let $h \in \mathcal{K}(E)$ be strictly positive. If $0$ is an isolated point in the spectrum $\sigma(h)$, then functional calculus provides a projection $p \in \mathcal{K}(E)$ such that $p = \operatorname{id}_E$; so $E \subset \subset E$, in this case. If $0 \in \sigma(h)$ is not isolated, then, again using functional calculus, we can find $E_1 \subset \subset E_2 \subset \subset E_3 \cdots \subset \subset E$ such that $\cup_i E_i$ is dense in $E$ and $E_i \subsetneqq E_{i+1}$ for all $i \in \mathbb{N}$.
Since $[E]$ is compact, there exists $i$ such that $[E_i] = [E]$. Since $E_{i+1} \subset \subset E$, $E_{i+1}$ is isomorphic to a compactly contained submodule of $E_i$ and this isomorphism restricted to $E_i$ maps onto a \emph{proper} submodule of $E_i$ (since $E_i \subsetneqq E_{i+1}$). Thus $E_i$ is isomorphic to a proper compactly contained submodule of itself. Hence, by Lemma \ref{lem:equality}, $A\otimes \mathcal{K}$ contains a scaling element. \end{proof}
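For concreteness, we indicate one standard way to produce the chain used in the proof above; this sketch is ours and is not spelled out in the original argument. Since $0 \in \sigma(h)$ is not isolated, choose $\varepsilon_1 > \varepsilon_2 > \cdots \to 0$ with $\sigma(h) \cap (\varepsilon_{i+1}, \varepsilon_i) \neq \emptyset$ for every $i$, and set
$$E_i := \overline{(h-\varepsilon_i)_+ E}.$$
A continuous function which is equal to $1$ on $[\varepsilon_i, \|h\|]$ and vanishes on $[0,\varepsilon_{i+1}]$, applied to $h$, lies in $\mathcal{K}(E_{i+1})$ and restricts to the identity on $E_i$, so $E_i \subset \subset E_{i+1}$ (and, similarly, $E_i \subset \subset E$); the choice of the $\varepsilon_i$ forces $E_i \subsetneqq E_{i+1}$, and $\cup_i E_i$ is dense in $E$ because $h$ is strictly positive.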
\begin{cor} Let $A$ be stably finite and $E$ be a Hilbert $A$-module. Then $[E] \in Cu(A)$ is compact if and only if $E \subset \subset E$. In particular, if $[E]$ is compact and $[E] \leq [F]$, then $E$ is isomorphic to a compactly contained submodule of $F$. \end{cor}
\begin{proof} The ``only if" direction is immediate from the previous proposition. So assume $E \subset \subset E$ and let $[F_n] \in Cu(A)$ be an increasing sequence such that $[E] \leq [F] := \sup [F_n]$. By definition, $E$ is then isomorphic to a compactly contained submodule $E' \subset \subset F$. In the proof of \cite[Theorem 1]{cowelliottiv} it is shown that if $E' \subset \subset F$ and $[F] = \sup [F_n]$, then there is some $n \in \mathbb{N}$ such that $[E'] \leq [F_n]$. Since $[E] = [E']$, the proof is complete. \end{proof}
\begin{cor} \label{cor:isom} Let $A$ be stably finite and $E, F$ be Hilbert $A$-modules. If $[E]= [F] \in Cu(A)$ is compact, then $E \cong F$. In particular, if $[E]= [F]$ and $E$ is algebraically finitely generated and projective, then $[E] \in Cu(A)$ is compact; hence, $E \cong F$. \end{cor}
\begin{proof} Assume $[E] = [F]$ is compact. Then $E \subset \subset E$ and $F \subset \subset F$, by the previous corollary. Hence there exist isomorphisms $v\colon F \to F' \subset \subset E$ and $u\colon E \to E' \subset \subset F$. It follows that $F \cong u(v(F)) \subset \subset F$, which, by Lemma \ref{lem:equality}, implies that $u(v(F)) = F$. Hence $u$ is surjective, as desired.
As mentioned after Definition \ref{defn:compactcontain}, if $E$ is algebraically finitely generated and projective, then $E \subset \subset E$, which implies $[E]$ is compact (as we've seen). \end{proof}
In the appendix of \cite{cowelliottiv} it is shown that $Cu(A)$ is isomorphic to the classical Cuntz semigroup $W(A\otimes \mathcal{K})$. When $A$ is stable, the isomorphism $W(A) \to Cu(A)$ is very easy to describe: the Cuntz class of $a \in A_+$ is sent to $H_a := \overline{aA}$ (with its canonical Hilbert $A$-module structure).
\begin{thm} \label{thm:main} Let $A$ be a stable, finite C$^*$-algebra, $a \in A_+$ and $H_a = \overline{aA}$. The following are equivalent: \begin{enumerate} \item $H_a$ is algebraically finitely generated and projective;
\item $[H_a] \in Cu(A)$ is compact;
\item $\sigma(a) \subset \{0\} \cup [\varepsilon, \infty)$ for some $\varepsilon > 0$;
\item $\langle a \rangle = \langle p \rangle \in W(A)$ for some projection $p\in A$. \end{enumerate} \end{thm}
\begin{proof} The implication $(1) \Longrightarrow (2)$ was explained above.
$(2) \Longrightarrow (3)$: Let $a_\varepsilon = (a-\varepsilon)_+$. Then $H_{a_\varepsilon} \subset \subset H_a$ and $\cup_\varepsilon H_{a_\varepsilon}$ is dense in $H_a$. Since $[H_a] \in Cu(A)$ is compact, there exists $\varepsilon > 0$ such that $[H_a] = [H_{a_\varepsilon}]$. Corollary \ref{cor:isom} implies that $H_a \cong H_{a_\varepsilon}$; thus $H_a = H_{a_\varepsilon}$, by Lemma \ref{lem:equality}. It follows that $\sigma(a) \subset \{0\} \cup [\varepsilon, \infty)$, because otherwise functional calculus would provide a nonzero element $b \in C^*(a)$ such that $0 \leq b \leq a$ (so $b \in H_a$) and $a_\varepsilon b = 0$ (so $b \notin H_{a_\varepsilon}$), which would contradict the equality $H_a = H_{a_\varepsilon}$.
$(3) \Longrightarrow (4)$ is a routine functional calculus exercise.
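For the reader's convenience, here is one way to carry out the exercise (our sketch). If $\sigma(a) \subset \{0\} \cup [\varepsilon, \infty)$, then the indicator function of $[\varepsilon,\infty)$ is continuous on $\sigma(a)$, so $p := \chi_{[\varepsilon,\infty)}(a)$ is a projection in $C^*(a) \subset A$; moreover, on $\sigma(a)$ one has
$$\varepsilon\,\chi_{[\varepsilon,\infty)}(t) \;\leq\; t \;\leq\; \|a\|\,\chi_{[\varepsilon,\infty)}(t),$$
i.e.\ $\varepsilon p \leq a \leq \|a\| p$, whence $p \precsim a$ and $a \precsim p$, so $\langle a \rangle = \langle p \rangle \in W(A)$.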
$(4) \Longrightarrow (1)$: Assume $\langle a \rangle = \langle p \rangle \in W(A)$. Since $pA$ is singly generated and algebraically projective, Corollary \ref{cor:isom} implies $H_a$ is isomorphic to $pA$. \end{proof}
The equivalence of $(3)$ and $(4)$ above generalizes Proposition 2.8 in \cite{PT}.
\begin{cor} If $A$ is stably finite, then $A\otimes \mathcal{K}$ has no nonzero projections if and only if $Cu(A)$ contains no compact element. \end{cor}
\section{A Counterexample} \label{sec:counterexample}
Now let us show that if $A$ is stably finite and $E,F$ are Hilbert $A$-modules such that $[E] = [F]$, then it need not be true that $E$ and $F$ are isomorphic. Let $A = C_0(0,1] \otimes \mathcal{O}_3 \otimes \mathcal{K}$, where $\mathcal{O}_3$ is the Cuntz algebra with three generators. Voiculescu's homotopy invariance theorem (cf.\ \cite{dvv}) implies that $A$ is quasidiagonal, hence stably finite. Let $p, q \in \mathcal{O}_3 \otimes \mathcal{K}$ be two nonzero projections which are \emph{not} Murray-von Neumann equivalent. If $x \in C_0(0,1]$ denotes the function $t \mapsto t$, then we define $f_p = x \otimes p$ and $f_q = x \otimes q$ in $A$. Since $A$ is purely infinite in the sense of \cite{KR} and the ideals generated by $f_p$ and $f_q$ coincide, it follows that $[\overline{f_p A}] = [\overline{f_q A}] \in Cu(A)$. We claim that the modules $\overline{f_p A}$ and $\overline{f_q A}$ are not isomorphic.
Indeed, if they were isomorphic, then we could find $v \in A$ such that $v^* v = f_p$ and $\overline{vv^*A} = \overline{f_q A}$. (See \cite[Lemma 3.4.2]{ciuperca}; if $T\colon \overline{f_p A}\to\overline{f_q A}$ is an isomorphism, then $v = T(f_p^{1/2})$ has the asserted properties.) Letting $\pi\colon A \to \mathcal{O}_3 \otimes \mathcal{K}$ be the quotient map corresponding to evaluation at $1 \in (0,1]$, it follows that $\pi(v)^* \pi(v) = p$ and $\overline{\pi(v) \pi(v)^* (\mathcal{O}_3 \otimes \mathcal{K})} = \overline{q (\mathcal{O}_3 \otimes \mathcal{K})}$. Since $\pi(v) \pi(v)^*$ is a projection whose associated hereditary subalgebra agrees with the hereditary subalgebra generated by $q$, it follows that $\pi(v) \pi(v)^* = q$ (since both projections are units for the same algebra). This contradicts the assumption that $p$ and $q$ are not Murray-von Neumann equivalent, so $\overline{f_p A}$ and $\overline{f_q A}$ cannot be isomorphic.
\section{Questions and Related Results}
If the following question has an affirmative answer, then the proof of \cite[Corollary 5]{cowelliottiv} would show that $A$ has real rank zero if and only if the compacts are ``dense" in $Cu(A)$.
\begin{question} Can Corollary \ref{cor:isom} be extended to the ``closure" of the compact elements? That is, if $A$ is stably finite and $E$ and $F$ are Hilbert A-modules such that $[E]=[F] = \sup [C_n]$ for an increasing sequence of compact elements $[C_n]$, does it follow that $E\cong F$? \end{question}
The next question was raised in \cite{cowelliottiv}, but we repeat it because the modules in Section \ref{sec:counterexample} are not counterexamples -- they mutually embed into each other. (To prove this, use the fact that $p$ is Murray-von Neumann equivalent to a subprojection of $q$, and vice versa.)
\begin{question} Are there two Hilbert modules $E$ and $F$ such that $[E] = [F]$, but $F$ is not isomorphic to a submodule of $E$? \end{question}
\begin{question} If $x \in Cu(A)$ is compact, is there a projection $p \in A\otimes \mathcal{K}$ such that $x = \langle p \rangle$? \end{question}
Of course, in the stably finite case the results of Section \ref{sec:main} tell us that much more is true, but for general C$^*$-algebras we don't know the answer to this question. However, we can give an affirmative answer in some interesting cases, as demonstrated below. First, a definition.
\begin{defn} An element $x \in Cu(A)$ will be called \emph{infinite} if $x+y=x$ for some non-zero $y\in Cu(A)$. Otherwise, $x$ will be called \emph{finite}. \end{defn}
Note that $[\ell^2(A)] \in Cu(A)$ is always infinite.
\begin{lem} \label{lem:unique} If $A$ is simple, then $[\ell^2(A)] \in Cu(A)$ is the unique infinite element. \end{lem}
\begin{proof} Assume $[E] + [F] = [E]$ for some nonzero Hilbert $A$-module $F$. Adding $[F]$ to both sides, we see that $[E] + 2[F] = [E]$; repeating this, we have that $[E] + k[F] = [E]$ for all $k \in \mathbb{N}$. By uniqueness of suprema, it follows that $[E] + [\ell^2(F)] = [E]$ (cf.\ \cite[Theorem 1]{cowelliottiv}). Since $A$ is simple, $F$ is necessarily full and hence $\ell^2(F) \cong \ell^2(A)$ (\cite[Proposition 7.4]{lance}). Thus $$[E] = [E] + [\ell^2(F)] = [E \oplus \ell^2(A)] = [\ell^2(A)],$$ by Kasparov's stabilization theorem. \end{proof}
In the proof of the following lemma, we use the operator inequality $$xbx^*+ y^*by\geq xby + y^*bx^*,$$ valid for any $b$ in $A^+$ and $x, y\in A$; it follows from the fact that $(x-y^*)b(x-y^*)^*\geq0$.
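Explicitly (a one-line verification we include for completeness):
$$0 \;\leq\; (x-y^*)\,b\,(x-y^*)^* \;=\; xbx^* - xby - y^*bx^* + y^*by,$$
and rearranging gives the inequality above.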
\begin{lem}\label{algsimple} Let $A$ be a stable algebraically simple C*-algebra. \begin{enumerate} \item For any non-zero $x\in Cu(A)$ there exists $n\in \mathbb{N}$ such that $nx=[A]$.
\item There exists a projection $q\in A$ such that $[A]=[qA]$. In particular, $[A]$ is a compact element of the Cuntz semigroup $Cu(A)$. \end{enumerate} \end{lem}
\begin{proof} It will be convenient to work in the original positive-element picture of the Cuntz semigroup. Our notation is by now standard (cf.\ \cite{PT}).
Proof of (1): Let $x=[\overline{bA}]$ for some $0\neq b\in A^+$ and let $a\in A$ be a strictly positive element. (Stability implies that every right Hilbert $A$-module is isomorphic to a closed right ideal of $A$.) Since $A$ is algebraically simple, one can find $x_1,\ldots, x_n, y_1, \ldots, y_n \in A$ such that $a=\sum_{i=1}^k x_iby_i$. Thus, \begin{align*}
a\sim 2a=a+a^* &=\sum_{i=1}^k (x_iby_i + y_i^*bx_i^*)\\
& \leq \sum_{i=1}^k (x_ibx_i^*+y_i^*by_i)\\ & \lesssim x_1bx_1^*\oplus y_1^*by_1 \oplus \cdots \oplus x_kbx_k^*\oplus y_k^*by_k\\ & \lesssim b\oplus b\oplus \cdots \oplus b, \end{align*} where the last sum has $n=2k$ summands.
Since $A$ is stable, one can embed the Cuntz algebra $\mathcal{O}_n$ in the multiplier algebra $M(A)$. This gives us isometries $s_1,\ldots, s_n\in M(A)$ with orthogonal ranges. Set $b_i'=s_ibs_i^*$ and note that $b_i'\sim b$ and $b_i'\perp b_j'$ for $i\neq j$. Moreover, $a\lesssim b_1'+\cdots +b_n'\lesssim a$ (since $a$ is strictly positive, it Cuntz-dominates any element of $A$). Therefore, $\langle a \rangle = n\langle b\rangle = nx$, or equivalently, $[A] = nx$.
Proof of (2): Since $A$ is stable and algebraically simple, \cite[Theorem 3.1]{BC} implies $A$ has a non-zero projection $p$. As above, we can find orthogonal projections $p_1,\ldots, p_n \in A$ such that $p_i\sim p$ and $\langle p_1+\cdots +p_n \rangle = n\langle p \rangle = [A]$. Defining $q = p_1+\cdots +p_n$, we are done. \end{proof}
We'll also need a consequence of the work in Section \ref{sec:main}.
\begin{prop} \label{stablyfinite} If $A$ is stable, $\langle a \rangle \in W(A) = Cu(A)$ is compact and $0 \in \sigma(a)$ is not an isolated point, then $A$ contains a scaling element and $\langle a \rangle$ is infinite. \end{prop}
\begin{proof} Assume $A$ contains no scaling element. Since $\langle a \rangle$ is compact, Proposition \ref{prop} implies that $H_a \subset \subset H_a$. As in the proof of $(2) \Longrightarrow (3)$ in Theorem \ref{thm:main}, there exists $\varepsilon > 0$ such that $[H_a] = [H_{a_\varepsilon}]$ and hence $H_a$ is isomorphic to a compactly contained submodule $E$ of $H_{a_\varepsilon}$. Lemma \ref{lem:equality} implies $E = H_a$, so $H_{a_\varepsilon} = H_a$ too. As we've seen, this implies $\sigma(a) \subset \{0\} \cup [\varepsilon, \infty)$, contradicting our hypothesis; hence, $A$ contains a scaling element.
To prove the second assertion, choose $\varepsilon > 0$ such that $[H_a] = [H_{a_\varepsilon}]$. Since $0 \in \sigma(a)$ is not isolated, we can find a nonzero positive function $f \in C_0(0,\|a\|]$ such that $f(t) = 0$ for all $t \geq \varepsilon$. Thus $f(a) + (a-\varepsilon)_+ \precsim a$ and $f(a) (a-\varepsilon)_+ = 0$. It follows that $$[H_{f(a)}] + [H_a] = [H_{f(a)}] + [H_{a_\varepsilon}] \leq [H_a]$$ and thus $[H_a]$ is infinite. \end{proof}
\begin{thm} Let $x \in Cu(A)$ be compact. \begin{enumerate} \item If $A$ is simple, then there exists a projection $p \in A\otimes \mathcal{K}$ such that $x = \langle p \rangle$.
\item If $x$ is finite, then there exists a projection $p \in A\otimes \mathcal{K}$ such that $x = \langle p \rangle$. \end{enumerate} \end{thm}
\begin{proof} In both cases we may assume $A$ is stable.
Proof of (1): Fix a nonzero positive element $a\in A$ such that $x = [H_a]$. If $0 \in \sigma(a)$ is an isolated point, then functional calculus provides us with a Cuntz equivalent projection, and we're done. Otherwise Proposition \ref{stablyfinite} tells us that $x$ is infinite and $A$ contains a scaling element. By simplicity and Lemma \ref{lem:unique}, we have that $x = [\ell^2(A)] = [A]$ (by stability). Moreover, the existence of a scaling element ensures that $A$ is algebraically simple (see \cite[Theorem 1.2]{BC}). Hence part (2) of Lemma \ref{algsimple} provides the desired projection.
Proof of (2): Choose $a \in A_+$ such that $x = \langle a \rangle$. Since $x$ is finite, Proposition \ref{stablyfinite} implies $0 \in \sigma(a)$ is an isolated point, so we're done. \end{proof}
\begin{rem} It is possible to improve part (2) of the theorem above. Namely, it is shown in \cite{ciuperca} that if $x \in Cu(A)$ is compact and there is no \emph{compact} element $y \in Cu(A)$ such that $x = x + y$, then there exists a projection $p \in A\otimes \mathcal{K}$ such that $x = \langle p \rangle$. \end{rem}
\end{document}
\begin{document}
\title{Differential Operators, Gauges, and Mixed Hodge Modules}
\author{Christopher Dodd} \begin{abstract} The purpose of this paper is to develop a new theory of gauges in mixed characteristic. Namely, let $k$ be a perfect field of characteristic $p>0$ and $W(k)$ the $p$-typical Witt vectors. Making use of Berthelot's arithmetic differential operators, we define, for a smooth formal scheme $\mathfrak{X}$ over $W(k)$, a new sheaf of algebras $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ which can be considered a higher dimensional analogue of the (commutative) Dieudonn\'e ring. Modules over this sheaf of algebras can be considered the analogue (over $\mathfrak{X}$) of the gauges of Ekedahl and Fontaine-Jannsen. We show that modules over $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ admit all of the usual $\mathcal{D}$-module operations, and we prove a robust generalization of Mazur's theorem in this context. Finally, we show that an integral form of a mixed Hodge module of geometric origin admits, after a suitable $p$-adic completion, the structure of a module over $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$. This allows us to prove a version of Mazur's theorem for the intersection cohomology and the ordinary cohomology of an arbitrary quasiprojective variety defined over a number field. \end{abstract}
\maketitle \tableofcontents{}
\section{Introduction}
In this work, we will develop the technology needed to state and prove \emph{Mazur's theorem for a mixed Hodge module}. In order to say what this means, we begin by recalling the original Mazur's theorem. Fix a perfect field $k$ of positive characteristic; let $W(k)$ denote the $p$-typical Witt vectors. Let $X$ be a smooth proper scheme over $k$. To $X$ is attached its crystalline cohomology groups $\mathbb{H}_{crys}^{i}(X)$, which are finite type $W(k)$-modules; the complex $\mathbb{H}_{crys}^{\cdot}(X)$ has the property that $\mathbb{H}_{crys}^{\cdot}(X)\otimes_{W(k)}^{L}k\tilde{\to}\mathbb{H}_{dR}^{\cdot}(X)$ (the de Rham cohomology of $X$ over $k$). Furthermore, if $\mathfrak{X}$ is a smooth, proper formal scheme over $W(k)$, whose special fibre is $X$, then there is a canonical isomorphism \[ \mathbb{H}_{crys}^{i}(X)\tilde{\to}\mathbb{H}_{dR}^{i}(\mathfrak{X}) \] for any $i$. In particular, the action of the Frobenius endomorphism on $X$ endows $\mathbb{H}_{dR}^{i}(\mathfrak{X})$ with an endomorphism
$\Phi$ which is semilinear over the Witt-vector Frobenius $F$. It is known that $\Phi$ becomes an automorphism after inverting $p$; the ``shape'' of the map $\Phi$ is an interesting invariant of the pair $(\mathbb{H}_{crys}^{i}(X),\Phi)$. To make this precise, one attaches, to any $r\in\mathbb{Z}$, the submodule $(\mathbb{H}_{crys}^{i}(X))^{r}=\{m\in\mathbb{H}_{crys}^{i}(X)|\Phi(m)\in p^{r}\mathbb{H}_{crys}^{i}(X)\}$ (the containment is to be interpreted in $\mathbb{H}_{crys}^{i}(X)[p^{-1}]$). Thus we have a decreasing, exhaustive filtration, whose terms measure how far $\Phi$ is from being an isomorphism.
On the other hand, the de Rham cohomology of $X$ comes with another filtration, the Hodge filtration, which comes from the Hodge to de Rham spectral sequence $E_{1}^{r,s}=\mathbb{H}^{s}(X,\Omega_{X}^{r})\Rightarrow\mathbb{H}_{dR}^{r+s}(X)$. Then we have the following remarkable \begin{thm} \label{thm:(Mazur)}(Mazur) Suppose that, for each $i$, the group $\mathbb{H}_{crys}^{i}(X)$ is $p$-torsion-free, and that the Hodge to de Rham spectral sequence of $X$ degenerates at $E_{1}$. Then the image of the filtration $(\mathbb{H}_{crys}^{i}(X))^{r}$ in $\mathbb{H}_{dR}^{i}(X)$ is the Hodge filtration. \end{thm}
This is (the first half of) \cite{key-13}, theorem 3 (in fact, under slightly weaker hypotheses; compare \cite{key-14} corollary 3.3, and \cite{key-10}, theorem 8.26). The theorem also includes a similar description of the conjugate filtration (the filtration coming from the second spectral sequence of hypercohomology) on $\mathbb{H}_{dR}^{i}(X)$; we will address this as part of the more general theorem 1.2 below. This result allowed Mazur to prove Katz's conjecture relating the slopes of $\Phi$ to the Hodge numbers of $X$.
In the years following \cite{key-13}, it was realized that the theorem can be profitably rephrased in terms of certain additional structures on $\mathbb{H}_{crys}^{i}(X)$. Let $A$ be a commutative ring. Denote by $D(A)$ the commutative ring $A[f,v]/(fv-p)$; put a grading on this ring by placing $A$ in degree $0$, $f$ in degree $1$, and $v$ in degree $-1$. Then a\emph{ gauge }(over \emph{$A$}) is a graded module over $D(A)$, ${\displaystyle M=\bigoplus_{i\in\mathbb{Z}}M^{i}}$. Set ${\displaystyle M^{\infty}:=M/(f-1)\tilde{=}\lim_{\to}M^{i}}$, and ${\displaystyle M^{-\infty}:=M/(v-1)\tilde{=}\lim_{\to}M^{-i}}$. One says that $M$ is an $F$-gauge if there is an isomorphism $F^{*}M^{\infty}\tilde{\to}M^{-\infty}$ (c.f. \cite{key-20}, definition 2.1, \cite{key-5}, chapter 1, or section 2.1 below).
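As a quick sanity check on these definitions (our own warm-up example): for the gauge $M=D(A)$ itself, each graded piece $M^{i}$ is a free $A$-module of rank one, and
\[
M^{\infty}=D(A)/(f-1)\cong A[v]/(v-p)\cong A,\qquad M^{-\infty}=D(A)/(v-1)\cong A[f]/(f-p)\cong A.
\]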
Then, in the above situation, one associates the $W(k)$-gauge \begin{equation} \mathbb{H}_{\mathcal{G}}^{i}(X):=\bigoplus_{r\in\mathbb{Z}}(\mathbb{H}_{crys}^{i}(X))^{r}\label{eq:Basic-Gauge-defn} \end{equation} where $f:(\mathbb{H}_{crys}^{i}(X))^{r}\to(\mathbb{H}_{crys}^{i}(X))^{r+1}$ acts by multiplication by $p$, and $v:(\mathbb{H}_{crys}^{i}(X))^{r}\to(\mathbb{H}_{crys}^{i}(X))^{r-1}$ acts as the inclusion. One has $\mathbb{H}_{\mathcal{G}}^{i}(X)^{\infty}\tilde{=}\mathbb{H}_{crys}^{i}(X)$, and the isomorphism\linebreak{}
$F^{*}(\mathbb{H}_{crys}^{i}(X))^{\infty}\to(\mathbb{H}_{crys}^{i}(X))^{-\infty}$ comes from the action of $\Phi$.
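To illustrate these definitions we include a standard example, not taken from the text. Let $X$ be an ordinary elliptic curve over $k$ and set $M:=\mathbb{H}_{crys}^{1}(X)$. Assuming, as one may after passing to the algebraic closure, that $M$ has a basis $e_{0},e_{1}$ with $\Phi(e_{0})=e_{0}$ and $\Phi(e_{1})=pe_{1}$, one computes
\[
(\mathbb{H}_{crys}^{1}(X))^{r}=\begin{cases} M, & r\leq 0,\\ p^{r}W(k)e_{0}\oplus p^{r-1}W(k)e_{1}, & r\geq 1. \end{cases}
\]
In particular the image of $(\mathbb{H}_{crys}^{1}(X))^{1}$ in $\mathbb{H}_{dR}^{1}(X)=M/pM$ is the line spanned by $\overline{e}_{1}$, which, in accordance with \thmref{(Mazur)}, is the Hodge filtration $H^{0}(X,\Omega_{X}^{1})\subset\mathbb{H}_{dR}^{1}(X)$.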
Remarkably, it turns out that there is a reasonable definition of $\mathbb{H}_{\mathcal{G}}^{i}(X)$ for any $X$, even without the assumption that each group $\mathbb{H}_{crys}^{i}(X)$ is $p$-torsion-free, or that the Hodge to de Rham spectral sequence degenerates at $E_{1}$. To state the result, note that for any gauge $M$ (over any $A$), $M^{-\infty}$ carries a decreasing filtration defined by $F^{i}(M^{-\infty})=\text{image}(M^{i}\to M^{-\infty})$. Passing to derived categories, we obtain a functor $D(\mathcal{G}(D(A)))\to D((A,F)-\text{mod})$ (here $\mathcal{G}(D(A))$ is the category of gauges, and $D((A,F)-\text{mod})$ is the filtered derived category of $A$); we will denote this functor $M^{\cdot}\to M^{\cdot,-\infty}$. The analogous construction can be carried out for $+\infty$ as well using the increasing filtration $C^{i}(M^{\infty})=\text{image}(M^{i}\to M^{\infty})$. In particular, if $M^{\cdot}\in D(\mathcal{G}(D(A)))$, then each $H^{i}(M^{\cdot,-\infty})$ and each $H^{i}(M^{\cdot,\infty})$ is a filtered $A$-module. \begin{thm} \label{thm:=00005BFJ=00005D} For any smooth $X$ over $k$, there is a functorially attached complex of $W(k)$-gauges, $\mathbb{H}_{\mathcal{G}}^{\cdot}(X)$, such that $\mathbb{H}_{\mathcal{G}}^{i}(X)^{\infty}\tilde{=}\mathbb{H}_{crys}^{i}(X)$ for all $i$. Further, there is an $F$-semilinear isomorphism $H^{i}((\mathbb{H}_{\mathcal{G}}^{\cdot}(X)\otimes_{W(k)}^{L}k))^{-\infty}\tilde{\to}(\mathbb{H}_{dR}^{i}(X),F)$ and a linear isomorphism $H^{i}((\mathbb{H}_{\mathcal{G}}^{\cdot}(X)\otimes_{W(k)}^{L}k))^{\infty}\tilde{\to}(\mathbb{H}_{dR}^{i}(X),C)$, where $F$ and $C$ denote the Hodge and conjugate filtrations, respectively.
When $\mathbb{H}_{crys}^{i}(X)$ is torsion-free for all $i$ and the Hodge to de Rham spectral sequence degenerates at $E_{1}$, then this functor agrees with the gauge constructed above in \eqref{Basic-Gauge-defn}. \end{thm}
As far as I am aware, the first proof of this theorem appears in Ekedahl's book \cite{key-20}. This is also the first place that the above notion of gauge is defined; Ekedahl points out that Fontaine discovered the notion independently. Ekedahl's proof relies on deep properties of the de Rham-Witt complex and on the results of the paper \cite{key-37}; in that paper, it is shown that there is attached to $X$ a complex inside another category $D^{b}(\mathcal{R}-\text{mod})$ where $\mathcal{R}$ is the so-called Raynaud ring; then, in definition 2.3.1 of \cite{key-20} Ekedahl constructs a functor from $D^{b}(\mathcal{R}-\text{mod})$ to the derived category $D^{b}(\mathcal{G}(D(A)))$; the composition of these two functors yields the construction of the theorem. Another, rather different proof of the theorem is given in \cite{key-5}, section 7.
Now let us turn to $\mathcal{D}$-modules and Hodge modules. From at least the time of Laumon's work (\cite{key-19}), it has been understood that the filtered complex $\mathbb{H}_{dR}^{\cdot}(X)$ (with its Hodge filtration) can be understood as an object of filtered $\mathcal{D}$-module theory. To explain this, let $\mathcal{D}_{X}^{(0)}$ denote the level zero PD-differential operators on $X$. Then $\mathcal{D}_{X}^{(0)}$ acts on $\mathcal{O}_{X}$, and we have a canonical isomorphism \[ \int_{\varphi}\mathcal{O}_{X}[d_{X}]\tilde{\to}\mathbb{H}_{dR}^{\cdot}(X) \] where $\varphi$ denotes the map $X\to\text{Spec}(k)$, $d_{X}=\text{dim}(X)$, and ${\displaystyle \int_{\varphi}}$ is the push-forward for $\mathcal{D}_{X}^{(0)}$-modules. In addition, $\mathcal{D}_{X}^{(0)}$ comes equipped with a natural \emph{increasing }filtration, the symbol filtration. Laumon's work\footnote{Strictly speaking, Laumon works in characteristic zero. But the same formalism works for $\mathcal{D}_{X}^{(0)}$ in positive characteristic; I'll address this below in the paper} upgrades the push-forward functor to a functor from filtered $\mathcal{D}_{X}^{(0)}$-modules to filtered $k$-vector spaces; and we have that\emph{ \[ \int_{\varphi}\mathcal{O}_{X}[d_{X}]\tilde{\to}(\mathbb{H}_{dR}^{\cdot}(X),F') \] }where $F'$ is the Hodge filtration, suitably re-indexed to make it an increasing filtration. Furthermore, Laumon works in the relative setting; i.e., he constructs a filtered push-forward for any morphism $\varphi:X\to Y$ of smooth varieties.
This leads to the question of whether the construction of \thmref{=00005BFJ=00005D} can be understood in terms of some sort of upgrade of filtered $\mathcal{D}$-modules to a category of graded modules. The main body of this work shows that, at least when the schemes in question lift to smooth formal schemes over $W(k)$, the answer is yes\footnote{In fact, the answer is always yes. But we will address the non-liftable case in future work}. To state the first result, recall that, in addition to the symbol filtration, the algebra $\mathcal{D}_{X}^{(0)}$ carries a decreasing filtration by two-sided ideals, the conjugate filtration, denoted $\{C^{i}(\mathcal{D}_{X}^{(0)})\}_{i\in\mathbb{Z}}$ (it was first defined in \cite{key-11}, section 3.4, c.f. also \defref{Hodge-and-Con} below). \begin{thm} \label{thm:D01}Let $\mathfrak{X}$ be a smooth formal scheme over $W(k)$. Then there is a locally noetherian sheaf of algebras $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ with the following properties:
1) ${\displaystyle \widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}=\bigoplus_{i}\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),i}}$ is a graded $D(W(k))$-algebra, and $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}/(v-1)\tilde{=}\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$, while the sheaf $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}/(f-1)$ has $p$-adic completion equal to $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)}$.
2) Let $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}/p:=\mathcal{D}_{X}^{(0,1)}$, a graded sheaf of $k$-algebras on $X$. The filtration $\text{im}(\mathcal{D}_{X}^{(0,1),i}\to\mathcal{D}_{X}^{(0)}\tilde{=}\mathcal{D}_{X}^{(0,1)}/(v-1))$ agrees with the conjugate filtration on $\mathcal{D}_{X}^{(0)}$.
3) We have $\mathcal{D}_{X}^{(1)}=\mathcal{D}_{X}^{(0,1)}/(f-1)$. Consider the filtration $F^{i}(\mathcal{D}_{X}^{(1)})=\text{im}(\mathcal{D}_{X}^{(0,1),i}\to\mathcal{D}_{X}^{(1)}\tilde{=}\mathcal{D}_{X}^{(0,1)}/(f-1))$. Then filtered modules over $(\mathcal{D}_{X}^{(1)},F^{\cdot})$ are equivalent to filtered modules over $(\mathcal{D}_{X}^{(0)},F^{\cdot})$ (the symbol filtration on $\mathcal{D}_{X}^{(0)}$). \end{thm}
This sheaf of algebras is constructed in \secref{The-Algebra} below; part $2)$ of the theorem is proved in \remref{Description-of-conjugate}, and part $3)$ is \thmref{Filtered-Frobenius}. This theorem shows that a graded module over $\mathcal{D}_{X}^{(0,1)}$ is a simultaneous generalization of a conjugate-filtered and a Hodge-filtered $\mathcal{D}_{X}^{(0)}$-module.
The algebra $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ admits analogues of all of the usual $\mathcal{D}$-module operations; namely, tensor product, duality, left-right interchange, as well as push-forward and pull-back over arbitrary morphisms (between smooth formal schemes). By construction the sheaf $D(\mathcal{O}_{\mathfrak{X}})=\mathcal{O}_{\mathfrak{X}}[f,v]/(fv-p)$ carries an action of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$. Let $D(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ denote the derived category of graded $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$-modules; then we have \begin{thm} For any morphism $\varphi:\mathfrak{X}\to\mathfrak{Y}$ of smooth formal schemes we denote the pushforward by ${\displaystyle \int_{\varphi}:D(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))\to D(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}))}$. If $\varphi$ is proper, then the pushforward takes $D_{coh}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ to $D_{coh}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}))$. We have ${\displaystyle (\int_{\varphi}\mathcal{M})^{-\infty}\tilde{=}(\int_{\varphi}\mathcal{M}^{-\infty})}$, where the pushforward on the right is in the category of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$-modules. In particular, if $\mathfrak{Y}$ is $\text{Specf}(W(k))$, then ${\displaystyle {\displaystyle \int_{\varphi}}D(\mathcal{O}_{\mathfrak{X}})}$ is a bounded complex of finite type gauges, and we have isomorphisms \[ ({\displaystyle \int_{\varphi}}D(\mathcal{O}_{\mathfrak{X}}))^{-\infty}[d_{X}]\tilde{=}\mathbb{H}_{dR}^{\cdot}(\mathfrak{X}) \] and \[ ({\displaystyle \int_{\varphi}}D(\mathcal{O}_{\mathfrak{X}}))^{\infty}[d_{X}]\tilde{=}F^{*}\mathbb{H}_{dR}^{\cdot}(\mathfrak{X}) \] where $F$ is the Witt-vector Frobenius. After passing to $k$ we obtain isomorphisms in the filtered derived category \[ ({\displaystyle \int_{\varphi}}D(\mathcal{O}_{\mathfrak{X}})\otimes_{W(k)}^{L}k)^{-\infty}[d_{X}]\tilde{=}(\mathbb{H}_{dR}^{\cdot}(X),C') \] (where $C'$ is the conjugate filtration, appropriately re-indexed to make it a decreasing filtration), and \[ ({\displaystyle \int_{\varphi}}D(\mathcal{O}_{\mathfrak{X}})\otimes_{W(k)}^{L}k)^{\infty}[d_{X}]\tilde{=}F^{*}(\mathbb{H}_{dR}^{\cdot}(X),F') \] where $F'$ is the Hodge filtration, suitably re-indexed to make it an increasing filtration. \end{thm}
This theorem is proved in \secref{Push-Forward} below.
In fact $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ has many more favorable properties which are developed extensively in this paper, including a well-behaved pull-back for arbitrary maps, an internal tensor product which satisfies the projection formula, and a relative duality theory; these are sections five through eight below. Simultaneously, we develop the analogous theory of $\mathcal{D}_{X}^{(0,1)}$-modules, where $\mathcal{D}_{X}^{(0,1)}:=\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}/p$; technically, we do a little more than that, and develop the theory of $\mathcal{D}_{X}^{(0,1)}$-modules over smooth varieties which do not have to lift to $W(k)$. The two theories play off each other nicely: we often use reduction mod $p$ and various versions of Nakayama's lemma to reduce statements about $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ to statements about $\mathcal{D}_{X}^{(0,1)}$; on the other hand, there are always local lifts of a smooth variety over $k$, so local questions about $\mathcal{D}_{X}^{(0,1)}$ often reduce to questions about $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$. There is also an interesting and rich theory over the truncated Witt vectors $W_{n}(k)$, but, given the length of this paper, we will undertake a detailed study of it in another work.
We also have a comparison with the gauge constructed in \thmref{=00005BFJ=00005D}; however, we will defer the proof of this result to a later paper. That is because it seems best to prove it as a consequence of a more general comparison theorem between the category of gauges constructed here and the one constructed in \cite{key-5}; and this general statement is still a work in progress. It also seems that there is a close connection with the recent works of Drinfeld \cite{key-39} and Bhatt-Lurie \cite{key-40} via a kind of Koszul duality formalism; again, the details are a work in progress\footnote{The author has been discussing these topics with Bhargav Bhatt }.
Now we discuss Mazur's theorem in the relative context. We begin with the \begin{defn} A module $\mathcal{M}\in\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})$ is standard if $\mathcal{M}^{-\infty}$ and $\mathcal{M}^{\infty}$ are $p$-torsion-free, each map $f_{\infty}:\mathcal{M}^{i}\to\mathcal{M}^{\infty}$ is injective, and, finally, there is a $j_{0}\in\mathbb{Z}$ so that \[
f_{\infty}(\mathcal{M}^{i+j_{0}})=\{m\in\mathcal{M}^{\infty}|p^{i}m\in f_{\infty}(\mathcal{M}^{j_{0}})\} \] for all $i\in\mathbb{Z}$. \end{defn}
Note that, over $W(k)$, this is a generalization of the construction of the gauge in \eqref{Basic-Gauge-defn}, with the roles of $f$ and $v$ reversed (this is related to the re-indexing of the Hodge and conjugate filtrations; c.f. also \remref{basic-equiv} below). Thus a general version of Mazur's theorem will give conditions on a complex of gauges which ensure that each cohomology group is standard. In order to state such a theorem, we need to note that there is a notion of $F$-gauge in this context, or, to be more precise, a notion of $F^{-1}$-gauge: \begin{defn} (c.f. \defref{Gauge-Defn!}) Let $F^{*}:\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}-\text{mod}\to\widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)}-\text{mod}$ denote Berthelot's Frobenius pullback (c.f. \thmref{Berthelot-Frob} below for details). Then an $F^{-1}$-gauge over $\mathfrak{X}$ is an object of $\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)})$ equipped with an isomorphism $F^{*}\mathcal{M}^{-\infty}\tilde{\to}\widehat{\mathcal{M}^{\infty}}$ (here $\widehat{?}$ denotes $p$-adic completion). There is also a version for complexes in $D(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$, namely, an $F^{-1}$-gauge in $D(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$ is a complex $\mathcal{M}^{\cdot}$ equipped with an isomorphism $F^{*}\mathcal{M}^{\cdot,-\infty}\tilde{\to}\widehat{\mathcal{M}^{\cdot,\infty}}$ (here $\widehat{?}$ denotes the cohomological or derived completion, c.f. \defref{CC} and \propref{Basic-CC-facts} below). \end{defn}
We denote by $D_{F^{-1}}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ the category of complexes for which there exists an isomorphism $F^{*}\mathcal{M}^{\cdot,-\infty}\tilde{\to}\widehat{\mathcal{M}^{\cdot,\infty}}$ as above. Then we have the following rather general version of Mazur's theorem: \begin{thm} (c.f. \thmref{F-Mazur}) Let $\mathcal{M}^{\cdot}\in D_{\text{coh},F^{-1}}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$. Suppose that $\mathcal{H}^{n}(\mathcal{M}^{\cdot})^{-\infty}$ is $p$-torsion-free for all $n$, and suppose that $\mathcal{H}^{n}((\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f])$ is $f$-torsion-free for all $n$. Then $\mathcal{H}^{n}(\mathcal{M}^{\cdot})$ is standard for all $n$. \end{thm}
Using the formalism of filtered $\mathcal{D}$-modules one verifies that the condition that $\mathcal{H}^{n}((\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f])$ is $f$-torsion-free for all $n$ is a generalization of the degeneration of the Hodge-to-de Rham spectral sequence. Therefore this theorem, along with the previous one, provides a robust generalization of Mazur's theorem, which allows much more general kinds of coefficients.
The conditions of the theorem are satisfied in several important cases. Suppose $R$ is a finitely generated $\mathbb{Z}$-algebra, and suppose that $X_{R}$ is a smooth $R$-scheme, and let $\varphi:X_{R}\to Y_{R}$ be a proper map. Suppose that $(\mathcal{M}_{R},F)$ is a filtered coherent $\mathcal{D}_{X_{R}}^{(0)}$-module on $X_{R}$. If the associated complex filtered $\mathcal{D}$-module $(\mathcal{M}_{\mathbb{C}},F)$ underlies a mixed Hodge module, then by Saito's theory the Hodge-to-de Rham spectral sequence for ${\displaystyle \int_{\varphi}(\mathcal{M}_{\mathbb{C}},F)}$ degenerates at $E_{1}$. Thus the same is true over $R$, after possibly localizing. Further localization ensures that each ${\displaystyle \mathcal{H}^{i}(\int_{\varphi}(\mathcal{M}_{R},F))}$ is flat over $R$.
Now suppose we have a map $R\to W(k)$. Let $\varphi:\mathfrak{X}\to\mathfrak{Y}$ denote the formal completion of the base change to $W(k)$. Then the theorem applies if there exists a $p$-torsion-free gauge $\mathcal{N}$ over $\mathfrak{X}$ such that $\mathcal{N}^{-\infty}\tilde{=}\widehat{\mathcal{M}\otimes_{R}W(k)}$ and $F^{*}(\mathcal{M}_{k},F)\tilde{\to}\mathcal{N}^{\infty}/p$. By a direct construction, this happens for $\mathcal{M}=\mathcal{O}_{X}$ as well as $\mathcal{M}=j_{\star}\mathcal{O}_{U}$ and $\mathcal{M}=j_{!}\mathcal{O}_{U}$ (where $U\subset X$ is an open subset whose complement is a normal crossings divisor, and $j_{\star}$ and $j_{!}$ denote the pushforwards in mixed Hodge module theory). Therefore, by the theorem itself, it happens when $(\mathcal{M}_{\mathbb{C}},F)$ is itself a Hodge module ``of geometric origin'' (c.f. \corref{Mazur-for-Hodge-1}). In this paper we give some brief applications of this to the case where $\mathcal{M}_{\mathbb{C}}$ is the local cohomology along some subscheme; but we expect that there are many more.
Finally, let's mention that Hodge modules of geometric origin control both the intersection cohomology and singular cohomology of singular varieties over $\mathbb{C}$. So we can obtain \begin{thm} \label{thm:Mazur-for-IC-Intro}Let $X_{R}$ be a (possibly singular) quasiprojective variety over $R$. Then, after possibly localizing $R,$ there is a filtered complex of $R$-modules $I\mathbb{H}^{\cdot}(X_{R})$, whose base change to $\mathbb{C}$ yields $I\mathbb{H}^{\cdot}(X_{\mathbb{C}})$, with its Hodge filtration. Now suppose $R\to W(k)$ for some perfect field $k$. Then for each $i$, there is a standard gauge $\tilde{I\mathbb{H}}^{i}(X)_{W(k)}$ so that \[ \tilde{I\mathbb{H}}^{i}(X)_{W(k)}^{-\infty}\tilde{=}I\mathbb{H}^{\cdot}(X_{R})\otimes_{R}W(k) \]
and so that \[ \tilde{I\mathbb{H}}^{i}(X)_{W(k)}^{\infty}\tilde{=}F^{*}(I\mathbb{H}^{\cdot}(X_{R})\otimes_{R}W(k)) \] Under this isomorphism, the Hodge filtration on $\tilde{I\mathbb{H}}^{i}(X)_{W(k)}^{\infty}/p$ agrees with the Frobenius pullback of the image of the Hodge filtration in $I\mathbb{H}^{\cdot}(X_{R})\otimes_{R}k$.
The analogous statement holds for the ordinary cohomology of a quasiprojective variety $X_{R}$, with its Hodge filtration; as well as the compactly supported cohomology. \end{thm}
This is proved in \corref{Mazur-for-IC} and \corref{Mazur-for-Ordinary} below. As in \cite{key-13}, \cite{key-38}, and \cite{key-55}, this result implies that the ``Newton polygon'' lies on or above the ``Hodge polygon'' for both the ordinary and the intersection cohomology of quasiprojective varieties, in the circumstances of the above theorem. We note here that the theorem gives an $F$-semilinear action on the groups $I\mathbb{H}^{\cdot}(X_{R})\otimes_{R}W(k)[p^{-1}]$, as well as on the ordinary cohomology groups $\mathbb{H}^{\cdot}(X_{R})\otimes_{R}W(k)[p^{-1}]$ and on the compactly supported cohomology. This action has already been constructed as a consequence of the formalism of rigid cohomology (c.f. \cite{key-80},\cite{key-81}). However, to my knowledge this ``integral'' version of the action has not been considered before.
\subsection{Plan of the Paper}
The first chapter has two sections. In the first, we quickly review the theory of gauges over $W(k)$, and in particular give the equivalence between $F$-gauges and $F^{-1}$-gauges in this context. In the second, we give a quick recollection of some generalities on graded modules, before reviewing and extending (to the case of graded modules) the very important technical notion of cohomological completeness (also known as derived completeness). The Nakayama lemma is key here, as the reduction mod $p$ will be one of our main technical tools for proving theorems.
The next chapter introduces $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$, as well as its analogue $\mathcal{D}_{X}^{(0,1)}$ over a smooth $k$-variety $X$ (which does not have to lift to a smooth formal scheme), and performs some basic local calculations. In particular, we prove \corref{Local-coords-over-A=00005Bf,v=00005D}, which provides a local description of $\mathcal{D}_{X}^{(0,1)}$ which is analogous to the basic descriptions of differential operators ``in local coordinates'' that one finds in other contexts.
In chapter $4$, we study the categories of graded modules over $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ and $\mathcal{D}_{X}^{(0,1)}$, importing and generalizing some key results of \cite{key-5}. We prove the ``abstract'' version of Mazur's theorem (\thmref{Mazur!}) for a complex of gauges. Then we go on to introduce the notion of an $F^{-1}$-gauge over $X$ (and $\mathfrak{X}$), which makes fundamental use of Berthelot's Frobenius descent. We explain in \thmref{Filtered-Frobenius} how this Frobenius descent interacts with the natural filtrations coming from the grading on $\mathcal{D}_{X}^{(0,1)}$. Along the way, we look at the relationship between modules over $\mathcal{D}_{X}^{(0,1)}$ and modules over two important Rees algebras: $\mathcal{R}(\mathcal{D}_{X}^{(0)})$ and $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$, the Rees algebras of $\mathcal{D}_{X}^{(0)}$ with respect to the symbol and conjugate filtrations, respectively.
Chapters $5$ through $8$ introduce and study the basic $\mathcal{D}$-module operations in this context: pullback, tensor product, left-right interchange, pushforward, and duality. Much of this is similar to the story for algebraic $\mathcal{D}$-modules (as covered in \cite{key-49}, for instance). For example, even though $\mathcal{D}_{X}^{(0,1)}$ and $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ do not have finite homological dimension, we show that the pushforward, pullback, and duality functors do have finite homological dimension. As usual, the study of the pushforward (chapter $7$ below) is the most involved, and we spend some time exploring the relationship with the pushforwards for $\mathcal{R}(\mathcal{D}_{X}^{(0)})$ and $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$, respectively; these admit descriptions in terms of the more standard filtered pushforwards of $\mathcal{D}$-modules.
Finally, in the last chapter we put everything together and prove Mazur's theorem for a Hodge module of geometric origin; this uses, essentially, all of the theory built in the previous sections. In addition to the applications explained in the introduction, we give some applications to the theory of the Hodge filtration on the local cohomology of a subscheme of a smooth complex variety.
There is one appendix to the paper, in which we prove a technical result useful for constructing the gauge $j_{\star}(D(\mathcal{O}))$, the pushforward of the trivial gauge over a normal crossings divisor.
\subsection{Notations and Conventions}
Let us introduce some basic notations which are used throughout the paper. For any ring (or sheaf of rings) $\mathcal{R}$, we will denote by $D(\mathcal{R})$ the graded ring in which $\mathcal{R}$ has degree $0$, $f$ has degree $1$, $v$ has degree $-1$, and $fv=p$. The symbol $k$ will always denote a perfect field of positive characteristic, and $W(k)$ the $p$-typical Witt vectors. Letters $X$, $Y$, $Z$ will denote smooth varieties over $k$, while $\mathfrak{X}$, $\mathfrak{Y}$, $\mathfrak{Z}$ will denote smooth formal schemes over $W(k)$. When working with formal schemes, we let $\Omega_{\mathfrak{X}}^{1}$ denote the sheaf of continuous differentials (over $W(k)$), and $\mathcal{T}_{\mathfrak{X}}$ denote the continuous $W(k)$-linear derivations; we set $\Omega_{\mathfrak{X}}^{i}=\bigwedge^{i}\Omega_{\mathfrak{X}}^{1}$ and $\mathcal{T}_{\mathfrak{X}}^{i}=\bigwedge^{i}\mathcal{T}_{\mathfrak{X}}$.
We denote by $X^{(i)}$ the $i$th Frobenius twist of $X$; i.e., the scheme $X\times_{\text{Spec}(k)}\text{Spec}(k)$, where $k$ maps to $k$ via $F^{i}$. Since $k$ is perfect, the natural map $\sigma:X^{(i)}\to X$ is an isomorphism. On the other hand, the relative Frobenius $X\to X^{(i)}$ is a bijection on topological spaces, which allows us to identify $\mathcal{O}_{X^{(i)}}\tilde{=}\mathcal{O}_{X}^{p^{i}}$; we shall tacitly use this below.
Now we introduce some conventions on differential operators. If $\mathfrak{X}$ is a smooth formal scheme over $W(k)$, then for each $i\geq0$ we have Berthelot's ring of differential operators of level $i$, $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(i)}$, introduced in \cite{key-1}. This is a $p$-adically complete, locally noetherian sheaf of rings on $\mathfrak{X}$. In general, this sheaf is somewhat complicated to define, but when $\mathfrak{X}=\text{Specf}(\mathcal{A})$ is affine and admits local coordinates\footnote{i.e., $\Gamma(\Omega_{\mathfrak{X}}^{1})$ is a free module over $\mathcal{A}$} one has the following description of its global sections: let $D_{\mathcal{A}}^{(\infty)}$ denote the subring of $\text{End}_{W(k)}(\mathcal{A})$ consisting of the finite order, continuous differential operators on $\mathcal{A}$. Define $D_{\mathcal{A}}^{(i)}\subset D_{\mathcal{A}}^{(\infty)}$ to be the subring generated by differential operators of order $\leq p^{i}$. Then we have \[ \Gamma(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(i)})=\widehat{D_{\mathcal{A}}^{(i)}} \] where $\widehat{?}$ stands for $p$-adic completion. For each $i\geq0$ there is a natural, injective map $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(i)}\to\widehat{\mathcal{D}}_{\mathfrak{X}}^{(i+1)}$; when $\mathfrak{X}=\text{Specf}(\mathcal{A})$ is as above it is given by the $p$-adic completion of the tautological inclusion $D_{\mathcal{A}}^{(i)}\subset D_{\mathcal{A}}^{(i+1)}$.
Similarly, we have the sheaves of algebras $\mathcal{D}_{X}^{(i)}$ when $X$ is smooth over $k$. In the case $i=0$, this is simply the usual sheaf of pd-differential operators on $X$ (c.f. \cite{key-10}). This sheaf can be rather rapidly defined (as in \cite{key-3} chapter 1, though there they are called crystalline differential operators) as the enveloping algebroid of the tangent sheaf $\mathcal{T}_{X}$.
Finally let us mention that we will be often working with derived categories of graded modules in this work. In that context, the symbol $[i]$ denotes a shift in homological degree, while $(i)$ denotes a shift in the grading degree.
\section{Preliminaries}
\subsection{Gauges over $W(k)$}
In this section we set some basic notation and terminology, all of which is essentially taken from the paper \cite{key-5}. Let $k$ be a perfect field of characteristic $p>0$, and let $W(k)$ be the $p$-typical Witt vectors. Let $S$ be a noetherian $W(k)$-algebra. We recall from \cite{key-5} (also \cite{key-20}) that a gauge over $S$ is a graded module ${\displaystyle M=\bigoplus_{i=-\infty}^{\infty}M^{i}}$ over the graded ring $D(S)$ where (as always) we suppose $\text{deg}(f)=1$, $\text{deg}(v)=-1$, and $fv=p$. A morphism of gauges is a morphism in the category of graded modules.
If $M$ is a gauge, we denote the resulting multiplication maps by $f:M^{i}\to M^{i+1}$ and $v:M^{i}\to M^{i-1}$ for all $i$.
As explained in \cite{key-5}, lemma 1.1.1, such a module is finitely generated over $D(S)$ iff each $M^{i}$ is finite over $S$ and the maps $f:M^{r}\to M^{r+1}$ and $v:M^{-r}\to M^{-r-1}$ are isomorphisms for $r>>0$. It follows that in this case the map $v:M^{r}\to M^{r-1}$ is $p\cdot$ for $r>>0$, and $f:M^{-r}\to M^{-r+1}$ is $p\cdot$ for $r>>0$. In the terminology of \cite{key-5}, such a gauge is \emph{concentrated in a finite interval}. \begin{defn} \label{def:endpoints} Let $M$ be a gauge.
1) Set ${\displaystyle M^{\infty}:=M/(f-1)M\tilde{\to}\lim_{r\to\infty}M^{r}}$ and ${\displaystyle M^{-\infty}:=M/(v-1)M\tilde{\to}\lim_{r\to-\infty}M^{r}}$.
2) For each $i$, denote by $f_{\infty}:M^{i}\to M^{\infty}$ and $v_{-\infty}:M^{i}\to M^{-\infty}$ the induced maps.
3) Define $F^{i}(M^{\infty}):=\text{image}(M^{i}\to M^{\infty})$ and $C^{i}(M^{-\infty}):=\text{image}(M^{i}\to M^{-\infty})$. In particular, $F^{i}$ is an increasing filtration on $M^{\infty}$ and $C^{i}$ is a decreasing filtration on $M^{-\infty}$. Clearly any morphism of gauges $M\to N$ induces morphisms of filtered modules $(M^{\infty},F^{\cdot})\to(N^{\infty},F^{\cdot})$ and $(M^{-\infty},C^{\cdot})\to(N^{-\infty},C^{\cdot})$. \end{defn}
If $M$ is finitely generated we see that $M^{r}\tilde{=}M^{\infty}$ and $M^{-r}\tilde{=}M^{-\infty}$ for all $r>>0$.
Many gauges arising in examples possess an additional piece of structure: a Frobenius semi-linear isomorphism from $M^{\infty}$ to $M^{-\infty}$. So let us now suppose that $S$ is equipped with an endomorphism $F$ which extends the Frobenius on $W(k)$. \begin{defn} \label{def:F-gauge} (\cite{key-5}, section 1.4) An $F$-gauge is a gauge $M$ equipped with an isomorphism $\varphi:F^{*}M^{\infty}\tilde{\to}M^{-\infty}$. A morphism of $F$-gauges is required to respect the isomorphism $\varphi$. More precisely, given a morphism $G:M\to N$, it induces $G^{\infty}:M^{\infty}\to N^{\infty}$ and $G^{-\infty}:M^{-\infty}\to N^{-\infty}$, and we demand $\varphi\circ F^{*}G^{\infty}=G^{-\infty}\circ\varphi$. This makes the category of $F$-gauges into an additive category, which is abelian if $F^{*}$ is an exact functor.
Now suppose in addition that $F:S\to S$ is an isomorphism. Then: \begin{rem} \label{rem:basic-equiv}There is an equivalence of categories from $F$-gauges to $F^{-1}$-gauges; namely, send $M$ to the gauge $N$ where $N^{i}=M^{-i}$, $f:N^{i}\to N^{i+1}$ is defined to be $v:M^{-i}\to M^{-i-1}$, and $v:N^{i}\to N^{i-1}$ is defined to be $f:M^{-i}\to M^{-i+1}$. Then $M^{\infty}=N^{-\infty}$, $M^{-\infty}=N^{\infty}$, and the isomorphism $\varphi:F^{*}M^{\infty}\tilde{\to}M^{-\infty}$ yields an isomorphism $\psi^{-1}:F^{*}N^{-\infty}\tilde{\to}N^{\infty}$, which is equivalent to giving an isomorphism $\psi:(F^{-1})^{*}N^{\infty}\tilde{\to}N^{-\infty}$. \end{rem}
Finally, we want to quickly review an important construction of gauges. We suppose here that $S=W(k)$, equipped with its Frobenius automorphism $F$. We use the same letter $F$ to denote the induced automorphism of the field $B=W(k)[p^{-1}]$. We will explain how gauges arise from lattices in $B$-vector spaces: \begin{example} \label{exa:BasicGaugeConstruction}Let $D$ be a finite dimensional $B$-vector space, and let $M$ and $N$ be two lattices (i.e., finite free $W(k)$-modules which span $D$) in $D$. To this situation we may attach a gauge over $W(k)$ as follows: for all $i\in\mathbb{Z}$ define \[
M^{i}=\{m\in M|p^{i}m\in N\} \] We let $f:M^{i}\to M^{i+1}$ be the inclusion, and $v:M^{i}\to M^{i-1}$ be the multiplication by $p$. For $i\gg0$ we have $p^{i}M\subset N$ and so $M^{i}=M$ for all such $i$. For $i\ll0$ we have $p^{-i}N\subset M$ and so $M^{i}=p^{-i}N\tilde{=}N$ for such $i$. In particular we obtain $M^{-\infty}\tilde{=}N$ and $M^{\infty}\tilde{=}M$. This is evidently a finite-type gauge over $W(k)$. Now suppose that there is an $F$-semi-linear automorphism $\Phi:D\to D$ so that $M=\Phi(N)$. Then the previous construction gives an $F^{-1}$-gauge via the isomorphism $\Phi:N=M^{-\infty}\to M^{\infty}=M$. \end{example}
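For orientation, here is the simplest instance of this construction; it is a direct check from the definitions (and is not taken from \cite{key-5}). Take $D=B$, $M=W(k)$ and $N=p^{a}W(k)$ for some fixed $a\geq0$. Then \[ M^{i}=\{m\in W(k)|p^{i}m\in p^{a}W(k)\}=p^{\max(a-i,0)}W(k) \] so that $M^{i}=W(k)$ for $i\geq a$ and $M^{i}=p^{a-i}W(k)$ for $i\leq a$; the maps $f$ are the evident inclusions and the maps $v$ are multiplication by $p$. In particular $M^{\infty}=W(k)=M$ and $M^{-\infty}\tilde{=}p^{a}W(k)=N$, while the filtration $F^{\cdot}$ defined above is given by $F^{i}(M^{\infty})=p^{\max(a-i,0)}W(k)$, since each $f_{\infty}$ is simply the inclusion into $M$.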
\begin{rem} \label{rem:=00005BFJ=00005D-standard}In \cite{key-5}, section 2.2, there is associated an $F$-gauge to a finite dimensional $B$-vector space $D$, equipped with a lattice $M\subset D$ and a semi-linear automorphism $\Phi:D\to D$. We recall that their construction is \end{rem}
\[
M^{i}=\{m\in M|\Phi(m)\in p^{i}M\}=\{m\in M|m\in p^{i}\Phi^{-1}(M)\} \] for all $i\in\mathbb{Z}$. In this instance $f:M^{i}\to M^{i+1}$ is the multiplication by $p$, and $v:M^{i}\to M^{i-1}$ is the inclusion. If we set $N=\Phi^{-1}(M)$ then this is exactly the $F$-gauge which corresponds to the $F^{-1}$-gauge constructed in \exaref{BasicGaugeConstruction} above, via the equivalence of categories of \remref{basic-equiv}.
In \cite{key-5} this construction is referred to as the standard construction of gauges. We will generalize this below in \subsecref{Standard}.
\subsection{Cohomological Completion of Graded Modules}
In this section we give some generalities on sheaves of graded modules. Throughout this section, we let $X$ be a noetherian topological space and $\tilde{\mathcal{R}}=\bigoplus_{i\in\mathbb{Z}}\tilde{\mathcal{R}}^{i}$ a $\mathbb{Z}$-graded sheaf of rings on $X$. The noetherian hypothesis ensures that, for each open subset $U\subset X$, the functor $\mathcal{F}\to\mathcal{F}(U)$ respects direct sums; although perhaps not strictly necessary, it simplifies the discussion of graded sheaves (and it always applies in this paper). Denote $\tilde{\mathcal{R}}^{0}=\mathcal{R}$, a sheaf of rings on $X$.
Let $\mathcal{G}(\tilde{\mathcal{R}})$ denote the category of graded sheaves of modules over $\tilde{\mathcal{R}}$. This is a Grothendieck abelian category; the direct sum is given by the usual direct sum of sheaves. To construct the product of sheaves $\{\mathcal{M}_{i}\}_{i\in I}$, one takes the sheafification of the pre-sheaf of local sections of the form $(m_{i})_{i\in I}$ for which there is a bound on the degree; i.e. $-N\leq\text{deg}(m_{i})\le N$ for a fixed $N\in\mathbb{N}$ and all $i\in I$. Since $X$ is a noetherian space, this pre-sheaf is actually already a sheaf.
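In other words (an elementary reformulation), the product in $\mathcal{G}(\tilde{\mathcal{R}})$ may be computed degreewise and then summed over degrees: \[ \prod_{i\in I}\mathcal{M}_{i}\tilde{=}\bigoplus_{j\in\mathbb{Z}}\prod_{i\in I}\mathcal{M}_{i}^{j} \] where the product on the right hand side is the usual product of sheaves of $\mathcal{R}$-modules; a local section is then precisely a tuple $(m_{i})_{i\in I}$ whose homogeneous components lie in finitely many degrees.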
It follows formally that $\mathcal{G}(\tilde{\mathcal{R}})$ has enough injectives; this can also be proved in the traditional way by constructing enough injectives in the category of modules over a graded ring and then noting that the sheaf ${\displaystyle \prod_{x\in X}\mathcal{I}_{x}}$ is injective if $\mathcal{I}_{x}$ is an injective object in the category of graded $\tilde{\mathcal{R}}_{x}$-modules. We note that an injective in $\mathcal{G}(\tilde{\mathcal{R}})$ might not be an injective $\mathcal{\tilde{R}}$-module. However, from the previous remark it follows that any injective in $\mathcal{G}(\tilde{\mathcal{R}})$ is a summand of a sheaf of the form $\prod_{x\in X}\mathcal{I}_{x}$; as such sheaves are clearly flasque it follows that any injective in $\mathcal{G}(\tilde{\mathcal{R}})$ is flasque.
For each $i\in\mathbb{Z}$ we have the exact functor $\mathcal{M}\to\mathcal{M}^{i}$ which takes $\mathcal{G}(\tilde{\mathcal{R}})\to\mathcal{R}-\text{mod}$; the direct sum of all of these functors is isomorphic to the identity (on the underlying sheaves of $\mathcal{R}$-modules). Note that the functor $\mathcal{M}\to\mathcal{M}^{0}$ admits the left adjoint $\mathcal{N}\to\tilde{\mathcal{R}}\otimes_{\mathcal{R}}\mathcal{N}$.
Let $D(\mathcal{G}(\tilde{\mathcal{R}}))$ denote the (unbounded) derived category of $\mathcal{G}(\tilde{\mathcal{R}})$. Then the exact functor $\mathcal{M}\to\mathcal{M}^{i}$ derives to a functor $\mathcal{M}^{\cdot}\to\mathcal{M}^{\cdot,i}$, and we have $\mathcal{M}^{\cdot}={\displaystyle \bigoplus_{i}\mathcal{M}^{\cdot,i}}$ for any complex in $D(\mathcal{G}(\tilde{\mathcal{R}}))$. \begin{lem} Let $\varphi:X\to Y$ be a continuous map, and let $\tilde{\mathcal{R}}_{X}$ and $\tilde{\mathcal{R}}_{Y}$ be graded sheaves of algebras on $X$ and $Y$, respectively. Suppose there is a morphism of graded rings $\varphi^{-1}(\tilde{\mathcal{R}}_{Y})\to\tilde{\mathcal{R}}_{X}$. Then we can form the derived functor $R\varphi_{*}:D(\mathcal{G}(\tilde{\mathcal{R}}_{X}))\to D(\mathcal{G}(\tilde{\mathcal{R}}_{Y}))$, as well as $R\varphi_{*}:D(\tilde{\mathcal{R}}_{X}-\text{mod})\to D(\tilde{\mathcal{R}}_{Y}-\text{mod})$.
1) Let $\mathcal{F}_{X}$ denote the forgetful functor from $\mathcal{G}(\tilde{\mathcal{R}}_{X})$ to $\tilde{\mathcal{R}}_{X}-\text{mod}$ (and similarly for $\mathcal{F}_{Y}$). Then for any $\mathcal{M}^{\cdot}\in D^{+}(\mathcal{G}(\tilde{\mathcal{R}}_{X}))$, we have $\mathcal{F}_{Y}R\varphi_{*}(\mathcal{M}^{\cdot})\tilde{\to}R\varphi_{*}(\mathcal{F}_{X}\mathcal{M}^{\cdot})$; where on the right hand side $R\varphi_{*}$ denotes the pushforward $D^{+}(\tilde{\mathcal{R}}_{X}-\text{mod})\to D^{+}(\tilde{\mathcal{R}}_{Y}-\text{mod})$. If $X$ and $Y$ have finite dimension, then this isomorphism holds for all $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}_{X}))$.
2) Again assuming $X$ and $Y$ have finite dimension; for each $i\in\mathbb{Z}$ we have $R\varphi_{*}(\mathcal{M}^{\cdot,i})\tilde{=}R\varphi_{*}(\mathcal{M}^{\cdot})^{i}$ in $D(\mathcal{R}_{Y}-\text{mod})$.
3) For every $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}_{X}))$ and $\mathcal{N}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}_{Y}))$ we have \[ R\varphi_{*}R\underline{\mathcal{H}om}_{\varphi^{-1}(\tilde{\mathcal{R}}_{Y})}(\varphi^{-1}\mathcal{N}^{\cdot},\mathcal{M}^{\cdot})\tilde{\to}R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}_{Y}}(\mathcal{N}^{\cdot},R\varphi_{*}\mathcal{M}^{\cdot}) \] \end{lem}
\begin{proof} 1) The statement about $D^{+}(\mathcal{G}(\tilde{\mathcal{R}}_{X}))$ follows immediately from the fact that injectives are flasque. For the unbounded derived category, the assumption implies $\varphi_{*}$ has finite homological dimension; and by what we have just proved the forgetful functor takes acyclic objects to acyclic objects. Therefore we can apply the composition of derived functors (as in \cite{key-9}, corollary 14.3.5), which implies that, since $\varphi_{*}$, $\mathcal{F}_{X}$, and $\mathcal{F}_{Y}$ have finite homological dimension in this case, there is an isomorphism $R\varphi_{*}\circ\mathcal{F}_{X}\tilde{=}R(\varphi_{*}\circ\mathcal{F}_{X})\tilde{\to}R(\mathcal{F}_{Y}\circ\varphi_{*})\tilde{=}\mathcal{F}_{Y}\circ R\varphi_{*}$.
2) As above this follows from \cite{key-9}, corollary 14.3.5, using $\varphi_{*}\circ\mathcal{M}^{i}\tilde{=}(\varphi_{*}\mathcal{M})^{i}$.
3) This is essentially identical to the analogous fact in the ungraded case. \end{proof} Now we briefly discuss the internal Hom and tensor on these categories. If $\mathcal{M}$ and $\mathcal{N}$ are objects of $\mathcal{G}(\tilde{\mathcal{R}})$, we have the sheaf of $\mathbb{Z}$-modules $\mathcal{H}om_{\mathcal{G}(\tilde{\mathcal{R}})}(\mathcal{M},\mathcal{N})$ as well as the sheaf of graded $\mathbb{Z}$-modules ${\displaystyle \underline{\mathcal{H}om}(\mathcal{M},\mathcal{N})=\bigoplus_{i\in\mathbb{Z}}\mathcal{H}om_{\mathcal{G}(\tilde{\mathcal{R}})}(\mathcal{M},\mathcal{N}(i))}$; if $\mathcal{M}$ is locally finitely presented this agrees with $\mathcal{H}om$ on the underlying $\tilde{\mathcal{R}}$-modules. Also, if $\mathcal{M}\in\mathcal{G}(\tilde{\mathcal{R}})$ and $\mathcal{N}\in\mathcal{G}(\tilde{\mathcal{R}}^{opp})$, we have the tensor product $\mathcal{N}\otimes_{\tilde{\mathcal{R}}}\mathcal{M}$ which is graded in the natural way. Suppose now that $\tilde{\mathcal{S}}$ is another sheaf of graded algebras on $X$, \begin{lem} \label{lem:basic-hom-tensor}1) Let $\mathcal{N}$ be a graded $(\mathcal{\tilde{\mathcal{R}}},\mathcal{\tilde{\mathcal{S}}})$ bimodule, $\mathcal{M}\in\mathcal{G}(\tilde{\mathcal{S}})$, and $\mathcal{P}\in\mathcal{G}(\tilde{\mathcal{R}})$. Then there is an isomorphism \[ \underline{\mathcal{H}om}_{\mathcal{\tilde{R}}}(\mathcal{N}\otimes_{\mathcal{\tilde{S}}}\mathcal{M},\mathcal{P})\tilde{\to}\underline{\mathcal{H}om}_{\tilde{\mathcal{S}}}(\mathcal{M},\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\mathcal{N},\mathcal{P})) \] Now, if we consider $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{S}}))$ and $\mathcal{P}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}))$, we have a map \[ R\underline{\mathcal{H}om}_{\mathcal{\tilde{R}}}(\mathcal{N}\otimes_{\mathcal{\tilde{S}}}^{L}\mathcal{M}^{\cdot},\mathcal{P}^{\cdot})\to R\underline{\mathcal{H}om}_{\tilde{\mathcal{S}}}(\mathcal{M}^{\cdot},R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\mathcal{N},\mathcal{P}^{\cdot})) \] and if, further, $\mathcal{N}$ is flat over $\tilde{\mathcal{S}}^{opp}$, then this map is an isomorphism.
2) Now suppose $\tilde{\mathcal{S}}\subset\tilde{\mathcal{R}}$ is a central inclusion of graded rings (in particular $\tilde{\mathcal{S}}$ is commutative). Then for any $\mathcal{M}\in\mathcal{G}(\tilde{\mathcal{R}})$, $\mathcal{N}\in\mathcal{G}(\tilde{\mathcal{R}}^{opp})$, and $\mathcal{P}\in\mathcal{G}(\tilde{\mathcal{R}})$ there are isomorphisms \[ \underline{\mathcal{H}om}_{\mathcal{\tilde{S}}}(\mathcal{N}\otimes_{\mathcal{\tilde{R}}}\mathcal{M},\mathcal{P})\tilde{\to}\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\mathcal{M},\underline{\mathcal{H}om}_{\tilde{\mathcal{S}}}(\mathcal{N},\mathcal{P})) \] the analogous result holds at the level of complexes: if $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}))$, $\mathcal{N}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}^{opp}))$, and $\mathcal{P}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}))$ there are isomorphisms \[ R\underline{\mathcal{H}om}_{\mathcal{\tilde{S}}}(\mathcal{N}^{\cdot}\otimes_{\mathcal{\tilde{R}}}^{L}\mathcal{M}^{\cdot},\mathcal{P}^{\cdot})\tilde{\to}R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\mathcal{M}^{\cdot},R\underline{\mathcal{H}om}_{\tilde{\mathcal{S}}}(\mathcal{N}^{\cdot},\mathcal{P}^{\cdot})) \] \end{lem}
This is proved in a nearly identical way to the ungraded case (c.f. \cite{key-9}, theorem 14.4.8).
Throughout this work, we will make extensive use of various sheaves of rings over $W(k)$ and derived categories of sheaves of modules over them. One of our main techniques will be to work with complexes of sheaves which are complete in a suitable sense, and then to apply Nakayama's lemma to deduce properties of those complexes from their $\text{mod}$ $p$ analogues. The technical set-up for this is the theory of cohomologically complete complexes (also called \emph{derived complete complexes}), which has been treated in many places in the literature, e.g., \cite{key-41}, \cite{key-42}, \cite{key-43}, Tag 091N, and \cite{key-82}, section 3.4. We will use the reference \cite{key-8}, chapter 1.5, which deals with non-commutative sheaves of algebras in a very general setting (namely, sheaves of rings over $\mathbb{Z}[h]$ which are $h$-torsion-free).
However, we actually have to extend the theory slightly to get exactly what we need, because our interest is in complexes of \emph{graded} modules, and the useful notion of completeness in this setting is to demand, essentially, that each graded piece of a module (or complex) is complete. We will set this up so that the results can be derived in a manner parallel to \cite{key-8} (and sometimes deduced directly from the results of \cite{key-8}).
From now on, we impose the assumption that $\tilde{\mathcal{R}}$ is a $W(k)$-algebra (where $W(k)$ sits in degree $0$) which is $p$-torsion-free. Note that we have the sheaf of algebras $\tilde{\mathcal{R}}[p^{-1}]$, which we regard as an object of $\mathcal{G}(\tilde{\mathcal{R}})$ via $\tilde{\mathcal{R}}[p^{-1}]=\bigoplus_{i\in\mathbb{Z}}\tilde{\mathcal{R}}^{i}[p^{-1}]$. There is the category $\mathcal{G}(\tilde{\mathcal{R}}[p^{-1}])$ of graded sheaves of modules over $\tilde{\mathcal{R}}[p^{-1}]$, and there is the functor $D(\mathcal{G}(\tilde{\mathcal{R}}[p^{-1}]))\to D(\mathcal{G}(\tilde{\mathcal{R}}))$; which is easily seen to be fully faithful, with essential image consisting of those complexes in $D(\mathcal{G}(\tilde{\mathcal{R}}))$ for which $p$ acts invertibly on each cohomology sheaf (compare \cite{key-8}, lemma 1.5.2); we shall therefore simply regard $D(\mathcal{G}(\tilde{\mathcal{R}}[p^{-1}]))$ as being a full subcategory of $D(\mathcal{G}(\tilde{\mathcal{R}}))$. Then, following \cite{key-8}, definition 1.5.5, we make the \begin{defn} \label{def:CC}1) An object $\mathcal{M}^{\cdot}\in D(\mathcal{R}-\text{mod})$ is said to be cohomologically complete if $R\mathcal{H}om_{\mathcal{R}}(\mathcal{R}[p^{-1}],\mathcal{M}^{\cdot})=R\mathcal{H}om_{W(k)}(W(k)[p^{-1}],\mathcal{M}^{\cdot})=0$.
2) An object $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{\tilde{R}}))$ is said to be cohomologically complete if \linebreak{} $R\underline{\mathcal{H}om}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M}^{\cdot})=0$. \end{defn}
We shall see below that the two notions are not quite consistent with one another; however, we shall only use definition $2)$ when working with graded objects, so this will hopefully cause no confusion.
Following \cite{key-8}, proposition 1.5.6, we have: \begin{prop} \label{prop:Basic-CC-facts}1) The cohomologically complete objects in $D(\mathcal{G}(\tilde{\mathcal{R}}))$ form a thick triangulated subcategory, denoted $D_{cc}(\mathcal{G}(\tilde{\mathcal{R}}))$. An object $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{\tilde{R}}))$ is in $D_{cc}(\mathcal{G}(\tilde{\mathcal{R}}))$ iff $R\underline{\mathcal{H}om}(\mathcal{N}^{\cdot},\mathcal{M}^{\cdot})=0$ for all $\mathcal{N}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}[p^{-1}]))$.
2) If $\tilde{\mathcal{S}}$ is any graded sheaf of $p$-torsion-free $W(k)$-algebras equipped with a graded algebra map $\tilde{\mathcal{S}}\to\tilde{\mathcal{R}}$, and $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}))$, then $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\tilde{\mathcal{R}}))$ iff $\mathcal{M}^{\cdot}$, regarded as a complex of graded $\tilde{\mathcal{S}}$-modules, is in $D_{cc}(\mathcal{G}(\tilde{\mathcal{S}}))$.
3) For every $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}))$ there is a distinguished triangle \[ R\underline{\mathcal{H}om}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M}^{\cdot})\to\mathcal{M}^{\cdot}\to R\underline{\mathcal{H}om}(\tilde{\mathcal{R}}[p^{-1}]/\tilde{\mathcal{R}}[-1],\mathcal{M}^{\cdot}) \] and we have $R\underline{\mathcal{H}om}(\tilde{\mathcal{R}}[p^{-1}]/\tilde{\mathcal{R}}[-1],\mathcal{M}^{\cdot})\in D_{cc}(\mathcal{G}(\tilde{\mathcal{R}}))$ while $R\underline{\mathcal{H}om}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M}^{\cdot})\in D(\mathcal{G}(\tilde{\mathcal{R}}[p^{-1}]))$. In particular, the category $D_{cc}(\mathcal{G}(\tilde{\mathcal{R}}))$ is naturally equivalent to the quotient of $D(\mathcal{G}(\tilde{\mathcal{R}}))$ by $D(\mathcal{G}(\tilde{\mathcal{R}}[p^{-1}]))$.
4) Recall that for each object $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}))$ we have, for $i\in\mathbb{Z}$, the $i$th graded piece $\mathcal{M}^{\cdot,i}\in D(\mathcal{R}-\text{mod})$. Then $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}))$ is in $D_{cc}(\mathcal{G}(\tilde{\mathcal{R}}))$ iff each $\mathcal{M}^{\cdot,i}\in D_{cc}(\mathcal{R}-\text{mod})$. \end{prop}
\begin{proof} 1) For any $\mathcal{N}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}[p^{-1}]))$ we have \[ R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\mathcal{N}^{\cdot},\mathcal{M}^{\cdot})\tilde{=}R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\tilde{\mathcal{R}}[p^{-1}]\otimes_{\tilde{\mathcal{R}}}^{L}\mathcal{N}^{\cdot},\mathcal{M}^{\cdot})\tilde{\to}R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\mathcal{N}^{\cdot},R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M}^{\cdot})) \] here, we have used the fact that $\tilde{\mathcal{R}}[p^{-1}]$ is an $(\tilde{\mathcal{R}},\tilde{\mathcal{R}})$-bimodule, along with \lemref{basic-hom-tensor}, 1).
Thus if $R\underline{\mathcal{H}om}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M}^{\cdot})=0$ then $R\underline{\mathcal{H}om}(\mathcal{N}^{\cdot},\mathcal{M}^{\cdot})=0$ as claimed. Therefore $D_{cc}(\mathcal{G}(\tilde{\mathcal{R}}))$ is the (right) orthogonal subcategory to the thick subcategory $D(\mathcal{G}(\tilde{\mathcal{R}}[p^{-1}]))$; it follows that it is a thick triangulated subcategory.
2) We have \[ R\underline{\mathcal{H}om}_{\tilde{\mathcal{S}}}(\tilde{\mathcal{S}}[p^{-1}],\mathcal{M}^{\cdot})\tilde{\to}R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\tilde{\mathcal{S}}[p^{-1}]\otimes_{\tilde{\mathcal{S}}}^{L}\tilde{\mathcal{R}},\mathcal{M}^{\cdot})\tilde{\to}R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M}^{\cdot}) \] from which the result follows.
3) This triangle follows by applying $R\underline{\mathcal{H}om}$ to the short exact sequence \[ \tilde{\mathcal{R}}\to\tilde{\mathcal{R}}[p^{-1}]\to\tilde{\mathcal{R}}[p^{-1}]/\tilde{\mathcal{R}} \] and noting that $R\underline{\mathcal{H}om}(\tilde{\mathcal{R}},-)$ is the identity functor. The complex $R\underline{\mathcal{H}om}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M}^{\cdot})$ is contained in $D(\mathcal{G}(\tilde{\mathcal{R}}[p^{-1}]))$ via the action of $\tilde{\mathcal{R}}[p^{-1}]$ on itself. On the other hand, as above there is a canonical isomorphism \[ R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\tilde{\mathcal{R}}[p^{-1}],R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\tilde{\mathcal{R}}[p^{-1}]/\tilde{\mathcal{R}}[-1],\mathcal{M}^{\cdot}))\tilde{\leftarrow}R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\tilde{\mathcal{R}}[p^{-1}]\otimes_{\tilde{\mathcal{R}}}^{L}(\tilde{\mathcal{R}}[p^{-1}]/\tilde{\mathcal{R}}),\mathcal{M}^{\cdot})[1] \] and the term on the right is zero since $\tilde{\mathcal{R}}[p^{-1}]\otimes_{\tilde{\mathcal{R}}}^{L}(\tilde{\mathcal{R}}[p^{-1}]/\tilde{\mathcal{R}})=0$; therefore \[ R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\tilde{\mathcal{R}}[p^{-1}]/\tilde{\mathcal{R}}[-1],\mathcal{M}^{\cdot})\in D_{cc}(\mathcal{G}(\tilde{\mathcal{R}})) \]
This shows that the inclusion $D_{cc}(\mathcal{G}(\tilde{\mathcal{R}}))\to D(\mathcal{G}(\tilde{\mathcal{R}}))$ admits a right adjoint, and the statement about the quotient category follows immediately.
4) For each $\mathcal{M}\in\mathcal{G}(\tilde{\mathcal{R}})$ there is an isomorphism of functors \[ \mathcal{H}om_{\mathcal{R}}(\mathcal{R}[p^{-1}],\mathcal{M}^{0})\tilde{=}\mathcal{H}om_{\mathcal{G}(\tilde{\mathcal{R}})}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M}) \] given by restricting a morphism on the right hand side to degree $0$; this follows from the fact that a local section of $\mathcal{H}om_{\mathcal{G}(\tilde{\mathcal{R}})}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M})$ is simply a system $(m_{i})$ of local sections of $\mathcal{M}^{0}$ satisfying $pm_{i}=m_{i-1}$, which is exactly a local section of $\mathcal{H}om_{\mathcal{R}}(\mathcal{R}[p^{-1}],\mathcal{M}^{0})$.
Now, $\mathcal{M}\to\mathcal{M}^{0}$ admits a left adjoint (namely $\mathcal{N}\to\tilde{\mathcal{R}}\otimes_{\mathcal{R}}\mathcal{N}$), and $\mathcal{N}\to\mathcal{H}om_{\mathcal{R}}(\mathcal{R}[p^{-1}],\mathcal{N})$ admits a left adjoint (namely $\mathcal{M}\to\mathcal{R}[p^{-1}]\otimes_{\mathcal{R}}\mathcal{M}$). So by \cite{key-9}, proposition 14.4.7, the derived functor of $\mathcal{H}om_{\mathcal{R}}(\mathcal{R}[p^{-1}],\mathcal{M}^{0})$ is given by the functor $R\mathcal{H}om_{\mathcal{R}}(\mathcal{R}[p^{-1}],\mathcal{M}^{\cdot,0})$ for any $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\tilde{R}))$. Therefore there is an isomorphism of functors \[ R\mathcal{H}om_{\mathcal{R}}(\mathcal{R}[p^{-1}],\mathcal{M}^{\cdot,0})\tilde{\to}R\mathcal{H}om_{\mathcal{G}(\tilde{\mathcal{R}})}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M}) \] Therefore \[ R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M}^{\cdot})=\bigoplus_{i}R\mathcal{H}om_{\mathcal{G}(\tilde{\mathcal{R}})}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M}^{\cdot}(i))\tilde{=}\bigoplus_{i}R\mathcal{H}om_{\mathcal{R}}(\mathcal{R}[p^{-1}],\mathcal{M}^{\cdot,-i}) \] and the result follows. \end{proof} We will refer to the functor $\mathcal{M}^{\cdot}\to R\underline{\mathcal{H}om}(\tilde{\mathcal{R}}[p^{-1}]/\tilde{\mathcal{R}}[-1],\mathcal{M}^{\cdot})$ as the \emph{graded derived completion} of $\mathcal{M}^{\cdot}$, or, usually, simply the completion of $\mathcal{M}^{\cdot}$ if no confusion seems likely; we will denote it by $\widehat{\mathcal{M}}^{\cdot}$.
A typical example of a cohomologically complete complex in $D(\mathcal{R}-\text{mod})$ is the following: suppose $\mathcal{M}^{\cdot}=\mathcal{M}$ is concentrated in degree $0$. If $\mathcal{M}$ is $p$-torsion-free, then $\mathcal{M}$ is $p$-adically complete iff $\mathcal{M}^{\cdot}$ is cohomologically complete (c.f. \cite{key-8}, lemma 1.5.4). By part $4)$ of the proposition, if $\mathcal{M}=\mathcal{M}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}))$ is concentrated in a single cohomological degree and each $\mathcal{M}^{i}$ is $p$-torsion-free and $p$-adically complete, then $\mathcal{M}^{\cdot}$ is cohomologically complete (in the graded sense). Therefore the two notions are not in general compatible; an infinite direct sum of $p$-adically complete modules is generally not $p$-adically complete.
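To see the failure of $p$-adic completeness concretely (an elementary check, included only for illustration): in the module ${\displaystyle \bigoplus_{i\geq0}W(k)}$ the partial sums ${\displaystyle \sum_{i=0}^{N}p^{i}e_{i}}$, where $e_{i}$ denotes the $i$th standard generator, form a $p$-adically Cauchy sequence with no limit in the direct sum, even though each summand is complete. On the other hand, the same object, viewed as a graded module with one copy of $W(k)$ in each degree, is cohomologically complete in the sense of definition $2)$, by part $4)$ of the proposition.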
Now we develop this notion a bit more: \begin{lem} \label{lem:reduction-of-completion}Let $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}))$. Then the natural map $\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k\to\mathcal{\widehat{M}}^{\cdot}\otimes_{W(k)}^{L}k$ is an isomorphism. \end{lem}
\begin{proof} The cone of the map $\mathcal{M}^{\cdot}\to\mathcal{\widehat{M}}^{\cdot}$ is contained in $D(\mathcal{G}(\tilde{\mathcal{R}}[p^{-1}]))$, and therefore vanishes upon applying $\otimes_{W(k)}^{L}k$. \end{proof} Now we can transfer the Nakayama lemma into the graded setting: \begin{cor} \label{cor:Nakayama}Let $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\mathcal{\tilde{R}}))$, and let $a\in\mathbb{Z}$. If $\mathcal{H}^{i}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)=0$ for all $i<a$, then $\mathcal{H}^{i}(\mathcal{M}^{\cdot})=0$ for all $i<a$. In particular $\mathcal{M}^{\cdot}=0$ iff $\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k=0$.
Therefore, if $\mathcal{M}^{\cdot},\mathcal{N}^{\cdot}\in D_{cc}(\mathcal{G}(\mathcal{\tilde{R}}))$ and $\eta:\mathcal{M}^{\cdot}\to\mathcal{N}^{\cdot}$ is a morphism such that $\eta\otimes_{W(k)}^{L}k:\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k\to\mathcal{N}^{\cdot}\otimes_{W(k)}^{L}k$ is an isomorphism, then $\eta$ is an isomorphism. \end{cor}
\begin{proof} By part 4) of the previous proposition this follows immediately from the analogous fact for cohomologically complete sheaves over $\mathcal{R}$; which is \cite{key-8}, proposition 1.5.8. \end{proof} For later use, we record a few more useful properties of cohomologically complete sheaves, following \cite{key-8}, propositions 1.5.10 and 1.5.12. \begin{prop} \label{prop:Push-and-complete}1) Suppose $\mathcal{M}^{\cdot},\mathcal{N}^{\cdot}\in D_{cc}(\mathcal{G}(\tilde{\mathcal{R}}))$, and let $\tilde{\mathcal{S}}$ be any central graded sub-algebra of $\tilde{\mathcal{R}}$ which contains $W(k)$. Then $R\underline{\mathcal{H}om}(\mathcal{M}^{\cdot},\mathcal{N}^{\cdot})\in D_{cc}(\mathcal{G}(\tilde{\mathcal{S}}))$.
2) Suppose $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}))$ and $\mathcal{N}^{\cdot}\in D_{cc}(\mathcal{G}(\tilde{\mathcal{R}}))$. Then the map $\mathcal{M}^{\cdot}\to\widehat{\mathcal{M}}^{\cdot}$ induces an isomorphism \[ R\underline{\mathcal{H}om}(\widehat{\mathcal{M}}^{\cdot},\mathcal{N}^{\cdot})\tilde{\to}R\underline{\mathcal{H}om}(\mathcal{M}^{\cdot},\mathcal{N}^{\cdot}) \]
3) Suppose $\varphi:X\to Y$ is a continuous map, and suppose $\tilde{\mathcal{R}}$ is a graded sheaf of algebras on $Y$ (satisfying the running assumptions of the section). Let $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\varphi^{-1}(\tilde{\mathcal{R}})))$. Then $R\varphi_{*}(\mathcal{M}^{\cdot})\in D_{cc}(\mathcal{G}(\tilde{\mathcal{R}}))$. Therefore, if $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\varphi^{-1}(\tilde{\mathcal{R}})))$ is any complex, then we have \[ \widehat{R\varphi_{*}(\mathcal{M}^{\cdot})}\tilde{\to}R\varphi_{*}(\widehat{\mathcal{M}^{\cdot}}) \] \end{prop}
\begin{proof} 1) As $\tilde{\mathcal{S}}$ is central we have \[ R\underline{\mathcal{H}om}_{\tilde{\mathcal{S}}}(\tilde{\mathcal{S}}[p^{-1}],R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\mathcal{M}^{\cdot},\mathcal{N}^{\cdot}))\tilde{\leftarrow}R\underline{\mathcal{H}om}_{\tilde{\mathcal{S}}}(\mathcal{M}^{\cdot}\otimes_{\tilde{\mathcal{S}}}^{L}\tilde{\mathcal{S}}[p^{-1}],\mathcal{N}^{\cdot}) \] \[ \tilde{\to}R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\mathcal{M}^{\cdot},R\underline{\mathcal{H}om}_{\tilde{\mathcal{S}}}(\tilde{\mathcal{S}}[p^{-1}],\mathcal{N}^{\cdot})) \] where the second isomorphism follows from the flatness of $\tilde{\mathcal{S}}[p^{-1}]$ over $\tilde{\mathcal{S}}$, and the first isomorphism follows directly from \[ \tilde{\mathcal{S}}[p^{-1}]\tilde{=}\text{colim}(\tilde{\mathcal{S}}\xrightarrow{p}\tilde{\mathcal{S}}\xrightarrow{p}\tilde{\mathcal{S}}\cdots)\tilde{=}{\displaystyle \text{hocolim}(\tilde{\mathcal{S}}\xrightarrow{p}\tilde{\mathcal{S}}\xrightarrow{p}\tilde{\mathcal{S}}\cdots)} \] so the result follows from part $2)$ of \propref{Basic-CC-facts}.
2) This follows since $\text{cone}(\mathcal{M}^{\cdot}\to\widehat{\mathcal{M}}^{\cdot})$ is contained in the orthogonal to $D_{cc}(\mathcal{G}(\tilde{\mathcal{R}}))$, by definition.
3) For the first claim, we use the adjunction \[ R\varphi_{*}R\underline{\mathcal{H}om}_{\varphi^{-1}(\tilde{\mathcal{R}})}(\varphi^{-1}(\tilde{\mathcal{R}})[p^{-1}],\mathcal{M}^{\cdot})\tilde{=}R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\tilde{\mathcal{R}}[p^{-1}],R\varphi_{*}(\mathcal{M}^{\cdot})) \] along with part $2)$ of \propref{Basic-CC-facts}. For the second, we use the distinguished triangle \[ R\underline{\mathcal{H}om}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M}^{\cdot})\to\mathcal{M}^{\cdot}\to\widehat{\mathcal{M}^{\cdot}} \] Since $p$ acts invertibly on $R\underline{\mathcal{H}om}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M}^{\cdot})$, it will also act invertibly on\linebreak{} $R\varphi_{*}(R\underline{\mathcal{H}om}(\tilde{\mathcal{R}}[p^{-1}],\mathcal{M}^{\cdot}))$, and the result follows from the fact that $R\varphi_{*}(\widehat{\mathcal{M}^{\cdot}})$ is already cohomologically complete. \end{proof} In using this theory, it is also useful to note the following straightforward \begin{lem} \label{lem:Hom-tensor-and-reduce}Let $\tilde{\mathcal{R}}$ be as above. Then, for any $\mathcal{M}^{\cdot},\mathcal{N}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}}))$ we have \[ R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}}(\mathcal{M}^{\cdot},\mathcal{N}^{\cdot})\otimes_{W(k)}^{L}k\tilde{\to}R\underline{\mathcal{H}om}_{\tilde{\mathcal{R}}/p}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k,\mathcal{N}^{\cdot}\otimes_{W(k)}^{L}k) \] If we have $\mathcal{N}^{\cdot}\in D(\mathcal{G}(\tilde{\mathcal{R}})^{\text{opp}})$, then we have \[ (\mathcal{N}^{\cdot}\otimes_{\tilde{\mathcal{R}}}^{L}\mathcal{M}^{\cdot})\otimes_{W(k)}^{L}k\tilde{\to}(\mathcal{N}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{\tilde{\mathcal{R}}/p}^{L}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k) \] \end{lem}
To close out this section, we will give an explicit description of the (ungraded) cohomological completion functor in a special case. Let $\mathcal{R}$ be a $p$-torsion-free sheaf of rings on $X$ as above; suppose $\mathcal{R}$ is left noetherian. Let us suppose that, in addition, the $p$-adic completion $\widehat{\mathcal{R}}$ is $p$-torsion-free, left noetherian, and that there exists a base of open subsets $\mathcal{B}$ on $X$ such that, for any $U\in\mathcal{B}$ and any coherent sheaf $\mathcal{M}$ of $\mathcal{R}_{0}=\mathcal{R}/p=\widehat{\mathcal{R}}/p$ modules on $U$, we have $H^{i}(U,\mathcal{M})=0$ for all $i>0$ (these are assumptions $1.2.2$ and $1.2.3$ of \cite{key-8}; they are always satisfied in this paper). Then we have \begin{prop} \label{prop:Completion-for-noeth}Let $\mathcal{M}^{\cdot}\in D_{coh}^{b}(\mathcal{R}-\text{mod})$. Then there is an isomorphism \[ \widehat{\mathcal{M}^{\cdot}}\tilde{=}\widehat{\mathcal{R}}\otimes_{\mathcal{R}}^{L}\mathcal{M}^{\cdot} \] where $\widehat{\mathcal{M}^{\cdot}}$ denotes the derived completion as usual. \end{prop}
\begin{proof} Let $\mathcal{M}$ be a coherent $\mathcal{R}$-module, and let $\widehat{\mathcal{M}}$ denote its $p$-adic completion. By \cite{key-8}, lemma 1.1.6 and the assumption on $\mathcal{B}$, we have ${\displaystyle \widehat{\mathcal{M}}(U)=\lim_{\leftarrow}\mathcal{M}(U)/p^{n}}$ for any $U\in\mathcal{B}$. So, by the noetherian hypothesis, we see that the natural map $\widehat{\mathcal{R}}(U)\otimes_{\mathcal{R}(U)}\mathcal{M}(U)\to\widehat{\mathcal{M}}(U)$ is an isomorphism for all $U\in\mathcal{B}$. It follows that the map $\widehat{\mathcal{R}}\otimes_{\mathcal{R}}\mathcal{M}\to\widehat{\mathcal{M}}$ is an isomorphism of sheaves; and therefore (as $p$-adic completion is exact on finitely generated $\mathcal{R}(U)$-modules) that $\widehat{\mathcal{R}}$ is flat over $\mathcal{R}$.
Now consider an arbitrary $\mathcal{M}^{\cdot}\in D_{coh}^{b}(\mathcal{R}-\text{mod})$. The above implies \[ \mathcal{H}^{i}(\widehat{\mathcal{R}}\otimes_{\mathcal{R}}^{L}\mathcal{M}^{\cdot})\tilde{=}\widehat{\mathcal{R}}\otimes_{\mathcal{R}}\mathcal{H}^{i}(\mathcal{M}^{\cdot})\tilde{\to}\widehat{\mathcal{H}^{i}(\mathcal{M}^{\cdot})} \] Therefore $\widehat{\mathcal{R}}\otimes_{\mathcal{R}}^{L}\mathcal{M}^{\cdot}\in D_{coh}^{b}(\widehat{\mathcal{R}}-\text{mod})$, which is contained in $D_{cc}(\mathcal{R}-\text{mod})$ by \cite{key-8}, theorem 1.6.1.
Let $\mathcal{C}^{\cdot}$ be the cone of the map $\mathcal{M}^{\cdot}\to\widehat{\mathcal{R}}\otimes_{\mathcal{R}}^{L}\mathcal{M}^{\cdot}$ . Then we have a long exact sequence \[ \mathcal{H}^{i-1}(\mathcal{C}^{\cdot})\to\mathcal{H}^{i}(\mathcal{M}^{\cdot})\to\widehat{\mathcal{H}^{i}(\mathcal{M}^{\cdot})}\to\mathcal{H}^{i}(\mathcal{C}^{\cdot}) \] and since both the kernel and cokernel of $\mathcal{H}^{i}(\mathcal{M}^{\cdot})\to\widehat{\mathcal{H}^{i}(\mathcal{M}^{\cdot})}$ are in $\mathcal{R}[p^{-1}]-\text{mod}$, we conclude that $\mathcal{C}^{\cdot}\in D(\mathcal{R}[p^{-1}]-\text{mod})$. Since $\widehat{\mathcal{R}}\otimes_{\mathcal{R}}^{L}\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{R}-\text{mod})$, the result follows from the fact that $D_{cc}(\mathcal{R}-\text{mod})$ is the quotient of $D(\mathcal{R}-\text{mod})$ by $D(\mathcal{R}[p^{-1}]-\text{mod})$. \end{proof}
\section{\label{sec:The-Algebra}The Algebra $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$}
To define the algebra $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ and prove \thmref{D01}, we are going to apply the basic gauge construction (\exaref{BasicGaugeConstruction}) to Berthelot's differential operators. Let $\mathfrak{X}$ be a smooth formal scheme over $W(k)$, and $X$ its special fibre. If $\mathfrak{X}$ is affine, then we denote $\mathfrak{X}=\text{Specf}(\mathcal{A})$, and $X=\text{Spec}(A)$. \begin{defn} \label{def:D^(0,1)-in-the-lifted-case} We set \[
\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),i}:=\{\Phi\in\mathcal{\widehat{D}}_{\mathfrak{X}}^{(1)}|p^{i}\Phi\in\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0)}\} \] We let $f:\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),i}\to\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),i+1}$ denote the inclusion, and $v:\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),i}\to\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),i-1}$ denote the multiplication by $p$. If $\Phi_{1}\in\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),i}$ and $\Phi_{2}\in\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),j}$, then $\Phi_{1}\cdot\Phi_{2}\in\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),i+j}$, and in this way we give \[ \mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}=\bigoplus_{i\in\mathbb{Z}}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),i} \] the structure of a sheaf of graded algebras over $D(W(k))$.
Now suppose $\mathfrak{X}=\text{Specf}(\mathcal{A})$. Then we have a ring-theoretic analogue of the above: define $\widehat{D}_{\mathcal{A}}^{(0,1),i}:=\{\Phi\in\widehat{D}_{\mathcal{A}}^{(1)}|p^{i}\Phi\in\widehat{D}_{\mathcal{A}}^{(0)}\}$, and as above we obtain the graded ring \[ \widehat{D}_{\mathcal{A}}^{(0,1)}=\bigoplus_{i}\widehat{D}_{\mathcal{A}}^{(0,1),i} \]
over $D(W(k))$.
In this case, we also have the finite-order analogue: define $D_{\mathcal{A}}^{(0,1),i}:=\{\Phi\in D_{\mathcal{A}}^{(1)}|p^{i}\Phi\in D_{\mathcal{A}}^{(0)}\}$, and as above we obtain the graded ring \[ D_{\mathcal{A}}^{(0,1)}=\bigoplus_{i}D_{\mathcal{A}}^{(0,1),i} \]
over $D(W(k))$. \end{defn}
It is easy to see that $\widehat{D}_{\mathcal{A}}^{(0,1),i}=\Gamma(\mathfrak{X},\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),i})$ when $\mathfrak{X}=\text{Specf}(\mathcal{A})$.
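Before discussing the general structure theory, here is the simplest example; it is a direct check from the definitions and is included only for orientation. Take $\mathfrak{X}$ to be the formal affine line over $W(k)$, so that $\mathcal{A}=W(k)\langle x\rangle$ is the $p$-adically completed polynomial ring, with derivation $\partial=d/dx$. The operator $\partial^{p}/p!$ (which will reappear as $\partial^{[p]}$ below) satisfies \[ \frac{\partial^{p}}{p!}(x^{m})=\binom{m}{p}x^{m-p} \] and so preserves $\mathcal{A}$; it lies in $\widehat{D}_{\mathcal{A}}^{(1)}$ but not in $\widehat{D}_{\mathcal{A}}^{(0)}$, while $p\cdot(\partial^{p}/p!)=((p-1)!)^{-1}\partial^{p}$ does lie in $\widehat{D}_{\mathcal{A}}^{(0)}$, since $(p-1)!$ is a unit in $W(k)$. Thus $\partial^{p}/p!$ is an element of $\widehat{D}_{\mathcal{A}}^{(0,1),1}$ which is not contained in $\widehat{D}_{\mathcal{A}}^{(0,1),0}=\widehat{D}_{\mathcal{A}}^{(0)}$.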
With the help of local coordinates, this algebra is not too difficult to study. We now suppose $\mathfrak{X}=\text{Specf}(\mathcal{A})$ where $\mathcal{A}$ possesses local coordinates; i.e., there is a collection $\{x_{i}\}_{i=1}^{n}\subset\mathcal{A}$ and derivations $\{\partial_{i}\}_{i=1}^{n}$ such that $\partial_{i}(x_{j})=\delta_{ij}$ and such that $\{\partial_{i}\}_{i=1}^{n}$ form a free basis for the $\mathcal{A}$-module of $W(k)$-linear derivations. We let $\partial_{i}^{[p]}:=\partial_{i}^{p}/p!$; this is a differential operator of order $p$ on $\mathcal{A}$. \begin{lem} \label{lem:Basic-structure-of-D_A^(i)} For $i\geq0$ we have that $\widehat{D}_{\mathcal{A}}^{(0,1),i}$ is the left $\widehat{D}_{\mathcal{A}}^{(0)}$-module\footnote{In fact, it is also the right $\widehat{D}_{\mathcal{A}}^{(0)}$-module generated by the same elements, as an identical proof shows.} generated by $\{(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\}$ where ${\displaystyle \sum_{t=1}^{n}j_{t}\leq i}$. For $i\leq0$ we have that $\widehat{D}_{\mathcal{A}}^{(0,1),i}=p^{-i}\cdot\widehat{D}_{\mathcal{A}}^{(0)}$. \end{lem}
\begin{proof} Let $i>0$. Clearly the module described is contained in $\widehat{D}_{\mathcal{A}}^{(0,1),i}$. For the converse, we begin with the analogous finite-order version of the statement. Namely, let $\Phi\in D_{\mathcal{A}}^{(1)}$ be such that $p^{i}\Phi\in D_{\mathcal{A}}^{(0)}$. We can write \[
\Phi=\sum_{I,J}a_{I,J}\partial_{1}^{i_{1}}(\partial_{1}^{[p]})^{j_{1}}\cdots\partial_{n}^{i_{n}}(\partial_{n}^{[p]})^{j_{n}}=\sum_{I,J}a_{I,J}\frac{\partial_{1}^{i_{1}+pj_{1}}\cdots\partial_{n}^{i_{n}+pj_{n}}}{(p!)^{|J|}} \]
where $|J|=j_{1}+\dots+j_{n}$, and the sum is finite. After collecting like terms together, we may suppose that $0\leq i_{j}<p$. In that case, the $a_{I,J}\in\mathcal{A}$ are uniquely determined by $\Phi$, and $\Phi\in D_{\mathcal{A}}^{(0)}$ iff $p^{|J|}\mid a_{I,J}$ for all
$I,J$. So, if $p^{i}\Phi\in D_{\mathcal{A}}^{(0)}$, we have ${\displaystyle a_{I,J}\frac{p^{i}}{p^{|J|}}\in\mathcal{A}}$. Thus whenever $|J|>i$ we have $a_{I,J}\in p^{|J|-i}\mathcal{A}$. On the other hand \[
p^{|J|-i}(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}=u\cdot\partial_{1}^{pj'_{1}}\cdots\partial_{n}^{pj'_{n}}\cdot(\partial_{1}^{[p]})^{j''_{1}}\cdots(\partial_{n}^{[p]})^{j''_{n}} \]
where $u$ is a unit in $\mathbb{Z}_{p}$, ${\displaystyle \sum_{t}j'_{t}=|J|-i}$, and ${\displaystyle \sum_{t}j''_{t}=i}$ (this follows from the relation
$p!\partial_{j}^{[p]}=\partial_{j}^{p}$). Therefore if $|J|>i$ we have \[
a_{I,J}\frac{\partial_{1}^{i_{1}+pj_{1}}\cdots\partial_{n}^{i_{n}+pj_{n}}}{(p!)^{|J|}}\in D_{\mathcal{A}}^{(0)}\cdot(\partial_{1}^{[p]})^{j''_{1}}\cdots(\partial_{n}^{[p]})^{j''_{n}} \] It follows that $\Phi$ is contained in the $D_{\mathcal{A}}^{(0)}$-submodule generated by $\{(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\}$
where $j_{1}+\dots+j_{n}\leq i$. So this submodule is exactly $\{\Phi\in D_{\mathcal{A}}^{(1)}|p^{i}\Phi\in D_{\mathcal{A}}^{(0)}\}$.
Now let $\Phi\in\widehat{D}_{\mathcal{A}}^{(0,1),i}$. Then we can write \[ p^{i}\Phi=\sum_{j=0}^{\infty}p^{j}\Phi_{j} \] where $\Phi_{j}\in D_{\mathcal{A}}^{(0)}$. Therefore, if $j\le i$ we have, by the previous paragraph, that $p^{-i}(p^{j}\Phi_{j})$ is contained in the $D_{\mathcal{A}}^{(0)}$ submodule generated by $\{(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\}$ where $j_{1}+\dots+j_{n}\leq i$. So the result follows from ${\displaystyle \Phi=\sum_{j=0}^{i}p^{j-i}\Phi_{j}+\sum_{j=i+1}^{\infty}p^{j-i}\Phi_{j}}$ as the second term in this sum is contained in $\widehat{\mathcal{D}}_{\mathcal{A}}^{(0)}$. This proves the lemma for $i\geq0$; while for $i\leq0$ it follows immediately from the definition. \end{proof} From this it follows that ${\displaystyle \widehat{D}_{\mathcal{A}}^{(0,1),\infty}:=\lim_{\rightarrow}\widehat{D}_{\mathcal{A}}^{(0,1),i}}$ is the sub-algebra of $\text{End}_{W(k)}(\mathcal{A})$ generated by $\widehat{D}_{\mathcal{A}}^{(0)}$ and $\{\partial_{1}^{[p]},\dots,\partial_{n}^{[p]}\}$. We have \begin{lem} \label{lem:Basic-Structure-of-D^(1)} The algebra $\widehat{D}_{\mathcal{A}}^{(0,1),\infty}$ is a (left and right) noetherian ring, whose $p$-adic completion is isomorphic to $\widehat{D}_{\mathcal{A}}^{(1)}$. Further, we have $\widehat{D}_{\mathcal{A}}^{(0,1),\infty}[p^{-1}]\tilde{=}\widehat{D}_{\mathcal{A}}^{(0)}[p^{-1}]$. \end{lem}
\begin{proof} First, put a filtration on $\widehat{D}_{\mathcal{A}}^{(0,1),\infty}$ by setting $F^{j}(\widehat{D}_{\mathcal{A}}^{(0,1),\infty})$ to be the $\widehat{D}_{\mathcal{A}}^{(0)}$-submodule generated by $\{(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\}$ where $j_{1}+\dots+j_{n}\leq j$. Then $\text{gr}(\widehat{D}_{\mathcal{A}}^{(0,1),\infty})$ is a quotient of a polynomial ring $\widehat{D}_{\mathcal{A}}^{(0)}[T_{1},\dots,T_{n}]$ where $T_{i}$ is sent to the class of $\partial_{i}^{[p]}$ in $\text{gr}^{1}(\widehat{D}_{\mathcal{A}}^{(0,1),\infty})$. To see this, we need to show that the image of $\partial_{i}^{[p]}$ in $\text{gr}(\widehat{D}_{\mathcal{A}}^{(0,1),\infty})$ commutes with $\widehat{D}_{\mathcal{A}}^{(0)}=\text{gr}^{0}(\widehat{D}_{\mathcal{A}}^{(0,1),\infty})$; this follows from the relation \[ [\partial_{i}^{[p]},a]=\sum_{j=1}^{p}\partial_{i}^{[j]}(a)\partial_{i}^{[p-j]}\in\widehat{D}_{\mathcal{A}}^{(0)} \] for any $a\in\mathcal{A}$ (here $\partial_{i}^{[j]}:=\partial_{i}^{j}/j!$). So the fact that $\widehat{D}_{\mathcal{A}}^{(0,1),\infty}$ is a (left and right) noetherian ring follows from the Hilbert basis theorem and the fact that $\widehat{D}_{\mathcal{A}}^{(0)}$ is left and right noetherian.
Now we compute the $p$-adic completion of $\widehat{D}_{\mathcal{A}}^{(0,1),\infty}$. Inside $\text{End}_{W(k)}(\mathcal{A})$, we have \[ D_{\mathcal{A}}^{(1)}\subset\widehat{D}_{\mathcal{A}}^{(0,1),\infty}\subset\widehat{D}_{\mathcal{A}}^{(1)} \] and so, for all $n>0$, we have \[ D_{\mathcal{A}}^{(1)}/p^{n}\to\widehat{D}_{\mathcal{A}}^{(0,1),\infty}/p^{n}\to\widehat{D}_{\mathcal{A}}^{(1)}/p^{n} \] and the composition is the canonical isomorphism $D_{\mathcal{A}}^{(1)}/p^{n}\tilde{\to}\widehat{D}_{\mathcal{A}}^{(1)}/p^{n}$. Thus $D_{\mathcal{A}}^{(1)}/p^{n}\to\widehat{D}_{\mathcal{A}}^{(0,1),\infty}/p^{n}$ is injective. On the other hand, suppose $\Phi\in\widehat{D}_{\mathcal{A}}^{(0,1),\infty}$. By definition we can write \[ \Phi=\sum_{I}\varphi_{I}\cdot(\partial_{1}^{[p]})^{i_{1}}\cdots(\partial_{n}^{[p]})^{i_{n}} \] where $I=(i_{1},\dots,i_{n})$ is a multi-index, $\varphi_{I}\in\widehat{D}_{\mathcal{A}}^{(0)}$, and the sum is finite. Choose elements $\psi_{I}\in D_{\mathcal{A}}^{(0)}$ such that $\psi_{I}-\varphi_{I}\in p^{n}\cdot\widehat{D}_{\mathcal{A}}^{(0)}$ (this is possible since $\widehat{D}_{\mathcal{A}}^{(0)}$ is the $p$-adic completion of $D_{\mathcal{A}}^{(0)}$). Then if we set \[ \Phi'=\sum_{I}\psi_{I}\cdot(\partial_{1}^{[p]})^{i_{1}}\cdots(\partial_{n}^{[p]})^{i_{n}}\in D_{\mathcal{A}}^{(1)} \] we see that the class of $\Phi'$ in $D_{\mathcal{A}}^{(1)}/p^{n}$ maps to the class of $\Phi\in\widehat{D}_{\mathcal{A}}^{(0,1),\infty}/p^{n}$. Thus $D_{\mathcal{A}}^{(1)}/p^{n}\to\widehat{D}_{\mathcal{A}}^{(0,1),\infty}/p^{n}$ is onto and therefore an isomorphism, and the completion result follows by taking the inverse limit.
Finally, since each $\partial_{i}^{[p]}=\partial_{i}^{p}/p!$ is contained in $\widehat{D}_{\mathcal{A}}^{(0)}[p^{-1}]$, we must have $\widehat{D}_{\mathcal{A}}^{(0)}\subset\widehat{D}_{\mathcal{A}}^{(0,1),\infty}\subset\widehat{D}_{\mathcal{A}}^{(0)}[p^{-1}]$, so that $\widehat{D}_{\mathcal{A}}^{(0,1),\infty}[p^{-1}]\tilde{=}\widehat{D}_{\mathcal{A}}^{(0)}[p^{-1}]$. \end{proof} \begin{cor} $\widehat{D}_{\mathcal{A}}^{(0,1)}$ is a (left and right) noetherian ring, which is finitely generated as an algebra over $\widehat{D}_{\mathcal{A}}^{(0)}[f,v]$. Therefore the sheaf $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ is a coherent, locally noetherian sheaf of rings which is stalk-wise noetherian. \end{cor}
This follows immediately from the above. Set $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),-\infty}:=\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}/(v-1)\tilde{=}\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$, while $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),\infty}:=\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}/(f-1)$ has $p$-adic completion equal to $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)}$.
\subsection{Generators and Relations, Local Coordinates}
In addition to the description above via endomorphisms of $\mathcal{O}_{\mathfrak{X}}$, it is also useful to have a more concrete (local) description of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ and, especially, $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}/p$. Suppose $\mathfrak{X}=\text{Specf}(\mathcal{A})$ possesses local coordinates as above. We will start by describing ${\displaystyle \widehat{D}_{\mathcal{A}}^{(0,1),+}:=\bigoplus_{i=0}^{\infty}\widehat{D}_{\mathcal{A}}^{(0,1),i}}$. \begin{defn} Let $M_{\mathcal{A}}$ be the free graded $\mathcal{A}$-module on generators $\{\xi_{i}\}_{i=1}^{n}$ (in degree $0$), and $f$ and $\{\xi_{i}^{[p]}\}_{i=1}^{n}$ (in degree $1$). Let $\mathcal{B}^{(0,1),+}$ be the quotient of the tensor algebra $T_{\mathcal{A}}(M_{\mathcal{A}})$ by the relations $[f,m]$ (for all $m\in M_{\mathcal{A}}$), $[\xi_{i},a]-\partial_{i}(a)$ (for all $i$, and for any $a\in\mathcal{A}$), $[\xi_{i},\xi_{j}]$, $[\xi_{i}^{[p]},\xi_{j}^{[p]}]$, $[\xi_{i}^{[p]},\xi_{j}]$ (for all $i,j$), ${\displaystyle [\xi_{i}^{[p]},a]-f\cdot\sum_{r=0}^{p-1}\frac{\partial_{i}^{p-r}}{(p-r)!}(a)\cdot\frac{\xi_{i}^{r}}{r!}}$ (for all $i$, and for any $a\in\mathcal{A}$), $f\xi_{i}^{p}-p!\xi_{i}^{[p]}$ for all $i$, and $\xi_{i}^{p}\xi_{j}^{[p]}-\xi_{j}^{p}\xi_{i}^{[p]}$ for all $i$ and $j$.
The algebra $\mathcal{B}^{(0,1),+}$ inherits a grading from $T_{\mathcal{A}}(M_{\mathcal{A}})$. Let $\mathcal{C}^{(0,1),+}$ be the graded ring obtained by $p$-adically completing each component of $\mathcal{B}^{(0,1),+}$. \end{defn}
Then we have \begin{lem} \label{lem:Reduction-is-correct}There is an isomorphism of graded algebras $\mathcal{C}^{(0,1),+}\tilde{\to}\widehat{D}_{\mathcal{A}}^{(0,1),+}$. \end{lem}
\begin{proof} There is an evident map $T_{\mathcal{A}}(M_{\mathcal{A}})\to\widehat{D}_{\mathcal{A}}^{(0,1),+}$ which is the identity on $\mathcal{A}$ and which sends $\xi_{i}\to\partial_{i}$, $f\to f$ and $\xi_{i}^{[p]}\to\partial_{i}^{[p]}$. Clearly this induces a graded map $\mathcal{B}^{(0,1),+}\to\widehat{D}_{\mathcal{A}}^{(0,1),+}$. Since each graded component of $\widehat{D}_{\mathcal{A}}^{(0,1),+}$ is $p$-adically complete, we obtain a map $\mathcal{C}^{(0,1),+}\to\widehat{D}_{\mathcal{A}}^{(0,1),+}$. Let us show that it is an isomorphism.
We begin with the surjectivity. In degree $0$, we have that $\mathcal{B}^{(0,1),0}$ is generated by $\mathcal{A}$ and $\{\xi_{i}\}_{i=1}^{n}$ and satisfies $[\xi_{i},a]=\partial_{i}(a)$ for all $i$ and all $a\in\mathcal{A}$. Thus the obvious map $\mathcal{B}^{(0,1),0}\to D_{\mathcal{A}}^{(0)}$ is an isomorphism, and therefore so is the completion $\mathcal{C}^{(0,1),0}\to\widehat{D}_{\mathcal{A}}^{(0)}=\widehat{D}_{\mathcal{A}}^{(0,1),0}$. Further, we saw above (in \lemref{Basic-structure-of-D_A^(i)}) that each $\widehat{D}_{\mathcal{A}}^{(0,1),i}$ (for $i\geq0$) is generated, as a module over $\widehat{D}_{\mathcal{A}}^{(0)}$, by terms of the form $\{f^{i_{0}}(\partial_{1}^{[p]})^{i_{1}}\cdots(\partial_{n}^{[p]})^{i_{n}}\}$ where $i_{0}+i_{1}+\dots+i_{n}=i$. By definition, $\mathcal{C}^{(0,1),i}$ is exactly the $\mathcal{C}^{(0,1),0}$-module generated by terms of the form $\{f^{i_{0}}(\xi_{1}^{[p]})^{i_{1}}\cdots(\xi_{n}^{[p]})^{i_{n}}\}$. Thus we see that the map surjects onto the piece of degree $i$ for all $i$; hence the map is surjective.
To show the injectivity, consider the graded ring ${\displaystyle \mathcal{A}[f]=\bigoplus_{i=0}^{\infty}\mathcal{A}}$. The algebra $\widehat{D}_{\mathcal{A}}^{(0,1),+}$ acts on $\mathcal{A}[f]$ as follows: if $\Phi\in\widehat{D}_{\mathcal{A}}^{(0,1),j}$ then $\Phi\cdot(af^{i})=\Phi(a)f^{i+j}$, where $\Phi(a)$ is the usual action of $\Phi$ on $\mathcal{A}$, coming from the fact that $\Phi\in\widehat{D}_{\mathcal{A}}^{(1)}$. In addition, $\mathcal{C}^{(0,1),+}$ acts on $\mathcal{A}[f]$ via $\xi_{i}(af^{j})=\partial_{i}(a)f^{j}$ and $\xi_{i}^{[p]}(af^{j})=\partial_{i}^{[p]}(a)f^{j+1}$. This action agrees with the composed map \[ \mathcal{C}^{(0,1),+}\to\widehat{D}_{\mathcal{A}}^{(0,1),+}\to\text{End}_{W(k)}(\mathcal{A}[f]) \] where the latter map comes from the action of $\widehat{D}_{\mathcal{A}}^{(0,1),+}$ on $\mathcal{A}[f]$. We will therefore be done if we can show that this composition is injective.
For this, we proceed by induction on the degree $i$. When $i=0$ it follows immediately from the fact that $\mathcal{C}^{(0,1),0}\tilde{=}\widehat{D}_{\mathcal{A}}^{(0)}$. Let $\Phi\in\mathcal{C}^{(0,1),i}$. If $\Phi$ acts as zero on $\mathcal{A}[f]$, we will show that $\Phi\in f\cdot\mathcal{C}^{(0,1),i-1}$; the induction assumption (and the fact that $f$ acts injectively on $\mathcal{A}[f]$) then implies that $\Phi=0$.
Write \[ \Phi=\sum_{J}\Phi_{J}(\xi_{1}^{[p]})^{j_{1}}\cdots(\xi_{n}^{[p]})^{j_{n}}-f^{i}\Psi_{0}-\sum_{s=1}^{i-1}f^{i-s}\sum_{J}\Psi_{sJ}(\xi_{1}^{[p]})^{j_{1}}\cdots(\xi_{n}^{[p]})^{j_{n}} \]
where, in the first sum, each $J$ satisfies ${\displaystyle i=|J|=\sum_{t=1}^{n}j_{t}}$, and in the second sum we have $|J|=i-s$, and $\Phi_{J},\Psi_{0},\Psi_{sJ}\in\mathcal{C}^{(0,1),0}\tilde{=}\widehat{D}_{\mathcal{A}}^{(0)}$. We shall show that every term in the first sum is contained in $f\cdot\mathcal{C}^{(0,1),i-1}$.
Expanding in terms of monomials in the $\{\xi_{i}\}$, denoted $\{\xi^{I}\}$, we obtain an equation \[ \Phi=\sum_{I,J}a_{J,I}\xi^{I}(\xi_{1}^{[p]})^{j_{1}}\cdots(\xi_{n}^{[p]})^{j_{n}}-f^{i}\sum_{I}b_{0,I}\xi^{I}-\sum_{s=1}^{i-1}f^{i-s}\sum_{I,J}b_{s,I,J}\xi^{I}(\xi_{1}^{[p]})^{j_{1}}\cdots(\xi_{n}^{[p]})^{j_{n}} \] where $a_{J,I}\to0$, $b_{0,I}\to0$, and $b_{s,I,J}\to0$ (in the
$p$-adic topology on $\mathcal{A}$) as $|I|\to\infty$. For any multi-index $J=(j_{1},\dots,j_{n})$ let $pJ=(pj_{1},\dots,pj_{n})$. The relations $\xi_{i}^{p}\xi_{j}^{[p]}=\xi_{j}^{p}\xi_{i}^{[p]}$ (for all $i,j$ ) in $\mathcal{C}^{(0,1),+}$ ensure that $\xi^{I}(\xi_{1}^{[p]})^{j_{1}}\cdots(\xi_{n}^{[p]})^{j_{n}}=\xi^{I'}(\xi_{1}^{[p]})^{j'_{1}}\cdots(\xi_{n}^{[p]})^{j'_{n}}$
whenever $I+pJ=I'+pJ'$ and $|J|=|J'|$. Since, in the sum ${\displaystyle \sum_{I,J}a_{J,I}\xi^{I}(\xi_{1}^{[p]})^{j_{1}}\cdots(\xi_{n}^{[p]})^{j_{n}}}$, we have $|J|=i$ for all $J$, we may collect terms together and assume that each multi-index $I+pJ$ is represented only once.
Now, the fact that $\Phi$ acts as zero on $\mathcal{A}[f]$ implies that the differential operators ${\displaystyle \sum_{I,J}a_{J,I}\partial^{I}(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}}$ and ${\displaystyle \sum_{I}b_{0,I}\partial^{I}+\sum_{s=1}^{i-1}\sum_{I,J}b_{s,I,J}\partial^{I}(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}}$ act as the same endomorphism on $\mathcal{A}$. Therefore, for each $a_{J,I}$ which is nonzero, we have \[ a_{J,I}\partial^{I}(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}=\sum_{I'=I+pJ}b_{0,I'}\partial^{I'}+\sum_{s=1}^{i-1}\sum_{I'+pJ'=I+pJ}b_{s,I',J'}\partial^{I'}(\partial_{1}^{[p]})^{j'_{1}}\cdots(\partial_{n}^{[p]})^{j'_{n}} \] Now, after inverting $p$, and using $\partial_{i}^{[p]}=\partial_{i}^{p}/p!$ inside $\widehat{\mathcal{D}}_{\mathcal{A}}^{(1)}[p^{-1}]$, we obtain the equation \[ \frac{a_{J,I}}{(p!)^{i}}=b_{0,I'}+\sum_{s=1}^{i-1}\sum_{I',J'}\frac{b_{s,I',J'}}{(p!)^{s}} \] which implies $a_{J,I}\in p\cdot\mathcal{A}$. But we have the relation $f\xi_{i}^{p}-p!\xi_{i}^{[p]}$ in $\mathcal{C}^{(0,1),+}$; i.e., $p\xi_{i}^{[p]}\in f\cdot\mathcal{C}^{(0,1),+}$. Therefore $a_{J,I}\in p\cdot\mathcal{A}$ implies $a_{J,I}\xi^{I}(\xi_{1}^{[p]})^{j_{1}}\cdots(\xi_{n}^{[p]})^{j_{n}}\in f\cdot\mathcal{C}^{(0,1),i-1}$. Since this holds for all $I,J$, we see that in fact $\Phi\in f\cdot\mathcal{C}^{(0,1),i-1}$ as desired. \end{proof} \begin{rem} Given the isomorphism of the theorem, from now on, we shall denote $\xi_{i}$ by $\partial_{i}$ and $\xi_{i}^{[p]}$ by $\partial_{i}^{[p]}$ inside $\mathcal{C}^{(0,1),+}$. \end{rem}
Next, we have \begin{lem} \label{lem:linear-independance-over-D_0-bar} Suppose that $\{\Phi_{sJ}\}$ are elements of $\widehat{D}_{\mathcal{A}}^{(0)}$, and suppose that, for some $i\geq1$, we have \[
\sum_{s=0}^{i-1}\sum_{|J|=s}f^{i-s}\Phi_{sJ}(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\in p\cdot\widehat{D}_{\mathcal{A}}^{(0,1),i} \] in $\widehat{D}_{\mathcal{A}}^{(0,1),+}$. Then each $\Phi_{sJ}$ is contained in the right ideal generated by $\{\partial_{1}^{p},\dots,\partial_{n}^{p}\}$ and $p$. \end{lem}
\begin{proof} Write $\Phi$ for the element in question. As in the previous proof, we may expand $\Phi_{0}$ and the $\Phi_{sJ}$ in terms of the $\{\partial^{I}\}$ to obtain \begin{equation}
\Phi=\sum_{s=0}^{i-1}f^{i-s}\sum_{I,J,|J|=s}b_{s,I,J}\partial^{I}(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\label{eq:first-form-for-phi} \end{equation}
where $b_{s,I,J}\to0$ as $|I|\to\infty$. On the other hand, since $\Phi\in p\cdot\widehat{D}_{\mathcal{A}}^{(0,1),i}$, and $\widehat{D}_{\mathcal{A}}^{(0,1),i}$
is generated over $\widehat{D}_{\mathcal{A}}^{(0)}$ by $\{f^{i-s}(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\}_{0\leq s\leq i,|J|=s}$, we also obtain \begin{equation}
\Phi=\sum_{s=0}^{i}f^{i-s}\sum_{I,J,|J|=s}a_{s,I,J}\partial^{I}(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\label{eq:second-form-for-phi} \end{equation}
where each $a_{s,I,J}$ is contained in $p\cdot\mathcal{A}$, and $a_{s,I,J}\to0$ as $|I|\to\infty$.
For a multi-index $K$, let $\tilde{K}$ denote the multi-index $(\tilde{k}_{1},\dots,\tilde{k}_{n})$ in which $\tilde{k}_{i}$ is the greatest multiple of $p$ less than or equal to $k_{i}$. Write $\tilde{K}=p\tilde{J}$, and $K=\tilde{I}+p\tilde{J}$. Then if $K=I'+pJ'$ for some $I'\neq\tilde{I}$ and $J'\neq\tilde{J}$, we must have $j'_{m}<\tilde{j}_{m}$ for some $m$; which implies that $\partial^{I'}$ is contained in the right ideal generated by $\partial_{m}^{p}$. Since $f\cdot\partial_{i}^{p}=p!\partial_{i}^{[p]}$, we obtain \[ f^{i-s}b_{s,I',J'}\partial^{I'}(\partial_{1}^{[p]})^{j'_{1}}\cdots(\partial_{n}^{[p]})^{j'_{n}}=f^{i-s-1}b_{s+1,I'',J''}\partial^{I''}(\partial_{1}^{[p]})^{j''_{1}}\cdots(\partial_{n}^{[p]})^{j''_{n}} \] where $I''+pJ''=I'+pJ'=K$, with $j''_{m}=j'_{m}+1$ (and $j''_{t}=j'_{t}$ for $t\neq m$), and $b_{s+1,I'',J''}:=p!\,b_{s,I',J'}\in p\cdot\mathcal{A}$. Therefore each such term is in the right ideal generated by $\{\partial_{i}^{p}\}$ and is contained in $p\cdot\widehat{D}_{\mathcal{A}}^{(0,1),i}$, and so we may subtract each of these terms from $\Phi$ without affecting the statement.
Thus we may assume that each nonzero $b_{s,I,J}$ in \eqref{first-form-for-phi} has index of the form $(I,J)=(\tilde{I},\tilde{J})$ as above, and so there is only one nonzero $b_{s,I,J}$ for each multi-index $K=I+pJ$.
Now, comparing the actions of each of the expressions \eqref{first-form-for-phi} and \eqref{second-form-for-phi} on $\mathcal{A}[f]$, we obtain, for each multi-index $K$, the equality \[ b_{s,\tilde{I},\tilde{J}}=\sum_{I+pJ=K}\sum_{s}a_{s,I,J} \] and since each $a_{s,I,J}\in p\cdot\mathcal{A}$, we see $b_{s,\tilde{I},\tilde{J}}\in p\cdot\mathcal{A}$. Since this is true for all $b_{s,\tilde{I},\tilde{J}}$, the result follows. \end{proof} Using these results, we can give a description of $D_{A}^{(0,1),+}:=\widehat{D}_{\mathcal{A}}^{(0,1),+}/p$. Let $I$ be the two-sided ideal of $D_{A}^{(0)}:=\widehat{D}_{\mathcal{A}}^{(0)}/p$ generated by $\mathcal{Z}(D_{A}^{(0)})^{+}$, the positive-degree elements of the center\footnote{The center of $D_{A}^{(0)}$ is a graded algebra via the isomorphism $\mathcal{Z}(D_{A}^{(0)})\tilde{=}A^{(1)}[\partial_{1}^{p},\dots,\partial_{n}^{p}]$; the degree of each $\partial_{i}^{p}$ is $1$.}, and let $\overline{D_{A}^{(0)}}=D_{A}^{(0)}/I$. \begin{thm} \label{thm:Local-Coords-for-D+} Let ${\displaystyle D_{A}^{(0,1),+}=\bigoplus_{i=0}^{\infty}D_{A}^{(0,1),i}}$ be the decomposition according to grading. Then each $D_{A}^{(0,1),i}$ is a module over $D_{A}^{(0)}=D_{A}^{(0,1),0}$, and \[
D_{A}^{(0,1),i}=f\cdot D_{A}^{(0,1),i-1}\oplus\sum_{|J|=i}D_{A}^{(0)}\cdot(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}} \] as $D_{A}^{(0)}$-modules. Further, $f\cdot D_{A}^{(0,1),i-1}$ is free over $\overline{D_{A}^{(0)}}$, and the module\linebreak{}
${\displaystyle \sum_{|J|=i}D_{A}^{(0)}\cdot(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}}$ is isomorphic, as a $D_{A}^{(0)}$-module, to $I^{i}$, via the map which sends $(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}$ to $(\partial_{1}^{p})^{j_{1}}\cdots(\partial_{n}^{p})^{j_{n}}$. In particular, on each $D_{A}^{(0,1),i}$ we have $\text{ker}(f)=\text{im}(v)$ and $\text{im}(f)=\text{ker}(v)$. \end{thm}
\begin{proof} Let $i\geq1$. By definition $D_{A}^{(0,1),i}$ is generated, over $D_{A}^{(0)}$, by terms of the form \linebreak{}
$\{f^{i-s}(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\}_{|J|=s}$; and so it is also generated by $f\cdot D_{A}^{(0,1),i-1}$ and $\{(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\}_{|J|=i}$. Suppose we have an equality of the form \[ \sum_{|J|=i}\bar{\Phi}_{J}(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}=\sum_{s=0}^{i-1}f^{i-s}\sum_{J}\bar{\Psi}_{sJ}(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}} \] in $D_{A}^{(0,1),i}$ (here, $\bar{\Phi}_{J},\bar{\Psi}_{sJ}$ are in $D_{A}^{(0)}$). Choosing lifts to $\Phi_{J},\Psi_{sJ}\in\widehat{D}_{\mathcal{A}}^{(0)}$ yields \[ \sum_{|J|=i}\Phi_{J}(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}-\sum_{s=0}^{i-1}f^{i-s}\sum_{J}\Psi_{sJ}(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\in p\cdot\widehat{D}_{\mathcal{A}}^{(0,1),i}\subset f\cdot\widehat{D}_{\mathcal{A}}^{(0,1),i-1} \] (the last inclusion follows from $(p!)\partial_{i}^{[p]}=f\partial_{i}^{p}$); and so (the proof of) \lemref{Reduction-is-correct} now forces $\Phi_{J}\in p\cdot\widehat{D}_{\mathcal{A}}^{(0,1),i}$ for all $J$, so $\bar{\Phi}_{J}=0$ as desired. The isomorphism of
${\displaystyle \sum_{|J|=i}D_{A}^{(0)}\cdot(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}}$ with $I^{i}$ is given by the reduction of the morphism $p^{i}\cdot$ on $D_{\mathcal{A}}^{(0,1),i}$, and \lemref{linear-independance-over-D_0-bar}
yields that $f\cdot D_{A}^{(0,1),i-1}$ is free over $\overline{D}_{A}^{(0)}$; a basis is given by $\{f^{i-|J|}(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\}_{0\leq|J|\leq i-1}$. The last statement follows directly from this description. \end{proof} We now use this to describe the entire graded algebra $D_{A}^{(0,1)}:=D_{\mathcal{A}}^{(0,1)}/p$. \begin{cor} \label{cor:Local-coords-over-A=00005Bf,v=00005D} The algebra $D_{A}^{(0,1)}$ is a free graded module over $D(A)$, with a basis given by the set $\{\partial^{I}(\partial^{[p]})^{J}\}$, where $I=(i_{1},\dots,i_{n})$ is a multi-index with $0\leq i_{j}\leq p-1$ for all $j$ and $J$ is any multi-index with entries $\geq0$. \end{cor}
\begin{proof} By the previous corollary, any element of $D_{A}^{(0,1),+}$ can be written as a finite sum \[ \sum_{I,J}a_{I,J}\partial^{I}(\partial^{[p]})^{J} \] where $a_{I,J}\in A[f]$ and $I$ and $J$ are arbitrary multi-indices. As any element in $D_{A}^{(0,1),-}$ is a sum of the form \[ \sum_{i=1}^{m}v^{i}\sum_{J}b_{i,J}(\partial)^{J} \] we see that in fact any element of $D_{A}^{(0,1)}$ can be written as a finite sum \[ \sum_{I,J}a_{I,J}\partial^{I}(\partial^{[p]})^{J} \] where $a_{I,J}\in A[f,v]$ and $I$ and $J$ are arbitrary multi-indices. Iteratively using the relations $(p-1)!\partial_{i}^{p}=v\partial_{i}^{[p]}$ we see that we may suppose that each entry of $I$ is contained in $\{0,\dots,p-1\}$; this shows that these elements span.
To see the linear independence, suppose we have \begin{equation} \sum_{I,J}a_{I,J}\partial^{I}(\partial^{[p]})^{J}=0\label{eq:lin-dep} \end{equation} where now each entry of $I$ is contained in $\{0,\dots,p-1\}$. Write \[ a_{I,J}=\sum_{s\geq0}f^{s}a_{I,J,s}+\sum_{t<0}v^{-t}a_{I,J,t} \] We have \[
a_{I,J}\partial^{I}(\partial^{[p]})^{J}=\sum_{s\geq0}f^{s}a_{I,J,s}\partial^{I}(\partial^{[p]})^{J}+\sum_{J',t}a_{I,J,t}\partial^{I+pJ'}(\partial^{[p]})^{J''}+\sum_{t<0}v^{-t-|J|}a_{I,J,t}\partial^{I+pJ} \]
where, in the middle sum, $t$ satisfies $0<-t\leq|J|$; for each such $t$ we pick $J'$ such that $J'+J''=J$ and $|J'|=-t$. Now, the previous corollary gives an isomorphism \[ D_{A}^{(0,1),i}\tilde{=}D_{A}^{(0)}/I^{i}\oplus I^{i} \] where $I=C^{1}(D_{A}^{(0)})$, for all $i\geq0$; this in fact holds for all $i\in\mathbb{Z}$ if we interpret $I^{i}=D_{A}^{(0)}$ for
$i<0$. This implies that the elements $\{f^{s}\partial^{I}(\partial^{[p]})^{J},\partial^{I+pJ'}(\partial^{[p]})^{J''},v^{-t-|J|}\partial^{I+pJ}\}$, where $I,J$ are multi-indices with each entry of $I$ contained in $\{0,\dots,p-1\}$, are linearly independent over $A$ (one may look at each degree separately and use the above description). Thus \eqref{lin-dep} implies $a_{I,J,s}=0=a_{I,J,t}$ for all $I,J,s,t$; hence each $a_{I,J}=0$ as desired. \end{proof} Finally, let us apply this result to describe the algebra of finite-order operators $D_{\mathcal{A}}^{(0,1)}$. Namely, we have \begin{cor} \label{cor:Each-D^(i)-is-free}The algebra $D_{\mathcal{A}}^{(0,1)}$ is free over $D(\mathcal{A})$, with a basis given by the set $\{\partial^{I}(\partial^{[p]})^{J}\}$, where $I=(i_{1},\dots,i_{n})$ is a multi-index with $0\leq i_{j}\leq p-1$ for all $j$ and $J$ is any multi-index with entries $\geq0$. \end{cor}
\begin{proof} By the previous result, the images of these elements in $D_{A}^{(0,1)}=D_{\mathcal{A}}^{(0,1)}/p$ form a basis over $D(\mathcal{A})$. Since $D_{\mathcal{A}}^{(0,1)}$ is $p$-torsion-free, and $D(\mathcal{A})$ is $p$-adically separated, it follows directly that these elements are linearly independent over $D(\mathcal{A})$. The fact that they span follows (as in the previous proof) from \lemref{Basic-structure-of-D_A^(i)}. \end{proof}
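As a small illustration of how the relations rewrite operators into this basis, consider the one-variable case modulo $p$: the relation $(p-1)!\,\partial^{p}=v\,\partial^{[p]}$ used in the proof of \corref{Local-coords-over-A=00005Bf,v=00005D} gives \[ \partial^{p}=((p-1)!)^{-1}\,v\,\partial^{[p]} \] in $D_{A}^{(0,1)}$ (the unit $(p-1)!$ is congruent to $-1$ by Wilson's theorem); for $p=2$ this reads simply $\partial^{2}=v\,\partial^{[2]}$. This is how a power $\partial^{i}$ with $i\geq p$ is expressed in the basis above, at the cost of coefficients involving $v$.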
\subsection{$\mathcal{D}^{(0,1)}$-modules over $k$}
Now let $X$ be an arbitrary smooth variety over $k$; in this subsection we make no assumption that there is a lift of $X$; however, if $U\subset X$ is an open affine, there is always a lift of $U$ to a smooth formal scheme $\mathfrak{U}$. In this subsection we will construct a sheaf of algebras $\mathcal{D}_{X}^{(0,1)}$ such that, on each open affine $U$ which possesses local coordinates, we have $\mathcal{D}_{X}^{(0,1)}(U)=\widehat{\mathcal{D}}_{\mathfrak{U}}^{(0,1)}(\mathfrak{U})/p$.
There is a natural action of $\mathcal{D}_{X}^{(0)}$ on $\mathcal{O}_{X}$, inducing a map $\mathcal{D}_{X}^{(0)}\to\mathcal{E}nd_{k}(\mathcal{O}_{X})$; we let $\overline{\mathcal{D}_{X}^{(0)}}\subset\mathcal{E}nd_{k}(\mathcal{O}_{X})$ denote the image of $\mathcal{D}_{X}^{(0)}$ under this map. It is a quotient algebra of $\mathcal{D}_{X}^{(0)}$, and a quick local calculation gives \begin{lem} \label{lem:Basic-description-of-D-bar} Let $U\subset X$ be an open subset which possesses local coordinates $\{x_{1},\dots,x_{n}\}$, and let $\{\partial_{1},\dots,\partial_{n}\}$ denote derivations satisfying $\partial_{i}(x_{j})=\delta_{ij}$. Then the kernel of the map $\mathcal{D}_{X}^{(0)}(U)\to\mathcal{E}nd_{k}(\mathcal{O}_{X}(U))$ is the two-sided ideal $\mathcal{I}$ generated by $\{\partial_{1}^{p},\dots,\partial_{n}^{p}\}$. The image consists of differential operators of the form ${\displaystyle \sum a_{I}\partial^{I}}$ where the sum ranges over multi-indices $I=(i_{1},\dots,i_{n})$ for which $0\leq i_{j}<p$ (for all $j$), the $a_{I}\in\mathcal{O}_{X}(U)$, and $\partial^{I}=\partial_{1}^{i_{1}}\cdots\partial_{n}^{i_{n}}$. \end{lem}
In particular, if $U=\text{Spec}(A)$ then we have $\overline{\mathcal{D}_{X}^{(0)}}(U)=\overline{D_{A}^{(0)}}$ as defined in the previous section.
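For instance, when $n=1$ the lemma says that $\overline{\mathcal{D}_{X}^{(0)}}(U)$ consists of the operators $a_{0}+a_{1}\partial+\dots+a_{p-1}\partial^{p-1}$ with $a_{i}\in\mathcal{O}_{X}(U)$; in particular $\overline{\mathcal{D}_{X}^{(0)}}(U)$ is a free $\mathcal{O}_{X}(U)$-module of rank $p$ (of rank $p^{n}$ in general).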
Now let $\mathcal{D}iff_{X}^{\leq n}$ denote the sheaf of differential operators of order $\leq n$ on $X$. This is a sub-sheaf of $\mathcal{E}nd_{k}(\mathcal{O}_{X})$. \begin{defn} \label{def:L}1) Let $\tilde{\mathcal{D}iff}_{X}^{\leq p}$ denote the sub-sheaf of $\mathcal{D}iff_{X}^{\leq p}$ defined by the following condition: a local section $\delta$ of $\mathcal{D}iff_{X}^{\leq p}$ is contained in $\tilde{\mathcal{D}iff}_{X}^{\leq p}$ if, for any local section $\Phi\in\overline{\mathcal{D}_{X}^{(0)}}$, we have $[\delta,\Phi]\in\overline{\mathcal{D}_{X}^{(0)}}$ (Here, the bracket is the natural Lie bracket on $\mathcal{E}nd_{k}(\mathcal{O}_{X})$ coming from the algebra structure).
2) We define the sub-sheaf $\mathfrak{l}_{X}\subset\mathcal{D}iff_{X}$ to be $\tilde{\mathcal{D}iff}_{X}^{\leq p}+\overline{\mathcal{D}_{X}^{(0)}}$. \end{defn}
The sections in $\mathfrak{l}_{X}$ can easily be identified in local coordinates. Suppose $U=\text{Spec}(A)$ possesses local coordinates $\{x_{1},\dots,x_{n}\}$, and coordinate derivations $\{\partial_{1},\dots,\partial_{n}\}$. \begin{prop} \label{lem:O^p-action} Let $U\subset X$ be an open subset as above. Then we have \[ \mathfrak{l}_{X}(U)=\bigoplus_{i=1}^{n}\mathcal{O}_{U}^{p}\cdot\partial_{i}^{[p]}\oplus\overline{\mathcal{D}_{X}^{(0)}}(U) \] In particular, $\mathfrak{l}_{X}$ is a sheaf of $\mathcal{O}_{X}^{p}$-modules (via the left action of $\mathcal{O}_{X}^{p}$ on $\mathcal{E}nd_{k}(\mathcal{O}_{X})$). \end{prop}
\begin{proof} First, let's show that the displayed sum is contained in $\mathfrak{l}_{X}(U)$. By definition $\overline{\mathcal{D}_{X}^{(0)}}(U)\subset\mathfrak{l}_{X}(U)$. Let $\Phi\in\overline{\mathcal{D}_{X}^{(0)}}(U)$, and write ${\displaystyle \Phi=\sum_{I}a_{I}\partial^{I}}$ as in \lemref{Basic-description-of-D-bar}. Then, for any $g\in\mathcal{O}_{X}(U)$, we have \[ [g^{p}\partial_{i}^{[p]},\sum_{I}a_{I}\partial^{I}]=\sum_{I}[g^{p}\partial_{i}^{[p]},a_{I}\partial^{I}]=\sum_{I}[g^{p}\partial_{i}^{[p]},a_{I}]\partial^{I}+\sum_{I}a_{I}[g^{p}\partial_{i}^{[p]},\partial^{I}] \] Now, ${\displaystyle [g^{p}\partial_{i}^{[p]},a_{I}]\partial^{I}=g^{p}[\partial_{i}^{[p]},a_{I}]\partial^{I}=g^{p}\sum_{r=0}^{p-1}\partial_{i}^{[p-r]}(a_{I})\partial_{i}^{[r]}\cdot\partial^{I}\in\overline{\mathcal{D}_{X}^{(0)}}(U)}$. Further, $a_{I}[g^{p}\partial_{i}^{[p]},\partial^{I}]=a_{I}g^{p}[\partial_{i}^{[p]},\partial^{I}]+a_{I}[g^{p},\partial^{I}]\partial_{i}^{[p]}=0$. Thus we see that each $g^{p}\partial_{i}^{[p]}\in\mathfrak{l}_{X}(U)$, and the right hand side is contained in the left.
For the converse, let $\Phi\in\mathcal{D}iff_{X}^{\leq p}(U)$. It may be uniquely written as \[ \Phi=\sum_{i=1}^{n}a_{i}\partial_{i}^{[p]}+\sum_{I}a_{I}\partial^{I} \] where $a_{i}$ and $a_{I}$ are in $\mathcal{O}_{X}(U)$, and the second sum ranges over multi-indices $I=(i_{1},\dots,i_{n})$ with each $i_{j}<p$ and so that $i_{1}+\dots+i_{n}\leq p$. For any coordinate derivation $\partial_{j}$, we have \[ [\Phi,\partial_{j}]=-(\sum_{i=1}^{n}\partial_{j}(a_{i})\partial_{i}^{[p]}+\sum_{I}\partial_{j}(a_{I})\partial^{I}) \] For this to be contained in $\overline{\mathcal{D}_{X}^{(0)}}(U)$, we must have $\partial_{j}(a_{i})=0$ for all $i$. Therefore, if $[\Phi,\partial_{j}]\in\overline{\mathcal{D}_{X}^{(0)}}(U)$ for all $j$, we must have $\partial_{j}(a_{i})=0$ for all $j$ (and all $i$), which means that each $a_{i}\in\mathcal{O}_{X}(U)^{p}$. Therefore, if $\Phi\in\tilde{\mathcal{D}iff}_{X}^{\leq p}(U)$, then $\Phi$ must be contained in ${\displaystyle \bigoplus_{i=1}^{n}\mathcal{O}_{U}^{p}\cdot\partial_{i}^{[p]}\oplus\overline{\mathcal{D}_{X}^{(0)}}(U)}$, and the result follows. \end{proof} \begin{cor} $\mathfrak{l}_{X}$ is a sheaf of Lie subalgebras of $\mathcal{E}nd_{k}(\mathcal{O}_{X})$. \end{cor}
\begin{proof} As the question is local, it suffices to prove that $\mathfrak{l}_{X}(U)$ is closed under the bracket for a neighborhood $U$ which possesses local coordinates. We use the description of the previous lemma. So we must show that all brackets of the form \[ [g^{p}\partial_{i}^{[p]},h^{p}\partial_{j}^{[p]}] \] and \[ [g^{p}\partial_{i}^{[p]},\sum_{I}a_{I}\partial^{I}] \] are contained in $\mathfrak{l}_{X}(U)$. Here the notation is as above; so $g,h\in\mathcal{O}_{X}(U)$, and $I=(i_{1},\dots,i_{n})$ is a multi-index with each $i_{j}<p$. In fact, we already showed that ${\displaystyle [g^{p}\partial_{i}^{[p]},\sum_{I}a_{I}\partial^{I}]\in\overline{\mathcal{D}_{X}^{(0)}}(U)}$ in the course of the proof of the previous lemma. So we are left to analyze the first bracket. Now, \[ [g^{p}\partial_{i}^{[p]},h^{p}\partial_{j}^{[p]}]=h^{p}[g^{p}\partial_{i}^{[p]},\partial_{j}^{[p]}]+[g^{p}\partial_{i}^{[p]},h^{p}]\partial_{j}^{[p]} \] \[ =h^{p}[g^{p},\partial_{j}^{[p]}]\partial_{i}^{[p]}+g^{p}[\partial_{i}^{[p]},h^{p}]\partial_{j}^{[p]} \] and we have \[ [\partial_{i}^{[p]},h^{p}]=\sum_{r=0}^{p-1}\partial_{i}^{[p-r]}(h^{p})\partial_{i}^{[r]}=\partial_{i}^{[p]}(h^{p}) \] and similarly, $[g^{p},\partial_{j}^{[p]}]=-\partial_{j}^{[p]}(g^{p})$. It is a well-known fact that $\partial_{i}^{[p]}(h^{p})=(\partial_{i}(h))^{p}$ (for the sake of completeness, we include a proof directly below). It follows immediately that $[g^{p}\partial_{i}^{[p]},h^{p}\partial_{j}^{[p]}]\in\mathfrak{l}_{X}(U)$, and the corollary follows.
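As a quick sanity check of this identity in the case $p=2$: the formula for Hasse-Schmidt derivations acting on powers (recalled in the proof just below) gives \[ \partial_{i}^{[2]}(h^{2})=h\,\partial_{i}^{[2]}(h)+\partial_{i}(h)\partial_{i}(h)+\partial_{i}^{[2]}(h)\,h=(\partial_{i}(h))^{2}, \] the two outer terms cancelling since $2=0$ in $k$.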
To prove that $\partial_{i}^{[p]}(h^{p})=(\partial_{i}(h))^{p}$, recall the following formula for Hasse-Schmidt derivations acting on powers: \[ \partial_{i}^{[j]}(h^{m})=\sum_{i_{1}+\dots+i_{m}=j}\partial_{i}^{[i_{1}]}(h)\cdots\partial_{i}^{[i_{m}]}(h) \] which is easily checked by induction. Put $m=j=p$ in the formula. The set \[
\{(i_{1},\dots,i_{p})\in\mathbb{Z}_{\geq0}^{p}|i_{1}+\dots+i_{p}=p\} \] is acted upon by the symmetric group $S_{p}$, and, after grouping like terms together, we see that each term $\partial_{i}^{[i_{1}]}(h)\cdots\partial_{i}^{[i_{p}]}(h)$ in the sum is repeated $N$ times, where $N$ is the size of the $S_{p}$ orbit of $(i_{1},\dots,i_{p})$. There is a unique orbit of size $1$, namely $i_{1}=i_{2}=\cdots=i_{p}=1$; and for every other orbit, the size is a number of the form ${\displaystyle \frac{p!}{c_{1}!\cdots c_{r}!}}$ for some numbers $c_{i}<p$ such that $\sum c_{i}=p$. Any such is divisible by $p$, and so all these terms are zero in the sum since we are in characteristic $p$. Thus we obtain \[ \partial_{i}^{[p]}(h^{p})=\partial_{i}^{[1]}(h)\cdots\partial_{i}^{[1]}(h)=(\partial_{i}(h))^{p} \] as claimed. \end{proof} Now we will build the ring $\mathcal{D}_{X}^{(0,1)}$ out of $\mathcal{D}_{X}^{(0)}$ and $\mathfrak{l}_{X}$, in a manner quite analogous to the way in which $\mathcal{D}_{X}^{(0)}$ is built out of $\mathcal{O}_{X}$ and $\mathcal{T}_{X}$ as an enveloping algebra of a Lie algebroid; in the classical case, this construction is given in \cite{key-44} (for schemes) and \cite{key-46} (for rings). Our construction is similar in spirit to these works (c.f. also \cite{key-45}). \begin{defn} \label{def:D-=00005Cplus-L}Let $f:\mathcal{D}_{X}^{(0)}\to\mathfrak{l}_{X}$ denote the map $\mathcal{D}_{X}^{(0)}\to\overline{\mathcal{D}_{X}^{(0)}}\subset\mathfrak{l}_{X}$. Define the sheaf \[ \mathfrak{L}_{X}:=\mathcal{D}_{X}^{(0)}\oplus\bigoplus_{i=1}^{\infty}\mathfrak{l}_{X}=\bigoplus_{i=0}^{\infty}\mathfrak{L}_{X}^{i} \] and make it into a graded $k[f]$-module by letting $f:\mathfrak{l}_{X}\to\mathfrak{l}_{X}$ be the identity in degrees $\geq1$; thus any homogeneous element in degree $i\geq1$ can be uniquely written $f^{i-1}\Psi$ for some $\Psi\in\mathfrak{l}_{X}$.
For local sections $\Phi\in\mathcal{D}_{X}^{(0)}$ and $f^{i-1}\Psi\in\mathfrak{L}_{X}^{i}$, define $[\Phi,f^{i-1}\Psi]:=f^{i-1}[f\circ\Phi,\Psi]$ where on the right we have the bracket in $\mathfrak{l}_{X}$. We then make $\mathfrak{L}_{X}$ into a sheaf of graded Lie algebras by setting $[f^{i-1}\Psi_{1},f^{j-1}\Psi_{2}]=f^{i+j-1}[\Psi_{1},\Psi_{2}]$ where $\{\Psi_{1},\Psi_{2}\}$ are local sections of $\mathfrak{l}_{X}$. The Jacobi identity can be verified by a direct computation. \end{defn}
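For instance, in local coordinates, taking $\Phi=\partial_{j}$ in degree $0$ and $\Psi=g^{p}\partial_{i}^{[p]}\in\mathfrak{l}_{X}$ in degree $1$, the definition gives \[ [\partial_{j},g^{p}\partial_{i}^{[p]}]:=[f\circ\partial_{j},\,g^{p}\partial_{i}^{[p]}]=\partial_{j}(g^{p})\partial_{i}^{[p]}+g^{p}[\partial_{j},\partial_{i}^{[p]}]=0, \] since $\partial_{j}(g^{p})=0$ in characteristic $p$ and $\partial_{j}$ commutes with $\partial_{i}^{[p]}$; so the coordinate derivations commute, in $\mathfrak{L}_{X}$, with the degree-one sections $g^{p}\partial_{i}^{[p]}$.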
Next we introduce the action of $v$: \begin{lem} \label{lem:Construction-of-v-1}There is a unique endomorphism $v$ of $\mathfrak{L}_{X}$ satisfying $v(\mathcal{D}_{X}^{(0)})=0$ and, upon restriction to an open affine $U$ which possesses local coordinates, $v(\partial_{i}^{[p]})=(p-1)!\partial_{i}^{p}$ for coordinate derivations $\{\partial_{i}\}_{i=1}^{n}$. This endomorphism vanishes on $f(\mathcal{D}_{X}^{(0)})$, and on ${\displaystyle \bigoplus_{i=2}^{\infty}\mathfrak{L}_{X}^{i}}$. \end{lem}
\begin{proof} Since $v(\mathcal{D}_{X}^{(0)})=0$ it suffices to define $v$ on $\mathfrak{l}_{X}$. For any $\Phi\in\mathfrak{l}_{X}$, the action of $\Phi$ preserves $\mathcal{O}_{X}^{p}$, and the restriction of $\Phi$ to $\mathcal{O}_{X}^{p}\tilde{=}\mathcal{O}_{X^{(1)}}$ is a derivation on $\mathcal{O}_{X}^{p}$ (this follows immediately from \lemref{O^p-action} and the fact that $\partial_{i}^{[p]}(g^{p})=(\partial_{i}(g))^{p}$). Further this derivation is trivial iff $\Phi\in f(\mathcal{D}_{X}^{(0)})\subset\mathfrak{l}_{X}$.
On the other hand, since $k$ is perfect there is a natural isomorphism between the sheaf of derivations on $\mathcal{O}_{X^{(1)}}$ and the sheaf of derivations on $\mathcal{O}_{X}$, given as follows: if $\partial'$ is a (local) derivation on $\mathcal{O}_{X^{(1)}}$, then we can define a derivation of $\mathcal{O}_{X}$ by $\partial(g)=(\partial'(g^{p}))^{1/p}$; this is possible precisely by the identification $\mathcal{O}_{X}^{p}\tilde{=}\mathcal{O}_{X^{(1)}}$. This association is easily checked to be an isomorphism using local coordinates; let's name it $\tau:\text{Der}(\mathcal{O}_{X^{(1)}})\to\text{Der}(\mathcal{O}_{X})$.
Further, there is a map $\sigma:\text{Der}(\mathcal{O}_{X})\to\mathcal{Z}(\mathcal{D}_{X}^{(0)})$ defined by $\partial\to\partial^{p}-\partial^{[p]}$, where $\partial^{[p]}$ is the $p$th iterate of the derivation (c.f. \cite{key-3}, chapter 1). In particular this map takes $\partial_{i}\to\partial_{i}^{p}$ if $\partial_{i}$ is a coordinate derivation as above.
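For instance (in the notation of this formula, so that $\partial^{[p]}$ denotes the $p$th iterate): in one variable the derivation $x\partial_{x}$ satisfies $(x\partial_{x})^{[p]}=x\partial_{x}$, since its $p$th iterate acts on $x^{m}$ by $m^{p}\equiv m\pmod{p}$; hence \[ \sigma(x\partial_{x})=(x\partial_{x})^{p}-x\partial_{x}, \] a central element of $\mathcal{D}_{X}^{(0)}$ acting by zero on $\mathcal{O}_{X}$.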
Now we define $v(\Phi)=(p-1)!\cdot\sigma\circ\tau(\Phi|_{\mathcal{O}_{X^{(1)}}})$; by the above discussion this satisfies all the properties of the lemma. \end{proof} Now we proceed to the definition of $\mathcal{D}_{X}^{(0,1),+}$. By the functoriality of the enveloping algebra construction, we can now form the pre-sheaf of enveloping algebras $\mathcal{U}(\mathfrak{L}_{X})$; this is a pre-sheaf of graded algebras with the grading inherited from $\mathfrak{L}_{X}$. Inside this pre-sheaf is the pre-sheaf $\mathcal{U}^{+}(\mathfrak{L}_{X})$, which is the pre-sheaf of non-unital algebras generated by $\mathfrak{L}_{X}\subset\mathcal{U}(\mathfrak{L}_{X})$.
For any local section $\Phi\in\mathcal{D}_{X}^{(0)}$, let $\Phi'$ denote its image in $\mathcal{U}^{+}(\mathfrak{L}_{X})$, by regarding $\Phi\in\mathcal{D}_{X}^{(0)}\subset\mathfrak{L}_{X}$; similarly, for a local section $\Psi\in\mathfrak{L}_{X}$, let $\Psi'\in\mathfrak{L}_{X}\subset\mathcal{U}^{+}(\mathfrak{L}_{X})$ denote its image. \begin{defn} Let $\mathcal{J}$ be the pre-sheaf of homogeneous two-sided ideals in $\mathcal{U}^{+}(\mathfrak{L}_{X})$ generated by the following sections: for any local sections $\Phi_{1},\Phi_{2}\in\mathcal{D}_{X}^{(0)}$: $(\Phi_{1}\cdot\Phi_{2})'-\Phi_{1}'\cdot\Phi_{2}'$, $f\cdot\Phi_{1}'-f(\Phi_{1})'$, $\Phi'_{1}\cdot f(\Phi'_{2})-f(\Phi_{1}')\cdot\Phi'_{2}$, $\Phi'_{1}\cdot f(\Phi'_{2})-f\cdot(\Phi_{1}'\cdot\Phi'_{2})$. Further, if $\Psi_{1},\Psi_{2}\in\mathfrak{L}_{X}$ are any local sections, we add the elements $\Psi_{1}'\Psi_{2}'-\Psi_{2}'\Psi_{1}'-[\Psi_{1},\Psi_{2}]'$, as well as $g'\cdot\Psi_{1}'-(g\cdot\Psi_{1})'$ for any local section $g\in\mathcal{O}_{X}^{p}$ (the action of $\mathcal{O}_{X}^{p}$ on $\mathfrak{l}_{X}$ is that of \lemref{O^p-action}). Finally, we add $\Phi_{1}'\cdot\Psi'_{1}-\Phi_{2}'\cdot\Psi'_{2}$ where $\Phi_{i}$ are local sections of $\mathcal{Z}(\mathcal{D}_{X}^{(0)})$ such that $\Phi_{1}\cdot v(\Psi_{1})=\Phi_{2}\cdot v(\Psi_{2})$.
Define $\mathcal{D}_{X}^{(0,1),+}$ to be the sheafification of the presheaf $\mathcal{U}^{+}(\mathfrak{L}_{X})/\mathcal{J}$. It is a graded sheaf of algebras on $X$. \end{defn}
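To illustrate the last family of relations in local coordinates (assuming $n\geq2$): taking $\Psi_{1}=\partial_{1}^{[p]}$, $\Psi_{2}=\partial_{2}^{[p]}$, $\Phi_{1}=\partial_{2}^{p}$ and $\Phi_{2}=\partial_{1}^{p}$, we have $\Phi_{1}\cdot v(\Psi_{1})=(p-1)!\,\partial_{2}^{p}\partial_{1}^{p}=\Phi_{2}\cdot v(\Psi_{2})$, so the corresponding generator of $\mathcal{J}$ imposes the relation \[ \partial_{2}^{p}\cdot\partial_{1}^{[p]}=\partial_{1}^{p}\cdot\partial_{2}^{[p]} \] in the quotient $\mathcal{D}_{X}^{(0,1),+}$.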
Of course, such a definition is only really useful if we can write the algebra out explicitly in the presence of coordinates. Fortunately, this is the case; in fact, if $U=\text{Spec}(A)$ we can compare it with the presentation of $D_{A}^{(0,1),+}=D_{\mathcal{A}}^{(0,1),+}/p$ discussed in the previous section: \begin{thm}
\label{thm:D-is-quasi-coherent} Let $\tilde{D}_{A}^{(0,1),+}$ be the quasi-coherent sheaf on $U$ obtained by localizing $D_{A}^{(0,1),+}$. This is a sheaf of algebras on $U$. There is an isomorphism (of graded sheaves of algebras) $\mathcal{D}_{X}^{(0,1),+}|_{U}\tilde{=}\tilde{D}_{A}^{(0,1),+}$. In particular, $\mathcal{D}_{X}^{(0,1),+}$ is a quasi-coherent sheaf of algebras on $X$, and we have $\mathcal{D}_{X}^{(0,1),0}\tilde{=}\mathcal{D}_{X}^{(0)}$. \end{thm}
\begin{proof} We have the algebra $\mathcal{U}^{+}(\mathfrak{L}_{X})(U)/\mathcal{J}(U)$. It admits a map to $D_{A}^{(0,1),+}$ as follows: by \lemref{O^p-action}, the Lie algebra $\mathfrak{L}_{X}(U)$ is equal to \[ \mathcal{D}_{X}^{(0)}(U)\oplus\bigoplus_{i=1}^{\infty}(f^{i}(\overline{\mathcal{D}_{X}^{(0)}}(U))\oplus\bigoplus_{j=1}^{n}f^{i-1}\mathcal{O}_{X}^{p}(U)\cdot\partial_{j}^{[p]}) \] \[ =D_{A}^{(0)}\oplus\bigoplus_{i=1}^{\infty}(f^{i}(\overline{D_{A}^{(0)}})\oplus\bigoplus_{j=1}^{n}f^{i-1}A^{p}\cdot\partial_{j}^{[p]}) \] We map this to $D_{A}^{(0,1),+}$ via the identification of $D_{A}^{(0)}$ with $D_{A}^{(0,1),0}$, and by sending $f^{i}(\overline{D_{A}^{(0)}})$ to $f^{i}\cdot D_{A}^{(0)}\tilde{=}\overline{D_{A}^{(0)}}$ and $f^{i-1}g^{p}\partial_{j}^{[p]}$ to $f^{i-1}g^{p}\partial_{j}^{[p]}\in D_{A}^{(0,1),i}$. By sending $f$ to $f$ we get a map of algebras $\mathcal{U}^{+}(\mathfrak{L}_{X})(U)/\mathcal{J}(U)\to D_{A}^{(0,1),+}$ (one checks the relations directly).
Conversely, we get a map $D_{A}^{(0,1),+}\to\mathcal{U}^{+}(\mathfrak{L}_{X})(U)/\mathcal{J}(U)$ by sending $A\to A\subset\mathcal{D}_{X}^{(0)}(U)$, $\partial_{i}\to\partial_{i}\in\mathcal{D}_{X}^{(0)}(U)$, $\partial_{i}^{[p]}\to\partial_{i}^{[p]}\in\mathfrak{l}_{X}(U)$ and $f\to f$. Again checking the relations, this is a morphism of algebras, and the compositions in both directions are the identity on generators. Therefore the presheaf $U\to\mathcal{U}^{+}(\mathfrak{L}_{X})(U)/\mathcal{J}(U)$, when restricted to open affines which admit local coordinates, agrees with the assignment $U\to D_{A}^{(0,1),+}$. But the latter, by the description of \thmref{Local-Coords-for-D+}, clearly agrees with the quasi-coherent sheaf $\tilde{D}_{A}^{(0,1),+}$ on $\text{Spec}(A)$, and the result follows. \end{proof} Finally, we need to define the entire algebra $\mathcal{D}_{X}^{(0,1)}$. This entails extending the operator $v$ to an endomorphism of all of $\mathcal{D}_{X}^{(0,1),+}$. \begin{lem} \label{lem:Construction-of-v} There is a unique $\mathcal{D}_{X}^{(0)}$-linear endomorphism $v$ of $\mathcal{D}_{X}^{(0,1),+}$ satisfying $v(\mathcal{D}_{X}^{(0,1),i})\subset\mathcal{D}_{X}^{(0,1),i-1}$ for all $i\geq1$ (and $v(\mathcal{D}_{X}^{(0,1),0})=0$), $v(\Phi_{1}\cdot\Phi_{2})=\Phi_{1}v(\Phi_{2})$ for all $\Phi_{1},\Phi_{2}\in\bigoplus_{i=1}^{\infty}\mathcal{D}_{X}^{(0,1),i}$, $v(f\cdot\Phi)=0$ for all $\Phi$, and such that the restriction of $v$ to $\mathfrak{L}_{X}$ agrees with the map $v$ constructed in \lemref{Construction-of-v-1}. \end{lem}
\begin{proof} Define $v$ on $\mathcal{D}_{X}^{(0)}\oplus\mathfrak{l}_{X}$ to be the map constructed in \lemref{Construction-of-v-1}. The claim is that there is a unique extension of this map to all of $\mathcal{D}_{X}^{(0,1),+}$ satisfying the conditions of the lemma.
By the uniqueness, it is enough to check this locally. Let $U=\text{Spec}(A)$ possess local coordinates. By \thmref{Local-Coords-for-D+}, if we set $v$ to be zero on $D_{A}^{(0,1),0}$ and $f\cdot(D_{A}^{(0,1),+})$, and we define \[ v((\partial_{j}^{[p]})^{i_{j}}\cdots(\partial_{n}^{[p]})^{i_{n}})=\partial_{j}^{p}\cdot(\partial_{j}^{[p]})^{i_{j}-1}\cdots(\partial_{n}^{[p]})^{i_{n}} \] where $j$ is the first index such that $i_{j}\geq1$, then we have a well-defined $D_{A}^{(0)}$-linear map satisfying all the properties of the lemma, and which agrees with the $v$ defined above on $\mathfrak{L}_{X}(U)$. On the other hand, $D_{A}^{(0,1),+}$ is generated as a $D_{A}^{(0)}$-module by $D_{A}^{(0)}$, $f\cdot(D_{A}^{(0,1),+})$, and elements which are products of $\mathfrak{L}_{X}(U)$ (again by \thmref{Local-Coords-for-D+}). So any map which satisfies the above list of properties and equals $v$ on $\mathfrak{L}_{X}(U)$ is equal to the one we have written down; so the uniqueness follows as well. \end{proof} Now we arrive at \begin{defn} \label{def:D(0,1)}The sheaf of algebras $\mathcal{D}_{X}^{(0,1)}$ is defined as the $\mathbb{Z}$-graded sheaf of $k[v,f]$-algebras, which as a graded sheaf is given by \[ \bigoplus_{i=-\infty}^{-1}\mathcal{D}_{X}^{(0)}\oplus\mathcal{D}_{X}^{(0,1),+} \] and where we extend the action of $f$ (to an operator of degree $1$) from $\mathcal{D}_{X}^{(0,1),+}$ to $\mathcal{D}_{X}^{(0,1)}$ by setting $f=0$ on ${\displaystyle \bigoplus_{i=-\infty}^{-1}\mathcal{D}_{X}^{(0)}}$, and we extend the action of $v$ (to an operator of degree $-1$) from $\mathcal{D}_{X}^{(0,1),+}$ to $\mathcal{D}_{X}^{(0,1)}$ by letting $v:\mathcal{D}_{X}^{(0,1),i}\to\mathcal{D}_{X}^{(0,1),i-1}$ be the identity whenever $i\leq0$. The product on this algebra extends the product on $\mathcal{D}_{X}^{(0,1),+}$ as follows: on the negative half ${\displaystyle \bigoplus_{i=-\infty}^{0}\mathcal{D}_{X}^{(0)}}=\mathcal{D}_{X}^{(0)}\otimes_{k}k[v]$, we use the obvious graded product. For $i\leq0$, if $\Phi\in\mathcal{D}_{X}^{(0,1),i}\tilde{=}D_{X}^{(0)}$ and $\Psi\in\mathcal{D}_{X}^{(0,1),+}$, we set \[ \Phi\cdot\Psi=\Phi_{0}v^{-i}(\Psi) \] where $\Phi_{0}$ is the element $\Phi\in D_{X}^{(0)}$, now regarded as an element of degree $0$. \end{defn}
From this definition and \thmref{D-is-quasi-coherent}, we see that this is a quasicoherent sheaf of algebras, and we have an isomorphism \[ \widehat{D}_{\mathcal{A}}^{(0,1)}/p\tilde{=}\mathcal{D}_{X}^{(0,1)}(U) \] for any $U=\text{Spec}(A)$ which possesses local coordinates. It follows that $\mathcal{D}_{X}^{(0,1)}$ is a coherent, locally noetherian sheaf of rings which is stalk-wise noetherian. One sees directly the isomorphism $\mathcal{D}_{X}^{(0)}\tilde{=}\mathcal{D}^{(0,1)}/(v-1)$, and we may now define $\mathcal{D}_{X}^{(1)}:=\mathcal{D}^{(0,1)}/(f-1)$. We will see below that this agrees with Berthelot's definition; this is clear if $X$ is liftable but not quite obvious in general.
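In summary, the sheaf of algebras just constructed specializes as \[ \mathcal{D}_{X}^{(0)}\xleftarrow{\;v\mapsto1\;}\mathcal{D}_{X}^{(0,1)}\xrightarrow{\;f\mapsto1\;}\mathcal{D}_{X}^{(1)}, \] the two arrows being the quotient maps by $(v-1)$ and $(f-1)$ respectively; this picture recurs below via the functors $\mathcal{M}\to\mathcal{M}^{-\infty}$ and $\mathcal{M}\to\mathcal{M}^{\infty}$.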
\section{Gauges Over $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$}
We now have several locally noetherian graded rings and so we can consider categories of modules over them; in particular we have the category $\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})$ of graded $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$-modules (which we refer to as gauges over $\mathfrak{X}$) and the category of coherent graded modules $\mathcal{G}_{coh}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})$. We have the analogous categories in positive characteristic, as well as $\mathcal{G}_{qcoh}(\mathcal{D}_{X}^{(0,1)})$, the category of graded quasicoherent $\mathcal{D}_{X}^{(0,1)}$-modules; as $\mathcal{D}_{X}^{(0,1)}$ is itself a quasi-coherent sheaf of algebras, this is simply the category of sheaves in $\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$ which are quasi-coherent over $\mathcal{O}_{X}[f,v]$.
In this chapter we develop the basic properties of these categories of gauges; we begin by collecting a few of their most basic properties.
For any object $\mathcal{M}$ in $\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})$ (or $\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$), set $\mathcal{M}^{\infty}:=\mathcal{M}/(f-1)$ and $\mathcal{M}^{-\infty}:=\mathcal{M}/(v-1)$; these are exact functors to the categories of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),\infty}$-modules and $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}(=\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),-\infty})$-modules, respectively; there are obvious maps $f_{\infty}:\mathcal{M}^{i}\to\mathcal{M}^{\infty}$ and $v_{-\infty}:\mathcal{M}^{i}\to\mathcal{M}^{-\infty}$ for each $i$. We use the same notation to denote the analogous constructions for $\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$.
We have: \begin{lem} \label{lem:Basic-v}Let $\mathcal{M}\in\mathcal{G}_{coh}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)})$. Then each $\mathcal{M}^{i}$ is coherent as a $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0)}$-module. Further, for all $i<<0$, the map $v:\mathcal{M}^{i}\to\mathcal{M}^{i-1}$ is an isomorphism. The same holds for $\mathcal{M}\in\mathcal{G}_{coh}(\mathcal{D}_{X}^{(0,1)})$. \end{lem}
\begin{proof} By definition we have, at least locally, an exact sequence \[ \bigoplus_{i=1}^{s}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}(r_{i})\to\bigoplus_{i=1}^{m}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}(l_{i})\to\mathcal{M}\to0 \] Now the result follows as the lemma is true for $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}$ by construction. As the same holds for $\mathcal{D}_{X}^{(0,1)}$, the statement for $\mathcal{M}\in\mathcal{G}_{coh}(\mathcal{D}_{X}^{(0,1)})$ is proved in an identical manner. \end{proof} This allows us to give: \begin{defn} \label{def:Index!}Let $\mathcal{M}\in\mathcal{G}_{coh}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)})$. Then the index of $\mathcal{M}$ in $\mathbb{Z}\cup\{\infty\}$ is the largest integer $i$ for which $v:\mathcal{M}^{j}\to\mathcal{M}^{j-1}$ is an isomorphism for all $j\leq i$. The index is $\infty$ if $v$ is an isomorphism for all $i$ (this can indeed happen; c.f. \exaref{Exponential!} below). We can make the same definition for $\mathcal{M}\in\mathcal{G}_{coh}(\mathcal{D}_{X}^{(0,1)})$. \end{defn}
We will now show how cohomological completeness gives a convenient criterion for a complex to be in $D_{coh}^{b}(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$. \begin{prop} \label{prop:coh-to-coh}We have $D_{coh}^{b}(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))\subset D_{cc}(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$. Further, for $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$, we have $\mathcal{M}^{\cdot}\in D_{coh}^{?}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)})$ iff $\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k\in D_{coh}^{?}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$, where $?=+$ or $?=b$. \end{prop}
\begin{proof} Recall that if $\mathcal{F}$ is a sheaf of $W(k)$-modules which is $p$-torsion free and $p$-adically complete, or if it is annihilated by $p^{N}$ for some fixed $N\in\mathbb{N}$, then $\mathcal{F}$ (considered as a complex concentrated in one degree) is contained in $D_{cc}(W(k))$. It follows that a coherent $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0)}$-module is cohomologically complete; therefore so is an element of $\mathcal{G}_{coh}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)})$ by \propref{Basic-CC-facts}, part $4)$. Since $D_{cc}(\mathcal{G}(\mathcal{D}_{\mathfrak{X}}^{(0,1)}))$ is closed under extensions, the first statement follows directly (c.f. \cite{key-8}, theorem 1.6.1).
For the second statement, the forward direction is obvious. For the converse, we note that by \cite{key-8} theorem 1.6.4, since (for either $?=+$ or $?=b$) each \linebreak{} $(\mathcal{M}^{\cdot})^{i}\otimes_{W(k)}^{L}k\in D_{coh}^{+}(\mathcal{D}_{X}^{(0)}-\text{mod})$, we must have $(\mathcal{M}^{\cdot})^{i}\in D_{coh}^{+}(\mathcal{D}_{\mathfrak{X}}^{(0)}-\text{mod})$. In particular $\mathcal{H}^{j}((\mathcal{M}^{\cdot})^{i})$ is $p$-adically complete for each $i$ and $j$. Further, we have the short exact sequences for the functor $\otimes_{W(k)}^{L}k$ \[ 0\to\mathcal{H}^{j}(\mathcal{M}^{\cdot})/p\to\mathcal{H}^{j}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\to\mathcal{T}or_{1}^{W(k)}(\mathcal{H}^{j+1}(\mathcal{M}^{\cdot}),k)\to0 \] which implies also that $\mathcal{H}^{j}(\mathcal{M}^{\cdot})/p$ is coherent over $\mathcal{D}_{X}^{(0,1)}$ for all $j$ (this follows from the fact that $\mathcal{H}^{j}((\mathcal{M}^{\cdot})^{i})/p$ is coherent, and hence quasi-coherent, for all $i$, which implies $\mathcal{H}^{j}(\mathcal{M}^{\cdot})/p$ is a quasicoherent sub-sheaf of the coherent $\mathcal{D}_{X}^{(0,1)}$-module $\mathcal{H}^{j}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)$, and hence coherent).
Now, for a fixed $j$, we can consider, for any $i$, the maps \[ v:\mathcal{H}^{j}((\mathcal{M}^{\cdot})^{i})\to\mathcal{H}^{j}((\mathcal{M}^{\cdot})^{i-1}) \] and \[ \mathcal{D}_{\mathfrak{X}}^{(0,1),1}\otimes_{\mathcal{D}_{\mathfrak{X}}^{(0)}}\mathcal{H}^{j}((\mathcal{M}^{\cdot})^{i})\to\mathcal{H}^{j}((\mathcal{M}^{\cdot})^{i+1}) \] Since $\mathcal{H}^{j}(\mathcal{M}^{\cdot})/p$ is coherent over $\mathcal{D}_{X}^{(0,1)}$, we have that the reduction mod $p$ of $v$ is surjective for $i<<0$ and the reduction mod $p$ of the second map is surjective for $i>>0$. By the usual complete Nakayama lemma, we see that $v$ is surjective for $i<<0$ and the second map is surjective for $i>>0$; therefore $\mathcal{H}^{j}(\mathcal{M}^{\cdot})$ is locally finitely generated over $\mathcal{D}_{\mathfrak{X}}^{(0,1)}$ (since each $\mathcal{H}^{j}((\mathcal{M}^{\cdot})^{i})$ is coherent over $\mathcal{D}_{\mathfrak{X}}^{(0)}$).
Now, let $U\subset X$ be an open affine and let $D(g)\subset U$ be a principal open inside $U$; let $\tilde{g}$ be a lift of the function $g$ to $\Gamma(U,\mathcal{O}_{\mathfrak{X}})$. As each $\mathcal{H}^{j}((\mathcal{M}^{\cdot})^{i})$ is coherent, we have that $\mathcal{H}^{j}((\mathcal{M}^{\cdot})^{i})(D(g))$ is isomorphic to the completion of the localization of $\mathcal{H}^{j}((\mathcal{M}^{\cdot})^{i})(U)$ at $\tilde{g}$. It follows that $\mathcal{H}^{j}(\mathcal{M}^{\cdot})(D(g))$ is given by localizing $\mathcal{H}^{j}(\mathcal{M}^{\cdot})(U)$ at $\tilde{g}$ and then completing each component. If $\mathcal{F}$
is a graded free module over $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$, it has the same description; and so the kernel of any map $\mathcal{F}\to\mathcal{H}^{j}(\mathcal{M}^{\cdot})|_{U}$ also has this description (as the functor of localizing and completing is exact on coherent $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$-modules); hence it is locally finitely generated and so $\mathcal{H}^{j}(\mathcal{M}^{\cdot})$ is itself coherent.
Finally, we note that $\mathcal{H}^{j}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)=0$ implies $\mathcal{H}^{j}(\mathcal{M}^{\cdot})/p=0$ by the above short exact sequence. So, if $\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k\in D_{coh}^{+}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$, we see that $\mathcal{H}^{j}(\mathcal{M}^{\cdot})/p=0$ for all $j<<0$; which implies $\mathcal{H}^{j}(\mathcal{M}^{\cdot})=0$ for $j<<0$ since each $\mathcal{H}^{j}(\mathcal{M}^{\cdot})^{i}$ is $p$-adically complete; i.e., we have $\mathcal{M}^{\cdot}\in D_{\text{Coh}}^{+}(\mathcal{D}_{\mathfrak{X}}^{(0,1)})$; the same argument applies for bounded complexes. \end{proof} This proposition will be our main tool for showing that elements of $D_{cc}(\mathcal{G}(\mathcal{D}_{\mathfrak{X}}^{(0,1)}))$ are actually in $D_{coh}^{b}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)})$.
\subsection{\label{subsec:Standard}Standard Gauges, Mazur's Theorem}
In this subsection we discuss the analogue of (the abstract version of) Mazur's theorem in the context of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$-gauges. Since the notion of gauge was invented in order to isolate the structures used in the proof of Mazur's theorem, it comes as no surprise that there is a very general version of the theorem available in this context. Before proving it, we discuss some generalities, starting with \begin{defn} \label{def:Standard!}Let $\mathcal{M}\in\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})$. We say $\mathcal{M}$ is standard if $\mathcal{M}^{-\infty}$ and $\mathcal{M}^{\infty}$ are $p$-torsion-free, each $f_{\infty}:\mathcal{M}^{i}\to\mathcal{M}^{\infty}$ is injective; and, finally, there is a $j_{0}\in\mathbb{Z}$ so that \[
f_{\infty}(\mathcal{M}^{i+j_{0}})=\{m\in\mathcal{M}^{\infty}|p^{i}m\in f_{\infty}(\mathcal{M}^{j_{0}})\} \] for all $i\in\mathbb{Z}$. \end{defn}
The $j_{0}$ appearing in this definition is not unique; indeed, from the definition, if $i<0$ we have $f_{\infty}(\mathcal{M}^{i+j_{0}})=p^{-i}\cdot f_{\infty}(\mathcal{M}^{j_{0}})$, which implies that we can replace $j_{0}$ with any $j<j_{0}$. In particular the \emph{index} of a standard gauge (as in \defref{Index!}) is the maximal $j_{0}$ for which the description in the definition is true (and it takes the value $\infty$ if this description is true for all integers). Note that if $\mathcal{M}$ is standard, so is the shift $\mathcal{M}(j)$, and the index of $\mathcal{M}(j)$ is equal to $\text{index}(\mathcal{M})+j$.
As in the case where $\mathfrak{X}$ is a point (which is discussed above in \exaref{BasicGaugeConstruction}), standard gauges are (up to a shift of index) exactly the ones that can be constructed from lattices: \begin{example} \label{exa:Basic-Construction-over-X} Let $\mathcal{N}'$ be a $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}[p^{-1}]$-module, and let $\mathcal{N}$ be a lattice; i.e., a $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$-submodule such that $\mathcal{N}[p^{-1}]=\mathcal{N}'$. Recalling the isomorphism $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}[p^{-1}]\tilde{=}\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),\infty}[p^{-1}]$ (c.f. \lemref{Basic-Structure-of-D^(1)}), we also suppose given a $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),\infty}$-lattice of $\mathcal{N}'$ called $\mathcal{M}^{\infty}$. Then we may produce a standard gauge $\mathcal{M}$ via \[
\mathcal{M}^{i}=\{m\in\mathcal{M}^{\infty}|p^{i}m\in\mathcal{N}\} \] If $\mathcal{M}^{\infty}$ is coherent over $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),\infty}$ and $\mathcal{N}$ is coherent over $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$, then $\mathcal{M}$ is a coherent gauge. \end{example}
Let us give some general properties of standard gauges: \begin{lem} \label{lem:Standard-is-rigid}Suppose $\mathcal{M}\in\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})$ is standard; and let $\mathcal{M}_{0}=\mathcal{M}/p$ be its reduction mod $p$. Then $\mathcal{M}_{0}$ has $\text{ker}(f)=\text{im}(v)$ and $\text{ker}(v)=\text{im}(f)$; further, if $\overline{m}_{i}\in\mathcal{M}_{0}^{i}$, then $f\overline{m}_{i}=0=v\overline{m}_{i}$ iff $\overline{m}_{i}=0$. \end{lem}
\begin{proof} Since $fv=0$ on $\mathcal{M}_{0}$, we always have $\text{im}(f)\subset\text{ker}(v)$ and $\text{im}(v)\subset\text{ker}(f)$; so we consider the other inclusions.
Let $m_{i}\in\mathcal{M}^{i}$, and denote its image in $\mathcal{M}_{0}^{i}$ by $\overline{m}_{i}$. Suppose $v\overline{m}_{i}=0$. Then $vm_{i}=pm_{i-1}$ for some $m_{i-1}\in\mathcal{M}^{i-1}$, so that $f_{\infty}(vm_{i})=pf_{\infty}(m_{i})=pf_{\infty}(m_{i-1})$. Since $\mathcal{M}^{\infty}$ is $p$-torsion-free this yields $f_{\infty}(m_{i})=f_{\infty}(m_{i-1})$ so that $fm_{i-1}=m_{i}$ by the injectivity of $f_{\infty}$. Thus $\overline{m}_{i}\in\text{im}(f)$ and we see $\text{ker}(v)\subset\text{im}(f)$ as required.
Now suppose $f\overline{m}_{i}=0$. Then $fm_{i}=pm_{i+1}$ for some $m_{i+1}\in\mathcal{M}^{i+1}$ so that $f_{\infty}(m_{i})=pf_{\infty}(m_{i+1})=f_{\infty}(vm_{i+1})$, and the injectivity of $f_{\infty}$ implies $m_{i}=vm_{i+1}$ so that $\overline{m}_{i}\in\text{im}(v)$ as required.
To obtain the last property: since $\mathcal{M}$ is standard, after shifting the grading if necessary, we may identify $f_{\infty}(\mathcal{M}^{i})$
with $\{m\in\mathcal{M}^{\infty}|p^{i}m\in f_{\infty}(\mathcal{M}^{0})\}$. If $m_{i}\in\mathcal{M}^{i}$ and $f\overline{m}_{i}=0=v\overline{m}_{i}$ then $fm_{i}=pm_{i+1}$ and $vm_{i}=pm_{i-1}$; therefore $f_{\infty}(m_{i})=pf_{\infty}(m_{i+1})$ and $pf_{\infty}(m_{i})=pf_{\infty}(m_{i-1})$ so that $p^{2}f_{\infty}(m_{i+1})=pf_{\infty}(m_{i-1})$ which implies $pf_{\infty}(m_{i+1})=f_{\infty}(m_{i-1})$. But $p^{i-1}f_{\infty}(m_{i-1})\in f_{\infty}(\mathcal{M}^{0})$, so that $p^{i}f_{\infty}(m_{i+1})\in f_{\infty}(\mathcal{M}^{0})$ which forces $f_{\infty}(m_{i+1})\in f_{\infty}(\mathcal{M}^{i})$ so that $m_{i+1}=fm'_{i}$ for some $m'_{i}\in\mathcal{M}^{i}$. So $fm_{i}=pm_{i+1}=f(pm_{i}')$ which implies $m_{i}=pm'_{i}$ and so $\overline{m}_{i}=0$. \end{proof} This motivates the following: \begin{defn} (\cite{key-5}, definition 2.2.2) A gauge $\mathcal{M}_{0}$ over $\mathcal{D}_{X}^{(0,1)}$ is called quasi-rigid if it satisfies $\text{ker}(f)=\text{im}(v)$ and $\text{ker}(v)=\text{im}(f)$; it is called rigid if it is quasi-rigid and, in addition, $\text{ker}(f)\cap\text{ker}(v)=0$. \end{defn}
By the above lemma, a gauge is rigid if it is of the form $\mathcal{M}/p$ for some standard gauge $\mathcal{M}$.
As explained in \cite{key-5}, rigidity is a very nice condition; and in particular we have the following generalization of \cite{key-5}, lemma 2.2.5: \begin{lem} \label{lem:Basic-Facts-on-Rigid}Let $\mathcal{M}_{0}\in\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$. Then $\mathcal{M}_{0}$ is rigid iff $\mathcal{M}_{0}/f$ is $v$-torsion free and $\mathcal{M}_{0}/v$ is $f$-torsion-free.
Further, $\mathcal{M}_{0}$ is quasi-rigid iff $\mathcal{M}_{0}\otimes_{k[f,v]}^{L}k[f]\tilde{=}\mathcal{M}_{0}/v$ and $\mathcal{M}_{0}\otimes_{k[f,v]}^{L}k[v]\tilde{=}\mathcal{M}_{0}/f$. \end{lem}
\begin{proof} Suppose $\mathcal{M}_{0}$ is rigid. To show $\mathcal{M}_{0}/f$ is $v$-torsion free we have to show that if $m$ is a local section of $\mathcal{M}_{0}$ with $vm=fm'$, then $m\in\text{im}(f)$. Since $\text{im}(f)=\text{ker}(v)$ we have $v(vm)=0$, and since also $f(vm)=0$ we must (by the second condition of rigidity) have $vm=0$. Therefore $m\in\text{ker}(v)=\text{im}(f)$ as desired. The proof that $\mathcal{M}_{0}/v$ is $f$-torsion-free is essentially identical.
Now suppose $\mathcal{M}_{0}$ satisfies $\mathcal{M}_{0}/f$ is $v$-torsion free and $\mathcal{M}_{0}/v$ is $f$-torsion-free. Suppose $m\in\text{ker}(f)$. Then the image of $m$ in $\mathcal{M}_{0}/v$ is $f$-torsion, hence $0$; and so $m\in\text{im}(v)$; therefore $\text{ker}(f)=\text{im}(v)$ and similarly $\text{ker}(v)=\text{im}(f)$. If $fm=0=vm$, then $m\in\text{ker}(f)\cap\text{ker}(v)=\text{im}(v)\cap\text{ker}(v)=\text{ker}(f)\cap\text{im}(f)$. Since $m\in\text{im}(v)$ the image of $m$ in $\mathcal{M}_{0}/v$ is zero; also, $m=fm'$, so since $\mathcal{M}_{0}/v$ is $f$-torsion free we see $m'\in\text{im}(v)$. So $m=fm'=fv(m'')=0$ as desired.
Now we consider the quasi-rigidity condition: we can write the following free resolution of $k[f]$ over $D(k)$: \[ \cdots\rightarrow D(k)(-1)\xrightarrow{v}D(k)\xrightarrow{f}D(k)(-1)\xrightarrow{v}D(k) \] so that $\mathcal{M}_{0}\otimes_{D(k)}^{L}k[f]$ has no higher cohomology groups iff $\text{ker}(v)=\text{im}(f)$ and $\text{ker}(f)=\text{im}(v)$; the same holds for $\mathcal{M}_{0}\otimes_{D(k)}^{L}k[v]\tilde{=}\mathcal{M}_{0}/f$. \end{proof} Now we turn to conditions for checking that a gauge is standard. \begin{prop} \label{prop:Baby-Mazur}Let $\mathcal{M}\in\mathcal{G}_{\text{coh}}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})$, and suppose that $\mathcal{M}^{-\infty}$ and $\mathcal{M}^{\infty}$ are $p$-torsion-free. Set $\mathcal{M}_{0}=\mathcal{M}/p$, and suppose $\mathcal{M}_{0}/v$ is $f$-torsion-free. Then $\mathcal{M}$ is standard; in particular $\mathcal{M}$ is $p$-torsion free. \end{prop}
\begin{proof} Let $\mathcal{N}_{i}=\text{ker}(f_{\infty}:\mathcal{M}^{i}\to\mathcal{M}^{\infty})$ and let $\mathcal{N}=\bigoplus_{i}\mathcal{N}_{i}$. Clearly $\mathcal{N}$ is preserved under $W(k)[f]$; further, since $f_{\infty}(vm)=pf_{\infty}(m)$ we see that $\mathcal{N}$ is preserved under $W(k)[f,v]$.
For $m$ a local section of $\mathcal{M}^{i}$ let $\overline{m}$ denote its image in $\mathcal{M}_{0}$. If $m\in\mathcal{N}$ then certainly $f_{\infty}(\overline{m})=0$ in $\mathcal{M}_{0}^{\infty}$. Since $\mathcal{M}_{0}/v$ is $f$-torsion-free, the map $f_{\infty}:\mathcal{M}_{0}^{i}/v\mathcal{M}_{0}^{i+1}\to\mathcal{M}_{0}^{\infty}$ is injective; so $\overline{m}\in\text{im}(v)$. Thus there is some $m'$ so that $m-vm'\in p\cdot\mathcal{M}^{i}$; since $p=fv$ we see that $m\in\text{im}(v)$ as well; i.e., we can assume $m=vm'$. Since $0=f_{\infty}(vm')=pf_{\infty}(m')$ and $\mathcal{M}^{\infty}$ is $p$-torsion-free, we see that $m'\in\mathcal{N}$ as well. So in fact $\mathcal{N}=v\cdot\mathcal{N}$.
Now, as $\mathcal{M}$ is coherent, we may choose some $j_{0}$ for which $v_{-\infty}:\mathcal{M}^{j}\to\mathcal{M}^{-\infty}$ is an isomorphism for all $j\le j_{0}$. Then, for each such $j$, $\mathcal{M}^{j}$ is $p$-torsion-free (since $\mathcal{M}^{-\infty}$ is). Further, since $fv=p$, we have that $f$ and $v$ are isomorphisms after inverting $p$, which shows $f_{\infty}:\mathcal{M}^{j}[p^{-1}]\tilde{\to}\mathcal{M}^{\infty}[p^{-1}]$. Since $\mathcal{M}^{j}$ and $\mathcal{M}^{\infty}$ are $p$-torsion-free, we see that $f_{\infty}$ is injective on $\mathcal{M}^{j}$. Thus $\mathcal{N}$ is concentrated in degrees above $j_{0}$, and we see that every element of $\mathcal{N}$ is killed by a power of $v$. Since $\mathcal{M}$ is coherent, it is locally noetherian, so that every local section of $\mathcal{M}$ killed by a power of $v$ is actually killed by $v^{N}$ for some fixed $N\in\mathbb{N}$. Therefore, we have $v^{N}\cdot\mathcal{N}=0$. Since also $\mathcal{N}=v\cdot\mathcal{N}$ we obtain $\mathcal{N}=0$. Thus each $f_{\infty}:\mathcal{M}^{i}\to\mathcal{M}^{\infty}$ is injective. It follows that each $\mathcal{M}^{i}$ is $p$-torsion-free, and since $fv=p$ we see that $\mathcal{M}$ is $f$ and $v$-torsion-free as well.
Choose $j_{0}$ so that $v:\mathcal{M}^{j}\to\mathcal{M}^{j-1}$ is an isomorphism for all $j\leq j_{0}$. To finish the proof, we have to show that, for all $i\in\mathbb{Z}$, $f_{\infty}(\mathcal{M}^{i+j_{0}})=\{m\in\mathcal{M}^{\infty}|p^{i}m\in f_{\infty}(\mathcal{M}^{j_{0}})\}$. If $i\leq0$, then $v^{-i}:\mathcal{M}^{j_{0}}\to\mathcal{M}^{i+j_{0}}$ is an isomorphism, and $f_{\infty}(\mathcal{M}^{i+j_{0}})=p^{-i}f_{\infty}(\mathcal{M}^{j_{0}})$ as required. If $i>0$, then for $m\in\mathcal{M}^{i+j_{0}}$ we have $f_{\infty}(v^{i}m)=p^{i}f_{\infty}(m)\in f_{\infty}(\mathcal{M}^{j_{0}})$
so that $f_{\infty}(\mathcal{M}^{i+j_{0}})\subseteq\{m\in\mathcal{M}^{\infty}|p^{i}m\in f_{\infty}(\mathcal{M}^{j_{0}})\}$.
For the reverse inclusion, let $m\in\mathcal{M}^{\infty}$ be such that $p^{i}m=f_{\infty}(m_{j_{0}})$ for some $m_{j_{0}}\in\mathcal{M}^{j_{0}}$. By definition $\mathcal{M}^{\infty}$ is the union of its sub-sheaves $f_{\infty}(\mathcal{M}^{n})$, so suppose $m=f_{\infty}(m_{l})$ for some $m_{l}\in\mathcal{M}^{l}$, with $l>i+j_{0}$. Since $f_{\infty}(v^{i}m_{l})=p^{i}f_{\infty}(m_{l})=p^{i}m=f_{\infty}(m_{j_{0}})$, we see that \[ f^{l-(i+j_{0})}(m_{j_{0}})=v^{i}m_{l} \] Consider the image of this equation in $\mathcal{M}_{0}$. It shows that $f^{l-(i+j_{0})}(\overline{m}_{j_{0}})\in v\cdot\mathcal{M}_{0}$. Since $f$ is injective on $\mathcal{M}_{0}/v$, the assumption that
$l-(i+j_{0})>0$ implies $\overline{m}_{j_{0}}\in v\cdot\mathcal{M}_{0}$. As above, since $fv=p$ this implies $m_{j_{0}}\in v\cdot\mathcal{M}$; writing $m_{j_{0}}=vm_{j_{0}+1}$ we now have the equation $f^{l-(i+j_{0})}(vm_{j_{0}+1})=v^{i}m_{l}$. Since $v$ acts injectively on $\mathcal{M}$ we see that $f^{l-(i+j_{0})}(m_{j_{0}+1})=v^{i-1}m_{l}$. Applying $f_{\infty}$, we see that $p^{i-1}m\in f_{\infty}(\mathcal{M}^{j_{0}+1})$. If $i=1$, this immediately proves $f_{\infty}(\mathcal{M}^{1+j_{0}})=\{m\in\mathcal{M}^{\infty}|pm\in f_{\infty}(\mathcal{M}^{j_{0}})\}$.
For $i>1$, by induction on $i$ we can suppose $pm\in f_{\infty}(\mathcal{M}^{j_{0}+i-1})$. But then $f_{\infty}(vm_{l})=pf_{\infty}(m_{l})=pm=f_{\infty}(m_{j_{0}+i-1})$ for some $m_{j_{0}+i-1}\in\mathcal{M}^{j_{0}+i-1}$. This implies $f^{l-(j_{0}+i)}(m_{j_{0}+i-1})=vm_{l}$, so if $l>j_{0}+i$ then, arguing exactly as in the previous paragraph, we have $m_{j_{0}+i-1}=vm_{j_{0}+i}$ for some $m_{j_{0}+i}\in\mathcal{M}^{j_{0}+i}$ and so $f^{l-(j_{0}+i)}(m_{j_{0}+i})=m_{l}$ which implies $m=f_{\infty}(m_{l})\in f_{\infty}(\mathcal{M}^{j_{0}+i})$ as required. \end{proof} This result implies a convenient criterion for ensuring that gauges are standard; this is the first analogue of Mazur's theorem in this context: \begin{thm} \label{thm:Mazur!}Let $\mathcal{M}^{\cdot}\in D_{\text{coh}}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$. Suppose that $\mathcal{H}^{n}(\mathcal{M}^{\cdot})^{-\infty}$ and $\mathcal{H}^{n}(\mathcal{M}^{\cdot})^{\infty}$ are $p$-torsion-free for all $n$, and suppose that $\mathcal{H}^{n}((\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f])$ is $f$-torsion-free for all $n$. Then $\mathcal{H}^{n}(\mathcal{M}^{\cdot})$ is standard for all $n$.
In particular, $\mathcal{H}^{n}(\mathcal{M}^{\cdot})$ is $p$-torsion-free, and $\mathcal{H}^{n}(\mathcal{M}^{\cdot})/p$ is rigid for all $n$. We have $\mathcal{H}^{n}(\mathcal{M}^{\cdot})/p\tilde{=}\mathcal{H}^{n}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)$, $(\mathcal{H}^{n}(\mathcal{M}^{\cdot})/p)/v\tilde{=}\mathcal{H}^{n}((\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f])$, and $(\mathcal{H}^{n}(\mathcal{M}^{\cdot})/p)/f\tilde{=}\mathcal{H}^{n}((\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[v])$ for all $n$. Further, $(\mathcal{H}^{n}(\mathcal{M}^{\cdot})/p)/f$ is $v$-torsion-free and $(\mathcal{H}^{n}(\mathcal{M}^{\cdot})/p)/v$ is $f$-torsion-free for all $n$. \end{thm}
\begin{proof} Suppose that $b\in\mathbb{Z}$ is the largest integer so that $\mathcal{H}^{b}(\mathcal{M}^{\cdot})\neq0$. Then $b$ is the largest integer for which $\mathcal{H}^{b}((\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k))\neq0$, and \[ \mathcal{H}^{b}((\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k))\tilde{=}\mathcal{H}^{b}(\mathcal{M}^{\cdot})/p \] Thus we have a distinguished triangle \[ \tau_{\leq b-1}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\to\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k\to(\mathcal{H}^{b}(\mathcal{M}^{\cdot})/p)[-b] \] to which we may apply the functor $\otimes_{k[f,v]}^{L}k[f]$. This yields \begin{equation} \tau_{\leq b-1}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f]\to(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f]\to(\mathcal{H}^{b}(\mathcal{M}^{\cdot})/p)[-b]\otimes_{D(k)}^{L}k[f]\label{eq:triangle!} \end{equation} Since $\otimes_{D(k)}k[f]$ is right exact, the complex on the left is still concentrated in degrees $\leq b-1$, and the middle and right complex are concentrated in degrees $\leq b$. Further \[ \mathcal{H}^{b}((\mathcal{H}^{b}(\mathcal{M}^{\cdot})/p)[-b]\otimes_{D(k)}^{L}k[f])\tilde{=}\mathcal{H}^{0}((\mathcal{H}^{b}(\mathcal{M}^{\cdot})/p)\otimes_{D(k)}^{L}k[f])\tilde{=}(\mathcal{H}^{b}(\mathcal{M}^{\cdot})/p)/v \] Therefore $(\mathcal{H}^{b}(\mathcal{M}^{\cdot})/p)/v\tilde{=}\mathcal{H}^{b}((\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f])$ is $f$-torsion-free by assumption. Thus we may apply the previous proposition to $\mathcal{H}^{b}(\mathcal{M}^{\cdot})$ and conclude that it is standard.
Now, to finish the proof that $\mathcal{H}^{n}(\mathcal{M}^{\cdot})$ is standard for all $n$, we proceed by induction on the cohomological length of $\mathcal{M}^{\cdot}$. If the length is $1$ we are done. If not, we have the distinguished triangle \[ \tau_{\leq b-1}(\mathcal{M}^{\cdot})\to\mathcal{M}^{\cdot}\to\mathcal{H}^{b}(\mathcal{M}^{\cdot})[-b] \] which yields the triangle \[ \tau_{\leq b-1}(\mathcal{M}^{\cdot})\otimes_{W(k)}^{L}k\to\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k\to(\mathcal{H}^{b}(\mathcal{M}^{\cdot})/p)[-b] \] where we have used that $\mathcal{H}^{b}(\mathcal{M}^{\cdot})$ is $p$-torsion-free to identify $(\mathcal{H}^{b}(\mathcal{M}^{\cdot})/p)\tilde{=}\mathcal{H}^{b}(\mathcal{M}^{\cdot})\otimes_{W(k)}^{L}k$. As noted above, we have $\mathcal{H}^{b}((\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k))\tilde{=}\mathcal{H}^{b}(\mathcal{M}^{\cdot})/p$, so this triangle implies the isomorphism \[ \tau_{\leq b-1}(\mathcal{M}^{\cdot})\otimes_{W(k)}^{L}k\tilde{=}\tau_{\leq b-1}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k) \] Further, since $\mathcal{H}^{b}(\mathcal{M}^{\cdot})$ is standard we have that $\mathcal{H}^{b}(\mathcal{M}^{\cdot})/p$ is rigid; therefore by \lemref{Basic-Facts-on-Rigid} we have $(\mathcal{H}^{b}(\mathcal{M}^{\cdot})/p)\otimes_{D(k)}^{L}k[f]\tilde{=}(\mathcal{H}^{b}(\mathcal{M}^{\cdot})/p)/v$ is concentrated in a single degree. Thus, the distinguished triangle \eqref{triangle!} becomes \[ (\tau_{\leq b-1}(\mathcal{M}^{\cdot})\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f]\to(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f]\to(\mathcal{H}^{b}(\mathcal{M}^{\cdot})/p)/v[-b] \] and so we have the isomorphism \[ (\tau_{\leq b-1}(\mathcal{M}^{\cdot})\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f]\tilde{=}\tau_{\leq b-1}((\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f]) \] Thus the complex $\tau_{\leq b-1}(\mathcal{M}^{\cdot})$ satisfies the assumption that $(\tau_{\leq b-1}(\mathcal{M}^{\cdot})\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f]$ has cohomology sheaves which are $f$-torsion-free, and so the complex $\tau_{\leq b-1}(\mathcal{M}^{\cdot})$ satisfies all of the assumptions of the theorem, but has a lesser cohomological length than $\mathcal{M}^{\cdot}$. So we conclude by induction that $\mathcal{H}^{n}(\mathcal{M}^{\cdot})$ is standard for all $n$.
For the final part, since standard modules are torsion-free, we see \[ \mathcal{H}^{n}(\mathcal{M}^{\cdot})/p\tilde{=}\mathcal{H}^{n}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k) \] for all $n$, and since each $\mathcal{H}^{n}(\mathcal{M}^{\cdot})/p$ is rigid, the complex $\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k$ has cohomology sheaves which are all acyclic for $\otimes_{D(k)}k[f]$ and for $\otimes_{D(k)}k[v]$, by \lemref{Basic-Facts-on-Rigid}; and the last sentence follows. \end{proof} \begin{rem} As we shall see below, the condition that each cohomology sheaf of $((\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f])$ is $f$-torsion-free is quite natural; it says that the spectral sequence associated to the Hodge filtration on $(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)^{\infty}$ degenerates at $E_{1}$; this can be checked using Hodge theory in many geometric situations. On the other hand, one conclusion of the theorem is that each cohomology sheaf of $(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[v]$ is $v$-torsion-free; this corresponds to degeneration of the conjugate spectral sequence on $(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)^{-\infty}$. Over a point, one checks in an elementary way (using the finite dimensionality of the vector spaces involved) that these two degenerations are equivalent; this is true irrespective of whether the lift $\mathcal{M}^{\cdot}$ has $p$-torsion-free cohomology groups. This allows one to make various stronger statements in this case (c.f., e.g., \cite{key-10}, proof of theorem 8.26). I don't know if this is true over a higher-dimensional base. \end{rem}
In most cases of interest, the assumption that $\mathcal{H}^{n}(\mathcal{M}^{\cdot})^{\infty}$ is $p$-torsion-free is actually redundant; more precisely, it is implied by the assumption that $\mathcal{H}^{n}(\mathcal{M}^{\cdot})^{-\infty}$ is $p$-torsion-free when one has a Frobenius action; c.f. \thmref{F-Mazur} below.
\subsection{Filtrations, Rees algebras, and filtered Frobenius descent}
In this section, we consider how the various gradings and filtrations appearing in this paper (in positive characteristic) relate to the more usual Hodge and conjugate filtrations in $\mathcal{D}$-module theory. We start with the basic definitions; as usual $X$ is smooth over $k$. \begin{defn} \label{def:Hodge-and-Con} The decreasing filtration ${\displaystyle \text{image}(\mathcal{D}^{(0,1),i}\to\mathcal{D}_{X}^{(0)})}:=C^{i}(\mathcal{D}_{X}^{(0)})$ is called the conjugate filtration. The increasing filtration ${\displaystyle \text{image}(\mathcal{D}^{(0,1),i}\to\mathcal{D}_{X}^{(1)})}:=F^{i}(\mathcal{D}_{X}^{(1)})$ is called the Hodge filtration.
Similarly, for any $\mathcal{M}\in\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$ we may define ${\displaystyle \text{image}(\mathcal{M}^{i}\xrightarrow{v_{\infty}}\mathcal{M}^{-\infty})}:=C^{i}(\mathcal{M}^{-\infty})$ and ${\displaystyle \text{image}(\mathcal{M}^{i}\xrightarrow{f_{\infty}}\mathcal{M}^{\infty})}:=F^{i}(\mathcal{M}^{\infty})$, the conjugate and Hodge filtrations, respectively. \end{defn}
\begin{rem} \label{rem:Description-of-conjugate}1) From the explicit description of $v$ given in (the proof of) \lemref{Construction-of-v}, we see that $C^{i}(\mathcal{D}_{X}^{(0)})=\mathcal{I}^{i}\mathcal{D}_{X}^{(0)}$ where $\mathcal{I}$ is the two-sided ideal of $\mathcal{D}_{X}^{(0)}$ generated by $\mathcal{Z}(\mathcal{D}_{X}^{(0)})^{+}$, the positive-degree elements of the center\footnote{The center is a graded sheaf of algebras via the isomorphism $\mathcal{Z}(\mathcal{D}_{X}^{(0)})\tilde{=}\mathcal{O}_{T^{*}X^{(1)}}$}. In local coordinates, $\mathcal{I}$ is just the ideal generated by $\{\partial_{1}^{p},\dots,\partial_{n}^{p}\}$, which matches the explicit description of the action of $v$ given above. This is the definition of the conjugate filtration on $\mathcal{D}_{X}^{(0)}$ given in {[}OV{]}, section 3.4, extended to a $\mathbb{Z}$-filtration by setting $C^{i}(\mathcal{D}_{X}^{(0)})=\mathcal{D}_{X}^{(0)}$ for all $i\leq0$.
2) On the other hand, from \thmref{Local-Coords-for-D+}, we see that
$F^{i}(\mathcal{D}_{X}^{(1)})$ is a locally free, finite $\overline{\mathcal{D}_{X}^{(0)}}$-module; in local coordinates it has a basis $\{(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\}_{0\leq|J|\leq i}$.
3) If $\mathcal{M}$ is a coherent gauge over $X$, then by \lemref{Basic-v} the Hodge filtration of $\mathcal{M}^{\infty}$ is exhaustive and $F^{i}(\mathcal{M}^{\infty})=0$ for $i<<0$, and the conjugate filtration satisfies $C^{i}(\mathcal{M}^{-\infty})=\mathcal{M}^{-\infty}$ for all $i<<0$. \end{rem}
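To make the remark concrete in the simplest case: if $X=\mathbb{A}_{k}^{1}$ with coordinate $x$ (so that $\mathcal{Z}(\mathcal{D}_{X}^{(0)})=k[x^{p},\partial^{p}]$), then \[ C^{i}(\mathcal{D}_{X}^{(0)})=\partial^{pi}\cdot\mathcal{D}_{X}^{(0)}\quad(i\geq0), \] while $F^{i}(\mathcal{D}_{X}^{(1)})$ is free over $\overline{\mathcal{D}_{X}^{(0)}}$ with basis $1,\partial^{[p]},\dots,(\partial^{[p]})^{i}$.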
\begin{defn} \label{def:Rees-and-Rees-bar}Let $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ denote the Rees algebra of $\mathcal{D}_{X}^{(1)}$ with respect to the Hodge filtration; and let $\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})$ denote the Rees algebra of $\mathcal{D}_{X}^{(0)}$ with respect to the conjugate filtration. We will denote the Rees parameters (i.e., the element $1\in F^{1}(\mathcal{D}_{X}^{(1)})$, respectively $1\in C^{-1}(\mathcal{D}_{X}^{(0)})$) by $f$ and $v$, respectively. We also let $\mathcal{R}(\mathcal{D}_{X}^{(0)})$ denote the Rees algebra of $\mathcal{D}_{X}^{(0)}$ with respect to the symbol filtration; here the Rees parameter will also be denoted $f$. \end{defn}
\begin{lem} We have $\mathcal{D}_{X}^{(0,1)}/v\tilde{=}\mathcal{R}(\mathcal{D}_{X}^{(1)})$ and $\mathcal{D}_{X}^{(0,1)}/f\tilde{=}\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})$ as graded rings. \end{lem}
\begin{proof} By \corref{Local-coords-over-A=00005Bf,v=00005D}, we have that $f$ acts injectively on $\mathcal{D}_{X}^{(0,1)}/v$. Since $fv=0$ the map $f_{\infty}:\mathcal{D}_{X}^{(0,1),i}\to\mathcal{D}_{X}^{(1)}$ factors through a map $f_{\infty}:\mathcal{D}_{X}^{(0,1),i}/v\to\mathcal{D}_{X}^{(1)}$, which has image equal to $F^{i}(\mathcal{D}_{X}^{(1)})$ (by definition). The kernel is $0$ since $f$ acts injectively; so $\mathcal{D}_{X}^{(0,1),i}/v\tilde{\to}F^{i}(\mathcal{D}_{X}^{(1)})$ as required. The isomorphism $\mathcal{D}_{X}^{(0,1)}/f\tilde{=}\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})$ is proved identically. \end{proof} Therefore we have the natural functors \[ \mathcal{M}^{\cdot}\to\mathcal{R}(\mathcal{D}_{X}^{(1)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot}\tilde{\to}k[f]\otimes_{D(k)}^{L}\mathcal{M}^{\cdot} \] from $D(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$ to $D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)})))$ and \[ \mathcal{M}^{\cdot}\to\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot}\tilde{\to}k[v]\otimes_{D(k)}^{L}\mathcal{M}^{\cdot} \] from $D(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$ to $D(\mathcal{G}(\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})))$.
We are going to give some basic results on the derived categories of modules over these rings. As a motivation, we recall a general result of Schapira-Schneiders (\cite{key-47}, theorem 4.20; c.f. also example 4.22) \begin{thm} Let $(\mathcal{A},F)$ be a $\mathbb{Z}$-filtered sheaf of rings on a topological space; let $\mathcal{R}(\mathcal{A})$ denote the associated Rees algebra. Let $D((\mathcal{A},F)-\text{mod})$ denote the filtered derived category of modules over $(\mathcal{A},F)$. Then there is an equivalence of categories \[ \mathcal{R}:D((\mathcal{A},F)-\text{mod})\tilde{\to}D(\mathcal{G}(\mathcal{R}(\mathcal{A}))) \] which preserves the subcategories of bounded, bounded below, and bounded above complexes. To a filtered module $\mathcal{M}$ (considered as a complex in degree $0$) this functor attaches the usual Rees module $\mathcal{R}(\mathcal{M})$. \end{thm}
Recall that a filtered complex $\mathcal{M}^{\cdot}$ over $(\mathcal{A},F)$ is said to be strict if each differential $d:(\mathcal{M}^{i},F)\to(\mathcal{M}^{i+1},F)$ is strict, i.e. satisfies $d(\mathcal{M}^{i})\cap F_{j}(\mathcal{M}^{i+1})=d(F_{j}(\mathcal{M}^{i}))$ for all $j$ (on local sections). Then $\mathcal{M}^{\cdot}$ is quasi-isomorphic to a strict complex iff each cohomology sheaf $\mathcal{H}^{i}(\mathcal{R}(\mathcal{M}^{\cdot}))$ is torsion-free with respect to the Rees parameter. If $\mathcal{M}^{\cdot}$ is a bounded complex for which the filtration is bounded below (i.e. there is some $j\in\mathbb{Z}$ so that $F_{j}(\mathcal{M}^{i})=0$ for all $i$), then this condition is equivalent to the degeneration at $E_{1}$ of the spectral sequence associated to the filtration.
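To illustrate the torsion criterion in the simplest possible case (a toy example, not needed in what follows): take $\mathcal{A}=k$ with the trivial filtration, and consider the two-term filtered complex $\mathcal{M}^{0}\xrightarrow{\mathrm{id}}\mathcal{M}^{1}$ with $\mathcal{M}^{0}=\mathcal{M}^{1}=k$, where $F_{j}(\mathcal{M}^{0})=k$ for $j\geq1$ and $F_{j}(\mathcal{M}^{1})=k$ for $j\geq0$ (and both are $0$ otherwise). The differential is filtered but not strict, as $d(\mathcal{M}^{0})\cap F_{0}(\mathcal{M}^{1})=k\neq d(F_{0}(\mathcal{M}^{0}))=0$; correspondingly, applying $\mathcal{R}$ yields the inclusion $t\cdot k[t]\hookrightarrow k[t]$ (writing $t$ for the Rees parameter), whose cokernel $k[t]/t$ is $t$-torsion.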
Now we return the discussion to $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ and $\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})$. We begin with the latter; recall that Ogus and Vologodsky in \cite{key-11} have considered the filtered derived category associated to the conjugate filtration on $\mathcal{D}_{X}^{(0)}$; by the above theorem\footnote{The careful reader will note that in their work they require filtrations to be separated; however, this leads to a canonically isomorphic filtered derived category, as explained in \cite{key-59}, proposition 3.1.22 } this category is equivalent to $\mathcal{G}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}))$. After we construct our pushforward on $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$, we will show that it is compatible with the one constructed in \cite{key-11}; for now, we will just prove the following basic structure theorem for $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$, following \cite{key-3}, theorem 2.2.3: \begin{prop} We have $\mathcal{Z}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}))\tilde{=}\mathcal{O}_{T^{*}X^{(1)}}[v]$; this is a graded ring where $\mathcal{O}_{T^{*}X^{(1)}}$ is graded as usual and $v$ is placed in degree $-1$. The algebra $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ is Azumaya over $\mathcal{Z}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}))$, of index $p^{\text{dim}(X)}$. In particular, $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})(U)$ has finite homological dimension for each open affine $U$. \end{prop}
\begin{proof} The filtered embedding $\mathcal{O}_{T^{*}X^{(1)}}\to\mathcal{D}_{X}^{(0)}$ induces the map $\mathcal{O}_{T^{*}X^{(1)}}[v]\to\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$, which, by the very definition of the conjugate filtration, is a map of graded rings. To show that this map is an isomorphism onto the center, note that by \corref{Local-coords-over-A=00005Bf,v=00005D}, after choosing etale local coordinates we have that a basis for $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ over $\mathcal{O}_{X}[v]$ is given by $\{\partial^{I}(\partial^{[p]})^{J}\}$ where each entry of $I$ is contained in $\{0,\dots,p-1\}$; and the formula for the bracket by $\partial_{i}^{[p]}$ (c.f. \thmref{Local-Coords-for-D+}) shows that $(\partial^{[p]})^{J}$ is now central. Thus the center is given by $\mathcal{O}_{X^{(1)}}[v,\partial_{1}^{[p]},\dots,\partial_{n}^{[p]}]$, which is clearly the (isomorphic) image of the map.
The above local coordinates also show that $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ is locally free over $\mathcal{Z}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}))$, of rank $p^{2\text{dim}(X)}$. Now we can follow the strategy of \cite{key-3} to show that $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ is Azumaya: we consider the commutative subalgebra $\mathcal{A}_{X,v}:=\mathcal{O}_{X}\otimes_{\mathcal{O}_{X^{(1)}}}\mathcal{O}_{T^{*}X^{(1)}}[v]$ inside $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$; it acts by right multiplication on $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$, and $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ is a locally free module over it of rank $p^{\text{dim}(X)}$. We have the action map \[ A:\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{T^{*}X^{(1)}}[v]}\mathcal{A}_{X,v}\to\mathcal{E}nd_{\mathcal{A}_{X,v}}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})) \] which is a morphism of algebras, both of which are locally free modules of rank $p^{2\text{dim}(X)}$ over $\mathcal{A}_{X,v}$. Since the left hand side is the pullback of $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$, considered as a sheaf of algebras over $T^{*}X^{(1)}\times\mathbb{A}^{1}$, to the flat cover $X\times_{X^{(1)}}T^{*}X^{(1)}\times\mathbb{A}^{1}$, we see that $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ is Azumaya if $A$ is an isomorphism.
To prove that $A$ an isomorphism it suffices to prove it after inverting $v$ and after setting $v=0$. Upon inverting $v$, we have $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})=\mathcal{D}_{X}^{(0)}[v,v^{-1}]$, so the map $A$ simply becomes the analogous map for $\mathcal{D}_{X}^{(0)}$ tensored with $k[v,v^{-1}]$; this is shown to be an isomorphism by \cite{key-3}, proposition 2.2.2. Upon setting $v=0$, we obtain \[ A_{0}:\text{gr}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{T^{*}X^{(1)}}}\mathcal{A}_{X}\to\mathcal{E}nd_{\mathcal{A}_{X}}(\text{gr}(\mathcal{D}_{X}^{(0)})) \] where $\text{gr}(\mathcal{D}_{X}^{(0)})$ is the associated graded of $\mathcal{D}_{X}^{(0)}$ with respect to the conjugate filtration; this is a (split) Azumaya algebra which is easily seen to be isomorphic to $\overline{\mathcal{D}}_{X}^{(0)}\otimes_{\mathcal{O}_{X^{(1)}}}\mathcal{O}_{T^{*}X^{(1)}}$ (c.f. \cite{key-11}; the discussion below lemma 3.18). Thus the map $A_{0}$ is again an isomorphism; indeed, we have \[ \text{gr}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{T^{*}X^{(1)}}}\mathcal{A}_{X}\tilde{\to}\overline{\mathcal{D}}_{X}^{(0)}\otimes_{\mathcal{O}_{X^{(1)}}}\mathcal{O}_{T^{*}X^{(1)}}\otimes_{\mathcal{O}_{T^{*}X^{(1)}}}\mathcal{A}_{X} \] \[ \tilde{\to}\mathcal{E}nd_{\mathcal{O}_{X^{(1)}}}(\mathcal{O}_{X})\otimes_{\mathcal{O}_{X^{(1)}}}\mathcal{A}_{X} \] so that each reduction of $\text{gr}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{T^{*}X^{(1)}}}\mathcal{A}_{X}$ to closed point in $X\times_{X^{(1)}}T^{*}X^{(1)}$ is a matrix algebra of rank $p^{\text{dim}(X)}$, and hence a central simple ring, and the result follows immediately. \end{proof} Next we turn to the category of modules over $\mathcal{R}(\mathcal{D}_{X}^{(1)})$, in this case, we can describe them in terms of the familiar filtered $\mathcal{D}_{X}^{(0)}$-modules (in terms of the symbol filtration). The key to doing so is a version of Berthelot's Frobenius descent for filtered $\mathcal{D}_{X}^{(1)}$-modules; while we will consider the more general Frobenius descent over $\mathfrak{X}$ in the next subsection, we will give the basic construction on $X$ for now.
To proceed, recall that we have the embedding $\overline{\mathcal{D}_{X}^{(0)}}\subset\mathcal{D}_{X}^{(1)}$ which is simply the image of the map $f_{\infty}:\mathcal{D}_{X}^{(0)}\to\mathcal{D}_{X}^{(1)}$. Let $\mathcal{J}\subset\overline{\mathcal{D}_{X}^{(0)}}$ denote the annihilator of $1\in\mathcal{O}_{X}$ under the action of $\overline{\mathcal{D}_{X}^{(0)}}$ on $\mathcal{O}_{X}$; we have the left ideal $\mathcal{D}_{X}^{(1)}\cdot\mathcal{J}$. \begin{prop} \label{prop:Basic-F^*-over-k}There is an isomorphism of $\mathcal{O}_{X}$-modules $\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J}\tilde{\to}F^{*}\mathcal{D}_{X}^{(0)}$, thus endowing $F^{*}\mathcal{D}_{X}^{(0)}$ with the structure of a left $\mathcal{D}_{X}^{(1)}$-module; and hence the structure of a $(\mathcal{D}_{X}^{(1)},\mathcal{D}_{X}^{(0)})$-bimodule. Let $F^{i}(\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J})$ be the filtration induced from the Hodge filtration on $\mathcal{D}_{X}^{(1)}$, and let $F^{i}(\mathcal{D}_{X}^{(0)})$ be the symbol filtration. Then we have \[ F^{i}(\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J})\cdot F^{j}(\mathcal{D}_{X}^{(0)})=F^{i+j}(\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J}) \] for all $i,j\geq0$. The induced morphism $\mathcal{D}_{X}^{(1)}\to\mathcal{E}nd_{\mathcal{D}_{X}^{(0),\text{opp}}}(F^{*}\mathcal{D}_{X}^{(0)})$ is an isomorphism of filtered algebras. \end{prop}
\begin{proof} We put a right $\mathcal{D}_{X}^{(0)}$-module structure on $\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J}$ as follows: let $\Phi\in\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J}$ be a section, over some open affine subset $U$ which possesses local coordinates. Let $\partial$ be a derivation over $U$. We may choose a differential operator $\delta$ of order $p$ on $U$ such that $\delta(f^{p})=(\partial f)^{p}$ for all $f\in\mathcal{O}_{X}(U)$; for instance, if $\partial=\sum a_{i}\partial_{i}$ then we may choose $\delta=\sum a_{i}^{p}\partial_{i}^{[p]}$. If $\delta'$ is another such differential operator, then $\delta-\delta'$ is a section of $\overline{\mathcal{D}_{X}^{(0)}}(U)$ which annihilates $\mathcal{O}_{X}(U)^{p}$. In particular, $\delta-\delta'\in\mathcal{J}$, and so $\Phi\delta=\Phi\delta'$ inside $\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J}$.
So, if we set $\Phi\star f=\Phi\cdot f^{p}$ and $\Phi\star\partial=\Phi\cdot\delta$ we obtain a (semilinear) right action of $\mathcal{D}_{X}^{(0)}$ on $\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J}$. Since $\mathcal{O}_{X}$ acts on $\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J}$ on the left, the map \[ (f,\Psi)\to f\star\Psi \] induces a morphism $F^{*}\mathcal{D}_{X}^{(0)}\to\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J}$. To show it is an isomorphism, let us consider filtrations: by \thmref{Local-Coords-for-D+} we have that $F^{i}(\mathcal{D}_{X}^{(1)})(U)$ is the free $\overline{\mathcal{D}_{X}^{(0)}}(U)$-module on $\{(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\}_{|J|\leq i}$. Since $\overline{\mathcal{D}_{X}^{(0)}}/\mathcal{J}\tilde{\to}\mathcal{O}_{X}$, we see that $F^{i}(\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J})$ is the free $\mathcal{O}_{X}(U)$-module on $\{(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\}_{|J|\leq i}$. On the other hand, $F^{i}(\mathcal{D}_{X}^{(0)})(U)$ is the free $\mathcal{O}_{X}(U)$-module on $\{\partial_{1}^{j_{1}}\cdots\partial_{n}^{j_{n}}\}_{|J|\leq i}$. Since $1\star\partial_{i}=\partial_{i}^{[p]}$ we deduce $F^{*}F^{i}(\mathcal{D}_{X}^{(0)})=F^{i}(\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J})$ which implies that the map $F^{*}\mathcal{D}_{X}^{(0)}\to\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J}$ is an isomorphism. The same calculation gives \[ F^{i}(\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J})\cdot F^{j}(\mathcal{D}_{X}^{(0)})=F^{i+j}(\mathcal{D}_{X}^{(1)}/\mathcal{D}_{X}^{(1)}\cdot\mathcal{J}) \] Therefore, the map \[ \mathcal{D}_{X}^{(1)}\to\mathcal{E}nd_{\mathcal{D}_{X}^{(0),\text{op}}}(F^{*}\mathcal{D}_{X}^{(0)}) \] is a morphism of filtered algebras, where the filtration on the right hand side is defined by \[
F^{i}(\mathcal{E}nd_{\mathcal{D}_{X}^{(0)}}(F^{*}\mathcal{D}_{X}^{(0)}))=\{\varphi\in\mathcal{E}nd_{\mathcal{D}_{X}^{(0)}}(F^{*}\mathcal{D}_{X}^{(0)})|\varphi(F^{j}(F^{*}\mathcal{D}_{X}^{(0)}))\subset F^{i+j}(F^{*}\mathcal{D}_{X}^{(0)})\phantom{i}\text{for all}\phantom{i}j\} \] Upon passing to the associated graded, we obtain the morphism \[ \text{gr}(\mathcal{D}_{X}^{(1)})\to\text{gr}\mathcal{E}nd_{\mathcal{D}_{X}^{(0),\text{op}}}(F^{*}\mathcal{D}_{X}^{(0)})\tilde{=}\mathcal{E}nd_{\text{gr}(\mathcal{D}_{X}^{(0)})}(\text{gr}(F^{*}\mathcal{D}_{X}^{(0)})) \] (the last isomorphism follows from the fact that $F^{*}\mathcal{D}_{X}^{(0)}$ is a locally free filtered module over $\mathcal{D}_{X}^{(0)}$). Working in local coordinates, we obtain the morphism \[ \mathcal{\overline{D}}_{X}^{(0)}[\partial_{1}^{[p]},\dots,\partial_{n}^{[p]}]\to\mathcal{E}nd_{\text{Sym}_{\mathcal{O}_{X}}(\mathcal{T}_{X})}(F^{*}(\text{Sym}_{\mathcal{O}_{X}}(\mathcal{T}_{X}))) \] where $\partial_{i}^{[p]}$ is sent to $\partial_{i}\in\mathcal{T}_{X}$. By Cartier descent, there is an isomorphism $\mathcal{\overline{D}}_{X}^{(0)}\tilde{=}\mathcal{E}nd_{\mathcal{O}_{X}}(F^{*}\mathcal{O}_{X})$ (here, the action of $\mathcal{O}_{X}$ on $F^{*}\mathcal{O}_{X}$ is on the right-hand factor in the tensor product; in other words, it is the action of $\mathcal{O}_{X}$ on itself through the Frobenius); and so we see that this map is an isomorphism. Thus the map $\mathcal{D}_{X}^{(1)}\to\mathcal{E}nd_{\mathcal{D}_{X}^{(0),\text{op}}}(F^{*}\mathcal{D}_{X}^{(0)})$ is an isomorphism as claimed. \end{proof} This yields a functor $\mathcal{M}\to F^{*}\mathcal{M}:=F^{*}\mathcal{D}_{X}^{(0)}\otimes_{\mathcal{D}_{X}^{(0)}}\mathcal{M}$ (the Frobenius pullback) from $\mathcal{D}_{X}^{(0)}-\text{mod}$ to $\mathcal{D}_{X}^{(1)}-\text{mod}$; from the last part of the above proposition and standard Morita theory one sees that it is an equivalence of categories. Further: \begin{thm} \label{thm:Filtered-Frobenius} The Frobenius pullback $F^{*}$ can be upgraded to an equivalence from $\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(0)}))$ to $\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)}))$. Therefore, the functor $F^{*}$ can also be upgraded to an equivalence of categories from filtered $\mathcal{D}_{X}^{(0)}$-modules to filtered $\mathcal{D}_{X}^{(1)}$-modules. In particular, $\mathcal{R}(\mathcal{D}_{X}^{(1)})(U)$ has finite homological dimension for each open affine $U$. \end{thm}
\begin{proof} In \propref{Basic-F^*-over-k}, we showed that $F^{*}\mathcal{D}_{X}^{(0)}$ is filtered, in a way that strictly respects the filtered action of both $\mathcal{D}_{X}^{(1)}$ and $\mathcal{D}_{X}^{(0)}$. So, consider the Rees module $\mathcal{R}(F^{*}\mathcal{D}_{X}^{(0)})$. This is a graded $(\mathcal{R}(\mathcal{D}_{X}^{(1)}),\mathcal{R}(\mathcal{D}_{X}^{(0)}))$-bimodule; and the isomorphism $F^{*}F^{i}(\mathcal{D}_{X}^{(0)})\tilde{=}F^{i}(F^{*}\mathcal{D}_{X}^{(0)})$ proved in loc.cit. shows that $\mathcal{R}(F^{*}\mathcal{D}_{X}^{(0)})\tilde{=}F^{*}\mathcal{R}(\mathcal{D}_{X}^{(0)})$ as a right $\mathcal{R}(\mathcal{D}_{X}^{(0)})$-module. Thus the result will follow if we can show that the action map \begin{equation} \mathcal{R}(\mathcal{D}_{X}^{(1)})\to\mathcal{E}nd_{\mathcal{R}(\mathcal{D}_{X}^{(0)})}(\mathcal{R}(F^{*}\mathcal{D}_{X}^{(0)}))\tilde{=}\underline{\mathcal{E}nd}{}_{\mathcal{R}(\mathcal{D}_{X}^{(0)})}(\mathcal{R}(F^{*}\mathcal{D}_{X}^{(0)}))\label{eq:first-map} \end{equation} is an isomorphism (the latter isomorphism follows from the fact that $\mathcal{R}(F^{*}\mathcal{D}_{X}^{(0)})$ is coherent over $\mathcal{R}(\mathcal{D}_{X}^{(0)})$). Both sides are therefore positively graded algebras over the ring $k[f]$; taking reduction mod $f$ we obtain the map $\text{gr}(\mathcal{D}_{X}^{(1)})\to\mathcal{E}nd_{\text{gr}(\mathcal{D}_{X}^{(0)})}(\text{gr}(F^{*}\mathcal{D}_{X}^{(0)}))$ which we already showed to be an isomorphism. Thus by the graded Nakayama lemma \eqref{first-map} is surjective. As both sides are $f$-torsion-free it follows that it is an isomorphism. Thus the first assertion is proved; the second follows by identifying filtered modules with graded modules over the Rees ring which are torsion-free with respect to $f$. \end{proof} \begin{rem} \label{rem:The-inverse-to-F^*}The inverse to the functor $F^{*}$ can be described as follows: via the embedding $\overline{\mathcal{D}_{X}^{(0)}}\subset\mathcal{R}(\mathcal{D}_{X}^{(1)})$, any module $\mathcal{M}$ over $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ possesses a connection which has $p$-curvature $0$. Apply this to $\mathcal{R}(\mathcal{D}_{X}^{(1)})/\mathcal{J}\cdot\mathcal{R}(\mathcal{D}_{X}^{(1)})$, where as above $\mathcal{J}\subset\overline{\mathcal{D}_{X}^{(0)}}$ denotes the annihilator of $1\in\mathcal{O}_{X}$ under the action of $\overline{\mathcal{D}_{X}^{(0)}}$ on $\mathcal{O}_{X}$. We obtain from the above argument the isomorphism \[ (\mathcal{R}(\mathcal{D}_{X}^{(1)})/\mathcal{J}\cdot\mathcal{R}(\mathcal{D}_{X}^{(1)}))^{\nabla}\tilde{=}\mathcal{R}(\mathcal{D}_{X^{(1)}}^{(0)}) \] Thus, for any such $\mathcal{M}$, the sheaf $\mathcal{M}^{\nabla}:=\text{ker}(\nabla:\mathcal{M}\to\mathcal{M})$ inherits the structure of a module over $\mathcal{R}(\mathcal{D}_{X^{(1)}}^{(0)})$. As $k$ is perfect we have an isomorphism of schemes $\sigma:X^{(1)}\to X$, and so composing with this we can obtain from $\mathcal{M}^{\nabla}$ an $\mathcal{R}(\mathcal{D}_{X}^{(0)})$-module; this is the inverse functor to $F^{*}$. \end{rem}
To close out this subsection, we'd like to discuss an important tool for studying $\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$; namely, reducing statements to their analogues in $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ and $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$. For any $\mathcal{M}\in\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$, we have a short exact sequence \[ 0\to\text{ker}(f)\to\mathcal{M}\to\mathcal{M}/\text{ker}(f)\to0 \] the module on the left is annihilated by $f$; i.e., it is a module over $\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})$ while the module on the right is annihilated by $v$; i.e., it is a module over $\mathcal{R}(\mathcal{D}_{X}^{(1)})$. This allows us to deduce many basic structural properties of $\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$ from properties of $\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)}))$ and $\mathcal{G}(\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)}))$. We now give the key technical input; to state it, we will abuse notation slightly, so that if $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)})))$ (or in $D(\mathcal{G}(\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})))$) we will use the same symbol $\mathcal{M}^{\cdot}$ to denote its image in $D(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$ \begin{prop} \label{prop:Sandwich!}1) Let $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)})))$. Suppose $\mathcal{N}\in\mathcal{G}(\mathcal{D}_{X}^{(0,1),\text{opp}})$ is quasi-rigid. Then \[ \mathcal{N}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot}\tilde{=}\mathcal{N}/v\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}^{L}\mathcal{M}^{\cdot} \] Similarly, if $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})))$ we have \[ \mathcal{N}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot}\tilde{=}\mathcal{N}/f\otimes_{\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}^{L}\mathcal{M}^{\cdot} \] The analogous statement holds for $\mathcal{N}\in\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$ quasi-rigid and $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)})^{\text{opp}}))$, resp. $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})^{\text{opp}}))$.
2) As above, let $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)})))$ and suppose $\mathcal{N}$ is quasi-rigid. Then \[ R\underline{\mathcal{H}om}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{M}^{\cdot},\mathcal{N})\tilde{=}R\underline{\mathcal{H}om}_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}(\mathcal{M}^{\cdot},\text{ker}(v:\mathcal{N}\to\mathcal{N})) \] Similarly, if $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})))$ then \[ R\underline{\mathcal{H}om}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{M},\mathcal{N})\tilde{=}R\underline{\mathcal{H}om}_{\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}(\mathcal{M},\text{ker}(f:\mathcal{N}\to\mathcal{N})) \] \end{prop}
\begin{proof}
1) Choose a flat resolution $\mathcal{F}^{\cdot}\to\mathcal{N}$ (in the category of right $\mathcal{D}_{X}^{(0,1)}$-gauges); concretely, the terms of $\mathcal{F}^{\cdot}$ are direct sums of sheaves of the form $j_{!}(\mathcal{D}_{X}^{(0,1)}(i)|_{U})$ (where $U\subset X$ is open and $j_{!}$ denotes extension by zero). Then $\mathcal{N}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot}$ is represented by the complex \[ \mathcal{F}^{\cdot}\otimes_{\mathcal{D}_{X}^{(0,1)}}\mathcal{M}^{\cdot}\tilde{=}(\mathcal{F}/v)^{\cdot}\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}\mathcal{M}^{\cdot} \] where the isomorphism follows from the fact that each term of $\mathcal{M}^{\cdot}$ is annihilated by $v$. On the other hand, $(\mathcal{F}/v)^{\cdot}$
is a complex, whose terms are direct sums of sheaves of the form $j_{!}(\mathcal{R}(\mathcal{D}_{X}^{(1)})(i)|_{U})$, which computes $\mathcal{N}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{R}(\mathcal{D}_{X}^{(1)})$. However, we have $\mathcal{N}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{R}(\mathcal{D}_{X}^{(1)})\tilde{=}\mathcal{N}/v$ by the assumption on $\mathcal{N}$ (c.f. \lemref{Basic-Facts-on-Rigid}). Therefore $(\mathcal{F}/v)^{\cdot}$ is a flat resolution (in the category of graded right $\mathcal{R}(\mathcal{D}_{X}^{(1)})$-modules) of $\mathcal{N}/v$, and so \[ (\mathcal{F}/v)^{\cdot}\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}\mathcal{M}^{\cdot}\tilde{=}\mathcal{N}/v\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}^{L}\mathcal{M}^{\cdot} \] as claimed. The case $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})))$ is essentially identical.
2) Choose an injective resolution $\mathcal{N}\to\mathcal{I}^{\cdot}$. Then we have that $R\underline{\mathcal{H}om}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{M}^{\cdot},\mathcal{N})$ is represented by \[ \underline{\mathcal{H}om}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{M}^{\cdot},\mathcal{I}^{\cdot})=\underline{\mathcal{H}om}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{M}^{\cdot},\mathcal{I}^{\cdot,v=0})=\underline{\mathcal{H}om}_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}(\mathcal{M}^{\cdot},\mathcal{I}^{\cdot,v=0}) \]
where $\mathcal{I}^{j,v=0}=\{m\in\mathcal{I}^{j}|vm=0\}$. From the isomorphism $\underline{\mathcal{H}om}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{M},\mathcal{I}^{\cdot})=\underline{\mathcal{H}om}_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}(\mathcal{M},\mathcal{I}^{\cdot,v=0})$ we see that the functor $\mathcal{I}\to\mathcal{I}^{v=0}$ takes injectives in $\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$ to injectives in $\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)}))$. On the other hand, we have $\mathcal{I}^{j,v=0}=\underline{\mathcal{H}om}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{R}(\mathcal{D}_{X}^{(1)}),\mathcal{I}^{j})$. Thus the functor $\underline{\mathcal{H}om}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{R}(\mathcal{D}_{X}^{(1)}),-)$ takes injectives in $\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$ to injectives in $\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)}))$ and so \[ R\underline{\mathcal{H}om}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{M}^{\cdot},\mathcal{N})\tilde{=}R\underline{\mathcal{H}om}_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}(\mathcal{M}^{\cdot},R\underline{\mathcal{H}om}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{R}(\mathcal{D}_{X}^{(1)}),\mathcal{N})) \] On the other hand, using the resolution \[ \cdots\to\mathcal{D}_{X}^{(0,1)}(-1)\xrightarrow{v}\mathcal{D}_{X}^{(0,1)}\xrightarrow{f}\mathcal{D}_{X}^{(0,1)}(-1)\xrightarrow{v}\mathcal{D}_{X}^{(0,1)}\to\mathcal{R}(\mathcal{D}_{X}^{(1)}) \] one deduces \[ R\underline{\mathcal{H}om}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{R}(\mathcal{D}_{X}^{(1)}),\mathcal{N})\tilde{=}\text{ker}(v:\mathcal{N}\to\mathcal{N}) \] and the first statement in $2)$ follows; the second statement is proved in an identical fashion. \end{proof} Here is a typical application: \begin{prop} \label{prop:Quasi-rigid=00003Dfinite-homological}A quasicoherent gauge $\mathcal{N}\in\mathcal{G}_{qcoh}(\mathcal{D}_{X}^{(0,1)})$ is quasi-rigid iff, for each open affine $U\subset X$, $\mathcal{N}(U)$ has finite projective dimension over $\mathcal{D}_{X}^{(0,1)}(U)$. \end{prop}
\begin{proof} Let $\mathcal{N}$ be quasi-rigid. Then for any quasicoherent $\mathcal{M}\in\mathcal{G}(\mathcal{D}_{X}^{(0,1),\text{opp}})$, we have the short exact sequence \[ 0\to\text{ker}(f)\to\mathcal{M}\to\mathcal{M}/\text{ker}(f)\to0 \] which yields the distinguished triangle \[ 0\to\mathcal{N}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\text{ker}(f)\to\mathcal{N}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}\to\mathcal{N}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}/\text{ker}(f)\to0 \] Applying the previous result, we see that the outer two tensor products are isomorphic to tensor products over $\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})$ and $\mathcal{R}(\mathcal{D}_{X}^{(1)})$, respectively. As these algebras have finite homological dimension (the dimension is $2\text{dim}(X)+1$, in fact) over any open affine, we see that $\mathcal{N}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}$ is a bounded complex; since this is true for all $\mathcal{M}$ we obtain the forward implication. For the reverse, note that by \lemref{Basic-Facts-on-Rigid}, the complex $\mathcal{R}(\mathcal{D}_{X}^{(1)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}\tilde{\to}k[f]\otimes_{D(k)}^{L}\mathcal{M}$ is unbounded whenever $\mathcal{M}$ is not quasi-rigid, which yields the reverse implication. \end{proof}
\subsection{\label{subsec:Frobenius-Descent,--Gauges}Frobenius Descent, $F^{-1}$-Gauges}
In this section we recall Berthelot's theory of Frobenius descent for $\mathcal{D}$-modules and give the definition of an $F^{-1}$-gauge over a higher dimensional base.
We begin by briefly recalling Berthelot's theory of the Frobenius action in mixed characteristic. This is developed using the theory of (mixed) divided powers in \cite{key-2}; for the reader's convenience we will recall a simple description in the case of interest to us (this point of view is emphasized in \cite{key-48}).
First suppose that $\mathfrak{X}$ admits an endomorphism $F$ which lifts the Frobenius on $X$, and whose restriction to $W(k)$ agrees with the Witt-vector Frobenius on $W(k)$. This is equivalent to giving a morphism $\mathfrak{X}\to\mathfrak{X}^{(1)}$ whose composition with the natural map $\mathfrak{X}^{(1)}\to\mathfrak{X}$ agrees with $F$ (here, $\mathfrak{X}^{(1)}$ denotes the first Frobenius twist of $\mathfrak{X}$ over $W(k)$); we will also denote the induced morphism $\mathfrak{X}\to\mathfrak{X}^{(1)}$ by $F$. On the underlying topological spaces (namely $X$ and $X^{(1)}$), this map is a bijection, and we shall consistently consider $\mathcal{O}_{\mathfrak{X}^{(1)}}$ as a sheaf of rings on $X$, equipped with an injective map of sheaves of algebras $F^{\#}:\mathcal{O}_{\mathfrak{X}^{(1)}}\to\mathcal{O}_{\mathfrak{X}}$ which makes $\mathcal{O}_{\mathfrak{X}}$ into a finite $\mathcal{O}_{\mathfrak{X}^{(1)}}$-module.
Now consider the sheaf $\mathcal{H}om_{W(k)}(\mathcal{O}_{\mathfrak{X}^{(1)}},\mathcal{O}_{\mathfrak{X}})$. For any $i\geq0$, this is a $(\mathcal{D}_{\mathfrak{X}}^{(i+1)},\mathcal{D}_{\mathfrak{X}^{(1)}}^{(i)})$ bi-module (via the actions of these rings of differential operators on $\mathcal{O}_{\mathfrak{X}}$ and $\mathcal{O}_{\mathfrak{X}^{(1)}}$, respectively). Then we have the \begin{thm} \label{thm:Berthelot-Frob}(Berthelot) The $(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1)},\mathcal{\widehat{D}}_{\mathfrak{X}^{(1)}}^{(i)})$ bi-sub-module of $\mathcal{H}om_{W(k)}(\mathcal{O}_{\mathfrak{X}^{(1)}},\mathcal{O}_{\mathfrak{X}})$ locally generated by $F^{\#}$ is isomorphic to $\mathcal{O}_{\mathfrak{X}}\otimes_{\mathcal{O}_{\mathfrak{X}^{(1)}}}\mathcal{\widehat{D}}_{\mathfrak{X}^{(1)}}^{(i)}$, via the map \[ (f,\Phi)\to f\circ F^{\#}\circ\Phi\in\mathcal{H}om_{W(k)}(\mathcal{O}_{\mathfrak{X}^{(1)}},\mathcal{O}_{\mathfrak{X}}) \] for local sections $f\in\mathcal{O}_{\mathfrak{X}}$ and $\Phi\in\mathcal{\widehat{D}}_{\mathfrak{X}^{(1)}}^{(i)}$. This gives the sheaf $\mathcal{O}_{\mathfrak{X}}\otimes_{\mathcal{O}_{\mathfrak{X}^{(1)}}}\mathcal{\widehat{D}}_{\mathfrak{X}^{(1)}}^{(i)}=F^{*}\mathcal{\widehat{D}}_{\mathfrak{X}^{(1)}}^{(i)}$ the structure of a $(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1)},\mathcal{\widehat{D}}_{\mathfrak{X}^{(1)}}^{(i)})$ bimodule. The associated functor, denoted $F^{*}$, \[ \mathcal{M}\to F^{*}\mathcal{\widehat{D}}_{\mathfrak{X}^{(1)}}^{(i)}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}^{(1)}}^{(i)}}\mathcal{M}\tilde{=}F^{*}\mathcal{M} \] is an equivalence of categories from $\mathcal{\widehat{D}}_{\mathfrak{X}^{(1)}}^{(i)}-\text{mod}$ to $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1)}-\text{mod}$, which induces an equivalence $\text{Coh}(\mathcal{\widehat{D}}_{\mathfrak{X}^{(1)}}^{(i)})\to\text{Coh}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1)})$. In particular, the map $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(i+1)}\to\mathcal{E}nd_{\mathcal{\widehat{D}}_{\mathfrak{X}^{(1)}}^{(i),\text{op}}}(F^{*}\mathcal{\widehat{D}}_{\mathfrak{X}^{(1)}}^{(i)})$ is an isomorphism of sheaves of algebras.
As $k$ is perfect, the Witt-vector Frobenius is an automorphism of $W(k)$ and we have an isomorphism $\mathfrak{X}^{(1)}\tilde{\to}\mathfrak{X}$; we may therefore regard $F^{*}$ as an equivalence of categories from $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}-\text{mod}$ to $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1)}-\text{mod}$. \end{thm}
This is proved in \cite{key-2}, section 2.3. In fact, in the case where $\mathfrak{X}=\text{Specf}(\mathcal{A})$ is affine and admits etale local coordinates, and the map $F$ acts on coordinates $\{t_{i}\}_{i=1}^{n}$ via $F(t_{i})=t_{i}^{p}$, the first assertion can be proved quite directly. The second assertion is the theory of \cite{key-2}. Note that this description implies that the reduction mod $p$ of the bimodule $F^{*}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0)}$ agrees with the bimodule $F^{*}\mathcal{D}_{X}^{(0)}$ constructed in \propref{Basic-F^*-over-k}. \begin{rem} \label{rem:Compare-With-Berthelot}1) Let $\mathcal{D}_{X,\mathbf{Ber}}^{(1)}$ denote Berthelot's ring of divided power differential operators of level $1$ on $X$. Then the Frobenius descent theory of the previous theorem gives an isomorphism \[ \mathcal{D}_{X,\mathbf{Ber}}^{(1)}\to\mathcal{E}nd_{\mathcal{D}_{X}^{(0),\text{op}}}(F^{*}\mathcal{D}_{X}^{(0)}) \] It follows that $\mathcal{D}_{X,\mathbf{Ber}}^{(1)}\tilde{=}\mathcal{D}_{X}^{(1)}$ even if $X$ is not liftable to $W(k)$.
2) The Frobenius descent over $X$ implies the Frobenius descent over $\mathfrak{X}$, once one constructs the $(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(1)},\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0)})$ bimodule structure on $F^{*}\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$. Indeed, this structure yields a morphism \[ \widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)}\to\mathcal{E}nd_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0),\text{op}}}(F^{*}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0)}) \] as both sides are $p$-adically complete and $p$-torsion-free, to check that this map is an isomorphism one simply has to reduce mod $p$. \end{rem}
Now let us return to a general $\mathfrak{X}$. It is a fundamental fact that Frobenius descent doesn't really depend on the existence of the lift $F$: \begin{thm} (Berthelot) Suppose $F_{1},F_{2}$ are two lifts of Frobenius on $\mathfrak{X}$. Then there is an isomorphism of bimodules $\sigma_{1,2}:F_{1}^{*}\mathcal{D}_{\mathfrak{X}}^{(i)}\tilde{\to}F_{2}^{*}\mathcal{D}_{\mathfrak{X}}^{(i)}$. If $F_{3}$ is a third lift, we have $\sigma_{2,3}\circ\sigma_{1,2}=\sigma_{1,3}$. \end{thm}
This is \cite{key-2}, theorem 2.2.5; c.f. also \cite{key-21}, corollary 13.3.8. As lifts of Frobenius always exist locally, this implies that there is a globally defined bimodule $F^{*}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}$, which induces an equivalence $F^{*}:\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}-\text{mod}\to\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1)}-\text{mod}$; we use the same letter to denote the derived equivalence $F^{*}:D(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}-\text{mod})\to D(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1)}-\text{mod})$.
The equivalence of categories $F^{*}$ has many remarkable properties, in particular its compatibility with the push-forward, pullback, and duality functors for $\mathcal{D}$-modules; we will recall these properties in the relevant sections below.
It will also be useful to recall some basic facts about the right-handed version of the equivalence. Recall that we have equivalences of categories $\mathcal{M}\to\omega_{\mathfrak{X}}\otimes_{\mathcal{O}_{\mathfrak{X}}}\mathcal{M}$ from $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}-\text{mod}$ to $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i),\text{op}}-\text{mod}$ for any $i$ (c.f, \cite{key-1}, or \propref{Left-Right-Swap} below). This implies that there is a functor $\mathcal{M}\to F^{!}\mathcal{M}:=\omega_{\mathfrak{X}}\otimes_{\mathcal{O}_{\mathfrak{X}}}F^{*}(\omega_{\mathfrak{X}}^{-1}\otimes_{\mathcal{O}_{\mathfrak{X}}}\mathcal{M})$ which is an equivalence from $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i),\text{op}}-\text{mod}$ to $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1),\text{op}}-\text{mod}$. By basic Grothendieck duality theory (c.f. \cite{key-2}, 2.4.1), there is an isomorphism \[ F^{!}\mathcal{M}\tilde{=}F^{-1}\mathcal{H}om_{\mathcal{O}_{\mathfrak{X}}}(F_{*}\mathcal{O}_{\mathfrak{X}},\mathcal{M}) \] of sheaves of $\mathcal{O}_{\mathfrak{X}}$-modules (this justifies the notation). If we put $\mathcal{M}=\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}$ this isomorphism exhibits the left $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}$-module structure on $F^{!}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}$. \begin{prop} \label{prop:F^*F^!}1) The equivalence of categories $F^{!}:\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i),\text{op}}-\text{mod}\to\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1),\text{op}}-\text{mod}$ is given by $\mathcal{M}\to\mathcal{M}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}}F^{!}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}$.
2) There are isomorphisms of $(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1)},\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1)})$ bimodules $F^{*}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}}F^{!}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}=F^{*}F^{!}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}\tilde{\to}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1)}$ and $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1)}\tilde{\leftarrow}F^{!}F^{*}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}=F^{*}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}}F^{!}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}$. In particular, for a $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1)}$-module $\mathcal{M}$, we have $\mathcal{M}=F^{*}\mathcal{N}$ iff $F^{!}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i+1)}}\mathcal{M}\tilde{=}\mathcal{N}$. \end{prop}
This is proved in \cite{key-2}, 2.5.1 (c.f. also \cite{key-21}, lemma 13.5.1). Further, by applying the Rees functor it directly implies the analogue for the filtered Frobenius descent of \thmref{Filtered-Frobenius}: \begin{cor} \label{cor:Filtered-right-Frob}There is an equivalence of categories $F^{!}:\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(0)})^{\text{op}})\to\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)})^{\text{op}})$; which yields a $(\mathcal{R}(\mathcal{D}_{X}^{(0)}),\mathcal{R}(\mathcal{D}_{X}^{(1)}))$ bimodule $F^{!}\mathcal{R}(\mathcal{D}_{X}^{(0)})$. We have isomorphisms of $(\mathcal{R}(\mathcal{D}_{X}^{(1)}),\mathcal{R}(\mathcal{D}_{X}^{(1)}))$ bimodules \[ F^{!}F^{*}\mathcal{R}(\mathcal{D}_{X}^{(0)})\tilde{\to}\mathcal{R}(\mathcal{D}_{X}^{(1)})\leftarrow F^{*}F^{!}\mathcal{R}(\mathcal{D}_{X}^{(0)}) \] \end{cor}
Now we proceed to the \begin{defn} \label{def:Gauge-Defn!}An $F^{-1}$-gauge over $\mathfrak{X}$ is an object of $\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)})$ equipped with an isomorphism $F^{*}\mathcal{M}^{-\infty}\tilde{\to}\widehat{\mathcal{M}^{\infty}}$ (here $\widehat{?}$ denotes $p$-adic completion). A coherent $F^{-1}$-gauge is an $F^{-1}$-gauge whose underlying $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}$-module is coherent. We define the category of $F^{-1}$-gauges, $\mathcal{G}_{F^{-1}}(\mathcal{D}_{\mathfrak{X}}^{(0,1)})$ by demanding that morphisms between $F^{-1}$-gauges respect the $F^{-1}$-structure (as in \defref{F-gauge}), and similarly for the category of coherent $F^{-1}$-gauges, $\mathcal{G}_{F^{-1},coh}(\mathcal{D}_{\mathfrak{X}}^{(0,1)})$.
Similarly, an $F^{-1}$-gauge over $X$ is an object of $\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$ equipped with an isomorphism $F^{*}\mathcal{M}^{-\infty}\tilde{\to}\mathcal{M}^{\infty}$, and for the category $\mathcal{G}_{F^{-1}}(\mathcal{D}_{X}^{(0,1)})$ we demand that morphisms between $F^{-1}$-gauges respect the $F^{-1}$-structure. We have the obvious subcategories of quasi-coherent and coherent gauges. \end{defn}
In the world of coherent gauges, we have seen in \propref{Completion-for-noeth} that completion is an exact functor. Therefore, the category of coherent $F^{-1}$-gauges over $\mathfrak{X}$ is abelian; the same does not seem to be true for the category of all gauges over $\mathfrak{X}$. On the other hand, the category of all $F^{-1}$-gauges over $X$ is abelian, as are the categories of coherent and quasicoherent $F^{-1}$-gauges.
Now let us turn to the derived world: \begin{defn} \label{def:F-gauge-for-complexes}A complex $\mathcal{M}^{\cdot}$ in $D(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$ is said to admit the structure of an $F^{-1}$-gauge if there is an isomorphism $F^{*}(\mathcal{M}^{\cdot})^{-\infty}\tilde{\to}\widehat{(\mathcal{M}^{\cdot})^{\infty}}$, where $\widehat{}$ denotes the cohomological completion. Similarly, we say that a complex $\mathcal{M}^{\cdot}$ in $D(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$ admits the structure of an $F^{-1}$-gauge if there is an isomorphism $F^{*}(\mathcal{M}^{\cdot})^{-\infty}\tilde{\to}(\mathcal{M}^{\cdot})^{\infty}$. We will use a subscript $F^{-1}$ to denote the relevant categories; e.g. $D_{F^{-1}}(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$. \end{defn}
These are not triangulated categories in general, though there is an obvious functor $D^{b}(\mathcal{G}_{F^{-1},coh}(\mathcal{D}_{\mathfrak{X}}^{(0,1)}))\to D_{coh,F^{-1}}^{b}(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$ (and similarly for $X$). To give the correct triangulated analogue of \defref{Gauge-Defn!} one must use higher homotopy theory; namely, the glueing of $\infty$-categories along a pair of functors. I intend to pursue this in a later project. For the purposes of this paper, \defref{F-gauge-for-complexes} will suffice. \begin{rem} \label{rem:Cut-off-for-F-gauges}Suppose $\mathcal{M}^{\cdot}\in D_{coh,F^{-1}}^{b}(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$. Then by \propref{Completion-for-noeth} $\widehat{\mathcal{H}^{i}(\mathcal{M}^{\cdot})^{\infty}}\tilde{=}\mathcal{H}^{i}(\widehat{\mathcal{M}^{\cdot,\infty}})$. Therefore $\mathcal{H}^{i}(\mathcal{M}^{\cdot})$ admits the structure of an $F^{-1}$-gauge for each $i$. Further, as both $F^{*}$ and the completion functor are exact, we have that $\tau_{\leq i}(\mathcal{M}^{\cdot})$ and $\tau_{\geq i}(\mathcal{M}^{\cdot})$ are again contained in $D_{coh,F^{-1}}^{b}(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$, where $\tau_{\leq i},\tau_{\geq i}$ are the cut-off functors. \end{rem}
Given this, we can give the more refined version of Mazur's theorem for $F^{-1}$-gauges: \begin{thm} \label{thm:F-Mazur}Let $\mathcal{M}^{\cdot}\in D_{\text{coh},F^{-1}}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$. Suppose that $\mathcal{H}^{n}(\mathcal{M}^{\cdot})^{-\infty}$ is $p$-torsion-free for all $n$, and suppose that $\mathcal{H}^{n}((\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f])$ is $f$-torsion-free for all $n$. Then $\mathcal{H}^{n}(\mathcal{M}^{\cdot})$ is standard for all $n$.
In particular, $\mathcal{H}^{n}(\mathcal{M}^{\cdot})$ is $p$-torsion-free, and $\mathcal{H}^{n}(\mathcal{M}^{\cdot})/p$ is rigid for all $n$. We have $\mathcal{H}^{n}(\mathcal{M}^{\cdot})/p\tilde{=}\mathcal{H}^{n}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)$, $(\mathcal{H}^{n}(\mathcal{M}^{\cdot})/p)/v\tilde{=}\mathcal{H}^{n}((\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f])$, and $(\mathcal{H}^{n}(\mathcal{M}^{\cdot})/p)/f\tilde{=}\mathcal{H}^{n}((\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[v])$ for all $n$. Further, $(\mathcal{H}^{n}(\mathcal{M}^{\cdot})/p)/f$ is $v$-torsion-free and $(\mathcal{H}^{n}(\mathcal{M}^{\cdot})/p)/v$ is $f$-torsion-free for all $n$. \end{thm}
\begin{proof} This follows from \thmref{Mazur!} if we can show that $\mathcal{H}^{n}(\mathcal{M}^{\cdot})^{\infty}\tilde{=}\mathcal{H}^{n}(\mathcal{M}^{\cdot,\infty})$ is also $p$-torsion-free for all $n$. Since $\mathcal{M}^{\cdot}\in D_{coh,F^{-1}}^{b}(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$, we have that the cohomological completion $\mathcal{\widehat{M}}^{\cdot,\infty}$ of the complex $\mathcal{M}^{\cdot,\infty}$ is isomorphic to $F^{*}(\mathcal{M}^{\cdot,-\infty})$; and this complex has $p$-torsion-free cohomologies by the assumption. Since $\mathcal{M}^{\cdot,\infty}$ is a bounded complex with coherent cohomology sheaves, by \propref{Completion-for-noeth} we have that $\mathcal{H}^{n}(\mathcal{\widehat{M}}^{\cdot,\infty})\tilde{=}\widehat{\mathcal{H}^{n}(\mathcal{M}^{\cdot,\infty})}$, where the completion on the right denotes the usual $p$-adic completion. But the module $\mathcal{H}^{n}(\mathcal{M}^{\cdot,\infty})$, being coherent, is $p$-torsion-free iff its $p$-adic completion is. Thus each $\mathcal{H}^{n}(\mathcal{M}^{\cdot,\infty})$ is $p$-torsion-free as desired. \end{proof} In the case where $\mathfrak{X}=\text{Specf}(W(k))$ is a point, and $\mathcal{M}^{\cdot}$ is the gauge coming from the cohomology of some smooth proper formal scheme $\mathfrak{Y}$ over $W(k)$ (this exists by \thmref{=00005BFJ=00005D}, and we'll construct it, in the language of this paper, in \secref{Push-Forward} below), this is exactly the content of \thmref{(Mazur)}; indeed, the first assumption is that $\mathbb{H}_{dR}^{i}(\mathfrak{Y})$ is $p$-torsion-free for all $i$, and the second assumption is the degeneration of the Hodge to de Rham spectral sequence.
\subsection{Examples of Gauges}
We close out this chapter by giving a few important examples of gauges, beyond $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ itself. \begin{example} Let $\mathfrak{X}$ be a smooth formal scheme. Then $D(\mathcal{O}_{\mathfrak{X}})\in\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})$ by the very definition of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$
; indeed, we have $D(\mathcal{O}_{\mathfrak{X}}){}^{i}=\{g\in\mathcal{O}_{\mathfrak{X}}|p^{i}g\in\mathcal{O}_{\mathfrak{X}}\}$ so that the natural action of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}[p^{-1}]$ on $\mathcal{O}_{\mathfrak{X}}[p^{-1}]$ induces the action of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ on $\mathcal{O}_{\mathfrak{X}}[f,v]$. This is an $F^{-1}$-gauge via the isomorphism $F^{*}\mathcal{O}_{\mathfrak{X}}\tilde{\to}\mathcal{O}_{\mathfrak{X}}$. \end{example}
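Unwinding this definition (a brief sketch; here we assume, as in the examples below, that $f$ acts through the evident inclusions and $v$ through multiplication by $p$, so that $fv=vf=p$), we have
\[
D(\mathcal{O}_{\mathfrak{X}})^{i}=\begin{cases}
\mathcal{O}_{\mathfrak{X}}, & i\geq0,\\
p^{-i}\mathcal{O}_{\mathfrak{X}}, & i<0,
\end{cases}
\]
so that $f$ is an isomorphism in degrees $\geq0$, $v$ is an isomorphism onto its image in degrees $\leq0$, and $D(\mathcal{O}_{\mathfrak{X}})^{\infty}\tilde{=}\mathcal{O}_{\mathfrak{X}}\tilde{=}D(\mathcal{O}_{\mathfrak{X}})^{-\infty}$; the isomorphism $F^{*}\mathcal{O}_{\mathfrak{X}}\tilde{\to}\mathcal{O}_{\mathfrak{X}}$ then indeed supplies the $F^{-1}$-structure.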
To generalize this, suppose $\mathfrak{D}\subset\mathfrak{X}$ is a locally normal crossings divisor. Let $\mathfrak{U}$ be the complement of $\mathfrak{D}$. Denote the inclusion map by $j$. We are going to define a coherent $F^{-1}$-gauge ${\displaystyle j_{\star}D(\mathcal{O}_{\mathfrak{U}})}$, whose cohomology is the gauge version of the log de Rham cohomology of $\mathfrak{X}$ with respect to $\mathfrak{D}$.
To proceed, let $\mathfrak{V}\subset\mathfrak{X}$ be an affine open, on which there are local coordinates $\{x_{1},\dots,x_{n}\}$ in which the divisor $\mathfrak{D}$ is given by $\{x_{1}\cdots x_{j}=0\}$. Then (starting with the action of finite-order differential operators), we may consider the $D_{\mathfrak{V}}^{(0)}$-submodule of $\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}]$ generated by $x_{1}^{-1}\cdots x_{j}^{-1}$; it is easily seen to be independent of the choice of coordinates; hence we obtain a well-defined
$D_{\mathfrak{V}}^{(0)}$-module denoted ${\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{U}})}^{\text{fin}}$; and we define the $\mathcal{\widehat{D}}_{\mathfrak{V}}^{(0)}$-module, denoted $(j_{\star}\mathcal{O}_{\mathfrak{U}})^{-\infty}|_{\mathfrak{V}}$, to be the $p$-adic completion of ${\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{U}})}^{\text{fin}}$. By glueing we obtain a coherent $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0)}$-module $(j_{\star}\mathcal{O}_{\mathfrak{U}})^{-\infty}$. We have \begin{lem}
\label{lem:Injectivity-of-completion}For any $\mathfrak{V}$ as above, the natural map $(j_{\star}\mathcal{O}_{\mathfrak{U}})^{-\infty}|_{\mathfrak{V}}\to\widehat{(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])}$ (where $\widehat{}$ denotes $p$-adic completion) is injective. \end{lem}
We'll give a proof of this rather technical result in \secref{Appendix:-an-Inectivity}. From this we deduce \begin{lem} \label{lem:Hodge-filt-on-log}Let $F$ be a lift of Frobenius satisfying $F(x_{i})=x_{i}^{p}$ for all $1\leq i\leq n$. Then the natural map
$F^{*}(j_{\star}\mathcal{O}_{\mathfrak{U}})^{-\infty}|_{\mathfrak{V}}\to\widehat{(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])}$ is injective, and its image is the $\widehat{\mathcal{D}}_{\mathfrak{V}}^{(1)}$-submodule generated by $x_{1}^{-1}\cdots x_{j}^{-1}$. \end{lem}
\begin{proof}
For each $r>0$ we have an isomorphism $F^{*}(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}]/p^{r})\tilde{\to}\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}]/p^{r}$; upon taking the inverse limit we obtain $F^{*}\widehat{(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])}\tilde{\to}\widehat{(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])}$. Since $F^{*}$ is an exact, conservative functor on $\mathcal{O}_{\mathfrak{V}}-\text{mod}$, the previous lemma implies that $F^{*}(j_{\star}\mathcal{O}_{\mathfrak{U}})^{-\infty}|_{\mathfrak{V}}\to\widehat{(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])}$ is injective. Since the image of $(j_{\star}\mathcal{O}_{\mathfrak{U}})^{-\infty}|_{\mathfrak{V}}\to\widehat{(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])}$ is the $\widehat{\mathcal{D}}_{\mathfrak{V}}^{(0)}$-submodule generated by $x_{1}^{-1}\cdots x_{j}^{-1}$, the image of $F^{*}(j_{\star}\mathcal{O}_{\mathfrak{U}})^{-\infty}|_{\mathfrak{V}}\to\widehat{(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])}$ is the $\widehat{\mathcal{D}}_{\mathfrak{V}}^{(1)}$-submodule generated by $F(x_{1}^{-1}\cdots x_{j}^{-1})=x_{1}^{-p}\cdots x_{j}^{-p}$. But since $\partial_{i}^{[p]}x_{i}^{-1}=-x_{i}^{-p-1}$ we see that $\widehat{\mathcal{D}}_{\mathfrak{V}}^{(1)}\cdot x_{1}^{-1}\cdots x_{j}^{-1}=\widehat{\mathcal{D}}_{\mathfrak{V}}^{(1)}\cdot x_{1}^{-p}\cdots x_{j}^{-p}$ as claimed. \end{proof} We can now construct the full gauge ${\displaystyle j_{\star}\mathcal{O}_{\mathfrak{U}}[f,v]}$ as follows: denote by $\widehat{(j_{\star}\mathcal{O}_{\mathfrak{U}})^{\infty}}$ the $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)}$-submodule of $\widehat{(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])}$ locally generated by $x_{1}^{-1}\cdots x_{j}^{-1}$; as above this is independent of the choice of coordinates for the divisor $\mathfrak{D}$. Then we have \begin{example}
\label{exa:Integral-j} Define $(j_{\star}D(\mathcal{O}_{\mathfrak{U}}))^{i}:=\{m\in\widehat{(j_{\star}\mathcal{O}_{\mathfrak{U}})^{\infty}}\,|\,p^{i}m\in(j_{\star}\mathcal{O}_{\mathfrak{U}})^{-\infty}\}$. By the above discussion this is an object in $\text{Coh}_{F^{-1}}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})$ via the isomorphism $F^{*}(j_{\star}\mathcal{O}_{\mathfrak{U}})^{-\infty}\tilde{\to}\widehat{(j_{\star}\mathcal{O}_{\mathfrak{U}})^{\infty}}$. Let $j_{\star}D(\mathcal{O}_{U})$ denote the reduction mod $p$. We claim that the $l$th term of the Hodge filtration on $(j_{\star}D(\mathcal{O}_{U}))^{\infty}$ is given by $F^{*}(F^{l}(\mathcal{D}_{X}^{(0)})\cdot(x_{1}^{-1}\cdots x_{j}^{-1}))$, where $F^{l}\mathcal{D}_{X}^{(0)}$ is the $l$th term of the symbol filtration.
To see this, we work again in local coordinates over $\mathfrak{V}$. One computes that $(\partial_{i}^{[p]})^{l}(x_{i}^{-p})=u\cdot l!\,x_{i}^{-p(l+1)}$ where $u$ is a unit in $\mathbb{Z}_{p}$. Therefore the module $D_{\mathfrak{V}}^{(1)}\cdot x_{1}^{-1}\cdots x_{j}^{-1}=D_{\mathfrak{V}}^{(1)}\cdot x_{1}^{-p}\cdots x_{j}^{-p}$ is spanned over $\mathcal{O}_{\mathfrak{V}}$ by terms of the form $I!\cdot x_{1}^{-p(i_{1}+1)}\cdots x_{j}^{-p(i_{j}+1)}$; the $p$-adic completion of this module is $\widehat{(j_{\star}\mathcal{O}_{\mathfrak{U}})^{\infty}}$.
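For instance, for $l=1$ (the general case follows by iterating this computation), writing $\partial_{i}^{[p]}=\frac{1}{p!}\partial_{i}^{p}$ as operators on the $p$-torsion-free module $\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}]$, one finds
\[
\partial_{i}^{[p]}(x_{i}^{-p})=\frac{1}{p!}\cdot(-p)(-p-1)\cdots(-2p+1)\,x_{i}^{-2p}=(-1)^{p}\binom{2p-1}{p}x_{i}^{-2p},
\]
and $\binom{2p-1}{p}$ has $p$-adic valuation zero, i.e. it is a unit in $\mathbb{Z}_{p}$.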
For a multi-index $I$, set $\tilde{I}=(pi_{1}+p-1,\dots,pi_{j}+p-1)$. Then \linebreak{} $I!\cdot x_{1}^{-p(i_{1}+1)}\cdots x_{j}^{-p(i_{j}+1)}\in({\displaystyle j_{\star}D(\mathcal{O}_{\mathfrak{U}})})^{r}$ iff $p^{r}\cdot I!\cdot x_{1}^{-p(i_{1}+1)}\cdots x_{j}^{-p(i_{j}+1)}\in\mathcal{\widehat{D}}_{\mathfrak{V}}^{(0)}\cdot x_{1}^{-1}\cdots x_{j}^{-1}$ . Furthermore, it is not difficult to see that $p^{r}\cdot I!\cdot x_{1}^{-p(i_{1}+1)}\cdots x_{j}^{-p(i_{j}+1)}\in\mathcal{\widehat{D}}_{\mathfrak{V}}^{(0)}\cdot x_{1}^{-1}\cdots x_{j}^{-1}$ iff $p^{r}\cdot I!\cdot x_{1}^{-p(i_{1}+1)}\cdots x_{j}^{-p(i_{j}+1)}\in\mathcal{D}_{\mathfrak{V}}^{(0)}\cdot x_{1}^{-1}\cdots x_{j}^{-1}$; in turn, this holds iff $r\geq\text{val}(\tilde{I}!)-\text{val}(I!)$ (since $\mathcal{D}_{\mathfrak{V}}^{(0)}\cdot x_{1}^{-1}\cdots x_{j}^{-1}$ is spanned by terms of the form $I!x_{1}^{-(i_{1}+1)}\cdots x_{j}^{-(i_{j}+1)}$); here $\text{val}$ denotes the usual $p$-adic valuation; so that $\text{val}(p)=1$.
On the other hand one has \[ \text{val}((pi+p-1)!)-\text{val}(i!)=i \] for all $i\geq0$. So ${\displaystyle \text{val}(\tilde{I}!)-\text{val}(I!)=\sum_{t=1}^{j}i_{t}}$ which implies $I!\cdot x_{1}^{-p(i_{1}+1)}\cdots x_{j}^{-p(i_{j}+1)}\in({\displaystyle j_{\star}D(\mathcal{O}_{\mathfrak{U}})})^{r}$ iff ${\displaystyle r\geq\sum_{t=1}^{j}i_{t}}$.
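To verify the displayed valuation identity, one may use Legendre's formula $\text{val}(n!)=\sum_{k\geq1}\lfloor n/p^{k}\rfloor$: for $n=p(i+1)-1$ the $k=1$ term is $\lfloor(p(i+1)-1)/p\rfloor=i$, while for $k\geq2$ one checks $\lfloor(p(i+1)-1)/p^{k}\rfloor=\lfloor i/p^{k-1}\rfloor$, so that
\[
\text{val}((pi+p-1)!)=i+\sum_{k\geq2}\left\lfloor \frac{i}{p^{k-1}}\right\rfloor =i+\text{val}(i!).
\]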
On the other hand, $F^{l}(\mathcal{D}_{\mathfrak{X}}^{(0)})\cdot(x_{1}^{-1}\cdots x_{j}^{-1})$ is spanned over $\mathcal{O}_{\mathfrak{V}}$ by terms of the form $I!\cdot x_{1}^{-(i_{1}+1)}\cdots x_{j}^{-(i_{j}+1)}$ where ${\displaystyle \sum_{t=1}^{j}i_{t}\leq l}$. Thus the module $F^{*}(F^{l}(\mathcal{D}_{X}^{(0)})\cdot(x_{1}^{-1}\cdots x_{j}^{-1}))$ is exactly the image in $(j_{\star}D(\mathcal{O}_{U}))^{\infty}$ of $(j_{\star}D(\mathcal{O}_{U}))^{l}$, which is the claim. \end{example}
Finally, we end with an example of a standard, coherent gauge which definitely does not admit an $F^{-1}$-action: \begin{example} \label{exa:Exponential!} Let $\mathfrak{X}=\widehat{\mathbb{A}_{W(k)}^{1}}$. Consider the $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$-module $e^{x}$; i.e., the sheaf $\mathcal{O}_{\mathfrak{X}}$ equipped with the action determined by \[ \sum_{i=0}^{\infty}a_{i}\partial^{i}\cdot1=\sum_{i=0}^{\infty}a_{i} \] (here $a_{i}\to0$ as $i\to\infty$). Then $\mathcal{O}_{\mathfrak{X}}[p^{-1}]$ is a coherent $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),\infty}$-module since $\partial^{[p]}\cdot1=(p!)^{-1}$; it has a $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$-lattice given by $\mathcal{O}_{\mathfrak{X}}$. Thus by \exaref{Basic-Construction-over-X} we may define \[
(e^{x})^{i}:=\{m\in\mathcal{O}_{\mathfrak{X}}[p^{-1}]|p^{i}m\in\mathcal{O}_{\mathfrak{X}}\} \] and we obtain a gauge, also denoted $e^{x}$, such that $(e^{x})^{i}\tilde{=}\mathcal{O}_{\mathfrak{X}}$ for all $i$, and such that $v$ is an isomorphism for all $i$, while $f$ is given by multiplication by $p$. We have $(e^{x})^{-\infty}\tilde{=}\mathcal{O}_{\mathfrak{X}}$ while $(e^{x})^{\infty}=\mathcal{O}_{\mathfrak{X}}[p^{-1}]$. \end{example}
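Spelling out the last assertions (using, as above, that $f$ acts through the evident inclusions and $v$ through multiplication by $p$, so that $fv=vf=p$): we have $(e^{x})^{i}=p^{-i}\mathcal{O}_{\mathfrak{X}}\subset\mathcal{O}_{\mathfrak{X}}[p^{-1}]$, and identifying each $(e^{x})^{i}$ with $\mathcal{O}_{\mathfrak{X}}$ via multiplication by $p^{i}$, the inclusion $f:(e^{x})^{i}\to(e^{x})^{i+1}$ becomes multiplication by $p$, while $v:(e^{x})^{i}\to(e^{x})^{i-1}$ becomes the identity; taking colimits gives $(e^{x})^{\infty}=\mathcal{O}_{\mathfrak{X}}[p^{-1}]$ and $(e^{x})^{-\infty}\tilde{=}\mathcal{O}_{\mathfrak{X}}$.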
This example indicates that the ``exponential Hodge theory'' appearing, e.g., in Sabbah's work \cite{key-22}, could also be a part of this story; this should be interesting to pursue in future work.
\section{\label{sec:Operations:PullBack}Operations on Gauges: Pull-back}
Let $\varphi:\mathfrak{X}\to\mathfrak{Y}$ be a morphism of smooth formal schemes over $W(k)$. Let us begin by setting our conventions on the pullback of $\mathcal{O}$-modules: \begin{defn} \label{def:Correct-Pullback}1) If $\mathcal{M}\in\mathcal{O}_{\mathfrak{Y}}-\text{mod}$, we set $\varphi^{*}\mathcal{M}:=\mathcal{O}_{\mathfrak{X}}\widehat{\otimes}_{\varphi^{-1}(\mathcal{O}_{\mathfrak{Y}})}\varphi^{-1}(\mathcal{M})$, the $p$-adic completion of the naive tensor product. If $\mathcal{M}^{\cdot}\in D(\mathcal{O}_{\mathfrak{Y}})$, then we define $L\varphi^{*}\mathcal{M}^{\cdot}:=\mathcal{O}_{\mathfrak{X}}\widehat{\otimes}_{\varphi^{-1}(\mathcal{O}_{\mathfrak{Y}})}^{L}\varphi^{-1}(\mathcal{M}^{\cdot})$, the cohomological completion of the usual derived tensor product.
2) Consider $D(\mathcal{O}_{\mathfrak{X}})$ and $D(\mathcal{O}_{\mathfrak{Y}})$ as graded sheaves of rings as usual. If $\mathcal{M}^{\cdot}\in D(\mathcal{G}(D(\mathcal{O}_{\mathfrak{Y}})))$, then we define $L\varphi^{*}\mathcal{M}^{\cdot}:=D(\mathcal{O}_{\mathfrak{X}})\widehat{\otimes}_{\varphi^{-1}(D(\mathcal{O}_{\mathfrak{Y}}))}^{L}\varphi^{-1}(\mathcal{M}^{\cdot})$, the graded cohomological completion of the usual derived tensor product. \end{defn}
\begin{rem} The reader will note several inconsistencies in these notations. First of all, we do not, in general, have $\mathcal{H}^{0}(L\varphi^{*}\mathcal{M})=\varphi^{*}\mathcal{M}$. Furthermore, the functor $L\varphi^{*}$ does not commute with the forgetful functor from graded $\mathcal{O}[f,v]$-modules to $\mathcal{O}$-modules. However, we will only use the underived $\varphi^{*}$ in a few very special cases (c.f. the lemma directly below), when in fact the equality $\mathcal{H}^{0}(L\varphi^{*}\mathcal{M})=\varphi^{*}\mathcal{M}$ does hold. Further, we will only apply the graded functor when working with a graded module; and this will almost always be the case. Hopefully this notational scheme does not cause any undue confusion. \end{rem}
Now we should check that this operation behaves well on the basic objects of interest in our paper: \begin{lem} \label{lem:phi-pullback-of-D^i}For each $i\in\mathbb{Z}$ we have \[ L\varphi^{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1),i})\tilde{=}\varphi^{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1),i})\tilde{=}\lim_{n}(\mathcal{O}_{\mathfrak{X}_{n}}\otimes_{\varphi^{-1}(\mathcal{O}_{\mathfrak{Y}_{n}})}\varphi^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1),i}/p^{n})) \] In particular, we have \[ \mathcal{H}^{0}(L\varphi^{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1),i}))\tilde{=}\varphi^{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1),i}) \]
under the conventions of \defref{Correct-Pullback}. The same holds if we replace $\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1),i}$ by $\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(j)}$ for any $j\geq0$. \end{lem}
\begin{proof} As this question is local, we can assume $\mathfrak{X}=\text{Specf}(\mathcal{B})$ and $\mathfrak{Y}=\text{Specf}(\mathcal{A})$ where $\mathcal{A}$ possess local coordinates $\{t_{1},\dots,t_{n}\}$. By definition we have that $\widehat{D}_{\mathcal{A}}^{(0,1),i}$ is the $p$-adic completion of $D_{\mathcal{A}}^{(0,1),i}$. By \corref{Each-D^(i)-is-free} we have that $D_{\mathcal{A}}^{(0,1),i}$ is free over $\mathcal{A}$. In particular, it is $p$-torsion free and $p$-adically separated; and so by \cite{key-8}, lemma 1.5.4 its cohomological completion is equal to $\widehat{D}_{\mathcal{A}}^{(0,1),i}$. Therefore we have the short exact sequence \[ D_{\mathcal{A}}^{(0,1),i}\to\widehat{D}_{\mathcal{A}}^{(0,1),i}\to K \] where $p$ acts invertibly on $K$. Now we apply the functor $\mathcal{B}\otimes_{\mathcal{A}}^{L}$. By \cite{key-8}, theorem 1.6.6, we have that $\widehat{D}_{\mathcal{A}}^{(0,1),i}$ is flat over $\mathcal{A}$. Thus we see that $\mathcal{B}\widehat{\otimes}_{\mathcal{A}}^{L}\mathcal{\widehat{D}}_{\mathcal{A}}^{(0,1),i}$, the cohomological completion of $\mathcal{B}\otimes_{\mathcal{A}}^{L}\widehat{D}_{\mathcal{A}}^{(0,1),i}$, is isomorphic to the cohomological completion of $\mathcal{B}\otimes_{\mathcal{A}}^{L}\mathcal{D}_{\mathcal{A}}^{(0,1),i}$, which is just the usual $p$-adic completion since this is a free $\mathcal{B}$-module, and the statement follows. An identical argument works for $\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(j)}$. \end{proof} Now let $j\geq0$. We recall that, for each $j\geq0$, Berthelot has constructed a pullback functor $\varphi^{!,(j)}$ from $\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(j)}-\text{mod}$ to $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(j)}-\text{mod}$. In fact, in \cite{key-2}, section 3.2, he has shown that $\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(j)}:=\varphi^{*}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(j)})$ carries the structure of a left $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(j)}$-module. By definition $\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(j)}$ carries the structure of a right $\varphi^{-1}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(j)})$-module. This, in turn allows one to define the functor $\varphi^{*,(j)}$ via \[ L\varphi^{*,(j)}(\mathcal{M})=\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(j)}\widehat{\otimes}_{\varphi^{-1}(\mathcal{\widehat{D}}_{\mathfrak{Y}})}^{L}\varphi^{-1}(\mathcal{M})\tilde{=}L\varphi^{*}(\mathcal{M}) \] (where the last isomorphism is as sheaves of $\mathcal{O}_{\mathfrak{X}}$-modules). One sets $\varphi^{!,(j)}:=L\varphi^{*,(j)}[d_{X/Y}]$ (where $d_{X/Y}=\text{dim}(X)-\text{dim}(Y)$).
In fact, this is not quite Berthelot's definition, as he does not use the cohomological completion; rather, he first defines the functor in the case of a morphism $\varphi:\mathfrak{X}_{n}\to\mathfrak{Y}_{n}$ (the reductions mod $p^{n}$ of $\mathfrak{X}$ and $\mathfrak{Y}$, respectively), and then applies the $\text{R}\lim$ functor. However, the two notions agree on bounded complexes of coherent $\widehat{\mathcal{D}}_{\mathfrak{Y}}$-modules; the version introduced here seems better suited to very general complexes.
In order to upgrade this to gauges, we must upgrade the bimodule $\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0)}$ to a bimodule $\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}$: \begin{defn} \label{def:Transfer-Bimod} We set \[ \mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}:=\bigoplus_{i\in\mathbb{Z}}\varphi^{*}\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1),i} \] The sheaf ${\displaystyle \bigoplus_{i\in\mathbb{Z}}\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1),i}}$ is a graded sheaf of $D(W(k))$-modules; induced from the $D(W(k))$ action on $\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}$. Note that $\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1),-\infty}=\varphi^{*}\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0)}$. \end{defn}
Let us analyze this sheaf: \begin{prop} \label{prop:Basic-properties-of-the-transfer-module}1) For each $i\in\mathbb{Z}$ , the natural map $\iota:\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1),i}\to\varphi^{*}\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(1)}$ (induced from the inclusion $\eta:\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1),i}\to\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(1)}$) is injective.
2) The image $\iota(\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1),i})$
is equal to the sheaf whose local sections are given by $\{\Psi\in\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(1)}|p^{i}\Psi\in\iota(\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0)})\}$. In particular, $\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}$ is a standard gauge.
3) The sheaf ${\displaystyle \mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}}$ carries the structure of a graded $(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)},\varphi^{-1}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}))$-bimodule as follows: we have the inclusions $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0)}\subset\mathcal{\widehat{D}}_{\mathfrak{X}}^{(1)}$, so if $\Phi\in\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),i}$ and $\Psi\in\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1),j}$ are local sections, then $p^{i+j}(\Phi\cdot\Psi)=(p^{i}\Phi)\cdot(p^{j}\Psi)\in\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0)}$. Similarly, $\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}$ becomes a right $\varphi^{-1}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)})$-module via $\varphi^{-1}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0)}\subset\varphi^{-1}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(1)}$. \end{prop}
\begin{proof} 1) As the statement is local, we can suppose $\mathfrak{Y}=\text{Specf}(\mathcal{A})$ and $\mathfrak{X}=\text{Specf}(\mathcal{B})$ where $\mathcal{A}$ and $\mathcal{B}$ admit local coordinates; let the reductions mod $p$ be $Y=\text{Spec}(A)$ and $X=\text{Spec}(B)$. By \corref{Local-coords-over-A=00005Bf,v=00005D} we know that $D_{A}^{(0,1)}$ is a free graded $A[f,v]$-module, therefore $\varphi^{*}D_{A}^{(0,1)}=B\otimes_{A}D_{A}^{(0,1)}$ is a free graded $B[f,v]$-module; and we have that the kernel of $f_{\infty}:\varphi^{*}D_{A}^{(0,1),i}\to\varphi^{*}D_{A}^{(0,1),\infty}=\varphi^{*}D_{A}^{(1)}$ is exactly the image of $v:\varphi^{*}D_{A}^{(0,1),i+1}\to\varphi^{*}D_{A}^{(0,1),i}$.
Now consider $m\in\text{ker}(\iota:\varphi^{*}\widehat{D}_{\mathcal{A}}^{(0,1),i}\to\varphi^{*}\widehat{D}_{\mathcal{A}}^{(1)})$. The reduction mod $p$ of $\iota$ agrees with $f_{\infty}:\varphi^{*}D_{A}^{(0,1),i}\to\varphi^{*}D_{A}^{(1)}$. Let $\overline{m}$ denote the image of $m$ in $\varphi^{*}D_{A}^{(0,1),i}$. Then $\overline{m}\in\text{ker}(\varphi^{*}D_{A}^{(0,1),i}\to\varphi^{*}D_{A}^{(1)})=v\cdot\varphi^{*}D_{A}^{(0,1),i+1}$. So, since $fv=p,$ we have $m\in v\cdot\varphi^{*}\widehat{D}_{\mathcal{A}}^{(0,1),i+1}$; write $m=vm'$. By definition, the composition $\widehat{D}_{\mathcal{A}}^{(0,1),i+1}\xrightarrow{v}\widehat{D}_{\mathcal{A}}^{(0,1),i}\xrightarrow{\eta}\widehat{D}_{\mathcal{A}}^{(1)}$ is equal to $p\cdot\eta:\widehat{D}_{\mathcal{A}}^{(0,1),i+1}\to\widehat{D}_{\mathcal{A}}^{(1)}$; thus also $\iota\circ v=p\cdot\iota$ and so $\iota(m)=\iota(vm')=p\iota(m')=0$; therefore $m'\in\text{ker}(\iota)$ as $\varphi^{*}\widehat{D}_{\mathcal{A}}^{(1)}$ is $p$-torsion-free\footnote{Indeed, it is the inverse limit of the $W_{n}(k)$-flat modules $(\varphi^{*}\widehat{D}_{\mathcal{A}}^{(1)})/p^{n}=(\mathcal{B}/p^{n})\otimes_{\mathcal{A}/p^{n}}(\widehat{D}_{\mathcal{A}}^{(1)}/p^{n})$}. Iterating the argument, we see that $m\in v^{N}\varphi^{*}\widehat{D}_{\mathcal{A}}^{(0,1),i+N}$ for all $N>0$; reducing mod $p$, this forces $\overline{m}=0$ since (again by \corref{Local-coords-over-A=00005Bf,v=00005D}) $\varphi^{*}D_{A}^{(0,1)}$ is $v$-adically seperated. Thus $m=pm_{1}$; and then $\iota(m_{1})=0$ since $\varphi^{*}\widehat{D}_{\mathcal{A}}^{(1)}$ is $p$-torsion-free; continuing in this way we obtain $m\in\bigcap_{n}p^{n}\cdot\varphi^{*}\widehat{D}_{\mathcal{A}}^{(0,1),i}=0$.
2) For each $i\geq0$ we have a short exact sequence \[ 0\to\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1),i}\to\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1),i+1}\to\mathcal{F}_{i}\to0 \]
where $\mathcal{F}_{i}$ is a sheaf which is annihilated by $p$. By the injectivity just proved (and the equality $L\varphi^{*}\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1),i}=\varphi^{*}\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1),i}$) we obtain the short exact sequence \[ 0\to\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1),i}\to\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1),i+1}\to\mathcal{H}^{0}(L\varphi^{*}\mathcal{F}_{i})\to0 \]
and, since $\mathcal{F}_{i}$ is annihilated by $p$, we have $\mathcal{H}^{0}(L\varphi^{*}\mathcal{F}_{i})=\mathcal{O}_{X}\otimes_{\varphi^{-1}(\mathcal{O}_{Y})}\varphi^{-1}(\mathcal{F}_{i})$. So we obtain $p\cdot\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1),i+1}\subset\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1),i}$, and since $\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1),0}=\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0)}$, we see inductively that $\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1),i}\subset\{\Psi\in\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(1)}|p^{i}\Psi\in\iota(\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0)})\}$ for all $i$.
For the converse direction, we work locally and assume $\mathfrak{X}=\text{Specf}(\mathcal{B})$ and $\mathfrak{Y}=\text{Specf}(\mathcal{A})$ where $\mathcal{A}$ possess etale local coordinates $\{t_{1},\dots,t_{n}\}$. Then we have that $\Gamma(\varphi^{*}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(1)})=\mathcal{B}\widehat{\otimes}_{\mathcal{A}}\widehat{D}_{\mathcal{A}}^{(1)}\tilde{=}\mathcal{B}\widehat{\otimes}_{\mathcal{A}}D_{\mathcal{A}}^{(1)}$. As in the proof of \lemref{Basic-structure-of-D_A^(i)}, we will consider the finite-order analogue first. From (the proof of) that lemma, it follows that, any element of $\mathcal{B}\otimes_{\mathcal{A}}D_{\mathcal{A}}^{(1)}$ admits a unique expression of the form \[
\Psi=\sum_{I,J}b_{I,J}\frac{\partial_{1}^{i_{1}+pj_{1}}\cdots\partial_{n}^{i_{n}+pj_{n}}}{(p!)^{|J|}} \] for which $0\leq i_{j}<p$, all $b_{I,J}\in\mathcal{B}$, and the sum is finite. We have that $p^{i}\Psi\in\mathcal{B}\otimes_{\mathcal{A}}D_{\mathcal{A}}^{(0)}$
iff ${\displaystyle \frac{p^{i}}{p^{|J|}}b_{I,J}}\in\mathcal{B}$. So, if $|J|>i$ we can conclude (again, as in the proof of \lemref{Basic-structure-of-D_A^(i)}) that \[
b_{I,J}\frac{\partial_{1}^{i_{1}+pj_{1}}\cdots\partial_{n}^{i_{n}+pj_{n}}}{(p!)^{|J|}}=\tilde{b}_{I,J}\cdot\partial_{1}^{i_{1}+pj'_{1}}\cdots\partial_{n}^{i_{n}+pj'_{n}}\cdot(\partial_{1}^{[p]})^{j''_{1}}\cdots\partial_{n}^{i_{n}}(\partial_{n}^{[p]})^{j''_{n}} \] where $\tilde{b}_{I,J}\in\mathcal{B}$, and $j''_{1}+\dots+j_{n}''=i$. In particular $\Psi$ is contained in the $\mathcal{B}$-submodule spanned by $\{\partial_{1}^{i_{1}}\cdots\partial_{n}^{i_{n}}\cdot(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\}$ where $j_{1}+\dots+j_{n}\le i$, which is exactly the image of $\mathcal{B}\otimes_{\mathcal{A}}D_{\mathcal{A}}^{(0,1),i}$ in $\mathcal{B}\otimes_{\mathcal{A}}D_{\mathcal{A}}^{(1)}$.
Now, if $\Psi\in\mathcal{B}\widehat{\otimes}_{\mathcal{A}}\widehat{D}_{\mathcal{A}}^{(1)}$ is such that $p^{i}\Psi\in\mathcal{B}\widehat{\otimes}_{\mathcal{A}}\widehat{D}_{\mathcal{A}}^{(0)}$, then we can write ${\displaystyle p^{i}\Psi=\sum_{j=0}^{\infty}p^{j}\Psi_{j}}$ where $\Psi_{j}\in\mathcal{B}\otimes_{\mathcal{A}}D_{\mathcal{A}}^{(0)}$. Therefore \[ \Psi=\sum_{j=0}^{i}p^{j-i}\Psi_{j}+\sum_{j=i+1}^{\infty}p^{j-i}\Psi_{j} \] where, by the previous paragraph, the first sum is contained in the $\mathcal{B}$-submodule spanned by $\{\partial_{1}^{i_{1}}\cdots\partial_{n}^{i_{n}}\cdot(\partial_{1}^{[p]})^{j_{1}}\cdots(\partial_{n}^{[p]})^{j_{n}}\}$ where $j_{1}+\dots+j_{n}\le i$, and the second sum is contained in $\mathcal{B}\widehat{\otimes}_{\mathcal{A}}\widehat{D}_{\mathcal{A}}^{(0)}$. Thus $\Psi$ is in the image of $\mathcal{B}\widehat{\otimes}_{\mathcal{A}}\widehat{D}_{\mathcal{A}}^{(0,1),i}$ as required. It follows directly from the definition that $\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}$ is standard. Part $3)$ of the proposition follows immediately. \end{proof} \begin{rem} \label{rem:Direct-defn-of-transfer-bimodule}Combining the previous proposition with \lemref{phi-pullback-of-D^i}, we also obtain the description \[ \mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}\tilde{=}L\varphi^{*}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)})=D(\mathcal{O}_{\mathfrak{X}})\widehat{\otimes}_{\varphi^{-1}(D(\mathcal{O}_{\mathfrak{Y}}))}^{L}\varphi^{-1}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}) \] in the category $D_{cc}(\mathcal{G}(D(\mathcal{O}_{\mathfrak{X}}))$. \end{rem}
This leads to the \begin{defn} \label{def:Pullback!}Let $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}))$. Then we define \[ L\varphi^{*}(\mathcal{M}^{\cdot}):=\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}\widehat{\otimes}_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}}^{(0,1)})}^{L}\varphi^{-1}(\mathcal{M}^{\cdot})\in\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)})) \]
where, as usual $\widehat{?}$ denotes graded derived completion. The induced left action of $\mathcal{D}_{\mathfrak{X}}^{(0,1)}$ given by the above definition; set $\varphi^{!}:=L\varphi^{*}[d_{X/Y}]$. \end{defn}
In order to study this definition, we shall use the corresponding mod $p$ theory; as usual this can be defined by reduction mod $p$ when the schemes $X$ and $Y$ are liftable, but it actually exists for all $\varphi:X\to Y$. This is contained in the the following \begin{prop} \label{prop:pull-back-in-pos-char}Let $\varphi:X\to Y$ be a morphism of smooth varieties over $k$.
1) There is a map of sheaves $\alpha:\mathfrak{l}_{X}\to\varphi^{*}\mathcal{D}_{Y}^{(0,1),1}$ (where $\mathfrak{l}_{X}$ is defined in \defref{L}).
2) Let $\beta:\mathcal{T}_{X}\to\varphi^{*}\mathcal{D}_{Y}^{(0,1),0}=\varphi^{*}\mathcal{D}_{Y}^{(0)}$ denote the natural map. There is a left action of $\mathcal{D}_{X}^{(0,1)}$ on $\varphi^{*}\mathcal{D}_{Y}^{(0,1)}$ satisfying $\partial\cdot(1\otimes1)=\beta(\partial)$ for all $\partial\in\mathcal{T}_{X}$ and $\delta\cdot(1\otimes1)=\alpha(\delta)$ for all $\delta\in\mathfrak{l}_{X}$. This action commutes with the right action of $\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})$ on $\varphi^{*}\mathcal{D}_{Y}^{(0,1)}$. \end{prop}
\begin{proof} 1) Let $\Phi$ be a local section of $\mathfrak{l}_{X}$. Composing the map $\varphi^{\#}:\varphi^{-1}(\mathcal{O}_{Y})\to\mathcal{O}_{X}$ with $\Phi$ gives a differential operator from $\varphi^{-1}(\mathcal{O}_{Y})$ to $\mathcal{O}_{X}$; call this operator $\Phi'$. We claim $\Phi'\in\mathcal{O}_{X}\otimes_{\varphi^{-1}(\mathcal{O}_{Y})}\varphi^{-1}\mathfrak{l}_{Y}$ (here, we are using the fact that the sheaf $\mathfrak{l}_{Y}$ is a subsheaf of $\mathcal{D}iff_{Y}$ and that $\mathcal{O}_{X}\otimes_{\varphi^{-1}(\mathcal{O}_{Y})}\varphi^{-1}(\mathcal{D}iff_{Y})\tilde{=}\mathcal{D}iff(\varphi^{-1}(\mathcal{O}_{Y}),\mathcal{O}_{X})$).
Let $U\subset X$ and $V\subset Y$ be open subsets which possess local coordinates, such that $\varphi(U)\subset V$. As in \lemref{O^p-action} write \[ \Phi=\sum_{i=1}^{n}a_{i}^{p}\partial_{i}^{[p]}+\sum_{I}a_{I}\partial^{I} \] where $a_{i},a_{I}\in\mathcal{O}_{X}(U)$. The map $(\sum_{I}a_{I}\partial^{I})\circ\varphi^{\#}:\varphi^{-1}(\mathcal{O}_{V})\to\mathcal{O}_{U}$ is a differential operator which satisfies $((\sum_{I}a_{I}\partial^{I})\circ\varphi^{\#})(g^{p}\cdot h)=\varphi^{\#}(g^{p})\cdot((\sum_{I}a_{I}\partial^{I})\circ\varphi^{\#})(h)$ for all $g,h\in\mathcal{O}_{V}$. From this we conclude \[ (\sum_{I}a_{I}\partial^{I})\circ\varphi^{\#}=\sum b_{J}\partial^{J} \] where $b_{J}\in\mathcal{O}_{X}(U)$ and now $\partial^{J}=\partial_{1}^{j_{1}}\cdots\partial_{r}^{j_{r}}$ are coordinate derivations on $V$ (to prove this, write the differential operator $(\sum_{I}a_{I}\partial^{I})\circ\varphi^{\#}$ in terms of $\partial_{1}^{[j_{1}]}\cdots\partial_{r}^{[j_{r}]}$ and then use the linearity over $\varphi^{\#}(g^{p})$ to deduce that there are no terms with any $j_{i}\geq p$).
Similarly, the map ${\displaystyle \sum_{i=1}^{n}a_{i}^{p}\partial_{i}^{[p]}}\circ\varphi^{\#}:\varphi^{-1}(\mathcal{O}_{V})\to\mathcal{O}_{U}$ is a differential operator of order $\leq p$, whose action on any $p$th power in $\varphi^{-1}(\mathcal{O}_{V})$ is a $p$th power in $\mathcal{O}_{U}$. From this one easily sees \[ (\sum_{i=1}^{n}a_{i}^{p}\partial_{i}^{[p]})\circ\varphi^{\#}=\sum_{j=1}^{r}b_{j}^{p}\partial_{j}^{[p]}+\sum_{J}b_{J}\partial^{J} \] for some $b_{j},b_{J}\in\mathcal{O}_{U}$. So we conclude $\Phi'\in\mathcal{O}_{X}\otimes_{\varphi^{-1}(\mathcal{O}_{Y})}\varphi^{-1}\mathfrak{l}_{Y}$ as desired. Further, since $\mathfrak{l}_{Y}\subset\mathcal{D}_{Y}^{(0,1),1}$ we obtain $\varphi^{-1}(\mathfrak{l}_{Y})\subset\varphi^{-1}(\mathcal{D}_{Y}^{(0,1),1})$ and therefore a map $\mathcal{O}_{X}\otimes_{\varphi^{-1}(\mathcal{O}_{Y})}\varphi^{-1}(\mathfrak{l}_{Y})\to\mathcal{O}_{X}\otimes_{\varphi^{-1}(\mathcal{O}_{Y})}\varphi^{-1}(\mathcal{D}_{Y}^{(0,1),1})$; we can now define $\alpha$ as the composition.
2) It suffices to check this locally. Restrict to an open affine $U\subset X$ which posses etale local coordinates, and we may suppose $\varphi(U)\subset V$, where $V$ also possesses etale local coordinates. Writing $U=\text{Spec}(A)$ and $V=\text{Spec}(B)$, we let $\mathcal{A}$ and $\mathcal{B}$ be flat lifts of $A$ and $B$ to $W(k)$, as in the proof of \lemref{linear-independance-over-D_0-bar} above. Let $\varphi^{\#}:\mathcal{B}\to\mathcal{A}$ be a lift of $\varphi^{\#}:B\to A$ (these always exist for affine neighborhoods which posses local coordinates, by the infinitesimal lifting property). Then the construction of \defref{Transfer-Bimod} provides an action of $D_{\mathcal{B}}^{(0,1)}$ on $\varphi^{*}(D_{\mathcal{A}}^{(0,1)})$ which commutes with the obvious right action of $D_{\mathcal{A}}^{(0,1)}$. The reduction mod $p$ of this action, when restricted to $\mathcal{T}_{X}\subset\mathcal{D}_{X}^{(0)}$ and $\mathfrak{l}_{X}\subset\mathcal{D}_{X}^{(0,1),1}$ clearly agrees with the map described above. Thus the map extends (uniquely) to an action, as claimed. \end{proof} Thus we have \begin{defn} Let $\mathcal{D}_{X\to Y}^{(0,1)}:=\varphi^{*}\mathcal{D}_{Y}^{(0,1)}$, equipped with the structure of a graded $(\mathcal{D}_{X}^{(0,1)},\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)}))$-bimoddule as above. Let $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)}))$. Then we define $L\varphi^{*}(\mathcal{M}^{\cdot}):=\mathcal{D}_{X\to Y}^{(0,1)}\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}^{L}\varphi^{-1}(\mathcal{M}^{\cdot})$ with the induced left action of $\mathcal{D}_{X}^{(0,1)}$ given by the above. Set $\varphi^{!}=L\varphi^{*}[d_{X/Y}]$. The functor $L\varphi^{*}$ takes $D_{\text{qcoh}}(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)}))$ to $D_{\text{qcoh}}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$. \end{defn}
\begin{rem} In fact, as an object in $D(\mathcal{G}(D(\mathcal{O}_{X})))$, we have that $L\varphi^{*}(\mathcal{M}^{\cdot})$ agrees with the usual pullback of $\mathcal{O}$-modules. This follows directly from the isomorphism $\varphi^{*}\mathcal{D}_{Y}^{(0,1)}\tilde{=}\mathcal{O}_{X}\otimes_{\varphi^{-1}(\mathcal{O}_{Y})}\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})$, and the fact that $\mathcal{D}_{Y}^{(0,1)}$ is flat over $\mathcal{O}_{Y}$. The analogous fact is also true for $\varphi:\mathfrak{X}\to\mathfrak{Y}$; making use of \remref{Direct-defn-of-transfer-bimodule}. It follows that $L\varphi^{*}$ has finite homological dimension. \end{rem}
Now we record some basic properties of these functors: \begin{lem} \label{lem:composition-of-pullbacks}If $\psi:\mathfrak{Y}\to\mathfrak{Z}$, there is an isomorphism of functors \linebreak{} $L\varphi^{*}\circ L\psi^{*}\tilde{=}L(\psi\circ\varphi)^{*}$. The same result holds for $\varphi:X\to Y$ and $\psi:Y\to Z$. \end{lem}
\begin{proof} (compare \cite{key-49}, proposition 1.5.11) We have, by \remref{Direct-defn-of-transfer-bimodule}, \[ \mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}\widehat{\otimes}_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}}^{(0,1)})}^{L}\varphi^{-1}(\mathcal{D}_{\mathfrak{Y\to\mathfrak{Z}}}^{(0,1)}) \] \[ =(D(\mathcal{O}_{\mathfrak{X}})\widehat{\otimes}_{\varphi^{-1}(D(\mathcal{O}_{\mathfrak{Y}}))}^{L}\varphi^{-1}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}))\widehat{\otimes}_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}}^{(0,1)})}^{L}\varphi^{-1}(\mathcal{O}_{\mathfrak{Y}}[f,v]\widehat{\otimes}_{\psi^{-1}(D(\mathcal{O}_{\mathfrak{Z}}))}^{L}\psi^{-1}(\mathcal{\widehat{D}}_{\mathfrak{Z}}^{(0,1)})) \] \[ \tilde{=}D(\mathcal{O}_{\mathfrak{X}})\widehat{\otimes}_{\varphi^{-1}(D(\mathcal{O}_{\mathfrak{Y}}))}^{L}(\varphi^{-1}D(\mathcal{O}_{\mathfrak{Y}})\widehat{\otimes}_{(\psi\circ\varphi)^{-1}(D(\mathcal{O}_{\mathfrak{Z}}))}^{L}(\psi\circ\varphi)^{-1}(\mathcal{\widehat{D}}_{\mathfrak{Z}}^{(0,1)})) \] \[ \tilde{=}D(\mathcal{O}_{\mathfrak{X}})\widehat{\otimes}_{(\psi\circ\varphi)^{-1}(D(\mathcal{O}_{\mathfrak{Z}}))}^{L}(\psi\circ\varphi)^{-1}(\mathcal{\widehat{D}}_{\mathfrak{Z}}^{(0,1)})=\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Z}}^{(0,1)} \] as $(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)},(\psi\circ\varphi)^{-1}\mathcal{\widehat{D}}_{\mathfrak{Z}}^{(0,1)})$-bimodules. This yields \[ L(\psi\circ\varphi)^{*}(\mathcal{M}^{\cdot})=\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Z}}^{(0,1)}\widehat{\otimes}_{(\psi\circ\varphi)^{-1}(\mathcal{D}_{\mathfrak{Z}}^{(0,1)})}^{L}(\psi\circ\varphi)^{-1}(\mathcal{M}^{\cdot}) \] \[ \tilde{=}(\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}\widehat{\otimes}_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}}^{(0,1)})}^{L}\varphi^{-1}(\mathcal{D}_{\mathfrak{Y\to\mathfrak{Z}}}^{(0,1)}))\widehat{\otimes}_{(\psi\circ\varphi)^{-1}(\mathcal{D}_{\mathfrak{Z}}^{(0,1)})}^{L}(\psi\circ\varphi)^{-1}(\mathcal{M}^{\cdot}) \] \[ \tilde{=}\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}\widehat{\otimes}_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}}^{(0,1)})}^{L}\varphi^{-1}((\mathcal{D}_{\mathfrak{Y\to\mathfrak{Z}}}^{(0,1)})\widehat{\otimes}_{\psi^{-1}(\mathcal{D}_{\mathfrak{Z}}^{(0,1)})}^{L}\psi^{-1}(\mathcal{M}^{\cdot}))=L\varphi^{*}(L\psi^{*}\mathcal{M}^{\cdot}) \] An identical argument works for $\varphi:X\to Y$ and $\psi:Y\to Z$. \end{proof} Next, we have \begin{prop} \label{prop:Basic-base-change-for-pullback}1) Let $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\mathcal{D}_{\mathfrak{Y}}^{(0,1)}))$. Then $L\varphi^{*}(\mathcal{M}^{\cdot})^{-\infty}\tilde{\to}L\varphi^{*,(0)}(\mathcal{M}^{\cdot,-\infty})$ and $\widehat{L\varphi^{*}(\mathcal{M}^{\cdot})^{\infty}}\tilde{\to}L\varphi^{*,(1)}(\widehat{\mathcal{M}^{\cdot,\infty}})$. The analogous result holds for $\varphi:X\to Y$.
2) Let $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\mathcal{D}_{\mathfrak{Y}}^{(0,1)}))$. Then $L\varphi^{*}(\mathcal{M}^{\cdot})\otimes_{W(k)}^{L}k\tilde{\to}L\varphi^{*,(0)}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)$. \end{prop}
\begin{proof} 1) By construction we have \[ (\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)})\widehat{\otimes}_{D(W(k))}^{L}W(k)[f,v]/(f-1)\tilde{=}\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(1)} \] and \[ (\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0)})\widehat{\otimes}_{D(W(k))}^{L}W(k)[f,v]/(v-1)\tilde{=}\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0)} \] from which the result follows directly. Similarly, for part $2)$ one uses \[ (\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)})\otimes_{W(k)}^{L}k\tilde{=}\mathcal{\widehat{D}}_{X\to Y}^{(0,1)} \] \end{proof} Specializing to the case of positive characteristic, it is also useful to have comparisons with the pullbacks of $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ and $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$-modules. First, we need to give the relevant definitions: \begin{defn} Suppose $\varphi:X\to Y$. We let $\mathcal{R}_{X\to Y}^{(1)}:=\varphi^{*}\mathcal{D}_{Y}^{(0,1)}/(v)$ and $\mathcal{\overline{R}}_{X\to Y}^{(0)}:=\varphi^{*}\mathcal{D}_{Y}^{(0,1)}/(f)$; considered as a graded $(\mathcal{R}(\mathcal{D}_{X}^{(1)}),\varphi^{-1}(\mathcal{R}(\mathcal{D}_{Y}^{(1)}))$ bimodule (respectively a graded $(\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)}),\varphi^{-1}(\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)}))$ bimodule). Let $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{Y}^{(1)})))$. Then we define $L\varphi^{*,(1)}(\mathcal{M}^{\cdot}):=\mathcal{R}_{X\to Y}^{(1)}\otimes_{\varphi^{-1}(\mathcal{R}(\mathcal{D}_{Y}^{(1)})}^{L}\varphi^{-1}(\mathcal{M}^{\cdot})$ with the induced left action of $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ given by the bimodule structure.. Set $\varphi^{\dagger,(1)}=L\varphi^{*,(1)}[d_{X/Y}]$.
We make the analogous definition for $\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})$-modules; and denote the corresponding functors $L\varphi^{*,(0)}$ and $\varphi^{\dagger,(1)}$. \end{defn}
We note that the functor $L\varphi^{*,(1)}$ takes $D_{qc}(\mathcal{G}(\mathcal{R}(\mathcal{D}_{Y}^{(1)}))$ to $D_{qc}(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)}))$; and similarly for $\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})$. Then we have the \begin{prop} \label{prop:pullback-and-R}Let $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)}))$. There is an isomorphism of functors \[ \mathcal{R}(\mathcal{D}_{X}^{(1)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\varphi^{\dagger}\mathcal{M}^{\cdot}\tilde{=}\varphi^{\dagger,(1)}(\mathcal{R}(\mathcal{D}_{Y}^{(1)})\otimes_{\mathcal{D}_{Y}^{(0,1)}}^{L}\mathcal{M}^{\cdot}) \] and similarly for $\varphi_{0}^{\dagger}$. \end{prop}
\begin{proof} We have \[ \varphi^{\dagger,(1)}(\mathcal{R}(\mathcal{D}_{Y}^{(1)})\otimes_{\mathcal{D}_{Y}^{(0,1)}}^{L}\mathcal{M}^{\cdot})=\mathcal{R}_{X\to Y}^{(1)}\otimes_{\varphi^{-1}(\mathcal{R}(\mathcal{D}_{Y}^{(1)}))}^{L}\varphi^{-1}(\mathcal{R}(\mathcal{D}_{Y}^{(1)})\otimes_{\mathcal{D}_{Y}^{(0,1)}}^{L}\mathcal{M}^{\cdot})[d_{X/Y}] \] \[ \tilde{=}\mathcal{R}_{X\to Y}^{(1)}\otimes_{\varphi^{-1}(\mathcal{R}(\mathcal{D}_{Y}^{(1)}))}^{L}\varphi^{-1}(\mathcal{R}(\mathcal{D}_{Y}^{(1)}))\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}^{L}\varphi^{-1}(\mathcal{M}^{\cdot})[d_{X/Y}] \] \[ \tilde{=}\mathcal{R}_{X\to Y}^{(1)}\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}^{L}\varphi^{-1}(\mathcal{M}^{\cdot})[d_{X/Y}] \] Now, by definition, the module $\mathcal{D}_{X\to Y}^{(0,1)}$, admits, locally on $X$ and $Y$, a lift $\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}$ which we have constructed above in \defref{Transfer-Bimod}. This lift is a standard gauge, and so $\mathcal{D}_{X\to Y}^{(0,1)}$ is quasi-rigid. So, using the resolution (c.f. \lemref{Basic-Facts-on-Rigid}) \[ \cdots\to\mathcal{D}_{X}^{(0,1)}(-1)\xrightarrow{v}\mathcal{D}_{X}^{(0,1)}\xrightarrow{f}\mathcal{D}_{X}^{(0,1)}(-1)\xrightarrow{v}\mathcal{D}_{X}^{(0,1)}\to\mathcal{R}(\mathcal{D}_{X}^{(1)}) \] for $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ over $\mathcal{D}_{X}^{(0,1)}$, this tell us that \begin{equation} \mathcal{R}(\mathcal{D}_{X}^{(1)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{D}_{X\to Y}^{(0,1)}\tilde{=}\mathcal{D}_{X\to Y}^{(0,1)}/v=\mathcal{R}_{X\to Y}^{(1)}\label{eq:transfer-iso-1} \end{equation} i.e., this complex is concentrated in degree $0$ and is equal to $\mathcal{R}_{X\to Y}^{(1)}$ there. Thus \[ \mathcal{R}_{X\to Y}^{(1)}\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}^{L}\varphi^{-1}(\mathcal{M}^{\cdot})[d_{X/Y}] \] \[ \tilde{=}\mathcal{R}(\mathcal{D}_{X}^{(1)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{D}_{X\to Y}^{(0,1)}\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}^{L}\varphi^{-1}(\mathcal{M}^{\cdot})[d_{X/Y}]=\mathcal{R}(\mathcal{D}_{X}^{(1)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\varphi^{\dagger}\mathcal{M}^{\cdot} \] as desired. The case of $\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})$-modules is essentially identical. \end{proof} Finally, we also have \begin{prop} \label{prop:Smooth-pullback-preserves-coh}If $\varphi$ is smooth, then $L\varphi^{*}$ takes $D_{coh}^{b}(\mathcal{G}(\mathcal{D}_{\mathfrak{Y}}^{(0,1)}))$ to $D_{coh}^{b}(\mathcal{G}(\mathcal{D}_{\mathfrak{X}}^{(0,1)}))$. The same holds for a smooth morphism $\varphi:X\to Y$. \end{prop}
\begin{proof} By part $2)$ of \propref{Basic-base-change-for-pullback}, as well as \propref{coh-to-coh}, the first statement reduces to the second. We may assume that $X=\text{Spec}(B)$ and $Y=\text{Spec}(A)$ both possess local coordinates. After further localizing if necessary we can suppose that there are local coordinates $\{\partial_{1},\dots,\partial_{n}\}$ on $B$ such that the $A$-linear derivations of $B$ are $\{\partial_{1},\dots,\partial_{d}\}$. In this case, if we let $J\subset\mathcal{D}_{B}^{(0,1)}$ be the ideal generated by $\{\partial_{1},\dots,\partial_{d},\partial_{1}^{[p]},\dots,\partial_{d}^{[p]}\}$, then we have \[ \mathcal{D}_{B}^{(0,1)}/J\tilde{=}B\otimes_{A}\mathcal{D}_{A}^{(0,1)} \] which shows that $B\otimes_{A}\mathcal{D}_{A}^{(0,1)}=\varphi^{*}\mathcal{D}_{A}^{(0,1)}$ is a coherent $\mathcal{D}_{B}^{(0,1)}$-module, which is flat as a module over $\mathcal{D}_{A}^{(0,1),\text{opp}}$. This shows that $\varphi^{*}$ is exact; and the coherence of the pullback for an arbitrary coherent $\mathcal{D}_{A}^{(0,1)}$-module $\mathcal{M}$ follows by taking a finite presentation for $\mathcal{M}$. \end{proof}
\section{\label{sec:Operations:Swap-Tensor}Operations on Gauges: Left-Right Interchange and tensor Product}
The first goal of this subsection is to prove \begin{prop} \label{prop:Left-Right-Swap} Let $\mathcal{M}\in\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)})$. Then $\omega_{\mathfrak{X}}\otimes_{\mathcal{O}_{\mathfrak{X}}}\mathcal{M}$ carries the structure of a right graded $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}$-module. This functor defines an equivalence of categories, which preserves coherent modules. The derived functor preserves the subcategories of derived complete complexes.
The analogous result holds for $X$ (i.e., in positive characteristic); there, the functor preserves the category of quasi-coherent sheaves as well. \end{prop}
In order to prove this, we first recall that $\omega_{\mathfrak{X}}$ naturally carries the structure of a right $\mathcal{D}_{\mathfrak{X}}^{(i)}$-module for all $i\geq0$; indeed, $\omega_{\mathfrak{X}}[p^{-1}]$ carries a right $\mathcal{D}_{\mathfrak{X}}^{(i)}[p^{-1}]=\mathcal{D}_{\mathfrak{X}}^{(0)}[p^{-1}]$ structure via the Lie derivative (c.f., e.g. \cite{key-4}, page 8). In local coordinates, this action is simply given by \[ (gdx_{1}\wedge\cdots\wedge dx_{n})\partial=-\partial(g)dx_{1}\wedge\cdots\wedge dx_{n} \] for any derivation $\partial$. It follows that $\mathcal{D}_{\mathfrak{X}}^{(i)}$ preserves $\omega_{\mathfrak{X}}$ (for all $i$). As $\omega_{\mathfrak{X}}$ is $p$-adically complete, we see that it also inherits a right $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(i)}$-module structure. \begin{lem} Let $D(\omega_{\mathfrak{X}})=\omega_{\mathfrak{X}}\otimes_{\mathcal{O}_{\mathfrak{X}}}D(\mathcal{O}_{\mathfrak{X}})$. Then $D(\omega_{\mathfrak{X}})$ has a natural right graded $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}$-module structure. Similarly, $D(\omega_{X})$ admits a right graded $\mathcal{D}_{X}^{(0,1)}$-module structure, for any smooth $X$ over $k$. \end{lem}
\begin{proof}
We note that $(\omega_{\mathfrak{X}}[f,v])^{i}=\{m\in\omega_{\mathfrak{X}}[p^{-1}]|p^{i}m\in\omega_{\mathfrak{X}}\}$. Thus the first result follows by using the right $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(1)}$-module structure on $\omega_{\mathfrak{X}}$. To prove the second result, we choose on open affine $\text{Spec}(A)\subset X$ which possesses etale local coordinates. In coordinates, the required action is given by \[ (gdx_{1}\wedge\cdots\wedge dx_{n})\partial=-\partial(g)dx_{1}\wedge\cdots\wedge dx_{n} \] and \[ (gdx_{1}\wedge\cdots\wedge dx_{n})\partial^{[p]}=-f\cdot\partial^{[p]}(g)dx_{1}\wedge\cdots\wedge dx_{n} \] for any $g\in D(\mathcal{O}_{\mathfrak{X}})$. If we choose a lift $\mathcal{A}$ of $A$, then, after lifting the coordinates, we see that this action is the reduction mod $p$ of the action just defined; in particular it is actually independent of the choice of coordinates and therefore glues to define an action on all of $X$. \end{proof} Now we recall a very general construction from \cite{key-4}, section 1.4b \begin{lem} Let $\mathcal{L}$ be any line bundle on $\mathfrak{X}$. Placing $\mathcal{L}$ and $\mathcal{L}^{-1}$ in degree $0$, the sheaf $\mathcal{\widehat{D}}_{\mathfrak{X},\mathcal{L}}^{(0,1)}:=\mathcal{L}\otimes_{\mathcal{O}_{\mathfrak{X}}}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}\otimes_{\mathcal{O}_{\mathfrak{X}}}\mathcal{L}^{-1}$ carries the structure of a graded algebra on $\mathfrak{X}$, via the multiplication \[ (s_{1}\otimes\Phi_{1}\otimes t_{1})\cdot(s_{2}\otimes\Phi_{2}\otimes t_{2})=s_{1}\otimes\Phi_{1}<t_{1},s_{1}>\Phi_{2}\otimes t_{2} \] There is a functor $\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)})\to\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X},\mathcal{L}}^{(0,1)})$ given by $\mathcal{M}\to\mathcal{L}\otimes_{\mathcal{O}_{\mathfrak{X}}}\mathcal{M}$; the action of $\mathcal{\widehat{D}}_{\mathfrak{X},\mathcal{L}}^{(0,1)}$ on $\mathcal{L}\otimes_{\mathcal{O}_{\mathfrak{X}}}\mathcal{M}$ is defined by \[ (s\otimes\Phi\otimes t)\cdot(s_{1}\otimes m)=s\otimes\Phi_{1}<t,s_{1}>m \] This functor is an equivalence of categories, whose inverse is given by $\mathcal{N}\to\mathcal{L}^{-1}\otimes_{\mathcal{O}_{\mathfrak{X}}}\mathcal{N}$. \end{lem}
So, \propref{Left-Right-Swap} follows directly from \begin{lem} There is an isomorphism of algebras $\mathcal{\widehat{D}}_{\mathfrak{X},\omega_{\mathfrak{X}}}^{(0,1)}\tilde{=}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),\text{op}}$. The same is true over $X$. \end{lem}
\begin{proof} We have the isomorphism $\mathcal{\widehat{D}}_{\mathfrak{X},\omega_{\mathfrak{X}}}^{(0,1)}\tilde{=}D(\omega_{\mathfrak{X}})\otimes_{D(\mathcal{O}_{\mathfrak{X}})}\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}\otimes_{D(\mathcal{O}_{\mathfrak{X}})}D(\mathcal{\omega}_{\mathfrak{X}}^{-1})$. This yields a left action of $\mathcal{\widehat{D}}_{\mathfrak{X},\omega_{\mathfrak{X}}}^{(0,1)}$ on $\omega_{\mathfrak{X}}[f,v]$, given by \[ (s\otimes\Phi\otimes t)\cdot s_{1}=s\otimes\Phi\cdot<t,s_{1}> \] where $<,>$ refers to the pairing $D(\mathcal{\omega}_{\mathfrak{X}})\otimes_{D(\mathcal{O}_{\mathfrak{X}})}D(\mathcal{\omega}_{\mathfrak{X}}^{-1})\to D(\mathcal{O}_{\mathfrak{X}})$. Computing in local coordinates, one sees that the image of $\mathcal{\widehat{D}}_{\mathfrak{X},\omega_{\mathfrak{X}}}^{(0,1)}$ in $\mathcal{E}nd_{W(k)}(D(\omega_{\mathfrak{X}}))$ is the same as the image of $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),\text{op}}$ in $\mathcal{E}nd_{W(k)}(D(\omega_{\mathfrak{X}}))$ via the right action defined above. This yields the isomorphism over $\mathfrak{X}$. To deal with $X$, one first obtains the isomorphism locally (via a local lifting of the variety), and then shows that the resulting isomorphism is independent of the choice of coordinates (as in the proof of the previous lemma). \end{proof} Next, we define tensor products of (left) $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$-modules. The first step is to define the external product of sheaves: \begin{defn} 1) Let $\mathfrak{X}$ and $\mathfrak{Y}$ be smooth formal schemes, and let $\mathcal{M}^{\cdot}\in D(\mathcal{G}(D(\mathcal{O}_{\mathfrak{X}})))$, $\mathcal{N}^{\cdot}\in D(\mathcal{G}(D(\mathcal{O}_{\mathfrak{Y}})))$. Then we define \[ \mathcal{M}^{\cdot}\boxtimes\mathcal{N}^{\cdot}:=Lp_{1}^{*}(\mathcal{M}^{\cdot})\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}\times\mathfrak{Y}})}^{L}Lp_{2}^{*}(\mathcal{N}^{\cdot})\in D_{cc}(\mathcal{G}(D(\mathcal{O}_{\mathfrak{X}\times\mathfrak{Y}}))) \] where $p_{i}$ ($i\in\{1,2\}$) are the projections and $Lp_{1}^{*},Lp_{2}^{*}$ are defined as in \defref{Correct-Pullback}.
2) Let $X$ and $Y$ be smooth schemes over $k$. Then for $\mathcal{M}^{\cdot}\in D(\mathcal{G}(D(\mathcal{O}_{X})))$, $\mathcal{N}^{\cdot}\in D(\mathcal{G}(D(\mathcal{O}_{Y})))$. Then we define \[ \mathcal{M}^{\cdot}\boxtimes\mathcal{N}^{\cdot}:=Lp_{1}^{*}(\mathcal{M}^{\cdot})\otimes_{D(\mathcal{O}_{X\times Y})}^{L}Lp_{2}^{*}(\mathcal{N}^{\cdot})\in D(\mathcal{G}(D(\mathcal{O}_{X\times Y}))) \] where for $\mathcal{M}^{\cdot}\in D(\mathcal{G}(D(\mathcal{O}_{X})))$ we have $Lp_{1}^{*}\mathcal{M}^{\cdot}=D(\mathcal{O}_{X\times Y})\otimes_{p_{1}^{-1}(D(\mathcal{O}_{X}))}^{L}\mathcal{M}^{\cdot}\in D(\mathcal{G}(D(\mathcal{O}_{X\times Y})))$ ( and similarly for $p_{2}$). \end{defn}
The relationship with $\mathcal{D}$-modules is the following: \begin{lem} 1) There is an isomorphism \[ \widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}\boxtimes\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}\tilde{=}\widehat{\mathcal{D}}_{\mathfrak{X}\times\mathfrak{Y}}^{(0,1)} \] of sheaves of algebras on $\mathfrak{X}\times\mathfrak{Y}$.
2) There is an isomorphism \[ \mathcal{D}_{X}^{(0,1)}\boxtimes\mathcal{D}_{Y}^{(0,1)}\tilde{=}\mathcal{D}_{X\times Y}^{(0,1)} \] of sheaves of algebras on $X\times Y$. \end{lem}
\begin{proof} First suppose $\mathfrak{X}=\text{Specf}(\mathcal{A})$ and $\mathfrak{Y}=\text{Specf}(\mathcal{B})$. Then there is a morphism $\mathcal{D}_{\mathcal{A}}^{(\infty)}\otimes_{W(k)}\mathcal{D}_{\mathcal{B}}^{(\infty)}\to\mathcal{D}_{\mathcal{A}\widehat{\otimes}_{W(k)}\mathcal{B}}^{(\infty)}$ defined as follows: for sections $a\in\mathcal{A}$ and $b\in\mathcal{B}$, we set \[ (\Phi_{1}\otimes\Phi_{2})(a\otimes b)=\Phi_{1}(a)\otimes\Phi_{2}(b) \] and we extend to $\mathcal{A}\widehat{\otimes}_{W(k)}\mathcal{B}$ by linearity and continuity. For a fixed integer $j\geq0$, this yields a map $\mathcal{D}_{\mathcal{A}}^{(j)}\otimes_{W(k)}\mathcal{D}_{\mathcal{B}}^{(j)}\to\mathcal{D}_{\mathcal{A}\widehat{\otimes}_{W(k)}\mathcal{B}}^{(j)}$; these maps are compatible with localization at any element of $\mathcal{A}$ or $\mathcal{B}$. After $p$-adically completing we get a map $\widehat{\mathcal{D}}_{\mathcal{A}}^{(j)}\widehat{\otimes}_{W(k)}\widehat{\mathcal{D}}_{\mathcal{B}}^{(j)}\to\widehat{\mathcal{D}}_{\mathcal{A}\widehat{\otimes}_{W(k)}\mathcal{B}}^{(j)}$, and these maps sheafifiy to a map \linebreak{} $p_{1}^{-1}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(j)})\widehat{\otimes}_{W(k)}p_{2}^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(j)})\to\widehat{\mathcal{D}}_{\mathfrak{X}\times\mathfrak{Y}}^{(j)}$. Note that since $\mathcal{D}_{\mathcal{A}}^{(j)}\otimes_{W(k)}\mathcal{D}_{\mathcal{B}}^{(j)}$ is $p$-torsion-free (as is $\mathcal{D}_{\mathcal{A}\widehat{\otimes}_{W(k)}\mathcal{B}}^{(j)}$), the usual $p$-adic completion of these sheaves agrees with the cohomological completion. It follows that $p_{1}^{-1}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(j)})\widehat{\otimes}_{W(k)}p_{2}^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(j)})\tilde{=}p_{1}^{*}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(j)})\widehat{\otimes}_{\mathcal{O}_{\mathfrak{X}\times\mathfrak{Y}}}p_{2}^{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(j)}))$.
1) We claim that the map \[ p_{1}^{*}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(j)})\widehat{\otimes}_{\mathcal{O}_{\mathfrak{X}\times\mathfrak{Y}}}p_{2}^{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(j)}))\to\widehat{\mathcal{D}}_{\mathfrak{X}\times\mathfrak{Y}}^{(j)} \] is an isomorphism; indeed, both sides are $p$-adically complete and $p$-torsion-free, so it suffices to check this after reduction mod $p$, where it becomes an easy computation in local coordinates. Thus we obtain isomorphisms \[
\{\Phi\in p_{1}^{*}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)})\widehat{\otimes}_{\mathcal{O}_{\mathfrak{X}\times\mathfrak{Y}}}p_{2}^{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(1)})|p^{i}\Phi\in p_{1}^{*}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)})\widehat{\otimes}_{\mathcal{O}_{\mathfrak{X}\times\mathfrak{Y}}}p_{2}^{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0)})\} \] \[
\tilde{\to}\{\Phi\in\widehat{\mathcal{D}}_{\mathfrak{X}\times\mathfrak{Y}}^{(1)}|p^{i}\Phi\in\widehat{\mathcal{D}}_{\mathfrak{X}\times\mathfrak{Y}}^{(0)}\}=\widehat{\mathcal{D}}_{\mathfrak{X}\times\mathfrak{Y}}^{(0,1),i} \] for each $i\in\mathbb{Z}$.
On the other hand, we claim that there is an isomorphism \[
(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}\boxtimes\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})^{i}\tilde{\to}\{\Phi\in p_{1}^{*}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)})\widehat{\otimes}_{\mathcal{O}_{\mathfrak{X}\times\mathfrak{Y}}}p_{2}^{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(1)})|p^{i}\Phi\in p_{1}^{*}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)})\widehat{\otimes}_{\mathcal{O}_{\mathfrak{X}\times\mathfrak{Y}}}p_{2}^{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0)})\} \] Combined with the above, this proves $(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}\boxtimes\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})^{i}\tilde{\to}\widehat{\mathcal{D}}_{\mathfrak{X}\times\mathfrak{Y}}^{(0,1),i}$ as required. To see it, note that we have the map \[ f_{\infty}:(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}\boxtimes\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})^{i}\to(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}\boxtimes\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})^{\infty} \] The completion of the right hand side is $p_{1}^{*}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)})\widehat{\otimes}_{\mathcal{O}_{\mathfrak{X}\times\mathfrak{Y}}}p_{2}^{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(1)})$; so we obtain a map \[
(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}\boxtimes\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})^{i}\to\{\Phi\in p_{1}^{*}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)})\widehat{\otimes}_{\mathcal{O}_{\mathfrak{X}\times\mathfrak{Y}}}p_{2}^{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(1)})|p^{i}\Phi\in p_{1}^{*}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)})\widehat{\otimes}_{\mathcal{O}_{\mathfrak{X}\times\mathfrak{Y}}}p_{2}^{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0)})\} \] and to see that it is an isomorphism, one may check it after reduction mod $p$; then it follows from the result of part $2)$ proved directly below.
2) As above we have the map $p_{1}^{-1}\mathcal{D}_{X}^{(\infty)}\otimes_{k}p_{2}^{-1}\mathcal{D}_{Y}^{(\infty)}\to\mathcal{D}_{X\times Y}^{(\infty)}$. Restricting to $\mathcal{T}_{X}$ and $\mathfrak{l}_{X}$ (a defined in \defref{L} above)we get maps $p_{1}^{\#}:p_{1}^{-1}(\mathcal{T}_{X})\to\mathcal{T}_{X\times Y}$ and $p_{1}^{\#}:p_{1}^{-1}(\mathfrak{l}_{X})\to\mathfrak{l}_{X\times Y}$; and similarly for $p_{2}$. Thus we get a map \[ A:(\mathcal{T}_{X}\boxtimes1)\oplus(1\boxtimes\mathcal{T}_{Y})\oplus(\mathfrak{l}_{X}\boxtimes1)\oplus(1\boxtimes\mathfrak{l}_{Y})\to\mathcal{D}_{X\times Y}^{(0,1)} \] defined by \[ A(\partial_{1}\boxtimes1+1\boxtimes\partial_{2}+\delta_{1}\boxtimes1+1\boxtimes\delta_{2})=p_{1}^{\#}(\partial_{1})+p_{2}^{\#}(\partial_{2})+p_{1}^{\#}(\delta_{1})+p_{2}^{\#}(\delta_{2}) \] On the other hand, the sheaf $(\mathcal{T}_{X}\boxtimes1)\oplus(1\boxtimes\mathcal{T}_{Y})\oplus(\mathfrak{l}_{X}\boxtimes1)\oplus(1\boxtimes\mathfrak{l}_{Y})$ generates $\mathcal{D}_{X}^{(0,1)}\boxtimes\mathcal{D}_{Y}^{(0,1)}$ as a sheaf of algebras over $\mathcal{O}_{X\times Y}[f,v]$. Thus to show that $A$ extends (necessarily uniquely) to an isomorphism of algebras, we can so do locally.
So, let $\{x_{1},\dots,x_{n}\}$ and $\{y_{1},\dots,y_{m}\}$ be local coordinates on $X$ and $Y$, respectively, with associated derivations $\{\partial_{x_{1}},\dots,\partial_{x_{n}}\}$ and $\{\partial_{y_{1}},\dots,\partial_{y_{m}}\}$. Then by \corref{Local-coords-over-A=00005Bf,v=00005D} an $D(\mathcal{O}_{X})$-basis for $\mathcal{D}_{X}^{(0,1)}$ is given by the set $\{\partial_{x}^{I}(\partial_{x}^{[p]})^{J}\}$ for multi-indices $I,J$ such that each entry of $I$ is contained in $\{0,1,\dots,p-1\}$; the analogous statement holds over $Y$. Therefore the set $\{\partial_{x}^{I_{1}}(\partial_{x}^{[p]})^{J_{1}}\otimes\partial_{y}^{I_{2}}(\partial_{y}^{[p]})^{J_{2}}\}$ is an $\mathcal{O}_{X\times Y}[f,v]$-basis for $\mathcal{D}_{X}^{(0,1)}\boxtimes\mathcal{D}_{Y}^{(0,1)}$; but also $\{\partial_{x}^{I_{1}}\partial_{y}^{I_{2}}(\partial_{x}^{[p]})^{J_{1}}(\partial_{y}^{[p]})^{J_{2}}\}$ is certainly an $D(\mathcal{O}_{X\times Y})$-basis for $\mathcal{D}_{X\times Y}^{(0,1)}$ and so the result follows immediately. \end{proof} Now we can define the tensor product: \begin{defn} Let $\Delta:\mathfrak{X}\to\mathfrak{X}\times\mathfrak{X}$ denote the diagonal morphism.
1) Then for $\mathcal{M}^{\cdot},\mathcal{N}^{\cdot}\in D(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ we define $\mathcal{M}^{\cdot}\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}})}^{L}\mathcal{N}^{\cdot}:=L\Delta^{*}(\mathcal{M}^{\cdot}\boxtimes\mathcal{N}^{\cdot})\in D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$, where $\mathcal{M}^{\cdot}\boxtimes\mathcal{N}^{\cdot}$ is regarded as an element of $D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}\times\mathfrak{X}}^{(0,1)}))$ via the isomorphism $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}\boxtimes\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}\tilde{=}\widehat{\mathcal{D}}_{\mathfrak{X}\times\mathfrak{X}}^{(0,1)}$.
2) For $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),\text{op}}))$ and $\mathcal{N}^{\cdot}\in D(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$, we define $\mathcal{M}^{\cdot}\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}})}^{L}\mathcal{N}^{\cdot}:=\omega_{\mathfrak{X}}\otimes_{\mathcal{O}_{\mathfrak{X}}}((\omega_{\mathfrak{X}}^{-1}\otimes_{\mathcal{O}_{\mathfrak{X}}}\mathcal{M}^{\cdot})\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}})}^{L}\mathcal{N}^{\cdot})\in D(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),\text{op}}))$
One has the analogous constructions for a smooth $X$ over $k$. \end{defn}
From the construction, one sees directly that, as an $D(\mathcal{O}_{\mathfrak{X}})$-module, the module $\mathcal{M}^{\cdot}\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}})}^{L}\mathcal{N}^{\cdot}$ agrees with the $D(\mathcal{O}_{\mathfrak{X}})$-module denoted in the same way. The issue that this construction resolves is how to put a $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$-module structure on this object.
To proceed further, it is useful to note some explicit formulas in coordinates: \begin{rem} \label{rem:Two-actions-agree}Suppose we have local coordinates $\{x_{i}\}_{i=1}^{n}$ and $\{\partial_{i}\}_{i=1}^{n}$ on $\mathfrak{X}$. Then for modules $\mathcal{M},\mathcal{N}\in\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})$ we can put an action of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ on $\mathcal{M}\otimes_{D(\mathcal{O}_{\mathfrak{X}})}\mathcal{N}$ via the following formulas: \[ \partial_{i}(m\otimes n)=\partial_{i}m\otimes n+m\otimes\partial_{i}n \] and \[ \partial_{i}^{([p]}(m\otimes n)=f\sum_{j=1}^{p-1}\partial^{[j]}(m)\otimes\partial^{[p-j]}(m)+\partial^{[p]}(m)\otimes n+m\otimes\partial^{[p]}(n) \] Taking a flat resolution of $\mathcal{N}$, this gives $\mathcal{M}\otimes_{D(\mathcal{O}_{\mathfrak{X}})}^{L}\mathcal{N}$ the structure of an element of $D(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$, which means that $\mathcal{M}\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}})}^{L}\mathcal{N}$ belongs to $D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$. This object is isomorphic to the tensor product defined above. Indeed, in local coordinates the action of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ on $\Delta^{*}(\widehat{\mathcal{D}}_{\mathfrak{X}\times\mathfrak{X}}^{(0,1)})$ is given as follows: let $\{\partial_{i},\partial'_{i}\}_{i=1}^{n}$ be local coordinate derivations on $\mathfrak{X}\times\mathfrak{X}$. Then the action is given by $\partial_{i}\cdot1=\partial_{i}+\partial_{i}'$ and $\partial_{i}^{[p]}\cdot1=f\sum_{j=1}^{p-1}\partial_{i}^{[j]}\cdot(\partial'_{i})^{[p-j]}+\partial_{i}^{[p]}+(\partial'_{i})^{[p]}$, which agrees with the above formula. \end{rem}
This allows us to prove the following useful \begin{lem} \label{lem:Juggle}(Compare \cite{key-50}, lemma 2.2.5) Let $\mathcal{M}^{\cdot},\mathcal{P}^{\cdot}$ be elements of $D(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ and $\mathcal{N}^{\cdot}\in D(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),\text{opp}}))$. Then there is an isomorphism \[ \mathcal{N}^{\cdot}\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}(\mathcal{M}^{\cdot}\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}})}^{L}\mathcal{P}^{\cdot})\tilde{\to}(\mathcal{N}^{\cdot}\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}})}^{L}\mathcal{M}^{\cdot})\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{P}^{\cdot} \] \end{lem}
\begin{proof} Let $\mathcal{M},\mathcal{P}\in\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})$ and $\mathcal{N}\in\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),\text{opp}})$. We have a map of $D(\mathcal{O}_{\mathfrak{X}})$-modules \[ \mathcal{N}\otimes_{D(\mathcal{O}_{\mathfrak{X}})}(\mathcal{M}\otimes_{D(\mathcal{O}_{\mathfrak{X}})}\mathcal{P})\to(\mathcal{N}\otimes_{D(\mathcal{O}_{\mathfrak{X}})}\mathcal{M})\otimes_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}\mathcal{P} \] simply because $D(\mathcal{O}_{\mathfrak{X}})$ is a sub-algebra of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$. Using the local description of the $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$-module action on $\mathcal{N}\otimes_{D(\mathcal{O}_{\mathfrak{X}})}\mathcal{M}$ given by \remref{Two-actions-agree}, one sees that this map factors through $\mathcal{N}\otimes_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\mathcal{M}\otimes_{D(\mathcal{O}_{\mathfrak{X}})}\mathcal{P})$ and we obtain a morphism \[ \mathcal{N}\otimes_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\mathcal{M}\otimes_{D(\mathcal{O}_{\mathfrak{X}})}\mathcal{P})\to(\mathcal{N}\otimes_{D(\mathcal{O}_{\mathfrak{X}})}\mathcal{M})\otimes_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}\mathcal{P} \] Since $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ is flat over $D(\mathcal{O}_{\mathfrak{X}})$, we can compute the associated derived functors using K-flat resolutions over $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ of $\mathcal{N}$, and $\mathcal{P}$, respectively. Doing so gives a map in the derived category \[ \mathcal{N}^{\cdot}\otimes_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}(\mathcal{M}^{\cdot}\otimes_{D(\mathcal{O}_{\mathfrak{X}})}^{L}\mathcal{P}^{\cdot})\to(\mathcal{N}^{\cdot}\otimes_{D(\mathcal{O}_{\mathfrak{X}})}^{L}\mathcal{M}^{\cdot})\otimes_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{P}^{\cdot} \] and passing to the derived completions gives the map in the statement of the lemma; to show it is an isomorphism we may reduce mod $p$ and, taking K-flat resolutions, assume that each term of both $\mathcal{N}^{\cdot}$ and $\mathcal{P}^{\cdot}$ is stalk-wise free over $\mathcal{D}_{X}^{(0,1)}$; thus the statement comes down to the claim that \[ \mathcal{D}_{X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{M}\otimes_{D(\mathcal{O}_{X})}\mathcal{D}_{X}^{(0,1)})\tilde{\to}(\mathcal{D}_{X}^{(0,1)}\otimes_{D(\mathcal{O}_{X})}\mathcal{M})\otimes_{\mathcal{D}_{X}^{(0,1)}}\mathcal{D}_{X}^{(0,1)} \] which is immediate. \end{proof} Finally, we note the following compatibility of tensor product and pull-back, which follows directly from unpacking the definitions. \begin{lem} \label{lem:Tensor-and-pull}Let $\varphi:\mathfrak{X}\to\mathfrak{Y}$ be a morphism. Then there is a canonical isomorphism $L\varphi^{*}(\mathcal{M}^{\cdot}\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{Y}})}^{L}\mathcal{N}^{\cdot})\tilde{\to}L\varphi^{*}(\mathcal{M}^{\cdot})\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}})}^{L}L\varphi^{*}(\mathcal{N}^{\cdot})$. The analogous statement holds for a morphism of smooth $k$-schemes $\varphi:X\to Y$. \end{lem}
\section{\label{sec:Push-Forward}Operations on Gauges: Push-Forward}
As above let $\varphi:\mathfrak{X}\to\mathfrak{Y}$. Now that we have both the pull-back and the left-right swap, we can define the push-forward.
We start by noting that $\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}$ carries a natural right module structure over itself (by right multiplication). Therefore, by \propref{Left-Right-Swap} there is a natural left $\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}$ gauge structure on $\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}\otimes\omega_{\mathfrak{Y}}^{-1}$. By \defref{Pullback!} there is a natural left $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}$-module structure on $\varphi^{*}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}\otimes\omega_{\mathfrak{Y}}^{-1})=L\varphi^{*}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}\otimes\omega_{\mathfrak{Y}}^{-1})$. \begin{defn} \label{def:Push!}1) Define the $(\varphi^{-1}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}),\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)})$ bimodule $\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}:=\varphi^{*}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}\otimes\omega_{\mathfrak{Y}}^{-1})\otimes\omega_{\mathfrak{X}}$; here, the right $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}$-module structure comes from the left $\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}$-module structure on $\varphi^{*}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}\otimes\omega_{\mathfrak{Y}}^{-1})$; the left $\varphi^{-1}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)})$-structure comes from the left multiplication of $\varphi^{-1}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)})$ on $\varphi^{*}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}\otimes\omega_{\mathfrak{Y}}^{-1})$.
2) Let $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$. Then we define ${\displaystyle \int_{\varphi}\mathcal{M}^{\cdot}:=R\varphi_{*}(\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot})}\in D(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}))$.
3) If we instead have $\varphi:X\to Y$ over $k$; then for $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$ we define ${\displaystyle \int_{\varphi}\mathcal{M}^{\cdot}:=R\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot})}\in D(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)}))$ where $\mathcal{D}_{Y\leftarrow X}^{(0,1)}$ is defined analogously to $\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}$.
4) if $\mathfrak{Y}=\text{Specf}(W(k))$, then we denote ${\displaystyle \mathbb{H}_{\mathcal{G}}^{\cdot}(\mathcal{M}^{\cdot}):=\int_{\varphi}\mathcal{M}^{\cdot}}$ for any $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$.
Similarly, there are push-forwards in the category of right $\mathcal{\widehat{D}}^{(0,1)}$-modules defined by ${\displaystyle \int_{\varphi}\mathcal{M}_{r}^{\cdot}:=R\varphi_{*}(\mathcal{M}_{r}^{\cdot}\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)})}$ for $\mathcal{M}_{r}^{\cdot}\in D(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))^{\text{op}}$; clearly the left-right interchange intertwines the two pushforwards. Similar remarks apply to a morphism $\varphi:X\to Y$ over $k$. \end{defn}
We begin by recording some basic compatibilities; for these note that we have the transfer bimodule $\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0)}:=\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}/(v-1)$ in the category of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$-modules, and $\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(1)}:=(\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}/(f-1))^{\widehat{}}$ (here the $()^{\widehat{}}$ denotes $p$-adic completion, which is the same as cohomological completion in this case by \propref{Basic-properties-of-the-transfer-module}). One may therefore define ${\displaystyle \int_{\varphi,0}\mathcal{M}^{\cdot}:=R\varphi_{*}(\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0)}\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0)}}^{L}\mathcal{M}^{\cdot})}$ for $\mathcal{M}^{\cdot}\in\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$ and ${\displaystyle \int_{\varphi,1}\mathcal{M}^{\cdot}:=R\varphi_{*}(\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(1)}\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0)}}^{L}\mathcal{M}^{\cdot})}$ for $\mathcal{M}^{\cdot}\in\widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)}$. As in the case of the pullback, this is not quite Berthelot's definition of these functors; because he uses the more traditional $\text{R}\lim$. However, they do agree in important cases, such as when $\varphi$ is proper and $\mathcal{M}^{\cdot}$ is coherent.
We have \begin{prop} \label{prop:push-and-complete-for-D} Let $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$.
1) ${\displaystyle (\int_{\varphi}\mathcal{M}^{\cdot})\otimes_{W(k)}^{L}k\tilde{=}\int_{\varphi}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)}$ in the category $D(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$.
2) $(\int_{\varphi}\mathcal{M}^{\cdot})^{-\infty}\tilde{=}(\int_{\varphi,0}\mathcal{M}^{\cdot,-\infty})$ where the pushforward on the right is defined as $R\varphi_{*}(\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0)}\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0)}}^{L}\mathcal{M}^{\cdot,-\infty})$.
3) If $\mathcal{M}^{\cdot}\in D_{coh}^{b}(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$, then $\widehat{((\int_{\varphi}\mathcal{M}^{\cdot})^{\infty})}\tilde{=}\int_{\varphi,1}\widehat{(\mathcal{M}^{\cdot,\infty})}$ where both uses of $\widehat{}$ denote derived completion. \end{prop}
\begin{proof} 1) We have \[ (\int_{\varphi}\mathcal{M}^{\cdot})\otimes_{W(k)}^{L}k=R\varphi_{*}(\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot})\otimes_{W(k)}^{L}k \] \[ \tilde{=}R\varphi_{*}((\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot})\otimes_{W(k)}^{L}k) \] (since $k$ is a perfect complex over $W(k)$, this is a special case of the projection formula where we consider $X$ and $Y$ as ringed spaces with the locally constant sheaf of rings $W(k)$; c.f. [Stacks], tag 0B54). We have the isomorphism \[ k\otimes_{W(k)}^{L}\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\tilde{=}\mathcal{D}_{Y\leftarrow X}^{(0,1)} \] since $\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}$ is a $p$-torsion-free sheaf; and so \[ R\varphi_{*}((\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot})\otimes_{W(k)}^{L}k)\tilde{=}R\varphi_{*}(k\otimes_{W(k)}^{L}(\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot})) \] \[ \tilde{=}R\varphi_{*}(k\otimes_{W(k)}^{L}(\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}))\tilde{=}R\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}) \] where we used that for any complex $\mathcal{N}^{\cdot}$ we have $k\otimes_{W(k)}^{L}\mathcal{N}^{\cdot}\tilde{=}k\otimes_{W(k)}^{L}\widehat{\mathcal{N}^{\cdot}}$ (c.f. \lemref{reduction-of-completion}). Now, we have \[ R\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot})\tilde{=}R\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}(\mathcal{D}_{X}^{(0,1)}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot})) \] But since $\mathcal{D}_{X}^{(0,1)}=\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}/p$ we have $\mathcal{D}_{X}^{(0,1)}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}\tilde{=}\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k$, so that the right hand side is exactly ${\displaystyle \int_{\varphi}(\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k)}$, and the result follows.
2) For any complex we have $\mathcal{M}^{\cdot,-\infty}=\mathcal{M}^{\cdot}\otimes_{D(W(k))}^{L}(D(W(k))/(v-1))$. Thus the proof is an easier variant of that of $1)$, replacing $\otimes_{W(k)}^{L}k$ with $\otimes_{D(W(k))}^{L}D(W(k))/(v-1)$.
3) We have a distinguished triangle \[ \mathcal{K}^{\cdot}\to\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1),\infty}\to\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(1)} \] where $\mathcal{K}^{\cdot}$ is a complex of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),\infty}[p^{-1}]$-modules; indeed, $\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1),\infty}$ is a $p$-torsion-free sheaf whose completion is exactly $\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(1)}$ (c.f. \propref{Basic-properties-of-the-transfer-module} and \lemref{Basic-Structure-of-D^(1)}). Thus there is a distinguished triangle \[ \mathcal{K}^{\cdot}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),\infty}}^{L}\mathcal{M}^{\cdot,\infty}\to\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1),\infty}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),\infty}}^{L}\mathcal{M}^{\cdot,\infty}\to\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(1)}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),\infty}}^{L}\mathcal{M}^{\cdot,\infty} \] and the term on the left is a complex of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),\infty}[p^{-1}]$-modules. Thus the derived completion of $\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1),\infty}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),\infty}}^{L}\mathcal{M}^{\cdot,\infty}$ is isomorphic to the derived completion of $\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(1)}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),\infty}}^{L}\mathcal{M}^{\cdot,\infty}$.
Further, we have \[ \mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(1)}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),\infty}}^{L}\mathcal{M}^{\cdot,\infty}\tilde{=}\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(1)}\otimes_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)}}^{L}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),\infty}}^{L}\mathcal{M}^{\cdot,\infty}) \] Since $\mathcal{M}^{\cdot}\in D_{coh}^{b}(\mathcal{G}(\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}))$, we have (by \propref{Completion-for-noeth}) that $\widehat{\mathcal{M}^{\cdot,\infty}}\tilde{=}\widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)}\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),\infty}}^{L}\mathcal{M}^{\cdot,\infty}$ as modules over $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)}$. Therefore we obtain \[ \mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1),\infty}\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),\infty}}^{L}\mathcal{M}^{\cdot,\infty}\tilde{=}\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(1)}\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(1)}}^{L}\widehat{(\mathcal{M}^{\cdot,\infty})} \] and so applying $R\varphi_{*}$ yields \[ R\varphi_{*}(\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1),\infty}\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1),\infty}}^{L}\mathcal{M}^{\cdot,\infty})\tilde{\to}\int_{\varphi,1}\widehat{(\mathcal{M}^{\cdot,\infty})} \] But the term on the left is isomorphic to the derived completion of ${\displaystyle \int_{\varphi}\mathcal{M}^{\cdot,\infty}}$ by \propref{Push-and-complete}. \end{proof} Now we will discuss the relationship between the $\mathcal{D}_{X}^{(0,1)}$ pushforward and the push-forwards over $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ and $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$. As usual we'll work with the functors $\mathcal{M}^{\cdot}\mapsto\mathcal{R}(\mathcal{D}_{X}^{(1)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot}\tilde{=}k[f]\otimes_{D(k)}^{L}\mathcal{M}^{\cdot}$ and $\mathcal{M}^{\cdot}\mapsto\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot}\tilde{=}k[v]\otimes_{D(k)}^{L}\mathcal{M}^{\cdot}$ which take $D(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$ to $D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)})))$ and $D(\mathcal{G}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})))$, respectively (as in \propref{Quasi-rigid=00003Dfinite-homological}).
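Recall that these two functors are the (derived) reductions modulo $v$ and modulo $f$; in particular, at the level of algebras we have
\[
\mathcal{R}(\mathcal{D}_{X}^{(1)})\tilde{=}\mathcal{D}_{X}^{(0,1)}/v,\qquad\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})\tilde{=}\mathcal{D}_{X}^{(0,1)}/f,
\]
which is why the transfer bimodules in the definition below are the corresponding reductions of $\mathcal{D}_{Y\leftarrow X}^{(0,1)}$.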
Both of the algebras $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ and $\mathcal{R}(\mathcal{D}_{X}^{(0)})$ possess transfer bimodules associated to any morphism $\varphi:X\to Y$, and hence are equipped with a push-pull formalism. In the case of $\mathcal{R}(\mathcal{D}_{X}^{(0)})$ this is well known (c.f., e.g., \cite{key-22}, chapter $1$), while in the case of $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ this theory is developed in \cite{key-11}, in the language of filtered derived categories. We shall proceed using the push-pull formalism for $\mathcal{D}_{X}^{(0,1)}$-modules that we have already developed, and discuss the relations with the other theories in section \subsecref{Hodge-and-Conjugate} below. \begin{defn} Let $\varphi:X\to Y$ be a morphism. We define a $(\varphi^{-1}\mathcal{R}(\mathcal{D}_{Y}^{(1)}),\mathcal{R}(\mathcal{D}_{X}^{(1)}))$ bimodule $\mathcal{R}_{Y\leftarrow X}^{(1)}:=\mathcal{D}_{Y\leftarrow X}^{(0,1)}/v$. Define a $(\varphi^{-1}\mathcal{\overline{R}}(\mathcal{D}_{Y}^{(0)}),\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}))$ bimodule $\mathcal{\overline{R}}_{Y\leftarrow X}^{(0)}:=\mathcal{D}_{Y\leftarrow X}^{(0,1)}/f$. Define ${\displaystyle \int_{\varphi,1}}\mathcal{M}^{\cdot}=R\varphi_{*}(\mathcal{R}_{Y\leftarrow X}^{(1)}\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}^{L}\mathcal{M}^{\cdot})$ on the category $D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)})))$, and analogously ${\displaystyle \int_{\varphi,0}}$ for $\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})$-modules. As above, there is also a push-forward for right modules defined by ${\displaystyle \int_{\varphi,1}}\mathcal{M}_{r}^{\cdot}=R\varphi_{*}(\mathcal{M}_{r}^{\cdot}\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}^{L}\mathcal{R}_{X\to Y}^{(1)})$, and analogously for right $\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})$-modules.
\end{defn}
We have the basic compatibility:
\begin{prop} If $\mathcal{M}^{\cdot}\in D_{qcoh}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$, then we have \[ {\displaystyle \mathcal{R}(\mathcal{D}_{Y}^{(1)})\otimes_{\mathcal{D}_{Y}^{(0,1)}}^{L}\int_{\varphi}\mathcal{M}^{\cdot}\tilde{=}\int_{\varphi,1}(\mathcal{R}(\mathcal{D}_{X}^{(1)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot})} \] The analogous result holds for $\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})$. \end{prop}
\begin{proof} We have \[ \mathcal{R}(\mathcal{D}_{Y}^{(1)})\otimes_{\mathcal{D}_{Y}^{(0,1)}}^{L}\int_{\varphi}\mathcal{M}^{\cdot}=\mathcal{R}(\mathcal{D}_{Y}^{(1)})\otimes_{\mathcal{D}_{Y}^{(0,1)}}^{L}R\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot}) \] \[ \tilde{=}R\varphi_{*}(\varphi^{-1}(\mathcal{R}(\mathcal{D}_{Y}^{(1)}))\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}^{L}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot})) \] (we will prove this last isomorphism in the lemma directly below). We have the isomorphism \[ \varphi^{-1}(\mathcal{R}(\mathcal{D}_{Y}^{(1)}))\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}^{L}\mathcal{D}_{Y\leftarrow X}^{(0,1)}\tilde{=}\mathcal{R}_{Y\leftarrow X}^{(1)} \] which is proved in the same way as \eqref{transfer-iso-1} above. Therefore \[ R\varphi_{*}(\varphi^{-1}(\mathcal{R}(\mathcal{D}_{Y}^{(1)}))\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}^{L}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot}))\tilde{=}R\varphi_{*}(\mathcal{R}_{Y\leftarrow X}^{(1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot}) \] \[ \tilde{=}R\varphi_{*}(\mathcal{R}_{Y\leftarrow X}^{(1)}\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}^{L}\mathcal{R}(\mathcal{D}_{X}^{(1)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot})=\int_{\varphi,1}(\mathcal{R}(\mathcal{D}_{X}^{(1)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot}) \] as claimed. The proof for the case of $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ is essentially identical. \end{proof} In the previous proof we used the \begin{lem} \label{lem:baby-projection-1}Let $\mathcal{M}^{\cdot}\in D_{qcoh}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$, and $\mathcal{N}^{\cdot}\in D_{qcoh}(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)})^{\text{opp}})$. Then there is an isomorphism \[ \mathcal{N}^{\cdot}\otimes_{\mathcal{D}_{Y}^{(0,1)}}^{L}R\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot})\tilde{=}R\varphi_{*}(\varphi^{-1}(\mathcal{N}^{\cdot})\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}^{L}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot})) \] \end{lem}
\begin{proof} (c.f. the proof of \cite{key-17}, proposition 5.3). First, we construct a canonical map \[ \mathcal{N}^{\cdot}\otimes_{\mathcal{D}_{Y}^{(0,1)}}^{L}R\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot})\to R\varphi_{*}(\varphi^{-1}(\mathcal{N}^{\cdot})\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}^{L}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot})) \] as follows: one may replace $\mathcal{N}^{\cdot}$ with a complex of $K$-flat graded $\mathcal{D}_{Y}^{(0,1)}$-modules, $\mathcal{F}^{\cdot}$. Choosing a quasi-isomorphism $\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot}\tilde{\to}\mathcal{I}^{\cdot}$, a $K$-injective complex of graded $\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})$-modules, one obtains the quasi-isomorphism \[ \mathcal{N}^{\cdot}\otimes_{\mathcal{D}_{Y}^{(0,1)}}^{L}R\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot})\tilde{\to}\mathcal{F}^{\cdot}\otimes_{\mathcal{D}_{Y}^{(0,1)}}\varphi_{*}\mathcal{I}^{\cdot} \] Then there is the obvious isomorphism \[ \mathcal{F}^{\cdot}\otimes_{\mathcal{D}_{Y}^{(0,1)}}\varphi_{*}\mathcal{I}^{\cdot}\tilde{\to}\varphi_{*}(\varphi^{-1}(\mathcal{F}^{\cdot})\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}\mathcal{I}^{\cdot}) \] and a canonical map \[ \varphi_{*}(\varphi^{-1}(\mathcal{F}^{\cdot})\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}\mathcal{I}^{\cdot})\to R\varphi_{*}((\varphi^{-1}(\mathcal{F}^{\cdot})\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}\mathcal{I}^{\cdot})) \] \[ \tilde{\to}R\varphi_{*}(\varphi^{-1}(\mathcal{N}^{\cdot})\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}^{L}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot})) \] Thus we obtain the canonical map \[ \mathcal{N}^{\cdot}\otimes_{\mathcal{D}_{Y}^{(0,1)}}^{L}R\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot})\to R\varphi_{*}(\varphi^{-1}(\mathcal{N}^{\cdot})\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}^{L}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot})) \] This map exists for all $\mathcal{N}^{\cdot}\in D(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)})^{\text{op}})$ and $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$. To check that it is an isomorphism, we may work locally on $Y$; so we suppose from now on that $Y$ is affine.
To prove this, we proceed in a similar manner to the proof of the projection formula for quasi-coherent sheaves, in the general version of \cite{key-17}, proposition 5.3. Fix $\mathcal{M}^{\cdot}\in D_{qcoh}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$. For any $\mathcal{N}^{\cdot}\in D_{qcoh}(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)})^{\text{opp}})$, we claim that $\varphi^{-1}(\mathcal{N}^{\cdot})\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}^{L}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot})$ is quasi-isomorphic to a complex in $D_{qcoh}(D(\mathcal{O}_{X}))$. To see this, we observe that any quasicoherent $\mathcal{D}_{X}^{(0,1)}$-module $\mathcal{M}$ is a quotient of the $\mathcal{D}_{X}^{(0,1)}$-module $\mathcal{D}_{X}^{(0,1)}\otimes_{D(\mathcal{O}_{X})}\mathcal{M}$ (where the $\mathcal{D}_{X}^{(0,1)}$-module structure is given by the action on the left-hand factor of the tensor product). It follows that any bounded-above complex in $D_{qcoh}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$ is quasi-isomorphic to a complex whose terms are of the form $\mathcal{D}_{X}^{(0,1)}\otimes_{D(\mathcal{O}_{X})}\mathcal{M}$ for quasi-coherent $\mathcal{M}$. Therefore any complex in $D_{qcoh}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$ is a homotopy colimit of such complexes. Hence $\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot}$ is quasi-isomorphic to a complex of quasicoherent $D(\mathcal{O}_{X})$-modules. In addition, since $Y$ is affine, $\mathcal{N}^{\cdot}$ is quasi-isomorphic to a $K$-projective complex of $\mathcal{D}_{Y}^{(0,1)}$-modules; in particular, a complex whose terms are projective $\mathcal{D}_{Y}^{(0,1)}$-modules. It follows that $\varphi^{-1}(\mathcal{N}^{\cdot})\otimes_{\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})}^{L}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot})$ is quasi-isomorphic to a complex in $D_{qcoh}(D(\mathcal{O}_{X}))$ as claimed.
Now, since $R\varphi_{*}$ commutes with arbitrary direct sums on $D_{qcoh}(D(\mathcal{O}_{X}))$ (by \cite{key-17}, lemma 1.4), we see that both sides of the arrow commute with arbitrary direct sums (over objects in $D_{qcoh}(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)})^{\text{opp}})$); so the set of objects on which the arrow is an isomorphism is closed under arbitrary direct sums. Since $Y$ is affine, the category $D_{qcoh}(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)})^{\text{op}})$ is generated by the compact objects $\{\mathcal{D}_{Y}^{(0,1)}[i]\}_{i\in\mathbb{Z}}$; therefore (as in the proof of \cite{key-17}, lemma 5.3), it actually suffices to show that the arrow is an isomorphism on $\mathcal{D}_{Y}^{(0,1)}$ itself, but this is obvious. \end{proof} This type of projection formula is so useful that we will record here a minor variant: \begin{lem} \label{lem:proj-over-D}Let $\varphi:\mathfrak{X}\to\mathfrak{Y}$ be a morphism. Let $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}))$ and $\mathcal{N}^{\cdot}\in D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1),\text{opp}}))$, such that $\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k\in D_{qcoh}(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)}))$ and $\mathcal{N}^{\cdot}\otimes_{W(k)}^{L}k\in D_{qcoh}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)})^{\text{opp}})$. Then we have \[ (\int_{\varphi}\mathcal{N}^{\cdot})\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}\tilde{\to}R\varphi_{*}(\mathcal{N}^{\cdot}\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}L\varphi^{*}\mathcal{M}^{\cdot}) \] The analogous statement holds for $\mathcal{M}^{\cdot}\in D_{qcoh}(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)}))$ and $\mathcal{N}^{\cdot}\in D_{coh}^{b}(\mathcal{G}(\mathcal{D}_{X}^{(0,1),\text{opp}}))$; as well as for the Rees algebras $\mathcal{R}(\mathcal{D}^{(1)})$ and $\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})$. \end{lem}
\begin{proof} We have that ${\displaystyle \int_{\varphi}\mathcal{N}^{\cdot}=R\varphi_{*}(\mathcal{N}^{\cdot}\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)})}$. As in the proof of \lemref{baby-projection-1}, there is a morphism \begin{equation} R\varphi_{*}(\mathcal{N}^{\cdot}\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)})\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}\to R\varphi_{*}(\mathcal{N}^{\cdot}\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}\widehat{\otimes}_{\varphi^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})}^{L}\varphi^{-1}(\mathcal{M}^{\cdot}))\label{eq:adunction} \end{equation} Indeed, one constructs the map \[ R\varphi_{*}(\mathcal{N}^{\cdot}\otimes_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)})\otimes_{\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}\to R\varphi_{*}(\mathcal{N}^{\cdot}\otimes_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}\otimes_{\varphi^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})}^{L}\varphi^{-1}(\mathcal{M}^{\cdot})) \] exactly as above; and then passes to the cohomological completion.
Since $L\varphi^{*}\mathcal{M}^{\cdot}=\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}\widehat{\otimes}_{\varphi^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})}^{L}\varphi^{-1}(\mathcal{M}^{\cdot})$ by definition, the result will follow if \eqref{eq:adunction} is an isomorphism. To prove that, apply $\otimes_{W(k)}^{L}k$ and quote the previous result. The proof in the case of the Rees algebras is completely analogous. \end{proof} Here is an important application of these ideas: \begin{lem} \label{lem:Composition-of-pushforwards}Let $\varphi:\mathfrak{X}\to\mathfrak{Y}$ and $\psi:\mathfrak{Y}\to\mathfrak{Z}$ be morphisms. There is a canonical map \[ \int_{\psi}\circ\int_{\varphi}\mathcal{M}^{\cdot}\to\int_{\psi\circ\varphi}\mathcal{M}^{\cdot} \] for any $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$, which is an isomorphism if $\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k\in D_{qcoh}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$. If $\varphi:X\to Y$ and $\psi:Y\to Z$ are morphisms, we have the analogous statements in $D(\mathcal{G}(\mathcal{D}_{Z}^{(0,1)}))$. \end{lem}
\begin{proof} As in \lemref{composition-of-pullbacks}, we have an isomorphism \[ \varphi^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Z}\leftarrow\mathfrak{Y}}^{(0,1)})\widehat{\otimes}_{\varphi^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})}^{L}\mathcal{\widehat{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\tilde{=}\mathcal{\widehat{D}}_{\mathfrak{Z}\leftarrow\mathfrak{X}}^{(0,1)} \] as $((\psi\circ\varphi)^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Z}}^{(0,1)}),\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})$ bimodules. Then we have \[ \int_{\psi}\circ\int_{\varphi}\mathcal{M}^{\cdot}=R\psi_{*}(\widehat{\mathcal{D}}_{\mathfrak{Z}\leftarrow\mathfrak{Y}}^{(0,1)}\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}}^{L}R\varphi_{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot})) \] \[ \to R\psi_{*}R\varphi_{*}(\varphi^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Z}\leftarrow\mathfrak{Y}}^{(0,1)})\widehat{\otimes}_{\varphi^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})}^{L}\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}) \] \[ \tilde{\to}R(\psi\circ\varphi)_{*}(\mathcal{\widehat{D}}_{\mathfrak{Z}\leftarrow\mathfrak{X}}^{(0,1)}\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot})=\int_{\psi\circ\varphi}\mathcal{M}^{\cdot} \] where the first arrow is constructed as in \lemref{proj-over-D} and the second isomorphism is given above. Applying the functor $\otimes_{W(k)}^{L}k$ and using \propref{Push-and-complete}, part $1)$, we reduce to proving the analogous statement for $\varphi:X\to Y$ and $\psi:Y\to Z$, where it follows exactly as in \lemref{baby-projection-1}. \end{proof} We shall also need results relating the pushforwards when $\mathcal{M}\in\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$ is already annihilated by $f$ (or $v$): \begin{prop} \label{prop:Sandwich-push}Suppose $\mathcal{M}\in\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$ satisfies $v\mathcal{M}=0$. Then ${\displaystyle \int_{\varphi}\mathcal{M}}$ is contained in the image of the functor $D(\mathcal{R}(\mathcal{D}_{Y}^{(1)})-\text{mod})\to D(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)}))$. In fact, there is an isomorphism of graded sheaves of $\mathcal{O}_{Y}[f,v]$-modules \[ R\varphi_{*}(\mathcal{R}_{Y\leftarrow X}^{(1)}\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}^{L}\mathcal{M})\tilde{=}R\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}) \] In other words, the pushforward of $\mathcal{M}$, regarded as a module over $\mathcal{R}(\mathcal{D}_{X}^{(1)})$, agrees with its pushforward as a $\mathcal{D}_{X}^{(0,1)}$-module. The analogous result holds when $f\mathcal{M}=0$. \end{prop}
\begin{proof} This is an immediate consequence of \propref{Sandwich!}. \end{proof} As a consequence of these results, we obtain: \begin{thm} \label{thm:phi-push-is-bounded}Let $\varphi:X\to Y$ be a morphism. Then, for each of the algebras $\mathcal{R}(\mathcal{D}_{X}^{(1)})$, $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$, and $\mathcal{D}_{X}^{(0,1)}$, the pushforward along $\varphi$ takes $D_{qcoh}^{b}$ to $D_{qcoh}^{b}$. If $\varphi$ is proper, then the pushforward along $\varphi$ takes $D_{coh}^{b}$ to $D_{coh}^{b}$. \end{thm}
\begin{proof} Let us start with the statement that the pushforward takes $D_{qcoh}$ to $D_{qcoh}$ in all of these cases. For this, we can argue as in the proof of \lemref{baby-projection-1}: namely, one may assume $Y$ is affine, and then if $\mathcal{M}^{\cdot}\in D_{qcoh}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$, we may replace $\mathcal{M}^{\cdot}$ by a homotopy colimit of complexes whose terms are $\mathcal{D}_{X}^{(0,1)}$-modules of the form $\mathcal{D}_{X}^{(0,1)}\otimes_{\mathcal{O}_{X}[f,v]}\mathcal{M}$, for quasi-coherent $\mathcal{M}$. Therefore $\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{M}^{\cdot}$ is quasi-isomorphic to a complex of quasicoherent $\mathcal{O}_{X}[f,v]$-modules, which implies that the cohomology sheaves of its pushforward are quasi-coherent $\mathcal{O}_{Y}[f,v]$-modules, and therefore quasi-coherent $\mathcal{D}_{Y}^{(0,1)}$-modules. The same argument works for $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ and $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$.
To prove the boundedness, we can factor $\varphi$ as a closed immersion (the graph $X\to X\times Y$) followed by the projection $X\times Y\to Y$, and, applying \lemref{Composition-of-pushforwards}, we see that it suffices to consider separately the case of a closed immersion and the case of a smooth morphism. For a closed immersion $\iota:X\to Y$ we have that the bimodule $\mathcal{D}_{X\to Y}^{(0,1)}$ is locally free over $\mathcal{D}_{X}^{(0,1)}$ (this elementary fact will be checked below in \lemref{transfer-is-locally-free}), and so the functor $\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{D}_{X\to Y}^{(0,1)}$ is exact (in particular it takes quasicoherent sheaves to quasicoherent sheaves); since $\iota_{*}$ is exact as well, the pushforward along a closed immersion is bounded.
Now, if $X\to Y$ is smooth, we have, by (the proof of) \propref{Smooth-pullback-preserves-coh}, that $\mathcal{D}_{Y\leftarrow X}^{(0,1)}$ is a coherent $\mathcal{D}_{X}^{(0,1),\text{opp}}$-module. Further, since it is locally the reduction mod $p$ of a standard module, it is rigid, so that by \propref{Quasi-rigid=00003Dfinite-homological} it is locally of finite homological dimension, and the result follows directly. Thus we see that ${\displaystyle \int_{\varphi}}$ is bounded on $D_{qcoh}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$; the same holds for the pushforward on $D_{qcoh}(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)})))$ and $D_{qcoh}(\mathcal{G}(\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})))$ by the previous proposition.
Now suppose $\varphi$ is proper. Let us say that a right $\mathcal{D}_{X}^{(0,1)}$-module is induced if it is of the form $\mathcal{F}\otimes_{D(\mathcal{O}_{X})}\mathcal{D}_{X}^{(0,1)}$ for some coherent $\mathcal{F}$ over $D(\mathcal{O}_{X})$. In this case we have \[ \int_{\varphi}(\mathcal{F}\otimes_{D(\mathcal{O}_{X})}\mathcal{D}_{X}^{(0,1)})=R\varphi_{*}(\mathcal{F}\otimes_{D(\mathcal{O}_{X})}\mathcal{D}_{X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{D}_{X\to Y}^{(0,1)}) \] \[ \tilde{=}R\varphi_{*}(\mathcal{F}\otimes_{D(\mathcal{O}_{X})}^{L}\mathcal{D}_{X}^{(0,1)}\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\varphi^{*}(\mathcal{D}_{Y}^{(0,1)}))\tilde{\to}R\varphi_{*}(\mathcal{F}\otimes_{D(\mathcal{O}_{X})}^{L}\varphi^{*}(\mathcal{D}_{Y}^{(0,1)})) \] \[ \tilde{\to}R\varphi_{*}(\mathcal{F})\otimes_{D(\mathcal{O}_{Y})}^{L}\mathcal{D}_{Y}^{(0,1)} \] Thus the result is true for any induced module. If $\mathcal{M}$ is an arbitrary coherent right $\mathcal{D}_{X}^{(0,1)}$-module, then, as a quasicoherent sheaf over $\mathcal{O}_{X}[f,v]$, it is the union of its $\mathcal{O}_{X}[f,v]$-coherent sub-sheaves. Selecting such a subsheaf which generates $\mathcal{M}$ as a $\mathcal{D}_{X}^{(0,1)}$-module, we obtain a short exact sequence \[ 0\to\mathcal{K}\to\mathcal{F}\otimes_{D(\mathcal{O}_{X})}\mathcal{D}_{X}^{(0,1)}\to\mathcal{M}\to0 \] where $\mathcal{K}$ is also coherent. Since the functor ${\displaystyle \int_{\varphi}}$ is concentrated in homological degrees $\leq d_{X/Y}$ for all coherent $\mathcal{D}_{X}^{(0,1)}$-modules, we can now deduce the coherence of ${\displaystyle \mathcal{H}^{i}(\int_{\varphi}\mathcal{M})}$ by descending induction on $i$. This proves the result for $\mathcal{D}_{X}^{(0,1)}$-modules, and we can deduce the result for $\mathcal{R}(\mathcal{D}_{X}^{(1)})$- and $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$-modules by again invoking \propref{Sandwich-push}. \end{proof} From this and the formalism of cohomological completion (specifically, \propref{coh-to-coh}), we deduce \begin{cor} \label{cor:proper-push-over-W(k)}Let $\varphi:\mathfrak{X}\to\mathfrak{Y}$ be proper. Then ${\displaystyle \int_{\varphi}}$ takes $D_{coh}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ to $D_{coh}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}))$. If $\mathcal{M}^{\cdot}\in D_{coh,F^{-1}}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ then ${\displaystyle \int_{\varphi}\mathcal{M}^{\cdot}}\in D_{coh,F^{-1}}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}))$. \end{cor}
\begin{proof} The first part follows immediately from the preceding theorem by applying $\otimes_{W(k)}^{L}k$. The second part follows from \propref{push-and-complete-for-D} (part $3)$), as well as Berthelot's theorem that ${\displaystyle \int_{\varphi,1}F^{*}\tilde{\to}F^{*}\int_{\varphi,0}}$ (c.f. \cite{key-2}, section 3.4, and also \thmref{Hodge-Filtered-Push} below). \end{proof}
\subsection{\label{subsec:Hodge-and-Conjugate}Push-forwards for $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ and $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$. }
In this section we take a close look at the theory over $k$. In particular, we study the pushforwards of modules over $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ and $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$, and compare them with more traditional filtered pushforwards found in the literature. For $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ and $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ modules themselves, we will construct the analogue of the relative de Rham resolution. This will allow us to exhibit an adjunction between ${\displaystyle \int_{\varphi}}$ and $\varphi^{\dagger}$ when $\varphi$ is smooth.
We begin with $\mathcal{R}(\mathcal{D}_{X}^{(1)})$, where we can reduce everything to the more familiar situation of $\mathcal{R}(\mathcal{D}_{X}^{(0)})$-modules using the fact that $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ is Morita equivalent to $\mathcal{R}(\mathcal{D}_{X}^{(0)})$ (c.f. \thmref{Filtered-Frobenius}).
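Concretely, and as will be used in \thmref{Hodge-Filtered-Push} below, this equivalence is implemented by the Frobenius pullback
\[
F_{X}^{*}:D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(0)})))\xrightarrow{\ \sim\ }D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)}))),\qquad\mathcal{N}^{\cdot}\mapsto F_{X}^{*}\mathcal{N}^{\cdot},
\]
so that every complex of graded $\mathcal{R}(\mathcal{D}_{X}^{(1)})$-modules may be written as $F_{X}^{*}\mathcal{N}^{\cdot}$ for a complex of graded $\mathcal{R}(\mathcal{D}_{X}^{(0)})$-modules $\mathcal{N}^{\cdot}$.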
Let $\varphi:X\to Y$. Recall that Laumon constructed in \cite{key-19} the push-forward in the filtered derived category of $\mathcal{D}_{X}^{(0)}$-modules (with respect to the symbol filtration); essentially, his work upgrades the bimodule $\mathcal{D}_{Y\leftarrow X}^{(0)}$ to a filtered $(\varphi^{-1}(\mathcal{D}_{Y}^{(0)}),\mathcal{D}_{X}^{(0)})$-bimodule via \[ F_{i}(\mathcal{D}_{Y\leftarrow X}^{(0)}):=\varphi^{-1}(F_{i}(\mathcal{D}_{Y}^{(0)})\otimes_{\mathcal{O}_{Y}}\omega_{Y}^{-1})\otimes_{\varphi^{-1}(\mathcal{O}_{Y})}\omega_{X} \] (c.f. \cite{key-19}, formula 5.1.3); then one may define ${\displaystyle \int_{\varphi}}$ via the usual formula, but using the tensor product and push-forward in the filtered derived categories. On the other hand, we can apply the Rees construction to the above filtered bimodule to obtain $\mathcal{R}(\mathcal{D}_{Y\leftarrow X}^{(0)})$, a graded $(\varphi^{-1}(\mathcal{R}(\mathcal{D}_{Y}^{(0)})),\mathcal{R}(\mathcal{D}_{X}^{(0)}))$ bimodule, which (again by the usual formula) yields a push-forward functor ${\displaystyle \int_{\varphi}}:D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(0)})))\to D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{Y}^{(0)})))$, and we have the following evident compatibility: \begin{lem} Let $\mathcal{M}^{\cdot}\in D((\mathcal{D}_{X}^{(0)},F)-\text{mod})$. Then we have \[ \mathcal{R}(\int_{\varphi}\mathcal{M}^{\cdot})\tilde{\to}\int_{\varphi}\mathcal{R}(\mathcal{M}^{\cdot}) \] In particular, the Hodge-to-deRham spectral sequence for ${\displaystyle \int_{\varphi}\mathcal{M}^{\cdot}}$ degenerates at $E_{1}$ iff each of the sheaves ${\displaystyle \mathcal{H}^{i}(\int_{\varphi}\mathcal{R}(\mathcal{M}^{\cdot}))}$ is torsion-free over the Rees parameter $f$. \end{lem}
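The last assertion is just the standard dictionary between filtered modules and their Rees modules: for a filtered module $(M,F)$ one has
\[
\mathcal{R}(M)=\bigoplus_{i}F_{i}M\cdot f^{i},\qquad\mathcal{R}(M)/(f-1)\tilde{=}M,\qquad\mathcal{R}(M)/f\tilde{=}\mathrm{gr}^{F}(M),
\]
and (under the usual boundedness conventions) a graded module arises as a Rees module exactly when it is $f$-torsion-free. Thus $f$-torsion-freeness of the sheaves ${\displaystyle \mathcal{H}^{i}(\int_{\varphi}\mathcal{R}(\mathcal{M}^{\cdot}))}$ expresses the strictness of the filtration on the cohomology of ${\displaystyle \int_{\varphi}\mathcal{M}^{\cdot}}$, which is equivalent to the degeneration of the spectral sequence at $E_{1}$.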
Next, we relate this to the pull-back and push-forward for $\mathcal{R}(\mathcal{D}_{X}^{(1)})$-modules, starting with the analogous statement for pull-back: \begin{lem} \label{lem:Hodge-Filtered-Pull}Let $\varphi:X\to Y$ be a morphism and suppose $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{Y}^{(0)})))$. Then $L\varphi^{*}\circ F_{Y}^{*}\mathcal{M}^{\cdot}\tilde{=}F_{X}^{*}\circ L\varphi^{*}\mathcal{M}^{\cdot}$. Here, the pullback on the left is in the category of $\mathcal{R}(\mathcal{D}^{(1)})$-modules, while the pullback on the right is in the category of $\mathcal{R}(\mathcal{D}^{(0)})$-modules. \end{lem}
\begin{proof} Since $\varphi\circ F_{X}=F_{Y}\circ\varphi$ we have \[ L\varphi^{*}\circ F_{Y}^{*}\mathcal{M}^{\cdot}\tilde{\to}F_{X}^{*}\circ L\varphi^{*}\mathcal{M}^{\cdot} \] as (graded) $\mathcal{O}_{X}$-modules; we need to check that this map preserves the $\mathcal{R}(\mathcal{D}_{X}^{(1)})$-module structures on both sides. This question is local, so we may suppose $X=\text{Spec}(B)$ and $Y=\text{Spec}(A)$ both possess local coordinates. Further, by taking a K-flat resolution of $\mathcal{M}^{\cdot}$ we may suppose that $\mathcal{M}^{\cdot}=\mathcal{M}$ is concentrated in a single degree. Now, as an $\mathcal{R}(\mathcal{D}_{Y}^{(1)})$-module, $F_{Y}^{*}\mathcal{M}$ possesses a connection with $p$-curvature $0$, and so the induced connection on $\varphi^{*}F_{Y}^{*}\mathcal{M}$ also has $p$-curvature $0$, and the kernel of this connection is equal to $(\varphi^{(1)})^{*}\mathcal{M}^{(1)}\subset\varphi^{*}F_{Y}^{*}\mathcal{M}$ (here $\mathcal{M}^{(1)}$ denotes $\sigma^{*}\mathcal{M}$ where $\sigma:Y^{(1)}\to Y$ is the natural isomorphism of schemes). Note that $\mathcal{M}^{(1)}$ possesses the action of $\mathcal{R}(\mathcal{D}_{Y^{(1)}}^{(0)})$ (c.f. \remref{The-inverse-to-F^*}).
Let $\{\partial_{i}\}_{i=1}^{n}$ be coordinate derivations on $X$. Then the action of $\partial_{i}^{[p]}$ on $\varphi^{*}F_{Y}^{*}\mathcal{M}$ is given (by \propref{pull-back-in-pos-char}) by first restricting $\partial_{i}^{[p]}$ to a differential operator $\varphi^{-1}(\mathcal{O}_{Y})\to\mathcal{O}_{X}$, writing the resulting operator as \[ \sum_{j=1}^{r}b_{j}^{p}\partial_{j}^{[p]}+\sum_{J}b_{J}\partial^{J} \] (where $\{\partial_{j}\}_{j=1}^{r}$ are coordinate derivations on $Y$, and $b_{j},b_{J}\in B$) and then letting $\partial_{i}^{[p]}$ act as \[ \sum_{j=1}^{r}b_{j}^{p}\partial_{j}^{[p]}+\sum_{J}b_{J}\partial^{J} \] Therefore, the action of $\partial_{i}^{[p]}$ preserves $(\varphi^{(1)})^{*}\mathcal{M}^{(1)}$ and it acts there as ${\displaystyle \partial_{i}^{[p]}(1\otimes m)=\sum_{j=1}^{r}b_{j}^{p}\cdot\partial_{j}^{[p]}(m)}$. But the action of $\{\partial_{j}^{[p]}\}$ on $\mathcal{M}^{(1)}$ defines the action of $\mathcal{R}(\mathcal{D}_{Y^{(1)}}^{(0)})$ on $\mathcal{M}^{(1)}$, and this formula simply defines the pullback from $\mathcal{R}(\mathcal{D}_{Y^{(1)}}^{(0)})$ to $\mathcal{R}(\mathcal{D}_{X^{(1)}}^{(0)})$-modules; in other words, $(\varphi^{(1)})^{*}\mathcal{M}^{(1)}=((\varphi)^{*}\mathcal{M})^{(1)}$ where $\varphi^{*}\mathcal{M}$ is the usual pullback of $\mathcal{R}(\mathcal{D}^{(0)})$-modules. Thus we see that $\varphi^{*}F_{Y}^{*}\mathcal{M}=F_{X}^{*}((\varphi)^{*}\mathcal{M})$ as $\mathcal{R}(\mathcal{D}_{X}^{(1)})$-modules, as desired. \end{proof} Now we discuss push-forward: \begin{thm} \label{thm:Hodge-Filtered-Push}Let $\mathcal{M}^{\cdot}$ be a complex of graded $\mathcal{R}(\mathcal{D}_{X}^{(1)})$-modules, and via \thmref{Filtered-Frobenius} write $\mathcal{M}^{\cdot}\tilde{=}F_{X}^{*}\mathcal{N}^{\cdot}$, where $\mathcal{N}^{\cdot}$ is a complex of graded $\mathcal{R}(\mathcal{D}_{X}^{(0)})$-modules. There is an isomorphism \[ \int_{\varphi,1}\mathcal{M}^{\cdot}\tilde{\to}F_{Y}^{*}\int_{\varphi}\mathcal{N}^{\cdot} \] where ${\displaystyle \int_{\varphi}\mathcal{N}}^{\cdot}$ is the pushforward of $\mathcal{N}^{\cdot}$ over $\mathcal{R}(\mathcal{D}_{X}^{(0)})$. \end{thm}
\begin{proof} (following \cite{key-2}, theoreme 3.4.4) By left-right interchange it is equivalent to prove the right-handed version \[ \int_{\varphi,1}F_{X}^{!}\mathcal{N}^{\cdot}\tilde{\to}F_{Y}^{!}\int_{\varphi}\mathcal{N}^{\cdot} \] for any $\mathcal{N}^{\cdot}\in D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(0)})^{\text{opp}}))$.
We have \[ \int_{\varphi,1}F_{X}^{!}(\mathcal{N}^{\cdot})=R\varphi_{*}(F_{X}^{!}(\mathcal{N}^{\cdot})\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}^{L}\mathcal{R}_{X\to Y}^{(1)})=R\varphi_{*}(\mathcal{N}^{\cdot}\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(0)})}^{L}F_{X}^{!}(\mathcal{R}(\mathcal{D}_{X}^{(0)}))\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}^{L}\mathcal{R}_{X\to Y}^{(1)}) \] Now, recall \[ \mathcal{R}_{X\to Y}^{(1)}=\varphi^{*}\mathcal{R}(\mathcal{D}_{Y}^{(1)})\tilde{=}\varphi^{*}F_{Y}^{*}F_{Y}^{!}\mathcal{R}(\mathcal{D}_{Y}^{(1)})\tilde{=}F_{X}^{*}\varphi^{*}F_{Y}^{!}\mathcal{R}(\mathcal{D}_{Y}^{(1)}) \] where the second isomorphism is \corref{Filtered-right-Frob}, and the third is by the lemma above; note that this isomorphism preserves the natural right $\varphi^{-1}(\mathcal{R}(\mathcal{D}_{Y}^{(1)}))$-module structures on both sides. It follows (c.f. \propref{F^*F^!}, part $2)$, and \corref{Filtered-right-Frob}) that \[ F_{X}^{!}(\mathcal{R}(\mathcal{D}_{X}^{(0)}))\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}^{L}\mathcal{R}_{X\to Y}^{(1)}\tilde{=}\varphi^{*}F_{Y}^{!}\mathcal{R}(\mathcal{D}_{Y}^{(0)}) \] (as $(\mathcal{R}(\mathcal{D}_{X}^{(1)}),\varphi^{-1}(\mathcal{R}(\mathcal{D}_{Y}^{(1)})))$ bimodules). Therefore \[ R\varphi_{*}(\mathcal{N}^{\cdot}\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(0)})}^{L}F_{X}^{!}(\mathcal{R}(\mathcal{D}_{X}^{(0)}))\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}^{L}\mathcal{R}_{X\to Y}^{(1)})\tilde{=}R\varphi_{*}(\mathcal{N}^{\cdot}\otimes_{\mathcal{R}(\mathcal{D}_{X}^{(0)})}^{L}\varphi^{*}F_{Y}^{!}\mathcal{R}(\mathcal{D}_{Y}^{(0)})) \] \[ \tilde{=}(\int_{\varphi}\mathcal{N}^{\cdot})\otimes_{\mathcal{R}(\mathcal{D}_{Y}^{(0)})}^{L}F_{Y}^{!}\mathcal{R}(\mathcal{D}_{Y}^{(0)}) \] where the last line is \lemref{proj-over-D}. However, \[ (\int_{\varphi}\mathcal{N}^{\cdot})\otimes_{\mathcal{R}(\mathcal{D}_{Y}^{(0)})}^{L}F_{Y}^{!}\mathcal{R}(\mathcal{D}_{Y}^{(0)})=F_{Y}^{!}\int_{\varphi}\mathcal{N}^{\cdot} \] whence the result. \end{proof} To exploit this result, we recall that the formalism of de Rham cohomology applies to $\mathcal{R}(\mathcal{D}_{X}^{(0)})$: \begin{prop} Let $\varphi:X\to Y$ be smooth of relative dimension $d$. Then the induced connection $\nabla:\mathcal{D}_{X}^{(0)}\to\mathcal{D}_{X}^{(0)}\otimes\Omega_{X/Y}^{1}(1)$ is a morphism of filtered right $\mathcal{D}_{X}^{(0)}$-modules (with respect to the symbol filtration; the symbol $(1)$ denotes a shift in the filtration). The associated de Rham complex \[ \mathcal{D}_{X}^{(0)}\to\mathcal{D}_{X}^{(0)}\otimes\Omega_{X/Y}^{1}(1)\to\mathcal{D}_{X}^{(0)}\otimes\Omega_{X/Y}^{2}(2)\to\dots\to\mathcal{D}_{X}^{(0)}\otimes\Omega_{X/Y}^{d}(d) \] is exact except at the right-most term, where the cokernel is $\mathcal{D}_{Y\leftarrow X}^{(0)}(d)$ (as a filtered module).
After applying the left-right swap and a shift in the filtration, we obtain the Spencer complex \[ \mathcal{D}_{X}^{(0)}\otimes\mathcal{T}_{X/Y}^{d}(-d)\to\mathcal{D}_{X}^{(0)}\otimes\mathcal{T}_{X/Y}^{d-1}(-d+1)\to\dots\to\mathcal{D}_{X}^{(0)}\otimes\mathcal{T}_{X/Y}(-1)\to\mathcal{D}_{X}^{(0)} \] of left filtered $\mathcal{D}_{X}^{(0)}$-modules, which is exact except at the right-most term, and the cokernel is $\mathcal{D}_{X\to Y}^{(0)}$ (as a filtered module). Applying the Rees functor yields a complex \[ \mathcal{R}(\mathcal{D}_{X}^{(0)})\otimes\mathcal{T}_{X/Y}^{d}(-d)\to\mathcal{R}(\mathcal{D}_{X}^{(0)})\otimes\mathcal{T}_{X/Y}^{d-1}(-d+1)\to\dots\to\mathcal{R}(\mathcal{D}_{X}^{(0)})\otimes\mathcal{T}_{X/Y}(-1)\to\mathcal{R}(\mathcal{D}_{X}^{(0)}) \] which is exact except at the right-most term, and the cokernel is $\mathcal{R}(\mathcal{D}_{X\to Y}^{(0)})$. \end{prop}
The proof of this is identical to that of the corresponding result in characteristic zero (\cite{key-4}, proposition 4.2); one notes that the associated graded is a Koszul resolution. Applying this resolution in the definition of the filtered push-forward, one deduces \begin{cor} Let $\varphi:X\to Y$ be smooth of relative dimension $d$. Let $\mathcal{M}$ be a filtered $\mathcal{D}_{X}^{(0)}$-module (with respect to the symbol filtration). Then there is an isomorphism \[ \int_{\varphi}\mathcal{M}[-d]\tilde{=}R\varphi_{*}(\mathcal{M}(-d)\xrightarrow{\nabla}\mathcal{M}\otimes\Omega_{X/Y}^{1}(1-d)\xrightarrow{\nabla}\mathcal{M}\otimes\Omega_{X/Y}^{2}(2-d)\xrightarrow{\nabla}\dots\xrightarrow{\nabla}\mathcal{M}\otimes\Omega_{X/Y}^{d}) \] in the filtered derived category of $\mathcal{O}_{Y}$-modules. \end{cor}
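For example, taking $\mathcal{M}=\mathcal{O}_{X}$ with its tautological filtration, the connection $\nabla$ is the relative de Rham differential and the corollary reads
\[
\int_{\varphi}\mathcal{O}_{X}[-d]\tilde{=}R\varphi_{*}(\mathcal{O}_{X}(-d)\xrightarrow{d}\Omega_{X/Y}^{1}(1-d)\xrightarrow{d}\dots\xrightarrow{d}\Omega_{X/Y}^{d}),
\]
so that the cohomology sheaves of ${\displaystyle \int_{\varphi}\mathcal{O}_{X}}$ are the relative de Rham cohomology sheaves of $X/Y$, with (up to the indexing conventions in force here) the filtration induced by the naive filtration of the relative de Rham complex, i.e. the Hodge filtration.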
In fact, with a little more work, one can show that, for any $i$, the $\mathcal{D}_{Y}^{(0)}$-module structure on the sheaf ${\displaystyle \mathcal{H}^{i}(\int_{\varphi}\mathcal{M})}$ is given by the Gauss-Manin connection (c.f., e.g. \cite{key-51}, proposition 1.4). Thus the push-forward for $\mathcal{R}(\mathcal{D}_{X}^{(1)})$-modules is exactly the ``Frobenius pullback of Gauss-Manin.''
As another corollary, we have \begin{cor} \label{cor:sm-adunction-for-filtered-D}Let $\varphi:X\to Y$ be smooth of relative dimension $d$.
1) There is an isomorphism $R\underline{\mathcal{H}om}{}_{\mathcal{R}(\mathcal{D}_{X}^{(0)})}(\mathcal{R}(\mathcal{D}_{X\to Y}^{(0)}),\mathcal{R}(\mathcal{D}_{X}^{(0)}))\tilde{=}\mathcal{R}(\mathcal{D}_{Y\leftarrow X}^{(0)})(d)[-d]$ as $(\varphi^{-1}(\mathcal{R}(\mathcal{D}_{Y}^{(0)})),\mathcal{R}(\mathcal{D}_{X}^{(0)}))$ bimodules.
2) There is an isomorphism of functors \[ R\varphi_{*}R\underline{\mathcal{H}om}_{\mathcal{R}(\mathcal{D}_{X}^{(0)})}(\varphi^{\dagger}\mathcal{N}^{\cdot},\mathcal{M}^{\cdot})\tilde{\to}R\underline{\mathcal{H}om}{}_{\mathcal{R}(\mathcal{D}_{Y}^{(0)})}(\mathcal{N}^{\cdot},\int_{\varphi}\mathcal{M}^{\cdot}(d)) \] for any $\mathcal{N}^{\cdot}\in D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{Y}^{(0)})))$ and any $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(0)})))$. The analogous isomorphism holds for $\mathcal{R}(\mathcal{D}_{X}^{(1)})$-modules. \end{cor}
\begin{proof} Part $1)$ follows directly from the previous proposition; compare \cite{key-4}, propositions 4.2 and 4.19. Then $2)$ follows from $1)$, as in \cite{key-4}, Theorem 4.40 (we'll give the argument below in a slightly different context in \corref{smooth-adjunction}). Finally, the statement for $\mathcal{R}(\mathcal{D}_{X}^{(1)})$-modules follows from the Frobenius descent (\lemref{Hodge-Filtered-Pull} and \thmref{Hodge-Filtered-Push}). \end{proof} Next we are going to give the analogue of these results for the push-forward of $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$-modules, and compare with the constructions of \cite{key-11}, section 3.4. We start with the analogues of the de Rham resolution and the adjunction for smooth morphisms. Although $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ does possess a canonical flat connection, the resulting (relative) de Rham complex is not a resolution of a transfer bimodule. Instead, we consider the action of the center \[ \mathcal{Z}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}))\tilde{=}\mathcal{O}_{T^{*}X^{(1)}}[v] \]
The action map $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{X^{(1)}}}\mathcal{T}_{X^{(1)}}(-1)\to\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ yields (by dualizing) a map \[ \Theta:\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\to\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{X^{(1)}}}\Omega_{X^{(1)}}^{1}(1) \] which makes $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ into a Higgs sheaf over $X^{(1)}$. In particular we have $\Theta\circ\Theta=0$ and so we can form the complex $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{X^{(1)}}}\Omega_{X^{(1)}}^{i}(i)$ with the differential induced from $\Theta$. In addition, we can form the analogue of the Spencer complex, whose terms are $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{X^{(1)}}}\mathcal{T}_{X^{(1)}}^{i}(-i)$.
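In local coordinates this is quite explicit: if $x_{1},\dots,x_{n}$ are local coordinates on $X$ with dual derivations $\partial_{1},\dots,\partial_{n}$, then the image of $\mathcal{T}_{X^{(1)}}$ in the center is spanned over $\mathcal{O}_{X^{(1)}}$ by the central elements $\partial_{i}^{p}$, and (up to the grading shifts indicated above) the Higgs field is
\[
\Theta(P)=\sum_{i=1}^{n}P\,\partial_{i}^{p}\otimes dx_{i}^{(1)},
\]
where $dx_{i}^{(1)}$ denotes the $1$-form on $X^{(1)}$ corresponding to $dx_{i}$. In particular, the complexes just introduced are, locally, Koszul complexes on the central elements $\partial_{1}^{p},\dots,\partial_{n}^{p}$; this is how they will be used in the proof of \lemref{Koszul-Res-For-R-bar} below.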
Now let $\varphi:X\to Y$ be a smooth morphism of relative dimension $d$. Let $X_{Y}^{(1)}\to Y$ be the base change of this morphism over the absolute Frobenius on $Y$. Then we can perform the above constructions for $\Omega_{X_{Y}^{(1)}/Y}^{i}$ instead of $\Omega_{X^{(1)}}^{i}$. We have \begin{lem} \label{lem:Koszul-Res-For-R-bar} The complex $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{X_{Y}^{(1)}}}\mathcal{T}_{X_{Y}^{(1)}/Y}^{i}(-i)$ is exact except at the right-most term. The image of the map \[ \overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{X_{Y}^{(1)}}}\mathcal{T}_{X_{Y}^{(1)}/Y}(-1)\to\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}) \] is the central ideal $\mathcal{J}$ generated by $\mathcal{T}_{X_{Y}^{(1)}/Y}\subset\mathcal{Z}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}))$. The cokernel of the map, $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})/\mathcal{J}$, carries the structure of a right $\mathcal{D}_{X/Y}^{(0)}$-module, of $p$-curvature zero; this action commutes with the natural left $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$-module structure on $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})/\mathcal{J}$. The cokernel of the associated map $(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})/\mathcal{J})\otimes_{\mathcal{O}_{X/Y}}\mathcal{T}_{X/Y}\to\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})/\mathcal{J}$ is isomorphic to $\mathcal{\overline{R}}_{X\to Y}^{(0)}$.
The complex $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{X_{Y}^{(1)}}}\Omega_{X_{Y}^{(1)}/Y}^{i}(i)$ is exact except at the right-most term. The cokernel of the map \[ \overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{X_{Y}^{(1)}}}\Omega_{X_{Y}^{(1)}/Y}^{d-1}(d-1)\to\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{X_{Y}^{(1)}}}\Omega_{X_{Y}^{(1)}/Y}^{d}(d) \] denoted $\mathcal{K}_{X/Y}$, carries the structure of a left $\mathcal{D}_{X/Y}^{(0)}$-module, of $p$-curvature zero; this action commutes with the natural right $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$-module structure on $\mathcal{K}_{X/Y}$. The kernel of the associated connection on $\mathcal{K}_{X/Y}$ is isomorphic to $\mathcal{\overline{R}}_{Y\leftarrow X}^{(0)}(d)$. \end{lem}
\begin{proof} Choose local coordinates on $X$ for which $\text{Der}(\mathcal{O}_{X})$ is the free module on $\{\partial_{1},\dots\partial_{n}\}$ and $\text{Der}_{\mathcal{O}_{Y}}(\mathcal{O}_{X})=\{\partial_{n-d+1},\dots\partial_{n}\}$. Then the complex under consideration is simply the Koszul complex for the elements $\{\partial_{n-d+1}^{p},\dots,\partial_{n}^{p}\}$, which proves the exactness statements. Furthermore, as the elements $\{\partial_{n-d+1}^{p},\dots,\partial_{n}^{p}\}$ are central in $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$, we see that $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})/\mathcal{J}$ has the structure of a left and right $\mathcal{D}_{X}^{(0)}$-module (we are here using the fact that $\mathcal{D}_{X}^{(0)}$ is the degree $0$ part of $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$). Now, we have \[ \mathcal{\overline{R}}_{X\to Y}^{(0)}=\text{coker}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{X_{Y}^{(1)}}}\mathcal{T}_{X/Y}(-1)\to\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})) \] \[ =\text{coker}((\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})/\mathcal{J})\otimes_{\mathcal{O}_{X/Y}}\mathcal{T}_{X/Y}\to\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})/\mathcal{J}) \] since $X\to Y$ is smooth. The second statement follows similarly. \end{proof} Now we can give the analogue of \corref{sm-adunction-for-filtered-D}. It reads: \begin{cor} \label{cor:sm-adjunction-for-R-bar}Let $\varphi:X\to Y$ be smooth of relative dimension $d$.
1) There is an isomorphism $R\underline{\mathcal{H}om}{}_{\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})}(\mathcal{\overline{R}}(\mathcal{D}_{X\to Y}^{(0)}),\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)}))\tilde{=}\mathcal{\overline{R}}_{Y\leftarrow X}^{(0)}(d)[-d]$ as $(\varphi^{-1}(\mathcal{\overline{R}}(\mathcal{D}_{Y}^{(0)})),\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}))$ bimodules.
2) There is an isomorphism of functors \[ R\varphi_{*}R\underline{\mathcal{H}om}_{\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}(\varphi^{\dagger,(0)}\mathcal{N}^{\cdot},\mathcal{M}^{\cdot})\tilde{\to}R\underline{\mathcal{H}om}{}_{\mathcal{\overline{R}}(\mathcal{D}_{Y}^{(0)})}(\mathcal{N}^{\cdot},\int_{\varphi,0}\mathcal{M}^{\cdot}(d)) \] for any $\mathcal{N}^{\cdot}\in D(\mathcal{G}(\mathcal{\overline{R}}(\mathcal{D}_{Y}^{(0)})))$ and any $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})))$. \end{cor}
\begin{proof} As in the proof of \corref{smooth-adjunction} below, $2)$ follows formally from $1)$. To see $1)$, we note that for any $\mathcal{N}\in\mathcal{G}(\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)}))$ the complex $R\underline{\mathcal{H}om}{}_{\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})/\mathcal{J},\mathcal{N})$ can be considered as a complex of left $\mathcal{D}_{X/Y}^{(0)}$-modules with $p$-curvature $0$ (as $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})/\mathcal{J}$ is a right $\mathcal{D}_{X/Y}^{(0)}$-module of $p$-curvature zero, and this action commutes with the left $\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})$-action). As Cartier descent for $\mathcal{D}_{X/Y}^{(0)}$-modules of $p$-curvature $0$ is an exact functor, applying the previous lemma we obtain \[ R\underline{\mathcal{H}om}{}_{\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})}(\mathcal{\overline{R}}(\mathcal{D}_{X\to Y}^{(0)}),\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)}))\tilde{=}R\underline{\mathcal{H}om}{}_{\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})/\mathcal{J},\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)}))^{\nabla} \] \[ \tilde{=}(\mathcal{K}_{X/Y}[-d])^{\nabla}=\mathcal{\overline{R}}_{Y\leftarrow X}^{(0)}(d)[-d] \] as desired. \end{proof} Now we'll give the relation of our pushforward to the constructions of \cite{key-11}, section 3.4. We recall that to any morphism $\varphi:X\to Y$ we may attach the diagram \[ T^{*}X\xleftarrow{d\varphi}X\times_{Y}T^{*}Y\xrightarrow{\pi}T^{*}Y \] and we use the same letters to denote the products of these morphisms with $\mathbb{A}^{1}$. We have the following analogue of \cite{key-52}, proposition 3.7 (c.f. also \cite{key-11}, theorem 3.11): \begin{lem} \label{lem:Bez-Brav}There is an equivalence of graded Azumaya algebras $(d\varphi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\sim(\pi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})$. \end{lem}
\begin{proof} Consider the (graded) Azumaya algebra $\mathcal{A}:=(d\varphi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{(X\times_{Y}T^{*}Y)^{(1)}}[v]}(\pi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})^{\text{opp}}$ on $(X\times_{Y}T^{*}Y)^{(1)}\times\mathbb{A}^{1}$. It is enough to find a (graded) splitting module for $\mathcal{A}$; i.e., a graded $\mathcal{A}$-module which is locally free of rank $p^{\text{dim}(X)+\text{dim}(Y)}$ over $\mathcal{O}_{(X\times_{Y}T^{*}Y)^{(1)}}[v]$.
The graded $(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}),\varphi^{-1}(\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})))$ bimodule $\overline{\mathcal{R}}_{X\to Y}:=\varphi^{*}\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})$ inherits the structure of an $\mathcal{A}$-module; we claim it is locally free over $\mathcal{O}_{(X\times_{Y}T^{*}Y)^{(1)}}[v]$ of the correct rank. This can be checked after inverting $v$ and setting $v=0$; upon inverting $v$ it becomes (via the isomorphisms $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})[v^{-1}]\tilde{=}\mathcal{D}_{X}^{(0)}[v,v^{-1}]$ and $\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})[v^{-1}]\tilde{=}\mathcal{D}_{Y}^{(0)}[v,v^{-1}]$) a direct consequence of \cite{key-52}, proposition 3.7. After setting $v=0$ we have $\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})/v=\text{gr}(\mathcal{D}_{Y}^{(0)})=\overline{\mathcal{D}}_{Y}^{(0)}\otimes_{\mathcal{O}_{Y^{(1)}}}\mathcal{O}_{T^{*}Y^{(1)}}$; and similarly for $X$. Therefore \[ \varphi^{*}\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})/v\tilde{=}\mathcal{O}_{X}\otimes_{\varphi^{-1}(\mathcal{O}_{Y})}\varphi^{-1}(\overline{\mathcal{D}}_{Y}^{(0)}\otimes_{\mathcal{O}_{Y^{(1)}}}\mathcal{O}_{T^{*}Y^{(1)}}) \] \[ \tilde{=}\mathcal{O}_{X}\otimes_{\varphi^{-1}(\mathcal{O}_{Y})}\varphi^{-1}(\overline{\mathcal{D}}_{Y}^{(0)})\otimes_{\varphi^{-1}(\mathcal{O}_{Y^{(1)}})}\varphi^{-1}(\mathcal{O}_{T^{*}Y^{(1)}}) \] But $\varphi^{-1}(\overline{\mathcal{D}}_{Y}^{(0)})$ is locally free of rank $p^{\text{dim}(Y)}$ over $\varphi^{-1}(\mathcal{O}_{Y})$, and $\mathcal{O}_{X}$ is locally free of rank $p^{\text{dim}(X)}$ over $\mathcal{O}_{X^{(1)}}$; so $\varphi^{*}\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})/v$ is locally free of rank $p^{\text{dim}(X)+\text{dim}(Y)}$ over $\mathcal{O}_{(X\times_{Y}T^{*}Y)^{(1)}}$ as claimed. \end{proof} Next, we have the following straightforward: \begin{lem} Let $\varphi:X\to Y$ be smooth. Then $d\varphi$ is a closed immersion, and we may regard the algebra $(d\varphi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ as a (graded) central quotient of $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$. The obvious functor $(d\varphi)_{*}:D(\mathcal{G}((d\varphi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})))\to D(\mathcal{G}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})))$ admits a right adjoint $(d\varphi)^{!}$ defined by $\mathcal{M}^{\cdot}\to R\underline{\mathcal{H}om}_{\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}((d\varphi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}),\mathcal{M}^{\cdot})$. \end{lem}
Therefore we obtain \begin{cor} \label{cor:Filtered-Bez-Brav}Let $C:D(\mathcal{G}((d\varphi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})))\to D(\mathcal{G}((\pi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})))$ denote the equivalence of categories resulting from \lemref{Bez-Brav}. Then, when $\varphi:X\to Y$ is smooth of relative dimension $d$, there is an isomorphism of functors \[ \int_{\varphi,0}\tilde{\to}R\pi_{*}^{(1)}\circ C\circ(d\varphi^{(1)})^{!}[-d]:D(\mathcal{G}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})))\to D(\mathcal{G}(\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)}))) \] Therefore, the functor ${\displaystyle \int_{\varphi,0}}[d]$ agrees, under the application of the Rees functor, with the pushforward of conjugate-filtered derived categories constructed in \cite{key-11}, section 3.4. \end{cor}
\begin{proof} (in the spirit of \cite{key-11}, proposition 3.12) We have, for any $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})))$, \[ C\circ(d\varphi)^{!}(\mathcal{M}^{\cdot})=C\circ R\underline{\mathcal{H}om}_{\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}((d\varphi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}),\mathcal{M}^{\cdot}) \] \[ \tilde{=}\underline{\mathcal{H}om}_{(d\varphi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}(\varphi^{*}\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)}),R\underline{\mathcal{H}om}_{\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}((d\varphi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}),\mathcal{M}^{\cdot})) \] Since $\varphi^{*}\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})$ is locally projective over $(d\varphi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$, this is canonically isomorphic to \[ R\underline{\mathcal{H}om}_{\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}(\varphi^{*}\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})\otimes_{(d\varphi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}(d\varphi^{(1)})^{*}\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}),\mathcal{M}^{\cdot}) \] \[ =R\underline{\mathcal{H}om}_{\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}(\varphi^{*}\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)}),\mathcal{M}^{\cdot}) \] so that \[ R\pi_{*}^{(1)}\circ C\circ(d\varphi^{(1)})^{!}(\mathcal{M}^{\cdot})\tilde{\to}R\pi_{*}^{(1)}R\underline{\mathcal{H}om}_{\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}(\varphi^{*}\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)}),\mathcal{M}^{\cdot}) \] \[ \tilde{=}R\varphi_{*}R\underline{\mathcal{H}om}_{\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}(\varphi^{*}\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)}),\mathcal{M}^{\cdot}) \] But the right-hand functor is canonically isomorphic to ${\displaystyle \int_{\varphi,0}}$ by smooth adjunction (\corref{sm-adjunction-for-R-bar}). \end{proof} From this description it follows directly (c.f. \cite{key-11}, lemma 3.18) that (up to a renumbering) the spectral sequence associated to the filtration on ${\displaystyle \int_{\varphi}\mathcal{O}_{X}}$ agrees with the usual conjugate spectral sequence; i.e., the ``second spectral sequence'' for $R\varphi_{dR,*}(\mathcal{O}_{X})$ as discussed in \cite{key-12}.
\subsection{Adjunction for a smooth morphism, base change, and the projection formula}
In this section, we prove adjunction for a smooth morphism $\varphi:\mathfrak{X}\to\mathfrak{Y}$ and the projection formula for an arbitrary morphism; as consequences we obtain the smooth base change and the Kunneth formula, in fairly general contexts. To start off, let us recall: \begin{prop} For a smooth morphism $\varphi:\mathfrak{X}\to\mathfrak{Y}$ there is an isomorphism of sheaves $\mathcal{R}\mathcal{H}om_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)})\tilde{=}\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0)}[-d_{X/Y}]$. \end{prop}
This is proved identically to the analogous fact for $\mathcal{D}_{X}^{(0)}$ and $\mathcal{R}(\mathcal{D}_{X}^{(0)})$-modules, as discussed above in \corref{sm-adunction-for-filtered-D}. \begin{prop} For a smooth morphism $\varphi:\mathfrak{X}\to\mathfrak{Y}$ of relative dimension $d$, there is an isomorphism $\mathcal{R}\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})\tilde{=}\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}(d)[-d]$ \end{prop}
\begin{proof} We have \[ \mathcal{R}\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})\otimes_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)} \] \[ \tilde{=}\mathcal{R}\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})\otimes_{D(W(k))}^{L}W(k)[f,v]/(v-1) \] \[ \tilde{=}\mathcal{R}\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}\otimes_{D(W(k))}^{L}W(k)[f,v]/(v-1),\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}\otimes_{D(W(k))}^{L}W(k)[f,v]/(v-1)) \] \[ \tilde{=}R\mathcal{H}om_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}) \] and \[ \mathcal{R}\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})\otimes_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{D}_{X}^{(0,1)}\tilde{=}\mathcal{R}\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})\otimes_{W(k)}^{L}k \] \[ \tilde{=}R\mathcal{H}om_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{D}_{X\to Y}^{(0,1)},\mathcal{D}_{X}^{(0,1)}) \] By the same token, we have
\[ R\underline{\mathcal{H}om}{}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{D}_{X\to Y}^{(0,1)},\mathcal{D}_{X}^{(0,1)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{R}(\mathcal{D}_{X}^{(1)})\tilde{=}R\underline{\mathcal{H}om}{}_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}(\mathcal{R}_{X\to Y}^{(1)},\mathcal{R}(\mathcal{D}_{X}^{(1)})) \] and the analogous statement for $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$. Applying the smooth adjunction for $\mathcal{R}(\mathcal{D}_{X}^{(1)})$-modules (\corref{sm-adunction-for-filtered-D}, part $2$) to the case where $\mathcal{N}^{\cdot}=\mathcal{R}(\mathcal{D}_{Y}^{(1)})$ and $\mathcal{M}^{\cdot}=\mathcal{R}(\mathcal{D}_{X}^{(1)})$, we have an isomorphism \[ R\underline{\mathcal{H}om}{}_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}(\mathcal{R}_{X\to Y}^{(1)},\mathcal{R}(\mathcal{D}_{X}^{(1)}))\tilde{=}\mathcal{R}_{Y\leftarrow X}^{(1)}(d)[-d] \] and by \corref{sm-adjunction-for-R-bar} we have \[ R\underline{\mathcal{H}om}{}_{\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})}(\mathcal{\overline{R}}(\mathcal{D}_{X\to Y}^{(0)}),\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)}))\tilde{=}\mathcal{\overline{R}}_{Y\leftarrow X}^{(0)}(d)[-d] \] Furthermore, using the relative de Rham resolution for $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$-modules (or, equivalently, the previous proposition) we have $\mathcal{R}\mathcal{H}om_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)})\tilde{=}\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0)}[-d_{X/Y}]$.
On the other hand, we have the short exact sequence \[ \mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})\to\mathcal{D}_{X}^{(0,1)}\to\mathcal{R}(\mathcal{D}_{X}^{(1)})(-1) \] which by \propref{Sandwich!} yields the distinguished triangle \[ R\underline{\mathcal{H}om}{}_{\bar{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}(\mathcal{\overline{R}}_{X\to Y},\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)}))\to R\underline{\mathcal{H}om}{}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{D}_{X\to Y}^{(0,1)},\mathcal{D}_{X}^{(0,1)}) \] \[ \to R\underline{\mathcal{H}om}{}_{\mathcal{R}(\mathcal{D}_{X}^{(1)})}(\mathcal{R}_{X\to Y},\mathcal{R}(\mathcal{D}_{X}^{(1)}))(-1) \] which implies that $R\underline{\mathcal{H}om}{}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{D}_{X\to Y}^{(0,1)},\mathcal{D}_{X}^{(0,1)})$ is concentrated in a single homological degree (namely $d$). So, since $R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})$ is cohomologically complete, we see that $\mathcal{H}^{d}(R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ is $p$-torsion-free and concentrated in degree $0$. We also see, by \propref{coh-to-coh}, that this module is coherent over $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ (each of $\mathcal{R}_{Y\leftarrow X}^{(1)}$ and $\mathcal{\overline{R}}_{Y\leftarrow X}^{(0)}$ are coherent, since $X\to Y$ is smooth). Further, since $R\underline{\mathcal{H}om}{}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{D}_{X\to Y}^{(0,1)},\mathcal{D}_{X}^{(0,1)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{R}(\mathcal{D}_{X}^{(1)})$ and $R\underline{\mathcal{H}om}{}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{D}_{X\to Y}^{(0,1)},\mathcal{D}_{X}^{(0,1)})\otimes_{\mathcal{D}_{X}^{(0,1)}}^{L}\mathcal{\overline{R}}(\mathcal{D}_{X}^{(0)})$ are concentrated in degree $d$ as well, we see that $\text{im}(f)=\text{ker}(v)$ and $\text{im}(v)=\text{ker}(f)$ on $\mathcal{H}^{d}(R\underline{\mathcal{H}om}{}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{D}_{X\to Y}^{(0,1)},\mathcal{D}_{X}^{(0,1)}))$ (by \lemref{Basic-Facts-on-Rigid}). Furthermore, the distinguished triangle above now yields the short exact sequence \[ \mathcal{\overline{R}}_{Y\leftarrow X}^{(0)}(d)\to\mathcal{H}^{d}(R\underline{\mathcal{H}om}{}_{\mathcal{D}_{X}^{(0,1)}}(\mathcal{D}_{X\to Y}^{(0,1)},\mathcal{D}_{X}^{(0,1)}))\to\mathcal{R}_{Y\leftarrow X}^{(1)}(d-1) \] and since $\mathcal{R}_{Y\leftarrow X}^{(1)}$ is $f$-torsion-free, we see that $\text{im}(v)=\text{ker}(f)=\mathcal{\overline{R}}_{Y\leftarrow X}^{(0)}(d)$ and so $\mathcal{H}^{d}(R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ satisfies the conditions of \propref{Baby-Mazur}. So we may conclude that the module $\mathcal{H}^{d}(R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ is standard. Furthermore, we see that the grading on $\mathcal{\overline{R}}_{Y\leftarrow X}^{(0)}(d)$ is zero in degrees $<-d$ and is nontrivial in degree $-d$ and above. Therefore, the index (as defined directly below \defref{Standard!}) is $d$. 
Since we identified $\mathcal{H}^{d}(R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})^{-\infty})$ with $\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0)}$ we see that \[
\mathcal{H}^{d_{X/Y}}(R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})^{i-d})=\{m\in\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0)}[p^{-1}]|p^{i}m\in\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0)}\} \] which is exactly the definition of $\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}(d)$. \end{proof} From this one deduces \begin{cor} \label{cor:smooth-adjunction}Let $\varphi:\mathfrak{X}\to\mathfrak{Y}$ be smooth of relative dimension $d$; let $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ and $\mathcal{N}^{\cdot}\in D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}))$. Then there is an isomorphism of functors \[ R\varphi_{*}R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\varphi^{\dagger}\mathcal{N}^{\cdot},\mathcal{M}^{\cdot})\tilde{\to}R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}}(\mathcal{N}^{\cdot},\int_{\varphi}\mathcal{M}^{\cdot}(d)) \] In particular, if $\varphi$ is also proper, then since both $\varphi^{\dagger}$ and ${\displaystyle \int_{\varphi}}$ preserve $D_{coh}^{b}$, we obtain that these functors form an adjoint pair on $D_{coh}^{b}$.
Further, the analogous isomorphism for $\varphi:X\to Y$ holds, and in this setting the functors are adjoint on $D_{qcoh}^{b}$ (even if $\varphi$ is not proper). \end{cor}
\begin{proof} (following \cite{key-4}, Theorem 4.40). We have \[ R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\varphi^{\dagger}\mathcal{N}^{\cdot},\mathcal{M}^{\cdot})\tilde{\to}R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}\widehat{\otimes}_{\varphi^{-1}\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}}^{L}\varphi^{-1}\mathcal{N}^{\cdot},\mathcal{M}^{\cdot})[d] \] \[ \tilde{\to}R\underline{\mathcal{H}om}{}_{\varphi^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})}(\varphi^{-1}\mathcal{N}^{\cdot},R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\mathcal{M}^{\cdot}))[d] \] To prove the last isomorphism, one may reduce mod $p$, and then apply \lemref{basic-hom-tensor} (part $1$), noting that $\mathcal{D}_{X\to Y}^{(0,1)}$ is faithfully flat over $\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})$.
Further, we have \[ R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\mathcal{M}^{\cdot})\tilde{\leftarrow}R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}\tilde{=}\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}(d)[-d] \] where the first isomorphism again follows by reduction mod $p$ and then applying the fact that $\mathcal{D}_{X\to Y}^{(0,1)}$ is (locally) isomorphic to a bounded complex of projective $\mathcal{D}_{X}^{(0,1)}$-modules (by \propref{Quasi-rigid=00003Dfinite-homological}) and the second isomorphism is the previous proposition. Applying this to the previous isomorphism we obtain \[ R\underline{\mathcal{H}om}{}_{\varphi^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})}(\varphi^{-1}\mathcal{N}^{\cdot},R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)},\mathcal{M}^{\cdot}))[d]\tilde{=}R\underline{\mathcal{H}om}{}_{\varphi^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})}(\varphi^{-1}\mathcal{N}^{\cdot},\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}(d)) \] Then applying $R\varphi_{*}$ we obtain \[ R\varphi_{*}R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\varphi^{\dagger}\mathcal{N}^{\cdot},\mathcal{M}^{\cdot}) \] \[ \tilde{=}R\varphi_{*}R\underline{\mathcal{H}om}{}_{\varphi^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})}(\varphi^{-1}\mathcal{N}^{\cdot},\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}(d)) \] \[ \tilde{\to}R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}}(\mathcal{N}^{\cdot},R\varphi_{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}(d)))\tilde{\to}R\underline{\mathcal{H}om}{}_{\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}}(\mathcal{N}^{\cdot},\int_{\varphi}\mathcal{M}^{\cdot}(d)) \] where the final isomorphism is the adjunction between $\varphi^{-1}$ and $R\varphi_{*}$. One applies analogous reasoning for $\varphi:X\to Y$. \end{proof} Now we prove the projection formula, and then give the smooth base change and Kunneth formulas in this context. We start with \begin{thm} \label{thm:Projection-Formula}(Projection Formula) Let $\varphi:\mathfrak{X}\to\mathfrak{Y}$ be a morphism. Let $\mathcal{M}^{\cdot}\in D_{cc}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ and $\mathcal{N}^{\cdot}\in D_{cc}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}))$ be such that $\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k\in D_{qcoh}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$ and $\mathcal{N}^{\cdot}\otimes_{W(k)}^{L}k\in D_{qcoh}(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)}))$.
Then we have \[ \int_{\varphi}(L\varphi^{*}(\mathcal{N}^{\cdot})\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}})}^{L}\mathcal{M}^{\cdot})\tilde{\to}\mathcal{N}^{\cdot}\otimes_{D(\mathcal{O}_{\mathfrak{Y}})}^{L}\int_{\varphi}\mathcal{M}^{\cdot} \] \end{thm}
The proof works essentially the same way as the complex analytic one (c.f. \cite{key-50}, theorem 2.3.19). In particular, we use \lemref{proj-over-D}, as well as the tensor product juggling lemma \lemref{Juggle} \begin{proof} By the left-right interchange it suffices to prove \[ \int_{\varphi}(\mathcal{M}_{r}^{\cdot}\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}})}^{L}L\varphi^{*}\mathcal{N}^{\cdot})\tilde{=}\int_{\varphi}(\mathcal{M}_{r}^{\cdot})\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{Y}})}^{L}\mathcal{N}^{\cdot} \] where $\mathcal{M}_{r}^{\cdot}=\omega_{\mathfrak{X}}\otimes_{\mathcal{O}_{\mathfrak{X}}}\mathcal{M}^{\cdot}$. We have \[ \int_{\varphi}(\mathcal{M}_{r}^{\cdot})\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{Y}})}^{L}\mathcal{N}^{\cdot}\tilde{=}\int_{\varphi}(\mathcal{M}_{r}^{\cdot})\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}}^{L}(\mathcal{N}^{\cdot}\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{Y}})}^{L}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}) \] \[ \tilde{=}R\varphi_{*}(\mathcal{M}_{r}^{\cdot}\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}L\varphi^{*}(\mathcal{N}^{\cdot}\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{Y}})}^{L}\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)}))\tilde{=}R\varphi_{*}(\mathcal{M}_{r}^{\cdot}\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}L\varphi^{*}(\mathcal{N}^{\cdot})\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}})}^{L}\varphi^{*}(\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0,1)})) \] \[ \tilde{=}R\varphi_{*}((\mathcal{M}_{r}^{\cdot}\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}L\varphi^{*}(\mathcal{N}^{\cdot}))\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}})}^{L}\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)})\tilde{=}R\varphi_{*}((\mathcal{M}_{r}^{\cdot}\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}})}^{L}L\varphi^{*}(\mathcal{N}^{\cdot}))\widehat{\otimes}_{\mathcal{\widehat{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{\widehat{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}) \] \[ =\int_{\varphi}(\mathcal{M}_{r}^{\cdot}\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}})}^{L}L\varphi^{*}\mathcal{N}^{\cdot}) \] as claimed; note that the second isomorphism is \lemref{proj-over-D} which uses the assumption on $\mathcal{M}^{\cdot}$ and $\mathcal{N}^{\cdot}$. \end{proof} Now we turn to the smooth base change. Consider the fibre square of smooth formal schemes
$$ \begin{CD} \mathfrak{X}_{\mathfrak{Z}} @>\tilde{\psi} >> \mathfrak{X} \\ @VV\tilde{\varphi}V @VV{\varphi}V \\ \mathfrak{Z} @>\psi >> \mathfrak{Y} \end{CD} $$where the bottom row $\psi:\mathfrak{Z}\to\mathfrak{Y}$ is smooth of relative dimension $d$.
We have also the analogous square for smooth varieties over $k$. \begin{thm} \label{thm:Smooth-base-change}Suppose that $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ and $\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k\in D_{qcoh}^{b}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$. There is an isomorphism \[ \psi^{\dagger}\int_{\varphi}\mathcal{M}^{\cdot}\tilde{\to}\int_{\tilde{\varphi}}\tilde{\psi}{}^{\dagger}\mathcal{M}^{\cdot} \] inside $D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{Z}}^{(0,1)}))$. The analogous statement holds for smooth varieties over $k$. \end{thm}
\begin{proof} By the adjunction for ${\displaystyle (\tilde{\psi}^{\dagger},\int_{\tilde{\psi}}(d))}$ there is a morphism of functors \[ \int_{\varphi}\to\int_{\varphi}\circ\int_{\tilde{\psi}}(\tilde{\psi})^{\dagger}(d)\tilde{=}\int_{\psi}\circ\int_{\tilde{\varphi}}(\tilde{\psi})^{\dagger}(d) \] where the last isomorphism follows from the composition of push-forwards (\lemref{Composition-of-pushforwards}). Now, applying the adjunction for ${\displaystyle (\psi^{\dagger},\int_{\psi}(d))}$, we obtain a morphism \[ \psi^{\dagger}\int_{\varphi}\to\int_{\tilde{\varphi}}(\tilde{\psi})^{\dagger} \] After applying $\otimes_{W(k)}^{L}k$ we obtain the analogous map over $k$. So it suffices to show that the map is an isomorphism for varieties over $k$. Furthermore, working locally on $Z$, we reduce to the case where the map $\psi:Z\to Y$ factors as an etale morphism $Z\to Z'$ followed by a projection $Z'\tilde{=}Y\times\mathbb{A}^{d}\to Y$. In the case of an etale morphism, the functor ${\displaystyle \int_{\varphi}}$ agrees with $R\varphi_{*}$, so the result follows from the usual flat base change for quasicoherent sheaves. In the case of the projection, we have \[ \int_{\tilde{\varphi}}(\tilde{\psi})^{\dagger}\mathcal{M}^{\cdot}\tilde{=}\int_{\text{id}\times\varphi}D(\mathcal{O}_{\mathbb{A}_{k}^{d}})\boxtimes\mathcal{M}^{\cdot}[d]\tilde{=}D(\mathcal{O}_{\mathbb{A}_{k}^{d}})\boxtimes\int_{\varphi}\mathcal{M}^{\cdot}[d]\tilde{=}\psi^{\dagger}\int_{\varphi}\mathcal{M}^{\cdot} \] where the second isomorphism follows directly from the definition of the pushforward; this implies the result in this case. \end{proof} From this we deduce the Kunneth formula: \begin{cor} Let $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ and $\mathcal{N}^{\cdot}\in D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}))$ be such that $\mathcal{M}^{\cdot}\otimes_{W(k)}^{L}k\in D_{qcoh}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$ and $\mathcal{N}^{\cdot}\otimes_{W(k)}^{L}k\in D_{qcoh}(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)}))$. Then there is an isomorphism \[ \mathbb{H}_{\mathcal{G}}^{\cdot}(\mathcal{M}^{\cdot}\boxtimes\mathcal{N}^{\cdot})\tilde{=}\mathbb{H}_{\mathcal{G}}^{\cdot}(\mathcal{M}^{\cdot})\widehat{\otimes}_{W(k)[f,v]}^{L}\mathbb{H}_{\mathcal{G}}^{\cdot}(\mathcal{N}^{\cdot}) \] (where $\mathbb{H}_{\mathcal{G}}^{\cdot}$ is defined in \defref{Push!}). The analogous statement holds for complexes in $D_{qcoh}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$ and $D_{qcoh}(\mathcal{G}(\mathcal{D}_{Y}^{(0,1)}))$. \end{cor}
This is a formal consequence of the projection formula and the smooth base change (compare, e.g. \cite{key-53}, corollary 2.3.30).
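To indicate how the deduction goes, here is a schematic chain of isomorphisms, in which all shifts and Tate twists are suppressed; we write $p$ and $q$ for the projections of $\mathfrak{X}\times\mathfrak{Y}$ onto $\mathfrak{Y}$ and $\mathfrak{X}$, $a_{\mathfrak{Y}}$ for the structure morphism of $\mathfrak{Y}$, and we assume that $\mathcal{M}^{\cdot}\boxtimes\mathcal{N}^{\cdot}$ is built from $Lq^{*}\mathcal{M}^{\cdot}$ and $Lp^{*}\mathcal{N}^{\cdot}$ via $\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}\times\mathfrak{Y}})}^{L}$ and that $\mathbb{H}_{\mathcal{G}}^{\cdot}$ is the pushforward to a point: \[ \mathbb{H}_{\mathcal{G}}^{\cdot}(\mathcal{M}^{\cdot}\boxtimes\mathcal{N}^{\cdot})\tilde{=}\int_{a_{\mathfrak{Y}}}\int_{p}(Lq^{*}\mathcal{M}^{\cdot}\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{X}\times\mathfrak{Y}})}^{L}Lp^{*}\mathcal{N}^{\cdot})\tilde{=}\int_{a_{\mathfrak{Y}}}((\int_{p}Lq^{*}\mathcal{M}^{\cdot})\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{Y}})}^{L}\mathcal{N}^{\cdot}) \] \[ \tilde{=}\int_{a_{\mathfrak{Y}}}((\mathbb{H}_{\mathcal{G}}^{\cdot}(\mathcal{M}^{\cdot})\widehat{\otimes}_{W(k)[f,v]}^{L}D(\mathcal{O}_{\mathfrak{Y}}))\widehat{\otimes}_{D(\mathcal{O}_{\mathfrak{Y}})}^{L}\mathcal{N}^{\cdot})\tilde{=}\mathbb{H}_{\mathcal{G}}^{\cdot}(\mathcal{M}^{\cdot})\widehat{\otimes}_{W(k)[f,v]}^{L}\mathbb{H}_{\mathcal{G}}^{\cdot}(\mathcal{N}^{\cdot}) \] Here the first isomorphism uses the composition of pushforwards, the second is the projection formula for $p$, the third is smooth base change applied to the square formed by $p$, $q$ and the two structure morphisms, and the last is the projection formula for $a_{\mathfrak{Y}}$.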
\section{Operations on Gauges: Duality}
In this section we study the duality functor on $D_{coh}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ (and on $D_{coh}^{b}(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$). Although neither $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$ nor $\mathcal{D}_{X}^{(0,1)}$ has finite homological dimension, we shall show (using \propref{Sandwich!}) that there is a well-behaved duality functor $\mathbb{D}$ which takes bounded complexes of coherent modules to bounded complexes of coherent modules. Further, under suitable conditions this functor commutes with push-forward, in the following sense: \begin{thm} Let $\varphi:\mathfrak{X}\to\mathfrak{Y}$ be either a smooth proper morphism or a projective morphism. Then there is an isomorphism of functors \[ \int_{\varphi}\mathbb{D}_{\mathfrak{X}}\tilde{\to}\mathbb{D}_{\mathfrak{Y}}\int_{\varphi} \] The analogous statement holds for either a smooth proper or a projective morphism $\varphi:X\to Y$. In particular, when $\varphi$ is smooth proper, the functors $(\int_{\varphi},\varphi^{\dagger})$ form an adjoint pair on $D_{coh}^{b}$. \end{thm}
The proof, which will essentially occupy this section of the paper, is somewhat unsatisfactory. The key point is to construct a trace morphism \[ \text{tr}:\int_{\varphi}D(\mathcal{O}_{\mathfrak{X}})[d_{X}]\to D(\mathcal{O}_{\mathfrak{Y}})[d_{Y}] \] When $\varphi$ is smooth proper this is done by first constructing the map in $\mathcal{D}^{(0)}$ and $\mathcal{D}^{(1)}$ modules (using the Hodge to de Rham spectral sequence), and then deducing its existence for $\mathcal{D}^{(0,1)}$-modules. When $\varphi$ is a closed immersion the construction of the trace follows from a direct consideration of the structure of ${\displaystyle \int_{\varphi}}$ (the transfer bimodule is easy to describe in this case). For a projective $\varphi$ one defines the trace by breaking up the map into an immersion followed by a projection. Presumably there is a way to construct the trace for all proper morphisms at once, but I have been unable to find it.
To kick things off, we need to define the duality functor and show that it has finite homological dimension. \begin{defn} Let $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$. We define $\mathbb{D}_{\mathfrak{X}}(\mathcal{M}^{\cdot}):=\omega_{\mathfrak{X}}^{-1}\otimes_{\mathcal{O}_{\mathfrak{X}}}R\underline{\mathcal{H}om}(\mathcal{M}^{\cdot},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})[d_{X}]\in D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$ (where we have used the natural right $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}$-module structure on $R\underline{\mathcal{H}om}(\mathcal{M}^{\cdot},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})$).
The same formula defines $\mathbb{D}_{X}$ for a smooth variety $X$ over $k$; and in the analogous way we define the duality functors for $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ and $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$. \end{defn}
This is really a duality on the category of coherent modules: \begin{prop} Suppose $\mathcal{M}^{\cdot}\in D_{coh}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$; then $\mathbb{D}_{\mathfrak{X}}(\mathcal{M}^{\cdot})\in D_{coh}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$. Further, the natural transformation $\mathcal{M}^{\cdot}\to\mathbb{D}_{\mathfrak{X}}\mathbb{D}_{\mathfrak{X}}\mathcal{M}^{\cdot}$ is an isomorphism.
The same result holds for a smooth variety $X$ over $k$. \end{prop}
\begin{proof} By reduction mod $p$ it suffices to prove the result for $X$. Using \propref{Sandwich!}, and the fact that $\text{ker}(f:\mathcal{D}_{X}^{(0,1)}\to\mathcal{D}_{X}^{(0,1)})\tilde{=}\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})(1)$ and $\text{ker}(v:\mathcal{D}_{X}^{(0,1)}\to\mathcal{D}_{X}^{(0,1)})\tilde{=}\mathcal{R}(\mathcal{D}_{X}^{(1)})(-1)$ one reduces to proving the analogous result for $\mathcal{R}(\mathcal{D}_{X}^{(1)})$ and $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$. But these algebras have finite homological dimension, as noted above, and the results follow at once. \end{proof}
\subsection{Duality for a smooth proper morphism}
Now we turn to defining the trace morphism, and proving the duality, for a smooth proper map $\mathfrak{X}\to\mathfrak{Y}$ of relative dimension $d$. In this case the usual Grothendieck duality theory gives us a canonical morphism \[ \text{tr}:R^{d}\varphi_{*}(\omega_{\mathfrak{X}/\mathfrak{Y}})\to\mathcal{O}_{\mathfrak{Y}} \] Now consider $\mathcal{O}_{\mathfrak{X}}$ as a module over $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$. As the pushforward ${\displaystyle \int_{\varphi,0}\mathcal{O}_{\mathfrak{X}}}$ can be computed by the relative de Rham complex, looking at the Hodge-to-de Rham spectral sequence in degree $2d$ yields an isomorphism of $\mathcal{O}_{\mathfrak{Y}}$-modules \[ \mathcal{H}^{d}(\int_{\varphi,0}\mathcal{O}_{\mathfrak{X}})\tilde{=}R^{d}\varphi_{*}(\omega_{\mathfrak{X}/\mathfrak{Y}}) \] so, composing with the trace morphism above, we obtain a map \[ \text{tr}:\mathcal{H}^{d}(\int_{\varphi,0}\mathcal{O}_{\mathfrak{X}})\to\mathcal{O}_{\mathfrak{Y}} \] of $\mathcal{\widehat{D}}_{\mathfrak{Y}}^{(0)}$-modules.
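For orientation, the spectral sequence used here is the first (Hodge-to-de Rham) hypercohomology spectral sequence of the relative de Rham complex, \[ E_{1}^{i,j}=R^{j}\varphi_{*}\Omega_{\mathfrak{X}/\mathfrak{Y}}^{i}\Longrightarrow R^{i+j}\varphi_{dR,*}(\mathcal{O}_{\mathfrak{X}}) \] and (with the indexing normalized as in the isomorphism just stated, so that total degree $2d$ contributes to $\mathcal{H}^{d}({\displaystyle \int_{\varphi,0}\mathcal{O}_{\mathfrak{X}}})$) the term $E_{1}^{d,d}=R^{d}\varphi_{*}(\omega_{\mathfrak{X}/\mathfrak{Y}})$ is the only term in the top total degree $2d$.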
Now consider $\varphi:\mathfrak{X}_{n}\to\mathfrak{Y}_{n}$, the reduction mod $p^{n}$ of $\varphi$ for each $n\geq0$. Repeating the argument, we can construct \[ \text{tr}:\mathcal{H}^{d}(\int_{\varphi,0}\mathcal{O}_{\mathfrak{X}_{n}})\to\mathcal{O}_{\mathfrak{Y}_{n}} \] and, in fact, the inverse limit of these maps is the trace constructed above. In this setting, the de Rham complex $\Omega_{\mathfrak{X}_{n}/\mathfrak{Y}_{n}}^{\cdot}$ has the structure of a complex of coherent sheaves over the scheme $W_{n}(\mathcal{O}_{X^{(n)}})$ (here we are identifying the underlying topological spaces of $\mathfrak{X}_{n}$ and $W_{n}(\mathcal{O}_{X^{(n)}})$). Thus we may also consider the second spectral sequence for the pushforward of this complex, and we obtain an isomorphism \[ R^{d}\varphi_{*}(\text{coker}(d:\Omega_{\mathfrak{X}_{n}/\mathfrak{Y}_{n}}^{d-1}\to\omega_{\mathfrak{X}_{n}/\mathfrak{Y}_{n}}))\tilde{\to}R^{d}\varphi_{*}(\omega_{\mathfrak{X}_{n}/\mathfrak{Y}_{n}}) \] or, equivalently, \[ R^{d}\varphi_{*}(\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0)}\otimes_{\mathcal{D}_{\mathfrak{X}_{n}}^{(0)}}\mathcal{O}_{\mathfrak{X}_{n}})\tilde{\to}R^{d}\varphi_{*}(\omega_{\mathfrak{X}_{n}/\mathfrak{Y}_{n}}) \]
Now we consider the pushforward of $\mathcal{O}_{\mathfrak{X}_{n}}$, in the category of $\mathcal{D}_{\mathfrak{X}_{n}}^{(1)}$-modules. By the commutativity of Frobenius descent with push-forward (\cite{key-2}, theoreme 3.4.4), we have \[ \int_{\varphi,1}\mathcal{O}_{\mathfrak{X}_{n}}\tilde{=}\int_{\varphi,1}F^{*}\mathcal{O}_{\mathfrak{X}_{n}}\tilde{\to}F^{*}\int_{\varphi,0}\mathcal{O}_{\mathfrak{X}_{n}} \] Therefore we obtain a trace map \[ \text{tr}:\mathcal{H}^{d}(\int_{\varphi,1}\mathcal{O}_{\mathfrak{X}_{n}})\to\mathcal{O}_{\mathfrak{Y}_{n}} \] in the category of $\mathcal{D}_{\mathfrak{Y}_{n}}^{(1)}$-modules; and, using the second spectral sequence for the pushforward as above, we have \[ R^{d}\varphi_{*}(\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(1)}\otimes_{\mathcal{D}_{\mathfrak{X}_{n}}^{(1)}}\mathcal{O}_{\mathfrak{X}_{n}})\tilde{\to}\mathcal{H}^{d}(\int_{\varphi,1}\mathcal{O}_{\mathfrak{X}_{n}}) \]
Using these maps, we construct a trace for $\mathcal{D}_{\mathfrak{X}_{n}}^{(0,1)}$-modules: \begin{lem} There is a canonical morphism \[ \text{tr}:R^{d}\varphi_{*}(\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0,1)}\otimes{}_{\mathcal{D}_{\mathfrak{X}_{n}}^{(0,1)}}D(\mathcal{O}_{\mathfrak{X}_{n}}))\to D(\mathcal{O}_{\mathfrak{Y}_{n}}) \] which has the property that the map $\text{tr}^{\infty}:R^{d}\varphi_{*}(\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0,1)}\otimes{}_{\mathcal{D}_{\mathfrak{X}_{n}}^{(0,1)}}D(\mathcal{O}_{\mathfrak{X}_{n}}))^{\infty}\to D(\mathcal{O}_{\mathfrak{Y}_{n}}){}^{\infty}$ agrees with the trace map for $\mathcal{D}_{\mathfrak{X}_{n}}^{(1)}$-modules constructed above; and the map $\text{tr}^{-\infty}:R^{d}\varphi_{*}(\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0,1)}\otimes{}_{\mathcal{D}_{\mathfrak{X}_{n}}^{(0,1)}}D(\mathcal{O}_{\mathfrak{X}_{n}}))^{-\infty}\to D(\mathcal{O}_{\mathfrak{Y}_{n}})^{-\infty}$ agrees with the trace map for $\mathcal{D}_{\mathfrak{X}_{n}}^{(0)}$-modules constructed above. We have the analogous statement for a proper morphism $\varphi:X\to Y$, as well as in the categories of $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$-modules and $\mathcal{R}(\mathcal{D}_{X}^{(1)})$-modules.
This map yields a trace map in the derived category: \[ \text{tr}:\int_{\varphi}D(\mathcal{O}_{\mathfrak{X}_{n}})[d]\to D(\mathcal{O}_{\mathfrak{Y}_{n}}) \]
Upon taking the inverse limit over $n$, we obtain a map \[ \text{tr}:\int_{\varphi}D(\mathcal{O}_{\mathfrak{X}})[d]\to D(\mathcal{O}_{\mathfrak{Y}}) \] \end{lem}
\begin{proof} We begin with the case $n=1$; i.e., $\mathfrak{X}_{n}=X$ and $\mathfrak{Y}_{n}=Y$. We claim that the $\varphi^{-1}(\mathcal{D}_{Y}^{(0,1)})$-gauge $\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes{}_{\mathcal{D}_{X}^{(0,1)}}D(\mathcal{O}_{X})$ satisfies the property that $v$ is an isomorphism in degrees $0$ and below and $f$ is an isomorphism in degrees $1$ and above. This can be checked in local coordinates, where we have the isomorphism \[ \mathcal{D}_{Y\leftarrow X}^{(0,1)}=\mathcal{J}\backslash\mathcal{D}_{X}^{(0,1)} \] where $\mathcal{J}$ is the right ideal generated by $\{\partial_{n-d+1},\dots,\partial_{n},\partial_{n-d+1}^{[p]},\dots,\partial_{n}^{[p]}\}$. In degrees below $0$, the elements $\{\partial_{n-d+1}^{[p]},\dots,\partial_{n}^{[p]}\}$ act trivially; so that \[ (\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes{}_{\mathcal{D}_{X}^{(0,1)}}D(\mathcal{O}_{X}))^{i}=\mathcal{O}_{X}/(\partial_{n-d+1},\dots,\partial_{n}) \] for all $i\leq0$. On the other hand we have \[ (\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes{}_{\mathcal{D}_{X}^{(0,1)}}D(\mathcal{O}_{X}))^{i}=\mathcal{O}_{X}/(\partial_{n-d+1},\dots,\partial_{n},\partial_{n-d+1}^{[p]},\dots,\partial_{n}^{[p]}) \] for $i>0$; and the claim about $f$ and $v$ follows immediately. As the functor $R^{d}\varphi_{*}$ commutes with direct sums, we see that the gauge \[ R^{d}\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes{}_{\mathcal{D}_{X}^{(0,1)}}D(\mathcal{O}_{X})) \] has the same property: $v$ is an isomorphism in degrees $0$ and below and $f$ is an isomorphism in degrees $1$ and above. Thus we may define \[ \text{tr}:R^{d}\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes{}_{\mathcal{D}_{X}^{(0,1)}}D(\mathcal{O}_{X}))^{i}\to\mathcal{O}_{Y} \] for any $i$ as follows: if $i\leq0$ we have $v_{-\infty}:R^{d}\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes{}_{\mathcal{D}_{X}^{(0,1)}}D(\mathcal{O}_{X}))^{i}\tilde{=}R^{d}\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0)}\otimes_{\mathcal{D}_{X}^{(0)}}\mathcal{O}_{X})$ and so we define the trace as the composition $\text{tr}\circ v_{-\infty}$, where $\text{tr}$ denotes the trace for $\mathcal{D}_{X}^{(0)}$-modules constructed above. If $i>0$ we have $f_{\infty}:R^{d}\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(0,1)}\otimes{}_{\mathcal{D}_{X}^{(0,1)}}D(\mathcal{O}_{X}))^{i}\tilde{=}R^{d}\varphi_{*}(\mathcal{D}_{Y\leftarrow X}^{(1)}\otimes_{\mathcal{D}_{X}^{(1)}}\mathcal{O}_{X})$ and so we define the trace as the composition $\text{tr}\circ f_{\infty}$, where $\text{tr}$ denotes the trace for $\mathcal{D}_{X}^{(1)}$-modules constructed above. In a similar way, we construct the trace map in the categories of $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$-modules and $\mathcal{R}(\mathcal{D}_{X}^{(1)})$-modules.
Now we consider $\mathfrak{X}_{n}$ for $n>1$. Since the functor $\varphi_{*}$ has homological dimension $d$ (on the category of quasicoherent sheaves), we have that $(R^{d}\varphi_{*}\mathcal{F})\otimes_{W(k)}k\tilde{=}R^{d}\varphi_{*}(\mathcal{F}\otimes_{W(k)}^{L}k)$ for any $\mathcal{F}\in\text{Qcoh}(\mathfrak{X}_{n})$. So, by Nakayama's lemma and the result of the previous paragraph, we see that $f$ is onto in degrees $1$ and above while $v$ is onto in degrees $0$ and below; by the coherence of the sheaves involved we see that these maps are isomorphisms for $i\ll0$ and $i\gg0$. Since the target of the trace map, $D(\mathcal{O}_{\mathfrak{Y}_{n}})$, has the property that $v$ is an isomorphism in degrees $0$ and below and $f$ is an isomorphism in degrees $1$ and above, we may define the trace map in the exact same way as above. \end{proof} \begin{rem} \label{rem:trace-and-compose}If $\varphi:\mathfrak{X}\to\mathfrak{Y}$ and $\psi:\mathfrak{Y}\to\mathfrak{Z}$ are as above, then the trace map for the composition satisfies ${\displaystyle \text{tr}_{\psi\circ\varphi}=\text{tr}_{\psi}\circ\int_{\psi}\text{tr}_{\varphi}}$. This follows from the analogous result for the trace map in coherent sheaf theory. \end{rem}
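To summarize the construction in the case $n=1$ (writing, purely as shorthand, $\text{tr}^{(0)}$ and $\text{tr}^{(1)}$ for the traces on $\mathcal{D}^{(0)}$- and $\mathcal{D}^{(1)}$-modules constructed earlier), the trace on the gauge is assembled degree by degree as \[ \text{tr}^{i}=\begin{cases} \text{tr}^{(0)}\circ v_{-\infty}, & i\leq0,\\ \text{tr}^{(1)}\circ f_{\infty}, & i\geq1, \end{cases} \] and the case $n>1$ is handled by the same recipe, using that the corresponding maps on the target $D(\mathcal{O}_{\mathfrak{Y}_{n}})$ are isomorphisms in the appropriate degrees.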
Now, following the usual method of algebraic $\mathcal{D}$-module theory (c.f. \cite{key-49}, theorem 2.7.2), we have \begin{prop} There is a canonical morphism \[ \int_{\varphi}\mathbb{D}_{\mathfrak{X}}\mathcal{M}^{\cdot}\to\mathbb{D}_{\mathfrak{Y}}\int_{\varphi}\mathcal{M}^{\cdot} \]
for any $\mathcal{M}^{\cdot}\in D_{cc}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$. The same holds for $\mathcal{M}^{\cdot}\in D(\mathcal{G}(\mathcal{D}_{X}^{(0,1)}))$ when we have a proper map $\varphi:X\to Y$. Further, these maps are compatible under application of $\otimes_{W(k)}^{L}k$. \end{prop}
\begin{proof} We have \[ \int_{\varphi}\mathbb{D}_{\mathfrak{X}}\mathcal{M}^{\cdot}=R\varphi_{*}(R\underline{\mathcal{H}om}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\mathcal{M}^{\cdot},\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)})\widehat{\otimes}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)})\widehat{\otimes}_{\mathcal{O}_{\mathfrak{Y}}}^{L}\omega_{\mathfrak{Y}}^{-1}[d_{X}] \] \[ =R\varphi_{*}(R\underline{\mathcal{H}om}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\mathcal{M}^{\cdot},\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}))\widehat{\otimes}_{\mathcal{O}_{\mathfrak{Y}}}^{L}\omega_{\mathfrak{Y}}^{-1}[d_{X}] \] while \[ \mathbb{D}_{\mathfrak{Y}}\int_{\varphi}\mathcal{M}^{\cdot}=R\underline{\mathcal{H}om}_{\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}}(\int_{\varphi}\mathcal{M}^{\cdot},\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})\widehat{\otimes}_{\mathcal{O}_{\mathfrak{Y}}}^{L}\omega_{\mathfrak{Y}}^{-1}[d_{Y}] \] To construct a canonical map between these complexes, we begin by considering ${\displaystyle \int_{\varphi}\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}}$. Since $\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}=L\varphi^{*}\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}$, we may apply \thmref{Projection-Formula} to obtain \[ \int_{\varphi}\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}=\int_{\varphi}L\varphi^{*}\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}\tilde{\to}(\int_{\varphi}D(\mathcal{O}_{\mathfrak{X}}))\widehat{\otimes}_{\mathcal{O}_{\mathfrak{Y}}[f,v]}^{L}\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)} \] so applying the trace map yields a canonical morphism \[ \int_{\varphi}\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}[d]\to\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)} \] and since $d=d_{X}-d_{Y}$ we have \[ \int_{\varphi}\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}[d_{X}]\to\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}[d_{Y}] \] Then we have \[ R\varphi_{*}(R\underline{\mathcal{H}om}_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}(\mathcal{M}^{\cdot},\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}))[d_{X}] \] \[ \to R\varphi_{*}(R\underline{\mathcal{H}om}_{\varphi^{-1}(\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)})}(\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\otimes_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot},\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\otimes_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}))[d_{X}] \] \[ \to R\underline{\mathcal{H}om}_{\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}}(R\varphi_{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\otimes_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}),R\varphi_{*}(\widehat{\mathcal{D}}_{\mathfrak{Y}\leftarrow\mathfrak{X}}^{(0,1)}\otimes_{\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}}^{L}\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}))[d_{X}] \] \[ =R\underline{\mathcal{H}om}_{\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}}(\int_{\varphi}\mathcal{M}^{\cdot},\int_{\varphi}\widehat{\mathcal{D}}_{\mathfrak{X}\to\mathfrak{Y}}^{(0,1)}[d_{X}])\to R\underline{\mathcal{H}om}_{\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}}(\int_{\varphi}\mathcal{M}^{\cdot},\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0,1)}[d_{Y}]) \] where the last map is the trace.
Combining with the above description yields the canonical map \[ \int_{\varphi}\mathbb{D}_{\mathfrak{X}}\mathcal{M}^{\cdot}\to\mathbb{D}_{\mathfrak{Y}}\int_{\varphi}\mathcal{M}^{\cdot} \] as desired; the case of a proper map $\varphi:X\to Y$ is identical. \end{proof} Now we turn to \begin{thm} \label{thm:Duality-for-smooth-proper}The canonical map $\int_{\varphi}\mathbb{D}_{\mathfrak{X}}\mathcal{M}^{\cdot}\to\mathbb{D}_{\mathfrak{Y}}\int_{\varphi}\mathcal{M}^{\cdot}$ is an isomorphism for $\mathcal{M}^{\cdot}\in D_{coh}^{b}(\mathcal{G}(\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0,1)}))$. The same is true for a proper map $\varphi:X\to Y$. \end{thm}
The proof of this result will make use of several auxiliary results. First, we recall a basic computation for pushforwards of $\mathcal{R}(\mathcal{D}_{X}^{(0)})$-modules; as in the previous section we have the diagram \[ T^{*}X\xleftarrow{d\varphi}X\times_{Y}T^{*}Y\xrightarrow{\pi}T^{*}Y \] and the result reads \begin{lem} Let $\mathcal{M}^{\cdot}\in D_{coh}^{b}(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(0)})))$. There is an isomorphism \[ (\int_{\varphi}\mathcal{M}^{\cdot})\otimes_{k[f]}^{L}k\tilde{=}R\pi_{*}((d\varphi)^{!}(\mathcal{M}^{\cdot}\otimes_{k[f]}^{L}k)) \] inside $D_{coh}^{b}(T^{*}Y)$; in this formula $d\varphi^{!}$ is the extraordinary inverse image in coherent sheaf theory. \end{lem}
This is a result of Laumon (c.f. \cite{key-19}, construction 5.6.1). For a proof in the Rees algebra language, see \cite{key-70}, corollary 3.9.
Next, we need the following Grothendieck duality statement for Azumaya algebras: \begin{lem} \label{lem:GD-for-Az}Let $X$ and $Y$ be smooth varieties over $k$ and let $\pi:X\to Y$ be a smooth proper morphism of relative dimension $d$. Let $\mathcal{A}_{Y}$ be an Azumaya algebra on $Y$. Set $\mathcal{A}_{X}=\pi^{*}\mathcal{A}_{Y}$, an Azumaya algebra on $X$. Then there is a trace map $R^{d}\pi_{*}(\mathcal{A}_{X}\otimes_{\mathcal{O}_{X}}\omega_{X})\to\mathcal{A}_{Y}\otimes_{\mathcal{O}_{Y}}\omega_{Y}$ which induces, for any $\mathcal{M}^{\cdot}\in D_{coh}^{b}(\mathcal{A}_{X}-\text{mod})$, a functorial isomorphism \[ R\pi_{*}R\mathcal{H}om_{\mathcal{A}_{X}}(\mathcal{M}^{\cdot},\mathcal{A}_{X}\otimes_{\mathcal{O}_{X}}\omega_{X})[d]\tilde{\to}R\mathcal{H}om_{\mathcal{A}_{Y}}(R\pi_{*}\mathcal{M}^{\cdot},\mathcal{A}_{Y}\otimes_{\mathcal{O}_{Y}}\omega_{Y}) \] inside $D_{coh}^{b}(\mathcal{O}_{Y}-\text{mod})$. \end{lem}
\begin{proof} Via the projection formula we have \[ R\pi_{*}(\mathcal{A}_{X}\otimes_{\mathcal{O}_{X}}\omega_{X})=R\pi_{*}(\pi^{*}\mathcal{A}_{Y}\otimes_{\mathcal{O}_{X}}\omega_{X})\tilde{\to}\mathcal{A}_{Y}\otimes_{\mathcal{O}_{Y}}^{L}R\pi_{*}(\omega_{X}) \] so the usual trace $\text{tr}:R^{d}\pi_{*}(\omega_{X})\to\omega_{Y}$ induces a trace $\text{tr}:R^{d}\pi_{*}(\mathcal{A}_{X}\otimes_{\mathcal{O}_{X}}\omega_{X})\to\mathcal{A}_{Y}\otimes_{\mathcal{O}_{Y}}\omega_{Y}$. Since $\pi$ has homological dimension $d$, we have $R\pi_{*}(\mathcal{A}_{X}\otimes_{\mathcal{O}_{X}}\omega_{X})[d]\to R^{d}\pi_{*}(\mathcal{A}_{X}\otimes_{\mathcal{O}_{X}}\omega_{X})$ so that there is a map \[ R\pi_{*}(\mathcal{A}_{X}\otimes_{\mathcal{O}_{X}}\omega_{X})[d]\to\mathcal{A}_{Y}\otimes_{\mathcal{O}_{Y}}\omega_{Y} \] Thus we obtain \[ R\pi_{*}R\mathcal{H}om_{\mathcal{A}_{X}}(\mathcal{M}^{\cdot},\mathcal{A}_{X}\otimes_{\mathcal{O}_{X}}\omega_{X})[d]\to R\mathcal{H}om_{\mathcal{A}_{Y}}(R\pi_{*}\mathcal{M}^{\cdot},R\pi_{*}(\mathcal{A}_{X}\otimes_{\mathcal{O}_{X}}\omega_{X})[d]) \] \[ \to R\mathcal{H}om_{\mathcal{A}_{Y}}(R\pi_{*}\mathcal{M}^{\cdot},\mathcal{A}_{Y}\otimes_{\mathcal{O}_{Y}}\omega_{Y}) \] for any $\mathcal{M}^{\cdot}\in D_{coh}^{b}(\mathcal{A}_{X}-\text{mod})$. To prove that this map is an isomorphism, we can work in the etale (or flat) topology on $Y$ and so assume that $\mathcal{A}_{Y}$ is split; i.e., $\mathcal{A}_{Y}=\mathcal{E}nd(\mathcal{E}_{Y})$ for some vector bundle $\mathcal{E}_{Y}$. This implies $\mathcal{A}_{X}=\mathcal{E}nd(\mathcal{E}_{X})$ where $\mathcal{E}_{X}=\pi^{*}\mathcal{E}_{Y}$. Then for any $\mathcal{M}^{\cdot}\in D_{coh}^{b}(\mathcal{A}_{X}-\text{mod})$ we have $\mathcal{M}^{\cdot}=\mathcal{E}_{X}\otimes_{\mathcal{O}_{X}}\mathcal{N}^{\cdot}$ for a complex $\mathcal{N}^{\cdot}\in D_{coh}^{b}(\mathcal{O}_{X}-\text{mod})$. Therefore \[ R\pi_{*}R\mathcal{H}om_{\mathcal{A}_{X}}(\mathcal{M}^{\cdot},\mathcal{A}_{X}\otimes_{\mathcal{O}_{X}}\omega_{X})[d]\tilde{=}R\pi_{*}R\mathcal{H}om_{\mathcal{A}_{X}}(\mathcal{E}_{X}\otimes_{\mathcal{O}_{X}}\mathcal{N}^{\cdot},\mathcal{E}_{X}\otimes_{\mathcal{O}_{X}}(\mathcal{E}_{X}^{*}\otimes_{\mathcal{O}_{X}}\omega_{X}))[d] \] \[ \tilde{=}R\pi_{*}R\mathcal{H}om_{\mathcal{O}_{X}}(\mathcal{N}^{\cdot},\mathcal{E}_{X}^{*}\otimes_{\mathcal{O}_{X}}\omega_{X})[d]\tilde{=}R\pi_{*}R\mathcal{H}om_{\mathcal{O}_{X}}(\mathcal{M}^{\cdot},\omega_{X}[d]) \] \[ \tilde{\to}R\mathcal{H}om_{\mathcal{O}_{Y}}(R\pi_{*}\mathcal{M}^{\cdot},\omega_{Y})\tilde{=}R\mathcal{H}om_{\mathcal{A}_{Y}}(\mathcal{E}_{Y}\otimes_{\mathcal{O}_{Y}}R\pi_{*}\mathcal{M}^{\cdot},\mathcal{E}_{Y}\otimes_{\mathcal{O}_{Y}}\omega_{Y}) \] \[ \tilde{\to}R\mathcal{H}om_{\mathcal{A}_{Y}}(R\pi_{*}\mathcal{M}^{\cdot},\mathcal{A}_{Y}\otimes_{\mathcal{O}_{Y}}\omega_{Y}) \] where the isomorphism $R\pi_{*}R\mathcal{H}om_{\mathcal{O}_{X}}(\mathcal{M}^{\cdot},\omega_{X}[d])\tilde{\to}R\mathcal{H}om_{\mathcal{O}_{Y}}(R\pi_{*}\mathcal{M}^{\cdot},\omega_{Y})$ is Grothendieck duality for coherent sheaves. \end{proof} Now we can proceed to the \begin{proof} (of \thmref{Duality-for-smooth-proper}) By applying $\otimes_{W(k)}^{L}k$ we reduce to the characteristic $p$ situation of a smooth proper morphism $\varphi:X\to Y$. By induction on the cohomological length, we may suppose that $\mathcal{M}^{\cdot}$ is concentrated in a single degree; i.e., $\mathcal{M}^{\cdot}=\mathcal{M}\in\mathcal{G}(\mathcal{D}_{X}^{(0,1)})$.
Then $\mathcal{M}$ admits a short exact sequence \[ \mathcal{M}_{0}\to\mathcal{M}\to\mathcal{M}_{1} \] where $\mathcal{M}_{0}\in\mathcal{G}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}))$ and $\mathcal{M}_{1}\in\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)}))$. By \propref{Sandwich!} and \propref{Sandwich-push}, we see that it suffices to prove the analogous statements in $D_{coh}^{b}(\mathcal{G}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})))$ and $D_{coh}^{b}(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)})))$. By Frobenius descent (\thmref{Hodge-Filtered-Push}), one sees that it suffices to prove the result for $D_{coh}^{b}(\mathcal{G}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})))$ and $D_{coh}^{b}(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(0)})))$. These two cases require similar, but slightly different techniques; we begin with the case of $\mathcal{R}(\mathcal{D}_{X}^{(0)})$. In this case, since the grading on $\mathcal{R}(\mathcal{D}_{X}^{(0)})$ is concentrated in degrees $\geq0$, the graded Nakayama lemma applies and so it suffices to prove that the map is an isomorphism after applying $\otimes_{k[f]}^{L}k$; i.e., we have to prove \[ R\pi_{*}(d\varphi)^{!}R\mathcal{H}om_{\mathcal{O}_{T^{*}X}}((\mathcal{M}\otimes_{k[f]}^{L}k),\omega_{T^{*}X})[d] \] \[ \tilde{\to}R\mathcal{H}om_{\mathcal{O}_{T^{*}Y}}(R\pi_{*}((d\varphi)^{!}(\mathcal{M}\otimes_{k[f]}^{L}k)),\omega_{T^{*}Y}) \] Since $d\varphi$ is a closed immersion of smooth schemes, we have \[ (d\varphi)^{!}R\mathcal{H}om_{\mathcal{O}_{T^{*}X}}((\mathcal{M}\otimes_{k[f]}^{L}k),\omega_{T^{*}X})[d] \] \[ \tilde{=}R\mathcal{H}om_{\mathcal{O}_{X\times_{Y}T^{*}Y}}((d\varphi)^{!}(\mathcal{M}\otimes_{k[f]}^{L}k),(d\varphi)^{!}\omega_{T^{*}X})[d] \] Furthermore, $(d\varphi)^{!}\omega_{T^{*}X}=\omega_{X\times_{Y}T^{*}Y}\tilde{=}\pi^{!}(\omega_{T^{*}Y})$. Therefore \[ R\pi_{*}R\mathcal{H}om_{\mathcal{O}_{X\times_{Y}T^{*}Y}}((d\varphi)^{!}(\mathcal{M}\otimes_{k[f]}^{L}k),(d\varphi)^{!}\omega_{T^{*}X})[d] \] \[ \tilde{=}R\pi_{*}R\mathcal{H}om_{\mathcal{O}_{X\times_{Y}T^{*}Y}}((d\varphi)^{!}(\mathcal{M}\otimes_{k[f]}^{L}k),\pi^{!}\omega_{T^{*}Y}[d]) \] \[ \tilde{\to}R\mathcal{H}om_{\mathcal{O}_{T^{*}Y}}(R\pi_{*}((d\varphi)^{!}(\mathcal{M}\otimes_{k[f]}^{L}k)),\omega_{T^{*}Y}) \] where the last isomorphism is induced by the trace for $X\times_{Y}T^{*}Y\xrightarrow{\pi}T^{*}Y$, i.e., it is given by Grothendieck duality for $\pi$; this proves the result for $D_{coh}^{b}(\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(0)})))$.
In order to handle $D_{coh}^{b}(\mathcal{G}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})))$, we apply a similar technique, but working directly\footnote{As the grading on $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$ is unbounded, the graded Nakayama lemma does not apply} with the Azumaya algebra $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$. Since the morphism $\varphi$ is smooth, we can make use of \corref{Filtered-Bez-Brav} and work with the functor $R\pi_{*}^{(1)}\circ C\circ(d\varphi^{(1)})^{!}$. We therefore have to prove \[ R\pi_{*}^{(1)}\circ C\circ(d\varphi^{(1)})^{!}R\mathcal{H}om_{\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}(\mathcal{M}^{\cdot},\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}))\otimes_{\mathcal{O}_{X}}\omega_{X}^{-1}[d] \] \[ \tilde{\to}R\mathcal{H}om_{\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})}(R\pi_{*}^{(1)}\circ C\circ(d\varphi^{(1)})^{!}\mathcal{M}^{\cdot},\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)}))\otimes_{\mathcal{O}_{Y}}\omega_{Y}^{-1} \] We proceed as above. We have an isomorphism \[ C\circ(d\varphi^{(1)})^{!}R\mathcal{H}om_{\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})}(\mathcal{M}^{\cdot},\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)}))\otimes_{\mathcal{O}_{X}}\omega_{X}^{-1} \] \[ \tilde{=}R\mathcal{H}om_{(\pi^{(1)})^{*}(\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)}))}(C\circ(d\varphi^{(1)})^{!}\mathcal{M}^{\cdot},C\circ(d\varphi^{(1)})^{!}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{X}}\omega_{X}^{-1})) \] Applying the definition of $(d\varphi^{(1)})^{!}$ and $C$, one deduces \[ C\circ(d\varphi^{(1)})^{!}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{X}}\omega_{X}^{-1})\tilde{=}(\pi^{(1)})^{*}(\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})\otimes_{\mathcal{O}_{Y}}\omega_{Y}^{-1})\otimes_{\mathcal{O}_{(X\times_{Y}T^{*}Y)^{(1)}}}\omega_{(X\times_{Y}T^{*}Y)^{(1)}} \]
Therefore \[ R\mathcal{H}om_{\pi^{*}(\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)}))}(C\circ(d\varphi^{(1)})^{!}\mathcal{M}^{\cdot},C\circ(d\varphi^{(1)})^{!}(\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})\otimes_{\mathcal{O}_{X}}\omega_{X}^{-1}))[d] \] \[ \tilde{\to}R\mathcal{H}om_{\pi^{*}(\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)}))}(C\circ(d\varphi^{(1)})^{!}\mathcal{M}^{\cdot},(\pi^{(1)})^{*}(\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})\otimes_{\mathcal{O}_{Y}}\omega_{Y}^{-1})\otimes_{\mathcal{O}_{(X\times_{Y}T^{*}Y)^{(1)}}}\omega_{(X\times_{Y}T^{*}Y)^{(1)}}) \] \[ \tilde{\to}R\mathcal{H}om_{\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})}(R\pi_{*}^{(1)}\circ C\circ(d\varphi^{(1)})^{!}\mathcal{M}^{\cdot},\overline{\mathcal{R}}(\mathcal{D}_{Y}^{(0)})\otimes_{\mathcal{O}_{Y}}\omega_{Y}^{-1}) \] where the last isomorphism follows from \lemref{GD-for-Az}; this proves the result for $\overline{\mathcal{R}}(\mathcal{D}_{X}^{(0)})$. \end{proof} This implies, by an identical argument to theorem 2.7.3 of \cite{key-49}: \begin{cor} \label{cor:Smooth-proper-adunction}There is a functorial isomorphism \[ R\underline{\mathcal{H}om}_{\mathcal{D}_{\mathfrak{Y}}^{(0,1)}}(\int_{\varphi}\mathcal{M}^{\cdot},\mathcal{N}^{\cdot})\tilde{\to}\varphi_{*}R\underline{\mathcal{H}om}_{\mathcal{D}_{\mathfrak{X}}^{(0,1)}}(\mathcal{M}^{\cdot},\varphi^{\dagger}\mathcal{N}^{\cdot}) \] for all $\mathcal{M}^{\cdot}\in D_{coh}^{b}(\mathcal{G}(\mathcal{D}_{\mathfrak{X}}^{(0,1)}))$ and $\mathcal{N}^{\cdot}\in D_{coh}^{b}(\mathcal{G}(\mathcal{D}_{\mathfrak{Y}}^{(0,1)}))$. \end{cor}
\subsection{Duality for a Projective morphism}
Now we turn to constructing the trace map in the case where $\varphi:\mathfrak{X}\to\mathfrak{Y}$ is a closed embedding, of relative dimension $d$. In this case the pushforward is fairly easy to describe: \begin{lem} \label{lem:transfer-is-locally-free}Let $\varphi:\mathfrak{X}_{n}\to\mathfrak{Y}_{n}$ be the reduction to $W_{n}(k)$ of the closed embedding $\varphi$. Then the transfer bimodule $\mathcal{D}_{\mathfrak{X}_{n}\to\mathfrak{Y}_{n}}^{(0,1)}$ is locally free over $\mathcal{D}_{\mathfrak{X}_{n}}^{(0,1)}$ and is coherent over $\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1),\text{opp}}$. Thus the functor $\int_{\varphi}^{0}:\mathcal{G}_{coh}(\mathcal{D}_{\mathfrak{X}_{n}}^{(0,1)})\to\mathcal{G}_{coh}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)})$ is exact. \end{lem}
\begin{proof} Working locally, we can assume that $\mathfrak{X}_{n}=\text{Spec}(B_{n})$, $\mathfrak{Y}_{n}=\text{Spec}(A_{n})$, and $A_{n}$ admits local coordinates $\{x_{1},\dots,x_{n}\}$ for which $B_{n}=A_{n}/(x_{1},\dots,x_{m})$. Then, writing $I=(x_{1},\dots,x_{m})$, \[ \Gamma(\mathcal{D}_{\mathfrak{X}_{n}\to\mathfrak{Y}_{n}}^{(0,1)})=\mathcal{D}_{A_{n}}^{(0,1)}/I\cdot\mathcal{D}_{A_{n}}^{(0,1)} \] is coherent over $\mathcal{D}_{A_{n}}^{(0,1),\text{opp}}$. Now, \corref{Local-coords-over-A=00005Bf,v=00005D} implies that $\mathcal{D}_{A_{n}}^{(0,1)}$ is free over $D(A_{n})$ (c.f. also the proof of \corref{Each-D^(i)-is-free}), with basis given by the set $\{\partial^{I}(\partial^{[p]})^{J}\}$, where $I=(i_{1},\dots,i_{n})$ is a multi-index with $0\leq i_{j}\leq p-1$ for all $j$ and $J$ is any multi-index with entries $\geq0$. So $\mathcal{D}_{A_{n}}^{(0,1)}/I\cdot\mathcal{D}_{A_{n}}^{(0,1)}$ is free over $\mathcal{D}_{B_{n}}^{(0,1)}$ with basis given by $\{\partial^{I}(\partial^{[p]})^{J}\}$, where $I=(i_{1},\dots,i_{m})$ is a multi-index with $0\leq i_{j}\leq p-1$ for all $j$ and $J=(j_{1},\dots,j_{m})$ is any multi-index with entries $\geq0$. \end{proof} Now we can proceed to analyze this functor, and the pullback $\varphi^{\dagger}$, in exactly the same way as is done in the usual algebraic $\mathcal{D}$-module theory. In this case, the existence of the trace map is essentially deduced from the duality. To start off, we have \begin{prop} Let $\varphi:\mathfrak{X}_{n}\to\mathfrak{Y}_{n}$ be as above. Define $\varphi^{\sharp}(\mathcal{M}^{\cdot}):=R\underline{\mathcal{H}om}_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)})}(\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0,1)},\varphi^{-1}(\mathcal{M}^{\cdot}))$. Then there is an isomorphism of functors $\varphi^{\dagger}\tilde{=}\varphi^{\sharp}$ on $D(\mathcal{G}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)}))$. \end{prop}
\begin{proof} This is very similar to \cite{key-49}, propositions 1.5.14 and 1.5.16. One first shows \[ R\underline{\mathcal{H}om}_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1),\text{opp}})}(\mathcal{D}_{\mathfrak{X}_{n}\to\mathfrak{Y}_{n}}^{(0,1)},\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)}))\tilde{=}\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0,1)}[-d] \] by using the Koszul complex to write a locally free resolution for $\mathcal{D}_{\mathfrak{X}_{n}\to\mathfrak{Y}_{n}}^{(0,1)}$ over $\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1),\text{opp}})$; note that by the left-right interchange this implies \[ R\underline{\mathcal{H}om}_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)})}(\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0,1)},\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)}))\tilde{=}\mathcal{D}_{\mathfrak{X}_{n}\to\mathfrak{Y}_{n}}^{(0,1)}[-d] \]
Then, we have \[ \varphi^{\dagger}(\mathcal{M}^{\cdot})=\mathcal{D}_{\mathfrak{X}_{n}\to\mathfrak{Y}_{n}}^{(0,1)}\otimes_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)})}^{L}\varphi^{-1}(\mathcal{M}^{\cdot})[-d] \] \[ \tilde{=}R\underline{\mathcal{H}om}_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)})}(\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0,1)},\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)}))\otimes_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)})}^{L}\varphi^{-1}(\mathcal{M}^{\cdot}) \] \[ \tilde{\to}R\underline{\mathcal{H}om}_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)})}(\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0,1)},\varphi^{-1}(\mathcal{M}^{\cdot})) \] where the last isomorphism uses the fact that $\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0,1)}$ admits, locally, a finite free resolution over $\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)})$. \end{proof} In turn, this implies \begin{cor} We have a functorial isomorphism \[ R\underline{\mathcal{H}om}_{\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)}}(\int_{\varphi}\mathcal{M}^{\cdot},\mathcal{N}^{\cdot})\tilde{\to}\varphi_{*}R\underline{\mathcal{H}om}_{\mathcal{D}_{\mathfrak{X}_{n}}^{(0,1)}}(\mathcal{M}^{\cdot},\varphi^{\dagger}\mathcal{N}^{\cdot}) \] for all $\mathcal{M}^{\cdot}\in D_{qcoh}^{b}(\mathcal{G}(\mathcal{D}_{\mathfrak{X}_{n}}^{(0,1)}))$ and $\mathcal{N}^{\cdot}\in D_{qcoh}^{b}(\mathcal{G}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)}))$. \end{cor}
\begin{proof} (Just as in \cite{key-49}, proposition 1.5.25). By the previous proposition, it suffices to prove the result for $\varphi^{\sharp}$ instead of $\varphi^{\dagger}$. To proceed, note that we have the local cohomology functor $\mathcal{N}^{\cdot}\to R\Gamma_{\mathfrak{X}_{n}}(\mathcal{N}^{\cdot})$ which takes $\mathcal{N}^{\cdot}\in D_{qcoh}^{b}(\mathcal{G}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)}))$ to $D_{qcoh}^{b}(\mathcal{G}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)}))$. We have \[ R\underline{\mathcal{H}om}_{\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)}}(\int_{\varphi}\mathcal{M}^{\cdot},\mathcal{N}^{\cdot})=R\underline{\mathcal{H}om}_{\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)}}(\varphi_{*}(\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0,1)}\otimes_{\mathcal{D}_{\mathfrak{X}_{n}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}),\mathcal{N}^{\cdot}) \] \[ \tilde{=}R\underline{\mathcal{H}om}_{\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)}}(\varphi_{*}(\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0,1)}\otimes_{\mathcal{D}_{\mathfrak{X}_{n}}^{(0,1)}}^{L}\mathcal{M}^{\cdot}),R\Gamma_{\mathfrak{X}_{n}}(\mathcal{N}^{\cdot})) \] \[ \tilde{=}\varphi_{*}(R\underline{\mathcal{H}om}_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)})}(\varphi^{-1}(\varphi_{*}(\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0,1)}\otimes_{\mathcal{D}_{\mathfrak{X}_{n}}^{(0,1)}}^{L}\mathcal{M}^{\cdot})),\varphi^{-1}(R\Gamma_{\mathfrak{X}_{n}}(\mathcal{N}^{\cdot})))) \] \[ \tilde{=}\varphi_{*}(R\underline{\mathcal{H}om}_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)})}(\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0,1)}\otimes_{\mathcal{D}_{\mathfrak{X}_{n}}^{(0,1)}}^{L}\mathcal{M}^{\cdot},\varphi^{-1}(R\Gamma_{\mathfrak{X}_{n}}(\mathcal{N}^{\cdot})))) \] \[ \tilde{=}\varphi_{*}R\underline{\mathcal{H}om}_{\mathcal{D}_{\mathfrak{X}_{n}}^{(0,1)}}(\mathcal{M}^{\cdot},R\underline{\mathcal{H}om}_{\varphi^{-1}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)})}(\mathcal{D}_{\mathfrak{Y}_{n}\leftarrow\mathfrak{X}_{n}}^{(0,1)},\varphi^{-1}(R\Gamma_{\mathfrak{X}_{n}}(\mathcal{N}^{\cdot})))) \] \[ \tilde{=}\varphi_{*}R\underline{\mathcal{H}om}_{\mathcal{D}_{\mathfrak{X}_{n}}^{(0,1)}}(\mathcal{M}^{\cdot},\varphi^{\sharp}\mathcal{N}^{\cdot}) \] where, in both the second isomorphism and the last, we have used the existence of an exact triangle \[ R\Gamma_{\mathfrak{X}_{n}}(\mathcal{N}^{\cdot})\to\mathcal{N}^{\cdot}\to\mathcal{K}^{\cdot} \] where $\mathcal{K}^{\cdot}\in D_{qcoh}^{b}(\mathcal{G}(\mathcal{D}_{\mathfrak{Y}_{n}}^{(0,1)}))$
is isomorphic to $Rj_{*}(\mathcal{N}^{\cdot}|_{\mathfrak{Y}_{n}\backslash\mathfrak{X}_{n}})$; here $j:\mathfrak{Y}_{n}\backslash\mathfrak{X}_{n}\to\mathfrak{Y}_{n}$ is the inclusion. In particular, we have that $R\underline{\mathcal{H}om}(\mathcal{C}^{\cdot},\mathcal{K}^{\cdot})=0$ for any $\mathcal{C}^{\cdot}$ supported along $\mathfrak{X}_{n}$. \end{proof} \begin{cor} There is a canonical map \[ \text{tr}:\int_{\varphi}\mathcal{O}_{\mathfrak{X}_{n}}[d_{X}]\to\mathcal{O}_{\mathfrak{Y}_{n}}[d_{Y}] \] If $\psi:\mathfrak{Y}\to\mathfrak{Z}$ is a smooth morphism, then for each $n$ the trace map attached to the composition $\psi\circ\varphi$ agrees with the composition of the trace maps for $\varphi$ and $\psi$.
We also have a trace map \[ \text{tr}:\int_{\varphi}\mathcal{O}_{\mathfrak{X}}[d_{X}]\to\mathcal{O}_{\mathfrak{Y}}[d_{Y}] \] given by taking the inverse limit of the above maps; the same compatibility holds for this trace as well. \end{cor}
\begin{proof} The previous corollary gives an adjunction ${\displaystyle \int_{\varphi}\varphi^{\dagger}\to\text{Id}}$. Since $\varphi^{\dagger}(\mathcal{O}_{\mathfrak{Y}_{n}})=\mathcal{O}_{\mathfrak{X}_{n}}[d_{X}-d_{Y}]$ we obtain the trace map via this adjunction. \end{proof} Now, by factoring an arbitrary projective morphism as a closed immersion followed by a smooth projective map, we obtain by composing the trace maps a trace map for an arbitrary projective morphism. Arguing as in the classical case (c.f. \cite{key-54}, section 2.10), we see that this map is independent of the choice of the factorization. Therefore we obtain \begin{thm} Let $\varphi:\mathfrak{X}\to\mathfrak{Y}$ be a projective morphism. Then we have a functorial morphism \[ \int_{\varphi}\mathbb{D}_{\mathfrak{X}}\mathcal{M}^{\cdot}\to\mathbb{D}_{\mathfrak{Y}}\int_{\varphi}\mathcal{M}^{\cdot} \] which is an isomorphism for $\mathcal{M}^{\cdot}\in D_{coh}^{b}(\mathcal{G}(\mathcal{D}_{\mathfrak{X}}^{(0,1)}))$. Further, we have a functorial isomorphism \[ R\underline{\mathcal{H}om}_{\mathcal{D}_{\mathfrak{Y}}^{(0,1)}}(\int_{\varphi}\mathcal{M}^{\cdot},\mathcal{N}^{\cdot})\tilde{\to}\varphi_{*}R\underline{\mathcal{H}om}_{\mathcal{D}_{\mathfrak{X}}^{(0,1)}}(\mathcal{M}^{\cdot},\varphi^{\dagger}\mathcal{N}^{\cdot}) \] for all $\mathcal{M}^{\cdot}\in D_{coh}^{b}(\mathcal{G}(\mathcal{D}_{\mathfrak{X}}^{(0,1)}))$ and $\mathcal{N}^{\cdot}\in D_{coh}^{b}(\mathcal{G}(\mathcal{D}_{\mathfrak{Y}}^{(0,1)}))$. \end{thm}
\section{Applications}
In this section we put things together and give the statement and proof of our generalization of Mazur's theorem for a mixed Hodge module. We begin with a brief review of the pushforward operation in the world of mixed Hodge modules.
Let $X_{\mathbb{C}}$ be a smooth complex variety, and suppose that $(\mathcal{M}_{\mathbb{C}},F^{\cdot},\mathcal{K}_{\mathbb{Q}},W_{\cdot})$ is a mixed Hodge module on $X_{\mathbb{C}}$. We won't attempt to recall a complete definition here, instead referring the reader to\cite{key-15},\cite{key-16}, and the excellent survey \cite{key-56}. We will only recall that $\mathcal{M}_{\mathbb{C}}$ is a coherent $\mathcal{D}$-module which comes equipped with a good filtration $F^{\cdot}$, a weight filtration $W_{\cdot}$, and $\mathcal{K}_{\mathbb{Q}}$ is a perverse sheaf defined over $\mathbb{Q}$ which corresponds to $\mathcal{M}_{\mathbb{C}}$ under the Riemann-Hilbert correspondence. In this paper, our attention is on the filtration $F^{\cdot}$ and we will mostly suppress the other aspects of the theory. For the sake of notational convenience, we will denote simply by $\mathcal{O}_{X_{\mathbb{C}}}$ the mixed Hodge module whose underlying filtered $\mathcal{D}$-module is $\mathcal{O}_{X_{\mathbb{C}}}$ with its trivial filtration: $F^{i}(\mathcal{O}_{X_{\mathbb{C}}})=\mathcal{O}_{X_{\mathbb{C}}}$ for all $i\geq0$, while $F^{i}(\mathcal{O}_{X_{\mathbb{C}}})=0$ for $i<0$.
Now let $\varphi:X_{\mathbb{C}}\to Y_{\mathbb{C}}$ be a morphism of smooth complex varieties. By Nagata's compactification theorem, combined with Hironaka's resolution of singularities, we can find an open immersion $j:X_{\mathbb{C}}\to\overline{X}_{\mathbb{C}}$ into a smooth variety, whose complement is a normal crossings divisor, and a proper morphism $\overline{\varphi}:\overline{X}_{\mathbb{C}}\to Y_{\mathbb{C}}$, with $\varphi=\overline{\varphi}\circ j$.
Then, the following is one of the main results of \cite{key-16} (c.f. theorem 4.3 and theorem 2.14) \begin{thm} Let $\varphi,\overline{\varphi},j$ be morphisms as above.
1) There is a mixed Hodge module $(j_{\star}(\mathcal{M}_{\mathbb{C}}),F^{\cdot},j_{*}\mathcal{K}_{\mathbb{Q}},W_{\cdot})$, whose underlying $\mathcal{D}$-module agrees with the usual pushforward of $\mathcal{D}$-modules under $j$. This defines an exact functor $j_{\star}:\text{MHM}(X_{\mathbb{C}})\to\text{MHM}(\overline{X}_{\mathbb{C}})$.
2) There is an object of $D^{b}(\text{MHM}(Y_{\mathbb{C}}))$, $R\overline{\varphi}_{\star}(j_{\star}(\mathcal{M}_{\mathbb{C}}),F^{\cdot},j_{*}\mathcal{K}_{\mathbb{Q}},W_{\cdot})$, whose underlying complex of filtered $\mathcal{D}$-modules agrees with ${\displaystyle \int_{\overline{\varphi}}(j_{\star}\mathcal{M}_{\mathbb{C}})}$. This object of $D^{b}(\text{MHM}(Y_{\mathbb{C}}))$ is, up to isomorphism, independent of the choice of factorization $\varphi=\overline{\varphi}\circ j$. Furthermore, the filtration on this complex is strict. \end{thm}
The reason for stating the theorem this way is that, if $\varphi$ is not proper, the filtered pushforward ${\displaystyle \int_{\varphi}}$ of filtered $\mathcal{D}$-modules does not agree with the pushforward of mixed Hodge modules. The issue appears already if $Y_{\mathbb{C}}$ is a point and $\mathcal{M}_{\mathbb{C}}=\mathcal{O}_{X_{\mathbb{C}}}$. In that case, the pushforward $R\varphi_{\star}$ returns\footnote{up to a homological shift, and a re-indexing of the Hodge filtration} Deligne's Hodge cohomology of $X_{\mathbb{C}}$, while ${\displaystyle {\displaystyle \int_{\varphi}}}$ returns the de Rham cohomology of $X_{\mathbb{C}}$ equipped with the naive Hodge-to-de Rham filtration; these disagree, e.g., if $X_{\mathbb{C}}$ is affine.
The construction of the extension $j_{\star}(\mathcal{M}_{\mathbb{C}})$ is, in general, quite deep, and relies on the detailed study of the degenerations of Hodge structures given in \cite{key-60} and \cite{key-61}. However, when $\mathcal{M}_{\mathbb{C}}=\mathcal{O}_{X_{\mathbb{C}}}$ is the trivial mixed Hodge module, one can be quite explicit: \begin{lem} \label{lem:Hodge-filt-on-j_push}Let $j:X_{\mathbb{C}}\to\overline{X}_{\mathbb{C}}$ be an open immersion of smooth varieties, whose complement is a normal crossings divisor $D_{\mathbb{C}}$. Let $x\in\overline{X}_{\mathbb{C}}$ be a point near which $D_{\mathbb{C}}$ is given by the equation $x_{1}\cdots x_{j}=0$. Then as filtered $\mathcal{D}$-modules we have $j_{\star}\mathcal{O}_{X_{\mathbb{C}}}=(j_{*}(\mathcal{O}_{X_{\mathbb{C}}}),F^{\cdot})$ where $F^{l}(j_{*}(\mathcal{O}_{X_{\mathbb{C}}})):=F^{l}(\mathcal{D}_{\overline{X}_{\mathbb{C}}})\cdot(x_{1}\cdots x_{j})^{-1}$.
In particular, $F^{l}(j_{*}(\mathcal{O}_{X_{\mathbb{C}}}))$ is spanned over $\mathcal{O}_{\overline{X}_{\mathbb{C}}}$ by terms of the form $x_{1}^{-(i_{1}+1)}\cdots x_{j}^{-(i_{j}+1)}$ where ${\displaystyle \sum_{t=1}^{j}i_{t}\leq l}$. \end{lem}
For a proof, see \cite{key-6}, section 8. This implies that the Hodge cohomology of $X_{\mathbb{C}}$, as an object in the filtered derived category of vector spaces, can be computed as ${\displaystyle \int_{\overline{\varphi}}j_{\star}\mathcal{O}_{X_{\mathbb{C}}}(d)[d]}$ where $\overline{\varphi}:\overline{X}_{\mathbb{C}}\to\{*\}$. Of course, this can be checked directly by comparing the log de Rham complex with the de Rham complex of the filtered $\mathcal{D}$-module $j_{\star}\mathcal{O}_{X_{\mathbb{C}}}$.
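For example, in the simplest one-variable case (included here only as an illustration of the lemma above): if $\overline{X}_{\mathbb{C}}=\mathbb{A}_{\mathbb{C}}^{1}$ with coordinate $x_{1}$ and $D_{\mathbb{C}}=\{x_{1}=0\}$, so that $X_{\mathbb{C}}=\mathbb{G}_{m}$, then \[ F^{l}(j_{*}(\mathcal{O}_{X_{\mathbb{C}}}))=\sum_{i=0}^{l}\mathcal{O}_{\overline{X}_{\mathbb{C}}}\cdot x_{1}^{-(i+1)}, \] i.e., the Hodge filtration on $j_{*}(\mathcal{O}_{X_{\mathbb{C}}})$ is the filtration by order of pole along $D_{\mathbb{C}}$, shifted by one.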
Combining \thmref{Mazur!}with \corref{proper-push-over-W(k)} gives: \begin{prop} 1) Let $\varphi:\mathfrak{X}\to\mathfrak{Y}$ be a projective morphism, and let $\mathfrak{D}\subset\mathfrak{X}$ be a (possibly empty) normal crossings divisor. Let ${\displaystyle j_{\star}D(\mathcal{O}_{\mathfrak{X}})}$ be the gauge of \exaref{Integral-j} on $\mathfrak{X}$. Suppose that each $\mathcal{H}^{i}({\displaystyle \int_{\varphi}(j_{\star}\mathcal{O}_{\mathfrak{X}})^{-\infty}})$ is a $p$-torsion-free $\widehat{\mathcal{D}}_{\mathfrak{Y}}^{(0)}$-module, and that each $\mathcal{H}^{i}({\displaystyle (\int_{\varphi}{\displaystyle j_{\star}D(\mathcal{O}_{\mathfrak{X}}})}\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f])$ is $f$-torsion-free. Then each $\mathcal{H}^{i}{\displaystyle (\int_{\varphi}{\displaystyle j_{\star}D(\mathcal{O}_{\mathfrak{X}}}}))$ is a standard gauge on $\mathfrak{Y}$.
2) Let ${\displaystyle j_{!}D(\mathcal{O}_{\mathfrak{X}}):=\mathbb{D}_{\mathfrak{X}}j_{\star}D(\mathcal{O}_{\mathfrak{X}})}$. The same conclusion holds for $j_{!}D(\mathcal{O}_{\mathfrak{X}})$. \end{prop}
When $\mathfrak{Y}$ is a point this recovers the log-version of Mazur's theorem, as discussed in Ogus' paper \cite{key-18}.
Now let $R$ be a finite type algebra over $\mathbb{Z}$ so that there exist smooth (over $R$) models $X_{R},Y_{R}$ for $X_{\mathbb{C}}$ and $Y_{\mathbb{C}}$, respectively, and a projective morphism $\varphi:X_{R}\to Y_{R}$ whose base change to $\mathbb{C}$ is the original morphism. We may suppose the divisor $D_{\mathbb{C}}$ is defined over $R$ as well.
Let $\mathcal{D}_{X_{R}}^{(0)}$ be the level zero differential operators over $X_{R}$, equipped with the symbol filtration; let the associated Rees algebra be $\mathcal{R}(\mathcal{D}_{X_{R}}^{(0)})$ (as usual we will use $f$ for the Rees parameter). Since $\text{Rees}(j_{*}\mathcal{O}_{U_{\mathbb{C}}})$ is a coherent $\mathcal{R}(\mathcal{D}_{X_{\mathbb{C}}})$-module, we can by generic flatness choose a flat model for $\text{Rees}(j_{*}\mathcal{O}_{U_{\mathbb{C}}})$; in fact, we can describe it explicitly as follows: if $D_{R}$ is given, in local coordinates, by $\{x_{1}\cdots x_{j}=0\}$, then we may consider \[ \mathcal{D}_{X_{R}}^{(0)}\cdot x_{1}^{-1}\cdots x_{j}^{-1}\subset j_{*}\mathcal{O}_{U_{R}} \] with the filtration inherited from the symbol filtration on $\mathcal{D}_{X_{R}}^{(0)}$. The Rees module of this filtered $\mathcal{D}_{X_{R}}^{(0)}$-module is a flat $R$-model for $\text{Rees}(j_{*}\mathcal{O}_{U_{\mathbb{C}}})$. Let us call this sheaf ${\displaystyle j_{\star}\mathcal{O}_{U_{R}}[f]}$; we will denote the associated filtered $\mathcal{D}_{X_{R}}^{(0)}$-module by ${\displaystyle j_{\star}\mathcal{O}_{U_{R}}}$. Then, localizing $R$ if necessary, we have that \[ \int_{\varphi}j_{\star}\mathcal{O}_{U_{R}}[f] \] is an $f$-torsion-free complex inside $D_{coh}^{b}(\mathcal{D}_{Y_{R}}^{(0)}-\text{mod})$ (since it becomes $f$-torsion-free after base change to $\mathbb{C}$, as remarked above). By generic flatness, we may also suppose (again, localizing $R$ if necessary), that each cohomology sheaf ${\displaystyle \mathcal{H}^{i}(\int_{\varphi}j_{\star}\mathcal{O}_{U_{R}})}$ is flat over $R$. Let $k$ be a perfect field of characteristic $p>0$, for which there is a morphism $R\to W(k)$ (so that $R/p\to k$)\footnote{If we extend $R$ so that it is smooth over $\mathbb{Z}$, then any map $R/p\to k$ lifts to $R\to W(k)$}. Then, combining this discussion with the previous proposition, we obtain \begin{cor} \label{cor:Mazur-for-Hodge-1}Let $\mathfrak{X}$ be the formal completion of $X_{R}\times_{R}W(k)$, and similarly for $\mathfrak{Y}$. Then each gauge $\mathcal{H}^{i}({\displaystyle (\int_{\varphi}{\displaystyle j_{\star}D(\mathcal{O}_{\mathfrak{X}})}}))$ is a standard, coherent, $F^{-1}$-gauge on $\mathfrak{Y}$. There is an isomorphism \[ \mathcal{H}^{i}(({\displaystyle (\int_{\varphi}{\displaystyle j_{\star}\mathcal{O}_{\mathfrak{X}}[f,v]}})\otimes_{W(k)}^{L}k)\otimes_{D(k)}^{L}k[f])\tilde{\to}F^{*}\mathcal{H}^{i}(\int_{\varphi}j_{\star}\mathcal{O}_{U_{R}}[f]\otimes_{R}^{L}k) \] in $\mathcal{G}(\mathcal{R}(\mathcal{D}_{X}^{(1)}))$. In particular, the Hodge filtration on ${\displaystyle \mathcal{H}^{i}({\displaystyle (\int_{\varphi}{\displaystyle j_{\star}D(\mathcal{O}_{\mathfrak{X}}})}))^{\infty}/p}$ is the Frobenius pullback of the Hodge filtration on ${\displaystyle \mathcal{H}^{i}(\int_{\varphi}j_{\star}\mathcal{O}_{U_{R}}\otimes_{R}^{L}k)}$. The same holds if we replace ${\displaystyle j_{\star}D(\mathcal{O}_{\mathfrak{X}}})$ by ${\displaystyle j_{!}D(\mathcal{O}_{\mathfrak{X}}})$. The same statement holds for the pushforward of ${\displaystyle \mathcal{H}^{i}(\int_{\varphi}j_{\star}\mathcal{O}_{U_{R}})}$ under another proper morphism $\psi:Y\to Z$. \end{cor}
\begin{proof} The displayed isomorphism follows immediately from \thmref{Hodge-Filtered-Push}. Since ${\displaystyle \int_{\varphi}j_{\star}\mathcal{O}_{U_{R}}[f]}$ has $f$-torsion free cohomology sheaves, which are also flat over $R$, we deduce that ${\displaystyle {\displaystyle ((\int_{\varphi}{\displaystyle j_{\star}\mathcal{O}_{\mathfrak{X}}[f,v]}})\otimes_{W(k)}^{L}k})\otimes_{D(k)}^{L}k[f]$ has $f$-torsion free cohomology sheaves. Comparing the description of the Hodge filtration on ${\displaystyle ({\displaystyle j_{\star}\mathcal{O}_{\mathfrak{X}}[f,v]}})^{\infty}/p$ with the result of \lemref{Hodge-filt-on-j_push}, the result now follows from \thmref{F-Mazur}. \end{proof} Let us give some first applications of these results.
Suppose that $X_{\mathbb{C}}$ is an arbitrary (possibly singular) quasi-projective variety. Let $V_{\mathbb{C}}$ be a smooth quasi-projective variety such that there is a closed embedding $X_{\mathbb{C}}\to V_{\mathbb{C}}$, and let $\overline{V}_{\mathbb{C}}$ be a projective compactification of $V_{\mathbb{C}}$ such that $\overline{V}_{\mathbb{C}}\backslash V_{\mathbb{C}}$ is a normal crossings divisor. Let $U_{\mathbb{C}}\subset X_{\mathbb{C}}$ be an affine open \emph{smooth} subset. Let $\varphi:\tilde{X}_{\mathbb{C}}\to X_{\mathbb{C}}$ denote a resolution of singularities so that $\varphi$ is an isomorphism over $U_{\mathbb{C}}$ and $\varphi^{-1}(X_{\mathbb{C}}\backslash U_{\mathbb{C}})$ is a normal crossings divisor $\tilde{D}_{\mathbb{C}}\subset\tilde{X}_{\mathbb{C}}$. The decomposition theorem for Hodge modules implies that the complex ${\displaystyle \int_{\varphi}\mathcal{O}_{\tilde{X}_{\mathbb{C}}}}\in D^{b}(\text{MHM}_{X})$ is quasi-isomorphic to the direct sum of its cohomology sheaves, and that each such sheaf is a direct sum of simple, pure Hodge modules.
Therefore, if $j:U_{\mathbb{C}}\to X_{\mathbb{C}}$ (resp. $j':U_{\mathbb{C}}\to\tilde{X}_{\mathbb{C}}$) denotes the inclusion, then the image of the natural map \[ \mathcal{H}^{0}({\displaystyle \int_{\varphi}\mathcal{O}_{\tilde{X}_{\mathbb{C}}}})\to\mathcal{H}^{0}(\int_{\varphi}j'_{\star}\mathcal{O}_{U_{\mathbb{C}}})\tilde{\to}\mathcal{H}^{0}(j_{\star}\mathcal{O}_{U_{\mathbb{C}}}) \] is the Hodge module $\text{IC}_{X}$; indeed, ${\displaystyle \mathcal{H}^{0}({\displaystyle \int_{\varphi}\mathcal{O}_{\tilde{X}_{\mathbb{C}}}})}=\text{IC}_{X}\oplus\mathcal{M}$ where $\mathcal{M}$ is a pure Hodge module supported on $X_{\mathbb{C}}\backslash U_{\mathbb{C}}$; its image in $\mathcal{H}^{0}({\displaystyle j_{\star}\mathcal{O}_{U_{\mathbb{C}}})}$ is therefore isomorphic to $\text{IC}_{X}$ (as a Hodge module, and so in particular as a filtered $\mathcal{D}$-module).
Now let $\overline{X}_{\mathbb{C}}$ denote the closure of $X_{\mathbb{C}}$ in $\overline{V}_{\mathbb{C}}$, and let $\varphi:\tilde{\overline{X}}_{\mathbb{C}}\to\overline{X}_{\mathbb{C}}$ be a resolution of singularities, whose restriction to $X_{\mathbb{C}}\subset\overline{X}_{\mathbb{C}}$ is isomorphic to $\varphi:\tilde{X}_{\mathbb{C}}\to X_{\mathbb{C}}$, and so that the inverse image of $\overline{X}_{\mathbb{C}}\backslash X_{\mathbb{C}}$ is a normal crossings divisor (we can modify $\varphi$ if necessary to ensure that this happens). Let $i:X_{\mathbb{C}}\to\overline{X}_{\mathbb{C}}$ and $i':\tilde{X}_{\mathbb{C}}\to\tilde{\overline{X}}_{\mathbb{C}}$ denote the inclusions. Since Hodge modules on $X_{\mathbb{C}}$ are, by definition, Hodge modules on $V_{\mathbb{C}}$ which are supported on $X_{\mathbb{C}}$, the fact that $\overline{V}_{\mathbb{C}}\backslash V_{\mathbb{C}}$ is a divisor implies that $i_{*}$ is an exact functor on the category of mixed Hodge modules. Therefore the image of the natural map \[ \mathcal{H}^{0}(\int_{\varphi}i'_{\star}\mathcal{O}_{\tilde{X}_{\mathbb{C}}})\tilde{=}i_{\star}\mathcal{H}^{0}({\displaystyle \int_{\varphi}\mathcal{O}_{\tilde{X}_{\mathbb{C}}}})\to i_{\star}\mathcal{H}^{0}(\int_{\varphi}j'_{\star}\mathcal{O}_{U_{\mathbb{C}}})\tilde{=}\mathcal{H}^{0}(i\circ j)_{\star}\mathcal{O}_{U_{\mathbb{C}}} \] is isomorphic to $i_{\star}(\text{IC}_{X})$ (again, as a Hodge module, and so in particular as a filtered $\mathcal{D}$-module).
As above, we now select a finite type $\mathbb{Z}$-algebra $R$ so that everything in sight is defined and flat over $R$, and let $R\to W(k)$ for some perfect $k$ of characteristic $p>0$. Let $\tilde{\mathfrak{\overline{X}}}\to\mathfrak{\overline{X}}\subset\overline{\mathcal{V}}$ be the formal completion of $\tilde{\overline{X}}_{R}\times_{R}W(k)\to\overline{X}_{R}\times_{R}W(k)\subset\overline{V}_{R}\times_{R}W(k)$. Abusing notation slightly we'll also denote by $\varphi$ the composed map $\tilde{\mathfrak{\overline{X}}}\to\widehat{\overline{\mathcal{V}}}$. \begin{cor} \label{cor:Mazur-for-IC}1) The image of the map \[ \mathcal{H}^{0}(\int_{\varphi}i'_{\star}D(\mathcal{O}_{\tilde{\mathfrak{X}}}))\to\mathcal{H}^{0}(\int_{\varphi}(i'\circ j')_{*}D(\mathcal{O}_{\mathfrak{U}})) \] defines a coherent, standard $F^{-1}$-gauge on $\widehat{\mathbb{P}^{n}}$, denoted $\text{IC}_{\mathfrak{X}}$. The $\widehat{\mathcal{D}}_{\overline{\mathcal{V}}}^{(0)}$-module $\text{IC}_{\mathfrak{X}}^{-\infty}$ is isomorphic to the $p$-adic completion of $\text{IC}_{X_{R}}\otimes_{R}W(k)$, where $\text{IC}_{X_{R}}$ is an $R$-model for $\text{IC}_{X_{\mathbb{C}}}$. The Hodge filtration on the $\mathcal{D}_{\overline{V}_{k}}^{(1)}$-module $\widehat{\text{IC}_{\mathfrak{X}}^{\infty}}/p\tilde{=}F^{*}\text{IC}_{\mathfrak{X}}^{-\infty}/p$ is equal to the Frobenius pullback of the Hodge filtration on $\text{IC}_{\mathfrak{X}}^{-\infty}/p\tilde{=}\text{IC}_{X_{R}}\otimes_{R}k$ coming from the Hodge filtration on $\text{IC}_{X_{R}}$.
2) The intersection cohomology groups $\text{IH}^{i}(X_{R})\otimes_{R}W(k):=\mathbb{H}_{dR}^{i}(\text{IC}_{X_{R}})\otimes_{R}W(k)$ satisfy the conclusions of Mazur's theorem, as in \thmref{Mazur-for-IC-Intro}. \end{cor}
\begin{proof} Since the displayed map is a map of coherent gauges, the image, $i_{\star}\text{IC}_{\mathfrak{X}}$, is a coherent gauge. Since both ${\displaystyle i'_{\star}D(\mathcal{O}_{\tilde{\mathfrak{X}}})}$ and $(i'\circ j')_{*}D(\mathcal{O}_{\mathfrak{U}}))$ are $F^{-1}$gauges, and the natural map $i'_{\star}D({\displaystyle \mathcal{O}_{\tilde{\mathfrak{X}}})\to(i'\circ j')_{*}D(\mathcal{O}_{\mathfrak{U}})}$ is $F^{-1}$-equivariant, the same is true of the displayed map, and so $i_{\star}\text{IC}_{\mathfrak{X}}$ is an $F^{-1}$-gauge. By \propref{push-and-complete-for-D} (and the exactness of the functor $\mathcal{M}\to\mathcal{M}^{-\infty}$) we have that the image of \[ \mathcal{H}^{0}(\int_{\varphi}i'_{\star}D(\mathcal{O}_{\tilde{\mathfrak{X}}}))^{-\infty}\to\mathcal{H}^{0}(\int_{\varphi}(i'\circ j')_{*}D(\mathcal{O}_{\mathfrak{U}}))^{-\infty} \] is equal to the image of \[ \mathcal{H}^{0}(\int_{\varphi}(i_{\star}\mathcal{O}_{\tilde{\mathfrak{X}}})^{-\infty})\to\mathcal{H}^{0}\int_{\varphi}((i'\circ j')_{*}D(\mathcal{O}_{\mathfrak{U}}))^{-\infty} \] in the category of $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$-modules. On the other hand, we have $R$-flat filtered $\mathcal{D}_{X_{R}}^{(0)}$-modules ${\displaystyle \mathcal{H}^{0}(\int_{\varphi}i_{\star}\mathcal{O}_{\tilde{X}_{R}})}$ and ${\displaystyle \mathcal{H}^{0}(\int_{\varphi}(i'\circ j')_{*}\mathcal{O}_{U_{R}})}$ such that the $p$-adic completion of $\mathcal{H}^{0}(\int_{\varphi}i_{\star}\mathcal{O}_{\tilde{X}_{R}}){\displaystyle \otimes_{R}W(k)}$ is ${\displaystyle \mathcal{H}^{0}(\int_{\varphi}(i_{\star}\mathcal{O}_{\tilde{\mathfrak{X}}})^{-\infty})}$, and the $p$-adic completion of ${\displaystyle \mathcal{H}^{0}(\int_{\varphi}(i'\circ j')_{*}\mathcal{O}_{U_{R}})}$ is ${\displaystyle \mathcal{H}^{0}\int_{\varphi}((i'\circ j')_{*}D(\mathcal{O}_{\mathfrak{U}}))^{-\infty}}$, and, after localizing $R$ if necessary, we may further suppose that the kernel of the map \begin{equation} {\displaystyle \mathcal{H}^{0}(\int_{\varphi}i_{\star}\mathcal{O}_{\tilde{X}_{R}})}\to\mathcal{H}^{0}(\int_{\varphi}(i'\circ j')_{*}\mathcal{O}_{U_{R}})\label{eq:natural-map-over-R} \end{equation} is a summand (in the category of filtered $\mathcal{D}_{X_{R}}^{(0)}$-modules) of ${\displaystyle {\displaystyle \mathcal{H}^{0}(\int_{\varphi}\mathcal{O}_{\tilde{X}_{R}})}}$ (as this is true over $\mathbb{C}$). Thus the image is flat over $R$, and so its $p$-adic completion is $p$-torsion-free; therefore $\text{IC}_{\mathfrak{X}}^{-\infty}$ is $p$-torsion-free, as is $\text{IC}_{\mathfrak{X}}^{\infty}$ (since $\text{IC}_{\mathfrak{X}}^{-\infty}$ is an $F^{-1}$-gauge; c.f. the proof of \thmref{F-Mazur}) Further, the map \eqref{natural-map-over-R} is strict with respect to the Hodge filtration, and so the same is true after taking reduction mod $p$ and applying $F^{*}$. It follows that $\text{IC}_{\mathfrak{X}}^{\infty}/p/v$ is $f$-torsion-free.
Thus by \propref{Baby-Mazur}, we see that $\text{IC}_{\mathfrak{X}}$ is a standard gauge; the statement about the Hodge filtration follows from \corref{Mazur-for-Hodge-1}. This proves part $1)$, and part $2)$ follows from taking the pushforward to a point. \end{proof} \begin{rem} The construction above involved a few auxiliary choices, namely the ring $R$ and the resolution $\tilde{X}_{R}$. However, any two resolutions of singularities can be dominated by a third. Therefore, after possibly localizing $R$, any two definitions of $\text{IC}_{X_{R}}$ agree. Further, if we have an inclusion of rings $R\to R'$, then $\text{IC}_{X_{R}}\otimes_{R}R'=\text{IC}_{X_{R'}}$. Therefore we have $\mathbb{H}_{dR}^{i}(\text{IC}_{X_{R}})\otimes_{R}R'\tilde{\to}\mathbb{H}_{dR}^{i}(\text{IC}_{X_{R'}})$ when both are flat. Since any two finite-type $\mathbb{Z}$-algebras can be embedded into a third, we also obtain a comparison for any two such algebras. \end{rem}
Now suppose $X_{\mathbb{C}}$ is a smooth (quasiprojective) scheme, and let $i:Y_{\mathbb{C}}\to X_{\mathbb{C}}$ be a closed immersion; here, $Y_{\mathbb{C}}$ can be singular; let $j:X_{\mathbb{C}}\backslash Y_{\mathbb{C}}\to X_{\mathbb{C}}$ be the open immersion. Now, let $\tilde{j}:X_{\mathbb{C}}\to\overline{X}_{\mathbb{C}}$ be an open immersion into a smooth proper compactification of $X_{\mathbb{C}}$, so that $\overline{X}_{\mathbb{C}}\backslash X_{\mathbb{C}}$ is a normal crossings divisor. Choose flat $R$-models for everything in sight. Then we have \begin{cor} \label{cor:Mazur-for-Ordinary}For each $i$, the Hodge cohomology group $H^{i}(Y_{\mathbb{C}})$ admits a flat model $H^{i}(Y_{R})$ (as a filtered vector space). Let $k$ be a perfect field such that $R\to W(k)$. Then there is a standard $F^{-1}$-gauge $H^{i}(Y_{R})_{W(k)}^{\cdot}$ such that $H^{i}(Y_{R})_{W(k)}^{-\infty}\tilde{=}H^{i}(Y_{R})\otimes_{R}W(k)$, and such that the Hodge filtration on $H^{i}(Y_{R})_{W(k)}^{\infty}/p$ agrees with the Frobenius pullback of the Hodge filtration on $H^{i}(Y_{R})\otimes_{R}k$. In particular, there is a Frobenius-linear isomorphism of $H^{i}(Y_{R})_{W(k)}[p^{-1}]$ for which the Hodge filtration on $H^{i}(Y_{R})_{W(k)}$ satisfies the conclusions of Mazur's theorem. The same holds for the compactly supported Hodge cohomology $H_{c}^{i}(Y_{\mathbb{C}})$. \end{cor}
\begin{proof} As the usual Hodge cohomology and the compactly supported Hodge cohomology are interchanged under applying the filtered duality functor, it suffices to deal with the case of the compactly supported cohomology. Let us recall how to define this in the language of mixed Hodge modules. We have the morphism \[ Rj_{!}(\mathcal{O}_{X_{\mathbb{C}}\backslash Y_{\mathbb{C}}})\to\mathcal{O}_{X_{\mathbb{C}}} \] in the category of mixed Hodge modules (where $\mathcal{O}$ has its usual structure as the trivial mixed Hodge module). The cone of this map is, by definition, the complex of mixed Hodge modules representing the unit object on $Y_{\mathbb{C}}$; we denote it by $\mathbb{I}_{Y_{\mathbb{C}}}$. Then we have \[ H_{c}^{i}(Y_{\mathbb{C}})=\int_{\varphi}^{d+i}R\tilde{j}_{!}\mathbb{I}_{Y_{\mathbb{C}}}=\int_{\varphi}^{d+i}R\tilde{j}_{!}(\text{cone}(Rj_{!}(\mathcal{O}_{X_{\mathbb{C}}\backslash Y_{\mathbb{C}}})\to\mathcal{O}_{X_{\mathbb{C}}})) \] \[ \tilde{=}\int_{\varphi}^{d+i}\text{cone}(R(\tilde{j}\circ j)_{!}(\mathcal{O}_{X_{\mathbb{C}}\backslash Y_{\mathbb{C}}})\to R\tilde{j}_{!}\mathcal{O}_{X_{\mathbb{C}}}) \] Now, after spreading out both $R(\tilde{j}\circ j)_{!}(\mathcal{O}_{X_{\mathbb{C}}\backslash Y_{\mathbb{C}}})$ and $R\tilde{j}_{!}\mathcal{O}_{X_{\mathbb{C}}})$ over $R$, we can apply \corref{Mazur-for-Hodge-1}. \end{proof} \begin{rem} The previous two corollaries also hold for quasiprojective varieties defined over $\overline{\mathbb{Q}}$. Although the theory of mixed Hodge modules only exists over $\mathbb{C}$, its algebraic consequences, such as the strictness of the pushforward of modules of the form $j_{\star}(\mathcal{O}_{X})$, hold over any field of characteristic $0$. So the above results go through in this case as well. \end{rem}
Finally, we wish to give some relations of the theory of this paper to the Hodge structure of the local cohomology sheaves $\mathcal{H}_{Y_{\mathbb{C}}}^{i}(\mathcal{O}_{X_{\mathbb{C}}})$, as developed in {[}MP1{]}, {[}MP2{]}. Here, $X_{\mathbb{C}}$ is a smooth affine variety and $Y_{\mathbb{C}}\subset X_{\mathbb{C}}$ is a subscheme defined by $(Q_{1},\dots,Q_{r})$. In this case, the nontrivial sheaf is \[ \mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}})\tilde{=}\mathcal{O}_{X_{\mathbb{C}}}[Q_{1}^{-1}\cdots Q_{r}^{-1}]/\sum_{i=1}^{r}\mathcal{O}_{X_{\mathbb{C}}}\cdot Q_{1}^{-1}\cdots\widehat{(Q_{i}^{-1})}\cdots Q_{r}^{-1} \] where $\widehat{?}$ stands for ``omitted.'' As above, these sheaves admit a Hodge structure via \[ \mathcal{H}_{Y_{\mathbb{C}}}^{i}(\mathcal{O}_{X_{\mathbb{C}}})\tilde{=}\mathcal{H}^{i}(\int_{\varphi}\int_{j'}\mathcal{O}_{U_{\mathbb{C}}}) \] where $\varphi:\tilde{X}_{\mathbb{C}}\to X_{\mathbb{C}}$ is a resolution of singularities such that $\varphi^{-1}(Y_{\mathbb{C}})$ is a normal crossings divisor; and $j':U_{\mathbb{C}}\to\tilde{X}_{\mathbb{C}}$ is the inclusion. The resulting Hodge filtration is independent of the choice of the resolution. Taking $R$-models for everything in sight as above, we obtain a filtered $\mathcal{D}_{X_{R}}^{(0)}$-module ${\displaystyle \mathcal{H}^{i}(\int_{\varphi}j'_{\star}\mathcal{O}_{U_{R}})}$ which (localizing $R$ if necessary) is a flat $R$-model for $\mathcal{H}_{Y_{\mathbb{C}}}^{i}(\mathcal{O}_{X_{\mathbb{C}}})$.
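For orientation, consider the standard example $X_{\mathbb{C}}=\mathbb{A}_{\mathbb{C}}^{2}$ and $Y_{\mathbb{C}}=\{0\}$, defined by $(Q_{1},Q_{2})=(x,y)$ (this example plays no role in what follows and is included only as an illustration). Then $\mathcal{H}_{Y_{\mathbb{C}}}^{2}(\mathcal{O}_{X_{\mathbb{C}}})$ has a $\mathbb{C}$-basis given by the classes of the monomials $x^{-a}y^{-b}$ with $a,b\geq1$; the $\mathcal{D}_{X_{\mathbb{C}}}$-action is by differentiation, and the module is generated over $\mathcal{D}_{X_{\mathbb{C}}}$ by the single class $x^{-1}y^{-1}$. Since $Y_{\mathbb{C}}$ is smooth, the Hodge filtration is in this case simply \[ F^{m}(\mathcal{H}_{Y_{\mathbb{C}}}^{2}(\mathcal{O}_{X_{\mathbb{C}}}))=\text{span}_{\mathbb{C}}\{x^{-a}y^{-b}\,|\,a+b\leq m+2\}, \] which agrees with the order filtration $O^{m}$ recalled below.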
Now let $R\to W(k)$, and let $\mathfrak{X}$, $\tilde{\mathfrak{X}}$, etc. be the formal completions of the base-change to $W(k)$ as usual. Then we have a gauge \[ \mathcal{M}_{Y}:=\mathcal{H}^{i}(\int_{\varphi}j'_{\star}D(\mathcal{O}_{\mathfrak{U}})) \] which satisfies $\mathcal{M}_{Y}^{-\infty}={\displaystyle \mathcal{H}^{i}(\int_{\varphi}(j'_{\star}\mathcal{O}_{\mathfrak{U}_{W(k)}})^{-\infty})}$. \begin{lem} \label{lem:injectivity-for-local-coh}Let $\widehat{\mathcal{H}_{\mathfrak{Y}}^{i}(\mathcal{O}_{\mathfrak{X}})}:=\mathcal{H}^{i}(Rj_{*}\mathcal{O}_{\mathfrak{U}})$. (This is simply the $p$-adic completion of the $i$th algebraic local cohomology of $\mathfrak{X}$ along $\mathfrak{Y}$). Then the natural map \[ \mathcal{M}_{Y}^{-\infty}\to\widehat{\mathcal{H}_{\mathfrak{Y}}^{i}(\mathcal{O}_{\mathfrak{X}})} \] is injective. If $F$ is a lift of Frobenius, the natural map $F^{*}\mathcal{M}_{Y}^{-\infty}\to\widehat{\mathcal{H}_{\mathfrak{Y}}^{i}(\mathcal{O}_{\mathfrak{X}})}$ is also injective. \end{lem}
\begin{proof} We have the Hodge filtration on ${\displaystyle \mathcal{H}^{i}(\int_{\varphi}j'_{\star}\mathcal{O}_{U_{R}})}$, which is a filtration by coherent $\mathcal{O}_{X_{R}}$-modules; base changing to $W(k)$ yields a Hodge filtration on ${\displaystyle \mathcal{H}^{i}(\int_{\varphi}j'_{\star}\mathcal{O}_{U_{W(k)}})}$. The map in question is the $p$-adic completion of the natural map \[ \mathcal{H}^{i}(\int_{\varphi}j'_{\star}\mathcal{O}_{U_{W(k)}})\to\mathcal{H}_{Y_{W(k)}}^{i}(\mathcal{O}_{X_{W(k)}}) \] and the right hand module also has a Hodge filtration, which is simply the restriction of the Hodge filtration on $\mathcal{H}_{Y_{B}}^{r}(\mathcal{O}_{X_{B}})$ where $B=\text{Frac}(W(k))$. So the proof proceeds in an essentially identical manner to \lemref{Injectivity-of-completion}. \end{proof} Now, fix an integer $m\geq0$. Let us explain how to use this gauge to obtain an arithmetic description of the Hodge filtration, up to level $m$. Since $m$ is fixed, we may, after localizing $R$ as needed, suppose that the image of the map $F^{m}({\displaystyle \mathcal{H}^{i}(\int_{\varphi}j'_{\star}\mathcal{O}_{U_{R}})})\to\mathcal{H}_{Y_{R}}^{i}(\mathcal{O}_{X_{R}})$ is equal to $F^{m}(\mathcal{H}_{Y_{F}}^{i}(\mathcal{O}_{X_{F}}))\cap\mathcal{H}_{Y_{R}}^{i}(\mathcal{O}_{X_{R}})$. In particular the map \[ F^{m}({\displaystyle \mathcal{H}^{i}(\int_{\varphi}j'_{\star}\mathcal{O}_{U_{R}})})\otimes_{R}k\to\mathcal{H}_{Y_{k}}^{i}(\mathcal{O}_{X_{k}}) \] is injective; under the isomorphism $F^{*}\mathcal{H}_{Y_{k}}^{i}(\mathcal{O}_{X_{k}})\tilde{\to}\mathcal{H}_{Y_{k}}^{i}(\mathcal{O}_{X_{k}})$, we also obtain an injection $F^{*}(F^{m}({\displaystyle \mathcal{H}^{i}(\int_{\varphi}j'_{\star}\mathcal{O}_{U_{R}})})\otimes_{R}k)\to\mathcal{H}_{Y_{k}}^{i}(\mathcal{O}_{X_{k}})$. Then \begin{prop}
\label{prop:Hodge-for-local-coh!}Let the notation be as above. We have that the image of $\{g\in F^{*}\mathcal{M}_{Y}^{-\infty}|p^{j}g\in\mathcal{M}_{Y}^{-\infty}\}$
in $\mathcal{H}_{Y_{k}}^{i}(\mathcal{O}_{X_{k}})$ is exactly $F^{*}(F^{j}({\displaystyle \mathcal{H}^{i}(\int_{\varphi}j'_{\star}\mathcal{O}_{U_{R}})})\otimes_{R}k)$. For each $0\leq j\leq m$, this is also the image of $\{g\in\widehat{\mathcal{H}_{\mathfrak{Y}}^{i}(\mathcal{O}_{\mathfrak{X}})}|p^{j}g\in\mathcal{M}_{Y}^{-\infty}\}$. \end{prop}
\begin{proof} By construction $\mathcal{M}_{Y}$ is a standard, coherent, $F^{-1}$-gauge of index $0$ (this can be easily seen as the Hodge filtration is concentrated in degrees $\geq0$). Therefore, we have $\widehat{\mathcal{M}_{Y}^{\infty}}\tilde{=}F^{*}\mathcal{M}_{Y}$, and by the previous lemma $F^{*}\mathcal{M}_{Y}\to F^{*}\widehat{\mathcal{H}_{\mathfrak{Y}}^{i}(\mathcal{O}_{\mathfrak{X}})}\tilde{\to}\widehat{\mathcal{H}_{\mathfrak{Y}}^{i}(\mathcal{O}_{\mathfrak{X}})}$ is injective. Since $\mathcal{M}_{Y}$ is standard of index $0$, we have \[
\mathcal{M}_{Y}^{j}=\{m\in\mathcal{M}_{Y}^{\infty}|p^{j}m\in f_{\infty}(\mathcal{M}_{Y}^{0})\} \] Note that if $j\leq0$, this means $\mathcal{M}_{Y}^{j}\tilde{=}\mathcal{M}_{Y}^{-\infty}$, and the map \[ \eta_{j}:\mathcal{M}_{Y}^{j}\xrightarrow{f_{\infty}}\mathcal{M}_{Y}^{\infty}\xrightarrow{\widehat{?}}F^{*}\mathcal{M}_{Y}\to\widehat{\mathcal{H}_{\mathfrak{Y}}^{i}(\mathcal{O}_{\mathfrak{X}})} \] is simply $p^{-j}$ times the injection $\mathcal{M}_{Y}^{-\infty}\to\widehat{\mathcal{H}_{\mathfrak{Y}}^{i}(\mathcal{O}_{\mathfrak{X}})}$, and is therefore injective by \lemref{injectivity-for-local-coh}. If $j>0$, then $p^{j}\cdot\eta_{j}=\eta_{0}\circ v^{j}$ is injective for the same reason, and so $\eta_{j}$ is injective since everything in sight is $p$-torsion-free. Thus the entire gauge embeds into $\widehat{\mathcal{H}_{\mathfrak{Y}}^{i}(\mathcal{O}_{\mathfrak{X}})}$ and the first result follows.
For the second result, consider the standard gauge $\mathcal{N}_{Y}$
defined by $\mathcal{N}_{Y}^{j}:=\{m\in\widehat{\mathcal{H}_{\mathfrak{Y}}^{i}(\mathcal{O}_{\mathfrak{X}})}|p^{j}m\in\mathcal{M}_{Y}^{0}\}$ (the actions of $f$ and $v$ are inclusion and multiplication by $p$ as usual). For each $j$ we have a natural injection $\mathcal{M}_{Y}^{j}\to\mathcal{N}_{Y}^{j}$, which yields a morphism of gauges $\mathcal{M}_{Y}\to\mathcal{N}_{Y}$. Let us show that for $j\leq m$ the map $\psi:\mathcal{M}_{Y}^{j}\to\mathcal{N}_{Y}^{j}$ is an isomorphism. For $j\leq0$ this is clear by definition, so suppose it is true for some $j-1\leq m-1$. For any $j$ let $\mathcal{M}_{Y,0}^{j}:=\mathcal{M}_{Y}^{j}/p$ and similarly define $\mathcal{N}_{Y,0}^{j}$.
We first claim that the isomorphism $\psi:\mathcal{M}_{Y,0}^{j-1}\to\mathcal{N}_{Y,0}^{j-1}$ induces an isomorphism $\text{ker}(f:\mathcal{M}_{Y,0}^{j-1}\to\mathcal{M}_{Y,0}^{j})\tilde{\to}\text{ker}(f:\mathcal{N}_{Y,0}^{j-1}\to\mathcal{N}_{Y,0}^{j})$. Indeed, we have that $\mathcal{M}_{Y,0}^{j-1}/\text{ker}(f)\tilde{=}F^{j-1}(\mathcal{M}_{Y,0}^{\infty})$ and $\mathcal{N}_{Y,0}^{j-1}/\text{ker}(f)\tilde{=}F^{j-1}(\mathcal{N}_{Y,0}^{\infty})$ (as $\mathcal{M}_{Y}$ and $\mathcal{N}_{Y}$ are standard gauges). Further, the composed morphism \[ F^{j-1}(\mathcal{M}_{Y,0}^{\infty})\to F^{j-1}(\mathcal{N}_{Y,0}^{\infty})\to\mathcal{H}_{Y_{k}}^{i}(\mathcal{O}_{X_{k}}) \] is injective (since $j-1\leq m$); therefore $F^{j-1}(\mathcal{M}_{Y,0}^{\infty})\to F^{j-1}(\mathcal{N}_{Y,0}^{\infty})$ is injective, and it is clearly surjective since $\mathcal{M}_{Y,0}^{j-1}\to\mathcal{N}_{Y,0}^{j-1}$ is surjective. Therefore it is an isomorphism; and hence so is $\text{ker}(f:\mathcal{M}_{Y,0}^{j-1}\to\mathcal{M}_{Y,0}^{j})\to\text{ker}(f:\mathcal{N}_{Y,0}^{j-1}\to\mathcal{N}_{Y,0}^{j})$ as claimed.
Now suppose $m\in\text{ker}(\psi:\mathcal{M}_{Y,0}^{j}\to\mathcal{N}_{Y,0}^{j})$. Then $vm\in\text{ker}(\psi:\mathcal{M}_{Y,0}^{j-1}\to\mathcal{N}_{Y,0}^{j-1})=0$, so that $m\in\text{ker}(v)=\text{im}(f)$. If $m=fm'$, then we see $\psi m'\in\text{ker}(f)$; but by the above paragraph this implies $m'\in\text{ker}(f)$; therefore $m=0$ and $\psi:\mathcal{M}_{Y,0}^{j}\to\mathcal{N}_{Y,0}^{j}$ is injective. Thus the cokernel of $\psi:\mathcal{M}_{Y}^{j}\to\mathcal{N}_{Y}^{j}$ is $p$-torsion-free. On the other hand, we clearly have $p^{j}\cdot\mathcal{N}_{Y}^{j}\subset\mathcal{M}_{Y}^{j}$; so that the cokernel of $\psi$ is annihilated by $p^{j}$; therefore the cokernel is $0$ and we see that $\psi:\mathcal{M}_{Y}^{j}\to\mathcal{N}_{Y}^{j}$ is an isomorphism as claimed. \end{proof} Note that this gives a description of the reduction mod $p$ of the Hodge filtration (up to $F^{m}$) which makes no reference to a resolution of singularities. It does depend on an $R$-model for the $\mathcal{D}$-module $\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}})$, though any two such models agree after localizing $R$ at an element.
Now let us further suppose that $Y_{\mathbb{C}}\subset X_{\mathbb{C}}$ is a complete intersection of codimension $r$. By {[}MP2{]}, proposition 7.14 (c.f. also section $9$ of loc. cit.), we have \[
F^{m}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))\subset O^{m}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))=\text{span}_{\mathcal{O}_{X_{\mathbb{C}}}}\{Q_{1}^{-a_{1}}\cdots Q_{r}^{-a_{r}}|\sum a_{i}\leq m+r\} \]
In {[}MP2{]} the condition $F^{m}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))=O^{m}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))$ is discussed at length; and the point of view developed there shows that the largest $m$ for which there is equality is a subtle measure of the singularities of $Y_{\mathbb{C}}$. In fact, equality for any $m$ already implies serious restrictions on the singularities; indeed, $F^{0}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))=O^{0}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))$ is equivalent to $Y_{\mathbb{C}}$ having du Bois singularities (this is the first case of theorem F of loc. cit.).
Now, using the methods of this paper, let us show \begin{cor} \label{cor:Canonical-Singularities} Suppose $F^{0}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))=O^{0}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))$, i.e., $Q_{1}^{-1}\cdots Q_{r}^{-1}\in F^{0}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))$. Then the log-canonical threshold of $Y_{\mathbb{C}}$ is $r$. \end{cor}
Combined with the above, this gives a new proof of the famous fact that du Bois singularities are canonical, in the l.c.i. case at least (c.f. \cite{key-57}, \cite{key-58}). It is also a (very) special case of {[}MP2{]}, conjecture 9.11; of course, it also follows from theorem C of {[}MP2{]}, using the results of \cite{key-57}.
To prove this result, we will recall a few facts from positive characteristic algebraic geometry, following {[}BMS{]}. We return to a perfect field $k$ of positive characteristic and $X$ smooth over $k$. Let $\mathcal{I}\subset\mathcal{O}_{X}$ be an ideal sheaf. For each $m>0$ we let $\mathcal{I}^{[1/p^{m}]}$ be the minimal ideal sheaf such that $\mathcal{I}\subset(F^{m})^{*}(\mathcal{I}^{[1/p^{m}]})$ (here we are using the isomorphism $(F^{m})^{*}\mathcal{O}_{X}\tilde{\to}\mathcal{O}_{X}$; for any ideal sheaf $\mathcal{J}$ we have $(F^{m})^{*}\mathcal{J}=\mathcal{J}^{[p^{m}]}$, the ideal locally generated by $p^{m}$th powers of elements of $\mathcal{J}$). Then, for each $i>0$ one has inclusions \[ (\mathcal{I}^{i})^{[1/p^{m}]}\subset(\mathcal{I}^{i'})^{[1/p^{m'}]} \] whenever $i/p^{m}\leq i'/p^{m'}$ and $m\leq m'$ (this is {[}BMS{]}, lemma 2.8). These constructions are connected to $\mathcal{D}$-module theory as follows: for any ideal sheaf $\mathcal{I}$, we have $\mathcal{D}_{X}^{(m)}\cdot\mathcal{I}=(F^{m+1})^{*}(\mathcal{I}^{[1/p^{m+1}]})$ (c.f. \cite{key-64}, remark 2.6, and \cite{key-62}, lemma 3.1).
Now, fix a number $c\in\mathbb{R}^{+}$. If $x\to\lceil x\rceil$ denotes the ceiling function, then the previous discussion implies inclusions \[ (\mathcal{I}^{\lceil cp^{m}\rceil})^{[1/p^{m}]}\subset(\mathcal{I}^{\lceil cp^{m+1}\rceil})^{[1/p^{m+1}]} \] for all $m$. Thus we have a chain of ideals, which necessarily stabilizes, and so we can define \[ \tau(\mathcal{I}^{c})=(\mathcal{I}^{\lceil cp^{m}\rceil})^{[1/p^{m}]} \] for all $m>>0$. These ideals are called generalized test ideals. There is a deep connection to the theory of multiplier ideals in complex algebraic geometry, which is due to Hara and Yoshida ({[}HY{]}, theorems 3.4 and 6.8). Suppose we have a complex variety $X_{\mathbb{C}}$, and flat $R$-model $X_{R}$, and an ideal sheaf $\mathcal{I}_{R}$ which is also flat over $R$. Fix a rational number $c$; we may then choose a flat model $\mathcal{J}(\mathcal{I}_{R}^{c})$ for the multiplier ideal $\mathcal{J}(\mathcal{I}_{\mathbb{C}}^{c})$. Then for all perfect fields $k$ of sufficiently large positive characteristic, we have \[ \mathcal{J}(\mathcal{I}_{R}^{c})\otimes_{R}k=\tau(\mathcal{I}_{k}^{c}) \]
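As a sanity check on these definitions (a standard example, included here only for illustration), take $X=\mathbb{A}_{k}^{1}$ and $\mathcal{I}=(x)$. Since $k[x]$ is a principal ideal domain, any ideal $J$ with $(x^{a})\subset J^{[p^{m}]}$ is necessarily of the form $J=(x^{e})$ with $ep^{m}\leq a$; the minimal such $J$ is obtained for $e=\lfloor a/p^{m}\rfloor$, so that $(x^{a})^{[1/p^{m}]}=(x^{\lfloor a/p^{m}\rfloor})$. Taking $a=\lceil cp^{m}\rceil$ and $m>>0$ gives $\tau(\mathcal{I}^{c})=(x^{\lfloor c\rfloor})$, which indeed agrees with the multiplier ideal $\mathcal{J}(\mathcal{I}^{c})=(x^{\lfloor c\rfloor})$ of a smooth divisor, as the displayed comparison predicts.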
Finally, we note that since $\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}})$ is a coherent $\mathcal{D}_{X_{\mathbb{C}}}$-module, there exists some $l>0$ such that $\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}})=\mathcal{D}_{X_{\mathbb{C}}}\cdot(Q_{1}\cdots Q_{r})^{-l}$. Therefore we may obtain an $R$-model by taking the sheaf \[ \mathcal{D}_{X_{R}}^{(0)}\cdot(Q_{1}\cdots Q_{r})^{-l}\subset\mathcal{H}_{Y_{R}}^{r}(\mathcal{O}_{X_{R}}) \] After base change to $F=\text{Frac}(R)$ this agrees with ${\displaystyle \mathcal{H}^{r}(\int_{\varphi}j'_{\star}\mathcal{O}_{U_{R}})}$; therefore the two models agree after possibly localizing $R$. In particular, $\mathcal{M}_{Y}^{-\infty}$ is the $p$-adic completion of $\mathcal{D}_{X_{W(k)}}^{(0)}\cdot(Q_{1}\cdots Q_{r})^{-l}$.
Now let us turn to the \begin{proof} (of \corref{Canonical-Singularities}) Let $\mathcal{I}_{\mathbb{C}}=(Q_{1},\dots,Q_{r})$ and let us fix a rational number $0<c<r$. Suppose that the ideal $\mathcal{J}(\mathcal{I}_{\mathbb{C}}^{c})\subsetneq\mathcal{O}_{X_{\mathbb{C}}}$. We spread everything out over $R$, and reduce to $k$ of large positive characteristic. Then the above implies $\tau(\mathcal{I}_{k}^{c})\subsetneq\mathcal{O}_{X_{k}}$.
Now, recall that we have fixed an $R$-model $\mathcal{D}_{X_{R}}^{(0)}\cdot(Q_{1}\cdots Q_{r})^{-l}$ of $\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}})$. Then the description of the Hodge filtration in \propref{Hodge-for-local-coh!} implies that $F^{*}(F_{0}(\mathcal{D}_{X_{R}}^{(0)}\cdot(Q_{1}\cdots Q_{r})^{-l}))\otimes_{R}k)$ is the image of $\mathcal{D}_{X_{R}}^{(0)}\cdot(Q_{1}\cdots Q_{r})^{-l}\otimes_{R}k$ in $\mathcal{H}_{Y_{k}}^{r}(\mathcal{O}_{X_{k}})$; in other words, the $\mathcal{D}_{X_{k}}^{(0)}$-submodule generated by $(Q_{1}\cdots Q_{r})^{-l}$. Thus the assumption $F_{0}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))=O_{0}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))$ is equivalent to the statement \[ (Q_{1}\cdots Q_{r})^{-p}\in\mathcal{D}_{X_{k}}^{(0)}\cdot(Q_{1}\cdots Q_{r})^{-l} \] inside $\mathcal{H}_{Y_{k}}^{r}(\mathcal{O}_{X_{k}})$. Since $\mathcal{H}_{Y_{k}}^{r}(\mathcal{O}_{X_{k}})$ is the quotient of $\mathcal{O}_{X_{k}}[(Q_{1}\cdots Q_{r})^{-1}]$ by the submodule generated by $\{Q_{1}^{-1}\cdots\widehat{Q_{i}^{-1}}\cdots Q_{r}^{-1}\}_{i=1}^{r}$, which is contained in the $\mathcal{D}_{X_{k}}^{(0)}$-submodule generated by $(Q_{1}\cdots Q_{r})^{-l}$, we see that the assumption actually implies \[ (Q_{1}\cdots Q_{r})^{-p}\in\mathcal{D}_{X_{k}}^{(0)}\cdot(Q_{1}\cdots Q_{r})^{-l} \] inside $\mathcal{O}_{X_{k}}[(Q_{1}\cdots Q_{r})^{-1}]$.
To use this, note that the map $(Q_{1}\cdots Q_{r})^{p}\cdot$ is a $\mathcal{D}_{X_{k}}^{(0)}$-linear isomorphism on $\mathcal{O}_{X_{k}}[(Q_{1}\cdots Q_{r})^{-1}]$. Thus we see \[ \mathcal{O}_{X_{k}}=\mathcal{D}_{X_{k}}^{(0)}\cdot(Q_{1}\cdots Q_{r})^{p-l}\subset F^{*}(\mathcal{I}^{r(p-l)})^{[1/p]} \] so that $(\mathcal{I}^{r(p-l)})^{[1/p]}=\mathcal{O}_{X_{k}}$ which implies $\tau(\mathcal{I}^{r(1-l/p)})=\mathcal{O}_{X_{k}}$. Taking $p$ large enough so that $r(1-l/p)>c$, we deduce $\tau(\mathcal{I}_{k}^{c})=\mathcal{O}_{X_{k}}$ (the test ideals form a decreasing filtration, by {[}BMS{]}, proposition 2.11); contradiction. Therefore in fact $\mathcal{J}(\mathcal{I}_{\mathbb{C}}^{c})=\mathcal{O}_{X_{\mathbb{C}}}$ for all $c\in(0,r)$ which is the statement. \end{proof} As a corollary of this argument, we have: \begin{cor} Suppose $r=1$ in the previous corollary (so that $\mathcal{I}=(Q)$). Then, under the assumption that $F^{0}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))=O^{0}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))$, we have that, for all $p>>0$, $\tau(Q^{(1-l/p)})=\mathcal{O}_{X_{k}}$. \end{cor}
This says that, after reducing mod $p$, for $p>>0$ the $F$-pure threshold of $Q$ is $\geq1-l/p$. Recall that $l$ is any integer for which $Q^{-l}$ generates the $\mathcal{D}_{X_{\mathbb{C}}}$-module $j_{*}(\mathcal{O}_{U_{\mathbb{C}}})$; thus we may take $l$ to be the least natural number such that $b_{Q}(-l-t)\neq0$ for all $t\in\mathbb{N}$ (here $b_{Q}$ is the $b$-function for $Q$). In this language, this result was recently reproved (and generalized) in \cite{key-65}, by completely different techniques.
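For instance (an elementary consistency check, not needed elsewhere): if $Q$ defines a smooth hypersurface, then $Q^{-1}$ already generates $j_{*}(\mathcal{O}_{U_{\mathbb{C}}})$ over $\mathcal{D}_{X_{\mathbb{C}}}$, so we may take $l=1$; moreover the hypothesis $F^{0}=O^{0}$ holds in this case, since for a smooth hypersurface the Hodge filtration coincides with the order filtration. The corollary then gives $\tau(Q^{1-1/p})=\mathcal{O}_{X_{k}}$ for $p>>0$, i.e., an $F$-pure threshold $\geq1-1/p$, consistent with the fact that the $F$-pure threshold of a smooth hypersurface is equal to $1$.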
To finish off this section, we'll spell out how the description of the Hodge filtration in \propref{Hodge-for-local-coh!} relates to the condition $F_{i}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))=O_{i}(\mathcal{H}_{Y_{\mathbb{C}}}^{r}(\mathcal{O}_{X_{\mathbb{C}}}))$ when $Y_{\mathbb{C}}$ is a hypersurface inside $X_{\mathbb{C}}$; $Y_{\mathbb{C}}=Z(Q)$. In this case, we get an intriguing description in terms of the behavior of $\mathcal{H}_{Y}^{1}(\mathcal{O}_{X})$ in mixed characteristic: \begin{cor} We have $F_{i}(\mathcal{H}_{Y}^{1}(\mathcal{O}_{X}))=O_{i}(\mathcal{H}_{Y}^{1}(\mathcal{O}_{X}))$ iff $p^{i}Q^{-(i+1)p}\in\mathcal{D}_{X_{W_{i+1}(k)}}^{(0)}\cdot Q^{-l}$ inside $\mathcal{H}_{Y_{W_{i+1}(k)}}^{1}(\mathcal{O}_{X_{W_{i+1}(k)}})$ for $p>>0$. \end{cor}
\begin{proof} Applying the condition of \propref{Hodge-for-local-coh!}, we see that $F_{i}(\mathcal{H}_{Y}^{1}(\mathcal{O}_{X}))=O_{i}(\mathcal{H}_{Y}^{1}(\mathcal{O}_{X}))$ iff, for all $p>>0$, there exists some $g\in\widehat{\mathcal{H}_{\mathfrak{Y}}^{1}(\mathcal{O}_{\mathfrak{X}})}$ with $p^{i}g\in\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}\cdot Q^{-l}$ whose image in $\mathcal{H}_{Y_{k}}^{1}(\mathcal{O}_{X_{k}})$ is $Q^{-(i+1)p}$. This holds iff there is some $g_{1}\in\widehat{\mathcal{H}_{\mathfrak{Y}}^{1}(\mathcal{O}_{\mathfrak{X}})}$ so that \[ Q^{-(i+1)p}=g+pg_{1} \] inside $\widehat{\mathcal{H}_{\mathfrak{Y}}^{1}(\mathcal{O}_{\mathfrak{X}})}$. But this is equivalent to \[ p^{i}Q^{-(i+1)p}=p^{i}g+p^{i+1}g_{1}=\Phi\cdot Q^{-l}+p^{i+1}g_{1} \] for some $\Phi\in\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$. This, in turn, is a restatement of the corollary. \end{proof}
\section{\label{sec:Appendix:-an-Inectivity}Appendix: an Injectivity Result}
In this appendix we give a proof of the following technical result used in \lemref{Hodge-filt-on-log}: \begin{lem}
The natural map $(j_{\star}\mathcal{O}_{\mathfrak{U}})^{-\infty}|_{\mathfrak{V}}\to\widehat{(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])}$ (where $\widehat{(\cdot)}$ denotes $p$-adic completion) is injective. \end{lem}
Recall that $j_{\star}(\mathcal{O}_{\mathfrak{U}})$ was defined as the $\widehat{\mathcal{D}}_{\mathfrak{X}}^{(0)}$-module locally generated by $x_{1}^{-1}\cdots x_{j}^{-1}$, where $x_{1}\cdots x_{j}$ is a local equation for the divisor $\mathfrak{D}\subset\mathfrak{X}$. \begin{proof} Let $\mathfrak{V}$ be an open affine. On $\mathfrak{V}$, the map in question is the $p$-adic completion of the inclusion ${\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})}^{\text{fin}}\to\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}]$, where ${\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})}^{\text{fin}}$ is the $\mathcal{D}_{\mathfrak{V}}^{(0)}$-submodule of $j_{*}(\mathcal{O}_{\mathfrak{V}})$ generated by $x_{1}^{-1}\cdots x_{j}^{-1}$. This is a map of $p$-torsion-free sheaves; let $\mathcal{C}$ denote its cokernel. Then the kernel of the completion is given by \[ \lim_{\leftarrow}\mathcal{C}[p^{n}] \]
where $\mathcal{C}[p^{n}]=\{m\in\mathcal{C}|p^{n}m=0\}$, and the maps in the inverse system are multiplication by $p$.
Now, both ${\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})}^{\text{fin}}$ and $\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}]$ are filtered by the Hodge filtration; on ${\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})}^{\text{fin}}$ it is given by $F^{l}(\mathcal{D}_{\mathfrak{V}}^{(0)})\cdot(x_{1}^{-1}\cdots x_{j}^{-1})$, which is precisely the span over $\mathcal{O}_{\mathfrak{V}}$ of terms of the form $I!\cdot x_{1}^{-i_{1}-1}\cdots x_{j}^{-i_{j}-1}=\pm\partial^{I}x_{1}^{-1}\cdots x_{j}^{-1}$
for $|I|\leq l$; here we have denoted $I!=i_{1}!\cdots i_{j}!$. The Hodge filtration $F^{l}(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])$
is defined to be the span over $\mathcal{O}_{\mathfrak{V}}$ of terms of the form $x_{1}^{-i_{1}-1}\cdots x_{j}^{-i_{j}-1}$ for $|I|\leq l$. From this description it follows that, in both cases, all of the terms $F^{i}$ and $F^{i}/F^{i-1}$ are $p$-torsion-free; and the morphism is strict with respect to the filtrations; i.e., \[ F^{i}(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])\cap{\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})}^{\text{fin}}=F^{i}({\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})}^{\text{fin}}) \]
Now we consider the inclusion $\mathcal{R}({\displaystyle {\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})}}^{\text{fin}})\to\mathcal{R}(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])$ (where $\mathcal{R}$ stands for the Rees functor with respect to the Hodge filtrations on both sides). The strictness of the map implies \[ \text{coker}(\mathcal{R}({\displaystyle {\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})}}^{\text{fin}})\to\mathcal{R}(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}]))=\mathcal{R}(\mathcal{C}) \]
We now shall show that the $p$-adic completion\footnote{In this appendix only, we use the completion of the \emph{entire} Rees module, NOT the graded completion of the rest of the paper; similarly, the product is the product in the category of all modules, not the category of graded modules} of this map is injective. The natural map \[ \mathcal{R}({\displaystyle {\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})}}^{\text{fin}})=\bigoplus_{i=0}^{\infty}F^{i}({\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})^{\text{fin}}}\to\prod_{i=0}^{\infty}F^{i}({\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})^{\text{fin}}} \] is injective, and the cokernel is easily seen to be $p$-torsion-free; therefore we obtain an injection \[ \widehat{\mathcal{R}({\displaystyle {\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})}}^{\text{fin}})}\to\widehat{\prod_{i=0}^{\infty}F^{i}({\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})^{\text{fin}}}} \] and the analogous statement holds for $\mathcal{R}(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])$. Further, one has an isomorphism \[ \widehat{\prod_{i=0}^{\infty}F^{i}({\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})^{\text{fin}}}}\tilde{=}\prod_{i=0}^{\infty}(\widehat{F^{i}({\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})^{\text{fin}}})}=\prod_{i=0}^{\infty}F^{i}({\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})^{\text{fin}}} \] where the last equality is because $F^{i}({\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})^{\text{fin}}}$ is a coherent $\mathcal{O}_{\mathfrak{V}}$-module and therefore $p$-adically complete; similarly \[ \widehat{\prod_{i=0}^{\infty}F^{i}(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}]}\tilde{=}\prod_{i=0}^{\infty}(\widehat{F^{i}(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])}=\prod_{i=0}^{\infty}F^{i}(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}] \] So, since each $F^{i}({\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})^{\text{fin}}}\to F^{i}(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}]$ is injective, we obtain an injection \[ \widehat{\mathcal{R}({\displaystyle {\displaystyle (j_{\star}\mathcal{O}_{\mathfrak{V}})}}^{\text{fin}})}\to\widehat{\mathcal{R}(\mathcal{O}_{\mathfrak{V}}[x_{1}^{-1}\cdots x_{j}^{-1}])} \] This means that ${\displaystyle \lim_{\leftarrow}\mathcal{R}(\mathcal{C})[p^{n}]}=0$. Let $f$ denote the parameter in the Rees ring. Then, for each $n$ we have a short exact sequence \[ \mathcal{R}(\mathcal{C})[p^{n}]\xrightarrow{f-1}\mathcal{R}(\mathcal{C})[p^{n}]\to C[p^{n}] \] Since ${\displaystyle \lim_{\leftarrow}\mathcal{R}(\mathcal{C})[p^{n}]}=0$, to prove ${\displaystyle \lim_{\leftarrow}\mathcal{C}[p^{n}]=0}$ we must show that $f-1$ acts injectively on ${\displaystyle \text{R}^{1}\lim_{\leftarrow}\mathcal{R}(\mathcal{C})[p^{n}]}$. Recall that this module is the cokernel of \[ \eta:\prod_{n=1}^{\infty}\mathcal{R}(C)[p^{n}]\to\prod_{n=1}^{\infty}\mathcal{R}(C)[p^{n}] \] where $\eta(c_{1},c_{2},c_{3},\dots)=(c_{1}-pc_{2},c_{2}-pc_{3},\dots)$. Now, since each $\mathcal{R}(C)[p^{n}]$ is graded, we may define a homogenous element of degree $i$ in ${\displaystyle \prod_{n=1}^{\infty}\mathcal{R}(C)[p^{n}]}$ to be an element $(c_{1},c_{2},\dots)$ such that each $c_{j}$ has degree $i$. 
Any element $d\in{\displaystyle \prod_{n=1}^{\infty}\mathcal{R}(C)[p^{n}]}$ has a unique representation of the form ${\displaystyle \sum_{i=0}^{\infty}d_{i}}$ where $d_{i}$ is homogeneous of degree $i$ (this follows by looking at the decomposition by grading of each component). Since the map $\eta$ preserves the set of homogeneous elements of degree $i$, we have ${\displaystyle \eta(\sum_{i=0}^{\infty}d_{i})=\sum_{i=0}^{\infty}\eta(d_{i})}$.
Suppose that $(f-1)d=\eta(d')$. Write ${\displaystyle d=\sum_{i=j}^{\infty}d_{i}}$ where $d_{j}\neq0$. Then \[ (f-1){\displaystyle \sum_{i=j}^{\infty}d_{i}}=-d_{j}+\sum_{i=j+1}^{\infty}(fd_{i-1}-d_{i})=\sum_{i=0}^{\infty}\eta(d_{i}') \] So we obtain $d_{j}=-\eta(d_{j}')$, and $d_{i}=fd_{i-1}-\eta(d_{i}')$ for all $i>j$, which immediately gives $d_{i}\in\text{image}(\eta)$ for all $i$; so $d\in\text{image}(\eta)$ and $f-1$ acts injectively on $\text{coker}(\eta)$ as required. \end{proof}
The University of Illinois at Urbana-Champaign, [email protected]
\end{document}
\begin{document}
\title{Forman-Ricci curvature and Persistent homology of unweighted complex networks}
\author{Indrava Roy} \email{Correspondence to: [email protected]} \affiliation{The Institute of Mathematical Sciences (IMSc), Homi Bhabha National Institute (HBNI), Chennai 600113 India} \author{Sudharsan Vijayaraghavan} \affiliation{Department of Applied Mathematics and Computational Sciences, PSG College of Technology, Coimbatore 641004 India} \author{Sarath Jyotsna Ramaia} \affiliation{Department of Applied Mathematics and Computational Sciences, PSG College of Technology, Coimbatore 641004 India} \author{Areejit Samal} \email{Correspondence to: [email protected]} \affiliation{The Institute of Mathematical Sciences (IMSc), Homi Bhabha National Institute (HBNI), Chennai 600113 India} \affiliation{Max Planck Institute for Mathematics in the Sciences, Leipzig 04103 Germany}
\begin{abstract} We present the application of topological data analysis (TDA) to study unweighted complex networks via their persistent homology. By endowing appropriate weights that capture the inherent topological characteristics of such a network, we convert an unweighted network into a weighted one. Standard TDA tools are then used to compute their persistent homology. To this end, we use two main quantifiers: a local measure based on Forman's discretized version of Ricci curvature, and a global measure based on edge betweenness centrality. We have employed these methods to study various model and real-world networks. Our results show that persistent homology can be used to distinguish between model and real networks with different topological properties. \end{abstract}
\maketitle
\section{Introduction}
Recent advances in topological data analysis (TDA) \cite{Zomorodian2005,Edelsbrunner2008,Carlsson2009} have made it a powerful tool in data science. TDA has led to important applications in different areas of science. For example, in astrophysics, TDA has been used for the analysis of the Cosmic Microwave Background (CMB) radiation data \cite{Pranav2016}; in imaging, TDA has been used for feature detection in 3D gray-scale images \cite{Gunther2011}; in biology, TDA has been used for detection of breast cancer type with high survival rates \cite{Nicolau2011} and understanding cell fate from single-cell RNA sequencing data \cite{Rizvi2017}. The main tool in TDA is that of \textit{persistent homology} \cite{Zomorodian2005,Edelsbrunner2008,Carlsson2009}, which has the power to detect the topology of the underlying data. The field of algebraic topology \cite{Munkres2018} provides the basic mathematical tool required for TDA, namely that of homology. The conceptual roots of persistent homology, however, are in \textit{differential} topology, in particular Morse theory \cite{Edelsbrunner2008}.
Network science \cite{Watts1998,Barabasi1999,Albert2002,Newman2010,Barabasi2016}, on the other hand, investigates the topological and dynamical properties of various complex networks, that encode interactions between various agents in the natural as well as artificial setting. The ability to understand and predict the nature of these interactions is a key challenge. Historically, graph theory \cite{Bollobas1998,Newman2010,Barabasi2016} has provided the main tools and techniques for studying such networks, via their graph representation. Although graph theory has provided significant insights into such problems, recent studies have shown that such techniques do not adequately capture higher-order interactions and correlations arising in networks \cite{De2007,Horak2009,Petri2013,Petri2014,Bianconi2015, Wu2015,Sizemore2016,Courtney2017,Ritchie2017,Courtney2018,Kartun-Giles2019,Iacopini2019,Kannan2019}. These higher-order phenomena can be encoded in \textit{hypergraph} \cite{Klamt2009,Zlatic2009} and \textit{simplicial complex} \cite{De2007,Horak2009,Lee2012,Petri2013,Petri2014,Sizemore2016,Iacopini2019} representations of networks. The tools of TDA are applicable to any simplicial complex and can be used to determine the important topological characteristics of networks. In this work, we employ TDA to study the persistent homology of unweighted and undirected simple graphs arising from model and real-world networks.
Previous research in this direction has investigated the persistent homology of weighted and undirected networks \cite{Petri2013,Petri2014}. In these studies, the filtration scheme required to compute persistent homology was provided by the edge weights \cite{Petri2013,Petri2014}. However, this technique is not immediately applicable to unweighted graphs due to the absence of edge weights. At present, due to insufficient information, the interaction networks underlying many real-world complex systems are available only as unweighted and undirected graphs. Examples of such unweighted and undirected real networks include the Yeast protein interaction network \cite{Jeong2001}, the US Power Grid network \cite{Leskovec2007} and the Euro road network \cite{Subelj2011}. In order to reveal the higher-order topological features of such real-world networks, it is important to develop methods to study persistent homology in unweighted and undirected networks. A simple way to devise such a method would be to transform the given unweighted graph into an edge-weighted graph by assigning certain weights to all edges, and then using the induced filtration to compute persistent homology. However, \textit{a priori} it is not evident which edge weighting scheme would capture the topological characteristics of different types of unweighted networks.
Previously, Horak \textit{et al.} \cite{Horak2009} used a dimension-based weighting scheme for unweighted networks where the weights are simply the dimensions of the simplices. In particular, Horak \textit{et al.} assign the weight $+1$ to all edges to study persistent homology in unweighted networks. However, we have recently shown that the dimension-based filtration scheme of Horak \textit{et al.}, though computationally fast, may not be able to conclusively distinguish between various model networks \cite{Kannan2019}. In recent work \cite{Kannan2019}, we gave another weighting method based on a \textit{discrete Morse function} as introduced by Robin Forman \cite{Forman1998,Forman2002}, which assigns weights to each simplex in the clique complex corresponding to the unweighted graph according to a global acyclicity constraint. This method \cite{Kannan2019} simplifies the topological structure of the underlying simplicial complex, which leads to a computationally efficient way to compute homology and persistent homology. Moreover, we also showed that the persistent homology computed using this method was able to distinguish various unweighted model networks having different topological characteristics, the difference being quantified by the averaged bottleneck distance between the corresponding persistence diagrams \cite{Kannan2019}. A natural question then is whether other choices of weights can also be used to distinguish such unweighted networks via persistent homology.
In the present contribution, we shall use both local and global network quantifiers for obtaining edge weighting schemes to compute persistent homology, namely that of \textit{discrete Ricci curvature} \cite{Forman2003, Sreejith2016,Sreejith2017,Samal2018}, also introduced by R. Forman \cite{Forman2003}, which plays the role of local curvature in a discrete setting, and \textit{edge betweenness centrality} \cite{Freeman1977,Girvan2002, Newman2010}, which is an edge-based measure analogous to the classical betweenness centrality for vertices of a graph. We shall show that the simpler methods introduced here to study persistent homology based on Forman-Ricci curvature or edge betweenness centrality are also able to distinguish unweighted model networks, much like our recent method \cite{Kannan2019} based on discrete Morse functions. However, note that the advantages in topological simplification and computational efficiency that result from using a discrete Morse function are lost with the simpler methods presented here. Nevertheless, if the sole goal is to compute persistent homology in unweighted networks, the weighting schemes presented here are likely to be much simpler to use in practice. In this context, we have also applied our methods to study the persistent homology of some real-world networks. Note that our recent method based on discrete Morse functions \cite{Kannan2019} and the simpler methods presented here based on Forman-Ricci curvature or edge betweenness centrality are much better at distinguishing between different types of model networks in comparison to the dimension-based method of Horak \textit{et al.} \cite{Horak2009}.
The remainder of the paper is organized as follows. In the Theory section, we present an overview of the concepts needed to study persistent homology in unweighted networks based on Forman-Ricci curvature and edge betweenness centrality. In the Datasets section, we describe the model and real networks analyzed here. In the Results section, we describe our new methods to study persistent homology in unweighted networks, and their application to both model and real-world networks. In the last section, we conclude with a brief summary and future outlook.
\begin{figure*}
\caption{Schematic figure illustrating our method to study persistent homology in an unweighted and undirected network using Forman-Ricci curvature. (a) An example of an unweighted graph $G$. (b) Transformation of the unweighted graph into an edge-weighted graph using Forman-Ricci curvature. (c) Assignment of normalized filtration weights to edges in the weighted graph shown in (b) based on Forman-Ricci curvature. (d) Weighted clique simplicial complex $K$ corresponding to the unweighted graph $G$. (e) Assignment of normalized filtration weights to vertices ($0$-simplices) and $2$-simplices in the weighted clique complex shown in (d) based on edge weights. (f) Filtration of the weighted clique complex $K$ based on the ascending sequence of weights assigned to simplices. Barcodes depict that there is a $0$-hole (or connected component) that persists across the 6 stages of the filtration while another $0$-hole is born at the last stage on addition of the isolated vertex $v_9$. Moreover, a $1$-hole is born at stage 4 on addition of the edge $[v_3,v_4]$.}
\label{schemfig}
\end{figure*}
\section{Theory}
\subsection{Clique complex of a graph}
Let $G(V,E)$ be a finite simple graph with $V$ being the set of vertices and $E$ being the set of edges. Each edge in the graph $G$ is an unordered pair of distinct vertices. We remark that a simple graph does not contain self-loops or multi-edges \cite{Bollobas1998}. An induced subgraph of $G$ that is complete is called a \textit{clique}. We can view $G$ as a finite clique simplicial complex $K$ where a $p$-dimensional simplex (or $p$-simplex) is determined by a set of $p+1$ vertices that form a clique \cite{Zomorodian2005,Edelsbrunner2008}. Specifically, a $p$-simplex is a polytope which is the convex hull of its $p+1$ vertices. Note that a \textit{simplex} can be thought of as a generalization of points, lines, triangles, tetrahedra, and so on to higher dimensions. In the clique complex $K$, $0$-simplices correspond to vertices in $G$, $1$-simplices to edges in $G$, $2$-simplices to triangles in $G$, and so on. Given a $p$-simplex $\alpha$ in $K$, a \textit{face} $\gamma$ of $\alpha$ is determined by a non-empty subset of the vertex set of $\alpha$ of cardinality less than or equal to $p+1$. Dually, a \textit{co-face} $\beta$ of $\alpha$ is a simplex that contains $\alpha$ as a face. The dimension of a clique simplicial complex $K$ is the maximum dimension of its constituent simplices. An orientation of a $p$-simplex is given by an ordering of its constituent vertices \cite{Munkres2018}. Moreover, two orientations of a simplex are equivalent if they differ by an even permutation of its vertices.
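As an illustration of the clique complex construction, the following minimal Python sketch (assuming the networkx library; the toy graph is hypothetical) enumerates the simplices of the clique complex of a small unweighted graph, recording each clique on $p+1$ vertices as a $p$-simplex.

\begin{verbatim}
# A minimal sketch (networkx assumed): enumerate the simplices of the
# clique complex of a small unweighted graph.  Each clique on p+1
# vertices is recorded as a p-simplex.
import networkx as nx

G = nx.Graph([(0, 1), (1, 2), (0, 2), (2, 3)])  # a triangle plus a pendant edge

simplices_by_dim = {}
for clique in nx.enumerate_all_cliques(G):   # yields cliques of all sizes
    p = len(clique) - 1                      # dimension of the simplex
    simplices_by_dim.setdefault(p, []).append(tuple(sorted(clique)))

for p, simplices in sorted(simplices_by_dim.items()):
    print(p, simplices)
# Expected: four 0-simplices, four 1-simplices and one 2-simplex (0,1,2).
\end{verbatim}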
\subsection{Persistent homology of a simplicial complex}
A simplicial complex is a collection $K$ of simplices which satisfies the following two properties \cite{Munkres2018}. Firstly, any face $\gamma$ of a simplex $\alpha$ in $K$ is also included in $K$. Secondly, if two simplices $\alpha$ and $\beta$ in $K$ have a non-empty intersection $\gamma$, then $\gamma$ is a common face of $\alpha$ and $\beta$. A subcomplex $K'$ of a simplicial complex $K$ is a collection of simplices in $K$ such that $K'$ is also a simplicial complex. A \textit{filtration} on a simplicial complex $K$ is given by a nested sequence of subcomplexes $K_i$, $i=0,1,\ldots,n$, such that: \begin{equation*} \emptyset=K_0\subseteq K_1\subseteq \ldots \subseteq K_n=K. \end{equation*}
For a simplicial complex $K$ with a given filtration, one can define its persistent homology groups as follows. First we fix a base field $\mathbb{F}$ \cite{Munkres2018}. The set of all oriented $p$-simplices in $K$ generates a vector space $C_p$ over $\mathbb{F}$, called the $p^{\text{th}}$-chain group \cite{Munkres2018}. An element of $C_p$ is called a $p$-chain and is given by a finite formal sum: \begin{equation*} c =\sum_{i=1}^{N} c_i\alpha_i \end{equation*} where the coefficients $c_i$ are in $\mathbb{F}$, and $\alpha_i$ are oriented $p$-simplices in $K$ \cite{Munkres2018}. Component-wise addition endows $C_p$ with the structure of a group, whose identity element is given by the unique $p$-chain with all coefficients $c_i$ equal to zero. If a $p$-simplex $\alpha$ is given an opposite orientation, then it is represented as $-\alpha$ in $C_p$, and gives the inverse of $\alpha$ in $C_p$. To define the persistent homology groups, we use the so-called boundary operator $\partial_p$, which is a map $\partial_p: C_p\rightarrow C_{p-1}$.
For an oriented $p$-simplex $\alpha = [x_0,x_1,\ldots,x_p]$ (i.e., the ordered vertex set $\{x_0, x_1,\ldots,x_p\}$ of $\alpha$), we define the boundary operator $\partial_p$ as: \begin{equation*} \partial_p(\alpha)=\sum_{i=0}^{p}(-1)^i[x_0,\ldots,\hat{x}_i, \ldots,x_p] \end{equation*} where $[x_0,\ldots,\hat{x}_i,\ldots,x_p]$ denotes the $(p-1)$-face of $\alpha$ obtained by removing the vertex $x_i$ \cite{Munkres2018}. Since the right hand side of the above equation is a linear combination of $(p-1)$-simplices, it belongs to $C_{p-1}$. One can then extend the definition of $\partial_p$ to all elements of $C_p$ by linearity. The boundary operators satisfy the fundamental property: \begin{equation*} \partial_p\circ \partial_{p+1}=0. \end{equation*}
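For instance, for the oriented $2$-simplex $\alpha=[x_0,x_1,x_2]$ one has \begin{equation*} \partial_2(\alpha)=[x_1,x_2]-[x_0,x_2]+[x_0,x_1], \end{equation*} and applying $\partial_1$ termwise gives $([x_2]-[x_1])-([x_2]-[x_0])+([x_1]-[x_0])=0$, illustrating the identity $\partial_1\circ\partial_2=0$.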
The kernel of the boundary operator is called the group of $p$-cycles and denoted by $Z_p$ \cite{Munkres2018}. It is given by the set of elements in $C_p$ that is mapped to $0$ in $C_{p-1}$ by the boundary operator $\partial_p$: \begin{equation*}
Z_p =\operatorname{Ker}(\partial_p)=\{c\in C_p|\partial_p(c)=0\}. \end{equation*}
A $p$-boundary is a $p$-cycle which lies in the image of the boundary operator $\partial_{p+1}$. The set of $p$-boundaries is denoted by $B_p$ and is a subgroup of $Z_p$ \cite{Munkres2018}. \begin{equation*}
B_p =\operatorname{Image}(\partial_{p+1})=\{c\in C_p| \exists b \in C_{p+1}, \partial_{p+1}(b)=c\}. \end{equation*}
Thus, the $p$-homology group is defined as \cite{Munkres2018}: \begin{equation*} H_p(K)=\frac{Z_p(K)}{B_p(K)}. \end{equation*} Note that $H_p$ is a vector space over the field $\mathbb{F}$. The $p$-Betti number $\beta_p$ is given by the dimension of the homology group $H_p$. Informally, $\beta_p$ counts the number of $p$-dimensional holes in $K$.
Now, every subcomplex $K_i$ in the filtration of the simplicial complex $K$ has an index $i$ associated with it. Also, for each $K_i$ there exists its corresponding $p$-chain, $p$-boundary operators, and thus, $p$-boundaries and $p$-cycles. We shall denote the $p$-cycles of $K_i$ as $Z^i_p$ and the $p$-boundaries of $K_i$ as $B^i_p$. The $j$-persistent $p$-homology of $K_i$ is defined as \cite{Zomorodian2005,Edelsbrunner2008}: \begin{equation} H^{i,j}_p = \frac{Z^i_p}{(B^{i+j}_p \cap Z^i_p)} \end{equation} and the corresponding $j$-persistent $p$-Betti number as: \begin{equation} \beta^{i,j}_p = \operatorname{dim}(H^{i,j}_p). \end{equation}
A $p$-homology class $\alpha$ is \textit{born} at $K_i$ if it is not in the image of the map induced on $p$-homology by the inclusion $K_{i-1} \subseteq K_i$ . Furthermore, if $\alpha$ is born at $K_i$, we say that it \textit{dies} entering $K_{i+j}$, if it becomes the boundary of a $(p+1)$-chain in $K_{i+j}$. The persistent homology group $H^{i,j}_p$ thus encodes information of $p$-homology classes that are born at the filtration index $i$ and survive until the index $i+j$. Each $p$-hole across the filtration can be characterized by its birth and death. By studying persistent homology, the persistence of such holes can be quantified, thus revealing the importance of the corresponding topological features across the filtration.
\subsubsection{Filtration of a weighted simplicial complex}
Let $K$ be a simplicial complex, endowed with real numbers called \textit{weights} associated with its simplices, i.e. to each of its constituent simplices $\alpha$ is assigned a real number $w(\alpha)$. To study the persistent homology of such simplicial complexes, we can consider the following filtration on $K$ \cite{Zomorodian2005,Edelsbrunner2008}. Given a real number $r$, we define the subcomplex $K(r)$ as: \begin{equation} K(r)= \bigcup_{\{\alpha: w(\alpha)\leq r\}} \bigcup_{\beta\leq \alpha} \beta \end{equation} In simple terms, $K(r)$ is the smallest simplicial subcomplex of $K$ containing simplices which have weight less than or equal to $r$. Note that all faces $\beta$ of a simplex $\alpha$ of weight less than or equal to $r$ are admitted to this subcomplex, irrespective of the weight of $\beta$. In the particular case of a finite simplicial complex $K$ with simplices $\alpha_i, i=1,2,\ldots,n$, we can arrange the corresponding weights $w(\alpha_i)$ in ascending order, say $\lambda_1 \leq \lambda_2 \leq \cdots \leq \lambda_n$. Then the associated filtration of $K$ is given by: \begin{equation} \label{filtration} \emptyset\subseteq K(\lambda_1)\subseteq K(\lambda_2)\subseteq \ldots \subseteq K(\lambda_n)=K \end{equation}
Throughout this work, we shall consider the particular case of a finite weighted simplicial complex arising from a finite \textit{edge-weighted} simple graph, i.e. a graph with weights assigned to all its edges. The clique complex of such an edge-weighted graph already has weights on its $1$-simplices corresponding to its edges, and we can extend this weighting scheme to any $0$-simplex $\beta$ or $2$-simplex $\sigma$ by defining their weights $w(\beta)$ and $w(\sigma)$ by the following \textit{min/max} formulae: \begin{eqnarray} w(\beta)= \min\{ w(\alpha) : \alpha \text{ is a 1-dimensional co-face of } \beta\}\ \text{and} \nonumber \\ w(\sigma)= \max\{ w(\alpha) : \alpha \text{ is a 1-dimensional face of } \sigma\}. \label{simplexweight} \end{eqnarray} Weights of higher-dimensional simplices can be defined in a similar way. Note that with these weights, we enforce the following conditions. Any vertex of an edge $e$ that is included in a subcomplex containing $e$ has weight less than or equal to the weight of $e$. Moreover, if a collection of edges forms a higher-dimensional simplex $\gamma$, then $\gamma$ is first included in the subcomplex that includes its edge of maximum weight. With respect to the filtration induced by this weighting scheme, with weights arranged in increasing order $\lambda_1 \leq \lambda_2 \leq \cdots \leq \lambda_n$, one can now compute the persistent homology groups of $K$ as defined above. The \textit{persistence} of a class in $p$-homology that is born at the $i^{\text{th}}$-stage of the filtration and dies at the $j^{\text{th}}$-stage is then defined to be $\lambda_j- \lambda_i$, where $\lambda_i$ is the weight associated to the $i^{\text{th}}$-subcomplex $K(\lambda_i)$ as above.
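The min/max assignment of Equation \ref{simplexweight} is straightforward to implement; the following minimal Python sketch (assuming networkx; the edge weights are hypothetical) computes vertex and triangle weights from an edge-weighted graph.

\begin{verbatim}
# A minimal sketch (networkx assumed) of the min/max weighting of
# Eq. (simplexweight): a vertex inherits the minimum weight of its
# incident edges, a triangle the maximum weight of its three edges.
import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 0.2), (1, 2, 0.5), (0, 2, 0.9), (2, 3, 0.4)])

vertex_weight = {
    v: min(d["weight"] for _, _, d in G.edges(v, data=True))
    for v in G.nodes() if G.degree(v) > 0   # isolated vertices treated separately
}
triangle_weight = {
    tuple(sorted(t)): max(G[u][v]["weight"]
                          for u, v in [(t[0], t[1]), (t[0], t[2]), (t[1], t[2])])
    for t in nx.enumerate_all_cliques(G) if len(t) == 3
}
print(vertex_weight)    # e.g. {0: 0.2, 1: 0.2, 2: 0.4, 3: 0.4}
print(triangle_weight)  # {(0, 1, 2): 0.9}
\end{verbatim}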
\subsubsection{Barcode diagrams}
The $p^{\text{th}}$-barcode diagram for a given filtration of a finite simplicial complex $K$ gives a graphical summary of the birth and death of $p$-holes across the filtration \cite{Ghrist2008}. In this work, the x-axis of the $p^{\text{th}}$-barcode diagram corresponds to the filtration weights of $p$-simplices in $K$; the filtration weights have been normalized to lie in the range 0 to 1. A horizontal line in the $p^{\text{th}}$-barcode diagram of $K$ is referred to as a barcode. A barcode that begins at an x-axis value of $w_i$ and ends at an x-axis value of $w_j$ represents a $p$-hole in $K$ whose birth and death weights are $w_i$ and $w_j$, respectively. The number of barcodes between $w_i$ and $w_j$ in the diagram is precisely the persistent $p$-Betti number $\beta^{i,j-i}_p$, i.e., the dimension of the persistent homology group $H^{i,j-i}_p$.
\subsubsection{Persistence diagrams and bottleneck distance between them}
Given two multisets $X$ and $Y$ in $\mathbb{R}^2$, the $\infty$-Wasserstein distance or bottleneck distance between them is defined as: \begin{equation*} \label{botdist}
W_\infty(X,Y) = \inf_{\eta:X \rightarrow Y} \sup_{x \in X} \| x - \eta(x) \|_{\infty}. \end{equation*} In the above equation, the infimum is taken over all bijections $\eta:X\rightarrow Y$ (with the convention that a point with multiplicity $k \in \mathbb N$ is considered as $k$ individual points) and for $(a,b) \in \mathbb{R}^2$, $\|(a,b)\|_\infty:= \max\{|a|, |b|\}$ \cite{DiFabio2015}.
Given a filtration of a weighted simplicial complex $K$ with weights $w_i, i= 1, 2, \cdots, n$, the $p^{\text{th}}$-persistence diagram $D^pK$ is defined as follows. Consider the multiset of points $W^pK:=\{(w_i, w_j): w_i<w_j, i, j = 1, 2, \cdots, n\}$ with each point $(w_i, w_j)$ endowed with the multiplicity $\mu_p(w_i, w_j)$ given by \cite{DiFabio2015}: \begin{equation*} \mu_p(w_i, w_j) := \lim_{\epsilon \rightarrow 0^+} (\beta_{w_i+\epsilon}^{w_j-\epsilon} - \beta_{w_i+\epsilon}^{w_j+\epsilon} + \beta_{w_i-\epsilon}^{w_j+\epsilon} - \beta_{w_i-\epsilon}^{w_j-\epsilon}) \end{equation*} where $\beta_x^y$ is the dimension of the image of the induced map in $p$-homology from $K(x)$ to $K(y)$ for $x, y \in \mathbb R$ with $x<y$. Denote by $\Delta$ the diagonal in $\mathbb R^2$ considered as a multiset with infinite multiplicity given to each of its points. The persistence diagram $D^pK$ is the subset of $W^pK \cup \Delta$ consisting of points $(u, v)$ with $\mu_p(u, v)>0$. In this work, we consider the total persistence diagram \cite{Cohen-Steiner2007} given by the union of all $D^pK$ for $0 \leq p \leq \operatorname{dim}{K}$. Thereafter, we consider the bottleneck distance \cite{Cohen-Steiner2007} between total persistence diagrams considered as multisets in $\mathbb{R}^2$.
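In practice, the bottleneck distance between two persistence diagrams can be computed with standard TDA software; the following minimal sketch (assuming the GUDHI Python bindings, which the diagrams are passed to as lists of hypothetical (birth, death) pairs) illustrates the call.

\begin{verbatim}
# A minimal sketch (GUDHI Python bindings assumed): bottleneck distance
# between two hypothetical persistence diagrams given as lists of
# (birth, death) pairs.
import gudhi

diag_1 = [(0.0, 0.6), (0.1, 0.4)]
diag_2 = [(0.0, 0.5), (0.2, 0.35)]

print(gudhi.bottleneck_distance(diag_1, diag_2))
\end{verbatim}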
\subsection{Forman-Ricci curvature}
In previous work \cite{Sreejith2016,Sreejith2017,Samal2018}, some of us have ported a discretization of the classical notion of Ricci curvature due to Robin Forman \cite{Forman2003} to graphs or networks. Briefly, in Riemannian geometry, curvature measures the amount of deviation of a smooth Riemannian manifold from being Euclidean. The Ricci curvature tensor quantifies the dispersion of geodesic lines in the neighbourhood of a given tangential direction as well as volume growth of metric balls. Forman \cite{Forman2003} has proposed a discretization of the classical Ricci curvature based on the \textit{Bochner-Weitzenb\"{o}ck formula} which measures the difference between the \textit{Laplace-Beltrami operator} and the \textit{connection Laplacian} \cite{Jost2017}. Forman's discretized version of the Ricci curvature is applicable to a large class of topological objects, namely, \textit{weighted $CW$-complexes} which includes graphs and simplicial complexes \cite{Forman2003,Sreejith2016}.
Starting from a graph or network, one may construct a two-dimensional polyhedral complex by inserting a solid triangle into any connected triple of vertices or cycle of length 3, a solid quadrangle into a cycle of length 4, a solid pentagon into a cycle of length 5, and so on. The mathematical definition of Forman-Ricci curvature \cite{Forman2003} for general weighted $CW$-complexes is also applicable to such a two-dimensional polyhedral complex constructed from a graph, and is given by: \begin{equation*} {\rm F} (e) = w_e \left[ \sum_{e < f} \frac{w_e}{w_f}+\sum_{v < e} \frac{w_v}{w_e} \right.
- \left. \sum_{\hat{e} \parallel e} \left| \sum_{\hat{e},e < f} \frac{\sqrt{w_e \cdot w_{\hat{e}}}}{w_f} - \sum_{v < e, v < \hat{e}} \frac{w_v}{\sqrt{w_e \cdot w_{\hat{e}}}}
\right| \right] \; ; \end{equation*} where $w_e$ denotes the weight of edge $e$, $w_v$ denotes the weight of vertex $v$, $w_f$ denotes the weight of face $f$, $\sigma < \tau$ means that $\sigma$
is a face of $\tau$, and $\parallel$ signifies \textit{parallelism}, i.e. the two cells have a common \textit{parent} (higher dimensional co-face) or a common \textit{child} (lower dimensional face), but not both a common parent and common child \cite{Samal2018}. For the particular case of restricted two-dimensional complexes containing only triangular faces $t$ while ignoring faces consisting of more than 3 vertices, the above equation simplifies to \cite{Samal2018}: \begin{equation} \label{AugmentedFormanRicciEdge} {\rm F} (e) = w_e \left[ \sum_{e < t} \frac{w_e}{w_t} + (\frac{w_{v_1}}{w_e} + \frac{w_{v_2}}{w_e}) \right. - \left. \sum_{e_{v_1},e_{v_2}\ \sim\ e,\ e_{v_1},e_{v_2}\ \nless\ t} \left( \frac{w_{v_1}}{\sqrt{w_e \cdot w_{e_{v_1}}}} + \frac{w_{v_2}}{\sqrt{w_e \cdot w_{e_{v_2}}}} \right) \right] \; ; \end{equation} where $w_e$ is the weight of the edge $e$ under consideration, $w_{v_1}$ and $w_{v_2}$ denote the weights associated with the vertices $v_1$ and $v_2$, respectively, which anchor the edge $e$ under consideration. In the above equation, $e_{v_1} \sim e$ and $e_{v_2} \sim e$ denote the set of edges incident on vertices $v_1$ and $v_2$, respectively, after \textit{excluding} the edge $e$ under consideration which connects the two vertices $v_1$ and $v_2$. While computing the Forman-Ricci curvature of an edge in an unweighted graph $G$, we substitute in the above equation $w_t = w_e = w_v = 1, \; \forall\ t \in T(G), e \in E(G), v \in V(G)$, where $T(G)$, $E(G)$ and $V(G)$ represent the set of triangular faces, edges and vertices, respectively. Note that the above definition (Eq. \ref{AugmentedFormanRicciEdge}) of the Forman-Ricci curvature of an edge or $1$-simplex in the restricted two-dimensional complex constructed from a graph was referred to as \textit{augmented} Forman-Ricci curvature in earlier contributions \cite{Samal2018,Saucan2019}. For brevity, we here refer to the quantity defined in Eq. \ref{AugmentedFormanRicciEdge} as the Forman-Ricci curvature of an edge. From a geometric point of view, the Forman-Ricci curvature quantifies the information spread at the ends of edges in a network. Higher information spread at the ends of an edge implies a more negative value of its Forman-Ricci curvature. In this work, we employ the Forman-Ricci curvature of an edge (given by Eq. \ref{AugmentedFormanRicciEdge}) to transform an unweighted graph into a weighted graph which captures the local curvature properties (Figure \ref{schemfig}).
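For an unweighted graph, where all vertex, edge and triangle weights equal 1, Equation \ref{AugmentedFormanRicciEdge} reduces to ${\rm F}(e)=4-\deg(v_1)-\deg(v_2)+3\,\#\{\text{triangles containing } e\}$. The following minimal Python sketch (assuming networkx, which is not part of our pipeline but is used here only for illustration) computes this quantity for every edge of a graph.

\begin{verbatim}
# A minimal sketch (networkx assumed) of Eq. (AugmentedFormanRicciEdge)
# for an unweighted graph; with unit weights it reduces to
#   F(e) = 4 - deg(v1) - deg(v2) + 3 * (#triangles containing e).
import networkx as nx

def forman_ricci_curvature(G):
    curvature = {}
    for v1, v2 in G.edges():
        triangles = len(set(G[v1]) & set(G[v2]))  # common neighbours of v1 and v2
        curvature[(v1, v2)] = 4 - G.degree(v1) - G.degree(v2) + 3 * triangles
    return curvature

G = nx.karate_club_graph()
F = forman_ricci_curvature(G)
print(min(F.values()), max(F.values()))  # highly negative edges are the critical ones
\end{verbatim}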
\subsection{Edge Betweenness Centrality}
Edge betweenness centrality \cite{Freeman1977,Girvan2002,Newman2010} quantifies the importance of edges for global information flow in networks. For any edge $e$, this measure is computed from the fraction of shortest paths between pairs of vertices in the network that pass through the edge $e$. Formally, in a graph $G(V,E)$, the edge betweenness centrality of an edge $e \in E$ is given by: \begin{equation} \label{EBC} {\rm EBC} (e) = \sum_{v_i} \sum_{v_j, v_j \neq v_i} \frac{\sigma_{v_i v_j}(e)}{\sigma_{v_i v_j}} \end{equation} where $\sigma_{v_i v_j}$ gives the number of shortest paths between vertices $v_i$ and $v_j$ in the network and $\sigma_{v_i v_j}(e)$ gives the number of shortest paths between vertices $v_i$ and $v_j$ in the network that contain the considered edge $e$. Note that an edge with a high edge betweenness centrality is critical for maintaining information flow in the network.
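Edge betweenness centrality is available in standard network libraries; the following minimal sketch (assuming networkx, used here only for illustration) computes it for all edges of a graph. Note that networkx sums over unordered vertex pairs in undirected graphs, so its raw values may differ from Equation \ref{EBC} by a constant factor.

\begin{verbatim}
# A minimal sketch (networkx assumed): edge betweenness centrality of
# every edge; networkx sums over unordered vertex pairs, so values may
# differ from Eq. (EBC) by a constant factor.
import networkx as nx

G = nx.karate_club_graph()
ebc = nx.edge_betweenness_centrality(G, normalized=False)
critical_edge = max(ebc, key=ebc.get)
print(critical_edge, ebc[critical_edge])  # edge most critical for information flow
\end{verbatim}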
\section{Datasets}
The proposed method for studying persistent homology in unweighted networks has been investigated in different network models, namely, the Erd\H{o}s-R\'enyi (ER) model \cite{Erdos1961}, the Watts-Strogatz (WS) model \cite{Watts1998}, the Barab\'{a}si-Albert (BA) model \cite{Barabasi1999}, and the Hyperbolic Graph Generator (HGG) \cite{Krioukov2010}. We give brief descriptions of each below; a minimal sketch for generating the first three models with standard tools is given after the list. \begin{itemize} \setlength\itemsep{0em} \item \textit{ER model}: The ER model has two parameters, $n$ and $p$, where $n$ is the number of vertices and $p$ is the probability for the existence of an edge between a distinct pair of vertices. An ER graph is obtained by starting with a set of $n$ vertices and connecting each distinct pair of vertices by an edge with probability $p$. The presence of an edge between any pair of vertices is independent of the other edges. \item \textit{WS model}: The WS model can be characterized by three parameters: $n$, the number of vertices; $k$, the number of neighbours each vertex has before rewiring; and $p$, the rewiring probability. The construction of the WS graph begins with a graph with $n$ vertices where each vertex has $k$ nearest neighbours. Thereafter, each edge is rewired with probability $p$ by reconnecting one of its endpoints to another vertex chosen uniformly at random. \item \textit{BA model}: The BA model generates a scale-free graph with $n$ vertices by satisfying the so-called preferential attachment condition. Under the preferential attachment scheme, at each iteration step, the graph grows by the addition of a new vertex with $m$ edges to existing vertices, in such a way that existing vertices with higher degree have a higher probability of gaining an edge to the new vertex than vertices with lower degree. In the BA model, the probability of connecting the new vertex to an existing vertex is directly proportional to the degree of that vertex at that time. The BA model generates graphs with a power-law degree distribution \cite{Barabasi1999}. \item \textit{Hyperbolic random graphs}: The input parameters of HGG are the number of vertices $n$, the target average degree $k$, the target exponent $\gamma$ of the power-law degree distribution and temperature $T$. For the construction of a hyperbolic random graph, HGG scatters $n$ vertices on a hyperbolic space and the existence of an edge between the vertices is based on a probability value, which is given by a function of the hyperbolic distance between the vertices. The vertex degree distribution in the hyperbolic random graphs produced by HGG follows a power-law. The HGG generates a hyperbolic random graph for $\gamma \in [0,\infty)$. \item \textit{Spherical random graphs}: Similar to the hyperbolic random graph, a spherical random graph can be constructed using HGG by scattering $n$ vertices on a sphere of radius $R$, and the probability for the existence of an edge between two vertices is a function of the spherical distance between the vertices. The HGG model produces a spherical random graph for $\gamma=\infty$. \end{itemize}
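For reference, the ER, WS and BA graphs analyzed here can be generated with standard tools; the following minimal sketch (assuming networkx, with parameter values matching those reported in the figure captions) illustrates this. Hyperbolic and spherical random graphs are generated separately with the Hyperbolic Graph Generator.

\begin{verbatim}
# A minimal sketch (networkx assumed): unweighted ER, WS and BA graphs
# with n = 1000 vertices and average degree close to 4, matching the
# parameters used in this work.
import networkx as nx

er = nx.erdos_renyi_graph(n=1000, p=0.004, seed=1)
ws = nx.watts_strogatz_graph(n=1000, k=4, p=0.5, seed=1)
ba = nx.barabasi_albert_graph(n=1000, m=2, seed=1)

for name, g in [("ER", er), ("WS", ws), ("BA", ba)]:
    print(name, g.number_of_nodes(), g.number_of_edges())
\end{verbatim}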
In addition to model networks, the proposed method has also been studied in the following seven real-world networks. \begin{itemize} \setlength\itemsep{0em} \item \textit{Yeast protein interaction} network \cite{Jeong2001} with 1870 vertices representing proteins and 2277 edges signifying protein-protein interactions. \item \textit{Human protein interaction} network \cite{Rual2005} with 3133 vertices representing proteins and 6726 edges signifying protein-protein interactions. \item \textit{US Power Grid} network \cite{Leskovec2007} with 4941 vertices representing generators, transformers and substations in the Western states of USA and the 6594 edges signifying power links between them. \item \textit{Euro road} network \cite{Subelj2011} with 1174 vertices corresponding to cities in Europe and the 1417 edges signifying roads linking the cities. \item \textit{Email} network \cite{Guimera2003} with 1133 vertices representing users in the University of Rovira i Virgili and 5451 edges signifying the existence of at least one Email communication between pairs of users. \item \textit{Route views} network \cite{Leskovec2007} with 6474 vertices corresponding to autonomous systems and 13895 edges signifying communication between the autonomous systems or vertices. \item \textit{Hamsterster friendship} network \cite{Kunegis2013} with 1858 vertices representing the users and 12534 edges signifying friendships between the users. \end{itemize} We remark that self-loops have been omitted while constructing the clique complexes from the undirected graphs corresponding to real networks. Note that the dataset of model and real-world networks analyzed here using the proposed methods based on Forman-Ricci curvature or edge betweenness centrality was also studied using our previous method \cite{Kannan2019} based on discrete Morse theory.
\begin{figure*}
\caption{$H_0$ and $H_1$ barcode diagrams obtained using our new method based on Forman-Ricci curvature in model networks with average degree 4. (a) ER model with $n=1000$ and $p=0.004$. (b) WS model with $n=1000$, $k=4$ and $p=0.5$. (c) BA model with $n=1000$ and $m=2$. (d) Spherical random graphs produced from HGG model with $n=1000$, $T=0$, $k=4$ and $\gamma=\infty$. (e) Hyperbolic random graphs produced from HGG model with $n=1000$, $T=0$, $k=4$ and $\gamma=2$.}
\label{fig2}
\end{figure*}
\begin{figure*}
\caption{$H_2$ barcode diagrams obtained using our new method based on Forman-Ricci curvature in model and real networks. (a) Spherical random graph produced from HGG model with $n=1000$, $T=0$, $k=4$ and $\gamma=\infty$. (b) Hyperbolic random graph produced from HGG model with $n=1000$, $T=0$, $k=4$ and $\gamma=2$. (c) Email communication. (d) Route views. (e) Hamsterster friendship. (f) Human protein interaction.}
\label{fig3}
\end{figure*}
\begin{figure*}
\caption{$H_3$ barcode diagrams obtained using our new method based on Forman-Ricci curvature in model and real networks. (a) Spherical random graph produced from HGG model with $n=1000$, $T=0$, $k=4$ and $\gamma=\infty$. (b) Hyperbolic random graph produced from HGG model with $n=1000$, $T=0$, $k=4$ and $\gamma=2$. (c) US Power Grid. (d) Email communication. (e) Route views. (f) Yeast protein interaction. (g) Hamsterster friendship. (h) Human protein interaction.}
\label{fig4}
\end{figure*}
\begin{figure*}
\caption{Bottleneck distance between persistence diagrams obtained using our new method based on Forman-Ricci curvature in model networks with average degree 4. For each of the five model networks, 10 random samples are generated by fixing the number of vertices $n$ and other parameters of the model. We report the distance (rounded to two decimal places) between two different models as the average of the distance between each of the possible pairs of the 10 sample networks corresponding to the two models along with the standard error.}
\label{fig5}
\end{figure*}
\begin{figure*}
\caption{$H_0$ and $H_1$ barcode diagrams obtained using our new method based on Forman-Ricci curvature in real networks. (a) US Power Grid. (b) Email communication. (c) Route views. (d) Yeast protein interaction. (e) Hamsterster friendship. }
\label{fig6}
\end{figure*}
\section{Results and Discussion}
\subsection{Persistent homology of unweighted networks using Forman-Ricci curvature}
We here present a new method based on Forman-Ricci curvature \cite{Sreejith2016,Sreejith2017,Samal2018} to study persistent homology in unweighted and undirected networks. Essentially, our method relies on transforming an unweighted and undirected graph $G$ into an edge-weighted network followed by construction of a weighted clique simplicial complex $K$.
We begin by transforming a given unweighted and undirected network into an edge-weighted network by assigning weights to edges based on their Forman-Ricci curvature (Figure \ref{schemfig}). The Forman-Ricci curvature of each edge in an unweighted network can be computed using Equation \ref{AugmentedFormanRicciEdge} as described in the Theory section. Thereafter, we assign weights to edges in the network by normalization of the associated Forman-Ricci curvatures using the following formula: \begin{equation} \label{FormanWeight} w(e) = \frac { \rm{F}(e) - ( \rm{F}_{\text{min}} - \epsilon)} { ( \rm{F}_{\text{max}} + \epsilon ) - ( \rm{F}_{\text{min}} - \epsilon ) } \end{equation} where $w(e)$ is the weight of edge $e$, $\rm{F}(e)$ is the Forman-Ricci curvature of edge $e$, $\rm{F}_{\text{min}}$ and $\rm{F}_{\text{max}}$ are the minimum value and maximum value, respectively, of the Forman-Ricci curvature across all edges in the network, and $\epsilon$ is a positive number which is taken here to be 1. In sum, the above formula gives the weights of edges in the weighted network corresponding to the given unweighted and undirected network. In schematic Figure \ref{schemfig}, we show this transformation of an unweighted and undirected graph into an edge-weighted network using an example.
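To make the normalization concrete, the following minimal sketch (with hypothetical curvature values and $\epsilon = 1$, as in the text) implements Equation \ref{FormanWeight}.

\begin{verbatim}
# A minimal sketch of the normalization in Eq. (FormanWeight) with
# epsilon = 1, applied to hypothetical Forman-Ricci curvature values.
F = {(0, 1): -3, (1, 2): 1, (0, 2): 1, (2, 3): -1}
eps = 1.0
F_min, F_max = min(F.values()), max(F.values())
w = {e: (F[e] - (F_min - eps)) / ((F_max + eps) - (F_min - eps)) for e in F}
print(w)  # all weights lie strictly between 0 and 1
\end{verbatim}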
Next, we construct a weighted clique simplicial complex starting from the edge-weighted network as follows (Figure \ref{schemfig}). The $1$-simplices or edges in the clique complex corresponding to the edge-weighted network already have normalized weights based on their Forman-Ricci curvature. Based on the weights of $1$-simplices or edges, we assign weights to $0$-simplices or vertices such that the weight of a vertex in the clique complex is equal to the minimum of the weights of edges incident on the vertex (Equation \ref{simplexweight}). In other words, the weight of the $0$-simplex is the minimum of the weights of its $1$-dimensional co-faces in the clique complex. Similarly, we assign weights to $2$-simplices such that the weight of a $2$-simplex in the clique complex is equal to the maximum of the weights of its $1$-dimensional faces or edges (Equation \ref{simplexweight}). In the same manner, we can assign weights to higher-dimensional simplices (see Theory section). For example, the weight of a $3$-simplex in the clique complex is equal to the maximum of the weights of its $2$-dimensional faces. In schematic Figure \ref{schemfig}, we show this construction of a weighted clique simplicial complex starting from an edge-weighted network using an example.
To construct a weighted clique simplicial complex corresponding to an unweighted and undirected network, our scheme hinges on assignment of weights to $0$-simplices (vertices) based on weights of their $1$-dimensional co-faces (i.e., edges attached to vertices). In many real-world networks, there are isolated vertices which are not attached to any edges in the graph. In our scheme, isolated vertices ($0$-simplices) are assigned weights equal to the maximum of the weights given to any simplex in the clique complex. In the example shown in schematic Figure \ref{schemfig}, we assign weight to the isolated vertex $v_9$ in the weighted clique complex as described above.
After constructing the weighted clique complex $K$ corresponding to a given unweighted and undirected graph $G$, we investigate the persistent homology of the simplicial complex via the associated filtration described in Equation \ref{filtration} in the Theory section. In order to construct this filtration of the weighted clique complex $K$, the assigned weights $w(\alpha_i)$ to simplices $\alpha_i$ in $K$ are arranged in an increasing order, say $\lambda_1 \leq \lambda_2 \leq \cdots \leq \lambda_n$, and thereafter, the sequence of subcomplexes, $K(\lambda_1) \subseteq K(\lambda_2) \subseteq \cdots \subseteq K(\lambda_n)$ is used to compute the persistent homology groups of $K$ as described in the Theory section. In schematic Figure \ref{schemfig}, we show this filtration of the weighted clique complex corresponding to an unweighted and undirected network using an example.
In previous work \cite{Sreejith2016,Sreejith2017,Samal2018}, it was shown that edges critical for the robustness of a complex network have highly negative Forman-Ricci curvature. From Equation \ref{FormanWeight}, it follows that the weights assigned to edges or $1$-simplices in the weighted clique complex $K$ constructed by our scheme are inversely related to their importance from a robustness perspective. Notably, edges critical for the integrity of the network are likely to be added in the initial stages of the filtration of the weighted clique complex $K$, and thus, our method for studying the persistent homology revolves around the central idea that the more important features of the network are included in the initial stages of filtration.
We emphasize that the proposed method summarized in Figure \ref{schemfig} to study persistent homology in unweighted networks basically relies on transforming an unweighted network into an edge-weighted graph which is then used to construct a weighted clique complex. In principle, an edge-weighted graph can be obtained from an unweighted graph by assigning weights to edges based on any edge-centric measure. In our method summarized in Figure \ref{schemfig}, we have chosen the edge-centric measure, Forman-Ricci curvature, for this transformation. Another possible and attractive choice of an edge-centric measure for this transformation is the edge betweenness centrality \cite{Freeman1977,Girvan2002,Newman2010}.
In this work, we have also explored this alternate choice of edge betweenness centrality to construct edge-weighted networks and study the persistent homology of unweighted networks. The edge betweenness centrality of an edge in an unweighted network can be computed using Equation \ref{EBC} as described in the Theory section. Thereafter, we can assign weights to edges in the network by normalization of the associated edge betweenness centralities using the following formula: \begin{equation} \label{EBCWeight} w(e) = \frac { ( \rm{EBC}_{\text{max}} + \epsilon) - \rm{EBC}(e)} { ( \rm{EBC}_{\text{max}} + \epsilon ) - ( \rm{EBC}_{\text{min}} - \epsilon ) } \end{equation} where $w(e)$ is the weight of edge $e$, $\rm{EBC}(e)$ is the edge betweenness centrality of edge $e$, $\rm{EBC}_{\text{min}}$ and $\rm{EBC}_{\text{max}}$ are the minimum value and maximum value, respectively, of the edge betweenness centrality across all edges in the network, and $\epsilon$ is a positive number which is taken here to be 1. Since the edges with high edge betweenness centrality are highly critical for information flow in the network, the above equation assigns lower weights to such critical edges to ensure their addition during initial stages of the filtration.
In the main text of this paper, we report results from the investigation of persistent homology in unweighted networks using our method based on Forman-Ricci curvature. In the supplementary information (SI) of this paper, we report results from the investigation of persistent homology in unweighted networks using our method based on the alternate choice of edge betweenness centrality. In the sequel, we will show that the qualitative and quantitative results obtained in unweighted model and real networks using our method based on Forman-Ricci curvature are very similar to those obtained using our method based on edge betweenness centrality. However, the calculation of edge betweenness centrality requires computing all shortest paths between every distinct pair of vertices in the network, and thus, it is much more computationally expensive than Forman-Ricci curvature. Therefore, our method based on Forman-Ricci curvature is a better choice from a computational perspective to study persistent homology in unweighted and undirected networks.
\subsection{Implementation in model and real networks}
In this work, we have investigated five model networks and seven real-world networks using our methods based on Forman-Ricci curvature and edge betweenness centrality described in the preceding section to study persistent homology in unweighted and undirected networks.
For a given unweighted and undirected network $G$, either model or real-world, we first construct the corresponding edge-weighted network based on Forman-Ricci curvature (Equation \ref{FormanWeight}) or edge betweenness centrality (Equation \ref{EBCWeight}); thereafter, we construct a weighted clique simplicial complex $K$ and then study the corresponding filtration based on the edge weights as described in the preceding section. We remark that our investigation of the persistent homology in model and real networks is limited to the $3$-dimensional clique simplicial complex $K$ corresponding to $G$. That is, we only include $p$-simplices which have $0 \leq p \leq 3$ while constructing the weighted clique simplicial complex $K$ starting from an unweighted graph $G$. For these computations of persistent homology in model and real networks, we use GUDHI \cite{Maria2014} which is a C++ based library for Topological Data Analysis (http://gudhi.gforge.inria.fr/).
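For concreteness, the following minimal sketch (assuming the GUDHI Python bindings; the simplices and weights are hypothetical and correspond to a toy weighted clique complex) illustrates how such a filtration can be entered into a simplex tree and its persistent homology computed.

\begin{verbatim}
# A minimal sketch (GUDHI Python bindings assumed): insert a small
# weighted clique complex into a simplex tree and compute persistence.
# Faces inserted earlier with lower weights keep those weights, so the
# filtration agrees with Eq. (filtration).
import gudhi

st = gudhi.SimplexTree()
weighted_simplices = [([0], 0.2), ([1], 0.2), ([2], 0.4), ([3], 0.4),
                      ([0, 1], 0.2), ([1, 2], 0.5), ([0, 2], 0.9),
                      ([2, 3], 0.4), ([0, 1, 2], 0.9)]
for simplex, w in weighted_simplices:
    st.insert(simplex, filtration=w)

diag = st.persistence()  # list of (dimension, (birth, death)) pairs
print(st.persistence_intervals_in_dimension(0))  # infinite bars have death = inf
print(st.persistence_intervals_in_dimension(1))
\end{verbatim}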
In the following, we present our results from application of our method based on Forman-Ricci curvature to study persistent homology in model and real networks (Figure \ref{schemfig}). We have studied here five different model networks, namely, ER, WS, BA, spherical random graphs and hyperbolic random graphs (see Datasets section). The $0$-holes or $H_0$ barcode diagram gives the number of connected components in the network at every stage of the filtration. We find that the ER and WS networks possess a large number of components throughout the filtration in comparison to BA networks where there is typically a single component which persists across the entire filtration (Figure \ref{fig2}, SI Figures S1 and S2). This suggests that the simplices which are critical for the overall connectivity of the network are introduced during the initial stages of the filtration of BA networks in contrast to ER and WS networks. The $H_1$ barcode diagram indicates the presence of $1$-holes in the network. We find that the $1$-holes appear earlier during filtration in ER and WS networks in comparison to BA networks where $1$-holes appear towards the end of filtration (Figure \ref{fig2}, SI Figures S1 and S2). Thus, the $H_0$ and $H_1$ barcode diagrams are able to qualitatively distinguish scale-free BA networks from random ER networks and small-world WS networks (Figure \ref{fig2}, SI Figures S1 and S2). Lastly, $H_2$ and $H_3$ barcode diagrams do not provide any insight into the structure of ER, WS and BA networks due to the lack of $2$-holes and $3$-holes.
The ER, WS and BA networks can be distinguished from both spherical and hyperbolic random graphs based on the significantly larger number of $0$-holes or connected components in the latter (Figure \ref{fig2}, SI Figures S1 and S2). Though the numbers of components in the spherical and hyperbolic random graphs are similar, the pattern of the filtration sequence is a distinguishing factor. The $0$-holes in the spherical random graph are more distributed across the filtration sequence, while there are very few $0$-holes in the hyperbolic random graph for most of the filtration, with the introduction of a large number of $0$-holes just before the end of the filtration. This can be understood by the presence of many isolated vertices in the hyperbolic random graphs which are assigned maximum weight in the corresponding weighted clique complex, and thus, appear at the end of filtration (Figure \ref{fig2}, SI Figures S1 and S2). Moreover, $1$-holes and $2$-holes appear during intermediate stages of filtration in both spherical and hyperbolic random graphs; however, these holes do not persist until the end of the filtration (Figures \ref{fig2}-\ref{fig3}, SI Figures S1-S3). Notably, there are many more $3$-holes in hyperbolic random graphs in comparison to spherical graphs (Figure \ref{fig4}, SI Figure S3). Overall, these features enable a qualitative distinction between hyperbolic and spherical model graphs with very different underlying geometry.
The barcode diagrams for the five model networks obtained from our method based on Forman-Ricci curvature can be used to make a qualitative distinction between different types of networks (Figures \ref{fig2}-\ref{fig4}, SI Figures S1-S3). For a quantitative distinction between the topological features of the five model networks, we use the bottleneck distance between total persistence diagrams obtained from our method based on Forman-Ricci curvature as described in the Theory section. From Figure \ref{fig5}, it is seen that the bottleneck distance between BA and ER or WS networks of similar average degree is higher than the distance between ER and WS networks. Furthermore, the bottleneck distance is high between spherical and hyperbolic random graphs of similar average degree. Overall, the bottleneck distances between the total persistence diagrams for the five model networks provide a quantitative validation of the applicability of our method based on Forman-Ricci curvature to reveal distinct topological features in unweighted and undirected networks.
We have also studied here seven well-known real-world networks (see Datasets section). From the $H_0$ barcode of the US Power Grid network, it is clear that there is one connected component which persists across the entire filtration despite many transient components appearing and disappearing during intermediate stages of filtration (Figure \ref{fig6}). The $H_0$ barcode diagrams of the E-mail and Route views networks are similar in the sense that the number of connected components across the entire filtration is low (Figure \ref{fig6}). The $H_0$ barcode diagrams of the two biological networks, namely, Human protein interaction and Yeast protein interaction, show a sudden increase in the number of connected components at the final stage of filtration (Figure \ref{fig6}, SI Figure S4). The $H_0$ barcode diagram of the Euro road network displays a distributed pattern with components spanning across varied intervals of filtration (SI Figure S4). The $H_1$ barcode diagrams of the seven real networks are similar in that there is typically an increase in the number of $1$-holes around the middle to later stages of filtration (Figure \ref{fig6}, SI Figure S4). We also display the $H_2$ and $H_3$ barcode diagrams for real networks in Figures \ref{fig3} and \ref{fig4}, respectively. Note that the Euro road network is devoid of $2$-holes and $3$-holes, while the Yeast protein interaction network and US Power Grid network are devoid of $2$-holes (Figures \ref{fig3} and \ref{fig4}). In sum, the barcode diagrams obtained using our method based on Forman-Ricci curvature can be used to reveal differences between model and real networks.
In the above paragraphs, we reported our results from application of our method based on Forman-Ricci curvature to study persistent homology in unweighted networks, both model and real-world. As described in the preceding section, we can also apply an alternate method based on edge betweenness centrality to study persistent homology in unweighted networks, both model and real-world. In SI Figures S5-S9, we present the $H_0$, $H_1$, $H_2$ and $H_3$ barcode diagrams obtained using our alternate method based on edge betweenness centrality in five model and seven real-world networks. Based on SI Figures S5-S9, it is evident that we are also able to make a qualitative distinction between various model networks using the barcode diagrams obtained from our method based on edge betweenness centrality, and these results are similar to the results described above from our method based on Forman-Ricci curvature. Moreover, in SI Figure S10, we display the bottleneck distances between persistence diagrams corresponding to the five model networks obtained using the alternate method based on edge betweenness centrality, and these results are also similar to those shown in Figure \ref{fig5} which are obtained using our method based on Forman-Ricci curvature.
\subsection{Comparison with method based on discrete Morse theory}
Classical Morse theory on smooth manifolds has been a rich tool for detecting the topology of the underlying space \cite{Morse1934}. However, it requires a smooth structure to probe the topology via real-valued smooth functions. Robin Forman introduced \textit{discrete Morse theory}, the discrete counterpart of classical Morse theory \cite{Forman1998,Forman2002}, which is applicable to a large class of topological objects called $CW$-complexes, even those which lack smoothness. Similar to classical Morse theory, this discretized version also captures the topology of the underlying object. A fundamental notion in discrete Morse theory is that of \textit{critical cells}, which are the discrete analogues of the critical points of a smooth Morse function, i.e. points at which its gradient vanishes. The number of such critical cells is intricately related to the Betti numbers and the Euler characteristic of the topological space via the Morse inequalities \cite{Forman1998,Forman2002}.
In our previous work \cite{Kannan2019}, we have used the discrete Morse theory of Robin Forman to set weights on the clique complex of an unweighted graph. We also gave an algorithm to produce a near-optimal discrete Morse function, in the sense that the number of so-called critical simplices of the function is close to the theoretical minimum given by Betti numbers. The advantages of this approach lie in the ability to \textit{preprocess} the simplicial complex, which leads to significant simplification and, in turn, to computational efficiency in homology calculations \cite{Mischaikow2013,Harker2014}. In principle, one can use this method independently of TDA, for example to study combinatorial topology aspects of such complexes \cite{Shareshian2001}. As an application to TDA, we \cite{Kannan2019} used this method to compute the persistent homology of the model and real-world networks which are also analyzed in this contribution, and showed that it was able to distinguish between various networks with different topological features.
Our present methods focus on the computation of persistent homology only, setting aside the computational advantages of using a near-optimal discrete Morse function. We have shown that even though we lose the simplified topological structure provided by discrete Morse theory, we can still apply other weighting schemes based on local and global measures, such as Forman-Ricci curvature and edge betweenness centrality, to compute the persistent homology of various networks. Indeed, the new methods distinguish the various model networks as effectively as the previous method based on discrete Morse theory. Therefore, using these new weighting schemes can be seen as trading the computational efficiency offered by a discrete Morse function for a simpler route to applying TDA to unweighted graphs.
\section{Conclusions}
This work provides techniques for applying TDA to investigate the topological properties of unweighted networks. We employ two methods to convert an unweighted network into a weighted network. The first one is a discretized version of Ricci curvature which takes into account only local information around an edge in the network, while the second one is a global quantifier which measures the importance of an edge based on the number of shortest paths between any two distinct vertices of the network passing through that edge. Once we have a weighted graph, standard techniques allow us to obtain a filtration of the associated clique complex, and thereby compute its persistent homology (Figure \ref{schemfig}). We have applied these methods to study five different kinds of model networks. We also show that these methods can distinguish different model networks via the averaged bottleneck distance between the corresponding persistence diagrams (Figure \ref{fig5}). We also apply the same techniques to study some well-known real-world networks to obtain insights into their underlying topology. In future work, we plan to apply these techniques to networks arising out of physical systems, with the goal of trying to find correlations between dynamics of such systems and their underlying topology via the application of TDA.
\subsection*{Funding}
This work was supported by Max Planck Society, Germany, through the award of a Max Planck Partner Group in Mathematical Biology (to A.S.) and Science and Engineering Research Board (SERB), Department of Science and Technology (DST) India through the award of a MATRICS grant [MTR/2017/000835] (to I.R.).
\subsection*{Declaration of Competing Interest}
None.
\subsection*{Acknowledgments}
S.V. and S.J.R. thank the Institute of Mathematical Sciences (IMSc), Chennai, India for their local hospitality and R. Nadarajan for encouragement.
\subsection*{Author contributions}
I.R. and A.S. designed the study and developed the method. S.V. and S.J.R. performed the simulations. I.R. and A.S. analyzed results. I.R., S.V., S.J.R. and A.S. wrote the manuscript. All authors reviewed and approved the manuscript.
\begin{thebibliography}{56} \makeatletter \providecommand \@ifxundefined [1]{
\@ifx{#1\undefined} } \providecommand \@ifnum [1]{
\ifnum #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi } \providecommand \@ifx [1]{
\ifx #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi }
\end{thebibliography}
\begin{center} \section*{\large \bf SUPPLEMENTARY INFORMATION (SI)} \end{center} \renewcommand{\theequation}{S.\arabic{equation}} \renewcommand{\thefigure}{S\arabic{figure}} \setcounter{equation}{0} \setcounter{figure}{0}
\begin{figure}
\caption{$H_0$ and $H_1$ barcode diagrams obtained using our new method based on Forman-Ricci curvature in model networks with average degree 6.}
\end{figure}
\begin{figure}
\caption{$H_0$ and $H_1$ barcode diagrams obtained using our new method based on Forman-Ricci curvature in model networks with average degree 8.}
\end{figure}
\begin{figure}
\caption{$H_2$ and $H_3$ barcode diagrams obtained using our new method based on Forman-Ricci curvature in model networks with average degree 6 and 8.}
\end{figure}
\begin{figure}
\caption{$H_0$ and $H_1$ barcode diagrams obtained using our new method based on Forman-Ricci curvature in real networks. (a) Euro road. (b) Human protein interaction. }
\end{figure}
\begin{figure}
\caption{$H_0$ and $H_1$ barcode diagrams obtained using our new method based on edge betweenness centrality in model networks with average degree 4.}
\end{figure}
\begin{figure}
\caption{$H_0$ and $H_1$ barcode diagrams obtained using our new method based on edge betweenness centrality in real networks. (a) US Power Grid. (b) Email communication. (c) Route views. (d) Yeast protein interaction. (e) Hamsterster friendship.}
\end{figure}
\begin{figure}
\caption{$H_0$ and $H_1$ barcode diagrams obtained using our new method based on edge betweenness centrality in real networks. (a) Euro road. (b) Human protein interaction. }
\end{figure}
\begin{figure*}
\caption{$H_2$ barcode diagrams obtained using our new method based on edge betweenness centrality in model and real networks. (a) Spherical random graph produced from HGG model with $n=1000$, $T=0$, $k=4$ and $\gamma=\infty$. (b) Hyperbolic random graph produced from HGG model with $n=1000$, $T=0$, $k=4$ and $\gamma=2$. (c) Email communication. (d) Route views. (e) Hamsterster friendship. (f) Human protein interaction.}
\end{figure*}
\begin{figure*}
\caption{$H_3$ barcode diagrams obtained using our new method based on edge betweenness centrality in model and real networks. (a) Spherical random graph produced from HGG model with $n=1000$, $T=0$, $k=4$ and $\gamma=\infty$. (b) Hyperbolic random graph produced from HGG model with $n=1000$, $T=0$, $k=4$ and $\gamma=2$. (c) US Power Grid. (d) Email communication. (e) Route views. (f) Yeast protein interaction. (g) Hamsterster friendship. (h) Human protein interaction.}
\end{figure*}
\begin{figure*}
\caption{Bottleneck distance between persistence diagrams obtained using our new method based on edge betweenness centrality in model networks with average degree 4. For each of the five model networks, 10 random samples are generated by fixing the number of vertices $n$ and other parameters of the model. We report the distance (rounded to two decimal places) between two different models as the average of the distance between each of the possible pairs of the 10 sample networks corresponding to the two models along with the standard error.}
\end{figure*}
\end{document}
\begin{document}
\title[Nondivergence form degenerate parabolic equations]{Nondivergence form degenerate linear parabolic equations on the upper half space}
\author[H. Dong]{Hongjie Dong} \address[H. Dong]{Division of Applied Mathematics, Brown University, 182 George Street, Providence RI 02912, USA} \email{[email protected]}
\author[T. Phan]{Tuoc Phan} \address[T. Phan]{Department of Mathematics, University of Tennessee, 227 Ayres Hall, 1403 Circle Drive, Knoxville, TN 37996-1320, USA} \email{[email protected]}
\author[H. V. Tran]{Hung Vinh Tran} \address[H. V. Tran]{Department of Mathematics, University of Wisconsin-Madison, Van Vleck Hall 480 Lincoln Drive Madison, WI 53706, USA} \email{[email protected]}
\thanks{ H. Dong is partially supported by the NSF under agreement DMS-2055244 and the Simons Fellows Award 007638. H. Tran is supported in part by NSF CAREER grant DMS-1843320 and a Vilas Faculty Early-Career Investigator Award. } \subjclass[2020]{35K65, 35K67, 35K20, 35D30} \keywords{Degenerate linear parabolic equations; degenerate viscous Hamilton-Jacobi equations; nondivergence form; boundary regularity estimates; existence and uniqueness; weighted Sobolev spaces}
\begin{abstract} We study a class of nondivergence form second-order degenerate linear parabolic equations in $(-\infty, T) \times \mathbb{R}^d_+$ with the homogeneous Dirichlet boundary condition on $(-\infty, T) \times \partial \mathbb{R}^d_+$, where $\mathbb{R}^d_+ = \{x =(x_1,x_2,\ldots, x_d) \in \mathbb{R}^d\,:\, x_d>0\}$ and $T\in {(-\infty, \infty]}$ is given. The coefficient matrices of the equations are the product of $\mu(x_d)$ and bounded positive definite matrices, where $\mu(x_d)$ behaves like $x_d^\alpha$ for some given $\alpha \in (0,2)$, which are degenerate on the boundary $\{x_d=0\}$ of the domain. The divergence form equations in this setting were studied in \cite{DPT21}. Under a partially weighted VMO assumption on the coefficients, we obtain the wellposedness and regularity of solutions in weighted Sobolev spaces. Our research program is motivated by the regularity theory of solutions to degenerate viscous Hamilton-Jacobi equations.
\end{abstract}
\dedicatory{Dedicated to Professor Mikhail Safonov on the occasion of his $70^{\text{th}}$ birthday} \maketitle
\section{Introduction and main results}
\subsection{Settings} Let $T\in (-\infty,\infty]$ and $\Omega_T=(-\infty,T)\times \mathbb{R}^d_+$. We study the following degenerate parabolic equation in nondivergence form \begin{equation}\label{eq:main} \begin{cases} \sL u=\mu(x_d) f \quad &\text{ in } \Omega_T,\\ u=0 \quad &\text{ on } (-\infty, T) \times \partial \mathbb{R}^d_+, \end{cases} \end{equation} where $u: \Omega_T \rightarrow \mathbb{R}$ is an unknown solution, $f: \Omega_T \rightarrow \mathbb{R}$ is a given measurable forcing term, and \begin{equation} \label{L-def} \sL u = a_0(z) u_t+\lambda c_0(z)u-\mu(x_d) a_{ij}(z)D_i D_j u. \end{equation} Here in \eqref{L-def}, $\lambda\ge 0$ is a constant, $z=(t,x) \in \Omega_T$ with $x = (x', x_d) \in \mathbb{R}^{d-1} \times \mathbb{R}_+$, $D_i$ denotes the partial derivative with respect to $x_i$, and $a_0, c_0: \Omega_T \rightarrow \mathbb{R}$ and $\mu: \mathbb{R}_+ \rightarrow \mathbb{R}$ are measurable and satisfy \begin{equation} \label{con:mu}
a_0(z), \ c_0(z), \ \frac{\mu(x_d)}{x_d^\alpha} \in[\nu,\nu^{-1}], \quad \forall \ x_d \in \mathbb{R}_+, \quad \forall \ z \in \Omega_T, \end{equation} for some given $\alpha\in (0,2)$ and $\nu \in (0,1)$. Moreover, $(a_{ij}): \Omega_T \rightarrow \mathbb{R}^{d\times d}$ are measurable and satisfy the uniform ellipticity and boundedness conditions \begin{equation} \label{con:ellipticity}
\nu|\xi|^2 \leq a_{ij}(z) \xi_i \xi_j, \quad |a_{ij}(z)| \leq \nu^{-1}, \quad \forall \ z \in \Omega_T, \end{equation} for all $\xi = (\xi_1, \xi_2, \ldots, \xi_d) \in \mathbb{R}^d$.
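For instance, $\mu(x_d) = x_d^{\alpha}\,\big(2 + \sin(\ln x_d)\big)$ satisfies \eqref{con:mu} with $\nu = 1/3$, since $\mu(x_d)/x_d^{\alpha} = 2 + \sin(\ln x_d) \in [1,3] \subset [1/3, 3]$; in particular, $\mu$ need not be smooth nor exactly equal to $x_d^{\alpha}$, and only measurability of $a_0$, $c_0$, and $(a_{ij})$ in $(t,x)$ is assumed at this point.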
We observe that due to \eqref{con:mu} and \eqref{con:ellipticity}, the diffusion coefficients in the PDE in \eqref{eq:main} are degenerate when $x_d \rightarrow 0^+$, and singular when $x_d \rightarrow \infty$. We also note that the PDE in \eqref{eq:main} can be written in the form \[ [a_0(z) u_t + \lambda c_0(z) u]/\mu(x_d) - a_{ij}(z) D_iD_j u = f \quad \text{in} \quad \Omega_T, \] in which the singularity and degeneracy appear in the coefficients of the terms involving $u_t$ and $u$. In the special case when $a_0 = c_0 =1, \mu(x_d) = x_d^\alpha$, and $(a_{ij})$ is an identity matrix, the equation \eqref{eq:main} is reduced to \begin{equation} \label{simplest-eqn} \left\{ \begin{array}{cccl} u_t + \lambda u - x_d^\alpha \Delta u & = & x_d^\alpha f & \quad \text{in} \quad \Omega_T, \\
u & =& 0 & \quad \text{on} \quad (-\infty, T) \times \partial \mathbb{R}^d_+, \end{array} \right. \end{equation} in which the results obtained in this paper are still new.
The theme of this paper is to study the existence, uniqueness, and regularity estimates for solutions to \eqref{eq:main}. To demonstrate our results, let us state the following theorem, which gives prototypical estimates in the special weighted Lebesgue space $L_p(\Omega, x_d^\gamma\, dz)$ with the power weight $x_d^\gamma$ and norm \[
\|f\|_{L_p(\Omega, x_d^\gamma dz)} = \left( \int_{\Omega_T} |f(t,x)|^p x_d^\gamma\, dx dt \right)^{1/p}. \] {For any measurable function $f$ and $s \in \mathbb{R}$, we define the multiplicative operator $(\mathbf{M}^s f)(\cdot)=x_d^s f(\cdot)$.} \begin{theorem} \label{thm:demo} Let $\alpha \in (0,2), \lambda >0$, $p\in (1,\infty)$, and $\gamma \in \big(p(\alpha-1)_+-1,2p-1\big)$. Then, for every $f \in L_p(\Omega, x_d^\gamma\, dz)$, there exists a unique strong solution $u$ to \eqref{simplest-eqn}, which satisfies \begin{align} \label{show-est-1}
\|\mathbf{M}^{-\alpha}u_t\|_{L_p}+\lambda\|\mathbf{M}^{-\alpha}u\|_{L_p}
+\|D^2 u\|_{L_p}\le N\|f\|_{L_p}; \end{align} and for $\gamma\in (\alpha p/2-1,2p-1)$, \begin{equation}\label{show-est-2}
\lambda^{1/2}\|\mathbf{M}^{-\alpha/2}Du\|_{L_p}
\le N\|f\|_{L_p}, \end{equation}
where $\|\cdot\|_{L_p} = \|\cdot \|_{L_p(\Omega, x_d^\gamma dz)}$
and $N=N(d,\nu,\alpha, \gamma, p)>0$. \end{theorem} \noindent See Corollary \ref{cor1} and Theorem \ref{thm:xd} for more general results. We note that the ranges of $\gamma$ in \eqref{show-est-1}--\eqref{show-est-2} are optimal as pointed out in Remarks \ref{remark-1-range}--\ref{remark-2-range} below. In fact, in this paper, a much more general result in weighted mixed-norm spaces is established in Theorem \ref{main-thrm}. As an application, we obtain a regularity result for solutions to degenerate viscous Hamilton-Jacobi equations in Theorem \ref{example-thrm}. To the best of our knowledge, our main results (Theorems \ref{thm:demo}, \ref{main-thrm}, \ref{thm:xd}, Corollary \ref{cor1}, and Theorem \ref{example-thrm}) appear for the first time in the literature.
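To illustrate the admissible powers in Theorem \ref{thm:demo}, take for instance $p = 2$ and $\alpha = 1$: then $p(\alpha - 1)_+ - 1 = -1$ and $2p - 1 = 3$, so \eqref{show-est-1} holds for every weight $x_d^{\gamma}$ with $\gamma \in (-1, 3)$, while $\alpha p/2 - 1 = 0$, so the gradient estimate \eqref{show-est-2} is asserted for the smaller range $\gamma \in (0, 3)$.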
\subsection{Relevant literature} The literature on regularity theory for solutions to degenerate elliptic and parabolic equations is extremely rich, and we only describe results related to \eqref{eq:main}.
The divergence form of \eqref{eq:main} was studied by us in \cite{DPT21} with motivation from the regularity theory of solutions to degenerate viscous Hamilton-Jacobi equations of the form \begin{equation}\label{eq:HJ-intro} u_t+\lambda u-\mu(x_d) \Delta u=H(z,Du) \qquad \text{ in } \Omega_T. \end{equation} Here, $H:\Omega_T \times \mathbb{R}^d \to \mathbb{R}$ is a given Hamiltonian. Under some appropriate conditions on $H$, we obtain a regularity {and solvability} result for \eqref{eq:HJ-intro} in Theorem \ref{example-thrm}. Another class of divergence form equations, which is closely related to that in \cite{DPT21}, was analyzed recently in \cite{JinXiong2} when $\alpha<1$. When $\alpha=2$ and $ d=1$, a specific version of \eqref{eq:HJ-intro} gives the well-known Black-Scholes-Merton PDE that appears in mathematical finance. The analysis for \eqref{eq:main} when $\alpha\geq 2$ is completely open.
A similar equation to \eqref{eq:main}, \eqref{simplest-eqn}, and \eqref{eq:HJ-intro} \[
u_t+\lambda u-\beta D_du - x_d \Delta u = f \qquad \text{ in } \Omega_T \] with an additional structural condition $\beta>0$, an important prototype equation in the study of porous media equations and parabolic Heston equation, was studied extensively in the literature (see \cite{DaHa, FePo, Koch, JinXiong1, JinXiong2} and the references therein). We stress that we do not require this structural condition in the analysis of \eqref{eq:main} and \eqref{eq:HJ-intro}, and thus, our analysis is rather different from those in \cite{DaHa, FePo, Koch}.
We note that similar results on the wellposedness and regularity estimates in weighted Sobolev spaces for a different class of equations with singular-degenerate coefficients were established in a series of papers \cite{DP-20, DP-21, DP-JFA, DP-AMS}. There, the weights of singular/degenerate coefficients of $u_t$ and $D^2u$ appear in a balanced way, which plays a crucial role in the analysis and functional space settings. If this balance is lost, then Harnack's inequalities were proved in \cite{Chi-Se-1, Chi-Se-2} to be false in certain cases. However, with an explicit weight $x_d^\alpha$ as in our setting, it is not known if some version of Harnack's inequalities and H\"{o}lder estimates of the Krylov-Safonov type as in \cite{K-S} still hold for \eqref{eq:main}. Of course, \eqref{eq:main} does not have this balance structure, and our analysis is quite different from those in \cite{DP-20, DP-21, DP-JFA, DP-AMS}.
Finally, we emphasize again that the literature on equations with singular-degenerate coefficients is vast. Below, let us give some references on other closely related results. The H\"{o}lder regularity for solutions to elliptic equations with singular and degenerate coefficients, which are in the $A_2$-Muckenhoupt class, was proved in the classical papers \cite{Fabes, FKS}. See also the books \cite{Fichera, OR}, the papers \cite{KimLee-Yun, Sire-1, Sire-2}, and the references therein for other results on the wellposedness, H\"{o}lder, and Schauder regularity estimates for various classes of degenerate equations. Note also that the Sobolev regularity theory version of the results in \cite{Fabes, FKS} was developed and proved in \cite{Men-Phan}. In addition, we would like to point out that equations with degenerate coefficients also appear naturally in geometric analysis \cite{Lin, WWYZ}, in which H\"{o}lder and Schauder estimates for solutions were proved.
\subsection{Main ideas and approaches} The main ideas of this paper are along the lines of those in \cite{DPT21}. However, at the technical level, the proofs of our main results are quite different from those in \cite{DPT21}. More precisely, instead of the $L_2$-estimates as in \cite{DPT21}, the starting point in this paper is the weighted $L_p$-result in Lemma \ref{l-p-sol-lem}, which is based on the weighted $L_p$-estimates for divergence form equations established in \cite{DPT21}, an idea introduced by Krylov \cite{Kr99}, together with a suitable scaling. Moreover, while the proofs in \cite{DPT21} use the Lebesgue measure as an underlying measure, in this paper we make use of the more general underlying measure $\mu_1 (dz) = x_d^{\gamma_1}\, dz$ with an appropriate parameter $\gamma_1$. In particular, this allows us to obtain an optimal range of exponents for power weights in Corollary \ref{cor1}. See Remarks \ref{remark-1-range}--\ref{remark-2-range}. Several new H\"{o}lder estimates for higher-order derivatives of solutions to a class of degenerate homogeneous equations are proved in Subsections \ref{subsec:boundary}--\ref{subsec:int}. The results and techniques developed in these subsections might be of independent interest.
\subsection*{Organization of the paper} The paper is organized as follows. In Section \ref{sec:2}, we introduce various function spaces, assumptions, and then state our main results. The filtration of partitions, a quasi-metric, the weighted mixed-norm Fefferman-Stein theorem and Hardy-Littlewood theorem are recalled in Section \ref{Feffer}. Then, in Section \ref{sec:3}, we consider \eqref{eq:main} in the case when the coefficients in \eqref{eq:main} only depend on the $x_d$ variable. A special version of Theorem \ref{main-thrm}, Theorem \ref{thm:xd}, will be stated and proved in this section. The proofs of Theorem \ref{main-thrm} and Corollary \ref{cor1} are given in Section \ref{sec:4}. Finally, we study the degenerate viscous Hamilton-Jacobi equation \eqref{eq:HJ-intro} in Section \ref{sec:5}.
\section{Function spaces, parabolic cylinders, mean oscillations, and main results}\label{sec:2} \subsection{Function spaces} Fix $p,q \in [1, \infty)$, $-\infty\le S<T\le +\infty$, and a domain $\cD \subset \mathbb{R}^d_+$. Denote by $L_p((S,T)\times \cD)$ the usual Lebesgue space consisting of measurable functions $u$ on $(S,T)\times \cD$ such that \[
\|u\|_{L_p( (S,T)\times \cD)}= \left( \int_{(S,T)\times \cD} |u(t,x)|^p\, dxdt \right)^{1/p} <\infty. \] For a given weight $\omega$ on $(S,T)\times \cD$, let $L_{p}((S,T)\times \cD,\omega)$ be the weighted Lebesgue space on $(S,T)\times \cD$ equipped with the norm \begin{equation*}
\|u\|_{L_{p}((S,T)\times \cD, \omega)}=\left(\int_{(S,T)\times \cD} |u(t,x)|^p \omega (t,x)\, dx dt\right)^{1/p} < \infty. \end{equation*} For the weights $\omega_0=\omega_0(t)$, $\omega_1=\omega_1(x)$, and a measure $\sigma$ on $\cD$, set $\omega(t,x)=\omega_0(t)\omega_1(x)$ and define $L_{q,p}((S,T)\times \cD,\omega d\sigma)$ to be the weighted and mixed-norm Lebesgue space on $(S,T)\times \cD$ equipped with the norm \[
\|u\|_{L_{q,p}((S,T)\times \cD, \omega d\sigma)}=\left(\int_S^T \left(\int_{\cD} |u(t,x)|^p \omega_1(x)\, \sigma(dx)\right)^{q/p} \omega_0(t)\,dt \right)^{1/q} < \infty. \]
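For instance, when $q = p$, $\omega_0 \equiv 1$, $\omega_1(x) = x_d^{\gamma}$, and $\sigma(dx) = dx$, the mixed norm reduces to a plain weighted Lebesgue norm,
\[
\|u\|_{L_{p,p}((S,T)\times \cD,\, \omega\, d\sigma)} = \left( \int_S^T \int_{\cD} |u(t,x)|^p x_d^{\gamma}\, dx\, dt \right)^{1/p} = \|u\|_{L_{p}((S,T)\times \cD,\, x_d^{\gamma}\, dz)},
\]
which is the norm appearing in Theorem \ref{thm:demo}.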
\subsubsection{Function spaces for nondivergence form equations} Consider $\alpha>0$.
We define the solution spaces as follows. Firstly, define \[
W_{p}^{1,2}((S,T)\times \cD, \omega)
=\left\{u \,:\, \mathbf{M}^{-\alpha} u, \mathbf{M}^{-\alpha} u_t, D^2u \in L_p((S,T) \times \cD,\omega)\right\}, \] where, for $u\in W_{p}^{1,2}((S,T)\times \cD, \omega)$, \begin{multline*}
\|u\|_{W^{1,2}_p((S,T)\times \cD,\omega)}\\
=\| \mathbf{M}^{-\alpha} u\|_{L_p((S,T)\times \cD,\omega)}+\| \mathbf{M}^{-\alpha} u_t\|_{L_p((S,T)\times \cD,\omega)}+\|D^2u\|_{L_p((S,T)\times \cD,\omega)}. \end{multline*}
Let $\sW^{1,2}_p((S,T)\times \cD,\omega)$ be the closure in $W^{1,2}_p((S,T)\times \cD,\omega)$ of all compactly supported functions in $C^\infty((S,T)\times \overline{\cD})$ vanishing near $\overline{\cD} \cap \{x_d=0\}$ if $\overline{\cD} \cap \{x_d=0\}$ is not empty. The space $\sW^{1,2}_p((S,T)\times \cD,\omega)$ is equipped with the same norm $\|\cdot\|_{\sW^{1,2}_p((S,T)\times \cD,\omega)}=\|\cdot\|_{W^{1,2}_p((S,T)\times \cD,\omega)}$. When there is no time dependence, we write these two spaces as $W^2_p(\cD,\omega)$ and $\sW^2_p(\cD,\omega)$, respectively.
Next, denote by \[ \begin{split} & W_{q,p}^{1,2}((S,T)\times \cD, \omega\, d\sigma)\\ & \qquad =\left\{u \,:\, \mathbf{M}^{-\alpha} u, \mathbf{M}^{-\alpha} u_t,D^2u \in L_{q,p}((S,T) \times \cD,\omega\, d\sigma)\right\}, \end{split} \] which is equipped with the norm \begin{multline*}
\|u\|_{W^{1,2}_{q,p}((S,T)\times \cD,\omega\, d\sigma)}
=\| \mathbf{M}^{-\alpha} u\|_{L_{q,p}((S,T)\times \cD,\omega\, d\sigma)}\\
+\| \mathbf{M}^{-\alpha} u_t\|_{L_{q,p}((S,T)\times \cD,\omega\, d\sigma)}+\|D^2u\|_{L_{q,p}((S,T)\times \cD,\omega\, d\sigma)}. \end{multline*}
Let $\sW^{1,2}_{q,p}((S,T)\times \cD,\omega\, d\sigma)$ be the closure in $W^{1,2}_{q,p}((S,T)\times \cD,\omega d\sigma)$ of all compactly supported functions in $C^\infty((S,T)\times \overline{\cD})$ vanishing near $\overline{\cD} \cap \{x_d=0\}$ if $\overline{\cD} \cap \{x_d=0\}$ is not empty. The space $\sW^{1,2}_{q,p}((S,T)\times \cD,\omega\, d\sigma)$ is equipped with the same norm $\|\cdot\|_{\sW^{1,2}_{q,p}((S,T)\times \cD,\omega\, d\sigma)}=\|\cdot\|_{W^{1,2}_{q,p}((S,T)\times \cD,\omega\, d\sigma)}$.
\subsubsection{Function spaces for divergence form equations} We also need function spaces for divergence form equations in this paper, which are taken from \cite{DPT21}. Set $$ W^1_p((S,T)\times \cD, \omega)=\left\{u\,:\, \mathbf{M}^{-\alpha/2} u, Du\in L_p((S,T)\times \cD, \omega)\right\}, $$ which is equipped with the norm $$
\|u\|_{W^1_p((S,T)\times \cD,\omega)}=\| \mathbf{M}^{-\alpha/2} u\|_{L_p((S,T)\times \cD,\omega)}+\|Du\|_{L_p((S,T)\times \cD,\omega)}. $$
We denote by $\sW^1_p((S,T)\times \cD,\omega)$ the closure in $W^1_p((S,T)\times \cD,\omega)$ of all compactly supported functions in $C^\infty((S,T)\times \overline{\cD})$ vanishing near $\overline{\cD} \cap \{x_d=0\}$ if $\overline{\cD} \cap \{x_d=0\}$ is not empty. The space $\sW^1_p((S,T)\times \cD,\omega)$ is equipped with the same norm $\|\cdot\|_{\sW^1_p((S,T)\times \cD,\omega)}=\|\cdot\|_{W^1_p((S,T)\times \cD,\omega)}$.
Set
\[ \begin{split} & \mathbb{H}_{p}^{-1}( (S,T)\times \cD, \omega) \\ & =\big\{u\,:\, u = \mu(x_d) D_iF_i +f_1+f_2, \ \ \text{where}\ \mathbf{M}^{1-\alpha} f_1,\mathbf{M}^{-\alpha/2}f_2\in L_{p}( (S,T)\times \cD, \omega)\\ & \qquad\text{and }
F= (F_1,\ldots,F_d) \in L_{p}((S,T)\times \cD, \omega)^{d}\big\}, \end{split} \] equipped with the norm \begin{align*}
&\|u\|_{\mathbb{H}_{p}^{-1}((S,T)\times \cD, \omega)} \\
&=\inf\big\{\|F\|_{L_{p}((S,T)\times \cD, \omega)}
+\|| \mathbf{M}^{1-\alpha} f_1|+|\mathbf{M}^{-\alpha/2}f_2|\|_{L_{p}((S,T)\times \cD, \omega)}\,:\\ &\qquad u= \mu(x_d) D_iF_i +f_1+f_2\big\}. \end{align*} Define \[
\cH_{p}^1((S,T)\times \cD, \omega)
=\big\{u \,:\, u \in \sW^1_p((S,T) \times \cD,\omega),
u_t\in \mathbb{H}_{p}^{-1}( (S,T)\times \cD, \omega)\big\}, \] where, for $u\in \cH_{p}^1((S,T)\times \cD, \omega)$, \begin{align*}
\|u\|_{\cH_{p}^1((S,T)\times \cD, \omega)} &= \|\mathbf{M}^{-\alpha/2} u\|_{L_{p}((S,T)\times \cD, \omega)} + \|Du\|_{L_{p}((S,T)\times \cD, \omega)} \\
& \qquad +\|u_t\|_{\mathbb{H}_{p}^{-1}((S,T)\times \cD, \omega)}. \end{align*}
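For orientation, we note a simple consequence of the definition of $\mathbb{H}^{-1}_p$: any $g$ with $\mathbf{M}^{-\alpha/2} g \in L_p((S,T)\times \cD, \omega)$ belongs to $\mathbb{H}_{p}^{-1}((S,T)\times \cD, \omega)$, as is seen by taking $F = 0$, $f_1 = 0$, and $f_2 = g$ in the representation $g = \mu(x_d) D_i F_i + f_1 + f_2$, which gives
\[
\|g\|_{\mathbb{H}_{p}^{-1}((S,T)\times \cD, \omega)} \leq \|\mathbf{M}^{-\alpha/2} g\|_{L_p((S,T)\times \cD, \omega)}.
\]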
\subsection{Parabolic cylinders} We use the same setup as that in \cite{DPT21}. For $x_0 = (x_0', x_{0d}) \in \mathbb{R}^{d-1} \times \mathbb{R}_+$ and $\rho>0$, denote by $B_\rho(x_0)$ the usual ball with center $x_0$ and radius $\rho$ in $\mathbb{R}^d$, $B_\rho'(x_0')$ the ball with center $x_0'$ and radius $\rho$ in $\mathbb{R}^{d-1}$, and \[ B_\rho^+(x_0) = B_\rho(x_0) \cap \mathbb{R}^d_+. \] We note that \eqref{eq:main} is invariant under the scaling \begin{equation} \label{scaling} (t,x) \mapsto (s^{2-\alpha} t, sx), \qquad s > 0. \end{equation} When $x_d \sim x_{0d} \gg 1$, $a_{ij} = \delta_{ij}$, and $\lambda =f=0$, the equation \eqref{eq:main} behaves like a heat equation \[ u_t -x_{0d}^{\alpha} \Delta u = 0, \] which can be reduced to the heat equation with unit heat constant under the scaling \[ (t,x) \mapsto (s^{2-\alpha} t, s^{1-\alpha/2} x_{0d}^{-\alpha/2}x), \quad s>0. \] It is thus natural to use the following parabolic cylinders in $\Omega_T$ in this paper. For $z_0 = (t_0, x_0) \in (-\infty, T) \times \mathbb{R}^d_+$ with $x_0= (x_0', x_{0d}) \in \mathbb{R}^{d-1} \times \mathbb{R}_+$ and $\rho>0$, set \begin{equation} \label{def:Q} \begin{split} & Q_{\rho}(z_0) = (t_0 - \rho^{2-\alpha}, t_0) \times B_{r(\rho, x_{0d})} (x_0), \quad \\ &Q_{\rho}^+(z_0) = Q_\rho(z_0) \cap \{x_d>0\}, \end{split} \end{equation} where \begin{equation} \label{def:r} r(\rho,x_{0d}) = \max\{\rho, x_{0d}\}^{\alpha/2} \rho^{1-\alpha/2}. \end{equation} Of course, $Q_{\rho}(z_0) = Q_{\rho}^+(z_0) \subset (-\infty, T) \times \mathbb{R}^d_+$ for $\rho \in (0,x_{0d})$.
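To make the invariance under \eqref{scaling} explicit, let us verify it in the model case $a_0 = c_0 = 1$, $\mu(x_d) = x_d^\alpha$, $a_{ij} = \delta_{ij}$, and $\lambda = f = 0$. If $u_t - x_d^\alpha \Delta u = 0$ and $v(t,x) := u(s^{2-\alpha} t, s x)$ for a fixed $s>0$, then
\[
v_t(t,x) - x_d^\alpha \Delta v(t,x) = s^{2-\alpha} u_t(s^{2-\alpha} t, sx) - s^2 x_d^{\alpha} (\Delta u)(s^{2-\alpha} t, sx) = s^{2-\alpha} \big( u_t - y_d^{\alpha} \Delta u \big)(s^{2-\alpha} t, sx) = 0,
\]
since $s^2 x_d^{\alpha} = s^{2-\alpha} (s x_d)^{\alpha}$, where $y_d = s x_d$ denotes the last spatial coordinate of the rescaled point. The two regimes in \eqref{def:r} reflect exactly these two scalings: when $x_{0d} \leq \rho$ we have $r(\rho, x_{0d}) = \rho$, so $Q_\rho(z_0)$ has temporal length $\rho^{2-\alpha}$ and spatial radius $\rho$ as in \eqref{scaling}, while for $x_{0d} > \rho$ the radius $r(\rho, x_{0d}) = x_{0d}^{\alpha/2} \rho^{1-\alpha/2} = \big(x_{0d}^{\alpha} \rho^{2-\alpha}\big)^{1/2}$ is the spatial scale of a parabolic cylinder of temporal length $\rho^{2-\alpha}$ for the frozen-coefficient operator $\partial_t - x_{0d}^{\alpha} \Delta$.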
Finally, for $z' = (t, x') \in \mathbb{R} \times \mathbb{R}^{d-1}$, we write \[ Q_{\rho}'(z') = (t-\rho^{2-\alpha}, t) \times B_{\rho}'(x'). \] \subsection{Mean oscillations and main results} \label{main-result-sect} Throughout the paper, for a locally integrable function $f$, a locally finite measure $\omega$, and a domain $Q\subset \mathbb{R}^{d+1}$, we write \begin{equation} \label{everage-def} (f)_{Q} = \fint_{Q} f(s,y)\, dyds, \qquad (f)_{Q,\omega} = \frac{1}{\omega(Q)}\int_{Q} f(s,y) \,\omega(dyds). \end{equation} Also, for a number $\gamma_1 \in (-1, \infty)$ to be determined, we define \[ \mu_1(dz) = x_d^{\gamma_1}\, dxdt. \] We impose the following assumption on the partial mean oscillations of the coefficients $(a_{ij})$, $a_0$, and $c_0$. \begin{assumption}[$\rho_0,\gamma_1, \delta$] \label{assumption:osc} For every $\rho \in (0, \rho_0)$ and $z_0= (z_0', z_{0d}) \in \overline{\Omega}_T$, there exist $[a_{ij}]_{\rho, z'}, [a_{0 }]_{\rho, z'}, [c_{0 }]_{\rho, z'}: ((x_{d} -r(\rho, x_d))_+, x_d + r(\rho, x_d)) \rightarrow \mathbb{R}$ such that \eqref{con:mu}--\eqref{con:ellipticity} hold on $((x_{d} -r(\rho, x_d))_+, x_d + r(\rho, x_d))$ with $[a_{ij}]_{\rho, z'}$, $[a_{0 }]_{\rho, z'}$, $[c_{0 }]_{\rho, z'}$ in place of $(a_{ij})$, $a_0$, $c_0$, respectively, and \begin{align*}
a_\rho^{\#}(z_0):= & \max_{1 \leq i, j \leq d}\fint_{Q_\rho^+(z_0)} | a_{ij}(z) -[a_{ij}]_{\rho,z'}(x_d)|\, \mu_1(dz) \\
& \qquad + \fint_{Q_\rho^+(z_0)} | a_{0}(z) -[a_{0}]_{\rho,z'}(x_d)|\, \mu_1(dz) \\
& \qquad + \fint_{Q_\rho^+(z_0)} | c_{0}(z) -[c_{0}]_{\rho,z'}(x_d)|\, \mu_1(dz) < \delta. \end{align*} \end{assumption} \noindent We note that the un-weighted partial mean oscillation was introduced in \cite{Kim-Krylov} to study a class of elliptic equations with uniformly elliptic and bounded coefficients (i.e., $\gamma_1=\alpha=0$). Note also that by dividing the equation \eqref{eq:main} by $a_{dd}$ and adjusting $\nu$, we can assume without loss of generality throughout the paper that \begin{equation} \label{add-assumption} a_{dd} \equiv 1. \end{equation}
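Concerning the normalization \eqref{add-assumption}, we record the elementary observation behind it. By \eqref{con:ellipticity} with $\xi = e_d$ we have $a_{dd}(z) \in [\nu, \nu^{-1}]$, and dividing \eqref{eq:main} by $a_{dd}$ replaces $(a_0, c_0, a_{ij}, f)$ with $(a_0/a_{dd}, c_0/a_{dd}, a_{ij}/a_{dd}, f/a_{dd})$. The new coefficients satisfy \eqref{con:mu} and \eqref{con:ellipticity} with $\nu$ replaced by $\nu^2$, since, for instance,
\[
\frac{a_{ij}(z)}{a_{dd}(z)}\, \xi_i \xi_j \geq \frac{\nu |\xi|^2}{\nu^{-1}} = \nu^2 |\xi|^2 \quad \text{and} \quad \left| \frac{a_{ij}(z)}{a_{dd}(z)} \right| \leq \frac{\nu^{-1}}{\nu} = \nu^{-2},
\]
and the new forcing term $f/a_{dd}$ is comparable to $f$, pointwise and in every norm used below.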
The theorem below is the first main result of our paper, in which the definition of the $A_p$ Muckenhoupt class of weights can be found in Definition \ref{Def-Muck-wei} below. \begin{theorem} \label{main-thrm} Let $T \in (-\infty, \infty]$, $\nu \in (0,1)$, $p, q, K \in (1, \infty)$, $\alpha \in (0, 2)$, and $\gamma_1 \in (\beta_0 -\alpha, \beta_0 -\alpha +1]$ for $\beta_0 \in {(\alpha-1}, \min\{1, \alpha\}]$. Then, there exist $\delta = \delta(d, \nu, p, q, K, \alpha, \gamma_1)>0$ sufficiently small and $\lambda_0 = \lambda_0(d, \nu, p, q, K, \alpha, \gamma_1)>0$ sufficiently large such that the following assertion holds. Suppose that \eqref{con:mu}, \eqref{con:ellipticity}, and \eqref{add-assumption} are satisfied, $\omega_0 \in A_q(\mathbb{R})$, $\omega_1 \in A_p(\mathbb{R}^d_+, \mu_1)$ with \[ [\omega_0]_{A_q(\mathbb{R})} \leq K \quad \text{and} \quad [\omega_1]_{A_p(\mathbb{R}^d_+, \mu_1)} \leq K, \quad \text{where} \,\, \mu_1(dz) = x_d^{\gamma_1} dxdt. \] Suppose also that Assumption \ref{assumption:osc} $(\rho_0, \gamma_1,\delta)$ holds for some $\rho_0>0$. Then, for any function $f \in L_{q, p}(\Omega_T, x_d^{p(\alpha-\beta_0)} \omega\, d\mu_1)$ and $\lambda \geq \lambda_0 \rho_0^{-(2-\alpha)}$, there exists a strong solution $u{\in \sW^{1,2}_{q, p}(\Omega_T, x_d^{p(\alpha-\beta_0)} \omega\, d\mu_1)}$ to the equation \eqref{eq:main}, which satisfies \begin{equation} \label{main-est-1}
\|\mathbf{M}^{-\alpha} u_t\|_{L_{q,p}} + \|D^2u\|_{L_{q,p}} + \lambda \|\mathbf{M}^{-\alpha} u\|_{L_{q,p}} \leq N \| f\|_{L_{q,p}}, \end{equation} where $\omega(t, x) = \omega_0(t) \omega_1(x)$ for $(t,x) \in \Omega_T$, $L_{q,p} = L_{q,p}(\Omega_T, x_d^{p(\alpha-\beta_0)} \omega \, d\mu_1)$, and $N = N(d, \nu, p, q, \alpha, \gamma_1)>0$. Moreover, if $\beta_0 \in {(\alpha-1}, \alpha/2]$, then it also holds that \begin{equation} \label{main-est-2} \begin{split}
& \|\mathbf{M}^{-\alpha} u_t\|_{L_{q,p}} + \| D^2u\|_{L_{q,p}} + \lambda \|\mathbf{M}^{-\alpha} u\|_{L_{q,p}} + \lambda^{1/2} \|\mathbf{M}^{-\alpha/2} Du\|_{L_{q,p}} \\
& \leq N \|f\|_{L_{q,p}}. \end{split} \end{equation} \end{theorem}
The following is an important corollary of Theorem \ref{main-thrm} in which $\omega_1$ is a power weight of the $x_d$ variable and $\beta_0$ and $\gamma_1$ are specifically chosen. \begin{corollary} \label{cor1} Let $T \in (-\infty, \infty]$, $\nu \in (0,1)$, $p, q \in (1, \infty)$, $\alpha \in (0, 2)$, and $\gamma \in (p(\alpha-1)_+ -1, 2p-1)$. Then, there exist $\delta = \delta(d, \nu, p, q, \alpha, \gamma)>0$ sufficiently small and $\lambda_0 = \lambda_0(d, \nu, p, q, \alpha, \gamma)>0$ sufficiently large such that the following assertion holds. Suppose that \eqref{con:mu}, \eqref{con:ellipticity} hold and suppose also that Assumption \ref{assumption:osc} $(\rho_0, 1-(\alpha-1)_+, \delta)$ holds for some $\rho_0>0$. Then, for any $f \in L_{q, p}(\Omega_T, x_d^{\gamma} dz)$ and $\lambda \geq \lambda_0 \rho_0^{-(2-\alpha)}$, there exists a strong solution $u \in \sW^{1,2}_{q, p}(\Omega_T, x_d^\gamma\, dz)$ to the equation \eqref{eq:main}, which satisfies \begin{equation} \label{cor-est-1}
\|\mathbf{M}^{-\alpha} u_t\|_{L_{q,p}} + \|D^2u\|_{L_{q,p}} + \lambda \|\mathbf{M}^{-\alpha} u\|_{L_{q,p}} \leq N \|f\|_{L_{q,p}}, \end{equation} where $L_{q,p} = L_{q,p}(\Omega_T, x_d^\gamma dz)$ and $N = N(d, \nu, p, q, \alpha, \gamma)>0$. Additionally, if Assumption \ref{assumption:osc} $(\rho_0, 1-\alpha/2, \delta)$ also holds and $\gamma \in (\alpha p/2 -1, 2p-1)$, then we have \begin{equation} \label{cor-est-2} \begin{split}
&\|\mathbf{M}^{-\alpha} u_t\|_{L_{q,p}} + \|D^2u\|_{L_{q,p}} + \lambda \|\mathbf{M}^{-\alpha} u\|_{L_{q,p}} + \lambda^{1/2} \|\mathbf{M}^{-\alpha/2} Du\|_{L_{q,p}} \\
& \leq N \|f\|_{L_{q,p}}. \end{split} \end{equation} \end{corollary}
\begin{remark} By viewing solutions of elliptic equations as stationary solutions of parabolic equations, we derive from Theorem \ref{main-thrm} and Corollary \ref{cor1} the corresponding results for elliptic equations. Furthermore, combining Corollary \ref{cor1} with the weighted Morrey embedding (see, for instance, \cite[Theorem 5.3]{RO}), we obtain the $C^{1,\alpha}$ regularity of solutions to the corresponding elliptic equations when $p>d+\gamma$. \end{remark}
In the remarks below, we give examples showing that the ranges of $\gamma$ in \eqref{show-est-1}--\eqref{show-est-2} as well as in \eqref{cor-est-1}--\eqref{cor-est-2} are optimal. We note that the range of $\gamma$ for the estimate of $Du$ in \eqref{show-est-2}, \eqref{main-est-2}, and \eqref{cor-est-2} is smaller than that for $u, u_t, D^2u$ in \eqref{show-est-1}, \eqref{main-est-1}, and \eqref{cor-est-1}. See Remark \ref{remark-3-range} below for the necessity of such different ranges.
\begin{remark} \label{remark-1-range} When $\alpha \in (0,1)$, the range $(p(\alpha-1)_+ -1, 2p-1)$ for the power $\gamma$ in \eqref{show-est-1} becomes $(-1,2p-1)$, which agrees with the range in \cite{KN} for equations with uniformly elliptic and bounded coefficients. See also \cite{DP-JFA} and \cite{MNS} in which a similar range of the power $\gamma$ is used for a class of equations of extensional type. When $\alpha \in [1,2)$, the lower bound $p(\alpha-1)_+ -1$ for $\gamma$ in \eqref{show-est-1} is optimal. To see this, consider an explicit example when $d=1$, $\lambda>0$, $T < \infty$, and \[ u(t,x)=\left(x + c x^{3-\alpha}\right) \xi(x) e^{\lambda t} \qquad \text{ for } (t,x) \in \Omega_T. \]
Here, $\xi \in C^\infty([0,\infty),[0,\infty))$ is a cutoff function such that $\xi=1$ on $[0,1]$, $\xi=0$ on $[3,\infty)$, $\|\xi'\|_{L^\infty(\mathbb{R})} \leq 1$, and \[ c=\frac{2 \lambda}{(3-\alpha)(2-\alpha)}. \] Set \[ f(t,x)=x^{-\alpha}(u_t +\lambda u) - u_{xx}. \] Then, $u$ solves \[ u_t + \lambda u - x^\alpha u_{xx} = x^\alpha f \qquad \text{ in } \Omega_T. \] We see that $\mathbf{M}^{-\alpha}u_t, \mathbf{M}^{-\alpha}u \in L_p(\Omega_T, x^\gamma)$ for $\gamma>p(\alpha-1)-1$. On the other hand, \[ \begin{split}
& \int_{\Omega_T} |x^{-\alpha}u|^p x^{p(\alpha-1)-1}\,dz
=\int_{\Omega_T} |x^{-1}u|^p x^{-1}\,dz \\ & \geq \int_{0}^1 \int_{-\infty}^T x^{-1} e^{p\lambda t}\,dt dx = N\int_{0}^1 x^{-1} \,dx=\infty. \end{split} \] Thus, $\mathbf{M}^{-\alpha}u_t, \mathbf{M}^{-\alpha}u \notin L_p(\Omega_T, x^{p(\alpha-1)-1})$.
We next note that $f(t,x)=0$ for $(t,x) \in (-\infty,T] \times [3,\infty)$, and \[ f(t,x)=2 c\lambda x^{3-2\alpha} e^{\lambda t} \qquad \text{ for } (t,x) \in (-\infty,T] \times [0,1]. \] From this and \[
\int_{0}^1 \int_{-\infty}^T |x^{3-2\alpha}|^p x^{p(\alpha-1)-1} e^{p \lambda t}\, dt dx = N \int_0^1 x^{p(2-\alpha)-1} dx < \infty, \] it follows that $f\in L_p(\Omega_T, x^{p(\alpha-1)-1})$. \end{remark}
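For completeness, we record the computation behind the formula for $f$ in Remark \ref{remark-1-range}. On $(-\infty,T] \times [0,1]$, where $\xi \equiv 1$, we have $u_t = \lambda u$ and, by the choice of $c$, $u_{xx} = c(3-\alpha)(2-\alpha)x^{1-\alpha}e^{\lambda t} = 2\lambda x^{1-\alpha} e^{\lambda t}$, so that
\[
f = x^{-\alpha}(u_t + \lambda u) - u_{xx} = 2\lambda x^{1-\alpha}e^{\lambda t} + 2c\lambda x^{3-2\alpha} e^{\lambda t} - 2\lambda x^{1-\alpha}e^{\lambda t} = 2c\lambda x^{3-2\alpha}e^{\lambda t}.
\]
On $(-\infty,T]\times[1,3]$, where $\xi$ varies, $u$ is smooth and $x$ is bounded away from zero, so $f$ is bounded there and does not affect the integrability statements above.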
\begin{remark} \label{remark-3-range} When $\alpha \in (0,2)$, the lower bound $\alpha p/2 -1$ for $\gamma$ in \eqref{show-est-2} is optimal. Indeed, consider the same example as that in Remark \ref{remark-1-range} above.
It is clear that $\mathbf{M}^{-\alpha/2}u_x \in L_p(\Omega_T, x^\gamma)$ for $\gamma> \alpha p/2-1$. On the other hand, $\mathbf{M}^{-\alpha/2}u_x \notin L_p(\Omega_T, x^{\alpha p/2-1})$ as \[
\int_{\Omega_T} |x^{-\alpha/2}u_x|^p x^{\alpha p/2-1}\,dz
=\int_{\Omega_T} |u_x|^p x^{-1}\,dz \geq \int_{0}^1 \int_{-\infty}^T x^{-1} e^{p\lambda t}\,dt dx =\infty. \] Besides, $f(t,x)=0$ for $(t,x) \in (-\infty,T] \times [3,\infty)$, and \[ f(t,x)=2 c\lambda x^{3-2\alpha} e^{\lambda t} \qquad \text{ for } (t,x) \in (-\infty,T] \times [0,1]. \] Hence, $f \in L_p(\Omega_T, x^{\alpha p/2-1})$ as \[
\int_{0}^1 \int_{-\infty}^T |x^{3-2\alpha}|^p x^{\alpha p/2-1} e^{p \lambda t}\, dt dx = N \int_0^1 x^{p(3-3\alpha/2)-1} dx < \infty. \] \end{remark}
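Regarding the divergence of the integral in Remark \ref{remark-3-range}, we note that on $(-\infty,T]\times[0,1]$ one has
\[
u_x = \big(1 + c(3-\alpha)x^{2-\alpha}\big)e^{\lambda t} \geq e^{\lambda t}
\]
since $c>0$, which is the lower bound used there.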
\begin{remark} \label{remark-2-range} We also have that the upper bound $\gamma < 2p-1$ in \eqref{show-est-1}--\eqref{show-est-2} is optimal. Indeed, for $\gamma=2p-1$, the trace of $W_{p}^{2}(\cD,x_d^{2p-1})$ is not well defined. For simplicity, let $d=1$, $\cD=[0,1/2]$, and consider \[
\phi(x) = \log (|\log x|). \] Then, \[
\phi_{xx} = \frac{1}{x^2} \left( |\log x|^{-1} - |\log x|^{-2} \right). \] It is clear that $\phi \in W_{p}^{2}([0,1/2],x^{2p-1})$, while $\phi(x) \to \infty$ as $x \to 0^+$, so $\phi$ has no finite trace at $0$. \end{remark}
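Concerning the membership $\phi \in W_p^2([0,1/2], x^{2p-1})$ claimed in Remark \ref{remark-2-range}, the second-order term is the critical one: since $|\phi_{xx}| \leq N x^{-2}|\log x|^{-1}$ on $(0,1/2]$,
\[
\int_0^{1/2} |\phi_{xx}|^p x^{2p-1}\, dx \leq N \int_0^{1/2} \frac{dx}{x|\log x|^{p}} = N \int_{\log 2}^\infty s^{-p}\, ds < \infty
\]
for $p>1$, while the terms involving $\phi$ and $\phi_x = 1/(x\log x)$ are estimated in the same way.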
\section{A filtration of partitions and a quasi-metric} \label{Feffer} We recall the construction of a filtration of partitions $\{\mathbb{C}_n\}_{n \in \mathbb{Z}}$ (i.e., dyadic decompositions) of $\mathbb{R}\times \mathbb{R}^d_+$ in \cite{DPT21}, which satisfies the following three basic properties (see \cite{Krylov}): \begin{enumerate}[(i)] \item The elements of partitions are ``large'' for big negative $n$'s and ``small'' for big positive $n$'s: for any $f\in L_{1,\text{loc}}$, $$
\inf_{C\in \mathbb{C}_n}|C|\to \infty\quad\text{as}\,\,n\to -\infty,\quad \lim_{n\to \infty}(f)_{C_n(z)}=f(z)\quad\text{a.e.}, $$ where $C_n(z)\in \mathbb{C}_n$ is such that $z\in C_n(z)$.
\item The partitions are nested: for each $n\in \mathbb{Z}$, and $C \in \mathbb{C}_n$, there exists a unique $C' \in \mathbb{C}_{n-1}$ such that $C \subset C'$.
\item The following regularity property holds: For $n,C, C'$ as in (ii), we have $$
|C'|\le N_0|C|, $$ where $N_0>0$ is independent of $n$, $C$, and $C'$. \end{enumerate}
For $s\in \mathbb{R}$, denote by $\lfloor s \rfloor$ the integer part of $s$. For a fixed $\alpha\in (0,2)$ and $n\in \mathbb{Z}$, let $k_0=\lfloor -n/(2-\alpha) \rfloor$. The partition $\mathbb{C}_n$ contains boundary cubes in the form $$ ((j-1)2^{-n},j2^{-n}]\times (i_12^{k_0},(i_1+1)2^{k_0}] \times\cdots\times (i_{d-1}2^{k_0},(i_{d-1}+1)2^{k_0}]\times (0, 2^{k_0}], $$ where $j,i_1,\ldots,i_{d-1}\in \mathbb{Z}$, and interior cubes in the form $$ ((j-1)2^{-n},j2^{-n}]\times (i_12^{k_2},(i_1+1)2^{k_2}] \times\cdots \times (i_d2^{k_2}, (i_d+1)2^{k_2}], $$ where $j,i_1,\ldots,i_{d}\in \mathbb{Z}$ and \begin{equation}
\label{eq:part1} i_d2^{k_2}\in [2^{k_1},2^{k_1+1})\, \text{for some integer}\, k_1\ge k_0, \quad k_2=\lfloor (-n+k_1\alpha)/2 \rfloor-1. \end{equation} It is clear that $k_2$ increases with respect to $k_1$ and decreases with respect to $n$. As $k_1\ge k_0>-n/(2-\alpha)-1$, we have $(-n+k_1\alpha)/2-1\le k_1$, which implies $k_2\le k_1$ and $(i_d+1)2^{k_2}\le 2^{k_1+1}$. According to \eqref{eq:part1}, we also have $$ (2^{k_2}/2^{k_1})^2\sim 2^{-n}/(2^{k_1})^{2-\alpha}, $$ which allows us to apply the interior estimates after a scaling.
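To see the last comparability, note that by the choice of $k_2$ in \eqref{eq:part1} we have $2^{2k_2}\sim 2^{-n+k_1\alpha}$, and hence
\[
\Big(\frac{2^{k_2}}{2^{k_1}}\Big)^2 \sim 2^{-n+k_1\alpha-2k_1} = \frac{2^{-n}}{(2^{k_1})^{2-\alpha}}.
\]
In other words, for an interior cube located at distance comparable to $2^{k_1}$ from $\{x_d=0\}$, the spatial side length $2^{k_2}$ and the time length $2^{-n}$ satisfy $(2^{k_2})^2\sim 2^{-n}(2^{k_1})^{\alpha}$, which is consistent with the scaling of \eqref{eq:main}: by \eqref{con:mu}, the time variable scales like $x_d^{-\alpha}$ times the square of the spatial length.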
The quasi-metric $\varrho: \Omega_\infty\times \Omega_\infty\to [0,\infty)$ is defined as $$
\varrho((t,x),(s,y))=|t-s|^{1/(2-\alpha)}
+\min\big\{|x-y|,|x-y|^{2/(2-\alpha)}\min\{x_d,y_d\}^{-\alpha/(2-\alpha)}\big\}. $$ There exists a constant $K_1=K_1(d,\alpha)>0$ such that $$
\varrho((t,x),(s,y))\le K_1\big(\varrho((t,x),(\hat t,\hat x))+ \varrho((\hat t,\hat x),(s,y))\big) $$ for any $(t,x),(s,y),(\hat t,\hat x)\in \Omega_\infty$, and $ \varrho((t,x),(s,y))=0$ if and only if $(t,x)=(s,y)$. Besides, the cylinder $Q_\rho^+(z_0)$ defined in \eqref{def:Q} is comparable to $$ \{(t,x)\in \Omega: t<t_0,\, \varrho((t,x),(t_0,x_0))<\rho \}. $$ Of course, $(\Omega_T, \varrho)$ equipped with the Lebesgue measure is a space of homogeneous type and we have the above dyadic decomposition.
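We also note for clarity how the two expressions in the minimum above compare: writing $m = \min\{x_d, y_d\}$, one has
\[
|x-y|^{2/(2-\alpha)} m^{-\alpha/(2-\alpha)} \le |x-y| \quad \Longleftrightarrow \quad |x-y| \le m.
\]
Thus the second expression is the relevant one in the interior regime $|x-y|\le m$, while near the boundary, where $|x-y|\ge m$, the quasi-metric reduces to $|t-s|^{1/(2-\alpha)} + |x-y|$.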
The dyadic maximal function and sharp function of a locally integrable function $f$ with respect to a given weight $\omega$ in $\Omega_\infty$ are defined as \begin{align*}
\cM_{\text{dy},\omega} f(z)&=\sup_{n<\infty}\frac{1}{\omega(C_n(z))}\int_{C_n(z)}|f(s,y)| \omega(s,y)\,dyds,\\
f_{\text{dy},\omega}^{\#}(z)&=\sup_{n<\infty}\frac{1}{\omega(C_n(z))}\int_{C_n(z)}|f(s,y)-(f)_{C_n(z),\omega}|\omega(s,y)\,dyds. \end{align*} Observe that the average notation in \eqref{everage-def} is used in the above definition. Similarly, the maximal function and sharp function over cylinders are given by \begin{align*}
\cM_\omega f(z)&=\sup_{\rho>0,\, z_0\in \overline{\Omega_\infty}:\, z\in Q^+_\rho(z_0)} \frac{1}{\omega(Q_\rho^+(z_0))} \int_{Q_\rho^+(z_0)}|f(s,y)|\omega(s,y)\,dyds,\\
f^{\#}_\omega(z)&=\sup_{\rho>0,\, z_0\in \overline{\Omega_\infty}:\, z\in Q^+_\rho(z_0)}\frac{1}{\omega(Q_\rho^+(z_0))}\int_{Q_\rho^+(z_0)}|f(s,y)-(f)_{Q^+_\rho(z_0)}|\omega(s,y)\,dyds. \end{align*} We have, for any $z\in \Omega_\infty$, $$ \cM_{\text{dy},\omega} f(z)\le N\cM_{\omega} f(z) \qquad \text{ and } \qquad f_{\text{dy},\omega}^{\#}(z)\le Nf^{\#}_\omega(z), $$ where $N=N(d,\alpha)>0$.
We also recall the following definition of the $A_p$ Muckenhoupt class of weights. \begin{definition}
\label{Def-Muck-wei} For each $p \in (1, \infty)$ and for a nonnegative Borel measure $\sigma$ on $\mathbb{R}^d$, a locally integrable function $\omega : \mathbb{R}^d \rightarrow \mathbb{R}_+$ is said to be in the $A_p( \mathbb{R}^d, \sigma)$ Muckenhoupt class of weights if and only if $[\omega]_{A_p(\mathbb{R}^d, \sigma)} < \infty$, where \begin{equation}
\label{Ap.def} \begin{split} & [\omega]_{A_p(\mathbb{R}^d, \sigma)} \\ & = \sup_{\rho >0,x =(x', x_d)\in \mathbb{R}^d } \bigg[\fint_{B_{\rho} (x)} \omega(y)\, \sigma(dy) \bigg]\bigg[\fint_{B_{\rho}(x)} \omega(y)^{\frac{1}{1-p}}\, \sigma(dy) \bigg]^{p-1}. \end{split} \end{equation} Similarly, the class of weights $A_p(\mathbb{R}^d_+, \sigma)$ is defined in the same way, with the ball $B_{\rho} (x)$ in \eqref{Ap.def} replaced by $B_\rho^+(x)$ for $x\in \overline{\mathbb{R}^d_+}$. For weights with respect to the time variable, the definition is similar with the balls replaced by intervals $(t_0 -\rho^{2-\alpha}, t_0 + \rho^{2-\alpha})$ and $\sigma(dy)$ replaced by $dt$. If $\sigma$ is the Lebesgue measure, we simply write $A_p(\mathbb{R}^d_+) = A_p(\mathbb{R}^d_+, dx)$ and $A_p(\mathbb{R}^d) = A_p(\mathbb{R}^d, dx)$. Note that if $\omega \in A_p(\mathbb{R})$, then $\tilde{\omega} \in A_p(\mathbb{R}^d)$ with $[\omega]_{A_p(\mathbb{R})} = [\tilde{\omega}]_{A_p(\mathbb{R}^d)}$, where $\tilde{\omega}(x) = \omega(x_d)$ for $x = (x', x_d) \in \mathbb{R}^d$. Sometimes, when the context is clear, we omit the spatial domain and simply write $\omega \in A_p$. \end{definition}
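A standard family of examples is given by power weights: for $\gamma_1 > -1$ and $\mu_1(dx) = x_d^{\gamma_1}\,dx$ on $\mathbb{R}^d_+$, one has $x_d^{\gamma'} \in A_p(\mathbb{R}^d_+, \mu_1)$ whenever $\gamma' \in (-1-\gamma_1, (1+\gamma_1)(p-1))$; in particular, $x_d^{\gamma'} \in A_p(\mathbb{R}^d_+)$ when $\gamma' \in (-1, p-1)$. These are precisely the weights used in the proofs of Lemma \ref{l-p-sol-lem} and Theorem \ref{thm:xd} below.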
The following version of the weighted mixed-norm Fefferman-Stein theorem and Hardy-Littlewood maximal function theorem can be found in \cite{Dong-Kim-18}. \begin{theorem} \label{FS-thm} Let $p, q \in (1,\infty)$, $\gamma_1 \in (-1, \infty)$, $K\geq 1$, and $\mu_1(dz) = x_d^{\gamma_1}\, dxdt$. Suppose that $\omega_0\in A_q(\mathbb{R})$ and $\omega_1 \in A_p(\mathbb{R}^{d}_{+},\mu_1)$ satisfy $$ [\omega_0]_{A_q}, \,\, [\omega_{1}]_{A_p(\mathbb{R}_+^d, \mu_1)}\le K.$$ Then, for any $f \in L_{q, p}(\Omega_T, \omega\, d\mu_1)$, we have \begin{align*}
& \|f\|_{L_{q, p}(\Omega_T, \omega\, d\mu_1)}
{\leq N \| f^{\#}_{\text{dy},\mu_1}\|_{L_{q,p}(\Omega_T, \omega\, d \mu_1)}}
\leq N \| f^{\#}_{\mu_1}\|_{L_{q,p}(\Omega_T, \omega\, d \mu_1)}, \\
& \|\mathcal{M}_{\mu_1}(f)\|_{L_{q,p}(\Omega_T, \omega\, d\mu_1)} \leq N \|f\|_{L_{q, p}(\Omega_T, \omega\, d \mu_1)}, \end{align*} where $N = N(d, q, p, \gamma_1, K)>0$ and $\omega(t,x) = \omega_0(t)\omega_1(x)$ for $(t,x) \in \Omega_T$. \end{theorem}
\section{Equations with coefficients depending only on the \texorpdfstring{$x_d$}{} variable} \label{sec:3} In this section, we consider \eqref{eq:main} when its coefficients depend only on the $x_d$ variable. Let us denote \begin{equation} \label{L0-def} \sL_0 u = \bar{a}_0(x_d) u_t+\lambda \bar{c}_0(x_d) u-\mu(x_d) \bar{a}_{ij}(x_d)D_iD_j u, \end{equation} where $\mu, \bar{a}_0, \bar{c}_0, \bar{a}_{ij}: \mathbb{R}_+ \rightarrow \mathbb{R}$ are given measurable functions satisfying \eqref{con:mu}--\eqref{con:ellipticity}. We consider \begin{equation}\label{eq:xd} \left\{ \begin{array}{cccl} \sL_0 u & = & \mu(x_d) f \quad &\text{ in } \Omega_T,\\ u & = & 0 \quad &\text{ on } (-\infty, T) \times \partial \mathbb{R}^d_+. \end{array} \right. \end{equation}
The main result of this section is the following theorem, which is a special case of Corollary \ref{cor1}. \begin{theorem}\label{thm:xd} Assume that $\mu, \bar{a}_0, \bar{c}_0, (\bar{a}_{ij})$ satisfy \eqref{con:mu}--\eqref{con:ellipticity} and assume further that $f \in {L_p(\Omega_T,x_d^\gamma\, dz)}$ for some given $p>1$ and \[ \gamma \in \big(p(\alpha-1)_+-1,2p-1\big). \] Then, \eqref{eq:xd} admits a unique strong solution $u \in \sW^{1,2}_p(\Omega_T, x_d^\gamma\, dz)$. Moreover, \begin{align} \notag
& \|\mathbf{M}^{-\alpha}u_t\|_{L_p(\Omega_T,x_d^{\gamma}\, dz)} + \|D^2 u\|_{L_p(\Omega_T,x_d^{\gamma}\, dz)} \\ \label{eq:xd-main}
& \quad \qquad +\lambda\|\mathbf{M}^{-\alpha}u\|_{L_p(\Omega_T,x_d^{\gamma}\, dz)} \le N\|f\|_{L_p(\Omega_T,x_d^{\gamma}\, dz)}; \end{align} and if $\gamma\in (\alpha p/2-1,2p-1)$, we also have \begin{equation}
\label{eq3.09}
\lambda^{1/2}\|\mathbf{M}^{-\alpha/2}Du\|_{L_p(\Omega_T,x_d^{\gamma}\, dz)}
\le N\|f\|_{L_p(\Omega_T,x_d^{\gamma}\, dz)}, \end{equation} where $N=N(d,\nu,\alpha, \gamma, p)>0$. \end{theorem}
The proof of Theorem \ref{thm:xd} requires various preliminary results and estimates. Our starting point is Lemma \ref{l-p-sol-lem} below which gives Theorem \ref{thm:xd} when $\gamma$ is large. See Subsection \ref{subsec:L2} below. Then, in Subsections \ref{subsec:boundary} and \ref{subsec:int}, we derive pointwise estimates for solutions to the corresponding homogeneous equations. Afterwards, we derive the oscillation estimates for solutions in Subsection \ref{subsec:osc-est}. The proof of Theorem \ref{thm:xd} will be given in the last subsection, Subsection \ref{proof-xd}.
Before starting, let us point out several observations as well as recall several needed definitions. Note that by dividing the PDE in \eqref{eq:xd} by $\bar{a}_0$ and then absorbing $\bar{a}_{dd}$ into $\mu(x_d)$, without loss of generality, we may assume that \begin{equation} \label{add-cond} \bar{a}_{dd}=1 \qquad \text{ and } \qquad \bar{a}_0 =1. \end{equation} Observe that \eqref{eq:xd} can be rewritten into a divergence form equation \begin{equation}
\label{eq:xd-div} \bar{a}_0 u_t+\lambda \bar{c}_0(x_d)u-\mu(x_d) D_i(\tilde a_{ij}(x_d) D_{j} u)=\mu(x_d)f \quad \text{ in } \Omega_T, \end{equation} where \begin{equation}
\label{eq:change} \tilde a_{ij}=\left\{
\begin{array}{ll}
\bar{a}_{ij}+ \bar{a}_{ji} & \hbox{for $i\neq d$ and $j=d$;} \\
0 & \hbox{for $i=d$ and $j\neq d$;} \\
\bar{a}_{ij} & \hbox{otherwise.}
\end{array}
\right. \end{equation} We note that even though $(\tilde a_{ij})$ is not symmetric, it still satisfies the ellipticity condition \eqref{con:ellipticity} and also $\tilde a_{dd} =1$ when \eqref{add-cond} holds.
Due to the divergence form as in \eqref{eq:xd-div}, we need the definition of its weak solutions. In fact, sometimes in this section, we consider the following class of equations in divergence form which are slightly more general than \eqref{eq:xd-div} \begin{equation} \label{eq:dx-loc} u_t + \lambda \bar{c}_0(x_d) u - \mu(x_d)D_i (\tilde{a}_{ij}(x_d)D_j u - F_i) = \mu(x_d) f \quad \text{in} \quad (S, T) \times \mathcal{D} \end{equation} with the boundary condition \begin{equation*} u = 0 \quad \text{on} \quad (S, T) \times (\overline{\mathcal{D}} \cap \{x_d =0\}) \end{equation*} for some open set $\mathcal{D} \subset \mathbb{R}^d_+$ and $-\infty \leq S < T \leq \infty$.
\begin{definition} For a given weight $\omega$ defined on $(S, T) \times \mathcal{D} $ and for given $F= (F_1, F_2, \ldots, F_d) \in L_{p, \text{loc}}((S, T) \times \mathcal{D} )^d$ and $f \in L_{p, \text{loc}}((S, T) \times \mathcal{D} )$, we say that a function $u \in \cH_p^1((S, T) \times \mathcal{D} , \omega)$ is a weak solution of \eqref{eq:dx-loc} if \begin{equation} \label{def-local-weak-sol} \begin{split} & \int_{(S, T) \times \mathcal{D} }\mu(x_d)^{-1}(-u \partial_t \varphi + \lambda \overline{c}_0 u \varphi)dz + \int_{(S, T) \times \mathcal{D} } (\tilde{a}_{ij} D_ju - F_i) D_i \varphi dz \\ & = \int_{(S, T) \times \mathcal{D} } f(z) \varphi(z) dz, \quad \forall \ \varphi \in C_0^\infty((S, T) \times \mathcal{D} ). \end{split} \end{equation} \end{definition}
\subsection{\texorpdfstring{$L_p$}{} strong solutions when the powers of weights are large} \label{subsec:L2} The following lemma is the main result of this subsection, which gives Theorem \ref{thm:xd} when $\gamma \in (p-1, 2p-1)$. \begin{lemma} \label{l-p-sol-lem} Let $\nu \in (0,1)$, $\lambda>0$, $\alpha \in (0,2)$, $p \in (1, \infty)$, and $\gamma \in (p-1, 2p-1)$. Assume that $\bar{a}_0, \bar{c}_0, (\bar{a}_{ij})$, and $\mu$ satisfy the ellipticity and boundedness conditions \eqref{con:mu}--\eqref{con:ellipticity}. Then, for any $f \in L_p(\Omega_T, x_d^\gamma\, dz)$, there exists a unique strong solution $u \in \sW^{1,2}_p(\Omega_T, x_d^\gamma\, dz)$ to \eqref{eq:xd}. Moreover, for every solution $u \in \sW^{1,2}_p(\Omega_T, x_d^\gamma\, dz)$ of \eqref{eq:xd} with $f \in L_p(\Omega_T, x_d^\gamma\, dz)$, it holds that \begin{align} \notag
& \lambda \|\mathbf{M}^{-\alpha} u\|_{L_p(\Omega_T, x_d^{\gamma}dz)} + \sqrt{\lambda} \|\mathbf{M}^{-\alpha/2}Du \|_{L_p(\Omega_T, x_d^{\gamma}dz)} \\ \label{est-0405-1}
& \qquad + \| D^2u\|_{L_p(\Omega_T, x_d^{\gamma}dz)} + \|\mathbf{M}^{-\alpha} u_t\|_{L_p(\Omega_T, x_d^{\gamma}dz)} \leq N \|f\|_{L_p(\Omega_T, x_d^{\gamma}dz)}, \end{align} where $N = N(d, \alpha, \nu, \gamma, p)>0$. \end{lemma} \begin{proof} The key idea is to apply \cite[Theorem 2.4]{DPT21} to the divergence form equation \eqref{eq:xd-div}, and then use an idea introduced by Krylov in \cite[Lemma 2.2]{Kr99} with a suitable scaling. To this end, we assume that \eqref{add-cond} holds, and let us denote $\gamma' = \gamma -p \in (-1, p-1)$ and we observe that \[
x_d^{1-\alpha}\mu(x_d) |f(z)| \sim x_d |f(z)| \in L_p(\Omega_T, x_d^{\gamma'}dz). \] As $\gamma' \in (-1, p-1)$, we have $x_d^{\gamma'} \in A_p$. Moreover, the equation \eqref{eq:xd} can be written in divergence form as \eqref{eq:xd-div}. Therefore, we apply \cite[Theorem 2.4]{DPT21} to \eqref{eq:xd-div} with $f_1=\mu(x_d) f$ and $f_2= 0$ to yield the existence of a unique weak solution $u \in \mathscr{H}^1_p(\Omega_T, x_d^{\gamma'}dz)$ of \eqref{eq:xd-div} satisfying \begin{align} \notag
& \|Du\|_{L_{p}(\Omega_T,x_d^{\gamma'} dz)} + \sqrt{\lambda} \|\mathbf{M}^{-\alpha/2}u\|_{L_{p}(\Omega_T,x_d^{\gamma'}dz)} \\ \label{Du-0226}
& \le N\|{x_d^{1-\alpha}f_1}\|_{L_{p}(\Omega_T,x_d^{\gamma'}dz)} = N \|f\|_{L_p(\Omega_T, x_d^\gamma dz)}, \end{align} with $N = N(d, \nu, \gamma, p)>0$. We note here that because the coefficients $\bar{c}_0, \bar{a}_{ij}$ only depend on $x_d$, \cite[Theorem 2.4]{DPT21} holds for any $\lambda>0$ by a scaling argument. From \eqref{Du-0226}, the zero boundary condition, and the weighted Hardy inequality (see \cite[Lemma 3.1]{DP-AMS} for example), we infer that \begin{align} \notag
\|u\|_{L_p(\Omega_T, x_d^{\gamma -2p}dz)} & = \|\mathbf{M}^{-1}u\|_{L_p(\Omega_T, x_d^{\gamma'}dz)} \leq N \|Du\|_{L_{p}(\Omega_T,x_d^{\gamma'}\,dz)} \\ \label{u-op-wei-0226}
& \leq N\|f\|_{L_p(\Omega_T, x_d^\gamma\, dz)}. \end{align}
It remains to prove that \eqref{est-0405-1} holds as it also implies that $u \in \sW^{1,2}_p(\Omega_T, x_d^\gamma)$. We apply the idea introduced by Krylov in \cite[Lemma 2.2]{Kr99} and combine it with a scaling argument to remove the degeneracy of the coefficients. See also \cite[Theorem 3.5]{DK15} and \cite[Lemma 4.6]{DP-JFA}. To this end, let us fix a standard non-negative cut-off function $\zeta \in C_0^\infty((1,2))$. For each $r >0$, let $\zeta_r(s) =\zeta(rs)$ for $s \in \mathbb{R}_+$. Note that with a suitable assumption on the integrability of a given function $v: \Omega_T \rightarrow \mathbb{R}$ and for $\beta \in \mathbb{R}$, by using the substitution $r^{\alpha}t \mapsto s$ for the integration with respect to the time variable, and then using the Fubini theorem, we have \begin{equation} \label{weight-kry} \begin{split}
& \int_0^\infty \left(\int_{\Omega_{r^{-\alpha}T}}|\zeta_r(x_d) v_r(z)|^p\, dz\right) r^{-\beta-1}\, dr =N_1\int_{\Omega_T} |v(z)|^p x_d^{\beta+\alpha}\, dz, \\
& \int_0^\infty \left(\int_{\Omega_{r^{-\alpha} T}}|\zeta_r'(x_d) v_r(z)|^p\, dz\right) r^{-\beta-1}\, dr =N_2\int_{\Omega_T} |v(z)|^p x_d^{\beta + \alpha -p}\, dz, \\
& \int_0^\infty \left(\int_{\Omega_{r^{-\alpha} T}}|\zeta_r''(x_d) v_r(z)|^p\, dz\right) r^{-\beta-1}\, dr =N_3\int_{\Omega_T} |v(z)|^p x_d^{\beta + \alpha-2p}\, dz, \end{split} \end{equation} where $v_r(z) = v(r^{\alpha}t, x)$ for $z = (t, x) \in \Omega_{r^{-\alpha}T}$,
\[
N_1 = \int_0^\infty |\zeta (s)|^p s^{-\beta-\alpha-1} ds, \quad N_2 = \int_0^\infty |\zeta'(s)|^p s^{p-\beta-\alpha-1} ds, \] and \[
N_3 = \int_0^\infty |\zeta''(s)|^p s^{2p-\beta-\alpha -1} ds. \] Next, for $r>0$, we denote $u_r(z) = u(r^{\alpha}t,x)$, \[ \hat{a}_{ij} (x_d)= r^{\alpha}\mu(x_d) \bar{a}_{ij}(x_d), \quad \bar{\lambda} = \lambda r^{\alpha}, \quad \text{and} \quad f_r (z) = r^{\alpha} \mu(x_d) f(r^{\alpha}t, x). \] Note that $u_r$ solves the equation \[ \partial_t u_r + \bar{\lambda} \bar{c}_0 u_r - \hat{a}_{ij}(x_d) D_i D_j u_r = f_r \quad \text{in} \quad \Omega_{r^{-\alpha}T}. \] Let $w(z) =\zeta_r(x_d) u_r(z)$, which satisfies \begin{equation} \label{w-eqn-0405-1} w_t + \bar{\lambda} \bar{c}_0(x_d) w - \hat{a}_{ij}(x_d) D_i D_j w = \hat{g} \quad \text{in} \quad \Omega_{r^{-\alpha}T} \end{equation} with the boundary condition $w(z', 0) =0$ for $z' \in (-\infty, r^{-\alpha}T)\times \mathbb{R}^{d-1}$, where \[ \begin{split} \hat{g}(z) & = \zeta_r f_r(z) - \hat{a}_{dd} \zeta''_r u_r - \sum_{i=1}^{d} \big(\hat{a}_{id} + \hat{a}_{di}\big)\zeta '_rD_i u_r. \end{split} \] We note that $\text{supp}(w) \subset (-\infty, r^{-\alpha}T) \times \mathbb{R}^{d-1} \times(1/r, 2/r)$, and on this set the coefficient matrix $(\hat{a}_{ij})$ is uniformly elliptic and bounded as $r^{\alpha}\mu (x_d) \sim 1$ due to \eqref{con:mu}.
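Before proceeding, let us note for the reader's convenience how the identities in \eqref{weight-kry} are obtained: the substitution $s = r^{\alpha}t$ in the time integral produces a factor $r^{-\alpha}$, and then, after an application of the Fubini theorem, the substitution $\rho = r x_d$ in the $r$-integral gives, for the first identity,
\[
\int_0^\infty |\zeta(r x_d)|^p\, r^{-\beta-\alpha-1}\, dr = x_d^{\beta+\alpha} \int_0^\infty |\zeta(\rho)|^p \rho^{-\beta-\alpha-1}\, d\rho = N_1\, x_d^{\beta+\alpha}.
\]
The other two identities follow in the same way, using $\zeta_r'(x_d) = r\zeta'(r x_d)$ and $\zeta_r''(x_d) = r^2\zeta''(r x_d)$.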
We now prove \eqref{est-0405-1} with the extra assumption that $u \in \sW^{1,2}_p(\Omega_T, x_d^{\gamma'}dz)$. Under this assumption and as $\zeta_r$ is compactly supported in $(0, \infty)$, we see that $w \in W^{1,2}_p(\Omega_{r^{-\alpha}T})$, the usual parabolic Sobolev space. Then by applying the $W^{1, 2}_{p}$-estimate for the uniformly elliptic and bounded coefficient equation \eqref{w-eqn-0405-1} (see, for instance, \cite{D12}), we obtain \[
\bar{\lambda} \|w\| + \bar{\lambda}^{1/2}\, \|Dw\| + \|D^2 w\| + \|w_t\| \leq N \|\hat{g}\|, \]
where $\|\cdot \| = \| \cdot \|_{L_p(\Omega_{r^{-\alpha}T})}$ and $N = N(d, \nu, p)>0$. From this, the definition of $\hat{g}$, and a simple manipulation, we obtain \[ \begin{split}
& \lambda r^{\alpha} \|\zeta_r u_r \| + \sqrt{\lambda} r^{\alpha/2} \|\zeta_r Du_r\| + \|\zeta_r D^2u_r\| +\|\zeta_r \partial_t u_r \| \\
& \leq N\Big[ \|\zeta_r f_r\| + \sqrt{\lambda} r^{\alpha/2} \|\zeta_r' u_r\| + \|\zeta''_r u_r\| + \|\zeta_r' Du_r\| \Big]. \end{split} \] Now, we raise this last estimate to the power $p$, multiply both sides by $r^{-(\gamma-\alpha)-1}$, integrate the result with respect to $r$ on $(0,\infty)$, and then apply \eqref{weight-kry} to obtain \[ \begin{split}
& \lambda \|\mathbf{M}^{-\alpha} u\|_{L_p(\Omega_T, x_d^{\gamma}\, dz)} +\sqrt{\lambda} \|\mathbf{M}^{-\alpha/2}Du \|_{L_p(\Omega_T, x_d^{\gamma}\, dz)} \\
& \qquad + \| D^2u\|_{L_p(\Omega_T, x_d^{\gamma}\, dz)} + \|\mathbf{M}^{-\alpha} u_t\|_{L_p(\Omega_T, x_d^{\gamma}\, dz)}\\
& \leq N \Big[ \|f\|_{L_p(\Omega_T, x_d^{\gamma}\, dz)} +\sqrt{\lambda} \|\mathbf{M}^{-\alpha/2}u\|_{L_p(\Omega_T, x_d^{\gamma -p}\, dz)} + \|u\|_{L_p(\Omega_T, x_d^{\gamma -2p}\, dz)} \\
& \qquad + \|Du\|_{L_p(\Omega_T, x_d^{\gamma -p}\, dz)}\Big].
\end{split} \] From the last estimate, \eqref{Du-0226}, \eqref{u-op-wei-0226}, and the fact that $\gamma' = \gamma-p$, we infer that \[ \begin{split}
& \lambda \|\mathbf{M}^{-\alpha} u\|_{L_p(\Omega_T, x_d^{\gamma}\, dz)} + \sqrt{\lambda} \|\mathbf{M}^{-\alpha/2}Du \|_{L_p(\Omega_T, x_d^{\gamma}\,dz)} \\
& \qquad + \| D^2u\|_{L_p(\Omega_T, x_d^{\gamma}\,dz)} + \|\mathbf{M}^{-\alpha} u_t\|_{L_p(\Omega_T, x_d^{\gamma}\,dz)} \leq N \|f\|_{L_p(\Omega_T, x_d^{\gamma}\, dz)}. \end{split} \] This proves \eqref{est-0405-1} under the additional assumption that $u \in \sW^{1,2}_p(\Omega_T, x_d^{\gamma'}\,dz)$.
It remains to remove the extra assumption that $u \in \sW^{1,2}_p(\Omega_T, x_d^{\gamma'}dz)$. By mollifying the equation \eqref{eq:xd} in $t$ and $x'$ and applying \cite[Theorem 2.4]{DPT21} to the equations of $u^{(\varepsilon)}_t$ and $D_{x'}u^{(\varepsilon)}$, we obtain $$ \mathbf{M}^{-\alpha}u^{(\varepsilon)}, \mathbf{M}^{-\alpha} u_t^{(\varepsilon)}, DD_{x'}u^{(\varepsilon)} \in L_p(\Omega_T, x_d^{\gamma'} dz). $$ This and the PDE in \eqref{eq:xd} for $u^{(\varepsilon)}$ imply that \[ D_{dd} u^{(\varepsilon)} \in L_p(\Omega_T, x_d^{\gamma'} dz). \] Therefore $u^{(\varepsilon)} \in \sW^{1,2}_p(\Omega_T, x_d^{\gamma'}dz)$ is a strong solution of \eqref{eq:xd} with $f^{(\varepsilon)}$ in place of $f$. From this, we apply the a priori estimate \eqref{est-0405-1} that we just proved for $u^{(\varepsilon)}$ and pass to the limit as $\varepsilon \rightarrow 0^+$ to obtain the estimate \eqref{est-0405-1} for $u$. The proof of the lemma is completed. \end{proof}
\subsection{Boundary H\"older estimates for homogeneous equations} \label{subsec:boundary} Recall the operator $\sL_0$ defined in \eqref{L0-def}. In this subsection, we consider the homogeneous equation \begin{equation}
\label{eq:hom}
\begin{cases} \sL_0u=0 \quad &\text{ in } Q_1^+,\\ u=0 \quad &\text{ on } Q_1\cap \{x_d=0\}. \end{cases} \end{equation} As above, {without loss of generality we assume \eqref{add-cond} so that} \eqref{eq:hom} can be written in divergence form as \begin{equation}
\label{eq:hom-div}
\left\{
\begin{array}{cccl} u_t+\lambda \bar{c}_0(x_d)u-\mu(x_d) D_i(\tilde a_{ij}(x_d) D_{j} u) & = &0 & \quad \text{ in } Q_1^+,\\ u & = & 0 & \quad \text{on} \quad Q_1\cap \{x_d=0\}. \end{array} \right. \end{equation} A function $u \in \cH_{p}^1(Q_1^+)$ with $p \in (1,\infty)$ is said to be a weak solution of \eqref{eq:hom} if it is a weak solution of \eqref{eq:hom-div} in the sense defined in \eqref{def-local-weak-sol} and $u=0$ on $Q_1\cap \{x_d=0\}$ in the sense of trace.
For each $\beta \in (0,1)$, the $\beta$-H\"older semi-norm in the spatial variable of a function $u$ on an open set $Q\subset \mathbb{R}^{d+1}$ is given by \[
\llbracket u\rrbracket_{C^{0, \beta}(Q)} = \sup\left\{ \frac{|u(t,x) - u(t,y)|}{|x-y|^{\beta}}: x \not =y, \ (t,x), (t,y) \in Q \right\}. \] For $k, l \in \mathbb{N} \cup \{0\}$, we denote \[
\|u\|_{C^{k, l}(Q)} = \sum_{i=0}^k \sum_{|j| \leq l}\|\partial_t^i D_{x}^j u\|_{L_\infty(Q)} . \] We also use the following H\"{o}lder norm of $u$ on $Q$ \[
\|u\|_{C^{k, \beta}(Q)} = \|u\|_{C^{k,0}(Q)} + \sum_{i=0}^{k} \llbracket \partial_t^i u\rrbracket_{C^{0, \beta}(Q)}. \] We begin with the following Caccioppoli type estimate. \begin{lemma} \label{caccio} Suppose that $u\in \cH^1_2(Q_1^+)$ is a weak solution of \eqref{eq:hom}. Then, for any integers $k,j\ge 0$ and $l=0,1$, \begin{equation}
\label{eq:hom-b1}
\int_{Q_{1/2}^+}|\partial_t^k D_{x'}^j D_{d}^l u|^2 \,dz \le N \int_{Q_{1}^+} u^2 \,dz \end{equation} where $N = N(d,\nu,\alpha,k,j,l)>0$. \end{lemma} \begin{proof} Again, we can assume \eqref{add-cond} holds. The estimate \eqref{eq:hom-b1} follows from \cite[(4.12)]{DPT21} applied to \eqref{eq:hom-div}. \end{proof} \begin{lemma}
\label{lem:boundary} Let $p_0 \in (1, \infty)$ and suppose that $u\in \cH^1_{p_0}(Q_1^+)$ is a weak solution of \eqref{eq:hom}. Then, \begin{equation}
\label{eq:hom-b2}
\begin{split}
& \|u\|_{C^{1,1}(Q_{1/2}^+)}+\|D_{x'}u\|_{C^{1,1}(Q_{1/2}^+)}+\|D_d u\|_{C^{1,\delta_0}(Q_{1/2}^+)} \\
& + \sqrt{\lambda}\|\mathbf{M}^{-\alpha/2}u\|_{C^{1,1-\alpha/2}(Q_{1/2}^+)} \le N \|Du\|_{L_{p_0}(Q_1^+)}, \end{split} \end{equation} where $N=N(d,\nu,\alpha, p_0)>0$ and $\delta_0=\min\{2-\alpha,1\}$. \end{lemma} \begin{proof} As explained, we can assume that \eqref{add-cond} holds. We apply \cite[Lemma 5.5]{DPT21} to \eqref{eq:hom-div} by noting that $U:=\tilde a_{dj}D_j u=D_du$ in view of \eqref{add-cond} and \eqref{eq:change}. \end{proof}
\begin{lemma}
\label{prop:boundary} Let $p_0 \in (1, \infty)$, $\beta_0\in {(-\infty}, \min\{1,\alpha\}]$, and $\alpha_0 > -1$ be fixed constants. There exists a number $\beta_1=\beta_1(\alpha,\beta_0) \in (0,1]$ such that for every weak solution $u\in \cH^1_{p_0}(Q_1^+)$ to \eqref{eq:hom}, it holds that \begin{align}
\label{eq:hom-b3}
\|\mathbf{M}^{-\beta_0} u\|_{C^{1,\beta_1}(Q_{1/2}^+)}&\le N\|\mathbf{M}^{-\beta_0} u\|_{L_{p_0}(Q_{3/4}^+,x_d^{\alpha_0}\,dz)},\\
\label{eq:hom-b4}
\|\mathbf{M}^{-\beta_0} u_t\|_{C^{1,\beta_1}(Q_{1/2}^+)}&\le N\|\mathbf{M}^{-\beta_0} u_t\|_{L_{p_0}(Q_{3/4}^+,x_d^{\alpha_0}\,dz)},\\
\label{eq:hom-b5}
\|\mathbf{M}^{\alpha-\beta_0} DD_{x'}u\|_{C^{1,\beta_1}(Q_{1/2}^+)}&\le N\|\mathbf{M}^{\alpha-\beta_0} DD_{x'}u\|_{L_{p_0}(Q_{3/4}^+,x_d^{\alpha_0}\,dz)}, \end{align} and \begin{equation}
\label{eq:hom-b-Du}
\|\mathbf{M}^{\beta_0}Du\|_{C^{1,\beta_1}(Q_{1/2}^+)} \le N\|\mathbf{M}^{\beta_0}Du\|_{L_{p_0}(Q_{3/4}^+,x_d^{\alpha_0}dz)}, \end{equation} where $N=N(d,\nu,\alpha,\alpha_0,\beta_0, p_0)>0$. \end{lemma} \begin{proof} Again, we assume \eqref{add-cond}. Note that once the lemma with $\alpha_0 \geq 0$ is proved, the case when $\alpha_0 \in (-1, 0)$ will follow immediately. Hence, we only need to prove the lemma with the assumption that $\alpha_0 \geq 0$. We first assume $p_0 =2$. Since $\beta_0\le \min\{1,\alpha\}$, by \eqref{eq:hom-b1} and the boundary Poincar\'e inequality, the right-hand sides of \eqref{eq:hom-b3}, \eqref{eq:hom-b4}, and \eqref{eq:hom-b5} are all finite. We consider two cases.
\noindent {\em Case 1: $\beta_0=0$.} When $\alpha_0=0$, \eqref{eq:hom-b3} and \eqref{eq:hom-b-Du} follow from \eqref{eq:hom-b2} and \eqref{eq:hom-b1}. For general $\alpha_0\ge 0$, by \eqref{eq:hom-b3} with $\beta_0=0$ and $\alpha_0=0$ and H\"older's inequality, we have \begin{align*}
\|u\|_{L_\infty(Q_{1/2}^+)}& \le N \|u\|_{L_2(Q_{2/3}^+)} \leq N\|u\|^{2\alpha_0/(1+2\alpha_0)}_{L_2(Q_{2/3}^+,x_d^{-1/2}\,dz)}
\|u\|^{1/(1+2\alpha_0)}_{L_2(Q_{2/3}^+,x_d^{\alpha_0}\,dz)}\\
&\le N\|u\|^{2\alpha_0/(1+2\alpha_0)}_{L_\infty(Q_{2/3}^+)}
\|u\|^{1/(1+2\alpha_0)}_{L_2(Q_{2/3}^+,x_d^{\alpha_0}\,dz)} \\
& \leq \frac{1}{2} \|u\|_{L_\infty(Q_{2/3}^+)} + N\|u\|_{L_2(Q_{2/3}^+,x_d^{\alpha_0}\,dz)}, \end{align*} where $N = N(d, \nu, \alpha, \alpha_0)>0$. From this and the standard iteration argument (see \cite[p. 75]{HanLin} for example), we obtain \begin{equation} \label{est.alpha-zero}
\|u\|_{L_\infty(Q_{1/2}^+)} \leq N \|u\|_{L_2(Q_{3/4}^+,x_d^{\alpha_0}\,dz)}. \end{equation} The above, together with Lemma \ref{caccio}, yields \begin{equation}
\label{eq:hom-b6}
\int_{Q_{1/2}^+}|\partial_t^k D_{x'}^j D_{d}^l u|^2 \,dz \le N(d,\nu,\alpha, \alpha_0,k,j,l) \int_{Q_{3/4}^+} u^2 x_d^{\alpha_0}\,dz \end{equation} for any integers $k,j\ge 0$ and $l=0,1$. Using this last estimate, \eqref{eq:hom-b2}, and by suitably adjusting the sizes of the cylinders, we obtain \eqref{eq:hom-b3} with $\beta_1 = 1$. Similar to \eqref{est.alpha-zero}, we have \[
\|Du\|_{L_\infty(Q_{1/2}^+)} \leq N \|Du\|_{L_2(Q_{3/4}^+,x_d^{\alpha_0}\,dz)}. \] From this, \eqref{eq:hom-b2}, and by shrinking the cylinders, we obtain \[
\|Du\|_{C^{1,\delta_0}(Q_{1/2}^+)} \leq N \|Du\|_{L_2(Q_{3/4}^+,x_d^{\alpha_0}dz)}, \quad \text{where} \,\, \delta_0=\min\{2-\alpha,1\}, \] which is \eqref{eq:hom-b-Du} when $\beta_0 =0$.
Since $u_t$ and $D_{x'} u$ satisfy the same equation with the same boundary condition, similarly we also obtain \eqref{eq:hom-b4} as well as \begin{equation}
\label{eq:hom-b7}
\|DD_{x'}u\|_{C^{1,\delta_0}(Q_{1/2}^+)}\le N\|DD_{x'}u\|_{L_2(Q_{2/3}^+)}, \end{equation} by Lemma \ref{lem:boundary}. This together with \eqref{eq:hom-b6} implies \eqref{eq:hom-b5} with $$\beta_1=\min\{\delta_0,\alpha \} = \min\{\alpha, 2-\alpha,1\}.$$
\noindent {\em Case 2: $\beta_0\neq 0$.} We first prove \eqref{eq:hom-b5}. By \eqref{eq:hom-b7} and by using the iteration argument as in \eqref{est.alpha-zero}, we have \[
\|DD_{x'} u\|_{L_\infty(Q_{1/2}^+)} \leq N\|DD_{x'} u\|_{L_2(Q_{3/4}^+, x_d^{\alpha_0}\, dz)}, \] where $N = N(d, \nu, \alpha, \alpha_0)>0$. Then, it follows from \eqref{eq:hom-b7} that \begin{equation} \label{eq:hom-b7-bis}
\|DD_{x'}u\|_{C^{1,\delta_0}(Q_{1/2}^+)}\le N\|DD_{x'}u\|_{L_2(Q_{3/4}^+, x_d^{\alpha_0}\, dz)}. \end{equation} Therefore, if $\beta_0=\alpha$, \eqref{eq:hom-b5} with $\beta_1=\delta_0$ follows from \eqref{eq:hom-b7-bis}. If $\beta_0<\alpha$, it follows from \eqref{eq:hom-b7-bis} that \[
\|DD_{x'}u\|_{C^{1,\delta_0}(Q_{1/2}^+)}\le N\|\mathbf{M}^{{\alpha-\beta_0}}DD_{x'}u\|_{L_2(Q_{3/4}^+, x_d^{\alpha_0}\, dz)} \] where $N = N(d, \nu, \alpha, \beta_0, \alpha_0)>0$. Then we also have \eqref{eq:hom-b5} with \[ \beta_1=\min\{\delta_0,\alpha-\beta_0\} = \min\{2-\alpha,1,\alpha-\beta_0\}. \] Similarly, \eqref{eq:hom-b-Du} can be deduced from \eqref{eq:hom-b-Du} when $\beta_0 =0$ by taking $\beta_1 = \min\{\delta_0, \beta_0\}$. Hence, both \eqref{eq:hom-b5} and \eqref{eq:hom-b-Du} hold with \[ \beta_1=\min\{\delta_0,\alpha-\beta_0,{\beta_0}\} = \min\{2-\alpha,1,\alpha-\beta_0, \beta_0\}. \]
Next we show \eqref{eq:hom-b3}. Since $\beta_0\le 1$, using the zero boundary condition, \eqref{eq:hom-b2}, and \eqref{eq:hom-b6}, we get \begin{equation}
\label{eq:hom-b8}
\|\mathbf{M}^{-\beta_0} u\|_{L_\infty(Q_{1/2}^+)}
\le N \|D_d u\|_{L_\infty(Q_{1/2}^+)}\le N\|u\|_{L_2(Q_{3/4}^+,x_d^{\alpha_0}\,dz)}. \end{equation} Since $u_t$ and $D_{x'} u$ satisfy the same equation and the same boundary condition, we have \begin{align}
\label{eq:hom-b9}
&\|\mathbf{M}^{-\beta_0} u_t\|_{L_\infty(Q_{1/2}^+)}
+\|\mathbf{M}^{-\beta_0}D_{x'} u\|_{L_\infty(Q_{1/2}^+)}\notag\\
&\le \ N \|D_d u_t\|_{L_\infty(Q_{1/2}^+)}+N \|D_dD_{x'} u\|_{L_\infty(Q_{1/2}^+)}\notag\\
&\le \ N \|D u\|_{L_2(Q_{2/3}^+,x_d^{\alpha_0})}\le N\|u\|_{L_2(Q_{3/4}^+,x_d^{\alpha_0}\,dz)}, \end{align} where we used \eqref{eq:hom-b2}. To estimate the H\"older semi-norm of $\mathbf{M}^{-\beta_0} u$ in $x_d$, we write $$ x_d^{-\beta_0} u(t,x)=x_d^{1-\beta_0}\int_0^1 (D_d u)(t,x',sx_d)\,ds $$ and use \eqref{eq:hom-b2} and \eqref{eq:hom-b6}. Then we see that \[ \llbracket \mathbf{M}^{-\beta_0} u \rrbracket_{C^{0, \beta_1}(Q_{1/2}^+)} +
\llbracket \mathbf{M}^{-\beta_0} \partial_t u \rrbracket_{C^{0, \beta_1}(Q_{1/2}^+)} \leq N \|\mathbf{M}^{-\beta_0} u\|_{L_2(Q_{3/4}^+, x_d^{\alpha_0}\,dz)} \] where $\beta_1 = \min\{\delta_0, 1-\beta_0\}$. Combining this with \eqref{eq:hom-b8} and \eqref{eq:hom-b9}, we reach \eqref{eq:hom-b3}.
Since $u_t$ satisfies the same equation and the same boundary condition, we deduce \eqref{eq:hom-b4} from \eqref{eq:hom-b3}. The proof of the lemma when $p_0 =2$ is completed.
Next, we observe that when $p_0 >2$, the estimates \eqref{eq:hom-b3}--\eqref{eq:hom-b-Du} can be derived directly from the case $p_0 =2$ that we just proved using H\"{o}lder's inequality. On the other hand, when $p_0 \in (1, 2)$, it follows from Lemma \ref{lem:boundary} that $u \in \cH_2^1(Q_{3/4}^+)$. Then, by shrinking the cylinders, we apply the assertion when $p_0=2$ that we just proved and an iteration argument as in the proof of \eqref{est.alpha-zero} to obtain the estimates \eqref{eq:hom-b3}--\eqref{eq:hom-b-Du}. \end{proof}
\begin{remark} The number $\beta_1$ defined in Lemma \ref{prop:boundary} can be found explicitly. However, we do not need to use this in the paper. \end{remark} \subsection{Interior H\"older estimates for homogeneous equations} \label{subsec:int} Fix a point $z_0 = (t_0, x_0) \in \Omega_T$, where $x_0 = (x_0', x_{0d}) \in \mathbb{R}^{d-1} \times \mathbb{R}_+$. For $0<\rho < x_{0d}$ and $\beta \in (0,1)$, we define the weighted $\beta$-H\"{o}lder semi-norm of a function $u$ on $Q_\rho(z_0)$ by \[ \begin{split}
\llbracket u\rrbracket_{C^{\beta/2, \beta}_{\alpha}(Q_\rho(z_0))} & = \sup \Big\{ \frac{|u(s,x) - u(t, y)|}{\big(x_{0d}^{-\alpha/2}|x-y| + |t-s|^{1/2}\big)^{\beta}}: (s,x) \not=(t,y) \\ & \qquad \qquad \qquad \text{and } (s,x), (t,y) \in Q_\rho(z_0) \Big\}. \end{split} \] As usual, we denote the corresponding weighted norm by \[
\|u\|_{C^{\beta/2, \beta}_{\alpha}(Q_\rho(z_0))} = \|u\|_{L_\infty(Q_\rho(z_0))} + \llbracket u\rrbracket_{C^{\beta/2, \beta}_{\alpha}(Q_\rho(z_0))}. \] The following result is the interior H\"older estimates of solutions to the homogeneous equation \eqref{eq:hom-div}.
\begin{lemma}\label{prop:int} Let $z_0 = (t_0, x_0) \in \Omega_T$, where $x_0 = (x_0', x_{0d}) \in \mathbb{R}^{d-1} \times \mathbb{R}_+$, and $\rho \in (0, x_{0d}/4)$. Let $u \in \sW_{p_0}^{1,2}(Q_{2\rho}(z_0))$ be a strong solution of \begin{equation*} \sL_0 u=0 \quad \text{ in } Q_{2\rho}(z_0) \end{equation*} with some $p_0 \in (1, \infty)$. Then for any $\beta \in \mathbb{R}$, \begin{align*}
& \|\mathbf{M}^{\beta} u\|_{L_\infty(Q_{\rho}(z_0))} + \rho ^{(1-\alpha/2)/2} \llbracket \mathbf{M}^{\beta} u\rrbracket_{C^{1/4, 1/2}_\alpha(Q_{\rho}(z_0))} \\
&\leq N \left(\fint_{ Q_{2\rho}( z_0)} |x_{d}^{\beta}u|^{p_0} \mu_0(dz)\right)^{1/p_0}, \end{align*} and \begin{align*}
& \|\mathbf{M}^{\beta}Du\|_{L_\infty(Q_{\rho}(z_0))}
+ \rho^{(1-\alpha/2)/2} \llbracket {\mathbf{M}^{\beta} Du}\rrbracket_{C^{1/4, 1/2}_\alpha(Q_{\rho}(z_0))} \\
&\leq N \left(\fint_{ Q_{2\rho}( z_0)} |x_d^{\beta}Du|^{p_0} \mu_0(dz) \right)^{1/p_0}, \end{align*} where $\mu_0(dz) = x_d^{\alpha_0}dtdx$ with some $\alpha_0 > -1$, and $N = N(\nu, d,\alpha, \alpha_0)>0$. \end{lemma} \begin{proof} As in the proof of Lemma \ref{prop:boundary}, we may assume that $p_0 =2$. Without loss of generality, we assume that $x_{0d}=1$. Note that when $\beta = 0$, the assertions follow directly from \cite[Proposition 4.6]{DPT21}. In general, the assertions follow from the case when $\beta =0$ and the fact that \[
\left(\fint_{ Q_{2\rho}( z_0)} |\mathbf{M}^{\beta}f(z)|^{p_0} \mu_0(dz) \right)^{1/p_0} \approx \left(\fint_{ Q_{2\rho}( z_0)} |f(z)|^{p_0} dz \right)^{1/p_0}. \] The lemma is proved. \end{proof} \subsection{Mean oscillation estimates} \label{subsec:osc-est} In this subsection, we apply Lemmas \ref{prop:boundary} and \ref{prop:int} to derive the mean oscillation estimates of $$ U = (\mathbf{M}^{-\beta_0} u_t, \mathbf{M}^{\alpha-\beta_0}DD_{x'} u, \lambda \mathbf{M}^{-\beta_0}u) \qquad \text{and} \qquad Du $$ respectively with the underlying measure \begin{equation} \label{mu-1-def} \mu_1(dz) = x_d^{\gamma_1}\,dx dt \qquad \text{and} \qquad \bar{\mu}_1(dz) = x_d^{\bar{\gamma}_1} dxdt, \end{equation} where $u$ is a strong solution of \eqref{eq:xd}, \[ \gamma_1 \in (p_0(\beta_0-\alpha +1)-1, p_0(\beta_0-\alpha+2)-1) \quad \text{and} \quad \bar{\gamma}_1 =\gamma_1 + p_0 (\alpha /2-\beta_0) \] with some $p_0 \in (1, \infty)$ and $\beta_0 \in (\alpha-1, \min\{1, \alpha\}]$. The main result of the subsection is Lemma \ref{oscil-lemma-2} below.
Let us point out that both $\mu_1$ and $\bar{\mu}_1$ depend on the choice of $\beta_0$, and \begin{equation} \label{mu1=bar-mu-1} \mu_1= \bar{\mu}_1 \quad \text{when} \quad \beta_0 = \alpha/2. \end{equation} To get the weighted estimate of $U$ in $L_p(\Omega_T, x_d^\gamma\, dz)$ with the optimal range for $\gamma$ as in Theorem \ref{thm:xd}, we will use $\beta_0 = \min\{1, \alpha\}$. On the other hand, to derive the estimate for $Du$, we will use $\beta_0 = \alpha/2$ and \eqref{mu1=bar-mu-1}.
For the reader's convenience, let us also recall that for a cylinder $Q \subset \mathbb{R}^{d+1}$, a locally finite measure $\omega$, and an $\omega$-integrable function $g$ on $Q$, we denote the average of $g$ on $Q$ with respect to the measure $\omega$ by \[ (g)_{Q, \omega} = \frac{1}{\omega(Q)}\int_{Q} g(z)\, \omega(dz) \] and the average of $g$ on $Q$ with respect to the Lebesgue measure by \[
(g)_{Q} =\frac{1}{|Q|} \int_{Q} g(z)\, dz. \] We begin with the following lemma on the mean oscillation estimates of solutions to the homogeneous equations. \begin{lemma} \label{oscil-lemma-1} Let $\nu \in (0,1)$, $\alpha \in (0,2)$, $p_0 \in (1, \infty)$, $\beta_0 \in {(\alpha-1}, \min\{1, \alpha\}]$, and $\gamma_1 \in (p_0(\beta_0-\alpha +1) -1, p_0(\beta_0-\alpha+2)-1)$. There exists $N = N(d,\nu, \alpha, \gamma_1, p_0)>0$ such that if $u \in \sW^{1,2}_{p_0}(Q_{14\rho}^+(z_0), x_d^{\gamma_1'}\, dz)$ is a strong solution of \[ \left\{ \begin{array}{cccl} \sL_0 u & =& 0 & \quad \text{in} \quad Q_{14\rho}^+(z_0) \\ u & = & 0 & \quad \text{on} \quad Q_{14\rho}(z_0) \cap \{x_d =0\} \end{array} \right. \] for some $\lambda>0, \rho>0$, $z_0 =(z_0', x_{d0}) \in \overline{\Omega}_T$, and for $\gamma_1' = \gamma_1 -p_0(\beta_0-\alpha)$, then \begin{equation} \label{osc-h}
(|U - (U)_{Q_{\kappa \rho}^+(z_0), \mu_1}|)_{Q_{\kappa \rho}^+(z_0), \mu_1} \leq N \kappa^{\theta} (|U|^{p_0})_{Q_{14 \rho}^+(z_0), \mu_1}^{1/p_0} \end{equation} and \begin{equation} \label{Du-osc-h}
(|Du - (Du)_{Q_{\kappa \rho}^+(z_0), \bar{\mu}_1}|)_{Q_{\kappa \rho}^+(z_0), \bar{\mu}_1} \leq N \kappa^{\theta} (|Du|^{p_0})_{Q_{14 \rho}^+(z_0), \bar{\mu}_1}^{1/p_0} \end{equation} for every $\kappa \in (0,1)$, where $\mu_1, \bar{\mu}_1$ are defined in \eqref{mu-1-def}, \[ U = (\mathbf{M}^{-\beta_0} u_t, \mathbf{M}^{\alpha-\beta_0}DD_{x'} u, \lambda \mathbf{M}^{-\beta_0}u), \] and $\theta = \min\{\beta_1(\alpha, \beta_0), (2-\alpha)/4, 2-\alpha, 1\} \in (0,1)$ in which $\beta_1$ is defined in Lemma \ref{prop:boundary}. \end{lemma} \begin{proof}
By using the scaling \eqref{scaling}, we assume that $\rho =1$. We consider two cases: the boundary case and the interior one.
\noindent
{\em Boundary case.} Consider $x_{0d} <4$. Let $\bar{z} =(t_0, x_0', 0)$ and note that from the definition of cylinders in \eqref{def:Q}, we have \[ Q_{1}^+(z_0) \subset Q_{5}^+(\bar{z}) \subset Q_{10}^+(\bar{z}) \subset Q_{14}^+(z_0). \] Then, we apply the mean value theorem and the estimates \eqref{eq:hom-b3}-\eqref{eq:hom-b5} in Lemma \ref{prop:boundary}, with $\gamma_1$ in place of $\alpha_0$ and with the same $\beta_0$, to $u$, $u_t$, and $DD_{x'}u$. We infer that \[ \begin{split}
& (|U - (U)_{Q_{\kappa}^+(z_0), \mu_1}|)_{Q_{\kappa}^+(z_0), \mu_1} \\ & \leq N \kappa^{2-\alpha}
\|\partial_t U\|_{L_\infty(Q_1^+(z_0))} + N \kappa^{\beta_1} \llbracket U \rrbracket_{C^{0, \beta_1}(Q_{1}^+(z_0))} \\
& \leq N \kappa^{\theta}\big[ \|\partial_t U\|_{L_\infty(Q_{5}^+(\bar{z}))} + \llbracket U \rrbracket_{C^{0, \beta_1}(Q_{5}^+(\bar{z}))} \big] \\
& \leq N \kappa^{\theta} (|U|^{p_0})_{Q_{10}^+(\bar{z}), \mu_1}^{1/p_0} \leq N \kappa^{\theta} (|U|^{p_0})_{Q_{14}^+(z_0), \mu_1}^{1/p_0}, \end{split} \] where we used the doubling property of $\mu_1$ in the last step. This implies the estimate \eqref{osc-h} as $\kappa \in (0,1)$. To estimate the oscillation of $Du$ as asserted in \eqref{Du-osc-h}, we note that $$\bar{\gamma}_1 = \gamma_1 - p_0(\beta_0 -\alpha/2) > p_0 (1-\alpha/2)-1 >-1. $$ Therefore, \eqref{Du-osc-h} can be proved in the same way as \eqref{osc-h}, using the estimate \eqref{eq:hom-b-Du} in Lemma \ref{prop:boundary} with $\beta_0 =0$ and $\alpha_0 = \bar{\gamma}_1 >-1$.
\ \\ \noindent
{\em Interior case.} Consider $x_{0d} >4\rho=4$. By using Lemma \ref{prop:int} with $\beta = -\beta_0$ and the doubling property of $\mu_1$, we see that \[ \begin{split}
& (|\mathbf{M}^{-\beta_0} u - (\mathbf{M}^{-\beta_0} u)_{Q_{\kappa}^+(z_0), \mu_1}|)_{Q_{\kappa}^+(z_0), \mu_1} \\ & \leq N \kappa^{1/2-\alpha/4}\llbracket \mathbf{M}^{-\beta_0} u \rrbracket_{C_\alpha^{1/4, 1/2}(Q_{1}^+(z_0))} \\
& \leq N \kappa^{1/2-\alpha/4} \left(\fint_{Q_{2}^+(z_0)}|\mathbf{M}^{-\beta_0} u|^{p_0}\mu_1(dz) \right)^{1/p_0} \\
& \leq N \kappa^{1/2-\alpha/4} \left(\fint_{Q_{14}^+(z_0)}|\mathbf{M}^{-\beta_0} u|^{p_0} \mu_1(dz) \right)^{1/p_0}. \end{split} \] Similarly, by using the finite difference quotient, we can apply Lemma \ref{prop:int} to $u_t$ and obtain \[ \begin{split}
& (|\mathbf{M}^{-\beta_0} u_t - (\mathbf{M}^{-\beta_0} u_t)_{Q_{\kappa}^+(z_0), \mu_1}|)_{Q_{\kappa}^+(z_0), \mu_1} \\
& \leq N \kappa^{1/2-\alpha/4} \left(\fint_{Q_{14}^+(z_0)}|\mathbf{M}^{-\beta_0} u_t|^{p_0} \mu_1(dz) \right)^{1/p_0}. \end{split} \] In the same way, by applying Lemma \ref{prop:int} to $D_{x'}u$ with $\beta=\alpha-\beta_0$ and $\alpha_0 = \gamma_1$, we infer that \[ \begin{split}
& (|\mathbf{M}^{\alpha -\beta_0} DD_{x'} u - (\mathbf{M}^{\alpha -\beta_0} DD_{x'} u)_{Q_{\kappa}^+(z_0), \mu_1}|)_{Q_{\kappa}^+(z_0), \mu_1} \\
& \leq N \kappa^{1/2-\alpha/4} \left(\fint_{Q_{14}^+(z_0)}|\mathbf{M}^{\alpha -\beta_0} DD_{x'} u|^{p_0} \mu_1(dz) \right)^{1/p_0}. \end{split} \] Combining the last three estimates, we obtain \eqref{osc-h} in the interior case, and the oscillation estimate \eqref{Du-osc-h} of $Du$ can be proved in a similar way. The proof of the lemma is completed. \end{proof}
Now, we recall that for a given number $a \in \mathbb{R}$, $a_+ = \max\{a, 0\}$. We derive the oscillation estimates of solutions to the non-homogeneous equation \eqref{eq:xd}, which is the main result of the subsection. \begin{lemma} \label{oscil-lemma-2} Let $\nu \in (0,1)$, $\alpha \in (0,2)$, $p_0 \in (1, \infty)$, $\beta_0 \in {(\alpha-1}, \min\{1, \alpha\}]$, and $\gamma_1 \in (p_0(\beta_0-\alpha +1) -1, p_0(\beta_0-\alpha+2)-1)$. There exists $N = N(d,\nu, \alpha, \gamma_1, p_0)>0$ such that the following assertions hold. Suppose that $u \in \sW^{1,2}_{p_0, \textup{loc}}(\Omega_T, x_d^{\gamma_1'}\, dz)$ is a strong solution of \eqref{eq:xd} with $f \in L_{p_0, \textup{loc}}(\Omega_T, x_d^{\gamma_1'}\, dz)$ and $\gamma_1' = \gamma_1 - p_0(\beta_0-\alpha)$. Then, for every $z_0 \in \overline{\Omega}_T$, $\rho \in (0, \infty)$, $\kappa \in (0,1)$, we have \[ \begin{split}
& (|U - (U)_{Q_{\kappa \rho}^+(z_0), \mu_1}|)_{Q_{\kappa \rho}^+(z_0), \mu_1}\\
&\leq N \kappa^{\theta} (|U|^{p_0})_{Q_{14 \rho}^+(z_0), \mu_1}^{1/p_0} + N \kappa^{-(d + (\gamma_1)_+ +2-\alpha)/p_0} (|\mathbf{M}^{\alpha-\beta_0} f|^{p_0})_{Q_{14\rho}^+(z_0), \mu_1}^{1/p_0} \end{split} \] and \[ \begin{split}
& \lambda^{1/2}(|Du - (Du)_{Q_{\kappa \rho}^+(z_0), \bar{\mu}_1}|)_{Q_{\kappa \rho}^+(z_0),\bar{\mu}_1} \\
& \leq N \kappa^{\theta} \lambda^{1/2} (|Du|^{p_0})_{Q_{14 \rho}^+(z_0), \bar{\mu}_1}^{1/p_0} + N \kappa^{-(d + (\gamma_1)_+ +2-\alpha)/p_0} (|\mathbf{M}^{\alpha/2} f|^{p_0})_{Q_{14\rho}^+(z_0), \bar{\mu}_1}^{1/p_0}, \end{split} \] where $\theta{\in (0,1)}$ is defined in Lemma \ref{oscil-lemma-1}, \[ U = (\mathbf{M}^{-\beta_0} u_t, \mathbf{M}^{\alpha-\beta_0}DD_{x'} u, \lambda \mathbf{M}^{-\beta_0}u), \] and $\mu_1, \bar{\mu}_1$ are defined in \eqref{mu-1-def}. \end{lemma} \begin{proof} As $\gamma_1 \in (p_0(\beta_0-\alpha +1)-1, p_0(\beta_0-\alpha+2)-1)$, we see that \[ \gamma_1' = \gamma_1 - p_0(\beta_0-\alpha) \in (p_0 -1, 2p_0 -1). \] Therefore, by Lemma \ref{l-p-sol-lem}, there is a strong solution $v \in \sW^{1,2}_{p_0}(\Omega_T, x_d^{\gamma_1'}\, dz)$ to \begin{equation} \label{v-sol-1} \left\{ \begin{array}{cccl} \sL_0 v & =& \mu(x_d) f \mathbf{1}_{Q_{14\rho}^+(z_0)} & \quad \text{in} \quad \Omega_T,\\ v & = & 0 & \quad \text{on} \quad \{x_d =0\} \end{array} \right. \end{equation} satisfying \begin{equation} \label{v-sol-est-1} \begin{split}
\|\mathbf{M}^{-\alpha} v_t\|_{L_{p_0}(\Omega_T, x_d^{\gamma_1'} dz)} & + \|D^2 v\|_{L_{p_0}(\Omega_T, x_d^{\gamma_1'} dz)} + \lambda^{1/2} \|\mathbf{M}^{-\alpha/2} Dv\|_{L_{p_0}(\Omega_T, x_d^{\gamma_1'} dz)}\\
& + \lambda \|\mathbf{M}^{-\alpha} v\|_{L_{p_0}(\Omega_T, x_d^{\gamma_1'} dz)} \leq N \|f\|_{L_{p_0}(Q_{14\rho}^+(z_0), x_d^{\gamma_1'} dz)}. \end{split} \end{equation} Let us denote \[
V = (\mathbf{M}^{-\beta_0} v_t, \mathbf{M}^{\alpha-\beta_0}DD_{x'} v, \lambda \mathbf{M}^{-\beta_0}v). \] Then, it follows from \eqref{v-sol-est-1} and the definitions of $\mu_1$ and $\gamma_1'$ that \begin{equation} \label{V-osc-est-1}
(|V|^{p_0})_{Q_{14\rho}^+(z_0), \mu_1}^{1/p_0} \leq N (|\mathbf{M}^{\alpha-\beta_0} f|^{p_0})_{Q_{14\rho}^+(z_0), \mu_1}^{1/p_0}. \end{equation} Note also that due to \eqref{v-sol-est-1} and the definition of $\bar{\gamma}_1$, \[ \begin{split}
\lambda^{1/2} \left( \int_{Q_{14\rho}^+(z_0)} | Dv|^{p_0} x_d^{\bar{\gamma}_1}dz \right)^{1/p_0} & = \lambda^{1/2} \left( \int_{Q_{14\rho}^+(z_0)} |\mathbf{M}^{-\alpha/2}Dv|^{p_0} x_d^{\gamma_1'} dz \right)^{1/p_0} \\
& \leq N \left( \int_{Q_{14\rho}^+(z_0)} |\mathbf{M}^{ \alpha/2} f|^{p_0} x_d^{\bar{\gamma}_1}dz \right)^{1/p_0}.
\end{split} \] Then, \begin{equation} \label{os-Dv-18}
\lambda^{1/2} (|Dv|^{p_0})_{Q_{14\rho}^+(z_0), \bar{\mu}_1}^{1/p_0} \leq N (|\mathbf{M}^{\alpha/2}f|^{p_0})_{Q_{14\rho}^+(z_0), \bar{\mu}_1}^{1/p_0}. \end{equation} Now, let $w = u- v$. From \eqref{v-sol-1}, we see that $w \in \sW^{1,2}_{p_0}(Q_{14\rho}^+(z_0), x_d^{\gamma_1'}\, dz)$ is a strong solution of \[ \left\{ \begin{array}{cccl} \sL_0 w & =& 0 & \quad \text{in} \quad Q_{14\rho}^+(z_0),\\ w & = & 0 & \quad \text{on} \quad Q_{14\rho}(z_0) \cap \{x_d =0\}. \end{array} \right. \] Then, by applying Lemma \ref{oscil-lemma-1} to $w$, we see that \begin{equation} \label{W-osc-est-1}
(|W - (W)_{Q_{\kappa \rho}^+(z_0), \mu_1}|)_{Q_{\kappa \rho}^+(z_0), \mu_1} \leq N \kappa^{\theta} (|W|^{p_0})_{Q_{14 \rho}^+(z_0), \mu_1}^{1/p_0} \end{equation} and \begin{equation} \label{os-Dw-18}
(|Dw - (Dw)_{Q_{\kappa \rho}^+(z_0), \bar{\mu}_1}|)_{Q_{\kappa \rho}^+(z_0), \bar{\mu}_1} \leq N \kappa^{\theta} (|Dw|^{p_0})_{Q_{14 \rho}^+(z_0), \bar{\mu}_1}^{1/p_0}, \end{equation} where \[ \begin{split} & W= (\mathbf{M}^{-\beta_0} w_t, \mathbf{M}^{\alpha-\beta_0}DD_{x'} w, \lambda \mathbf{M}^{-\beta_0}w). \end{split} \] Now, note that from \eqref{def:Q} and \eqref{def:r} we have \begin{align} \notag \frac{\mu_1(Q_{14 \rho}^+(z_0))}{\mu_1(Q_{\kappa \rho}^+(z_0))} & = N(d) \kappa^{\alpha-2} \Big(\frac{r(14\rho, x_{0d})}{r(\kappa \rho, x_{0d})}\Big)^{d + (\gamma_1)_+} \\ \label{Q-compared} & \leq N (d)\kappa^{-(d+ (\gamma_1)_+ + 2-\alpha)}. \end{align} Then, it follows from the triangle inequality, H\"{o}lder's inequality, \eqref{W-osc-est-1}, and \eqref{Q-compared} that \[ \begin{split}
& (|U - (U)_{Q_{\kappa \rho}^+(z_0), \mu_1}|)_{Q_{\kappa \rho}^+(z_0), \mu_1} \\
& \leq (|W - (W)_{Q_{\kappa \rho}^+(z_0), \mu_1}|)_{Q_{\kappa \rho}^+(z_0), \mu_1} + (|V - (V)_{Q_{\kappa \rho}^+(z_0), \mu_1}|)_{Q_{\kappa \rho}^+(z_0), \mu_1} \\
& \leq (|W - (W)_{Q_{\kappa \rho}^+(z_0), \mu_1}|)_{Q_{\kappa \rho}^+(z_0), \mu_1} \\
& \qquad \quad+ N(d) \kappa^{-(d+ (\gamma_1)_+ +2-\alpha)/p_0} (|V|^{p_0})^{1/p_0}_{Q_{14\rho}^+(z_0), \mu_1}\\
& \leq N \kappa^{\theta} (|W|^{p_0})_{Q_{14 \rho}^+(z_0), \mu_1}^{1/p_0} + N(d) \kappa^{-(d + (\gamma_1)_+ +2-\alpha)/p_0} (|V|^{p_0})^{1/p_0}_{Q_{14\rho}^+(z_0), \mu_1}. \end{split} \] As $W = U -V$ and $\kappa \in (0,1)$, we apply the triangle inequality again to see that \[ \begin{split}
& (|U - (U)_{Q_{\kappa \rho}^+(z_0), \mu_1}|)_{Q_{\kappa \rho}^+(z_0), \mu_1}\\
&\leq N \kappa^{\theta} (|U|^{p_0})_{Q_{14 \rho}^+(z_0) , \mu_1}^{1/p_0} \\
& \qquad + N\big( \kappa^{-(d + (\gamma_1)_+ +2-\alpha)/p_0} + \kappa^{\theta}\big)(|V|^{p_0})_{Q_{14\rho}^+(z_0) , \mu_1}^{1/p_0} \\
& \leq N \kappa^{\theta} (|U|^{p_0})_{Q_{14 \rho}^+(z_0), \mu_1}^{1/p_0} + N \kappa^{-(d + (\gamma_1)_+ +2-\alpha)/p_0} (|V|^{p_0})_{Q_{14\rho}^+(z_0), \mu_1}^{1/p_0}. \end{split} \] From this and \eqref{V-osc-est-1}, it follows that \[ \begin{split}
& (|U - (U)_{Q_{\kappa \rho}^+(z_0) , \mu_1}|)_{Q_{\kappa \rho}^+(z_0), \mu_1}\\
& \leq N \kappa^{\theta} (|U|^{p_0})_{Q_{14 \rho}^+(z_0) , \mu_1}^{1/p_0} + N \kappa^{-(d + (\gamma_1)_++2-\alpha)/p_0} (|\mathbf{M}^{\alpha-\beta_0} f|^{p_0})_{Q_{14\rho}^+(z_0) , \mu_1}^{1/p_0}, \end{split} \] where $N = N(d,\nu, \alpha, \gamma_1, p_0)>0$. This proves the assertion on the oscillation of $U$. The oscillation estimate of $Du$ can be proved similarly using \eqref{os-Dv-18} and \eqref{os-Dw-18}. The proof of the lemma is completed. \end{proof}
We now conclude this subsection by pointing out the following important remark, which can be proved in the same way as Lemma \ref{oscil-lemma-2} with minor modifications. \begin{remark} \label{all-oscilla-est} Under the assumptions as in Lemma \ref{oscil-lemma-2}, and if $\beta_0 \in {(\alpha-1}, \alpha/2]$, it holds that \[ \begin{split}
& (|U' - (U')_{Q_{\kappa \rho}^+(z_0), \mu_1}|)_{Q_{\kappa \rho}^+(z_0), \mu_1}\\
&\leq N \kappa^{\theta} (|U'|^{p_0})_{Q_{14 \rho}^+(z_0), \mu_1}^{1/p_0} + N \kappa^{-(d + (\gamma_1)_+ +2-\alpha)/p_0} (|\mathbf{M}^{\alpha-\beta_0} f|^{p_0})_{Q_{14\rho}^+(z_0), \mu_1}^{1/p_0} \end{split} \] where \[ U' = (\mathbf{M}^{-\beta_0} u_t, \mathbf{M}^{\alpha-\beta_0}DD_{x'} u, \lambda^{1/2}\mathbf{M}^{\alpha/2-\beta_0} Du, \lambda \mathbf{M}^{-\beta_0}u). \] \end{remark}
\subsection{Proof of Theorem \ref{thm:xd}} \label{proof-xd} We are now ready to give the proof of Theorem \ref{thm:xd}. \begin{proof}[Proof of Theorem \ref{thm:xd}] We begin with the proof of the a priori estimates \eqref{eq:xd-main}--\eqref{eq3.09} assuming that $u \in \sW^{1,2}_p(\Omega_T, x_d^\gamma\, dz)$ is a strong solution to the equation \eqref{eq:xd} with \begin{equation} \label{gamma-alla-range} \gamma \in (p (\alpha-1)_{+} -1, 2p-1), \quad \text{where} \,\, (\alpha -1)_+ = \max\{\alpha-1, 0\}. \end{equation} In our initial step, we prove \eqref{eq:xd-main}--\eqref{eq3.09} with an extra assumption that $u$ is compactly supported. We first prove \eqref{eq:xd-main}. Let $\beta_0 = \min\{1, \alpha\}$, and we will apply Lemma \ref{oscil-lemma-2} with this $\beta_0$. Let $p_0 \in (1, p)$ and $\gamma_1 \in (p_0(\beta_0-\alpha +1) -1, p_0(\beta_0-\alpha+2)-1)$. We choose $p_0$ to be sufficiently close to $1$ and $\gamma_1$ to be sufficiently close to $p_0(\beta_0-\alpha+2)-1$ so that \begin{equation} \label{nature-choice-2} \gamma - [\gamma_1 +p(\alpha-\beta_0)] < (1+\gamma_1)(p/p_0 -1). \end{equation} We note that this is possible because $\alpha-\beta_0 = (\alpha-1)_+$ and \[ \gamma - [ \gamma_1 + p(\alpha-\beta_0)] < p[2 - (\alpha-1)_+] -1 -\gamma_1, \] and also from our choices of $p_0$ and $\gamma_1$, \[ \begin{split} (1+\gamma_1)(p/p_0 -1) & \sim p (1+\gamma_1) -1 -\gamma_1 \sim p[2 - (\alpha-1)_+] -1 -\gamma_1. \end{split} \] Now, let us denote \begin{equation} \label{gamma-1-pri} \gamma_1' : = \gamma_1 + p(\alpha-\beta_0) = \gamma_1 + p (\alpha-1)_+. \end{equation} Due to \eqref{gamma-alla-range} and the definition of $\gamma_1'$, it follows that \begin{equation} \label{nature-choice-1} \gamma - \gamma_1' = \gamma - p(\alpha-1)_+ - \gamma_1 > -1 - \gamma_1. \end{equation} From \eqref{nature-choice-1} and \eqref{nature-choice-2}, it holds that \begin{equation} \label{gamma-0414} \gamma' : = \gamma - \gamma_1' \in (-1-\gamma_1, (1+\gamma_1)(p/p_0 -1)). \end{equation}
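To illustrate that such a choice of $p_0$ and $\gamma_1$ is always available, here is one admissible set of values (any nearby values work equally well): if $\alpha = 1/2$, $p=2$, and $\gamma = 5/2 < 2p-1$, so that $\beta_0 = 1/2$ and $(\alpha-1)_+ = 0$, one may take $p_0 = 6/5$ and $\gamma_1 = 13/10 \in (p_0 -1, 2p_0 -1)$. Then $\gamma_1' = \gamma_1 = 13/10$, $\gamma' = \gamma - \gamma_1' = 6/5$, and $(1+\gamma_1)(p/p_0 -1) = 23/15 > 6/5$, so that \eqref{nature-choice-2} and \eqref{gamma-0414} indeed hold.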
Now, since $u$ has compact support in $\Omega_T$, we have $u \in \sW^{1,2}_p(\Omega_T, x_d^{\gamma_1'} dz)$. Therefore, it follows from Lemma \ref{oscil-lemma-2} that \[
U^{\#}_{\mu_1} \leq N \Big[ \kappa^{\theta} \cM_{\mu_1}(|U|^{p_0}) ^{1/p_0} + \kappa^{-(d+ (\gamma_1)_+ + 2-\alpha)/p_0} \cM_{\mu_1}(|\mathbf{M}^{\alpha-\beta_0} f|^{p_0})^{1/p_0} \Big], \] where $\mu_1(dz) = x_d^{\gamma_1}dxdt$ and \[ U = (\mathbf{M}^{-\beta_0} u_t, \mathbf{M}^{\alpha-\beta_0}DD_{x'} u, \lambda \mathbf{M}^{-\beta_0}u). \] Next, due to \eqref{gamma-0414}, we see that $x_d^{\gamma'} \in A_{p/p_0}(\mu_1)$. It then follows from the weighted Fefferman-Stein theorem and Hardy-Littlewood theorem (i.e., Theorem \ref{FS-thm}) that \begin{align} \notag
& \|U\|_{L_p(\Omega_T, x_d^{\gamma'}\, d\mu_1)} \leq N \|U^{\#}_{\mu_1}\|_{L_p(\Omega_T, x_d^{\gamma'}\,d\mu_1)} \\ \notag
& \leq N \Big[\kappa^{\theta}\|\cM_{\mu_1}(|U|^{p_0})^{1/p_0}\|_{L_p(\Omega_T, x_d^{\gamma'}\,d\mu_1 )} \\ \notag
& \qquad + \kappa^{-(d + (\gamma_1)_++2-\alpha)/p_0} \|\cM_{\mu_1}(|\mathbf{M}^{\alpha-\beta_0} f|^{p_0})^{1/p_0}\|_{L_p(\Omega_T, x_d^{\gamma'}\, d\mu_1)} \Big] \\ \label{est:0414-1}
& \leq N \Big[ \kappa^{\theta} \|U\|_{{L_p(\Omega_T, x_d^{\gamma'}\,d\mu_1)}} + \kappa^{-(d+ (\gamma_1)_++2-\alpha)/p_0} \| \mathbf{M}^{\alpha-\beta_0} f\|_{{L_p(\Omega_T, x_d^{\gamma'}\,d\mu_1)}} \Big]. \end{align} From the definition of $U$ and the choices of $\gamma'$ in \eqref{gamma-0414} and $\gamma_1'$ in \eqref{gamma-1-pri}, we have \[ \begin{split}
\|U\|_{{L_p(\Omega_T, x_d^{\gamma'}d\mu_1)}} & = \|\mathbf{M}^{-\alpha} u_t\|_{L_p(\Omega_T, x_d^\gamma\, dz)} + \|DD_{x'}u\|_{L_p(\Omega_T, x_d^\gamma dz)} \\
& \quad + \lambda \|\mathbf{M}^{-\alpha} u \|_{L_p(\Omega_T, x_d^\gamma\, dz)} <\infty. \end{split} \] Then, by choosing $\kappa \in (0,1)$ sufficiently small so that $N \kappa^{\theta} < 1/2$, we obtain from \eqref{est:0414-1} that \[ \begin{split}
& \|\mathbf{M}^{-\alpha} u_t\|_{L_p(\Omega_T, x_d^\gamma\, dz)} + \|DD_{x'}u\|_{L_p(\Omega_T, x_d^\gamma\, dz)} + \lambda \|\mathbf{M}^{-\alpha} u \|_{L_p(\Omega_T, x_d^\gamma\, dz)} \\
& \leq N \| \mathbf{M}^{\alpha-\beta_0} f\|_{{L_p(\Omega_T, x_d^{\gamma'}\,d\mu_1)}} = N \| f\|_{{L_p(\Omega_T, x_d^{\gamma}\,dz)}}. \end{split} \] Also, from the PDE in \eqref{eq:xd}, we see that \[
|D_{dd} u| \leq N[|DD_{x'}u| + (|u_t| + \lambda |u|)x_d^{-\alpha} + |f|], \] and therefore \[ \begin{split}
& \|\mathbf{M}^{-\alpha}u_t\|_{L_p(\Omega_T, x_d^{\gamma}\,dz)}+\lambda\|\mathbf{M}^{-\alpha}u\|_{L_p(\Omega_T, x_d^{\gamma}\,dz)}
+\|D^2 u\|_{L_p(\Omega_T, x_d^{\gamma}\,dz)}\\
&\le N\|f\|_{L_p(\Omega_T, x_d^{\gamma}\,dz)}, \end{split} \] which is \eqref{eq:xd-main}.
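In the above, we repeatedly used that, by \eqref{gamma-1-pri} and \eqref{gamma-0414},
\begin{equation*}
x_d^{\gamma'}\, d\mu_1 = x_d^{\gamma' + \gamma_1}\, dz = x_d^{\gamma - p(\alpha - \beta_0)}\, dz,
\end{equation*}
so that, for example, $\|\mathbf{M}^{\alpha-\beta_0} DD_{x'} u\|_{L_p(\Omega_T, x_d^{\gamma'}\, d\mu_1)} = \|DD_{x'} u\|_{L_p(\Omega_T, x_d^{\gamma}\, dz)}$ and $\|\mathbf{M}^{-\beta_0} u_t\|_{L_p(\Omega_T, x_d^{\gamma'}\, d\mu_1)} = \|\mathbf{M}^{-\alpha} u_t\|_{L_p(\Omega_T, x_d^{\gamma}\, dz)}$.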
Next, we prove the estimate \eqref{eq3.09} also with the extra assumption that $u$ has compact support. We observe that if $\gamma \in (p -1, 2p -1)$, \eqref{eq3.09} follows from \eqref{est-0405-1}. Therefore, it remains to consider the case that $\gamma \in (\alpha p/2-1, p -1]$ or equivalently \begin{equation} \label{gamma-range-2} \gamma - \alpha p/2 \in (-1, p(1-\alpha/2) -1]. \end{equation} The main idea is to apply Lemma \ref{oscil-lemma-2}, this time with $\beta_0 = \alpha/2$. Let $p_0, \gamma_1$ be as before but with the new choice of $\beta_0$. As noted in \eqref{mu1=bar-mu-1}, we have \[ \bar{\gamma}_1 = \gamma_1 - p_0(\beta_0 -\alpha/2) = \gamma_1 \qquad \text{and} \qquad \bar{\mu}_1 = \mu_1. \] Because of \eqref{gamma-range-2}, we can perform the same calculation as the one that yields \eqref{gamma-0414} to obtain \[ \bar{\gamma}' := \gamma - (\bar{\gamma}_1 + p\alpha/2) \in (-1 - \bar{\gamma}_1, (1+\bar{\gamma}_1)(p/p_0 -1)) \] and therefore $x_d^{\bar{\gamma}'} \in A_{p/p_0}(\bar{\mu}_1)$. By using Lemma \ref{oscil-lemma-2}, we have \begin{equation} \label{Du-sharp} \begin{split}
\lambda^{1/2} (Du)^{\#}_{\bar{\mu}_1} & \leq N \Big[ \kappa^{\theta} \lambda^{1/2}\cM_{\bar{\mu}_1}(|Du|^{p_0}) ^{1/p_0} \\
& \quad + \kappa^{-(d+ \bar{\gamma}_1 + 2-\alpha)/2} \cM_{\bar{\mu}_1}(|\mathbf{M}^{\alpha/2} f|^{p_0})^{1/p_0} \Big], \end{split} \end{equation} where $\bar{\mu}_1(dz) = x_d^{\bar{\gamma}_1}dxdt$. We apply Theorem \ref{FS-thm} to \eqref{Du-sharp}, and then choose $\kappa>0$ sufficiently small as in the proof of \eqref{eq:xd-main} to obtain \[
\lambda^{1/2}\|Du\|_{L_p(\Omega_T, x_d^{\bar{\gamma}'} d\bar{\mu}_1)} \leq N \|\mathbf{M}^{\alpha/2} f\|_{L_p(\Omega_T, x_d^{\bar{\gamma}'}\, d\bar{\mu}_1)}. \] This implies \[
\lambda^{1/2} \|\mathbf{M}^{-\alpha/2}Du\|_{L_p(\Omega_T, x_d^{\gamma}\, dz)} \leq N \| f\|_{L_p(\Omega_T, x_d^{\gamma}\, dz)} \] as $\gamma - p\alpha/2= \bar{\gamma}' + \bar{\gamma}_1$. The estimate \eqref{eq3.09} is proved.
Now, we prove \eqref{eq:xd-main}--\eqref{eq3.09} without the assumption that $u$ is compactly supported. As $u \in \sW^{1,2}_p(\Omega_T, x_d^\gamma dz)$, there is a sequence $\{u_n\}$ in $C_0^\infty(\Omega_T)$ such that \begin{equation} \label{u-approx-compact}
\lim_{n\rightarrow \infty} \|u_n -u\|_{\sW^{1,2}_p(\Omega_T, x_d^\gamma\, dz)} =0. \end{equation} Let $f_n = f + \sL_0 (u_n - u)/\mu(x_d)$ and observe that $u_n$ is a strong solution of \[ \sL_0 u_n = \mu(x_d) f_n \quad \text{in} \quad \Omega_T \quad \text{and} \quad u_n =0 \quad \text{on} \quad \{x_d =0\}. \] Then, applying the estimates \eqref{eq:xd-main}--\eqref{eq3.09} to $u_n$, we obtain \begin{equation} \label{un-supported}
\|u_n\|_{\sW^{1,2}_p(\Omega_T, x_d^\gamma\, dz)} \leq N\|f_n\|_{L_p(\Omega_T, x_d^\gamma\, dz)}. \end{equation} Note that \[ \begin{split}
& \|f_n\|_{L_p(\Omega_T, x_d^\gamma\, dz)} \leq \|f\|_{L_p(\Omega_T, x_d^\gamma\, dz)} + N\lambda \|\mathbf{M}^{-\alpha} (u-u_n)\|_{L_p(\Omega_T, x_d^\gamma\, dz)}\\
& \qquad + N \Big[ \|D^2(u-u_n)\|_{L_p(\Omega_T, x_d^\gamma\, dz)} + \|\mathbf{M}^{-\alpha}(u-u_n)_t\|_{L_p(\Omega_T, x_d^\gamma\, dz)} \Big] \\
& \rightarrow \|f\|_{L_p(\Omega_T, x_d^\gamma\, dz)} \quad \text{as} \quad n \rightarrow \infty. \end{split} \] Therefore, by taking $n\rightarrow \infty$ in \eqref{un-supported} and using \eqref{u-approx-compact}, we obtain the estimates \eqref{eq:xd-main}--\eqref{eq3.09} for $u$. Hence, the proof of \eqref{eq:xd-main}--\eqref{eq3.09} is completed.
It remains to prove the existence of a strong solution $u \in \sW^{1,2}_p(\Omega_T, x_d^{\gamma}\, dz)$ to \eqref{eq:xd} assuming that $f \in L_p(\Omega_T, x_d^\gamma\, dz)$, for $p \in (1, \infty)$ and $\gamma \in (p (\alpha-1)_{+} -1, 2p-1)$. We observe that when $\gamma \in (p-1, 2p-1)$, the existence of a solution is already proved in Lemma \ref{l-p-sol-lem}. Therefore, it remains to consider the case when $$\gamma \in (p (\alpha-1)_{+} -1, p-1].$$ We consider two cases.
\noindent {\em Case} 1. Consider $\gamma \in (p(\alpha-1)_+ -1, p-1)$. As $f \in L_p(\Omega_T, x_d^\gamma\, dz)$, there is a sequence $\{f_k\}_k \subset C_0^\infty(\Omega_T)$ such that \begin{equation} \label{fk-approx}
\lim_{k\rightarrow \infty}\|f_k - f\|_{L_p(\Omega_T, x_d^\gamma\, dz)} =0. \end{equation} For each $k \in \mathbb{N}$, because $f_k$ has compact support, we see that \[ x_d^{1-\alpha} \mu(x_d) f_k \sim x_d f_k \in L_p(\Omega_T, x_d^{\gamma}\, dz). \] Then, as in the proof of Lemma \ref{l-p-sol-lem}, we apply \cite[Theorem 2.4]{DPT21} to find a weak solution $u_k \in \cH^1_p(\Omega_T, x_d^{\gamma}\, dz)$ to the divergence form equation \eqref{eq:xd-div} with $f_k$ in place of $f$. Moreover, \begin{equation} \label{uk-Hp-est}
\|Du_k\|_{L_p(\Omega_T, x_d^\gamma\, dz)} + \|\mathbf{M}^{-\alpha/2}u_k\|_{L_p(\Omega_T, x_d^\gamma\, dz)} < \infty. \end{equation} We claim that $u_k \in \sW^{1,2}_p(\Omega_T, x_d^{\gamma}\, dz)$ for each $k \in \mathbb{N}$. Note that if the claim holds, we can apply the a priori estimate that we just proved for the equations of $u_k$ and of $u_k - u_l$ to get \[ \begin{split}
& \|u_k\|_{\sW^{1,2}_p(\Omega_T, x_d^{\gamma}\, dz)} \leq N \|f_k\|_{L_p(\Omega_T, x_d^{\gamma}\, dz)} \quad \text{and} \\
& \|u_k - u_l\|_{\sW^{1,2}_p(\Omega_T, x_d^{\gamma}\, dz)} \leq N \|f_k - f_l\|_{L_p(\Omega_T, x_d^{\gamma}\, dz)} \end{split} \] for any $k, l \in \mathbb{N}$, where $N = N(\nu, \gamma, \alpha, p)>0$ is independent of $k, l$. The last estimate and \eqref{fk-approx} imply that the sequence $\{u_k\}_k$ is convergent in $\sW^{1,2}_p(\Omega_T, x_d^{\gamma}\, dz)$. Let $u \in \sW^{1,2}_p(\Omega_T, x_d^{\gamma}\, dz)$ be the limit of this sequence; then $u$ solves \eqref{eq:xd}.
Hence, in this case, it remains to prove the claim that $u_k \in \sW^{1,2}_p(\Omega_T, x_d^{\gamma}\, dz)$ for every $k \in \mathbb{N}$. Now, let us fix $k \in \mathbb{N}$, and let us denote $\Omega_T' = (-\infty, T) \times \mathbb{R}^{d-1}$. Let $0 < r_0 < R_0$ be such that \begin{equation} \label{fk-support} \text{supp}(f_k) \subset {\Omega_T'} \times (r_0, R_0). \end{equation} Without loss of generality, we assume that $r_0 =2$. From \eqref{uk-Hp-est}, it follows directly that \[ \begin{split}
& \|Du_k\|_{L_p(\Omega_T' \times (1, \infty), x_d^{\gamma -p}\, dz)} + \|u_k\|_{L_p(\Omega_T' \times (1, \infty), x_d^{\gamma-2p}\, dz)} \\
& \qquad + \|\mathbf{M}^{-\alpha/2}u_k\|_{L_p(\Omega_T' \times (1,\infty), x_d^{\gamma-p}\, dz)} <\infty. \end{split} \] Then, we can follow the proof of Lemma \ref{l-p-sol-lem} to show that \[
\|u_k\|_{\sW^{1,2}_{p}(\Omega_T' \times (1,\infty), x_d^{\gamma}\, dz)} <\infty. \] It now remains to prove that $u_k \in \sW^{1,2}_{p}({\Omega_T'\times (0,1)}, x_d^{\gamma}\, dz)$ and \begin{equation} \label{near-est-uk}
\|u_k\|_{\sW^{1,2}_{p}(\Omega_T' \times (0, 1), x_d^{\gamma}\, dz)} < \infty. \end{equation} To this end, because of \eqref{fk-support}, we note that $u_k$ solves the homogeneous equation \begin{equation} \label{uk-ne-zero} \sL_0 u_k =0 \quad \text{in} \quad \Omega_T' \times (0, 2) \end{equation} with the boundary condition $u_k =0$ on $\{x_d =0\}$. Let us denote \[ \begin{split}
& C_r = [-1, 0) \times \big\{ x = (x_1, \ldots, x_d) \in \mathbb{R}^{d}_+ : {\max_{1 \leq i \leq d}|x_i|}<r\big\}, \\ & C_r(t,x) = C_r + (t,x), \quad r >0. \end{split} \] Consider $\alpha \in (0, 1)$. By using Lemmas \ref{caccio} and \ref{prop:boundary} together with a scaling argument and a translation, we obtain \begin{equation*} \begin{split}
& \|\mathbf{M}^{-\alpha} u_k\|_{L_\infty(C_{1}(z_0))} + \|Du_k\|_{L_\infty(C_{1}(z_0))} + \|\mathbf{M}^{-\alpha} \partial_t u_k\|_{L_\infty(C_{1}(z_0))} \\
& \quad + \|DD_{x'} u_k\|_{L_\infty(C_{1}(z_0))} \leq N \Big[\|Du_k\|_{L_{p}(C_{2}(z_0), x_d^\gamma\, dz)} + \|\mathbf{M}^{-\alpha/2} u_k\|_{L_{p}(C_{2}(z_0), x_d^\gamma dz)} \Big] \end{split} \end{equation*} for every $z_0 = (t_0, x_0', 0) \in \Omega_T' \times\{0\}$. Note that $N$ depends on $k$, but is independent of $z_0$. This and the PDE in \eqref{uk-ne-zero} imply that \[ \begin{split}
& \|\mathbf{M}^{-\alpha} u_k\|_{L_\infty(C_{1}(z_0))} + \|Du_k\|_{L_\infty(C_{1}(z_0))} + \|\mathbf{M}^{-\alpha} \partial_t u_k\|_{L_\infty(C_{1}(z_0))} \\
& \quad + \|D^2 u_k\|_{L_\infty(C_{1}(z_0))} \leq N \Big[\|Du_k\|_{L_{p}(C_{2}(z_0), x_d^\gamma\, dz)} + \|\mathbf{M}^{-\alpha/2} u_k\|_{L_{p}(C_{2}(z_0), x_d^\gamma\, dz)} \Big]. \end{split} \] Then, as $\gamma > -1$, we see that \[ \begin{split}
& \|\mathbf{M}^{-\alpha}u_k\|_{L_p(C_1(z_0), x_d^{\gamma}\, dz)}
+ \|Du_k\|_{L_p(C_1(z_0), x_d^{\gamma}\, dz)}
+ \|\mathbf{M}^{-\alpha}\partial_t u_k\|_{L_p(C_1(z_0), x_d^{\gamma}\, dz)} \\
& + \|D^2u_k\|_{L_p(C_1(z_0), x_d^{\gamma}\, dz)} \leq N \Big[\|Du_k\|_{L_{p}(C_{2}(z_0), x_d^\gamma\, dz)} + \|\mathbf{M}^{-\alpha/2} u_k\|_{L_{p}(C_{2}(z_0), x_d^\gamma\, dz)} \Big]. \end{split} \] Then, with $z_0 = (t_0, x_0', 0)$ for $(t_0, x_0') \in \mathcal{I} = ({(\mathbb{Z}+T)} \cap (-\infty, T{]}) \times (2\mathbb{Z})^{d-1}$, we have \[ \begin{split}
\|u_k\|^p_{\sW^{1,2}_p(\Omega_T' \times (0,1), x_d^\gamma\, dz)} & = \sum_{(t_0, x_0') \in \mathcal{I} } \|u_k\|^p_{\sW^{1,2}_p(C_1(z_0), x_d^\gamma\, dz)} \\
& \leq N \sum_{(t_0, x_0') \in \mathcal{I} }\Big[\|Du_k\|^p_{L_{p}(C_{2}(z_0), x_d^\gamma\, dz)} + \|\mathbf{M}^{-\alpha/2} u_k\|^p_{L_{p}(C_{2}(z_0), x_d^\gamma\, dz)} \Big] \\
& \leq N\Big[ \|Du_k\|^p_{L_{p}(\Omega_T, x_d^\gamma\, dz)} + \|\mathbf{M}^{-\alpha/2} u_k\|^p_{L_{p}(\Omega_T, x_d^\gamma\, dz)}\Big] <\infty. \end{split} \] Hence, \eqref{near-est-uk} holds.
Now, we consider $\alpha \in [1, 2)$. As $\gamma + p (1-\alpha) >-1$, we see that \[ \begin{split}
& \int_{C_1(z_0)} |x_d^{-\alpha} u_k(z)|^p x_d^\gamma dz = \int_{C_1(z_0)} |x_d^{-1}u_k(z)|^p x_d^{\gamma + p (1-\alpha)} dz \\
& \leq N \|Du_k\|^p_{L_\infty(C_1(z_0))} \\
& \leq N\Big[ \|Du_k\|^p_{L_{p}(C_2(z_0), x_d^\gamma\, dz)} + \|\mathbf{M}^{-\alpha/2} u_k\|^p_{L_{p}(C_2(z_0), x_d^\gamma\, dz)}\Big]. \end{split} \] Then, by taking the sum of this inequality for $(t_0, x_0') \in \mathcal{I}$, we also obtain \[
\|\mathbf{M}^{-\alpha} u_k\|_{L_p(\Omega_T' \times (0,1), x_d^\gamma\, dz)} \leq N \Big[ \|Du_k\|_{L_p(\Omega_T, x_d^\gamma\, dz)} + \|\mathbf{M}^{-\alpha/2}u_k\|_{L_p(\Omega_T, x_d^\gamma\, dz)} \Big]. \] Similarly, we also have $\mathbf{M}^{-\alpha} (u_k)_t, Du_k\in L_p(\Omega_T' \times (0,1), x_d^\gamma\,dz)$. By using the difference quotient argument, we also get $DD_{x'} u_k \in L_p(\Omega_T' \times (0,1), x_d^\gamma\,dz)$. From this and the PDE of $u_k$, we have $D^2u_k \in L_p(\Omega_T' \times (0,1), x_d^\gamma\, dz)$. Therefore, \eqref{near-est-uk} holds. The proof of the claim in this case is completed.
\noindent {\em Case} 2. We consider $\gamma =p-1$. Let $\{f_k\}_k$ be as in \eqref{fk-approx} and let $\bar{\gamma} \in (p(\alpha-1)_+ -1, p-1)$. As in {\em Case 1}, we can find a weak solution $u_k \in \cH^1_p(\Omega_T, x_d^{\bar{\gamma}}\, dz)$ to the divergence form equation \eqref{eq:xd-div} with $f_k$ in place of $f$, and \begin{equation} \label{uk-Hp-est-b}
\|Du_k\|_{L_p(\Omega_T, x_d^{\bar{\gamma}}\, dz)} + \|\mathbf{M}^{-\alpha/2}u_k\|_{L_p(\Omega_T, x_d^{\bar{\gamma}}\, dz)} < \infty. \end{equation} We claim that for each $k \in \mathbb{N}$, \begin{equation} \label{uk-Hp-est-b-1}
\|Du_k\|_{L_p(\Omega_T, x_d^{\gamma}\, dz)} + \|\mathbf{M}^{-\alpha/2}u_k\|_{L_p(\Omega_T, x_d^\gamma\, dz)} < \infty. \end{equation} Once this claim is proved, we can follow the proof in {\em Case 1} to obtain the existence of a solution $u \in \sW^{1,2}_p(\Omega_T, x_d^\gamma\, dz)$. Therefore, we only need to prove \eqref{uk-Hp-est-b-1}.
Let us fix $k \in \mathbb{N}$ and let $0 < r_0 < R_0$ such that \eqref{fk-support} holds. As $\bar{\gamma} < \gamma$, we see that \[ \begin{split}
& \|Du_k\|_{L_p(\Omega_T' \times (0, 2R_0), x_d^{\gamma}\, dz)} + \|\mathbf{M}^{-\alpha/2}u_k\|_{L_p(\Omega_T' \times (0, 2R_0), x_d^\gamma\, dz)} \\
& \leq N\Big[ \|Du_k\|_{L_p(\Omega_T' \times (0, 2R_0), x_d^{\bar\gamma}\, dz)} + \|\mathbf{M}^{-\alpha/2}u_k\|_{L_p(\Omega_T' \times (0, 2R_0), x_d^{\bar\gamma}\, dz)}\Big] <\infty \end{split} \] due to \eqref{uk-Hp-est-b}. Hence, it remains to prove \begin{equation} \label{uk-Hp-est-b-2}
\|Du_k\|_{L_p(\Omega_T' \times (2R_0, \infty), x_d^{\gamma}\, dz)} + \|\mathbf{M}^{-\alpha/2}u_k\|_{L_p(\Omega_T' \times (2R_0, \infty), x_d^\gamma\, dz)} < \infty. \end{equation} To prove \eqref{uk-Hp-est-b-2}, we use the localization technique along the $x_d$ variable. See \cite[Proof of Theorem 4.5, Case II]{DP-JFA}. We skip the details. \end{proof}
\section{Equations with partially VMO coefficients} \label{sec:4} We study \eqref{eq:main} in this section. Precisely, we consider the equation \begin{equation}\label{eq:main-1} \begin{cases} \sL u=\mu(x_d) f \quad &\text{ in } \Omega_T,\\ u=0 \quad &\text{ on } (-\infty, T) \times \partial \mathbb{R}^d_+, \end{cases} \end{equation} where $\sL$ is defined in \eqref{L-def} in which the coefficients $a_0$, $c_0$, and $a_{ij}$ are measurable functions depending on $z = (z', x_d) \in \Omega_T$. We employ the perturbation method by freezing the coefficients. For $z_0 = (z'_0, x_{0d}) \in \overline{\Omega}_T$, let $[{a}_{ij}]_{Q_{\rho}'(z'_0)}, [a_{0}]_{Q_{\rho}'(z'_0)}$, and $[c_{0}]_{Q_{\rho}'(z'_0)}$ be functions defined in Assumption \ref{assumption:osc} $(\delta, \gamma_1, \rho_0)$, and we denote \begin{equation} \label{a-sharp-def} \begin{split}
a^{\#}_{\rho_0}(z_0) & =\sup_{\rho\in(0,\rho_0)}\left[ \max_{i,j=1, 2,\ldots, d}\fint_{Q_{\rho}^+(z_0)}|a_{ij}(z) -[{a}_{ij}]_{Q_{\rho}'(z'_0)}(x_d)| \mu_1(dz) \right. \\
& \qquad + \fint_{Q_{\rho}^+(z_0)}|a_{0}(z) -[{a}_{0}]_{Q_{\rho}'(z'_0)}(x_d)| \mu_1(dz) \\
& \qquad \left. + \fint_{Q_{\rho}^+(z_0)}|c_{0}(z) -[{c}_{0}]_{Q_{\rho}'(z'_0)}(x_d)| \mu_1(dz) \right]. \end{split} \end{equation} For the reader's convenience, recall that $\mu_1, \bar{\mu}_1$ are defined in \eqref{mu-1-def}. We also recall that for a given $u$, we denote \[ U = (\mathbf{M}^{-\beta_0} u_t, \mathbf{M}^{\alpha-\beta_0}DD_{x'} u, \lambda \mathbf{M}^{-\beta_0}u). \] We also denote \[ U' = (\mathbf{M}^{-\beta_0} u_t, \mathbf{M}^{\alpha-\beta_0}DD_{x'} u, \lambda^{1/2} \mathbf{M}^{\alpha/2-\beta_0}Du, \lambda \mathbf{M}^{-\beta_0}u). \] We begin with the following oscillation estimates for solutions to \eqref{eq:main-1} that have small supports in the time-variable. \begin{lemma} \label{osc-est-small} Let $\nu, \rho_0 \in (0,1)$, $p_0 \in (1, \infty)$, $\alpha \in (0,2)$, $\beta_0 \in {(\alpha-1}, \min\{1, \alpha\}]$, $\gamma_1 \in (p_0(\beta_0-\alpha +1) -1, p_0(\beta_0-\alpha +2)-1)$, and $\gamma_1' = \gamma_1-p_0(\beta_0-\alpha)\in (p_0-1,2p_0-1)$. Assume that $u \in \sW^{1,2}_{p}(Q_{6\rho}^+(z_0), x_d^{\gamma_1'}dz)$ is a strong solution of \[ \left\{ \begin{array}{cccl} \sL u & = & \mu(x_d) f & \quad \text{in} \quad Q_{6\rho}^+(z_0),\\ u & = & 0 & \quad \text{on} \quad Q_{6\rho}(z_0) \cap \{x_d =0\} \end{array} \right. \] for $f \in L_{p_0}(Q_{6\rho}^+(z_0), x_d^{{\gamma_1'}}dz)$. Assume in addition that $\textup{supp}(u) \subset (t_1 -(\rho_0 \rho_1)^{2-\alpha}, t_1 +(\rho_0 \rho_1)^{2-\alpha})$ for some $t_1 \in \mathbb{R}$ and $\rho_0 >0$. Then, \begin{align} \notag
&\big (|U - (U)_{Q_{\kappa\rho}^+(z_0), \mu_1}|\big)_{Q_{\kappa\rho}^+(z_0), \mu_1} \\ \notag
& \leq N \Big[\kappa^{\theta} + \kappa^{-(d + (\gamma_1)_+ +2-\alpha)/p_0} \big( a_{\rho_0}^{\#}(z_0)^{\frac{1}{p_0} - \frac{1}{p}} + \rho_1^{(2-\alpha)(1-1/p_0)}\big) \Big] (|U|^{p})_{Q_{14 \rho}^+(z_0), \mu_1}^{1/p} \\ \label{U-osc-gen}
& \qquad + N \kappa^{-(d + (\gamma_1)_+ +2-\alpha)/p_0} (|\mathbf{M}^{\alpha-\beta_0} f|^{p_0})_{Q_{14\rho}^+(z_0), \mu_1}^{1/p_0}, \end{align} where $\theta>0$ is defined in Lemma \ref{oscil-lemma-1}, $p\in (p_0,\infty)$, and $N = N(p, p_0, \gamma_1, \alpha, \beta_0, d, \nu)>0$. In addition, if $\beta_0 \in {(\alpha-1}, \alpha/2]$, we also have \begin{align} \notag
&\big (|U' - (U')_{Q_{\kappa\rho}^+(z_0), \mu_1}|\big)_{Q_{\kappa\rho}^+(z_0), \mu_1} \\ \notag
& \leq N \Big[\kappa^{\theta} + \kappa^{-(d + (\gamma_1)_+ +2-\alpha)/p_0} \big( a_{\rho_0}^{\#}(z_0)^{\frac{1}{p_0} - \frac{1}{p}} + \rho_1^{(2-\alpha)(1-1/p_0)}\big) \Big] (|U'|^{p})_{Q_{14 \rho}^+(z_0), \mu_1}^{1/p} \\ \label{Uall-osc-gen}
& \qquad + N \kappa^{-(d + (\gamma_1)_+ +2-\alpha)/p_0} (|\mathbf{M}^{\alpha-\beta_0} f|^{p_0})_{Q_{14\rho}^+(z_0), \mu_1}^{1/p_0}. \end{align}
\end{lemma} \begin{proof} We split the proof into two cases.
\noindent {\em Case 1.} We consider $\rho < \rho_0/14$. We denote \[ \sL_{\rho, z_0} u =[a_0]_{Q_{6\rho}'(z_0')}(x_d) u_t + \lambda [c_0]_{Q_{6\rho}'(z_0')}(x_d)u - \mu(x_d) [a_{ij}]_{Q_{6\rho}'(z_0')}(x_d) D_i D_j u \] and \[ \begin{split} \tilde{f}(z) & = f(z) + [a_{ij} - [a_{ij}]_{Q_{6\rho}'(z_0')}(x_d)] D_i D_ju \\ & \qquad + \big[ \lambda ([c_0]_{Q_{6\rho}'(z_0')} - c_0) u + ([a_0]_{Q_{6\rho}'(z_0')} - a_0) u_t \big]/\mu(x_d). \end{split} \] Then, $u \in \sW^{1,2}_{p}(Q_{6\rho}^+(z_0), x_d^{\gamma_1'}dz)$ is a strong solution of \[ \left\{ \begin{array}{cccl} \sL_{\rho, z_0} u & = &\mu(x_d) \tilde{f} & \quad \text{in} \quad Q_{6\rho}^+(z_0)\\ u & = & 0 & \quad \text{on} \quad Q_{6\rho}^+(z_0) \cap \{x_d =0\}. \end{array} \right. \] We note that due to \eqref{add-assumption}, the term $a_{dd} - \bar{a}_{dd} =0$. Therefore, by using H\"{o}lder's inequality and \eqref{con:ellipticity}, we obtain \[ \begin{split}
& \left(\fint_{Q_{14\rho}^+(z_0)}|\mathbf{M}^{\alpha-\beta_0} \big(a_{ij} - [a_{ij}]_{Q_{6\rho}'(z_0')}(x_d)\big) D_i D_ju|^{p_0} \mu_1(dz) \right)^{1/p_0} \\
& \leq \left(\fint_{Q_{14\rho}^+(z_0)}|a_{ij} - [a_{ij}]_{Q_{6\rho}'(z_0')}(x_d)|^{pp_0/(p-p_0)} \mu_1(dz) \right)^{\frac{1}{p_0} -\frac{1}{p}} \\
& \qquad \times \left(\fint_{Q_{14\rho}^+(z_0)}|\mathbf{M}^{\alpha-\beta_0} DD_{x'}u|^{p} \mu_1(dz)\right)^{1/p} \\
& \leq N a_{\rho_0}^{\#}(z_0)^{\frac{1}{p_0} - \frac{1}{p}} \left(\fint_{Q_{14\rho}^+(z_0)}|\mathbf{M}^{\alpha-\beta_0} DD_{x'}u|^{p} \mu_1(dz)\right)^{1/p}.
\end{split} \] By a similar calculation using \eqref{con:mu}, we also obtain the estimate for the term $\big[ \lambda ([c_0]_{Q_{6\rho}'(z_0')}(x_d) - c_0) u + ([a_{0}]_{Q_{6\rho}'(z_0')}(x_d) - a_0) u_t \big]/\mu(x_d)$. Thus, \[ \begin{split}
(|\mathbf{M}^{\alpha-\beta_0} \tilde{f}|^{p_0})_{Q_{14\rho}^+(z_0), \mu_1}^{1/p_0} & \leq (|\mathbf{M}^{\alpha-\beta_0} f|^{p_0})_{Q_{14\rho}^+(z_0), \mu_1}^{1/p_0} \\
& \qquad + N a_{\rho_0}^{\#}(z_0)^{\frac{1}{p_0} - \frac{1}{p}} (|U|^{p})_{Q_{14\rho}^+(z_0), \mu_1}^{1/p}.
\end{split} \] Then, applying Lemma \ref{oscil-lemma-2}, we obtain \[ \begin{split}
& (|U - (U)_{Q_{\kappa \rho}^+(z_0), \mu_1}|)_{Q_{\kappa \rho}^+(z_0), \mu_1}\\
&\leq N \kappa^{\theta} (|U|^{p_0})_{Q_{14 \rho}^+(z_0), \mu_1}^{1/p_0} + N \kappa^{-(d + \gamma_1 +2-\alpha)/p_0} (|\mathbf{M}^{\alpha-\beta_0} \tilde{f}|^{p_0})_{Q_{14\rho}^+(z_0), \mu_1}^{1/p_0} \\
& \leq N \big(\kappa^{\theta} + \kappa^{-(d + \gamma_1 +2-\alpha)/p_0} a_{\rho_0}^{\#}(z_0)^{\frac{1}{p_0} - \frac{1}{p}} \big) (|U|^{p})_{Q_{14 \rho}^+(z_0), \mu_1}^{1/p} \\
& \qquad + N \kappa^{-(d + \gamma_1 +2-\alpha)/p_0} (|\mathbf{M}^{\alpha-\beta_0} f|^{p_0})_{Q_{14\rho}^+(z_0), \mu_1}^{1/p_0}. \end{split} \] Therefore, \eqref{U-osc-gen} holds. In a similar way but applying Remark \ref{all-oscilla-est}, we also obtain \eqref{Uall-osc-gen}.
\noindent {\em Case 2.} Consider $\rho \geq \rho_0/14$. Denoting $\Gamma = (t_1 -(\rho_0 \rho_1)^{2-\alpha}, t_1 + (\rho_0 \rho_1)^{2-\alpha})$, we apply \eqref{Q-compared} and the triangle inequality to infer that \[ \begin{split}
& \fint_{Q_{\kappa\rho}^+(z_0)} |U - (U)_{Q_{\kappa\rho}^+(z_0), \mu_1}|
\mu_1(dz)\leq 2 \fint_{Q_{\kappa\rho}^+(z_0)} |U(z)|\mu_1(dz) \\
& \leq N \kappa^{-(d+2 -\alpha + (\gamma_1)_+)} \left(\fint_{Q_{14\rho}^+(z_0)} |U(z)|^{p_0}\mu_1(dz)\right)^{\frac 1 {p_0}} \left(\fint_{Q_{14\rho}^+(z_0)} \mathbf{1}_{\Gamma}(z) \mu_1(dz)\right)^{1-\frac 1 {p_0}} \\
& \leq N \kappa^{-(d+2 -\alpha + (\gamma_1)_+)} \rho_1^{(2-\alpha)(1-1/p_0)}\left(\fint_{Q_{14\rho}^+(z_0)} |U(z)|^{p_0}\mu_1(dz)\right)^{1/p_0} \\
& \leq N \kappa^{-(d+2 -\alpha + (\gamma_1)_+)} \rho_1^{(2-\alpha)(1-1/p_0)}(|U|^{p})_{Q_{14 \rho}^+(z_0), \mu_1}^{1/p} . \end{split} \] Therefore, \eqref{U-osc-gen} follows. Similarly, \eqref{Uall-osc-gen} can be proved. \end{proof} Our next lemma gives the a priori estimates of solutions having small supports in $t$. \begin{lemma}[Estimates of solutions having small supports] \label{small-support-sol} Let $T \in (-\infty, \infty]$, $\nu \in (0,1)$, $p, q, K \in (1, \infty)$, $\alpha \in (0, 2)$, and $\gamma_1 \in (\beta_0 -\alpha, \beta_0 -\alpha +1]$ for $\beta_0 \in {(\alpha-1}, \min\{1, \alpha\}]$. Then, there exist sufficiently small positive numbers $\delta$ and $\rho_1$, depending on $d, \nu, p, q, K, \alpha{,\beta_0}$, and $\gamma_1$, such that the following assertion holds. Suppose that $\omega_0 \in A_q(\mathbb{R})$, $\omega_1 \in A_p(\mathbb{R}^d_+, \mu_1)$ with \[ [\omega_0]_{A_q(\mathbb{R})} \leq K \qquad \text{and} \qquad [\omega_1]_{A_p(\mathbb{R}^d_+, \mu_1)} \leq K. \] Suppose that \eqref{con:mu}, \eqref{con:ellipticity}, and \eqref{add-assumption} hold, and \textup{Assumption \ref{assumption:osc}}$(\delta, \gamma_1, \rho_0)$ holds with some $\rho_0>0$. If $u \in \sW^{1,2}_{q,p}(\Omega_T, x_d^{p(\alpha-\beta_0)} \omega\, d\mu_1)$ is a strong solution to \eqref{eq:main} with some $\lambda>0$ and a function $f\in L_{q,p}(\Omega_T, x_d^{p(\alpha-\beta_0)} \omega\, d\mu_1)$, and $u$ vanishes outside $(t_1 - (\rho_0\rho_1)^{2-\alpha}, t_1+(\rho_0\rho_1)^{2-\alpha})$ for some $t_1 \in \mathbb{R}$, then \begin{equation} \label{est-1-small-supp}
\|\mathbf{M}^{-\alpha} u_t\|_{L_{q,p}} + \|D^2u\|_{L_{q,p}} + \lambda \|\mathbf{M}^{-\alpha} u\|_{L_{q,p}} \leq N \|f\|_{L_{q,p}}, \end{equation} where $N = N(d,\nu, p, q, \alpha,{ \beta_0,}\gamma_1, K)>0$, $L_{q,p}=L_{q,p}(\Omega_T, x_d^{p(\alpha-\beta_0)} \omega\, d\mu_1)$, $\omega(t,x) =\omega_0(t)\omega_1(x)$ for $(t,x) \in \Omega_T$, and $\mu_1(dz) = x_d^{\gamma_1}\, dxdt$. Moreover, if $\beta_0 \in [0, \alpha/2]$, then it also holds that \begin{equation} \label{est-2-small-supp} \begin{split}
& \|\mathbf{M}^{-\alpha} u_t\|_{L_{q,p}} + \|D^2u\|_{L_{q,p}} + \lambda^{1/2} \|\mathbf{M}^{-\alpha/2} Du\|_{L_{q,p}} + \lambda \|\mathbf{M}^{-\alpha} u\|_{L_{q,p}} \\
& \leq N \|f\|_{L_{q,p}}. \end{split} \end{equation} \end{lemma} \begin{proof} As $\omega_0 \in A_q((-\infty,T))$ and $\omega_1 \in A_p(\mathbb{R}^d_+, d\mu_1)$, by the reverse H\"older inequality \cite[Theorem 3.2]{MS1981}, we find $p_1=p_1(d,p,q,\gamma_1,K)\in (1,\min(p,q))$ such that \begin{equation} \label{eq0605_13} \omega_0 \in A_{q/p_1}((-\infty,T)),\quad \omega_1 \in A_{p/p_1}(\mathbb{R}^d_+, \mu_1). \end{equation} Because $\gamma_1 \in (\beta_0 -\alpha, \beta_0 -\alpha +1]$, we can choose $p_0 \in (1, p_1)$ sufficiently close to $1$ so that \[ \gamma_1 \in (p_0(\beta_0-\alpha +1) -1, p_0(\beta_0-\alpha +2) -1). \] By \eqref{U-osc-gen} of Lemma \ref{osc-est-small} and H\"{o}lder's inequality, we have \[ \begin{split}
U^{\#}_{\mu_1} \leq & N \Big[\kappa^{\theta} + \kappa^{-(d + \gamma_1 +2-\alpha)/p_0} \big(a_{\rho_0}^{\#}(z_0)^{\frac{1}{p_0} - \frac{1}{p_1}} + \rho_1^{(2-\alpha)(1-1/p_0)}\big) \Big] \cM_{\mu_1}(|U|^{p_1})^{1/p_1} \\
& \qquad + N \kappa^{-(d + \gamma_1 +2-\alpha)/p_0} \cM_{\mu_1}(|\mathbf{M}^{\alpha-\beta_0} f|^{p_1})^{1/p_1}
\quad \text{in} \quad \overline{\Omega_T} \end{split} \]
for any $\kappa\in (0,1)$, where $N = N(\nu, d, p_0, p_1, \alpha,\beta_0, \gamma_1) >0$ and $a_{\rho_0}^{\#}$ is defined in \eqref{a-sharp-def}. Therefore, it follows from Theorem \ref{FS-thm} and \eqref{eq0605_13} that \[ \begin{split} & \norm{U}_{L_{q,p}(\Omega_T, \omega\, d\mu_1)}\\ & \leq N \Big[\kappa^{\theta} + \kappa^{-(d + \gamma_1 +2-\alpha)/p_0} \big(\delta^{\frac{1}{p_0} - \frac{1}{p_1}} + \rho_1^{(2-\alpha)(1-1/p_0)}\big) \Big] \times \\
& \qquad \qquad \times \|\cM_{\mu_1}(|U|^{p_1})^{1/p_1}\|_{L_{q,p}(\Omega_T, \omega\, d\mu_1)} \\
& \qquad + N \kappa^{-(d + \gamma_1 +2-\alpha)/p_0} \| \cM_{\mu_1} (|\mathbf{M}^{\alpha -\beta_0} f|^{p_1})^{\frac 1 {p_1}}\|_{L_{q,p}(\Omega_T, \omega\, d\mu_1)} \\
& \leq N \Big[\kappa^{\theta} + \kappa^{-(d + \gamma_1 +2-\alpha)/p_0} \big(\delta^{\frac{1}{p_0} - \frac{1}{p_1}} + \rho_1^{(2-\alpha)(1-1/p_0)}\big) \Big] \|U\|_{L_{q,p}(\Omega_T, \omega\, d\mu_1)} \\
& \qquad + N \kappa^{-(d + \gamma_1 +2-\alpha)/p_0} \|\mathbf{M}^{\alpha-\beta_0} f\|_{L_{q,p}(\Omega_T, \omega\, d\mu_1)}, \end{split} \] where $N = N(d,\nu, p, q, \alpha,\beta_0, \gamma_1, K)>0$. Now, by choosing $\kappa$ sufficiently small and then $\delta$ and $\rho_1$ sufficiently small depending on $d,\nu, p, q,\alpha, \gamma_1$, and $K$ such that \[ N\Big [\kappa^{\theta} + \kappa^{-(d + \gamma_1 +2-\alpha)/p_0} \big(\delta^{\frac{1}{p_0} - \frac{1}{p_1}} + \rho_1^{(2-\alpha)(1-1/p_0)}\big) \Big] <1/2, \] we obtain \[ \begin{split}
& \norm{U}_{L_{q,p}(\Omega_T, \omega\, d\mu_1)} \leq N(d, \nu, p, q, \alpha, \beta_0, \gamma_1, K) \|\mathbf{M}^{\alpha -\beta_0} f\|_{L_{q,p}(\Omega_T, \omega\, d\mu_1)}. \end{split} \] From this and the PDE in \eqref{eq:main}, we obtain \[
\|\mathbf{M}^{-\alpha} u_t\|_{L_{q,p}} + \|D^2u\|_{L_{q,p}} + \lambda \|\mathbf{M}^{-\alpha} u\|_{L_{q,p}} \leq N \| f\|_{L_{q,p}}. \] This proves \eqref{est-1-small-supp}. The proof of \eqref{est-2-small-supp} is similar by applying \eqref{Uall-osc-gen} instead of \eqref{U-osc-gen}. \end{proof}
Below, we provide the proof of Theorem \ref{main-thrm}.
\begin{lemma}[A priori estimates of solutions] \label{apriori-est-lemma} Let $T \in (-\infty, \infty]$, $ \nu \in (0,1)$, $p, q, K \in (1, \infty)$, $\alpha \in (0, 2)$, and $\gamma_1 \in (\beta_0 -\alpha, \beta_0 -\alpha +1]$ for $\beta_0 \in {(\alpha-1}, \min\{1, \alpha\}]$. Then, there exist $\delta = \delta(d, \nu, p, q, K, \alpha, \beta_0,\gamma_1)>0$ sufficiently small and $\lambda_0 = \lambda_0(d, \nu, p, q, K, \alpha,\beta_0, \gamma_1)>0$ sufficiently large such that the following assertions hold. Let $\omega_0 \in A_q(\mathbb{R})$, $\omega_1 \in A_p(\mathbb{R}^d_+, \mu_1)$ satisfy \[ [\omega_0]_{A_q(\mathbb{R})} \leq K \qquad \text{and} \qquad [\omega_1]_{A_p(\mathbb{R}^d_+, \mu_1)} \leq K. \] Suppose that \eqref{con:mu}, \eqref{con:ellipticity}, and \eqref{add-assumption} hold, and suppose that \textup{Assumption \ref{assumption:osc}}$ (\delta, \gamma_1, \rho_0)$ holds with some $\rho_0>0$. If $u \in \sW^{1,2}_{q,p}(\Omega_T, x_d^{p(\alpha-\beta_0)}\omega\,d\mu_1)$ is a strong solution to \eqref{eq:main} with some $\lambda{\ge \lambda_0\rho_0^{\alpha-2}}$ and $f \in L_{q,p}(\Omega_T, x_d^{p(\alpha-\beta_0)}\omega\, d\mu_1)$, then \begin{equation} \label{main-est-1-b}
\|\mathbf{M}^{-\alpha} u_t\|_{L_{q,p}} + \|D^2u\|_{L_{q,p}} + \lambda \|\mathbf{M}^{-\alpha} u\|_{L_{q,p}} \leq N \|f\|_{L_{q,p}}, \end{equation} where $\omega(t, x) = \omega_0(t) \omega_1(x)$ for $(t,x) \in \Omega_T$, $L_{q,p} = L_{q,p}(\Omega_T, x_d^{p(\alpha-\beta_0)}\omega \,d\mu_1)$, and $N = N(d, \nu, p, q, \alpha,{\beta_0,K,} \gamma_1)>0$. Moreover, if $\beta_0 \in {(\alpha-1}, \alpha/2]$, then it also holds that \begin{equation} \label{main-est-2-b} \begin{split}
& \|\mathbf{M}^{-\alpha} u_t\|_{L_{q,p}} + \|D^2u\|_{L_{q,p}} + \lambda^{1/2} \|\mathbf{M}^{-\alpha/2} Du\|_{L_{q,p}} + \lambda \|\mathbf{M}^{-\alpha} u\|_{L_{q,p}} \\
& \leq N \|f\|_{L_{q,p}}. \end{split} \end{equation} \end{lemma} \begin{proof} Let $\delta>0$ and $\rho_1>0$ be the numbers from Lemma \ref{small-support-sol}, and let $\lambda_0>0$ be a number sufficiently large to be determined, depending on $d, p, q, \alpha,{\beta_0,\nu,} \gamma_1, K$. As the proofs of \eqref{main-est-1-b} and \eqref{main-est-2-b} are similar, we only prove the a priori estimate \eqref{main-est-1-b}. We use a partition of unity argument in the time variable. Let $$ \xi=\xi(t) \in C_0^\infty( -(\rho_0\rho_1)^{2-\alpha}, (\rho_0\rho_1)^{2-\alpha}) $$ be a non-negative cut-off function satisfying \begin{equation} \label{xi-0702}
\int_{\mathbb{R}} \xi(s)^q\, ds =1 \qquad \text{and} \qquad \int_{\mathbb{R}}|\xi'(s)|^q\,ds \leq \frac{N}{(\rho_0\rho_1)^{q(2-\alpha)}}. \end{equation} For fixed $s \in (-\infty, \infty)$, let $u^{(s)}(z) = u(z) \xi(t-s)$ for $z = (t, x) \in \Omega_T$. We see that $u^{(s)} \in \sW^{1,2}_{q,p}(\Omega_T,x_d^{p(\alpha-\beta_0)}\omega\, d\mu_1)$ is a strong solution of \[ \sL u^{(s)}(z) =\mu(x_d) f^{(s)} (z) \quad \text{in} \quad \Omega_T \] with the boundary condition $u^{(s)} =0$ on $\{x_d =0\}$, where \[ f^{(s)}(z) = \xi(t-s) f(z) + \xi'(t-s) u(z)/\mu(x_d). \] As $\text{supp}(u^{(s)}) \subset (s -(\rho_0\rho_1)^{2-\alpha}, s+ (\rho_0\rho_1)^{2-\alpha}) \times \mathbb{R}^{d}_{+}$, we apply Lemma \ref{small-support-sol} to get \[ \begin{split}
\|\mathbf{M}^{-\alpha} \partial_tu^{(s)}\|_{L_{q,p}} + \|D^2u^{(s)}\|_{L_{q,p}} + \lambda \|\mathbf{M}^{-\alpha} u^{(s)}\|_{L_{q,p}} \leq N \|f^{(s)}\|_{L_{q,p}}. \end{split} \] Then, by integrating the $q$-th power of this estimate with respect to $s$, we get \begin{align}\notag
& \int_{\mathbb{R}}\Big(\|\mathbf{M}^{-\alpha} \partial_tu^{(s)}\|_{L_{q,p}}^q + \|D^2u^{(s)}\|_{L_{q,p}}^q + \lambda^q \|\mathbf{M}^{-\alpha} u^{(s)}\|_{L_{q,p}}^q\Big)\, ds\\ \label{par-int-0515}
& \leq N\int_{\mathbb{R}} \|f^{(s)}\|_{L_{q,p}}^q\, ds. \end{align} Now, by the Fubini theorem and \eqref{xi-0702}, it follows that \[ \begin{split}
& \int_{\mathbb{R}}\|\mathbf{M}^{-\alpha} \partial_tu^{(s)}\|_{L_{q,p}}^q\, ds\\
& = \int_{\mathbb{R}} \left(\int_{-\infty}^T \|\mathbf{M}^{-\alpha}u_t(t,\cdot)\|_{L_p(\mathbb{R}^d_+, x_d^{p(\alpha-\beta_0)} \omega_1\, d\mu_1)}^q \omega_0(t) \xi^q(t-s)\, dt \right)\, ds \\
&= \int_{-\infty}^T \left( \int_{\mathbb{R}}\xi^q(t-s)\, ds \right) \|\mathbf{M}^{-\alpha}u_t(t,\cdot)\|_{L_p(\mathbb{R}^d_+,x_d^{p(\alpha-\beta_0)} \omega_1\, d\mu_1)}^q \omega_0(t)\, dt \\
& = \|\mathbf{M}^{-\alpha}u_t \|_{L_{q,p}(\Omega_T, x_d^{p(\alpha-\beta_0)} \omega\, d\mu_1)}^q = \|\mathbf{M}^{-\alpha}u_t \|_{L_{q,p}}^q, \end{split} \] and similarly \[ \begin{split}
& \int_{\mathbb{R}} \| D^2u^{(s)}\|_{L_{q,p}}^q\, ds = \| D^2u\|_{L_{q,p}}^q, \\
& \int_{\mathbb{R}} \|\mathbf{M}^{-\alpha} u^{(s)}\|_{L_{q,p}}^q\, ds = \|\mathbf{M}^{-\alpha} u\|_{L_{q,p}}^q . \end{split} \] Moreover, \[
\int_{\mathbb{R}} \|f^{(s)}\|_{L_{q,p}}^q\, ds \leq \|f\|_{L_{q,p}}^q + \frac{N}{(\rho_0\rho_1)^{q(2-\alpha)}} \|\mathbf{M}^{-\alpha}u\|_{L_{q,p}}^q, \] where \eqref{xi-0702} is used and $N = N(q)>0$. As $\rho_1$ depends on $d, \nu, p, q, K, \alpha{,\beta_0,\gamma_1}$, by combining the estimates we just derived, we infer from \eqref{par-int-0515} that \[ \begin{split}
& \|\mathbf{M}^{-\alpha} \partial_tu \|_{L_{q,p}} + \| D^2u\|_{L_{q,p}} + \lambda \|\mathbf{M}^{-\alpha} u\|_{L_{q,p}} \leq N\Big(\|f\|_{L_{q,p}} + \rho_0 ^{\alpha-2} \|\mathbf{M}^{-\alpha}u\|_{L_{q,p}} \Big) \end{split} \] with $N=N(d, \nu, \alpha, p, q, \gamma_1) >0$. Now we choose $\lambda_0 = 2N$. Then, with $\lambda \geq \lambda_0 \rho_0^{\alpha-2}$, we have \[ \begin{split}
& \|\mathbf{M}^{-\alpha} \partial_tu \|_{L_{q,p}} + \|D^2u\|_{L_{q,p}} + \lambda \|\mathbf{M}^{-\alpha} u\|_{L_{q,p}} \leq N \| f\|_{L_{q,p}} . \end{split} \] This estimate yields \eqref{main-est-1-b}. \end{proof}
Now, we have all the ingredients to complete the proof of Theorem \ref{main-thrm}. \begin{proof}[Proof of Theorem \ref{main-thrm}] The a priori estimates \eqref{main-est-1} and \eqref{main-est-2} follow from Lemma \ref{apriori-est-lemma}. Hence, it remains to prove the existence of solutions. We employ the technique introduced in \cite[Section 8]{Dong-Kim-18}. See also \cite[Proof of Theorem 2.3]{DP-JFA}. The proof is split into two steps, and we only outline the key ideas in each step.
\noindent {\em Step 1.} We consider the case $p =q$, $\omega_0 \equiv 1$, and $\omega_1 \equiv 1$. We employ the method of continuity. Consider the operator \[ \sL_\tau = (1-\tau)\big(\partial_t + \lambda - \mu(x_d) \Delta\big) + \tau \sL, \qquad \tau \in [0, 1]. \] It is straightforward to check that the assumptions in Theorem \ref{main-thrm} are satisfied by $\sL_\tau$, uniformly with respect to $\tau \in [0,1]$. Then, using the solvability in Theorem \ref{thm:xd} and the a priori estimates obtained in Lemma \ref{apriori-est-lemma}, we get the existence of a solution $u \in \sW^{1,2}_p(\Omega_T, x_d^{p(\alpha-\beta_0)}\, d\mu_1)$ to \eqref{eq:main} when $\lambda \geq \lambda_0 \rho_0^{\alpha-2}$, where $\lambda_0$ is the constant in Lemma \ref{apriori-est-lemma}.
\noindent {\em Step 2.} We combine {\em Step 1} and Lemma \ref{apriori-est-lemma} to prove the existence of {a strong} solution $u$ satisfying \eqref{main-est-1}. Let $p_1 > \max\{p,q\}$ be sufficiently large and let $\varepsilon_1, \varepsilon_2 \in (0,1)$ be sufficiently small depending on $K, p, q$, and $\gamma_1$ such that \begin{equation} \label{epsilon12-def} 1-\frac{p}{p_1} = \frac{1}{1+\varepsilon_1} \qquad \text{and} \qquad 1 - \frac{q}{p_1} = \frac{1}{1+\varepsilon_2}, \end{equation} and both $\omega_1^{1+\varepsilon_1}$ and $\omega_0^{1+\varepsilon_2}$ are locally integrable and satisfy the doubling property. Specifically, there is $N_0>0$ such that \begin{equation} \label{omega-0} \int_{\Gamma_{2r}(t_0)} \omega_0^{1+\varepsilon_2}(s)\, ds \leq N_0 \int_{\Gamma_{r}(t_0)} \omega_0^{1+\varepsilon_2}(s)\, ds \end{equation} for any $r>0$ and $t_0 \in \mathbb{R}$, where $\Gamma_{r}(t_0) = (t_0 -r^{2-\alpha}, \min\{t_0 + r^{2-\alpha}, T\})$. Similarly \begin{equation} \label{omega-1-0308} \int_{B_{2r}^+(x_0)} \omega_1^{1+\varepsilon_1}(x)\, d\mu_1 \leq N_0\int_{B_{r}^+(x_0)} \omega_1^{1+\varepsilon_1}(x)\, d\mu_1 \end{equation} for any $r >0$ and $x_0 \in \overline{\mathbb{R}^d_+}$.
Next, let $\{f_k\}$ be a sequence in $C_0^\infty(\Omega_T)$ such that \begin{equation} \label{f-k-converge-0227}
\lim_{k\rightarrow \infty} \|f_k - f\|_{L_{q,p}(\Omega_T, x_d^{p(\alpha-\beta_0)}\omega\, d\mu_1)} =0. \end{equation} By {\em Step 1}, for each $k \in \mathbb{N}$, we can find a solution $u_k \in \sW^{1,2}_{p_1}(\Omega_T,x_d^{p_1(\alpha-\beta_0)}\, d\mu_1)$ of \eqref{eq:main} with $f_k$ in place of $f$, where $\lambda \geq \lambda_0 \rho_0^{\alpha-2}$ for $\lambda_0 = \lambda_0(d, \nu, p_1, p_1,\alpha,\beta_0, \gamma_1, K)>0$. Observe that if the sequence $\{u_k\}$ is in $\sW^{1,2}_{q,p}(\Omega_T, x_d^{p(\alpha-\beta_0)} \omega \, d\mu_1)$, then by applying the a priori estimates in Lemma \ref{apriori-est-lemma}, \eqref{f-k-converge-0227}, and the linearity of the equation \eqref{eq:main}, we conclude that $\{u_k\}$ is Cauchy in $\sW^{1,2}_{q,p}(\Omega_T, x_d^{p(\alpha-\beta_0)} \omega\, d\mu_1)$. Let $u \in \sW^{1,2}_{q,p}(\Omega_T, x_d^{p(\alpha-\beta_0)} \omega\, d\mu_1)$ be the limit of the sequence $\{u_k\}$. Then, by letting $k \rightarrow \infty$ in the equation for $u_k$, we see that $u$ solves \eqref{eq:main}.
It remains to prove that for each fixed $k \in \mathbb{N}$, $u_k \in \sW^{1,2}_{q,p}(\Omega_T, x_d^{p(\alpha-\beta_0)} \omega\, d\mu_1)$. To this end, let us denote \[ D_{R} = (-R^{2-\alpha}, \min\{R^{2-\alpha}, T\}) \times B_R^+.\] Then, let $R_0>0$ be sufficiently large such that \begin{equation} \label{fk-spt} \operatorname{supp}(f_k) \subset D_{R_0}. \end{equation}
We note that $R_0$ depends on $k$. It follows from \eqref{epsilon12-def}, \eqref{omega-0}, \eqref{omega-1-0308}, and H\"{o}lder's inequality that \[ \begin{split}
& \|u_k\|_{\sW^{1,2}_{q,p}(D_{2R_0},x_d^{p(\alpha-\beta_0)} \omega d\mu_1)} \\
& \leq N(d, p, q, p_1, \alpha, \gamma_1, R_0) \|u_k\|_{\sW^{1,2}_{p_1}(D_{2R_0}, x_d^{p_1(\alpha-\beta_0)} d\mu_1)} <\infty. \end{split} \] Hence, we only need to prove \[
\|u_k\|_{\sW^{1,2}_{q,p}(\Omega_T\setminus D_{R_0},x_d^{p(\alpha-\beta_0)} \omega d\mu_1)} <\infty. \] This is done by the localization technique employing \eqref{epsilon12-def}, \eqref{omega-0}, \eqref{omega-1-0308}, \eqref{fk-spt}, and H\"{o}lder's inequality, using the fast decay property of solutions when the right-hand side is compactly supported.
We skip the details as the calculation is very similar to that of \cite[Section 8]{Dong-Kim-18}, and also of \cite[Step II - Proof of Theorem 2.3]{DP-JFA}. The proof of Theorem \ref{main-thrm} is completed. \end{proof} Next, we prove Corollary \ref{cor1}. \begin{proof}[Proof of Corollary \ref{cor1}] It is sufficient to show that we can make the choices for $\gamma_1, \beta_0$, and $\omega_1$ to apply Theorem \ref{main-thrm} to obtain \eqref{cor-est-1} and \eqref{cor-est-2}. Indeed, the choices are similar to those in the proof of Theorem \ref{thm:xd}. To obtain \eqref{cor-est-1}, we take $\beta_0 = \min\{1, \alpha\}$, and with this choice of $\beta_0$, we have \[ \alpha - \beta_0 = (\alpha-1)_+ \quad \text{and} \quad (\beta_0 -\alpha, \beta_0-\alpha +1] = (-(\alpha -1)_+, 1- (\alpha-1)_+]. \] Then, let $\gamma_1 = 1- (\alpha-1)_+$ and $\gamma' = \gamma - [\gamma_1 + p(\alpha-1)_+]$. From the choice of $\gamma_1$ and the condition on $\gamma$, we see that \begin{equation} \label{cond-gamma-1}
-1-\gamma_1 < \gamma' < (1+\gamma_1) (p-1). \end{equation} Now, let $\omega_1(x) = x_d^{\gamma'}$ for $x \in \mathbb{R}^d_+$. It follows from \eqref{cond-gamma-1} that $\omega_1 \in A_p(\mu_1)$. As Assumption \ref{assumption:osc}$(\delta, \gamma_1, \rho_0)$ holds, we can apply \eqref{main-est-1} to obtain \eqref{cor-est-1}.
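For completeness, we note that \eqref{cond-gamma-1} is a direct computation: since $\gamma_1 = 1 - (\alpha-1)_+$ and $\gamma' = \gamma - [\gamma_1 + p(\alpha-1)_+]$, we have
\begin{equation*}
\gamma' > -1 - \gamma_1 \,\, \Longleftrightarrow \,\, \gamma > p(\alpha-1)_+ - 1 \qquad \text{and} \qquad \gamma' < (1+\gamma_1)(p-1) \,\, \Longleftrightarrow \,\, \gamma < p\big[(\alpha-1)_+ + 1 + \gamma_1\big] - 1 = 2p - 1,
\end{equation*}
which is exactly the assumed range of $\gamma$.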
Next, we prove \eqref{cor-est-2}. In this case, we choose $\beta_0 = \alpha/2$, $\gamma_1 = 1-\alpha/2$, and \begin{equation} \label{cond-gamma-2}
\gamma' =\gamma - [\gamma_1 + p\alpha/2]. \end{equation} We use the fact that $\gamma \in (p\alpha/2-1, 2p-1)$ and \eqref{cond-gamma-2} to get \eqref{cond-gamma-1}. As Assumption \ref{assumption:osc}$(\delta, 1-\alpha/2, \rho_0)$ holds, by taking $\omega_1(x) = x_d^{\gamma'}$, we obtain \eqref{cor-est-2} from \eqref{main-est-2}. The proof is complete. \end{proof}
\section{Degenerate viscous Hamilton-Jacobi equations}\label{sec:5} To demonstrate an application of the results in our paper, we consider the following degenerate viscous Hamilton-Jacobi equation \begin{equation}\label{eq:nonlinear} \begin{cases} u_t+\lambda u-\mu(x_d) \Delta u=H(z,Du) \quad &\text{ in } \Omega_T,\\ u=0 \quad &\text{ on } (-\infty, T) \times \partial \mathbb{R}^d_+, \end{cases} \end{equation} where $\mu$ satisfies \eqref{con:mu} and $H:\Omega_T \times \mathbb{R}^d \to \mathbb{R}$ is a given Hamiltonian. We assume that there exist $\beta, \ell >0$, and $h:\Omega_T \to {\overline{\mathbb{R}_+}}$ such that, for all $(z,P) \in \Omega_T \times \mathbb{R}^d$, \begin{equation} \label{G-cond}
|H(z,P)| \leq{ \nu^{-1} (\min\{x_d^\beta,1\} |P|^{\ell}+x_d^\alpha h(z))}. \end{equation}
The following is the main result in this section. \begin{theorem} \label{example-thrm} Let $p \in (1, \infty)$, $\alpha \in (0,2)$, and $\gamma \in (p(\alpha-1)_+-1, 2p-1)$. Assume that \eqref{G-cond} holds with $\ell =1$, $\beta \geq 1$, and $h \in L_p(\Omega_T, x_d^\gamma\, dz)$. Then, there exists $\lambda_0 = \lambda_0(d, p, \alpha, \beta, \gamma)>0$ sufficiently large such that the following assertion holds. For any $\lambda \geq \lambda_0$, there exists a unique solution $u \in \sW^{1,2}_p(\Omega_T, x_d^\gamma \,dz)$ to \eqref{eq:nonlinear} such that \[
\|\mathbf{M}^{-\alpha} u_t\|_{L_p} + \|D^2 u\|_{L_p} + \lambda \|\mathbf{M}^{-\alpha} u\|_{L_p} \leq N \|h\|_{L_p} \]
where $\|\cdot\|_{L_p} = \|\cdot \|_{L_p(\Omega_T, x_d^\gamma\, dz)}$ and $N = N(d, p, \alpha, \beta, \gamma)>0$. \end{theorem} \begin{proof} The proof follows immediately from Theorem \ref{thm:xd} and the interpolation inequality in Lemma \ref{interpolation-inq} (i) below. \end{proof}
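\begin{remark}
We briefly indicate how the two ingredients are combined; we only sketch the a priori estimate, from which the existence and uniqueness can be obtained by a standard fixed-point argument based on the same bound. Writing \eqref{eq:nonlinear} in the form \eqref{eq:xd} with $f = H(z, Du)/\mu(x_d)$ and using \eqref{con:mu} together with \eqref{G-cond} (with $\ell = 1$), we have $\|f\|_{L_p} \leq N \big(\|\tilde f\|_{L_p} + \|h\|_{L_p}\big)$, where $\tilde f = x_d^{-\alpha} \min\{x_d^\beta, 1\} |Du|$. By Lemma \ref{interpolation-inq} (i) and Young's inequality, for any $\varepsilon \in (0,1)$,
\[
\|\tilde f\|_{L_p} \leq \varepsilon \|D^2 u\|_{L_p} + N(\varepsilon) \|\mathbf{M}^{-\alpha} u\|_{L_p}.
\]
Combining this with \eqref{eq:xd-main}, choosing $\varepsilon$ sufficiently small, and then taking $\lambda_0$ sufficiently large so that the term $N(\varepsilon)\|\mathbf{M}^{-\alpha} u\|_{L_p}$ is absorbed by $\lambda \|\mathbf{M}^{-\alpha} u\|_{L_p}$, we obtain the stated estimate.
\end{remark}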
\begin{remark}
Overall, it is meaningful to study \eqref{eq:nonlinear} for general Hamiltonians $H$. It is typically the case that if we consider \eqref{eq:nonlinear} in $(0,T)\times \mathbb{R}^d_+$ with nice given initial data, then we can obtain Lipschitz a priori estimates on the solutions via the classical Bernstein method or the doubling variables method under some appropriate conditions on $H$. See \cite{CIL, AT, LMT} and the references therein. In particular, $\|Du\|_{L^\infty([0,T]\times \mathbb{R}^d_+)} \leq N$, and hence the behavior of $H(z,P)$ for $|P|>2N+1$ is irrelevant and can be modified to suit our purposes. As such, if we assume \eqref{G-cond}, then it is natural to require that $\ell=1$.
We note, however, that assuming \eqref{G-cond} with $\ell=1$ and $\beta \geq 1$ in Theorem \ref{example-thrm} is rather restrictive. It is not yet clear to us what happens when $0\leq \beta<1$, and we plan to revisit this point in future work. \end{remark}
To obtain a priori estimates for solutions to \eqref{eq:nonlinear}, we consider the nonlinear term $H$ as a perturbation. We prove the following interpolation inequalities when the nonlinear term satisfies \eqref{G-cond} with $\ell=1$ and $\ell=2$, which might be of independent interest.
\begin{lemma} \label{interpolation-inq} Let $p \in (1, \infty), \beta\ge 0, \gamma>-1$, $1 \leq \ell \leq \frac{d}{d-p}$, and $\theta = \frac{1}{2}(1+\frac{d}{p}-\frac{d}{\ell p})$. Assume that $H$ satisfies \eqref{G-cond}. The following interpolation inequalities hold for every $u \in C_0^\infty(\Omega_T)$ and $\tilde f(z) = x_d^{-\alpha} {\min\{x_d^\beta,1\}|Du|^\ell}$, \begin{itemize} \item[(i)] If $\ell=1$ and $\beta \geq 1$, \[ \begin{split}
\|\tilde f\|_{L_p(\Omega_T, x_d^\gamma\, dz)} & \leq N \|\mathbf{M}^{-\alpha} u\|_{L_p(\Omega_T,x_d^\gamma\, dz)}^{1/2}\|D^2 u\|_{L_p(\Omega_T,x_d^\gamma\, dz)}^{1/2} \\
& \qquad +N \|\mathbf{M}^{-\alpha} u\|_{L_p(\Omega_T,x_d^\gamma\, dz)}, \end{split} \] where $N = N(d, p, \beta, \gamma) >0$. \item[(ii)] If $\ell=2$, $p \geq \frac{d}{2}$, and $\beta \geq \max\{\frac{\gamma}{p}+\frac{d\alpha}{2p}, \frac{\gamma}{p}+2 +\frac{\alpha}{d} - \frac{d\alpha}{p}\}$, then \[ \begin{split}
\|\tilde f\|_{L_p(\Omega_T, x_d^\gamma\, dz)} & \leq N \|\mathbf{M}^{-\alpha} u\|_{L_p(\Omega_T,x_d^\gamma\, dz)}^{2(1-\theta)}\|D^2 u\|_{L_p(\Omega_T,x_d^\gamma\, dz)}^{2 \theta} \\
& \qquad +N \|\mathbf{M}^{-\alpha} u\|_{L_p(\Omega_T,x_d^\gamma\, dz)}^2, \end{split} \] where $N = N(d, p, \beta, \gamma) >0$. \end{itemize} \end{lemma}
\begin{proof} For $m\in \mathbb{Z}$, set $\Omega_m=\{z\in \Omega_T\,:\, 2^{-m-1} < x_d \leq 2^{-m}\}$. By the Gagliardo-Nirenberg interpolation inequality, for $m\in \mathbb{Z}$, \[
\|Du\|_{L_{p\ell}(\Omega_m)} \leq N \left (\|u\|_{L_p(\Omega_m)}^{1-\theta} \|D^2u\|_{L_p(\Omega_m)}^\theta + 2^{2m\theta} \|u\|_{L_p(\Omega_m)} \right). \] Hence, for $m\geq 0$, \begin{align*}
&\|\mathbf{M}^{\beta-\alpha} |Du|^\ell\|_{L_p(\Omega_m,x_d^\gamma\, dz)}^p
=\int_{\Omega_m} x_d^{p(\beta-\alpha)+\gamma}|Du|^{p \ell}\,dz\\
&\leq \, 2^{-m(p(\beta-\alpha)+\gamma)} \int_{\Omega_m} |Du|^{p\ell}\,dz\\
&\leq \, N2^{-m(p(\beta-\alpha)+\gamma)} \left(\int_{\Omega_m} |u|^{p}\,dz\right)^{\ell(1-\theta)} \left(\int_{\Omega_m} |D^2u|^{p}\,dz\right)^{\ell \theta} \\
&\qquad+ N2^{-m(p(\beta-\alpha)+\gamma+d-p\ell-d\ell)} \left(\int_{\Omega_m} |u|^{p}\,dz\right)^\ell\\
&\leq \, N2^{-m(p(\beta-\alpha)+\gamma+ p \ell \alpha(1-\theta)-\ell\gamma )} \|\mathbf{M}^{-\alpha} u\|_{L_p(\Omega_m,x_d^\gamma\, dz)}^{p \ell (1-\theta)}\|D^2 u\|_{L_p(\Omega_m,x_d^\gamma\, dz)}^{p \ell\theta} \\
&\qquad+ N2^{-m(p(\beta-\alpha)+\gamma+d-p \ell-d\ell+p \ell\alpha-\ell \gamma)}\|\mathbf{M}^{-\alpha} u\|_{L_p(\Omega_m,x_d^\gamma\, dz)}^{p \ell}. \end{align*} By performing similar computations, we get that, for $m< 0$, \begin{align*}
&\|\mathbf{M}^{-\alpha} |Du|^\ell \|_{L_p(\Omega_m,x_d^\gamma\, dz)}^p\\
&\leq \, N2^{-m(-p\alpha+\gamma+ p \ell \alpha(1-\theta)-\ell \gamma )} \|\mathbf{M}^{-\alpha} u\|_{L_p(\Omega_m,x_d^\gamma\, dz)}^{p \ell(1-\theta)}\|D^2 u\|_{L_p(\Omega_m,x_d^\gamma\, dz)}^{p \ell \theta} \\
&\qquad + N2^{-m(-p\alpha+\gamma+d-p\ell -d\ell +p\ell\alpha-\ell \gamma)}\|\mathbf{M}^{-\alpha} u\|_{L_p(\Omega_m,x_d^\gamma\, dz)}^{p\ell}. \end{align*} Then, if $\ell=1$ and $\beta \geq 1$, we have \[ \begin{cases} p(\beta-\alpha)+\gamma+ p \ell \alpha(1-\theta)-\ell \gamma =p(\beta - \frac{\alpha}{2}) \geq 0,\\ p(\beta-\alpha)+\gamma+d-p\ell-d\ell+p\ell\alpha-\ell\gamma =p(\beta-1) \geq 0,\\ -p\alpha+\gamma+ p \ell \alpha(1-\theta)-\ell \gamma = -\frac{p\alpha}{2} \leq 0,\\ -p\alpha+\gamma+d-p\ell -d\ell +p\ell \alpha-\ell \gamma=-p \leq 0. \end{cases} \] We thus obtain (i). Similarly, the above four inequalities hold true when $\ell=2$, $p \geq \frac{d}{2}$, and $\beta \geq \max\{\frac{\gamma}{p}+\frac{d\alpha}{2p}, \frac{\gamma}{p}+2 +\frac{\alpha}{d} - \frac{d\alpha}{p}\}$, which yield (ii). \end{proof}
\end{document}
\begin{document}
\begin{frontmatter}
\title{An Induced Natural Selection Heuristic for Finding Optimal Bayesian Experimental Designs}
\author[cam,uofm,doherty]{David J. Price\corref{cor1}} \cortext[cor1]{[email protected]}
\author[uofa,acems]{Nigel G. Bean}
\author[uofa,acems]{Joshua V. Ross}
\author[uofa,acems]{Jonathan Tuke}
\address[cam]{Disease Dynamics Unit, Department of Veterinary Medicine, University of Cambridge, Madingley Road Cambridge CB3 0ES, United Kingdom} \address[uofm]{Centre for Epidemiology and Biostatistics, Melbourne School of Population and Global Health, The University of Melbourne, VIC 3010, Australia} \address[doherty]{Victorian Infectious Diseases Reference Laboratory Epidemiology Unit at the Peter Doherty Institute for Infection and Immunity, The University of Melbourne and Royal Melbourne Hospital, VIC 3000, Australia} \address[uofa]{School of Mathematical Sciences, University of Adelaide, SA 5005, Australia} \address[acems]{ARC Centre of Excellence for Mathematical \& Statistical Frontiers, School of Mathematical Sciences, University of Adelaide, SA 5005, Australia}
\begin{abstract}
Bayesian optimal experimental design has immense potential to inform the collection of data so as to subsequently enhance our understanding of a variety of processes. However, a major impediment is the difficulty in evaluating optimal designs for problems with large, or high-dimensional, design spaces. We propose an efficient search heuristic suitable for general optimisation problems, with a particular focus on optimal Bayesian experimental design problems. The heuristic evaluates the objective (utility) function at an initial, randomly generated set of input values. At each generation of the algorithm, input values are ``accepted'' if their corresponding objective (utility) function satisfies some acceptance criteria, and new inputs are sampled about these accepted points. We demonstrate the new algorithm by evaluating the optimal Bayesian experimental designs for the previously considered death, pharmacokinetic and logistic regression models. Comparisons to the current ``gold-standard'' method are given to demonstrate the proposed algorithm as a computationally-efficient alternative for moderately-large design problems (i.e., up to approximately 40-dimensions). \end{abstract}
\begin{keyword}
Bayesian optimal design \sep Optimisation heuristic \sep Stochastic models \sep Sampling windows
\end{keyword}
\end{frontmatter}
\section{Introduction} \label{section:intro}
Optimising the design of an experiment is an important consideration in many areas of science, including, but not limited to: biology \citep{Faller:2003}, clinical trials \citep{Berry:2004} and epidemiology \citep{Pagendam:2013}. The theory of optimal experimental design is a statistical framework that allows us to determine the optimal experimental protocol to gain the most information about model parameters, given constraints on resources.
In evaluating an optimal Bayesian design, there are two main components: the search across the design space, and the evaluation of the utility. There have been many approaches to improving the efficiency of both aspects, summarised by \citet{Ryan:2015}. Recently, \citet{Overstall:2017} proposed the Approximate Coordinate Exchange (ACE) algorithm to address the search aspect of the Bayesian experimental design problem. The method utilises a coordinate exchange algorithm to update one dimension of the design at a time, coupled with a Gaussian process in order to search each dimension efficiently. It has been asserted that the future of optimal Bayesian experimental design lies in the ability to evaluate the optimal designs for large-scale problems (\emph{i.e.}, large or high-dimensional design spaces), in a computationally-efficient manner \citep{Ryan:2015}. In this paper, we address this by proposing a new search algorithm targeted at finding optimal Bayesian experimental designs.
The search heuristic we present performs targeted sampling of the design space to find high utility designs, without making any assumptions about the shape of the utility function. An initial population of random designs is generated -- synonymous with multiple algorithm runs from random initial conditions as in other optimisation routines. Our method borrows the idea of targeting regions of high utility, as per the MCMC approach of \citet{Muller:1999}, by sampling new designs at each iteration around the ``best'' designs, chosen according to some acceptance criteria. We describe this algorithm using the notion of ``survival-of-the-fittest'', as the ``fittest'' individuals -- according to their objective (utility) function value -- survive at each iteration (generation) based on user-defined acceptance criteria, to produce offspring for the next generation. Hence, we propose this as a new type of evolutionary algorithm (\emph{e.g.}, \citealp{Goldberg:1989}), and refer to it herein as the Induced Natural Selection Heuristic (INSH).
By independently sampling new designs around each accepted design, we aim to avoid the pitfalls associated with some other optimisation routines. For example, INSH is able to sample multiple regions of high utility at a time, thus exploring multiple local optima simultaneously, rather than potentially being stuck at a single local optimum. Furthermore, by not combining the retained designs in any way, INSH avoids the potential to move to a region of low utility that is at the ``centre'' of multiple local optima -- as may occur in a cross-entropy or genetic algorithm. By taking a sampling approach, as opposed to trying to approximate the function, INSH makes no assumptions about the shape of the utility function -- thus, it is not limited to utility functions that are, for example, smooth. Utilising (embarrassingly) parallel computation tools, the method can efficiently evaluate the utility for a large number of designs in each iteration.
The ACE algorithm has allowed the consideration of Bayesian optimal designs for a larger, more-complex class of statistical models and experiments than was possible with previous algorithms. There are a number of drawbacks to ACE, however. By searching in one-dimension at a time, ACE risks missing the globally-optimal design, and instead may find only local optima. An approach to avoid this is to re-run the algorithm from a number of randomly generated initial designs \citep{Overstall:2017}. Similarly, as noted by the authors, by searching in one-dimension at a time, the algorithm will be inefficient in scenarios where there is a large correlation between the design variables -- a problem which adds to the difficulty in choosing a suitable number of iterations for each phase of the algorithm. The algorithm requires a sufficiently-good estimate of the utility when determining whether to accept the candidate design -- spurious estimates may lead to sub-optimal candidate designs being accepted, and thus push the algorithm away from regions of high utility. Alternatively, a large improvement in the computation time arises from the estimation of the utility surface in each dimension in the form of a Gaussian process based on a number of candidate points. This approximation to the utility surface based on noisy evaluations of the utility aims to provide a smooth approximation to the surface. When the surface is not smooth, or has a discontinuity (\emph{e.g.}, as exists in the utility surface for the death model in Figure \ref{deathmodel:fullutilitysurface} at $\boldsymbol{t}\approx(2.75,t_2)$ and $\boldsymbol{t}\approx(t_1,2.75)$), this has the potential to cause problems for the ACE algorithm.
In the following, we present the INSH search algorithm in a general framework, and we note that efficient evaluation of the utility is another problem that needs to be addressed. We consider two existing approaches to evaluating the utility: an Approximate Bayesian Computation (ABC) approach used by \citet{Price:2016}, in a scenario where the benefits of this approach are realised; and a nested Monte-Carlo approximation using code from the \verb+acebayes+ package \citep{Rpack:acebayes}, otherwise.
We consider the problem of finding the optimal design for the death model, a pharmacokinetic (PK) model tracking the concentration of a drug or treatment in the blood, and a four-factor logistic regression model. In the death and PK examples, a design $d$ consists of $n$ sampling times $(t_1,\dots,t_n)$, subject to some problem-specific constraints. First, we address the question of when to observe the stochastic process in order to gain the most information about the model parameters governing the death model. The Markovian death model has been considered previously in a Bayesian framework by \citet{Cook:2008}, \citet{Drovandi:2013}, and \citet{Price:2016}. We compare the optimal designs for 1-4 observation times in order to demonstrate the efficacy of the method. Second, we consider the question of sampling times for a PK model -- a process where the design space is higher-dimensional -- in order to demonstrate the efficiency of the INSH algorithm for larger design spaces. The optimal designs are compared to those evaluated using the ``gold-standard'' Approximate Coordinate Exchange (ACE) algorithm of \citet{Overstall:2017}. We also consider the idea of sampling windows for this example, which have been considered previously by \citet{Duffull:2003}, \citet{Chenel:2005}, \citet{Graham:2006}, \citet{McGree:2012}, and \citet{Duffull:2012}, for example. Finally, we compare the results of the INSH algorithm to those of the ACE algorithm for a standard four-factor logistic regression model \citep{Overstall:2017} -- a considerably higher-dimensional problem. We consider examples with $n=6,10,24$, and $48$ (independent) replicates in each experiment; corresponding to a design space with up to 192 dimensions (i.e., when $n=48$ replicates).
\subsection{Bayesian Optimal Experimental Design}
The aim of optimal experimental design is to determine the best experimental protocol in order to maximise some utility of the experiment. To achieve this aim, we specify a utility function $U(\boldsymbol{\theta},\boldsymbol{y},d)$ representing how we `value' the experimental design $d$, chosen from the set of all designs $\mathcal{D}$, where $\boldsymbol{\theta}$ represents the model parameters and $\boldsymbol{y}$ is the data. We are interested in the expected utility of using design $d$, over the unknown model parameters and data. That is, we wish to evaluate, \begin{align}
u(d) &= E_{\boldsymbol{\theta},\boldsymbol{y}}[ U(\boldsymbol{\theta},\boldsymbol{y}, d)] \notag \\
&= \int_{\boldsymbol{y}} \int_{\boldsymbol{\theta}} U(\boldsymbol{\theta},\boldsymbol{y}, d) p(\boldsymbol{y} \mid \boldsymbol{\theta},d) p(\boldsymbol{\theta}) d\boldsymbol{\theta} d\boldsymbol{y},
\label{utilitydefn} \end{align} where $p(\boldsymbol{y} \mid \boldsymbol{\theta}, d)$ is the likelihood function of the unobserved data $\boldsymbol{y}$, under design $d$, and $p(\boldsymbol{\theta})$ is the prior distribution of the model parameters. The optimal design $d^*$ maximises the expected utility over the design space $\mathcal{D}$, that is, $d^* = \text{argmax}_{d\in\mathcal{D}} u(d)$. The utility function we use throughout this work is the Kullback-Leibler divergence \citep{Kullback} from the prior distribution to the posterior distribution (which is independent of $\boldsymbol{\theta}$), \begin{equation*} U(\boldsymbol{y}, d) = \int_{\boldsymbol{\theta}} \log \left( \frac{p(\boldsymbol{\theta} \mid \boldsymbol{y},d)}{ p(\boldsymbol{\theta})} \right) p(\boldsymbol{\theta} \mid \boldsymbol{y}, d) d\boldsymbol{\theta}, \end{equation*} which leads to an expected utility: \begin{align}
u(d) = \int_{\boldsymbol{y}} \int_{\boldsymbol{\theta}} \log \left( \frac{p(\boldsymbol{\theta} \mid \boldsymbol{y},d)}{ p(\boldsymbol{\theta})} \right) p( \boldsymbol{y} \mid \boldsymbol{\theta},d) p(\boldsymbol{\theta}) d\boldsymbol{\theta} d\boldsymbol{y}.
\label{ekld} \end{align} See \citet{Price:2016} for details of the derivation. Alternatively, it is commonplace to consider the Shannon Information Gain (SIG), which can be written as: \begin{align} U(\boldsymbol{\theta}, \boldsymbol{y}, d) =& \log p(\boldsymbol{\theta} \mid \boldsymbol{y}, d) - \log p(\boldsymbol{\theta}) \notag \\
=& \log p(\boldsymbol{y} \mid \boldsymbol{\theta}, d) - \log p(\boldsymbol{y} \mid d), \label{eqn:SIG} \end{align} through the application of Bayes' theorem. Maximisation of the expected SIG is equivalent to maximisation of the expected Kullback-Leibler divergence above. Unfortunately, it is often not possible to obtain an analytic evaluation of the expected utility function $u(d)$ (Equation \eqref{utilitydefn}), and approximate methods are required (see Section \ref{subsection:evaluate_utility}).
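Before turning to those approximations, note that the equivalence claimed above follows directly from Bayes' theorem, $p(\boldsymbol{y} \mid \boldsymbol{\theta}, d)\, p(\boldsymbol{\theta}) = p(\boldsymbol{\theta} \mid \boldsymbol{y}, d)\, p(\boldsymbol{y} \mid d)$:
\begin{align*}
E_{\boldsymbol{\theta},\boldsymbol{y}}\left[ \log p(\boldsymbol{\theta} \mid \boldsymbol{y}, d) - \log p(\boldsymbol{\theta}) \right]
&= \int_{\boldsymbol{y}} \int_{\boldsymbol{\theta}} \log \left( \frac{p(\boldsymbol{\theta} \mid \boldsymbol{y},d)}{p(\boldsymbol{\theta})} \right) p(\boldsymbol{y} \mid \boldsymbol{\theta}, d)\, p(\boldsymbol{\theta})\, d\boldsymbol{\theta}\, d\boldsymbol{y} \\
&= \int_{\boldsymbol{y}} \left[ \int_{\boldsymbol{\theta}} \log \left( \frac{p(\boldsymbol{\theta} \mid \boldsymbol{y},d)}{p(\boldsymbol{\theta})} \right) p(\boldsymbol{\theta} \mid \boldsymbol{y}, d)\, d\boldsymbol{\theta} \right] p(\boldsymbol{y} \mid d)\, d\boldsymbol{y} \\
&= E_{\boldsymbol{y}}\left[ U(\boldsymbol{y}, d) \right],
\end{align*}
so maximising the expected SIG and maximising the expected Kullback-Leibler divergence in Equation \eqref{ekld} yield the same optimal design $d^*$.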
\subsection{ACE Algorithm}
The Approximate Coordinate Exchange algorithm of \citet{Overstall:2017} directly addresses the need for a computationally-efficient algorithm for determining optimal Bayesian experimental designs in high-dimensional design spaces \citep{Ryan:2015}. The reader is directed to \citet{Overstall:2017} for full details of the algorithm. Briefly, the algorithm considers each dimension of the experimental design one-at-a-time (\emph{e.g.}, the first observation time in an observation schedule), and evaluates the utility at a number of new, candidate values in that dimension (\emph{e.g.}, consider the utility at each of $q$ equally-spaced times across the feasible range of observation times, conditional on the other elements of the design). Having obtained these approximate utilities across the feasible range for the particular dimension of the design, a Gaussian process is fitted to these utility evaluations to find an approximate ``optimal'' value as an update to this dimension of the design (accepted with some probability). The algorithm cycles through each design variable, (probabilistically) updating it to the best value according to the Gaussian process approximation to the utility. The ACE algorithm is the first algorithm capable of dealing with high-dimensional design problems in a computationally feasible amount of time.
\section{Proposed Method: INSH Algorithm}
In the following, we present a new algorithm to find optimal Bayesian experimental designs efficiently. We describe an algorithm that can utilise the current advantages of parallel computing -- which are rapidly improving as parallel computing becomes more widely available, easier to implement, and more powerful. Simultaneously, we embrace an advantageous aspect of the inherently sequential, and thus difficult to parallelise efficiently, MCMC algorithms implemented by \citet{Muller:1999}, \citet{Cook:2008}, and \citet{Drovandi:2013}: namely, we seek to spend less computational effort evaluating designs in low-utility regions. This forms the crux of the efficiency of an MCMC approach, and is achieved by sampling from a function proportional to the utility. The new algorithm we propose instead evaluates the utility of multiple designs simultaneously -- in order to realise the benefits of parallel computing -- and samples new designs at each iteration of the algorithm around designs that satisfy some acceptance criteria. The acceptance criteria for designs at each iteration can be chosen in a number of different ways. In this paper, we demonstrate the acceptance of a fixed number of the ``best'' designs, similar to the proportion of ``elite'' samples in a cross-entropy algorithm \citep{DeBoer:2005}. In contrast to these existing optimisation algorithms, the algorithm presented here considers multiple designs at each iteration, allowing us to explore the design space more efficiently. The INSH algorithm is detailed in Algorithm \ref{INSH_algorithm}. Note that in Step 6, the best design considered in any previous iteration is reintroduced into the set of designs that are to be sampled around, in order to continue to explore this region.
\begin{algorithm}[h] \caption{INSH Algorithm}\label{INSH_algorithm} \begin{algorithmic}[1]
\State Choose an initial set of designs, $D$ (\emph{e.g.}, a coarse grid of design points across the design space, or randomly sample).
\State Specify the number of generations (iterations) of the algorithm $W$, a perturbation function $f(d\mid d')$, and the acceptance criteria.
\For{$w=1$ to $W$}
\State \parbox[t]{0.95\textwidth}{\strut For each design $d^i\in D$, sample parameters $\boldsymbol{\theta}\sim p(\boldsymbol{\theta})$, and simulate data $\boldsymbol{y}^i$ from the model.\strut }
\State \strut Evaluate utility $u(d^i)$, for each design $d^i \in D$. \label{insh_evaluate_utility}
\State \parbox[t]{0.95\textwidth}{\strut Set $D'$ to be the designs which satisfy the acceptance criteria, and the current optimal design $d^*$ (even if it occurred in a previous generation).\strut }
\State \parbox[t]{0.95\textwidth}{\strut Sample $m$ designs from $f(d\mid d')$, for each $d'\in D'$. Set $D$ to be these newly sampled designs.\strut }
\EndFor
\Ensure Set of designs $d$, and corresponding approximate utilities $u(d)$ (and hence, the optimal design $d^* = \underset{d\in\mathcal{D}}{\text{argmax}}(u(d))$). \end{algorithmic} \end{algorithm}
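To complement the pseudo-code, a minimal R sketch of the main loop is given below; it is not the authors' implementation. The functions \verb+evaluate_utility()+ and \verb+perturb()+ are hypothetical placeholders for the problem-specific utility estimator and perturbation kernel described in the following subsections, and the acceptance criterion shown is the ``retain the best $r$ designs'' rule used in this paper.
\begin{verbatim}
# Minimal R sketch of the INSH loop (Algorithm 1); not the authors' implementation.
# D is a list of designs; evaluate_utility(d) and perturb(d) are hypothetical,
# problem-specific functions (see the following subsections).
insh <- function(D, W, r, m, evaluate_utility, perturb) {
  best_design <- NULL; best_utility <- -Inf
  for (w in seq_len(W)) {
    u <- vapply(D, evaluate_utility, numeric(1))       # Steps 4-5
    if (max(u) > best_utility) {                       # track the best design so far
      best_utility <- max(u); best_design <- D[[which.max(u)]]
    }
    keep <- order(u, decreasing = TRUE)[seq_len(min(r, length(D)))]
    D_accept <- unique(c(D[keep], list(best_design)))  # Step 6
    D <- unlist(lapply(D_accept, function(d)           # Step 7
      replicate(m, perturb(d), simplify = FALSE)), recursive = FALSE)
  }
  list(design = best_design, utility = best_utility)
}
\end{verbatim}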
\subsection{Evaluation of the Utility} \label{subsection:evaluate_utility} An efficient approach to evaluate the utility of a design in Step \ref{insh_evaluate_utility} of Algorithm \ref{INSH_algorithm} is that of the ABCdE algorithm \citep{Price:2016}. \comment{We state the ABCdE algorithm in Algorithm 2 of Online Resource A, and direct the reader to \citet{Price:2016} for a complete description of the algorithm}. In particular, we use Steps 3 to 9 of Algorithm 2 in Online Resource A. Note that this approach is suitable for discrete data, and for low-dimensional design spaces. This is because the majority of the efficiency gain comes from evaluating the posterior distribution only once for each unique data set. As the number of possible unique data sets increases -- for example, either by observing the process more often (increasing the size of the design space), or having a larger population -- this approach to evaluating the utility becomes less efficient. We use this approach to evaluate the utility for the Markovian death model, in order to demonstrate the INSH algorithm.
For cases where the dimension of the data is too large (or continuous), we must consider an alternative approach to evaluating the utility for each design. As noted previously, this is one of the two main challenges when searching for optimal Bayesian designs. A suitable and efficient method for evaluation of the utility for a design is often problem-specific, and a number of different approaches have been considered -- a summary of these approaches can be found in \citet{Ryan:2015}. For the PK and logistic regression examples we consider subsequently, we implement the utility function of \citet{Overstall:2017}, as provided in the \verb+acebayes+ package in R \citep{Rpack:acebayes}. Briefly, the SIG utility in equation \eqref{eqn:SIG}, is estimated by a nested Monte-Carlo approximation of the values $p(\boldsymbol{y} \mid \boldsymbol{\theta}, d)$ and $p(\boldsymbol{y} \mid d)$, within the Monte-Carlo approximation to the expected utility, ${u}(d)$. Borrowing the notation of \citet{Overstall:2017}, define $\boldsymbol{\psi} = (\boldsymbol{\theta}, \boldsymbol{\gamma})$ to be the combination of the parameters of interest, $\boldsymbol{\theta}$, and nuisance parameters, $\boldsymbol{\gamma}$. Then, we use $\tilde{B}$ simulations to approximate the inner Monte-Carlo estimates: \begin{align*} \tilde{p}(\boldsymbol{y} \mid \boldsymbol{\theta}, d) = \frac{1}{\tilde{B}} \sum_{b=1}^{\tilde{B}} p(\boldsymbol{y} \mid \boldsymbol{\theta}, \tilde{\boldsymbol{\gamma}}_b, d ), \quad \text{and } \quad \tilde{p}(\boldsymbol{y} \mid d) = \frac{1}{\tilde{B}} \sum_{b=1}^{\tilde{B}} p(\boldsymbol{y} \mid \tilde{\boldsymbol{\theta}}_b, \tilde{\boldsymbol{\gamma}}_b,d ), \end{align*} where $(\tilde{\boldsymbol{\theta}}_b, \tilde{\boldsymbol{\gamma}}_b)$ are the $\tilde{B}$ parameters sampled from the prior distribution of $\boldsymbol{\psi}$. Similarly, $B$ simulations are used to evaluate the outer Monte-Carlo estimate, \begin{align*} \tilde{u}(d)=\frac{1}{B} \sum_{l=1}^B\left[ \log \tilde{p}(\boldsymbol{y}_l \mid \boldsymbol{\theta}_l, d) - \log \tilde{p}(\boldsymbol{y}_l \mid d) \right], \end{align*} with $\{\boldsymbol{y}_l, \boldsymbol{\theta}_l\}$ parameters, and corresponding simulations, sampled from the prior and simulated from the model, respectively. In the work of \citet{Overstall:2017}, the authors use $\tilde{B}=B=1{,}000$ to evaluate the candidate designs' utilities in the one-dimensional search (Step 1b of the ACE Algorithm in \citealp{Overstall:2017}), and $\tilde{B}=B=20{,}000$ to evaluate the utility when determining whether to accept the candidate design (Steps 1d and 3e of the ACE Algorithm in \citet{Overstall:2017}; note that Step 3 is not implemented for the compartmental model).
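For concreteness, a minimal R sketch of this nested Monte Carlo estimator (without nuisance parameters, so the likelihood term is evaluated exactly) is given below; \verb+rprior()+, \verb+simulate()+ and \verb+loglik()+ are hypothetical problem-specific functions, and this is not the \verb+acebayes+ implementation.
\begin{verbatim}
# Sketch of the nested Monte Carlo SIG estimator. rprior(n) is assumed to return
# an n-by-p matrix of prior draws; simulate(theta, d) returns a data set; and
# loglik(y, theta, d) returns log p(y | theta, d).
sig_utility <- function(d, B, B_tilde, rprior, simulate, loglik) {
  theta_outer <- rprior(B)            # draws for the outer expectation
  theta_inner <- rprior(B_tilde)      # draws for the inner evidence estimate
  u <- numeric(B)
  for (l in seq_len(B)) {
    y_l <- simulate(theta_outer[l, ], d)
    log_lik <- loglik(y_l, theta_outer[l, ], d)
    # tilde p(y_l | d): average the likelihood over B_tilde prior draws
    evidence <- mean(vapply(seq_len(B_tilde),
      function(b) exp(loglik(y_l, theta_inner[b, ], d)), numeric(1)))
    u[l] <- log_lik - log(evidence)
  }
  mean(u)                             # Monte Carlo estimate of u(d)
}
\end{verbatim}
In practice one would compute the inner average on the log scale (a log-sum-exp) to avoid underflow; the plain average is kept here only for readability.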
\subsection{Choice of Acceptance Criteria}
There are a number of ways in which we can choose to retain designs, taking inspiration from other optimisation routines. For example, one could retain all designs that are greater than some percentage of the current maximum (\emph{e.g.}, retain all designs that have at least $95\%$ of the information compared to the current ``optimal''), although this approach requires some insight into how ``flat'' the utility surface is in order to avoid retaining too many or too few designs at each iteration. While we do not present the results here, testing this approach for the Markovian death model showed promising results.
The approach that we implement in this work is similar to the ``elite'' samples of a cross-entropy algorithm \citep{DeBoer:2005}. That is, at each generation, the algorithm accepts the best $r$ designs according to their utility. At the next generation of the algorithm, we sample $m$ designs from the perturbation kernel from each of these $r$ designs. In order to balance the trade-off between exploration and exploitation, one can specify a sequence of decreasing and increasing values for $r_w$ and $m_w$, respectively. Specifying the number of designs that are retained and sampled at each iteration ensures full control over the number of designs considered at each generation of the algorithm, allowing specification of the computational effort spent in searching for the optimal design. Thus, one may reasonably evaluate the optimal (or near-optimal) Bayesian design in a computationally-efficient time-frame.
For the high-dimensional design spaces considered in the logistic regression example, we choose to modify this acceptance step slightly. Specifically, we choose to retain the best $r_w$ designs from the current \emph{and} previous iterations of the INSH algorithm. This acts as a failsafe, in instances where the newly proposed designs end up in regions of lower utility than the original (retained) design, which is more likely to occur with particularly high-dimensional designs. This acceptance criterion allows the algorithm to start again from the previous iteration (for a subset of the designs), rather than end up missing regions of high utility through a poor round of sampling.
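In R, both acceptance rules amount to a one-line ranking of the estimated utilities; a sketch (with \verb+D+ a list of designs and \verb+u+ the corresponding utility estimates) is:
\begin{verbatim}
# Elite acceptance: keep the r highest-utility designs of the current wave.
accept_elite <- function(D, u, r) {
  D[order(u, decreasing = TRUE)[seq_len(min(r, length(D)))]]
}

# Pooled variant used for the logistic regression example: rank the current
# wave together with the designs retained at the previous wave.
accept_elite_pooled <- function(D_now, u_now, D_prev, u_prev, r) {
  D_all <- c(D_now, D_prev); u_all <- c(u_now, u_prev)
  D_all[order(u_all, decreasing = TRUE)[seq_len(min(r, length(D_all)))]]
}
\end{verbatim}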
\subsection{Perturbation Kernel}
The \emph{perturbation kernel} is a probability distribution used to sample new designs at each generation of the INSH algorithm, by \emph{perturbing} (\emph{i.e.}, adding some noise to) previous designs. In the death and PK examples we consider in this work, we use a truncated, multivariate-Normal distribution (where the dimension is given by the dimension of the design space, and the truncation is to ensure constraints are satisfied). For the logistic regression example, we demonstrate the flexibility of choice in this aspect, by using a uniform distribution centred on each design point. \comment{There are no explicit guidelines on how to choose the kernel, and the choice is often driven by knowledge and experience of the problem at hand, as with sequential importance sampling methods (e.g., \citealp{Toni:2009}); however, one can reasonably sample from any distribution, centred on the current design points, which can suitably explore the design space. The authors propose that without any knowledge of the relationship between the design points, a symmetric perturbation kernel is a sensible starting point.} A standard cross-entropy algorithm uses the accepted samples to define the mean and (co-)variance structure of a (multivariate-)Normal distribution, and all new samples are generated by this distribution. We prefer to avoid this approach, instead allowing the region surrounding each accepted point to be explored individually. Combining all accepted samples into a single distribution from which to sample may result in new samples not being generated in regions of high utility (for example, when considering multi-modal utility surfaces), and requires re-evaluation of the (co-)variance matrix at each generation.
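As an illustration, the two kernels described above can be implemented as follows; the rejection step is one simple way to impose the truncation, and the constraint arguments (time limits, minimum spacing, half-width) are illustrative defaults rather than fixed choices.
\begin{verbatim}
# Truncated-Normal kernel (death and PK examples): independent Normal noise on
# each sampling time, with proposals rejected until the constraints are met.
perturb_trunc_normal <- function(d, sd = 0.2, t_min = 0, t_max = 24,
                                 min_gap = 0.25, max_tries = 1000) {
  for (i in seq_len(max_tries)) {
    prop <- d + rnorm(length(d), mean = 0, sd = sd)
    ok <- all(prop >= t_min) && all(prop <= t_max) &&
      (length(prop) == 1 || all(diff(prop) >= min_gap))
    if (ok) return(prop)
  }
  d  # fall back to the unperturbed design if no feasible proposal is found
}

# Uniform kernel (logistic regression example): perturb each coordinate within
# a fixed half-width and clip to the design region [-1, 1].
perturb_uniform <- function(d, half_width = 0.1) {
  pmin(pmax(d + runif(length(d), -half_width, half_width), -1), 1)
}
\end{verbatim}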
\subsection{Stopping Criteria} A common feature of optimisation tools is a criterion for stopping the algorithm. It would be straightforward for the user to implement a stopping criterion based on the change in utility of newly sampled designs at each iteration of the algorithm, according to the level of accuracy desired. In the examples considered herein, we choose to demonstrate the algorithm by running it for a fixed number of iterations, and assessing convergence graphically through box-plots of the estimated utility across each generation of the algorithm (similar to the trace plots of \citealp{Overstall:2017}).
\section{Examples} \subsection{Markovian Death Model} Consider the Markovian death model as defined by \citet{Cook:2008}. There is a population of $N$ individuals which, independently, move to an infectious class $I$ at constant rate $b_1$ -- for example, due to infection from an environmental source. The Markov chain models the number of infectious individuals at time $t$, $I(t)$ (where the number of susceptible individuals is $S(t)=N-I(t)$). The positive transition rates of the Markov chain are given by $q_{i,i+1}=b_1(N-i)$, for $i=0,\dots,N-1$. The prior distribution we consider is $b_1 \sim \log\text{-}N(-0.005, 0.01)$, chosen such that the mean lifetime of individuals in the population is one, with an approximate variance of 0.01 (as per \citealp{Cook:2008}).
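Since each individual moves to the infectious class independently at rate $b_1$, data at a set of observation times can be simulated directly from the individuals' exponential waiting times. The following is a minimal R sketch of this simulation (not the MATLAB implementation used for the results); the population size \verb+N+ and the example observation times are illustrative, and the second parameter of the log-Normal prior is interpreted as the variance of $\log b_1$.
\begin{verbatim}
# Minimal sketch: simulate the death model at a vector of observation times.
# Each of the N individuals independently becomes infectious after an
# Exponential(b1) waiting time; I(t) counts how many have done so by time t.
simulate_death <- function(times, N, b1) {
  infection_times <- rexp(N, rate = b1)
  vapply(times, function(t) sum(infection_times <= t), integer(1))
}

# Illustrative use: draw b1 from the prior (variance 0.01 on the log scale,
# i.e. sdlog = 0.1) and simulate counts at two observation times.
b1 <- rlnorm(1, meanlog = -0.005, sdlog = 0.1)
simulate_death(times = c(0.95, 2.80), N = 50, b1 = b1)  # N = 50 is illustrative
\end{verbatim}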
The optimal experimental design for the Markovian death model has previously been considered in a Bayesian framework by \citet{Cook:2008}, \citet{Drovandi:2013}, and \citet{Price:2016}. \citet{Cook:2008} utilised the MCMC approach of \citet{Muller:1999}, and used an exact posterior; hence, the designs of \citet{Cook:2008} provide a gold-standard with which to compare our results. \citet{Drovandi:2013} also utilised the MCMC approach of \citet{Muller:1999}, however, coupled with an approximate posterior distribution evaluated via an ABC approach. We note, however, that the MCMC approach struggles to evaluate the optimal design once more than four design parameters are considered. This is due to the increasing computational difficulty associated with the evaluation of the mode of the multi-dimensional utility surface (\citealp{Drovandi:2013}). \citet{Price:2016} provided an exhaustive search across a grid on the design space, where the utility was evaluated using the ABCdE method. The INSH code for the death model is implemented in MATLAB R2015b.
\subsection{Pharmacokinetic Model} Consider the PK experiment studied by \citet{Ryan:2014} and \citet{Overstall:2017}. In these PK experiments, individuals are administered a fixed amount of a drug. Blood samples are taken in order to understand the concentration of the drug within the body over time.
Let $y_t$ represent the observed concentration of the drug at time $t$. We model the concentration as $ y_t = \mu(t)(1 + \epsilon_{1t}) + \epsilon_{2t}$, where, $$\mu(t) = \frac{400 \theta_2}{\theta_3 (\theta_2 - \theta_1)} \left( e^{-\theta_1 t} - e^{-\theta_2t} \right),$$ is the mean concentration at time $t$, and $\epsilon_{1t} \sim N(0, \sigma^2_{prop})$, $\epsilon_{2t} \sim N(0, \sigma^2_{add})$, $\sigma^2_{prop}=0.01$ and $\sigma^2_{add}=0.1$. That is, $y_t \sim N\left( \mu(t), \sigma_{add}^2 + \sigma_{prop}^2\mu(t)^2 \right).$
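As a concrete illustration, the model above can be simulated in a few lines of R; this is a sketch for exposition only (the sampling times and parameter values in the example call are illustrative), not the implementation used to produce the results.
\begin{verbatim}
# Sketch: simulate noisy concentrations from the PK model at sampling times t,
# for parameters theta = (theta1, theta2, theta3).
simulate_pk <- function(t, theta, sigma2_prop = 0.01, sigma2_add = 0.1) {
  mu <- 400 * theta[2] / (theta[3] * (theta[2] - theta[1])) *
    (exp(-theta[1] * t) - exp(-theta[2] * t))
  # y_t = mu(t) * (1 + eps_1t) + eps_2t, with independent Normal errors
  mu * (1 + rnorm(length(t), 0, sqrt(sigma2_prop))) + rnorm(length(t), 0, sqrt(sigma2_add))
}

# Illustrative call at a handful of times, using theta = (0.1, 1, 20)
simulate_pk(t = c(0.25, 0.5, 1, 2, 4, 8, 12, 24), theta = c(0.1, 1, 20))
\end{verbatim}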
The blood samples are taken within the first 24 hours after the drug is administered (that is, $t\in[0,24]$), and it is not practical to take blood samples less than 15 minutes apart (hence, $t_{i+1}-t_i\geq0.25$). We wish to make 15 observations of this system in order to obtain information about the model parameters $\boldsymbol{\theta} = (\theta_1, \theta_2, \theta_3)$, where $\theta_1$ represents the first-order elimination rate constant, $\theta_2$ represents the first-order absorption rate constant, and $\theta_3$ represents the \emph{volume of distribution} -- a theoretical volume that a drug would have to occupy in order to provide the same concentration as is currently present in the blood plasma, assuming the drug is uniformly distributed \citep{Ryan:2014}.
As per \citet{Ryan:2014} and \citet{Overstall:2017}, the model parameters $\boldsymbol{\theta}=(\theta_1,\theta_2,\theta_3)$ are assumed \emph{a priori} to be independently normally distributed on the log-scale, with means $\log(0.1)$, $\log(1)$, and $\log(20)$, respectively, and variance 0.05. \citet{Duffull:2012}, \citet{McGree:2012}, \citet{Ryan:2014}, and \citet{Ryan:2015pk} have previously evaluated optimal Bayesian experimental designs for pharmacokinetic models, either for only a few sampling times ($<5$), or more sampling times via dimension reduction schemes (\emph{e.g.}, search across the two parameters of a Beta distribution, where the quantiles are scaled to give the observation times). The ACE algorithm of \citet{Overstall:2017} is currently the only method efficient enough to establish optimal Bayesian designs for a design problem of this magnitude directly (i.e., without implementing a dimension reduction scheme), in a feasible amount of computation time.
Furthermore, we show how the output of the INSH algorithm can be used simply to construct \emph{sampling windows} -- a range of values for each observation, rather than a fixed value for each observation time. Sampling windows allow those implementing an optimally-chosen design some flexibility in choosing the sampling times, such that the resulting design is more practically feasible. By defining sampling windows, we can specify a set of near-optimal, practically feasible designs that can be implemented more easily. This avoids the scenario where an inferior design is chosen preferentially by those that are implementing the design, having been supplied with an impractical optimal design. Sampling windows have been considered previously for similar types of models, for example, in \citet{Duffull:2003}, \citet{Chenel:2005}, \citet{Graham:2006}, \citet{Duffull:2012}, and \citet{McGree:2012}, to name a few. As the output of the INSH algorithm consists of a large number of designs sampled around regions of high utility -- as opposed to a single design, as in ACE -- the construction of sampling windows is a simple extension to the algorithm. The INSH algorithm for the PK example is implemented in R (version 3.3.0).
\subsection{Logistic Regression in Four Factors}
Finally, we consider the logistic regression model of \citet{Overstall:2017} in order to demonstrate the benefits of INSH for a considerably higher-dimensional design problem. We consider only the case with independent groups (\emph{i.e.}, no random effects). Let $y_s \sim \text{Bernoulli}(\rho_s)$ be the $s^{th}$ response ($s=1,\dots,n$), and \begin{equation*} \log \left( \frac{\rho_{s}}{1-\rho_{s}} \right) = \beta_0 + \beta_1 x_{1s} + \beta_2 x_{2s} + \beta_3 x_{3s} + \beta_4 x_{4s}, \end{equation*} where $\beta_i$ ($i=0,\dots,4$) are the parameters of interest. The design matrix is $\mathbf{D}=(\boldsymbol{X}_1, \dots, \boldsymbol{X}_4)$, where $\boldsymbol{X}_i$ is a column vector containing the $x_{is}$ values ($s=1,\dots,n$), with $x_{is}\in[-1,1]$. We define independent prior distributions for each of the parameters of interest, $\beta_i\sim U[a_i,b_i]$, $i=0,\dots,4$, where $a=(-3,4,5,-6,-2.5)$ and $b=(3,10,11,0,3.5)$.
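To make the simulation model concrete, a minimal R sketch of a prior draw and the corresponding responses is given below; the design matrix in the example is randomly generated and purely illustrative.
\begin{verbatim}
# Sketch: prior draw and simulated responses for the four-factor logistic model.
a <- c(-3, 4, 5, -6, -2.5)
b <- c(3, 10, 11, 0, 3.5)
rprior_beta <- function() runif(5, min = a, max = b)   # independent U[a_i, b_i]

simulate_logistic <- function(X, beta) {
  eta <- beta[1] + X %*% beta[-1]                      # linear predictor
  rho <- 1 / (1 + exp(-eta))                           # inverse logit
  rbinom(nrow(X), size = 1, prob = rho)                # Bernoulli responses
}

# Illustrative n = 6 design with randomly chosen factor settings in [-1, 1]
X <- matrix(runif(6 * 4, -1, 1), nrow = 6)
y <- simulate_logistic(X, rprior_beta())
\end{verbatim}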
We consider the cases where $n=6,10,24$, and $48$. The INSH algorithm for the logistic regression example is implemented in R (version 3.3.0).
\subsection{Code to Implement INSH}
\comment{ The online repository} \verb+http://www.github.com/DJPrice10/INSH_Code+ \comment{contains code to implement the INSH algorithm in MATLAB (Markovian death model), and R (PK model).}
\section{Results} \label{section:results}
\subsection{Markovian Death Model}
We consider the optimal observation schedule when the number of observations permitted is $n=1,\dots,4,6$, or $8$. The designs for $n=1,\dots,4$ observation times are compared to existing results, and the designs for $n=6$ and $8$ are reported as these were not computationally feasible to obtain using previous methods.
First, however, we provide a graphical demonstration of the INSH algorithm by considering two observation times for the death model. We choose to implement the INSH algorithm for $W=10$ iterations. We start with 100 randomly chosen designs across the feasible region, and retain the best $r_w=10$ and then $5$ designs (for 5 iterations each). At each generation, $m_w=3$ and then $6$ designs (for 5 iterations each) are sampled around each accepted design from the perturbation kernel, in order to sufficiently explore the space around each retained design. That is, we consider $m_w\times r_w=30$ designs at each iteration of the algorithm. The perturbation kernel in this example is a Normal distribution centred on the accepted design, with fixed standard deviation 0.1 for each design point (to allow reasonable exploration around each design point), zero covariances, and then truncated subject to the design constraints, \emph{i.e.}, $t_{i+1}-t_i>0$, $i=1,\dots,n-1$.
Figure \ref{deathmodel:stagesofINSH} shows the progression of the INSH algorithm at each of the first six generations. For comparison, Figure \ref{deathmodel:fullutilitysurface} shows the full utility surface for the death model, evaluated using the ABCdE algorithm at all observation times across a grid with spacing 0.1, with $t_i\in[0.1,10]$. We can clearly see the optimal design is on a ridge at approximately (0.9, 2.8). There is also a region of high utility around (0.7, 2.0). Regions of low utility exist for very small $t_1$ (and in particular, $t_2>4$), or where both $t_1$ and $t_2$ are large (\emph{e.g.}, both above 3.5). In Figure \ref{deathmodel:stagesofINSH}, Generation 2 (Figure \ref{INSH_G2}) shows that regions of low utility are discarded early, and high utility regions are retained. Generations 2-6 (Figures \ref{INSH_G2}-\ref{INSH_G6}) demonstrate the convergence of the samples towards the region containing the optimal design. Generation 5 demonstrates the samples converging about the two ``peaks'' observed in Figure \ref{deathmodel:fullutilitysurface} -- clearly demonstrating the ability to investigate multiple regions of high utility simultaneously. Figure \ref{PK_insh_final_samples} shows all design points considered throughout the INSH algorithm, with each point shaded by the utility value (darker corresponds to higher utility). The regions of high utility have been sampled more thoroughly.
\begin{figure}
\caption{Demonstration of the design regions being considered by the INSH algorithm at each of the first six generations, and the convergence to regions of high utility. The shaded region corresponds to the infeasible design region (i.e., where $t_2<t_1$).}
\label{INSH_G2}
\label{INSH_G3}
\label{INSH_G4}
\label{INSH_G5}
\label{INSH_G6}
\label{deathmodel:stagesofINSH}
\end{figure}
\begin{figure}
\caption{(a) Full utility surface for two observations of the death model evaluated on a grid using the ABCdE algorithm. (b) Samples from every generation of the INSH algorithm for two observations of the death model. In each figure, darker regions/points correspond to higher utility values.}
\label{deathmodel:fullutilitysurface}
\label{PK_insh_final_samples}
\end{figure}
Online Resource B contains: box-plots illustrating the convergence of the sampled observation times towards the optimal, and the corresponding utilities towards the maximum in Figure S1; the optimal designs determined via INSH compared to the existing methods in Table S1; and the corresponding INSH algorithm inputs in Table S2.
\subsection{Pharmacokinetic Model} \label{section:PKresults}
Due to the physical constraints on the frequency at which sampling can be performed (at least 15 minutes apart), we restrict the designs such that $t_{i+1}-t_i \geq 0.25$, $i=1,\dots,14$. We sample designs from a multivariate-Normal perturbation kernel with fixed standard deviation 0.20 and zero covariance, truncated subject to the design constraints. The standard deviation was chosen such that one standard deviation was roughly the minimum distance between any two design points. The first generation of designs was sampled uniformly from the viable design space, $[0,24]$, subject to the constraints. As with the previous example, we specify a decreasing sequence of the number of retained designs $r_w$, and an increasing sequence of the number of sampled designs $m_w$.
In order to compare the run time of the ACE algorithm to the INSH algorithm, we implemented the ACE algorithm as detailed in \citet{Overstall:2017} (\emph{i.e.}, running 20 instances of the ACE algorithm from the \verb+acebayes+ package in (embarrassingly) parallel fashion across four cores). On an iMac running OSX 10.11.4 with 4.0GHz Intel Core i7 processor and 32GB memory, this took 15.53 hours. We did not include the run time of the post-processing utility evaluation of the 20 candidate designs, 20 times each, in order to establish the overall optimal design, for reasons we state shortly. The ACE algorithm for this example in \citet{Overstall:2017} was performed 20 times from random initial conditions, each for a total of 20 iterations. Each iteration searches across each of the 15 dimensions of the design, and considers 20 candidate times to fit the Gaussian process. Thus, a total of $120{,}000$ designs are considered (\emph{i.e.}, utility evaluations) in the ACE algorithm, where $6{,}000$ of these utility evaluations are completed using significantly more Monte Carlo simulations. Specifically, the utilities for the 20 candidate times used to train the Gaussian process are evaluated using $\tilde{B}=B=1{,}000$ Monte Carlo simulations, while the utility corresponding to the design with the proposed new observation time is evaluated using $\tilde{B}=B=20{,}000$.
The advantage of the INSH algorithm is in its ability to consider a large number of designs in multiple regions simultaneously, and so it is sufficient to use less effort to evaluate the utility of each design, as a noisy estimate of the utility will have less influence on the output of the algorithm. Hence, we used $\tilde{B}=B=5{,}000$ for the evaluation of the utility of each design, which was completed in parallel on four cores (using \verb+foreach+ and \verb+doParallel+ packages in R), on the same machine as stated above. In particular, at each generation of the algorithm, the calculation of the utilities of the designs in the current wave was split across the number of available cores (\emph{i.e.}, 1/4 of the required utility calculations were allocated to each core). We ran the INSH algorithm for $W=60$ iterations, with $1{,}200$ randomly generated initial designs. At each iteration, we retained the ``best'' 150, 75, 50, 25, and 10 designs, and proposed two, four, six, 12 and 30 new designs around each accepted design, for 12 iterations of each combination -- maintaining consideration of 300 designs at each iteration, while regularly increasing the exploitation and reducing exploration. These values of $r_w$ and $m_w$ were chosen such that earlier generations of the algorithm retained a reasonable number of designs -- thus, not excluding regions of the design space too quickly -- and as the algorithm progressed, focussed computational effort on high-utility regions of the design space. Given the larger dimension of the design in this example (compared to the death model), we chose to sample a large number of designs around each retained design in later generations of the algorithm, in order to sufficiently explore the design space in proximity to the optimal.
This run of the INSH algorithm took 2.23 hours (approximately 7 times faster than the ACE algorithm). Having obtained the designs and utility evaluations from the INSH algorithm, we perform the same post-processing utility evaluation on the 20 best considered designs, with 20 evaluations of the utility of each design with $\tilde{B}=B=20{,}000$, in order to identify the overall optimal. The total number of designs considered by the INSH algorithm with this acceptance criterion is approximately: $(\text{No.\ initial designs}) + (W-1)\times r_w\times m_w=1200 + (60-1)\times 300 = 18{,}900$ -- that is, the number of initial designs, plus how many were retained at each generation multiplied by the number that were sampled around each retained design. In practice, this number is often slightly higher, as the $r_w^{th}$ ranked design can be a tie, and the optimal design is re-introduced into the set of designs being considered if it occurred in a previous generation (this run of the INSH algorithm actually considered $19{,}428$ designs).
Figure \ref{PK_all_INSH_designs} shows box-plots of the 20 utility evaluations for each of the 20 best designs that were considered by the INSH algorithm, compared to the same number of evaluations of the ACE optimal design reported in \verb+optdescomp15sig()+ in the \verb+acebayes+ package (each utility evaluation using $\tilde{B}=B=20{,}000$). We can see from this figure that there are three designs (5, 8, 19) that perform similarly well to the design found using the ACE algorithm. Online Resource C contains: these three designs from INSH in Table S3; summaries of the utilities for the top 20 designs evaluated by INSH in Table S4; box-plots demonstrating the convergence of the INSH algorithm to the optimal region in Figure S3; and a comparison of the ACE and INSH optimal designs' performance with regard to inference in Figure S4 (in particular, the posterior variance and bias in posterior mode).
\begin{figure}
\caption{Box-plots of the utility for the optimal design found by the ACE algorithm, compared to the top 20 designs considered by the INSH algorithm. The utility of each design is evaluated 20 times, using $\tilde{B}=B=20{,}000$ Monte Carlo simulations.}
\label{PK_all_INSH_designs}
\end{figure}
\subsubsection{Sampling Windows}
The population-based approach of INSH means that we retain a large number of designs with high utility. We use these ``best'' designs to construct the sampling windows for each sampling time, similar to the approach of \citet{McGree:2012}. \citet{McGree:2012} used percentiles of the designs evaluated once a stopping criterion has been reached in their algorithm to form the sampling windows -- we choose a fixed number of ``best'' designs to form the windows. Given the windows, those implementing the design can choose observation times from these windows, ensuring that the physical constraint, $t_{i+1}-t_i\geq0.25$, is satisfied.
As an example of this process, we arbitrarily consider the top 20 designs from the output of the INSH algorithm for the PK example, and form sampling windows as the range of values considered at each observation time for these ``best'' designs. Alternatively, one could consider all designs that were within some percentage of the utility corresponding to the maximum, or use a weighting based on the average utility for each design to approximate a distribution for each sampling time which could subsequently be sampled. In order to construct the sampling window designs, we ``bootstrap'' an observation schedule by randomly selecting each of the 15 sampling times (with equal probability) from the 20 candidate observation times, subject to the constraints. A new design is sampled for each of the 20 utility evaluations to demonstrate the range of potential outputs from this approach. Figures \ref{PK_all_plots:sw_range} and \ref{PK_all_plots:sw_density} show the INSH sampling windows for each observation time. Figure \ref{PK_all_plots:opt_comparison} shows the optimal observation schedules evaluated using the ACE and INSH algorithms. Note that the optimal design returned from the INSH method corresponded to the $19^{th}$ highest utility value from the original output of the INSH algorithm (\emph{i.e.}, using $\tilde{B}=B=5{,}000$). It was deemed the optimal design as it corresponded to the highest mean utility from 20 utility evaluations using $\tilde{B}=B=20{,}000$ (Figure \ref{PK_all_INSH_designs}). Figure \ref{PK_all_plots:util_comparison} shows box-plots of 20 utility evaluations (using $\tilde{B}=B=20{,}000$) for the ACE and INSH optimal designs, and the 20 randomly selected designs from the sampling windows. Note that the average efficiency of the sampling windows designs compared to the INSH optimal design is 99.07\%.
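As a sketch of how the windows and the bootstrapped schedules can be produced from the INSH output, the following R fragment forms per-time ranges from the top designs and resamples a feasible schedule; \verb+designs+ is assumed to be a matrix with one design per row and \verb+utilities+ the corresponding estimated utilities.
\begin{verbatim}
# Sketch: sampling windows from the INSH output. `designs` is a matrix with one
# design (15 ordered times) per row; `utilities` are the estimated utilities.
sampling_windows <- function(designs, utilities, n_best = 20) {
  top <- designs[order(utilities, decreasing = TRUE)[seq_len(n_best)], , drop = FALSE]
  list(top = top, windows = apply(top, 2, range))  # 2 x 15 matrix of window limits
}

# Bootstrap a feasible schedule: pick each time from the candidate values for
# that observation, then check the 15-minute spacing constraint.
sample_schedule <- function(top, min_gap = 0.25, max_tries = 1000) {
  for (i in seq_len(max_tries)) {
    sched <- sort(apply(top, 2, sample, size = 1))
    if (all(diff(sched) >= min_gap)) return(sched)
  }
  stop("no feasible schedule found within max_tries")
}
\end{verbatim}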
\begin{figure}
\caption{(a) Comparison of the optimal designs from the ACE and INSH methods. (b) Boxplots of 20 utility evaluations for the ACE and INSH optimal designs, and the INSH sampling windows designs, using $\tilde{B}=B=20000$. (c) Sampling windows for each observation time obtained from the 20 designs corresponding to the highest utilities found during the INSH algorithm, plotted over 50 realisations of the PK model simulated at parameter values randomly drawn from the prior distribution. The error bars show the sampling window range. (d) Density plot of the sampling windows for each observation time.
}
\label{PK_insh_v_ace}
\label{PK_all_plots:opt_comparison}
\label{PK_utils}
\label{PK_all_plots:util_comparison}
\label{PK_sw_lineplots}
\label{PK_all_plots:sw_range}
\label{PK_sw_util}
\label{PK_all_plots:sw_density}
\label{PK_all_plots}
\end{figure}
\subsection{Logistic Regression in Four Factors}
We implement the INSH algorithm for $n=6,10, 24$, and $48$, and compare the utility for the best-performing design found via INSH to those reported from ACE. INSH is implemented in the same way as for the PK example. We step down the value of $r_w$, and increase the value of $m_w$ as $w$ increases, such that early iterations are geared towards exploration, while later iterations are focussed on exploitation. Table S5 in Online Resource D contains the values of $m_w$ and $r_w$ that are used for the INSH algorithm. In this example, we utilise a uniform perturbation kernel with a fixed width. In order to further increase exploitation as the algorithm progresses, we step down the width of this proposal distribution in line with the change in $r_w$ and $m_w$. For each example, we retained a reasonable number of designs in early generations of the algorithm in order to exclude less of the design space, and increase computational effort on regions of high utility in later generations, as with the PK example. Given the high-dimensional design space for the $n=48$ example, we sampled more designs around each retained design at each generation (\emph{i.e.}, larger $m_w$), in order to better explore the design space. Furthermore, as we are considering very high-dimensional design spaces, we run the risk of randomly stepping in the wrong direction from a given design -- and one cannot feasibly explore satisfactorily around each design point. To avoid the potential for stepping in a poor direction and not being able to get back to a region of potentially high utility, we slightly alter our acceptance step to consider the best $r_w$ designs out of the current iteration, \emph{and} those that were accepted in the previous iteration. This way, should we move from a region of high utility to a region of low utility through a poorly proposed design, we are able to essentially take one step back and propose a new design from the previous design.
\comment{ Initially, our approach was to run INSH for the same computation time as ACE in order to establish the utility of the best designs found via each method. However, the INSH algorithm was unsuccessful in converging to designs with the same utility of those found via ACE for scenarios with a larger design space (INSH designs contained approximately 98.5\% of the utility relative to the ACE designs). Instead, we ran INSH for a reasonable set of parameter values to determine near-optimal designs in a reasonable amount of computation time, and then adjusted the input parameters for ACE to run for the same computation time as INSH. In particular, we reduced the number of iterations of the two phases of the algorithm ($N_I,N_{II}$), or the amount of effort used in evaluating the utility in both training the Gaussian process, and choosing to update a coordinate ($B$). We denote these implementations of ACE as ACE$_N$ and ACE$_B$, respectively. Figure \ref{LR_util} shows box-plots of the utility evaluated at designs found via each implementation of INSH and ACE. For each $n$, the designs found via ACE$_N$ appear to perform as well as those found via INSH, however, ACE$_B$ designs perform better than those found via INSH for larger design spaces (i.e., $n>10$). We note that for $n=6$, and $10$, INSH finds similarly-performing designs to ACE for the same computation time. In Online Resource D: Tables S6-S9 show the designs found via the INSH algorithm; Figures S5-S8 show box-plots of the utility of each design considered at each iteration of the INSH algorithm for each $n$; Table S10 shows the mean and 2.5-97.5\%-percentiles of the designs found by each of the ACE, INSH, ACE$_N$ and ACE$_B$ algorithms, and; Table S11 shows the input parameters for ACE$_N$ and ACE$_B$ to achieve the same computation time as INSH for each $n$.}
\comment{ We do not believe that the discrepancy in performance of the designs found via the ACE and INSH methods is due solely to the increased dimension of the design space. Rather, we believe that it is a combination of the high-dimensional setting rendering the perturbation step less effective, and that the optimal design in each case resides on the boundary. In considerably high-dimensional problems, the perturbation step results in designs considered by INSH routinely moving away from some, or all, of the boundary values that would otherwise result in a more informative design. In other words, the resampling approach of INSH means that it is highly unlikely to stay at a large number of boundary values simultaneously. Conceptually, one can see that designs that reside away from the boundary values can be approached from any direction, whereas boundary values can only be approached from, in a loose sense, ``one direction''. In these examples, the designs contain many boundary values -- for $n=6$, 10, 24, and 48 the optimal designs via ACE contain 16/24, 28/40, 70/96, and 143/192 boundary values (i.e., values of $-1$ and $1$), respectively. We acknowledge that the optimal design existing on a boundary is a common feature of multi-factor experiments such as this, and that this example has highlighted a shortcoming of the INSH algorithm. However, it is the authors' belief that INSH is still a suitable method for high-dimensional problems, although we are unable to demonstrate this with an existing high-dimensional design problem at this stage. }
\comment{ Given that INSH outperforms ACE for small-to-moderate design spaces -- as demonstrated for both the PK example and LR example (for a fixed computation time) -- we propose that INSH is a suitable, computationally-efficient alternative to the ACE algorithm for up to 40-dimensional design spaces (i.e., corresponding to $n=10$ in this example). Otherwise, for truly high-dimensional design problems (i.e., more than 40 dimensions), the authors propose that ACE be implemented, given it has been shown to perform well in these scenarios. }
\begin{figure}
\caption{(a) Box plots of 20 utility evaluations for the optimal designs found by each of the ACE, INSH (solid), ACE$_N$, and ACE$_B$ (dashed) methods, for $n=6,10,24$ and $48$.}
\label{LR_util}
\end{figure}
\section{Discussion} \label{section:discussion}
In this paper, we have considered three common types of statistical model: a Markovian death model, a one-compartment PK model, and a four-factor logistic regression model. Our results for the death model provide a simple demonstration of the efficacy of the proposed INSH algorithm, and give equivalent answers to previously applied methods. The PK model allowed us to consider a larger design space, and to show that the INSH algorithm is able to return designs that marginally outperformed those found using the ``gold-standard'' ACE algorithm, in considerably less computation time -- illustrating that for moderate-size design spaces, INSH is more computationally efficient than ACE. We also showed the simple extension to the INSH algorithm that allows the construction of sampling windows. Finally, the logistic regression example provided an example of a truly high-dimensional design space. We were unable to find designs that performed as well as those found via ACE; however, we were able to demonstrate that INSH provides a suitable, computationally-efficient approach for up to approximately 40 dimensions -- which encompasses a large range of experimental design problems. Alternative examples of considerably high dimension with optimal designs residing away from the boundary should be considered in order to demonstrate the performance of INSH relative to ACE in these instances.
We have not provided a proof that the INSH algorithm will converge to the optimal design; however, one can see that in the limit (\emph{i.e.}, $W\rightarrow\infty$, $\alpha_0=0$ and $\alpha_w\rightarrow1$ as $w\rightarrow\infty$, and sufficiently large $m$), the INSH algorithm will identify the optimal design. However, as with many optimisation routines, the aim of this algorithm is to find near-optimal designs in a computationally feasible amount of time. Thus, practical algorithm inputs must be chosen, which may not guarantee convergence to the optimal solution. This trade-off is apparent in a number of existing optimisation routines -- for example, simulated annealing, cross-entropy, and genetic algorithms all have the potential to converge to local, rather than global, optima.
The INSH algorithm we have presented here is quite general, and there exist many aspects of the algorithm which can be explored in order to improve the efficiency of the algorithm for different optimisation problems. For example, one could update the perturbation kernel based on the correlation/covariance that exists between design parameters of the same design; or randomly incorporate a sample in a region of the design space that has either not been considered previously, or was dismissed earlier in the algorithm, in order to increase exploration of the design space and maximise the chance of obtaining the optimal design. Another important consideration will be to provide some general rules regarding the choice of algorithm inputs for a particular utility surface, or magnitude and dimension of design space. For example, the initial samples could be used to approximate some characteristics of the utility surface, and provide some insight into sensible choices of the inputs for the algorithm. While we did not consider it here, the number of utility evaluations which form the approximate expected utility could also be increased as the algorithm progresses, \emph{i.e.}, specify a sequence for $\tilde{B}$ and $B$ in the SIG utility evaluation -- ensuring more effort is spent evaluating a more precise estimate of the utility in regions near to the optimal design.
While we have added commentary around our choice of parameters for each example, we note that many of the parameters are problem-specific, and require the user to specify sensible values based on their understanding of the system and design space. As with other stochastic optimisation routines, some trial-and-error may be required in order to choose suitable INSH input values for different problem types. Given the drastic increase in computational efficiency of the INSH algorithm for small-to-moderately sized design problems, the authors believe that one could be very cautious with some parameter choices (\emph{e.g.}, choose a large number of accepted designs and a large number of newly sampled designs), in order to ensure satisfactory exploration of the design space, and still obtain designs more efficiently than existing algorithms.
\section*{References}
\begin{thebibliography}{21} \expandafter\ifx\csname natexlab\endcsname\relax\def\natexlab#1{#1}\fi \expandafter\ifx\csname url\endcsname\relax
\def\url#1{\texttt{#1}}\fi \expandafter\ifx\csname urlprefix\endcsname\relax\def\urlprefix{URL }\fi
\bibitem[{Berry(2004)}]{Berry:2004} Berry, D.~A., 2004. Bayesian statistics and the efficiency and ethics of
clinical trials. Statistical Science 19, 175--187.
\bibitem[{Chenel et~al.(2005)Chenel, Ogungbenro, Duval, Laveille, Jochemsen,
and Aarons}]{Chenel:2005} Chenel, M., Ogungbenro, K., Duval, V., Laveille, C., Jochemsen, R., Aarons, L.,
2005. Optimal blood sampling time windows for parameter estimation using a
population approach: Design of a phase {II} clinical trial. Journal of
Pharmacokinetics and Pharmacodynamics 32.
\bibitem[{Cook et~al.(2008)Cook, Gibson, and Gilligan}]{Cook:2008} Cook, A.~R., Gibson, G.~J., Gilligan, C.~A., 2008. Optimal observation times in
experimental epidemic processes. Biometrics 64~(3), 860--868.
\bibitem[{De~Boer et~al.(2005)De~Boer, Kroese, Mannor, and
Rubinstein}]{DeBoer:2005} De~Boer, P., Kroese, D.~P., Mannor, S., Rubinstein, R.~Y., 2005. A tutorial on
the cross-entropy method. Annals of Operations Research 134, 19--67.
\bibitem[{Drovandi and Pettitt(2013)}]{Drovandi:2013} Drovandi, C., Pettitt, A., 2013. Bayesian experimental design for models with
intractable likelihoods. Biometrics 69, 937--948.
\bibitem[{Duffull et~al.(2012)Duffull, Graham, Mengersen, and
Eccleston}]{Duffull:2012} Duffull, S.~B., Graham, G., Mengersen, K., Eccleston, J., 2012. Evaluation of
the pre-posterior distribution of optimized sampling times for the design of
pharmacokinetic studies. Journal of Biopharmaceutical Statistics 22, 16--29.
\bibitem[{Faller et~al.(2003)Faller, Klingm\"{u}ller, and Timmer}]{Faller:2003} Faller, D., Klingm\"{u}ller, U., Timmer, J., 2003. Simulation methods for
optimal experimental design in systems biology. Simulation 79, 717--725.
\bibitem[{Goldberg(1989)}]{Goldberg:1989} Goldberg, D.~E., 1989. Genetic Algorithms in Search, Optimization and Machine
Learning, 1st Edition. Addison-Wesley Longman Publishing Co., Inc., Boston,
MA, USA.
\bibitem[{Graham and Aarons(2006)}]{Graham:2006} Graham, G., Aarons, L., 2006. Optimum blood sampling time windows for parameter
estimation in population pharmacokinetic experiments. Statistics in Medicine
25, 4004--4019.
\bibitem[{Green and Duffull(2003)}]{Duffull:2003} Green, B., Duffull, S.~B., 2003. Prospective evaluation of a {D}-optimal
designed population pharmacokinetic study. Journal of Pharmacokinetics and
Pharmacodynamics 30.
\bibitem[{Kullback and Leibler(1951)}]{Kullback} Kullback, S., Leibler, R.~A., 1951. On information and sufficiency. The Annals
of Mathematical Statistics 22, 79--86.
\bibitem[{McGree et~al.(2012)McGree, Drovandi, and Pettitt}]{McGree:2012} McGree, J.~M., Drovandi, C.~C., Pettitt, A.~N., 2012. A sequential {Monte
Carlo} approach to derive sampling times and windows for population
pharmacokinetic studies. Journal of Pharmacokinetics and Pharmacodynamics 39,
519--526.
\bibitem[{M{\"u}ller(1999)}]{Muller:1999} M{\"u}ller, P., 1999. Simulation based optimal design. In: Bernardo, J. (Ed.),
Bayesian Statistics. Oxford University Press, pp. 459--474.
\bibitem[{Overstall and Woods(2017)}]{Overstall:2017} Overstall, A.~M., Woods, D.~C., 2017. Bayesian design of experiments using
approximate coordinate exchange. Technometrics 59~(4), 458--470.
\bibitem[{Overstall et~al.(2017)Overstall, Woods, and Adamou}]{Rpack:acebayes} Overstall, A.~M., Woods, D.~C., Adamou, M., 2017. acebayes: Optimal Bayesian
Experimental Design using the ACE algorithm. R package version 1.4.1. \newline\urlprefix\url{https://CRAN.R-project.org/package=acebayes}
\bibitem[{Pagendam and Pollett(2013)}]{Pagendam:2013} Pagendam, D.~E., Pollett, P.~K., 2013. Optimal design of experimental
epidemics. Journal of Statistical Planning and Inference 143~(3), 563--572.
\bibitem[{Price et~al.(2016)Price, Bean, Ross, and Tuke}]{Price:2016} Price, D.~J., Bean, N.~G., Ross, J.~V., Tuke, J., 2016. On the efficient
determination of optimal {Bayesian} experimental designs using {ABC}: A case
study in optimal observation of epidemics. Journal of Statistical Planning
and Inference 172, 1--15.
\bibitem[{Ryan et~al.(2015{\natexlab{a}})Ryan, Drovandi, McGree, and
Pettitt}]{Ryan:2015} Ryan, E.~G., Drovandi, C., McGree, J.~M., Pettitt, A., 2015{\natexlab{a}}. A
review of modern computational algorithms for {Bayesian} optimal design.
International Statistical Review.
\bibitem[{Ryan et~al.(2015{\natexlab{b}})Ryan, Drovandi, and
Pettitt}]{Ryan:2015pk} Ryan, E.~G., Drovandi, C.~C., Pettitt, A.~N., 2015{\natexlab{b}}. Fully
{Bayesian} experimental design for pharmacokinetic studies. Entropy 17,
1063--1089.
\bibitem[{Ryan et~al.(2014)Ryan, Drovandi, Thompson, and Pettitt}]{Ryan:2014} Ryan, E.~G., Drovandi, C.~C., Thompson, M.~H., Pettitt, A.~N., 2014. {Towards
Bayesian experimental design for nonlinear models that require a large number
of sampling times}. Computational Statistics \& Data Analysis 70, 45--60.
\bibitem[{Toni et~al.(2009)Toni, Welch, Strelkowa, Ipsen, and
Stumpf}]{Toni:2009} Toni, T., Welch, D., Strelkowa, N., Ipsen, A., Stumpf, M. P.~H., 2009.
Approximate {B}ayesian computation scheme for parameter inference and model
selection in dynamical systems. Journal of The Royal Society Interface 6,
187--202.
\end{thebibliography}
\setcounter{figure}{0} \renewcommand\thefigure{S\arabic{figure}} \renewcommand\thetable{S\arabic{table}}
\section*{Appendix A} \textit{Existing Algorithms}
Algorithm \ref{ourabc} describes the ABC algorithm used by ABCdE and the INSH algorithm (for the death model) to evaluate the posterior distribution. \begin{algorithm}[htbp] \caption{ABC Algorithm: Fixed tolerance}\label{ourabc} \begin{algorithmic}[1] \Require Observed data $\boldsymbol{x}$, simulated data $\boldsymbol{y}=(\boldsymbol{y}^1,\dots,\boldsymbol{y}^N)$, corresponding parameter values $\boldsymbol{\theta}^i, i=1,\dots,N$, and tolerance $\epsilon$.
\State Evaluate discrepancies $\rho^i = \rho(\boldsymbol{x}, \boldsymbol{y}^i)$, creating particles $\{ \boldsymbol{\theta}^i, \rho^i \}$ for $i=1,\dots,N$.
\State Using the posterior sample of parameters $\boldsymbol{\theta}^i$ such that $\rho^i<\epsilon$, evaluate utility. \Ensure Utility for current design, having observed $\boldsymbol{x}$, $U(d,\boldsymbol{x})$. \end{algorithmic} \end{algorithm}
Algorithm \ref{ABCdEalgorithm} describes the ABCdE algorithm of \citet{Price:2016}, to evaluate the optimal Bayesian experimental design. \begin{algorithm}[H] \caption{ABCdE Algorithm}\label{ABCdEalgorithm} \begin{algorithmic}[1]
\State Choose grid over the parameter space for the discrete estimate of the utility, number of simulations $N_{pre}$, and tolerance $\epsilon$.
\State Sample $N_{pre}$ parameters $\boldsymbol{\theta}$ from $p(\boldsymbol{\theta})$.
\State For each of the $N_{pre}$ parameters, and under every design $d$ in the design space $\mathcal{D}$, simulate process and store $X_{N_{pre}\times |\mathcal{D}|}(\boldsymbol{\theta}, d)$. \label{abcde_algorithm_line3}
\For{$i=1$ to $|\mathcal{D}|$}
\State \parbox[t]{0.925\linewidth}{Consider the unique rows of data $Y(\boldsymbol{\theta}, d^i) = \text{ unique}(X(\boldsymbol{\theta}, d^i))$.\\ \emph{Note: We let $K^i$ be the number of such unique data, and $n_{k^i}$ be the number of repetitions of the ${k^i}^{th}$ unique data, for $k^i=1,\dots,K^i$}.\label{abcdealg:uniquedata}
}
\For{$k^i=1$ to $K^i$}
\State \parbox[t]{0.925\linewidth}{Pass `observed data' $\boldsymbol{y}^{k^i}=[Y(\boldsymbol{\theta},d^i)]_{k^i}$, `simulated data' $X(\boldsymbol{\theta},d^i)$, $N_{pre}$ sampled parameters, and tolerance $\epsilon$ to Algorithm \ref{ourabc}, and return contribution $U(\boldsymbol{y}^{k^i},d^i)$ to the expected utility, for ${k^i}^{th}$ unique datum (`observed data') and $i^{th}$ design\label{abcdealg:createposterior}.}
\EndFor
\State {Store $u(d^i) = \frac{1}{N_{pre}} \sum_{k^i} {n_{k^i}} U(\boldsymbol{y}^{k^i}, d^i)$; the average utility over all parameters and data for design $d^i$. \label{abcde_algorithm_line9}}
\EndFor
\Ensure The optimal design $d^* = \underset{d\in\mathcal{D}}{\text{argmax}}(u(d))$. \end{algorithmic} \end{algorithm}
Algorithm \ref{mullersalgorithm} details the MCMC algorithm for determining Bayesian optimal designs proposed by \citet{Muller:1999}. \begin{algorithm}[htbp] \caption{MCMC with stationary distribution $h(\boldsymbol{\theta},\boldsymbol{x},d)$, \citet{Muller:1999}}\label{mullersalgorithm} \begin{algorithmic}[1] \Require Number of samples $m$, prior distribution of model parameters $p(\boldsymbol{\theta})$, and proposal density $q(\cdot)$.
\State Choose, or simulate an initial design, $d^1$.
\State Sample $\boldsymbol{\theta}^1\sim p(\boldsymbol{\theta})$, simulate $\boldsymbol{x}^1\sim p(\boldsymbol{x}\mid \boldsymbol{\theta}^1, d^1)$, and evaluate $u^1=U(\boldsymbol{\theta}^1, \boldsymbol{x}^1, d^1)$.\label{mulleralgorithm:initial}
\For{$i=1$ to $m$}
\State Generate a candidate design, $\tilde{d}$, from a proposal density $q(\tilde{d} \mid d^i)$.
\State Sample $\tilde{\boldsymbol{\theta}}\sim p(\boldsymbol{\theta})$, simulate $\tilde{\boldsymbol{x}}\sim p(\boldsymbol{x}\mid \tilde{\boldsymbol{\theta}}, \tilde{d})$, and evaluate $\tilde{u}=U(\tilde{\boldsymbol{\theta}}, \tilde{\boldsymbol{x}}, \tilde{d})$. \label{mulleralgorithm:simdata}
\State Calculate,
\begin{align} \alpha &=\min\left\{ 1, \frac{\tilde{u}\ q(d^i\mid \tilde{d})}{u^i\ q(\tilde{d} \mid d^i)} \right\}. \notag
\end{align}
\State Generate $a\sim U(0,1)$
\If{$a<\alpha$}
\State Set $(d^{i+1}, u^{i+1}) = (\tilde{d}, \tilde{u})$
\Else{}
\State Set $(d^{i+1}, u^{i+1}) =(d^i, u^i)$
\EndIf
\EndFor\\
\Ensure Sample of $m$ designs, $d$. \end{algorithmic} \end{algorithm}
\section*{Appendix B}
\textit{Markovian Death Model ABC Choices}
{We provide the parameter choices for the ABC algorithm used to evaluate the approximate posterior distributions when evaluating the utility for the Markovian death model example. Prior to running the ABC algorithm (Algorithm \ref{ourabc}), we sample $N=50,000$ parameter values from the prior distribution, and simulate data corresponding to each under each design. For each of 1, 2, 3, 4, 6, and 8 observation times, we use a tolerance of 0.25, 0.50, 0.75, 1.00, 1.50, 1.50, respectively.}
{We note, however, that these choices are problem-specific, and suggest that researchers undertake a pilot study in order to determine sensible parameter choices, as one would do prior to using ABC for inference.}
\textit{Markovian Death Model Results}
Figure \ref{Death_boxplots} demonstrates the convergence of the INSH algorithm to the optimal observation times, and the maximum utility, for two observation times. \begin{figure}
\caption{Boxplots of the two observation times, and the utility corresponding to the considered designs at each generation of the INSH algorithm. The horizontal lines in (a) and (b) correspond to the optimal observation times evaluated using the ABCdE method.}
\label{Death_boxplots}
\end{figure}
\begin{table}[H]
\caption{Comparison of the optimal observation times for the death process, from \citet{Cook:2008}, \citet{Drovandi:2013}, \citet{Price:2016}, and the INSH algorithm. $|t|$ is the pre-determined number of observation times, and $i$ is the $i^{th}$ time.} \label{table:deathmodel_oeds}
\begin{center}
\begin{tabular}{cccccc}
\hline
& & \multicolumn{4}{c}{Design Method}\\
$|t|$ & $i$ & Cook, \emph{et al.}\ & Drovandi $\&$ Pettitt & ABCdE & INSH\\
\hline
1 & 1 & 1.70 & 1.60 & 1.50 & 1.45 \\
\hline
2 & 1 & 0.90 & 1.15 & 0.80 & 0.95 \\
- & 2 & 2.40 & 3.05 & 2.80 & 2.80 \\
\hline
3 & 1 & 0.70 & 0.75 & 0.40 & 0.60 \\
- & 2 & 1.50 & 1.90 & 1.30 & 1.15 \\
- & 3 & 2.90 & 3.90 & 2.60 & 2.70 \\
\hline
4 & 1 & 0.80 & 0.75 & 0.30 & 0.10 \\
- & 2 & 1.70 & 1.70 & 0.70 & 0.50 \\
- & 3 & 3.10 & 2.75 & 1.30 & 1.20 \\
- & 4 & 5.30 & 4.35 & 2.70 & 2.75 \\
\hline
6 & (1,2) & - & - & - & (0.05,0.15) \\
- & (3,4) & - & - & - & (0.45,1.15) \\
- & (5,6) & - & - & - & (1.75,3.05) \\
\hline
8 & (1,2) & - & - & - & (0.05,0.15) \\
- & (3,4) & - & - & - & (0.25,0.45) \\
- & (5,6) & - & - & - & (0.80,1.40) \\
- & (7,8) & - & - & - & (2.20,2.90) \\
\hline
\end{tabular}
\end{center} \end{table} Table \ref{table:deathmodel_oeds} contains the optimal experimental designs for different numbers of observations of the Markovian death model, evaluated by \citet{Cook:2008}, \citet{Drovandi:2013}, \citet{Price:2016} (where computationally feasible), and the INSH algorithm.
Table \ref{death:insh_pars} contains the input parameters for the INSH algorithm, applied to the death model.
\begin{table}[H] \caption{Input parameters for the INSH algorithm, applied to the Markovian death model. Note that each value of $m_w$ and $r_w$ is applied for $W/2$ iterations.} \label{death:insh_pars}
\begin{center}
\begin{tabular}{rrrrr}
$|t|$ & $W$ & $m_w$ & $r_w$ & No. initial designs \\
\hline
1 & 8 & $(3, 5)$ & $(10,6)$ & 20 \\
2 & 10 & $(3, 5)$ & $(20,12)$ & 50 \\
3 & 16 & $(3, 5)$ & $(20,12)$ & 120 \\
4 & 20 & $(3, 5)$ & $(20,12)$ & 250 \\
6 & 30 & $(3, 5)$ & $(25,15)$ & 400 \\
8 & 50 & $(3, 5)$ & $(25,15)$ & 600 \\
\end{tabular}
\end{center} \end{table}
\section*{Appendix C} \textit{INSH Results for the Pharmacokinetic Model}
Figure \ref{PK_50_mean_concentrations} demonstrates the mean concentrations over time of the pharmacokinetic model evaluated for 50 parameter sets sampled from the prior distribution.
\begin{figure}
\caption{Plot of 50 mean concentrations over time of the pharmacokinetic model simulated using values sampled from the prior distribution.}
\label{PK_50_mean_concentrations}
\end{figure}
Table \ref{PKexample:opttimes} gives the 15 optimal observation times from the top three designs considered by the INSH algorithm. Each chosen optimal design shows the same pattern -- four early observation times ($<1.2$), followed by a cluster of observation times around 4-7, and the remaining observations grouped together towards the final permitted time.
\begin{table}[H] \caption{Three best sampling schedules evaluated from the INSH algorithm for the pharmacokinetic model.}\label{PKexample:opttimes} \begin{center}
\begin{tabular}{lc}
Original rank & Design \\
\hline
19 & $(0.1961, 0.4840, 0.7506, 1.176, 4.069, 4.780, 5.281,$ \\
& $ 6.030, 6.377, 18.22, 18.85, 19.72, 20.33, 21.52, 22.04)$ \\
\\
2 & $(0.2460, 0.5054, 0.8017, 1.211, 4.035, 4.477, 5.173, 6.101, $ \\
& $ 6.632, 17.82, 18.63, 19.71, 20.32, 21.57, 21.98)$ \\
\\
3 &$(0.1989, 0.4801, 0.7778, 1.103, 4.465, 4.754, 5.776, 6.270,$ \\
&$ 6.754, 18.50, 18.99, 20.19, 20.87, 21.16, 21.87)$ \\
\end{tabular}
\end{center} \end{table}
Figure \ref{INSH_pk_convergence} shows box plots of the observation times, and the utility evaluations of the corresponding designs considered at each wave of the INSH algorithm. The figure for $t_9$, for example, depicts the ability of the INSH algorithm to search multiple regions simultaneously. In particular, iterations 17-27 are considering observation times in approximately three clusters -- around times of 5, 12 and 15.
\begin{figure}
\caption{Figure showing the convergence of the sampled designs towards the region near the optimal design. Each panel represents an individual aspect of the sampled designs, the x-axis is the iteration of the INSH algorithm, and the y-axis is the value of the design aspect. The final panel shows the utilities corresponding to the sampled designs.}
\label{INSH_pk_convergence}
\end{figure}
Table \ref{table_pk_klds} contains the estimated expected utility, median utility, and the $10^{th}$ and $90^{th}$ percentiles, corresponding to each of the top 20 designs considered by INSH, and the optimal returned by the ACE algorithm.
\begin{table}[H] \caption{Summary statistics of estimated utilities corresponding to the top 20 designs from the INSH algorithm, and the optimal design returned by the ACE algorithm. The design highlighted in bold is the design considered to be the optimal from the INSH algorithm.} \label{table_pk_klds} \centering
\begin{tabular}{r|cccc}
\hline
& \multicolumn{4}{c}{Utility}\\
Design & Mean & Median & 10\% & 90\% \\
\hline
ACE & 4.4987 & 4.5004 & 4.4844 & 4.5102 \\ 1 & 4.4874 & 4.4865 & 4.4715 & 4.5040 \\
2 & 4.4725 & 4.4710 & 4.4596 & 4.4894 \\
3 & 4.4707 & 4.4685 & 4.4598 & 4.4864 \\
4 & 4.4700 & 4.4686 & 4.4576 & 4.4835 \\
5 & 4.4991 & 4.4995 & 4.4870 & 4.5111 \\
6 & 4.4739 & 4.4719 & 4.4638 & 4.4900 \\
7 & 4.4707 & 4.4733 & 4.4558 & 4.4866 \\
8 & 4.5034 & 4.5015 & 4.4956 & 4.5156 \\
9 & 4.4633 & 4.4648 & 4.4506 & 4.4758 \\
10 & 4.4595 & 4.4633 & 4.4444 & 4.4736 \\
11 & 4.4652 & 4.4633 & 4.4526 & 4.4803 \\
12 & 4.4733 & 4.4742 & 4.4608 & 4.4868 \\
13 & 4.4508 & 4.4497 & 4.4349 & 4.4654 \\
14 & 4.4748 & 4.4754 & 4.4616 & 4.4878 \\
15 & 4.4702 & 4.4690 & 4.4527 & 4.4941 \\
16 & 4.4537 & 4.4523 & 4.4426 & 4.4725 \\
17 & 4.4625 & 4.4633 & 4.4439 & 4.4846 \\
18 & 4.4853 & 4.4877 & 4.4702 & 4.4991 \\
{\bf 19} & {\bf 4.5052} & {\bf 4.5076} & {\bf 4.4866} & {\bf 4.5204} \\
20 & 4.4799 & 4.4780 & 4.4676 & 4.4975 \\
\hline \end{tabular} \end{table}
Figure \ref{PK_inference} provides a comparison of the inferential performance of the two optimal designs -- corresponding to INSH and ACE -- with regards to bias in a point estimate, and the posterior standard deviation. We simulated 100 experiments from random parameters drawn from the prior distribution, and evaluated an approximate posterior distribution using a Metropolis-Hastings algorithm (retaining 100{,}000 samples from the posterior, following a burn-in of 10{,}000). The bias is estimated as the difference between the MAP (\emph{maximum a posteriori}) estimate and the true parameter value that created the simulated data. Recall, the prior variance was 0.05 for each parameter (prior standard deviation is approximately 0.224).
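A minimal sketch of this comparison is given below. The functions \verb+prior_sample+, \verb+simulate+ and \verb+make_log_post+ are placeholders for the pharmacokinetic model and its posterior density, the random-walk step size is arbitrary, the chain is initialised at the true parameter value for convenience, and the MAP estimate is taken crudely as the highest-posterior sample in the retained chain.
\begin{verbatim}
import numpy as np

def metropolis_hastings(log_post, theta0, n_iter, rng, step=0.05):
    # Random-walk Metropolis sampler with Gaussian proposals (illustrative only).
    theta = np.atleast_1d(np.array(theta0, dtype=float))
    lp = log_post(theta)
    chain = np.empty((n_iter, theta.size))
    lps = np.empty(n_iter)
    for i in range(n_iter):
        prop = theta + step * rng.standard_normal(theta.size)
        lp_prop = log_post(prop)
        if np.log(rng.random()) < lp_prop - lp:
            theta, lp = prop, lp_prop
        chain[i], lps[i] = theta, lp
    return chain, lps

def design_inference_summary(design, prior_sample, simulate, make_log_post,
                             n_rep=100, n_keep=100_000, n_burn=10_000, rng=None):
    # For each replicate: draw a "true" parameter from the prior, simulate data
    # at the design, run MH, and record the MAP bias and posterior std. dev.
    rng = rng or np.random.default_rng()
    biases, post_sds = [], []
    for _ in range(n_rep):
        theta_true = np.atleast_1d(prior_sample(rng))
        data = simulate(theta_true, design, rng)
        log_post = make_log_post(data, design)
        chain, lps = metropolis_hastings(log_post, theta_true,
                                         n_keep + n_burn, rng)
        chain, lps = chain[n_burn:], lps[n_burn:]
        theta_map = chain[np.argmax(lps)]     # crude MAP estimate from the chain
        biases.append(theta_map - theta_true)
        post_sds.append(chain.std(axis=0))
    return np.array(biases), np.array(post_sds)
\end{verbatim}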
It appears as though the design evaluated by the INSH algorithm performs marginally better with respect to the posterior standard deviation -- that is, the estimated standard deviations are slightly lower for each parameter. The bias in the parameter estimates appears roughly equivalent between the two designs.
\begin{figure}
\caption{Comparison of the bias in MAP estimate, and posterior standard deviation of each parameter in the pharmacokinetic model.}
\label{PK_inference}
\end{figure}
\section*{Appendix D} \textit{INSH Results for the Logistic Regression Example}
Table \ref{LR_INSH_pars} contains the choices of parameters $r_w$ and $m_w$ for the INSH algorithm. Overall, for $n=6,10$ and $24$, $W=120, 132$ and $240$ iterations were used, and $r_w$ and $m_w$ were chosen such that a total of 600 designs were evaluated at each iteration. For $n=48$, $W=360$, a total of 1200 designs were considered at each iteration, and more emphasis was placed on exploration early on, to account for the larger-dimensional design space. In each example, we initiated the INSH algorithm with 10{,}000 designs: with probability 0.5 a design was sampled uniformly from the design space, and otherwise it was placed on a boundary (i.e., all elements of the design were randomly selected from $\{-1,1\}$).
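A minimal sketch of this initialisation step (with our own placeholder names; each design is represented as an $n \times 4$ matrix of covariate values in $[-1,1]$, where $n$ is the number of replicates) is:
\begin{verbatim}
import numpy as np

def initial_designs(n_designs=10_000, n_runs=6, n_covariates=4, rng=None):
    # With probability 0.5 a design is drawn uniformly from [-1, 1]^(n_runs x 4);
    # otherwise every entry is set to -1 or +1 at random (a boundary design).
    rng = rng or np.random.default_rng()
    designs = []
    for _ in range(n_designs):
        if rng.random() < 0.5:
            d = rng.uniform(-1.0, 1.0, size=(n_runs, n_covariates))
        else:
            d = rng.choice([-1.0, 1.0], size=(n_runs, n_covariates))
        designs.append(d)
    return designs
\end{verbatim}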
\begin{table}[H] \caption{INSH algorithm parameter choices for the Logistic Regression example.} \label{LR_INSH_pars}
\begin{center}
\begin{tabular}{cccc}
$n$ & Parameter & Values & Iterations per value\\
\hline
& $r_w$ & (200,100,50,25,15,10) & \\
$6$ & $m_w$ & $(3, 6,12,24,40,60)$ & 20 \\
& $\sigma_w$ & (0.20, 0.10, 0.05, 0.025, 0.01, 0.005) & \\
\hline
& $r_w$ & (200,100,50,25,15,10) & \\
$10$ & $m_w$ & $(3,6,12,24,40, 60)$ & 22 \\
& $\sigma_w$ & (0.20, 0.10, 0.05, 0.025, 0.01, 0.005) & \\
\hline
& $r_w$ & (200,100,50,25,15,10) & \\
$24$ & $m_w$ & $(3,6,12,24,40, 60)$ & 40 \\
& $\sigma_w$ & (0.20, 0.10, 0.05, 0.025, 0.01, 0.005) & \\
\hline
& $r_w$ & (200,100,50,25,15,10) & \\
$48$ & $m_w$ & $(6,12,24,48,80,120)$ & 60 \\
& $\sigma_w$ & (0.20, 0.10, 0.05, 0.025, 0.010, 0.0025) & \\
\end{tabular}
\end{center} \end{table}
Tables \ref{LR_OD_n6}, \ref{LR_OD_n10}, \ref{LR_OD_n24} and \ref{LR_OD_n48} show the optimal designs found by the INSH algorithm for $n=6,10,24$, and $48$, respectively. \begin{table}[H] \caption{Optimal design from the INSH algorithm for the Logistic regression model with $n=6$.} \label{LR_OD_n6}
\begin{center}
\begin{tabular}{c|cccc}
$n$ & $x_1$ & $x_2$ & $x_3$ & $x_4$ \\
\hline
1 & -0.80 & 0.98 & 0.99 & 0.98 \\
2 & 1.00 & -0.47 & 0.97 & -0.99 \\
3 & 1.00 & -0.63 & 1.00 & 1.00 \\
4 & -0.89 & 1.00 & 0.56 & -1.00 \\
5 & 0.81 & -1.00 & -0.99 & -0.85 \\
6 & -0.97 & 0.45 & -1.00 & 0.99 \\
\end{tabular}
\end{center} \end{table}
\begin{table}[H] \caption{Optimal design from the INSH algorithm for the Logistic regression model with $n=10$.} \label{LR_OD_n10} \begin{center}
\begin{tabular}{c|cccc}
$n$ & $x_1$ & $x_2$ & $x_3$ & $x_4$ \\
\hline
1 & -0.80 & 1.00 & 1.00 & 0.98 \\
2 & 0.72 & -1.00 & -0.98 & 0.99 \\
3 & -1.00 & 0.53 & -0.86 & -0.99 \\
4 & 1.00 & -0.85 & 0.39 & -1.00 \\
5 & 0.97 & -0.32 & 0.99 & -0.92 \\
6 & 0.91 & -0.46 & 1.00 & 0.99 \\
7 & -0.72 & 1.00 & 0.90 & -0.97 \\
8 & -1.00 & 0.51 & -0.93 & 0.99 \\
9 & 0.92 & -0.99 & -1.00 & -0.87 \\
10 & -0.97 & 0.64 & -0.99 & -0.96 \\ \end{tabular} \end{center} \end{table}
\begin{table}[H] \caption{Optimal design from the INSH algorithm for the Logistic regression model with $n=24$.} \label{LR_OD_n24}
\begin{center}
\begin{tabular}{c|cccc}
$n$ & $x_1$ & $x_2$ & $x_3$ & $x_4$ \\
\hline
1 & -0.33 & 0.50 & 0.89 & 0.89 \\
2 & -0.25 & 0.78 & 0.89 & -0.96 \\
3 & -0.95 & 0.94 & 0.93 & 0.97 \\
4 & 0.68 & -0.98 & -0.92 & -0.81 \\
5 & -0.93 & 0.99 & 0.86 & -0.88 \\
6 & -0.77 & 0.98 & 0.82 & 0.87 \\
7 & 0.82 & -0.96 & -0.83 & -0.99 \\
8 & -0.64 & 0.11 & -0.66 & 0.98 \\
9 & -0.48 & 0.86 & 0.96 & 0.92 \\
10 & -0.76 & 0.40 & -0.95 & -0.94 \\
11 & 1.00 & -0.74 & 0.97 & 1.00 \\
12 & -0.99 & 0.46 & -0.97 & 0.96 \\
13 & 0.96 & -0.44 & 0.97 & -0.97 \\
14 & 0.78 & -0.93 & -0.89 & 0.96 \\
15 & -0.34 & 0.74 & 0.99 & -0.98 \\
16 & 1.00 & -0.95 & -0.92 & -0.99 \\
17 & -0.22 & -0.14 & -0.86 & -0.96 \\
18 & -0.96 & 0.85 & -0.59 & -0.81 \\
19 & -0.70 & 0.12 & -0.98 & -0.93 \\
20 & -0.74 & 0.30 & -0.98 & 0.96 \\
21 & 0.99 & -0.50 & 0.83 & -0.60 \\
22 & 0.53 & -0.99 & -0.91 & 0.99 \\
23 & -0.21 & -0.26 & -1.00 & 0.85 \\
24 & 1.00 & -0.55 & 0.97 & 0.96 \\
\end{tabular}
\end{center} \end{table}
\begin{table}[H] \caption{Optimal design from the INSH algorithm for the Logistic regression model with $n=48$.} \label{LR_OD_n48}
\begin{center}
\begin{tabular}{c|cccc}
$n$ & $x_1$ & $x_2$ & $x_3$ & $x_4$ \\
\hline
1 & -0.98 & 0.99 & 1.00 & -0.60 \\
2 & -0.70 & 0.67 & 0.73 & 0.99 \\
3 & -0.97 & 0.95 & 0.38 & -0.95 \\
4 & -0.95 & 0.92 & -0.18 & 0.94 \\
5 & 0.76 & -0.50 & 0.94 & -0.88 \\
6 & 0.95 & -0.95 & -0.60 & 0.98 \\
7 & -1.00 & 0.38 & -1.00 & -0.88 \\
8 & 0.42 & -0.84 & -0.94 & 0.89 \\
9 & -0.91 & 0.75 & -0.85 & -0.98 \\
10 & 0.51 & -0.95 & -0.99 & 1.00 \\
11 & 0.52 & -0.13 & 0.97 & -0.89 \\
12 & 0.46 & -0.71 & -0.87 & 0.92 \\
13 & -0.70 & 0.94 & 0.84 & 0.92 \\
14 & -0.95 & 0.59 & -1.00 & -0.98 \\
15 & -0.36 & 0.90 & 0.99 & -0.91 \\
16 & 0.99 & -0.65 & 0.90 & 0.93 \\
17 & 0.81 & -0.50 & 0.65 & 0.74 \\
18 & -0.52 & 1.00 & 0.92 & 0.91 \\
19 & 0.29 & -0.79 & -0.68 & 0.72 \\
20 & 0.98 & -0.96 & -0.93 & -0.93 \\
21 & 0.96 & -0.74 & 0.93 & 0.81 \\
22 & 0.93 & -0.74 & -0.68 & -0.90 \\
23 & 0.63 & -0.69 & -0.96 & -0.94 \\
24 & 0.99 & -0.89 & 0.92 & 1.00 \\
25 & -0.01 & -0.21 & -0.94 & 0.65 \\
26 & 0.98 & -0.85 & -0.55 & -0.91 \\
27 & -0.85 & 0.98 & 0.98 & -0.98 \\
28 & -0.64 & 0.64 & -0.40 & 0.98 \\
29 & 0.94 & -0.78 & 0.85 & -0.43 \\
30 & 0.82 & 0.01 & 0.97 & -0.94 \\
31 & -0.98 & 0.42 & -0.97 & -0.91 \\
32 & 0.38 & -0.89 & -1.00 & -1.00 \\
33 & 0.99 & -0.61 & 0.58 & 0.97 \\
34 & -0.96 & 0.31 & -0.95 & 0.99 \\
35 & 0.32 & -0.73 & -0.99 & -0.06 \\
36 & -0.39 & -0.31 & -1.00 & 0.75 \\
37 & 0.97 & -0.15 & 0.78 & -1.00 \\
38 & -0.99 & 0.31 & -0.96 & 0.96 \\
39 & -0.51 & 0.94 & 0.92 & -1.00 \\
40 & 0.74 & -0.98 & -0.95 & -0.25 \\
41 & -0.81 & 0.46 & -0.62 & 0.99 \\
42 & -0.87 & 0.99 & 0.16 & -0.50 \\
43 & 0.87 & -0.95 & -0.68 & -0.93 \\
44 & 0.37 & -0.90 & -0.92 & -0.75 \\
45 & 0.82 & -0.43 & 0.93 & -0.95 \\
46 & 0.90 & -0.27 & 1.00 & 0.94 \\
47 & 0.91 & -0.25 & 0.98 & -0.71 \\
48 & -0.93 & 0.94 & 0.88 & 0.93 \\
\end{tabular}
\end{center} \end{table}
Note that each optimal design contains many values that are close to the boundary values (-1 and 1). The optimal designs reported in \citet{Overstall:2017} can be found in the \verb+acebayes+ package in R, using the command \verb+optdeslrsig(n)+, where $n$ is the number of replicates.
Figures \ref{LR_trace_n_6}, \ref{LR_trace_n_10}, \ref{LR_trace_n_24}, and \ref{LR_trace_n_48} show the progression of the INSH algorithm for the Logistic regression example with $n=6,10,24$, and $48$, respectively. It appears as though the algorithm has converged to an optimal design region in each case.
\begin{figure}
\caption{Box plots of utility evaluations from the INSH algorithm, for the Logistic regression example with $n=6$.}
\label{LR_trace_n_6}
\end{figure}
\begin{figure}
\caption{Box plots of utility evaluations from the INSH algorithm, for the Logistic regression example with $n=10$.}
\label{LR_trace_n_10}
\end{figure}
\begin{figure}
\caption{Box plots of utility evaluations from the INSH algorithm, for the Logistic regression example with $n=24$.}
\label{LR_trace_n_24}
\end{figure}
\begin{figure}
\caption{Box plots of utility evaluations from the INSH algorithm, for the Logistic regression example with $n=48$.}
\label{LR_trace_n_48}
\end{figure}
\begin{table}[H] \caption{The average utility (and 2.5th and 97.5th percentiles) of the optimal design found via the ACE, INSH, ACE$_N$ and ACE$_B$ algorithms. For each design, 20 evaluations of the utility were made with $\tilde{B}=B=20000$.} \label{LR_ace_insh_opt_utils}
\begin{center}
\begin{tabular}{c|cccc}
$n$ & ACE & INSH & ACE$_N$ & ACE$_B$ \\
\hline
6 & 1.99 (1.97, 2.00) & 1.99 (1.97, 2.01) & 1.94 (1.92, 1.96) & 1.96 (1.95, 1.97) \\
10 & 2.67 (2.66, 2.69) & 2.66 (2.65, 2.68) & 2.64 (2.62, 2.66) & 2.68 (2.66, 2.69) \\
24 & 3.97 (3.95, 3.98) & 3.88 (3.86, 3.90) & 3.88 (3.86, 3.90) & 3.96 (3.94, 3.98) \\
48 & 5.11 (5.10, 5.12) & 5.01 (4.98, 5.02) & 5.01 (4.98, 5.02) & 5.06 (5.04, 5.09)
\end{tabular}
\end{center} \end{table}
\begin{table}[H] \caption{Inputs used to run ACE for the same computation time as INSH: number of phases of the ACE algorithm ($N_I, N_{II}$), and effort used to evaluate the utility in the change step ($B_1$) and for fitting the Gaussian process ($B_2$). Numbers are presented as $(N_I,N_{II}),(B_1,B_2)$. The default settings specified by the authors are $(20,100)$, $(20000,1000)$.}
\begin{center}
\begin{tabular}{c|cc}
$n$ & ACE$_N$ & ACE$_B$\\
\hline
6 & $(4,20),(20000,1000)$ & $(20,100),(10000,500)$ \\
10 & $(4,20),(20000,1000)$ & $(20,100),(8000,400)$ \\
24 & $(3,15),(20000,1000)$ & $(20,100),(5000,250)$ \\
48 & $(5,25),(20000,1000)$ & $(20,100),(10000,500)$ \\
\end{tabular}
\end{center} \end{table}
\end{document}
\begin{document}
\title{Rectangle Sweepouts and Coincidences} \begin{abstract} We prove an integral formula for continuous paths of rectangles inscribed in a piecewise smooth loop. We then use this integral formula to show that (with a very mild genericity hypothesis) the number of rectangle coincidences, informally described as the number of inscribed rectangles minus the number of isometry classes of inscribed rectangles, grows linearly with the number of positively oriented extremal chords -- a.k.a. diameters -- in a polygon. \end{abstract}
\section{Introduction}
A {\it Jordan loop\/} is the image of a circle under a continuous injective map into the plane. Toeplitz conjectured in 1911 that every Jordan loop contains $4$ points which are the vertices of a square. This is sometimes called the {\it Square Peg Problem\/}. For historical details and a long bibliography, we refer the reader to the excellent survey article [{\bf M\/}] by B. Matschke, written in 2014, and also Chapter 5 of I. Pak's online book [{\bf P\/}].
Some interesting work on problems related to the Square Peg Problem has been done very recently. The paper of C. Hugelmeyer [{\bf H\/}] shows that a smooth Jordan loop always has an inscribed rectangle of aspect ratio $\sqrt 3$. The paper [{\bf AA\/}] proves that any cyclic quadrilateral can (up to similarity) be inscribed in any convex smooth curve. The paper [{\bf ACFSST\/}] proves, among other things, that a dense set of points on an embedded loop in space are vertices of a (possibly degenerate) inscribed parallelogram.
Say that a rectangle $R$ {\it graces\/} a Jordan loop $\gamma$ if the vertices of $R$ lie in $\gamma$ and if the cyclic ordering on the vertices induced by $R$ coincides with the cyclic ordering induced by $\gamma$. Let $G(\gamma)$ denote the space of labeled gracing rectangles. In [{\bf S1\/}] we prove the following result.
\begin{theorem} \label{threepoint} Let $\gamma$ be a Jordan loop. Then $G(\gamma)$ contains a connected set $S$ such that all but at most $4$ vertices of $\gamma$ are vertices of members of $S$. \end{theorem}
We have a more precise characterization of the possibilities for $S$ in [{\bf S1\/}]. We proved Theorem \ref{threepoint} by taking a limit of a result for polygons. We now describe this result.
Given a polygon $P$, we say that a chord $d$ of $P$ is a {\it diameter\/} if the two perpendiculars to $d$ based at $\partial P$ do not locally separate $\partial P$ into two arcs. Each diameter can be positively oriented or negatively oriented, but not both. To explain the condition, we rotate the picture so that $d$ is vertical. The endpoints of $d$ divide $P$ into two arcs $P_1$ and $P_2$. Given the non-separating condition associated to a chord, we can say whether $P_1$ locally lies to the left or right of $P_2$ in a neighborhood of each endpoint of $d$. We call $d$ {\it positively oriented\/} if the left/right answer is the same at both endpoints. That is, either $P_1$ locally lies to the left at both endpoints or $P_1$ locally lies to the right at both endpoints. Figure 1 shows some examples of positive diameters.
\begin{center} \resizebox{!}{2.5in}{\includegraphics{fig1.eps}} \newline {\bf Figure 1:\/} Some positive diameters of polygons. \end{center}
With respect to the distance function on $P$, a diameter can be a minimum, a maximum, or neither. We call the third kind {\it saddles\/}. Let $\Delta_+(P)$ denote the number of positively oriented diameters of $P$.
Let $\Pi_N$ denote the space of embedded $N$-gons. The set $\Pi_N$ is naturally an open subset of $(\mbox{\boldmath{$R$}}^2)^N$ and as such inherits the structure of a smooth manifold. We call a subset $\Pi_N^* \subset \Pi_N$ {\it fat\/} if $\Pi_N-\Pi_N^*$ is a finite union of positive codimension submanifolds. In particular, a fat set is open and has full measure.
\begin{theorem} \label{polygon} There exists a fat subset $\Pi_N^* \subset \Pi_N$ with the following property. For every $N$-gon $P \in \Pi_N^*$ the space $\Gamma(P)$ is a piecewise-smooth $1$-manifold. Each arc component of $\Gamma(P)$ connects two positive diameters of $P$, and every positive diameter arises as the end of $4$ arc components of $\Gamma(P)$. In particular, there are $2\Delta_+(P)$ arc components of $\Gamma(P)$. \end{theorem} The reason that there are $4$ arc components connecting every pair of positive diameters is that we are considering cyclically labeled rectangles. Each of the $4$ components is obtained from each other one by cyclically relabeling.
Now we describe the results we prove in this paper. Given a rectangle $R$, we let $X(R)$ and $Y(R)$ respectively denote the lengths of the first and second sides of $R$. For any continuous path of rectangles in $\Gamma(P)$ which is either a closed loop or which connects two diameters of $P$, we define the {\it shape curve\/} $Z(\alpha)$. This curve is given by \begin{equation} Z(\alpha,t)=(X(R_t),Y(R_t)). \end{equation} Here $t \to R_t$ is a parametrization of $\alpha$.
When $\alpha$ is a closed loop, $Z(\alpha)$ is a closed loop as well. When $\alpha$ is an arc component, $Z(\alpha)$ is an arc, not necessarily embedded, that starts and ends on the coordinate axes. Figure 2 shows two of the possibilities.
\begin{center} \resizebox{!}{1.5in}{\includegraphics{fig2.eps}} \newline {\bf Figure 2:\/} Shape curves associated to hyperbolic and null arcs. \end{center}
In the first case, one endpoint of $\alpha$ lies on the $X$-axis and the second endpoint lies on the $Y$-axis. As in [{\bf S1\/}] we call such arcs {\it hyperbolic arcs\/}. In the other cases, both ends lie on the same axis. We call such components {\it null arcs\/}. In the arc cases, we augment $Z(\alpha)$ by adjoining the relevant parts of the coordinate axes so as to create a closed loop. We have shaded in the regions bounded by these closed loops. We call this augmented loop the {\it shape loop\/} associated to $\alpha$ and give it the same name.
In [{\bf S2\/}] we found a kind of integral formula associated to the shape loop, though we stated it in a different context. This invariant is quite similar to the integral invariant in [{\bf Ta\/}], though we use it in a different context. (In \S \ref{squeeze} we give a sample result from [{\bf S2\/}].) Here we adapt the invariant to the present situation and prove the following theorem.
\begin{theorem} \label{sweep} Let $P$ be any piecewise smooth Jordan loop. Let $\alpha$ be a piecewise smooth path in $\Gamma(P)$. If $\alpha$ is a hyperbolic arc then the signed area of the region bounded by $Z(\alpha)$ equals (up to sign) the area of the region bounded by $P$. If $\alpha$ is either a null arc or a closed loop, then the signed area of the region bounded by $Z(\alpha)$ is $0$. \end{theorem}
Theorem \ref{sweep} says something about the number of coincidences that appear amongst the inscribed rectangles. We will give an example which explains the connection. Since the shape loop associated to a null component bounds a region of area $0$, the shape curve must have a self-intersection. This self-intersection corresponds to a pair of isometric rectangles inscribed in the polygon. Now we formulate a general result. We call two labeled rectangles {\it really distinct\/} if their unlabeled versions are also distinct. Thus, two relabelings of the same rectangle are not really distinct.
We define the multiplicity of the pair $(X,Y)$ as follows. \begin{itemize} \item $\mu(X,Y)=n-1$ if there are $n>1$ really distinct labeled rectangles $R_1,...,R_n$ inscribed in $P$ such that $X(R_j)=X$ and $Y(R_j)=Y$ for all $j=1,...,n$. We also allow $n=\infty$. \item $\mu(X,Y)=0$ if there are $0$ or $1$ such rectangles. \end{itemize} We define \begin{equation} \label{coincidence} M(P)=\sum \mu(X,Y), \end{equation} where the sum is taken over all pairs $(X,Y)$. Typically this is a sum with finitely many finite nonzero terms. There is a more natural (but somewhat informal) way to think about $M(P)$. Suppose that we color all the points in $\Gamma(P)$ according to the isometry class of rectangles they represent. Then $M(P)$ is the number of points minus the number of colors.
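To illustrate the definition, suppose that exactly five really distinct labeled rectangles grace $P$, three of them having side lengths $(X,Y)=(2,1)$ and the other two having two further distinct shapes. Then $\mu(2,1)=2$ and all other multiplicities vanish, so $M(P)=2$: five points, three colors.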
\begin{theorem} \label{main} For each $P \in \Pi_N^*$ we have $M(P) \geq 2(\Delta_+(P)-2)$. \end{theorem} When $P$ is an obtuse triangle we have $M(P)=0$ and $\Delta_+(P)=2$, so the result is sharp in a trivial way.
Some version of Theorem \ref{main} is true for an arbitrary polygon, but here we place a mild constraint so as to make the proof easier. Let $P$ be a polygon. We call a diameter $S$ of $P$ {\it tricky\/} if the endpoints of $S$ are vertices of $P$ and if at least one of the edges of $P$ incident to $S$ is perpendicular to $S$.
\begin{theorem} \label{main2} If $P$ has no tricky diameters, $M(P) \geq \frac{1}{16}(\Delta_+(P)-2)$. \end{theorem}
The rest of the paper is devoted to proving the results above.
\section{The Integral Formula}
\subsection{The Differential Version}
Let $J$ be a piecewise smooth Jordan loop and let $R$ be a labeled rectangle that graces $J$. For each $j=1,2,3,4$ we let $A_j$ denote the signed area of the region $R_j^*$ bounded by the segment $\overline{R_{j}R_{j+1}}$ and the arc of $J$ that connects $R_j$ to $R_{j+1}$ and is between these two points in the counterclockwise order. Figure 3 shows a simple example. The signs are taken so that the signed areas are positive in the convex case, and then in general we define the signs so that the signed areas vary continuously.
\begin{center} \resizebox{!}{1.9in}{\includegraphics{fig3.eps}} \newline {\bf Figure 3:\/} The curve $J$, the rectangle $R$ and the regions $R_j^*$ for $j=1,2,3,4$. \end{center}
Assuming that $J$ is fixed, we introduce the quantity \begin{equation} A(R)=(A_1+A_3)-(A_2+A_4). \end{equation} We also have the point $(X,Y) \in \mbox{\boldmath{$R$}}^2$, where \begin{equation} X={\rm length\/}(\overline{R_1R_2}), \hskip 15 pt Y={\rm length\/}(\overline{R_2R_3}), \hskip 15 pt \end{equation}
Assuming that we have a piecewise smooth path $t \to R_t$ of rectangles gracing $J$, we have the two quantities \begin{equation} A_t=A(R_t), \hskip 30 pt (X_t,Y_t)=(X(R_t),Y(R_t)). \end{equation}
If $t$ is a point of differentiability, we may take derivatives of all these quantities. Here is the main formula. \begin{equation} \frac{dA}{dt}=Y \frac{dX}{dt} - X \frac{dY}{dt}. \end{equation}
It suffices to prove this result for $t=0$. This formula is rotation invariant, so for the purposes of derivation, we rotate the picture so that the first side of $R_0$ is contained in a horizontal line, as shown in Figures 3 and 4. When we differentiate, we evaluate all derivatives at $t=0$.
We write \begin{equation} \frac{dR_j}{dt}=(V_j,W_j). \end{equation}
Up to second order, the region $R_1^*(t)$ is obtained by adding a small quadrilateral with base $X_0$ and adjacent sides parallel to $t(V_1,W_1)$ and $t(V_2,W_2)$. Up to second order, the area of this quadrilateral is $$\frac{X(W_1+W_2)}{2}.$$
\begin{center} \resizebox{!}{2in}{\includegraphics{fig4.eps}} \newline {\bf Figure 4:\/} The change in area. \end{center}
From this equation, we conclude that \begin{equation} \frac{dA_1}{dt}=-\frac{X(W_1+W_2)}{2}. \end{equation} We get the negative sign because the area of the region increases when $W_1$ and $W_2$ are negative. A similar derivation gives \begin{equation} \frac{dA_3}{dt}=+\frac{X(W_3+W_4)}{2}. \end{equation} Adding these together gives $$ \frac{dA_1}{dt}+\frac{dA_3}{dt}= X \times \bigg[\frac{W_3-W_1}{2}\bigg] + X \times \bigg[\frac{W_4-W_2}{2}\bigg]=$$ \begin{equation} \label{term1} -X \times \bigg[\frac{1}{2}\frac{dY}{dt}\bigg]+ -X \times \bigg[\frac{1}{2}\frac{dY}{dt}\bigg]= -X \frac{dY}{dt}. \end{equation}
A similar derivation gives \begin{equation} \frac{dA_2}{dt}=-\frac{X(V_2+V_3)}{2}, \hskip 30 pt \frac{dA_4}{dt}=+\frac{X(V_4+V_1)}{2}. \end{equation} Adding these together gives \begin{equation} \label{term2} \frac{dA_2}{dt}+\frac{dA_4}{dt}= -Y \frac{dX}{dt}. \end{equation} Subtracting Equation \ref{term2} from Equation \ref{term1} gives \begin{equation} \label{diff} \frac{dA}{dt}=-X \frac{dY}{dt}+Y \frac{dX}{dt}, \end{equation} as claimed.
\subsection{The Integral Version}
Let $\omega=-XdY+YdX$. Here we think of $\omega$ as a $1$-form. Suppose that we have parameterized our curve of rectangles so that the parameter $t$ runs from $0$ to $1$. Integrating Equation \ref{diff} over the piecewise smooth path, we see that \begin{equation} \label{int} A_1-A_0=\int_Z \omega. \end{equation} Here $Z$ is the shape curve associated to the path of rectangles. We can interpret this integral geometrically. Letting $O=(0,0)$, consider the closed loop \begin{equation} Z'=\overline{O, Z_0} \cup Z \cup \overline {Z_1,O}. \end{equation} Since $\omega$ vanishes along the segments $\overline{O, Z_0}$ and $\overline {Z_1,O}$ (indeed, along any line segment through the origin), we see that \begin{equation} A_1-A_0=\int_Z\omega=\int_{Z'} \omega= -\int \int_{\Omega} 2dxdy = -2\ {\rm area\/}(\Omega). \end{equation} Here $\Omega$ is the region bounded by $Z'$. The last line of the equation refers to the signed area of $\Omega$. \newline
\noindent {\bf Proof of Theorem \ref{sweep}:\/} Suppose first that $\alpha$ is a piecewise smooth loop rectangles which grace the Jordan curve $J$. Then the curve $Z$ is already a closed loop, and the signed area of the region bounded by $Z$ is the same as the signed area bounded by $Z'$. Since $A_1=A_0$ in this case, we see that $Z$ bounds a region of signed area $0$.
If $\alpha$ is a null arc, then $R_0$ and $R_1$ both have the same aspect ratio, either $0$ or $\infty$. In either case, we have $A_0=A_1$. The common value is, up to sign, the area of the region bounded by $J$. In this case, $Z$ starts and stops on one of the coordinate axes, and the region bounded by $Z$ has the same area as the shape loop, $Z \cup \overline{Z_0Z_1}$. So, in this case we also see that the shape loop bounds a region of area $0$.
If $\alpha$ is a hyperbolic arc, then $A_0=-A_1$ and both quantities up to sign equal the area of the region bounded by $J$. At the same time $Z'$ is precisely the shape loop in this case. So, we see that twice the area of the region bounded by $J$ equals twice the area of the region bounded by $Z$, up to sign. Cancelling the factor of $2$ gives the desired result. $\spadesuit$ \newline
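As a quick illustration of the hyperbolic case, let $J$ be a circle of radius $r$. Every rectangle inscribed in $J$ is centered at the center of $J$, so its diagonals are diameters of the circle and its side lengths satisfy $X^2+Y^2=(2r)^2$. The path of inscribed rectangles with vertices $(\pm r\cos\theta,\pm r\sin\theta)$, for $\theta \in [0,\pi/2]$, is a hyperbolic arc: it runs from a degenerate rectangle with $Y=0$ to one with $X=0$. Its shape curve is the quarter of the circle of radius $2r$ lying in the closed positive quadrant, so the associated shape loop bounds a quarter disk of area $\frac{1}{4}\pi(2r)^2=\pi r^2$, which is exactly the area of the region bounded by $J$, as Theorem \ref{sweep} predicts.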
\subsection{Generic Coincidences} \label{generic}
In this section we prove Theorem \ref{main}. Suppose that $P$ is an $N$-gon that satisfies the conclusions of Theorem \ref{polygon}. This happens if $P \in \Pi_N^*$, but it might happen more generally. In any case, the space $\Gamma(P)$ of gracing rectangles has $2\Delta_+(P)$ arc components. There is a $\mbox{\boldmath{$Z$}}/4$ action on $\Gamma(P)$ and this action freely permutes the arc components of $\Gamma(P)$.
We let $\delta=\Delta_+(P)/2$ and we let $\alpha_1,...,\alpha_{\delta}$ denote a complete set of representatives of these arc components modulo the $\mbox{\boldmath{$Z$}}/4$ action. It suffices to show that the sum in Equation \ref{coincidence} is at least $\delta-1$ when we restrict our attention to the components just listed.
Consider those arcs on our list which are null arcs. The shape loops associated to each of these arcs bound regions of area $0$ and hence the corresponding loop has a double point. Each double point corresponds to a distinct pair that adds $1$ to the total count for $M(J)$. The remaining rectangle coincidences involve rectangles not associated to these arcs or to their images under the $\mbox{\boldmath{$Z$}}/4$ action.
Now consider those arcs on our list which are hyperbolic arcs whose shape loops are not embedded. In exactly the same way as above, each of these arcs contributes $1$ to the count for $M(J)$ and the rectangle pairs involved are distinct from the ones we have already considered. Again, the remaining rectangle coincidences involve rectangles not associated to these arcs or to their images under the $\mbox{\boldmath{$Z$}}/4$ action. \newline \newline {\bf Remark:\/} Before we move on to the last case, we mention that the count above might be an under-approximation, even in the case where there is just one double point per shape loop considered. Consider the simple situation where there are just $2$ null arcs. It might happen that the rectangle pairs corresponding to these $2$ arcs are congruent to each other. This would give us $4$ congruent gracing rectangles and would contribute $3$ rather than $2$ to the total count. \newline
Finally, consider the $d$ hyperbolic arcs on our list which have embedded shape loops. If $\alpha_1$ and $\alpha_2$ are two such arcs, then $Z(\alpha_1)$ and $Z(\alpha_2)$ are two closed loops which bound the same area. If these loops did not intersect in the positive quadrant, then either the region bounded by $Z(\alpha_1)$ would strictly contain the region bounded by $Z(\alpha_2)$ or the reverse. This contradicts the fact that these two regions have the same area. Hence $Z(\alpha_1)$ and $Z(\alpha_2)$ intersect in the positive quadrant, and the intersection point corresponds to a coincidence involving a rectangle associated to $\alpha_1$ and a rectangle associated to $\alpha_2$. Call this the {\it intersection property\/}.
We label so that $\alpha_1,...,\alpha_d$ are the hyperbolic arcs having embedded shape loops. We argue by induction that these $d$ arcs contribute at least $d-1$ to the count for $M(J)$. If $d=1$ then there is nothing to prove. By induction, rectangle coincidences associated to the arcs $\alpha_1,...,\alpha_{d-1}$ contribute $d-2$ to the count for $M(J)$.
By the intersection property, $Z(\alpha_d)$ intersects each of the other shape loops, and since $\Gamma(J)$ is a manifold, there is at least one new rectangle involved in our count, namely one that corresponds to a point on $Z(\alpha_d)$ that also lies on one of the other shape loops. The corresponding rectangle adds $1$ to the count in Equation \ref{coincidence}, one way or another. So, all in all, we add $d-1$ to the count for $M(J)$ by considering the rectangle coincidences associated to $\alpha_1,...,\alpha_d$. This proves what we want.
\subsection{A Non-Squeezing Result} \label{squeeze}
Here we explain how the invariant above implies one of our main results in [{\bf S2\/}]. Really, it is the same proof. The material in this section plays no role in the rest of the paper.
Suppose that $\gamma_1$ and $\gamma_2$ are $2$ piecewise smooth curves which are disjoint. Suppose also that at each end, $\gamma_j$ coincides with a line segment. Finally suppose that these line segments are parallel at each end, so to speak. Figure 5 shows what we mean.
\begin{center} \resizebox{!}{1.5in}{\includegraphics{fig5.eps}} \newline {\bf Figure 5:\/} Sliding a square along a track. \end{center}
Suppose that we have a piecewise smooth family of rectangles, all having the same aspect ratio, that starts at one end, finishes at the other, and remains inscribed in $\gamma_1 \cup \gamma_2$ the whole time. We imagine $\gamma_1 \cup \gamma_2$ as being a kind of track that the rectangle slides along (changing its size and orientation along the way). Figure $5$ shows an example in which case the rectangle is a square. In Figure 5 we show the starting rectangle $R_0$, the ending rectangle $R_1$, and some $R_t$ for $t \in (0,1)$. This is just a hypothetical example.
We can complete the union $\gamma_1 \cup \gamma_2$ to a piecewise smooth Jordan loop by extending the ends of one or both of these curves, if necessary, and then dropping perpendiculars. Let $\Omega$ be the region bounded by this loop. The shape curve associated to our path lies on a line through the origin, and our $1$-form $\omega$ vanishes on such lines. Referring to the invariant above, we therefore have $A(R_0)=A(R_1)$. But, after suitably labeling the rectangles in our family, we have $$A(R_j)={\rm area\/}(\Omega)-{\rm area\/}(R_j).$$ Hence $R_0$ and $R_1$ have the same area. Since they also have the same aspect ratio, they have the same side-lengths. This is to say that the perpendicular distance between the end of $\gamma_1$ and the end of $\gamma_2$ is the same at either end. This is a kind of non-squeezing result.
In particular, our result shows that Figure 5 depicts an impossible situation. There is no way to slide a square continuously through the shown ``track'' because the widths are different at the $2$ ends.
\section{The General Case}
\subsection{Rectangles Inscribed in Lines} \label{conn}
The goal of this chapter is to prove Theorem \ref{main2}. We plan to take a limit of the result in Theorem \ref{main}.
Let $E=(E_1,E_2,E_3,E_4)$ be a collection of $4$ line segments, not necessarily distinct. We say that a rectangle $R$ {\it graces\/} $E$ if the vertices $R_1,R_2,R_3,R_4$ of $R$ go in cyclic order, either clockwise or counterclockwise, and $R_i \in E_i$ for all $i=1,2,3,4$. We allow $R$ to be degenerate. Let $\Gamma(E) \subset (\mbox{\boldmath{$R$}}^2)^4$ denote the set of rectangles gracing $E$.
We call a point $p \in \Gamma(E)$ {\it degenerate\/} if every neighborhood of $p$ in $\Gamma(E)$ contains points corresponding to infinitely many distinct but isometric rectangles. We call $E$ {\it degenerate\/} if there is some $p \in \Gamma(E)$ which is degenerate.
\begin{lemma} Suppose that $E$ is nondegenerate. $\Gamma(E)$ is the intersection of a conic section with a rectangular solid. \end{lemma}
{\bf {
}{\noindent}Proof: } Let $E=(E_1,E_2,E_3,E_4)$ be a $4$-tuple of segments. We rotate so that none of the segments is vertical, so that we may parameterize the lines containing our segments by their first coordinates. Let $L_j$ be the line extending $E_j$. We identify $\mbox{\boldmath{$R$}}^3$ with triples $(x_1,x_2,x_3)$ where $p_j=(x_j,y_j) \in L_j$. We let $p_4$ be such that $p_1+p_3=p_2+p_4$. In other words, we choose $p_4$ so that $(p_1,p_2,p_3,p_4)$ is a parallelogram.
Let $\Gamma(L)$ denote the set of rectangles gracing $L$. We describe the subset $\Gamma'(L) \subset \mbox{\boldmath{$R$}}^3$ corresponding to $\Gamma(L)$. The actual set $\Gamma(L)$ is the image of $\Gamma'(L)$ under a linear map from $\mbox{\boldmath{$R$}}^3$ into $(\mbox{\boldmath{$R$}}^2)^4$.
The condition that $p_4 \in L_4$ is a linear condition. Therefore, the set $(x_1,x_2,x_3) \in \mbox{\boldmath{$R$}}^3$ corresponding to parallelograms inscribed in $L$ is a hyperplane $\Pi$. The condition that our parallelogram is a rectangle is $(p_3-p_2) \cdot (p_1-p_2)=0.$ This condition defines a quadric hypersurface $H$ in $\mbox{\boldmath{$R$}}^3$. The intersection $\Gamma'(L)=\Pi \cap H$ corresponds to the inscribed rectangles.
$\Pi \cap H$ is either a plane or a conic section. In the former case, $E$ is degenerate. In the latter case, $\Pi \cap H$ is either an analytic curve or two crossing lines. Since $\Gamma(L)$ is the image of $\Gamma'(L)$ under a linear map, the set $\Gamma(L)$ is also a conic section.
Let $[E]=E_1 \times E_2 \times E_3 \times E_4.$ Then $[E]$ is a rectangular solid. We have $\Gamma(E)=\Gamma(L) \cap [E]$. $\spadesuit$ \newline
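The parametrization in the proof is easy to carry out explicitly. The following Python sketch (with our own names and an arbitrarily chosen set of example lines) sets up the hyperplane $\Pi$ and the quadric $H$ for four non-vertical lines $y=m_jx+c_j$ and solves for $x_1,x_2$ in terms of $x_3$, which generically parametrizes the conic $\Gamma'(L)$; the segment constraints coming from the rectangular solid $[E]$ are not imposed here.
\begin{verbatim}
import sympy as sp

def gracing_rectangle_family(lines):
    # lines = [(m1,c1), ..., (m4,c4)] for non-vertical lines y = m*x + c.
    # Solves the linear condition "p4 lies on L4" (the hyperplane Pi) together
    # with the right-angle condition (the quadric H) for x1, x2 in terms of x3.
    (m1, c1), (m2, c2), (m3, c3), (m4, c4) = lines
    x1, x2, x3 = sp.symbols('x1 x2 x3', real=True)
    p1 = sp.Matrix([x1, m1 * x1 + c1])
    p2 = sp.Matrix([x2, m2 * x2 + c2])
    p3 = sp.Matrix([x3, m3 * x3 + c3])
    p4 = p1 + p3 - p2              # forces (p1,p2,p3,p4) to be a parallelogram
    on_L4 = sp.Eq(p4[1], m4 * p4[0] + c4)
    right_angle = sp.Eq((p3 - p2).dot(p1 - p2), 0)
    return sp.solve([on_L4, right_angle], [x1, x2], dict=True)

# Example with the (arbitrarily chosen) lines y = 0, y = x + 1, y = 2, y = -x + 3:
print(gracing_rectangle_family([(0, 0), (1, 1), (0, 2), (-1, 3)]))
\end{verbatim}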
\begin{lemma} When $E$ is non-degenerate, $\Gamma(E)$ has at most $64=2^8$ connected components. \end{lemma}
{\bf {
}{\noindent}Proof: } We use the notation from the previous lemma. Note $[E]$ is bounded by $8$ hyperplanes and a conic section either lies in a hyperplane or intersects it at most twice. So, each boundary component of $[E]$ cuts $\Gamma(L)$ into at most $2$ components. $\spadesuit$ \newline
We call a polygon $P$ {\it degenerate\/} if some $4$-tuple of edges associated to $P$ is degenerate. Otherwise we call $P$ {\it non-degenerate\/}.
\begin{lemma} Let $P$ be a non-degenerate polygon. The space $\Gamma(P)$ is a graph having analytic edges and degree at most $32$. \end{lemma}
{\bf {
}{\noindent}Proof: } Each rectangle $R$ can grace at most $16$ different $4$-tuples of edges of $P$, because each vertex can lie in at most $2$ segments. Hence, each $p \in \Gamma(P)$ lies in the intersection of at most $16$ distinct $\Gamma(E)$. Since $\Gamma(E)$ is the intersection of a conic section with a rectangular solid, $\Gamma(E)$ is a graph with analytic edges and maximum degree $4$. From what we have said above, $\Gamma(P)$ is a graph with analytic edges and maximum degree $64=16 \times 4$.
We can cut down by a factor of $2$ as follows. The only time a point of $\Gamma(P)$ lies in more than $8$ spaces $\Gamma(E)$ is when $p$ corresponds to a gracing rectangle whose every vertex is a vertex of $P$. In this case, $p$ is a vertex of $[E]$ for each $4$-tuple $E$ that the rectangle graces. But then $p$ has degree at most $2$ in each $\Gamma(E)$. So, this exceptional case produces vertices of degree at most $32$. $\spadesuit$ \newline
\subsection{The Inscribing Sequence}
A generic polygon $P$ satisfies the conclusions of Theorem \ref{main}. For such polygons, any $4$-tuple which supports a gracing rectangle is nice.
We label the sides of $P$ by $\{1,...,N\}$. Let $\Omega$ denote the set of ordered $4$-element subsets of $\{1,...,N\}$, not necessarily distinct. Consider some embedded arc $\alpha \subset \Gamma(P)$ of inscribed rectangles. $\alpha$ defines a finite sequence $\Sigma$ of elements of $\Omega$. We simply note which edges of $P$ contain the vertices of any given rectangle and then we order the elements of $\Omega$ we get. We call $\Sigma$ the {\it inscribing sequence\/} for $\alpha$.
\begin{lemma} \label{inscribing} $\Sigma$ has length at most $\kappa N^4$. \end{lemma}
{\bf {
}{\noindent}Proof: } If $\Sigma$ had length longer than this, then we could find a single $4$-tuple $E$ of edges such that the subset of $\alpha$ supported by $E$ has at least $82$ components. In other words the sequence would have to return to the $4$-tuple describing $E$ at least $82$ times. The arcs of $\Gamma(E)$ corresponding to these returns are disconnected from each other, because otherwise $\alpha$ would be a loop rather than an arc. This contradiction proves our claim. $\spadesuit$ \newline
\subsection{Stable Diameters}
For the rest of the chapter, we use the word {\it diameter\/} to mean a positively oriented diameter, in the sense discussed in the introduction.
Let $P$ be a polygon and let $S$ be a diameter of $P$. We call $S$ {\it stable\/} if \begin{itemize} \item At least one endpoint of $S$ is a vertex of $P$. \item If $v$ is an endpoint of $S$ and $e$ is an edge of $P$ incident to $v$, then $S$ and $e$ are not perpendicular. \end{itemize}
\begin{lemma} Suppose that $P$ has no tricky diameters. If $P$ has an unstable diameter, then $P$ is non-degenerate. \end{lemma}
{\bf {
}{\noindent}Proof: } This is a case-by-case analysis. Suppose first that $P$ has a diameter $S$ whose endpoints are not vertices of $P$. Then the endpoints of $S$ lie in the interior of a pair of parallel edges of $P$. But then $P$ is degenerate. Suppose that $P$ has a diameter $S$ having one endpoint which is a vertex $v$ of $P$. The other endpoint of $S$ lies in the interior of an edge $e'$ of $P$. By definition $e'$ and $S$ are perpendicular. If $S$ is not stable, then one of the edges $e$ of $P$ is perpendicular to $S$ and hence parallel to $e'$. But then we can shift $S$ over a bit and produce a diameter of $P$ whose endpoints lie in the interior of $e$ and $e'$. Again, $P$ is degenerate. The remaining unstable diameters are (in the technical sense) tricky. $\spadesuit$ \newline
In view of the preceding result, it suffices to prove Theorem \ref{main2} under the assumption that $P$ is non-degenerate and has all stable diameters.
\subsection{Limits of Diameters}
Let $P$ be an $N$-gon with stable diameters. We can find a sequence $\{P_n\}$ of generic $N$-gons converging to $P$. Each $P_n$ satisfies the conclusions of Theorem \ref{main}.
\begin{lemma} Let $D$ be a diameter of $P$. The polygon $P_n$ has a diameter $D_n$ such that $\{D_n\}$ converges to $D$. \end{lemma}
{\bf {
}{\noindent}Proof: } Since $P$ only has stable diameters, there are just $2$ cases to consider. Suppose first that $D$ connects two vertices $v$ and $w$ of $P$. The polygon $P_n$ has vertices $v_n$ and $w_n$ which converge respectively to $v$ and $w$ as $n \to \infty$. Let $D_n$ be the chord whose endpoints are $v_n$ and $w_n$. By construction, $D_n$ converges to $D$ and for large $n$ this chord is a diameter.
Suppose now that $D$ connects a vertex $v$ to a point in the interior of an edge $e$. Let $v_n$ and $e_n$ be the corresponding vertex and edge of $P_n$. Since $v_n \to v$ and since $e_n \to e$ we see that eventually there is a chord $D_n$ that has $v_n$ as one endpoint and has the other endpoint perpendicular to $e_n$. By construction $D_n \to D$ and eventually $D_n$ is a diameter of $P_n$. $\spadesuit$ \newline
\begin{lemma} If $\{D_n\}$ is a sequence of diameters of $P_n$, then $\{D_n\}$ converges on a subsequence to a diameter of $P$. \end{lemma}
{\bf {
}{\noindent}Proof: } Given the sequence $\{D_n\}$ we can pass to a subsequence so that the endpoints of these diameters converge. The limiting segment $D$, provided that it has nonzero length, must be a diameter of $P$ because the required condition is a closed condition. We just have to see that the length of $\{D_n\}$ does not shrink to $0$. Note that $D_n$ is at least as long as the shortest diameter of $P_n$. Furthermore, there is a positive lower bound to the length of any edge of $P_n$, independent of $n$. So, if the length of $D_n$ converges to $0$, there are two non-adjacent vertices of $D_n$ whose distance converges to $0$. This contradicts the fact that $\{P_n\}$ converges to the embedded polygon $P$. $\spadesuit$ \newline
We think of a diameter as a subset of $(\mbox{\boldmath{$R$}}^2)^2$, and in this way we can talk about the distance between two diameters of $P_n$.
\begin{lemma} Suppose that $\{D_n\}$ and $\{D_n'\}$ are two sequences of diameters such that the distance from $D_n$ to $D_n'$ converges to $0$ as $n \to \infty$. Then $D_n=D_n'$ for $n$ sufficiently large. \end{lemma}
{\bf {
}{\noindent}Proof: } Let $v_n$ and $w_n$ be the endpoints of $D_n$ and let $v_n'$ and $w_n'$ be the endpoints of $D_n'$. We label so that
$\|v_n-v_n'\|$ and
$\|w_n-w_n'\|$ both tend to $0$. In all cases, we can re-order so that $v_n$ is a vertex of $P_n$ and $v_n'$ is not. In other words, $v_n'$ lies in the interior of an edge $e_n'$ of $P_n$. Since $v_n'$ converges to $v_n$, a vertex of $P_n$, the segment $e_n'$ becomes perpendicular to $D_n'$ in the limit. This contradicts the fact that $P$ has only stable diameters. $\spadesuit$ \newline
\begin{corollary} \label{stable} For $n$ sufficiently large, there is a bijection between the diameters of $P_n$ and the diameters of $P$ such that each diameter of $P$ is matched with a sequence of diameters of $P_n$ which converges to it. \end{corollary}
{\bf {
}{\noindent}Proof: } This is an immediate consequence of the preceding $3$ lemmas. $\spadesuit$ \newline
We truncate our sequence of polygons so that the last corollary holds for all $n$. For each $n$, these diameters are paired together by the arc components of the manifold $\Gamma(P_n)$. We pass to a further subsequence so that the same pairs arise for each $n$. This gives us a well defined way to pair the diameters of $P$. We say that two diameters of $P$ are {\it partners\/} if and only if the corresponding diameters of $P_n$ are paired together.
\begin{lemma} \label{connect} Each pair of partner diameters in $P$ is connected by a piecewise smooth path in $\Gamma(P)$. \end{lemma}
{\bf {
}{\noindent}Proof: } Let $A$ and $B$ be two partner diameters of $P$. Let $A_n$ and $B_n$ be the corresponding diameters of $P_n$. Let $\alpha_n$ be the arc in $\Gamma(P_n)$ which connects $A_n$ and $B_n$. To understand the convergence of $\{\alpha_n\}$ we work in the Hausdorff topology on the set of compact subsets of $(\mbox{\boldmath{$R$}}^2)^4$. This ambient space contains $\Gamma(J)$ for any Jordan loop.
We consistently label the sides of $P_n$ and $P$. Let $\Sigma_n$ be the inscribing sequence of $\alpha_n$. By Lemma \ref{inscribing} there is a uniform upper bound of $\kappa N^4$ on the length of $\Sigma_n$. Therefore, we may pass to a subsequence so that the inscribing sequence associated to $\alpha_n$ is independent of $n$. We write $$\alpha_n=\alpha_{n1},...,\alpha_{nk},$$ where $\alpha_{nj}$ is the arc of rectangles corresponding to the $j$th element of the sequence in $\Omega$. Here $k$ is the length of the inscribing sequence.
We pass to a subsequence so that $\{\alpha_{nj}\}$ converges in the Hausdorff topology to a subset $\alpha_j \subset \alpha$. The set $\alpha_j$ is connected and contained in a subset of $\Gamma(E)$, where $E$ is the $4$-tuple of edges corresponding to the $j$th element of $\Omega$. From the discussion in \S \ref{conn}, we see that $\alpha_j$ is a compact, connected algebraic arc. By construction $\alpha_j$ and $\alpha_{j+1}$ share one point in common for all $j$. This point is the limit of the sequence $\{\alpha_{nj} \cap \alpha_{n,j+1}\}$.
The description above reveals $\alpha$ to be a piecewise smooth arc connecting the two diameters $A$ and $B$. $\spadesuit$ \newline
\subsection{The End of the Proof}
Let $P$ be a polygon. We still assume that $P$ has stable diameters, so that the results from the previous section apply. We know from Lemma \ref{connect} that the diameters of $P$ are paired in some way, and each pair is connected by some piecewise smooth path of gracing rectangles. We can erase any loops that these paths have and thereby assume that all these paths are embedded. Next, we can assume that every $2$ arcs in the collection intersect each other in at most one point. Otherwise, we can do a splicing operation to decrease the number of intersection points. (See Figure 6 below.) The splicing operation may change the way that the diameters are paired up, but this doesn't bother us. Finally, we can make our choice of connectors invariant under the $\mbox{\boldmath{$Z$}}/4$ re-labelling action.
As in the proof of Theorem \ref{main} we let $\delta=\Delta_+(P)/2$ and we chose a collection $\alpha_1,...,\alpha_{\delta}$ of connecting arcs which has one representative in each orbit of the $\mbox{\boldmath{$Z$}}/4$ action.
Suppose that our collection of paths contains two hyperbolic arcs $\alpha_1$ and $\alpha_2$ that intersect. Each path connects a (degenerate) rectangle of aspect ratio $0$ to a (degenerate) rectangle of aspect ratio $\infty$. By splicing the paths together and then re-dividing them, we produce $2$ new paths $\beta_1$ and $\beta_2$ such that each $\beta_j$ connects two degenerate rectangles of the same aspect ratio. In other words, we can do a cut-and-paste operation at an intersection point to replace the two hyperbolic arcs by null arcs. If necessary, we can erase any loops created in this process. Figure 6 shows this operation.
\begin{center} \resizebox{!}{1.2in}{\includegraphics{fig6.eps}} \newline {\bf Figure 6:\/} The splicing operation. \end{center}
Suppose first that at least $\delta/2$ of the arcs in our collection are hyperbolic arcs. Then this collection is an embedded $1$-manifold contained in $\Gamma(P)$. Just using these arcs, the same argument as in the proof of Theorem \ref{main} shows that $$M(P) \geq \Delta_+(P)-2.$$ That is, we get the same answer as in Theorem \ref{main} except for the factor of $1/2$.
Now suppose that there are at least $\delta/2$ null arcs. For the rest of the proof we just deal with these null arcs. Let $\Gamma_1(P)$ denote the union of these null arcs. We know that $\Gamma_1(P)$ is a subset of $\Gamma(P)$ and also a graph with algebraic edges and maximum valence at most $32$. Let $\widehat \Gamma_1$ denote the formal disjoint union of these embedded null arcs. The space $\widehat \Gamma_1$ is a $1$-manifold, just a union of arcs, and the ``forgetful map'' $\phi: \widehat \Gamma_1 \to \Gamma_1$ is at most $16$ to $1$.
The same argument as in \S \ref{generic} shows that there are $\delta$ distinct points in $\widehat \Gamma_1$, two per arc, corresponding to rectangle coincidences. Let $S$ be the set of these points. The image $\phi(S)$ contains at least $\delta/16$ points. For each of these points, there is a second point corresponding to an isometric rectangle. We know this because the map $\phi$ is injective on each null arc, and each null arc contains $2$ points of $S$. So, we can match our $\delta/16$ points into $\delta/32$ distinct pairs of points, corresponding to pairs of isometric but distinct rectangles in $\Gamma(P)$. This adds a count of $\delta/32$ to $M(P)$. To make the comparison with Theorem \ref{main} cleaner, we work with $(\delta-1)/32$ instead.
In the case at hand, we get the same bound as in Theorem \ref{main} except for the factor of $1/32$. Going back to the count of labeled rectangles, we have $$M(P) \geq \frac{1}{16}(\Delta_+(P)-2).$$ This completes the proof of Theorem \ref{main2}.
[{\bf AA\/}] A. Akopyan and S Avvakumov, {\it Any cyclic quadrilateral can be inscribed in any closed convex smooth curve.\/} arXiv: 1712.10205v1 (2017) \newline \newline [{\bf ACFSST\/}] J. Aslam, S. Chen, F. Frick, S. Saloff-Coste, L. Setiabrate, H. Thomas, {\it Splitting Loops and necklaces: Variants of the Square Peg Problem\/}, arXiv 1806.02484 (2018) \newline \newline [{\bf CH\/}] D. Hilbert and S. Cohn-Vossen, {\it Geometry and The Imagination\/}, \newline Chelsea Publishing Company (American Math Society), 1990 \newline \newline [{\bf H\/}] C. Hugelmeyer, {\it Every Smooth Jordan Curve has an inscribed rectangle with aspect ratio equal to $\sqrt 3$.\/} arXiv 1803:07417 (2018) \newline \newline [{\bf M\/}] B. Matschke, {\it A survey on the Square Peg Problem\/}, Notices of the A.M.S. {\bf Vol 61.4\/}, April 2014, pp 346-351. \newline \newline [{\bf S1\/}] R. E. Schwartz, {\it A Trichotomy for Rectangles Inscribed in Jordan Loops\/}, preprint, 2018 \newline \newline [{\bf S2\/}] R. E. Schwartz, {\it Four lines and a Rectangle\/}, preprint, 2018 \newline \newline [{\bf Ta\/}], T. Tao, {\it An integration approach to the Toeplitz square peg conjecture\/} \newline Forum of Mathematics, Sigma, 5 (2017) \newline \newline [{\bf W\/}] S. Wolfram, {\it The Mathematica Book\/}, 4th ed. Wolfram Media/Cambridge University Press, Champaign/Cambridge (1999)
\end{document}
\begin{document}
\begin{abstract}
We determine the asymptotic distribution of the $p$-rank of the sandpile groups of random bipartite graphs. We see that this depends on the ratio between the number of vertices on each side, with a threshold when the ratio between the sides is equal to $\frac{1}{p}$. We follow the approach of Wood in \cite{mw} and consider random graphs as a special case of random matrices, and rely on a variant of the definition of min-entropy given by Maples in \cite{km} in order to obtain useful results about these random matrices. Our results show that, unlike the sandpile groups of {Erd\H{o}s--R\'{e}nyi } random graphs, the distribution of the sandpile groups of random bipartite graphs depends on the properties of the graph, rather than coming from some more general random group model.
\end{abstract}
\title{Sandpile Groups of Random Bipartite Graphs}
\section{Introduction} \label{sec:intro}
\subsection{The Main Theorem}
In this paper, we study the sandpile group of a random bipartite graph. Recall that the sandpile group $\Gamma(G)$ of a connected graph $G$ is the cokernel of the reduced laplacian matrix $\Delta'$.
Let $0<\alpha,q<1$ be constants. We define a random bipartite graph $G=G(n,\alpha,q)$ as follows: Take two sets of vertices $L$ and $R$ with $|L|=n,|R|=\lfloor\alpha n\rfloor$, and for each pair of vertices $v\in L$ and $u\in R$, include the edge between $v$ and $u$ independently with probability $q$.
We now state our main result about the $p$-rank of $\Gamma(G)$:
\begin{thm} \label{thm:main}
Let $G=G(n,\alpha,q)$ be a random bipartite graph, and $p$ a prime. Then as $n\rightarrow \infty$, the expected value of the $p$-rank of the sandpile group $\Gamma(G)$ is:
\begin{enumerate} \item $\left(\frac{1}{p}-\alpha\right)n+O(1)$ if $\alpha<\frac{1}{p}$ \item $O(1)$ if $\alpha>\frac{1}{p}$ \item $\sqrt{\frac{\frac{1}{p}(1-\frac{1}{p})n}{2\pi}}$+$O(1)$ if $\alpha=\frac{1}{p}.$ \end{enumerate}
\end{thm}
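\noindent For example, taking $p=2$ and $\alpha=0.3<\frac{1}{2}$, the theorem gives an expected $2$-rank of $\left(\frac{1}{2}-0.3\right)n+O(1)=0.2\,n+O(1)$, whereas for any $\alpha>\frac{1}{2}$ the expected $2$-rank remains bounded as $n\rightarrow\infty$.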
\noindent It is worth noting that the limits in the theorem do not depend on the value of $q$.
Theorem~\ref{thm:strongMain} will also give us explicit information about the distribution of the $p$-ranks. From numerical computations, it appears that the $O(1)$ constants in the first two cases of the theorem are at most $1$, and the $O(1)$ constant in the third case is around $2$.
The proof of Theorem~\ref{thm:main} relies on the assumption that $\alpha<1$. Based on numerical computations of random graphs, we conjecture that Theorem~\ref{thm:main} also holds when $\alpha=1$. This would imply that the expected $p$-rank of the sandpile group of a balanced bipartite graph is $O(1)$ for all primes $p$. However, the best that can be done with our methods is: \begin{cor} \label{cor:balanced} Let $G=G(n,1,q)$ be a random balanced bipartite graph, $p$ prime. Then as $n\rightarrow \infty$, the expected value of the $p$-rank of the sandpile group $\Gamma(G)$ is $o(n)$. \end{cor} \noindent We prove this corollary in Section~\ref{sec:details}.
\subsection{Connection to {Erd\H{o}s--R\'{e}nyi } Random Graphs}
It is interesting to ask what the distribution of the sandpile groups of random graphs looks like. The authors of \cite{pk} noted that the sandpile group of a graph comes with a canonical symmetric perfect bilinear pairing $\langle\cdot,\cdot\rangle_{G}$, and conjectured that for an {Erd\H{o}s--R\'{e}nyi } random graph $G$, the pair $(\Gamma(G),\langle\cdot,\cdot\rangle)$ of the sandpile group and its associated pairing can be predicted by certain heuristics of Cohen-Lenstra type.
The Cohen-Lenstra heuristics are an attempt to model what a generic ``random'' group should look like. In \cite{pk}, the authors show that the cokernel of a random symmetric matrix over ${\mathbb Z}_{p}$, distributed according to the Haar measure, follows heuristics of Cohen-Lenstra type, and conjectured that the sandpile groups of {Erd\H{o}s--R\'{e}nyi } random graphs should follow the same heuristics.
In \cite{mw}, Melanie Wood proves several results in this direction. In particular, she shows that for an {Erd\H{o}s--R\'{e}nyi } random graph $G$, the $p$-part of $\Gamma(G)$ follows these heuristics for any finite collection of primes $p$.
However, Theorem~\ref{thm:main} shows that sufficiently unbalanced random bipartite graphs do not follow any similar type of Cohen-Lenstra heuristics: For example, the Cohen-Lenstra heuristics predict that for any $p$, the expected $p$-rank of $\Gamma(G)$ should stay low as $n$ grows. However, Theorem~\ref{thm:main} implies that for sufficiently unbalanced bipartite graphs, the $p$-rank grows linearly with $n$. Furthermore, the Cohen-Lenstra heuristics predict that the probability that $\Gamma(G)$ is cyclic should converge to a constant between $0$ and $1$, but in Section~\ref{sec:details} we prove that this is not the case for sufficiently unbalanced bipartite graphs.
\begin{cor} \label{cor:cyclic} Let $G=G(n,\alpha,q)$ be a random bipartite graph with $\alpha<\frac{1}{2}$. Then as $n\rightarrow\infty$, the probability that $\Gamma(G)$ is cyclic goes to zero exponentially fast.
\end{cor}
\noindent Because of the $O(1)$ factor in Theorem~\ref{thm:main}, the theorem gives us no information on the probability that $\Gamma(G)$ is cyclic when $\alpha\geq\frac{1}{2}$. Numerical computations suggest that this probability converges to a constant around $0.60$ when $\alpha>\frac{1}{2}$, and to a constant around $0.29$ when $\alpha=\frac{1}{2}$.
Here is a brief outline of the paper: In Section~\ref{sec:closeness}, we define when sequences of random variables are ``usually within small distance'', which will give us a useful equivalence relation for random variables. We also give Theorem~\ref{thm:strongMain}, which describes the distribution of the $p$-rank of $\Gamma(G)$, and show that it implies Theorem~\ref{thm:main}.
In Section~\ref{sec:entropy}, we introduce our notion of min-entropy, which is a variant on the one used by Maples in \cite{km}. This notion is meant to replace independence; the matrices we will work with are not independent, but they are ``almost independent'', in the sense described by min-entropy, which will suffice for our purposes.
In Section~\ref{sec:proof} we introduce a random matrix $M$, whose corank is usually within small distance of the $p$-rank of $\Gamma(G)$. Using the min-entropy properties of $M$, we will show that its corank is also usually within small distance of the distribution given in Theorem~\ref{thm:strongMain}, which will complete our proof.
Section~\ref{sec:background} contains some background information, and Section~\ref{sec:details} contains the proofs of the corollaries of Theorem~\ref{thm:main}.
\textbf{Acknowledgments}. The author is grateful to Sam Payne and Nathan Kaplan for suggesting the problem, as well as their many helpful suggestions along the way. Also to Dan Carmon, for suggesting the proof of Claim~\ref{claim:estimation}.
This work was partially supported by NSF CAREER DMS-1149054.
\section{The Sandpile Group, Binomial Distributions, and Schur Complements} \label{sec:background}
\subsection{The Sandpile Group.} In this section, we define the sandpile group. For a more thorough introduction to the subject with some lovely pictures, see \cite{wias}.
Let $G$ be a connected graph on $n$ vertices, numbered $1$ through $n$. The \textbf{laplacian matrix} of $G$ is the $n\times n$ matrix $\Delta=D-A$, where $A$ is the adjacency matrix of $G$ and $D$ is the diagonal degree matrix of $G$. In other words, $\Delta_{ii}={\operatorname{deg}}(v_{i})$ and for $i\neq j$, $\Delta_{ij}=-1$ if $G$ has an edge between vertices $i$ and $j$, and $\Delta_{ij}=0$ otherwise. Note that $\Delta$ is a symmetric matrix whose rows and columns sum to zero, so it is singular. In fact, ${\operatorname{corank}}(\Delta)$ is equal to the number of connected components of $G$, where we define the corank of an $n\times m$ matrix $A$ as $\min(n,m)-{\operatorname{rank}}(A)$.
Choose a vertex $i$. The \textbf{reduced laplacian matrix} $\Delta'$ is the $(n-1)\times(n-1)$ matrix obtained by removing row $i$ and its corresponding column from $\Delta$. The \textbf{sandpile group} $\Gamma(G)$ is the cokernel of $\Delta'$, that is, $\Gamma(G)={\mathbb Z}^{n-1}/\Delta'({\mathbb Z}^{n-1})$.
It is shown in \cite{wias} that the sandpile group of a graph is independent of the choice of the vertex $i$. Moreover, the Matrix Tree Theorem shows that for a connected graph $G$, the determinant $\det(\Delta')$ is equal to the number of spanning trees of $G$. In particular, $\Delta'$ has full rank, so ${\mathbb Z}^{n-1}/\Delta'({\mathbb Z}^{n-1})$ is a finite group of order $\det(\Delta')$ and rank at most $n-1$.
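For instance, for the $4$-cycle $C_{4}$ (which is also the balanced complete bipartite graph $K_{2,2}$), numbering the vertices $1,2,3,4$ along the cycle and removing vertex $4$, we get \[\Delta'=\begin{pmatrix} 2 & -1 & 0\\ -1 & 2 & -1\\ 0 & -1 & 2\end{pmatrix}, \qquad \det(\Delta')=4,\] so that $\Gamma(C_{4})={\mathbb Z}^{3}/\Delta'({\mathbb Z}^{3})\cong {\mathbb Z}/4{\mathbb Z}$, in accordance with the four spanning trees of $C_{4}$.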
If $G$ is disconnected, we define its sandpile group to be the direct sum of the sandpile groups of its connected components. It is easy to see that this is a finite group of rank at most $n-1$. Moreover, it is shown in \cite{ra} that a random bipartite graph $G(n,\alpha,q)$ is connected with probability $1-O(e^{-Kn})$ for some $K>0$ depending only on $q$ and $\alpha$. This will allow us to consider the rank of the cokernel of the reduced laplacian rather than the rank of the sandpile group directly, as they are equal with probability $1-O(e^{-Kn})$.
\subsection{Binomial and Normal Distributions} We use $B(n,q)$ for the binomial distribution, the sum of $n$ independent Bernoulli random variables equal to $1$ with probability $q$ and $0$ otherwise. Recall that ${\mathbb E}(B(n,q))=qn$, where ${\mathbb E}(X)$ is the expected value of $X$.
We will make repeated use of Hoeffding's inequality:
\begin{thm}[Hoeffding's inequality] \label{thm:hoeffding}
Let $B(n,q)$ be the binomial distribution, and let $\epsilon>0$. Then there exists a constant $K>0$, depending only on $q$ and $\epsilon$, such that ${\mathbb P}\left(\left|B(n,q)-qn\right|>\epsilon n\right)<e^{-Kn}$. \end{thm} For the proof, see for example \cite{hf}.
\subsection{Schur Complements}
Finally, we recall the basics of Schur complements, which will be a central tool in our proof. For a more thorough introduction to the subject, see \cite[Chapter 1]{zs}.
\begin{defn} Let $A$ be an $n\times n$ matrix. Let $S$ be a subset of $\{1,\dots,n\}$, and let $T$ be the complement of $S$. We write $A_{S,S}$ for the submatrix given by restricting $A$ to the rows and columns whose indices are in $S$, $A_{T,T}$ for the submatrix of rows and columns with indices in $T$, and $A_{S,T}$ for the submatrix of rows in $S$ and columns in $T$. \end{defn}
\noindent For example, if $S=\{1,\dots,k\}$, then $A=\begin{pmatrix} A_{S,S} & A_{S,T}\\ A_{T,S} & A_{T,T} \end{pmatrix} $
\begin{defn} Let $A,S,T$ be as above. If $A_{S,S}$ is invertible, then we define the \textbf{Schur complement} $A/S$ (or $A/A_{S,S}$) by $A/S=A_{T,T}-A_{T,S}A_{S,S}^{-1}A_{S,T}$. \end{defn}
\noindent Note that $A/S$ is a $|T|\times |T|$ matrix.
Recall that the \textbf{corank} of an $n\times m$ matrix $A$ is defined as $\min(n,m)-{\operatorname{rank}}(A)$. We will use the following theorem several times:
\begin{thm} Let $A$ and $S$ be as above, with $A_{S,S}$ invertible. Then ${\operatorname{corank}}(A/S)={\operatorname{corank}}(A)$. \end{thm}
\begin{proof} Assume that $S$ is composed of the first $k$ entries for some $k\leq n$. It can be seen that \[A=\begin{pmatrix} A_{S,S} & A_{S,T}\\ A_{T,S} & A_{T,T} \end{pmatrix} =\begin{pmatrix} I_{k} & 0 \\ A_{T,S}A_{S,S}^{-1} & I_{n-k} \end{pmatrix} \begin{pmatrix} A_{S,S} & A_{S,T}\\ 0 & A/S \end{pmatrix}. \]
\noindent where $I_k$ is the $k\times k$ identity matrix. Since the matrix on the left is invertible, we get that \[{\operatorname{corank}}(A)={\operatorname{corank}} \begin{pmatrix} A_{S,S} & A_{S,T}\\ 0 & A/S \end{pmatrix}. \]
As $A_{S,S}$ is invertible, row reduction gives us \[{\operatorname{corank}} \begin{pmatrix} A_{S,S} & A_{S,T}\\ 0 & A/S \end{pmatrix} = {\operatorname{corank}}(A/S), \]
which completes the proof. \end{proof}
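The identity ${\operatorname{corank}}(A/S)={\operatorname{corank}}(A)$ is also easy to check in exact arithmetic; the following sketch (an illustration only, with an arbitrary integer matrix and a forced linear relation among its rows) does so with SymPy:
\begin{verbatim}
import random
from sympy import Matrix

random.seed(0)
k, n = 3, 7
rows = [[random.randint(-3, 3) for _ in range(n)] for _ in range(n)]
for i in range(k):            # make the S-block strictly diagonally dominant,
    rows[i][i] += 10          # hence A_{S,S} is invertible
rows[n - 1] = [a + b for a, b in zip(rows[3], rows[4])]   # force corank(A) >= 1
A = Matrix(rows)
A_SS, A_ST = A[:k, :k], A[:k, k:]
A_TS, A_TT = A[k:, :k], A[k:, k:]
schur = A_TT - A_TS * A_SS.inv() * A_ST       # the Schur complement A/S
print(n - A.rank(), (n - k) - schur.rank())   # the two coranks coincide
\end{verbatim}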
\section{Closeness of Random Variables} \label{sec:closeness}
In this section, we define when random variables are usually within small distance. This describes the ``closeness'' of random variables in a useful way.
\begin{defn} \label{defn:closeness}
Let $X_{n},Y_{n}$ be two sequences of random variables. We say $X_{n}$ and $Y_{n}$ are \textbf{usually within small distance} if there exist constants $c,K>0$ such that for every $n,m>0$, ${\mathbb P}(|X_{n}-Y_{n}|\geq m)\leq Ke^{-cm}$. \end{defn}
We will use the following properties.
\begin{lemma} \label{lemma:closeness}
\begin{enumerate} \item If $X_{n},Y_{n}$ and $Y_{n},Z_{n}$ are pairs of sequences of random variables which are usually within small distance, then so are $X_{n},Z_{n}$. Hence being usually within small distance is an equivalence relation for sequences of random variables.
\item If $X_{n},Y_{n}$ are sequences of random variables which are usually within small distance, then $\left|{\mathbb E}(X_{n})-{\mathbb E}(Y_{n})\right|=O(1)$. \item If ${\mathbb P}(X_{n}\neq Y_{n})=O(e^{-cn})$ for some constant $c>0$ and $X_{n},Y_{n}$ are bounded by $O(n)$, then $X_n,Y_n$ are usually within small distance.
\item If $X_{n},Y_{n}$ are sequences of random variables and $\max|X_{n}-Y_{n}|<K$ for some constant $K$, then $X_{n},Y_{n}$ are usually within small distance. \item If $X_{n},Y_{n},Z_{n}$ are sequences of random variables such that $X_{n}\leq Y_{n}\leq Z_{n}$ and $Z_{n}$ is usually within small distance of $X_{n}$, then so is $Y_{n}$. \item If $X_{n},Y_{n}$ are usually within small distance of $X'_{n},Y'_{n}$ respectively, and $C$ is constant, then $X_{n}+Y_{n},CX_{n},\max(X_{n},Y_{n})$, and $\min(X_{n},Y_{n})$ are usually within small distance of $X'_{n}+Y'_{n},CX'_{n},\max(X'_{n},Y'_{n})$, and $\min(X'_{n},Y'_{n})$ respectively.
\end{enumerate} \end{lemma} \noindent The proofs are straightforward.
Using the above definition, we can now state the main theorem about the distribution of the $p$-rank of the sandpile group, from which we will deduce Theorem~\ref{thm:main}:
\begin{thm} \label{thm:strongMain} Let $G=G(n,\alpha,q)$ be a random bipartite graph, and $p$ a prime. Let $X_{n}=X(n,\alpha,q,p)$ be the $p$-rank of $\Gamma(G)$, and recall that $B(n,q)$ denotes a binomial random variable. Then $X_{n}$ is usually within small distance of $\max\left(B\left(n,\frac{1}{p}\right)-\alpha n,0\right)$, where $B\left(n,\frac{1}{p}\right)$ is the binomial distribution. \end{thm}
We will prove Theorem~\ref{thm:strongMain} in Section~\ref{sec:proof}. First, we show:
\begin{prop} \label{prop:reduction} Theorem~\ref{thm:strongMain} implies Theorem~\ref{thm:main}. \end{prop}
\begin{proof}
By Lemma~\ref{lemma:closeness}, Theorem~\ref{thm:strongMain} implies that \[{\mathbb E}(X_{n})={\mathbb E}\left(\max\left(B\left(n,\frac{1}{p}\right)-\alpha n,0\right)\right)+O(1).\] Hence it suffices to calculate ${\mathbb E}\left(\max\left(B\left(n,\frac{1}{p}\right)-\alpha n,0\right)\right)$. We will split into three cases, depending on whether $\alpha<\frac{1}{p},\alpha>\frac{1}{p}$, or $\alpha=\frac{1}{p}$.
\noindent{\bf The case $\alpha < \frac{1}{p}.$}
Note that by Hoeffding's inequality ${\mathbb P}\left(B\left(n,\frac{1}{p}\right)-\alpha n>0\right)=1-O(e^{-cn}) $ for some constant $c>0$. Hence by lemma~\ref{lemma:closeness}, $B\left(n,\frac{1}{p}\right)-\alpha n$ is usually within small distance of $\max\left(B\left(n,\frac{1}{p}\right)-\alpha n,0\right)$.
Because of this, it suffices to calculate ${\mathbb E}\left(B\left(n,\frac{1}{p}\right)-\alpha n\right)$. Using the additivity of the expected value, we see that \[{\mathbb E}\left(B\left(n,\frac{1}{p}\right)-\alpha n\right)={\mathbb E}\left(B\left(n,\frac{1}{p}\right)\right)-\alpha n = \frac{1}{p}n-\alpha n=\left(\frac{1}{p}-\alpha\right)n.\]
\noindent{\bf The case $\alpha > \frac{1}{p}.$}
This case is similar. Again by Hoeffding's inequality, we get that ${\mathbb P}\left(B\left(n,\frac{1}{p}\right)-\alpha n>0\right)=O(e^{-cn})$ and hence $\max\left(B\left(n,\frac{1}{p}\right)-\alpha n,0\right)$ is equal to $0$ with probability $1-O(e^{-cn})$. Hence $\max\left(B\left(n,\frac{1}{p}\right)-\alpha n,0\right)$ is usually within small distance of $0$, which has expected value $O(1)$.
\noindent{\bf The case $\alpha = \frac{1}{p}.$}
Finally, the case where $\alpha =\frac{1}{p}$. In this case, we wish to calculate ${\mathbb E}\left(\max\left(B\left(n,\alpha\right)-\alpha n,0\right)\right)$. We will rely on the following claim: \begin{claim} \label{claim:estimation} Let $B(n,\alpha)$ be the binomial distribution, $s$ a positive integer. Then
\[{\mathbb E}(B(n,\alpha)|B(n,\alpha)>s)=\alpha n+\alpha(1-\alpha)n\frac{{\mathbb P}(B(n-1,\alpha)=s)}{{\mathbb P}(B(n,\alpha)>s)}.\] \end{claim}
\begin{proof}
Let $Y=B(n,\alpha)$. We wish to calculate ${\mathbb E}(Y|Y>s)=\frac{\sum_{k>s}k{\mathbb P}(Y=k)}{\sum_{k>s}{\mathbb P}(Y=k)}$. Since we expect the main term in the expectation to be ${\mathbb E}\left(B(n,\alpha)\right)= \alpha n$, we wish to estimate
\[{\mathbb E}(Y|Y>s)-\alpha n=\frac{\sum_{k>s}k{\mathbb P}(Y=k)}{\sum_{k>s}{\mathbb P}(Y=k)}-\alpha n = \frac{\sum_{k>s}k{\mathbb P}(Y=k)-\sum_{k>s} \alpha n{\mathbb P}(Y=k)}{\sum_{k>s}{\mathbb P}(Y=k)}.\]
Now consider the two sums \begin{align} & \sum_{k>s} k{\mathbb P}(Y=k) =\sum_{k>s} k\alpha^k(1-\alpha)^{n-k} \binom{n}{k} \\ & \sum_{k>s} \alpha n{\mathbb P}(Y=k) = \alpha n \sum_{k>s} \alpha^k(1-\alpha)^{n-k} \binom{n}{k}. \end{align}
We manipulate the sums as follows: In sum (1), replace $k\binom{n}{k}$ with the equal $n\binom{n-1}{k-1}$ and take $\alpha n$ out, so that it becomes $\alpha n\sum_{k>s}\alpha^{k-1}(1-\alpha)^{n-k}\binom{n-1}{k-1}$.
Now, multiply by $\alpha+(1-\alpha)=1$, and expand, to obtain the two sums $(1)=(1a)+(1b)$, where \begin{align*} &(1a)\ &&\alpha n\sum_{k>s} \alpha^{k}(1-\alpha)^{n-k}\binom{n-1}{k-1} \\ &(1b)\ &&\alpha n\sum_{k>s} \alpha^{k-1}(1-\alpha)^{n-k+1}\binom{n-1}{k-1}. \end{align*}
For sum (2), use $\binom{n}{k}=\binom{n-1}{k}+\binom{n-1}{k-1}$ to obtain $(2)=(2a)+(2b)$, where \begin{align*} &(2a)\ &&\alpha n\sum_{k>s} \alpha^{k}(1-\alpha)^{n-k}\binom{n-1}{k} \\ &(2b)\ &&\alpha n\sum_{k>s} \alpha^{k}(1-\alpha)^{n-k}\binom{n-1}{k-1}. \end{align*}
Now the difference $(1)-(2)$ cancels out! Observe that $(1a)=(2b)$, whereas $(1b)$ and $(2a)$ are just shifts of each other, so the difference cancels out in a telescopic sum, and we obtain \[(1)-(2)=(1b)-(2a)=\alpha n \left(\alpha^{s}(1-\alpha)^{n-s}\binom{n-1}{s}\right)=n\alpha (1-\alpha){\mathbb P}(B(n-1,\alpha)=s). \]
Finally, putting our expression for $(1)-(2)$ back in our equation for the expectation, we get
\begin{align*}
{\mathbb E}(Y|Y>s)-\alpha n &= \frac{\sum_{k>s}k{\mathbb P}(Y=k)-\sum_{k>s} \alpha n{\mathbb P}(Y=k)}{\sum_{k>s}{\mathbb P}(Y=k)} \\ &= \frac{n\alpha (1-\alpha){\mathbb P}(B(n-1,\alpha)=s)}{{\mathbb P}(Y>s)}, \end{align*} \noindent which completes the proof of the claim. \end{proof}
For estimating ${\mathbb E}\left(\max\left(B\left(n,\alpha\right)-\alpha n,0\right)\right)$, the following version of the De Moivre-Laplace theorem will be useful. \begin{thm}[{\cite[Theorem 2]{mt}}] \label{thm:dml}
Let $s$ be an integer such that $|\alpha n-s|<\sqrt{n}$. Then \[{\mathbb P}(B(n,\alpha)=s)=\frac{1}{\sqrt{2\pi \alpha (1-\alpha)n}} e^{-\frac{(s-\alpha n)^{2}}{2\alpha(1-\alpha)n}}\left(1+O\left(\frac{1}{\sqrt{n}}\right)\right).\] \end{thm}
In our calculation, we will need to estimate ${\mathbb P}(B(n-1,\alpha)=s)$ for $s=\lfloor\alpha n\rfloor$. As $|s-\alpha n|\leq 1$, we have $(s-\alpha (n-1))^{2}\leq (1+\alpha)^{2}\leq 4$, and we get that
\[\left|e^{-\frac{(s-\alpha (n-1))^{2}}{2\alpha(1-\alpha)(n-1)}}-1\right|\leq\left|e^{-\frac{4}{2\alpha(1-\alpha)(n-1)}}-1\right|=O\left(\frac{1}{n}\right),\] and hence $ e^{-\frac{(s-\alpha (n-1))^{2}}{2\alpha(1-\alpha)(n-1)}}=1+O\left(\frac{1}{n}\right)$. As $\frac{1}{\sqrt{n-1}}=\frac{1}{\sqrt{n}}\left(1+O\left(\frac{1}{\sqrt{n}}\right)\right)$, we have by Theorem~\ref{thm:dml}:
\begin{align} \nonumber {\mathbb P}(B(n-1,\alpha)=s)&=\frac{1}{\sqrt{2\pi \alpha (1-\alpha)(n-1)}} e^{-\frac{(s-\alpha (n-1))^{2}}{2\alpha(1-\alpha)(n-1)}}\left(1+O\left(\frac{1}{\sqrt{n}}\right)\right) \\ \nonumber &=\frac{1}{\sqrt{2\pi \alpha (1-\alpha)n}} \left(1+O\left(\frac{1}{\sqrt{n}}\right)\right) \left( 1+O\left(\frac{1}{n}\right) \right) \left(1+O\left(\frac{1}{\sqrt{n}}\right)\right)\\ &=\frac{1}{\sqrt{2\pi \alpha (1-\alpha)n}} \left(1+O\left(\frac{1}{\sqrt{n}}\right)\right). \label{eqn:test} \end{align}
Recall that we wish to estimate ${\mathbb E}\left(\max\left(B\left(n,\alpha\right)-\alpha n,0\right)\right)$.
\begin{align*}
{\mathbb E}(\max(B(n,\alpha)-\alpha n,0))&={\mathbb P}(B(n,\alpha)>\lfloor\alpha n\rfloor){\mathbb E}(B(n,\alpha)-\alpha n|B(n,\alpha)>\lfloor\alpha n\rfloor)\\
&={\mathbb P}(B(n,\alpha)>\lfloor\alpha n\rfloor)({\mathbb E}(B(n,\alpha)|B(n,\alpha)>\lfloor\alpha n\rfloor)-\alpha n). \end{align*} Using Claim~\ref{claim:estimation} with $s=\lfloor\alpha n\rfloor$, we get: \begin{align*}
{\mathbb P}(B(n,\alpha)>\lfloor\alpha n\rfloor)&({\mathbb E}(B(n,\alpha)|B(n,\alpha)>\lfloor\alpha n\rfloor)-\alpha n) \\ &={\mathbb P}(B(n,\alpha)>\lfloor\alpha n\rfloor)\left(\alpha n+\alpha(1-\alpha)n\frac{{\mathbb P}(B(n-1,\alpha)=\lfloor\alpha n\rfloor)}{{\mathbb P}(B(n,\alpha)>\lfloor\alpha n\rfloor)}-\alpha n\right)\\ &=\alpha(1-\alpha)n{\mathbb P}(B(n-1,\alpha)=\lfloor\alpha n\rfloor). \end{align*} Finally, using \ref{eqn:test}, we get: \begin{align*} \alpha(1-\alpha)n{\mathbb P}(B(n-1,\alpha)=\lfloor\alpha n\rfloor) &=\alpha(1-\alpha)n\frac{1}{\sqrt{2\pi \alpha (1-\alpha)n}} \left(1+O\left(\frac{1}{\sqrt{n}}\right)\right)\\ &=\sqrt{\frac{\alpha(1-\alpha)n}{2\pi}}\left(1+O\left(\frac{1}{\sqrt{n}}\right)\right)\\ &=\sqrt{\frac{\alpha(1-\alpha)n}{2\pi}}+O(1). \end{align*}
Substituting $\alpha=\frac{1}{p}$ gives the expression from Theorem~\ref{thm:main}. \end{proof}
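Both Claim~\ref{claim:estimation} and the $\sqrt{\alpha(1-\alpha)n/2\pi}$ asymptotic are easy to check numerically; the following sketch (with parameter values chosen by us purely for illustration) compares the exact conditional expectation with the formula of the claim, and the exact value of ${\mathbb E}(\max(B(n,\alpha)-\alpha n,0))$ with $\sqrt{\alpha(1-\alpha)n/2\pi}$:
\begin{verbatim}
from math import comb, sqrt, pi

def pmf(n, a, k):                     # P(B(n, a) = k)
    return comb(n, k) * a**k * (1 - a)**(n - k)

# the conditional-expectation identity (arbitrary parameters)
n, a, s = 40, 0.3, 15
tail = sum(pmf(n, a, k) for k in range(s + 1, n + 1))
lhs = sum(k * pmf(n, a, k) for k in range(s + 1, n + 1)) / tail
rhs = a * n + a * (1 - a) * n * pmf(n - 1, a, s) / tail
print(lhs, rhs)                       # agree up to rounding

# E(max(B(n,a) - a*n, 0)) versus sqrt(a(1-a)n / (2 pi)), with a = 1/p
p = 3
a = 1 / p
for n in (30, 120, 480):              # multiples of p, so a*n is an integer
    exact = sum(max(k - a * n, 0.0) * pmf(n, a, k) for k in range(n + 1))
    print(n, exact, sqrt(a * (1 - a) * n / (2 * pi)))
\end{verbatim}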
\section{Min-Entropy and Random Matrix Rank} \label{sec:entropy}
In this section, we define our notion of min-entropy, which is a variant on the definition given by Maples in \cite{km} and use it to prove some lemmas which will be useful in the proof of Theorem~\ref{thm:strongMain}.
\begin{defn}
Let $A$ be a random matrix over ${\mathbb Z}/p{\mathbb Z}$. Let $\beta>0$, and let $I$ be a set of entries in $A$. We say that \textbf{an entry $A_{i_{0}j_{0}}\in A$ has min-entropy at least $\beta$ with respect to $I$} if, for any choice of values $a_{ij}$ for the entries in $I$ that can occur with nonzero probability, and every $a\in {\mathbb Z}/p{\mathbb Z}$, the probability ${\mathbb P}(A_{i_{0}j_{0}}=a|A_{ij}=a_{ij}\forall (i,j)\in I)$ is at most $1-\beta$.
We say that \textbf{the matrix $A$ has min-entropy at least $\beta$} if every entry of $A$ has min-entropy at least $\beta$ with respect to the set of all other entries. \end{defn}
In other words, $A_{ij}$ has min-entropy greater than $\beta$ relative to a set of entries if fixing them cannot control $A_{ij}$, in the sense that it still has probability at most $1-\beta$ of being any specific value. We can think of min-entropy as a bound on how much fixing some entries of a matrix can influence other entries. We illustrate this notion of min-entropy with the following examples.
If all the entries of $A$ are independent, the min-entropy of $A_{ij}$ is simply $\min_{x}(1-\mathbb{P}(A_{ij}=x))$. In particular, if the entries of $A_{ij}$ are all independent and uniformly distributed in $\mathbb{Z}/p{\mathbb Z}$, this min-entropy is $1-\frac{1}{p}$, which is the highest possible.
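Similarly, an entry that equals $-1$ with probability $q$ and $0$ with probability $1-q$, as the adjacency entries of the laplacian of $G(n,\alpha,q)$ do, has min-entropy $\min(q,1-q)$; bounds of this form reappear in the proof of Lemma~\ref{lemma:properties}.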
For another example, consider $\Delta=\Delta(n,\alpha,q)$, the laplacian matrix of a random bipartite graph. Since every row in $\Delta$ sums to zero, for any entry $\Delta_{ij}$, fixing the rest of the entries in row $i$ determines $\Delta_{ij}$. Hence $\Delta_{ij}$ has zero min-entropy with respect to the rest of the entries in row $i$.
\begin{thm} \label{thm:rectrank} Let $A$ be an $n\times m$ random matrix over ${\mathbb Z}/p{\mathbb Z}$, for $m\geq n$, with min-entropy at least $\beta$ for some $\beta>0$. Then the probability that $A$ has rank $n$ is at least $1-\frac{1}{\beta^{2}}(1-\beta)^{m+1-n}$. In particular, there exists a constant $K>0$ depending only on $\beta$ such that ${\mathbb P}({\operatorname{rank}}(A)=n)\geq 1-e^{-K(m-n)}$. \end{thm}
\begin{proof} Let $v_{1},\dots,v_{n}$ be the rows of $A$. Then $A$ has rank $n$ if and only if the $v_{i}$ are independent, so the probability ${\mathbb P}({\operatorname{rank}}(A)=n)$ is equal to the product
\[\prod_{i=1}^{n}\mathbb{P}(v_{i}\text{ is independent of }\{v_{1},\dots,v_{i-1}\}|\{v_{1},\dots,v_{i-1}\} \text{ are independent}).\]
We now note that for each $i$,
\[{\mathbb P}(v_{i}\text{ is independent of }\{v_{1},\dots,v_{i-1}\}|\{v_{1},\dots,v_{i-1}\} \text{ are independent})\geq 1-(1-\beta)^{m-(i-1)}. \] To see this, assume that $\{v_{1},\dots,v_{i-1}\}$ are independent. Then there exists a subset $J=\{j_{1},\dots,j_{i-1}\}\subset [m]$ such that the restrictions of the $\{v_{j}\}_{j<i}$ to the entries in $J$ are independent.
Assume that $J=\{1,\dots,i-1\}$. By the independence of the $v_{l}|_{J}$, there exist unique coefficients $a_{1},\dots,a_{i-1}$ such that for all $j<i$, $(v_{i})_{j}=\sum_{l<i} a_{l}(v_{l})_{j}$.
$v_{i}$ is dependent on $\{v_{1},\dots,v_{i-1}\}$ only if there exists a linear combination of them that sums to $v_{i}$. By the uniqueness of the coefficients $a_{1},\dots,a_{i-1}$, this happens only if $v_{i}=\sum_{l<i}a_{l}v_{l}$. In particular, $v_{i}$ is dependent on the previous row vectors only if for all $j\geq i$, $(v_{i})_{j}=\sum_{l<i} a_{l}(v_{l})_{j}$.
However, by the min-entropy assumption, this happens for each $j$ with probability at most $1-\beta$. As there are $m-(i-1)$ such entries, the probability that this equality holds for all of them is at most $(1-\beta)^{m-(i-1)}$. Hence the probability that $v_{i}$ is independent of $\{v_{1},\dots,v_{i-1}\}$ is at least $1-(1-\beta)^{m-(i-1)}$. Using this, we get the following bound
\begin{align}
\prod_{i=1}^{n}\mathbb{P}(v_{i}\text{ is independent of }\{v_{1},\dots,v_{i-1}\}|\{v_{1},\dots,v_{i-1}\} \text{ are independent}) \nonumber \\ \geq \prod_{i=1}^{n}(1-(1-\beta)^{m-(i-1)})=(1-(1-\beta)^{m})\cdots(1-(1-\beta)^{m+1-n}).\label{eqn:betaprod} \end{align}
We now wish to bound (\ref{eqn:betaprod}) from below. \begin{claim} The product $(1-(1-\beta)^{m})\cdots(1-(1-\beta)^{m+1-n})$ is at least $1-\frac{1}{\beta^{2}}(1-\beta)^{m+1-n}$. \end{claim}
Write $\gamma=1-\beta,r=m+1-n$. We need to find a lower bound on the product $(1-\gamma^{m})\cdots(1-\gamma^{r})$. We will rely on the fact that $0<\gamma<1$.
First, recall that for any positive $x$, $x\geq\log(x)+1$. Using this for $x=(1-\gamma^{m})\cdots(1-\gamma^{r})$, we get: \[(1-\gamma^{m})\cdots(1-\gamma^{r})\geq 1+\log\left((1-\gamma^{m})\cdots(1-\gamma^{r})\right)\] Now split the product to get: \[1+\log((1-\gamma^{m})\cdots(1-\gamma^{r}))=1+\sum_{i=r}^{m}\log(1-\gamma^{i})\]
For any $0<x<1$, $\log(1-x)>-\frac{x}{1-x}$. To see this, let $h(t)=\frac{1}{1-x}(t-(1-x))+\log(1-x)$ be the tangent line to $\log(t)$ at $t=1-x$. Then as $\log(t)$ is concave, $h(1)=\frac{x}{1-x}+\log(1-x)>\log(1)=0$, so $\log(1-x)>-\frac{x}{1-x}$. Using this for $x=\gamma^{i}$, we get: \[1+\sum_{i=r}^{m}\log(1-\gamma^{i})\geq 1+\sum_{i=r}^{m}\frac{-\gamma^{i}}{1-\gamma^{i}}\] As $\gamma<1$, we have: \[1+\sum_{i=r}^{m}-\frac{\gamma^{i}}{1-\gamma^{i}}\geq 1+\sum_{i=r}^{m}\frac{-\gamma^{i}}{1-\gamma}=1-\frac{1}{1-\gamma}\sum_{i=r}^{m}\gamma^{i}\]
We will now bound this by the sum of the infinite series: \[1-\frac{1}{1-\gamma}\sum_{i=r}^{m}\gamma^{i}\geq1-\frac{1}{1-\gamma}\sum_{i=r}^{\infty}\gamma^{i}=1-\frac{\gamma^{r}}{(1-\gamma)^{2}}.\]
Translating back through $\gamma=1-\beta$,$r=m+1-n$, this is $1-\frac{1}{\beta^{2}}(1-\beta)^{m+1-n}$, which proves the claim, and the theorem follows. \end{proof}
\begin{cor} \label{cor:rectrank} Let $A=A_{n}$ be an $n\times m$ random matrix over ${\mathbb Z}/p{\mathbb Z}$ with min-entropy at least $\beta$ for some $\beta>0$ independent of $n$. Then ${\operatorname{corank}}(A)$ is usually within small distance of $0$. \end{cor}
\begin{proof} Let $s>0$, and assume (transposing $A$ if necessary) that $m\geq n$. We wish to show that ${\mathbb P}({\operatorname{corank}}(A)>s)=O(e^{-Ks})$, where $K>0$ is independent of $n$. Let $A'$ be the submatrix of $A$ given by taking the first $n-s$ rows. Then $A'$ is an $(n-s)\times m$ matrix, so by Theorem~\ref{thm:rectrank}, its rows are independent with probability at least $1-e^{-K(m-(n-s))}=1-(e^{-K(m-n)})e^{-Ks}\geq 1-e^{-Ks}$, where $K$ depends only on $\beta$. But if $A'$ has rank $n-s$, the corank of $A$ is at most $s$, so ${\mathbb P}({\operatorname{corank}}(A)>s)\leq e^{-Ks}$. \end{proof}
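As a quick sanity check of Theorem~\ref{thm:rectrank} (with parameters of our own choosing), one can estimate the probability of full rank empirically for i.i.d. entries uniform on ${\mathbb Z}/p{\mathbb Z}$, whose min-entropy is exactly $1-\frac{1}{p}$:
\begin{verbatim}
import numpy as np

def rank_mod_p(M, p):                 # Gaussian elimination rank over Z/pZ
    A = np.array(M, dtype=np.int64) % p
    rank = 0
    for col in range(A.shape[1]):
        pivot = next((r for r in range(rank, A.shape[0]) if A[r, col]), None)
        if pivot is None:
            continue
        A[[rank, pivot]] = A[[pivot, rank]]
        A[rank] = (A[rank] * pow(int(A[rank, col]), p - 2, p)) % p
        for r in range(A.shape[0]):
            if r != rank and A[r, col]:
                A[r] = (A[r] - A[r, col] * A[rank]) % p
        rank += 1
    return rank

rng = np.random.default_rng(1)
p, n, m, trials = 2, 20, 24, 2000
beta = 1 - 1 / p                      # min-entropy of i.i.d. uniform entries
full = sum(rank_mod_p(rng.integers(0, p, (n, m)), p) == n for _ in range(trials))
print(full / trials, 1 - (1 - beta) ** (m + 1 - n) / beta ** 2)
# the empirical frequency should comfortably exceed the printed bound (0.875 here)
\end{verbatim}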
\section{Proof of Theorem~\ref{thm:strongMain}} \label{sec:proof}
For this section, we fix a prime $p$, as well as constants $0<\alpha,q<1$.
We will now prove Theorem~\ref{thm:strongMain}. We do this in two stages. First, we reduce the laplacian mod $p$, remove the first and last $p$ rows and columns, and set the diagonal entries to be uniformly distributed mod $p$. We call the resulting matrix $M$. We show that ${\operatorname{corank}}(M)$ is usually within small distance of the $p$-rank of the sandpile group, which reduces Theorem~\ref{thm:strongMain} to calculating the distribution of ${\operatorname{corank}}(M)$.
In the second stage, we calculate the distribution of ${\operatorname{corank}}(M)$. Removing some of the rows and columns of the laplacian will allow the upper triangular entries of $M$ to have positive min-entropy with respect to the other upper triangular entries, which will allow us to use Corollary~\ref{cor:rectrank} to compute ${\operatorname{corank}}(M)$.
\subsection{Reduction to $M$}
Let $(L,R)$ be the vertices of our random bipartite graph $G$, and let $\Delta$ be the laplacian of $G$. Note that $\Delta$ is of the form $\bigl(\begin{smallmatrix} D_{0,1} & -A_{0} \\ -A_{0}^{T} & D_{0,2} \end{smallmatrix}\bigr)$, where $A_{0}$ is the adjacency matrix between $L$ and $R$ and $D_{0,1}$ and $D_{0,2}$ are diagonal matrices. Since we wish to work over ${\mathbb Z}/p{\mathbb Z}$, we will consider $\Delta\otimes {\mathbb Z}/p{\mathbb Z}=\Delta/p$.
As we saw earlier, $\Delta/p$ has min-entropy $0$. We resolve this issue by using the submatrix $\Delta_{1}$, which has positive min-entropy. \begin{defn} Let $G$ be a bipartite graph with laplacian $\Delta$, $p$ prime. We define the matrix $\Delta_{1}=\Delta_{1}(G)=\Delta_{1}(n,\alpha,q,p)$ over ${\mathbb Z}/p{\mathbb Z}$ to be the submatrix of $\Delta/p$ given by removing the first $p$ rows, the first $p$ columns, the last $p$ rows, and the last $p$ columns. \end{defn}
\begin{lemma} \label{lemma:properties} Let $G=G(n,\alpha,q)$ be as above, and let $\Delta_{1}=\Delta_{1}(G)$. Write $\Delta_{1}=\bigl(\begin{smallmatrix} D_{1,1} & -A_{1} \\ -A_{1}^{T} & D_{1,2} \end{smallmatrix}\bigr)$. $\Delta_{1}$ has the following properties: \begin{enumerate} \item The diagonal values of $D_{1,1}$ are independent of each other, as well as of entries of $A_{1}$ outside of their row. \item The diagonal values of $D_{1,2}$ are independent of each other, as well as of entries of $A_{1}$ outside of their column. \item There exists $\beta>0$ depending only on $p,q,$ and $\alpha$ such that every non-constant entry in or above the diagonal in $\Delta_{1}$ has min-entropy at least $\beta$ with respect to the set of the entries in or above the diagonal. \item For any $a\in{\mathbb Z}/p{\mathbb Z}$, and any diagonal entry $x$ in $D_{1,1}$ or $D_{1,2}$, ${\mathbb P}(x=a)= \frac{1}{p}+O(e^{-cn})$ for some constant $c$. \end{enumerate} \end{lemma}
\begin{proof}
We first show $(1)$. Note that the value of the diagonal entry $(D_{1,1})_{ii}$ depends only on the $i$\textsuperscript{th} row of $A_{0}$. Hence the $(D_{1,1})_{ii}$ are independent of each other and of any entry outside of the $i$\textsuperscript{th} row of $A_{0}$, which in particular includes the entries of $\Delta_{1}$ outside the $i$\textsuperscript{th} row. The proof of $(2)$ is similar.
We will now prove $(3)$.
Let $x$ be an entry in the upper triangle of $\Delta_{1}$. If $x\in D_{1,1}$, then as $x$ is non-constant, it must be on the diagonal. As we saw above, $x$ depends only on the values in the $i$\textsuperscript{th} row of $A_{0}$.
Fix the rest of the entries of the $i$\textsuperscript{th} row of $\Delta_{1}$. There are still $p$ entries of the $i$\textsuperscript{th} row of $A_{0}$ not in $\Delta_{1}$, which are left undetermined. For any choice of the first $p-1$ of these, the last entry can be either $-1$ with probability $q$ or $0$ otherwise, which would change the value of $x$. Hence $x$ has min-entropy at least $\min(q,1-q)$ with respect to the rest of the upper triangular entries. The case where $x\in D_{1,2}$ is similar.
Now, assume $x\in -A_{1}$. Fix all the other entries of $\Delta_{1}$. The only ones of which $x$ is not independent are those in the row and column of $x$. The row sum (in $A_{0}$) must be equal to the corresponding row entry, and the column sum must be equal to the corresponding column entry.
There are $p$ unfixed entries in the row that are in $A_{0}$ but not in $\Delta_{1}$, and the sum of these entries can be equal to any value in ${\mathbb Z}/p{\mathbb Z}$ with probability at least $\min(q,1-q)^{p+1}$.The same goes for the column sum. In particular, the probability that both the row and the column sum allow $x$ to be zero is at least $\min(q,1-q)^{2(p+1)}$. Similarly, the probability that both allow $x=-1$ is at least $\min(q,1-q)^{2(p+1)}$. Hence $x$ has min-entropy at least $\min(q,1-q)^{2(p+1)}$ with respect to the rest of the upper triangular entries.
Finally, we prove $(4)$. Let $a\in{\mathbb Z}/p{\mathbb Z}$. To see that each entry of $D_{1,1}$ is equal to $a$ with probability $\frac{1}{p}+O(e^{-cn})$, note that it is equal to $a$ when the sum of the corresponding row in $A_{0}$ is equal to $a$. Since this row has $\alpha n$ independent entries equal to $1$ with probability $q$ and zero otherwise, its sum is uniformly distributed in ${\mathbb Z}/p{\mathbb Z}$ up to an $O(e^{-cn})$ error term, where $c$ is a constant depending only on $q,\alpha$ and $p$. \end{proof}
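Property $(4)$ rests on the fact that a sum of many independent Bernoulli variables equidistributes modulo $p$ exponentially fast; the following short computation (with parameters chosen by us for illustration) makes this visible:
\begin{verbatim}
from math import comb

def mod_p_law(m, q, p):               # law of B(m, q) reduced modulo p
    out = [0.0] * p
    for k in range(m + 1):
        out[k % p] += comb(m, k) * q**k * (1 - q)**(m - k)
    return out

p, q = 3, 0.4
for m in (10, 40, 160):
    print(m, [round(x, 6) for x in mod_p_law(m, q, p)])
# each law converges rapidly to the uniform vector (1/3, 1/3, 1/3)
\end{verbatim}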
We will take $M$ to be equal to $\Delta_{1}(n+2p,\alpha,q,p)$, then adjust the probability space so that the diagonal values of $M$ are equidistributed in ${\mathbb Z}/p{\mathbb Z}$. Since this changes only an exponentially small part of the probability space, ${\operatorname{corank}}(M)$ is usually within small distance of ${\operatorname{corank}}(\Delta_{1}(n+2p,\alpha,q,p))$. But
\[\left|{\operatorname{corank}}(\Delta_{1}(n+2p,\alpha,q,p))-{\operatorname{corank}}(\Delta_{1}(n,\alpha,q,p))\right|<4p,\] so by transitivity ${\operatorname{corank}}(M)$ is usually within small distance of ${\operatorname{corank}}(\Delta_{1}(n,\alpha,q,p))$. Hence we have:
\begin{prop} \label{prop:mtomain} The $p$-rank of $\Gamma(G)$ is usually within small distance of ${\operatorname{corank}}(M)$. \end{prop}
We will also assume that $\lfloor\alpha n\rfloor=\alpha n$, so that $M$ is a $(1+\alpha)n\times (1+\alpha)n$ matrix. We will write:
\[M=\begin{pmatrix} D_{1} & A\\ A^{T} & D_{2} \end{pmatrix}\]
\subsection{Calculating the corank of $M$}
In this section, we prove the following statement about $M$: \begin{prop} \label{prop:mrank} Let $M=M(n,\alpha,q,p)=\begin{pmatrix} D_{1} & A\\ A^{T} & D_{2} \end{pmatrix}$ be the matrix described above. Then ${\operatorname{corank}}(M)$ is usually within small distance of $\max\left(B\left(n,\frac{1}{p}\right)-\alpha n,0\right)$. \end{prop}
\noindent Together with Proposition~\ref{prop:mtomain}, this implies Theorem~\ref{thm:strongMain}.
Throughout the proof, we will use ${\operatorname{height}}(A)$ and ${\operatorname{width}}(A)$ to denote the number of rows and columns of $A$ respectively. If $A$ is a square matrix, we use $\dim(A)$ for both of these.
\begin{proof}
Let $r$ be the number of zero entries on the diagonal of $D_{1}$, that is, $r={\operatorname{corank}}(D_{1})$. Since the diagonal values of $D_{1}$ are independent and uniformly distributed, it is easy to see that $r$ is distributed as $B\left(n,\frac{1}{p}\right)$. Hence, it suffices to show that ${\operatorname{corank}}(M)$ is usually within small distance of $\max\left(r-\alpha n,0\right)$.
Our proof will rely on finding nonsingular submatrices of $M$, and taking the Schur complement with respect to them. This will allow us to reduce the problem of finding ${\operatorname{corank}}(M)$ to finding the coranks of matrices which are either nonsingular (in the case where $r-\alpha n< 0$), or have a large block of zeros which makes finding the corank straightforward (in the case where $r-\alpha n\geq 0$).
Assume that the first $n-r$ entries of $D_{1}$ are the nonzero entries, so that $D_{1}$ is of the form $\begin{pmatrix} D_{1}' & 0\\ 0 & 0 \end{pmatrix}$, where $D_{1}'$ is invertible. Hence we can write \[M=\begin{pmatrix} D_{1}' &0 &B_{1} \\ 0 &0 &B_{2} \\ B_{1}^{T} &B_{2}^{T} &D_{2} \end{pmatrix} \] where $B_{1},B_{2}$ are random matrices of dimension $(n-r)\times \alpha n$ and $r\times \alpha n$ respectively. Taking the Schur complement of $M$ with respect to $D_{1}'$, we get: \[M/D_{1}'=\begin{pmatrix} 0 & B_{2}\\ B_{2}^{T} & D_{2}-B_{1}^{T}D_{1}'^{-1}B_{1} \end{pmatrix}. \] We will now split into cases:
\noindent{\bf The case $r \ge \alpha n.$}
In this case, we want to show that ${\operatorname{corank}}(M)$ is usually within small distance of $r-\alpha n$. Now, \[{\operatorname{height}}(B_{2})=r\geq\alpha n={\operatorname{width}}(B_{2}).\] As ${\operatorname{rank}}(B_{2})={\operatorname{rank}}(B_{2}^{T})$, it is easy to see that \[{\operatorname{rank}}(M/D_{1}')\geq {\operatorname{rank}}(B_{2})+{\operatorname{rank}}(B_{2}^{T})=2{\operatorname{rank}}(B_{2}),\] and thus \[{\operatorname{corank}}(M)={\operatorname{corank}}(M/D_{1}')\leq \dim(M/D_{1}')-{\operatorname{rank}}(M/D_{1}')=(\alpha n+r)-(2{\operatorname{rank}}(B_{2})).\]
Conversely, the corank of $M/D_{1}'$ is at least the corank of the submatrix formed by the top $r$ rows, given by $\begin{pmatrix} 0 & B_{2} \end{pmatrix}$. The rank of this submatrix is equal to ${\operatorname{rank}}(B_{2})$, so its corank is $r-{\operatorname{rank}}(B_{2})$.
Since $B_{2}$ has min-entropy at least $\beta$ for some positive constant $\beta$, by Corollary~\ref{cor:rectrank}, ${\operatorname{rank}}(B_{2})$ is usually within small distance of \[\min({\operatorname{height}}(B_{2}),{\operatorname{width}}(B_{2}))=\min(r,\alpha n)=\alpha n.\]
Applying this to our lower and upper bounds for ${\operatorname{corank}}(M)$, we get that the upper bound is usually within small distance of $(\alpha n+r)-(2\alpha n)=r-\alpha n$. Similarly, our lower bound is usually within small distance of $r-\alpha n$. Hence by Lemma~\ref{lemma:closeness}, ${\operatorname{corank}}(M)$ is usually within small distance of $r-\alpha n$.
\noindent{\bf The case $r <\alpha n.$}
In this case, we need to show that ${\operatorname{corank}}(M)={\operatorname{corank}}(M/D_{1}')$ is usually within small distance of zero.
Write $C=D_{2}-B_{1}^{T}D_{1}'^{-1}B_{1}$ for the bottom-right $\alpha n\times \alpha n$ submatrix of $M/D_{1}'$. We will use the following claim:
\begin{claim}
Let $s$ be the size of the largest set of indices $J\subseteq \{1,\dots,\alpha n\}$ with the property that $C_{J}=(C_{ij})_{i,j\in J}$ is nonsingular. Then for any constant $\epsilon>0$, $s\geq \left(\alpha(1 -\frac{1}{p})-\epsilon\right) n$ with probability $1-O(e^{-cn})$ for some constant $c>0$. \end{claim}
\begin{proof}
To see this, we build up a set $J$ by going through the indices $i\in\{1,\dots,\alpha n\}$. For each $i$, we add $i$ to $J$ if $C_{J\cup\{i\}}$ is nonsingular. We will show that for each $i$, we add $i$ with probability at least $1-\frac{1}{p}-\delta$, where $\delta>0$ is arbitrarily small as $n$ grows. Since $|J|$ is the sum of $\alpha n$ Bernoulli random variables, each equal to $1$ with probability at least $1-\frac{1}{p}-\delta$ independently of the previous values, we can say that $|J|$ stochastically dominates $B\left(\alpha n, 1-\frac{1}{p}-\delta\right)$.
By Hoeffding's inequality, \[B\left(\alpha n, 1-\frac{1}{p}-\delta\right)>(1-\delta)\alpha\left(1-\frac{1}{p}-\delta\right)n> \alpha\left(1 -\frac{1}{p}-\epsilon\right) n\] with probability $1-O(e^{-cn})$ (the second inequality holds for all sufficiently small $\delta$).
To see that each $i$ can be added with probability at least $1-\frac{1}{p}-\delta$, note that each diagonal entry of $D_{2}$ is a sum involving entries of $B_{1}$ and entries of $B_{2}$, and the latter are independent of $B_{1}$. Each column of $B_{2}$ has $r$ entries, where $r$ is distributed as $B\left(n,\frac{1}{p}\right)$, so by Hoeffding's inequality $r>\frac{1}{2p}n$ with probability $1-O(e^{-cn})$. Hence we can assume that the diagonal entries of $D_{2}$ are exponentially close to being uniformly distributed in ${\mathbb Z}/p{\mathbb Z}$, given any condition on $B_{1},D_{1}'$, and the previous diagonal entries of $D_{2}$. This means that for any $x\in {\mathbb Z}/p{\mathbb Z}$ and any conditions on the rest of the entries of $C$, ${\mathbb P}(C_{ii}=x)\leq \frac{1}{p}+\delta$, where $\delta$ can be exponentially small in $n$.
Let $J$ be the set of indices we obtain from taking the above process on $\{1,\dots,i-1\}$. We need to show that we add $i$ to $J$ with probability at least $1-\delta-\frac{1}{p}$. We add $i$ to $J$ unless $C_{J\cup\{i\}}$ becomes singular. But this happens only if the last column of $C_{J\cup\{i\}}$ is dependent on the first $|J|$ columns.
Write
\[C_{J\cup\{i\}}=\begin{pmatrix}
u_{1}\ \cdots\ u_{|J|} & u_{i}\\
(C_{ij})_{j\in J} & C_{ii} \end{pmatrix},\]
where $u_{1},\dots,u_{|J|}$ are the columns of $C_{J}$ and $u_{i}=(C_{ji})_{j\in J}$. Since $C_{J}$ is nonsingular, there exist unique coefficients $a_{j}\in {\mathbb Z}/p{\mathbb Z}$ such that $\sum a_{j}u_{j}=u_{i}$. But $C_{J\cup\{i\}}$ is singular only if its columns are dependent, which happens only if $\sum a_{j}C_{ij}=C_{ii}$. From the above statement with $x=\sum a_{j}C_{ij}$, this happens with probability at most $\frac{1}{p}+\delta$. This completes the proof of the claim. \end{proof}
Getting back to the proof, assume that $J$ is composed of the last $s$ indices of $C$. Then the claim implies that, with probability $1-O(e^{-cn})$, we can write
\[ M/D_{1}' = \begin{pmatrix} 0 &B_{3} &B_{4} \\ B_{3}^{T} &C_{1} &C_{2} \\ B_{4}^{T} &C_{2}^{T} & C_{3} \end{pmatrix}, \]
where $C_3$ is nonsingular and $\dim(C_3)=s\geq\left(\alpha(1 -\frac{1}{p})-\epsilon\right) n$.
Taking $\epsilon$ to be sufficiently small so that $\alpha\left( \frac{1}{p}+\epsilon\right)<\frac{1}{p}-\epsilon$, we can assume that with probability $1-O(e^{-cn})$, \[r>\left(\frac{1}{p}-\epsilon\right)n>\alpha \left( \frac{1}{p}+\epsilon\right) n>\alpha n - s.\]
Since $C_{1}$ is $(\alpha n-s)\times (\alpha n-s)$ and ${\operatorname{height}}(B_{3})=r$, we can assume that ${\operatorname{width}}(B_{3})=\dim(C_{1})=\alpha n-s<r={\operatorname{height}}(B_{3})$.
Note that we can drop rows and columns from $C_3$ if necessary, thus increasing the width of $B_{3}$, up to a maximum of ${\operatorname{width}}(B_{3})+{\operatorname{width}}(B_{4})=\alpha n> r$. In particular, we can assume that $s=\alpha n-r$, so that $B_{3}$ is an $r\times r$ square matrix.
We now wish to show that ${\operatorname{corank}}(M/D_{1}')$ is usually within small distance of zero. To do this, we will split the rows into three sets, and successively show that most of the rows are independent:
First, let $u_{1},\dots,u_{\alpha n-r}$ be the bottom $\alpha n-r$ rows (those with elements in $C_{3}$). Since they contain as subrows the rows of $C_{3}$ (which we know are independent), they are independent.
Secondly, let $v_{1},\dots,v_{r}$ be the top $r$ rows. By Corollary~\ref{cor:rectrank}, ${\operatorname{corank}}(B_{3})$ is usually within small distance of zero. In fact, we can make a stronger claim: We claim that the corank of the $\alpha n\times\alpha n$ matrix $\begin{pmatrix} B_{3} & B_{4}\\ C_{2}^{T} & C_{3} \end{pmatrix}$ is usually within small distance of zero.
Let $v_{i}'$ be the top $r$ rows of this matrix, and $u_{i}'$ be the bottom $\alpha n-r$ rows. As before, the $u_{i}'$ are independent since their tails are the rows of $C_{3}$.
Now assume that the first $k$ of the $v_{i}'$ are independent both of each other and of the $u_{i}'$ (that is, the set $\{u_{1}',\dots,u_{\alpha n-r}',v_{1}',\dots,v_{k}'\}$ is independent). We claim that the probability that $v_{k+1}'$ is dependent on $\{u_{1}',\dots,u_{\alpha n-r}',v_{1}',\dots,v_{k}'\}$ is at most $(1-\beta)^{r-k}$.
To see this, first choose a set $J$ of $\alpha n-r+k$ indices so that $\{u_{1}',\dots,u_{\alpha n-r}',v_{1}',\dots,v_{k}'\}$ are still independent when restricted to the entries in $J$. If $v_{k+1}'$ is dependent on $\{u_{1}',\dots,u_{\alpha n-r}',v_{1}',\dots,v_{k}'\}$, then we can write $v_{k+1}'=\sum_{i\leq k} a_{i}v_{i}'+\sum b_{i}u_{i}'$, where the $a_{i}$ and the $b_{i}$ are determined by the entries in $J$. This leaves $r-k$ undetermined entries of $v_{k+1}'$, all of which must be equal to the corresponding entry of $\sum_{i\leq k} a_{i}v_{i}'+\sum b_{i}u_{i}'$.
But the entries of $v_{k+1}$ all have min-entropy at least $\beta$ with respect to the other vectors, so each of them is equal to the corresponding entry of $\sum_{i\leq k} a_{i}v_{i}'+\sum b_{i}u_{i}'$ with conditional probability at most $1-\beta$, hence the probability that all $r-k$ of them satisfy this equality is at most $(1-\beta)^{r-k}$. From here, we can conclude that the corank of the matrix is usually within small distance of zero by following the same reasoning as the proof of Theorem~\ref{thm:rectrank}.
Finally, it remains to show that the middle $r$ rows of $M/D_{1}'$, labeled $w_{1},\dots,w_{r}$, cannot add much to the corank. That is, we need to find a set of independent rows of $M/D_{1}'$ whose size is usually within small distance of $r+\alpha n$. We will assume that the $u_{i}'$ and $v_{i}'$ are all independent (otherwise we only have to drop $k$ of them, where $k$ is usually within small distance of zero).
We proceed in a similar manner to before. For the first $w_{1}$, we let $J$ be the set of the last $\alpha n$ indices. Since the $u_{i}'$ and $v_{i}'$ are all independent, there exists a unique set of coefficients $a_{i},b_{i}$ so that $w_{1}=\sum a_{i}u_{i}+\sum b_{i}v_{i}$ holds when restricted to the last $\alpha n$ indices. As the first $r$ entries of $w_{1}$ have min-entropy at least $\beta>0$ with respect to the rest of the matrix, they all match the corresponding entries of $\sum a_{i}u_{i}+\sum b_{i}v_{i}$ with probability at most $(1-\beta)^{r}$.
We proceed similarly, showing that for each $w_{k+1}$ such that the set $\{u_{1},\dots,u_{\alpha n-r},v_{1},\dots,v_{r},w_{1},\dots,w_{k}\}$ is independent, the probability that $w_{k+1}$ is dependent on it is at most $(1-\beta)^{r-k}$. As before, this shows that the number of independent $w_{i}$ is usually within small distance of $r$.
Putting this all together, we get a set of independent rows whose size is usually within small distance of the height of $M/D_{1}'$. The corank of $M/D_{1}'$ is at most the number of rows not in our set, which is usually within small distance of zero. This completes the proof.
\end{proof}
\section{Proofs of the Corollaries} \label{sec:details} In this section, we prove the corollaries of Theorem~\ref{thm:main}.
We begin by proving Corollary~\ref{cor:balanced}:
\begin{proof}[Proof of Corollary~\ref{cor:balanced}] Let $\epsilon>0$, and let $X=X(n,p)$ be the $p$-rank of $\Gamma(G)$. We need to show that ${\mathbb E}(X)<\epsilon n$ for all sufficiently large $n$.
Assume that $\epsilon<\frac{1}{2}$, and remove $\frac{\epsilon}{2} n$ vertices from the right side of the graph. By Theorem~\ref{thm:main}, the expected $p$-rank of the resulting graph is $O(1)$. Since removing a vertex changes the $p$-rank of the sandpile group by at most $1$, removing $\frac{\epsilon}{2} n$ vertices changes it by at most $\frac{\epsilon}{2} n$. Hence $X\leq \frac{\epsilon}{2}n+O(1)<\epsilon n$ for large $n$, which completes the proof. \end{proof}
We now prove Corollary~\ref{cor:cyclic}. To do this, we show that the $2$-rank of $\Gamma(G)$ when $\alpha<\frac{1}{2}$ has low probability of being $\leq 1$, so the $2$-part of the group has low probability of being cyclic.
\begin{proof}[Proof of Corollary~\ref{cor:cyclic}] Consider the $2-$rank of $\Gamma(G)$. As we saw in Theorem~\ref{thm:strongMain}, the $2$-rank of $\Gamma(G)$ is usually within small distance of $\max\left(B\left(n,\frac{1}{2}\right)-\alpha n,0\right)$. As $\alpha<\frac{1}{2}$, we have that by Hoeffding's inequality, \[B\left(n,\frac{1}{2}\right)>\left(\frac{1}{2}-\epsilon\right)n>\alpha n+\epsilon n\] holds with probability $1-O(e^{-cn})$ for all $\epsilon>0$, where the second inequality will hold when $\epsilon<\frac{1}{2}\left(\frac{1}{2}-\alpha\right)$. Hence $\max\left(B\left(n,\frac{1}{2}\right)-\alpha n,0\right)>\epsilon n$ with probability $1-O(e^{-cn})$.
But the $2$-rank of $\Gamma(G)$ is usually within small distance of $\max\left(B\left(n,\frac{1}{2}\right)-\alpha n,0\right)$. Hence the $2$-rank of $\Gamma(G)$ is larger than $\frac{1}{2}\epsilon n$ with probability $1-O(e^{-cn})$ for some $c>0$, and in particular will be at least $2$ with probability $1-O(e^{-cn})$.
But if the $2$-rank of $\Gamma(G)$ is at least $2$, $\Gamma(G)$ cannot be cyclic. Hence the probability that $\Gamma(G)$ is cyclic is bounded by $O(e^{-cn})$ for some constant $c>0$. \end{proof}
\end{document}
\begin{document}
\title[On smooth divisors of a projective hypersurface.]{On smooth divisors of a projective hypersurface.}
\author{Ellia Ph.}
\address{Dipartimento di Matematica, via Machiavelli 35, 44100 Ferrara (Italy)} \email{[email protected]}
\author{Franco D.} \address{Dipartimento di Matematica e Applicazioni "R. Caccioppoli", Univ. Napoli "Federico II", Ple Tecchio 80, 80125 Napoli (Italy)} \email{[email protected]}
\date{16/06/2004}
\maketitle
\hskip7cm{\it Dedicated to Christian Peskine.}
\section*{Introduction.}
This paper deals with the existence of smooth divisors of a projective hypersurface $\Sigma \subset\mathbb{P}^n $ (projective space over an algebraically closed field of characteristic zero). According to a celebrated conjecture of Hartshorne, at least when $n\geq 7$, any such variety should be a complete intersection. Since the existence of smooth, non complete intersection, subcanonical $X \subset \mathbb{P}^n$ of codimension two is equivalent, via the correspondence of Serre, to the existence of indecomposable rank two vector bundles on $\mathbb{P}^n$ and since no indecomposable rank two vector bundle on $\mathbb{P}^n $, $n\geq 5$, is presently known, it is widely believed that any smooth, subcanonical subvariety of $\mathbb{P}^n $, $n\ge5$, of codimension two is a complete intersection. Furthermore recall that, by a theorem of Barth, the subcanonical condition is automatically satisfied if $n \geq 6$. This would imply, in turn, that a smooth (subcanonical if $n=5$) divisor of a projective hypersurface $\Sigma \subset\mathbb{P}^n $, $n\geq 5$, is a complete intersection too. \par In this paper we show that, roughly speaking, for any $\Sigma \subset \mathbb{P}^n$ there can be at most $\textit{finitely many}$ exceptions to the last statement. Indeed our main result is:
\begin{theorem} \label{mainthm} Let $\Sigma \subset \mathbb{P}^n$, $n \geq 5$ be an integral hypersurface of degree $s$. Let $X \subset \Sigma$ be a smooth variety with $dim(X)=n-2$. If $n=5$, assume $X$ subcanonical. If $X$ is not a complete intersection in $\mathbb{P}^n$, then: $$d(X) \leq \frac{s(s-1)[(s-1)^2-n+1]}{n-1}+1.$$ \end{theorem}
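To fix ideas, for $n=5$ and $s=5$ the bound reads $d(X)\leq \frac{5\cdot 4\cdot 12}{4}+1=61$: a smooth subcanonical threefold of degree at least $62$ lying on an integral quintic hypersurface of $\mathbb{P}^5$ is necessarily a complete intersection (compare with Theorem \ref{n=s=5} below, where the assumption on the degree is removed).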
In other words, a smooth codimension two subvariety of $\mathbb{P}^n$, $n \geq 5$ (if $n=5$, we assume $X$ subcanonical) which is not a complete intersection cannot lie on a hypersurface of too low degree (too low with respect to its own degree) and, {\it on a fixed hypersurface}, Hartshorne's conjecture in codimension two is ``asymptotically'' true. \par The starting point is the Severi-Lefschetz theorem, which states that if $n \geq 4$ and if $X$ is a Cartier divisor on $\Sigma$, then $X$ is the complete intersection of $\Sigma$ with another hypersurface. For instance if $\Sigma $ is either smooth or singular in a finite set of points and if $n \geq 5$, the picture is very clear: \begin{enumerate} \item there $\textit{exist}$ smooth $X\subset \Sigma$ with $dim(X)=n-2$ and of arbitrarily large degree; \item $\textit{any}$ smooth $X\subset \Sigma$ with $dim(X)=n-2$ $\textit{is}$ a complete intersection of $\Sigma$ with another hypersurface; \item $\textit{no}$ smooth $X\subset \Sigma$ with $dim(X)=n-2$ can meet the singular locus of $\Sigma$. \end{enumerate}
\vskip1cm
Using Theorem \ref{mainthm} we get (the first statement comes again from an easy application of the Theorem of Severi-Lefschetz-Grothendieck):
\begin{theorem} \label{sigma} Let $\Sigma \subset \mathbb{P}^n$, $n\geq 5$, be an integral hypersurface of degree $s$ with \par $dimSing(\Sigma)\geq1$. \begin{enumerate} \item If $n\geq 6$ and $dimSing(\Sigma)\leq n-5$ then $\Sigma $ does not contain any smooth variety of dimension $n-2$. \item Suppose $dimSing(\Sigma)\geq n-4$. If $X\subset \Sigma $ is smooth, subcanonical, with $dim(X)=n-2$ then $d(X)\leq s\frac{(s-1)((s-1)^2-n+1)}{n-1}+1$. \end{enumerate} \end{theorem}
We point out a consequence of this result.
\begin{corollary} \label{sigmahilb} Let $\Sigma \subset \mathbb{P}^n$, $n\geq 5$, be an integral hypersurface s.t. $dimSing(\Sigma)\geq 1$. \begin{enumerate} \item If $n\geq 6$ and $dimSing(\Sigma)\leq n-5$ then $\Sigma $ does not contain any smooth variety of dimension $n-2$. \item Suppose $dimSing(\Sigma)\geq n-4$. Then there are only finitely many components of $\mathcal{H}ilb(\Sigma)$ containing smooth, subcanonical varieties of dimension $n-2$. \end{enumerate} \end{corollary}
Last but not least, at the end of the paper we show how this circle of ideas allows us to improve the main results of \cite{EF} about subcanonical varieties of $\mathbb{P}^5 $ and $\mathbb{P}^6$:
\begin{theorem} \label{n=s=5} Let $X \subset \mathbb{P}^5$ be a smooth threefold with $\omega _X \simeq \ensuremath{\mathcal{O}} _X(e)$. If $h^0(\ensuremath{\mathcal{I}} _X(5)) \neq 0$, then $X$ is a complete intersection. \end{theorem}
\begin{theorem} \label{n=s=6} Let $X \subset \mathbb{P}^6$ be a smooth fourfold. If $h^0(\ensuremath{\mathcal{I}} _X(6)) \neq 0$, then $X$ is a complete intersection. \end{theorem}
Theorem \ref{mainthm} follows, thanks to a crucial remark essentially proved in \cite{EP} (see Lemma \ref{l1}), from a bound of $e$ (where $\omega _X \simeq \ensuremath{\mathcal{O}} _X(e)$), see Theorem \ref{thmSpec}, which can be viewed as a strong (since the degree is not involved) generalization of the "Speciality theorem" of Gruson-Peskine \cite{GP}. The proof of this bound is quite simple if $X \cap Sing(\Sigma )$ has the right dimension. This is done in the first section where a weaker version of Theorem \ref{thmSpec} and hence of Theorem \ref{mainthm} is proved (if $n=5$ we assume $Pic(X) \simeq \mathbb{Z} .H$). In the second section we show how a refinement of the proof yields our final result. Finally let's observe that our approach doesn't apply to the case $n=4$.
\vskip1cm
\textbf{Acknowledgment:} It is a pleasure to thank Enzo Di Gennaro who explained to one of us (D.F.) some of the deep results of \cite{K}.
\section{Reduction and the speciality theorem, weak version.}
\begin{notations} Given a projective scheme $Y \subset \mathbb{P}^n $ we denote by $d(Y)$ the \textit{degree} of $Y$. \end{notations}
\begin{notations} \label{not2} In this section, $X \subset \mathbb{P}^n, n \geq 5$, will denote a smooth, non degenerate, codimension two subvariety which is not a complete intersection. We will always assume $X$ subcanonical: $\omega _X \simeq \ensuremath{\mathcal{O}} _X(e)$; notice that this condition is fulfilled if $Pic(X) \simeq \mathbb{Z} .H$; finally, thanks to a theorem of Barth, this last condition is automatically fulfilled if $n \geq 6$.\par\noindent By Serre's construction we may associate to $X$ a rank two vector bundle: $$ 0 \to \ensuremath{\mathcal{O}} \to E \to \ensuremath{\mathcal{I}} _X(e+n+1) \to 0 $$ The Chern classes of $E$ are: $c_1(E)=e+n+1,c_2(E)=d(X)=:d$.\par\noindent Let $\Sigma$ be an hypersurface of degree $s $ containing $X$. Then $\Sigma $ gives a section of $\ensuremath{\mathcal{I}} _X(s)$ which lifts to a section $\sigma _{\Sigma}\in H^0(E(-e-n-1+s))$ (notice that $\sigma_{\Sigma }$ is uniquely defined if $e+n+1-s<0$).
{\it Assume} that $Z$, the zero-locus of $\sigma_{\Sigma }$, has codimension two. Notice that since $X$ is not a complete intersection, this certainly holds if $s = \min\{t\:|\:h^0(\ensuremath{\mathcal{I}} _X(t)) \neq 0\}$. Anyway, if $Z$ has codimension two, then $d(Z)=c_2(E(-e-n-1+s))=d-s(e+n+1-s)$ and $\omega _Z \simeq \ensuremath{\mathcal{O}} _Z(-e-2n-2+2s)$. \end{notations}
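Recall that for a rank two bundle $F$ and any integer $t$ one has $c_2(F(t))=c_2(F)+tc_1(F)+t^2$ and $c_1(F(t))=c_1(F)+2t$; with $F=E$ and $t=s-(e+n+1)$ this gives $c_2(E(t))=d+s^2-s(e+n+1)=d-s(e+n+1-s)$ and $c_1(E(t))=2s-(e+n+1)$, whence $\omega _Z \simeq \ensuremath{\mathcal{O}} _Z(c_1(E(t))-n-1)=\ensuremath{\mathcal{O}} _Z(-e-2n-2+2s)$, which is the computation behind the formulas above.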
\begin{remark} \label{snb} By \cite{R}, if $X\subset \Sigma \subset \mathbb{P}^n $, $n\geq 3$, with $\omega _X \simeq \ensuremath{\mathcal{O}} _X(e)$ and $d(\Sigma)\leq n-2$ then $X$ is a complete intersection, hence in the remainder of this paper we will assume $s\geq n-1$. \end{remark}
\begin{remark} \label{jac} Notice that $E(-e-n-1)\mid _X\simeq \ensuremath{\mathcal{N}} ^*_X$. It is well known that the scheme $X\cap Z$ is the base locus of the jacobian system of $\Sigma $ on $X$: $X\cap Z=X\cap Jac(\Sigma)$. So, the \textit{fundamental cycle} (\cite{F} 1.5) of $X\cap Z$ in $\mathcal{A}_*(X)$ is $c_2(\ensuremath{\mathcal{N}} ^*_X(s))$ as soon as $X$ and $Z$ intersect in the expected codimension. \end{remark}
The main goal of this section is to prove:
\begin{theorem}[Speciality theorem, weak version] \label{thmSpecW} Let $X \subset \mathbb{P}^n$, $n \geq 5$ be a smooth codimension two subvariety. If $n=5$ assume $Pic(X) \simeq \mathbb{Z} .H$. Let $\Sigma$ be an hypersurface of degree $s$ containing $X$. If $X$ is not a complete intersection, then: $$e \leq \frac{(s-1)[(s-1)^2-n+1]}{n-1}-n+1$$ where $\omega _X \simeq \ensuremath{\mathcal{O}} _X(e)$. \end{theorem}
Let's see how this is related with a bound of the degree. First recall the following:
\begin{lemma} \label{l1} Let $X \subset \mathbb{P}^n$, $n \geq 4$, be a smooth codimension two subvariety which is not a complete intersection. Let $\Sigma$ be an hypersurface of minimal degree containing $X$. Set $s:=d(\Sigma )$. \begin{enumerate} \item $n-4 \leq dim(X \cap Sing(\Sigma )) \leq n-3$. \item If $\omega _X \simeq \ensuremath{\mathcal{O}} _X(e)$, then $d(X) \leq s(n-1+e)+1$. \item If $dim(X \cap Sing(\Sigma ))=n-3$ and if $Pic(X) \simeq \mathbb{Z} .H$, then $d(X) \leq (s-2)(n-1+e)+1$. \end{enumerate} \end{lemma}
\begin{proof} The first item is \cite{EF}, Lemma 2.1; 2) is \cite{EF} Lemma 2.2 (i) and the last item is \cite{EF} Lemma 2.2 (ii) with $l=2$ (thanks to Severi and Zak theorems $h^1(\ensuremath{\mathcal{I}} _X(1))=0$, \cite{Z}). \end{proof}
Theorem \ref{thmSpecW} and the second item of this lemma give us immediately:
\begin{theorem} \label{thmA} Let $\Sigma \subset \mathbb{P}^n$, $n \geq 5$, be an integral hypersurface of degree $s$. Let $X \subset \Sigma$ be a smooth subvariety with $dim(X)=n-2$. If $n=5$ assume $Pic(X) \simeq \mathbb{Z} .H$. If $X$ is not a complete intersection, then $d(X) < \frac{s(s-1)[(s-1)^2-n+1]}{n-1}+1$. \end{theorem}
In order to prove Theorem \ref{thmSpecW} we need some preliminary results.
\begin{lemma} \label{Ysubcanonical} Let $\Sigma$ denote an hypersurface of degree $s$ containing $X$. With assumptions ($codim(\sigma _{\Sigma})_0=2$) and notations as in \ref{not2}, assume $dim(X\cap Z)=n-4$. Then $Y:=X\cap Z$ is a subcanonical, l.c.i. scheme with $\omega_Y\simeq\ensuremath{\mathcal{O}}_Y(2s-n-1)$. Moreover $Y$ is the base locus of the jacobian system of $\Sigma$ in $X$. \end{lemma}
\begin{proof} We are assuming that $Y$ is a proper intersection between $X$ and $Z$ hence $$ 0 \to \ensuremath{\mathcal{O}} \to E\mid_X(-e-n-1+s) \to \ensuremath{\mathcal{I}} _{Y, X}(-e-n-1+2s) \to 0 $$ so $\ensuremath{\mathcal{N}}^*_{Y,X}\simeq E\mid_X(-s)$ and the first statement follows by adjunction. For the last statement, use \ref{jac}. \end{proof}
\begin{notations} Keep the assumptions of Lemma \ref{Ysubcanonical} and denote by $\Sigma_1$ and $\Sigma_2$ two general partials of $\Sigma$. Since $dim(X\cap Z)=n-4$, $C:= X\cap \Sigma_1\cap \Sigma_2$ is a subcanonical, l.c.i. scheme containing $Y$ such that $\ensuremath{\mathcal{N}}_{C,X}\simeq \ensuremath{\mathcal{O}} _X(s-1) \oplus \ensuremath{\mathcal{O}}_X(s-1)$. We have $\omega_C\simeq \ensuremath{\mathcal{O}}_C(e+2s-2)$. The scheme $C$ is a complete intersection in $X$ which links $Y$ to another subscheme.\\ \end{notations}
\begin{lemma} \label{R} With notations as in Lemma \ref{Ysubcanonical}, denote by $R$ the residual to $Y$ with respect to $C$. Then $C=Y\cup R$ is a geometric linkage and $\Delta:= R\cap Y$ is a Cartier divisor of $Y$ such that: $\ensuremath{\mathcal{I}} _{\Delta , Y}\simeq \ensuremath{\mathcal{O}}_Y (-e-n+1)$.\\ Furthermore: $d(\Delta)\leq (s-1)d(X)((s-1)^2-d(Z))$ and:\\
$d(Z)(e+n+1) \leq (s-1)[(s-1)^2-d(Z)]$.
\end{lemma}
\begin{proof} Denote by $Y_{red}$ the support of $Y$ and set $Y_{red}=Y_1 \cup \dots \cup Y_r$ where $Y_i$, $1\leq i \leq r$, are the irreducible components of $Y_{red}$. Furthermore, denote by $P_i$ the general point of $Y_i$. Since $Y$ is l.c.i. in $X$ and since $\ensuremath{\mathcal{I}} _{Y, X}(s-1)$ is globally generated by the partials of $\Sigma$, we can find two general elements in $Jac(\Sigma)$ generating the fibers of $\ensuremath{\mathcal{N}}^* _{Y, X}(s-1)$ at each $P_i$, $1\leq i\leq r$. This implies that $R\cup Y$ is a geometric linkage. \par Now consider the local Noether sequence (exact sequence of liaison): $$ 0\to \ensuremath{\mathcal{I}}_C \to \ensuremath{\mathcal{I}} _R \to \omega_Y \otimes \omega_C^{-1}\to 0. $$ we get $$ \omega_Y \otimes \omega_C^{-1}\simeq\frac{\ensuremath{\mathcal{I}}_R}{\ensuremath{\mathcal{I}}_C}\simeq \frac{\ensuremath{\mathcal{I}}_R +\ensuremath{\mathcal{I}}_Y}{\ensuremath{\mathcal{I}}_C+\ensuremath{\mathcal{I}} _Y}\simeq \frac{\ensuremath{\mathcal{I}}_{\Delta}}{\ensuremath{\mathcal{I}}_Y}\simeq \ensuremath{\mathcal{I}} _{\Delta , Y}$$ (the second isomorphism follow by geometric linkage, since $\ensuremath{\mathcal{I}}_R\cap\ensuremath{\mathcal{I}}_Y =\ensuremath{\mathcal{I}}_C$) hence $\omega_Y \otimes \omega_C^{-1}\simeq \ensuremath{\mathcal{O}}_Y(-e-n+1)\simeq \ensuremath{\mathcal{I}} _{\Delta , Y}$ and we are done.\\ For the last statement, the scheme $\Delta \subset R $ is the base locus of the jacobian system of $\Sigma $ in $R$, hence $\Delta \subset \tilde{\Sigma}\cap R $ with $\tilde{\Sigma }$ a general element of $Jac(\Sigma)$ and $d(\Delta )\leq d(R)\cdot (s-1)$. We conclude since $d(R)\cdot (s-1)=(d(C)- d(Z))\cdot (s-1)= ((s-1)^2d(X)-d(Z)d(X))\cdot (s-1)$. The last inequality follows from $d(\Delta )=d(Y)\cdot (e+n+1)=d(X)\cdot d(Z) \cdot (e+n+1)$. \end{proof}
Now we can conclude the proof of Theorem \ref{thmSpecW} (and hence of Theorem \ref{thmA}).
\begin{proof}[Proof of Theorem \ref{thmSpecW}] It is enough to prove the theorem for $s$ minimal. Let $\Sigma$ be an hypersurface of minimal degree containing $X$, we set $s:=d(\Sigma )$ and $d:=d(X)$. According to Lemma \ref{l1} we distinguish two cases.\\ 1) $dim(X \cap Sing(\Sigma ))=n-3$. In this case, by Lemma \ref{l1}, we have $d \leq (s-2)(n-1+e)+1$. On the other hand $d(Z) = d-s(e+n+1-s)$ (see \ref{not2}). It follows that: $d(Z) \leq (s-1)^2 -2(n-1+e)$. Since $d(Z) \geq n-1$ by \cite{R}, we get: $\frac{(s-1)^2-n+1}{2}-n+1 \geq e$. One checks (using $s \geq n-1$) that this implies the bound of Theorem \ref{thmSpecW}.\\ 2) $dim(X \cap Sing(\Sigma ))=n-4$. By the last inequality of Lemma \ref{R}, $e \leq (s-1)[\frac{(s-1)^2}{d(Z)}-1]-n+1$. Since $d(Z) \geq n-1$ by \cite{R}, we get the result. \end{proof}
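\begin{remark} Let us spell out the verification behind ``One checks'' in case 1) above. Since $s \geq n-1$ and $n \geq 5$ we have $(s-1)^2-n+1 \geq (n-2)^2-n+1 > 0$ and $\frac{s-1}{n-1} \geq \frac{n-2}{n-1} \geq \frac{1}{2}$, hence $$\frac{(s-1)^2-n+1}{2} \leq \frac{(s-1)[(s-1)^2-n+1]}{n-1},$$ so the inequality $e \leq \frac{(s-1)^2-n+1}{2}-n+1$ obtained in case 1) indeed implies the bound of Theorem \ref{thmSpecW}. \end{remark}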
\section{The speciality theorem.}
In this section we will refine the proof of Theorem \ref{thmSpecW} for $n=5$ in order to prove Theorem \ref{mainthm} of the introduction. For this we have to assume only that $X$ is subcanonical, which, of course, is weaker than assuming $Pic(X) \simeq \mathbb{Z} .H$. The assumption $Pic(X) \simeq \mathbb{Z} . H$ is used just to apply the last statement of Lemma \ref{l1} in order to settle the case $dim(X \cap Sing(\Sigma ))=n-3$. Here instead we will argue like in the proof of the case $dim(X \cap Sing(\Sigma ))=n-4$, but working modulo the divisorial part (in $X$) of $X \cap Sing(\Sigma )$; this will introduce some technical complications, but conceptually, the proof runs as before. Since the proof works for every $n \geq 5$ we will state it in this generality giving thus an alternative proof of Theorem \ref{thmSpecW}.
\begin{notations} \label{not3} In this section, with assumptions and notations as in \ref{not2}, we will assume furthermore that $dim(X\cap Z)=n-3$ and will denote by $L$ the dimension $n-3$ part of $X\cap Z\subset X$; moreover we set $\ensuremath{\mathcal{L}} = \ensuremath{\mathcal{O}} _X(L) $. \par\noindent Set $Y':=res_L(X\cap Z)$, we have $\ensuremath{\mathcal{I}} _{Y', X}:=(\ensuremath{\mathcal{I}} _{X\cap Z , X}:\ensuremath{\mathcal{I}} _{L , X})$. Since we have: $$0 \to \ensuremath{\mathcal{O}} \to E\mid_X(-e-n-1+s)\otimes \ensuremath{\mathcal{L}} ^* \to \ensuremath{\mathcal{I}} _{Y' , X}(-e-n-1+2s)\otimes (\ensuremath{\mathcal{L}} ^*)^2 \to 0$$ it follows that $\ensuremath{\mathcal{N}}^*_{Y',X}\simeq E\mid_X(-s)\otimes \ensuremath{\mathcal{L}}$ and $Y'$ is a l.c.i. scheme with $\omega_{Y'}\simeq\ensuremath{\mathcal{O}}_Y(2s-n-1)\otimes (\ensuremath{\mathcal{L}}^*)^2$. \par \noindent Denote by $\Sigma_1$ and $\Sigma_2$ two general partials of $\Sigma$. Since $X \cap Z = X \cap Sing(\Sigma )$, $\Sigma_1$ and $\Sigma_2$ both contain $L$. Let $C':=res_L(X\cap \Sigma_1\cap \Sigma_2)$. Since $\ensuremath{\mathcal{N}}_{C',X}\simeq (\ensuremath{\mathcal{O}} _{C'}(s-1) \oplus \ensuremath{\mathcal{O}}_{C'}(s-1))\otimes \ensuremath{\mathcal{L}}^*$. We have $\omega_{C'}\simeq \ensuremath{\mathcal{O}}_{C'}(e+2s-2)\otimes (\ensuremath{\mathcal{L}}^*)^2$. \end{notations}
\begin{lemma} \label{R2} Denote by $R'$ the residual to $Y'$ with respect to $C'$. Then $C'=Y'\cup R'$ is a geometric linkage and $\Delta':= R'\cap Y'$ is a Cartier divisor of $Y'$ such that: $\ensuremath{\mathcal{I}} _{\Delta' , Y'}\simeq \ensuremath{\mathcal{O}}_{Y'}(-e-n+1)$. \end{lemma}
\begin{proof} We argue as in the proof of Lemma \ref{R}: denote by $Y_{red}'$ the support of $Y'$, set $Y_{red}'=Y_1' \cup \dots \cup Y_r'$, where $Y_i'$, $1\leq i \leq r$, are the irreducible components of $Y_{red}'$, and denote by $P_i$ the general point of $Y_i'$. Choose the partials $\Sigma_1$ and $\Sigma _2$ in such a way that they generate the ideal sheaf of $X\cap Z$ at each $P_i$, $1\leq i\leq r$. In order to check that $R'\cup Y'$ is a geometric linkage we only need to consider the components contained in $L$. Consider a point $P_i\in L$. Since $L\subset X\cap Z \subset \Sigma_1 \cap \Sigma _2$, the local equations of $X\cap Z$ in $(\ensuremath{\mathcal{I}} _{Y, X}(s-1))_{P_i}$ have the form $(lf,lg)$ where $l$ is the equation of $L$, $lf$ is the equation of $\Sigma_1$ and $lg$ the equation of $\Sigma_2$. Since $Y':=res_L(X\cap Z)$ and $C':=res_L(X\cap \Sigma_1\cap \Sigma_2)$ then the ideals of both $Y'$ and $C'$ at $P_i$ are equal to $(f,g)\subset (\ensuremath{\mathcal{I}} _{Y, X}(s-1))_{P_i}$. This implies that $R'\cup Y'$ is a geometric linkage and the remainder of the proof is similar as above. \end{proof}
\begin{lemma} \label{lemmaN-3} Let $\Sigma \subset \mathbb{P}^n$, $n\geq 5$, be an hypersurface of degree $s$ containing $X$, a smooth variety with $dim(X)=n-2$ and $\omega _X \simeq \ensuremath{\mathcal{O}} _X(e)$. Assume $\sigma _{\Sigma}$ vanishes in codimension two and $dim(X \cap Sing(\Sigma ))=n-3$ (see \ref{not2}). Then $e < s-n$ or $d(Z)\cdot (e+n+1) \leq (s-1)[(s-1)^2-d(Z)]$. \end{lemma}
\begin{proof} We keep back the notations of \ref{not3}. Notice that the fundamental cycle of $Y'$ in $\textbf{A} _{n-4}(X)$ is $$c_2(E\mid_X(-e-n-1+s)\otimes\ensuremath{\mathcal{L}}^*)=d(Z)H^2 + (e+n+1-2s)H\cap L +L^2\:\:(+)$$ ($H$ represents the hyperplane class and $\cap $ denotes the \textit{cap product} in $\textbf{A}_*(X)$. By abuse of notations, for any $A\in \textbf{A} _{i}(X)\subset \textbf{A}_*(X)$ we denote by $d(A)\in \mathbb{Z}$ the \textit{degree} of $A$: $d(A):= d(A\cap H^i)$, $A\cap H^i\in A_0(\mathbb{P}^n )\simeq \mathbb{Z}$. \item For any closed subscheme $\Gamma \subset X$ we still denote by $\Gamma \in \textbf{A} _{*}(X)$ the \textit{fundamental cycle} of $\Gamma $ (\cite{F} 1.5).\\ We claim that: $$d(\Delta')\leq (s-1)d(X)((s-1)^2-d(Z))-[(s-1)(e+n-1)+(s-1)^2-d(Z)]d(H^2\cap L)+$$ $$+(e+n-1)d(H\cap L^2)\:\:(*) $$ Assume the claim for a while and let's show how to conclude the proof. Combining \ref{R2} with $(*)$ we get $$ d(\Delta')=d(Y')(e+n-1)\leq $$ $$ \leq (s-1)d(X)((s-1)^2-d(Z))-[(s-1)(e+n-1)+(s-1)^2-d(Z)]d(H^2\cap L)+$$ $$+(e+n-1)d(H\cap L^2) $$ and by $(+)$ above $$ d(\Delta')=(e+n-1)d(H\cap(d(Z)H^2 + (e+n+1-2s)H\cap L +L^2))\leq $$ $$ \leq (s-1)d(X)((s-1)^2-d(Z))-[(s-1)(e+n-1)+(s-1)^2-d(Z)]d(H^2\cap L)+$$ $$+(e+n-1)d(H\cap L^2). $$ If $e<s-n$ we are done, so we can assume $e+n\geq s$. We have $$d(X)d(Z)(e+n-1)\leq (s-1)d(X)((s-1)^2-d(Z))+$$ $$+[(e+n-1)(s-e-n)-(s-1)^2+d(Z)]d(L)$$
To conclude it is enough to check that $(e+n-1)(s-e-n)-(s-1)^2+d(Z)\leq 0$. Since $d(Z)=d-s(e+n+1-s)$ (see \ref{not2}) and since $d \leq s(n-1+e)+1$ by Lemma \ref{l1}, this follows from: $s(n-1+e)+1 \leq s(e+n+1-s)+(s-1)^2+(e+n-s)(e+n-1)$. A short computation shows that this is equivalent to $0 \leq (e+n-s)(e+n-1)$, which holds thanks to our assumption $e+n\geq s$.\\ {\it Proof of the claim:}\\ Denote by $\mid M \mid $ the moving part of the Jacobian system of $\Sigma $ in $X$ and by $\ensuremath{\mathcal{M}} $ the corresponding line bundle. The scheme $\Delta '$ is the base locus of $\mid M \mid_{R'}$ hence $\Delta '\subset \tilde{M}\cap R'$ where $\tilde{M}$ is a general element of $\mid M \mid $. We have $$ d(\Delta ')\leq d(\tilde{M}\cap R')=d(c_1(\ensuremath{\mathcal{M}} _{R'})). $$ \par In order to prove the statement we need to calculate the cycle $c_1(\ensuremath{\mathcal{M}} _{R'})\in \textbf{A} _{n-5}(X)$. First of all we calculate the fundamental cycle of $R'$ in $\textbf{A} _{n-4}(X)$: $$R'\sim C'-Y'\sim ((s-1)H-L)^2-(d(Z)H^2 + (e+n+1-2s)H\cap L +L^2)= $$ $$ =((s-1)^2-d(Z))H^2-(e+n-1)H\cap L.$$ Finally, the cycle $c_1(\ensuremath{\mathcal{M}} _{R'})\in \textbf{A} _{n-5}(X)$ is: $$c_1(\ensuremath{\mathcal{M}} _{R'})\sim ((s-1)H-L)\cap R'\sim $$ $$ \sim (s-1)((s-1)^2-d(Z))H^3-((s-1)(e+n-1)+(s-1)^2-d(Z))H^2\cap L+(e+n-1)H\cap L^2.$$ The claim follows from: $$ d(\Delta')\leq d(c_1(\ensuremath{\mathcal{M}} _{R'}))= $$ $$ d((s-1)((s-1)^2-d(Z))H^3-((s-1)(e+n-1)+(s-1)^2-d(Z))H^2\cap L+(e+n-1)H\cap L^2) $$ \end{proof}
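\begin{remark} The ``short computation'' invoked in the proof above reduces to the identity $s(n-1+e)+1-s(e+n+1-s)-(s-1)^2=s(s-2)+1-(s-1)^2=0$, so that $s(n-1+e)+1 \leq s(e+n+1-s)+(s-1)^2+(e+n-s)(e+n-1)$ is indeed equivalent to $0 \leq (e+n-s)(e+n-1)$. \end{remark}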
Now we can state the improved version of Theorem \ref{thmSpecW}:
\begin{theorem}[Speciality theorem] \label{thmSpec} Let $X \subset \mathbb{P}^n$, $n \geq 5$, be a smooth variety with $dim(X)=n-2$ and $\omega _X \simeq \ensuremath{\mathcal{O}} _X(e)$. Let $\Sigma \subset \mathbb{P}^n$ denote an hypersurface of degree $s$ containing $X$. If $X$ is not a complete intersection, then: $$e \leq \frac{(s-1)[(s-1)^2-n+1]}{n-1}-n+1.$$ \end{theorem}
\begin{proof} It is sufficient to prove the theorem for $s$ minimal. We distinguish two cases (see Lemma \ref{l1}).\\ If $dim(X \cap Sing(\Sigma ))=n-4$, then we argue exactly as in the proof of Theorem \ref{thmSpecW}.\\ If $dim(X \cap Sing(\Sigma ))=n-3$, then by Lemma \ref{lemmaN-3} we have $e < s-n$ or $d(Z)\cdot (e+n+1) \leq (s-1)[(s-1)^2-d(Z)]$. In the first case we conclude using $s \geq n-1$ (Remark \ref{snb}) and, in the second case, we conclude using the fact that $d(Z) \geq n-1$ by \cite{R}. \end{proof}
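\begin{remark} In the first case of the proof above, $e < s-n$ gives the bound of Theorem \ref{thmSpec} because $s \geq n-1$ (Remark \ref{snb}) and $n \geq 5$ imply $(s-1)^2 \geq (n-2)^2 \geq 2(n-1)$, hence $\frac{(s-1)^2-n+1}{n-1} \geq 1$ and $$s-n = (s-1)-n+1 \leq \frac{(s-1)[(s-1)^2-n+1]}{n-1}-n+1.$$ \end{remark}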
\begin{proof}[Proof of Theorem \ref{mainthm}] As explained in the Section 1, it follows from Theorem \ref{thmSpec} and Lemma \ref{l1}. \end{proof}
\section{Proofs of \ref{sigma} and of \ref{sigmahilb}.}
\begin{proof}[Proof of Theorem \ref{sigma}] If $X$ is not a complete intersection, this follows from Theorem \ref{mainthm}. Assume $X$ is a complete intersection. Let $F$ and $G$ ($d(F)=f,d(G)=g$) be two generators of the ideal of $X$. Then the equation of $\Sigma$ has the form $PF+QG$. Since $\Sigma$ is irreducible and $X \cap Sing(\Sigma )\neq \emptyset$, both $P$ and $Q$ have degree $>0$. This implies $s-1 \geq f$ and $s-1 \geq g$, hence $d=fg \leq (s-1)^2 < \frac{s(s-1)[(s-1)^2-n+1]}{n-1}+1$. \end{proof}
\begin{proof}[Proof of Corollary \ref{sigmahilb}] The argument goes as in the proof of \cite{CDG} Lemma 4.3: by \cite{K} the coefficients of the Hilbert polynomial of $X$ can be bounded in terms of the degree $d$ hence in terms of $s$, by \ref{sigma}, and there are finitely many components of $\mathcal{H}ilb(\Sigma)$ containing smooth varieties of dimension $n-2$. \end{proof}
\section{Proof of \ref{n=s=5} and \ref{n=s=6}}
\begin{notations} By \cite{EF}, we may assume that $X$ lies on an irreducible hypersurface $\Sigma$ of degree $n$, $5 \leq n \leq 6$ and that $h^0(\ensuremath{\mathcal{I}} _X(n-1))=0$. The assumption of \ref{not2} is satisfied and by Lemma \ref{R} and Lemma \ref{lemmaN-3}, we get: $e <s-n$ or $d(Z)\cdot (e+n+1) \leq (s-1)[(s-1)^2-d(Z)]$. The first case cannot occur in our situation since we may assume $e \geq 3$ if $n=5$ by \cite{BC} (resp. $e \geq 8$ if $n=6$ by \cite{HS} Cor. 6.2). So we may assume $d(Z)\cdot (e+n+1) \leq (s-1)[(s-1)^2-d(Z)]\: (*)$. Now if $e \geq E$, from $(*)$ we get: $d(Z) \leq \frac{(s-1)^3}{E+n+s}\:(+)$. \end{notations}
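\begin{remark} The implication $(*) \Rightarrow (+)$ is elementary: $(*)$ can be rewritten as $d(Z)(e+n+1)+(s-1)d(Z) \leq (s-1)^3$, i.e. $d(Z)(e+n+s) \leq (s-1)^3$; if moreover $e \geq E$, then $d(Z) \leq \frac{(s-1)^3}{e+n+s} \leq \frac{(s-1)^3}{E+n+s}$. \end{remark}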
\begin{proof}[Proof of Theorem \ref{n=s=5}] Applying $(+)$ with $n=s=5$ and $E=3$ we get $d(Z) \leq 4$, hence $d(Z)=4$ (\cite{R}). Arguing as in \cite{EF} Lemma 2.6, every irreducible component of $Z_{red}$ appears with multiplicity, so $Z$ is either a multiplicity four structure on a linear space or a double structure on a quadric. In both cases it is a complete intersection: in the first case this follows from \cite{Mano} and in the second one, from the fact that $Z$ is given by the Ferrand construction since $emdim(Z_{red})\leq 4$. \end{proof}
\begin{proof}[Proof of Theorem \ref{n=s=6}] Applying $(+)$ with $n=s=6$ and $E=8$, we get $d(Z) \leq 6$. If $d(Z)=6$, $(*)$ implies $e \leq 8$. So $e=8$ and $6=d(Z)=d-6e-6$. It follows that $d=60$ and we conclude with \cite{EF} Theorem 1.1. So $d(Z) \leq 5$, hence (\cite{R}), $d(Z)=5$. Now $(*)$ yields $e \leq 13$. Moreover $5=d(Z)=d-6e-6$ yields $d=6e+11$. If $e \leq 10$, again, we conclude with Theorem 1.1 of \cite{EF}. We are left with the following possibilities: $(d,e)=(77,11),(83,12),(89,13)$. We conclude with \cite{HS} (list on page 216). \end{proof}
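\begin{remark} The numerical applications of $(+)$ above read as follows: for $n=s=5$, $E=3$ one gets $d(Z) \leq \frac{4^3}{13}=\frac{64}{13}<5$, hence $d(Z) \leq 4$; for $n=s=6$, $E=8$ one gets $d(Z) \leq \frac{5^3}{20}=6.25$, hence $d(Z) \leq 6$. Moreover, for $n=s=6$ the relation $d(Z)=d-s(e+n+1-s)$ specializes to $d(Z)=d-6e-6$, which gives $d=60$ when $(d(Z),e)=(6,8)$ and $d=6e+11$ when $d(Z)=5$. \end{remark}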
\end{document}
\begin{document}
\title{Weighted Branching Simulation Distance for Parametric Weighted Kripke Structures} \begin{abstract} This paper concerns branching simulation for weighted Kripke structures with parametric weights. Concretely, we consider a weighted extension of branching simulation where a single transition can be matched by a sequence of transitions while preserving the branching behavior. We relax this notion to allow for a small degree of deviation in the matching of weights, inducing a directed distance on states. The distance between two states can be used directly to relate properties of the states within a sub-fragment of weighted CTL. The problem of relating systems thus changes to minimizing the distance which, in the general parametric case, corresponds to finding suitable parameter valuations such that one system can approximately simulate another. Although the distance considers a potentially infinite set of transition sequences we demonstrate that there exists an upper bound on the length of relevant sequences, thereby establishing the computability of the distance. \end{abstract} \section{Introduction} In recent years within the area of embedded and distributed systems, a significant effort has been made to develop various formalisms for modeling and specification that address non-functional properties. Examples include extensions of classical Timed Automata \cite{DBLP:conf/icalp/AlurD90} with cost and resource consumption/production in Priced Timed Automata \cite{DBLP:conf/hybrid/BehrmannFHLPRV01} and Energy Automata \cite{DBLP:conf/formats/BouyerFLMS08}. For quantitative analysis of these systems, a generalization of bisimulation equivalence by Milner \cite{DBLP:books/daglib/0067019} and Park \cite{Park1981} as behavioral distances \cite{DBLP:journals/jlp/ThraneFL10,DBLP:journals/tcs/LarsenFT11,DBLP:journals/tse/AlfaroFS09} between system, has been studied.
In parallel, \emph{parametric} extensions of various formalism have been intensively studied. Instead of requiring exact specification of e.g probabilities, cost or timing constraints, these formalisms allow for the use of \emph{parameters} representing unknown or unspecified values. This can be used to encode multiple configurations of the same system as a system being parametric in the configurable quantities. The problem is then to find ``good'' parameter values such that the instantiated system (configuration) performs as expected. For real-time systems, Parametric Timed Automata \cite{DBLP:conf/stoc/AlurHV93,DBLP:journals/ijfcs/AndreCFE09} and Parametric Stateful Timed CSP \cite{DBLP:journals/rts/Andre00D14} have been developed. Parametric probabilistic models \cite{DBLP:conf/cav/HahnHWZ10, DBLP:conf/nfm/HahnHZ11} have also been developed as well as parametric analysis for weighted Kripke structures \cite{christoffersen_et_al:OASIcs:2015:5611, DBLP:conf/lics/EmersonT99,DBLP:journals/fuin/KnapikP14}. \cite{DBLP:conf/lics/EmersonT99} provides an efficient model-checking algorithm for a parametric extension of real-time CTL on timed Kripke structures. \cite{DBLP:journals/fuin/KnapikP14} extends \cite{DBLP:conf/lics/EmersonT99} to full parameter synthesis by demonstrating that model-checking a finite subset of the entire set of parameter values is sufficient.
In this paper we revisit (parametric) weighted Kripke structures with the purpose of lifting the behavioral distance defined in \cite{WCTL_logic} to the parametric setting, demonstrate its fixed point characterization and prove computability of the distance between any two systems. The distance is a generalization of a weighted extension of branching simulation \cite{branching_bisim}. Consider the following two processes $s,t$ both ending in the inactive process 0: \[ s \to_5 0 \text{ and } t \to_3 t_1 \to_2 0 \] If $s,t,t_1$ satisfy the same atomic proposition, $t_1$ may be deemed unobservable and $t$ may simulate $s$ as they both evolve into the process 0 with the same overall weight. \cite{WCTL_logic} captures this situation in generality by extending branching simulation with weights. Consider a similar scenario, where the process $t$ is now parametrized by the parameter $p$: \[ s \to_5 0 \text{ and } t \to_p t_1 \to_2 0 \] If $p \neq 3$ we know that $t$ can no longer simulate $s$. However, it should be intuitive that $p = 6$ is somehow worse than $p = 2$ as the latter is closer to 3. Thus, instead of considering pre-orders and Boolean answers we develop a parametric distance between states such that as the value of $p$ approaches $3$, the distance between $s$ and $t$ decreases towards 0. The distance will also give us a direct relation between the properties satisfied by $s$ and $t$ and a distance of 0 implies that any formula satisfied by $s$ is satisfied by $t$. In this way one can reason about how ``close'' a given implementation is to the specification and compare different configurations that are not necessarily able to fully simulate $s$.
The structure of this paper is as follows: in \autoref{sec:prelim} we introduce preliminaries and recall results from \cite{WCTL_logic}, \autoref{sec:WKS_sim_dist} concerns the fixed point characterization of the distance for weighted systems, \autoref{sec:PWKS_dist} lifts the distance to the parametric setting and finally \autoref{sec:conc_future} concludes the paper and describes future work. \section{Preliminaries}\label{sec:prelim} A weighted Kripke Structure (WKS) extends the classical Kripke structure by associating to each transition a non-negative rational transition weight. \begin{definition}[Weighted Kripke Structure] A weighted Kripke Structure is a tuple $\mcal{K}=(S,AP,\mcal{L},\to)$ where $S$ is a finite set of states, $AP$ is a set of atomic propositions, $\mcal{L}: S \to \mathcal{P}(AP)$ is a labelling function, associating to each state a set of atomic propositions and $\to \subseteq S \times \Q_{\geq 0} \times S$ is the finite transition relation. \end{definition} A transition from $s$ to $s'$ with weight $w$ will be denoted by $s \to_w s'$ instead of $(s,w,s') \in \to$. \begin{example} \autoref{fig:WKSex} depicts the WKS $\mcal{K}=(S,AP,\mcal{L},\to)$ where $S = \{s,s_1,s_2,s_3,s_4,t,t_1,t_2\}$, $AP = \{a,b\}$, $\mcal{L}(s) = \mcal{L}(s_1) = \mcal{L}(s_2) = \mcal{L}(t) = \mcal{L}(t_2) = \{a\}$, $\mcal{L}(s_3) = \mcal{L}(s_4) = \mcal{L}(t_1) = \{b\}$ and\\ $\to = \{(s,1,s_1),(s,2,s_2),(s_1,2,s_2),(s_1,1,s_3),(s_1,3,s_4),(s_2,5,s_4),(t,2,t_1),(t,1,t_2),(t_2,2,t_2),(t_2,1,t_1)\}$. \begin{figure}
\caption{WKS $\mathcal{K}$ where $s \not\leq t$ and $t \not\leq s$ but $s \simeps[0.5] t$.}
\label{fig:WKSex}
\end{figure} \end{example} To reason about behavior of WKSs, we introduce a weighted variant of the classical notion of branching simulation \cite{branching_bisim}. The basic idea is to let a transition $s \to_5 s'$ be matched by a sequence of transitions $t \to_{2} t_1 \to_{2} t_2 \to_{1} t_3$, if $t_3$ can simulate $s'$, as the accumulated weight equals 5. In addition, each intermediate state passed through in the matching transition sequence must be able to simulate $s$. In this way the branching structure of systems is preserved. Instead of always requiring exact weight matching we allow small relative deviations. These small deviations will in \autoref{sec:WKS_sim_dist} induce a directed distance between WKS states. \begin{definition}[Weighted Branching $\varepsilon$-Simulation \cite{WCTL_logic}] Given a WKS $\mcal{K}=(S,AP,\mcal{L},\to)$ and an $\varepsilon \in \R_{\geq 0}$, a binary relation $R^{\varepsilon} \subseteq S \times S$ is a weighted branching $\varepsilon$-simulation relation if whenever $(s,t) \in R^{\varepsilon}$: \begin{itemize}
\item $\mcal{L}(s) = \mcal{L}(t)$
\item for all $s \to_w s'$ there exists $t \to_{v_1} t_1 \to_{v_2} \cdots \to_{v_k} t_k$ such that $\sum_{i=1}^k v_i \in [w(1-\varepsilon), w(1+\varepsilon)], (s',t_k) \in R^{\varepsilon}$ and $\forall i<k. (s,t_i) \in R^{\varepsilon}$. \end{itemize} \end{definition} If there exists a weighted branching $\varepsilon$-simulation relating $s$ to $t$ we write $s \simeps t$. If $\varepsilon = 0$ we write $s \leq t$ instead of $s \simeps[0] t$. Note that in this case $\sum_{i=1}^k v_i = w$.
\begin{example}\label{ex:sim} Consider again \autoref{fig:WKSex} and the pair $(s,t)$. It is clear that $t \not\leq s$ because of the loop $t_2 \to_2 t_2$. We can also observe that $s \not\leq t$ as the transition $s \to_2 s_2$ can only be matched by $t \to_2 t_1$ but $s_2 \not\leq t_1$ as $\mcal{L}(s_2) \neq \mcal{L}(t_1)$. If we relax the matching requirements by 50\%, we get that $s$ can be simulated by $t$ i.e $s \simeps[0.5] t$; $s \to_2 s_2$ can be matched by $t \to_1 t_2$ as $[2(1-0.5),2(1+0.5)] = [1,3]$ and $1 \in [1,3]$ (another legal match would be $t \to_1 t_2 \to_2 t_2$). Now, $s_2 \to_5 s_4$ can be matched exactly by $t_2 \to_2 t_2 \to_2 t_2 \to_1 t_1$. It follows that $\varepsilon \geq 0.5 \iff s \simeps[\varepsilon] t$. \end{example} If we restrict weighted CTL to only encompass the existential quantifier and remove the next-operator and we know that $s \simeps t$, then for any property $\phi$ of $s$, there exists a related property $\phi^{\varepsilon}$ of $t$.
\begin{definition}[Existential Fragment of Weighted CTL without next] The syntax of $EWCTL_{-X}$ is given by the following abstract syntax: \[ \phi ::= a \mid \neg a \mid \phi_1 \land \phi_2 \mid \phi_1 \lor \phi_2 \mid E(\phi_1U_I\phi_2), \] where $a \in AP$, $I =[l,u]$ and $l,u \in \Q_{\geq 0}$ such that $l \leq u$. For a WKS $\mcal{K}=(S,AP,\mcal{L},\to)$ and an arbitrary state $s \in S$, the semantics of $EWCTCL_{-X}$ formulae is given by a satisfiability relation, inductively defined on the structure of formulae in $EWCTL_{-X}$. For existential until; $\mathcal{K},s \models E(\phi_1 U_I \phi_2) \iff$ there exists a sequence $s \to_{w_1} s_1 \to_{w_2} \cdots \to_{w_k} s_k \to_{w_{k+1}} \ldots$ where $s_k \models \phi_2, \forall i < k. s_i \models \phi_1$ and $\sum_{i=1}^k w_i \in I$. Let the \emph{$\varepsilon$-expansion} of a formula $\phi = E(\phi_1U_{[l,u]}\phi_2)$ be given by $\phi^{\varepsilon} = E(\phi_1^{\varepsilon}U_{[l(1-\varepsilon),u(1+\varepsilon)]}\phi_2^{\varepsilon})$ where $\phi_1^{\varepsilon}$ and $\phi_2^{\varepsilon}$ are defined inductively by relaxing any interval by $\varepsilon$ percent in both directions (just as for $[l,u]$). \end{definition} \begin{theorem}\label{thm:sim_logic_relation} \cite{WCTL_logic} Let $\mcal{K}=(S,AP,\mcal{L},\to)$ be a WKS. Then for all $s,t \in S, \varepsilon \in \R_{\geq 0}$: \[ s \simeps t \quad \text{iff} \quad \forall \varepsilon' \in \Q_{\geq 0}, \varepsilon \leq \varepsilon'. [\forall \phi \in EWCTL_{-X}. s \models \phi \implies t \models \phi^{\varepsilon'}]. \] \end{theorem} \section{Weighted Branching Simulation Distance for WKSs}\label{sec:WKS_sim_dist} We now define a directed distance between WKS states as a least fixed point to a set of equations. The distance from $s$ to $t$, $d(s,t)$, represents the minimal $\varepsilon$ such that $s \simeps t$. Thus, if $d(s,t) = 0$ then $s \leq t$. As the distance is based upon weighted branching $\varepsilon$-similarity and its relative deviation in weight matching, it will not satisfy the triangle inequality and is therefore not a hemi-metric.
The definition of the distance intuitively follows weighted branching $\varepsilon$-simulation. If $s \simeps t$ then no matter what transition $s$ chooses, $t$ has a matching transition sequence with a relative difference of at most $\varepsilon$. In other words, for a given transition $s \to_w s'$, the goal of $t$ is to find a matching sequence $t\rightarrow_{v_1}t_1 \cdots \rightarrow_{v_n} t_n$ that \emph{minimizes} the relative difference $\left|\frac{\sum_{i=1}^n v_i}{w}-1\right|$ as well as ensuring that any intermediate state $t_i$ has as small a distance to $s$ as possible. The strategy of $s$ is then to find a \emph{maximal} move, given the minimization strategy of $t$. In the remainder of this section we assume a fixed WKS $\mcal{K}=(S,AP,\mcal{L},\to)$. \begin{definition}[Weighted Branching Simulation Distance] For an arbitrary pair of states $s,t \in S$ we define the weighted branching simulation distance from $s$ to $t$, $d(s,t)$, as the least fixed point ($\minfix$) of the following set of equations: \[
d(s,t) \minfix \left\{\begin{array}{ll}
\infty & \text{ if } \mcal{L}(s) \neq \mcal{L}(t)\\
\max_{s \rightarrow_w s'} \left\{\min_{t\rightarrow_{v_1}t_1 \cdots \rightarrow_{v_n} t_n} \left\{\max\left\{\begin{array}{l}
\left|\frac{\sum_{i=1}^n v_i}{w}-1\right|,d(s',t_n),\\
\max \{d(s,t_i) | \,i < n\}
\end{array}
\right\}
\right\}\right\} &\text{ o.w}
\end{array}\right. \] \end{definition}
We assume the empty transition sequence to have accumulated weight 0 and let $\R_{\geq 0} = \{w\,|\, w \in \mathbb{R}, w \geq 0\} \cup \{\infty\}$ denote the extended set of non-negative reals. For any $d_1,d_2 \in \Rpos^{S \times S}$ let $d_1 \leq d_2$ iff $\forall (s,t) \in S \times S. d_1(s,t) \leq d_2(s,t)$. Then $(\Rpos^{S \times S}, \leq)$ constitutes a complete lattice. We now define a monotone function on $(\Rpos^{S \times S},\leq)$ that iteratively refines the distance: \begin{definition}\label{def:f} Let $\mcal{F} : \Rpos^{S \times S} \to \Rpos^{S \times S}$ be defined for any $d \in \Rpos^{S \times S}$: \[
\mcal{F}(d)(s,t) = \left\{\begin{array}{ll}
\infty & \text{ if } \mcal{L}(s) \neq \mcal{L}(t)\\
\max_{s \rightarrow_w s'} \left\{\min_{t\rightarrow_{v_1}t_1 \cdots \rightarrow_{v_n} t_n} \left\{\max\left\{\begin{array}{l}
\left|\frac{\sum_{i=1}^n v_i}{w}-1\right|,d(s',t_n),\\
\max \{d(s,t_i) | \,i < n\}
\end{array}
\right\}
\right\}\right\} & \text{ o.w}
\end{array}\right. \] \end{definition} By Tarski's fixed point theorem \cite{tarski} we are guaranteed the existence of a least (pre-)fixed point. Thus, the weighted branching simulation distance is well-defined. Note that for any transition $s \to_w s'$, $t$ may have an infinite set of possible matching transition sequences in the presence of cycles in the system. To this end we demonstrate an upper bound, $N$, on the length of relevant matching sequences. As the set of sequences of length at most $N$ is finite (the WKS is finite), computability of the distance follows. The first step is proving that any sequence exercising a loop with accumulated weight 0 can be ignored. We refer to these cycles as \emph{0-cycles}.
\begin{lemma}\label{lem:zerocycle} For a given move $s \to_w s'$, any transition sequence $t\rightarrow_{v_1}t_1 \cdots \rightarrow_{v_n} t_n$ with a 0-cycle can be removed without affecting the distance $d(s,t)$. \end{lemma} \begin{proof}
A transition sequence with one or more 0-cycles has the exact same accumulating weight as the corresponding sequence with no 0-cycles. Furthermore, exercising the loop (once) can only introduce new states, leading to a potentially larger value of $\max \{d(s,t_i) | \,i < n\}$. Thus, 0-cycles can be ignored. \end{proof} Given that 0-cycles can be removed, we now prove an upper bound $N$ on the length of sequences that affect the distance $d(s,t)$. Thus, any sequence longer than $N$ can be safely ignored. \begin{lemma}\label{lem:finsequence} Given that $\mathcal{K}$ has no 0-cycles, it is the case that whenever $s \to_w s'$: \begin{align*} \exists N. &\forall \pi = t\rightarrow_{v_1}t_1 \ldots \rightarrow_{v_n} t_n, n \geq N.\\ &\exists \pi^* = t\rightarrow_{u_1} t_1' \ldots \rightarrow_{u_m} t_m', m \leq N.\\
&t_n = t_m' \,\land\, \left|\frac{\sum_{i=1}^m u_i}{w}-1\right| \leq \left|\frac{\sum_{i=1}^n v_i}{w}-1\right| \land\\ & \{t_1', \ldots, t_{m-1}'\} \subseteq \{t_1,\ldots,t_{n-1}\} \end{align*} \end{lemma} \begin{proof}
Let $w_{\min} = \min\{w\,|\,s \to_w s'\}$ be the minimum weight in the WKS and let
$s_{w_{\max}} = \max\{w\,|\,s \to_w s'\}$ be the maximum weight out of $s$. We now demonstrate that $N \geq \frac{2 \cdot {s_{w_{\max}}}}{w_{\min}} \cdot |S|$ is sufficient. Any sequence of length $|S|$ must have a loop which, by assumption, cannot have accumulated weight 0. Thus, after $|S|$ transitions, the accumulated weight must be at least $w_{\min}$. Without loss of generality, assume that it is \emph{exactly} $w_{\min}$. If the sequence exercises the loop a number of time, the accumulated weight will at some point reach $2 \cdot s_{w_{\max}}$. Let this sequence be $\pi = t\rightarrow_{v_1}t_1 \cdots \rightarrow_{v_k} t_k$ and let $x$ denote the number of times the loop is exercised i.e $x \cdot w_{\min} \geq 2 \cdot s_{w_{\max}}$. Consider now the corresponding sequence $\pi^* = t\rightarrow_{u_1} t_1' \cdots \rightarrow_{u_l} t_l'$ where the loop is removed. As $\sum_{i=1}^k v_i \geq 2 \cdot s_{w_{\max}}$ it follows that $\left|\frac{\sum_{i=1}^k v_i}{s_{w_{\max}}}-1\right| > 1$. By assumption, removing the loop results in a strictly lower accumulated weight implying $\left|\frac{\sum_{i=1}^l u_i}{s_{w_{\max}}}-1\right| < \left|\frac{\sum_{i=1}^k v_i}{s_{w_{\max}}}-1\right|$. We also directly have $t_k = t_l'$ and $\{t_1,\ldots,t_l'\} \subseteq \{t_1,\ldots,t_k\}$. We will now derive $N$ from the inequality $x \cdot w_{\min} \geq 2 \cdot s_{w_{\max}}$. The number of times the loops is exercised must be equal to the length of the entire sequence divided by $|S|$ as we are sure to exercise the loop every $|S|$ states. Thus, $x = \frac{N}{|S|} \implies \frac{N}{|S|} \cdot w_{\min} \geq 2 \cdot s_{w_{\max}}$ and finally, \[
N \geq \frac{2 \cdot s_{w_{\max}}}{w_{\min}} \cdot |S|. \] \end{proof}
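To make the computation concrete, the following deliberately naive Python sketch performs the Kleene iteration of $\mcal{F}$ from the bottom element on the WKS of \autoref{fig:WKSex}, enumerating matching transition sequences only up to a fixed length bound (here $4$, an ad hoc choice which is enough to reproduce the value $d(s,t)=\frac{1}{2}$ of the example below; in general the bound $N$ established above can be used). It is an illustrative sketch only and makes no attempt at efficiency.
\begin{verbatim}
INF = float('inf')

# Labels and weighted transitions of the running example (fig:WKSex).
labels = {'s': 'a', 's1': 'a', 's2': 'a', 's3': 'b', 's4': 'b',
          't': 'a', 't1': 'b', 't2': 'a'}
trans = {'s': [(1, 's1'), (2, 's2')],
         's1': [(2, 's2'), (1, 's3'), (3, 's4')],
         's2': [(5, 's4')], 's3': [], 's4': [],
         't': [(2, 't1'), (1, 't2')], 't1': [],
         't2': [(2, 't2'), (1, 't1')]}

def matches(t, max_len):
    """Sequences from t of length 0..max_len as pairs
    (accumulated weight, list of visited states after t)."""
    seqs, frontier = [(0, [])], [(0, [])]
    for _ in range(max_len):
        new_frontier = []
        for acc, visited in frontier:
            cur = visited[-1] if visited else t
            for w, nxt in trans[cur]:
                new_frontier.append((acc + w, visited + [nxt]))
        seqs.extend(new_frontier)
        frontier = new_frontier
    return seqs

def refine(d, max_len):
    """One application of the refinement function F."""
    nd = {}
    for s in trans:
        for t in trans:
            if labels[s] != labels[t]:
                nd[(s, t)] = INF
                continue
            outer = 0.0
            for w, sp in trans[s]:                 # every move of s ...
                inner = INF
                for acc, visited in matches(t, max_len):
                    end = visited[-1] if visited else t
                    mids = visited[:-1]            # intermediate states
                    cand = max([abs(acc / w - 1), d[(sp, end)]] +
                               [d[(s, m)] for m in mids])
                    inner = min(inner, cand)       # ... best match of t
                outer = max(outer, inner)
            nd[(s, t)] = outer
    return nd

d = {(a, b): 0.0 for a in trans for b in trans}    # bottom element
for _ in range(100):                               # iterate towards the lfp
    nd = refine(d, max_len=4)
    if nd == d:
        break
    d = nd
print(d[('s', 't')])                               # prints 0.5
\end{verbatim}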
\begin{theorem}[Computability]\label{thm:wdistcomputable} For two states $s,t \in S$, the weighted branching simulation distance is computable. \end{theorem} \begin{proof} \autoref{lem:finsequence} provides an upper bound on the length of transition sequence that we need to consider in the computation of $d(s,t)$ for any states $s,t \in S$ under the assumption that there are no 0-cycles. By \autoref{lem:zerocycle} we know that any 0-cycles can be removed without affecting the distance. Thus when computing the distance we know for the sub-expression \[ \min_{t\rightarrow_{v_1}t_1 \cdots \rightarrow_{v_n} t_n} \left\{\max\left\{\begin{array}{l}
\left|\frac{\sum_{i=1}^n v_i}{w}-1\right|,d(s',t_n),\\
\max \{d(s,t_i) | \,i < n\}
\end{array}
\right\}
\right\} \]
that $n \leq \frac{2 \cdot s_{w_{\max}}}{w_{\min}} \cdot |S|$. As the WKS has a finite number of states and a finite transition relation, only a finite number of sequences of finite length exist. Thus we can modify the distance function to only consider these without affecting the computed distance. Thus, the distance must at some point converge as only a finite number of relative distances on the form $\left|\frac{\sum_{i=1}^n v_i}{w}-1\right|$ exists. \end{proof} We leave the exact complexity of computing $d(s,t)$ open but note that deciding $d(s,t) = 0$ is NP-complete \cite{WCTL_logic}. \begin{example}
Consider again \autoref{fig:WKSex} and the computation of $d(s,t)$. For the transition $s \to_1 s_1$ only one sequence is considered instead of the entire infinite set arising from the loop; $t \to_1 t_2$. As $\left|\frac{3}{1}-1\right| > \left|\frac{1}{1}-1\right|$, even the sequence that only exercises the loop once is worse than just transitioning to $t_2$ directly. This happens because the accumulated matching weight exceeds the weight being matched and the same states are involved in both sequences. Therefore any sequence involving the loop can be ignored. Note that we in this example consider fewer sequences than implied by the upper bound given in \autoref{lem:finsequence}. For $s \to_1 s_1$ the bound would be $\frac{2 \cdot 2}{2} \cdot 8 = 16$ but it should be clear that the loop can be safely ignored. For the transition $s \to_2 s_2$, there are two relevant matching sequences; $t \to_1 t_2$ and $t \to_1 t_2 \to_2 t_2$. Thus, \[ d(s,t) \minfix \max\left\{
\begin{array}{l}
\max\left\{\left|\frac{1}{1}-1\right|,d(s_1,t_2)\right\},\\[0.1cm]
\min\left\{\begin{array}{l}
\max\left\{\left|\frac{1}{2}-1\right|,d(s_2,t_2)\right\},\\[0.1cm]
\max\left\{\left|\frac{3}{2}-1\right|,d(s_2,t_2),d(s,t_2)\right\}
\end{array}\right\}
\end{array}\right\} \] It is easily shown that $d(s_2,t_2) = 0$ as $s_2 \to_5 s_4$ can be matched exactly by $t_2 \to_2 t_2 \to_2 t_2 \to_1 t_1$. Thus, \[ d(s,t) \minfix \max\left\{\frac{1}{2}, d(s_1,t_2),d(s,t_2)\right\} \] where \begin{align*} &d(s_1, t_2) \minfix \max\left\{\!\!\!\!\!\begin{array}{ll}
\max\left\{\left|\frac{2}{2}-1\right|,d(s_2,t_2)\right\},\\[0.1cm]
\max\left\{\left|\frac{1}{1}-1\right|,d(s_3,t_1)\right\},\\[0.1cm]
\max\left\{\left|\frac{3}{3}-1\right|,d(s_1,t_2),d(s_4,t_1)\right\}
\end{array}\!\!\!\!\!\!\right\} \,\text{ and}\! &d(s,t_2) \minfix \max\left\{\!\!\!\!\begin{array}{ll}
\max\left\{\left|\frac{2}{1}-1\right|, d(s_2,t_2)\right\},\\[0.1cm]
\max\left\{\left|\frac{2}{2}-1\right|, d(s_2,t_2)\right\}
\end{array}\!\!\!\!\!\right\}. \end{align*} As $s_4 \not\to$, $s_3 \not\to$ and $t_1 \not\to$ it follows that $d(s_4,t_1) = d(s_3,t_1) = 0$, hence \[ d(s_1,t_2) \minfix \max\left\{\frac{1}{2},d(s_1,t_2)\right\}. \] The least solution to this equation is $\frac{1}{2}$ hence $d(s_1,t_2) = d(s,t) = \frac{1}{2}$. From \autoref{ex:sim} we know that $s \simeps t$ for any $\varepsilon \geq 0.5$ i.e for any $\varepsilon \geq d(s,t)$. \end{example} Now that we have established the computability of the distance we prove its relation to weighted branching $\varepsilon$-simulation. \begin{theorem}\label{lem:sim_dist_relation} For two states $s,t \in S$ and $\varepsilon \in \R_{\geq 0}$: \[ d(s,t) \leq \varepsilon \text{ iff } s \simeps t \] \end{theorem} \begin{proof}
$(\implies)$ For this direction we prove that $R^{\varepsilon} = \{(s,t) \,|\, s,t \in S, d(s,t) \leq \varepsilon\}$ is a weighted branching $\varepsilon$-simulation relation. Suppose $(s,t) \in R^{\varepsilon}$. Then $d(s,t) \leq \varepsilon$ and by the fixed point property of $d$, \[ d(s,t) = \max_{s \rightarrow_w s'} \left\{\min_{t\rightarrow_{v_1}t_0 \cdots \rightarrow_{v_n} t_n} \left\{\max\left\{\begin{array}{l}
\left|\frac{\sum_{i=1}^n v_i}{w}-1\right|,\\
\max \{d(s',t_n)\} \cup \{d(s,t_i) | i < n\}
\end{array}
\right\}
\right\}\right\} \]
We immediately have that for any transition $s \to_w s'$ there exists a matching transitions sequence $t\rightarrow_{v_1}t_0 \cdots \rightarrow_{v_n} t_n$ such that $\left|\frac{\sum_{i=1}^n v_i}{w}-1\right| \leq \varepsilon$, $d(s',t_n) \leq \varepsilon$ and $\forall i < n. d(s,t_i) \leq \varepsilon$. Thus, by definition of $R^{\varepsilon}$, for any transition $s \to_w s'$ there exists a sufficient matching sequence from $t$ such that $(s',t_n) \in R^{\varepsilon}$ and $(s,t_i) \in R^{\varepsilon}$ for any $i < n$.
$(\impliedby)$ Let \[ d^*(s,t) = \left\{\begin{array}{ll}
\varepsilon &\text{ if } s \simeps t\\
\infty &\text{ otherwise}
\end{array}\right. \] We now prove that $d^*$ is a pre-fixed point of $\mcal{F}$, i.e. $\mcal{F}(d^*)(s,t) \leq d^*(s,t)$ for any pair $(s,t) \in S \times S$. If $s \not\simeps t$ then $d^*(s,t) = \infty$ and there is nothing to prove. If $s \simeps t$ then for any transition $s \to_w s'$ there exists a matching sequence $t\rightarrow_{v_1}t_0 \cdots \rightarrow_{v_n} t_n$ such that $\sum_{i=1}^n v_i \in [w(1-\varepsilon), w(1+\varepsilon)]$, $s' \simeps t_n$ and $s \simeps t_i$ for any $i < n$. We can now argue that \[ \max_{s \rightarrow_w s'} \left\{\min_{t\rightarrow_{v_1}t_0 \cdots \rightarrow_{v_n} t_n} \left\{\max\left\{\begin{array}{l}
\left|\frac{\sum_{i=1}^n v_i}{w}-1\right|,\\
\max \{d^*(s',t_n)\} \cup \{d^*(s,t_i) | i < n\}
\end{array}
\right\}
\right\}\right\} \leq \varepsilon \]
as $\sum_{i=1}^n v_i \in [w(1-\varepsilon), w(1+\varepsilon)]$ is equivalent to $\left|\frac{\sum_{i=1}^n v_i}{w}-1\right| \leq \varepsilon$, $s' \simeps t_n$ implies $d^*(s',t_n) = \varepsilon$ and similarly $d^*(s,t_i) = \varepsilon$ for any $i < n$. As $d^*$ is a pre-fixed point of $\mcal{F}$ and $d^*(s,t) = \varepsilon$, it must be the case that $d(s,t) \leq \varepsilon$ as $d$ is the \emph{smallest} pre-fixed point of $\mcal{F}$. \end{proof} Combining \autoref{thm:sim_logic_relation} and \autoref{lem:sim_dist_relation} we immediately get a relation between the distance from one state $s$ to another state $t$ and their $EWCTL_{-X}$ properties: \[ d(s,t) \leq \varepsilon \quad \text{iff} \quad \forall \varepsilon' \in \Q_{\geq 0}, \varepsilon \leq \varepsilon'. [\forall \phi \in EWCTL_{-X}. s \models \phi \implies t \models \phi^{\varepsilon'}]. \] \section{Weighted Branching Simulation Distances for Parametric WKSs}\label{sec:PWKS_dist} We now extend WKS with parametric weights. The lifted parametric distance will be from a WKS to a parametric system and is represented as a parametric expression that can be evaluated to a rational by a \emph{parameter valuation}. If one abstracts multiple configurations of the same system as one parametric system and calculates the parametric distance, evaluating the distance with respect to a parameter valuation then corresponds to calculating the exact distance from a specific configuration (given by the valuation) to the WKS. Thus, instead of working with multiple WKS configurations, one can use a parametric system and compute the parametric distance once.
A parametric weighted Kripke structure (PWKS) extends WKS by allowing transitions to have parametric weights. Let $\mcal{P} = \{p_1,\ldots,p_n\}$ be a fixed finite set of parameters. A \emph{parameter valuation} is a function mapping each parameter to a non-negative rational; $v : \mcal{P} \to \Q_{\geq 0}$. The set of all such valuations will be denoted by $\mcal{V}$.
\begin{definition}[Parametric Weighted Kripke Structure] A \emph{parametric weighted Kripke structure} is a tuple $\mcal{K_\mcal{P}}=(S,AP,\mcal{L},\to)$, where $S$ is a finite set of states, $AP$ is a set of atomic propositions, $\mcal{L}: S\to \mathcal{P}(AP)$ is a mapping from states to sets of atomic propositions and $\to \subseteq S \times (\mcal{P} \cup \Q_{\geq 0}) \times S$ is the finite transition relation. \end{definition} Unless otherwise specified, we assume a fixed PWKS $\mcal{K_\mcal{P}}=(S,AP,\mcal{L},\to)$ in the remainder of this section. One can instantiate a PWKS to a WKS by applying a parameter valuation. A PWKS thus represents an infinite set of WKSs.
\begin{definition} Given a parameter valuation $v \in \mcal{V}$, we define the \emph{instantiated WKS} of $\mathcal{K}_\mcal{P}$ under $v$ to be $\mcal{K}_{\mcal{P}}^{v}=(S,AP,\mcal{L},\to_v)$ where \[ \to_v = \{(s,v(p),s')\mid (s,p,s')\in \to, p \in \mcal{P}\} \cup \{(s,w,s')\mid (s,w,s')\in \to, w \in \Q_{\geq 0}\} \] \end{definition} For a state $s$ in $\mathcal{K}_\mcal{P}$ let $s[v]$ be the corresponding state in the WKS $\mathcal{K}_\mcal{P}^v$ and let $\simeps$ be lifted to disjoint unions of WKSs in the natural way.
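As a minimal illustration of instantiation, consider the parametric process from the introduction, $t \to_p t_1 \to_2 0$; in the sketch below the inactive process $0$ is rendered as the hypothetical state name \texttt{end}, and one possible encoding represents a parametric weight simply by the parameter's name. Applying a valuation then replaces each parameter name by the prescribed rational.
\begin{verbatim}
from fractions import Fraction

# Parametric weights are either rationals or parameter names (strings).
pwks = {'t': [('p', 't1')], 't1': [(Fraction(2), 'end')], 'end': []}

def instantiate(trans, valuation):
    """The transition relation of the instantiated WKS under a valuation."""
    return {state: [(valuation.get(w, w), succ) for (w, succ) in edges]
            for state, edges in trans.items()}

print(instantiate(pwks, {'p': Fraction(3)}))
# {'t': [(Fraction(3, 1), 't1')], 't1': [(Fraction(2, 1), 'end')], 'end': []}
\end{verbatim}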
Given a WKS state $s$, a PWKS state $t$ and $\varepsilon \geq 0$ we now state three interesting problems:
\begin{enumerate}
\item Does there exist a $v \in \mcal{V}$ such that $s \simeps t[v]$?
\item Can we characterize the set of ``good'' parameter valuation $V = \{v \,|\, v \in \mcal{V}, s \simeps t[v]\}$?
\item Can we synthesize a valuation $v \in \mcal{V}$ that minimizes $\varepsilon$ for $s \simeps t[v]$?
\end{enumerate}
We will show how to solve (2) by fixed point computations. The result will be a set of linear inequalities over parameters and $\varepsilon$ which has as solution a set of parameter valuations. Instead of considering a concrete $\varepsilon \in \R_{\geq 0}$, one can let $\varepsilon$ be an extra parameter. Thus, (1) and (3) can be solved by first solving (2) and applying e.g $Z3$ \cite{DBLP:conf/tacas/MouraB08} and $\nu Z$ \cite{DBLP:conf/tacas/BjornerPF15} or similar tools to solve the inequalities and search for solutions that minimize $\varepsilon$. \begin{example} Consider \autoref{fig:PWKSex}. From \autoref{ex:sim} we know that $s \leq_{0.5} t[v]$ if $v(p) = 1$. Both $v(p) = 0$ and $v(p) = 2$ imply $s \leq_{1} t[v]$. It turns out that $v(p) = 1$ is the valuation that minimizes $\varepsilon$ for $s \simeps t[v]$. \begin{figure}
\caption{A WKS (left) and a PWKS (right)}
\label{fig:PWKSex}
\end{figure} \end{example} When lifting the distance to the parametric setting, we consider disjoint unions of systems and require that only the simulating system can be parametric. Let $\mathcal{K_\mcal{P}}=(S_\mcal{P},AP,\mcal{L}_\mcal{P},\to^\mcal{P})$ be a PWKS and $\mcal{K}=(S,AP,\mcal{L},\to)$ a WKS. If we were to validate a given parameter valuation we could simply apply the valuation to the PWKS and use $\mcal{F}$ directly to decide if the distance is below some $\varepsilon$. As we want a full characterization of the good parameter valuation we will instead represent the distance as a function from a pair of states to a function that returns a weighted distance when a parameter valuation is applied; $d: S \times S_\mcal{P} \to (\mcal{V} \to \R_{\geq 0})$. We let the set of such function be denoted by $\mcal{D}$ and define an ordering as follows; for any $d^1,d^2 \in \mcal{D}$ let $d^1 \leq d^2$ iff $\forall s \in S, t \in S_\mcal{P}, v \in \mcal{V}: d^1(s,t)(v) \leq d^2(s,t)(v)$. Let $\equiv$ denote the set of pairs of semantic equivalent states. Then $(\mcal{D},\leq)$ constitutes a complete lattice and we can define a monotone function on $(\mcal{D}, \leq)$ that iteratively refines the distance: \begin{definition}\label{def:fpar} Let $\mcal{F} : \mcal{D} \to \mcal{D}$ be defined for any $d \in \mcal{D}$: \[
\mcal{F}(d)(s,t) = \left\{\begin{array}{ll}
\infty & \text{ if } \mcal{L}(s) \neq \mcal{L}(t)\\
\max_{s \rightarrow_w s'} \left\{\min_{t\rightarrow_{v_1}t_1 \cdots \rightarrow_{v_n} t_n} \left\{\max\left\{\begin{array}{l}
\left|\frac{\sum_{i=1}^n v_i}{w}-1\right|,d(s',t_n),\\
\max \{d(s,t_i) | \,i < n\}
\end{array}
\right\}
\right\}\right\} & \text{ o.w}
\end{array}\right. \] \end{definition}
Again, by Tarski's fixed point theorem \cite{tarski} we are guaranteed a least fixed point, denoted by $d_{\min}$. The problem is now that the ordering $\leq$ implies a universal quantification over the entire infinite set of parameter valuations; thus, checking if a fixed point is reached is highly impractical. Instead of representing the distance as a function in valuations we will define it as a \emph{parametric expression} that captures the distance function syntactically. For any two states $s,t$ we associate an syntactic expression $E_{s,t}$ such that the solution set to the inequality $E_{s,t} \leq \varepsilon$ characterizes the set of good parameter valuations i.e applying a parameter valuation to $E_{s,t}$ yields a concrete weighted distance. The syntactic elements for the expressions can be derived directly from $\mcal{F}$; we need syntax for describing minimums of maximums of basic elements $\left|\frac{v}{w}-1\right|$ and $\infty$ where $w$ is rational and $v$ a linear expression in the parameters. Hence, we define the following abstract syntax: \[
E_1,E_2 ::= \infty \mid \left|\frac{v}{w}-1\right| \mid \texttt{MIN}\{E_1,E_2\} \mid \texttt{MAX}\{E_1,E_2\} \]
where $w \in \Q_{\geq 0}$ and $v$ is on the form $\sum^{n}_{i=0} a_i p_i + b$ s.t $a_i \in \mathbb{N}$ for all $i < n$ and $b \in \Q_{\geq 0}$. We extend parameter valuations to expressions in the obvious way and denote by $\llbracket E \rrbracket (v)$ the value of $E$ under $v \in \mcal{V}$. Similar to disjunctive normal form for logical formulae, we assume all expression to be a $\texttt{MIN}$ of $\texttt{MAX}$'s of basic elements $\left|\frac{v}{w}-1\right|$or $\infty$. To convert an expression, note that for any $v \in \mcal{V}$ \[ \llbracket \texttt{MAX}\{\texttt{MIN}\{E_1,E_2\},E_3\} \rrbracket (v) = \llbracket \texttt{MIN}\{\texttt{MAX}\{E_1,E_3\},\texttt{MAX}\{E_2,E_3\}\} \rrbracket (v) \] The set of expression on this normal form will be denoted by $\mcal{E}$. Now the distance functions can be defined as functions associating to a pair of states a parametric expression; $d_\mcal{E}: S \times S_\mcal{P} \to \mcal{E}$. The set of syntactic distance function will be denoted by $\dfuncparsem_{\mcal{E}}$ and the syntactic iterator capturing $d_{\min}$ is defined as follows: \begin{definition}\label{def:fparsyn} Let $\ffunc_{\expr} : \dfuncparsem_{\mcal{E}} \to \dfuncparsem_{\mcal{E}}$ be defined for any $d_\mcal{E} \in \dfuncparsem_{\mcal{E}}$: \[
\ffunc_{\expr}(d_\mcal{E})(s,t) = \left\{\begin{array}{ll}
\infty & \text{ if } \mcal{L}(s) \neq \mcal{L}(t)\\
\texttt{MAX}_{s \rightarrow_w s'} \left\{\texttt{MIN}_{t\rightarrow_{v_1}t_1 \cdots \rightarrow_{v_n} t_n} \left\{\texttt{MAX}\left\{\begin{array}{l}
\left|\frac{\sum_{i=1}^n v_i}{w}-1\right|,d_\mcal{E}(s',t_n),\\
\texttt{MAX} \{d_\mcal{E}(s,t_i) | \,i < n\}
\end{array}
\right\}
\right\}\right\} & \text{ o.w}
\end{array}\right. \] \end{definition} We will now define an ordering on elements from $\dfuncparsem_{\mcal{E}}$, by first ordering elements from $\mcal{E}$. \begin{definition}\label{def:exporder} The syntactic ordering $\distsynorder \subseteq \mcal{E} \times \mcal{E}$ is defined inductively on the structure of $\mcal{E}$: \begin{alignat*}{2}
\left|\frac{\sum_{i=1}^n a_ip_i + b}{w}-1\right| \distsynorder \infty \quad\quad &\text{always}\\
\left|\frac{\sum_{i=1}^n a_ip_i + b}{w}-1\right| \distsynorder \left|\frac{\sum_{i=1}^n a_i'p_i + b'}{w}-1\right| \quad & \text{iff} \quad && \left\{\begin{array}{lc}
\forall i.a_i \leq a_i' \land b \leq b' &\text{ if } \frac{b'}{w},\frac{b}{w} \geq 1\\
\forall i.a_i=a_i' \land b=b' \quad &\text{otherwise}
\end{array}\right.\\ \texttt{MAX}\{E_{1.1},\ldots,E_{1.n}\} \distsynorder \texttt{MAX}\{E_{2.1},\ldots,E_{2.m}\} \quad & \text{iff} \quad && \forall i. \exists j. E_{1.i} \distsynorder E_{2.j}\\ \texttt{MIN}\{E_{1.1},\ldots,E_{1.n}\} \distsynorder \texttt{MIN}\{E_{2.1},\ldots,E_{2.m}\} \quad & \text{iff} \quad && \forall j. \exists i. E_{1.i} \distsynorder E_{2.j} \end{alignat*} \end{definition} Let $\equiv_\mcal{E}$ be the set of pairs of syntactically equivalent expressions. We now extend the ordering to distance functions: \begin{definition} The \emph{syntactic} ordering on distance functions $\sqsubseteq_{\mcal{E}}$ is defined for any $d_{\mcal{E}}^1, d_{\mcal{E}}^2 \in \dfuncparsem_{\mcal{E}}$: \[ d_{\mcal{E}}^1 \sqsubseteq_{\mcal{E}} d_{\mcal{E}}^2 \quad \text{ iff } \quad \forall s,t \in S. d_{\mcal{E}}^1(s,t) \distsynorder d_{\mcal{E}}^2(s,t). \] \end{definition} As the syntactic expression computed by $\ffunc_{\expr}$ for any pair of states $(s,t)$ is merely syntactically representing the functions computed by $\mcal{F}$ for the same pair of states, the two concepts are closely related. For any expression $d_\mcal{E} \in \dfuncparsem_{\mcal{E}}$ let $d \in \mcal{D}$ be the associated semantic function. Then it is the case that the syntactic ordering of expressions implies the same semantic ordering of the associated semantic functions. Furthermore, iteratively updating the distances as parametric expressions by $\ffunc_{\expr}$ is semantically equivalent to computing the distances as functions by $\mcal{F}$. \begin{lemma}\label{lem:synsemrelation} For any $d_\mcal{E}^1,d_\mcal{E}^2 \in \dfuncparsem_{\mcal{E}}$ and $n \in \mathbb{N}$: \begin{enumerate} \item $d_\mcal{E}^1 \sqsubseteq_{\mcal{E}} d_\mcal{E}^2 \implies d^1 \leq d^2$. \item $\llbracket \ffunc_{\expr}^n(d_\mcal{E}^1)(s,t) \rrbracket (v) = \mcal{F}^n(d^1)(s,t)(v)$. \end{enumerate} \end{lemma} We will now demonstrate an upper bound on the relevant matching transition sequences for the syntactic computations in $\ffunc_{\expr}$, given that all loops have at least one strictly positive non-parametric weight. This is similar to assuming no 0-cycles in the weighted case (\autoref{lem:finsequence}). \begin{lemma}\label{lem:finsequence_par} Let $\mcal{K}=(S,AP,\mcal{L},\to)$ be a WKS with state $s \in S$ such that $s \to_w s'$ and let
$\mathcal{K_\mcal{P}}=(S_\mcal{P},AP,\mcal{L}_\mcal{P},\to^\mcal{P})$ be a PWKS with the following property: \begin{itemize}
\item There exists a $w_{\min} > 0$ such that for any valuation, the accumulated weight of every loop in $\mathcal{K}_\mcal{P}$ is at least $w_{\min}$ (strongly cost non-zeno). \end{itemize} Then for any $t \in S_\mcal{P}$: \begin{align*} \exists N. &\forall \pi = t\rightarrow_{v_1}^\mcal{P} t_1 \ldots \rightarrow_{v_n}^\mcal{P} t_n, n \geq N.\\ &\exists \pi^* = t\rightarrow_{u_1}^\mcal{P} t_1' \ldots \rightarrow_{u_m}^\mcal{P} t_m', m \leq N.\\
&t_n = t_m' \,\land\, \left|\frac{\sum_{i=1}^m u_i}{w}-1\right| \distsynorder \left|\frac{\sum_{i=1}^n v_i}{w}-1\right| \land\\ & \{t_1', \ldots, t_{m-1}'\} \subseteq \{t_1,\ldots,t_{n-1}\} \end{align*} \end{lemma} \begin{proof}
Let the maximum weight out of $s$ be $s_{w_{\max}}$. Any sequence of length $|S_\mcal{P}|$ must have a loop which, by assumption, cannot have accumulated weight 0 w.r.t any parameter valuation. Thus, the accumulated weight w.r.t any valuation is at least $w_{\min}$. Without loss of generality we assume it to be exactly $w_{\min}$. Exercising the loop a number of times will at some point result in the accumulated weight being greater than $2 \cdot s_{w_{\max}}$ w.r.t any valuation. Let this sequence be $\pi^* = t\rightarrow_{v_1}^\mcal{P} t_1 \cdots \rightarrow_{v_k}^\mcal{P} t_k$ and let $x$ denote the number of times the loop is exercised i.e $x \cdot w_{\min} \geq 2 \cdot s_{w_{\max}}$. Let $\sum_{i=1}^k v_i= \sum_{i=1}^n a_ip_i+b$. Then it is clear that $\frac{b}{s_{w_{\max}}} > 1$. Now consider the corresponding non-looping sequence $\pi_1= t\rightarrow_{u_1}^\mcal{P} t_1' \cdots \rightarrow_{u_l}^\mcal{P} t_l'$ and let $\sum_{i=1}^l u_i= \sum_{i=1}^n a_i'p_i+b'$. We would like it to be the case that \[
\left|\frac{\sum_{i=1}^n a'_ip_i + b'}{w}-1\right| \distsynorder \left|\frac{\sum_{i=1}^n a_ip_i + b}{w}-1\right| \]
but it might be the case that $\frac{b'}{s_{w_{\max}}} < 1$. Consider a third sequence $\pi = t\rightarrow_{x_1}^\mcal{P} t_1'' \cdots \rightarrow_{x_m}^\mcal{P} t_m''$, being $\pi^*$ modified to exercise the loop one more time and let $\sum_{i=1}^m x_i = \sum_{i=1}^n a_i''p_i+b''$. Now we know that $\frac{b''}{s_{w_{\max}}} > 1$ as $b'' > b'$ and furthermore $\left|\frac{\sum_{i=1}^n a_i'p_i + b'}{w}-1\right| \distsynorder \left|\frac{\sum_{i=1}^n a_i''p_i + b''}{w}-1\right|, t_k = t_m''$ and $\{t_1',\ldots,t_k\} \subseteq \{t_1'',\ldots,t_m''\}$. We can now derive $N$. For $\pi^*$ we have the inequality $x \cdot w_{\min} \geq 2 \cdot s_{w_{\max}}$ and by \autoref{lem:finsequence} this leads to the bound $\frac{2 \cdot s_{w_{\max}}}{w_{\min}} \cdot |S_\mcal{P}|$. As $\pi$ is at most $|S_\mcal{P}|$ longer than $\pi^*$ we get \[
N \geq \frac{2 \cdot s_{w_{\max}}}{w_{\min}} \cdot |S_\mcal{P}| + |S_\mcal{P}| \] \end{proof} Note that the bound also holds for the semantic function $\mcal{F}$ as the syntactic ordering implies the semantic ordering (\autoref{lem:synsemrelation}).
We can now limit $\ffunc_{\expr}$ to only consider sequences of length at most $N$, assuming that the PWKS is strongly cost non-zeno. We apply this fact to prove that, after a finite number of iterations of $\ffunc_{\expr}$, we will have discovered two syntactically equivalent expressions. As syntactic equivalence implies semantic equivalence of the associated functions, we get by \autoref{lem:synsemrelation} that $d_{\min}$ can be computed by repeated application of both $\mcal{F}$ and $\ffunc_{\expr}$ in a finite number of steps.
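For instance, the expression $E_{s,t}$ obtained for the running parametric example in the closing example of this section can be evaluated directly, and a coarse, purely illustrative search for a valuation minimising the induced distance can be performed; a real implementation would instead hand the inequalities to a solver such as $Z3$ or $\nu Z$, as discussed earlier. The sketch below hard-codes that expression.
\begin{verbatim}
def E(p):
    """Expression E_{s,t} from the closing example of this section."""
    return max(0.5,
               abs(p / 1 - 1),
               min(abs(p / 5 - 1), abs((p + 2) / 5 - 1), abs((p + 4) / 5 - 1)),
               min(abs(p / 3 - 1), abs((p + 2) / 3 - 1)))

for p in (0, 1, 2):
    print(p, E(p))        # 0 -> 1.0, 1 -> 0.5, 2 -> 1.0, as in the examples

# Coarse grid search for the minimal distance induced by some valuation.
print(min(E(k / 100) for k in range(0, 501)))     # prints 0.5
\end{verbatim}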
\begin{lemma}\label{lem:synequiv} There exists $n < m$ such that $\ffunc_{\expr}^n(d_{\mcal{E}}^0) \equiv_\expr \ffunc_{\expr}^m(d_{\mcal{E}}^0)$. \end{lemma} \begin{proof} Let \[ \ffunc_{\expr}^n(d_{\mcal{E}}^0)(s,t) = \texttt{MIN}\left\{\texttt{MAX}\left\{E_{1.1},\ldots,E_{1.k}\right\},\ldots,\texttt{MAX}\left\{E_{m.1},\ldots,E_{m.n}\right\}\right\}. \]
From the definition of $\equiv_\expr$ we directly get $\texttt{MAX}$ and $\texttt{MIN}$ expressions behave like sets. Duplicates can be ignored i.e $\texttt{MAX}\{E_1,E_2,E_2\} \equiv_\expr \texttt{MAX}\{E_1,E_2\}$, $\texttt{MIN}\{\texttt{MAX}\{E_1,E_2\},\texttt{MAX}\{E_1,E_2\}\} \equiv_\expr \texttt{MIN}\{\texttt{MAX}\{E_1,E_2\}\}$ and the ordering of elements does not matter; $\texttt{MAX}\left\{E_1,E_2\right\} \equiv_\expr \texttt{MAX}\left\{E_2,E_1\right\}$. By \autoref{lem:finsequence_par} we can limit the transition sequences to length $N$. This implies that only a finite number of basic elements $\left|\frac{v}{w}-1\right|$ exist when iteratively applying $\ffunc_{\expr}$. As one can only construct a finite number of unique sets from a finite set of elements, the number of syntactically unique expressions (w.r.t $\equiv_\expr$) is finite. Therefore, there must exist a $m > n$ such that $\ffunc_{\expr}^n(d_{\mcal{E}}^0) \equiv_\expr \ffunc_{\expr}^m(d_{\mcal{E}}^0)$. \end{proof} We can now demonstrate computability of the distance. \begin{theorem}[Computability] There exists a natural number $n$ such that for all states $s \in S, t \in S_\mcal{P}$ and all valuations $v \in \mcal{V}$ \[ \llbracket \mcal{F}^n(d_{\mcal{E}}^0)(s,t) \rrbracket (v) = d_{\min}(s,t)(v). \] \end{theorem} \begin{proof} By \autoref{lem:synequiv}, there exists $n < m$ such that $\ffunc_{\expr}^n(d_{\mcal{E}}^0) \equiv_\expr \ffunc_{\expr}^m(d_{\mcal{E}}^0)$. By \autoref{lem:synsemrelation} we thus get semantic equivalence $\mcal{F}^n(d^0) \equiv \mcal{F}^m(d^0)$ and as $\mcal{F}$ is monotonic on $(\mcal{D}, \leq)$ we have for all $i$ s.t $n \leq i \leq m$ that $\mcal{F}^i(d^0) \equiv \mcal{F}^m(d^0)$. Thus, $\mcal{F}^n(d^0)$ is a fixed point found after a finite number of steps and is captured syntactically by $\ffunc_{\expr}^n(d_{\mcal{E}}^0)$. The check for equivalence ($\equiv_\expr$) can therefore be used to capture a semantic fixed point syntactically. The fixed point must also be the least fixed point. To see this, suppose towards a contradiction that it is not the least fixed point. Then there exists a $k < n$ such that $\mcal{F}^k(d^0) = d_{\min}$ but by the fixed point property of $d_{\min}$ and the monotonicity of $\mcal{F}$ we immediately get $\mcal{F}^k(d^0) \equiv \mcal{F}^n(d^0)$ which contradicts our assumption that $\mcal{F}^n(d^0)$ is not the least fixed point of $\mcal{F}$. \end{proof} By computing the syntactic fixed point we thus get a syntactic expression $\ffunc_{\expr}^n(d_{\mcal{E}}^0)(s,t) = E_{s,t}$ for each pair of states $s,t$ such that the solution set to $E_{s,t} \leq \varepsilon$ characterizes the set of ``good'' parameter valuations. \begin{example} Consider the WKS and PWKS from \autoref{fig:PWKSex}. To compute $E_{s,t}$, let $d_{\mcal{E}}^i(s,t) = \ffunc_{\expr}^i(d_{\mcal{E}}^0)(s,t)$. We now show how the distance from $s$ to $t$ is updated after each iteration. \begin{alignat*}{2} &d_{\mcal{E}}^1(s,t) &&= \texttt{MAX}\left\{\begin{array}{ll}
\texttt{MAX}\left\{\left|\frac{1}{1}-1\right|,d_{\mcal{E}}^0(s_1,t_2)\right\},\\[0.15cm]
\texttt{MAX}\left\{\left|\frac{3}{2}-1\right|,d_{\mcal{E}}^0(s_1,t_2),d^0_\mcal{E}(s,t_2)\right\}
\end{array}\right\}\\ &d_{\mcal{E}}^2(s,t) &&= \texttt{MAX}\left\{\begin{array}{ll}
\texttt{MAX}\left\{\left|\frac{1}{1}-1\right|,0\right\},\\[0.15cm]
\texttt{MAX}\left\{\left|\frac{3}{2}-1\right|,0,\frac{1}{2}\right\}
\end{array}\right\}\\ &d_{\mcal{E}}^3(s,t) &&= \texttt{MAX}\left\{\begin{array}{l}
\frac{1}{2},\left|\frac{p}{1}-1\right|,\\
\texttt{MIN}\left\{\left|\frac{p}{5}-1\right|,\left|\frac{p+2}{5}-1\right|,\left|\frac{p+4}{5}-1\right|\right\},\\[0.15cm]
\texttt{MIN}\left\{\left|\frac{p}{3}-1\right|,\left|\frac{p+2}{3}-1\right|\right\}
\end{array}\right\}\\ &d_{\mcal{E}}^4(s,t) && = d_{\mcal{E}}^3(s,t) \end{alignat*} We immediately see that, for every valuation, $E_{s,t}$ is bounded from below by $\frac{1}{2}$. This implies that there exists no valuation $v \in \mcal{V}$ such that $s \simeps t[v]$ for $\varepsilon < \frac{1}{2}$. If we consider the valuation $v_{\min}(p) = 1$ we get that $\llbracket E_{s,t} \rrbracket (v_{\min}) = \frac{1}{2}$, i.e.\ $v_{\min}$ is the valuation that induces the minimal distance $d(s,t[v_{\min}]) = \frac{1}{2}$. \end{example} \section{Conclusion and Future Work}\label{sec:conc_future} We have characterized the distance from \cite{WCTL_logic} between weighted Kripke structures (WKS) as a least fixed point. The distance between any pair of states can thus be computed by first assuming the distance between any pair to be 0 and then applying a step-wise refinement of the distance. The computability of the distance is guaranteed because a finite number of the (potentially) infinitely many transition sequences of the system suffices. This we proved by demonstrating an upper bound on the relevant sequences. We furthermore lifted the distance to parametric WKS (PWKS), where transition weights can be parametric. The parameters can be used to abstract multiple configurations of the same system as one parametric system. In this case the distance is from a WKS to a PWKS and is concretely a parametric expression that one can evaluate to get an exact distance from the WKS to a specific WKS instance of the PWKS. The question is then which configuration (parameter valuation) is ``best'', i.e.\ minimizes the induced distance. For computability we again demonstrate an upper bound on the length of the relevant transition sequences. To do this we assume all cycles to be cost non-zeno, i.e.\ any loop must include a transition with a positive rational weight.
For future work, the exact complexity of computing the distance remains open. From \cite{WCTL_logic} we know that checking whether the distance is 0 is NP-complete, but the general complexity of checking whether the distance is below a given $\varepsilon \in \R_{\geq 0}$ is unknown. One could also investigate whether the distance admits a polynomial-time approximation scheme.
\end{document}
\begin{document}
\title{High-order stroboscopic averaging methods for highly oscillatory delay problems }
\author{ M. P. Calvo \and J. M. Sanz-Serna \and Beibei Zhu }
\institute{M. P. Calvo\at Departamento de Matem\'atica Aplicada e IMUVA, Facultad de Ciencias, Universidad de Valladolid, Spain\\ \email{[email protected]} \and J. M. Sanz-Serna \at Departamento de Matem\'aticas, Universidad Carlos III de Madrid, Avenida de la Universidad 30, E-28911 Legan\'es (Madrid), Spain \\ \email{[email protected]} \and Beibei Zhu \at National Center for Mathematics and Interdisciplinary Sciences, Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing 100190, China\\
\email{[email protected]} } \date{}
\maketitle
\begin{abstract} We introduce and analyze a family of heterogeneous multiscale methods for the numerical integration of highly oscillatory systems of delay differential equations with constant delays. The methodology suggested provides algorithms of arbitrarily high accuracy. \end{abstract}
\noindent\textbf{Mathematical Subject Classification (2010)} 65L03, 34C29
\noindent\textbf{Keywords} Delay differential equations, stroboscopic averaging, highly oscillatory problems
\section{Introduction}
This paper suggests and analyzes heterogeneous multiscale methods \cite{E2003,EEngquist,Engquist,E2007,Li, Ariel,arieh,CS2} for the numerical solution of highly oscillatory systems of delay differential equations (DDEs) with constant delays. The methods may achieve arbitrarily high orders of convergence and are based on the idea of the stroboscopic averaging method (SAM) \cite{CCMS1,CCMS2} for highly oscillatory ordinary differential equations (ODEs).
We are interested in integrating highly oscillatory delay differential systems of the form \begin{eqnarray}\label{eq:dde1} \frac{d}{dt}x(t)&=&f(x(t),x(t-\tau),t,\Omega t;\Omega),\qquad 0 \leq t \leq t_{max},\\ x(t)&=&\varphi(t),\qquad -\tau\le t \leq 0.\label{eq:dde2} \end{eqnarray} Here \(\tau>0\) is the constant delay, the angular frequency \(\Omega\gg 1\) is a large parameter and \(f\) is smooth, takes values in \(\mathbb{R}^D\) and is \(2\pi\)-periodic in its fourth argument. Note that, in addition to its fast periodic dependence on time through the combination \(\Omega t\), the function \(f\) depends (slowly) on \(t\) through its third argument. An example is given by \begin{eqnarray}\label{eq:toggle} \dot{x}_1(t)&=&\frac{\alpha}{1+x_2^{\beta}(t)}-x_1(t-\tau)+A\sin(\omega t)+B\sin(\Omega t),\\ \dot{x}_2(t)&=&\frac{\alpha}{1+x_1^{\beta}(t)}-x_2(t-\tau), \nonumber \end{eqnarray} where \(\alpha\), \(\beta\), \(A\), \(B\), \(\omega\) are constants. The term \(B\sin(\Omega t)\) induces fast oscillations in the solution and \(A\sin \omega t\) is a slow forcing. In the absence of external slow and fast forcing (\(A=B = 0\)) the system represents a delayed genetic toggle switch, a synthetic gene regulatory network \cite{gardner}. The paper \cite{Daza} studies the phenomenon of \emph{vibrational resonance} \cite{vr,guirao} of the switch, i.e. the enhancement of the response to the slow forcing created by the presence of the high frequency forcing. For additional examples of problems of the form \eqref{eq:dde1} see \cite{beibei}.
The application of standard software to the integration of \eqref{eq:dde1} may be very expensive because accuracy typically requires that the step length be smaller than the small period \(T=2\pi/\Omega\). The difficulties increase in cases, such as \eqref{eq:toggle}, that have to be simulated over long time intervals for many choices of the parameters and constants that appear in the system. The algorithms suggested in this paper integrate an averaged version of \eqref{eq:dde1} that does not lead to solutions with fast oscillations. As other heterogeneous multiscale methods, the required information on the underlying averaged system is obtained on the fly by integrating \eqref{eq:dde1} in narrow time-windows. Here we follow the SAM technique \cite{CCMS1,CCMS2} where the right hand-side of the averaged system is retrieved by using finite differences. A similar approach has been used in \cite{beibei} but there are important differences between the algorithm in that reference and the integrators in this paper:
\begin{itemize} \item The integrators suggested here are constructed by rewriting \eqref{eq:dde1}--\eqref{eq:dde2} as an ODE problem, which is then solved by using the ODE SAM algorithms in \cite{CCMS1,CCMS2}. Reference \cite{beibei} borrows the main ideas of \cite{CCMS1,CCMS2} and adjusts them to the delay problem \eqref{eq:dde1}--\eqref{eq:dde2}. \item A single algorithm is introduced in \cite{beibei}; it is based on integrating the averaged problem with the second-order Adams-Bashforth formula. In fact the approach in \cite{beibei} would be difficult to generalize to higher-order methods due to the lack of regularity of the solutions of DDEs. The methodology in this paper makes it possible to construct integrators of arbitrarily high orders. \item The analysis of the algorithms in this paper only uses techniques that are standard in the theories of the numerical integration and averaging of ODEs. The analysis in \cite{beibei} requires the development of special averaging results for DDEs. \end{itemize}
This paper has seven sections. Section~\ref{sec:review} presents background material on SAM integrators for ODEs. Section~\ref{sec:ddes} explains the reformulation of \eqref{eq:dde1}--\eqref{eq:dde2} as an ODE problem. The new algorithms are described and analyzed in Sections~\ref{algorithms} and \ref{errorbounds} respectively. Numerical experiments are reported in Section~\ref{sec:experiments} and the final Section contains some proofs and some extensions.
\section{A review of SAM for ODEs} \label{sec:review} The reader is referred to \cite{CCMS1,CCMS2} for a detailed description and analysis of SAM; here we restrict ourselves to those aspects of the method that are needed to present the algorithms in Section~\ref{algorithms}.
SAM is a heterogeneous multiscale technique for the numerical integration of highly oscillatory systems of the form
\begin{equation}\label{eq:sam1} \frac{d}{dt}y = f(y,\Omega t;\Omega), \end{equation}
where the sufficiently smooth\footnote{ The exact number of derivatives that \(f\) must possess depends on the specific SAM algorithm, on the choice of \(J\) below, etc. In order to simplify the exposition we prefer not to keep track of that number. }
function \(f:\mathbb{R}^D\times \mathbb{R}\times(0,\infty)\rightarrow \mathbb{R}^D\) depends \(2\pi\)-periodically on its second argument \(\Omega t\) and \(\Omega\gg 1\) is a large parameter. It is assumed that
\(\Omega^{-1}f\) and its derivatives remain bounded as \(\Omega \uparrow \infty\). The solutions of \eqref{eq:sam1} are sought in an integration interval \(t_0\leq t\leq t_{max}\) assumed to be independent of \(\Omega\).
SAM is applicable whenever, over one period, the solution change \(y(t_0+T)-y(t_0)\) is \(\mathcal{O}(\Omega^{-1})\) as \(\Omega \uparrow \infty\), see \cite{CCMS1}. Then, given an arbitrarily large integer \(J\), there exists a stroboscopic averaged system \cite{Chartier,Murua,Sanz-Serna, kurusch}
\begin{equation}\label{eq:sam2} \frac{d}{dt} Y = F(Y;\Omega)
\end{equation}
such that, if \(y(t)\) and \(Y(t)\) are solutions of \eqref{eq:sam1} and \eqref{eq:sam2} that share a common value at time \(t_0\), then \(Y\) interpolates \(y\) with (small) \(\mathcal{O}(\Omega^{-J})\) errors at \emph{stroboscopic times}, i.e.\ at values of \(t\) of the form \(t_n=t_0+nT\), \(n\) an integer. The constant implied in the \(\mathcal{O}\) notation is independent of \(n\) for \(t_n\) ranging in a compact time interval.
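\begin{rem}\em
A toy scalar illustration (ours, chosen only to fix ideas): for \((d/dt)\, y = y\sin(\Omega t)\) with \(t_0=0\), which is an instance of \eqref{eq:sam1}, the exact solution is \(y(t) = y(0)\exp\left((1-\cos(\Omega t))/\Omega\right)\). Over one period \(y(t_0+T)-y(t_0)=0=\mathcal{O}(\Omega^{-1})\), and \(y(t_n)=y(0)\) at every stroboscopic time \(t_n=nT\). The stroboscopically averaged system may therefore be taken to be \((d/dt)\,Y=0\), i.e.\ \(F\equiv 0\), and the constant solution \(Y(t)\equiv y(0)\) interpolates \(y\) at the stroboscopic times (in this particularly simple example, without any error at all).
\end{rem}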
\begin{rem} \label{rem:depends}\em In \eqref{eq:sam2}, the function \(F\) may be chosen to be a polynomial in \(\Omega^{-1}\) whose degree increases with \(J\) (the dependence of \(F\) on \(J\) is not reflected in the notation). The coefficients of this polynomial are smooth functions of \(Y\) that depend on \(t_0\) (again this dependence has not been incorporated to the notation). Explicit formulas for the construction of \(F\) may be seen in \cite{Murua,kurusch}. \end{rem}
Since \(F\) does not depend on the rapidly varying phase \(\Omega t\), the system \eqref{eq:sam2} is non-oscillatory and its numerical integration may be performed with step sizes that are not restricted in terms of the small period \(T\). This consideration, by itself, is not sufficient to construct a viable numerical algorithm because, for large \(J\), finding the analytic expression of \(F\) may be extremely expensive even with the help of a symbolic manipulator \cite{abel}. SAM is a technique for the numerical integration of \eqref{eq:sam1} based on integrating numerically \eqref{eq:sam2} without using that analytic expression;
the required information on \(F\) is collected on the fly by means of numerical integrations of \eqref{eq:sam1}. In its crudest variant, SAM approximately evaluates \(F\) at a given vector \(w\in\mathbb{R}^D\) by using the finite difference formula
\begin{equation}
\label{eq:fw}
F(w;\Omega) \approx \frac{1}{T}[\Phi_T(w)-w],
\end{equation}
where \(\Phi_T(w)\) is the value at time \(t_0+T\) of the solution of \eqref{eq:sam1} with value \(w\) at time \(t_0\). This makes sense because, up to a small
\(\mathcal{O}(\Omega^{-J})\) error, \(\Phi_T(w)\) coincides with the value at \(t_0+T\) of the solution of \eqref{eq:sam2} with \(Y(t_0)=w\) and the slope of this solution at time \(t_0\) is \( F(w;\Omega)\).
SAM consists of three parts: the macrointegrator, the numerical differentiation formula, and the microintegrator. These will be discussed presently. There is much freedom in the choice of each of these three elements.
The macrointegrator is the algorithm used to integrate \eqref{eq:sam2}; it may be e.g.\ a Runge-Kutta or a linear multistep method. For simplicity we shall assume throughout that the macrointegrator uses a constant step size \(H\); however variable step sizes may be equally applied within SAM. It is not necessary that the step points used by the macrointegrator be stroboscopic times. If the macrointegration is arranged in such a way that output is produced at stroboscopic times, then that output provides approximations to the oscillatory solution \(y\). If, on the other hand, one needs to obtain an approximation to \(y(t)\) at a time \(t\) that is not stroboscopic, then one may use SAM to approximate \(y(t_n)\) at the largest stroboscopic time \(t_n\) less than \(t\) and then integrate \eqref{eq:sam1} in the short interval \([t_n,t]\) with length \(< T\).
Instead of the crude differentiation formula \eqref{eq:fw} with \(\mathcal{O}(T)\), i.e.\ \(\mathcal{O}(\Omega^{-1})\), errors, one may use the familiar second-order central difference formula
\begin{equation} \label{eq:cw} F(w;\Omega) \approx \frac{1}{2T}[\Phi_T(w)-\Phi_{-T}(w)], \end{equation}
with \(\mathcal{O}(\Omega^{-2})\) errors (\(\Phi_{-T}(w)\) is the value at time \(t_0-T\) of the solution of \eqref{eq:sam1} with value \(w\) at time \(t_0\)), the fourth-order formula based on function values at \(t_0\pm T\), \(t_0\pm 2T\), etc.
The microintegrator is the algorithm used to integrate \eqref{eq:sam1} to approximately obtain
the values \(\Phi_{\pm kT}(w)\) required by the numerical differentiation formula being employed.
The microintegrator may be e.g. a Runge-Kutta or a linear multistep method and need not coincide with
the scheme used as a macrointegrator. It may use constant or variable step sizes; for simplicity we will
restrict the attention to the case where the step size \(h\) is constant. When \eqref{eq:fw} is used,
each evaluation of \(F\) requires a microintegration of the oscillatory system \eqref{eq:sam1} in the interval
\(t_0\leq t\leq t_0+T\). As \(\Omega\) increases the microintegration step size \(h\) has to be reduced on
accuracy grounds, but this is compensated by the fact that the microintegration interval length \(T\) shrinks
correspondingly. The central difference formula \eqref{eq:cw} needs two microintegrations per evaluation of \(F\): one of them operates forward in time and finds \(\Phi_T(w)\), and the other goes backwards to find \(\Phi_{-T}(w)\). More involved differentiation formulas require forward microintegrations in longer intervals of the form \([t_0,t_0+kT]\) and/or backward integrations in intervals \([t_0-k^\prime T,t_0]\) (\(k\), \(k^\prime\) are small positive integers).
\begin{rem} \label{rem:micro} \em It is important to note that each microintegration starts from an initial condition that is specified at time \(t_0\), regardless of the point of the time axis the macrointegration may have reached when the microintegration is carried out. This is a consequence of the fact, pointed out in Remark \ref{rem:depends}, that the averaged system \eqref{eq:sam2} depends on \(t_0\) (see \cite{CCMS1} for a detailed explanation). \end{rem}
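To fix ideas, here is a sketch (ours, and purely illustrative: the toy right-hand side, the value of \(\Omega\), the numbers of macro- and microsteps, and the use of Runge's second-order formula as both macro- and microintegrator are assumptions made only to keep the listing short) that assembles the three ingredients for the format \eqref{eq:sam1}, with \(t_0=0\) and with \(F\) recovered by the central difference formula \eqref{eq:cw}.
\begin{verbatim}
import numpy as np

Omega = 64 * np.pi            # large angular frequency (toy value)
T = 2 * np.pi / Omega         # fast period

def f(y, theta):
    # Toy oscillatory right-hand side, 2*pi-periodic in theta = Omega*t;
    # over one period the solution changes by O(1/Omega), as SAM requires.
    return -y + np.cos(y) * np.sin(theta)

def micro_rk2(w, direction, nu_max=20):
    # Micro-integration over one period, forward (+1) or backward (-1),
    # with Runge's second-order formula. The fast phase always starts at
    # Omega*t0 = 0, in agreement with the remark above.
    h = direction * T / nu_max
    y, t = w, 0.0
    for _ in range(nu_max):
        k = f(y, Omega * t)
        y = y + h * f(y + 0.5 * h * k, Omega * (t + 0.5 * h))
        t += h
    return y

def F(w):
    # Averaged slope recovered by the central difference of the two
    # micro-integrations (the second-order formula discussed above).
    return (micro_rk2(w, +1) - micro_rk2(w, -1)) / (2 * T)

def sam_rk2(y0, t_max, N):
    # Macro-integration of the averaged system with Runge's formula.
    H = t_max / N
    Y = y0
    for _ in range(N):
        Y = Y + H * F(Y + 0.5 * H * F(Y))
    return Y

# Here H = 1/8 = 4T, so the macro step points are stroboscopic times and
# the output also approximates the oscillatory solution itself at t = 1.
print(sam_rk2(1.0, 1.0, N=8))
\end{verbatim}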
\begin{rem} \label{rem:slowtime} \em The presentation so far has been restricted to the format \eqref{eq:sam1}. It is also possible to apply SAM to problems \begin{equation}\label{eq:sam3} \frac{d}{dt}y = f(y,t,\Omega t;\Omega), \end{equation} where now \(f\) depends on the slow time \(t\), \(t_0\leq t\leq t_{max}\), in addition to
the \emph{fast} dependence through \(\Omega t\). In fact the case \eqref{eq:sam3} may be reduced to the format \eqref{eq:sam1} by the standard device of considering the second argument of \(f\) as a new dependent variable \(y^0\) and appending to the system the additional equation \(dy^0/dt = 1\).
\end{rem}
Error bounds for SAM are presented in Section~\ref{errorbounds}.
\section{Highly-oscillatory DDEs} \label{sec:ddes} We are interested in integrating the highly oscillatory problem \eqref{eq:dde1}--\eqref{eq:dde2} under the hypothesis
that \(\Omega^{-1} f\) and its derivatives \emph{remain bounded as \(\Omega\uparrow \infty\)}. Without loss of generality \cite{beibei}, we assume that the (known) function \(\varphi\) that specifies the values of \(x\) in the interval \([-\tau,0]\) is \(\Omega\)-independent. The assumption that the integration of \eqref{eq:dde1} starts at \(t=0\) does not reduce the generality either, as one may always make a translation along the time axis. In order to simplify the exposition, we shall also assume hereafter that the \(\Omega\)-independent end-point \(t_{max}\) of the integration interval is an integer multiple of \(\tau\), i.e.\ \(t_{max}=L\tau\). When this is not the case we may apply the integrators below after increasing \(t_{max}\) up to the smallest integer multiple of \(\tau\) larger than \(t_{max}\). Alternatively one may integrate with the algorithms described below up to the largest integer multiple \(L^\prime \tau\) of \(\tau\) smaller than \(t_{max}\) and then complete the integration by using a conventional integrator for \eqref{eq:dde1} in the short interval \([L^\prime\tau, t_{max}]\).
The algorithms in this paper are based on the introduction of the functions \begin{eqnarray} x^{(0)}(t)&=&\varphi(t-\tau), \qquad 0\leq t \leq \tau, \label{eq:def1}\\ x^{(\ell)}(t)&=&x(t+(\ell-1)\tau), \qquad 0\leq t \leq \tau,\qquad \ell=1, \ldots, L;\label{eq:def2} \end{eqnarray} determining these functions is clearly equivalent to determining the solution \(x(t)\) of \eqref{eq:dde1}--\eqref{eq:dde2}. An illustration is given in Figure~\ref{fig:A}.
\begin{figure}
\caption{The top subplot gives a solution \(x\) of the oscillatory problem \eqref{eq:dde1} in the interval \(-\tau \leq t\leq 3\tau\), \(\tau=0.5\). The other subplots give the functions \(x^{(\ell)}\) for \(\ell = 0, 1, 2, 3\); these obviously provide all the information contained in \(x\). The discontinuous lines in the last three panels depict the solution of the averaged system of ODEs \eqref{eq:aver}. By definition of stroboscopic averaging, each \(X^{(\ell)}\) exactly coincides with the corresponding \(x^{(\ell)}\) at the initial time \(t=0\). An unrealistically low
value of the frequency \(\Omega\) is used here so as not to clutter the plots. }
\label{fig:A}
\end{figure}
In terms of the \(x^{(\ell)}\), the problem \eqref{eq:dde1}--\eqref{eq:dde2} is given by \begin{eqnarray} \label{eq:ode1} &&\frac{d}{dt}x^{(\ell)}(t)=\\&&f(x^{(\ell)}(t),x^{(\ell-1)}(t),t+(\ell-1)\tau,\Omega (t+(\ell-1)\tau);\Omega),\: 0\leq t \leq \tau, \: 1 \leq \ell \leq L,\nonumber \end{eqnarray} in tandem with the conditions \begin{equation} \label{eq:ode2} x^{(\ell)}(0)=x^{(\ell-1)}(\tau),\qquad 1 \leq \ell \leq L. \end{equation}
\begin{rem}\label{rem:seq} \em
Note that \(x^{(0)}\) is known from \eqref{eq:def1}. The unknown function \(x^{(1)}\) is determined from \eqref{eq:ode1} with \(\ell=1\) and the initial condition \(x^{(1)}(0) =\varphi(0)\); once \(x^{(1)}\) is known, \(x^{(2)}\) is determined from \eqref{eq:ode1} with \(\ell=2\) and the initial condition \(x^{(2)}(0) =x^{(1)}(\tau)\), etc. Thus, even though, in view of \eqref{eq:ode2}, the problem \eqref{eq:ode1}--\eqref{eq:ode2} has the appearance of a two-point boundary problem, we are really dealing with an initial-value problem (this was to be expected as \eqref{eq:ode1}--\eqref{eq:ode2} is just a way of writing \eqref{eq:dde1}--\eqref{eq:dde2}). \end{rem}
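Remark~\ref{rem:seq} is straightforward to mirror in code. The sketch below (ours) integrates the segments \(x^{(\ell)}\) of the toggle switch \eqref{eq:toggle} sequentially with a conventional ODE integrator (SciPy's \texttt{solve\_ivp}; the tolerances are arbitrary choices), using \(\Omega=16\pi\) and the constants employed later in the numerical experiments. It is meant only to illustrate the sequential structure \eqref{eq:ode1}--\eqref{eq:ode2}: a direct integration of this kind resolves every fast oscillation and becomes expensive as \(\Omega\) grows, which is precisely what the SAM-based algorithms below avoid.
\begin{verbatim}
import numpy as np
from scipy.integrate import solve_ivp

# toggle switch constants (as in the experiments below)
alpha, beta, A, omega, B = 2.5, 2.0, 0.1, 0.1, 4.0
tau, L, Omega = 0.5, 4, 16 * np.pi              # t_max = L*tau = 2

def f(x, x_del, t):
    return np.array([
        alpha / (1 + x[1] ** beta) - x_del[0]
        + A * np.sin(omega * t) + B * np.sin(Omega * t),
        alpha / (1 + x[0] ** beta) - x_del[1],
    ])

def phi(t):                                     # history on [-tau, 0]
    return np.array([0.5, 2.0])

# x^{(0)}(t) = phi(t - tau); each further segment feeds on the previous one
segments = [lambda t: phi(t - tau)]
for ell in range(1, L + 1):
    prev = segments[ell - 1]
    x0 = phi(0.0) if ell == 1 else prev(tau)
    rhs = lambda t, x, prev=prev, ell=ell: f(x, prev(t), t + (ell - 1) * tau)
    sol = solve_ivp(rhs, (0.0, tau), x0, dense_output=True,
                    rtol=1e-9, atol=1e-9, max_step=np.pi / Omega)
    segments.append(sol.sol)                    # x^{(ell)} as a callable

def x(t):                                       # recover x on [0, L*tau]
    ell = min(int(t // tau) + 1, L)
    return segments[ell](t - (ell - 1) * tau)

print(x(2.0))
\end{verbatim}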
Obviously \eqref{eq:ode1} is a highly-oscillatory system \emph{of ODEs} (rather than DDEs)
\begin{equation}\label{eq:odebf} \frac{d}{dt}{\bf x} ={\bf f}({\bf x}, t,\Omega t;\Omega),\qquad 0\leq t \leq \tau, \end{equation}
for the unknown function
\[ {\bf x}(t) = [x^{(1)}(t), \dots, x^{(L)}(t)],\qquad 0\leq t\leq \tau, \]
with values in \(\mathbb{R}^{LD}\) ($x^{(0)}$ is known, see \eqref{eq:def1}). The algorithms to be described below are based on the integration of \eqref{eq:odebf} with the help of SAM as described in the preceding section. If we denote by \(X^{(\ell)}\) the averaged version of \(x^{(\ell)}\), \(\ell = 1,\dots, L\), (\(X^{(0)}(t) =\varphi(t-\tau)\), \(0\leq t\leq \tau\)) the averaged system for
\[ {\bf X}(t) = [X^{(1)}(t), \dots, X^{(L)}(t)],\qquad 0\leq t\leq \tau, \]
is of the form \begin{eqnarray} \label{eq:aver} &&\frac{d}{dt}X^{(\ell)}(t)=\\&&F^{(\ell)}(X^{(\ell)}(t),X^{(\ell-1)}(t),\dots, X^{(0)}(t),t;\Omega),\quad 0\leq t \leq \tau, \quad 1 \leq \ell \leq L,\nonumber \end{eqnarray} where we note that \(X^{(\ell+1)}\), \dots , \(X^{(L)}\) do not appear in the right-hand side because \(x^{(\ell)}\) (and therefore its averaged version \(X^{(\ell)}\)) does not depend on the values of the solution \(x\) for \(t> \ell \tau\).
\begin{rem}\label{rem:triangular} \em With a terminology borrowed from linear algebra, we may say that the system \eqref{eq:ode1} has a lower bidiagonal structure, while \eqref{eq:aver} is only lower triangular. The explicit formulas for the averaged system in \cite{beibei} show that in fact for \(J\) large, \(X^{(\ell-2)}\),\dots, \(X^{(0)}\) appear in the right-hand side of \eqref{eq:aver} in addition to \(X^{(\ell-1)}\) and \(X^{(\ell)}\). \end{rem}
\section{Algorithms} \label{algorithms}
We now introduce algorithms for the solution of \eqref{eq:dde1}--\eqref{eq:dde2}.
\subsection{Case I: the delay is an integer multiple of the period}
We study first the particular case where the delay \(\tau\) is an integer multiple of the period. The general situation requires algorithms with additional complications. We note that in some applications there is some freedom in choosing the exact value of the large angular frequency \(\Omega\); one may then use that freedom to ensure that \(\tau/T=\tau\Omega/(2\pi)\) is an integer and thus avoid the extra complications.
\begin{figure}
\caption{\footnotesize As Fig.~\ref{fig:A} in the particular case where the delay \(\tau\) is an integer multiple of the period \(T\). The numerical approximation \(X^{(\ell)}_N\) to \(X^{(\ell)}(\tau)\) approximates \(x^{(\ell)}(\tau)= x^{(\ell+1)}(0)\) and may be used as initial value to compute approximately \(X^{(\ell+1)}(t)\), \(0\leq t\leq \tau\), \(\ell=1,\dots, L-1\). }
\label{fig:B}
\end{figure}
We apply SAM, based on a macro step size \(H\) of the form \(\tau/N\) with \(N\) a positive integer, to the integration of the \(LD\)-dimensional ODE system \eqref{eq:odebf}. While at the outset the initial condition \({\bf x}(0)\) is not known (see \eqref{eq:ode2}), the triangular structure of the averaged system \eqref{eq:aver} noted in Remark~\ref{rem:triangular} makes it possible to complete the application of SAM by successively computing, for \(\ell = 1, 2,\dots, L\), the numerical approximations to the functions \(X^{(\ell)}(t)\), \(0\leq t\leq \tau\), very much as in Remark~\ref{rem:seq}. One first applies SAM to the oscillatory problem for \(x^{(1)}\). Because \(H\) is a submultiple of \(\tau\), the macrointegrator will produce an approximation \(X^{(1)}_N\) to \(X^{(1)}(\tau)=X^{(1)}(NH)\), see Figure~\ref{fig:B}. In addition we are assuming that \(\tau/T\in\mathbb{N}\), so that the final time \(t=\tau\) is stroboscopic and then \(X^{(1)}(\tau)\) is a very accurate approximation to \(x^{(1)}(\tau)\), i.e.\ to \(x^{(2)}(0)\). Therefore \(X^{(1)}_N\) provides an approximation to the missing initial value \(x^{(2)}(0)\) and it is then possible to approximate with SAM the solution \(x^{(2)}(t)\), \(0\leq t\leq \tau\). Iterating this procedure one approximates all the \(x^{(\ell)}(t)\), or, equivalently, the oscillatory solution \(x(t)\), \(0\leq t\leq t_{max}\).
Since coding SAM algorithms requires some care, it may be helpful to provide a detailed description of SAM for a particular choice of integrators and differentiation formula. This is done in Table~\ref{tab:algorithm} that refers to the case where the macro and microintegrators are chosen to be the familiar second-order formula of Runge
that for \((d/dt) z = g(z,t)\) reads
\[ z_{j+1/2} = z_j + \frac{\Delta t}{2}\, g(z_j,t_j),\qquad z_{j+1} = z_j+\Delta t\, g(z_{j+1/2},t_j+\Delta t/2). \]
The formula needs two function evaluations per step. The algorithm uses the central difference
formula \eqref{eq:cw} except when approximating \(F^{(\ell)}\), \(\ell = 1,\dots, L\), in \eqref{eq:aver},
at \(t=0\) where the forward formula \eqref{eq:fw} is applied.
At \(t=0\) central differences are not applicable: backward microintegrations cannot be performed
because the system \eqref{eq:ode1} is only defined for \(t\geq 0\) (\(x^{(0)}\) is not defined
for \(t<0\), see \eqref{eq:def1}). The algorithm consists of an initialization block followed by a loop for
the successive computation of the approximations to \(X^{(\ell)}(t)\), \(\ell = 1,\dots, L\). Note the different
treatment given at all the microintegrations to the third (slow time \(t\)) and fourth (fast rotating phase
\(\Omega t\)) arguments of \(f\); this is in agreement with Remark~\ref{rem:micro}.
\begin{table} \caption{SAM-RK2 Algorithm}
\begin{center} \begin{tabular}{lcccc} \hline $X^{(1)}_0 = \varphi(0)$ \% \texttt{initial condition}\\ \texttt{Load history}\\ For $\nu=0:\nu_{max}$ \\ ~~~~ $x^{(0)}_{0,\nu} = \varphi(-\tau+\nu h)$, $x^{(0)}_{0,\nu+1/2} = \varphi(-\tau+(\nu+1/2)h)$, \\ end \\ For $n=1:N-1$ \\ ~~~~ For $\nu=-\nu_{max}:\nu_{max}$ \\ ~~~~~~~~ $x^{(0)}_{n,\nu} = \varphi(-\tau+nH+\nu h)$, $x^{(0)}_{n,\nu+1/2} = \varphi(-\tau+nH+(\nu+1/2)h)$, \\ ~~~~ end \\ end \\ \texttt{Integration starts}\\ For $\ell=1:L$ \\ ~~~~ For $n=0:N-1$\\ ~~~~~~~~ \texttt{Compute $F^{(\ell)}_{n}$}\\ ~~~~~~~~ $t^{(\ell)}_n = nH + (\ell-1)\tau$ \% \texttt{initial time}\\ ~~~~~~~~ $x^{(\ell)}_{n,0} = X^{(\ell)}_n$ \% \texttt{initial value}\\ ~~~~~~~~ If $n=0$ \\ ~~~~~~~~~~~~ \texttt{Forward micro-integration} \\ ~~~~~~~~~~~~ For $\nu=0:\nu_{max}-1$ \\ ~~~~~~~~~~~~~~~~ $t^{(\ell)}_{0,\nu} = t_0^{(\ell)}+\nu h$, $t^{(\ell)}_{0,\nu+1/2} = t_0^{(\ell)}+(\nu +1/2)h$ \\ ~~~~~~~~~~~~~~~~ $x^{(\ell)}_{0,\nu+1/2} = x^{(\ell)}_{0,\nu}+(h/2) f(x^{(\ell)}_{0,\nu},x^{(\ell-1)}_{0,\nu},t^{(\ell)}_{0,\nu},\Omega\nu h;\Omega)$ \\
~~~~~~~~~~~~~~~~ $x^{(\ell)}_{0,\nu+1} = x^{(\ell)}_{0,\nu}+h f(x^{(\ell)}_{0,\nu+1/2},x^{(\ell-1)}_{0,\nu+1/2},t^{(\ell)}_{0,\nu+1/2},\Omega(\nu+1/2) h;\Omega)$, \\ ~~~~~~~~~~~~ end\\ ~~~~~~~~~~~~ $F^{(\ell)}_0=(x^{(\ell)}_{0,\nu_{max}}\hspace{-8pt}-x^{(\ell)}_{0,0})/T$ \% \texttt{slope at 1st stage }\\ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\texttt{of 1st macro-step for \(X^{(\ell)}\)}\\ ~~~~~~~~ else \\ ~~~~~~~~~~~~ \texttt{Forward micro-integration}\\ ~~~~~~~~~~~~ For $\nu=0:\nu_{max}-1$, \\ ~~~~~~~~~~~~~~~~ $t^{(\ell)}_{n,\nu} = t_n^{(\ell)}+\nu h$, $t^{(\ell)}_{n,\nu+1/2} = t_n^{(\ell)}+(\nu +1/2)h$ \\ ~~~~~~~~~~~~~~~~ $x^{(\ell)}_{n,\nu+1/2} = x^{(\ell)}_{n,\nu}+(h/2) f(x^{(\ell)}_{n,\nu},x^{(\ell-1)}_{n,\nu},t^{(\ell)}_{n,\nu},\Omega \nu h;\Omega)$ \\
~~~~~~~~~~~~~~~~ $x^{(\ell)}_{n,\nu+1} = x^{(\ell)}_{n,\nu}+h f(x^{(\ell)}_{n,\nu+1/2},x^{(\ell-1)}_{n,\nu+1/2},t^{(\ell)}_{n,\nu+1/2},\Omega(\nu+1/2) h;\Omega)$, \\ ~~~~~~~~~~~~ end\\ ~~~~~~~~~~~~ \texttt{Backward micro-integration}\\ ~~~~~~~~~~~~ For $\nu=0:\nu_{max}-1$, \\ ~~~~~~~~~~~~~~~~ $t^{(\ell)}_{n,-\nu} = t_n^{(\ell)}-\nu h$, $t^{(\ell)}_{n,-(\nu+1/2)} = t_n^{(\ell)}-(\nu +1/2)h$ \\ ~~~~~~~~~~~~~~~~ $x^{(\ell)}_{n,-(\nu+1/2)} = x^{(\ell)}_{n,-\nu}-(h/2) f(x^{(\ell)}_{n,-\nu},x^{(\ell-1)}_{n,-\nu},t^{(\ell)}_{n,-\nu},-\Omega\nu h;\Omega)$ \\
~~~~~~~~~~~~~~~~ $x^{(\ell)}_{n,-(\nu+1)} = x^{(\ell)}_{n,-\nu}$\\ ~~~~~~~~~~~~~~~~ $-hf(x^{(\ell)}_{n,-(\nu+1/2)},x^{(\ell-1)}_{n,-(\nu+1/2)}, t^{(\ell)}_{n,-(\nu+1/2)},-\Omega(\nu+1/2) h;\Omega)$, \\ ~~~~~~~~~~~~ end\\ ~~~~~~~~~~~~ $F^{(\ell)}_{n}=(x^{(\ell)}_{n,\nu_{max}}\hspace{-5pt}-x^{(\ell)}_{n,-\nu_{max}})/(2T)$ \% \texttt{slope at 1st stage} \\ ~~~~~~~~ end\\ ~~~~~~~~ \texttt{Macro-integration}\\ ~~~~~~~~ $X^{(\ell)}_{n+1/2}=X^{(\ell)}_{n}+(H/2)F^{(\ell)}_n$, \% \texttt{2nd stage of n-th macro-step} \\ ~~~~~~~~ \texttt{Compute $F^{(\ell)}_{n+1/2}$}\\ ~~~~~~~~ $x^{(\ell)}_{n+1/2,0} = X^{(\ell)}_{n+1/2}$ \% \texttt{initial value}\\ ~~~~~~~~ $t^{(\ell)}_{n+1/2} = (n+1/2)H + (\ell-1)\tau$ \% \texttt{initial time}\\ \end{tabular} \end{center} \label{tab:algorithm} \end{table}
\begin{table*}
\begin{center} \begin{tabular}{lcccc} ~~~~~~~~ \texttt{Forward micro-integration}\\ ~~~~~~~~ For $\nu=0:\nu_{max}-1$, \\ ~~~~~~~~~~~~ $t^{(\ell)}_{n+1/2,\nu} = t_{n+1/2}^{(\ell)}+\nu h, t^{(\ell)}_{n+1/2,\nu+1/2}=t_{n+1/2}^{(\ell)}+(\nu +1/2)h $ \\ ~~~~~~~~~~~~ $x^{(\ell)}_{n+1/2,\nu+1/2} = x^{(\ell)}_{n+1/2,\nu}+(h/2)$\\ ~~~~~~~~~~~~ $f(x^{(\ell)}_{n+1/2,\nu},x^{(\ell-1)}_{n+1/2,\nu},t^{(\ell)}_{n+1/2,\nu},\Omega \nu h;\Omega)$ \\%
~~~~~~~~~~~~ $x^{(\ell)}_{n+1/2,\nu+1} = x^{(\ell)}_{n+1/2,\nu}$\\ ~~~~~~~~~~~~ $+h f(x^{(\ell)}_{n+1/2,\nu+1/2},x^{(\ell-1)}_{n+1/2,\nu+1/2},t^{(\ell)}_{n+1/2,\nu+1/2},\Omega(\nu+1/2) h;\Omega)$, \\ ~~~~~~~~ end\\ ~~~~~~~~ \texttt{Backward micro-integration}\\ ~~~~~~~~ For $\nu=0:\nu_{max}-1$, \\ ~~~~~~~~~~~~ $t^{(\ell)}_{n+1/2,-\nu} = t_{n+1/2}^{(\ell)}-\nu h, t^{(\ell)}_{n+1/2,-(\nu+1/2)}=t_{n+1/2}^{(\ell)}-(\nu +1/2)h $ \\ ~~~~~~~~~~~~ $ x^{(\ell)}_{n+1/2,-(\nu+1/2)} = x^{(\ell)}_{n+1/2,-\nu}$\\ ~~~~~~~~~~~~ $-(h/2) f(x^{(\ell)}_{n+1/2,-\nu},x^{(\ell-1)}_{n+1/2,-\nu},t^{(\ell)}_{n+1/2,-\nu},-\Omega\nu h;\Omega)$ \\ ~~~~~~~~~~~~ $x^{(\ell)}_{n+1/2,-(\nu+1)} = x^{(\ell)}_{n+1/2,-\nu}-h\times$\\%
~~~~~~~~~~~~ $f(x^{(\ell)}_{n+1/2, -(\nu+1/2)}, x^{(\ell-1)}_{n+1/2, -(\nu+1/2)}, t^{(\ell)}_{n+1/2,-(\nu+1/2)}, -\Omega(\nu+1/2) h; \Omega)$ \\
~~~~~~~~ end\\ ~~~~~~~~ $F^{(\ell)}_{n+1/2} = (x^{(\ell)}_{n+1/2,\nu_{max}}\hspace{-8pt}-x^{(\ell)}_{n+1/2,-\nu_{max}})/(2T)$ \% \texttt{slope at 2nd stage}\\ ~~~~~~~~ \texttt{Macro-step with RK2}\\ ~~~~~~~~ $X^{(\ell)}_{n+1}=X^{(\ell)}_{n}+HF^{(\ell)}_{n+1/2}$ \\ ~~~~ end \\ ~~~~ if $\ell < L$ \\ ~~~~~~~~ $X^{(\ell+1)}_0 = X^{(\ell)}_N$ \\ ~~~~ end \\ end\\ \hline \end{tabular} \end{center} \end{table*}
\begin{rem}\label{rem:fixednumber} \em The first-order forward difference formula is used \(L\) times per run of the algorithm, regardless of the value of \(H\) (or, equivalently, regardless of the number of macrosteps needed to span an interval of length \(\tau\)). \end{rem}
\subsection{Case II: the delay is not an integer multiple of the period} \label{sec:case2}
In this case we apply SAM to the ODE system \eqref{eq:odebf} in the interval \(0\leq t \leq MT\) with $M=\lfloor \tau/T \rfloor$ (see Figure~\ref{fig:A}). In the short final interval \(MT\leq t\leq \tau\) (whose length is \(<T\)) we integrate numerically the oscillatory problem itself and for this purpose we choose the integrator and step length being used for the microintegrations. In this way the initial value \(X^{(\ell)}(0)\), \(\ell=2,\dots,L\), required by SAM is computed approximately as the numerical result at \(t=\tau\) of the integration of the oscillatory equation for \( x^{(\ell-1)}(t)\) in \eqref{eq:ode1} that starts at \(t=MT\) from the SAM approximation to \(X^{(\ell-1)}(MT)\approx x^{(\ell-1)}(MT)\). The integration of the equation for \(x^{(L)}(t)\) in \(MT\leq t\leq \tau\) yields an approximation to \(x(t_{max})\).
\begin{rem}\em While the hypothesis \(\tau/T\in\mathbb{N}\) obviously simplifies the algorithm, it is not necessary for the methods to perform satisfactorily, as the numerical experiments below will show. This should be compared with the situation for the integrator in \cite{beibei}, where, for systems of the general form \eqref{eq:dde1}, there is a degradation in the error behaviour if the hypothesis \(\tau/T\in\mathbb{N}\) does not hold (see Remark 3 in \cite{beibei}). \end{rem}
\section{Error bounds}\label{errorbounds} In this section we provide error bounds for the algorithms described above. We work first under the hypothesis that \(\tau\) is a multiple of the period (Case I in the preceding section) and then consider the general situation. The section concludes with the presentation of some refinements. For simplicity, we assume that the macro and micro integrators are (consistent) Runge-Kutta methods.
\subsection{Basic estimate: Case I}
As explained above, if the delay is a multiple of the period, the integrators to be analyzed are just SAM algorithms for the system of ODEs \eqref{eq:odebf}. For \(X^{(1)}\) the results given by our algorithms are those of macrointegrating \eqref{eq:sam2} with inaccurate values of \(F\). For \(X^{(\ell)}\), \(\ell>1\), we apply the macrointegrator with inaccurate values of \(F\) and in addition with a starting value \(X^{(\ell)}(0)\) that is itself not exact. Classical results of the theory of convergence of numerical ODEs show then that SAM solutions have an error bound\footnote{
Classical numerical analysis texts used to provide global error bounds for integrations subject to inaccuracies in the computation of the numerical solution at each step, see e.g. \cite[Chapter 8, Section 5, Theorem 3]{ik}. Such inaccuracies may be due to rounding errors or, as it is the case here, to other reasons. The importance of rounding errors has diminished over the years and accordingly modern texts assume that those inaccuracies do not exist. In fact the study of the impact of the inaccuracies on the global error is exactly the same as that of the impact of the local truncation error, see e.g. \cite[Remark 2]{granada}.
}
\begin{equation}\label{eq:mainbound} \mathcal{O}\left( H^P+\delta+\Omega\:\mu\right) \end{equation}
where
\begin{itemize}
\item The contribution \(H^P\) (\(P\) is the order of the macrointegrator) arises from the global error
of
the macrointegrator and would remain even if
\(F\) were known exactly rather than evaluated via finite differences. This contribution is uniform in
\(\Omega\) as \(\Omega\) increases, because the stroboscopically averaged system being integrated is a
polynomial in \(\Omega^{-1}\) (Remark~\ref{rem:depends}). \item \(\delta= \delta(H,\Omega)\) is a bound for the error due to the finite-difference formula used to
compute
\(F\). \item \(\mu(h,\Omega)\) is a bound for the microintegrating error when
computing approximately \(\Phi_T(w)\) in \eqref{eq:fw} (or \(\Phi_{\pm T}(w)\) in \eqref{eq:cw}, etc.).
The (large) factor \(\Omega\) in front of \(\mu\) in \eqref{eq:mainbound} is due to the denominator
in the finite difference formulas \eqref{eq:fw}, \eqref{eq:cw}, etc. \end{itemize}
We study \(\mu\) assuming that the oscillatory system is written in the format \eqref{eq:sam1}. It is best to introduce the fast time \(s=\Omega t\) that transforms \eqref{eq:sam1} into \begin{equation}\label{eq:slow} \frac{d}{ds}y = \Omega^{-1}f(y,s;\Omega). \end{equation} This system has to be integrated over a forward period \(0\leq s\leq 2\pi\) (or over a forward period and a backward period, etc. depending on the finite difference formula being used to recover \(F\)). Note that the rescaling of time is compatible with the RK discretization in the sense that the \(y\) vectors produced by the algorithm when the system is integrated in the variable \(t\) with step size \(h\) coincide with
those obtained when the system is integrated in the variable \(s\) with step size \(\Delta s = \Omega h\). Since
we assumed at the outset that \(\Omega^{-1}f\) is smooth and
remains bounded together with its derivatives as \(\Omega\uparrow \infty\), the microintegration errors for \eqref{eq:slow} may be estimated, uniformly in \(\Omega\) as \(\mu=\mathcal{O}((\Delta s)^p)\), i.e.
\begin{equation} \qquad \mu=\mathcal{O}(\Omega^ph^p), \label{eq:mu} \end{equation}
where \(p\) is the order of the microintegrator.
As an example we look at the algorithm in Table~\ref{tab:algorithm} with \(P=p=2\). The second-order central differences contribute to \(\delta\) with an \(\Omega^{-2}\) term. Since the first-order difference formula is only used at a number of macrosteps that is fixed as \(H\rightarrow 0\) (see Remark~\ref{rem:fixednumber}), its contribution to \(\delta\) is \(H\Omega^{-1}\). Thus we have the bound \[ \mathcal{O}\left( H^2+\Omega^{-2}+H\Omega^{-1}+\Omega^3h^2\right). \] \subsection{Basic estimate: Case II} In this case \eqref{eq:mainbound} has to be replaced by
\begin{equation}\label{eq:mainbound2}
\mathcal{O}\left( H^P+\delta+\Omega\:\mu+\nu\right),
\end{equation}
where \(\nu\) bounds the error introduced by the integrations of the oscillatory system in the final short interval \(MT\leq t\leq \tau\).
Since these are carried out in intervals of length \(<
T\) and there is a number \(L\) of them independent of the problem parameters, from \eqref{eq:mu}, we obtain
the bounds \begin{equation} \nu=\mathcal{O}((\Delta s)^p)\qquad {\rm i.e.}\qquad \nu=\mathcal{O}(\Omega^ph^p). \label{eq:nu} \end{equation}
\subsection{Refined microintegration estimates: \(\mathcal{O}(\Omega^{-1})\) microintegration errors}
There are numerous circumstances where \eqref{eq:mu} is pessimistic. An instance is given by the case where \eqref{eq:sam1} is of the form \[ \frac{d}{dt}y = \Omega\: \Lambda(\Omega t)+f(y,\Omega t;\Omega), \] with \(\Lambda\) a (vector-valued) trigonometric polynomial, and with \(f\) and its derivatives bounded as \(\Omega\uparrow \infty\).
In terms of the slow time \(s\) we have \begin{equation}\label{eq:perturbed} \frac{d}{ds}y =\Lambda(s) +\Omega^{-1}f(y,s;\Omega), \end{equation} a system that may be seen as a perturbation of \((d/ds) y = \Lambda(s)\). For the unperturbed problem we have
the following result that will be proved in Section~\ref{proofs1}.
\begin{proposition} \label{prop} Let \(\Lambda\) be a (vector-valued) trigonometric polynomial. A Runge-Kutta scheme applied to the initial-value problem \((d/ds) y = \Lambda(s)\), \(y(0)=y_0\), with a constant stepsize \(\Delta s=2\pi/M\) (\(M\) a positive integer) gives approximations that are exact at \(s = \pm 2\pi\) provided that \(\Delta s\) is sufficiently small. \end{proposition}
Note that, by implication, the integrator also yields exact approximations at \(s=\pm 4\pi\), \(s=\pm 6\pi\), etc.
Thus the computation of the values of \(\Phi_{\pm k T}\) used in the finite-difference formulas will be
free from error and, for the unperturbed problem, \(\mu =0\). From the proposition it may be expected that for the perturbed system \eqref{eq:perturbed} the microintegration error after a whole number of periods will approach 0 as \(\Omega\uparrow \infty\) with \(\Delta s\) fixed. In fact in this case \eqref{eq:mu} may be replaced by \begin{equation}\label{eq:mu2} \mu=\Omega^{-1}\mathcal{O}((\Delta s)^p)\qquad {\rm or}\qquad \mu=\mathcal{O}(\Omega^{p-1}h^p), \end{equation} an estimate that will be established in Section~\ref{proofs2}.
\subsection{Refined microintegration estimates: \(\mathcal{O}(\Omega^{-2})\) microintegration errors} \label{sec:omegados}
An even more favourable situation holds when, in \eqref{eq:sam1}, \(f\) and its derivatives remain bounded as \(\Omega\uparrow \infty\) and \(f\), as a function of its second argument, is a trigonometric polynomial. According to \eqref{eq:slow},
for \(0\leq s \leq 2\pi\), \(y(s)-y(0) = \mathcal{O}(\Omega^{-1})\) and we may consider a decomposition \[ \frac{d}{ds}y = \Omega^{-1}f(y(0),s;\Omega)+\Omega^{-1} \Big(f(y,s;\Omega)-f(y(0),s;\Omega) \Big). \] For the unperturbed system \((d/ds)y = \Omega^{-1}f(y(0),s;\Omega)\) the output of the microintegrations is exact in view of the preceding proposition; the perturbation is \(\mathcal{O}(\Omega^{-2})\) for \(0\leq s\leq 2\pi\) and \eqref{eq:mu} may be replaced by the improved estimate (Section~\ref{proofs}) \begin{equation}\label{eq:mu3} \mu=\Omega^{-2}\mathcal{O}((\Delta s)^p)\qquad {\rm or}\qquad \mu=\mathcal{O}(\Omega^{p-2}h^p). \end{equation}
We emphasize that the improved bounds for \(\mu\) we have just discussed hold because the integration of the unperturbed problem is exact \emph{after a whole number of periods}. The bound \eqref{eq:nu} cannot be improved similarly because there the integration is not carried out for a whole number of periods.
\section{Numerical experiments} \label{sec:experiments} We now report numerical experiments with SAM. The following algorithms are used:
\begin{table} \caption{Coefficients of methods RK3 (left) and RK4 (right).} \begin{center}
$\begin{array}{c|ccc} & & & \\ [6pt] 0& & & \\ [6pt] \frac{1}{3}& \frac{1}{3} & & \\ [6pt] \frac{2}{3}& 0 & \frac{2}{3} & \\ [6pt] \hline \\[-6pt] & \frac{1}{4} & 0 & \frac{3}{4} \end{array} \qquad\qquad
\begin{array}{c|cccc} 0& & & & \\ [6pt] \frac{1}{2}& \frac{1}{2} & & & \\ [6pt] \frac{1}{2}& 0 & \frac{1}{2} & & \\ [6pt] 1& 0 & 0 & 1 & \\ [6pt]\hline \\[-6pt] & \frac{1}{6} & \frac{2}{6} & \frac{2}{6} & \frac{1}{6} \end{array}$ \end{center} \label{tab:coeff} \end{table}
\begin{enumerate} \item SAM-RK3. This is a SAM algorithm, similar to that in Table~\ref{tab:algorithm}, that uses the well-known third order RK method in Table~\ref{tab:coeff} as macro and microintegrator. We approximate \(F\) by means
of the differentiation formula with \(\mathcal{O}(\Omega^{-3})\) errors based on function values at
\(-2T\), \(-T\), \(0\), \( T\). At \(t=0\), where backward microintegrations are not possible, we use the
\(\mathcal{O}(\Omega^{-3})\) forward differentiation formula based on function values at \(0\), \(T\),
\(2T\), \(3T\). \item SAM-RK4. A SAM algorithm, similar to that in Table~\ref{tab:algorithm}, that uses the \lq
classical\rq\ order four RK method (see Table~\ref{tab:coeff}) as macro and microintegrator. We
approximate \(F\) by means of the well-known differentiation formula based on function values at \(\pm T\),
\(\pm 2T\) (\(\mathcal{O}(\Omega^{-4})\) errors). For the first stage of the formula at \(t=0\), where backward microintegrations are not possible, we use the \(\mathcal{O}(\Omega^{-4})\) formula based on
function values at \(0\), \(T\), \(2T\), \(3T\), \(4T\). In addition the fourth stage requires values of
\(F\) at the end point \(t=\tau\) and for those we use the \(\mathcal{O}(\Omega^{-4})\) formula based on
\(-4T\), \(-3T\), \(-2T\), \(-T\), \(0\). \item SS-Z. This is the integrator introduced in \citep{beibei} that is not based on rewriting the system as
an ODE. \end{enumerate}
Experiments using the method in Table~\ref{tab:algorithm} were also conducted, but will not be reported as its performance is very similar to that of SS-Z. In fact, the number of possible combinations of integrators and differentiation formulas is bewildering. The choices used here are meant to illustrate the possibilities of the SAM idea and we have not attempted to identify the most efficient combinations.
\subsection{Test problems} We have integrated the two test problems used in \citep{beibei}. The first is given by \eqref{eq:toggle}
together with the history information $x_1(t) = 0.5$, $x_2(t) = 2.0$, for $-\tau \leq t\leq 0$. The constants in the model have the values $\alpha=2.5$, $\beta=2$, $A=0.1$, $\omega=0.1$, $B=4.0$, $\tau=0.5$. This leads to an ODE system that satisfies the hypotheses in Section~\ref{sec:omegados} so
that the estimate \eqref{eq:mu3} holds.
The second test problem is the following more demanding variant of \eqref{eq:toggle}: \begin{eqnarray}\label{eq:geneproblem} \frac{dx_1}{dt}&=&\frac{\alpha}{1+x_2^{\beta}}-x_1(t-\tau)+A\sin(\omega t)+\hat{B}\Omega\sin(\Omega t),\\ \nonumber \frac{dx_2}{dt}&=&\frac{\alpha}{1+x_1^{\beta}}-x_2(t-\tau), \end{eqnarray} with $\hat B=0.1$ and all other constants and the initial history as for \eqref{eq:toggle}. Now the amplitude of the fast forcing grows linearly with \(\Omega\) and, as a result, the solution undergoes fast oscillations of amplitude $O(1)$, as \(\Omega \rightarrow \infty\) (rather than $O(\Omega^{-1})$ as is the case for \eqref{eq:toggle}). Clearly \eqref{eq:geneproblem} leads to a system of ODEs of the form \eqref{eq:perturbed} and estimate \eqref{eq:mu2} holds.
\subsection{Results: case I}
We first set $\Omega = 8\pi, 16\pi, \ldots$, so that the delay \(\tau = 0.5\) is an integer multiple of the fast period $T=2\pi/\Omega$.
\begin{table}[t] \caption{Maximum errors at stroboscopic times in $x_1$ for SAM-RK4 with respect to the reference solution for problem \eqref{eq:toggle}} \footnotesize
\begin{center} \resizebox{\textwidth}{!}{ \begin{tabular}{rcccccccccc} \hline N &$\Omega=16\pi$ & $\Omega=32\pi$ & $\Omega=64\pi$ & $\Omega=128\pi$ & $\Omega=256\pi$ & $\Omega=512\pi$ & $\Omega=1024\pi$ \\ \hline 1&1.18(-3)&6.17(-4)&3.48(-4)&1.86(-4)&9.41(-5)&4.50(-5)&1.95(-5)\\ 2&***&3.01(-5)&1.70(-5)&9.09(-6)&4.62(-6)&2.23(-6)&9.98(-7)\\ 4&***&***&1.00(-6)&5.40(-7)&2.77(-7)&1.35(-7)&6.18(-8)\\ 8&***&***&***&3.34(-8)&1.72(-8)&8.44(-9)&3.89(-9)\\ 16&***&***&***&***&1.12(-9)&5.26(-10)&2.23(-10)\\ 32&***&***&***&***&***&2.93(-11)&1.87(-11)\\ 64&***&***&***&***&***&***&2.30(-11)\\ \hline \end{tabular}} \end{center} \label{tab:3} \end{table} \subsubsection{Test problem \eqref{eq:toggle}} For each value of $\Omega$, we have first computed a reference solution of the problem in the interval $[0, 2]$ using the Matlab function dde23 with relative and absolute tolerances equal to $10^{-11}$; errors have been measured with respect to this reference solution. Notice that the interval $[0, 2]$ includes the locations $t=\ell \times \tau$ for $0 \leq \ell \leq 4$. When studying vibrational resonances in \eqref{eq:toggle} the interest lies in much longer time intervals, but we have not used them in our study due to the extremely high cost of finding the reference solution with dde23 when \(\Omega\) is large. We have run the algorithms with macro-stepsize $H=\tau/N$ and micro-stepsize $h=T/(2N)$ for $N=1, 2, 4, \ldots$ This implies that when $N$ is doubled, both the macro stepsize and the micro stepsize are divided by two and, consequently, the computational cost, which is independent of \(\Omega\), is multiplied by four. \begin{figure}
\caption{Maximum error at stroboscopic times, with respect to the reference solution, in the first component of SAM-RK4 (diamonds), SAM-RK3 (squares) and SS-Z (triangles) versus CPU time for $\Omega =1024 \pi$. Errors
for SAM-RK4 come from the last column in Table~\ref{tab:3}.}
\label{fig:ff1}
\end{figure}
Table~\ref{tab:3} shows, for the first component of the solution, maximum errors at stroboscopic times in the interval \(0\leq t\leq 2\) when the integration is performed with SAM-RK4. Stars denote combinations $(N, \Omega)$ for which the numerical solution has not been computed because the macrostepsize $H$ is not significantly larger than the period $T$ and the heterogeneous multiscale approach does not make sense. Note that entries in the table below, say, $10^{-11}$ may not be reliable due to the accuracy we used in computing the reference solution. According to the estimates in the preceding section, for SAM-RK4 there is an \(H^4\), i.e. \(N^{-4}\), contribution to the error bound \eqref{eq:mainbound} arising from the macrointegrator, an \(\Omega^{-4}\) contribution arising from the use of finite differences and \(\Omega \mu\) may be bounded by \(\Omega^3h^4\), or \(\Omega^{-1}N^{-4}\). The numbers in the table have a clear \(\Omega^{-1}N^{-4}\) behaviour, which shows that the error is mainly due to the microintegrations. For the values of \(\Omega\) under consideration the finite differences employed are virtually exact and, in addition, the error arising from the macrointegrator is also negligible (the averaged solution varies very little in the short integration interval).
For SAM-RK3 the contributions to \eqref{eq:mainbound} are respectively \(H^3\), \(\Omega^{-3}\) and \(\Omega^2h^3\). The results show an \(\Omega^2h^3\), i.e. \(\Omega^{-1}N^{-3}\), behaviour (which corresponds to the error being dominated by the microintegrations) and will not be reproduced here. Error bounds and numerical results for SS-Z may be seen in \citep{beibei}. We plot in Figure~\ref{fig:ff1} an efficiency diagram comparing these three integrators. The figure represents, in doubly-logarithmic scale, the maximum error in $x_1$ at stroboscopic times versus the CPU time when $\Omega=1024 \pi$. We first observe that the slopes of the different lines are close to $-1$ (triangles), $-3/2$ (squares) and $-2$ (diamonds). As mentioned above, due to our choice of $H$ and $h$ (i.e.\ $H=\tau/N$, $h=T/(2N)$, $N=1, 2, \ldots, 128$), the computational cost is multiplied by $2^2$ when $N$ is doubled and, consequently, the slopes observed in Figure~\ref{fig:ff1} correspond to a dependence on \(N\) of the form \(N^{-2}\), \(N^{-3}\), \(N^{-4}\), in agreement with the bounds of the preceding section (and those for SS-Z presented in \cite{beibei}). Comparing the three integrators, we also conclude that for errors larger than $10^{-4}$, SS-Z is the most efficient, for errors between $10^{-6}$ and $10^{-4}$ SAM-RK3 is preferable, and for errors smaller than $10^{-6}$ the more accurate SAM-RK4 requires the smallest CPU time. Similar conclusions may be drawn for other values of $\Omega$ (but the range of errors where one method is better than the others varies slightly with \(\Omega\)).
\subsubsection{Test problem \eqref{eq:geneproblem}}
\begin{table}[t] \caption{Maximum errors at stroboscopic times in $x_1$ for SAM-RK4 with respect to the reference solution for problem \eqref{eq:geneproblem}} \footnotesize
\begin{center} \resizebox{\textwidth}{!}{ \begin{tabular}{rcccccccccc} \hline N &$\Omega=16\pi$ & $\Omega=32\pi$ & $\Omega=64\pi$ & $\Omega=128\pi$ & $\Omega=256\pi$ & $\Omega=512\pi$ \\ \hline 1&1.62(-3)&1.64(-3)&1.65(-3)&1.65(-3)&1.65(-3)&1.65(-3)\\ 2&***&8.26(-5)&8.29(-5)&8.29(-5)&8.29(-5)&8.29(-5)\\ 4&***&***&4.72(-6)&4.73(-6)&4.73(-6)&4.73(-6)\\ 8&***&***&***&2.93(-7)&2.93(-7)&2.93(-7)\\ 16&***&***&***&***&1.83(-8)&1.83(-8)\\ 32&***&***&***&***&***&1.15(-9)\\ \hline \end{tabular}} \end{center} \label{tab:4} \end{table}
Table~\ref{tab:4} corresponds to \eqref{eq:geneproblem} integrated with SAM-RK4. Errors for $\Omega=1024\pi$ are not reported because, with our facilities, the computation of the reference solution with dde23 would take several days. The error bounds are different from those for \eqref{eq:toggle} because for this tougher problem the microintegration bound is as in \eqref{eq:mu2} so that the impact \(\Omega\mu\) of the microintegration is now \(\Omega^4h^4\) or \(N^{-4}\); this impact is then \(\Omega\) independent. In fact, the main difference observed when comparing Tables~\ref{tab:3}
and~\ref{tab:4} is that in Table~\ref{tab:4} errors along each row stay constant while in Table~\ref{tab:3}
they decrease as $\Omega$ increases as discussed above.
On the other hand, we observe that the same macro stepsizes
used to integrate \eqref{eq:toggle} can be successfully used in this new, more
challenging problem and lead to errors that are not widely different; this should be compared with the direct
integration of the oscillatory problem with dde23 where the costs for \eqref{eq:geneproblem} are
much higher than those for \eqref{eq:toggle}.
\subsection{Results: case II} We consider again the integration of \eqref{eq:toggle} and \eqref{eq:geneproblem} but now set
$\Omega = 25, 50, \ldots$. These values are not very different from the values used in the preceding section,
but now the delay $\tau=0.5$ is not an integer multiple of the period $T=2\pi/\Omega$ of the fast oscillations.
The integrators SAM-RK3, SAM-RK4 and SS-Z have been run with macro-stepsize $H=H_{max}/N$, $N=1, 2, 4, \ldots$ with
$H_{max}=M T$, $M=\lfloor \tau/T \rfloor$. The micro-stepsize is again $h=T/(2N)$. As explained in Section~\ref{sec:case2}, in order to get solution values at integer multiples of $\tau$, each macrointegration from $0$ to $MT$ is followed by a short integration of the
oscillatory problem
from $MT$ to $\tau$. We only report a representative small sample of the experiments we performed.
\begin{table}[t] \caption{Errors at \(t_{max}\) in $x_1$ for SAM-RK4 with respect to the reference solution for problem \eqref{eq:toggle}} \footnotesize
\begin{center} \resizebox{\textwidth}{!}{ \begin{tabular}{rccccccccc} \hline N &$\Omega=50$ & $\Omega=100$ & $\Omega=200$ & $\Omega=400$ & $\Omega=800$ & $\Omega=1600$ \\ \hline 1&3.98(-3)&3.93(-3)&2.27(-3)&3.91(-4)&3.99(-4)&4.82(-5)\\ 2&***&2.16(-4)&1.55(-4)&2.21(-5)&1.84(-5)&3.37(-6)\\ 4&***&***&5.14(-6)&1.32(-6)&9.01(-7)&2.07(-7)\\ 8&***&***&***&8.79(-8)&5.46(-8)&1.71(-8)\\ 16&***&***&***&***&3.10(-9)&1.05(-9)\\ 32&***&***&***&***&***&5.56(-11)\\ \hline \end{tabular}} \end{center} \label{tab:5} \end{table}
Table~\ref{tab:5} contains the errors in the first component of the solution of \eqref{eq:toggle} at the final time $t_{max}=4\tau$, with respect to the reference dde23 solution when the integration is performed with SAM-RK4. In \eqref{eq:mainbound2}, \(P\), \(\delta\) and \(\mu\) are as in Case I and the additional contribution \(\nu\) from the short integrations is \((\Omega h)^4\), i.e. \(N^{-4}\). The errors displayed in this table show a clear \(N^{-4}\) behaviour along the columns. However the variation with \(\Omega\) is now not so regular as we found in Table~\ref{tab:3} for Case I, no doubt because now changing \(\Omega\) changes the phase of the oscillation at the final time, where errors are measured.
Finally, we report in Table~\ref{tab:6} errors in the first component of the solution of the challenging problem \eqref{eq:geneproblem}. This is to be compared with Table~\ref{tab:4}; again the error behaviour as a function of \(\Omega\) is now more irregular, but the methodology outlined in this paper finds no difficulty in accurately integrating the problem.
\begin{table}[t] \caption{Errors at \(t_{max}\) in $x_1$ for SAM-RK4 with respect to the reference solution for problem \eqref{eq:geneproblem}} \footnotesize
\begin{center} \resizebox{\textwidth}{!}{ \begin{tabular}{crcccccccccc} \hline &N &$\Omega=50$ & $\Omega=100$ & $\Omega=200$ & $\Omega=400$ & $\Omega=800$\\ \hline \hphantom{$\Omega$}&1&4.86(-3)&9.97(-3)&1.20(-2)&3.19(-3)&8.30(-3)&\hphantom{$1600$} \\ &2&***&5.46(-4)&8.01(-4)&2.46(-4)&3.80(-4)&\\ &4&***&***&2.63(-5)&1.45(-5)&1.89(-5)&\\ &8&***&***&***&9.33(-7)&1.15(-6)&\\ &16&***&***&***&***&6.56(-8)&\\
\hline \end{tabular}} \end{center} \label{tab:6} \end{table}
\section{Proofs and additional results} \label{proofs}
We conclude the paper by supplying the proofs of some results presented in Section~\ref{errorbounds}. We also present some extensions of those results. \subsection{Proof of Proposition~\ref{prop}} \label{proofs1}
It is clearly sufficient to carry out the proof for the particular case of the scalar differential equation \(dy/ds = \exp(iks)\), with \(k\neq 0\) an integer. The true solution has the value \(y_0\) at \(s=2\pi\). If \(\{b_j\}_{j=1}^\sigma\) and \(\{c_j\}_{j=1}^\sigma\) are the weights and abscissas of the RK formula and \(M\Delta s=2\pi\), the numerical solution at \(s= 2\pi\) is \[ y_M = y_0 + \Delta s\sum_{m=0}^{M-1} \sum_{j=1}^\sigma b_j \exp(ik ( (m+c_j)\Delta s)). \] Hence \[ y_M-y_0 =\Delta s \sum_{j=1}^\sigma b_j \exp(ik c_j\Delta s) \sum_{m=0}^{M-1} \exp(ik m\Delta s ) . \] If \(\Delta s\) is sufficiently small \(\exp(ik\Delta s)\neq 1\) and the inner sum takes the value \[ \frac{\exp(ikM\Delta s) - 1}{\exp(ik\Delta s)- 1} =\frac {\exp(i2k\pi) - 1}{\exp(ik\Delta s)- 1} =0. \] As a result \(y_M = y_0\), i.e. \(y_M\) coincides with the true solution.
\begin{rem} \label{rem:alias}
\em If \(\exp(ik\Delta s)= 1\) with \(k\neq 0\), then \(\exp(iks)=1\) at all mesh points \( s= 0, \Delta s, 2\Delta s, \dots\), i.e.\ the oscillatory function \(\exp(iks)\) is an \emph{alias} of the constant function \(1\). In that case, the inner sum equals \(M\) and \[ y_M -y_0 = 2\pi \sum_{j=1}^\sigma b_j \exp(ik c_j\Delta s). \] Thus the RK solution is not exact at \(s=2\pi\). \end{rem}
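The proposition and the remark are easy to check numerically. The sketch below (ours; the choices \(M=7\) and \(k=3\) are arbitrary) applies the classical fourth-order RK formula, with weights \(b=(1/6,1/3,1/3,1/6)\) and abscissas \(c=(0,1/2,1/2,1)\), to \(dy/ds=\exp(iks)\) on \([0,2\pi]\): for the non-aliased mode \(k=3\) the result at \(s=2\pi\) is exact up to roundoff, whereas for the aliased mode \(k=M\) the defect equals \(2\pi\bigl|\sum_j b_j \exp(2\pi i c_j)\bigr| = 2\pi/3\).
\begin{verbatim}
import numpy as np

def rk4(g, s0, s1, M, y0=0.0 + 0.0j):
    # classical RK4 applied to dy/ds = g(s); since g does not depend on y,
    # each step reduces to a quadrature with weights b and abscissas c
    ds = (s1 - s0) / M
    y = y0
    for m in range(M):
        s = s0 + m * ds
        k1 = g(s)
        k2 = g(s + ds / 2)
        k3 = g(s + ds / 2)
        k4 = g(s + ds)
        y = y + ds * (k1 + 2 * k2 + 2 * k3 + k4) / 6
    return y

M = 7
for k in (3, M):
    yM = rk4(lambda s, k=k: np.exp(1j * k * s), 0.0, 2 * np.pi, M)
    # k = 3: |yM| ~ 1e-16 (exact up to roundoff, as in the proposition);
    # k = M: |yM| = 2*pi/3 (aliased mode, as in the remark above)
    print(k, abs(yM))
\end{verbatim}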
\subsection{Proof of the improved micro-integration estimates} \label{proofs2}
Let us prove the error bound \eqref{eq:mu2}; the proof of \eqref{eq:mu3} follows the same pattern and will not be given. We start by noting that the solution of the initial value problem given by \(y(0) = y_0\) and \eqref{eq:perturbed} may be written as \(y = v+z\), where the pair \((v,z)\) is the solution of the extended problem \begin{eqnarray} \label{eq:estimate1} &&\frac{dv}{ds} = \Lambda(s),\qquad v(0) = 0,\\ \label{eq:estimate2} &&\frac{dz}{ds} = \Omega^{-1} f(v+z,s;\Omega), \qquad z(0) = y_0. \end{eqnarray} By writing the equations that define the RK solution, it is straightforward to check that, similarly,
the RK trajectory \(y_0\), \(y_1\), \dots, \(y_M\) is given by \(y_m=v_m+z_m\), \(m= 0,\dots, M\),
where \((v_0,z_0)\),\dots, \((v_M,z_M)\) is the RK trajectory for the initial value problem \eqref{eq:estimate1}--\eqref{eq:estimate2}. From the proposition we know that, for \(\Delta s\) small, the RK approximation to the \(v\) component of the extended solution is exact at \(s=2\pi\), i.e.\ \(v_M=v(2\pi)\); the proof then concludes by showing that the RK error in the \(z\) component, \(z_M-z(2\pi)\), possesses an \(\Omega^{-1} \mathcal{O}((\Delta s)^p)\) bound.
The RK discretization of \eqref{eq:estimate1}--\eqref{eq:estimate2} is of the form \begin{eqnarray} \label{eq:estimate3} v_{m+1} & = &v_m + \Delta s F(m\Delta s,\Delta s), \\ \label{eq:estimate4} z_{m+1} & = & z_m + \Delta s \Omega^{-1}G(v_m+z_m, m\Delta s, \Delta s; \Omega), \end{eqnarray} where \(F\) and \(G\) are suitable increment functions; \(G\) and its derivatives are bounded as
\(\Omega\uparrow \infty\). Clearly, for the quadrature in \eqref{eq:estimate1}, \(\max_m |v_m - v(m\Delta s)| = \mathcal{O}((\Delta s)^p)\), with the constant implied in the \(\mathcal{O}\) notation independent of \(\Omega\). For the \(z\) component we define the local error \(\eta_m\) by \[ z((m+1)\Delta s) = z(m\Delta s) + \Delta s \Omega^{-1}G(v(m\Delta s)+z(m\Delta s), m\Delta s, \Delta s; \Omega)+
\eta_m. \]
Since the right-hand side of the equation in \eqref{eq:estimate2} has a prefactor \(\Omega^{-1}\), the same happens for all the associated elementary differentials \cite{Butcher,hlw} in the expansion of \(z\), and as a consequence \(\max_m |\eta_m|= \Omega^{-1}\mathcal{O}((\Delta s)^{p+1})\) (again the implied constant is \(\Omega\)-independent). Subtracting the last display from \eqref{eq:estimate4} leads to (\(C\) denotes an \(\Omega\)-independent Lipschitz constant) \begin{eqnarray*}
|z_{m+1}-z((m+1)\Delta s)| &\leq& |z_{m}-z(m\Delta s)|\\
&&+\Delta s \Omega^{-1} C \Big(|v_{m}-v(m\Delta s)|
+|z_{m}-z(m\Delta s)|\Big)\\&&+ |\eta_{m}|\\
&\leq& (1+\Delta s \Omega^{-1} C)|z_{m}-z(m\Delta s)| +\Delta s \Omega^{-1} \mathcal{O}((\Delta s)^p), \end{eqnarray*}
and unrolling this recursion (see the sketch below) we arrive at \(\max_m |z_{m}-z(m\Delta s)| = \Omega^{-1} \mathcal{O}((\Delta s)^p)\), which completes the proof.
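For completeness, here is the standard discrete Gronwall unrolling behind this last step (a sketch; \(C\) and the constants implied by the \(\mathcal{O}\) symbols are as above and independent of \(\Omega\)). Writing \(e_m:=|z_m-z(m\Delta s)|\), \(a:=\Delta s\,\Omega^{-1}C\) and \(b:=\Delta s\,\Omega^{-1}\mathcal{O}((\Delta s)^p)\), we have \(e_0=0\) and \(e_{m+1}\le(1+a)e_m+b\), so that
\[
e_M\le b\sum_{m=0}^{M-1}(1+a)^m=b\,\frac{(1+a)^M-1}{a}\le b\,\frac{e^{Ma}-1}{a}
=\frac{e^{2\pi\Omega^{-1}C}-1}{C}\,\mathcal{O}((\Delta s)^p)\le 2\pi e^{2\pi C}\,\Omega^{-1}\mathcal{O}((\Delta s)^p),
\]
where the last inequality uses \(e^x-1\le x e^x\) with \(x=2\pi\Omega^{-1}C\) and, say, \(\Omega\ge 1\).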
\subsection{Extensions} The improved bounds \eqref{eq:mu2} and \eqref{eq:mu3} are based on Proposition~\ref{prop}. This proposition does not hold if \(\Lambda(s)\) is merely a smooth \(2\pi\)-periodic function rather than a trigonometric polynomial. In fact, if \(\Lambda\) contains infinitely many Fourier modes, then for each choice of \(\Delta s =2\pi/M\) there will be modes \(\exp(iks)\) that are aliases of the function \(1 = \exp(i0s)\) and therefore are not integrated exactly, as we know from Remark~\ref{rem:alias}. However, for \(\Lambda\) smooth and \(2\pi\)-periodic it is still possible to derive superconvergence results showing that the RK solution with \(\Delta s=2\pi/M\) is more accurate at \(s=2\pi\) than it is for \(s<2\pi\). Those results are derived by decomposing the solution into a Fourier series, as sketched below. If \(\Lambda\) has derivatives of all orders, then the RK error at the final point may be proved to be \(\mathcal{O}((\Delta s)^q)\) for arbitrary \(q>0\). Under analyticity assumptions, the error may decrease exponentially. The situation is very similar to that of the trapezoidal rule studied in \cite{Trefethen}. (In fact, due to the periodicity, the sum \(\sum_{m=0}^{M-1} \exp(ik m\Delta s )\) we encountered in Section~\ref{proofs1} may be written in the trapezoidal form \({\sum_{m=0}^{M}}^{\prime\prime} \exp(ik m\Delta s )\), where the double prime indicates that the first and last terms are halved.) By using the technique in Section~\ref{proofs2}, the superconvergence results for \((d/ds)y = \Lambda(s)\) give rise to improved micro-integration bounds for problems of the form \eqref{eq:perturbed} with \(f\) bounded and \(\Lambda\) \(2\pi\)-periodic, or for the case where in \eqref{eq:sam1} \(f\) and its derivatives remain bounded as \(\Omega\) increases.
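To indicate how the Fourier decomposition works (a sketch, under the smoothness assumptions just mentioned, rather than a complete proof): write \(\Lambda(s)=\sum_{k\in\mathbb{Z}}\hat\Lambda_k e^{iks}\). By Proposition~\ref{prop}, every mode with \(\exp(ik\Delta s)\neq 1\) is integrated exactly at \(s=2\pi\), while an alias mode \(k=jM\), \(j\neq 0\), whose exact contribution to \(y(2\pi)-y(0)\) vanishes, contributes to the RK solution an amount bounded by \(2\pi|\hat\Lambda_{jM}|\sum_l|b_l|\) (Remark~\ref{rem:alias}). Hence
\[
|y_M-y(2\pi)|\le 2\pi\Big(\sum_{l=1}^\sigma|b_l|\Big)\sum_{j\neq 0}|\hat\Lambda_{jM}|,
\]
and, since the Fourier coefficients of a \(C^\infty\) \(2\pi\)-periodic function decay faster than any power of \(1/|k|\), the right-hand side is \(\mathcal{O}(M^{-q})=\mathcal{O}((\Delta s)^q)\) for every \(q>0\).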
\end{document}
\begin{document}
\title[] {Instantaneously complete Chern-Ricci flow and K\"ahler-Einstein metrics }
\author{Shaochuang Huang$^1$} \address[Shaochuang Huang]{Yau Mathematical Sciences Center, Tsinghua University, Beijing, China.} \email{[email protected]} \thanks{$^1$Research partially supported by China Postdoctoral Science Foundation \#2017T100059}
\author{Man-Chun Lee} \address[Man-Chun Lee]{Department of
Mathematics, University of British Columbia, Canada} \email{[email protected]}
\author{Luen-Fai Tam$^2$} \address[Luen-Fai Tam]{The Institute of Mathematical Sciences and Department of
Mathematics, The Chinese University of Hong Kong, Shatin, Hong Kong, China.}
\email{[email protected]} \thanks{$^2$Research partially supported by Hong Kong RGC General Research Fund \#CUHK 14301517}
\renewcommand{\subjclassname}{
\textup{2010} Mathematics Subject Classification} \subjclass[2010]{Primary 32Q15; Secondary 53C44 }
\date{February 2019}
\begin{abstract} In this work, we obtain some existence results for {\it Chern-Ricci Flows} and the corresponding {\it Potential Flows} on complex manifolds with possibly incomplete initial data. We discuss the behaviour of the solution as $t\to 0$. These results can be viewed, in a certain sense, as a generalization to higher dimensions of an existence result of Giesen and Topping for the Ricci flow on surfaces of hyperbolic type. On the other hand, we also discuss the long time behaviour of the solution and obtain some sufficient conditions for the existence of a K\"ahler-Einstein metric on complete non-compact Hermitian manifolds, which generalizes the work of Lott-Zhang and Tosatti-Weinkove to complete non-compact Hermitian manifolds with possibly unbounded curvature.
\end{abstract}
\keywords{Chern-Ricci flow, instantaneous completeness, K\"ahler-Einstein metric}
\maketitle
\markboth{Shaochuang Huang, Man-Chun Lee and Luen-Fai Tam}{Instantaneously complete Chern-Ricci flow and K\"ahler-Einstein metrics }
\section{Introduction}
In this work, we will discuss conditions on the existence of {\it Chern-Ricci Flows} and the corresponding {\it Potential Flows} on complex manifolds with possibly incomplete initial data. The flows will be described later. We will also discuss conditions on long-time existence and convergence to K\"ahler-Einstein metrics.
We begin with the definitions of the Chern-Ricci flow and the corresponding potential flow. Let $M^n$ be a complex manifold with complex dimension $n$. Let $h$ be a Hermitian metric on $M$ and let $\theta_0$ be the K\"ahler form of $h$: $$ \theta_0=\sqrt{-1} h_{i\bar{j}} dz^i\wedge d\bar z^j $$ where $h=h_{i\bar{j}} dz^i\otimes d\bar z^j$ in local holomorphic coordinates. {In this work, the Einstein summation convention is used.}
In general, suppose $\omega$ is a real (1,1) form on $M$. If $\omega=\sqrt{-1} g_{i\bar{j}} dz^i\wedge d\bar z^j$ in local holomorphic coordinates, then the corresponding Hermitian form $g$ is given by $$ g=g_{i\bar{j}} dz^i\otimes d\bar z^j. $$
In case $\omega$ is only nonnegative, we still call $g$ the Hermitian form of $\omega$, and $\omega$ is still called the K\"ahler form of $g$.
Now if $(M^n,h)$ is a Hermitian manifold with K\"ahler form $\theta_0$, let $\nabla$ be the Chern connection of $h$ and let $\text{\rm Ric}(h)$ be the Chern-Ricci tensor of $h$ (or the first Ricci curvature). In local holomorphic coordinates such that $h=h_{i\bar{j}} dz^i\otimes d\bar z^j$, the Chern-Ricci form is given by $$ \text{\rm Ric}(h)=-\sqrt{-1} \partial\bar\partial \log \det(h_{i\bar{j}}). $$ For basic facts on the Chern connection and the Chern curvature, we refer readers to \cite[section 2]{ TosattiWeinkove2015}; see also \cite[Appendix A]{Lee-Tam} for example.
Let $\omega_0$ be another nonnegative real (1,1) form on $M$. Define \begin{equation}\label{e-alpha} {\alpha}:=-\text{\rm Ric}(\theta_0)+e^{-t}\left(\text{\rm Ric}(\theta_0)+\omega_0\right) \end{equation} where $\text{\rm Ric}(\theta_0)$ is the Chern-Ricci curvature of $h$. We want to study the following parabolic complex Monge-Amp\`ere equation: \begin{equation}\label{e-MP-1} \left\{
\begin{array}{ll}
{\displaystyle \frac{\partial u}{\partial t}}&=\displaystyle{\log\left(\frac{({\alpha}+\sqrt{-1}\partial\bar\partial u)^n}{\theta_0^n}\right)}-u\ \ \text{in $M\times(0,S]$} \\
u(0)&=0
\end{array} \right. \end{equation} so that ${\alpha}+\sqrt{-1}\partial\bar\partial u>0$ for $t>0$. When $M$ is compact and $\omega_0=\theta_0$ is smooth metric, it was first studied by Gill in \cite{Gill}. Here we are interested in the case when $\omega_0$ is possibly an incomplete metric on a complete non-compact Hermitian manifold $(M,h)$. Following \cite{Lott-Zhang}, \eqref{e-MP-1} will be called the {\it potential flow} of the following normalized Chern-Ricci flow: \begin{equation}\label{e-NKRF} \left\{
\begin{array}{ll}
{\displaystyle \frac{\partial}{\partial t}\omega(t)} &= -\text{\rm Ric}(\omega(t))-\omega(t); \\
\omega(0)&= \omega_0.
\end{array} \right. \end{equation} It is easy to see that the normalized Chern-Ricci flow coincides with the normalized K\"ahler-Ricci flow if $\omega_0$ is K\"ahler. It is well known that if $\omega_0$ is a Hermitian metric and $\omega(t)$ is Hermitian and a solution to \eqref{e-NKRF} which is smooth up to $t=0$, then \begin{equation}\label{e-potential} u(t)=e^{-t}\int_0^te^s\log \frac{(\omega (s))^n}{\theta_0^n}ds \end{equation} satisfies \eqref{e-MP-1}. Moreover, $u(t)\to0$ in the $C^\infty$ norm on any compact set as $t\to0$. On the other hand, if $u$ is a solution to \eqref{e-MP-1} such that ${\alpha}+\sqrt{-1}\partial\bar\partial u>0$ for $t>0$, then \begin{equation}\label{e-potential-1} \omega(t)={\alpha}+\sqrt{-1}\partial\bar\partial u \end{equation} is a solution to \eqref{e-NKRF} on $M\times(0,S]$. However, even if we know that $u(t)\to0$ as $t\to0$ uniformly on $M$, it is still unclear whether $\omega(t)\to\omega_0$ in general.
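For the reader's convenience, here is the short computation showing that \eqref{e-potential} satisfies \eqref{e-MP-1}, using only the definitions above. Differentiating \eqref{e-potential} gives $\dot u+u=\log\big(\omega(t)^n/\theta_0^n\big)$. Moreover, since $\sqrt{-1}\partial\bar\partial\log\big(\omega(s)^n/\theta_0^n\big)=-\text{\rm Ric}(\omega(s))+\text{\rm Ric}(\theta_0)$ and, by \eqref{e-NKRF}, $-e^s\text{\rm Ric}(\omega(s))=\partial_s\big(e^s\omega(s)\big)$, we obtain
\begin{equation*}
\sqrt{-1}\partial\bar\partial u=e^{-t}\int_0^t\Big(\partial_s\big(e^s\omega(s)\big)+e^s\text{\rm Ric}(\theta_0)\Big)ds
=\omega(t)-e^{-t}\omega_0+(1-e^{-t})\text{\rm Ric}(\theta_0)=\omega(t)-{\alpha},
\end{equation*}
so that $\omega(t)={\alpha}+\sqrt{-1}\partial\bar\partial u>0$ and $\dot u=\log\big(({\alpha}+\sqrt{-1}\partial\bar\partial u)^n/\theta_0^n\big)-u$, which is \eqref{e-MP-1}.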
The first motivation is to study Ricci flows starting from metrics which are possibly incomplete and may have unbounded curvature. In complex dimension one, the existence of Ricci flow starting from an arbitrary metric has been studied in detail by Giesen and Topping \cite{GiesenTopping-1,GiesenTopping-2, GiesenTopping,Topping}. In particular, the following was proved in \cite{GiesenTopping}: {\it If a surface admits a complete metric $H$ with constant negative curvature, then any initial data, which may be incomplete, can be deformed through the normalized Ricci flow for all time and converges to $H$. Moreover, the solution is instantaneously complete for $t>0$.} In higher dimensions, it was recently proved by Ge-Lin-Shen \cite{Ge-Lin-Shen} that on a complete non-compact K\"ahler manifold $(M,h)$ with $\text{\rm Ric}(h)\leq -h$ and bounded curvature, if $\omega_0$ is a K\"ahler metric, not necessarily complete, but with bounded $C^k$ norm with respect to $h$ for $k\ge 0$, then \eqref{e-NKRF} has a long time solution which converges to the unique K\"ahler-Einstein metric with negative scalar curvature; the solution is obtained by solving \eqref{e-MP-1}. Moreover, the solution is instantaneously complete as soon as it evolves.
Motivated by the above mentioned works, we first study the short time existence of the potential flow and the normalized Chern-Ricci flow. Our first result is the following: \begin{thm}\label{main-instant-complete} Let $(M^n,h)$ be a complete non-compact Hermitian manifold with complex dimension $n$. Suppose there is $K>0$ such that the following hold. \begin{enumerate} \item There is a {proper} exhaustion function $\rho(x)$ on $M$ such that
$$|\partial\rho|^2_h +|\sqrt{-1}\partial\bar\partial \rho|_h \leq K.$$ \item $\mathrm{BK}_h\geq -K$; \item The torsion of $h$, $T_h=\partial \omega_h$ satisfies
$$|T_h|^2_h +|\nabla^h_{\bar\partial} T_h |\leq K.$$ \end{enumerate} Let $\omega_0$ be a nonnegative real (1,1) form with corresponding Hermitian form $g_0$ on $M$ (possibly incomplete {or degenerate}) such that \begin{enumerate} \item[(a)] $g_0\le h$ and
$$|T_{g_0}|_h^2+|\nabla^h_{\bar\partial} T_{g_0}|_h+ |\nabla^{h}g_0|_h\leq K.$$
\item[(b)] There exist $f\in C^\infty(M)\cap L^\infty(M),\beta>0$ and $s>0$ so that $$-\text{\rm Ric}(\theta_0)+e^{-s}(\omega_0+\text{\rm Ric}(\theta_0))+\sqrt{-1}\partial\bar\partial f\geq \beta \theta_0.$$
\end{enumerate} Then \eqref{e-MP-1} has a solution on $M\times(0, s)$ so that $u(t)\to 0$ as $t\to0$ uniformly on $M$. Moreover, for any $0<s_0<s_1<s$, $\omega(t)={\alpha}+\sqrt{-1}\partial\bar\partial u$ is the K\"ahler form of a complete Hermitian metric which is uniformly equivalent to $h$ on $M\times[s_0, s_1]$. In particular, $g(t)$ is complete for $t>0$. \end{thm} Here $\mathrm{BK}_h\geq -K$ means that for any unitary frame $\{e_k\}$ of $h$, we have $R(h)_{i\bar ij\bar j}\geq -K$ for all $i,j$. \begin{rem} It is well-known that when $(M,h)$ is K\"ahler with bounded curvature, then condition (1) will be satisfied, \cite{Shi1989,Tam2010}. See also \cite{NiTam2013,Huang2018} for related results under various assumptions. \end{rem}
Condition (b) was used in \cite{Lott-Zhang,TosattiWeinkove2015, Lee-Tam} with $\omega_0$ replaced by $\theta_0$, and is motivated, as pointed out in \cite{Lott-Zhang}, as follows. If $\omega(t)$ is closed and we consider the corresponding cohomology classes instead, then \eqref{e-NKRF} becomes $$ \partial_t[\omega(t)]=-[\text{\rm Ric}(\omega(t))]-[\omega(t)] $$ and so $$ [\omega(t)]=-(1-e^{-t})[\text{\rm Ric}(\theta_0)]+e^{-t}[\omega_0]. $$ Condition (b) is used to guarantee that $\omega(t)>0$. In our case $\omega_0,\theta_0, \omega(t)$ may not be closed and $\omega_0$ may degenerate. These may cause some difficulties. Indeed, the result is analogous to running the K\"ahler-Ricci flow from rough initial data. When $M$ is compact, the potential flow from rough initial data has already been studied by several authors; see for example \cite{BG2013,SongTian2017,To2017} and the references therein.
On the other hand, a solution of \eqref{e-MP-1} gives rise to a solution of \eqref{e-NKRF} when $t>0$. It is rather delicate to see whether the corresponding solution of \eqref{e-NKRF} attains the initial Hermitian form $\omega_0$. In this respect, we will prove the following: \begin{thm}\label{t-initial-Kahler-1} With the same notation and assumptions as in Theorem \ref{main-instant-complete}, let $\omega(t)$ be the solution of \eqref{e-NKRF} obtained in the theorem. Suppose in addition that $h$ is K\"ahler and $d\omega_0=0$, and let $U=\{\omega_0>0\}$. Then $\omega(t)\rightarrow \omega_0$ in $C^\infty(U)$ as $t\rightarrow 0$, {uniformly on compact subsets of $U$}. \end{thm}
We should remark that if in addition $h$ has bounded curvature, then the theorem follows easily from pseudo-locality. The theorem can be applied to the cases studied in \cite{Ge-Lin-Shen} and to the case that $-\text{\rm Ric}(h)\ge {\beta}\theta_0$ outside a compact set $V$ and $\omega_0>0$ on $V$, with $\omega_0$ and its first covariant derivative bounded. In particular, when $\Omega$ is a bounded strictly pseudoconvex domain of another manifold $M$ with defining function $\varphi$, then $\Omega$ equipped with the metric $h_{i\bar j}=-\partial_i \partial_{\bar j}\log(-\varphi)$ satisfies the above; see \cite[(1.22)]{ChengYau1982}.
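As a concrete illustration of condition (b) (a standard example, included here only for orientation): let $\Omega=\{|z|<1\}\subset\mathbb{C}^n$ with defining function $\varphi=|z|^2-1$. Then
\begin{equation*}
h_{i\bar j}=-\partial_i\partial_{\bar j}\log(-\varphi)=\frac{\delta_{ij}}{1-|z|^2}+\frac{\bar z_iz_j}{(1-|z|^2)^2},\qquad
\det(h_{i\bar j})=(1-|z|^2)^{-(n+1)},
\end{equation*}
so that $\text{\rm Ric}(h)=(n+1)\sqrt{-1}\partial\bar\partial\log(1-|z|^2)=-(n+1)\theta_0$; this is a complete K\"ahler-Einstein metric on $\Omega$. In particular, for any nonnegative $\omega_0\le\theta_0$, condition (b) holds with $f=0$ and $\beta=(n+1)(1-e^{-s})$, since $-\text{\rm Ric}(\theta_0)+e^{-s}(\omega_0+\text{\rm Ric}(\theta_0))\ge (n+1)(1-e^{-s})\theta_0$.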
Another motivation here is to study the existence of K\"ahler-Einstein metrics with negative scalar curvature on complex manifolds using geometric flows. In \cite{Aubin, Yau1978-2}, Aubin and Yau proved that if $M$ is a compact K\"ahler manifold with negative first Chern class $c_1(M)<0$, then it admits a unique K\"ahler-Einstein metric with negative scalar curvature, by studying the elliptic complex Monge-Amp\`ere equation. Later, Cao \cite{Cao} reproved the above result using the K\"ahler-Ricci flow, by showing that one can deform a suitable initial K\"ahler metric through the normalized K\"ahler-Ricci flow to the K\"ahler-Einstein metric. Recently, Tosatti and Weinkove \cite{TosattiWeinkove2015} proved that under the same condition $c_1(M)<0$ on a compact complex manifold, the normalized Chern-Ricci flow \eqref{e-NKRF} with an arbitrary Hermitian initial metric also has a long time solution and converges to the K\"ahler-Einstein metric with negative scalar curvature. In \cite{ChengYau1982}, Cheng and Yau proved that if $M$ is a complete non-compact K\"ahler manifold with Ricci curvature bounded above by a negative constant, injectivity radius bounded below by a positive constant, and curvature tensor bounded together with its covariant derivatives, then $M$ admits a unique complete K\"ahler-Einstein metric with negative scalar curvature. In \cite{Chau04}, Chau used the K\"ahler-Ricci flow to prove that if $(M, g)$ is a complete non-compact K\"ahler manifold with bounded curvature and $\text{\rm Ric}(g)+g=\sqrt{-1}\partial\bar\partial f$ for some smooth bounded function $f$, then it also admits a complete K\"ahler-Einstein metric with negative scalar curvature. Later, Lott and Zhang \cite{Lott-Zhang} generalized Chau's result by assuming $$-\text{\rm Ric}(g)+\sqrt{-1}\partial\bar\partial f\ge{\beta} g$$ for some smooth function $f$ with bounded $k$th covariant derivatives for each $k\geq0$ and some positive constant ${\beta}$. In this work, we will generalize the results in \cite{Lott-Zhang,TosattiWeinkove2015} to complete non-compact Hermitian manifolds with possibly unbounded curvature.
For the long time existence and convergence, we will prove the following: \begin{thm}\label{main-longtime} Under the assumptions of Theorem \ref{main-instant-complete}, suppose in addition that $$-\text{\rm Ric}(h)+\sqrt{-1}\partial\bar\partial f\geq \beta \theta_0$$ for some $f\in C^\infty(M)\cap L^\infty(M)$ and some $\beta>0$. Then the solution constructed in Theorem \ref{main-instant-complete} is a long time solution and converges to a unique complete K\"ahler-Einstein metric with negative scalar curvature on $M$. \end{thm} As a consequence, we see that if $h$ satisfies the conditions in the theorem, then $M$ supports a complete K\"ahler-Einstein metric with negative scalar curvature, generalizing the results in \cite{Lott-Zhang,TosattiWeinkove2015}.
The paper is organized as follows: In Section 2, we will derive a priori estimates along the potential flow and apply them in Section 3 to prove Theorem \ref{main-instant-complete}. Furthermore, we will study the short time behaviour of the constructed solution. In Section 4, we will prove Theorem \ref{main-longtime} and discuss the long time behaviour of the general K\"ahler-Ricci flow if the initial data satisfies some extra conditions. In Appendix A, we will collect some information about the relation between the normalized Chern-Ricci flow and the unnormalized one, {together with some useful differential inequalities. In Appendix B, we will state a maximum principle which will be used in this work.}
\section{A priori estimates for the potential flow}\label{s-aprior}
We will study the short time existence of the potential flow \eqref{e-MP-1} with $\omega_0$ only being assumed to be nonnegative. We need some a priori estimates for the flow. In this section, we always assume the following: \begin{enumerate} \item There is a {proper} exhaustion function $\rho(x)$ on $M$ such that
$$|\partial\rho|^2_h +|\sqrt{-1}\partial\bar\partial \rho|_h \leq K.$$ \item $\mathrm{BK}_h\geq -K$. \item The torsion of $h$, $T_h=\partial \omega_h$ satisfies
$$|T_h|^2_h +|\nabla^h_{\bar\partial} T_h |\leq K.$$ \end{enumerate} Here $K$ is some positive constant.
On the other hand, let $\omega_0$ be a real (1,1) form with corresponding Hermitian form $g_0$. We always assume the following: \begin{enumerate} \item[(a)] $g_0\le h$ and
$$|T_{g_0}|_h^2+|\nabla^h_{\bar\partial} T_{g_0}|_h+ |\nabla^{h}g_0|_h\leq K.$$
\item[(b)] There exist $f\in C^\infty(M)\cap L^\infty(M),\beta>0$ and $s>0$ so that $$-\text{\rm Ric}(\theta_0)+e^{-s}(\omega_0+\text{\rm Ric}(\theta_0))+\sqrt{-1}\partial\bar\partial f\geq \beta \theta_0.$$
\end{enumerate} Note that if $g_0\le Ch$, then we can replace $h$ by $Ch$; condition (b) is then still satisfied with a possibly smaller $\beta$.
Since $g_0$ can be degenerate, we perturb $g_0$ in the following way: Let $1\ge \eta\ge 0$ be a smooth function on $\mathbb{R}$ such that $\eta(s)=1$ for $s\le 1$, $\eta(s)=0$ for $s\ge 2$ and $|\eta'|+|\eta''|\le c_1$, say. For $\epsilon>0$ and $\rho_0\gg1$, let $\eta_{0}(x)=\eta(\rho(x)/\rho_0)$. Consider the metric:
\begin{equation} \gamma_0=\gamma_0(\rho_0,\epsilon)=\eta_0\omega_0+(1-\eta_0)\theta_0+\epsilon\theta_0. \end{equation}
Then \begin{itemize}
\item $\gamma_0$ is the K\"ahler form of a complete Hermitian metric, which is uniformly equivalent to $h$;
\item $\mathrm{BK}(\gamma_0 )\ge -C$ for some $C$ which may depend on $\rho_0, \epsilon$;
\item The torsion $|T_{\gamma_0} |_{\gamma_0}+|\nabla^{\gamma_0}_{\bar \partial} T_{\gamma_0}|_{\gamma_0}$ is uniformly bounded by a constant which may depend on $\rho_0, \epsilon$. \end{itemize}
We first obtain a short time existence result for the potential flow starting from $\gamma_0$:
\begin{lma}\label{l-perturbed-1} \eqref{e-MP-1} has a solution $u(t)$ on $M\times[0, s)$ with ${\alpha}=-\text{\rm Ric}(\theta_0)+e^{-t}\left(\text{\rm Ric}(\theta_0)+\gamma_0\right)$ and $\omega(t)={\alpha}+\sqrt{-1}\partial\bar\partial u$ such that $\omega(t)$ satisfies \eqref{e-NKRF} with initial data $\gamma_0$, where $\omega(t)$ is the K\"ahler form of $g(t)$. Moreover, $g(t)$ is uniformly equivalent to $h$ on $M\times[0, s_1]$ for all $s_1<s$. \end{lma} \begin{proof} By the proof of \cite[Theorem 4.1]{Lee-Tam}, it is sufficient to prove that for any $0<s_1<s$, $$ -\text{\rm Ric}(\gamma_0)+e^{-s_1}(\gamma_0+\text{\rm Ric}(\gamma_0))+\sqrt{-1}\partial\bar\partial f_1\ge \beta_1\gamma_0 $$ for some smooth bounded function $f_1$ and some constant $\beta_1>0$. To simplify the notations, if $\eta, \zeta$ are real (1,1) forms, we write $\eta \succeq \zeta$ if $\eta+\sqrt{-1}\partial\bar\partial \phi\ge \zeta$ for some smooth and bounded function $\phi$. We compute: \begin{equation*}\begin{split} -\text{\rm Ric}(\gamma_0)+e^{-s_1}(\gamma_0+\text{\rm Ric}(\gamma_0)) =&-(1-e^{-s_1})\text{\rm Ric}(\gamma_0)+e^{-s_1}\gamma_0\\ \succeq&-(1-e^{-s_1})\text{\rm Ric}(\theta_0)+e^{-s_1}\gamma_0\\ \succeq&\frac{1-e^{-s_1}}{1-e^{-s}}(\beta \theta_0-e^{-s}\omega_0)+e^{-s_1}\gamma_0 \\ \ge&\frac{1-e^{-s_1}}{1-e^{-s}} \beta \theta_0 \\ \ge& \beta_1\gamma_0\end{split}\end{equation*} for some $\beta_1>0$ because $0<s_1<s$ and $\gamma_0\ge \omega_0$. Here we have used {condition (b) above}, the fact that $\gamma_0^n =\theta_0^ne^H$ for some smooth bounded function $H$ and the definition of Chern-Ricci curvature.
\end{proof} Let $\omega(t)$ be the solution in the lemma and let $u(t)$ be the potential as in \eqref{e-potential}. Since we want to prove in the next section that \eqref{e-MP-1} has a solution $u(t)$ on $M\times(0, s)$ with ${\alpha}=-\text{\rm Ric}(\theta_0)+e^{-t}\left(\text{\rm Ric}(\theta_0)+\omega_0\right)$, we need to obtain uniform estimates of $u, \dot u$ and $\omega(t)$ which are independent of $\rho_0$ and $\epsilon$. The estimates are more delicate because the initial data $\omega_0$ may be degenerate. For later applications, we need to obtain estimates on $(0,1]$ and on $[1,s)$ if $s>1$. Note that for fixed $\rho_0, \epsilon$, $u(t)$ is smooth up to $t=0$. Moreover, $u$ and $\dot u:=\frac{\partial}{\partial t}u$ are uniformly bounded on $M\times[0,s_1]$ for all $0<s_1<s$.
\subsection{A priori estimates for $u$ and $\dot u$}\label{ss-uudot}
We first give upper bounds for $u$ and $\dot u$.
\begin{lma}\label{l-uudot-upper-1} There is a constant $C$ depending only on $n$ and $K$ such that $$
u\le C\min\{t,1\}, \ \ \dot u\le \frac{Ct}{e^t-1} $$ on $M\times[0, s)$, provided $0<\epsilon<1$. \end{lma} \begin{proof}The proofs here follow almost verbatim from the K\"ahler case \cite{TianZhang2006}, but we include brief arguments for the reader's convenience. For notational convenience, we use $\Delta=g^{i\bar j} \partial_i \partial_{\bar j}$ to denote the Chern Laplacian associated to $g(t)$. Since $-\text{\rm Ric}(\theta_0)=\omega(t)-e^{-t}(\text{\rm Ric}(\theta_0)+\gamma_0)-\sqrt{-1}\partial\bar\partial u$
by \eqref{e-potential-1}, we have
\begin{equation}\label{e-udot-1}
\begin{split}
\lf(\frac{\p}{\p t}-\Delta\ri) (e^t\dot u)=&e^t\dot u-e^t \operatorname{tr}_{\omega}\text{\rm Ric}(\theta_0)-e^t\lf(\frac{\p}{\p t}-\Delta\ri) u-n e^t\\
=&e^t\operatorname{tr}_\omega \left(- \text{\rm Ric}(\theta_0)+\sqrt{-1}\partial\bar\partial u\right)-ne^t\\
=&e^t\operatorname{tr}_\omega\left(\omega-e^{-t}(\text{\rm Ric}(\theta_0)+\gamma_0)\right)-ne^t\\
=& -\operatorname{tr}_\omega (\text{\rm Ric}(\theta_0)+\gamma_0)\\
=&\lf(\frac{\p}{\p t}-\Delta\ri) (\dot u+u)+n -\operatorname{tr}_\omega(\gamma_0).
\end{split}
\end{equation}
Hence \begin{equation*} \lf(\frac{\p}{\p t}-\Delta\ri)(\dot u+u+nt-e^t\dot u)=\operatorname{tr}_\omega\gamma_0\ge0. \end{equation*} At $t=0$, $\dot u+u+nt-e^t\dot u=0$. By the maximum principle (Lemma \ref{max}), we have \begin{equation}\label{e-uudot-upper-1} (e^t-1)\dot u\le nt+u. \end{equation}
Next consider \begin{equation*} F=u-At-\kappa\rho \end{equation*} on $M\times[0, s_1]$ for any fixed $s_1<s$. Here $\kappa>0$ is a constant. Suppose $\sup\limits_{M\times[0, s_1]}F>0$, then there exists $(x_0, t_0)\in M\times(0, s_1]$ such that $F\leq F(x_0, t_0)$ on $M\times[0, s_1]$, and at this point,
\begin{equation*}\begin{split} 0\leq& \dot u -A=\log \left(\frac{\omega^n(t)}{\theta_0^n}\right)-u-A. \end{split} \end{equation*} Also, $\sqrt{-1}\partial\bar\partial u\le \kappa\sqrt{-1}\partial\bar\partial \rho\le \kappa K\theta_0$. Hence at $(x_0,t_0)$, \begin{equation*} \begin{split} \omega(t)=&-\text{\rm Ric}(\theta_0)+e^{-t}(\text{\rm Ric}(\theta_0)+\gamma_0)+\sqrt{-1}\partial\bar\partial u\\ \le&(-1+e^{-t})\text{\rm Ric}(\theta_0)+e^{-t}\gamma_0+\kappa K\theta_0\\ \le &(L+2+\kappa K)\theta_0, \end{split} \end{equation*} here $\text{\rm Ric}(\theta_0)\ge -L(n, K)\theta_0$. Hence at $(x_0,t_0)$ we have \begin{equation*} \begin{split} u\le & n\log(L+2+\kappa K)-A\\ \le &0 \end{split} \end{equation*} if $A=n\log(L+2)+1$ and $\kappa>0$ is small enough. Hence $F(x_0,t_0)<0$. This is a contradiction. Hence $F\le 0$ on $M\times[0, s_1]$ provided $A=A(n,K)=n\log(L+2)+1$ and we have \begin{equation}\label{e-uudot-upper-2} u\le At \end{equation} by letting $\kappa\to0$. Combining this with \eqref{e-uudot-upper-1}, we conclude that $$ \dot u\le \frac{(A+n)t}{e^t-1}. $$ Combining this with \eqref{e-uudot-upper-2}, we conclude that $u\le C$ for some constant $C$ depending only on $n, K$. Since $s_1$ is arbitrary, we complete the proof of Lemma \ref{l-uudot-upper-1}. \end{proof}
Next, we will estimate the lower bound of $u$ and $\dot u$. \begin{lma}\label{l-all-u}\begin{enumerate}
\item[(i)] $u(x,t)\geq - \frac{C}{1-e^{-s}} t+nt\log(1-e^{-t})$ on $M\times[0, s)$ for some constant $C>0$ depending only on $ n, \beta, K, ||f||_\infty$.
\item [(ii)] For $0<s_1\leq 1$ and $s_1<s$,
\begin{equation*}
\dot u+u\ge\frac1{1-e^{s_1-s}}\left(n\log t-\frac{C}{1-e^{-s}}\right)
\end{equation*}
on $M\times(0, s_1]$, for some constant $C>0$ depending only on $n, \beta, K, ||f||_\infty$.
\item [(iii)] For $0<s_1\leq 1$ and $s_1<s$,
$$\dot u+u\geq -C$$
on $M\times[0, s_1]$ for some constant $C>0$ depending only on
$ n, \beta$, $K, ||f||_\infty, s_1, s$ and $\epsilon$.
\item [(iv)] Suppose $s>1$, then for $1<s_1<s$,
$$\dot u+u\ge -\frac{C(1+s_1e^{s_1-s})}{1-e^{s_1-s}}$$ on $M\times[1,s_1]$
for some constant $C(n, \beta, ||f||_\infty, K)>0$.
\item[(v)] For $0<s_1<s$,
$$u\ge -\frac{C(1+s_1e^{s_1-s})}{1-e^{s_1-s}}$$ on $M\times[0,s_1]$ for some constant $C(n, \beta, ||f||_\infty, K)>0$.
\end{enumerate}
\end{lma}
\begin{proof} In the following, $C_i$ will denote positive constants depending only on $n, \beta, ||f||_\infty, K$ and $D_i$ will denote positive constants which may also depend on $\rho_0, \epsilon$ but not on $\kappa$.
To prove (i): Consider \begin{equation*} F=u(x,t)-\frac{1-e^{-t}}{1-e^{-s}}f(x)+A\cdot t-nt\log(1-e^{-t})+\kappa\rho(x) .\end{equation*} Suppose $\inf\limits_{M\times[0, s_1]}F<0$. Then there exists $(x_0, t_0)\in M\times(0, s_1]$ such that $F\geq F(x_0, t_0)$ on $M\times[0, s_1]$. At this point, we have \begin{equation*}\begin{split} 0\geq & \frac{\p}{\p t} F \\=&\dot u+A-\frac{e^{-t}}{1-e^{-s}}f(x)-n\log(1-e^{-t})-\frac{nt}{e^t-1}.\\ =&\log\frac{(-\text{\rm Ric}(\theta_0)+e^{-t}(\text{\rm Ric}(\theta_0)+\gamma_0)+\sqrt{-1}\partial\bar\partial u)^n}{\theta_0^n}-u +A\\ &-n\log(1-e^{-t})-\frac{nt}{e^t-1}-\frac{e^{-t}}{1-e^{-s}}f\\ \geq& \log\frac{(-\text{\rm Ric}(\theta_0)+e^{-t}(\text{\rm Ric}(\theta_0)+\gamma_0)+ \frac{1-e^{-t}}{1-e^{-s}}\sqrt{-1}\partial\bar\partial f-\kappa\sqrt{-1}\partial\bar\partial\rho)^n}{\theta_0^n}\\ &-C(n, K)-\frac{e^{-t}}{1-e^{-s}}f+A-n\log(1-e^{-t})-\frac{nt}{e^t-1},\\ \end{split} \end{equation*} where we have used the fact that $u\leq C(n, K)$, and $\sqrt{-1}\partial\bar\partial u\ge\frac{1-e^{-t}}{1-e^{-s}}\sqrt{-1}\partial\bar\partial f-\kappa\sqrt{-1}\partial\bar\partial \rho$. Note that $$ -\text{\rm Ric}(\theta_0)\ge\frac1{1-e^{-s}}\left(\beta\theta_0-e^{-s}\omega_0-\sqrt{-1}\partial\bar\partial f\right), $$ hence \begin{equation*} \begin{split} &-\text{\rm Ric}(\theta_0)+e^{-t}(\text{\rm Ric}(\theta_0)+\gamma_0)+\frac{1-e^{-t}}{1-e^{-s}}\sqrt{-1}\partial\bar\partial f-\kappa\sqrt{-1}\partial\bar\partial\rho\\ \ge&e^{-t}\gamma_0+\frac{1-e^{-t}}{1-e^{-s}}\left(\beta\theta_0-e^{-s}\omega_0 \right) -\kappa K\theta_0 \\ \ge& \frac{1}{2}\frac{1-e^{-t}}{1-e^{-s}} \beta\theta_0 \end{split} \end{equation*} if $\kappa $ is small enough. Here we have used the fact that $0<t<s$ and $\gamma_0\ge \omega_0$. Hence at $(x_0,t_0)$, \begin{equation*} \begin{split} 0\geq& n\log(1-e^{-t}) -C_1 \\ &-\frac{e^{-t}}{1-e^{-s}}f+A-n\log(1-e^{-t})-\frac{nt}{e^t-1}\\
\geq& -\frac{1}{1-e^{-s}}||f||_\infty+A-C_2 \\
>&0 \end{split}
\end{equation*} if $A=\frac{1}{1-e^{-s}}||f||_\infty+C_2+1$. Hence for such $A$, $F\ge 0$ and for all $\kappa>0$ small enough, we conclude that $$ u(x,t)\ge -At+nt\log(1-e^{-t}). $$
To prove (ii), we have
\begin{equation*} \lf(\frac{\p}{\p t}-\Delta\ri)(\dot u+u)=-\operatorname{tr}_\omega(\text{\rm Ric}(\theta_0))-n. \end{equation*} On the other hand, by \eqref{e-udot-1}, we also have \begin{equation*} \lf(\frac{\p}{\p t}-\Delta\ri) (e^t\dot u)=-\operatorname{tr}_\omega(\text{\rm Ric}(\theta_0)+\gamma_0). \end{equation*} Hence \begin{equation}\label{e-udot-2} \begin{split} & \lf(\frac{\p}{\p t}-\Delta\ri)\left((1-e^{t-s})\dot u+u\right)\\ =&\operatorname{tr}_\omega(-\text{\rm Ric}(\theta_0)+e^{-s}(\text{\rm Ric}(\theta_0)+\gamma_0))-n\\ \ge&\beta\operatorname{tr}_\omega(\theta_0)-\Delta f-n. \end{split} \end{equation}
Let $F=(1-e^{t-s})\dot u+u-f-A\log t+\kappa\rho$, where $A>0$ is a constant to be determined. Since $\log t\to-\infty$ as $t\rightarrow 0$, we conclude that for $0<s_1<s$, if $\inf_{M\times[0,s_1]}F\le 0$, then there is $(x_0,t_0)\in M\times(0,s_1]$ so that $F(x_0,t_0)=\inf_{M\times[0,s_1]}F$. By \eqref{e-udot-2}, at $(x_0,t_0)$ we have \begin{equation*} \begin{split} 0\ge&\lf(\frac{\p}{\p t}-\Delta\ri) F\\ \ge&\beta\operatorname{tr}_\omega(\theta_0)-n-\frac At-\kappa D_1\\ \ge& n\beta\exp(-\frac1n(\dot u+u))-n-\frac At-\kappa D_1 \end{split} \end{equation*} where $D_1>0$ is a constant independent of $\kappa$. Hence at this point, $$ \dot u+u\ge -n\log\left(\frac1{n\beta}(n+\frac At+\kappa D_1)\right). $$
Hence at $(x_0,t_0)$, noting that $0<t_0\le s_1<s$ and $s_1\le 1$, \begin{equation*} \begin{split} F\geq& (1-e^{t-s})(\dot u+u)+e^{t-s}u-f-A\log t\\ \ge&-(1-e^{t-s})n\log\left(\frac1{n\beta}(n+\frac At+\kappa D_1)\right)-\sup_M f-A\log t\\ &- \frac{C_3}{1-e^{-s}}+nt\log(1-e^{-t})\\ \ge&[(1-e^{t-s})n-A]\log t-(1-e^{t-s})n\log\left(\frac1{n\beta}(nt+A+\kappa t D_1)\right)\\
&-||f||_\infty-\frac{C_4}{1-e^{-s}} \\
\ge &- n\log\left(\frac1{n\beta}(2n+\kappa D_1)\right)-||f||_\infty-\frac{C_4}{1-e^{-s}}\end{split} \end{equation*} if $A=n$. Here we may assume that $\beta>0$ is small enough so that $2/\beta>1$. Hence we have \begin{align*}
F\ge - n\log\left(\frac1{n\beta}(2n+\kappa D_1)\right)-||f||_\infty-\frac{C_4}{1-e^{-s}}. \end{align*} on $M\times(0,s_1]$. Let $\kappa\to0$, we conclude that \begin{equation*} \begin{split} (1-e^{t-s})\left(\dot u+u\right)=& (1-e^{t-s})\dot u+u-e^{t-s}u \\ \ge&n\log t-\frac{C_5}{1-e^{-s}}, \end{split} \end{equation*} where we have used the upper bound of $u$ in Lemma \ref{l-uudot-upper-1}. From this (ii) follows because $t\le s_1$.
The proof of (iii) is similar to the proof of (ii) by letting $A=0$. Note that in this case, the infimum of $F$ may be attained at $t=0$ which depends also on $\epsilon$.
To prove (iv), let
$F$ be as in the proof of (ii) with $A=0$. If $\inf_{M\times[\frac 12,s_1]}F=\inf_{M\times\{\frac 12\} }F$, then by (i) and (ii), we have
$$
F\ge -C_6.
$$
If $\inf_{M\times[\frac 12,s_1]}F<\inf_{M\times\{\frac 12\}}F$, then we can find $(x_0,t_0)\in M\times(\frac 12,s_1]$ such that $F(x_0,t_0)$ attains the infimum. As in the proof of (ii), at this point,
\begin{equation*}
\begin{split}
\dot u+u\ge-n\log\left(\frac1{n\beta}(n+\kappa D_2)\right)
\end{split}
\end{equation*} where $D_2>0$ is a constant independent of $\kappa$. Hence as in the proof of (ii), \begin{equation*} \begin{split} F(x_0,t_0)\ge &(1-e^{t_0-s})(\dot u+u)+e^{t_0-s}u-f\\ \geq&-n(1-e^{t_0-s}) \log\left(\frac1{n\beta}(n+\kappa D_2)\right)- \frac{C_7s_1e^{s_1-s}}{1-e^{-s}} -C_8\\ \ge&-n \log\left(\frac1{n\beta}(n+\kappa D_2)\right)- \frac{C_7s_1e^{s_1-s}}{1-e^{-s}} -C_8 \end{split} \end{equation*} because $t_0\le s_1$, where we have used (i) and we may assume that $\beta<1$. Let $\kappa\to0$, we conclude that on $M\times[\frac 12, s_1]$, \begin{equation*}\begin{split} &(1-e^{t-s})(\dot u+u)+e^{t-s}u-f\ge n \log\beta- \frac{C_7s_1e^{s_1-s}}{1-e^{-s}} -C_8.\end{split} \end{equation*}
By Lemma \ref{l-uudot-upper-1}, we have \begin{equation*} \dot u+u\ge -\frac{C_9(1+s_1e^{s_1-s})}{1-e^{s_1-s}} \end{equation*} on $M\times[\frac 12,s_1]$ for some constant $C_9>0$, because $s>1$.
Finally, (v) follows from (i), Lemma \ref{l-uudot-upper-1} and (iv) by integration.
\end{proof}
\subsection{A priori estimates for $\omega(t)$}\label{ss-trace}
Next we will estimate the uniform upper bound of $g(t)$. Before we do this, we first give uniform estimates for the evolution of the key quantity $\log \operatorname{tr}_hg(t)$.
Let $\hat T$ and $T_0$ be the torsions of $h, \gamma_0$ respectively. Note that $\gamma_0$ depends on $\rho_0, \epsilon$. Let $\hat\nabla$ be the Chern connection of $h$. Recall that $T_{ij\bar l}=\partial_ig_{j\bar l}-\partial_j g_{i\bar l}$ etc.
Let $\widetilde g$ be such that $g(t)=e^{-t}\widetilde g(e^t-1)$. Let $s=e^t-1$. Then \begin{equation*} \begin{split} -\text{\rm Ric} (\widetilde g(s))-g(t)=&-\text{\rm Ric} (g(t))-g(t)\\ =&\frac{\partial}{\partial t}g(t)\\ =&-e^{-t}\widetilde g(e^t-1)+\frac{\partial}{\partial s}\widetilde g(s)\\ =&-g(t)+\frac{\partial }{\partial s}\widetilde g(s). \end{split} \end{equation*} So \begin{equation*} \frac{\partial }{\partial s}\widetilde g(s)=-\text{\rm Ric}(\widetilde g(s)) \end{equation*} and $\widetilde g(0)=\gamma_0$.
Let $\Upsilon(t)=\operatorname{tr}_{h}g(t)$ and $\widetilde\Upsilon(s)=\operatorname{tr}_{h}\widetilde g(s)$. By Lemma \ref{l-a-1}, we have \begin{equation*}
\lf(\frac{\p}{\p s}-\wt\Delta\ri) \log \widetilde\Upsilon=\mathrm{I+II+III}
\end{equation*}
where
\begin{equation*} \begin{split} \mathrm{I}\le &2\widetilde\Upsilon^{-2}\text{\bf Re}\left( h^{i\bar l} \widetilde g^{k\bar q} (T_0)_{ki\bar l}\hat \nabla_{\bar q}\widetilde\Upsilon\right). \end{split} \end{equation*} \begin{equation*} \begin{split} \mathrm{II}=&\widetilde\Upsilon^{-1} \widetilde g^{i\bar{j}} h^{k\bar l}\widetilde g_{k\bar q} \left(\hat \nabla_i \overline{(\hat T)_{jl}^q}- h^{p\bar q}\hat R_{i\bar lp\bar j}\right)\\ \end{split} \end{equation*} and
\begin{equation*} \begin{split} \mathrm{III}=&-\widetilde\Upsilon^{-1} \widetilde g^{{i\bar{j}}} h^{k\bar l}\left(\hat \nabla_i\left(\overline{( T_0)_{jl\bar k} } \right) +\hat \nabla_{\bar l}\left( ( T_0)_{ik\bar j} \right)-\overline{ (\hat T)_{jl}^q}( T_0)_{ik\bar q} \right) \end{split} \end{equation*}
Now $$ \widetilde \Upsilon(s)=e^t\Upsilon(t). $$ So \begin{equation*} \lf(\frac{\p}{\p s}-\wt\Delta\ri) \log\widetilde\Upsilon(s)=e^{-t}\left(\lf(\frac{\p}{\p t}-\Delta\ri) \log\Upsilon+1\right) \end{equation*} \begin{equation*} \begin{split} \mathrm{I}\le &2e^{- 2t}\Upsilon^{-2}\text{\bf Re}\left( h^{i\bar l} g^{k\bar q} (T_0)_{ki\bar l}\hat \nabla_{\bar q} \Upsilon\right). \end{split} \end{equation*} \begin{equation*}
\begin{split} \mathrm{II}=&e^{-t}\Upsilon^{-1} g^{i\bar{j}} h^{k\bar l} g_{k\bar q} \left(\hat \nabla_i \overline{(\hat T)_{jl}^q}- h^{p\bar q}\hat R_{i\bar lp\bar j}\right)\\ \end{split} \end{equation*} and
\begin{equation*} \begin{split} \mathrm{III}=&-e^{-2t}\Upsilon^{-1} g^{{i\bar{j}}} h^{k\bar l}\left(\hat \nabla_i\left(\overline{( T_0)_{jl\bar k} } \right) +\hat \nabla_{\bar l}\left( ( T_0)_{ik\bar j} \right)-\overline{ (\hat T)_{jl}^q}( T_0)_{ik\bar q} \right) \end{split} \end{equation*}
Hence \begin{equation}\label{e-logY} \lf(\frac{\p}{\p t}-\Delta\ri)\log \Upsilon=\mathrm{I}'+\mathrm{II}'+\mathrm{III}'-1 \end{equation} where \begin{equation*} \begin{split} \mathrm{I}'\le &2e^{-t}\Upsilon^{-2}\text{\bf Re}\left( h^{i\bar l} g^{k\bar q} (T_0)_{ki\bar l}\hat \nabla_{\bar q} \Upsilon\right). \end{split} \end{equation*} \begin{equation*} \begin{split} \mathrm{II}'=& \Upsilon^{-1} g^{i\bar{j}} h^{k\bar l} g_{k\bar q} \left(\hat \nabla_i \overline{(\hat T)_{jl}^q}- h^{p\bar q}\hat R_{i\bar lp\bar j}\right)\\ \end{split} \end{equation*} and
\begin{equation*} \begin{split} \mathrm{III}'=&-e^{-t}\Upsilon^{-1} g^{{i\bar{j}}} h^{k\bar l}\left(\hat \nabla_i\left(\overline{( T_0)_{jl\bar k} } \right) +\hat \nabla_{\bar l}\left( ( T_0)_{ik\bar j} \right)-\overline{ (\hat T)_{jl}^q}( T_0)_{ik\bar q} \right) \end{split} \end{equation*}
Now we want to estimate the terms in the above differential inequality.
\underline{\it Estimate of $\mathrm{II}'$}
Choose a frame which is unitary with respect to $h$ and in which $g_{{i\bar{j}}}=\lambda_i\delta_{ij}$. Then \begin{equation}\label{e-logY-1} \begin{split} \mathrm{II}'=& (\sum_l\lambda_l)^{-1}\lambda_i^{-1}\lambda_k\left(\hat\nabla_i\overline{(\hat T)_{ik}^k}-\hat R_{i\bar kk\bar i}\right)\\ \le &C(n,K)\operatorname{tr}_{g}h. \end{split} \end{equation}
\underline{\it Estimate of $\mathrm{III}'$}
Next, we compute the torsion of $\gamma_0$, $T_0=T_{\gamma_0}$, where $\gamma_0=\eta(\frac{\rho(x)}{\rho_0})g_0+(1-\eta(\frac{\rho(x)}{\rho_0}))h+\epsilon h$:\begin{equation*}\begin{split} (T_0)_{ik\bar q}=&\partial_i(\gamma_0)_{k\bar q}-\partial_k(\gamma_0)_{i\bar q}\\ =&\eta'\frac{1}{\rho_0}[\rho_i(x)(g_0)_{k\bar q}-\rho_k(x)(g_0)_{i\bar q}]+\eta[\partial_i(g_0)_{k\bar q}-\partial_k(g_0)_{i\bar q}]\\ &+(1-\eta+\epsilon)[\partial_ih_{k\bar q}-\partial_kh_{i\bar q}]-\eta'\frac{1}{\rho_0}[\rho_ih_{k\bar q}-\rho_kh_{i\bar q}]. \end{split} \end{equation*}
By the assumptions, all terms above are bounded by $C(n, K)$ for all $\rho_0\geq 1$ and for all $\epsilon\leq 1$.
It remains to control $\hat \nabla_{\bar l}\left( ( T(\gamma_0))_{ik\bar j} \right)$. We may compute $\hat \nabla_{\bar l}\left( ( T(\gamma_0))_{ik\bar j} \right)$ directly. \begin{equation*}\begin{split} & \hat \nabla_{\bar l}\left( ( T(\gamma_0))_{ik\bar j} \right)\\=&\hat \nabla_{\bar l}(\partial_i(\gamma_0)_{k\bar j}-\partial_k(\gamma_0)_{i\bar j})\\ =&\hat \nabla_{\bar l}\{\eta'\frac{1}{\rho_0}[\rho_i(x)(g_0)_{k\bar j}-\rho_k(x)(g_0)_{i\bar j}]+\eta[\partial_i(g_0)_{k\bar j}-\partial_k(g_0)_{i\bar j}]\\ &+(1-\eta+\epsilon)[\partial_ih_{k\bar j}-\partial_kh_{i\bar j}]-\eta'\frac{1}{\rho_0}[\rho_ih_{k\bar q}-\rho_kh_{i\bar q}]\}\\ =&\eta''\rho_{\bar l}\frac{1}{\rho^2_0}[\rho_i(g_0)_{k\bar j}-\rho_k(g_0)_{i\bar j}]+\eta'\frac{1}{\rho_0}[\rho_{i\bar l}(g_0)_{k\bar j}-\rho_{k\bar l}(g_0)_{i\bar j}]\\ &+\eta'\frac{1}{\rho_0}[\rho_i\hat \nabla_{\bar l}(g_0)_{k\bar j}-\rho_k\hat \nabla_{\bar l}(g_0)_{i\bar j}]+\eta_{\bar l}[\partial_i(g_0)_{k\bar j}-\partial_k(g_0)_{i\bar j}]\\ &+\eta \hat\nabla_{\bar l} T(g_0)_{ik\bar q}+(1-\eta+\epsilon)\hat \nabla_{\bar l} T(h)_{ik\bar j}-\eta'\frac{\rho_{\bar l}}{\rho_0}T(h)_{ik\bar j}\\ &-\eta'\frac{1}{\rho_0}[\rho_{i\bar l}h_{k\bar q}-\rho_{k\bar l}h_{i\bar q}]-\eta''\frac{1}{\rho^2_0}[\rho_{\bar l}\rho_{i}h_{k\bar q}-\rho_{\bar l}\rho_{k}h_{i\bar q}]. \end{split} \end{equation*}
Again, every term above is controlled by $C(n, K)$, and therefore $|\hat \nabla_{\bar l}\left( ( T(\gamma_0))_{ik\bar j} \right)|\leq C(n, K)$.
Consequently, if $0<\epsilon<1$ and $\rho_0>1$, then \begin{equation}\label{e-logY-2} \mathrm{III}'\leq C(n, K)\cdot e^{-t}\Upsilon^{-1} \Lambda,
\end{equation}
where $\Lambda=\operatorname{tr}_{g}h$.
Now we will prove the uniform upper bound of $g(t)$.
\begin{lma}\label{l-trace-2} \begin{enumerate}
\item [(i)] For $0<s_1<s$,
$$
\operatorname{tr}_{h}g(x,t)\le \exp\left(\frac{C(E-\log(1-e^{-s}))}{1-e^{-t}}\right)
$$
on $M\times(0,s_1]$ for some constant $C>0$ depending only on $n,K, \beta, ||f||_\infty$, provided $0<\epsilon<1$ and $\rho_0>1$,
where
$$
E=\frac{(1+s_1e^{s_1-s})}{(1-e^{-s})(1-e^{s_1-s})}.
$$
\item [(ii)] For $0<s_1<s$, there is a constant $C$ depending only on $n,K, \beta, ||f||_\infty, s, s_1$ and also on $\epsilon$, but independent of $\rho_0$ such that $$ \operatorname{tr}_{h}g\le C $$ on $M\times[0, s_1]$. \end{enumerate}
\end{lma}
\begin{proof} In the following, $C_i$ will denote constants depending only on $n,K, {\beta}$ and $||f||_\infty$, but not $\rho_0$ and $\epsilon$. $D_i$ will denote constants which may also depend on $\epsilon, \rho_0$, but not $\kappa$. We always assume $0<\epsilon<1<\rho_0$.
Let $v(x,t)\ge1$ be a smooth bounded function. As before, let $\Upsilon=\operatorname{tr}_{h}g$ and $\Lambda=\operatorname{tr}_gh$ and let $\lambda=0$ or 1. For $\kappa>0$, consider the function $$F=(1-\lambda e^{-t})\log \Upsilon-Av+\frac 1v-\kappa\rho+Bt\log (1-\lambda e^{-t})
$$
on $M\times[0, s_1]$, where $A, B>0$ are constants to be chosen. We want to estimate $F$ from above.
Let
$$
\mathfrak{M}=\sup_{M\times[0,s_1]}F.
$$
Either (i) $\mathfrak{M}\le 0$; (ii) $\mathfrak{M}=\sup_{M\times\{0\}}F$; or (iii) there is $(x_0,t_0)$ with $t_0>0$ such that $F(x_0,t_0)=\mathfrak{M}$. If (ii) is true, then
\begin{equation}\label{e-tr-1}
\mathfrak{M}\le C_1(n).
\end{equation}
because $g(0)=\gamma_0\le (1+\epsilon)h$.
Suppose (iii) is true. If at this point $\Upsilon(x_0,t_0)\le 1$, then \eqref{e-tr-1} is true with a possibly larger $C_1$. So let us assume that $\Upsilon(x_0,t_0)>1$. By \eqref{e-logY}, \eqref{e-logY-1} and \eqref{e-logY-2}, at $(x_0,t_0)$ we have: \begin{equation*} \begin{split} 0\le&\lf(\frac{\p}{\p t}-\Delta\ri) F\\ =&(1-\lambda e^{-t})\lf(\frac{\p}{\p t}-\Delta\ri)\log \Upsilon+\lambda e^{-t}\log \Upsilon-(\frac{1}{v^2}+A)\lf(\frac{\p}{\p t}-\Delta\ri) v\\
&-\frac{2}{v^3}|\nabla v|^2+\kappa \Delta \rho+B\left(\log(1-\lambda e^{-t})+\frac{\lambda t}{e^t-\lambda}\right)\\ \le &(1-\lambda e^{-t})C_2\Lambda \left( 1 +e^{-t}\Upsilon^{-1} \right)\\& + 2(1-\lambda e^{-t})e^{-t}\Upsilon^{-2}\text{\bf Re}\left( h^{i\bar l} g^{k\bar q} (T_0)_{ki\bar l}\hat \nabla_{\bar q} \Upsilon\right)\\
&+\lambda e^{-t}\log \Upsilon- (\frac 1{v^2}+A)\lf(\frac{\p}{\p t}-\Delta\ri) v -\frac{2|\nabla v|^2}{v^3}\\ &+B\left(\log(1-\lambda e^{-t})+\frac{\lambda t}{e^t-\lambda}\right)+\kappa D_1. \end{split} \end{equation*}
At $(x_0,t_0)$, we also have:
$$(1-\lambda e^{-t}) \Upsilon^{-1}\hat \nabla \Upsilon-(\frac 1{v^2}+A)\hat\nabla v- \kappa\hat \nabla \rho=0.$$
Hence
\begin{equation*}\begin{split}
&2(1-\lambda e^{-t})e^{-t} \Upsilon^{-2}\text{\bf Re}\left( h^{i\bar l} g^{k\bar q} (T_0)_{ki\bar l}\hat \nabla_{\bar q} \Upsilon\right)\\ =& \frac{2e^{-t}}{\Upsilon}\text{\bf Re}\left( h^{i\bar l} g^{k\bar q} (T_0)_{ki\bar l}((\frac 1{v^2}+A)\hat\nabla_{\bar q} v- \kappa\hat \nabla_{\bar q} \rho)\right) \\
\leq&\frac{1}{v^3}|\nabla v|^2+\frac{C_3(A+1+\frac{1}{v^2})^2\cdot v^3 \Lambda}{\Upsilon^2}+\kappa D_2\\
\end{split}\end{equation*}
We now use the fact that $\Upsilon(x_0,t_0)>1$ to replace $\Upsilon^{-2}$ by $\Upsilon^{-1}$ in the estimate above.
Hence \begin{equation}\label{e-g-1}\begin{split} 0\le& C_2(1-\lambda e^{-t})\Lambda + \frac{C_3(A+\frac{1}{v^2})^2\cdot v^3 \Lambda}{\Upsilon} +\lambda e^{-t}\log \Upsilon\\ &- (\frac 1{v^2}+A)\lf(\frac{\p}{\p t}-\Delta\ri) v +B\left(\log(1-\lambda e^{-t})+\frac{\lambda t}{e^t-\lambda}\right)+\kappa D_3.\end{split} \end{equation} Now let $$
v=u-\frac{1- e^{-t}}{1-e^{-s}}f+\frac{C_4(1+s_1e^{s_1-s})}{(1-e^{-s})(1-e^{s_1-s})} $$
By Lemmas \ref{l-uudot-upper-1} and \ref{l-all-u}, we can find $C_4>0$ so that $v\ge 1$, and there is $C_5>0$ so that $$v\le \frac{C_5(1+s_1e^{s_1-s})}{(1-e^{-s})(1-e^{s_1-s})}.
$$ Let \begin{equation}\label{e-E} E:=\frac{(1+s_1e^{s_1-s})}{(1-e^{-s})(1-e^{s_1-s})}. \end{equation}
Note that
\begin{equation*} \begin{split} & \lf(\frac{\p}{\p t}-\Delta\ri) u\\ =&\dot u-\Delta u\\ =&\dot u-n+\operatorname{tr}_g\left(-(1-e^{-t}) \text{\rm Ric}(\theta_0)+e^{-t}\gamma_0 \right)\\ \ge&\dot u-n+\operatorname{tr}_g\left(\frac{1-e^{-t}}{1-e^{-s}}\left(\beta\theta_0-e^{-s}\omega_0-\sqrt{-1}\partial\bar\partial f\right)+ e^{-t}\gamma_0\right)\\ \ge&\dot u+\left[\frac{\beta(1-e^{-t})}{1-e^{-s}}+\epsilon e^{-t}\right]\Lambda-\frac{1-e^{-t}}{1-e^{-s}}\Delta f -n\\ \ge&\dot u+\left[\frac{\beta(1-e^{-t})}{1-e^{-s}}+\epsilon e^{-t}\right]\Lambda+\lf(\frac{\p}{\p t}-\Delta\ri) \left(\frac{1- e^{-t}}{1-e^{-s}} f\right)-\frac{ e^{-t}}{1-e^{-s}}f-n\\ \ge&\dot u+u+\left[\frac{\beta(1-e^{-t})}{1-e^{-s}}+\epsilon e^{-t}\right]\Lambda+\lf(\frac{\p}{\p t}-\Delta\ri) \left(\frac{1- e^{-t}}{1-e^{-s}} f\right)-\frac{C_6}{1-e^{-s}}. \end{split} \end{equation*} because $\gamma_0\ge \omega_0+\epsilon\theta_0$ and $t<s$.
On the other hand, $$ -\dot u-u=\log\left(\frac{\det h}{\det g}\right)\le c(n)+n\log \Lambda. $$ Hence \begin{equation}\label{e-g-2} \lf(\frac{\p}{\p t}-\Delta\ri) v\ge -n\log \Lambda+ \left[\frac{\beta(1-e^{-t})}{1-e^{-s}}+\epsilon e^{-t}\right]\Lambda -\frac{C_7}{1-e^{-s}}. \end{equation} Moreover, in a unitary frame with respect to $h$ in which $g_{i\bar{j}}=\lambda_i\delta_{ij}$, we have
\begin{equation}\label{e-tr-2}
\begin{split}
\Upsilon=&\sum_i\lambda_i\\
=&\frac{\det g}{\det h}\sum_{i} (\lambda_1\dots\hat\lambda_i\dots\lambda_n)^{-1}\\
\le &C_{8}\Lambda^{n-1}.
\end{split}
\end{equation} where we have used the upper bound of $\dot u+u=\log\frac{\det g}{\det h} $ in Lemma \ref{l-uudot-upper-1}. Combining \eqref{e-g-1}, \eqref{e-g-2} and \eqref{e-tr-2}, at $(x_0,t_0)$ we have \begin{equation} \label{e-tr-revised} \begin{split} 0\le& C_2(1-\lambda e^{-t})\Lambda\left(1 + \frac{C_9 E^3(A+1)^2}{(1-\lambda e^{-t})\Upsilon}\right) +\lambda e^{-t}\left(\log C_8+(n-1)\log\Lambda\right)\\ &+ (\frac 1{v^2}+A)\left( n\log \Lambda- \left[\frac{\beta(1-e^{-t})}{1-e^{-s}}+\epsilon e^{-t}\right]\Lambda+\frac{C_7}{1-e^{-s}}\right) \\
&+B\left(\log(1-\lambda e^{-t})+\frac{\lambda t}{e^t-\lambda}\right)+\kappa D_3\\
\le&\Lambda\left[C_2(1-\lambda e^{-t}) \left(1 + \frac{C_9E^3(A+1)^2}{(1-\lambda e^{-t})\Upsilon}\right)-\frac{A+1}{C^2_5E^2}\left(\frac{\beta(1-e^{-t})}{1-e^{-s}}+\epsilon e^{-t}\right)\right] \\ &+[n(1+A)+\lambda(n-1)] \log \Lambda+ \frac{C_{10}(A+1) }{1-e^{-s}}\\&+B\left(\log(1-\lambda e^{-t})+\frac{\lambda t}{e^t-\lambda}\right)+\kappa D_3+\lambda\log C_8 \end{split} \end{equation} where we have used the fact that $1\le v\le C_5E$.
{\bf Case 1}: Let $\lambda=1$. Suppose at $(x_0,t_0)$, $$ \frac{C_2C_9E^3(A+1)^2}{(1- e^{-t})\Upsilon}\ge \frac12\frac{1}{C^2_5E^2}\cdot(A+1)\cdot\beta\cdot\frac{1}{1-e^{-s}} $$ Then
\begin{equation*} (1- e^{-t})\Upsilon\le \frac{2C_2C_9C^2_5E^5(1-e^{-s})(A+1)}{\beta}\leq C_{11}E^5(A+1).
\end{equation*} Hence, \begin{equation*} (1- e^{-t})\log \Upsilon\le (1- e^{-t})\log(C_{11}E^5(A+1)) -(1-e^{-t})\log(1- e^{-t}) . \end{equation*} Therefore, \begin{equation}\label{e-tr-1-2} \mathfrak{M}\le C(1+\log E)+\log (A+1).
\end{equation} for some $C(n,\beta, K,||f||_\infty)>0$. Suppose at $(x_0,t_0)$,
$$ \frac{C_2C_9E^3(A+1)^2}{(1- e^{-t})\Upsilon}< \frac12\frac{1}{C^2_5E^2}\cdot(A+1)\cdot\beta\cdot\frac{1}{1-e^{-s}}, $$ then at $(x_0,t_0)$ we have
\begin{equation*} \begin{split} 0\le & (1- e^{-t}) \Lambda \left(C_2 - \frac12\frac{1}{C^2_5E^2}\cdot(A+1)\cdot\beta\cdot\frac{1}{1-e^{-s}}\right) +n(A+2)\log \Lambda\\ &+ \frac{C_{10}(A+1) }{1-e^{-s}} +B\left(\log(1- e^{-t})+\frac{ t}{e^t-1}\right)+\kappa D_3+\log C_8\\ =&\Lambda\left[ (1- e^{-t}) \left(C_2 - \frac12\frac{1}{C^2_5E^2}\cdot(A+1)\cdot\beta\cdot\frac{1}{1-e^{-s}} \right)\right]\\ &+n(A+2)\log ((1-e^{-t})\Lambda)+\frac{C_{10}(A+1) }{1-e^{-s}}-n(A+2)\log(1-e^{-t})\\ &+B\left(\log(1- e^{-t})+\frac{ t}{e^t-1}\right)+\kappa D_3+\log C_8\\ \le &-(1-e^{-t})\Lambda +n(A+2)\log ((1-e^{-t})\Lambda)+\frac{C_{12}E^2}{1-e^{-s}}, \end{split} \end{equation*} provided $A=C_{13}E^2$ so that $$ \frac12\frac{1}{C^2_5E^2}\cdot(A+1)\cdot\beta\cdot\frac{1}{1-e^{-s}} \ge (C_2+1) $$ and
$B$ is chosen so that $B=n(A+2)$ and $\kappa$ is small enough so that $\kappa D_3\le 1$.
Hence using $1+\frac{1}{2}\log x\leq \sqrt{x},\;\forall x>0$, we have at $(x_0,t_0)$, $$ (1-e^{-t})\Lambda\le \frac{C_{14}E^4}{1-e^{-s}}, $$ and so $$ \log\Lambda\le \log\frac{C_{14}E^4}{1-e^{-s}}-\log(1-e^{-t}). $$ By \eqref{e-tr-2}, we have
\begin{equation}\label{e-tr-4}
\begin{split} &(1-e^{-t})\log\Upsilon\\ \leq&(1-e^{-t})\left( \log C_8+(n-1)\log \Lambda\right) \\ \le&(1-e^{-t})\left( \log C_8+(n-1)\left(\log\frac{C_{14}E^4}{1-e^{-s}}-\log(1-e^{-t})\right)\right)\\ \le &(n-1)\log(\frac{1}{1-e^{-s}})+C_{15}(1+\log E). \end{split} \end{equation} Hence $\mathfrak{M}\le (n-1)\log(\frac{1}{1-e^{-s}})+C_{16}(1+\log E).$ By combining \eqref{e-tr-1}, \eqref{e-tr-1-2} and using the choice of $A$, we may let $\kappa\rightarrow 0$ to conclude that on $ M\times(0,s_1]$, $$ (1-e^{-t})\log \Upsilon\le (n-1)\log(\frac{1}{1-e^{-s}})+C_{17}(1+E). $$ and hence (i) in the lemma is true. Here we have used the fact that $E\geq \log E+1$.
{\bf Case 2}: Let $\lambda=0$, then \eqref{e-tr-revised} becomes: \begin{equation*} \begin{split} 0\le&\Lambda\left[C_2\left(1 + \frac{C_9E^3(A+1)^2}{\Upsilon}\right)-\frac{1}{C^2_5E^2}(A+1)\epsilon e^{-t}\right] \\ &+n(1+A) \log \Lambda+ \frac{C_{10}(A+1) }{1-e^{-s}}+\kappa D_3. \end{split} \end{equation*} We can argue as before to conclude that (ii) is true.
\end{proof}
Combining Lemma \ref{l-trace-2} with the lower bound of $\dot u+u$ in Lemma \ref{l-all-u}, we obtain:
\begin{cor}\label{eq-g} For any $0<s_0<s_1<s$, there is a constant $C$ depending only on $n,K, {\beta}, ||f||_\infty$ and $s_0, s_1, s$ but independent of $\epsilon,\rho_0$ such that if $0<\epsilon<1$, $\rho_0>1$, we have \begin{equation*}
C^{-1}h\leq g(t)\leq Ch \end{equation*} on $M\times[s_0, s_1]$. There is also a constant $\widetilde C(\epsilon)>0$ which may also depend on $\epsilon$ such that
\begin{equation*}
\widetilde C^{-1}h\leq g(t)\leq \widetilde Ch
\end{equation*}
on $M\times[0, s_1]$.
\end{cor}
\section{Short time existence for the potential flow and the normalized Chern-Ricci flow}
Using the a priori estimates in the previous section, we are ready to discuss short time existence for the potential flow and the Chern-Ricci flow. We begin with the short time existence of the potential flow. We have the following:
\begin{thm}\label{t-instant-complete} Let $(M,h)$ be a complete non-compact Hermitian manifold { with K\"ahler form $\theta_0$.} Suppose there is $K>0$ such that the following hold. \begin{enumerate} \item There is a {proper} exhaustion function $\rho(x)$ on $M$ such that
$$|\partial\rho|^2_h +|\sqrt{-1}\partial\bar\partial \rho|_h \leq K.$$ \item $\mathrm{BK}_h\geq -K$; \item The torsion of $h$, $T_h=\partial \omega_h$ satisfies
$$|T_h|^2_h +|\nabla^h_{\bar\partial} T_h |\leq K.$$ \end{enumerate} Let $\omega_0$ be a nonnegative real (1,1) form with corresponding Hermitian form $g_0$ on $M$ (possibly incomplete or degenerate) such that \begin{enumerate} \item[(a)] $g_0\le h$ and
$$|T_{g_0}|_h^2+|\nabla^h_{\bar\partial} T_{g_0}|_h+ |\nabla^{h}g_0|_h\leq K.$$
\item[(b)] There exist $f\in C^\infty(M)\cap L^\infty(M),\beta>0$ and $s>0$ so that $$-\text{\rm Ric}(\theta_0)+e^{-s}(\omega_0+\text{\rm Ric}(\theta_0))+\sqrt{-1}\partial\bar\partial f\geq \beta \theta_0.$$
\end{enumerate} Then \eqref{e-MP-1} has a solution on $M\times(0, s)$ so that $u(t)\to 0$ as $t\to0$ uniformly on $M$. Moreover, { setting $$ {\alpha}(t)=-\text{\rm Ric}(\theta_0)+e^{-t}(\text{\rm Ric}(\theta_0)+\omega_0), $$} for any $0<s_0<s_1<s$, $$\omega(t)={\alpha}+\sqrt{-1}\partial\bar\partial u$$ is the K\"ahler form of a complete Hermitian metric which is uniformly equivalent to $h$ on $M\times[s_0, s_1]$. \end{thm}
\begin{proof}[Proof of Theorem \ref{t-instant-complete}] For later application, we construct the solution in the following way. Combining the local higher order estimates for the Chern-Ricci flow (see \cite{ShermanWeinkove2013} for example) with Corollary \ref{eq-g} for any $1>\epsilon>0$, and using a diagonal argument as $\rho_0\to \infty$, we obtain a solution $u_\epsilon(t)$ to \eqref{e-MP-1} with initial data $\omega_0+\epsilon \theta_0$ on $M\times[0,s)$ which is smooth up to $t=0$, so that the corresponding solution $g_\epsilon(t)$ of \eqref{e-NKRF} is smooth on $M\times[0,s)$ with initial metric $g_\epsilon(0)=g_0+\epsilon h$. Moreover, $g_\epsilon$ is uniformly equivalent to $h$ on $M\times[0,s_1]$ for all $0<s_1<s$, and for any $0<s_0<s_1<s$ there is a constant $C>0$ independent of $\epsilon$ such that $$ C^{-1}h\le g_\epsilon\le Ch $$ on $M\times[s_0,s_1]$. Using the local higher order estimates for the Chern-Ricci flow \cite{ShermanWeinkove2013} again, we can find $\epsilon_i\to0$ such that $u_{\epsilon_i}$ converge locally uniformly on compact subsets of $M\times(0,s)$ to a solution $u$ of \eqref{e-MP-1}.
By Lemmas \ref{l-uudot-upper-1} and \ref{l-all-u}, we see that $u(t)\to 0$ as $t\to0$ uniformly on $M$. Moreover, for any $0<s_0<s_1<s$, $\omega(t)={\alpha}+\sqrt{-1}\partial\bar\partial u$ is the K\"ahler form of the solution to \eqref{e-NKRF}, and the corresponding Hermitian metric $g(t)$ is a complete Hermitian metric which is uniformly equivalent to $h$ on $M\times[s_0, s_1]$.
\end{proof}
Next we want to discuss { the short time existence of the Chern-Ricci flow. The solution $\omega(t)$ obtained from Theorem \ref{t-instant-complete} satisfies the normalized Chern-Ricci flow on $M\times(0,s)$. Hence we concentrate on the behaviour of this solution $\omega(t)$ as $t\to0$}. In case that
$h$ is K\"ahler and $\omega_0$ is closed, we have the following:
\begin{thm}\label{t-initial-Kahler-1} With the same notation and assumptions as in Theorem \ref{t-instant-complete}, let $\omega(t)$ be the solution of \eqref{e-NKRF} obtained in the theorem. Suppose in addition that $h$ is K\"ahler and $d\omega_0=0$, and let $U=\{\omega_0>0\}$. Then $\omega(t)\rightarrow \omega_0$ in $C^\infty(U)$ as $t\rightarrow 0$, {uniformly on compact subsets of $U$}. \end{thm} \begin{rem} If in addition $h$ has bounded curvature, then one can use Shi's K\"ahler-Ricci flow \cite{Shi1989,Shi1997} and the argument in \cite{ShermanWeinkove2012} to show that the K\"ahler-Ricci flow $g_i(t)$ starting from $g_0+\epsilon_i h$ has bounded curvature when $t>0$. The uniform local $C^k$ estimates then follow from the pseudo-locality theorem \cite[Corollary 3.1]{HeLee2018} and the modified Shi's local estimate \cite[Theorem 14.16]{Chow2}. \end{rem}
By Theorem \ref{t-instant-complete} we have the following:
\begin{cor}\label{c-shorttime} Let $(M,h)$ be a complete non-compact K\"ahler manifold with bounded curvature. Let $\theta_0$ be the K\"ahler form of $h$. Suppose there is a compact set $V$ such that outside $V$, $-\text{\rm Ric}(\theta_0)+\sqrt{-1}\partial\bar\partial f\ge\beta \theta_0$ for some $\beta>0$ and some bounded smooth function $f$. Then for any closed nonnegative real $(1,1)$-form $\omega_0$ such that $\omega_0\le \theta_0$, $|\nabla_h\omega_0|$ is bounded, and $\omega_0>0$ on $V$, there is $s>0$ such that \eqref{e-NKRF} has a solution $\omega(t)$ on $M\times(0,s)$ so that $\omega(t)$ is uniformly equivalent to $h$ on $M\times[s_0,s_1]$ for any $0<s_0<s_1<s$ and $\omega(t)$ attains the initial data $\omega_0$ on the set where $\omega_0>0$.
\end{cor} \begin{proof} Let $s>0$, then $$ -(1-e^{-s})\text{\rm Ric}(\theta_0)+(1-e^{-s})\sqrt{-1}\partial\bar\partial f\ge (1-e^{-s})\beta \theta_0 $$ outside $V$. On $V$, $$ \omega_0 -(1-e^{-s})\text{\rm Ric}(\theta_0)+(1-e^{-s})\sqrt{-1}\partial\bar\partial f\ge \beta'\theta_0 $$ for some $\beta'>0$, provided $s$ is small enough. The Corollary then follows from Theorems \ref{t-instant-complete} and \ref{t-initial-Kahler-1}. \end{proof}
\begin{rem}\label{r-shorttime} Suppose $\Omega$ is a bounded strictly pseudoconvex domain in $\mathbb{C}^n$ with smooth boundary. Then by \cite{ChengYau1982} there is a complete K\"ahler metric on $\Omega$ with Ricci curvature bounded above by a negative constant near infinity. Hence Corollary \ref{c-shorttime} can be applied to this case, which has been studied by Ge-Lin-Shen \cite{Ge-Lin-Shen}. \end{rem}
To prove Theorem \ref{t-initial-Kahler-1}, suppose $h$ is K\"ahler and $d\omega_0=0$. Then the solution in Theorem \ref{t-instant-complete} is the limit of solutions $g_i(t)$ of the normalized K\"ahler-Ricci flow on $M\times[0, s)$ with initial data $g_0+\epsilon_i h$, where $\epsilon_i\to 0$. Here we may assume $s\leq 1$. By Lemma \ref{l-all-u} (iii) and Lemma \ref{l-trace-2} (ii), each $g_i(t)$ is uniformly equivalent to $h$, although the constant may depend on $\epsilon_i$. In this section, we will use $\tilde g_i(t)=(t+1)g_i( \log (t+1))$ to denote the corresponding unnormalized K\"ahler-Ricci flow, and $\phi_i$ to denote the potential of the unnormalized flow $\tilde g_i(t)$; see the appendix.
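For the reader's convenience, we record the elementary computation behind this rescaling; this is only a consistency check, writing the normalized flow as $\partial_s g_i=-\text{\rm Ric}(g_i)-g_i$ as in the appendix and using the scale invariance of the Ricci form. With $s=\log(t+1)$,
$$
\frac{\partial}{\partial t}\tilde g_i(t)=g_i(s)+\frac{\partial g_i}{\partial s}(s)=g_i(s)-\text{\rm Ric}(g_i(s))-g_i(s)=-\text{\rm Ric}(g_i(s))=-\text{\rm Ric}(\tilde g_i(t)),
$$
so that $\tilde g_i(t)$ indeed solves the unnormalized K\"ahler-Ricci flow, with $\tilde g_i(0)=g_i(0)=g_0+\epsilon_i h$.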
We want to prove the following:
\begin{lma}\label{l-initial-Kahler-1} With the same notation and assumptions as in Theorem \ref{t-initial-Kahler-1}, for any precompact open subset $\Omega$ of $U$, there are $S_1>0$ and $C>0$ such that
$$
C^{-1}h\le \tilde g_i(t)\le Ch
$$
for all $i$, on $\Omega\times[0,S_1]$. \end{lma}
{\begin{proof}[Proof of Theorem \ref{t-initial-Kahler-1}] Suppose the lemma is true, then Theorem \ref{t-initial-Kahler-1} will follow from the local estimates in \cite{ShermanWeinkove2012}. \end{proof}
It remains to prove Lemma \ref{l-initial-Kahler-1}.}
\begin{lma}\label{slma-1} We have $|\phi_i|\leq C_0,\;\dot\phi_i\leq C_0$ on $M\times[0, e^s-1)$ for some positive constant $C_0$ independent of $i$. \begin{proof} By Lemma \ref{l-uudot-upper-1}, we have \begin{equation*} \log\frac{{\omega_i}^n(s)}{\theta_0^n}=\dot u_i+u_i\leq C. \end{equation*} Here $C$ is a positive constant independent of $i$ and $\omega_i(s)$ is the K\"ahler form of the normalized flow $g_i(s)$, which is related to the unnormalized flow $\widetilde g_i$ by \begin{equation*} \widetilde g_i(t)e^{-s}= g_i(s), \quad t=e^s-1. \end{equation*} Since $\widetilde\omega^n_i(t)=e^{ns}\omega_i^n(s)$, the equation $\dot\phi_i=\log\frac{\widetilde\omega^n_i(t)}{\theta_0^n}$ gives $\dot\phi_i(t)=\dot u_i+u_i+ns\leq C+n$ (recall that we assume $s\leq1$), which is the upper bound on $\dot\phi_i(t)$. The lower bound on $\phi_i$ follows from Lemma \ref{l-all-u}, while the upper bound on $\phi_i$ follows by integrating the bound on $\dot\phi_i$, since $\phi_i(0)=0$ and $t\leq e-1$. \end{proof} \end{lma} Before we state the next lemma, we fix some notation. Let $p\in U$. By scaling, we may assume that there is a holomorphic coordinate neighbourhood of $p$ which can be identified with $B_0(2)\subset \mathbb{C}^n$, with $p$ being the origin and $B_0(r)$ the Euclidean ball of radius $r$. Moreover, $B_0(2)\Subset U$. We may further assume $\frac14h\le h_E\le 4h$ in $B_0(2)$, where $h_E$ is the Euclidean metric. Since $\omega_0>0$ on $B_0(2)$, there is $0<\sigma<1$ such that $B_{g_i(0)}(p,2\sigma)\subset B_0(2)$ and $$ g_i(0)\ge 4\sigma^2h $$ in $B_0(2)$. This is because $g_i(0)=\omega_0+\epsilon_i h$. Here we use $h_E$ because we want to use the estimates in \cite{ShermanWeinkove2012} explicitly. Let $\tau=e^{s}-1$, where $s$ is the constant in the assumption of Theorem \ref{t-instant-complete}, and let $\dot \phi$ be as in the proof of Lemma \ref{slma-1}. It is easy to see that Lemma \ref{l-initial-Kahler-1} follows from the following:
\begin{lma}\label{local-bound} With the same notation and assumptions as in Theorem \ref{t-initial-Kahler-1} and with the above setup, there exist positive constants $1>\gamma_1, \gamma_2>0$ with $\gamma_2<\tau$, independent of $i$, such that $$\gamma_1^{-2}h\ge \widetilde g_i(t)\geq \gamma_1^2 h$$ on $B_{\widetilde g_i(t)}(p,\sigma)$ for $t\in [0,\gamma_2\gamma_1^{8(n-1)}]$. \end{lma} \begin{proof} The lower bound in the lemma will follow from the following claim:\vskip .1cm
\noindent\underline{\it Claim}: There are constants $1>\gamma_1, \gamma_2>0$ independent of ${\alpha}>0$ and $i$ with $\gamma_2<\tau$ such that if $\widetilde g_i(t)\ge {\alpha}^2h$ on $B_{\widetilde g_i(t)}(p,\sigma)$, $t\in [0, \gamma_2{\alpha}^{8(n-1)}]$, then $\widetilde g_i(t)\ge \gamma_1^2 h$ on $B_{\widetilde g_i(t)}(p,\sigma)$ for $t\in [0, \gamma_2{\alpha}^{8(n-1)}]$. \vskip .1cm
The main point is that $\gamma_1$ does not depend on ${\alpha}$. Suppose the claim is true. Fix $i$, let ${\alpha}\le \gamma_1$ be the supremum of $\widetilde{\alpha}$ so that $\widetilde g_i(t)\ge \widetilde{\alpha}^2h$ on $ B_{\widetilde g_i(t)}(p,\sigma)$, $t\in [0, \gamma_2\widetilde{\alpha}^{8(n-1)}]$. Since $\widetilde g_i(0)\ge \sigma^2h$ in $U$, we see that ${\alpha}>0$. Suppose ${\alpha}<\gamma_1$. By continuity, there is $\epsilon>0$ such that ${\alpha}+\epsilon<\gamma_1$. Then $\gamma_2 {\alpha}^{8(n-1)} \le \gamma_2 <\tau$. By the claim, we can conclude that $$
\widetilde g_i(t)\ge \gamma_1^2 h\geq ({\alpha}+\epsilon)^2h $$ in $B_{\widetilde g_i(t)}(p,\sigma)$, $t\in [0,\gamma_2{\alpha}^{8(n-1)}]$. By choosing a possibly smaller $\epsilon$ and by continuity, the above inequality is also true for $t\in [0,\gamma_2({\alpha}+\epsilon)^{8(n-1)}]$. This is a contradiction.
To prove the claim, let $\gamma_1$ and $\gamma_2>0$ be two constants, independent of ${\alpha}$ and $i$, to be determined later. In the following, $C_k$ will denote positive constants independent of ${\alpha}$ and $i$, and for simplicity of notation we suppress the index $i$ and simply write $\widetilde g_i$ as $g$.
Suppose ${\alpha}\le \gamma_1$ is such that $$ g(t)\ge {\alpha}^2 h $$ in $ B_{g(t)}(p,\sigma),\;t\in[0,\gamma_2 {\alpha}^{8(n-1)}]$. By Lemma \ref{slma-1}, $\det(g(t))/\det(h)\le C_1$ for some $C_1>1$. Hence we have \begin{equation*} {\alpha}^2h\le g(t)\le C_1{\alpha}^{-2(n-1)}h \end{equation*}
on $B_{g(t)}(p,\sigma),\;t\in[0,\gamma_2 {\alpha}^{8(n-1)}]$ and hence on $B_h(p,C_1^{-1/2}{\alpha}^{n-1}\sigma)\times [0,\gamma_2 {\alpha}^{8(n-1)}]$ because $B_h(p,C_1^{-1/2}{\alpha}^{n-1}\sigma)\subset B_{g(t)}(p,\sigma)$ for $t\in[0,\gamma_2 {\alpha}^{8(n-1)}]$. This can be seen by considering the maximal $h$-geodesic inside $B_t(p,\sigma)$. Together with the fact that $\frac14h \le h_E\le 4h$ on $B_0(2)$, we conclude that \begin{equation}\label{e-alpha-2} {\alpha}_1^2h_E\le g(t)\le {\alpha}_1^{-2}h_E \end{equation} on $ B_0(\frac1{2\sqrt{C_1}}{\alpha}^{n-1}\sigma)\times[0, \gamma_2{\alpha}^{8(n-1)}]$, where ${\alpha}_1>0$ is given by \begin{equation}\label{e-alpha-1} {\alpha}_1^2=\frac1{4C_1}{\alpha}^{2(n-1)}. \end{equation}
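The upper bound $g(t)\le C_1{\alpha}^{-2(n-1)}h$ used above follows from the usual eigenvalue argument; for the reader's convenience we sketch it, denoting (only here) by $\lambda_1\le\dots\le\lambda_n$ the eigenvalues of $g(t)$ with respect to $h$. Since each $\lambda_j\ge{\alpha}^2$ and $\prod_{j=1}^n\lambda_j=\det g(t)/\det h\le C_1$,
$$
\lambda_n=\frac{\prod_{j=1}^n\lambda_j}{\prod_{j=1}^{n-1}\lambda_j}\le \frac{C_1}{{\alpha}^{2(n-1)}},
$$
which gives $g(t)\le C_1{\alpha}^{-2(n-1)}h$.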
By \cite[Theorem 1.1]{ShermanWeinkove2012}, we conclude that \begin{equation}\label{e-Rm}
|\text{\rm Rm}(g(t))|\le \frac{C_2}{{\alpha}_1^8t} \end{equation} on $ B_0(\frac\sigma 2{\alpha}_1)\times[0, \gamma_2{\alpha}^{8(n-1)}]$. From the proof in \cite{ShermanWeinkove2012}, the constant $C_2$ depends on an upper bound of the existence time but not its precise value. In particular, it is independent of ${\alpha}$ here. By \eqref{e-alpha-2}, we conclude that \eqref{e-Rm} is true on { $ B_{g(t)}(p, \frac\sigma2{\alpha}_1^2)$, $t\in[0, \gamma_2{\alpha}^{8(n-1)}]$}.
By \cite[Lemma 8.3]{Perelman2003} (see also \cite[Chapter 18, Theorem 18.7]{Chow}), we have: \begin{equation}\label{e-distance-1} \lf(\frac{\p}{\p t}-\Delta\ri) (d_t(p,x)+C_3{\alpha}_1^{-4}t^\frac12)\ge0 \end{equation} in the sense of barrier (see the definition in Appendix \ref{s-max}) outside $B_{g(t)}(p,{\alpha}_1^4\sqrt t)$, provided \begin{equation}\label{e-t-1}
t^\frac12\le \frac\sigma2{\alpha}_1^{-2}. \end{equation}
Let $\xi\ge0$ be a smooth function with $\xi=1$ on $[0,\frac 43]$, $\xi=0$ outside $[0,2]$, $\xi'\le 0$ and $|\xi'|^2/\xi+ |\xi''|\le C $. Let
$$ \Phi(x,t)=\xi( \sigma^{-1}\eta(x,t)) $$ where $\eta(x,t)=d_t(p,x)+C_3{\alpha}_1^{-4}t^\frac12$. For any $\epsilon>0$, for $t>0$ satisfying \eqref{e-t-1}, if $d_t(p,x)+C_3{\alpha}_1^{-4}t^\frac12<\frac43\sigma$, then $\Phi(x,t)=1$ near $x$ and so \begin{equation*} \lf(\frac{\p}{\p t}-\Delta\ri)(\log(\Phi+\epsilon))=0. \end{equation*} If $d_t(p,x)+C_3{\alpha}_1^{-4}t^\frac12\ge\frac43\sigma$ and $d_t(p,x)\ge {\alpha}_1^4t^\frac12$, then
in the sense of barrier we have:
\begin{equation}\label{e-Phi-1} \begin{split} & \lf(\frac{\p}{\p t}-\Delta\ri) \log (\Phi+\epsilon)\\
=& \left(\frac{\xi'}{\xi} \sigma^{-1}\lf(\frac{\p}{\p t}-\Delta\ri)\eta-\frac{\xi''}{\xi} \sigma^{-2}|\nabla\eta|^2+\frac{(\xi')^2}{\xi^2} \sigma^{-2}|\nabla \eta|^2\right)\\
\le & C_4(\Phi+\epsilon)^{-1} \end{split} \end{equation} by the choice of $\xi$ and \eqref{e-distance-1}. Hence there exists $C_5>0$ such that if
\begin{equation}\label{e-t-2} t^\frac12\le C_5{\alpha}_1^4 \end{equation} then $t$ also satisfies \eqref{e-t-1} and $C_3{\alpha}_1^{-4}t^{1/2}<\frac\sigma 3$. Moreover, $C_5$ can be chosen so that either $d_t(p,x)+C_3{\alpha}_1^{-4}t^\frac12<\frac43\sigma$ or $d_t(p,x)+C_3{\alpha}_1^{-4}t^\frac12\ge\frac43\sigma$ and $d_t(p,x)\ge {\alpha}_1^4t^\frac12$. Hence \eqref{e-Phi-1} is true in the sense of barrier for $t\in (0, C_5^2{\alpha}_1^8]$.
Consider the function $$ F=\log \operatorname{tr}_hg -Lv+m\log (\Phi+\epsilon) $$ where $v=(\tau-t)\dot\phi+\phi-f+nt$, $\tau=e^s-1$. Here $L, m>0$ are constants to be chosen later which are independent of $i,\ {\alpha}$. Recall that $v$ satisfies
\begin{equation*} \lf(\frac{\p}{\p t}-\Delta\ri) v=\operatorname{tr}_g (\omega_0-\tau \text{\rm Ric}(\theta_0)+\sqrt{-1}\partial\bar\partial f) \geq {\beta} \operatorname{tr}_g h. \end{equation*} and \begin{equation*} \lf(\frac{\p}{\p t}-\Delta\ri) \log \operatorname{tr}_h g\le C_6\operatorname{tr}_g h \end{equation*} by Lemma \ref{l-a-1} with vanishing torsion terms here. Let \begin{equation}\label{e-L} L\beta= C_6+1+\tau^{-1}. \end{equation} Note that by the A.M.-G.M. inequality and the definition of $\dot \phi$, we have \begin{equation}\label{e-AMGM} -\dot \phi \le n\log \operatorname{tr}_gh;\ \ \log \operatorname{tr}_hg\le \dot\phi +(n-1)\log\operatorname{tr}_gh. \end{equation} So \begin{equation*} \log \operatorname{tr}_gh\ge \frac{1}{ n(\tau L-1)+(n-1)}(\log \operatorname{tr}_hg-\tau L\dot\phi) \end{equation*} Then in the sense of barrier \begin{equation*} \begin{split} \lf(\frac{\p}{\p t}-\Delta\ri) F\le & -\operatorname{tr}_gh+m C_4 (\Phi+\epsilon)^{-1}\\ \le &-\exp\left(C_7 (\log \operatorname{tr}_hg-\tau L\dot\phi)\right)+m C_4 (\Phi+\epsilon)^{-1}\\ \le &-\exp\left(C_7F-C_8-C_7m\log(\Phi+\epsilon)\right)+m C_4 (\Phi+\epsilon)^{-1}\\ =&-(\Phi+\epsilon)^{-1}mC_4\left[\exp (C_7F-C_8-\log(mC_4))-1\right]\\ \end{split} \end{equation*} if $mC_7=1$, where we have used the upper bound of $\dot \phi$ and the bound of $\phi$ in Lemmas \ref{slma-1}. So \begin{equation*} \begin{split} &\lf(\frac{\p}{\p t}-\Delta\ri) (C_7F-C_8-\log(mC_4))\\ \le& -\frac{mC_4C_7}{ \Phi+\epsilon}\left[\exp (C_7F-C_8-\log(mC_4))-1\right]\\ \le&0 \end{split} \end{equation*} in the sense of barrier whenever $C_7F-C_8-\log(mC_4)>0$. Then by the maximum principle Lemma \ref{max}, we conclude that $$ C_7F-C_8-\log(mC_4)\le\sup_{t=0}\left(C_7F-C_8-\log(mC_4)\right). $$
Let $\epsilon\to0$, using the definition of $\Phi$, the choice of $C_5$ and the bound of $|\phi|$, we conclude that in $ B_{g(t)}(p,\sigma)$, \begin{equation}\label{e-trace-lower} \log \operatorname{tr}_hg-L(\tau-t)\dot \phi \le C_9 \end{equation} provided $t\in [0, C_5^2{\alpha}_1^8]$. On the other hand, as in \eqref{e-AMGM}, we have \begin{equation*} \begin{split} \log \operatorname{tr}_gh\le& -\dot \phi+(n-1)\log\operatorname{tr}_hg\\ =&(n-1)\left(\log\operatorname{tr}_hg-L(\tau-t)\dot \phi\right)+(n-1)(L(\tau-t)-1)\dot\phi\\ \le&C_{10} \end{split} \end{equation*} provided \begin{equation}\label{e-t-3} Lt\le L\tau-1. \end{equation} Here we have used the upper bound of $\dot \phi$ in Lemma \ref{slma-1}.
Hence there is $\gamma_1>0$ independent of ${\alpha}$ and $i$ such that if $t$ satisfies \eqref{e-t-2} and \eqref{e-t-3}, then $$ \widetilde g_i(t)\ge \gamma_1^2h $$ on $B_{\widetilde g_i(t)}(p,\sigma)$. Let $\gamma_2<\tau$ be given by
$$
\gamma_2=\min\{C_5^2,L^{-1}(L\tau-1)\}\times (4C_1)^{-4} $$ where $C_1, C_5$ are the constants in \eqref{e-alpha-1} and \eqref{e-t-2} respectively and $L$ is given by \eqref{e-L}. If $t\in[0,\gamma_2{\alpha}^{8(n-1)}]$, then $t$ satisfies \eqref{e-t-2} and \eqref{e-t-3}. One can then see that the claim is true.
{ By \eqref{e-trace-lower} and Lemma \ref{slma-1}, we conclude that $$ \widetilde g_i(t)\le C_{11}h $$ on $B_{\widetilde g_i(t)}(p,\sigma)$ for $t\in[0,\gamma_2{\alpha}^{8(n-1)}]$. The upper bound in the Lemma follows by choosing a possibly smaller $\gamma_1$.}
\end{proof}
For the case of the Chern-Ricci flow, the result is less satisfactory because the distance function $d_t$ does not behave as nicely as in the K\"ahler case. As before, under the assumptions of Theorem \ref{t-instant-complete}, let $g(t)$ be the Chern-Ricci flow constructed in the theorem. We have the following: \begin{prop}\label{p-initial-CR} With the same notation and assumptions as in Theorem \ref{t-instant-complete}, suppose $\operatorname{tr}_{g_0}h=o(\rho)$. Then $g(t)\rightarrow g_0$ as $t\rightarrow 0$ in $M$. The convergence is in the $C^\infty$ topology and is uniform on compact subsets of $M$. \end{prop} Note that $g_0$ may still be complete, but it may not be equivalent to $h$, and the curvature of $g_0$ may be unbounded.
As before, $g(t)$ is the limit of solutions $g_i(t)$ of the unnormalized Chern-Ricci flow on $M\times[0, s)$ with initial data $g_0+\epsilon_i h$ with $\epsilon_i\to 0$. Here we may assume $s\leq 1$. We want to prove the following:
\begin{lma}\label{l-initial-CR-1} With the same notation and assumptions as in Proposition \ref{p-initial-CR}, let $S<\tau:=e^s-1$. Then for any precompact open subset $\Omega$ of $M$, there is $C>0$ such that
$$
C^{-1}h\le g_i(t)\le Ch
$$
for all $i$ on $\Omega\times[0, S]$. \end{lma} Suppose the lemma is true; then Proposition \ref{p-initial-CR} follows from the local estimates in \cite{ShermanWeinkove2013} for the Chern-Ricci flow. To prove the lemma, we first prove the following.
Let $\phi_i$ be the potential for $g_i$. \begin{sublma}\label{sl-initial-CR-1} Suppose $$\liminf_{\rho\to\infty}\rho^{-1}\log\frac{\omega_0^n}{\theta_0^n}\ge0. $$ Then for any sufficiently small $\sigma>0$ (with the smallness independent of $i$), there is a constant $C>0 $ independent of $i$ such that \begin{equation*} \dot\phi_i\ge -C -\sigma\rho \end{equation*} on $M\times[0, S]$. \end{sublma} \begin{proof} In the following, we will denote $\phi_i$ simply by $\phi$ and $g_i(t)$ simply by $g(t)$ when no confusion arises. Note that $g(t)$ is uniformly equivalent to $h$. Let $\sigma>0$.
Let $F=-(\tau-t)\dot \phi-\phi+f-nt-\sigma \rho $. By \eqref{e-a-1} and \eqref{e-a-2}, for $0\le t\le S$, we have \begin{equation*} \begin{split} \lf(\frac{\p}{\p t}-\Delta\ri)(-(\tau-t)\dot \phi-\phi) =& (\tau-t)\operatorname{tr}_g\text{\rm Ric}(\theta_0)+\dot\phi-\dot\phi+\operatorname{tr}_g(\sqrt{-1}\partial\bar\partial \phi)\\ =&(\tau-t)\operatorname{tr}_g\text{\rm Ric}(\theta_0)+\left(n+t\operatorname{tr}_g(\text{\rm Ric}(\theta_0))-\operatorname{tr}_g(\theta_0)\right)\\ =&\tau\operatorname{tr}_g\text{\rm Ric}(\theta_0)+n-\operatorname{tr}_g\theta_0 \\ \end{split} \end{equation*} Hence by the fact that: \begin{equation*} \omega_0-\tau\text{\rm Ric}(\theta_0)+\sqrt{-1}\partial\bar\partial f\ge \beta\theta_0, \end{equation*} we have \begin{equation*} \begin{split} \lf(\frac{\p}{\p t}-\Delta\ri) F\le &\tau\operatorname{tr}_g\text{\rm Ric}(\theta_0)-\operatorname{tr}_g\theta_0-\Delta f+\sigma \Delta \rho\\ \le& (-\beta+ \sigma C_1)\operatorname{tr}_g\theta_0\\ <&0 \end{split} \end{equation*} for some constant $C_1$ independent of $\sigma$ and $i$ for $\sigma$ with $C_1\sigma<\beta$. Since $F$ is bounded from above, by the maximum principle Lemma \ref{max}, we conclude that $$ \sup_{M\times[0, S]}F\le \sup_{M\times\{0\}}F. $$ At $t=0$, $$ F=-\tau\dot\phi-\sigma\rho+f. $$ By the assumption, we conclude that $F\le C(\sigma)$ at $t=0$. Hence we have $$ F\le C(\sigma) $$ on $M\times[0, S]$. Since $\phi, f$ are bounded, the sublemma follows.
\end{proof}
\begin{sublma}\label{sl-initial-CR-2} With the same notation as in Sublemma \ref{sl-initial-CR-1}, suppose $\operatorname{tr}_{g_0}h=o(\rho)$. Then $$ \operatorname{tr}_hg_i\le C\exp(C'\rho) $$ on $M\times[0, S]$ for some positive constants $C, C'$ independent of $i$.
\end{sublma}
\begin{proof} We will denote $g_i$ by $g$ again and $\omega_{0}$ to be the K\"ahler form of the initial metric $g_i(0)=g_0+\epsilon_ih$. Note that \begin{equation*} \begin{split} \lf(\frac{\p}{\p t}-\Delta\ri)\phi=&\dot\phi-\Delta \phi\\ =&\dot \phi-(n-\operatorname{tr}_g\omega_{0}+t\operatorname{tr}_g(\text{\rm Ric}(\theta_0)))\\ \ge& \dot \phi-n+ \operatorname{tr}_g\omega_{0}+\frac{t\beta}{\tau}\operatorname{tr}_gh-\frac{t}{\tau}\operatorname{tr}_g\omega_0-\frac t\tau\Delta f\\ \ge &\dot \phi- n-\frac t\tau\Delta f+(1-\frac S\tau)\operatorname{tr}_g\omega_{0}. \end{split} \end{equation*} Then we have: \begin{equation}\label{e-initial-CR-1} \lf(\frac{\p}{\p t}-\Delta\ri) (\phi+ nt-\frac t\tau f)\ge \dot\phi+(1-\frac S\tau)\operatorname{tr}_g\omega_{0}-C_0.
\end{equation} Since $|\phi|$ is bounded by a constant independent of $i$ on $M\times[0, S]$ (see Lemma \ref{l-uudot-upper-1} and Lemma \ref{l-all-u}), there are constants $C_1, C_2>0$ such that $\xi:=\phi +nt-\frac t\tau f+C_1\ge 1$ and $\xi\le C_2$ on $M\times[0, S]$. Here and below $C_j$ will denote positive constants independent of $i$. Let $\Phi( \varsigma)=2-e^{-\varsigma}$ for $\varsigma\in \mathbb{R}$. Then, since $1\le\xi\le C_2$, we have \begin{equation}\label{e-initia-CR-2} \left\{
\begin{array}{ll}
\Phi(\xi)\ge & 1 \\
\Phi'(\xi)\ge & e^{-C_2}\\
\Phi''(\xi)\le &-e^{-C_2}
\end{array} \right. \end{equation} on $M\times[0, S]$. Next, let $P(\varsigma)$ be a smooth positive function on $\mathbb{R}$ with $P'>0$. Define $$ F(x,t)=\Phi(\xi)P(\rho). $$ Let $\Upsilon=tr_hg$, where $g=g_i$. Note that $F$ is a smooth function of $(x, t)$, and $P$ will be chosen below so that $F\to\infty$ near infinity. Then by Lemma \ref{l-a-1}, we have \begin{equation*}
\lf(\frac{\p}{\p t}-\Delta\ri) (\log \Upsilon-F)=\mathrm{I+II+III}-\lf(\frac{\p}{\p t}-\Delta\ri) F
\end{equation*}
where
\begin{equation*} \begin{split} \mathrm{I}\le &2\Upsilon^{-2}\text{\bf Re}\left( h^{i\bar l} g^{k\bar q}( T_0)_{ki\bar l} \hat \nabla_{\bar q}\Upsilon\right), \end{split} \end{equation*}
\begin{equation*} \begin{split} \mathrm{II}=&\Upsilon^{-1} g^{i\bar{j}} h^{k\bar l}g_{k\bar q} \left(\hat \nabla_i \overline{(\hat T)_{jl}^q}- \hat h^{p\bar q}\hat R_{i\bar lp\bar j}\right),\\ \end{split} \end{equation*} and
\begin{equation*} \begin{split} \mathrm{III}=&-\Upsilon^{-1} g^{{i\bar{j}}} h^{k\bar l}\left(\hat \nabla_i\left(\overline{( T_0)_{jl\bar k}} \right) +\hat \nabla_{\bar l}\left( {( T_0)_{ik\bar j} }\right)-\overline{ (\hat T)_{jl}^q}( T_0)_{ik\bar q}^p \right). \end{split} \end{equation*} Let $\Theta=\operatorname{tr}_gh$. Suppose $\log \Upsilon-F$ attains a positive maximum at $(x_0,t_0)$ with $t_0>0$, then at this point, $$ \Upsilon^{-1}\hat\nabla \Upsilon=\hat\nabla F, $$ and so \begin{equation*} \begin{split} \mathrm{I}\le &2\Upsilon^{-2}\text{\bf Re}\left( h^{i\bar l} g^{k\bar q}( T_0)_{ki\bar l} \hat \nabla_{\bar q}\Upsilon\right)\\
\le &C\Upsilon^{-1}\Theta^\frac12|\nabla F|\\
\le&C'\Upsilon^{-1}\Theta^\frac12\left(P|\nabla \xi|+P'\Theta^\frac12\right). \end{split}
\end{equation*} because $|\partial\rho|_h$ is bounded. Here we use the norm with respect to the evolving metric $g(t)$.
$$ \mathrm{II}\le C\Theta, $$ $$ \mathrm{III}\le C\Upsilon^{-1}\Theta. $$ Here $C, C'$ are positive constants independent of $i$. On the other hand, \begin{equation*} \begin{split} &\lf(\frac{\p}{\p t}-\Delta\ri) F\\ =&P\lf(\frac{\p}{\p t}-\Delta\ri)\Phi+2{\bf Re}\left(g^{i\bar{j}}\partial_i\Phi\partial_{\bar j}P\right)+\Phi\lf(\frac{\p}{\p t}-\Delta\ri) P\\
\ge&P\left(\Phi'\lf(\frac{\p}{\p t}-\Delta\ri) \xi -\Phi''|\nabla\xi|^2\right)-C_4\Phi'P'\Theta^\frac12|\nabla\xi|-C_4\Theta\Phi (P'+|P''|)\\
\ge&P\Phi'\dot\phi+e^{-C_2}P(1-\frac S\tau)\operatorname{tr}_g\omega_0-C_0P+e^{-C_2}P|\nabla\xi|^2-\frac12e^{-C_2}P|\nabla\xi|^2 \\
&-C_5\frac{(P')^2}{P}\Theta-C_4\Theta(P'+|P''|). \end{split}
\end{equation*} Here we have used the fact that $|\partial\rho|_h$ and $|\partial\bar\partial\rho|_h$ are bounded, that $\Phi(\xi)\le 2$, and \eqref{e-initia-CR-2}.
So at $(x_0,t_0)$, \begin{equation*} \begin{split}
&\lf(\frac{\p}{\p t}-\Delta\ri) (\log \Upsilon-F)\\
\le& C_3\left( \Upsilon^{-1}\Theta^\frac12\left(P|\nabla \xi|+P'\Theta^\frac12\right) + \Upsilon^{-1}\Theta+ \Theta\right)\\
&- P\Phi'\dot\phi-e^{-C_2}P(1-\frac S\tau)\operatorname{tr}_g\omega_0+C_0P-\frac12e^{-C_2}P|\nabla\xi|^2\\
&+\Theta\left(C_5\frac{(P')^2}{P}+
C_4(P'+|P''|)\right)\\
\le&- P\Phi'\dot\phi-e^{-C_2}P(1-\frac S\tau)\operatorname{tr}_g\omega_0+C_0P+\left(-\frac12e^{-C_2} +\Upsilon^{-1}\right)P|\nabla\xi|^2 \\
&+C_6\Theta\left(\Upsilon^{-1}+1+\Upsilon^{-1}P'+P'+\Upsilon^{-1}P+\frac{(P')^2}{P}+|P''|\right).
\end{split} \end{equation*} Now \begin{equation*} -\dot\phi\le c(n)\log \Theta. \end{equation*} Suppose $\omega_0\ge \frac1{Q(\rho)}\theta_0$ with $Q>0$ and suppose $\Upsilon^{-1}\le \frac12e^{-C_2}$ at $(x_0,t_0)$, then at $(x_0,t_0)$, we have \begin{equation*} \begin{split} &\lf(\frac{\p}{\p t}-\Delta\ri) (\log \Upsilon-F)\\
\le&C_7P(\log\Theta+1)+\Theta\left[-C_8PQ^{-1}+C_9 \left( 1+ P'+\frac{(P')^2}{P}+|P''|\right)\right]. \end{split} \end{equation*} By the assumption on $\operatorname{tr}_{g_0}h$, for any $\sigma>0$ there is $\rho_0>0$ such that if $\rho\ge \rho_0$, then $\operatorname{tr}_{g_0}h\le \sigma\rho.$ Hence we can find $C=C(\sigma)$ such that $$ g_0\ge \frac{1}{\sigma(\rho+C(\sigma))}h $$ and $\rho+C(\sigma)\ge 1$ on $M$. Let $Q(\rho)=\sigma(\rho+C(\sigma)), P(\rho)=\rho+C(\sigma)$, then above inequality becomes \begin{equation*} \begin{split}
\lf(\frac{\p}{\p t}-\Delta\ri) (\log \Upsilon-F) \le&C_7P\log (e\Theta)+\Theta\left(-C_8\sigma^{-1}+3C_9\right)\\ \le& C_7P\log (e\Theta)-\frac12C_8\Theta \end{split} \end{equation*} if we choose $\sigma$ small enough independent of $i$. Since $\log\Upsilon-F\to-\infty$ near infinity and uniform in $t\in [0, S]$, and $\log\Upsilon-F<0$ at $t=0$, by maximum principle, either $\log\Upsilon-F\le 0$ on $M\times[0, S]$ or there is $t_0>0$, $x_0\in M$ such that $\log\Upsilon-F$ attains a positive maximum at $(x_0,t_0)$. Suppose at this point $\Upsilon^{-1}\ge\frac12 e^{-C_2}$, then $$ \log\Upsilon-F\le C_{10}. $$ Otherwise, at $(x_0,t_0)$ we have \begin{equation*} 0\le C_7P\log (e\Theta)-\frac12C_8\Theta. \end{equation*} Hence we have at this point $\Theta\le C_{11}$ which implies $\Upsilon\le C_{12}$ because $\dot\phi\le C$ for some constant independent of $i$. So $$ \log\Upsilon-F\le\log C_{12}. $$ Or $$ \Theta\le C_{13}P^2. $$ This implies $\log \Upsilon\le C_{14}(1+\log P)$. Hence \begin{equation*} \log \Upsilon-F\le C_{14}. \end{equation*} From these considerations, we conclude that the sublemma is true. \end{proof} \begin{proof}[Proof of Lemma \ref{l-initial-CR-1}] The lemma follows from Sublemmas \ref{sl-initial-CR-1} and \ref{sl-initial-CR-2}. \end{proof}
\section{Long time behaviour and convergence}
In this section, we will first study the longtime behaviour of the solution constructed in Theorem \ref{t-instant-complete}. Namely, we will show the following theorem: \begin{thm}\label{longtime} Under the assumptions of Theorem \ref{t-instant-complete}, suppose in addition that $$-\text{\rm Ric}(h)+\sqrt{-1}\partial\bar\partial f\geq \beta \theta_0$$ for some $f\in C^\infty(M)\cap L^\infty(M)$ and $\beta>0$. Then the solution constructed in Theorem \ref{t-instant-complete} is a longtime solution and converges to the unique complete K\"ahler-Einstein metric with negative scalar curvature on $M$. \end{thm}
Before we prove Theorem \ref{longtime}, let us prove a lower bound of $\dot u$ which will be used in the argument of convergence. Once we have uniform equivalence of metrics, we can obtain a better lower bound of $\dot{u}$.
\begin{lma}\label{du-convergence} Assume that the solution constructed in Theorem \ref{t-instant-complete} is a longtime solution. Then there is a positive constant $C$ such that \begin{equation*} \dot{u}\geq-Ce^{-\frac t2} \end{equation*} on $M\times[2, \infty)$. \begin{proof} Since we do not have an upper bound on $g(t)$ as $t\to 0$, we shift the initial time of the flow to $t=1$. Note that \begin{equation*}\begin{split} \lf(\frac{\p}{\p t}-\Delta\ri) (e^t\dot{u}-f)=&-tr_{ g}(\text{\rm Ric}(h)+g(1))+\Delta f\\ \geq&-tr_{ g}g(1)\geq-C_1. \end{split}\end{equation*}
Consider $Q=e^t\dot{u}-f+(C_1+1)t$. Then we can use the maximum principle argument as before to obtain $Q(x, t)\geq \inf\limits_MQ(0)$. Then we have\begin{equation*} e^t\dot{u}\geq -C_2-(C_1+1)t \end{equation*} which implies \begin{equation*} \dot{u}\geq -C_3e^{-\frac t2} \end{equation*} on $M\times[1, \infty)$, since $(C_2+(C_1+1)t)e^{-t}\le C_3e^{-\frac t2}$ for $t\ge1$. Shifting the time back, we obtain the result.
\end{proof} \end{lma}
\begin{proof}[Proof of Theorem \ref{longtime}] The assumption $-\text{\rm Ric}(h)+\sqrt{-1}\partial\bar\partial f\geq \beta \theta_0$ implies that for all $s$ large enough, \begin{equation*} -Ric(h)+e^{-s}(\omega_0+Ric(h))+\sqrt{-1}\partial\bar\partial \hat f\geq \frac\beta2 \theta_0. \end{equation*} Here $\hat f=(1-e^{-s})f$ is a bounded function on $M$. By Theorem \ref{t-instant-complete} and Lemma \ref{l-trace-2}, \eqref{e-MP-1} has a smooth solution on $M\times(0, \infty)$ with $g(t)$ uniformly equivalent to $h$ on any $[a, \infty)\subset(0, \infty)$. Combining the local higher order estimate of Chern-Ricci flow (See \cite{ShermanWeinkove2013} for example) and Lemma \ref{du-convergence}, we can conclude that $u(t)$ converges smoothly and locally to a smooth function $u_\infty$ as $t\to\infty$ and $\log\frac{\omega^n_\infty}{\theta_0^n}=u_\infty$. Taking $\sqrt{-1}\partial\bar\partial$ on both sides, we have \begin{equation*} -\text{\rm Ric}(g_\infty)+\text{\rm Ric}(h)=\sqrt{-1}\partial\bar\partial u_\infty, \end{equation*} which implies $-\text{\rm Ric}(g_\infty)=g_\infty$. Obviously, $g_\infty$ is K\"ahler. Uniqueness follows from \cite[Theorem 3]{Yau1978} (see also Proposition 5.1 in \cite{HLTT}).
\end{proof}
Taking $g_0=h$ in the theorem, we have
\begin{cor} Let $(M,h)$ be a complete Hermitian manifold satisfying the assumptions in Theorem \ref{longtime}. Then the normalized Chern-Ricci flow with initial data $h$ exists on $M\times[0,\infty)$ and converges uniformly on compact subsets to the unique complete K\"ahler-Einstein metric with negative scalar curvature on $M$. \end{cor}
For K\"ahler-Ricci flow, we have the following general phenomena related to Theorem \ref{longtime}. \begin{thm}\label{convergence-krf} Let $(M,h)$ be a smooth complete Hermitian manifold with
$\mathrm{BK}(h) \geq -K_0$ and $|\nabla^h_{\bar\partial}T_h|_h\leq K_0$ for some constant $K_0\geq 0$. Moreover, assume \begin{equation*} -\text{\rm Ric}(h)+\sqrt{-1}\partial\bar\partial f \geq k h \end{equation*} for some constant $k>0$ and function $f\in C^\infty(M)\cap L^\infty(M)$. Suppose $g(t)$ is a smooth complete solution to the normalized K\"ahler-Ricci flow on $M\times[0,+\infty)$ with $g(0)=g_0$ which satisfies \begin{equation*}
\frac{\det g_0}{\det h}\leq \Lambda \end{equation*} and \begin{equation*} R(g_0)\geq -L \end{equation*} for some $\Lambda,L>0$. Then $g(t)$ satisfies \begin{equation*} C^{-1}h\leq g(t)\leq C h \end{equation*} on $M\times[1, \infty)$ for some constant $C=C(n, K_0, k, ||f||_\infty, \Lambda, L)>0$. In particular, $ g(t)$ converges to the unique smooth complete K\"ahler-Einstein metric with negative scalar curvature. \end{thm}
\begin{proof} We can assume $k=1$, otherwise we rescale $h$. We consider the corresponding unnormalized K\"ahler-Ricci flow $\widetilde g(s)=e^{t}g(t)$ with $s=e^t-1$. Then the corresponding Monge-Amp\`ere equation to the unnormalized K\"ahler-Ricci flow is: \begin{equation*}\left\{\begin{array}{l}
\frac{\partial}{\partial s}\phi=\displaystyle{\log\frac{(\omega_0-s\text{\rm Ric}(\theta_0)+\sqrt{-1}\partial\bar\partial\phi)^n}{\theta_0^n}} \\
\phi(0)=0. \end{array} \right. \end{equation*}
Here $\theta_0$ is the K\"ahler form of $h$. By the assumption $R(g_0)\geq -L $, Proposition 2.1 in \cite{Chen2009} and Lemma 5.1 in \cite{HLTT} with the fact \begin{equation*} \lf(\frac{\p}{\p s}-\wt\Delta\ri) \widetilde R\geq \frac{1}{n}\widetilde R^2,\end{equation*} we conclude that $\widetilde R:=R(\widetilde g(s))\geq \max\{-L, -\frac ns\}$ on $M\times[0, \infty)$. Note that $\ddot{\phi}=-R(\widetilde g(s))$, we have on $M\times[0, 1]$, $\dot{\phi}\leq C(L, \Lambda)$; on $M\times[1, \infty)$, $\dot{\phi}\leq C(L, \Lambda)+n\log s$.
For lower bound of $\dot{\phi}$, we consider $Q=-\dot{\phi}+f$. We compute: \begin{equation*}\begin{split} \lf(\frac{\p}{\p s}-\wt\Delta\ri) Q=&-\lf(\frac{\p}{\p s}-\wt\Delta\ri) \dot{\phi}-\Delta f \\ =&tr_{\widetilde g}[\text{\rm Ric}(\theta_0)-\sqrt{-1}\partial\bar\partial f]\\ \leq&-tr_{\widetilde g}h\\ \leq&-ne^{-\frac{\dot{\phi}}{n}}\\ \leq&-ne^{\frac{1}{n}(Q-f)}\\
\leq&-C(n, ||f||_\infty)e^{\frac{Q}{n}}\\
\leq&-C(n, ||f||_\infty)Q^2, \end{split}\end{equation*} whenever $Q>0$.
Then by the same argument as in the proof of Proposition 2.1 in \cite{Chen2009}, we conclude that $\dot{\phi}\geq -C(n, \lambda, ||f||_\infty)$ on $M\times[0, \infty)$. Here $\lambda$ is the lower bound of $\frac{\det g_0}{\det h}$. However, this estimate is not enough for later applications. We consider $F=-\dot{\phi}+f+n\log s$. Then we similarly obtain \begin{equation*}
\lf(\frac{\p}{\p s}-\wt\Delta\ri) F\leq -C(n, ||f||_\infty)F^2, \end{equation*} whenever $F>0$. By Lemma 5.1 in \cite{HLTT}, we conclude that $F\leq\frac{C(n, ||f||_\infty)}{s}$ on $M\times[0, \infty)$. Therefore, we obtain \begin{equation*}
\dot{\phi}\geq -C(n, ||f||_\infty)+n\log s \end{equation*} on $M\times[1, \infty)$.
To sum up, for the bound of $\dot{\phi}$, we have:
(i) On $M\times[0, 1]$, $-C(n, \lambda, ||f||_\infty)\leq \dot{\phi}\leq C(L, \Lambda)$;
(ii) On $M\times[1, \infty)$, $-C(n, ||f||_\infty)+n\log s\leq \dot{\phi}\leq C(L, \Lambda)+n\log s$.
We now return to the normalized K\"ahler-Ricci flow $g(t)$. Since \begin{equation*} \log\frac{\det g(t)}{\det h}=-n\log (s+1)+\frac{\partial}{\partial s}\phi(s), \end{equation*} where $s=e^t-1$, we obtain \begin{equation*}
-C(n, ||f||_\infty)\leq\dot{u}(t)+u(t)\leq C(L, \Lambda) \end{equation*} on $M\times[\log 2, \infty)$. Here $u$ solves \eqref{e-MP-1}.
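For the reader's convenience, here is how this follows from (ii) above (a one-line computation, introducing no new notation): since $\dot u+u=\log\frac{\det g(t)}{\det h}$, the previous identity gives
$$
\dot u(t)+u(t)=\dot\phi(s)-n\log(s+1),
$$
and for $t\ge\log 2$, i.e. $s\ge1$, (ii) yields
$$
-C(n, ||f||_\infty)-n\log 2\le -C(n, ||f||_\infty)+n\log\frac{s}{s+1}\le \dot u(t)+u(t)\le C(L, \Lambda)+n\log\frac{s}{s+1}\le C(L, \Lambda),
$$
which is the stated estimate after enlarging the constant in the lower bound.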
Next, we consider $G(x, t)=\log tr_h g(t)-A(\dot{u}(t)+u(t)+f)$. Here $A$ is a large constant to be chosen. As in Section 1, we have \begin{equation*} \lf(\frac{\p}{\p t}-\Delta\ri) \log tr_h g(t)\leq C(n, K_0)tr_{ g(t)}h-1. \end{equation*} Therefore, \begin{equation*}\begin{split} \lf(\frac{\p}{\p t}-\Delta\ri) G\leq& C(n, K_0)tr_{ g(t)}h-1+An+A(tr_{ g}\text{\rm Ric}(h)+tr_{ g}\sqrt{-1}\partial\bar\partial f)\\ \leq& (-A+C(n, K_0))tr_{g(t)}h-1+An\\ \leq& -tr_{g(t)}h+An. \end{split}\end{equation*} Here we take $A=C(n, K_0)+1$.
On the other hand, \begin{equation*} tr_h g(t)\leq\frac{1}{(n-1)!}\cdot(tr_{ g(t)}h)^{n-1}\cdot\frac{\det g}{\det h}\leq C(n, L, \Lambda)(tr_{ g(t)}h)^{n-1}. \end{equation*}
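The first inequality in the last display is a standard pointwise linear-algebra fact; we recall the short justification, writing (only here) $\lambda_1,\dots,\lambda_n>0$ for the eigenvalues of $g(t)$ with respect to $h$:
$$
\Big(\sum_{j=1}^n\lambda_j^{-1}\Big)^{n-1}\cdot\prod_{j=1}^n\lambda_j\ \ge\ (n-1)!\sum_{k=1}^n\Big(\prod_{j\ne k}\lambda_j^{-1}\Big)\prod_{j=1}^n\lambda_j\ =\ (n-1)!\sum_{k=1}^n\lambda_k,
$$
that is, $tr_h g\le\frac{1}{(n-1)!}(tr_{g}h)^{n-1}\frac{\det g}{\det h}$. The second inequality then follows from $\frac{\det g}{\det h}=e^{\dot{u}(t)+u(t)}\le e^{C(L,\Lambda)}$.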
Then we have \begin{equation*}\begin{split} \lf(\frac{\p}{\p t}-\Delta\ri) G\leq&-C(n, L, \Lambda)(tr_h g(t))^{\frac{1}{n-1}}+C(n,K_0)\\ =&-C(n, L, \Lambda)e^{\frac{1}{n-1}\log tr_hg(t)}+C(n,K_0)\\ =&-C(n, L, \Lambda)e^{\frac{1}{n-1}[G+A(\dot{u}(t)+u(t)+f)]}+C(n,K_0)\\
\leq&-C(n, L, \Lambda, ||f||_\infty)e^{\frac{1}{n-1}G}+C(n,K_0)\\
\leq&-C(n, L, \Lambda, ||f||_\infty)G^2+C(n,K_0), \end{split}\end{equation*} whenever $G>0$.
By a similar argument as in the proof of Lemma 5.1 in \cite{HLTT}, we conclude that $G\leq C(n, L, \Lambda, ||f||_\infty, K_0)$ on $M\times[1, \infty)$. The difference here is that we consider the normalized K\"ahler-Ricci flow instead of the unnormalized K\"ahler-Ricci flow. Perelman's distance distortion lemma for the normalized K\"ahler-Ricci flow reads:\begin{equation*} \lf(\frac{\p}{\p t}-\Delta\ri) d_t(x_0, x)\geq -\frac{5(n-1)}{3}r_0^{-1}-d_t(x_0, x). \end{equation*} We then consider $t\cdot\phi(\frac{1}{Ar_0}[e^t\cdot d_t(x_0, x)+\frac{5(n-1)e^t}{3}r_0^{-1}])\cdot G(x,t)$, and the result follows from the same argument as in the proof of Lemma 5.1 in \cite{HLTT}.
This implies \begin{equation*}
g(t)\leq C(n, L, \Lambda, ||f||_\infty, K_0)h \end{equation*} on $M\times[1, \infty)$.
For the lower bound, combining the upper bound on $g(t)$ with the identity $e^{\dot{u}(t)+u(t)}=\frac{\det g}{\det h}$ and the bounds on $\dot u+u$, we have \begin{equation*}
g(t)\geq C^{-1}(n, L, \Lambda, ||f||_\infty, K_0)h \end{equation*} on $M\times[1, \infty)$.
Once we obtain the uniform equivalence of metrics of the normalized K\"ahler-Ricci flow, the convergence follows from the same argument as in the proof of Theorem 5.1 in \cite{HLTT}. This completes the proof of Theorem \ref{convergence-krf}.
\end{proof}
\appendix \section{Some basic relations}
Let $g(t)$ be a solution to the Chern-Ricci flow,
$$
\partial_tg=-\text{\rm Ric}(g)
$$
and let $h$ be another Hermitian metric. Let $\omega(t)$ be the K\"ahler form of $g(t)$ and $\theta_0$ the K\"ahler form of $h$. Let $$ \phi(t)=\int_0^t\log \frac{\omega^n(s)}{\theta_0^n}ds. $$
Then \begin{equation}\label{e-a-1} \omega(t)=\omega(0)-t\text{\rm Ric}(\theta_0)+\sqrt{-1}\partial\bar\partial\phi. \end{equation}
Let $\dot\phi=\frac{\partial}{\partial t}\phi$. Then \begin{equation}\label{e-a-2} \lf(\frac{\p}{\p t}-\Delta\ri)\dot\phi=-\operatorname{tr}_g(\text{\rm Ric}(\theta_0)), \end{equation} where $ \Delta$ is the Chern Laplacian with respect to $ g$.
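For the reader's convenience we sketch the verification of \eqref{e-a-1} and \eqref{e-a-2}; the only ingredient is the formula $\text{\rm Ric}(\omega)=\text{\rm Ric}(\theta_0)-\sqrt{-1}\partial\bar\partial\log\frac{\omega^n}{\theta_0^n}$ for the Chern-Ricci form. Since $\dot\phi=\log\frac{\omega^n}{\theta_0^n}$,
$$
\frac{\partial}{\partial t}\Big(\omega(0)-t\text{\rm Ric}(\theta_0)+\sqrt{-1}\partial\bar\partial\phi\Big)=-\text{\rm Ric}(\theta_0)+\sqrt{-1}\partial\bar\partial\log\frac{\omega^n}{\theta_0^n}=-\text{\rm Ric}(\omega)=\frac{\partial\omega}{\partial t},
$$
and both sides of \eqref{e-a-1} agree at $t=0$ because $\phi(0)=0$; this gives \eqref{e-a-1}. Moreover,
$$
\frac{\partial \dot\phi}{\partial t}=\operatorname{tr}_g\frac{\partial g}{\partial t}=-\operatorname{tr}_g\text{\rm Ric}(\omega),
\qquad
\Delta\dot\phi=\operatorname{tr}_g\big(\text{\rm Ric}(\theta_0)-\text{\rm Ric}(\omega)\big),
$$
and subtracting the two identities gives \eqref{e-a-2}.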
On the other hand, if $g$ is as above, the solution $\widetilde g$ of the corresponding normalized Chern-Ricci flow with the same initial data $$ \partial_t\widetilde g=-\text{\rm Ric}(\widetilde g)-\widetilde g $$ is given by $$ \widetilde g(x,t)=e^{-t}g(x,e^{t}-1). $$ The corresponding potential $u$ is given by $$ u(t)=e^{-t}\int_0^te^s\log \frac{\widetilde \omega^n(s)}{\theta_0^n}ds $$ where $\widetilde \omega(s)$ is the K\"ahler form of $\widetilde g(s)$. Also, \begin{equation}\label{e-a-3} \widetilde\omega(t)=-\text{\rm Ric}(\theta_0)+e^{-t}(\text{\rm Ric}(\theta_0)+\omega(0))+\sqrt{-1}\partial\bar\partial u. \end{equation}
\begin{equation}\label{e-a-5} \left(\frac{\partial}{\partial t}-\widetilde \Delta\right)(\dot u+u)=-\operatorname{tr}_{\widetilde g}\text{\rm Ric}(\theta_0)-n, \end{equation} where $\widetilde \Delta$ is the Chern Laplacian with respect to $\widetilde g$.
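It may also be worth recording explicitly the identity relating $\dot u+u$ to the volume ratio, which follows at once from the definition of $u$ above and is used repeatedly in the body of the paper:
$$
\dot u(t)=-e^{-t}\int_0^te^s\log\frac{\widetilde\omega^n(s)}{\theta_0^n}\,ds+\log\frac{\widetilde\omega^n(t)}{\theta_0^n}=-u(t)+\log\frac{\widetilde\omega^n(t)}{\theta_0^n},
\qquad\text{that is,}\qquad
\dot u+u=\log\frac{\widetilde\omega^n(t)}{\theta_0^n}.
$$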
\begin{lma}[See \cite{TosattiWeinkove2015,Lee-Tam}]\label{l-a-1} Let $g(t)$ be a solution to the Chern-Ricci flow, and let $\Upsilon=\operatorname{tr}_{ h}g$ and $\Theta=\operatorname{tr}_g h$. Then
\begin{equation*}
\lf(\frac{\p}{\p t}-\Delta\ri) \log \Upsilon=\mathrm{I+II+III}
\end{equation*}
where
\begin{equation*} \begin{split} \mathrm{I}\le &2\Upsilon^{-2}\text{\bf Re}\left( h^{i\bar l} g^{k\bar q}( T_0)_{ki\bar l} \hat \nabla_{\bar q}\Upsilon\right). \end{split} \end{equation*} \begin{equation*} \begin{split} \mathrm{II}=&\Upsilon^{-1} g^{i\bar{j}} \hat h^{k\bar l}g_{k\bar q} \left(\hat \nabla_i \overline{(\hat T)_{jl}^q}- \hat h^{p\bar q}\hat R_{i\bar lp\bar j}\right)\\ \end{split} \end{equation*} and
\begin{equation*} \begin{split} \mathrm{III}=&-\Upsilon^{-1} g^{{i\bar{j}}} h^{k\bar l}\left(\hat \nabla_i\left(\overline{( T_0)_{jl\bar k}} \right) +\hat \nabla_{\bar l}\left( {( T_0)_{ik\bar j} }\right)-\overline{ (\hat T)_{jl}^q}( T_0)_{ik\bar q}^p \right). \end{split} \end{equation*} where $T_0$ is the torsion of $g_0=g(0)$, $\hat T$ is the torsion of $h$ and $\hat\nabla$ is the derivative with respect to the Chern connection of $h$.
\end{lma}
\section{A maximum principle}\label{s-max}
We have the following maximum principle, see \cite{HLTT} for example.
\begin{lma}\label{max} Let $(M^n,h)$ be a complete non-compact Hermitian manifold satisfying the following condition: there exists a smooth positive real exhaustion function $\rho$ such that $|\partial \rho|^2_h+|\sqrt{-1}\partial\bar\partial \rho|_h\leq C_1$. Suppose $g(t)$ is a solution to the Chern-Ricci flow on $M\times[0,S)$. Assume that for any $0<S_1<S$, there is $C_2>0$ such that $$ C_2^{-1}h\le g(t) $$ for $0\leq t\le S_1$. Let $f$ be a smooth function on $M\times[0,S)$ which is bounded from above and such that $$ \lf(\frac{\p}{\p t}-\Delta\ri) f\le0 $$ on $\{f>0\}$ in the sense of barrier. If $f\le 0$ at $t=0$, then $f\le 0$ on $M\times[0,S)$. \end{lma} {We say that $$ \lf(\frac{\p}{\p t}-\Delta\ri) f\le \phi $$
holds in the sense of barrier if, for fixed $t_1>0$ and $x_1$ and for any $\epsilon>0$, there is a $C^2$ function $\sigma(x)$ defined near $x_1$ such that $\sigma(x_1)=f(x_1,t_1)$, $\sigma(x)\le f(x,t_1)$ near $x_1$, and at $(x_1,t_1)$ \begin{equation*} \frac{\partial_-}{\partial t}f(x,t)-\Delta \sigma(x)\le \phi(x)+\epsilon. \end{equation*} Here \begin{equation*} \frac{\partial_-}{\partial t}f(x,t)=\liminf_{h\to 0^+}\frac{f(x,t)-f(x,t-h)}h \end{equation*} for a function $f(x,t)$.}
\end{document}
\begin{document}
\title{Initial-boundary value problems for nearly incompressible vector fields, and applications to the Keyfitz and Kranzer system}
\author{Anupam Pal Choudhury\footnote{APC: Departement Mathematik und Informatik, Universit\"at Basel, Spiegelgasse 1, CH-4051 Basel, Switzerland. Email: [email protected]}, Gianluca Crippa\footnote{GC: Departement Mathematik und Informatik, Universit\"at Basel, Spiegelgasse 1, CH-4051 Basel, Switzerland. Email: [email protected]}, Laura V. Spinolo\footnote{LVS: IMATI-CNR, via Ferrata 1, I-27100 Pavia, Italy. Email: [email protected]} } \date{} \maketitle
\begin{abstract} We establish existence and uniqueness results for initial boundary value problems with nearly incompressible vector fields. We then apply our results to establish well-posedness of the initial-boundary value problem for the Keyfitz and Kranzer system of conservation laws in several space dimensions. \end{abstract}
\section{Introduction}
The Keyfitz and Kranzer system is a system of conservation laws in several space dimensions that was introduced in~\cite{KK} and takes the form \begin{equation} \label{e:KK}
\partial_{t} U+\sum_{i=1}^{d} \partial_{x_{i}} (f^{i}(\vert U \vert) U) =0.
\end{equation}
The unknown is $U: \mathbb R^d \to \mathbb R^N$ and $|U|$ denotes its modulus. Also, for every $i=1, \dots, d$ the function $f^i: \mathbb R \to \mathbb R$ is smooth. In this work we establish existence and uniqueness results for the initial-boundary value problem associated to~\eqref{e:KK}.
The well-posedness of the Cauchy problem associated to~\eqref{e:KK} was established by Ambrosio, Bouchut and De Lellis in~\cite{ABD,AD} by relying on a strategy suggested by Bressan in~\cite{Br}. Note that the results in~\cite{ABD,AD} are one of the very few well-posedness results that apply to systems of conservation laws in several spaces dimensions. Indeed, establishing either existence or uniqueness for a general system of conservation laws in several space dimensions is presently a completely open problem, see~\cite{Daf,Serre1,Serre2} for an extended discussion on this topic.
The basic idea underpinning the argument in~\cite{ABD,AD} is that~\eqref{e:KK} can be (formally) written as the coupling between a \emph{scalar} conservation law and a transport equation with very irregular coefficients. The scalar conservation law is solved by using the fundamental work by Kru{\v{z}}kov~\cite{Kr}, while the transport equation is handled by relying on Ambrosio's celebrated extension of the DiPerna-Lions well-posedness theory, see~\cite{A} and~\cite{Diperna-Lions}, respectively, and~\cite{AC,Delellis2} for an overview. Note, however, that Ambrosio's theory~\cite{A} does not directly apply to~\eqref{e:KK} owing to a lack of control on the divergence of the vector fields. In order to tackle this issue, a theory of \textit{nearly incompressible vector fields} was developed, see~\cite{Delellis1} for an extended discussion. Since we will need it in the following, we recall the definition here. \begin{definition}\label{near-incom}
Let $\Omega \subseteq \mathbb{R}^{d} $ be an open set and $T>0 $. We say that a vector field
$b \in L^{\infty}((0,T) \times \Omega; \mathbb{R}^{d})$ is \textbf{nearly incompressible} if there are a density function $\rho \in L^{\infty}((0,T) \times \Omega) $ and a constant $C > 0 $ such that
\begin{itemize}
\item[i.] $0 \leq \rho \leq C, \ \mathcal{L}^{d+1}-a.e. \ \text{in} \ (0,T) \times \Omega, $ and
\item[ii.] the equation
\begin{equation}
\label{e:continuityrho}
\partial_{t}\rho +\mathrm{div}(\rho b)=0
\end{equation}
holds in the sense of distributions in $(0, T) \times \Omega$.
\end{itemize}
\end{definition} The analysis in~\cite{ABD, AD, Delellis1} ensures that, if $b \in L^\infty ((0, T) \times \mathbb R^d; \mathbb R^d) \cap BV ((0, T) \times \mathbb R^d; \mathbb R^d)$ is a nearly incompressible vector field with density $\rho \in BV ((0, T) \times \mathbb R^d)$, then the Cauchy problem $$ \left\{ \begin{array}{ll}
\partial_{t}[ \rho u] +\mathrm{div}[ \rho bu ]=0 &
\text{in $(0, T) \times \mathbb R^d$}\\
u = \overline{u} &
\text{at $t=0$}\\ \end{array} \right. $$ is well-posed for every initial datum $\overline{u} \in L^\infty (\mathbb R^d)$. This result is pivotal to the proof of the well-posedness of the Cauchy problem for the Keyfitz and Kranzer system~\eqref{e:KK}. See also~\cite{ACFS} for applications of nearly incompressible vector fields to the so-called chromatography system of conservation laws. Note, furthermore, that here and in the following we denote by $BV$ the space of functions with \emph{bounded variation}, see~\cite{AFP} for an extended introduction.
The present paper aims at extending the analysis in~\cite{ABD, AD, Delellis1} to the case of initial-boundary value problems. First, we establish the well-posedness of initial-boundary value problems with $BV$, nearly incompressible vector fields, see Theorem~\ref{IBVP-NC} below for the precise statement. In doing so, we rely on well-posedness results for continuity and transport equations with weakly differentiable vector fields established in~\cite{CDS1}, see also~\cite{CDS2} for related results. Next, we discuss the applications to the Keyfitz and Kranzer system~\eqref{e:KK}.
We now provide a more precise description of our results concerning nearly incompressible vector fields. We fix an open, bounded set $\Omega$ and a nearly incompressible vector field $b$ with density $\rho$ and we consider the initial-boundary value problem
\begin{equation}
\left\{
\begin{array}{ll}
\partial_{t} [\rho u]+ \text{div}[\rho u b]=0 &
\text{in $(0,T)\times \Omega$}\\
u=\overline{u} & \text{at $t=0$}\\
u= \overline{g} & \text{on $ \Gamma^{-}$},
\end{array}
\right.
\label{prob-2}
\end{equation}
where $\Gamma^{-}$ is the part of the boundary $(0,T) \times \partial \Omega $ where the characteristic lines of the vector field $\rho b $ are \emph{inward pointing}. Note that, in general, if $b$ and $\rho$ are only weakly differentiable, one cannot expect that the solution $u$ is a regular function. Since $\Gamma^-$ is in general a negligible set, assigning the pointwise value of $u$ on $\Gamma^-$ is in general not possible. In~\S~\ref{s:formu} we provide the rigorous (distributional) formulation of the initial-boundary value problem~\eqref{prob-2} by relying on the theory of normal traces for low regularity vector fields, see~\cite{ACM,Anz,CF,CTZ}.
We can now state our well-posedness result concerning~\eqref{prob-2}.
\begin{theorem}\label{IBVP-NC}
Let $T > 0 $ and $\Omega \subseteq \mathbb{R}^{d}$ be an open, bounded set with $C^2 $ boundary. Also, let $b \in BV((0,T) \times \Omega; \mathbb{R}^{d}) \cap L^{\infty}((0,T) \times \Omega; \mathbb{R}^{d})$ be a nearly incompressible vector field with density $\rho \in BV((0,T) \times \Omega) \cap L^{\infty}((0,T) \times \Omega)$, see Definition~\ref{near-incom}. Further, assume that
$\overline{u} \in L^{\infty}(\Omega) $ and $\overline{g} \in L^{\infty}(\Gamma^{-}) $.
Then there is a distributional solution $u \in L^{\infty}((0,T) \times \Omega) $
to \eqref{prob-2} satisfying the maximum principle
\begin{equation}
\label{e:maxprin}
\| u \|_{L^\infty} \leq \max \{ \| \overline u \|_{L^\infty}, \| \overline g \|_{L^\infty} \}.
\end{equation} Also, if $u_1, \; u_2 \in L^\infty ((0, T) \times \Omega)$ are two different distributional solutions of the
same initial-boundary value problem, then $\rho u_1 = \rho u_2$
$a.e.$ in $(0,T) \times \Omega$.
\end{theorem}
Note that the reason why we do not obtain uniqueness of the function $u$ itself is that $\rho$ can attain the value $0$. If $\rho$ is bounded away from $0$, then the distributional solution $u$ of~\eqref{prob-2} is unique.
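The following elementary observation may help to clarify this point: the value of a solution on the set where $\rho$ vanishes plays no role in the distributional formulation, which only involves the products $\rho u$ and $\rho u b$. For instance, if $u_1$ is a distributional solution and we set $u_2:=u_1\mathbf{1}_{\{\rho>0\}}$ (a function introduced here only for the sake of illustration), then
$$
\rho u_2=\rho u_1 \qquad a.e. \ \text{in } (0,T)\times\Omega,
$$
so $u_2$ satisfies the same identities as $u_1$ and is therefore also a distributional solution, although in general $u_2\neq u_1$ on the set $\{\rho=0\}$.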
Also, we refer to~\cite{Bar,Boyer,CDS1,CDS2,GS,Mis} for related results on the well-posedness of initial-boundary value problems for continuity and transport equation with weakly differentiable vector fields.
In~\S~\ref{s:KK} we discuss the applications of Theorem~\ref{IBVP-NC} to the Keyfitz and Kranzer system and our main well-posedness result is Theorem~\ref{t:KK}. Note that the proof of Theorem~\ref{t:KK} combines Theorem~\ref{IBVP-NC}, the analysis in~\cite{Delellis1}, and well-posedness results for the initial-boundary value problems for scalar conservation laws established in~\cite{BLN, CR, Serre2}.
\subsection*{Paper outline} In~\S~\ref{s:prel} we go over some preliminary results concerning normal traces of weakly differentiable vector fields. By relying on these results, in~\S~\ref{s:formu} we provide the rigorous formulation of the initial-boundary value problem~\eqref{prob-2}. In~\S~\ref{s:exi} we establish the existence part of Theorem~\ref{IBVP-NC}, and in~\S~\ref{s:uni} the uniqueness. In~\S~\ref{s:ssc} we establish some stability and space continuity property results. Finally, in~\S~\ref{s:KK} we discuss the applications to the Keyfitz and Kranzer system.
\subsection*{Notation} For the reader's convenience, we collect here the main notation used in the present paper.
\begin{itemize} \item $\mathrm{div}$: the divergence, computed with respect to the $x$ variable only. \item $\mathrm{Div}$: the complete divergence, i.e. the divergence computed with respect to the $(t, x)$ variables.
\item $\mathrm{Tr} ( B, \partial \Lambda)$: the normal trace of the bounded, measure-divergence vector field $B$ on the boundary of the set $\Lambda$, see \S~\ref{s:prel}. \item $(\rho u)_0$, $\rho_0$: the initial data of the functions $\rho u$ and $\rho$, see Lemma~\ref{trace-existence} and Remark~\ref{r}. \item $ T (f)$: the trace of the $BV$ function $f$, see Theorem~\ref{bv-trace}. \item $\mathcal H^s$: the $s$-dimensional Hausdorff measure.
\item $f_{|_E}$: the restriction of the function $f$ to the set $E$. \item $\mu \llcorner E$: the restriction of the measure $\mu$ to the measurable set $E$. \item $a.e.$: almost everywhere.
\item $|\mu|$: the total variation of the measure $\mu$.
\item $a \cdot b$: the (Euclidean) scalar product between $a$ and $b$.
\item $\mathbf{1}_E:$ the characteristic function of the measurable set $E$. \item $\Gamma, \Gamma^-, \Gamma^+, \Gamma^0$: see~\eqref{e:gamma}. \item $\vec n$: the outward pointing, unit normal vector to $\Gamma$. \end{itemize}
\section{Preliminary results}
\label{s:prel}
In this section, we briefly recall some notions and results that shall be used in the sequel.
First, we discuss the notion of normal trace for weakly differentiable vector fields, see~\cite{ACM,Anz,CF,CTZ}. Our presentation here closely follows that of \cite{ACM}. Let $\Lambda \subseteq \mathbb{R}^{N} $ be an open set and let us denote by $\mathcal{M}_{\infty}(\Lambda)$ the family of bounded, measure-divergence vector fields. The space $\mathcal{M}_{\infty}(\Lambda) $, therefore, consists of bounded functions $B \in L^{\infty}(\Lambda;\mathbb{R}^{N})$ such that the distributional divergence of $B$ (denoted by $\text{Div} B $) is a locally bounded Radon measure on $\Lambda$.
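We point out, for the reader's convenience, a basic class of examples: every vector field $B \in BV(\Lambda;\mathbb{R}^{N})\cap L^{\infty}(\Lambda;\mathbb{R}^{N})$ belongs to $\mathcal{M}_{\infty}(\Lambda)$. Indeed, in this case
$$
\text{Div} B=\sum_{j=1}^{N} D_{j}B^{j},
\qquad
\vert \text{Div} B\vert(\Lambda)\leq \sum_{j=1}^{N}\vert D_{j}B^{j}\vert(\Lambda)\leq N\, \vert DB\vert (\Lambda)<+\infty,
$$
i.e. the distributional divergence is the trace of the matrix-valued measure $DB$ and is therefore a bounded Radon measure on $\Lambda$. This is, in particular, the setting of Theorem~\ref{trace-renorm} below.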
The normal trace of $B \in \mathcal{M}_{\infty}(\Lambda)$ on the boundary $\partial \Lambda $ can be defined as follows.
\begin{definition}
Let $\Lambda \subseteq \mathbb{R}^{N}$ be an open and bounded set with Lipschitz continuous boundary and let $B \in \mathcal{M}_{\infty}(\Lambda)$. The normal trace of $B$ on $\partial \Lambda $ is a distribution defined by the identity \begin{equation} \Big\langle \emph{Tr}(B,\partial \Lambda), \psi \Big\rangle = \int_{\Lambda} \nabla \psi \cdot B \ dy + \int_{\Lambda} \psi\ d(\emph{Div} B) , \qquad \forall \ \psi \in C^{\infty}_{c}(\mathbb{R}^{N}). \label{prel-1} \end{equation} Here $\emph{Div} B $ denotes the distributional divergence of $B$ and is a bounded Radon measure on $\Lambda $. \end{definition} Note that, owing to the Gauss-Green formula, if $B$ is a smooth vector field, then $\mathrm{Tr}(B,\partial \Lambda) = B \cdot \vec n$,
where $\vec n$ denotes the outward pointing, unit normal vector to $\partial \Lambda$.
Note, furthermore, that the analysis in~\cite{ACM} shows that the normal trace distribution satisfies the following properties.
\begin{itemize}
\item[(a)] The normal trace distribution is induced by an $L^{\infty}$ function on $\partial \Lambda $, which we shall continue to refer to as $\mathrm{Tr}(B,\partial \Lambda) $. The bounded function $\mathrm{Tr}(B,\partial \Lambda) $ satisfies \[\Vert \mathrm{Tr}(B,\partial \Lambda) \Vert_{L^{\infty}(\partial \Lambda)} \leq \Vert B \Vert_{L^{\infty}(\Lambda)}. \]
\item[(b)] Let $\Sigma $ be a Borel set contained in $\partial \Lambda_{1} \cap \partial \Lambda_{2} $, and let $\vec{n}_{1}=\vec{n}_{2} \ \text{on}\ \Sigma$ (here $\vec{n}_{1},\vec{n}_{2} $ denote the outward pointing, unit normal vectors to $\partial \Lambda_{1},\partial \Lambda_{2} $ respectively). Then
\begin{equation}
\mathrm{Tr}(B, \partial \Lambda_{1}) = \mathrm{Tr}(B, \partial \Lambda_{2}) \qquad \text{$\mathcal{H}^{N-1}$-a.e.~on $\Sigma$.}
\label{prel-2}
\end{equation}
\end{itemize} In the following we will use several times the following renormalization result, which was established in~\cite{ACM}. \begin{theorem}\label{trace-renorm} Let $B \in BV (\Lambda;\mathbb{R}^{N})\cap L^{\infty}(\Lambda;\mathbb{R}^{N}) $ and $w \in L^{\infty}(\Lambda) $ be such that $\emph{Div} (wB )$ is a Radon measure. If $\Lambda' \subset \subset \Lambda $ is an open set with bounded and Lipschitz continuous boundary and $h \in C^{1}(\mathbb{R})$, then \begin{equation} \emph{Tr}(h(w)B,\partial \Lambda')=h\left(\frac{\emph{Tr}(wB,\partial \Lambda')}{\emph{Tr}(B,\partial \Lambda')}\right) \emph{Tr}(B,\partial \Lambda') \qquad \text{$\mathcal{H}^{N-1}$-a.e.~on~$\partial \Lambda'$,} \notag \end{equation} where the ratio $\displaystyle{\frac{\emph{Tr}(wB,\partial \Lambda')}{\emph{Tr}(B,\partial \Lambda')} }$ is arbitrarily defined at points where the trace $\emph{Tr}(B,\partial \Lambda') $ vanishes. \end{theorem} We can now introduce the notion of normal trace on a general bounded, Lipschitz continuous, oriented hypersurface $\Sigma \subseteq \mathbb{R}^{N}$ in the following manner. Since $\Sigma $ is oriented, an orientation of the normal vector $\vec{n}_{\Sigma} $ is given. We can then find a domain $\Lambda_{1} \subseteq \mathbb{R}^{N} $ such that $\Sigma \subseteq \partial \Lambda_{1} $ and the normal vectors $\vec{n}_{\Sigma}, \vec{n}_{1} $ coincide. Using \eqref{prel-2}, we can then define
\[\text{Tr}^{-}(B,\Sigma):= \text{Tr}(B,\partial \Lambda_{1}). \]
Similarly, if $\Lambda_{2} \subseteq \mathbb{R}^{N} $ is an open set satisfying $\Sigma \subseteq \partial \Lambda_{2} $, and $\vec{n}_{2}=-\vec{n}_{\Sigma} $, we can define
\[\text{Tr}^{+}(B,\Sigma):=- \text{Tr}(B,\partial \Lambda_{2}). \]
Furthermore we have the formula
\[(\text{Div} B)\llcorner \Sigma= \Big( \text{Tr}^{+}(B,\Sigma)-\text{Tr}^{-}(B,\Sigma) \Big) \mathcal{H}^{N-1} \llcorner \Sigma. \]
Thus $\text{Tr}^{+} $ and $\text{Tr}^{-} $ coincide $\mathcal{H}^{N-1}- $a.e. on $\Sigma$ if and only if $\Sigma $ is a $(\text{Div} B)$-negligible set.\\
We next recall some results from \cite{ACM} concerning space continuity.
\begin{definition}\label{graph}
A family of oriented surfaces $\{\Sigma_{r} \}_{r \in I} \subseteq \mathbb{R}^{N} $ (where $I \subseteq \mathbb{R}$ is an open interval) is called a family of graphs if there
exist
\begin{itemize}
\item a bounded open set $D \subseteq \mathbb{R}^{N-1}$,
\item a Lipschitz function $f:D \rightarrow \mathbb{R}$,
\item a system of coordinates $(x_{1},\cdots,x_{N})$
\end{itemize}
such that the following holds true:
For each $r \in I$, we can write
\begin{equation}
\Sigma_{r}=\big\{(x_{1},\cdots,x_{N}): f(x_{1},\cdots,x_{N-1})-x_{N}=r \big\},
\label{ACM-99}
\end{equation}
and the orientation of $\Sigma_{r}$ is determined by the normal $\displaystyle{\frac{(-\nabla f,1)}{\sqrt{1+\vert \nabla f \vert^{2}}} }$.
\end{definition} We now quote a space continuity result.
\begin{theorem}[see \cite{ACM}]\label{Weak-continuity}
Let $B \in \mathcal{M}_{\infty}(\mathbb{R}^{N})$ and let $\{\Sigma_{r} \}_{r \in I} $ be a family of graphs as above. For a fixed $r_{0} \in I$, let us define the functions $\alpha_{0}, \alpha_{r}: D \rightarrow \mathbb{R} $ as \begin{equation} \begin{aligned} \alpha_{0}(x_{1},\cdots,x_{N-1})&:=\emph{Tr}^{-}(B,\Sigma_{r_{0}})(x_{1},\cdots,x_{N-1},f(x_{1},\cdots,x_{N-1})-r_{0}), \ \text{and} \\ \alpha_{r}(x_{1},\cdots,x_{N-1})&:=\emph{Tr}^{+}(B,\Sigma_{r})(x_{1},\cdots,x_{N-1},f(x_{1},\cdots,x_{N-1})-r) . \end{aligned} \label{ACM-100} \end{equation}
Then $\alpha_{r} \stackrel{*}{\rightharpoonup} \alpha_{0} $ weakly$^{*}$ in $L^{\infty}(D,\mathcal{L}^{N-1} \llcorner D) $ as $r \rightarrow r^{+}_{0}$.
\end{theorem} We will also need the following result, which was originally established in~\cite{CDS1}.
\begin{lemma}\label{extension}
Let $\Lambda \subseteq \mathbb{R}^{N}$ be an open and bounded set with bounded and Lipschitz continuous boundary and let $B$ belong to
$\mathcal{M}_{\infty}(\Lambda)$. Then the vector field
\begin{equation}
\tilde{B}(z):= \left\{\begin{array}{ll}
B(z) & z \in \Lambda \\
0 & \text{otherwise}
\end{array}\right.
\notag
\end{equation} belongs to $\mathcal{M}_{\infty}(\mathbb{R}^{N})$.
\end{lemma} We conclude by recalling some results concerning traces of $BV$ functions and we refer to~\cite[\S 3]{AFP} for a more extended discussion. \begin{theorem} \label{bv-trace} Let $\Lambda \subseteq \mathbb{R}^{N}$ be an open and bounded set with bounded and Lipschitz continuous boundary. There exists a bounded linear mapping \begin{equation} T: BV(\Lambda) \rightarrow L^{1}(\partial \Lambda;\mathcal{H}^{N-1}) \label{ACM-101} \end{equation}
such that $T (f) = f_{|_{\partial \Lambda}}$ if $f$ is continuous up to the boundary. Also, \begin{equation} \int_{\Lambda} \nabla \psi \cdot f \ dy = - \int_{\Lambda} \psi \ d(\emph{\text{Div}} f) + \int_{\partial \Lambda} \psi \ Tf \cdot \vec n \ d\mathcal{H}^{N-1}, \label{ACM-102} \end{equation} for all $f \in BV(\Lambda)$ and $\psi \in C^{\infty}_{c}(\mathbb{R}^{N})$. In the above expression, $\vec n$ denotes the outward pointing, unit normal vector to $\partial \Lambda$. \end{theorem} By comparing~\eqref{prel-1} and~\eqref{ACM-102} we conclude that \begin{equation} \label{e:equal}
\mathrm{Tr} (f, \partial \Lambda) = T (f) \cdot \vec n, \quad \text{for every $f \in BV (\Lambda)$}. \end{equation} By combining Theorems~3.9 and~3.88 in~\cite{AFP} we get the following result. \begin{theorem}[\cite{AFP}] \label{t:traceafp} Assume $\Lambda \subseteq \mathbb R^N$ is an open set with bounded and Lipschitz continuous boundary. If $f \in BV (\Lambda; \mathbb R^m)$, then there is a sequence $\{\tilde f_m \} \subseteq C^\infty (\Lambda)$ such that \begin{equation} \label{e:tracefp}
\tilde f_m \to f \; \text{ strongly in $L^1 (\Lambda; \mathbb R^m)$},
\qquad
T (\tilde f_m) \to T(f) \text{ strongly in $L^1 (\partial \Lambda; \mathbb R^m)$}. \end{equation} Also, we can choose $\tilde f_m$ in such a way that \begin{itemize} \item $\tilde f_m \ge 0$ if $f \ge 0$, \item if $f \in L^\infty (\Lambda; \mathbb R^m)$, then \begin{equation} \label{e:four}
\| \tilde f_m \|_{L^\infty} \leq 4 \| f \|_{L^\infty}. \end{equation} \end{itemize} \end{theorem} A sketch of the proof of Theorem~\ref{t:traceafp} is provided in~\S~\ref{s:proof1}. \section{Distributional formulation of the problem} \label{s:formu} In this section, we follow~\cite{Boyer,CDS1} and we provide the distributional formulation of the problem \eqref{prob-2}. We first establish a preliminary result.
\begin{lemma}\label{trace-existence}
Let $\Omega \subseteq \mathbb{R}^{d}$ be an open bounded set with $C^2$ boundary and let $T > 0 $. We assume that
$b \in L^{\infty}((0,T) \times \Omega; \mathbb{R}^{d}) $ is a nearly incompressible vector field with density $\rho \in L^{\infty}((0,T) \times \Omega) $, see Definition \ref{near-incom}. If $u \in L^{\infty}((0,T) \times \Omega)$ satisfies
\begin{equation}
\int_{0}^{T} \int_{\Omega} \rho u (\partial_{t} \phi+b \cdot \nabla \phi) \ dx dt= 0, \quad \forall \ \phi \in \mathcal{C}^{\infty}_{c} ((0,T) \times \Omega),
\label{iden-2}
\end{equation}
then there exists a unique pair of functions, which we henceforth denote by $\emph{Tr}(\rho u b) \in L^{\infty}((0,T) \times \partial \Omega) $ and $(\rho u)_{0} \in L^{\infty}(\Omega)$, that satisfy \begin{equation} \int_{0}^{T} \! \! \int_{\Omega} \rho u (\partial_{t} \psi+ b \cdot \nabla \psi) \ dx dt= \int_{0}^{T} \! \! \int_{\partial \Omega} \emph{Tr}(\rho u b) \psi \ d\mathcal{H}^{d-1}\ dt - \int_{\Omega} \psi(0,\cdot) (\rho u)_{0}\ dx, \quad \forall \psi \in \mathcal{C}^{\infty}_{c} ([0,T) \times \mathbb{R}^{d}).
\label{iden-3} \end{equation} Also, we have the bounds \begin{equation} \label{e:maxprintraces2}
\| \emph{Tr}(\rho u b) \|_{L^\infty((0,T) \times \partial \Omega) } , \;
\| (\rho u)_{0} \|_{ L^{\infty}(\Omega)}
\leq \max\{ \| \rho u \|_{L^\infty((0,T) \times \Omega) } ; \| \rho u b \|_{L^\infty((0,T) \times \Omega) } \}. \end{equation} \end{lemma} \begin{proof} First of all, let us note that the uniqueness of such functions follows from the freedom in the choice of the test functions $\psi$. Therefore it is enough to discuss the existence of functions with the above properties.
Let us define
\begin{equation}
B(t,x):= \left\{
\begin{array}{ll}
(u \rho, u \rho b) & (t,x) \in (0,T)\times \Omega \\
0 &\text{elsewhere in}\ \mathbb{R}^{d+1}.\\
\end{array} \right.
\label{e:extend}
\end{equation} Then $B \in L^{\infty}(\mathbb{R}^{d+1})$ and from \eqref{iden-2}, it also follows that $\big[\text{Div} B \llcorner {(0,T) \times \Omega} \big]=0 $. We can now apply Lemma \ref{extension} with $\Lambda= (0,T) \times \Omega $ to conclude that $B \in \mathcal{M}_{\infty}(\mathbb{R}^{d+1}).$ Hence the normal trace of $B$ on $\partial \Lambda$ is well defined. Let \begin{equation}
\text{Tr}(\rho u b):= \text{Tr} (B,\partial \Lambda) \Big\vert_{(0,T) \times \partial \Omega}, \ \
(\rho u)_{0}:= -\text{Tr}(B,\partial \Lambda) \Big\vert_{\{0 \} \times \Omega}.
\notag
\end{equation}
The identity \eqref{iden-3} then follows from \eqref{prel-1} by virtue of the fact that $\text{Div}B=0 \ \text{in}\ (0,T)\times \Omega $. \end{proof} \begin{remark} \label{r} We define the vector field $P:=(\rho,\rho b) $ and we point out that $P \in {L^{\infty}((0,T) \times \Omega; \mathbb{R}^{d+1})}$ since
$\rho$ and $b$ are both bounded functions. By introducing the same extension as in~\eqref{e:extend} and using the fact that
\begin{equation}
\int_{0}^{T} \int_{\Omega} \rho (\partial_{t} \phi+b \cdot \nabla \phi) \ dx dt= 0, \quad \forall \ \phi \in \mathcal{C}^{\infty}_{c} ((0,T) \times \Omega),
\notag
\end{equation}
we can argue as in the proof of the above lemma to establish the existence of unique functions $\emph{Tr}(\rho b) \in L^{\infty}((0,T) \times \partial \Omega) $ and
$\rho_0 \in L^\infty(\Omega)$ defined as
$$
\emph{Tr}(\rho b):= \emph{Tr}(P, \partial \Lambda) \Big\vert_{(0,T)
\times \partial \Omega}, \quad
\rho_0 : = - \emph{Tr}(P, \partial \Lambda) \Big\vert_{\{ 0 \} \times
\Omega}.
$$
In this way, we can give a meaning to the normal trace $\mathrm{Tr} (\rho b)$ and to the initial datum $\rho_0$. Also, we have the bounds
\begin{equation} \label{e:maxprintraces1}
\| \emph{Tr}(\rho b) \|_{L^\infty((0,T) \times \partial \Omega) } , \;
\| \rho_{0} \|_{ L^{\infty}(\Omega)}
\leq \max\{ \| \rho \|_{L^\infty((0,T) \times \Omega) } ; \| \rho b \|_{L^\infty((0,T) \times \Omega) }\}. \end{equation} \end{remark} We can now introduce the distributional formulation to the problem \eqref{prob-2} by using Lemma \ref{trace-existence}. We introduce the following notation:
\begin{equation}
\left.
\begin{array}{ll}
\Gamma: = (0, T) \times \partial \Omega,
& \Gamma^{-}:= \{(t,x) \in \Gamma: \ \text{Tr} (\rho b)(t,x)<0 \},\\
\Gamma^{+}:=\{(t,x) \in \Gamma: \ \text{Tr} (\rho b)(t,x) > 0 \}, &
\Gamma^0:=\{(t,x) \in \Gamma: \ \text{Tr} (\rho b)(t,x) = 0 \}. \\
\end{array}
\right.
\label{e:gamma}
\end{equation} \begin{definition} \label{d:distrsol}
Let $\Omega \subseteq \mathbb{R}^{d}$ be an open bounded set with $C^2$ boundary and let $T > 0 $. Let $b \in
L^{\infty}((0,T) \times \Omega; \mathbb{R}^{d}) $ be a nearly incompressible vector field
with density $\rho $, see Definition~\ref{near-incom}. Fix $\overline{u} \in L^\infty (\Omega)$ and $\overline{g} \in L^\infty (\Gamma^-)$.
We say that a function $u \in L^{\infty}((0,T)\times \Omega)$ is a distributional solution of \eqref{prob-2} if the following conditions are satisfied:
\begin{itemize}
\item[i.] $u$ satisfies \eqref{iden-2};
\item[ii.] $(\rho u)_{0}= \overline{u} \rho_0 $;
\item[iii.] $\emph{Tr}(\rho u b)= \overline{g} \emph{Tr}(\rho b) $ on the set $\Gamma^{-}$.
\end{itemize}
\end{definition}
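To illustrate Definition~\ref{d:distrsol} with an elementary example (which is ours and plays no role in the sequel), take $d=1$, $\Omega=(0,1)$, $b \equiv 1$ and $\rho \equiv 1$, so that $\rho$ trivially solves the continuity equation and the pair $(b,\rho)$ fits Definition~\ref{near-incom}. In this case $\text{Tr}(\rho b)=-1$ on $(0,T) \times \{0\}$ and $\text{Tr}(\rho b)=1$ on $(0,T) \times \{1\}$, hence $\Gamma^{-}=(0,T) \times \{0\}$. Given $\overline{u} \in L^{\infty}(\Omega)$ and $\overline{g} \in L^{\infty}(\Gamma^{-})$ (identified with a function of $t$), the function
$$
u(t,x):= \left\{
\begin{array}{ll}
\overline{u}(x-t) & x>t \\
\overline{g}(t-x) & x<t
\end{array}
\right.
$$
satisfies $\partial_{t}u+\partial_{x}u=0$ in the sense of distributions, $(\rho u)_{0}=\overline{u}=\overline{u}\,\rho_{0}$ and $\text{Tr}(\rho u b)=-\overline{g}=\overline{g}\,\text{Tr}(\rho b)$ on $\Gamma^{-}$, so it is a distributional solution in the sense of Definition~\ref{d:distrsol}.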
\section{Proof of Theorem~\ref{IBVP-NC}: existence of solution}
\label{s:exi}
In this section we establish the existence part of Theorem~\ref{IBVP-NC}, namely we prove the existence of functions $u \in L^{\infty}((0,T)\times \Omega) $ and $w \in L^{\infty}(\Gamma^{0}\cup \Gamma^+ ) $ such that for every $\psi \in C^{\infty}_{c}([0,T)\times \mathbb{R}^{d})$,
\begin{equation}
\int_{0}^{T} \int_{\Omega} \rho u (\partial_{t} \psi+b \cdot \nabla \psi) \ dx dt= \int_{\Gamma^{-}} \overline{g} \text{Tr}(\rho b) \psi \ d\mathcal{H}^{d-1} dt +\int_{\Gamma^{+}\cup \Gamma^0} \! \!\text{Tr}(\rho b) \psi w \ d\mathcal{H}^{d-1} dt-\int_{\Omega} \rho_0
\ \overline{u}\ \psi(0,\cdot)\ dx .
\label{weak-exist1}
\end{equation} We proceed as follows: first, in~\S~\ref{ss:as} we introduce an approximation scheme. Next, in~\S~\ref{ss:limit} we pass to the limit and establish existence.
\subsection{Approximation scheme}
\label{ss:as} In this section we rely on the analysis in~\cite[\S~3.3]{Delellis1}, but we employ a more refined approximation scheme which guarantees strong convergence of the traces.
We set $\Lambda: =(0, T) \times \Omega$ and we recall that by assumption $\rho \in BV(\Lambda) \cap L^\infty (\Lambda).$ We apply Theorem~\ref{t:traceafp} and we select a sequence $\{ \tilde \rho_m \} \subseteq C^\infty (\Lambda)$ satisfying~\eqref{e:tracefp} and~\eqref{e:four}. Next, we set \begin{equation} \label{e:rhoenne}
\rho_m: = \frac{1}{m} + \tilde \rho_m \ge \frac{1}{m}. \end{equation} We then apply Theorem~\ref{t:traceafp} to the function $b \rho$ and we set \begin{equation} \label{e:benne}
b_m : = \frac{\widetilde{(b \rho)}_m}{\rho_m}. \end{equation} Owing to Theorem~\ref{t:traceafp} we have \begin{equation} \label{e:elle1conv}
\rho_m \to \rho \;
\text{strongly in $L^1 ((0, T) \times \Omega)$}, \quad
b_m \rho_m \to b \rho
\;
\text{strongly in $L^1 ((0, T) \times \Omega;\mathbb R^d)$}. \end{equation} and, by using the identity~\eqref{e:equal}, \begin{equation} \label{e:traceconv} \begin{split}
\mathrm{Tr} (\rho_m) \to \mathrm{Tr} (\rho)& \; \text{strongly in $L^1 (\Gamma)$}, \quad
\mathrm{Tr} (\rho_mb_m) \to \mathrm{Tr} (\rho b) \;
\text{strongly in $L^1 (\Gamma)$}, \\
& \quad
\rho_{m0} \to \rho_0 \;
\text{strongly in $L^1 (\Omega)$}.
\end{split} \end{equation} Note, furthermore, that \begin{equation} \label{e:linftytraces}
\| \mathrm{Tr} (b_m \rho_m ) \|_{L^\infty} \stackrel{\eqref{e:maxprintraces1}}{\leq} \| b_m \rho_m \|_{L^\infty}
\stackrel{\eqref{e:four}}{\leq} 4 \| b \rho \|_{L^\infty}. \end{equation} In the following, we will use the notation \begin{equation} \label{e:gammadef}
\Gamma_m^- : = \big\{(t, x) \in \Gamma: \; \mathrm{Tr} (\rho_m b_m)
< 0 \big\},
\qquad
\Gamma_m^+ : = \big\{(t, x) \in \Gamma: \; \mathrm{Tr} (\rho_m b_m) > 0 \big\} \end{equation} Finally, we extend the function $\overline{g}$ to the whole $\Gamma$ by setting it equal to $0$ outside $\Gamma^-$ and we construct two sequences $\{ \overline{g}_m \} \subseteq C^1 (\Gamma)$ and $\{\overline{u}_m \} \subseteq C^\infty (\Omega)$ such that \begin{equation} \label{e:convbdata}
\overline{g}_m \to \overline{g} \; \text{strongly in $L^1 (\Gamma)$}, \quad
\overline{u}_m \to \overline{u} \; \text{strongly in $L^1 (\Omega)$} \end{equation} and \begin{equation} \label{e:tomaxprin}
\| \overline{g}_m \|_{L^\infty} \leq \| \overline{g} \|_{L^\infty}, \quad
\| \overline{u}_m \|_{L^\infty} \leq \| \overline{u} \|_{L^\infty}. \end{equation} We can now define the function $u_m$ as the solution of the initial-boundary value problem
\begin{equation}
\left\{
\begin{array}{ll}
\partial_{t} u_m+b_m \cdot \nabla u_m=0 & \text{on $(0, T) \times \Omega$} \\
u_m=\overline{u}_m & \text{at $t=0$}\\
u_m= \overline{g}_m & \text{on} \; \tilde \Gamma^{-}_m,
\end{array}
\right.
\label{exist3}
\end{equation} where $\tilde \Gamma^-_m$ is the set of points of $\Gamma$ from which the characteristic lines of $b_m$ enter $(0, T) \times \Omega$. We recall~\eqref{e:gammadef} and we point out that $$
\Gamma^-_m
\subseteq
\tilde \Gamma^-_m \subseteq
\big\{ (t, x) \in \Gamma:
\; b_m \cdot \vec n \leq 0 \big\}. $$ In the previous expression, $\vec n$ denotes as the outward pointing, unit normal vector to $\partial \Omega$. By using the classical method of characteristics (see also~\cite{Bar}) we establish the existence of a solution $u_m$ satisfying
\begin{equation}
\Vert u_m\Vert_{\infty} \leq \max\{\Vert \overline{u}_m \Vert_{\infty}, \Vert \overline{g}_m \Vert_{\infty} \}
\stackrel{\eqref{e:tomaxprin}}{\leq} \max \{\Vert \overline{u} \Vert_{\infty}, \Vert \overline{g} \Vert_{\infty} \}.
\label{mp}
\end{equation} We now introduce the function $h_m$ by setting
\begin{equation}
\label{e:accaenne}
h_m : = \partial_t \rho_m + \mathrm{div} (b_m \rho_m)
\end{equation} and by using the equation at the first line of~\eqref{exist3} we get that $$
\partial_t (\rho_m u_m ) + \mathrm{div} (b_m \rho_m u_m ) = h_m u_m. $$ Owing to the Gauss-Green formula, this implies that, for every $\psi \in C^\infty_c ([0, T) \times \mathbb R^d)$, \begin{equation}
\begin{aligned}
&\int_{0}^{T} \int_{\Omega} \rho_m u_m [\partial_{t} \psi+ b_m \cdot \nabla \psi ] \ dx dt
+ \int_0^T \int_\Omega h_m u_m \psi \, dx dt
\\
&\quad = -\int_{\Omega} \psi(0,x) \rho_{m0} \overline{u}_{m} \ dx- \int_{0}^{T} \! \! \int_{\partial \Omega} \psi u_m \rho_m b_m \cdot \vec n \, d\mathcal{H}^{d-1} dt \\
& \quad =
-\int_{\Omega} \psi(0,x) \rho_{m0} \overline{u}_{m} \ dx-
\int_{0}^{T} \! \! \int_{\partial \Omega} \mathbf{1}_{\Gamma_m^-}
\overline{g}_{m} \psi \mathrm{Tr} (\rho_m b_m ) d\mathcal{H}^{d-1} dt
- \int_{0}^{T} \! \! \int_{\partial \Omega} \mathbf{1}_{\Gamma_m^+} u_m \psi \mathrm{Tr} (\rho_m b_m ) d\mathcal{H}^{d-1} dt.
\end{aligned}
\label{weak-exist2}
\end{equation} In the above expression, we have used the notation introduced in~\eqref{e:gammadef}, the fact that $u_m = \overline{g}_m$ on $\Gamma^-_m \subseteq \tilde \Gamma^-_m$, and the fact that $\mathrm{Tr} (\rho_m b_m ) = \rho_m b_m \cdot \vec n =0$ on~${\Gamma \setminus (\Gamma^-_m \cup \Gamma^+_m)}$.
\subsection{Passage to the limit} \label{ss:limit} Owing to the uniform bound~\eqref{mp}, there are a subsequence of $\{ u_m \}$ (which, to simplify notation, we do not relabel) and a function $u \in L^\infty ((0, T) \times \Omega)$ such that
\begin{equation}
\label{e:uweaks}
u_m \weaks u \; \text{weakly$^\ast$ in $L^\infty ((0, T) \times \Omega)$. }
\end{equation} The goal of this paragraph is to show that the function $u$ in~\eqref{e:uweaks} is a distributional solution of~\eqref{prob-2} by passing to the limit in~\eqref{weak-exist2}. We first introduce a technical lemma. \begin{lemma} \label{l:meyerserrin} We can construct the approximating sequences $\{ \rho_m \}$ and $\{ b_m \}$ in such a way that the sequence $\{ h_m \}$ defined as in~\eqref{e:accaenne} satisfies \begin{equation} \label{e:convaccaemme}
h_m \to 0 \; \text{strongly in $L^1 ((0, T) \times \Omega)$}. \end{equation} \end{lemma} The proof of Lemma~\ref{l:meyerserrin} is deferred to~\S~\ref{s:proof1} . For future reference, we state the next simple convergence result as a lemma. \begin{lemma} \label{l:traces} Assume that \begin{equation} \label{e:hyp}
\mathrm{Tr} (\rho_m b_m )\to \mathrm{Tr}(\rho b)
\; \text{strongly in $L^1 (\Gamma)$}. \end{equation} Let $\Gamma^-_m$ and $\Gamma^+_m$ be as in~\eqref{e:gammadef} and let $\Gamma^-$ and $\Gamma^+$ be as in~\eqref{e:gamma}. Then, up to subsequences, \begin{equation} \label{e:convchar1}
\mathbf{1}_{\Gamma^-_m} \to \mathbf{1}_{\Gamma^-} +
\mathbf{1}_{\Gamma'} \; \text{strongly in $L^1 (\Gamma)$} \end{equation} and \begin{equation} \label{e:convchar2}
\mathbf{1}_{\Gamma^+_m} \to \mathbf{1}_{\Gamma^+} +
\mathbf{1}_{\Gamma''} \; \text{strongly in $L^1 (\Gamma)$}, \end{equation} where $\Gamma'$ and $\Gamma''$ are (possibly empty) measurable sets satisfying \begin{equation} \label{e:subsetgamma0}
\Gamma', \Gamma'' \subseteq \Gamma^0. \end{equation} \end{lemma} \begin{proof}[Proof of Lemma~\ref{l:traces}] Owing to~\eqref{e:hyp} we have that, up to subsequences, the sequence $\{ \mathrm{Tr} (\rho_m b_m) \}$ satisfies $$
\mathrm{Tr} (\rho_m b_m) (t, x) \to \mathrm{Tr} (\rho b)(t, x),
\quad \text{for $\mathcal{L}^1 \otimes \mathcal{H}^{d-1}$-almost
every $(t, x) \in \Gamma.$} $$ Owing to the Lebesgue Dominated Convergence Theorem, this implies~\eqref{e:convchar1} and~\eqref{e:convchar2}. \end{proof} We can now pass to the limit in all the terms in~\eqref{weak-exist2}. First, by combining~\eqref{e:elle1conv},~\eqref{mp},~\eqref{e:uweaks} and~\eqref{e:convaccaemme} we get that \begin{equation} \label{e:conv11}
\int_{0}^{T} \! \! \int_{\Omega} \rho_m u_m [\partial_{t} \psi+ b_m \cdot \nabla \psi ] \ dx dt
+ \int_0^T \! \! \int_\Omega h_m u_m \psi \, dx dt
\to \int_{0}^{T} \! \! \int_{\Omega}
\rho u [\partial_{t} \psi+ b \cdot \nabla \psi ] \ dx dt, \end{equation}
for every $ \psi \in C^\infty_c ([0, T) \times \mathbb R^d)$. Also, by combining the second line of~\eqref{e:traceconv} with~\eqref{e:convbdata} and~\eqref{e:tomaxprin} we arrive at \begin{equation} \label{e:conv21}
\int_{\Omega} \psi(0,x) {\rho}_{m0} \overline{u}_{m} \ dx
\to
\int_{\Omega} \psi(0,x) {\rho}_{0} \overline{u} \ dx , \end{equation} for every $ \psi \in C^\infty_c ([0, T) \times \mathbb R^d). $ Next, we combine~\eqref{e:traceconv},~\eqref{e:convbdata},~\eqref{e:tomaxprin},~\eqref{e:convchar1},~\eqref{e:subsetgamma0} and the fact that $\mathrm{Tr}(\rho b) =0 $ on $\Gamma^0$ to get that \begin{equation} \label{e:conv4} \begin{split} \int_{0}^{T} \! \! \int_{\partial \Omega} \mathbf{1}_{\Gamma_m^-}
\overline{g}_{m} \psi \mathrm{Tr} (\rho_m b_m ) d\mathcal{H}^{d-1} dt \to
&
\int_{0}^{T} \! \! \int_{\partial \Omega} \mathbf{1}_{\Gamma^-}
\overline{g} \psi \mathrm{Tr} (\rho b ) d\mathcal{H}^{d-1} dt \\
& =
\int_{0}^{T} \! \! \int_{\Gamma^-}
\overline{g} \psi \mathrm{Tr} (\rho b ) d\mathcal{H}^{d-1} dt,
\end{split} \end{equation}
for every $\psi \in C^\infty_c ([0, T) \times \mathbb R^d)$. We are left with the last term in~\eqref{weak-exist2}: first, we denote by $u_{m{|_\Gamma}}$ the restriction of $u_m$ to $\Gamma$. Since $u_m$ is a smooth function, we have $$
\| u_{m{|_\Gamma}} \|_{L^\infty (\Gamma)} \leq
\| u_m \|_{L^\infty ((0, T) \times \Omega)}
\stackrel{\eqref{mp}}{\leq}
\max \big\{ \| \bar u \|_{L^\infty}, \| \bar g \|_{L^\infty} \big\} $$ and hence there is a function $w \in L^\infty (\Gamma)$ such that, up to subsequences, \begin{equation} \label{e:convw}
u_{m{|_\Gamma}} \weaks w \; \text{weakly$^\ast$ in $L^\infty (\Gamma)$}. \end{equation} By combining~\eqref{e:traceconv},~\eqref{e:convchar2},~\eqref{e:convw} and the fact that $\mathrm{Tr} (\rho b) =0$ on $\Gamma^0$ we get that \begin{equation} \begin{split} \label{e:conv5}
\int_{0}^{T} \! \! \int_{\partial \Omega} \mathbf{1}_{\Gamma_m^+} u_m \psi \mathrm{Tr} (\rho_m b_m ) d\mathcal{H}^{d-1} dt \to &
\int_{0}^{T} \! \! \int_{\partial \Omega} \mathbf{1}_{\Gamma^+} w \psi \mathrm{Tr} (\rho b ) d\mathcal{H}^{d-1} dt \\
& = \! \! \int_{\Gamma^+ \cup \Gamma^0} w \psi \mathrm{Tr} (\rho b ) d\mathcal{H}^{d-1} dt . \end{split} \end{equation} By combining~\eqref{e:conv11},~\eqref{e:conv21},~\eqref{e:conv4} and~\eqref{e:conv5} we get that $u$ satisfies~\eqref{weak-exist1} and this establishes existence of a distributional solution of~\eqref{prob-2}. \subsection{Proof of Lemma~\ref{l:meyerserrin}} \label{s:proof1} To ensure that~\eqref{e:convaccaemme} holds we use the same approximation \emph{\`a la} Meyers-Serrin as in~\cite[pp.122-123]{AFP}. We now recall some details of the construction. First, we fix a countable family of open sets $\big\{ \Lambda_h \big\}$ such that \begin{itemize} \item[i.] $\Lambda_h$ is compactly contained in $\Lambda$, for every $h$; \item[ii.] $\big\{ \Lambda_h \big\}$ is a covering of $\Lambda$, namely $$
\bigcup_{h=1}^\infty \Lambda_h = \Lambda; $$ \item[iii.] every point in $\Lambda$ is contained in at most $4$ sets $\Lambda_h$. \end{itemize} Next, we consider a partition of unity associated to $\big\{ \Lambda_h \big\}$, namely a countable family of smooth, nonnegative functions $\{ \zeta_h \}$ such that \begin{itemize} \item[iv.] we have \begin{equation} \label{e:isone} \sum_{h=1}^\infty \zeta_h \equiv 1 \quad \text{in $\Lambda$} ; \end{equation} \item[v.] for every $h>0$, the support of $\zeta_h$ is contained in $\Lambda_h$. \end{itemize} Finally, we fix a convolution kernel $\eta: \mathbb R^{d+1} \to \mathbb R^+$ and we define $\eta_\ee$ by setting $$
\eta_\ee (z) : = \frac{1}{\ee^{d+1}} \eta
\left(
\frac{z}{\ee}
\right) $$ For every $m>0$ and $h>0$ we can choose $\ee_{mh}$ in such a way that $(\rho \zeta_h) \ast \eta_{\ee_{mh}} $ is supported in $\Lambda_h$ and furthermore \begin{equation} \label{e:ms2}
\int_0^T \! \! \int_\Omega
| \rho \zeta_h - ( \rho \zeta_h) \ast \eta_{\ee_{mh}} |+
| \rho \, \partial_t \zeta_h - ( \rho \, \partial_t \zeta_h) \ast \eta_{\ee_{mh}}|
+
| \rho b \cdot \nabla \zeta_h - ( \rho b \cdot \nabla \zeta_h) \ast \eta_{\ee_{mh}}| dx dt
\leq \frac{2^{-h}}{m}. \end{equation} We then define $\tilde \rho_m$ by setting \begin{equation} \label{e:ms3}
\tilde \rho_m : = \sum_{h=1}^\infty
(\rho \zeta_h) \ast \eta_{\ee_{mh}} . \end{equation} The function $(\widetilde{\rho b})_m$ is defined analogously. Next, we proceed as in~\cite[p.123]{AFP} and we point out that \begin{equation*} \begin{split}
h_m \stackrel{\eqref{e:accaenne}}{=} &
\partial_t \rho_m + \mathrm{div} ({\rho_m b_m})
\stackrel{\eqref{e:ms3}}{=}
\underbrace{\sum_{h=1}^\infty
(\partial_t \rho \zeta_h) \ast \eta_{\ee_{mh}} +
\sum_{h=1}^\infty
(\mathrm{div} (\rho b) \zeta_h) \ast \eta_{\ee_{mh}}}_{= 0
\; \text{by~\eqref{e:continuityrho} } }
\\ &\quad +
\sum_{h=1}^\infty
(\rho \, \partial_t \zeta_h) \ast \eta_{\ee_{mh}} +
\sum_{h=1}^\infty
(\rho b \cdot \nabla \zeta_h) \ast \eta_{\ee_{mh}}
\\ & \stackrel{\eqref{e:isone}}{=}
\sum_{h=1}^\infty
(\rho \, \partial_t \zeta_h) \ast \eta_{\ee_{mh}} -
\rho \sum_{h=1}^\infty \partial_t \zeta_h
\quad +
\sum_{h=1}^\infty
(\rho b \cdot \nabla \zeta_h) \ast \eta_{\ee_{mh}}-
\rho b \cdot \sum_{h=1}^\infty
\nabla \zeta_h \end{split} \end{equation*} By using~\eqref{e:ms2} we then get that $$
\int_0^T \! \! \int_\Omega |h_m| dx dt \leq \sum_{h=1}^\infty
\frac{2^{-h}}{m} = \frac{1}{m} $$ and this establishes~\eqref{e:convaccaemme}. \label{s:proof2}
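We conclude this subsection by pointing out that any standard choice of convolution kernel works in the construction above; for instance (this specific choice, which we record only for concreteness, plays no role in the argument) one can take
$$
\eta(z):= C \exp\left( -\frac{1}{1-|z|^{2}} \right) \ \text{for} \ |z|<1, \qquad \eta(z):=0 \ \text{for} \ |z| \geq 1,
$$
with the constant $C>0$ chosen in such a way that $\int_{\mathbb R^{d+1}} \eta \, dz =1$.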
\section{Proof of Theorem~\ref{IBVP-NC}: comparison principle and uniqueness}
\label{s:uni} In this section we complete the proof of Theorem~\ref{IBVP-NC}. More precisely, we establish the following comparison principle. \begin{lemma} \label{l:uni}
Let $\Omega$, $b$ and $\rho$ be as in the statement of
Theorem~\ref{IBVP-NC}. Assume $u_1$ and $u_2 \in
L^{\infty}((0,T) \times \Omega)$ are distributional
solutions (in the sense of Definition~\ref{d:distrsol}) of the initial-boundary value problem~\eqref{prob-2}
corresponding to the initial and boundary data
$\overline{u}_{1} \in L^{\infty}(\Omega)$, $\overline{g}_1 \in L^\infty(\Gamma^-)$ and
$\overline{u}_2 \in L^{\infty}(\Omega)$, $\overline{g}_2 \in L^\infty(\Gamma^-)$, respectively. If $\overline{u}_1 \ge \overline{u}_2$ and $\overline{g}_1 \ge \overline{g}_2$, then
\begin{equation}
\label{e:compa}
\rho u_1 \ge \rho u_2 \quad a.e. \; \text{in} \; (0, T) \times \Omega.
\end{equation} \end{lemma} Note that the uniqueness of $\rho u$, where $u$ is a distributional solution of the initial-boundary value problem~\eqref{prob-2}, immediately follows from the above result. \begin{proof} [Proof of Lemma~\ref{l:uni}] Let us define the function $$ \tilde{\beta}(u)= \left\{ \begin{array}{ll} u^2 & u \geq 0 \\ 0 & u<0. \end{array}\right. $$ In what follows, we shall prove that the identity $\rho\ \tilde{\beta}(u_{2}-u_{1})=0 $ holds almost everywhere, whence the comparison principle follows. To see this, we proceed as described below. First, we point out that, since the equation at the first line of~\eqref{prob-2} is linear, then $u_2-u_1$ is a distributional solution of the initial boundary value problem with data $\overline{u}_2 - \overline{u}_1$, $\overline{g}_2 - \overline{g}_1$. In particular, for every $ \psi \in C^{\infty}_{c}([0,T) \times \mathbb{R}^{d} )$ we have \begin{equation} \int_{0}^{T} \int_{\Omega} \rho (u_{2}-u_{1}) (\partial_{t} \psi +b \cdot \nabla \psi) \ dx dt= \int_{0}^{T} \int_{\partial \Omega} [\text{Tr}(\rho u_2 b) - \text{Tr}(\rho u_{1} b)] \ \psi \ d\mathcal{H}^{d-1} dt -\int_{\Omega} \psi(0,\cdot) {\rho}_0 (\overline{u}_{2}-\overline{u}_{1}) \ dx \label{e7} \end{equation} and \begin{equation} \label{e:ntraces}
\text{Tr}(\rho u_2 b) = \overline{g}_2 \text{Tr}(\rho b), \quad
\text{Tr}(\rho u_1 b) = \overline{g}_1 \text{Tr}(\rho b)
\quad \text{on $\Gamma^-$}. \end{equation} Note that~\eqref{e7} implies that \begin{equation} \int_{0}^{T} \int_{\Omega} \rho (u_{2}-u_{1}) (\partial_{t} \phi+b \cdot \nabla \phi) \ dx dt=0, \quad \forall \phi \in C^{\infty}_{c}((0,T) \times \Omega). \label{e5} \end{equation} By using~\cite[Lemma 5.10]{Delellis1} (renormalization property inside the domain), we get \begin{equation} \int_{0}^{T} \int_{\Omega} \rho \ \tilde{\beta}(u_{2}-u_{1})(\partial_{t} \phi+b \cdot \nabla \phi) \ dx dt=0, \qquad \forall \phi \in C^{\infty}_{c}((0,T) \times \Omega). \label{e10} \end{equation} We next apply Lemma \ref{trace-existence} to the function $\tilde{\beta}(u_{2}-u_{1})$ to infer that there are bounded functions $\text{Tr}(\rho \tilde{\beta}(u_{2}-u_{1}) b)$ and $(\rho \tilde{\beta}(u_{2}-u_{1}))_{0} $ such that, for every $ \psi \in C^{\infty}_{c}([0,T) \times \mathbb{R}^{d} ),$ we have \begin{equation} \int_{0}^{T} \int_{\Omega} \rho \ \tilde{\beta}(u_{2}-u_{1}) (\partial_{t} \psi +b \cdot \nabla \psi) \ dx dt= \int_{0}^{T} \int_{\partial \Omega} \text{Tr}(\rho \ \tilde{\beta}(u_{2}-u_{1}) b) \ \psi \ d\mathcal{H}^{d-1} dt -\int_{\Omega} \psi(0,\cdot) (\rho \ \tilde{\beta}(u_{2}-u_{1}))_{0} \ dx. \label{e11} \end{equation} We recall~\eqref{e7} and we apply Lemma \ref{trace-renorm} (trace renormalization property) with $w= u_2 -u_1$, $h= \tilde \beta$, $B=(\rho,\rho b) $, $\Lambda = \mathbb R^{d+1}$ and $\Lambda'=(0,T)\times \Omega$. We recall that the vector field $P$ is defined by setting $P:= (\rho, \rho b)$ and we get \begin{equation} \begin{aligned} (\rho\ \tilde{\beta} (u_{2}-u_{1}))_{0}=- \text{Tr}(\tilde{\beta}(u_{2}-u_{1}) P,\partial \Lambda') \Big\vert_{\{0\} \times \Omega}&= - \tilde{\beta}\left(\frac{(\rho (u_{2}-u_{1}))_{0}}{\text{Tr}(P,\partial \Lambda')\Big\vert_{\{0\}\times \Omega}} \right) \text{Tr}(P,\partial \Lambda')\Big\vert_{\{ 0\} \times \Omega}\\ &=-\tilde{\beta}\left( \frac{\rho_0 (\overline{u}_{2}-\overline{u}_{1})}{\overline{\rho}} \right) \rho_0 \\ & =0, \; \text{since} \ \overline{u}_{1} \geq \overline{u}_{2} \phantom{\int} \end{aligned} \label{e12} \end{equation} and \begin{equation} \begin{aligned} \text{Tr}(\rho \ \tilde{\beta}(u_{2}-u_{1}) b) &= \text{Tr}(\tilde{\beta}(u_{2}-u_{1}) \rho, \partial \Lambda')\Big\vert_{(0,T) \times \partial \Omega} = \tilde{\beta} \left( \frac{\text{Tr}((u_{2}-u_{1})\rho, \partial \Lambda')\Big\vert_{(0,T) \times \partial \Omega}}{\text{Tr}(P, \partial \Lambda')\Big\vert_{(0,T) \times \partial \Omega}} \right) \text{Tr}(P, \partial \Lambda')\Big\vert_{(0,T) \times \partial \Omega}\\ &=\tilde{\beta}\left(\frac{\text{Tr}(\rho (u_{2}-u_{1}) b)}{\text{Tr}(\rho b)} \right) \text{Tr}(\rho b). \end{aligned} \notag \end{equation} By recalling~\eqref{e:ntraces} and the inequality $\bar g_1 \ge \bar g_2$, we conclude that $$
\text{Tr}(\rho \ \tilde{\beta}(u_{2}-u_{1}) b) = 0 \quad \text{on $\Gamma^-$} $$ and, since $\tilde \beta \ge 0$, that \begin{equation} \label{e:ntracein}
\text{Tr}(\rho \ \tilde{\beta}(u_{2}-u_{1}) b) \ge 0
\quad \text{on $\Gamma$.} \end{equation} We now choose a test function $\nu \in C^\infty_c (\mathbb R^d)$ in such a way that $\nu \equiv 1$ on the bounded set $\Omega$. Note that \begin{equation} \label{e:zerozero}
\partial_t \nu + b \cdot \nabla \nu =0 \quad \text{on $(0, T) \times \Omega$.} \end{equation} Next, we fix $\bar{t} \in (0, T)$ and we choose a sequence of functions $\chi_{n} \in {C}^{\infty}_{c}([0,+\infty))$ that satisfy
\[\chi_{n} \equiv 1 \ \text{on}\ [0,\bar{t}],\ \chi_{n}\equiv 0\ \text{on}\ [\bar{t}+\frac{1}{n},+\infty),\ \chi'_{n} \leq 0, \]
and we define
\[\psi_{n}(t,x):= \chi_{n}(t) \nu(x), \ (t,x)\in [0,T)\times \mathbb{R}^{d}.\] Note that, for every $n$ large enough that $\bar{t}+\frac{1}{n}<T$, the function $\psi_{n}$ is smooth, non-negative and compactly supported in $[0,T)\times \mathbb{R}^{d}$. By combining the identities \eqref{e11},~\eqref{e12} and the inequality~\eqref{e:ntracein}, we get
\begin{equation}
\begin{aligned}
0 &\leq \int_{0}^{T} \int_{\Omega} \rho\ \tilde{\beta}(u_{2}-u_{1}) [\partial_{t}(\chi_{n} \nu)+b \cdot \nabla (\chi_{n} \nu)] \ dx dt \\
& = \int_{0}^{T} \int_{\Omega} \nu \rho\ \tilde{\beta}(u_{2}-u_{1}) \chi'_{n} \ dx dt+ \int_{0}^{T} \int_{\Omega} \chi_{n} \rho \ \tilde{\beta}(u_{2}-u_{1}) (\partial_{t} \nu
+b \cdot \nabla \nu) \ dx dt \\
& \stackrel{\eqref{e:zerozero}}{=} \int_{0}^{T} \int_{\Omega} \nu \rho \ \chi'_{n} \ \tilde{\beta}(u_{2}-u_{1}) \ dx dt. \\
\end{aligned}
\notag
\end{equation} Passing to the limit as $n \rightarrow +\infty$, noting that $\chi'_{n} \rightarrow -\delta_{\bar{t}}$ in the sense of distributions, and recalling that $\nu \equiv 1$ on $\Omega$, we obtain, for almost every $\bar{t} \in (0,T)$, \begin{equation} \int_{\Omega} \rho(\bar{t},\cdot)\, \tilde{\beta}(u_{2}-u_{1})(\bar{t},\cdot) \ dx \leq 0.
\notag \end{equation} Since the above inequality holds for almost every $\bar{t} \in (0, T)$, we can conclude that \begin{equation}
\begin{aligned}
\rho \ \tilde{\beta}(u_2-u_1)=0,\ \text{for almost every}\ (t,x) \Rightarrow \rho u_{1} \geq \rho u_{2}, \ \text{for almost every}\ (t,x). \end{aligned} \label{e14} \end{equation} This concludes the proof of Lemma~\ref{l:uni}. \end{proof}
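We also point out that, in general, one cannot expect uniqueness of $u$ itself (this elementary observation is only recorded here for completeness): if $\rho$ vanishes on a measurable set $O \subseteq (0,T) \times \Omega$, then for every $c \in \mathbb{R}$ the function $u + c\, \mathbf{1}_{O}$ satisfies \eqref{iden-2} and the conditions of Definition~\ref{d:distrsol} with the same data, since $\rho (u + c\, \mathbf{1}_{O}) = \rho u$. This is why uniqueness is stated at the level of $\rho u$.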
\section{Stability and space continuity properties}
\label{s:ssc}
In this section, we discuss some qualitative properties of solutions of the initial-boundary value problem~\eqref{prob-2}. First, we prove Theorem~\ref{stability-weak}, which establishes (weak) stability of solutions with respect to perturbations of the vector fields and of the data. Theorem~\ref{stability-strong} shows that, under stronger hypotheses, stability holds in a strong sense. Finally, Theorem~\ref{space-continuity} establishes space continuity properties.
\begin{theorem}\label{stability-weak} Let $T>0$ and let $\Omega \subseteq \mathbb R^d$ be an open and bounded set with $C^2$ boundary. Assume that $$ b_{n}, b \in BV((0,T) \times \Omega; \mathbb{R}^{d}) \cap L^{\infty}((0,T) \times \Omega; \mathbb{R}^{d}), \qquad \rho_{n},\rho \in BV((0,T) \times \Omega) \cap L^{\infty}((0,T) \times \Omega) $$ satisfy
\begin{equation}
\begin{aligned}
\partial_{t} \rho_{n}+\mathrm{div} (b_{n}\rho_{n})=0,\\
\partial_{t} \rho+\mathrm{div} (b \rho)=0,
\end{aligned}
\label{stability-1}
\end{equation}
in the sense of distributions on $(0, T) \times \Omega$. Assume furthermore that
\begin{equation}
0 \leq \rho_{n}, \rho \leq C \; \text{and} \; \Vert b_{n} \Vert_{\infty}\ \text{is uniformly bounded},
\label{stability-2}
\end{equation}
\begin{equation}
(b_{n},\rho_{n}) \xrightarrow[n \rightarrow \infty]{} (b,\rho) \ \text{strongly in} \ L^{1}((0,T) \times \Omega; \mathbb R^{d+1}),
\label{stability-3}
\end{equation}
\begin{equation}
\rho_{n0} \xrightarrow[n \rightarrow \infty]{} \rho_0
\; \text{strongly in}\ L^{1}(\Omega),
\label{stability-4}
\end{equation}
\begin{equation}
\emph{Tr}(\rho_{n} b_n) \xrightarrow[n \rightarrow \infty]{} \emph{Tr}(\rho b)\ \text{strongly in}\ L^{1}(\Gamma).
\label{stability-5}
\end{equation}
Let $u_{n} \in L^{\infty}((0,T) \times \Omega) $ be a distributional solution (in the sense of Definition~\ref{d:distrsol}) of the initial-boundary value problem
\begin{equation}
\label{e:ibvpapp}
\left\{
\begin{array}{lll}
\partial_{t}(\rho_{n} u_{n})+\mathrm{div}(\rho_{n} u_{n} b_{n})=0 &
\text{in} \ (0,T)\times \Omega \\
u_{n}=\overline{u}_{n} & \text{at $t=0$}\\
u_{n} =\overline{g}_{n} & \text{on}
\ \Gamma_{n}^{-} \\
\end{array}
\right.
\end{equation}
and $u \in L^{\infty}((0,T) \times \Omega) $ be a distributional solution of the equation
\begin{equation}
\label{e:ibvplimit}
\left\{
\begin{array}{lll}
\partial_{t}(\rho u)+\mathrm{div}(\rho u b)=0 & \text{in} \ (0,T)\times \Omega \\
u=\overline{u} & \text{at $t=0$}\\
u=\overline{g} & \text{on}\ \Gamma^{-}.
\end{array}
\right.
\end{equation}
If $\overline{u}_{n}, \overline{u} \in L^\infty (\Omega)$ and $\overline{g}_{n}, \overline{g} \in L^\infty (\Gamma)$ satisfy \begin{equation}
\overline{u}_{n} \stackrel{\ast}{\rightharpoonup} \overline{u}\ \text{weak-$^\ast$ in}\ L^{\infty}(\Omega),
\label{stability-7}
\end{equation}
\begin{equation}
\overline{g}_{n} \stackrel{\ast}{\rightharpoonup} \overline{g} \; \text{weak-$^\ast$ in}\ L^{\infty}(\Gamma) ,
\label{stability-8}
\end{equation} then
\begin{equation}
\rho_{n} u_{n} \stackrel{*}{\rightharpoonup}
\rho u \ \text{weak-* in}\ L^{\infty}((0,T) \times \Omega)
\label{stability-10}
\end{equation}
and
\begin{equation}
\emph{Tr}(\rho_{n} u_{n} b_{n}) \stackrel{*}{\rightharpoonup}
\emph{Tr}(\rho u b) \ \text{weak-* in}\ L^{\infty}(\Gamma).
\label{stability-9}
\end{equation}
\end{theorem}
Note that in the statement of the above theorem $\overline{g}_n$ and $\overline{g}$ are functions defined on the whole $\Gamma$, although $\rho_n u_n$ and $\rho u$ only depend on the values of $\overline{g}_n$ and $\overline{g}$ on $\Gamma^-_n$ and $\Gamma^-$, respectively.
\begin{proof}
We proceed according to the following steps. \\
{\sc Step 1:} we apply Theorem~\ref{IBVP-NC} and we infer that the function $\rho_n u_n$ satisfying~\eqref{e:ibvpapp} is unique. Also, without loss of generality, we can redefine the function $u_n$ on the set $\{\rho_n=0\}$ in such a way that $u_n$ satisfies the maximum principle~\eqref{e:maxprin}. Owing to~\eqref{stability-7} and~\eqref{stability-8}, the sequences $\| \overline{u}_n \|_{L^\infty}$ and $\| \overline{g}_n \|_{L^\infty}$ are both uniformly bounded and by the maximum principle so is $\| u_n \|_{L^\infty}$. Also, by combining~\eqref{e:maxprintraces2} and~\eqref{stability-2} we infer that the sequence $\Vert \text{Tr}(\rho_{n} b_{n} u_{n}) \Vert_{\infty} $ is also uniformly bounded. We conclude that, up to subsequences (which we do not relabel to simplify the notation), we have
\begin{comment}
We begin with the preliminary observation that if we are able to prove that
\begin{equation}
u_{n} \stackrel{*}{\rightharpoonup} u \ \text{weak-* in}\ L^{\infty}((0,T) \times \Omega),
\label{stability-10}
\end{equation}
we can combine the fact that $\Vert u_{n} \Vert_{\infty} $ are uniformly bounded (this follows from the maximum principle and \eqref{stability-7}-\eqref{stability-8}) with \eqref{stability-3} to infer \eqref{stability-9}. Therefore it is sufficient to establish \eqref{stability-10} which we pursue next.
We note that since $\Vert u_{n} \Vert_{\infty} $ and $\Vert \text{Tr}(\rho_{n} b_{n} u_{n}) \Vert_{\infty} $ are uniformly bounded (see Remark $2.2 (a)$), there exist $R_{1} \in L^{\infty}((0,T) \times \Omega)$ and $R_{2} \in L^{\infty}((0,T)\times \partial \Omega)$ such that as $n \rightarrow \infty $, \end{comment}
\begin{equation}
\begin{aligned}
& u_{n} \stackrel{*}{\rightharpoonup} r_{1} \ \text{weak-* in} \ L^{\infty}((0,T) \times \Omega),\\
& \text{Tr}(\rho_{n} u_{n} b_{n}) \stackrel{*}{\rightharpoonup} r_{2} \ \text{weak-* in} \ L^{\infty}(\Gamma)
\end{aligned}
\label{stability-11}
\end{equation}
for some $r_{1} \in L^{\infty}((0,T) \times \Omega)$ and $r_{2} \in L^{\infty}(\Gamma)$.
By using \eqref{iden-2} and \eqref{iden-3}, we get that
\begin{equation}
\int_{0}^{T} \int_{\Omega} \rho r_{1} (\partial_{t} \phi+b \cdot \nabla \phi)\ dx dt=0, \quad \forall \phi \in C^{\infty}_{c}((0,T) \times \Omega),
\label{stability-12}
\end{equation}
and
\begin{equation}
\int_{0}^{T} \int_{\Omega} \rho r_{1} (\partial_{t} \psi+b \cdot \nabla \psi)\ dx dt= \int_{0}^{T} \int_{\partial \Omega} r_{2} \psi\ d\mathcal{H}^{d-1} dt -\int_{\Omega} \psi(0,\cdot) {\rho}_0\ \overline{u}\ dx ,\ \forall \psi \in C^{\infty}_{c}([0,T) \times \mathbb{R}^{d}).
\label{stability-13}
\end{equation}
From Lemma \ref{trace-existence}, it also follows that
\begin{equation}
r_{2}= \text{Tr}(\rho r_{1} b).
\label{stability-14}
\end{equation}
Assume for the time being that we have established the equality \begin{equation} \label{e:whatww}
r_{2}=\overline{g}\, \text{Tr}(\rho b) \quad \text{on}\ \Gamma^{-}, \end{equation} then by recalling~\eqref{stability-14} and the uniqueness part in Theorem~\ref{IBVP-NC} we conclude that $\rho r_1= \rho u$ and $r_2 = \text{Tr}(\rho u b) $. Owing to~\eqref{stability-11} and to the strong convergence~\eqref{stability-3}, this concludes the proof of the theorem. \\ {\sc Step 2:} we establish~\eqref{e:whatww}. First, we decompose $\text{Tr}(\rho_n u_n b_n) $ as \begin{equation} \label{e:decompo} \begin{split}
\text{Tr}(\rho_n u_n b_n) &= \text{Tr}(\rho_n u_n b_n)
\mathbf{1}_{\Gamma^{-}_{n}}+\text{Tr}(\rho_n u_n b_n) \mathbf{1}_{\Gamma^{+}_{n}} + \text{Tr}(\rho_n u_n b_n) \mathbf{1}_{\Gamma^{0}_{n}}
\\
& = \overline{g}_n \text{Tr}(\rho_n b_n)
\mathbf{1}_{\Gamma^{-}_{n}}+\text{Tr}(\rho_n u_n b_n) \mathbf{1}_{\Gamma^{+}_{n}} + \text{Tr}(\rho_n u_n b_n) \mathbf{1}_{\Gamma^{0}_{n}} ,
\end{split} \end{equation} where $\Gamma^-_n$, $\Gamma^+_n$ and $\Gamma^0_n$ are defined as in~\eqref{e:gamma}, with $\text{Tr}(\rho_n b_n)$ in place of $\text{Tr}(\rho b)$. By using Lemma~\ref{trace-renorm} (trace renormalization), one could prove that the last term in the above expression vanishes, but this is not needed here. Indeed, it suffices to recall~\eqref{stability-5} and Lemma~\ref{l:traces} and to point out that, by combining~\eqref{e:convchar1} and~\eqref{e:convchar2}, we get \begin{equation} \label{e:conchar3}
\mathbf{1}_{\Gamma^{0}_{n}} \to \mathbf{1}_{\Gamma^0}
- \mathbf{1}_{\Gamma'} - \mathbf{1}_{\Gamma''} \; \text{strongly in $L^1 (\Gamma)$}. \end{equation} Next, we recall that
the sequence $\| \text{Tr}(\rho_n u_n b_n)\|_{L^\infty}$ is uniformly bounded owing to the uniform bounds on $\| \rho_n \|_{L^\infty}$, $\| b_n \|_{L^\infty}$ and $\| u_n \|_{L^\infty}$. By recalling~\eqref{stability-8}, we conclude that
\begin{equation}
\label{e:conv1}
\overline{g}_n \text{Tr}(\rho_n b_n)
\mathbf{1}_{\Gamma^{-}_{n}} \stackrel{*}{\rightharpoonup}
\overline{g} \, \text{Tr}(\rho b)
\Big( \mathbf{1}_{\Gamma^{-}} + \mathbf{1}_{\Gamma'} \Big)
\qquad \text{weak-* in $L^\infty (\Gamma)$.}
\end{equation}
By recalling that $\Gamma' \subseteq \Gamma^0$ we get that $\text{Tr}(\rho b)
\mathbf{1}_{\Gamma'} =0$. We now pass to the weak-$\ast$ limit in~\eqref{e:decompo}: by using~\eqref{e:convchar1},~\eqref{e:convchar2},~\eqref{stability-11},~\eqref{stability-8} and~\eqref{e:conv1}, we get \begin{equation} \label{e:conv2}
r_2 = \overline{g} \text{Tr}(\rho b)
\mathbf{1}_{\Gamma^{-}} + r_2
\Big( \mathbf{1}_{\Gamma^{+}} + \mathbf{1}_{\Gamma''} \Big)+ r_2
\Big( \mathbf{1}_{\Gamma^{0}} - \mathbf{1}_{\Gamma'} -\mathbf{1}_{\Gamma''} \Big), \end{equation} which owing to the properties $$
\Gamma^- \cap \Gamma^{0}= \emptyset,
\quad \Gamma^- \cap \Gamma'=\emptyset, \quad
\Gamma^- \cap \Gamma''= \emptyset
$$ implies~\eqref{e:whatww}. This concludes the proof of Theorem~\ref{stability-weak}. \end{proof}
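Before turning to strong stability, we point out by means of an elementary example (ours) that, under the weak-$\ast$ assumptions \eqref{stability-7} and \eqref{stability-8} alone, the convergence in \eqref{stability-10} cannot in general be upgraded to strong convergence. Take $d=1$, $\Omega=(0,1)$, $b_{n}=b \equiv 1$, $\rho_{n}=\rho \equiv 1$, $\overline{g}_{n}=\overline{g} \equiv 0$ and $\overline{u}_{n}(x):= \sin (2 \pi n x)$, so that $\overline{u}_{n} \stackrel{\ast}{\rightharpoonup} \overline{u} \equiv 0$. Then
$$
u_{n}(t,x)= \sin\big(2 \pi n (x-t)\big)\, \mathbf{1}_{\{x>t\}}, \qquad u \equiv 0,
$$
and $\rho_{n} u_{n} \stackrel{\ast}{\rightharpoonup} 0 = \rho u$, while $\Vert \rho_{n} u_{n} \Vert_{L^{2}((0,T) \times \Omega)}$ does not converge to $0$, so the convergence is not strong.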
\begin{theorem}\label{stability-strong}
Under the same assumptions as in Theorem~\ref{stability-weak}, if we furthermore assume that
\begin{equation}
\overline{u}_{n} \xrightarrow[n \rightarrow \infty]{} \overline{u}\ \text{strongly in}\ L^{1}(\Omega),
\label{stability-35}
\end{equation}
\begin{equation}
\overline{g}_{n} \xrightarrow[n \rightarrow \infty]{} \overline{g} \ \text{strongly in}\ L^{1}(\Gamma) ,
\label{stability-36}
\end{equation}
then we get
\begin{equation}
\begin{aligned}
&\rho_{n} u_{n} \xrightarrow[n \rightarrow \infty]{} \rho u \ \text{strongly in}\ L^{1}((0,T) \times \Omega),\\
&\emph{Tr}(\rho_{n} u_{n} b_{n}) \xrightarrow[n \rightarrow \infty]{} \emph{Tr}(\rho u b) \ \text{strongly in}\ L^{1}(\Gamma).
\end{aligned}
\label{stability-37}
\end{equation}
\end{theorem}
\begin{proof} First, we point out that the convergence
in~\eqref{stability-10} implies that \begin{equation}
\label{e:ell2}
\rho_n u_n
{\rightharpoonup} \rho {u}\ \text{weakly in}\ L^{2}((0, T) \times \Omega ). \end{equation} Next, by using Lemma \ref{trace-renorm} (trace renormalization property), we get that $\rho_n u^{2}_{n}$ and $\rho u^{2}$ satisfy (in the sense of distributions)
\begin{equation}
\left\{
\begin{array}{lll}
\partial_{t}(\rho_{n} u_{n}^{2})+\text{div}(\rho_{n} u_{n}^{2} b_{n})=0 &
\text{in} \ (0,T)\times \Omega \\
u_{n}^{2}=\overline{u}_{n}^{2} & \text{at $t=0$}\\
u^{2}_{n} =\overline{g}^{2}_{n} & \text{on}\ \Gamma_{n}^{-}, \\
\end{array}
\right.
\notag
\end{equation}
and
\begin{equation}
\left\{
\begin{array}{lll}
\partial_{t}(\rho u^{2})+\text{div}(\rho u^{2} b)=0 &
\text{in} \ (0,T)\times \Omega \\
u^{2}=\overline{u}^{2} & \text{at $t=0$} \\
u^{2} =\overline{g}^{2} & \text{on}\ \Gamma^{-}, \\
\end{array}
\right.
\notag
\end{equation}
respectively. Also, by combining~\eqref{stability-7}, \eqref{stability-8}, \eqref{stability-35} and \eqref{stability-36}, we get that
\begin{equation}
\overline{u}^2_{n} \stackrel{*}{\rightharpoonup} \overline{u}^2\ \text{weak-$^\ast$ in}\ L^{\infty}(\Omega), \qquad
\overline{g}^2_{n} \stackrel{*}{\rightharpoonup} \overline{g}^2 \; \text{weak-$^\ast$ in}\ L^{\infty}(\Gamma) \notag
\end{equation}
and by applying Theorem~\ref{stability-weak} to $\rho_n u_n^2$ we conclude that
$$
\rho_n u_n^2
\stackrel{*}{\rightharpoonup} \rho {u}^2\ \text{weak-$^\ast$ in}\ L^{\infty}((0, T) \times \Omega )
$$
and that
\begin{equation}
\label{e:convbur}
\text{Tr}(\rho_{n} u^2_{n} b_{n})
\stackrel{*}{\rightharpoonup}
\text{Tr}(\rho u^2 b)
\ \text{weak-$^\ast$ in}\ L^{\infty}(\Gamma ).
\end{equation}
Since the sequence $\| \rho_n \|_{L^\infty}$ is uniformly bounded, by recalling~\eqref{stability-3} we get $$
\rho^2_n u_n^2
\stackrel{*}{\rightharpoonup} \rho^2 {u}^2\ \text{weak-$^\ast$ in}\ L^{\infty}((0, T) \times \Omega )
$$
and hence
\begin{equation}
\label{e:square}
\rho^2_n u_n^2
{\rightharpoonup} \rho^2 {u}^2\ \text{weakly in}\ L^2((0, T) \times \Omega ).
\end{equation}
By combining~\eqref{e:ell2} and~\eqref{e:square} (note that \eqref{e:square}, tested against the constant function $1$, gives $\Vert \rho_n u_n \Vert_{L^2} \to \Vert \rho u \Vert_{L^2}$) we get that
$\rho_n u_n \longrightarrow \rho u$ strongly in
$L^2((0, T) \times \Omega )$ and this implies the first convergence in~\eqref{stability-37}.
Next, we establish the second convergence in~\eqref{stability-37}. Since $\Gamma $ is a set of finite measure, from \eqref{stability-9} and \eqref{e:convbur} we can infer that
\begin{equation}
\begin{aligned}
& \text{Tr}(\rho_{n} u_{n} b_{n}) \rightharpoonup \text{Tr}(\rho u b) \ \text{weakly in} \ L^{2}(\Gamma),\\
& \text{Tr}(\rho_{n} u^{2}_{n} b_{n}) \rightharpoonup \text{Tr}(\rho u^{2} b) \ \text{weakly in} \ L^{2}(\Gamma).
\end{aligned}
\label{stability-39}
\end{equation}
By using the uniform bounds for $\Vert \text{Tr}(\rho_{n} b_{n})\Vert_{\infty} $, we infer from the $L^{1}$ convergence of $\text{Tr}(\rho_{n} b_{n}) $ to $\text{Tr}(\rho b)$ that
\begin{equation}
\text{Tr}(\rho_{n} b_{n}) \xrightarrow[n \rightarrow \infty]{} \text{Tr}(\rho b) \ \text{strongly in}\ L^{2}(\Gamma).
\label{stability-40}
\end{equation} Next, we apply Lemma \ref{trace-renorm} (trace renormalization property) and we get that
\begin{equation}
[\text{Tr}(\rho_{n} u_{n} b_{n})]^{2}= \left[\frac{\text{Tr}(\rho_{n} u_{n} b_{n})}{\text{Tr}(\rho_{n} b_{n})} \right]^{2} [\text{Tr}(\rho_{n} b_{n})]^{2}= \text{Tr}(\rho_{n} u_{n}^{2} b_{n}) \text{Tr}(\rho_{n}b_{n})
\notag
\end{equation}
and
\begin{equation}
[\text{Tr}(\rho u b)]^{2}= \left[\frac{\text{Tr}(\rho u b)}{\text{Tr}(\rho b)} \right]^{2} [\text{Tr}(\rho b)]^{2}= \text{Tr}(\rho u^{2} b) \text{Tr}(\rho b).
\notag
\end{equation}
From \eqref{stability-39} and \eqref{stability-40}, we can then conclude that
\begin{equation}
[\text{Tr}(\rho_{n} u_{n} b_{n})]^{2} \rightharpoonup [\text{Tr}(\rho u b)]^{2} \ \text{weakly in}\ L^{2}(\Gamma),
\label{stability-41}
\end{equation}
and by recalling~\eqref{stability-39} the second convergence in \eqref{stability-37} follows.
\end{proof} Finally, we establish space-continuity properties of the vector field $(\rho u, \rho u b)$ analogous to those obtained in~\cite{Boyer,CDS1}.
\begin{theorem}\label{space-continuity} Under the same assumptions as in Theorem~\ref{IBVP-NC}, let $P$ be the vector field $P : = ( \rho, \rho b)$, $u$ be a distributional solution of~\eqref{prob-2} and
$\{\Sigma_{r} \}_{r \in I} \subseteq \mathbb R^d$ be a family of graphs as in Definition \ref{graph}. Also, fix $r_{0} \in I $ and let $\gamma_{0}, \gamma_{r}: (0,T) \times D \rightarrow \mathbb{R} $ be defined by
\begin{equation}
\begin{aligned}
\gamma_{0}(t,x_{1},\cdots,x_{d-1})&:= \emph{Tr}^{-}(uP,(0,T)\times \Sigma_{r_{0}})(t,x_{1},\cdots,x_{d-1},f(x_{1},\cdots,x_{d-1})-r_{0}),\\
\gamma_{r}(t,x_{1},\cdots,x_{d-1})&:=\emph{Tr}^{+}(uP,(0,T) \times \Sigma_{r})(t,x_{1},\cdots,x_{d-1},f(x_{1},\cdots,x_{d-1})-r) .
\end{aligned}
\label{space1}
\end{equation}
Then $\gamma_{r} \rightarrow \gamma_{0}$ strongly in $L^{1}((0,T)\times D) $ as $r \rightarrow r^{+}_{0} $.
\end{theorem} The proof of the above result follows the same strategy as the proof of~\cite[Proposition 3.5]{CDS1} and is therefore omitted.
\section{Applications to the Keyfitz and Kranzer system}
\label{s:KK}
In this section, we consider the initial-boundary value problem for the Keyfitz and Kranzer system~\cite{KK} of conservation laws in several space dimensions, namely
\begin{equation}
\left\{
\begin{array}{lll}
\partial_{t} U+\displaystyle{
\sum_{i=1}^{d} \partial_{x_{i}} (f^{i}(\vert U \vert) U)=0}
& \text{in}\ (0,T) \times \Omega \\
U = U_{0} & \text{at $t=0$} \\
U = U_{b} & \text{on} \ \Gamma.
\displaystyle{\phantom{\int}} \\
\end{array}
\right.
\label{KK1}
\end{equation}
Note that, in general, we cannot expect that the boundary datum is pointwise attained on the whole boundary $\Gamma$. We come back to this point in the following.
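A prototypical example, which we mention only for illustration, is obtained for $d=1$ and $f^{1}(r)=r$: in this case the equation at the first line of~\eqref{KK1} reads $\partial_{t} U + \partial_{x} \big( \vert U \vert U \big)=0$, where $U$ takes values in $\mathbb{R}^{N}$.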
We follow the same approach as in~\cite{ABD,AD,Br,Delellis2} and we formally split the equation at the first line of~\eqref{KK1} as the coupling between a scalar conservation law and a linear transport equation. More precisely, we set $F:=(f^{1},\cdots,f^{d})$ and we point out that the modulus
$\rho: = |U|$ formally solves the initial-boundary value problem
\begin{equation}
\left\{
\begin{array}{ll}
\partial_{t} \rho+\text{div} (F(\rho) \rho)=0 & \text{in}\ (0,T) \times \Omega\\
\rho =\vert U_{0} \vert & \text{at $t=0$}\\
\rho= \vert U_{b} \vert & \text{on}\, \Gamma.
\end{array}
\right.
\label{KK2}
\end{equation}
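For the reader's convenience, we recall the standard (and purely formal) computation behind this claim: if $U$ is a smooth solution of the equation at the first line of~\eqref{KK1} and $\vert U \vert >0$, then, setting $\rho:=\vert U \vert$ and using the identity $U \cdot \partial_{x_{i}} U = \rho\, \partial_{x_{i}} \rho$, we get
$$
\partial_{t} \rho = \frac{U}{\rho} \cdot \partial_{t} U = - \sum_{i=1}^{d} \frac{U}{\rho} \cdot \partial_{x_{i}} \big( f^{i}(\rho) U \big) = - \sum_{i=1}^{d} \Big( (f^{i})'(\rho)\, \rho + f^{i}(\rho) \Big) \partial_{x_{i}} \rho = - \sum_{i=1}^{d} \partial_{x_{i}} \big( f^{i}(\rho) \rho \big),
$$
which is the equation at the first line of~\eqref{KK2}.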
We follow~\cite{BLN,CR,Serre2} and we extend the notion of \emph{entropy admissible} solution (see~\cite{Kr}) to initial-boundary value problems.
\begin{definition}
A function $\rho \in L^{\infty}((0,T) \times \Omega) \cap BV((0,T) \times \Omega) $ is an entropy solution of \eqref{KK2} if for all $k \in \mathbb{R}$,
\begin{equation}
\begin{aligned}
&\int_{0}^{T} \int_{\Omega} \Big\{\vert \rho(t,x)-k \vert\ \partial_{t} \psi + \emph{sgn}(\rho-k)[F(\rho)-F(k)] \cdot \nabla \psi \Big\} \ dx dt \\
&+ \int_{\Omega} \vert \rho_{0}-k \vert\ \psi(0, \cdot) \ dx -\int_{0}^{T} \int_{\partial \Omega} \emph{sgn}(\vert U_{b} \vert(t,x)-k)\ [F(T(\rho))-F(k)]\cdot \vec n \ \psi \ d\mathcal{H}^{d-1} dt \geq 0,
\end{aligned}
\notag
\end{equation}
for any positive test function $\psi \in C^{\infty}_{c}([0,T) \times \mathbb{R}^{d}; \mathbb{R}^{+}).$ In the above expression $T(\rho)$ denotes the trace of the function $\rho$ on the boundary $\Gamma$ and $\vec n$ is the outward pointing, unit normal vector to $\Gamma$.
\end{definition}
Existence and uniqueness results for entropy admissible solutions of the above systems were obtained by Bardos, le Roux and N{\'e}d{\'e}lec~\cite{BLN} by extending the analysis by Kru{\v{z}}kov to initial-boundary value problems (see also~\cite{CR,Serre2} for a more recent
discussion). Note, however, that one cannot expect that the boundary value $|U_b|$ is pointwise attained on the whole boundary $\Gamma$, see again~\cite{BLN,CR,Serre2} for a more extended discussion.
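As an elementary illustration (ours), consider $d=1$, $\Omega=(0,1)$ and $f^{1} \equiv 1$: the equation at the first line of~\eqref{KK2} is then the transport equation $\partial_{t} \rho + \partial_{x} \rho =0$, the point $x=1$ is an outflow boundary, and the trace of $\rho$ at $x=1$ is completely determined by the initial datum and by the datum prescribed at $x=0$; hence a boundary datum prescribed at $x=1$ is in general not attained.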
Next, we introduce the equation for the \emph{angular part} of the solution of~\eqref{KK1}. We recall that, if $|U_b|$ and $|U_0|$ are of bounded variation, then so is $\rho$ and hence the trace of $F(\rho) \rho$ on $\Gamma$ is well defined. As usual, we denote it by $T(F(\rho) \rho)$. In particular, we can introduce the set $$
\Gamma^- : = \big\{ (t, x) \in \Gamma: \; T(F(\rho) \rho) \cdot \vec n <0 \big\}, $$
where as usual $\vec n$ denotes the outward pointing, unit normal vector to $\Gamma$. We consider the vector $\theta=(\theta_{1},\cdots,\theta_{N}) $ and we impose
\begin{equation}
\left\{
\begin{array}{llll}
\partial_{t}(\rho \theta)+\text{div}(F(\rho) \rho \theta)=0 & \text{in}\ (0,T) \times \Omega \phantom{\displaystyle{\int}}\\
\theta=\displaystyle{\frac{U_{0}}{\vert U_{0} \vert}}& \text{at $t=0$}\\
\theta=\displaystyle{\frac{U_{b}}{\vert U_{b} \vert}} & \text{on} \ \Gamma^{-},
\end{array}
\label{KK5}
\right.
\end{equation}
where the ratios $U_0 / |U_0|$ and $U_b / |U_b|$ are defined to be an arbitrary unit vector when $|U_0|=0$ and $|U_b|=0$, respectively. Note that the product $U=\theta \rho$ formally satisfies the equation at the first line of~\eqref{KK1}. We now extend the notion of \emph{renormalized entropy solution} given in~\cite{ABD,AD,Delellis2} to initial-boundary value problems.
\begin{definition}
\label{d:res}
A renormalized entropy solution of~\eqref{KK1} is a function $U \in L^\infty ( (0, T) \times \Omega; \mathbb R^N)$ such that $U = \rho \theta$, where
\begin{itemize}
\item $\rho = |U|$ and $\rho$ is an entropy admissible solution of~\eqref{KK2}.
\item $\theta = (\theta_1, \dots, \theta_N)$ is a distributional solution, in the sense of Definition~\ref{d:distrsol}, of~\eqref{KK5}.
\end{itemize}
\end{definition}
Some remarks are in order here. First, we can repeat the proof of \cite[Proposition 5.7]{Delellis1} and conclude that, under fairly general assumptions, any renormalized entropy solution is an entropy solution. More precisely, let us fix a renormalized entropy solution $U$ and an \emph{entropy-entropy flux pair} $(\eta, Q)$, namely a pair of functions $\eta: \mathbb R^N \to \mathbb R$, $Q: \mathbb R^N \to \mathbb R^d$ such that
$$
\nabla \eta D f^i = \nabla Q^i, \quad \text{for every $i=1, \dots, d$.}
$$
Assume that
$$
\mathcal L^1 \big\{ r \in \mathbb R: \; (f^1)'(r) = \dots = (f^d)' (r)=0 \big\}=0. $$
By arguing as in~\cite{Delellis1} we conclude that $$
\int_0^T \int_\Omega \eta (U) \partial_t \phi + Q(U)\cdot \nabla \phi \, dx dt \ge 0
$$
for every \emph{entropy-entropy flux pair} $(\eta, Q)$ with $\eta$ convex and for every nonnegative test function $\phi \in C^\infty_c ((0, T) \times \Omega)$.
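A concrete admissible choice, which we record only as an illustration, is $\eta(U):=\vert U \vert$ and $Q^{i}(U):= f^{i}(\vert U \vert)\, \vert U \vert$: for $U \neq 0$ a direct computation gives
$$
\nabla \eta(U)\, D f^{i}(U) = \Big( f^{i}(\vert U \vert) + \vert U \vert\, (f^{i})'(\vert U \vert) \Big) \frac{U}{\vert U \vert} = \nabla Q^{i}(U),
$$
and $\eta$ is convex.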
Second, we point out that, as for the Bardos, le Roux and N{\'e}d{\'e}lec~\cite{BLN} solutions of scalar initial-boundary value problems, renormalized entropy solutions of the Keyfitz and Kranzer system do not, in general, attain the boundary datum $U_b$ pointwise on the whole boundary $\Gamma$.
We now state our well-posedness result.
\begin{theorem}
\label{t:KK} Assume $\Omega$ is a bounded open set with $C^2$ boundary. Also, assume that $U_0 \in L^\infty (\Omega; \mathbb R^N)$ and $U_b \in L^\infty (\Gamma; \mathbb R^N)$ satisfy $|U_0| \in
BV ( \Omega)$, $|U_b| \in BV (\Gamma).$ Then there is a unique renormalized entropy solution of~\eqref{KK1} that satisfies $U \in L^\infty ((0, T)\times \Omega; \mathbb R^N)$.
\end{theorem}
\begin{proof} We first establish existence, next uniqueness. \\
{\sc Existence:} first, we point out that the results in~\cite{BLN,CR,Serre2} imply that there is an entropy admissible solution of~\eqref{KK2} satisfying
$\rho \in L^\infty ((0, T) \times \Omega) \cap BV ((0, T) \times \Omega).$
Also, $\rho$ satisfies the maximum principle, namely
\begin{equation}
\label{e:rhomaxprin}
0 \leq \rho \leq \max \big\{ \| U_0 \|_{L^\infty}, \|U_b \|_{L^\infty} \big\}.
\end{equation}
For every $j=1, \dots, N$ we consider the initial-boundary value problem
\begin{equation}
\left\{
\begin{array}{llll}
\partial_{t}(\rho \theta_j)+\text{div}(F(\rho) \rho \theta_j)=0 & \text{in}\ (0,T) \times \Omega \phantom{\displaystyle{\int}}\\
\theta_j=\displaystyle{\frac{U_{0j}}{\vert U_{0} \vert}}& \text{at $t=0$}\\
\theta_j=\displaystyle{\frac{U_{bj}}{\vert U_{b} \vert}} & \text{on} \ \Gamma^{-},
\end{array}
\label{KK6}
\right.
\end{equation} where $U_{0j}$ and $U_{bj}$ denote the $j$-th components of $U_0$ and $U_b$, respectively. The existence of a distributional solution $\theta_j$ follows from the existence part in Theorem~\ref{IBVP-NC}.
We now set $U: = \rho \theta$, where $\theta = (\theta_1, \dots, \theta_N)$. To conclude the existence part we are left to show that $|U|=\rho$. To this end, we point out that, by combining~\cite[Lemma 5.10]{Delellis1} (renormalization property inside the domain) with Lemma~\ref{trace-renorm} (trace renormalization property) and by arguing as in \S~\ref{s:uni}, we conclude that, for every $j=1, \dots, N$, $\theta^2_j$ is a distributional solution, in the sense of Definition~\ref{d:distrsol}, of the initial-boundary value problem
\begin{equation*}
\left\{
\begin{array}{llll}
\partial_{t}(\rho \theta^2_j)+\text{div}(F(\rho) \rho \theta^2_j)=0 & \text{in}\ (0,T) \times \Omega \phantom{\displaystyle{\int}}\\
\theta^2_j=\displaystyle{\frac{U^2_{0j}}{\vert U_{0} \vert^2}}& \text{at $t=0$}\\
\theta^2_j=\displaystyle{\frac{U^2_{bj}}{\vert U_{b} \vert^2}} & \text{on} \ \Gamma^{-}.
\end{array}
\right.
\end{equation*}
By summing over $j=1, \dots, N$, we conclude that $|\theta|^2$ is a distributional solution of \begin{equation*}
\left\{
\begin{array}{llll}
\partial_{t}(\rho |\theta|^2)+\text{div}(F(\rho) \rho |\theta|^2)=0 & \text{in}\ (0,T) \times \Omega \\
|\theta|^2=1& \text{at $t=0$}\\
|\theta|^2=1 & \text{on} \ \Gamma^{-}.
\end{array}
\right.
\end{equation*}
By recalling the equation at the first line of~\eqref{KK2} we infer that $|\theta|^2 =1$ is a solution of the above initial-boundary value problem. By the uniqueness part of Theorem~\ref{IBVP-NC}, we then deduce that $\rho |\theta|^2= \rho$ and this concludes the proof of the existence part. \\
{\sc Uniqueness:} assume $U_1$ and $U_2$ are two renormalized entropy solutions, in the sense of Definition~\ref{d:res}, of the initial-boundary value
problem~\eqref{KK1}. Then $\rho_1: = |U_1|$ and $\rho_2 : =|U_2|$ are two entropy admissible solutions of the initial-boundary value problem~\eqref{KK2} and hence $\rho_1=\rho_2$. By applying the uniqueness part of Theorem~\ref{IBVP-NC} to the initial-boundary value problem~\eqref{KK6}, for every $j=1, \dots, N$, we can then conclude that $U_1 =U_2$.
\end{proof}
\section*{Acknowledgments} This paper has been written while APC was a postdoctoral fellow at the University of Basel supported by a ``Swiss Government Excellence Scholarship'' funded by the State Secretariat for Education, Research and Innovation (SERI). APC would like to thank the SERI for the support and the Department of Mathematics and Computer Science of the University of Basel for the kind hospitality. GC was partially supported by the Swiss National Science Foundation (Grant 156112). LVS is a member of the GNAMPA group of INdAM (``Istituto Nazionale di Alta Matematica"). Also, she would like to thank the Department of Mathematics and Computer Science of the University of Basel for the kind hospitality during her visit, during which part of this work was done.
\small
\end{document}
\begin{document}
\newcommand{\brac}[1]{\left(#1\right)} \newcommand{\bfrac}[2]{\brac{\frac{#1}{#2}}} \newcommand{\set}[1]{\{#1\}} \newcommand{\stack}[2]{\genfrac{}{}{0pt}{}{#1}{#2}} \def{\cal T}{{\cal T}} \def{\bf M}{{\bf M}} \def{\bf k}{{\bf k}} \def{\cal C}{{\cal C}} \def{\cal E}{{\cal E}} \def{\cal F}{{\cal F}} \def2^{\tilde O(1/\epsilon^2)}{2^{\tilde O(1/\epsilon^2)}} \def{\rm Cond}{{\rm Cond}} \def\hat{u}{\hat{u}} \def{\bf C}{{\bf C}} \def{\bf T}{{\bf T}} \def\hat{{\bf D}}{\hat{{\bf D}}} \def{\bf M}{{\bf M}} \def{\bf D}{{\bf D}} \def\hbox{Norm}{\hbox{Norm}} \def{\textstyle{1\over2}}{{\textstyle{1\over2}}} \def{\bf R}{{\bf R}} \def\recip#1{{1\over#1}} \def\hat{d}_{i,j}{\hat{d}_{i,j}} \def{\rm Vol}{{\rm Vol}} \def\hbox{ for }{\hbox{ for }} \def\hat{T}{\hat{T}} \def{\bf A}{{\bf A}} \def{\bf W}{{\bf W}} \def\hat{\bW}{\hat{{\bf W}}} \def\hat{c}{\hat{c}} \def\epsilon{\epsilon} \def{\rm ind}{{\rm ind}} \def\hat{w}{\hat{w}} \def\bar{\sigma}{\bar{\sigma}} \def{\cal A}{{\cal A}} \def{\cal B}{{\cal B}}
\def{\cal F}{{\cal F}} \def{\bf E}{{\bf E}} \def{\cal H}{{\cal H}} \def{\bf E}{{\bf E}} \def{\cal S}{{\cal S}} \def{\cal P}{{\cal P}} \def{\cal Q}{{\cal Q}} \def{\rm ind}{{\rm ind}} \def\bar{n}{\bar{n}} \def\bar{\nu}{\bar{\nu}} \def\alpha{\alpha} \def\beta{\beta} \def\delta{\delta} \def\Delta{\Delta} \def\epsilon{\epsilon} \def\phi{\phi} \def\gamma{\gamma} \def\Gamma{\Gamma} \def\kappa{\kappa} \def\lambda{\lambda} \def\Kappa{\Kappa} \def\zeta{\zeta} \def\theta{\theta} \def\Theta{\Theta} \def\lambda{\lambda}
\def\mu{\mu} \def\nu{\nu} \def\pi{\pi} \def\Pi{\Pi} \def\rho{\rho} \def\Rho{\Rho} \def\sigma{\sigma} \def\Sigma{\Sigma} \def\tau{\tau} \def\omega{\omega} \def\Omega{\Omega} \def\rule[-3.5mm]{0.1mm}{9mm}{\rule[-3.5mm]{0.1mm}{9mm}} \def{\cal N}{{\cal N}} \def\mbox{{\bf Pr}}{\mbox{{\bf Pr}}} \def{\cal G}{{\cal G}} \def{\cal A}{{\cal A}} \def{\bf whp }{{\bf whp }} \def{\bf whp}{{\bf whp}} \def{\bf Pr}{{\bf Pr}} \def\hat{\e}{\hat{\epsilon}} \def{\bf D}{{\bf D}} \def{\bf W}{{\bf W}} \def{\bf B}{{\bf B}} \def\hat{r}{\hat{r}} \def\hat{R}{\hat{R}}
\newcommand{\ratio}[2]{\mbox{${#1\over #2}$}} \newcommand{\bbD}[1]{\bar{{\bf D}}^{(#1)}} \newcommand{\gap}[1]{\mbox{\hspace{#1 in}}} \newcommand{\bD}[1]{{\bf D}^{(#1)}} \newcommand{\hbD}[1]{\hat{{\bf D}}^{(#1)}} \newcommand{\bTT}[1]{{\bf T}^{(#1)}} \newcommand{\mbox{$\lim_{n \rightarrow \infty}$}}{\mbox{$\lim_{n \rightarrow \infty}$}} \newcommand{{\bf Proof\hspace{2em}}}{{\bf Proof\hspace{2em}}} \newcommand{\mbox{$\cal T$}}{\mbox{$\cal T$}} \newcommand{\hspace*{\fill}\mbox{$\Box$}}{\hspace*{\fill}\mbox{$\Box$}} \newcommand{\bfm}[1]{\mbox{\boldmath $#1$}} \newcommand{\mbox{\bfm{R}}}{\mbox{\bfm{R}}} \newcommand{\mbox{\bf E}}{\mbox{\bf E}} \newcommand{\mbox{\bf E}}{\mbox{\bf E}}
\newcommand{\card}[1]{\mbox{$|#1|$}} \newcommand{\scaps}[1]{\mbox{\sc #1}} \newcommand{\rdup}[1]{\lceil #1 \rceil } \newcommand{\rdown}[1]{\lfloor #1 \rfloor } \newcommand{\mnote}[1]{\marginpar{\footnotesize\raggedright#1}} \newcommand{\right}{\right} \newcommand{\left}{\left}
\newcommand{\mbox{\rm e}}{\mbox{\rm e}} \newcommand{\setminus}{\setminus}
\newenvironment{proof}{\noindent{\bf Proof\,}}{
$\Box$} \newtheorem{remark}{Remark}
\def{\bf x}{{\bf x}} \def{\cal G}_{\n,\m}^{\d\geq 2}{{\cal G}_{\nu,\mu}^{\delta\geq 2}} \def{G}_{\n,\m}^{\d\geq 2}{{G}_{\nu,\mu}^{\delta\geq 2}}
\newcommand{\con}[2]{#1\leftrightarrow #2} \newcommand{\mathrm{dist}}{\mathrm{dist}} \newcommand{D^{\mathrm{tree}}}{D^{\mathrm{tree}}} \newcommand{\mbox{\sc Ehr}}{\mbox{\sc Ehr}} \newcommand{\mathrm{diam}}{\mathrm{diam}}
\title{First Order Definability of Trees and\\ Sparse Random Graphs}
\renewcommand{\thefootnote}{\fnsymbol{footnote}}
\author{Tom Bohman\footnotemark[1]\ \footnotemark[2], Alan Frieze\footnotemark[1]\ \footnotemark[3],
Tomasz {\L}uczak\footnotemark[4], Oleg Pikhurko\footnotemark[1]\ \footnotemark[5],\\ Clifford Smyth\footnotemark[1], Joel
Spencer\footnotemark[6], and Oleg Verbitsky\footnotemark[7]}
\date{}
\maketitle
\footnotetext[1]{Department of Mathematical Sciences, Carnegie Mellon University, Pittsburgh, PA 15213, USA.} \footnotetext[2]{Partially supported by NSF grant DMS-0401147.} \footnotetext[3]{Partially supported by NSF Grant CCR-0200945.} \footnotetext[4]{Department of Discrete Mathematics, Adam Mickiewicz
University, Pozna\'n 61-614, Poland. Partially supported by KBN grant 1 P03A 025 27.} \footnotetext[5]{Partially supported by the Berkman Faculty Development Fund, CMU.} \footnotetext[6]{Courant Institute, New York University, New York, NY 10012, USA.} \footnotetext[7]{Institut f\"ur Informatik, Humboldt Universit\"at, Berlin 10099, Germany. Supported by an Alexander von Humboldt fellowship.}
\renewcommand{\thefootnote}{\arabic{footnote}}
\begin{abstract}
Let $D(G)$ be the smallest quantifier depth of a first order formula which is true for a graph $G$ but false for any other non-isomorphic graph. This can be viewed as a measure for the first order descriptive complexity of $G$.
We will show that almost surely $D(G)=\Theta(\frac{\ln n}{\ln\ln n})$, where $G$ is a random tree of order $n$ or the giant component of a random graph $\C G(n,\frac cn)$ with constant $c>1$. These results rely on computing the maximum of $D(T)$ for a tree $T$ of order $n$ and maximum degree $l$, so we study this problem as well.\end{abstract}
\section{Introduction}
This paper deals with graph properties expressible in first order logic. The vocabulary consists of variables, connectives ($\vee$, $\wedge$ and $\neg$), quantifiers ($\exists$ and $\forall$), and two binary relations: the equality and the graph adjacency ($=$ and $\sim$ respectively). The variables denote vertices only so we are not allowed to quantify over sets or relations. The notation $G\models A$ means that a graph $G$ is a model for a \emph{sentence} $A$ (a first order formula without free variables); in other words, $A$ is true for the graph $G$. All sentences and graphs are assumed to be finite. The Reader is referred to Spencer's book~\cite{spencer:slrg} (or to~\cite{kim+pikhurko+spencer+verbitsky:03rsa}) for more details.
A first order sentence $A$ \emph{distinguishes} $G$ from $H$ if $G\models A$ but $H\not\models A$. Further, we say that $A$ \emph{defines} $G$ if $A$ distinguishes $G$ from any non-isomorphic graph $H$. In other words, $G$ is the unique (up to an isomorphism) finite model for $A$.
The \emph{quantifier depth} (or simply \emph{depth}) $D(A)$ is the largest number of nested quantifiers in $A$. This parameter is closely related to the complexity of checking whether $G\models A$.
The main parameter we will study is $D(G)$, the smallest quantifier depth of a first order formula defining $G$. It was first systematically studied by Pikhurko, Veith and Verbitsky~\cite{pikhurko+veith+verbitsky:03} (see also~\cite{pikhurko+verbitsky:03}). In a sense, a defining formula $A$ can be viewed as the canonical form for $G$ (except that $A$ is not unique): in order to check whether $G\cong H$ it suffices to check whether $H\models A$. Unfortunately, this approach does not seem to lead to better isomorphism algorithms but this notion, being on the borderline of combinatorics, logic and computer science, is interesting on its own and might find unforeseen applications.
Within a short time-span various results on the values of $D(G)$ for order-$n$ graphs appeared. The initial papers~\cite{pikhurko+veith+verbitsky:03,pikhurko+verbitsky:03} studied the maximum of $D(G)$ (the `worst' case). The `best' case is considered by Pikhurko, Spencer, and Verbitsky~\cite{pikhurko+spencer+verbitsky:04} while Kim, Pikhurko, Spencer and Verbitsky~\cite{kim+pikhurko+spencer+verbitsky:03rsa} obtained various results for random graphs.
Here we study these questions for trees and sparse random structures. Namely, the three main questions we consider are:
\begin{description}
\item[Section~\ref{general}:] What is $D^{\mathrm{tree}}(n,l)$, the maximum of $D(T)$ over all trees of order at most $n$ and maximum degree at most $l$?
\item[Section~\ref{giant}:] What is $D(G)$, where $G$ is the giant component of a random graph $\C G(n,\frac{c}{n})$ for constant $c>1$?
\item[Section~\ref{random}:] What is $D(T)$ for a random tree $T$ of order $n$?
\end{description}
In all cases we determine the order of magnitude of the studied function. Namely, we prove that $D^{\mathrm{tree}}(n,l)=\Theta(\frac{l\ln n}{\ln l})$, and whp we have $D(G)=\Theta(\frac{\ln n}{\ln\ln n})$, whenever $G$ is a random tree of order $n$ or the giant component of a random graph $\C G(n,\frac cn)$ with constant $c>1$. (The acronym \emph{whp} stands for `with high probability', i.e.,\ with probability $1-o(1)$.) Moreover, for some cases involving trees we estimate the smallest quantifier depth of a first order formula defining $G$ up to a factor of $1+o(1)$. For instance, we show that for a random tree $T$ of order $n$ we have whp $D(T)=(1+o(1))\frac{\ln n}{\ln\ln n}$.
\comment{ also we prove that $D^{\mathrm{tree}}(n,l)=(1/2+o(1))\frac{l\ln n}{\ln l}$ whenever both $l=l(n)$ and $\ln n/\ln l$ tends to infinity as $n\to\infty$.
}
\section{Further Notation and Terminology}
Our main tool in the study of $D(G)$ is the \emph{Ehrenfeucht game}. Its description can be found in Spencer's book~\cite{spencer:slrg} whose terminology we follow (or see~\cite[Section~2]{kim+pikhurko+spencer+verbitsky:03rsa}), so here we will be very brief.
Given two graphs $G$ and $G'$, the \emph{Ehrenfeucht game} $\mbox{\sc Ehr}_k(G,G')$ is a perfect information game played by two players, called \emph{Spoiler} and \emph{Duplicator}, and consists of $k$ rounds, where $k$ is known in advance to both players. For brevity, let us refer to Spoiler as `him' and to Duplicator as `her'. In the $i$-th round, $i=1,\dots,k$, Spoiler selects one of the graphs $G$ and $G'$ and marks one of its vertices by $i$; Duplicator must put the same label $i$ on a vertex in the other graph. At the end of the game let $x_1,\dots,x_k$ be the vertices of $G$ marked $1,\dots,k$ respectively, regardless of who put the label there; let $x_1',\dots,x_k'$ be the corresponding vertices in $G'$. Duplicator wins if the correspondence $x_i\leftrightarrow x_i'$ is a partial isomorphism, that is, we require that $\{x_i,x_j\}\in E(G)$ iff $\{x_i',x_j'\}\in E(G')$ as well as that $x_i=x_j$ iff $x_i'=x_j'$. Otherwise, Spoiler wins.
The key relation is that $D(G,G')$, the smallest depth of a first order sentence $A$ distinguishing $G$ from $G'$, is equal to the smallest $k$ such that Spoiler can win $\mbox{\sc Ehr}_k(G,G')$. Also,
\beq{D}
D(G)=\max_{G'\not\cong G} D(G,G'),
\end{equation}
see e.g.~\cite[Lemma~1]{kim+pikhurko+spencer+verbitsky:03rsa}.
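The characterization just stated can be checked mechanically on very small instances. The following Python fragment (our illustration, not part of the paper; it assumes the \texttt{networkx} package, and the function names are ours) decides by brute force whether Duplicator survives $k$ rounds, and thereby computes $D(G,G')$ for tiny graphs.
\begin{verbatim}
import networkx as nx

def partial_iso(G, H, xs, ys):
    # The marked vertices must form a partial isomorphism:
    # equalities and adjacencies have to agree in both graphs.
    for i in range(len(xs)):
        for j in range(i + 1, len(xs)):
            if (xs[i] == xs[j]) != (ys[i] == ys[j]):
                return False
            if G.has_edge(xs[i], xs[j]) != H.has_edge(ys[i], ys[j]):
                return False
    return True

def duplicator_wins(G, H, xs, ys, k):
    # True iff Duplicator survives k further rounds of Ehr_k(G, H)
    # after the vertices xs (in G) and ys (in H) have been marked.
    if not partial_iso(G, H, xs, ys):
        return False
    if k == 0:
        return True
    for v in G.nodes:   # Spoiler moves in G, Duplicator replies in H ...
        if not any(duplicator_wins(G, H, xs + [v], ys + [w], k - 1)
                   for w in H.nodes):
            return False
    for w in H.nodes:   # ... and symmetrically for moves in H.
        if not any(duplicator_wins(G, H, xs + [v], ys + [w], k - 1)
                   for v in G.nodes):
            return False
    return True

def D(G, H, k_max=6):
    # D(G, H): the least k for which Spoiler wins Ehr_k(G, H).
    for k in range(k_max + 1):
        if not duplicator_wins(G, H, [], [], k):
            return k
    return None  # not distinguished within k_max rounds

print(D(nx.path_graph(3), nx.cycle_graph(3)))  # prints 2
\end{verbatim}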
Sometimes it will be notationally more convenient to prove the bounds on $D(G,G')$ for colored graphs which generalize the usual (uncolored) graphs. Graphs $G,G'$ are \emph{colored} if we have unary relations $U_i:V(G)\cup V(G')\to\{0,1\}$, $i\in I$. We say that the vertices in the set $U_i^{-1}(1)$ have color $i$. Note that some vertices may be uncolored and some may have more than one color. There are no restrictions on a color class, i.e.,\ it does not have to be an independent set. When the Ehrenfeucht game is played on colored graphs, Duplicator must additionally preserve the colors of vertices.
Colorings can be useful even if we prove results for uncolored graphs. For example, if $x\in V(G)$ and $x'\in V(G')$ were selected in some round, then, without changing the outcome of the remaining game, we can remove $x$ and $x'$ from $G$ and $G'$ respectively, provided we color their neighbors with a new color. (Note that in an optimal strategy of Spoiler, there is no need to select the same vertex twice.)
We will also use the following fact, which can be easily deduced from the general theory of the Ehrenfeucht game. Let $x,y\in V(G)$ be distinct vertices. Then the smallest quantifier depth of a first order formula $\Phi(z)$ with one free variable $z$ such that $G\models \Phi(x)$ but $G\not\models \Phi(y)$ is equal to the minimum $k$ such that Spoiler can win the $(k+1)$-round game $\mbox{\sc Ehr}_{k+1}(G,G)$, where the vertices $x_1=x$ and $x_1'=y$ have been selected in the first round.
In this paper $\ln$ denotes the natural logarithm, while the logarithm base $2$ is written as $\log_2$.
\section{General Trees}\label{general}
Let $D^{\mathrm{tree}}(n,l)$ be the maximum of $D(T)$ over all colored trees of order at most $n$ and maximum degree at most $l$. We split the possible range of $l,n$ into a few cases.
\bth{MaxDeg} Let both $l$ and $\ln n/\ln l$ tend to infinity. Then
\beq{MaxDeg}
D^{\mathrm{tree}}(n,l)= \left(\frac12+o(1)\right)\, \frac{ l\ln n}{\ln l}.
\end{equation}
In fact, the lower bound can be achieved by uncolored trees.
\end{theorem}
In order to prove Theorem~\ref{th:MaxDeg} we need some preliminary results. Let $\mathrm{dist}_G(x,y)$ denote the distance in $G$ between $x,y\in V(G)$.
\blm{Distance} Suppose $x,y\in V(G)$ at distance $k$ were selected while their counterparts $x',y'\in V(G')$ are at a strictly larger distance (possibly infinity). Then Spoiler can win in at most $\log_2k+1$ extra moves, playing all of the time inside $G$.\end{lemma}
\bpf We prove the claim by induction on $k$. Assume $k\ge 2$ and choose a shortest $xy$-path $P$. Spoiler selects a vertex $w\in V(G)$ which is a \emph{middle vertex} of $P$, that is, $k_1=\mathrm{dist}_P(x,w)$ and $k_2=\mathrm{dist}_P(y,w)$ differ by at most one. Suppose that Duplicator responds with $w'\in G'$. It is impossible that $G'$ contains both an $x'w'$-path of length at most $k_1$ and a $y'w'$-path of length at most $k_2$, for otherwise $\mathrm{dist}(x',y')\le k_1+k_2=k$. If, for example, the latter does not exist, then we apply induction to the pair $y,w\in G$. The required bound follows by observing that $k_1,k_2\le \ceil{\frac k2}$.\qed
The same method gives the following lemma.
\blm{path} Let $G,G'$ be colored graphs. Suppose that $x,y\in V(G)$ and $x',y'\in V(G')$ have been selected such that $G$ contains some $xy$-path $P$ of length at most $k$ such that some vertex of $P$ has color $c$ while this is not true with respect to $G'$. Then Spoiler can win in at most $\log_2 k +1$ moves playing all of the time inside $G$.
The same conclusion holds if all internal vertices of $P$ have colors from some fixed set $A$ while any $x'y'$-path of length at most $k$ has a color not in $A$.\qed\end{lemma}
\blm{Tree} Let $T$ be a tree of order $n$ and let $T'$ be a graph which is not a tree. Then $D(T,T')\le \log_2n+3$.\end{lemma}
\bpf
If $T'$ is not connected, Spoiler selects two vertices $x',y'\in T'$ from different components. Then he switches to $T$ and applies Lemma~\ref{lm:Distance}, winning in at most $\log_2 n+3$ moves in total.
Otherwise, let $C'\subset T'$ be a cycle of the shortest length $l$. If $l>2n+1$, then Spoiler picks two vertices $x',y'$ at distance at least $n$ in $C'$ (or equivalently in $T'$). Since the diameter of $T$ is at most $n-1$, Spoiler switches to $T$ and starts halving the $xy$-path, making at most $\log_2 n+3$ moves in total, cf.\ Lemma~\ref{lm:Distance}.
If $l\le 2n+1$, then Spoiler selects some three adjacent vertices of $C'$, say $x',z',y'$ in this order. Now, he applies Lemma~\ref{lm:path} with respect to $k=l-2$.\qed
\bpf[Proof of Theorem~\ref{th:MaxDeg}.] Let us prove the upper bound first.
Let $T$ be any tree of order at most $n$ and maximum degree at most $l$. Let $T'$ be an arbitrary colored graph not isomorphic to $T$. By Lemma~\ref{lm:Tree} we can assume that $T'$ is a tree.
In fact, we will be proving the upper bound on the version of the $(T,T')$-game, wherein some distinguished vertex, called the \emph{root}, is given and all graph isomorphisms must additionally preserve the root. (This can be achieved by introducing a new color $U_0$ which is assigned to the root only.) The obtained upper bound, if increased by $1$, applies to the original function $D(T,T')$ because we can regard $x_1$ and $x_1'$, the first two moves of the Ehrenfeucht game, as the given roots.
It is easy to show that $T$ contains a vertex $x\in T$ such that any component of $T-x$ has order at most $\frac n2$. We call such a vertex a \emph{median} of $T$. Spoiler selects this vertex $x$; let Duplicator reply with $x'$. We can assume that the degrees of $x$ and $x'$ are the same: otherwise Spoiler can exhibit this discrepancy in at most $l+1$ extra moves.
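The existence of a median is constructive: computing subtree sizes from an arbitrary root immediately yields one. A minimal Python sketch (ours, assuming the \texttt{networkx} package) is the following.
\begin{verbatim}
import networkx as nx

def median(T):
    # Return a vertex x of the tree T such that every component of
    # T - x has order at most n/2.
    n = T.number_of_nodes()
    root = next(iter(T.nodes))
    parent = nx.dfs_predecessors(T, root)
    size = {v: 1 for v in T.nodes}
    for v in nx.dfs_postorder_nodes(T, root):
        if v in parent:
            size[parent[v]] += size[v]    # children are processed first
    for v in T.nodes:
        # components of T - v: the subtrees below v and the part above v
        heaviest = max([size[c] for c in T.neighbors(v)
                        if parent.get(c) == v] + [n - size[v]])
        if heaviest <= n // 2:
            return v

T = nx.path_graph(7)
assert median(T) == 3    # the middle vertex of a path is a median
\end{verbatim}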
\comment{Alternating sides at most once.}
We view the components of $T-x$ and $T'-x'$ as colored rooted graphs with the neighbors of $x$ and $x'$ being the roots. As $T\not\cong T'$, some component $C_1$ has different multiplicities $m_1$ and $m_1'$ in $T-x$ and $T'-x'$. As $d(x)=d(x')$, we have at least two such components. Assume that for $C_1$ and $C_2$ we have $m_1>m_1'$ and $m_2<m_2'$. By the condition on the maximum degree, $m_1'+m_2\le l-1$. Hence, $\min(m_1',m_2)\le \frac{l-1}2$. Let us assume, for example, that $m_1'\le \frac{l-1}2$. Spoiler chooses the roots of any $m_1'+1$ $C_1$-components of $T-x$. It must be the case that some vertices $y\in V(T)$ and $y'\in V(T')$ have been selected so that $y$ lies in a $C_1$-component $F\subset T-x$ while $y'$ lies in a component $F'\subset T'-x'$ not isomorphic to $C_1$. Let $n_1$ be the number of vertices in $F$. By the choice of $x$, $n_1\le \frac n2$.
Now, Spoiler restricts his moves to $V(F)\cup V(F')$. If Duplicator moves outside this set, then Spoiler uses Lemma~\ref{lm:path}, winning in at most $\log_2n+O(1)$ moves. Otherwise Spoiler applies the same strategy recursively to the pair $(F,F')$.
Let $f(n,l)$ denote the largest number of moves (over all trees $T,T'$ with $v(T)\le n$, $\Delta(T)\le l$, and $T\not\cong T'$) that Duplicator can survive against the above strategy with the additional restriction that a situation where Lemma~\ref{lm:path} can be applied never occurs and we always have that $d(x)=d(x')$. Clearly,
\beq{DTf}
D^{\mathrm{tree}}(n,l)\le f(n,l) + \log_2n + l +O(1).
\end{equation}
As $m_1\le \frac{n-1}{n_1}$, we get the following recursive bound on $f$.
\beq{DT}
\textstyle
f(n,l)\le \max\Big\{2 + \min(\frac{l-1}2,\frac{n-1}{n_1}) +
f(n_1,l): 1\le n_1\le \frac n2\Big\}.
\end{equation}
Denoting $n_0=n$ and unfolding~\req{DT} as long as $n_i\ge 1$, say $s$ times, we obtain that $f(n,l)$ is bounded by the maximum of
\beq{f}
2s + \sum_{i=1}^s \min\left(\frac{l-1}2,\frac{n_{i-1}}{n_i}\right),
\end{equation}
over all sequences $n_1,\dots,n_s$ such that
\beq{n}
1\le n_i \le \frac{n_{i-1}}2,\quad i\in[s].
\end{equation}
Note that the restrictions~\req{n} force $s$ to be at most $\log_2 n$. Let us maximize~\req{f} over all $s\in\I N$ and real $n_i$'s satisfying~\req{n}.
It is routine to see that for the optimal sequence we have $2\le \frac{n_{i-1}}{n_i}\le \frac{l-1}2$, $i\in[s]$; moreover, both these inequalities can be simultaneously strict for at most one index $i$.
\comment{Indeed, suppose on the contrary that for two indexes $1\le i<j< s$ we have $2<n_i/n_{i+1}<\frac{l-1}2$ and $2<n_j/n_{j+1}<\frac{l-1}2$. Redefine a new sequence: $n_h'=n_h$ if $h\le i$ or $h>j$, while $n_h'=xn_h$ for $i<h\le j$. If $x=1$, then we obtain the same sequence. Note that $\frac{n_h'}{n_{h+1}'}=\frac{n_h}{n_{h+1}}$ for any $h$ except $h=i$ or $h=j$. So, we can slightly perturb $x$ either way, without violating~\req{n}. The right-hand side of~\req{f}, as a function of $x$ in a small neighborhood of $x=1$, is of the form $ax+\frac bx+c$ with $a,b>0$. But this function is strictly convex, so it cannot attain its maximum at $x=1$, a contradiction.}
Let $t$ be the number of times we have $n_{i-1}=2n_i$. The bound~\req{f} reads
\beq{st}
f(n,l)- 2 \log_2 n \le 2t + (s-t)\, \frac{l-1}2.
\end{equation}
Given that $2^t(\frac{l-1}2)^{s-t-1}\le n$, the right hand side of~\req{st} is maximized for $t=O(\log l)$ and $s=(1+o(1))\, \frac{\ln n}{\ln l}$, implying the upper bound~\req{MaxDeg} by~\req{DTf}.
Let us prove the lower bound. Let $k=\floor{l/2}$. Define $G_0=K_{1,l-1}$ and $G_0'=K_{1,l-2}$. Let $r_0\in V(G_0)$, $r_0'\in V(G_0')$ be their roots. Define inductively on $i$ the following graphs. $G_{i}$ is obtained by taking $k$ copies of $G_{i-1}$ and $k-1$ copies of $G_{i-1}'$, pairwise vertex-disjoint, plus the root $r_i$ connected to the root of each copy of $G_{i-1}$ and $G_{i-1}'$. We have $d(r_i)\le l-1$. The graph $G_{i}'$ is defined in a similar way except that we take $k-1$ copies of $G_{i-1}$ and $k$ copies of $G_{i-1}'$. Let $i$ be the largest index such that $\max(v(G_i),v(G_i'))\le n$.
Let us disregard all roots, i.e.,\ view $G_j$ and $G_j'$ as usual (uncolored) graphs. Note that the trees $G_i$ and $G_i'$ are non-isomorphic as for every $j$ we can identify the level-$j$ roots as the vertices at distance $j+1$ from some leaf.
Define $g_j=(k-1)j+l-2$, $j\in[0,i]$. Let us show by induction on $j$ that Duplicator can survive at least $g_j$ rounds in the $(G_j,G_j')$-game. This is clearly true for $j=0$. Let $j\ge 1$. If Spoiler claims one of $r_j,r_j'$ then Duplicator selects the other. If Spoiler selects a vertex in a graph from the ``previous'' level, for example $F\subset G_j$ with $F\cong G_{j-1}'$, then Duplicator chooses an $F'\subset G_j'$, $F'\cong G_{j-1}'$ and keeps the isomorphism between $F$ and $F'$. So any moves of Spoiler inside $V(F)\cup V(F')$ will be useless and we can ignore $F$ and $F'$. Thus it takes Spoiler at least $k-1$ moves before we are down to the pair $(G_{j-1},G_{j-1}')$, which proves the claim.
Thus we have $D(G_i) \ge D(G_i,G_i') \ge g_i=(\frac12+o(1))\, \frac{l\ln n}{\ln l}$, finishing the proof.\qed
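For concreteness, the recursive pair $G_i,G_i'$ is easy to generate explicitly. The sketch below (ours, assuming the \texttt{networkx} package; the helper names are illustrative) builds the two rooted trees; their orders always differ by exactly one.
\begin{verbatim}
import networkx as nx

def glue(rooted_trees):
    # Join a fresh root (vertex 0) to the roots of the given rooted trees.
    G = nx.Graph()
    G.add_node(0)
    roots = []
    for (T, r) in rooted_trees:
        offset = G.number_of_nodes()
        G = nx.disjoint_union(G, T)      # relabels T by the offset
        roots.append(offset + r)
    G.add_edges_from((0, r) for r in roots)
    return G, 0

def lower_bound_pair(l, depth):
    # G_0 = K_{1,l-1}, G_0' = K_{1,l-2}; on each level the new root gets
    # k copies of one previous tree and k-1 of the other, k = floor(l/2).
    k = l // 2
    G, Gp = (nx.star_graph(l - 1), 0), (nx.star_graph(l - 2), 0)
    for _ in range(depth):
        G, Gp = (glue([G] * k + [Gp] * (k - 1)),
                 glue([G] * (k - 1) + [Gp] * k))
    return G, Gp

(G2, r), (G2p, rp) = lower_bound_pair(l=6, depth=2)
print(G2.number_of_nodes(), G2p.number_of_nodes())   # 144 and 143
\end{verbatim}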
\noindent{\bf Remark.} Verbitsky~\cite{verbitsky:04} proposed a different argument to estimate $D^{\mathrm{tree}}(n,l)$ which gives weaker bounds than those in Theorem~\ref{th:MaxDeg} but can be applied to other classes of graphs with small separators.
Let us study $D^{\mathrm{tree}}(n,l)$ for other $l,n$. The methods have much in common with the proof of Theorem~\ref{th:MaxDeg} so our explanations are shorter.
\bth{l=n^C} Let an integer $t\ge1$ be fixed. Suppose that $l,n\to\infty$ so that $n\ge l^t$ but $n=o(l^{t+1})$. Then $D^{\mathrm{tree}}(n,l)=(\frac{t+1}2+o(1))\, l$. In fact, the lower bound can be achieved by uncolored trees.\end{theorem}
\bpf The lower bound is proved by the induction on $t$. If $t=1$, take $T_1=K_{1,l-2}$. One needs at least $l-1$ moves to distinguish it from $T_1'=K_{1,l-1}$. Let $a=\floor{l/2}$ and $b=\ceil{l/2}$. Suppose we have already constructed $T_{t-1}$ and $T_{t-1}'$, rooted trees with $\le l^{t-1}$ vertices such that the root has degree at most $l-1$. To construct $T_t$ take $a$ copies of $T_{t-1}$ and $b-1$ copies of $T_{t-1}'$ and connect them to the common root. For $T_t'$ we take $a-1$ and $b$ copies respectively. The degree of the main root is $a+b-1= l-1$ as required. The order of $T_t$ is at most $(a+b-1)l^{t-1}+1\le l^t$. Also, Spoiler needs at least $a$ moves before reducing the game to $(T_{t-1},T_{t-1}')$ (while, for $t=1$, $l$ moves are needed to finish the game), giving the required bound.
Let us turn to the upper bound. Spoiler uses the same strategy as before. Namely, he chooses a median $x\in T$ and, of the two relevant multiplicities (which sum to at most $l$), exhibits the smaller one. Let $m_1+1,m_2+1,\dots,m_k+1$ be the numbers of moves spent on the successive medians. We have $n\ge \prod_{i=1}^k m_i$. Also, we have $k\le \log_2n$ because we always choose a median. Given these restrictions, namely the inequalities $m_i\le l/2$, $i\in[k-1]$, and $m_k\le l-1$, the sum $\sum_{i=1}^k m_i$ is maximized when $m_k=l-1$ and as many as possible of the $m_j$ are equal to $l/2$. We thus factor out $l/2$ at most $t-1$ times until the remaining terms have product (and hence sum) $o(l)$. Thus,
$$
\sum_{i=1}^k (m_i+1)\le \log_2n+\sum_{i=1}^km_i\le l+\frac{(t-1)l}2+o(l),
$$
completing the proof.\qed
Theorems~\ref{th:MaxDeg} and~\ref{th:l=n^C} do not cover all the possibilities for $n,l$. The asymptotic computation in the remaining cases seems rather messy. However, the order of magnitude of $D^{\mathrm{tree}}(n,l)$ is easy to compute with what we already have. Namely, Theorem~\ref{th:l=n^C} implies that for $l=\Theta(n^t)$ with fixed $t\in \I N$ we have $D^{\mathrm{tree}}(n,l)=\Theta(l)$. Also, if $l\ge 2$ is constant, then $D^{\mathrm{tree}}(n,l)=\Theta(\ln n)$, where the lower bound follows from considering the order-$n$ path and the upper bound is obtained by using the method of Theorem~\ref{th:MaxDeg}.
\section{The Giant Component}\label{giant}
Let $c>1$ be a constant, $p=\frac cn$, and $G$ be the giant component of a random graph $\C G(n,p)$.
\comment{
Kim, Pikhurko, Spencer and Verbitsky~\cite{kim+pikhurko+spencer+verbitsky:03rsa} conjectured that whp $D(G)=O(\ln n)$.
}
Here we show the following result.
\bth{giant} Let $c>1$ be a constant, $p=c/n$, and $G$ be the giant component of $\C G(n,p)$. Then whp
\beq{giant}
D(G)=\Theta\left(\frac{\ln n}{\ln \ln n}\right)
\end{equation}
\end{theorem}
This result allows us to conclude that for any $p=O(n^{-1})$ a random graph $H\in \C G(n,p)$ satisfies whp
\beq{d/n}
D(H)=({\mathrm e}^{-np}+o(1))\, n.
\end{equation}
The proof is an easy modification of that in~\cite{kim+pikhurko+spencer+verbitsky:03rsa} where the validity of \req{d/n} was established for $p\le (1.19...+o(1))\, n^{-1}$. The lower bound in~\req{d/n} comes from considering the graph $H'$ obtained from $H$ by adding an isolated vertex (and noting that whp $H$ has $({\mathrm e}^{-np}+o(1))\, n$ isolated vertices). The method in~\cite{kim+pikhurko+spencer+verbitsky:03rsa} shows that the upper bound~\req{d/n} can fail only if $D(G)>({\mathrm e}^{-np}+o(1))\, n$, where $G$ is the giant component of $H$. (And $np\approx 1.19...$ is the moment when $v(G)\approx {\mathrm e}^{-np}\, n$.)
\subsection{Upper Bound}
The structure of the giant component is often characterized using its core and kernel (e.g., see Janson, \L uczak, and Ruci\'nski~\cite[Section~5]{janson+luczak+rucinski:rg}). We follow this approach in the proof of the upper bound in \req{giant}. Thus, we first bound $D(G)$ from above for a graph $G$ with small diameter whose kernel fulfills some ``sparseness'' conditions. Then, we show that these conditions hold whp for the kernel of the giant component of a random graph.
\subsubsection{Bounding $D(G)$ Using the Kernel of $G$}\label{DKernel}
The \emph{core} $C$ of a graph $G$ is obtained by removing, consecutively and as long as possible, vertices of degree at most $1$. If $G$ is not a forest, then $C$ is non-empty and $\delta(C)\ge 2$.
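In graph-theoretic terms $C$ is the $2$-core of $G$, and the defining peeling procedure translates directly into code. A minimal sketch (ours, assuming \texttt{networkx}):
\begin{verbatim}
import networkx as nx

def core(G):
    # Repeatedly delete vertices of degree at most 1; what remains is
    # the (possibly empty) core C of G, with delta(C) >= 2.
    C = G.copy()
    low = [v for v, d in C.degree() if d <= 1]
    while low:
        C.remove_nodes_from(low)
        low = [v for v, d in C.degree() if d <= 1]
    return C

G = nx.lollipop_graph(4, 3)       # K_4 with a pendant path attached
assert set(core(G)) == {0, 1, 2, 3}          # only the K_4 survives
assert set(core(G)) == set(nx.k_core(G, 2))  # it equals the 2-core
\end{verbatim}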
First we need an auxiliary lemma which is easily proved, similarly to the auxiliary lemmas in Section~\ref{general}, by the path-halving argument.
\blm{cycle} Let $G,G'$ be graphs. Suppose $x\in V(G)$ and $x'\in V(G')$ have been selected such that $G$ contains some cycle $P\ni x$ of length at most $k$ while $G'$ does not. Then Spoiler can win in at most $\log_2 k+O(1)$ moves, playing all the time inside $G$.\qed\end{lemma}
\blm{DCore} Let $G,G'$ be graphs and $C,C'$ be their cores. If Duplicator does not preserve the core, then Spoiler can win in at most $\log_2d+O(1)$ extra moves, where $d$ is the diameter of $G$.\end{lemma}
\bpf Assume that $\mathrm{diam}(G')=\mathrm{diam}(G)$ for otherwise we are easily done. Suppose that, for example, some vertices $x\in C$ and $x'\in \O {C'}$ have been selected.
If $x$ lies on a cycle $C_1\subset C$, then we can find such a cycle of length at most $2d+1$. Of course, $G'$ cannot have a cycle containing $x'$, so Spoiler wins by Lemma~\ref{lm:cycle} in $\log_2(2d+1)+O(1)$ moves, as required.
Suppose that $x$ does not belong to a cycle. Then $G$ contains two vertex-disjoint cycles $C_1,C_2$ connected by a path $P$ containing $x$. Choose such a configuration which minimizes the length of $P\ni x$. Then the length of $P$ is at most $d$. Spoiler selects the branching vertices $y_1\in V(C_1)\cap V(P)$ and $y_2\in V(C_2)\cap V(P)$. If some Duplicator's reply $y_i'$ is not on a cycle, we are done again by Lemma~\ref{lm:cycle}. So assume there are cycles $C_i'\ni y_i'$. In $G$ we have
\beq{dist}
\mathrm{dist}(y_1,y_2)= \mathrm{dist}(y_1,x) + \mathrm{dist}(y_2,x).
\end{equation}
As $x'\not\in C'$, any shortest $x'y_1'$-path and $x'y_2'$-path enter $x'$ via the same edge $\{x',z'\}$. But then
\beq{distp}
\mathrm{dist}(y_1',y_2')\le \mathrm{dist}(y_1',z')+\mathrm{dist}(y_2',z')= \mathrm{dist}(y_1',x') + \mathrm{dist}(y_2',x')-2.
\end{equation}
By~\req{dist} and~\req{distp}, the distances between $x,y_1,y_2$ cannot all be equal to the distances between $x',y_1',y_2'$. Spoiler can demonstrate this in at most $\log_2 (\mathrm{dist}(y_1,y_2)) +O(1)$ moves, as required.\qed
In order to state our upper bound on $D(G)$ we have to define a number of parameters of $G$. In outline, we try to show that any distinct $x,y\in V(C)$ can be distinguished by Spoiler reasonably fast. This would mean that each vertex of $C$ can be identified by a first order formula of small depth. Note that $G$ can be decomposed into the core and a number of trees $T_x$, $x\in V(C)$, rooted at vertices of $C$. Thus, by specifying which pairs of vertices of $C$ are connected and describing each $T_x$, $x\in V(C)$, we completely define $G$. However, we have one unpleasant difficulty: not all pairs of vertices of $C$ can be distinguished from one another. For example, we may have a pendant triangle on $\{x,y,z\}$ with $d(x)=d(y)=2$, in which case the vertices $x$ and $y$ are indistinguishable. However, we will show that whp we can distinguish any two vertices of degree $3$ or more in $C$, which suffices for our purposes.
Let us give all the details. For $x\in V(C)$, let $T_x\subset G$ denote the tree rooted at $x$, i.e., $T_x$ is a component containing $x$ in the forest obtained from $G$ by removing all edges of $C$. Let
$$
t=\max\{D(T_x): x\in V(C)\},
$$
where $D(T_x)$ is taken with respect to the class of graphs with one root.
Let the \emph{kernel} $K$ of $G$ be obtained from $C$ by the \emph{serial reduction} where we repeat as long as possible the following step: if $C$ contains a vertex $x$ of degree $2$, then remove $x$ from $V(C)$ but add the edge $\{y,z\}$ to $E(C)$ where $y,z$ are the two neighbors of $x$. Note that $K$ may contain loops and multiple edges. We agree that each loop contributes $2$ to the degree. Then we have $\delta(K)\ge 3$.
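The serial reduction is also easy to carry out mechanically; since it may create loops and parallel edges, a multigraph representation is convenient. A minimal Python sketch (ours, assuming \texttt{networkx}):
\begin{verbatim}
import networkx as nx

def kernel(core):
    # Serial reduction: while some vertex x has degree 2 and its two
    # edge ends y, z are other vertices, delete x and add the edge {y,z}.
    # Loops (counting 2 towards the degree) and parallel edges may appear.
    K = nx.MultiGraph(core)
    changed = True
    while changed:
        changed = False
        for x in list(K.nodes):
            if K.degree(x) != 2:
                continue
            ends = [w for (u, v) in K.edges(x) for w in (u, v) if w != x]
            if len(ends) != 2:     # x carries only a loop: keep it
                continue
            y, z = ends
            K.remove_node(x)
            K.add_edge(y, z)       # may be a loop (y == z) or parallel
            changed = True
    return K

C = nx.cycle_graph(5)                          # a 5-cycle ...
C.add_edges_from([(0, 5), (5, 6), (6, 2)])     # ... plus a path from 0 to 2
K = kernel(C)
print(sorted(K.degree()))    # [(0, 3), (2, 3)]: three parallel 0-2 edges
\end{verbatim}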
Let $u=\Delta(G)$ and let $d$ be the diameter of $G$. It follows that each edge of $K$ corresponds to a path $P$ in $C$ of length at most $2d$.
\comment{(For otherwise any two vertices of $P$ at distance $d+1$ contradict the definition of $d$.)}
Let $l$ be an integer such that every set of $v\le 6 l$ vertices of $K$ spans at most $v$ edges in $K$. (Roughly speaking, we do not have two short cycles close together.)
For $\{x,y\}\in E(K)$ let $A_{x,y}$ be the set of vertices obtained by doing breadth-first search in $K-x$ starting with $y$ until the process dies out or, after we have added a whole level, we reach at least $k=2^{l-2}$ vertices. Let $K_{x,y}=K[A_{x,y}\cup \{x\}]$.
The \emph{height} of $z\in V(K_{x,y})$ is the distance in $K-x$ between $z$ and $y$. It is easy to deduce from the condition on short cycles that each $K_{x,y}\subset K-x$ has at most one cycle and the maximum height is at most $l$. In fact, the process dies out only if $y$ is an isolated loop in $K-x$. For $xy\in E(K)$ let $G_{x,y}$ be the subgraph of $G$ corresponding to $K_{x,y}$. We view $K_{x,y}$ and $G_{x,y}$ as having two special \emph{roots} $x$ and $y$.
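The sets $A_{x,y}$ are produced by a truncated breadth-first search, which we record for definiteness in the following sketch (ours, assuming \texttt{networkx}; it works for multigraphs as well).
\begin{verbatim}
import networkx as nx

def A_edge(K, x, y, k):
    # A_{x,y}: breadth-first search in K - x starting from y; stop when
    # the process dies out or a completed level brings the number of
    # reached vertices to at least k.
    H = K.subgraph([v for v in K.nodes if v != x])
    seen, level = {y}, [y]
    while level and len(seen) < k:
        nxt = {w for u in level for w in H.neighbors(u)} - seen
        seen |= nxt
        level = list(nxt)
    return seen

def K_edge(K, x, y, k):
    # K_{x,y} = K[ A_{x,y} union {x} ]
    return K.subgraph(A_edge(K, x, y, k) | {x})
\end{verbatim}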
Here is another assumption that we make about $G$ and $l$. Suppose that for any $xx',yy'\in E(K)$, if $K_{x,x'}$ and $K_{y,y'}$ both have order at least $k$ and $A_{x,x'}\cap A_{y,y'}=\emptyset$, then the rooted graphs $G_{x,x'}$ and $G_{y,y'}$ are not isomorphic. Let
\begin{eqnarray}
b_0&=&\frac{l(\ln u+\ln \ln n + l)}{\ln l} +2u+\log_2d,\label{eq:b0}\\
b&=& b_0 + t+ u +2\log_2d.\label{eq:b}
\end{eqnarray}
\blm{a} Under the above assumptions on $G$, we have $D(G)\le b+O(1)$. \end{lemma}
\bpf
Let $G'\not\cong G$. Let $C',K'$ be its core and kernel. We can assume that $\Delta(G')=u$ and its diameter is $d$ for otherwise Spoiler easily wins in $u+2$ or $\log_2d+O(1)$ moves.
By Lemma~\ref{lm:DCore} it is enough to show that Spoiler can win the Ehrenfeucht $(G,G')$-game in at most $b-\log_2d+O(1)$ moves provided Duplicator always respects $C$ and $K$. Call this game $\C C$.
Color $V(K)\cup E(K)$ and $V(C)$ by the isomorphism type of the subgraphs of $G$ which sit on a vertex/edge. We have a slight problem with the edges of $K$ as the color of an unordered edge may depend on the direction in which we traverse it. So, more precisely, every edge of $K$ is considered as a pair of ordered edges each getting its own color. Do the same in $G'$. As $G\not\cong G'$, the obtained colored digraphs $K$ and $K'$ cannot be isomorphic. Call the corresponding digraph game $\C K$.
\claim1 If Spoiler can win the game $\C K$ in $m$ moves, then he can win $\C C$ in at most $m+t+u+\log_2d+O(1)$ moves.
\bpf[Proof of Claim.] We can assume that each edge of $K'$ corresponds to a path in $G'$ of length at most $2d+1$: otherwise Spoiler selects a vertex of $C'$ at the $C'$-distance at least $d+1$ from any vertex of $K'$ and wins in $\log_2d+ O(1)$ moves.
Spoiler plays according to his $\C K$-strategy by making moves inside $V(K)\subset V(G)$ or $V(K')\subset V(G')$. Duplicator's replies are inside $V(K')$ or $V(K)$, respectively, so they correspond to replies in the $\C K$-game. In at most $m$ moves, Spoiler can achieve that the sets of colored edges between some selected vertices $x,y\in K$ and between $x',y'\in K'$ are different. (Or loops if $x=y$.)
In at most $u+1$ moves, Spoiler can either win or select a vertex $z$ inside a colored $xy$-path $P$ (an edge of $K$) such that $z'$ either is not inside an $x'y'$-path (an edge of $K'$) or its path $P'\ni z'$ has a different coloring from $P$. In the former case, Spoiler wins by Lemma~\ref{lm:path}: in $G$ there is an $xy$-path containing $z$ and no vertex from $K$.
Consider the latter case. Assume that $|P|=|P'|$, for otherwise we are done by Lemma~\ref{lm:path}. Spoiler selects $w\in P$ such that for the vertex $w'\in P'$ with $\mathrm{dist}_P(w,x)=\mathrm{dist}_{P'}(w',x')$ we have $T_w\not\cong T'_{w'}$. If Duplicator does not reply with $w'$, then she has violated distances. Otherwise Spoiler needs at most $t$ extra moves to win the game $\C T$ on $(T_w,T'_{w'})$ (and at most $\log_2d+O(1)$ extra moves to catch Duplicator if she does not respect $\C T$).\cqed
It remains to bound $D(K)$, the colored digraph version. This requires a few preliminary results.
\claim2 For any $\{x,x'\}\in E(K)$ we have $D(K_{x,x'})\le b_0+O(1)$ in the class of colored digraphs with two roots, where $b_0$ is defined by~\req{b0}.
\bpf[Proof of Claim.] Let $T=K_{x,x'}$ and $T'\not\cong T$. If $T$ is a tree, then we just apply a version of Theorem~\ref{th:MaxDeg} using the order ($\le\! u 2^{l}$) and maximum degree ($\le\! u$). Otherwise, Spoiler first selects a vertex $z\in T$ which lies on the (unique) cycle. We have at most $u-1$ components in $T-z$, viewing each as a colored tree where one extra color marks the neighbors of $z$. As $T\not\cong T'$, in at most $u+1$ moves we can restrict our game to one of the components. (If Duplicator does not respect components, she loses in at most $\log_2 d +O(1)$ moves.) Now, one of the graphs is a colored tree, and Theorem~\ref{th:MaxDeg} applies.\cqed
\claim3 For every two distinct vertices $x,y\in V(K)$ there is a first order formula $\Phi_{x,y}(z)$ with one free variable and quantifier rank at most $b_0+\log_2d+O(1)$ such that $G\models \Phi_{x,y}(x)$ and $G\not\models \Phi_{x,y}(y)$. (Note that we have to find $\Phi_{x,y}$ for $x,y$ in the kernel only, but we evaluate $\Phi_{x,y}$ with respect to $G$.)
\bpf[Proof of Claim.] To prove the existence of $\Phi_{x,y}$ we have to describe Spoiler's strategy, where he has to distinguish $(G,x)$ and $(G,y)$ for given distinct $x,y\in K$.
If the multiset of isomorphism classes of the graphs $K_{x,x'}$, taken over the edges $\{x,x'\}\in E(K)$ incident to $x$, is not equal to the analogous multiset $\{ K_{y,y'}: \{y,y'\}\in E(K)\}$ over the edges incident to $y$, then we are done by Claim~2. So let us assume that these multisets are equal.
Note that an isomorphism $K_{x,x'}\cong K_{y,y'}$ implies an isomorphism $G_{x,x'}\cong G_{y,y'}$. Also, by our assumption on $l$, the isomorphism $G_{x,x'}\cong G_{y,y'}$ implies that $V(K_{x,x'})\cap V(K_{y,y'})\not=\emptyset$.
At most one neighbor of $x$ can be an isolated loop, for otherwise we get $3$ vertices spanning $4$ edges. The same holds for $y$. As the height of any $K_{a,b}$ is at most $l$, we conclude that $\mathrm{dist}_K(x,y)\le 2l$. A moment's thought reveals that there must be a cycle of length at most $4l$ containing both $x$ and $y$. But this cycle rules out the possibility of a loop adjacent to $x$ or to $y$. Thus, since two short cycles close to each other in $K$ are excluded, it must be the case that $\mathrm{dist}(x,y)\le l-1$ and $d_K(x)=d_K(y)=3$. Moreover, let $x_1,x_2,x_3$ and $y_1,y_2,y_3$ be the neighbors of $x$ and $y$ such that $G_{x,x_i}\cong G_{y,y_i}$; then (up to a relabeling of indices) we have the following paths between $x$ and $y$: either $(x,x_1,\dots,y_1,y)$ and $(x,x_2,\dots,y_3,y)$ or $(x,x_1,\dots,y_3,y)$ and $(x,x_2,\dots,y_1,y)$.
Now, $K_{x,x_3}$ is not isomorphic to $K_{x,x_1}$ nor to $K_{x,x_2}$ by the vertex-disjointness. (Note that it is not excluded that $K_{x,x_1}\cong K_{x,x_2}$: they may intersect, for example, in $y$.)
But then $z=x$ is different from $z=y$ in the following respect: the (unique) short cycle of $K$ containing $z$ has its two edges entering $z$ from subgraphs isomorphic to $K_{x,x_1}$ and $K_{x,x_2}$ (while for $z=y$ the corresponding subgraphs are isomorphic to $K_{x,x_1}$ and $K_{x,x_3}$).
This can be used by Spoiler as follows. Spoiler selects $x_1,x_2$. If Duplicator replies with $y_3$, then Spoiler can use Claims~2 and~3 because $K_{y,y_3}$ is isomorphic neither to $K_{x,x_1}$ nor to $K_{x,x_2}$. Otherwise, the edge $\{x,x_2\}$ is on a short cycle while $\{y,y_2\}$ is not. Spoiler uses Lemma~\ref{lm:cycle}.\cqed
By Lemma~\ref{lm:DCore} we can find $\Phi_K(x)$, a formula of rank at most $\log_2d+O(1)$ which, with respect to $G$, evaluates to $1$ for all $x\in V(K)$ and to $0$ otherwise. More precisely, Lemma~\ref{lm:DCore} gives a formula $\Phi_C(x)$ testing for $x\in V(C)$. But the vertices of $V(K)\subset V(C)$ are precisely those of degree at least $3$ in $C$.
\comment{So we can take
$$
\Phi_K(x)= \Phi_C(x) \wedge \exists_{x_1,x_2,x_1} \left(
\Phi_C(x_1)\wedge \Phi_C(x_2)\wedge \Phi_C(x_3)\wedge x\sim
x_1\wedge x\sim x_2\wedge x\sim x_3\wedge_{i\not= j} x_i\not=x_j\right).
$$
}
Now, as it is easy to see, for any $x\in K$ the formula
\beq{Phi}
\Phi_x(v):= \Phi_K(v) \wedge \bigwedge_{y\in V(K)\setminus \{x\}} \Phi_{x,y}(v)
\end{equation}
identifies $x$ uniquely and has rank at most $\log_2d+b_0+O(1)$.
Take $x\in V(K)$. If there is no $x'\in V(K')$ such that $G'\models \Phi_{x}(x')$, then Spoiler selects $x$. Whatever Duplicator's reply $x'$ is, it evaluates differently from $x$ on $\Phi_{x}$. Spoiler can now win in at most $D(\Phi_{x})$ moves, as required. If there are two distinct $y',z'\in K'$ such that $G'\models \Phi_{x}(y')$ and $G'\models \Phi_{x}(z')$, then Spoiler selects both $y'$ and $z'$. At least one of Duplicator's replies is not equal to $x$, say, $y\not=x$. Again, the selected vertices $y\in V(K)$ and $y'\in V(K')$ are distinguished by $\Phi_x$, so Spoiler can win in at most $D(\Phi_x)$ extra moves.
Therefore, let us assume that for every $x\in V(K)$ there is the unique vertex $x'=\phi(x)\in V(K')$ such that $G'\models \Phi_x(x')$. Clearly, $\phi$ is injective. Furthermore, $\phi$ is surjective for if $x'\not\in \phi(V(K))$, then Spoiler wins by selecting $x'\in V(K')$ and then using $\Phi_x$, where $x\in V(K)$ is Duplicator's reply. Moreover, we can assume that Duplicator always respects~$\phi$ for otherwise Spoiler wins in at most $\log_2d+b_0+O(1)$ extra moves.
As $K\not\cong K'$, Spoiler can select $x,y\in V(K)$ such that the multisets of colored paths (or loops if $x=y$) between $x$ and $y$ and between $x'=\phi(x)$ and $y'=\phi(y)$ are distinct. Again, this means that some colored path has different multiplicities and Spoiler can highlight this in at most $u+1$ moves. Then in at most $\log_2l+O(1)$ moves he can ensure that some vertices $z\in V(K)$ and $z'\in V(K')$ are selected such that the removed trees $T_z$ and $T_{z'}$ rooted at $z$ and $z'$ are not isomorphic, compare with Lemma~\ref{lm:path}.
Now, by the definition of $t$, at most $t$ moves are enough to distinguish $T_z$ from $T_{z'}'$ (plus possible $\log_2 d +O(1)$ moves to catch Duplicator if she replies outside $V(T_z)\cup V(T_{z'})$).
This completes the proof of Lemma~\ref{lm:a}.\qed
\subsubsection{Probabilistic Part}
Here we estimate the parameters from the previous section. As before, let $G$ be the giant component of $\C G(n,\frac cn)$, let $C$ be its core, etc.
It is well-known that whp $u=O(\frac{\ln n}{\ln\ln n})$ and $d=O(\ln n)$. \comment{Reference???}
\blm{Shaved} Whp every edge of $K$ corresponds to at most $O(\ln n)$ vertices of $G$. Similarly, for any $x\in V(C)$ we have $v(T_x)=O(\ln n)$.\end{lemma}
\bpf
The expected number of $K$-edges corresponding to precisely $i$ vertices in $G$ is at most
$$
\binom{n}{i}\binom{i}{2} p^{i-1} i^{i-2} (1-p)^{(i-2)(n-i)} \le n i^2\left(\frac{{\mathrm e} c}{{\mathrm e}^c}\right)^{i}.
$$
But ${\mathrm e} c< {\mathrm e}^c$ for $c>1$, so if $i$ is large enough, $i>M\ln n$, then the expectation is $o(n^{-3})$.
Similarly, the expected number of vertices $x$ with $v(T_x)=i$ is at most $$n\binom{n-1}{i-1}p^{i-1}i^{i-2}(1-p)^{(i-1)(n-i)}\leq 2n i\left(\frac{{\mathrm e} c}{{\mathrm e}^c}\right)^{i}.$$ \qed
In particular, our results from Section~\ref{general} imply that whp $t=O(\frac{\ln n}{\ln \ln n})$.
Let, for example, $l=2\ln \ln n$. Thus $k/\ln n\to\infty$, where $k=2^{l-2}$. It remains to prove that this choice of $l$ satisfies all the assumptions.
\blm{ShortCycle} Whp any set of $s\le 6l$ vertices of $K$ spans at most $s$ edges.\end{lemma}
\bpf A moment's thought reveals that it is enough to consider sets spanning connected subgraphs only.
Let $L=M\ln n$ be given by Lemma~\ref{lm:Shaved}. The probability that there is a set $S$ such that $|S|=s\leq 6l$ and $K[S]$ is a connected graph with at least $s+1$ edges is at most
\begin{align*} &o(1)+\sum_{s=4}^{6l}\binom{n}{s}\, s^{s-2}\, {s\choose 2}^2\sum_{0\leq \ell_1,\ldots,\ell_{s+1}\leq L} \prod_{i=1}^{s+1}\binom{n}{\ell_i}(\ell_i+2)^{\ell_i}p^{\ell_i+1}(1-p)^{\ell_i(n-\ell_i-2)}\\
&\leq o(1)+\sum_{s=4}^{6l}\bfrac{n{\mathrm e}}{s}^s s^{s+2}\sum_{0\leq \ell_1,\ldots,\ell_{s+1}\leq L} \prod_{i=1}^{s+1}\left(\frac{c{\mathrm e}^2}{n}\left(\frac{{\mathrm e}
c}{{\mathrm e}^c}\right)^{\ell_i}\right)\ \le\ o(1)+ \sum_{s=4}^{6l}\frac{(O(1))^s}{n}\ =\ o(1). \end{align*}
The lemma is proved.\qed
\blm{Kab} Whp $K$ does not contain four vertices $x,x',y,y'$ such that $xx',yy'\in E(K)$, $v(K_{x,x'})\ge k$, $A_{x,x'}\cap A_{y,y'}=\emptyset$, and $G_{x,x'}\cong G_{y,y'}$.\end{lemma}
\bpf Given $c$, choose the following constants in this order: small $\epsilon_1>0$, large $M_1$, large $M_2$, small $\epsilon_2>0$, and large $M_3$.
Consider breadth-first search in $G-x$ starting with $x'$. Let $L_1=\{x'\}$, $L_2$, $L_3$, etc., be the levels. Let $T_i=\{x\}\cup (\cup_{j=1}^i L_j)$. Let $s$ be the smallest index such that $|T_s|\ge M_2\ln n$.
Chernoff's bound implies that the probability of $|T_s|> 2cM_2 \ln n$ is $o(n^{-2})$. Indeed, this is at most the probability that the binomial random variable with parameters $(n, \frac cn \times M_2\ln n)$ exceeds $2cM_2\ln n$.
Similarly, with probability $1-o(n^{-3})$ we have $|L_{i+1}|=(c\pm
\epsilon_2)|L_i|$ provided $i\ge s$ and $|T_i|=o(n)$. Hence, we see that from the first time we reach $2M_2\ln n$ vertices, the levels increase proportionally with the coefficient close to $c$ for further $\Theta(\ln n)$ steps.
Take some $i$ with $|T_i|=O(\ln n)$. The sizes of the first $\Theta(\ln n)$ levels of the breadth-first search from the vertices of $L_i$ can be bounded from below by independent branching processes with the number of children having the Poisson distribution with mean $c-\epsilon_2$. Indeed, for every active vertex $v$ choose a pool $P$ of $\ceil{(1-\frac{\epsilon_2}c)n}$ available vertices and let $v$ choose its neighbors from $P$, each with probability $c/n$. (The edges between $v$ and $\O P$ are ignored.) If $v$ claimed $r$ neighbors, then, when we take the next active vertex $u$, we add extra $r$ vertices to the pool, so that its size remains constant.
With positive probability $p_1$ the ideal branching process survives infinitely long; in fact, $p_1$ is the positive root of $1-p_1={\mathrm e}^{-cp_1}$. Let
$$
p_2=\max_{j\ge 0} \frac{c^j{\mathrm e}^{-c}}{j!} <1.
$$
The numbers $p_1>0$ and $p_2<1$ are constants (depending on $c$ only).
Take the smallest $i$ such that $|T_i|\ge 2cM_3\ln n$. The breadth-first search inside $G$ goes on for at least $M_1$ further rounds (after the $i$-th round) before we reach a vertex outside
$G_{x,x'}$. We know that $|L_i|\ge (\frac{c-1}c-\epsilon_1)\,|T_i|$ because the levels grow proportionally from the $s$-th level. Let $Z$ consist of the vertices of $L_i$ for which the search process in $G-x$ goes on for at least $M_1$ further levels before dying out. By Chernoff's bound, with probability $1-o(n^{-2})$ we have $|Z|\ge \frac{p_1}2
|L_i|$.
Let us fix any $K_{x,x'}$ having all the above properties and compute the expected number of copies of $K_{x,x'}$ in $G$. More precisely, we compute the expected number of subgraphs of $G$ isomorphic to
$G[T_{i}]$ such that a specified $|Z|$-subset of the last level has specified trees, each of height at least $M_1$, sitting on it. The expected number of $G[T_i]$-subgraphs is at most
$n^{|T_i|}\,p^{|T_i|-1}$. This has to be multiplied by
$$
(p_2+o(1))^{M_1|Z|} \le p_2^{M_1(c-1)p_1\,|T_i|/4c}:
$$
because if we want to get a given height-$M_1$ tree, then at least $M_1$ times we have to match the sum of degrees of a level, each coincidence having probability at most $p_2+o(1)$. As the constant $M_1$ can be arbitrarily large, we can make the total expectation $o(n^{-2})$.
Markov's inequality implies the lemma.\qed
Finally, putting all together we deduce the upper bound of Theorem~\ref{th:giant}.
\subsection{Lower Bound}
Let $l=(1-\epsilon) \frac{\ln n}{\ln \ln n}$ for some $\epsilon>0$. We claim that whp the core $C$ has a vertex $i$ adjacent to at least $l$ leaves of $G$. (Then we have $D(G)\ge l+1$: consider the graph obtained from $G$ by adding an extra leaf at $i$.)
Let us first prove this claim for the whole random graph $H\in \C G(n,c/n)$ (rather than for the giant component $G\subset H$). For $i\in [n]$ let $X_i$ be the indicator of the event that the vertex $i$ is adjacent to at least $l$ leaves. It is easy to estimate the expectation of $X=\sum_{i=1}^n X_i$:
\begin{eqnarray*}
E(X) &=& n \binom{n-1}{ l} p^l (1-p)^{\binom{l}{ 2} + l(n-l)} +O(1)\times
n\binom{n}{ l+1} p^{l+1}(1-p)^{(l+1)n}\\
&=& (1+o(1)) \frac{nc^l{\mathrm e}^{-cl}}{l!}\ \to\ \infty.
\end{eqnarray*}
Also, for $i\not=j$,
\begin{eqnarray*}
E(X_i\wedge X_j) &=&(1+o(1))\,\binom{n-2}{ l} \binom{n-l-2}{ l}p^{2l}
(1-p)^{\binom{2l}{ 2} +2l(n-2l-1)}\\
&=& (1+o(1))\, E(X_i)E(X_j).
\end{eqnarray*}
The second moment method gives that $X$ is concentrated around its mean.
Now, let us reveal the vertex set $A$ of the $2$-core of the whole graph $H$. When we expose the stars demonstrating $X_i=1$ one by one, then for each $i$ the probability of $i\in A$ is $\frac{|A|}n+o(1)$. The sharper results of {\L}uczak~\cite{luczak:91}
\comment{Or Pittel~\cite{pittel:90}?}
imply that whp the core $C$ of the giant component has size $\Theta(n)$. Hence, whp at least one vertex $i$ with $X_i=1$ belongs to $V(C)$, giving the required claim.
\section{Random Trees}\label{random}
We consider the probabilistic model $\C T(n)$, where a tree $T$ on the vertex set $[n]$ is selected uniformly at random among all $n^{n-2}$ trees. In this section we prove that whp $D(T)$ is close to the maximum degree of $T$.
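The model ${\cal T}(n)$ can be sampled exactly through the Pr\"ufer bijection between labelled trees on $[n]$ and sequences in $[n]^{n-2}$. The following Python sketch (ours, not part of the argument) draws a uniform tree and reports its maximum degree.
\begin{verbatim}
import heapq, math, random
from collections import Counter

def random_tree(n, rng=random):
    # Decode a uniform Pruefer sequence: this gives a tree chosen
    # uniformly among the n^(n-2) labelled trees on {0, ..., n-1}.
    if n <= 2:
        return [(0, 1)] if n == 2 else []
    seq = [rng.randrange(n) for _ in range(n - 2)]
    count = Counter(seq)                    # degree of v is count[v] + 1
    heap = [v for v in range(n) if count[v] == 0]
    heapq.heapify(heap)
    edges = []
    for v in seq:
        leaf = heapq.heappop(heap)          # smallest current leaf
        edges.append((leaf, v))
        count[v] -= 1
        if count[v] == 0:
            heapq.heappush(heap, v)
    edges.append((heapq.heappop(heap), heapq.heappop(heap)))
    return edges

n = 10**5
deg = Counter(v for e in random_tree(n) for v in e)
print(max(deg.values()), math.log(n) / math.log(math.log(n)))
# At n = 10^5 the maximum degree is still roughly twice ln n/ln ln n;
# the (1+o(1)) factor in the theorem below converges slowly.
\end{verbatim}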
\bth{RandomTree} Let $T\in\C T(n)$. Whp $D(T)=(1+o(1))\Delta(T)=(1+o(1))\frac{\ln n}{\ln\ln n}$. \end{theorem}
\newcommand{{\textrm{Var}}}{{\textrm{Var}}} \newcommand{{\textrm{Ch}}}{{\textrm{Ch}}} \newcommand{{\textrm{del}}}{{\textrm{del}}}
Let ${\cal F}(n,k)$ be a forest chosen uniformly at random from the family ${\cal F}_{n,k}$ of all forests on the vertex set $[n]$ which consist of $k$ trees rooted at the vertices
$1,2,\dots,k$. Note that a random tree $T\in {\cal T}(n)$ can be identified with ${\cal F}(n,1)$. We recall that $|{\cal F}_{n,k}|=kn^{n-k-1}$, see e.g.\ Stanley~\cite[Theorem~5.3.2]{stanley:ec}. We start with the following simple facts on ${\cal F}(n,k)$.
\blm{forest} Let $k=k(n)\le \ln^4 n$. \begin{enumerate} \renewcommand{(\roman{enumi})}{(\roman{enumi})} \item The expected number of vertices in all trees of ${\cal F}(n,k)$, except for the largest one, is $O(k\sqrt n)$. \item The probability that ${\cal F}(n,k)$ contains precisely $\ell$, $\ell=0,\dots,k-1$, isolated vertices is given by $(1+O({k^2}/{n})) \binom{k-1}\ell {\mathrm e}^{-\ell}(1-{\mathrm e}^{-1})^{k-\ell-1}$. \item The probability that the roots of ${\cal F}(n,k)$ have more than $k(1+1/\ln n)+2\ln^2 n$ neighbors combined is $o(n^{-3})$.
\item The probability that $\ell$ given roots of ${\cal F}(n,k)$ have degree at least $s\ge 4$ each is bounded from above by $(2/(s-1)!)^\ell$.
\end{enumerate} \end{lemma}
\bpf If $i\le n/2+1$, then the probability that a tree rooted at a vertex $j=1,2,\dots,k$ in the forest ${\cal F}(n,k)$ has precisely $i$ vertices is given by
$$\binom {n-k}{i-1} i^{i-2} \frac{(k-1)(n-i)^{n-i-k}}{k n^{n-k-1}}=O(i^{-3/2})\,.$$
Consequently, the expectation of the sum of the orders of all components of ${\cal F}(n,k)$ with at most $n/2+1$ vertices is $O(k \sqrt n)$.
In order to see (ii) note that from the generalized inclusion-exclusion principle the stated probability equals
\begin{equation}\label{eqf1} \begin{aligned} \sum_{i=\ell}^k&\binom i\ell(-1)^{i-\ell}\binom ki\frac{(k-i)(n-i)^{n-k-1}}{kn^{n-k-1}}\\ =&\Big(1+O\Big(\frac{k^2}{n}\Big)\Big) \sum_{i=\ell}^k\frac{(k-1)!}{\ell!(i-\ell)!(k-1-i)!}(-1)^{i-\ell}{\mathrm e}^{-i}\\ =&\Big(1+O\Big(\frac{k^2}{n}\Big)\Big) \binom{k-1}\ell {\mathrm e}^{-\ell}(1-{\mathrm e}^{-1})^{k-\ell-1}\,.
\end{aligned} \end{equation}
For the probability that precisely $m$ ($\ge\! k$) vertices of ${\cal F}(n,k)$ are adjacent to the roots, Stirling's formula gives \begin{equation}\label{f1}
\binom{n-k}{m}k^m\frac{m\,(n-k)^{n-k-m-1}}{k\,n^{n-k-1}} \le \Big(1+O\Big(\frac{k^2}n\Big)\Big)\Big(\frac{{\mathrm e}^ {1-k/m}k}{m}\Big)^{m}.
\end{equation} For every $x$, $0<x<1$, we have $x{\mathrm e}^{1-x}\le {\mathrm e}^{-(1-x)^2/2}$, so the above formula is bounded from above by $\exp(-\frac{(m-k)^2}{2m})$. Since $$\sum_{m\ge k(1+1/\ln n)+2\ln ^2n}\exp\Big(-\frac{(m-k)^2}{2m}\Big)=o(n^{-3})\,,$$ the assertion follows.
For $k=1$ the probability that a given root has degree at least $s$ is bounded from above by
$$\sum_{t\ge s}\binom{n-1}{t}\frac{t(n-1)^{n-t-2}}{n^{n-2}}\le \sum_{t\ge s}\frac{1}{(t-1)!}\le \frac{2}{(s-1)!}\;.$$
If we fix some $\ell\ge 2$ roots, then if we condition on the vertex sets of the $\ell$ corresponding components, the obtained trees are independent and uniformly distributed, implying the required bound by the above calculation. \qed
Using the above result one can estimate the number of vertices of $T\in {\cal T}(n)$ with a prescribed number of pendant neighbors.
\blm{vert} Let $X_{\ell,m}$ denote the number of vertices in $T\in {\cal T}(n)$ with precisely $\ell$ neighbors of degree one and $m$ neighbors of degree larger than one. Let
$$
A\subseteq\{(\ell,m)\colon\; 0\le \ell\le \ln n, \quad 1\le m\le \ln n \}\,,$$
be a set of pairs of natural numbers and $X_A=\sum_{(\ell,m)\in A} X_{\ell,m}$. Then, the expectation
\begin{equation}\label{eqf2}
E(X_A)=(1+o(1))\,n\sum_{(\ell,m)\in A} \frac{{\mathrm e}^{-\ell-1}}{\ell!}\frac{(1-{\mathrm e}^{-1})^{m-1}}{(m-1)!} \end{equation}
and $E(X_A(X_A-1))=(1+o(1))\,(E(X_A))^2$. \end{lemma}
\bpf Using Lemma~\ref{lm:forest}(ii) we get \begin{equation*}
E(X_A)=(1+o(1))n\sum_{(\ell,m)\in A}\binom{n-1}{m+\ell}\binom{m+\ell-1}\ell {\mathrm e}^{-\ell} (1-{\mathrm e}^{-1})^{m-1}\frac{(m+\ell)(n-1)^{n-m-\ell-2}}{n^{n-2}} \end{equation*}
which gives (\ref{eqf2}). In order to count the expected number of pairs of vertices with prescribed neighborhoods one needs first to choose
$\ell+m$ neighbors of a vertex and then compute the expectation of the number of vertices of a given neighborhood in the random forest ${\cal F}(n,\ell+m)$ obtained in this way. However, the largest tree of ${\cal F}(n,\ell+m)$ has expected order $n-O(\sqrt n \ln n)$ (Lemma~\ref{lm:forest}); one can easily observe that this fact implies that the expected number of vertices with a prescribed neighborhood in ${\cal F}(n,\ell+m)$
is $(1+o(1))\,E(X_A)$, and so $E(X_A(X_A-1))=(1+o(1))\,(E(X_A))^2$. \qed
As an easy corollary of the above result we get a lower bound for $D({\cal T}(n))$.
\bth{lower} Let $T\in\C T(n)$. Whp $D(T)\ge (1-o(1))\Delta(T)=(1-o(1))\, \frac{\ln n}{\ln \ln n}$. \end{theorem}
\bpf Since whp the maximum degree is $(1-o(1)){\ln n}/{\ln\ln n}$, in order to prove the assertion it is enough to show that whp $T$ contains a vertex $v$ with
\begin{equation}\label{eqf3} \ell_0=(1-o(1))\, \frac{\ln n}{\ln \ln n} \end{equation} neighbors of degree one; indeed, to characterize such a structure Spoiler needs at least $\ell_0+1$ moves. Using Lemma~\ref{lm:vert}, we infer that for the number of vertices $X_{\ell}$ of $T$ with exactly $\ell$ neighbors of degree $1$ we have $E(X_\ell)=O({\mathrm e}^{-\ell}n/\ell!)$. Thus, one can choose $\ell_0$ so that (\ref{eqf3}) holds and $E(X_{\ell_0})\to\infty$. Then, due to Lemma~\ref{lm:vert}, ${\textrm{Var}}(X_{\ell_0})=o((E(X_{\ell_0}))^2)$, and Chebyshev's inequality implies that whp $X_{\ell_0}>0$.\qed
Let us state another simple consequence of Lemma~\ref{lm:forest} which will be used in our proof of Theorem~\ref{th:RandomTree}. Here and below $N_r(v)$ denotes the $r$-neighborhood of $v$, i.e., the set of all vertices of a graph which are at distance $r$ from $v$, and $N_{\le r}(v)=\bigcup_{i=0}^r N_i(v)$.
\blm{largedegrees} Let $r_0=r_0(n)= \lceil 7 \ln n\rceil $. Then, whp the following holds for every vertex $v$ of $T\in {\cal T}(n)$: \begin{enumerate} \renewcommand{(\roman{enumi})}{(\roman{enumi})}
\item $|N_{\le r_0}(v)|\le 10^8 \ln^4n\;,$ \item $N_{\le r_0}(v)$ contains fewer than $\ln n/(\ln\ln n)^2$ vertices of degree larger than $(\ln\ln n)^5$. \end{enumerate} \end{lemma}
\bpf For $s\le r_0$ let $W_s=\cup_{i=0}^s N_i(v)$. Note that, conditioned on the structure of the subtree of $T$ induced by $W_s$ for some $s\le r_0$, the forest $T- W_{s-1}$
can be identified with the random forest on $n-|W_{s-1}|$ vertices, rooted at the set $N_s(v)=W_s\setminus W_{s-1}$. Thus, it follows from Lemma~\ref{lm:forest}(iii) that
once for some $i$ we have $|N_i(v)|\ge 4 \ln ^3 n$
then $|N_{i+1}(v)|\le |N_i(v)|(1+2/\ln n)$,
so that
$$|N_{\le r_0}(v)|\le 4 r_0\ln ^3n (1+2/\ln n)^{r_0}\le 10^8 \ln^4n\;.$$
In order to show (ii) note that (i) and Lemma~\ref{lm:forest}(iv) imply that the probability that, for some vertex $v$, at least $\ell=\lfloor \ln n/(\ln\ln n)^2\rfloor$ vertices of $N_{\le r_0}(v)$
have degree larger than $m=(\ln\ln n)^5$ is bounded from above by $$n\binom {\ln^5 n}{\ell}\left(\frac{2}{(m-1)!}\right)^\ell \le n\left(\frac{2{\mathrm e} \ln^5n}{\ell(m-1)!}\right)^\ell\le n{\mathrm e}^{-m\ell}=o(1).$$
\comment{
Here is a small hole: we know that the probability of having at least $>m$ neighbors is at most $2/m!$ but why is the probability that $l$ given vertices each have degree $>m$ is at most $(2/(m-1)!)^l$?
Proof: we expose levels one by one. Once we have exposed a level, we allow an adversary to choose any number of active vertices, provided he does not choose more than $\ell$ vertices in total. Then adversary succeeds (all his points have high degree) with probability at most $(2/(m-1)!)^\ell$.
}
\qed
In our further argument we need some more definitions. Let $T$ be a tree and let $v$ be a vertex of $T$. For a vertex $w\in N_r(v)$ let $P_{vw}$ denote the unique path connecting $v$ to $w$ (of length $r$). Let the \emph{check} ${\textrm{Ch}}(v;P_{vw})$ be the binary sequence $b_0\cdots b_r$, in which, for $i=0,\dots, r$, $b_{i}$ is zero (resp.\ 1) if the $i$-th vertex of $P_{vw}$ is adjacent (resp.\ not adjacent) to a vertex of degree one. Finally, the \emph{$r$-checkbook} ${\textrm{Ch}}_r(v)$ is the set
$$
{\textrm{Ch}}_r(v)=\{{\textrm{Ch}}(v;P_{vw})\colon w\in N_r(v)\textrm{\ and }P_{vw} \textrm{ is a path of length $r$}\}. $$
Note that a checkbook is not a multiset, i.e., a check from ${\textrm{Ch}}_r(v)$ may correspond to more than one path $P_{vw}$.
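Checks and checkbooks are straightforward to compute; we record a small Python sketch (ours, assuming \texttt{networkx}) for the reader who wishes to experiment.
\begin{verbatim}
import networkx as nx

def check(T, path):
    # Ch(v; P_vw): bit i is 0 if the i-th vertex of the path is adjacent
    # to a vertex of degree one in T, and 1 otherwise.
    return tuple(0 if any(T.degree(u) == 1 for u in T.neighbors(x)) else 1
                 for x in path)

def checkbook(T, v, r):
    # Ch_r(v): the *set* of checks of all length-r paths starting at v.
    # In a tree the path from v to each w in N_r(v) is unique.
    paths = nx.single_source_shortest_path(T, v, cutoff=r)
    return {check(T, p) for p in paths.values() if len(p) == r + 1}

T = nx.balanced_tree(2, 4)      # a small binary tree, just to try it out
print(checkbook(T, 0, 3))       # {(1, 1, 1, 0)}: depth-3 vertices touch leaves
\end{verbatim}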
Our proof of the upper bound for $D({\cal T}(n))$ is based on the following fact.
\bth{checks} Let $r_0=\lceil 7 \ln n\rceil$. Whp for each pair $P_{vw}$, $P_{v'w'}$ of paths of length $r_0$ in $T\in {\cal T}(n)$ which share at most one vertex, the checks ${\textrm{Ch}}(v;P_{vw})$ and ${\textrm{Ch}}(v';P_{v'w'})$ are different.
\end{theorem}
\bpf Let $C={\textrm{del}}(T)$ denote the tree obtained from $T$ by removing all vertices of degree one. From Lemma~\ref{lm:vert} it follows that whp the tree $C$ has $(1-{\mathrm e}^{-1}-o(1))n$ vertices of which
$$
(1+o(1))\,n \sum_{\ell>0} \frac{{\mathrm e}^{-\ell-1}}{\ell!} = (\exp({\mathrm e}^{-1}-1)-{\mathrm e}^{-1}+o(1))\,n$$
vertices have degree one and
$$
\alpha n = (1-\exp({\mathrm e}^{-1}-1) +o(1))\, n
$$
vertices have degree greater than one.
Moreover, among the set $B$ of $({\mathrm e}^{-1}+o(1))n$ vertices removed from $T$,
$$
(1+o(1))n\sum_{\ell=0}^\infty \ell\frac{{\mathrm e}^{-\ell-1}}{\ell!}=(1+o(1))\exp({\mathrm e}^{-1}-2)n\,$$
were adjacent to vertices which became pendant in $C$. Let $B'$ denote the set of the remaining
$$
({\mathrm e}^{-1}-\exp({\mathrm e}^{-1}-2)+o(1))n=(\rho_0+o(1))n
$$ vertices which are adjacent to vertices of degree at least two in $C$. Note that, given $C={\textrm{del}}(T)$, each attachment of vertices from $B\setminus B'$ to pendant vertices of $C$ such that each pendant vertex of $C$ gets at least one vertex from $B\setminus B'$, as well as each attachment of vertices from $B'$ to vertices of degree at least two from $C$, is equally likely.
Let $P_{vw}$, $P_{v'w'}$ be two paths of length $r_0$ in $T$ which share at most one vertex. Clearly, each vertex of $P_{vw}$, except, maybe, at most two vertices at each of the ends, belongs to $C$ and has at least two neighbors in it; the same is true for $P_{v'w'}$. Since $(\rho_0+o(1))n$ vertices from $B'$ are attached to the $\alpha n$ vertices of degree at least two in $C$ at random, the probability that one such vertex gets no attachment is
$$
p_0=(1+o(1))\, \left(1-\frac1{\alpha n}\right)^{\rho_0 n}= (1+o(1))\, {\mathrm e}^{-\rho_0/\alpha} = 0.692...+o(1).
$$
Therefore, the probability that the checks ${\textrm{Ch}}(v;P_{vw})$ and ${\textrm{Ch}}(v';P_{v'w'})$ are identical is bounded from above by
$$
\left(p_0^2+(1-p_0)^2 +o(1)\right)^{r_0}\le {\mathrm e}^{-3\ln n}=o(n^{-2})\,.
$$
Since by Lemma~\ref{lm:largedegrees}(i) whp $T$ contains at most $O(n\ln^4 n)$ paths of length $r_0$, the assertion follows.
\qed
Now, let $r_0=\lceil 7 \ln n\rceil$. We call a tree $T$ on $n$ vertices \emph{typical} if: \begin{itemize} \item for each pair of paths $P_{vw}$, $P_{v'w'}$ of length $r_0$ which share at most one vertex, the checks ${\textrm{Ch}}(v;P_{vw})$, ${\textrm{Ch}}(v;P_{v'w'})$ are different, \item for the maximum degree $\Delta$ of $T$ we have
$$\frac{\ln n}{2\ln\ln n}\le \Delta\le \frac{2\ln n}{\ln\ln n} \,,$$
\item $|N_{\le r_0}(v)|\le 10^8\ln ^4 n$ for every vertex $v$, \item for every vertex $v$ at most $\ln n/(\ln\ln n)^2$ vertices of degree larger than $(\ln\ln n)^5$ lie within distance $r_0$ from $v$. \end{itemize}
\bth{upper} For a typical tree $T\in\C T(n)$ we have $D(T)\le (1+o(1))\, \Delta$. \end{theorem}
\bpf Let $T$ be a typical tree and $T'$ be any other graph which is not isomorphic to $T$. We shall show that then Spoiler can win the Ehrenfeucht game on $T$ and $T'$ in $(1+o(1))\Delta$ moves.
Let us call a vertex $v$ of a graph a \emph{yuppie}, if there are two paths $P_{vw}$, $P_{vw'}$ of length $r_0$ starting at $v$ so that $V(P_{vw})\cap V(P_{vw'})=\{v\}$. Note that the set of all yuppies $Y$ spans a subtree in $T$, call it $K$.
Our approach is similar to that for the giant component from Section~\ref{giant}.
Let us view $K$ as a colored graph where the color of a vertex $x$ is the isomorphism type of the component of $T-(Y\setminus\{x\})$ rooted at $x$. Let $Y'$ be the set of yuppies of $T'$, and let $K'=T'[Y']$. We can assume that Duplicator preserves the subgraphs $K$ and $K'$, for otherwise Spoiler wins in extra $O(\ln \ln n)$ moves.
\claim1 Any distinct $v,v'\in K$ can be distinguished (with respect to $T$) in $O(\ln\ln n)$ moves.
\bpf[Proof of Claim.] Assume that the $r_0$-checkbooks of $v,v'$ are the same for otherwise Spoiler wins in $\log_2(r_0)+O(1)$ moves. (Please note that the checkbooks are viewed as sets, not as multisets, so the number of moves does not depend on the degrees of $v$ and $v'$.)
Take a path $P_{vx}$ of length $r_0$, which shares with $P_{vv'}$ only the vertex $v$. Spoiler selects $x$. Let Duplicator reply with $x'$. Assume that ${\textrm{Ch}}(v;P_{vx})={\textrm{Ch}}(v';P_{v'x'})$. The path $P_{v'x'}$ must intersect $P_{vx}$; thus $v\in P_{v'x'}$. Next, Spoiler selects the $P_{vx}$-neighbor $y$ of $v$; Duplicator's reply must be $y'\in P_{v'x'}$.
Let $z\in T$ maximize $\mathrm{dist}(v,z)$ on the condition that ${\textrm{Ch}}(z)={\textrm{Ch}}(v)$ and $v$ lies between $y$ and $z$ in $T$. Define the analogous vertex $z'$, replacing $v,y$ in the definition by $v',y'$. We have $\mathrm{dist}(v,z)>\mathrm{dist}(v',z')$. Let Spoiler select $w=z$. If Duplicator's reply $w'$ satisfies ${\textrm{Ch}}(w')\not\cong {\textrm{Ch}}(w)$, then Spoiler quickly wins. Otherwise, $\mathrm{dist}(v,w)> \mathrm{dist}(v',w')$. Moreover, $\mathrm{dist}(v,w)\le 2r_0$ (because their $r_0$-checkbooks are non-empty and equal). Spoiler wins in $\log_2 r_0+O(1)$ extra moves. The claim has been proved.\cqed
Similarly to the argument surrounding~\req{Phi}, one can argue that for every vertex $x\in K$ there is a formula $\Phi_x(v)$ of rank $O(\ln \ln n)$ identifying $x$ (with respect to $T$). Moreover, we can assume that this gives us an isomorphism $\phi:K\to K'$ which is respected by Duplicator.
As $T\not\cong T'$, there are two cases to consider.
\case1 There is $x\in K$ such that $T_x\not\cong T'_{x'}$, where $x'=\phi(x)$ and $T_{x'}'$ is the component of $T'-(Y' \setminus\{x'\})$ rooted at $x'$.
Since each vertex of $T$ is within distance at most $r_0$ from some yuppie, the tree $T_x$ has height at most $r_0$. If $T'_{x'}$ has a path of length greater than $2r_0$ or a cycle, then Spoiler easily wins, so assume that $T'_{x'}$ is a tree. Now Spoiler should select all vertices of $T_x$ which are of degree larger than $(\ln\ln n)^5$, say $w_1,\dots,w_t$. Since $T$ is typical, there are at most $\ln n/(\ln\ln n)^2$ such vertices in $T_x$. Suppose that, in response to that, Duplicator chooses vertices $w'_1,\dots,w'_t$ in $T'_{x'}$. Then $T_x\setminus \{w_1,\dots,w_t\}$ splits into a number of trees $F_1, \dots, F_u$, colored according to their adjacencies to the $w_i$'s. Now, for some $i$ the multisets of colored trees adjacent to $w_i$ and $w_i'$ are different. Spoiler can highlight this by using at most $\Delta(T)+1$ moves. Now Spoiler plays inside some $F_i$ the strategy of Theorem~\ref{th:MaxDeg}. Note that $F_i$ has diameter at most $2r_0$ and maximum degree at most $(\ln\ln n)^5$.
\case2 $T'$ is not connected.
As $K'\cong K$ is connected, all yuppies of $T'$ lie in one component, so there is a component $C'$ of $T'$ without a yuppie. Spoiler chooses a vertex $x'\in C'$. Now, any reply $x$ of Duplicator is within distance $r_0$ of a yuppie, which is not true for $x'$. Spoiler can win in $O(\ln \ln n)$ moves.
Consequently, for a typical tree $T$,
$$
D(T)\le \Delta(T)+\frac{\ln n}{(\ln\ln n)^2}+O((\ln\ln n)^6)\,,
$$ and the assertion follows. \qed
\noindent {\it Proof of Theorem~\ref{th:RandomTree}.} Theorem~\ref{th:RandomTree} is an immediate consequence of Theorems~\ref{th:lower} and~\ref{th:upper} and the fact that, due to Lemmas~\ref{lm:forest} and~\ref{lm:largedegrees}, whp a random tree $T\in {\cal T}(n)$ is typical. \qed
\section{Restricting Alternations}
If Spoiler can win the Ehrenfeucht game, alternating between the graphs $G$ and $G'$ at most $r$ times, then the corresponding sentence has the \emph{alternation number} at most $r$, that is, any chain of nested quantifiers has at most $r$ changes between $\exists$ and $\forall$. (To make this well-defined, we assume that no quantifier is within the range of a negation sign.) Let $D_r(G)$ be the smallest depth of a sentence which defines $G$ and has the alternation number at most $r$. It is not hard to see that $D_r(G)=\max\{D_r(G,G'): G'\not\cong G\}$, where $D_r(G,G')$ may be defined as the smallest $k$ such that Spoiler can win $\mbox{\sc Ehr}_k(G,G')$ with at most $r$ alternations. For small $r$, this is a considerable restriction on the structure of the corresponding formulas, so let us investigate the alternation number given by our strategies.
Let $D^{\mathrm{tree}}_r(n,l)$ be the maximum of $D_r(T)$ over all colored trees of order at most $n$ and maximum degree at most $l$.
Unfortunately, in Theorem~\ref{th:MaxDeg} we have hardly any control on the number of alternations. However, we can show that alternation number $0$ suffices if we are happy to increase the upper bound by a factor of $2$.
\begin{lemma}\label{lem:treetree} Let $T$ and $T'$ be colored trees. Suppose that $T\not\cong T'$, where $\cong$ stands for the isomorphism relation for colored trees, i.e., the underlying (uncolored) trees of $T$ and $T'$ may be isomorphic. Furthermore, assume that $v(T)\ge v(T')$ and denote $n=v(T)$. Assume also that $\Delta(T)\le l$ and let both $l$ and $\ln n/\ln l$ tend to infinity. Then Spoiler can win the Ehrenfeucht game on $(T,T')$ in at most
\beq{D1}
(1+o(1)) \frac{l \ln n}{\ln l}.
\end{equation} moves, playing all the time in~$T$. \end{lemma}
\bpf In the first move Spoiler selects a median $x\in T$; let $x'$ be Duplicator's reply. If $d(x)>d(x')$, then Spoiler wins in extra $l$ moves, which is negligible when compared to~(\ref{eq:D1}). So, suppose that $d(x')\ge d(x)$.
Let $t=d(x)$ and $C_1,\dots,C_t$ be the (rooted) components of $T-x$ indexed so that $v(C_1)\ge v(C_2)\ge\ldots\ge v(C_t)$. By the root of a component we mean its vertex adjacent to $x$. Spoiler starts selecting, one by one, the roots of $C_1,C_2,\ldots$. Duplicator is forced to respond with roots of distinct components of $T'-x'$. Spoiler keeps doing so until the following situation occurs: he selects the root $y$ of a component $C=C_i$ while Duplicator selects the root $y'$ of a component $C'$ such that $v(C)\ge v(C')$ and $C\not\cong C'$ (as rooted trees). Such a situation must indeed occur for some $i\le t$ due to the conditions that $v(T)\ge v(T')$, $d(x)\le d(x')$, and $T\not\cong T'$.
We claim that if Spoiler selects a vertex $z$ inside $C$, then Duplicator must reply with some $z'\in C'$, for otherwise Spoiler wins in at most $\log_2 n$ moves. Indeed, suppose $z'\not\in C'$. Spoiler selects $z_1$ which is a middle point of the $yz$-path. Whatever the reply $z_1'$ is, the $z'z_1'$-path or the $z_1'y'$-path contains the vertex $x'$. Suppose it is the $z'z_1'$-path. Then Spoiler halves the $zz_1$-path. After at most $\log_2 n$ halvings he wins.
Thus, making $i+1\le t+1\le l+1$ steps, we have reduced the game to two non-isomorphic (rooted) trees, $C$ and $C'$, with $v(C)\le \min(\frac1i,\frac12)\, v(T)$. In the game on $(C,C')$ Spoiler applies the same strategy recursively. Two ending conditions are possible: either the root of $C$ has strictly larger degree than the root of $C'$, or Duplicator violates a color, the adjacency, or the equality relation. It is easy to argue, cf.\ the proof of Theorem~\ref{th:MaxDeg}, that the worst case for us is when we have $i=(1+o(1))\, l$ all the time, which gives the required bound~(\ref{eq:D1}).
\qed
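For orientation, we record the arithmetic behind the last step (our summary; the proof above leaves it implicit). If the $j$-th round of the recursion uses $i_j+1$ moves and passes to a subtree whose order is smaller by a factor of at least $\max(i_j,2)$, then $\sum_j \ln\max(i_j,2)\le\ln n$, and hence
$$
\sum_j (i_j+1)\ \le\ \max_{2\le i\le l}\frac{i+1}{\ln\max(i,2)}\,\ln n\ =\ (1+o(1))\,\frac{l\ln n}{\ln l},
$$
since for $l\to\infty$ the maximum is attained at $i=l$; the $O(\log_2 n)$ moves of a possible halving phase and the $O(l)$ moves of the final round are of lower order.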
\bth{DT0} Let both $l$ and $\ln n/\ln l$ tend to infinity. Then
\beq{}
D^{\mathrm{tree}}_0(n,l)\le (1+o(1)) \frac{l \ln n}{\ln l}.
\end{equation}
\end{theorem}
\bpf
Let $T$ be a tree of order $n$ and maximum degree at most $l$ and let $G\not\cong T$. If $\Delta(T)\ne\Delta(G)$ then Spoiler wins the Ehrenfeucht game on $(T,G)$ in at most $l+2$ moves playing in the graph with the larger maximum degree. We will therefore assume that $T$ and $G$ have the same maximum degree not exceeding~$l$.
\case1 $G$ contains a cycle of length no more than $n+1$.
Spoiler plays in $G$ proceeding as in the last paragraph of the proof of Lemma~\ref{lm:Tree}.
\case2 $G$ is connected and has no cycle of length up to $n+1$.
If $v(G)\le n$, then $G$ must be a tree. Lemma \ref{lem:treetree} applies. Let us assume $v(G)>n$. Let $A$ be a set of $n+1$ vertices spanning a connected subgraph in $G$. This subgraph must be a tree. Spoiler plays in $G$ staying all the time within $A$. Lemma~\ref{lem:treetree} applies.
\case3 $G$ is disconnected and has no cycle of length up to $n+1$.
We can assume that every component $H$ of $G$ is a tree for otherwise Spoiler plays the game on $(T,H)$ staying in $H$, using the strategy described above.
Suppose first that $G$ has a tree component $H$ such that $H\not\cong T$ and $v(H)\ge n$. If $v(H)=n$, let $T'=H$. Otherwise let $T'$ be a subtree of $H$ on $n+1$ vertices. Spoiler plays the game on $(T,T')$ staying in $T'$ and applying the strategy of Lemma \ref{lem:treetree} (with $T$ and $T'$ interchanged and perhaps with $n+1$ in place of~$n$).
Suppose next that all components of $G$ are trees of order less than~$n$. In the first move Spoiler selects a median $x$ of $T$. Let Duplicator respond with a vertex $x'$ in a component $T'$ of $G$. If in the sequel Duplicator makes a move outside $T'$, then Spoiler wins by Lemma~\ref{lm:path}. As long as Duplicator stays in $T'$, Spoiler follows the strategy of Lemma \ref{lem:treetree}.
Finally, it remains to consider the case that $G$ has a component $T'$ isomorphic to~$T$. Spoiler plays in $G$. In the first move he selects a vertex $x'$ outside $T'$. Let $x$ denote Duplicator's response in $T$. Starting from the second move Spoiler plays the game on $(T,T')$ according to Lemma \ref{lem:treetree}, where $x$ is considered colored in a color absent in $T'$.
Our description of Spoiler's strategy is complete.\qed
It is not clear what the asymptotics of $D^{\mathrm{tree}}_0(n,l)$ is. We could not even rule out the possibility that $D^{\mathrm{tree}}_0(n,l)=(\frac12+o(1))\, \frac{l\ln n}{\ln l}$.
\comment{Also, it would be interesting to know $D^{\mathrm{tree}}_i$ for other small $i$, such as $i=1$ or $i=2$.}
A similar method shows that $D^{\mathrm{tree}}_0(n,l)=\Theta(\ln n)$ if $l\ge 2$ is constant and $D^{\mathrm{tree}}_0(n,l)=\Theta(l)$ if $\frac{\ln n}{\ln l}=O(1)$, but the exact asymptotics seems difficult to compute.
Using these results, one can show that the upper bounds in Theorems~\ref{th:RandomTree} and~\ref{th:giant} apply to $D_1(G)$, that is, there are strategies for Spoiler requiring at most one alternation. It is not clear whether $0$ alternations suffice here. One of the few places that seem to require an alternation is establishing that $\phi$ is a bijection: Spoiler may be forced to start in one of the graphs, while later (for example, when showing that $T_x\not\cong T'_{x'}$) he may need to swap graphs.
\end{document}
\begin{document}
\title{Approximating Turaev-Viro 3-manifold invariants \\is universal for quantum computation}
\author{Gorjan Alagic} \affiliation{Institute for Quantum Computing, University of Waterloo} \author{Stephen P. Jordan} \affiliation{Institute for Quantum Information, California Institute of Technology} \author{Robert K\"onig} \affiliation{Institute for Quantum Information, California Institute of Technology} \author{Ben W. Reichardt} \affiliation{Institute for Quantum Computing, University of Waterloo}
\date{\today}
\begin{abstract} The Turaev-Viro invariants are scalar topological invariants of compact, orientable $3$-manifolds. We give a quantum algorithm for additively approximating Turaev-Viro invariants of a manifold presented by a Heegaard splitting. The algorithm is motivated by the relationship between topological quantum computers and $(2+1)$-D topological quantum field theories. Its accuracy is shown to be nontrivial, as the same algorithm, after efficient classical preprocessing, can solve any problem efficiently decidable by a quantum computer. Thus approximating certain Turaev-Viro invariants of manifolds presented by Heegaard splittings is a universal problem for quantum computation. This establishes a novel relation between the task of distinguishing non-homeomorphic $3$-manifolds and the power of a general quantum computer. \end{abstract}
\pacs{03.67.-a, 05.30.Pr, 03.65.Vf}
\maketitle
The topological quantum computer is among the most striking examples of known relationships between topology and physics. In such a computer, quantum information is encoded in a quantum medium on a $2$-D surface, whose topology determines the ground space degeneracy. Surface deformations implement encoded operations. Topological quantum computers are universal, i.e., can implement arbitrary quantum circuits. It is natural to try to identify the topological origin of this computational power.
One answer is that the power stems from the underlying $(2+1)$-D topological quantum field theory (TQFT)~\cite{FreedmanKitaevWang00}. The TQFT assigns a Hilbert space ${\mathcal H}_\Sigma$ to a $2$-D surface $\Sigma$, and a unitary map $U(f): {\mathcal H}_\Sigma \rightarrow {\mathcal H}_{\Sigma'}$ to every diffeomorphism $f: \Sigma \rightarrow \Sigma'$, subject to a number of axioms~\cite{Walker91}. However, this answer is not fully satisfactory; the definition of a TQFT is involved, and uses mathematics that appears in similar form in the theory of quantum computation. A second answer, arising in~\cite{AharonovJonesLandau06Jones, AharonovArad06Jones, GarneroneMarzuoliRasetti06Jones, WocjanYard07Jones}, is that quantum computers' power comes from their ability to approximate the evaluation, at certain points, of the Jones polynomial of the plat closure of a braid.
Here we give an alternative topological description of the power of quantum computers, in terms of the Turaev-Viro $3$-manifold invariants. Observe that restricting TQFTs to closed manifolds results in scalar invariants. We show that approximating certain such invariants is equivalent to performing general quantum computations. That is, we give an efficient quantum algorithm for additively approximating Turaev-Viro invariants, and conversely we show that for any problem decidable in bounded-error, quantum polynomial time (BQP), there is an efficient classical reduction to the Turaev-Viro invariant approximation problem. The classical procedure outputs the description of a $3$-manifold whose certain Turaev-Viro invariant is either large or small depending on whether the original BQP algorithm outputs $1$ or $0$.
Turaev and Viro~\cite{TuraevViro92} defined a family of invariants for compact, orientable $3$-manifolds. The original definition parameterized the invariants by the quantum groups $\SU(2)_k$, for $k \in \mathbb{N}$, but it was extended by Barrett and Westbury~\cite{BarrettWestbury96invariants} to give an invariant for any spherical tensor category~${\mathcal C}$. Any compact $3$-manifold $M$ is homeomorphic to a finite collection of tetrahedra glued along their faces~\cite{Moise52}. Beginning with such a triangulation, assign a certain rank-six tensor $F$ to each tetrahedron and a certain gluing tensor $d$ to every edge. The invariant $\TV_{\mathcal C}(M)$ is the contraction of the tensor network, which can be written out as \begin{equation} \label{eq:triang_invar} \hspace{-0.5ex} \TV_{\mathcal C}(M) = {\mathcal D}^{-2 \abs V}\hspace{-0.2ex} \sum_{\textrm{labelings}} \prod_{\text{edges}} d_e \prod_{\text{tetrahedra}} \frac{F^{ijm}_{kln}}{\sqrt{d_m d_n}}
\enspace \end{equation} if~${\mathcal C}$ is multiplicity-free. Here, the sum is over edge labelings of the triangulation by particles from the category~${\mathcal C}$. The index $e$ on $d$ is the label of an edge, while the indices $i, \ldots, n$ are the labels of the six edges involved in a tetrahedron, ordered and oriented following certain rules. The fusion tensor $F$, the quantum dimensions~$d$ and the total quantum dimension ${\mathcal D}$ are parameters of~${\mathcal C}$. $\abs V$ is the number of vertices of the triangulation. The topological invariance of $\TV_{\mathcal C}(M)$ follows from the fact that any two triangulations of $M$ can be related by a finite sequence of local Pachner moves~\cite{Pachner91}, under which the above quantity is invariant. In this paper we consider multiplicity-free unitary modular tensor categories, which include the $\SU(2)_k$ case, but are not as general as spherical tensor categories.
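For concreteness, the following brute-force Python sketch (ours; exponential in the number of edges and intended only for toy triangulations) evaluates the state sum of Eq.~\eqnref{eq:triang_invar} given hypothetical category data \texttt{particles}, \texttt{d}, \texttt{D}, \texttt{F}; each tetrahedron is specified by its six edge identifiers $(i,j,m,k,l,n)$ in the ordering/orientation convention of the category, which the sketch takes as given and does not check.
\begin{verbatim}
from itertools import product
from math import sqrt

def turaev_viro(num_vertices, edges, tetrahedra, particles, d, D, F):
    """Brute-force evaluation of the state sum; exponential in len(edges)."""
    total = 0j
    for labels in product(particles, repeat=len(edges)):
        lab = dict(zip(edges, labels))
        term = 1.0 + 0j
        for e in edges:                      # product of quantum dimensions d_e
            term *= d[lab[e]]
        for (i, j, m, k, l, n) in tetrahedra:
            a, b = lab[m], lab[n]            # the two edge labels carrying the 1/sqrt(d) weights
            # fusion-inconsistent labelings are assigned F = 0 and drop out
            term *= F.get((lab[i], lab[j], a, lab[k], lab[l], b), 0.0) / sqrt(d[a] * d[b])
        total += term
    return D ** (-2 * num_vertices) * total
\end{verbatim}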
To formulate a BQP-complete problem~\cite{WocjanZhang06BQPcomplete} of estimating the Turaev-Viro invariant, we require a presentation of $3$-manifolds known as a Heegaard splitting. Consider two genus-$g$ handlebodies (e.g., the solid torus for $g=1$). They can be glued together, to give a $3$-manifold, using a self-homeomorphism of the genus-$g$ surface. The set of orientation-preserving self-homeomorphisms modulo those isotopic to the identity form the mapping class group $\MCG(g)$ of the surface. It is an infinite group generated by the $3g-1$ Dehn twists illustrated in \figref{fig:Dehn}. A Heegaard splitting thus consists of a natural number $g$ and an element $x \in \MCG(g)$, defining a manifold $M(g, x)$. Every compact, orientable $3$-manifold can be obtained in this way, up to homeomorphism.
\begin{figure}
\caption{A Dehn twist is a $2 \pi$ rotation about a closed curve. The Dehn twists about the $3g-1$ curves shown above generate the full mapping class group of the genus-$g$ surface~\cite{Lickorish64generators}.}
\label{fig:Dehn}
\end{figure}
\begin{theorem} \label{t:TVproblem} For any fixed multiplicity-free unitary modular tensor category ${\mathcal C}$, there is a quantum algorithm that, given $\delta, \epsilon > 0$, $g \in \mathbb{N}$ and a length-$m$ word $x$ in the Dehn-twist generators of $\MCG(g)$ from \figref{fig:Dehn}, runs in time $\mathrm{poly}(g, m, \log 1/\delta, 1/\epsilon)$ and, except with probability at most $\delta$, outputs an approximation of $\TV_{\mathcal C}(M(g, x))$ to within~$\pm {\mathcal D}^{2(g-1)}\, \epsilon$.
Conversely, for ${\mathcal C}$ the category associated to
$\SU(2)_k$ or $\SO(3)_k$ for $k\geq 3$ such that $k+2$~is prime, it is BQP-hard to decide whether ${\mathcal D}^{2(1-g)} \, \TV_{\mathcal C}(M(g, x))$ is greater than $2/3$ or less than $1/3$. More precisely, given any quantum circuit $\Upsilon$ of $T$ two-qubit gates acting on $n$ qubits $\ket{0^n}$, with output either $0$ or $1$, one can classically find in polynomial time a word $x = x_1 \ldots x_m$ in the standard Dehn-twist generators of~$\MCG(g)$, with $g = n + 1$ and $m = \mathrm{poly}(T)$, such that \begin{equation} \big\lvert \Pr[\text{$\Upsilon$ outputs $1$}] - {\mathcal D}^{2(1-g)} \, \TV_{\mathcal C}(M(g, x)) \big\rvert < 1/6
\enspace . \end{equation} \end{theorem}
The additive approximation error is exponential in~$g$. Complexity-theoretic reasons make it unlikely that quantum computers can efficiently obtain a multiplicative or otherwise presentation-independent error~\cite{Kuperberg09}.
In fact, a similar statement to \thmref{t:TVproblem} also holds for approximating the Witten-Reshetikhin-Turaev (WRT) invariants~\cite{Witten89, ReshetikhinTuraev91}. For any $g$, a modular category~${\mathcal C}$ can be used to define a projective representation $\rho_{{\mathcal C}, g}: \MCG(g) \rightarrow \GL({\mathcal H}_{{\mathcal C}, g})$. This representation will be given below. The WRT invariant for a $3$-manifold~$M(g, x)$ is then given by a matrix element \begin{equation} \label{eq:wrtinvariant} \WRT_{\mathcal C}(M(g,x)) = {\mathcal D}^{g-1} \bra{v_{{\mathcal C}, g}} \rho_{{\mathcal C}, g}(x) \ket{v_{{\mathcal C}, g}}\ , \end{equation} where $\ket{v_{{\mathcal C}, g}} \in {\mathcal H}_{{\mathcal C}, g}$ is a certain unit-normalized vector. As the representation is projective,~$\WRT_{\mathcal C}$ is a 3-manifold invariant only up to a multiple of $e^{2\pi i c/24}$ where $c$ is called the central charge. (Eq.~\eqref{eq:wrtinvariant} is the Crane-Kohno-Kontsevich presentation~\cite{Crane91, Kohno92, Kontsevich88} of the WRT invariant, which is more commonly defined in terms of a Dehn surgery presentation of~$M$. Equivalence of these definitions for ${\mathcal C} = \SU(2)_k$ is shown in~\cite{Piunikhin93}; see also~\cite[Sec.~2.4]{Kohno02}.)
The fact that Eq.~\eqnref{eq:wrtinvariant} indeed gives an invariant can be established by studying the problem of when two Heegaard splittings~$(g, x)$ and $(g', x')$ describe homeomorphic manifolds. Since taking the connected sum of a manifold $M$ with the $3$-sphere~$S^3$ does not change the manifold, i.e., $M\# S^3\cong M$, the standard Heegaard splitting of~$S^3$ into two genus-one handlebodies allows defining a ``stabilization'' map $(g, x) \mapsto (g+1, \tilde x)$ such that $M(g, x) \cong M(g+1, \tilde x)$. A general theorem of Reidemeister~\cite{Reidemeister33} and Singer~\cite{Singer33} asserts that $M(g, x) \cong M(g', x')$ if and only if $(g,x)$ and $(g',x')$ are equivalent under stabilization and the following algebraic equivalence relation for the case of equal genus~\cite{Funar95} \begin{equation} (g, x) \equiv (g, x') \; \textrm{ if $x = yx'z$ with $y, z \in \MCG^+(g)$}
\enspace . \end{equation} Here $\MCG^+(g) \subset \MCG(g)$ is the subgroup of self-homeomorphisms (classes) of the genus-$g$ surface that extend to the genus-$g$ handlebody. Invariance of $\WRT_{\mathcal C}(M(g,x))$ now follows essentially from the fact that $\ket{v_{{\mathcal C}, g}}$ is invariant under the action of~$\MCG^+(g)$.
The Turaev-Viro and WRT invariants are related by \begin{equation} \label{eq:tvwrtinvariant} \TV_{\mathcal C}(M) = \abs{\WRT_{\mathcal C}(M)}^2 \end{equation} as shown by Turaev~\cite{Turaev91} and Walker~\cite{Walker91} (see also~\cite{Turaev94book, Roberts95}). In~\cite{KoenigKuperbergReichardt10TVcode}, Eq.~\eqnref{eq:tvwrtinvariant} is discussed in the category-theoretic formalism used here. Identities~\eqnref{eq:wrtinvariant} and~\eqnref{eq:tvwrtinvariant}, together with density and locality properties of the representations~$\rho_{{\mathcal C}, g}$, are the basis of our BQP-completeness proof.
Previously, a quantum algorithm for approximating the $\SU(2)_k$ Turaev-Viro and WRT invariants was given by Garnerone \emph{et al.}~\cite{GarneroneMarzuoliRasetti07}, assuming the manifold is specified by Dehn surgery rather than a Heegaard splitting. BQP-hardness of the approximation was left as an open problem. In unpublished work, Bravyi and Kitaev have proven the BQP-completeness of the problem of approximating the $\SU(2)_4$ WRT invariant of $3$-manifolds with boundary~\cite{BravyiKitaev00}, where the manifold is specified using Morse functions. We remark that one can use Arad and Landau's quantum algorithm for approximating tensor network contractions to compute the Turaev-Viro invariant of a triangulated manifold~\cite{AradLandau08tensor}. While this algorithm would run polynomially in the number of tetrahedra, its precision depends on the order in which tensors are contracted and may be trivial.
We will only briefly describe the space ${\mathcal H}_{{\mathcal C}, g}$, the representation $\rho_{{\mathcal C}, g} : \MCG(g) \rightarrow \GL({\mathcal H}_{{\mathcal C}, g})$ and the state $\ket{v_{{\mathcal C}, g}} \in {\mathcal H}_{{\mathcal C}, g}$ from Eq.~\eqnref{eq:wrtinvariant}. Details are in~\cite{Crane91, Kohno92, Kontsevich88, KoenigKuperbergReichardt10TVcode}.
Let ${\mathcal C}$ be a multiplicity-free unitary modular tensor category. It specifies a set of particles $i$ with quantum dimensions $d_i > 0$, and including a trivial particle~$\vac$. The total quantum dimension is ${\mathcal D} = \sqrt{\sum_i d_i^2}$. ${\mathcal C}$ additionally specifies a particle duality map $i \mapsto i^*$, fusion rules, $F$-symbols $F^{ijm}_{kln}$ and $R$-symbols $R_i^{jk}$. These tensors obey certain identities, such as the pentagon and hexagon equations, which can be found in, e.g., \cite{Preskill98notes, KoenigKuperbergReichardt10TVcode}.
Let $g \in \mathbb{N}$, $g \geq 2$. The space ${\mathcal H}_{{\mathcal C}, g}$ can be defined by specifying an orthonormal basis. Decompose the genus-$g$ surface~$\Sigma_g$ into three-punctured spheres (or ``pants'') by cutting along $3g - 3$ noncontractible curves, as illustrated in \figref{fig:dualgraphexamples}. Dual to such a decomposition is a trivalent graph~$\Gamma$. Direct arbitrarily the edges of $\Gamma$. A basis vector $\ket \ell_\Gamma$ is a fusion-consistent labeling of the edges of $\Gamma$ by particles of the category~${\mathcal C}$. Fusion-consistency is defined by the fusion rules, i.e., a set of triples~$(i,j,k)$ that are allowed to meet at every vertex, and particle duality, which switches the direction of an edge, replacing a label~$i$ by the antiparticle~$i^*$. Define the states $\cB_{\Gamma}:=\{\ket{\ell}_\Gamma\}_{\ell}$ to be orthonormal, and their span to be ${\mathcal H}_{{\mathcal C}, g}$. Note that this definition gives a natural encoding of ${\mathcal H}_{{\mathcal C}, g}$ into qudits, with one qudit to store the label of each edge of~$\Gamma$. The directed graph $\Gamma$ can be stored in a classical register.
The above definition depends on~$\Gamma$, but alternative pants decompositions simply represent different bases~$\cB_\Gamma$ for the same Hilbert space. To convert between all possible pants decompositions of~$\Sigma_g$ we need two moves, each corresponding to a local unitary operator.
\begin{figure}
\caption{Three examples of decompositions of the genus-two surface $\Sigma_2$ into three-punctured spheres. In each case, a trivalent adjacency graph of the punctured spheres is shown in red.}
\label{fig:dualgraphexamples}
\end{figure}
The $F$ move relates bases that differ by a ``flip" of a cut between two three-punctured spheres. In the qudit encoding, it is a five-qudit unitary, with four control qudits. Its action is given by \begin{equation} \label{eq:Fmove} \raisebox{-1.3cm}{\includegraphics[scale=.5]{images/fmove1}} = \sum_n F^{i j m}_{k l n} \raisebox{-1.3cm}{\includegraphics[scale=.5]{images/fmove2}} \end{equation}
The $S$ move applies when two boundaries of a single three-punctured sphere are connected. It is a two-qudit unitary, with one control qudit, and its action is given by \begin{equation} \label{eq:Smove} \raisebox{-1.3cm}{\includegraphics[scale=.5]{images/smove1}} = \sum_k S^i_{jk} \raisebox{-1.3cm}{\includegraphics[scale=.5]{images/smove2}} \end{equation} Most presentations of modular tensor categories do not explicitly provide values for $S^i_{jk}$. However, as discussed in~\cite{Walker91}, $S^i_{jk}$~can be calculated by the identity \begin{equation} {\mathcal D} S^i_{jk} = \sum_{\substack{l:\,(j,k^*,\ell)\\~\textrm{fusion-consistent}}} \hspace{-0.5cm} F^{i k^* k}_{l j^* j} \frac{d_l}{\sqrt {d_i}} R^{k j^*}_l R^{j k^*}_{l^*} = \raisebox{-.55cm}{\includegraphics[scale=1]{images/Sabc}} \end{equation} (The last expression uses ribbon graph notation.)
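As an illustration (ours, with hypothetical data structures), the identity above can be turned directly into code: given dictionaries \texttt{F}, \texttt{R}, \texttt{d}, a duality map and a fusion predicate describing the category, the entry $S^i_{jk}$ is a single sum over particle labels.
\begin{verbatim}
from math import sqrt

def S_entry(i, j, k, F, R, d, dual, fusion_ok, D):
    """S^i_{jk} from the identity above; F[(i,j,m,k,l,n)] ~ F^{ijm}_{kln}, R[(j,k,l)] ~ R^{jk}_l."""
    total = 0j
    for l in d:                              # run over all particle labels
        if not fusion_ok(j, dual[k], l):     # only fusion-consistent l contribute
            continue
        total += (F[(i, dual[k], k, l, dual[j], j)]
                  * d[l] / sqrt(d[i])
                  * R[(k, dual[j], l)]
                  * R[(j, dual[k], dual[l])])
    return total / D
\end{verbatim}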
The action~$\rho_{{\mathcal C}, g}$ of~$\MCG(g)$ on~${\mathcal H}_{{\mathcal C}, g}$ can now be specified by the action of the Dehn-twist generators on basis vectors. For a Dehn twist about a curve $\sigma$, apply a sequence of $F$ and $S$ moves to change into a basis $\cB_\Gamma$, i.e., a pants decomposition of $\Sigma_g$, in which $\sigma$ divides two three-punctured spheres. In such a basis, the Dehn twist acts diagonally: if the edge of~$\Gamma$ crossing $\sigma$ has label~$i$, the twist applies a phase shift of~$R^{ii^*}_0$.
To complete the definition of~$\WRT_{\mathcal C}(M(g,x))$ from Eq.~\eqnref{eq:wrtinvariant}, it remains to define the state~$\ket{v_{{\mathcal C}, g}}$. As on the right-hand side of Eq.~\eqnref{eq:Smove}, decompose $\Sigma_g$ with a meridional cut through each handle. Then $\ket{v_{{\mathcal C}, g}}$ is the state in which every edge of~$\Gamma$ is labeled by~$\vac$, the trivial particle.
Let us now prove \thmref{t:TVproblem}. Although not obvious from Eq.~\eqnref{eq:triang_invar} (the original tensor-network-contraction-based definition of the Turaev-Viro invariant), \thmref{t:TVproblem} is a straightforward consequence of the definition based on the representation~$\rho_{{\mathcal C}, g}$, and of known density results.
The Turaev-Viro and WRT invariants for $M(g, x)$ can be approximated essentially by implementing $\rho_{{\mathcal C}, g}(x)$. The algorithm maintains a classical register storing the graph~$\Gamma$, together with a quantum register containing the current state in~${\mathcal H}_{{\mathcal C}, g}$ in the basis $\cB_\Gamma$. If~${\mathcal C}$ has~$N$ particle types, the algorithm uses an $N$-dimensional qudit for each edge of~$\Gamma$. Then~$\rho_{{\mathcal C}, g}(x_j)$ can be applied by using a sequence of $F$ and $S$ moves, i.e., certain local unitaries, to change to a basis in which $x_j$ acts diagonally. Since $x_j$~is one of the generators from \figref{fig:Dehn}, starting with the graph $\Gamma$ of \figref{fig:encoding} (for which every edge is labeled~$\vac$ in $\ket{v_{{\mathcal C}, g}}$) at most one $F$ and one $S$ move are needed. An estimate to within $\epsilon$ of the desired matrix element $\bra{v_{{\mathcal C}, g}} \rho_{{\mathcal C}, g}(x) \ket{v_{{\mathcal C}, g}}$ can be given, except with probability~$\delta$, using $O(\log (1/\delta) /\epsilon^2)$ Hadamard tests, as in~\cite{AharonovJonesLandau06Jones}.
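The stated number of repetitions is the usual Chernoff--Hoeffding count, which we recall for completeness (it is not spelled out above): mapping the two outcomes of the Hadamard test to $\pm1$, the empirical mean $\overline{X}$ of $m$ independent runs satisfies
\[
\Pr\Big[\big|\overline{X}-\mathrm{Re}\,\bra{v_{{\mathcal C}, g}} \rho_{{\mathcal C}, g}(x) \ket{v_{{\mathcal C}, g}}\big|\ge\epsilon\Big]\le 2\,e^{-m\epsilon^2/2},
\]
so $m=\lceil 2\ln(4/\delta)/\epsilon^2\rceil$ runs for the real part, and as many for the imaginary part, estimate both to within $\epsilon$ except with total probability at most $\delta$.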
\begin{figure}\label{fig:encoding}
\label{fig:local}
\end{figure}
\def\Upsilon{\Upsilon} To prove BQP-hardness we reduce from the BQP-complete problem of deciding whether $\lvert \bra{0^g} \Upsilon \ket{0^g}\rvert^2$ is larger than $5/6$ or less than $1/6$, given the $g$-qubit quantum circuit~$\Upsilon$~\cite{AharonovJonesLandau06Jones}. Let~${\mathcal C}$ be the modular tensor category associated with~$\SU(2)_k$ or $\SO(3)_k$, with $k \geq 3$ and $k+2$ prime. Given~$\Upsilon$ consisting of $T$ two-qubit gates, our aim is to construct efficiently the Heegaard splitting~$(g,x)$ of a manifold~$M=M(g,x)$ such that ${\mathcal D}^{2(1-g)} \TV_{\mathcal C}(M)$ approximates $\lvert \bra{0^g} \Upsilon \ket{0^g}\rvert^2$. As illustrated in \figref{fig:encoding}, we use one handle of a genus-$g$ handlebody to encode each qubit. Such a labeling is fusion-consistent, and the encoding of the initial state $\ket{0^g}$ is exactly $\ket{v_{{\mathcal C}, g}} \in {\mathcal H}_{{\mathcal C}, g}$. As shown in~\cite{FLW_dense, LarsenWang05dense}, for ${\mathcal C} = \SO(3)_k$ the representation $\rho_{{\mathcal C}, g}$ has a dense image, up to phases, in the group of unitaries on~${\mathcal H}_{{\mathcal C},g}$, for $g \geq 2$. By the density for $g = 2$ and the Solovay-Kitaev theorem~\cite{NielsenChuang00}, any two-qubit gate can therefore be approximated in the codespace to precision $1/(6T)$ by applying a $(\log T)^{O(1)}$-long sequence of the five Dehn twists shown in \figref{fig:local}. This holds also for ${\mathcal C} = \SU(2)_k$, as $\SO(3)_k$ is just the restriction of $\SU(2)_k$ to particles with integer spins. Thus we obtain a polynomial-length word~$x = x_1\cdots x_{\mathrm{poly}(T)}$ in the Dehn-twist generators whose action approximates $\Upsilon$ on the codespace. Then $\bra{v_{{\mathcal C}, g}} \rho_{{\mathcal C}, g}(x) \ket{v_{{\mathcal C}, g}}$ approximates~$\bra{0^g} \Upsilon \ket{0^g}$.
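The per-gate precision $1/(6T)$ is dictated by the standard subadditivity of unitary errors (a routine estimate which we make explicit here): if each gate $U_t$ of $\Upsilon=U_T\cdots U_1$ is replaced on the codespace by the unitary $V_t$ implemented by the corresponding Dehn-twist word, with $\|V_t-U_t\|\le 1/(6T)$ after absorbing global phases into the $V_t$, then
\[
\big\|V_T\cdots V_1-U_T\cdots U_1\big\|\ \le\ \sum_{t=1}^{T}\|V_t-U_t\|\ \le\ \tfrac16,
\]
and hence the matrix element $\bra{v_{{\mathcal C}, g}} \rho_{{\mathcal C}, g}(x) \ket{v_{{\mathcal C}, g}}$ differs from $\bra{0^g} \Upsilon \ket{0^g}$ by at most $1/6$.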
This work demonstrates how quantum physics, in the form of TQFTs, can inspire new quantum algorithms for problems based on topology and tensor networks. The approach taken here realizes in a sense the traditional vision of quantum computers as universal simulators for physical systems, but with a different outcome: it provides a purely mathematical problem whose difficulty exactly captures the power of a quantum computer.
S.J.\ acknowledges support from the Sherman Fairchild Foundation and NSF grant PHY-0803371. R.K.\ acknowledges support by the Swiss National Science Foundation (SNF) under grant PA00P2-126220. B.R.\ and G.A.\ acknowledge support from NSERC and ARO. Some of this research was conducted at the Kavli Institute for Theoretical Physics, supported by NSF grant PHY05-51164.
\end{document}
\begin{document}
\title{Geometric phase accumulated in a driven quantum system coupled to a structured environment} \author{Paula I. Villar} \affiliation{Departamento de F\'\i sica {\it Juan Jos\'e Giambiagi}, FCEyN UBA and IFIBA CONICET-UBA, Facultad de Ciencias Exactas y Naturales, Ciudad Universitaria, Pabell\' on I, 1428 Buenos Aires, Argentina.} \author{Alejandro Soba} \affiliation{ Centro At\'omico Constituyentes, Comisi\'on Nacional de Energ\'\i a At\'omica, Avenida General Paz 1499, San Mart\'\i n, Argentina} \date{\today}
\begin{abstract}
We study the role of driving in a two-level system evolving in the presence of a structured environment in different regimes. We find that adding a periodic modulation to the two-level system can greatly enhance the survival of the geometric phase for many periods at intermediate coupling to the environment. In this regime, where some non-markovian features characterize the dynamics, we have noted that adding driving to the system leads to a suppression of non-markovianity altogether, allowing for a smooth dynamical evolution and an enhancement of the robustness condition of the geometric phase. As the model studied herein is the one used to model experimental situations such as hybrid quantum-classical systems feasible with current technologies, we believe this knowledge can aid the search for physical set-ups that best
retain quantum properties under dissipative dynamics.
\end{abstract}
\maketitle
The state of a point like discrete energy level quantum system interacting with a quantum field acquires a geometric phase (GP) that is independent of the state of the field \cite{berry}. The phase depends only on the system's path in parameter space, particularly the flux of some gauge field enclosed by that path. Due to its topological properties and close connection with gauge theories of quantum fields, the GP has recently become a fruitful venue of investigation to infer features of the quantum system. For pure field states, the GP is said to encode information about the number of particles in the field \cite{caridi}. If the field is in a thermal state, the GP encodes information about its temperature, and so it has been used in a proposal to measure the Unruh effect at low accelerations \cite{martinez1}. Furthermore, in \cite{martinez2}, it has been proposed as a high precision thermometer in order to infer the temperature of two atoms interacting with a known hot source and an unknown temperature cold cavity. In this context, the study of the GP in open quantum systems has been a subject of investigation lately. The definition of the geometric phase for non-unitary evolution was first stated in \cite{Tong}. This definition has been used to measure the corrections of the GP in a non-unitary evolution \cite{prl} and to explain the noise effects in the observation of the GP in a superconducting qubit \cite{leek,pra}. The geometric phase of a two-level system under the influence of an external environment has been studied in a wide variety of scenarios \cite{papers}. It has further been used to track traces of quantum friction in an experimentally viable scheme of a neutral particle traveling at constant velocity in front of a dielectric plate \cite{nature} and in a very simplistic analytical model of an atom coupled to a scalar quantum field \cite{epl}.
The coupling of the quantum system to the environment is described by the spectral density function. If the system couples to all modes of the environment in an equal way the spectrum of the reservoir is flat. If, otherwise, the spectral density function strongly varies with the frequency of the environmental oscillators, the environment is said to be structured. In this type of environment the memory effects induce a feedback of information from the environment into the system. They are therefore called non-markovian \cite{breuer}. Numerous works have investigated the presence of non-markovianity in a variety of scenarios in quantum open systems so as to determine whether non-markovianity is a useful resource for quantum technologies. It has been studied how the presence of a driving field affects the non-markovian features of a quantum open system. For instance, studies which assessed the effectiveness of optimal control methods \cite{Zhu,Krotov} in open quantum system evolutions showed that non-markovianity allowed for an improved controllability \cite{Schmidt, Reich, Triana}. Likewise, the non-markovian effects were associated to the reduction of efficiency in dynamical decoupling schemes \cite{addis} and accounted for corrections to the GP acquired \cite{pra1,Luo,Oh}.
In this work, we investigate to what extent external driving acting solely on the system can increase non-markovianity (and therefore modify the geometric phase) with respect to the undriven case. To this end, we consider a two-level system described by a time-periodic Hamiltonian interacting with a structured environment. It has been recently shown that the driving has a peculiar effect on the non-markovian character of the system dynamics: it can generate a large enhancement of the degree of non-markovianity with respect to the static case for a weak coupling between the system and environment \cite{Poggi}. The importance of the driven two-state model is especially pronounced in quantum computation and quantum technologies, where one or more driven qubits constitute the basic building block of quantum logic gates \cite{nielsen}. Geometric quantum computation exploits GPs to implement universal sets of one-qubit and two-qubit gates, whose realization finds versatile platforms in systems of trapped atoms \cite{Duan}, quantum dots \cite{solinas} and superconducting circuit-QED \cite{Faoro}. Different implementations of qubits for quantum logic gates are subjected to different types of environmental noise, i.e., to different environmental spectra. Since the model studied herein can be implemented in these experimental contexts, using real or artificial atoms, it is important to unveil the time behavior of the qubit geometric phase for driven systems. We shall only focus on weak or intermediate coupling since we try to track traces of the geometric phase, which is literally destroyed under a strong influence of the environment. This means that while there are non-markovian effects that induce a correction to the unitary GP, the system maintains its purity for
several cycles, which allows the GP to be observed. It is important to note that if the noise effects induced on the system are of considerable magnitude, the coherence terms of the quantum system are rapidly destroyed and the GP literally disappears \cite{papers}. This knowledge can aid the search for physical set-ups that best retain quantum properties under dissipative dynamics.
This paper is structured as follows. In Sec. \ref{modelo} we present the model consisting of a two-level system described by a time-periodic Hamiltonian interacting with a structured environment. In Sec. \ref{dinamica}, we numerically solve the dynamics of the system for different regimes through the hierarchy method beyond the rotating wave approximation. In Sec. \ref{fase} we compute the geometric phase for a two-level driven system and analyze its deviation from the unitary geometric phase under different regimes. Since we want to track traces of the geometric phase, which is literally destroyed under a strong influence of the environment, we shall restrict our study to two situations: (A) weak coupling and (B) intermediate coupling. Therein, we analyze the robustness condition of the geometric phase acquired by the driven two-level system and the best scenarios for its experimental detection. Finally, in Sec. \ref{conclusiones}, we summarize the results and present conclusions.
\section{The Model} \label{modelo} We shall consider a two-level system described by a time-periodic Hamiltonian interacting with an environment.
The total Hamiltonian which describes this model reads (we set $\hbar=1$ from here on) \begin{equation} H= \bar{\omega}_0 (t) \sigma_+\sigma_- + \sigma_x \sum_k (g_k b_k + g_k^*b_k^{\dagger}) + \sum_k \bar{\omega}_k b_k^{\dagger} b_k, \end{equation}
where $\sigma_{\pm}= \sigma_x \pm i \sigma_y$ (with $\sigma_{\alpha}$ ($\alpha=x,y,z$) the Pauli matrices) and $b_k$, $b_k^{\dagger}$ the annihilation and creation operators corresponding to the $k-$th mode of the bath. The coupling constant is $g_k$ and $\bar{\omega}_0(t)$ is the time-dependent energy difference between the states $|0\rangle$ and $|1\rangle$ of the two-level system. We shall assume it has the following form: \begin{equation} \bar{\omega}_0(t)= \bar{\Omega} + \bar{\Delta} \cos({\bar {\omega}_D} t). \end{equation} The exact dynamics of the system in the interaction picture has been derived in \cite{Tanimura}. If the qubit and the bath are initially in a separable state, i.e. $\rho(0)=\rho_s(0)\otimes \rho_B$, the formal solution is: \begin{eqnarray} \tilde{\rho}_S(t) &=& {\cal T} \exp\bigg(-\int_0^t dt_2\int_0^{t_2} dt_1 \tilde{\sigma_x}^\times (t_2) \\ && [C^R(t_2-t_1)\tilde{\sigma_x}^{\times}(t_1) + i C^I(t_2-t_1)\tilde{\sigma_x}^{\circ}(t_1)] \bigg), \nonumber \label{rhoexacta} \end{eqnarray} where ${\cal T}$ implies the chronological time-ordering operator and $\tilde {o}$, denotes the expression of the operator $o$ in the interaction picture. We have further introduced the following notation $A ^{\times} B=[A,B]= AB-BA$ and $A^{\circ}B= \{A,B\}= A B+ B A$. $C^R(t_2-t_1)$ and $C^I(t_2-t_1)$ are the real and imaginary parts of the bath time-correlation function, defined as \begin{eqnarray} C(t_2-t_1) &\equiv& \langle B(t_2) B(t_1) \rangle = {\rm Tr}[B(t_2)B(t_1)\rho_B] \nonumber \\ &=& \int_0^{\infty} d\omega J(\omega) e^{-i \omega (t_2-t_1)} \end{eqnarray} and \begin{equation} B(t)=\sum_k \bigg(g_k b_k \exp(-i \omega_k t) + g_k^*b_k^{\dagger} \exp(i \omega_k t)\bigg). \nonumber \end{equation} Eq.(\ref{rhoexacta}) is difficult to solve directly. An effective method for obtaining a solution has been developed by defining a set of hierarchy equations \cite{Tanimura,Tanimura2,Sun}. The key condition in deriving the hierarchy equations is that the correlation function can be decomposed into a sum of exponential functions of time. At finite temperatures, the system-bath coupling can be described by the Drude spectrum, however, if we consider qubit devices, they are generally prepared in nearly zero temperatures. Then we shall consider a Lorentz type spectral density $J(\omega)$,
\begin{equation} J(\omega)= \frac{{\bar{\gamma}_0}}{2 \pi} \frac{\lambda^2}{(\omega- \bar{\Omega})^2 + \lambda^2}, \end{equation} and the hierarchy method can also be applied \cite{Sun2}. As has been stated in \cite{Poggi}, this method can be used if i) the initial state of the system plus bath is separable, ii) the interaction Hamiltonian is bilinear, and iii) if the environmental correlation function can be cast in multi-exponential form. In this case, $\bar{\gamma_0}$ is the coupling strength between the system and the bath and $\lambda$ characterizes the broadening of the spectral peak, which is connected to the bath correlation time $\tau_c= \lambda^{-1}$. The relaxation time scale at which the state of the system changes is determined by $\tau_r=\bar{\gamma_0}^{-1}$. At zero-temperature, if we consider the bath in a vacuum state, the correlation function can be expressed as \begin{equation}
C(t_2-t_1)= \frac{\lambda \bar{\gamma_0}}{2} \exp[-(\lambda + i \bar{\Omega})|t_2-t_1|] \end{equation} which is the exponential form required for the hierarchy method. The advantage of solving the dynamics of the system by this method is that we can gain insight into different regimes of the dynamics. For example, in the limiting case $\bar{\gamma_0} \ll \lambda$, i.e. $\tau_c \ll \tau_r$, we have a flat spectrum and the correlation tends to $C(t_2-t_1)\rightarrow \bar{\gamma_0} \delta (t_2-t_1)$. This is the so-called markovian limit. Therefore, we can study the full spectrum of behavior by solving the hierarchy equations, which can be expressed as \begin{widetext} \begin{equation} \frac{d}{d\tau}\rho_{\vec{n}}(\tau)= -(i H_s[\tau]^{\times}+ \vec{n}.\vec{\nu})\rho_{\vec{n}}(\tau) - i \sum_{k=1}^2 \sigma_x^{\times}\rho_{\vec{n}+\vec{e}_k}(\tau)- i\frac{{\gamma_0}}{2} \sum_{k=1}^2 n_k [\sigma_x^{\times} + (-1)^k \sigma_x^{\circ}] \rho_{\vec{n}-\vec{e}_k}(\tau), \label{hierarchy} \end{equation} \end{widetext} where we have defined the dimensionless variables $\tau=\lambda t$ and $x=\bar{x}/\lambda$, where $x$ is any parameter with units of energy in the model described. The subscript is $\vec{n}=(n_1,n_2)$ with integer numbers $n_{1(2)} \geq 0$, and $\rho_S(t) \equiv \rho_{(0,0)} (t)$. This means that the ``physical'' solution is encoded in $\rho_{(0,0)} (t)$ and all other $\rho_{\vec{n}}(\tau)$ with $\vec{n} \neq (0,0)$ are auxiliary operators implemented for the sake of computation. We have defined the vector $\vec{\nu}=(\nu_1,\nu_2)=(1-i \Omega, 1 + i \Omega)$. The hierarchy equations are a set of linear differential equations that can be solved by using a Runge--Kutta routine. For numerical computations, the hierarchy equations must be truncated for large $\vec{n}$. The hierarchy terminator equation is similar to that of Eq.(\ref{hierarchy}) for the term $\vec{N}$, and the corresponding term related to $\rho_{\vec{N}+\vec{e}_k}$ is dropped \cite{Tanimura}. The numerical results in this paper have all been tested for convergence, using a maximum value of $\vec{N}=(25,25)$. We shall take advantage of this model, whose non-markovian properties have been studied in \cite{Poggi}, and set the scenario to study the corrections to the GP for a driven two-level system.
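To illustrate the structure of the hierarchy, the following Python sketch (ours, not the code used for the figures) integrates Eq.~(\ref{hierarchy}) with a fixed-step Runge--Kutta scheme for a small truncation order. We assume $H_s(\tau)=\omega_0(\tau)\sigma_+\sigma_-$ with $\omega_0(\tau)=\Omega+\Delta\cos(\omega_D\tau)$ in dimensionless units, represent $\sigma_+\sigma_-$ as the projector onto the excited state, and the default parameter values and truncation are for illustration only.
\begin{verbatim}
import numpy as np

sx  = np.array([[0, 1], [1, 0]], dtype=complex)
pee = np.array([[1, 0], [0, 0]], dtype=complex)   # sigma_+ sigma_- (excited-state projector, our convention)

def hierarchy_rhs(tau, rho, Omega, Delta, omega_D, gamma0, N):
    """Right-hand side of the hierarchy equations; rho has shape (N+1, N+1, 2, 2)."""
    nu1, nu2 = 1 - 1j * Omega, 1 + 1j * Omega
    Hs = (Omega + Delta * np.cos(omega_D * tau)) * pee
    out = np.zeros_like(rho)
    for n1 in range(N + 1):
        for n2 in range(N + 1):
            r = rho[n1, n2]
            d = -1j * (Hs @ r - r @ Hs) - (n1 * nu1 + n2 * nu2) * r
            if n1 < N:                            # coupling to rho_{n+e_1}; terminator drops it at n1 = N
                rp = rho[n1 + 1, n2]
                d -= 1j * (sx @ rp - rp @ sx)
            if n2 < N:                            # coupling to rho_{n+e_2}
                rp = rho[n1, n2 + 1]
                d -= 1j * (sx @ rp - rp @ sx)
            if n1 > 0:                            # k = 1 term: commutator minus anticommutator
                rm = rho[n1 - 1, n2]
                d -= 1j * (gamma0 / 2) * n1 * ((sx @ rm - rm @ sx) - (sx @ rm + rm @ sx))
            if n2 > 0:                            # k = 2 term: commutator plus anticommutator
                rm = rho[n1, n2 - 1]
                d -= 1j * (gamma0 / 2) * n2 * ((sx @ rm - rm @ sx) + (sx @ rm + rm @ sx))
            out[n1, n2] = d
    return out

def evolve(rho_s0, tau_max, dt, Omega=20.0, Delta=0.0, omega_D=0.0, gamma0=0.1, N=10):
    """Fixed-step RK4 integration; returns the physical element rho_{(0,0)}(tau) at each step."""
    rho = np.zeros((N + 1, N + 1, 2, 2), dtype=complex)
    rho[0, 0] = rho_s0
    f = lambda t, y: hierarchy_rhs(t, y, Omega, Delta, omega_D, gamma0, N)
    traj = [rho[0, 0].copy()]
    for s in range(int(tau_max / dt)):
        tau = s * dt
        k1 = f(tau, rho)
        k2 = f(tau + dt / 2, rho + dt / 2 * k1)
        k3 = f(tau + dt / 2, rho + dt / 2 * k2)
        k4 = f(tau + dt, rho + dt * k3)
        rho = rho + dt / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
        traj.append(rho[0, 0].copy())
    return np.array(traj)
\end{verbatim}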
\section{Environmentally induced Dynamics} \label{dinamica}
We begin by studying the environmentally induced dynamics by considering a qubit with no driving at all ($\Delta=0$). In this case, we consider, for example, a qubit with a dipolar coupling to the cavity mode. This means that the dynamics of the system contemplates decoherence and dissipation as well as variation of the population numbers (in contrast to the spin-boson model). The density matrix in this case has the formal expression \begin{equation} \rho_s(\tau)= \bigg( \begin{matrix}
\rho_{11}|G(\tau)|^2 & \rho_{12} G(\tau) \\
\rho_{21} G^*(\tau) & 1-\rho_{11}|G(\tau)|^2 \end{matrix} \label{rho} \bigg) \end{equation} where $G(\tau)$ is a single complex-valued function that characterizes the dynamics of the system. We herein do not write its explicit form since we shall solve the problem numerically through the hierarchy approach.
The decoherence time $\tau_D$ is commonly understood as the timescale at which quantum interferences are suppressed. This is formally true for a purely dephasing process, where noise only affects the off-diagonal terms of the reduced density matrix. However, Eq.(\ref{rho}) describes a process where populations and off-diagonal terms are both affected by the presence of noise. Qualitatively, decoherence can be thought of as the deviation of measured probabilities from the ideal intended outcome. Therefore decoherence can be understood as fluctuations of the Bloch vector $\vec{R}$ induced by noise.
In a wider sense, we will represent decoherence as the change of $|\vec{R}(\tau)|$ in time, starting from $|\vec{R}(0)| = 1$ for the initial pure state and decreasing as the quantum state loses purity. The contributions of the bath to the dynamics of the system, including both dissipation and Lamb shift, are fully contained in the hierarchy equation. In Fig. \ref{Fig1} we present the absolute value of the Bloch vector of the system state, $R(\tau)=\sqrt{x(\tau)^2+y(\tau)^2+z(\tau)^2}$, as a function of time measured in natural cycles $\omega_0 \tau= N 2\pi $ for different values of $\gamma_0$. We can note that the trajectory differs substantially from the unitary one, meaning that the system's dynamics is affected by the noise. In the unitary case, $\gamma_0=0$ and $R(\tau)=1$ for all times.
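For completeness, the Bloch-vector length plotted below can be extracted from a trajectory of reduced density matrices as follows (a convenience helper of ours, compatible with the \texttt{evolve} sketch given above):
\begin{verbatim}
import numpy as np

def bloch_length(rho_traj):
    """|R(tau)| from an array of 2x2 reduced density matrices, shape (M, 2, 2)."""
    x = 2 * np.real(rho_traj[:, 0, 1])
    y = -2 * np.imag(rho_traj[:, 0, 1])
    z = np.real(rho_traj[:, 0, 0] - rho_traj[:, 1, 1])
    return np.sqrt(x**2 + y**2 + z**2)
\end{verbatim}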
\begin{figure}
\caption{(Color online) Loss of quantum state purity $R(\tau)$ as a function of time $N$ ($\omega_0 \tau= N 2\pi$, number of cycles). As the coupling constant with the bath increases for a fixed value of $\lambda=0.01$ ($\gamma_0=\bar{\gamma}_0/\lambda$), the dynamical behavior is modified. The orange dashed line ($\gamma_0=0.3$) and the purple dot-dashed line ($\gamma_0=0.1$) represent situations with $\tau_r > \tau_c$. The brown dot-dashed line ($\gamma_0=1$) and the blue solid line ($\gamma_0=0.7$) correspond to $\tau_r \sim \tau_c$. In the inset we show different solutions for $\gamma_0=0.1$ obtained by varying the order of truncation; from $\vec{N}=(10,10)$ onwards we obtain a converged positive reduced matrix $\rho(\tau)$. Parameters used: $\Delta=0$, $\vec{N}=(25,25)$, $\Omega=20$.}
\label{Fig1}
\end{figure}
We can notice that the dynamical behavior is modified as the coupling constant $\gamma_0$ is increased. It is interesting to see the interplay between time and $\gamma_0$: a stronger bath can initially produce less damage on the dynamics but has a stronger effect in the renormalization of the frequency. A weak-coupling has a more ``adiabatic" modification of the dynamics in an equal period of time. In Fig. \ref{Fig1}, we have set $\lambda$ fixed. As $\gamma_0$ increases, the relaxation time $\tau_r$ of the system decreases and $\tau_r \sim \tau_c$. The presence of oscillations in the Bloch vector $R(\tau)$ for short times, as $\gamma_0$ becomes similar to $\lambda$ indicates non markovian dynamics induced by the reservoir memory and describing the feedback of information and/or energy from the reservoir into the system \cite{addis}. We can see that as long as $\bar{\gamma_0}/\lambda < 1$, the systems exhibits a markovian dynamics (orange-dashed line, purple dot-dashed line and red solid line). On the other side, if $\bar{\gamma_0}/\lambda \geq 1$, there are non markovian features in the system's dynamics. We can notice that as long as $\bar{\gamma_0} \leq \lambda$ and $\lambda \ll 1$, the behavior remains similar to that of $\lambda \rightarrow 0$ (markovian since $\tau_c \rightarrow \infty$). However, as $\lambda$ increases, the environmentally-induced dynamics is considerably modified, introducing oscillations again. So, with this kind of environment we can simulate different regimes by the solely selection of the $\bar{\gamma_0}$ and $\lambda$ parameters. In the inset of Fig. \ref{Fig1}, we show a simulation for different truncations of the system of equations for $\gamma_0=1$. We show that by setting the order of truncation in 25, we already obtain a converged positive reduced matrix $\rho(\tau)$.
\begin{figure}
\caption{(Color online) We plot different dynamics for a fixed value of $\lambda$, but different values of the $\bar{\gamma}_0$ parameter. The left column is for $\bar{\gamma_0}/\lambda <1$ and the right for $\bar{\gamma_0}/ \lambda \geq 1$. On top we show the behavior of $\rho_{11}(t)$ and the absolute value of $\rho_{12}(t)$ in each case. We can see some time revivals on the right reduced matrix elements. The lower plots show the trajectory ($\vec{R}=(x,y,z)$) of the two level system in the Bloch sphere. Parameters used: $\Delta=0$, $\vec{n}=(25,25)$, $\Omega=20$, $\tau_c=100$. }
\label{Fig3}
\end{figure}
In Fig. \ref{Fig3}, we compare the dynamics of two different environmental situations: the left column is for $\bar{\gamma_0}/\lambda <1$, and the right one for $\bar{\gamma_0}/\lambda \geq 1$; both evolutions are simulated for fixed $\bar{\Omega}$ and zero driving ($\Delta=0$). In this example, we can see that when $\tau_c < \tau_r$, the system presents a markovian evolution. On the other hand, if $\tau_c > \tau_r$, non-markovian effects can be seen, for example an accelerated transition between quantum states and revivals at longer times. For initial short times, the spontaneous decay of the atom can not only be suppressed or enhanced, but also partly reversed, when non-markovian oscillations induced by reservoir memory effects are present. As has been shown, by choosing the right set of parameters, we can simulate different types of environments and obtain the corresponding dynamics beyond the rotating wave approximation.
\section{Correction to the Geometric Phase}\label{fase}
In this section, we shall compute the geometric phase for the central spin and analyze its deviation from the unitary geometric phase for a two-level driven system. A proper generalization of the geometric phase for unitary evolution to a non-unitary evolution is crucial for practical implementations of geometric quantum computation. In \cite{Tong}, a quantum kinematic approach was proposed and the geometric phase (GP) for a mixed state under non-unitary evolution has been defined as \begin{eqnarray} \Phi & = & {\rm arg}\{\sum_k \sqrt{ \varepsilon_k (0) \varepsilon_k (T)}
\langle\Psi_k(0)|\Psi_k(T)\rangle \nonumber \\
& & \times e^{-\int_0^{T} dt \langle\Psi_k|
\frac{\partial}{\partial t}| {\Psi_k}\rangle}\}, \label{fasegeo} \end{eqnarray} where $\varepsilon_k(t)$ are the eigenvalues and
$|\Psi_k\rangle$ the eigenstates of the reduced density matrix $\rho_{\rm s}$ (obtained after tracing over the reservoir degrees
of freedom). In the last definition, $T$ denotes a time after the total system completes a cyclic evolution when it is isolated from the environment. Taking the effect of the environment into account, the system no longer undergoes a cyclic evolution. However, we will consider a quasi-cyclic path ${\cal P}: t \in [0,\tau_S]$ with $\tau_S=2 \pi/\omega_0$ ($\omega_0$ the system's dimensionless frequency). When the system is open, the original GP $\phi_u$, i.e. the one that would have been obtained if the system had been closed, is modified. This means that, in the general case, the phase is $ \phi_g=\phi_u+ \delta \phi$, where $\delta \phi$ depends on the kind of environment coupled to the main system \cite{papers, zanardi}. For a spin-1/2 particle in $SU(2)$, the unitary GP is known to be $\phi_u= \pi(1+\cos(\theta_0))$. It is worth noticing that the proposed GP is gauge invariant and leads to the well-known results when the evolution is unitary.
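A direct discretization of Eq.~(\ref{fasegeo}) is straightforward; the following Python sketch (ours) assumes a smooth qubit trajectory of reduced density matrices without eigenvalue crossings, so that the sorted eigendecomposition follows the same eigenvector branch at every time step, and the parallel-transport integral is replaced by a product of nearest-neighbor overlaps.
\begin{verbatim}
import numpy as np

def geometric_phase(rho_traj):
    """Discretization of the GP formula for a qubit trajectory rho_traj of shape (M, 2, 2)."""
    w0, v0 = np.linalg.eigh(rho_traj[0])
    wT = np.linalg.eigh(rho_traj[-1])[0]
    total = 0j
    for k in range(2):
        psi_prev = v0[:, k]
        conn = 1.0 + 0j                 # accumulates exp(-int <Psi_k | d Psi_k>)
        for rho in rho_traj[1:]:
            psi = np.linalg.eigh(rho)[1][:, k]
            ov = np.vdot(psi_prev, psi)
            conn *= np.conj(ov) / abs(ov)
            psi_prev = psi
        total += np.sqrt(w0[k] * wT[k]) * np.vdot(v0[:, k], psi_prev) * conn
    return np.angle(total)
\end{verbatim}
The combination of the endpoint overlap with the product of normalized overlaps is gauge invariant, so no phase fixing of the eigenvectors is needed.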
As this method can be used when the initial state of the whole system is separable, we shall start by assuming $\rho(0)= \rho_s(0) \otimes \rho_{\cal E}(0)$. The initial state of the quantum system is supposed to be a pure state
of the form:
\begin{equation}
|\Psi (0) \rangle=\cos(\theta_0/2) |0 \rangle
+ \sin(\theta_0/2) |1 \rangle . \nonumber\end{equation} We shall solve the master equation and then compute the geometric phase acquired by the quantum system. If the environment is strong, then the unitary evolution is destroyed within a decoherence time $\tau_D$. Otherwise, we can imagine a scenario where the effect of the environment is not so drastic. In the following, we shall focus on how driving can affect (or even benefit) the measurement of the geometric phase under different regimes, both weak and intermediate coupling. In particular, we shall investigate to what extent external driving acting solely on the system can correct the geometric phase with respect to the undriven or unitary case.
\subsection{Geometric phase under weak coupling}
\begin{figure}
\caption{(Color online) Comparison between the accumulated geometric phase $\phi_g$ for the unitary case (asterisk blue dot-dashed line) and a weakly coupled environment in a markovian regime $\bar{\gamma_0 }\ll \lambda$ (circled red dot-dashed line) (corresponding to a situation similar to that of the left column of Fig. \ref{Fig3}). Parameters used: $\gamma_0=0.01$, $\Omega=20$, $\theta_0=\pi/4$, $\Delta=0$ and $\omega_D=0$.}
\label{Fig4}
\end{figure} The dynamics of the driven two-level system comprises three different dynamical effects, each occurring at a different timescale. Dissipation and decoherence occur at the relaxation timescale $\tau_r$, while non-markovian memory effects occur at times shorter than or comparable to the reservoir correlation timescale $\tau_c$ \cite{addis}. Finally, nonsecular terms cause oscillations on the system timescale $\tau_S=(\Omega^2+\Delta^2)^{-1/2}$. Generally, these nonsecular terms can be neglected when $\tau_S \ll \tau_c $. We shall consider the secular regime, by assuming $\tau_S \ll \tau_c $, and in the markovian regime $\tau_S \ll \tau_c \ll \tau_r$. As we are dealing with a structured environment, we shall start by studying a weakly coupled system, which leads to a markovian regime (i.e. $\gamma_0<\lambda$). Firstly, we shall compare a two-level undriven ($\Delta=0$) evolution to a unitary one, in order to see how different the open evolution is and decide whether the geometric phase can be measured in such a scenario. Hence, in Fig. \ref{Fig4}, we show the total geometric phase accumulated for the non-unitary (red circled line) and unitary (blue asterisk line) evolutions as time evolves, where the number of cycles is $N=\tau/\tau_S$. Therein, it is possible to see that initially the geometric phases are similar, with an estimated error of $2.5\%$ after 5 cycles and $10\%$ after 15 cycles when $\gamma_0=0.01$. As time evolves, the difference between the two lines increases as expected, since for long times the loss of purity of the system becomes considerable.
\begin{figure}
\caption{(Color online) We study the effect of adding $\Delta$ by computing the geometric phase accumulated in time: $\Delta=0$ black dotted line, $\Delta=0.3$ turquoise cross line, $\Delta=0.5$ blue triangle line and $\Delta=1$ orange circled line, all for $\gamma_0=0.1$. For $\gamma_0=0.01$, the gray squares ($\Delta=1$) and the magenta solid line ($\Delta=0$) are very similar, while the blue asterisk line is the unitary geometric phase, shown for reference. In the inset, we show $\phi_g/\phi_u$ for $\gamma_0=0.01$ with different values of $\Delta$. Parameters used: $\Omega=20$, $\theta_0=\pi/4$ and $\omega_D=0$.}
\label{Fig5}
\end{figure}
In Fig. \ref{Fig5}, we show the geometric phase acquired when detuning frequencies are added to the two-level system, compared to the case with $\Delta = 0$, for different environments, namely $\gamma_0=0.1$ and $\gamma_0=0.01$. When the coupling to the environment is very weak, the corrections to the acquired geometric phase are very small and one can expect to obtain results very similar to the unitary geometric phase for a few cycles of evolution. Evolutions with $\Delta \neq 0$ are very similar to those with $\Delta =0$ if we compare the $\gamma_0=0.01$ results in Fig. \ref{Fig5} and Fig. \ref{Fig4}. In this case, the dominant correction to the geometric phase is given by the interaction with the environment, parametrized by the value of $\gamma_0$.
However, for larger values of $\gamma_0$ ($\gamma_0=0.1$, but still weakly coupled), evolutions with bigger values of $\Delta$ acquire a considerably different geometric phase at long evolution times. For the first few cycles, the geometric phases acquired are all very similar. As time evolves, different features, such as the magnitude of the coupling to the environment and the system's frequency (with $\Delta$ involved), have an impact on the dynamics and therefore on the geometric phase acquired.
In the inset of Fig. \ref{Fig5}, we plot the normalized geometric phase ($\phi_g/\phi_u$) for $\gamma_0=0.01$. We can see the $\Delta=0$ geometric phase represented by a magenta solid line, $\Delta=0.3$ by a turquoise cross line, $\Delta=0.5$ by an orange circled dotted line, $\Delta=1$ by a gray circled line, $\Delta=3$ by a blue asterisk line and $\Delta=5$ by a red cross line. The distance from unity becomes relevant as the number of cycles increases.
As expected, if $\Delta$ is added to the system, then the geometric phase acquired is different from that with $\Delta=0$, since the system's timescale is modified and non-markovian effects are enhanced, as reported in \cite{Poggi}. This can be a severe experimental problem to overcome. However, for the low values of $\Delta$ considered here, the addition of a detuning frequency does not considerably affect the geometric phase, and we obtain $\phi_g/\phi_u \sim 1$ for many evolution cycles in the weakly coupled regime.
\begin{figure}
\caption{(Color online) We include driving in the model and compute the acquired geometric phase $\phi_g$. The blue asterisk line corresponds to $\omega_D=0.1$ and $\Delta=3$. The magenta line is for $\Delta=0$, the red dotted line for $\Delta=5$ and $\omega_D=0.3$, the green circled solid line for $\Delta=2$ and $\omega_D=0.3$, and the orange circled dot-dashed line for $\Delta=1$ and $\omega_D=0.5$. The black dotted unitary geometric phase is included for reference. Low-frequency driving corrects the geometric phase accumulated at short times. Parameters used: $\gamma_0=0.01$, $\Omega=20$, $\theta_0=\pi/4$.}
\label{Fig6}
\end{figure}
We shall therefore study the interplay of adding driving to the two-level system. In particular, we shall focus on the effect of driving when considering the possibility of measuring the geometric phase acquired by the two-state particle. In Fig. \ref{Fig6}, we show the geometric phase acquired when low-frequency driving is added: the blue asterisk line corresponds to $\Delta=3$ and $\omega_D=0.1$. The magenta line is for $\Delta=0$, the red dotted line for $\Delta=5$ and $\omega_D=0.3$, the green circled solid line for $\Delta=2$ and $\omega_D=0.3$, and the orange circled dot-dashed line for $\Delta=1$ and $\omega_D=0.5$. The black dotted unitary geometric phase is included for reference. In the zoom plot we show the geometric phase acquired for $\Delta=3$ and $\omega_D=0$ (static) and compare it to $\Delta=3$ and $\omega_D=0.1$ (low-frequency field). We can see that the driven system acquires a geometric phase closer to the unitary one for longer periods of time, although the difference is very small. In the main plot of Fig. \ref{Fig6}, we note that other driven systems are closer to the unitary geometric phase for ten periods as well. Therefore, there are sets of parameters for which driving ``preserves purity". The geometric phase acquired is more similar to the unitary one when low-frequency driving is added for low values of $\Delta$. This fact can easily be observed in the inset plot, where the lines with asterisks and circles are closer to the unitary one (black solid line) than the corresponding static ones. \begin{center} \begin{figure}
\caption{(Color online) $\phi_g/\phi_u$ for different values of $\Delta$ and $\omega_D$ for (a) $N=4$ and (b) $N=8$ under weak coupling. For short evolution times there is a wider set of parameters that yields $\phi_g/\phi_u \sim 1$. As time evolves, this set of parameters becomes smaller.
Parameters used: $\gamma_0=0.01$, $\Omega=20$, $\theta_0=\pi/4$.}
\label{Fig7}
\end{figure} \end{center} In Fig. \ref{Fig7}, we further explore this result by representing the normalized geometric phase $\phi_g/\phi_u$ as a function of $\Delta$ and $\omega_D$, for two evolution times: (a) $N=4$ and (b) $N=8$. It is easy to note that for short times, many sets of model parameters yield $\phi_g/\phi_u \sim 1$. This can be understood because, as explained for Fig. \ref{Fig5}, the main contribution to the correction of the geometric phase is given by the magnitude of the coupling between the environment and the system (writing $\phi_g=\phi_u + \delta \phi $, for $\gamma_0=0$ we have $\delta \phi=0$ and the geometric phase obtained is the unitary geometric phase $\phi_u$). However, as time evolves, the intrinsic dynamics of each set of values ($\Delta, \omega_D$) gains more importance. This type of behavior in the correction to the geometric phase has been observed in other studies, which found that at short times the main correction derives from the fact that the environment is present and the system performs an ``open" evolution (with only markovian effects taken into account) \cite{nature,ludmila}.
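A map of this kind can be generated schematically as in the short routine below. This is only a sketch: \texttt{solve\_rho} stands for whatever integrator of the master equation is available (its name and signature are hypothetical), and \texttt{geometric\_phase} is the illustrative routine sketched earlier in this section.

\begin{verbatim}
import numpy as np

def gp_map(deltas, omega_ds, n_cycles, solve_rho, geometric_phase, phi_u):
    # phi_g / phi_u on a (Delta, omega_D) grid after n_cycles periods;
    # solve_rho(delta, omega_d, n_cycles) is assumed to return the list
    # of reduced density matrices sampled along the evolution
    grid = np.empty((len(deltas), len(omega_ds)))
    for i, delta in enumerate(deltas):
        for j, omega_d in enumerate(omega_ds):
            rhos = solve_rho(delta, omega_d, n_cycles)
            grid[i, j] = geometric_phase(rhos) / phi_u
    return grid
\end{verbatim}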
As time elapses, fewer values of $\omega_D$ preserve the unitary value of the GP. For example, it can be seen in Figs. \ref{Fig6} and \ref{Fig7} that $\Delta=5$ and $\omega_D=0.1$ renders a value of $\phi_g$ closer to $\phi_u$ than $\Delta=5$ and $\omega_D=0.3$.
Likewise, adding a very low frequency driving and small detuning frequencies for short evolution times renders a geometric phase similar to the unitary one, which leads to a good scenario for measuring the geometric phase in structured environments. It has been shown in \cite{Poggi} that $\omega_D/\Delta <1$ increases the degree of non-markovianity (for a small coupling); in particular, non-markovianity increases with $\gamma_0$ and decreases with $\Delta$ for a given environment (fixed $\gamma_0$). We are not strictly in the regime reported in \cite{Poggi}, since we are studying the situation for different evolution times. However, we must say that if we want to maintain the markovian regime, we should only add low detuning frequencies (if any at all), because by adding detuning and driving frequencies we would modify the dynamics of the system considerably and a comparison with the undriven situation would be useless. We can still note that when adding a low-frequency driving, the geometric phases acquired are very similar to the non-driven isolated geometric phases for bigger values of $\Delta$. This agrees with the result obtained in \cite{lofranco}, where the authors state that, for a small qubit-classical field coupling, a non-resonant control ($\Delta \neq 0 $) is more convenient to stabilize the geometric phase of the open qubit. The use of driven systems can help the measurement of geometric phases for some sets of parameters. This knowledge can aid the search for physical set-ups that best retain quantum properties under dissipative dynamics. As can be inferred from the different simulations performed, for weak coupling the best scenario would be to have a small detuning frequency and a very low frequency driving field, so as to maintain the smoothness of a markovian evolution and acquire a geometric phase similar to the unitary one.
\begin{figure*}
\caption{(Color online) Top: Bloch vector ($R(\tau)$) temporal evolution for different sets of model parameters. In the right corner we show some matrix elements: the population $\rho_{11}(\tau)$ and the absolute value of the off-diagonal term $\rho_{12}(\tau)$ for three different sets of parameters.
Bottom: Trajectories in the Bloch sphere ($\vec{R}=(x,y,z)$) for three sets of parameters with $\gamma_0/\lambda \geq 1 $: in magenta, $\omega_D=0=\Delta$; in orange, $\omega_D=0$ and $\Delta=5$; and in light-blue, $\omega_D=4$ and $\Delta=7$. In all cases $\tau_c=100$ and $\Omega=20$.}
\label{Fig8}
\end{figure*}
\subsection{Geometric phase under intermediate coupling}
\begin{figure}
\caption{(Color online) Geometric phase accumulated $\phi_g$ as a function of the number of periods evolved, for different sets of parameters.
Colors represent the parameters: the black dotted line is the unitary geometric phase; the black squared line is for a markovian evolution as described above; the magenta line is the non-markovian evolution for $\omega_D=0=\Delta$; the dot-dashed diamond orange line is for $\Delta=5$ and $\omega_D=5$; and the light-blue asterisk line for $\Delta=7$ and $\omega_D=4$. Parameters used: $\gamma_0=1$, $\Omega=20$, $\theta_0=\pi/4$.}
\label{Fig9}
\end{figure}
In the previous subsection we showed the geometric phase acquired by the two-level driven system in a weakly coupled structured environment. That selection of parameters rendered a markovian situation where one could still find evidence of a quasi-cyclic evolution, since the degradation of the pure state was slow and there were no revivals.
In the following, we shall show what happens if the parameters are chosen so as to simulate a non-markovian environment, by considering $\bar{\gamma_0}/\lambda> 0.25$, as reported in \cite{Poggi}. This situation can model, for example, a two-level emitter whose transition frequency is driven by an external classical field of frequency $\omega_D$, embedded in a zero-temperature reservoir formed by the quantized modes of a high-Q cavity. In such a case, the evolution is strongly modified and one can find revivals after a given number of periods. This shall help us understand the role of driving in this type of environment. If we set the parameters so as to see non-markovian behavior, we must mention that tracking the geometric phase can be much more difficult. In Fig. \ref{Fig8}, we show different scenarios obtained by varying the model parameters.
On top of Fig. \ref{Fig8}, we show the temporal evolution of the Bloch vector ($|R(\tau)|$) for different driving frequencies, with $\bar{\gamma_0}$ and $\lambda$ fixed. As can be seen, this type of environment starts to exhibit non-markovian behavior, though the revivals are small in amplitude: the magenta line represents $\Delta=0.0=\omega_D$; the dot-dashed magenta line is for $\Delta=0.5$ and $\omega_D=0$. The orange line is for $\Delta=5$, while the red dot-dashed line is for $\Delta=5$ but $\omega_D=0.1$. The dotted green line represents $\Delta=5$ and $\omega_D=5$, and the dot-dashed cyan line $\Delta=7$ and $\omega_D=4$. We have also included a markovian evolution just for reference (black dotted line for $\gamma_0=0.001$). We can easily note that the amount of driving changes the evolution of the initial quantum state considerably. In the top right corner we show the population probabilities for different lines: the magenta dashed line represents $\Delta=0.0=\omega_D$, the orange dotted line $\Delta=5$, and the cyan dashed line $\omega_D=4$ and $\Delta=7$. We can see that by adding a frequency $\Delta$ and a driving frequency $\omega_D$, revivals disappear, recovering the opportunity to track traces of a geometric phase. This fact can be easily observed in the Bloch sphere. At the bottom of Fig. \ref{Fig8}, we represent the trajectory $\vec{R}=(x(\tau),y(\tau),z(\tau))$ in the Bloch sphere of the initial state for the three different sets of parameters, for the same number of cycles evolved. We can see that the transition between states occurs in a short time for the magenta line. The revivals stimulate the exploration of the south pole of the Bloch sphere for another period of time, until the state finally decays. In such an evolution, one can only achieve a geometric phase during the revivals and compare it to the one the system would have acquired if it had started at that latitude of the Bloch sphere. In the case of the orange line, the transition between states is delayed by the change of the system's period $\tau_S=2 \pi/(\Omega+\Delta)$. In this case, the geometric phase can be measured for very short initial periods. Finally, for the cyan curve we can observe that the evolution remains ``frozen" at a given latitude for almost 3 cycles before continuing the transition between states. \begin{figure}
\caption{(Color online) Accumulated geometric phase ratio $\phi_g/\phi_u$ in the ($\Delta$, $\omega_D$) plane for different numbers of periods evolved in a non-markovian environment: $N=2$, $N=3$, $N=4$ and $N=5$. Parameters used: $\gamma_0=1$, $\Omega=20$, $\theta_0=\pi/4$. }
\label{Fig10}
\end{figure}
We can therefore compute the geometric phase for these different situations, in order to see if it is possible to track traces of an accumulated geometric phase during the evolutions. In Fig. \ref{Fig9} we show the geometric phase accumulated for different sets of parameters (those considered in Fig. \ref{Fig8}). The colors of the lines in Fig. \ref{Fig9} correspond to the same values as in Fig. \ref{Fig8}. The magenta line (with dots) is the temporal evolution of an initial state under a structured environment in a non-markovian regime with $\Delta=0$. In this case, after 4 periods the evolution presents some revivals after having made a transition from the upper to the lower state (the revivals therefore occur near the south pole of the sphere). This is easily understood with the information given in Fig. \ref{Fig8}, where we see that the transition occurs at very short times. Therefore, the geometric phase acquired for $\Delta=0$ is very different from the one the system would have acquired in a markovian regime (black squared solid line) or in an isolated evolution (black dotted line). However, in Fig. \ref{Fig9}, we also present the geometric phase for driven systems in the non-markovian regime. The diamond orange line represents a driven case with $\omega_D=5$ and $\Delta=5$. In this situation, we see that the evolution of the system initially recovers some ``unitarity", acquiring a geometric phase very similar to that of the unitary case; after some periods, it makes a transition and the evolution explores the south pole of the sphere. Finally, the light-blue asterisk line, for $\omega_D=4$ and $\Delta=7$, acquires a geometric phase similar to the markovian one for longer time periods. In this last driven case, we see that adding driving has a relevant consequence: the geometric phase acquired is closer to the one acquired under a markovian evolution, and therefore closer to the unitary one, for a short number of periods evolved ($\sim N=10$). For smaller time periods, we see that adding driving preserves the geometric phase: in all cases shown, the geometric phase is recovered compared to the case with $\Delta=0$. Finally, in Fig. \ref{Fig10} we show a general picture of the situation described above for $\phi_g/\phi_u$ at different times: $N=2$, $N=3$, $N=4$ and $N=5$. We effectively notice regions of the $\omega_D-\Delta$ space where the accumulated ratio $\phi_g/\phi_u$ remains close to one, meaning that the geometric phase acquired is close to the unitary one. There are also regions where $\phi_g$ departs enormously from $\phi_u$. As this situation exhibits non-markovian effects such as revivals, it is not easy to find a general rule for when it is more convenient to measure the geometric phase. However, we must say that there are some situations where driving enhances the ``robustness" of the geometric phase, namely when $\Delta$ delays the revivals and $\omega_D$ is small. We can see that for some particular situations, the addition of a frequency $\Delta$ and a driving $\omega_D$ becomes a useful scenario for gaining control of the geometric phase. These situations correspond to a smoothening of the revivals, as shown in Fig. \ref{Fig8} for the cyan curve. In \cite{Poggi}, the authors show that there is a large region, corresponding to $\omega_D/\Delta \sim {\cal O}(1)$, where non-markovianity is suppressed altogether for an intermediate coupling.
They even state that in the strong coupling regime ($\gamma_0>1$) the driving is unable to increase the degree of non-markovianity, contrary to what one might expect when adding driving to the system. On this aspect, the authors in \cite{lofranco} state that intense classical fields strongly reduce the non-markovianity of the system. To prevent this, they state that the larger the coupling, the higher the values of detuning required in order to maintain a given degree of non-markovianity when dealing with hybrid quantum-classical systems. Herein, we assume an intermediate coupling $\gamma_0=1$, for which there are some parameters with $\omega_D/\Delta \sim {\cal O}(1)$ that lead to a suppression of revivals and assure a smooth evolution and the acquisition of a geometric phase more similar to the unitary one. This fact, in addition to some other features related to the initial quantum state explained below, contributes to a better understanding of driven systems and should be taken into consideration when designing experimental set-ups to measure geometric phases.
\subsubsection{Dependence on $\rho_s(0)$}
In this section we shall study the dependence upon the initial state of the quantum system. As explained above, we consider an initial pure state of the form $
|\Psi (0) \rangle=\cos(\theta_0/2) |0 \rangle
+ \sin(\theta_0/2) |1 \rangle,$ with $0\leq \theta_0 \leq \pi/2 $. This determines the initial values of the reduced density matrix, $\rho_{11}(0)=\cos^2(\theta_0/2)$ and $\rho_{12}(0)=\frac{1}{2}\sin(\theta_0)$. In the manuscript, we have always started with an initial $\theta_0=\pi/4$, so as to consider an initial average state (where the geometric phase is more ``stable").
In the following, we shall study how decoherence affects different initial states of the two-level system. We shall use the change in time of the length of the Bloch vector, $R(\tau)=|\vec{R}(\tau)|$, as a measure of decoherence.
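For concreteness, the initial state used throughout and the decoherence measure $R(\tau)$ can be written in a few lines of code. The snippet below is a minimal sketch with hypothetical names, assuming the basis ordering $\{|0\rangle,|1\rangle\}$ and the standard Bloch parametrization $\rho=\frac{1}{2}(1+\vec{R}\cdot\vec{\sigma})$.

\begin{verbatim}
import numpy as np

def rho_initial(theta0):
    # pure initial state |Psi(0)> = cos(theta0/2)|0> + sin(theta0/2)|1>
    psi = np.array([np.cos(theta0 / 2.0), np.sin(theta0 / 2.0)])
    return np.outer(psi, psi.conj())

def bloch_length(rho):
    # R = |(x, y, z)| with x = 2 Re rho_01, y = -2 Im rho_01 and
    # z = rho_00 - rho_11; R = 1 for a pure state and R < 1 otherwise
    x = 2.0 * rho[0, 1].real
    y = -2.0 * rho[0, 1].imag
    z = (rho[0, 0] - rho[1, 1]).real
    return np.sqrt(x ** 2 + y ** 2 + z ** 2)
\end{verbatim}

In particular, $R(0)=1$ for every $\theta_0$, so any departure of $R(\tau)$ from unity directly quantifies the loss of purity.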
In Fig. \ref{fig1ap}, we show $R(\tau)$ as a function of time: (a) is for weak coupling while (b) is for intermediate coupling. The curves range from an initial angle of $\theta_0=0.15$ radians (near the north pole of the Bloch sphere) to $\theta_0=1.5$ radians (near the equator). Our measure of the ``robustness" of a quantum state is that the loss of purity of the state vector remains very small for many cycles. For the same model parameters, the dependence of this magnitude upon time (measured in cycles) depends on the initial quantum state. As can be seen there, the state is more affected for smaller initial angles. The purity of the state remains close to unity (the isolated case) when the initial state is located near the equator of the Bloch sphere ($\theta_0 \sim \pi/2$) for both environments. This means an initial state of the form $|+\rangle=\cos(\pi/4)|0\rangle + \sin(\pi/4)|1\rangle$. This can be understood by noting that the interaction Hamiltonian is proportional to $\sigma_x$, and $|+\rangle$ is an eigenstate of $\sigma_x$.
As for the experimental detection of the geometric phase, we need to find a compromise between the loss of purity and the area enclosed by the path trajectory. As the natural evolution of the system is to make a transition to the lower state, we need to ``control" this evolution so as to obtain small variations of the trajectory and still find traces of the geometric phase. The non-markovian evolution is the one that provides the most interesting results and the one that can be used to model experimental situations, such as hybrid quantum-classical systems feasible with current technologies, so we shall explore in detail the dependence upon the initial angles.
\begin{widetext} \begin{center} \begin{figure}
\caption{(Color online) Loss of purity $R(\tau)$ as a function of time for (a) weak coupling ($\gamma_0=0.01$) and (b) intermediate coupling ($\gamma_0=1$), for different initial angles $\theta_0$. We can see that as $\theta_0$ approaches $\pi/2$ (labeled as 1.5) the curves are closer to the unitary evolution.}
\label{fig1ap}
\end{figure} \end{center} \end{widetext}
\begin{figure}
\caption{(Color online) $R(\tau)$ as a function of the number of cycles evolved for a non-markovian environment with driving, for different values of the initial quantum state ($\theta_0$ labeled in degrees). For reference, we also included the static non-markovian evolution $\omega_D=0=\Delta$ with a black dotted line. Parameters used: $\gamma_0=1$, $\Omega=20$, $\omega_D=4$ and $\Delta=7$.}
\label{fig2ap}
\end{figure}
In Fig. \ref{fig2ap}, we show the loss of purity $R(\tau)$ for the driven non-markovian evolution considered in the manuscript (included in Fig. \ref{Fig9}) for different initial angles $\theta_0$: the smallest initial angle considered is $23.5^\circ$ (red solid line) and the largest is $\theta_0=84.5^\circ$ (brown solid line). In between, we have considered several angles: $\theta_0=35^\circ$, $\theta_0=40^\circ$, $\theta_0=45^\circ$, $\theta_0=50^\circ$, $\theta_0=62^\circ$ and $\theta_0=73^\circ$. For reference, we also included the static non-markovian evolution $\omega_D=0=\Delta$ with a black dotted line. \begin{figure}
\caption{(Color online) Geometric phase accumulated, normalized by the unitary geometric phase ($\phi_g/\phi_u$), for several cycles of the evolution under a non-markovian environment and different initial angles. Parameters used: $\gamma_0=1$, $\Omega=20$, $\omega_D=4$ and $\Delta=7$.}
\label{fig3ap}
\end{figure}
We can see that all the cases considered are qualitatively similar; however, $\theta_0 \sim \pi/4$ is the one that maintains the degree of purity for several cycles. This fact can be fruitfully exploited for the detection of the geometric phase. In Fig. \ref{fig3ap}, we show the geometric phase accumulated, normalized by the unitary geometric phase accumulated, for several cycles of the evolution under a non-markovian environment and different initial angles. The smallest angle considered is $23.5^\circ$, and we can see that the GP acquired is very different from the unitary one ($\phi_g/\phi_u \neq 1$). We considered increasing initial angles up to $45^\circ$, indicated by a magenta solid line, which gives $\phi_g/\phi_u $ close to 1 for several cycles (in agreement with the results shown in Fig. \ref{Fig9}). The next, light-blue asterisk line is for $\theta_0=50^\circ$ and shows a similar behavior. The angles continue to increase up to the blue hexagram curve indicating $\theta_0=73^\circ$. Therein, we can see that as the angle increases the difference between $\phi_g$ and $\phi_u$ grows, becoming considerable for large angles. That is the reason why we believe that, in order to experimentally detect the geometric phase, we need to consider not only the decoherence model of noise but the geometric aspects of $SU(2)$ as well.
\section{Conclusions} \label{conclusiones} In this manuscript, we have focused on the hierarchy equations of motion method in order to study the interplay between driving and geometric phases. This method can be used if (i) the initial state of the system plus the bath is separable and (ii) the interaction Hamiltonian is bilinear. It is an advantageous method, since it provides a tool to simulate markovian and non-markovian behavior for a structured spectrum.
We have therefore studied the dynamics of the system and computed the geometric phase for different environment regimes, defined by the relations among the model's parameters. In all cases we have focused on the effect of adding driving to the two-state system. By numerically studying the proposed model for various parameter regimes, we find a remarkable result: the driving can produce a large enhancement of non-markovian effects, but only when the coupling between system and environment is small.
We have seen that, for a weakly coupled configuration, when adding a low-frequency driving to the quantum system, the system's dynamics tends to be corrected towards the undriven situation only for very small values of $\omega_D$. This can be understood by noting that adding a detuning frequency considerably changes the system's timescale, and therefore the geometric phase differs from the unitary undriven one.
More interestingly, for a stronger coupling or non-markovian regime, there are some situations where driving enhances the ``robustness" of the geometric phase, namely when $\Delta$ delays the revivals and $\omega_D$ is small, particularly when $\omega_D/\Delta \sim {\cal O}(1)$. As stated in the existing bibliography, in the strong-coupling regime, on the other hand, the driving is unable to increase the degree of non-markovianity. In this manuscript, we have further studied the intermediate coupling, since we try to track traces of the geometric phase, which is literally destroyed under a strong influence of the environment. In this regime, where some non-markovian features characterize the dynamics, we have noted a suppression of non-markovianity altogether, allowing for a smooth dynamical evolution. We have further shown that for low-frequency driving, the driving fails to increase the degree of non-markovianity with respect to the static case, recovering in some cases a scenario where a geometric phase can still be measured ($\phi_g=\phi_u +\delta \phi$). This knowledge can aid the search for physical set-ups that best retain quantum properties under dissipative dynamics.
As we have noticed that the non-markovian evolution (with intermediate coupling) is the situation that provides the most interesting results, and further it is the one that can be used to model experimental situations such as hybrid quantum-classical systems feasible with current technologies, we have explored in detail the dependence upon the initial angles for a better understanding of the results. We have found that there exists a set of more ``stable" initial angles. This means that, while there are dissipative and diffusive effects that induce a correction to the unitary GP, the system maintains its purity for several cycles, which allows the GP to be observed. It is important to note that if the noise effects induced on the system are of considerable magnitude, the coherence terms of the quantum system are rapidly destroyed and the GP literally disappears.
It has been argued that the observation of GPs should be done in times long enough to obey the adiabatic approximation but short enough to prevent decoherence from deleting all phase information. As the geometric phase accumulates over time, its correction becomes relevant at a relatively short timescale, while the system still preserves purity. All the above considerations lead to a scenario where the geometric phase can still be found, and it can help us infer features of the quantum system that otherwise might be hidden from us.
\\
We acknowledge UBA, CONICET and ANPCyT--Argentina. The authors wish to express their gratitude to the TUPAC cluster, where the calculations of this paper have been carried out. We thank F. Lombardo and P. Poggi for their warmhearted discussions and comments.\\
\end{document}
\begin{document}
\title[Fully bounded noetherian rings]{Fully bounded noetherian rings and Frobenius extensions} \author{S. Caenepeel} \address{Faculty of Engineering, Vrije Universiteit Brussel, VUB, B-1050 Brussels, Belgium} \email{[email protected]} \urladdr{http://homepages.vub.ac.be/\~{}scaenepe/} \author{T. Gu\'ed\'enon} \address{Faculty of Engineering, Vrije Universiteit Brussel, VUB, B-1050 Brussels, Belgium} \email{[email protected], [email protected]} \thanks{Research supported by the project G.0278.01 ``Construction and applications of non-commutative geometry: from algebra to physics" from FWO Vlaanderen} \subjclass{16W30} \keywords{Frobenius extension, Fully bounded noetherian ring, coring, Hopf algebra action, quasi-projective module} \begin{abstract} Let $i:\ A\to R$ be a ring morphism, and $\chi:\ R\to A$ a right $R$-linear map with $\chi(\chi(r)s)=\chi(rs)$ and $\chi(1_R)=1_A$. If $R$ is a Frobenius $A$-ring, then we can define a trace map ${\rm tr}\,:\ A\to A^R$. If there exists an element of trace 1 in $A$, then $A$ is right FBN if and only if $A^R$ is right FBN and $A$ is right noetherian. The result can be generalized to the case where $R$ is an $I$-Frobenius $A$-ring. We recover results of Garc\'{\i}a and del R\'{\i}o and by D\v{a}sc\v{a}lescu, Kelarev and Torrecillas on actions of group and Hopf algebras on FBN rings as special cases. We also obtain applications to extensions of Frobenius algebras, and to Frobenius corings with a grouplike element. \end{abstract}
\maketitle \section*{Introduction} A ring $A$ is called right bounded if every essential right ideal contains a non-zero two-sided ideal. $A$ is right fully bounded noetherian or right FBN if $A$ is noetherian, and $A/P$ is right bounded for every two-sided prime ideal $P$ of $A$. Obviously commutative noetherian rings are right FBN; more generally, noetherian PI-rings and artinian rings are FBN. A series of conjectures in classical ring theory can be proved in the case of rings with the FBN property, we refer to the introduction of \cite{Sorin} for a brief survey.\\ Assume that a finite group $G$ acts on $A$. Garc\'{\i}a and Del R\'{\i}o \cite{Garcia} investigated the relationship between the FBN property for $A$ and its subring of invariants $A^G$. The main result is that, in case $A$ is right noetherian, the right FBN property for $A$ is equivalent to the right FBN property for $A^G$, if there exists an element in $A$ having trace $1$. A similar statement was proved in \cite{Nasta} for rings graded by a finite group $G$. These results can be generalized to Hopf algebra actions (see \cite{Sorin,Guedenon}). \\ We have observed that the methods introduced in \cite{Garcia} can be applied in an apparently completely different situation. Let $S$ be a Frobenius algebra (with Frobenius system $(e=e^1\otimes e^2,\overline{\nu})$) and $j:\ S\to A$ an algebra map, with $A$ a right noetherian ring. If there exists $a\in A$ such that $j(e^1)aj(e^2)=1$, then $A$ is right FBN if and only if $C_S(A)$ is right FBN.\\ In this note, we propose a unified approach to these results, based on the concept of an $A$-ring with a grouplike character, as introduced in \cite{CVW}. Basically, this consists of a ring morphism $i:\ A\to R$, together with a right $A$-linear map
$\chi:\ R\to A$ such that the formula $a\hbox{$\leftharpoonup$} r=\chi(ar)$ makes $A$ into a right $R$-module. The subring of invariants is defined as $B=\{b\in A~|~b\chi(r)=\chi(br)\}$. The main result is basically the following: if $R$ is a Frobenius $A$-ring, and $A$ is projective as a right $R$-module, then $A$ is right FBN if and only if $B$ is right FBN and $A$ is right noetherian. The methods of proof are essentially the same as in \cite{Garcia}. If $R$ is a Frobenius $A$-ring, then we can define a trace map ${\rm tr}\,:\ A\to B$, and $A$ is projective (and a fortiori quasi-projective) as a right $R$-module if and only if there exists an element of trace 1. The condition that $R$ is Frobenius can be relaxed in the sense that it suffices that $R$ is Frobenius of the second kind, with respect to a strict Morita context $(A,A,I,J,f,g)$. Then the trace map is a map ${\rm tr}\,:\ J\to B$.\\ The above mentioned results on group and Hopf algebra actions and extensions of Frobenius algebras can be obtained as special cases. We also present an application to Frobenius corings with a grouplike element.
\section{Rings with a grouplike character}\selabel{1} Let $A$ be an associative ring with unit. The category of $A$-bimodules ${}_A\mathcal{M}_A$ is a monoidal category, and we can consider algebras in ${}_A\mathcal{M}_A$. Such an algebra $R$ is a ring $R$ together with a ring morphism $i:\ A\to R$. The bimodule structure on $R$ is then given by $arb=i(a)ri(b)$, for all $a,b\in A$ and $r\in R$. A {\sl right grouplike character} on $R$ is a right $A$-linear map $\chi:\ R\to A$ such that \begin{equation}\eqlabel{1.1.0} \chi(\chi(r)s)=\chi(rs)~~{\rm and}~~\chi(1_R)=1_A, \end{equation} for all $r,s\in R$. We then say that $(R,i,\chi)$ is an $A$-ring with a right grouplike character. Right grouplike characters were introduced in \cite{CVW}. The terminology is motivated by the fact that the dual of a coring with a grouplike element is a ring with a grouplike character (see \seref{7}). For all $a\in A$, we have that $$\chi(i(a))=\chi(1_R\cdot a)=\chi(1_R)a=1_Aa=a,$$ so $\chi\circ i={\rm Id}_A$; in particular, $i$ is injective and $\chi$ is surjective. Sometimes we will regard $i$ as an inclusion. $A$ is a right $R$-module, with right $R$-action \begin{equation}\eqlabel{1.1.1} a\hbox{$\leftharpoonup$} r=\chi(ar). \end{equation} $A$ is a cyclic right $R$-module, since $$a=\chi(i(a))=\chi(1_Ai(a))=1_A\hbox{$\leftharpoonup$} i(a),$$ for all $a\in A$. For $M\in \mathcal{M}_R$, the submodule of invariants is defined as
$$M^R=\{m\in M~|~mr=m\chi(r),~{\rm for~all~}r\in R\}.$$ Let
$$B=A^R=\{b\in A~|~b\chi(r)=\chi(br),~{\rm for~all~}r\in R\}.$$ Then $B$ is a subring of $A$, $M^R$ is a right $B$-module, and we have the invariants functor $(-)^{R}:\ \mathcal{M}_R\to \mathcal{M}_B$. We will now present some elementary properties of
$$Q=R^R=\{q\in R~|~qr=q\chi(r),~{\rm for~all~}r\in R\}.$$
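Observe that \equref{1.1.0} guarantees that \equref{1.1.1} indeed defines a right $R$-module structure on $A$: for all $a\in A$ and $r,s\in R$, $$(a\hbox{$\leftharpoonup$} r)\hbox{$\leftharpoonup$} s=\chi(\chi(ar)s)=\chi(ars)=a\hbox{$\leftharpoonup$} (rs)~~{\rm and}~~ a\hbox{$\leftharpoonup$} 1_R=\chi(i(a))=a,$$ where we used \equref{1.1.0} in the second equality.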
\begin{lemma}\lelabel{1.1} Let $(R,i,\chi)$ be an $A$-ring with a right grouplike character. \begin{enumerate} \item $Q$ is a $(R,B)$-subbimodule of $R$; \item $\chi$ restricts to a $B$-bimodule map $\chi:\ Q\to B$; \item if $1_R\in Q$, then $i$ is an isomorphism of rings, with inverse $\chi$. \end{enumerate} \end{lemma}
\begin{proof} 1) We refer to \cite[Prop. 2.2]{CVW}.\\ 2) For all $q\in Q$ and $r\in R$, we have $$\chi(q)\chi(r)=\chi(q\chi(r))=\chi(qr)=\chi(\chi(q)r),$$ hence $\chi(q)\in B$. $\chi$ is right $A$-linear, so its restriction to $Q$ is right $B$-linear. For all $q\in Q\subset R$ and $b\in B$, we have, by the definition of $A^R=B$ that $b\chi(q)=\chi(bq)$, so $\chi$ is also left $B$-linear.\\ 3) If $1_R\in Q$, then we have for all $r\in R$ that $$r=1_Rr=1_R\chi(r)=1_Ri(\chi(r))=i(\chi(r)).$$ It follows that $i$ is a left inverse of $\chi$. We have seen above that $i$ is always a right inverse of $\chi$, so it follows that $i$ is an isomorphism. \end{proof}
If $M\in \mathcal{M}_R$, then ${\rm Hom}_R(A,M)\in \mathcal{M}_B$, with right $B$-action $(fb)(a)=f(ba)$, for all $b\in B$, $f\in {\rm Hom}_R(A,M)$ and $a\in A$.\\ ${\rm End}_R(A)$ is a $B$-bimodule, with left $B$-action $(bf)(a)=bf(a)$, for all $b\in B$, $f\in {\rm End}_R(A)$ and $a\in A$.
\begin{lemma}\lelabel{1.2} Let $(R,i,\chi)$ be an $A$-ring with a right grouplike character, and $M$ a right $R$-module. \begin{enumerate} \item ${\rm Hom}_R(A,M)\cong M^R$ as right $B$-modules; \item ${\rm End}_R(A)\cong B$ as $B$-bimodules and as rings. \end{enumerate} \end{lemma}
\begin{proof} 1) For $f\in {\rm Hom}_R(A,M)$ and $r\in R$, we have $$f(1_A)r=f(1_A\hbox{$\leftharpoonup$} r)=f(\chi(r))=f(1_A)\chi(r),$$ so $f(1_A)\in M^R$, and we have a well-defined map $$\phi:\ {\rm Hom}_R(A,M)\to M^R,~~\phi(f)=f(1_A).$$ $\phi$ is right $B$-linear since $$\phi(fb)=(fb)(1_A)=f(b1_A)=f(1_Ab)=f(1_A)b=\phi(f)b.$$ The inverse of $\phi$ is given by the formula $$\phi^{-1}(m)(a)=ma,$$ for all $m\in M^R$ and $a\in A$.\\ 2) If $M=A$, then $\phi$ is also left $B$-linear since $$\phi(bf)=(bf)(1_A)=bf(1_A)=b\phi(f).$$ \end{proof}
\section{Quasi-projective modules}\selabel{2} A right $R$-module $M$ is called {\sl quasi-projective} if the canonical map ${\rm Hom}_R(M,M)\to {\rm Hom}_R(M,M/N)$ is surjective, for every $R$-submodule $N$ of $M$. This means that every right $R$-linear map $f:\ M\to M/N$ factorizes through the canonical projection $p:\ M\to M/N$, that is, there exists a right $R$-linear map $g:\ M\to M$ such that $f=p\circ g$.
\begin{proposition}\prlabel{2.1} Let $(R,i,\chi)$ be an $A$-ring with a right grouplike character. The following assertions are equivalent. \begin{enumerate} \item $A$ is quasi-projective as a right $R$-module; \item for every right $R$-submodule $I$ of $A$, and every $a+I\in (A/I)^R$, there exists $b\in B$ such that $b-a\in I$; \item for every right $R$-submodule $I$ of $A$, $(A/I)^R\cong (B+I)/I$. \end{enumerate} \end{proposition}
\begin{proof} $\underline{1)\Rightarrow 2)}$. Observe that \begin{equation}\eqlabel{2.1.1}
(A/I)^R=\{a+I\in A/I~|~a\chi(r)-\chi(ar)\in I,~{\rm for~all~}r\in R\}. \end{equation} For $a+I\in (A/I)^R$, we have a well-defined right $R$-linear map $$f:\ A\to A/I,~~f(a')=aa'+I.$$ $f$ is right $R$-linear since \begin{eqnarray*} &&\hspace*{-2cm} f(a'\hbox{$\leftharpoonup$} r)=a(a'\hbox{$\leftharpoonup$} r)+I=a\chi(a'r)+I\\ &=&\chi(aa'r)+I=((aa')\hbox{$\leftharpoonup$} r)+I=f(a')\hbox{$\leftharpoonup$} r. \end{eqnarray*} Let $p:\ A\to A/I$ be the canonical projection. Since $A$ is quasi-projective, there exists $g\in {\rm Hom}_R(A,A)$ such that $p\circ g= f$, that is $aa'+I=g(a')+I$ and, in particular, $a+I=g(1_A)+I$, or $g(1_A)-a\in I$. Let us show that $b=g(1_A)\in B$. Indeed, for all $r\in R$, we have \begin{eqnarray*} &&\hspace*{-2cm} \chi(br)-b\chi(r)=\chi(g(1_A)r)-g(1_A)\chi(r) = (g(1_A)\hbox{$\leftharpoonup$} r)-(g(1_A)\hbox{$\leftharpoonup$} (i\circ \chi)(r))\\ &=& g(1_A\hbox{$\leftharpoonup$} r)-g((\chi\circ i\circ \chi)(r)) = g(\chi(r))-g(\chi(r))=0. \end{eqnarray*} $\underline{2)\Rightarrow 3)}$. The map $B\to (A/I)^R$, $b\mapsto b+I$ induces a monomorphism $(B+I)/I\to (A/I)^R$. Condition 2) means precisely that this map is surjective.\\ $\underline{3)\Rightarrow 1)}$. Take a right $R$-linear map $f:\ A\to A/I$, with $I$ a right $R$-submodule of $A$. Then $$\chi(f(1_A)r)=f(1_A)\hbox{$\leftharpoonup$} r=f(1_A\hbox{$\leftharpoonup$} r)= f(\chi(1_Ar))=f(\chi(r))=f(1_A)\chi(r),$$ so $f(1_A)\in (A/I)^R\cong (B+I)/I$. Take $b\in B$ such that $f(1_A)=b+I$, and consider the map $g:\ A\to A$, $g(a)=ba$. $g$ is right $R$-linear since $$g(a\hbox{$\leftharpoonup$} r)=b(a\hbox{$\leftharpoonup$} r)=b\chi(ar)=\chi(bar)=(ba)\hbox{$\leftharpoonup$} r=g(a)r.$$ Finally $$(p\circ g)(a)=p(ba)=ba+I=f(1_A)a=f(a).$$ \end{proof}
In \prref{2.1}, we characterize quasi-projectivity of $A$ as a right $R$-module. Projectivity has been characterized in \cite[Prop. 2.4]{CVW}:
\begin{proposition}\prlabel{2.2} Let $(R,i,\chi)$ be an $A$-ring with a right grouplike character. The following assertions are equivalent. \begin{enumerate} \item $A$ is projective as a right $R$-module; \item there exists $q\in Q$ such that $\chi(q)=1$. \end{enumerate} We refer to \cite[Prop. 2.4]{CVW} for more equivalent properties. \end{proposition}
\begin{proposition}\prlabel{2.3}\cite[4.11]{Albu} Let $R$ be a ring, $M$ a quasi-projective right $R$-module, and $N$ a noetherian right $R$-module. Then ${\rm Hom}_R(M,N)$ is a noetherian right ${\rm End}_R(M)$-module. \end{proposition}
\section{$I$-Frobenius rings}\selabel{3} Let $(R,i)$ be an $A$-ring, and $I=(A,A,I,J,f,g)$ a strict Morita context connecting $A$ with itself. We say that $R$ is an $I$-Frobenius $A$-ring if there exist an element $e=e^1\otimes u^1\otimes e^2\in R\otimes_A I\otimes_A R$ (summation understood implicitly) and an $A$-bimodule map $\overline{\nu}:\ R\otimes_A I\to A$ such that the following conditions are satisfied, for all $r\in R$ and $u\in I$: \begin{eqnarray} &&re^1\otimes u^1\otimes e^2=e^1\otimes u^1\otimes e^2r;\eqlabel{3.1.1}\\ && \overline{\nu}(e^1\otimes_A u^1)e^2=1_R;\eqlabel{3.1.2}\\ && e^1\otimes_A u^1\overline{\nu}(e^2\otimes_A u)=1_R\otimes_A u.\eqlabel{3.1.3} \end{eqnarray} If $I=(A,A,A,A,{\rm id}_A,{\rm id}_A)$, then the notion ``$I$-Frobenius" coincides with the classical Frobenius property. Equivalent definitions are given in \cite[Theorem 2.7]{CDM}.\\ $f:\ I\otimes_A J\to A$ and $g:\ J\otimes_A I\to A$ are $A$-bimodule isomorphisms, and \begin{equation}\eqlabel{3.1.4} f(u\otimes_A v)u'=ug(v\otimes_A u')~~;~~g(v\otimes_A u)v'=vf(u\otimes_A v'), \end{equation} for all $u,u'\in I$ and $v,v'\in J$. We will write $$f^{-1}(1_A)=\sum_i u_i\otimes v_i\in I\otimes_A J.$$ From the fact that $f$ is an $A$-bimodule isomorphism, it follows easily that \begin{equation}\eqlabel{3.1.5} \sum_i au_i\otimes v_i=\sum_i u_i\otimes v_ia, \end{equation} for all $a\in A$. We have the following generalization of \cite[Theorem 2.7]{CVW}.
\begin{theorem}\thlabel{3.1} Let $(R,i,\chi)$ be an $I$-Frobenius $A$-ring with a right grouplike character. Then $J$ is an $(R,B)$-bimodule, with left $R$-action \begin{equation}\eqlabel{3.1.6} r\cdot v=\sum_i \overline{\nu}(rg(v\otimes \chi(e^1)u^1)e^2\otimes_A u_i)v_i, \end{equation} and we have an isomorphism $\alpha:\ J\to Q$ of $(R,B)$-bimodules. \end{theorem}
\begin{proof} The map $\alpha$ is defined by the formula \begin{equation}\eqlabel{3.1.7} \alpha(v)=g(v\otimes_A\chi(e^1)u^1)e^2, \end{equation} for all $v\in J$. Let us first show that $\alpha(v)\in Q$. For all $r\in R$, we compute \begin{eqnarray*} &&\hspace*{-1cm} \alpha(v)r= g(v\otimes_A\chi(e^1)u^1)e^2r\equal{\equref{3.1.1}} g(v\otimes_A\chi(re^1)u^1)e^2\\ &\equal{\equref{1.1.0}}&g(v\otimes_A\chi(\chi(r)e^1)u^1)e^2 \equal{\equref{3.1.1}}g(v\otimes_A\chi(e^1)u^1)e^2\chi(r)=\alpha(v)\chi(r). \end{eqnarray*} $\alpha$ is right $B$-linear since \begin{eqnarray*} &&\hspace*{-2cm}\alpha(vb)=g(vb\otimes_A\chi(e^1)u^1)e^2= g(v\otimes_Ab\chi(e^1)u^1)e^2\\ &=& g(v\otimes_A\chi(be^1)u^1)e^2 \equal{\equref{3.1.1}} g(v\otimes_A\chi(e^1)u^1)e^2b=\alpha(v)b, \end{eqnarray*} for all $b\in B$. The inverse $\beta$ of $\alpha$ is given by the composition $$Q\subset R\rTo^{R\otimes_A f^{-1}}R\otimes_AI\otimes_AJ\rTo^{\overline{\nu}\otimes_AJ} A\otimes_AJ\cong J,$$ or $$\beta(q)=\sum_i\overline{\nu}(q\otimes_A u_i)v_i,$$ for all $q\in Q$. Indeed, we compute for all $q\in Q$ that \begin{eqnarray*} &&\hspace*{-15mm} \alpha(\beta(q))= g(\sum_i\overline{\nu}(q\otimes_A u_i)v_i\otimes_A\chi(e^1)u^1)e^2\\ &=&\sum_ig(\overline{\nu}(q\otimes_A u_i)v_i\chi(e^1)\otimes_Au^1)e^2 \equal{\equref{3.1.5}} \sum_ig(\overline{\nu}(q\otimes_A \chi(e^1)u_i)v_i\otimes_Au^1)e^2\\ &=& \sum_ig(\overline{\nu}(q\chi(e^1)\otimes_A u_i)v_i\otimes_Au^1)e^2 \equal{\equref{3.1.1}} \sum_ig(\overline{\nu}(\chi(e^1)\otimes_A u_i)v_i\otimes_Au^1)e^2q\\ &=& \sum_i\overline{\nu}(\chi(e^1)\otimes_A u_i) g(v_i\otimes_Au^1)e^2q = \sum_i\overline{\nu}(\chi(e^1)\otimes_A u_i g(v_i\otimes_Au^1))e^2q\\ &\equal{\equref{3.1.4}}& \sum_i\overline{\nu}(\chi(e^1)\otimes_A f(u_i \otimes_Av_i)u^1)e^2q = \overline{\nu}(e^1\otimes_A u^1)e^2q=q. \end{eqnarray*} For all $v\in J$, we have that \begin{eqnarray*} &&\hspace*{-15mm} \beta(\alpha(v))= \sum_i\overline{\nu}(g(v\otimes_A\chi(e^1)u^1)e^2\otimes_A u_i)v_i= \sum_ig(v\otimes_A\chi(e^1)u^1)\overline{\nu}(e^2\otimes_A u_i)v_i\\ &=&\sum_ig(v\otimes_A\chi(e^1)u^1\overline{\nu}(e^2\otimes_A u_i))v_i \equal{\equref{3.1.3}} \sum_i g(v\otimes_A \chi(1_R)u_i)v_i\\ &=&\sum_i g(v\otimes_A u_i)v_i \equal{\equref{3.1.4}} \sum_i vf(u_i\otimes_A v_i)=v. \end{eqnarray*} This shows that $\alpha$ is an isomorphism of right $B$-modules. We can transport the left $R$-action on $Q$ to $J$ such that $\alpha$ becomes an $(R,B)$-bimodule map. This yields formula \equref{3.1.6}. \end{proof}
The composition $${\rm tr}\,=\chi\circ \alpha:\ J\to Q\to B$$ is a $B$-bimodule map (see \leref{1.1}), and will be called the {\sl trace map}. It is given by the formula \begin{equation}\eqlabel{3.18} {\rm tr}\,(v)=\chi(g(v\otimes_A\chi(e^1)u^1)e^2). \end{equation} Combining \prref{2.2} and \thref{3.1}, we obtain the following result:
\begin{proposition}\prlabel{3.2} Let $(R,i,\chi)$ be an $I$-Frobenius $A$-ring with a right grouplike character. The following assertions are equivalent. \begin{enumerate} \item $A$ is projective as a right $R$-module; \item there exists $v\in J$ such that ${\rm tr}\,(v)=1_B$. \end{enumerate} \end{proposition}
Now assume that $R$ is Frobenius $A$-ring, that is, $I=A$. Then the above formulas simplify. $e=e^1\otimes e^2\in R\otimes_AR$, $\overline{\nu}:\ R\to A$ is an $A$-bimodule map, and the trace map ${\rm tr}\,:\ A\to B$ is given by $${\rm tr}\,(a)= \chi(a\chi(e^1)e^2).$$
\section{Fully bounded noetherian rings}\selabel{4} We recall some definitions and basic results from \cite{Garcia}. Let $R$ be a ring, and $M,P\in \mathcal{M}_R$. For a subset $X$ of ${\rm Hom}_R(P,M)$, we write
$$r_P(X)=\cap\{{\rm Ker}\, f~|~f\in X\}.$$ In particular, for $X\subset M\cong {\rm Hom}_R(R,M)$, we have
$$r_R(X)=\{r\in R~|~xr=0,~{\rm for~all~}x\in X\}.$$ $M$ is called {\sl finitely} $P$-{\sl generated} if there exists an epimorphism of right $R$-modules $P^n\to M\to 0$.\\ $M$ is called $P$-{\sl faithful} if ${\rm Hom}_R(P,M')\neq 0$, for every nonzero submodule $M'\subset M$.\\ $R$ is called {\sl right bounded} if every essential right ideal contains a non-zero two-sided ideal. $R$ is called {\sl right fully bounded} if $R/P$ is right bounded, for every two-sided prime ideal $P$ of $R$. A ring $R$ that is right fully bounded and right noetherian is called a {\sl right fully bounded noetherian ring} or an {\sl FBN ring}. Characterizations of right FBN rings are given in \cite[Theorem 1.2]{Garcia}. For later use, we recall one of them.
\begin{proposition}\prlabel{4.1} For a ring $R$, the following conditions are equivalent. \begin{enumerate} \item $R$ is right FBN; \item for every finitely generated right $R$-module $M$, there exists a finite subset $F\subset M$ such that $r_R(M)=r_R(F)$. \end{enumerate} \end{proposition}
A right $R$-module $P$ is called a {\sl right FBN-module} if it is noetherian and for every finitely generated $P$-faithful right $R$-module $M$, there exists a finite subset $F\subset {\rm Hom}_R(P,M)$ such that $r_P(F)=r_P({\rm Hom}_R(P,M))$. We recall the following properties from \cite{Garcia}.
\begin{proposition}\prlabel{4.2} \cite[Theorem 1.7]{Garcia} For a quasi-projective, noetherian right $R$-module $P$, the following assertions are equivalent: \begin{enumerate} \item ${\rm End}_R(P)$ is right FBN; \item $P$ is an FBN right $R$-module. \end{enumerate} \end{proposition}
\begin{proposition}\prlabel{4.3} \cite[Corollary 1.8]{Garcia} Let $P$ be a quasi-projective FBN right $R$-module, $Q$ a finitely $P$-generated right $R$-module, and $M$ a finitely generated $Q$-faithful right $R$-module. For every $X\subset {\rm Hom}_R(Q,M)$, there exists a finite subset $F\subset X$ such that $r_Q(X)=r_Q(F)$. \end{proposition}
\begin{proposition}\prlabel{4.4} \cite[Corollary 1.9]{Garcia} A right noetherian ring $R$ is right FBN if and only if every finitely generated right $R$-module is FBN. \end{proposition}
We can now state the main result of this paper.
\begin{theorem}\thlabel{4.5} Let $(R,i,\chi)$ be an $A$-ring with a right grouplike character, and consider the following statements. \begin{enumerate} \item $R\in \mathcal{M}_A$ is finitely generated and $A$ is right FBN; \item $R$ is right FBN and $A$ is right noetherian; \item $B$ is right FBN and $A$ is right noetherian. \end{enumerate} Then $1)\Rightarrow 2)$.\\ If $A$ is quasi-projective as a right $R$-module, then $2)\Rightarrow 3)$.\\ If $A$ is projective as a right $R$-module and $R$ is an $I$-Frobenius $A$-ring for some strict Morita context $I=(A,A,I,J,f,g)$, then $3)\Rightarrow 1)$ and the three conditions are equivalent. \end{theorem}
\begin{proof} $1)\Rightarrow 2)$. It follows from \prref{4.4} that $A$ is an FBN right $A$-module. Let $M$ be a finitely generated right $R$-module; then $M$ is also finitely generated as a right $A$-module. We claim that $M$ is an $R$-faithful right $A$-module. Indeed, take a non-zero right $A$-module $M'\subset M$. Since $M'\cong {\rm Hom}_A(A,M')$, there exists a non-zero $f\in {\rm Hom}_A(A,M')$, and the composition $f\circ \chi:\ R\to M'$ is non-zero, since $\chi$ is surjective.\\ Now take $P=A$, $Q=R$ in \prref{4.3}, and consider the subset $M\cong {\rm Hom}_R(R,M)\subset {\rm Hom}_A(R,M)$. It follows that there exists a finite $F\subset M$ such that $r_R(F)=r_R(M)$. It then follows from \prref{4.1} that $R$ is right FBN.\\
$2)\Rightarrow 3)$. $A$ is a finitely generated (even cyclic) right $R$-module, so it follows from \prref{4.4} that $A$ is an FBN right $R$-module. It then follows from \prref{4.2} that ${\rm End}_R(A)\cong B$ is right FBN.\\
$3)\Rightarrow 1)$. We will apply \prref{2.3} with $M=A$ and $N=R$. By assumption, $A$ is quasi-projective as a right $R$-module. Since $R/A$ is $I$-Frobenius, $R$ is finitely generated projective as a right $A$-module. Since $A$ is right noetherian, $R$ is also right noetherian.\\ It follows from \leref{1.2}, \prref{2.3} and \thref{3.1} that ${\rm Hom}_R(A,R)\cong R^R=Q\cong J$ is noetherian as a right module over ${\rm End}_R(A)\cong A^R=B$. It then follows that $J$ is finitely generated as a right $B$-module. Let $\{e_1,\cdots,e_k\}$ be a set of generators of $J$ as a right $B$-module.\\ Recall that we have an $A$-bimodule isomorphism $f:\ I\otimes_A J\to A$. With notation as in \seref{3}, we have, for $a\in A$, $$f^{-1}(a)=\sum_{i=1}^n u_i\otimes_A v_ia\in I\otimes_A J.$$ For every $i$, we can find $b_{i1},\cdots,b_{ik_i}\in B$ such that $$v_ia=\sum_{j=1}^{k_i}e_jb_{ij}.$$ We then easily compute that \begin{eqnarray*} a&=& f\Bigl(\sum_{i=1}^n u_i\otimes_A v_ia\Bigr)= f\Bigl(\sum_{i=1}^n\sum_{j=1}^{k_i} u_i\otimes_A e_jb_{ij}\Bigr)= \sum_{i=1}^n\sum_{j=1}^{k_i} f(u_i\otimes_Ae_j)b_{ij}, \end{eqnarray*} and we conclude that $A$ is finitely generated as a right $B$-module.\\ Take $M\in \mathcal{M}_A$ finitely generated. Then $M$ is also finitely generated as a right $B$-module. We now show that $M$ is an $A$-faithful right $B$-module. Let $M'$ be a non-zero right $B$-submodule of $M$, and take $0\neq m'\in M'$. It follows from \prref{3.2} that there exists $v\in J$ such that ${\rm tr}\,(v)=1_B$. The map $f:\ A\to M$, $f(a)=m'{\rm tr}\,(va)$ is right $B$-linear, and different from $0$ since $f(1_A)=m'\neq 0$.\\ Observe now that \begin{itemize} \item $B$ is a quasi-projective FBN right $B$-module; \item $A$ is a finitely $B$-generated right $B$-module; \item $M$ is a finitely generated $A$-faithful right $B$-module. \end{itemize} Applying \prref{4.3} to $M\cong {\rm Hom}_A(A,M)\subset {\rm Hom}_B(A,M)$, we find that there exists a finite subset $F\subset M$ such that $r_A(F)= r_A(M)$. It then follows from \prref{4.1} that $A$ is right FBN. \end{proof}
\begin{remark}\relabel{4.6} We do not know whether the implication $3)\Rightarrow 1)$ holds under the weaker assumption that $A\in \mathcal{M}_R$ is quasi-projective. The projectivity is used at the point where we applied \prref{4.3}. \end{remark}
\section{Application to Frobenius algebras}\selabel{5} Let $k$ be a commutative ring, and consider two $k$-algebras $A$ and $S$, and an algebra map $j:\ S\to A$. All unadorned tensor products in this Section are over $k$. It is easy to establish that $(R=S^{\rm op}\otimes A, i,\chi)$ with $$i:\ A\to S^{\rm op} \otimes A,~~i(a)=1_S\otimes a,$$ $$\chi:\ S^{\rm op}\otimes A\to A,~~\chi(s\otimes a)=j(s)a$$ is an $A$-ring with a right grouplike character. Also observe that the categories $\mathcal{M}_R$ and ${}_S\mathcal{M}_A$ are isomorphic. For $M\in {}_S\mathcal{M}_A$, we have that
$$M^R=\{m\in M~|~sm=mj(s),~{\rm for~all~}s\in S\}=C_S(M).$$ In particular, $B=A^R=C_S(A)$ and
$$Q=\{\sum_i s_i\otimes a_i\in S^{\rm op} \otimes A~|~ \sum_i ts_i\otimes a_i=\sum_i s_i\otimes a_ij(t),~{\rm for~all~}t\in S\}.$$ Consequently $A$ is projective as a right $R$-module if and only if there exists $\sum_i s_i\otimes a_i\in Q$ such that $\sum_i j(s_i)a_i=1_A$.\\ From \prref{2.1}, it follows that $A$ is quasi-projective as a right $R$-module if and only if for every $(S,A)$-submodule $I$ of $A$ and $a\in A$ such that $as-sa\in I$, for all $s\in S$, there exists $b\in B$ such that $a-b\in I$.\\ Assume that $S$ is a Frobenius $k$-algebra, with Frobenius system $(e=e^1\otimes e^2,\overline{\nu})$. Then $S^{\rm op}$ is also a Frobenius algebra, with Frobenius system $(e=e^2\otimes e^1,\overline{\nu})$, and $S^{\rm op}\otimes A$ is a Frobenius $A$-ring, with Frobenius system $(E, N)$, with $E=(e^2\otimes 1_A)\otimes_A (e^1\otimes 1_A)$ and $$N:\ S^{\rm op}\otimes A\to A,~~N(s\otimes a)=\overline{\nu}(s)a.$$ We then have the isomorphism $$\alpha:\ A\to Q,~~\alpha(a)=e^1\otimes aj(e^2)$$ and the trace map $${\rm tr}\,:\ A\to B,~~{\rm tr}\,(a)=j(e^1)aj(e^2).$$ $A$ is projective as a right $R$-module if and only if there exists $a\in A$ such that ${\rm tr}\,(a)=1$.
\begin{corollary}\colabel{5.1} Let $S$ be a Frobenius algebra over a commutative ring $k$, and $j:\ S\to A$ an algebra map. Furthermore, assume that there exists $a\in A$ such that ${\rm tr}\,(a)=1$. Then the following assertions are equivalent: \begin{enumerate} \item $A$ is right FBN; \item $S^{\rm op}\otimes A$ is right FBN and $A$ is right noetherian; \item $B=C_S(A)$ is right FBN and $A$ is right noetherian. \end{enumerate} \end{corollary}
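For instance, if $S=kG$ is the group algebra of a finite group $G$, then $S$ is a Frobenius $k$-algebra, with Frobenius system $e=\sum_{g\in G}g\otimes g^{-1}$ and $\overline{\nu}$ the projection onto the coefficient of the neutral element. In this case the trace map takes the familiar form $${\rm tr}\,(a)=\sum_{g\in G}j(g)aj(g)^{-1},$$ and the corollary above relates the right FBN property of $A$ to that of the centralizer $B=C_{kG}(A)$ of $j(G)$ in $A$, whenever some $a\in A$ has ${\rm tr}\,(a)=1$.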
\section{Application to Hopf algebra actions}\selabel{6} Let $H$ be a finitely generated projective Hopf algebra over a commutative ring $k$, and $A$ a left $H$-module algebra. The smash product $R=A\# H$ is equal to $A\otimes H$ as a $k$-module, with multiplication given by the formula $$(a\# h)(b\# k)=a(h_{(1)}\cdot b)\# h_{(2)}k.$$ The unit is $1_A\# 1_H$. Consider the maps $$i:\ A\to A\#H,~~i(a)=a\#1_H,$$ $$\chi:\ A\# H\to A,~~\chi(a\# h)=a\varepsilon(h).$$ Straightforward computations show that $(A\# H,i,\chi)$ is an $A$-ring with a left grouplike character. It is also easy to prove that
$$A^R=\{a\in A~|~h\cdot a=\varepsilon(h)a,~{\rm for~all~}h\in H\}=A^H$$ is the subalgebra of invariants of $R$.\\ In a similar way, we can associate an $A$-ring with right grouplike character to a right $H$-comodule algebra. We will discuss the left handed case here, in order to recover the results from \cite{Sorin,Garcia,Guedenon}. The results from the previous Sections can easily be restated for rings with a left grouplike character.\\ Let $I=\int_{H^*}^l$ and $J=\int_H^l$ be the spaces of left integrals on and in $H$. $I$ and $J$ are projective rank one $k$-modules, and $H/k$ is $I$-Frobenius (see for example \cite[Theorem 3.4]{CDM}). We need an explicit description of the Frobenius system. From the Fundamental Theorem, it follows that we have an isomorphism $$\phi:\ I\otimes H\to H^*,~~\phi(\varphi\otimes h)=h\cdot \varphi,$$ with $\langle h\cdot \varphi,k\rangle=\langle \varphi,kh\rangle$. If $t\in J$, then $$\phi(\varphi\otimes t)(h)=\langle \varphi,ht\rangle=\langle\varphi,t\rangle\varepsilon(h),$$ so $\phi$ restricts to a monomorphism $\tilde{\phi}:\ I\otimes J\to k\varepsilon$. If $I$ and $J$ are free of rank one, then $\tilde{\phi}$ is an isomorphism, as there exist $\varphi\in I$ and $t\in J$ such that $\langle\varphi,t\rangle=1$ (see for example \cite[Theorem 31]{CMZ}, \cite{Pareigis71}). Hence $\tilde{\phi}$ is an isomorphism after we localize at a prime ideal $p$ of $k$, and this implies that $\tilde{\phi}$ is itself an isomorphism. Consequently $J^*\cong I$. Consider $\tilde{\phi}^{-1}(\varepsilon)=\sum_i\varphi_i\otimes t_i\in I\otimes J$. Then \begin{equation}\eqlabel{6.1.1} \sum_i \langle\varphi_i,t_i\rangle=1. \end{equation}
Furthermore $\{(\varphi_i,t_i)~|~i=1,\cdots,n\}$ is a finite dual basis for $I$, so we have $t=\sum_i \langle\varphi_i,t\rangle t_i$, $\varphi=\sum_i\langle\varphi,t_i\rangle \varphi_i$, for all $t\in J$ and $\varphi\in I$. $\phi$ induces an isomorphism $$\psi:\ H\to H^*\otimes J,~~\psi(h)=\sum_i h\cdot\varphi_i\otimes t_i.$$ The inverse of $\psi$ is given by the formula $$\psi^{-1}(h^*\otimes t)=\langle h^*,\overline{S}(t_{(1)})\rangle t_{(2)},$$ where $\overline{S}$ is the inverse of the antipode $S$; recall from \cite{Pareigis71} that the antipode of a finitely generated projective Hopf algebra is always bijective. Indeed, it is straightforward to show that $\psi^{-1}$ is a right inverse of $\psi$. First observe that $$\psi(\psi^{-1}(h^*\otimes t))=\sum_i\langle h^*,\overline{S}(t_{(1)})\rangle t_{(2)}\cdot \varphi_i\otimes t_i.$$ Now we compute for all $h\in H$ that \begin{eqnarray*} &&\hspace*{-2cm} \langle h^*,\overline{S}(t_{(1)})\rangle \langle t_{(2)}\cdot\varphi_i,h\rangle= \langle h^*,\overline{S}(t_{(1)})\overline{S}(h_{(2)})h_{(1)}\rangle \langle \varphi_i,h_{(3)}t_{(2)}\rangle\\ &=& \langle h^*,\overline{S}(h_{(2)}t_{(1)})h_{(1)}\rangle \langle \varphi_i,h_{(3)}t_{(2)}\rangle\\ &=& \langle h^*,\overline{S}(1_H)h_{(1)}\rangle \langle \varphi_i,h_{(2)}t\rangle = \langle h^*,h\rangle \langle \varphi_i,t\rangle, \end{eqnarray*} where we used the fact that $\varphi_i$ and $t$ are integrals. It follows that $$\psi(\psi^{-1}(h^*\otimes t))=\sum_i h^*\otimes \langle\varphi_i,t\rangle t_i=h^*\otimes t.$$ A right inverse of an invertible element is also a left inverse, so it follows that $$ 1_H=\psi^{-1}(\psi(1_H))=\sum_i\langle \varphi_i,\overline{S}(t_{i(1)})\rangle t_{i(2)}=
\sum_i\langle \varphi_i\circ \overline{S},t_{i(1)}\rangle t_{i(2)}= \sum_i\langle \varphi_i\circ \overline{S},t_i\rangle 1_H,$$ where we used the fact that $\varphi_i\circ \overline{S}$ is a right integral on $H$. We conclude that \begin{equation}\eqlabel{6.1.2} \sum_i\langle \varphi_i,\overline{S}(t_i)\rangle=1. \end{equation} Consider the particular situation where $I$ and $J$ are free rank one modules. Then there exist free generators $\varphi_1$ of $ I$ and $t_1$ of $ J$ such that $\langle\varphi_1,t_1\rangle=1$. From \equref{6.1.2} it follows that $\langle\varphi_1,\overline{S}(t_1)\rangle=1$. For arbitrary $\varphi=x\varphi_1\in I$ and $t=yt_1\in J$, it then follows that $\langle \varphi,t\rangle=xy \langle\varphi_1,t_1\rangle=xy= xy\langle\varphi_1,\overline{S}(t_1)\rangle=\langle\varphi,\overline{S}(t)\rangle$. Consider the case where $I$ and $J$ are not necessarily free, and take $\varphi\in I$, $t\in J$ and a prime ideal $p$ of $k$. Then the images of $\langle \varphi,t\rangle$ and $\langle\varphi,\overline{S}(t)\rangle$ in the localized ring $k_p$ are equal, since the integral space of the Hopf $k_p$-algebra $H_p$ is free. So we can conclude that \begin{equation}\eqlabel{6.1.3} \langle \varphi,t\rangle=\langle\varphi,\overline{S}(t)\rangle. \end{equation}
\begin{lemma}\lelabel{6.1} Let $H$ be a finitely generated projective Hopf algebra over a commutative ring $k$. There exist $t_i\in J=\int_H^l$ and $\varphi_i\in I=\int_{H^*}^l$ such that $\sum_i \langle\varphi_i,t_i\rangle=1$. $H$ is an $I$-Frobenius $k$-algebra, with Frobenius system $(e,\overline{\nu})$ with \begin{eqnarray*} &&e=\sum_i t_{i(2)}\otimes\varphi_i\otimes\overline{S}(t_{i(1)})\\ &&\overline{\nu}=\sum_j t_j\otimes \varphi_j\in (H\otimes I)^*\cong J\otimes H^* \end{eqnarray*} \end{lemma}
\begin{proof} It is straightforward to show that $e\in C_H(H\otimes I\otimes H)$; this also follows from \cite[Prop. 3.3]{CDM}, taking into account that $e= i'(\varphi\otimes \overline{S}(t))$.\\ Write $e=e^1\otimes u^1\otimes e^2\in H\otimes I\otimes H$. We compute that \begin{eqnarray} &&\hspace*{-2cm} \overline{\nu}(e^1\otimes u^1)e^2= \sum_{i,j}\langle \varphi_j,t_{i(2)}\rangle \langle \varphi_i,t_j\rangle \overline{S}(t_{i(1)})\nonumber\\ &=& \sum_{i}\langle \varphi_i,t_{i(2)}\rangle \overline{S}(t_{i(1)}) = \sum_i\overline{S}(\langle \varphi_i,t_i\rangle 1_H)\equal{\equref{6.1.1}}1_H.\eqlabel{6.1.4} \end{eqnarray} For all $\varphi\in I$, we calculate \begin{eqnarray} &&\hspace*{-2cm} e^1\otimes u^1\overline{\nu}( e^2\otimes \varphi)= \sum_{i,j} t_{i(2)}\otimes\varphi_i\langle\varphi_j,\overline{S}(t_{i(1)})\rangle\langle\varphi,t_j\rangle\nonumber\\ &=& \sum_{i,j} 1_H\otimes\varphi_i\langle\varphi_j,\overline{S}(t_{i})\rangle\langle\varphi,t_j\rangle = \sum_{i} 1_H\otimes\varphi_i\langle\varphi,\overline{S}(t_{i})\rangle\nonumber\\ &\equal{\equref{6.1.3}}& \sum_{i} 1_H\otimes\varphi_i\langle\varphi,t_{i}\rangle=1_H\otimes \varphi. \eqlabel{6.1.5} \end{eqnarray} It now follows from \cite[Theorem 3.1]{CDM} that $(e,\overline{\nu})$ is a Frobenius system. \end{proof}
\begin{proposition}\prlabel{6.2} Let $H$ be a finitely generated projective Hopf algebra over a commutative ring $k$, and $A$ a left $H$-module algebra. Then $A\otimes H$ is an $A\otimes I$-Frobenius $A$-algebra, with Frobenius system $(E,N)$, with \begin{eqnarray*} &&\hspace*{-2cm} E=E^1\otimes_AU^1\otimes_AE^2=(1_A\# e^1)\otimes_A(1_A\otimes u^1)\otimes_A(1_A\# e^2)\\ &=&\sum_i (1_A\# t_{i(2)})\otimes_A(1_A\otimes\varphi_i) \otimes_A(1_A\#\overline{S}(t_{i(1)})),\\ &&\hspace*{-2cm}N:\ (A\#H)\otimes_A (A\otimes I)\cong A\# H\otimes I\to A,\\ &&N(a\#h\otimes\varphi)=a\overline{\nu}(h\otimes\varphi)=\sum_j a\langle\varphi_j,h\rangle\langle \varphi,t_j\rangle. \end{eqnarray*} Here we used the notation introduced above. \end{proposition}
\begin{proof} The proof is an adaptation of the proof of \cite[Proposition 5.1]{CVW}. Let us first show that $E$ satisfies \equref{3.1.1}. \begin{eqnarray*} &&\hspace*{-15mm} \sum_i (1_A\# t_{i(2)})\otimes_A(1_A\otimes\varphi_i)\otimes_A(1_A\#\overline{S}(t_{i(1)}))(a\# h)\\ &=& \sum_i (1_A\# t_{i(3)})\otimes_A(1_A\otimes\varphi_i)\otimes_A(\overline{S}(t_{i(2)})\cdot a \# \overline{S}(t_{i(1)}) h)\\ &=& \sum_i (1_A\# t_{i(3)})\otimes_A(\overline{S}(t_{i(2)})\cdot a\otimes\varphi_i)\otimes_A(1_A \# \overline{S}(t_{i(1)}) h)\\ &=& \sum_i ((t_{i(3)}\overline{S}(t_{i(2)}))\cdot a\# t_{i(4)})\otimes_A(1_A\otimes\varphi_i)\otimes_A(1_A \# \overline{S}(t_{i(1)}) h)\\ &=& \sum_i ( a\# t_{i(2)})\otimes_A(1_A\otimes\varphi_i)\otimes_A(1_A \# \overline{S}(t_{i(1)}) h)\\ &=& \sum_i ( a\#h t_{i(2)})\otimes_A(1_A\otimes\varphi_i)\otimes_A(1_A \# \overline{S}(t_{i(1)}) )\\ &=& \sum_i ( a\#h)(1_A\# t_{i(2)})\otimes_A(1_A\otimes\varphi_i)\otimes_A(1_A\#\overline{S}(t_{i(1)})). \end{eqnarray*} Obviously $N$ is left $A$-linear. Right $A$-linearity can be proved as follows: \begin{eqnarray*} &&\hspace*{-2cm} N((1\# h\otimes \varphi)a)=N(h_{(1)}\cdot a\# h_{(2)}\otimes\varphi)\\ &=& \sum_j h_{(1)}\cdot a\langle\varphi_j,h_{(2)}\rangle\langle \varphi,t_j\rangle = N(1\# h\otimes \varphi)a, \end{eqnarray*} where the last equality uses that the $\varphi_j$ are left integrals on $H$. \equref{3.1.2} is satisfied since \begin{eqnarray*} &&\hspace*{-2cm} N(E^1\otimes_AU^1)E^2=1_A\overline{\nu}(e^1\otimes u^1)(1_A\# e^2)\\ &=& 1_A\#\overline{\nu}(e^1\otimes u^1) e^2\equal{\equref{6.1.4}}1_A\#1_H. \end{eqnarray*} Let us finally show that \equref{3.1.3} holds. For all $a\in A$ and $\varphi\in I$, we have \begin{eqnarray*} &&\hspace*{-2cm} E^1\otimes_AU^1N(E^2\otimes_A(a\otimes\varphi))\\ &=& \sum_i(1_A\#t_{i(2)})\otimes_A(1_A\otimes\varphi_i)N(a\#\overline{S}(t_{i(1)})\otimes\varphi)\\ &=&\sum_{i,j}(1_A\#t_{i(2)})\otimes_A(a\otimes\varphi_i)\langle\varphi_j,\overline{S}(t_{i(1)})\rangle \langle\varphi,t_j\rangle\\ &\equal{\equref{6.1.5}}& (1_A\#1_H)\otimes_A (a\otimes\varphi). \end{eqnarray*} \end{proof}
\begin{proposition}\prlabel{6.3} Let $H$ be a finitely generated projective Hopf algebra, and $A$ a left $H$-module algebra. The trace map ${\rm tr}\,:\ A\otimes J\to B=A^H$ is given by the formula $${\rm tr}\,(a\otimes t)=t\cdot a.$$ \end{proposition}
\begin{proof} Observe that the connecting map $g$, defined on $(J\otimes A)\otimes_A(I\otimes A)$, in the Morita context associated to $I\otimes A$ is given by the formula $$g((t\otimes a)\otimes_A (\varphi\otimes b))=\langle \varphi,t\rangle ab.$$ Using the left handed version of \equref{3.18}, we compute, for $V=a\otimes t\in A\otimes J$, that \begin{eqnarray*} &&\hspace*{-15mm} {\rm tr}\,(a\otimes t)=\chi(E^1g(U^1\chi(E^2)\otimes_AV)) =\sum_i \chi((1_A\# t_i)g((1_A\otimes\varphi_i)\otimes (a\otimes t)))\\ &=&\sum_i \chi((1_A\# t_i)a\langle\varphi_i,t\rangle)=\chi((1_A\# t)a) =\chi(t_{(1)}\cdot a\# t_{(2)})=t\cdot a. \end{eqnarray*} \end{proof}
We can now apply Propositions \ref{pr:2.1}, \ref{pr:2.2} and \ref{pr:3.2}, and \thref{4.5}, and obtain the following result.
\begin{corollary}\colabel{6.4} Let $H$ be a finitely generated projective Hopf algebra, and $A$ a left $H$-module algebra. Assume that there exist $a_i\in A$ and $t_i\in \int_H^l$ such that $\sum_it_i\cdot a_i=1$.\\ Then the following assertions are equivalent: \begin{enumerate} \item $A$ is left FBN; \item $A\# H$ is left FBN and $A$ is left noetherian; \item $B$ is left FBN and $A$ is left noetherian. \end{enumerate} \end{corollary}
We recover \cite[Theorem 2.3 and Corollary 2.4]{Garcia}, \cite[Theorem 8]{Sorin} and \cite[Theorem 2.4]{Guedenon}. If $H$ is Frobenius (e.g. if $k$ is a field, or $H=kG$ is a finite group algebra), then the space of left integrals is free. We can then take a free generator $t$ of $\int_H^l$, and the condition on the trace map means that there exists $a\in A$ such that $t\cdot a=1$. We observe that, in the case where the space of integrals is not free, the sufficient condition in \coref{6.4} that there exist $a_i\in A$ and $t_i\in \int_H^l$ such that $\sum_it_i\cdot a_i=1$ is weaker than the one given in \cite[Theorem 8]{Sorin}, where a single $t\in \int_H^l$ and $a\in A$ with $t\cdot a=1$ are needed.\\ In \cite{Garcia} and \cite{Guedenon}, it is stated that \coref{6.4} holds under the weaker assumption (called (C1)) that $A$ is $A\# H$-{\sl quasi}-projective. There seems to be a hole in the proofs in \cite{Garcia} and \cite{Guedenon}: the proof of the implication $3)\Longrightarrow 1)$ uses the projectivity of $A$ as an $A\# H$-module (see \reref{4.6}).
\section{Application to corings}\selabel{7} Let $A$ be a ring. An $A$-coring is a coalgebra in the category of $A$-bimodules ${}_A\mathcal{M}_A$. This means that we have two $A$-bimodule maps $$\Delta_\mathcal{C}:\ \mathcal{C}\to \mathcal{C}\otimes_A\mathcal{C}~~{\rm and}~~\varepsilon_\mathcal{C}:\ \mathcal{C}\to A$$ satisfying some coassociativity and counit axioms. The maps $\Delta_\mathcal{C}$ and $\varepsilon_\mathcal{C}$ are called the comultiplication and counit, and we use the Sweedler notation $$\Delta_\mathcal{C}(c)=c_{(1)}\otimes_A c_{(2)},$$ where summation is understood implicitly. Corings were revived recently in \cite{Brzezinski02}, and we refer to \cite{BrzezinskiWisbauer} for a detailed discussion of all kinds of applications. The left dual $R={}^*\mathcal{C}={}_A{\rm Hom}(\mathcal{C},A)$ is an $A$-ring, with multiplication rule $$(f\# g)(c)=g(c_{(1)}f(c_{(2)})),$$ for all $c\in \mathcal{C}$ and $f,g\in {}^*\mathcal{C}$. The unit is $\varepsilon_\mathcal{C}$, and the ring morphism $i:\ A\to{}^* \mathcal{C}$ is given by $$i(a)(c)=\varepsilon_\mathcal{C}(c)a.$$ The $A$-bimodule structure on ${}^*\mathcal{C}$ is then given by the formula $$(afb)(c)=f(ca)b,$$ for all $a,b\in A$, $f\in {}^*\mathcal{C}$ and $c\in \mathcal{C}$.\\ $x\in \mathcal{C}$ is called grouplike if $\Delta_\mathcal{C}(x)=x\otimes_A x$ and $\varepsilon_\mathcal{C}(x)=1_A$. $(\mathcal{C},x)$ is then called an $A$-coring with a grouplike element. Now consider the map $\chi:\ {}^*\mathcal{C}\to A$, $\chi(f)=f(x)$. It can be shown easily (see \cite{CVW}) that $({}^*\mathcal{C},i,\chi)$ is an $A$-ring with a right grouplike character. We can also compute that
$$B=A^R=\{a\in A~|~f(xa)=af(x),~{\rm for~all~}f\in {}^*\mathcal{C}\}.$$ Using the grouplike element $x$, we can define a right $\mathcal{C}$-coaction on $A$, namely $$\rho:\ A\to A\otimes_A\mathcal{C}\cong \mathcal{C},~~\rho(a)=1_A\otimes_A xa=xa.$$ We can consider the subring of coinvariants
$$A^{{\rm co}\mathcal{C}}=\{a\in A~|~ax=xa\}.$$ In general, $A^{{\rm co}\mathcal{C}}$ is a subring of $A^R$, and they are equal if $\mathcal{C}$ is finitely generated and projective as a right $A$-module.\\ An $A$-coring $\mathcal{C}$ is called Frobenius if there exist an $A$-bimodule map $\theta:\ \mathcal{C}\otimes_A\mathcal{C}\to A$ and $z\in C_A( \mathcal{C})$ (that is, $az=za$, for all $a\in A$) such that the following conditions hold, for all $c,d\in \mathcal{C}$: $$c_{(1)}\theta(c_{(2)}\otimes d)=\theta(c\otimes d_{(1)})d_{(2)},$$ $$\theta(z\otimes c)=\theta(c\otimes z)=1.$$ We refer to \cite[Theorem 35]{CMZ} for the explanation of this definition. If $\mathcal{C}$ is Frobenius, $\mathcal{C}$ is finitely generated and projective as a (left or right) $A$-module, and ${}^*\mathcal{C}/A$ is Frobenius (see \cite[Theorem 36]{CMZ}). Then we also have (see \cite[Sec. 3]{CVW}) that
$$Q=\{q\in {}^*\mathcal{C}~|~c_{(1)}q(c_{(2)})=q(c)x,~{\rm for~all~}c\in \mathcal{C}\}.$$ It follows from \cite[Theorem 2.7]{CVW} or \thref{3.1} that we have an isomorphism of $({}^*\mathcal{C},B)$-bimodules $\alpha:\ A\to Q$, given by $$\alpha(a)(c)=\theta(ca\otimes_A x),$$ for all $a\in A$ and $c\in \mathcal{C}$. The inverse $\alpha^{-1}$ is given by $\alpha^{-1}(q)=q(z)$, and the left ${}^*\mathcal{C}$-action on $A$ is $$f\cdot a=\theta(z_{(1)}f(z_{(2)})\otimes_A x).$$ This can be verified directly as follows: $$\alpha^{-1}(\alpha(a))=\alpha(a)(z)=\theta(za\otimes_A x)=\theta(az\otimes x)=a\theta(z\otimes x)=a,$$ and \begin{eqnarray*} &&\hspace*{-2cm} \alpha(\alpha^{-1}(q))(c)=\theta(cq(z)\otimes_A x)=\theta(c\otimes_A q(z)x) = \theta(c\otimes_A z_{(1)}q(z_{(2)}))\\ &=& \theta(c\otimes_A z_{(1)})q(z_{(2)}) =q(\theta(c\otimes_A z_{(1)})z_{(2)})\\ &=&q(c_{(1)}\theta(c_{(2)}\otimes z)) = q(c_{(1)}\varepsilon(c_{(2)}))=q(c). \end{eqnarray*} The trace map ${\rm tr}\,:\ A\to B$ is given by $${\rm tr}\,(a)=\theta(xa\otimes_A x).$$
\begin{corollary}\colabel{7.1} Let $(\mathcal{C},x)$ be a Frobenius $A$-coring with a fixed grouplike element, and Frobenius system $(\theta, z)$, and assume that there exists $a\in A$ such that ${\rm tr}\,(a)=1$. Then the following assertions are equivalent. \begin{enumerate} \item $A$ is right FBN; \item ${}^*\mathcal{C}$ is right FBN and $A$ is right noetherian; \item $B=A^{{\rm co}\mathcal{C}}$ is right FBN and $A$ is right noetherian. \end{enumerate} \end{corollary}
\begin{center} {\bf Acknowledgement} \end{center} We thank Angel del R\'{\i}o and Sorin D\v{a}sc\v{a}lescu for discussing with us the sufficiency of the quasi-projectivity assumption in the proof of $3)\Longrightarrow 1)$ in \thref{4.5}.
\end{document}
\begin{document}
\title{Absolute Concentration Robustness in Rank-One Kinetic Systems}
\begin{abstract} A kinetic system has absolute concentration robustness (ACR) in a molecular species if the concentration of that species is the same in every positive steady state of the system. Recently, a criterion that guarantees the existence of ACR in a rank-one mass-action kinetic system was found. In this paper, we show that this ACR criterion does not extend in general to power-law kinetic systems. We also present a necessary condition for ACR in multistationary rank-one kinetic systems which can be used in ACR analysis. Finally, we introduce a concept of equilibria variation for kinetic systems which is based on the number of the system's ACR species. \end{abstract}
\baselineskip=0.30in
\section{Introduction}
Robustness is the capacity of a system to maintain essential functions in the presence of internal or external stresses \cite{kitano}. It is an important characteristic that helps biological systems adapt to environmental changes and thrive. Several types of robustness have already been identified, but this paper focuses on the kind that requires the positive steady states of a system to possess certain qualities.
As defined in \cite{shifein}, a biological system has absolute concentration robustness (ACR) in a molecular species if, in every positive steady state the system admits, the concentration of the said species is the same. Identifying the steady states of a system is not an easy task, which makes this property difficult to verify. Shinar and Feinberg provided a sufficient condition which guarantees that a deficiency-one mass-action kinetic (MAK) system exhibits ACR \cite{shifein}. The condition requires the corresponding network to have two nonterminal complexes that differ only in a single species. This structural condition is easy to check in small networks and thus offers some practical advantages.
In 2018, Fortun et al. showed that the result of Shinar and Feinberg can be readily extended to deficiency-one power-law kinetic systems with reactant-determined kinetics (PL-RDK) \cite{fort4}. A PL-RDK system is a power-law kinetic system in which reactions sharing the same reactant complex have identical kinetic order vectors; it generalizes the MAK system, where the entries of a kinetic order vector are the stoichiometric coefficients of the corresponding reactant complex.
The deficiency-one requirement of the results mentioned above is significantly limiting when it comes to analyzing the capacity of a system to admit an ACR. Fortunately, Fortun and Mendoza \cite{fort3} and Lao et al. \cite{lao} came up with more general results which do not require a network to have a specific deficiency. Their results utilized the concept of network decomposition which is done by partitioning the reaction set of the network such that each partition generates a network (called subnetwork) smaller than the given network. The analysis then focuses on the low-deficiency subnetworks (with a deficiency of at most one) that contain a Shinar-Feinberg pair (SF-pair). An SF-pair is a pair of reactions with kinetic order vectors that only differ in a particular species. This result allows the ACR determination to be confined to the identified subnetworks that are relatively easier to handle.
Recently, Meshkat et al. \cite{mesh} provided a necessary and sufficient condition for the existence of stable ACR in a rank-one MAK system. A system has stable ACR if it has an ACR such that each of its positive steady states is stable. However, this ACR criterion does not extend in general to PL-RDK systems, in contrast to the sufficient conditions for low deficiency systems by Horn and Jackson in 1972 \cite{horn} (as generalized by Fortun and Mendoza in 2021 \cite{fort3}) and Shinar and Feinberg in 2010 \cite{shifein}. Counterexamples and examples of PL-RDK systems for the ACR criterion are discussed in this paper.
This paper also discusses a necessary condition for ACR in multistationary rank-one kinetic systems. The use of this condition in ACR analysis is illustrated by examples from classes of multistationary rank-one mass action systems introduced by Pantea and Voitiuk in 2022 \cite{voitiuk}. In addition, a concept of equilibria variation for kinetic systems based on the number of ACR species is introduced here. Its basic properties are derived and a general lower bound is computed for deficiency zero PL-RDK systems. For multistationary rank-one systems, however, the necessary condition leads to a much sharper lower bound.
This paper is organized as follows. Fundamental concepts on chemical reaction networks, kinetic systems, and robustness are provided in Section 2. In Section 3, the problem of extending the result of Meshkat et al. to PLK systems is presented. A necessary condition for ACR in rank-one multistationary kinetic systems is given in Section 4. ACR and equilibria variation are discussed in Section 5. A summary and an outlook are provided in the last section.
\section{Fundamentals of chemical reaction networks and kinetic systems} \subsection{Structure of chemical reaction networks}
We review in this section some necessary concepts and results on chemical reaction networks, the details of which can be found in \cite{fein2, arce, fort3}.
First, we introduce some notations used in this paper. The sets of real numbers, nonnegative real numbers, and positive real numbers are denoted, respectively, by $\mathbb R, \mathbb R_{\geq 0}$, and $\mathbb R_{> 0}$. Given that $\mathscr I$ is a finite index set, $\mathbb R^{\mathscr I}$ denotes the usual vector space of real valued functions with domain $\mathscr I$ where addition, subtraction, and scalar multiplication are defined the usual way. For $x\in \mathbb R^{\mathscr I}$ and $i\in \mathscr I$, $x_i$ denotes the $i^{th}$ coordinate of $x$. Lastly, if $x\in \mathbb R^{\mathscr I}_{> 0}$ and $y\in \mathbb R^{\mathscr I}$, then $x^y\in \mathbb R_{> 0}$ is defined to be $x^y = \displaystyle \prod_{i\in \mathscr I}x_i^{y_i}$.
\begin{definition} A \textbf{chemical reaction network} is a triple $\mathscr N = (\mathscr S, \mathscr C, \mathscr R)$ of three non-empty finite sets: \begin{enumerate}
\item a set of \textbf{species} $\mathscr S$;
\item a set $\mathscr C$ of \textbf{complexes}, which are nonnegative integer linear combinations of the species; and
\item a set $\mathscr R \subseteq \mathscr C \times \mathscr C$ of reactions such that
\begin{itemize}
\item $(y,y)\notin \mathscr R$ for all $y\in \mathscr C$, and
\item for each $y\in \mathscr C$, there exists a $y'\in \mathscr C$ such that $(y,y')\in \mathscr R$ or $(y',y)\in \mathscr R$.
\end{itemize} \end{enumerate} \end{definition}
\noindent The nonnegative coefficients of the species in a complex are referred to as \textbf{stoichiometric coefficients}. In this paper, a reaction $R_i=(y_i,y'_i)$ is also denoted by $R_i: y_i\rightarrow y'_i$ and $y_i$ and $y'_i$ are called the \textbf{reactant} and \textbf{product complexes} of $R_i$, respectively. Further, we use the symbols $m$, $n$, and $r$ to denote the numbers of species, complexes, and reactions, respectively. The following example shows that a CRN can be represented by a digraph where the complexes and reactions serve as the digraph's vertices and arcs, respectively.
\subsection*{Running example 1}
Chemical reaction networks (CRNs) can be represented as a directed graph. The vertices or nodes are the complexes and the reactions are the edges. The CRN is not unique and might not have a physical interpretation. Let us consider the following CRN: \\
\begin{center} \begin{tikzpicture}[baseline=(current bounding box.center)] \tikzset{vertex/.style = {draw=none,fill=none}} \tikzset{edge/.style = {bend left,->,> = latex', line width=0.20mm}}
\node[vertex] (1) at (-4,1.5) {$X_1 + X_2 + X_3$}; \node[vertex] (2) at (0,1.5) {$X_3$}; \node[vertex] (3) at (-4,0) {$2X_1$}; \node[vertex] (4) at (0,0) {$3X_1 + X_2$}; \node[vertex] (5) at (4,0) {$4X_1 + 2X_2$}; \node[vertex] (6) at (0,-1.5) {$4X_1+X_2$}; \node[vertex] (7) at (4,-1.5) {$3X_1$}; \draw [edge] (1) to["$k_1$"] (2); \draw [edge] (4) to["$k_2$"] (3); \draw [edge] (4) to["$k_3$"] (5); \draw [edge] (7) to["$k_4$"] (6); \end{tikzpicture} \end{center}
The $k_i$'s are called the reaction rate constants. We have $m=3$ (species), $n=7$ (complexes), $n_r=3$ (reactant complexes) and $r=4$ (reactions). Here, we can write $$\mathscr{S}=\left\{ X_1, X_2, X_3\right\}, \quad \mathscr{C}=\left\{X_1 + X_2 + X_3, X_3, 2X_1, 3X_1 + X_2, 4X_1 + 2X_2, 4X_1+X_2, 3X_1 \right\}.$$
On the other hand, the set of reaction $\mathscr{R}$ consists of the following: $$\begin{array}{l} R_{1}: X_1 + X_2 + X_3 \rightarrow X_3 \\ R_{2}: 3X_1+X_2 \rightarrow 2X_1\\ \end{array} \quad \begin{array}{l} R_{3}: 3X_1+X_2 \rightarrow 4X_1 + 2X_2 \\ R_{4}: 3X_1 \rightarrow 4X_1+X_2\\ \end{array}$$
We denote the CRN $\mathscr{N}$ as $\mathscr{N}= (\mathscr{S}, \mathscr{C}, \mathscr{R})$. The \textbf{linkage classes} of a CRN are the maximal connected subgraphs of its reaction graph, i.e., for any complexes $C_i$, $C_j$ of such a subgraph, there is an undirected path between them. Thus, the number of linkage classes, denoted by $l$, of Running Example 1 is three ($l=3$). The linkage classes are: $$\mathscr{L}_1=\left\{ R_1 \right\}, \quad \mathscr{L}_2=\left\{R_2,R_3 \right\}, \quad \mathscr{L}_3=\left\{R_4 \right\}.$$ A maximal subset of a linkage class in which any two vertices are connected by a directed path in each direction is said to be a \textbf{strong linkage class}; their number is denoted by $sl$. In Running Example 1, every strong linkage class is a singleton. We also identify the \textbf{terminal strong linkage classes}, whose number is denoted by $t$, to be the strong linkage classes with no reaction from a complex in the strong linkage class to a complex outside the same strong linkage class. The terminal strong linkage classes can be of two kinds: cycles (not necessarily simple) and singletons (which we call ``terminal points'').
We now define important CRN classes. A CRN is \textbf{weakly reversible} if every linkage class is a strong linkage class. A CRN is \textbf{t-minimal} if $t = l$, i.e. each linkage class has only one terminal strong linkage class. Let $n_r$ be the number of reactant complexes of a CRN. Then $n - n_r$ is the number of terminal points. A CRN is called \textbf{cycle-terminal} if and only if $n - n_r = 0$, i.e., each complex is a reactant complex. Clearly, the CRN of Running Example 1 is neither weakly reversible (its linkage classes are not strongly connected) nor t-minimal (here $t = 4 > l = 3$).
With each reaction $y\rightarrow y'$, we associate a \textbf{reaction vector} obtained by subtracting the reactant complex $y$ from the product complex $y'$. The \textbf{stoichiometric subspace} $S$ of a CRN is the linear subspace of $\mathbb{R}^\mathscr{S}$ defined by $$S := \text{span } \left\lbrace y' - y \in \mathbb{R}^\mathscr{S} \mid y\rightarrow y' \in \mathscr{R}\right\rbrace.$$
The \textbf{map of complexes} $\displaystyle{Y: \mathbb{R}^\mathscr{C} \rightarrow \mathbb{R}^\mathscr{S}}$ maps the basis vector $\omega_y$ to the complex $ y \in \mathscr{C}$. The \textbf{incidence map} $\displaystyle{I_a : \mathbb{R}^\mathscr{R} \rightarrow \mathbb{R}^\mathscr{C}}$ is defined by mapping, for each reaction $\displaystyle{R_i: y \rightarrow y' \in \mathscr{R}}$, the basis vector $\omega_{R_i}$ (or simply $\omega_i$) to the vector $\omega_{y'}-\omega_{y} \in \mathbb{R}^\mathscr{C}$. The \textbf{stoichiometric map} $\displaystyle{N: \mathbb{R}^\mathscr{R} \rightarrow \mathbb{R}^\mathscr{S}}$ is defined as $N = Y \circ I_a$.
In Running Example 1, the matrices $Y$ and $I_a$ are
$$Y=\begin{blockarray}{cccccccc} C_1 & C_2 & C_3 & C_4 & C_5 & C_6 & C_7 \\ \begin{block}{[ccccccc]c} 1 & 0 & 2 & 3 & 4 & 4 & 3 & X_1 \\ 1 & 0 & 0 & 1 & 2 & 1 & 0 & X_2 \\ 1 & 1 & 0 & 0 & 0 & 0 & 0 & X_3 \\ \end{block} \end{blockarray}$$
$$I_a=\begin{blockarray}{ccccc} R_1 & R_2 & R_3 & R_4 \\ \begin{block}{[cccc]c} -1 & 0 & 0 & 0 & C_1 \\ 1 & 0 & 0 & 0 & C_2 \\ 0 & 1 & 0 & 0 & C_3 \\ 0 & -1 & -1 & 0 & C_4 \\ 0 & 0 & 1 & 0 & C_5 \\ 0 & 0 & 0 & 1 & C_6 \\ 0 & 0 & 0 & -1 & C_7 \\ \end{block} \end{blockarray}.$$
The \textbf{deficiency} $\delta$ is defined as $\delta = n - l - \dim S$. This non-negative integer is, as Shinar and Feinberg pointed out in \cite{shifein2}, essentially a measure of the linear dependency of the network's reactions. In Running Example 1, the deficiency of the network is 3.
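Computations of this kind are easy to automate. The following short Python sketch (our own illustration, not part of the original exposition; it assumes only the \texttt{numpy} package) recomputes the stoichiometric matrix, the rank, and the deficiency of Running Example 1 from $Y$ and $I_a$:
\begin{verbatim}
import numpy as np

# Map of complexes Y (species x complexes) and incidence map I_a
# (complexes x reactions) of Running Example 1.
Y = np.array([[1, 0, 2, 3, 4, 4, 3],
              [1, 0, 0, 1, 2, 1, 0],
              [1, 1, 0, 0, 0, 0, 0]])
I_a = np.array([[-1, 0, 0, 0],
                [ 1, 0, 0, 0],
                [ 0, 1, 0, 0],
                [ 0,-1,-1, 0],
                [ 0, 0, 1, 0],
                [ 0, 0, 0, 1],
                [ 0, 0, 0,-1]])

N = Y @ I_a                        # stoichiometric matrix; columns are reaction vectors
n, l = 7, 3                        # numbers of complexes and linkage classes
s = np.linalg.matrix_rank(N)       # s = dim S = 1
print(n - l - s)                   # deficiency = 3
\end{verbatim}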
\subsection{Dynamics of chemical reaction networks}
A \textbf{kinetics} is an assignment of a rate function to each reaction in a CRN. A network $\mathscr N$ together with a kinetics $K$ is called a \textbf{chemical kinetic system} (CKS) and is denoted here by $(\mathscr N,K)$. \textbf{Power law kinetics} (PLK) is identified by the \textbf{kinetic order matrix}, which is an $r\times m$ matrix $F=[F_{ij}]$, and a vector $k\in \mathbb R^{\mathscr R}_{>0}$, called the \textbf{rate vector}.
\begin{definition} A kinetics $K: \mathbb R^{\mathscr S}_{>0} \rightarrow \mathbb R^{\mathscr R}$ is a \textbf{power law kinetics} if
\begin{center}
$K_i(x)=k_ix^{F_{i,\cdot}}$ for $i=1,\dots, r$ \end{center}
\noindent where $k_i\in \mathbb R_{>0}$, $F_{i,j} \in \mathbb R$, and $F_{i,\cdot}$ is the row of $F$ associated to reaction $R_i$. \end{definition}
We can classify a PLK system based on the kinetic orders assigned to its \textbf{branching reactions} (i.e., reactions sharing a common reactant complex).
\begin{definition} A PLK system has \textbf{reactant-determined kinetics} (of type PL-RDK) if for any two branching reactions $R_i, R_j\in \mathscr R$, the corresponding rows of kinetic orders in $F$ are identical, i.e., $F_{ih}=F_{jh}$ for $h=1, \dots,m$. Otherwise, a PLK system has \textbf{non-reactant-determined kinetics} (of type PL-NDK). \end{definition}
Consider the CRN in Running example 1 with the following kinetic order matrix.
\begin{equation} \nonumber F=\begin{blockarray}{cccc} X_1 & X_2 & X_3 \\ \begin{block}{[ccc]c} 0 & 0 & 2 & R_1 \\ 1 & 1 & 0 & R_2 \\ 1 & 1 & 0 & R_3 \\ 1 & 0 & 0 & R_4 \\ \end{block} \end{blockarray}. \end{equation}
\noindent Observe that $R_2$ and $R_3$ are two branching reactions whose corresponding rows in $F$ (or \textbf{kinetic order vectors}) are the same. Hence, the CRN is a PL-RDK system.
The well-known \textbf{mass action kinetic system} (MAK) forms a subset of PL-RDK systems. In particular, MAK is given by $K_i(x)=k_ix^{Y_{\cdot,y_i}}$ for all reactions $R_i: y_i \rightarrow y'_i \in \mathscr R$ with $k_i\in \mathbb R_{>0}$ (called the \textbf{rate constant}). The vector $Y_{\cdot,y_i}$, the column of $Y$ corresponding to $y_i$, contains the stoichiometric coefficients of the reactant complex $y_i\in \mathscr C$.
\begin{definition} The \textbf{species formation rate function} of a chemical kinetic system is the vector field
\begin{center}
$f(c)=NK(c)=\displaystyle \sum_{y_i\rightarrow y'_i\in \mathscr R}K_i(c)(y'_i-y_i)$, where $c\in \mathbb R^{\mathscr S}_{\geq 0}$, \end{center} where $N$ is the $m \times r$ matrix, called \textbf{stoichiometric matrix}, whose columns are the reaction vectors of the system. \noindent The equation $dc/dt=f(c(t))$ is the \textbf{ODE or dynamical system} of the chemical kinetic system. An element $c^*$ of $\mathbb R^{\mathscr S}_{>0}$ such that $f(c^*)=0$ is called a \textbf{positive equilibrium} or \textbf{steady state} of the system. We use $E_+(\mathscr N,K)$ to denote the set of all positive equilibria of a CKS. \end{definition}
Deficiency is one of the important parameters in CRNT to establish claims regarding the existence, multiplicity, finiteness and parametrization of the set of positive steady states.
The dynamical system $f(x)$ (or species formation rate function (SFRF)) of the Running Example 1 can be written as $$\left[ \begin{array}{c}
\dot{X_1} \\
\dot{X_2} \\
\dot{X_3} \\ \end{array} \right]= \begin{blockarray}{cccc} R_1 & R_2 & R_3 & R_4 \\ \begin{block}{[cccc]} -1 & -1 & 1 & 1 \\ -1 & -1 & 1 & 1 \\ 0 & 0 & 0 & 0 \\ \end{block} \end{blockarray} \left[ \begin{array}{c}
k_1 X_3^2 \\
k_2 X_1 X_2 \\
k_3 X_1 X_2 \\
k_4 X_1 \\ \end{array}
\right] =NK(x).$$
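The SFRF can also be generated symbolically. The sketch below (our own illustration, assuming the \texttt{sympy} package) builds $f(x)=NK(x)$ for the running example with the power-law rates determined by $F$ and confirms that $\dot{X_1}=\dot{X_2}$ and $\dot{X_3}=0$, as expected since every reaction vector is a multiple of $(1,1,0)$:
\begin{verbatim}
import sympy as sp

X1, X2, X3 = sp.symbols('X1 X2 X3', positive=True)
k1, k2, k3, k4 = sp.symbols('k1 k2 k3 k4', positive=True)

N = sp.Matrix([[-1, -1, 1, 1],
               [-1, -1, 1, 1],
               [ 0,  0, 0, 0]])      # stoichiometric matrix
K = sp.Matrix([k1*X3**2,             # rate functions read off from F
               k2*X1*X2,
               k3*X1*X2,
               k4*X1])

f = N * K                            # species formation rate function
print(f[0])                          # -k1*X3**2 - k2*X1*X2 + k3*X1*X2 + k4*X1
print(sp.simplify(f[0] - f[1]))      # 0, i.e. dX1/dt = dX2/dt
print(f[2])                          # 0, i.e. dX3/dt = 0
\end{verbatim}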
Analogous to the species formation rate function, we also have the complex formation rate function.
\begin{definition} The \textbf{complex formation rate function} $g: \mathbb R^{\mathscr S}_{>0}\rightarrow \mathbb R^{\mathscr C}$ of a chemical kinetic system is the vector field
\begin{center}
$g(c)=I_aK(c)=\displaystyle \sum_{y_i\rightarrow y'_i\in \mathscr R}K_i(c)(\omega_{y'_i}-\omega_{y_i})$, where $c\in \mathbb R^{\mathscr S}_{\geq 0}$. \end{center}
\noindent where $I_a$ is the incidence map. A CKS is \textbf{complex balanced} if it has a complex balanced steady state, i.e., there is a composition $c^{**}\in \mathbb R_{>0}^{\mathscr S}$ such that $g(c^{**} )=0$. We denote by $Z_+(\mathscr N,K)$ the set of all complex balanced steady states of the system. \end{definition}
\begin{theorem}[Corollary 4.8, \cite{fein3}] \label{1.5} If a CKS has deficiency 0, then its steady states are all complex balanced. \end{theorem}
\subsection{A brief review of concentration robustness} The concept of \textbf{absolute concentration robustness (ACR)} was first introduced by Shinar and Feinberg in their well-cited paper published in \textit{Science} \cite{shifein}. ACR pertains to a phenomenon in which a species in a kinetic system carries the same value in any positive steady state the network may admit, regardless of initial conditions. In particular, a PL-RDK system $(\mathscr{N},K)$ has ACR in a species $X \in \mathscr{S}$ if there exists $c^*\in E_+(\mathscr{N},K)$ such that for every other $c^{**} \in E_+(\mathscr{N},K)$, we have $c^{**}_X =c^*_{X}$, where $c^{**}_X$ and $c^*_{X}$ denote the concentrations of $X$ in $c^{**}$ and $c^*$, respectively.
Fortun and Mendoza \cite{fort3} emphasized that ACR, as a dynamical property, is conserved under dynamic equivalence, i.e., it is shared by kinetic systems that generate the same set of ordinary differential equations. They further investigated ACR in power law kinetic systems and derived novel results that guarantee ACR for some classes of PLK systems. For these PLK systems, the key property for ACR in a species $X$ is the presence of an SF-reaction pair. A pair of reactions in a PLK system is called a \textbf{Shinar-Feinberg pair} (or \textbf{SF-pair)} in a species $X$ if their kinetic order vectors differ only in $X$. A subnetwork of the PLK system is of \textbf{SF-type} if it contains an SF-pair in $X$.
\section{The extension problem for the ACR criterion for rank-one systems}
Just recently, Meshkat et al. gave a necessary and sufficient condition for the existence of stable ACR in rank-one MAK systems \cite{mesh}. A system is said to have stable ACR if it has an ACR such that each of its positive steady states is stable. The condition requires the existence of an embedded one-species network with one of several specific structures described using arrow diagrams.
\subsection{A review of the key results of Meshkat et al.}
A network $\mathscr N=\left(\mathscr S, \mathscr C, \mathscr R \right)$ is called a one-species network if there is a species $A_i$ such that $(y,y')\in \mathscr R$ implies that $y_j= y_j'=0$ for all species $A_j\in \mathscr S\setminus\{A_i\}$. In other words, every complex in the network takes the form $kA_i$, where $k$ is a nonnegative integer. The following definition and example were taken from \cite{mesh}.
\begin{definition}
Let $\mathscr N=\left(\{A\}, \mathscr C, \mathscr R \right)$ be a one-species network with $|\mathscr R|\geq 1$. Let every reaction of $\mathscr N$ be of the form $aA\rightarrow bA$, where $a,b\geq 0$ and $a\neq b$. Suppose $\mathscr N$ has $m$ distinct reactant complexes with $a_1<a_2<\dots<a_m$ as their stoichiometric coefficients. The \textbf{arrow diagram} of $\mathscr N$, denoted by $\rho=(\rho_1,\rho_2,\dots,\rho_m)$, is the element of $\{\rightarrow,\leftarrow, \longleftrightarrow\}^m$ given by: \begin{equation}\nonumber
\rho_i = \left\{ \begin{array}{clrc@{\qquad}l} \rightarrow & \textnormal{if for all reactions\;} a_iA \rightarrow bA \textnormal{\;in\;} \mathscr N, \textnormal{\;it is the case that\;} b > a_i \\ \leftarrow & \textnormal{if for all reactions\;} a_iA \rightarrow bA \textnormal{\;in\;} \mathscr N, \textnormal{\;it is the case that\;} b < a_i\\ \longleftrightarrow & \textnormal{otherwise} \end{array} \right. \end{equation} \end{definition}
\begin{example} Consider the network determined by $\{B\rightarrow A, 2A+B \rightarrow A+2B\}$. After removing species $A$, the embedded network has the following reaction set $\{0\leftarrow B \rightarrow 2B\}$ which has $(\longleftrightarrow)$ as arrow diagram. On the other hand, $\{0\rightarrow A, 2A\rightarrow A\}$ is the embedded network obtained after removing $B$, with arrow diagram $(\rightarrow,\leftarrow)$. \end{example}
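Arrow diagrams of one-species networks can be computed mechanically. The helper below is our own hypothetical sketch (plain Python, no external packages); a reaction $aA\rightarrow bA$ is encoded as the pair $(a,b)$, and the two embedded networks of the example above are used as test cases:
\begin{verbatim}
def arrow_diagram(reactions):
    """Arrow diagram of a one-species network; reactions are (a, b) pairs for aA -> bA."""
    reactant_coeffs = sorted({a for a, _ in reactions})
    diagram = []
    for a in reactant_coeffs:
        targets = [b for (x, b) in reactions if x == a]
        if all(b > a for b in targets):
            diagram.append('->')
        elif all(b < a for b in targets):
            diagram.append('<-')
        else:
            diagram.append('<->')
    return diagram

print(arrow_diagram([(1, 0), (1, 2)]))   # ['<->']        for {0 <- B -> 2B}
print(arrow_diagram([(0, 1), (2, 1)]))   # ['->', '<-']   for {0 -> A, 2A -> A}
\end{verbatim}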
Here is the main result of \cite{mesh}, which provides a necessary and sufficient condition for the existence of stable ACR in rank-one MAK systems. The result is based on the idea of arrow diagrams.
\begin{theorem}\label{meshkatthm}
Let $\mathscr N$ be a rank-one network with species $A_1, A_2, \dots, A_m$. Then, the following are equivalent:
\begin{enumerate}
\item $\mathscr N$ has stable ACR and admits a positive steady state.
\item There is a species $A_{i^*}$ such that the following holds:
\begin{enumerate}
\item for the embedded network of $\mathscr N$ obtained by removing all species except $A_{i^*}$, the arrow diagram has one of the following forms:
\begin{equation}
\begin{array}{rl} (\longleftrightarrow,\leftarrow,\leftarrow,\dots,\leftarrow), & (\rightarrow, \rightarrow, \dots, \rightarrow,\longleftrightarrow) \\
(\rightarrow, \rightarrow, \dots, \rightarrow,\longleftrightarrow, \leftarrow,\leftarrow,\dots,\leftarrow), & (\rightarrow, \rightarrow, \dots, \rightarrow, \leftarrow,\leftarrow,\dots,\leftarrow)
\end{array}
\end{equation}
\item the reactant complexes of $\mathscr N$ differ only in species $A_{i^*}$ (i.e., if $y$ and $\hat{y}$ are both reactant complexes, then $y_i=\hat{y_i}$ for all $i\neq i^*$).
\end{enumerate}
\end{enumerate} \end{theorem}
Notice that in the above example, the embedded network obtained by removing species $A$ from $\mathscr N$ has $(\longleftrightarrow)$ as arrow diagram that falls under the enumerated arrow diagrams in the above theorem. Moreover, the reactant complexes in $\mathscr N$ differ only in $A$. It follows that $\mathscr N$ has a stable ACR.
\subsection{Examples of PL-RDK systems for which the ACR criteria do not hold}
Here is the direct adaptation of the sufficient condition of Theorem \ref{meshkatthm} to PLK systems and their formalism. Let $(\mathscr N, K)$ be a rank-one PLK system with species $A_1, A_2, \dots, A_m$, and let $F$ be the kinetic order matrix of the system. The adapted statement reads: $(\mathscr N, K)$ has ACR if there is a species $A_{j^*}$ such that the following hold:
\begin{enumerate}
\item for the embedded network of $\mathscr N$ obtained by removing all species except $A_{j^*}$, the arrow diagram has one of the following forms:
\begin{equation}
\begin{array}{rl} (\longleftrightarrow,\leftarrow,\leftarrow,\dots,\leftarrow), & (\rightarrow, \rightarrow, \dots, \rightarrow,\longleftrightarrow) \\
(\rightarrow, \rightarrow, \dots, \rightarrow,\longleftrightarrow, \leftarrow,\leftarrow,\dots,\leftarrow), & (\rightarrow, \rightarrow, \dots, \rightarrow, \leftarrow,\leftarrow,\dots,\leftarrow)
\end{array}
\end{equation}
\item the kinetic order vectors are pairwise SF-pairs in $A_{j^*}$ (i.e., $F_{ij}=F_{lj}$ for all $j\neq j^*$).
\end{enumerate}
The following counterexample shows that the statement above is not necessarily true.
\begin{example} Consider $\mathscr N=(\{A,B\},\mathscr C, \mathscr R)$, where $\mathscr R$ consists of the following reactions and rate constants: \begin{equation}\nonumber
\begin{array}{cl}
R_1:& B {\xrightarrow[r_1]{}} A \\
R_2:& 2A+B {\xrightarrow[r_2]{}} 3A\\
R_3:& 3A+B {\xrightarrow[r_3]{}} 2A+2B\\
R_4:& 4A+B {\xrightarrow[r_4]{}} 3A+2B\\
\end{array} \end{equation} This is a rank-one network with $S=\textnormal{span\;}\{A-B\}$ as its stoichiometric subspace. Suppose that it is endowed with power law kinetics and has the following kinetic order matrix:
\begin{equation}\nonumber F=\begin{blockarray}{ccc} A & B \\ \begin{block}{[cc]c} p_1 & q & R_1 \\ p_2 & q & R_2 \\ p_3 & q & R_3 \\ p_4 & q & R_4\\ \end{block} \end{blockarray} \end{equation} where $p_i$'s are integers. We obtained the following embedded network after removing $B$.
\begin{equation}\nonumber \begin{array}{c}
0\rightarrow A\\
2A\leftrightarrow 3A\leftarrow 4A \end{array} \end{equation} With the four distinct reactant complexes in this network, here is the corresponding arrow diagram $(\rightarrow, \rightarrow, \leftarrow, \leftarrow)$. Now, observe that $(\mathscr N, K)$ has the following ODEs: \begin{equation}\nonumber \begin{array}{cc}
\dot{A}= & {r_1}A^{p_1}B^q + {r_2}A^{p_2}B^q - {r_3}A^{p_3}B^q - {r_4}A^{p_4}B^q \\
\dot{B}= & -{r_1}A^{p_1}B^q - {r_2}A^{p_2}B^q + {r_3}A^{p_3}B^q + {r_4}A^{p_4}B^q \end{array} \end{equation} To solve for the positive equilibria, we set one of these equations equal to zero and obtain \begin{equation}\nonumber \begin{array}{cc}
B^q({r_1}A^{p_1} + {r_2}A^{p_2} - {r_3}A^{p_3} - {r_4}A^{p_4}) & = 0\\
\Leftrightarrow {r_1}A^{p_1} + {r_2}A^{p_2} - {r_3}A^{p_3} - {r_4}A^{p_4} & = 0 \end{array} \end{equation} Observe that when $p_1<p_2<p_3<p_4$, the polynomial \begin{equation}\label{poly}
{r_1}A^{p_1} + {r_2}A^{p_2} - {r_3}A^{p_3} - {r_4}A^{p_4} \end{equation} has exactly one positive root by Descartes' Rule of Signs. This means that the system has ACR in $A$. On the other hand, suppose that
$ \begin{bmatrix} p_1 \\ p_2 \\ p_3 \\ p_4 \end{bmatrix} =
\begin{bmatrix} 0 \\ 3 \\ 1 \\ 2 \end{bmatrix} \;$ and
$ \begin{bmatrix} r_1 \\ r_2 \\ r_3 \\ r_4 \end{bmatrix} =
\begin{bmatrix} 4 \\ 1 \\ 2 \\ 3 \end{bmatrix} \;$ \end{center} Then, the polynomial in (\ref{poly}) becomes \begin{equation}
4+A^3-2A-3A^2=4-2A-3A^2+A^3 \end{equation} This polynomial has two positive roots, namely $1$ and $1 + \sqrt{5}$. This means that the system does not have ACR in $A$ in this case. \end{example}
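The two positive roots can be verified symbolically; the following check is our own sketch (assuming the \texttt{sympy} package):
\begin{verbatim}
import sympy as sp

A = sp.symbols('A')
r1, r2, r3, r4 = 4, 1, 2, 3
p1, p2, p3, p4 = 0, 3, 1, 2            # the choice made in the counterexample

poly = r1*A**p1 + r2*A**p2 - r3*A**p3 - r4*A**p4
roots = sp.solve(sp.Eq(poly, 0), A)
positive_roots = [r for r in roots if r.is_real and r > 0]
print(sp.expand(poly))                 # A**3 - 3*A**2 - 2*A + 4
print(positive_roots)                  # [1, 1 + sqrt(5)]: two positive steady-state
                                       # values of A, hence no ACR in A
\end{verbatim}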
The above counterexample shows the crucial role played by the kinetic orders $p_i$. If these parameters are not chosen carefully, ACR may fail in the system. The problem becomes more complicated when the $p_i$'s are not integers, since the number of positive roots of the expression in (\ref{poly}) may not be determined immediately. This negatively affects the possibility of extending the result of Meshkat et al. to PLK systems where the $p_i$'s are allowed to be arbitrary real numbers.
\subsection{The stable ACR criterion for homogeneous PL-quotients of mass action systems}
In this section, we introduce the set of homogeneous monomial quotients of mass action systems, for which the stable ACR criterion holds. To define them, we recall some definitions and a result from \cite{ACB2022}.
\begin{definition} \label{PFF} Two kinetics $K, K'$ in $\mathscr{K}_\Omega (\mathscr{N})$ are \textbf{positive function factor equivalent} (PFF-equivalent) if for all $x\in \mathbb{R}^{\mathscr{S}}_{>}$ and every reaction $q$, $\dfrac{K_q(x)}{K'_q(x)}$ is a positive function of $x$ only, i.e. independent of $q.$ \end{definition}
A key property of PFF-equivalence is expressed in the following proposition:
\begin{proposition}[\cite{ACB2022}] If $K$, $K'$ are PFF-equivalent, then $E_+(\mathscr{N}, K) = E_+(\mathscr{N}, K')$. \label{PFFresult} \end{proposition}
Let $(\mathscr{N}, K)$ be a MAK system with $m$ species and $r$ reactions and $(\beta_1,\cdots,\beta_m)$ any real vector. Let $F_{MAK}$ and $F_\beta$ be the $r \times m$ matrices defined as follows: \begin{itemize}
\item $F_{MAK,q} = Y^\top_{\rho(q)}$, where $q$ is a reaction, $\rho(q)$ denotes its reactant complex, and $Y$ is the matrix of complexes of $(\mathscr{N}, K)$.
\item $F_\beta$ with identical rows $\beta = (\beta_1,\cdots,\beta_m)$. \end{itemize}
\begin{definition} A \textbf{homogeneous PL quotient} of a MAK system $(\mathscr{N}, K)$ is a PLK system $(\mathscr{N}, K_{PLK})$ with the same rate constants and a kinetic order matrix $F_{PLK} = F_{MAK} - F_\beta$. \end{definition}
We have the following result:
\begin{proposition} The stable ACR criterion holds for homogeneous PL-quotients of rank-one mass action systems. \end{proposition}
\begin{proof} Let $(\mathscr{N}, K_{PLK})$ be a homogeneous PL quotient of a rank-one MAK system $(\mathscr{N}, K)$, where $(\mathscr{N}, K)$ satisfies the stable ACR criterion. For each species, write $X_i^{\alpha_i} = X_i^{\beta_i}(X_i^{\alpha_i - \beta_i})$, where $\alpha_i$ denotes the kinetic order of $X_i$ in the reaction under consideration. For a reaction $q$, we then have $K_q(x) = k_q \prod_i X_i^{\beta_i} \prod_i (X_i^{\alpha_i - \beta_i}) = \bigl(\prod_i X_i^{\beta_i}\bigr)K_{PLK,q}(x)$. Note that $\prod_i X_i^{\beta_i}$ is independent of the reaction $q$ and hence, from Definition \ref{PFF}, $K_{PLK}$ and $K$ are PFF-equivalent. From Proposition \ref{PFFresult}, $E_+(\mathscr{N}, K) = E_+(\mathscr{N}, K_{PLK})$. Thus, their ACR species coincide, and hence the stable ACR criterion also holds for $(\mathscr{N}, K_{PLK})$. \end{proof}
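The mechanism of the proof can be seen in a toy computation (our own sketch, assuming the \texttt{sympy} package; the one-species system used here is hypothetical and serves only as an illustration). For the rank-one MAK system $0\rightarrow A$, $2A\rightarrow A$ and its homogeneous PL quotient with $\beta=(1)$, the positive equilibria coincide:
\begin{verbatim}
import sympy as sp

A = sp.symbols('A', positive=True)
k1, k2 = sp.symbols('k1 k2', positive=True)

f_mak = k1 - k2*A**2          # MAK: kinetic orders 0 and 2
f_plk = k1*A**(-1) - k2*A     # homogeneous PL quotient: kinetic orders -1 and 1

eq_mak = sp.solve(sp.Eq(f_mak, 0), A)
eq_plk = sp.solve(sp.Eq(f_plk, 0), A)
print(eq_mak, eq_plk)         # both solution sets equal {sqrt(k1/k2)}; the two kinetics
                              # are PFF-equivalent, their ratio being the common factor A
\end{verbatim}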
\section{A necessary condition for ACR in multistationary rank-one kinetic systems}
In this section, we present a necessary condition for the occurrence of ACR species in any multistationary rank-one kinetic system. We introduce the class of co-conservative kinetic systems and show that multistationary rank-one systems of this kind do not have any ACR species. We further illustrate the condition for a number of multistationary rank-one mass action systems, using a recently presented classification of such systems by Pantea and Voitiuk \cite{voitiuk}.
\subsection{The necessary condition for ACR in multistationary rank-one kinetic systems}
The necessary condition is the following fundamental observation:
\begin{theorem} \label{4.1.1}
If $\mathscr N$ has rank one and $\left (\mathscr N, K\right )$ is multistationary, then (the line) $S$ lies in the species hyperplane of every ACR species $X$. In other words, for any basis vector $v$ of $S$, its $X$-coordinate is $0$. \end{theorem}
\begin{proof} Since $\left (\mathscr N, K\right )$ is multistationary, there is a stoichiometric class that contains two distinct positive equilibria $x_1$ and $x_2$. In other words, $x_1 - x_2 \in S$ and $x_1 - x_2 \neq 0$. For any ACR species $X$ of $\left (\mathscr N, K\right )$, it follows that the $X$-th coordinate of $x_1 - x_2 $ is $0$. Since $x_1 - x_2 \neq 0$, it is a basis vector for the one-dimensional subspace $S$ of the rank-one system, proving the claim. \end{proof}
The necessary condition immediately leads to an upper bound for the number of ACR species in a multistationary rank-one system:
\begin{corollary} \label{cor1}
Let $m_{\textnormal{ACR}}$ be the number of $\textnormal{ACR}$ species of a rank-one multistationary system $\left (\mathscr N, K\right )$. Then $m_{\textnormal{ACR}} \leq m - |\textnormal{supp\;}v|$, where $\textnormal{supp\;}v$ is the support of any basis vector of $S$. \end{corollary}
Note that the right-hand side of the inequality in Corollary \ref{cor1} is just the number of zeros in $v$. Now, recall that a network is called conservative if the orthogonal complement of $S$ contains a positive vector. We hence define the following term.
\begin{definition} A network is called \textbf{co-conservative} if $S$ contains a positive vector. \end{definition}
In general, a positive vector in $S$ can be a linear combination of reaction vectors with $0$ or negative coordinates. For example, for $m=3$, the reactions $X_1 + X_3 \rightarrow X_1 + X_2$ and $2X_1 \rightarrow 3X_1 + 2X_3$ have the reaction vectors $(0,1,-1)$ and $(1, 0, 2)$ whose sum is $(1,1,1)$. In rank-one networks, however, a positive vector requires the occurrence of a positive reaction vector $y'- y$. This in turn implies a reaction $y \rightarrow y'$ with the following characteristics: \begin{itemize}
\item all species occur in the product complex;
\item each species has a higher stoichiometric coefficient in the product complex compared to the corresponding species in the reactant complex; and
\item no enzymatic regulation on a reaction. \end{itemize}
\begin{corollary}
If a rank-one and co-conservative network is multistationary, then it has no $\textnormal{ACR}$ species. \end{corollary}
\begin{proof}
Any non-zero vector $v$ in $S$ has only positive or negative coordinates, i.e., $|\textnormal{supp\;}v|=m$. Hence, $m_{\textnormal{ACR}}=0$. \end{proof}
\subsection{Examples from the multistationary rank-one mass action systems}
Pantea and Voitiuk introduced a complete classification of multistationary rank-one mass action systems in \cite{voitiuk}. The following table provides an overview of the eight classes that they identified.
\begin{table}[H]
\centering
\begin{tabularx}{\linewidth}{|X|X|}
\hline
\textbf{Network} & \textbf{Definition} \\ \hline
Class 1-alt$^c$: 1-alt complete networks & has a 1D projection containing both $(\leftarrow,\rightarrow)$ and $(\rightarrow,\leftarrow)$ patterns \\
\hline
Class 2-alt: 2-alternating & has a 1D projection containing both $(\leftarrow,\rightarrow,\leftarrow)$ and $(\rightarrow,\leftarrow,\rightarrow)$ patterns \\
\hline
Class $Z$: zigzag network & has a 2D projection containing a zigzag \\
\hline
Class $S_1$: one-source networks & contains exactly one source complex and two reactions of opposite directions \\
\hline
Class $S^z_2$ : two-source zigzag networks & has two species, contains exactly two source complexes and has a zigzag of slope -1 \\ \hline
Class $L$: line networks & has two species and at least three source complexes satisfying some properties* \\ \hline
Class $S^{nz}_2$ : two-source non-zigzag networks & an essential network that contains exactly two source complexes and $N \in$ 1-alt$^c - Z$. \\ \hline
Class $C$: corner networks & an essential, 1-alt complete network that contains at least three source complexes satisfying some properties* \\
\hline
\end{tabularx}
\caption{Classes of rank-one networks (for details concerning the concepts and symbols used here, the readers are referred to \cite{voitiuk}).} \label{rankonenet} \end{table}
Under mass-action kinetics, our running example is a rank-one zigzag network, but it is not a line, corner, or two-source zigzag network. We illustrate Theorem \ref{4.1.1} with two examples from different classes of multistationary rank-one systems identified in \cite{voitiuk}.
\begin{example}[Class 1-alt$^c$] The rank-one network below was the focus of Example 4.2 in \cite{voitiuk}. It was shown in that paper that it has the capacity for multiple positive and non-degenerate equilibria. Moreover, the network has stoichiometric subspace generated by the vector $v=(1,1,1,0,-1)$, with coordinates listed in the species order $A,B,C,D,E$. \begin{equation}\nonumber
\begin{array}{cl}
R_1:& B+2C+2E \rightarrow A+2B+3C+E \\
R_2:& 2A+2B+C+2D+E \rightarrow A+B+2D+2E\\
R_3:& A+3C+D+2E \rightarrow 2A+B+4C+D+E\\
R_4:& 3A+3B+C+E \rightarrow 2A+2B+2E\\
\end{array} \end{equation} \end{example}
\begin{example}[Class 2-alt] The rank-one network below is a 2-alternating network with stoichiometric subspace generated by the vector $v=(2,1,0)$. From Corollary 4.3 in \cite{voitiuk}, we conclude that it has the capacity for multiple positive and non-degenerate equilibria. \begin{equation}\nonumber
\begin{array}{cl}
R_1:& 2X + 2Y \rightarrow Y \\
R_2:& 3X + Y \rightarrow 5X + 2Y\\
R_3:& 4X + 2Y + Z \rightarrow Z \\
R_4:& 4X + 2Y + 2Z \rightarrow 2X + Y + 2Z\\
\end{array} \end{equation} \end{example}
Direct computations show that the two preceding systems have ACR in species $D$ and $Z$, respectively. Consistent with Theorem \ref{4.1.1}, the basis vectors of their stoichiometric subspaces have a $0$ coordinate precisely at $D$ and $Z$, respectively.
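The zero pattern of the basis vector can likewise be checked mechanically. The sketch below (our own, assuming the \texttt{numpy} package) computes the reaction vectors of the 2-alternating example, with species ordered $(X,Y,Z)$, and recovers the support of a basis vector of $S$:
\begin{verbatim}
import numpy as np

# (reactant, product) complexes of the 2-alternating example, species order (X, Y, Z)
reactions = [((2, 2, 0), (0, 1, 0)),    # 2X + 2Y      -> Y
             ((3, 1, 0), (5, 2, 0)),    # 3X + Y       -> 5X + 2Y
             ((4, 2, 1), (0, 0, 1)),    # 4X + 2Y + Z  -> Z
             ((4, 2, 2), (2, 1, 2))]    # 4X + 2Y + 2Z -> 2X + Y + 2Z

vectors = np.array([np.subtract(p, r) for r, p in reactions])
print(vectors)                          # every row is a multiple of (2, 1, 0)
print(np.linalg.matrix_rank(vectors))   # 1: the network has rank one
print(np.nonzero(vectors[1])[0])        # [0 1]: the support excludes Z, so by
                                        # Theorem 4.1.1 only Z can be an ACR species
\end{verbatim}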
\section{ACR and equilibria variation in kinetic systems}
In this section, we briefly discuss the relationship between concentration robustness and equilibria variability in general by introducing the concept of (positive) equilibria variation. We consider several examples to illustrate its use. We assume throughout the section that the kinetic system $(\mathscr N, K)$ is positively equilibrated, i.e., $E_+(\mathscr N, K) \neq \varnothing$.
\subsection{The equilibria variation of a kinetic system}
The motivation for the following definition comes from the observation that a kinetic system has a unique (positive) equilibrium in species space if and only if it possesses absolute concentration robustness in each of its species.
\begin{definition} The \textbf{(positive) equilibria variation} of a kinetic system $(\mathscr N, K)$ is the number of non-ACR species divided by the number of species, i.e., \begin{equation*}
v_+(\mathscr N, K) = \dfrac{m-m_{ACR}}{m}. \end{equation*} \end{definition}
Clearly, the variation values lie between $0$ and $1$, and the following proposition characterizes the attainment of the extreme values:
\begin{proposition} Let $(\mathscr N, K)$ be a kinetic system. Then,
\begin{enumerate}[i.]
\item $v_+(\mathscr N, K) = 0 \Leftrightarrow |E_+(\mathscr N, K)| = 1$ and
\item $v_+(\mathscr N, K) = 1 \Leftrightarrow (\mathscr N, K)$ has no ACR species \end{enumerate} \end{proposition}
The proofs follow directly from the definition. We have the following corollary for any multistationary system:
\begin{corollary} Let $(\mathscr N, K)$ be a kinetic system.
\begin{enumerate} [a.]
\item If $(\mathscr N, K)$ is multistationary, then $v_+(\mathscr N, K) \geq \dfrac{1}{m}$. In particular, if $v_+(\mathscr N, K) = 0$, then $(\mathscr N, K)$ is monostationary.
\item If $\mathscr N$ is open and $v_+(\mathscr N, K) > 0$, then $(\mathscr N, K)$ is multistationary. \end{enumerate} \end{corollary}
\begin{proof}
For $(a)$: Multistationarity implies at least two distinct positive equilibria, hence $m_{ACR} \leq m - 1$, and the claims follow. For $(b)$: if the network is open, then there is only one stoichiometric class. Hence, multistationarity is equivalent to the occurrence of at least two distinct equilibria in species space. \end{proof}
\begin{example}
Schmitz's model of the earth's pre-industrial carbon cycle system was analyzed by Fortun et al. in \cite{fort3.5}. Below is its corresponding reaction network.
\begin{equation}
\label{schmitz}
\begin{tikzcd} M_5 \arrow[dd, "R_1 "'] \arrow[rd, "R_2", shift left] & & M_2 \arrow[ld, "R_5"', shift right] \arrow[rd, "R_{11}"', shift right] \arrow[dd, "R_9"'] & \\
& M_1 \arrow[lu, "R_3", shift left] \arrow[ru, "R_6"', shift right] \arrow[rd, "R_8"', shift right] & & M_4 \arrow[lu, "R_{10}"', shift right] \arrow[ld, "R_{12}"', shift right] \\ M_6 \arrow[ru, "R_4"'] & & M_3 \arrow[lu, "R_7"', shift right] \arrow[ru, "R_{13}"', shift right] & \end{tikzcd} \end{equation}
\noindent In this network, $M_1, M_2, M_3, M_4, M_5,$ and $M_6$ stand for atmosphere, warm ocean surface waters, cool ocean surface waters, deep ocean waters, terrestrial biota, and soil and detritus, respectively. Important numbers of the network as well as its kinetic order matrix are given in the following.
\begin{figure}
\caption{(a) Some numbers related to the reaction network of Schmitz's model of the earth's pre-industrial carbon cycle system; (b) kinetic order matrix of the network.}
\label{schmitz1}
\end{figure}
\pagebreak
Fortun and Mendoza \cite{fort3.5} showed that the system has no ACR species, i.e., $v_+(\mathscr N, K) = 1 > \dfrac{1}{6}$. Since the system has a conservative, concordant, and weakly reversible network and weakly monotonic kinetics, by Theorem 6.6 of \cite{shinar}, it has a unique positive equilibrium in each stoichiometric class and is consequently monostationary. Note also that the network has rank $5 < 6$, hence it is a closed (i.e., not open) network. \end{example}
If the multistationary kinetic system has rank one, Theorem \ref{4.1.1} provides a better lower bound for the equilibria variation.
\begin{proposition}
Let $(\mathscr N, K)$ be a multistationary rank-one kinetic system and $v$ a basis vector of $S$. Then, $v_+(\mathscr N, K) \geq \dfrac{|supp(v)|}{m}$. \end{proposition}
\begin{proof}
According to Corollary \ref{cor1} (which follows from Theorem \ref{4.1.1}), $m_{ACR} \leq m - |supp(v)|$, equivalently $|supp(v)| \leq m - m_{ACR}$, leading to the new lower bound. Note that since $v$ is a basis vector, $|supp(v)|\geq 1$, confirming the improvement. \end{proof}
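For instance (our own illustration), take the 2-alternating example of Section 4 with rate constants chosen so that it is multistationary. Here $m=3$ and a basis vector of $S$ is $v=(2,1,0)$, so $|supp(v)|=2$ and $$v_+(\mathscr N, K) \geq \frac{2}{3},$$ i.e., at most one species can have ACR (namely $Z$, by Theorem \ref{4.1.1}), compared with the bound $v_+(\mathscr N, K)\geq 1/3$ obtained from multistationarity alone.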
One can derive a lower bound for equilibria variation for any kinetic system using the general species hyperplane criterion for ACR introduced by Hernandez and Mendoza in \cite{hernandez}. It is based on the following considerations:
\begin{definition}
For any species $X$, the $(m-1)$-dimensional subspace
\begin{equation}
\nonumber
H_X:=\{x\in \mathbb R^{\mathscr S}| x_X=0\}
\end{equation}
is called the \textbf{species hyperplane} of $X$. \end{definition}
For $U$ containing $\mathbb R_{>0}$, let $\phi: U\rightarrow \mathbb R$ be an injective map, i.e., $\phi: U\rightarrow \textnormal{Im}~\phi$ is a bijection. By component-wise application ($m$ times), we obtain a bijection of $U^{\mathscr S}$ onto its image in $\mathbb R^{\mathscr S}$, which we also denote with $\phi$. We formulate our concepts for any subset $Y$ of $E_+(\mathscr N, K)$, although we are mainly interested in $Y=E_+(\mathscr N, K)$.
\begin{definition}
For a subset $Y$ of $E_+(\mathscr N, K)$, the set
\begin{equation}
\nonumber
\Delta_{\phi}Y:=\{\phi(x)-\phi(x')| x,x'\in Y\}
\end{equation}
is called the \textbf{difference set of $\phi$-transformed equilibria} in $Y$, and its span $\langle \Delta_{\phi}Y \rangle$ the difference space of $\phi$-transformed equilibria in $Y$. \end{definition}
In Proposition 6 of \cite{hernandez}, it is shown that
\begin{equation}
\nonumber
m_{ACR} \leq m - \dim \langle \Delta_{\phi}E_+\rangle.
\end{equation}
\noindent It follows immediately that we have the following lower bound for the equilibria variation.
\begin{proposition}
Let $(\mathscr N, K)$ be a kinetic system. Then, $\dfrac {\dim \langle \Delta_{\phi}E_+\rangle}{m} \leq v_+(\mathscr N, K)$. \end{proposition}
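Indeed, combining the definition of the equilibria variation with the inequality of Proposition 6 of \cite{hernandez} yields
\begin{equation*}
v_+(\mathscr N, K) = \dfrac{m-m_{ACR}}{m} \geq \dfrac{m-\left(m-\dim \langle \Delta_{\phi}E_+\rangle\right)}{m} = \dfrac{\dim \langle \Delta_{\phi}E_+\rangle}{m}.
\end{equation*}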
In general, it is challenging to compute $\dim \langle \Delta_{\phi}E_+\rangle$. However, for PLP systems, this can be done, and in the next section, we apply it to compute equilibria variation for weakly reversible, deficiency zero PL-RDK systems.
\subsection{Equilibria variation in deficiency zero PL-RDK systems}
A kinetic system $(\mathscr N, K)$ is a PLP (\textbf{positively equilibrated-log parametrized}) system if there are a reference equilibrium $x^*$ and a flux subspace $P_E$ of $\mathbb R^{\mathscr S}$ such that $E_+(\mathscr N, K) = \{ x \in \mathbb R^{\mathscr S}_{>} | \log x - \log x^* \in P_E^{\perp}\}$. For any PLP system, Lao et al. \cite{lao} showed that ACR is characterized by the species hyperplane criterion: a species $X$ is an ACR species if and only if $P_E^{\perp}$ is contained in $H_X = \{x \in \mathbb R^{\mathscr S} | x_X = 0\}$. This implies that $m - \dim P_E \leq m - m_{ACR}$ and, hence, $1 - \dfrac{\dim P_E}{m} \leq v_+(\mathscr N, K)$.
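Spelled out: writing $X_1,\dots ,X_{m_{ACR}}$ for the ACR species, the criterion gives $P_E^{\perp}\subseteq H_{X_1}\cap \cdots \cap H_{X_{m_{ACR}}}$, so that $m-\dim P_E = \dim P_E^{\perp} \leq m-m_{ACR}$ and
\begin{equation*}
v_+(\mathscr N, K) = \dfrac{m-m_{ACR}}{m} \geq \dfrac{m-\dim P_E}{m} = 1-\dfrac{\dim P_E}{m}.
\end{equation*}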
Jose et al. \cite{ACB2022} showed that a CLP (\textbf{complex balanced-log parametrized}) system, i.e., one with $Z_+(\mathscr N, K) = \{x\in \mathbb R^{\mathscr S}_> |\log x - \log x^* \in P_Z^{\perp}\}$, is absolutely complex balanced, i.e., $E_+(\mathscr N, K) = Z_+(\mathscr N, K)$, if and only if it is both CLP and PLP with $P_E = P_Z$. It was shown in \cite{muller} that any complex balanced PL-RDK system is a CLP system with $P_Z = \tilde{S}$. Since, by a result of Feinberg, any weakly reversible deficiency zero kinetic system is absolutely complex balanced, a weakly reversible deficiency zero PL-RDK system is a PLP system with $P_E = \tilde{S}$. Hence, for any such system, $1 - \dfrac{\Tilde{s}}{m} \leq v_+(\mathscr N, K)$, where $\Tilde{s} = \dim \Tilde{S}$ is the kinetic rank of the system.
\begin{example}
Fortun and collaborators studied variants of the Anderies et al. \cite{anderies} model of the earth's pre-industrial carbon cycle (see the reaction network below) in \cite{noel} and \cite{fort3.5}.
\end{example}
\begin{equation} \label{anderies}
\begin{tikzcd} A_1+2A_2 \arrow[r, "R_1"] & 2A_1+A_2 \\ A_1+A_2 \arrow[r, "R_2"] & 2A_2 \\ A_2 \arrow[r, "R_3 ", shift left] & A_3 \arrow[l, "R_4", shift left] \end{tikzcd} \end{equation}
\noindent Note that $A_1$, $A_2$, and $A_3$ here stand for land, atmosphere, and ocean, respectively. Given below are some important numbers and the kinetic order matrix of the network in \ref{anderies}.
\begin{figure}
\caption{(a) Some numbers related to the reaction network of the model of the earth's pre-industrial carbon cycle system given by Anderies et al. in \cite{anderies}; (b) kinetic order matrix of the network.}
\label{schmitz2}
\end{figure}
In \cite{fort3.5}, Fortun and Mendoza showed that the system is dynamically equivalent to a weakly reversible, deficiency zero system with $\Tilde{S}^{\perp} = \left \langle \begin{pmatrix} -1 & \dfrac{p_2-p_1}{q_2-q_1} & \dfrac{p_2-p_1}{q_2-q_1} \end{pmatrix} \right \rangle$. Let $R$ denote the ratio $\dfrac{p_2 - p_1}{q_2 - q_1}$. Based on the value of $R$, one obtains three classes of Anderies systems: $AND_>$ ($R > 0$) consists of multistationary systems with no ACR species, $AND_0$ ($R = 0$) contains only monostationary systems with two ACR species, and $AND_<$ ($R < 0$) contains both injective and non-injective systems but also no ACR species. The variants studied in [10] and [14] belong to $AND_>$ and $AND_0$, respectively.
Based on the previous results, we have $v_+(\mathscr N, K) = 1$ if $(\mathscr N, K)$ is contained in either $AND_>$ or $AND_<$, and $v_+(\mathscr N, K) = \dfrac{1}{3}$ if $(\mathscr N, K)$ is in $AND_0$.
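These values follow directly from the definition, since the Anderies network has the $m = 3$ species $A_1$, $A_2$, $A_3$: for systems in $AND_>$ or $AND_<$ there are no ACR species, so $v_+(\mathscr N, K) = \dfrac{3-0}{3} = 1$, while systems in $AND_0$ have two ACR species, so $v_+(\mathscr N, K) = \dfrac{3-2}{3} = \dfrac{1}{3}$.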
\subsection{Equilibria variation of independent subnetworks}
In this section, we discuss equilibria variation for independent subnetworks. It is important in this regard to differentiate between the two concepts of subnetworks that we introduced in previous work: embedded and non-embedded. We hence begin with a review of these concepts and their properties relevant to absolute concentration robustness (ACR), which is the basis of our concept of equilibria variation. We then collect some relevant results from previous publications and add some new details. We conclude by using the results of the reaction network analysis of metabolic insulin signaling by Lubenia et al. \cite{lubenia} to illustrate the different concepts and relationships. We in fact show that the various inequalities between the different equilibria variation values are sharp, i.e., equality is achieved in various subnetworks of the insulin system.
\subsubsection{Review of subnetwork properties}
In a subnetwork in a decomposition $\mathscr N=\mathscr N_1\cup \mathscr N_2\cup\cdots \cup\mathscr N_k$, often, a smaller number of species occurs than in the whole network. If $\mathscr N=(\mathscr S, \mathscr C, \mathscr R)$, we call a subnetwork $(\mathscr C', \mathscr R')$, where $\mathscr R'\subseteq \mathscr R$ and $\mathscr C'=\mathscr C|_{\mathscr R'}$, \textbf{embedded} if its species space is $\mathscr S$ and \textbf{non-embedded} if it has $\mathscr S|_{\mathscr C'}$ as species space, which we denote with $\mathscr S'$.
We use the embedded representation in a decomposition because it conveniently allows the set operations on equilibria sets. A basic fact is the following observation:
\begin{proposition}
If $X$ is an ACR species of a subnetwork, then $X$ is an element of $\mathscr S'$. \end{proposition}
This means that we need only one count of ACR species in a subnetwork, $m'_{ACR}$. For non-ACR species and equilibria variation, we have the following relationships:
\begin{proposition}
Let $m'=|\mathscr S'|$ and $v_+(\mathscr N', K')$, $\Tilde{v}_+(\mathscr N', K')$ be the equilibria variations of the non-embedded and embedded subnetworks. Then,
\begin{enumerate}[i.]
\item $m-m'_{ACR}=(m'-m'_{ACR})+(m-m')$ and
\item $0\leq \Tilde{v}_+(\mathscr N', K')-v_+(\mathscr N', K')\leq \dfrac{m-m'}{m}$
\end{enumerate} \end{proposition}
\begin{proof} $(i.)$ expresses that the number of non-ACR species in the embedded subnetwork equals the number of non-ACR species in the non-embedded subnetwork plus the number of non-occurring species. For $(ii.)$: dividing the equation in $(i.)$ by $m$ and using the inequality $\dfrac{m'-m'_{ACR}}{m}\leq \dfrac{m'-m'_{ACR}}{m'}$, we obtain $\Tilde{v}_+(\mathscr N', K')-v_+(\mathscr N', K')\leq \dfrac{m-m'}{m}$. On the other hand, after forming a common denominator, the left-hand side equals $\dfrac{m'_{ACR}(m-m')}{mm'}\geq 0$, since all factors are non-negative (see the computation displayed after this proof). \end{proof}
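For reference, the common-denominator computation used in the proof of $(ii.)$ reads
\begin{equation*}
\Tilde{v}_+(\mathscr N', K')-v_+(\mathscr N', K') = \dfrac{m-m'_{ACR}}{m}-\dfrac{m'-m'_{ACR}}{m'} = \dfrac{m'(m-m'_{ACR})-m(m'-m'_{ACR})}{mm'} = \dfrac{m'_{ACR}(m-m')}{mm'}.
\end{equation*}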
\subsubsection{Equilibria variation of independent subnetworks}
In \cite{lao}, Proposition 4.4 states that if species $X$ has ACR in $\mathscr N_i$ and the decomposition is independent, then $X$ has ACR in $\mathscr N$, i.e., $\displaystyle |\mathscr S_{ACR, i}|\leq \left| \bigcup_{i=1}^{k} \mathscr S_{ACR,i} \right| \leq |\mathscr S_{ACR}|$. We hence have the following corollary:
\begin{proposition}
$v_+(\mathscr N, K)\leq \Tilde{v}_+(\mathscr N', K')$ for any independent subnetwork $\mathscr N'$. \end{proposition}
\begin{proof}
By Proposition 4.4 of \cite{lao}, $m'_{ACR}\leq m_{ACR}$; hence $m-m_{ACR}\leq m-m'_{ACR}$, and dividing both sides by $m$ yields the claim. \end{proof}
\subsubsection{The example of metabolic insulin signaling in healthy cells}
Lubenia et al. \cite{lubenia} constructed a mass action kinetic realization of the widely used model of metabolic insulin signaling (in healthy cells) by Sedaghat et al. \cite{sedaghat}. They used the kinetic system's finest independent decomposition (FID) to conduct an ACR analysis and showed that $m_{ACR} \geq 8$ for all rate constants (such that the system has positive equilibria) and $m_{ACR} = 8$ for some rate constants. Hernandez et al. [3] confirmed that $m_{ACR} = 8$ for all rate constants with a positively equilibrated system. Hence, $v_+(\mathscr N, K) = \dfrac{20 - 8}{20} = \dfrac{12}{20} =\dfrac{3}{5} = 0.60$.
The table below (see Table \ref{lube}), which was taken from \cite{lubenia}, provides an overview of the characteristics of the FID subnetworks. Note that, with the exception of $\mathscr{N}_1$, all subnetworks are rank-one systems. Applying the Meshkat et al. criterion to these 9 subnetworks showed that only the one-species systems $\mathscr{N}_2$ and $\mathscr{N}_{10}$ had ACR for the species $X_6$ and $X_{20}$, respectively. All other subnetworks had no ACR species. Hence, we have:
\begin{itemize}
\item For $i=3,\dots,9$: $v_+(\mathscr N, K) = 0.60 < 1 = v_+(\mathscr N_i, K_i) = \Tilde{v}_+(\mathscr N_i, K_i)$
\item For $i=2$ and $i=10$: $v_+(\mathscr N_i, K_i) = 0 < 0.60 = v_+(\mathscr N, K) < \Tilde{v}_+(\mathscr N_i, K_i)=\dfrac{19}{20}=0.95$ \end{itemize}
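These values unwind directly from the definition and Table \ref{lube}: the one-species subnetworks $\mathscr N_2$ and $\mathscr N_{10}$ have $m' = m'_{ACR} = 1$, so $v_+(\mathscr N_i, K_i) = \dfrac{1-1}{1} = 0$ and $\Tilde{v}_+(\mathscr N_i, K_i) = \dfrac{20-1}{20} = 0.95$, while the subnetworks $\mathscr N_3,\dots ,\mathscr N_9$ have no ACR species, so $v_+(\mathscr N_i, K_i) = \dfrac{m'-0}{m'} = 1$ and $\Tilde{v}_+(\mathscr N_i, K_i) = \dfrac{20-0}{20} = 1$.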
\begin{table}[H]
\begin{tabular}{|l|c|c|c|c|c|c|c|c|c|c|} \hline \textbf{Network numbers} & $\mathscr N_1$ & $\mathscr N_2$ & $\mathscr N_3$ & $\mathscr N_4$ & $\mathscr N_5$ & $\mathscr N_6$ & $\mathscr N_7$ & $\mathscr N_8$ & $\mathscr N_9$ & $\mathscr N_{10}$ \\ \hline Species & 7 & 1 & 4 & 3 & 3 & 2 & 3 & 3 & 4 & 1 \\ \hline Complexes & 7 & 2 & 6 & 2 & 4 & 2 & 4 & 4 & 6 & 2 \\ \hline Reactant complexes & 7 & 2 & 3 & 2 & 2 & 2 & 2 & 2 & 4 & 2 \\ \hline Reversible reactions & 5 & 1 & 0 & 1 & 0 & 1 & 0 & 0 & 1 & 1 \\ \hline Irreversible reactions & 4 & 0 & 3 & 0 & 2 & 0 & 2 & 2 & 2 & 0 \\ \hline Reactions & 14 & 2 & 3 & 2 & 2 & 2 & 2 & 2 & 4 & 2 \\ \hline Linkage classes & 1 & 1 & 3 & 1 & 2 & 1 & 2 & 2 & 3 & 1 \\ \hline Strong linkage classes & 1 & 1 & 6 & 1 & 4 & 1 & 4 & 4 & 5 & 1 \\ \hline Terminal strong linkage classes & 1 & 1 & 3 & 1 & 2 & 1 & 2 & 2 & 3 & 1 \\ \hline Rank & 6 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 \\ \hline Reactant rank & 7 & 1 & 3 & 2 & 2 & 2 & 2 & 2 & 4 & 1 \\ \hline Deficiency & 0 & 0 & 2 & 0 & 1 & 0 & 1 & 1 & 2 & 0 \\ \hline Reactant deficiency & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\ \hline \end{tabular} \caption{Some numbers of FID subnetworks.} \label{lube} \end{table}
The authors considered also the coarsening $\mathscr N_A \cup \mathscr N_B$, where $\mathscr N_A$ is the union of all deficiency zero subnetworks and $\mathscr N_B$ the union of all positive deficiency subnetworks. They also showed that all ACR species are contained solely in $\mathscr N_A$. Hence, in this case, we have:
\begin{itemize}
\item $v_+(\mathscr N_A, K_A) =\dfrac{13-8}{13}\approx 0.38 < 0.6 = v_+(\mathscr N, K) = \Tilde{v}_+(\mathscr N_A, K_A)$
\item $v_+(\mathscr N, K) = 0.6<1 = v_+(\mathscr N_B, K_B) = \Tilde{v}_+(\mathscr N_B, K_B)$
\end{itemize}
\noindent These examples show that all the inequalities in the propositions above are sharp.
\section{Summary and outlook}
This study was motivated by Meshkat et al.'s result, which allows rank-one MAK systems to be analyzed for the possession of stable ACR. The result guarantees such robustness for a MAK system if two identified structural conditions are met. These conditions require the system to have an embedded network with a certain structure and reactant complexes that differ in just one species. We attempted to extend this result to more general PLK systems, but to no avail: we found an example illustrating that these conditions do not always work in a general setting. This means that, unlike other earlier results on ACR, the conditions in this result do not always ensure the existence of ACR in a PLK system.
On the other hand, we found a subclass of PLK systems where the stable ACR criterion of Meshkat et al. holds. We call this subclass homogeneous monomial quotients of mass action systems, or PL-MMK for short. This subclass is obtained by utilizing the set of rate constants of a given MAK system and its modified kinetic order matrix.
We also identified a property that is necessarily present in a multistationary rank-one system possessing an ACR species. Specifically, the corresponding result indicates that such a system must have a basis vector of the stoichiometric subspace with $0$ as its coordinate in the ACR species. We illustrated this result using a multistationary system given in the paper of Pantea and Voitiuk.
Finally, we considered the concept of
equilibria variation of independent subnetworks in this paper. We discussed some mathematical relationships between the equilibria variations of embedded and non-embedded subnetworks. These relationships were illustrated with the data in \cite{lubenia} that Lubenia et al. used for analyzing metabolic insulin signaling in healthy cells. It is important to note that the ACR species of a subnetwork are always contained in the corresponding non-embedded subnetwork.
For future studies, one can look at the extension problem of the rank-one ACR criterion, that is, the identification of further kinetic systems (beyond mass action) for which it holds. One can also consider the exploration of relationships between multistationarity classes and the necessary condition for rank-one mass action systems, as well as an extension of the Pantea-Voitiuk classification beyond mass action. Further, it is also interesting to identify further kinetics sets for which the general lower bound can be computed, or for which a much sharper alternative, as in rank-one multistationary systems, can be derived.
\noindent \textbf{Acknowledgement}
\noindent D. Talabis, E. Jose, and L. Fontanil acknowledge the support of the University of the Philippines Los Ba\~{n}os through the Basic Research Program.
\baselineskip=0.25in
\end{document}
\begin{document}
\maketitle
\begin{abstract} In the present paper, we consider the Cauchy problem of fourth order nonlinear Schr\"odinger type equations with a derivative nonlinearity.
In the one dimensional case, we prove that the fourth order nonlinear Schr\"odinger equation with the derivative quartic nonlinearity $\partial _x (\overline{u}^4)$ is small data globally well-posed in time and that the solution scatters to a free solution. Furthermore, we show that the same result holds for $d \ge 2$ and derivative polynomial type nonlinearities, for example $|\nabla | (u^m)$ with $(m-1)d \ge 4$. \\
\noindent {\it Key Words and Phrases.} Schr\"odinger equation, well-posedness, Cauchy problem, scaling critical, multilinear estimate, bounded $p$-variation.\\ 2010 {\it Mathematics Subject Classification.} 35Q55, 35B65. \end{abstract}
\section{Introduction\label{intro}} We consider the Cauchy problem of the fourth order nonlinear Schr\"odinger type equations: \begin{equation}\label{D4NLS} \begin{cases} \displaystyle (i\partial_{t}+\Delta ^2)u=\partial P_{m}(u,\overline{u}),\hspace{2ex}(t,x)\in (0,\infty )\times {\BBB R}^{d} \\ u(0,x)=u_{0}(x),\hspace{2ex}x\in {\BBB R}^{d} \end{cases} \end{equation} where $m\in {\BBB N}$, $m\geq 2$, $P_{m}$ is a polynomial which is written by \[ P_{m}(f,g)=\sum_{\substack{\alpha ,\beta \in {\BBB Z}_{\geq 0}\\ \alpha +\beta=m}}f^{\alpha}g^{\beta}, \] $\partial$ is a first order derivative with respect to the spatial variable, for example a linear combination of
$\frac{\partial}{\partial x_1} , \, \dots , \, \frac{\partial}{\partial x_d}$ or $|\nabla |= \mathcal{F}^{-1}[|\xi | \mathcal{F}]$ and the unknown function $u$ is ${\BBB C}$-valued.
The fourth order Schr\"{o}dinger equation with $P_{m}(u,\overline{u})=|u|^{m-1}u$ appears in the study of deep water wave dynamics \cite{Dysthe}, solitary waves \cite{Karpman}, \cite{KS}, vortex filaments \cite{Fukumoto}, and so on. The equation (\ref{D4NLS}) is invariant under the following scaling transformation: \[ u_{\lambda}(t,x)=\lambda^{-3/(m-1)}u(\lambda^{-4}t,\lambda^{-1}x), \] and the scaling critical regularity is $s_{c}=d/2-3/(m-1)$. The aim of this paper is to prove the well-posedness and the scattering for the solution of (\ref{D4NLS}) in the scaling critical Sobolev space.
There are many results for the fourth order nonlinear Schr\"{o}dinger equation
with derivative nonlinearities (see \cite{S1}, \cite{S2}, \cite{HJ1}, \cite{HHW}, \cite{HHW2}, \cite{HJ3}, \cite{S3}, \cite{HJ2}, \cite{Y12}, \cite{HN15_1}, \cite{HN15_2}, and references cited therein). Especially, the one dimensional case is well studied. Wang (\cite{Y12}) considered (\ref{D4NLS}) for the case $d=1$, $m=2l+1$, $l\ge 2$, $P_{2l+1}(u,\overline{u})=|u|^{2l}u$ and proved the small data global in time well-posedness for $s=s_{c}$ by using Kato type smoothing effect. But he did not treat the cubic case. Actually, a technical difficulty appears in this case (see Theorem \ref{notC3} below).
Hayashi and Naumkin (\cite{HN15_1}) considered (\ref{D4NLS}) for $d=1$ with the power type nonlinearity $\partial_{x}(|u|^{\rho -1}u)$ ($\rho >4$) and proved the global existence of the solution and the scattering in the weighted Sobolev space. Moreover, they (\cite{HN15_2}) also proved that the large time asymptotics is determined by the self similar solution in the case $\rho =4$. Therefore, the derivative quartic nonlinearity in one spatial dimension is critical in the sense of the asymptotic behavior of the solution.
We firstly focus on the quartic nonlinearity $\partial _x (\overline{u}^4)$ in one space dimension. Since this nonlinearity has some good structure, the global solution scatters to a free solution in the scaling critical Sobolev space. Our argument does not apply to \eqref{D4NLS} with $P (u,\overline{u}) = |u|^3 u$ because we rely on the Fourier restriction norm method. Now, we give the first results in this paper.
For a Banach space $H$ and $r>0$, we define $B_r(H):=\{ f\in H \,|\, \|f\|_H \le r \}$. \begin{thm}\label{wellposed_1} Let $d=1$, $m=4$ and $P_{4}(u,\overline{u})=\overline{u}^{4}$. Then the equation {\rm (\ref{D4NLS})} is globally well-posed for small data in $\dot{H}^{-1/2}$. More precisely, there exists $r>0$ such that for any $T>0$ and all initial data $u_{0}\in B_{r}(\dot{H}^{-1/2})$, there exists a solution \[ u\in \dot{Z}_{r}^{-1/2}([0,T))\subset C([0,T );\dot{H}^{-1/2}) \] of {\rm (\ref{D4NLS})} on $(0, T )$. Such solution is unique in $\dot{Z}_{r}^{-1/2}([0,T))$ which is a closed subset of $\dot{Z}^{-1/2}([0,T))$ {\rm (see Definition~\ref{YZ_space} and (\ref{Zr_norm}))}. Moreover, the flow map \[ S^{+}_{T}:B_{r}(\dot{H}^{-1/2})\ni u_{0}\mapsto u\in \dot{Z}^{-1/2}([0,T)) \] is Lipschitz continuous. \end{thm}
\begin{rem} We note that $s=-1/2$ is the scaling critical exponent of (\ref{D4NLS}) for $d=1$, $m=4$. \end{rem}
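Indeed, for $d=1$ and $m=4$ we have $s_{c}=\frac{d}{2}-\frac{3}{m-1}=\frac{1}{2}-1=-\frac{1}{2}$.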
\begin{cor}\label{sccat} Let $r>0$ be as in Theorem~\ref{wellposed_1}. For all $u_{0}\in B_{r}(\dot{H}^{-1/2})$, there exists a solution $u\in C([0,\infty );\dot{H}^{s_{c}})$ of (\ref{D4NLS}) on $(0,\infty )$ and the solution scatters in $\dot{H}^{-1/2}$. More precisely, there exists $u^{+}\in \dot{H}^{-1/2}$ such that \[ u(t)-e^{it\Delta^2}u^{+} \rightarrow 0 \ {\rm in}\ \dot{H}^{-1/2}\ {\rm as}\ t\rightarrow + \infty. \] \end{cor}
Moreover, we obtain the large data local in time well-posedness in the scaling critical Sobolev space. To state the result, we put \[
B_{\delta ,R} (H^s) := \{ u_0 \in H^s | \ u_0=v_0+w_0 , \, \| v_0 \| _{\dot{H}^{-1/2}} < \delta, \, \| w_0 \| _{L^2} <R \} \] for $s<0$.
\begin{thm} \label{large-wp} Let $d=1$, $m=4$ and $P_{4}(u,\overline{u})=\overline{u}^{4}$. Then the equation {\rm (\ref{D4NLS})} is locally in time well-posed in $H^{-1/2}$. More precisely, there exists $\delta >0$ such that for all $R \ge \delta$ and $u_0 \in B_{\delta ,R} (H^{-1/2})$ there exists a solution \[ u \in Z^{-1/2}([0,T]) \subset C([0,T); H^{-1/2}) \] of \eqref{D4NLS} with $T=\delta ^{8} R^{-8}$.
Furthermore, the same statement remains valid if we replace $H^{-1/2}$ by $\dot{H}^{-1/2}$ as well as $Z^{-1/2}([0,T])$ by $\dot{Z}^{-1/2}([0,T])$. \end{thm}
\begin{rem} For $s>-1/2$, the local in time well-posedness in $H^s$ follows from the usual Fourier restriction norm method, which applies to all initial data in $H^s$. This case, however, is not of much interest. On the other hand, since we focus on the scaling critical case, which has negative regularity, we have to impose that the $\dot{H}^{-1/2}$ part of the initial data is small. Nevertheless, Theorem \ref{large-wp} is a large data result because the $L^2$ part is not restricted. \end{rem}
The main tools of the proof are the $U^{p}$ and $V^{p}$ spaces, which were applied to prove the well-posedness and the scattering for the KP-II equation at the scaling critical regularity by Hadac, Herr and Koch (\cite{HHK09}, \cite{HHK10}).
We also consider the one dimensional cubic case and the high dimensional cases. The second result in this paper is as follows.
\begin{thm}\label{wellposed_2} {\rm (i)}\ Let $d=1$ and $m=3$. Then the equation {\rm (\ref{D4NLS})} is locally well-posed in $H^{s}$ for $s\ge 0$. \\ {\rm (ii)}\ Let $d\geq 2$ and $(m-1)d\geq 4$. Then the equation {\rm (\ref{D4NLS})}
is globally well-posed for small data in $\dot{H}^{s_{c}}$ (or $H^{s}$ for $s\ge s_{c}$)
and the solution scatters in $\dot{H}^{s_{c}}$ (or $H^{s}$ for $s\ge s_{c}$). \end{thm}
The smoothing effect of the linear part recovers the derivative in the higher dimensional cases. Therefore, we do not use the $U^p$ and $V^p$ type spaces. More precisely, to establish Theorem \ref{wellposed_2}, we only use the Strichartz estimates and get the solution in $C([0,T);H^{s_c})\cap L^{p_m}([0,T); W^{q_m,s_{c}+1/(m-1)})$ with $p_m =2(m-1)$, $q_m =2(m-1)d/\{ (m-1)d-2\}$. Accordingly, the scattering follows from a standard argument. Since the condition $(m-1)d\geq 4$ is equivalent to $s_{c}+1/(m-1)\ge 0$, the solution space $L^{p_m}([0,T); W^{q_m,s_{c}+1/(m-1)})$ has nonnegative regularity even if the data belongs to $H^{s_{c}}$ with $-1/(m-1)\le s_c <0$. Our proof of Theorem~\ref{wellposed_2} {\rm (ii)} cannot be applied for $d=1$ since the Schr\"odinger admissible pair $(a,b)$ in {\rm (\ref{admissible_ab})} does not exist.
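Indeed, the equivalence follows from the elementary computation
\[
s_{c}+\frac{1}{m-1} = \frac{d}{2}-\frac{3}{m-1}+\frac{1}{m-1} = \frac{d}{2}-\frac{2}{m-1} \ge 0 \iff (m-1)d \ge 4.
\]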
\begin{rem} For the case $d=1$, $m=4$ and $P_{4}(u,\overline{u})\ne \overline{u}^{4}$, we can obtain the local in time well-posedness of {\rm (\ref{D4NLS})} in $H^{s}$ for $s\ge 0$ in the same way as in the proof of Theorem~\ref{wellposed_2}. Actually, we can get the solution in $C([0,T];H^s)\cap L^4 ([0,T];W^{s+1/2,\infty })$ for $s\ge 0$ by using the iteration argument since the fractional Leibniz rule (see \cite{CW91}) and the H\"older inequality imply \[
\left\| |\nabla |^{s+\frac{1}{2}}\prod_{j=1}^{4}u_j \right\|_{L^{4/3}_{t}([0,T);L_{x}^{1})}
\lesssim T^{1/4}\| |\nabla |^{s+\frac{1}{2}}u_1 \|_{L^{4}_{t}L_{x}^{\infty}}\| u_2 \|_{L^{4}_{t}L_{x}^{\infty}}
\| u_3 \|_{L^{\infty}_{t}L_{x}^{2}}\| u_4 \|_{L^{\infty}_{t}L_{x}^{2}}. \] \end{rem}
We give a remark on our problem, which shows that the standard iteration argument does not work.
\begin{thm}\label{notC3}
{\rm (i)}\ Let $d=1$, $m=3$, $s<0$ and $P_{3}(u,\overline{u})=|u|^{2}u$. Then the flow map of {\rm (\ref{D4NLS})} from $H^s$ to $C({\BBB R} ; H^s)$ is not smooth. \\
{\rm (ii)}\ Let $m\ge 2$, $s<s_c$ and $\partial =|\nabla |$ or $\frac{\partial}{\partial x_k}$ for some $1\le k \le d$. Then the flow map of {\rm (\ref{D4NLS})} from $H^s$ to $C({\BBB R} ; H^s)$ is not smooth.
\end{thm}
More precisely, we prove that the flow map is not $C^3$ if $d=1$, $m=3$, $s<0$ and $P_{3}(u,\overline{u})=|u|^{2}u$, and not $C^m$ if $d \ge 1$, $m \ge 2$, and $s<s_c$. This implies that the standard iteration argument fails, because the flow map would be smooth if that argument worked. Of course, there is a gap between ill-posedness and the absence of a smooth flow map.
Since a resonance appears in the case $d=1$, $m=3$ and $P_{3}(u,\overline{u})=|u|^{2}u$, the flow map fails to be smooth even at subcritical Sobolev regularities.
\text{} \\ \noindent {\bf Notation.} We denote the spatial Fourier transform by\ \ $\widehat{\cdot}$\ \ or $\mathcal{F}_{x}$, the Fourier transform in time by $\mathcal{F}_{t}$ and the Fourier transform in all variables by\ \ $\widetilde{\cdot}$\ \ or $\mathcal{F}_{tx}$. The free evolution $S(t):=e^{it\Delta^{2}}$ is given as a Fourier multiplier \[
\mathcal{F}_{x}[S(t)f](\xi )=e^{-it|\xi |^{4}}\widehat{f}(\xi ). \] We will use $A\lesssim B$ to denote an estimate of the form $A \le CB$ for some constant $C$ and write $A \sim B$ to mean $A \lesssim B$ and $B \lesssim A$. We will use the convention that capital letters denote dyadic numbers, e.g. $N=2^{n}$ for $n\in {\BBB Z}$ and for a dyadic summation we write $\sum_{N}a_{N}:=\sum_{n\in {\BBB Z}}a_{2^{n}}$ and $\sum_{N\geq M}a_{N}:=\sum_{n\in {\BBB Z}, 2^{n}\geq M}a_{2^{n}}$ for brevity.
Let $\chi \in C^{\infty}_{0}((-2,2))$ be an even, non-negative function such that $\chi (t)=1$ for $|t|\leq 1$. We define $\psi (t):=\chi (t)-\chi (2t)$ and $\psi_{N}(t):=\psi (N^{-1}t)$. Then, $\sum_{N}\psi_{N}(t)=1$ whenever $t\neq 0$. We define frequency and modulation projections \[ \widehat{P_{N}u}(\xi ):=\psi_{N}(\xi )\widehat{u}(\xi ),\
\widetilde{Q_{M}^{S}u}(\tau ,\xi ):=\psi_{M}(\tau -|\xi|^{4})\widetilde{u}(\tau ,\xi ). \] Furthermore, we define $Q_{\geq M}^{S}:=\sum_{N\geq M}Q_{N}^{S}$ and $Q_{<M}^{S}:=Id -Q_{\geq M}^{S}$.
The rest of this paper is planned as follows. In Section 2, we will give the definition and properties of the $U^{p}$ space and $V^{p}$ space. In Section 3, we will give the multilinear estimates which are main estimates to prove Theorems~\ref{wellposed_1} and \ref{large-wp}. In Section 4, we will give the proof of the well-posedness and the scattering (Theorem~\ref{wellposed_1}, Corollary~\ref{sccat}, and Theorem \ref{large-wp}). In Section 5, we will give the proof of Theorem~\ref{wellposed_2}. In Section 6, we will give the proof of Theorem~\ref{notC3}.
\section{The $U^{p}$, $V^{p}$ spaces and their properties \label{func_sp}} In this section, we define the $U^{p}$ space and the $V^{p}$ space, and introduce the properties of these spaces which are proved by Hadac, Herr and Koch (\cite{HHK09}, \cite{HHK10}).
We define the set of finite partitions $\mathcal{Z}$ as \[
\mathcal{Z} :=\left\{ \{t_{k}\}_{k=0}^{K}|K\in {\BBB N} , -\infty <t_{0}<t_{1}<\cdots <t_{K}\leq \infty \right\} \] and if $t_{K}=\infty$, we put $v(t_{K}):=0$ for all functions $v:{\BBB R} \rightarrow L^{2}$.
\begin{defn}\label{upsp} Let $1\leq p <\infty$. For $\{t_{k}\}_{k=0}^{K}\in \mathcal{Z}$ and $\{\phi_{k}\}_{k=0}^{K-1}\subset L^{2}$ with
$\sum_{k=0}^{K-1} \| \phi_{k} \| _{L^{2}}^{p}=1$ we call the function $a:{\BBB R}\rightarrow L^{2}$ given by \[ a(t)=\sum_{k=1}^{K}\mbox{\boldmath $1$}_{[t_{k-1},t_{k})}(t)\phi_{k-1} \] a ``$U^{p}${\rm -atom}''. Furthermore, we define the atomic space \[ U^{p}:=\left\{ \left. u=\sum_{j=1}^{\infty}\lambda_{j}a_{j}
\right| a_{j}:U^{p}{\rm -atom},\ \lambda_{j}\in {\BBB C} \ {\rm such\ that}\ \sum_{j=1}^{\infty}|\lambda_{j}|<\infty \right\} \] with the norm \[
\| u \| _{U^{p}}:=\inf \left\{\sum_{j=1}^{\infty}|\lambda_{j}|\left|u=\sum_{j=1}^{\infty}\lambda_{j}a_{j},\ a_{j}:U^{p}{\rm -atom},\ \lambda_{j}\in {\BBB C}\right.\right\}. \] \end{defn}
\begin{defn}\label{vpsp} Let $1\leq p <\infty$. We define the space of the bounded $p$-variation \[
V^{p}:=\{ v:{\BBB R}\rightarrow L^{2}|\ \| v \| _{V^{p}}<\infty \} \] with the norm \[
\| v \| _{V^{p}}:=\sup_{\{t_{k}\}_{k=0}^{K}\in \mathcal{Z}}\left(\sum_{k=1}^{K} \| v(t_{k})-v(t_{k-1}) \| _{L^{2}}^{p}\right)^{1/p}. \] Likewise, let $V^{p}_{-, rc}$ denote the closed subspace of all right-continuous functions $v\in V^{p}$ with
$\lim_{t\rightarrow -\infty}v(t)=0$, endowed with the same norm $ \| \cdot \| _{V^{p}}$. \end{defn}
\begin{prop}[\cite{HHK09} Proposition\ 2.2,\ 2.4,\ Corollary\ 2.6]\label{upvpprop} Let $1\leq p<q<\infty$. \\ {\rm (i)} $U^{p}$, $V^{p}$ and $V^{p}_{-, rc}$ are Banach spaces. \\ {\rm (ii)} For every $v\in V^{p}$, $\lim_{t\rightarrow -\infty}v(t)$ and $\lim_{t\rightarrow \infty}v(t)$ exist in $L^{2}$. \\ {\rm (iii)} The embeddings $U^{p}\hookrightarrow V^{p}_{-,rc}\hookrightarrow U^{q}\hookrightarrow L^{\infty}_{t}({\BBB R} ;L^{2}_{x}({\BBB R}^{d}))$ are continuous. \end{prop}
\begin{thm}[\cite{HHK09} Proposition\ 2.10,\ Remark\ 2.12]\label{duality} Let $1<p<\infty$ and $1/p+1/p'=1$. If $u\in V^{1}_{-,rc}$ is absolutely continuous on every compact interval, then \[
\| u \| _{U^{p}}=\sup_{v\in V^{p'}, \| v \| _{V^{p'}}=1}\left|\int_{-\infty}^{\infty}(u'(t),v(t))_{L^{2}({\BBB R}^{d})}dt\right|. \] \end{thm}
\begin{defn} Let $1\leq p<\infty$. We define \[
U^{p}_{S}:=\{ u:{\BBB R}\rightarrow L^{2}|\ S(-\cdot )u\in U^{p}\} \]
with the norm $ \| u \| _{U^{p}_{S}}:= \| S(-\cdot )u \| _{U^{p}}$, \[
V^{p}_{S}:=\{ v:{\BBB R}\rightarrow L^{2}|\ S(-\cdot )v\in V^{p}_{-,rc}\} \]
with the norm $ \| v \| _{V^{p}_{S}}:= \| S(-\cdot )v \| _{V^{p}}$. \end{defn}
\begin{rem} The embeddings $U^{p}_{S}\hookrightarrow V^{p}_{S}\hookrightarrow U^{q}_{S}\hookrightarrow L^{\infty}({\BBB R};L^{2})$ hold for $1\leq p<q<\infty$ by {\rm Proposition~\ref{upvpprop}}. \end{rem}
\begin{prop}[\cite{HHK09} Corollary\ 2.18]\label{projest} Let $1< p<\infty$. We have \begin{align}
& \| Q_{\geq M}^{S}u \| _{L_{tx}^{2}}\lesssim M^{-1/2} \| u \| _{V^{2}_{S}},\label{highMproj}\\
& \| Q_{<M}^{S}u \| _{V^{p}_{S}}\lesssim \| u \| _{V^{p}_{S}},\ \ \| Q_{\geq M}^{S}u \| _{V^{p}_{S}}\lesssim \| u \| _{V^{p}_{S}},\label{Vproj} \end{align} \end{prop}
\begin{prop}[\cite{HHK09} Proposition\ 2.19]\label{multiest} Let \[ T_{0}:L^{2}({\BBB R}^{d})\times \cdots \times L^{2}({\BBB R}^{d})\rightarrow L^{1}_{loc}({\BBB R}^{d}) \] be a $m$-linear operator. Assume that for some $1\leq p, q< \infty$ \[
\| T_{0}(S(\cdot )\phi_{1},\cdots ,S(\cdot )\phi_{m}) \| _{L^{p}_{t}({\BBB R} :L^{q}_{x}({\BBB R}^{d}))}\lesssim \prod_{i=1}^{m} \| \phi_{i} \| _{L^{2}({\BBB R}^{d})}. \] Then, there exists $T:U^{p}_{S}\times \cdots \times U^{p}_{S}\rightarrow L^{p}_{t}({\BBB R} ;L^{q}_{x}({\BBB R}^{d}))$ satisfying \[
\| T(u_{1},\cdots ,u_{m}) \| _{L^{p}_{t}({\BBB R} ;L^{q}_{x}({\BBB R}^{d}))}\lesssim \prod_{i=1}^{m} \| u_{i} \| _{U^{p}_{S}} \] such that $T(u_{1},\cdots ,u_{m})(t)(x)=T_{0}(u_{1}(t),\cdots ,u_{m}(t))(x)$ a.e. \end{prop}
Now we recall the Strichartz estimates for the fourth order Schr\"odinger equation proved by Pausader. We say that a pair $(p,q)$ is admissible if $2 \le p,q \le \infty$, $(p,q,d) \neq (2, \infty ,2)$, and \[ \frac{2}{p} + \frac{d}{q} = \frac{d}{2}. \] \begin{prop}[\cite{P07} Proposition\ 3.1]\label{Stri_est} Let $(p,q)$ and $(a,b)$ be admissible pairs.
Then, we have \[ \begin{split}
\| S(\cdot )\varphi \| _{L_{t}^{p}L_{x}^{q}}&\lesssim \| |\nabla|^{-2/p}\varphi \| _{L^{2}_{x}},\\
\left\| \int_{0}^{t}S(t-t' )F(t')dt' \right\| _{L_{t}^{p}L_{x}^{q}}&\lesssim \| |\nabla|^{-2/p-2/a}F \| _{L^{a'}_{t}L^{b'}_{x}}, \end{split} \] where $a'$ and $b'$ are the conjugate exponents of $a$ and $b$, respectively. \end{prop} Propositions \ref{multiest} and \ref{Stri_est} imply the following. \begin{cor}\label{Up_Stri} Let $(p,q)$ be an admissible pair.
\begin{equation}\label{U_Stri}
\| u \| _{L_{t}^{p}L_{x}^{q}}\lesssim \| |\nabla|^{-2/p}u \| _{U_{S}^{p}},\ \ u\in U^{p}_{S}. \end{equation} \end{cor}
Next, we define the function spaces which will be used to construct the solution. We define the projections $P_{>1}$ and $P_{<1}$ as \[ P_{>1}:=\sum_{N\ge 1}P_N,\ P_{<1}:=Id-P_{>1}. \]
\begin{defn}\label{YZ_space} Let $s <0$.\\
{\rm (i)} We define $\dot{Z}^{s}:=\{u\in C({\BBB R} ; \dot{H}^{s}({\BBB R}^{d}))\cap U^{2}_{S}|\ \| u \| _{\dot{Z}^{s}}<\infty\}$ with the norm \[
\| u \| _{\dot{Z}^{s}}:=\left(\sum_{N}N^{2s} \| P_{N}u \| ^{2}_{U^{2}_{S}}\right)^{1/2}. \]
{\rm (ii)} We define $Z^{s}:=\{u\in C({\BBB R} ; H^{s}({\BBB R}^{d})) |\ \| u \| _{Z^{s}}<\infty\}$ with the norm \[
\| u \| _{Z^{s}}:= \| P_{<1} u \| _{\dot{Z}^{0}}+ \| P_{>1} u \| _{\dot{Z}^{s}}. \]
{\rm (iii)} We define $\dot{Y}^{s}:=\{u\in C({\BBB R} ; \dot{H}^{s}({\BBB R}^{d}))\cap V^{2}_{S}|\ \| u \| _{\dot{Y}^{s}}<\infty\}$ with the norm \[
\| u \| _{\dot{Y}^{s}}:=\left(\sum_{N}N^{2s} \| P_{N}u \| ^{2}_{V^{2}_{S}}\right)^{1/2}. \]
{\rm (iv)} We define $Y^{s}:=\{u\in C({\BBB R} ; H^{s}({\BBB R}^{d})) |\ \| u \| _{Y^{s}}<\infty\}$ with the norm \[
\| u \| _{Y^{s}}:= \| P_{<1} u \| _{\dot{Y}^{0}}+ \| P_{>1 }u \| _{\dot{Y}^{s}}. \] \end{defn}
\section{Multilinear estimate for $P_{4}(u,\overline{u})=\overline{u}^{4}$ in $1d$ \label{Multi_est}}
In this section, we prove multilinear estimates for the nonlinearity $\partial_{x}(\overline{u}^{4})$ in $1d$, which plays a crucial role in the proof of Theorem \ref{wellposed_1}.
\begin{lemm}\label{modul_est} We assume that $(\tau_{0},\xi_{0})$, $(\tau_{1}, \xi_{1})$, $\cdots$, $(\tau_{4}, \xi_{4})\in {\BBB R}\times {\BBB R}^{d}$ satisfy $\sum_{j=0}^{4}\tau_{j}=0$ and $\sum_{j=0}^{4}\xi_{j}=0$. Then, we have \begin{equation}\label{modulation_est}
\max_{0\leq j\leq 4}|\tau_{j}-|\xi_{j}|^{4}|
\geq \frac{1}{5}\max_{0\leq j\leq 4}|\xi_{j}|^{4}. \end{equation} \end{lemm}
\begin{proof} Since $\sum_{j=0}^{4}\tau_{j}=0$, we have $\sum_{j=0}^{4}(\tau_{j}-|\xi_{j}|^{4})=-\sum_{j=0}^{4}|\xi_{j}|^{4}$. Hence, by the triangle inequality, $5\max_{0\leq j\leq 4}|\tau_{j}-|\xi_{j}|^{4}|\geq \sum_{j=0}^{4}|\xi_{j}|^{4}\geq \max_{0\leq j\leq 4}|\xi_{j}|^{4}$, which gives (\ref{modulation_est}). \end{proof}
\subsection{The homogeneous case}
\begin{prop}\label{HL_est_n} Let $d=1$ and $0<T\leq \infty$. For a dyadic number $N_{1}\in 2^{{\BBB Z}}$, we define the set $A_{1}(N_{1})$ as \[
A_{1}(N_{1}):=\{ (N_{2},N_{3},N_{4})\in (2^{{\BBB Z}})^{3}|N_{1}\gg N_{2}\geq N_{3} \geq N_{4}\}. \] If $N_{0}\sim N_{1}$, then we have \begin{equation}\label{hl} \begin{split}
&\left|\sum_{A_{1}(N_{1})}\int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right|\\ &\lesssim
\| P_{N_{0}}u_{0} \| _{V^{2}_{S}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}\prod_{j=2}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}}. \end{split} \end{equation} \end{prop}
\begin{proof} We define $u_{j,N_{j},T}:=\mbox{\boldmath $1$}_{[0,T)}P_{N_{j}}u_{j}$\ $(j=0,\cdots ,4)$ and put $M:=N_{0}^{4}/5$. We decompose $Id=Q^{S}_{<M}+Q^{S}_{\geq M}$. We divide the integrals on the left-hand side of (\ref{hl}) into $10$ pieces of the form \begin{equation}\label{piece_form_hl} \int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}Q_{j}^{S}u_{j,N_{j},T}\right) dxdt \end{equation} with $Q_{j}^{S}\in \{Q_{\geq M}^{S}, Q_{<M}^{S}\}$\ $(j=0,\cdots ,4)$. By the Plancherel's theorem, we have \[ (\ref{piece_form_hl}) = c\int_{\sum_{j=0}^{4}\tau_{j}=0}\int_{\sum_{j=0}^{4}\xi_{j}=0}N_{0}\prod_{j=0}^{4}\mathcal{F}[Q_{j}^{S}u_{j,N_{j},T}](\tau_{j},\xi_{j}), \] where $c$ is a constant. Therefore, Lemma~\ref{modul_est} implies that \[ \int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}Q_{<M}^{S}u_{j,N_{j},T}\right) dxdt=0. \] So, let us now consider the case that $Q_{j}^{S}=Q_{\geq M}^{S}$ for some $0\leq j\leq 4$.
First, we consider the case $Q_{0}^{S}=Q_{\geq M}^{S}$. By the Cauchy-Schwarz inequality, we have \[ \begin{split}
&\left|\sum_{A_{1}(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{0,N_{0},T}\prod_{j=1}^{4}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\leq N_{0} \| Q_{\geq M}^{S}u_{0,N_{0},T} \| _{L^{2}_{tx}} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L^{4}_{t}L^{\infty}_{x}}\prod_{j=2}^{4}\left\|\sum_{N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T}\right\|_{L^{12}_{t}L^{6}_{x}}. \end{split} \] Furthermore by (\ref{highMproj}) and $M\sim N_{0}^{4}$, we have \[
\| Q_{\geq M}^{S}u_{0,N_{0},T} \| _{L^{2}_{tx}}
\lesssim N_{0}^{-2} \| u_{0,N_{0},T} \| _{V^{2}_{S}} \] and by (\ref{U_Stri}) and $V^{2}_{S}\hookrightarrow U^{4}_{S}$, we have \[ \begin{split}
\| Q_{1}^{S}u_{1,N_{1},T} \| _{L_{t}^{4}L_{x}^{\infty}}
&\lesssim N_{1}^{-1/2} \| Q_{1}^{S}u_{1,N_{1},T} \| _{U^{4}_{S}}
\lesssim N_{1}^{-1/2} \| Q_{1}^{S}u_{1,N_{1},T} \| _{V^{2}_{S}}. \end{split} \] While by the Sobolev inequality, (\ref{U_Stri}), $V^{2}_{S}\hookrightarrow U^{12}_{S}$ and the Cauchy-Schwarz inequality for the dyadic sum, we have \begin{equation}\label{L12L6_est} \begin{split}
\left\|\sum_{N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T}\right\|_{L^{12}_{t}L^{6}_{x}}
&\lesssim \left\| |\nabla |^{1/6}\sum_{N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T} \right\| _{L^{12}_{t}L^{3}_{x}}
\lesssim \left\| \sum_{N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T} \right\| _{V^{2}_{S}}\\
& \lesssim N_1^{1/2} \left( \sum _{N_j \lesssim N_1} N_j^{-1} \| u_{j,N_j,T} \|_{V^2_S}^2 \right) ^{1/2}
\lesssim N_{1}^{1/2} \| \mbox{\boldmath $1$}_{[0,T)}u_{j} \| _{\dot{Y}^{-1/2}} \end{split} \end{equation} for $2\leq j\leq 4$. Therefore, we obtain \[ \begin{split}
&\left|\sum_{A_{1}(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{0,N_{0},T}\prod_{j=1}^{4}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
\| P_{N_{0}}u_{0} \| _{V^{2}_{S}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}\prod_{j=2}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}} \end{split} \]
by (\ref{Vproj}) since $ \| \mbox{\boldmath $1$}_{[0,T)}u \| _{V^{2}_{S}}\lesssim \| u \| _{V^{2}_{S}}$ for any $T\in (0,\infty]$. The case $Q_{1}^{S}=Q_{\geq M}^{S}$ is treated in the same way.
Next, we consider the case $Q_{i}^{S}=Q_{\geq M}^{S}$ for some $2\le i \le 4$. By the H\"older inequality, we have \[ \begin{split}
&\left|\sum_{A_{1}(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{i,N_{i},T}\prod_{\substack{0\le j\le 4\\ j\neq i}}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\lesssim N_{0} \| Q_{0}^{S}u_{0,N_{0},T} \| _{L_{t}^{12}L_{x}^{6}} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L_{t}^{4}L_{x}^{\infty}}\\
&\ \ \ \ \times \left\| \sum_{N_{i}\lesssim N_{1}}Q_{\geq M}^{S}u_{i,N_{i},T} \right\|_{L_{tx}^{2}}
\prod_{\substack{2\le j\le 4 \\ j\neq i}}\left\| \sum_{N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T} \right\| _{L_{t}^{12}L_{x}^{6}}. \end{split} \] By $L^{2}$ orthogonality and (\ref{highMproj}), we have \begin{equation}\label{hi_mod_234} \begin{split}
\left\| \sum_{N_{i}\lesssim N_{1}}Q_{\geq M}^{S}u_{i,N_{i},T}\right\| _{L_{tx}^{2}}
&\lesssim \left(\sum_{N_{i}} \| Q_{\geq M}^{S}u_{i,N_{i},T} \| _{L_{tx}^{2}}^{2}\right)^{1/2}\\
&\lesssim N_{1}^{-3/2} \| \mbox{\boldmath $1$}_{[0,T)}u_{i} \| _{\dot{Y}^{-1/2}} \end{split} \end{equation} since $M\sim N_{0}^{4}$. Meanwhile, by the same calculation as in the case $Q_{0}^{S}=Q_{\geq M}^{S}$, we have \[
\| Q_{0}^{S}u_{0,N_{0},T} \| _{L_{t}^{12}L_{x}^{6}}\lesssim \| Q_{0}^{S}u_{0,N_{0},T} \| _{V^{2}_{S}}, \] \[
\| Q_{1}^{S}u_{1,N_{1},T} \| _{L_{t}^{4}L_{x}^{\infty}}\lesssim N_{1}^{-1/2} \| Q_{1}^{S}u_{1,N_{1},T} \| _{V^{2}_{S}} \] and \[
\left\| \sum_{N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T} \right\|_{L_{t}^{12}L_{x}^{6}}
\lesssim N_{1}^{1/2} \| \mbox{\boldmath $1$}_{[0,T)}u_{j} \| _{\dot{Y}^{-1/2}}. \] Therefore, we obtain \[ \begin{split}
&\left|\sum_{A_{1}(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{i,N_{i},T}\prod_{\substack{0\le j\le 4\\ j\neq i}}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\ &\lesssim
\| P_{N_{0}}u_{0} \| _{V^{2}_{S}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}\prod_{j=2}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}} \end{split} \]
by (\ref{Vproj}) since $ \| \mbox{\boldmath $1$}_{[0,T)}u \| _{V^{2}_{S}}\lesssim \| u \| _{V^{2}_{S}}$ for any $T\in (0,\infty]$.
\end{proof}
\begin{prop}\label{HH_est} Let $d=1$ and $0<T\leq \infty$. For a dyadic number $N_{2}\in 2^{{\BBB Z}}$, we define the set $A_{2}(N_{2})$ as \[
A_{2}(N_{2}):=\{ (N_{3}, N_{4})\in (2^{{\BBB Z}})^{2}|N_{2}\geq N_{3}\geq N_{4}\}. \] If $N_{0}\lesssim N_{1}\sim N_{2}$, then we have \begin{equation}\label{hh} \begin{split}
&\left|\sum_{A_{2}(N_{2})}\int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right|\\ &\lesssim
\frac{N_{0}}{N_{1}} \| P_{N_{0}}u_{0} \| _{V^{2}_{S}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}N_{2}^{-1/2} \| P_{N_{2}}u_{2} \| _{V^{2}_{S}} \| u_{3} \| _{\dot{Y}^{-1/2}} \| u_{4} \| _{\dot{Y}^{-1/2}}. \end{split} \end{equation} \end{prop} The proof of Proposition~\ref{HH_est} is quite similar to that of Proposition~\ref{HL_est_n}.
\subsection{The inhomogeneous case} \begin{prop}\label{HL_est_n-inh} Let $d=1$ and $0<T\leq 1$. For a dyadic number $N_{1}\in 2^{{\BBB Z}}$, we define the set $A_{1}'(N_{1})$ as \[
A_{1}'(N_{1}):=\{ (N_{2},N_{3},N_{4})\in (2^{{\BBB Z}})^{3}|N_{1}\gg N_{2}\geq N_{3} \ge N_{4}, \, N_4 \le 1 \}. \] If $N_{0}\sim N_{1}$, then we have \begin{equation}\label{hl-inh} \begin{split}
\left|\sum_{A_{1}'(N_{1})}\int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right|
\lesssim T^{\frac{1}{6}} \| P_{N_{0}}u_{0} \| _{V^{2}_{S}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}} \prod_{j=2}^{4} \| u_{j} \| _{Y^{-1/2}}. \end{split} \end{equation} \end{prop} \begin{proof} We further divide $A_1'(N_1)$ into three pieces: \begin{align*} A_1'(N_1) & = \bigcup _{j=1}^3 A_{1,j}'(N_1), \\ A_{1,1}'(N_1) &:= \{ (N_{2},N_{3},N_{4}) \in A_1'(N_1) : N_3 \ge 1 \} ,\\ A_{1,2}'(N_1) &:= \{ (N_{2},N_{3},N_{4}) \in A_1'(N_1) : N_2 \ge 1 \ge N_3 \} ,\\ A_{1,3}'(N_1) &:= \{ (N_{2},N_{3},N_{4}) \in A_1'(N_1) : 1 \ge N_2 \} . \end{align*} We define $u_{j,N_{j}}:=P_{N_{j}}u_{j}$, $u_{j,T}:=\mbox{\boldmath $1$}_{[0,T)}u_{j}$ and $u_{j,N_{j},T}:=\mbox{\boldmath $1$}_{[0,T)}P_{N_{j}}u_{j}$\ $(j=0,\cdots ,4)$. We firstly consider the case $A_{1,1}'(N_1)$. In the case $T \le N_0^{-3}$, the H\"older inequality implies \begin{align*}
& \left|\sum_{A_{1,1}'(N_{1})} \int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right| \\
& \le N_0 \| \mbox{\boldmath $1$}_{[0,T)}\|_{L^{2}_{t}}
\| u_{0,N_0} \| _{L_t^4 L_x^{\infty}} \| u_{1,N_1} \| _{L_t^4 L_x^{\infty}}
\prod _{j=2}^3 \left\| \sum_{1\le N_j \le N_{1}} u_{j,N_j} \right\| _{L_t^{\infty} L_x^2} \| P_{<1} u_{4} \| _{L_t^{\infty} L_x^{\infty}}
\end{align*}
Furthermore by (\ref{U_Stri}) and $V^{2}_{S}\hookrightarrow U^{4}_{S}$, we have \[ \begin{split}
\| u_{0,N_0} \| _{L_t^4 L_x^{\infty}} \| u_{1,N_1} \| _{L_t^4 L_x^{\infty}}
&\lesssim N_{0}^{-1/2}\| u_{0,N_0} \| _{U^{4}_{S}}N_{1}^{-1/2} \| u_{1,N_{1}} \| _{U^{4}_{S}}\\
&\lesssim N_{0}^{-1} \| u_{0,N_{0}} \| _{V^{2}_{S}} \| u_{1,N_{1}} \| _{V^{2}_{S}} \end{split} \] and by the Sobolev inequality, $V^{2}_{S}\hookrightarrow L^{\infty}_{t}L^{2}_{x}$ and the Cauchy-Schwarz inequality, we have \[
\| P_{<1} u_{4} \| _{L_t^{\infty} L_x^{\infty}}\lesssim \| P_{<1} u_{4} \| _{L_t^{\infty} L_x^{2}}
\lesssim \left(\sum_{N\le 2}\|P_{N}P_{<1}u_{4}\|_{V^{2}_{S}}^{2}\right)^{1/2}
\le \|P_{<1}u_4\|_{\dot{Y}^{0}} \] While by $L^{2}$ orthogonality and $V^{2}_{S}\hookrightarrow L^{\infty}_{t}L^{2}_{x}$, we have \[ \begin{split}
\left\| \sum_{1\le N_j \le N_{1}} u_{j,N_j} \right\| _{L_t^{\infty} L_x^2}
&\lesssim \left(\sum_{1\le N_j \le N_{1}} \| u_{j,N_{j}} \| _{V^{2}_{S}}^{2}\right)^{1/2}
\lesssim N_{0}^{1/2} \| P_{>1}u_{j} \| _{\dot{Y}^{-1/2}} \end{split} \] Therefore, we obtain \[ \begin{split}
&\left|\sum_{A_{1,1}'(N_{1})} \int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right| \\
&\lesssim T^{1/2}N_0 \| u_{0,N_{0}} \| _{V^{2}_{S}} \| u_{1,N_{1}} \| _{V^{2}_{S}}\prod_{j=2}^{3}\| P_{>1}u_{j} \| _{\dot{Y}^{-1/2}}\|P_{<1}u_4\|_{\dot{Y}^{0}} \end{split} \]
and note that $T^{1/2}N_0\le T^{1/6}$.
In the case $T \ge N_0^{-3}$, we divide the integrals on the left-hand side of (\ref{hl-inh}) into $10$ pieces of the form \eqref{piece_form_hl} as in the proof of Proposition \ref{HL_est_n}. Thanks to Lemma~\ref{modul_est}, it suffices to consider the case that $Q_{j}^{S}=Q_{\geq M}^{S}$ for some $0\leq j\leq 4$. First, we consider the case $Q_{0}^{S}=Q_{\geq M}^{S}$. In the same way as in the proof of Proposition \ref{HL_est_n} and using \[
\|Q_{4}^{S}P_{<1}u_{4,T}\|_{L^{12}_{t}L^{6}_{x}}\lesssim \|Q_{4}^{S}P_{<1}u_{4,T}\|_{V^{2}_{S}}\lesssim \|P_{<1}u_{4,T}\|_{\dot{Y}^{0}} \] instead of (\ref{L12L6_est}), we obtain \[ \begin{split}
&\left|\sum_{A_{1,1}'(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{0,N_{0},T}\prod_{j=1}^{4}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\leq N_{0} \| Q_{\geq M}^{S}u_{0,N_{0},T} \| _{L^{2}_{tx}} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L^{4}_{t}L^{\infty}_{x}}
\prod_{j=2}^{3} \left\|\sum_{1 \le N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T}\right\|_{L^{12}_{t}L^{6}_{x}} \|Q_{4}^{S}P_{<1}u_{4,T}\|_{L^{12}_{t}L^{6}_{x}}\\
& \lesssim N_0^{-\frac{1}{2}} \| P_{N_0} u_0 \| _{V^2_S} \| P_{N_1} u_1 \| _{V^2_S} \prod_{j=2}^{3} \left\| P_{>1} u_j \right\| _{\dot{Y}^{-1/2}} \| P_{<1} u_{4} \| _{\dot{Y}^0} \end{split} \]
and note that $N_0^{-1/2}\le T^{1/6}$. Since the cases $Q_j^S = Q_{\ge M}^S$ ($j=1,2,3$) are similarly handled, we omit the details here.
We focus on the case $Q_4^S = Q_{\ge M}^S$. By the same way as in the proof of Proposition \ref{HL_est_n} and using \[
\|Q_{\ge M}^{S}P_{<1}u_{4,T}\|_{L^{2}_{tx}}\lesssim N_{0}^{-2} \|P_{<1}u_{4,T}\|_{V^{2}_{S}}\lesssim N_{0}^{-2}\|P_{<1}u_{4,T}\|_{\dot{Y}^{0}} \] instead of (\ref{hi_mod_234}) with $j=4$, we obtain \[ \begin{split}
&\left|\sum_{A_{1,1}'(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{4,N_{4},T}\prod_{j=0}^{3}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\leq N_{0} \| u_{0,N_{0},T} \| _{L^{12}_{t}L_x^6} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L^{4}_{t}L^{\infty}_{x}}
\prod_{j=2}^{3} \left\|\sum_{1 \le N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T}\right\|_{L^{12}_{t}L^{6}_{x}}
\|Q_{\geq M}^{S} P_{<1}u_{4,T}\|_{L^{2}_{tx}}\\
& \lesssim N_{0}^{-1/2}\| P_{N_0} u_0 \| _{V^2_S} \| P_{N_1} u_1 \| _{V^2_S} \prod_{j=2}^{3} \left\| P_{>1} u_j \right\| _{\dot{Y}^{-1/2}} \| P_{<1} u_4 \| _{\dot{Y}^0} \end{split} \]
and note that $N_0^{-1/2}\le T^{1/6}$.
We secondly consider the case $A_{1,2}'(N_1)$. In the case $T \le N_0^{-3}$, the H\"older inequality implies \[ \begin{split}
& \left|\sum_{A_{1,2}'(N_{1})} \int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right| \\
& \le N_0 \| \mbox{\boldmath $1$}_{[0,T)}\|_{L^{2}_{t}}
\| u_{0,N_0} \| _{L_t^4 L_x^{\infty}} \| u_{1,N_1} \| _{L_t^4 L_x^{\infty}} \left\| \sum _{1 \le N_2 \lesssim N_1} u_{2,N_2} \right\| _{L_t^{\infty} L_x^2}
\prod_{j=3}^{4}\| P_{<1} u_{j} \| _{L_t^{\infty} L_x^4} . \end{split} \] By the same estimates as in the proof for the case $A_{1,1}'(N_1)$ and \[
\| P_{<1} u_{j} \| _{L_t^{\infty} L_x^4}\lesssim \| P_{<1} u_{j} \| _{L_t^{\infty} L_x^{2}}
\lesssim \left(\sum_{N\le 2}\|P_{N}P_{<1}u_{j}\|_{V^{2}_{S}}^{2}\right)^{1/2}
\le \|P_{<1}u_j\|_{\dot{Y}^{0}} \] for $j=3,4$, we obtain \[ \begin{split}
&\left|\sum_{A_{1,2}'(N_{1})} \int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right| \\
&\lesssim T^{1/2}N_0^{1/2} \| u_{0,N_{0}} \| _{V^{2}_{S}} \| u_{1,N_{1}} \| _{V^{2}_{S}}\| P_{>1}u_{2} \| _{\dot{Y}^{-1/2}}\prod_{j=3}^{4}\|P_{<1}u_j\|_{\dot{Y}^{0}} \end{split} \] and note that $T^{1/2}N_0^{1/2}\le T^{1/3}$.
In the case $T \ge N_0^{-3}$, we divide the integrals on the left-hand side of (\ref{hl-inh}) into $10$ pieces of the form \eqref{piece_form_hl} as in the proof of Proposition \ref{HL_est_n}. Thanks to Lemma~\ref{modul_est}, it suffices to consider the case that $Q_{j}^{S}=Q_{\geq M}^{S}$ for some $0\leq j\leq 4$. By the same argument as in the proof for the case $A_{1,1}'(N_1)$, we obtain \[ \begin{split}
&\left|\sum_{A_{1,2}'(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{0,N_{0},T}\prod_{j=1}^{4}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\leq N_{0} \| Q_{\geq M}^{S}u_{0,N_{0},T} \| _{L^{2}_{tx}} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L^{4}_{t}L^{\infty}_{x}} \left\|\sum_{1 \le N_{2}\lesssim N_{1}}Q_{2}^{S}u_{2,N_{2},T}\right\|_{L^{12}_{t}L^{6}_{x}} \prod_{j=3}^{4} \| Q_{j}^{S}P_{<1}u_{j,T}\|_{L^{12}_{t}L^{6}_{x}}\\
& \lesssim N_0^{-1} \| P_{N_0} u_0 \| _{V^2_S} \| P_{N_1} u_1 \| _{V^2_S} \left\| P_{>1} u_2 \right\| _{\dot{Y}^{-1/2}} \prod _{j=3}^4 \| P_{<1} u_j \| _{\dot{Y}^0} \end{split} \]
if $Q_0 = Q_{\ge M}^S$ and \[ \begin{split}
&\left|\sum_{A_{1,2}'(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{4,N_{4},T}\prod_{j=0}^{3}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\leq N_{0} \| u_{0,N_{0},T} \| _{L^{12}_{t}L_x^6} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L^{4}_{t}L^{\infty}_{x}} \left\|\sum_{1 \le N_{2}\lesssim N_{1}}Q_{2}^{S}u_{2,N_{2},T}\right\|_{L^{12}_{t}L^{6}_{x}} \\
&\hspace{21ex}\times \|Q_{3}^{S} P_{<1}u_{3,T}\|_{L^{12}_{t}L^{6}_{x}} \| Q_{\geq M}^{S} P_{<1}u_{4,T}\|_{L^{2}_{tx}}\\
& \lesssim N_0^{-1} \| P_{N_0} u_0 \| _{V^2_S} \| P_{N_1} u_1 \| _{V^2_S} \left\| P_{>1} u_2 \right\| _{\dot{Y}^{-1/2}} \prod_{j=3}^{4}\| P_{<1} u_j \| _{\dot{Y}^0} \end{split} \] if $Q_4 = Q_{\ge M}^S$. Note that $N_0^{-1}\le T^{1/3}$. The remaining cases follow from the same argument as above.
We thirdly consider the case $A_{1,3}'(N_1)$. In the case $T \le N_0^{-3}$, the H\"older inequality implies \[ \begin{split}
& \left|\sum_{A_{1,3}'(N_{1})} \int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right| \\
& \le N_0 \| \mbox{\boldmath $1$}_{[0,T)}\|_{L^{2}_{t}}\| u_{0,N_0} \| _{L_t^4 L_x^{\infty}} \| u_{1,N_1} \| _{L_t^4 L_x^{\infty}}
\prod_{j=2}^{4} \| P_{<1}u_{j} \| _{L_t^{\infty} L_x^3}. \end{split} \]
By the same estimates as in the proof for the case $A_{1,1}'(N_1)$ and \[
\| P_{<1} u_{j} \| _{L_t^{\infty} L_x^3}\lesssim \| P_{<1} u_{j} \| _{L_t^{\infty} L_x^{2}}
\lesssim \left(\sum_{N\le 2}\|P_{N}P_{<1}u_{j}\|_{V^{2}_{S}}^{2}\right)^{1/2}
\le \|P_{<1}u_j\|_{\dot{Y}^{0}} \] for $j=2, 3,4$, we obtain \[ \begin{split}
&\left|\sum_{A_{1,3}'(N_{1})} \int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right|
\lesssim T^{1/2}\| u_{0,N_{0}} \| _{V^{2}_{S}} \| u_{1,N_{1}} \| _{V^{2}_{S}}\prod_{j=2}^{4}\| P_{<1}u_{j} \| _{\dot{Y}^{0}}. \end{split} \]
In the case $T \ge N_0^{-3}$, we divide the integrals on the left-hand side of (\ref{hl-inh}) into $10$ pieces of the form \eqref{piece_form_hl} as in the proof of Proposition \ref{HL_est_n}. Thanks to Lemma~\ref{modul_est}, it suffices to consider the case that $Q_{j}^{S}=Q_{\geq M}^{S}$ for some $0\leq j\leq 4$. By the same argument as in the proof for the case $A_{1,1}'(N_1)$, we obtain \[ \begin{split}
&\left|\sum_{A_{1,3}'(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{0,N_{0},T}\prod_{j=1}^{4}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\leq N_{0} \| Q_{\geq M}^{S}u_{0,N_{0},T} \| _{L^{2}_{tx}} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L^{4}_{t}L^{\infty}_{x}}
\prod_{j=2}^{4} \|Q_{j}^{S}P_{<1}u_{j,T}\|_{L^{12}_{t}L^{6}_{x}}\\
& \lesssim N_0^{-3/2} \| P_{N_0} u_0 \| _{V^2_S} \| P_{N_1} u_1 \| _{V^2_S} \left\| P_{<1} u_2 \right\| _{\dot{Y}^{0}} \prod _{j=3}^4 \| P_{<1} u_j \| _{\dot{Y}^0} \end{split} \]
if $Q_0 = Q_{\ge M}^S$ and \[ \begin{split}
&\left|\sum_{A_{1,3}'(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{4,N_{4},T}\prod_{j=0}^{3}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\leq N_{0} \| u_{0,N_{0},T} \| _{L^{12}_{t}L_x^6} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L^{4}_{t}L^{\infty}_{x}} \prod _{j=2}^3 \|Q_{j}^{S} P_{<1}u_{j,T}\|_{L^{12}_{t}L^{6}_{x}}
\|Q_{\geq M}^{S} P_{<1}u_{4,T}\|_{L^2_{tx}}\\
& \lesssim N_0^{-3/2} \| P_{N_0} u_0 \| _{V^2_S} \| P_{N_1} u_1 \| _{V^2_S} \prod _{j=2}^4 \left\| P_{<1} u_j \right\| _{\dot{Y}^{0}} \end{split} \] if $Q_4 = Q_{\ge M}^S$. Note that $N_0^{-3/2}\le T^{1/2}$.
The cases $Q_j^S = Q_{\ge M}^S$ ($j=1,2,3$) follow from the same argument as above.
\end{proof}
Furthermore, we obtain the following estimate.
\begin{prop}\label{HH_est-inh} Let $d=1$ and $0<T\leq 1$. For a dyadic number $N_{2}\in 2^{{\BBB Z}}$, we define the set $A_{2}'(N_{2})$ as \[
A_{2}'(N_{2}):=\{ (N_{3}, N_{4})\in (2^{{\BBB Z}})^{2}|N_{2}\geq N_{3}\ge N_{4} , \, N_4 \le 1 \}. \] If $N_{0}\lesssim N_{1}\sim N_{2}$, then we have \begin{equation}\label{hh-inh} \begin{split}
&\left|\sum_{A_{2}'(N_{2})}\int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right|\\
&\lesssim T^{\frac{1}{6}} \frac{N_{0}}{N_{1}} \| P_{N_{0}}u_{0} \| _{V^{2}_{S}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}N_{2}^{-1/2} \| P_{N_{2}}u_{2} \| _{V^{2}_{S}} \| u_{3} \| _{Y^{-1/2}} \| u_{4} \| _{Y^{-1/2}}. \end{split} \end{equation} \end{prop}
Because the proof is similar to the above, we omit it.
\section{Proof of well-posedness \label{pf_wellposed_1}}
\subsection{The small data case}
In this section, we prove Theorem~\ref{wellposed_1} and Corollary~\ref{sccat}. We define the map $\Phi_{T, \varphi}$ as \[ \Phi_{T, \varphi}(u)(t):=S(t)\varphi -iI_{T}(u,\cdots, u)(t), \] where \[ I_{T}(u_{1},\cdots ,u_{4})(t):=\int_{0}^{t}\mbox{\boldmath $1$}_{[0,T)}(t')S(t-t')\partial_{x}\left(\prod_{j=1}^{4}\overline{u_{j}(t')}\right)dt'. \] To prove the well-posedness of (\ref{D4NLS}) in $\dot{H}^{-1/2}$, we prove that $\Phi_{T, \varphi}$ is a contraction map on a closed subset of $\dot{Z}^{-1/2}([0,T))$. The key estimate is the following:
\begin{prop}\label{Duam_est} Let $d=1$. For any $0<T<\infty$, we have \begin{equation}\label{Duam_est_1}
\| I_{T}(u_{1},\cdots u_{4}) \| _{\dot{Z}^{-1/2}}\lesssim \prod_{j=1}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}}. \end{equation} \end{prop}
\begin{proof} We decompose \[ I_{T}(u_{1},\cdots ,u_{4})=\sum_{N_{1},\cdots ,N_{4}}I_{T}(P_{N_{1}}u_{1},\cdots ,P_{N_{4}}u_{4}). \] By symmetry, it is enough to consider the summation for $N_{1}\geq N_{2}\geq N_{3} \geq N_{4}$. We put \[ \begin{split}
S_{1}&:=\{ (N_{1},\cdots ,N_{4})\in (2^{{\BBB Z}})^{4}|N_{1}\gg N_{2}\geq N_{3} \geq N_{4}\}\\
S_{2}&:=\{ (N_{1},\cdots ,N_{4})\in (2^{{\BBB Z}})^{4}|N_{1}\sim N_{2}\geq N_{3} \geq N_{4}\} \end{split} \] and \[
J_{k}:=\left\| \sum_{S_{k}}I_{T}(P_{N_{1}}u_{1},\cdots P_{N_{4}}u_{4}) \right\| _{\dot{Z}^{-1/2}}\ (k=1,2). \]
First, we prove the estimate for $J_{1}$. By Theorem~\ref{duality} and the Plancherel's theorem, we have \[ \begin{split}
J_{1}&\leq \left\{ \sum_{N_{0}}N_{0}^{-1}\left\| S(-\cdot )P_{N_{0}}\sum_{S_{1}}I_{T}(P_{N_{1}}u_{1},\cdots P_{N_{4}}u_{4})\right\|_{U^{2}}^{2}\right\}^{1/2}\\ &\lesssim \left\{\sum_{N_{0}}N_{0}^{-1}\sum_{N_{1}\sim N_{0}}
\left( \sup_{ \| u_{0} \| _{V^{2}_{S}}=1}\left|\sum_{A_{1}(N_{1})}\int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right|\right)^{2}\right\}^{1/2}, \end{split} \] where $A_{1}(N_{1})$ is defined in Proposition~\ref{HL_est_n}. Therefore by Proposition~\ref{HL_est_n}, we have \[ \begin{split} J_{1}&\lesssim \left\{\sum_{N_{0}}N_{0}^{-1}\sum_{N_{1}\sim N_{0}}
\left( \sup_{ \| u_{0} \| _{V^{2}_{S}}=1} \| P_{N_{0}}u_{0} \| _{V^{2}_{S}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}\prod_{j=2}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}}\right)^{2}\right\}^{1/2}\\ &\lesssim
\left(\sum_{N_{1}}N_{1}^{-1} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}^{2}\right)^{1/2}
\prod_{j=2}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}}\\
&=\prod_{j=1}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}}. \end{split} \]
Next, we prove the estimate for $J_{2}$. By Theorem~\ref{duality} and the Plancherel's theorem, we have \[ \begin{split} J_{2}&\leq
\sum_{N_{1}}\sum_{N_{2}\sim N_{1}}\left(\sum_{N_{0}}N_{0}^{-1}\left\|S(-\cdot )P_{N_{0}}\sum_{A_{2}(N_{2})}I_{T}(P_{N_{1}}u_{1},\cdots P_{N_{4}}u_{4})\right\|_{U^{2}}^{2}\right)^{1/2}\\ &=\sum_{N_{1}}\sum_{N_{2}\sim N_{1}}\left(\sum_{N_{0}\lesssim N_{1}}N_{0}^{-1}
\sup_{ \| u_{0} \| _{V^{2}_{S}}=1}\left| \sum_{A_{2}(N_{2})}\int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right|^{2}\right)^{1/2}, \end{split} \] where $A_{2}(N_{2})$ is defined in Proposition~\ref{HH_est}. Therefore by {\rm Proposition~\ref{HH_est}} and the Cauchy-Schwarz inequality for the dyadic sum, we have \[ \begin{split} J_{2}&\lesssim \sum_{N_{1}}\sum_{N_{2}\sim N_{1}}\left(\sum_{N_{0}\lesssim N_{1}}N_{0}^{-1}
\left(\frac{N_{0}}{N_{1}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}N_{2}^{-1/2} \| P_{N_{2}}u_{2} \| _{V^{2}_{S}} \| u_{3} \| _{\dot{Y}^{-1/2}} \| u_{4} \| _{\dot{Y}^{-1/2}}\right)^{2}\right)^{1/2}\\
&\lesssim \left(\sum_{N_{1}}N_{1}^{-1} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}^{2}\right)^{1/2}
\left(\sum_{N_{2}}N_{2}^{-1} \| P_{N_{2}}u_{2} \| _{V^{2}_{S}}^{2}\right)^{1/2} \| u_{3} \| _{\dot{Y}^{-1/2}} \| u_{4} \| _{\dot{Y}^{-1/2}}\\
&= \prod_{j=1}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}}. \end{split} \] \end{proof}
\begin{proof}[\rm{\bf{Proof of Theorem~\ref{wellposed_1}.}}] For $r>0$, we define \begin{equation}\label{Zr_norm} \dot{Z}^{s}_{r}(I)
:=\left\{u\in \dot{Z}^{s}(I)\left|\ \| u \| _{\dot{Z}^{s}(I)}\leq 2r \right.\right\} \end{equation} which is a closed subset of $\dot{Z}^{s}(I)$. Let $T>0$ and $u_{0}\in B_{r}(\dot{H}^{-1/2})$ be given. For $u\in \dot{Z}^{-1/2}_{r}([0,T))$, we have \[
\| \Phi_{T,u_{0}}(u) \| _{\dot{Z}^{-1/2}([0,T))}\leq \| u_{0} \| _{\dot{H}^{-1/2}} +C \| u \| _{\dot{Z}^{-1/2}([0,T))}^{4}\leq r(1+ 16 Cr^{3}) \] and \[ \begin{split}
\| \Phi_{T,u_{0}}(u)-\Phi_{T,u_{0}}(v) \| _{\dot{Z}^{-1/2}([0,T))}
&\leq C( \| u \| _{\dot{Z}^{-1/2}([0,T))}+ \| v \| _{\dot{Z}^{-1/2}([0,T))})^{3} \| u-v \| _{\dot{Z}^{-1/2}([0,T))}\\
&\leq 64Cr^{3} \| u-v \| _{\dot{Z}^{-1/2}([0,T))} \end{split} \] by Proposition~\ref{Duam_est} and \[
\| S(\cdot )u_{0} \| _{\dot{Z}^{-1/2}([0,T))}\leq \| \mbox{\boldmath $1$}_{[0,T)}S(\cdot )u_{0} \| _{\dot{Z}^{-1/2}}\leq \| u_{0} \| _{\dot{H}^{-1/2}}, \] where $C$ is the implicit constant in (\ref{Duam_est_1}). Therefore, if we choose $r$ satisfying \[ r <(64C)^{-1/3}, \] then $\Phi_{T,u_{0}}$ is a contraction map on $\dot{Z}^{-1/2}_{r}([0,T))$. This implies the existence of the solution of (\ref{D4NLS}) and the uniqueness in the ball $\dot{Z}^{-1/2}_{r}([0,T))$. The Lipschitz continuity of the flow map is also proved by a similar argument. \end{proof}
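For the reader's convenience, we record how the difference estimate in the proof above follows from Proposition~\ref{Duam_est}: since $I_{T}$ is additive in each of its four arguments, a telescoping decomposition (our indexing) gives
\[
I_{T}(u,u,u,u)-I_{T}(v,v,v,v)=\sum_{k=1}^{4}I_{T}(\underbrace{v,\dots ,v}_{k-1},\,u-v,\,\underbrace{u,\dots ,u}_{4-k}),
\]
and applying (\ref{Duam_est_1}) to each summand, together with the norm bookkeeping for the restriction spaces used above, yields the displayed Lipschitz bound, the numerical factor being absorbed into $C$.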
Corollary~\ref{sccat} is obtained in the same way as the proof of Corollary~1.2 in \cite{Hi}.
\subsection{The large data case}
In this subsection, we prove Theorem \ref{large-wp}. The following is the key estimate.
\begin{prop}\label{Duam_est-inh} Let $d=1$. We have \begin{equation}\label{Duam_est_1-inh}
\| I_{1}(u_{1},\cdots u_{4}) \| _{\dot{Z}^{-1/2}} \lesssim \prod_{j=1}^{4} \| u_{j} \| _{Y^{-1/2}}. \end{equation} \end{prop}
\begin{proof} We decompose $u_j = v_j +w_j$ with $v_j = P_{>1}u_j \in \dot{Y}^{-1/2}$ and $w_j = P_{<1} u_j \in \dot{Y}^0$. From Propositions \ref{HL_est_n-inh}, \ref{HH_est-inh}, and the same argument as in the proof of Proposition~\ref{Duam_est}, it remains to prove that \[
\| I_{1}(w_{1},w_2,w_3,w_{4}) \| _{\dot{Z}^{-1/2}} \lesssim \prod_{j=1}^{4} \| u_{j} \| _{\dot{Y}^0}. \] By Theorem \ref{duality}, the Cauchy-Schwarz inequality, the H\"older inequality and the Sobolev inequality, we have \[
\| I_{1}(w_{1},w_2,w_3,w_{4}) \| _{\dot{Z}^{-1/2}}
\lesssim \left\| \prod_{j=1}^{4}\overline{w_{j}} \right\|_{L^1([0,1];L^2)}
\lesssim \prod _{j=1}^4 \| w_j \| _{L_t^{\infty} L_x^2}
\lesssim \prod_{j=1}^{4} \| u_{j} \| _{\dot{Y}^{0}}, \] which completes the proof. \end{proof}
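For clarity, we indicate how the last two inequalities above are obtained (this is our reading of the H\"older and Sobolev steps): by H\"older's inequality in space and the one-dimensional Bernstein inequality $ \| P_{<1}f \| _{L^{\infty}_{x}}\lesssim \| P_{<1}f \| _{L^{2}_{x}}$,
\[
\Big\| \prod_{j=1}^{4}\overline{w_{j}} \Big\| _{L^{2}_{x}}\leq \| w_{1} \| _{L^{2}_{x}}\prod_{j=2}^{4} \| w_{j} \| _{L^{\infty}_{x}}\lesssim \prod_{j=1}^{4} \| w_{j} \| _{L^{2}_{x}},
\]
and integrating in time over $[0,1]$ gives the bound by $\prod_{j=1}^{4} \| w_{j} \| _{L^{\infty}_{t}L^{2}_{x}}$; the final step uses that the $\dot{Y}^{0}$ norm controls $L^{\infty}_{t}L^{2}_{x}$, which we take for granted from the definition of the $Y$ spaces.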
\begin{proof}[\rm{\bf{Proof of Theorem \ref{large-wp}}}] Let $u_0 \in B_{\delta ,R}(H^{-1/2})$ with $u_0=v_0+w_0$, $v_0 \in \dot{H}^{-1/2}$, $w_0 \in L^2$. A direct calculation yields \[
\| S(t) u_0 \| _{Z^{-1/2}([0,1))} \le \delta +R. \] We start with the case $R=\delta = (4C+4)^{-4}$, where $C$ is the implicit constant in \eqref{Duam_est_1-inh}. Proposition \ref{Duam_est-inh} implies that for $u \in Z^{-1/2}_r([0,1])$ with $r=1/(4C+4)$ \begin{align*}
\| \Phi_{1,u_{0}}(u) \| _{Z^{-1/2}([0,1))} & \leq \| S(t) u_0 \| _{Z^{-1/2}([0,1))} +C \| u \| _{Z^{-1/2}([0,1))}^{4} \\ & \leq 2r^4 + 16C r^4 = r^4 (16C+2) \le r \end{align*} and \begin{align*}
\| \Phi_{1,u_{0}}(u)-\Phi_{1,u_{0}}(v) \| _{Z^{-1/2}([0,1))}
&\leq C( \| u \| _{Z^{-1/2}([0,1))}+ \| v \| _{Z^{-1/2}([0,1))})^{3} \| u-v \| _{Z^{-1/2}([0,1))}\\
&\leq 64Cr^{3} \| u-v \| _{Z^{-1/2}([0,1))}
< \| u-v \| _{Z^{-1/2}([0,1))} \end{align*} since $64Cr^{3}=C/(C+1)^{3}<1$ for this choice of $r$. Accordingly, $\Phi_{1,u_{0}}$ is a contraction map on $Z^{-1/2}_{r}([0,1))$.
We note that
all of the above remains valid if we replace $Z^{-1/2}([0,1))$ with the smaller space $\dot{Z}^{-1/2}([0,1))$, since $\dot{Z}^{-1/2}([0,1)) \hookrightarrow Z^{-1/2}([0,1))$ and the left-hand side of \eqref{Duam_est_1-inh} is the homogeneous norm.
We now assume that $u_0 \in B_{\delta ,R}(H^{-1/2})$ for $R \ge \delta = (4C+4)^{-4}$. We define $u_{0, \lambda}(x) = \lambda ^{-1} u_0 (\lambda ^{-1}x)$. For $\lambda = \delta ^{-2} R^{2}$, we observe that $u_{0,\lambda} \in B_{\delta ,\delta}(H^{-1/2})$. We therefore find a solution $u_{\lambda} \in Z^{-1/2}([0,1))$ with $u_{\lambda}(0,x) = u_{0,\lambda}(x)$. By the scaling, we find a solution $u \in Z^{-1/2}([0, \delta ^8 R^{-8}))$.
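We briefly sketch the scaling computation used here, assuming the standard scaling invariance of \eqref{D4NLS} for $d=1$ and $m=4$, namely that $u_{\lambda}(t,x):=\lambda^{-1}u(\lambda^{-4}t,\lambda^{-1}x)$ is again a solution. Writing $u_{0,\lambda}=v_{0,\lambda}+w_{0,\lambda}$ with the same rescaling applied to each piece, one has $\widehat{u_{0,\lambda}}(\xi)=\widehat{u_{0}}(\lambda \xi)$ and hence
\[
\| v_{0,\lambda} \| _{\dot{H}^{-1/2}}= \| v_{0} \| _{\dot{H}^{-1/2}}\leq \delta ,\qquad
\| w_{0,\lambda} \| _{L^{2}}=\lambda^{-1/2} \| w_{0} \| _{L^{2}}\leq \lambda^{-1/2}R=\delta
\]
for $\lambda =\delta ^{-2}R^{2}$, so that $u_{0,\lambda}\in B_{\delta ,\delta}(H^{-1/2})$. Undoing the scaling, a solution $u_{\lambda}$ on $[0,1)$ yields the solution $u(t,x)=\lambda u_{\lambda}(\lambda ^{4}t,\lambda x)$ on $[0,\lambda ^{-4})=[0,\delta ^{8}R^{-8})$.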
Thanks to Propositions \ref{HL_est_n-inh} and \ref{HH_est-inh}, the uniqueness follows from the same argument as in \cite{HHK10}. \end{proof}
\section{Proof of Theorem~\ref{wellposed_2}}\label{pf_wellposed_2}\text{} In this section, we prove Theorem~\ref{wellposed_2}. We only give the proof for the homogeneous case since the proof for the inhomogeneous case is similar. We define the map $\Phi_{T, \varphi}^{m}$ as \[ \Phi_{T, \varphi}^{m}(u)(t):=S(t)\varphi -iI_{T}^{m}(u,\cdots, u)(t), \] where \[ I_{T}^{m}(u_{1},\cdots u_{m})(t):=\int_{0}^{t}\mbox{\boldmath $1$}_{[0,T)}(t')S(t-t')\partial \left(\prod_{j=1}^{m}u_{j}(t')\right)dt' \] and the solution space $\dot{X}^{s}$ as \[ \dot{X}^{s}:=C({\BBB R};\dot{H}^{s})\cap L^{p_{m}}({\BBB R};\dot{W}^{s+1/(m-1),q_{m}}), \] where $p_{m}=2(m-1)$, $q_{m}=2(m-1)d/\{(m-1)d-2\}$ for $d \ge 2$ and $p_3=4$, $q_3=\infty$ for $d=1$. To prove the well-posedness of (\ref{D4NLS}) in $L^{2}({\BBB R} )$ or $H^{s_{c}}({\BBB R}^{d})$, we prove that $\Phi_{T, \varphi}^{m}$ is a contraction map on a closed subset of $\dot{X}^{s}$. The key estimate is the following: \begin{prop}\label{Duam_est_g} {\rm (i)}\ Let $d=1$ and $m=3$. For any $0<T<\infty$, we have \begin{equation}\label{Duam_est_1d}
\| I_{T}^{3}(u_{1},u_{2}, u_{3}) \| _{\dot{X^{0}}}\lesssim T^{1/2}\prod_{j=1}^{3} \| u_{j} \| _{\dot{X}^{0}}. \end{equation} {\rm (ii)}\ Let $d\ge 2$, $(m-1)d\ge 4$ and $s_c=d/2-3/(m-1)$. For any $0<T\le \infty$, we have \begin{equation}\label{Duam_est_2}
\| I_{T}^{m}(u_{1},\cdots, u_{m}) \| _{\dot{X^{s_c}}}\lesssim \prod_{j=1}^{m} \| u_{j} \| _{\dot{X}^{s_c}}. \end{equation} \end{prop}
\begin{proof} {\rm (i)}\ By Proposition~\ref{Stri_est} with $(a,b)=\left( 4, \infty \right)$, we get \[
\| I_{T}^{3}(u_{1},u_{2}, u_{3}) \| _{L^{\infty}_{t}L^{2}_{x}}
\lesssim \left\|\mbox{\boldmath $1$}_{[0,T)} |\nabla |^{-1/2}\partial \left(\prod_{j=1}^{3}u_{j}\right)\right\|_{L^{4/3}_{t}L^{1}_{x}} \] and \[
\| |\nabla |^{1/2}I_{T}^{3}(u_{1},u_{2}, u_{3}) \| _{L^{4}_{t}L^{\infty}_{x}}
\lesssim \left\| \mbox{\boldmath $1$}_{[0,T)}|\nabla |^{1/2-1/2-1/2}\partial \left(\prod_{j=1}^{3}u_{j}\right)\right\|_{L^{4/3}_{t}L^{1}_{x}}. \] Therefore, thanks to the fractional Leibniz rule (see \cite{CW91}), we have \[ \begin{split}
\| I_{T}^{3}(u_{1},\cdots, u_{3}) \| _{\dot{X^{0}}}
& \lesssim \left\| \mbox{\boldmath $1$}_{[0,T)}|\nabla |^{1/2}\prod_{j=1}^{3}u_{j}\right\|_{L^{4/3}_{t}L^{1}_{x}} \\
& \lesssim \sum_{i=1}^{3}\| \mbox{\boldmath $1$}_{[0,T)}\|_{L^{2}_{t}}\| |\nabla |^{1/2}u_{i} \| _{L^{4}_{t}L^{\infty}_{x}}\prod_{\substack{1\le j\le 3\\ j\neq i}} \| u_{j} \| _{L^{\infty}_{t}L^{2}_{x}}\\
&\lesssim T^{1/2}\prod_{j=1}^{3} \| u_{j} \| _{\dot{X}^{0}} \end{split} \] by the H\"older inequality. \\ {\rm (ii)}\ By Proposition~\ref{Stri_est} with \begin{equation}\label{admissible_ab} (a,b)=\left( \frac{2(m-1)}{m-2}, \frac{2(m-1)d}{(m-1)d-2(m-2)}\right), \end{equation} we get \[
\| |\nabla |^{s_c}I_{T}^{m}(u_{1},\cdots u_{m}) \| _{L^{\infty}_{t}L^{2}_{x}}
\lesssim \left\| |\nabla |^{s_c-2/a}\partial \left(\prod_{j=1}^{m}u_{j}\right)\right\|_{L^{a'}_{t}L^{b'}_{x}} \] and \[
\| |\nabla |^{s_c+1/(m-1)}I_{T}^{m}(u_{1},\cdots u_{m}) \| _{L^{p_m}_{t}L^{q_m}_{x}}
\lesssim \left\| |\nabla |^{s_c+1/(m-1)-2/p_m-2/a}\partial \left(\prod_{j=1}^{m}u_{j}\right)\right\|_{L^{a'}_{t}L^{b'}_{x}}. \] Therefore, thanks to the fractional Leibniz rule (see \cite{CW91}), we have
\[ \begin{split}
\| I_{T}^{m}(u_{1},\cdots u_{m}) \| _{\dot{X^{s_c}}}
& \lesssim \left\| |\nabla |^{s_c+1/(m-1)}\prod_{j=1}^{m}u_{j}\right\|_{L^{a'}_{t}L^{b'}_{x}} \\
& \lesssim \sum_{i=1}^{m} \| |\nabla |^{s_c+1/(m-1)}u_{i} \| _{L^{p_{m}}_{t}L^{q_{m}}_{x}}\prod_{\substack{1\le j\le m\\ j\neq i}} \| u_{j} \| _{L^{p_{m}}_{t}L^{(m-1)d}_{x}}\\
&\lesssim \sum_{i=1}^{m} \| |\nabla |^{s_c+1/(m-1)}u_{i} \| _{L^{p_{m}}_{t}L^{q_{m}}_{x}}\prod_{\substack{1\le j\le m\\ j\neq i}} \| |\nabla |^{s_{c}+1/(m-1)}u_{j} \| _{L^{p_{m}}_{t}L^{q_{m}}_{x}}\\
&\lesssim \prod_{j=1}^{m} \| u_{j} \| _{\dot{X}^{s_c}} \end{split} \] by the H\"older inequality and the Sobolev inequality, where we used the condition $(m-1)d\ge 4$ which is equivalent to $s_{c}+1/(m-1)\ge 0$. \end{proof} The well-posedness can be proved in the same way as in the proof of Theorem~\ref{wellposed_1}, and the scattering follows from the Strichartz estimate because the $\dot{X}^{s_c}$ norm of the nonlinear part is bounded by the norm of the $L^{p_m}L^{q_m}$ space (see for example \cite[Section 9]{P07}).
\section{Proof of Theorem~\ref{notC3}}\label{pf_notC3}
In this section we prove that the flow of (\ref{D4NLS}) is not smooth. Let $u^{(m)}[u_0]$ be the $m$-th iteration of \eqref{D4NLS} with initial data $u_0$: \[ u^{(m)}[u_0] (t,x) := -i \int _0^t e^{i(t-t') \Delta ^2} \partial P_m( S(t') u_0, S(-t') \overline{u_0}) dt' . \]
Firstly we consider the case $d=1$, $m=3$, $P_{3}(u,\overline{u})=|u|^{2}u$. For $N\gg 1$, we put \[ f_{N} = N^{-s+1/2} \mathcal{F}^{-1}[ \mbox{\boldmath $1$} _{[N-N^{-1}, N+N^{-1}]}] \] Let $u^{(3)}_{N}$ be the third iteration of (\ref{D4NLS}) for the data $f_{N}$. Namely, \[
u^{(3)}_{N}(t,x) = u^{(3)}[f_N] (t,x)= -i \int _0^t e^{i(t-t') \partial _x ^4} \partial _x \left( |e^{it' \partial _x^4} f_{N}| ^2 e^{it' \partial _x^4} f_{N} \right)(x) dt'. \]
Note that $ \| f_{N} \| _{H^s} \sim 1$. Theorem~\ref{notC3} is implied by the following propositions. \begin{prop} If $s<0$, then \[
\| u^{(3)}_{N} \| _{L^{\infty}([0,1]; H^s)} \rightarrow \infty \] as $N\rightarrow \infty$. \end{prop} \begin{proof} A direct calculation implies \[ \widehat{u^{(3)}_{N}} (t, \xi ) = e^{it \xi ^4} \xi \int _{\xi _1-\xi _2+\xi _3 =\xi} \int _0^t e^{it'(-\xi ^4 +\xi _1^4-\xi _2^4+\xi _3^4)} d t' \widehat{f_{N}}(\xi _1) \overline{\widehat{f_{N}}}(\xi _2) \widehat{f_{N}}(\xi _3) \] and \begin{equation} \label{modulation} \begin{split} &-(\xi _1-\xi _2+\xi _3)^4+\xi _1^4-\xi _2^4+\xi _3^4\\ &= 2 (\xi _1- \xi _2)(\xi _2-\xi _3) ( 2 \xi _1^2 +\xi _2^2+2\xi _3^2 -\xi _1 \xi _2 -\xi _2\xi _3 +3 \xi _3 \xi _1) . \end{split} \end{equation} >From $\xi _j \in [N-N^{-1}, N+N^{-1}]$ for $j=1,2,3$, we get \[
|-(\xi _1-\xi _2+\xi _3)^4+\xi _1^4-\xi _2^4+\xi _3^4| \lesssim 1. \] We therefore obtain for sufficiently small $t>0$ \begin{align*}
|\widehat{u^{(3)}_{N}} (t,\xi ) |
& \gtrsim t N^{-3s+5/2} \left| \int _{\xi _1-\xi _2+\xi _3 =\xi} \mbox{\boldmath $1$} _{[N-N^{-1}, N+N^{-1}]} (\xi _1) \mbox{\boldmath $1$} _{[N-N^{-1}, N+N^{-1}]} (\xi _2) \mbox{\boldmath $1$} _{[N-N^{-1}, N+N^{-1}]} (\xi _3) \right| \\ & \gtrsim t N^{-3s+1/2} \mbox{\boldmath $1$} _{[N-N^{-1},N+N^{-1} ]} (\xi ) . \end{align*} Hence, \[
\| u^{(3)}_{N} \| _{L^{\infty}([0,1]; H^s)} \gtrsim N^{-2s}. \] This lower bound goes to infinity as $N$ tends to infinity if $s<0$, which concludes the proof. \end{proof}
Secondly, we show the absence of a smooth flow map for $d \ge 1$ and $m \ge 2$. Putting \[ g_N := N^{-s-d/2} \mathcal{F}^{-1}[ \mbox{\boldmath $1$} _{[-N,N]^d}] , \]
we set $u_N^{(m)} := u^{(m)} [g_N]$. Note that $\| g_N \| _{H^s} \sim 1$. As above, we show the following.
\begin{prop}
If $s<s_c := d/2-3/(m-1)$ and $\partial =|\nabla |$ or $\frac{\partial}{\partial x_k}$ for some $1\le k\le d$, then \[
\| u_N^{(m)} \| _{L^{\infty}([0,1];H^s)} \rightarrow \infty \] as $N \rightarrow \infty$. \end{prop}
\begin{proof}
We only give the proof for the case $\partial =|\nabla |$ since the proof for the case $\frac{\partial}{\partial x_k}$ is the same. Let \[ \mathcal{A} := \{ (\pm _1, \dots , \pm _m) : \pm _j \in \{ +, - \} \, (j=1, \dots ,m) \} . \] Since $\mathcal{A}$ consists of $2^m$ elements, we write \[ \mathcal{A} = \bigcup _{\alpha =1}^{2^m} \{ \pm ^{(\alpha )} \} , \] where $\pm ^{( \alpha )}$ is an $m$-tuple of signs $+$ and $-$. We denote by $\pm _{j}^{(\alpha )}$ the $j$-th component of $\pm ^{(\alpha )}$. A simple calculation shows that \[
\widehat{u_N^{(m)}} (t,\xi) = |\xi | \sum _{\alpha =1}^{2^m} e^{it |\xi |^4} \int _{\xi = \sum _{j=1}^m \pm _j^{(\alpha)} \xi _j} \int _0^t e^{it' (-|\xi|^4 + \sum _{j=1}^m \pm _j^{(\alpha )} |\xi _j|^4)} dt' \prod _{j=1}^m \widehat{g_N} (\xi _j) . \] From \[
\left| -|\xi|^4 + \sum _{j=1}^m \pm _j^{(\alpha )} |\xi _j|^4 \right| \lesssim N^4 \]
for $|\xi _j| \le N$ ($j= 1, \dots , m$), we have \[
|\widehat{u_N^{(m)}} (t, \xi )|
\gtrsim |\xi | N^{-4} N^{-m(s+d/2)} N^{(m-1)d} \mbox{\boldmath $1$} _{[-N,N]^d} (\xi ) \gtrsim N^{-3} N^{-m(s+d/2)} N^{(m-1)d} \mbox{\boldmath $1$} _{[N/2,N]^d} (\xi ) \] provided that $t \sim N^{-4}$. Accordingly, we obtain \[
\| u_N^{(m)} (N^{-4}) \| _{H^s} \gtrsim N^{-3} N^{-m(s+d/2)} N^{(m-1)d} N^{s+d/2} \sim N^{-(m-1)s+(m-1)d/2-3} , \]
which implies that $\| u^{(m)}_N \| _{L^{\infty}([0,1];H^s)} \rightarrow \infty$ as $N \rightarrow \infty$ if $s<s_c$. \end{proof}
\section*{Acknowledgment}
The work of the second author was partially supported by JSPS KAKENHI Grant number 26887017.
\end{document}
\begin{document}
\fontsize{.5cm}{.5cm}\selectfont\sf
\title[Schubert Draft]{Double Quantum Schubert Cells and\\ Quantum Mutations}
\date{\today} \author{Hans P. Jakobsen} \address{
Department of Mathematical Sciences\\ University of Copenhagen\\Universitetsparken 5\\
DK-2100, Copenhagen,
Denmark} \email{[email protected]} \begin{abstract}Let ${\mathfrak p}\subset {\mathfrak g}$ be a parabolic subalgebra of a simple finite-dimensional Lie algebra over ${\mathbb C}$. To each pair $w^{\mathfrak a}\leq w^{\mathfrak c}$ of minimal left coset representatives in the quotient space $W_p\backslash W$ we construct explicitly a quantum seed ${\mathcal Q}_q({\mathfrak a},{\mathfrak c})$. We define Schubert creation and annihilation mutations and show that our seeds are related by such mutations. We also introduce more elaborate seeds to accommodate our mutations. The quantized Schubert Cell decomposition of the quantized generalized flag manifold can be viewed as the result of such mutations having their origins in the pair $({\mathfrak a},{\mathfrak c})= ({\mathfrak e},{\mathfrak p})$, where the empty string ${\mathfrak e}$ corresponds to the neutral element. This makes it possible to give simple proofs by induction. We exemplify this in three directions: Prime ideals, upper cluster algebras, and the diagonal of a quantized minor. \end{abstract} \subjclass[2010]{MSC 17B37 (primary),\ MSC 13F60, \ MSC 16T20 (primary), \ MSC 17A45 (secondary), \and MSC 20G42 (secondary)} \maketitle
\section{Introduction}
We study a class of quadratic algebras connected to quantum parabolics and double quantum Schubert cells. We begin by considering a finite-dimensional simple Lie algebra ${\mathfrak g}$ over ${\mathbb C}$ and a parabolic sub-algebra ${\mathfrak p}\subset{\mathfrak g}$. Then we consider a fixed Levi decomposition \begin{equation} {\mathfrak p}={\mathfrak l}+{\mathfrak u}, \end{equation} with ${\mathfrak u}\neq 0$ and ${\mathfrak l}$ the Levi subalgebra.
The main references for this study are the articles by A. Berenstein and A. Zelevinsky \cite{bz} and by C. Geiss, B. Leclerc, J. Schr\"oer \cite{leclerc}. We also refer to \cite{jak-cen} for further background.
Let, as usual, $W$ denote the Weyl group. Let $W_p=\{w\in W\mid w(\triangle^-)\cap \triangle^+\subseteq \triangle^+({\mathfrak l}) \}$ and $W^p$, by some called the Hasse Diagram of $G\backslash P$, denote the usual set of minimal length coset representatives of $W_p\backslash W$. Our primary input is a pair of Weyl group elements $w^{\mathfrak a},w^{\mathfrak c}\in W^p$ such that $w^{\mathfrak a}\leq w^{\mathfrak c}$. We will often, as here, label our elements $w$ by ``words'' ${\mathfrak a}$; $w=w^{\mathfrak a}$, in a fashion similar, though not completely identical, to that of \cite{bz}. Details follow in later sections, but we do mention here that the element $e$ in $W$ is labeled by ${\mathfrak e}$ corresponding to the empty string; $e=\omega^{\mathfrak e}$, while the longest element in $W^p$ is labeled by ${\mathfrak p}$.
To each pair $w^{\mathfrak a},w^{\mathfrak c}$ as above we construct explicitly a quantum seed \begin{equation}{\mathcal Q}_q({\mathfrak a},{\mathfrak c}):=({\mathcal C}_q({\mathfrak a},{\mathfrak c}), {\mathcal L}_q({\mathfrak a},{\mathfrak c}), {\mathcal B}_q({\mathfrak a},{\mathfrak c})).\end{equation}
The cluster ${\mathcal C}_q({\mathfrak a},{\mathfrak c})$ generates a quadratic algebra ${\mathcal A}_q({\mathfrak a},{\mathfrak c})$ in the space of functions on ${\mathcal U}_q({\mathfrak n})$.
After that we define transitions \begin{equation}{\mathcal Q}_q({\mathfrak a},{\mathfrak c})\rightarrow {\mathcal Q}_q({\mathfrak a}_1,{\mathfrak c}_1). \end{equation} We call our transitions quantum Schur (creation/annihilation) mutations and prove that they are indeed just (composites of) quantum mutations in the sense of Berenstein and Zelevinski. These actually have to be augmented by what we call creation/annihilation mutations which are necessary since we have to work inside a larger ambient space. To keep the full generality, we may also have to restrict our seeds to sub-seeds.
The natural scene turns out to be \begin{equation}{\mathcal Q}_q({\mathfrak a},{\mathfrak b},{\mathfrak c}):=({\mathcal C}_q({\mathfrak a},{\mathfrak b},{\mathfrak c}), {\mathcal L}_q({\mathfrak a},{\mathfrak b},{\mathfrak c}), {\mathcal B}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})),\end{equation} which analogously is determined by a triple $w^{\mathfrak a},w^{\mathfrak b},w^{\mathfrak c}\in W^p$ such that $w^{\mathfrak a}\leq w^{\mathfrak b}\leq w^{\mathfrak c}$.
Later we extend our construction to even \begin{equation} {\mathcal Q}_q({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)\textrm{ and }{\mathcal A}_q({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n),
\end{equation}
though we do not use it here for anything specific.
It is a major point of this study to establish how our seeds and algebras can be constructed, inside an ambient space, starting from a single variable (indeed: none). In this sense the quantized generalized flag manifold of $(G/P)_q$ as built from quantized Schubert Cells can be built from a single cell. Furthermore, we prove that we can pass between our seeds by Schubert creation and annihilation mutations inside a larger ambient space.
This sets the stage for (simple) inductive arguments which is a major point of this article, and is what we will pursue here.
We first prove by induction that the two-sided ideal $I({\det}_{s}^{{\mathfrak a},{\mathfrak c}})$ in ${\mathcal A}_q({\mathfrak a},{\mathfrak c})$ generated by the quantized minor ${\det}_{s}^{{\mathfrak a},{\mathfrak c}}$ is prime.
Then we prove that each upper cluster algebra ${\mathbb U}({\mathfrak a},{\mathfrak c})$ equals its quadratic algebra ${\mathcal A}_q({\mathfrak a},{\mathfrak c})$.
There is a sizable overlap between these results and results previously obtained by K. Goodearl and M. Yakimov (\cite{good}, \cite{good1}).
We further use our method to study the diagonal of a quantum minor.
The idea of induction in this context was introduced in \cite{jz} and applications were studied in the case of a specific type of parabolic related to type $A_n$. Further ideas relating to explicit constructions of compatible pairs in special cases were studied in \cite{jp}.
\section{A little about quantum groups and cluster algebras}
\subsection{Quantum Groups}
We consider quantized enveloping algebras $U={\mathcal U}_q({\mathfrak g})$ in the standard notation given e.g. by Jantzen (\cite{jan}) or by Berenstein and Zelevinsky (\cite{bz}), though their assumptions do not coincide completely. To be completely on the safe side, we state our assumptions and notation, where they may differ: Our algebra is a Hopf algebra defined in the usual fashion from a semi-simple finite-dimensional complex Lie algebra ${\mathfrak g}$. It is an algebra over ${\mathbb Q}(q)$. $\Phi$ denotes a given set of roots and throughout, $\Pi=\{\alpha_1,\alpha_2,\dots,\alpha_R\}$ a fixed choice of simple roots. Our generators are then given as $$\{E_\alpha,F_\alpha,K^\alpha\}_{\alpha\in\Pi},$$ but we will allow elements of the form $K^\eta$ for any integer weight. $W$ denotes the Weyl group defined by $\Phi$.
Finally we let $\{\Lambda_\alpha\mid\alpha\in\Pi\}$ denote the set of fundamental weights. We assume throughout that the diagonalizing elements $d_\alpha$ are determined by \begin{equation}
\forall \alpha\in\Pi:(\Lambda_\alpha,\alpha)=d_\alpha. \end{equation}
\begin{Lem}[(2.27) in \cite{fz}]\label{3.1} Let $\alpha_i\in \Phi$. Then $$(\sigma_i+1)(\Lambda_i)+\sum_{j\neq i}a_{ji}(\Lambda_j)=0.$$ \end{Lem}
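For later reference we note the one-line verification (with the index convention on the Cartan matrix that the statement itself forces): since $\sigma_i(\Lambda_i)=\Lambda_i-\alpha_i$ and $\alpha_i=\sum_{j}a_{ji}\Lambda_j$ with $a_{ii}=2$,
\begin{equation}
(\sigma_i+1)(\Lambda_i)+\sum_{j\neq i}a_{ji}(\Lambda_j)=2\Lambda_i-\alpha_i+\bigl(\alpha_i-2\Lambda_i\bigr)=0 .
\end{equation}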
\subsection{Quantum Cluster Algebras}
We take over without further ado the terminology and constructions of (\cite{bz}). Results from \cite{leclerc} are also put to good use.
\begin{Def} We say that two elements $A,B$ in some algebra over ${\mathbb C}$ $q$-commute if, for some $r\in{\mathbb R}$: \begin{equation}AB=q^rBA. \end{equation} \end{Def}
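For instance, with the standard relations of ${\mathcal U}_q({\mathfrak g})$ (in the normalization we use) one has, for any integer weight $\eta$ and any $\alpha\in\Pi$,
\begin{equation}
K^{\eta}E_\alpha=q^{(\eta,\alpha)}E_\alpha K^{\eta},\qquad K^{\eta}F_\alpha=q^{-(\eta,\alpha)}F_\alpha K^{\eta},
\end{equation}
so the elements $K^{\eta}$ $q$-commute with all weight vectors; this is the prototype of the $q$-commutation relations encountered below.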
To distinguish between the original mutations and the more elaborate ones we need here, and to honor the founding fathers A. Berenstein, S. Fomin, and A. Zelevinsky, we use the following terminology: \begin{Def}A quantum mutation as in \cite{bz} is called a BFZ-mutation. \end{Def}
\subsection{A simple observation}
If $\underline{a}=(a_1,a_2,\dots,a_{\mathfrak m})$ and $\underline{f}=(f_1,f_2,\dots,f_{\mathfrak m})$ are vectors, then \begin{Lem}{(\cite{jz})}\label{2.22}\begin{equation} {\mathcal L}_q(\underline{a})^T=(\underline{f} )^T\Leftrightarrow\forall i:X_iX^{\underline{a}}=q^{f_i}X^{\underline{a}}X_i.\end{equation} In particular, if there exists a $j$ such that $\forall i: f_i=-\delta_{i,j}$ then the column vector $\underline{a}$ can be the $j$th column in the matrix ${\mathcal B}$ of a compatible pair. \end{Lem} \noindent However simple this observation is, it will be of great importance later on.
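To illustrate the lemma in the simplest case (with the convention $X_iX_j=q^{\lambda_{ij}}X_jX_i$ for the entries of ${\mathcal L}_q$, which is how we read the statement), let ${\mathfrak m}=2$ and $X_1X_2=qX_2X_1$. For $\underline{a}=(a_1,a_2)$ one computes $X_1X^{\underline{a}}=q^{a_2}X^{\underline{a}}X_1$ and $X_2X^{\underline{a}}=q^{-a_1}X^{\underline{a}}X_2$, i.e.
\begin{equation}
{\mathcal L}_q(\underline{a})^T=(a_2,-a_1)^T=(\underline{f})^T,
\end{equation}
in agreement with the lemma. In particular, $\underline{a}=(1,0)$ gives $f_i=-\delta_{i,2}$, so this column may serve as the second column of ${\mathcal B}$ in a compatible pair.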
\section{On Parabolics}
The origin of the following lies in A. Borel \cite{borel}, and B. Kostant \cite{kos}. Other main contributors are \cite{bgg} and \cite{stein}. See also \cite{cap}. We have also found (\cite{sager}) useful.
\begin{Def} Let $w\in W$. Set $$\Phi_\omega=\{\alpha\in \Delta^+\mid w^{-1}\alpha\in \Delta^-\}=w( \Delta^-)\cap \Delta^+.$$\end{Def}
We have that $\ell(w)=\ell(w^{-1})=\vert\Phi_\omega\vert$.
We set $\Phi_\omega=\Delta^+(w)$.
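For orientation, here is a small example in type $A_2$, writing $\sigma_1,\sigma_2$ for the simple reflections: for $w=\sigma_1\sigma_2$ one has $w(-\alpha_2)=\alpha_1+\alpha_2$, $w(-\alpha_1-\alpha_2)=\alpha_1$ and $w(-\alpha_1)=-\alpha_2$, so
\[
\Phi_w=\{\alpha_1,\ \alpha_1+\alpha_2\},\qquad \vert\Phi_w\vert=2=\ell(w).
\]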
From now on, we work with a fixed parabolic $\mathfrak p$ with a Levi decomposition \begin{equation} {\mathfrak p}={\mathfrak l}+{\mathfrak u}, \end{equation} where ${\mathfrak l}$ is the Levi subalgebra, and where we assume ${\mathfrak u}\neq 0$.
Let
\begin{Def} \begin{eqnarray*} W_p&=&\{w\in W\mid \Phi_\omega\subseteq \Delta^+({\mathfrak l})\},\\ W^p&=&\{w\in W\mid \Phi_\omega\subseteq \Delta^+({\mathfrak u})\}. \end{eqnarray*} $W^p$ is a set of distinguished representatives of the right coset space $W_p\backslash W$. \end{Def}
It is well known (see eg (\cite{sager})) that any $w\in W$ can be written uniquely as $w=w_pw^p$ with $w_p\in W_p$ and $w^p\in W^p$.
One defines, for each $w$ in the Weyl group $W$, the Schubert cell $X_w$. This is a cell in ${\mathbb P}(V)$, the projective space over a specific finite-dimensional representation of ${\mathfrak g}$. The closure, $\overline{X_w}$, is called a Schubert variety. The main classical theorems are
\begin{Thm}[Kostant,\cite{kos}]$$G/P=\sqcup_{w\in W^p}X_w.$$\end{Thm}
\begin{Thm}[\cite{stein}]\label{stein} Let $w,w'\in W^p$. Then $$X_{w'}\subseteq \overline{X_{w}}$$ if and only if $w'\leq w$ in the usual Bruhat ordering. \end{Thm}
If $\omega^{\mathfrak r}=\omega_m\tilde\omega$ and $\omega_{m}=\omega_n\hat\omega$ with $\omega_n,\omega_m\in W^p$ and all Weyl group elements reduced, we say that $\omega_n<_L\omega_m$ if $\hat\omega\neq e$. This is the weak left Bruhat order.
\section{The quadratic algebras}\label{sec4}
Let $\omega=s_{\alpha_1}s_{\alpha_2}\dots s_{\alpha_t}$ be an element of the Weyl group written in reduced form. Following Lusztig (\cite{luz}), we construct roots $\gamma_i=\omega_{i-1}(\alpha_i)$ and elements $Z_{\gamma_i}\in {\mathcal U}_q({\mathfrak n}_\omega)$.
The following result is well known, but notice a change $q\to q^{-1}$ in relation to (\cite{jak-cen}).
\begin{Thm}[\cite{lev},\cite{lev0}] \label{4.1}Suppose that $1\leq i<j\leq t$. Then $$Z_{i}Z_{j}=q^{-( \gamma_i,\gamma_j)}Z_{j}Z_{i} + {\mathcal R}_{ij},$$ where ${\mathcal R}_{ij}$ is of lower order in the sense that it involves only elements $Z_k$ with $i< k< j$. Furthermore, the elements $$Z_t^{a_t}\dots Z_2^{a_2}Z_1^{a_1}$$ with $a_1,a_2,\dots,a_t\in{\mathbb N}_0$ form a basis of ${\mathcal U}_q({\mathfrak n}_\omega)$. \end{Thm} Our statement follows \cite{jan},\cite{jan2}. Other authors, eg. \cite{lev}, \cite{leclerc} have used the other Lusztig braid operators. The result is just a difference between $q$ and $q^{-1}$. Proofs of this theorem which are more accessible are available (\cite{cp},\cite{jan2}).
It is known that this algebra is isomorphic to the algebra of functions on ${\mathcal U}_q({\mathfrak n}_\omega)$ satisfying the usual finiteness condition. It is analogously equivalent to the algebra of functions on ${\mathcal U}^-_q({\mathfrak n}_\omega)$ satisfying a similar finiteness condition. See eg (\cite{leclerc}) and (\cite{jan}).
\section{Basic structure}\label{sec5}
Let $\omega^{\mathfrak p}$ be the maximal element in $W^p$. It is the one which maps all roots in $\Delta^+({\mathfrak u})$ to $\Delta^-$. (Indeed: to $\Delta^-({\mathfrak u})$.) Let $w_0$ be the longest element in $W$ and $w_L$ the longest in the Weyl group of ${\mathfrak l}$. Then \begin{equation}w^{\mathfrak p}w_L=w_0.\end{equation}
Let $\omega^{\mathfrak r}=\sigma_{i_1}\sigma_{i_2}\cdots\sigma_{i_r}\in W^p$ be written in a fixed reduced form. Then $\ell(\omega^{\mathfrak r})=r$. We assume here that $r\geq 1$. We set $e=\omega^{\mathfrak e}$ and $\ell(\omega^{\mathfrak e})=0$ where ${\mathfrak e}$ denotes the empty set, construed as the empty sequence. We also let ${\mathfrak r}$ denote the sequence $i_1,i_2,\dots,i_r$ if ${\mathfrak r}\neq {\mathfrak e}$. If a sequence ${\mathfrak s}$ corresponds to an analogous element $\omega^{\mathfrak s}\in W^p$ we define \begin{equation} {\mathfrak s}\leq {\mathfrak r}\Leftrightarrow \omega^{\mathfrak s}\leq_L \omega^{\mathfrak r}. \end{equation}
Set \begin{equation}\Delta^+(\omega^{\mathfrak r})=\{ \beta_{i_1},\dots,\beta_{i_r}\}.\end{equation}
\begin{Def} Let ${\mathbf b}$ denote the map $\Pi\to\{1,2,\dots,R\}$ defined by ${\mathbf b}(\alpha_i)=i$. Let $\overline\pi_{\mathfrak r}:\{1,2,\dots, r\}\to\Pi$ be given by \begin{equation}\overline\pi_{\mathfrak r}(j)=\alpha_{i_j}.\end{equation} If $\overline\pi_{\mathfrak r}(j)=\alpha$ we say that $\alpha$ (or $\sigma_\alpha$) occurs at position $j$ in $w^{\mathfrak r}$, and we say that $\overline\pi_{\mathfrak r}^{-1}(\alpha)$ are the positions at which $\alpha$ occurs in $w$. Set \begin{equation} {\pi}_{\mathfrak r}={\mathbf b}\circ\overline\pi_{\mathfrak r}. \end{equation} \end{Def}
$\pi_{\mathfrak e}$ is construed as a map whose image is the empty set.
Recall from (\cite{jak-cen}):
\begin{Def}Let $\omega^{\mathfrak r}\in W^p$ be given and suppose $s\in Im(\pi_{\mathfrak r})$. Then $s=\pi_{\mathfrak r}(n)$ for some $n$ and we set $\omega_n:=\sigma_{i_1}\sigma_{i_2}\cdots\sigma_{i_n}$. Suppose $\omega_n=\omega_1 \sigma_{i_n}\omega_2\dots\omega_t \sigma_{i_n}$ and $\omega_i\in W\setminus\{e\}$ for $i>1$. Further assume that each $\omega_i$ is reduced and does not contain any $\sigma_{i_n}$. We denote this simply as $n\leftrightarrow (s,t)$. We further write $\beta_{n}\leftrightarrow \beta_{s,t}$ and \begin{equation}\omega_n\leftrightarrow \omega_{s,t}\end{equation} if $n,s,t$ are connected as above. It is convenient to set $\omega_{s,0}=e$ for all $s\in\{1,2,\dots, R\}$.
For a fixed $s\in\{1,2,\dots,R\}$ we let $s_{\mathfrak r}$ denote the maximal such $t$. If there is no such decomposition we set $t=0$. So, in particular, $s_{\mathfrak e}=0$, and $s_{\mathfrak r}$ is the number of times $\sigma_s$ occurs in $\omega^{\mathfrak r}$. Finally we set (cf. (\cite{jak-cen})) \begin{equation} {\mathbb U}({\mathfrak r})=\{(s,t)\in {\mathbb N}\times {\mathbb N}_0\mid 1\leq s\leq R\textrm{ and }0\leq t\leq s_{\mathfrak r}\}. \end{equation} \end{Def}
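To fix ideas, consider the following minimal example (type $A_2$ with ${\mathfrak p}$ the Borel subalgebra, so that $W_p=\{e\}$ and $W^p=W$): for ${\mathfrak r}=(1,2,1)$, i.e. $\omega^{\mathfrak r}=\sigma_1\sigma_2\sigma_1$, the simple reflection $\sigma_1$ occurs at positions $1$ and $3$ and $\sigma_2$ at position $2$. Thus $1_{\mathfrak r}=2$, $2_{\mathfrak r}=1$, the correspondences are $1\leftrightarrow(1,1)$, $2\leftrightarrow(2,1)$, $3\leftrightarrow(1,2)$, and
\begin{equation}
{\mathbb U}({\mathfrak r})=\{(1,0),(1,1),(1,2),(2,0),(2,1)\}.
\end{equation}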
Notice that if $(s,t)\in{\mathbb U}({\mathfrak r})$ then we may construct a subset ${\mathbb U}({\mathbf s}, {\mathbf t})$ of ${\mathbb U}$ by the above recipe, replacing $\omega^{\mathfrak r}$ by $\omega_{s,t}$. In this subset $t$ is maximal. Likewise, if ${\mathfrak s}\leq {\mathfrak r}$ we have of course ${\mathbb U}({\mathfrak s})\subseteq {\mathbb U}({\mathfrak r})$ and may set ${\mathbb U}({\mathfrak r}\setminus {\mathfrak s})={\mathbb U}({\mathfrak r})\setminus {\mathbb U}({\mathfrak s})$.
\section{Key structures and background results}
\subsection{Quantized minors}
Following a construction of classical minors by S. Fomin and A. Zelevinsky \cite{fz}, the last mentioned and A. Berenstein have introduced a family of quantized minors $\Delta_{u\cdot\lambda,v\cdot\lambda}$ in \cite{bz}. These are elements of the quantized coordinate ring ${\mathcal O}_q(G)$. The results by K. Brown and K. Goodearl (\cite{brown}) were important in this process.
The element $\Delta_{u\cdot\lambda,v\cdot\lambda}$ is determined by $u,v\in W$ and a positive weight $\lambda$. We will always assume that $u\leq_L v$.
\subsection{Identifications} There is a well-known pairing between ${\mathcal U}^{\leq}$ and ${\mathcal U}^{\geq}$ (\cite{jan}) and there is a unique bilinear form on ${\mathcal U}_q({\mathfrak n})$. With this we can identify $({\mathcal U}^{\geq})^*$ with ${\mathcal U}^{\geq}$. One can even define a product in $({\mathcal U}_q({\mathfrak n}))^*$ that makes it isomorphic to ${\mathcal U}_q({\mathfrak n})$ \cite{leclerc}. We can in this way identify the elements $\Delta_{u\cdot\lambda,v\cdot\lambda}$ with elements of ${\mathcal U}^{\geq}$.
\subsection{Key results from \cite{bz} and \cite{leclerc}}
The quantized minors are by definitions functions on ${\mathcal U}_q({\mathfrak g})$ satisfying certain finiteness conditions. What is needed first are certain commutation relations that they satisfy. Besides this, they can be restricted to being functions on ${\mathcal U}_q({\mathfrak b})$ and even on ${\mathcal U}_q({\mathfrak n})$. Our main references here are (\cite{bz}) and (\cite{leclerc}); the details of the following can be found in the latter.
\begin{Lem}[\cite{bz}]The element $\triangle_{u\lambda,v\lambda}$ indeed depends only on the weights $u\lambda,v\lambda$, not on the choices of $u, v$ and their reduced words. \end{Lem}
\begin{Thm}[A version of Theorem~10.2 in \cite{bz}] \label{10.2}For any $\lambda,\mu\in P^+$, and $s, s', t, t' \in W$ such that $$\ell(s's) = \ell(s') + \ell(s), \ell(t't) = \ell(t') + \ell(t) ,$$the following holds:
$$ \triangle_{s's\lambda,t'\lambda} \cdot \triangle_{s'\mu,t't\mu} =q^{(s\lambda |
\mu) - (\lambda | t\mu)}\triangle_{s'\mu,t't\mu} \cdot \triangle_{s's\lambda,t'\lambda}.$$ \end{Thm}
It is very important for the following that the conditions essentially are on the Weyl group elements. The requirement on $\lambda,\mu$ is furthermore independent of those.
An equally important fact we need is the following $q$-analogue of \cite[Theorem~1.17]{fz}:
\begin{Thm}[\cite{leclerc}, Proposition~3.2]\label{3.2} Suppose that for $u,v\in W$ and $i\in I$ we have $l(us_i)=l(u)+1$ and $l(vs_i)=l(v)+1$. Then
\begin{equation}\label{eq3.2} \Delta_{us_i(\Lambda_i),vs_i(\Lambda_i)}\,\Delta_{u(\Lambda_i),v(\Lambda_i)}= ({q^{-d_i}})\Delta_{us_i(\Lambda_i),v(\Lambda_i)}\,\Delta_{ u(\Lambda_i), vs_i(\Lambda_i)}+ \prod_{j\neq i}\Delta_{u(\Lambda_j),v(\Lambda_j)}^{-a_{ji}} \end{equation} holds in ${\mathcal O}_q(\frak g)$. \end{Thm}
(That a factor $q^{-d_i}$ must be inserted for the general case is clear.)
One considers in \cite{leclerc}, and transformed to our terminology, modified elements \begin{equation}\label{59}D_{\xi,\eta}=\triangle_{\xi,\eta}K^{-\eta}.\end{equation} We suppress here the restriction map $\rho$, and our $K^{-\eta}$ is denoted as $\triangle^\star_{\eta,\eta}$ in \cite{leclerc}. The crucial property is that \begin{equation} K^{-\eta}\triangle_{\xi_1,\eta_1}=q^{-(\eta,\xi_1-\eta_1)}\triangle_{\xi_1, \eta_1}K^{-\eta}. \end{equation}
The family $D_{\xi,\eta}$ satisfies equations analogous to those in Theorem~\ref{10.2} subject to the same restrictions on the relations between the weights.
The following result is important:
\begin{Prop}[\cite{leclerc}] Up to a power of $q$, the following holds: \begin{equation}Z_{c,d}=D_{\omega^{\mathfrak r}_{c,d-1}(\Lambda_c),\omega^{\mathfrak r}_{c,d}(\Lambda_c)}. \end{equation} \end{Prop}
We need a small modification of the elements $D_{\xi,\eta}$ of \cite{leclerc}:
\begin{Def} \begin{equation}E_{\xi,\eta}:=q^{\frac14(\xi-\eta,\xi-\eta)+\frac12(\rho, \xi-\eta)}D_{\xi,\eta}.\end{equation} \end{Def}
It is proved in (\cite{ki}), (\cite{re}) that $E_{\xi,\eta}$ is invariant under the dual bar anti-homomorphism augmented by $q\to q^{-1}$.
Notice that this change does not affect commutators: \begin{equation}
D_1D_2=q^\alpha D_2D_1\Leftrightarrow E_1E_2=q^\alpha E_2E_1 \end{equation} if $E_i=q^{x_i}D_i$ for $i=1,2$.
\begin{Def}We say that \begin{equation}\label{less} E_{\xi,\eta}<E_{\xi_1,\eta_1} \end{equation} if $\xi=s's\lambda$, $\eta=t'\lambda$, $\xi_1=s'\mu$ and $\eta_1=t't\mu$ and the conditions of Theorem~\ref{10.2} are satisfied. \end{Def}
The crucial equation is \begin{Cor} \begin{equation} E_{\xi,\eta}<E_{\xi_1,\eta_1}\Rightarrow E_{\xi,\eta}E_{\xi_1,\eta_1}=q^{(\xi-\eta,\xi_1+\eta_1)}E_{\xi_1,\eta_1}E_{\xi, \eta}. \end{equation}\end{Cor}
\subsection{Connecting with the toric frames}
\begin{Def}Suppose that $\triangle_i$, $i=1,\dots,r$ is a family of mutually $q$-commuting elements. Let $n_1,\dots,n_r\in{\mathbb Z}$. We then set \begin{equation}N(\prod_{i=1}^r \triangle_i^{n_i})=q^m\prod_{i=1}^r\triangle_i^{n_i}, \end{equation}where $q^m$ is determined by the requirement that \begin{equation}q^{-m}\triangle_r^{n_r}\dots \triangle_2^{n_2}\triangle_1^{n_1}= q^m \triangle_1^{n_1}\triangle_2^{n_2}\dots \triangle_r^{n_r}. \end{equation} \end{Def} It is easy to see that \begin{equation} \forall \mu\in S_r: N(\prod_{i=1}^r \triangle_{\mu(i)}^{n_{\mu(i)}})=N(\prod_{i=1}^r \triangle_i^{n_i}). \end{equation}
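In the simplest case of two factors this reads as follows: if $\triangle_1\triangle_2=q^{\lambda_{12}}\triangle_2\triangle_1$, the defining requirement forces $q^{m}=q^{-\lambda_{12}/2}$, so
\begin{equation}
N(\triangle_1\triangle_2)=q^{-\lambda_{12}/2}\triangle_1\triangle_2=q^{\lambda_{12}/2}\triangle_2\triangle_1=N(\triangle_2\triangle_1),
\end{equation}
i.e. $N$ produces the symmetrized product, which is manifestly independent of the ordering of the factors.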
It is known through \cite{bz} that eg. the quantum minors are independent of the choices of the reduced form of $\omega^{\mathfrak r}_{\mathfrak p}$. Naturally, this carries over to $\omega^{\mathfrak r}$. The quadratic algebras we have encountered are independent of actual choices. In the coming definition we wish to maintain precisely the right amount of independence.
Let us now formulate Theorem~\ref{3.2} in our language while using the language and notation of toric frames from \cite{bz}. In the following Theorem we first state a formula which uses our terminology, and then we reformulate it in the last two lines in terms of toric frames $M$. These frames are defined by a cluster made up by certain elements of the form $E_{\xi,\eta}$ to be made more precise later.
\begin{Thm}\label{toric} \begin{eqnarray}
E_{us_i\Lambda_i,vs_i\Lambda_i}&=&N\left( E_{us_i\Lambda_i,v\Lambda_i}
E_{u\Lambda_i,vs_i\Lambda_i} E_{u\Lambda_i,v\Lambda_i}^{-1}\right) \\\nonumber&+&N\left((\prod_{j\neq i} E_{u(\Lambda_j),v(\Lambda_j)}^{-a_{ji}}) E_{u(\Lambda_i),v(\Lambda_i)}^{-1}\right)\\ &=&M(E_{us_i(\Lambda_i),v(\Lambda_i)}+E_{u(\Lambda_i), vs_i(\Lambda_i)}-E_{u(\Lambda_i),v(\Lambda_i)})\\&+& M(\sum_{j\neq i}-a_{ji}E_{u(\Lambda_j),v(\Lambda_j)}-E_{u(\Lambda_i),v(\Lambda_i)}). \end{eqnarray} \end{Thm}
\noindent{\em Proof of Theorem~\ref{toric}:} We first state a lemma whose proof is omitted as it is straightforward.
\begin{Lem} Let $\Delta_{\xi_k}$ be a family of $q$-commuting elements of weights $\xi_k$, $k=1,\dots,r$ in the sense that for any weight $b$:\begin{equation} \forall k=1,\dots,r: K^b\Delta_{\xi_k}=q^{(b,\xi_k)}\Delta_{\xi_k}K^b. \end{equation} Let $\alpha$ be defined by \begin{equation} \Delta_{\xi_r}\cdots\Delta_{\xi_1}=q^{-2\alpha} \Delta_{\xi_1}\cdots\Delta_{\xi_r} \end{equation}Furthermore, let $b_1,\dots,b_r$ be integer weights. Then \begin{eqnarray} &(\Delta_{\xi_1}\Delta_{\xi_2}\cdots\Delta_{\xi_r}) K^{b_1}K^{b_2}\cdots K^{b_r}=\\\nonumber &q^{\sum_{k<\ell}(b_k,\xi_\ell)}(\Delta_{\xi_1}K^{b_1})(\Delta_{\xi_2}K^{ b_2})\cdots(\Delta_{\xi_r}K^{b_r}),\textrm{ and,}\\\nonumber &(\Delta_{\xi_r}K^{b_r})\cdots(\Delta_{\xi_1}K^{b_1})=\\\nonumber &q^{-2\alpha} q^{(\sum_{k<\ell}-\sum_{\ell<k})(b_\ell,\xi_k)}(\Delta_{\xi_1}K^{b_1} )\cdots(\Delta_{\xi_r}K^{b_r}), \textrm{ so that}\\\nonumber &(\Delta_{\xi_1}K^{b_1})\cdots(\Delta_{\xi_r}K^{b_r})=\\\nonumber &q^{\alpha} q^{-\frac12(\sum_{k<\ell}-\sum_{\ell<k})(b_\ell,\xi_k)}N\left( (\Delta_{\xi_1}K^{b_1})\cdots(\Delta_{\xi_r}K^{b_r})\right). \end{eqnarray} Finally, \begin{eqnarray} &q^{-\alpha}(\Delta_{\xi_1}\Delta_{\xi_2}\cdots\Delta_{\xi_r}) K^{b_1}K^{b_2} \cdots K^{b_r}=\\\nonumber&q^{-\frac12(\sum_{\ell\neq k})(b_\ell,\xi_k)}N\left( (\Delta_{\xi_1}K^{b_1})\cdots(\Delta_{\xi_r}K^{b_r})\right). \end{eqnarray} \end{Lem}
We apply this lemma first to the case where the elements $\xi_k$ are taken from the set $\{-\textrm{sign}(a_{ki})(u\Lambda_k-v\Lambda_k)\mid a_{ki}\neq0 \}$ and where each element corresponding to an $a_{ki}<0$ is taken $-a_{ki}$ times. Then $r=\sum_{k\neq i}\vert a_{ki}\vert+1$. The terms considered actually commute so that here, $\alpha=0$. The weights $b_k$ are chosen in the same fashion, but here $b_k=\textrm{sign}(a_{ki})(v\Lambda_k)$. We have that \begin{equation}\sum_{\ell\neq k}(b_\ell,\xi_k)=\left(\sum_{\ell}b_\ell, \sum_{k}\xi_k\right)-\sum_{k}(b_k,\xi_k). \end{equation}
It follows from (\ref{3.1}) that $\sum_{\ell}b_\ell=-vs_i\Lambda_i$ and $\sum_{k}\xi_k=(us_i\Lambda_i-vs_i\Lambda_i)$. Now observe that for all $k$: $-(v\Lambda_k,(u-v)\Lambda_k)=\frac12(\xi_k,\xi_k)$. Let $\xi_0=(us_i-vs_i)\Lambda_i$. The individual summands in $\sum_k(b_k,\xi_k)$ can be treated analogously. Keeping track of the multiplicities and signs, it follows that \begin{eqnarray} q^{-\alpha}(\Delta_{\xi_1}\Delta_{\xi_2}\dots\Delta_{\xi_r})K^{b_1}K^{b_2}\dots K^{b_r}=\\\nonumber q^{-\frac14(\xi_0,\xi_0)+\frac14\sum_k\varepsilon_k(\xi_k,\xi_k)}N\left( (\Delta_{\xi_1}K^{b_1})\dots(\Delta_{\xi_r}K^{b_r})\right). \end{eqnarray}
Let us turn to the term \begin{equation} q^{-d_i}\Delta_{us_i\Lambda_i,v\Lambda_i} \Delta_{u\Lambda_i,vs_i\Lambda_i}\Delta_{u\Lambda_i,v\Lambda_i}^{-1}K^{ -vs_i\Lambda_i}. \end{equation} We can of course set $K^{-vs_i\Lambda_i}=K^{-v\Lambda_i}K^{-vs_i\Lambda_i}K^{v\Lambda_i}$. Furthermore, it is known (and easy to see) that \begin{eqnarray} &\Delta_{u\Lambda_i,v\Lambda_i}^{-1}\Delta_{u\Lambda_i,vs_i\Lambda_i} \Delta_{us_i\Lambda_i,v\Lambda_i}=\\\nonumber&q^{-2d_i}\Delta_{us_i\Lambda_i, v\Lambda_i} \Delta_{u\Lambda_i,vs_i\Lambda_i}\Delta_{u\Lambda_i,v\Lambda_i}^{-1}, \end{eqnarray} so that $\alpha=d_i$ here. We easily get again that $\sum_{\ell}b_\ell=-vs_i\lambda_i$ and $\sum_{k}\xi_k=(us_i\Lambda_i-vs_i\lambda_i)$.
Let us introduce elements $\tilde E_{\xi,\eta}=q^{\frac14(\xi-\eta,\xi-\eta)}\Delta_{\xi,\eta}K^{-\eta}$. It then follows that (c.f. Theorem~\ref{3.2})
\begin{eqnarray} \tilde E_{us_i\Lambda_i,vs_i\Lambda_i}&=&N\left(\tilde E_{us_i\Lambda_i,v\Lambda_i} \tilde E_{u\Lambda_i,vs_i\Lambda_i}\tilde E_{u\Lambda_i,v\Lambda_i}^{-1}\right) \\\nonumber&+&N\left((\prod_{j\neq i}\tilde E_{u(\Lambda_j),v(\Lambda_j)}^{-a_{ji}})\tilde E_{u(\Lambda_i),v(\Lambda_i)}^{-1}\right). \end{eqnarray}
The elements $E_{\xi,\eta}$ differ from the elements $\tilde E_{\xi,\eta}$ by a factor which is $q$ to an exponent which is linear in the weight $(\xi-\eta)$. Hence an equation identical to the above holds for these elements. \qed
\section{Compatible pairs}
We now construct some general families of quantum clusters and quantum seeds. The first, simplest, and most important, correspond to double Schubert Cells:
Let ${\mathfrak e}\leq {\mathfrak s}<{\mathfrak t}<{\mathfrak v}\leq{\mathfrak p}$.
Set
\begin{eqnarray*}{\mathbb U}^{d,{\mathfrak t},{\mathfrak v}}&:=&\{(a,j)\in {\mathbb U}({\mathfrak p})\mid a_{\mathfrak t}<j\leq a_{\mathfrak v}\},\\ {\mathbb U}_{R<}^{d,{\mathfrak t},{\mathfrak v}}&:=&\{(a,j)\in {\mathbb U}({\mathfrak p})\mid a_{\mathfrak t}<j< a_{\mathfrak v}\},\\ {\mathbb U}^{u,{\mathfrak s},{\mathfrak t}}&:=&\{(a,j)\in {\mathbb U}({\mathfrak p})\mid a_{\mathfrak s}\leq j< a_{\mathfrak t}\},\\ {\mathbb U}_{L<}^{u,{\mathfrak s},{\mathfrak t}}&:=&\{(a,j)\in {\mathbb U}({\mathfrak p})\mid a_{\mathfrak s}< j< a_{\mathfrak t}\}. \end{eqnarray*}
Further, set \begin{eqnarray} {\mathbb U}^{d,{\mathfrak t}}&=&{\mathbb U}^{d,{\mathfrak t},{\mathfrak p}},\\
{\mathbb U}^{u,{\mathfrak t}}&=&{\mathbb U}^{u,{\mathfrak e},{\mathfrak t}}.
\end{eqnarray}
It is also convenient to define \begin{Def} \begin{eqnarray}E_s(i,j)&:=&E_{\omega^{{\mathfrak p}}_{(s,i)}\Lambda_s,\omega^{\mathfrak p}_{(s,j)}\Lambda_s} \quad(0\leq i<j\leq s_{{\mathfrak p}}). \end{eqnarray} For $j'\geq s_{\mathfrak t}$ we set \begin{equation} E^d_{\mathfrak t}(s,j'):=E_s(s_{\mathfrak t},j'). \end{equation} For $j'\leq s_{\mathfrak t}$ we set \begin{equation} E^u_{\mathfrak t}(s,j'):=E_s(j',s_{\mathfrak t}). \end{equation} Finally, we set \begin{eqnarray} {\mathcal C}_q^d({\mathfrak t},{\mathfrak v})&=&\{E^d_{\mathfrak t}(s,j')\mid (s,j')\in {\mathbb U}^{d,{\mathfrak t},{\mathfrak v}}\},\\ {\mathcal C}_q^u({\mathfrak s},{\mathfrak t})&=&\{E^u_{\mathfrak t}(s,j')\mid (s,j')\in {\mathbb U}^{u,{\mathfrak s},{\mathfrak t}}\},\\ {\mathcal C}_q^d({\mathfrak t})&=&{\mathcal C}_q^d({\mathfrak t},{\mathfrak p}),\textrm{ and}\\ {\mathcal C}_q^u({\mathfrak t})&=&{\mathcal C}_q^u({\mathfrak e},{\mathfrak t}). \end{eqnarray} \end{Def}
It is clear that ${\mathcal C}_q^d({\mathfrak t},{\mathfrak v})\subseteq {\mathcal C}_q^d({\mathfrak t})$ for any ${\mathfrak v}>{\mathfrak t}$ and ${\mathcal C}_q^u({\mathfrak s},{\mathfrak t})\subseteq {\mathcal C}_q^u({\mathfrak t})$ for any ${\mathfrak s}<{\mathfrak t}$.
\begin{Lem}The elements in the set ${\mathcal C}_q^d({\mathfrak t})$ are $q$-commuting and the elements in the set ${\mathcal C}_q^u({\mathfrak t})$ are $q$-commuting.\label{above} \end{Lem}
The proof is omitted as it is very similar to the proof of Proposition~\ref{7.13} which comes later.
\begin{Def}${\mathcal A}_q^d({\mathfrak t}, {\mathfrak v})$ denotes the ${\mathbb C}$-algebra generated by ${\mathcal C}_q^d({\mathfrak t},{\mathfrak v})$ and ${\mathcal A}_q^u({\mathfrak s},{\mathfrak t})$ denotes the ${\mathbb C}$-algebra generated by ${\mathcal C}_q^u({\mathfrak s},{\mathfrak t})$. Further, ${\mathcal F}_q^d({\mathfrak t},{\mathfrak v})$ and ${\mathcal F}_q^u({\mathfrak s},{\mathfrak t})$ denote the corresponding skew-fields of fractions. Likewise, ${\mathbf L}_q^d({\mathfrak t},{\mathfrak v})$ and ${\mathbf L}_q^u({\mathfrak s},{\mathfrak t})$ denote the respective Laurent quasi-polynomial algebras. Finally, ${\mathcal L}_q^d({\mathfrak t},{\mathfrak v})$ and ${\mathcal L}_q^u({\mathfrak s},{\mathfrak t})$ denote the symplectic forms associated with the clusters ${\mathcal C}_q^d({\mathfrak t},{\mathfrak v})$, and ${\mathcal C}_q^u({\mathfrak s},{\mathfrak t})$, respectively. \end{Def}
\begin{Def}Whenever ${\mathfrak a}<{\mathfrak b}$, we set \begin{equation}\forall s\in Im(\pi_{\mathfrak b}): {\det}_{s}^{{\mathfrak a},{\mathfrak b}}:=E_{\omega^{\mathfrak a}\Lambda_s,\omega^{\mathfrak b}\Lambda_s}. \end{equation} \end{Def}
We conclude in particular that
\begin{Prop}\label{quasipol}The elements ${\det}_{s}^{{\mathfrak t},{\mathfrak p}}$ $q$-commute with all elements in the algebra ${\mathcal A}_q^d({\mathfrak t})$ and the elements ${\det}_{s}^{{\mathfrak e},{\mathfrak t}}$ $q$-commute with all elements in the algebra ${\mathcal A}_q^u({\mathfrak t})$. \end{Prop}
\begin{Def}An element $C$ in a quadratic algebra ${\mathcal A}$ that $q$-commutes with all the generating elements is said to be covariant. \end{Def}
As a small aside, we mention the following easy generalization of the result in (\cite{jak-cen}):
\begin{Prop}If ${\mathfrak a}<{\mathfrak b}$, then the spaces ${\mathcal A}_q^u({\mathfrak a},{\mathfrak b})$ and ${\mathcal A}_q^d({\mathfrak a},{\mathfrak b})$ are quadratic algebras. In both cases, the center is given by $Ker(\omega^{\mathfrak a}+\omega^{\mathfrak b})$. The semi-group of covariant elements is generated by $\{ {\det}_{s}^{{\mathfrak a},{\mathfrak b}}\mid s\in Im(\pi_{\mathfrak b})\}$. \end{Prop}
We now construct some elements in ${\mathbf L}_q^d({\mathfrak t})$ and ${\mathbf L}_q^u({\mathfrak t})$ of fundamental importance. They are indeed monomials in the elements of $\left[{\mathcal C}_q^d({\mathfrak t})\right]^{\pm1}$ and $\left[{\mathcal C}_q^u({\mathfrak t})\right]^{\pm1}$, respectively.
First a technical definition:
\begin{Def} $p(a,j,k)$ denotes the largest non-negative integer for which $$\omega^{\mathfrak p}_{(k,p(a,j,k))}\Lambda_k=\omega^{\mathfrak p}_{(a,j)}\Lambda_k.$$ \end{Def}
We also allow $E_a(j,j)$ which is defined to be $1$.
Here are then the first building blocks:
\begin{Def} \begin{eqnarray}\nonumber &\forall (a,j)\in {\mathbb U}^{d,{\mathfrak t}}:\\& H^d_{\mathfrak t}(a,j):=E_a(a_{\mathfrak t},j)E_a(a_{\mathfrak t},j-1) \prod_{a_{ka}<0}E_k(k_{\mathfrak t},p(a,j,k))^{a_{ka}} \\\nonumber &\forall (a,j)\in {\mathbb U}^{d,{\mathfrak t}}\textrm{ with }j<a_{\mathfrak p}:\\ &B^d_{\mathfrak t}(a,j):=H^d_{\mathfrak t}(a,j)(H^d_{\mathfrak t}(a,j+1))^{-1}. \end{eqnarray} \end{Def} The terms $E(k_{\mathfrak t},p(a,j,k))$ and $E_a(a_{\mathfrak t},j-1)$ are well-defined but may become equal to $1$. Also notice that, where defined, $H^d_{\mathfrak t}(a,j), B^d_{\mathfrak t}(a,j)\in {\mathbf L}_q^d({\mathfrak t})$.
\begin{Lem}\label{7.10}If $E_{\xi,\eta}<H^d_{\mathfrak t}(a,j)$ in the sense that it is less than or equal to each factor $E_{\xi_1,\eta_1}$ of $H^d_{\mathfrak t}(a,j)$ (and $<$ is defined in (\ref{less})), then \begin{equation}\label{54} E_{\xi,\eta}H^d_{\mathfrak t}(a,j)=q^{(\xi-\eta,\omega^{\mathfrak t}(\alpha_a))}H^d_{\mathfrak t}(a,j)E_{\xi,\eta}. \end{equation} If $E_{\xi,\eta}\geq H^d_{\mathfrak t}(a,j)$, then \begin{equation}\label{55} E_{\xi,\eta}H^d_{\mathfrak t}(a,j)=q^{(-\xi-\eta,\omega^{\mathfrak t}(\alpha_a))}H^d_{\mathfrak t}(a,j)E_{\xi,\eta}. \end{equation} \end{Lem}
\proof This follows from (\ref{less}) by observing that we have the following pairs $(\xi_1,\eta_1)$ occurring in $H^d_{\mathfrak t}(a,j)$: $$(\omega^{\mathfrak t}\Lambda_a,\omega(a,j)\Lambda_a),(\omega^{\mathfrak t}\Lambda_a,\omega(a,j)\sigma_a\Lambda_a),$$ and $$(-\omega^{\mathfrak t}\Lambda_k,-\omega(a,j)\Lambda_k) \textrm{ with multiplicity }(-a_{ka}).$$ Furthermore, as in (\ref{3.1}), $\Lambda_a+\sigma_a\Lambda_a+\sum_ka_{ka}\Lambda_k=0$ and, equivalently, $2\Lambda_a+\sum_ka_{ka}\Lambda_k=\alpha_a$ . \qed
\begin{Prop}\label{prop7.10}$\forall (a,j),(b,j')\in {\mathbb U}^{d,{\mathfrak t}}, j<a_{\mathfrak p}$ the following holds: \begin{equation} E^d_{\mathfrak t}(b,j')B^d_{\mathfrak t}(a,j)=q^{-2(\Lambda_a,\alpha_a)\delta_{j,j'}\delta_{a,b}}B^d_{\mathfrak t}(a,j)E^d_{\mathfrak t}(b,j'). \end{equation} \end{Prop}
\proof It is clear from the formulas (\ref{54}-\ref{55}) that if an element $E_{\xi,\eta}$ either is bigger than all factors in $B^d_{\mathfrak t}(s,j)$ or smaller than all factors, then it commutes with this element. The important fact now is that the ordering is independent of the fundamental weights $\Lambda_i$ - it depends only on the Weyl group elements. The factors in any $H_{\mathfrak t}^d$ are, with a fixed ${\mathfrak t}$, of the form $E_{\omega^{\mathfrak t}\Lambda_i,\omega\Lambda_i}$ or $E_{\omega^{\mathfrak t}\Lambda_a,\omega\circ\sigma_a\Lambda_a}$ for some $\omega\geq \omega^{\mathfrak t}$. The elements $E_{\xi,\eta}=E^d_{\mathfrak t}(b,j')$ we consider thus satisfy the first or the second case in Lemma~\ref{7.10} for either terms $H^d_{\mathfrak t}(a,j)$ and $H^d_{\mathfrak t}(a,j+1)$. Clearly, we then need only consider the in-between case $H^d_{\mathfrak t}(a,j)\leq E_{\xi,\eta}\leq H^d_{\mathfrak t}(a,j+1)$, and here there appears a factor $q^{-2(\xi,\omega^{\mathfrak t}(\alpha_a))}$ in the commutator with $\xi=\omega^{\mathfrak t}\Lambda_{b}$. This accounts for the term $-2(\Lambda_a,\alpha_a)\delta_{a,b}$. Finally, if $a=b$ the previous assumption forces $j=j'$. \qed
Let us choose an enumeration \begin{equation} {\mathcal C}_q^d({\mathfrak t})=\{c_1,c_2,\dots, c_N\} \end{equation} so that each $(a,j)\leftrightarrow k$ and let us use the same enumeration of
the elements $B^d_{\mathfrak t}(a,j)$. Set, for now, $B^d_{\mathfrak t}(a,j)=b_k$ if $(a,j)\leftrightarrow k$. Let us also agree that the, say $n$, non-mutable elements $\det_s^{{\mathfrak t},{\mathfrak p}}$ of ${\mathcal C}_q^d({\mathfrak t})$ are written last, say numbers $N-n+1, N-n+2,\dots, N$. Then, as defined, \begin{equation} \forall j=1,\dots, N-n: b_j=q^{\alpha_j}\prod_kc_k^{b_{kj}} \end{equation} for some integers $b_{kj}$ and some, here inconsequential, factor $q^{\alpha_j}$. The symplectic form yields a matrix which we, abusing notation slightly, also denote ${\mathcal L}_q^d({\mathfrak t})$ such that \begin{equation} \forall i,j=1,\dots, N:\ \left({\mathcal L}_q^d({\mathfrak t})\right)_{ij}=\lambda_{ij} \end{equation} and \begin{equation} \forall i,j=1,\dots, N: c_ic_j=q^{\lambda_{ij}}c_jc_i.\end{equation} Similarly, we let ${\mathcal B}_q^d({\mathfrak t})$ denote the matrix \begin{equation} \forall i=1,\dots, N\ \forall j=1,\dots, N-n: \left({\mathcal B}_q^d({\mathfrak t})\right)_{ij}=b_{ij}. \end{equation} Then, where defined, \begin{equation} c_ib_j=\prod_kq^{\lambda_{ik}b_{kj}}b_jc_i, \end{equation} and Proposition~\ref{prop7.10} may then be restated as \begin{equation} \forall i=1,\dots, N\ \forall j=1,\dots, N-n: \sum_k\lambda_{ik}b_{kj}=-2(\Lambda_s,\alpha_s)\delta_{ij}, \end{equation} where we assume that $i\leftrightarrow (s,\ell)$.
We have then established \begin{Thm} The pair $({\mathcal L}_q^d({\mathfrak t}),{\mathcal B}_q^d({\mathfrak t}))$ is a compatible pair and hence, \begin{equation} {\mathcal Q}_q^d({\mathfrak t}):=({\mathcal C}_q^d({\mathfrak t}),{\mathcal L}_q^d({\mathfrak t}),{\mathcal B}_q^d({\mathfrak t})) \end{equation} is a quantum seed with the $n$ non-mutable elements $\det_s^{{\mathfrak t},{\mathfrak p}}$, $(s,s_{\mathfrak p})\in {\mathbb U}^d({\mathfrak t})$. The entries of the diagonal of the matrix $\tilde D=({\mathcal B}_q^d({\mathfrak t}))^T{\mathcal L}_q^d({\mathfrak t})$ are in the set $\{2(\Lambda_s,\alpha_s)\mid s=1,\dots,R\}$. \end{Thm}
If ${\mathfrak v}>{\mathfrak t}$, we let $({\mathcal L}_q^d({\mathfrak t},{\mathfrak v}),{\mathcal B}_q^d({\mathfrak t},{\mathfrak v}))$ denote the part of the compatible pair $({\mathcal L}_q^d({\mathfrak t}),{\mathcal B}_q^d({\mathfrak t}))$ that corresponds to the cluster ${\mathcal C}_q^d({\mathfrak t},{\mathfrak v})$ and we let ${\mathcal Q}_q^d({\mathfrak t},{\mathfrak v})$ be the corresponding triple. It is then obvious, by simple restriction, that we have in fact obtained
\begin{Thm} The pair $({\mathcal L}_q^d({\mathfrak t},{\mathfrak v}),{\mathcal B}_q^d({\mathfrak t},{\mathfrak v}))$ is a compatible pair and hence, \begin{equation} {\mathcal Q}_q^d({\mathfrak t},{\mathfrak v}):=({\mathcal C}_q^d({\mathfrak t},{\mathfrak v}),{\mathcal L}_q^d({\mathfrak t},{\mathfrak v}),{\mathcal B}_q^d({\mathfrak t},{\mathfrak v})) \end{equation} is a quantum seed with the $n$ non-mutable elements $\det_s^{{\mathfrak t},{\mathfrak v}}$, $(s,s_{\mathfrak v})\in {\mathbb U}^d({\mathfrak t},{\mathfrak v})$. \end{Thm}
The case of ${\mathcal C}_q^u({\mathfrak t})$ is completely analogous: Define
\begin{Def} \begin{eqnarray} H^u_{\mathfrak t}(a,j)&:=&E_a(j, a_{\mathfrak t})E_a(j-1,a_{\mathfrak t}) \prod_{a_{ka}<0}E_k(p(a,j,k), k_{\mathfrak t})^{a_{ka}} \ (1\leq j<a_{\mathfrak t}),\nonumber\\ B^u_{\mathfrak t}(a,j)&:=&H^u_{\mathfrak t}(a,j+1)(H^u_{\mathfrak t}(a,j))^{-1} \ (1\leq j<a_{\mathfrak t}). \end{eqnarray} The terms $E(p(a,j,k),k_{\mathfrak t})$ are well-defined but may become equal to $1$. Notice also the exponents on the terms $H^u_{\mathfrak t}$. \end{Def}
As defined, $H^u_{\mathfrak t}(a,j)$ and $B^u_{\mathfrak t}(a,j)$ are in ${\mathbf L}_q^u({\mathfrak t})$.
\begin{Prop}$\forall (a,j),(b,j')\in {\mathbb U}^{u,{\mathfrak t}}, 1\leq j$ the following holds:
\begin{equation} E^u_{\mathfrak t}(b,j')B^u_{\mathfrak t}(a,j)=q^{2(\Lambda_a,\alpha_a)\delta_{j,j'}\delta_{a,b}}B^u_{\mathfrak t}(a,j)E^u_{\mathfrak t}(b,j'). \end{equation} \end{Prop}
We then get in a similar way
\begin{Thm} The pair $({\mathcal L}_q^u({\mathfrak t}),{\mathcal B}_q^u({\mathfrak t}))$ is a compatible pair and hence, \begin{equation} {\mathcal Q}_q^u({\mathfrak t}):=({\mathcal C}_q^u({\mathfrak t}),{\mathcal L}_q^u({\mathfrak t}),{\mathcal B}_q^u({\mathfrak t})) \end{equation} is a quantum seed with the $n$ non-mutable elements $\det_s^{{\mathfrak e},{\mathfrak t}}$, $(s,s_{\mathfrak t})\in {\mathbb U}^u({\mathfrak t})$. \end{Thm}
Naturally, we even have
\begin{Thm} The pair $({\mathcal L}_q^u({\mathfrak s},{\mathfrak t}),{\mathcal B}_q^u({\mathfrak s},{\mathfrak t}))$ is a compatible pair and hence, \begin{equation} {\mathcal Q}_q^u({\mathfrak s},{\mathfrak t}):=({\mathcal C}_q^u({\mathfrak s},{\mathfrak t}),{\mathcal L}_q^u({\mathfrak s},{\mathfrak t}),{\mathcal B}_q^u({\mathfrak s},{\mathfrak t})) \end{equation} is a quantum seed with the $n$ non-mutable elements $\det_s^{{\mathfrak s},{\mathfrak t}}$, $(s,s_{\mathfrak s})\in {\mathbb U}^u({\mathfrak s},{\mathfrak t})$. \end{Thm}
We now wish to consider more elaborate seeds. The first generalization is the most important:
Let \begin{equation} {\mathfrak e}\leq {\mathfrak a}\leq {\mathfrak b}\leq{\mathfrak c}\leq {\mathfrak p}, \textrm{ but } {\mathfrak a}\neq {\mathfrak c}.\end{equation}
\begin{eqnarray}\label{l1} {\mathcal C}_q^d({\mathfrak a},{\mathfrak b},{\mathfrak c})&:=&\{E^d_{\mathfrak a}(s,j)\mid (s,j)\in ({\mathbb U}^{d,{\mathfrak b}}\setminus {\mathbb U}^{d,{\mathfrak c}})= {\mathbb U}^{d,{\mathfrak b},{\mathfrak c}}\},\\\label{l2}{\mathcal C}_q^u({\mathfrak a},{\mathfrak b},{\mathfrak c})&:=&\{E^u_{\mathfrak c}(s,j)\mid (s,j)\in ({\mathbb U}^{u,{\mathfrak b}}\setminus {\mathbb U}^{u,{\mathfrak a}})= {\mathbb U}^{u,{\mathfrak a},{\mathfrak b}}\}. \end{eqnarray} In (\ref{l1}), ${\mathfrak a}={\mathfrak b}$ is allowed, and in (\ref{l2}), ${\mathfrak b}={\mathfrak c}$ is allowed.
\begin{Def} \begin{eqnarray*}{\mathcal C}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})&:=&{\mathcal C}_q^d({\mathfrak a},{\mathfrak b},{\mathfrak c})\cup{\mathcal C}_q^u({\mathfrak a},{\mathfrak b},{\mathfrak b}),\\ {\mathcal C}_q^o({\mathfrak a},{\mathfrak b},{\mathfrak c})&:=&{\mathcal C}_q^u({\mathfrak a},{\mathfrak b},{\mathfrak c})\cup{\mathcal C}_q^d({\mathfrak b},{\mathfrak b},{\mathfrak c}). \end{eqnarray*} \end{Def}
\begin{Prop}\label{7.13} The elements of ${\mathcal C}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})$ and ${\mathcal C}_q^o({\mathfrak a},{\mathfrak b},{\mathfrak c})$, respectively, $q$-commute. \end{Prop}
\proof The two cases are very similar, so we only prove it for the first case. We examine 3 cases, while using the following mild version of Theorem~\ref{10.2}: $ \triangle_{s's\lambda,t'\lambda}$ and $\triangle_{s'\mu,t't\mu}$ $q$ commute for any $\lambda,\mu\in P^+$, and $s, s', t, t' \in W$ for which $\ell(s's) = \ell(s') + \ell(s), \ell(t't) = \ell(t') + \ell(t)$.
{\bf Case 1:} $E_{\mathfrak a}^d(s,t)$ and $E_{\mathfrak a}^d(s_1,t_1)$ for $(s,t)\in {\mathbb U}^{d, {\mathfrak b},{\mathfrak c}}$ and $(s,t)<(s_1,t_1)$: Set $\lambda=\Lambda_s,\mu=\Lambda_{s_1}$, $s=1,s'=\omega^{\mathfrak a}$, and $t'=\omega^{\mathfrak c}(s,t), t't=\omega^{\mathfrak c}(s_1,t_1)$.
{\bf Case 2:} $E_{\mathfrak b}^u(s,t)$ and $E_{\mathfrak b}^u(s_1,t_1)$ for $(s,t)\in {\mathbb U}^{u, {\mathfrak a},{\mathfrak b}}$ and $(s,t)>(s_1,t_1)$: Set $\lambda=\Lambda_s,\mu=\Lambda_{s_1}$, $t=1$, $t'=\omega^{\mathfrak b}$ and $s'=\omega^{\mathfrak p}(s_1,t_1), s's=\omega^{\mathfrak r}(s,t)$.
{\bf Case 3:} $E_{\mathfrak b}^u(s,t)$ and $E_{\mathfrak a}^d(s_1,t_1)$ for $(s,t)\in {\mathbb U}^{u, {\mathfrak a},{\mathfrak b}}$ and $(s_1,t_1)\in
{\mathbb U}^{d, {\mathfrak b},{\mathfrak c}}$: Set $\lambda=\Lambda_s,\mu=\Lambda_{s_1}$, $s'=\omega^{{\mathfrak a}}$, $s=\omega^{\mathfrak p}(s,t)$, $t'=\omega^{\mathfrak b}$ and $t't=\omega^{\mathfrak p}(s_1,t_1)$. \qed
Notice that the ordering in ${\mathbb U}^{u, {\mathfrak a},{\mathfrak b}}$ (Case 2) is the opposite of that of the two other cases.
We also define, for ${\mathfrak a}<{\mathfrak b}$,
\begin{eqnarray} {\mathcal C}_q^u({\mathfrak a},{\mathfrak b})&=&{\mathcal C}_q^u({\mathfrak a}, {\mathfrak b},{\mathfrak b}),\textrm{ and}\\\nonumber {\mathcal C}_q^d({\mathfrak a},{\mathfrak b})&=&{\mathcal C}_q^d({\mathfrak a}, {\mathfrak a},{\mathfrak b}). \end{eqnarray}
We let ${\mathcal L}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})$ and ${\mathcal L}_q^o({\mathfrak a},{\mathfrak b},{\mathfrak c})$ denote the corresponding symplectic matrices. We proceed to construct compatible pairs and give the details for just ${\mathcal C}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})$. We will be completely explicit except in the special cases $E^u_{\omega^{{\mathfrak a}}\Lambda_s,\omega^{{\mathfrak b}}\Lambda_s}={\det}_{s}^{{\mathfrak a},{\mathfrak b}}$ where we only give a recipe for $ {B}_q^{{\mathfrak a},{\mathfrak b},{\mathfrak c}}(s,s_{{\mathfrak a}}) $. Notice, however, the remark following (\ref{77}).
\begin{equation}\label{72}{B}_q^{{\mathfrak a}, {\mathfrak b},{\mathfrak c}}(s,j):=\left\{\begin{array}{lll}B^d_{{\mathfrak a}}(s,j)&\textrm{if }(s,j)\in {\mathbb U}_{R<}^{d, {\mathfrak b},{\mathfrak c}}\\ \ \\ B^u_{{\mathfrak b}}(s,j)&\textrm{if }(s,j)\in {\mathbb U}_{L<}^{u, {\mathfrak a},{\mathfrak b}} \end{array}\right..\end{equation}
We easily get from the preceding propositions:
\begin{Prop}\label{7.20} Let $E(b,j')\in {\mathcal C}_q({{\mathfrak a}, {\mathfrak b},{\mathfrak c}})$ and let ${B}_q^{{\mathfrak a}, {\mathfrak b},{\mathfrak c}}(s,j)$ be as in the previous equation. Then \begin{equation} E(b,j'){B}_q^{{\mathfrak a}, {\mathfrak b},{\mathfrak c}}(s,j)=q^{{-2(\Lambda_s,\alpha_s)\delta_{j,j'}\delta_{s,b}}}{B}_q^{{\mathfrak a}, {\mathfrak b},{\mathfrak c}}(s,j)E(b,j'), \end{equation} and ${B}_q^{{\mathfrak a}, {\mathfrak b},{\mathfrak c}}(s,j)$ is in the algebra ${\mathcal A}_q({{\mathfrak a}, {\mathfrak b},{\mathfrak c}})$ generated by the elements of ${\mathcal C}_q({{\mathfrak a}, {\mathfrak b},{\mathfrak c}})$. \end{Prop}
This then leaves the positions $(s,s_{\mathfrak c})\in {\mathbb U}^{d,{\mathfrak b},{\mathfrak c}}$ and $(s,s_{\mathfrak a})\in {\mathbb U}^{u,{\mathfrak a},{\mathfrak b}}$ to be considered. Here, the first ones are considered as the non-mutable elements. In the ambient space ${\mathcal A}_q({{\mathfrak a}, {\mathfrak b},{\mathfrak c}})$, the positions in the remaining cases define elements that are, in general, mutable.
The elements in these cases are of the form $E_{\omega^{{\mathfrak a}}\Lambda_s,\omega^{{\mathfrak b}}\Lambda_s}$ for some $s$. To give a recipe we define the following elements in ${\mathcal A}_q({{\mathfrak a}, {\mathfrak b},{\mathfrak c}})$: \begin{eqnarray}&\tilde {B}_q^{{\mathfrak a},{\mathfrak b},{\mathfrak c}}(s,s_{{\mathfrak a}}) :=\\&\left(H^u_{{\mathfrak b}}(s,s_{{\mathfrak a}}+1) H^d_{{\mathfrak a}}(s,s_{{\mathfrak b}}+1)\right)^{-1} E_s(s_{{\mathfrak a}},s_{{\mathfrak b}})^2\prod_{a_{ks}<0}E_k(k_{{\mathfrak a}},k_{{\mathfrak b}})^{a_{ks}}.\nonumber\end{eqnarray}
If $\omega(s,s_{{\mathfrak a}}+1)=\omega^{{\mathfrak a}}\omega_x\sigma_s$ and $\omega(s,s_{{\mathfrak b}}+1)=\omega^{{\mathfrak b}}\omega_y\sigma_s$, and if we set $u=\omega^{{\mathfrak a}}\omega_x$, $v=\omega^{{\mathfrak b}}\omega_y$ this takes the simpler form
\begin{equation}\label{75}\tilde {B}_q^{{\mathfrak a},{\mathfrak b},{\mathfrak c}}(s,s_{{\mathfrak a}}) =E_{u\sigma_s\Lambda_s,v\Lambda_s}^{-1}E_{ u\Lambda_s,v\sigma_s\Lambda_s}^{-1}\prod_{a_{ks}<0} E_{\omega^{\mathfrak a}\Lambda_k,v\Lambda_k}^{-a_{ks}}\prod_{a_{ks}<0} E_{u\Lambda_k,\omega^{\mathfrak b}\Lambda_k}^{-a_{ks}}\prod_{a_{ks}<0} E_{\omega^{\mathfrak a}\Lambda_k,\omega^{\mathfrak b}\Lambda_k}^{a_{ks}}.\end{equation}
\begin{Prop} \begin{equation} \forall\ell: E_{\omega^{\mathfrak a}\Lambda_\ell, \omega^{\mathfrak b}\Lambda_\ell}\tilde {B}_q^{{\mathfrak a},{\mathfrak b},{\mathfrak c}}(s,s_{{\mathfrak a}})=q^{-2\delta_{\ell,s}(\Lambda_s,\alpha_s)} \tilde {B}_q^{{\mathfrak a},{\mathfrak b},{\mathfrak c}}(s,s_{{\mathfrak a}})E_{\omega^{\mathfrak a}\Lambda_\ell, \omega^{\mathfrak b}\Lambda_\ell}. \end{equation} Besides this, $\tilde {B}_q^{{\mathfrak a},{\mathfrak b},{\mathfrak c}}(s,s_{{\mathfrak a}})$ commutes with everything in the cluster except possibly elements of the form $$ E_{\omega^{{\mathfrak a}}\Lambda_\ell,\omega^{{\mathfrak b}}\tilde\omega_y\Lambda_\ell}, \textrm{ and } E_{\omega^{{\mathfrak a}}\tilde\omega_x\Lambda_\ell,\omega^{{\mathfrak b}}\Lambda_\ell}, $$ with $1<\tilde\omega_x<\omega_x$ and $1<\tilde\omega_y<\omega_y$. \end{Prop}
The exceptional terms above are covered by Proposition~\ref{7.20} which means that we can in principle make a modification $\tilde {B}_q^{{\mathfrak a},{\mathfrak b},{\mathfrak c}}(s,s_{{\mathfrak a}})\to {B}_q^{{\mathfrak a},{\mathfrak b},{\mathfrak c}}(s,s_{{\mathfrak a}})$ where the latter expression commutes with everything except $E_{\omega^{{\mathfrak a}}\Lambda_s,\omega^{{\mathfrak b}}\Lambda_s}$ where we get a factor $q^{-2(\Lambda_s,\alpha_s)}$.
If $\omega_y=1$ we get a further simplification where now $u=\omega^{{\mathfrak a}}\omega_x$ and $v=\omega^{{\mathfrak b}}$: \begin{equation}\label{77}\tilde {B}_q^{{\mathfrak a},{\mathfrak b},{\mathfrak c}}(s,s_{{\mathfrak a}}) =E_{u\sigma_s\Lambda_s,v\Lambda_s}^{-1}E_{ u\Lambda_s,v\sigma_s\Lambda_s}^{-1}\prod_{a_{ks}<0} E_{u\Lambda_k,v\Lambda_k}^{-a_{ks}}.\end{equation}
Here we actually have $\tilde {B}_q^{{\mathfrak a},{\mathfrak b},{\mathfrak c}}(s,s_{{\mathfrak a}}) ={B}_q^{{\mathfrak a},{\mathfrak b},{\mathfrak c}}(s,s_{{\mathfrak a}})$, and this expression has the exact form needed for the purposes of the next section.
We let ${\mathcal B}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})$ and ${\mathcal B}_q^o({\mathfrak a},{\mathfrak b},{\mathfrak c})$ denote the corresponding symplectic matrices and can now finally define our quantum seeds:
\begin{equation}{\mathcal Q}_q({\mathfrak a},{\mathfrak b},{\mathfrak c}):=({\mathcal C}_q({\mathfrak a},{\mathfrak b},{\mathfrak c}), {\mathcal L}_q({\mathfrak a},{\mathfrak b},{\mathfrak c}), {\mathcal B}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})).\end{equation}
\begin{Def}\begin{equation}{\mathcal Q}_q^o({\mathfrak a},{\mathfrak b},{\mathfrak c}):=({\mathcal C}_q^o({\mathfrak a},{\mathfrak b},{\mathfrak c}), {\mathcal L}_q^o({\mathfrak a},{\mathfrak b},{\mathfrak c}), {\mathcal B}_q^o({\mathfrak a},{\mathfrak b},{\mathfrak c})).\end{equation}\end{Def}
According to our analysis above we have established
\begin{Thm}\label{seedth} ${\mathcal Q}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})$ and ${\mathcal Q}_q^o({\mathfrak a},{\mathfrak b},{\mathfrak c})$ are indeed quantum seeds. The non-mutable elements are in both cases the elements ${\det}_{s}^{{\mathfrak a},{\mathfrak c}}; s\in Im(\pi_{\omega^{\mathfrak c}})$. \end{Thm}
Let us finally consider a general situation where we are given a finite sequence of elements $\{\omega^{{\mathfrak r}_i}\}_{i=1}^n\in W^p$ such that \begin{equation}\label{genseq} {\mathfrak e}\leq {{\mathfrak r}_1}<\dots<{{\mathfrak r}_n}\leq {{\mathfrak p}}. \end{equation} Observe that \begin{equation}\forall(s,t)\in{\mathbb U}({\mathfrak r}_k):\omega^{{\mathfrak r}_k}_{(s,t)}=\omega^{{\mathfrak p}}_{(s,t)}. \end{equation}
It may of course well happen that for some $a$, and some $ {{\mathfrak r}_i}<{{\mathfrak r}_j}$, \begin{equation}\omega^{{\mathfrak r}_i}\Lambda_a=\omega^{{\mathfrak r}_j}\Lambda_a. \end{equation}
\begin{Def}Given (\ref{genseq}) we define
\begin{eqnarray}{\mathcal C}_q({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)&=&{\mathcal C}_q^d({\mathfrak r}_1, {\mathfrak r}_{n-1},{\mathfrak r}_n)\cup {\mathcal C}_q^u({\mathfrak r}_1,{\mathfrak r}_{2},{\mathfrak r}_{n-1})\cup\dots\\&=&\bigcup_{0<2i\leq n}{\mathcal C}_q^d({\mathfrak r}_i,{\mathfrak r}_{n-i},{\mathfrak r}_{n-i+1})\cup \bigcup_{0<2j\leq n-1}{\mathcal C}_q^u({\mathfrak r}_j,{\mathfrak r}_{j+1},{\mathfrak r}_{n-j}). \nonumber \end{eqnarray}
It is also convenient to consider
\begin{eqnarray}{\mathcal C}_q^o({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)&=&{\mathcal C}_q^u({\mathfrak r}_1, {\mathfrak r}_{2},{\mathfrak r}_n)\cup {\mathcal C}_q^d({\mathfrak r}_2,{\mathfrak r}_{n-1},{\mathfrak r}_{n})\cup\dots\\&=&\bigcup_{0<2i\leq n}{\mathcal C}_q^u({\mathfrak r}_i,{\mathfrak r}_{i+1},{\mathfrak r}_{n-i+1})\cup \bigcup_{0<2j\leq n-1}{\mathcal C}_q^d({\mathfrak r}_{j+1},{\mathfrak r}_{n-j},{\mathfrak r}_{n-j+1}). \nonumber \end{eqnarray} \end{Def}
Notice that \begin{eqnarray}{\mathcal C}_q({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)&=&{\mathcal C}_q^d({\mathfrak r}_1, {\mathfrak r}_{n-1},{\mathfrak r}_n)\cup {\mathcal C}_q^o({\mathfrak r}_1,\dots, {\mathfrak r}_{n-2},{\mathfrak r}_{n-1})\\\nonumber {\mathcal C}_q^o({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)&=&{\mathcal C}_q^u({\mathfrak r}_1, {\mathfrak r}_{2},{\mathfrak r}_n)\cup {\mathcal C}_q({\mathfrak r}_2,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n) \end{eqnarray}
For the last equations, notice that ${\mathcal C}_q^{d}({\mathfrak e},{\mathfrak r},{\mathfrak r}) =\emptyset={\mathcal C}_q^{d}({\mathfrak r},{\mathfrak r},{\mathfrak r})$.
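To make these unions concrete, here is what they amount to for small $n$ (a direct specialization of the formulas above): $$ {\mathcal C}_q({\mathfrak r}_1,{\mathfrak r}_2)={\mathcal C}_q^d({\mathfrak r}_1,{\mathfrak r}_1,{\mathfrak r}_2)={\mathcal C}_q^d({\mathfrak r}_1,{\mathfrak r}_2),\qquad {\mathcal C}_q({\mathfrak r}_1,{\mathfrak r}_2,{\mathfrak r}_3)={\mathcal C}_q^d({\mathfrak r}_1,{\mathfrak r}_2,{\mathfrak r}_3)\cup{\mathcal C}_q^u({\mathfrak r}_1,{\mathfrak r}_2,{\mathfrak r}_2), $$ $$ {\mathcal C}_q({\mathfrak r}_1,{\mathfrak r}_2,{\mathfrak r}_3,{\mathfrak r}_4)={\mathcal C}_q^d({\mathfrak r}_1,{\mathfrak r}_3,{\mathfrak r}_4)\cup{\mathcal C}_q^d({\mathfrak r}_2,{\mathfrak r}_2,{\mathfrak r}_3)\cup{\mathcal C}_q^u({\mathfrak r}_1,{\mathfrak r}_2,{\mathfrak r}_3), $$ the last case agreeing with the recursion above, since ${\mathcal C}_q^o({\mathfrak r}_1,{\mathfrak r}_2,{\mathfrak r}_3)={\mathcal C}_q^u({\mathfrak r}_1,{\mathfrak r}_2,{\mathfrak r}_3)\cup{\mathcal C}_q^d({\mathfrak r}_2,{\mathfrak r}_2,{\mathfrak r}_3)$.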
\begin{Prop}\label{qcom} The sets \begin{equation} {\mathcal C}_q^o({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)\textrm{ and }{\mathcal C}_q({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)
\end{equation} each consist of $q$-commuting elements. \end{Prop}
\proof This is proved in the same way as Proposition~\ref{7.13}. \qed
Our goal is to construct seeds out of these clusters using (and then generalizing) Theorem~\ref{seedth}.
With Proposition~\ref{qcom} at hand, we are immediately given the corresponding symplectic matrices \begin{equation} {\mathcal L}_q^o({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)\textrm{ and }{\mathcal L}_q({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n).
\end{equation}
The construction of the accompanying $B$-matrices
\begin{equation} {\mathcal B}_q^o({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)\textrm{ and }{\mathcal B}_q({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)
\end{equation}
takes a little more work, though in principle it is straightforward.
The idea is in both cases to consider an element in the cluster as lying in a space
\begin{eqnarray}
{\mathcal C}_q^d({\mathfrak r}_i, {\mathfrak r}_{n-i},{\mathfrak r}_{n-i+1})\cup {\mathcal C}_q^u({\mathfrak r}_i, {\mathfrak r}_{i+1},{\mathfrak r}_{n-i})&\subseteq&{\mathcal C}_q({\mathfrak r}_i, {\mathfrak r}_{n-i},{\mathfrak r}_{n-i+1})\textrm{ or}\\
{\mathcal C}_q^u({\mathfrak r}_i, {\mathfrak r}_{i+1},{\mathfrak r}_{n-i+1})\cup {\mathcal C}_q^d({\mathfrak r}_{i+1}, {\mathfrak r}_{n-i},{\mathfrak r}_{n-i+1})&\subseteq&{\mathcal C}_q^o({\mathfrak r}_i, {\mathfrak r}_{i+1},{\mathfrak r}_{n-i+1})\quad
\end{eqnarray}
as appropriate. Then we can use the corresponding matrices
${\mathcal B}_q({\mathfrak r}_i, {\mathfrak r}_{n-i},{\mathfrak r}_{n-i+1})$ or ${\mathcal B}_q^o({\mathfrak r}_i, {\mathfrak r}_{i+1},{\mathfrak r}_{n-i+1})$, in the sense that one can extend these matrices to the full size by inserting rows of zeros. In this way, we can construct columns even for the troublesome elements of the form $E(a_{{\mathfrak r}_i}, a_{{\mathfrak r}_j})$ that may belong to such spaces. Indeed, we may start by including $E(a_{{\mathfrak r}_{\frac{n}2}}, a_{{\mathfrak r}_{\frac{n+2}2}})$ ($n$ even) or $E(a_{{\mathfrak r}_{\frac{n-1}2}}, a_{{\mathfrak r}_{\frac{n+1}2}})$ ($n$ odd) in such a space, in which they may be seen as mutable. Then these spaces have new non-mutable elements, which can be handled by viewing them in appropriate spaces. The only ones which we cannot capture are the elements ${\det}_{s}^{{\mathfrak r}_1,{\mathfrak r}_n}=E(s_{{\mathfrak r}_1}, s_{{\mathfrak r}_n})$.
\begin{Def}In both cases, the elements ${\det}_{s}^{{\mathfrak r}_1,{\mathfrak r}_n}$, $s\in Im(\pi_{{\mathfrak r}_1})$ are the non-mutable elements. We let ${\mathcal N}_q({\mathfrak r}_1, {\mathfrak r}_n)$ denote the set of these. \end{Def}
\begin{Prop}
\begin{equation} {\mathcal Q}_q({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)\textrm{ and } {\mathcal Q}_q^o({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)
\end{equation}
are quantum seeds.
\end{Prop}
\section{Mutations}
Here is the fundamental setup: Let $\omega^{\mathfrak a}, \omega^{\mathfrak b},\omega^{\mathfrak c}\in W^p$ satisfy
\begin{equation}{\mathfrak a}<{\mathfrak c}\textrm{ and } {\mathfrak a}\leq {\mathfrak b}\leq{\mathfrak c}.\end{equation}
\begin{Def}\label{7.1bis} A root $\gamma\in\triangle^+({\mathfrak c})$ is an {\bf increasing-mutation site} of $\omega^{\mathfrak b}\in W^p$ (in reference to $({\mathfrak a},{\mathfrak b},{\mathfrak c})$) if there exists a reduced form of $\omega^{\mathfrak c}$ as \begin{equation} \omega^{\mathfrak c}=\hat\omega\sigma_\gamma\omega^{\mathfrak b}. \end{equation} Let $W^p\ni\omega^{{\mathfrak b}'}=\sigma_\gamma\omega^{\mathfrak b}$. It follows that \begin{equation}\label{94} \omega^{{\mathfrak b}'}=\omega^{\mathfrak b}\sigma_{\alpha_s} \end{equation} for a unique $s\in Im(\pi_{{\mathfrak b}'})$. Such a site will henceforth be called an ${\mathfrak m}^+$ site.
We will further say that $\gamma$ is a {\bf decreasing-mutation site}, or ${\mathfrak m}^-$ site (in reference to $({\mathfrak a},{\mathfrak b},{\mathfrak c})$) of $\omega^{{\mathfrak b}}\in W^p$ in case there exists a rewriting of $\omega^{{\mathfrak b}}$ as $\omega^{{\mathfrak b}}=\sigma_\gamma\omega^{{\mathfrak b}''}$ with ${\mathfrak a}\leq \omega^{{\mathfrak b}''}\in W^p$. Here, \begin{equation} \omega^{{\mathfrak b}}=\omega^{{\mathfrak b}''}\sigma_{\alpha_s} \end{equation} for a unique $s\in Im(\pi_{{\mathfrak b}})$. We view such sites as places where replacements are possible and will use the notation
\begin{equation}\label{m+}{\mathfrak m}^{+}_{{\mathfrak a},{\mathfrak c}}:({\mathfrak a},{\mathfrak b},{\mathfrak c})\to ({\mathfrak a},{\mathfrak b}',{\mathfrak c}),\end{equation} and \begin{equation}{\mathfrak m}^-_{{\mathfrak a},{\mathfrak c}}:({\mathfrak a},{\mathfrak b},{\mathfrak c})\to ({\mathfrak a},{\mathfrak b}'',{\mathfrak c}),\end{equation} respectively, for the replacements while at the same time defining what we mean by replacements.
Notice that ${\mathfrak a}={\mathfrak b}$ and ${\mathfrak b}'={\mathfrak c}$ are allowed in the first while ${\mathfrak b}={\mathfrak c}$ and ${\mathfrak b}''={\mathfrak a}$ are allowed in the second.
Furthermore, $${\mathfrak m}_{{\mathfrak a},{\mathfrak c}}:({\mathfrak a},{\mathfrak b},{\mathfrak c})\to ({\mathfrak a},{\mathfrak b}_1,{\mathfrak c})$$ denotes the composition of any finite number of such maps ${\mathfrak m}^{\pm}_{{\mathfrak a},{\mathfrak c}}$ (in any order, subject to the limitations stipulated above at each step).
We will further extend the meaning of ${\mathfrak m}_{{\mathfrak a},{\mathfrak c}}$ also to include the replacements $${\mathcal C}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})\to {\mathcal C}_q({\mathfrak a},{\mathfrak b}_1,{\mathfrak c}),$$ and even $${\mathcal Q}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})\to {\mathcal Q}_q({\mathfrak a},{\mathfrak b}_1,{\mathfrak c}).$$At the seed level, we will refer to the replacements as {\bf Schubert mutations}.
Similarly, we can define maps ${\mathfrak m}^{o,\pm}_{{\mathfrak a},{\mathfrak c}}$, and after that mutations as composites $${\mathfrak m}^{o}_{{\mathfrak a},{\mathfrak c}}:{\mathcal Q}_q^o({\mathfrak a},{\mathfrak b},{\mathfrak c})\to {\mathcal Q}_q^o({\mathfrak a},{\mathfrak b}_1,{\mathfrak c}).$$ \end{Def}
We need to define another kind of replacement: Consider\begin{equation}\label{maxim} {\mathfrak a}<{\mathfrak b}_1<{\mathfrak b}<{\mathfrak c}. \end{equation}
\begin{Def}We say that $({\mathfrak a},{\mathfrak b},{\mathfrak c})$ is a {\bf d-splitting} of $({\mathfrak a},{\mathfrak c})$ if $${\mathcal C}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})={\mathcal C}_q({\mathfrak a},{\mathfrak c}).$$ In this case we will also say that $({\mathfrak a},{\mathfrak c})$ is a {\bf d-merger} of $({\mathfrak a},{\mathfrak b},{\mathfrak c})$. \end{Def}
To make this more definitive, one might further assume that ${\mathfrak b}$ is maximal amongst those satisfying (\ref{maxim}), but we will not need to do this here.
Similarly,
\begin{Def}We say that $({\mathfrak a},{\mathfrak b},{\mathfrak c})$ is a {\bf u-splitting} of $({\mathfrak a},{\mathfrak c})$ if $${\mathcal C}_q^o({\mathfrak a},{\mathfrak b},{\mathfrak c})={\mathcal C}_q^o({\mathfrak a},{\mathfrak c}).$$ Similarly, we will in this case also say that $({\mathfrak a},{\mathfrak c})$ is a {\bf u-merger} of $({\mathfrak a},{\mathfrak b},{\mathfrak c})$. \end{Def}
Our next definition combines the two preceding:
\begin{Def}\label{def84}A Schubert creation replacement $$a^+_{{\mathfrak a},{\mathfrak c}}:({\mathfrak a},{\mathfrak c})\rightarrow ({\mathfrak a},{\mathfrak b}_1,{\mathfrak c})$$ consists in a d-splitting $$({\mathfrak a},{\mathfrak c})\rightarrow ({\mathfrak a},{\mathfrak b},{\mathfrak c})$$ followed by a replacement $m_{{\mathfrak a},{\mathfrak c}}$ applied to $({\mathfrak a},{\mathfrak b},{\mathfrak c})$. A Schubert annihilation replacement $$a^-_{{\mathfrak a},{\mathfrak c}}:({\mathfrak a},{\mathfrak b}_1,{\mathfrak c})\rightarrow ({\mathfrak a},{\mathfrak c})$$ is defined as the reverse process.
Schubert creation/annihilation mutations $a^{o,\pm}_{{\mathfrak a},{\mathfrak c}}$ are defined analogously; $$a^{o,+}_{{\mathfrak a},{\mathfrak c}}: {\mathcal Q}_q^o({\mathfrak a},{\mathfrak c})\to {\mathcal Q}_q^o({\mathfrak a},{\mathfrak b}_1,{\mathfrak c}), $$and $$a^{o,-}_{{\mathfrak a},{\mathfrak c}}: {\mathcal Q}_q^o({\mathfrak a},{\mathfrak b}_1,{\mathfrak c})\to {\mathcal Q}_q^o({\mathfrak a},{\mathfrak c}). $$ We finally extend these Schubert creation/annihilation mutations into (we could do it more generally, but do not need to do so here) $${\mathcal Q}_q({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)\rightarrow {\mathcal Q}_q({\mathfrak r}_1,\dots, {\mathfrak r}_{n-2},\dots,{\mathfrak r}_{n\pm 1})$$ by inserting/removing an ${\mathfrak r}_x$ between ${\mathfrak r}_{\frac{n}2}$ and ${\mathfrak r}_{\frac{n}2+1}$ ($n$ even) or between ${\mathfrak r}_{\frac{n-1}2}$ and ${\mathfrak r}_{\frac{n+1}2}$ ($n$ odd). Similar maps are defined for the spaces
${\mathcal Q}_q^o({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)$. \end{Def}
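Schematically, and merely to fix ideas: for $n=3$ a Schubert creation mutation inserts a new element ${\mathfrak r}_x$ between ${\mathfrak r}_1$ and ${\mathfrak r}_2$, so that the resulting seed is built on a sequence of four elements, while the corresponding annihilation mutation removes such an element again; for $n=4$ the insertion takes place between ${\mathfrak r}_2$ and ${\mathfrak r}_3$.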
In the sequel, we will encounter expressions of the form $\check B(u,v,s)$;
\begin{equation}\label{99} \check B(u,v,s)=E_{u\sigma_s\Lambda_s,v\Lambda_s}^{-1}E_{ u\Lambda_s,v\sigma_s\Lambda_s}^{-1}\prod_{a_{ks}<0} E_{u\Lambda_k,v\Lambda_k}^{a_{ks}}\end{equation} where \begin{equation} E(u\Lambda_s,v\Lambda_s)\check B(u,v,s)=q^{-2(\Lambda_s,\alpha_s)}\check B(u,v,s)E(u\Lambda_s,v\Lambda_s), \end{equation} and where $\check B(u,v,s)$ commutes with all other elements in a given cluster.
\begin{Def} We say that $\check B(u,v,s)$ implies the change $$E_{u\Lambda_s,v\Lambda_s}\to E_{u\sigma_s\Lambda_s,v\sigma_s\Lambda_s}.$$ \end{Def}
We will only encounter such changes for which the set obtained from the initial cluster by removing $E_{u\Lambda_s,v\Lambda_s}$ and adding $E_{u\sigma_s\Lambda_s,v\sigma_s\Lambda_s}$ is again a cluster.
We further observe that a (column) vector with $-1$ at the positions corresponding to $E_{u\sigma_s\Lambda_s,v\Lambda_s}$ and $E_{u\Lambda_s,v\sigma_s\Lambda_s}$, and $ {a_{ks}}$ at each position corresponding to an $E_{u\Lambda_k,v\Lambda_k}$ with $a_{ks}<0$, has the property that the symplectic form of the original cluster, when applied to it, returns a vector whose only non-zero entry is $-2(\Lambda_s,\alpha_s)$ at the position corresponding to $E_{u\Lambda_s,v\Lambda_s}$. Hence, this can be a column vector of the $B$-matrix of a potential compatible pair.
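Schematically, and in ad hoc notation for this remark only: if ${\mathcal L}$ denotes the symplectic matrix of the original cluster and $b$ the column vector just described (indexed by the cluster variables), the observation is that $$ ({\mathcal L}b)_{j}=-2(\Lambda_s,\alpha_s)\,\delta_{j,j_0}, $$ where $j_0$ is the position of $E_{u\Lambda_s,v\Lambda_s}$; this single-non-zero-entry condition is precisely what is required of a column of the $B$-matrix in a compatible pair.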
Even more can be ascertained: it can be seen that the last two lines of Theorem~\ref{toric} precisely state that, with a $B$-matrix like that, the following holds:
\begin{Prop}The change $E_{u\Lambda_s,v\Lambda_s}\to E_{u\sigma_s\Lambda_s,v\sigma_s\Lambda_s}$ implied by $\check B(u,v,s)$ is the result of a BFZ mutation. \end{Prop}
\begin{Thm} The Schubert mutation $${\mathcal Q}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})\rightarrow{\mathcal Q}_q({\mathfrak a},{\mathfrak b}',{\mathfrak c})$$ implied by a replacement ${\mathfrak m}^{+}_{{\mathfrak a},{\mathfrak c}}$ as in (\ref{m+}) is the result of a series of BFZ mutations. \end{Thm}
\proof The number $s$ is given by (\ref{94}) and remains fixed throughout. We do the replacement in a number of steps. We set ${\mathcal Q}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})={\mathcal Q}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})(0)$ and perform changes \begin{eqnarray}
&{\mathcal Q}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})={\mathcal Q}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})(0)\rightarrow \\\nonumber& {\mathcal Q}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})(1)\rightarrow \dots\rightarrow {\mathcal Q}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})(t_0)={\mathcal Q}_q({\mathfrak a},{\mathfrak b}',{\mathfrak c}). \end{eqnarray}
We will see below that $t_0=s_{\mathfrak b}-s_{\mathfrak a}-1$. We set
\begin{equation}
\textrm{If } 0\leq t\leq t_0: {\mathcal Q}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})(t)=({\mathcal C}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})(t),{\mathcal L}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})(t),{\mathcal B}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})(t)). \end{equation} The intermediate seeds ${\mathcal Q}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})(t)$ with $0<t<t_0$ are not defined by strings $\tilde{\mathfrak a}\leq \tilde{\mathfrak b}\leq \tilde{\mathfrak c}$. At each $t$-level, only one column is replaced when passing from ${\mathcal B}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})(t)$ to ${\mathcal B}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})(t+1)$, and here (\ref{77}) is applied. Of course, the whole ${\mathcal B}$-matrix is given by (\ref{72}) and (\ref{75}) for a suitable seed.
Specifically, using (\ref{77}) we introduce a family of expressions $\check B$ as in (\ref{99})
\begin{eqnarray}\label{b-conv-bar-t}{B}^{{\mathfrak a},{\mathfrak b}(t), {\mathfrak c}}_{m^+}(s,t)= E_{\omega(s,s_{\mathfrak a}+t+1)\Lambda_s,\omega^{{\mathfrak b}}\Lambda_s}^{-1} E_{\omega(s,s_{\mathfrak a}+t)\Lambda_s,\omega^{{\mathfrak b}'}\Lambda_s}^{-1} \prod E_{\omega(s,s_{\mathfrak a}+t+1)\Lambda_j,\omega^{{\mathfrak b}'}\Lambda_j} ^{-a_{js}}\\\nonumber =(E^u_{\mathfrak b}(s,s_{\mathfrak a}+t+1)E^u_{{\mathfrak b}'}(s,s_{\mathfrak a}+t))^{-1}\prod E^u_{{\mathfrak b}}(j,\overline p(j,s,s_{\mathfrak a}+t+1))^{-a_{js}}, \end{eqnarray} implying the changes \begin{equation} E^u_{{\mathfrak b}}(s,s_{\mathfrak a}+t)\rightarrow E^u_{{\mathfrak b}'}(s,s_{\mathfrak a}+t+1). \end{equation}
If $\omega(s,s_{\mathfrak a}+t+1)=u_t\sigma_s$ and $v=\omega^{\mathfrak b}$ then this corresponds to \begin{equation} \left(E(u_t\sigma_s\Lambda_s,v\Lambda_s)E(u_t\Lambda_s, v\sigma_s\Lambda_s)\prod_j E(u_t\Lambda_j,v\Lambda_j)^{a_{js}}\right)^{-1}. \end{equation}
Here, then, is how the changes are performed, in detail:
\begin{eqnarray*}{\mathrm Step}(0):&&\\{\mathcal C}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})\ni E^d_{{\mathfrak a}}(s,s_{\mathfrak b}+1)&\rightarrow& E^u_{{\mathfrak b}'}(s,s_{\mathfrak a})\in {\mathcal C}_q({\mathfrak a},{\mathfrak b}(0),{\mathfrak c}) \ (renaming),\\ {B}_q^{{\mathfrak a},{\mathfrak b}, {\mathfrak c}}(s,s_{\mathfrak a})&\rightarrow&{B}^{{\mathfrak a},{\mathfrak b}(0), {\mathfrak c}}_{m^+}(s,0) \ (renaming), \\{\mathcal L}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})&\rightarrow& {\mathcal L}_q({\mathfrak a},{\mathfrak b}(0),{\mathfrak c}) \ (renaming), \\{\mathrm Step}(1): && (implied\ by \ {B}^{{\mathfrak a},{\mathfrak b}(0), {\mathfrak c}}_{m^+}(s,0) ),
\\{\mathcal C}_q({\mathfrak a},{\mathfrak b}(0),{\mathfrak c}) \ni E^u_{{\mathfrak b}}(s,s_{\mathfrak a})&\rightarrow& E^u_{{\mathfrak b}'}(s,s_{\mathfrak a}+1) \in {\mathcal C}_q^d({\mathfrak a},{\mathfrak b}(1),{\mathfrak c}) , \\{B}_q^{{\mathfrak a},{\mathfrak b}, {\mathfrak c}}(s,s_{\mathfrak a}+1)& \rightarrow&{B}^{{\mathfrak a},{\mathfrak b}(1), {\mathfrak c}}_{m^+}(s,1) (by\ (\ref{77})), \\{\mathcal L}_q({\mathfrak a},{\mathfrak b}(0),{\mathfrak c})&\rightarrow& {\mathcal L}_q({\mathfrak a},{\mathfrak b}(1),{\mathfrak c}) \ (implied),\\{\mathrm Step}(2): && (implied\ by \ {B}^{{\mathfrak a},{\mathfrak b}(1), {\mathfrak c}}_{m^+}(s,1) ), \\ {\mathcal C}_q^d({\mathfrak a},{\mathfrak b}(1),{\mathfrak c}) \ni E^u_{{\mathfrak b}}(s,s_{\mathfrak a}+1)&\rightarrow& E^u_{{\mathfrak b}'}(s,s_{\mathfrak a}+2)\in {\mathcal C}_q^d({\mathfrak a},{\mathfrak b}(2),{\mathfrak c}),\\\vdots\ \\{\mathrm Step}(t+1): && (implied\ by \ {B}^{{\mathfrak a},{\mathfrak b}(t), {\mathfrak c}}_{m^+}(s,t) ),\\ {\mathcal C}_q^d({\mathfrak a},{\mathfrak b}(t),{\mathfrak c}) \ni E^u_{{\mathfrak b}}(s,s_{\mathfrak a}+t)&\rightarrow& E^u_{{\mathfrak b}'}(s,s_{\mathfrak a}+t+1)\in {\mathcal C}_q^d({\mathfrak a},{\mathfrak b}(t+1),{\mathfrak c}) , \\{B}_q^{{\mathfrak a},{\mathfrak b}, {\mathfrak c}}(s,s_{\mathfrak a}+t)& \rightarrow&{B}^{{\mathfrak a},{\mathfrak b}(t), {\mathfrak c}}_{m^+}(s,t) (by\ (\ref{77})),\\{\mathcal L}_q({\mathfrak a},{\mathfrak b}(t),{\mathfrak c})&\rightarrow& {\mathcal L}_q({\mathfrak a},{\mathfrak b}(t+1),{\mathfrak c}) \ (implied). \end{eqnarray*}
The last step is $t=s_{\mathfrak b}-s_{\mathfrak a}-1$; here ${\mathfrak b}(0)={\mathfrak b}$ and ${\mathfrak b}(s_{\mathfrak b}-s_{\mathfrak a}-1)={\mathfrak b}'$.
It is easy to see that all intermediate sets indeed are seeds.
What is missing now is to connect, via a change of basis transformation of the compatible pair, with the ``$E,F$'' matrices of \cite{bz}. Here we notice that both terms \begin{equation} (E^u_{\mathfrak b}(s,s_{\mathfrak a}+t+1)E^u_{{\mathfrak b}'}(s,s_{\mathfrak a}+t))^{-1}(E^u_{\mathfrak b}(s,s_{\mathfrak a}+t))^{-1} \end{equation} and
\begin{equation} \prod E^u_{{\mathfrak b}}(j,\overline p(j,s,s_{\mathfrak a}+t+1))^{-a_{js}}(E^u_{\mathfrak b}(s,s_{\mathfrak a}+t))^{-1} \end{equation}
have the same $q$-commutators as $E^u_{{\mathfrak b}'}(s,s_{\mathfrak a}+t+1)$. The two possibilities correspond to the two signs in formulas (3.2) and (3.3) in \cite{bz}.
Indeed, the linear transformation \begin{equation}E(t):E^u_{\mathfrak b}(s,s_{\mathfrak a}+t)\rightarrow -E^u_{\mathfrak b}(s,s_{\mathfrak a}+t+1)-E^u_{{\mathfrak b}'}(s,s_{\mathfrak a}+t)-E^u_{\mathfrak b}(s,s_{\mathfrak a}+t) \end{equation} results in a change-of-basis on the level of forms: \begin{eqnarray}{\mathcal L}_q({\mathfrak a},{\mathfrak b}(t),{\mathfrak c})\rightarrow{\mathcal L}_q({\mathfrak a},{\mathfrak b}(t+1),{\mathfrak c})&=&E^T(t){\mathcal L}_q({\mathfrak a},{\mathfrak b}(t),{\mathfrak c})E(t),\\\nonumber {\mathcal B}_{m^+}^{{\mathfrak a},{\mathfrak b}(t),{\mathfrak c}}(s,t)\rightarrow{\mathcal B}_{m^+}^{{\mathfrak a},{\mathfrak b}(t+1),{\mathfrak c}}(s,t+1)&=&E(t){\mathcal B}_{m^+}^{{\mathfrak a},{\mathfrak b}(t),{\mathfrak c}}(s,t)F(t), \end{eqnarray} where $F(t)$ is a truncated part of $E(t)^T$ (the restriction to the mutable elements).
With this, the proof is complete. \qed
\begin{Thm} Any ${\mathcal Q}_q({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)$ can be obtained from ${\mathcal Q}_q({\mathfrak e},{\mathfrak p})$ as a sub-seed and any ${\mathcal Q}_q^o({\mathfrak r}_1,\dots, {\mathfrak r}_{n-1},{\mathfrak r}_n)$ can be obtained from ${\mathcal Q}_q^o({\mathfrak e},{\mathfrak p})$ as a sub-seed through a series of Schubert creation and annihilation mutations. These mutations are, apart from the trivial actions of renaming, splitting, merging, or simple restrictions, composites of BFZ-mutations. \end{Thm}
\proof Apart from mergers and splittings (Definition~\ref{def84}), the mutations are composites of mutations of the form ${\mathcal Q}_q({\mathfrak a},{\mathfrak b},{\mathfrak c})\to {\mathcal Q}_q({\mathfrak a},{\mathfrak b}',{\mathfrak c})$. \qed
\begin{Cor}\label{cor8.7}The algebras ${\mathcal A}_q^{d,{\mathfrak a},{\mathfrak c}}$ and ${\mathcal A}_q^{u,{\mathfrak a},{\mathfrak c}}$ are mutation equivalent and indeed are equal. We denote henceforth this algebra by ${\mathcal A}^{{\mathfrak a},{\mathfrak c}}$. This is the quadratic algebra generated by the elements $\beta_{c,d}$ with $c_{\mathfrak a}<d\leq c_{\mathfrak c}$. \end{Cor}
We similarly denote the corresponding skew-field of fractions by ${\mathcal F}_q^{{\mathfrak a},{\mathfrak c}}$.
\section{Prime}
\begin{Def} \begin{equation}{\det}_{s}^{{\mathfrak a},{\mathfrak c}}:=E_{\omega^{\mathfrak a}\Lambda_s,\omega^{\mathfrak c}\Lambda_s}.\end{equation} \end{Def}
\begin{Thm}\label{8.6} The two-sided ideal $I({\det}_{s}^{{\mathfrak a},{\mathfrak c}})$ in ${\mathcal A}_q({\mathfrak a},{\mathfrak c})$ generated by the covariant and non-mutable element ${\det}_{s}^{{\mathfrak a},{\mathfrak c}}$ is \underline{prime} for each $s$. \end{Thm}
\proof We proceed by induction. The induction start is trivially satisfied. Let us then divide the induction step into two cases. First, let $Z_\gamma$ be an annihilation-mutation site of $\omega^{\mathfrak c}$ such that $\omega^{\mathfrak c}=\sigma_\gamma\omega^{{\mathfrak c}_1}=\omega^{{\mathfrak c}_1}\sigma_{\alpha_s}$ with $\omega^{{\mathfrak c}_1}\in W^p$. We clearly have ${\mathcal A}_q({\mathfrak a},{\mathfrak c})= {\mathcal A}_q({\mathfrak a},{\mathfrak c}_1)\cup I({\det}_{s}^{{\mathfrak a},{\mathfrak c}})$. Furthermore, ${\mathcal A}_q({\mathfrak a},{\mathfrak c})\setminus {\mathcal A}_q({\mathfrak a},{\mathfrak c}_1) =I_\ell(Z_\gamma)$, where $I_\ell(Z_\gamma)$ denotes the left ideal generated by $Z_\gamma$. We might as well consider the right ideal, but not the two-sided ideal, since in general there will be terms ${\mathcal R}$ of lower order, cf. Theorem~\ref{4.1}.
It follows that \begin{equation}\label{Z}{\det}_{s}^{{\mathfrak a},{\mathfrak c}}=M_1Z_\gamma +M_2\end{equation} where $M_1,M_2\in {\mathcal A}_q({\mathfrak a},{\mathfrak c}_1)$ and $M_1\neq0$. Indeed, $M_1$ is a non-zero multiple of ${\det}_{s}^{{\mathfrak a},{\mathfrak c}_1}$. (If $s_{\mathfrak c}=1$ then $M_1=1$ and $M_2=0$.) We also record, partly for later use, that $Z_\gamma$ $q$-commutes with everything up to correction terms from ${\mathcal A}_q({\mathfrak a},{\mathfrak c}_1)$.
Notice that we use Corollary~\ref{cor8.7}.
Now consider an equation \begin{equation}
{\det}_{s}^{{\mathfrak a},{\mathfrak c}}p_1=p_2p_3 \end{equation} with $p_1,p_2,p_3\in {\mathcal A}_q({\mathfrak a},{\mathfrak c})$. Use (\ref{Z}) to write for each $i=1,2,3$ \begin{equation}
p_i=\sum_{k=0}^{n_i}({\det}_{s}^{{\mathfrak a},{\mathfrak c}})^kN_{i,k} \end{equation} where each $N_{i,k}\in {\mathbf L}_q({\mathfrak a},{\mathfrak c}_1)$, and assume that $N_{i,0}\neq0$ for $i=2,3$. Then $0\neq N_{2,0}N_{3,0}\in {\mathbf L}_q({\mathfrak a},{\mathfrak c}_1)$. At the same time, \begin{equation}
N_{2,0}N_{3,0}=\sum_{k\geq1}({\det}_{s}^{{\mathfrak a},{\mathfrak c}})^k\tilde N_{k} \end{equation} for certain elements $\tilde N_{k} \in {\mathbf L}_q({\mathfrak a},{\mathfrak c}_1)$.
Using the linear independence (\cite[Proposition~10.8]{bz}) we easily get a contradiction by looking at the leading term in ${\det}_{s}^{{\mathfrak a},{\mathfrak c}}$.
Now in the general case, the $s$ in ${\det}_{s}^{{\mathfrak a},{\mathfrak c}}$ is given and we may write ${\omega}^{\mathfrak c}={\omega}^{{\mathfrak c}_2}\sigma_{s}\tilde{\omega}$ where $\sigma_s$ does not occur in $\tilde{\omega}$. Let ${\omega}^{{\mathfrak c}_1}={\omega}^{{\mathfrak c}_2}\sigma_s$. It is clear that ${\det}_{s}^{{\mathfrak a},{\mathfrak c}}={\det}_{s}^{{\mathfrak a},{\mathfrak c}_1}$ and, by the previous case, ${\det}_{s}^{{\mathfrak a},{\mathfrak c}_1}$ is prime in ${\mathcal A}_q({\mathfrak a},{\mathfrak c}_1)$. We have that ${\mathcal A}_q({\mathfrak a},{\mathfrak c}_1)$ is an algebra in its own right. Furthermore, \begin{equation}{\mathcal A}_q({\mathfrak a},{\mathfrak c})={\mathcal A}_q({\mathfrak a},{\mathfrak c}_1)[Z_{\gamma_1},\dots,Z_{\gamma_n}], \end{equation} where the Lusztig elements $Z_{\gamma_1},\dots,Z_{\gamma_n}$ are bigger than the generators of ${\mathcal A}_q({\mathfrak a},{\mathfrak c}_1)$. In a PBW basis we can put them to the right. They even generate a quadratic algebra $\tilde{\mathcal A}_q$ in their own right! The equations we need to consider are of the form \begin{equation}p_1p_2={\det}_{s}^{{\mathfrak a},{\mathfrak c}_1}p_3 \end{equation} with $p_1,p_2,p_3\in {\mathcal A}_q({\mathfrak a},{\mathfrak c})$. The claim that at least one of $p_1,p_2$ contains a factor of ${\det}_{s}^{{\mathfrak a},{\mathfrak c}_1}$ follows by an easy induction on the $\tilde{\mathcal A}_q$ degree of $p_1p_2$, i.e. the sum of the $\tilde{\mathcal A}_q$ degrees of $p_1$ and $p_2$. \qed
\section{Upper}
Let $\omega^{\mathfrak a}, \omega^{\mathfrak c}\in W^p$ and ${\mathfrak a}<{\mathfrak c}$.
\begin{Def} The cluster algebra ${\mathbf A}_q({\mathfrak a},{\mathfrak c})$ is the ${\mathbb Z}[q]$-algebra generated in the space ${\mathcal F}_q({\mathfrak a},{\mathfrak c})$ by the inverses of the non-mutable elements ${\mathcal N}_q({\mathfrak a},{\mathfrak c})$ together with the union of the sets of all variables obtainable from the initial seed ${\mathcal Q}_q({\mathfrak a},{\mathfrak c})$ by composites of quantum Schubert mutations (appropriately applied). \end{Def}
Observe that we include ${\mathcal N}_q({\mathfrak a},{\mathfrak c})$ in the set of variables.
\begin{Def} The upper cluster algebra ${\mathbf U}_q({\mathfrak a},{\mathfrak c})$ connected with the same pair $\omega^{\mathfrak a}, \omega^{\mathfrak c}\in W^p$ is the ${\mathbb Z}[q]$-algebra in ${\mathcal F}_q({\mathfrak a},{\mathfrak c})$ given as the intersection of all the Laurent algebras of the sets of variables obtainable from the initial seed ${\mathcal Q}_q({\mathfrak a},{\mathfrak c})$ by composites of quantum Schubert mutations (appropriately applied). \end{Def}
\begin{Prop} $${\mathcal A}_q({\mathfrak a},{\mathfrak c})\subseteq {\mathbf A}_q({\mathfrak a},{\mathfrak c})\subset {\mathbf U}_q({\mathfrak a},{\mathfrak c}).$$ \end{Prop}
\proof The first inclusion follows from \cite{leclerc}, the other is the quantum Laurent phenomenon. \qed
\begin{Rem} Our terminology may seem a bit unfortunate since the notions of a cluster algebra and an upper cluster algebra have already been introduced by Berenstein and Zelevinsky in terms of all mutations. We only use quantum line mutations, which form a proper subset of the set of all quantum mutations. However, it will be a corollary to what follows that the two notions in fact coincide, and for this reason we do not introduce any auxiliary notation. \end{Rem}
\begin{Thm}
$${\mathbf U}_q({\mathfrak a},{\mathfrak c})={\mathcal A}_q({\mathfrak a},{\mathfrak c})[({\det}_{s}^{{\mathfrak a},{\mathfrak c}})^{-1}; s\in Im(\pi_{{\mathfrak c}})].$$
\end{Thm}
\proof This follows by induction on $\ell(\omega^{\mathfrak c})$ (with start at $\ell(\omega^{\mathfrak a})+1$) in the same way as in the proof of \cite[Theorem~8.5]{jz}, but for clarity we give the details: Let the notation and assumptions be as in the proof of Theorem~\ref{8.6}. First of all, the induction start is trivial since there we are looking at the generator of a Laurent quasi-polynomial algebra. Let then $u\in {\mathbf U}_q({\mathfrak a},{\mathfrak c})$. We will argue by contradiction, and just as in the proof of \cite[Theorem~8.5]{jz}, one readily sees that one may assume that $u\in {\mathcal A}_q({\mathfrak a},{\mathfrak c}_1)[({\det}_{s}^{{\mathfrak a},{\mathfrak c}_1})^{-1}, {\det}_{s}^{{\mathfrak a},{\mathfrak c}}]$. Using (\ref{Z}) we may now write \begin{equation} \label{neg1} u=\left(\sum_{i=0}^K Z_\gamma^ip_i({\det}_{s}^{{\mathfrak a},{\mathfrak c}_1})^{k_i}\right)({\det}_{s}^{{\mathfrak a},{\mathfrak c}_1})^{-\rho}, \end{equation} with $p_i\in {\mathcal A}_q({\mathfrak a},{\mathfrak c}_1)$, $p_i\notin I({\det}_{s}^{{\mathfrak a},{\mathfrak c}_1})$, and $k_i\geq0$. Our assumption is that $\rho>0$. Recall that the elements ${\det}_{s}^{{\mathfrak a},{\mathfrak c}_1}$ and ${\det}_{s}^{{\mathfrak a},{\mathfrak c}}$ are covariant and define prime ideals in the appropriate algebras. Using the fact that ${\mathbf U}_q({\mathfrak a},{\mathfrak c})$ is an algebra containing ${\mathcal A}_q({\mathfrak a},{\mathfrak c})$, we can assume that the expression in the left bracket in (\ref{neg1}) is not in $I({\det}_{s}^{{\mathfrak a},{\mathfrak c}})$ and we may further assume that $p_i\neq0\Rightarrow k_i<\rho$. To wit, one can remove the factors of ${\det}_{s}^{{\mathfrak a},{\mathfrak c}}$, then remove the terms with $k_i\geq \rho$, then possibly repeat this process a number of times.
Consider now the cluster ${\mathcal C}_q^{u}({\mathfrak a},{\mathfrak c})$. We know that $u$ can be written as a Laurent quasi-polynomial in the elements of ${\mathcal C}_q^{u}({\mathfrak a},{\mathfrak c})$. By factoring out, we can then write
\begin{equation}\label{neg2} u=p\prod_{(c,d)\in{\mathbb U}^{u,{\mathfrak a},{\mathfrak c}}}(E^u_{\mathfrak c}(c,d))^{-\alpha_{c,d}}, \end{equation} where $p\in {\mathcal A}_q({\mathfrak a},{\mathfrak c})$, and $\alpha_{c,d}\geq0$. We will compare this to (\ref{neg1}). For the sake of this argument set $\tilde{\mathbb U}^{u,{\mathfrak a},{\mathfrak c}}=\{(c,d)\in {\mathbb U}^{u,{\mathfrak a},{\mathfrak c}}\mid \alpha_{c,d}>0\}$.
Of course, ${\det}_s^{{\mathfrak a},{\mathfrak c}}\in {\mathcal C}_q^{u}({\mathfrak a},{\mathfrak c})$.
``Multiplying across'', we get from (\ref{neg1}) and (\ref{neg2}), possibly absorbing some terms into $p$: \begin{equation} (\sum_{i=0}^K Z_\gamma^ip_i({\det}^{{\mathfrak a},{\mathfrak c}_1}_{s})^{k_i})\prod_{(c,d)\in\tilde{\mathbb U}^{u,{\mathfrak a},{\mathfrak c}}}(E^u_{\mathfrak c}(c,d))^{\alpha_{c,d}}=p({\det}^{{\mathfrak a},{\mathfrak c}_1}_{s})^{\rho}. \end{equation} Any factor of ${\det}^{{\mathfrak a},{\mathfrak c}}_{s}$ in $p$ will have to be canceled by a similar factor of $E^u_{\mathfrak c}(s,0)$ in the left-hand side, so we can assume that $p$ contains no factor of ${\det}^{{\mathfrak a},{\mathfrak c}}_{s}$. After that we can assume that $(s,0)\notin \tilde{\mathbb U}^{u,{\mathfrak a},{\mathfrak c}}$ since clearly ${\det}^{{\mathfrak a},{\mathfrak c}_1}_{s}\notin I({\det}^{{\mathfrak a},{\mathfrak c}}_{s})$. Using that $k_i<\rho$ it follows that there must be a factor of $({\det}^{{\mathfrak a},{\mathfrak c}_1}_{s})$ in $\prod_{(c,d)\in\tilde{\mathbb U}^{u,{\mathfrak a},{\mathfrak c}}}(E^u_{\mathfrak c}(c,d))^{\alpha_{c,d}}$. Here, as just noticed, $d=0$ is excluded. The other terms do not contain $Z_{s,1}$ but $({\det}^{{\mathfrak a},{\mathfrak c}_1}_{s})$ does. This is an obvious contradiction. \qed
\section{The diagonal of a quantized minor}
\begin{Def}Let ${\mathfrak a}<{\mathfrak b}$. The diagonal, ${\mathbb D}_{\omega^{\mathfrak a}(\Lambda_s),\omega^{\mathfrak b}(\Lambda_s)}$, of $E_{\omega^{\mathfrak a}(\Lambda_s),\omega^{\mathfrak b}(\Lambda_s)}$ is set to \begin{equation} {\mathbb D}_{\omega^{\mathfrak a}(\Lambda_s),\omega^{\mathfrak b}(\Lambda_s)}=q^{\alpha}Z_{s,s_{\mathfrak a}+1}\cdots Z_{s,s_{\mathfrak b}}, \end{equation} where \begin{equation} Z_{s,s_{\mathfrak b}}\cdots Z_{s,s_{\mathfrak a}+1}=q^{2\alpha}Z_{s,s_{\mathfrak a}+1}\cdots Z_{s,s_{\mathfrak b}} + {\mathcal R}, \end{equation} and where the terms ${\mathcal R}$ are of lower order. \end{Def}
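For instance, in the shortest case $s_{\mathfrak b}-s_{\mathfrak a}=1$ the product consists of the single factor $Z_{s,s_{\mathfrak b}}$, the defining relation holds with ${\mathcal R}=0$, so that one may take $\alpha=0$, and the diagonal is simply ${\mathbb D}_{\omega^{\mathfrak a}(\Lambda_s),\omega^{\mathfrak b}(\Lambda_s)}=Z_{s,s_{\mathfrak b}}$.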
\begin{Prop} $$E_{\omega^{\mathfrak a}(\Lambda_s),\omega^{\mathfrak b}(\Lambda_s)}={\mathbb D}_{\omega^{\mathfrak a}(\Lambda_s),\omega^{\mathfrak b}(\Lambda_s)}+{\mathcal R}.$$ The terms in ${\mathcal R}$ are of lower order in our ordering induced by $\leq_L$. They can in theory be determined from the fact that the full polynomial belongs to the dual canonical basis (\cite{bz}, \cite{leclerc}). \end{Prop}
\proof We prove this by induction on the length $s_{\mathfrak b}-s_{\mathfrak a}$ of any $s$-diagonal. When this length is $1$ we have at most a quasi-polynomial algebra and here the case is clear. Consider then a creation-mutation site where we go from length $r$ to $r+1$: Obviously, it is only the very last determinant we need to consider. Here we use the equation in Theorem~\ref{3.2} but reformulate it in terms of the elements $E_{\xi,\eta}$, cf. Theorem~\ref{toric}.
Set $\omega^{{\mathfrak b}_1}=\omega^{{\mathfrak b}}\sigma_s$ and consider $E_{\omega^{\mathfrak a}(\Lambda_s),\omega^{{\mathfrak b}_1}(\Lambda_s)}$. Its weight is given as $$\omega^{{\mathfrak b}_1}(\Lambda_s)-\omega^{\mathfrak a}(\Lambda_s)=\beta_{s,s_{\mathfrak a}+1}+\dots+\beta_{s,s_{\mathfrak b}+1}.$$ In the recast version of Theorem~\ref{3.2}, the terms on the left-hand side are covered by the induction hypothesis. The second term on the right-hand side contains no element of the form $Z_{s,s_{{\mathfrak b}_1}}$ and it follows that we have an equation
\begin{equation}(Z_{s,s_{\mathfrak a}+2}\cdots Z_{s,s_{\mathfrak b}})E_{\omega^{\mathfrak a}(\Lambda_s),\omega^{{\mathfrak b}_1}(\Lambda_s)}=(Z_{s,s_{\mathfrak a}+2}\cdots Z_{s,s_{\mathfrak b}+1}) (Z_{s,s_{\mathfrak a}+1}\cdots Z_{s,s_{\mathfrak b}})
+{\mathcal R}. \end{equation} The claim follows easily from that. \qed
Recall that the associated quasi-polynomial algebra is the algebra with relations corresponding to the top terms, i.e., colloquially speaking, the algebra obtained by setting the lower order terms ${\mathcal R}$ equal to $0$. Let \begin{equation}
{d}_{\omega^{\mathfrak r}_{s,t_1}(\Lambda_s),\omega^{\mathfrak r}_{s,t}(\Lambda_s)}=z_{s,t_1+1}\cdots z_{s,t}. \end{equation}
The following shows the importance of the diagonals:
\begin{Thm} \begin{eqnarray} d_{u\cdot\Lambda_{i_0},v\cdot\Lambda_{i_0}}d_{u_1\cdot\Lambda_{i_1}, v_1\cdot\Lambda_{i_1}}&=& q^G d_{u_1\cdot\Lambda_{i_1},v_1\cdot\Lambda_{i_1}} d_{u\cdot\Lambda_{i_0},v\cdot\Lambda_{i_0}}\Leftrightarrow\\ {\mathbb D}_{u\cdot\Lambda_{i_0},v\cdot\Lambda_{i_0}}{\mathbb D}_{u_1\cdot\Lambda_{i_1},v_1\cdot\Lambda_{i_1}}&=& q^G {\mathbb D}_{u_1\cdot\Lambda_{i_1},v_1\cdot\Lambda_{i_1}} {\mathbb D}_{u\cdot\Lambda_{i_0},v\cdot\Lambda_{i_0}}+ {\mathcal R}
\end{eqnarray}In particular, if the two elements ${E}_{u\cdot\Lambda_{i_0},v\cdot\Lambda_{i_0}}$ and ${E}_{u_1\cdot\Lambda_{i_1}, v_1\cdot\Lambda_{i_1}}$ $q$-commute: \begin{equation} {E}_{u\cdot\Lambda_{i_0},v\cdot\Lambda_{i_0}}{E}_{u_1\cdot\Lambda_{i_1}, v_1\cdot\Lambda_{i_1}}= q^G {E}_{u_1\cdot\Lambda_{i_1},v_1\cdot\Lambda_{i_1}} {E}_{u\cdot\Lambda_{i_0},v\cdot\Lambda_{i_0}}
\end{equation}then $G$ can be computed in the associated quasi-polynomial algebra:
\begin{equation} d_{u\cdot\Lambda_{i_0},v\cdot\Lambda_{i_0}}d_{u_1\cdot\Lambda_{i_1}, v_1\cdot\Lambda_{i_1}}= q^G d_{u_1\cdot\Lambda_{i_1},v_1\cdot\Lambda_{i_1}} d_{u\cdot\Lambda_{i_0},v\cdot\Lambda_{i_0}}.\end{equation} \end{Thm}
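Concretely, and in ad hoc notation: if the associated quasi-polynomial algebra has defining relations $z_{s,t}z_{s',t'}=q^{c((s,t),(s',t'))}z_{s',t'}z_{s,t}$, then moving the factors of $d_{u\cdot\Lambda_{i_0},v\cdot\Lambda_{i_0}}$ past those of $d_{u_1\cdot\Lambda_{i_1},v_1\cdot\Lambda_{i_1}}$ one at a time gives $$ G=\sum c\big((s,t),(s',t')\big), $$ the sum running over all pairs consisting of one factor from each of the two diagonals.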
\begin{Rem} One can also compute $G$ directly using the formulas in \cite{bz}. \end{Rem}
\begin{Rem} The elements $E_{\xi,\eta}$ that we consider belong to the dual canonical basis. As such, they can in principle be determined from the highest order terms ${\mathbb D}_{\xi,\eta}$. \end{Rem}
\section{Literature}
\end{document}
\begin{document}
\title{On the Basis Property of the Root Functions of Some Class of Non-self-adjoint Sturm--Liouville Operators.} \author{Cemile Nur\\{\small Depart. of Math., Dogus University, Ac\i badem, Kadik\"{o}y, \ }\\{\small Istanbul, Turkey.}\ {\small e-mail: [email protected]} \and O. A. Veliev\\{\small Depart. of Math., Dogus University, Ac\i badem, Kadik\"{o}y, \ }\\{\small Istanbul, Turkey.}\ {\small e-mail: [email protected]}} \date{} \maketitle
\begin{abstract} We obtain the asymptotic formulas for the eigenvalues and eigenfunctions of the Sturm-Liouville operators with some regular boundary conditions. Using these formulas, we find sufficient conditions on the potential $q$ such that the root functions of these operators do not form a Riesz basis.
Key Words: Asymptotic formulas, regular boundary conditions, Riesz basis.
AMS Mathematics Subject Classification: 34L05, 34L20.
\end{abstract}
\section{Introduction and Preliminary Facts}
Let $T_{1},T_{2},T_{3}$ and $T_{4}$ be the operators generated in $L_{2}[0,1]$ by the differential expression \begin{equation} l\left( y\right) =-y^{\prime\prime}+q(x)y \end{equation} and the following boundary conditions: \begin{equation} y_{0}^{\prime}+\beta y_{1}^{\prime}=0,\text{ }y_{0}-y_{1}=0, \end{equation} \begin{equation} y_{0}^{\prime}+\beta y_{1}^{\prime}=0,\text{ }y_{0}+y_{1}=0, \end{equation} \begin{equation} y_{0}^{\prime}-y_{1}^{\prime}=0,\text{ }y_{0}+\alpha y_{1}=0, \end{equation} and \begin{equation} y_{0}^{\prime}+y_{1}^{\prime}=0,\text{ }y_{0}+\alpha y_{1}=0 \end{equation} respectively, where $q(x)$ is a complex-valued summable function on $[0,1]$, $\beta\neq\pm1$ and $\alpha\neq\pm1.$
In conditions (2), (3), (4) and (5), if $\beta=1,$ $\beta=-1,$ $\alpha=1$ and $\alpha=-1$ respectively, then any $\lambda\in\mathbb{C}$ is an eigenvalue of infinite multiplicity. In (2) and (4), if $\beta=-1$ and $\alpha=-1$, then they are the periodic boundary conditions; in (3) and (5), if $\beta=1$ and $\alpha=1$, then they are the antiperiodic boundary conditions.
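Indeed, with $\beta=-1$ condition (2) becomes $y_{0}^{\prime}-y_{1}^{\prime}=0,$ $y_{0}-y_{1}=0$, which is exactly the periodic case, and with $\beta=1$ condition (3) becomes $y_{0}^{\prime}+y_{1}^{\prime}=0,$ $y_{0}+y_{1}=0$, the antiperiodic case; (4) with $\alpha=-1$ and (5) with $\alpha=1$ are checked in the same way.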
These boundary conditions are regular but not strongly regular. Note that if the boundary conditions are strongly regular, then the root functions form a Riesz basis (this result was proved independently in [6], [10] and [17]). In the case when an operator is regular but not strongly regular, the root functions generally do not form even a usual basis. However, Shkalikov [20], [21] proved that they can be combined in pairs, so that the corresponding 2-dimensional subspaces form a Riesz basis of subspaces.
Among the regular but not strongly regular boundary conditions, the periodic and antiperiodic ones are the most commonly studied. Therefore, let us briefly describe some historical developments related to the Riesz basis property of the root functions of the periodic and antiperiodic boundary value problems. The first results were obtained by Kerimov and Mamedov [8]. They established that if \[ q\in C^{4}[0,1],\ q(1)\neq q(0), \] then the root functions of the operator $L_{0}(q)$ form a Riesz basis in $L_{2}[0,1],$ where $L_{0}(q)$ denotes the operator generated by (1) and the periodic boundary conditions.
The first result in terms of the Fourier coefficients of the potential $q$ was obtained by Dernek and Veliev [1]. They proved that if the conditions \begin{align} \lim_{n\rightarrow\infty}\frac{\ln\left\vert n\right\vert }{nq_{2n}} & =0,\text{ }\\ q_{2n} & \sim q_{-2n} \end{align} hold, then the root functions of $L_{0}(q)$ form a Riesz basis in $L_{2} [0,1]$, where $q_{n}:=(q,e^{i2\pi nx})$ is the Fourier coefficient of $q$ and everywhere, without loss of generality, it is assumed that $q_{0}=0.$ Here $(.,.)$ denotes the inner product in $L_{2}[0,1]$ and $a_{n}\sim b_{n}$ means that $a_{n}=O(b_{n})$ and $b_{n}=O(a_{n})$ as $n\rightarrow\infty.$ Makin [11] improved this result. Using another method he proved that the assertion on the Riesz basis property remains valid if condition (7) holds, but condition (6) is replaced by a less restrictive one: $q\in W_{1}^{s}[0,1],$ \[ q^{(k)}(0)=q^{(k)}(1),\quad\forall\,k=0,1,...,s-1 \] holds and $\mid q_{2n}\mid>cn^{-s-1}$ with some $c>0$ for sufficiently large $n,$ where $s$ is a nonnegative integer. Besides, some conditions which imply the absence of the Riesz basis property were presented in [11]. Shkalikov and Veliev obtained in [22] more general results which cover all results discussed above.
The other interesting results about periodic and antiperiodic boundary conditions were obtained in [2-5, 7, 14-16, 24, 25].
The basis properties of some other regular but not strongly regular problems are studied in [9,12,13]. It was proved in [12] that the system of the root functions of the operator generated by (1) and the boundary conditions \begin{align*} y^{\prime}\left( 1\right) -\left( -1\right) ^{\sigma}y^{\prime}\left( 0\right) +\gamma y\left( 0\right) & =0\\ y\left( 1\right) -\left( -1\right) ^{\sigma}y\left( 0\right) & =0, \end{align*} forms an unconditional basis of the space $L_{2}[0,1]$, where $q\left( x\right) $ is an arbitrary complex-valued function from the class $L_{1}[0,1]$, $\gamma$ is an arbitrary nonzero complex constant and $\sigma=0,1$. Kerimov and Kaya [9] proved that the system of the root functions of the spectral problem \begin{align*} y^{\left( 4\right) }+p_{2}\left( x\right) y^{\prime\prime}+p_{1}\left( x\right) y^{\prime}+p_{0}\left( x\right) y & =\lambda y,\text{ }0<x<1,\\ y^{\left( s\right) }\left( 1\right) -\left( -1\right) ^{\sigma }y^{\left( s\right) }\left( 0\right) +\sum_{l=0}^{s-1}\alpha _{s,l}y^{\left( l\right) }\left( 0\right) & =0,\text{ }s=1,2,3,\\ y\left( 1\right) -\left( -1\right) ^{\sigma}y\left( 0\right) & =0, \end{align*} forms a basis in the space $L_{p}\left( 0,1\right) $, $1<p<\infty$, when $\alpha_{3,2}+\alpha_{1,0}\neq\alpha_{2,1}$, $p_{j}\left( x\right) \in W_{1}^{j}\left( 0,1\right) $, $j=1,2$, and $p_{0}\left( x\right) \in L_{1}\left( 0,1\right) $; moreover, this basis is unconditional for $p=2$, where $\lambda$ is a spectral parameter; $p_{j}\left( x\right) \in L_{1}\left( 0,1\right) $, $j=1,2,3$, are complex-valued functions; $\alpha_{s,l}$, $s=1,2,3$, $l=\overline{0,s-1}$ are arbitrary complex constants; and $\sigma=0,1$.
It was shown in [19] that if \[ q\left( x\right) =q\left( 1-x\right) ,\text{ }\forall x\in\left[ 0,1\right] , \] then the spectrum of each of the problems $T_{1}$ and $T_{3}$ coincides with the spectrum of the periodic problem, and the spectrum of each of the problems $T_{2}$ and $T_{4}$ coincides with the spectrum of the antiperiodic problem.
In this paper we prove that if \begin{equation} \lim_{n\rightarrow\infty}\dfrac{\ln\left\vert n\right\vert }{ns_{2n}}=0, \end{equation} where $s_{k}=\left( q,\sin2\pi kx\right) ,$ then the large eigenvalues of the operators $T_{1}$ and $T_{3}$ are simple. Moreover, if there exists a sequence $\left\{ n_{k}\right\} $ such that (8) holds when $n$ is replaced by $n_{k},$ then the root functions of these operators do not form a Riesz basis.
Similarly, if \begin{equation} \lim_{n\rightarrow\infty}\dfrac{\ln\left\vert n\right\vert }{ns_{2n+1}}=0, \end{equation} then the large eigenvalues of the operators $T_{2}$ and $T_{4}$ are simple and if there exists a sequence $\left\{ n_{k}\right\} $ such that (9) holds when $n$ is replaced by $n_{k},$ then the root functions of these operators do not form a Riesz basis.
Moreover, we obtain asymptotic formulas of arbitrary order for the eigenvalues and eigenfunctions of the operators $T_{1},T_{2},T_{3}$ and $T_{4}$.
\section{Main Results}
We will focus only on the operator $T_{1}$. The investigations of the operators $T_{2},T_{3}$ and $T_{4}$ are similar. It is well-known (see formulas (47a), (47b) on page 65 of [18]) that the eigenvalues of the operator $T_{1}(q)$ consist of the sequences $\{\lambda_{n,1}\},\{\lambda_{n,2}\}$ satisfying \begin{equation} \lambda_{n,j}=(2n\pi)^{2}+O(n^{1/2}) \end{equation} for $j=1,2$. From this formula one can easily obtain the following inequality \begin{equation} \left\vert \lambda_{n,j}-(2\pi k)^{2}\right\vert =\left\vert 2(n-k)\pi \right\vert \left\vert 2(n+k)\pi\right\vert +O(n^{\frac{1}{2}})>n \end{equation} for $j=1,2;$ $k\neq n;$ $k=0,1,...;$ and $n\geq N,$ where we denote by $N$ a sufficiently large positive integer, that is, $N\gg1.$
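Indeed, for $k\neq n,$ $k=0,1,...,$ we have $\left\vert n-k\right\vert \geq1$ and $n+k\geq n$, so that $\left\vert 2(n-k)\pi\right\vert \left\vert 2(n+k)\pi\right\vert \geq4\pi^{2}n$, and for $n\geq N$ this product dominates both the term $O(n^{1/2})$ and $n$.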
It is easy to verify that if $q(x)=0$ then the eigenvalues of the operator $T_{1},$ denoted by $T_{1}(0),$ are $\lambda_{n}=\left( 2\pi n\right) ^{2}$ for $n=0,1,\ldots$ The eigenvalue $0$ is simple and the corresponding eigenfunction is $1.$ The eigenvalues $\lambda_{n}=\left( 2\pi n\right) ^{2}$ for $n=1,2,\ldots$ are double and the corresponding eigenfunctions and associated functions are \begin{equation} y_{n}\left( x\right) =\cos2\pi nx\text{ }\And\text{ }\phi_{n}\left( x\right) =\left( \frac{\beta}{1+\beta}-x\right) \frac{\sin2\pi nx}{4\pi n} \end{equation} respectively. Note that for any constant $c$, $\phi_{n}\left( x\right) +cy_{n}\left( x\right) $ is also an associated function. It can be shown that the adjoint operator $T_{1}^{\ast}(0)$ is associated with the boundary conditions: \begin{equation} y_{1}+\overline{\beta}y_{0}=0,\text{ }y_{1}^{\prime}-y_{0}^{\prime}=0. \end{equation} It is easy to see that, $0$ is a simple eigenvalue of $T_{1}^{\ast}(0)$ and the corresponding eigenfunction is $y_{0}^{\ast}\left( x\right) =x-\dfrac {1}{1+\overline{\beta}}$ . The other eigenvalues $\lambda_{n}^{\ast}=\left( 2\pi n\right) ^{2}$ for $n=1,2,\ldots$, are double and the corresponding eigenfunctions and associated functions are \begin{equation} y_{n}^{\ast}\left( x\right) =\sin2\pi nx\text{ }\And\text{ }\phi_{n}^{\ast }\left( x\right) =\left( x-\dfrac{1}{1+\overline{\beta}}\right) \frac {\cos2\pi nx}{4\pi n}\nonumber \end{equation} respectively.
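As a quick check (still for $q(x)=0$), a direct computation gives \[ -\phi_{n}^{\prime\prime}\left( x\right) =\left( 2\pi n\right) ^{2}\phi_{n}\left( x\right) +\cos2\pi nx, \] and $\phi_{n}$ satisfies (2), since $\phi_{n}(0)=\phi_{n}(1)=0$ and \[ \phi_{n}^{\prime}(0)+\beta\phi_{n}^{\prime}(1)=\frac{\beta}{2\left( 1+\beta\right) }-\frac{\beta}{2\left( 1+\beta\right) }=0, \] so that $\phi_{n}$ is indeed an associated function corresponding to the eigenfunction $y_{n}$.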
Let \begin{equation} \varphi_{n}\left( x\right) :=\frac{16\pi n\left( \beta+1\right) }{\beta -1}\phi_{n}\left( x\right) =\frac{4\left( \beta+1\right) }{\beta-1}\left( \dfrac{\beta}{1+\beta}-x\right) \sin2\pi nx \end{equation} and \begin{equation} \varphi_{n}^{\ast}\left( x\right) :=\frac{16\pi n\left( \overline{\beta }+1\right) }{\overline{\beta}-1}\phi_{n}^{\ast}\left( x\right) =\frac{4\left( \overline{\beta}+1\right) }{\overline{\beta}-1}\left( x-\dfrac{1}{1+\overline{\beta}}\right) \cos2\pi nx. \end{equation} The system of the root functions of $T_{1}^{\ast}(0)$ can be written as $\{f_{n}:n\in\mathbb{Z}\},$ where \begin{equation} f_{-n}=\sin2\pi nx,\text{ }\forall n>0\And\text{ }f_{n}=\varphi_{n}^{\ast }\left( x\right) ,\text{ }\forall n\geq0. \end{equation} One can easily verify that it forms a basis in $L_{2}[0,1]$ and the biorthogonal system $\{g_{n}:n\in\mathbb{Z}\}$ is the system of the root functions of $T_{1}(0),$ where \begin{equation} g_{-n}=\varphi_{n}\left( x\right) ,\forall n>0\text{ }\And\text{ }g_{n} =\cos2\pi nx,\forall n\geq0, \end{equation} since $\left( f_{n},g_{m}\right) =\delta_{n,m}.$
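For instance, for the pairing of $f_{n}$ with $g_{n}$, $n\geq1$, using $\int_{0}^{1}\cos^{2}2\pi nx\,dx=\frac{1}{2}$ and $\int_{0}^{1}x\cos^{2}2\pi nx\,dx=\frac{1}{4}$ we get \[ \left( f_{n},g_{n}\right) =\frac{4\left( \overline{\beta}+1\right) }{\overline{\beta}-1}\left( \frac{1}{4}-\frac{1}{2\left( 1+\overline{\beta}\right) }\right) =\frac{4\left( \overline{\beta}+1\right) }{\overline{\beta}-1}\cdot\frac{\overline{\beta}-1}{4\left( 1+\overline{\beta}\right) }=1. \]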
To obtain the asymptotic formulas for the eigenvalues $\lambda_{n,j}$ and the corresponding normalized eigenfunctions $\Psi_{n,j}(x)$ of $T_{1}(q)$ we use (11) and the well-known relations \begin{equation} (\lambda_{N,j}-(2\pi n)^{2})(\Psi_{N,j},\sin2\pi nx)=(q\Psi_{N,j},\sin2\pi nx) \end{equation} and \begin{equation} \left( \lambda_{N,j}-\left( 2\pi n\right) ^{2}\right) \left( \Psi _{N,j},\varphi_{n}^{\ast}\right) -\gamma_{1}n\left( \Psi_{N,j},\sin2\pi nx\right) =\left( q\Psi_{N,j},\varphi_{n}^{\ast}\right) , \end{equation} where \[ \gamma_{1}=\frac{16\pi\left( \beta+1\right) }{\beta-1}, \] which can be obtained by multiplying both sides of the equality \[ -\left( \Psi_{N,j}\right) ^{\prime\prime}+q\left( x\right) \Psi _{N,j}=\lambda_{N,j}\Psi_{N,j} \] by $\sin2\pi nx$ and $\varphi_{n}^{\ast}$ respectively. It follows from (18) and (19) that \begin{equation} \left( \Psi_{N,j},\sin2\pi nx\right) =\frac{\left( q\left( x\right) \Psi_{N,j},\sin2\pi nx\right) }{\lambda_{N,j}-\left( 2\pi n\right) ^{2} };\text{ }N\neq n, \end{equation}
\begin{equation} \left( \Psi_{N,j},\varphi_{n}^{\ast}\right) =\frac{\gamma_{1}n\left( q\left( x\right) \Psi_{N,j},\sin2\pi nx\right) }{\left( \lambda _{N,j}-\left( 2\pi n\right) ^{2}\right) ^{2}}+\frac{\left( q\left( x\right) \Psi_{N,j},\varphi_{n}^{\ast}\right) }{\lambda_{N,j}-\left( 2\pi n\right) ^{2}};\text{ }N\neq n. \end{equation} Moreover, we use the following relations \begin{align} \left( \Psi_{N,j},\overline{q}\sin2\pi nx\right) & =\sum_{n_{1}=0} ^{\infty}[\left( q\varphi_{n_{1}},\sin2\pi nx\right) \left( \Psi_{N,j} ,\sin2\pi n_{1}x\right) +\\ & +\left( q\cos2\pi n_{1}x,\sin2\pi nx\right) \left( \Psi_{N,j} ,\varphi_{n_{1}}^{\ast}\right) ],\nonumber \end{align} \begin{equation} \left( \Psi_{N,j},\overline{q}\varphi_{n}^{\ast}\right) =\sum_{n_{1} =0}^{\infty}\left[ \left( q\varphi_{n_{1}},\varphi_{n}^{\ast}\right) \left( \Psi_{N,j},\sin2\pi n_{1}x\right) +\left( q\cos2\pi n_{1} x,\varphi_{n}^{\ast}\right) \left( \Psi_{N,j},\varphi_{n_{1}}^{\ast}\right) \right] , \end{equation} \begin{align} \left\vert (q\Psi_{N,j},\sin2\pi nx)\right\vert & <4M,\\ \left\vert (q\Psi_{N,j},\varphi_{n}^{\ast})\right\vert & <4M, \end{align} for $N\gg1,$where $M=\sup\left\vert q_{n}\right\vert .$ These relations are obvious for $q\in L_{2}(0,1),$ since to obtain (22) and (23) we can use the decomposition of $\overline{q}\sin2\pi nx$ and $\overline{q}\varphi_{n}^{\ast }$ by basis (16). For $q\in L_{1}(0,1)$\ see Lemma 1 of [23].
To obtain the asymptotic formulas for the eigenvalues and eigenfunctions we iterate (18) and (19) by using (22), (23). First let us prove the following obvious asymptotic formulas for the eigenfunctions $\Psi_{n,j}$. The expansion of $\Psi_{n,j}$\ by basis (17) can be written in the form \begin{equation} \Psi_{n,j}=u_{n,j}\varphi_{n}\left( x\right) +v_{n,j}\cos2\pi nx+h_{n,j} \left( x\right) , \end{equation} where \begin{equation} u_{n,j}=\left( \Psi_{n,j},\sin2\pi nx\right) ,\text{ }v_{n,j}=\left( \Psi_{n,j},\varphi_{n}^{\ast}\right) , \end{equation} \[ h_{n,j}\left( x\right) =\sum_{\substack{k=0\\k\neq n}}^{\infty}\left[ \left( \Psi_{n,j},\sin2\pi kx\right) \varphi_{k}\left( x\right) +\left( \Psi_{n,j},\varphi_{k}^{\ast}\right) \cos2\pi kx\right] . \] Using (20), (21), (24) and (25) one can readily see that, there exists a constant $C$ such that \begin{equation} \sup\left\vert h_{n,j}\left( x\right) \right\vert \leq C\left( \sum_{k\neq n}\left( \frac{1}{\mid\lambda_{n,j}-\left( 2\pi k\right) ^{2}\mid}+\frac {n}{\left\vert \left( \lambda_{n,j}-\left( 2\pi k\right) ^{2}\right) ^{2}\right\vert }\right) \right) =O\left( \frac{\ln n}{n}\right) \end{equation} and by (26) we get \begin{equation} \Psi_{n,j}=u_{n,j}\varphi_{n}\left( x\right) +v_{n,j}\cos2\pi nx+O\left( \frac{\ln n}{n}\right) . \end{equation}
Since $\Psi_{n,j}$\ is normalized, we have \[ 1=\left\Vert \Psi_{n,j}\right\Vert ^{2}=\left( \Psi_{n,j},\Psi_{n,j}\right) =\left\vert u_{n,j}\right\vert ^{2}\left\Vert \varphi_{n}\left( x\right) \right\Vert ^{2}+\left\vert v_{n,j}\right\vert ^{2}\left\Vert \cos2\pi nx\right\Vert ^{2}+ \] \[ +u_{n,j}\overline{v_{n,j}}\left( \varphi_{n}\left( x\right) ,\cos2\pi nx\right) +v_{n,j}\overline{u_{n,j}}\left( \cos2\pi nx,\varphi_{n}\left( x\right) \right) +O\left( \frac{\ln n}{n}\right) \] \[ =\left( \frac{8}{3}\dfrac{\left\vert \beta\right\vert ^{2}-\operatorname{Re} \beta+1}{\left\vert \beta-1\right\vert ^{2}}\right) \left\vert u_{n,j} \right\vert ^{2}+\frac{1}{2}\left\vert v_{n,j}\right\vert ^{2}+O\left( \frac{\ln n}{n}\right) , \] that is, \begin{equation} a\left\vert u_{n,j}\right\vert ^{2}+\frac{1}{2}\left\vert v_{n,j}\right\vert ^{2}=1+O\left( \frac{\ln n}{n}\right) , \end{equation} where \[ a=\frac{8}{3}\dfrac{\left\vert \beta\right\vert ^{2}-\operatorname{Re}\beta +1}{\left\vert \beta-1\right\vert ^{2}}. \] Note that $a\neq0$, since $\left\vert \beta\right\vert ^{2}+1>\left\vert \beta\right\vert .$
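For completeness, the value of $a$ comes from the elementary computation
\[
\left\Vert \varphi_{n}\right\Vert ^{2}=\frac{16\left\vert \beta+1\right\vert
^{2}}{\left\vert \beta-1\right\vert ^{2}}\int_{0}^{1}\left\vert \dfrac{\beta
}{1+\beta}-x\right\vert ^{2}\sin^{2}2\pi nx\,dx=\frac{8}{3}\dfrac{\left\vert
\beta\right\vert ^{2}-\operatorname{Re}\beta+1}{\left\vert \beta-1\right\vert
^{2}}+O\left( n^{-2}\right) =a+O\left( n^{-2}\right) ,
\]
which uses $\int_{0}^{1}x\sin^{2}2\pi nx\,dx=\frac{1}{4}$ and $\int_{0}^{1}x^{2}\sin^{2}2\pi nx\,dx=\frac{1}{6}-\frac{1}{16\pi^{2}n^{2}}$, while the cross terms are $O\left( n^{-1}\right) $ since $\left( \varphi_{n},\cos2\pi nx\right) =O\left( n^{-1}\right) $ and $\left\Vert \cos2\pi nx\right\Vert ^{2}=\frac{1}{2}$.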
Now let us iterate (18). Using (22) in (18) we get \begin{gather*} \left( \lambda_{n,j}-\left( 2\pi n\right) ^{2}\right) \left( \Psi _{n,j},\sin2\pi nx\right) =\\ =\sum_{n_{1}=0}^{\infty}\left[ \left( q\varphi_{n_{1}},\sin2\pi nx\right) \left( \Psi_{n,j},\sin2\pi n_{1}x\right) +\left( q\cos2\pi n_{1}x,\sin2\pi nx\right) \left( \Psi_{n,j},\varphi_{n_{1}}^{\ast}\left( x\right) \right) \right] . \end{gather*} Isolating the terms in the right-hand side of this equality containing the multiplicands $\left( \Psi_{n,j},\sin2\pi nx\right) $ and $\left( \Psi_{n,j},\varphi_{n}^{\ast}\left( x\right) \right) $ (i.e., case $n_{1}=n$ ), using\ (20) and (21) for the terms $\left( \Psi_{n,j},\sin2\pi n_{1}x\right) $ and \ $\left( \Psi_{n,j},\varphi_{n_{1}}^{\ast}\left( x\right) \right) $ respectively (in the case $n_{1}\neq n$) we obtain
\begin{gather*} \left[ \lambda_{n,j}-\left( 2\pi n\right) ^{2}-\left( q\varphi_{n} ,\sin2\pi nx\right) \right] \left( \Psi_{n,j},\sin2\pi nx\right) -\left( q\cos2\pi nx,\sin2\pi nx\right) \left( \Psi_{n,j},\varphi_{n}^{\ast}\right) =\\ =\sum_{\substack{n_{1}=0\\n_{1}\neq n}}^{\infty}\left[ \left( q\varphi _{n_{1}},\sin2\pi nx\right) \left( \Psi_{n,j},\sin2\pi n_{1}x\right) +\left( q\cos2\pi n_{1}x,\sin2\pi nx\right) \left( \Psi_{n,j} ,\varphi_{n_{1}}^{\ast}\left( x\right) \right) \right] \\ =\sum_{n_{1}}\left[ a_{1}\left( \lambda_{n,j}\right) \left( q\left( x\right) \Psi_{n,j},\sin2\pi n_{1}x\right) +b_{1}\left( \lambda _{n,j}\right) \left( q\left( x\right) \Psi_{n,j},\varphi_{n_{1}}^{\ast }\right) \right] . \end{gather*} where \begin{align*} a_{1}\left( \lambda_{n,j}\right) & =\frac{\left( q\varphi_{n_{1}} ,\sin2\pi nx\right) }{\lambda_{n,j}-\left( 2\pi n_{1}\right) ^{2}} +\frac{\gamma_{1}n_{1}\left( q\cos2\pi n_{1}x,\sin2\pi nx\right) }{\left( \lambda_{n,j}-\left( 2\pi n_{1}\right) ^{2}\right) ^{2}},\\ b_{1}\left( \lambda_{n,j}\right) & =\frac{\left( q\cos2\pi n_{1} x,\sin2\pi nx\right) }{\lambda_{n,j}-\left( 2\pi n_{1}\right) ^{2}}. \end{align*}
Using (22) and (23) for the terms $\left( q\Psi_{n,j},\sin2\pi n_{1}x\right) $ and $\left( q\Psi_{n,j},\varphi_{n_{1}}^{\ast}\right) $ of the last summation we obtain
\begin{gather*}
\left[ \lambda_{n,j}-\left( 2\pi n\right) ^{2}-\left( q\varphi_{n},\sin2\pi nx\right) \right] \left( \Psi_{n,j},\sin2\pi nx\right) -\left( q\cos2\pi nx,\sin2\pi nx\right) \left( \Psi_{n,j},\varphi_{n}^{\ast}\right) =\\
=\sum_{n_{1}}\left[ a_{1}\left( \lambda_{n,j}\right) \left( q\Psi_{n,j},\sin2\pi n_{1}x\right) +b_{1}\left( \lambda_{n,j}\right) \left( q\Psi_{n,j},\varphi_{n_{1}}^{\ast}\right) \right] =\\
=\sum_{n_{1}}a_{1}\left( \sum_{n_{2}=0}^{\infty}\left[ \left( q\varphi_{n_{2}},\sin2\pi n_{1}x\right) \left( \Psi_{n,j},\sin2\pi n_{2}x\right) +\left( q\cos2\pi n_{2}x,\sin2\pi n_{1}x\right) \left( \Psi_{n,j},\varphi_{n_{2}}^{\ast}\left( x\right) \right) \right] \right) +\\
+\sum_{n_{1}}b_{1}\left( \sum_{n_{2}=0}^{\infty}\left[ \left( q\varphi_{n_{2}},\varphi_{n_{1}}^{\ast}\right) \left( \Psi_{n,j},\sin2\pi n_{2}x\right) +\left( q\cos2\pi n_{2}x,\varphi_{n_{1}}^{\ast}\right) \left( \Psi_{n,j},\varphi_{n_{2}}^{\ast}\left( x\right) \right) \right] \right) .
\end{gather*}
Now isolating the terms for $n_{2}=n$ we get
\begin{gather*}
\left[ \lambda_{n,j}-\left( 2\pi n\right) ^{2}-\left( q\varphi_{n},\sin2\pi nx\right) \right] \left( \Psi_{n,j},\sin2\pi nx\right) -\left( q\cos2\pi nx,\sin2\pi nx\right) \left( \Psi_{n,j},\varphi_{n}^{\ast}\right) =\\
=\sum_{n_{1}}\left[ a_{1}\left( q\varphi_{n},\sin2\pi n_{1}x\right) +b_{1}\left( q\varphi_{n},\varphi_{n_{1}}^{\ast}\right) \right] \left( \Psi_{n,j},\sin2\pi nx\right) +\\
+\sum_{n_{1}}\left[ a_{1}\left( q\cos2\pi nx,\sin2\pi n_{1}x\right) +b_{1}\left( q\cos2\pi nx,\varphi_{n_{1}}^{\ast}\right) \right] \left( \Psi_{n,j},\varphi_{n}^{\ast}\left( x\right) \right) +\\
+\sum_{n_{1},n_{2}}\left[ a_{1}\left( q\varphi_{n_{2}},\sin2\pi n_{1}x\right) +b_{1}\left( q\varphi_{n_{2}},\varphi_{n_{1}}^{\ast}\right) \right] \left( \Psi_{n,j},\sin2\pi n_{2}x\right) +\\
+\sum_{n_{1},n_{2}}\left[ a_{1}\left( q\cos2\pi n_{2}x,\sin2\pi n_{1}x\right) +b_{1}\left( q\cos2\pi n_{2}x,\varphi_{n_{1}}^{\ast}\right) \right] \left( \Psi_{n,j},\varphi_{n_{2}}^{\ast}\right) .
\end{gather*}
Here and further the summations are taken under the conditions $n_{i}\neq n$ and $n_{i}=0,1,...$ for $i=1,2,...$ Introduce the notations
\begin{align*}
C_{1} & =:a_{1},\text{ }M_{1}=:b_{1},\\
C_{2} & =:a_{1}a_{2}+b_{1}A_{2}=C_{1}a_{2}+M_{1}A_{2},\text{ }M_{2}=:a_{1}b_{2}+b_{1}B_{2}=C_{1}b_{2}+M_{1}B_{2},\\
C_{k+1} & =:C_{k}a_{k+1}+M_{k}A_{k+1},\text{ }M_{k+1}=:C_{k}b_{k+1}+M_{k}B_{k+1};\text{ }k=2,3,\ldots,
\end{align*}
where \begin{gather*} a_{k+1}=a_{k+1}\left( \lambda_{n,j}\right) =\dfrac{\left( q\varphi _{n_{k+1}},\sin2\pi n_{k}x\right) }{\lambda_{n,j}-\left( 2\pi n_{k+1} \right) ^{2}}+\dfrac{\gamma_{1}n_{k+1}\left( q\cos2\pi n_{k+1}x,\sin2\pi n_{k}x\right) }{\left( \lambda_{n,j}-\left( 2\pi n_{k+1}\right) ^{2}\right) ^{2}},\\ b_{k+1}=b_{k+1}\left( \lambda_{n,j}\right) =\dfrac{\left( q\cos2\pi n_{k+1}x,\sin2\pi n_{k}x\right) }{\lambda_{n,j}-\left( 2\pi n_{k+1}\right) ^{2}},\\ A_{k+1}=A_{k+1}\left( \lambda_{n,j}\right) =\dfrac{\left( q\varphi _{n_{k+1}},\varphi_{n_{k}}^{\ast}\right) }{\lambda_{n,j}-\left( 2\pi n_{k+1}\right) ^{2}}+\dfrac{\gamma_{1}n_{k+1}\left( q\cos2\pi n_{k+1} x,\varphi_{n_{k}}^{\ast}\right) }{\left( \lambda_{n,j}-\left( 2\pi n_{k+1}\right) ^{2}\right) ^{2}},\\ B_{k+1}=B_{k+1}\left( \lambda_{n,j}\right) =\dfrac{\left( q\cos2\pi n_{k+1}x,\varphi_{n_{k}}^{\ast}\right) }{\lambda_{n,j}-\left( 2\pi n_{k+1}\right) ^{2}}. \end{gather*} Using these notations and repeating this iteration $k$ times we get \begin{gather} \left[ \lambda_{n,j}-\left( 2\pi n\right) ^{2}-\left( q\varphi_{n} ,\sin2\pi nx\right) -\widetilde{A}_{k}\left( \lambda_{n,j}\right) \right] \left( \Psi_{n,j},\sin2\pi nx\right) =\nonumber\\ =\left[ \left( q\cos2\pi nx,\sin2\pi nx\right) +\widetilde{B}_{k}\left( \lambda_{n,j}\right) \right] \left( \Psi_{n,j},\varphi_{n}^{\ast}\left( x\right) \right) +R_{k}, \end{gather} where \begin{align*} \widetilde{A}_{k}\left( \lambda_{n,j}\right) & =\sum_{m=1}^{k}\alpha _{m}\left( \lambda_{n,j}\right) \text{, }\widetilde{B}_{k}\left( \lambda_{n,j}\right) =\sum_{m=1}^{k}\beta_{m}\left( \lambda_{n,j}\right) ,\\ \alpha_{k}\left( \lambda_{n,j}\right) & =\sum_{n_{1},\ldots,n_{k}}\left[ C_{k}\left( q\varphi_{n},\sin2\pi n_{k}x\right) +M_{k}\left( q\varphi _{n},\varphi_{n_{k}}^{\ast}\right) \right] ,\\ \beta_{k}\left( \lambda_{n,j}\right) & =\sum_{n_{1},\ldots,n_{k}}\left[ C_{k}\left( q\cos2\pi nx,\sin2\pi n_{k}x\right) +M_{k}\left( q\cos2\pi nx,\varphi_{n_{k}}^{\ast}\right) \right] ,\\ R_{k} & =\sum_{n_{1},\ldots,n_{k+1}}\left\{ C_{k+1}\left( q\Psi_{n,j} ,\sin2\pi n_{k+1}x\right) +M_{k+1}\left( q\Psi_{n,j},\varphi_{n_{k+1}} ^{\ast}\right) \right\} . \end{align*} It follows from (11), (24) and (25) that \begin{equation} \alpha_{k}\left( \lambda_{n,j}\right) =O\left( \left( \frac{\ln\left\vert n\right\vert }{n}\right) ^{k}\right) ,\beta_{k}\left( \lambda_{n,j}\right) =O\left( \left( \frac{\ln\left\vert n\right\vert }{n}\right) ^{k}\right) ,R_{k}=O\left( \left( \frac{\ln\left\vert n\right\vert }{n}\right) ^{k+1}\right) . \end{equation}
Therefore, taking the limit in (31) as $k\rightarrow\infty$, we obtain
\[
\left[ \lambda_{n,j}-\left( 2\pi n\right) ^{2}-Q_{n}-A\left( \lambda_{n,j}\right) \right] u_{n,j}=\left[ P_{n}+B\left( \lambda_{n,j}\right) \right] v_{n,j},
\]
where
\begin{equation}
P_{n}=\left( q\cos2\pi nx,\sin2\pi nx\right) ,\text{ }Q_{n}=\left( q\varphi_{n},\sin2\pi nx\right) ,
\end{equation}
\begin{equation}
A\left( \lambda_{n,j}\right) =\sum_{m=1}^{\infty}\alpha_{m}\left( \lambda_{n,j}\right) =O\left( \frac{\ln\left\vert n\right\vert }{n}\right) \text{, }B\left( \lambda_{n,j}\right) =\sum_{m=1}^{\infty}\beta_{m}\left( \lambda_{n,j}\right) =O\left( \frac{\ln\left\vert n\right\vert }{n}\right) .
\end{equation}
Thus, iterating (18), we obtained (31). Now, starting the iteration from (19) instead of (18), using (23), (22) and arguing as in the previous iteration, we get
\begin{equation}
\left[ \lambda_{n,j}-\left( 2\pi n\right) ^{2}-P_{n}^{\ast}-A_{k}^{\prime}\left( \lambda_{n,j}\right) \right] v_{n,j}=\left[ \gamma_{1}n+Q_{n}^{\ast}+B_{k}^{\prime}\left( \lambda_{n,j}\right) \right] u_{n,j}+R_{k}^{\prime},
\end{equation}
where
\begin{equation}
P_{n}^{\ast}=\left( q\cos2\pi nx,\varphi_{n}^{\ast}\right) ,\text{ }Q_{n}^{\ast}=\left( q\varphi_{n},\varphi_{n}^{\ast}\right) ,
\end{equation}
\begin{align*}
A_{k}^{\prime}\left( \lambda_{n,j}\right) & =\sum_{m=1}^{k}\alpha_{m}^{\prime}\left( \lambda_{n,j}\right) \text{, }B_{k}^{\prime}\left( \lambda_{n,j}\right) =\sum_{m=1}^{k}\beta_{m}^{\prime}\left( \lambda_{n,j}\right) ,\\
\alpha_{k}^{\prime}\left( \lambda_{n,j}\right) & =\sum_{n_{1},\ldots,n_{k}}\left[ \widetilde{C}_{k}\left( q\cos2\pi nx,\sin2\pi n_{k}x\right) +\widetilde{M}_{k}\left( q\cos2\pi nx,\varphi_{n_{k}}^{\ast}\right) \right] ,\\
\beta_{k}^{\prime}\left( \lambda_{n,j}\right) & =\sum_{n_{1},\ldots,n_{k}}\left[ \widetilde{C}_{k}\left( q\varphi_{n},\sin2\pi n_{k}x\right) +\widetilde{M}_{k}\left( q\varphi_{n},\varphi_{n_{k}}^{\ast}\right) \right] ,\\
R_{k}^{\prime} & =\sum_{n_{1},\ldots,n_{k+1}}\left\{ \widetilde{C}_{k+1}\left( q\Psi_{n,j},\sin2\pi n_{k+1}x\right) +\widetilde{M}_{k+1}\left( q\Psi_{n,j},\varphi_{n_{k+1}}^{\ast}\right) \right\} ,
\end{align*}
\[ \widetilde{C}_{k+1}=\widetilde{C}_{k}a_{k+1}+\widetilde{M}_{k}A_{k+1},\text{ }\widetilde{M}_{k+1}=\widetilde{C}_{k}b_{k+1}+\widetilde{M}_{k}B_{k+1};\text{ }k=0,1,2,\ldots, \] \begin{align*} \widetilde{C}_{1} & =A_{1}\left( \lambda_{n,j}\right) =\frac{\left( q\varphi_{n_{1}},\varphi_{n}^{\ast}\right) }{\lambda_{n,j}-\left( 2\pi n_{1}\right) ^{2}}+\frac{\gamma_{1}n_{1}\left( q\cos2\pi n_{1}x,\varphi _{n}^{\ast}\right) }{\left( \lambda_{n,j}-\left( 2\pi n_{1}\right) ^{2}\right) ^{2}},\\ \widetilde{M}_{1} & =B_{1}\left( \lambda_{n,j}\right) =\frac{\left( q\cos2\pi n_{1}x,\varphi_{n}^{\ast}\right) }{\lambda_{n,j}-\left( 2\pi n_{1}\right) ^{2}}. \end{align*} Similar to (32) one can verify that \begin{equation} \alpha_{k}^{\prime}\left( \lambda_{n,j}\right) =O\left( \left( \frac {\ln\left\vert n\right\vert }{n}\right) ^{k}\right) ,\beta_{k}^{\prime }\left( \lambda_{n,j}\right) =O\left( \left( \frac{\ln\left\vert n\right\vert }{n}\right) ^{k}\right) ,R_{k}^{\prime}=O\left( \left( \frac{\ln\left\vert n\right\vert }{n}\right) ^{k+1}\right) . \end{equation} If we take limit in (35) for $k\rightarrow\infty$, we obtain \[ \left[ \lambda_{n,j}-\left( 2\pi n\right) ^{2}-P_{n}^{\ast}-A^{\prime }\left( \lambda_{n,j}\right) \right] v_{n,j}=\left[ \gamma_{1} n+Q_{n}^{\ast}+B^{\prime}\left( \lambda_{n,j}\right) \right] u_{n,j}, \] where \begin{equation} A^{\prime}\left( \lambda_{n,j}\right) =\sum_{m=1}^{\infty}\alpha_{m} ^{\prime}\left( \lambda_{n,j}\right) =O\left( \frac{\ln\left\vert n\right\vert }{n}\right) \text{, }B^{\prime}\left( \lambda_{n,j}\right) =\sum_{m=1}^{\infty}\beta_{m}^{\prime}\left( \lambda_{n,j}\right) =O\left( \frac{\ln\left\vert n\right\vert }{n}\right) . \end{equation} To get the main results of this paper we use the following system of equations, obtained above, with respect to $u_{n,j}$ and $v_{n,j}$ \begin{gather} \left[ \lambda_{n,j}-\left( 2\pi n\right) ^{2}-Q_{n}-A\left( \lambda _{n,j}\right) \right] u_{n,j}=\left[ P_{n}+B\left( \lambda_{n,j}\right) \right] v_{n,j},\\ \left[ \lambda_{n,j}-\left( 2\pi n\right) ^{2}-P_{n}^{\ast}-A^{\prime }\left( \lambda_{n,j}\right) \right] v_{n,j}=\left[ \gamma_{1} n+Q_{n}^{\ast}+B^{\prime}\left( \lambda_{n,j}\right) \right] u_{n,j}, \end{gather} where \begin{gather} Q_{n}=\left( q\varphi_{n},\sin2\pi nx\right) =\nonumber\\ =-\frac{2\left( \beta+1\right) }{\beta-1}\int_{0}^{1}xq\left( x\right) dx+\frac{2\left( \beta+1\right) }{\beta-1}\left( xq\left( x\right) ,\cos4\pi nx\right) -\frac{2\beta}{\beta-1}\left( q\left( x\right) ,\cos4\pi nx\right) \\ =-\frac{2\left( \beta+1\right) }{\beta-1}\int_{0}^{1}xq\left( x\right) dx+o\left( 1\right) , \end{gather} \begin{gather} P_{n}^{\ast}=\left( q\cos2\pi nx,\varphi_{n}^{\ast}\right) =\nonumber\\ =\frac{2\left( \beta+1\right) }{\beta-1}\int_{0}^{1}xq\left( x\right) dx+\frac{2\left( \beta+1\right) }{\beta-1}\left( xq\left( x\right) ,\cos4\pi nx\right) -\frac{2}{\beta-1}\left( q\left( x\right) ,\cos4\pi nx\right) \\ =\frac{2\left( \beta+1\right) }{\beta-1}\int_{0}^{1}xq\left( x\right) dx+o\left( 1\right) , \end{gather} \begin{equation} P_{n}=\left( q\cos2\pi nx,\sin2\pi nx\right) =\frac{1}{2}\left( q,\sin4\pi nx\right) =o\left( 1\right) , \end{equation} \begin{equation} Q_{n}^{\ast}=\left( q\varphi_{n},\varphi_{n}^{\ast}\right) =8\left( \frac{\beta_{1}+1}{\beta_{1}-1}\right) ^{2}\int_{0}^{1}q\left( x\right) \left( \dfrac{\beta_{1}}{1+\beta_{1}}-x\right) \left( x-\dfrac{1} {1+\beta_{1}}\right) \sin4\pi nxdx=o\left( 1\right) . 
\end{equation} Note that (39), (40) with (34), (38) give us \begin{gather} \left[ \lambda_{n,j}-\left( 2\pi n\right) ^{2}-Q_{n}+O\left( \dfrac {\ln\left\vert n\right\vert }{n}\right) \right] u_{n,j}=\left[ P_{n}+O\left( \dfrac{\ln\left\vert n\right\vert }{n}\right) \right] v_{n,j},\\ \left[ \lambda_{n,j}-\left( 2\pi n\right) ^{2}-P_{n}^{\ast}+O\left( \dfrac{\ln\left\vert n\right\vert }{n}\right) \right] v_{n,j}=\left[ \gamma_{1}n+Q_{n}^{\ast}+O\left( \dfrac{\ln\left\vert n\right\vert } {n}\right) \right] u_{n,j}. \end{gather}
Introduce the notations \begin{align} c_{n} & =\left( q,\cos2\pi nx\right) \text{, }s_{n}=\left( q,\sin2\pi nx\right) \nonumber\\ c_{n,1} & =\left( xq,\cos2\pi nx\right) \text{, }s_{n,1}=\left( xq,\sin2\pi nx\right) \\ c_{n,2} & =\left( x^{2}q,\cos2\pi nx\right) \text{, }s_{n,2}=\left( x^{2}q,\sin2\pi nx\right) .\nonumber \end{align} In these notations we have \begin{equation} Q_{n}=-\frac{2\left( \beta+1\right) }{\beta-1}\int_{0}^{1}xq\left( x\right) dx+\frac{2\left( \beta+1\right) }{\beta-1}c_{2n,1}-\frac{2\beta }{\beta-1}c_{2n} \end{equation} \begin{equation} P_{n}^{\ast}=\frac{2\left( \beta+1\right) }{\beta-1}\int_{0}^{1}xq\left( x\right) dx+\frac{2\left( \beta+1\right) }{\beta-1}c_{2n,1}-\frac{2} {\beta-1}c_{2n} \end{equation} \begin{equation} P_{n}=\frac{1}{2}s_{2n} \end{equation} \begin{equation} Q_{n}^{\ast}=-8\left( \frac{\beta+1}{\beta-1}\right) ^{2}s_{2n,2}+8\left( \frac{\beta+1}{\beta-1}\right) ^{2}s_{2n,1}-\frac{8\beta}{\left( \beta-1\right) ^{2}}s_{2n}. \end{equation}
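The expressions above for $Q_{n}$, $P_{n}^{\ast}$, $P_{n}$ and $Q_{n}^{\ast}$ are easy to verify numerically. The following minimal sketch (ours) compares both sides for a sample potential $q$ and an arbitrary value of $\beta$; both choices are assumptions of the sketch, and $q$ is taken with zero mean, which appears to be implicit in the first two formulas.
\begin{verbatim}
# Numerical cross-check of the expressions for Q_n, P_n^*, P_n, Q_n^* in
# terms of the Fourier coefficients introduced above.  The potential q and
# beta are arbitrary sample choices; q has zero mean.
import numpy as np

beta = 1.5 + 0.7j
n = 7
x = np.linspace(0.0, 1.0, 1000001)
dx = x[1] - x[0]

def integral(fvals):                   # trapezoidal rule on [0,1]
    return (np.sum(fvals) - 0.5 * (fvals[0] + fvals[-1])) * dx

q = np.cos(2*np.pi*x) + 0.5*np.cos(28*np.pi*x) + 0.3*np.sin(28*np.pi*x)

phi_n  = 4*(beta+1)/(beta-1) * (beta/(1+beta) - x) * np.sin(2*np.pi*n*x)
phi_ns = 4*(np.conj(beta)+1)/(np.conj(beta)-1) * (x - 1/(1+np.conj(beta))) \
         * np.cos(2*np.pi*n*x)         # varphi_n^*

# left-hand sides, directly from the defining inner products
Qn  = integral(q * phi_n * np.sin(2*np.pi*n*x))
Pns = integral(q * np.cos(2*np.pi*n*x) * np.conj(phi_ns))
Pn  = integral(q * np.cos(2*np.pi*n*x) * np.sin(2*np.pi*n*x))
Qns = integral(q * phi_n * np.conj(phi_ns))

# right-hand sides, via the Fourier coefficients c_k, s_k, c_{k,1}, s_{k,1}, s_{k,2}
c  = lambda k: integral(q * np.cos(2*np.pi*k*x))
s  = lambda k: integral(q * np.sin(2*np.pi*k*x))
c1 = lambda k: integral(x * q * np.cos(2*np.pi*k*x))
s1 = lambda k: integral(x * q * np.sin(2*np.pi*k*x))
s2 = lambda k: integral(x**2 * q * np.sin(2*np.pi*k*x))
I1 = integral(x * q)
r  = (beta+1)/(beta-1)

Qn_rhs  = -2*r*I1 + 2*r*c1(2*n) - 2*beta/(beta-1)*c(2*n)
Pns_rhs =  2*r*I1 + 2*r*c1(2*n) - 2/(beta-1)*c(2*n)
Pn_rhs  =  0.5*s(2*n)
Qns_rhs = -8*r**2*s2(2*n) + 8*r**2*s1(2*n) - 8*beta/(beta-1)**2*s(2*n)

for lhs, rhs in [(Qn, Qn_rhs), (Pns, Pns_rhs), (Pn, Pn_rhs), (Qns, Qns_rhs)]:
    print(abs(lhs - rhs))   # all differences are tiny (set by the quadrature error)
\end{verbatim}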
\begin{theorem} For $j=1,2$ the following statements hold:
$(a)$ Any eigenfunction $\Psi_{n,j}$ of $T_{1}$ corresponding to the eigenvalue $\lambda_{n,j}$ defined in (10) satisfies \begin{equation} \Psi_{n,j}=\sqrt{2}\cos2\pi nx+O\left( n^{-1/2}\right) . \end{equation} Moreover there exists $N$ such that for all $n>N$ the geometric multiplicity of the eigenvalue $\lambda_{n,j}$ is $1$.
$\left( b\right) $ A complex number $\lambda\in U(n)=:\{\lambda:\left\vert \lambda-\left( 2\pi n\right) ^{2}\right\vert \leq n\}$ is an eigenvalue of $T_{1}$ if and only if it is a root of the equation \begin{gather} \left[ \lambda-\left( 2\pi n\right) ^{2}-Q_{n}-A\left( \lambda\right) \right] \left[ \lambda-\left( 2\pi n\right) ^{2}-P_{n}^{\ast}-A^{\prime }\left( \lambda\right) \right] -\nonumber\\ -\left[ P_{n}+B\left( \lambda\right) \right] \left[ \gamma_{1} n+Q_{n}^{\ast}+B^{\prime}\left( \lambda\right) \right] =0. \end{gather} Moreover $\lambda\in U(n)$ is a double eigenvalue of $T_{1}$ if and only if \textit{it is a double root of} (55) . \end{theorem}
\begin{proof} $\left( a\right) $ By (10) the left hand side of (48) is $O(n^{1/2}),$ which implies that $u_{n,j}=O(n^{-1/2}).$ Therefore from (29) we obtain (54). Now suppose that there are two linearly independent eigenfunctions corresponding to $\lambda_{n,j}$. Then there exists an eigenfunction satisfying \[ \Psi_{n,j}=\sqrt{2}\sin2\pi nx+o\left( 1\right) \] which contradicts (54).
$(b)$ First we prove that the large eigenvalues $\lambda_{n,j}$ are roots of the equation (55). It follows from (54), (27) and (15) that $v_{n,j}\neq0.$ If $u_{n,j}\neq0$, then multiplying the equations (39) and (40) side by side and then canceling $v_{n,j}u_{n,j}$ we obtain (55). If $u_{n,j}=0$, then by (39) and (40) we have $P_{n}+B\left( \lambda_{n,j}\right) =0$ and $\lambda_{n,j}-\left( 2\pi n\right) ^{2}-P_{n}^{\ast}-A^{\prime}\left( \lambda_{n,j}\right) =0$, which together imply that (55) holds. Thus in any case $\lambda_{n,j}$ is a root of (55).
Now we prove that the roots of (55) lying in $U(n)$ are the eigenvalues of $T_{1}.$ Let $F(\lambda)$ be the left-hand side of (55), which can be written as
\begin{gather}
F(\lambda)=(\lambda-\left( 2\pi n\right) ^{2})^{2}-\left( Q_{n}+A\left( \lambda\right) +P_{n}^{\ast}+A^{\prime}\left( \lambda\right) \right) \left( \lambda-\left( 2\pi n\right) ^{2}\right) +\\
+\left( Q_{n}+A\left( \lambda\right) \right) \left( P_{n}^{\ast}+A^{\prime}\left( \lambda\right) \right) -\left( P_{n}+B\left( \lambda\right) \right) \left( \gamma_{1}n+Q_{n}^{\ast}+B^{\prime}\left( \lambda\right) \right) \nonumber
\end{gather}
and let
\[
G(\lambda)=(\lambda-\left( 2\pi n\right) ^{2})^{2}.
\]
One can easily verify that the inequality
\begin{equation}
\mid F(\lambda)-G(\lambda)\mid<\mid G(\lambda)\mid
\end{equation}
holds for all $\lambda$ on the boundary of $U(n).$ Since the function $G(\lambda)$ has two roots in the set $U(n),$ by Rouch\'{e}'s theorem we obtain that $F(\lambda)$ has two roots in the same set. Thus $T_{1}$ has two eigenvalues (counting with multiplicities) lying in $U(n)$ that are roots of (55). On the other hand, (55) has precisely two roots (counting with multiplicities) in $U(n).$ Therefore $\lambda\in U(n)$ is an eigenvalue of $T_{1}$ if and only if (55) holds.
If $\lambda\in U(n)$ is a double eigenvalue of $T_{1}$, then $T_{1}$ has no other eigenvalues in $U(n)$ and hence (55) has no other roots. This implies that $\lambda$ is a double root of (55). In the same way one can prove that if $\lambda$ is a double root of (55), then it is a double eigenvalue of $T_{1}.$ \end{proof}
Let us consider (55) in detail. If we substitute $t=:\lambda-\left( 2\pi n\right) ^{2}$ then it becomes \begin{gather} t^{2}-\left( Q_{n}+A\left( \lambda\right) +P_{n}^{\ast}+A^{\prime}\left( \lambda\right) \right) t+\\ +\left( Q_{n}+A\left( \lambda\right) \right) \left( P_{n}^{\ast }+A^{\prime}\left( \lambda\right) \right) -\left( P_{n}+B\left( \lambda\right) \right) \left( \gamma_{1}n+Q_{n}^{\ast}+B^{\prime}\left( \lambda\right) \right) =0.\nonumber \end{gather} The solutions of this equation are \[ t_{1,2}=\frac{\left( Q_{n}+P_{n}^{\ast}+A+A^{\prime}\right) \pm\sqrt {\Delta\left( \lambda\right) }}{2}, \] where \begin{equation} \Delta\left( \lambda\right) =\left( Q_{n}+P_{n}^{\ast}+A+A^{\prime}\right) ^{2}-4\left( Q_{n}+A\right) \left( P_{n}^{\ast}+A^{\prime}\right) +4\left( P_{n}+B\right) \left( \gamma_{1}n+Q_{n}^{\ast}+B^{\prime}\right) \nonumber \end{equation} which can be written in the form \begin{equation} \Delta\left( \lambda\right) =\left( Q_{n}-P_{n}^{\ast}+A-A^{\prime}\right) ^{2}+4\left( P_{n}+B\right) \left( \gamma_{1}n+Q_{n}^{\ast}+B^{\prime }\right) . \end{equation} Clearly the eigenvalue $\lambda_{n,j}$\ is a root either of the equation \begin{equation} \lambda=\left( 2\pi n\right) ^{2}+\frac{1}{2}\left[ \left( Q_{n} +P_{n}^{\ast}+A+A^{\prime}\right) -\sqrt{\Delta\left( \lambda\right) }\right] \end{equation} or of the equation \begin{equation} \lambda=\left( 2\pi n\right) ^{2}+\frac{1}{2}\left[ \left( Q_{n} +P_{n}^{\ast}+A+A^{\prime}\right) +\sqrt{\Delta\left( \lambda\right) }\right] . \end{equation} Now let us examine $\Delta\left( \lambda_{n,j}\right) $ in detail. If (8) holds then one can readily see from (34), (38), (50), (51) and (59) that \begin{equation} \Delta\left( \lambda_{n,j}\right) =2\gamma_{1}ns_{2n}(1+o(1)). \end{equation} Taking into account the last three equality and (34), (38), (50), (51), we see that (60) and (61) have the form \begin{equation} \lambda=\left( 2\pi n\right) ^{2}-\frac{\sqrt{2\gamma_{1}}}{2}\sqrt{ns_{2n} }(1+o(1)), \end{equation} \begin{equation} \lambda=\left( 2\pi n\right) ^{2}+\frac{\sqrt{2\gamma_{1}}}{2}\sqrt{ns_{2n} }(1+o(1)). \end{equation}
\begin{theorem} If (8) holds, then the large eigenvalues $\lambda_{n,j}$ are simple and satisfy the following asymptotic formulas \begin{equation} \lambda_{n,j}=\left( 2\pi n\right) ^{2}+\left( -1\right) ^{j}\frac {\sqrt{2\gamma_{1}}}{2}\sqrt{ns_{2n}}(1+o(1)). \end{equation} for $j=1,2.$ Moreover, if there exists a sequence $\left\{ n_{k}\right\} $ such that (8) holds when $n$ is replaced by $n_{k},$ then the root functions of $T_{1}$ do not form a Riesz basis. \end{theorem}
\begin{proof} To prove that the large eigenvalues $\lambda_{n,j}$ are simple let us show that one of the eigenvalues, say $\lambda_{n,1}$ satisfies (65) for $j=1$ and the other $\lambda_{n,2}$ satisfies (65) for $j=2.$ Let us prove that each of the equations (60) and (61) has a unique root in $U(n)$ by proving that \[ \left( 2\pi n\right) ^{2}+\frac{1}{2}\left[ \left( Q_{n}+P_{n}^{\ast }+A+A^{\prime}\right) \pm\sqrt{\Delta\left( \lambda\right) }\right] \] is a contraction mapping. For this we show that there exist positive real numbers $K_{1},K_{2},K_{3}$ such that \begin{equation} \mid A\left( \lambda\right) -A(\mu)\mid<K_{1}\mid\lambda-\mu\mid,\text{ }\mid A^{\prime}(\lambda)-A^{\prime}(\mu)\mid<K_{2}\mid\lambda-\mu\mid, \end{equation} \begin{equation} \left\vert \sqrt{\Delta\left( \lambda\right) }-\sqrt{\Delta\left( \mu\right) }\right\vert <K_{3}\mid\lambda-\mu\mid, \end{equation} where $K_{1}+K_{2}+K_{3}<1$. The proof of (66) is similar to the proof of (56) of the paper [26].
Now let us prove (67). By (62) and (8) we have
\[
\left( \sqrt{\Delta\left( \lambda\right) }\right) ^{-1}=o(1).
\]
On the other hand, arguing as in the proof of (56) of the paper [26], we get
\[
\dfrac{d}{d\lambda}\Delta\left( \lambda\right) =O(1).
\]
Hence in any case we have
\[
\frac{d}{d\lambda}\sqrt{\Delta\left( \lambda\right) }=\frac{\dfrac{d}{d\lambda}\Delta\left( \lambda\right) }{2\sqrt{\Delta\left( \lambda\right) }}=o(1).
\]
Thus, by the fixed point theorem, each of the equations (60) and (61) has a unique root, $\lambda_{1}$ and $\lambda_{2}$ respectively. Clearly by (63) and (64), we have $\lambda_{1}\neq\lambda_{2}$, which implies that the equation (55) has two simple roots in $U\left( n\right) .$ Therefore by Theorem 1(b), $\lambda_{1}$ and $\lambda_{2}$ are the eigenvalues of $T_{1}$ lying in $U\left( n\right) ,$ that is, they are $\lambda_{n,1}$ and $\lambda_{n,2}$, which proves the simplicity of the large eigenvalues and the validity of (65).
If there exists a sequence $\left\{ n_{k}\right\} $ such that (8) holds when $n$ is replaced by $n_{k}$, then by Theorem 1(a) \[ \left( \Psi_{n_{k},1},\Psi_{n_{k},2}\right) =1+O\left( n_{k}^{-1/2}\right) . \] Now it follows from the theorems of [20,21] (see also Lemma 3 of [24]) that the root functions of $T_{1}$ do not form a Riesz basis. \end{proof}
Now let us consider the operators $T_{2}$, $T_{3}$ and $T_{4}.$ First we consider the operator $T_{3}$.
It is well known (see formulas (47a), (47b) on page 65 of [18]) that the eigenvalues of the operator $T_{3}(q)$ consist of the sequences $\{\lambda_{n,1,3}\},\{\lambda_{n,2,3}\}$ satisfying (10) when $\lambda_{n,j}$ is replaced by $\lambda_{n,j,3}.$ The eigenvalues, eigenfunctions and associated functions of $T_{3}$ are \begin{align*} \lambda_{n} & =\left( 2\pi n\right) ^{2};\text{ }n=0,1,2,\ldots\\ y_{0}\left( x\right) & =x-\dfrac{\alpha}{1+\alpha},\text{ }y_{n}\left( x\right) =\sin2\pi nx;\text{ }n=1,2,\ldots\\ \phi_{n}\left( x\right) & =\left( x-\dfrac{\alpha}{1+\alpha}\right) \frac{\cos2\pi nx}{4\pi n};\text{ }n=1,2,\ldots. \end{align*} respectively. The biorthogonal systems analogous to (16), (17) are \begin{equation} \left\{ \cos2\pi nx,\frac{4\left( 1+\overline{\alpha}\right) } {1-\overline{\alpha}}\left( \dfrac{1}{1+\overline{\alpha}}-x\right) \sin2\pi nx\right\} _{n=0}^{\infty} \end{equation} \begin{equation} \left\{ \sin2\pi nx,\frac{4\left( 1+\alpha\right) }{1-\alpha}\left( x-\dfrac{\alpha}{1+\alpha}\right) \cos2\pi nx\right\} _{n=0}^{\infty} \end{equation} respectively.
Analogous formulas to (18) and (19) are \begin{equation} \left( \lambda_{N,j}-\left( 2\pi n\right) ^{2}\right) \left( \Psi _{N,j},\cos2\pi nx\right) =\left( q\left( x\right) \Psi_{N,j},\cos2\pi nx\right) \end{equation} \begin{equation} \left( \lambda_{N,j}-\left( 2\pi n\right) ^{2}\right) \left( \Psi _{N,j},\varphi_{n}^{\ast}\right) -\gamma_{3}n\left( \Psi_{N,j},\cos2\pi nx\right) =\left( q\left( x\right) \Psi_{N,j},\varphi_{n}^{\ast}\right) \end{equation} respectively, where \[ \gamma_{3}=\frac{16\pi\left( 1+\alpha\right) }{1-\alpha}. \]
Instead of (16)-(19) using (68)-(71) and arguing as in the proofs of Theorem 1 and Theorem 2 we obtain the following results for $T_{3}.$
\begin{theorem} If (8) holds, then the large eigenvalues $\lambda_{n,j,3}$ are simple and satisfy the following asymptotic formulas \begin{equation} \lambda_{n,j,3}=\left( 2\pi n\right) ^{2}+\left( -1\right) ^{j}\frac {\sqrt{2\gamma_{3}}}{2}\sqrt{ns_{2n}}(1+o(1)). \end{equation} for $j=1,2.$ The eigenfunctions $\Psi_{n,j,3}$ corresponding to $\lambda _{n,j,3}$ obey \begin{equation} \Psi_{n,j,3}=\sqrt{2}\sin2\pi nx+O\left( n^{-1/2}\right) . \end{equation} Moreover, if there exists a sequence $\left\{ n_{k}\right\} $ such that (8) holds when $n$ is replaced by $n_{k},$ then the root functions of $T_{3}$ do not form a Riesz basis. \end{theorem}
Now let us consider the operator $T_{2}$. It is well known (see formulas (47a), (47b) on page 65 of [18]) that the eigenvalues of the operator $T_{2}(q)$ consist of the sequences $\{\lambda_{n,1,2}\},\{\lambda_{n,2,2}\}$ satisfying \begin{equation} \lambda_{n,j,2}=(2n\pi+\pi)^{2}+O(n^{1/2}), \end{equation} for $j=1,2$. The eigenvalues, eigenfunctions and associated functions of $T_{2}$ are \begin{align*} \lambda_{n} & =\left( \pi+2\pi n\right) ^{2},\text{ }y_{n}\left( x\right) =\cos\left( 2n+1\right) \pi x,\\ \phi_{n}\left( x\right) & =\left( \frac{\beta}{\beta-1}-x\right) \frac{\sin\left( 2n+1\right) \pi x}{2\left( 2n+1\right) \pi} \end{align*} for $n=0,1,2,\ldots$, respectively. The biorthogonal systems analogous to (16), (17) are \begin{equation} \left\{ \sin\left( 2n+1\right) \pi x,\frac{4\left( \overline{\beta }-1\right) }{\overline{\beta}+1}\left( x+\dfrac{1}{\overline{\beta} -1}\right) \cos\left( 2n+1\right) \pi x\right\} _{n=0}^{\infty} \end{equation} \begin{equation} \left\{ \cos\left( 2n+1\right) \pi x,\frac{4\left( \beta-1\right) } {\beta+1}\left( \frac{\beta}{\beta-1}-x\right) \sin\left( 2n+1\right) \pi x\right\} _{n=0}^{\infty} \end{equation} respectively.
Analogous formulas to (18) and (19) are \begin{equation} \left( \lambda_{N,j}-\left( \left( 2n+1\right) \pi\right) ^{2}\right) \left( \Psi_{N,j},\sin\left( 2n+1\right) \pi x\right) =\left( q\left( x\right) \Psi_{N,j},\sin\left( 2n+1\right) \pi x\right) \end{equation} \begin{equation} \left( \lambda_{N,j}-\left( \left( 2n+1\right) \pi\right) ^{2}\right) \left( \Psi_{N,j},\varphi_{n}^{\ast}\right) -\left( 2n+1\right) \gamma _{2}\left( \Psi_{N,j},\sin\left( 2n+1\right) \pi x\right) =\left( q\left( x\right) \Psi_{N,j},\varphi_{n}^{\ast}\right) \end{equation} respectively, where \[ \gamma_{2}=\frac{8\pi\left( \beta-1\right) }{\beta+1}. \] Instead of (16)-(19) using (75)-(78) and arguing as in the proofs of Theorem 1 and Theorem 2 we obtain the following results for $T_{2}.$
\begin{theorem} If (9) holds, then the large eigenvalues $\lambda_{n,j,2}$ are simple and satisfy the following asymptotic formulas \begin{equation} \lambda_{n,j,2}=\left( \left( 2n+1\right) \pi\right) ^{2}+\left( -1\right) ^{j}\frac{\sqrt{2\gamma_{2}}}{2}\sqrt{\left( 2n+1\right) s_{2n+1}}(1+o(1)). \end{equation} for $j=1,2.$ The eigenfunctions $\Psi_{n,j,2}$ corresponding to $\lambda _{n,j,2}$ obey \begin{equation} \Psi_{n,j,2}=\sqrt{2}\cos\left( 2n+1\right) \pi x+O\left( n^{-1/2}\right) . \end{equation} Moreover, if there exists a sequence $\left\{ n_{k}\right\} $ such that (9) holds when $n$ is replaced by $n_{k},$ then the root functions of $T_{2}$ do not form a Riesz basis. \end{theorem}
Lastly we consider the operator $T_{4}$. It is well known (see formulas (47a), (47b) on page 65 of [18]) that the eigenvalues of the operator $T_{4}(q)$ consist of the sequences $\{\lambda_{n,1,4}\},\{\lambda_{n,2,4}\}$ satisfying (74) when $\lambda_{n,j,2}$ is replaced by $\lambda_{n,j,4}.$ The eigenvalues, eigenfunctions and associated functions of $T_{4}$ are \begin{align*} \lambda_{n} & =\left( \pi+2\pi n\right) ^{2},\text{ }y_{n}\left( x\right) =\sin\left( 2n+1\right) \pi x,\\ \phi_{n}\left( x\right) & =\left( \frac{\alpha}{1-\alpha}+x\right) \frac{\cos\left( 2n+1\right) \pi x}{2\left( 2n+1\right) \pi} \end{align*} for $n=0,1,2,\ldots$, respectively. The biorthogonal systems analogous to (16), (17) are \begin{equation} \left\{ \cos\left( 2n+1\right) \pi x,\frac{4\left( 1-\overline{\alpha }\right) }{1+\overline{\alpha}}\left( \dfrac{1}{1-\overline{\alpha} }-x\right) \sin\left( 2n+1\right) \pi x\right\} _{n=0}^{\infty} \end{equation} \begin{equation} \left\{ \sin\left( 2n+1\right) \pi x,\frac{4\left( 1-\alpha\right) }{1+\alpha}\left( \dfrac{\alpha}{1-\alpha}+x\right) \cos\left( 2n+1\right) \pi x\right\} _{n=0}^{\infty} \end{equation} respectively.
Analogous formulas to (18) and (19) are \begin{equation} \left( \lambda_{N,j}-\left( \pi+2\pi n\right) ^{2}\right) \left( \Psi_{N,j},\cos\left( 2n+1\right) \pi x\right) =\left( q\left( x\right) \Psi_{N,j},\cos\left( 2n+1\right) \pi x\right) , \end{equation} \begin{equation} \left( \lambda_{N,j}-\left( \left( 2n+1\right) \pi\right) ^{2}\right) \left( \Psi_{N,j},\varphi_{n}^{\ast}\right) -\left( 2n+1\right) \gamma _{4}\left( \Psi_{N,j},\cos\left( 2n+1\right) \pi x\right) =\left( q\left( x\right) \Psi_{N,j},\varphi_{n}^{\ast}\right) \end{equation} respectively, where \[ \gamma_{4}=\frac{8\pi\left( 1-\alpha\right) }{1+\alpha}. \] Instead of (16)-(19) using (81)-(84) and arguing as in the proofs of Theorem 1 and Theorem 2 we obtain the following results for $T_{4}.$
\begin{theorem} If (9) holds, then the large eigenvalues $\lambda_{n,j,4}$ are simple and satisfy the following asymptotic formulas \begin{equation} \lambda_{n,j,4}=\left( \left( 2n+1\right) \pi\right) ^{2}+\left( -1\right) ^{j}\frac{\sqrt{2\gamma_{4}}}{2}\sqrt{\left( 2n+1\right) s_{2n+1}}(1+o(1)). \end{equation} for $j=1,2.$ The eigenfunctions $\Psi_{n,j,4}$ corresponding to $\lambda _{n,j,4}$ obey \begin{equation} \Psi_{n,j,4}=\sqrt{2}\sin\left( 2n+1\right) \pi x+O\left( n^{-1/2}\right) . \end{equation} Moreover, if there exists a sequence $\left\{ n_{k}\right\} $ such that (9) holds when $n$ is replaced by $n_{k},$ then the root functions of $T_{4}$ do not form a Riesz basis. \end{theorem}
\begin{remark} Suppose that \begin{equation} \int_{0}^{1}xq\left( x\right) dx\neq0. \end{equation} If \begin{equation} \frac{1}{2}s_{2n}+B=o\left( \frac{1}{n}\right) , \end{equation} where $B$ is defined by (34), then, arguing as in the proof of Theorem 2, we obtain that the large eigenvalues of the operator $T_{1}$ are simple. Moreover, if there exists a sequence $\left\{ n_{k}\right\} $ such that (88) holds when $n$ is replaced by $n_{k},$ then the root functions of $T_{1}$ do not form a Riesz basis. Similar results can be obtained for the operators $T_{2},T_{3}$ and $T_{4}.$ \end{remark}
\begin{remark} Using (31) and (35) and arguing as in the proof of Theorem 3 of [1], one can obtain asymptotic formulas of arbitrary order for the eigenvalues and eigenfunctions of the operator $T_{1}.$ Similar formulas can be obtained for the operators $T_{2},T_{3}$ and $T_{4}.$ \end{remark}
\end{document}
\begin{document}
\title{Restoring broken entanglement by separable correlations}
\begin{abstract} We consider two bosonic Gaussian channels whose thermal noise is strong enough to break bipartite entanglement. In this scenario, we show how the presence of separable correlations between the two channels is able to restore the broken entanglement. This reactivation occurs not only in a scheme of direct distribution, where a third party (Charlie) broadcasts entangled states to remote parties (Alice and Bob), but also in a configuration of indirect distribution which is based on entanglement swapping. In both schemes, the amount of entanglement remotely activated can be large enough to be distilled by one-way distillation protocols.
\end{abstract}
\pacs{03.65.Ud, 03.67.--a, 42.50.--p, 89.70.Cf} \author{Gaetana Spedalieri} \author{Stefano Pirandola} \email{[email protected]} \affiliation{Computer Science and York Centre for Quantum Technologies, University of York, York YO10 5GH, United Kingdom} \maketitle
Entanglement is a fundamental physical resource in quantum information and computation. Once two parties, say Alice and Bob, share a suitable amount of entanglement, they can implement a variety of powerful protocols~\cite{NielsenBook,Mwilde}. In a scheme of direct distribution, there is a middle station (Charlie) possessing a bipartite system in an entangled state; one subsystem is sent to Alice and the other to Bob. Alternatively, in a scheme of indirect distribution, known as entanglement swapping, the distribution is mediated by a measurement process. Here Alice and Bob each has a bipartite system prepared in an entangled state. One subsystem is retained while the other is sent to Charlie. At his station, Charlie detects the two incoming subsystems by performing a suitable Bell measurement and communicates the classical outcome back to Alice and Bob. As a result of this process, the two subsystems retained by the remote parties are projected onto an entangled state.
\begin{figure}
\caption{Direct and indirect schemes for entanglement distribution in memoryless and correlated-noise environments. Charlie is the middle station transmitting to or receiving systems from Alice (left station)
and Bob (right station). Ellipses represent entangled states, black circles represent channels, and detectors are Bell measurements. (1) Direct distribution in a memoryless environment. If the individual channels are EB, then no distribution of entanglement is possible. We have $A|B^{\prime}$ and
$A^{\prime}|B$ which implies $A^{\prime}|B^{\prime}$. (2) Entanglement swapping in a memoryless environment. We have that $a|A^{\prime}$ and
$B^{\prime}|b$ implies $a|b$. (3) Direct distribution in a correlated environment. Despite $A|B^{\prime}$ and $A^{\prime}|B$ we have that
$A^{\prime}-B^{\prime}$ is possible. Surprisingly, this can be realized by a separable environment. (4) Entanglement swapping in a correlated environment. Despite $a|A^{\prime}$ and $B^{\prime}|b$ we have that $a-b$ is possible. Surprisingly, this is realizable by a separable environment.}
\label{scenario}
\end{figure}
In both configurations, entanglement distribution is possible as long as the action of the environment is not too strong. When decoherence is strong enough to destroy any input entanglement, the environment results in an entanglement breaking (EB) channel~\cite{EBchannels,HolevoEB}. By definition, a quantum channel $\mathcal{E}$\ is EB when its local action on one part of a bipartite state always results in a separable output state. In other words, given two systems, $A$ and $B$, in an arbitrary bipartite state $\rho_{AB}$, the output state $\rho_{AB^{\prime}}=(\mathcal{I}_{A}\otimes\mathcal{E} _{B})(\rho_{AB})$\ is always separable, where $\mathcal{I}_{A}$ is the identity channel applied to system $A$ and $\mathcal{E}_{B}$ is the EB channel applied to system $B$. Thus, if the input systems $A$ and $B$ were initially entangled (here denoted by the notation $A-B$), the output systems $A$ and
$B^{\prime}$ are separable (here denoted by the notation $A|B^{\prime}$).
The standard model of decoherence is assumed to be Markovian, where the travelling systems are subject to memoryless channels. For instance, consider the case of direct distribution depicted in the panel~(1) of Fig.~\ref{scenario}. In the standard Markovian description, the entangled state $\rho_{AB}$ of the input systems $A$ and $B$ is subject to a tensor product of channels $\mathcal{E}_{A}\otimes\mathcal{E}_{B}$. In this case, there is clearly no way to distribute entanglement if both $\mathcal{E}_{A}$ and $\mathcal{E}_{B}$ are EB channels. Suppose that Charlie tries to share entanglement with one of the remote parties by sending one of the two systems while keeping the other (one-system transmission). For instance, Charlie may keep system $A$ while transmitting system $B$ to Bob. The action of $\mathcal{I}_{A}\otimes\mathcal{E}_{B}$ destroys the initial entanglement, so that systems $A$ (kept) and $B^{\prime}$ (transmitted) are separable
($A|B^{\prime}$). Symmetrically, the action of $\mathcal{E}_{A}\otimes \mathcal{I}_{B}$ destroys the entanglement between system $A^{\prime}$
(transmitted) and system $B$ (kept), i.e., we have $A^{\prime}|B$. Then suppose that Charlie sends both his systems to Alice and Bob (two-system transmission). This strategy will also fail since the joint action of the two EB\ channels is given by the tensor product $\mathcal{E}_{A}\otimes \mathcal{E}_{B}=(\mathcal{E}_{A}\otimes\mathcal{I}_{B})(\mathcal{I}_{A} \otimes\mathcal{E}_{B})$. In other words, since we have one-system EB
($A|B^{\prime}$ and $A^{\prime}|B$) then we must have two-system EB
($A^{\prime}|B^{\prime}$).
The previous reasoning can be extended to the case of indirect distribution as shown in panel (2) of Fig.~\ref{scenario} involving a Bell measurement by Charlie. Since the environment is memoryless ($\mathcal{E}_{A}\otimes
\mathcal{E}_{B}$), we have that the absence of entanglement before the Bell measurement ($a|A^{\prime}$ and $B^{\prime}|b$) is a sufficient condition for the swapping protocol to fail, i.e., the remote systems $a$ and $b$ remain separable ($a|b$). Similarly to the previous case, if one-system transmission does not distribute entanglement, then two-system transmission cannot lead to entanglement generation via the swapping protocol.
Here we discuss how the previous implications for direct and indirect distribution of entanglement are false in the presence of a correlated-noise environment: Two-system transmission can successfully distribute entanglement despite one-system transmission being subject to EB. In other words, by combining two EB\ channels into a joint suitably-correlated environment, we can reactivate the distribution of entanglement. We will show the physical conditions under which the environmental correlations are able to trigger the reactivation, therefore \textquotedblleft breaking entanglement-breaking\textquotedblright. The most remarkable finding is that we do not need to consider an entangled state for the environment: The injection of separable correlations from the environment is sufficient for the restoration.
To better clarify these points, consider the schemes of direct and indirect distribution in the presence of a correlated-noise environment. In the scheme of direct distribution shown in panel~(3) of Fig.~\ref{scenario}, an input entangled state $\rho_{AB}$ is jointly transformed into an output state $\rho_{A^{\prime}B^{\prime}}=\mathcal{E}_{AB}(\rho_{AB})$. We assume that the dilation of the composite channel $\mathcal{E}_{AB}$ is realized by introducing a two-system environment, $E_{1}$ and $E_{2}$, in a bipartite state $\rho_{E_{1}E_{2}}$, which interacts with the incoming systems via two unitaries $U_{AE_{1}}$ (transforming $A$ and $E_{1}$) and $U_{BE_{2}}$ (transforming $B$ and $E_{2}$). In other words, the output state can be written in the form
\begin{align} \rho_{A^{\prime}B^{\prime}} & =\mathrm{Tr}_{E_{1}E_{2}}[(U_{AE_{1}}\otimes U_{BE_{2}})\nonumber\\ & \times(\rho_{AB}\otimes\rho_{E_{1}E_{2}})(U_{AE_{1}}^{\dagger}\otimes U_{BE_{2}}^{\dagger})]. \label{eqDIL} \end{align}
If the environmental state is not tensor product, i.e., $\rho_{E_{1}E_{2}} \neq\rho_{E_{1}}\otimes\rho_{E_{2}}$, then the composite channel cannot be decomposed into memoryless channels, i.e., $\mathcal{E}_{AB}\neq \mathcal{E}_{A}\otimes\mathcal{E}_{B}$. In any case, from the dilation given in Eq.~(\ref{eqDIL}), we can always define the reduced channels, $\mathcal{E}_{A}$ and $\mathcal{E}_{B}$, acting on the individual systems. For instance, if only system $B$ is transmitted, then we have the evolved state
\begin{align} \rho_{AB^{\prime}} & =(\mathcal{I}_{A}\otimes\mathcal{E}_{B})(\rho _{AB})\nonumber\\ & =\mathrm{Tr}_{E_{2}}[(I_{A}\otimes U_{BE_{2}})\rho_{AB}\otimes\rho_{E_{2} }(I_{A}\otimes U_{BE_{2}}^{\dagger})], \end{align} where $\rho_{E_{2}}=\mathrm{Tr}_{E_{1}}(\rho_{E_{1}E_{2}})$. A similar formula holds for the evolution of the other system $\rho_{A^{\prime}B}=(\mathcal{E} _{A}\otimes\mathcal{I}_{B})(\rho_{AB})$.
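To make the role of the environmental correlations concrete, the following minimal qubit sketch (our own illustration with arbitrary sample states and unitaries; the schemes analyzed below involve bosonic modes instead) implements the dilation of Eq.~(\ref{eqDIL}) and verifies that a tensor-product environment makes the composite channel factorize as $\mathcal{E}_{A}\otimes\mathcal{E}_{B}$, while a correlated (here even only classically correlated) environment does not.
\begin{verbatim}
# Qubit illustration (ours) of the dilation above: a product environment
# gives a channel that factorizes into the reduced channels, a correlated
# environment does not.  All states and unitaries are sample choices.
import numpy as np

rng = np.random.default_rng(1)

def random_unitary(d):
    # unitary from the QR decomposition of a complex Gaussian matrix
    z = (rng.normal(size=(d, d)) + 1j*rng.normal(size=(d, d))) / np.sqrt(2)
    q, r = np.linalg.qr(z)
    return q * (np.diag(r) / np.abs(np.diag(r)))

I2 = np.eye(2)
SWAP = np.array([[1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]], dtype=complex)

U_AE1 = random_unitary(4)        # unitary coupling A with E1
U_BE2 = random_unitary(4)        # unitary coupling B with E2

bell = np.zeros(4, dtype=complex); bell[0] = bell[3] = 1/np.sqrt(2)
rho_AB = np.outer(bell, bell.conj())             # entangled input of A,B
rho_th = np.diag([0.7, 0.3]).astype(complex)     # "thermal-like" qubit state

def ptrace_last(rho, d_keep, d_out):
    # partial trace over the last subsystem of dimension d_out
    r = rho.reshape(d_keep, d_out, d_keep, d_out)
    return np.einsum('aeAe->aA', r)

def joint_channel(rho_AB, rho_E1E2):
    # registers ordered as (A,B,E1,E2); U_AE1 acts on (A,E1) and U_BE2 on
    # (B,E2), i.e. S (U_AE1 x U_BE2) S with S swapping the middle qubits
    S = np.kron(np.kron(I2, SWAP), I2)
    U = S @ np.kron(U_AE1, U_BE2) @ S
    rho = U @ np.kron(rho_AB, rho_E1E2) @ U.conj().T
    return ptrace_last(rho, 4, 4)                # trace out (E1,E2)

def channel_B(rho_AB):
    # reduced channel E_B: couple only B with E2 (ordering A,B,E2)
    U = np.kron(I2, U_BE2)
    rho = U @ np.kron(rho_AB, rho_th) @ U.conj().T
    return ptrace_last(rho, 4, 2)

def channel_A(rho_AB):
    # reduced channel E_A: couple only A with E1 (ordering A,B,E1)
    S = np.kron(I2, SWAP)                        # brings E1 next to A
    U = S @ np.kron(U_AE1, I2) @ S
    rho = U @ np.kron(rho_AB, rho_th) @ U.conj().T
    return ptrace_last(rho, 4, 2)

# (i) memoryless environment: the composite channel factorizes
out_joint = joint_channel(rho_AB, np.kron(rho_th, rho_th))
out_seq   = channel_A(channel_B(rho_AB))
print(np.max(np.abs(out_joint - out_seq)))       # ~ machine precision

# (ii) classically correlated environment: no factorization in general
rho_corr = 0.5*(np.kron(np.diag([1,0]), np.diag([1,0])) +
                np.kron(np.diag([0,1]), np.diag([0,1]))).astype(complex)
print(np.max(np.abs(joint_channel(rho_AB, rho_corr) - out_seq)))  # generically nonzero
\end{verbatim}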
Now, assuming that $\mathcal{I}_{A}\otimes\mathcal{E}_{B}$ and $\mathcal{E}
_{A}\otimes\mathcal{I}_{B}$ are EB channels (so that $A|B^{\prime}$ and
$B|A^{\prime}$), the composite channel $\mathcal{E}_{AB}$ can still preserve entanglement (so that $A^{\prime}-B^{\prime}$ is possible). In other words, we have a paradoxical situation where Charlie is not able to share entanglement with Alice or Bob, but still can distribute entanglement to them. This is clearly an effect of the injected correlations coming from the environmental state $\rho_{E_{1}E_{2}}$. As mentioned earlier, our main finding is that these correlations do not need to be strong: Entanglement distribution can be activated by separable correlations, i.e., by an environment which is in a separable state $\rho_{E_{1}E_{2}}$.
This effect of reactivation can also be extended to entanglement distillation, which typically requires stronger conditions than entanglement distribution (as demanded by the existence of effective distillation protocols). Although the individual channels are EB, their combination into a separable environment enables Charlie to distribute distillable entanglement to Alice and Bob. This is easy to prove for an environment with finite memory, which can be decomposed as $\mathcal{E}_{AB}\otimes\mathcal{E}_{AB}\otimes\ldots$
In our investigation, we also consider the case of entanglement swapping in a correlated-noise environment as depicted in panel~(4) of Fig.~\ref{scenario}. Here Alice and Bob have two entangled states, $\rho_{aA}$\ and $\rho_{Bb}$, respectively. Systems $a$ and $b$ are retained, while systems $A$ and $B$ are transmitted to Charlie, therefore undergoing the joint quantum channel $\mathcal{E}_{AB}$. Before the Bell measurement, the global state is described by
\begin{align} \rho_{aA^{\prime}B^{\prime}b} & =(\mathcal{I}_{a}\otimes\mathcal{E} _{AB}\otimes\mathcal{I}_{b})(\rho_{aA}\otimes\rho_{Bb})\nonumber\\ & =\mathrm{Tr}_{E_{1}E_{2}}[U(\rho_{aA}\otimes\rho_{E_{1}E_{2}}\otimes \rho_{Bb})U^{\dagger}]~, \end{align} where $U=I_{a}\otimes U_{AE_{1}}\otimes U_{E_{2}B}\otimes I_{b}$.
As before, we consider the case where the reduced channels, $\mathcal{E}_{A}$
and $\mathcal{E}_{B}$, are EB channels, so that no entanglement survives before the Bell measurement ($a|A^{\prime}$ and $B^{\prime}|b$). If the environment has no memory ($\rho_{E_{1}E_{2}}=\rho_{E_{1}}\otimes\rho_{E_{2}}
$) there is no way to distribute entanglement to Alice and Bob ($a|b$). By contrast, if the environment has memory ($\rho_{E_{1}E_{2}}\neq\rho_{E_{1} }\otimes\rho_{E_{2}}$), then entanglement distribution is possible ($a-b$) and this distribution can be activated by a separable environmental state
$\rho_{E_{1}E_{2}}$. Thus, we have the paradoxical situation where no bipartite entanglement survives at Charlie's station ($a|A^{\prime}$ and
$B^{\prime}|b$), but still the swapping protocol is able to generate remote entanglement at Alice's and Bob's stations ($a-b$) thanks to the separable correlations injected by the environment. As before, these separable correlations can be strong enough to distribute distillable entanglement to the remote parties.
The environmental reactivation of entanglement distribution can be proven~\cite{NJP} for quantum systems with Hilbert spaces of any dimension, both finite (discrete-variable systems) and infinite (continuous-variable systems~\cite{BraREV,BraREV2,RMP}). We remark that the phenomenon of reactivation in direct distribution is not surprising in specific lossless scenarios where the environment is \textquotedblleft twirling\textquotedblright, i.e., a classical mixture of operators of the type $U\otimes U$ or $U\otimes U^{\ast}$, with $U$ being a unitary~\cite{NJP}. In this case, it is easy to find a fixed point in the joint map of the environment, so that a state can be perfectly distributed, despite the fact that the local (single-system) channels may become entanglement breaking~\cite{NJP}. In discrete variables with Hilbert space dimensionality $d\geq2$, these fixed points are the multi-dimensional Werner states~\cite{Werner} (invariant under $U\otimes U$-twirling) and the multi-dimensional isotropic states~\cite{HOROs} (invariant under $U\otimes U^{\ast}$-twirling). Similarly, one can consider continuous-variable Werner states which are invariant under anti-correlated phase-space rotations (non-Gaussian twirlings)~\cite{NJP}. However, all these cases are artificial since they are associated with lossless environments. The phenomenon of reactivation becomes non-trivial in the presence of loss as typical for continuous variable systems in realistic Gaussian environments.
In the configuration of indirect distribution, we can also find simple examples of reactivation with discrete variable systems (in particular, qubits) when the environment is lossless and $U\otimes U^{\ast}$-twirling. Suppose that $\rho_{aA}$ and $\rho_{bB}$ are Bell pairs, e.g., singlet states \begin{equation} \left\vert -\right\rangle =\frac{1}{\sqrt{2}}\left( \left\vert 0,1\right\rangle -\left\vert 1,0\right\rangle \right) ~. \end{equation} Then suppose that qubits $A$ and $B$ are subject to twirling, which means that $\rho_{AB}$ is transformed as \begin{equation} \rho_{A^{\prime}B^{\prime}}=\int dU~(U\otimes U^{\ast})~\rho_{AB}~(U\otimes U^{\ast})^{\dagger}~, \end{equation} where the integral is over the entire unitary group $\mathcal{U}(2)$ acting on the bi-dimensional Hilbert space and $dU$ is the Haar measure. Now the application of a Bell detection on the output qubits $A^{\prime}$ and $B^{\prime}$ has the effect to cancel the environmental noise. In fact, one can easily check that the output state of $a$ and $b$ will be projected onto a singlet state up to a Pauli operator, which is compensated via the communication of the Bell outcome. Again, the phenomenon becomes non-trivial when more realistic environments are taken into account, in particular, lossy environments as typical for continuous-variable systems.
For this reason we discuss here the reactivation phenomenon using continuous-variable systems. In particular, we consider the bosonic modes of the electromagnetic field. The input modes are prepared in Gaussian states with Einstein-Podolsky-Rosen (EPR) correlations~\cite{RMP,EPR}, which are the most typical form of continuous variable entanglement. These modes are then assumed to evolve under the action of a lossy Gaussian environment. This type of environment is modelled by two beam splitters which mix the travelling modes, $A$ and $B$, with two environmental modes, $E_{1}$ and $E_{2}$, prepared in a bipartite Gaussian state $\rho_{E_{1}E_{2}}$ (separable or entangled). The reduced channels, $\mathcal{E}_{A}$ and $\mathcal{E}_{B} $,\ are two lossy channels whose transmissivities and thermal noises are such to make them EB channels. To achieve simple analytical results, in this manuscript we only consider the limit of large entanglement for the input states.
The paper is structured as follows. In Sec.~\ref{SECgauss} we characterize the basic model of correlated Gaussian environment, which directly generalizes the standard model of thermal-loss environment. We identify the physical conditions under which the correlated Gaussian environment is separable or entangled. In Sec.~\ref{SECdirect}, we study the direct distribution of entanglement in the presence of the correlated Gaussian environment and assuming the condition of one-system EB. We provide the regimes of parameters under which remote entanglement is activated by the environmental correlations (in particular, separable correlations) and the stronger regimes where the generated remote entanglement is also distillable. This part is a review of results already known in the literature~\cite{NJP}. Then, in Sec.~\ref{SECindirect}, we generalize the theory of entanglement swapping to the correlated Gaussian environment. We consider swapping and distillation of entanglement, finding the regimes of parameters where these tasks are successful despite the EB condition. Finally, Sec.~\ref{SECconclusion} is for conclusion and discussion.
\section{Correlated Gaussian environment\label{SECgauss}}
We consider two beam splitters (with transmissivity $\tau$) which combine modes $A$ and $B$ with two environmental modes, $E_{1}$ and $E_{2}$, respectively. These ancillary modes are in a zero-mean Gaussian state $\rho_{E_{1}E_{2}}$ symmetric under $E_{1}$-$E_{2}$ permutation. In the memoryless model, the environmental state is tensor-product $\rho_{E_{1}E_{2} }=\rho\otimes\rho$, meaning that $E_{1}$ and $E_{2}$ are fully independent. In particular, $\rho$ is a thermal state with covariance matrix (CM) $\omega\mathbf{I}$, where the noise variance $\omega=2\bar{n}+1$ quantifies the mean number of thermal photons $\bar{n}$\ entering the beam splitter. Each interaction is then equivalent to a lossy channel with transmissivity $\tau$ and thermal noise $\omega$.
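In terms of covariance matrices, and adopting a standard beam-splitter convention (equivalent sign choices lead to the same reduced channel), each beam splitter acts on the pair of modes it couples through the symplectic transformation
\[
\mathbf{S}(\tau)=\left(
\begin{array}
[c]{cc}
\sqrt{\tau}\mathbf{I} & \sqrt{1-\tau}\mathbf{I}\\
-\sqrt{1-\tau}\mathbf{I} & \sqrt{\tau}\mathbf{I}
\end{array}
\right) ,
\]
so that, once the environmental mode is traced out, a single-mode CM $\mathbf{V}$ is mapped into $\tau\mathbf{V}+(1-\tau)\omega\mathbf{I}$, i.e., exactly the lossy channel just described.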
\begin{figure}
\caption{\textit{Left}. Correlated Gaussian environment, with losses $\tau$, thermal noise $\omega$ and correlations $\mathbf{G}$. The state of the environment $E_{1}$ and $E_{2}$ can be separable or entangled. \textit{Right}. Correlation plane $(g,g^{\prime})$ for the Gaussian environment, corresponding to thermal noise $\omega=2$. The black area identifies forbidden environments (correlations are too strong to be compatible with quantum mechanics). White area identifies physical environments, i.e., the subset of points which satisfy the bona-fide conditions of Eq.~(\ref{CMconstraints}). Whitin this area, the inner region labbeled by S identifies separable environments, while the two outer regions identify entangled environments. Figures adapted from Ref.~\cite{NJP} under a CC BY 3.0 licence (http://creativecommons.org/licenses/by/3.0/).}
\label{ENVschemes}
\end{figure}
This Gaussian process can be generalized to include the presence of correlations between the environmental modes as depicted in the right panel of Fig.~\ref{ENVschemes}. The simplest extension of the model consists\ of taking the ancillary modes, $E_{1}$ and $E_{2}$, in a zero-mean Gaussian state $\rho_{E_{1}E_{2}}$ with CM given by the symmetric normal form \begin{equation} \mathbf{V}_{E_{1}E_{2}}(\omega,g,g^{\prime})=\left( \begin{array} [c]{cc} \omega\mathbf{I} & \mathbf{G}\\ \mathbf{G} & \omega\mathbf{I} \end{array} \right) ~, \label{EVE_cmAPP} \end{equation} where $\omega\geq1$ is the thermal noise variance associated with each ancilla, and the off-diagonal block \begin{equation} \mathbf{G=}\left( \begin{array} [c]{cc} g & \\ & g^{\prime} \end{array} \right) ~, \label{Gblock} \end{equation} accounts for the correlations between the ancillas. This type of environment can be separable or entangled (conditions for separability will be given afterwards).
It is clear that, when we consider the two interactions $A-E_{1}$ and $B-E_{2}$ separately, the environmental correlations are washed away. In fact, by tracing out $E_{2}$, we are left with mode $E_{1}$ in a thermal state ($\mathbf{V}_{E_{1}}=\omega\mathbf{I}$) which is combined with mode $A$ via the beam-splitter. In other words, we have again a lossy channel with transmissivity $\tau$ and thermal noise $\omega$. The scenario is identical for the other mode $B$ when we trace out $E_{1}$. However, when we consider the joint action of the two environmental modes, the correlation block $\mathbf{G}$ comes into play and the global dynamics of the two travelling modes becomes completely different from the standard memoryless scenario.
Before studying the system dynamics and the corresponding evolution of entanglement, we need to characterize the correlation block $\mathbf{G}$\ more precisely. In fact, the two correlation parameters, $g$ and $g^{\prime}$, cannot be completely arbitrary but must satisfy specific physical constraints. These parameters must vary within ranges which make the CM of Eq.~(\ref{EVE_cmAPP}) a bona-fide quantum CM. Given an arbitrary value of the thermal noise $\omega\geq1$, the correlation parameters must satisfy the following three bona-fide conditions~\cite{TwomodePRA,NJP} \begin{equation}
|g|<\omega,~~~|g^{\prime}|<\omega,~~~\omega^{2}+gg^{\prime}-1\geq \omega\left\vert g+g^{\prime}\right\vert . \label{CMconstraints} \end{equation}
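For concreteness, the following small helper (ours; the function name and the sample points are arbitrary) evaluates these bona-fide conditions for a candidate triple $(\omega,g,g^{\prime})$.
\begin{verbatim}
# Check whether (omega, g, g') satisfies the three bona-fide conditions above.
def bona_fide(omega, g, gp):
    return (abs(g) < omega and abs(gp) < omega
            and omega**2 + g*gp - 1 >= omega*abs(g + gp))

# sample points for omega = 2: the first two are bona fide, the last two are not
for g, gp in [(0.0, 0.0), (1.5, -1.5), (1.9, 1.9), (1.99, -1.99)]:
    print((g, gp), bona_fide(2.0, g, gp))
\end{verbatim}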
\subsection{Separability properties}
Once we have clarified the bona-fide conditions for the environment, the next step is to characterize its separability properties. For this aim, we compute the smallest partially-transposed symplectic (PTS) eigenvalue $\varepsilon$ associated with the CM\ $\mathbf{V}_{E_{1}E_{2}}$. For Gaussian states, this eigenvalue represents an entanglement monotone which is equivalent to the log-negativity~\cite{logNEG1,logNEG2,logNEG3} $\mathcal{E}=\max\left\{ 0,-\log\varepsilon\right\} $. After simple algebra, we get~\cite{NJP} \begin{equation}
\varepsilon=\sqrt{\omega^{2}-gg^{\prime}-\omega|g-g^{\prime}|}~. \end{equation} Provided that the conditions of Eq.~(\ref{CMconstraints}) are satisfied, the separability condition $\varepsilon\geq1$ is equivalent to \begin{equation}
\omega^{2}-gg^{\prime}-1\geq\omega|g-g^{\prime}|~. \label{sepCON} \end{equation}
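These two quantities are equally easy to evaluate numerically. The following sketch (ours, for illustration only) computes the smallest PTS eigenvalue of the environment and tests the separability condition of Eq.~(\ref{sepCON}) for bona-fide parameters.
\begin{verbatim}
import math

def env_pts_eigenvalue(omega, g, gp):
    """Smallest PTS eigenvalue of the environmental CM V_{E1E2}."""
    return math.sqrt(omega**2 - g * gp - omega * abs(g - gp))

def env_is_separable(omega, g, gp):
    """Separability condition of Eq. (sepCON); assumes bona-fide parameters."""
    return env_pts_eigenvalue(omega, g, gp) >= 1.0

# omega = 2: (1.5, -1.5) is an entangled environment, (1.0, 1.0) a separable one
print(env_pts_eigenvalue(2.0, 1.5, -1.5), env_is_separable(2.0, 1.5, -1.5))
print(env_pts_eigenvalue(2.0, 1.0, 1.0), env_is_separable(2.0, 1.0, 1.0))
\end{verbatim}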
To visualize the structure of the environment, we provide a numerical example in Fig.~\ref{ENVschemes}. In the right panel of this figure, we consider the \textit{correlation plane} which is spanned by the two parameters $g$ and $g^{\prime}$. For a given value of the thermal noise $\omega$, we identify the subset of points which satisfy the bona-fide conditions of Eq.~(\ref{CMconstraints}). This subset corresponds to the white area in the figure. Within this area, we then characterize the regions which correspond to separable environments (area labelled by S) and entangled environments (areas labelled by E).
\section{Direct distribution of entanglement in a correlated Gaussian environment\label{SECdirect}}
Let us study the system dynamics and the entanglement propagation in the presence of a correlated Gaussian environment, reviewing some key results from the literature~\cite{NJP}. Suppose that Charlie has an entanglement source described by an EPR\ state $\rho_{AB}$ with CM \begin{equation} \mathbf{V}(\mu)=\left( \begin{array} [c]{cc} \mu\mathbf{I} & \mu^{\prime}\mathbf{Z}\\ \mu^{\prime}\mathbf{Z} & \mu\mathbf{I} \end{array} \right) ~, \label{CM_TMSV} \end{equation} where $\mu\geq1$, $\mu^{\prime}:=\sqrt{\mu^{2}-1}$, and $\mathbf{Z}$\ is the reflection matrix \begin{equation} \mathbf{Z}:=\left( \begin{array} [c]{cc} 1 & \\ & -1 \end{array} \right) ~. \label{ZetaMAT} \end{equation} We may consider the different scenarios depicted in the three panels of Fig.~\ref{twomodeEB}. Charlie may attempt to distribute entanglement to Alice and Bob as shown in Fig.~\ref{twomodeEB}(1), or he may try to share entanglement with one of the remote parties, as shown in Figs.~\ref{twomodeEB} (2) and~(3).
\begin{figure}
\caption{Scenarios for direct distribution of entanglement. (1) Charlie has two modes $A$ and $B$ prepared in an EPR state $\rho_{AB}$. In order to distribute entanglement to the remote parties, Charlie transmits the two modes through the correlated Gaussian environment characterized by transmissivity $\tau$, thermal noise $\omega$ and correlations $\mathbf{G}$. (2) Charlie aims to share entanglement with Alice. He then keeps mode $B$ while sending mode $A$ to Alice through the lossy channel $\mathcal{E}_{A}$. (3) \ Charlie aims to share entanglement with Bob. He then keeps mode $A$ while sending mode $B$ to Bob through the lossy channel $\mathcal{E}_{B}$.}
\label{twomodeEB}
\end{figure}
Let us start considering the scenario where Charlie aims to share entanglement with one of the remote parties (one-mode transmission). In particular, suppose that Charlie wants to share entanglement with Bob (by symmetry the derivation is the same if we consider Alice). For sharing entanglement, Charlie keeps mode $A$ while sending mode $B$ to Bob as shown in Fig.~\ref{twomodeEB}(3). The action of the environment is therefore reduced to $\mathcal{I}_{A} \otimes\mathcal{E}_{B}$, where $\mathcal{E}_{B}$ is a lossy channel applied to mode $B$. It is easy to check~\cite{NJP} that the output state $\rho _{AB^{\prime}}$, shared by Charlie and Bob, is Gaussian with zero mean and CM \begin{equation} \mathbf{V}_{AB^{\prime}}=\left( \begin{array} [c]{cc} \mu\mathbf{I} & \mu^{\prime}\sqrt{\tau}\mathbf{Z}\\ \mu^{\prime}\sqrt{\tau}\mathbf{Z} & x\mathbf{I} \end{array} \right) , \label{ABpCM} \end{equation} where \begin{equation} x:=\tau\mu+(1-\tau)\omega~. \end{equation}
Remarkably, we can compute closed analytical formulas in the limit of large $\mu$, i.e., large input entanglement. In this case, the entanglement of the output state $\rho_{AB^{\prime}}$ is quantified by the PTS\ eigenvalue \begin{equation} \varepsilon=\frac{1-\tau}{1+\tau}\omega~. \end{equation} The EB condition corresponds to the separability condition $\varepsilon\geq1$, which provides \begin{equation} \omega\geq\frac{1+\tau}{1-\tau}:=\omega_{\text{EB}}~, \label{EBcond} \end{equation} or equivalently $\bar{n}\geq\tau/(1-\tau)$. Although the EB condition of Eq.~(\ref{EBcond}) is derived for an EPR\ input, it is valid for any input state. In other words, a lossy channel $\mathcal{E}_{B}$\ with transmissivity $\tau$ and thermal noise $\omega\geq\omega_{\text{EB}}$ destroys the entanglement of any input state $\rho_{AB}$. Indeed, Eq.~(\ref{EBcond}) corresponds exactly to the well-known EB condition for lossy channels~\cite{HolevoEB}. The threshold condition $\omega=\omega_{\text{EB}}$ guarantees one-mode EB, i.e., the impossibility for Charlie to share entanglement with the remote party.
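As a simple numerical check (ours), the threshold of Eq.~(\ref{EBcond}) and its photon-number form can be tabulated for a few transmissivities; the standard parametrization $\omega=2\bar{n}+1$ is assumed here.
\begin{verbatim}
def omega_EB(tau):
    """Entanglement-breaking threshold of Eq. (EBcond) for a lossy channel."""
    return (1.0 + tau) / (1.0 - tau)

for tau in (0.25, 0.5, 0.75, 0.9):
    w = omega_EB(tau)
    nbar = (w - 1.0) / 2.0                # assumes omega = 2*nbar + 1
    print(tau, w, nbar, nbar >= tau / (1.0 - tau))
\end{verbatim}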
Now the central question is the following: Suppose that Charlie cannot share any entanglement with the remote parties (one-mode EB), can Charlie still distribute entanglement to them? In other words, suppose that the correlated Gaussian environment has transmissivity $\tau$ and thermal noise $\omega=\omega_{\text{EB}}$, so that the lossy channels $\mathcal{E}_{A}$\ and $\mathcal{E}_{B}$ are EB. Is it still possible to use the joint channel $\mathcal{E}_{AB}$ to distribute entanglement to Alice and Bob? In the following, we explicitly reply to this question, discussing how entanglement can be distributed by a separable environment, with the distributed amount being large enough to be distilled by one-way distillation protocols~\cite{NJP}.
Let us study the general evolution of the two modes $A$ and $B$ under the action of the environment as in Fig.~\ref{twomodeEB}(1). Since the input EPR\ state $\rho_{AB}$ is Gaussian and the environmental state $\rho _{E_{1}E_{2}}$ is Gaussian, the output state $\rho_{A^{\prime}B^{\prime}}$ is also Gaussian. This state has zero mean and CM given by~\cite{NJP} \begin{equation} \mathbf{V}_{A^{\prime}B^{\prime}}=\tau\mathbf{V}_{AB}+(1-\tau)\mathbf{V} _{E_{1}E_{2}}=\left( \begin{array} [c]{cc} x\mathbf{I} & \mathbf{H}\\ \mathbf{H} & x\mathbf{I} \end{array} \right) ~, \end{equation} where \begin{equation} \mathbf{H}:=\tau\mu^{\prime}\mathbf{Z}+(1-\tau)\mathbf{G}~. \end{equation} For large $\mu$, one can easily derive the symplectic spectrum of the output state \begin{equation}
\nu_{\pm}=\sqrt{\left( 2\omega+g^{\prime}-g\pm|g+g^{\prime}|\right) (1-\tau)\tau\mu}~, \end{equation} and its smallest PTS\ eigenvalue~\cite{NJP} \begin{equation} \varepsilon=(1-\tau)\sqrt{(\omega-g)(\omega+g^{\prime})}~, \label{epsMAIN} \end{equation} quantifying the entanglement distributed to Alice and Bob.
In the same limit, one can compute the coherent information~\cite{CohINFO,CohINFO2} $I(A\rangle B)$ between the two remote parties, which provides a lower bound to the number of entanglement bits per copy that can be distilled using one-way distillation protocols, i.e., protocols based on local operations and one-way classical communication. It is clear that one-way distillability implies two-way distillability, where both forward and backward communication is employed. After simple algebra, one achieves~\cite{NJP} \begin{equation} I(A\rangle B)=\log\frac{1}{e\varepsilon}~. \label{coheDIR} \end{equation} Thus, remote entanglement is distributed for $\varepsilon<1$ and is distillable for $\varepsilon<e^{-1}$.
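The two thresholds can be probed with a short script (an illustrative sketch of ours). For instance, at $\tau=0.75$ and $\omega=\omega_{\text{EB}}=7$, the memoryless environment gives $\varepsilon=1.75$, while the strongly correlated yet still separable point $(g,g^{\prime})=(6,-6)$ gives $\varepsilon=0.25<e^{-1}$.
\begin{verbatim}
import math

def eps_direct(tau, omega, g, gp):
    """Asymptotic PTS eigenvalue of Eq. (epsMAIN) for direct distribution."""
    return (1.0 - tau) * math.sqrt((omega - g) * (omega + gp))

tau = 0.75
omega = (1 + tau) / (1 - tau)             # one-mode EB environment
for g, gp in [(0.0, 0.0), (6.0, -6.0)]:   # memoryless vs correlated but separable
    e = eps_direct(tau, omega, g, gp)
    print((g, gp), e, e < 1, e < math.exp(-1))
\end{verbatim}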
Now suppose that the environment has thermal noise $\omega=\omega_{\text{EB}}$ (one-mode EB). Then, we can write
\begin{align} \varepsilon & =\sqrt{[1+\tau-(1-\tau)g][1+\tau+(1-\tau)g^{\prime} ]}\nonumber\\ & :=\varepsilon(\tau,g,g^{\prime}) \label{EBentEXP} \end{align} Answering the previous question corresponds to checking the existence of environmental parameters $\tau$, $g$ and $g^{\prime}$, for which\ $\varepsilon $ is sufficiently low: For a given value of the transmissivity $\tau$, we look for regions in the correlation plane $(g,g^{\prime})$ where $\varepsilon<1$ (remote entanglement is distributed) and possibly $\varepsilon<e^{-1}$ (remote entanglement is distillable). This is done in Fig.~\ref{dirTOT} for several numerical values of the transmissivity.
\begin{figure}\caption{Direct distribution of entanglement under the one-mode EB condition $\omega=\omega_{\text{EB}}$: regions of the correlation plane $(g,g^{\prime})$ where the PTS eigenvalue $\varepsilon(\tau,g,g^{\prime})$ of Eq.~(\ref{EBentEXP}) satisfies $\varepsilon<1$ (gray activation area), for several numerical values of the transmissivity $\tau$. See text for explanation.}\label{dirTOT}
\end{figure}
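The classification underlying Fig.~\ref{dirTOT} can be reproduced point by point with a short script such as the following (our sketch, not taken from Ref.~\cite{NJP}); the three sample points at $\tau=0.9$ turn out to be, respectively, separable and EB, separable with distributed entanglement, and separable with distillable entanglement.
\begin{verbatim}
import math

def classify_direct(tau, g, gp):
    """Classify a point of the correlation plane at omega = omega_EB."""
    w = (1 + tau) / (1 - tau)
    if not (abs(g) < w and abs(gp) < w and w**2 + g*gp - 1 >= w*abs(g + gp)):
        return "unphysical"
    sep = "separable" if w**2 - g*gp - 1 >= w*abs(g - gp) else "entangled"
    eps = math.sqrt((1 + tau - (1 - tau)*g) * (1 + tau + (1 - tau)*gp))
    if eps >= 1:
        return sep + ", EB"
    return sep + (", distillable" if eps < math.exp(-1) else ", distributed")

for point in [(0.0, 0.0), (10.0, -10.0), (17.0, -17.0)]:
    print(point, classify_direct(0.9, *point))
\end{verbatim}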
In Fig.~\ref{dirTOT}, the environments identified by the gray activation area allow Charlie to distribute entanglement to Alice and Bob ($\varepsilon<1$), even though it is impossible for him to share entanglement with either of the remote parties. In other words, these environments are two-mode entanglement preserving (EP), despite being one-mode EB. Furthermore, one can identify sufficiently-correlated environments for which the entanglement distributed to the remote parties can also be distilled ($\varepsilon<e^{-1}$).
The most remarkable feature in Fig.~\ref{dirTOT} is represented by the presence of separable environments in the activation area. In other words, there are separable environments which contain enough correlations to restore the distribution of entanglement to Alice and Bob. Furthermore, for sufficiently high transmissivities and correlations, these environments enable Charlie to distribute distillable entanglement. As we can note from Fig.~\ref{dirTOT}, the weight of separable environments in the activation area increases for increasing transmissivities, with the entangled environments almost disappearing for $\tau=0.9$.
\section{Entanglement swapping in a correlated Gaussian environment\label{SECindirect}}
In this section we consider the indirect distribution of entanglement, i.e., the protocol of entanglement swapping. We start with a brief review of this protocol in the ideal case of no noise. Then, we generalize its theory to the case of correlated-noise Gaussian environments, where we prove how entanglement swapping can be reactivated in the presence of one-mode EB.
\subsection{Entanglement swapping in the absence of noise\label{SECSUBswap1}}
Consider two remote parties, Alice and Bob, who possess two identical EPR states with CM given in Eq.~(\ref{CM_TMSV}). At Alice's station, the EPR\ state describes modes $a$ and $A$, while at Bob's station it describes modes $b$ and $B$. Alice and Bob keep modes $a$ and $b$, while sending modes $A$ and $B$ to Charlie, where a Bell measurement is performed. This means that the travelling modes $A$ and $B$ are combined in a balanced beam splitter whose output modes \textquotedblleft$-$\textquotedblright\ and \textquotedblleft$+$\textquotedblright\ are homodyned, with mode \textquotedblleft$-$\textquotedblright\ measured in the position quadrature and mode \textquotedblleft$+$\textquotedblright\ in the momentum quadrature. In other words, Charlie measures the two EPR quadratures $\hat{q}_{-} :=(\hat{q}_{A}-\hat{q}_{B})/\sqrt{2}$ and $\hat{p}_{+}:=(\hat{p}_{A}+\hat {p}_{B})/\sqrt{2}$. The Bell measurement provides two classical outcomes, $q_{-}$ and $p_{+}$, which can be compacted into a single complex variable
$\gamma:=q_{-}+ip_{+}$. The classical variable $\gamma$ is finally communicated to Alice and Bob, with the result of projecting their remote modes $a$ and $b$ into a conditional state $\rho_{ab|\gamma}$ (see Fig.~\ref{swap}). \begin{figure}
\caption{Entanglement swapping in the absence of noise. See text for explanation.}
\label{swap}
\end{figure}
Since the input states are pure Gaussian and the Bell measurement is a Gaussian measurement which projects pure states into pure states, we have that the remote conditional state $\rho_{ab|\gamma}$ turns out to be a pure Gaussian state. This state has a measurement-dependent mean $\mathbf{x}
=\mathbf{x}(\gamma)$ which Alice and Bob can always delete by conditional displacements. It is clear that these local unitaries do not alter the amount of entanglement in the state, as long as they are perfectly implemented. The conditional CM $\mathbf{V}_{ab|\gamma}$ can be computed using a simple input-output formula for Gaussian entanglement swapping~\cite{GaussSWAP}. We get \begin{equation}
\mathbf{V}_{ab|\gamma}=\frac{1}{2\mu}\left( \begin{array} [c]{cc} (\mu^{2}+1)\mathbf{I} & (\mu^{2}-1)\mathbf{Z}\\ (\mu^{2}-1)\mathbf{Z} & (\mu^{2}+1)\mathbf{I} \end{array} \right) ~. \label{CMswapNLess} \end{equation} Its smallest PTS\ eigenvalue is equal to $\varepsilon=\mu^{-1}$, which means that remote entanglement is always generated for entangled inputs ($\mu>1$). Furthermore, remote entanglement is present in the form of EPR correlations since the two remote EPR quadratures $\hat{q}_{-}^{r}:=(\hat{q}_{a}-\hat {q}_{b})/\sqrt{2}$ and $\hat{p}_{+}^{r}:=(\hat{p}_{a}+\hat{p}_{b})/\sqrt{2}$ have variances \begin{equation} V(\hat{q}_{-}^{r})=V(\hat{p}_{+}^{r})=\mu^{-1}~. \end{equation}
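The value $\varepsilon=\mu^{-1}$ can be verified directly from the CM of Eq.~(\ref{CMswapNLess}) by computing the symplectic spectrum of the partially transposed matrix; the following NumPy sketch (ours) does this for the quadrature ordering $(\hat{q}_{a},\hat{p}_{a},\hat{q}_{b},\hat{p}_{b})$, which is assumed throughout the snippet.
\begin{verbatim}
import numpy as np

def smallest_pts_eigenvalue(V):
    """Smallest symplectic eigenvalue of the partially transposed two-mode CM."""
    Omega = np.kron(np.eye(2), np.array([[0.0, 1.0], [-1.0, 0.0]]))
    P = np.diag([1.0, 1.0, 1.0, -1.0])    # partial transpose on the second mode
    Vt = P @ V @ P
    return np.abs(np.linalg.eigvals(1j * Omega @ Vt)).min()

mu = 5.0
I2, Z = np.eye(2), np.diag([1.0, -1.0])
V = np.block([[(mu**2 + 1) * I2, (mu**2 - 1) * Z],
              [(mu**2 - 1) * Z, (mu**2 + 1) * I2]]) / (2 * mu)
print(smallest_pts_eigenvalue(V), 1 / mu)   # both equal 1/mu = 0.2
\end{verbatim}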
The simplest description of the entanglement swapping protocol can be given when we consider the limit $\mu\rightarrow\infty$. In this case the initial states are ideal EPR states with quadratures perfectly correlated, i.e., $\hat{q}_{a}=\hat{q}_{A}$ and $\hat{p}_{a}=-\hat{p}_{A}$ for Alice, and $\hat{q}_{b}=\hat{q}_{B}$ and $\hat{p}_{b}=-\hat{p}_{B}$ for Bob. Then, the overall action of Charlie, i.e., the Bell measurement plus classical communication, corresponds to creating a remote state with \begin{equation} \hat{q}_{b}=\hat{q}_{a}-\sqrt{2}q_{-},~\hat{p}_{b}=-\hat{p}_{a}-\sqrt{2} p_{+}~. \end{equation}
The quadratures of the two remote modes are perfectly correlated, up to an erasable displacement. In other words, the ideal EPR\ correlations have been swapped from the initial states to the final conditional state $\rho _{ab|\gamma}$.
\subsection{Entanglement swapping in the presence of correlated noise\label{SECSUBswap2}}
The theory of entanglement swapping can be extended to include the presence of loss and correlated noise. We consider our model of correlated Gaussian environment with transmissivity $\tau$, thermal noise $\omega$ and correlations $\mathbf{G}$. The modified scenario is depicted in Fig.~\ref{swapLOSS}.\begin{figure}
\caption{Entanglement swapping in the presence of loss, thermal noise and environmental correlations (correlated Gaussian environment). The Bell detector has been simplified.}
\label{swapLOSS}
\end{figure}
\subsubsection{Swapping of EPR\ correlations}
For simplicity, we start by studying the evolution of the EPR correlations under ideal input conditions ($\mu\rightarrow+\infty$). After the classical communication of the outcome $\gamma$, the quadratures of the remote modes $a$ and $b$ satisfy the asymptotic relations
\begin{align} \hat{q}_{b} & =\hat{q}_{a}-\sqrt{\frac{2}{\tau}}\left( q_{-}-\sqrt{1-\tau }\hat{\delta}_{q}\right) ,\label{eqrrr}\\ \hat{p}_{b} & =-\hat{p}_{a}-\sqrt{\frac{2}{\tau}}\left( p_{+}-\sqrt{1-\tau }\hat{\delta}_{p}\right) , \label{eqrrr2} \end{align} where $\hat{\delta}_{q}=(\hat{q}_{E_{1}}-\hat{q}_{E_{2}})/\sqrt{2}$ and $\hat{\delta}_{p}=(\hat{p}_{E_{1}}+\hat{p}_{E_{2}})/\sqrt{2}$ are noise variables introduced by the environment.
Using previous Eqs.~(\ref{eqrrr}) and~(\ref{eqrrr2}), we construct the remote EPR\ quadratures $\hat{q}_{-}^{r}$\ and $\hat{p}_{+}^{r}$, and we compute the EPR variances \begin{equation} \boldsymbol{\Lambda}:=\left( \begin{array} [c]{cc} V(\hat{q}_{-}^{r}) & \\ & V(\hat{p}_{+}^{r}) \end{array} \right) \rightarrow\boldsymbol{\Lambda}_{\infty}=\frac{1-\tau}{\tau} (\omega\mathbf{I}-\mathbf{ZG})~, \label{lam1} \end{equation} where the limit is taken for $\mu\rightarrow+\infty$. Assuming the EB condition $\omega=\omega_{\text{EB}}$, we finally get \begin{equation} \boldsymbol{\Lambda}_{\infty,\text{EB}}=\frac{1}{\tau}\left[ (1+\tau )\mathbf{I}-(1-\tau)\mathbf{ZG}\right] . \label{lam2} \end{equation}
In the case of a memoryless environment ($\mathbf{G=0}$) we see that $\boldsymbol{\Lambda}_{\infty,\text{EB}}=(1+\tau^{-1})\mathbf{I}\geq \mathbf{I}$, which means that the EPR\ correlations cannot be swapped to the remote systems. However, it is evident from Eq.~(\ref{lam2}) that there are choices for the correlation block $\mathbf{G}$\ such that the EPR\ condition $\boldsymbol{\Lambda}_{\infty,\text{EB}}<\mathbf{I}$ is satisfied. For instance, this happens when we consider $\mathbf{G=}g\mathbf{Z}$. In this case it is easy to check that $\boldsymbol{\Lambda}_{\infty,\text{EB}}<\mathbf{I}$ is satisfied for $\tau\geq1/4$ and $g>(1-\tau)^{-1}$. Under these conditions, EPR\ correlations are successfully swapped to the remote modes. In particular, for $\tau>1/2$ and $g>(1-\tau)^{-1}$ there are separable environments which do the job.
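A quick numerical check (ours) of the last claims: at $\tau=0.75$, so that $1/(1-\tau)=4$ and $\omega_{\text{EB}}=7$, the choice $\mathbf{G}=5\mathbf{Z}$ gives remote EPR variances equal to $2/3<1$ while the environment remains separable.
\begin{verbatim}
import numpy as np

tau = 0.75
omega = (1 + tau) / (1 - tau)            # one-mode EB thermal noise
g, gp = 5.0, -5.0                        # G = g*Z with g > 1/(1 - tau) = 4

I2, Z, G = np.eye(2), np.diag([1.0, -1.0]), np.diag([g, gp])
Lam = ((1 + tau) * I2 - (1 - tau) * Z @ G) / tau        # Eq. (lam2)
eps_env = np.sqrt(omega**2 - g * gp - omega * abs(g - gp))

print(np.diag(Lam))          # [2/3, 2/3]: EPR correlations are swapped
print(eps_env >= 1)          # True: the environment is separable
\end{verbatim}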
\subsubsection{Swapping and distillation of entanglement}
Here we discuss in detail how entanglement is distributed by the swapping protocol in the presence of a correlated Gaussian environment. In particular, suppose that Alice and Bob cannot share entanglement with Charlie because the environment is one-mode EB. Then, we aim to address the following questions: (i)~Is it still possible for Charlie to distribute entanglement to the remote parties thanks to the environmental correlations? (ii)~In particular, is the swapping successful when the environmental correlations are separable? (iii)~Finally, are Alice and Bob able to distill the swapped entanglement by means of one-way distillation protocols? Our previous discussion on EPR correlations clearly suggests that these questions have positive answers. Here we explicitly show this is indeed true for quantum entanglement by finding the typical regimes of parameters that the Gaussian environment must satisfy.
In order to study the propagation of entanglement we first need to derive the CM\ $\mathbf{V}_{ab|\gamma}$\ of the conditional remote state $\rho _{ab|\gamma}$. As before, we have two identical EPR\ states at Alice's and Bob's stations with CM $\mathbf{V}(\mu)$ given in Eq.~(\ref{CM_TMSV}). The travelling modes $A$ and $B$ are sent to Charlie through a Gaussian environment with transmissivity $\tau$, thermal noise $\omega$ and correlations $\mathbf{G}$. After the Bell measurement and the classical communication of the result $\gamma$, the conditional remote state at Alice's and Bob's stations is Gaussian with CM~\cite{BellFORMULA} \begin{equation}
\mathbf{V}_{ab|\gamma}=\left( \begin{array} [c]{cc} \mu\mathbf{I} & \\ & \mu\mathbf{I} \end{array} \right) -\frac{(\mu^{2}-1)\tau}{2}\left( \begin{array} [c]{cccc} \frac{1}{\theta} & & -\frac{1}{\theta} & \\ & \frac{1}{\theta^{\prime}} & & \frac{1}{\theta^{\prime}}\\ -\frac{1}{\theta} & & \frac{1}{\theta} & \\ & \frac{1}{\theta^{\prime}} & & \frac{1}{\theta^{\prime}} \end{array} \right) ~, \label{VabGamma} \end{equation} where \begin{equation} \theta=\tau\mu+(1-\tau)(\omega-g),~\theta^{\prime}=\tau\mu+(1-\tau )(\omega+g^{\prime})~. \label{thetas} \end{equation}
From the CM of Eq.~(\ref{VabGamma}) we compute the smallest PTS eigenvalue $\varepsilon$ quantifying the remote entanglement at Alice's and Bob's stations. For large input entanglement $\mu\gg1$, we find a closed formula in terms of the environmental parameters, i.e., \begin{equation} \varepsilon=\frac{1-\tau}{\tau}\sqrt{(\omega-g)(\omega+g^{\prime} )}:=\varepsilon(\tau,\omega,g,g^{\prime})~, \label{SpectrumTOT} \end{equation}
which is equal to Eq.~(\ref{epsMAIN}) up to a factor $\tau^{-1}$. As before, this eigenvalue determines not only the log-negativity but also the coherent information $I(a\rangle b)$\ associated with the remote state $\rho _{ab|\gamma}$. In fact, for large $\mu$, one can easily compute the asymptotic expression \begin{equation}
I(a\rangle b)\rightarrow\log\frac{2}{e}\sqrt{\frac{\det\mathbf{V}_{b|\gamma}
}{\det\mathbf{V}_{ab|\gamma}}}=\log\frac{1}{e\varepsilon}~, \label{IabCOHEswap} \end{equation} which is identical to the formula of Eq.~(\ref{coheDIR}) for the case of direct distribution. Thus, the PTS\ eigenvalue of Eq.~(\ref{SpectrumTOT}) contains all the information about the distribution and distillation of entanglement in the swapping scenario. For $\varepsilon<1$ entanglement is successfully distributed by the swapping protocol (log-negativity $\mathcal{E}>0$). Then, for the stronger condition $\varepsilon<e^{-1}$, the swapped entanglement can also be distilled into $I(a\rangle b)$ entanglement bits per copy by means of one-way protocols.
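The asymptotic formula of Eq.~(\ref{SpectrumTOT}) can be cross-checked numerically against the exact CM of Eq.~(\ref{VabGamma}); the following sketch (ours) does so for a separable, one-mode EB environment at $\tau=0.9$, where the two values agree up to $O(\mu^{-1})$ corrections.
\begin{verbatim}
import numpy as np

def smallest_pts_eigenvalue(V):
    Omega = np.kron(np.eye(2), np.array([[0.0, 1.0], [-1.0, 0.0]]))
    P = np.diag([1.0, 1.0, 1.0, -1.0])
    return np.abs(np.linalg.eigvals(1j * Omega @ P @ V @ P)).min()

tau, omega, g, gp = 0.9, 19.0, 17.0, -17.0  # separable, one-mode EB environment
mu = 1.0e6
th = tau * mu + (1 - tau) * (omega - g)
thp = tau * mu + (1 - tau) * (omega + gp)

M = np.array([[ 1/th, 0, -1/th, 0],
              [ 0, 1/thp, 0, 1/thp],
              [-1/th, 0,  1/th, 0],
              [ 0, 1/thp, 0, 1/thp]])
V = mu * np.eye(4) - (mu**2 - 1) * tau / 2 * M          # Eq. (VabGamma)

print(smallest_pts_eigenvalue(V))                       # ~ 0.2222
print((1 - tau) / tau * np.sqrt((omega - g) * (omega + gp)))  # Eq. (SpectrumTOT)
\end{verbatim}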
Now, let us assume the condition of one-mode EB ($\omega=\omega_{\text{EB}}$) so that the bipartite states before measurement $\rho_{aA^{\prime}}$ and $\rho_{B^{\prime}b}$ are separable (see Fig.~\ref{swapLOSS}). We investigate the amount of entanglement generated in the remote modes $a$ and $b$ by computing the eigenvalue $\varepsilon(\tau,\omega_{\text{EB}},g,g^{\prime})$. In the standard memoryless case ($\mathbf{G}=\mathbf{0}$) we have $\varepsilon=1+\tau^{-1}$, which means that no entanglement can be swapped, as expected. To study the general case of a correlated environment, we consider different numerical values of the transmissivity $\tau$, and we plot $\varepsilon(\tau,\omega_{\text{EB}},g,g^{\prime})$ on the correlation plane. The results are shown in Fig.~\ref{total} and are similar to those achieved in Fig.~\ref{dirTOT} for direct distribution.
\begin{figure}\caption{Entanglement swapping under the one-mode EB condition $\omega=\omega_{\text{EB}}$: regions of the correlation plane $(g,g^{\prime})$ where the PTS eigenvalue $\varepsilon(\tau,\omega_{\text{EB}},g,g^{\prime})$ of Eq.~(\ref{SpectrumTOT}) satisfies $\varepsilon<1$ (gray activation area), for several numerical values of the transmissivity $\tau$. See text for explanation.}\label{total}
\end{figure}
In each panel of Fig.~\ref{total}, the physical values of the correlation parameters $(g,g^{\prime})$ are identified by the non-black area. Remote entanglement is distributed ($\varepsilon<1$) for values of the correlation parameters belonging to the gray activation area. For $\tau\leq1/2$ (top two panels), we see that the activation area is confined within the region of entangled environments. The property that entangled environments are necessary for the reactivation of entanglement swapping at any $\tau\leq1/2$ is easy to prove. In fact, suppose that $\varepsilon<1$ holds. By using its formula in Eq.~(\ref{SpectrumTOT}) and the bona-fide conditions on the correlation parameters given in Eq.~(\ref{CMconstraints}), we can write $\varepsilon ^{2}<1$ as \begin{equation} \omega^{2}-gg^{\prime}+\omega(g^{\prime}-g)<\left( \frac{\tau}{1-\tau }\right) ^{2}~. \label{eqkk} \end{equation} Now, for $\tau\leq1/2$, we have $\tau^{2}(1-\tau)^{-2}\leq1$ and using this inequality in Eq.~(\ref{eqkk}), we derive \begin{equation}
\omega^{2}-gg^{\prime}-1<\omega(g-g^{\prime})\leq\omega|g-g^{\prime}|~, \end{equation} which is the entanglement condition for the environment [i.e., the violation of Eq.~(\ref{sepCON})].
It is clear that the most interesting result holds for transmissivities $\tau>1/2$. In this regime, in fact, the distribution of remote entanglement can be activated by separable environments. As explicitly shown for $\tau=0.75$ and $0.9$, the activation area progressively invades the region of separable environments. In other words, separable correlations become more and more important for increasing transmissivities. Furthermore, for $\tau \gtrsim0.75$, separable environments are even able to activate the distribution of distillable entanglement ($\varepsilon<e^{-1}$). By comparing Fig.~\ref{dirTOT} and Fig.~\ref{total}, we see how entanglement is more easily generated and distilled by the direct protocol. This is a consequence of the extra factor $\tau^{-1}$ in Eq.~(\ref{SpectrumTOT}), whose influence becomes less important only at high transmissivities ($\tau\simeq1$).
\section{Conclusion\label{SECconclusion}}
In conclusion, we have investigated the distribution of entanglement in the presence of correlated-noise Gaussian environments, proving how the injection of separable correlations can recover the distribution of entanglement after entanglement breaking. In order to derive simple analytical results we have considered here only the case of large entanglement for the input states. We have analyzed scenarios of direct distribution and indirect distribution, i.e., entanglement swapping. Surprisingly, the injection of the weaker separable correlations is sufficient to restore the entanglement distribution, as we have shown for wide regimes of parameters. Furthermore, the generated entanglement can be sufficient to be distilled by means of one-way protocols. The fact that separability can be exploited to recover from entanglement breaking is clearly a paradoxical behavior which poses fundamental questions about the intimate relations between local and nonlocal correlations.
\end{document}
\begin{document}
\title[Barycentric subdivisions of cubical complexes] {Face numbers of barycentric subdivisions of cubical complexes}
\author{Christos~A.~Athanasiadis}
\address{Department of Mathematics\\ National and Kapodistrian University of Athens\\ Panepistimioupolis\\ 15784 Athens, Greece} \email{[email protected]}
\date{December 18, 2020} \thanks{ 2010 \textit{Mathematics Subject Classification.} Primary 05E45; \, Secondary 26C10, 52B12.} \thanks{ \textit{Key words and phrases}. Barycentric subdivision, cubical complex, $h$-polynomial, Eulerian polynomial, real-rootedness.}
\begin{abstract} The $h$-polynomial of the barycentric subdivision of any $n$-dimensional cubical complex with nonnegative cubical $h$-vector is shown to have only real roots and to be interlaced by the Eulerian polynomial of type $B_n$. This result applies to barycentric subdivisions of shellable cubical complexes and, in particular, to barycentric subdivisions of cubical convex polytopes and answers affirmatively a question of Brenti, Mohammadi and Welker. \end{abstract}
\maketitle
\section{Introduction} \label{sec:intro}
A fundamental problem in algebraic and geometric combinatorics is to characterize, or at least obtain significant information about, the face enumerating vectors of triangulations of various topological spaces, such as balls and spheres \cite{StaCCA}. Face enumerating vectors are often presented in the form of the $h$-polynomial (see Section~\ref{sec:enu} for definitions). Properties such as unimodality, log-concavity, $\gamma$-positivity and real-rootedness have been of primary interest \cite{Ath18, Bra15, Bre94b, Sta89}. One expects that the `nicer' the triangulation is combinatorially and the space being triangulated is topologically, the better the behavior of the $h$-polynomial is.
Following this line of thought, Brenti and Welker~\cite{BW08} considered an important and well studied triangulation in mathematics, namely the barycentric subdivision. They studied the transformation of the $h$-polynomial of a simplicial complex $\Delta$ under barycentric subdivision and showed that the resulting $h$-polynomial has only real roots (a property with strong implications) for every simplicial complex $\Delta$ with nonnegative $h$-polynomial. They asked \cite[Question~3.10]{BW08} whether the $h$-polynomial of the barycentric subdivision of any convex polytope has only real roots, suspecting an affirmative answer (see \cite[p.~105]{MW17}). This question was raised again by Mohammadi and Welker~\cite[Question~35]{MW17} and, as is typically the case in face enumeration, it is far more interesting and more challenging for general polytopes and polyhedral complexes, than it is for simplicial polytopes and simplicial complexes. Somewhat surprisingly, no strong evidence has been provided in the literature that such a result may (or may not) hold beyond the simplicial setting. One should also note that barycentric subdivisions of boundary complexes of polytopes form a special class of flag triangulations of spheres and that the real-rootedness property fails for the $h$-polynomials of this more general class of triangulations in dimensions higher than four \cite{Ga05}. At present, it is unclear where the borderline between positive and negative results lies.
Mohammadi and Welker (based on earlier discussions with Brenti) suggested the class of cubical polytopes as another good test case; see \cite[p.~105]{MW17}. Cubical complexes and polytopes are important and mysterious objects with highly nontrivial combinatorial properties (see, for instance, \cite{Ad96, BBC97, Jo93, JZ00}). They have been studied both for their own independent interest, and for the role they play in other areas of mathematics. Given the intricacy of their combinatorics, it comes as no surprise that the question of Brenti and Welker turns out to be more difficult for them than for simplicial complexes. The following theorem provides the first general positive result on this question, since \cite{BW08} appeared, and suggests that an affirmative answer should be expected at least for broad classes of nonsimplicial convex polytopes (or even more general cell complexes and posets).
\begin{theorem} \label{thm:main} The $h$-polynomial of the barycentric subdivision of any shellable cubical complex has only real roots. In particular, barycentric subdivisions of cubical polytopes have this property. \end{theorem}
The case of cubical polytopes was also studied recently by Hlavacek and Solus~\cite{HS20+}. Using the concept of shellability and the theory of interlacing polynomials, they gave an affirmative answer for cubical complexes which admit a special type of shelling and applied their result to certain families of cubical polytopes, such as cuboids, capped cubical polytopes and neighborly cubical polytopes.
The proof of the aforementioned result of~\cite{BW08} applies a theorem of Br\"and\'en~\cite{Bra06} on the subdivision operator \cite[Section~3.3]{Bra15} to a formula for the $h$-polynomial of the barycentric subdivision of a simplicial complex (see Remark~\ref{rem:BW-formula}). The proof of Theorem~\ref{thm:main} is motivated by the proof of the result of~\cite{BW08}, given and extended to the setting of uniform triangulations of simplicial complexes in~\cite{Ath20+} (the latter was partially motivated by \cite[Example~8.1]{Bra15}). To explain further, we let $h(\Delta, x) = \sum_{i=0}^{n+1} h_i(\Delta) x^i$ denote the $h$-polynomial and ${\rm sd}(\Delta)$ denote the barycentric subdivision of an $n$-dimensional simplicial complex $\Delta$. As already shown in~\cite{BW08}, there exist polynomials with nonnegative coefficients $p_{n,k}(x)$ for $k \in \{0, 1,\dots,n+1\}$, which depend only on $n$ and $k$, such that
\begin{equation} \label{eq:BW} h({\rm sd}(\Delta), x) \ = \ \sum_{k=0}^{n+1} h_k(\Delta)
p_{n,k}(x) \end{equation}
for every $n$-dimensional simplicial complex $\Delta$. For every $n \in {\mathbb N}$, the polynomials $p_{n,k}(x)$ can be shown \cite[Example~8.1]{Bra15} to have only real roots and to form an interlacing sequence. This implies that their nonnegative linear combination $h({\rm sd}(\Delta), x)$ also has only real (negative) roots and that it is interlaced by $p_{n,0}(x)$, which equals the classical $(n+1)$st Eulerian polynomial $A_{n+1}(x)$ \cite[Section~1.4]{StaEC1}. The interlacing condition implies that the roots of $h({\rm sd}(\Delta), x)$ are not arbitrary, but rather that they lie in certain intervals that depend only on the dimension $n$, formed by zero and the roots of $A_{n+1}(x)$. The polynomial $p_{n,k}(x)$ can be interpreted as the $h$-polynomial of the relative simplicial complex obtained from the barycentric subdivision of the $n$-dimensional simplex by removing all faces lying on $k$ facets of the simplex \cite[Section~5]{Ath20+} \cite[Section~4.2]{HS20+}.
This paper presents a similar picture for cubical complexes. We define (see Definition~\ref{def:pBnk}) polynomials $p^B_{n,k}(x)$ for $k \in \{0, 1,\dots,n+1\}$ as the $h$-polynomials of relative simplicial complexes obtained from the barycentric subdivision of the $n$-dimensional cube by removing all faces lying on certain facets of the cube and prove (see Theorem~\ref{thm:h-trans}) that Equation~(\ref{eq:BW}) continues to hold when $\Delta$ is replaced by an $n$-dimensional cubical complex ${\mathcal L}$, $p_{n,k}(x)$ is replaced by $p^B_{n,k}(x)$ and the $h_k(\Delta)$ are replaced by the entries of the (normalized) cubical $h$-vector of ${\mathcal L}$, introduced and studied by Adin~\cite{Ad96}. We provide recurrences (see Proposition~\ref{prop:pBnk}) for the polynomials $p^B_{n,k}(x)$ which guarantee that they form an interlacing sequence for every $n \in {\mathbb N}$ and conclude that $h({\rm sd}({\mathcal L}), x)$ has only real (negative) roots and that it is interlaced by the $n$th Eulerian polynomial $B_n(x)$ of type $B$ for every $n$-dimensional cubical complex ${\mathcal L}$ with nonnegative cubical $h$-vector (see Corollary~\ref{cor:main}). This implies Theorem~\ref{thm:main}, since shellable cubical complexes were shown~\cite{Ad96} to have nonnegative cubical $h$-vector and boundary complexes of convex polytopes are shellable~\cite{BM71}.
The main results of this paper apply to cubical regular cell complexes (equivalently, to cubical posets) and will be stated at this level of generality. What comes perhaps unexpectedly is the fact that the transformation of a cubical $h$-polynomial into a simplicial one can be so well behaved. Corollary~\ref{cor:main} has nontrivial applications to triangulations of simplicial complexes as well; see Remark~\ref{rem:simplicial}.
\section{Face enumeration of simplicial and cubical complexes} \label{sec:enu}
This section recalls some definitions and background on the face enumeration of simplicial and cubical complexes and their triangulations, and shellability. For more information and any undefined terminology, we recommend the books \cite{HiAC, StaCCA}. All cell complexes considered here are assumed to be finite. Throughout this paper, we set ${\mathbb N} =
\{0, 1, 2,\dots\}$ and denote by $|S|$ the cardinality of a finite set $S$.
\subsection{Simplicial complexes} \label{sec:simplicial}
An $n$-dimensional \emph{relative simplicial complex} \cite[Section~III.7]{StaCCA} is a pair $(\Delta, \Gamma)$, denoted $\Delta / \Gamma$, where $\Delta$ is an (abstract) $n$-dimensional simplicial complex and $\Gamma$ is a subcomplex of $\Delta$. The \emph{$f$-polynomial} of $\Delta / \Gamma$ is defined as
\[ f(\Delta / \Gamma, x) \ = \ \sum_{i=0}^{n+1} f_{i-1} (\Delta / \Gamma) x^i, \]
where $f_j(\Delta / \Gamma)$ is the number of $j$-dimensional faces of $\Delta$ which do not belong to $\Gamma$. The \emph{$h$-polynomial} is defined as
\begin{eqnarray*} \label{def:simpl-h} h(\Delta/\Gamma, x) & = & (1-x)^{n+1} f(\Delta/\Gamma, \frac{x}{1-x}) \ = \ \sum_{i=0}^{n+1} f_{i-1} (\Delta/\Gamma) \, x^i (1-x)^{n+1-i} \\ & = & \sum_{F \in \Delta/\Gamma}
x^{|F|} (1-x)^{n+1-|F|} \ := \ \sum_{k=0}^{n+1} \, h_k (\Delta/\Gamma) x^k \nonumber \end{eqnarray*}
\noindent and the sequence $h(\Delta/\Gamma) := (h_0(\Delta/\Gamma), h_1(\Delta/\Gamma),\dots,h_{n+1}(\Delta/\Gamma))$ is called the \emph{$h$-vector} of $\Delta/\Gamma$. Note that $f(\Delta / \Gamma, x)$ has only real roots if and only if so does $h(\Delta/\Gamma, x)$. When $\Gamma$ is empty, we get the corresponding invariants of $\Delta$ and drop $\Gamma$ from the notation. Thus, for example, $h(\Delta, x) = \sum_{k=0}^{n+1} h_k (\Delta) x^k$ is the (usual) $h$-polynomial of $\Delta$.
Suppose now that $\Delta$ triangulates an $n$-dimensional ball. Then, the boundary complex $\partial \Delta$ is a triangulation of an $(n-1)$-dimensional sphere and the \emph{interior $h$-polynomial} of $\Delta$ is defined as $h^\circ(\Delta, x) = h(\Delta / \partial \Delta, x)$. The following statement is a special case of \cite[Lemma~6.2]{Sta87}.
\begin{proposition} \label{prop:hsymmetry} {\rm (\cite{Sta87})} Let $\Delta$ be a triangulation of an $n$-dimensional ball. Let $\Gamma$ be a subcomplex of $\partial\Delta$ which is homeomorphic to an $(n-1)$-dimensional ball and $\bar{\Gamma}$ be the subcomplex of $\partial\Delta$ whose facets are those of $\partial\Delta$ which do not belong to $\Gamma$. Then,
\[ h(\Delta / \bar{\Gamma}, x) \ = \ x^{n+1}
h(\Delta / \Gamma, 1/x) . \]
Moreover, $h^\circ(\Delta, x) = x^{n+1} h(\Delta, 1/x)$. \end{proposition}
\subsection{Cubical complexes} \label{sec:cubical}
A \emph{regular cell complex} \cite[Section 4.7]{OM} is a (finite) collection ${\mathcal L}$ of subspaces of a Hausdorff space $X$, called \emph{cells} or \emph{faces}, each homeomorphic to a closed unit ball in some finite-dimensional Euclidean space, such that: (a) $\varnothing \in {\mathcal L}$; (b) the relative interiors of the nonempty cells partition $X$; and (c) the boundary of any cell in ${\mathcal L}$ is a union of cells in ${\mathcal L}$. The \emph{boundary complex} of $\sigma \in {\mathcal L}$, denoted by $\partial \sigma$, is defined as the regular cell complex consisting of all faces of ${\mathcal L}$ properly contained in $\sigma$. A regular cell complex ${\mathcal L}$ is called \emph{cubical} if every nonempty face of ${\mathcal L}$ is combinatorially isomorphic to a cube. A convex polytope is called \emph{cubical} if so is its boundary complex.
Given a cubical complex ${\mathcal L}$ of dimension $n$, we denote by $f_k({\mathcal L})$ the number of $k$-dimensional faces of ${\mathcal L}$. The cubical $h$-polynomial was introduced and studied by Adin~\cite{Ad96} as a (well behaved) analogue of the (simplicial) $h$-polynomial of a simplicial complex. Following \cite[Section~4]{EH00}, we define the (normalized) \emph{cubical $h$-polynomial} of ${\mathcal L}$ as
\begin{equation} \label{def:cub-h} (1+x) h({\mathcal L}, x) \ = \ 1 \, + \, \sum_{k=0}^n \, f_k({\mathcal L}) \, x^{k+1} \left( \frac{1-x}{2} \right)^{n-k} + \ (-1)^n \, \widetilde{\chi} ({\mathcal L}) x^{n+2} , \end{equation}
where $\widetilde{\chi} ({\mathcal L}) = -1 + \sum_{k=0}^n (-1)^k f_k({\mathcal L})$ is the reduced Euler characteristic of ${\mathcal L}$ (the only difference from Adin's definition is that all coefficients of $h({\mathcal L}, x)$ have been divided by $2^n$ and, therefore, are not necessarily integers). We note that $h({\mathcal L}, x)$ is indeed a polynomial in $x$ of degree at most $n+1$. The (normalized) \emph{cubical $h$-vector} of ${\mathcal L}$ is the sequence $h({\mathcal L}) = (h_0({\mathcal L}), h_1({\mathcal L}),\dots,h_{n+1} ({\mathcal L}))$, where $h({\mathcal L}, x) = \sum_{k=0}^{n+1} h_k({\mathcal L}) x^k$.
Adin showed that $h({\mathcal L}, x)$ has nonnegative coefficients for every shellable cubical complex ${\mathcal L}$ \cite[Theorem~5~(iii)]{Ad96} (his result is stated for abstract cubical complexes with the intersection property, but the proof is valid without assuming the latter). He asked whether the same holds whenever ${\mathcal L}$ is Cohen--Macaulay \cite[Question~1]{Ad96}. The coefficient $h_k({\mathcal L})$ is known to be nonnegative for every Cohen--Macaulay ${\mathcal L}$ for $k \in \{0, 1\}$, since $h_0({\mathcal L}) = 1$ and $h_1({\mathcal L}) = (f_0({\mathcal L}) - 2^n)/2^n$, for $k=n$ \cite[Corollary~1.2]{Ath12} and for $k = n+1$, since $h_{n+1}({\mathcal L}) = (-1)^n \widetilde{\chi} ({\mathcal L})$, and for every $k$ in the special case that ${\mathcal L}$ is the cubical barycentric subdivision of a Cohen--Macaulay simplicial complex \cite{Het96} (see also Remark~\ref{rem:simplicial}).
\subsection{Barycentric subdivision and shellability} \label{sec:shell}
The \emph{barycentric subdivision} of a regular cell complex ${\mathcal L}$ is denoted by ${\rm sd}({\mathcal L})$ and defined as the abstract simplicial complex whose faces are the chains $\sigma_0 \subset \sigma_1 \subset \cdots \subset \sigma_k$ of nonempty faces of ${\mathcal L}$. The natural restriction of ${\rm sd}({\mathcal L})$ to a nonempty face $\sigma \in {\mathcal L}$ is exactly the barycentric subdivision ${\rm sd}(\sigma)$ of (the complex of faces of) $\sigma$.
Similarly, by the barycentric subdivision ${\rm sd}(Q)$ of a convex polytope $Q$ we mean that of the complex of faces of $Q$. Since ${\rm sd}(Q)$ is a cone over ${\rm sd}(\partial Q)$, we have $h({\rm sd}(Q), x) = h({\rm sd}(\partial Q), x)$. For the $n$-dimensional cube $Q$ we have $h({\rm sd}(Q), x) = B_n(x)$, where $B_n(x)$ is the Eulerian polynomial which counts signed permutations of $\{1, 2,\dots,n\}$ by the number of descents of type $B$; see, for instance, \cite[Chapter~11]{Pet15}. The following well known type $B$ analogue of Worpitzky's identity \cite[Equation~(13.3)]{Pet15}
\begin{equation} \label{eq:Bn-gen} \frac{B_n(x)}{(1-x)^{n+1}} \ = \ \sum_{m \ge 0} \, (2m+1)^n x^m \end{equation}
will make computations in the following section easier.
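For concreteness, Eq.~(\ref{eq:Bn-gen}) also gives a convenient way to generate $B_n(x)$ symbolically; the following Python/SymPy sketch (ours, assuming SymPy is available) truncates the series and recovers, for example, $B_3(x) = 1 + 23x + 23x^2 + x^3$ and $B_4(x) = 1 + 76x + 230x^2 + 76x^3 + x^4$.
\begin{verbatim}
import sympy as sp

x = sp.symbols('x')

def type_B_eulerian(n):
    """B_n(x) extracted from the truncated series of Eq. (eq:Bn-gen)."""
    M = n + 2                              # coefficients up to degree M are exact
    series = sum((2*m + 1)**n * x**m for m in range(M + 1))
    poly = sp.expand((1 - x)**(n + 1) * series)
    return sum(poly.coeff(x, k) * x**k for k in range(n + 1))

for n in range(5):
    print(n, sp.expand(type_B_eulerian(n)))
\end{verbatim}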
A regular cell complex ${\mathcal L}$ is called \emph{pure} if all its facets (faces which are maximal with respect to inclusion) have the same dimension. Such a complex ${\mathcal L}$ is called \emph{shellable} if either it is zero-dimensional, or else there exists a linear ordering $\tau_1, \tau_2,\dots,\tau_m$ of its facets, called a \emph{shelling}, such that (a) $\partial \tau_1$ is shellable; and (b) for $2 \le j \le m$, the complex of faces of $\partial \tau_j$ which are contained in $\tau_1 \cup \tau_2 \cup \cdots \cup \tau_{j-1}$ is pure, of the same dimension as $\partial \tau_j$, and there exists a shelling of $\partial \tau_j$ for which the facets of $\partial \tau_j$ contained in $\tau_1 \cup \tau_2 \cup \cdots \cup \tau_{j-1}$ form an initial segment. A fundamental result of Bruggesser and Mani \cite{BM71} states that $\partial Q$ is shellable for every convex polytope $Q$. For the shellability of cubical complexes in particular, see \cite[Section~3]{EH00} \cite[Section~3]{HS20+}.
\section{The $h$-vector transformation} \label{sec:h-trans}
This section studies the transformation which maps the cubical $h$-vector of a cubical complex ${\mathcal L}$ to the (simplicial) $h$-vector of the barycentric subdivision ${\rm sd}({\mathcal L})$ and deduces Theorem~\ref{thm:main} from its properties. We begin with an important definition.
\begin{definition} \label{def:pBnk} For $n \in {\mathbb N}$ and $k \in \{0, 1,\dots,n+1\}$ we denote by ${\mathcal C}_{n,k}$ the relative simplicial complex which is obtained from the barycentric subdivision of the $n$-dimensional cube by removing
\begin{itemize} \item[$\bullet$] no face, if $k=0$, \item[$\bullet$] all faces which lie in one facet and $k-1$ pairs of antipodal facets of the cube (making a total of $2k-1$ facets), if $k \in \{1, 2,\dots,n\}$, \item[$\bullet$] all faces on the boundary of the cube, if $k=n+1$. \end{itemize}
We define $p^B_{n,k}(x) = h({\mathcal C}_{n,k}, x)$ for $k \in \{0, n+1\}$, and $p^B_{n,k}(x) = 2 h({\mathcal C}_{n,k}, x)$ for $k \in \{1, 2,\dots,n\}$. \end{definition}
The polynomials $p^B_{n,k}(x)$ are shown on Table~\ref{tab:pBnk} for $n \le 3$. For $n=4$,
\[ p^B_{4,k}(x) \ = \ \begin{cases}
1 + 76x + 230x^2 + 76x^3 + x^4,
& \text{if $k = 0$,} \\
108x + 460x^2 + 196x^3 + 4x^4,
& \text{if $k = 1$,} \\
36x + 420x^2 + 300x^3 + 12x^4,
& \text{if $k=2$,} \\
12x + 300x^2 + 420x^3 + 36x^4,
& \text{if $k=3$,} \\
4x + 196x^2 + 460x^3 + 108x^4,
& \text{if $k=4$,} \\
x + 76x^2 + 230x^3 + 76x^4 + x^5,
& \text{if $k=5$}. \end{cases} \]
\noindent Their significance stems from the following theorem.
\begin{theorem} \label{thm:h-trans} For every $n$-dimensional cubical complex ${\mathcal L}$,
\[ h({\rm sd}({\mathcal L}), x) \ = \ \sum_{k=0}^{n+1} h_k({\mathcal L})
p^B_{n,k}(x) . \]
\end{theorem}
{\scriptsize \begin{table}[hptb] \begin{center}
\begin{tabular}{| l || l | l | l | l | l | l ||} \hline & $k=0$ & $k=1$ & $k=2$ & $k=3$ & $k=4$ \\ \hline \hline $n=0$ & 1 & $x$ & & & \\
\hline $n=1$ & $1+x$ & $4x$ & $x+x^2$ & & \\ \hline
$n=2$ & $1+6x+x^2$ & $12x+4x^2$ &
$4x+12x^2$ & $x+6x^2+x^3$ & \\ \hline
$n=3$ & $1+23x+23x^2+x^3$ & $36x+56x^2+4x^3$
& $12x+72x^2+12x^3$ & $4x+56x^2+36x^3$ &
$x+23x^2+23x^3+x^4$ \\ \hline \end{tabular} \caption{The polynomials $p^B_{n,k}(x)$ for $n \le 3$.} \label{tab:pBnk} \end{center} \end{table}}
The proof requires a few preliminary results. We first summarize some of the main properties of $p^B_{n,k}(x)$.
\begin{proposition} \label{prop:pBnk} For every $n \in {\mathbb N}$:
\begin{itemize} \itemsep=0pt \item[{\rm (a)}] The polynomial $p^B_{n,k}(x)$ has nonnegative coefficients for every $k \in \{0, 1,\dots,n+1\}$; its degree is equal to $n+1$, if $k = n+1$, and to $n$ otherwise.
\item[{\rm (b)}] $p^B_{n,n+1-k}(x) = x^{n+1}p^B_{n,k}(1/x)$ for every $k \in \{0, 1,\dots,n+1\}$.
\item[{\rm (c)}] $p^B_{n,0}(x) = B_n(x)$, $p^B_{n,n+1}(x) = xB_n(x)$ and $\sum_{k=0}^{n+1} p^B_{n,k}(x) = B_{n+1}(x)$.
\item[{\rm (d)}] We have
\[ p^B_{n+1,k+1}(x) \ = \ \begin{cases}
2 p^B_{n+1,0}(x) + 2 (x-1) p^B_{n,0}(x),
& \text{if $k = 0$,} \\
p^B_{n+1,k}(x) + 2(x-1) p^B_{n,k}(x),
& \text{if $1 \le k \le n$,} \\
(1/2) \cdot p^B_{n+1,n+1}(x) + (x-1) p^B_{n,n+1}(x),
& \text{if $k=n+1$} . \end{cases} \]
\item[{\rm (e)}] The recurrence
\[ p^B_{n+1,k}(x) \ = \ \begin{cases}
{\displaystyle \sum_{i=0}^{n+1} p^B_{n,i}(x)},
& \text{if $k = 0$}, \\
{\displaystyle 2x \sum_{i=0}^{k-1} p^B_{n,i}(x)
\, + \, 2 \sum_{i=k}^{n+1} p^B_{n,i}(x)},
& \text{if $1 \le k \le n+1$}, \\
{\displaystyle x \sum_{i=0}^{n+1} p^B_{n,i}(x)},
& \text{if $k=n+2$} \end{cases} \]
holds for $k \in \{0, 1,\dots,n+1\}$.
\item[{\rm (f)}] We have
\[ \frac{p^B_{n,k}(x)}{(1-x)^{n+1}} \ = \ \begin{cases}
{\displaystyle \sum_{m \ge 0} \, (2m+1)^n x^m},
& \text{if $k = 0$,} \\
{\displaystyle \sum_{m \ge 0} \, (4m) (2m-1)^{k-1}
(2m+1)^{n-k} x^m},
& \text{if $1 \le k \le n$,} \\
{\displaystyle \sum_{m \ge 1} \, (2m-1)^n x^m},
& \text{if $k=n+1$}. \end{cases} \]
\end{itemize} \end{proposition}
\begin{proof} We first note that, as discussed in Section~\ref{sec:shell}, $p^B_{n,0}(x) = h({\mathcal C}_{n,0},x) = B_n(x)$. Part (d) follows from Definition~\ref{def:pBnk} and the definition of the $h$-polynomial of a relative simplicial complex. Indeed, for $1 \le k \le n$, we have $f({\mathcal C}_{n+1,k+1}, x) = f({\mathcal C}_{n+1,k}, x) - 2 f({\mathcal C}_{n,k}, x)$. Hence,
\begin{eqnarray*} h({\mathcal C}_{n+1,k+1}, x) & = & (1-x)^{n+2} f({\mathcal C}_{n+1,k+1}, \frac{x}{1-x}) \\ & & \\ & = & (1-x)^{n+2} f({\mathcal C}_{n+1,k}, \frac{x}{1-x}) \, - \, 2 (1-x) \cdot (1-x)^{n+1} f({\mathcal C}_{n,k}, \frac{x}{1-x}) \\ & & \\ & = & h({\mathcal C}_{n+1,k}, x) \, + \, 2(x-1) h({\mathcal C}_{n,k}, x) \end{eqnarray*}
\noindent and
\begin{eqnarray*} p^B_{n+1,k+1}(x) & = & 2 h({\mathcal C}_{n+1,k+1}, x) \ = \ 2 h({\mathcal C}_{n+1,k}, x) \, + \, 4 (x-1) h({\mathcal C}_{n,k}, x) \\ & = & p^B_{n+1,k}(x) \, + \, 2 (x-1) p^B_{n,k}(x). \end{eqnarray*}
\noindent The same argument, similar to that in the proof of \cite[Corollary~5.6]{Ath20+}, works for $k \in \{0, n+1\}$. Part (f) follows from part (d) by straightforward induction on $k$ (for fixed $n$), where the base $k=0$ of the induction holds because of Equation~(\ref{eq:Bn-gen}).
For part (c) we first note that $p^B_{n,n+1}(x) = h({\mathcal C}_{n,n+1},x) = h^\circ({\mathcal C}_{n,0}, x) = x^{n+1} h({\mathcal C}_{n,0}, 1/x) = x^{n+1} B_n(1/x) = xB_n(x)$. The identity for the sum of the $p^B_{n,k}(x)$ can be verified directly by summing that of part (f). For a more conceptual proof, one can use an obvious shelling of the boundary complex of the $(n+1)$-dimensional cube to write, as explained in \cite[Section~3]{HS20+}, the $h$-polynomial $B_{n+1}(x)$ of its barycentric subdivision as a sum of $h$-polynomials of relative simplicial complexes, each one combinatorially isomorphic to one of the ${\mathcal C}_{n,k}$. The details are left to the interested reader.
Given (c), the recursion of part (e) follows easily by induction on $k$ from part (d) (this parallels the proof of \cite[Lemma~6.3]{Ath20+}).
Part (b) is a consequence of Definition~\ref{def:pBnk} and Proposition~\ref{prop:hsymmetry}. Alternatively, it follows from part (f) and standard facts about rational generating functions; see \cite[Proposition~4.2.3]{StaEC1}. The nonnegativity of the coefficients of $p^B_{n,k}(x)$, claimed in part (a), follows from the recursion of part (e), as well as from general results \cite[Corollary~III.7.3]{StaCCA} on the nonnegativity of $h$-vectors of Cohen--Macaulay relative simplicial complexes. The statement about the degree of $p^B_{n,k}(x)$, claimed there, follows from either of parts (d), (e) or (f). \end{proof}
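For the reader who wishes to experiment, the recurrence of part (e) is easy to implement symbolically; the following Python/SymPy sketch (ours) starts from $p^B_{0,0}(x)=1$ and $p^B_{0,1}(x)=x$, reproduces Table~\ref{tab:pBnk} and the displayed polynomials $p^B_{4,k}(x)$, and confirms in small cases the identity $\sum_k p^B_{n,k}(x)=B_{n+1}(x)$ of part (c).
\begin{verbatim}
import sympy as sp

x = sp.symbols('x')

def pB(n):
    """The list (p^B_{n,0}, ..., p^B_{n,n+1}) built from the recurrence in (e)."""
    polys = [sp.Integer(1), x]             # n = 0
    for m in range(n):
        total = sum(polys)
        new = [sp.expand(total)]
        for k in range(1, m + 2):
            new.append(sp.expand(2*x*sum(polys[:k]) + 2*sum(polys[k:])))
        new.append(sp.expand(x * total))
        polys = new
    return polys

for k, p in enumerate(pB(4)):
    print(k, p)                            # matches the display for n = 4
print(sp.expand(sum(pB(3))))               # equals B_4(x), cf. part (c)
\end{verbatim}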
We leave open the problem of finding a combinatorial interpretation of $p^B_{n,k}(x)$. Given part (c) of the proposition, one naturally expects that there is such an interpretation which refines one of the known combinatorial interpretations of $B_{n+1}(x)$.
The following statement is a consequence of a more general result \cite[Proposition~7.6]{Sta92} of Stanley on subdivisions of CW-posets. To keep this paper self-contained, we include a proof.
\begin{proposition} \label{prop:sdc-h} For every $n$-dimensional cubical complex ${\mathcal L}$,
\[ h({\rm sd}({\mathcal L}), x) \ = \ (1-x)^{n+1} \, + \, x \,
\sum_{k=0}^n f_k({\mathcal L}) (1-x)^{n-k} B_k(x) . \]
\end{proposition}
\begin{proof} Since every face of ${\rm sd}({\mathcal L})$ is an interior face of the restriction ${\rm sd}(\sigma)$ of ${\rm sd}({\mathcal L})$ to a unique face $\sigma \in {\mathcal L}$, we have
\[ f({\rm sd}({\mathcal L}), x) \ = \ \sum_{\sigma \in {\mathcal L}} f^\circ
({\rm sd}(\sigma), x) \ = \ 1 \, + \sum_{\sigma \in {\mathcal L}
{\smallsetminus} \{\varnothing\}} f^\circ({\rm sd}(\sigma), x) . \]
Transforming $f$-polynomials to $h$-polynomials in this equation and recalling from Section~\ref{sec:enu} that $h^\circ({\rm sd}(\sigma), x) = x^{k+1} h({\rm sd}(\sigma), 1/x) = x^{k+1} B_k(1/x) = x B_k(x)$ for every nonempty $k$-dimensional face $\sigma \in {\mathcal L}$, we get
\begin{eqnarray*} h({\rm sd}({\mathcal L}), x) & = & (1-x)^{n+1} f({\rm sd}({\mathcal L}), \frac{x}{1-x}) \\ & = & (1-x)^{n+1} \ + \sum_{\sigma \in {\mathcal L} {\smallsetminus} \{\varnothing\}} (1-x)^{n+1} f^\circ({\rm sd}(\sigma), \frac{x}{1-x}) \\ & = & (1-x)^{n+1} \ + \sum_{\sigma \in {\mathcal L} {\smallsetminus} \{\varnothing\}} (1-x)^{n-\dim(\sigma)} \, h^\circ({\rm sd}(\sigma), x) \\ & = & (1-x)^{n+1} \, + \ \sum_{k=0}^n f_k({\mathcal L}) (1-x)^{n-k} x B_k(x) \end{eqnarray*}
and the proof follows. \end{proof}
\noindent \emph{Proof of Theorem~\ref{thm:h-trans}}. Let us denote by $p({\mathcal L}, x)$ the right-hand side of the desired equality. Clearly, it suffices to show that $h({\rm sd}({\mathcal L}), x)/(1-x)^{n+1} = p({\mathcal L}, x)/(1-x)^{n+1}$. From Proposition~\ref{prop:sdc-h} and Equation~(\ref{eq:Bn-gen}) we deduce that
\begin{eqnarray*} \frac{h({\rm sd}({\mathcal L}), x)}{(1-x)^{n+1}} & = & 1 \, + \, x \, \sum_{k=0}^n f_k({\mathcal L}) \, \frac{B_k(x)}{(1-x)^{k+1}} \ = \ 1 \, + \, \sum_{m \ge 0} \left( \, \sum_{k=0}^n f_k({\mathcal L}) (2m+1)^k \right) x^{m+1} \nonumber \\ & = & 1 \, + \, \sum_{m \ge 1} \left( \, \sum_{k=0}^n f_k({\mathcal L}) (2m-1)^k \right) x^m . \end{eqnarray*}
Similarly, from part (f) of Proposition~\ref{prop:pBnk} we get
\[ \frac{p({\mathcal L}, x)}{(1-x)^{n+1}} \ = \ \sum_{k=0}^{n+1} h_k({\mathcal L}) \, \frac{p^B_{n,k}(x)}{(1-x)^{n+1}} \ = \ 1 \, + \, \sum_{m \ge 1} a_{\mathcal L}(m) x^m, \]
where
\[ a_{\mathcal L}(y) \ := \ h_0({\mathcal L}) (2y+1)^n \, + \, \sum_{k=1}^n h_k({\mathcal L}) (4y) (2y-1)^{k-1} (2y+1)^{n-k} \, + \, h_{n+1}({\mathcal L}) (2y-1)^n. \]
Thus, it remains to show that
\begin{equation} \label{eq:final} \sum_{k=0}^n f_k({\mathcal L}) (2y-1)^k \ = \ a_{\mathcal L}(y) . \end{equation}
We claim that this is, essentially, the defining Equation~(\ref{def:cub-h}) of the cubical $h$-polynomial of ${\mathcal L}$ in disguised form. Indeed, cancelling first the summand $1 + h_{n+1}({\mathcal L}) x^{n+2} = 1 + (-1)^n \widetilde{\chi} ({\mathcal L}) x^{n+2}$, and then a factor of $x$, from both sides of (\ref{def:cub-h}) gives that
\[ \sum_{k=0}^n (h_k({\mathcal L}) + h_{k+1}({\mathcal L})) x^k \ = \
\left( \frac{1-x}{2} \right)^n \, \sum_{k=0}^n
f_k({\mathcal L}) \left( \frac{2x}{1-x} \right)^ k . \]
Setting $2x/(1-x) = 2y-1$, so that $x = (2y-1)/(2y+1)$ and $(1-x)/2 = 1/(2y+1)$, the previous identity can be rewritten as
\begin{equation} \label{def:cub-h2} \sum_{k=0}^n f_k({\mathcal L}) (2y-1)^k \ = \ \sum_{k=0}^n \, (h_k({\mathcal L}) + h_{k+1}({\mathcal L})) (2y-1)^k (2y+1)^{n-k} . \end{equation}
Since the right-hand side is readily equal to $a_{\mathcal L}(y)$, this proves Equation~(\ref{eq:final}) and the theorem as well. \qed
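As a sanity check (ours, assuming SymPy is available), Theorem~\ref{thm:h-trans} can be verified directly for the boundary complex of the $3$-dimensional cube, for which one finds the normalized cubical $h$-vector $(1,1,1,1)$ and $h({\rm sd}({\mathcal L}), x) = B_3(x)$.
\begin{verbatim}
import sympy as sp

x = sp.symbols('x')

# L = boundary complex of the 3-cube, an n = 2 dimensional cubical complex
n, f = 2, [8, 12, 6]
chi = -1 + sum((-1)**k * fk for k, fk in enumerate(f))

# normalized cubical h-vector from Eq. (def:cub-h)
rhs = 1 + sum(f[k] * x**(k + 1) * ((1 - x)/2)**(n - k) for k in range(n + 1)) \
        + (-1)**n * chi * x**(n + 2)
h_cub = sp.Poly(sp.cancel(rhs / (1 + x)), x).all_coeffs()[::-1]  # [h_0,...,h_3]

# h-polynomial of sd(L) from Proposition (prop:sdc-h); B_0, B_1, B_2 as above
B = [sp.Integer(1), 1 + x, 1 + 6*x + x**2]
h_sd = sp.expand((1 - x)**(n + 1)
                 + x * sum(f[k] * (1 - x)**(n - k) * B[k] for k in range(n + 1)))

pB2 = [1 + 6*x + x**2, 12*x + 4*x**2, 4*x + 12*x**2, x + 6*x**2 + x**3]  # Table 1
print(h_cub)                                                       # [1, 1, 1, 1]
print(sp.expand(h_sd - sum(h_cub[k]*pB2[k] for k in range(n + 2))))  # 0
\end{verbatim}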
To deduce Theorem~\ref{thm:main} from Theorem~\ref{thm:h-trans} and Proposition~\ref{prop:pBnk}, we need to recall a few definitions and facts from the theory of interlacing polynomials; for more information, see \cite[Section~8]{Bra15} and references therein. A polynomial $p(x) \in {\mathbb R}[x]$ is called \emph{real-rooted} if either it is the zero polynomial, or every complex root of $p(x)$ is real. Given two real-rooted polynomials $p(x), q(x) \in {\mathbb R}[x]$, we say that $p(x)$ \emph{interlaces} $q(x)$ if the roots $\{\alpha_i\}$ of $p(x)$ interlace (or alternate to the left of) the roots $\{\beta_j\}$ of $q(x)$, in the sense that they can be listed as
\[ \cdots \le \beta_3 \le \alpha_2 \le \beta_2 \le
\alpha_1 \le \beta_1 \le 0. \]
A sequence $(p_0(x), p_1(x),\dots,p_n(x))$ of real-rooted polynomials is called \emph{interlacing} if $p_i(x)$ interlaces $p_j(x)$ for all $0 \le i < j \le n$. Assuming also that these polynomials have positive leading coefficients, every nonnegative linear combination of $p_0(x), p_1(x),\dots,p_n(x)$ is real-rooted and interlaced by $p_0(x)$. A standard way to produce interlacing sequences in combinatorics is the following. Suppose that $p_0(x), p_1(x),\dots,p_n(x)$ are real-rooted polynomials with nonnegative coefficients and set
\[ q_k(x) \ = \ x \sum_{i=0}^{k-1} p_i(x) \, + \,
\sum_{i=k}^n p_i(x) \]
for $k \in \{0, 1,\dots,n+1\}$. Then, if the sequence $(p_0(x), p_1(x),\dots,p_n(x))$ is interlacing, so is $(q_0(x), q_1(x),\dots,q_{n+1}(x))$; see \cite[Corollary~8.7]{Bra15} for a more general statement.
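The interlacing property is also easy to test numerically. The following sketch (ours, assuming NumPy and SymPy are available) checks that the polynomials $p^B_{3,k}(x)$ of Table~\ref{tab:pBnk} are real-rooted and pairwise interlacing in the sense just described.
\begin{verbatim}
import numpy as np
import sympy as sp

x = sp.symbols('x')

def roots_desc(p):
    """Real roots of a real-rooted polynomial, in decreasing order."""
    r = np.roots([float(c) for c in sp.Poly(p, x).all_coeffs()])
    assert np.allclose(r.imag, 0.0)        # real-rootedness
    return np.sort(r.real)[::-1]

def interlaces(p, q):
    """Roots alternate as ... <= beta_2 <= alpha_1 <= beta_1, numerically."""
    a, b = roots_desc(p), roots_desc(q)
    merged = [v for pair in zip(b, a) for v in pair] + list(b[len(a):])
    return all(merged[i] >= merged[i + 1] - 1e-9 for i in range(len(merged) - 1))

pB3 = [1 + 23*x + 23*x**2 + x**3, 36*x + 56*x**2 + 4*x**3,
       12*x + 72*x**2 + 12*x**3, 4*x + 56*x**2 + 36*x**3,
       x + 23*x**2 + 23*x**3 + x**4]       # Table 1, n = 3
print(all(interlaces(pB3[i], pB3[j]) for i in range(5) for j in range(i+1, 5)))
\end{verbatim}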
The following result is a stronger version of Theorem~\ref{thm:main}.
\begin{corollary} \label{cor:main} The polynomial $h({\rm sd}({\mathcal L}), x)$ is real-rooted and interlaced by the Eulerian polynomial $B_n(x)$ for every $n$-dimensional cubical complex ${\mathcal L}$ with nonnegative cubical $h$-vector.
In particular, $h({\rm sd}({\mathcal L}), x)$ and $h({\rm sd}(Q), x)$ are real-rooted and interlaced by $B_n(x)$ for every shellable, $n$-dimensional cubical complex ${\mathcal L}$ and every cubical polytope $Q$ of dimension $n+1$, respectively. \end{corollary}
\begin{proof} By an application of the lemma on interlacing sequences just discussed, the recurrence of part (e) of Proposition~\ref{prop:pBnk} implies that $(p^B_{n,0}(x), p^B_{n,1}(x),\dots,p^B_{n,n+1}(x))$ is interlacing for every $n \in {\mathbb N}$ by induction on $n$. Therefore, being a nonnegative linear combination of the elements of the sequence by Theorem~\ref{thm:h-trans}, $h({\rm sd}({\mathcal L}), x)$ is real-rooted and interlaced by $p^B_{n,0}(x) = B_n(x)$ for every $n$-dimensional cubical complex ${\mathcal L}$ with nonnegative cubical $h$-vector. This proves the first statement.
The second statement follows from the first since shellable cubical complexes are known to have nonnegative cubical $h$-vector \cite[Theorem~5~(iii)]{Ad96}, $h({\rm sd}(Q), x) = h({\rm sd}(\partial Q), x)$ for every convex polytope $Q$ and because boundary complexes of polytopes are shellable. \end{proof}
\begin{remark} \label{rem:simplicial} \rm Let $\Delta$ be a simplicial complex with nonnegative $h$-vector and ${\mathcal L}$ be a cubical complex which is obtained from $\Delta$ by any operation which preserves nonnegativity of $h$-vectors. Corollary~\ref{cor:main} implies that $h({\rm sd}({\mathcal L}), x)$ is real-rooted.
By a result of Hetyei~\cite{Het96}, such an operation is the cubical barycentric subdivision ${\mathcal L} = {\rm sd}_c(\Delta)$ (see \cite[p.~44]{Ath18}), also known as barycentric cover \cite[Section~2.3]{BBC97}, of $\Delta$. Then, ${\rm sd}({\mathcal L})$ becomes the interval triangulation of $\Delta$ \cite[Section~3.3]{MW17}. This argument shows that the interval triangulation of $\Delta$ has a real-rooted $h$-polynomial for every simplicial complex $\Delta$ with nonnegative $h$-vector and answers in the affirmative the question of \cite[Problem~33]{MW17}. Although there are other proofs of this fact in the literature (see \cite{Ath20+} and references therein), the approach via Corollary~\ref{cor:main} allows for more general results, e.g., by applying further cubical subdivisions of ${\rm sd}_c(\Delta)$ which preserve the nonnegativity of the cubical $h$-vector. \end{remark}
\begin{remark} \label{rem:BW-formula} \rm Applying the reasoning of the proof of Proposition~\ref{prop:sdc-h} and of the first few lines of the proof of Theorem~\ref{thm:h-trans} to an $n$-dimensional simplicial complex $\Delta$ gives that
\[ h({\rm sd}(\Delta), x) \ = \ (1-x)^{n+1} \, + \, x \,
\sum_{k=0}^n f_k(\Delta) (1-x)^{n-k} A_{k+1}(x) \]
and
\[ \frac{h({\rm sd}(\Delta), x)}{(1-x)^{n+2}} \ = \ \sum_{m \ge 0} \left( \, \sum_{k=0}^{n+1} f_{k-1} (\Delta) m^k \right) x^m \ = \ \sum_{m \ge 0} \left( \, \sum_{k=0}^{n+1} h_k(\Delta) m^k (m+1)^{n+1-k} \right) x^m . \]
This is the expression at which Brenti and Welker arrived \cite[Equation~(5)]{BW08} via a different route and which they used to show that $h({\rm sd}(\Delta), x)$ has only real roots, provided that $h_k(\Delta) \ge 0$ for all $k$. \end{remark}
\begin{remark} \rm Replacing $2y-1$ by $x$ in (\ref{def:cub-h2}) shows that the equation
\[ \sum_{k=0}^n f_k({\mathcal L}) x^k \ = \ \sum_{k=0}^n \, (h_k({\mathcal L}) + h_{k+1}({\mathcal L})) \, x^k (x+2)^{n-k} , \]
together with the condition $h_0({\mathcal L}) = 1$, gives an equivalent way to define the normalized cubical $h$-vector of an $n$-dimensional cubical complex ${\mathcal L}$. \end{remark}
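To illustrate the last equation with a concrete case (our own example, which uses only the standard face numbers of the cube), let ${\mathcal L}$ be the boundary complex of the $3$-dimensional cube, so that $n = 2$ and $(f_0({\mathcal L}), f_1({\mathcal L}), f_2({\mathcal L})) = (8, 12, 6)$. Abbreviating $h_k = h_k({\mathcal L})$ and comparing coefficients in
\[ 8 + 12x + 6x^2 \ = \ (h_0 + h_1) (x+2)^2 \, + \, (h_1 + h_2) \, x (x+2) \, + \, (h_2 + h_3) \, x^2 , \]
the condition $h_0 = 1$ gives $h_0 + h_1 = 2$ from the constant term, then $h_1 + h_2 = 2$ from the coefficient of $x$ and $h_2 + h_3 = 2$ from that of $x^2$, so that the normalized cubical $h$-vector equals $(1, 1, 1, 1)$.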
\section{Closing remarks} \label{sec:rem}
There is a large literature on the barycentric subdivision of simplicial complexes which relates to the work \cite{BW08}. Many of the questions addressed there make sense for cubical complexes. We only consider a couple of them here.
\textbf{a}. Being real-rooted, $h({\rm sd}(\Delta), x)$ is unimodal for every $n$-dimensional simplicial complex $\Delta$ with nonnegative $h$-vector. Kubitzke and Nevo showed~\cite[Corollary~4.7]{KN09} that the corresponding $h$-vector $(h_i({\rm sd}(\Delta)))_{0 \le i \le n+1}$ has a peak at $i = (n+1)/2$, if $n$ is odd, and at $i = n/2$ or $i = n/2 + 1$, if $n$ is even. The analogous statement for cubical complexes follows from Theorem~\ref{thm:h-trans} since, as in the simplicial setting, the unimodal polynomial $p^B_{n,k}(x)$ has a peak at $i = (n+1)/2$, if $n$ is odd, at $i = n/2$ if $n$ is even and $k \le n/2$, and at $i = n/2 + 1$, if $n$ is even and $k \ge n/2 + 1$. The latter claim can be deduced from the recursion of part (e) of Proposition~\ref{prop:pBnk} by mimicking the argument given in the simplicial setting in~\cite[Section~2]{Mur10}. For general results on the unimodality of $h$-vectors of barycentric subdivisions of Cohen--Macaulay regular cell complexes, proven by algebraic methods, see Corollaries~1.2 and~5.12 in \cite{MY14}.
\textbf{b}. The main result of~\cite{BS20} implies (see~\cite[Section~8]{Ath20+}) that $h({\rm sd}(\Delta), x)$ has a nonnegative real-rooted symmetric decomposition with respect to $n$ for every triangulation $\Delta$ of an $n$-dimensional ball. Does this hold if $\Delta$ is replaced by any cubical subdivision of the $n$-dimensional ball? Are these symmetric decompositions interlacing? Do the polynomials $p^B_{n,k}(x)$ have such properties?
\textbf{c}. The subdivision operator (see \cite[Section~3.3]{Bra15}) has a natural generalization in the context of uniform triangulations of simplicial complexes \cite[Section~5]{Ath20+} which plays a role in that theory. It may be worth studying the cubical analogue of this operator further.
\end{document}
\begin{document}
\title[Nonunique asymptotic limit]{Finite-energy pseudoholomorphic planes with multiple asymptotic limits} \author[R.\ Siefring]{Richard Siefring}
\address{Fakult\"at f\"ur Mathematik \\ Ruhr-Universit\"at Bochum \\
44780 Bochum \\
Germany} \urladdr{\url{http://homepage.ruhr-uni-bochum.de/richard.siefring}} \email{\href{mailto:[email protected]}{[email protected]}} \date{September 30, 2016}
\dedicatory{ Dedicated to Helmut Hofer on the occasion of his 60th birthday, and in warm remembrance of Kris Wysocki. }
\begin{abstract} It's known from \cite{hwz:prop1, hwz:prop4, bourgeois} that in a contact manifold equipped with either a nondegenerate or Morse-Bott contact form, a finite-energy pseudoholomorphic curve will be asymptotic at each of its nonremovable punctures to a single periodic orbit of the Reeb vector field and that the convergence is exponential. We provide examples here to show that this need not be the case if the contact form is degenerate. More specifically, we show that on any contact manifold $(M, \xi)$ with cooriented contact structure one can choose a contact form $\lambda$ with $\ker\lambda=\xi$ and a compatible complex structure $J$ on $\xi$ so that for the associated $\mathbb{R}$-invariant almost complex structure $\tilde J$ on $\mathbb{R}\times M$ there exist families of embedded finite-energy $\tilde J$-holomorphic cylinders and planes having embedded tori as limit sets. \end{abstract}
\maketitle
\tableofcontents
\section{Introduction and statement of results} The study of punctured pseudoholomorphic curves in symplectizations of contact manifolds was introduced by Hofer in \cite{hofer93}. Specifically, considering a contact manifold $(M, \xi=\ker\lambda)$, Hofer introduced a class of $\mathbb{R}$-invariant almost complex structures and a notion of energy for a pseudoholomorphic map $\tilde u=(a, u):\mathbb{C}\to \mathbb{R}\times M$ and showed that if the energy of a pseudoholomorphic plane is finite, then there are sequences $s_{k}\to\infty$ so that the sequence of loops \[ t\in \mathbb{S}^{1}\approx \mathbb{R}/\mathbb{Z} \mapsto u(e^{2\pi(s_{k}+it)}) \] converge in $C^{\infty}(\mathbb{S}^{1}, M)$ to a periodic orbit $\gamma$ of the Reeb vector field of the contact form $\lambda$.
In \cite[Theorem 1.2/1.3]{hwz:prop1}, Hofer, Wysocki, and Zehnder further show that if the periodic orbit $\gamma$ is nondegenerate, then the maps $u(s):\mathbb{S}^{1}\to M$ defined by $u(s)(t)=u(e^{s+it})$ satisfy \[ \lim_{s\to\infty}u(s)=\gamma \text{ in $C^{\infty}(\mathbb{S}^{1}, M)$} \] and in fact the convergence is exponential \cite[Theorem 1.4]{hwz:prop1}. There, immediately following the statement of Theorem 1.2, the authors mention that they expect this need not be the case in the event that the periodic orbit $\gamma$ is degenerate, but that they didn't know of an explicit example. To date no examples have appeared in the literature, and whether or not it is possible for a finite-energy plane to have multiple periodic orbits as asymptotic limits has remained an open question.\footnote{In fact, a claimed proof that no such examples exist has appeared in a recent (now-withdrawn) arXiv preprint.} We present some examples here. The examples we construct can be localized to any arbitrarily small neighborhood of a standard model of a transverse knot and since transverse knots exist in abundance in any contact manifold, we can prove the following.
\begin{theorem}\label{t:main-theorem} Let $(M, \xi)$ be a contact manifold. Then there exists a contact form $\lambda$ on $M$ and a compatible complex structure $J$ on $\xi$ so that there exist finite-energy pseudoholomorphic planes and cylinders for the data $(\lambda, J)$ whose limit sets have image diffeomorphic to the $2$-torus. \end{theorem}
We give a brief outline of what follows. In Section \ref{s:background} we begin by recalling some basic notions from contact geometry and pseudoholomorphic curves. Then, in Section \ref{s:prequant}, we explain a correspondence between gradient flow lines on exact symplectic manifolds and pseudoholomorphic cylinders in contact manifolds constructed as circle bundles over those symplectic manifolds. From this construction it is clear that one can construct pseudoholomorphic cylinders having more than one limit orbit by constructing gradient flow lines in a symplectic manifold having an alpha or omega limit set consisting of more than a single point. To this end, we construct in Section \ref{s:gradient} a function on the cylinder $\mathbb{R}\times S^{1}$ which will have the circle $\br{0}\times S^{1}$ as the omega limit set of any nontrivial gradient flow line with respect to any Riemannian metric and which can be chosen to be linear in the $\mathbb{R}$-variable and independent of the $S^{1}$-variable outside of any desired neighborhood of $\br{0}\times S^{1}$. Finally, in Section \ref{s:main-proof}, we apply the results of Sections \ref{s:prequant} and \ref{s:gradient} to construct finite-energy pseudoholomorphic cylinders and planes having tori as limit sets. We comment that while the construction of a pseudoholomorphic cylinder with tori as limit sets is a straightforward application of the results in Sections \ref{s:prequant} and \ref{s:gradient}, applying these results to construct a plane with multiple limit orbits is a bit trickier and requires finding a situation where these results can be applied to construct a cylinder with a removable singularity.
We close this section with a remark about notation. In most of what follows we find it convenient to consider the circle as $\mathbb{R}/2\pi\mathbb{Z}$, although at some points --- specifically when considering domains of pseudoholomorphic cylinders or periodic orbits --- we will find it more convenient to consider the circle to be $\mathbb{R}/\mathbb{Z}$. To avoid ambiguity we will use the notations $S^{1}=\mathbb{R}/2\pi\mathbb{Z}$ and $\mathbb{S}^{1}=\mathbb{R}/\mathbb{Z}$ to distinguish between the two.
\section{Pseudoholomorphic curves in contact manifolds}\label{s:background}
Here we recall some basic notions, primarily for the purpose of fixing notation. Let $M$ be an oriented $(2n+1)$-dimensional manifold. A $1$-form $\lambda$ is said to be a contact form on $M$ if \begin{equation}\label{e:contact-condition} \text{$\lambda\wedge d\lambda^{n}$ is nowhere vanishing.} \end{equation} A contact form on $M$ determines a splitting \begin{equation}\label{e:splitting} TM=\mathbb{R}X_{\lambda}\oplus\xi \end{equation} where $\xi=\ker\lambda$ is a hyperplane distribution, called the \emph{contact structure}, and $X_{\lambda}$ is the \emph{Reeb vector field}, defined by \[ i_{X_{\lambda}}d\lambda=0 \qquad\text{ and }\qquad i_{X_{\lambda}}\lambda=1. \] We note that \eqref{e:contact-condition} implies that $d\lambda$ restricts to a nondegenerate form on $\xi$ and thus $(\xi, d\lambda)$ is a symplectic vector bundle over $M$.
We recall that if $\lambda$ is a contact form on $M$ and $f:M\to\mathbb{R}$ is a smooth function, then $e^{f}\lambda$ is also a contact form since $d(e^{f}\lambda)=e^{f}(df\wedge\lambda+d\lambda)$ and hence \[ (e^{f}\lambda)\wedge d(e^{f}\lambda)^{n}=e^{(n+1)f}\,\lambda\wedge d\lambda^{n}. \] We note for later reference that a straightforward computation shows that the Reeb vector field for the contact form $e^{f}\lambda$ is related to the Reeb vector field for $\lambda$ by \begin{equation}\label{e:reeb-change-1} X_{e^{f}\lambda}=e^{-f}\bp{X_{\lambda}-X_{f}} \end{equation} where $X_{f}$ is the unique section of $\xi$ satisfying \begin{equation}\label{e:reeb-change-2} i_{X_{f}}d\lambda=-df+df(X_{\lambda})\lambda. \end{equation} That there is a unique section $X_{f}$ of $\xi$ satisfying \eqref{e:reeb-change-2} follows from nondegeneracy of $d\lambda$ on $\xi$ and the fact that both sides of \eqref{e:reeb-change-2} vanish on $X_{\lambda}$.
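For completeness, one way to carry out this computation is the following. Contracting \eqref{e:reeb-change-2} with $X_{f}$ shows that $df(X_{f})=0$, and since $\lambda(X_{f})=0$ we find
\[ i_{e^{-f}(X_{\lambda}-X_{f})}\, e^{f}\lambda \ = \ \lambda(X_{\lambda})-\lambda(X_{f}) \ = \ 1 \]
and
\[ i_{e^{-f}(X_{\lambda}-X_{f})}\, d(e^{f}\lambda) \ = \ i_{X_{\lambda}-X_{f}}\bp{df\wedge\lambda+d\lambda} \ = \ \bp{df(X_{\lambda})\lambda-df} - df(X_{f})\lambda + \bp{df-df(X_{\lambda})\lambda} \ = \ 0, \]
where \eqref{e:reeb-change-2} was used to evaluate $-i_{X_{f}}d\lambda$ and $df(X_{f})=0$ was used in the last step.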
Given a symplectic vector bundle $(E, \omega)$ over a given manifold $W$, a complex structure $J\in\operatorname{End}(E)$ is said to be \emph{compatible} with $\omega$ if the section of $E^{*}\otimes E^{*}$ defined by $g_{J}:=\omega(\cdot, J\cdot)$ is symmetric and positive definite on $E$. It is well known that the space of such $J$ is nonempty and contractible (see e.g.\ the discussion following Proposition 5 in Section 1.3 of \cite{hoferzehnder}). Given a contact manifold, $(M, \xi=\ker\lambda)$, we then define the set $\mathcal{J}(M, \xi)$ to be the set of complex structures on $\xi$ compatible with
$d\lambda|_{\xi\times\xi}$. We observe that if a complex structure $J\in\operatorname{End}(\xi)$ is compatible with $d\lambda$, then it is also compatible with $d(e^{f}\lambda)$ since \[ d(e^{f}\lambda)-e^{f}d\lambda=e^{f}df\wedge\lambda \] which vanishes on $\xi\times\xi=\ker\lambda\times\ker\lambda$. Therefore, the set $\mathcal{J}(M, \xi)$ depends only on a choice of conformal symplectic structure on $\xi$, and not on the choice of a specific contact form inducing that structure.
Given a manifold $M$ with contact form $\lambda$ and a compatible $J$, we can extend $J$ to an $\mathbb{R}$-invariant almost complex structure $\tilde J$ on $\mathbb{R}\times M$ by requiring \begin{equation}\label{e:R-invariant-J}
\tilde J\partial_{a}=X_{\lambda} \qquad\text{ and }\qquad \tilde J|_{\pi_{M}^{*}\xi}=\pi_{M}^{*}J \end{equation} with $a$ the coordinate along $\mathbb{R}$ and $\pi_{M}:\mathbb{R}\times M\to M$ the coordinate projection. We consider quintuples $(\Sigma, j, \Gamma, a, u)$ where $(\Sigma, j)$ is a closed Riemann surface, $\Gamma\subset\Sigma$ is a finite set, called the set of \emph{punctures}, and $a:\Sigma\setminus\Gamma\to\mathbb{R}$ and $u:\Sigma\setminus\Gamma\to M$ are smooth maps. We say such a quintuple is a \emph{pseudoholomorphic map for the data $(\lambda, J)$ on $M$} if $\tilde u=(a, u):\Sigma\setminus\Gamma\to\mathbb{R}\times M$ satisfies the equation \begin{equation}\label{e:j-hol} d\tilde u\circ j=\tilde J\circ d\tilde u \end{equation} or, equivalently, if $u$ and $a$ satisfy \begin{equation}\label{e:j-hol-M} \begin{gathered} \pi_{\lambda}\circ du\circ j=J\circ \pi_{\lambda}\circ du \\ u^{*}\lambda\circ j=da \end{gathered} \end{equation} where $\pi_{\lambda}:TM\approx\mathbb{R}X_{\lambda}\oplus\xi\to\xi$ is the projection of $TM$ onto $\xi$ along $X_{\lambda}$. The \emph{Hofer energy} $E(u)$ of a pseudoholomorphic map $(\Sigma, j, \Gamma, a, u)$ is defined by \begin{equation}\label{e:hofer-energy-defn} E(u)=\sup_{\varphi\in\Xi}\int_{\Sigma\setminus\Gamma}\tilde u^{*}d(\varphi\lambda)= \sup_{\varphi\in\Xi}\int_{\Sigma\setminus\Gamma}d(\varphi(a) u^{*}\lambda) \end{equation} where $\Xi\subset C^{\infty}(\mathbb{R}, [0, 1])$ is the set of smooth functions $\varphi:\mathbb{R}\to[0, 1]$ with $\varphi'(s)\ge 0$ for all $s\in\mathbb{R}$, $\lim_{s\to-\infty}\varphi(s)=0$, and $\lim_{s\to\infty}\varphi(s)=1$.
To each puncture in a pseudoholomorphic map we will assign a quantity called the mass of the puncture. First, we will call a holomorphic embedding $\psi:[0, +\infty)\times \mathbb{S}^{1}\subset\mathbb{C}/i\mathbb{Z}\to\Sigma\setminus\Gamma$ a \emph{holomorphic cylindrical coordinate system} around $z_{0}\in\Gamma$ if $\lim_{s\to\infty}\psi(s, t)=z_{0}$. Given holomorphic cylindrical coordinates $\psi$ around $z_{0}\in\Gamma$, we consider the family of loops $v(s)=(u\circ\psi)(s, \cdot):\mathbb{S}^{1}\to M$ and define the \emph{mass $m(z_{0})$ of the puncture $z_{0}$} by \begin{equation}\label{e:mass} m(z_{0})=\lim_{s\to\infty}\int_{\mathbb{S}^{1}}v(s)^{*}\lambda. \end{equation} The limit in this definition is well-defined as a result of the compatibility of $J$ with $d\lambda$. Indeed, for $s_{1}>s_{0}$ we apply Stokes' theorem to compute \begin{align} \int_{\mathbb{S}^{1}}v(s_{1})^{*}\lambda-\int_{\mathbb{S}^{1}}v(s_{0})^{*}\lambda &=\int_{[s_{0}, s_{1}]\times \mathbb{S}^{1}}(u\circ\psi)^{*}d\lambda \label{e:mass-stokes} \\ &=\int_{[s_{0}, s_{1}]\times \mathbb{S}^{1}}d\lambda(u_{s}, u_{t})\,ds\wedge dt \notag \\ &=\int_{[s_{0}, s_{1}]\times \mathbb{S}^{1}}d\lambda(\pi_{\lambda}(u_{s}),\pi_{\lambda}(u_{t}))\,ds\wedge dt & i_{X_{\lambda}}d\lambda=0 \notag \\ &=\int_{[s_{0}, s_{1}]\times \mathbb{S}^{1}}d\lambda(\pi_{\lambda}(u_{s}),J\pi_{\lambda}(u_{s}))\,ds\wedge dt & \eqref{e:j-hol-M} \notag \end{align} and we observe that the integrand in the final line above is nonnegative by compatibility of $J$ with $d\lambda$. Thus the integral in the definition \eqref{e:mass} of mass is an increasing function of $s$, which lets us conclude that the limit is well-defined (although possibly infinite). It can, moreover, be shown that the mass is independent of the choice of holomorphic cylindrical coordinates near $z_{0}$.
It is a straightforward exercise using \eqref{e:mass-stokes} and the definition of Hofer energy to show that if a pseudoholomorphic map has finite Hofer energy, then all punctures have finite mass. Furthermore, punctures with mass $0$ can be shown to be removable, that is, one can find a pseudoholomorphic extension of the map $\tilde u$ over any puncture with mass $0$ (see \cite[pgs.\ 272-3]{hwz:prop2}). The behavior near punctures with nonzero mass is described by the following now well-known theorem of Hofer from \cite{hofer93}.\footnote{ Hofer only considers planes in \cite{hofer93} and proves the slightly weaker statement that there exists a sequence $s_{k}\to\infty$ so that the corresponding loops $u\circ\psi(s_{k}, \cdot)$ converge to a periodic orbit, but the generalization of the proof to the result we state here is straightforward. In the survey \cite[Theorem 3.2]{hwz-survey}, the appropriate result is proven for a general pseudoholomorphic half-cylinder, albeit under a different notion of energy. The fact that this different notion of energy implies finite Hofer energy as defined by \eqref{e:hofer-energy-defn} is addressed in Theorem 5.1 of the same paper. }
\begin{theorem} Let $M$ be a compact manifold equipped with a contact form $\lambda$ and a compatible complex structure $J\in\mathcal{J}(M, \xi)$ on $\xi=\ker\lambda$. Let $(\Sigma, j, \Gamma, a, u)$ be a solution to \eqref{e:j-hol-M} and assume that $z_{0}\in\Gamma$ has mass $m(z_{0})=T\ne 0$. Then for every holomorphic cylindrical coordinate system $\psi:[0, \infty)\times \mathbb{S}^{1}\to\Sigma\setminus\Gamma$ around $z_{0}$, and every sequence $s_{k}\to\infty$ there exists a subsequence $s_{k_{j}}$ and a smooth map $\gamma:\mathbb{S}^{1}=\mathbb{R}/\mathbb{Z}\to M$ with $\dot\gamma=T\cdot X_{\lambda}\circ\gamma$ so that the sequence of loops $u\circ\psi(s_{k_{j}}, \cdot):\mathbb{S}^{1}\to M$ converges in $C^{\infty}(\mathbb{S}^{1}, M)$ to $\gamma$. \end{theorem}
We will refer to the collection of periodic orbits obtained as limits of a given finite-energy pseudoholomorphic map as the \emph{limit set} of that map. As mentioned in the introduction, it can be shown under some suitable nondegeneracy assumptions that a puncture has a unique periodic orbit (up to reparametrization) in its limit set and that the convergence to that orbit is exponential \cite{hwz:prop1, hwz:prop4, HWZ:planes, bourgeois, mora}. In the absence of nondegeneracy however, it has until now remained an open question whether it's possible for a finite-energy pseudoholomorphic map to have more than one periodic orbit in the asymptotic limit set of a given puncture.
\section{Prequantization spaces, gradient flows, and pseudoholomorphic cylinders}\label{s:prequant}
In this section we explain a correspondence between gradient flows on symplectic manifolds and certain pseudoholomorphic cylinders in an associated prequantization space, that is, a contact manifold constructed as a principal $S^{1}$-bundle over the given symplectic manifold with the contact structure being given as the horizontal distribution determined by an appropriate connection on the bundle. For simplicity we focus on the case of trivial $S^{1}$-bundles over exact symplectic manifolds, since that is all we require for the proof of our main theorem, but we point out that the construction of pseudoholomorphic cylinders in a prequantization space from gradient flow lines in the base can be generalized to any prequantization space.
Let $(W, \omega=d\beta)$ be an exact symplectic manifold and consider $S^{1}(\approx \mathbb{R}/2\pi\mathbb{Z})\times W$ equipped with the $1$-form \begin{equation}\label{e:prequant-form} \lambda=d\theta+\pi^{*}\beta, \end{equation} where $\pi:S^{1}\times W\to W$ is the canonical projection onto the second factor. The $1$-form $\lambda$ defined in this way is a contact form on $S^{1}\times W$ since \begin{align*} \lambda\wedge (d\lambda)^{n} &=(d\theta+\pi^{*}\beta)\wedge \pi^{*}\omega^{n} \\ &=d\theta\wedge\pi^{*}\omega^{n}>0, \end{align*} where the cross term $\pi^{*}\beta\wedge\pi^{*}\omega^{n}=\pi^{*}\bp{\beta\wedge\omega^{n}}$ vanishes because $\beta\wedge\omega^{n}$ is a $(2n+1)$-form on the $2n$-dimensional manifold $W$. We will refer to a pair $(S^{1}\times W, d\theta+\pi^{*}\beta)$ consisting of a trivial $S^{1}$-bundle and a contact form arising in this way as a \emph{prequantization space} over the symplectic manifold $(W, \omega=d\beta)$.
We observe that the Reeb vector field of the contact form \eqref{e:prequant-form} is given by $\partial_{\theta}$ and hence the splitting \eqref{e:splitting} induced on $TM$ by the contact form is given by \[ T(S^{1}\times W)\approx TS^{1}\oplus\xi. \] Thus $\xi$ is an $S^{1}$-invariant horizontal distribution of the bundle $S^{1}\times W\to W$ which gives us a one-to-one correspondence between the space $\Gamma(TW)$ of vector fields on $W$ and the space $\Gamma_{S^{1}}(\xi)$ of $S^{1}$-invariant sections of the contact structure $\xi$. This correspondence is given explicitly by the maps \begin{equation}\label{e:horizontal-lift} X\in T_{p}W\mapsto \tilde X:=-\beta(X)\,\partial_{\theta} +X \in \xi_{(\theta, p)}\subset T_{(\theta, p)}(S^{1}\times W) \end{equation} and \[ \tilde Y\in \xi_{(\theta, p)}\mapsto d\pi(\tilde Y)\in T_{p}W, \] where the plus sign in \eqref{e:horizontal-lift} is to be interpreted relative to the natural splitting \[ T_{(\theta, p)}(S^{1}\times W)\approx T_{\theta}S^{1}\oplus T_{p}W \] arising from the canonical projection onto the factors of the Cartesian product. The correspondence between vector fields on $W$ and $S^{1}$-invariant sections of $\xi$ generalizes to arbitrary tensor fields on $W$. In particular an endomorphism $A\in\operatorname{End}(TW)\approx TW^{*}\otimes TW$ of the form \[ A=\sum_{i}\alpha_{i}\otimes X_{i} \] lifts to an $S^{1}$-invariant endomorphism $\tilde A\in\operatorname{End}(\xi)\approx\xi^{*}\otimes\xi$ given by \[ \tilde A=\sum_{i}\pi^{*}\alpha_{i}\otimes \tilde X_{i}. \] Equivalently, we can define $\tilde A$ to be the unique section of $\operatorname{End}(\xi)$ satisfying \[ \tilde A\tilde X=\widetilde{AX} \] for every vector field $X$ on $TW$.
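As a simple illustration of \eqref{e:horizontal-lift} (with $W$ chosen here only for concreteness), take $W=\mathbb{R}^{2}=\br{(x, y)}$ with $\beta=x\,dy$, so that $\lambda=d\theta+x\,dy$ on $S^{1}\times\mathbb{R}^{2}$. The coordinate vector fields then lift to
\[ \widetilde{\partial_{x}} \ = \ \partial_{x} \qquad\text{ and }\qquad \widetilde{\partial_{y}} \ = \ -x\,\partial_{\theta}+\partial_{y}, \]
and one checks directly that $\lambda$ vanishes on both lifts, as it must.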
We define $\mathcal{J}(W, \omega)$ to be the set of almost complex structures on $W$ compatible with the symplectic form $\omega$, that is, those $j\in\operatorname{End}(TW)$ which square to negative the identity and for which $g_{j}:=\omega(\cdot, j\cdot)$ is a Riemannian metric on $W$. According to the remarks of the previous paragraph, $j$ lifts to an $S^{1}$-invariant endomorphism $\tilde j$ of $\xi$ characterized by \[ \tilde j\tilde X=\widetilde{jX} \] for every vector field $X$ on $W$. From this equation together with the linearity of the map $X\mapsto\tilde X$ and the fact that \[ d\lambda=d(\pi^{*}\beta)=\pi^{*}(d\beta)=\pi^{*}\omega \] it follows that the $S^{1}$-invariant lift $\tilde j\in\operatorname{End}(\xi)$ of a compatible almost complex structure $j\in\mathcal{J}(W, \omega)$ on $W$ is an element of $\mathcal{J}(S^{1}\times W, \xi)$, i.e.\ a complex structure on $\xi$ compatible with $d\lambda$.
Given a choice of compatible $j\in\mathcal{J}(W, \omega)$ we can associate two vector fields on $W$ to any smooth real-valued function $f$ on $W$: the Hamiltonian vector field $X_{f}$ and the gradient $\nabla f$ defined respectively by \[ i_{X_{f}}\omega=-df \qquad\text{ and }\qquad g_{j}(\nabla f, \cdot)=df. \] These vector fields are related by the equations \[ X_{f}=j\nabla f \qquad\text{ and }\qquad \nabla f=-jX_{f} \] since we can use the definition of $g_{j}$ and the antisymmetry of $\omega$ to compute \[ i_{j\nabla f}\omega=\omega(j\nabla f, \cdot) =-\omega(\cdot, j\nabla f) =-g_{j}(\cdot, \nabla f) =-df. \] From the observations of the previous paragraph, the respective $S^{1}$-invariant lifts of $\tilde j$, $\widetilde{\nabla f}$, and $\widetilde{X_{f}}$ of $j$, $\nabla f$ and $X_{f}$ satisfy \begin{equation}\label{e:ham-grad-lift} \widetilde{X_{f}}=\tilde j\widetilde{\nabla f} \qquad\text{ and }\qquad \widetilde{\nabla f}=-\tilde j\widetilde{X_{f}}. \end{equation}
Continuing to let $f:W\to\mathbb{R}$ denote a smooth function on $W$, we can pull $f$ back to an $S^{1}$-invariant smooth function $\pi^{*}f$ on $S^{1}\times W$ and consider the contact form $\lambda_{f}$ defined by \[ \lambda_{f}=e^{\pi^{*}f}\lambda=e^{\pi^{*}f}(d\theta+\pi^{*}\beta). \] Since \begin{align*} i_{\tilde X_{f}}d\lambda &=i_{\tilde X_{f}}\pi^{*}\omega \\ &=\pi^{*}(i_{X_{f}}\omega) \\ &=-\pi^{*}df \\ &=-d(\pi^{*}f)+d(\pi^{*}f)(\partial_{\theta})\lambda \end{align*} it follows from \eqref{e:reeb-change-1}-\eqref{e:reeb-change-2} that \begin{equation}\label{e:reeb-deformed} X_{\lambda_{f}}=e^{-\pi^{*}f}(\partial_{\theta}-\tilde X_{f}). \end{equation} From this and \eqref{e:ham-grad-lift} we note at any point $p\in W$ where $f$ has a critical point, $X_{\lambda_{f}}(\theta, p)=e^{-f(p)}\partial_{\theta}$, and thus the fiber in $S^{1}\times W$ over $p$ is a periodic orbit of the Reeb vector field with period $2\pi e^{f(p)}$.
We are now ready to state the main theorem of the section, which establishes a correspondence between gradient flows on a symplectic manifold $(W, \omega=d\beta)$ and pseudoholomorphic cylinders in the corresponding prequantization space $(S^{1}\times W, d\theta+\pi^{*}\beta)$. The idea of relating gradient flow lines of a Morse function to pseudoholomorphic cylinders in a contact manifold originates in \cite{bourgeois} (see also \cite{sft, behwz}). In the present context this relationship can be seen as a generalization to the contact setting of an idea of Floer from \cite{floer} (see also \cite{SZ, HS}).
\begin{theorem}\label{t:gradient-flow-hol-cylinders} Let $(S^{1}\times W, \lambda=d\theta+\pi^{*}\beta)$ be a prequantization space over an exact symplectic manifold $(W, \omega)$, let $j\in\mathcal{J}(W, \omega=d\beta)$ be a compatible almost complex structure on $W$, and let $J=\tilde j\in\mathcal{J}(S^{1}\times W, \xi)$ be the corresponding $S^{1}$-invariant compatible complex structure on $\xi=\ker\lambda$. Given a smooth function $f:W\to\mathbb{R}$, consider smooth maps $\gamma:\mathbb{R}\to W$, $\theta:\mathbb{R}\to S^{1}$, and $a:\mathbb{R}\to\mathbb{R}$ satisfying the system of o.d.e.'s \begin{align} \dot\gamma(s)&=2\pi \nabla f(\gamma(s)) \label{e:ode-gamma} \\ \dot\theta(s)&=-2\pi \beta(\nabla f(\gamma(s))) \label{e:ode-theta}\\ \dot a(s)&=2\pi e^{f(\gamma(s))} \label{e:ode-a} \end{align} with $\nabla f$ denoting the gradient with respect to the metric $g_{j}=\omega(\cdot, j\cdot)$. Then the map $\tilde u=(a, u):\mathbb{R}\times\mathbb{S}^{1}(\approx\mathbb{R}/\mathbb{Z})\to \mathbb{R}\times S^{1}(\approx\mathbb{R}/2\pi\mathbb{Z})\times W$ defined by \[ \tilde u(s, t)=(a(s, t), u(s, t))=(a(s), \theta(s)+2\pi t, \gamma(s)) \] is a pseudoholomorphic cylinder for the data $(e^{\pi^{*}f}\lambda, J)$ with Hofer energy\footnote{ We remark that finiteness of the energy here does not immediately imply that the cylinders approach periodic orbits because $W$, being equipped with an exact symplectic form, is necessarily noncompact. Consider, for example, the symplectic manifold $(\mathbb{R}^{2}, dx\wedge dy=d(x\,dy))$ and the function $f(x, y)=\arctan x$. The pseudoholomorphic cylinders in the appropriate prequantization space covering gradient flow lines in the base have finite energy as a result of \eqref{e:energy-formula} since the function $f$ is bounded, but the cylinders do not approach periodic orbits since the function $f$ has no critical points. } \begin{equation}\label{e:energy-formula} E(u)=2\pi \lim_{s\to\infty}e^{f(\gamma(s))}\in [0, +\infty]. \end{equation} \end{theorem}
\begin{proof} We compute using \eqref{e:horizontal-lift} \begin{align*} \tilde u_{s}(s, t) &=\dot a(s)\,\partial_{a}+\dot\theta(s)\,\partial_{\theta}+\dot\gamma(s) \\ &=2\pi e^{f(\gamma(s))}\,\partial_{a}-2\pi \beta(\nabla f(\gamma(s)))\,\partial_{\theta}+2\pi \nabla f(\gamma(s)) \\ &=2\pi \bp{e^{f(\gamma(s))}\,\partial_{a}+\widetilde{\nabla f}(u(s, t))} \intertext{and similarly using \eqref{e:reeb-deformed}} \tilde u_{t}(s, t) &=2\pi \partial_{\theta} \\ &=2\pi \bp{\partial_{\theta}-\widetilde{X_{f}}}(u(s, t))+2\pi \widetilde{X_{f}}(u(s, t)) \\ &=2\pi \bp{e^{f(\gamma(s))}X_{e^{f}\lambda}(u(s,t))+\widetilde{X_{f}}(u(s, t))}. \end{align*} It then follows from the definition \eqref{e:R-invariant-J} of the $\mathbb{R}$-invariant extension of $J$ to an almost complex structure on $\mathbb{R}\times S^{1}\times W$ and from \eqref{e:ham-grad-lift}, that $\tilde u$ satisfies the pseudoholomorphic map equation \eqref{e:j-hol}.
It remains to compute the Hofer energy. To do that, we first compute \begin{align*} u^{*}\lambda &=\lambda(u_{s})\,ds+\lambda(u_{t})\,dt \\ &=\lambda(\widetilde{\nabla f})\,ds+\lambda(2\pi \partial_{\theta})\,dt \\ &=2\pi dt. \end{align*} We then consider a smooth, increasing function $\varphi:\mathbb{R}\to[0, 1]$ with $\lim_{s\to\infty}\varphi(s)=1$ and $\lim_{s\to-\infty}\varphi(s)=0$, and compute \begin{equation}\label{e:energy-computation} \begin{aligned} \int_{[s_{0}, s_{1}]\times\mathbb{S}^{1}}\tilde u^{*}d(\varphi e^{\pi^{*}f}\lambda) &=\int_{[s_{0}, s_{1}]\times\mathbb{S}^{1}}d(\varphi(a) e^{f(\gamma)}2\pi dt) \\ &=\bp{\int_{\br{s_{1}}\times\mathbb{S}^{1}}-\int_{\br{s_{0}}\times\mathbb{S}^{1}}}2\pi \varphi(a) e^{f(\gamma)}\,dt \\ &=2\pi \bp{\varphi(a(s_{1}))e^{f(\gamma(s_{1}))}-\varphi(a(s_{0}))e^{f(\gamma(s_{0}))}}. \end{aligned}
\end{equation} From \eqref{e:ode-gamma} and \eqref{e:ode-a} we know that the function $e^{f\circ\gamma}$ is increasing and $a$ is strictly increasing with increasing derivative. Thus $\lim_{s\to\infty}a(s)=+\infty$ and we can conclude that \[ \lim_{s_{1}\to\infty}\varphi(a(s_{1}))e^{f(\gamma(s_{1}))} =\bp{\lim_{s_{1}\to\infty}\varphi(a(s_{1}))}\bp{\lim_{s_{1}\to\infty}e^{f(\gamma(s_{1}))}} =\varphi(+\infty)\lim_{s_{1}\to\infty}e^{f(\gamma(s_{1}))} =\lim_{s_{1}\to\infty}e^{f(\gamma(s_{1}))}. \] Again using that $e^{f\circ\gamma}$ is increasing, we know that $\lim_{s_{0}\to-\infty}e^{f(\gamma(s_{0}))}$ exists and is either positive or zero. If this limit is positive, we know from \eqref{e:ode-a} that $\lim_{s_{0}\to-\infty}a(s_{0})=-\infty$ and hence that $\lim_{s_{0}\to-\infty}\varphi(a(s_{0}))=0$. In either case, we conclude that \[ \lim_{s_{0}\to-\infty}\varphi(a(s_{0}))e^{f(\gamma(s_{0}))} =0 \] since this is the limit of a product of two bounded, nonnegative functions, at least one of which tends to $0$ as $s_{0}\to-\infty$. Hence, taking limits in \eqref{e:energy-computation} above leads to \[ \int_{\mathbb{R}\times\mathbb{S}^{1}}\tilde u^{*}d(\varphi e^{f}\lambda)=2\pi \lim_{s\to\infty}e^{f(\gamma(s))} \] for any $\varphi\in\Xi$, which establishes $E(u)=2\pi \lim_{s\to\infty}e^{f(\gamma(s))}$ as claimed. \end{proof}
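The following short Python sketch (purely illustrative; the forward-Euler stepping, step size, and iteration count are arbitrary choices of ours) integrates the system \eqref{e:ode-gamma}--\eqref{e:ode-a} for the example from the footnote to Theorem~\ref{t:gradient-flow-hol-cylinders}, namely $(W, \omega)=(\mathbb{R}^{2}, d(x\,dy))$ with the standard $j$ and $f(x, y)=\arctan x$. In this case $g_{j}$ is the Euclidean metric, so $\nabla f=((1+x^{2})^{-1}, 0)$ and $\beta(\nabla f)=x\,dy(\nabla f)=0$, and the printed quantity $2\pi e^{f(\gamma(s))}$ slowly approaches the value $2\pi e^{\pi/2}$ predicted by \eqref{e:energy-formula}.
\begin{verbatim}
# Illustrative forward-Euler integration of (e:ode-gamma)-(e:ode-a) for
# W = R^2, beta = x dy, f(x, y) = arctan(x), standard j (g_j Euclidean).
import math

TWO_PI = 2.0 * math.pi
x, y, theta, a = 0.0, 0.0, 0.0, 0.0   # arbitrary initial data
ds = 1e-2

for k in range(1, 500_001):
    gx, gy = 1.0 / (1.0 + x * x), 0.0     # grad f = (1/(1+x^2), 0)
    beta_grad = x * gy                    # beta = x dy, so beta(grad f) = x*gy = 0
    x, y = x + ds * TWO_PI * gx, y + ds * TWO_PI * gy
    theta -= ds * TWO_PI * beta_grad      # (e:ode-theta)
    a += ds * TWO_PI * math.exp(math.atan(x))  # (e:ode-a)
    if k % 100_000 == 0:
        print("s =", k * ds, "  2*pi*exp(f) =", TWO_PI * math.exp(math.atan(x)))

print("limit predicted by (e:energy-formula):", TWO_PI * math.exp(math.pi / 2.0))
\end{verbatim}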
\section{A gradient flow with a $1$-dimensional limit set}\label{s:gradient}
In this section we construct a function so that the omega limit set of all of its nontrivial gradient flow lines is diffeomorphic to a circle. This function will be used in the next section in conjunction with Theorem \ref{t:gradient-flow-hol-cylinders} above to construct finite-energy cylinders and planes localized near a transverse knot which have tori as limit sets.
The main theorem of this section is the following. \begin{theorem}\label{t:function-construction} For any $\delta>0$, there exists a smooth function $F_{\delta}:\mathbb{R}\times S^{1}(\approx\mathbb{R}/2\pi\mathbb{Z}) \to \mathbb{R}$ so that \begin{itemize} \item $F_{\delta}(s, t)=s$ for $s\le-\delta$, \item $s\le F_{\delta}(s, t)< 0$ for $s\in(-\delta, 0)$, \item $F_{\delta}(s, t)=0$ for $s\ge 0$, \item $dF_{\delta}(s, t)\ne 0$ for $s<0$, \end{itemize} and so that for any choice of Riemannian metric on $\mathbb{R}\times S^{1}$, the solution to the initial value problem \[ \gamma'(\tau)=\nabla F_{\delta}(\gamma(\tau)) \qquad \gamma(0)=(s_{0},t_{0}) \text{ with $s_{0}<0$} \] exists for all $\tau\ge 0$ and has the circle $\br{0}\times S^{1}$ as its omega limit set. \end{theorem}
\begin{remark} The fact that there exist gradient flows with omega limit sets consisting of more than a single point has been known for some time and a qualitative description of a function like the one we construct below is given in \cite[pg.\ 261]{curry}. In \cite[Example 3, pgs.\ 13-14]{palis-demelo} a function in $\mathbb{R}^{2}$ is given for which it can be shown that there is at least one gradient flow line whose omega limit set is a circle.
An interesting feature of the functions $F_{\delta}$ provided by our theorem is that every nontrivial flow line, independent of the metric, has the circle $\br{0}\times S^{1}$ as its omega limit set. Since the behavior of the functions $F_{\delta}$ is especially simple outside of a neighborhood of this limit set, this allows for a good deal of flexibility in constructing functions on a given Riemannian manifold whose gradients will have flow lines having a circle as an omega limit set. For example, it is a straightforward corollary of this theorem that one can construct smooth functions on any Riemannian $2$-manifold $(M^2, g)$ having any desired embedded circle as the limit set of some gradient flow line. Indeed, we can either identify a neighborhood of a given embedded circle with $(-\varepsilon, \varepsilon)\times \mathbb{R}/2\pi\mathbb{Z}$ or, in the nonorientable case, we can identify a double cover of a neighborhood of the circle with $(-\varepsilon, \varepsilon)\times\mathbb{R}/2\pi\mathbb{Z}$ with the nontrivial deck-transformation of the cover being given by the map $(s, t)\mapsto (-s, t+\pi)$. We then consider the function $G(s, t):=F_{\varepsilon/2}(s, t)+F_{\varepsilon/2}(-s, t+\pi)$ which is invariant under the action of the deck transformation in the nonoriented case, agrees with $-\abs{s}$ for $\abs{s}\in (\varepsilon/2, \varepsilon)$, and which will have $\br{0}\times S^{1}$ as the omega limit set of any gradient flow line starting in the neighborhood. The function can then be extended to a smooth function on the entire surface using an appropriate cutoff function. \end{remark}
We will construct the function in the following paragraph and prove that it has the required properties in a series of lemmas. Throughout this section we will make no notational distinction between smooth functions with domain $\mathbb{R}\times S^{1}\approx\mathbb{R}\times\mathbb{R}/2\pi\mathbb{Z}$ and functions on $\mathbb{R}^{2}$ which are $2\pi$-periodic in the second variable.
We consider the function $G:\mathbb{R}\times \mathbb{R}/2\pi\mathbb{Z}=\br{(s, t)}\to\mathbb{R}$ defined by\footnote{ As will become clear from our proof, the $5/4$ in our example can be replaced with any constant strictly bigger than $1$ and strictly less than $\sqrt{2}$. } \[ G(s, t)= \begin{cases} e^{1/s}\bp{\sin(1/s+t)-5/4} & s<0 \\ 0 & s\ge 0 \end{cases} \] and we note that $G$ is smooth and that\footnote{ The left-most part of this inequality can be seen from the following argument. To show that $-\frac{9}{4}e^{1/s}-s$ is positive for all $s<0$, it suffices to show that $g(t)=\frac{9}{4}te^{t}+1$ is positive for all $t<0$. A straightforward argument using single-variable calculus then shows that $g(-1)=-\frac{9}{4}e^{-1}+1>0$ is the absolute minimum of the function $g$ on $\mathbb{R}$. } \begin{equation}\label{e:bounds-G} s< -\tfrac{9}{4} e^{1/s} \le G(s, t)\le -\tfrac{1}{4}e^{1/s} \text{ for all $s<0$.} \end{equation} For a given value $\delta>0$ we let $\eta:\mathbb{R}\to[0, 1]$ be a smooth cut-off function satisfying \[ \eta(s)= \begin{cases} 0 & s<-\delta \\ 1& s>-\delta/2 \end{cases} \] and $\eta'(s)\ge 0$ everywhere and define the function $F:\mathbb{R}\times\mathbb{R}/2\pi\mathbb{Z}\to\mathbb{R}$ by \begin{equation}\label{e:F-definition} F(s, t)=(1-\eta(s))s+\eta(s)G(s, t). \end{equation} We observe that this definition with \eqref{e:bounds-G} implies that \begin{equation}\label{e:bounds-F} s \le F(s, t)\le -\tfrac{1}{4}e^{1/s} \text{ for all $s<0$} \end{equation} so the first three properties required of $F_{\delta}$ in the theorem are clearly satisfied. The fourth property, concerning the critical set, is then addressed by the following lemma.
\begin{lemma}\label{l:F-crit-set} The set of critical points of the function $F$ defined above is the set $\br{s\ge 0}$. \end{lemma}
\begin{proof} We first compute for $s<0$ \begin{equation}\label{e:G-s} \begin{aligned} G_{s}(s, t) &=-1/s^{2}e^{1/s}\bp{\sin(1/s+t)+\cos(1/s+t)-\tfrac{5}{4}} \\ &=-1/s^{2}e^{1/s}\bp{\sqrt{2}\sin(1/s+t+\tfrac{\pi}{4})-\tfrac{5}{4}} \\ \end{aligned} \end{equation} and \begin{equation}\label{e:G-t} G_{t}(s, t) =e^{1/s}\cos(1/s+t) \end{equation} and note then that \begin{align*} dG(s^{2}\,\partial_{s}+\partial_{t}) &=s^{2}G_{s}+G_{t} \\ &=-G \end{align*} which is everywhere positive for $s<0$. We then compute \begin{align*} dF(s, t) &=\eta'(s)(G(s, t)-s)\,ds+(1-\eta(s))\,ds+\eta(s)dG(s, t) \end{align*} and thus \[ dF(s^{2}\,\partial_{s}+\partial_{t}) =s^{2}\eta'(s)(G(s, t)-s)+(1-\eta(s))s^{2}+\eta(s)(-G), \] which we claim is always positive for $s<0$. Indeed the first term is always nonnegative since, as observed above in \eqref{e:bounds-G}, $G(s, t)\ge-\frac{9}{4}e^{1/s}>s$ for all $s<0$. Meanwhile the second two terms are the convex sum of positive quantities and thus always positive. We've thus found a vector field $v=s^{2}\,\partial_{s}+\partial_{t}$ for which $dF(v)>0$ for $s<0$ which shows that $F$ has no critical points for $s<0$, and hence the critical set of $F$ is $s\ge 0$ where $F$ vanishes identically. \end{proof}
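The identity $dG(s^{2}\,\partial_{s}+\partial_{t})=-G$ used in this proof can also be confirmed symbolically; the following throwaway check (the use of \texttt{sympy} is an incidental choice) is included only as a sanity test of the computation.
\begin{verbatim}
# Sanity check of the identity s^2*G_s + G_t = -G for
# G(s, t) = exp(1/s)*(sin(1/s + t) - 5/4), s < 0.
import sympy as sp

s, t = sp.symbols('s t', real=True)
G = sp.exp(1 / s) * (sp.sin(1 / s + t) - sp.Rational(5, 4))
print(sp.simplify(s**2 * sp.diff(G, s) + sp.diff(G, t) + G))   # expect 0
\end{verbatim}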
As an immediate corollary we are able to show that the $\mathbb{R}$-component of any nontrivial gradient flow line of $F$ converges to $0$ in forward time.
\begin{lemma}\label{l:F-forward-time} For an arbitrary Riemannian metric $g$ on $\mathbb{R}\times S^{1}$ and a point $(s_{0}, t_{0})\in\mathbb{R}^{-}\times S^{1}$, the solution $\gamma(\tau)=(s(\tau), t(\tau))\in\mathbb{R}\times S^{1}$ to \begin{equation}\label{e:gradient-flow-ode} \gamma'(\tau)=\nabla^{g} F(\gamma(\tau)) \qquad \gamma(0)=(s_{0}, t_{0}) \end{equation} exists for all $\tau\ge 0$ and $\lim_{\tau\to\infty}s(\tau)=0$. \end{lemma}
\begin{proof} Since $dF(s, t)=0$ for $s\ge 0$ and $F(s, t)$ agrees with $s$ for $s< -\delta$, we know that any solution to \eqref{e:gradient-flow-ode} stays in a compact set of the form $[a, 0]\times S^{1}$ in forward time, which implies that the solution exists for all $\tau\ge 0$. Given that the solution $\gamma(\tau)$ exists and is bounded in forward time, we know from general properties of gradient flows that $\lim_{\tau\to\infty}F(\gamma(\tau))$ exists and is equal to a critical value of $F$. Since we have just seen in Lemma \ref{l:F-crit-set} that $0$ is the unique critical value of $F$, we conclude $\lim_{\tau\to\infty}F(\gamma(\tau))=0$. This with \eqref{e:bounds-F} implies that $\lim_{\tau\to\infty}s(\tau)=0$. \end{proof}
The key step to proving the claim about the omega limit sets of flow lines of $F$ is the following lemma.
\begin{lemma}\label{l:z-bounded} Let $\gamma(\tau)=(s(\tau), t(\tau))$ be a solution to \eqref{e:gradient-flow-ode}, and let $\tilde t:\mathbb{R}^{+}\to\mathbb{R}$ be a choice of lift of $t:\mathbb{R}^{+}\to S^{1}$. Then the function $z:\mathbb{R}^{+}\to\mathbb{R}$ defined by \[ z(\tau)=\frac{1}{s(\tau)}+\tilde t(\tau) \] is bounded. \end{lemma}
\begin{proof} Let \[ \begin{bmatrix} A(s, t) & B(s, t) \\ B(s, t) & C(s, t) \end{bmatrix} \] be the matrix of the dual metric to $g$ with respect to the coordinate basis $\br{ds, dt}$ for $T^{*}(\mathbb{R}\times S^{1})$, and note that positive definiteness tells us that $A(s, t)$ and $C(s, t)$ are positive for all $(s, t)\in\mathbb{R}\times S^{1}$. Furthermore, since $\gamma(\tau)$ remains in a compact region for all $\tau\ge 0$, we can conclude that the functions $A(\tau):=A(s(\tau), t(\tau))$, $B(\tau):=B(s(\tau), t(\tau))$, and $C(\tau):=C(s(\tau), t(\tau))$ are bounded and that $A(\tau)$ and $C(\tau)$ are bounded away from zero (or, equivalently, that $A^{-1}(\tau)$ and $C^{-1}(\tau)$ are bounded).
From Lemma \ref{l:F-forward-time} and the definition \eqref{e:F-definition} of $F$ it follows that $F(\gamma(\tau))=G(\gamma(\tau))$ for sufficiently large $\tau$. For such values of $\tau$ we use \eqref{e:G-s}-\eqref{e:G-t} with the boundedness of $A(\tau)$, $B(\tau)$, $C(\tau)$, and $A^{-1}(\tau)$ to compute \begin{align*} s'&=A(s, t) G_{s}(s, t)+B(s, t) G_{t}(s, t) \\ &=-s^{-2}e^{1/s}A(s, t)\bp{\sqrt{2}\sin(1/s+t+\tfrac{\pi}{4})-\tfrac{5}{4}+O(s^{2})} \intertext{and} \tilde t' &=t' \\ &=B(s, t)G_{s}(s, t)+C(s,t)G_{t}(s, t) \\ &=s^{-4}e^{1/s}A(s, t)O(s^{2}) \end{align*} with $O(s^{2})$ denoting, as usual, a function $h$ for which $s^{-2}h(s, t)$ remains bounded on a deleted neighborhood of $s=0$. We then have that \begin{align*} z' &=-s^{-2}s'+\tilde t' \\ &=s^{-4}e^{1/s}A(s, t)\bp{\sqrt{2}\sin(1/s+ t+\tfrac{\pi}{4})-\tfrac{5}{4}+O(s^{2})} \\ &=s^{-4}e^{1/s}A(s, t)\bp{\sqrt{2}\sin(z+\tfrac{\pi}{4})-\tfrac{5}{4}+O(s^{2})} \end{align*} and so, for sufficiently large values of $\tau$ (and thus sufficiently small values of $s(\tau)$), we'll have that \begin{equation}\label{e:z-inequality} \sqrt{2}\sin(z(\tau)+\tfrac{\pi}{4})-\tfrac{11}{8} \le \bp{s(\tau)^{-4}e^{1/s(\tau)}A(\tau)}^{-1}z'(\tau) \le \sqrt{2}\sin(z(\tau)+\tfrac{\pi}{4})-\tfrac{9}{8}. \end{equation} We claim this lets us conclude that $z(\tau)$ is bounded. Indeed, since $\frac{9}{8}\in(-\sqrt{2}, \sqrt{2})$, the solution set to the inequality \[ \sqrt{2}\sin(z+\tfrac{\pi}{4})-\tfrac{9}{8}<0 \] is a countable union of intervals which is invariant under translation by $2\pi\mathbb{Z}$. By \eqref{e:z-inequality}, $z(\tau)$ can't cross these intervals in the positive direction once $\tau$ is sufficiently large for \eqref{e:z-inequality} to hold. Similarly, since $\frac{11}{8}\in(-\sqrt{2}, \sqrt{2})$, $z(\tau)$ can't cross the intervals where \[ \sqrt{2}\sin(z+\tfrac{\pi}{4})-\tfrac{11}{8}>0 \] in the negative direction once $\tau$ is sufficiently large. We conclude that $z(\tau)$ is bounded for $\tau\in[0, \infty)$. \end{proof}
We now complete the proof of the main theorem of the section \begin{proof}[Proof of Theorem \ref{t:function-construction}] By construction and Lemma \ref{l:F-crit-set}, $F$ satisfies all required properties, and it remains to show that the omega limit set of a solution to \eqref{e:gradient-flow-ode} is the circle $\br{0}\times S^{1}$. Let $\gamma(\tau)=(s(\tau), t(\tau))$ be a solution to \eqref{e:gradient-flow-ode} and let $\tilde t:\mathbb{R}^{+}\to\mathbb{R}$ be a lift of $t:\mathbb{R}^{+}\to S^{1}$. We have shown in Lemma \ref{l:z-bounded} above that the function $z=1/s+\tilde t$ is bounded on $[0, \infty)$. Since we know from Lemma \ref{l:F-forward-time} that $\lim_{\tau\to\infty}s(\tau)=0$ and since $s(\tau)<0$ for all $\tau\ge 0$, we can conclude that $\lim_{\tau\to\infty}\frac{1}{s(\tau)}=-\infty$. This in turn lets us conclude $\tilde t(\tau)$ approaches $+\infty$ as $\tau\to\infty$ or else $z$ would not be bounded. By continuity, the equation $\tilde t(\tau)=c$ has a solution $\tau_{c}$ for all $c\ge \tilde t(0)$. We then conclude that for any $\tau_{0}\in\mathbb{R}$ and any $t_{0}\in S^{1}$, there exists a $\tau_{t_{0}}>\tau_{0}$ so that $t(\tau_{t_{0}})=t_{0}$. This with the fact that $s(\tau)\to 0$ as $\tau\to\infty$ shows that $\br{0}\times S^{1}$ is the omega limit set of $\gamma$. \end{proof}
\section{Finite-energy cylinders and planes with tori as limit sets}\label{s:main-proof} Here we prove our main theorem, Theorem \ref{t:main-theorem}, that is, we construct examples of finite-energy cylinders and finite-energy planes having tori as limit sets. The constructions take place in an arbitrarily small tubular neighborhood of a standard model of a transverse knot, so we begin by recalling some basic facts about transverse knots and explaining why this construction suffices to prove the main theorem.
Let $(M^{2n+1}, \xi=\ker\lambda)$ be a contact manifold. An embedding $\gamma:S^{1}\to M$ is said to be a transverse knot if $\gamma$ is everywhere transverse to $\xi$ or, equivalently, if $\lambda(\dot\gamma)$ is never zero. Transverse knots exist in abundance in any contact manifold. Indeed, by the well-known Darboux theorem for contact structures, there exists a contactomorphism --- that is, a diffeomorphism preserving the contact structure --- between a neighborhood of any point in a contact manifold $(M^{2n+1}, \xi)$ and a neighborhood of $0$ in $\mathbb{R}^{2n+1}=\br{(z, x_{i}, y_{i})}$ equipped with the contact structure $\xi_{0}=\ker\lambda_{0}$ where $\lambda_{0}$ is the contact form \[ \lambda_{0}=dz+\alpha_{n} \] with \begin{equation}\label{e:alpha-defn} \alpha_{n}=\sum_{i=1}^{n}x_{i}\,dy_{i}-y_{i}\,dx_{i} \end{equation} (see e.g.\ \cite[Theorem 2.24]{geiges}). Since, for a given $k\in\mathbb{Z}\cap[1, n]$ and any constants $r>0$, $c$, $c_{i}$, $d_{i}\in\mathbb{R}$, circles of the form \[ \text{$z=c$, $x_{k}^{2}+y_{k}^{2}=r^{2}$, and $x_{i}=c_{i}$, $y_{i}=d_{i}$, for $i\ne k$} \] are easily seen to be transverse to the contact structure, we can conclude that transverse knots exist in every contact manifold and, indeed, that transverse knots exist in any neighborhood of a given point in a contact manifold.
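To spell out the easy verification for, say, $k=n$: parametrizing such a circle by $\varphi\mapsto(c, c_{1}, d_{1}, \dots, c_{n-1}, d_{n-1}, r\cos\varphi, r\sin\varphi)$, its tangent vector is $r\bp{-\sin\varphi\,\partial_{x_{n}}+\cos\varphi\,\partial_{y_{n}}}$, and
\[ \lambda_{0}\bp{r(-\sin\varphi\,\partial_{x_{n}}+\cos\varphi\,\partial_{y_{n}})} \ = \ x_{n}\, r\cos\varphi - y_{n}\,(-r\sin\varphi) \ = \ r^{2} \ > \ 0, \]
so such a circle is indeed everywhere transverse to $\xi_{0}$.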
We next recall that one can use a Moser argument to prove a neighborhood theorem for transverse knots which tells us that there exists a contactomorphism between some neighborhood of any given transverse knot and a neighborhood of $S^{1}\times\br{0}$ in $S^{1}\times\mathbb{R}^{2n}=\br{(\theta, x_{i}, y_{i})}$ equipped with the contact structure $\xi_{0}=\ker \lambda_{0}$ where \begin{equation}\label{e:transverse-standard} \lambda_{0}=d\theta+\alpha_{n} \end{equation} with $\alpha_{n}$ as defined in \eqref{e:alpha-defn} above (see e.g.\ \cite[Theorem 2.32/Example 2.33]{geiges}). We will refer to $S^{1}\times\br{0}\subset (S^{1}\times\mathbb{R}^{2n}, \xi_{0})$ as the standard model of a transverse knot in $S^{1}\times\mathbb{R}^{2n}$.
Given the facts recalled in the previous two paragraphs, it suffices for the proof of our main theorem to construct the desired finite-energy planes and cylinders in any given neighborhood of the standard model of a transverse knot in $S^{1}\times\mathbb{R}^{2n}$. Since $d\alpha_{n}=2\sum_{i=1}^{n}dx_{i}\wedge dy_{i}$ is a symplectic form on $\mathbb{R}^{2n}$, $S^{1}\times\mathbb{R}^{2n}$ equipped with the contact form \eqref{e:transverse-standard} has the structure of a prequantization space. We can thus apply Theorems \ref{t:gradient-flow-hol-cylinders} and \ref{t:function-construction} to construct a finite-energy cylinder having tori of periodic orbits as its limit sets.
\begin{theorem} Let $r_{+}>r_{-}>0$. Then there exists a smooth function $F:S^{1}\times\mathbb{R}^{2n}\to\mathbb{R}$ and an almost complex structure $J\in\mathcal{J}(S^{1}\times\mathbb{R}^{2n}, \xi_{0})$ so that passing through every point $(\theta_{0}, p, z)\in S^{1}\times \mathbb{R}^{2(n-1)}\times \mathbb{R}^{2}$ with $\abs{z}\in (r_{-}, r_{+})$ is a finite-energy cylinder for the data $(e^{F}\lambda, J)$ with limit set equal to the union of the tori $S^{1}\times\br{p}\times\bp{\br{\abs{z}=r_{-}}\cup\br{\abs{z}=r_{+}}} \subset S^{1}\times\mathbb{R}^{2(n-1)}\times\mathbb{R}^{2}$. \end{theorem}
\begin{proof} With $F_{\delta}$ a function with the properties stated in Theorem \ref{t:function-construction}, we consider a function $G:\mathbb{R}\times S^{1}\to\mathbb{R}$ defined by \[ G(\rho, \phi)= \begin{cases} F_{1/4}(\rho, \phi) & \rho>-3/4 \\ -F_{1/4}(-\rho-1, \phi)-1 & \rho<-1/4 \end{cases} \] which defines a smooth function since $F_{1/4}(\rho, \phi)=\rho=-F_{1/4}(-\rho-1, \phi)-1$ for $\rho\in [-3/4, -1/4]$. For any initial condition $(\rho_{0}, \phi_{0})\in [-3/4, -1/4]\times S^{1}$ the forward gradient flow of $G$ for any metric agrees with that of $F_{1/4}$ and thus limits to $\br{0}\times S^{1}$. Similarly, for any initial condition $(\rho_{0}, \phi_{0})\in [-3/4, -1/4]\times S^{1}$ the backward gradient flow of $G$ for any metric agrees with that of $-F_{1/4}(-\rho-1, \phi)-1$ which is conjugated to the forward gradient flow of $F_{1/4}$ by reflection and translation and thus limits in backward time to $\br{-1}\times S^{1}$.
We consider the diffeomorphism $p:\mathbb{R}\times\mathbb{R}/2\pi\mathbb{Z}\to\mathbb{R}^{2}\setminus\br{0}$ defined by \[ p(\rho, \phi)=(r_{+}(r_{+}/r_{-})^{\rho}\cos \phi, r_{+}(r_{+}/r_{-})^{\rho}\sin \phi) \] which maps the circles $\br{-1}\times S^{1}$ and $\br{0}\times S^{1}$ to the circles $\abs{z}=r_{-}$ and $\abs{z}=r_{+}$ respectively. We then define a function $F:\mathbb{R}^{2n}\to\mathbb{R}$ by \[ F(x_{1}, y_{1}, \dots, x_{n}, y_{n})= \begin{cases} G(p^{-1}(x_{n}, y_{n})) & (x_{n}, y_{n})\ne 0 \\ -1 & (x_{n}, y_{n})= 0 \end{cases} \] which defines a smooth function since $G(\rho, \phi)=-F_{1/4}(-\rho-1, \phi)-1$ for $\rho\le -1$ (where $F_{1/4}(-\rho-1, \phi)=0$) and hence $G(p^{-1}(z))=-1$ for $\abs{z}<r_{-}$. We observe that for any metric on $\mathbb{R}^{2n}\approx\mathbb{R}^{2(n-1)}\times\mathbb{R}^{2}$ for which the tangent spaces of the last factor $\mathbb{R}^{2}$ are everywhere orthogonal to those of $\mathbb{R}^{2(n-1)}$ we will have that $\nabla F=(0, \nabla G)\in \mathbb{R}^{2(n-1)}\times\mathbb{R}^{2}$ and thus the gradient flow of $F$ for initial points $(p, z)\in\mathbb{R}^{2(n-1)}\times\mathbb{R}^{2}$ with $\abs{z}\in(r_{-}, r_{+})$ will have the circles $\abs{z}=r_{-}$ and $\abs{z}=r_{+}$ as limit sets.
Choosing then an almost complex structure $J\in\mathcal{J}(\mathbb{R}^{2n}, d\alpha_{n})$ on $\mathbb{R}^{2(n-1)}\times \mathbb{R}^{2}$ which preserves the two factors (for example the standard $J_{0}$ defined by $J_{0}\partial_{x_{i}}=\partial_{y_{i}}$), we know from Theorem \ref{t:gradient-flow-hol-cylinders} that gradient flow lines of $F$ on $\mathbb{R}^{2n}$ with respect to the metric $d\alpha_{n}(\cdot, J\cdot)$ lift to finite-energy cylinders in $\mathbb{R}\times S^{1}\times\mathbb{R}^{2n}$ for the data $(e^{\pi^{*}F}\lambda, J)$. Since the nonconstant gradient flow lines for the function $F$ will have the circles $\abs{z}=r_{\pm}$ as limit sets, the corresponding finite-energy cylinders in $S^{1}\times\mathbb{R}^{2n}$ will have the tori $S^{1}\times\br{p}\times\br{\abs{z}=r_{\pm}}$ as limit sets. \end{proof}
Using Theorem \ref{t:gradient-flow-hol-cylinders} to construct a finite-energy plane with a torus as a limit set is somewhat more subtle since the theorem only tells us how to construct a cylinder from a gradient flow line. To construct a plane we will use the theorem to construct a cylinder with a removable singularity.
\begin{theorem}\label{t:plane-construction} Let $r_{0}>0$. Then there exists a smooth function $\tilde F:S^{1}\times\mathbb{R}^{2n}\to\mathbb{R}$ and an almost complex structure $J\in\mathcal{J}(S^{1}\times\mathbb{R}^{2n}, \xi_{0})$ so that passing through every point $(\theta_{0}, 0, z)\in S^{1}\times \mathbb{R}^{2(n-1)}\times \mathbb{R}^{2}$ with $\abs{z}<r_{0}$ is a finite-energy plane for the data $(e^{\tilde F}\lambda_{0}, J)$ with limit set equal to the embedded torus $S^{1}\times\br{0}\times\br{\abs{z}=r_{0}}\subset S^{1}\times\mathbb{R}^{2(n-1)}\times\mathbb{R}^{2}$. \end{theorem}
The strategy of the proof is to consider a set which is contactomorphic to the complement of the $x_{n}=y_{n}=0$ locus of a standard model of a transverse knot and show that this can be given the structure of a prequantization space with respect to the angular variable on $\br{(x_{n}, y_{n})}\setminus\br{0}$. We then use Theorems \ref{t:gradient-flow-hol-cylinders} and \ref{t:function-construction} to construct a pseudoholomorphic cylinder which has a removable puncture mapped to the $x_{n}=y_{n}=0$ locus.
We begin with a computational lemma. \begin{lemma}\label{l:plane-construction} Consider $W:=S^{1}\times\mathbb{R}^{2(n-1)}\times\mathbb{R}=\br{(\theta, x_{i}, y_{i}, \rho)}$ equipped with the $1$-form \begin{equation}\label{e:beta-planes} \beta=e^{-2\rho}\bp{d\theta+\alpha_{n-1}} \end{equation} with $\alpha_{n-1}$ as defined in \eqref{e:alpha-defn}. Then: \begin{itemize} \item $d\beta$ is a symplectic form on $W$. \item Consider the corresponding prequantization space $(S^{1}\times W, \lambda:=d\phi+\pi^{*}\beta)$ over $W$. With $\lambda_{0}$ as defined in \eqref{e:transverse-standard}, the map \[ \Phi:(S^{1}\times W, \xi=\ker\lambda)\to (S^{1}\times \mathbb{R}^{2(n-1)}\times(\mathbb{R}^{2}\setminus\br{0}), \xi_{0}=\ker\lambda_{0}) \] defined by \begin{equation}\label{e:Phi-definition} \Phi(\phi, \theta, x_{i}, y_{i}, \rho)=(\theta, x_{i}, y_{i}, e^{\rho}\cos\phi, e^{\rho}\sin\phi) \end{equation} is a contactomorphism and, in particular, \begin{equation}\label{e:lambda-0-pullback} \Phi^{*}\lambda_{0}=e^{2\rho}\lambda. \end{equation} \item For any choice of $j_{0}\in\mathcal{J}(\mathbb{R}^{2(n-1)}, d\alpha_{n-1})$ the endomorphism $j_{1}\in\operatorname{End}(TW)$ defined by \begin{equation}\label{e:j1-definition} \begin{gathered} j_{1}(\theta, p, \rho)\partial_{\rho}=-e^{2\rho}\partial_{\theta}, \qquad j_{1}(\theta, p, \rho)\partial_{\theta}=e^{-2\rho}\partial_{\rho}, \text{ and} \\ j_{1}(\theta, p, \rho)v=j_{0}(p)v-\alpha_{n-1}(j_{0}(p)v)\,\partial_{\theta}+\alpha_{n-1}(v)e^{-2\rho}\,\partial_{\rho} \text{ for $v\in T\mathbb{R}^{2(n-1)}$}. \end{gathered} \end{equation} is an almost complex structure on $W$ compatible with $d\beta$, i.e.\ $j_{1}\in\mathcal{J}(W, d\beta)$, and the corresponding metric $g_{j_{1}}:=d\beta\circ(I\times j_{1})$ on $W$ is given by \begin{equation}\label{e:metric-plane} g_{j_{1}} = 2 \,d\rho\otimes d\rho +2e^{-4\rho}(d\theta+\alpha_{n-1})\otimes\bp{d\theta+\alpha_{n-1}} +e^{-2\rho}d\alpha_{n-1}\circ (I\times j_{0}). \end{equation} \item Let $\tilde j_{1}\in\mathcal{J}(S^{1}\times W, \xi)$ be the $S^{1}$-invariant complex structure on $\xi$ determined by $j_{1}$ as defined above, i.e.\ $\tilde j$ is the complex structure characterized by $\widetilde{j_{1}v}=\tilde j_{1}\tilde v$ with \begin{equation}\label{e:lift-planes-proof} \tilde v=-\beta(v)\partial_{\phi}+v=-e^{-2\rho}\bp{d\theta(v)+\alpha_{n-1}(v)}\partial_{\phi}+v \end{equation} the lift of $v$ to an $S^{1}$-invariant section of $\xi$ from \eqref{e:horizontal-lift}. Then $\Phi_{*}\tilde j_{1}=d\Phi\circ \tilde j_{1}\circ d\Phi^{-1}\in\mathcal{J}(S^{1}\times \mathbb{R}^{2(n-1)}\times\mathbb{R}^{2}\setminus\br{0}, \xi_{0})$ admits a smooth extension to a compatible $J\in\mathcal{J}(S^{1}\times\mathbb{R}^{2n}, \xi_{0})$. \end{itemize} \end{lemma}
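For orientation, we note that \eqref{e:lambda-0-pullback} comes down to the following short computation: writing $x_{n}=e^{\rho}\cos\phi$ and $y_{n}=e^{\rho}\sin\phi$ for the last two components of $\Phi$, one has
\[ \Phi^{*}\bp{x_{n}\,dy_{n}-y_{n}\,dx_{n}} \ = \ e^{\rho}\cos\phi\, d(e^{\rho}\sin\phi) - e^{\rho}\sin\phi\, d(e^{\rho}\cos\phi) \ = \ e^{2\rho}\,d\phi \]
and therefore
\[ \Phi^{*}\lambda_{0} \ = \ d\theta+\alpha_{n-1}+e^{2\rho}\,d\phi \ = \ e^{2\rho}\bp{d\phi+e^{-2\rho}(d\theta+\alpha_{n-1})} \ = \ e^{2\rho}\lambda. \]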
Assuming for the moment the results of the lemma, we proceed with the proof of Theorem \ref{t:plane-construction}.
\begin{proof}[Proof of Theorem \ref{t:plane-construction}] Given $r_{0}>0$ we define a smooth function $G:W\to\mathbb{R}$ by \[ G(\theta, p, \rho)=2(F_{1}(\rho-\log r_{0}, \theta)+\log r_{0}) \] with $F_{1}$ a function satisfying the properties given in Theorem \ref{t:function-construction} with $\delta=1$. We note that as a result of the definition and of Theorem \ref{t:function-construction}, $G(\theta, p, \rho)=2\rho$ for $\rho<\log r_{0}-1$ and $G(\theta, p, \rho)=2\log r_{0}$ for $\rho\ge\log r_{0}$. Moreover, since $\alpha_{n-1}=0$ along the $p=(x_{1}, y_{1}, \dots, x_{n-1}, y_{n-1})=0$ locus, we have that \begin{equation}\label{e:G-gradient} \nabla G(\theta, 0, \rho)=\frac{1}{2}\bp{G_{\rho}\partial_{\rho}+G_{\theta}e^{4\rho}\partial_{\theta}} =\partial_{\rho} F_{1}(\rho-\log r_{0}, \theta)\partial_{\rho}+\partial_{\theta} F_{1}(\rho-\log r_{0}, \theta)e^{4\rho}\partial_{\theta} \end{equation} where $\nabla G$ is the gradient with respect to the metric \eqref{e:metric-plane} on $W$. Therefore, for any initial point $w_{0}=(\theta_{0}, 0, \rho_{0})\in S^{1}\times\mathbb{R}^{2(n-1)}\times\mathbb{R}$ with $\rho_{0}<\log r_{0}$, the solution $\gamma(s)$ to the equation \begin{equation}\label{e:ode-gamma-G} \dot\gamma(s)=2\pi\nabla G(\gamma(s)) \end{equation} stays within the embedded cylinder $S^{1}\times\br{0}\times\mathbb{R}\subset S^{1}\times\mathbb{R}^{2(n-1)}\times\mathbb{R}$ and agrees with a gradient flow for the function $(\rho, \theta)\in \mathbb{R}\times S^{1}\mapsto 2(F_{1}(\rho-\log r_{0}, \theta)+\log r_{0})$ for an appropriate metric on the cylinder. Thus the flow exists in forward time and has the circle $S^{1}\times\br{0}\times\br{\log r_{0}}\in S^{1}\times\mathbb{R}^{2(n-1)}\times\mathbb{R}$ as its omega limit set. Meanwhile, using the fact that $F_{1}(\rho-\log r_{0}, \theta)=\rho-\log r_{0}$ for $\rho<\log r_{0}-1$, we have from \eqref{e:G-gradient} that \[ \nabla G(\theta, 0, \rho)=\partial_{\rho} \qquad \text{ for $\rho<\log r_{0}-1$} \] and thus that the solution $\gamma$ to \eqref{e:ode-gamma-G} is given by $\gamma(s)=(\theta_{1}, 0, 2\pi s+s_{1})$ for sufficiently small $s$ with $\theta_{1}\in S^{1}$ and $s_{1}\in\mathbb{R}$ appropriate constants. Thus the flow exists indefinitely in backward time as well. Applying Theorem \ref{t:gradient-flow-hol-cylinders} we know that the map $\tilde u(s, t)=(a(s),\phi(s)+2\pi t, \gamma(s))\in\mathbb{R}\times S^{1}\times W$ where $a:\mathbb{R}\to\mathbb{R}$ and $\phi:\mathbb{R}\to S^{1}$ satisfy $\dot a(s)=2\pi e^{G(\gamma(s))}$ and $\dot\phi(s)=2\pi \beta(\nabla G(\gamma(s)))$ is a finite-energy cylinder for the data $(e^{\pi^{*}_{W}G}\lambda, \tilde j_{1})$ with energy $2\pi \lim_{s\to\infty}e^{G(\gamma(s))}=2\pi e^{2\log r_{0}}=2\pi r_{0}^{2}$ and the torus $S^{1}\times S^{1}\times \br{0}\times\br{\log r_{0}}$ as its limit set. Moreover, since $G(\theta, p, \rho)=2\rho$ and $\nabla G(\theta, p, \rho)=\partial_{\rho}$ for $\rho<\log r_{0}-1$, there exist constants $a_{1}\in\mathbb{R}$, $\theta_{1}\in S^{1}$, $s_{1}\in\mathbb{R}$, and $t_{1}\in S^{1}$ so that \begin{equation}\label{e:map-near-zero} \tilde u(s, t) =(\pi e^{4\pi s}+a_{1},t_{1}+2\pi t, \theta_{1},0, 2\pi s+s_{1}) \in\mathbb{R}\times S^{1}\times S^{1}\times\mathbb{R}^{2(n-1)}\times\mathbb{R} =\mathbb{R}\times S^{1} \times W. \end{equation} for sufficiently negative $s$.
We next show that the map $\tilde v:=(a, \Phi\circ u):\mathbb{R}\times\mathbb{S}^{1}\to\mathbb{R}\times S^{1}\times\mathbb{R}^{2n}$, with $\Phi:(S^{1}\times W, \xi) \to (S^{1}\times \mathbb{R}^{2n}, \xi_{0})$ the contactomorphism defined in \eqref{e:Phi-definition}, has a removable singularity at $-\infty$ and thus extends to a pseudoholomorphic plane. We first note that \eqref{e:lambda-0-pullback} gives us \[ [\Phi^{-1}]^{*}\bp{e^{(\pi_{W}^{*}G)}\lambda}=e^{(\pi_{W}^{*}G-2\rho)\circ\Phi^{-1}}\lambda_{0} \] and, since $G(\theta, p, \rho)=2\rho$ for $\rho<\log r_{0}-1$, we'll have that $\tilde F:=(\pi_{W}^{*}G-2\rho)\circ\Phi^{-1}$ extends to a smooth function on $S^{1}\times\mathbb{R}^{2n}$ and thus $[\Phi^{-1}]^{*}\bp{e^{(\pi_{W}^{*}G)}\lambda}=e^{\tilde F}\lambda_{0}$ defines a contact form on $S^{1}\times\mathbb{R}^{2n}$. Since, by Lemma \ref{l:plane-construction}, the pushed-forward complex structure $\Phi_{*}\tilde j_{1}=d\Phi\circ j_{1}\circ d\Phi^{-1}$ has a smooth extension to a $J\in\mathcal{J}(S^{1}\times\mathbb{R}^{2n}, \xi_{0})$, it suffices to show that the map $\tilde v=(a, \Phi\circ u)$ has a smooth extension. To see this, we use the definition \eqref{e:Phi-definition} with \eqref{e:map-near-zero} to compute that \[ \tilde v=(a, \Phi\circ u)=(\pi e^{4\pi s}+a_{1}, \theta_{0},0, e^{2\pi s+s_{1}}\cos(t_{1}+2\pi t), e^{2\pi s+s_{1}}\sin(t_{1}+2\pi t))\in \mathbb{R}\times S^{1}\times \mathbb{R}^{2(n-1)}\times\mathbb{R}^{2} \] for sufficiently negative $s$. Precomposing with the biholomorphic map $\psi:\mathbb{C}\setminus\br{0}\to\mathbb{R}\times S^{1}=\mathbb{C}/i\mathbb{Z}$ defined by \[ \psi(z)=(\log\abs{z}/2\pi, \arg{z}/2\pi) \] we find that \[ \tilde v(\psi(z))=(\pi\abs{z}^{2}+a_{1}, \theta_{0}, 0, e^{s_{1}+it_{1}}z) \in \mathbb{R}\times S^{1}\times \mathbb{R}^{2(n-1)}\times\mathbb{R}^{2}(\approx \mathbb{C}) \] which clearly extends smoothly over $z=0$. We note moreover that the limit set $S^{1}\times S^{1}\times \br{0}\times\br{\log r_{0}}\subset S^{1}\times S^{1}\times\mathbb{R}^{2(n-1)}\times\mathbb{R}$ gets mapped by $\Phi$ to the embedded torus $S^{1}\times\br{0}\times\br{\abs{z}= r_{0}}\in S^{1}\times\mathbb{R}^{2(n-1)}\times\mathbb{R}^{2}$, while the set of points $(\phi, \theta, 0, \rho)\in S^{1}\times S^{1}\times \mathbb{R}^{2(n-1)}\times\mathbb{R}$ with $\rho<\log r_{0}$ gets mapped by $\Phi$ to the set $(\theta, 0, z)\in S^{1}\times\mathbb{R}^{2(n-1)}\times\mathbb{R}^{2}$ with $\abs{z}\in(0, r_{0})$. Thus, since we were able, by appropriate choice of initial point of the flow of $\nabla G$, to construct a pseudoholomorphic cylinder for the data $(e^{\pi_{W}G}\lambda, \tilde j_{1})$ through any point $(\phi, \theta, 0, \rho)\in S^{1}\times S^{1}\times \mathbb{R}^{2(n-1)}\times\mathbb{R}$, we can construct a pseudoholomorphic plane for the data $(e^{\tilde F}\lambda_{0}, J)$ through any point $(\theta, 0, z)\in S^{1}\times\mathbb{R}^{2(n-1)}\times\mathbb{R}^{2}$ with $\abs{z}<r_{0}$ as desired. This completes the proof. \end{proof}
\begin{remark} If we choose the initial point in the proof of the theorem to be a point $(\theta_{0}, p_{0}, \rho_{0})\in S^{1}\times\mathbb{R}^{2(n-1)}\times\mathbb{R}$ with $p_{0}\ne 0$, one can still construct a finite-energy plane from the resulting flow line with a limit set consisting of more than a single orbit, although the limit set may be more complicated than a torus. Indeed, for a given choice of $j_{0}\in \mathcal{J}(\mathbb{R}^{2(n-1)}, d\alpha_{n-1})$, we let $g_{j_{0}}=d\alpha_{n-1}(\cdot, j_{0})$ denote the associated metric and observe that \[ \alpha_{n-1}(p)(v)=\frac{1}{2}d\alpha_{n-1}(p, v)=-\frac{1}{2}d\alpha_{n-1}(v, p)=\frac{1}{2}g_{j_{0}}(v, j_{0}p). \] Using this, one can compute the gradient of the function $G$ with respect to the metric \eqref{e:metric-plane} to be given by \begin{align*} \nabla G(\theta, p, \rho) &=2^{-1}\bp{ G_{\rho}\,\partial_{\rho}+e^{2\rho}G_{\theta}\bp{e^{2\rho}+\abs{p}_{j_{0}}}\,\partial_{\theta} - e^{2\rho}G_{\theta}j_{0}p } \\ &= \partial_{\rho} F_{1}(\rho-\log r_{0}, \theta)\,\partial_{\rho} +\partial_{\theta} F_{1}(\rho-\log r_{0}, \theta)e^{2\rho}\bp{e^{2\rho}+\abs{p}_{j_{0}}}\,\partial_{\theta} -\partial_{\theta} F_{1}(\rho-\log r_{0}, \theta) e^{2\rho} j_{0}p \end{align*} with $\abs{\cdot}_{j_{0}}$ the norm with respect to the metric $g_{j_{0}}$. We note that the $\mathbb{R}^{2(n-1)}$-component of $\nabla G(\theta, p, \rho)$ is always orthogonal to $p$. If the almost complex structure $j_{0}$ is constant, we can thus conclude that $\abs{p}_{j_{0}}$ is constant along the flow. Thus, the $\rho$- and $\theta$-components of the gradient flow for $G$ agree with a gradient flow for a shift of the function $F$ on $\mathbb{R}\times S^1$ for an appropriate metric (specifically the metric $g=d\rho\otimes d\rho+e^{-2\rho}\bp{e^{2\rho}+c^{2}}^{-1}d\theta\otimes d\theta$ with $c^{2}$ equal to the constant value of $\abs{p}_{j_{0}}$ along the flow line). From this one can argue that under the projection $S^{1}\times\mathbb{R}^{2(n-1)}\times\mathbb{R}^{2}\to S^{1}\times\br{0}\times\mathbb{R}^{2}$ the limit set of any plane obtained as a lift of a gradient flow of the function $G$ in our theorem will project to a torus. \end{remark}
Finally, to complete the proof of Theorem \ref{t:plane-construction}, we give the proof of Lemma \ref{l:plane-construction} above.
\begin{proof}[Proof of Lemma \ref{l:plane-construction}] We first show that $d\beta$ is a symplectic form. Computing, we have that \begin{equation}\label{e:d-beta} d\beta =e^{-2\rho}(-2\,d\rho\wedge d\theta-2\,d\rho\wedge\alpha_{n-1}+d\alpha_{n-1}) \end{equation} and hence \begin{align*} d\beta^{n} &=n\,e^{-2n\rho}(-2\,d\rho\wedge d\theta-2\,d\rho\wedge\alpha_{n-1})\wedge(d\alpha_{n-1})^{n-1} \\ &=-2n\,e^{-2n\rho}\,d\rho\wedge d\theta\wedge(d\alpha_{n-1})^{n-1}, \end{align*} where the remaining terms in the expansion vanish since they contain either two factors of $d\rho$ or a form of degree greater than $2(n-1)$ pulled back from $\mathbb{R}^{2(n-1)}$. The resulting $2n$-form is nowhere vanishing on $W=S^{1}\times \mathbb{R}^{2(n-1)}\times\mathbb{R}$. Hence $d\beta$ is a symplectic form on $W$ as claimed.
Next, we show that the map $\Phi:(W, \xi=\ker\lambda) \to (S^{1}\times\mathbb{R}^{2(n-1)}\times(\mathbb{R}^{2}\setminus\br{0}), \xi_{0}=\ker\lambda_{0})$ defined in \eqref{e:Phi-definition} is a contactomorphism satisfying \eqref{e:lambda-0-pullback}. From the definition \eqref{e:Phi-definition} of the map, it's clear that $\Phi$ is a diffeomorphism and that \[ \Phi^{*}d\theta=d\theta \qquad \Phi^{*}dx_{i}=dx_{i} \qquad \Phi^{*}dy_{i}=dy_{i} \] for $i\in\mathbb{Z}\cap[1, n-1]$, while a straightforward computation shows that \begin{equation}\label{e:Phi-pullback} \Phi^{*}(x_{n}\,dy_{n}-y_{n}\,dx_{n})=e^{2\rho}\,d\phi \qquad \Phi^{*}(x_{n}\,dx_{n}+y_{n}\,dy_{n})=e^{2\rho}\,d\rho. \end{equation} Computing then gives \begin{align*} \Phi^{*}\lambda_{0} &=d\theta+\alpha_{n-1}+\Phi^{*}(x_{n}\,dy_{n}-y_{n}\,dx_{n}) \\ &=d\theta+\alpha_{n-1}+e^{2\rho}\,d\phi \\ &=e^{2\rho}\lambda \end{align*} which shows that $\Phi$ is a contactomorphism and establishes \eqref{e:lambda-0-pullback} as claimed.
We next address the third point. The fact that $j_{1}^{2}\partial_{\rho}=-\partial_{\rho}$ and $j_{1}^{2}\partial_{\theta}=-\partial_{\theta}$ is immediate from the definition \eqref{e:j1-definition}. Meanwhile for $v\in T(\mathbb{R}^{2(n-1)})$, we use \eqref{e:j1-definition} twice with $j_{0}^{2}v=-v$ to compute \begin{align*} j_{1}^{2}v &=j_{1}\bp{j_{0}v-\alpha_{n-1}(j_{0}v)\,\partial_{\theta}+\alpha_{n-1}(v)e^{-2\rho}\,\partial_{\rho}} \\ &=j_{1}(j_{0}v)-\alpha_{n-1}(j_{0}v)j_{1}\partial_{\theta}+\alpha_{n-1}(v)e^{-2\rho}j_{1}\partial_{\rho} \\ &=j_{0}^{2}v-\alpha_{n-1}(j_{0}^{2}v)\,\partial_{\theta}+\alpha_{n-1}(j_{0}v)e^{-2\rho}\,\partial_{\rho} \\ &\hskip.25in -\alpha_{n-1}(j_{0}v)(e^{-2\rho}\,\partial_{\rho})+\alpha_{n-1}(v)e^{-2\rho}(-e^{2\rho}\partial_{\theta}) \\ &=-v \end{align*} which shows that $j_{1}$ is an almost complex structure on $W$. To check compatibility of $j_{1}$ with $d\beta$ we compute from \eqref{e:j1-definition} that \begin{align*} d\rho\circ j_{1} &=e^{-2\rho}\bp{d\theta+\alpha_{n-1}} \\ d\theta\circ j_{1} &=-e^{2\rho}\,d\rho-\alpha_{n-1}\circ j_{0}\circ d\pi_{\mathbb{R}^{2(n-1)}}. \\ dx_{i}\circ j_{1}&=dx_{i}\circ j_{0}\circ d\pi_{\mathbb{R}^{2(n-1)}} \\ dy_{i}\circ j_{1}&=dy_{i}\circ j_{0}\circ d\pi_{\mathbb{R}^{2(n-1)}} \end{align*} which with \eqref{e:d-beta} gives us \[ d\beta\circ(I\times j_{1}) = 2 \,d\rho\otimes d\rho +2e^{-4\rho}(d\theta+\alpha_{n-1})\otimes\bp{d\theta+\alpha_{n-1}} +e^{-2\rho}d\alpha_{n-1}\circ (I\times j_{0}). \] as claimed. By the assumption that $j_{0}$ is compatible with $d\alpha_{n-1}$, this is clearly symmetric and positive definite, and thus $j_{1}\in\mathcal{J}(W, d\beta)$ as claimed.
Finally, we show that $\Phi_{*}\tilde j_{1}$ has a smooth extension to a compatible complex structure $J\in\mathcal{J}(S^{1}\times\mathbb{R}^{2n}, \xi_{0})$. The contact structure $\xi_{0}=\ker\lambda_{0}$ is spanned by the smooth sections \[ -\alpha_{n}(\partial_{x_{i}})\partial_{\theta}+\partial_{x_{i}} \qquad -\alpha_{n}(\partial_{y_{i}})\partial_{\theta}+\partial_{y_{i}} \] so it suffices to check that $\Phi_{*}\tilde j_{1}$ times each of these sections has a smooth continuation. We first observe that from the definition \eqref{e:Phi-definition} of $\Phi$ we immediately have \begin{equation}\label{e:Phi-pushforward-1} \Phi_{*}\partial_{\theta}=\partial_{\theta} \qquad \Phi_{*}\partial_{x_{i}}=\partial_{x_{i}} \qquad \Phi_{*}\partial_{y_{i}}=\partial_{y_{i}} \end{equation} for $i$ between $1$ and $n-1$, while \eqref{e:Phi-pullback} give us \begin{equation}\label{e:Phi-pushforward-2} \Phi_{*}\partial_{\rho}=x_{n}\,\partial_{x_{n}}+y_{n}\,\partial_{y_{n}} \qquad \Phi_{*}\partial_{\phi}=x_{n}\,\partial_{y_{n}}-y_{n}\,\partial_{x_{n}}. \end{equation} Thus, for $v\in T(\mathbb{R}^{2(n-1)})=\operatorname{span}\br{\partial_{x_{i}}, \partial_{y_{i}}}_{i=1}^{n-1}$, a straightforward computation using that $d\theta(v)=0$ along with \eqref{e:lift-planes-proof} and \eqref{e:Phi-pushforward-1} shows that \[ -\alpha_{n}(v)\partial_{\theta}+v=\Phi_{*}(-\alpha_{n-1}(v)\widetilde{\partial_{\theta}}+\tilde v). \] Computing further with this, the definition \eqref{e:j1-definition} of $j_{1}$, and $\tilde j_{1}\tilde v=\widetilde{j_{1}v}$ then shows that \begin{align*} (\Phi_{*}\tilde j_{1})(-\alpha_{n}(v)\partial_{\theta}+v) &=(\Phi_{*}\tilde j_{1})\Phi_{*}(-\alpha_{n-1}(v)\widetilde{\partial_{\theta}}+\tilde v) \\ &=\Phi_{*}( -\alpha_{n-1}(v)\widetilde{j_{1}\partial_{\theta}}+\widetilde{j_{1}v}) \\ &=\Phi_{*}(-e^{-2\rho}\alpha_{n-1}(v)\widetilde{\partial_{\rho}}+\widetilde{j_{0}v}-\alpha_{n-1}(j_{0}v)\widetilde{\partial_{\theta}}+\alpha_{n-1}(v)e^{-2\rho}\widetilde{\partial_{\rho}}) \\ &=\Phi_{*}(-\alpha_{n-1}(j_{0}v)\widetilde{\partial_{\theta}}+\widetilde{j_{0}v}) \\ &=-\alpha_{n-1}(j_{0}v)\partial_{\theta}+j_{0}v \end{align*} which clearly extends smoothly over the $x_{n}=y_{n}=0$ locus since there is no $x_{n}$- or $y_{n}$-dependence. Meanwhile, a straightforward computation using \eqref{e:Phi-definition} and \eqref{e:Phi-pushforward-2} shows that \[ \partial_{x_{n}}=\Phi_{*}\bp{e^{-\rho}\bp{\cos\phi\,\partial_{\rho}-\sin\phi\,\partial_{\phi}}} \quad\text{ and }\qquad \partial_{y_{n}}=\Phi_{*}\bp{e^{-\rho}\bp{\sin\phi\,\partial_{\rho}+\cos\phi\,\partial_{\phi}}} \] and using this with \eqref{e:Phi-definition}, \eqref{e:Phi-pushforward-1}, and \eqref{e:lift-planes-proof} shows that \[ -\alpha_{n}(\partial_{x_{n}})\partial_{\theta}+\partial_{x_{n}} =y_{n}\,\partial_{\theta}+\partial_{x_{n}} =\Phi_{*}(e^{\rho}\sin\phi\,\widetilde\partial_{\theta}+e^{-\rho}\cos\phi\,\widetilde\partial_{\rho}) \] and \[ -\alpha_{n}(\partial_{y_{n}})\partial_{\theta}+\partial_{y_{n}} =-x_{n}\,\partial_{\theta}+\partial_{y_{n}} =\Phi_{*}(-e^{\rho}\cos\phi\,\widetilde\partial_{\theta}+e^{-\rho}\sin\phi\,\widetilde\partial_{\rho}). 
\] Computing further using the definition \eqref{e:j1-definition} of $j_{1}$ with $\tilde j_{1}\tilde v=\widetilde{j_{1}v}$ shows that \[ \Phi_{*}\tilde j_{1}(-\alpha_{n}(\partial_{x_{n}})\partial_{\theta}+\partial_{x_{n}}) =-\alpha_{n}(\partial_{y_{n}})\partial_{\theta}+\partial_{y_{n}} =-x_{n}\,\partial_{\theta}+\partial_{y_{n}} \] and \[ \Phi_{*}\tilde j_{1}(-\alpha_{n}(\partial_{y_{n}})\partial_{\theta}+\partial_{y_{n}}) =\alpha_{n}(\partial_{x_{n}})\partial_{\theta}-\partial_{x_{n}} =-y_{n}\,\partial_{\theta}-\partial_{x_{n}} \] which also extend smoothly over $x_{n}=y_{n}=0$. This completes the proof. \end{proof}
\end{document}
\begin{document}
\title{The Submodular Santa Claus Problem \\ in the Restricted Assignment Case\footnote{This research was supported by the Swiss National Science Foundation project
200021-184656 ``Randomness in Problem Instances and Randomized Algorithms.''}}
\begin{abstract} The submodular Santa Claus problem was introduced in a seminal work by Goemans, Harvey, Iwata, and Mirrokni (SODA'09) as an application of their structural result. In the mentioned problem $n$ unsplittable resources have to be assigned to $m$ players, each with a monotone submodular utility function $f_i$. The goal is to maximize $\min_i f_i(S_i)$ where $S_1,\dotsc,S_m$ is a partition of the resources. The result by Goemans et al. implies a polynomial time $O(n^{1/2 +\varepsilon})$-approximation algorithm.
Since then progress on this problem was limited to the linear case, that is, all $f_i$ are linear functions. In particular, a line of research has shown that there is a polynomial time constant approximation algorithm for linear valuation functions in the restricted assignment case. This is the special case where each player is given a set of desired resources $\Gamma_i$ and the individual valuation functions are defined as $f_i(S) = f(S \cap \Gamma_i)$ for a global linear function $f$. This can also be interpreted as maximizing $\min_i f(S_i)$ with additional assignment restrictions, i.e., resources can only be assigned to certain players.
In this paper we make comparable progress for the submodular variant. Namely, if $f$ is a monotone submodular function, we can in polynomial time compute an $O(\log\log(n))$-approximate solution.
\end{abstract} \pagebreak
\section{Introduction} In the Santa Claus problem (sometimes referred to as Max-Min Fair Allocation) we are given a set of $m$ players $P$ and a set of $n$ indivisible resources $R$. In its full generality, each player $i\in P$ has a utility function $f_i:2^R\to \mathbb{R}_{\ge 0}$, where $f_i(S)$ measures the happiness of player $i$ if he is assigned the resource set $S$. The goal is to find a partition of the resources that maximizes the happiness of the least happy player. Formally, we want to find a partition $\{S_i\}_{i\in P}$ of the resources that maximizes \begin{equation*}
\min_{i\in P} f_i(S_i) . \end{equation*} Most of the recent literature on this problem focuses on cases where $f_i$ is a linear function for all players $i$.
If we assume all valuation functions are linear,
the best approximation algorithm known for this problem, designed by Chakrabarty, Chuzhoy, and Khanna~\cite{DBLP:conf/focs/ChakrabartyCK09}, has an approximation rate of $n^{\epsilon}$ and runs in time $n^{O(1/\epsilon)}$ for $\epsilon\in\Omega(\log\log(n)/\log(n))$. On the negative side, it is only known that computing a $(2 - \delta)$-approximation is NP-hard~\cite{LenstraSchmoysTardos}. Apart from this there has been significant attention on the so-called \emph{restricted assignment case}. Here the utility functions are defined by one linear function $f$ and a set of resources $\Gamma_i$ for each player $i$. Intuitively, player $i$ is interested in the resources $\Gamma_i$, whereas the other resources are worthless for him. The individual utility functions are then implicitly defined by $f_i(S)=f(S\cap \Gamma_i)$. In a seminal work Bansal and Srividenko~\cite{BansalSrividenko} provide a $O(\log \log (m)/\log \log \log (m))$-approximation algorithm for this case. This was improved by Feige~\cite{Feige} to an $O(1)$-approximation. Further progress on the constant or the running time was made since then, see e.g.~\cite{DBLP:journals/talg/AnnamalaiKS17, DBLP:conf/soda/DaviesRZ20, DBLP:conf/icalp/ChengM19, DBLP:conf/icalp/ChengM18, JANSEN2020106025, Asadpour_local_search, Polacek}.
Let us now move to the non-linear case. Indeed, the problem becomes hopelessly difficult without any restrictions on the utility functions. Consider the following reduction from set packing. There are sets of resources $\{S_1,\dotsc,S_k\}$ and all utility functions are equal and defined by $f_i(S) = 1$ if $S_j \subseteq S$ for some $j$ and $f_i(S) = 0$ otherwise. Deciding whether there are $m$ disjoint sets in $S_1,\dotsc,S_k$ (a classical NP-hard problem) is equivalent to deciding whether the optimum of the Santa Claus problem is non-zero. In particular, obtaining any bounded approximation ratio for Santa Claus in this case is NP-hard.
Two naturally arising properties of utility functions are monotonicity and submodularity, see for example the related submodular welfare problem~\cite{DBLP:journals/geb/LehmannLN06,DBLP:conf/stoc/Vondrak08} where the goal is to maximize $\sum_i f_i(S_i)$. A function $f$ is monotone, if $f(S) \le f(T)$ for all $S\subseteq T$. It is submodular, if $f(S\cup \{a\}) - f(S) \ge f(T\cup\{a\}) - f(T)$ for all $S\subseteq T$ and $a\notin T$. The latter is also known as the \emph{diminishing returns} property in economics. A standard assumption on monotone submodular functions (used throughout this work) is that the value on the empty set is zero, i.e., $f(\emptyset) = 0$. Goemans, Harvey, Iwata, and Mirrokni~\cite{goemans2009approximating} first considered the Santa Claus problem with monotone submodular utility functions as an application of their fundamental result on submodular functions. Together with the algorithm of~\cite{DBLP:conf/focs/ChakrabartyCK09} it implies an $O(n^{1/2+\epsilon})$-approximation in time $O(n^{1/\epsilon})$.
In this paper we investigate the restricted assignment case with a monotone submodular utility function. That is, all utility functions are defined by $f_i(S)=f(S\cap \Gamma_i)$, where $f$ is a monotone submodular function and $\Gamma_i$ is a subset of resources for each player $i$. Before our work, the state-of-the-art for this problem was the $O(n^{1/2+\epsilon})$-approximation algorithm mentioned above, since none of the previous results for the restricted assignment case with a linear utility function apply when the utility function becomes monotone submodular.
\subsection{Overview of results and techniques}
Our main result is an approximation algorithm for the submodular Santa Claus problem in the restricted assignment case.
\begin{theorem} \label{thm:main} There is a randomized polynomial time $O(\log \log (n))$-approximation algorithm for the restricted assignment case with a monotone submodular utility function. \end{theorem}
Our way to this result is organised as follows. In Section~\ref{sec:reduction to hypergraph}, we first reduce our problem to a hypergraph matching problem (see next paragraph for a formal definition). We then solve this problem using Lovasz Local Lemma (LLL) in Section~\ref{sec:hypergraph problem}. In~\cite{BansalSrividenko} the authors also reduce to a hypergraph matching problem which they then solve using LLL, although both parts are substantially simpler. The higher generality of our utility functions is reflected in the more general hypergraph matching problem. Namely, our problem is precisely the weighted variant of the (unweighted) problem in~\cite{BansalSrividenko}. We will elaborate later in this section why the previous techniques do not easily extend to the weighted variant.
\paragraph{The hypergraph matching problem.} After the reduction in Section \ref{sec:reduction to hypergraph} we arrive at the following problem. There is a hypergraph
$\mathcal H = (P\cup R, \mathcal C)$ with hyperedges $\mathcal C$ over the vertices $P$ and $R$. We write $m = |P|$ and $n = |R|$. We will refer to hyperedges as configurations, the vertices in $P$ as players and $R$ as resources \footnote{We note that these do not have to be the same players and resources as in the Santa Claus problem we reduced from, but $n$ and $m$ do not increase.}. Moreover, a hypergraph is said to be regular if all vertices in $P$ and $R$ have the same degree, that is, they are contained in the same number of configurations.
The hypergraph may contain multiple copies of the same configuration. Each configuration $C\in\mathcal C$ contains exactly one vertex in $P$, that is, $|C\cap P| = 1$. Additionally, for each configuration $C\in \mathcal{C}$ the resources $j\in C$ have weights $w_{j,C} \ge 0$.
We emphasize that the same resource $j$ can be given different weights in two different configurations, that is, we may have $w_{j,C}\neq w_{j,C'}$ for two different configurations $C,C'$.
We are required to select for each player $i\in P$ one configuration $C$ that contains $i$. For each selected configuration $C$ we then have to assign to the player in $C$ a subset of the resources in $C$ of total weight at least $(1/\alpha) \cdot \sum_{j\in C} w_{j,C}$. A resource can only be assigned to one player. We call such a solution an $\alpha$-relaxed perfect matching. One seeks to minimize $\alpha$.
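As a small illustrative example of this definition (not needed in what follows): take two players $i_{1},i_{2}$ and three resources $j_{1},j_{2},j_{3}$ of weight $1$ each, with configurations $C_{1}=\{i_{1},j_{1},j_{2}\}$ and $C_{2}=\{i_{2},j_{2},j_{3}\}$. Both configurations must be selected, and $j_{2}$ can be assigned to only one of them, so one of the two players receives at most half of the weight of its configuration; this instance therefore admits a $2$-relaxed perfect matching but no $1$-relaxed one.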
We show that every regular hypergraph has an $\alpha$-relaxed perfect matching for some $\alpha=O(\log \log (n))$ assuming that $w_{j,C}\leq (1/\alpha) \cdot \sum_{j'\in C} w_{j',C}$ for all $j,C$, that is, all weights are small compared to the total weight of the configuration. Moreover, we can find such a matching in randomized polynomial time. In the reduction we use this result to round a certain LP relaxation and $\alpha$ essentially translates to the approximation rate. This result generalizes that of Bansal and Srividenko on hypergraph matching in the following way. They proved the same result for unit weights and uniform hyperedges, that is, $w_{j,C}=1$ for all $j,C$ and all hyperedges have the same number of resources\footnote{In fact they get a slightly better ratio of $\alpha = O(\log\log(m) / \log\log\log(m))$.}. In the next paragraph we briefly go over the techniques to prove our result for the hypergraph matching problem.
\paragraph{Our techniques.} Already the extension from uniform to non-uniform hypergraphs (assuming unit weights) is highly non-trivial and captures the core difficulty of our result. Indeed, we show via a (perhaps surprising) reduction that our weighted hypergraph matching problem can be reduced to the unweighted (but non-uniform) version by introducing some bounded dependencies between the choices of the different players.
For sake of brevity we therefore focus in this section on the unweighted non-uniform variant, that is, we need to assign to each player a configuration $C$ and at least $|C| / \alpha$ resources in $C$. We show that for any regular hypergraph there exists such a matching for $\alpha = O(\log \log (n))$ assuming that all configurations contain at least $\alpha$ resources and we can find it in randomized polynomial time. Without the assumption of uniformity the problem becomes significantly more challenging. To see this, we lay out the techniques of Bansal and Srividenko that allowed them to solve the problem in the uniform case. We note that for $\alpha = O(\log(n))$ the statement is easy to prove: We select for each player $i$ one of the configurations containing $i$ uniformly at random. Then by standard concentration bounds each resource is contained in at most $O(\log(n))$ of the selected configurations with high probability. This implies that there is a fractional assignment of resources to configurations such that each of the selected configurations $C$ receives
$\lfloor |C| / O(\log(n)) \rfloor$ of the resources in $C$. By integrality of the bipartite matching polytope, there is also an integral assignment with this property.
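To spell out the concentration step (a sketch; we write $D$ for the common degree of the regular hypergraph, a symbol used only here): fix a resource $j$ and let $N_{j}$ be the number of selected configurations containing $j$. Grouping the $D$ configurations that contain $j$ by the player they belong to, the contribution of each player to $N_{j}$ is a $\{0,1\}$-valued random variable, these contributions are independent across players, and
\begin{equation*}
\mathbb{E}[N_{j}]=D\cdot\frac{1}{D}=1,
\end{equation*}
since each configuration is selected with probability $1/D$. A standard Chernoff bound then gives $\mathbb{P}[N_{j}\ge 10\log(n)]\le n^{-3}$, and a union bound over the $n$ resources completes this step.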
To improve to $\alpha= O(\log \log (n))$ in the uniform case, Bansal and Srividenko proceed as follows. Let $k$ be the size of each configuration. First they reduce the degree of each player and resource to $O(\log(n))$ using the argument above, but taking $O(\log(n))$ configurations for each player. Then they sample uniformly at random $O(n \log(n) / k)$ resources and drop all others. This is sensible, because they manage to prove the (perhaps surprising) fact that an $\alpha$-relaxed perfect matching with respect to the smaller set of resources is still an $O(\alpha)$-relaxed perfect matching with respect to all resources with high probability (when assigning the dropped resources to the selected configurations appropriately). Indeed, the smaller instance is easier to solve: With high probability all configurations have size $O(\log(n))$ and this greatly reduces the dependencies between the bad events of the random experiment above (the event that a resource is contained in too many selected configurations). This allows them to apply Lov\'asz Local Lemma (LLL) in order to show that with positive probability the experiment succeeds for $\alpha = O(\log\log(n))$.
It is not obvious how to extend this approach to non-uniform hypergraphs: Sampling a fixed fraction of the resources will either make the small configurations empty---which makes it impossible to retain guarantees for the original instance---or leave the big configurations big---which fails to reduce the dependencies enough to apply LLL. Hence the non-uniform case requires new, more sophisticated ideas, which we describe next.
Suppose we are able to find a set $\mathcal K\subseteq \mathcal C$ of configurations (one for each player)
such that for each $K\in\mathcal K$ the sum of intersections $|K\cap K'|$ with smaller configurations $K'\in \mathcal K$ is very small, say at most $|K| / 2$. Then it is easy to derive a $2$-relaxed perfect matching: We iterate over all $K\in\mathcal K$ from large to small and reassign all resources in $K$ to $K$
(possibly stealing them from the configuration that previously had them). In this process every configuration gets stolen at most $|K| / 2$ of its resources, in particular, it keeps the other half. However, it is non-trivial to obtain a property like the one mentioned above. If we take a random configuration for each player, the dependencies of the intersections are too complex. To avoid this we invoke an advanced variant of the sampling approach where we construct not only one set of resources, but a hierarchy of resource sets $R_0\supseteq \cdots \supseteq R_d$ by repeatedly dropping a fraction of resources from the previous set. We then formulate bad events based on the intersections of a configuration $C$ with smaller configurations $C'$, but we write it only considering a resource set $R_k$ of convenient granularity
(chosen based on the size of $C'$). In this way we formulate a number of bad events using various sets $R_k$. This succeeds in reducing the dependencies enough to apply LLL. Unfortunately, even with this new way of defining bad events, the guarantee that for each $K\in\mathcal K$ the sum of intersections $|K\cap K'|$ with smaller configurations $K'\in \mathcal K$ is at most $|K| / 2$ is still too much to ask. We can only prove some weaker property which makes it more difficult to reconstruct a good solution from it. The reconstruction still starts from the biggest configurations and iterates to finish by including the smallest configurations but it requires a delicate induction where at each step, both the resource set expands and some new small configurations that were not considered before come into play.
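To record the accounting behind the $2$-relaxed matching claimed above (with ties in size broken by the processing order): after the reassignment a configuration $K\in\mathcal K$ loses only resources that also lie in a selected configuration processed after $K$, so it keeps at least
\begin{equation*}
|K|-\sum_{\substack{K'\in\mathcal K\setminus\{K\}\\ |K'|\le |K|}}|K\cap K'| \;\ge\; \frac{|K|}{2}
\end{equation*}
of its resources.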
\paragraph{Additional implications of non-uniform hypergraph matchings to the Santa Claus problem.} We believe this hypergraph matching problem is interesting in its own right. Our last contribution is to show that finding good matchings in unweighted hypergraphs with fewer assumptions than ours would have important applications for the Santa Claus problem with linear utility functions. We recall that here, each player $i$ has its own utility function $f_i$ that can be any linear function. In this case, the best approximation algorithm is due to Chakrabarty, Chuzhoy, and Khanna~\cite{DBLP:conf/focs/ChakrabartyCK09} who gave a $O(n^{\epsilon})$-approximation running in time $O(n^{1/\epsilon})$. In particular, no sub-polynomial approximation running in polynomial time is known. Consider as before $\mathcal H = (P\cup R, \mathcal C)$ a non-uniform hypergraph with unit weights ($w_{j,C}=1$ for all $j,C$ such that $j\in C$). Finding the smallest $\alpha$ (or an approximation of it) such that there exists an $\alpha$-relaxed perfect matching in $\mathcal H$ is already a very non-trivial question to solve in polynomial time.
We show, via a reduction, that a $c$-approximation for this problem would yield a $O((c\log^* (n))^2)$-approximation for the Santa Claus problem with arbitrary linear utility functions. In particular, any sub-polynomial approximation for this problem would significantly improve the state-of-the-art\footnote{We mention that our result on relaxed matchings in Section \ref{sec:hypergraph problem} does not imply an $O(\log \log (n))$-approximation for this problem since we make additional assumptions on the regularity of the hypergraph or the size of hyperedges.}. All the details of this last result can be found in Section \ref{sec:reduction santa claus}.
\paragraph{A remark on local search techniques.} We focus here on an extension of the LLL technique of Bansal and Srividenko. However, another technique proved itself very successful for the Santa Claus problem in the restricted assignment case with a linear utility function. This is a local search technique discovered by Asadpour, Feige, and Saberi~\cite{Asadpour_local_search} who used it to give a non-constructive proof that the integrality gap of the configuration LP of Bansal and Srividenko is at most $4$. One can wonder if this technique could also be extended to the submodular case as we did with LLL. Unfortunately, this seems problematic as the local search arguments heavily rely on amortizing different volumes of configurations (i.e., the sum of their resources' weights or the number of resources in the unweighted case). Amortizing the volumes of configurations works well, if each configuration has the same volume, which is the case for the problem derived from linear valuation functions, but not the one derived from submodular functions. If the volumes differ then these amortization arguments break and the authors of this paper believe this is a fundamental problem for generalizing those arguments.
\section{Reduction to hypergraph matching problem} \label{sec:reduction to hypergraph} In this section we give a reduction of the restricted submodular Santa Claus problem to the hypergraph matching problem. As a starting point we solve the configuration LP, a linear programming relaxation of our problem. The LP is constructed using a parameter $T$ which denotes the value of its solution. The goal is to find the maximal $T$ such that the LP is feasible. In the LP we have a variable $x_{i,C}$ for every player $i\in P$ and every configuration $C\in \mathcal{C}(i, T)$. The configurations $\mathcal{C}(i, T)$ are defined as the sets of resources $C \subseteq \Gamma_i$ such that $f(C) \ge T$. We require every player $i\in P$ to have at least one configuration and every resource $j \in R$ to be contained in at most one configuration. \begin{align*}
\sum_{C\in \mathcal{C}(i, T)} & x_{i,C} \ge 1 \quad \text{ for all } i \in P \\
\sum_{i\in P}\sum_{C\in \mathcal{C}(i,T) : j \in C} & x_{i,C} \leq 1 \quad \text{ for all } j \in R \\
& x_{i,C} \geq 0 \quad \text{ for all } i\in P, C \in \mathcal{C}(i, T) \end{align*} Since this linear program has exponentially many variables, we cannot directly solve it in polynomial time. We will give a polynomial time constant approximation for it via its dual. This is similar to the linear variant in~\cite{BansalSrividenko}, but requires some more work. In their case they can reduce the problem to one where the separation problem of the dual can be solved in polynomial time. In our case even the separation problem can only be approximated. Nevertheless, this is sufficient to approximate the linear program in polynomial time.
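We also note that the configuration LP is indeed a relaxation: given a partition $\{S_i\}_{i\in P}$ of the resources with $f_i(S_i)\ge T$ for all $i\in P$, we have $S_i\cap\Gamma_i\in\mathcal{C}(i,T)$ for every $i$, and setting $x_{i,S_i\cap\Gamma_i}=1$ (and all other variables to $0$) yields a feasible solution. In particular, the largest feasible $T$ is at least the optimum value of the Santa Claus instance.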
\begin{theorem}\label{thm:config-LP}
The configuration LP of the restricted submodular Santa Claus
problem can be approximated within a factor of $(1 - 1/e)/2$
in polynomial time. \end{theorem} We defer the proof of this theorem to Appendix~\ref{appendix_lp}. Given a solution $x^*$ of the configuration LP we want to arrive at the hypergraph matching problem from the introduction such that an $\alpha$-relaxed perfect matching of that problem corresponds to an $O(\alpha)$-approximate solution of the restricted submodular Santa Claus problem. Let $T^*$ denote the value of the solution $x^*$. We will define a resource $j\in R$ as \emph{fat} if \begin{equation*}
f(\{j\}) \geq \frac{T^*}{100 \alpha} . \end{equation*} Resources that are not fat are called \emph{thin}. We call a configuration $C\in\mathcal{C}(i, T)$ thin, if it contains only thin resources and denote by $\mathcal{C}_t(i,T) \subseteq \mathcal{C}(i, T)$ the set of thin configurations. Intuitively in order to obtain an $O(\alpha)$-approximate solution, it suffices to give each player $i$ either one fat resource $j\in \Gamma_i$ or a thin configuration $C\in\mathcal{C}_t(i,T^*/O(\alpha))$. For our next step towards the hypergraph problem we use a technique borrowed from Bansal and Srividenko~\cite{BansalSrividenko}. This technique allows us to simplify the structure of the problem significantly using the solution of the configuration LP. Namely, one can find a partition of the players into clusters such that we only need to cover one player from each cluster with thin resources. All other players can then be covered by fat resources. Informally speaking, the following lemma is proved by sampling configurations randomly according to a distribution derived in a non-trivial way from the configuration LP. \begin{lemma}\label{lem:config-sample}
Let $\ell \ge 12\log (n)$.
Given a solution of value $T^*$ for the configuration LP,
in randomized polynomial time we can find a partition of the players into clusters $K_1\cup\cdots \cup K_k\cup Q = P$ and multisets of configurations
$\mathcal{C}_h \subseteq \bigcup_{i\in K_h} \mathcal{C}_t(i, T^*/5)$, $h=1,\dotsc,k$, such that
\begin{enumerate}
\item $|\mathcal{C}_h| = \ell$ for all $h=1,\dotsc,k$ and
\item Each thin resource appears in at most $\ell$ configurations of $\bigcup_h \mathcal{C}_h$.
\item given any $i_1\in K_1, i_2\in K_2,\dotsc,i_k\in K_k$
there is a matching of fat resources to players
$P\setminus\{i_1,\dotsc,i_k\}$ such that each of these players $i$ gets a unique fat resource $j\in\Gamma_i$.
\end{enumerate} \end{lemma} The role of the players $Q$ in the lemma above is that each one of them gets a fat resource for certain. The proof follows closely that in~\cite{BansalSrividenko}. For completeness we include it in Appendix~\ref{appendix_lp}. We are now ready to define the hypergraph matching instance. The vertices of our hypergraph are the clusters $K_1,\dotsc,K_k$ and the thin resources. Let $\mathcal{C}_1,\dotsc,\mathcal{C}_k$ be the multisets of configurations as in Lemma~\ref{lem:config-sample}. For each $K_h$ and $C\in\mathcal{C}_h$ there is a hyperedge containing $K_h$ and all resources in $C$. Let $\{j_1,\dotsc,j_\ell\} = C$ ordered arbitrarily, but consistently. Then we define the weights as normalized marginal gains of resources if they are taken in this order, that is, \begin{equation*}
w_{j_i, C} = \frac{5}{T^*} f(\{j_i\} \mid \{j_1,\dotsc,j_{i-1}\}) = \frac{5}{T^*} (f(\{j_1,\dotsc,j_{i-1}, j_i\})-f(\{j_1,\dotsc,j_{i-1}\})). \end{equation*} This implies that $\sum_{j\in C} w_{j, C} \ge 5 f(C) / T^* \ge 1$ for each $C\in\mathcal{C}_h$, $h=1,\dotsc,k$. \begin{lemma}
Given an $\alpha$-relaxed perfect matching to the
instance as described by the reduction, one can
find in polynomial time an $O(\alpha)$-approximation
to the instance of restricted submodular Santa Claus. \end{lemma} \begin{proof}
The $\alpha$-relaxed perfect matching implies that
cluster $K_h$ gets a set of thin resources $C'$
where $C'\subseteq C$ for some $C\in\mathcal{C}_h$
and $\sum_{j\in C'} w_{j, C} \ge 1/\alpha$.
By submodularity we have that
$f(C') \ge T^* / (5 \alpha)$.
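In more detail, write $C=\{j_{1},j_{2},\dotsc\}$ in the order used to define the weights and $C'=\{j_{i_{1}},\dotsc,j_{i_{t}}\}$ with $i_{1}<\dots<i_{t}$. Using $f(\emptyset)=0$ and submodularity,
\begin{equation*}
f(C')=\sum_{s=1}^{t} f(\{j_{i_{s}}\}\mid\{j_{i_{1}},\dotsc,j_{i_{s-1}}\})
\ge\sum_{s=1}^{t} f(\{j_{i_{s}}\}\mid\{j_{1},\dotsc,j_{i_{s}-1}\})
=\frac{T^{*}}{5}\sum_{j\in C'}w_{j,C}\ge\frac{T^{*}}{5\alpha}.
\end{equation*}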
Therefore we can satisfy one player in each
cluster using thin resources and by
Lemma~\ref{lem:config-sample} all others using
fat resources. \end{proof} The proof above is the most critical place in the paper where we make use of the submodularity of the valuation function $f$. We note that since all resources considered are thin resources we have, by submodularity of $f$, the assumption that \begin{equation*}
w_{j,C} \leq \frac{5}{T^*}f(\{j\}) \leq \frac{5}{T^*}\cdot\frac{T^*}{100\alpha} = \frac{5}{100\alpha} \leq \frac{5}{100\alpha} \sum_{j\in C} w_{j,C} \end{equation*} for all $j,C$ such that $j\in C$. This means that the weights are all small enough, as promised in the introduction. From now on, we will assume that $\sum_{j\in C} w_{j,C}=1$ for all configurations $C$. This is w.l.o.g. since we can just rescale the weights inside each configuration. This does not hurt the property that all weights are small enough.
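Concretely, after this rescaling every weight satisfies $w_{j,C}\le \frac{5}{100\alpha}=\frac{1}{20\alpha}$, which is the form in which the smallness of the weights will be used below.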
\subsection{Reduction to unweighted hypergraph} Before proceeding to the solution of this hypergraph matching problem, we first give a reduction to an unweighted variant of the problem. We will then solve this unweighted variant in the next section. First, we note that we can assume that all the weights $w_{j,C}$ are powers of $2$ by standard rounding arguments. This only loses a constant factor in the approximation rate. Second, we can assume that inside each configuration $C$, each resource has a weight that is at least $1/(2n)$. Formally, we can assume that \begin{equation*}
\min_{j\in C}w_{j,C}\geq 1/(2n) \end{equation*} for all $C\in \mathcal{C}$. If this is not the case for some $C\in \mathcal{C}$, simply delete from $C$ all the resources that have a weight less than $1/(2n)$. By doing this, the total weight of $C$ decreases by at most $1/2$ since it loses in total at most a weight of \begin{equation*}
n\cdot \frac{1}{2n} = \frac{1}{2}. \end{equation*} (Recall that we rescaled the weights so that $\sum_{j\in C} w_{j,C}=1$).
Hence after these two operations, an $\alpha$-relaxed perfect matching in the new hypergraph is still an $O(\alpha)$-relaxed perfect matching in the original hypergraph. From there we reduce to an unweighted variant of the matching problem. Note that each configuration contains resources of at most $\log (n)$ different possible weights (powers of $2$ from $1 /(2n)$ to $1 / \alpha$). We create the following new unweighted hypergraph $\mathcal H'=(P'\cup R,\mathcal C')$. The resource set $R$ remains unchanged. For each player $i\in P$, we create $\log (n)$ players, which later correspond each to a distinct weight. We will say that the players obtained from duplicating the original player form a \textit{group}. For every configuration $C$ containing player $i$ in the hypergraph $\mathcal H$, we add a set $\mathcal{S}_C=\{C_1,\ldots ,C_s, \ldots ,C_{\log(n)}\}$ of configurations in $\mathcal H'$. $C_s$ contains player $i_s$ and all resources that are given a weight $2^{-(s+1)}$ in $C$. In this new hypergraph, the resources are not weighted. Note that if the hypergraph $\mathcal H$ is regular then $\mathcal H'$ is regular as well.
Additionally, for a group of players and a set of $\log(n)$ configurations (one for each player in the group), we say that this set of configurations is \textit{consistent} if all the selected configurations are obtained from the same configuration in the original hypergraph $\mathcal H$ (i.e.\ the selected configurations all belong to $\mathcal{S}_C$ for some $C$ in $\mathcal H$).
Formally, we focus on the following problem. Given the regular hypergraph $\mathcal H'$, we want to select, for each group of $\log (n)$ players, a consistent set of configurations $C_1,\ldots, C_s, \ldots ,C_{\log(n)}$ and assign to each player $i_s$ a subset of the resources in the corresponding configuration $C_s$ so that $i_s$ is assigned at least $\left\lfloor |C_s|/\alpha\right\rfloor$ resources. No resource can be assigned to more than one player. We refer to this assignment as a consistent $\alpha$-relaxed perfect matching. Note that in the case where $|C_s|$ is small (e.g. of constant size) we are not required to assign any resource to player $i_s$.
\begin{lemma} A consistent $\alpha$-relaxed matching in $\mathcal H'$ induces a $O(\alpha)$-relaxed matching in $\mathcal H$. \end{lemma} \begin{proof}
Let us consider a group of $\log (n)$ players $i_1,\ldots , i_s, \ldots ,i_{\log (n)}$ in $\mathcal H'$ corresponding to a player $i$ in $\mathcal H$. These players are assigned a consistent set of configurations $C_1,\ldots , C_s, \ldots, C_{\log (n)}$ that correspond to a partition of a configuration in $\mathcal H$. Moreover, each player $i_s$ is assigned $\left\lfloor |C_s|/\alpha\right\rfloor$ resources from $C_s$. We have two cases. If $|C_s|\geq \alpha$ then we have that $i_s$ is assigned at least \begin{equation*}
\left\lfloor |C_s|/\alpha\right\rfloor\geq |C_s|/(2\alpha)
\end{equation*} resources from $C_s$. On the other hand, if $\left\lfloor |C_s|/\alpha\right\rfloor=0$ then the player $i_s$ might not be assigned anything. However, we claim that the configurations $C_s$ of cardinality less than $\alpha$ can represent at most a $1/10$ fraction of the total weight of the configuration $C$ in the original weighted hypergraph. To see this note that the total weight they represent is upper bounded by \begin{equation*}
\alpha \left(\sum_{k=\log(100\alpha/5)}^{\infty} \frac{1}{2^k}\right) = \alpha\left(\frac{5}{100\alpha}\sum_{k=0}^{\infty} \frac{1}{2^k}\right) \leq \frac{10}{100} = \frac{1}{10}\sum_{j\in C}w_{j,C}. \end{equation*}
Hence, the consistent $\alpha$-relaxed matching in $\mathcal H'$ induces in a straightforward way a matching in $\mathcal H$ where every player gets at least a fraction $1/(2\alpha) \cdot (1-1/10) \geq 1/(3\alpha)$ of the total weight of the appropriate configuration. This means that the consistent $\alpha$-relaxed perfect matching in $\mathcal H'$ is indeed a $(3\alpha)$-relaxed perfect matching in $\mathcal H$. \end{proof}
\section{Matchings in regular hypergraphs} \label{sec:hypergraph problem}
In this section we solve the hypergraph matching problem we arrived to in the previous section. For convenience, we give a self contained definition of the problem before formulating and proving our result.
\paragraph{Input:} We are given $\mathcal H = (P\cup R, \mathcal C)$ a hypergraph with hyperedges $\mathcal C$ over the vertices $P$ (players) and $R$ (resources) with
$m = |P|$ and $n = |R|$. As in previous sections, we will refer to hyperedges as configurations.
Each configuration $C\in\mathcal C$ contains exactly one vertex in $P$, that is, $|C\cap P| = 1$. The set of players is partitioned into groups of size at most $\log (n)$; we will use $A$ to denote a group. These groups are disjoint and contain all players. Moreover, there exists an integer $\ell$ such that for each group $A$ there are $\ell$ consistent sets of configurations. A consistent set of configurations for a group $A$ is a set of $|A|$ configurations such that all players in the group appear in exactly one of these configurations. We will denote by $\mathcal{S}_A$ such a set and for a player $i\in A$, we will denote by $\mathcal{S}_A^{(i)}$ the unique configuration in $\mathcal{S}_A$ containing $i$. Finally, no resource appears in more than $\ell$ configurations. We say that the hypergraph is regular (although some resources may appear in fewer than $\ell$ configurations).
\paragraph{Output:} We wish to select a matching that covers all players in $P$. More precisely, for each group $A$ we want to select a consistent set of configurations (denoted by $\{\mathcal{S}_A^{(i)}\}_{i\in A}$). Then for each player $i\in A$, we wish to assign a subset of the resources in $\mathcal{S}_A^{(i)}$ to the player $i$ such that: \begin{enumerate}
\item No resource is assigned to more than one player in total.
\item For any group $A$ and any player $i\in A$, player $i$ is assigned at least
\begin{equation*}
\left\lfloor \frac{|\mathcal{S}_A^{(i)}|}{\alpha}\right\rfloor
\end{equation*} resources from $\mathcal{S}_A^{(i)}$. \end{enumerate} We call this a consistent $\alpha$-relaxed perfect matching. Our goal in this section will be to prove the following theorem.
\begin{theorem}\label{thm:unweighted_hypergraph} Let $\mathcal H=(P\cup R, \mathcal C)$ be a regular (non-uniform) hypergraph where the set of players is partitioned into groups of size at most $\log (n)$. Then we can, in randomized polynomial time, compute a consistent $\alpha$-relaxed perfect matching for $\alpha=O(\log \log (n))$. \end{theorem}
We note that Theorem \ref{thm:unweighted_hypergraph} together with the reduction from the previous section will prove our main result (Theorem \ref{thm:main}) stated in the introduction.
\subsection{Overview and notations}
To prove Theorem~\ref{thm:unweighted_hypergraph}, we introduce the following notations. Let $\ell \in \mathbb N$ be the regularity parameter as described in the problem input (i.e. each group has $\ell$ consistent sets and each resource appears in no more than $\ell$ configurations). As we proved in Lemma \ref{lem:config-sample}, we can assume with standard sampling arguments that $\ell =300.000\log^{3}(n)$ at a constant loss. If this is not the case, for instance because we want to solve the hypergraph matching problem on its own (i.e. not obtained by the reduction in Section \ref{sec:reduction to hypergraph}), the proof of Lemma \ref{lem:config-sample} can be repeated in a very similar way here.
For a configuration $C$, its size will be defined as $|C\cap R|$ (i.e. its cardinality over the resource set). For each player $i$, we denote by $\mathcal{C}_i$ the set of configurations that contain $i$. We now group the configurations in $\mathcal{C}_i$ by size: We denote by $\mathcal{C}_{i}^{(0)}$ the configurations of size in $[0,\ell^{4})$ and for $k\ge 1$ we write $\mathcal{C}_{i}^{(k)}$ for the configurations of size in $[\ell^{k+3},\ell^{k+4})$. Moreover, define $\mathcal{C}^{(k)}=\bigcup_i \mathcal{C}_{i}^{(k)}$ and $\mathcal{C}^{(\ge k)} = \bigcup_{h\ge k} \mathcal{C}^{(h)}$. Let $d$ be the smallest number such that $\mathcal{C}^{(\ge d)}$ is empty. Note that $d\le \log(n) / \log(\ell)$.
Now consider the following random process. \begin{experiment}\label{exp:sequence} We construct a nested sequence of resource sets $R=R_0 \supseteq R_1 \supseteq \ldots \supseteq R_d$ as follows. Each $R_k$ is obtained from $R_{k-1}$ by deleting every resource in $R_{k-1}$ independently with probability $(\ell-1) / \ell$. \end{experiment}
In expectation only a $1/\ell$ fraction of resources in $R_{k-1}$ survives in $R_k$. Also notice that for $C \in \mathcal{C}^{(k)}$ we have that $\mathbb E[ |R_k \cap C| ] = \mathrm{poly}(\ell)$.
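To spell out the last claim: for $k\ge 1$ and $C\in\mathcal{C}^{(k)}$, every resource of $C$ survives into $R_k$ with probability $\ell^{-k}$ (it must survive $k$ independent deletion rounds), so
\begin{equation*}
\mathbb E[ |R_k \cap C| ]=\ell^{-k}|C|\in[\ell^{3},\ell^{4})
\end{equation*}
by the definition of $\mathcal{C}^{(k)}$.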
The proof of Theorem~\ref{thm:unweighted_hypergraph} is organized as follows. In Section~\ref{sec:sequence}, we give some properties of the resource sets constructed by Random Experiment~\ref{exp:sequence} that hold with high probability. Then in Section~\ref{sec:LLL}, we show that we can find a single consistent set of configurations for each group of players such that for each configuration selected, its intersection with smaller selected configurations is bounded if we restrict the resource set to an appropriate $R_k$. Restricting the resource set is important to bound the dependencies of bad events in order to apply Lovasz Local Lemma. Finally, in Section~\ref{sec:reconstruction}, we demonstrate how these configurations allow us to reconstruct a consistent $\alpha$-relaxed perfect matching for an appropriate assignment of resources to configurations.
\subsection{Properties of resource sets}\label{sec:sequence} In this subsection, we give a precise statement of the key properties that we need from Random Experiment~\ref{exp:sequence}. The first two lemmas have a straight-forward proof. The last one is a generalization of an argument used by Bansal and Srividenko \cite{BansalSrividenko}. Since the proof is more technical and tedious, we also defer it to Appendix~\ref{appendix_sequence} along with the proof of the first two statements.
We start with the first property which bounds the size of the configurations when restricted to some $R_k$. This property is useful to reduce the dependencies while applying LLL later. \begin{lemma} \label{lma-size} Consider Random Experiment~\ref{exp:sequence} with $\ell\geq 300.000\log^{3} (n)$. For any $k\geq 0$ and any $C\in\mathcal{C}^{(\geq k)}$ we have
\begin{equation*}
\frac{1}{2} \ell^{-k}|C| \le |R_k \cap C| \le \frac{3}{2} \ell^{-k}|C|
\end{equation*} with probability at least $1-1/n^{10}$. \end{lemma}
The next property expresses that for any configuration the sum of intersections with configurations of a particular size does not deviate much from its expectation. In particular, for any configuration $C$, the sum of its intersections with other configurations is at most $|C|\ell$ as each resource is in at most $\ell$ configurations. By the lemma stated below, we recover this up to a multiplicative constant factor when we consider the appropriately weighted sum of the intersections of $C$ with other configurations $C'$ of smaller sizes, where each configuration $C' \in \mathcal{C}^{(k)}$ is restricted to the resource set $R_k$.
\begin{lemma} \label{lma-overlap-representative} Consider Random Experiment~\ref{exp:sequence} with $\ell\geq 300.000\log^{3} (n)$. For any $k\geq 0$ and any $C\in\mathcal{C}^{(\geq k)}$ we have
\begin{equation*}
\sum_{C'\in \mathcal{C}^{(k)}} |C'\cap C\cap R_k| \leq \frac{10}{\ell^{k}} \left(|C|+\sum_{C'\in \mathcal{C}^{(k)}} |C'\cap C| \right)
\end{equation*} with probability at least $1-1/n^{10}$. \end{lemma} We now define the notion of \emph{good} solutions which is helpful in stating our last property. Let $\mathcal{F}$ be a set of configurations, $\alpha:\mathcal{F} \rightarrow \mathbb N$, $\gamma \in\mathbb N$, and $R'\subseteq R$. We say that an assignment of $R'$ to $\mathcal{F}$ is $(\alpha,\gamma)$-good if every configuration $C\in \mathcal{F}$ receives at least $\alpha(C)$ resources of $C\cap R'$ and if no resource in $R'$ is assigned more than $\gamma$ times in total.
Below we obtain that given a $(\alpha,\gamma)$-good solution with respect to resource set $R_{k+1}$, one can construct an almost $(\ell \cdot \alpha,\gamma)$-good solution with respect to the bigger resource set $R_{k}$. Informally, starting from a good solution with respect to the final resource set and iteratively applying this lemma would give us a good solution with respect to our complete set of resources. \begin{lemma} \label{lma-good-solution} Consider Random Experiment~\ref{exp:sequence} with $\ell\geq 300.000\log^{3} (n)$. Fix $k\geq 0$. Conditioned on the event that the bounds in Lemma~\ref{lma-size} hold for $k$, then with probability at least $1 - 1/n^{10}$ the following holds for all $\mathcal{F}\subseteq \mathcal{C}^{(\geq k+1)}$, $\alpha:\mathcal{F} \rightarrow \mathbb N$, and $\gamma \in\mathbb N$ such that $\ell^3/1000\leq \alpha(C) \leq n $ for all $C\in\mathcal{F}$ and $\gamma \in \{1,\dotsc,\ell\}$: If there is a $(\alpha,\gamma)$-good assignment of $R_{k+1}$ to $\mathcal{F}$, then there is a $(\alpha',\gamma)$-good assignment of $R_k$ to $\mathcal{F}$ where \begin{equation*}
\alpha'(C) \ge \ell \left(1-\frac{1}{\log (n)} \right) \alpha(C) \end{equation*} for all $C\in\mathcal{F}$. Moreover, this assignment can be found in polynomial time. \end{lemma}
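For intuition (this is only the accounting behind the informal remark above): iterating the lemma once per level from $R_d$ down to $R_0$ multiplies the guarantee by $\ell$ in each step while losing a factor $\left(1-\frac{1}{\log (n)}\right)$ each time, and since $d\le \log(n)/\log(\ell)$ we have
\begin{equation*}
\left(1-\frac{1}{\log (n)}\right)^{d}\ge 1-\frac{d}{\log (n)}\ge 1-\frac{1}{\log(\ell)}\ge \frac{1}{2},
\end{equation*}
so across all levels only a constant factor is lost.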
Given the lemmata above, a simple union bound shows that with high probability all the stated properties of the resource sets hold simultaneously.
\subsection{Selection of configurations}\label{sec:LLL} In this subsection, we give a random process that selects one consistent set of configurations for each group of players such that the intersection of the selected configurations with smaller configurations is bounded when considered on appropriate sets $R_k$. We will denote by $\mathcal{S}_A$ the selected consistent set for group $A$, and for ease of notation we will write $K_i=\mathcal{S}_A^{(i)}$ for the selected configuration of player $i\in A$. For any integer $k$, we write $\mathcal K^{(k)}_i = \{K_i\}$ if $K_i\in\mathcal C^{(k)}_i$ and $\mathcal K^{(k)}_i = \emptyset$ otherwise. As for the configuration set, we will also denote $\mathcal K^{(k)}=\bigcup_{i}\mathcal K^{(k)}_i$ and $\mathcal K= \bigcup_{k}\mathcal K^{(k)}$. The following lemma describes the properties we want to ensure when selecting the configurations. For better clarity we also recall the properties of the sets $R_0,\dotsc,R_d$ that we need. These hold with high probability by the lemmata of the previous section.
\begin{lemma}\label{lma:main-LLL}
Let $R = R_0 \supseteq \dotsc \supseteq R_d$ be
sets of fewer and fewer resources.
Assume that for each $k$ and $C\in \mathcal C^{(k)}$
we have
\begin{equation*}
1/2 \cdot \ell^{k - h} \le |C\cap R_h| \le 3/2 \cdot \ell^{- h} |C| < 3/2 \cdot \ell^{k - h + 4}
\end{equation*}
for all $h=0,\dotsc,k$.
Then there exists a selection of one consistent set $\mathcal{S}_A$ for each group $A$ such that for all $k=0,\dotsc, d$, $C\in \mathcal C^{(k)}$ and $j=0,\dotsc,k$ we have
\begin{equation*}
\sum_{j\leq h\le k} \sum_{K\in\mathcal K^{(h)}} \ell^{h} |K \cap C \cap R_h|
\le \frac{1}{\ell} \sum_{j\leq h\le k} \sum_{C'\in\mathcal C^{(h)}} \ell^{h} |C' \cap C \cap R_h| + 1000 \frac{d + \ell}{\ell}\log(\ell) |C| .
\end{equation*}
Moreover, this selection of consistent sets can be found in polynomial time. \end{lemma}
Before we prove this lemma, we give an intuition of the statement. Consider the sets $R_1,\dotsc,R_d$ constructed as in Random Experiment~\ref{exp:sequence}. Then for $C'\in\mathcal C^{(h)}$ we have $\mathbb{E}[\ell^h |C'\cap C\cap R_h|] = |C'\cap C|$. Hence \begin{equation*}
\sum_{h\le k} \sum_{K\in\mathcal K^{(h)}} |K \cap C| = \mathbb{E}[\sum_{h\le k} \sum_{K\in\mathcal K^{(h)}} \ell^h |K \cap C \cap R_h|] \end{equation*} Similarly for the right-hand side we have \begin{multline*}
\mathbb{E}[\frac{1}{\ell} \sum_{j \le h\le k} \sum_{C'\in\mathcal C^{(h)}} \ell^h |C' \cap C \cap R_h| + O(\frac{d + \ell}{\ell}\log(\ell) |C|)] \\
= \frac{1}{\ell}\underbrace{\sum_{j\le h\le k} \sum_{C'\in\mathcal C^{(h)}} |C' \cap C|}_{\le \ell |C|} + O\left(\frac{d + \ell}{\ell}\log(\ell) |C|\right)
= O\left(\frac{d + \ell}{\ell}\log(\ell) |C|\right) . \end{multline*} Hence the lemma says that each resource in $C$ is roughly covered $O((d + \ell)/\ell \cdot \log(\ell))$ times by smaller configurations.
We now proceed to prove the lemma by performing the following random experiment and then using the Lovasz Local Lemma to show that there is a positive probability of success. \begin{experiment} For each group $A$, select one consistent set $\mathcal{S}_A$ uniformly at random. Then for each player $i \in A$ set $K_i=\mathcal{S}_A^{(i)}$. \end{experiment} For all $h=0,\dotsc,d$ and $i\in P$ we define the random variable \begin{equation*}
X^{(h)}_{i,C} = \sum_{K\in\mathcal K^{(h)}_i} |K \cap C \cap R_h| \le \min\{3/2 \cdot \ell^4, |C\cap R_h|\} . \end{equation*} Let $X^{(h)}_C = \sum_{i=1}^m X^{(h)}_{i, C}$. Then \begin{equation*}
\mathbb{E}[X^{(h)}_C] \le \frac{1}{\ell} \sum_{C'\in\mathcal C^{(h)}} |C'\cap C\cap R_h| \le |C\cap R_h| . \end{equation*} Here the first inequality holds since each configuration is selected with probability $1/\ell$ (its group picks one of its $\ell$ consistent sets uniformly at random), and the second holds since every resource lies in at most $\ell$ configurations. We define a set of bad events. As we will show later, if none of them occur, the bounds claimed in the lemma hold. For each $k$, $C\in\mathcal C^{(k)}$, and $h\le k$ let $B_C^{(h)}$ be the event that \begin{equation*}
X_C^{(h)}
\ge \begin{cases}
\mathbb{E}[X_C^{(h)}] + 63 |C\cap R_h| \log(\ell) &\text{ if $k - 5 \le h \le k$}, \\
\mathbb{E}[X_C^{(h)}] + 135 |C\cap R_h| \log(\ell) \cdot \ell^{-1} &\text{ if $h \le k - 6$}.
\end{cases} \end{equation*}
There is an intuitive reason as to why we define these two different bad events. In the case $h\leq k-6$, we are counting how many times $C$ is intersected by configurations that are much smaller than $C$. Hence the size of this intersection can be written as a sum of independent random variables of value at most $O(\ell^4)$ which is much smaller than the total size of the configuration $|C\cap R_h|$. Since the random variables are in a much smaller range, Chernoff bounds give much better concentration guarantees and we can afford a very small deviation from the expectation. In the other case, we do not have this property hence we need a bigger deviation to maintain a sufficiently low probability of failure. However, this does not hurt the statement of Lemma~\ref{lma:main-LLL} since we sum this bigger deviation only a constant number of times. With this intuition in mind, we claim the following. \begin{claim} For each $k$, $C\in\mathcal C^{(k)}$, and $h\le k$ we have \begin{equation*}
\mathbb{P}[B_C^{(h)}] \le \exp\left(- 2\frac{|C \cap R_h|}{\ell^9} - 18\log(\ell)\right) . \end{equation*} \end{claim}
\begin{proof} Consider first the case that $h \ge k - 5$. By a Chernoff bound (see Proposition~\ref{cor:chernoff}) with \begin{equation*}
\delta = 63\frac{|C\cap R_h| \log(\ell)}{\mathbb{E}[X_C^{(h)}]} \ge 1 \end{equation*} we get \begin{equation*}
\mathbb{P}[B_C^{(h)}] \le \exp\bigg(-\frac{\delta \mathbb{E}[X^{(h)}_C]}{3 |C\cap R_h|}\bigg) \le \exp(-21\log(\ell)) \le \exp\bigg(-2 \underbrace{\frac{|C\cap R_h|}{\ell^{9}}}_{\le 3/2} - 18\log(\ell)\bigg). \end{equation*} Now consider $h \le k - 6$. We apply again a Chernoff bound with \begin{equation*}
\delta = 135\frac{|C\cap R_h| \log(\ell)}{\ell \mathbb{E}[X_C^{(h)}]} \ge \frac{1}{\ell} . \end{equation*} This implies \begin{multline*}
\mathbb P[B_C^{(h)}]
\le \exp\left(-\frac{\min\{\delta,\delta^2\} \mathbb{E}[X^{(h)}_C]}{3 \cdot 3/2 \cdot \ell^4}\right)
\le \exp\left(-30\frac{|C\cap R_h| \log(\ell)}{\ell^6} \right) \\
\le \exp\left(-2 \frac{|C\cap R_h|}{\ell^9} - 18\log(\ell)\right) . \qedhere \end{multline*} \end{proof} \begin{proposition}[Lov\'asz Local Lemma (LLL)]\label{prop:LLL} Let $B_1, \dotsc, B_t$ be bad events, and let $G = (\{B_1,\dotsc,B_t\}, E)$ be a dependency graph for them, in which for every $i$, event $B_i$ is mutually independent of all events $B_j$ for which $(B_i, B_j)\notin E$. Let $x(B_i)$ for $1\le i \le t$ be values such that $0 < x(B_i) < 1$ and $\mathbb{P}[B_i]\le x(B_i) \prod_{(B_i,B_j)\in E} (1-x(B_j))$. Then with positive probability no event $B_i$ holds. \end{proposition} Let $k\in\{0,\dotsc,d\}$, $C\in\mathcal C^{(k)}$ and $h\le k$. For event $B_C^{(h)}$ we set \begin{equation*}
x(B_C^{(h)}) = \exp(-|C\cap R_h| / \ell^9 - 18\log(\ell)) . \end{equation*} We now analyze the dependencies of $B_C^{(h)}$. The event depends only on random variables $\mathcal{S}_A$ for groups $A$ that contain at least one player $i$ that has a configuration in $\mathcal C^{(h)}_i$ which overlaps with $C\cap R_h$. The number of such configurations (in particular, of such groups) is at most
$\ell |C\cap R_h|$ since the hypergraph is regular.
In each of these groups, we count at most $\log (n)$ players, each having $\ell$ configurations hence in total at most $\ell\cdot \log (n)$ configurations.
Each configuration $C'\in \mathcal{C}^{(h')}$ can only influence those events $B^{(h')}_{C''}$ where $C' \cap C'' \cap R_{h'} \neq \emptyset$. Since $|C'\cap R_{h'}|\leq 3/2\cdot \ell^4$ and since each resource appears in at most $\ell$ configurations, we see that each configuration can influence at most $3/2 \cdot \ell^5$ events.
Putting everything together, we see that the bad event $B_C^{(h)}$ is independent of all but at most \begin{equation*}
(\ell |C\cap R_h|) \cdot (\ell\cdot \log (n)) \cdot (3/2 \cdot \ell^5) = 3/2\cdot \ell^7 \cdot \log (n) |C\cap R_h| \leq |C\cap R_h|\ell^8 \end{equation*} other bad events.
We can now verify the condition for Proposition~\ref{prop:LLL} by calculating \begin{align*}
x(B_C^{(h)}) & \prod_{(B_C^{(h)}, B_{C'}^{(h')})\in E} (1 - x(B_{C'}^{(h')})) \\
&\ge \exp(-|C\cap R_h|/\ell^9 - 18\log(\ell)) \cdot (1 - \ell^{-18})^{|C\cap R_h|\ell^8} \\
&\ge \exp(-|C\cap R_h|/\ell^9 - 18\log(\ell)) \cdot \exp(- |C\cap R_h| / \ell^9) \\
&\ge \exp(-2|C\cap R_h|/\ell^9 - 18\log(\ell)) \ge \mathbb{P}[B^{(h)}_C] . \end{align*} By LLL we have that with positive probability none of the bad events happen. Let $k\in\{0,\dotsc,d\}$ and $C\in\mathcal C^{(k)}$. Then for $k - 5 \le h \le k$ we have \begin{equation*}
\ell^{h} X^{(h)}_C \le \ell^{h} \mathbb{E}[X_C^{(h)}] + 63 \ell^{h} |C\cap R_h|\log(\ell)
\le \ell^{h} \mathbb{E}[X_C^{(h)}] + 95 |C|\log(\ell) . \end{equation*} Moreover, for $h\le k-6$ it holds that \begin{equation*}
\ell^{h} X^{(h)}_C \le \ell^{h} \mathbb{E}[X_C^{(h)}] + 135 \ell^{h-1} |C\cap R_h|\log(\ell)
\le \ell^{h} \mathbb{E}[X_C^{(h)}] + 203 |C|\log(\ell) \cdot \ell^{-1} . \end{equation*} We conclude that, for any $0\leq j\leq k$, \begin{align*}
\sum_{j\leq h\le k} \sum_{K\in\mathcal K^{(h)}}
\ell^{h} |K \cap C \cap R_h|
&\le \sum_{j\leq h\le k} \ell^{h} \mathbb{E}[X^{(h)}_{C}] + 1000 \frac{(k-j + 1) + \ell}{\ell} |C| \log(\ell) \\
&\le \frac{1}{\ell} \sum_{j\leq h\le k} \ell^{h} \sum_{C'\in\mathcal C^{(h)}} |C'\cap C \cap R_h| + 1000 \frac{d + \ell}{\ell} |C| \log(\ell) . \end{align*} This proves Lemma~\ref{lma:main-LLL}.
\begin{remark}{\rm Since there are at most $\mathrm{poly}(n,m,\ell)$ bad events and each bad event $B$ has $\frac{x(B)}{1-x(B)}\le1/2$ (because $x(B)\le \ell^{-18}$), the constructive variant of LLL by Moser and Tardos~\cite{moser2010constructive} can be applied to find a selection of configurations such that no bad events occur in randomized polynomial time.} \end{remark}
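To make the constructive variant concrete, the following Python sketch illustrates a Moser and Tardos style resampling loop on an abstract instance; the data structures (\texttt{groups}, \texttt{bad\_events}) are hypothetical placeholders and not the notation of this paper.
\begin{verbatim}
import random

def moser_tardos_select(groups, bad_events, max_rounds=10**6):
    """Moser-Tardos style resampling (sketch).

    groups     : dict group id -> list of consistent sets to choose from.
    bad_events : list of pairs (violated, depends_on), where
                 violated(selection) -> bool evaluates the event on the
                 current selection and depends_on is the set of group ids
                 whose random choices the event reads.
    Returns a selection with no violated bad event, or None on failure.
    """
    # Initial sample: one consistent set per group, uniformly at random.
    selection = {g: random.choice(sets) for g, sets in groups.items()}
    for _ in range(max_rounds):
        violated = [ev for ev in bad_events if ev[0](selection)]
        if not violated:
            return selection
        # Resample only the groups that the first violated event depends on.
        _, depends_on = violated[0]
        for g in depends_on:
            selection[g] = random.choice(groups[g])
    return None
\end{verbatim}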
\subsection{Assignment of resources to configurations}\label{sec:reconstruction} In this subsection, we show how all the previously established properties allow us to find, in polynomial time, a good assignment of resources to the configurations $\mathcal{K}$ chosen as in the previous subsection. As in the previous subsection, we denote $\mathcal{K}_i^{(k)}=\{K_i\}$ if $K_i\in \mathcal{C}_i^{(k)}$ and $\mathcal{K}_i^{(k)}=\emptyset$ otherwise. We also define $\mathcal{K}^{(k)}=\bigcup_{i}\mathcal{K}_i^{(k)}$ and $\mathcal{K}^{(\geq k)}=\bigcup_{h\geq k}\mathcal{K}^{(h)}$. Finally we define the parameter \begin{equation*}
\gamma = 100.000 \frac{d+\ell}{\ell}\log(\ell) , \end{equation*} which will define how many times each resource can be assigned to configurations in an intermediate solution. Note that $d\le\log(n)/\log(\ell)$. By our choice of $\ell=300.000\log^3(n)$, we have that $\gamma \leq 310.000 \log \log (n)$. Lemma~\ref{lma:main-LLL} implies the following bound. For sake of brevity, the proof is deferred to Appendix~\ref{appendix_reconstruct}. \begin{claim} \label{cla:reconstruct} For any $k\geq 0$, any $0\leq j\leq k$, and any $C\in \mathcal{K}^{(k)}$ \begin{equation*}
\sum_{j\leq h\leq k}\sum_{K\in \mathcal{K}^{(h)}} \ell^{h}|K\cap C \cap R_h| \leq 2000\frac{d+\ell}{\ell}\log (\ell) |C| \end{equation*} \end{claim}
The main technical part of this section is the following lemma that is proved by induction. \begin{lemma} \label{lem:reconstruct} For any $j\geq 0$, there exists an assignment of resources of $R_j$ to configurations in $\mathcal{K}^{(\geq j)}$ such that no resource is taken more than $\gamma$ times and each configuration $C\in \mathcal{K}^{(k)}$ ($k\geq j$) receives at least \begin{equation*}
\left(1-\frac{1}{\log (n)} \right)^{2(k-j)}\ell^{k-j} |C\cap R_k|-\frac{3}{\gamma}\sum_{j\leq h\leq k} \sum_{K\in \mathcal{K}^{(h)}} \ell^{h-j}|K\cap C \cap R_h| \end{equation*} resources from $R_j$. \end{lemma}
Before proceeding to the proof, we first give intuition of why this is what we want to prove. Note that the term $\ell^{k-j}|C\cap R_k|$ is roughly equal to $\ell^{-j}|C|$ by the properties of the resource sets (precisely Lemma \ref{lma-size}). The second term \begin{equation*}
\sum_{j\leq h\leq k} \sum_{K\in \mathcal{K}^{(h)}} \ell^{h-j}|K\cap C \cap R_h| \end{equation*} can be shown to be \begin{equation*}
O\left(\ell^{-j}\frac{d+\ell}{\ell}\log (\ell) |C| \right)= O (\ell^{-j}\log \log (n) |C|) \end{equation*}
by Claim \ref{cla:reconstruct}. Hence by choosing $\gamma$ to be $\Theta (\log \log (n))$ we get that the bound in Lemma \ref{lem:reconstruct} will be $\Theta (\ell^{-j}|C|)$. At the end of the induction, we have $j=0$ which indeed implies that we have an assignment in which configurations receive \begin{equation*}
\Theta (\ell^{-0}|C|)=\Theta(|C|) \end{equation*} resources and such that each resource is assigned to at most $O (\log \log (n))$ configurations.
\begin{proof} We start from the biggest configurations and then iteratively reconstruct a good solution for smaller and smaller configurations. Recall that $d$ is the smallest integer such that $\mathcal{K}^{(\geq d)}$ is empty. Hence the base case $j=d$ is vacuously satisfied, since there is no configuration in $\mathcal{K}^{(\geq d)}$.
Now assume that we have a solution at level $j$, i.e. an assignment of resources to configurations in $\mathcal{K}^{(\geq j)}$ such that no resource is taken more than $\gamma$ times and each configuration $C\in \mathcal{K}^{(k)}$ such that $k\geq j$ receives at least \begin{equation*}
\left(1-\frac{1}{\log (n)} \right)^{2(k-j)}\ell^{k-j} |C\cap R_k|-\frac{3}{\gamma}\sum_{j\leq h\leq k} \sum_{K\in \mathcal{K}^{(h)}} \ell^{h-j}|K\cap C \cap R_h| \end{equation*} resources from $R_j$. We show that this implies a solution at level $j-1$ in the following way. First by Lemma~\ref{lma-good-solution}, this implies an assignment of resources of $R_{j-1}$ to configurations in $\mathcal{K}^{(\geq j)}$ such that each $C\in \mathcal{K}^{(k)}$ receives at least \begin{align*}
&\left(1-\frac{1}{\log (n)} \right)\ell \left(\ell^{k-j} \left(1-\frac{1}{\log (n)} \right)^{2(k-j)} |C\cap R_k|-\frac{3}{\gamma}\sum_{j\leq h\leq k} \sum_{K\in \mathcal{K}^{(h)}} \ell^{h-j}|K\cap C \cap R_h|\right)\\
&=\left(1-\frac{1}{\log (n)} \right)^{2(k-(j-1))-1} \ell^{k-(j-1)} |C\cap R_k|-\frac{3}{\gamma}\left(1-\frac{1}{\log (n)} \right)\sum_{j\leq h\leq k} \sum_{K\in \mathcal{K}^{(h)}} \ell^{h-(j-1)}|K\cap C \cap R_h|\\
&\geq \left(1-\frac{1}{\log (n)} \right)^{2(k-(j-1))-1}\ell^{k-(j-1)} |C\cap R_k|-\frac{3}{\gamma}\sum_{j\leq h\leq k} \sum_{K\in \mathcal{K}^{(h)}} \ell^{h-(j-1)}|K\cap C \cap R_h| \end{align*} resources and no resource of $R_{j-1}$ is taken more than $\gamma$ times. Note that we can apply Lemma \ref{lma-good-solution} since we have by Claim \ref{cla:reconstruct} and Lemma \ref{lma-size} \begin{align*}
&\left(1-\frac{1}{\log (n)} \right)^{2(k-j)}\ell^{k-j} |C\cap R_k|-\frac{3}{\gamma}\sum_{j\leq h\leq k} \sum_{K\in \mathcal{K}^{(h)}} \ell^{h-j}|K\cap C \cap R_h| \\
&\geq \frac{\ell^{k-j}}{e^2}|C\cap R_k| - \frac{3}{\gamma}2000\ell^{-j}\frac{d+\ell}{\ell}\log(\ell)|C|\\
&\geq \ell^{-j}|C|\left(\frac{1}{2e^2}-\frac{6000}{\gamma}\frac{d+\ell}{\ell}\log(\ell)\right)\\
&\geq \frac{\ell^{-j}|C|}{3e^2}>\frac{\ell^3}{1000} \end{align*}
Now consider configurations in $\mathcal{K}^{(j-1)}$ and proceed for them as follows. Give to each $C\in\mathcal{K}^{(j-1)}$ all the resources in $C\cap R_{j-1}$ except all the resources that appear in more than $\gamma$ configurations in $\mathcal{K}^{(j-1)}$. Since each deleted resource is counted at least $\gamma$ times in the sum $\sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C\cap R_{j-1}|$, we have that each configuration $C$ in $\mathcal{K}^{(j-1)}$ receives at least \begin{equation*}
|C\cap R_{j-1}|-\frac{1}{\gamma}\sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C\cap R_{j-1}| \end{equation*} resources and no resource is taken more than $\gamma$ times by configurations in $\mathcal{K}^{(j-1)}$. Notice that now every resource is taken no more than $\gamma$ times by configurations in $\mathcal{K}^{(\geq j)}$ and no more than $\gamma$ times by configurations in $\mathcal{K}^{(j-1)}$ which in total can sum up to $2\gamma$ times.
Therefore, to finish the proof, consider a resource $i\in R_{j-1}$. This resource is taken $b_i$ times by configurations in $\mathcal{K}^{(\geq j)}$ and $a_i$ times by configurations in $\mathcal{K}^{(j-1)}$. If $a_i+b_i \leq \gamma$, nothing needs to be done. Otherwise, denote by $O$ the set of problematic resources (i.e. resources $i$ such that $a_i+b_i>\gamma$). For every $i\in O$, select uniformly at random $a_i+b_i-\gamma$ configurations in $\mathcal{K}^{(\geq j)}$ that currently contain resource $i$ and delete the resource from these configurations. When this happens, each configuration $C\in \mathcal{K}^{(\geq j)}$ that contains $i$ has probability $(a_i+b_i-\gamma)/b_i$ of being selected to lose this resource. Hence the expected number of resources that $C$ loses in this process is
\begin{equation*}
\mu = \sum_{i\in O\cap C} \frac{a_i+b_i-\gamma}{b_i} \end{equation*} It is not difficult to prove the following claim. However, for better clarity we defer its proof to appendix \ref{appendix_reconstruct}. \begin{claim} For any $C\in \mathcal{K}^{(\geq j)}$, \label{cla:reconstruct_mu} \begin{equation*}
\frac{1}{\gamma^2}\sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}\cap O|\leq \mu \leq \frac{2}{\gamma} \sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}\cap O| \end{equation*} \end{claim}
Assume first that $\mu \leq \frac{|C\cap R_k|}{10^{12}\log^3 (n)}$. Note that $C$ cannot lose more than $\sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}\cap O|$ resources in any case. Therefore, by the assumption on $\mu$, and since \begin{equation*}
\mu\geq \frac{1}{\gamma^2}\sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}\cap O|\ , \end{equation*} we have that \begin{equation*}
\sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}\cap O|\leq \frac{\gamma^2}{10^{12}\log^3 (n)} |C\cap R_k|\leq \frac{10^{11} \log^2 \log (n)}{10^{12}\log^3 (n)}|C\cap R_k|\leq \frac{1}{\log (n)}|C\cap R_k|\ . \end{equation*}
Therefore $C$ loses at most $|C\cap R_k|/\log (n)$ resources. Otherwise, we have that \begin{equation*}
\mu > \frac{|C\cap R_k|}{10^{12}\log^3 (n)} \geq \frac{\ell^3}{2\cdot 10^{12} \log^3 (n)} \geq 200\log(n) \end{equation*} by Lemma~\ref{lma-size}. Hence, denoting by $X$ the number of deleted resources in $C$, we have that \begin{equation*}
\mathbb P\left(X\geq \frac{3}{2}\mu \right) \leq \exp\left(-\frac{\mu}{12} \right)\leq \frac{1}{n^{10}}. \end{equation*} With high probability no configuration loses more than \begin{equation*}
\frac{3}{2}\mu \leq \frac{3}{\gamma}\sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}\cap O|\leq \frac{3}{\gamma}\sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}| \end{equation*} resources. Hence each configuration $C\in \mathcal{K}^{(\geq j)}$ ends with at least \begin{align*}
&\left(1-\frac{1}{\log (n)} \right)^{2(k-(j-1))-1}\ell^{k-(j-1)} |C\cap R_k|-\frac{3}{\gamma}\sum_{j\leq h\leq k} \sum_{K\in \mathcal{K}^{(h)}} \ell^{h-(j-1)}|K\cap C \cap R_h|\\
&-\frac{1}{\log (n)}\left(1-\frac{1}{\log (n)} \right)^{2(k-(j-1))-1}\ell^{k-(j-1)} |C\cap R_k| - \frac{3}{\gamma}\sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}|\\
&\geq \left(1-\frac{1}{\log (n)} \right)^{2(k-(j-1))}\ell^{k-(j-1)} |C\cap R_k|-\frac{3}{\gamma}\sum_{j-1\leq h\leq k} \sum_{K\in \mathcal{K}^{(h)}} \ell^{h-(j-1)}|K\cap C \cap R_h| \end{align*} resources which concludes the proof. \end{proof} \begin{corollary} \label{reconstruct_corollary}
There exists an assignment of resources $R$ to $\mathcal{K}$ such that each configuration $C\in \mathcal{K}$ receives at least $\left\lfloor |C|/(100\gamma) \right\rfloor$ resources. Moreover, this assignment can be found in polynomial time. \end{corollary} \begin{proof} Lemma \ref{lem:reconstruct} for $j=0$ and Claim \ref{cla:reconstruct} together imply that we can assign at least \begin{equation*}
\frac{|C|}{2e^2}-\frac{6000}{100.000}|C|\geq \frac{|C|}{100} \end{equation*}
resources to every $C\in \mathcal{K}$ such that no resource in $R$ is assigned more than $\gamma$ times. In particular, we can fractionally assign at least $|C| / (100\gamma)$ resources to each $C\in \mathcal{K}$ such that no resource is assigned more than once. By integrality of the bipartite matching polytope, the corollary follows. \end{proof}
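The rounding step in the corollary can be made explicit by a maximum-flow computation. The sketch below (using the \texttt{networkx} library, with illustrative input formats) assigns each configuration its demanded number of resources while using every resource at most once, assuming such an assignment exists fractionally; Edmonds--Karp on integer capacities returns an integral flow, which mirrors the integrality argument above.
\begin{verbatim}
import networkx as nx
from networkx.algorithms.flow import edmonds_karp

def integral_assignment(configs, demand, resources):
    """Round the fractional guarantee into an integral assignment.

    configs  : dict config id -> set of resources it contains.
    demand   : dict config id -> number of resources it must receive,
               e.g. floor(|C| / (100 * gamma)).
    resources: iterable of resource ids; each may be used at most once.
    Returns config id -> list of assigned resources, or None if infeasible.
    """
    G = nx.DiGraph()
    G.add_node("s")
    G.add_node("t")
    for c, need in demand.items():
        G.add_edge("s", ("cfg", c), capacity=need)
        for r in configs[c]:
            G.add_edge(("cfg", c), ("res", r), capacity=1)
    for r in resources:
        G.add_edge(("res", r), "t", capacity=1)
    # Edmonds-Karp returns an integral flow for integral capacities.
    value, flow = nx.maximum_flow(G, "s", "t", flow_func=edmonds_karp)
    if value < sum(demand.values()):
        return None
    return {c: [r for r in configs[c] if flow[("cfg", c)][("res", r)] > 0]
            for c in demand}
\end{verbatim}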
\section{Further connections between hypergraph matching and Santa Claus} \label{sec:reduction santa claus} In Section~\ref{sec:hypergraph problem} we essentially prove that every regular (non-uniform) hypergraph has an $\alpha$-relaxed perfect matching for some $\alpha=O(\log \log (n))$, assuming that all hyperedges contain at least $\alpha$ resources. This means that we give a sufficient condition for a hypergraph to have a good relaxed matching. A natural optimization problem that arises from this is the following: Given an arbitrary unweighted hypergraph, which is not necessarily regular and whose hyperedges do not necessarily all contain many resources, what is the minimum $\alpha$ such that there exists an $\alpha$-relaxed perfect matching in this hypergraph?
In this section, we investigate the relationship between this problem and the Santa Claus problem with linear utility functions. Formally, the two problems considered are precisely the following.
\paragraph{Matching in general hypergraphs.} Consider a (non-uniform) hypergraph $\mathcal H=(P\cup R, \mathcal{C})$ with unit weights, that is, $w_{j,C}=1$ for all $j,C$ such that $j\in C$. The problem is to find the minimum $\alpha$ such that $\mathcal H$ has an $\alpha$-relaxed perfect matching (and output such a matching).
\paragraph{The Santa Claus problem with linear utility functions.} In this case, each player $i$ has an arbitrary linear utility function $f_i$. We note that no relationship is assumed between the utility functions of different players. The goal is to assign resources to players so as to maximize the minimum utility among players. As mentioned in the introduction, the best approximation algorithm for this problem is an $O(n^\epsilon)$-approximation running in time $O(n^{1/\epsilon})$.
We show by a straightforward reduction that a $c$-approximation for the Santa Claus problem immediately implies a $c$-approximation for the matching problem. Interestingly, there is also a close connection in the opposite direction. \begin{theorem}\label{thm:reduction} A $c$-approximation algorithm to the hypergraph matching problem in general hypergraphs yields an $O((c\log^* (n))^2)$-approximation algorithm to the Santa Claus problem. \end{theorem}
We mention that we implicitly refer to polynomial time algorithms even when not specified. All the proofs of this section are deferred to Appendix \ref{appendix:reduction}. We also mention that Theorem~\ref{thm:reduction} implies that any sub-polynomial approximation to the matching problem would be a significant improvement of the state-of-the-art for Santa Claus with arbitrary linear utility functions.
\paragraph{Remark.} Since the hypergraphs considered here might be non-regular and some hyperedges might contain very few resources, our result in Section \ref{sec:hypergraph problem} does not imply any approximation for the optimization problem considered here. Our reduction in this section makes crucial use of small hyperedges containing only one resource. This shows that handling the small hyperedges is one of the core difficulties in this case.
Finally, recall that in the restricted assignment setting with a common utility function $f$, each player $i$ only has access to a subset $R_i\subseteq R$ of the resources, and the goal is to find an allocation $(S_i)_{i\in P}$ of the resources such that \begin{equation*}
\min_{i\in P} f(S_i\cap R_i)
\end{equation*} is maximized. A very natural extension of this problem is to consider a utility function $f$ that is not necessarily linear. For instance, $f$ can be submodular. We show that our result also implies an $O(\log \log (|R|))$-approximation when $f$ is submodular.
\section{Conclusion}
We investigated the submodular Santa Claus problem in the restricted assignment case and gave an $O(\log \log (n))$-approximation for this problem. This represents a significant generalization of the results for the linear case. The submodularity of the utility function introduced new obstacles compared to the linear case. These difficulties are captured by the fact that we need to solve a new matching problem in non-uniform hypergraphs that generalizes the case of uniform hypergraphs, which had already been studied in the context of the restricted Santa Claus problem with a linear utility function. Under the assumption that the hypergraph is regular and all edges are sufficiently large, we proved that there is always an $\alpha$-relaxed perfect matching for $\alpha = O(\log \log (n))$. This result generalizes the work of Bansal and Sviridenko~\cite{BansalSrividenko}. It remains an intriguing question whether one can get $\alpha = O(1)$ as it is possible in the uniform case. One idea (similar to Feige's proof in the uniform case~\cite{Feige}) would be to view our proof as a sparsification theorem and to apply it several times. Given a set of hyperedges such that every player has $\ell$ hyperedges and every resource appears in no more than $\ell$ hyperedges, one would like to select $\textrm{polylog}(\ell)$ hyperedges for each player such that all resources appear in no more than $\textrm{polylog}(\ell)$ of the selected hyperedges. It is not difficult to see that our proof actually achieves this when $\ell=\textrm{polylog}(n)$. However, repeating this after the first step seems to require new ideas since our bound on the number of times each resource is taken is $\Omega \left(\frac{d+\ell}{\ell}\log(\ell) \right)$ where $\ell$ is the current sparsity and $d$ the number of configuration sizes. For the first step, we conveniently have that $d=O(\log (n))=O(\ell)$, but after the first sparsification this may no longer be true.
We also provided a reduction from the Santa Claus problem with arbitrary linear utility functions to the hypergraph matching problem in general hypergraphs. This shows that finding the smallest $\alpha$ such that a hypergraph has an $\alpha$-relaxed perfect matching (or approximating it) is a very non-trivial problem (even within a sub-polynomial factor). Another interesting question is to improve the $O((\log^*(n))^2)$ factor in the reduction to a constant.
\appendix
\section{Concentration bounds} \begin{proposition}[Chernoff bounds (see e.g.~\cite{mitzenmacher2017probability})] \label{chernoff} Let $X=\sum_i X_i$ be a sum of independent random variables such that each $X_i$ can take values in a range $[0,1]$. Define $\mu=\mathbb E(X)$. We then have the following bounds
\begin{equation*}
\mathbb P \left(X\geq (1+\delta)\mathbb E(X) \right) \leq \exp\left(-\frac{\min\{\delta,\delta^2\} \mu}{3} \right) \end{equation*} for any $\delta>0$. \begin{equation*}
\mathbb P \left(X\leq (1-\delta)\mathbb E(X) \right) \leq \exp\left(-\frac{\delta^2 \mu}{2} \right) \end{equation*} for any $0<\delta<1$. \end{proposition}
The following proposition follows immediately from Proposition \ref{chernoff} by applying it to $X'=X/a$. \begin{proposition} \label{cor:chernoff} Let $X=\sum_i X_i$ be a sum of independent random variables such that each $X_i$ can take values in a range $[0,a]$ for some $a>0$. Define $\mu=\mathbb E(X)$. We then have the following bounds
\begin{equation*}
\mathbb P \left(X\geq (1+\delta)\mathbb E(X) \right) \leq \exp\left(-\frac{\min\{\delta,\delta^2\} \mu}{3a} \right) \end{equation*} for any $\delta>0$. \begin{equation*}
\mathbb P \left(X\leq (1-\delta)\mathbb E(X) \right) \leq \exp\left(-\frac{\delta^2 \mu}{2a} \right) \end{equation*} for any $0<\delta<1$. \end{proposition}
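As an informal sanity check (not part of the formal argument), one can compare the upper-tail bound of Proposition~\ref{cor:chernoff} with the empirical tail of a simulated sum of bounded random variables; the parameters in the Python sketch below are arbitrary.
\begin{verbatim}
import math
import random

def chernoff_upper_bound(mu, delta, a):
    # Upper-tail bound of Proposition "cor:chernoff" for variables in [0, a].
    return math.exp(-min(delta, delta ** 2) * mu / (3 * a))

def empirical_tail(n_vars, a, delta, trials=20000):
    # X is a sum of n_vars independent Uniform[0, a] variables, so mu = n_vars * a / 2.
    mu = n_vars * a / 2.0
    threshold = (1 + delta) * mu
    hits = sum(
        1
        for _ in range(trials)
        if sum(random.uniform(0, a) for _ in range(n_vars)) >= threshold
    )
    return hits / trials, chernoff_upper_bound(mu, delta, a)

if __name__ == "__main__":
    empirical, bound = empirical_tail(n_vars=200, a=1.0, delta=0.3)
    print("empirical tail:", empirical, " Chernoff bound:", bound)
\end{verbatim}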
For the matching problem in regular hypergraphs with sufficiently large hyperedges, we can assume that $\ell =300.000\log^{3}(n)$ at a constant loss:
If $\ell$ is smaller than $300.000\log^{3}(n)$, then we simply duplicate all hyperedges an appropriate number of times. If $\ell$ is larger, we select for each player $300.000\log^{3}(n)$ configurations uniformly at random from his configurations. The expected number of times a resource appears in a configuration with this process is at most $300.000\log^{3}(n)$. Hence, the probability that a resource appears more than $600.000\log^{3}(n)$ times is at most $\exp \left(- 1/3 \cdot 100.000\log^{3}(n)\right)\leq 1/n^{10}$ by a standard Chernoff bound (see Proposition~\ref{cor:chernoff}). Hence with high probability this event does not happen for any resource. We now have that each player has $300.000\log^{3}(n)$ configurations and each resource does not appear in more than $600.000\log^{3}(n)$ configurations. Taking for each configuration $C$ only $\lfloor |C| / 2 \rfloor$ resources we can reduce the latter bound to $300.000\log^3(n)$ as well: The previous argument gives a half-integral matching of resources to configurations satisfying the mentioned guarantee. Then by integrality of the bipartite matching polytope there is also an integral one.
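The reduction above can be summarised by the following sketch (hypothetical input format; the duplication and subsampling rules are the ones discussed in the previous paragraph, and the final halving of the configurations is omitted).
\begin{verbatim}
import math
import random

def regularize_sparsity(player_configs, n):
    """Sketch of the reduction to ell = 300000 * log^3(n).

    player_configs: dict player -> list of configurations (sets of resources).
    Players with too few configurations get duplicated copies (which keeps the
    hypergraph regular); players with too many keep a uniform random sample.
    The Chernoff argument in the text shows that w.h.p. no resource then
    appears in more than twice the target number of configurations.
    """
    target = int(300000 * math.log(n) ** 3)   # natural log, for illustration
    out = {}
    for player, configs in player_configs.items():
        if len(configs) <= target:
            reps = -(-target // len(configs))   # ceiling division
            out[player] = configs * reps        # duplicate every configuration
        else:
            out[player] = random.sample(configs, target)
    return out
\end{verbatim}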
\section{Omitted proofs from Section~\ref{sec:reduction to hypergraph}}\label{appendix_lp} \subsection{Solving the configuration LP} The goal of this section is to prove Theorem~\ref{thm:config-LP}. We consider the dual of the configuration LP (after adding an artificial minimization direction $\min 0^T x$). \begin{align*}
\max \sum_{i\in P} y_i &- \sum_{j\in R} z_j \\
\sum_{j\in C} z_j &\ge y_i \quad \text{ for all } i\in P, C\in\mathcal{C}(i, T) \\
y_i, z_j &\ge 0 \end{align*} Observe that the optimum of the dual is either $0$ obtained by $y_i = 0$ and $z_j = 0$ for all $i,j$ or it is unbounded: If it has any solution with $\sum_{i\in P} y_i - \sum_{j\in R} z_j > 0$, the variables can be scaled by an arbitrary common factor to obtain any objective value. If it is unbounded, this can therefore be certified by providing a feasible solution $y, z$ with \begin{equation}
\sum_{i\in P} y_i - \sum_{j\in R} z_j \ge 1 \tag{$*$}. \end{equation} We approximate the dual in the variant with constraint $(*)$ instead of a maximization direction using the ellipsoid method. The separation problem of the dual is as follows. Given $z_j$, $y_i$ find a player $i$ and set $C$ with $g(C\cap \Gamma_i) \ge T$ such that $\sum_{j\in C} z_j < y_i$.
To this end, consider the related problem of maximizing a monotone submodular function subject to knapsack constraints. In this problem we are given a monotone submodular function $g$ over a ground set $E$ and the goal is to maximize $g(E')$ over all $E'\subseteq E$ with $\sum_{j\in E'} a_j \le b$. Here $a_j \ge 0$ is a weight associated with $j\in E$ and $b$ is a capacity. For this problem Sviridenko gave a polynomial time $(1 - 1/e)$-approximation algorithm~\cite{DBLP:journals/orl/Sviridenko04}. It is not hard to see that this can be used to give a constant approximation for the variation where strict inequality is required in the knapsack constraint: Assume w.l.o.g.\ that $0 < a_j < b$ for all $j$. Then run Sviridenko's algorithm to find a set $E'$ with $\sum_{j\in E'} a_j \le b$. Notice that $g(E')$ is at least $(1 - 1/e)\mathrm{OPT}$, also when $\mathrm{OPT}$ is the optimal value with respect to strict inequality. If $E'$ contains only one element then equality in the knapsack constraint cannot hold and we are done. Otherwise, split $E'$ into two arbitrary non-empty parts $E''$ and $E'''$. It follows that $\sum_{j\in E''} a_j < b$ and $\sum_{j\in E'''} a_j < b$. Moreover, either $g(E'') \ge g(E') / 2$ or $g(E''') \ge g(E') / 2$, since $g(E'') + g(E''') \ge g(E')$ by submodularity. Hence, this method yields a $c$-approximation for $c = (1 - 1/e)/2$. We now demonstrate how to use this to find a $c$-approximation to the configuration LP.
Let $\mathrm{OPT}$ be the optimum of the configuration LP. It suffices to solve the problem of finding for a given $T$ either a solution of value $c T$ or deciding that $T > \mathrm{OPT}$. This can then be embedded into a standard dual approximation framework. We run the ellipsoid method on the dual of the configuration LP with objective value $c T$ and constraint $(*)$. This means we have to solve the separation problem. Let $z, y$ be the variables at some state. We first check whether $(*)$ is satisfied, that is $\sum_{i\in P} y_i - \sum_{j\in R} z_j \ge 1$. If not, we return this inequality as a separating hyperplane. Hence, assume $(*)$ is satisfied and our goal is to find a violated constraint of the form $\sum_{j\in C} z_j < y_i$ for some $i\in P$ and $C\in \mathcal{C}(i, T)$. For each player $i$ we maximize $f$ over all $S\subseteq \Gamma_i$ with $\sum_{j\in S} z_j < y_i$. We use the variant of Sviridenko's algorithm described above to obtain a $c$-approximation for each player. If for one player $i$ the resulting set $S$ satisfies $f(S) \ge c T$, then we have found a separating hyperplane to provide to the ellipsoid method. Otherwise, we know that $f(S) < T$ for all players $i$ and $S\subseteq\Gamma_i$ with $\sum_{j\in S} z_j < y_i$. In other words, for all players $i$ and all $C\in\mathcal{C}(i,T)$ it holds that $\sum_{j\in C} z_j \ge y_i$, i.e., $z, y$ is feasible for objective value $T$ and hence $T > \mathrm{OPT}$. If the ellipsoid method terminates without concluding that $T > \mathrm{OPT}$, we can derive a feasible primal solution with objective value $cT$: The configurations constructed for separating hyperplanes suffice to prove that the dual is bounded. These configurations can only be polynomially many by the polynomial running time of the ellipsoid method. Hence, when restricting the primal to these configurations it must remain feasible. To obtain the primal solution we now only need to solve a polynomial size linear program. This concludes the proof of Theorem~\ref{thm:config-LP}.
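The splitting trick for the strict-inequality variant can be illustrated as follows. The Python sketch uses a plain greedy routine as a stand-in for Sviridenko's algorithm (which additionally enumerates small starting solutions to reach the $(1-1/e)$ guarantee); only the splitting step at the end mirrors the argument above.
\begin{verbatim}
def greedy_knapsack_submodular(g, ground, a, b):
    """Greedy stand-in for maximizing a monotone submodular g(E')
    subject to sum_{j in E'} a[j] <= b.  (Sviridenko's algorithm adds
    partial enumeration to reach the (1 - 1/e) guarantee; omitted here.)"""
    chosen, weight = set(), 0.0
    remaining = set(ground)
    while True:
        base = g(frozenset(chosen))
        best, best_gain = None, 0.0
        for e in remaining:
            if weight + a[e] <= b:
                gain = g(frozenset(chosen | {e})) - base
                if gain > best_gain:
                    best, best_gain = e, gain
        if best is None:
            return chosen
        chosen.add(best)
        weight += a[best]
        remaining.remove(best)

def strict_variant(g, ground, a, b):
    """Variant requiring strict inequality sum a[j] < b, via the split trick:
    if the chosen set exhausts the budget exactly, split it into two
    non-empty halves and keep the better one (losing a factor of 2)."""
    E = list(greedy_knapsack_submodular(g, ground, a, b))
    if len(E) <= 1 or sum(a[e] for e in E) < b:
        return set(E)  # a single element cannot meet b when 0 < a[j] < b
    E1, E2 = set(E[:len(E) // 2]), set(E[len(E) // 2:])
    return E1 if g(frozenset(E1)) >= g(frozenset(E2)) else E2
\end{verbatim}
For instance, a linear function $g(S)=\sum_{j\in S} w_j$ given by a weight dictionary \texttt{w} would be passed as \texttt{lambda S: sum(w[j] for j in S)}.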
\subsection{Clusters} This section is devoted to prove Lemma~\ref{lem:config-sample}. The arguments are similar to those used in~\cite{BansalSrividenko}.
\begin{lemma} \label{lem:clusters} Let $x^*$ be a solution to the configuration LP of value $T^*$. Then $x^*$ can be transformed into some $x'_{i,C} \ge 0$ for $i\in P$, $C\in\mathcal{C}_t(i, T^*)$ which satisfies the following: there is a partition of the players into clusters $K_1\cup\cdots \cup K_k \cup Q = P$ such that \begin{enumerate}
\item any thin resource $j$ is fractionally assigned at most once, that is,
\begin{equation*}
\sum_{i\in P} \sum_{C\in \mathcal{C}_t(i,T^*):j\in C}x'_{i, C} \le 1
\end{equation*} We say that the \textit{congestion} on resource $j$ is at most 1.
\item every cluster $K_j$ gets at least $1/2$ unit of thin configurations in $x'$, that is, \begin{equation*}
\sum_{i\in K_j} \sum_{C\in \mathcal{C}_t(i,T^*)} x'_{i, C} \ge 1/2 ; \end{equation*}
\item given any $i_1\in K_1, i_2\in K_2,\dotsc,i_k\in K_k$
there is a matching of fat resources to players
$P\setminus\{i_1,\dotsc,i_k\}$ such that each of these players $i$ gets a unique fat resource $j\in\Gamma_i$. \end{enumerate} \end{lemma} The role of the set of players $Q$ in the lemma above is that each of them gets one fat resource for certain. \begin{proof} We first transform the solution $x^*$ as follows. For every configuration $C$ (for player $i$) that contains at least one fat resource and such that $x^*_{i,C}>0$, we select arbitrarily one of these fat resources $j$ and we set $x^*_{i,\{j\}}=x^*_{i,C}$ and then we set $x^*_{i,C}=0$. It is clear that this does not increase the congestion on resources and now every configuration that has non-zero value is either a thin configuration or a singleton containing one fat resource. Therefore we can consider the bipartite graph $G$ formed between the players and the fat resources where there is an edge between player $i$ and fat resource $j$ if the corresponding configuration $C=\{j\}$ is of non zero value (i.e. $x^*_{i,C}>0$). The value of such an edge will be exactly the value $x^*_{i,C}$. We now make G acyclic by doing the following operation until there exists no cycle anymore. Pick any cycle (which must have even length since the graph is bipartite) and increase the coordinate of $x^*$ corresponding to every other edge in the cycle by a small constant. Decrease the value corresponding to the remaining edges of the cycle by the same constant. This ensures that fat resources are still (fractionally) taken at most once and that the players still have one unit of configurations fractionally assigned to them. We continue this until one of the edge value becomes 0 or 1. If an edge becomes 0, delete that edge and if it becomes 1, assign the corresponding resource to the corresponding player forever. Then delete the player and the resource from the graph and add the player to the cluster $Q$. By construction, every added player to $Q$ is assigned a unique fat resource. Notice that when we stop, each remaining player still has at least 1 unit of configurations assigned to him and every fat resource is still (fractionally) taken at most once. Hence we get a new assignment vector where the assignments of fat resources to players form a forest. We also note that the congestion on thin resources did not increase during this process (it actually only decreased either when we replace fat configurations by a singleton and when players are put into the set $Q$ and deleted from the instance). We show below how to get the clusters for any tree in the forest. \begin{enumerate} \item If the tree consists of a single player, then it trivially forms its own cluster. By feasibility of the original solution $x^*$, condition 2 of the lemma holds.
\item If there is a fat resource that has degree 1, assign it to its player, add the player to $Q$ and delete both the player and resource. Continue this until every resource has a degree of at least 2. This step adds players to cluster $Q$. By construction, every added player is assigned a unique fat resource.
\item While there is a resource of degree at least 3, we perform the following operation. Root the tree containing such a resource at an arbitrary player. Consider a resource $j$ of degree at least 3 such that the subtree rooted at this resource contains only resources of degree 2. Because this resource must have at least 2 children in the tree $i_1,i_2,\ldots$ (which are players) and because \begin{equation*}
\sum_{i\in P} \sum_{C : j\in C} x^*_{i,C}\leq 1, \end{equation*} it must be that one of the children (say $i_1$) satisfies $x^*_{i_1,\{j\}}\leq 1/2$. We then delete the edge $(j,i_1)$ in the tree and set $x^*_{i_1,\{j\}}$ to 0.
\item Every resource now has degree exactly 2. We form a cluster for each tree in the forest. The cluster will contain the players and fat resources in the tree. We note that in every tree, only the player at the root lost at most $1/2$ unit of a fat resource by the previous step in the construction. By the degree property of resources and because the graph contains no cycle, it must be that in each cluster $K$ we have $|R(K)|=|P(K)|-1$ where $|R(K)|$ is the number of resources in the cluster and $|P(K)|$ the number of players. Because each resource is assigned at most once, and because only one player in the cluster lost at most $1/2$ unit of a fat resource, it must be that the cumulative amount of thin configurations assigned to players in $K$ is at least \begin{equation*}
|P(K)|-|R(K)|-1/2=1/2. \end{equation*} This gives the second property of the lemma. For the third property, notice that for any choice of player $i\in K$, we can root the tree corresponding to the cluster $K$ at the player $i$ and assign all the fat resources in $K$ to their only child in the tree (they all have degree 2). This gives the third property of the lemma.
As each of these steps individually maintained a congestion of at most $1$ on every thin resource, we indeed get a new solution $x'$ and the associated clusters with the required properties. \end{enumerate} \end{proof}
Lemma~\ref{lem:clusters} implies that for each cluster we need to cover only one player with a thin configuration. Then the remaining players can be covered with fat resources. We will now replace $x'$ by a solution $x''$ which takes slightly worse configurations $\mathcal{C}_t(i, T^*/5)$, but satisfies (2) in Lemma~\ref{lem:clusters} with $2$ instead of $1/2$. This can be achieved by splitting each configuration $C\in \mathcal{C}_t(i, T^*)$ in $4$ disjoint parts $C_1, C_2, C_3, C_4\in \mathcal{C}_t(i, T^* / 5)$. Let $C_1 \subseteq C$ with $f(C_1) \ge T^* / 5$ minimal in the sense that $f(C_1 \setminus \{j\}) < T^* / 5$ for all $j\in C_1$. Let $j_1 \in C_1$. By submodularity and because $j_1$ is thin it holds that \begin{equation*}
f(C \setminus C_1) \ge f(C) - f(C_1 \setminus \{j_1\}) - f(\{j_1\}) \ge 4T^* / 5 - T^*/100 . \end{equation*} Hence, in the same way we can select $C_2\subseteq C\setminus C_1$, $C_3\subseteq C\setminus (C_1 \cup C_2)$ and $C_4\subseteq C\setminus (C_1 \cup C_2 \cup C_3)$. We now augment $x'$ to $x''$ by initializing $x''$ with $0$ and then for each $i$ and $C\in\mathcal{C}_t(i,T^*)$ increasing $x''_{i, C_1}$, $x''_{i, C_2}$, $x''_{i, C_3}$, and $x''_{i, C_4}$ by $x'_{i, C}$. Here $C_1, C_2, C_3, C_4 \in \mathcal{C}_t(i, T^* / 5)$ are the configurations derived from $C$ by splitting it as described above.
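The splitting of a thin configuration into the four parts $C_1,\dotsc,C_4$ can be carried out greedily; the following Python sketch (with $f$ given as a set-function oracle) mirrors the minimality argument above.
\begin{verbatim}
def split_configuration(f, C, T_star):
    """Carve up to four disjoint parts of value at least T*/5 out of C.

    f : oracle, f(frozenset of resources) -> value (monotone submodular).
    C : thin configuration (set of resources) with f(C) >= T*.
    """
    parts, rest = [], set(C)
    for _ in range(4):
        part = set()
        for j in list(rest):                      # grow until value T*/5 ...
            if f(frozenset(part)) >= T_star / 5:
                break
            part.add(j)
        if f(frozenset(part)) < T_star / 5:
            break   # by the argument above this cannot happen for 4 parts
        for j in list(part):                      # ... then prune to minimality
            if f(frozenset(part - {j})) >= T_star / 5:
                part.remove(j)
        parts.append(part)
        rest -= part
    return parts
\end{verbatim}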
Finally, we sample for each cluster some $\ell \geq 12\log(n)$ many configurations with the distribution of $x''$ to obtain the statement of Lemma~\ref{lem:config-sample} which we restate for convenience. \begin{customthm}{\ref{lem:config-sample}} (restated)
Let $\ell \ge 12\log (n)$.
Given a solution of value $T^*$ for the configuration LP
in randomized polynomial time we can find a partition of the players into clusters $K_1\cup\cdots \cup K_k\cup Q = P$ and multisets of configurations
$\mathcal{C}_h \subseteq \bigcup_{i\in K_h} \mathcal{C}_t(i, T^*/5)$, $h=1,\dotsc,k$, such that
\begin{enumerate}
\item $|\mathcal{C}_h| = \ell$ for all $h=1,\dotsc,k$ and
\item Each small resource appears in at most $\ell$ configurations of $\bigcup_h \mathcal{C}_h$.
\item given any $i_1\in K_1, i_2\in K_2,\dotsc,i_k\in K_k$
there is a matching of fat resources to players
$P\setminus\{i_1,\dotsc,i_k\}$ such that each of these players $i$ gets a unique fat resource $j\in\Gamma_i$.
\end{enumerate} \end{customthm} \begin{proof}
We start with the clusters obtained with Lemma~\ref{lem:clusters} and the solution $x''$ described above. Recall that
\begin{equation*}
\sum_{i\in K_h} \sum_{C\in C_t(i,T^*/5)} x''_{i,C} \geq 2
\end{equation*}
for each cluster $K_h$. We assume w.l.o.g.\ that equality holds
by reducing some variables $x''_{i, C}$. Clearly, each resource then
still has a total congestion of at most one.
For each cluster $K_h$, we sample a configuration belonging to a player of this cluster according to the probability distribution given by the values $\{x''_{i,C}/2 \}_{i\in K_h, C\in \mathcal{C}_t (i,T^*/5)}$.
By the assumption of equality stated above
this indeed defines a probability distribution. We repeat this process $\ell$ times. We first note that for one iteration, each resource
is in expectation contained in
\begin{equation*}
\sum_{i\in P} \sum_{C\in \mathcal{C}(i,T^*/5):j\in C} x''_{i,C}/2 \leq 1/2
\end{equation*} selected configurations. Hence, in expectation, each resource is contained in at most $\ell/2$ selected configurations after the $\ell$ iterations. By a standard Chernoff bound (see Proposition \ref{chernoff}), we have that with probability at most
\begin{equation*}
\exp \left( -\ell/6\right)\leq 1/n^2
\end{equation*} a resource is contained in more than $\ell$ configurations. By a union bound, it holds that all resources are contained in at most $\ell$ selected configurations with high probability. \end{proof}
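The sampling step of the proof can be written as follows (a sketch; configurations are assumed to be hashable, e.g.\ frozensets, and the values $x''_{i,C}$ are assumed to sum to exactly $2$ over each cluster).
\begin{verbatim}
import random

def sample_cluster_configurations(x, clusters, ell):
    """For each cluster, draw ell configurations with replacement
    according to the distribution {x[(i, C)] / 2}.

    x        : dict (player, configuration) -> value x''_{i,C} (thin configs).
    clusters : dict cluster id -> set of players in the cluster.
    ell      : number of samples per cluster (>= 12 log n in the lemma).
    """
    sampled = {}
    for h, players in clusters.items():
        support = [(i, C) for (i, C) in x if i in players]
        weights = [x[(i, C)] / 2.0 for (i, C) in support]
        draws = random.choices(support, weights=weights, k=ell)
        sampled[h] = [C for (_, C) in draws]
    return sampled
\end{verbatim}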
\section{Omitted proofs from Section~\ref{sec:sequence}}\label{appendix_sequence} \begin{customthm}{\ref{lma-size}}(restated) Consider Random Experiment~\ref{exp:sequence} with $\ell\geq 300.000\log^{3} (n)$. For any $k\geq 0$ and any $C\in\mathcal{C}^{(\geq k)}$ we have
\begin{equation*}
\frac{1}{2} \ell^{-k}|C| \le |R_k \cap C| \le \frac{3}{2} \ell^{-k}|C|
\end{equation*} with probability at least $1-1/n^{10}$. \end{customthm} \begin{proof}
The lemma trivially holds for $k=0$.
For $k>0$, by assumption $C\in\mathcal{C}^{(\geq k)}$ hence $|C|\geq \ell^{k+3}$. Since each resource of $R=R_0$ survives in $R_k$ with probability $\ell^{-k}$ we clearly have that in expectation
\begin{equation*}
\mathbb E(|R_k\cap C|) = \ell^{-k}|C|
\end{equation*}
Hence the random variable $X=|R_k\cap C|$ is a sum of independent variables of value either $0$ or $1$ and such that $\mathbb E (X)\geq \ell^3$. By a standard Chernoff bound (see Proposition \ref{cor:chernoff}), we get
\begin{equation*}
\mathbb P\left(X\notin \left[\frac{\mathbb{E}(X)}{2}, \frac{3\mathbb{E}(X)}{2}\right]\right) \leq 2 \exp \left(-\frac{\mathbb E(X)}{12} \right) \leq 2 \exp \left(-\frac{300.000\log^3 (n)}{12} \right) \leq \frac{1}{n^{10}}
\end{equation*}
since by assumption $\ell \geq 300.000\log^3 (n)$. \end{proof}
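For concreteness, the nested resource sets analysed in this appendix can be generated by the following short routine (a sketch of Random Experiment~\ref{exp:sequence}; each resource of $R_{k-1}$ is kept in $R_k$ independently with probability $1/\ell$).
\begin{verbatim}
import random

def sample_resource_hierarchy(R, ell, d):
    """Return the nested sets R_0 = R, R_1, ..., R_d, where R_k is obtained
    from R_{k-1} by keeping each resource independently with probability
    1/ell (so a resource survives in R_k with probability ell**(-k))."""
    levels = [set(R)]
    for _ in range(d):
        levels.append({r for r in levels[-1] if random.random() < 1.0 / ell})
    return levels
\end{verbatim}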
\begin{customthm}{\ref{lma-overlap-representative}}(restated) Consider Random Experiment~\ref{exp:sequence} with $\ell\geq 300.000\log^{3} (n)$. For any $k\geq 0$ and any $C\in\mathcal{C}^{(\geq k)}$ we have
\begin{equation*}
\sum_{C'\in \mathcal{C}^{(k)}} |C'\cap C\cap R_k| \leq \frac{10}{\ell^{k}} \left(|C|+\sum_{C'\in \mathcal{C}^{(k)}} |C'\cap C| \right)
\end{equation*} with probability at least $1-1/n^{10}$. \end{customthm}
\begin{proof} The expected value of the random variable $X=\sum_{C'\in \mathcal{C}^{(k)}} |C'\cap C\cap R_k|$ is \begin{equation*}
\mathbb E(X) = \frac{1}{\ell^k} \sum_{C'\in \mathcal{C}^{(k)}} |C'\cap C|. \end{equation*} Since each resource is in at most $\ell$ configurations, $X$ is a sum of independent random variables that take value in a range $[0,\ell]$. Then by a standard Chernoff bound (see Proposition \ref{cor:chernoff}), we get
\begin{equation*}
\mathbb P\left(X\ge 10 \left(\frac{|C|}{\ell^k} + \mathbb E(X)\right) \right) \leq \exp\left(-\frac{3|C|}{\ell^{k+1}}\right) \leq \frac{1}{n^{10}} , \end{equation*}
since by assumption, $|C|\geq \ell^{k+3}$ and $\ell \geq 300.000\log ^3(n)$.
\end{proof}
We finish with the proof of the last property. As mentioned in the main body of the paper, this statement is a generalization of some ideas that already appeared in \cite{BansalSrividenko}. However, in \cite{BansalSrividenko}, the situation is simpler since they need to sample down the resource set only once (i.e. there are only two sets $R_1\subseteq R$ and not a full hierarchy of resource sets $R_d\subseteq R_{d-1}\subseteq \cdots \subseteq R_1 \subseteq R$). Given the resource set $R_1$, they want to select configurations and give to each selected configuration $K$ its whole surviving resource set $K\cap R_1$ so that no resource is assigned too many times. In our case the situation is more complex still, since at every step the selected configurations receive only a fraction of their current resource set. Nevertheless, we extend the ideas of Bansal and Sviridenko to our more general setting. We recall the main statement before proceeding to its proof.
\begin{customthm}{\ref{lma-good-solution}}(restated) Consider Random Experiment~\ref{exp:sequence} with $\ell\geq 300.000\log^{3} (n)$. Fix $k\geq 0$. Conditioned on the event that the bounds in Lemma~\ref{lma-size} hold for $k$, with probability at least $1 - 1/n^{10}$ the following holds for all $\mathcal{F}\subseteq \mathcal{C}^{(\geq k+1)}$, $\alpha:\mathcal{F} \rightarrow \mathbb N$, and $\gamma \in\mathbb N$ such that $\ell^3/1000\leq \alpha(C) \leq n $ for all $C\in\mathcal{F}$ and $\gamma \in \{1,\dotsc,\ell\}$: If there is an $(\alpha,\gamma)$-good assignment of $R_{k+1}$ to $\mathcal{F}$, then there is an $(\alpha',\gamma)$-good assignment of $R_k$ to $\mathcal{F}$ where \begin{equation}\label{property:3}
\alpha'(C) \ge \ell \left(1-\frac{1}{\log (n)} \right) \alpha(C) \end{equation} for all $C\in\mathcal{F}$. Moreover, this assignment can be found in polynomial time. \end{customthm}
We first provide the definitions of a flow network that allows us to state a clean condition whether a good assignment of resources exists or not. We then provide the high probability statements that imply the lemma.
For any subset of configurations $\mathcal F \subseteq \mathcal{C}^{(\geq k+1)}$, resource set $R_k$, $\alpha:\mathcal{F} \rightarrow \mathbb N$, and any integer $\gamma$, consider the following directed network (denoted by $\mathcal N (\mathcal F, R_k, \alpha,\gamma)$). Create a vertex for each configuration in $\mathcal F$ as well as a vertex for each resource. Add a source $s$ and sink $t$. Then add a directed arc from $s$ to the vertex $C\in\mathcal F$ with capacity $\alpha(C)$. For every pair of a configuration $C$ and a resource $i$ such that $i\in C$ add a directed arc from $C$ to $i$ with capacity $1$. Finally, add a directed arc from every resource to the sink of capacity $\gamma$. See Figure \ref{fig:network_flow} for an illustration.
\begin{figure}
\caption{The directed network and an $s$-$t$ cut}
\label{fig:network_flow}
\end{figure}
We denote by \begin{equation*}
\textrm{maxflow}\left(\mathcal N (\mathcal F, R_k, \alpha,\gamma)\right) \end{equation*} the value of the maximum $s$-$t$ flow in $\mathcal N (\mathcal F, R_k, \alpha,\gamma)$.
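The network $\mathcal N (\mathcal F, R_k, \alpha,\gamma)$ and its maximum-flow value can be built directly; the sketch below uses the \texttt{networkx} library with illustrative node names and input formats.
\begin{verbatim}
import networkx as nx

def build_network(F, R_k, alpha, gamma):
    """Build N(F, R_k, alpha, gamma) as described above.

    F     : dict configuration id -> set of resources it contains.
    R_k   : set of resources surviving at level k.
    alpha : dict configuration id -> capacity of the arc s -> C.
    gamma : capacity of every arc resource -> t.
    """
    G = nx.DiGraph()
    G.add_node("s")
    G.add_node("t")
    for c, res in F.items():
        G.add_edge("s", ("cfg", c), capacity=alpha[c])
        for r in res & R_k:
            G.add_edge(("cfg", c), ("res", r), capacity=1)
    for r in R_k:
        G.add_edge(("res", r), "t", capacity=gamma)
    return G

def maxflow_value(F, R_k, alpha, gamma):
    value, _ = nx.maximum_flow(build_network(F, R_k, alpha, gamma), "s", "t")
    return value
\end{verbatim}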
Before delving into the technical lemmas, we provide a brief road map for the proof. First, we argue that for any subset of configurations, in the two networks induced on this subset and the consecutive resource sets (which are $R_k$ and $R_{k+1}$), the value of the maximum flow differs by approximately a factor $\ell$ (this is Lemma \ref{lem:flow_conservation} stated below). Then, by a union bound over all possible subsets of configurations, this argument holds for all of them simultaneously with good probability. This helps us conclude that a good assignment of the resource set $R_{k+1}$ implies that there is a good assignment of the resource set $R_k$. Notice that if the above argument did not hold for all subsets of configurations at once, a good assignment of resources would not necessarily exist. In particular, we need Lemma \ref{lem:flow_black_box} to show that if on \textit{all} subsets of configurations the maximum flow is multiplied by \textit{approximately} $\ell$ when we expand the resource set from $R_{k+1}$ to $R_k$, then an $(\alpha,\gamma)$-good assignment of $R_{k+1}$ implies an $(\alpha',\gamma)$-good assignment of $R_k$, where $\alpha'$ is almost equal to $\ell \alpha$.
\input{black-box-lemma}
\begin{lemma} \label{lem:flow_conservation} Let $\mathcal F\subseteq \mathcal C^{(\geq k+1)}$, $\alpha:\mathcal{F} \rightarrow \mathbb N$ such that $\ell^3/1000 \leq \alpha(C) \leq n$ for all $C\in\mathcal{F}$, and $1 \le \gamma \le \ell$. Denote by $\mathcal N$ the network $\mathcal N (\mathcal F, R_k, \ell \cdot \alpha,\gamma)$ and by $\Tilde{\mathcal{N}}$ the network $\mathcal N (\mathcal F, R_{k+1},\alpha,\gamma)$. Then
\begin{equation*}
\mathrm{maxflow}\left(\mathcal{N}\right)\geq \frac{\ell}{1+0.5/\log (n)} \mathrm{maxflow}\left(\Tilde{\mathcal{N}}\right)
\end{equation*}
with probability at least $1-1/(n\ell)^{20|\mathcal F|}$. \end{lemma} \begin{proof} We use the max-flow min-cut theorem that asserts that the value of the maximum flow in a network is equal to the value of the minimum $s$-$t$ cut in the network. Consider a minimum cut $S$ of network $\mathcal N$ with $s\in S$ and $t\notin S$. Denote by $c(S)$ the value of the cut. We will argue that with high probability this cut induces a cut of value at most $c(S) / \ell \cdot (1+0.5/\log(n))$ in the network $\Tilde{\mathcal N}$. This directly implies the lemma.
Denote by $\mathcal C'$ the set of configurations of $\mathcal{F}$ that are in $S$, i.e., on the source side of the cut, and $\mathcal C''=\mathcal{F}\setminus \mathcal C'$. Similarly consider $R'$ the set of resources in the $s$ side of the cut and $R''= R_k\setminus R'$. With a similar notation, we denote $\Tilde R' = R'\cap R_{k+1}$ the set of resources of $R'$ surviving in $R_{k+1}$; and $\Tilde R'' = R''\cap R_{k+1}$. Finally, denote by $\Tilde S$ the cut in $\Tilde{\mathcal N}$ obtained by removing resources of $R'$ that do not survive in $R_{k+1}$ from $S$, i.e., $\Tilde S = \{s\}\cup \mathcal C' \cup R'$. The value of the cut $S$ of $\mathcal N$ is \begin{equation*}
c(S) = \sum_{C\in \mathcal C''} \ell \cdot \alpha(C) + e(\mathcal C',R'')+ \gamma |R'| \end{equation*} where $e(X,Y)$ denotes the number of edges from $X$ to $Y$. The value of the cut $\Tilde S$ in $\Tilde{\mathcal N}$ is \begin{equation*}
c( \Tilde S) = \sum_{C\in \mathcal C''} \alpha(C) + e(\mathcal C',\Tilde R'')+ \gamma |\Tilde R'| \end{equation*} We claim the following properties. \begin{claim} \label{cla:size_configurations} For every $C\in \mathcal F$, the outdegree of the vertex corresponding to $C$ in $\mathcal N$ is at least $\ell^4/2$. \end{claim}
Since $C\in \mathcal{C}^{(\geq k+1)}$ and by Lemma \ref{lma-size}, we clearly have that $|C\cap R_k|\geq \ell^4/2$. \begin{claim} \label{cla:size_cut} It holds that \begin{equation*}
c(S)\geq \frac{|\mathcal{F}| \ell^3}{1000} . \end{equation*} \end{claim} We have by assumption on $\alpha(C)$ \begin{multline*}
c(S) = \sum_{C\in \mathcal C''} \ell \cdot \alpha(C) + e(\mathcal C',R'')+ \gamma |R'|
\geq \sum_{C\in \mathcal C''} \frac{\ell^3}{1000} + e(\mathcal C',R'')+ \gamma |R'|\\
\geq \frac{|\mathcal C''|\ell^3}{1000} + e(\mathcal C',R'')+ \gamma |R'| \end{multline*}
Now consider the case where $e(\mathcal C',R'')\leq |\mathcal C'|\ell^3 / 1000$. Since each vertex in $\mathcal C'$ has outdegree at least $\ell^4/2$ in the network $\mathcal N$ (by Claim~\ref{cla:size_configurations}) it must be that $e(\mathcal C',R')\geq |\mathcal C'|\ell^4 / 2 - |\mathcal C'|\ell^3 / 1000 > |\mathcal C'|\ell^4 / 3$. Using that each vertex in $R'$ has indegree at most $\ell$ (each resource is in at most $\ell$ configurations), this implies
$|R'|\geq |\mathcal C'|\ell^3 / 3$ and hence, since $\gamma \geq 1$, that $\gamma|R'| \geq |\mathcal C'|\ell^3 / 1000$. In the other case, the bound $e(\mathcal C',R'') > |\mathcal C'|\ell^3 / 1000$ holds directly. We therefore have in all cases that $e(\mathcal C',R'')+ \gamma |R'|\geq |\mathcal C'|\ell^3 / 1000$. Hence \begin{equation*}
c(S) \geq \frac{|\mathcal C''|\ell^3}{1000} + \frac{|\mathcal C'|\ell^3}{1000} = \frac{|\mathcal{F}| \ell^3}{1000} . \end{equation*}
This proves Claim~\ref{cla:size_cut}. We can now finish the proof of the lemma. Denote by $X$ the value of the random variable $e(\mathcal C',\Tilde{R''})+ \gamma |\Tilde{R'}|$. We have that \begin{equation*}
\mathbb E[X] = \frac{1}{\ell}(e(\mathcal C',R'')+ \gamma |R'|). \end{equation*} Moreover, $X$ can be written as a sum of independent variables in the range $[0, \ell]$ since each resource is in at most $\ell$ configurations and $\gamma \le \ell$ by assumption. By a Chernoff bound (see Proposition \ref{cor:chernoff}) with \begin{equation*}
\delta = \frac{0.5 c(S)}{\log(n) \cdot (c(S)-\ell\sum_{C\in \mathcal C''} \alpha(C))} \geq \frac{0.5}{\log(n)} \end{equation*} we have that \begin{multline*}
\mathbb P\left(X\geq \mathbb E(X)+\frac{0.5 c(S)}{\ell\log(n)}\right)
\leq \exp\left(-\frac{\min\{\delta,\delta^2\}\mathbb E(X)}{3\ell} \right) \\
\leq \exp\left(-\frac{c(S)}{12\ell^2\log^2 (n)} \right)
\leq \exp\left(-\frac{|\mathcal F|\ell^3}{12.000\ell^2\log^2 (n)} \right)
\leq \frac{1}{(n\ell)^{20|\mathcal{F}|}} , \end{multline*}
where the third inequality comes from Claim~\ref{cla:size_cut} and the last one from the assumption that $\ell\geq 300.000\log^{3}(n)$. Hence with probability at least $1-1/(n\ell)^{20|\mathcal{F}|}$, we have that \begin{equation*}
c( \Tilde S) = \sum_{C\in \mathcal C''} \alpha(C) + e(\mathcal C',\Tilde R'')+ \gamma |\Tilde R'| \leq \frac{1}{\ell}c(S)+\frac{0.5}{\ell \log (n)}c(S) .\qedhere \end{equation*} \end{proof}
We are now ready to prove Lemma~\ref{lma-good-solution}. Note that Lemma \ref{lem:flow_conservation} holds with probability at least $1-1/(n\ell)^{20|\mathcal{F}|}$. Given the resource set $R_k$ and a cardinality $s = |\mathcal{F}|$ there are $O((n\ell)^{2s})$ ways of defining a network satisfying the conditions from Lemma~\ref{lem:flow_conservation} ($(m\ell)^s\le (n\ell)^s$ choices of $\mathcal{F}$, $n^{s}$ choices for $\alpha$ and $\ell$ choices for $\gamma$). By a union bound, we can assume that the properties of Lemma~\ref{lem:flow_conservation} hold for every possible network with probability at least $1 - 1/n^{10}$. Assume now there is an $(\alpha,\gamma)$-good assignment of $R_{k+1}$ to some family $\mathcal{F}$. Then by Lemma~\ref{lem:flow_black_box} the $\mathrm{maxflow}(\mathcal{N}(\mathcal{F}',R_{k+1}, \alpha,\gamma))$ is exactly $\sum_{C\in \mathcal{F}'}\alpha(C)$ for any $\mathcal{F}'\subseteq \mathcal{F}$. By Lemma~\ref{lem:flow_conservation}, this implies that $\mathrm{maxflow}(\mathcal{N}(\mathcal{F}',R_{k}, \ell \cdot \alpha,\gamma))$ is at least $\ell/(1+0.5/\log(n)) \sum_{C\in \mathcal{F}'}\alpha(C)$. By Lemma \ref{lem:flow_black_box}, this implies an $(\alpha',\gamma)$-good assignment of $R_k$ to $\mathcal{F}$, where \begin{equation*}
\alpha'(C) = \lfloor\ell/(1+0.5/\log(n))\rfloor \alpha(C) \ge \ell / (1 + 1/\log(n)) \alpha(C) \geq \ell(1 - 1/\log(n)) \alpha(C). \end{equation*}
\section{Omitted proofs from Section~\ref{sec:reconstruction}}\label{appendix_reconstruct} \begin{customcla}{\ref{cla:reconstruct}}(restated) For any $k\geq 0$, any $0\leq j\leq k$, and any $C\in \mathcal{K}^{(k)}$ \begin{equation*}
\sum_{j\leq h\leq k}\sum_{K\in \mathcal{K}^{(h)}} \ell^{h}|K\cap C \cap R_h| \leq 2000\frac{d+\ell}{\ell}\log (\ell) |C|. \end{equation*} \end{customcla}
\begin{proof}[Proof of Claim \ref{cla:reconstruct}] By Lemma~\ref{lma:main-LLL} we have that \begin{equation*}
\sum_{j\leq h\leq k}\sum_{K\in \mathcal{K}^{(h)}} \ell^{h}|K\cap C \cap R_h| \leq \frac{1}{\ell} \sum_{j\leq h\leq k}\sum_{C'\in \mathcal{C}^{(h)}} \ell^{h}|C'\cap C \cap R_h| + 1000\frac{d+\ell}{\ell}\log (\ell) |C|. \end{equation*} Furthermore, by Lemma \ref{lma-overlap-representative}, we get \begin{equation*}
\sum_{C'\in \mathcal{C}^{(h)}} \ell^{h}|C'\cap C \cap R_h| \leq \ell^{h}\frac{10}{\ell^h}\left(|C|+\sum_{C'\in \mathcal{C}^{(h)}} |C'\cap C| \right). \end{equation*} Finally note that each resource appears in at most $\ell$ configurations, hence \begin{equation*}
\sum_{j\leq h\leq k}\sum_{C'\in \mathcal{C}^{(h)}} |C'\cap C| \leq \ell |C|. \end{equation*} Putting everything together we conclude \begin{align*}
\sum_{j\leq h\leq k}\sum_{K\in \mathcal{K}^{(h)}} \ell^{h}|K\cap C \cap R_h| &\leq \frac{1}{\ell} \sum_{j\leq h\leq k}\sum_{C'\in \mathcal{C}^{(h)}} \ell^{h}|C'\cap C \cap R_h| + 1000\frac{d+\ell}{\ell}\log (\ell) |C| \\
&\leq \frac{1}{\ell} \sum_{j\leq h\leq k}10\left( |C|+\sum_{C'\in \mathcal{C}^{(h)}}|C'\cap C|\right) + 1000\frac{d+\ell}{\ell}\log (\ell) |C|\\
&\leq \frac{k-j+1}{\ell}10|C|+10|C|+1000\frac{d+\ell}{\ell}\log (\ell) |C|\\
&\leq 20|C|+1000\frac{d+\ell}{\ell}\log (\ell) |C|\\
&\leq 2000\frac{d+\ell}{\ell}\log (\ell) |C|.\qedhere \end{align*} \end{proof}
\begin{customcla}{\ref{cla:reconstruct_mu}}(restated) For any $C\in \mathcal{K}^{(\geq j)}$, \begin{equation*}
\frac{1}{\gamma^2}\sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}\cap O|\leq \mu \leq \frac{2}{\gamma} \sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}\cap O|. \end{equation*} \end{customcla} \begin{proof}[Proof of Claim \ref{cla:reconstruct_mu}] Note that we can write \begin{equation*}
\mu = \sum_{i\in O\cap C} \frac{a_i+b_i-\gamma}{b_i} \leq \max_{i\in O\cap C}\left\lbrace \frac{a_i+b_i-\gamma}{a_ib_i} \right\rbrace \sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}\cap O|. \end{equation*} The reason for this is that each resource $i$ accounts for an expected loss of $(a_i+b_i-\gamma)/b_i$ while it is counted $a_i$ times in the sum \begin{equation*}
\sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}\cap O|. \end{equation*} Similarly, \begin{equation*}
\mu = \sum_{i\in O\cap C} \frac{a_i+b_i-\gamma}{b_i} \geq \min_{i\in O\cap C}\left\lbrace \frac{a_i+b_i-\gamma}{a_ib_i} \right\rbrace \sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}\cap O|. \end{equation*} Note that by assumption we have that $a_i+b_i>\gamma$. This implies that either $a_i$ or $b_i$ is greater than $\gamma/2$. Assume w.l.o.g. that $a_i\geq \gamma/2$. Since by assumption $a_i\leq \gamma$ we have that \begin{equation*}
\frac{a_i+b_i-\gamma}{a_ib_i}\leq \frac{b_i}{a_ib_i} =\frac{1}{a_i} \leq \frac{2}{\gamma}. \end{equation*} In the same manner, since $a_i+b_i>\gamma$ and that $a_i,b_i\leq \gamma$, we can write \begin{equation*}
\frac{a_i+b_i-\gamma}{a_ib_i}\geq \frac{1}{a_ib_i} \geq \frac{1}{\gamma^2}. \end{equation*} We therefore get the following bounds \begin{equation*}
\frac{1}{\gamma^2}\sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}\cap O|\leq \mu \leq \frac{2}{\gamma} \sum_{K\in \mathcal{K}^{(j-1)}}|K\cap C \cap R_{j-1}\cap O|, \end{equation*} which is what we wanted to prove. \end{proof}
\section{Omitted proofs from Section~\ref{sec:reduction santa claus}}\label{appendix:reduction} \subsection{From matchings to Santa Claus} The idea of this reduction is to replace each player by a set of players, one for each of the $t$ configurations containing him. These players together share $t-1$ large new resources; to satisfy all of them, at least one has to be covered by other resources, namely the original resources of the corresponding configuration. \begin{description} \item[Players.] For every vertex $v \in P$, and every hyperedge $C \in \mathcal{C}$ that $v$ belongs to, we create a player $p_{v,C}$ in the Santa Claus instance.
\item[Resources.] For every vertex $u \in R$, create a resource $r_{u}$ in the Santa Claus instance. For any vertex $v \in P$ that belongs to $t$ edges in $\mathcal{C}$, create $t-1$ resources $r_{v,1}, r_{v,2}, \ldots, r_{v,t-1}$.
\item[Values.]
For any resource $r_{u}$ with $u \in R$ and any player $p_{v,C}$ with $C \in \mathcal{C}$, the resource has value $\frac{1}{|C|-1}$ if $u \in C$, and value $0$ otherwise. Any resource $r_{v,i}$ with $v \in P$ and $i \in \mathbb N$ has value $1$ for every player $p_{v,C}$, $C \in \mathcal{C}$, and value $0$ for all other players. \end{description} It is easy to see that given an $\alpha$-relaxed matching in the original instance, one can construct an $\alpha$-approximate solution for the Santa Claus instance.
For the other direction, notice that for each $v \in P$ there exists a player $p_{v,C}$, for some $C \in \mathcal{C}$, that gets resources only of the type $r_{u}$: the $t$ players created for $v$ share only $t-1$ resources of the form $r_{v,i}$, so at least one of them receives none of these. One can then simply assign the resource $u \in R$ to the vertex $v$ for every resource $r_{u}$ assigned to $p_{v,C}$.
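For concreteness, the construction above can be written as the following short sketch in Python. The encoding of a hyperedge as a pair $(v,S)$, where $v\in P$ and $S\subseteq R$ is its resource part (so that $|C|-1=|S|$), is our own choice for illustration, and we assume every hyperedge contains at least one resource.
\begin{verbatim}
from fractions import Fraction

def matching_to_santa_claus(P, R, C):
    """P, R: vertex sets; C: list of hyperedges encoded as pairs (v, S)."""
    value = {}                          # (player, resource) -> value; missing = 0
    edges_of = {v: [] for v in P}
    players, resources = [], [("r", u) for u in R]
    for idx, (v, S) in enumerate(C):
        edges_of[v].append(idx)
        players.append(("p", v, idx))   # one player per (vertex, incident hyperedge)
        for u in S:                     # original resources are worth 1/(|C|-1)
            value[(("p", v, idx), ("r", u))] = Fraction(1, len(S))
    for v in P:                         # t-1 shared resources of value 1 for the copies of v
        for i in range(len(edges_of[v]) - 1):
            resources.append(("r", v, i))
            for idx in edges_of[v]:
                value[(("p", v, idx), ("r", v, i))] = Fraction(1)
    return players, resources, value
\end{verbatim}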
\subsection{From Santa Claus to matchings} This subsection is devoted to the proof of Theorem \ref{thm:reduction}.
\begin{proof} We write $(\log)^k(n) = \underbrace{\log \cdots \log}_{\times k}(n)$ and $(\log)^0(n) = n$.
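The iterated logarithm used here can be made concrete with the following small helpers (a sketch of ours; we fix base $2$, which the notation above leaves unspecified).
\begin{verbatim}
import math

def ilog(k, n):
    """(log)^k(n): apply log2 k times, with (log)^0(n) = n."""
    x = float(n)
    for _ in range(k):
        x = math.log2(x)
    return x

def log_star(n):
    """log*(n): number of log2 applications needed to reach a value of at most 1."""
    k, x = 0, float(n)
    while x > 1:
        x, k = math.log2(x), k + 1
    return k
\end{verbatim}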
\paragraph*{Construction.} We describe how to construct a hypergraph matching instance from a Santa Claus instance in four steps, reducing to increasingly special cases.
\paragraph{(1) Geometric grouping.} In this step, given arbitrary values $v_{ij}$, we reduce to an instance such that $\mathrm{OPT} = 1$ and, for each $i, j$, either $v_{ij}=0$ or $v_{ij} = 2^{-k}$ for some integer $k$ with $1/(2n) < v_{ij} \le 1$.
This step follows easily from guessing $\mathrm{OPT}$, rescaling, rounding the values down to powers of two, and omitting all small elements in a solution. \paragraph{(2) Reduction to $O(\log^*(n))$ size ranges.} Next, we reduce to an instance such that for each player $i$ there is some $k \le \log^*(2n)$ such that for each resource $j$, $v_{ij}\in\{0, 1\}$ or $1/(\log)^k(2n) < v_{ij} \le 1/(\log)^{k+1}(2n)$. We explain this step below.
Each player and resource is copied to the new instance. However, we will also add auxiliary players and resources. Let $i$ be a player. In the optimal solution there is some $0 \le k \le \log^*(2n)$ such that the values of all resources $j$ with $1/(\log)^k(2n) < v_{ij} \le 1/(\log)^{k+1}(2n)$ assigned to player $i$ sum up to at least $1/\log^*(2n)$. Hence, we create $\log^*(2n)$ auxiliary players, one for each such $k$, each of which shares a resource with the original player; this resource has value $1$ for both of them. The original player needs to get one of these resources, which means that one of the auxiliary players needs to get a significant value from the resources with $1/(\log)^k(2n) < v_{ij} \le 1/(\log)^{k+1}(2n)$. This reduction loses a factor of at most $\log^*(2n)$. Hence, $\mathrm{OPT} \geq 1/\log^*(2n)$.
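The rounding of step (1) and the bucketing of step (2) amount to the following computation for a single value; this is only a sketch under our own naming, again with base-$2$ logarithms and values already rescaled so that $1/(2n) < v \le 1$.
\begin{verbatim}
import math

def round_down(v):
    """Step (1): round a value in (1/(2n), 1] down to the nearest power of two."""
    k = 0
    while 2.0 ** (-k) > v:
        k += 1
    return 2.0 ** (-k)

def range_index(v, n):
    """Step (2): the k with 1/(log)^k(2n) < v <= 1/(log)^{k+1}(2n),
    assuming 1/(2n) < v <= 1."""
    x, k = 2.0 * n, 0                  # invariant: x = (log)^k(2n)
    while 1.0 / math.log2(x) < v:      # compare v with 1/(log)^{k+1}(2n)
        x, k = math.log2(x), k + 1
    return k
\end{verbatim}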
\paragraph{(3) Reduction to 3 sizes.} We further reduce to an instance such that for each player $i$ there is some value $v_i$ such that for each resource $j$, $v_{ij}\in\{0, v_i, 1\}$.
Let $i$ be some player who has only resources of value $v_{ij}\in\{0,1\}$ or $1/(\log)^k(2n) < v_{ij} \le 1/(\log)^{k+1}(2n)$ for some integer $k$. There are at most $\log((\log)^k(2n)) \leq (\log)^{k+1}(2n)$ distinct values of the latter kind. The idea is to assign bundles of resources of value $0.5/\left( \log^*(2n)(\log)^{k+1}(2n) \right)$ to the player $i$.
Fix a resource value $s$ such that $1/(\log)^k(2n) <s\le 1/(\log)^{k+1}(2n)$. We denote by $R_s$ the set of resources $j$ such that $v_{ij}=s$.
We define the integer \begin{equation*}
b=\left\lceil \frac{0.5}{s\log^*(2n)(\log)^{k+1}(2n)}\right\rceil \end{equation*} which is the number of resources of value $s$ that are needed to make a bundle of total value at least $0.5/\left( \log^*(2n)(\log)^{k+1}(2n) \right)$. We remark that if $s>0.5/\left( \log^*(2n)(\log)^{k+1}(2n) \right)$ we have $b=1$. Even in that case, since $s\leq 1/(\log)^{k+1}(2n)$, the value of a bundle never exceeds $1/(\log)^{k+1}(2n)$ in the instance of step (2).
Then we create \begin{equation*}
\left\lfloor |R_s|/b\right\rfloor
\end{equation*} auxiliary players $i_1,i_2,\ldots $ and auxiliary resources $j_1,j_2,\ldots$ (note that no auxiliary players or resources are created if $|R_s|<b$).
Each auxiliary player $i_\ell$ shares resource $j_\ell$ with player $i$. This resource has value $2/\left( \log^*(2n)(\log)^{k+1}(2n) \right)$ for player $i$ and value $1$ for player $i_\ell$. Then for all resources $j\in R_s$, we set $v_{ij}=0$ and \begin{equation*}
v_{i_\ell j}=\frac{1}{(\log^*(2n))^2b} \end{equation*} for any auxiliary player $i_\ell$ that was created.
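The bookkeeping of step (3) for a single resource value $s$ can be summarized as follows; the function and variable names are ours, we again fix base-$2$ logarithms, and we assume $k$ lies in the range guaranteed by step (2).
\begin{verbatim}
import math

def step3_gadget(num_resources, s, k, n):
    """For one value s with 1/(log)^k(2n) < s <= 1/(log)^{k+1}(2n):
    bundle size b, number of auxiliary players, and the two new values."""
    def ilog(j):
        y = 2.0 * n
        for _ in range(j):
            y = math.log2(y)
        return y
    L, x = 0, 2.0 * n
    while x > 1:                              # L = log*(2n)
        x, L = math.log2(x), L + 1
    b = math.ceil(0.5 / (s * L * ilog(k + 1)))
    num_aux = num_resources // b              # auxiliary players i_1,... and resources j_1,...
    value_for_i = 2.0 / (L * ilog(k + 1))     # value of each j_ell for the original player i
    value_for_aux = 1.0 / (L ** 2 * b)        # value of each resource of R_s for i_ell
    return b, num_aux, value_for_i, value_for_aux
\end{verbatim}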
We see that we are now in the case where for each player $i$ there exists some $v_i$ such that $v_{ij}\in \{0,v_i,1\}$ for all resources $j$. We claim the following. \begin{claim} \label{cla:reduction_3_OPT} In the instance created at step (3), we have that $\mathrm{OPT}\geq 1/(\log^*(2n))^2$. \end{claim} \begin{proof} To see this, take an assignment of resources to players that gives $1/\log^*(2n)$ value to every player in the instance obtained at the end of step (2). Define $R_i$ to be the set of resources assigned to player $i$ in this solution. Either $R_i$ contains a resource of value $1$, or it contains only resources whose values lie in the range $(1/(\log)^k(2n),1/(\log)^{k+1}(2n)]$ for some integer $k$. In the first case, nothing needs to be done, as the resource $j$ of value $1$ assigned to $i$ still satisfies $v_{ij}=1$ in the new instance. Hence we assign $j$ to $i$, and all auxiliary players created for player $i$ get their auxiliary resource of value 1.
In the second case, fix a resource value $s$. Let $R_{i,s}$ be the set of resources assigned to $i$ for which $v_{ij}=s$ and $b$ defined as before. We select $\left\lfloor |R_{i,s}|/b \right\rfloor$ auxiliary players to receive $b$ resources from $R_{i,s}$ and player $i$ takes the corresponding auxiliary resources. The remaining auxiliary players of the corresponding value take their auxiliary resource.
Doing this, we ensure that all auxiliary players receive either a value of 1 (by taking the auxiliary resource) or $1/(\log^*(2n))^2$ by taking resources assigned to $i$ in the instance of step (2). Moreover, we claim that $i$ receives a total value of at least $1/(\log^*(2n))^2$. To see this, we have $3$ cases depending on the value of $b$ and $\left\lfloor |R_{i,s}|/b \right\rfloor$.
\begin{itemize}
\item If $b=1$, then $\left\lfloor |R_{i,s}|/b \right\rfloor=|R_{i,s}|$. We note that the value of a bundle of $b$ resources of size $s$ never exceeds $1/(\log)^{k+1}(2n)$ in instance (2). Since each auxiliary resource represents a value of $2/\left( \log^*(2n)(\log)^{k+1}(2n) \right)$ to player $i$ in instance (3), it must be that player $i$ receives in instance (3) at least a $2/\log^*(2n)$ fraction of the value he would receive in instance (2).
\item If $b>1$ and $\left\lfloor |R_{i,s}|/b \right\rfloor>0$, then we have that $\left\lfloor |R_{i,s}|/b \right\rfloor\geq |R_{i,s}|/(2b)$. Since in this case $s<0.5/\left( \log^*(2n)(\log)^{k+1}(2n) \right)$, each bundle of $b$ resources of size $s$ represents a total value of at most $1/\left( \log^*(2n)(\log)^{k+1}(2n) \right)$. Since the value of an auxiliary resource is twice this value and because $\left\lfloor |R_{i,s}|/b \right\rfloor\geq |R_{i,s}|/(2b)$, it must be that in this case player $i$ receives in instance (3) at least the value he would receive in instance (2).
\item If $\left\lfloor |R_{i,s}|/b \right\rfloor=0$, then player $i$ receives $0$ value from resources of this value. However, when we combine all the values $s$ for which $\left\lfloor |R_{i,s}|/b \right\rfloor=0$, it represents to player $i$ in instance (2) a total value of at most
\begin{equation*}
0.5/\left( \log^*(2n)(\log)^{k+1}(2n) \right)\cdot (\log)^{k+1}(2n) = 0.5/\log^*(2n)
\end{equation*}
since there are at most $(\log)^{k+1}(2n)$ different resource values. \end{itemize}
Putting everything together, we see that in the first two cases player $i$ receives at least a $2/\log^*(2n)$ fraction of the value he would receive in instance (2), and that he loses a total value of at most $0.5/\log^*(2n)$ in the third case. Since in instance (2) we have that $\mathrm{OPT}\geq 1/\log^*(2n)$, we see that in instance (3) player $i$ receives a value of at least \begin{equation*}
(2/\log^*(2n))\cdot (1/\log^*(2n)-0.5/\log^*(2n))\geq 1/(\log^*(2n))^2. \end{equation*} \end{proof}
Finally, we also claim that it is easy to reconstruct an approximate solution to the instance obtained at step (1) from an approximate solution to the instance at step (3). \begin{claim}\label{cla:reduction_3_to_1} A $c$-approximate solution to the instance obtained at step (3) induces an $O((c\log^*(2n))^2)$-approximate solution to the instance obtained at step (1). \end{claim} \begin{proof} To see this, note that a $c$-approximate solution must give at least $1/(c(\log^*(2n))^2)$ value to every player, since $\mathrm{OPT}\geq 1/(\log^*(2n))^2$ (by Claim \ref{cla:reduction_3_OPT}). This means that each player $i$ either takes a resource of value 1, which also has value 1 for him in the instance at step (1), or he must take a total value of $1/(c(\log^*(2n))^2)$ in auxiliary resources, and the corresponding auxiliary players must take bundles of resources that represent a value of at least \begin{equation*}
0.5/\left( c\log^*(2n)(\log)^{k+1}(2n)\right) \end{equation*} for player $i$ in the instance at step (1). We simply assign all the resources appearing in these bundles to the player $i$ in the instance of step (1). Since the value of an auxiliary resource for player $i$ is $2/\left( \log^*(2n)(\log)^{k+1}(2n) \right)$ it must be that player $i$ takes at least \begin{equation*}
\frac{1/(c(\log^*(2n))^2)}{2/\left( \log^*(2n)(\log)^{k+1}(2n) \right)} = \frac{(\log)^{k+1}(2n)}{2c\log^*(2n)} \end{equation*} auxiliary resources. Since each auxiliary resource brings a value of \begin{equation*}
0.5/\left( c\log^*(2n)(\log)^{k+1}(2n)\right) \end{equation*} to player $i$ (in the instance at step (1)) then player $i$ receives in total a value of at least \begin{equation*}
\frac{1}{(2c\log^*(2n))^2} \end{equation*} in the instance of step (1). \end{proof}
Before the last step, we rescale the instance appropriately to get $\mathrm{OPT}=1$ (we keep the property that each player $i$ has three distinct values $0$, $1$, and $v_i$).
\paragraph{(4) Reduction to hypergraph matching.} For each player create a vertex in $P$ and for each resource create a vertex in $R$. For each player $i$, add one hyperedge for each resource he values at $1$ (containing $i$ and this resource). Moreover, for every player $i$, add $1/v_i$ \textit{new} vertices to $P$ and the same number of \textit{new} resources to $R$. Pair these new vertices in $P$ and $R$ together (one from $R$ and one from $P$) and for each pair add a hyperedge containing the two paired vertices. Add another hyperedge for $i$ containing $i$ and all corresponding $1/v_i$ new vertices in $R$. Finally, for each new vertex in $P$ and each resource that $i$ values at $v_i$, add a hyperedge containing them. See Figure \ref{fig:reduction} for an illustration: new resources and players are marked as squares, and hyperedges containing only two vertices are drawn as simple edges. A small sketch of this gadget follows the figure.
\begin{figure}
\caption{An example of the reduction to hypergraph matching for player $i$ with $v_i=1/2$.}
\label{fig:reduction}
\end{figure}
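The gadget built for a single player $i$ in step (4) can be written out as the following sketch; the vertex labels are our own encoding, and we assume that $1/v_i$ is an integer.
\begin{verbatim}
def step4_gadget(i, unit_resources, small_resources, inv_v_i):
    """Hyperedges created for player i in step (4).
    unit_resources: resources i values at 1; small_resources: resources i
    values at v_i; inv_v_i: the integer 1/v_i."""
    new_P = [("new_p", i, a) for a in range(inv_v_i)]
    new_R = [("new_r", i, a) for a in range(inv_v_i)]
    edges = []
    for r in unit_resources:                 # one hyperedge per value-1 resource
        edges.append({i, r})
    for a in range(inv_v_i):                 # pair each new vertex with its new resource
        edges.append({("new_p", i, a), ("new_r", i, a)})
    edges.append({i} | set(new_R))           # i together with all 1/v_i new resources
    for a in range(inv_v_i):                 # new vertices may instead take value-v_i resources
        for r in small_resources:
            edges.append({("new_p", i, a), r})
    return new_P, new_R, edges
\end{verbatim}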
We claim that there exists a $1$-relaxed perfect matching in this instance. Since $\mathrm{OPT}=1$, there is an assignment of resources to players such that every player gets a value of $1$. If player $i$ takes one resource of value $1$, give player $i$ the corresponding hyperedge (and the resource in it) in the hypergraph. All the new players get the new resource they are paired to. If player $i$ takes $1/v_i$ resources of value $v_i$, give player $i$ in the hypergraph all the $1/v_i$ new resources contained in the new hyperedge. Then we give each new player the hyperedge (and the resource in it) corresponding to a distinct resource assigned to $i$ in the instance from step (3). This is indeed a $1$-relaxed perfect matching.
\paragraph*{Correctness.} In the reduction we arrive at step (3), for which we prove (in Claim \ref{cla:reduction_3_to_1}) that a $c$-approximate solution can be used to easily reconstruct an $O((c\log^*(2n))^2)$-approximate solution to the original instance. It remains to show that a $c$-relaxed perfect matching in instance (4) induces a $c$-approximate solution to step (3). To see this, note that in a $c$-relaxed perfect matching of instance (4), player $i$ either receives the resource of one hyperedge corresponding to a resource of value $1$ in instance (3), in which case we assign this resource to player $i$ in instance (3), or he receives at least $1/(cv_i)$ of the new resources. In the latter case, each new player paired to one of these resources must take one resource of value $v_i$ in instance (3). We give these resources to $i$ in instance (3); then $i$ receives a total value of $v_i/(cv_i)=1/c$, which ends the proof.
We finish by remarking that the size of our construction is indeed polynomial in the size of the original instance. This is clear for step (1). In step (2), only $O(\log^* (n))$ new players and resources are created for each player in the original instance. In step (3), for each player $i$ and each resource size $v_{ij}$, at most a polynomial number of resources and players are created. As for the last step, $O(1/v_i)$ new resources and players are created for each player $i$, which is also polynomial since $v_i=\Omega (1/n)$. The number of hyperedges in the hypergraph is also clearly polynomial in the number of vertices in our construction. \end{proof}
\end{document}
\begin{document}
\title{\textbf{More on limited packings in graphs}\footnote{Supported by NSFC No.11531011.}} \begin{abstract} A set $B$ of vertices in a graph $G$ is called a \emph{$k$-limited packing} if for each vertex $v$ of $G$, its closed neighbourhood has at most $k$ vertices in $B$. The \emph{$k$-limited packing number} of a graph $G$, denoted by $L_k(G)$, is the largest number of vertices in a $k$-limited packing in $G$. The concept of the $k$-limited packing of a graph, which generalizes the well-known packing of a graph, was introduced by Gallant et al. In this paper, we present tight bounds for the $k$-limited packing number of a graph in terms of its order, diameter, girth, and maximum degree, respectively. As a result, we obtain a tight Nordhaus-Gaddum-type result for this parameter for general $k$. Finally, we investigate the relationship among the open packing number, the packing number and the $2$-limited packing number of trees.
\noindent\textbf{Keywords:} $k$-limited packing, opening packing, Nordhaus-Gaddum-type result
\noindent\textbf{AMS subject classification 2010:} 05C69, 05C70 \end{abstract}
\section{Introduction}
All graphs in this paper are undirected, simple and nontrivial. We follow \cite{BM} for graph theoretical notation and terminology not described here. For a graph $G$, we use $V(G), E(G), diam(G), \Delta(G)$ and $\delta(G)$ to denote the vertex set, edge set, diameter, maximum degree, and minimum degree of $G$, respectively. For a vertex $v\in V(G)$, the \emph{open neighbourhood} $N(v)$ of $v$ is the set of all vertices adjacent to $v$ in $G$, and the set $N[v] = \{v\}\cup N(v)$ is called the \emph{closed neighbourhood} of $v$ in $G$.
A set $D$ of vertices in a graph $G$ is called a \emph{dominating set} if each vertex in $V(G)\setminus D$ has at least one neighbour in $D$. The \emph{domination number} $\gamma(G)$ of a graph $G$ is the minimum cardinality of a dominating set in $G$. The theory of dominating sets, introduced formally by Ore \cite{O} and Berge \cite{Berge}, has been the subject of many recent papers due to its practical and theoretical interest. For more information on domination topics we refer to the books \cite{HHS, HHS1}. A dominating set $D$ of a graph $G$ is called a \emph{total dominating set} if $G[D]$ has no isolated vertex, and the minimum cardinality of a total dominating set in $G$ is called the \emph{total domination number} of $G$, denoted by $\gamma_t(G)$. Total domination in graphs was introduced by Cockayne, Dawes, and Hedetniemi \cite{CDH}, and has been well studied (see, for example, \cite{FH,FHMP,HY,Y}).
On the other side, the \emph{open packing} of a graph $G$ is a set $S$ of vertices in $G$ such that for each vertex $v$ of $G$,
$|N(v)\cap S|\leq 1$. The \emph{open packing number} of a graph $G$, denoted by $\rho^{0}(G)$, is the maximum cardinality among all open packings in $G$. The open packing of a graph has been studied in \cite{H,HSE}.
The well-known \emph{packing} (\emph{$2$-packing}) of a graph $G$ is a set $B$ of vertices in $G$ such that $|N[v]\cap B|\leq 1$ for each vertex $v$ of $G$. The \emph{packing number} $\rho(G)$ of a graph $G$ is the maximum cardinality of a packing in $G$. The packing of a graph has been well studied in the literature \cite{B,C,MM,TV}. Dominating sets and packings of graphs are two good models for many utility location problems in operations research, but the corresponding problems have a very different nature: the former (dominating sets) is a minimization problem aimed at satisfying certain reliability requirements, while the latter is a maximization problem that must not violate certain (security) constraints. Consider the following scenarios:
Network security: A set of sensors is to be deployed to covertly monitor a facility. Too many sensors close to any given location in the facility can be detected. Where should the sensors be placed so that the total number of sensors deployed is maximized?
Market Saturation: A fast food franchise is moving into a new city. Market analysis shows that each outlet draws customers from both its immediate city block and from nearby city blocks. However it is also known that a given city block cannot support too many outlets nearby. Where should the outlets be placed?
Codes: Information is to be transmitted between two interested parties. This data is first represented by bit strings (codewords) of length $n$. It is desirable to be able to use as many of these $2^n$ strings as possible. However, if a single bit of a codeword is altered during transmission, we should still be able to recover the piece of data correctly by employing a ``nearest neighbour" decoding algorithm. How many code words can be used as a function of $n$?
A graph model of these scenarios might maximize the size of a vertex subset subject to the constraint that no vertex in the graph is near too many of the selected vertices.
Motivated by the packing of graphs, Gallant et al. relaxed the constraints and introduced the concept of the \emph{$k$-limited packing} in graphs in \cite{GGHR}. A set $B$ of vertices in a graph $G$ is called a \emph{$k$-limited packing} if for each vertex $v$ of $G$,
$|N[v]\cap B|\leq k$. The \emph{$k$-limited packing number} of a graph $G$, denoted by $L_k(G)$, is the largest number of vertices in a $k$-limited packing set in $G$. It is clear that $L_1(G) = \rho(G)$. The problem of finding a $1$-limited packing of maximum size for a graph is shown to be NP-complete in \cite{HS0}. In \cite{DLN}, it is shown that the problem of finding a maximum size $k$-limited packing is NP-complete even in split or bipartite graphs. For more results on $k$-limited packings of graphs, we refer to \cite{BBG,GZ,GGHR,LN}.
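Since the defining condition is purely local, $L_k(G)$ can be computed by exhaustive search on small graphs. The following sketch (our own; exponential-time, so only intended for small instances, with hypothetical helper names) makes the definition concrete.
\begin{verbatim}
from itertools import combinations

def is_k_limited_packing(adj, B, k):
    """adj: dict vertex -> set of neighbours; B: candidate vertex set."""
    return all(len(({v} | adj[v]) & B) <= k for v in adj)

def limited_packing_number(adj, k):
    """Brute-force L_k(G); exponential, so only for small graphs."""
    for size in range(len(adj), 0, -1):
        if any(is_k_limited_packing(adj, set(B), k)
               for B in combinations(adj, size)):
            return size
    return 0
\end{verbatim}
For example, for the path $P_5$ this returns $2$ when $k=1$ and $4$ when $k=2$, in accordance with Lemma \ref{lem3} below.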
The remainder of this paper is organized as follows. In Section $2$, we give the technical preliminaries, including notation and relevant known results on open packings and $k$-limited packings of graphs. In Section $3$, we present tight bounds for the $k$-limited packing number of a graph in terms of its order, diameter, girth, and maximum degree, respectively. Based on them, we obtain a tight Nordhaus-Gaddum-type result for this parameter for general $k$. In Section $4$, we focus on the $2$-limited packing number of graphs, including trees and graphs with diameter two, and we obtain an improved upper bound on the $2$-limited packing number of graphs with large diameter. In Section $5$, we investigate the relationship among the open packing number, the $1$-limited packing number and the $2$-limited packing number of trees.
\section{Preliminaries}
The notation we use is mostly standard. For $B \subseteq V(G)$, let $\overline{B}=V(G)\backslash B$, and let $G[B]$ denote the subgraph of $G$ induced by $B$. Given $t$ graphs $G_1,\ldots, G_t$, the \emph{union} of $G_1,\ldots,G_t$, denoted by $G_1\cup \cdots \cup G_t$, is the graph with vertex set $V(G_1)\cup \cdots \cup V(G_t)$ and edge set $E(G_1)\cup \cdots \cup E(G_t)$. In particular, let $tG$ denote the vertex-disjoint \emph{union} of $G_1,\ldots, G_t$ for $G_1= \cdots =G_t=G$.
We next state some relevant known results on $k$-limited packings of graphs, which will be needed later.
\begin{lem}{\upshape\cite{HS}}\label{lem15} Let $G$ be a graph of order at least $3$. Then $\rho^{0}(G) = 1$ if and only if $diam(G)\leq 2$ and every edge of $G$ lies on a triangle. \end{lem}
\begin{lem}{\upshape\cite{HS}}\label{lem17} If $G$ is a graph of diameter $2$, then $\rho^{0}(G)\leq 2$. \end{lem}
\noindent\textbf{Remark 1.} It is clear that graphs with diameter $1$, which are exactly complete graphs, have open packing number at most $2$. Thus, if $G$ is a graph of diameter at most $2$, then $\rho^{0}(G)\leq 2$.
\begin{lem}{\upshape\cite{R}}\label{lem33} If $T$ is any tree of order at least $2$, then $\rho^{0}(T)=\gamma_t(T)$. \end{lem}
\begin{lem}{\upshape\cite{MS}}\label{lem16} For any graph $G$, $L_1(G) = 1$ if and only if $diam(G)\leq2$. \end{lem}
\begin{lem}{\upshape\cite{BBG}}\label{lem31} For any graph $G$ of order $n$, $L_1(G)\geq \frac{n}{\Delta(G)^2+1}.$ \end{lem}
\begin{lem}{\upshape\cite{MM}}\label{lem34} For any tree T, $L_1(T)=\gamma(T).$ \end{lem}
\begin{lem}{\upshape\cite{MS}}\label{lem7} For any connected graph $G$ and integer $k\in \{1,2\}$, $$ L_k(G)\geq \lceil \frac{k+kdiam(G)}{3}\rceil .$$ \end{lem}
\begin{lem}{\upshape\cite{S}} \label{lem2} Let $G$ be a graph of order $n$. Then $L_2(G)+L_2(\overline{G})\leq n+2$, and this bound is tight. \end{lem}
Since a $k$-limited packing of a graph is also a $(k+1)$-limited packing, we immediately obtain the following inequalities: $L_1(G)\leq L_2(G)\leq\cdots\leq L_{k}(G)\leq L_{k+1}(G)\leq\cdots$. Furthermore, the authors of \cite{MSH} obtained the following stronger result.
\begin{lem}{\upshape\cite{MSH}}\label{lem5} Let $G$ be a connected graph of order $n$ and $k\leq \Delta(G)$. Then $L_{k+1}(G)\geq L_k(G)+1$. Moreover, $L_{k}(G)\geq L_1(G)+k-1$, and this bound is tight. \end{lem}
\noindent\textbf{Remark 2.} Based on the proof of Lemma \ref{lem5} in \cite{MSH}, the connectivity assumption on $G$ in Lemma \ref{lem5} can be dropped.
\begin{lem}{\upshape\cite{BBG}}\label{lem10} For any graph $G$ of order $n$, $L_k(G)\leq \frac{kn}{\delta(G)+1}.$ \end{lem}
In the sequel, let $P_n$, $C_n$, $K_n$, and $K_{s,t}$ denote the path of order $n$, cycle of order $n$, complete graph of order $n$, and complete bipartite graph of order $s+t$, respectively. It is clear that $L_k(P_n)=L_k(C_n)=n$ for $k\geq 3$.
\begin{lem}{\upshape \cite{GGHR}}\label{lem3} Let $m, n, k\in \mathbb{N}$. Then
$(i)$ $L_k(P_n)=\lceil \frac{kn}{3}\rceil $ for $k=1,2$,
$(ii)$ $L_k(C_n)=\lfloor\frac{kn}{3}\rfloor$ for $k=1,2$ and $n\geq 3$,
$(iii)$ $L_k(K_{n})=\min\{k,n\},$

$(iv)$ $L_k(K_{m,n})= \begin{cases} 1& \text{if $k=1$},\\ \min\{k-1,m\}+\min\{k-1,n\}& \text{if $k > 1$}. \end{cases}$ \end{lem}
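These closed-form values are easy to confirm on small cases with the brute-force sketch from the introduction (assuming the hypothetical helper \texttt{limited\_packing\_number} defined there):
\begin{verbatim}
def path_adj(n):
    adj = {i: set() for i in range(n)}
    for i in range(n - 1):
        adj[i].add(i + 1)
        adj[i + 1].add(i)
    return adj

def cycle_adj(n):
    adj = path_adj(n)
    adj[0].add(n - 1)
    adj[n - 1].add(0)
    return adj

# limited_packing_number(path_adj(7), 1) == 3   # = ceil(7/3)
# limited_packing_number(path_adj(7), 2) == 5   # = ceil(14/3)
# limited_packing_number(cycle_adj(7), 1) == 2  # = floor(7/3)
# limited_packing_number(cycle_adj(7), 2) == 4  # = floor(14/3)
\end{verbatim}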
\begin{lem}{\upshape\cite{GGHR}}\label{lem4} If $G$ is a graph, then $L_k(G)\leq k\gamma(G)$. Furthermore, the equality holds if and only if for any maximum $k$-limited packing $B$ in $G$ and any minimum dominating set $D$ in $G$ both the following hold:
$(i)$ For any $b\in B$ we have $\mid N[b]\cap D\mid=1$,
$(ii)$ For any $d\in D$ we have $\mid N[d]\cap B\mid=k$. \end{lem}
It is worth mentioning that, as part of our work, we later generalize the results of Lemma \ref{lem16}, Lemma \ref{lem7} and Lemma \ref{lem2} to the $k$-limited packing number for general $k$, and characterize all the trees $T$ attaining the equality $L_{2}(T)= L_1(T)+1$ in Lemma \ref{lem5}.
\section{$k$-limited packing}
In this section we present some tight bounds for the $k$-limited packing number of a graph in terms of its order, diameter, girth, and maximum degree, respectively. As a result, we obtain the tight Nordhaus-Gaddum-type result for this parameter.
The following result is immediate.
\begin{pro}\label{prop1} If $G$ is a graph of order $n$ with $n\leq k$, then $L_k(G)=n$. \end{pro}
\noindent\textbf{Remark 3.} Actually, the above condition that $n\leq k$ can be weakened to $\Delta(G)+1 \leq k$. So, we only need to consider the $k$-limited packing number for graphs $G$ with $\Delta(G)\geq k$.
\begin{pro}\label{prop2} If $G$ is a graph of order $k+1$, then \begin{equation*} L_k(G)= \left\{
\begin{array}{ll}
k & \hbox{ if $\Delta(G)=k$,}\\
k+1 & \hbox{ otherwise.} \\
\end{array} \right. \end{equation*} \end{pro}
\begin{proof} Let $G$ be a graph of order $k+1$. Then $k \leq L_k(G)\leq k+1$. Let $\Delta(G)=k$. Assume to the contrary that $L_k(G)= k+1$. It is obtained that $V(G)$ is the unique maximum $k$-limited packing of $G$. Let $v_0$
be a vertex with maximum degree $k$ in $G$. Then $|N[v_0]\cap V(G)|=k+1$, which is a contradiction. Thus, $L_k(G)= k$. It remains to show the other case. Let $\Delta(G)\leq k-1$. Obviously, $V(G)$ is a $k$-limited packing of $G$, it follows that $L_k(G)=k+1$. \end{proof}
For a given graph $G$ of order less than $k+2$, we can determine its $k$-limited packing number by Proposition \ref{prop1} and Proposition \ref{prop2}. So we are concerned with graphs of order at least $k+2$ in the following.
\begin{pro}\label{prop3} If $G$ is a graph of order at least $k+2$, then $L_k(G)\geq k$. \end{pro}
The following result is a generalization of Lemma \ref{lem16}.
\begin{thm}\label{th1} Let $G$ be a graph of order $n$. Then $L_k(G)=k$ if and only if one of the following conditions holds:
$(i)$ $n=k$,
$(ii)$ $\Delta(G)=k$, where $n=k+1$,
$(iii)$ for each $(k+1)$-subset $X$ of $V(G)$, $G[X]$ has maximum degree $k$ or the $k+1$ vertices of $X$ have a common neighbour, where $n\geq k+2$. \end{thm}
\begin{proof} The statement holds for $n\leq k+1$ by Proposition \ref{prop1} and Proposition \ref{prop2}, thus we may assume that $n\geq k+2$ in the following. Notice that $k \leq L_k(G)\leq n$ for $n\geq k+2$. Let $G$ be a graph of order $n$ such that for each $(k+1)$-subset $X$ of $V(G)$,
$G[X]$ has maximum degree $k$ or the $k+1$ vertices of $X$ have a common neighbour. Assume that $G$ has a $k$-limited packing $B$ with at least $k+1$ vertices. Let $X$ be a $(k+1)$-subset of $B$. Obviously, $X$ is also a $(k+1)$-subset of $V(G)$. If $G[X]$ has a vertex $v_0$ with degree $k$, then $|N[v_0]\cap B|\geq k+1$, which is a contradiction. If the $k+1$ vertices of $X$ have a common neighbour
$a$, then $|N[a]\cap B|\geq k+1$, which is also a contradiction. Thus, $L_k(G)= k$.
It remains to show the converse. Let $G$ be a graph of order $n$ such that $L_k(G)= k$. Assume that there exists a $(k+1)$-subset $X_0$ of $V(G)$ such that $G[X_0]$ has maximum degree at most $k-1$ and each vertex outside $X_0$ is adjacent to at most $k$ vertices in $X_0$. It follows that $X_0$ is a $k$-limited packing of $G$, which implies that $L_k(G)\geq k+1$, a contradiction. Therefore, if $G$ is a graph of order at least $k+2$ with $L_k(G)= k$, then for each $(k+1)$-subset $X$ of $V(G)$, $G[X]$ has maximum degree $k$ or the $k+1$ vertices of $X$ have a common neighbour. \end{proof}
The following result is an immediate corollary of the above theorem.
\begin{cor}\label{coro2} Let $G$ be a graph of order at least $k+1$ such that $L_k(G)=k$. Then $diam(G)\leq 2$. \end{cor}
Next, we present a lower bound on the $k$-limited packing number of a graph in terms of its diameter for $k\geq 3$, which is a generalization of Lemma \ref{lem7}.
\begin{thm}\label{th5} Let $G$ be a connected graph and $ \Delta(G)\geq k\geq 3$. Then $L_k(G)\geq diam(G)+k-2$. Moreover, the lower bound is tight. \end{thm}
\begin{proof} Let $P=v_1v_2\cdots v_{diam(G)+1}$ be a path of length $diam(G)$ in $G$. Obviously, for each vertex $v_i$ on $P$,
$|N[v_i]\cap V(P)|\leq 3$. We claim that $V(P)$ is a $3$-limited packing in $G$. Assume to the contrary that there exists a vertex
$u$ outside $P$ such that $|N(u)\cap V(P)|\geq 4$. Let $N(u)\cap V(P)=\{v_{i_1},\ldots,v_{i_d}\}$ with $i_1< \cdots < i_d$ and $d\geq 4$. Then $P'=v_1\cdots v_{i_1}uv_{i_d}\cdots v_{diam(G)+1}$ is a path between
$v_1$ and $v_{diam(G)+1}$, whose length is less than $diam(G)$, a contradiction. Thus, $L_3(G)\geq |V(P)|=diam(G)+1$. And by Lemma \ref{lem5}, we have $L_k(G)\geq L_3(G)+k-3\geq diam(G)+1+k-3=diam(G)+k-2.$
Corollary \ref{coro2} shows that non-complete graphs $G$ of order at least $k+1$ with $L_k(G)=k$ are ones satisfying $L_k(G)=diam(G)+k-2$. \end{proof}
Recall that \emph{the girth} of a graph $G$ is the length of a shortest cycle in $G$, denoted by $g(G)$.
\begin{thm}\label{th7} If $G$ is a graph with girth $g(G)$, then $L_1(G)\geq \lfloor\frac{g(G)}{3}\rfloor$. Moreover, the lower bound is tight. \end{thm}
\begin{proof} Let $G$ be a graph and $C$ be a cycle of length $g(G)$ in $G$. The statement is evidently true for $g(G)\leq 4$. Thus, we only need to consider the case when $g(G)\geq 5$. Let $B$ be a maximum $1$-limited packing of $C.$ Then
$|B|=L_1(C)=\lfloor\frac{g(G)}{3}\rfloor$ by Lemma \ref{lem3}. Next we will show that $B$ is also a $1$-limited packing of $G$, which implies that $L_1(G)\geq \lfloor\frac{g(G)}{3}\rfloor$. It is sufficient to show that each vertex $v$ outside $C$ has at most one neighbour on $C$. Assume to the contrary that there is a vertex
$v_0$ outside $C$ that is adjacent to two vertices, say $x,$ $y$, on $C$. Let $P$ be the shortest path between $x$ and $y$ on $C$. If $|V(P)|\leq 3$, then $x$, $v_0$ and $y$ are on either a $C_3$ or $C_4$ in $G$, contradicting $g(G)\geq 5.$ Now we may assume that
$|V(P)|\geq 4$. Let $C'$ be the cycle obtained from $C$ by replacing $P$ with the path $xv_0y$. Then the length of $C'$ is less than $g(G)$, which is a contradiction. Thus, each vertex outside $C$ has at most one neighbour on $C$. Furthermore, cycles are graphs $G$ with $L_1(G)= \lfloor\frac{g(G)}{3}\rfloor$ by Lemma \ref{lem3}. The proof is complete. \end{proof}
Next, we give the lower bound of the $k$-limited packing number of a graph with respect to its girth for general $k\geq 2$.
\begin{thm}\label{th2} If $G$ is a graph with girth $g(G)$, then $L_2(G)\geq \lfloor\frac{2g(G)}{3}\rfloor$ and $L_k(G)\geq g(G)+k-3$ for $ \Delta(G)\geq k \geq 3$. Moreover, the lower bounds are tight. \end{thm}
\begin{proof} The statement trivially holds for $g(G)\leq 3$. Thus, we may assume that $g(G)\geq 4$ in the following. Let $C$ be a cycle of length $g(G)$ in $G$. We first present the following claim.
\textbf{Claim 1.} Each vertex outside $C$ has at most two neighbours on $C$.
\noindent\textbf{Proof of Claim 1:} Suppose to the contrary that there is a vertex $v$ outside $C$ such that $v$ is adjacent to three vertices, say $x,$ $y,$ $z$, on $C$. Let $P$ be the shortest path containing $x,$ $y,$ $z$ on $C$ such that the end vertices of $P$ are contained in $\{x,y,z\}.$ Without loss of generality, assume that $x$, $y$ are the end vertices of $P.$
Obviously, $|V(P)|\geq 3.$ Suppose that $|V(P)|=3$. It follows that $z$ is adjacent to both $x$ and $y$. Therefore, $G[\{x,z,v\}]$ is exactly
$C_3$, which contradicts $g(G)\geq 4.$ Suppose that $|V(P)|\geq 4$. Let $C'$ be the cycle obtained from $C$ by replacing $P$ with the path $xvy$. Then the length of $C'$ is less than $g(G)$, which is a contradiction. Thus, each vertex outside $C$ has at most two neighbours on $C$.
Let $B$ be a maximum $2$-limited packing of $C.$
Then $|B|=L_2(C)=\lfloor\frac{2g(G)}{3}\rfloor$ by Lemma \ref{lem3}. By Claim $1$, we obtain that $B$ is also a $2$-limited packing of $G$. Thus, $L_2(G)\geq|B| =\lfloor\frac{2g(G)}{3}\rfloor$. Moreover, cycles are graphs $G$ with $L_2(G)= \lfloor\frac{2g(G)}{3}\rfloor$ by Lemma \ref{lem3}.
Observe that $V(C)$ is a maximum $3$-limited packing of $C$. And by Claim $1$, it is known that $V(C)$ is also a $3$-limited packing of $G$, which implies that $L_3(G)\geq g(G)$. It follows from Lemma \ref{lem5} and Remark $2$ that $L_k(G)\geq L_3(G)+k-3\geq g(G)+k-3$ for $k\geq 3$. Furthermore, graphs $G$ of order at least $k+1$ with triangles, satisfying $L_k(G)=k$, have the property that $L_k(G)=g(G)+k-3$. \end{proof}
Next, we turn to study the upper bound of the $k$-limited packing number of a graph.
\begin{thm}\label{th3} If $G$ is a graph of order $n$, then $L_k(G)\leq n+k-1-\Delta(G)$. \end{thm}
\begin{proof} Let $v_0$ be a vertex of maximum degree $\Delta(G)$ in $G$. If $k\geq \Delta(G)+1$, then it is clear that $V(G)$ is a $k$-limited packing of $G$, and hence $L_k(G)=n\leq n+k-1-\Delta(G)$. Thus, we may assume that $k< \Delta(G)+1$ in the following. Let $B$
be a maximum $k$-limited packing of $G$. Since $|N[v_0]\cap B|\leq k$, it follows that there exist at least $\Delta(G)+1-k$ vertices in
$N[v_0]\setminus B$, which means that $|\overline{B}|\geq \Delta(G)+1-k$. Thus, $L_k(G)=|B|=n-|\overline{B}|\leq n-(\Delta(G)+1-k)=n+k-1-\Delta(G)$. \end{proof}
We define the graph class $\mathcal{G}$ consisting of all graphs $G$ constructed as follows. Let $G$ be a graph of order $n$ such that $V(G)=A_0\cup B_0$ has the following properties:
$(i)$ $|A_0\cap B_0|=2$,
$(ii)$ $G[A_0]$ has a spanning star, and each component of $G[B_0]$ is $K_1$ or $K_2$,
$(iii)$ for each vertex $v\in \overline{B_0}$, $|N(v)\cap B_0|\leq 2$.
The following result shows that $\mathcal{G}$ is the set of all graphs $G$ of order $n$ with $L_2(G)= n+1-\Delta(G)$.
\begin{cor}\label{cor14} If $G$ is a graph of order $n$, then $L_2(G)\leq n+1-\Delta(G)$. Moreover, $L_2(G)= n+1-\Delta(G)$ if and only if $G \in \mathcal{G}$. \end{cor}
\begin{proof} We first restate the proof for Theorem \ref{th3}. Let $B$ be a maximum $2$-limited packing in $G$. Obviously, each component of $G[B]$ is
$K_1$ or $K_2$, and $|N[v]\cap B|\leq 2$ for each vertex $v$ of $G$. Let $v_0$ be a vertex of maximum degree $\Delta(G)$. Since $|N[v_0]\cap B|\leq 2$, it follows that there exist at least $\Delta(G)-1$ vertices in $N[v_0]\setminus B$. Thus,
$L_2(G)=|B|=n-|\overline{B}|\leq n-(\Delta(G)-1)=n+1-\Delta(G)$. Let $G$ be a graph of order $n$ such that $L_2(G)=n+1-\Delta(G)$. It is easily obtained that $G$ has the following properties:
$(P1)$ $|N[v_0]\cap B|=2$,
$(P2)$ $V(G)\setminus N[v_0]\subset B$.
By the above argument, we have $G \in \mathcal{G}$ with $N[v_0]=A_0$ and $B=B_0$. It remains to show the converse. Suppose that $G \in \mathcal{G}$. It is sufficient to show that $L_2(G)\geq n+1-\Delta(G)$. Let $A_0\cap B_0=\{v_p,v_q\}$ and
$|A_0|=t+1$, where $v_0$ is a vertex of degree $t$ in $G[A_0]$. Observe that $d(v_0)\geq t$. Furthermore, we obtain the following claim.
\textbf{Claim 1.} $\Delta(G)=t$.
\noindent\textbf{Proof of Claim 1:} Since each vertex in $B_0$ has degree at most $1$ in $G[B_0]$, it follows that each of $v_p,v_q$ is adjacent to at most one vertex in $B_0$. On the other hand, each of $v_p,v_q$ is adjacent to at most $t-1$ vertices in $A_0\setminus\{v_p,v_q\}$. Thus, $d(v_p)\leq t$ and $d(v_q)\leq t$. For each vertex $v$ in $A_0\setminus\{v_p,v_q\}$, $v$ is adjacent to at most $t-2$ vertices in $A_0\setminus\{v,v_p,v_q\}$ and at most two vertices in $B_0$, thus $d(v)\leq t$ for each vertex $v$ in $A_0\setminus\{v_p,v_q\}$. For each vertex $u$ in $B_0\setminus\{v_p,v_q\}$, $u$ is adjacent to at most $t-1$ vertices in $A_0\setminus\{v_p,v_q\}$ and at most one vertex in $B_0$, hence $d(u)\leq t$ for each vertex $u$ in $B_0\setminus\{v_p,v_q\}$. Thus $\Delta(G)\leq t$. But $d(v_0)\geq t$, which means that $\Delta(G)=t$.
Notice that $B_0$ is a $2$-limited packing of $G$ with
$|B_0|=n-|A_0|+2=n+1-\Delta(G)$, then $L_2(G)\geq n+1-\Delta(G)$. We complete the proof. \end{proof}
\begin{cor}\label{coro1} Let $G$ be a $d$-regular graph of order $n$ such that $L_k(G)= n+k-1-d$, where $k\leq d$. Then $d\geq \frac{n}{2}$. \end{cor}
\begin{proof} If $d=n-1$, then $G$ is a complete graph with $L_k(G)=k$ for
$n\geq k+1\geq 2$, and the result follows from $d=n-1\geq \frac{n}{2}$. Thus, we may assume that $d\leq n-2$. Suppose that $L_k(G)= n+k-1-d$. Let $B$ be a maximum $k$-limited packing of $G$ with $|B|=n+k-1-d$, and $v$ be a vertex of $G$. Since $|N[v]\cap B|\leq k$, it follows that
$|N[v]\cap \overline{B}|\geq d+1-k$. Assume that
$|N[v]\cap \overline{B}|> d+1-k$. Then $|B|<n-(d+1-k)=n+ k-1-d$, a contradiction. Thus, there exist exactly $d+1-k$ vertices, say $v_1, \ldots, v_{d+1-k}$, in $N[v]\cap \overline{B}$, furthermore,
$\overline{B}=\{v_1, \ldots, v_{d+1-k}\}$. Let $U=V(G)\setminus N[v]$. Since $d\leq n-2$, it follows that $|U|> 0$. Observe that $U\subseteq B$. Consider a vertex $u_i$ in $U$, there exist at most $k-1$ neighbours in $B$, therefore $u_i$ is adjacent to at least $d-(k-1)$ vertices in $\overline{B}$. But $|\overline{B}|=d+1-k$, it follows that for each vertex $u_i$ in $U$, $u_i$ is adjacent to all the vertices in $\overline{B}$. That is, each vertex $v_i$ in $\overline{B}$ is adjacent to all the $n-d-1$ vertices in $U$. Note that $d(v_i)=d$ and $v_i$ has at least one neighbour in $N[v]$, it follows that $n-d-1+1\leq d$. Thus, we obtain that $d\geq \frac{n}{2}$. \end{proof}
To end this section, we present tight Nordhaus-Gaddum-type results for the $k$-limited packing numbers of graphs $G$ and $\overline{G}$ for $k\geq1$. We first establish the tight Nordhaus-Gaddum-type lower bound for this parameter, and characterize all the graphs attaining this lower bound.
\begin{pro}\label{prop6} If $G$ is a graph of order at least $k$, then $L_k(G)+L_k(\overline{G})\geq 2k$. Moreover, $L_k(G)+L_k(\overline{G})=2k$ if and only if $G$ has one of the following properties:
$(i)$ $G$ has exactly $k$ vertices,
$(ii)$ for each $(k+1)$-subset $X$ of $V(G)$, $G[X]$ has maximum degree $k$ and there is a vertex outside $X$ such that it is not adjacent to any vertex of $X$, or there is a vertex outside $X$ such that it is adjacent to all the vertices of $X$ and $G[X]$ has an isolated vertex, or there are one vertex outside $X$ such that it is adjacent to all the vertices of $X$ and another vertex outside $X$ such that it is not adjacent to any vertex of $X$. \end{pro}
\begin{proof} Since it is impossible that $\Delta(G)=\Delta(\overline {G})=k$
for $|V(G)|=k+1$, it follows from Proposition \ref{prop2} that $L_k(G)+L_k(\overline{G})>2k$ for $|V(G)|=k+1$. Observe also that it is impossible that, for some $(k+1)$-subset $X$ of $V(G)$, both $G[X]$ and $\overline{G}[X]$ have maximum degree $k$. Thus, the result follows from Theorem \ref{th1}.
The tight Nordhaus-Gaddum-type upper bounds for $k$-limited packing numbers of graphs $G$ and $\overline{G}$ in the following theorem are a generalization of Lemma \ref{lem2}.
\begin{thm}\label{th10} Let $G$ be a graph of order $n$. Then \begin{equation*} L_k(G)+L_k(\overline{G})\leq \left\{
\begin{array}{ll}
2n & \hbox{ if $k\geq \max\{\Delta(G),\Delta(\overline {G})\}+1$,}\\
n+2k-2 & \hbox{ if $ k \leq \min\{\Delta(G),\Delta(\overline {G})\}$, }\\
2n-1 & \hbox{ otherwise. } \\
\end{array} \right. \end{equation*} Moreover, the upper bounds are tight. \end{thm}
\begin{proof} Suppose that $k\geq \max\{\Delta(G),\Delta(\overline {G})\}+1$. It is clear that $L_k(G)+L_k(\overline{G})=n+n=2n$.
Suppose that $\max\{\Delta(G),\Delta(\overline {G})\}+1> k \geq \min\{\Delta(G),\Delta(\overline {G})\}+1$. Without loss of generality, we assume that $\Delta(\overline {G})+1> k\geq \Delta(G)+1$. It follows that $L_k(G)=n$ and $L_k(\overline{G})<n$. Therefore, $L_k(G)+L_k(\overline{G})\leq 2n-1$. To see that the upper bound is tight, let $G$ be a graph of order $k+1$ with $\Delta(G)<k$ such that $G$ has isolated vertices. By Proposition \ref{prop2}, $L_k(G)+L_k(\overline{G})=2n-1$.
It remains to consider the case when $ k \leq \min\{\Delta(G),\Delta(\overline {G})\}$. By Theorem \ref{th3}, we have $L_k(G)\leq n+k-1-\Delta(G)$ and $L_k(\overline{G})\leq n+k-1-\Delta(\overline{G})$. Thus, \begin{eqnarray*} L_k(G)+L_k(\overline{G}) &\leq& (n+k-1-\Delta(G))+ (n+k-1-\Delta(\overline{G}))\\ &=& 2n+2k-2-(\Delta(G)+\Delta(\overline{G}))\\ &\leq& 2n+2k-2-(\Delta(G)+\delta(\overline{G}))\\ &=& 2n+2k-2-(n-1)\\ &=& n+2k-1. \end{eqnarray*} Next, we claim that it is impossible that $L_k(G)+L_k(\overline{G})= n+2k-1$. Assume to the contrary that $L_k(G)+L_k(\overline{G})= n+2k-1$. It follows that both $G$ and $\overline{G}$ are regular graphs with $ L_k(G)= n+k-1-\Delta(G)$ and $ L_k(\overline{G})= n+k-1-\Delta(\overline{G})$. Notice that $G$ is a $\Delta(G)$-regular graph and $\overline{G}$ is a $\Delta(\overline{G})$-regular graph, then $\Delta(G)+\Delta(\overline{G})=n-1$. Since $ L_k(G)= n+k-1-\Delta(G)$ and $ L_k(\overline{G})= n+k-1-\Delta(\overline{G})$, it follows from Corollary \ref{coro1} that $\Delta(G)\geq \frac{n}{2}$ and $\Delta(\overline{G})\geq \frac{n}{2}$, which implies that $\Delta(G)+\Delta(\overline{G})> n-1$, which is a contradiction. Thus, $L_k(G)+L_k(\overline{G})\leq n+2k-2$. The following examples show that the upper bound is best possible. Let $G=K_n-e$, where $e$ is an edge of $K_n$ and $n\geq 3$. Then $L_1(G)= 1$ by Theorem \ref{th1}. On the other side, $\overline{G}=K_2\cup (n-2)K_1$, then $L_1(\overline{G})= n-1$. It is obtained that $\min\{\Delta(G),\Delta(\overline {G})\}\geq 1$ and $L_1(G)+L_1(\overline{G})= n+2k-2=n$. \end{proof}
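The tightness example at the end of the proof is easy to check with the brute-force helper from the introduction; the sketch below (our own naming) builds $K_n-e$ and its complement.
\begin{verbatim}
def complete_minus_edge(n):
    """K_n minus the edge {0, 1}, together with its complement."""
    G = {i: set(range(n)) - {i} for i in range(n)}
    G[0].discard(1)
    G[1].discard(0)
    H = {i: {j for j in range(n) if j != i and j not in G[i]} for i in range(n)}
    return G, H

# for n = 6: limited_packing_number(G, 1) == 1, limited_packing_number(H, 1) == 5,
# so L_1(G) + L_1(complement of G) == 6 == n + 2*1 - 2.
\end{verbatim}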
\section{ $2$-limited packing}
In \cite{GGHR}, the authors bounded the $2$-limited packing number for a graph in terms of its order.
\begin{lem}{\upshape\cite{GGHR}}\label{lem26}
If $G$ is a connected graph with $|V(G)|\geq3$, then $L_2(G)\leq\frac{4}{5}|V(G)|$. \end{lem}
Furthermore, they imposed constraints on the minimum degree of $G$, and obtained the following result.
\begin{lem}{\upshape\cite{GGHR}}\label{le1}
If $G$ is a connected graph, and $\delta(G)\geq k$, then $L_k(G)\leq\frac{k}{k+1}|V(G)|$. \end{lem}
By Lemma \ref{le1}, we have $L_2(G)\leq\frac{2}{3}|V(G)|$ for graphs with $\delta(G)\geq 2$. Trees, however, have minimum degree $1$. We exhibit a class of trees $T$ with $2$-limited packing number at most $\frac{2}{3}|V(T)|$. The minimum degree of a graph $G$ over all its non-leaf vertices is denoted by $\delta'(G)$.
\begin{thm}\label{thm30} If $T$ is a tree with $\delta'(T)\geq 4$, then
$L_2(T)\leq\frac{2}{3}|V(T)|$. \end{thm}
\begin{proof}
Since $\delta'(T)\geq 4$, it follows that $|V(T)|\geq 5$. Let $B$ be a maximum $2$-limited packing of $T$. We proceed by induction on the order of $T$. If $|V(T)|=5$, then $T=K_{1,4}$, and hence $L_2(T)=2\leq \frac{2}{3}|V(T)|$ by Lemma \ref{lem3}. Let $T$ be a tree of order at least $6$, regarded as a rooted tree. Take a leaf vertex $v$ of $T$ at the lowest level of the rooted tree $T$. Let $v_0$ be the unique neighbour of
$v$ in $T$, and $L_0$ be the set of leaf vertices in $N(v_0)$. Since $v_0$ is adjacent to at least three leaf vertices, we have $|L_0|\geq 3$. Let $T_0$ be the subtree obtained from $T$ by deleting all the vertices of $L_0$. By the inductive hypothesis, $L_2(T_0)\leq \frac{2|V(T_0)|}{3}\leq \frac{2(|V(T)|-3)}{3}=\frac{2}{3}|V(T)|-2.$
Since $|N[v_0]\cap B|\leq 2$, it follows that $|L_0\cap B|\leq 2$. Hence, $L_2(T)\leq L_2(T_0)+2=\frac{2}{3}|V(T)|$. \end{proof}
Remark $1$ and Lemma \ref{lem16} show that both the open packing number and the $1$-limited packing number of a graph with diameter at most $2$ are small. These results naturally lead to the following problem: can the $2$-limited packing number of a graph $G$ be bounded by a constant for $diam(G)\leq 2$? The graph of order $n$ and diameter $1$, which is exactly $K_n$, has $2$-limited packing number $2$ by Lemma \ref{lem3}. Thus, we only need to investigate the $2$-limited packing number of graphs with diameter $2$. Theorem \ref{th6} answers the above question in the negative.
\begin{thm}\label{th6} For any positive integer $a$ with $a\geq 2$, there exists a graph $G$ with $diam(G)=2$ such that $L_2(G)=a.$ \end{thm}
\begin{proof} We construct a graph $G$ with $diam(G)=2$ such that $L_2(G)=a$ for $a\geq 2$ as follows.
First, suppose that $X=\{x_1,x_2,\ldots, x_a\}$ and $Y=\{y_1,y_2,\ldots, y_{\frac{a(a-1)}{2}}\}$ with $X\cap Y=\emptyset$. Let $G$ be a graph with $V(G)=X\cup Y$ such that $G[X]$ consists of $a$ isolated vertices, $G[Y]$ is a clique, the vertices of $Y$ are indexed by the pairs of distinct vertices of $X$, and each vertex of $Y$ is adjacent exactly to the two vertices of $X$ in its pair; in particular, each pair of distinct vertices in $X$ has a unique common neighbour in $Y$. Clearly, $diam(G)=2$. Now we need to show that $L_2(G)=a$. Notice that $|V(G)|=a+\frac{a(a-1)}{2}$ and $\Delta(G)=\frac{a(a-1)}{2}+1$, so $L_2(G)\leq |V(G)|+1-\Delta(G)=a$ by Corollary \ref{cor14}. Observe that $X$ is a $2$-limited packing of $G$, thus $L_2(G)= a$. \end{proof}
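The construction in the proof above can be reproduced explicitly; the following sketch (our own encoding of the vertices) builds its adjacency lists, and for small $a$ the brute-force helper from the introduction confirms that $L_2(G)=a$.
\begin{verbatim}
from itertools import combinations

def diameter_two_example(a):
    """The graph constructed above: X has a vertices, Y is a clique indexed by
    the pairs of X, and the vertex of the pair {i, j} is joined to x_i and x_j."""
    X = [("x", i) for i in range(a)]
    Y = [("y", p) for p in combinations(range(a), 2)]
    adj = {v: set() for v in X + Y}
    for (_, p) in Y:
        for i in p:
            adj[("y", p)].add(("x", i))
            adj[("x", i)].add(("y", p))
    for u, w in combinations(Y, 2):          # Y induces a clique
        adj[u].add(w)
        adj[w].add(u)
    return adj
\end{verbatim}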
But we can find graphs $G$ with $diam(G)=2$ such that $L_2(G)$ is small. First, we give some auxiliary lemmas.
\begin{lem}{\upshape\cite{JP}}\label{lem20} Every planar graph of diameter $2$ has domination number at most $2$ except for the graph $F$ of Fig. $1$ which has domination number $3$. \end{lem} \begin{figure}
\caption{The exceptional graph $F$ of Lemma \ref{lem20}}
\end{figure}
\begin{lem}\label{lem21} If $G$ is a graph with order $n$ and $\Delta(G)=n-1$, then $L_2(G)=2$. \end{lem}
\begin{proof} By Lemma \ref{lem7}, we have $L_2(G)\geq \lceil \frac{2diam(G)+2}{3}\rceil \geq \lceil\frac{4}{3}\rceil=2$. On the other hand, $L_2(G)\leq n-\Delta(G)+1=2$ by Corollary \ref{cor14}. Thus, $L_2(G)=2$. \end{proof}
\begin{lem}\label{lem22} Let $G$ be a graph with diameter $2$. Then
$(i)$ if $G$ has a cut vertex, then $L_2(G)=2$,
$(ii)$ if $G$ is a planar graph, then $L_2(G)\leq 4$. \end{lem}
\begin{proof} Firstly we prove part $(i)$. Let $v_0$ be a cut vertex of $G$. We claim that for any vertex $u\in V(G)\setminus \{v_0\}$, $d(u,v_0)=1$. Suppose that there exists a vertex $u_0\in V(G)\setminus \{v_0\}$ such that $d(u_0,v_0)=2$. Let $w_0$ be another vertex of $G$ such that $w_0$ and $u_0$ are contained in different components of $G-v_0$. Then
$d(u_0,w_0)\geq 3$, which contradicts $diam(G)=2$. Thus, for any vertex $u\in V(G)\setminus \{v_0\}$, $d(u,v_0)=1$. It follows that $d(v_0)=|V(G)|-1$, hence $L_2(G)=2$ by Lemma \ref{lem21}.
Next we prove part $(ii)$. Let $G$ be a planar graph with diameter $2$. Then it follows from Lemma \ref{lem4} and Lemma \ref{lem20} that $L_2(G)\leq 2\gamma(G)\leq 4$ except for the graph $F$ of Fig. $1$. It remains to verify the graph $F$ in Fig. $1$. Let $B$ be a maximum
$2$-limited packing of $F$. Observe that $|\{u_1,\ldots,u_4\}\cap B|\leq 2$
and $|\{v_0,\ldots,v_4\}\cap B|\leq 2$; it follows that $L_2(F)\leq 4$. \end{proof}
Next, we obtain an improved upper bound on the $2$-limited packing number of graphs with large diameter.
\begin{thm}\label{th9} If $G$ is a connected graph of order $n$, then $L_2(G)\leq n+1-\Delta(G)-\lfloor \frac{diam(G)-4}{3} \rfloor$. \end{thm}
\begin{proof}
If $diam(G)\leq 2$, then $L_2(G)\leq |V(G)|+1-\Delta(G)\leq n+1-\Delta(G)
-\lfloor \frac{diam(G)-4}{3} \rfloor $ by Corollary \ref{cor14}. Thus, we may assume that $diam(G)\geq 3$ in the following. Let $B$ be a maximum $2$-limited packing in $G$, and $u$ be a vertex of degree $\Delta(G)$. Then $|N[u]\cap B|\leq 2$. Let $P$ be a path of length $diam(G)$ between $x$ and $y$ in $G$. We claim that
$|V(P)\cap N[u]|\leq3$; otherwise, using the same argument as in the proof of Theorem \ref{th5}, we can find a path between $x$ and $y$ whose length is less than $diam(G)$, a contradiction. It also follows that $|\{x,y\}\cap N[u]|\leq 1$, since otherwise $d(x,y)\leq 2$, which contradicts $diam(G)\geq 3$. Without loss of generality, assume that $x\in V(G)\setminus N[u]$.
\textbf{Case 1.} $V(P)\cap N[u]=\emptyset$.
By Lemma \ref{lem3},
$P$ has at most $\lceil\frac{2|V(P)|}{3}\rceil $ vertices in
$B$. Then $|V(P)\cap \overline {B}|\geq \lfloor\frac{|V(P)|}{3}\rfloor$. On the other hand, $|N[u]\cap \overline {B}|\geq \Delta(G)+1-2=\Delta(G)-1$. Thus, \begin{eqnarray}
| \overline {B}|
&\geq & \Delta(G)-1+\lfloor\frac{|V(P)|}{3}\rfloor \nonumber\\
&=& \Delta(G)-1+\lfloor \frac{diam(G)+1}{3} \rfloor\nonumber. \end{eqnarray}
\textbf{Case 2.} $V(P)\cap N[u]\neq\emptyset$.
Let $P_x$, $P_y$ be the paths obtained from $P$ by deleting all the vertices in $N[u]$ such that $P_x$ and $P_y$ contain $x$, $y$, respectively. Let $H=P_x\cup P_y$. It is worth mentioning that $V(P_y)= \emptyset$ if $y \in N[u]$. Observe that $|V(H)|=|V(P_x)|+|V(P_y)|\geq |V(P)|-3 = diam(G)-2$. Since $H$ has at most
$\lceil\frac{2|V(P_x)|}{3}\rceil+\lceil\frac{2|V(P_y)|}{3} \rceil$ vertices in $B$ by Lemma \ref{lem3}, it follows that
$|V(H)\cap \overline{B}|\geq \lfloor\frac{|V(P_x)|}{3}\rfloor+\lfloor\frac{|V(P_y)|}{3} \rfloor$. Since $|N[u]\cap \overline {B}|\geq \Delta(G)+1-2=\Delta(G)-1$ and $V(H)\cap N[u]=\emptyset$, we have \begin{eqnarray}
| \overline{B}|
&\geq & \Delta(G)-1+\lfloor\frac{|V(P_x)|}{3}\rfloor+\lfloor\frac{|V(P_y)|}{3} \rfloor \nonumber\\
&\geq& \Delta(G)-1+\lfloor \frac{|V(H)|-2}{3} \rfloor\nonumber\\
&\geq& \Delta(G)-1+\lfloor \frac{diam(G)-4}{3} \rfloor \end{eqnarray} Combining Case $1$ and Case $2$, we have
$| \overline{B}|\geq \Delta(G)-1+\lfloor \frac{diam(G)-4}{3} \rfloor$. Hence, $L_2(G)=|B|\leq n+1-\Delta(G)-\lfloor \frac{diam(G)-4}{3} \rfloor$.
\end{proof}

\noindent\textbf{Remark 4.} The upper bound in Theorem \ref{th9} is better than that in Corollary \ref{cor14} for $diam(G)\geq 7$.
\section{ Comparing $L_2(T)$ with $L_1(T)$ and $\rho^{0}(T)$}
In this section, we study the relationship among the $2$-limited packing number, the $1$-limited packing number and the open packing number of trees.
\begin{lem}{\upshape\cite{HHS}}\label{lem27}
For any graph $G$, $L_1(G)\leq \rho^{0}(G)\leq 2L_1(G)$. \end{lem}
Similarly, we consider the relationship between the $2$-limited packing number and the $1$-limited packing number of graphs.
\begin{pro}\label{prop4} For any graph $G$ with edges, $L_1(G)+1\leq L_2(G) \leq \frac{2(\Delta(G)^2+1)}{\delta(G)+1}L_1(G).$ \end{pro}
\begin{proof} The lower bound is evidently true for $\Delta(G)\geq 1$ by Lemma \ref{lem5} and Remark $2$. It remains to verify the upper bound. Combining Lemma \ref{lem31} and Lemma \ref{lem10}, we have \begin{eqnarray*}
L_1(G) &\geq& \frac{|V(G)|}{\Delta(G)^2+1}\\
&=& \frac{2|V(G)|}{\delta(G)+1}\frac{\delta(G)+1}{2(\Delta(G)^2+1)}\\ &\geq& L_2(G)\frac{\delta(G)+1}{2(\Delta(G)^2+1)}. \end{eqnarray*} That is, $L_2(G) \leq \frac{2(\Delta(G)^2+1)}{\delta(G)+1}L_1(G).$ \end{proof}
With respect to trees, the above result can be further improved. Recall that a star is a tree with diameter at most $2$. Define a $t$-spider to be a tree obtained from a star by subdividing $t$ of its edges once.
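Spiders are easy to generate and test; the following sketch (our own labelling, with $s\geq 1$ unsubdivided leaf edges) builds the adjacency lists of a $t$-spider, and for small instances the brute-force helper from the introduction confirms the values of $L_1$ and $L_2$ computed in the proof below.
\begin{verbatim}
def t_spider_adj(t, s):
    """A t-spider: centre r with s leaf neighbours u_i and t subdivided legs r-v_i-w_i."""
    adj = {"r": set()}
    for i in range(s):
        adj[f"u{i}"] = {"r"}
        adj["r"].add(f"u{i}")
    for i in range(t):
        adj[f"v{i}"] = {"r", f"w{i}"}
        adj[f"w{i}"] = {f"v{i}"}
        adj["r"].add(f"v{i}")
    return adj

# e.g. for t = 2, s = 1: limited_packing_number(., 1) == 3 and
# limited_packing_number(., 2) == 4, i.e. L_2(T) = L_1(T) + 1.
\end{verbatim}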
\begin{thm}\label{th11} For any tree $T$, $L_1(T)+1\leq L_2(T)\leq 2L_1(T)$. Moreover, $L_1(T)+1= L_2(T)$ if and only if $T$ is a $t$-spider with $0\leq t<\Delta(T)$.
\end{thm}
\begin{proof} The lower bound holds from $\Delta(T)\geq 1$ by Lemma \ref{lem5}, and the upper bound follows from $L_2(T)\leq 2\gamma (T)=2L_1(T)$ by Lemma \ref{lem34} and Lemma \ref{lem4}.
Next, we show that $L_1(T)+1= L_2(T)$ if and only if $T$ is a $t$-spider with $0\leq t<\Delta(T)$. Let $T$ be a $t$-spider with $V(T)=\{r,v_1,\ldots,v_t, w_1,\ldots,w_t, u_1,\ldots,u_s\}$ and $E(T)=\{rv_1,\ldots,rv_t\}\cup \{ru_1,\ldots,ru_s\} \cup \{v_1w_1,\ldots,v_tw_t\}$, where $s\geq1$ and $t\geq 0$. If $t=0$, then $T$ is a star and the result follows from
$L_2(T)=2$ and $L_1(T)=1$. Now we assume that $t\geq1$. Notice that $|V(T)|=1+2t+s$ and $\Delta(T)=t+s$. By Corollary \ref{cor14}, we have
$L_2(T)\leq |V(T)|+1-\Delta(T)=t+2$. Observe that
$\{ v_1,u_1, w_1,\ldots,w_t\}$ is a $2$-limited packing of $T$, so $L_2(T)= t+2$. On the other hand, it follows from Theorem \ref{th3} that $L_1(T)\leq |V(T)|-\Delta(T)=t+1$. Since $\{w_1,\ldots,w_t, u_1\}$ is clearly a $1$-limited packing of $T$, we have $L_1(T)= t+1$. Thus, $L_1(T)+1= L_2(T)$.
It remains to show the converse. Let $T$ be a tree with $L_1(T)+1= L_2(T)$. We first give the following claim.
\textbf{Claim 1.} $diam(T)\leq 4$.
\noindent\textbf{Proof of Claim 1:} Assume to the contrary that there is a path $Q=v_1\cdots v_6$ of order $6$ in $T$. Let $B_1$ be a maximum $1$-limited packing of $T$. By Lemma \ref{lem3}, it is obtained that $|V(Q)\cap B_1|\leq 2$. To have a contradiction, we aim to find a $2$-limited packing that contains $|B_1|+2$ vertices in $T$. Suppose that $V(Q)\cap B_1=\emptyset$. We claim that
$B_2=B_1\cup\{v_1,v_6\}$ is a $2$-limited packing of $T$. It is clear that $|N[v_i]\cap B_2|\leq 2$ for each vertex $v_i$ on $Q$. Consider each vertex $u$ outside $Q$. First, we know
$|N[u]\cap B_1|\leq 1$ for each vertex $u$ outside $Q$. Since $T$ is a tree and has no cycle, it follows that each vertex $u$ outside $Q$ has at most one neighbour on $Q$, which means
$|N[u]\cap \{v_1,v_6\}|\leq 1$. Thus, $|N[u]\cap B_2|\leq 2$ for each vertex $u$ outside $Q$. As a result, we obtain that
$B_2=B_1\cup\{v_1,v_6\}$ is a $2$-limited packing of $T$. Suppose that $V(Q)\cap B_1=\{v_i\}$ for some $1\leq i\leq 6$. If $i=1$, then $B_1\cup\{v_2,v_6\}$ is a $2$-limited packing of $T$. It is worth mentioning that $v_2$ is not adjacent to any vertex in $B_1\setminus\{v_1\}$, otherwise $|N[v_2]\cap B_1|\geq2$, a contradiction. If $2\leq i\leq5$, then $B_1\cup\{v_1,v_6\}$ is a $2$-limited packing of $T$. If $i=6$, then $B_1\cup\{v_1,v_5\}$ is a $2$-limited packing of $T$. Suppose that $V(Q)\cap B_1=\{v_i,v_j\}$ for some $1\leq i\neq j\leq 6$. If $(i,j)\in\{(1,4),(3,6)\}$, then $ B_1\cup\{v_2,v_5\}$ is a $2$-limited packing of $T$. If $(i,j)\in\{(1,5),(1,6),(2,5),(2,6)\}$, then $(B_1\setminus \{v_i,v_j\})\cup\{v_1,v_2,v_5,v_6\}$ is a $2$-limited packing of $T$. By the above argument, we have $L_2(T)\geq L_1(T)+2$, which is a contradiction. Thus, it is obtained that $diam(T)\leq4$.
Suppose that $diam(T)\leq4$. Let $F$ be a tree with diameter $4$
and a unique vertex $f_0$ of maximum degree $3$ such that $f_0$ is adjacent to two leaf vertices. Let $B_1$ be a maximum $1$-limited packing of $T$. It is clear that $|V(F)\cap B_1|\leq 2$. We claim that $T$ has no $F$ as a subtree. Suppose to the contrary that $F$ is a subtree of $T$. By an argument similar to the one used when $T$ contains a path $P_6$, we can always find a $2$-limited packing with $L_1(T)+2$ vertices in $T$, as depicted in Fig. 2, which is a contradiction. Thus, $T$ has no $F$ as a subtree, which implies that $T$ is a $t$-spider with $0\leq t\leq \Delta(T)$. Notice that if $\Delta(T)=1$, then clearly $t=0$. For $\Delta(T)\geq2$, we claim that $t< \Delta(T)$. Assume to the contrary that $t=\Delta(T)\geq 2$. Let $r$ be a vertex of maximum degree $t$ with $N(r)=\{v_1,\ldots,v_t\}$ and $N(v_i)=\{r,w_i\}$ for $1\leq i\leq t$, where $w_1,\ldots,w_t$ are $t$ leaf vertices of $T$. Observe that $\{v_1, v_2, w_1,\ldots,w_t\}$ is a $2$-limited packing of $T$, so $L_2(T)\geq t+2$. On the other hand, $\{r, v_i, w_i\}$ contains at most one vertex of a $1$-limited packing of $T$ for each $1\leq i\leq t$, so $L_1(T)\leq t$. Thus, $L_2(T)\geq L_1(T)+2$, which is a contradiction. As a result, $T$ is a $t$-spider with $0\leq t< \Delta(T)$.
\begin{figure}
\caption{The three cases can arise on the number of
$|V(F)\cap B_1|$. In each case, the blue points correspond to the vertices in $B_1$, the black points correspond to the vertices outside $B_1$, and the circled black points correspond to the vertices outside $B_1$ that will be added into $B_2$.}
\end{figure}
\end{proof}
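As an illustration of the equality case just characterized, the short sketch below (plain Python, standard library only; the helper names are ours) builds a $t$-spider as above and confirms $L_1(T)+1=L_2(T)$ by exhaustive search for a few small choices of $t$ and $s$.
\begin{verbatim}
from itertools import combinations

def spider(t, s):
    # t-spider: centre 'r', s pendant leaves u_j at r, t subdivided legs r - v_i - w_i
    V = (['r'] + [f'v{i}' for i in range(t)] + [f'w{i}' for i in range(t)]
         + [f'u{j}' for j in range(s)])
    E = ([('r', f'v{i}') for i in range(t)]
         + [(f'v{i}', f'w{i}') for i in range(t)]
         + [('r', f'u{j}') for j in range(s)])
    adj = {v: set() for v in V}
    for v, w in E:
        adj[v].add(w)
        adj[w].add(v)
    return adj

def L(adj, k):
    # k-limited packing number by exhaustive search
    V = list(adj)
    for size in range(len(V), -1, -1):
        for B in combinations(V, size):
            Bset = set(B)
            if all(len((adj[v] | {v}) & Bset) <= k for v in V):
                return size

for t, s in [(0, 3), (1, 2), (2, 2), (3, 1)]:
    assert L(spider(t, s), 1) + 1 == L(spider(t, s), 2), (t, s)
print("L_1(T) + 1 = L_2(T) for the sampled t-spiders")
\end{verbatim}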
\noindent\textbf{Remark 5.} By the proof of Theorem \ref{th11}, we know that $L_2(T)= 2L_1(T)$ if and only if $L_2(T)= 2\gamma (T)$. And all the trees $T$ with $L_2(T)= 2\gamma (T)$ are characterized in \cite{GGHR}.
\vskip 0.3cm
Similarly, we compare the $2$-limited packing number with the open packing number of trees. We first define a class of trees, which is needed in the following theorem. Let $\mathcal{T}$ be the set of trees $T$ whose vertex set can be partitioned into two disjoint subsets $S_0$ and $R_0$, satisfying the following properties:
$(i)$ $T[S_0]=aK_2$, and each copy of $K_2$ has at least one vertex with degree $1$ in $T$, where $a$ is a positive integer,
$(ii)$ for each $r\in R_0$, $|N(r)\cap S_0|=1$.
\begin{thm}\label{th12} For any tree $T$, $\rho^{0}(T)\leq L_2(T)\leq2\rho^{0}(T)$. Moreover, $\rho^{0}(T)= L_2(T)$ if and only if $T\in \mathcal{T}$. \end{thm}
\begin{proof} For the lower bound, we give the stronger result that
$ L_2(G)\geq \rho^{0}(G)$ for any graph $G$. Let $S$ be an open packing of $G$ with $|S|=\rho^{0}(G)$. Then
$|N(v)\cap S|\leq 1$ for each vertex $v$ of $G$. Obviously,
$|N[v]\cap S|\leq 2$ for each vertex $v$ of $G$. It is obtained that $S$ is a $2$-limited packing of $G$, therefore $\rho^{0}(G)\leq L_2(G)$. On the other hand, since $\gamma(T)\leq \gamma_t(T)$, it follows that $L_2(T)\leq2\gamma(T)\leq 2\gamma_t(T)=2\rho^{0}(T)$ by Lemma \ref{lem33} and Lemma \ref{lem4}.
Next, we show that $\rho^{0}(T)= L_2(T)$ if and only if
$T\in \mathcal{T}$. Let $T$ be a tree in $\mathcal{T}$. If $T$ has only two vertices, then $T=K_2$, and hence the result trivially holds. Now we assume that $|V(T)|\geq 3$. Observe that $S_0$ is an open packing of $T$; it follows that $\rho^{0}(T)\geq 2a$. By the definition of $\mathcal{T}$, the vertex set $V(T)$ can be partitioned into $V_1\cup \cdots\cup V_a$ such that $T[V_i]$ is a star for each $1\leq i\leq a$. Notice that $V_i$ has at most two vertices in a $2$-limited packing of $T$ for each $1\leq i\leq a$, so $L_2(T)\leq 2a$. Since $\rho^{0}(T)\leq L_2(T)$, it follows that $\rho^{0}(T)= L_2(T)$ for each tree $T$ in $\mathcal{T}$.
Conversely, suppose that $\rho^0(T)= L_2(T)$. Let $S$ be a maximum open packing of $T$. It is known that each component of $T[S]$ is $K_1$ or $K_2$. To show $T\in \mathcal{T}$, we give the following claims.
\textbf{Claim 1.} $T[S]=tK_2$.
\noindent\textbf{Proof of Claim 1:} Suppose to the contrary that there is at least one isolated vertex, say $v$, in $T[S]$. Since $T$ is connected, it follows that $v$ has a neighbour, say $r$, in $\overline {S}$. Let $B=S\cup\{r\}$. Next we show that $B$ is a $2$-limited packing of $T$. Since $r$ is not adjacent to any vertex in $S\setminus\{v\}$, it follows that for each vertex $v$ in $B$, $|N[v]\cap B|\leq 2$. On the other hand, for each vertex $u$ in $\overline{B}$, we have $|N[u]\cap S|\leq 1$, and hence $|N[u]\cap B|\leq 2$. Thus, $B$ is a $2$-limited packing of $T$, and $L_2(T)\geq |B|=|S|+1$, which is a contradiction. Thus, there is no isolated vertex in $T[S]$, so $T[S]=tK_2$.
\textbf{Claim 2.} For each $r\in \overline{S}$, $|N(r)\cap S|=1$.
\noindent\textbf{Proof of Claim 2:} By the definition of the open packing, we know $|N(r)\cap S|\leq 1$ for any vertex $r\in \overline{S}$. To show this claim, it remains to prove $|N(r)\cap S|\geq 1$ for any vertex $r\in \overline{S}$. Assume that there is a vertex $r_0\in \overline{S}$
such that $N(r_0)\cap S=\emptyset$. Then $S\cup \{r_0\}$ is a $2$-limited packing of $T$, so $L_2(T)\geq \rho^{0}(T)+1$, a contradiction. Hence, we have $|N(r)\cap S|=1$ for each $r\in \overline{S}$.
\textbf{Claim 3.} Each component of $T[S]$ has at least one vertex with degree $1$ in $T$.
\noindent\textbf{Proof of Claim 3:} Suppose that $T[S]$ has one component $K_2=v_1v_2$, where $d(v_i)\geq2$ for $i=1,2$. It is obtained that there is a path $P=uv_1v_2w$ in $T$, where $u,w\in \overline{S}$. Notice that each vertex on $P$ has the property that its neighbours outside $P$ are not contained in $S$, otherwise there is a vertex on $P$ such that it has at least two neighbours in $S$, which is a contradiction. It is obtained that $(S\setminus \{v_2\})\cup\{u,w\}$ is a $2$-limited packing of $T$, which means $L_2(T)\geq \rho^0(T)+1$, which is a contradiction. Thus, each component of $T[S]$ has at least one vertex with degree $1$ in $T$.
By the above claims, we get that if $\rho^0(T)= L_2(T)$, then $T\in \mathcal{T}$ with $S=S_0$, this completes the proof. \end{proof}
Graphs with $\rho^{0}(G)=1$ and graphs with $L_k(G)=k$ for $k=1,2$ are characterized in Lemma \ref{lem15} and Theorem \ref{th1}, respectively. So we assume that $a\geq 2$ in the following theorem.
\begin{thm}\label{th8} For each pair of integers $a$ and $b$ with $a\geq 2$ and $a+1\leq b\leq 2a$, there exists a tree $T$ such that $\rho^{0}(T)=L_1(T)=a$ and $L_2(T)=b$. \end{thm}
\begin{proof} Suppose $a$ and $b$ are two positive integers with $a+1\leq b\leq 2a$. Let $b=a+r$ with $1\leq r\leq a$. To construct a tree $T$ with $\rho^{0}(T)=L_1(T)=a$ and $L_2(T)=a+r$ for $a\geq2$ and $1\leq r\leq a$, we distinguish the following two cases.
\textbf{Case 1.} $a=r$.
Suppose that $Q_i=x_iy_iz_i$ is a path of order $3$ for $1\leq i\leq a$. Let $T$ be the tree obtained from $Q_1\cup\cdots \cup Q_a$ by adding the edge $y_iy_{i+1}$ for $1 \leq i\leq a-1$. First, we show that $L_2(T)=2a$. Since each $V(Q_i)$ has at most two vertices in a $2$-limited packing of $T$ for $1\leq i\leq a$, we have $L_2(T)\leq 2a$. It is observed that $\{x_1, \cdots, x_a, z_1,\cdots,z_a\}$ is a $2$-limited packing of $T$, thus $L_2(T)=2a$. Next, we show that $\rho^{0}(T)=L_1(T)=a$. Note that $L_1(T)\leq \rho^{0}(T)$ by Lemma \ref{lem27}, so it is sufficient to show that $L_1(T)\geq a$ and $\rho^{0}(T)\leq a$. Obviously, $\{x_1, \cdots, x_a\}$ is a $1$-limited packing of $T$, thus $L_1(T)\geq a$. It remains to show that $\rho^{0}(T)\leq a$. It is clear that $\{y_1, \cdots, y_a\}$ is a total dominating set of $T$, which implies that $\gamma_t(T)\leq a$. By Lemma \ref{lem33}, we have $\rho^{0}(T)=\gamma_t(T)\leq a$.
\textbf{Case 2.} $1\leq r\leq a-1$.
Consider a star $A=K_{1,a}$ with $V(A)=\{v_0,v_1,\ldots,v_a\}$ and $d(v_0)=a$. Let $T$ be the tree obtained from $A$ by adding two pendent edges $v_iw_i$ and $v_iw_i^{'}$ to each $v_i$ of $A$ for $1\leq i\leq r-1$, and one pendent edge $v_iw_i$ at each $v_i$ of $A$ for $r\leq i\leq a-1$. Fig. $3$ gives an example for $a=8$, $b=12$.
\begin{figure}
\caption{A graph with $\rho^{0}(T)=L_1(T)=8$ and $L_2(T)=12$}
\end{figure}
To obtain that $L_1(T)=\rho^{0}(T)=a$, it suffices to prove that $L_1(T)\geq a$ and $\rho^{0}(T)\leq a$ by Lemma \ref{lem27}. Obviously, $\{w_1,\ldots,w_{a-1}, v_a\}$ is a $1$-limited packing of $T$, so $L_1(T)\geq a$. On the other hand, let $S$ be a maximum open packing of $T$ with
$|S|=\rho^{0}(T)$. It suffices to show that $|S|\leq a$. Suppose that $v_0\in S$. Then $\{v_i: 1\leq i\leq a\}$ has at most one vertex in $S$, and
$\{w_i,w_j': 1\leq i\leq a-1, 1\leq j\leq r-1\}$ has no vertex in $S$. It is obtained that $S=\{v_0, v_i\}$ for some $i$ with $1\leq i\leq a$, and hence $|S|=2\leq a$. Suppose that $v_0 \notin S$. If $v_a\in S$, then $\{v_i: 1\leq i\leq a-1\}$ has no vertex in $S$ and $\{w_i,w_i'\}$ has at most one vertex in $S$ for each $1\leq i\leq a-1$, and hence
$|S|\leq a$. If $v_a\notin S$, then both $\{v_i: 1\leq i\leq a-1\}$ and $\{w_i,w_i'\}$ for each $1\leq i\leq a-1$ have at most one vertex in $S$, so
$|S|\leq a$.
It remains to show that $L_2(T)= a+r$ with $1\leq r\leq a-1$. Note that $T$ has $2a+r-1$ vertices and $\Delta(T)=a$. By Corollary \ref{cor14}, we have $L_2(T)\leq |V(T)|+1-\Delta(T)= a+r$. Observe that $\{v_a,v_{a-1}\}\cup\{w_i,w_j': 1\leq i\leq a-1, 1\leq j\leq r-1\}$ is a $2$-limited packing of $T$, so $L_2(T)= a+r$. \end{proof}
\end{document}
\begin{document}
\begin{center}
{\bf Combinatorial Sums $\sum_{k\equiv r(\mbox{mod } m)}{n\choose k}a^k$ and Lucas Quotients (II)}
\vskip 20pt
{\bf Jiangshuai Yang}\\
{\smallit Key Laboratory of Mathematics Mechanization, NCMIS, Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing 100190, People's Republic of China}\\
{\tt [email protected]}\\
\vskip 10pt
{\bf Yingpu Deng}\\
{\smallit Key Laboratory of Mathematics Mechanization, NCMIS, Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing 100190, People's Republic of China}\\
{\tt [email protected]}\\
\end{center} \vskip 30pt
\centerline{\bf Abstract}
\noindent In \cite{dy}, we obtained some congruences for Lucas quotients of two infinite families of Lucas sequences by studying the combinatorial sum $$\sum_{k\equiv r(\mbox{mod }m)}{n\choose k}a^k.$$ In this paper, we show that the sum can be expressed in terms of some recurrent sequences with orders not exceeding $\varphi{(m)}$ and
give some new congruences.
\pagestyle{myheadings}
\thispagestyle{empty}
\baselineskip=12.875pt
\vskip 30pt
\section{Introduction}
\noindent Let $p$ be an odd prime, using the formula for the sum $$\sum_{k\equiv r(\mbox{mod }8)}{n\choose k},$$
Sun \cite{s1995} proved that \[\sum\limits_{k=1}^{\frac{p-1}{2}}\frac{1}{k\cdot2^k}\equiv\sum\limits_{k=1} ^{[\frac{3p}{4}]}\frac{(-1)^{k-1}}{k}\pmod p.\] Later, Shan and E.T.H.Wang \cite{sw} gave a simple proof of the above congruence. In \cite{sun5}, Sun proved five similar congruences by using the formulas for Fibonacci quotient and Pell quotient.
In \cite{s2002}, Sun showed that the sum
$$\sum_{k\equiv r(\mbox{mod }m)}{n\choose k},$$
where $n,m$ and $r$ are integers with $m,n>0$, can be expressed in terms of some recurrent sequences with orders not exceeding $\varphi{(m)}/2$, and obtained the following congruence
\[\sum_{k=1}^{\frac{p-1}{2}}\frac{3^k}{k}\equiv\sum_{k=1}^{\left[\frac{p}{6}\right]}\frac{(-1)^k}{k} \pmod p. \]
In \cite{dy}, we studied the more general sum \begin{equation}\label{generalsum} \sum_{k\equiv r(\mbox{mod }m)}{n\choose k}a^k, \end{equation} and obtained congruences for Lucas quotients of two infinite families of Lucas sequences (see Theorems 4.10 and 5.4 of \cite{dy}). In this paper, we continue studying this sum. We show that it can be expressed in terms of some recurrent sequences with orders not exceeding $\varphi{(m)}$, and
obtain some new congruences.
For $x\in\mathbb{R}$, we use $[x]$ to denote the integral part of $x$ i.e., the largest integer $\leq x$. For odd prime $p$ and integer $b$, let $\left(\frac bp\right)$ denote the Legendre symbol and $q_p(b)$ denote the Fermat quotient $(b^{p-1}-1)/p$ if $p\nmid b$. When $c,d\in\mathbb{Z}$, as usual $(c,d)$ stands for the greatest common divisor of $c$ and $d$. For any positive integer $m$, let $\zeta_m=e^{\frac{2\pi i}{m}}$ be the primitive $m$-th root of unity and let $\varphi({m})$, $\mu{(m)}$ denote the Euler totient function and M$\ddot{\textup{o}}$bius function respectively. Throughout this paper, we fix $a\neq 0,\pm1$.
\section{Main Results}
\begin{definition}\label{defsum} {\rm Let $n,m,r$ be integers with $n>0$ and $m>0$. We define
$$\left[\begin{array}{c}n \\ r\\\end{array}\right] _{m}(a):=\sum_{\substack{k=0\\k\equiv r({\mbox{mod }}m)}}^n\binom nk a^k,$$
where ${n\choose k}$ is the binomial coefficient with the convention ${n\choose k}=0$ for $k<0$ or $k>n$.} \end{definition}
\noindent Then we have the following theorem.
\begin{theorem}\label{Maintheorem}
Let $m,n\in\mathbb{Z}^+$, and $k\in\mathbb{Z}$. Write
$$W_{n}(k,m)=\sum_{\substack{l=1\\(l,m)=1}}^m\zeta_m^{-kl}(1+a\zeta_m^l)^n,$$
and
$$A_{m}(x)=\prod_{\substack{l=1\\(l,m)=1}}^m(x-1-a\zeta_m^l)=\sum\limits_{s=0}^{\varphi(m)}b_sx^s.$$
Then
$$A_{m}(x)\in \mathbb{Z}[x] \quad and \quad\sum\limits_{s=0}^{\varphi(m)}b_sW_{n+s}(k,m)=0.$$
Moreover, for any $r\in\mathbb{Z}$ we have
\begin{equation*}
\left[\begin{array}{c}n \\r\\\end{array}\right] _{m}(a)=\frac1m\sum\limits_{d\mid m }W_{n}(r,d).
\end{equation*}
\end{theorem}
\begin{proof}
It is easy to see that the coefficients of $A_{m}(x+1)$ are symmetric polynomials in the primitive $m$-th roots of unity with integer coefficients. Since
\[\Phi_m(x)=\prod_{\substack{l=1\\(l,m)=1}}^m(x-\zeta_m^l)\in\mathbb{Z}[x],\]
we have $A_{m}(x+1)\in \mathbb{Z}[x]$ by the Fundamental Theorem of Symmetric Polynomials. Therefore $A_{m}(x)\in \mathbb{Z}[x].$
For any positive integer $n$, we clearly have
\begin{align*}
\sum\limits_{s=0}^{\varphi(m)}b_sW_{n+s}(k,m)
&=\sum\limits_{s=0}^{\varphi(m)}b_s\sum_{\substack{l=1\\(l,m)=1}}^m\zeta_m^{-kl}(1+a\zeta_m^l)^{n+s}\\
&=\sum_{\substack{l=1\\(l,m)=1}}^m\zeta_m^{-kl}(1+a\zeta_m^l)^{n}\sum\limits_{s=0}^{\varphi(m)}b_s(1+a\zeta_m^l)^{ s}\\
&=\sum_{\substack{l=1\\(l,m)=1}}^m\zeta_m^{-kl}(1+a\zeta_m^l)^{n}A_{m}(1+a\zeta_m^l)\\
&=0.
\end{align*}
Let $r\in\mathbb{Z}$, then we have
\begin{align*}
\left[\begin{array}{c}n \\ r\\\end{array}\right] _{m}(a)
&=\sum\limits_{k=0}^n\binom nka^k\cdot\frac1m\sum\limits_{l=1}^m\zeta_m^{(k-r)l}\\
&=\frac1m\sum\limits_{l=1}^m\zeta_m^{-rl}(1+a\zeta_m^l)^n\\
&=\frac1m\sum\limits_{d\mid m}\sum_{\substack{b=1\\(b,d)=1}}^d\zeta_d^{-rb}(1+a\zeta_d^b)^n\\
&=\frac1m\sum\limits_{d\mid m}W_{n}(r,d).
\end{align*}
This ends the proof.
\end{proof}
Note that the theorem is a generalization of Theorem 1 of \cite{s2002}.
\begin{remark}\label{Wremark}
The last result shows that $\left[\begin{array}{c}n \\ r\\\end{array}\right] _{m}(a)$ can be expressed in terms of some linearly recurrent sequences with orders not exceeding $\varphi{(m)}.$
\end{remark}
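The polynomials $A_m(x)$ for small $m$, listed below, are easy to generate symbolically. A minimal sketch (assuming the \texttt{sympy} package provides \texttt{cyclotomic\_poly}): writing $\Phi_m(t)=\sum_j c_j t^j$, the defining product equals $\sum_j c_j a^{\varphi(m)-j}(x-1)^j$, since $\prod_{(l,m)=1}(x-1-a\zeta_m^l)=a^{\varphi(m)}\Phi_m\big(\frac{x-1}{a}\big)$.
\begin{verbatim}
import sympy as sp

a, x, t = sp.symbols('a x t')

def A(m):
    # A_m(x) = sum_j c_j * a^(phi(m)-j) * (x-1)^j, where Phi_m(t) = sum_j c_j t^j
    Phi = sp.Poly(sp.cyclotomic_poly(m, t), t)
    phi = Phi.degree()
    return sp.expand(sum(c * a ** (phi - j) * (x - 1) ** j
                         for (j,), c in Phi.terms()))

for m in range(1, 7):
    print(f"A_{m}(x) =", sp.collect(A(m), x))

# spot-check the coefficient of x in A_5(x)
assert sp.simplify(A(5).coeff(x, 1) - (a**3 - 2*a**2 + 3*a - 4)) == 0
\end{verbatim}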
Now we list $A_{m}(x)$ for $1\leq m\leq6$:
\begin{align*}
&A_{1}(x)=x-1-a, A_{2}(x)=x-1+a,\\
&A_{3}(x)=x^2-(2-a)x+a^2-a+1, A_{4}(x)=x^2-2x+a^2+1,\\
&A_{5}(x)=x^4-(4-a)x^3+(a^2-3a+6)x^2+(a^3-2a^2+3a-4)x+a^4-a^3+a^2-a+1,\\
&A_{6}(x)=x^2-(a+2)x+a^2+a+1.
\end{align*} \begin{lemma}\textup{(\cite{s2002})}\label{Molemma}
Let $m,c$ be integers with $m>0$. Then we have
\begin{equation*}
\sum\limits_{d\mid m}\mu(\frac md)d\delta_{d\mid c}=\varphi(m)\frac{\mu(m/(c,m))}{\varphi(m/(c,m))},
\end{equation*}
where
\[\delta_{d\mid c}=\begin{cases}1,&\mbox{ if }d\mid c \mbox{ holds};\\0,&\mbox{otherwise}.\end{cases} \]
\end{lemma}
\begin{proof}
We can find that both sides are multiplicative with respect to $m$, thus we only need to prove it when $m$ is a prime power. For any prime $p$ and positive integer $k$, we have
\begin{align*}
\sum\limits_{d\mid p^k}\mu(\frac {p^k}d)d\delta_{d\mid c}
&=\sum\limits_{s=0}^k\mu(p^{k-s})p^s\delta_{p^s\mid c}\\
&=p^k\delta_{p^k\mid c}-p^{k-1}\delta_{p^{k-1}\mid c}\\
&=\begin{cases}p^k-p^{k-1}&\textup{if}\; p^k\mid c,\\
-p^{k-1}&\textup{if}\;p^{k-1}\parallel c,\\
0&\textup{if} \;p^{k-1}\nmid c.\end{cases}\\
&=\varphi(p^k)\frac{\mu(p^k/(c,p^k))}{\varphi(p^k/(c,p^k))}.
\end{align*}
This concludes the proof.
\end{proof}
\begin{theorem}\label{Wtheorem}
Let $m,n\in\mathbb{Z}^+ ,r\in\mathbb{Z}$. Then
\begin{equation*}
W_{n}(r,m)=\varphi(m)\sum\limits_{k=0}^n\frac{\mu(m/(k-r,m))}{\varphi(m/(k-r,m))}\binom nka^k.
\end{equation*}
\end{theorem}
\begin{proof}
By Theorem \ref{Maintheorem}, Lemma \ref{Molemma} and M$\ddot{\textup{o}}$bius Inversion Theorem, we have
\begin{align*}
W_{n}(r,m)
&=\sum\limits_{d\mid m}\mu(\frac md)d\left[\begin{array}{c}n \\r\\\end{array}\right]_d(a)\\
&=\sum\limits_{d\mid m}\mu(\frac md)d\sum\limits_{k=0}^n\binom nka^k\delta_{d\mid k-r} \\
&= \sum\limits_{k=0}^n\binom nka^k\sum\limits_{d\mid m}\mu(\frac md)d\delta_{d\mid k-r}\\
&=\varphi(m)\sum\limits_{k=0}^n\frac{\mu(m/(k-r,m))}{\varphi(m/(k-r,m))}\binom nka^k.
\end{align*}
\end{proof}
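As a quick numerical cross-check of this formula (not needed for the proof; plain Python, with the M$\ddot{\textup{o}}$bius and Euler functions implemented directly), one may compare $W_n(r,m)$ computed from its definition with the right-hand side above for a sample of parameters:
\begin{verbatim}
import cmath
from fractions import Fraction
from math import comb, gcd

def phi(n):
    return sum(1 for j in range(1, n + 1) if gcd(j, n) == 1)

def mu(n):
    # Moebius function by trial division
    res, d = 1, 2
    while d * d <= n:
        if n % d == 0:
            n //= d
            if n % d == 0:
                return 0
            res = -res
        d += 1
    return -res if n > 1 else res

def W_def(n, r, m, a):
    # W_n(r,m) straight from the definition, as a complex number
    return sum(cmath.exp(-2j * cmath.pi * r * l / m)
               * (1 + a * cmath.exp(2j * cmath.pi * l / m)) ** n
               for l in range(1, m + 1) if gcd(l, m) == 1)

def W_formula(n, r, m, a):
    # right-hand side of the theorem, evaluated exactly
    return phi(m) * sum(Fraction(mu(m // gcd(abs(k - r), m)),
                                 phi(m // gcd(abs(k - r), m))) * comb(n, k) * a ** k
                        for k in range(n + 1))

a = 2
for m in (3, 4, 5, 6, 8, 12):
    for n in (5, 7, 10):
        for r in range(m):
            assert abs(W_def(n, r, m, a) - float(W_formula(n, r, m, a))) < 1e-6
print("formula checked numerically for the sampled parameters")
\end{verbatim}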
\begin{corollary}\label{Wcorollary}
Let $m,n$ be two relatively prime positive integers. Then we have
\begin{equation*}
W_{n}(0,m)-\varphi(m)-\mu(m)a^n=\varphi(m)n\sum\limits_{k=1}^{n-1}\frac{\mu(m/(m,k))}{\varphi(m/(m,k))}\binom{n-1}{k-1}\frac{a^k}{k}
\end{equation*}
and
\begin{equation*}
W_{n}(n,m)-\varphi(m)a^n-\mu(m)=\varphi(m)n\sum\limits_{k=1}^{n-1}\binom{n-1}{k-1}\frac{\mu(m/(m,k))}{\varphi(m/(m,k))}\frac{a^{n-k}}{k}.
\end{equation*}
\end{corollary}
\begin{proof}
Since $\binom nk=\frac nk\binom{n-1}{k-1}$ for $1\leq k\leq n$, we can derive the results by setting $r=0$ and $r=n$, respectively, in Theorem \ref{Wtheorem}.
\end{proof}
\begin{corollary}\label{Wpcorlllary}
Let $m\in\mathbb{Z}^+$ and $p$ be an odd prime not dividing $am$. Then we have
\begin{equation*}
\frac{W_{p}(0,m)- \varphi(m)-\mu(m)a^p}{p}\equiv-\varphi(m)\sum\limits_{k=1}^{p-1}\frac{\mu(m/(m,k))}{\varphi(m/(m,k))}\cdot\frac{(-a)^k}{k}\pmod p,
\end{equation*}
and
\begin{equation*}
\frac{W_{p}(p,m)-\varphi(m)a^p-\mu(m)}{p}\equiv\varphi(m)\sum\limits_{k=1}^{p-1}\frac{\mu(m/(m,k))}{\varphi(m/(m,k))}\cdot\frac{1}{k(-a)^{k-1}}\pmod p.
\end{equation*}
\end{corollary}
\begin{proof}
Since $\binom{p-1}{k}=(-1)^k$ for $0\leq k\leq p-1$, the results follow from Corollary \ref{Wcorollary}.
\end{proof}
\section{Some New Congruences}
In this section, we give some new congruences by using the results of \cite{dy}.
\begin{lemma}\label{3uvlemma}
Let $p\nmid 3a(2-a)(a^3+1)$ be an odd prime, and $\{u_n\}_{n\geq0},\{v_n\}_{n\geq0}$ be the Lucas sequences defined as
$$u_0=0,\;u_1=1,\;u_{n+1}=(2-a)u_n-(a^2-a+1)u_{n-1}\;\textup{for}\;n\geq1;$$
$$v_0=2,\;v_1=(2-a),\;v_{n+1}=(2-a)v_n-(a^2-a+1)v_{n-1}\;\textup{for}\;n\geq1.$$
Then we have: \\
\begin{description}
\item[(1)]
\[ \frac{u_p-\left(\frac{-3}{p}\right)}{p}\equiv\sum\limits_{k=1}^{\frac{p-1}2}\frac{(-3)^{k-1}}{2k-1}\cdot\left(\frac{a}{2-a}\right)^{2k-2}
+\left(\frac{-3}{p}\right)\left(q_p(a)-q_p(2)+\frac12q_p(3)\right)\pmod p;\]
\item[(2)]
\[ \quad\frac{v_{p}-(2-a)}{p}\equiv
(2-a)\left[-\frac12\sum\limits_{k=1}^{\frac{p-1}2}\frac{(-3)^k}{k}\cdot\left(\frac{a}{2-a}\right)^{2k}-q_p(2)+q_p(2-a)\right]\pmod p.\]
\end{description}
\end{lemma}
\begin{proof}
By Lemmas 2.1 and 2.2 of \cite{dy}, we have
$u_p=\frac1{a\sqrt{-3}} \left[\left(\frac{2-a}{2}+\frac a2\sqrt{-3}\right)^p- \left(\frac{2-a}{2}-\frac a2\sqrt{-3}\right)^p\right]$
, $v_p= \left(\frac{2-a}{2}+\frac a2\sqrt{-3}\right)^p+ \left(\frac{2-a}{2}-\frac a2\sqrt{-3}\right)^p$, and $u_p\equiv\left(\frac{-3}{p}\right)\pmod p$, $v_p=(2-a)u_p-2(a^2-a+1)u_{p-1} =2u_{p+1}-(2-a)u_p\equiv(2-a)\pmod p$. Then
\begin{align*}
2^{p-1}u_p
&=\sum_{\substack{k=0\\k\; odd}}^{p}\binom pk(2-a)^{p-k}(a\sqrt{-3})^{k-1}\\
&=a^{p-1}(-3)^{\frac{p-1}{2}}+\sum\limits_{k=1}^{\frac{p-1}{2}}\binom p{2k-1}(2-a)^{p-2k+1}a^{2k-2}(-3)^{k-1}\\
&=a^{p-1}(-3)^{\frac{p-1}{2}}+p\sum\limits_{k=1}^{\frac{p-1}{2}}\frac{(-3)^{k-1}}{2k-1}\binom {p-1}{2k-2}(2-a)^{p-2k+1}a^{2k-2}\\
&\equiv a^{p-1}(-3)^{\frac{p-1}{2}}+ p\sum\limits_{k=1}^{\frac{p-1}{2}}\frac{(-3)^{k-1}}{2k-1}\cdot\left(\frac{a}{2-a}\right)^{2k-2}\pmod {p^2},\\
\end{align*}
and
\begin{align*}
2^{p-1}v_p
&=\sum_{\substack{k=0\\k\; even}}^{p}\binom pk(2-a)^{p-k}(a\sqrt{-3})^k\\
&=(2-a)^p+\sum\limits_{k=1}^{\frac{p-1}{2}}\binom p{2k}(2-a)^{p-2k}a^{2k}(-3)^k\\
&=(2-a)^p+p\sum\limits_{k=1}^{\frac{p-1}{2}}\frac{(-3)^k}{2k}\binom {p-1}{2k-1}(2-a)^{p-2k}a^{2k}\\
&\equiv(2-a)^p-\frac{2-a}2 p\sum\limits_{k=1}^{\frac{p-1}{2}}\frac{(-3)^k}{k}\cdot\left(\frac{a}{2-a}\right)^{2k}\pmod {p^2}.\\
\end{align*} Hence (1) and (2) follow from Lemma 2.6(1) of \cite{dy}.
\end{proof}
\begin{corollary}\label{30corollary}
Let $p\nmid 3a(2-a)(a^3+1)$ be an odd prime. Then we have
\[\sum\limits_{k=1}^{[\frac{p}{3}]}\frac{(-a)^{3k}}{k}\equiv(2-a)\left[\frac12\sum\limits_{k=1}^{\frac{p-1}2}\frac{(-3)^k}{k}\cdot\left(\frac{a}{2-a}\right)^{2k}
+q_p(2)-q_p(2-a)\right]-(a+1)q_p(a+1)\pmod p.\]
\end{corollary}
\begin{proof}
The result follows from Lemma 4.9 of \cite{dy} and Lemma \ref{3uvlemma}(2).
\end{proof} \begin{theorem}\label{31theorem}
Let $p\nmid 3a(a-1)(2-a)(a^3+1)$ be an odd prime, and $\{u_n\}_{n\geq0}$ be the Lucas sequence defined as
$$u_0=0,\;u_1=1,\;u_{n+1}=(2-a)u_n-(a^2-a+1)u_{n-1}\;\textup{for}\;n\geq1.$$ \begin{description}
\item[(1)] If $p\equiv1\pmod 3$, we have \[\frac{u_{p-1}}{p}\equiv-\frac{2}{a(a-1)}\sum\limits_{k=1}^{\frac{p-1}{3}}\frac{(-a)^{3k-1}}{3k-1}+\frac{a+1}{3a(a-1)}\left(q_p(a^2-a+1)-2q_p(a+1)\right)\pmod p\] and \begin{align*} \sum\limits_{k=1}^{\frac{p-1}{3}}\frac{(-a)^{3k-1}}{3k-1} &\equiv\frac{a(a-1)}{a-2}\sum\limits_{k=1}^{\frac{p-1}{2}}\frac{(-3)^{k-1}}{2k-1}\cdot\left(\frac{a}{2-a}\right)^{2k-2}\\ &+\frac{a(a-1)}{a-2} [q_p(a)-q_p(2)+\frac12q_p(3)]\\ &-\frac{1}{3}(a+1)q_p(a+1)- \frac{ a^2-a+1}{3(a-2)}q_p(a^2-a+1) \pmod p. \end{align*}
\item[(2)] If $p\equiv2\pmod 3$, we have
\[\frac{u_{p+1}}{p}\equiv\frac{2(a^2-a+1)}{a(a-1)}\sum\limits_{k=1}^{\frac{p+1}{3}}\frac{(-a)^{3k-2}}{3k-2}-\frac{a^3+1}{3a(a-1)}\left(q_p(a^2-a+1)-2q_p(a+1)\right)\pmod p\]
and \begin{align*} \sum\limits_{k=1}^{\frac{p+1}{3}}\frac{(-a)^{3k-2}}{3k-2} &\equiv-\frac{a(a-1)}{a-2}\sum\limits_{k=1}^{\frac{p-1}{2}}\frac{(-3)^{k-1}}{2k-1}\cdot\left(\frac{a}{2-a}\right)^{2k-2}\\ &\quad+\frac{a(a-1)}{a-2} [q_p(a)-q_p(2)+\frac12q_p(3)]\\ &\quad-\frac{1}{3}(a+1)q_p(a+1)-\frac{ a^2-a+1}{3(a-2)}q_p(a^2-a+1) \pmod p. \end{align*} \end{description} \end{theorem} \begin{proof} Since $(2-a)^2-4(a^2-a+1)=-3a^2$, we have $p\mid u_{p-\left(\frac{-3}{p}\right)}$ by Lemma 2.2 of \cite{dy}. Let $\{v_n\}_{n\geq0}$ be the Lucas sequence defined as in Lemma \ref{3uvlemma}.
(1) By Lemma 2.1 and Theorem 4.1 of \cite{dy}, we have $-(a+1)u_p+(a^2-a+1)u_{p-1}=3\left[\begin{array}{c}p \\ 2\\\end{array}\right] _{3}(a)-(1+a)^p$ and $v_{p-1}=2u_p-(2-a)u_{p-1}$. Thus by Lemma 2.4 of \cite{dy}, we have
\begin{align*}
3a(a-1)u_{p-1}
&=6\left[\begin{array}{c}p \\ 2\\\end{array}\right] _{3}(a)-2(1+a)^p+(a+1)v_{p-1}\\
&\equiv-6p\sum\limits_{k=1}^{\frac{p-1}{3}}\frac{(-a)^{3k-1}}{3k-1}+(a+1)\left[(v_{p-1}-2)-2((a+1)^{p-1}-1)\right]\pmod{p^2}
\end{align*}
and
\begin{align*}
3a(a-1)(u_{p}-1)
&=3(2-a)\left[\begin{array}{c}p \\ 2\\\end{array}\right] _{3}(a)-(2-a)(1+a)^p+(a^2-a+1)v_{p-1}-3a(a-1)\\
&\equiv-3(2-a)p\sum\limits_{k=1}^{\frac{p-1}{3}}\frac{(-a)^{3k-1}}{3k-1}-(2-a)(1+a)((a+1)^{p-1}-1)\\
&\quad+(a^2-a+1)(v_{p-1}-2) \pmod{p^2}.
\end{align*}
Thence by Lemma 2.7 of \cite{dy} and Lemma \ref{3uvlemma}(1),
\[\frac{u_{p-1}}{p}\equiv-\frac{2}{a(a-1)}\sum\limits_{k=1}^{\frac{p-1}{3}}\frac{(-a)^{3k-1}}{3k-1}+\frac{a+1}{3a(a-1)}\left(q_p(a^2-a+1)-2q_p(a+1)\right)\pmod p,\]
and \begin{align*} \sum\limits_{k=1}^{\frac{p-1}{3}}\frac{(-a)^{3k-1}}{3k-1} &\equiv\frac{a(a-1)}{a-2}\sum\limits_{k=1}^{\frac{p-1}{2}}\frac{(-3)^{k-1}}{2k-1}\cdot\left(\frac{a}{2-a}\right)^{2k-2}\\ &+\frac{a(a-1)}{a-2} [q_p(a)-q_p(2)+\frac12q_p(3)]\\ &-\frac{1}{3}(a+1)q_p(a+1)- \frac{ a^2-a+1}{3(a-2)}q_p(a^2-a+1) \pmod p. \end{align*} (2) By Lemma 2.1 and Theorem 4.1 of \cite{dy}, we have $-u_{p+1}+(a+1)u_{p}=3\left[\begin{array}{c}p \\ 1\\\end{array}\right] _{3}(a)-(1+a)^p$ and $v_{p+1}=(2-a)u_{p+1}-2(a^2-a+1)u_p$. Thus by Lemma 2.4 of \cite{dy}, we have
\begin{align*}
-3a(a-1) u_{p+1} &=6(a^2-a+1)\left[\begin{array}{c}p \\ 1\\\end{array}\right] _{3}(a)-2(a^2-a+1)(1+a)^p+(a+1)v_{p+1}\\
&\equiv-6(a^2-a+1)p\sum\limits_{k=1}^{\frac{p+1}{3}}\frac{(-a)^{3k-2}}{3k-2}-2(a^2-a+1)(a+1)\left[(a+1)^{p-1}-1\right]\\
&\quad+(a+1)\left[v_{p+1}-2(a^2-a+1)\right]\pmod{p^2} \end{align*}
and
\begin{align*}
-3a(a-1)(u_{p}+1)
&=3(2-a)\left[\begin{array}{c}p \\ 1\\\end{array}\right] _{3}(a)- (2-a)(1+a)^p+ v_{p+1}-3a(a-1)\\
&\equiv-3 (2-a )p\sum\limits_{k=1}^{\frac{p+1}{3}}\frac{(-a)^{3k-2}}{3k-2}- (2-a)(a+1)\left[(a+1)^{p-1}-1\right]\\
&\quad +v_{p+1}-2(a^2-a+1) \pmod{p^2}.
\end{align*}
Thence by Lemma 2.7 of \cite{dy} and Lemma \ref{3uvlemma}(1),
\[\frac{u_{p+1}}{p}\equiv\frac{2(a^2-a+1)}{a(a-1)}\sum\limits_{k=1}^{\frac{p+1}{3}}\frac{(-a)^{3k-2}}{3k-2}-\frac{a^3+1}{3a(a-1)}\left(q_p(a^2-a+1)-2q_p(a+1)\right)\pmod p,\]
and \begin{align*} \sum\limits_{k=1}^{\frac{p+1}{3}}\frac{(-a)^{3k-2}}{3k-2} &\equiv-\frac{a(a-1)}{a-2}\sum\limits_{k=1}^{\frac{p-1}{2}}\frac{(-3)^{k-1}}{2k-1}\cdot\left(\frac{a}{2-a}\right)^{2k-2}\\ &+\frac{a(a-1)}{a-2} [q_p(a)-q_p(2)+\frac12q_p(3)]\\ &-\frac{1}{3}(a+1)q_p(a+1)-\frac{ a^2-a+1}{3(a-2)}q_p(a^2-a+1) \pmod p. \end{align*} \end{proof} Set $a=-2$ in Corollary \ref{30corollary} and Theorem \ref{31theorem}, we have the following two corollaries.
\begin{corollary}
Let $p\neq3,7$ be an odd prime. Then we have
\begin{description}
\item[(1)]
\[\sum\limits_{k=1}^{[\frac p3]}\frac{8^k}{k}\equiv\sum\limits_{k=1}^{\frac {p-1}{2}}\frac{2}{k}\cdot\left(-\frac34\right)^k-4q_p(2)\pmod p.\]
\item[(2)]
If $p\equiv1 \pmod 3$,
\[\sum\limits_{k=1}^{\frac{p-1}{3}}\frac{8^k}{3k-1}\equiv4\sum\limits_{k=1}^{\frac {p-1}{2}}\frac{1}{2k-1}\cdot\left(-\frac34\right)^k-\frac 32q_p(3)+\frac 76q_p(7)\pmod p.\]
If $p\equiv2 \pmod 3$,
\[\sum\limits_{k=1}^{\frac{p+1}{3}}\frac{8^k}{3k-2}\equiv-8\sum\limits_{k=1}^{\frac {p-1}{2}}\frac{1}{2k-1}\cdot\left(-\frac34\right)^k-3q_p(2)+\frac 73q_p(7)\pmod p.\]
\end{description}
\end{corollary}
\begin{corollary}\label{-2corollary} Let $p\neq3,7$ be an odd prime, and $\{u_n\}_{n\geq0}$ be the Lucas sequence defined as
$$u_0=0,\;u_1=1,\;u_{n+1}=4u_n-7u_{n-1}\;\textup{for}\;n\geq1.$$
Then, if $p\equiv1\pmod 3$, \[\frac{u_{p-1}}p\equiv-\frac{1}{6}\sum\limits_{k=1}^{\frac{p-1}{3}}\frac{8^k}{3k-1}-\frac{1}{18} q_p(7)\pmod p,\] if $p\equiv2\pmod 3$, \[\frac{u_{p+1}}p\equiv\frac{7}{12}\sum\limits_{k=1}^{\frac{p+1}{3}}\frac{8^k}{3k-2}+\frac{7}{18} q_p(7)\pmod p.\] \end{corollary}
The following theorem reduces the number of summation terms occurring in the expressions for the Lucas quotients in Corollary 4.11 of \cite{dy} and Corollary \ref{-2corollary}.
\begin{theorem}
Let $p\neq3,7$ be an odd prime, and $\left\{u_n\right\}_{n\geq0}$ be the Lucas sequence defined as in Corollary \ref{-2corollary}. Then if $p\equiv1\pmod3$, \begin{align*} \frac{u_{p-1}}p &\equiv\frac16\sum\limits_{k=1}^{\frac{p-1}{6}}\frac{64^k}{k}+\frac13q_p(7)+\frac12q_p(3)\\ &\equiv-\frac{1}{3}\sum\limits_{k=1}^{\frac{p-1}{6}}\frac{64^k}{6k-1}-\frac{1}{18} q_p(7)+\frac 16q_p(3) \pmod p, \end{align*} if $p\equiv2\pmod3$, \begin{align*} \frac{u_{p+1}}p &\equiv-\frac76\sum\limits_{k=1}^{\frac{p-5}{6}}\frac{64^k}{k}-\frac73q_p(7)-\frac72q_p(3)\\ &\equiv\frac{7}{6}\sum\limits_{k=1}^{\frac{p+1}{6}}\frac{64^k}{6k-2}+\frac{7}{18} q_p(7)+\frac 76q_p(3)\pmod p. \end{align*}
\begin{proof}
By Lemma 2.4 and Theorem 4.5 of \cite{dy}, if $p\equiv1\pmod 3$,
\[3^{p-1}-(-3)^{\frac{p-1}{2}}=\left[\begin{array}{c}p \\2 \\\end{array}\right] _3(2)\equiv-p\sum\limits_{k=1}^{\frac{p-1}{3}}\frac{(-2)^{3k-1}}{3k-1}\pmod{p^2},\]
if $p\equiv2\pmod 3$,
\[3^{p-1}+(-3)^{\frac{p-1}{2}}=\left[\begin{array}{c}p \\1 \\\end{array}\right] _3(2)\equiv-p\sum\limits_{k=1}^{\frac{p+1}{3}}\frac{(-2)^{3k-2}}{3k-2}\pmod{p^2}.\]
Thus, by Lemma 2.6 of \cite{dy}, we have
\[ \sum\limits_{k=1}^{\frac{p-1}{3}}\frac{(-8)^{k}}{3k-1}\equiv q_p(3)\pmod p \;\textup{if} \;p\equiv1 \pmod 3,\] and
\[ \sum\limits_{k=1}^{\frac{p+1}{3}}\frac{(-8)^{k}}{3k-2}\equiv-2q_p(3) \pmod p \;\textup{if} \;p\equiv2 \pmod 3.\]
Hence the results follow from Corollaries 4.7 and 4.11 of \cite{dy} and Corollary \ref{-2corollary}.
\end{proof}
\section{A Specific Lucas Sequence}
\noindent Let $A,B\in\mathbb{Z}$. The Lucas sequences $u_n=u_n(A,B)(n\in\mathbb{N})$ and $v_n=v_n(A,B)(n\in\mathbb{N})$ are defined by
\[u_0=0,\;u_1=1,\; u_{n+1}=Bu_n-Au_{n-1}(n\geq1);\]
\[v_0=2,\;v_1=B,\; v_{n+1}=Bv_n-Av_{n-1}(n\geq1).\] Next we give some properties of the Lucas sequences with $A=5$ and $B=2$. We need some lemmas. Let $D=B^2-4A.$
\begin{lemma}\label{ulucasmod}
Let $p$ be an odd prime not dividing $DA$.
\begin{description}
\item[(1)]
If $p\equiv1\pmod 4$, then $p\mid u_{\frac{p-1}{4}}$ if and only if $v_{\frac{p-1}{2}}\equiv2A^{\frac{p-1}{4}}\pmod p$ and
$p\mid v_{\frac{p-1}{4}}$ if and only if $v_{\frac{p-1}{2}}\equiv-2A^{\frac{p-1}{4}}\pmod p$.
\item[(2)] If $p\equiv3\pmod 4$, then $p\mid u_{\frac{p+1}{4}}$ if and only if $v_{\frac{p+1}{2}}\equiv2A^{\frac{p+1}{4}}\pmod p$ and
$p\mid v_{\frac{p+1}{4}}$ if and only if $v_{\frac{p+1}{2}}\equiv-2A^{\frac{p+1}{4}}\pmod p$.
\end{description}
\end{lemma}
\begin{proof}
(1) and (2) follow from the fact that $v_{2n}=v_n^2-2A^n=Du_n^2+2A^n.$
\end{proof}
\begin{lemma}\textup{(\cite{sun4})}\label{uvlucasmod}
Let $p$ be an odd prime and $A'$ be an integer such that $4A'\equiv B^2-4A\pmod p $. Let
$u_n'=u_n(A',B),\;v_n'=v_n(A',B)$. Then we have
\[
u_{\frac{p+1}{2}}\equiv\frac12\left(\frac2p\right)v'_{\frac{p-1}{2}}\pmod p,\;u_{\frac{p-1}{2}}\equiv-\left(\frac2p\right)u'_{\frac{p-1}{2}}\pmod p,
\]
\[v_{\frac{p+1}{2}}\equiv\left(\frac2p\right)v'_{\frac{p+1}{2}}\pmod p,\;v_{\frac{p-1}{2}}\equiv2\left(\frac2p\right)u'_{\frac{p+1}{2}}\pmod p.
\] \end{lemma}
\begin{remark} \begin{description}
\item[(1)] Let $S_n=u_n(1,4),\;T_n=v_n(1,4)$. For any prime $p>3$, by the facts that $u'_n=u_n(3,4)=\frac{1}{2}(3^n-1)$ and $v_n'=v_n(3,4)=3^n+1$, we have \begin{align*} &S_{\frac{p+1}{2}}\equiv\frac12\left(\frac{2}{p}\right)\left[\left(\frac{3}{p}\right)+1\right]\pmod p,\quad S_{\frac{p-1}{2}}\equiv-\frac12\left(\frac{2}{p}\right)\left[\left(\frac{3}{p}\right)-1\right]\pmod p,\\ &T_{\frac{p+1}{2}}\equiv \left(\frac{2}{p}\right)\left[3\left(\frac{3}{p}\right)+1\right]\pmod p,\quad T_{\frac{p-1}{2}}\equiv\left(\frac{2}{p}\right)\left[3\left(\frac{3}{p}\right)-1\right]\pmod p. \end{align*}
Thus by Lemma \ref{ulucasmod}, $p\mid S_{[\frac{p+1}{4}]}$ iff $p\equiv1,19\pmod {24}$ and $p\mid T_{[\frac{p+1}{4}]}$ iff $p\equiv7,13\pmod {24}$. Sun \cite{s2002} got these by studying the sum (\ref{generalsum}) for $a=1$ and $m=12$;
\item[(2)] Let $P_n=u_n(-1,2),\;Q_n=v_n(-1,2)$ and $u_n'=u_n(2,2),\;v_n'=v_n(2,2).$ For any odd prime $p$, by the facts that $u_{4n}'=0,u_{4n+1}'=(-4)^n,u_{4n+2}'=u_{4n+3}'=2(-4)^n$ and $v_{4n}'=v_{4n+1}'=2(-4)^n,v_{4n+2}'=0,v_{4n+3}'=(-4)^{n+1}$, we have \[P_{\frac{p-\left(\frac2p\right)}{2}}\equiv\begin{cases}0\pmod p,& \textup{if}\,p\equiv1\pmod 4,\\(-1)^{[\frac{p+5}{8}]}2^{\frac{p-3}{4}}\pmod p,& \textup{if}\;p\equiv3\pmod 4,\end{cases} \]
\[Q_{\frac{p-\left(\frac2p\right)}{2}}\equiv\begin{cases}(-1)^{[\frac{p}{8}]}2^{\frac{p+3}{4}}\pmod p,& \textup{if}\;p\equiv1\pmod 4,\\0\pmod p,& \textup{if}\;p\equiv3\pmod 4,\end{cases}\] and \[P_{\frac{p+\left(\frac2p\right)}{2}}\equiv(-1)^{[\frac{p+1}{8}]}2^{[\frac{p }{4}]}\pmod p,\quad Q_{\frac{p+\left(\frac2p\right)}{2}}\equiv(-1)^{[\frac{p+5}{8}]}2^{[\frac{p+5}{4}]}\pmod p.\] \end{description} Sun got \cite{sun2} these by studying the sum (\ref{generalsum}) for $a=1$ and $m=8$. \end{remark}
\begin{lemma}\label{uv1lucasmod}
Let $p\nmid B$ be an odd prime and $A'$ be an integer such that $A'\equiv \frac{A}{B^2}\pmod p $. Let
$u_n'=u_n(A',1),\;v_n'=v_n(A',1).$ Then we have \[u_{\frac{p+1}{2}}\equiv\left(\frac Bp\right)u'_{\frac{p+1}{2}}\pmod p,\quad u_{\frac{p-1}{2}}\equiv\frac1B\left(\frac Bp\right)u_{\frac{p-1}{2}}'\pmod p,\] \[v_{\frac{p+1}{2}}\equiv B\left(\frac Bp\right)v'_{\frac{p+1}{2}} \pmod p,\quad v_{\frac{p-1}{2}}\equiv \left(\frac Bp\right)v_{\frac{p-1}{2}}'\pmod p.\] \end{lemma} \begin{proof}
By Lemma 2.1 of \cite{dy} and $D'=1-4A'\equiv\frac{D}{B^2}\pmod p,$ we have
\begin{align*}
u_{n}
&=2\sum_{\substack{k=0\\k\;odd}}^{n}\binom{n}{k}\left(\frac{B}2\right)^{n-k}\left(\frac{D}2\right)^{\frac{k-1}{2}}\\
&=2B^{n-1}\sum_{\substack{k=0\\k\;odd}}^{n}\binom{n}{k}\left(\frac{1}2\right)^{n-k}\left(\frac{D}{2B^2}\right)^{\frac{k-1}{2}}\\
&\equiv2B^{n-1}\sum_{\substack{k=0\\k\;odd}}^{n}\binom{n}{k}\left(\frac{1}2\right)^{n-k}\left(\frac{D'}{2}\right)^{\frac{k-1}{2}}\\
&=B^{n-1}u'_n\pmod p,
\end{align*} and
\begin{align*}
v_{n}
&=2\sum_{\substack{k=0\\k\;even}}^{n}\binom{n}{k}\left(\frac{B}2\right)^{n-k}\left(\frac{D}2\right)^{\frac{k}{2}}\\
&=2B^{n}\sum_{\substack{k=0\\k\;even}}^{n}\binom{n}{k}\left(\frac{1}2\right)^{n-k}\left(\frac{D}{2B^2}\right)^{\frac{k}{2}}\\
&\equiv2B^{n}\sum_{\substack{k=0\\k\;even}}^{n}\binom{n}{k}\left(\frac{1}2\right)^{n-k}\left(\frac{D'}{2}\right)^{\frac{k}{2}}\\
&=B^{n}v'_n\pmod p,
\end{align*} Thus
\begin{align*}
&u_{\frac{p+1}{2}}\equiv B^{\frac{p-1}{2}}u'_{\frac{p+1}{2}}\equiv\left(\frac Bp\right)u'_{\frac{p+1}{2}}\pmod p,\\
& u_{\frac{p-1}{2}}\equiv B^{\frac{p-3}{2}}u'_{\frac{p-1}{2}}\equiv\frac1B\left(\frac Bp\right)u_{\frac{p-1}{2}}'\pmod p, \\
&v_{\frac{p+1}{2}}\equiv B^{\frac{p+1}{2}}v'_{\frac{p+1}{2}}\equiv B\left(\frac Bp\right)v'_{\frac{p+1}{2}} \pmod p,\\
& v_{\frac{p-1}{2}}\equiv B^{\frac{p-1}{2}}v'_{\frac{p-1}{2}}\equiv \left(\frac Bp\right)v_{\frac{p-1}{2}}'\pmod p.
\end{align*} \end{proof}
\begin{theorem} \label{52lucas}
Let $p\neq5$ be an odd prime and $\{U_n\}_{n\geq0}$ and $\{V_n\}_{n\geq0}$ be the Lucas sequences defined as $$U_0=0,U_1=1,U_{n+1} =2U_{n}-5U_{n-1} \;\textup{for}\;n\geq1;$$ $$V_0=2,V_1=2,V_{n+1}=2V_{n}-5V_{n-1}\;\textup{for}\;n\geq1.$$ \begin{description}
\item[(1)] If $p\equiv\pm1\pmod 5$, we have \begin{align*} &U_{\frac{p+\left(\frac{-1}{p}\right)}{2}}\equiv\left(\frac{-1}{p}\right)(-1)^{[\frac{p+5}{10}]}5^{[\frac{p}{4}]}\pmod p,\\ &U_{\frac{p-\left(\frac{-1}{p}\right)}{2}}\equiv0\pmod p,\\ &V_{\frac{p+\left(\frac{-1}{p}\right)}{2}}\equiv2(-1)^{[\frac{p+5}{10}]}5^{[\frac{p}{4}]} \pmod p,\\ &V_{\frac{p-\left(\frac{-1}{p}\right)}{2}}\equiv2(-1)^{[\frac{p+5}{10}]}5^{[\frac{p+1}{4}]}\pmod p. \end{align*} \item[(2)] If $p\equiv\pm2\pmod 5$, we have \begin{align*} &U_{\frac{p+\left(\frac{-1}{p}\right)}{2}}\equiv\frac12\left(\frac{-1}{p}\right)(-1)^{[\frac{p+5}{10}]}5^{[\frac{p}{4}]}\pmod p,\\ &U_{\frac{p-\left(\frac{-1}{p}\right)}{2}}\equiv\frac12\left(\frac{-1}{p}\right)(-1)^{[\frac{p+5}{10}]}5^{[\frac{p+1}{4}]}\pmod p,\\ &V_{\frac{p+\left(\frac{-1}{p}\right)}{2}}\equiv4(-1)^{[\frac{p-5}{10}]}5^{[\frac{p}{4}]} \pmod p,\\ &V_{\frac{p-\left(\frac{-1}{p}\right)}{2}}\equiv0\pmod p. \end{align*} \end{description} \end{theorem} \begin{proof}
Let $F_n=u_n(-1,1)$ and $L_n=v_n(-1,1)$ be Fibonacci sequence and its companion. Then by Lemmas \ref{uvlucasmod} and \ref{uv1lucasmod}, we have \[U_{\frac{p+1}{2}}\equiv\frac12L_{\frac{p-1}{2}}\pmod p, \quad U_{\frac{p-1}{2}}\equiv-\frac12F_{\frac{p-1}{2}}\pmod p,\] \[V_{\frac{p+1}{2}}\equiv2L_{\frac{p+1}{2}}\pmod p,\quad V_{\frac{p-1}{2}}\equiv2F_{\frac{p+1}{2}}\pmod p.\] Thus by Corollaries 1 and 2 of \cite{ss}, we can derive the results. \end{proof} \begin{remark}
In \cite{dy}, we gave some congruences for the Lucas quotient $ U_{p-\left(\frac{-1}p\right)}/p$ by studying the sum (\ref{generalsum}) for $a=-2$ and $m=4$. \end{remark} \begin{corollary} Let $p\neq5$ be an odd prime, $\{U_n\}_{n\geq0}$ and $\{V_n\}_{n\geq0}$ be Lucas sequences defined as above.
\begin{description}
\item[(1)] If $p\equiv1\pmod4$, then $p\mid U_{\frac{p-1}{4}}$ if and only if $p\equiv1\pmod {20}$ and $p\mid V_{\frac{p-1}{4}}$ if and only if $p\equiv9\pmod {20}$.
\item[(2)] If $p\equiv3\pmod4$, then $p\mid U_{\frac{p+1}{4}}$ if and only if $p\equiv19\pmod {20}$ and $p\mid V_{\frac{p+1}{4}}$ if and only if $p\equiv11\pmod {20}$.
\end{description} \end{corollary} \begin{proof}
(1) and (2) follow from Lemma \ref{ulucasmod} and Theorem \ref{52lucas}. \end{proof}
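The divisibility pattern in this corollary is easy to test numerically. The sketch below (plain Python plus \texttt{sympy.primerange}; purely a spot-check, not a proof) iterates the recurrences modulo $p$ for all odd primes $p\neq5$ below $2000$.
\begin{verbatim}
from sympy import primerange

def UV_mod(n, p):
    # (U_n mod p, V_n mod p) for U_{k+1} = 2U_k - 5U_{k-1}, and likewise for V
    U_prev, U_cur, V_prev, V_cur = 0, 1, 2, 2
    for _ in range(n - 1):
        U_prev, U_cur = U_cur, (2 * U_cur - 5 * U_prev) % p
        V_prev, V_cur = V_cur, (2 * V_cur - 5 * V_prev) % p
    return U_cur % p, V_cur % p

for p in primerange(3, 2000):
    if p == 5:
        continue
    if p % 4 == 1:
        U, V = UV_mod((p - 1) // 4, p)
        assert (U == 0) == (p % 20 == 1) and (V == 0) == (p % 20 == 9)
    else:
        U, V = UV_mod((p + 1) // 4, p)
        assert (U == 0) == (p % 20 == 19) and (V == 0) == (p % 20 == 11)
print("corollary spot-checked for all odd primes p != 5 below 2000")
\end{verbatim}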
\noindent \textbf{Acknowledgments}\quad The work of this paper was supported by the NNSF of China (Grant No. 11471314), and the National Center for Mathematics and Interdisciplinary Sciences, CAS.
\end{document}
\begin{document}
\preprint{\includegraphics[scale=0.4]{iqus_logo.png} $\qquad\qquad\qquad\qquad\qquad\qquad\qquad$$\qquad\qquad\qquad\qquad\qquad\qquad\qquad$ Preprint number: IQuS@UW-21-054}
\hspace{0.1cm}
\title{Quantum Imaginary Time Propagation algorithm for preparing thermal states}
\newcommand{\iqusfil}{InQubator for Quantum Simulation (IQuS), Department of Physics,
University of Washington, Seattle, Washington 98195, USA}
\author{Francesco~Turro \orcidlink{https://orcid.org/0000-0002-1107-2873}}
\affiliation{\iqusfil}
\begin{abstract}
Calculations at finite temperatures are fundamental in different scientific fields, from nuclear physics to condensed matter. Evolution in imaginary time is a prominent classical technique for preparing thermal states of quantum systems.
We propose a new quantum algorithm that prepares thermal states based on the quantum imaginary time propagation method, using a diluted operator with ancilla qubits to overcome the non-unitary nature of the imaginary time operator. The presented method is the first that allows us to obtain the correct thermal density matrix on a general quantum processor for a generic Hamiltonian.
We demonstrate its reliability on current quantum hardware by computing thermal properties of two- and three-neutron systems.
\end{abstract}
\maketitle
Calculations at finite temperature are essential for understanding quantum systems across scientific fields. In particular, the thermodynamic properties of nuclear matter play a crucial role in heavy-ion collisions, astrophysics, and general nuclear applications. Some examples are nuclear reactions in the evolution of matter in the early universe and inside the core of stars~\cite{RevModPhys_83_195,annurev-astro-081811-125543,annurev-nucl-020620-063734}, supernova explosions and the phase diagram of QCD~\cite{de2010simulating,shuryak2017strongly}. The recent detection of gravitational waves~\cite{abbott2017gw170817} can provide constraints on the equation of state of nuclear matter at high densities, used, for example, to describe the composition of Neutron Stars~\cite{burgio2021neutron,lattimer2021neutron,haensel2007neutron}.
According to statistical mechanics, the (unnormalized) density matrix that describes a quantum system in equilibrium with a thermal bath at temperature $T$, and the thermal expectation value of an observable $O$, are given by
\begin{equation}
\rho = e^{-\beta H}\,, \qquad
\langle O \rangle = \frac{\Tr{\rho O}}{Z_0} \,, \label{eq:thermal_states}
\end{equation}
where $H$ is the Hamiltonian of the system, $\beta=\frac{1}{k_B T}$, $k_B$ the Boltzmann constant, and $Z_0=\Tr{\rho}$ is the partition function.
The imaginary time propagation (ITP) method is a popular classical algorithm to prepare a thermal state of a quantum system. This algorithm was originally designed to prepare the ground state of quantum systems, where one dissipates an arbitrary quantum state to reach the ground state using the ITP operator, $e^{-\tau\, H}$, where $\tau$ is the imaginary time. Well-known classical techniques for computing thermodynamic properties are Quantum Monte Carlo methods and their improvements, like Auxiliary Field Monte Carlo \cite{baeurle2002field}, Continuous-time Quantum Monte Carlo \cite{gull2011continuous} and Path Integral Monte Carlo \cite{barker1979quantum}. It is a notorious problem that the required classical computational resources grow exponentially with the number of particles. Moreover, many nuclear systems are mainly composed of fermions. Hence, the fermion sign problem emerges, slowing down the progress in studying complex systems~\cite{troyer2005}.
Following Feynman's idea on the efficiency in simulating quantum systems using quantum hardware~\cite{feynman}, it is desired to develop quantum algorithms that efficiently compute the thermodynamic properties of quantum systems. Different quantum algorithms that implement imaginary time methods and thermal state preparation have been proposed~\cite{motta2020determining,mcardle2019variational,warren2022adaptive,holmes2022quantum,sagastizabal2021variational,consiglio2023variational,turro2022imaginary}.
Refs.~\cite{motta2020determining,mcardle2019variational} presents a hybrid quantum-classical algorithm, called Quantum Imaginary Time Evolution (QITE), based on variational ansatz. Applications of the QITE method for thermal states can be found in Ref.~\cite{motta2020determining,davoudi2022toward,sun2021quantum,getelina2023adaptive,wang2023critical,leadbeater2023nonunitary}. Instead, Ref.~\cite{turro2022imaginary} illustrates a quantum algorithm for implementing the imaginary time operator using ancilla qubits with a unitary operator.
This letter presents the first quantum algorithm (to the best of our knowledge) that allows us to obtain the correct density matrix on quantum hardware. This is done by implementing a modified version of the QITP operator of Ref.~\cite{turro2022imaginary}.
Moreover, the proposed quantum algorithm is independent of the initial variational ansatz and of the set of classical variables that one has using QITE. In particular, this algorithm can be implemented in studying phase transition, because QITE may become prohibitively expensive due to the large correlation length.
In this letter, we start from the work of Ref.~\cite{turro2022imaginary} and present a quantum algorithm that prepares the thermal state by implementing the imaginary time propagation. We also upgrade the QITP algorithm of Ref.~\cite{turro2022imaginary}, improving its success probability. We implement the proposed algorithm to compute the partition function $Z_0$ for spin systems of two and three neutrons on the IBM~\cite{ibm} and Quantinuum H$1$-$1$ quantum hardware~\cite{quantinuum}. We also evaluate the thermal expectation values of some observables. The obtained results are compatible with the analytical behavior.
\textit{Quantum algorithm. }
Our algorithm starts by initializing the $n_s$ qubits onto which we map our physical system in the so-called maximally mixed state, whose density matrix is given by $\mathbb{1}/ 2^{n_s} $ ($\mathbb{1}$ indicates the $2^{n_s}\times 2^{n_s}$ identity matrix). A first proposal for this initialization, using $n_s$ ancilla qubits, was discussed in Ref.~\cite{white2009minimally} (details and our implementation can also be found in App.~A).
For small quantum systems, $2-4$ qubits, the ancilla cost is not expensive, but this can be a limitation for bigger systems. However, this initialization is not strictly necessary, and improvements can be implemented, like minimally entangled typical
thermal states \cite{white2009minimally,stoudenmire2010minimally} and Canonical Thermal Pure
Quantum State~\cite{sugiura2013canonical,sugiura2012thermal}.
After the initialization of the state in the quantum processor, we should implement the imaginary time operator. We start by summarizing the basic steps of the algorithm in QITP~\cite{turro2022imaginary}.
Since the imaginary time operator is non-unitary, one can work in a diluted Hilbert space to employ a unitary form. Explicitly, we add an ancilla qubit in the $\ket{0}$ state. Then, we implement the following operator
\begin{equation}
QITP_{gs} = \begin{pmatrix}
\frac{e^{-\tau (H-E_T)}}{\sqrt{1+e^{-2\,\tau (H-E_T)}}} & \frac{1}{\sqrt{1+e^{-2\,\tau (H-E_T)}}}\\
\frac{-1}{\sqrt{1+e^{-2\,\tau (H-E_T)}}} & \frac{e^{-\tau (H-E_T)}}{\sqrt{1+e^{-2\,\tau (H-E_T)}}}\\
\end{pmatrix} \,,
\end{equation}
where $E_T$ indicates the so-called trial energy, an algorithm parameter that should be tuned (see Ref.~\cite{turro2022imaginary} for more details). Then, after the action of the $QITP_{gs}$ operator and measuring the ancilla qubit in $\ket{0}$, the system state is closer to the ground state than the initial one due to the action of $\frac{e^{-\tau (H-E_T)}}{\sqrt{1+e^{-2\,\tau (H-E_T)}}}$ operator.
We have to modify the form of the $QITP_{gs}$ operator such that, after measuring the ancilla qubit in $\ket{0}$, we get the correct form of the thermal state $e^{-\beta H}$. A straightforward modification is described by
\begin{equation}
QITP_{th} = \begin{pmatrix}
\sqrt{p}\,e^{-\tau (H-E_T)} & \sqrt{ 1- p\,e^{-2\tau (H-E_T)}}\\
-\sqrt{ 1-p\, e^{-2\tau (H-E_T)}} &\sqrt{p}\,e^{-\tau (H-E_T)}\\
\end{pmatrix} \,,\label{eq:thermal_QITP_op}
\end{equation}
where $0<p\le 1$ is a free parameter, representing the success probability in measuring the ancilla qubit in the $\ket{0}$ state in the limit of $\tau \xrightarrow[]{}0 $. For $E_T\le E_0$ ($E_0$ represents the ground energy), we can also set $p=1$, which removes the exponential decay of the success probability. Details for implementing this ITP operator can be found in App.~B.
This form requires the minimal number of ancilla qubits (only one) for implementing $QITP_{th}$. Block encoding and qubitization~\cite{low2019hamiltonian,low2017optimal,tang2023cs} can be explored to compile the imaginary time operator $e^{-\beta H}$ using more than one ancilla qubit.
After implementing the operator in Eq.~\eqref{eq:thermal_QITP_op} and measuring the ancilla qubit in the $\ket{0}$ state, we find that the physical qubits are in the correct thermal state (a demonstration can be found in App.~C).
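To make the post-selection step concrete, the following NumPy sketch (a toy numerical check on a random two-qubit Hamiltonian, added here for illustration; it is not the hardware implementation) builds the operator of Eq.~\eqref{eq:thermal_QITP_op}, applies it to the state with the ancilla in $\ket{0}$ and the system in $\mathbb{1}/2^{n_s}$, projects the ancilla on $\ket{0}$, and recovers $p\,e^{-\beta(H-E_T)}/2^{n_s}$ together with the success probability.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm, sqrtm

rng = np.random.default_rng(0)
ns, beta, p = 2, 1.3, 0.8
dim = 2 ** ns

# random Hermitian "Hamiltonian" and trial energy E_T = ground-state energy
M = rng.normal(size=(dim, dim)) + 1j * rng.normal(size=(dim, dim))
H = (M + M.conj().T) / 2
E_T = np.min(np.linalg.eigvalsh(H))

tau = beta / 2
A = np.sqrt(p) * expm(-tau * (H - E_T * np.eye(dim)))      # sqrt(p) e^{-tau (H-E_T)}
B = sqrtm(np.eye(dim) - A @ A)                             # sqrt(1 - p e^{-2 tau (H-E_T)})
QITP = np.block([[A, B], [-B, A]])                         # ancilla is the first factor
assert np.allclose(QITP.conj().T @ QITP, np.eye(2 * dim))  # the dilation is unitary

rho_in = np.kron(np.diag([1.0, 0.0]), np.eye(dim) / dim)   # |0><0|_anc (x) 1/2^ns
rho_out = QITP @ rho_in @ QITP.conj().T
rho_post = rho_out[:dim, :dim]                             # ancilla measured in |0>
target = expm(-beta * (H - E_T * np.eye(dim))) / dim
assert np.allclose(rho_post, p * target)
print("success probability P_s =", np.trace(rho_post).real)
\end{verbatim}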
The steps of the proposed algorithm to prepare thermal states in quantum processors are as follows:
\begin{enumerate}
\item Start with all the qubits in the $\ket{0}$.
\item Implement the gates of the dashed square in Fig.~\ref{fig:qc_classical} (its action leaves the physical system qubits in the $\mathbb{1}/2^{n_s}$ state).
\item Apply the $QITP_{th}$ operator of Eq.~\eqref{eq:thermal_QITP_op} with an additional ancilla qubit, setting $\tau=\frac{\beta}{2}$.
\item Measure the ancilla qubit in $\ket{0}$.
\end{enumerate}
In the worst case scenario, the required number of qubits is $2\, n_s+N_\beta$, where $n_s$ qubits are needed to map the system, $n_s$ ancilla qubits to prepare the maximally mixed state, and $N_\beta$ additional qubits to apply the $QITP_{th}$ operator. Moreover, using more ancilla qubits, one can apply the Trotter decomposition to simplify the compilation of the $QITP_{th}$.
The success probability $P_s$ of the proposed algorithm (equal to the probability to measure all the ancilla qubit in $\ket{0}$) for a perfect noiseless quantum computer is given by
\begin{equation}
P_s=\frac{1}{2^{n_s}} \Tr\left[ p^{N_\beta}\,e^{-\beta (H-E_T)}\right] =\frac{p^{N_\beta}}{2^{n_s}} Z(\beta) \,.
\end{equation}
Setting $p=1$, the success probability is proportional to the partition function, $Z(\beta)=\Tr\left[ e^{-\beta (H-E_T)}\right]$. However, in the worst case scenario, for $\beta \xrightarrow[]{}+\infty$, the success probability decreases to roughly $\frac{1}{2^{n_s}}$. This reflects the fact that, at large $\beta$, the quantum system is essentially in its ground state. A possible remedy for this decay is the application of the amplitude amplification method~\cite{brassard2002quantum} to enhance the success probability.
\begin{figure}
\caption{Quantum circuits for preparing the thermal state in quantum processors. The dashed square shows the gates for initializing the system qubits in the maximally mixed state.}
\label{fig:qc_classical}
\end{figure}
\begin{figure}
\caption{Feynman diagrams of the Leading Order of Chiral Effective Field Theory}
\label{fig:feynman}
\end{figure}
\textit{Results}
As a first test, we prepare the thermal state of a spin system of two neutrons fixed in their positions. The considered interaction is the spin-dependent part of the leading order of chiral effective field theory~\cite{Chiral_review1,Chiral_review2,tews2016quantum}, using the parameters of Ref.~\cite{holland2020optimal}. The Feynman diagrams of this interaction are shown in Fig.~\ref{fig:feynman}. This system can be mapped to two qubits.
We implement the thermal $QITP_{th}$ quantum algorithm for different values of $\beta$ using five qubits (two for the system, two for preparing the maximally mixed state and one for implementing $QITP_{th}$). The quantum circuits employed in this work are built with the \texttt{Qiskit} package~\cite{qiskit} and compiled with the \texttt{pytket} package~\cite{pytket_paper} for the Quantinuum machine. Additionally, we also compute the expectation value of $\sigma_z$ for the first neutron using the \texttt{H1-1} hardware (adding an extra qubit). Our procedure is described in App.~D.
Panel (a) of Fig.~\ref{fig:2n_partition_function} shows our results for the partition function, $Z_0=\Tr{e^{-\beta (H-E_T)}}$.
In our tests, we set $E_T$ equal to the ground energy $E_0$ and $p$ in Eq.~\eqref{eq:thermal_QITP_op} equal to $0.8$ to diagnose the algorithm in the non-optimal situation.
The dashed line represents the analytical values of the partition function (obtained by classically computing the thermal density matrix and tracing it).
The magenta circles and orange squares represent the results obtained from the Quantinuum \texttt{H1-1} machine, each using 200 shots. The quantum circuits for the squares compute both the partition function and the expectation value, while those for the circles compute only the partition function. In the same panel (a), the results from the IBM \texttt{ibmq\textunderscore manila} (green diamonds) and \texttt{ibmq\textunderscore quito} (pink triangles) are shown as well.
While we do not implement any error mitigation methods on the Quantinuum results, on the IBM hardware we employ the randomized compiling technique~\cite{wallman2016noise,hashim2021randomized}, using 8 randomized quantum circuits with 64000 total shots (8$\times$8000). Our results are compatible (within two sigma) with the analytical partition function values.
Panel (b) of Fig.~\ref{fig:2n_partition_function} presents the results for the expectation value of $\sigma_z$ (orange squares), using the same measured probabilities as in panel (a) with a different analysis (see App.~D).
The dashed line represents the analytical curve. Also, in this case, the obtained results are compatible with the analytical values. However, with the increase of $\beta$, the error bars get larger due to statistical error because the probability of measuring the ancilla qubits in $\ket{0}$ decreases. A good solution would be to run the quantum circuits with more shots, reducing the statistical errors. Nevertheless, these big error bars are mostly caused by how we compile the hermitian operator. Hence, different strategies, like decomposing in the sum of Pauli matrices, can reduce them. A discussion about tests of these two different compiling methods can be found at the end of App.~D.
\begin{figure}\label{fig:2n_partition_function}
\end{figure}
The next test is preparing thermal states for a spin system of three neutrons. The used Hamiltonian is given by the sum of three two-body terms, ignoring a three-body potential.
We start by evaluating the partition function as a function of $\beta$ implementing a single $\beta$ step. Additionally, we add an extra ancilla qubit to evaluate the expectation value of the Hamiltonian $H$. The final quantum circuits use seven qubits for preparing the thermal state and an additional one to evaluate the expectation value of the Hamiltonian.
Panel (a) and panel (b) of Fig.~\ref{fig:3n_single_trotter} present the results for the partition function and the expectation value of the Hamiltonian, respectively. The orange squares, brown circles, and violet diamonds indicate the Quantinuum, IBM \texttt{ibmq\textunderscore nairobi} and \texttt{ibmq\textunderscore oslo} results, respectively. The dashed lines represent the analytical curves. Even though we employ randomized compiling, the IBM results are noisier than the Quantinuum ones.
Indeed, this can be easily explained by the required implementation of swap gates to correctly compile the $QITP_{th}$ operator due to the non-all-to-all connectivity on IBM hardware. The addition of swap gates increases the depth of quantum circuits and the contribution of noise. Nevertheless, our results are still compatible with the analytical values at two sigma.
One can observe the same increasing behavior of the error bars for the expectation value. Therefore, the error bars can be reduced with a larger number of shots.
\begin{figure}\label{fig:3n_single_trotter}
\end{figure}
\begin{figure}\label{fig:3n_trotterdecomposition}
\end{figure}
We also test the proposed quantum algorithm implementing the Trotter decomposition for preparing the thermal states of the three-neutron spin system. Specifically, we split $\beta$ into smaller steps and the full thermal propagator into a product of two-body propagators. Consequently, each layer of the quantum circuit employs a single two-body contribution.
Panel (a) of Fig.~\ref{fig:3n_trotterdecomposition} shows the results obtained from the \texttt{H1-1} Quantinuum processor with orange squares. The dashed line represents the analytical curve applying the full Hamiltonian, and the black stars indicate the analytical values applying the Trotter decomposition. Also, we obtain full compatibility with the analytical values in this case.
In the runs, we duplicate the number of shots from $200$ to $400$. In the same figure, we also report the total number of used qubits and implemented CNOT gates for the different $\beta$ values in the lower table.
We also compute the expectation value of $\sigma_z$ of the first neutron as a function of $\beta$ with the same implemented quantum circuit. as shown in panel (b) of Fig.~\ref{fig:3n_trotterdecomposition}. We observe the same behavior for error bars of Fig.~\ref{fig:2n_partition_function}. A solution may be to increase the number of shots to reduce the error bars.
\textit{Conclusions }
This work has presented a quantum algorithm that prepares thermal states on quantum processors for a generic Hamiltonian. The algorithm is based on the quantum imaginary time propagation method, using ancilla qubits to obtain a unitary version of the imaginary time operator. It upgrades the quantum imaginary time propagation of Ref.~\cite{turro2022imaginary}, solving the decay of the success probability.
We have discussed the validity and reliability of the proposed algorithm on present quantum hardware, reporting tests that compute thermal expectation values for simple nuclear spin systems. These simulations have been run on different quantum hardware, an ion-trap device (Quantinuum) and superconducting devices (IBM quantum processors). The obtained results are compatible with the analytical values, demonstrating the reliability of the proposed quantum algorithm even for deep quantum circuits, especially on the \texttt{H1-1} Quantinuum processor.
This algorithm and the algorithm of Ref.~\cite{motta2020determining} provide a good starting point for preparing thermal states using imaginary time methods on quantum processors. As quantum processors advance, these algorithms can be applied to hard problems in nuclear physics, thermalization in Quantum Chromodynamics, quantum chemistry, condensed matter, and other fields.
\textit{Acknowledgments}
We thank Francesco Pederiva, Alessandro Roggero and the whole IQuS group for useful discussions. In particular, we are grateful to Marc Illa Subi\~{n}\`{a}, Anthony Ciavarella, and Martin Savage of the IQuS group for their support and for corrections to the text.
This work was supported in part by the U.S. Department of Energy, Office of Science, Office of Nuclear Physics, InQubator for Quantum Simulation (IQuS) (\url{https://iqus.uw.edu}) under Award Number DOE (NP) Award DE-SC0020970 via the program on Quantum Horizons: QIS Research and Innovation for Nuclear Science.
This research used resources of the Oak Ridge Leadership Computing Facility, which is a DOE Office of Science User Facility supported under Contract DE-AC05-00OR22725.
We acknowledge the use of Quantinuum and IBM Quantum services for this work. The views expressed are those of the authors and do not reflect the official policy or position of IBM or the IBM Quantum team.
This work was inspired by a discussion session at the ``Next-Generation Computing for Low-Energy Nuclear Physics: from Machine Learning to Quantum Computing'' IQuS workshop in August 2022.
The obtained data are reported in App.~E and come from simulations run in February 2023.
\appendix
\section{Appendix A: Preparation of the maximally mixed state}\label{app:Classicalstate_proof}
This section describes a method to obtain the maximally mixed state.
We start by adding one ancilla qubit for each system qubit. All $2\,n_s$ qubits are initialized in the $\ket{0}$ state. Applying a Hadamard gate and a CNOT gate, as shown in Fig.~\ref{fig:qc_classical}, and measuring the ancilla qubits, we obtain the maximally mixed state $\rho=\frac{\mathbb{1}}{2^{n_s}}$.
We prove this procedure for a single-qubit system.
After the action of the H and CNOT gates on the state $\ket{00}$, we get
\begin{equation}
\rho = \text{CNOT}\,H\ket{00}\bra{00}H\,\text{CNOT}= \begin{pmatrix}
\frac{1}{2} & 0 & 0& \frac{1}{2} \\
0 & 0 & 0&0\\
0 & 0 & 0&0\\
\frac{1}{2} & 0 & 0& \frac{1}{2} \\
\end{pmatrix}\,.
\end{equation}
Now, we eliminate the ancilla qubit, for example by measuring it and discarding the outcome. In mathematical terms, this corresponds to a partial trace. The system state then becomes
\begin{equation}
\rho^1_{cl}=\Tr_{ancilla}\left[\rho \right]= \begin{pmatrix}
\frac{1}{2} & 0\\
0& \frac{1}{2}\\
\end{pmatrix}\,.
\end{equation}
Iterating this method for $n_s$ qubits, we obtain the desired $2^{n_s}$-dimensional maximally mixed state. Indeed, we have
\begin{equation}
\rho_{MME}= \bigotimes_{i=1}^{n_s} \left(\rho^1_{cl}\right)_{i}= \bigotimes_{i=1}^{n_s} \left(\frac{\mathbb{1}_{2\times2}}{2} \right)_{i}= \frac{1}{2^{n_s}} \mathbb{1}_{2^{n_s}\times2^{n_s}}\,.
\end{equation}
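As an illustrative check (a minimal NumPy sketch of our own, not part of the original derivation), one can verify the single-qubit case numerically: applying H and CNOT to $\ket{00}$ and tracing out the ancilla indeed yields $\mathbb{1}/2$.
\begin{verbatim}
# Minimal sketch (ours): trace out the ancilla of CNOT.H|00> and
# check that the system qubit is left in the maximally mixed state.
import numpy as np

H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)   # Hadamard on the system qubit
I2 = np.eye(2)
CNOT = np.array([[1, 0, 0, 0],
                 [0, 1, 0, 0],
                 [0, 0, 0, 1],
                 [0, 0, 1, 0]])                # control: system, target: ancilla

psi = CNOT @ np.kron(H, I2) @ np.array([1, 0, 0, 0])  # (|00> + |11>)/sqrt(2)
rho = np.outer(psi, psi)

# partial trace over the ancilla (second qubit)
rho_sys = rho.reshape(2, 2, 2, 2).trace(axis1=1, axis2=3)
print(rho_sys)                                 # -> [[0.5, 0], [0, 0.5]]
\end{verbatim}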
\section{Appendix B: Implementation of $QITP_{th}$ operator}\label{app:implementation_qitp}
This section discusses how the $QITP_{th}$ operator of Eq.~\eqref{eq:thermal_QITP_op} can be compiled into quantum circuits.
We start by diagonalizing the Hamiltonian $H$,
\begin{equation}
U H U^\dagger = E\,, \label{eq:Hdiagonalization}
\end{equation}
where $U$ is the matrix of eigenstates and $E$ represents the diagonal matrix of eigenvalues.
Therefore, applying Eq.~\eqref{eq:Hdiagonalization}, the $QITP_{th}$ operator can be rewritten in the following form
\begin{widetext}
\begin{equation}
QITP_{th}\,=\,U\, \begin{pmatrix}
\sqrt{p} e^{-\beta (E-E_T)}&\sqrt{1-p\,e^{-2\beta (E-E_T)}}\\
-\sqrt{1-p\,e^{-2\beta (E-E_T)}}&\sqrt{p} e^{-\beta (E-E_T)}\\
\end{pmatrix} \,U^\dagger\,,
\end{equation}
\end{widetext}
where we assume $E_T\le E_0$.
The central matrix on the right-hand side of the equation, the one containing $E$, carries all the physical information; the matrix $U$ is only a change of computational basis. This central part of $QITP_{th}$ has the structure of a cosine-sine decomposition matrix.
Hence, it can be decomposed as a product of controlled $R_y$ rotations, where the angles $\theta_i$ are given by $\theta_i=\arccos\left(\sqrt{p} e^{-\beta(E_i-E_T)}\right)$. Moreover, employing the Gray code \cite{mottonen2005decompositions,barenco1995elementary,tang2023cs}, this operator can be compiled using $2^{n_s}$ CNOT and $2^{n_s}-1$ $R_y$ gates. Fig.~\ref{fig:Greycode} shows an example for a two-qubit system.
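As a hedged illustration (our own sketch; the eigenvalues and parameters below are arbitrary example values, not those of the nuclear Hamiltonian), the following NumPy snippet builds the cosine-sine block from the angles $\theta_i$ and checks that it is unitary.
\begin{verbatim}
# Sketch (ours): build the cosine-sine block of QITP_th in the
# eigenbasis of H and verify unitarity.  E, p, beta, E_T are
# arbitrary example values with E_T <= E_0.
import numpy as np

E = np.array([0.0, 0.7, 1.3, 2.1])      # example eigenvalues of H
p, beta, E_T = 1.0, 0.4, 0.0

theta = np.arccos(np.sqrt(p) * np.exp(-beta * (E - E_T)))
C, S = np.diag(np.cos(theta)), np.diag(np.sin(theta))

QITP_cs = np.block([[C, S], [-S, C]])
print(np.allclose(QITP_cs @ QITP_cs.T, np.eye(8)))   # -> True
\end{verbatim}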
\begin{figure}
\caption{Example of compiling the $QITP_{th}$ matrix for a two-qubit system. The upper qubits are the system qubits, and the lower one is the ancilla qubit.}
\label{fig:Greycode}
\end{figure}
\section{Appendix C: Demonstration of thermal preparation}
This appendix proves that, by implementing all the steps of the proposed quantum algorithm, we obtain the correct thermal density matrix described by Eq.~\eqref{eq:thermal_states}. We assume that we have prepared the maximally mixed state $\rho_{MME}=\frac{1}{2^{n_s}} \mathbb{1}$. Applying the $QITP_{th}$ operator and measuring the ancilla in the $\ket{0}$ state, we have
\begin{widetext}
\begin{equation}
\begin{split}
\rho&=P_0\,QITP_{th}\left(\frac{\beta}{2}\right)\,\begin{pmatrix}
\rho_{MME} & 0\\
0&0
\end{pmatrix} \,\left(QITP_{th}\left(\frac{\beta}{2}\right)\right)^\dagger \,P_0\\
&= P_0
\begin{pmatrix}
e^{-\frac{\beta}{2} (H-E_T)} \rho_{MME} e^{-\frac{\beta}{2} (H-E_T)} & e^{-\frac{\beta}{2} (H-E_T)} \rho_{MME} \sqrt{1-e^{-\beta (H-E_T)}}
\\
\sqrt{1-e^{-\beta (H-E_T)}} \rho_{MME} e^{-\frac{\beta}{2} (H-E_T)} &\sqrt{1-e^{-\beta (H-E_T)}} \rho_{MME} \sqrt{1-e^{-\beta (H-E_T)}}
\end{pmatrix} P_0\\
&= \begin{pmatrix}
e^{-\frac{\beta}{2} (H-E_T)} \rho_{MME} e^{-\frac{\beta}{2} (H-E_T)} &0\\
0&0\\
\end{pmatrix} = \frac{1}{2^{n_s}} e^{-\beta (H-E_T)} \otimes \ket{0}\bra{0}\,,
\end{split} \label{eq:rho_th}
\end{equation}
\end{widetext}
where $P_0$ indicates the projector onto the $\ket{0}$ state of the ancilla. This proves that the resulting density matrix is proportional to the thermal density matrix.
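As a numerical illustration of this identity (our own sketch, with a random Hermitian matrix standing in for the nuclear Hamiltonian), the following NumPy snippet applies the block form of $QITP_{th}(\beta/2)$ to $\rho_{MME}\otimes\ket{0}\bra{0}$ and compares the ancilla-$\ket{0}$ block with $e^{-\beta(H-E_T)}/2^{n_s}$.
\begin{verbatim}
# Sketch (ours): check rho_out|ancilla=0 = e^{-beta(H-E_T)} / 2^{n_s}
# for a random Hermitian example H (not the nuclear Hamiltonian).
import numpy as np
from scipy.linalg import expm, sqrtm

rng = np.random.default_rng(0)
n_s, beta = 2, 0.3
dim = 2 ** n_s
M = rng.normal(size=(dim, dim))
H = (M + M.T) / 2                              # example Hermitian H
E_T = np.linalg.eigvalsh(H).min() - 0.1        # E_T < E_0, as required

A = expm(-0.5 * beta * (H - E_T * np.eye(dim)))
S = np.real(sqrtm(np.eye(dim) - A @ A))
QITP = np.block([[A, S], [-S, A]])             # block form used above

rho_in = np.zeros((2 * dim, 2 * dim))
rho_in[:dim, :dim] = np.eye(dim) / dim         # rho_MME (x) |0><0|
rho_out = QITP @ rho_in @ QITP.T

target = expm(-beta * (H - E_T * np.eye(dim))) / dim
print(np.allclose(rho_out[:dim, :dim], target))   # -> True
\end{verbatim}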
\section{Appendix D: Compilation of an observable }
\label{app:compi_observ}
This appendix presents how we can compile an observable $O$ and evaluate its expectation value with a single quantum circuit. Usually, the observable is Hermitian but not unitary; therefore, we have to transform it into a unitary operator.
In the same spirit as the compilation of the $QITP_{th}$ operator, we define the operator $A$
\begin{equation}
A= \sqrt{\frac{O-\lambda^O_0}{\left|O-\lambda^O_0\right|}}\,, \label{eq:A_definitation}
\end{equation}
where $\lambda^O_0$ is the lowest eigenvalue of $O$ and $|O-\lambda^O_0|= \max_{\lambda_i} |\lambda_i-\lambda^O_0|$. With this transformation, we rescale the spectrum of $O$ to lie between 0 and 1 (as required for the unitary construction below), keeping $A$ Hermitian.
Following the same procedure as for the $QITP_{th}$ operator, we add an extra ancilla qubit in the $\ket{0}$ state and define the unitary operator $U_O$ as
\begin{equation}
U_O=\begin{pmatrix}
A & \sqrt{1-A^2}\\
-\sqrt{1-A^2} & A\\
\end{pmatrix}\,.
\end{equation}
This new operator is unitary and can be used to evaluate the thermal expectation value of $O$. Indeed, $\langle O \rangle$ is given by
\begin{equation}
\langle O \rangle = \langle A^2 \rangle \left|O-\lambda^O_0\right| + \lambda^O_0\,, \label{eq:thermal_O_as_function_A}
\end{equation}
where $|O-\lambda^O_0|$ and $\lambda^O_0$ are defined in Eq.~\eqref{eq:A_definitation}.
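As a hedged toy example (our own sketch, with an arbitrary $2\times2$ observable and test state), the following NumPy snippet constructs $A$ as in Eq.~\eqref{eq:A_definitation} and checks Eq.~\eqref{eq:thermal_O_as_function_A}.
\begin{verbatim}
# Sketch (ours): build A from a Hermitian O and verify
# <O> = <A^2> |O - lambda_0| + lambda_0 on a test state.
import numpy as np

O = np.array([[0.3, 0.2], [0.2, -0.5]])     # example Hermitian observable
lam, V = np.linalg.eigh(O)
lam0 = lam[0]                               # lowest eigenvalue
norm = np.max(np.abs(lam - lam0))           # |O - lambda_0|

A = V @ np.diag(np.sqrt((lam - lam0) / norm)) @ V.T   # spectrum in [0, 1]

psi = np.array([0.6, 0.8])                  # normalized test state
lhs = psi @ O @ psi
rhs = (psi @ (A @ A) @ psi) * norm + lam0
print(np.isclose(lhs, rhs))                 # -> True
\end{verbatim}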
To demonstrate the validity of this compiling method, we start by assuming we have prepared the system qubits in the maximally mixed state, $\rho_{MME}=\frac{\mathbb{1}}{2^{n_s}}$. We add two ancilla qubits, one for the $QITP_{th}$ operator and one for the $U_O$ operator. Applying first the $QITP_{th}$ operator and using the result of Eq.~\eqref{eq:rho_th}, we get:
\begin{widetext}
\begin{equation}
\rho_1=\begin{pmatrix}
e^{-\frac{\beta}{2} H} \rho_{MME} e^{-\frac{\beta}{2} H} & e^{-\frac{\beta}{2} H} \rho_{MME} \sqrt{1-e^{-\beta H}}& 0 &0\\
\sqrt{1-e^{-\beta H}} \rho_{MME} e^{-\frac{\beta}{2} H} &\sqrt{1-e^{-\beta H}} \rho_{MME} \sqrt{1-e^{-\beta H}} & 0 &0\\
0 &0 &0 &0\\
0 &0 &0 &0\\
\end{pmatrix}\,.
\end{equation}
\end{widetext}
Then, using $\rho_{MME}=\frac{\mathbb{1}}{2^{n_s}}$ and $A^\dagger=A$, the action of the operator $U_O$ gives us
\newcommand\scalemath[2]{\scalebox{#1}{\mbox{\ensuremath{\displaystyle #2}}}}
\begin{widetext}
\begin{equation}
\scalemath{0.75}{
\rho_2=\frac{1}{2^{n_s}} \begin{pmatrix}
A e^{-\beta H} A & -A e^{-\frac{\beta}{2} H} \sqrt{1-e^{-\beta H}} A & -A e^{-\beta H} \sqrt{1-A^2}& A e^{-\frac{\beta}{2} H} \sqrt{1-e^{-\beta H}} \sqrt{1-A^2}\\
-A \sqrt{1-e^{-\beta H}} e^{-\frac{\beta}{2} H} A& A \sqrt{1-e^{-\beta H}} \sqrt{1-e^{-\beta H}} A & A \sqrt{1-e^{-\beta H}} e^{-\frac{\beta}{2} H} \sqrt{1-A^2} & -A \sqrt{1-e^{-\beta H}} \sqrt{1-e^{-\beta H}} \sqrt{1-A^2} \\
-\sqrt{1-A^2} e^{-\beta H} A & -\sqrt{1-A^2} e^{-\frac{\beta}{2} H} \sqrt{1-e^{-\beta H}} A & \sqrt{1-A^2} e^{-\beta H} \sqrt{1-A^2} & -\sqrt{1-A^2} e^{-\frac{\beta}{2} H} \sqrt{1-e^{-\beta H}} \sqrt{1-A^2} \\
\sqrt{1-A^2} \sqrt{1-e^{-\beta H}} e^{-\frac{\beta}{2} H} A & -\sqrt{1-A^2} \sqrt{1-e^{-\beta H}} \sqrt{1-e^{-\beta H}} A & -\sqrt{1-A^2} \sqrt{1-e^{-\beta H}} e^{-\frac{\beta}{2} H} \sqrt{1-A^2}& \sqrt{1-A^2} \sqrt{1-e^{-\beta H}} \sqrt{1-e^{-\beta H}} \sqrt{1-A^2}\\ \end{pmatrix}
}\label{eq:dm_expec_A}\,.
\end{equation}
\end{widetext}
We observe that the probability of measuring both ancilla qubits in $\ket{0}$, indicated with $P_{00}$, is equal to the numerator of the thermal expectation value of $A^2$, $\Tr{A^2\,e^{-\beta H}}$. Moreover, the partition function $Z_0$ is obtained as the sum of $P_{00}$ and the probability of measuring the $U_O$ ancilla qubit in $\ket{1}$ and the QITP ancilla in $\ket{0}$, indicated with $P_{10}$ (the third diagonal element in Eq.~\eqref{eq:dm_expec_A}). Hence, we have
\begin{widetext}
\begin{equation}
\begin{split}
P_{00} &= \Tr{A e^{-\frac{\beta}{2} H} \rho_{MME} e^{-\frac{\beta}{2} H} A^\dagger}= \Tr{ A^2 e^{-\beta H} }\\
P_{00}+P_{10} &= \Tr{A e^{-\frac{\beta}{2} H} \rho_{MME} e^{-\frac{\beta}{2} H} A} + \Tr{\sqrt{1-A^2} e^{-\frac{\beta}{2} H} \rho_{MME} e^{-\frac{\beta}{2} H} \sqrt{1-A^2} }\\
& = \Tr{e^{-\frac{\beta}{2} H} \rho_{MME} e^{-\frac{\beta}{2} H}}= \Tr{e^{-\beta H}}=Z_0 \,.\label{eq:thermal_expec}
\end{split}
\end{equation}
\end{widetext}
where, in the last line, we used the unitary condition of $U_O$, $\Tr{A \rho A + \sqrt{1-A^2} \rho \sqrt{1-A^2}}=\Tr{\rho}$.
Using Eq.~\eqref{eq:thermal_expec}, we can compute the thermal expectation of $\langle A^2 \rangle$ using
\begin{equation}
\langle A^2 \rangle\,=\,\frac{1}{Z_0} \Tr{ A^2 e^{-\beta H} }\,=\, \frac{P_{00}}{P_{00}+P_{10}} \,.
\end{equation}
The expectation value of $O$ is recovered via Eq.~\eqref{eq:thermal_O_as_function_A}.
\begin{figure*}
\caption{Uncertainties of different thermal expectation values, obtained by expanding the observable in Pauli operators (dashed lines) and by using the presented method (solid lines), as a function of $\beta$ for different positions of the two neutrons. In panels (a) and (c) we use 200 shots; in panels (b) and (d), 2000. }
\label{fig:uncert_pauli_vs_ancilla}
\end{figure*}
\subsection{Test with the Pauli expansion}
As reported in the main text, the experimental error bars of thermal expectation values of observables grow with increasing $\beta$. Hence, at the end of this section, we also report tests comparing the proposed compiling method with an expansion of the observable in a sum of Pauli operators. In particular, we compute the uncertainties of the expectation values of different observables for the two-neutron spin system with the two compiling methods.
We evaluate the uncertainties for a single-qubit operator, $\sigma_z$, and a two-qubit operator, the Hamiltonian $H$, as a function of $\beta$ for different nuclear Hamiltonians obtained by changing the positions of the two neutrons. The simulations are run on the noiseless IBM emulator.
Panels (a) and (b) of Fig.~\ref{fig:uncert_pauli_vs_ancilla} show the uncertainties for a single Pauli matrix ($\sigma_z$), and panels (c) and (d) those for the Hamiltonian (a generic two-qubit operator). Solid lines indicate the results with the ancilla method, dashed lines those with the Pauli expansion. In panels (a) and (c) we use 200 shots; in panels (b) and (d), 2000 (for the Pauli expansion, 200 or 2000 shots for each quantum circuit).
We observe that the uncertainties for $\sigma_z$ are smaller for the Pauli expansion. Nevertheless, for a generic single-qubit operator, the final uncertainty of the Pauli expansion would typically be about twice as large, owing to the contributions of the four generators ($\{\mathbb{1},\sigma_x,\sigma_y,\sigma_z\}$). For the two-qubit operator, the uncertainties of the proposed method (with an extra ancilla) are similar to or smaller than those obtained from the Pauli expansion, as illustrated by the rough estimate below.
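As a rough error-propagation estimate (ours, not a result of the simulations): writing a generic single-qubit observable as $O=\sum_{i} c_i \sigma_i$ over the four generators and estimating each term independently with the same number of shots, the variances add as
\begin{equation}
\mathrm{Var}\big[\langle O\rangle\big] \simeq \sum_i c_i^2\, \mathrm{Var}\big[\langle\sigma_i\rangle\big]\,,
\end{equation}
so for coefficients of comparable size the resulting standard deviation is roughly twice that of a single term, consistent with the factor quoted above.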
Despite the lower uncertainties of the Pauli expansion for a single-qubit observable, we have used the presented compiling method to compute the expectation values because it requires a single quantum circuit and thus saves computational credits.
\section{Appendix E: Obtained Data}
The obtained data are shown in the following tables. Tab.~\ref{tab:chi2} reports the reduced chi-square for each simulation.
Tab.~\ref{tab:2n_singlestep} lists the results for the two neutrons, Tab.~\ref{tab:3n_singlestep} those for the three neutrons with a single step, and Tab.~\ref{tab:3n_trotter} those for the three neutrons with the Trotter decomposition.
\begin{table*}[h]
\centering
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
\multirow{2}{*}{$\frac{\chi^2}{N}$}& \multicolumn{4}{|c|}{2 neutrons} &\multicolumn{3}{|c|}{3 neutrons} &
Trotter 3 neutrons \\
& \texttt{H1-1} (only $Z_0$)& \texttt{H1-1} (both $Z_0$ and $\sigma_z$)& \texttt{ibmq\textunderscore quito}&\texttt{ibmq\textunderscore manila}& \texttt{H1-1}&\texttt{ibmq\textunderscore oslo}&\texttt{ibmq\textunderscore nairobi} & \texttt{H1-1} \\
\hline
$Z_0$ & 0.93&0.44&2.2 &1.42&0.62 &0.72 &2.06&0.77\\
$Ob$ & &0.34&&&0.17&&&0.44\\
\hline
\end{tabular}
\caption{Reduced $\chi^2$ for each simulation}
\label{tab:chi2}
\end{table*}
\begin{table*}[h]
\centering
\begin{tabular}{|c|cccc|cc|}
\hline
\multirow{2}{*}{$\beta$} & \multicolumn{4}{|c|}{$Z_0$} &\multicolumn{2}{c|}{$\langle \sigma_z^1\rangle$ } \\
& \texttt{H1-1} &\texttt{ibmq\textunderscore manila} &\texttt{ibmq\textunderscore quito} & analytical & \texttt{H1-1} & analytical \\
\hline
0.025 & 2.4(2) & &2.7& & &\\
0.075 & 1.50(16) & &1.6& & &\\
0.125 & 1.35(16) & &1.2& & &\\
0.175 & 1.12(15) & &1.1& & &\\
\hline
0.00 & 3.9(2) & & & 4.0& 0.01(11) &0.00\\
0.05 & 2.1(2) & & & 2.0& 0.00(17) &0.01\\
0.10 & 1.40(17) & & & 1.4& 0.1(2) &0.11\\
0.15 & 1.38(17) & & & 1.1& 0.2(2) &0.20\\
0.20 & 1.12(16) & & & 1.1& 0.1(3) &0.27\\
\hline
0.00 & & 3.3(5) & 3.1(4) & 4.0& &\\
0.025 & & 2.9(3) & 2.9(2) & 2.7& &\\
0.05 & & 2.1(2) & 2.1(2) & 2.0& &\\
0.075 & & 1.69(14) & 1.73(13) & 1.60& &\\
0.10 & & 1.60(13) & 1.55(13) & 1.37& &\\
0.125 & & 1.39(11) & 1.50(12) & 1.23& &\\
\hline
\end{tabular}
\caption{Results for the two-neutron spin system}
\label{tab:2n_singlestep}
\end{table*}
\begin{table*}[h]
\centering
\begin{tabular}{|c|cccc|cc|}
\hline
\multirow{2}{*}{$\beta$} & \multicolumn{4}{|c|}{$Z_0$} &\multicolumn{2}{c|}{$\langle H \rangle$ } \\
& \texttt{H1-1} &\texttt{ibmq\textunderscore oslo} & \texttt{ibmq\textunderscore nairobi} & analytical & \texttt{H1-1} & analytical \\
\hline
0.005 & 6.3(5) & & & 6.7& 0.0(4) &-2.4\\
0.015 & 5.4(4) & & & 4.9& -6(4) &-7.2\\
0.025 & 3.5(4) & & & 3.8& -9(5) &-11.7\\
0.045 & 2.7(3) & & & 2.5& -17(5) &-19.3\\
0.065 & 1.8(3) & & & 1.9& -21(5) &-24.7\\
\hline
0.00 & & 6.8(9) & 7.3(6) & 8.0& &\\
0.01 & & 5.4(6) & 5.8(5) & 5.7& &\\
0.02 & & 4.7(8) & 4.9(7) & 4.3& &\\
0.03 & & 4.2(6) & 4.3(4) & 3.4& &\\
0.04 & & 3.4(4) & 3.9(4) & 2.7& &\\
0.05 & & 3.2(4) & 3.6(4) & 2.3& &\\
\hline
\end{tabular}
\caption{Results for the three-neutron spin system without the Trotter decomposition}
\label{tab:3n_singlestep}
\end{table*}
\begin{table*}[h]
\centering
\begin{tabular}{|c|cc|cc|}
\hline
\multirow{2}{*}{$\beta$} &\multicolumn{2}{|c|}{$Z_0$} &\multicolumn{2}{|c|}{$\langle \sigma_z^1\rangle$ }\\
&\texttt{H1-1} & analytical &\texttt{H1-1} & analytical\\
\hline
0.000& 8.0(3)&8.0&0.03(6)&0.00\\
0.005& 5.6(3)&5.9&-0.15(10)&-0.03\\
0.01& 5.0(4)&4.5&-0.05(13)&-0.06\\
0.015& 3.1(4)&3.6&0.0(2)&-0.08\\
0.02& 2.8(4)&2.9&0.0(3)&-0.10\\
\hline
\end{tabular}
\caption{Results for the three-neutron spin system with the Trotter decomposition}
\label{tab:3n_trotter}
\end{table*}
\end{document}
\begin{document}
\title{Lateral interatomic dispersion forces}
\author{Pablo Barcellona} \email{[email protected]} \affiliation{Physikalisches Institut, Albert-Ludwigs-Universit\"at Freiburg, Hermann-Herder-Str. 3, 79104 Freiburg, Germany}
\author{Robert Bennett} \affiliation{Physikalisches Institut, Albert-Ludwigs-Universit\"at Freiburg, Hermann-Herder-Str. 3, 79104 Freiburg, Germany} \affiliation{School of Physics \& Astronomy, University of Glasgow, Glasgow, G12 8QQ, United Kingdom}
\author{Stefan Yoshi Buhmann} \affiliation{Physikalisches Institut, Albert-Ludwigs-Universit\"at Freiburg, Hermann-Herder-Str. 3, 79104 Freiburg, Germany} \date{\today}
\begin{abstract} Van der Waals forces between atoms and molecules are universally assumed to act along the line separating them. Inspired by recent works on effects which can propel atoms parallel to a macroscopic surface via the Casimir--Polder force, we predict a lateral van der Waals force between two atoms, one of which is in an excited state with non-zero angular momentum and the other is isotropic and in its ground state. The resulting force acts in the same way as a planetary gear, in contrast to the rack-and-pinion motion predicted in works on the lateral Casimir--Polder force in the analogous case, for which the force predicted here is the microscopic origin. We illustrate the effect by predicting the trajectories of an excited caesium in the vicinity of ground-state rubidium, finding behaviour qualitatively different to that if lateral forces are ignored. \end{abstract}
\maketitle
Descriptions of macroscopic phenomena are often informed and improved by understanding the underlying microscopic processes. Examples are found throughout condensed matter physics, for instance the BCS theory of superconductivity \cite{Bardeen1957} or the Lifshitz theory of Casimir forces \cite{E.M.Lifshitz1956}. The latter explains Casimir's original result \cite{Casimir1948} for the attraction between two perfectly conducting parallel plates in terms of correlations between the fluctuating charge distributions of their elementary atomic constituents. This is part of a broad class of phenomena known as dispersion interactions (c.f. \cite{Buhmann2012BothBooks}), the most familiar being the Van der Waals force between two neutral atoms. Closely related to this is the Casimir--Polder force \new{that a} neutral atom feels in proximity to a material body.
In recent years, lateral Casimir (surface--surface) and Casimir--Polder (atom--surface) \footnote{In this work we refer to atom-atom forces at all distances as van der Waals forces (in contrast to some authors who refer to the long-distance atom-atom interaction as a Casimir-Polder force), and all atom-surface forces as Casimir-Polder forces. This should not be taken as denial of the fact that Casimir and Polder derived atom-atom \emph{and} atom surface forces at all distances in their seminal paper \cite{Casimir1948a}.} forces have received attention due to their potential to realise contactless force transmission \cite{Ashourvan2007,Nasiri2012}, as well as novel types of sensors and clocks \cite{Miri2008}. All of these works rely on corrugated surfaces \cite{Messina2009,Dalvit2008,Chen2002,Rodrigues2006,Emig2003,Dobrich2008}, gratings \cite{Lambrecht2008,Contreras-Reyes2010,Bender2014,Buhmann2016}, or gyrotropic response \cite{Polevoi1985}. A number of more recent works have discussed the intriguing possibility of engineering modes propagating along a flat, featureless planar interface \cite{Rodriguez-Fortuno2013,LeKien2016,Mueller2013,Xi2013,Lin2013,Neugebauer2014,Manjavacas2017} or nanofiber \cite{Petersen2014} in such a way that an atom or second object placed nearby will feel a force dragging it along the surface. In this Letter we will reveal the microscopic origins of this latter force.
The resonant Casimir--Polder (CP) force on an atom can be expressed in terms of the dyadic Green's tensor $\overbar{\tens{G}}\left(\mathbf{r},\mathbf{r}',\omega \right)$ describing propagation of electromagnetic waves of frequency $\omega$ from point $\B{r}'$ to $\B{r}$ subject to boundary conditions imposed by material geometry. For a two-level atom at position $\B{r}_\text{A}$ with time-dependent excited-state occupancy $p(t)$ \new{it} is given by \cite{Scheel2015,OudeWeernink2018}
\begin{align} \textbf{F}^\text{res}(\textbf{r}_\text{A},t) &= 2\mu _0 p(t) \omega_\text{A}^2 \notag \\ &\quad \times \text{Re}\Big[ \nabla \textbf{d}_{10}^\text{A} \cdot \overbar{\tens{G}}\left( \textbf{r},\textbf{r}_\text{A},\omega_\text{A} \right) \cdot \textbf{d}_{01}^\text{A} \Big]_{\textbf{r} = \textbf{r}_\text{A}}, \label{CasPol} \end{align}
where $\omega_\text{A}$ is the transition frequency and $ \textbf{d}_{01}^\text{A}=\textbf{d}_{10}^{\text{A}*} $ is the (complex) transition dipole moment from the upper to lower level, and $\mu_0$ is the permeability of free space. \new{There is also a non-resonant force originating in the contribution from photons with frequencies different to the atomic transition, but as shown in the Supplementary Material \footnote{See Supplementary Material [url] for detailed derivations of the forces, emission rates and emission spectra.} the contribution of this for the parameters we will choose is negligible compared to the resonant terms.} Most derivations of Casimir--Polder forces proceed by finding the position-dependent energy shift of the atomic levels, then taking a spatial derivative to find the force. If the atom has a complex polarisability (and corresponding complex dipole moment) then the Casimir--Polder force is not conservative, meaning that it cannot be derived as the gradient of an energy shift. We seek a microscopic version of the non-conservative force given by Eq.~\eqref{CasPol}, which was derived from the Lorentz force law.
From a microscopic point of view, a macroscopic medium is a collection of a large number of atoms --- the imposition of macroscopic boundary conditions is simply a neat and powerful way of summarising their collective behaviour. We thus begin by replacing the material body found in accounts of the lateral Casimir--Polder force with a collection of neutral atoms. This is done by taking the dilute-gas limit (\new{in which the polarisability volume of each atom is much smaller than the cube of the mean interatomic spacing}) in a similar manner to that done by Lifshitz \cite{E.M.Lifshitz1956} via a Born-expansion of the dyadic Green's tensor (see, for example, \cite{Purcell1973,Buhmann2006,Sherkunov2007})
\begin{align}\label{BornExpansion} &\overbar{\tens{G}}\left(\mathbf{r},\mathbf{r}',\omega \right) = {\tens{G}}\left( \mathbf{r},\mathbf{r}',\omega \right)\notag \\ &\!\!+ \mu _0 \omega ^2\!\!\int \! \text{d}^3 r'' \!\rho \left( \mathbf{r}'' \right){\tens{G}}\left( \mathbf{r},\mathbf{r}'',\omega \right) \cdot \bm{\alpha}_\text{B} \left( \omega \right) \cdot {\tens{G}}\left( \mathbf{r}'',\mathbf{r}',\omega \right) \new{+ \ldots} \end{align}
where $\rho(\B{r})$ is the number density of a collection of arbitrarily-placed atoms with identical polarisibilities $\bm{\alpha}_\text{B}\left( \omega \right)$, and ${\tens{G}}\left( \mathbf{r},\mathbf{r}',\omega \right)$ is the known Green's tensor of the background environment which could for example be unbounded vacuum, but need not be.
Using the Born-expanded Green's tensor \eqref{BornExpansion} \new{with a delta-distributed number density} in the expression \eqref{CasPol} for the \new{resonant} force, one finds that $\textbf{F}^\text{res}(\textbf{r}_\text{A},t)=\bar{\textbf{F}}^\text{res}(\textbf{r}_\text{A},t) + \int d^3 r' \rho (\B{r}') \B{F}^\text{res}(\B{r}_\text{A},\B{r}',t)$, where $\bar{\textbf{F}}^\text{res}(\textbf{r}_\text{A},t)$ is the force felt between atom A and the background bodies alone, and
\begin{align}\label{ForceA} \textbf{F}^\text{res}(\textbf{r}_\text{A},\B{r}',t)= & 2\mu _0^2p(t)\omega_\text{A}^4 \text{Re} \big[\nabla\textbf{d}_{10}^\text{A} \cdot {\tens{G}}\left( \textbf{r},\textbf{r}',\omega_\text{A} \right)\notag \\ & \cdot \bm{\alpha}_\text{B}(\omega_\text{A})\cdot {\tens{G}}\left( \textbf{r}',\textbf{r}_\text{A},\omega_\text{A} \right) \cdot \textbf{d}_{01}^\text{A} \big] _{\textbf{r} = \textbf{r}_\text{A}}. \end{align}
This is an atom-atom (van der Waals) force felt by atom A due to the presence of a (non-identical) atom B at $\B{r}'=\B{r}_\text{B}$ with dynamic polarisability tensor $\bm{\alpha}_\text{B}(\omega)$, valid as long as the atoms are far enough apart that there is no appreciable wave-function overlap. Equation \eqref{ForceA} is made up of both the interaction of atom A with its own field as reflected by atom B, and the interaction with the quantised electromagnetic vacuum field. For most naturally-arising situations, the atomic dipoles can be considered to be randomly oriented, leaving an average force which pulls the particles linearly together (or, in some rare cases, pushes them apart).
The situation changes drastically if one of the atoms has a complex dipole moment, corresponding to an atomic transition with different magnetic quantum numbers --- loosely thought of as a continuous rotation. As we will show, the resulting force causes atom A to orbit atom B. Extending the analogy of the lateral Casimir--Polder force with a rack and pinion to our situation, the interaction considered here could be considered as an atom-scale, contactless version of planetary gearing as illustrated in Fig.~\ref{Mechanical}.
\begin{figure}
\caption{Mechanical analogies to the lateral Casimir--Polder force studied in previous works, and the lateral interatomic force discussed here. Dashed (green) arrows represent forces, while solid arrows (black, white) represent motion. In all cases the entity on the right (blue) is considered as being fixed in space. }
\label{Mechanical}
\end{figure}
We will illustrate this by taking atom A to be caesium undergoing a D2 transition from the highest hyperfine state $\ket{6^2\text{P}_{3/2}, F=5, M_\text{F} = 5} \equiv \ket{1}$ to the hyperfine ground state $\ket{6^2\text{S}_{1/2}, F=4, M_\text{F} = 4}\equiv \ket{0}$, and atom B to be rubidium in its ground state (\new{$5^2\text{S}_{1/2}$}, polarizability $\bm{\alpha}_\text{B} = \alpha_\text{B} \text{diag}(1,1,1)$, where $\alpha_\text{B} = 4\pi \varepsilon_0 \times 293\mathrm{\AA}^3$ at the caesium D2 wavelength of 852nm \cite{Sansonetti2005,SteckData}). The magnitude of the transition dipole moment between these two caesium levels is ${d}_\text{A} \equiv |\B{d}^\text{A}_{10}| = 2.68 \times 10^{-29}$Cm \cite{Scheel2015,SteckData}, while its components in the lab frame depend on the character of the light which excites the transition. Assuming a right-circularly polarised laser beam propagates along the $y$ direction of a cartesian co-ordinate system, the transition dipole moment can be written as:
\begin{equation}\label{DipoleMoment} \B{d}^\text{A}_{10} = \frac{d_\text{A}}{\sqrt{2}} (\mathrm{i},0,1) \; . \end{equation}
We assume that \new{the atoms are in free space, with} atom B at the origin and atom A in the $xz$ plane at position $z = r_\text{A} \cos\theta_\text{A}$, $x = r_\text{A} \sin \theta_\text{A}$. The two lateral components of the \new{resonant} force are in the $\theta$ and $y$ directions, and are found by inserting the free-space Green's tensor $\tens{G}^{(0)}$ into \eqref{ForceA}. As shown in, e.g., Ref.~\cite{Buhmann2012BothBooks}, this is given explicitly by
\begin{equation}
\tens{G}^{(0)}\left( \mathbf{r},\mathbf{r}',\omega \right) = \left( \tens{I} + \frac{c^2}{\omega ^2}\nabla \nabla \right)\frac{e^{\mathrm{i}\omega \left| \mathbf{r} - \mathbf{r}' \right|/c}}{4\pi \left| \mathbf{r} - \mathbf{r}' \right|} \label{G0Eq} \end{equation}
where $c$ is the speed of light. Using cylindrical coordinates $\mathbf{r}=\left(r\sin \theta,y,r\cos \theta \right)$, $\nabla f= \frac{\partial f}{\partial r} \hat{\bm{r}} + \frac{1}{r} \frac{\partial f}{\partial \theta} \hat{\bm{\theta}} + \frac{\partial f}{\partial y} \hat{\bm{y}} $ we find that the $y$ component of the force \new{$F^\text{res}_y = \B{F}^\text{res}\cdot \hat{\B{y}}$} vanishes;
\begin{equation}\label{lateralfy}
F^\text{res}_y(r_\text{A},t)=0 \end{equation} and the $\theta$ component \new{$F^\text{res}_\theta = \B{F}^\text{res}\cdot \hat{\bm{\theta}}$} is:
\begin{equation}F^\text{res}_\theta(r_\text{A},t) = - \frac{p(t)}{40\pi ^2\varepsilon _0^2c^5r_\text{A}^2}d_{\text{A}}^2\alpha_\text{B}(\omega _{\text{A}})\omega _{\text{A}}^5 g \left( \frac{\omega_{\text{A}} r_\text{A}}{c} \right)
\label{lateralf} \end{equation} where $\varepsilon_0$ is the permittivity of free space and we have defined \begin{align} g(\eta) \equiv\frac{5}{2\eta^5}[ 6\eta\left( \eta^2 - 3 \right) &\cos (2\eta) \notag \\ &+ \left( 9 - 15\eta^2 + \eta^4 \right)\sin (2\eta) ]. \end{align}
\new{The lateral force shown in Eq.~\eqref{lateralf} is our main result, but as a point of comparison we also report the normal force} $F^\text{res}_r = \B{F}^\text{res}\cdot \hat{\bm{r}}$: \begin{align} F^\text{res}_r (r_{\text{A}},t) =&- \frac{15 p \left(t \right)}{16\pi ^2\varepsilon _0^2 r_\text{A}^7 }d_{\text{A}}^2 \alpha_{\text{B}}(\omega _{\text{A}}) h \left( \frac{\omega_{\text{A}} r_{\text{A}}}{c} \right), \label{Fr} \end{align} where: \begin{align}\label{hOfEta} h \left(\eta \right) =&\frac{1}{15}\Big[ 3\left(5-8 \eta^2 + \eta^4 \right)\cos (2\eta)\notag \\ & +\eta \left( 30 - 10\eta^2 + \eta^4 \right)\sin (2\eta) \Big]. \end{align} \new{Similar normal forces between (non-rotating) excited and ground state atoms are well-studied, having been considered by the authors of Refs.~\cite{Power1995,Milonni2015,Donaire2015c,Donaire2016a,Jentschura2017a,Jentschura2017,Barcellona2016} with particular emphasis on the oscillating distance dependence, but the lateral force \eqref{lateralf} predicted here has not previously been discussed.} The van der Waals interaction in the near-field \new{(non-retarded)} limit $\omega_\text{A} r_\text{A}/c \ll 1$ is given by Eqs.~(\ref{lateralf}) and (\ref{Fr}) with $\lim _{\eta \to 0}g\left( \eta \right) = 1, \lim _{\eta \to 0}h\left( \eta \right) = 1$, \new{while the far-field (retarded) limit is found from Eqs.~(\ref{lateralf}) and (\ref{Fr}) by taking $\omega_\text{A} r_\text{A}/c \gg 1$}. It is interesting to note that the forces are independent of $\theta_\text{A}$, which also follows from symmetry considerations. Formulae (\ref{lateralf}) and (\ref{Fr}) account for retardation effects, which arise because of the finite velocity of light, via the functions $g$ and $h$ in the limit $\omega_{\text{A}} r_\text{A}/c \gg 1$. In the retarded regime the time taken for the photon to reach the second atom and reflect back to the first atom becomes comparable with the time scale of the dipole fluctuations themselves. In this case the orientation of the dipole at the time of emission may differ from its orientation at the time of absorption of the reflected photon, reducing the attractive force as compared to the ideal case of parallel alignment.
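As an order-of-magnitude illustration (a numerical sketch of our own, not code from this work), Eqs.~(\ref{lateralf}) and (\ref{Fr}) can be evaluated directly for the Cs--Rb parameters quoted above, with the excited-state population set to $p=1/2$ as in the strong-driving limit discussed below.
\begin{verbatim}
# Sketch (ours): evaluate the lateral and normal resonant forces,
# Eqs. (lateralf) and (Fr), for the Cs-Rb parameters of the text.
import numpy as np

eps0, c = 8.8541878128e-12, 2.99792458e8        # SI units
d_A = 2.68e-29                                   # Cs D2 dipole moment (C m)
alpha_B = 4 * np.pi * eps0 * 293e-30             # Rb polarisability at 852 nm
omega_A = 2 * np.pi * c / 852e-9                 # Cs D2 frequency (rad/s)
p = 0.5                                          # strong-driving population

def g(eta):
    return 2.5 / eta**5 * (6 * eta * (eta**2 - 3) * np.cos(2 * eta)
                           + (9 - 15 * eta**2 + eta**4) * np.sin(2 * eta))

def h(eta):
    return (3 * (5 - 8 * eta**2 + eta**4) * np.cos(2 * eta)
            + eta * (30 - 10 * eta**2 + eta**4) * np.sin(2 * eta)) / 15

def F_theta(r):
    eta = omega_A * r / c
    return (-p * d_A**2 * alpha_B * omega_A**5 * g(eta)
            / (40 * np.pi**2 * eps0**2 * c**5 * r**2))

def F_r(r):
    eta = omega_A * r / c
    return -15 * p * d_A**2 * alpha_B * h(eta) / (16 * np.pi**2 * eps0**2 * r**7)

for r in (50e-9, 100e-9, 200e-9):                # interatomic distances (m)
    print(f"r = {r*1e9:4.0f} nm: F_theta = {F_theta(r):+.2e} N, "
          f"F_r = {F_r(r):+.2e} N")
\end{verbatim}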
Our next step is to recognise that the excited-state interatomic force can be understood as a recoil force originating from the exchange of excitations with the environment, \new{for which we present an alternative derivation of Eq.~\eqref{ForceA} [and thereby Eqs \eqref{lateralfy} and \eqref{lateralf}], based on emission spectra instead of forces \cite{Sherkunov2009}.}
\new{To do this we begin by calculating the spontaneous decay rate for atom A in the excited state $\left| 1\right\rangle $ in the presence of a second atom B. As shown explicitly in the supplementary material, in free space this is given by;} \begin{align}\label{GammaRShifted}\Gamma(\textbf{r}_\text{A},\textbf{r}_\text{B})= \frac{ 2\mu _0^2}{\hbar} \omega _\text{A}^4 \text{Im} \Big[ \textbf{d}_{10}^\text{A} \cdot \tens{G}^{(0)}\left( \textbf{r}_\text{A},\textbf{r}_\text{B},\omega _\text{A} \right)\notag \\ \cdot \bm{\alpha}_\text{B} (\omega_\text{A})\cdot \tens{G}^{(0)}\left( \textbf{r}_\text{B},\textbf{r}_\text{A},\omega _\text{A} \right) \cdot \textbf{d}_{01}^\text{A} \Big] \, . \end{align}
\new{We can define a momentum-space emission rate density $\gamma$ as \begin{equation} \Gamma(\mathbf{r}_\text{A},\mathbf{r}_\text{B})= \int \text{d}^3k \gamma \left(\mathbf{k}; \mathbf{r}_\text{A}, \mathbf{r}_\text{B}\right), \end{equation} which is the rate at which light with wavevector $\mathbf{k}$ is emitted, if the atom $\text{A}$ is in the excited state. Since the free-space Green's tensor can be Fourier transformed $\tens{G}^{(0)} \left( \textbf{r},\textbf{r}',\omega\right)=\left(2\pi\right)^{-3}\int \text{d}^3k \text{e}^{\text{i} \mathbf{k} \cdot \left(\mathbf{r}-\mathbf{r}'\right)} \tens{G}^{(0)} \left( \mathbf{k},\omega\right)$ the rate density reads: \begin{align}\label{GammaRDef} \gamma(\mathbf{k}; \textbf{r}_\text{A},\textbf{r}_\text{B})= & \frac{ 2\mu _0^2}{\left(2\pi\right)^3\hbar} \omega _\text{A}^4 \text{Im} \Big[\text{e}^{\text{i} \mathbf{k} \cdot \left(\mathbf{r}_\text{A}-\mathbf{r}_\text{B}\right)} \textbf{d}_{10}^\text{A} \cdot \tens{G}^{(0)}\left( \mathbf{k},\omega _\text{A} \right) \notag \\ &\cdot {\bm{\alpha}}_\text{B} (\omega_\text{A})\cdot \tens{G}^{(0)}\left( \textbf{r}_\text{B},\textbf{r}_\text{A},\omega _\text{A} \right) \cdot \textbf{d}_{01}^\text{A} \Big]. \end{align}} \new{Explicit evaluation of the rate density in our particular setup (see supplemental material) reveals that $\gamma(-\mathbf{k}; \textbf{r}_\text{A},\textbf{r}_\text{B}) \ne \gamma(\mathbf{k}; \textbf{r}_\text{A},\textbf{r}_\text{B})$, showing that the net recoil force is, as expected, not zero.} \new{This can be explained by noting that the momentum-space recoil force density is given by $-\gamma \hbar \mathbf{k}$ (the minus signs accounting for the fact that we are considering recoils), so that the total resonant force on atom A is given by \begin{equation}\label{FResFromGamma} \mathbf{F}^{\text{res}} \left(\mathbf{r}_\text{A},\mathbf{r}_\text{B},t\right)=-p\left(t\right) \int \text{d}^3k \hbar \mathbf{k} \gamma \left(\mathbf{k}; \mathbf{r}_\text{A}, \mathbf{r}_\text{B}\right). \end{equation} Since $\nabla \text{e}^{\text{i} \mathbf{k}\cdot \mathbf{r}}=\text{i} \mathbf{k} \text{e}^{\text{i} \mathbf{k}\cdot \mathbf{r}} $ we immediately find the recoil force Eq.~\eqref{ForceA}, which leads to the lateral forces \eqref{lateralfy} and \eqref{lateralf}. }
We are now left with a remarkable conclusion. The asymmetry that atom B represents in the environment of atom A causes the latter to preferentially release its excitation in a direction perpendicular to the line joining them, propelling A around B like a planetary gear. When combined with the oscillatory nature of the \new{resonant} force that atom B exerts on atom A, we also find that the sign of this torque can be varied by changing the distance between the atoms, as shown in Fig.~\ref{Plot1D},
\begin{figure}
\caption{Lateral [solid, Eq.~\eqref{lateralf}] and normal [dashed, Eq.~\eqref{Fr}] \new{resonant} forces on a caesium atom (D2 transition) due to the presence of a rubidium atom at the origin. The numbered dots are those used later for trajectory simulations. Each chosen distance is comfortably larger than the atomic radii ($\sim10$\AA), consistent with our assumption of independent polarisibilities. }
\label{Plot1D}
\end{figure}
where we also plot the corresponding normal \new{resonant} force \eqref{Fr}.
Having seen that a lateral interatomic dispersion force is possible, we now turn our attention to its magnitude and prospects for experimental observation. In the absence of external driving, the atomic population (and therefore the recoil force) decays on average like $e^{-\Gamma t}$, meaning that the torque quickly becomes unobservably small. In order to combat this, we \new{introduce a coherent driving, for which it is useful to go} into the vacuum picture where the interaction of an atom with a coherent field can be considered as being made up of a classical driving field plus the vacuum field \cite{Pegg1980,Dutra1994,Fuchs2018c}. We consider atom A to be continuously driven by a circularly-polarised classical laser field propagating in the positive $y$-direction:
\begin{equation}\label{DrivingLaser} \mathbf{E}_{\text{L}}\left( t \right) = E_0\B{e}_{\text{R}} e^{ - \text{i} \omega _{\text{L}}t}/2 + \text{c.c.} \end{equation}
where $E_0$ is the field's amplitude, $\omega_{\text{L}}$ its frequency and $\B{e}_{\text{R}}= \left(-\text{i},\; 0, \; 1 \right)/\sqrt 2$. The effect of the driving laser is accounted for by the real Rabi frequency ${ \hbar \Omega = \mathbf{d}_{10}^{\text{A}} \cdot \B{e}_{\text{R}} E_0= d_\text{A} E_0 }$. Solving the optical Bloch equations \new{for the interaction of the laser field with atom A in the absence of atom B} in the long time limit ($t\gg \Gamma^{-1}$), the expectation value of the dipole moment operator of atom A is then given by
\begin{align}\label{DipoleExpct} \left\langle \mathbf{d}^\text{A}(t) \right\rangle &= \frac{\sqrt{2}\Omega \Delta }{2\Delta ^2 + \Omega^2} d_\text{A}\big( \sin \left( \omega _\text{L}t \right),0, -\cos \left( \omega _\text{L}t \right) \big) \end{align}
where $\Delta = \omega_\text{L}-\omega_\text{A}$ is the detuning of the laser field from the atomic resonance, and we have also assumed $\Gamma \ll |\Delta|$. In the absence of atom B, the dipole moment of atom A simply rotates in the $x$-$z$ plane with the same frequency as the laser, which is not surprising. The presence of atom B breaks the symmetry of the electromagnetic environment experienced by atom A. To quantify this effect we use Eq.~\eqref{ForceA} with an excited state population given by (\new{see, for example, \cite{Mollow1969}})
\begin{equation}\label{Prob} p(t) =\frac{ \Omega^2}{4\Delta ^2 + 2\Omega^2} \end{equation}
In the strong interaction limit $\Omega \gg |\Delta|$, the effect of the resultant force is shown in Fig.~\ref{StreamPlot}, \begin{figure}
\caption{Simulated trajectories for a caesium atoms starting at rest for the four points shown in Fig.~\ref{Plot1D}. Shown in the background is the potential energy function found by integrating the normal \new{resonant} force in the radial direction.}
\label{StreamPlot}
\end{figure}
where we place atoms initially at rest on the $x$ axis at the positions indicated by the dots in Fig.~\ref{Plot1D} and compute their trajectories. \new{The illuminating light should be set up in such a way that it has a constant amplitude over the trajectory of atom A, while affecting atom B as little as possible. This could be achieved, for example, by tailoring atom B's level structure, or through the use of structured light.} It is seen that under such continuous laser driving the lateral force causes atom A to be ejected after slightly more than half an orbit of the fixed, isotropic atom B. In Fig.~\ref{VelocityPlot}
\begin{figure}
\caption{Velocities gained along the four trajectories simulated in Fig.~\ref{StreamPlot}. }
\label{VelocityPlot}
\end{figure}
we plot the velocity gained as a function of time, finding $12-15\,\mu$m/s for the parameters chosen here. Reaching these velocities takes a relatively long time (on the order of a second) since the force is so weak. However, there are several routes to combat this by enhancing the interaction. One might expect the use of Rydberg atoms, with their large dipole moments (quadratic in the principal quantum number $n$), to help; however, the energy difference of adjacent states scales as $n^{-3}$, meaning that the force derived here is strongly suppressed for such systems. Finally, we note that the interaction could be enhanced by placing the pair of atoms in a cavity, in much the same way as the spontaneous decay rate of a quantum emitter can be enhanced through the Purcell factor \cite{Purcell1946}.
To conclude, we have demonstrated the existence of a lateral Van der Waals force on an excited, circularly polarised atom due to the placement of an isotropic, ground state atom nearby. We have outlined how the effect might be experimentally accessed by selectively pumping the atom to a Zeeman sub-level. Control of the lateral force direction and magnitude can be experimentally implemented by changing the handedness of the illuminating light and the distance between the two atoms. Our work is the first demonstration of the most elementary lateral force that can act on a circularly polarised emitter, without the influence of a surface. Nevertheless, our expression of the force in terms of the dyadic Green's tensor means that additional macroscopic objects can be introduced without fundamental changes to the method, opening up the effect detailed here to Purcell-type enhancement. In the longer term, the force could find applications in optomechanics as a new actuation method, as well as in any of the numerous fields in which Van der Waals forces play a pivotal role.
\acknowledgments{\new{The authors thank Gabriel Dufour for valuable feedback on the manuscript, and the Deutsche Forschungsgemeinschaft for financial support (grant BU 1803/3-1476)}.}
\begin{thebibliography}{49} \makeatletter \providecommand \@ifxundefined [1]{
\@ifx{#1\undefined} } \providecommand \@ifnum [1]{
\ifnum #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi } \providecommand \@ifx [1]{
\ifx #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi } \providecommand \natexlab [1]{#1} \providecommand \enquote [1]{``#1''} \providecommand \bibnamefont [1]{#1} \providecommand \bibfnamefont [1]{#1} \providecommand \citenamefont [1]{#1} \providecommand \href@noop [0]{\@secondoftwo} \providecommand \href [0]{\begingroup \@sanitize@url \@href} \providecommand \@href[1]{\@@startlink{#1}\@@href} \providecommand \@@href[1]{\endgroup#1\@@endlink} \providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode
`\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax} \providecommand \@@startlink[1]{} \providecommand \@@endlink[0]{} \providecommand \url [0]{\begingroup\@sanitize@url \@url } \providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }} \providecommand \urlprefix [0]{URL } \providecommand \Eprint [0]{\href } \providecommand \doibase [0]{http://dx.doi.org/} \providecommand \selectlanguage [0]{\@gobble} \providecommand \bibinfo [0]{\@secondoftwo} \providecommand \bibfield [0]{\@secondoftwo} \providecommand \translation [1]{[#1]} \providecommand \BibitemOpen [0]{} \providecommand \bibitemStop [0]{} \providecommand \bibitemNoStop [0]{.\EOS\space} \providecommand \EOS [0]{\spacefactor3000\relax} \providecommand \BibitemShut [1]{\csname bibitem#1\endcsname} \let\auto@bib@innerbib\@empty
\bibitem [{\citenamefont {Bardeen}\ \emph {et~al.}(1957)\citenamefont
{Bardeen}, \citenamefont {Cooper},\ and\ \citenamefont
{Schrieffer}}]{Bardeen1957}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Bardeen}}, \bibinfo {author} {\bibfnamefont {L.~N.}\ \bibnamefont {Cooper}},
\ and\ \bibinfo {author} {\bibfnamefont {J.~R.}\ \bibnamefont {Schrieffer}},\
}\href {\doibase 10.1103/PhysRev.106.162} {\bibfield {journal} {\bibinfo
{journal} {Physical Review}\ }\textbf {\bibinfo {volume} {106}},\ \bibinfo
{pages} {162} (\bibinfo {year} {1957})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Lifshitz}(1956)}]{E.M.Lifshitz1956}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont
{Lifshitz}},\ }\href {http://www.jetp.ac.ru/cgi-bin/e/index/e/2/1/p73?a=list}
{\bibfield {journal} {\bibinfo {journal} {Journal of Experimental and
Theoretical Physics}\ }\textbf {\bibinfo {volume} {2}},\ \bibinfo {pages}
{73} (\bibinfo {year} {1956})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Casimir}(1948)}]{Casimir1948}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.~B.~G.}\
\bibnamefont {Casimir}},\ }\href {\doibase citeulike-article-id:8810715}
{\bibfield {journal} {\bibinfo {journal} {Proc. K. Ned. Akad.}\ }\textbf
{\bibinfo {volume} {360}},\ \bibinfo {pages} {793} (\bibinfo {year}
{1948})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Buhmann}(2012)}]{Buhmann2012BothBooks}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.~Y.}\ \bibnamefont
{Buhmann}},\ }\href@noop {} {\emph {\bibinfo {title} {{Dispersion
Forces}}}},\ \bibinfo {series} {Springer Tracts in Modern Physics}, Vol.\
\bibinfo {volume} {247}\ (\bibinfo {publisher} {Springer},\ \bibinfo
{address} {Berlin, Heidelberg},\ \bibinfo {year} {2012})\BibitemShut
{NoStop} \bibitem [{Note1()}]{Note1}
\BibitemOpen
\bibinfo {note} {In this work we refer to atom-atom forces at all distances
as van der Waals forces (in contrast to some authors who refer to the
long-distance atom-atom interaction as a Casimir-Polder force), and all
atom-surface forces as Casimir-Polder forces. This should not be taken as
denial of the fact that Casimir and Polder derived atom-atom \emph {and} atom
surface forces at all distances in their seminal paper \cite
{Casimir1948a}.}\BibitemShut {Stop} \bibitem [{\citenamefont {Ashourvan}\ \emph {et~al.}(2007)\citenamefont
{Ashourvan}, \citenamefont {Miri},\ and\ \citenamefont
{Golestanian}}]{Ashourvan2007}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Ashourvan}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Miri}}, \
and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Golestanian}},\
}\href {\doibase 10.1103/PhysRevLett.98.140801} {\bibfield {journal}
{\bibinfo {journal} {Physical Review Letters}\ }\textbf {\bibinfo {volume}
{98}},\ \bibinfo {pages} {140801} (\bibinfo {year} {2007})}\BibitemShut
{NoStop} \bibitem [{\citenamefont {Nasiri}\ \emph {et~al.}(2012)\citenamefont {Nasiri},
\citenamefont {Miri},\ and\ \citenamefont {Golestanian}}]{Nasiri2012}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Nasiri}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Miri}}, \ and\
\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Golestanian}},\ }\href
{\doibase 10.1063/1.3694050} {\bibfield {journal} {\bibinfo {journal}
{Applied Physics Letters}\ }\textbf {\bibinfo {volume} {100}} (\bibinfo
{year} {2012}),\ 10.1063/1.3694050}\BibitemShut {NoStop} \bibitem [{\citenamefont {Miri}\ and\ \citenamefont
{Golestanian}(2008)}]{Miri2008}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Miri}}\ and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Golestanian}},\ }\href {\doibase 10.1063/1.2898707} {\bibfield {journal}
{\bibinfo {journal} {Applied Physics Letters}\ }\textbf {\bibinfo {volume}
{92}} (\bibinfo {year} {2008}),\ 10.1063/1.2898707}\BibitemShut {NoStop} \bibitem [{\citenamefont {Messina}\ \emph {et~al.}(2009)\citenamefont
{Messina}, \citenamefont {Dalvit}, \citenamefont {{Maia Neto}}, \citenamefont
{Lambrecht},\ and\ \citenamefont {Reynaud}}]{Messina2009}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Messina}}, \bibinfo {author} {\bibfnamefont {D.~A.~R.}\ \bibnamefont
{Dalvit}}, \bibinfo {author} {\bibfnamefont {P.~A.}\ \bibnamefont {{Maia
Neto}}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Lambrecht}}, \
and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Reynaud}},\ }\href
{\doibase 10.1103/PhysRevA.80.022119} {\bibfield {journal} {\bibinfo
{journal} {Physical Review A}\ }\textbf {\bibinfo {volume} {80}},\ \bibinfo
{pages} {022119} (\bibinfo {year} {2009})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Dalvit}\ \emph {et~al.}(2008)\citenamefont {Dalvit},
\citenamefont {{Maia Neto}}, \citenamefont {Lambrecht},\ and\ \citenamefont
{Reynaud}}]{Dalvit2008}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.~A.~R.}\
\bibnamefont {Dalvit}}, \bibinfo {author} {\bibfnamefont {P.~A.}\
\bibnamefont {{Maia Neto}}}, \bibinfo {author} {\bibfnamefont
{A.}~\bibnamefont {Lambrecht}}, \ and\ \bibinfo {author} {\bibfnamefont
{S.}~\bibnamefont {Reynaud}},\ }\href {\doibase
10.1103/PhysRevLett.100.040405} {\bibfield {journal} {\bibinfo {journal}
{Physical Review Letters}\ }\textbf {\bibinfo {volume} {100}},\ \bibinfo
{pages} {040405} (\bibinfo {year} {2008})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Chen}\ \emph {et~al.}(2002)\citenamefont {Chen},
\citenamefont {Mohideen}, \citenamefont {Klimchitskaya},\ and\ \citenamefont
{Mostepanenko}}]{Chen2002}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont
{Chen}}, \bibinfo {author} {\bibfnamefont {U.}~\bibnamefont {Mohideen}},
\bibinfo {author} {\bibfnamefont {G.~L.}\ \bibnamefont {Klimchitskaya}}, \
and\ \bibinfo {author} {\bibfnamefont {V.~M.}\ \bibnamefont {Mostepanenko}},\
}\href {\doibase 10.1103/PhysRevLett.88.101801} {\bibfield {journal}
{\bibinfo {journal} {Physical Review Letters}\ }\textbf {\bibinfo {volume}
{88}},\ \bibinfo {pages} {4} (\bibinfo {year} {2002})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Rodrigues}\ \emph {et~al.}(2006)\citenamefont
{Rodrigues}, \citenamefont {Neto}, \citenamefont {Lambrecht},\ and\
\citenamefont {Reynaud}}]{Rodrigues2006}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {R.~B.}\ \bibnamefont
{Rodrigues}}, \bibinfo {author} {\bibfnamefont {P.~A.}\ \bibnamefont {Neto}},
\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Lambrecht}}, \ and\
\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Reynaud}},\ }\href
{\doibase 10.1103/PhysRevLett.96.100402} {\bibfield {journal} {\bibinfo
{journal} {Physical Review Letters}\ }\textbf {\bibinfo {volume} {96}}
(\bibinfo {year} {2006}),\ 10.1103/PhysRevLett.96.100402}\BibitemShut
{NoStop} \bibitem [{\citenamefont {Emig}\ \emph {et~al.}(2003)\citenamefont {Emig},
\citenamefont {Hanke}, \citenamefont {Golestanian},\ and\ \citenamefont
{Kardar}}]{Emig2003}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont
{Emig}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Hanke}},
\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Golestanian}}, \ and\
\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Kardar}},\ }\href
{\doibase 10.1103/PhysRevA.67.022114} {\bibfield {journal} {\bibinfo
\end{thebibliography}
\end{document}
\begin{document}
\title{Towards Automatic Stress Analysis using Scaled Boundary Finite Element Method with Quadtree Mesh of High-order Elements}
\author{Hou~Man\corref{cor1}}
\ead{[email protected]}
\cortext[cor1]{Corresponding author. Tel.: +612 93855030}
\author{Chongmin~Song, Sundararajan~Natarajan, Ean~Tat~Ooi, Carolin~Birk\corref{}}
\address{School of Civil and Environmental Engineering}
\address{The University of New South Wales, Sydney, NSW 2052, Australia}

\begin{abstract}
This paper presents a technique for stress and fracture analysis using the scaled boundary finite element method (SBFEM) with a quadtree mesh of high-order elements. The cells of the quadtree mesh are modelled as scaled boundary polygons that can have any number of edges, be of any high order and represent the stress singularity around a crack tip accurately without asymptotic enrichment or other special techniques. Owing to these features, a simple and automatic meshing algorithm is devised. No special treatment is required for the hanging nodes and no displacement incompatibility occurs. Curved boundaries and cracks are modelled without excessive local refinement. Five numerical examples are presented to demonstrate the simplicity and applicability of the proposed technique.
\end{abstract}

\begin{keyword}
scaled boundary finite-element method; quadtree mesh; high order elements; polygon elements
\end{keyword}

\maketitle
\section{Introduction}
Finite Element Analysis (FEA) is the most widely used analysis tool in Computer Aided Engineering (CAE). One key factor in achieving an accurate FEA is the layout of the finite element mesh, including both the mesh density and the element shape \citep{Yerry1983a}. Regions containing complex boundaries, rapid transitions between geometric features or singularities require finer discretisation \citep{Cheng1996,Greaves1999}. This has led to the development of adaptive meshing techniques that ensure the solution accuracy without sacrificing computational efficiency \citep{Tabarraei2005a,Lo2010}. The construction of a high-quality mesh generally takes up most of the analysis time \citep{Hughes2005}. The recent rapid development of isogeometric analysis \citep{Hughes2005,Nguyen-Thanh2011a,Simpson2013}, which dispenses with the meshing process, has emphasised the significance of mesh automation in engineering design and analysis.
In FEA, the quadtree is a hierarchical tree-based technique for the adaptive meshing of a 2D geometry \citep{Greaves1999}. It discretises the geometry into a number of square cells of different sizes. The process is illustrated in Fig.\,\ref{fig:qtreerep1} using a circular domain. The geometry is first covered with a single square cell, also known as the root cell of the quadtree (Fig.\,\ref{fig:qtreerep1}a). As shown in Fig.\,\ref{fig:qtreerep1}b, the root cell is subdivided into 4 equal-sized square cells and each of the cells is recursively subdivided to refine the mesh until certain termination criteria are reached. In this example, a cell is subdivided to better represent the boundary of the circle and the subdivision stops when the predefined maximum number of divisions is reached. The final mesh is obtained after deleting all the cells outside the domain (Fig.\,\ref{fig:qtreerep1}c). The cell information is stored in a tree-type data structure, in which the root cell is at the highest level. It is common practice to limit the maximum difference of the division levels between two adjacent cells to one \citep{Yerry1983a,GVS2001}. This is referred to as the $2:1$ rule and the resulting mesh is called a balanced \citep{GVS2001} or restricted quadtree mesh \citep{Tabarraei2005a}. A balanced quadtree mesh not only ensures that there is no large size difference between adjacent cells, but also reduces the types of quadtree cells in a mesh to the 6 shown in Fig.\,\ref{fig:qtreecell}. Owing to its simplicity and large degree of flexibility, the quadtree mesh is also used in large-scale flood/tsunami simulations \citep{Liang2008,Popinet2011} and image processing \citep{Morvan2007}.
\begin{figure}
\caption{Generation of quadtree mesh on a circular domain. (a) Cover the domain with a square root cell (b) Subdivide the square cells (c) Select the cells based on the domain boundary}
\label{fig:qtreerep1}
\end{figure} \begin{figure}
\caption{6 main types of master quadtree cells with $2:1$ rule enforced}
\label{fig:qtreecell}
\end{figure}
It is, however, not straightforward to integrate a quadtree mesh into an FEA. The two major issues are illustrated by Fig.\,\ref{fig:qtreerep}, which shows the quadtree mesh of the top-right quadrant of the circular domain in Fig.\,\ref{fig:qtreerep1}. \begin{enumerate} \item \emph{Hanging nodes} Middle nodes, shown as solid dots in Fig.\,\ref{fig:qtreerep}, exist at the common edges between adjacent cells with different division levels. When conventional quadrilateral finite elements are used, a middle node is connected to the two smaller elements (lower level) but not to the larger element (higher level). This leads to incompatible displacements along the edges, and the middle nodes are called hanging nodes \citep{Greaves1999}. \item \emph{Fitting of curved boundary} Quadtree cells are composed of horizontal and vertical lines only. As shown in Fig.\,\ref{fig:qtreerep}, the quadtree cells intersected by the curved boundary have to be further divided into smaller ones to improve the fitting of the boundary. Generally, the mesh has to be refined in the area surrounding the boundary. Despite this, the boundary may still not be smooth (Fig.\,\ref{fig:qtreerep1}c) and may result in unrealistically high stresses. An additional procedure is required to conform the mesh to the boundary. \end{enumerate} There exist a number of different approaches to ensure displacement compatibility when hanging nodes are present \citep{Ebeida2010,Legrain2011,Tabarraei2008a,Ainsworth2007}. Three typical approaches are briefly discussed here. The first one is to subdivide the higher level quadtree cells next to a hanging node into smaller triangular elements \citep{Yerry1983a,Bern1994,Alyavuz2009}, as shown in Fig.\,\ref{fig:qtreerep}. Additional nodes may be added to improve the mesh quality and/or reduce the number of element types. These techniques lead to a final mesh that only contains conforming triangular elements. A similar approach was adopted by \citet{Ebeida2010}, in which the quadtree mesh was subdivided into a conforming mesh dominated by quadrilateral elements.
The second approach introduces special conforming shape functions \citep{Gupta1978} to ensure displacement compatibility. An early work by Gupta \citep{Gupta1978} reported the development of a transition element that had an additional node along its side. A conforming set of shape functions was derived based on the shape functions of the bilinear quadrilateral elements. Owing to its simplicity and applicability, Gupta's work was further extended by \citet{Mcdill1987} and \citet{Lo2010} to hexahedral elements. Fries et al. \citep{Fries2011} investigated two approaches to handle the hanging nodes within the framework of the extended finite element method (XFEM). The two approaches differed in whether the enriched degrees-of-freedom (DOFs) were assigned to the hanging node. A similar work was reported by Legrain et al. \citep{Legrain2011}, in which selected DOFs were enriched and properly constrained to ensure the continuity of the field.
\begin{figure}
\caption{Quadtree mesh of the top-right quadrant of a circular domain. Demonstration of subdivision (dashed lines) is given in two quadtree cells with hanging nodes on their sides.}
\label{fig:qtreerep}
\end{figure}
The third approach is to model the quadtree cells as \emph{n-}sided polygon elements by treating hanging nodes as vertices of the polygon. This approach generally requires a set of polygonal basis functions. Special techniques are usually required to integrate the resulting equations over arbitrary polygonal domains \citep{Natarajan2009}. This development was initiated by \citet{wachspress1975rational}, who showed the use of rational basis functions for elements with an arbitrary number of sides. Tabarraei and Sukumar \citep{Tabarraei2005a,Tabarraei2007} adapted their polygon element \citep{Sukumar2004} to quadtree meshes. The set of polygonal basis functions was derived using the Laplace interpolant. By using an affine map on the reference polygon, the conforming shape functions of a quadtree cell with the same number of vertices (including the hanging nodes) were obtained. They also reported a fast technique for computing the global stiffness matrix, making use of the quadtree structure by defining parent elements \citep{Tabarraei2007}. In this way, the elemental stiffness matrix has to be computed only 15 times (4-node cell not included) for a balanced quadtree mesh (when the $2:1$ rule is enforced). Further development of their work with the XFEM was also reported in \citep{Tabarraei2008a}.
As mentioned in a recent paper \citep{Sukumar2013}, the development of high-order polygon elements has received relatively little attention. Milbradt and Pick \citep{Milbradt2008} devised high-order basis functions for polygons based on the natural element coordinates. Those basis functions, however, are not complete polynomials. Rand et al. \citep{Rand2013} developed a quadratic serendipity element for arbitrary convex polygons based on generalised barycentric coordinates. The potential of using their approach for higher-order serendipity elements on convex polygons was also reported. Based on the same approach, Sukumar \citep{Sukumar2013} recently developed quadratic serendipity shape functions that are applicable to convex and nonconvex polygons and are complete quadratic polynomials. The shape functions were obtained by solving an optimisation problem derived from the maximum-entropy principle.
Besides dealing with the hanging nodes in a quadtree mesh, fitting complex boundaries is another challenging part of the mesh generation. \citet{Yerry1983a} proposed trimming the quadtree cells intersected by the boundary into polygons before further subdividing them into triangles or quadrilaterals. Alternatively, those cells were first subdivided and some of the vertices were repositioned based on their projections onto the boundary \citep{Greaves1999,Alyavuz2009}. In \citet{Ebeida2010} and \citet{Liang2010}, after the subdivision, a buffer zone was introduced between the boundary and the internal quadtree cells. A compatible mesh was then constructed to fill up this zone. All these techniques require an additional optimisation step to ensure the final mesh quality. Within the framework of the XFEM, quadtree cells intersected by the boundary were not modified in the pre-processing stage \citep{Fries2011}. However, when constructing the stiffness matrix, the domain boundary is still required to identify the portion of the cell within the domain for numerical integration. In the integration process, that portion of the cell within the domain is either subdivided into geometric sub-cells \citep{Dreau2010} or treated as a polygon \citep{Natarajan2010}.
The scaled boundary finite element method (SBFEM) provides an attractive alternative technique to construct polygon elements (scaled boundary polygons) \citep{Ooi2012a,Ooi2013} (Fig.\,\ref{fig:sbfempolygon}). It is a semi-analytical procedure developed by Song and Wolf to solve boundary value problems \citep{song1997scaled}. The only requirement for a scaled boundary polygon is that its entire boundary is visible from the \emph{scaling centre} \citep{song1997scaled}. Only the edges of the polygon are discretised into line elements. The number of line elements on an edge can be as large as required. Any type of displacement-based line element, including high-order spectral elements \citep{vu2006use}, can be used. The domain of the scaled boundary polygon is constructed by scaling from its scaling centre to its boundary, and the solution within the polygon is expressed semi-analytically \citep{Ooi2012a,Ooi2013}. A salient feature of the scaled boundary polygons is that stress singularities occurring at crack and notch tips, formed by one or several materials, can be accurately modelled without resorting to asymptotic enrichment and local mesh refinement. Its high accuracy and flexibility in mesh generation lead to simple remeshing procedures when modelling crack propagation \citep{yang2006,Ooi2009,Ooi2010a,Ooi2013}.
\begin{figure}
\caption{ Scaled boundary representation of a polygon}
\label{fig:sbfempolygon}
\end{figure}
\begin{figure}
\caption{Scaled boundary representation of quadtree cells}
\label{fig:sbfequadtreerep}
\end{figure}
This paper presents a technique for stress and fracture analysis that integrates the scaled boundary finite element method (SBFEM) with a quadtree mesh of high-order elements. This integrated technique possesses the following features: \begin{enumerate} \item Hanging nodes are treated without cell subdivision. Each quadtree cell is modelled as a scaled boundary polygon as shown in Fig.\,\ref{fig:sbfequadtreerep}. The edges of a quadtree cell can be divided into more than one line element to ensure displacement compatibility with the adjacent smaller cells. Hanging nodes are thus treated the same as other nodes. Owing to the SBFE formulations \citep{Ooi2012a}, no additional procedure is required to compute the shape functions for the quadtree cells. High-order elements can also be used within each quadtree cell directly. \item The entire quadtree meshing process is simple and automatic. The boundary of the problem domain is defined using signed distance functions \citep{Talischi2012}. Only seed points \citep{Greaves1999} are required to be predefined to control the mesh density. Owing to the ability of the SBFEM to construct polygon elements of practically arbitrary shape and order, the quadtree cells trimmed by curved boundaries are simply treated as non-square scaled boundary polygons. High-order elements can be used to fit curved boundaries closely. The resulting mesh conforms to the boundary without excessive mesh refinement (see Fig.\,\ref{fig:sbfequadtreerep}). \item No local mesh refinement or asymptotic enrichment is required for a quadtree cell containing a crack tip to accurately model the stress singularity. \end{enumerate} The present paper is organised as follows. A summary of the SBFEM and its application to quadtree cells is first presented in the next section. It is followed by the developed quadtree mesh generation algorithm in Section\,\ref{sec:Quadtree-mesh-generation}. Five examples are given in Section\,\ref{sec:Numerical-examples} with a detailed discussion of accuracy and convergence. Finally, conclusions of the present work are stated in Section\,\ref{sec:Conclusion}.
\section{Scaled boundary finite element method on quadtree cells\label{sec:Scaled-boundary-finite}}
This section summarises the scaled boundary finite element method for 2D stress and fracture analysis. Only the key equations that are related to its use with a quadtree mesh are listed. A detailed derivation of the method based on a virtual work approach is given in \citet{deeks2002virtual}.
\subsection{Element formulation}
The SBFEM can be formulated on quadtree cells by treating each cell as a polygon with arbitrary number of sides (Fig.~\ref{fig:sbfequadtreerep}). In each cell, a local coordinate system $(\xi,\eta)$ is defined at a point called the scaling centre from which the entire boundary is visible. $\xi$ is the radial coordinate with $\xi=0$ at the scaling centre and $\xi=1$ at the cell boundary. The edges of each cell are discretised using one-dimensional finite elements with a local coordinate $\eta$ having an interval of $-1\leq\eta\leq1$. It is noted that the hanging nodes appearing in the quadtree structure do not require any special treatment in the SBFEM formulation. They are simply used as end nodes of the 1D elements.
The coordinate transformation between the Cartesian $(x,y)$ and the local $(\xi,\eta)$ coordinate systems is given by the scaled boundary transformation equation \citep{song1997scaled}: \begin{align} \mathbf{x}(\xi,\eta)= & \xi\mathbf{N}(\eta)\mathbf{x}_{\mathrm{b}}\label{eq:coordtrans} \end{align} where $\mathbf{x}(\xi,\eta)=[x(\xi,\eta)\; y(\xi,\eta)]^{\mathrm{T}}$ are the Cartesian coordinates of a point in the cell, $\mathbf{N}(\eta)$ is the shape function matrix and $\mathbf{x}_{\mathrm{b}}=[\begin{array}{ccccc} x_{1} & y_{1} & \ldots & x_{n} & y_{n}\end{array}]^{\mathrm{T}}$ is the vector of nodal coordinates of a cell with $n$ nodes.
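To make the mapping in Eq.~\eqref{eq:coordtrans} concrete, the following short Python sketch (illustrative only; the implementation described later in this paper is written in MATLAB) evaluates $\mathbf{x}(\xi,\eta)$ for a single two-node line element, assuming that the Cartesian coordinates are measured from the scaling centre as implied by Eq.~\eqref{eq:coordtrans}.
\begin{verbatim}
import numpy as np

def map_to_cartesian(xi, eta, xb):
    """Evaluate x(xi, eta) = xi * N(eta) * xb for one 2-node line element.

    xb : (4,) nodal coordinates [x1, y1, x2, y2] relative to the
         scaling centre (assumption: the scaling centre is the origin).
    """
    n1, n2 = 0.5 * (1.0 - eta), 0.5 * (1.0 + eta)   # linear shape functions
    N = np.array([[n1, 0.0, n2, 0.0],
                  [0.0, n1, 0.0, n2]])              # shape function matrix N(eta)
    return xi * N @ xb

# Example: element from (1, 0) to (1, 1); the point (xi=0.5, eta=0) lies
# halfway between the scaling centre and the element midpoint (1, 0.5).
print(map_to_cartesian(0.5, 0.0, np.array([1.0, 0.0, 1.0, 1.0])))  # -> [0.5, 0.25]
\end{verbatim}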
The displacement field in each cell $\mathbf{u}(\xi,\eta)$ is interpolated as \begin{align} \mathbf{u}(\xi,\eta)= & \mathbf{N}(\eta)\mathbf{u}(\xi)\label{eq:dispfield} \end{align}
\noindent where $\mathbf{u}(\xi)$ are radial displacement functions and are obtained by solving the scaled boundary finite element equation in displacement \citep{song1997scaled}: \begin{align} \mathbf{E}_{0}\xi^{2}\mathbf{u}(\xi)_{,\xi\xi}+(\mathbf{E}_{0}-\mathbf{E}_{1}+\mathbf{E}_{1}^{\mathrm{T}})\xi\mathbf{u}(\xi)_{,\xi}-\mathbf{E}_{2}\mathbf{u}(\xi)= & 0\label{eq:sbfedispeqn} \end{align}
\noindent with coefficient matrices \begin{align}
\mathbf{E}_{0}= & \int_{-1}^{+1}\mathbf{B}_{1}^{\mathrm{T}}(\eta)\,\mathbf{D}\,\mathbf{B}_{1}(\eta)\,|\mathbf{J}(\eta)|\, d\eta\label{eq:e0}\\
\mathbf{E}_{1}= & \int_{-1}^{+1}\mathbf{B}_{2}^{\mathrm{T}}(\eta)\,\mathbf{D}\,\mathbf{B}_{1}(\eta)\,|\mathbf{J}(\eta)|\, d\eta\label{eq:e1}\\
\mathbf{E}_{2}= & \int_{-1}^{+1}\mathbf{B}_{2}^{\mathrm{T}}(\eta)\,\mathbf{D}\,\mathbf{B}_{2}(\eta)\,|\mathbf{J}(\eta)|\, d\eta\label{eq:e2} \end{align}
\noindent where $\mathbf{D}$ is the material constitutive matrix,
$\mathbf{B}_{1}(\eta)$ and $\mathbf{B}_{2}(\eta)$ are the SBFEM strain-displacement matrices and $|\mathbf{J}(\eta)|$ is the Jacobian on the boundary required for coordinate transformation.
Eq.\,\eqref{eq:sbfedispeqn} is solved by introducing the variable $\mathbf{X}(\xi)$ \citep{wolf2003scaled} \begin{align} \mathbf{X}(\xi)= & [\begin{array}{cc} \mathbf{u}(\xi) & \quad\mathbf{q}(\xi)\end{array}]^{\mathrm{T}}\label{eq:Xksi} \end{align}
\noindent where \begin{align} \mathbf{q}(\xi)= & \mathbf{E}_{0}\xi\mathbf{u}(\xi)_{,\xi}+\mathbf{E}_{1}^{\mathrm{T}}\mathbf{u}(\xi)\label{eq:qksi} \end{align}
\noindent so that Eq.\,\eqref{eq:sbfedispeqn} is transformed into a first order ordinary differential equation with twice the number of unknowns: \begin{align} \xi\mathbf{X}(\xi)_{,\xi}= & -\mathbf{Z}\mathbf{X}(\xi)\label{eq:firstord} \end{align}
\noindent with the Hamiltonian matrix $\mathbf{Z}$ \citep{wolf2003scaled} \begin{align} \mathbf{Z}= & \left[\begin{array}{cc} \mathbf{E}_{0}^{-1}\mathbf{E}_{1}^{\mathrm{T}} & -\mathbf{E}_{0}^{-1}\\ \mathbf{E}_{1}\mathbf{E}_{0}^{-1}\mathbf{E}_{1}^{\mathrm{T}}-\mathbf{E}_{2} & -\mathbf{E}_{1}\mathbf{E}_{0}^{-1} \end{array}\right]\label{eq:hamilton} \end{align}
An eigenvalue decomposition of the $\mathbf{Z}$ results in \begin{align} \mathbf{Z}\left[\begin{array}{cc} \boldsymbol{\Phi}_{\mathrm{u}}^{\mathrm{(n)}} & \boldsymbol{\Phi}_{\mathrm{u}}^{\mathrm{(p)}}\\ \boldsymbol{\Phi}_{\mathrm{q}}^{\mathrm{(n)}} & \boldsymbol{\Phi}_{\mathrm{q}}^{\mathrm{(p)}} \end{array}\right]= & \left[\begin{array}{cc} \boldsymbol{\Phi}_{\mathrm{u}}^{\mathrm{(n)}} & \boldsymbol{\Phi}_{\mathrm{u}}^{\mathrm{(p)}}\\ \boldsymbol{\Phi}_{\mathrm{q}}^{\mathrm{(n)}} & \boldsymbol{\Phi}_{\mathrm{q}}^{\mathrm{(p)}} \end{array}\right]\left[\begin{array}{cc} \boldsymbol{\Lambda}^{\mathrm{(n)}} & 0\\ 0 & \boldsymbol{\Lambda}^{\mathrm{(p)}} \end{array}\right]\label{eq:eigendecomp} \end{align}
\noindent where $\boldsymbol{\Lambda}^{(\mathrm{n})}$ and $\boldsymbol{\Lambda}^{(\mathrm{p})}$ are the eigenvalue matrices with real parts satisfying $\mathrm{Re}(\lambda(\boldsymbol{\Lambda}^{(\mathrm{n})}))<0$ and $\mathrm{Re}(\lambda(\boldsymbol{\Lambda}^{(\mathrm{p})}))>0$, respectively. $\boldsymbol{\Phi}_{\mathrm{u}}^{(\mathrm{n})}$ and $\boldsymbol{\Phi}_{\mathrm{q}}^{(\mathrm{n})}$ are the corresponding eigenvectors of $\boldsymbol{\Lambda}^{(\mathrm{n})}$, whereas $\boldsymbol{\Phi}_{\mathrm{u}}^{(\mathrm{p})}$ and $\boldsymbol{\Phi}_{\mathrm{q}}^{(\mathrm{p})}$ are the eigenvectors corresponding to $\boldsymbol{\Lambda}^{(\mathrm{p})}$. For bounded domains such as those considered in this paper, only the eigenvalues satisfying $\mathrm{Re}(\lambda(\boldsymbol{\Lambda}^{(\mathrm{n})}))<0$ lead to finite displacements at the scaling centre. Using Eq.\,\eqref{eq:eigendecomp} and Eq.\,\eqref{eq:firstord}, the solutions for $\mathbf{u}(\xi)$ and $\mathbf{q}(\xi)$ are \begin{align} \mathbf{u}(\xi)= & \boldsymbol{\Phi}_{\mathrm{u}}^{\mathrm{(n)}}\xi^{-\boldsymbol{\Lambda}^{\mathrm{(n)}}}\mathbf{c}^{\mathrm{(n)}}\label{eq:uksisol}\\ \mathbf{q}(\xi)= & \boldsymbol{\Phi}_{\mathrm{q}}^{\mathrm{(n)}}\xi^{-\boldsymbol{\Lambda}^{\mathrm{(n)}}}\mathbf{c}^{\mathrm{(n)}}\label{eq:qksisol} \end{align}
The integration constants $\mathbf{c}^{(\mathrm{n})}$ in Eq.\,\eqref{eq:uksisol} and Eq.\,\eqref{eq:qksisol} are obtained from the nodal displacements at the cell boundary $\mathbf{u}_{\mathrm{b}}=\mathbf{u}(\xi=1)$ as \begin{align} \mathbf{c}^{(\mathrm{n})}= & \left(\boldsymbol{\Phi}_{\mathrm{u}}^{\mathrm{(n)}}\right)^{-1}\mathbf{u}_{\mathrm{b}}\label{eq:intcons} \end{align}
\noindent The stiffness matrix of each quadtree cell is formulated as \citep{wolf2003scaled} \begin{align} \mathbf{K}= & \boldsymbol{\Phi}_{\mathrm{q}}^{\mathrm{(n)}}\left(\boldsymbol{\Phi}_{\mathrm{u}}^{\mathrm{(n)}}\right)^{-1}\label{eq:stf} \end{align}
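In practice, Eqs.~\eqref{eq:hamilton}--\eqref{eq:stf} amount to a small dense eigenvalue problem per cell. The following Python sketch is a minimal illustration (not the authors' MATLAB implementation); it assumes that the coefficient matrices $\mathbf{E}_0$, $\mathbf{E}_1$ and $\mathbf{E}_2$ of a cell are already available, that $\mathbf{Z}$ is diagonalisable and that rigid-body motion is constrained so that no eigenvalues lie on the imaginary axis.
\begin{verbatim}
import numpy as np

def sbfem_cell_stiffness(E0, E1, E2):
    """Stiffness matrix of one scaled boundary cell.

    E0, E1, E2 : (n, n) coefficient matrices of the cell, where n is the
    number of DOFs on the cell boundary. Assumes Z is diagonalisable and
    has no purely imaginary or zero eigenvalues.
    """
    n = E0.shape[0]
    E0inv = np.linalg.inv(E0)
    # Hamiltonian matrix Z
    Z = np.block([[E0inv @ E1.T,             -E0inv],
                  [E1 @ E0inv @ E1.T - E2,   -E1 @ E0inv]])
    lam, Phi = np.linalg.eig(Z)
    neg = np.real(lam) < 0.0          # modes with Re(lambda) < 0 (bounded domain)
    Phi_u = Phi[:n, neg]              # displacement part of the eigenvectors
    Phi_q = Phi[n:, neg]              # force part of the eigenvectors
    K = Phi_q @ np.linalg.inv(Phi_u)  # K = Phi_q * inv(Phi_u)
    # imaginary parts of K are numerical round-off for a real-valued problem
    return np.real(K), lam[neg], Phi_u
\end{verbatim}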
\noindent Substituting Eq.\,\eqref{eq:uksisol} into Eq.\,\eqref{eq:dispfield}, the displacement field in a cell is \begin{align} \mathbf{u}(\xi,\eta)= & \mathbf{N}(\eta)\boldsymbol{\Phi}_{\mathrm{u}}^{\mathrm{(n)}}\xi^{-\boldsymbol{\Lambda}^{\mathrm{(n)}}}\mathbf{c}^{\mathrm{(n)}}\label{eq:dispfieldsol} \end{align}
\noindent Using Hooke's law and the strain-displacement relationship, the stress at a point in a cell is \citep{wolf2003scaled} \begin{align} \boldsymbol{\sigma}(\xi,\eta)= & \boldsymbol{\Psi}_{\sigma}(\eta)\xi^{-\boldsymbol{\Lambda}^{\mathrm{(n)}}-\mathbf{I}}\mathbf{c}^{\mathrm{(n)}}\label{eq:stresfield} \end{align}
\noindent where $\boldsymbol{\Psi}_{\sigma}(\eta)=\left[\begin{array}{ccc} \boldsymbol{\Psi}_{\sigma_{xx}}(\eta) & \boldsymbol{\Psi}_{\sigma_{yy}}(\eta) & \boldsymbol{\Psi}_{\tau_{xy}}(\eta)\end{array}\right]^{\mathrm{T}}$ is the stress mode \begin{align} \boldsymbol{\Psi}_{\sigma}(\eta)= & \mathbf{D}\left(-\mathbf{B}_{1}(\eta)\boldsymbol{\Phi}_{\mathrm{u}}^{\mathrm{(n)}}\boldsymbol{\Lambda}^{\mathrm{(n)}}+\mathbf{B}_{2}(\eta)\boldsymbol{\Phi}_{\mathrm{u}}^{\mathrm{(n)}}\right)\label{eq:stresmod} \end{align}
\subsection{Evaluation of stress intensity factors}
Fig.\,\ref{fig:crackrep} shows how a crack is modelled with a quadtree cell. The crack tip is chosen as the scaling centre. The crack surfaces are not discretised. The line elements discretising the cell boundary do not form a closed loop.
\begin{figure}
\caption{Modelling of a crack with the scaled boundary finite element method.}
\label{fig:crackrep}
\end{figure}
When a crack is modelled by the SBFEM, two eigenvalues $\lambda_{i}$, $i=1,\,2$, satisfying $-1<\mathrm{Re}(\lambda_{i})\leq0$ appear in $\boldsymbol{\Lambda}^{(\mathrm{n})}$. From Eq.\,\eqref{eq:stresfield}, it can be seen that these eigenvalues lead to a stress singularity as $\xi\rightarrow0$. Using the two modes corresponding to these two eigenvalues, the singular stresses are expressed as \begin{align} \boldsymbol{\sigma}(\xi,\eta)= & \boldsymbol{\Psi}_{\sigma}^{\mathrm{(s)}}(\eta)\xi^{-\boldsymbol{\Lambda}^{\mathrm{(s)}}-\mathbf{I}}\mathbf{c}^{\mathrm{(s)}}\label{eq:singstrefield} \end{align}
\noindent where \begin{align} \boldsymbol{\Lambda}^{(\mathrm{s})}= & \left[\begin{array}{cc} \lambda_{1} & 0\\ 0 & \lambda_{2} \end{array}\right]\label{eq:singeigenval} \end{align}
\noindent and $\mathbf{c}^{(\mathrm{s})}$ are the integration constants corresponding to $\boldsymbol{\Lambda}^{(\mathrm{s})}$. The singular stress modes $\boldsymbol{\Psi}_{\sigma}^{(\mathrm{s})}(\eta)=\left[\begin{array}{ccc} \boldsymbol{\Psi}_{\sigma_{xx}}^{(\mathrm{s})}(\eta) & \boldsymbol{\Psi}_{\sigma_{yy}}^{(\mathrm{s})}(\eta) & \boldsymbol{\Psi}_{\tau_{xy}}^{(\mathrm{s})}(\eta)\end{array}\right]^{\mathrm{T}}$ are written as \begin{align} \boldsymbol{\Psi}_{\sigma}^{\mathrm{(s)}}(\eta)= & \mathbf{D}\left(-\mathbf{B}_{1}(\eta)\boldsymbol{\Phi}_{\mathrm{u}}^{\mathrm{(s)}}\boldsymbol{\Lambda}^{\mathrm{(s)}}+\mathbf{B}_{2}(\eta)\boldsymbol{\Phi}_{\mathrm{u}}^{\mathrm{(s)}}\right)\label{eq:singstremode} \end{align}
\noindent where $\boldsymbol{\Phi}_{\mathrm{u}}^{(\mathrm{s})}$ are the modal displacements in $\boldsymbol{\Phi}_{\mathrm{u}}^{(\mathrm{n})}$ corresponding to $\boldsymbol{\Lambda}^{(\mathrm{s})}$.
The stress intensity factors can be computed directly from their definitions. For a crack that is aligned with the Cartesian coordinate system as shown in Fig.\,\ref{fig:crackrep}, the stress intensity factors are defined as \begin{align} \left\{ \begin{array}{c} K_{\mathrm{I}}\\ K_{\mathrm{II}} \end{array}\right\} = & \lim_{r\rightarrow0}\left\{ \begin{array}{c}
\sqrt{2\pi r}\left.\sigma_{yy}\right|_{\theta=0}\\
\sqrt{2\pi r}\left.\tau_{xy}\right|_{\theta=0} \end{array}\right\} \label{eq:sifdef} \end{align}
\noindent Substituting the stress components in Eq.\,\eqref{eq:singstrefield} into Eq.\,\eqref{eq:sifdef} and using the relation $\xi=r/L_{\mathrm{A}}$ ($L_{\mathrm{A}}$ is the distance from the scaling centre to the boundary along the direction of the crack, see Fig.\,\ref{fig:crackrep}) at $\theta=0$ leads to \begin{align} \left\{ \begin{array}{c} K_{\mathrm{I}}\\ K_{\mathrm{II}} \end{array}\right\} = & \sqrt{2\pi L_{\mathrm{A}}}\left\{ \begin{array}{c} \boldsymbol{\Psi}_{\sigma_{yy}}^{\mathrm{(s)}}(\eta(\theta=0))\mathbf{c}^{\mathrm{(s)}}\\ \boldsymbol{\Psi}_{\tau_{xy}}^{\mathrm{(s)}}(\eta(\theta=0))\mathbf{c}^{\mathrm{(s)}} \end{array}\right\} \label{eq:sifsbfe} \end{align}
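The evaluation of Eq.~\eqref{eq:sifsbfe} is a direct post-processing step once the cell containing the crack tip has been solved. The Python sketch below is illustrative only and assumes that the eigenvalues, the integration constants and the singular stress modes evaluated at $\eta(\theta=0)$ have already been extracted from the cell solution.
\begin{verbatim}
import numpy as np

def stress_intensity_factors(lam, c, psi_syy_0, psi_txy_0, L_A):
    """Evaluate K_I and K_II from the stress modes at theta = 0.

    lam       : eigenvalues of the cracked cell (array)
    c         : integration constants c^(n), same ordering as lam
    psi_syy_0 : stress-mode values Psi_sigma_yy at eta(theta=0), per mode
    psi_txy_0 : stress-mode values Psi_tau_xy  at eta(theta=0), per mode
    L_A       : distance from the scaling centre to the boundary along the crack
    """
    # singular modes: the two eigenvalues with -1 < Re(lambda) < 0 at a crack tip
    s = np.where((np.real(lam) > -1.0) & (np.real(lam) < 0.0))[0]
    factor = np.sqrt(2.0 * np.pi * L_A)
    K_I  = factor * np.real(psi_syy_0[s] @ c[s])
    K_II = factor * np.real(psi_txy_0[s] @ c[s])
    return K_I, K_II
\end{verbatim}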
\section{Quadtree mesh generation\label{sec:Quadtree-mesh-generation}}
This section presents the developed algorithm for quadtree mesh generation. Fig.\,\ref{flowchart} shows the flow chart of the overall process. The entire generation process is automatic, with a minimal number of inputs required from the user, which include: \begin{itemize} \item Maximum allowed number of seed points in a cell $(s_{max})$, \item Seed points on each boundary $(s_{b})$ and region of interest $(s_{roi})$, \item Maximum difference between the division levels of adjacent cells $(d_{max})$, which is equal to 1 for a balanced quadtree mesh. \end{itemize} \begin{figure}
\caption{Flow chart of the quadtree mesh generation.}
\label{flowchart}
\end{figure} This section is organised based on Fig\,\ref{flowchart}. It first presents defining geometry using signed distance function, and assigning seed points on the boundary and the regions of interest. Detailed explanations of the meshing steps, which include generating the initial quadtree grid, trimming the boundary quadtree cells into polygons and merging cells surrounding a crack tip, are then followed. To facilitate the description of the meshing steps, Fig.\,\ref{qtreedes0} shows a square plate with a circular hole and two local refinement features to be used as an example throughout this section. An efficient computation of the global stiffness matrix, by taking advantage on the quadtree mesh, is described at the end of this section.
\begin{figure}
\caption{Example to illustrate the quadtree mesh generation process: a square plate with a circular hole. An additional circle and an inclined line (dashed lines) are included to control local mesh density.}
\label{qtreedes0}
\end{figure}
\subsection{Define geometry using signed distance function}
The geometry is defined by using the signed distance function \citep{Persson2004}. It provides all the essential information about a geometry and can be combined through simple Boolean operations to build up more complex geometries \citep{Talischi2012}. The signed distance function of a point $\mathbf{x}\in\mathbb{R}^{2}$ associated with a domain $\Omega$, which is a subset of $\mathbb{R}^{2}$, is given as
\begin{equation} d_{\Omega}(\mathbf{x})=s_{\Omega}(\mathbf{x})\min_{\mathbf{y}\in\partial\Omega}\left\Vert \mathbf{x}-\mathbf{y}\right\Vert , \end{equation} where $\partial\Omega$ represents the boundary of the domain and $\left\Vert \mathbf{x}-\mathbf{y}\right\Vert $ is the \emph{Euclidean norm} in $\mathbb{R}^{2}$ with $\mathbf{y}\in\partial\Omega$. The sign function $s_{\Omega}(\mathbf{x})$ is equal to $-1$ when $\mathbf{x}$ lies inside the domain and is equal to $1$ otherwise. This definition of the signed distance function is visualised in Fig.\,\ref{dispfunc}. A number of distance functions in MATLAB for simple geometries are given in \citet{Talischi2012}, including their Boolean operations.
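As an illustration of this concept, the Python sketch below (illustrative only; the actual implementation uses the MATLAB functions of \citet{Talischi2012}) defines signed distance functions for a rectangle and a circle and combines them with a Boolean difference to describe a plate with a hole. The dimensions in the example call are arbitrary.
\begin{verbatim}
import numpy as np

def d_circle(p, xc, yc, r):
    """Signed distance to a circle of radius r centred at (xc, yc)."""
    return np.hypot(p[:, 0] - xc, p[:, 1] - yc) - r

def d_rectangle(p, x1, x2, y1, y2):
    """Signed distance to the rectangle [x1,x2] x [y1,y2]
    (exact only near the edges, which is sufficient for cell trimming)."""
    return -np.minimum(np.minimum(p[:, 1] - y1, y2 - p[:, 1]),
                       np.minimum(p[:, 0] - x1, x2 - p[:, 0]))

def d_difference(d1, d2):
    """Boolean difference: points inside d1 but outside d2."""
    return np.maximum(d1, -d2)

# Unit square plate with a central hole of radius 0.2 (arbitrary dimensions):
p = np.array([[0.5, 0.5], [0.5, 0.75], [0.0, 0.0], [1.5, 0.5]])
d = d_difference(d_rectangle(p, 0.0, 1.0, 0.0, 1.0),
                 d_circle(p, 0.5, 0.5, 0.2))
print(d)   # negative inside the plate, positive in the hole and outside
\end{verbatim}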
\begin{figure}
\caption{Signed distance function of the points inside the domain ($\mathbf{x}_{1}$), on the boundary ($\mathbf{x}_{2}$) and outside the domain ($\mathbf{x}_{3}$ and $\mathbf{x}_{4}$)}
\label{dispfunc}
\end{figure}
For each boundary and region of interest, a set of pre-defined seed points \citep{Greaves1999} is introduced to control the quadtree mesh density. Four sets of predefined seed points are required for the example in Fig.\,\ref{qtreedes0}. Two sets are for the square and the circular hole representing the actual domain boundary. The number of seed points directly controls the local density of the quadtree cells and the quality of fitting the boundary. This is further discussed in Section\,\ref{sub:Polygon-boundary-cells}. The other two sets are for the large circle and the inclined line, which control the local mesh density only.
\subsection{Initialise quadtree grid}
The meshing process starts with covering the problem domain with a single square cell (the root cell). The dimension of the root cell is based on the larger of the maximum vertical and maximum horizontal dimensions of the geometry. The developed algorithm checks the number of seed points in each cell. If the number is larger than the predefined maximum allowed number, the cell is divided into 4 equal-sized cells. This generation process is applied recursively until no cell contains more seed points than the predefined value. In each recursive loop, the maximum difference between the division levels of adjacent cells $(d_{max})$ is enforced: for cells whose division level differs from that of an adjacent cell by more than $d_{max}$, the higher level (larger) cell is subdivided into 4 equal-sized cells. Fig.\,\ref{qtreedes1} shows the initial quadtree grid of the example in Fig.\,\ref{qtreedes0}.
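The following Python sketch is a minimal, illustrative version of this step (the data structure and function names are assumptions of this sketch, not those of the actual MATLAB implementation): cells holding more than $s_{max}$ seed points are split recursively, and a subsequent balancing pass enforces the $d_{max}$ rule between edge-adjacent leaf cells.
\begin{verbatim}
import numpy as np
from dataclasses import dataclass, field

@dataclass
class Cell:
    x: float                      # lower-left corner
    y: float
    size: float                   # edge length
    level: int                    # division level (root = 0)
    children: list = field(default_factory=list)

    def contains(self, pts):
        return ((pts[:, 0] >= self.x) & (pts[:, 0] <= self.x + self.size) &
                (pts[:, 1] >= self.y) & (pts[:, 1] <= self.y + self.size))

    def split(self):
        h = 0.5 * self.size
        self.children = [Cell(self.x + i * h, self.y + j * h, h, self.level + 1)
                         for i in (0, 1) for j in (0, 1)]
        return self.children

def leaves(cell):
    if not cell.children:
        return [cell]
    return [lf for ch in cell.children for lf in leaves(ch)]

def edge_adjacent(a, b):
    # cells sharing part of an edge (corner contact does not count)
    tol = 1e-12
    touch_x = abs(a.x + a.size - b.x) < tol or abs(b.x + b.size - a.x) < tol
    touch_y = abs(a.y + a.size - b.y) < tol or abs(b.y + b.size - a.y) < tol
    olap_x = min(a.x + a.size, b.x + b.size) - max(a.x, b.x) > tol
    olap_y = min(a.y + a.size, b.y + b.size) - max(a.y, b.y) > tol
    return (touch_x and olap_y) or (touch_y and olap_x)

def generate_grid(root, seeds, s_max, d_max=1, max_level=10):
    """Subdivide cells containing more than s_max seed points, then enforce
    the d_max rule (d_max = 1 gives a balanced, 2:1 quadtree grid)."""
    stack = [root]
    while stack:
        c = stack.pop()
        if c.level < max_level and np.count_nonzero(c.contains(seeds)) > s_max:
            stack.extend(c.split())
    changed = True
    while changed:                       # balancing pass, repeated until stable
        changed = False
        lf = leaves(root)
        for a in lf:
            if any(b.level - a.level > d_max and edge_adjacent(a, b) for b in lf):
                a.split()                # split the larger (coarser) cell
                changed = True
    return leaves(root)
\end{verbatim}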
\begin{figure}
\caption{Initial quadtree grid of the example in Fig.~\,\ref{qtreedes0}. Vertices with solid square markers are on the boundary, with square box markers are inside the domain, and without any markers are outside the domain.}
\label{qtreedes1}
\end{figure}
\subsection{Trim boundary cells into polygons\label{sub:Polygon-boundary-cells}}
The initial quadtree grid shown in Fig.\,\ref{qtreedes1} does not conform to the boundary. The cells that have edges intersected by the boundary need to be identified and trimmed. By using the signed distance function, the locations of the vertices (inside the domain, on the domain boundary or outside the domain, as shown in Fig.\,\ref{qtreedes1}) are identified based on the sign and value of the function. Edges whose two vertices have function values of opposite signs are identified as the edges intersected by the boundary. For each of those edges, the intersection point with the boundary is computed.
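The intersection point can be located by any one-dimensional root finder applied to the signed distance function along the edge; the Python sketch below uses bisection, which is one simple choice and is not prescribed by the method.
\begin{verbatim}
import numpy as np

def edge_boundary_intersection(dist_fn, p_in, p_out, tol=1e-10, max_iter=100):
    """Locate the point where an edge crosses the boundary d(x) = 0 by
    bisection. p_in and p_out are the edge end points with d(p_in) < 0
    (inside the domain) and d(p_out) > 0 (outside)."""
    a, b = np.asarray(p_in, float), np.asarray(p_out, float)
    for _ in range(max_iter):
        m = 0.5 * (a + b)
        dm = dist_fn(m)
        if abs(dm) < tol:
            break
        if dm < 0.0:
            a = m          # midpoint still inside: move the inner end
        else:
            b = m          # midpoint outside: move the outer end
    return 0.5 * (a + b)

# Example with a circular domain of radius 0.2 centred at (0.5, 0.5):
d = lambda p: np.hypot(p[0] - 0.5, p[1] - 0.5) - 0.2
print(edge_boundary_intersection(d, (0.5, 0.6), (0.5, 0.8)))  # -> [0.5, 0.7]
\end{verbatim}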
Some quadtree cells could have vertices very close to the boundary in comparison with the lengths of their edges. After trimming, poorly shaped polygon cells with some edges much shorter than the others could be generated, which may adversely affect the mesh quality. To avoid this situation, the vertices that are within a threshold distance from the boundary are identified and then moved to their closest points on the boundary. In the present work, $1/10$ of the length of the cell edge (based on the smallest cell attached to the vertex) is used as the threshold value. The edges connecting to these vertices will no longer be cut by the boundary. The trade-off of this process is the presence of additional non-square cells that lead to additional computation of the stiffness matrix. This is discussed in Section\,\ref{sub:An-efficient-assembly}.
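A possible realisation of this snapping step is sketched below in Python (illustrative only); the closest boundary point is approximated by a single projection step along the numerical gradient of the signed distance function, which is one convenient way of implementing the projection described above.
\begin{verbatim}
import numpy as np

def snap_vertices_to_boundary(verts, dist_fn, h_min, ratio=0.1, eps=1e-7):
    """Move vertices closer than ratio*h_min to the boundary onto it.

    verts   : (n, 2) vertex coordinates
    dist_fn : vectorised signed distance function d(p) for (n, 2) input
    h_min   : smallest edge length of the cells attached to each vertex
    """
    verts = np.array(verts, float)
    d = dist_fn(verts)
    close = np.abs(d) < ratio * np.asarray(h_min)
    if np.any(close):
        # central-difference gradient of the distance function
        dx = (dist_fn(verts + [eps, 0.0]) - dist_fn(verts - [eps, 0.0])) / (2 * eps)
        dy = (dist_fn(verts + [0.0, eps]) - dist_fn(verts - [0.0, eps])) / (2 * eps)
        g = np.stack([dx, dy], axis=1)
        g /= np.maximum(np.linalg.norm(g, axis=1, keepdims=True), 1e-30)
        verts[close] -= d[close, None] * g[close]    # p <- p - d(p) * grad d(p)
    return verts

# Example: a vertex just outside a circular boundary of radius 0.2 is snapped
d = lambda P: np.hypot(P[:, 0] - 0.5, P[:, 1] - 0.5) - 0.2
print(snap_vertices_to_boundary([[0.5, 0.71]], d, h_min=0.25))  # -> ~[0.5, 0.70]
\end{verbatim}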
\begin{figure}
\caption{Model curved boundary by quadtree refinement or using high-order elements. Nodes are represented with small circles along the cell edges.}
\label{qtreedes2}
\end{figure}
At the end of the trimming process, the edges of a cell cut by the boundary are updated with the intersection points and the enclosed segment of the boundary is added to the cell. This results in polygon cells. After trimming the quadtree in Fig.\,\ref{qtreedes1}, the polygon cells around the hole of the example problem are shown in Fig.\,\ref{qtreedes2}. It is clear from Fig.\,\ref{qtreedes2} that the circular boundary is not represented accurately if a single linear element is used on the edge of the cell.
In order to represent the curved boundary more accurately, two alternatives are available in the developed algorithm. The first is to reduce the element size ($h$-refinement). This is achieved by increasing the number of seed points on the curved boundary. Fig.\,\ref{qtreedes3} shows the initial quadtree layout of the example problem after increasing the number of seed points around the hole by a factor of 4. It can be seen by comparing Fig.\,\ref{qtreedes1} with Fig.~\ref{qtreedes3} that the refinement is limited to a small region around the hole. The refined quadtree (Fig.\,\ref{qtreedes2}) demonstrates the improvement in capturing the circular boundary.
\begin{figure}
\caption{Quadtree mesh after refinement}
\label{qtreedes3}
\end{figure}
The second option to improve the modelling of curved boundaries is to utilise high-order elements ($p$-refinement). Fig.\,\ref{qtreedes2} shows the example problem with each line segment on the circular boundary modelled with a 4th order element. With this approach, curved boundaries can be captured more accurately using fewer elements.
Both options to improve the modelling of the boundaries can be applied simultaneously without conflicts. The numerical accuracy of both approaches is discussed through numerical examples given in Section\,\ref{sec:Numerical-examples}.
\subsection{Merge cells surrounding a crack tip}
Owing to the capability of the SBFEM for fracture analysis \citep{Song2002}, the domain containing a crack tip is modelled with a single cell. In the stress solution, the variation along the radial direction, including the stress singularity, is given analytically and the variation along the circumference of the cell is represented numerically by the line elements on the boundary. To obtain accurate results, sufficient nodes have to be present on the boundary of the cell to cover the angular variation of the solution. In the developed algorithm, the size of a cell containing a crack tip is controlled, as shown in Fig.\,\ref{qtreedes8} with an inclined crack, by a predefined set of seed points on a circle.
\begin{figure}
\caption{Quadtree mesh for a crack problem before and after merging cells. The two crack tips are marked with a cross. The two circles are to control the size of quadtree cells covering the crack tips.}
\label{qtreedes8}
\end{figure} For problems with cracks, only one additional step is required after the initial mesh is generated. The cells surrounding the crack tip are refined to the same division level and then merged into a single cell as shown in Fig.\,\ref{qtreedes8}. This step avoids having a crack tip too close to the edges of the cell, which could affect the mesh quality and the solution accuracy \citep{Ooi2010a}. After the cells are merged, the intersection point between the edge of the resulting cell and the crack is computed to define the two crack mouth points. The other cells on the crack path are split by the crack into two cells. The splitting process is similar to the trimming of cells by the boundary, but two vertices are created at every intersection point between the cell edge and the crack to split the original cells.
\subsection{An efficient construction of the global stiffness matrix\label{sub:An-efficient-assembly}}
The global stiffness matrix is simply the assembly of the stiffness matrices of the master quadtree and polygon cells. When the $2:1$ rule is enforced on the mesh, only 6 main types of master quadtree cells are present, as given in Fig.\,\ref{fig:qtreecell}. By rotating the geometry of the master cells orthogonally, the maximum number of types of these master quadtree cells is 24. For isotropic homogeneous materials, rotation has no effect on 4-node or 8-node cells and only 2 rotations are required for the first type of 6-node cell (the top one in Fig.\,\ref{fig:qtreecell}). The maximum number of master quadtree cells that require stiffness matrix calculation thus reduces to 16 (only 15 in \citet{Tabarraei2007} as the 4-node cell is excluded).
After the mesh generation, the algorithm will check which master cells out of the 16 appear in the mesh. Their stiffness matrices are then computed and stored. During the stiffness assembling process, the stiffness matrix of each regular quadtree cell is directly extracted from those computed stiffness matrices. For the polygon cells and those irregular quadtree cells (with their vertices moved to fit the boundaries), individual stiffness matrix calculation is required. This approach clearly improves the computational efficiency of constructing the global stiffness matrix, especially for large scale problems that contain a significant number of cells. With the use of high-order elements in the quadtree mesh, this assembling approach becomes even more economical.
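The assembly strategy can be summarised by the following Python sketch (illustrative only; the field names and the triplet-based sparse assembly are assumptions of this sketch, not the authors' data structures): the stiffness matrix of each regular master cell type is computed once and cached, whereas polygon and irregular cells are computed individually.
\begin{verbatim}
import numpy as np
from scipy.sparse import coo_matrix

def assemble_global_stiffness(cells, n_dof, cell_stiffness):
    """Assemble the global stiffness matrix, reusing one matrix per master type.

    cells : list of dicts with
              'dofs' -> global DOF indices of the cell
              'type' -> hashable master-cell key, or None for irregular
                        (trimmed/polygon) cells that need their own matrix
    cell_stiffness(cell) -> cell stiffness matrix (e.g. the SBFEM solution
                            sketched earlier).
    """
    rows, cols, vals = [], [], []
    cache = {}
    for cell in cells:
        key = cell['type']
        if key is None:
            Ke = cell_stiffness(cell)          # irregular/polygon cell
        else:
            if key not in cache:
                cache[key] = cell_stiffness(cell)
            Ke = cache[key]                    # reuse cached master-cell matrix
        idx = np.asarray(cell['dofs'])
        r, c = np.meshgrid(idx, idx, indexing='ij')
        rows.append(r.ravel())
        cols.append(c.ravel())
        vals.append(np.asarray(Ke).ravel())
    K = coo_matrix((np.concatenate(vals),
                    (np.concatenate(rows), np.concatenate(cols))),
                   shape=(n_dof, n_dof)).tocsr()   # duplicate entries are summed
    return K
\end{verbatim}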
\section{Numerical examples\label{sec:Numerical-examples}}
This section presents five numerical examples to highlight the capability and the performance of the proposed technique. In the first example, an infinite plate with a circular hole is modelled and the results are compared with the analytical solution. The proposed technique is then used to analyse a square plate with multiple holes to highlight the automatic meshing capability in handling the transition between geometric features. In the third example, a square plate with a central hole and multiple cracks is studied to demonstrate the performance of the proposed technique in handling complicated geometries with singularities. Thereafter, a square plate with two cracks crossing each other is analysed; this example emphasises the automation and simplicity of the mesh generation in the proposed technique. In the first four examples, the same material properties, with Young's modulus $E=100$ and Poisson's ratio $v=0.3$, are used. The final example is a cracked nuclear reactor under internal pressure, which shows the simplicity of the present technique in modelling practical non-regular structures.
The computation time reported in this section is based on a desktop PC with Intel(R) Core(TM) i7 3.40GHz CPU and 16GB of memory. The proposed technique is implemented in MATLAB and the computation time is extracted in interactive mode of MATLAB.
\subsection{Infinite plate with a circular hole under uniaxial tension}
\subsubsection{Modelling using exact boundary condition}
An infinite plate containing a circular hole with radius $a$ at its centre is considered in this example. The plate is subject to a uniaxial tensile load as shown in Fig.\,\ref{openhole}. The analytical solution of the stresses in polar coordinates $(r,\theta)$ is given by \citep{Sukumar2001}: \begin{align} \sigma_{11}(r,\theta) & =1-\frac{a^{2}}{r^{2}}\left(\frac{3}{2}\cos2\theta+\cos4\theta\right)+\frac{3a^{4}}{2r^{4}}\cos4\theta\nonumber \\ \sigma_{22}(r,\theta) & =-\frac{a^{2}}{r^{2}}\left(\frac{1}{2}\cos2\theta-\cos4\theta\right)-\frac{3a^{4}}{2r^{4}}\cos4\theta\label{eq:exohstr}\\ \sigma_{12}(r,\theta) & =-\frac{a^{2}}{r^{2}}\left(\frac{1}{2}\sin2\theta+\sin4\theta\right)+\frac{3a^{4}}{2r^{4}}\sin4\theta\nonumber \end{align} The displacement solutions are: \begin{align} u_{1}(r,\theta) & =\frac{a}{8\mu}\left[\frac{r}{a}(\kappa+1)\cos\theta+\frac{2a}{r}\left((1+\kappa)\cos\theta+\cos3\theta\right)-\frac{2a^{3}}{r^{3}}\cos3\theta\right]\nonumber \\ u_{2}(r,\theta) & =\frac{a}{8\mu}\left[\frac{r}{a}(\kappa-3)\sin\theta+\frac{2a}{r}\left((1-\kappa)\sin\theta+\sin3\theta\right)-\frac{2a^{3}}{r^{3}}\sin3\theta\right],\label{eq:exohdisp} \end{align} where $\mu$ is the shear modulus and $\kappa=\frac{3-v}{1+v}$ is the Kolosov constant for plane stress condition.
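The closed-form fields in Eqs.\,\eqref{eq:exohstr} and \eqref{eq:exohdisp} are straightforward to evaluate; the following sketch (our own, not part of the reported MATLAB implementation) can be used to impose the boundary tractions and to compute error norms.
\begin{verbatim}
import numpy as np

def hole_stress(r, theta, a):
    """Stresses (sigma_11, sigma_22, sigma_12) of the infinite plate with a
    circular hole of radius a under unit uniaxial tension, Eq. (exohstr)."""
    c2, c4 = np.cos(2*theta), np.cos(4*theta)
    s2, s4 = np.sin(2*theta), np.sin(4*theta)
    s11 = 1.0 - (a**2/r**2)*(1.5*c2 + c4) + (3*a**4/(2*r**4))*c4
    s22 = -(a**2/r**2)*(0.5*c2 - c4) - (3*a**4/(2*r**4))*c4
    s12 = -(a**2/r**2)*(0.5*s2 + s4) + (3*a**4/(2*r**4))*s4
    return s11, s22, s12

def hole_displacement(r, theta, a, E=100.0, v=0.3):
    """Displacements (u_1, u_2) of Eq. (exohdisp), plane stress."""
    mu = E/(2.0*(1.0 + v))           # shear modulus
    kappa = (3.0 - v)/(1.0 + v)      # Kolosov constant, plane stress
    c1, c3 = np.cos(theta), np.cos(3*theta)
    s1, s3 = np.sin(theta), np.sin(3*theta)
    u1 = a/(8*mu)*((r/a)*(kappa + 1)*c1
                   + (2*a/r)*((1 + kappa)*c1 + c3) - (2*a**3/r**3)*c3)
    u2 = a/(8*mu)*((r/a)*(kappa - 3)*s1
                   + (2*a/r)*((1 - kappa)*s1 + s3) - (2*a**3/r**3)*s3)
    return u1, u2
\end{verbatim}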
The problem is solved by analysing a finite portion of the plate with dimensions $L\times L$ (see Fig.\,\ref{openhole}). The analytical traction (Eq.\,\ref{eq:exohstr}) is applied at the four edges of this finite plate.
\begin{figure}
\caption{Infinite plate with a circular hole under uniaxial tension}
\label{openhole}
\end{figure}
\begin{figure}
\caption{Mesh of a finite square plate with a circular hole ($L/a=10$)}
\label{ohm1}
\label{ohm2}
\label{openholemesh}
\end{figure}
Fig.\,\ref{ohm1} shows the quadtree mesh of the plate for $L/a=10$. Each edge on a quadtree cell is discretised with 1st order line elements. The $2:1$ rule is enforced. Based on the proposed technique, the curved boundary is handled with polygon cells as shown in Fig.\,\ref{ohm2}. A convergence study is conducted based on $h$-refinement. Three different element orders $(p=1,2,4)$ are investigated.
Fig.\,\ref{openholecon} shows the present results of the relative error in the displacement norm $\left\Vert \mathbf{u}-\mathbf{u}^{h}\right\Vert _{L^{2}(\Omega)}$, with $\mathbf{u}$ the analytical solution given in Eq.\,\eqref{eq:exohdisp} and $\mathbf{u}^{h}$ the solution computed by the proposed technique. The results show that all three element orders converge monotonically. For higher-order elements, more accurate results are obtained with a similar number of DOFs, and the convergence rate is also faster.
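The relative error is taken here to be $\left\Vert \mathbf{u}-\mathbf{u}^{h}\right\Vert _{L^{2}(\Omega)}/\left\Vert \mathbf{u}\right\Vert _{L^{2}(\Omega)}$ (our reading of the reported quantity), which can be evaluated by quadrature over the cells; a minimal sketch, assuming displacement samples and quadrature weights are available, is:
\begin{verbatim}
import numpy as np

def relative_l2_error(u_exact, u_num, weights):
    """Relative L2 displacement error from samples at quadrature points.
    u_exact, u_num: arrays of shape (n_points, 2); weights: quadrature
    weights multiplied by the Jacobians, shape (n_points,)."""
    diff2 = np.sum((u_exact - u_num)**2, axis=1)
    ref2 = np.sum(u_exact**2, axis=1)
    return np.sqrt(weights @ diff2) / np.sqrt(weights @ ref2)
\end{verbatim}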
\begin{figure}
\caption{Convergence results of the infinite plate with a circular hole, where $p$ is the element order and $m$ is the slope of the fitted line }
\label{openholecon}
\end{figure} There are 37 out of 100 cells calculated for the stiffness matrices. Among those 37 cells, 9 are regular quadtree cells and 28 are polygon cells surrounding the hole. For the remaining cells, their stiffness matrices are simply extracted from those 9 regular quadtree cells.
To further demonstrate the accuracy of the proposed technique, $\sigma_{\theta}/\sigma$ along $A-B$ (see Fig.\,\ref{openhole}) is plotted in Fig.\,\ref{openholesigmatheta} using the mesh given in Fig.\,\ref{ohm1} with 4th order elements. It can be seen that the results of the proposed technique agree well with the analytical solution, which has $\sigma_{\theta}/\sigma=3$ at $A$ ($\theta=90^{\circ},r=a$). For points away from $A$, $\sigma_{\theta}/\sigma$ approaches 1.
\begin{figure}
\caption{Normalised stress $\sigma_{\theta}/\sigma$ along $A-B$ for the thin square plate with a single circular hole under uniaxial tension}
\label{openholesigmatheta}
\end{figure}
\subsubsection{Approximation of infinite plate by varying $L/a$ ratio }
The same infinite plate can be approximated by increasing the $L/a$ ratio. The application of quadtree mesh facilitates such a study. Only the left and right sides of the plate are subjected to uniaxial in-plane tension stress $\sigma$. The element order used in this study is $p=4$. The same mesh given in Fig.\,\ref{ohm1} is used for $L/a=10$. The adaptive capability of quadtree mesh leads to the same mesh pattern for all $L/a$ ratios. Fig.\,\ref{ohm3} shows the cells around the hole for $L/a=640$ and it is exactly the same as the one shown in Fig.\,\ref{ohm2}.
For $L/a=640$, although there are 316 cells in total, only 37 cells are calculated for the stiffness matrices, the same as in the previous study. The results of $\sigma_{\theta}/\sigma$ at $A$ with varying $L/a$ ratio are given in Table\,\ref{openholesigmathetavsratio}. It is seen that the analytical solution ($\sigma_{\theta}/\sigma=3$) is quickly approached as the $L/a$ ratio increases.
\begin{figure}
\caption{Cell pattern around the hole for $L/a=640$}
\label{ohm3}
\end{figure}
\begin{table} \caption{Normalised stress ($\sigma_{\theta}/\sigma$) at $A$ of the thin square plate with a circular hole}
\centering{} \begin{tabular}{ccc} \hline $L/a$ ratio & No. of Nodes & $\sigma_{\theta}/\sigma$ at $A$\tabularnewline \hline 10 & 860 & 3.3591\tabularnewline 40 & 1428 & 3.0204\tabularnewline 160 & 1996 & 3.0049\tabularnewline 640 & 2564 & 2.9991\tabularnewline \hline \end{tabular}\label{openholesigmathetavsratio} \end{table}
\subsection{Square plate with multiple holes}
A unit square plate with 9 randomly distributed holes of different sizes, shown in Fig.\,\ref{multiholes}, is analysed. This example highlights the automation and flexibility of the proposed technique in handling the mesh transition between features with various dimensions. The ability to capture curved boundaries accurately using high-order elements is also demonstrated. The displacements at the bottom edge of the plate are fully constrained and a uniform tension $P=1$ is applied at the top edge. A set of consistent units is used.
\begin{figure}
\caption{Thin square plate with multiple holes under uniaxial tension}
\label{multiholes}
\end{figure} To validate the results, the same problem is solved using the commercial FEA software ANSYS V14.5. The plate is discretised using 8-node quadrilateral elements (PLANE183). In order to demonstrate the automation and performance of the proposed technique, similar user inputs are given to ANSYS to generate a mesh for comparison.
In ANSYS, the square plate is divided into 4 equal-sized quadrants such that a centre key point is created for result comparison. The 9 holes are introduced to the plate by subtracting their areas from the square plate. The mesh constructed in ANSYS is unstructured (paving). A single variable $N$ is used to control the mesh density and to drive the mesh refinement. For each hole, the number of elements around it is equal to $4N$. For all the straight lines, the element size is set to $1/(3N)$, which gives each outer edge approximately $3N$ elements. Note that the mesh used in ANSYS is far from optimal and a structured mesh should be used for better performance. However, the main objective of using a paving mesh and controlling only the boundary element divisions is to show how the proposed technique and ANSYS perform when a minimal number of controlling variables is used for meshing. For the proposed technique, seed points with $s_{b}=(4N\times s_{max})$, where $s_{max}$ is the maximum allowed number of seed points in a cell, are set on the circular holes to generate a mesh with a similar number of boundary divisions as the one in ANSYS.
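One possible way to generate these boundary seed points, under our reading of $s_{b}=4N\times s_{max}$ as the number of seed points placed on each hole (an assumption for illustration only), is:
\begin{verbatim}
import numpy as np

def hole_seed_points(centre, radius, N, s_max):
    """Equally spaced seed points on a circular hole used to drive the
    quadtree density; s_b = 4*N*s_max points per hole (assumed reading)."""
    s_b = int(4 * N * s_max)
    phi = np.linspace(0.0, 2.0*np.pi, s_b, endpoint=False)
    cx, cy = centre
    return np.column_stack((cx + radius*np.cos(phi),
                            cy + radius*np.sin(phi)))
\end{verbatim}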
Fig.\,\ref{multiholesmesh} shows the ANSYS mesh (1358 elements) and the quadtree mesh (1169 cells). Both meshes can effectively handle the mesh transition between the holes. As commented earlier, while the mesh in ANSYS can be further improved by designing a structured layout, it would also require additional time and investigation effort. The amount of additional effort depends on the complexity of the geometry and user experience. For the proposed technique, the resulting mesh is always structured (see Fig.\,\ref{mhm2}) without additional effort. The time to generate the quadtree mesh is around 3\,s using the computer described at the beginning of this section.
\begin{figure}
\caption{Mesh of a square plate with 9 circular holes}
\label{mhm1}
\label{mhm2}
\label{multiholesmesh}
\end{figure} For the stiffness calculation, 364 out of 1169 cells are calculated. Among those 364 cells, 12 are master quadtree cells and 352 are polygon cells surrounding the holes. The stiffness matrices for all the other cells are simply extracted from the calculated master quadtree cells. The total time from constructing the stiffness matrix to obtaining the displacement solution is less than 3.2\,s when using 5th order elements.
Table\,\ref{multiholestab1} shows the convergence of the displacement components at the centre point $A$ with increasing element order using the mesh in Fig.\,\ref{mhm2}. In order to highlight the convergence performance, Fig.\,\ref{multiholescon} shows the relative error of the present results of the displacement vector sum at point $A$. The error is calculated based on the converged ANSYS results, which converged to the first 6 significant digits. Also shown in the same figure are the relative errors of the ANSYS results and another set of results of the proposed technique. They are both generated through a series of $h$-refinements with the use of 2nd order elements. It is observed that the present results with $p$-refinement converge at the fastest rate. For the $h$-refinement, the present results converge at essentially the same rate as those of ANSYS with slightly better accuracy. The present results demonstrate that, for the same accuracy, far fewer DOFs are required when using high-order elements, and curved boundaries are also accurately modelled with a minimal number of cells.
\begin{table} \caption{Centre displacement results of a square plate with 9 circular holes}
\centering{} \begin{tabular}{cccc} \hline Elem. Order & No. of Nodes & $u_{x}\times10^{4}$ at $A$ & $u_{y}\times10^{3}$ at $A$\tabularnewline \hline 2 & 4379 & 4.98298 & 6.67236\tabularnewline 3 & 7157 & 4.98350 & 6.67359\tabularnewline 4 & 9935 & 4.98241 & 6.67374\tabularnewline 5 & 12713 & 4.98197 & 6.67379\tabularnewline \hline \end{tabular}\label{multiholestab1} \end{table}
\begin{figure}
\caption{Convergence results of the centre displacement vector sum for the square plate with multiple holes, where $p$ is the element order and $m$ is the slope of the fitted line }
\label{multiholescon}
\end{figure}
In order to further demonstrate the overall consistency of the present results, Fig.\,\ref{multiholessy} shows the contour plots of $\sigma_{y}$, from both ANSYS and the proposed technique. Good agreement is observed from the contour plots.
\begin{figure}
\caption{$\sigma_{y}$ of the square plate with random pattern of holes}
\label{mhsya}
\label{mhsy}
\label{multiholessy}
\end{figure}
\subsection{Square plate with multiple cracks emanating from a hole}
A square plate of length $L$ with a centre hole of radius of $r$ given in Fig.~\ref{holemulticracks} is considered. $n$ cracks with crack length $a$ emanate from the hole. This example aims to show the simplicity and effectiveness of the proposed technique to solve problems with singularities.
\begin{figure}
\caption{Thin square plate with cracks emanating from a hole under bi-axial tension}
\label{holemulticracks}
\end{figure} In this example, to approach the assumption of an infinite plate, $r/L=0.01$ is considered. A parametric study is performed considering $n=2,4,8$ cracks surrounding the hole with various $s=\frac{a}{a+r}$ ratios. The element order used in this study is $p=4$, which is capable of modelling the circular boundary accurately, as shown in the first example. The present results are compared with the reference solution of the stress intensity factor given in \citet{tada2000stress}.
Fig.\,\ref{holemulticracksmesh} shows the mesh around the central hole, with 4 and 8 cracks around the edge and $s=0.6$. Based on the proposed technique, no refinement is required around the crack tips. This facilitates the study with multiple cracks and results in less computational effort compared to the conventional FEM.
\begin{figure}
\caption{Mesh of the square plate with cracks emanating from a hole. The black dots represent the crack tips.}
\label{hmcm1}
\label{hmcm2}
\label{holemulticracksmesh}
\end{figure}
Fig.\,\ref{holemulticracksres} shows the normalised stress intensity factor ($F_{I}=K_{I}/(\sigma\sqrt{\pi a})$) computed from the proposed technique for different values of $s$. Excellent agreement with the reference solution \citep{tada2000stress} is observed. This demonstrates the accuracy of the proposed technique in dealing with stress singularities as well as the feasibility of handling geometries with complicated features.
\begin{figure}
\caption{Stress intensity of the square plate with cracks emanating from a hole}
\label{holemulticracksres}
\end{figure}
\subsection{Square plate with two cracks crossing each other}
A square plate of length $L$ with two cracks crossing each other is considered. The dimensions of the plate and the cracks as well as the boundary conditions are shown in Fig.~\ref{crosscrack}. This example highlights the automatic mesh generation of the proposed technique and the capacity to handle problems with complicated crack configurations.
\begin{figure}
\caption{Thin square plate with two cracks crossing each other}
\label{crosscrack}
\end{figure}
Fig.\,\ref{crosscrackmesh} shows the quadtree mesh of the proposed technique. The mesh only requires defining seed points on the domain boundary, along the cracks and around the crack tips to control the quadtree mesh density. The mesh generation is fully automatic without the requirement of dividing area regions. The resulting mesh contains a total of 216 cells. During the construction of the stiffness matrix, only 48 cells are computed, comprising 9 master quadtree cells and 39 polygon cells.
For the same problem, a few more steps would be required to generate a mesh in FEA. These include defining crack tip regions that directly affect the solution accuracy, and designing a proper refinement strategy that directly affects the convergence performance. For example, in ANSYS, the command ``KSCON'' needs to be issued at each crack tip in order to generate two circular layers of elements (one layer of singular elements) around the tip. The radius of the two circular layers of elements is chosen solely based on user experience and \emph{trial-and-error}. Shape warnings on the elements would occur if the settings of that command are not consistent with the global mesh. Moreover, automatic $h$-refinement is not applicable when ``KSCON'' is activated. Conducting a convergence study with $h$-refinement would therefore require multiple manual steps, such as reducing the radius of the circular layers of elements around the crack tips and increasing the number of elements in the circumferential direction around the crack tips.
\begin{figure}
\caption{Mesh of a square plate with two cracks crossing each other (216 cells)}
\label{crosscrackmesh}
\end{figure}
Table\,\ref{crosscracktab} shows the crack opening displacements ($op1$ and $op2$ in Fig.\,\ref{crosscrack}) with increasing element order. Similar to the previous examples, the present results converge rapidly with only a minimal increase in the number of nodes. The results obtained using 2nd order elements and 5th order elements differ by less than $0.04\%$.
\begin{table*} \caption{Crack opening displacement: $u_{op1}$ for the opening $op1$ and $u_{op2}$ for the opening $op2$}
\noindent \centering{} \begin{tabular}{cccc} \hline Elem. Order & No. of Nodes & $u_{op1}\times10^{-3}$ & \multicolumn{1}{c}{$u_{op2}\times10^{-3}$}\tabularnewline \hline 2 & 818 & 5.1300 & 6.9710\tabularnewline 3 & 1333 & 5.1274 & 6.9727\tabularnewline 4 & 1848 & 5.1279 & 6.9726\tabularnewline 5 & 2363 & 5.1280 & 6.9725\tabularnewline \hline \end{tabular}\label{crosscracktab} \end{table*}
\subsection{Cracked nuclear reactor under internal pressure}
In this final example, a nuclear reactor under internal pressure \citep{Simpson2013} is analysed. Due to symmetry, only a quadrant of the reactor is modelled. The geometry, material properties, loading and dimensions are shown in Fig.\,\ref{nreact}. Also shown in the figure are the two cracks introduced on the outer boundary. This example shows the flexibility of the proposed technique and the developed meshing algorithm in modelling more practical structures.
\begin{figure}
\caption{Cracked nuclear reactor under internal pressure}
\label{nreact}
\end{figure}
Fig.\,\ref{nreactmesh} shows the quadtree mesh used in this example, which contains a total of 160 cells. Using the proposed technique only requires seed points to be defined at the boundaries to control the quadtree mesh density. No additional treatment of the cells containing the crack tips is necessary. The time spent on generating the quadtree mesh is less than 0.8\,s using the same computer described at the beginning of this section.
The calculation of the stiffness matrix involves computing 44 of the 160 cells, of which 12 are master quadtree cells and 32 are polygon cells. The total time from constructing the stiffness matrix to obtaining the displacement solution is less than 0.7\,s when using 5th order elements.
\begin{figure}
\caption{Mesh for the quadrant of the reactor with 2 cracks (160 cells)}
\label{nreactmesh}
\end{figure}
A convergence study is conducted by increasing the order of the elements without changing the quadtree layout in Fig.\,\ref{nreactmesh}. Table\,\ref{nreacttab1} shows the two crack opening displacements at points $A$ and $B$ (Fig.\,\ref{nreact}). The present results converge quickly as the element order increases. The difference between using 2nd order elements and 5th order elements is less than 0.1\%. This further highlights the advantage of using high-order elements that can model curved boundaries more accurately with a minimal number of cells.
\begin{table*} \caption{Crack opening displacement: $u_{A}$ for the opening at $(25,40)$, $u_{B}$ for the opening at $(45,75)$}
\noindent \centering{} \begin{tabular}{cccc} \hline Elem. Order & No. of Nodes & $u_{A}\times10^{2}$ & \multicolumn{1}{c}{$u_{B}\times10^{2}$}\tabularnewline \hline 2 & 635 & 7.14372 & 2.60404\tabularnewline 3 & 1031 & 7.15066 & 2.60336\tabularnewline 4 & 1427 & 7.15077 & 2.60334\tabularnewline 5 & 1823 & 7.15077 & 2.60332\tabularnewline \hline \end{tabular}\label{nreacttab1} \end{table*}
\section{Conclusion\label{sec:Conclusion}}
This paper has presented a numerical technique to automate stress and fracture analysis using the SBFEM and quadtree mesh of high-order elements. Owing to the nature of the SBFEM, the proposed technique has no specific requirement, such as deriving conforming shape functions or sub-triangulation, to handle quadtree cells with hanging nodes. High-order elements are used within each quadtree cell directly.
The quadtree mesh generation is fully automatic and involves a minimal number of user inputs and operation steps. Boundaries are modelled with scaled boundary polygons and this allows the proposed technique to conform to the boundary without excessive mesh refinement. The meshing algorithm is also applicable to problems with singularities. The use of the quadtree mesh leads to an efficient approach to compute the global stiffness matrix. This facilitates analyses that require a significant number of cells with high-order elements. Five numerical examples are presented to highlight the functionality and performance of the proposed technique. The present results show excellent agreement with analytical solutions and those computed by the FEM.
\section*{References}
\end{document}
\begin{document}
\title{Numerical simulation of wave propagation in inhomogeneous media using Generalized Plane Waves} \tableofcontents
\section*{Abstract} The Trefftz Discontinuous Galerkin (TDG) method is a technique for approximating the Helmholtz equation (or other linear wave equations) using piecewise defined local solutions of the equation to approximate the global solution. When coefficients in the equation (for example, the refractive index) are piecewise constant it is common to use plane waves on each element. However, when the coefficients are smooth functions of position, plane waves are no longer directly applicable. In this paper we show how Generalized Plane Waves (GPWs) can be used in a modified TDG scheme to approximate the solution for piecewise smooth coefficients. GPWs are approximate solutions to the equation that reduce to plane waves when the medium through which the wave propagates is constant. We shall show how to modify the TDG sesquilinear form to allow us to prove convergence of the GPW based version. The new scheme retains the high order convergence of the original TDG scheme (when the solution is smooth) and also retains the same number of degrees of freedom per element (corresponding to the directions of the GPWs). Unfortunately it loses the advantage that only skeleton integrals need to be performed. Besides proving convergence, we provide numerical examples to test our theory.
\section{Introduction} The Trefftz Discontinuous Galerkin (TDG) method proposed in \cite{git09} is a mesh based method for approximating solutions of the Helmholtz equation. This method generalizes the Ultra Weak Variational Formulation (UWVF)~of the same problem~\cite{cessenat_phd,despres} by allowing different weighting strategies on penalty terms in the TDG method. Error analysis \cite{buf07,git09,HMP11,hmp13,hmp15} and computational experience \cite{hut03} show that the method can be an efficient way of approximating solutions of the Helmholtz equation. It has also become clear that the method works best in an $hp$-mode (see \cite{hmp15}) where large elements are used away from boundaries in the computational domain, together with larger numbers of plane waves.
However because of the use of simple Trefftz functions (usually plane waves element by element), it has to be assumed that the coefficients in the governing partial differential equation are piecewise constant. Of course smoothly varying coefficient functions could be first approximated by a piecewise constant function and then the resulting perturbed Helmholtz equation could be solved by TDG or UWVF. But this would require small elements and hence defeat some of the potential advantages of using large elements in a Trefftz based scheme.
To circumvent the difficulty with smoothly varying coefficients, we propose to use approximate solutions of the underlying partial differential equation constructed element by element where the coefficients are variable. In this work we use the Generalized Plane Waves (GPW) of \cite{IGD2013,IG2015} as a basis for the TDG type scheme.
Note that smoothly varying coefficients arise in the simulation of electromagnetic wave propagation in tokamaks where the permittivity is spatially variable and may even become negative. Indeed the original design of GPWs in \cite{IGD2013,IG2015} was motivated precisely by this application.
To describe the setting for applying GPWs in more detail let us consider the following model problem from \cite{hmp13}. Given a bounded Lipschitz polyhedron $\Omega_D$ that is star shaped with respect to the origin and a larger Lipschitz polyhedron $\Omega_R$ containing $\overline{\Omega_D}$, we define the computational domain to be the annulus $\Omega=\Omega_R\setminus\overline{\Omega_D}$. The two boundaries of $\Omega$ are $\Gamma_D=\partial\Omega_D$ and $\Gamma_R=\partial\Omega_R$ and we use a normal $\mathbf{n}$ that is outwards from $\Omega$. Because we shall use some regularity results from \cite{hmp13} we need to assume that $\Omega_R$ is star-like with respect to a ball of radius $\gamma_Rd_\Omega$ centered at the origin where $\gamma_R>0$ and $d_\Omega$ is the diameter of $\Omega$.
Suppose we are given a wave number $\kappa>0$. In addition, given a strictly positive, piecewise smooth and bounded real function $\epsilon\in L^{\infty}(\Omega)$ and another function $g\in L^2(\Gamma_R)$, we want to approximate the solution $u$ of \begin{eqnarray} \Delta u+\kappa^2 \epsilon u&=&0\mbox{ in }\Omega,\label{helm}\\ u&=&0\mbox{ on }\Gamma_D,\label{dirich}\\ {\partial_n u}+{\rm{}i}\kappa u&=&{\rm{}i}\kappa g\mbox{ on }\Gamma_R.\label{imp} \end{eqnarray} As pointed out in \cite{hmp13} this is a model problem for scattering (for example of an $s$-polarized electromagnetic wave from a perfect conductor embedded in a dielectric in two dimensions). The impedance boundary condition is then a simple radiation boundary condition.
As we shall detail shortly, if we assume that the function $\epsilon$ is piecewise analytic on each element, we can approximate it by a power series. With this in hand we shall give details of a recursive algorithm for generating the coefficients of basis functions on each element that satisfy (\ref{helm}) to high accuracy. These are constructed so that if the coefficient $\epsilon$ is constant on an element, the resulting basis function is just a plane wave. Using these generalized plane waves we can prove convergence of a modified TDG scheme. The resulting discrete problem obtains high order convergence for smooth solutions as is the case for the standard TDG or UWVF. We only consider $h$-convergence in this paper.
As we shall see, the main disadvantage of the use of GPWs in the TDG method is the need to integrate over elements in the grid. We would prefer to use them in a generalized UWVF avoiding this integration. However, although numerical experiments are encouraging \cite{LM_thesis} we do not have a theoretical justification of this approach.
The paper proceeds as follows. In the next section we briefly outline our modification of the basic TDG method. Then in Section~\ref{GPW} we show how to construct GPWs and then obtain two new error estimates for these functions that will underlie our error analysis. We also show that piecewise linear functions can be approximated element by element using the GPW functions. In Section~\ref{EE} we derive error estimates for the new TDG scheme with GPW basis functions. Finally in Section~\ref{NT} we give some basic numerical tests of the new algorithm.
\section{The Plane Wave Discontinuous Galerkin Method} Even with a variable coefficient $\epsilon$, the choice of domain $\Omega$ and the conditions on $\epsilon$ guarantee that if $g\in H^s(\Gamma_R)$ for some sufficiently small $s>0$ (depending on the interior angles of $\Gamma_R$) then $u\in H^{3/2+s}(\Omega)$~\cite[Theorem 2.3]{hmp13}. This regularity will allow us to develop consistent fluxes for $u$ and its derivatives. Unfortunately, because $\epsilon$ is variable, the dependence on $\kappa$ of the continuity constants for estimates in this paper is not easy to track. Therefore we note now that constants in the analysis will depend in an unspecified way on $\kappa$.
As is usual for DG schemes, we start with a mesh and continue to define the method using definitions from \cite{hmp13}. Suppose we cover $\Omega$ by a finite element mesh ${\cal T}_h$ of regular triangular elements $K$ of maximum diameter $h$ (in fact more general domains can easily be allowed). The diameter of an element $K$ is denoted $h_K$. In addition following \cite{hmp13} we assume \begin{enumerate} \item {\em Local quasi-uniformity}: there exists a constant $\tau\geq 1$ independent of $h$ such that \[ \tau^{-1}\leq \frac{h_{K_1}}{h_{K_2}}\leq \tau\mbox{ for all triangles }K_1,K_2\mbox{ meeting at any edge}. \] \item {\em Quasi-uniformity close to $\Gamma_R$}: For all triangles $K$ touching $\Gamma_R$ there is a constant $\tau_R$ independent of $h$ such that \[ \frac{h}{h_K}\leq \tau_R. \] \end{enumerate} Let $\mathbf{n}_K$ denote the unit outward normal to element $K$. Let
$K$ and $K'$ denote two elements in ${\cal T}_h$ meeting at an edge $e$; then, on $e$, we make the standard definitions of the average value and jump of functions across $e$: \begin{eqnarray*}
\avg{u}=\frac{u|_{K}+u|_{K'}}{2},&\quad&\avg{\mathbf{\sigma}}=\frac{\mathbf{\sigma}|_K+\mathbf{\sigma}|_{K'}}{2},\\
\jmp{u}=u|_K\mathbf{n}_K+u|_{K'}\mathbf{n}_{K'},&\quad&
\jmp{\mathbf{\sigma}}=\mathbf{\sigma}|_{K}\cdot\mathbf{n}_K+\mathbf{\sigma}|_{K'}\cdot\mathbf{n}_{K'}. \end{eqnarray*} We denote by ${\cal E}_h$ the set of all edges in the mesh. Then let \begin{itemize} \item ${\cal E}_I$ denote the set of all edges in the mesh interior to $\Omega$, \item ${\cal E}_D$ denote the set of all boundary edges on $\Gamma_D$, \item ${\cal E}_R$ denote the set of all boundary edges on $\Gamma_R$. \end{itemize} We also need three positive penalty parameters that are functions of position on the skeleton of the mesh: $\alpha$, $\beta$ and $\delta$. At this point these are simply assumed to be positive functions of position on ${\cal E}_h$ and will be given in more detail shortly. Using the above defined jumps and average values, we are led to consider the following standard sesquilinear form for TDG \cite{hmp13,MelenkEsterhazy12,kapita14}: \begin{eqnarray} A_h(u,v)&=& \int_{\Omega}\left(\nabla_hu\cdot\nabla_h\overline{v} -\kappa^2\epsilon u\,\overline{v}\right)\,dA-\int_{{\cal E}_I}\left(\avg{\nabla_h u}\cdot\jmp{\overline{v}} +\jmp{ u}\cdot\avg{\nabla_h\overline{v}}\right)\,ds\nonumber\\&& -\frac{1}{i\kappa}\int_{{\cal E}_I}\beta\jmp{\nabla_h u}\jmp{\nabla_h\overline{v}}\,ds +{i\kappa}\int_{{\cal E}_I}\alpha\jmp{ u}\cdot\jmp{\overline{v}}\,ds-\int_{{\cal E}_R}\delta u {\partial_n \overline{v}}\,ds\nonumber\\&& -\int_{{\cal E}_R}\delta{\partial_n u}\overline{v}\,ds -\frac{1}{i\kappa}\int_{{\cal E}_R}\delta{\partial_n u}{\partial_n \overline{v}}\,ds+i\kappa\int_{{\cal E}_D}\alpha u\overline{v}\,ds\nonumber\\&& +i\kappa\int_{{\cal E}_R}(1-\delta)u\overline{v}\,ds -\int_{{\cal E}_D} \left({\partial_n u}\overline{v}+u{\partial_n \overline{v}}\right)\,ds.\label{Ahalt} \end{eqnarray} Here $\nabla_h$ is the piecewise defined gradient and $\partial_n u =\nabla_h u\cdot \mathbf{n}$ element by element. In addition the right-hand side is given by \[ F(v)=-\frac{1}{{\rm{}i}\kappa}\int_{{\cal E}_R}\delta g\,{\partial_n \overline{v}}\,ds+\int_{{\cal E}_R} (1-\delta )g\overline{v}\,ds. \]
By virtue of the regularity of the solution of (\ref{helm})-(\ref{imp}) noted above, it satisfies \[ A_h(u,v)=F(v)\] for all sufficiently smooth test functions $v$ (for example piecewise $H^2$ is sufficient).
Now suppose we wish to discretize the problem. Let $V_h\subset \Pi_{K\in T_h}H^2(K)$ be a finite dimensional space. If $V_h$ is chosen to consist of piecewise smooth solutions of (\ref{helm}), we have the standard TDG and seek an approximation $u_h\in V_h$ that satisfies \[ A_h(u_h,v)=F(v)\quad\mbox{ for all }v\in V_h. \] For piecewise constant media, the space $V_h$ can be chosen in many ways. One choice uses Bessel functions (good for conditioning but bad for computational speed because of the need for quadrature), another, more standard choice, uses plane waves (typically worse conditioned but easier to use since integrals can be computed in closed form)~\cite{HMP13I}.
However if $\epsilon(x,y)$ is non-constant on an element we cannot use simple solutions of the Helmholtz equation. In this paper we assume that $\epsilon(x,y)$ is a smooth function on each element (but may be discontinuous between elements). Then, as we shall shortly describe, the space $V_h$ can be constructed using ``Generalized Plane Waves'' (GPWs) that approximately satisfy the Helmholtz equation. However we have been unable to prove convergence for the standard TDG method in this case, and instead add a stabilizing term to the sesquilinear form and so define \[ B_h(u,v)=A_h(u,v)+\frac{i}{\kappa^2}\int_\Omega\gamma (\Delta u+\kappa^2\epsilon u)\overline{(\Delta v+\kappa^2\epsilon v)}\,dA. \] Here $\gamma>0$ is a new penalty parameter that is a piecewise constant function of position on the mesh. We note that the new term vanishes on elements with constant material coefficients, allowing plane waves to be used, and there the method reduces to the standard TDG. Now we seek $u_h\in V_h$ such that \begin{equation}\label{Bhelm} B_h(u_h,v)=F(v)\quad\forall v\in V_h. \end{equation}
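In practice the added term only requires the element-wise residual $\Delta\varphi+\kappa^{2}\epsilon\varphi$ of each basis function at quadrature points; for a GPW $\varphi=e^{P}$ this residual is available in closed form as $e^{P}\left(\Delta P+\nabla P\cdot\nabla P+\kappa^{2}\epsilon\right)$. A minimal sketch of the corresponding element matrix (our own illustration, assuming the residual samples and quadrature data are precomputed) is:
\begin{verbatim}
import numpy as np

def stabilization_matrix(residuals, weights, gamma, kappa):
    """Element matrix of the added term
    (i/kappa^2) int_K gamma (Delta u + kappa^2 eps u)
                           * conj(Delta v + kappa^2 eps v) dA.
    residuals[n, q] = (Delta + kappa^2*eps) phi_n at quadrature point q,
    weights[q]      = quadrature weight times Jacobian on the element K,
    gamma           = value of the penalty on K."""
    w = gamma * np.asarray(weights)
    R = np.asarray(residuals)
    # S[m, n] = (i/kappa^2) * sum_q w_q * R[n, q] * conj(R[m, q]),
    # with m the test index and n the trial index
    return (1j / kappa**2) * (np.conj(R) * w) @ R.T
\end{verbatim}
This matrix is simply added to the element contribution of $A_h$; on elements where $\epsilon$ is constant the residuals of plane waves vanish and the term drops out, as noted above.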
In the next section we describe how to construct GPWs element by element and hence complete the specification of the method.
\section{Generalized Plane Waves}\label{GPW} In this section we focus specifically on GPWs. Firstly we describe the design process, including an explicit algorithm to build a local set of GPWs on a given element of the mesh $\mathcal T_h$. Secondly we turn to interpolation of a solution of \eqref{helm} and prove error estimates. We provide various interpolation properties of such a set of local GPWs on a given element of the mesh, and derive a global interpolation property on the whole domain $\Omega$ by piecewise GPWs. Thirdly we prove a result on approximation of the space of bi-variate polynomials of degree 1 by GPWs. This result will be useful for the error analysis.
\subsection{Design and interpolation properties} GPWs have been introduced in \cite{LM_thesis,IGD2013}. They generalize the use of classical plane waves, as exact solutions of an equation with piecewise constant coefficients, to the case of variable coefficients. The GPWs are not exact solutions of (\ref{helm}) but approximately solve the equation element by element.
Their design process is based on a Taylor expansion and ensures that the homogeneous equation is locally satisfied up to a given order on each element $K$ of the mesh.
On a given element $K$, consider the centroid $(x_{K},y_{K})$. A GPW on $K$ is a function $\varphi = e^{P}$ where
\begin{equation}\label{eq:polnotation}
P(x,y)=\sum_{i=0}^{{\rm{}d}_P} \sum_{j=0}^{{\rm{}d}_P-i} \lu{i}{j} \left(x-x_K\right)^i \left(y-y_K\right)^j,
\end{equation}
${\rm{}d}_P$ being the total degree of the polynomial $P$. A GPW is designed to be an approximate solution of the Helmholtz equation: the polynomial coefficients $\left\{ \lu{i}{j},0\leq i+j\leq {\rm d}_P \right\}$ are computed from the Taylor expansion of the variable coefficient $\epsilon$ in order for the function $\varphi = e^{P}$ to satisfy \begin{equation}\label{eq:-lapl+al}
[ \Delta + \kappa^{2}\epsilon ] e^{P(x,y)} = O \left(\| (x,y)-(x_K,y_K) \|^q\right). \end{equation} The parameter $q$ is the order of approximation of the equation. Canceling all the terms of order less than $q$ in the Taylor expansion \eqref{eq:-lapl+al} is equivalent to a system of $q(q+1)/2$ nonlinear equations. The unknowns of this system are the $({\rm{}d}_P+1)({\rm{}d}_P+2)/2$ coefficients of $P$. Setting simultaneously ${\rm{}d}_P = q+1$ and giving the values of the $2q+3$ coefficients $\left\{ \lu{i}{j}, i\in \{0,1\}, 0\leq j \leq q+1-i \right\}$ leads to a unique solution of the nonlinear system. This solution is explicitly expressed as \begin{equation}\label{eq:IF} \begin{array}{l} \forall (i,j) \text{ s.t. } 0\leq i+j \leq q-1, \\ \displaystyle \lu{i+2}{j} = \frac1{(i+2)(i+1)}\Bigg(-\kappa^2\frac{\partial_x^i \partial_y^j \epsilon\left(x_K,y_K\right)}{i!j!} - (j+2)(j+1) \lu{i}{j+2} \\ \displaystyle \phantom{\frac{\partial_x^i\beta\left(\mathbf{g}_K\right) \partial_y^j \beta\left(\mathbf{g}_K\right)}{i!j!} = } -\sum_{k=0}^{i} \sum_{l=0}^{j} (i-k+1)(k+1) \lu{i-k+1}{j-l} \lu{k+1}{l} \\ \displaystyle \phantom{\frac{\partial_x^i\beta\left(\mathbf{g}_K\right) \partial_y^j \beta\left(\mathbf{g}_K\right)}{i!j!} = } -\sum_{k=0}^{j} \sum_{l=0}^{i} (j-k+1)(k+1) \lu{i-l}{j-k+1} \lu{l}{k+1}\Bigg), \end{array} \end{equation} where $\partial_x=\partial/\partial x$ and $\partial_y=\partial/\partial y$. More precisely, as defined in \cite{IG2015}, a GPW at $(x_K,y_K)$ corresponds to the following normalization: \begin{itemize} \item[$\bullet$] $\lambda_{0,0}=0$, \item[$\bullet$] $(\lambda_{1,0},\lambda_{0,1})=N (\cos \theta,\sin\theta)$, for some $N\in\mathbb C$ and $\theta\in\mathbb R$, \item[$\bullet$] $ \lu{i}{j}=0$ for $i\in\{0,1\}$ and $1<i+j\leq q+1$. \end{itemize}
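The recursion \eqref{eq:IF}, together with the normalization above, translates directly into a short routine. The sketch below is our own illustration (the array \texttt{eps\_deriv} of derivatives of $\epsilon$ at $(x_K,y_K)$ is assumed to be supplied, e.g. from an analytic expression of $\epsilon$); it returns the coefficients $\lambda_{i,j}$ of $P$, and a helper builds the local set of $p$ equi-spaced directions described in the next paragraph.
\begin{verbatim}
import numpy as np
from math import factorial

def gpw_coefficients(eps_deriv, kappa, theta, N, q):
    """Coefficients lambda[i, j] of P such that exp(P) satisfies
    Delta u + kappa^2 eps u = O(|x - x_K|^q), following (eq:IF).
    eps_deriv[i, j] = d^i/dx^i d^j/dy^j eps at the centroid (i + j < q)."""
    dP = q + 1                               # total degree of P
    lam = np.zeros((dP + 2, dP + 2), dtype=complex)
    # normalization: lambda_{0,0} = 0, (lambda_{1,0}, lambda_{0,1}) = N(cos, sin),
    # lambda_{i,j} = 0 for i in {0, 1} and 1 < i + j <= q + 1 (already zero)
    lam[1, 0] = N * np.cos(theta)
    lam[0, 1] = N * np.sin(theta)
    for i in range(q):                       # all (i, j) with 0 <= i + j <= q - 1
        for j in range(q - i):
            rhs = -kappa**2 * eps_deriv[i, j] / (factorial(i) * factorial(j))
            rhs -= (j + 2) * (j + 1) * lam[i, j + 2]
            rhs -= sum((i - k + 1) * (k + 1) * lam[i - k + 1, j - l] * lam[k + 1, l]
                       for k in range(i + 1) for l in range(j + 1))
            rhs -= sum((j - k + 1) * (k + 1) * lam[i - l, j - k + 1] * lam[l, k + 1]
                       for k in range(j + 1) for l in range(i + 1))
            lam[i + 2, j] = rhs / ((i + 2) * (i + 1))
    return lam

def gpw_local_basis(eps_deriv, kappa, n, q):
    """Coefficient arrays of the p = 2n+1 GPWs with equi-spaced directions
    theta_l = 2*pi*(l-1)/p and N = i*kappa*sqrt(eps(x_K, y_K))."""
    p = 2 * n + 1
    N = 1j * kappa * np.sqrt(eps_deriv[0, 0])
    return [gpw_coefficients(eps_deriv, kappa, 2 * np.pi * l / p, N, q)
            for l in range(p)]
\end{verbatim}
For constant $\epsilon$ the recursion returns $\lambda_{i,j}=0$ for all $i+j\geq 2$, so each GPW reduces to a classical plane wave, consistent with the design goal stated above.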
A local set of linearly independent GPWs is then obtained for a given value of $N$ by considering $p$ equi-spaced directions $\theta_{l} = 2\pi(l-1)/p$ for $1\leq l \leq p$. The interpolation properties of this set of functions are the main topic of \cite{IG2015}. The main result of that paper provides a sufficient condition on the parameters $p$ and $q$ to achieve a high order interpolation of a smooth solution of \eqref{helm} by GPWs, as well as a high order interpolation of its gradient. We will denote by $GPW_\kappa^{p,q}(K)$ the space spanned by the $p$ GPWs corresponding to $\theta_{l} = 2\pi(l-1)/p$ for $1\leq l \leq p$ and $N=\sqrt{ -\kappa^2 \epsilon(x_K,y_K)}$, or equivalently $N=\imath \kappa\sqrt{ \epsilon(x_K,y_K)}$. Let ${\mathcal C}^k(S)$ denote the set of functions with $k$ continuous derivatives on a set $S$. As a reminder, with the present notation, the interpolation result reads: \begin{theorem}\label{th:u-ua} Consider $K\in \mathcal T_h$ together with $n\in\mathbb N$ such that $n>0$. Assume that $q\geq n+1$, $p=2n+1$ and $\mathbf{g}_K=(x_K,y_K)\in K$ is the centroid of $K$. Finally suppose the coefficient $\epsilon \in\mathcal C^{q-1}(K)$.
Consider a solution $u$ of the scalar wave equation \eqref{helm}, satisfying $u\in \mathcal C^{n+1}(K)$. Then there is a function $u_a\in GPW_\kappa^{p,q}(K)$ implicitly depending on $\epsilon$ and its derivatives, and a constant $C(\kappa,K,n)$, implicitly depending on $\epsilon$ and its derivatives as well, such that: for all $\mathbf{m}\in K$ \e{\label{eq:gradumua} \left\{ \begin{array}{l}
\left| u\left(\mathbf{m}\right)-u_a\left(\mathbf{m}\right)\right| \leq C(\kappa,K,n) \left|\mathbf{m}-\mathbf{g}_K\right|^{n+1} \left\| u \right\|_{\mathcal C^{n+1}(K)} ,\\%\phantom \nabla
\left\| \nabla u\left(\mathbf{m}\right)-\nabla u_a\left(\mathbf{m}\right)\right\| \leq C(\kappa,K,n) \left|\mathbf{m}-\mathbf{g}_K\right|^{n} \left\| u \right\|_{\mathcal C^{n+1}(K)}. \end{array} \right. } \end{theorem} The following interpolation property of higher order derivatives stems directly from the proof of the previous theorem. \begin{theorem}\label{th:u-ua2} Consider $K\in \mathcal T_h$ together with $n\in\mathbb N$ such that $n>0$. Assume that $q\geq n+1$, $p=2n+1$ and $\mathbf{g}_K=(x_K,y_K)\in K$ is the centroid of $K$. Finally suppose the coefficient $\epsilon \in\mathcal C^{q-1}(K)$. Consider a solution $u$ of the scalar wave equation \eqref{helm}, satisfying $u\in \mathcal C^{n+1}(K)$. Then the function $u_a\in GPW_\kappa^{p,q}(K)$ and the constant $C(\kappa,K,n)$ provided by Theorem \ref{th:u-ua} also satisfy: for all $\mathbf{m}\in K$ and all $j$ such that $0\leq j \leq k$ \e{\label{eq:derumua}
\left| \partial_x^{j}\partial_y^{k-j}u\left(\mathbf{m}\right)-\partial_x^{j}\partial_y^{k-j}u_a\left(\mathbf{m}\right)\right| \leq C(\kappa,K,n)\frac{(n+1)!}{(n+1-k)!} \left|\mathbf{m}-\mathbf{g}_K\right|^{n+1-k} \left\| u \right\|_{\mathcal C^{n+1}(K)} , } where $k\leq n$. Moreover there is a constant $\mathfrak C(\kappa,K,n)$ such that for all $\mathbf{m}\in K$ \e{\label{eq:opua}
\left| [\Delta +\kappa^2\epsilon]u_a\left(\mathbf{m}\right)\right| \leq \mathfrak C(\kappa,K,n) \left|\mathbf{m}-\mathbf{g}_K\right|^{n+1} \left\| u \right\|_{\mathcal C^{n+1}(K)} . } \end{theorem} \begin{proof}
The interpolation property of GPWs for any derivative of $u$ directly stems from the Taylor expansion of $u-u_{a}$, exactly as for the gradient and this proves \eqref{eq:derumua}.
The design of GPWs directly yields that for all $l$ such that $1\leq l \leq p$ the corresponding GPW satisfies $$
\left| [\Delta +\kappa^2\epsilon]\varphi_{l} \right| \leq C_{l} \left|\mathbf{m}-\mathbf{g}_K\right|^{q }. $$
Moreover $\displaystyle u_a= \sum_{l=1}^{2n+1} \mathsf X_l \varphi_l$ and it was already noticed in \cite{IG2015} that $|\mathsf X_l|\leq C(\kappa,K,n)\|u\|_{\mathcal C^{n+1}}$. As a result $$
\left| [\Delta +\kappa^2\epsilon]u_a\left(\mathbf{m}\right)\right|
=\left| \sum_{l=1}^{2n+1} \mathsf X_l [\Delta +\kappa^2\epsilon]\varphi_l \right|
\leq C(\kappa,K,n)\|u\|_{\mathcal C^{n+1}}\left|\mathbf{m}-\mathbf{g}_K\right|^{q } \sum_{l=1}^{2n+1} C_{l} $$ and so, since $q\geq n+1$, \eqref{eq:opua} holds with $\displaystyle \mathfrak C (\kappa,K,n) =\mathrm{diam}(K)^{q-n-1}\,C(\kappa,K,n) \sum_{l=1}^{2n+1} C_{l} $. \end{proof}
The last step is to build, from the local function spaces $GPW_\kappa^{p,q}(K)$, a set of GPWs on the whole domain $\Omega$: the GPW space $V_h$ is naturally defined as $ \prod_{K\in T_h}GPW_\kappa^{p,q}(K)$. Note that $p$ and $q$ can vary from element to element.
As a result, we have the following estimate for $[\Delta +\kappa^2\epsilon] (u-v_h)$, where $u$ is a smooth solution of Equation \eqref{helm}: \begin{lemma}\label{helm-est} Suppose that $u$ is a solution of the scalar wave equation \eqref{helm} which belongs to $\mathcal C^{n+1}(\Omega)$. Then the function $v_h\in V_h=\prod_{K\in T_h}GPW_\kappa^{p,q}(K)$, provided element by element by Theorem \ref{th:u-ua}, satisfies:
there exists a constant $C$ independent of $h$ such that \[ \Vert [\Delta +\kappa^2\epsilon] (u-v_h) \Vert_{L^2(\Omega)} \leq C\mbox{area}(\Omega)^{1/2}\left(\max_{K\in T_h} h_K\right)^q\Vert u\Vert_{{\mathcal C}^q(\Omega)}, \] where $h_K$ is the diameter of $K$. \end{lemma}
\subsection{Approximation of linear polynomials} The result \cite[Lemma 3.10]{git09} addresses the approximation of bi-variate polynomials of degree 1 by classical plane waves, and here we are interested in the approximation of bi-variate polynomials of degree 1 by GPWs. This result is needed to apply the $h$-based analysis of \cite{git09} or \cite{kapita14}. \begin{lemma}\label{linfun} Consider the reference element $\hat K= [0,1]^2$. Suppose $n\in\mathbb N$ is such that $n\geq 2$. For $p=2n+1$ and $q\geq n+1$ there is a constant $C$ independent of $\kappa$ (but not of $p$) such that $$
\inf_{v\in GPW_\kappa^{p,q}\left(\hat K\right)} \|f-v\|_{0,\hat K}
\leq C\kappa^2|\epsilon(x_{\hat K},y_{\hat K})|\|f\|_{0,\hat K},\ \forall f\in \mathcal P_1(\mathbb R^2). $$ \end{lemma} \begin{remark}The proof strongly relies on the fact that the GPW space is designed with the normalization $(\lambda_{1,0}^k,\lambda_{0,1}^k) = \imath \kappa\sqrt{ \epsilon(x_{\hat K},y_{\hat K})}(\cos\theta_k,\sin\theta_k)$, for equi-spaced angles $\theta_k$, for $1\leq k \leq p$.\end{remark} \begin{proof} For the sake of clarity we define $\tilde \kappa = \kappa\sqrt{ \epsilon(x_{\hat K},y_{\hat K})}$. Consider $$ b_j :=(i\tilde \kappa)^{-[j/2]}\sum_{k=1}^p \alpha_k^{(j)} \varphi_k $$ where the $\varphi_k$s are the GPWs, $\alpha_k^{(j)} = \left((\mathsf{M}_p)^{-1}\right)_{kj}$, $p=2m+1$ (so that $m=n$, the letter $n$ being reused below as a summation index) and $\mathsf{M}_p\in\mathbb R^{p,p}$ is defined for $1\leq k,l \leq p$ by $$ (\mathsf M_p)_{kl} :=\left\{ \begin{array}{l} 1\text{ for }l = 1 \\\displaystyle \cos\left(\frac{l}{2}\theta_k\right)\text{ for }l \text{ even} \\\displaystyle \sin\left(\frac{l-1}{2}\theta_k\right)\text{ for }l \geq 3 \text{ odd} \end{array} \right. $$ We know that $$ \sum_{k=1}^p \alpha_k^{(j)} \varphi_k = \sum_{n=0}^\infty\frac{1}{n!}\sum_{k=1}^p \alpha_k^{(j)} (P_k(x,y))^n , $$ and, if $\hat{\mathbf x} = (\hat x,\hat y)=(x-x_{\hat K},y-y_{\hat K})$, then $$ \begin{array}{rl} (P_k(x,y))^n &\displaystyle= \left(
\begin{pmatrix} \lambda_{1,0}^k\\ \lambda_{0,1}^k \end{pmatrix} \cdot \begin{pmatrix} \hat x \\ \hat y \end{pmatrix} +\tilde \kappa^2 f_{k,q}({\mathbf{x}}) \right)^n \text{ see Lemma \ref{lem:Pk}} \\&\displaystyle= \left(
\begin{pmatrix} \lambda_{1,0}^k\\ \lambda_{0,1}^k \end{pmatrix} \cdot \begin{pmatrix} \hat x \\ \hat y \end{pmatrix} \right)^n +\tilde \kappa^n\sum_{j=0}^{n-1}\begin{pmatrix} n\\ j \end{pmatrix} \imath^j\tilde \kappa^{n-j}\left(
\begin{pmatrix} \lambda_{1,0}^k\\ \lambda_{0,1}^k \end{pmatrix} \cdot \begin{pmatrix} \hat x \\ \hat y \end{pmatrix} \right)^j f_{k,q}({\mathbf{x}})^{n-j} \\&\displaystyle= \left(
\begin{pmatrix} \lambda_{1,0}^k\\ \lambda_{0,1}^k \end{pmatrix} \cdot \begin{pmatrix} \hat x \\ \hat y \end{pmatrix} \right)^n +\tilde \kappa^{n+1}g_{k,q}({\mathbf{x}}), \end{array} $$
the function $g_{k,q}$, defined as $g_{k,q}({\mathbf{x}}) = \sum_{j=0}^{n-1}\begin{pmatrix} n\\ j \end{pmatrix} \imath^j\tilde \kappa^{n-1-j}\left(
\begin{pmatrix} \lambda_{1,0}^k\\ \lambda_{0,1}^k \end{pmatrix} \cdot \begin{pmatrix} \hat x \\ \hat y \end{pmatrix} \right)^j f_{k,q}({\mathbf{x}})^{n-j}$, being uniformly bounded for $\mathbf x =(x,y)\in \hat K$ as $\tilde \kappa\rightarrow 0$.
We assume that $(\lambda_{1,0}^k,\lambda_{0,1}^k) = \imath \tilde \kappa(\cos\theta_k,\sin\theta_k)$. Define $$ K_{ 0 }^n(\mathbf{x}) = \frac{1}{2\pi}\int_{-\pi}^{\pi} \left(\begin{pmatrix} \lambda_{1,0}^k\\ \lambda_{0,1}^k \end{pmatrix} \cdot \begin{pmatrix} \hat x \\\hat y \end{pmatrix}\right)^n d\theta \quad \text{ and }\forall 1\leq j\leq n: $$ $$ K_{2j }^n(\mathbf{x}) = \frac{1}{\pi}\int_{-\pi}^{\pi} \left(\begin{pmatrix} \lambda_{1,0}^k\\ \lambda_{0,1}^k \end{pmatrix} \cdot \begin{pmatrix} \hat x \\ \hat y \end{pmatrix}\right)^n \cos(j\theta)d\theta \text{, } K_{2j+1}^n (\mathbf{x})= \frac{1}{\pi}\int_{-\pi}^{\pi} \left(\begin{pmatrix} \lambda_{1,0}^k\\ \lambda_{0,1}^k \end{pmatrix} \cdot \begin{pmatrix} \hat x \\ \hat y \end{pmatrix}\right)^n \sin(j\theta)d\theta. $$ The leading order term of $(P_k(x,y))^n$ as $\tilde \kappa\rightarrow 0$ is $\left(
\begin{pmatrix} \lambda_{1,0}^k\\ \lambda_{0,1}^k \end{pmatrix} \cdot \begin{pmatrix} \hat x \\ \hat y \end{pmatrix} \right)^n$. As shown in \cite{git09} , it can be written $ \left(
\begin{pmatrix} \lambda_{1,0}^k\\ \lambda_{0,1}^k \end{pmatrix} \cdot \begin{pmatrix} \hat x \\ \hat y \end{pmatrix} \right)^n =(\imath\tilde\kappa)^n\sum_{l=1}^{2n+1}K_l^n(\mathbf{x})\mathsf{M}_{lk} $ so that $$ \begin{array}{l}\displaystyle \sum_{n=0}^\infty\frac{1}{n!}\sum_{k=1}^p \alpha_k^{(j)} \left(
\begin{pmatrix} \lambda_{1,0}^k\\ \lambda_{0,1}^k \end{pmatrix} \cdot \begin{pmatrix} \hat x \\ \hat y \end{pmatrix} \right) ^n \\\displaystyle = \sum_{n=0}^\infty\frac{(\imath\tilde \kappa)^n}{n!} \sum_{l=1}^{2n+1}K_l^n(\mathbf{x}) \sum_{k=1}^p \alpha_k^{(j)}\mathsf{M}_{lk}\\\displaystyle = \sum_{n=0}^m\frac{(\imath\tilde \kappa)^n}{n!} \sum_{l=1}^{2n+1}K_l^n(\mathbf{x}) \sum_{k=1}^p \alpha_k^{(j)}\mathsf{M}_{lk}+ \sum_{n=m+1}^\infty\frac{(\imath\tilde \kappa)^n}{n!} \sum_{l=1}^{2n+1}K_l^n(\mathbf{x}) \sum_{k=1}^p \alpha_k^{(j)}\mathsf{M}_{lk}, \end{array} $$ and since $K_j^n=0$ if $[j/2]>n$ and $\sum_{k=1}^p \alpha_k^{(j)}\mathsf{M}_{lk}=\delta_{jl}$ for $1\leq l,j\leq p$, it yields $$ \begin{array}{rl}\displaystyle \sum_{n=0}^\infty\frac{1}{n!}\sum_{k=1}^p \alpha_k^{(j)} \left(
\begin{pmatrix} \lambda_{1,0}^k\\ \lambda_{0,1}^k \end{pmatrix} \cdot \begin{pmatrix} \hat x \\ \hat y \end{pmatrix} \right) ^n &\displaystyle = \sum_{n=[j/2]}^m\frac{(\imath\tilde \kappa)^n}{n!}\left(K_j^n(\mathbf{x})+ \sum_{l=p+1}^{2n+1}K_l^n(\mathbf{x}) \sum_{k=1}^p \alpha_k^{(j)}\mathsf{M}_{lk}\right)\\&\displaystyle+ \sum_{n=m+1}^\infty\frac{(\imath\tilde \kappa)^n}{n!} \left(K_j^n(\mathbf{x})+ \sum_{l=p+1}^{2n+1}K_l^n(\mathbf{x}) \sum_{k=1}^p \alpha_k^{(j)}\mathsf{M}_{lk}\right)\\ &\displaystyle = \sum_{n=[j/2]}^m\frac{(\imath\tilde \kappa)^n}{n!}K_j^n(\mathbf{x})+ \tilde \kappa^{m+1}R_j(\tilde \kappa,\mathbf{x}) \end{array} $$ since $l/2\geq m+1\Rightarrow [K_l^n = 0$ for $n\leq m]$, and where the remainder function $R_j$ defined by $$ R_j(\kappa,\mathbf{x}) = \frac{1}{\tilde \kappa^{m+1}} \sum_{n=m+1}^\infty\frac{(\imath\tilde \kappa)^n}{n!} \left(K_j^n(\mathbf{x})+ \sum_{l=p+1}^{2n+1}K_l^n(\mathbf{x}) \sum_{k=1}^p \alpha_k^{(j)}\mathsf{M}_{lk}\right) $$ is uniformly bounded on $\hat K$. So
$$ \sum_{k=1}^p \alpha_k^{(j)} \varphi_k = \sum_{n=[j/2]}^m \left( \frac{(\imath\tilde \kappa)^n}{n!}K_j^n(\mathbf{x}) +\tilde \kappa^{n+1}g_{k,q}(\mathbf{x}) \right) +\tilde \kappa^{m+1}R_j(\tilde \kappa,\mathbf{x}) $$ Since $b_j(\mathbf{x}) =(i\tilde \kappa)^{-[j/2]}\sum_{k=1}^p \alpha_k^{(j)} \varphi_k $ it follows that $$ \lim_{\tilde \kappa\rightarrow 0} b_j(\mathbf{x}) = \frac{1}{{[j/2]}!}K_j^{[j/2]}(\mathbf{x}). $$ As a consequence, the definition of $K_j^{[j/2]}$ combined with $((\lambda_{1,0}^k,\lambda_{0,1}^k) = \imath \tilde \kappa(\cos\theta_k,\sin\theta_k))$ leads to $$ b_1(\mathbf{x}) = 1 + O(\tilde \kappa^2),\ b_2(\mathbf{x}) = x-x_{\hat K} + O(\tilde \kappa^2),\ b_3(\mathbf{x}) = y-y_{\hat K} + O(\tilde \kappa^2). $$ \end{proof}
To complete the proof of Lemma \ref{linfun}, we need to prove the following result. \begin{lemma}\label{lem:Pk}
Suppose $n\in\mathbb N$ is such that $n\geq 2$. For $p=2n+1$ and $q\geq n+1$, consider the basis of $p$ functions $\varphi_k\in GPW_\kappa^{p,q}\left(\hat K\right)$ approximating \eqref{helm} at the point $\mathbf g_{\hat K}=(x_{\hat K},y_{\hat K})$ at order $q$, and, for all $k$ such that $1\leq k\leq p$, the corresponding polynomials $\displaystyle P_k(x,y)=\sum_{0\leq i+j\leq q+1} \lambda_{i,j}^k (x-x_{\hat K})^i (y-y_{\hat K})^j$ satisfying $\varphi_k=\exp P_k$. These polynomials satisfy $$ P_k(x,y)=
\begin{pmatrix} \lambda_{1,0}^k\\ \lambda_{0,1}^k \end{pmatrix} \cdot \begin{pmatrix} \hat x \\ \hat y \end{pmatrix} +\kappa^2 \epsilon(x_K,y_K) f_{k,q}({\mathbf{x}}) $$ where $\mathbf x = (x,y)$, $(\hat x,\hat y)=(x-x_{\hat K},y-y_{\hat K})$ and the remainder function $f_{k,q}$ is uniformly bounded on $\hat K$. \end{lemma} \begin{proof} The normalization $(\lambda_{1,0}^k,\lambda_{0,1}^k) = \imath \kappa\sqrt{ \epsilon(x_K,y_K)}(\cos\theta_k,\sin\theta_k)$ implies that $\lu{2}{0}^k=0$ so that the induction formula \eqref{eq:IF} reads: \begin{equation}\left\{ \begin{array}{cl} \lu{2}{j}^k &\displaystyle
= -\frac{1}{2}\frac{\kappa^2\partial_y^j\epsilon(x_K,y_K)}{j!} \\ \lu{3}{j}^k &\displaystyle
= -\frac{1}{6}\left( \frac{\kappa^2\partial_x\partial_y^j\epsilon(x_K,y_K)}{j!} + 4\lu{2}{j}^k\lambda_{1,0}^k\right)\\ \lu{i+2}{j}^k &\displaystyle
= -\frac{1}{(i+2)(i+1)}\Bigg( \frac{\kappa^2\partial_x^i\partial_y^j\epsilon(x_K,y_K)}{i!j!} + (j+2)(j+1)\lu{i}{j+2}^k\lambda_{1,0}^k \qquad \forall i>1,\\
&\displaystyle \phantom =
+ \sum_{k=1}^{i-1}\sum_{l=0}^{j}
(i-k+1)(k+1)\lu{i-k+1}{j-l}^k\lu{k+1}{l}^k\\
&\displaystyle \phantom =
+ \sum_{l=1}^{i-1}\sum_{k=0}^{j}
(j-k+1)(k+1)\lu{i-l}{j-k+1}^k\lu{l}{k+1}^k\Bigg) \end{array}\right. \end{equation} This clearly completes the proof by induction. \end{proof}
More precisely, the result needed is the following one, which corresponds to Lemma 3.12 from \cite{git09}. We state it here to specify the GPW parameters, and provide no more than a sketch of the proof since it relies on Lemma \ref{linfun} but not specifically on the basis function set. \begin{corollary} \label{lincor} Suppose that $n\in\mathbb N$ is such that $n\geq 2$. For $p=2n+1$ and $q\geq n+1$, suppose $w_h^c$ is a linear function on a triangle $K$. Then there is a GPW function $w_h\in GPW_\kappa^{p,q}(K)$ such that \[ \Vert w_h^c-w_h\Vert_{L^2(K)}\leq Ch_K^2\Vert w_h^c\Vert_{L^2(K)}. \] \end{corollary} \begin{proof}
By translation and dilation by $1/h_K$ we can map an element $K$ to an element $\tilde{K}\subset \hat{K}= [0,1]^2$. Let $\hat{w}_h^c$ denote the transformed polynomial and note that $\hat{w}_h^c\in \mathcal P_1(\mathbb R^2)$. Let $\hat P$ be the $L^2(\hat K)$-projection onto the GPW space $GPW_{\hat{\kappa}}^{p,q}(\hat{K})$ where $\hat{\kappa} = h_K \kappa$. Applying Lemma \ref{linfun}, we get { \begin{equation}\label{eq:est}
\| (I-\hat P )\hat{w}_h^c\|_{0,\hat K}
\leq C\hat{\kappa}^2|\epsilon(x_{\hat K},y_{\hat K})| \| \hat{w}_h^c \|_{0,\hat K}. \end{equation}} The conclusion then follows by transforming back to $K$ using the fact that the transformation from a triangle $K$ to the reference triangle $\tilde K$ changes the frequency into $\hat \kappa = h_K \kappa$ {
and the bound $\| \hat{w}_h^c \|_{0,\hat K}\leq C \| \hat{w}_h^c \|_{0,\tilde K} $ with $C$ independent of $\tilde{K}$ which holds because $ \hat{w}_h^c$ is a linear polynomial and the mesh is regular.} \end{proof}
\section{Error Estimates}\label{EE} In this section we start by establishing a straightforward error estimate using the coercivity and boundedness of the sesquilinear form $B_h(\cdot,\cdot)$, and then prove convergence in the global $L^2$ norm.
We define the obvious modification of the DG norm from \cite{hmp13} by \begin{eqnarray*} \Vert u\Vert_{DG}^2&=& \frac{1}{\kappa}\Vert \beta^{1/2}\jmp{\nabla_h u}\Vert_{L^2({\cal E}_I)}^2+\kappa\Vert \alpha^{1/2}\jmp{ u}\Vert_{L^2({\cal E}_I)}^2+\frac{1}{\kappa}\Vert \delta^{1/2}\partial_nu\Vert_{L^2({\cal E}_R)}^2\\&&\quad+\kappa\Vert (1-\delta)^{1/2}u\Vert_{L^2({\cal E}_R)}^2+\kappa\Vert \alpha^{1/2}u\Vert_{L^2({\cal E}_D)}^2+\frac{1}{\kappa^2}\Vert \gamma^{1/2}(\Delta_h u+\kappa^2\epsilon u)\Vert_{L^2(\Omega)}^2 \end{eqnarray*} where $\Delta_h$ is the Laplacian defined piecewise element by element. Obviously this is a semi-norm, but due to the new term it is also a norm, even on functions that do not exactly satisfy the Helmholtz equation. \begin{lemma} The semi-norm $\Vert \cdot\Vert_{DG}$ is a norm. \end{lemma} \begin{proof} Suppose $\Vert u\Vert_{DG}=0$; then $u$ satisfies $\Delta u+\kappa^2\epsilon u=0$ element-wise, and the normal derivatives and function values have no jump across interior edges. So $\Delta u+\kappa^2\epsilon u=0$ in $\Omega$. In addition the Cauchy data on $\Gamma_R$ vanishes and so $u=0$ in $\Omega$. Hence $\Vert \cdot\Vert_{DG}$ is a norm. \end{proof}
We also need a new DG+ norm: \begin{eqnarray*} \Vert u\Vert_{DG+}^2&=&\Vert u\Vert_{DG}^2+\kappa\Vert \beta^{-1/2}\avg{u}\Vert_{L^2({\cal E}_I)}^2+\frac{1}{\kappa}\Vert \alpha^{-1/2}\avg{\nabla_h u}\Vert_{L^2({\cal E}_I)}^2+ \kappa\Vert\delta^{-1/2} u\Vert^2_{L^2({\cal E}_R)}\\&&\quad+\frac{1}{\kappa}\Vert \alpha^{-1/2}\partial_n u\Vert^2_{L^2({\cal E}_D)}+\kappa^2\Vert\gamma^{-1/2} u\Vert_{L^2(\Omega)}^2. \end{eqnarray*} The following estimates hold: \begin{lemma} Under the assumption that $\alpha>0$, $\beta>0$, $1>\delta>0$ and $\gamma>0$ in the generalized TDG, and provided $u$ is such that $\Vert u\Vert_{DG}$ is finite, \[ \Im(B_h(u,u))\geq \Vert u\Vert_{DG}^2. \] Provided $\Vert u\Vert_{DG+}$ and $\Vert v\Vert_{DG}$ are finite, there exists a constant $C$ independent of $\kappa$, $u$ and $v$ such that \[
|B_h(u,v)|\leq C \Vert u\Vert_{DG+}\Vert v\Vert_{DG}. \] \end{lemma} \begin{proof} The coercivity estimate follows in the usual way by considering $\Im (B_h(u,u))$ and using the assumption that $\epsilon$ is real \cite{HMP11}.
To obtain the desired continuity, we integrate the $\nabla_h u\cdot\nabla_h v$ term in the definition of $A_h(\cdot,\cdot)$ by parts to obtain, for any $u,v\in V_h$, \begin{eqnarray} A_h(u,v)&=&-\int_\Omega u(\overline{\Delta_h v+\kappa^2\epsilon v})\,dA+\int_{{\cal E}_I} \avg{u}\jmp{\nabla_h \overline{v}}\,ds-\int_{{\cal E}_I}\avg{\nabla_h u}\jmp{\overline{v}}\,ds\nonumber \\&& +\int_{{\cal E}_R}(1-\delta) u{\partial_n \overline{v}}\,ds-\frac{1}{{\rm{}i}\kappa}\int_{{\cal E}_I}\beta\jmp{\nabla_h u}\jmp{\nabla_h \overline{v}}\,ds\nonumber\\ &&+{\rm{}i}\kappa\int_{{\cal E}_I}\alpha\jmp{u}\jmp{\overline{v}}\,ds-\frac{1}{{\rm{}i}\kappa}\int_{{\cal E}_R}\delta{\partial_n u}{\partial_n \overline{v}} \,ds\nonumber\\ &&+{\rm{}i}\kappa\int_{{\cal E}_R}(1-\delta)u\overline{v}\,ds-\int_{{\cal E}_R}\delta {\partial_n u}\,\overline{v}\,ds\label{UWVF}\\ &&-\int_{{\cal E}_D}{\partial_n u}\overline{v}\,ds+i\kappa\int_{{\cal E}_D}\alpha u\overline{v}\,ds. \nonumber \end{eqnarray} The result now follows from the definition of $B_h(u,v)$ and the Cauchy-Schwarz inequality.
To obtain an order estimate, we need to make specific choices of the parameters $\alpha$, $\beta$, $\delta$ and $\gamma$. There are several choices in the literature depending on the precise setting of the problem (see for example \cite{buf07,git09,hmp13,hmp15}). In this paper we shall make the classical UWVF choice \cite{buf07}: \begin{equation} \alpha=\beta=\delta=1/2,\label{p_uwvf} \end{equation} so that we can use results from~\cite{kapita14}. Then we choose for $\gamma$ \[ \gamma=\gamma_0 h_K^r \] where $\gamma_0$ is a constant and $r\geq 0$. We shall examine the role of $r$ later.
Using the estimates from Section~\ref{GPW} we can then prove the following error estimate: \begin{theorem}\label{th:u-uh} Suppose $n\in\mathbb N$ is such that $n\geq 2$ and consider $p=2n+1$ and $q\geq n+1$. Suppose $V_h$ is formed from $q$th order GPWs element by element using $p$ directions. Then the solution $u_h\in V_h$ of (\ref{Bhelm}) exists for all $h>0$ independent of $\kappa$ and it satisfies the following estimate with constant $C$ independent of $\kappa$, $u$, and $u_h$: \[ \Vert u-u_h\Vert_{DG}\leq C (h^{n-1/2}+h^{q+r/2}). \] Here $C$ depends on the $\Vert u\Vert_{{\cal C}^{\max(n+1,q)}(\Omega)}$ norm of $u$. \end{theorem} \begin{remark} Since we need $q\geq n+1$ in the GPW theory, we see that the choice $q=n+1$ guarantees that the approximation of the Helmholtz equation is of high enough order. \end{remark}
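To make the interplay between the two terms in this estimate explicit, note that under the hypotheses of the theorem $q+r/2\geq n+1>n-1/2$, so that for $h\leq1$ \[ \Vert u-u_h\Vert_{DG}\leq C\big(h^{n-1/2}+h^{q+r/2}\big)\leq 2C\,h^{n-1/2}; \] in other words, once $q\geq n+1$ the rate in the DG norm is governed by the $h^{n-1/2}$ term and not by the order $q$ to which the GPWs satisfy the equation, which is the point of the remark above.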
\begin{proof} We pick $v\in V_h$ in equation (\ref{ceatype}) element by element to be the approximation by GPWs denoted by $u_a$ in Theorem \ref{th:u-ua2}. We now need to estimate each term in $\Vert u-v\Vert_{DG+}$. Using Lemma~\ref{helm-est}, the new term satisfies \begin{equation}\label{Luest} \Vert \gamma^{1/2}(\Delta_h (u-v)+\kappa^2\epsilon (u-v))\Vert_{L^2(\Omega)} = \Vert \gamma^{1/2}(\Delta_h v+\kappa^2\epsilon v)\Vert_{L^2(\Omega)} \leq Ch^{q+r/2} \Vert u\Vert_{{\cal C}^q}. \end{equation} The remaining terms can be estimated using Theorem \ref{th:u-ua2}. For example \begin{eqnarray*} \Vert \alpha^{-1/2}\avg{\nabla_h (u-v)}\Vert^2_{L^2({\cal E}_I)}&\leq &C \sum_K\Vert \alpha^{-1/2}\nabla_h (u-v)\Vert_{L^2(\partial K)}^2\\ &\leq&C\sum_K \max_{e\in\partial K}\alpha^{-1}(e)\Vert \nabla_h (u-v)\Vert_{L^2(\partial K)}^2\\ &\leq&C\sum_K \max_{e\in\partial K}\alpha^{-1}(e)\left[h_K^{-1}\Vert \nabla_h (u-v)\Vert_{L^2(K)}^2+h_K\Vert\nabla\nabla (u-v)\Vert_{L^2(K)}^2\right] \end{eqnarray*} where we have used the standard trace estimate on $\partial K$. Using Theorem \ref{th:u-ua2} with $k=2$ and Theorem \ref{th:u-ua} we obtain \begin{eqnarray*} \Vert \alpha^{-1/2}\avg{\nabla_h (u-v)}\Vert^2_{L^2({\cal E}_I)}&\leq &C\sum_K \max_{e\in\partial K}\alpha^{-1}(e)h^{2n-1}_K h_K^2\Vert u\Vert^2_{{\cal C}^{n+1}(K)}\\&\leq& C\max_{e\in {\cal E}_I}\alpha^{-1}(e)h^{2n-1}\Vert u\Vert_{{\cal C}^{n+1}(\Omega)}^2. \end{eqnarray*} Of course under our assumptions $\alpha=1/2$. The remaining terms are estimated in the same way. \end{proof} We now use the standard duality approach to prove an $L^2(\Omega)$ norm estimate on the error. \begin{theorem}\label{th:L2cv} Suppose we choose $r=3$ in the penalty parameter $\gamma$, $p=2n+1$, $n\geq 2$ and $q=n+1$. Then there exists a constant $C$ depending on $\kappa$ but independent of $h$ such that \[ \Vert u-u_h\Vert_{L^2(\Omega)}\leq C h^{s} \Vert u-u_h\Vert_{DG} \] for some $s$ with $0<s<1/2$ depending on $\Omega$ (given in \cite[Theorem 2.3]{hmp13}). \end{theorem} Under best possible conditions we then have the following convergence estimate: \begin{corollary}\label{uuhcor} Suppose $u$ is a smooth solution of the Helmholtz equation, that $r=3$, $p=2n+1$, $n\geq 2$ and $q=n+1$. Then \[ \Vert u-u_h\Vert_{L^2(\Omega)}\leq C h^{n+s-1/2}. \] \end{corollary} \begin{remark} Since $s\leq 1/2$ the maximum rate of convergence predicted for the method assuming a smooth solution and best regularity is $O(h^n)$. \end{remark} \begin{proof}
Define the dual variable $z\in H^1(\Omega)$ to satisfy \begin{eqnarray*} \Delta z+\kappa^2\epsilon z&=& u-u_h\mbox{ in }\Omega,\\ {\partial_n z}-i\kappa z&=&0\mbox{ on }\Gamma_R,\\
z&=&0\mbox{ on }\Gamma_D. \end{eqnarray*} Under the assumptions on the domain, $z\in H^{3/2+s}(\Omega)$ with $s>0$ as in the statement of the theorem \cite{hmp13}, and $z$ is sufficiently regular that \[ A_h(\xi,z)=\int_\Omega \xi\overline{(u-u_h)}\,dA \] for all test functions $\xi$ that are $H^2$ piecewise smooth. This follows from (\ref{UWVF}). Hence, by the definition of $B_h$ \[ B_h(\xi,z)=\frac{1}{\kappa^2}\int_\Omega \gamma (\Delta_h\xi+\kappa^2\epsilon \xi)\overline{(u-u_h)}\,dA+\int_\Omega \xi\overline{(u-u_h)}\,dA \] so choosing $\xi=u-u_h$, letting $z_h\in V_h$ be arbitrary and using the Galerkin orthogonality $B_h(u-u_h,z_h)=0$, \[ \Vert u-u_h\Vert_{L^2(\Omega)}^2=B_h(u-u_h,z-z_h)-\frac{1}{\kappa^2}\int_\Omega \gamma (\Delta_h(u-u_h)+\kappa^2\epsilon (u-u_h))\overline{(u-u_h)}\,dA. \] The second term on the right hand side can be estimated using the Cauchy-Schwarz and arithmetic-geometric mean inequality to give \begin{eqnarray*}
\frac{1}{\kappa^2}\left|\int_\Omega \gamma (\Delta_h(u-u_h)+\kappa^2\epsilon (u-u_h))\overline{(u-u_h)}\,dA\right| &\leq &\frac{1}{\kappa}\Vert u-u_h\Vert_{DG}\Vert \gamma^{1/2} (u-u_h)\Vert_{L^2(\Omega)}\\&\leq& \frac{\gamma_{max}}{2\kappa^2}\Vert u-u_h\Vert_{DG}^2+\frac{1}{2}\Vert u-u_h\Vert_{L^2(\Omega)}^2, \end{eqnarray*} where $\gamma_{max}=\max_{x\in\Omega}\gamma=O(h^r)$.
To estimate $B_h(u-u_h,z-z_h)$ we integrate the grad-grad term in $A_h(u,v)$ by parts onto $u$ to obtain \begin{eqnarray} A_h(u,v)&=& -\int_{\Omega}(\Delta_h u+\kappa^2\epsilon u)\overline v\,dA+\int_{{\cal E}_I}\left(\jmp{\nabla_h u}\avg{\overline{v}} -\jmp{ u}\cdot\avg{\nabla_h\overline{v}}\right)\,ds-\frac{1}{i\kappa}\int_{{\cal E}_I}\beta\jmp{\nabla_h u}\jmp{\nabla_h\overline{v}}\,ds\nonumber\\&& +{i\kappa}\int_{{\cal E}_I}\alpha\jmp{ u}\cdot\jmp{\overline{v}}\,ds-\int_{{\cal E}_R}\frac{\delta}{i\kappa} (i\kappa u-{\partial_n u}){\partial_n \overline{v}}\,ds\nonumber\\&& +\int_{{\cal E}_R}(1-\delta)({\partial_n u}-i\kappa u)\overline{v}\,ds+\int_{{\cal E}_D}u(i\kappa\alpha \overline{v}-\partial_n\overline{v})\,ds. \label{Ahap}\end{eqnarray} Using this in the definition of $B_h(u,v)$ shows that \[
|B_h(u,v)|\leq C\Vert u\Vert_{DG}\Vert v\Vert_{DG+} \] where $C$ is independent of $u$ and $v$ so that we have the estimate \begin{equation} \Vert u-u_h\Vert_{L^2(\Omega)}^2\leq C \Vert u-u_h\Vert_{DG}\Vert z-z_h\Vert_{DG+} +\frac{\gamma_{max}}{2\kappa^2}\Vert u-u_h\Vert_{DG}^2. \label{eqest} \end{equation} It is now necessary to choose $z_h$. Following the proof of \cite[Theorem 5.6]{kapita14}, let $z_h^c$ denote the continuous piecewise linear finite element interpolant of $z$. We choose $z_h\in V_h$ to be the GPW approximation of $z_h^c$ constructed in Lemma~\ref{lincor}. Of course \[ \Vert z-z_h\Vert_{DG+}\leq \Vert z-z^c_h\Vert_{DG+}+\Vert z_h^c-z_h\Vert_{DG+} \] and it remains to estimate each term. Estimates from the proof of \cite[Theorem 5.6]{kapita14} show that on each interior edge in the mesh \begin{eqnarray*} \Vert\alpha^{-1/2}\avg{\nabla_h(z-z^c_{h})}\Vert_{L^2(e)} &\leq& C\sum_{j=1}^2h_{K_j}^{s}\vert z\vert_{H^{3/2+s}(K_j)}\\ \Vert\beta^{-1/2}\avg{z-z^c_{h}}\Vert_{L^2(e)}&\leq&C\sum_{j=1}^2h^{1+s}_{K_j}\vert z\vert_{H^{3/2+s}(K_j)}, \end{eqnarray*} with corresponding results for the jumps in the above quantities and for boundary terms. In addition \begin{eqnarray*} \Vert \gamma^{1/2}(\Delta_h (z-z_h^c)+\kappa^2\epsilon (z-z_h^c))\Vert_{L^2(\Omega)} &=& \Vert \gamma^{1/2}(u-u_h-(\Delta_h z_h^c+\kappa^2\epsilon z_h^c))\Vert_{L^2(\Omega)}\\ &\leq& Ch^{r/2}\Vert u-u_h\Vert_{L^2(\Omega)}+ \Vert \gamma^{1/2}(\Delta_h z_h^c+\kappa^2\epsilon z_h^c)\Vert_{L^2(\Omega)}. \end{eqnarray*} On an element $K$ we can use the regularity of the mesh to establish local inverse estimates and prove: \begin{eqnarray*} && \Vert \gamma^{1/2}\Delta_h z_h^c\Vert_{L^2(K)}\leq Ch_K^{r/2-1}\Vert z_h^c\Vert_{H^1(K)}\\ & \leq &Ch_K^{r/2-1}(\Vert z_h^c-z\Vert_{H^1(K)}+\Vert z\Vert_{H^1(K)})\leq Ch_K^{r/2-1}\Vert z\Vert_{H^{3/2+s}(K)}.
\end{eqnarray*} Proceeding similarly for the lower order term, we conclude that provided $r/2>1$ we have \[ \Vert \gamma^{1/2}(\Delta_h (z-z_h^c)+\kappa^2\epsilon (z-z_h^c))\Vert_{L^2(\Omega)}\leq Ch^{r/2-1}\Vert z\Vert_{H^{3/2+s}(\Omega)}. \] In addition we must estimate \begin{eqnarray*} \Vert \gamma^{-1/2}(z-z_h^c)\Vert^2_{L^2(\Omega)}&=&\sum_K\int_K\gamma^{-1}(z-z_h^c)^2\,dA\leq C\sum_Kh_K^{3+2s-r}\Vert z\Vert_{H^{3/2+s}(K)}^2\\ &\leq& Ch^{3+2s-r}\Vert z\Vert_{H^{3/2+s}(\Omega)}^2. \end{eqnarray*} Taken together, if $3+2s\geq r\geq 2$ we have \[
\Vert z-z^c_h\Vert_{DG+}\leq C(h^{r/2-1}+h^{3/2+s-r/2})\Vert z\Vert_{H^{3/2+s}(\Omega)}
\]
A good choice is then $r=3$ since in that case $r/2-1\geq s$ and using the a priori estimate for $z$ from \cite[Theorem 2.3]{hmp13}
\[
\Vert z-z^c_h\Vert_{DG+}\leq Ch^s\Vert u-u_h\Vert_{L^2(\Omega)}.
\]
It now remains to estimate $\Vert z_h^c-z_h\Vert_{DG+}$. As we have seen there are two troublesome terms: $\Vert \gamma^{-1/2}(z_h^c-z_h)\Vert^2_{L^2(\Omega)}$ and $\Vert \gamma^{1/2}(\Delta_h (z^c_h-z_h)+\kappa^2\epsilon (z_h^c-z_h))\Vert_{L^2(\Omega)}$ with the remaining terms following using Lemma \ref{linfun} as in \cite{kapita14}. Using first a local inverse estimate, then Lemma \ref{linfun}, \[ \Vert \gamma^{1/2}(\Delta_h (z^c_h-z_h)+\kappa^2\epsilon (z_h^c-z_h))\Vert_{L^2(K)}\leq Ch_K^{r/2-2}\Vert z_h^c-z_h\Vert_{L^2(K)} \leq Ch_K^{r/2}\Vert z_h^c\Vert_{L^2(K)} \] so that, squaring and adding, and using the a priori estimate for $z$ from \cite[Theorem 2.3]{hmp13} \begin{eqnarray*} \Vert \gamma^{1/2}(\Delta_h (z^c_h-z_h)+\kappa^2\epsilon (z_h^c-z_h))\Vert_{L^2(\Omega)}&\leq &Ch^{r/2}\Vert z_h^c\Vert_{L^2(\Omega)}\\ &\leq& Ch^{r/2}(\Vert z-z_h^c\Vert_{L^2(\Omega)}+\Vert z\Vert_{L^2(\Omega)})\\&\leq& Ch^{r/2}\Vert u-u_h\Vert_{L^2(\Omega)}. \end{eqnarray*} To estimate the global $L^2$ term, again using Lemma \ref{linfun}, \[ \Vert \gamma^{-1/2}(z_h^c-z_h)\Vert^2_{L^2(K)}\leq Ch_K^{-r/2} \Vert (z_h^c-z_h)\Vert^2_{L^2(K)} \leq Ch_K^{2-r/2} \Vert z_h^c\Vert^2_{L^2(K)} \] Adding over all elements and using the a priori estimate for $z$ from \cite[Theorem 2.3]{hmp13} \[ \Vert \gamma^{-1/2}(z_h^c-z_h)\Vert^2_{L^2(\Omega)}\leq Ch^{2-r/2} \Vert u-u_h\Vert^2_{L^2(\Omega)}. \]
We have thus proved that, when $r=3$ and noting that $0<s<1/2$, we have \[ \Vert z-z_h\Vert_{DG+}\leq Ch^{s}\Vert u-u_h\Vert_{L^2(\Omega)}. \] The desired result now follows by using this estimate in (\ref{eqest}).\end{proof} \section{Numerical Tests}\label{NT} We now test the GPW based RDG method on two test problems with a known solution: Airy waves (linear variation in $\epsilon$) and Weber waves (quadratic variation in $\epsilon$). {In Figs.~\ref{ghqs}, \ref{ghcomb}, \ref{Wcomb} we plot the relative $L^2(\Omega)$ error in the computed solution against a parameter labeled $C/h$. This is computed using the total number of degrees of freedom $N_{\rm{}dof}$ and the number of directions per element $p=2n+1$ as $\sqrt{N_{\rm{}dof}/p}$. We choose this parameter since, in our theorems, convergence is in terms of mesh size rather than total number of degrees of freedom.}
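As an illustration only, the quantities plotted below are straightforward to extract from the raw output; the following short Python sketch (ours, with hypothetical variable names) computes the abscissa $\sqrt{N_{\rm dof}/p}$ and the observed order of convergence between consecutive uniform refinements.
\begin{verbatim}
import numpy as np

def mesh_proxy(n_dof, p):
    # Abscissa "C/h" used in the plots: sqrt(N_dof / p),
    # where p = 2n+1 is the number of directions per element.
    return np.sqrt(np.asarray(n_dof, dtype=float) / p)

def observed_orders(errors, proxies):
    # Estimated order between consecutive refinements. Since the
    # error behaves like h^s and the proxy like 1/h, we have
    # s = log(e_k / e_{k+1}) / log(proxy_{k+1} / proxy_k).
    e = np.asarray(errors, dtype=float)
    x = np.asarray(proxies, dtype=float)
    return np.log(e[:-1] / e[1:]) / np.log(x[1:] / x[:-1])
\end{verbatim}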
\subsection{Airy Waves.}\label{Airy} The simplest example of a spatially dependent refractive index is $\epsilon(x,y)=-y$ on the domain $[-1,1]\times[-1,1]$. We can then choose Dirichlet boundary data (for our theory we need an impedance boundary condition, but the same result holds in this case provided $\kappa$ is not an eigenvalue of the domain) such that the exact solution is \[ u(x,y)=Ai(\kappa^{2/3}y) \] where $Ai(r)$ is the Airy function as shown in the left panel of Fig.~\ref{FL1}. \begin{figure}
\caption{Left: Exact Airy function solution. Right: Initial mesh.}
\label{FL1}
\end{figure}
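This choice of data is consistent with the equation: writing $t=\kappa^{2/3}y$ and using Airy's equation $Ai''(t)=t\,Ai(t)$, a direct computation gives \[ \Delta u+\kappa^{2}\epsilon u=\kappa^{4/3}Ai''(\kappa^{2/3}y)-\kappa^{2}y\,Ai(\kappa^{2/3}y)=\kappa^{2}y\,Ai(\kappa^{2/3}y)-\kappa^{2}y\,Ai(\kappa^{2/3}y)=0, \] so $u$ is indeed an exact solution with $\epsilon(x,y)=-y$.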
This solution is oscillatory for $y<0$ and exponentially decaying for $y>0$. In all the experiments, we make the choice \[ \alpha=\beta=\delta=1/2. \] The initial mesh for the experiments is shown in Fig.~\ref{FL1}, right panel.
\subsubsection{The case $\gamma=h^3$} { Starting with the mesh in Fig.~\ref{FL1} and using uniform refinement, we have computed the error in approximating the Airy function solution when $\gamma=h^3$ and $\kappa=15$. The order of approximation of the Helmholtz equation $q$ is set to 1, 3, 4, and 5 and the corresponding results are respectively marked with diamonds, circles, crosses and squares.
Our goal is to demonstrate that an appropriate choice of $n$ and $q$ can produce high order convergence. Indeed our theory predicts that we should choose $q=n+1$, $n\geq 2$ and expect $O(h^n)$ convergence in the $L^2(\Omega)$ norm since the Airy function solution is smooth and the domain is convex (see Corollary~\ref{uuhcor}). Results are shown in Fig.~\ref{ghqs} and Fig.~\ref{ghcomb}.
{Fig.~\ref{ghqs} (left panel) demonstrates the need for GPWs in order to obtain high order convergence. When $q=1$, the GPWs are plane waves and we see no obvious convergence when $n=1$ (three plane waves per element), but at most third order convergence for $n>1$. This suggests that one approach using an $h$-refinement strategy is to use simple plane waves with $n=3$ to obtain third order convergence under mesh refinement (it appears that $n=3$ offers a useful improvement in accuracy over $n=2$ even if the order of convergence is the same). To obtain fourth or higher order convergence we need true GPWs with $q>1$. }
The case $n=1$ is also interesting. Regardless of $q$ we do not see obvious convergence when $n=1$, whereas for a constant medium the plane wave basis with $n=1$ gives $O(h^2)$ convergence \cite{cessenat_phd}. The variable refractive index seems to require $n>1$. {This is not unreasonable since when $n=1$ the plane waves do not approximate linear polynomials well, and hence may not converge for a solution corresponding to smoothly varying coefficients. When $n>1$, piecewise linears are well approximated by plane waves and so we expect (and see) convergence in this case ~\cite{git09,kapita14}.}
{For $n=2$ regardless of the choice $q=1,\cdots,5$ we see $O(h^{3})$ convergence, and for $n=3$ we get $O(h^4)$ convergence provided $q>1$, while if $q=1$ we get $O(h^2)$ convergence. Finally for $n=4$ we only have $O(h^{3})$ convergence when $q=1$, but $O(h^5)$ convergence for $q>1$ (with some deterioration on the finest mesh when $q=3$ or $q=5$). This deterioration may be due to the usual conditioning problem experienced by plane wave type methods, since when $n=4$ the condition number of the system is roughly $10^{20}$, which may impact convergence. The last cases $n=3,4$ confirm that $q$ must increase as $n$ increases although apparently more slowly than we predict. In addition, with an adequate choice of $q$ we appear to see $O(h^{n+1})$ convergence for $n>1$. This is the same order as has been found experimentally using $2n+1$ plane waves when $\epsilon$ is constant!~\cite{cessenat_phd}. An optimal error analysis in that case is also elusive~\cite{buf07}.}
To try to clarify the best relationship between $q$ and $n$ we focus on the cases $q=n-1$, $q=n$ and $q=n+1$ in the left panel of Fig.~\ref{ghcomb}. Again the case $n=1$ fails to converge regardless of $q$, but otherwise the most reliable convergence is seen when $n=q$.
\begin{figure}
\caption{$L^2(\Omega)$ norm convergence when $\gamma=h^3$ (left panel), $\gamma=h$ (middle panel) and $\gamma=0$ (right panel). The dotted lines in each figure show the order of convergence. }
\label{ghqs}
\end{figure}
\begin{figure}
\caption{$L^2(\Omega)$ norm convergence when $\gamma=h^3$ (left panel), $\gamma=h$ (middle panel) and $\gamma=0$ (right panel). The dotted lines are reference lines showing $O(h^2)$, $O(h^{3})$, $O(h^{4})$, and $O(h^5)$ convergence. }
\label{ghcomb}
\end{figure}
\subsubsection{The case $\gamma=h$.}
In this section we describe numerical results obtained by increasing the parameter $\gamma$ from $h^3$ to $\gamma =h$, and we compare convergence rates obtained for different combinations of the order of approximation of the equation by the basis functions, $q$, and the number of basis functions per element, $p=2n+1$. This choice for $\gamma$ violates the hypothesis of Theorem \ref{th:L2cv}, but should result in greater stability.
Figure \ref{ghqs} (middle panel) displays a series of results for several choices of $q$, while $n$ varies from 1 to 4. The results are broadly similar to those in the left panel of the same figure, although the case $n=4$ shows a slowing of convergence on fine meshes for $q=3$ and $q=4$. In this case it appears that $q=n+1$ is indeed a good choice.
Figure \ref{ghcomb} (middle panel) displays a series of results for $n=q$, $n=q-1$, and $n=q+1$. These convergence studies emphasize the fact that the three choices $n=q$, $n=q-1$, and $n=q+1$ seem to result in approximately the same rate of convergence, suggesting that $q=n-1$ would be the best choice for a fixed value of $n$. The best rates of convergence obtained are 3 for $n=2$, 4 for $n=3$, and 5 for $n=4$. Although the convergence rates are similar to those when $\gamma=h^3$, the accuracy attained on a given mesh is slightly worse. This suggests that choosing $\gamma$ larger than $O(h^3)$ is not useful (other tests, not shown, with $\gamma=1$ and $\gamma=10^3$ show similar results but even worse error at a particular mesh).
\subsubsection{The case $\gamma=0$.} Our theoretical analysis requires that $\gamma>0$ even to obtain convergence in the DG norm but this term requires integration over the interior of all the elements (unlike the standard PWDG or UWVF) and we would prefer to drop it. In addition we saw that $\gamma=O(h^3)$ gives better results than $\gamma=O(h)$ so we want to test if an even smaller penalty is better. In Fig~\ref{ghqs} and \ref{ghcomb} (right panels) we show results when $\gamma=0$.
Overall the results are similar to the previous results. Provided $q$ is chosen large enough, we can obtain $O(h^{n+1})$ convergence. In fact the method now seems slightly more stable!
} \subsection{Weber waves} { In this section we approximate what we term Weber waves. These are solutions of the following problem \[ \Delta u + \kappa^2\left(\frac{x_2^2}{4}-\frac{a}{\kappa}\right)u=0 \] in the domain $\Omega=[-1,1]^2$ subject to \[ u(x_1,x_2)=P_o(\sqrt{\kappa}x_2,a)\mbox{ on }\partial \Omega \] where $w(x_2)=P_o(x_2,a)$ is the odd solution of Weber's differential equation \[ \frac{d^2w}{dx_2^2}+\left(\frac{x_2^2}{4}-a\right)w=0 \]
defined in \cite{ban04} and implemented in \cite{ban_matlab}. We choose $a=5$ and $\kappa=50$, which gives the solution in Fig.~\ref{para_exact}. For this choice of $a$, $\kappa$ and domain, the solution is evanescent for $|x_2|<\sqrt{2/5}$ and oscillatory otherwise. So this example again tests how well GPWs can approximate both traveling and evanescent solutions.
\begin{figure}
\caption{Exact solution for Weber's equation with $a=5$}
\label{para_exact}
\end{figure}
\begin{figure}
\caption{Analogues of Fig.~\ref{ghqs} (left) and Fig.~\ref{ghcomb} (right) for the Weber wave example. }
\label{Wcomb}
\end{figure}
Results are shown in Fig.~\ref{Wcomb}. Broadly the same picture emerges for the Weber example as for the Airy example. We see $O(h^{n+1})$ convergence (this is not completely clear when $n=4$) provided $q$ is large enough. \section{Conclusion} We have provided a modification to the TDG approach that allows solutions of the Helmholtz equation with a piecewise smooth refractive index to be approximated using Generalized Plane Waves. The resulting numerical scheme maintains one advantage of TDG: the number of degrees of freedom per element increases linearly with the order of approximation of the method. But the method loses one advantage of pure TDG: there is now a need to perform numerical integration element by element. This is required because we introduce a new stabilization term, and also because the GPW basis functions are not exact solutions of the adjoint problem.
Theory suggests a choice of parameters that balances polynomial degree with the number of GPWs in the basis element by element. This is examined in detail using Airy's equation to provide an exact solution, and substantiated further by using Weber's example. In the Airy case we have also studied if our new stabilization term is necessary: the numerical results in this one simple case suggest that it can be ignored, but much more testing (for example with less smooth solutions with curved wavefronts) and theoretical backup would be needed to confirm this. Our testing also suggests that our predicted choice of polynomial degree $q=n+1$ may be excessive.
In summary, we have achieved a first theoretical convergence result for GPWs in a TDG setting. Our numerical investigations suggest that the theory is not optimal so far, but do show examples where GPWs can provide accurate solutions to wave propagation problems in which the coefficients are smooth functions of position.}
\end{document}
\begin{document}
\title[Similarity and commutators of matrices over PIDs]{Similarity and commutators of matrices over principal ideal rings}
\author{Alexander Stasinski} \begin{abstract} We prove that if $R$ is a principal ideal ring and $A\in\M_{n}(R)$ is a matrix with trace zero, then $A$ is a commutator, that is, $A=XY-YX$ for some $X,Y\in\M_{n}(R)$. This generalises the corresponding result over fields due to Albert and Muckenhoupt, as well as that over $\Z$ due to Laffey and Reams, and as a by-product we obtain new simplified proofs of these results. We also establish a normal form for similarity classes of matrices over PIDs, generalising a result of Laffey and Reams. This normal form is a main ingredient in the proof of the result on commutators. \end{abstract}
\address{Department of Mathematical Sciences, Durham University, South Rd, Durham, DH1 3LE, UK}
\email{[email protected]}
\maketitle
\section{Introduction}
Let $R$ denote an arbitrary ring. If a matrix $A\in\M_{n}(R)$ is a commutator, that is, if $A=[X,Y]=XY-YX$ for some $X,Y\in\M_{n}(R)$, then $A$ must have trace zero. The problem of when the converse holds goes back at least to Shoda \cite{Shoda} who showed in 1937 that if $K$ is a field of characteristic zero, then every $A\in\M_{n}(K)$ with trace zero is a commutator. Shoda's argument fails in positive characteristic, but Albert and Muckenhoupt \cite{Albert-Muckenhoupt} found another argument valid for all fields. The first result for rings which are not fields was obtained by Lissner \cite{Lissner} who proved that if $R$ is a principal ideal domain (PID) then every $A\in\M_{2}(R)$ with trace zero is a commutator. A motivation for Lissner's work was the relation with a special case of Serre's problem on projective modules over polynomial rings, nowadays known as the Quillen-Suslin theorem (see \cite[Sections~1-2]{Lissner}). Lissner's result on commutators in $\M_{2}(R)$ for $R$ a PID was rediscovered by Vaserstein \cite{Vaserstein/87} and Rosset and Rosset \cite{Rosset}, respectively. Vaserstein also formulated the problem of whether every $A\in\M_{n}(\Z)$ with trace zero is a commutator for $n\geq3$ (see \cite[Section~5]{Vaserstein/87}). A significant breakthrough was made by Laffey and Reams \cite{Laffey-Reams} who settled Vaserstein's problem in the affirmative. However, their proofs involve steps which are special to the ring of integers $\Z$ and do not generalise to other rings in any straightforward way. The most crucial step of this kind is an appeal to Dirichlet's theorem on primes in arithmetic progressions. The analogue of Dirichlet's theorem, although true in the ring $\F_{q}[x]$, fails for other Euclidean domains such as $\C[x]$ or discrete valuation rings. Nevertheless, in \cite{Laffey-notes} Laffey asked whether any matrix with trace zero over a Euclidean domain is a commutator. Until now this appears to have remained an open problem even for $n=3$, except for the cases where $R$ is a field or $\Z$.
In the present paper we answer Laffey's question by proving that if $R$ is any PID and $A\in\M_{n}(R)$ is a matrix with trace zero, then $A$ is a commutator. This is achieved by extending the methods of Laffey and Reams and in particular removing the need for Dirichlet's theorem. Another of our main results is a certain (non-unique) normal form for similarity classes of matrices over PIDs, itself a generalisation of a result proved in \cite{Laffey-Reams} over $\Z$. The normal form, while interesting in its own right and potentially for other applications, is also a key ingredient in the proof of the main result on commutators.
We now describe the contents of the paper in more detail. In Section~\ref{sec:Regular-elements} we define regular elements in $\M_{n}(R)$ for an arbitrary ring $R$ and state some of their basic properties. Regular elements play a central role in the problem of writing matrices as commutators because of the criterion of Laffey and Reams, treated in Section~\ref{sec:LF-criterion}. The criterion says that if $R$ is a PID and $A,X\in\M_{n}(R)$ with $X$ regular mod every maximal ideal of $R$, then a necessary and sufficient condition for $A$ to be a commutator is that $\Tr(X^{r}A)=0$ for $r=0,1,\dots,n-1$. This was proved in \cite{Laffey-Reams} for $R=\Z$, but the proof goes through for any PID with only a minor modification.
In Section~\ref{sec:Comm-fields} we apply the Laffey-Reams criterion for fields to give a short proof of the theorem of Albert and Muckenhoupt mentioned above. We actually prove a stronger and apparently new result, namely that in the commutator one of the matrices may be taken to be regular (see Proposition~\ref{sec:Comm-fields}).
Section~\ref{sec:Similarity} is concerned with similarity of matrices over PIDs, that is, matrices up to conjugation by invertible elements. Our first main result is Theorem~\ref{thm:LF-normalform} stating that every non-scalar element in $\M_{n}(R)$ is similar to one in a special form. This result was established by Laffey and Reams over $\Z$. However, a crucial step in their proof uses the fact that $2$ is a prime element in $\Z$, and the analogue of this does not hold in an arbitrary PID. To overcome this, our proof involves an argument based on the surjectivity of the map $\SL_{n}(R)\rightarrow\SL_{n}(R/I)$ for an ideal $I$, which in a certain sense lets us avoid any finite set of primes, in particular those of index $2$ in $R$ (see Lemma~\ref{lem:b12-avoidsprimes}). This argument is evident especially in the proof of Proposition~\ref{prop:3x3-normalform}. Apart from this, our proof uses the methods of \cite{Laffey-Reams}, although we give a different argument, avoiding case by case considerations, and have made Lemma~\ref{lem:row-column} explicit.
Our second main result is Theorem~\ref{thm:Main} whose proof occupies Section~\ref{sec:Proof-Main}, and follows the lines of \cite[Section~4]{Laffey-Reams}. There are two new key ideas in our proof. First, there is again an argument which at a certain step allows us to avoid finitely many primes, including those of index $2$ in $R$. This step in the proof is the choice of $q$ and uses a special case of Lemma~\ref{lem:GCD}\,\ref{enu:GCD-lemma abx}. Secondly, we apply Lemma~\ref{lem:Centr-product} to obtain a set of generators of the centraliser of a certain matrix modulo a product of distinct primes; see (\ref{eq:Centr-span-severalprimes}). It is this set of generators together with our choice of $q$ and an appropriate choice of $t$ in (\ref{eq:at+y}) which allows us to avoid Dirichlet's theorem. It is interesting to note that the proofs of our main results, Theorems~\ref{thm:LF-normalform} and \ref{thm:Main}, despite being rather different, both involve the technique of avoiding finitely many primes, in particular those of index $2$ in $R$. Our proof of Theorem~\ref{thm:Main} also simplifies parts of the proof of Laffey and Reams over $\Z$ since we avoid some of the case by case considerations present in the latter. By a theorem of Hungerford, Theorem~\ref{thm:Main}, once established, easily extends to any principal ideal ring (not necessarily an integral domain); see Corollary~\ref{cor:Coroll-Main}.
The final Section~\ref{sec:Further-directions} discusses the possibility of generalising Theorem~\ref{thm:Main} to other classes of rings such as Dedekind domains, and mentions some known counter-examples.
We end this introduction by mentioning some recent work on matrix commutators. In \cite{Mesyan} Mesyan proves that if $R$ is a ring (not necessarily commutative) and $A\in\M_{n}(R)$ has trace zero, then $A$ is a sum of two commutators. This result was proved for commutative rings in earlier unpublished work of Rosset. In \cite{Lam-Khurana-Gen-comm} Khurana and Lam study {}``generalised commutators'', that is, elements of the form $XYZ-ZYX$, where $X,Y,Z\in\M_{n}(R)$. They establish in particular that if $R$ is a PID, then every element in $\M_{n}(R)$, $n\geq2$, is a generalised commutator. Although these results may seem closely related to the commutator problem studied in the present paper, the proofs are in fact very different.
\subsection*{Notation and terminology}
We use $\N$ to denote the natural numbers $\{1,2,\dots\}$. Throughout the paper a ring will always mean a commutative ring with identity. In Sections~\ref{sec:LF-criterion}-\ref{sec:Proof-Main} $R$ will be a PID, unless stated otherwise.
Let $R$ be a ring. We denote the set of maximal ideals of $R$ by $\Specm R$ and the ring of $n\times n$ matrices over $R$ by $\M_{n}(R)$. For $A,B\in\M_{n}(R)$ we call $[A,B]=AB-BA$ the \emph{commutator} of $A$ and $B$. Let $A\in\M_{n}(R)$. A matrix $B\in\M_{n}(R)$ is said to be \emph{similar} to $A$ if there exists a $g\in\GL_{n}(R)$ such that $gAg^{-1}=B$. The transpose of $A$ is denoted by $A^{T}$ and the trace of $A$ by $\Tr(A)$. We write $C_{\M_{n}(R)}(A)$ for the centraliser of $A$ in $\M_{n}(R)$, that is, \[ C_{\M_{n}(R)}(A)=\{B\in\M_{n}(R)\mid[A,B]=0\}. \] Let $f(x)=a_{0}+a_{1}x+\dots+x^{n}\in R[x]$ be the characteristic polynomial of $A$. We will refer to the \emph{companion matrix} associated to $A$ (or to $f$) as the matrix $C\in\M_{n}(R)$ such that \[ C=(c_{ij})=\begin{cases} c_{i,i+1}=1 & \text{for }1\leq i\leq n-1,\\ c_{ni}=-a_{i-1} & \text{for }1\leq i\leq n,\\ c_{ij}=0 & \text{otherwise}. \end{cases} \] The identity matrix in $\M_{n}(R)$ is denoted by $1$ or sometimes $1_{n}$. For $u,v\in\N$ we write $E_{uv}$ for the matrix units, that is, $E_{uv}=(e_{ij})$ with $e_{uv}=1$ and $e_{ij}=0$ otherwise. The size of the matrices $E_{uv}$ is suppressed in the notation and will be determined by the context.
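For instance, for $n=3$ and $f(x)=a_{0}+a_{1}x+a_{2}x^{2}+x^{3}$ this convention gives \[ C=\begin{pmatrix}0 & 1 & 0\\ 0 & 0 & 1\\ -a_{0} & -a_{1} & -a_{2} \end{pmatrix}. \]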
\section{\label{sec:Regular-elements}Regular elements}
Let $\bfG$ be a reductive algebraic group over a field $K$ with algebraic closure $\overline{K}$. An element $x\in G=\bfG(\overline{K})$ is called \emph{regular} if $\dim C_{G}(x)$ is minimal, and it is known that this minimal dimension equals the rank $\rk G$ (see \cite{Steinberg-regular} and \cite[Section~14]{dignemichel}). Similarly, if $\mfg$ is the Lie algebra of $\bfG$ an element $X\in\mfg(\overline{K})$ is called \emph{regular} if $\dim C_{G}(X)=\rk G$, where $G$ acts on $\mfg$ via the adjoint action. In the case $\bfG=\GL_{n}$ there are several equivalent characterisations of regular elements in $\mfg(K)=\M_{n}(K)$. More precisely, the following is well-known: \begin{prop} \label{prop:Reg-fields}Let $K$ be a field and $X\in\M_{n}(K)$. Then the following are equivalent \begin{enumerate} \item \label{enu:reg-fields 1}$X$ is regular, \item \label{enu:reg-fields 5-1}There exists a vector $v\in K^{n}$ such that $\{v,Xv,\dots,X^{n-1}v\}$ is a basis for $K^{n}$ over $K$, \item \label{enu:reg-fields 5}The set $\{1,X,\dots,X^{n-1}\}$ is linearly independent over $K$, \item \label{enu:reg-fields 1-1}$X$ is similar to its companion matrix $C$ as well as to $C^{T}$, \item \label{enu:reg-fields 2}$C_{\M_{n}(K)}(X)=K[X]$. \end{enumerate} \end{prop} Regular elements of $\M_{n}(K)$ are sometimes called \emph{non-derogatory }or\emph{ cyclic.} For matrices over arbitrary rings we make the following definition. \begin{defn} Let $R$ be a ring. A matrix $X\in\M_{n}(R)$ is called \emph{regular} if there exists a vector $v\in R^{n}$ such that $\{v,Xv,\dots,X^{n-1}v\}$ is a basis for $R^{n}$ over $R$.\end{defn} \begin{prop} \label{prop:Reg-rings}Let $R$ be a ring and $X\in\M_{n}(R)$. Then the following are equivalent \begin{enumerate} \item \label{enu:reg-rings 1}$X$ is regular, \item \label{enu:reg-rings 2-1}$X$ is similar to its companion matrix $C$ as well as to $C^{T}$, \item \label{enu:reg-rings 3}$C_{\M_{n}(R)}(X)=R[X]$. \end{enumerate} \end{prop} The proof of Proposition \ref{prop:Reg-rings} is the same as in the classical case of matrices over fields. In the following we will use the properties of regular elements expressed in Propositions~\ref{prop:Reg-fields} and \ref{prop:Reg-rings} without explicit reference.
\begin{comment} Even though Condition~\ref{enu:reg-fields 2} makes sense over any ring, it does not lead to a good notion of regular element in general. For example, if $R=k[s,t]/(s^{2},st,t^{2})$ and $X=\begin{pmatrix}s & t\\ 0 & 0 \end{pmatrix}$ we have $C_{\GL_{2}(R)}(X)=ZK$, where $Z$ is the centre of $\GL_{2}(R)$ and $K$ is the kernel of the reduction map $\GL_{2}(R)\to\GL_{2}(k)$. one can regard $\GL_{2}(R)$ as a linear algebraic group over $k$ via the Greenberg functor, so we have the classical notion of regular elements in $\GL_{2}(R)$. With this notion $X$ is not regular since $\dim C_{\GL_{2}(R)}(X)=10$ while the centraliser of for example the element $\begin{pmatrix}1 & 0\\ 0 & 0 \end{pmatrix}$ has dimension $6$. On the other hand, $C_{\GL_{2}(R)}(X)$ is abelian since $K$ is. However, it has been shown by Hill \cite[Theorem~3.6]{Hill_regular} that when $R$ is a local Artinian principal ideal ring this type of anomaly cannot occur and \ref{enu:reg-fields 2} is equivalent to \ref{enu:reg-fields 4}. \begin{rem} Over an arbitrary ring $R$, we may not have a uniquely defined minimal polynomial of a matrix over $R$ (cf.~\cite[Ex.~7.30]{Brown-Matrices}). In case $R$ is an integrally closed domain the minimal polynomial is uniquely defined and divides the characteristic polynomial (see \cite{Frisch}), but even in this case the analogue of part \ref{enu:reg-fields 3} of Proposition~\ref{prop:Reg-fields} for $X\in\M_{n}(R)$ does not imply that $X$ is regular, and furthermore the analogue of part \ref{enu:reg-fields 5} does not imply that $X$ is regular. For a simple counter-example in both of these cases, take for instance $X=\left(\begin{smallmatrix}0 & 2\\ 0 & 0 \end{smallmatrix}\right)\in\M_{2}(\Z)$.\end{rem} \end{comment} If $\phi:R\to S$ is a homomorphism of rings we also use $\phi$ to denote the induced homomorphism $\M_{n}(R)\rightarrow\M_{n}(S)$. \begin{lem} \label{lem:Reg-extnscalars}Let $\phi:R\to S$ be a homomorphism of rings. If $X\in\M_{n}(R)$ is regular, then $\phi(X)$ is regular. \end{lem} \begin{proof} Suppose that $X$ is regular. By definition there exists a vector $v\in R^{n}$ such that $\{v,Xv,\dots,X^{n-1}v\}$ is an $R$-basis for $R^{n}$. Then $\{v\otimes1,Xv\otimes1,\dots,X^{n-1}v\otimes1\}$ is an $S$-basis for $R^{n}\otimes_{R}S$ (cf.~\cite[XVI, Proposition~2.3]{Lang-Algebra}). Let $\phi(v)\in S^{n}$ be the image of $v$ under component-wise application of $\phi$. Under the isomorphism $R^{n}\otimes_{R}S\rightarrow S^{n}$, the elements $X^{i}v\otimes1$ are sent to $\phi(X)^{i}\phi(v)$, so $\{\phi(v),\phi(X)\phi(v),\dots,\phi(X)^{n-1}\phi(v)\}$ is a basis for $S^{n}$. Thus $\phi(X)$ is regular. \end{proof} Let $R$ be a ring and $X\in\M_{n}(R)$. If $\mfp$ is an ideal of $R$ we use $X_{\mfp}$ to denote the image of $X$ under the canonical map $\pi:\M_{n}(R)\to\M_{n}(R/\mfp$), that is, $X_{\mfp}=\pi(X)$. For a general ring $R$ an element in $\M_{n}(R)$ which is regular modulo every maximal ideal may not be regular. However, if $R$ is a local ring, the situation is favourable: \begin{lem} \label{lem:Reg-locring}Assume that $R$ is a local ring with maximal ideal $\mfm$. Then $X\in\M_{n}(R)$ is regular if and only if $X_{\mfm}\in\M_{n}(R/\mfm)$ is regular.\end{lem} \begin{proof} If $X$ is regular, then $X_{\mfm}$ is regular by Lemma~\ref{lem:Reg-extnscalars}. Conversely, suppose that $X_{\mfm}$ is regular and choose $v\in(R/\mfm)^{n}$ such that $(R/\mfm)^{n}=(R/\mfm)[X_{\mfm}]v$. Let $\hat{v}\in R^{n}$ be a lift of $v$. 
Then $R^{n}=R[X]\hat{v}+\mfm M$ for some submodule $M$ of $R^{n}$, and Nakayama's lemma yields $R^{n}=R[X]\hat{v}$, so $X$ is regular.\end{proof} \begin{prop} \label{prop:Reg-mod-m}Let $R$ be an integral domain with field of fractions $F$, and let $X\in\M_{n}(R)$. If $X_{\mfm}$ is regular for some maximal ideal $\mfm$ of $R$, then $X$ is regular as an element of $\M_{n}(F)$. \end{prop} \begin{proof} Suppose that $X_{\mfm}$ is regular for some maximal ideal $\mfm$ of $R$. Let $R_{\mfm}$ be the localisation of $R$ at $\mfm$, and let $j:R\to R_{\mfm}$ be the canonical homomorphism. Since the diagram \[ \xymatrix{R\ar[d]\ar[r]^{j} & R_{\mfm}\ar[d]\\ R/\mfm\ar[r]\sp-{\cong} & R_{\mfm}/\mfm } \] commutes, Lemma~\ref{lem:Reg-locring} implies that $j(X)$ is regular. If $\sum_{i=0}^{n-1}r_{i}X^{i}=0$ for some $r_{i}\in R$, then $\sum_{i=0}^{n-1}j(r_{i})j(X)^{i}=0$. But since $j(X)$ is regular, we must have $j(r_{i})=0$ for all $i=0,\dots,n-1$. Since $R$ is an integral domain $j$ is injective, so $r_{i}=0$ for $i=0,\dots,n-1$. Now, if $\sum_{i=0}^{n-1}s_{i}X^{i}=0$ for some $s_{i}\in F$, then clearing denominators shows that $s_{i}=0$ for all $i=0,\dots,n-1$. Hence, by Proposition~\ref{prop:Reg-fields}~\ref{enu:reg-fields 5} the matrix $X$ is regular as an element of $\M_{n}(F)$. \end{proof} The following result has appeared in \cite[Proposition~6]{Vaserstein-Wheland}. \begin{lem} \label{lem:reg-triang}Let $R$ be an arbitrary ring and $A=(a_{ij})\in\M_{n}(R)$ a matrix such that $a_{i,i+1}=1$ for all $1\leq i\leq n-1$ and $a_{ij}=0$ for all $j\geq i+2$. Then $A$ is regular. \end{lem} \begin{proof} Let $\{e_{1}=(1,0,\dots,0)^{T},e_{2}=(0,1,0,\dots,0)^{T},\dots,e_{n}=(0,\dots,0,1)^{T}\}$ be the standard basis for $R^{n}$. Then the matrix \[ B=(e_{1},Ae_{1},\dots,A^{n-1}e_{1}) \] is upper triangular with $1$s on the diagonal, so $B\in\SL_{n}(R)$. Now for $1\leq i\leq n-1$ we have \[ B^{-1}ABe_{i}=B^{-1}A^{i}e_{1}=e_{i+1} \] (since $Be_{i+1}=A^{i}e_{1}$ ). Thus $B^{-1}AB$ is a companion matrix, and so $A$ is regular. \end{proof}
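For instance, for $n=3$ the lemma asserts that any matrix of the form \[ A=\begin{pmatrix}a_{11} & 1 & 0\\ a_{21} & a_{22} & 1\\ a_{31} & a_{32} & a_{33} \end{pmatrix} \] is regular, over an arbitrary ring $R$.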
\section{\label{sec:LF-criterion}\foreignlanguage{british}{The criterion of Laffey and Reams }}
\selectlanguage{british} Throughout this section $R$ is a PID and $F$ its field of fractions. In Theorem~\ref{prop:Criterion} we give a criterion for a matrix in $\M_{n}(R)$ to be a commutator discovered by Laffey and Reams \cite[Section~3]{Laffey-Reams}. This criterion plays an important role in our proof of the main theorem. Laffey and Reams proved the criterion for matrices over fields and over $\Z$, and we only need minor modifications of their proofs, together with Proposition~\ref{prop:Reg-mod-m}, to prove it over arbitrary PIDs.
The following result is from \cite[Section~3]{Laffey-Reams}. We reproduce the proof here for completeness. \begin{prop} \label{prop:LF-criterion-fields}Let $K$ be a field and $X\in\M_{n}(K)$ be regular. Let $A\in\M_{n}(K)$. Then $A=[X,Y]$ for some $Y\in\M_{n}(K)$ if and only if \foreignlanguage{english}{$\Tr(X^{r}A)=0$ for all $r=0,\dots,n-1$.}\end{prop} \selectlanguage{english} \begin{proof} Since $\{1,X,\dots,X^{n-1}\}$ is linearly independent over $K$ the subspace \[ V=\{A\in\M_{n}(K)\mid\Tr(X^{r}A)=0\text{ for }r=0,1,\dots,n-1\} \] has dimension $n^{2}-n$. The kernel of the linear map $\M_{n}(K)\rightarrow\M_{n}(K)$, $Y\mapsto[X,Y]$ is equal to the centraliser $C_{\M_{n}(K)}(X)$, which has dimension $n$ since $X$ is regular. Thus the image $[X,\M_{n}(K)]$ of the map $Y\mapsto[X,Y]$ has dimension $n^{2}-n$. But if $A\in[X,\M_{n}(K)]$ there exists a $Y\in\M_{n}(K)$ such that for every $r=0,1,\dots,n-1$ we have \[ \Tr(X^{r}A)=\Tr(X^{r}(XY-YX))=\Tr(X^{r+1}Y)-\Tr(X^{r}YX)=0. \] Thus $A\in V$ and so $[X,\M_{n}(K)]\subseteq V$. Since $\dim V=\dim[X,\M_{n}(K)]$ we conclude that $V=[X,\M_{n}(K)]$. \end{proof} \selectlanguage{british} \begin{prop} \label{prop:LF-XYM}Let $X\in\M_{n}(R)$ be such that $X_{\mfp}$ is regular for every maximal ideal $\mfp$ in $R$. Suppose that $M\in\M_{n}(F)$ is such that $[X,M]\in\M_{n}(R)$. Then there exists a $Y\in\M_{n}(R)$ such that $[X,M]=[X,Y]$.\end{prop} \selectlanguage{english} \begin{proof} There exists an element $m\in R$ such that $mM\in\M_{n}(R)$, and we have $[X,mM]=m[X,M]$. Assume that $d\in R$ is chosen so that it has the minimal number of irreducible factors with respect to the property that $[X,C]=d[X,M]$ for some $C\in\M_{n}(R)$. If $d$ is a unit we are done, so assume that $p$ is an irreducible factor of $d$. Then $[X,C]\in p\M_{n}(R)$, so $X_{(p)}$ commutes with $C_{(p)}$. But since $X_{(p)}$ is regular, we have $C_{(p)}=f(X_{(p)})$, for some polynomial $f(T)\in R[T]$. Hence $C-f(X)=pD$ for some $D\in\M_{n}(R)$. But since $[X,f(X)]=0$ this implies that $[X,C]=[X,pD]=p[X,D]$ and thus $(dp^{-1})[X,M]=[X,D]$, giving a contradiction to our choice of $d$. Hence $d$ is a unit and so $[X,M]=[X,Y]$ with $Y=d^{-1}C\in\M_{n}(R)$.\end{proof} \begin{prop} \label{prop:Criterion}Let $A\in\M_{n}(R)$ and let $X\in\M_{n}(R)$ be such that $X_{\mfp}$ is regular for every maximal ideal $\mfp$ in $R$. Then $A=[X,Y]$ for some $Y\in\M_{n}(R)$ if and only if $\Tr(X^{r}A)=0$ for $r=0,\dots,n-1$.\end{prop} \begin{proof} Clearly the condition $\Tr(X^{r}A)=0$ for all $r\geq0$ is necessary for $A$ to be of the form $[X,Y]$ with $Y\in\M_{n}(R)$. Conversely, suppose that $\Tr(X^{r}A)=0$ for $r=0,1,\dots,n-1$. By \foreignlanguage{british}{Proposition~\ref{prop:Reg-mod-m}} $X$ is regular \foreignlanguage{british}{as an element of $\M_{n}(F)$, so Proposition~\ref{prop:LF-criterion-fields} implies that $A=[X,M]$ for some $M\in\M_{n}(F)$. But now the result follows from Proposition~\ref{prop:LF-XYM}.} \end{proof}
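To illustrate the criterion in the simplest non-trivial case (the example is ours), take $R=\Z$, $n=2$ and $X=\left(\begin{smallmatrix}0 & 1\\ 1 & 0 \end{smallmatrix}\right)$. For every prime $p$ the reduction $X_{(p)}$ is non-scalar, so $\{1,X_{(p)}\}$ is linearly independent and $X_{(p)}$ is regular by Proposition~\ref{prop:Reg-fields}. For $A=\left(\begin{smallmatrix}a & b\\ c & -a \end{smallmatrix}\right)\in\M_{2}(\Z)$ the conditions of Proposition~\ref{prop:Criterion} read $\Tr(A)=0$ and $\Tr(XA)=b+c=0$. For example, $A=\left(\begin{smallmatrix}1 & 1\\ -1 & -1 \end{smallmatrix}\right)$ satisfies both conditions, and indeed $A=[X,Y]$ with $Y=\left(\begin{smallmatrix}0 & 0\\ 1 & 1 \end{smallmatrix}\right)$.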
\section{\label{sec:Comm-fields}Commutators over fields}
Let $K$ be a field. Using the criterion of Laffey and Reams over fields (Proposition~\ref{prop:LF-criterion-fields}) we give a swift proof of the theorem of Albert and Muckenhoupt \cite{Albert-Muckenhoupt} that every matrix with trace zero in $\M_{n}(K)$ is a commutator.
Note that if $R$ is any ring and $A,X,Y\in\M_{n}(R)$ are such that $A=[X,Y]$, then for every $g\in\GL_{n}(R)$ we have $gAg^{-1}=[gXg^{-1},gYg^{-1}]$. Thus $A$ is a commutator if and only if any matrix similar to $A$ is.
Let $n\in\N$ with $n\geq2$ and $k=\lfloor n/2\rfloor$. The following matrices were considered by Laffey and Reams \cite[Section~4]{Laffey-Reams} who also established the properties stated below. \[ P_{n}=(p_{ij})=\begin{cases} p_{ii}=1 & \text{for }i=2,4,\dots,2k,\\ p_{i,i-2}=1 & \text{for }i=3,4,\dots,n,\\ p_{ij}=0 & \text{otherwise}. \end{cases} \] Depending on the context we will consider $P_{n}$ as an element of $\M_{n}(R)$ where $R$ is a ring. For any $m\in\N$ and $a\in R$ we will use $J_{m}(a)$ to denote the $m\times m$ Jordan block with eigenvalue $a$ and $1$s on the subdiagonal. Over any $R$ the matrix $P_{n}$ is similar to $J_{k}(1)\oplus J_{n-k}(0)$ (cf.~\cite[p.~681]{Laffey-Reams}), and thus it is regular by Lemma~\ref{lem:reg-triang}.
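For instance, for $n=4$ (so that $k=2$) the definition gives \[ P_{4}=\begin{pmatrix}0 & 0 & 0 & 0\\ 0 & 1 & 0 & 0\\ 1 & 0 & 0 & 0\\ 0 & 1 & 0 & 1 \end{pmatrix}, \] a lower triangular matrix whose diagonal entries equal $1$ exactly in the even positions.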
For any $A=(a_{ij})\in\M_{n}(R)$ let $c(A)=\sum_{i=1}^{k}a_{2i,2i}$ and $d(A)=\sum_{i=1}^{n-1}a_{i,i+1}$. Suppose now that $R$ is a PID and that $a_{ij}=0$ for $j\geq i+2$. Observe that for any $r\in\N$, $P_{n}^{r}$ has the same diagonal as $P_{n}$ and the $(i,j)$ entry of $P_{n}^{r}$ is $0$ if $i\neq j$ and $i<j+2$. Thus \begin{equation} \Tr(P_{n}^{r}A)=c(A),\,\text{ for }r\in\N.\label{eq:Tr_c(A)} \end{equation}
\begin{prop} \label{prop:Main-fields}Let $K$ be a field and let $A\in\M_{n}(K)$ be a matrix with trace zero. Then $A=[X,Y]$ for some $X,Y\in\M_{n}(K)$, where $X$ is regular. More precisely, if $A$ is non-scalar $X$ can be chosen to be conjugate to $P_{n}$, while if $A$ is scalar we can take $X=J_{n}(0)$.\end{prop} \begin{proof} Assume first that $A$ is non-scalar. It then follows from the rational normal form that $A$ is similar to a matrix $B=(b_{ij})$ with $b_{12}=1$ and $b_{ij}=0$ for $j\geq i+2$, so we have $A=gBg^{-1}$ for some $g\in\GL_{n}(K)$. Define $z\in\SL_{n}(K)$ as \[ z=1+c(B)E_{21}. \] Then the $(i,j)$ entry of $z^{-1}Bz$ is $0$ for $j\geq i+2$ and $c(z^{-1}Bz)=0$, so by (\ref{eq:Tr_c(A)}) we have $\Tr(P_{n}^{r}z^{-1}Bz)=0$ for $r=0,\dots,n-1$. By Proposition~\ref{prop:LF-criterion-fields} it follows that $B=[zP_{n}z^{-1},Y]$ for some $Y\in\M_{n}(K)$, and thus $A=[gzP_{n}(gz)^{-1},gYg^{-1}]$.
Assume on the other hand that $A$ is a scalar. Then $\Tr(J_{n}(0)^{r}A)=0$ for $r=0,\dots,n-1$, and Proposition~\ref{prop:LF-criterion-fields} implies that $A=[J_{n}(0),Y]$, for some $Y\in\M_{n}(K)$. \end{proof}
\section{\label{sec:Similarity}Matrix similarity over a PID}
In this section we extend the results of \cite[Section~2]{Laffey-Reams} on similarity of matrices over $\Z$ to matrices over an arbitrary PID $R$. \begin{lem} \label{lem:b12-avoidsprimes}Let $A\in\M_{n}(R)$ be non-scalar, and let $S$ be a finite set of maximal ideals of $R$ such that $A_{\mfp}\in\M_{n}(R/\mfp)$ is non-scalar for every $\mfp\in S$. Then $A$ is similar to a matrix $B=(b_{ij})\in\M_{n}(R)$ such that $b_{12}\notin\mfp$ for all $\mfp\in S$.\end{lem} \begin{proof} It is well known that for any PID $R$ and any non-zero ideal $\mfa$ of $R$ the natural map \begin{equation} \SL_{n}(R)\longrightarrow\SL_{n}(R/\mfa)\label{eq:strongapprox} \end{equation} is surjective. This follows for example from the fact that $R/\mfa$ is the product of local rings and that over local rings $\SL_{n}$ is generated by elementary matrices (see~\cite[2.2.2~and~2.2.6]{Rosenberg_K-theory}). Moreover, if we take $\mfa=\prod_{\mfp\in S}\mfp$ the Chinese remainder theorem implies that we have an isomorphism \begin{equation} \SL_{n}(R/\mfa)\longiso\prod_{\mfp\in S}\SL_{n}(R/\mfp).\label{eq:Chineserem} \end{equation} Let $\mfp\in S$. Since $A_{\mfp}$ is non-scalar and $R/\mfp$ is a field the rational canonical form for matrices in $\M_{n}(R/\mfp)$ implies that there exists a $g_{\mfp}\in\GL_{n}(R/\mfp)$ such that $g_{\mfp}A_{\mfp}g_{\mfp}^{-1}$ is a matrix whose $(1,2)$ entry is non-zero. Since $\GL_{n}(R/\mfp)=T(R/\mfp)\SL_{n}(R/\mfp)$, where $T(R/\mfp)$ is the diagonal subgroup of $\GL_{n}(R/\mfp)$, we may take $g_{\mfp}$ to be in $\SL_{n}(R/\mfp)$. Suppose that $g_{\mfp}$ is chosen in this way for every $\mfp\in S$. By the surjectivity of the maps (\ref{eq:strongapprox}) and (\ref{eq:Chineserem}), there exists a $g\in\SL_{n}(R)$ such that the image of $g$ in $\SL_{n}(R/\mfp)$ is $g_{\mfp}$ for all $\mfp\in S$. Let $B=(b_{ij})=gAg^{-1}$. Then $B$ is a matrix such that $b_{12}$ is non-zero modulo every $\mfp\in S$. \end{proof} The following lemma will be used repeatedly in the proof of Proposition~\ref{prop:3x3-normalform} and Theorem~\ref{thm:LF-normalform}. It can informally be described as saying that if the off-diagonal entries in a row (column) of a matrix $A\in\M_{n}(R)$ with $n\geq3$ have a greatest common divisor $d$, then $A$ is similar to a matrix in which the corresponding row (column) has off-diagonal entries $d,0,\dots,0$. \begin{lem} \label{lem:row-column}Let $A=(a_{ij})\in\M_{n}(R)$, $n\geq3$. Let $1\leq u\leq n$ and $1\leq v\leq n$ be fixed. Let $r\in R$ be a generator of the ideal $(a_{uj}\mid1\leq j\leq n,\, u\neq j)$, and let $c\in R$ be a generator of the ideal $(a_{iv}\mid1\leq i\leq n,\, i\neq v)$. Then $A$ is similar to a matrix $B=(b_{ij})$ such that if $u=1$ we have $b_{u2}=r$ and $b_{uj}=0$ for all $3\leq j\leq n$, and if $u\geq2$ we have $b_{u1}=r$ and $b_{uj}=0$ for all $1\leq j\leq n$ such that $j\notin\{1,u\}$. Moreover, $A$ is similar to a matrix $C=(c_{ij})$ such that if $v=1$ we have $c_{2v}=c$ and $c_{iv}=0$ for all $3\leq i\leq n$, and if $v\geq2$ we have $c_{1v}=c$ and $c_{iv}=0$ for all $1\leq i\leq n$ such that $i\notin\{1,v\}$.\end{lem} \begin{proof} The proof follows the lines of \cite[Ch.~III, Section~2]{Newman}. For $1\leq i<j\leq n$ and $\left(\begin{smallmatrix}x & y\\ z & w \end{smallmatrix}\right)\in\SL_{2}(R)$, let \begin{align*} M_{ij} & =M_{ij}(x,y,z,w)\\
& =1_{n}+(x-1)E_{ii}+yE_{ij}+zE_{ji}+(w-1)E_{jj}\in\SL_{n}(R). \end{align*} Note that $M_{ij}^{-1}=M_{ij}(w,-y,-z,x)$. Let $3\leq j\leq n$. Direct computation shows that the first row in $B_{1}\coloneqq M_{2j}^{-1}AM_{2j}$ is \begin{align*} (a_{11},a_{12}x+a_{13}z,a_{12}y+a_{13}w,a_{14},\dots,a_{1n}) & \quad\text{if }j=3,\\ (a_{11},a_{12}x+a_{1j}z,a_{13},\dots,a_{1,j-1},a_{12}y+a_{1j}w,a_{1,j+1},\dots,a_{1n}) & \quad\text{if }j>3. \end{align*} Now let $3\leq j\leq n$ be the smallest integer such that $a_{1j}\neq0$ (if no such $j$ exists the assertion of the lemma holds trivially for $A$ and $u=1$). Let $d\in R$ be a generator of $(a_{12},a_{1j})$ and set \[ y=a_{1j}d^{-1},\quad w=-a_{12}d^{-1}. \] Then $(y,w)=(1)$ and hence $x,z\in R$ may be determined so that $xw-yz=1$. Thus $a_{12}x+a_{1j}z=-d$. With these values of $x,y,z,w$ all the entries of $A_{1}$ in positions $(1,3),\dots,(1,j)$ are zero, and the $(1,2)$ entry generates the ideal ($a_{12},a_{1j})$. Repeating the process, let $j<k\leq n$ be the smallest integer such that $a_{1k}\neq0$. Then $B_{2}\coloneqq M_{2k}^{-1}B_{1}M_{2k}$ has all its entries $(1,3),\dots,(1,k)$ zero and its $(1,2)$ entry generates the ideal $(a_{12},a_{1j},a_{1k})$. Proceeding in this way, we obtain a matrix $B=(b_{ij})$ similar to $A$ such that $b_{12}$ is a generator of $(a_{1j}\mid2\leq j\leq n)$ and $b_{1j}=0$ for $3\leq j\leq n$ (the generator $b_{12}$ can be replaced by any other generator of $(a_{1j}\mid2\leq j\leq n)$ by a diagonal similarity transformation of $B$). This shows the existence of $B$ for $u=1$. For $u\geq2$, observe that if we let $W_{u}=(w_{ij}^{(u)})\in\GL_{n}(R)$ be any permutation matrix such that $w_{1u}^{(u)}=w_{u1}=1$, then \[ A'=(a_{ij}')=W_{u}AW_{u}^{-1} \] is a matrix such that $a_{11}'=a_{uu}$ and $\{a_{1j}'\mid2\leq j\leq n\}=\{a_{uj}\mid1\leq j\leq n,u\neq j\}$. Informally, the off-diagonal entries in the $u$-th row of $A$ are the same as the off-diagonal entries in the first row of $A'$, up to a permutation. Thus the existence of $B$ for $u\geq2$ follows from the argument for $u=1$ above.
For the existence of $C$ for $v=1$, let $3\leq i\leq n$ and $C_{1}\coloneqq M_{2i}AM_{2i}^{-1}$. Direct computation shows that the first column in $C_{1}$ is \begin{align*} (a_{11},a_{21}x+a_{31}y,a_{21}z+a_{31}w,a_{41},\dots,a_{n1})^{T} & \quad\text{if }i=3,\\ (a_{11},a_{21}x+a_{i1}y,a_{31},\dots,a_{i-1,1},a_{21}z+a_{i1}w,a_{i+1,1},\dots,a_{n1})^{T} & \quad\text{if }i>3. \end{align*} Now let $3\leq i\leq n$ be the smallest integer such that $a_{i1}\neq0$ (if no such $i$ exists the assertion of the lemma holds trivially for $A$ and $v=1$). Let $e\in R$ be a generator of $(a_{21},a_{i1})$ and set \[ z=a_{i1}e^{-1},\quad w=-a_{21}e^{-1}. \] Then $(z,w)=(1)$ and hence $x,y\in R$ may be determined so that $xw-yz=1$. Thus $a_{21}x+a_{i1}y=-e$. With these values of $x,y,z,w$ all the entries of $C_{1}$ in positions $(3,1),\dots,(i,1)$ are zero, and the $(2,1)$ entry generates the ideal $(a_{21},a_{i1})$. Repeating the process in analogy with the above argument, we obtain a matrix $C$ satisfying the assertion of the lemma for $v=1$. For $v\geq2$ we may use the matrix $W_{v}$ as above to reduce to the case where $v=1$.\end{proof} \begin{prop} \label{prop:3x3-normalform}Let $A\in\M_{3}(R)$ be non-scalar. Then $A$ is similar to a matrix $B=(b_{ij})\in\M_{3}(R)$ such that $b_{12}\mid b_{ij}$ for all $i\neq j$ and $b_{12}\mid(b_{ii}-b_{jj})$ for all $1\leq i,j\leq3$.\end{prop} \begin{proof} Write $A=aI+bA'$, where $a,b\in R$, $b\neq0$ and where, if $A'=(a_{ij}')$, we have $(a_{ii}'-a_{jj}',a_{ij}'\mid i\neq j,1\leq i,j\leq3)=(1)$. Note that $A_{\mfp}'$ is non-scalar for every maximal ideal $\mfp$ of $R$ and that the proposition will follow for $A$ if we can show it for $A'$, that is, if we can show that $A'$ is similar to a matrix whose $(1,2)$ entry is a unit. Without loss of generality we may therefore assume that $A=A'$ so that $A$ satisfies \[ (a_{ii}-a_{jj},a_{ij}\mid i\neq j,1\leq i,j\leq3)=(1). \] Note that any matrix similar to $A$ will also satisfy this. Let \[
S\coloneqq\{\mfp\in\Specm R\mid|R/\mfp|=2\}. \] Note that $S$ is a finite set since in any PID (or any Dedekind domain) there are only finitely many maximal ideals of any given finite index. Since $A_{\mfp}$ is not scalar for any maximal ideal $\mfp$ of $R$, Lemma~\ref{lem:b12-avoidsprimes} implies that $A$ is similar to a matrix $B=(b_{ij})$ such that $b_{12}\notin\mfp$ for all $\mfp\in S$. Among all such matrices choose one for which the number of distinct primes which divide $b_{12}$ is least possible, and subject to this, for which the number of not necessarily distinct prime factors is minimal. By Lemma~\ref{lem:row-column} applied to the first row in $B$, we see that there exists a matrix $B'$ similar to $B$ whose $(1,3)$ entry is zero and whose $(1,2)$ entry, being equal to a generator of $(b_{12},b_{13})$, has no more distinct prime factors than $b_{12}$. Hence we may assume that $B$ has been replaced by $B'$ so that $b_{13}=0$. We thus have the following condition on $B$:
\MyQuote{The matrix $B=(b_{ij})$ is similar to $A$, $b_{12}\notin\mfp$ for all $\mfp\in S$, $b_{13}=0$, the entry $b_{12}$ has the smallest number of distinct prime factors among all the matrices similar to $A$ and among all matrices with these properties $B$ is such that $b_{12}$ has the minimal number of not necessarily distinct prime factors.}Note first that by Lemma~\ref{lem:row-column} applied to the second column in $B$, there exists a matrix similar to $B$ whose $(1,2)$ entry is a generator of $(b_{12},b_{32})$. Thus, by $(*)$ we must have $b_{12}\mid b_{32}$, so $b_{32}=b_{12}a$ for some $a\in R$. Let \[ B_{1}=(b_{ij}^{(1)})=(1-E_{31}a)B(1-E_{31}a)^{-1}. \] Then $b_{12}^{(1)}=b_{12}$ and $b_{13}^{(1)}=b_{32}^{(1)}=0$ so that \[ B_{1}=\begin{pmatrix}b_{11}^{(1)} & b_{12} & 0\\ b_{21}^{(1)} & b_{22}^{(1)} & b_{23}^{(1)}\\ b_{31}^{(1)} & 0 & b_{33}^{(1)} \end{pmatrix}. \] In particular, $B_{1}$ satisfies $(*)$. \begin{claim} \label{Claim I}The entry $b_{12}$ divides both $b_{33}^{(1)}-b_{11}^{(1)}$ and $b_{31}^{(1)}$. \end{claim} Let $y\in R$. The first row of the matrix $(1+E_{13}y)B_{1}(1+E_{13}y)^{-1}$ is \[ (b_{11}^{(1)}+yb_{31}^{(1)},\, b_{12},\, y(b_{33}^{(1)}-b_{11}^{(1)}-yb_{31}^{(1)})). \] Thus, by $(*)$ and Lemma~\ref{lem:row-column} applied to the first row in $(1+E_{13}y)B_{1}(1+E_{13}y)^{-1}$ we conclude that $b_{12}$ divides $y(b_{33}^{(1)}-b_{11}^{(1)}-yb_{31}^{(1)})$ for any $y\in R$. Let \[ (b_{12})=\mfp_{1}^{e_{1}}\cdots\mfp_{\nu}^{e_{\nu}} \] be the factorisation of $(b_{12})$, where $\nu\in\N$, $e_{i}\in\N$
and the ideals $\mfp_{i}\in\Specm R$ are distinct for $1\leq i\leq\nu$. By $(*)$ and the definition of $S$ we know that $|R/\mfp_{i}|\geq3$ for any $1\leq i\leq\nu$. Hence there exist elements $y_{i},y_{i}'\in R/\mfp_{i}$ such that \begin{equation} y_{i}\neq0,\quad y_{i}'\neq0,\quad y_{i}\neq y_{i}',\quad\text{for }i=1,\dots,\nu.\label{eq:yi-yi'} \end{equation} By the Chinese remainder theorem we have \[ R/(b_{12})\cong\prod_{i=1}^{\nu}R/\mfp_{i}^{e_{i}}. \] Let $\lambda=(y_{1},\dots,y_{\nu}),\lambda'=(y_{1}',\dots,y_{\nu}')\in\prod_{i=1}^{\nu}R/\mfp_{i}^{e_{i}}$. Then $\lambda$ and $\lambda'$ can be considered as elements in $R/(b_{12})$ and because of (\ref{eq:yi-yi'}) each of $\lambda,\lambda'$ and $\lambda-\lambda'$ is a unit in $R/(b_{12})$. In particular, each of $\lambda,\lambda'$ and $\lambda-\lambda'$ is coprime to $b_{12}$. We know from the above that $b_{12}$ divides $y(b_{33}^{(1)}-b_{11}^{(1)}-yb_{31}^{(1)})$ for any $y\in R$. In particular, choosing $y=\lambda,\lambda',\lambda-\lambda'$, respectively, we obtain $b_{31}^{(1)}(\lambda-\lambda')\in(b_{12})$, hence $b_{31}^{(1)}\in(b_{12})$ and $b_{33}^{(1)}-b_{11}^{(1)}\in(b_{12})$. This proves the claim.
By Claim~\ref{Claim I} there exist elements $\alpha,\beta\in R$ such that \[ b_{33}^{(1)}-b_{11}^{(1)}=\alpha b_{12}\quad\text{and}\quad b_{31}^{(1)}=\beta b_{12}. \] Let \[ B_{2}=(b_{ij}^{(2)})=(1+E_{21}(-\alpha+\beta))(1+E_{31})B_{1}(1+E_{31})^{-1}(1+E_{21}(-\alpha+\beta))^{-1}. \] Then $b_{12}^{(2)}=b_{32}^{(2)}=b_{12}$ and $b_{13}^{(2)}=b_{31}^{(2)}=0$ so that \[ B_{2}=\begin{pmatrix}b_{11}^{(2)} & b_{12} & 0\\ b_{21}^{(2)} & b_{22}^{(2)} & b_{23}^{(2)}\\ 0 & b_{12} & b_{33}^{(2)} \end{pmatrix}. \] Moreover, let \[ B'_{2}=(1-E_{31})B_{2}(1-E_{31})^{-1}=\begin{pmatrix}b_{11}^{(2)} & b_{12} & 0\\ b_{23}^{(2)}+b_{21}^{(2)} & b_{22}^{(2)} & b_{23}^{(2)}\\ b_{33}^{(2)}-b_{11}^{(2)} & 0 & b_{33}^{(2)} \end{pmatrix} \] and \[ B''_{2}=(1-E_{33})B_{2}(1-E_{33})^{-1}=\begin{pmatrix}b_{33}^{(2)} & b_{12} & 0\\ b_{23}^{(2)}+b_{21}^{(2)} & b_{22}^{(2)} & b_{21}^{(2)}\\ b_{11}^{(2)}-b_{33}^{(2)} & 0 & b_{11}^{(2)} \end{pmatrix}. \] We will now show that $B_{2}$ has the property that $b_{12}\mid b_{ij}^{(2)}$ for all $i\neq j$ and $b_{12}\mid(b_{ii}^{(2)}-b_{jj}^{(2)})$ for all $1\leq i,j\leq3$. This follows from the following fact applied to the matrices $B'_{2}$ and $B''_{2}$. \begin{claim} \label{Claim II}Suppose that $C=(c_{ij})\in\M_{n}(R)$ satisfies $(*)$ and that $c_{32}=0$. Then $c_{12}\mid c_{ij}$ for all $i,j$ such that $(i,j)\neq(2,1)$ and $i\neq j$, and $c_{12}\mid(c_{ii}-c_{jj})$ for all $1\leq i,j\leq3$. \end{claim} To prove the claim, let $x\in R$ and \[ X=(x_{ij})=(1+E_{32}x)C(1+E_{32}x)^{-1}. \] Then \begin{align*} x_{12} & =c_{12},\\ x_{32} & =x(c_{22}-c_{33}-xc_{23}), \end{align*} and by Lemma~\ref{lem:row-column} applied to the second column in $X$ we conclude that $c_{12}$ divides $x(c_{22}-c_{33}-xc_{23})$ for any $x\in R$. By $(*)$ and the same argument as in the proof of Claim~\ref{Claim I} we obtain \[ c_{12}\mid(c_{22}-c_{33})\quad\text{and}\quad c_{12}\mid c_{23}. \] Next, for $y\in R$ let \[ Y=(y_{ij})=(1+E_{13}y)C(1+E_{13}y)^{-1}. \] Then \begin{align*} y_{12} & =c_{12},\\ y_{13} & =y(c_{33}-c_{11}-yc_{31}), \end{align*} and by Lemma~\ref{lem:row-column} applied to the first row in $Y$ and the same argument as for the matrix $X$ (that is, using $(*)$ and the same argument as in the proof of Claim~\ref{Claim I}) we obtain \[ c_{12}\mid(c_{33}-c_{11})\quad\text{and}\quad c_{12}\mid c_{31}, \] whence also $c_{12}\mid(c{}_{22}-c_{11})$. This proves Claim~\ref{Claim II} for $C$.
Applying Claim~\ref{Claim II} to the matrices $B'_{2}$ and $B''_{2}$, respectively, we conclude that $B_{2}$ has the property that $b_{12}\mid b_{ij}^{(2)}$ for all $i\neq j$ and $b_{12}\mid(b_{ii}^{(2)}-b_{jj}^{(2)})$ for all $1\leq i,j\leq3$. Since $B_{2}$ is similar to $B$ (and $B$ is similar to $A$), we have \[ (b_{ii}-b_{jj},b_{ij}\mid i\neq j,1\leq i,j\leq3)=(1), \] so $b_{12}$ must be a unit. This proves the proposition. \end{proof} We now use Proposition~\ref{prop:3x3-normalform} to prove the corresponding result for matrices in $\M_{n}(R)$ for all $n\geq3$. More precisely, we have \begin{thm} \label{thm:LF-normalform}Let $A\in\M_{n}(R)$ with $n\geq3$, be non-scalar. Then $A$ is similar to a matrix $B=(b_{ij})\in\M_{n}(R)$ such that $b_{12}\mid b_{ij}$ for all $i\neq j$ and $b_{12}\mid(b_{ii}-b_{jj})$ for all $1\leq i,j\leq n$. Moreover, $B$ may be chosen with $b_{ij}=0$ for all $i,j$ such that $j\geq i+2$ and $1\leq i\leq n-2$.\end{thm} \begin{proof} As in the proof of Proposition~\ref{prop:3x3-normalform}, we may assume that \[ (a_{ii}-a_{jj},a_{ij}\mid i\neq j,1\leq i,j\leq n)=(1), \] and choose a matrix $B$ satisfying the following condition
\MyQuote{The matrix $B=(b_{ij})$ is similar to $A$, $(b_{12},2)=(1)$, $b_{1j}=0$ for $j\geq 3$, the entry $b_{12}$ has the smallest number of distinct prime factors among all the matrices similar to $A$ and among all matrices with these properties $B$ is such that $b_{12}$ has the minimal number of not necessarily distinct prime factors.}If for some $i,j$ the entry $b_{12}$ does not divide $b_{ii}-b_{jj}$, then $b_{12}$ does not divide $b_{11}-b_{vv}$ for some $v$. If $v\geq4$ let $W_{v}=(w_{ij}^{(v)})\in\GL_{n}(R)$ be any permutation matrix such that $w_{11}^{(v)}=w_{22}^{(v)}=1$, $w_{v3}^{(v)}=1$ and $w_{3v}^{(v)}=1$. Then $W_{v}BW_{v}^{-1}$ has $(1,2)$ entry equal to $b_{12}$ and $(3,3)$ entry equal to $b_{vv}$, so we may assume that $b_{12}$ does not divide $b_{11}-b_{22}$ or $b_{11}-b_{33}$. Consider the submatrix \[ B_{0}=(b_{ij})_{1\leq i,j\leq3} \] of $B$ and note that any similarity $B_{0}\mapsto g^{-1}B_{0}g$ for $g\in\GL_{3}(R)$ may be achieved by $B\mapsto(g\oplus I_{n-3})B(g\oplus I_{n-3})^{-1}$. By the minimality property of $b_{12}$ expressed in $(*)$ and the argument in the proof of Proposition~\ref{prop:3x3-normalform} applied to $B_{0}$ we conclude that $b_{12}$ divides both $b_{11}-b_{22}$ and $b_{11}-b_{33}$, which is a contradiction. Thus \[ b_{12}\mid(b_{ii}-b_{jj})\text{ for all }1\leq i,j\leq n\quad\text{and}\quad b_{12}\mid b_{ij}\text{ for all }i\neq j,\,1\leq i,j\leq3. \] Similarly, for any $4\leq v\leq n$ the matrix $W_{v}BW_{v}^{-1}$ has $(3,1)$ entry equal to $b_{v1}$, so by $(*)$ and the argument in the proof of Proposition~\ref{prop:3x3-normalform} applied to $B_{0}$ we conclude that $b_{12}\mid b_{v1}$. Hence \[ b_{12}\mid b_{v1}\text{ for all }4\leq v\leq n. \] Furthermore, by $(*)$ and Lemma~\ref{lem:row-column} applied to the second column in $B$, we see that \[ b_{12}\mid b_{i2}\text{ for all }i\neq2. \] Let $1\leq u,v\leq n$ be such that $u\geq3$ and $v\neq u$. For $x\in R$ let \[ X_{u}=(x_{ij}^{(u)})=(1+E_{u2})B(1+E_{u2})^{-1}, \] so that $x_{v2}^{(u)}=b_{v2}-b_{vu}$ and in particular $x_{12}^{(u)}=b_{12}$. By $(*)$ and Lemma~\ref{lem:row-column} applied to the second column in $X_{u}$ we see that $b_{12}\mid x_{v2}^{(u)}$ and since $b_{12}\mid b_{v2}$ we conclude that $b_{12}\mid b_{vu}$. Hence \[ b_{12}\mid b_{vu}\text{ for all }u\geq3,\, v\neq u. \]
We have thus shown that $B$ has the property that $b_{12}\mid b_{ij}$ for all $i\neq j$ and $b_{12}\mid(b_{ii}-b_{jj})$ for all $1\leq i,j\leq n$.
For the second statement we follow \cite[III,~2]{Newman}. Conjugating $B$ by $1_{2}\oplus M_{3j}\in\GL_{n}(R)$ for a suitable $M_{3j}\in\GL_{n-2}(R)$ (cf.~the proof of Lemma~\ref{lem:row-column}), we can replace $B$ by a matrix $B_{1}$ in which the first row equals that of $B$ and whose $(2,j)$ entries are zero whenever $j\geq4$. Conjugating $B_{1}$ by $1_{3}\oplus M_{4j}\in\GL_{n}(R)$ for a suitable $M_{4j}\in\GL_{n-3}(R)$, we can replace $B_{1}$ by a matrix $B_{2}$ in which the first two rows equal those of $B_{1}$ and whose $(3,j)$ entries are zero whenever $j\geq5$. Proceeding inductively in this way, we obtain a matrix $C=(c_{ij})$ similar to $B$ such that $c_{12}=b_{12}$ and $c_{ij}=0$ for $i,j$ such that $j\geq i+2$ and $1\leq i\leq n-2$. But since $B\equiv b_{11}1_{n}\bmod{(b_{12})}$ we also have $C\equiv b_{11}1_{n}\bmod{(b_{12})}$, so $C$ has the desired form. \end{proof} Using Theorem~\ref{thm:LF-normalform} it is now easy to prove the following result. The following proof is entirely analogous to that of Laffey and Reams for $R=\Z$. \begin{prop} Let $A\in\M_{n}(R)$, $n\geq3$ have trace zero, and suppose that for every $\mfp\in\Specm R$ and every $a\in R/\mfp$, $a\neq0$ we have $A_{\mfp}\neq a1_{n}$. Then $A$ is similar to a matrix $B=(b_{ij})\in\M_{n}(R)$ where $b_{ii}=0$ for all $1\leq i\leq n$.\end{prop} \begin{proof} If $A_{\mfp}=0$ for some $\mfp$, we can write $A=mA'$, where $m\in R$ and $A'$ is such that for every $\mfp\in\Specm R$ and every $a\in R/\mfp$ we have $A_{\mfp}'\neq a1_{n}$. Since $A'$ must be non-scalar Theorem~\ref{thm:LF-normalform} implies that $A'$ is similar to a matrix $A''=(a_{ij}'')$ such that $a_{12}''\mid a_{ij}''$ for all $i\neq j$ and $a_{12}''\mid(a_{ii}''-a_{jj}'')$ for all $1\leq i,j\leq n$. Since $A''$ satisfies $A_{\mfp}''\neq a1_{n}$ for any $\mfp\in\Specm R$ and $a\in R/\mfp$, the entry $a_{12}''$ must be a unit. We may therefore assume without loss of generality that $A=A''$, so that in particular $a_{12}$ is a unit.
We now prove that $A$ is similar to a matrix with zero diagonal by induction on $n$. If $n=2$, the matrix \[ (1+E_{21}a_{11}a_{12}^{-1})A(1+E_{21}a_{11}a_{12}^{-1})^{-1} \] has zero diagonal. If $n>2$, conjugating $A$ by a matrix of the form $1+\alpha E_{n1}$, $\alpha\in R$, we may assume that $a_{n2}=1$, and then conjugating $A$ by a matrix of the form $1+\beta E_{21}$, $\beta\in R$, we may further assume that $a_{11}=0$. Thus we may assume that $A$ is of the form \[ \begin{pmatrix}0 & x\\ y^{T} & A_{1} \end{pmatrix}, \] where $x,y\in R^{n-1}$, $A_{1}=(a_{ij}^{1})\in\M_{n-1}(R)$ with $a_{n-1,1}^{1}=1$ and $\Tr(A_{1})=0$. By Theorem~\ref{thm:LF-normalform} $A_{1}$ is similar to a matrix $A_{2}=(a_{ij}^{2})$ such that $a_{12}^{2}\mid a_{ij}^{2}$ for all $i\neq j$ and $a_{12}^{2}\mid(a_{ii}^{2}-a_{jj}^{2})$ for all $1\leq i,j\leq n-1$. Since $(A_{2})_{\mfp}\neq a1_{n-1}$ for all $\mfp\in\Specm R$ and $a\in R/\mfp$, the entry $a_{12}^{2}$ must be a unit. So by induction there exists a $Q\in\GL_{n-1}(R)$ such that $QA_{1}Q^{-1}=B_{1}$ is a matrix with zeros on the diagonal. But then \[ B=(1_{1}\oplus Q)A(1_{1}\oplus Q)^{-1} \]
has the desired form. \end{proof} A matrix in $\M_{n}(R)$ satisfying the conditions on the matrix $B$ in Theorem~\ref{thm:LF-normalform} will be said to be in \emph{Laffey-Reams form}.
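For example, over $R=\Z$ the matrix \[ B=\begin{pmatrix}1 & 2 & 0\\ 4 & 3 & 0\\ 6 & 8 & 5 \end{pmatrix} \] is in Laffey-Reams form: $b_{12}=2$ divides every off-diagonal entry and every difference $b_{ii}-b_{jj}$ of diagonal entries, and the entries in positions $(i,j)$ with $j\geq i+2$ vanish. Note also that $B\equiv1_{3}\bmod{(2)}$, in line with the congruence $B\equiv b_{11}1_{n}\bmod{(b_{12})}$ used in the proof of Theorem~\ref{thm:LF-normalform}.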
\section{\label{sec:Proof-Main}Proof of the main result}
In this section we give a proof of our main theorem on commutators, Theorem~\ref{thm:Main}. We first prove a couple of lemmas used in the proof. \begin{lem} \label{lem:GCD}Let $R$ be a PID. Then the following holds: \begin{enumerate} \item \label{enu:GCD-lemma abx}Let $a,b\in R$ be such that $(a,b)=(1)$, and let $S$ be a finite set of maximal ideals of $R$. Then there exists an $x\in R$ such that for all $\mfp\in S$ we have $a+bx\notin\mfp$.
\item \label{enu:GCD-lemma pqt}Let $\alpha,\beta\in R$ be such that $(\alpha,\beta)=(1)$. Suppose that $\mfp$ is a maximal ideal of $R$ such that $|R/\mfp|\geq3$. Then for every finite set $S$ of maximal ideals of $R$ such that $\mfp\notin S$ there exists a $t\in R$ such that $t\notin\mfp$, $t\in\mfq$ for all $\mfq\in S\setminus\{\mfp\}$ and $\alpha t+\beta\notin\mfp$. \item \label{enu:GCD-lemma abc}Let $a,b,c\in R$ be such that $(a,b,c)=(1)$, $(a,b)\neq(1)$ and $(a,c)\neq(1)$. Then there exists an $x\in R$ such that $(a+cx,b-ax)=(1)$. \end{enumerate} \end{lem} \begin{proof} To prove \ref{enu:GCD-lemma abx}, take $x$ to be a generator of the product \[ \prod_{\substack{\mfp\in S\\ a\notin\mfp } }\mfp \] and let $x=1$ if there is no $\mfp\in S$ such that $a\notin\mfp$. Let $\mfp\in S$ be such that $a\in\mfp$. If $a+bx\in\mfp$, then $bx\in\mfp$ and since $(a,b)=(1)$ we have $x\in\mfp$, which contradicts the definition of $x$. On the other hand, let $\mfp\in S$ be such that $a\notin\mfp$. If $a+bx\in\mfp$, then by the definition of $x$ we have $bx\in\mfp$, so $a\in\mfp$, which is a contradiction. Thus in either case, $a+bx\notin\mfp$.
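For instance, over $R=\Z$ with $a=6$, $b=5$ and $S=\{(2),(3),(7)\}$, the only ideal in $S$ not containing $a$ is $(7)$, so $x=7$, and indeed $a+bx=41$ lies outside each of $(2)$, $(3)$ and $(7)$.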
Next, we prove \ref{enu:GCD-lemma pqt}. Since $|R/\mfp|\geq3$ there exist two elements $r_{1},r_{2}\in R\setminus\mfp$ such that $r_{1}-r_{2}\notin\mfp$. Let $s\in R$ be such that \[ (s)=\prod_{\mfq\in S\setminus\{\mfp\}}\mfq. \] Then for $i=1,2$ we have $r_{i}s\notin\mfp$ and $r_{i}s\in\mfq$ for all $\mfq\in S\setminus\{\mfp\}$. Furthermore, if $\alpha r_{i}s+\beta\in\mfp$ for $i=1,2$, then $\alpha\in\mfp$ and $\beta\in\mfp$, contradicting the hypothesis $(\alpha,\beta)=(1)$. Thus we may assume that $\alpha r_{1}s+\beta\notin\mfp$, and $t=r_{1}s$ yields the desired element.
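For instance, over $R=\Z$ with $\alpha=2$, $\beta=3$, $\mfp=(5)$ and $S=\{(7),(11)\}$, one may take $r_{1}=1$, $r_{2}=2$ and $s=77$; already $t=r_{1}s=77$ works, since $77\notin(5)$, $77\in(7)\cap(11)$ and $\alpha t+\beta=157\notin(5)$.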
We now prove \ref{enu:GCD-lemma abc}. We first show that $a+cx$ and $b-ax$ are relatively prime as elements of $R[x]$, that is, that neither of them is a multiple of the other. Indeed, if $a+cx=m(b-ax)$ for some $m\in R$, then $a=mb$ and $c=-ma$ so $(1)=(a,b,c)=(mb,b,-m^{2}b)=(b)$, which is impossible since $(a,b)\neq(1)$. Similarly, if $n(a+cx)=b-ax$ for some $n\in R$, then $b=na$ and $a=-nc$ so $(1)=(a,b,c)=(-nc,-n^{2}c,c)=(c)$, which is impossible. Let $K$ be the field of fractions of $R$. Since $a+cx$ and $b-ax$ are relatively prime as elements of $R[x]$, they are relatively prime as elements of $K[x]$. Thus there exist $f_{0},g_{0}\in K[x]$ such that $(a+cx)f_{0}+(b-ax)g_{0}=1$, and so there exist $f,g\in R[x]$ such that \begin{equation} (a+cx)f+(b-ax)g=D\in R\setminus\{0\}.\label{eq:fgD} \end{equation} Let $S$ be the set of maximal ideals dividing $(D)$. By \ref{enu:GCD-lemma abx} we can choose $x\in R$ such that for all $\mfp\in S$ we have $a+cx\notin\mfp$. Now if $\mfp$ is a maximal ideal of $R$ such that $a+cx\in\mfp$ and $b-ax\in\mfp$, then $D\in\mfp$ by (\ref{eq:fgD}), so $\mfp\in S$ and hence $a+cx\notin\mfp$; a contradiction. Thus there is no $\mfp\in\Specm R$ such that $a+cx\in\mfp$ and $b-ax\in\mfp$, that is, $a+cx$ and $b-ax$ are relatively prime. \end{proof} The following result is the Chinese remainder theorem for centralisers of matrices over quotients of $R$. It will be used at a crucial step in our proof of Theorem~\ref{thm:Main}. \begin{lem} \label{lem:Centr-product}Let $X\in\M_{n}(R)$ and let $\mfp_{1},\dots,\mfp_{\nu}$, $\nu\in\N$ be distinct maximal ideals in $R$. Then the map \begin{align*} C_{\M_{n}(R/(\mfp_{1}\cdots\mfp_{\nu}))}(X_{(\mfp_{1}\cdots\mfp_{\nu})}) & \longrightarrow\prod_{i=1}^{\nu}C_{\M_{n}(R/\mfp_{i})}(X_{\mfp_{i}})\\ g & \longmapsto(g_{\mfp_{1}},\dots,g_{\mfp_{\nu}}), \end{align*} is an isomorphism. \end{lem} \begin{proof} Let $\mathcal{C}=C_{\M_{n}(R/(\mfp_{1}\cdots\mfp_{\nu}))}(X_{(\mfp_{1}\cdots\mfp_{\nu})})$. Then $\mathcal{C}$ is a module over $R$. By the Chinese remainder theorem we have an isomorphism $R/(\mfp_{1}\cdots\mfp_{\nu})\rightarrow\prod_{i=1}^{\nu}R/\mfp_{i}$ given by $a\mapsto(a_{\mfp_{1}},\dots,a_{\mfp_{\nu}})$, and tensoring this by $\mathcal{C}$ yields \begin{align*} \mathcal{C} & \cong R/(\mfp_{1}\cdots\mfp_{\nu})\otimes_{R}\mathcal{C}\cong\big(\prod_{i=1}^{\nu}R/\mfp_{i}\big)\otimes_{R}\mathcal{C}\cong\prod_{i=1}^{\nu}(R/\mfp_{i}\otimes_{R}\mathcal{C})\\
& \cong\prod_{i=1}^{\nu}C_{\M_{n}(R/\mfp_{i})}(X_{\mfp_{i}}). \end{align*} Tracking the maps shows that the effect of the above isomorphisms on elements is given by \[ g\longmapsto1\otimes g\longmapsto(1_{\mfp_{1}},\dots,1_{\mfp_{\nu}})\otimes g\longmapsto(1_{\mfp_{1}}\otimes g,\dots,1_{\mfp_{\nu}}\otimes g)\longmapsto(g_{\mfp_{1}},\dots,g_{\mfp_{\nu}}). \]
\end{proof} We now give the proof of our main theorem. Note that our proof in the case $n=2$ is different from the case $n\geq3$, and that for $n=2$, while our argument is not the shortest possible, it yields the stronger result that any $A\in\M_{2}(R)$ with trace zero can be written as $A=[X,Y]$ for some $X,Y\in\M_{2}(R)$ and $X$ regular. \begin{thm} \label{thm:Main}Let $R$ be a PID and let $A\in\M_{n}(R)$ be a matrix with trace zero. Then $A=[X,Y]$ for some $X,Y\in\M_{n}(R)$. \end{thm} \begin{proof} For $n=1$ the result is trivial. First assume that $n=2$. By taking out a suitable factor we may assume that the matrix \[ A=\begin{pmatrix}a & b\\ c & -a \end{pmatrix} \] satisfies $(a,b,c)=(1)$. Let $X=\begin{pmatrix}0 & 1\\ x_{1} & x_{2} \end{pmatrix}\in\M_{2}(R)$. By Lemma~\ref{lem:reg-triang} the matrix $X$ is regular so it is regular mod $\mfp$ for every maximal ideal $\mfp$ of $R$. Furthermore, \[ \Tr(XA)=bx_{1}-ax_{2}+c, \] so if $(a,b)=(1)$ we can find $x_{1}$ and $x_{2}$ such that $\Tr(XA)=0$, and Proposition~\ref{prop:Criterion} implies that $A=[X,Y]$, for some $Y\in\M_{2}(R)$. Similarly, the transpose $X^{T}$ of $X$ is also regular, and \[ \Tr(X^{T}A)=cx_{1}-ax_{2}+b, \] so if $(a,c)=(1)$ we can find $x_{1}$ and $x_{2}$ such that $\Tr(X^{T}A)=0$, and so $A=[X^{T},Y]$, for some $Y\in\M_{2}(R)$. Hence, in case $(a,b)=(1)$ or $(a,c)=(1)$ we are done. Assume therefore that $(a,b)\neq(1)$ and $(a,c)\neq(1)$. If we let $T=1+xE_{12}\in\M_{2}(R)$ for some $x\in R$, we have \[ TAT^{-1}=\begin{pmatrix}a+cx & b-ax-x(a+cx)\\ c & -a-cx \end{pmatrix}. \] Now $a+cx$ and $b-ax-x(a+cx)$ are relatively prime if and only if $a+cx$ and $b-ax$ are relatively prime. By Lemma~\ref{lem:GCD}\,\ref{enu:GCD-lemma abc} we can choose $x\in R$ such that $(a+cx,b-ax)=(1)$, and hence such that the $(1,1)$ and $(1,2)$ entries in $TAT^{-1}$ are relatively prime. As we have already seen, this means that we can find $x_{1}$ and $x_{2}$ such that $\Tr(XTAT^{-1})=0$, so Proposition~\ref{prop:Criterion} yields $A=[T^{-1}XT,Y]$ for some $Y\in\M_{2}(R)$.
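To illustrate the case $n=2$, take $R=\Z$ and $A=\begin{pmatrix}2 & 3\\ 5 & -2 \end{pmatrix}$, so that $(a,b)=(2,3)=(1)$. Choosing $x_{1}=1$ and $x_{2}=4$ gives $\Tr(XA)=3\cdot1-2\cdot4+5=0$ for $X=\begin{pmatrix}0 & 1\\ 1 & 4 \end{pmatrix}$, and indeed a direct computation shows that $A=[X,Y]$ with $Y=\begin{pmatrix}0 & 0\\ 2 & 3 \end{pmatrix}$.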
Assume now that $n\geq3$. If $A$ is a scalar matrix we obviously have $\Tr(J_{n}(0)^{r}A)=0$ for all $r\geq0$, so Proposition~\ref{prop:Criterion} yields the desired conclusion. We may therefore henceforth assume that $A$ is non-scalar. Write $A=(a_{ij})$ for $1\leq i,j\leq n$. By Theorem~\ref{thm:LF-normalform} we may assume that $A$ is in Laffey-Reams form. If $d\in R$ is such that $(a_{ij},a_{ii}-a_{jj}\mid i\neq j,1\leq i,j\leq n)=(d)$, we can write $A=dA'$ where $A'=(a'_{ij})\in\M_{n}(R)$ is in Laffey-Reams form and $(a_{11}',a_{12}')=(1)$. It thus suffices to assume that $A=A'$ so that $(a_{11},a_{12})=(1)$, $a_{12}\mid a_{ij}$ for $i\neq j$, $a_{12}\mid(a_{ii}-a_{jj})$ for $1\leq i,j\leq n$, and $a_{ij}=0$ for $j\geq i+2$. Let $k=\lfloor n/2\rfloor$. For $x,y,q\in R$ define the matrix $X=(x_{ij})\in\M_{n}(R)$ by \[ (x_{ij})=\begin{cases} x_{ii}=-y & \text{for }i=2,4,\dots,2k,\\ x_{21}=x,\\ x_{31}=q,\\ x_{j,j-1}=1 & \text{for }j=3,4,\dots,n,\\ x_{ij}=0 & \text{otherwise}. \end{cases} \] \begin{comment} For example, for $n=5$ we have \[ X=\begin{pmatrix}0 & 0 & 0 & 0 & 0\\ x & -y & 0 & 0 & 0\\ q & 1 & 0 & 0 & 0\\ 0 & 0 & 1 & -y & 0\\ 0 & 0 & 0 & 1 & 0 \end{pmatrix}. \] \end{comment} Recall that for any $B=(b_{ij})\in\M_{n}(R)$ we write $c(B)=\sum_{i=1}^{k}b_{2i,2i}$. We have \[ \Tr(XA)=xa_{12}+a_{23}+\dots+a_{n-1,n}-yc(A). \] We claim that $\Tr(XA)=0$ implies that $\Tr(X^{r}A)=0$ for all $r\geq0$. To see this, observe that the matrix $X^{2}+yX$ is lower triangular and its $(i,j)$ entry is $0$ if $j\geq i-1$. Since $\Tr(E_{ij}A)=0$ if $j<i-1$ (since $a_{ij}=0$ for $j\geq i+2$), it follows that $\Tr((X^{2}+yX)A)=0$, so if $\Tr(XA)=0$ we get $\Tr(X^{2}A)=0$. More generally, using the fact that $X$ is lower triangular, we have $\Tr((X^{r}+yX^{r-1})A)=0$, and working inductively we get $\Tr(X^{r}A)=0$ for all $r\geq0$.
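To make the shape of $X^{2}+yX$ concrete, consider the case $n=4$ (so $k=2$): \[ X=\begin{pmatrix}0 & 0 & 0 & 0\\ x & -y & 0 & 0\\ q & 1 & 0 & 0\\ 0 & 0 & 1 & -y \end{pmatrix},\qquad X^{2}+yX=\begin{pmatrix}0 & 0 & 0 & 0\\ 0 & 0 & 0 & 0\\ x+qy & 0 & 0 & 0\\ q & 1 & 0 & 0 \end{pmatrix}, \] so the nonzero entries of $X^{2}+yX$ occur only in positions $(i,j)$ with $j<i-1$, and these contribute nothing to $\Tr((X^{2}+yX)A)$ because $a_{ji}=0$ whenever $j<i-1$.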
Assume for the moment that $a_{12}\mid c(A)$ and let $M=1-c(A)a_{12}^{-1}E_{21}\in\M_{n}(R)$. Then \[ c(MAM^{-1})=0, \] so Proposition~\ref{prop:Criterion} together with (\ref{eq:Tr_c(A)}) and the fact that $P_{n}$ is regular imply $MAM^{-1}=[P_{n},Y]$, for some $Y\in\M_{n}(R)$. Thus in this case $A=[M^{-1}P_{n}M,M^{-1}YM]$, so we may henceforth assume that \begin{equation} a_{12}\nmid c(A).\label{eq:a12-notdiv-c(A)} \end{equation} We now show that there exist elements $x,y\in R$ with $(x,y)=(1)$ and such that $\Tr(XA)=0$. To this end, consider the equation \[ xa_{12}+a_{23}+\dots+a_{n-1,n}=yc(A),\qquad x,y\in R. \] Since $a_{12}$ divides $a_{23},\dots,a_{n-1,n}$, this may be written \begin{equation} a_{12}(x+l)=yc(A),\label{eq:Diophant-xy} \end{equation} for some $l\in R$. Let $d\in R$ be a generator of $(a_{12},c(A))$. Then (\ref{eq:Diophant-xy}) is equivalent to \begin{align*} x & =hc(A)d^{-1}-l\\ y & =ha_{12}d^{-1}, \end{align*} for any $h\in R$. Choose $h$ to be a generator of the product of all maximal ideals $\mfp$ of $R$ such that $a_{12}d^{-1}\in\mfp$ and $l\notin\mfp$ (and let $h=1$ if no such $\mfp$ exist). Suppose that $(x,y)\in(p)$ for some prime element $p\in R$. Then $y\in(p)$ and so $a_{12}d^{-1}\in(p)$ or $h\in(p)$. If $a_{12}d^{-1}\in(p)$ and $l\not\in(p)$, then $h\in(p)$, so $x\notin(p)$. If $a_{12}d^{-1}\in(p)$ and $l\in(p)$, then $h\notin(p)$ and since $(a_{12}d^{-1},c(A)d^{-1})=(1)$ we have $x\notin(p)$. Furthermore, if $h\in(p)$ then $l\notin(p)$ so $x\notin(p)$. Thus $(x,y)=(1)$. If $y$ is a unit then $a_{12}d^{-1}$ must be a unit, and so $a_{12}\mid c(A)$, contradicting (\ref{eq:a12-notdiv-c(A)}). Thus $y$ is not a unit, and so $x^{2}a_{12}\notin(ya_{12})$. Since $a_{12}$ divides each of $a_{11}-a_{22}$, $a_{21}$, $a_{31}$ and $a_{32}$, we have $xy(a_{11}-a_{22})-y^{2}(a_{21}+ya_{31}+xa_{32})\in(ya_{12})$. Thus, we must have \begin{equation} x^{2}a_{12}+xy(a_{11}-a_{22})-y^{2}(a_{21}+ya_{31}+xa_{32})\neq0.\label{eq:not-zero} \end{equation} From now on let $x$ and $y$ be as above, so that $(x,y)=(1)$ and $\Tr(XA)=0$. Next, we specify the entry $q$ in $X$.
Let $S_{0}$ be the set of maximal ideals $\mfp$ of $R$ such that $x^{2}a_{12}+xy(a_{11}-a_{22})-y^{2}(a_{21}+ya_{31}+xa_{32})\in\mfp$, and let \[
S=S_{0}\cup\{\mfp\in\Specm R\mid|R/\mfp|=2\}. \]
Note that $S$ is a finite set because of (\ref{eq:not-zero}) together with the fact that for any PID $R'$ (or any Dedekind domain), there are only finitely many $\mfp\in\Specm R'$ such that $|R'/\mfp|=2$. By Lemma~\ref{lem:GCD}\,\ref{enu:GCD-lemma abx} (with $r=1$) we can thus choose $q\in R$ such that \[ x+qy\notin\mfp,\quad\text{for all }\mfp\in S. \] Assume from now on that $q$ has been chosen in this way. Let $V$ be the set of maximal ideals of $R$ such that $x+qy\in\mfp$, that is, \[ V=\{\mfp\in\Specm R\mid x+qy\in\mfp\}. \] By the choice of $q$ we thus have in particular that \begin{equation} \mfp\in V\Longrightarrow x^{2}a_{12}+xy(a_{11}-a_{22})-y^{2}(a_{21}+ya_{31}+xa_{32})\notin\mfp.\label{eq:pinV-polynotinp} \end{equation} Note that for every $\mfp\in V$ we have $y\notin\mfp$ since $(x,y)=(1)$. Note also that $S\cap V=\varnothing$.
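To illustrate the finiteness of the second set in the definition of $S$: for $R=\Z$ the only maximal ideal with residue field of size $2$ is $(2)$, while for $R=\mathbb{F}_{2}[t]$ the only such ideals are $(t)$ and $(t+1)$.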
We claim that $X_{\mfp}\in\M_{n}(R/\mfp)$ is regular for every maximal ideal $\mfp$ not in $V$. To show this, let $\mfp\in(\Specm R)\setminus V$ and let \[ M=\begin{pmatrix}x+qy & 0\\ q & 1 \end{pmatrix}\oplus1_{n-2}\in\M_{n}(R). \] Since $x+qy\not\in\mfp$ the image $M_{\mfp}\in\M_{n}(R/\mfp)$ of $M$ is invertible and, letting $y_{\mfp}$ denote the image of $y$ in $R/\mfp$, we have \[ M_{\mfp}X_{\mfp}M_{\mfp}^{-1}=(m_{ij})=\begin{cases} m_{ii}=-y_{\mfp} & \text{for }i=2,4,\dots,2k,\\ m_{j,j-1}=1 & \text{for }j=2,3,\dots,n,\\ m_{ij}=0 & \text{otherwise}. \end{cases} \] It follows from Lemma~\ref{lem:reg-triang} that $M_{\mfp}X_{\mfp}M_{\mfp}^{-1}$ is regular, and thus $X_{\mfp}$ is regular.
By our choice of $q$ we have $\mfp\notin V$ if $\mfp\in S$, so $X_{\mfp}$ is regular for any $\mfp\in S$, and $S$ is non-empty. By Proposition~\ref{prop:Reg-mod-m} we have that $X$ is regular as an element in $\M_{n}(F)$, where $F$ is the field of fractions of $R$. By our choice of $x$ and $y$ we have $\Tr(X^{r}A)=0$ for $r=0,1,\dots,n-1$, so Proposition~\ref{prop:LF-criterion-fields} implies that we can write $A=[X,Q]$, for some $Q\in\M_{n}(F)$. Clearing denominators in $Q$ we find that there exists a non-zero element $m_{0}\in R$ such that $m_{0}A\in[X,\M_{n}(R)]$. We now highlight a step which we will refer to in the following:
\MyQuote{Let $m\in R$ be such that it has the minimal number of (not necessarily distinct) prime factors among all $m'\in R$ such that $m'A\in[X,\M_n(R)]$, and let $Q\in\M_n(R)$ be such that $mA=[X,Q]$.} We show that the only maximal ideals containing $m$ are those in $V$. Suppose that $\mfp=(p)\in(\Specm R)\setminus V$ and that $m\in\mfp$. Then $0=[X_{\mfp},Q{}_{\mfp}]$, and since $X_{\mfp}$ is regular there exists a polynomial $f\in R[T]$ such that $Q=f(X)+pQ'$ for some $Q'\in\M_{n}(R)$, so $mA=[X,f(X)+pQ']=[X,pQ']$ and thus $mp^{-1}A=[X,Q']$, which contradicts $(*)$. Thus, if $m\in\mfp$ for some $\mfp\in\Specm R$, then we must have $\mfp\in V$. Let $\mfp_{1},\mfp_{2},\dots,\mfp_{\nu}$, $\nu\in\N$ be the elements of $V$ such that $m\in\mfp_{i}$. For each $\mfp_{i}$, choose a generator $p_{i}\in R$, so that $\mfp_{i}=(p_{i})$, for $i=1,\dots,\nu$. We then have \[ (m)=(p_{1}^{e_{1}}p_{2}^{e_{2}}\cdots p_{\nu}^{e_{\nu}}), \] for some $e_{i}\in\N$, $1\leq i\leq\nu$.
The strategy is now to show that $X$ can be replaced by a matrix $X_{1}$ which is regular mod $\mfp$ for every $\mfp\in V$. Let \[ N=1+qE_{21}\in\M_{n}(R). \] For ease of calculation we will consider the matrices \[ A_{0}=NAN^{-1},\quad X_{0}=NXN^{-1},\quad Q_{0}=NQN^{-1}. \] Let $\mfp\in V$ be any of the ideals $\mfp_{1},\mfp_{2},\dots,\mfp_{\nu}$. We have \begin{equation} (X_{0})_{\mfp}=\begin{pmatrix}0 & 0\\ 0 & W_{\mfp} \end{pmatrix}=(0)\oplus W_{\mfp},\label{eq:Wp} \end{equation} where $W_{\mfp}\in\M_{n-1}(R/\mfp)$ is regular. We wish to determine the dimension of the centraliser \[ C(\mfp):=C_{\M_{n}(R/\mfp)}((X_{0})_{\mfp}). \] Since $(x,y)=(1)$, we have $y_{\mfp}\neq0$, so the Jordan form of $(X_{0})_{\mfp}$ is \[ J_{k}(-y_{\mfp})\oplus J_{n-k-1}(0)\oplus J_{1}(0), \] where $k=\lfloor n/2\rfloor$, as before. We have an isomorphism of $R/\mfp$-vector spaces \[ C(\mfp)\cong C_{\M_{k}(R/\mfp)}(J_{k}(-y_{\mfp}))\oplus C_{\M_{n-k}(R/\mfp)}(J_{n-k-1}(0)\oplus J_{1}(0)). \] Since $\dim C_{\M_{k}(R/\mfp)}(J_{k}(-y_{\mfp}))=k$ it remains to determine the dimension of $C_{\M_{n-k}(R/\mfp)}(J_{n-k-1}(0)\oplus J_{1}(0))$. A matrix \[ H=\begin{pmatrix}H_{11} & H_{12}\\ H_{21} & H_{22} \end{pmatrix}\in\M_{n-k}(R/\mfp), \] where $H_{11}$ is a $(n-k-1)\times(n-k-1)$ block, $H_{22}$ is a $1\times1$ block, and the other blocks are of compatible sizes, commutes with $J_{n-k-1}(0)\oplus J_{1}(0)$ if and only if \[ H_{11}J_{n-k-1}(0)=J_{n-k-1}(0)H_{11},\quad H_{12}\in\begin{pmatrix}R/\mfp\\ 0 \end{pmatrix},\quad H_{21}\in(0,R/\mfp). \] Hence $\dim C_{\M_{n-k}(R/\mfp)}(J_{n-k-1}(0)\oplus J_{1}(0))=n-k-1+1+1+1$, and so \[ \dim C(\mfp)=n+2, \] that is, $(X_{0})_{\mfp}$ is subregular (cf.~\cite{Springer-Steinberg}). Next, we need the dimension of $(R/\mfp)[(X_{0})_{\mfp}]$ (the algebra of polynomials in $(X_{0})_{\mfp}$ over the field $R/\mfp$). Since $(R/\mfp)[(X_{0})_{\mfp}]\cong(0)\oplus(R/\mfp)[W_{\mfp}]$ and $W_{\mfp}$ is regular, we have $\dim(R/\mfp)[(X_{0})_{\mfp}]=n-1$.
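As a quick check, for $n=3$ (so $k=1$) the Jordan form of $(X_{0})_{\mfp}$ is $J_{1}(-y_{\mfp})\oplus J_{1}(0)\oplus J_{1}(0)$; since $y_{\mfp}\neq0$, its centraliser consists of the block-diagonal matrices in $(R/\mfp)\oplus\M_{2}(R/\mfp)$ and has dimension $1+4=5=n+2$, while the minimal polynomial is $\lambda(\lambda+y_{\mfp})$, so $\dim(R/\mfp)[(X_{0})_{\mfp}]=2=n-1$.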
We now find a basis for $C(\mfp)$. We know that $(R/\mfp)[(X_{0})_{\mfp}]$ is an $(n-1)$-dimensional subspace of $C(\mfp)$. Moreover, direct verification shows that $E_{11}$ and $E_{12}+y_{\mfp}E_{13}$ are in $C(\mfp)$. Let $\kappa=n+1-2\lfloor(n+1)/2\rfloor$, that is, $\kappa$ is $0$ if $n$ is odd and $1$ if $n$ is even. Then we also have \[ E_{n1}+\kappa y_{\mfp}E_{n-1,1}\in C(\mfp). \] Since $(X_{0})_{\mfp}$ is lower triangular and the first column of $(X_{0})_{\mfp}^{i}$ is $0$ for all $i\in\N$, the intersection of $(R/\mfp)[(X_{0})_{\mfp}]$ with the $R/\mfp$-span $\langle E_{11},E_{12}+y_{\mfp}E_{13},E_{n1}+\kappa y_{\mfp}E_{n-1,1}\rangle$ is $0$. Since $\{E_{11},E_{12}+y_{\mfp}E_{13},E_{n1}+\kappa y_{\mfp}E_{n-1,1}\}$ is linearly independent, $\dim(R/\mfp)[(X_{0})_{\mfp}]=n-1$ and $\dim C(\mfp)=n+2$, we must have \begin{equation} C(\mfp)=\langle(R/\mfp)[(X_{0})_{\mfp}],E_{11},E_{12}+y_{\mfp}E_{13},E_{n1}+\kappa y_{\mfp}E_{n-1,1}\rangle.\label{eq:Centr-span} \end{equation} We observe that the matrix $E_{n1}+\kappa yE_{n-1,1}\in\M_{n}(R)$, whose image in $\M_{n}(R/\mfp)$ is $E_{n1}+\kappa y_{\mfp}E_{n-1,1}$, satisfies \begin{equation} E_{n1}+\kappa yE_{n-1,1}\in C_{\M_{n}(R)}(X_{0}).\label{eq:E+kyE-inC(X)} \end{equation} Let $\mfa=\prod_{i=1}^{\nu}\mfp_{i}$, so that $\mfa=(p_{1}\cdots p_{\nu})$. By (\ref{eq:Centr-span}) and Lemma~\ref{lem:Centr-product} we have \begin{equation} C_{\M_{n}(R/\mfa)}(X_{\mfa})=\langle(R/\mfa)[(X_{0})_{\mfa}],E_{11},E_{12}+y_{\mfa}E_{13},E_{n1}+\kappa y_{\mfa}E_{n-1,1}\rangle.\label{eq:Centr-span-severalprimes} \end{equation} Since $[X_{0},Q_{0}]=mA_{0}$ we have $([X_{0},Q_{0}])_{\mfa}=0$, that is, $(Q_{0})_{\mfa}\in C_{\M_{n}(R/\mfa)}((X_{0})_{\mfa})$. Hence, by (\ref{eq:Centr-span-severalprimes}) \[ Q_{0}=f(X_{0})+\alpha E_{11}+\beta(E_{12}+yE_{13})+\gamma(E_{n1}+\kappa yE_{n-1,1})+p_{1}\cdots p_{\nu}D, \] for some $\alpha,\beta,\gamma\in R$, $f(T)\in R[T]$ and $D\in\M_{n}(R)$. Using (\ref{eq:E+kyE-inC(X)}) we get \begin{align} [X_{0},Q_{0}] & =[X_{0},f(X_{0})+\alpha E_{11}+\beta(E_{12}+yE_{13})\label{eq:X0-Q0}\\
& \quad+\gamma(E_{n1}+\kappa yE_{n-1,1})+p_{1}\cdots p_{\nu}D]\nonumber \\
& =[X_{0},\alpha E_{11}+\beta(E_{12}+yE_{13})+p_{1}\cdots p_{\nu}D]\nonumber \\
& =[X_{0},Q_{1}],\nonumber \end{align} where \[ Q_{1}:=\alpha E_{11}+\beta(E_{12}+yE_{13})+p_{1}\cdots p_{\nu}D. \] Let $i\in\N$ be such that $1\leq i\leq\nu$. If $(\alpha,\beta)\subseteq\mfp_{i}$ then $[X_{0},Q_{1}]\in p_{i}[X_{0},\M_{n}(R)]$ and so $mp_{i}^{-1}A\in[X,\M_{n}(R)]$, contradicting $(*)$. Thus either $\alpha\notin\mfp_{i}$ or $\beta\notin\mfp_{i}$. We show that the case where $\alpha\in\mfp_{i}$ and $\beta\notin\mfp_{i}$ cannot arise. Since $mA_{0}=[X_{0},Q_{0}]=[X_{0},Q_{1}]$, we have $m\cdot\Tr(Q_{1}A_{0})=0$, whence $\Tr(Q_{1}A_{0})=0$. Together with $\alpha\in\mfp_{i}$ and $\beta\notin\mfp_{i}$ this implies that \[ \Tr((E_{12}+yE_{13})A_{0})\in\mfp_{i}. \] Recalling that $A_{0}=NAN^{-1}$ we thus get \[ -q^{2}a_{12}+q(a_{11}-a_{22})+a_{21}+ya_{31}-qya_{32}\in\mfp_{i} \] and, after multiplying by $y^{2}$, \[ -q^{2}y^{2}a_{12}+qy^{2}(a_{11}-a_{22})+y^{2}(a_{21}+ya_{31}-qya_{32})\in\mfp_{i}. \] Since $\mfp_{i}\in V$ we have $qy\in-x+\mfp_{i}$ and so \[ x^{2}a_{12}+xy(a_{11}-a_{22})-y^{2}(a_{21}+ya_{31}+xa_{32})\in\mfp_{i}. \] But by our choice of $q$ we have \[ x^{2}a_{12}+xy(a_{11}-a_{22})-y^{2}(a_{21}+ya_{31}+xa_{32})\notin\mfp, \] for all $\mfp\in V$, which together with (\ref{eq:pinV-polynotinp}) yields a contradiction. Therefore we cannot have $\alpha\in\mfp_{i}$ and $\beta\notin\mfp_{i}$, so we must have $\alpha\notin\mfp_{i}$. We have thus shown that \[ \alpha\notin\mfp_{i},\quad\text{for all }i=1,\dots,\nu. \] By Lemma~\ref{lem:GCD}\,\ref{enu:GCD-lemma pqt} and our choice of $S$ there exists a $t\in R$ such that \begin{equation} t\notin\mfp_{i}\quad\text{and}\quad\alpha t+y\notin\mfp_{i},\quad\text{for all }i=1,\dots,\nu.\label{eq:at+y} \end{equation} Define the matrix \begin{align*} X_{1} & =X_{0}+tQ_{1}. \end{align*} Let $\mfp$ be any of the ideals $\mfp_{1},\mfp_{2},\dots,\mfp_{\nu}$. Let $\alpha_{\mfp},\beta_{\mfp},t_{\mfp}$ denote the images of $\alpha$, $\beta$ and $t$ in $R/\mfp$, respectively. As before, let $y_{\mfp}$ denote the image of $y$ in $R/\mfp$. If we let \[ L_{\mfp}=\begin{pmatrix}1 & \beta_{\mfp}\alpha_{\mfp}^{-1} & y_{\mfp}\beta_{\mfp}\alpha_{\mfp}^{-1}\\ 0 & 1 & 0\\ 0 & 0 & 1 \end{pmatrix}\oplus1_{n-3}\in\M_{n}(R/\mfp), \] then direct verification shows that $L_{\mfp}(X_{1})_{\mfp}L_{\mfp}^{-1}=\alpha_{\mfp}t_{\mfp}E_{11}\oplus W_{\mfp}$, where $W_{\mfp}$ is the matrix in (\ref{eq:Wp}). Since $W_{\mfp}$ is regular and neither of its eigenvalues $0$ or $-y_{\mfp}$ equals $\alpha_{\mfp}t_{\mfp}$ by (\ref{eq:at+y}), the matrix $\alpha_{\mfp}t_{\mfp}E_{11}\oplus W_{\mfp}$, and hence $(X_{1})_{\mfp}\in\M_{n}(R/\mfp)$, is regular. We thus see that $(X_{1})_{\mfp_{i}}$ is regular for all $i=1,\dots,\nu$.
By (\ref{eq:X0-Q0}) we have \[ mA_{0}=[X_{0},Q_{0}]=[X_{0},Q_{1}]=[X_{1},Q_{1}], \] and since $(X_{1})_{\mfp_{i}}$ is regular and $m\in\mfp_{i}$ for all $i=1,\dots,\nu$, we get $Q_{1}=g_{i}(X_{1})+p_{i}Q{}_{1}^{(i)}$, for some $g_{i}(T)\in R[T]$ and $Q{}_{1}^{(i)}\in\M_{n}(R)$. Thus \[ mA_{0}=[X_{1},g_{i}(X_{1})+p_{i}Q{}_{1}^{(i)}]=p_{i}[X_{1},Q{}_{1}^{(i)}], \] and so $mp_{i}^{-1}A_{0}=[X_{1},Q{}_{1}^{(i)}]$. Repeating the argument if necessary, we obtain $mp_{i}^{-e_{i}}A_{0}\in[X_{1},\M_{n}(R)]$. Running through each $i=1,\dots,\nu$ we obtain $A_{0}=[X_{1},Y]$ for some $Y\in\M_{n}(R)$, and hence $A=[N^{-1}X_{1}N,NYN^{-1}]$. \end{proof} By a theorem of Hungerford \cite{Hungerford} every principal ideal ring (PIR) is a finite product of rings, each of which is a homomorphic image of a PID. Together with Theorem~\ref{thm:Main} this immediately implies the following: \begin{cor} \label{cor:Coroll-Main}Let $R$ be a PIR (not necessarily an integral domain) and let $A\in\M_{n}(R)$, $n\geq2$, be a matrix with trace zero. Then $A=[X,Y]$ for some $X,Y\in\M_{n}(R)$. \end{cor} We end this section by proving a strengthened version of Theorem~\ref{thm:Main} for $n=3$. \begin{prop} \label{prop:n3regX}Let $R$ be a PID and let $A\in\M_{3}(R)$ be a matrix with trace zero. Then $A=[X,Y]$ for some $X,Y\in\M_{3}(R)$ such that $X_{\mfp}$ is regular for all $\mfp\in\Specm R$. \end{prop} \begin{proof} As in the proof of Theorem~\ref{thm:Main} we may assume that $A$ is in Laffey-Reams form. Define the matrix \[ X=\begin{pmatrix}0 & 0 & 0\\ x & -y & 0\\ q & z & 0 \end{pmatrix}\in\M_{3}(R). \] The same argument as in the proof of Theorem~\ref{thm:Main} shows that $\Tr(XA)=0$ implies that $\Tr(X^{r}A)=0$ for all $r\geq0$. Let $a_{23}'\in R$ be such that $a_{23}=a_{12}a_{23}'$, and let $d\in R$ be a generator of $(a_{12},c(A))$. The condition $\Tr(XA)=0$ is then equivalent to \begin{align*} x & =hc(A)d^{-1}-a_{23}'z\\ y & =ha_{12}d^{-1}, \end{align*} for any $h\in R$. We claim that the system of equations \begin{equation} \begin{cases} x=hc(A)d^{-1}-a_{23}'z\\ y=ha_{12}d^{-1}\\ xz+qy=1 \end{cases}\label{eq:n3-system} \end{equation} has a solution in $x,y,q,z,h\in R$. Indeed, substituting the first two equations in the last, we get \[ -a_{23}'z^{2}+h(c(A)d^{-1}z+qa_{12}d^{-1})=1, \] and since $(c(A)d^{-1},a_{12}d^{-1})=(1)$ we can choose $z$ and $q$ in $R$ such that $ $$c(A)d^{-1}z+qa_{12}d^{-1}=1$, and it then remains to take \[ h=1+a_{23}'z^{2}. \] Suppose now that $x,y,q,z,h\in R$ is a solution of (\ref{eq:n3-system}), and let $\mfp\in\Specm R$. We show that $X_{\mfp}$ is regular. The characteristic polynomial of $X$ is \[ \lambda^{2}(\lambda+y)\in R[\lambda]. \] We have \[ X^{2}=\begin{pmatrix}0 & 0 & 0\\ -xy & y^{2} & 0\\ xz & -yz & 0 \end{pmatrix}. \] Thus, if $y\notin\mfp$ then $(X_{\mfp})^{2}\neq0$, and if $y\in\mfp$, then we must have $xz\not\in\mfp$, so $(X_{\mfp})^{2}\neq0$ also in this case. Furthermore, since $xz+qy=1$ we have \[ X(X+y)=E_{31}\neq0. \] Thus the minimal polynomial of $X_{\mfp}$ must equal the characteristic polynomial, so $X_{\mfp}$ is regular. Since we have $\Tr(X^{r}A)=0$ for all $r\geq0$, Proposition~(\ref{prop:Criterion}) implies that $A=[X,Y]$, for some $Y\in\M_{3}(R)$. \end{proof} We remark that while the matrix $X$ in the proof of the above proposition is regular modulo every $\mfp\in\Specm R$, it is not necessarily regular. 
Moreover, while for $n=4$ we can find an analogous matrix \[ X=\begin{pmatrix}0 & 0 & 0 & 0\\ x & -y & 0 & 0\\ q & z & 0 & 0\\ 0 & 0 & 1 & -y \end{pmatrix} \] such that $\Tr(AX)=0$ and $xz+yq=1$, in this case the matrix $X_{\mfp}$ may fail to be regular for some $\mfp\in\Spec R$.
\section{\label{sec:Further-directions}Further directions}
If $R$ is a field or if $R$ is a PID and $n=2$, we have shown that every $A\in\M_{n}(R)$ with trace zero can be written $A=[X,Y]$ where $X,Y\in\M_{n}(R)$ and $X$ is regular. Our proof of Theorem~\ref{thm:Main} shows that for any PID $R$, $n\geq2$ and every $A\in\M_{n}(R)$ with trace zero we have $A=[X,Y]$ for some $X,Y\in\M_{n}(R)$ where $X_{\mfp}$ is regular for all but finitely many maximal ideals $\mfp$ of $R$. Moreover, Proposition~\ref{prop:n3regX} says that when $n=3$ the matrix $X$ can be chosen such that $X_{\mfp}$ is regular for all maximal ideals $\mfp$. \begin{problem*} For $n\geq4$ and $A=[X,Y]$, is it always possible to choose $X$ such that $X_{\mfp}$ is regular for all maximal ideals $\mfp$? \end{problem*} This problem is interesting insofar as a proof, if possible, would be likely to yield a substantially simplified proof of Theorem~\ref{thm:Main}.
It is natural to ask for generalisations of Theorem~\ref{thm:Main} to rings other than PIRs. We first mention some counter-examples. It was shown by Lissner \cite{Lissner} that the analogue of Theorem~\ref{thm:Main} fails when $n=2$ and $R=k[x,y,z]$, where $k$ is a field, and more generally that for $R=k[x_{1},\dots,x_{2n-1}]$ there exist matrices in $\M_{n}(R)$ with trace zero which are not commutators (see \cite[Theorem~5.4]{Lissner}). Rosset and Rosset \cite[Lemma~1.1]{Rosset} gave a sufficient criterion for a $2\times2$ trace zero matrix over any commutative ring not to be a commutator. They showed however, that a Noetherian integral domain cannot satisfy their criterion unless it has dimension at least $3$. This means that their criterion is not an obstruction to a $2\times2$ trace zero matrix over a one or two-dimensional Noetherian domain being a commutator. Still, if $R$ is the two-dimensional domain $\R[x,y,z]/(x^{2}+y^{2}+z^{2}-1)$ it can be shown that there exists a matrix in $\M_{2}(R)$ with trace zero which is not a commutator (this example goes back to Kaplansky; see \cite[Section~4, Example~1]{Swan/62}, \cite[p.~532]{Lissner-OPrings} or \cite[Section~3]{Rosset}).
A ring $R$ is called an \emph{OP-ring} if for every $n\geq1$ every vector in $\bigwedge^{n-1}R^{n}$ is decomposable, that is, of the form $v_{1}\wedge\dots\wedge v_{n-1}$ for some $v_{i}\in R^{n}$. This is equivalent to saying that every vector in $R^{n}$ is an outer product (hence the acronym OP). The notion of OP-ring was introduced in \cite{Lissner-OPrings}. In particular, for $n=3$ the condition on $R$ of being an OP-ring is equivalent to the condition that every trace zero matrix in $\M_{2}(R)$ is a commutator (see \cite[Section~3]{Lissner}). It is known that every Dedekind domain is an OP-ring \cite[p.~534]{Lissner-OPrings} and that every polynomial ring in one variable over a Dedekind domain is an OP-ring \cite[Theorem~1.2]{Towber}. This prompts the following problem: \begin{problem*} Let $R$ be a Dedekind domain and assume that $A\in\M_{n}(R)$, $n\geq2$, has trace zero. Is it true that $A=[X,Y]$ for some $X,Y\in\M_{n}(R)$? \end{problem*} \noindent Since Dedekind domains are OP-rings the question has an affirmative answer for $n=2$, and one could ask the same question for any OP-ring. In the setting of matrices over a Dedekind domain the methods we have used to prove Theorem~\ref{thm:Main} are of little use because they rely crucially on the underlying ring being both atomic and B\'ezout, which implies that it is a PID.
\end{document}
\begin{document}
\title{Super-stability in the \\ Student-Project Allocation Problem with Ties\thanks{A preliminary version of a part of this paper appeared in \cite{OM18}.}}
\begin{abstract} The \emph{Student-Project Allocation problem with lecturer preferences over Students} ({\sc spa-s}) involves assigning students to projects based on student preferences over projects, lecturer preferences over students, and the maximum number of students that each project and lecturer can accommodate. This classical model assumes that each project is offered by one lecturer and that preference lists are strictly ordered. Here, we study a generalisation of {\sc spa-s} where ties are allowed in the preference lists of students and lecturers, which we refer to as the \emph{Student-Project Allocation problem with lecturer preferences over Students with Ties} ({\sc spa-st}). We investigate stable matchings under the most robust definition of stability in this context, namely \emph{super-stability}. We describe the first polynomial-time algorithm to find a super-stable matching or to report that no such matching exists, given an instance of {\sc spa-st}. Our algorithm runs in $O(L)$ time, where $L$ is the total length of all the preference lists. Finally, we present results obtained from an empirical evaluation of the linear-time algorithm based on randomly-generated {\sc spa-st} instances. Our main finding is that, whilst super-stable matchings can be elusive when ties are present in the students' and lecturers' preference lists, the probability of such a matching existing is significantly higher if ties are restricted to the lecturers' preference lists.
\keywords{Student-project allocation \and Stable matching \and Super-stability \and Polynomial-time algorithm \and Empirical evaluation}
\end{abstract}
\thispagestyle{empty} \setcounter{page}{1} \pagestyle{headings}
\section{Introduction} \label{introduction} The \emph{Student-Project Allocation problem} ({\sc spa}) \cite{AIM07,CFG19,Man13} is a many-one matching problem which involves three sets of entities: students, projects and lecturers. Each project is proposed by one lecturer and each student is required to rank a subset of these projects that she finds acceptable, in order of preference. Further, each lecturer may have preferences over the students that find her projects acceptable and/or the projects that she offers. Typically there may be capacity constraints on the number of students that each project and lecturer can accommodate. The goal is to find a \emph{matching}, i.e., an assignment of students to projects based on the stated preferences such that each student is assigned to at most one project, and the capacity constraints on projects and lecturers are not violated.
Applications of {\sc spa} can be found in many university departments, for example, the School of Computing Science, University of Glasgow \cite{KIMS15}, the Faculty of Science, University of Southern Denmark \cite{CFG19}, the Department of Computing Science, University of York \cite{Kaz02}, and elsewhere \cite{AB03,RGSA17,HSVS05}. In this work, we will concern ourselves with a variant of {\sc spa} that involves lecturer preferences over students, which is known as the \emph{Student-Project Allocation problem with lecturer preferences over Students} ({\sc spa-s}) \cite{AIM07,Man13}. This variant falls under the category of bipartite matching problem with two-sided preferences.\footnote{For further reading on the classification of matching problems, we refer the interested reader to \cite{Man13}.} In this context, it has been argued that a natural property for a matching to satisfy is that of \emph{stability} \cite{Rot84,Rot90,Rot91}. Informally, a \emph{stable matching} ensures that no student and lecturer would have an incentive to deviate from the matching by forming a private arrangement involving some project.
The classical {\sc spa-s} model assumes that preferences are strictly ordered. However, this might not be achievable in practice. For instance, a lecturer may be unable or unwilling to provide a strict ordering of all the students who find her projects acceptable. Such a lecturer may be happier to rank two or more students equally in a tie, which indicates that the lecturer is indifferent between the students concerned. This leads to a generalisation of {\sc spa-s} which we refer to as the \emph{Student-Project Allocation problem with lecturer preferences over Students with Ties} ({\sc spa-st}).
If we allow ties in the preference lists of students and lecturers, three different stability definitions naturally arise. Suppose $M$ is a matching in an instance of {\sc spa-st}. Informally, we say that $M$ is \emph{weakly stable, strongly stable} or \emph{super-stable} if there is no student and lecturer such that if they decide to form an arrangement outside the matching, respectively,
\begin{itemize} \item[(i)] both of them would be better off, \item[(ii)] one of them would be better off and the other would be no worse off, \item[(iii)] neither of them would be worse off. \end{itemize}
With respect to this informal definition, a super-stable matching is also strongly stable, and a strongly stable matching is also weakly stable. These concepts were first defined and studied by Irving \cite{Irv94} in the context of the \emph{Stable Marriage problem with Ties} ({\sc smt}), and subsequently extended to the \emph{Hospitals/Residents problem with Ties} ({\sc hrt}) \cite{IMS00,IMS03} (where {\sc hrt} is the special case of {\sc spa-st} in which each lecturer offers only one project, and the capacity of each project is the same as the capacity of the lecturer offering the project; and {\sc smt} is a restriction of {\sc hrt} where the capacity of each hospital is $1$).
Considering the weakest of the three stability concepts mentioned above, every instance of {\sc spa-st} admits a weakly stable matching (this follows by breaking the ties in an arbitrary fashion and applying the stable matching algorithm described in \cite{AIM07} to the resulting {\sc spa-s} instance). However, such matchings could be of different sizes \cite{MIIMM02}. Thus opting for weak stability leads to the problem of finding a weakly stable matching that matches as many students to projects as possible -- a problem that is known to be NP-hard \cite{IMMM99,MIIMM02}, even for the so-called \emph{Stable Marriage problem with Ties and Incomplete lists} ({\sc smti}), which is an extension of {\sc smt} in which the preference lists need not be complete. However, we note that a $\frac{3}{2}$-approximation algorithm was described in \cite{CM18} for the problem of finding a maximum size weakly stable matching, given an instance of {\sc spa-st}.\footnote{This approximation algorithm finds a weakly stable matching that is at least two-thirds the size of a maximum weakly stable matching.}
Although a super-stable matching can be elusive, it avoids the problem of finding a maximum size weakly stable matching, because, as we will show in this paper, analogous to the {\sc hrt} case \cite{IMS00}: (i) all super-stable matchings have the same size; (ii) finding one or reporting that none exists can be accomplished in linear-time; and (iii) if a super-stable matching $M$ exists then all weakly stable matchings are of the same size (equal to the size of $M$), and match exactly the same set of students. Furthermore, Irving \emph{et al}.~\cite{IMS00} argued that super-stability is a very natural solution concept in cases where agents have incomplete information. Central to their argument is the following proposition, stated for {\sc hrt} in \cite[Proposition 2]{IMS00}, which extends naturally to {\sc spa-st} as follows (see Section \ref{subsection:spa-st} for a proof).
\begin{restatable}[]{proposition}{superstability} \label{proposition1} Let $I$ be an instance of {\sc spa-st}, and let $M$ be a matching in $I$. Then $M$ is super-stable in $I$ if and only if $M$ is stable in every instance of {\sc spa-s} obtained from $I$ by breaking the ties in some way. \end{restatable}
In a practical setting, suppose that a student $s_i$ has incomplete information about two or more projects and decides to rank them equally in a tie $T$, and a super-stable matching $M$ exists in the corresponding {\sc spa-st} instance $I$. Then $M$ is stable in every instance of {\sc spa-s} (obtained from $I$ by breaking the ties) that represents the true preferences of $s_i$. Consequently, we will focus on the concept of super-stability in the {\sc spa-st} context.
Unfortunately not every instance of {\sc spa-st} admits a super-stable matching. This is true, for example, in the case where there are two students, two projects and one lecturer, the capacity of each project is $1$, the capacity of the lecturer is $2$, and every preference list is a single tie of length 2; any matching will be undermined by some student $s_i$ and the lecturer involving a project that $s_i$ is not assigned to. Nonetheless, it should be clear from the discussions above that a super-stable matching should be preferred in practical applications when one does exist.
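To see this concretely, write the students as $s_{1},s_{2}$, the projects as $p_{1},p_{2}$ and the lecturer as $l_{1}$, so that each student's preference list is the single tie $(p_{1}\; p_{2})$ and $l_{1}$'s preference list is the single tie $(s_{1}\; s_{2})$. In the matching $\{(s_{1},p_{1}),(s_{2},p_{2})\}$, for example, $s_{1}$ and $l_{1}$ could form an arrangement involving $p_{2}$ with neither being worse off, since $s_{1}$ is indifferent between the two projects and $l_{1}$ is indifferent between the two students; and any matching that leaves a student unassigned is undermined by that student and $l_{1}$ via a project with a free place.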
\paragraph{\textbf{Related work.}} Irving \emph{et al}.~\cite{IMS00} described an algorithm to find a super-stable matching given an instance of {\sc hrt}, or to report that no such matching exists. However, merely reducing an instance of {\sc spa-st} to an instance of {\sc hrt} and applying the algorithm described in \cite{IMS00} to the resulting {\sc hrt} instance does not work in general (we explain this further in Section \ref{subsect:cloning}). Other variants of {\sc spa} in the literature involve lecturer preferences over their proposed projects \cite{IMY12,MMO18,MO08}, lecturer preferences over (student, project) pairs \cite{AM09}, and no lecturer preferences at all \cite{KIMS15} (see \cite{CFG19} for a more detailed survey in this latter case). A similar model known as the \textit{Student-Project-Resource Matching-Allocation problem} ({\sc spr}) was recently considered in \cite{IYYY19}. This model is different from {\sc spa-s} in the following ways: (i) in {\sc spa-s}, the capacity of each project is fixed by the lecturer offering it, while in {\sc spr}, the capacity of each project is determined by the resources allocated to it; (ii) in {\sc spa-s}, each lecturer has a fixed capacity on the total number of students that can be assigned to her projects, while in {\sc spr}, there is no notion of lecturer capacity.
\paragraph{\textbf{Our contribution.}} In this paper, we describe the first polynomial-time algorithm to find a super-stable matching or to report that no such matching exists, given an instance of {\sc spa-st} -- thus solving an open problem given in \cite{AIM07,Man13}. Our algorithm is student-oriented because it involves the students applying to projects. Moreover, the algorithm returns the student-optimal super-stable matching, in the sense that if the given instance admits a super-stable matching then our algorithm will output a solution in which each assigned student has the best project that she could obtain in any super-stable matching that the instance admits. We also present the results of an empirical evaluation based on an implementation of our algorithm that investigates how the nature of the preference lists would affect the likelihood of a super-stable matching existing, with respect to randomly-generated {\sc spa-st} instances.\footnote{From a theoretical perspective, the likelihood of a stable matching existing has been explored for the Stable Roommates problem -- a non-bipartite generalisation of the Stable Marriage problem \cite{PI94}.} Our main finding from the empirical evaluation is that super-stable matchings are very elusive with ties in the students' and lecturers' preference lists. However, if the preference lists of the students are strictly ordered and only the lecturers express ties in their preference lists, the probability of a super-stable matching existing is significantly higher.
The remainder of this paper is structured as follows. We give a formal definition of the {\sc spa-s} problem, the {\sc spa-st} variant, and the super-stability concept in Section \ref{section:definitions}. We describe our algorithm for {\sc spa-st} under super-stability in Section \ref{section:algorithm}. Further, Section \ref{section:algorithm} also presents our algorithm's correctness results and some structural properties satisfied by the set of super-stable matchings in an instance of {\sc spa-st}. In Section \ref{emprical-results}, we present the experimental results obtained from our algorithm's empirical evaluation. Finally, Section \ref{section:conclusions} presents some concluding remarks and potential direction for future work.
\section{Preliminary definitions and results} \label{section:definitions} \subsection{Formal definition of {\footnotesize SPA-S}} \label{subsection:spa-s} An instance $I$ of {\sc spa-s} involves a set $\mathcal{S} = \{s_1 , s_2, \ldots , s_{n_1}\}$ of \emph{students}, a set $\mathcal{P} = \{p_1 , p_2, \ldots , p_{n_2}\}$ of \emph{projects} and a set $\mathcal{L} = \{l_1 , l_2, \ldots , l_{n_3}\}$ of \emph{lecturers}. Each student $s_i$ ranks a subset of $\mathcal{P}$ in strict order, which forms $s_i$'s preference list. We say that $s_i$ finds $p_j$ \emph{acceptable} if $p_j$ is in $s_i$'s preference list, and we denote by $A_i$ the set of projects that $s_i$ finds acceptable. Each lecturer $l_k \in \mathcal{L}$ offers a non-empty set of projects $P_k$, where $P_1, P_2, \ldots,$ $P_{n_3}$ partitions $\mathcal{P}$. Also, $l_k$ ranks in strict order of preference those students who find at least one project in $P_k$ acceptable, which forms $l_k$'s preference list. We say that $l_k$ finds $s_i$ \textit{acceptable} if $s_i$ is in $l_k$'s preference list, and we denote by $\mathcal{L}_k$ the set of students that $l_k$ finds acceptable.
For any pair $(s_i, p_j) \in \mathcal{S} \times \mathcal{P}$, where $p_j$ is offered by $l_k$, we refer to $(s_i, p_j)$ as an \textit{acceptable pair} if $s_i$ and $l_k$ both find each other acceptable, i.e., if $p_j \in A_i$ and $s_i \in \mathcal{L}_k$. Each project $p_j \in \mathcal{P}$ has a capacity $c_j \in \mathbb{Z}^+$ indicating the maximum number of students that can be assigned to $p_j$. Similarly, each lecturer $l_k \in \mathcal{L}$ has a capacity $d_k \in \mathbb{Z}^+$ indicating the maximum number of students that $l_k$ is willing to supervise. We assume that for any lecturer $l_k$,
$$\max\{c_j: p_j \in P_k\} \leq d_k \leq \sum \{c_j: p_j \in P_k\},$$ \noindent i.e., the capacity of $l_k$ is (i) at least the highest capacity of the projects offered by $l_k$, and (ii) at most the sum of the capacities of all the projects $l_k$ is offering. We denote by $\mathcal{L}_k^j$, the \emph{projected preference list} of lecturer $l_k$ for $p_j$, which can be obtained from $\mathcal{L}_k$ by removing those students that do not find $p_j$ acceptable (thereby retaining the order of the remaining students from $\mathcal{L}_k$).
An \emph{assignment} $M$ is a subset of $\mathcal{S} \times \mathcal{P}$ such that $(s_i, p_j) \in M$ implies that $s_i$ finds $p_j$ acceptable. If $(s_i, p_j) \in M$, we say that $s_i$ \emph{is assigned to} $p_j$, and $p_j$ \emph{is assigned} $s_i$. For convenience, if $s_i$ is assigned in $M$ to $p_j$, where $p_j$ is offered by $l_k$, we may also say that $s_i$ \emph{is assigned to} $l_k$, and $l_k$ \emph{is assigned} $s_i$.
For any student $s_i \in \mathcal{S}$, we let $M(s_i)$ denote the set of projects that are assigned to $s_i$ in $M$. For any project $p_j \in \mathcal{P}$, we denote by $M(p_j)$ the set of students that are assigned to $p_j$ in $M$. Project $p_j$ is \emph{undersubscribed}, \emph{full} or \emph{oversubscribed} in $M$ according as $|M(p_j)|$ is less than, equal to, or greater than $c_j$, respectively. Similarly, for any lecturer $l_k \in \mathcal{L}$, we denote by $M(l_k)$ the set of students that are assigned to $l_k$ in $M$. Lecturer $l_k$ is \emph{undersubscribed}, \emph{full} or \emph{oversubscribed} in $M$ according as $|M(l_k)|$ is less than, equal to, or greater than $d_k$, respectively.
A \emph{matching} $M$ is an assignment such that each student is assigned to at most one project in $M$, each project is assigned at most $c_j$ students in $M$, and each lecturer is assigned at most $d_k$ students in $M$ (i.e., $|M(s_i)| \leq 1$ for each $s_i \in \mathcal{S}$, $|M(p_j)| \leq c_j$ for each $p_j \in \mathcal{P}$, and $|M(l_k)| \leq d_k$ for each $l_k \in \mathcal{L}$). If $s_i$ is assigned to some project in $M$, for convenience we let $M(s_i)$ denote that project. In what follows, $l_k$ is the lecturer who offers project $p_j$.
\begin{definition}[Stability] \label{def:stability} Let $I$ be an instance of {\sc spa-s}, and let $M$ be a matching in $I$. We say that $M$ is \emph{stable} if it admits no blocking pair, where a \emph{blocking pair} is an acceptable pair $(s_i, p_j) \in (\mathcal{S} \times \mathcal{P}) \setminus M$ such that (a) and (b) hold as follows: \begin{enumerate}[(a)]
\item either $s_i$ is unassigned in $M$ or $s_i$ prefers $p_j$ to $M(s_i)$;
\item either (i), (ii) or (iii) holds as follows:
\begin{enumerate} [(i)] \item each of $p_j$ and $l_k$ is undersubscribed in $M$; \item $p_j$ is undersubscribed in $M$, $l_k$ is full in $M$ and either
\begin{enumerate}[(1)]
\item $s_i \in M(l_k)$, or
\item $l_k$ prefers $s_i$ to the worst student in $M(l_k)$; \end{enumerate} \item $p_j$ is full in $M$ and $l_k$ prefers $s_i$ to the worst student in $M(p_j)$. \end{enumerate} \end{enumerate}
\end{definition} To find a stable matching in an instance of {\sc spa-s}, two linear-time algorithms were described in \cite{AIM07}. The stable matching produced by the first algorithm is \emph{student-optimal} (i.e., each assigned student has the best-possible project that she could obtain in any stable matching) while the one produced by the second algorithm is \emph{lecturer-optimal} (i.e., each lecturer has the best set of students that she could obtain in any stable matching). The set of stable matchings in a given instance of {\sc spa-s} satisfy several interesting properties that together form what we will call the \emph{Unpopular Projects Theorem} (analogous to the Rural Hospitals Theorem for {\scriptsize HR} \cite{IMS00}), which we state as follows. \begin{theorem}[\cite{AIM07}] \label{thrm:rural-spa-s} For a given instance of {\sc spa-s}, the following holds: \begin{enumerate} \item each lecturer is assigned the same number of students in all stable matchings; \item exactly the same students are unassigned in all stable matchings; \item a project offered by an undersubscribed lecturer is assigned the same number of students in all stable matchings. \end{enumerate} \end{theorem}
As we will see later in this paper, when ties are present in the preference lists of students and lecturers, the set of super-stable matchings also satisfy each of the properties in Theorem \ref{thrm:rural-spa-s}.
\subsection{Ties in the preference lists} \label{subsection:spa-st} We now define formally the generalisation of {\sc spa-s} in which the preference lists can include ties. In the preference list of lecturer $l_k\in \mathcal{L}$, a set $T$ of $r$ students forms a \emph{tie of length $r$} if $l_k$ does not prefer $s_i$ to $s_{i'}$ for any $s_i, s_{i'} \in T$ (i.e., $l_k$ is \emph{indifferent} between $s_i$ and $s_{i'}$). A tie in a student's preference list is defined similarly. For convenience, henceforth, we consider a non-tied entry in a preference list as a tie of length one. We denote by {\sc spa-st} the generalisation of {\sc spa-s} in which the preference list of each student (respectively lecturer) comprises a strict ranking of ties, each comprising one or more projects (respectively students).
An example {\sc spa-st} instance $I_1$ is given in Fig.~\ref{fig:spa-st-instance-1}, which involves the set of students $\mathcal{S} = \{s_1, s_2, s_3, s_4, s_5\}$, the set of projects $\mathcal{P} = \{p_1, p_2, p_3\}$ and the set of lecturers $\mathcal{L} = \{l_1, l_2\}$, with $P_1 = \{p_1, p_2\}$ and $P_2 = \{p_3\}$. Ties in the preference lists are indicated by round brackets.
\begin{figure}
\caption{ \small An example instance $I_1$ of {\sc spa-st}.}
\label{fig:spa-st-instance-1}
\end{figure}
In the context of {\sc spa-st}, we assume that all notation and terminology carries over from Section \ref{subsection:spa-s} as defined for {\sc spa-s} with the exception of stability, which we now define. When ties appear in the preference lists, three levels of stability arise (as in the {\sc hrt} context \cite{IMS00,IMS03}), namely \emph{weak stability, strong stability and super-stability}. The formal definition for weak stability in {\sc spa-st} follows from the definition for stability in {\sc spa-s} (see Definition \ref{def:stability}). Moreover, the existence of a weakly stable matching in an instance $I$ of {\sc spa-st} is guaranteed by breaking the ties in $I$ arbitrarily, thus giving rise to an instance $I'$ of {\sc spa-s}. Clearly, a stable matching in $I'$ is weakly stable in $I$. Indeed a converse of sorts holds, which gives rise to the following proposition.
\begin{restatable}[]{proposition}{weakstability} \label{proposition2} Let $I$ be an instance of {\sc spa-st}, and let $M$ be a matching in $I$. Then $M$ is weakly stable in $I$ if and only if $M$ is stable in some instance $I'$ of {\sc spa-s} obtained from $I$ by breaking the ties in some way. \end{restatable}
\begin{proof} Let $I$ be an instance of {\sc spa-st} and let $M$ be a matching in $I$. Suppose that $M$ is weakly stable in $I$. Let $I'$ be an instance of {\sc spa-s} obtained from $I$ by breaking the ties in the following way. For each student $s_i$ in $I$ such that the preference list of $s_i$ includes a tie $T$ containing two or more projects, we order the preference list of $s_i$ in $I'$ as follows: if $s_i$ is assigned in $M$ to a project $p_j$ in $T$ then $s_i$ prefers $p_j$ to every other project in $T$; otherwise, we order the projects in $T$ arbitrarily. For each lecturer $l_k$ in $I$ such that $l_k$'s preference list includes a tie $X$, if $X$ contains students that are assigned to $l_k$ in $M$ and students that are not assigned to $l_k$ in $M$ then $l_k$'s preference list in $I'$ is ordered in such a way that each $s_i\in X\cap M(l_k)$ is preferred to each $s_{i'}\in X\setminus M(l_k)$; otherwise, we order the students in $X$ arbitrarily. Now, suppose $(s_i, p_j)$ forms a blocking pair for $M$ in $I'$. Given how the ties in $I$ were removed to obtain $I'$, this implies that $(s_i, p_j)$ forms a blocking pair for $M$ in $I$, a contradiction to our assumption that $M$ is weakly stable in $I$. Thus $M$ is stable in $I'$.
Conversely, suppose $M$ is stable in some instance $I'$ of {\sc spa-s} obtained from $I$ by breaking the ties in some way. Now suppose that $M$ is not weakly stable in $I$. Then some pair $(s_i, p_j)$ forms a blocking pair for $M$ in $I$. It is then clear from the definition of weak stability and from the construction of $I'$ that $(s_i, p_j)$ is a blocking pair for $M$ in $I'$, a contradiction. \qed \end{proof} \noindent As mentioned earlier, super-stability is the most robust concept to seek. Only if no super-stable matching exists in the underlying problem instance should other forms of stability be sought in a practical setting. Thus, for the remainder of this paper, we focus on super-stability in the {\sc spa-st} context.
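Before turning to super-stability, we note that the forward direction of the proof of Proposition \ref{proposition2} can be phrased operationally: break every tie in favour of the assignments in $M$. The following minimal Python sketch carries out exactly this tie-breaking step; it assumes the illustrative encoding sketched in Section~\ref{subsection:spa-st}, and the helper names are hypothetical.
\begin{verbatim}
# Break the ties of an SPA-ST instance in favour of a given matching M (illustrative).

def break_student_ties(student_prefs, M):
    """student_prefs: ranked lists of ties; M maps each assigned student to her project.
    Within each tie, the project assigned in M (if any) is placed first."""
    strict = {}
    for s, prefs in student_prefs.items():
        ordered = []
        for tie in prefs:
            ordered.extend(sorted(tie, key=lambda p: 0 if M.get(s) == p else 1))
        strict[s] = ordered
    return strict

def break_lecturer_ties(lecturer_prefs, lecturer_assignees):
    """lecturer_assignees maps each lecturer l_k to the set M(l_k).  Within each tie,
    students assigned to l_k in M precede those who are not."""
    strict = {}
    for l, prefs in lecturer_prefs.items():
        assigned = lecturer_assignees.get(l, set())
        ordered = []
        for tie in prefs:
            ordered.extend(sorted(tie, key=lambda s: 0 if s in assigned else 1))
        strict[l] = ordered
    return strict
\end{verbatim}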
\begin{definition}[Super-stability] \label{definition:super-stability} Let $I$ be an instance of {\sc spa-st}, and let $M$ be a matching in $I$. We say that $M$ is \emph{super-stable} if it admits no blocking pair, where a \emph{blocking pair} is an acceptable pair $(s_i, p_j) \in (\mathcal{S} \times \mathcal{P}) \setminus M$ such that conditions (a) and (b) both hold, where $l_k$ denotes the lecturer who offers $p_j$: \begin{enumerate}[(a)]
\item either $s_i$ is unassigned in $M$ or $s_i$ prefers $p_j$ to $M(s_i)$ or is indifferent between them;
\item either (i), (ii), or (iii) holds as follows:
\begin{enumerate} [(i)] \item each of $p_j$ and $l_k$ is undersubscribed in $M$; \item $p_j$ is undersubscribed in $M$, $l_k$ is full in $M$ and either
\begin{enumerate}[(1)]
\item $s_i \in M(l_k)$, or
\item $l_k$ prefers $s_i$ to the worst student/s in $M(l_k)$ or is indifferent between them; \end{enumerate} \item $p_j$ is full in $M$ and $l_k$ prefers $s_i$ to the worst student/s in $M(p_j)$ or is indifferent between them. \end{enumerate} \end{enumerate}
\end{definition} It may be verified that the matching $M = \{(s_3, p_2), (s_4, p_3), (s_5, p_1)\}$ is super-stable in the instance $I_1$ shown in Fig.~\ref{fig:spa-st-instance-1}. Clearly, a super-stable matching is also weakly stable. Moreover, the definition of super-stability gives rise to Proposition \ref{proposition1}, which can be regarded as an analogue of Proposition \ref{proposition2} for super-stability; we restate it as follows.
\superstability*
\begin{proof} Let $I$ be an instance of {\sc spa-st} and let $M$ be a matching in $I$. Suppose that $M$ is super-stable in $I$. We want to show that $M$ is stable in every instance of {\sc spa-s} obtained from $I$ by breaking the ties in some way. Now, let $I'$ be an arbitrary instance of {\sc spa-s} obtained from $I$ by breaking the ties in some way, and suppose $M$ is not stable in $I'$. This implies that $M$ admits a blocking pair $(s_i, p_j)$ in $I'$. Since $I'$ is an arbitrary {\sc spa-s} instance obtained from $I$ by breaking the ties in some way, it follows that in $I$: (i) if $s_i$ is assigned in $M$ then $s_i$ either prefers $p_j$ to $M(s_i)$ or is indifferent between them, (ii) if $p_j$ is full in $M$ then $l_k$ either prefers $s_i$ to a worst student in $M(p_j)$ or is indifferent between them, and (iii) if $l_k$ is full in $M$ then either $s_i \in M(l_k)$ or $l_k$ prefers $s_i$ to a worst student in $M(l_k)$ or is indifferent between them. This implies that $(s_i, p_j)$ forms a blocking pair for $M$ in $I$, a contradiction to the super-stability of $M$.
Conversely, suppose $M$ is stable in every instance of {\sc spa-s} obtained from $I$ by breaking the ties in some way. Now suppose $M$ is not super-stable in $I$. This implies that $M$ admits a blocking pair $(s_i, p_j)$ in $I$. We construct an instance $I'$ of {\sc spa-s} from $I$ by breaking the ties in the following way: (i) if $s_i$ is assigned in $M$ and $s_i$ is indifferent between $p_j$ and $M(s_i)$ in $I$ then $s_i$ prefers $p_j$ to $M(s_i)$ in $I'$; otherwise we break the ties in $s_i$'s preference list arbitrarily, and (ii) if some student, say $s_{i'}$, different from $s_i$ is assigned to $l_k$ in $M$ such that $l_k$ is indifferent between $s_i$ and $s_{i'}$ in $I$ then $l_k$ prefers $s_i$ to $s_{i'}$ in $I'$; otherwise we break the ties in $l_k$'s preference list arbitrarily. Thus $(s_i, p_j)$ forms a blocking pair for $M$ in $I'$, i.e., $M$ is not stable in $I'$, a contradiction to the fact that $M$ is stable in every instance of {\sc spa-s} obtained from $I$ by breaking the ties in some way. \qed\end{proof} \noindent The following proposition, which is a consequence of Propositions \ref{proposition1} and \ref{proposition2}, and Theorem \ref{thrm:rural-spa-s}, tells us that if a super-stable matching $M$ exists in $I$ then all weakly stable matchings in $I$ are of the same size (equal to the size of $M$) and match exactly the same set of students. \begin{restatable}[]{proposition}{allinone} \label{proposition3} Let $I$ be an instance of {\sc spa-st}, and suppose that $I$ admits a super-stable matching $M$. Then the Unpopular Projects Theorem holds for the set of weakly stable matchings in $I$. \end{restatable}
\begin{proof} Let $I$ be an instance of {\sc spa-st}. Let $M$ be a super-stable matching in $I$ and let $M'$ be a weakly stable matching in $I$. Then by Proposition \ref{proposition2}, $M'$ is stable in some instance $I'$ of {\sc spa-s} obtained from $I$ by breaking the ties in some way. Also $M$ is stable in $I'$ by Proposition \ref{proposition1}. By Theorem \ref{thrm:rural-spa-s}, each lecturer is assigned the same number of students in $M$ and $M'$, exactly the same students are unassigned in $M$ and $M'$, and a project offered by an undersubscribed lecturer is assigned the same number of students in $M$ and $M'$. Hence, the Unpopular Projects Theorem holds for the set of weakly stable matchings in $I$. \qed \end{proof}
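Definition \ref{definition:super-stability} can also be checked mechanically for a given matching. The sketch below is a direct, illustrative transcription of conditions (a) and (b) into Python over the encoding sketched in Section~\ref{subsection:spa-st}; it assumes mutual acceptability and positive capacities, and is not intended as an optimised implementation.
\begin{verbatim}
# Illustrative super-stability check, following Definition (super-stability).

def tie_rank(pref_list, item):
    """Index of the tie containing `item` (lower is better); None if absent."""
    for r, tie in enumerate(pref_list):
        if item in tie:
            return r
    return None

def is_super_stable(inst, M):
    """M maps each assigned student to exactly one project."""
    proj = {p: {s for s, q in M.items() if q == p} for p in inst.project_capacity}
    lect = {l: set() for l in inst.lecturer_capacity}
    for s, p in M.items():
        lect[inst.offered_by[p]].add(s)

    for s, prefs in inst.student_prefs.items():
        for p in {q for tie in prefs for q in tie}:       # acceptable pairs
            if M.get(s) == p:
                continue
            l = inst.offered_by[p]
            # (a): s is unassigned, or p is at least as good as M(s)
            if s in M and tie_rank(prefs, p) > tie_rank(prefs, M[s]):
                continue
            s_rank = tie_rank(inst.lecturer_prefs[l], s)  # assumes s is acceptable to l
            p_under = len(proj[p]) < inst.project_capacity[p]
            l_full = len(lect[l]) >= inst.lecturer_capacity[l]
            if p_under and not l_full:                    # (b)(i)
                return False
            if p_under and l_full:                        # (b)(ii)
                worst = max(tie_rank(inst.lecturer_prefs[l], t) for t in lect[l])
                if s in lect[l] or s_rank <= worst:
                    return False
            if not p_under:                               # (b)(iii)
                worst = max(tie_rank(inst.lecturer_prefs[l], t) for t in proj[p])
                if s_rank <= worst:
                    return False
    return True
\end{verbatim}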
\subsection{Cloning from {\sc spa-st} to {\sc hrt} does not work in general} \label{subsect:cloning} As mentioned earlier, Irving \textit{et al.}~\cite{IMS00} described a polynomial-time algorithm to find a super-stable matching or report that no such matching exists, given an instance of {\sc hrt}. The authors referred to their algorithm as Algorithm {\sf HRT-Super-Res}. One might assume that reducing a given instance of {\sc spa-st} to an instance of {\sc hrt} (using a ``cloning'' technique) and subsequently applying Algorithm {\sf HRT-Super-Res} to the resulting instance would solve our problem. However, this is not always true. In what follows, we describe an obvious method to clone an instance of {\sc spa-st} to an instance of {\sc hrt}, and we show that applying the super-stable matching algorithm described in \cite{IMS00} to the resulting {\sc hrt} instance does not work in general.
A method to derive an instance $I'$ of {\sc hrt} from an instance $I$ of {\sc spa-st} was described by Cooper and Manlove \cite{CM18a}. We explain this method as follows. The students and projects involved in $I$ are converted into residents and hospitals respectively in $I'$, i.e., each $s_i \in \mathcal{S}$ becomes $r_i$ in the cloned instance, and each $p_j \in \mathcal{P}$ becomes $h_j$. Residents inherit their preference lists naturally from students, i.e., if $r_i$ corresponds to $s_i$ then the preference list of $r_i$ in $I'$ is $A_i$, with each project in $A_i$ being replaced by the associated hospital. Hospitals inherit their preference lists from the projected preference list of the associated project according to the lecturer offering the project, i.e., if $p_j$ corresponds to $h_j$ (where $p_j$ is offered by $l_k$) then the preference list of $h_j$ in $I'$ is $\mathcal{L}_k^j$, with each student in $\mathcal{L}_k^j$ being replaced by the associated resident. Each hospital also inherits its capacity from the project, i.e., for each $h_j$ associated with $p_j$, the capacity of $h_j$ is $c_j$.
Let $l_k$ be an arbitrary lecturer in $I$. In order to translate $l_k$'s capacity into the {\sc hrt} instance, we create $n$ \emph{dummy residents}\footnote{The dummy residents created for each hospital will offset the difference between the corresponding lecturer capacity and the total capacity of her proposed projects.} for each hospital $h_j$ corresponding to a project $p_j \in P_k$, where $n$ is the difference between the sum of the capacities of all the projects in $P_k$ and the capacity of $l_k$ (recall that $\sum_{p_j \in P_k} c_j \geq d_k$). The preference list for each of these dummy residents will be a single tie consisting of all the hospitals corresponding to a project in $P_k$. Further, the preference list for each hospital corresponding to a project in $P_k$ will include a tie in its first position consisting of all the dummy residents associated with $l_k$.
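The cloning construction just described can be summarised in a few lines. The sketch below builds on the illustrative encoding from Section~\ref{subsection:spa-st}; identifiers such as \texttt{clone\_to\_hrt} are hypothetical, and student and project identifiers are simply reused as resident and hospital identifiers.
\begin{verbatim}
# Illustrative sketch of the SPA-ST -> HRT cloning described above.

def clone_to_hrt(inst):
    resident_prefs = {}                       # resident -> ranked ties of hospitals
    hospital_prefs = {}                       # hospital -> ranked ties of residents
    hospital_capacity = dict(inst.project_capacity)

    # Residents inherit the students' lists (projects play the role of hospitals).
    for s, prefs in inst.student_prefs.items():
        resident_prefs[s] = list(prefs)

    # Each hospital inherits the projected list L_k^j of the offering lecturer.
    for p, l in inst.offered_by.items():
        hospital_prefs[p] = inst.projected_list(l, p)

    # Dummy residents offset the difference between the total capacity of a
    # lecturer's projects and her own capacity.
    for l, d_k in inst.lecturer_capacity.items():
        projects = [p for p, lk in inst.offered_by.items() if lk == l]
        n_dummies = sum(inst.project_capacity[p] for p in projects) - d_k
        dummies = ["dummy_{}_{}".format(l, i) for i in range(n_dummies)]
        for d in dummies:
            resident_prefs[d] = [frozenset(projects)]     # one tie over all of l's hospitals
        if dummies:
            for p in projects:                            # dummies tied at the head
                hospital_prefs[p] = [frozenset(dummies)] + hospital_prefs[p]

    return resident_prefs, hospital_prefs, hospital_capacity
\end{verbatim}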
Next, we describe how to map between matchings in $I$ and in $I'$. Let $M$ and $M'$ be a matching in $I$ and $I'$ respectively. Let $r_i$ be the resident associated with $s_i$ and let $h_j$ be the hospital associated with $p_j$. If $s_i$ is assigned in $M$ to project $p_j$, then $r_i$ is assigned in $M'$ to hospital $h_j$. To illustrate the cloning technique described above, we give an example instance $I$ of {\sc spa-st} in Fig.~\ref{fig:super-instance-2} as well as the corresponding cloned {\sc hrt} instance $I'$ in Fig.~\ref{fig:super-instance-2-cloned}. Also, we give an intuition as to why this technique will not work in general.
\begin{figure}
\caption{ \small An instance $I$ of {\sc spa-st}.}
\label{fig:super-instance-2}
\end{figure}
\begin{figure}
\caption{ \small The {\sc hrt} instance $I'$ obtained from the instance $I$ in Fig.~\ref{fig:super-instance-2} by cloning.}
\label{fig:super-instance-2-cloned}
\end{figure}
With respect to Figs.~\ref{fig:super-instance-2} and \ref{fig:super-instance-2-cloned}, residents $r_1, r_2$ and $r_3$ in $I'$ correspond to students $s_1, s_2$ and $s_3$ in $I$, respectively, and the preference list of each resident is adapted from the preference list of the associated student. Also, hospitals $h_1, h_2$ and $h_3$ in $I'$ correspond to projects $p_1, p_2$ and $p_3$ in $I$, respectively. The preference lists of hospitals $h_1$ and $h_2$ are $\mathcal{L}_1^1$ and $\mathcal{L}_1^2$ respectively, since $l_1$ is the lecturer who offers both $p_1$ and $p_2$. Similarly, the preference list of hospital $h_3$ is $\mathcal{L}_2^3$, since $l_2$ is the lecturer who offers $p_3$. Further, for lecturer $l_1$, who offers both $p_1$ and $p_2$, since $c_1 + c_2 = 2 > 1 = d_1$, we add one dummy resident $r_{d_1}$ to the cloned instance. The preference list of $r_{d_1}$ is a single tie consisting of $h_1$ and $h_2$, and the preference lists of both $h_1$ and $h_2$ include $r_{d_1}$ in first position.
The reader can easily verify that matching $M = \{(s_1, p_1), (s_3, p_3)\}$ is super-stable in the {\sc spa-st} instance $I$ illustrated in Fig.~\ref{fig:super-instance-2}. Now, following our description of how to map between matchings in $I$ and in $I'$, a matching in $I'$ is $M' = \{(r_{d_1}, h_2), (r_1, h_1), (r_3, h_3)\}$, with $(s_1, p_1) \in M$ corresponding to $(r_1, h_1) \in M'$ and $(s_3, p_3) \in M$ corresponding to $(r_3, h_3) \in M'$. Clearly, $M'$ is not super-stable in $I'$ as $(r_{d_1}, h_1)$ forms a blocking pair. In fact, the {\sc hrt} instance $I'$ admits no super-stable matching. The justification for this is as follows: irrespective of the hospital that the dummy resident $r_{d_1}$ is assigned to in any matching obtained from $I'$, $r_{d_1}$ will block this matching via the other hospitals tied in her preference list (since the hospital would be better off taking on $r_{d_1}$, and $r_{d_1}$ would be no worse off).
One way to avoid this problem would be to strictly order the hospitals in $r_{d_1}$'s preference list; however, the order in which the hospitals appear will lead to different possibilities. For instance, if $r_{d_1}$ prefers $h_1$ to $h_2$, the reader can verify that the corresponding {\sc hrt} instance admits no super-stable matching; however, if $r_{d_1}$ prefers $h_2$ to $h_1$, again the reader can verify that the corresponding {\sc hrt} instance admits the super-stable matching $\{(r_{d_1}, h_2), (r_1, h_1), (r_3, h_3)\}$. The downside of this strategy is that there is no obvious reason as to why $r_{d_1}$ should prefer $h_2$ to $h_1$ in the cloned {\sc hrt} instance in Fig.~\ref{fig:super-instance-2-cloned} by merely looking at the original {\sc spa-st} instance in Fig.~\ref{fig:super-instance-2}. Hence, in order to make this technique work in general, we would need to generate every {\sc hrt} instance obtained by ordering the dummy residents' preference lists in some way. Since a tie of length $t$ can be broken in $t!$ ways, the number of such instances is in general exponential in the size of the original {\sc spa-st} instance.
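To make this blow-up concrete, the following illustrative snippet (helper names are ours) enumerates every strict ordering of the dummy residents' ties; the number of generated preference profiles is a product of factorials, and hence exponential in general.
\begin{verbatim}
# Enumerate all strict orderings of the dummy residents' ties (illustrative).
from itertools import permutations, product

def all_dummy_tie_breakings(dummy_prefs):
    """dummy_prefs: dummy resident -> the single tie (set) of hospitals in her list.
    Yields one strict preference profile per way of ordering every tie."""
    dummies = sorted(dummy_prefs)
    orderings = [list(permutations(sorted(dummy_prefs[d]))) for d in dummies]
    for choice in product(*orderings):
        yield {d: list(order) for d, order in zip(dummies, choice)}
\end{verbatim}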
\section{An algorithm for {\small SPA-ST} under super-stability} \label{section:algorithm}
In this section we present our algorithm for {\sc spa-st} under super-stability, which we will refer to as Algorithm {\sf SPA-ST-super}. Before we proceed, we briefly describe Algorithm {\sf HRT-Super-Res} \cite{IMS00}. The algorithm involves a sequence of proposals from the residents to the hospitals. Each resident proposes in turn to all of the hospitals tied together at the head of her preference list, and all proposals are provisionally accepted. If a hospital $h$ becomes oversubscribed then none of $h$'s worst assignees nor any resident tied with these assignees in $h$'s preference list can be assigned to $h$ in any super-stable matching -- such pairs $(r, h)$ are deleted from each other's preference lists. If a hospital $h$ is full then no resident strictly worse than $h$'s worst assignees can be assigned to $h$ in any super-stable matching -- again such $(r,h)$ pairs are deleted from each other's preference lists. The proposal sequence terminates once every resident is either assigned to a hospital or has an empty preference list. At this point, if the constructed assignment of residents to hospitals is super-stable in the original {\sc hrt} instance then the assignment is returned as a super-stable matching. Otherwise, the algorithm reports that no super-stable matching exists.
We note that our algorithm is a non-trivial extension of Algorithm {\sf HRT-Super-Res} for {\sc hrt} \cite{IMS00}. Due to the more general setting of {\sc spa-st}, Algorithm {\sf SPA-ST-super} requires some new ideas (precisely lines 27-34 of the algorithm on page \pageref{algorithmSPA-STsuper}), and the proofs of the correctness results are more complex than for the aforementioned algorithm for {\sc hrt}. We give definitions relating to the algorithm in Section \ref{subsect:algorithm-definition}. We give a description of our algorithm in Section \ref{subsect:algorithm-description}, before presenting it in pseudocode form. In Section \ref{example-description}, we illustrate an execution of our algorithm with respect to an example {\sc spa-st} instance. We present the algorithm's correctness results in Section \ref{correctness-result}. Finally, in Section \ref{subsect:properties}, we show that the set of super-stable matchings in an instance of {\sc spa-st} satisfy analogous properties to those given in Theorem \ref{thrm:rural-spa-s}.
\subsection{Definitions relating to the algorithm} \label{subsect:algorithm-definition}
First, we present some definitions relating to the algorithm. In what follows, $I$ is an instance of {\sc spa-st}, $(s_i, p_j)$ is an acceptable pair in $I$ and $l_k$ is the lecturer who offers $p_j$. Further, if $(s_i,p_j)$ belongs to some super-stable matching in $I$, we call $(s_i, p_j)$ a \textit{super-stable pair}.
During the execution of the algorithm, students become \textit{provisionally assigned} to projects. It is possible for a project to be provisionally assigned a number of students that exceeds its capacity. This holds analogously for a lecturer. The algorithm proceeds by deleting from the preference lists certain $(s_i, p_j)$ pairs that cannot be super-stable. By the term \textit{delete} $(s_i, p_j)$, we mean the removal of $p_j$ from $s_i$'s preference list and the removal of $s_i$ from $\mathcal{L}_k^j$ (the projected preference list of lecturer $l_k$ for $p_j$). In addition, if $s_i$ is provisionally assigned to $p_j$ at this point, we break the assignment. If $s_i$ has been deleted from every projected preference list of $l_k$ that she originally belonged to, we will implicitly assume that $s_i$ has been deleted from $l_k$'s preference list. By the \textit{head} of a student's preference list at a given point, we mean the set of one or more projects, tied in her preference list after any deletions might have occurred, that she prefers to all other projects in her list.
For project $p_j$, we define the \textit{tail} of $\mathcal{L}_k^j$ as the least-preferred tie in $\mathcal{L}_k^j$ after any deletions might have occurred (recalling that a tie can be of length one). In the same fashion, we define the \textit{tail} of $\mathcal{L}_k$ (the preference list of lecturer $l_k$) as the least-preferred tie in $\mathcal{L}_k$ after any deletions might have occurred. If $s_i$ is provisionally assigned to $p_j$, we define the \textit{successors} of $s_i$ in $\mathcal{L}_{k}^j$ as those students that are worse than $s_i$ in $\mathcal{L}_{k}^j$. An analogous definition holds for the successors of $s_i$ in $\mathcal{L}_k$.
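The bookkeeping operations defined in this subsection translate directly into code. The sketch below is again over the illustrative tie-of-sets encoding, with ties stored as mutable sets so that deletions can be made in place; the helper names are hypothetical.
\begin{verbatim}
# Illustrative helpers for the operations defined above.

def delete_pair(student_prefs, projected_prefs, provisional, s, p):
    """Delete (s, p): remove p from s's list, remove s from L_k^j, and break any
    provisional assignment of s to p."""
    for tie in student_prefs[s]:
        tie.discard(p)
    student_prefs[s] = [tie for tie in student_prefs[s] if tie]
    for tie in projected_prefs[p]:
        tie.discard(s)
    projected_prefs[p] = [tie for tie in projected_prefs[p] if tie]
    provisional.discard((s, p))

def head(student_prefs, s):
    """Projects that s prefers to every other project remaining on her list."""
    return set(student_prefs[s][0]) if student_prefs[s] else set()

def tail(ranked_list):
    """The least-preferred tie remaining in a lecturer's (or projected) list."""
    return set(ranked_list[-1]) if ranked_list else set()
\end{verbatim}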
\subsection{Description of the algorithm} \label{subsect:algorithm-description} We now describe our algorithm, shown in pseudocode form in Algorithm~\ref{algorithmSPA-STsuper}. Algorithm {\sf SPA-ST-super} begins by initialising an empty set $M$ which will contain the provisional assignments of students to projects (and implicitly to lecturers). We remark that such assignments can subsequently be broken during the algorithm's execution. Also, for each project $p_j$, the flag \texttt{full}($p_j$) is initialised to \texttt{false}; this flag records whether $p_j$ becomes full at some point during the algorithm's execution.
The \texttt{while} loop of the algorithm involves each student $s_i$ who is not provisionally assigned to any project in $M$ and who has a non-empty preference list applying in turn to each project $p_j$ at the head of her list. Immediately, $s_i$ becomes provisionally assigned to $p_j$ in $M$ (and to $l_k$). If, by gaining a new student, $p_j$ becomes oversubscribed, it turns out that none of the students $s_t$ at the tail of $\mathcal{L}_k^j$ can be assigned to $p_j$ in any super-stable matching -- such pairs $(s_t, p_j)$ are deleted. Similarly, if by gaining a new student, $l_k$ becomes oversubscribed, none of the students $s_t$ at the tail of $\mathcal{L}_k$ can be assigned to any project offered by $l_k$ in any super-stable matching -- the pairs $(s_t, p_u)$, for each project $p_u \in P_k$ that $s_t$ finds acceptable, are deleted.
Regardless of whether any deletions occurred as a result of the two conditionals described in the previous paragraph, we have two further (possibly non-disjoint) cases in which deletions may occur. If $p_j$ becomes full, we let $s_r$ be any worst student provisionally assigned to $p_j$ (according to $\mathcal{L}_k^j$), and we delete $(s_t, p_j)$ for each successor $s_t$ of $s_r$ in $\mathcal{L}_k^j$. Similarly if $l_k$ becomes full, we let $s_r$ be any worst student provisionally assigned to $l_k$, and we delete $(s_t, p_u)$, for each successor $s_t$ of $s_r$ in $\mathcal{L}_k$ and for each project $p_u \in P_k$ that $s_t$ finds acceptable. As we will prove later, none of the (student, project) pairs that we delete is a super-stable pair.
At the point where the \texttt{while} loop terminates (i.e., when every student is provisionally assigned to one or more projects or has an empty preference list), if some project $p_j$ that was previously full ends up undersubscribed, we let $s_r$ be any one of the most-preferred students (according to $\mathcal{L}_k^j$) who was provisionally assigned to $p_j$ during some iteration of the algorithm but is not assigned to $p_j$ at this point (for convenience, we henceforth refer to such $s_r$ as the most-preferred student rejected from $p_j$ according to $\mathcal{L}_k^j$). If the students at the tail of $\mathcal{L}_k$ (recalling that the tail of $\mathcal{L}_k$ is the least-preferred tie in $\mathcal{L}_k$ after any deletions might have occurred) are no better than $s_r$, it turns out that none of these students $s_t$ can be assigned to any project offered by $l_k$ in any super-stable matching -- the pairs $(s_t, p_u)$, for each project $p_u \in P_k$ that $s_t$ finds acceptable, are deleted. The \texttt{while} loop is then potentially reactivated, and the entire process continues until every student is provisionally assigned to a project or has an empty preference list, at which point the \texttt{repeat-until} loop terminates.
Upon termination of the \texttt{repeat-until} loop, if the set $M$, containing the assignment of students to projects, is super-stable relative to the given instance $I$ then $M$ is output as a super-stable matching in $I$. Otherwise, the algorithm reports that no super-stable matching exists in $I$.
\begin{algorithm}[htbp] \caption{Algorithm {\sf SPA-ST-super}} \label{algorithmSPA-STsuper}
\begin{algorithmic}[1] \Require {{\sc spa-st} instance $I$}
\Ensure{a super-stable matching $M$ in $I$ or ``no super-stable matching exists in $I$''}
\State $M \gets \emptyset$ \ForEach {$p_j \in \mathcal{P}$} \State \texttt{full}($p_j$) = \texttt{false} \EndFor \Repeat{} \While {some student $s_i$ is unassigned and has a non-empty preference list}
\ForEach {project $p_j$ at the head of $s_i$'s preference list}
\State $l_k \gets $ lecturer who offers $p_j$
\State /* $s_i$ applies to $p_j$ */
\State $M \gets M \cup \{(s_i, p_j)\}$ /*provisionally assign $s_i$ to $p_j$ (and to $l_k$) */
\If {$p_j$ is oversubscribed}
\ForEach{student $s_t$ at the tail of $\mathcal{L}_{k}^{j}$}
\State delete $(s_t, p_j)$
\EndFor
\ElsIf {$l_k$ is oversubscribed}
\ForEach{student $s_t$ at the tail of $\mathcal{L}_{k}$}
\ForEach {project $p_u \in P_k \cap A_t$}
\State delete $(s_t, p_u)$
\EndFor
\EndFor
\EndIf
\If {$p_j$ is full}
\State \texttt{full}($p_j$) = \texttt{true}
\State $s_r \gets $ worst student assigned to $p_j$ according to $\mathcal{L}_{k}^{j}$ \{any if $> 1$\}
\ForEach{successor $s_t$ of $s_r$ on $\mathcal{L}_{k}^{j}$}
\State delete $(s_t, p_j)$
\EndFor
\EndIf
\If {$l_k$ is full}
\State $s_r \gets $ worst student assigned to $l_k$ according to $\mathcal{L}_{k}$ \{any if $> 1$\}
\ForEach{successor $s_t$ of $s_r$ on $\mathcal{L}_{k}$}
\ForEach{project $p_u \in P_k \cap A_t$ }
\State delete $(s_t, p_u)$
\EndFor
\EndFor
\EndIf
\EndFor \EndWhile \ForEach{$p_j \in \mathcal{P}$} \If {$p_j$ is undersubscribed and \texttt{full}($p_j$) is \texttt{true}}
\State $l_k \gets $ lecturer who offers $p_j$ \State $s_r \gets $ most-preferred student rejected from $p_j$ according to $\mathcal{L}_{k}^{j}$ \{any if $> 1$\}
\If{the students at the tail of $\mathcal{L}_k$ are no better than $s_r$}
\ForEach{student $s_t$ at the tail of $\mathcal{L}_k$}
\ForEach{project $p_u \in P_k \cap A_t$ }
\State delete $(s_t, p_u)$ \label{alg:deletion-outside}
\EndFor
\EndFor
\EndIf
\EndIf \EndFor \Until {every unassigned student has an empty preference list} \If {$M$ is super-stable in $I$} \State \Return $M$
\Else
\State \Return ``no super-stable matching exists in $I$''
\EndIf \end{algorithmic} \end{algorithm}
\subsection{Example algorithm execution} \label{example-description} We illustrate an execution of Algorithm {\sf SPA-ST-super} with respect to the {\sc spa-st} instance shown in Fig.~\ref{fig:spa-st-instance-1} (page \pageref{fig:spa-st-instance-1}). We initialise $M = \{\}$, which will contain the provisional assignment of students to projects. For each project $p_j \in \mathcal{P}$, we set \texttt{full}($p_j$) = \texttt{false} (\texttt{full}($p_j$) will be set to \texttt{true} when $p_j$ becomes full, so that we can easily identify any project that was full during an iteration of the algorithm and ended up undersubscribed). We assume that the students become provisionally assigned to each project at the head of their list in subscript order. Table~\ref{example-illustration} illustrates how this execution of Algorithm {\sf SPA-ST-super} proceeds with respect to $I_1$.
\begin{table}[htbp] \caption{\label{example-illustration} \small An execution of Algorithm {\sf SPA-ST-super} with respect to Fig.~\ref{fig:spa-st-instance-1}.} \centering \small \setlength{\tabcolsep}{0.8em} \renewcommand{\arraystretch}{1.7} \begin{tabular}{p{1.6cm}p{2.4cm}p{10cm}} \hline\noalign{\smallskip}
\texttt{while} loop iterations & Student applies to project & Consequence \\ \noalign{\smallskip}\hline\noalign{\smallskip}
$1$ & $s_1$ applies to $p_1$ & $M=\{(s_1, p_1)\}$. \texttt{full}($p_1$) = \texttt{true}. \\ \hline $2$ & $s_2$ applies to $p_1$ & $M=\{(s_1, p_1), (s_2, p_1)\}$. $p_1$ becomes oversubscribed. The tail of $\mathcal{L}_1^1$ contains $s_1$ and $s_2$ -- thus we delete the pairs $(s_1, p_1)$ and $(s_2, p_1)$ (and we break the provisional assignments). \\
& $s_2$ applies to $p_3$ & $M=\{(s_2, p_3)\}$. \texttt{full}($p_3$) = \texttt{true}.\\ \hline $3$ & $s_3$ applies to $p_2$ & $M=\{(s_2, p_3), (s_3, p_2)\}$. \\ \hline $4$ & $s_4$ applies to $p_2$ & $M = \{(s_2, p_3), (s_3, p_2), (s_4, p_2)\}$. \texttt{full}($p_2$) = \texttt{true}. \\ \hline $5$ & $s_5$ applies to $p_3$ & $M = \{(s_2, p_3), (s_3, p_2), (s_4, p_2), (s_5, p_3)\}$. $p_3$ becomes oversubscribed. The tail of $\mathcal{L}_2^3$ contains only $s_2$ -- thus we delete the pair $(s_2, p_3)$ (and we break the provisional assignment).\\ \hline \multicolumn{3}{p{15.2cm}}{The \texttt{while} loop terminates for the first time, since every unassigned student (i.e., $s_1$ and $s_2$) has an empty preference list. At this point, \texttt{full}($p_1$) is \texttt{true} and $p_1$ is undersubscribed. Moreover, the student at the tail of $\mathcal{L}_1$ (i.e., $s_4$) is no better than $s_1$, where $s_1$ was previously assigned to $p_1$ and $s_1$ is also the most-preferred student rejected from $p_1$ according to $\mathcal{L}_1^1$; thus we delete the pair $(s_4, p_2)$. The \texttt{while} loop is then reactivated.}\\ \hline $6$ & $s_4$ applies to $p_3$ & $M = \{(s_3, p_2), (s_5, p_3), (s_4, p_3)\}$. $p_3$ becomes oversubscribed. The tail of $\mathcal{L}_2^3$ contains only $s_5$ -- thus we delete the pair $(s_5, p_3)$.\\ \hline $7$ & $s_5$ applies to $p_1$ & $M = \{(s_3, p_2), (s_4, p_3), (s_5, p_1)\}$. \\ \hline \multicolumn{3}{p{15.2cm}}{Again, every unassigned student has an empty preference list. We also have that \texttt{full}($p_2$) is \texttt{true} and $p_2$ is undersubscribed; however, no further deletion is carried out in line 34 of the algorithm, since the student at the tail of $\mathcal{L}_1$ (i.e., $s_3$) is better than $s_4$, where $s_4$ was previously assigned to $p_2$ and $s_4$ is also the most-preferred student rejected from $p_2$ according to $\mathcal{L}_1^2$. Hence, the \texttt{repeat-until} loop terminates and the algorithm outputs $M = \{(s_3, p_2), (s_4, p_3), (s_5, p_1)\}$ as a super-stable matching. It is clear that $M$ is super-stable in the original instance $I_1$.}\\ \noalign{\smallskip}\hline \end{tabular} \end{table}
\subsection{Correctness of Algorithm {\sf SPA-ST-super}} \label{correctness-result} We now present a series of results concerning the correctness of Algorithm {\sf SPA-ST-super}. The first of these results deals with the fact that no super-stable pair is deleted during an execution of the algorithm. In what follows, $I$ is an instance of {\sc spa-st}, $(s_i, p_j)$ is an acceptable pair in $I$ and $l_k$ is the lecturer who offers $p_j$.
\begin{restatable}[]{lemma}{nopairdeletion} \label{pair-deletion} If a pair $(s_i, p_j)$ is deleted during an execution of Algorithm {\sf SPA-ST-super}, then $(s_i, p_j)$ does not belong to any super-stable matching in $I$. \end{restatable}
\noindent In order to prove Lemma \ref{pair-deletion}, we present Lemmas \ref{lemma:super-pair-deletion-within} and \ref{lemma:super-pair-deletion-outside}.
\begin{lemma} \label{lemma:super-pair-deletion-within} If a pair $(s_i, p_j)$ is deleted within the \texttt{while} loop during an execution of Algorithm {\sf SPA-ST-super} then $(s_i, p_j)$ does not belong to any super-stable matching in $I$. \end{lemma}
\begin{proof} Without loss of generality, suppose that the first super-stable pair to be deleted within the \texttt{while} loop during an arbitrary execution $E$ of the algorithm is $(s_i, p_j)$, which belongs to some super-stable matching, say $M^*$. Suppose that $M$ is the assignment immediately after the deletion. Let us denote this point in the algorithm where the deletion is made by $\ddagger$. During $E$, there are four cases that would lead to the deletion of any (student, project) pair within the \texttt{while} loop. \begin{enumerate}[(1)] \item \emph{$p_j$ is oversubscribed.} Suppose that $(s_i, p_j)$ is deleted because some student (possibly $s_i$) became provisionally assigned to $p_j$ during $E$, causing $p_j$ to become oversubscribed. If $p_j$ is full or undersubscribed at point $\ddagger$, since $s_{i} \in M^*(p_j) \setminus M(p_j)$ and no project can be oversubscribed in $M^*$, then there is some student $s_r \in M(p_j) \setminus M^*(p_j)$ such that $l_k$ prefers $s_r$ to $s_i$ or is indifferent between them. We note that $s_r$ cannot be assigned to a project that she prefers to $p_j$ in any super-stable matching. Otherwise, since $p_j$ must have been in the head of $s_r$'s preference list when she applied, this would mean that a super-stable pair was deleted before $(s_i, p_j)$. Thus either $s_r$ is unassigned in $M^*$ or $s_r$ prefers $p_j$ to $M^*(s_r)$ or $s_r$ is indifferent between them. Clearly, for any combination of $l_k$ and $p_j$ being full or undersubscribed in $M^*$, it follows that $(s_r, p_j)$ blocks $M^*$, a contradiction.
\item \emph{$l_k$ is oversubscribed.} Suppose that $(s_i, p_j)$ is deleted because some student (possibly $s_i$) became provisionally assigned to a project offered by lecturer $l_k$ during $E$, causing $l_k$ to become oversubscribed. At point $\ddagger$, none of the projects offered by $l_k$ is oversubscribed in $M$, otherwise we would be in case (1). Similar to case (1), if $l_k$ is full or undersubscribed at point $\ddagger$, since $s_{i} \in M^*(p_{j}) \setminus M(p_{j})$ and no lecturer can be oversubscribed in $M^*$, it follows that there is some project $p_{j'} \in P_k$ and some student $s_{r} \in M(p_{j'}) \setminus M^*(p_{j'})$ such that $l_k$ prefers $s_{r}$ to $s_i$ or is indifferent between them. We consider two subcases. \begin{enumerate}[(i)] \item If $p_{j'} = p_j$ then $s_{r} \neq s_i$. Moreover, as in case (1), either $s_{r}$ is unassigned in $M^*$ or $s_{r}$ prefers $p_{j'}$ to $M^*(s_{r})$ or $s_r$ is indifferent between them. For any combination of $l_k$ and $p_{j'}$ being full or undersubscribed in $M^*$, we have that $(s_{r}, p_{j'})$ blocks $M^*$, a contradiction. \item Suppose $p_{j'} \neq p_j$. Assume firstly that $s_{r} \neq s_i$. Then as $p_{j'}$ has fewer assignees in $M^*$ than it has provisional assignees in $M$, and as in (i) above, $(s_{r}, p_{j'})$ blocks $M^*$, a contradiction. Finally assume $s_{r} = s_i$. Then $s_i$ must have applied to $p_{j'}$ at some point during $E$ before $\ddagger$. Clearly, either $s_i$ prefers $p_{j'}$ to $p_j$ or $s_i$ is indifferent between them, since $p_{j'}$ must have been in the head of $s_i$'s preference list when $s_i$ applied. Since $s_i \in M^*(l_k)$ and $p_{j'}$ is undersubscribed in $M^*$, it follows that $(s_i, p_{j'})$ blocks $M^*$, a contradiction. \end{enumerate}
\item \emph{$p_j$ is full.} Suppose that $(s_i, p_j)$ is deleted because $p_j$ became full during $E$. At point $\ddagger$, $p_j$ is full in $M$. Thus at least one of the students in $M(p_j)$, say $s_{r}$, will not be assigned to $p_j$ in $M^*$, for otherwise $p_j$ will be oversubscribed in $M^*$. This implies that either $s_{r}$ is unassigned in $M^*$ or $s_{r}$ prefers $p_j$ to $M^*(s_{r})$ or $s_{r}$ is indifferent between them. For otherwise, we obtain a contradiction to $(s_i, p_j)$ being the first super-stable pair to be deleted. Since $l_k$ prefers $s_{r}$ to $s_i$, it follows that $(s_{r}, p_j)$ blocks $M^*$, a contradiction.
\item \emph{$l_k$ is full.} Suppose that $(s_i, p_j)$ is deleted because $l_k$ became full during $E$. We consider two subcases. \begin{enumerate}[(i)]
\item All the students assigned to $p_j$ in $M$ at point $\ddagger$ (if any) are also assigned to $p_j$ in $M^*$. This implies that $p_j$ has one more assignee in $M^*$ than it has provisional assignees in $M$, namely $s_i$. Thus, some other project $p_{j'} \in P_k$ has fewer assignees in $M^*$ than it has provisional assignees in $M$, for otherwise $l_k$ would be oversubscribed in $M^*$. Hence there exists some student $s_{r} \in M(p_{j'}) \setminus M^*(p_{j'})$. It is clear that $s_{r} \neq s_i$, since $s_i$ plays the role of $s_t$ at some for loop iteration in line 24 of the algorithm.
Also, $s_{r}$ cannot be assigned to a project that she prefers to $p_{j'}$ in $M^*$, as explained in case (1). Moreover, since $p_{j'}$ is undersubscribed in $M^*$ and $l_k$ prefers $s_{r}$ to $s_i$, it follows that $(s_{r}, p_{j'})$ blocks $M^*$, a contradiction.
\item Some student, say $s_{r}$, who is assigned to $p_j$ in $M$ is not assigned to $p_j$ in $M^*$, i.e., $s_{r} \in M(p_j) \setminus M^*(p_j)$. Since $s_{r}$ cannot be assigned in $M^*$ to a project that she prefers to $p_j$ and since $l_k$ prefers $s_{r}$ to $s_i$, it follows that $(s_{r}, p_j)$ blocks $M^*$, a contradiction. \end{enumerate} \end{enumerate} \qed \end{proof}
\begin{lemma} \label{lemma:super-pair-deletion-outside} If a pair $(s_i, p_j)$ is deleted in line 34 of Algorithm {\sf SPA-ST-super} then $(s_i, p_j)$ does not belong to any super-stable matching in $I$. \end{lemma} \begin{proof} Without loss of generality, suppose that the first super-stable pair to be deleted during an arbitrary execution $E$ of the algorithm is $(s_i, p_j)$, which belongs to some super-stable matching, say $M^*$. Then by Lemma \ref{lemma:super-pair-deletion-within}, $(s_i, p_j)$ was deleted in line 34 during $E$. Let $l_k$ be the lecturer who offers $p_j$. Suppose that $M$ is the assignment during the iteration of the \texttt{repeat-until} loop where $(s_i, p_j)$ was deleted.
Let $p_{j'}$ be some other project offered by $l_k$ which was full during a previous \texttt{repeat-until} loop iteration and subsequently ends up undersubscribed in the current \texttt{repeat-until} loop iteration, i.e., $p_{j'}$ plays the role of $p_j$ in line 28. Suppose that $s_{i'}$ plays the role of $s_r$ in line 30, i.e., $s_{i'}$ is the most-preferred student rejected from $p_{j'}$ according to $\mathcal{L}_k^{j'}$ (possibly $s_{i'} = s_i$). Moreover $s_{i'}$ was provisionally assigned to $p_{j'}$ during a previous \texttt{repeat-until} loop iteration but $(s_{i'}, p_{j'}) \notin M$ in the current \texttt{repeat-until} loop iteration. Thus $(s_{i'}, p_{j'})$ has been deleted before the deletion of $(s_i, p_j)$ occurred; and thus, $(s_{i'}, p_{j'}) \notin M^*$, since $(s_i, p_j)$ is the first super-stable pair to be deleted. Further, $l_k$ either prefers $s_{i'}$ to $s_i$ or is indifferent between them, since $s_i$ plays the role of $s_t$ at some for loop iteration in line 32.
We remark that no student who is provisionally assigned to some project in $M$ can be assigned to a project better than her current assignment in any super-stable matching. For otherwise, this would mean a super-stable pair must have been deleted before $(s_i, p_j)$, since each student who is assigned in $M$ applies to projects in the head of her preference list. So, either $s_{i'}$ is unassigned in $M^*$ or $s_{i'}$ prefers $p_{j'}$ to $M^*(s_{i'})$ or $s_{i'}$ is indifferent between them. By the super-stability of $M^*$, $p_{j'}$ is full in $M^*$ and $l_k$ prefers every student in $M^*(p_{j'})$ to $s_{i'}$; for otherwise, $(s_{i'}, p_{j'})$ blocks $M^*$, a contradiction.
Let $l_{z_0} = l_k$, $p_{t_0} = p_{j'}$ and $s_{q_0} = s_{i'}$. Just before the deletion of $(s_i, p_j)$ occurred, $p_{t_0}$ is undersubscribed in $M$. Since $p_{t_0}$ is full in $M^*$, there exists some student $s_{q_1} \in M^*(p_{t_0}) \setminus M(p_{t_0})$. We note that $l_{z_0}$ prefers $s_{q_1}$ to $s_{q_0}$; for otherwise, $(s_{i'}, p_{j'})$ blocks $M^*$, a contradiction. Let $p_{t_1} = p_{t_0}$. Since $(s_i, p_j)$ is the first super-stable pair to be deleted, $s_{q_1}$ is assigned in $M$ to a project $p_{t_2}$ such that $s_{q_1}$ prefers $p_{t_2}$ to $p_{t_1}$. For otherwise, as each student applies to projects at the head of her preference list, that would mean $(s_{q_1}, p_{t_1})$ must have been deleted before $(s_i, p_j)$, a contradiction. We note that $p_{t_2} \neq p_{t_1}$, since $(s_{q_1} , p_{t_2}) \in M$ and $(s_{q_1} , p_{t_1}) \notin M$. Let $l_{z_1}$ be the lecturer who offers $p_{t_2}$. By the super-stability of $M^*$, either (i) or (ii) holds as follows:
\begin{enumerate}[(i)]
\item $p_{t_2}$ is full in $M^*$ and $l_{z_1}$ prefers the worst student/s in $M^*(p_{t_2})$ to $s_{q_1}$; \item $p_{t_2}$ is undersubscribed in $M^*$, $l_{z_1}$ is full in $M^*$, $s_{q_1} \notin M^*(l_{z_1})$ and $l_{z_1}$ prefers the worst student/s in $M^*(l_{z_1})$ to $s_{q_1}$. \end{enumerate}
Otherwise $(s_{q_1}, p_{t_2})$ blocks $M^*$. In case (i), there exists some student $s_{q_2} \in M^*(p_{t_2}) \setminus M(p_{t_2})$. Let $p_{t_3} = p_{t_2}$. In case (ii), there exists some student $s_{q_2} \in M^*(l_{z_1}) \setminus M(l_{z_1})$. We note that $l_{z_1}$ prefers $s_{q_2}$ to $s_{q_1}$. Now, suppose $M^*(s_{q_2}) = p_{t_3}$ (possibly $p_{t_3} = p_{t_2}$). It is clear that $s_{q_2} \neq s_{q_1}$. Applying similar reasoning as for $s_{q_1}$, $s_{q_2}$ is assigned in $M$ to a project $p_{t_4}$ such that $s_{q_2}$ prefers $p_{t_4}$ to $p_{t_3}$. Let $l_{z_2}$ be the lecturer who offers $p_{t_4}$. We are identifying a sequence $\langle s_{q_i}\rangle_{i \geq 1}$ of students, a sequence $\langle p_{t_i}\rangle_{i \geq 1}$ of projects, and a sequence $\langle l_{z_i}\rangle_{i \geq 1}$ of lecturers, such that, for each $i \geq 1$
\begin{enumerate}
\item $s_{q_{i}}$ prefers $p_{t_{2i}}$ to $p_{t_{2i-1}}$, \item $(s_{q_i}, p_{t_{2i}}) \in M$ and $(s_{q_i}, p_{t_{2i - 1}}) \in M^*$, \item $l_{z_i}$ prefers $s_{q_{i+1}}$ to $s_{q_{i}}$; also, $l_{z_i}$ offers both $p_{t_{2i}}$ and $p_{t_{2i+1}}$ (possibly $p_{t_{2i}} = p_{t_{2i+1}}$). \end{enumerate}
First we claim that for each new project that we identify, $p_{t_{2i}} \neq p_{t_{2i-1}}$ for $i \geq 1$. Suppose $p_{t_{2i}} = p_{t_{2i-1}}$ for some $i \geq 1$. From above $s_{q_{i}}$ was identified by $l_{z_{i-1}}$ such that $(s_{q_{i}}, p_{t_{2i-1}}) \in M^* \setminus M$. Moreover $(s_{q_{i}}, p_{t_{2i}}) \in M$. Hence we reach a contradiction. Clearly, for each student $s_{q_i}$ that we identify, for $i \geq 1$ , $s_{q_i}$ must be assigned to distinct projects in $M$ and in $M^*$.
Next we claim that for each new student $s_{q_i}$ that we identify, $s_{q_i} \neq s_{q_t}$ for $1 \leq t < i$. We prove this by induction on $i$. For the base case, clearly $s_{q_2} \neq s_{q_1}$. We assume that the claim holds for some $i \geq 1$, i.e., the sequence $s_{q_{1}}, s_{q_2}, \ldots, s_{q_{i}}$ consists of distinct students. We show that the claim holds for $i+1$, i.e., the sequence $s_{q_{1}}, s_{q_2}, \ldots, s_{q_{i}}, s_{q_{i+1}}$ also consists of distinct students. Clearly $s_{q_{i+1}} \neq s_{q_{i}}$ since $l_{z_{i}}$ prefers $s_{q_{i+1}}$ to $s_{q_{i}}$. Thus, it suffices to show that $s_{q_{i+1}} \neq s_{q_{j}}$ for $1 \leq j \leq i-1$. Now, suppose $s_{q_{i+1}} = s_{q_{j}}$ for some $j$ with $1 \leq j \leq i-1$. This implies that $s_{q_{j}}$ was identified by $l_{z_{i}}$ and clearly $l_{z_{i}}$ prefers $s_{q_{j}}$ to $s_{q_{j-1}}$. Now since $s_{q_{i+1}}$ was also identified by $l_{z_{i}}$ to avoid the blocking pair $(s_{q_i}, p_{t_{2i}})$ in $M^*$, it follows that either (i) $p_{t_{2i}}$ is full in $M^*$, or (ii) $p_{t_{2i}}$ is undersubscribed in $M^*$ and $l_{z_{i}}$ is full in $M^*$. We consider each case further as follows. \begin{enumerate}[(i)]
\item If $p_{t_{2i}}$ is full in $M^*$, we know that $(s_{q_{i}}, p_{t_{2i}}) \in M \setminus M^*$. Moreover $s_{q_j}$ was identified by $l_{z_{i}}$ because of case (i). Furthermore $(s_{q_{j-1}}, p_{t_{2i}}) \in M \setminus M^*$. In this case, $p_{t_{2i+1}} = p_{t_{2i}}$ and we have that $$(s_{q_{i}}, p_{t_{2i+1}})\in M \setminus M^* \mbox{ and } (s_{q_{i+1}}, p_{t_{2i+1}}) \in M^* \setminus M,$$ $$(s_{q_{j-1}}, p_{t_{2i+1}}) \in M \setminus M^* \mbox{ and } (s_{q_{j}}, p_{t_{2i+1}}) \in M^* \setminus M.$$ By the inductive hypothesis, the sequence $s_{q_{1}}, s_{q_2}, \ldots, s_{q_{j-1}}, $ $s_{q_j}, \ldots, s_{q_{i}}$ consists of distinct students. This implies that $s_{q_{i}} \neq s_{q_{j-1}}$. Thus since $p_{t_{2i+1}}$ is full in $M^*$, $l_{z_{i}}$ should have been able to identify distinct students $s_{q_j}$ and $s_{q_{i+1}}$ to avoid the blocking pairs $(s_{q_{j-1}}, p_{t_{2i+1}})$ and $(s_{q_{i}}, p_{t_{2i+1}})$ respectively in $M^*$, a contradiction. \item $p_{t_{2i}}$ is undersubscribed in $M^*$ and $l_{z_{i}}$ is full in $M^*$. Similarly as in case (i) above, we have that $$s_{q_{i}} \in M(l_{z_i}) \setminus M^*(l_{z_i}) \mbox{ and } s_{q_{i+1}} \in M^*(l_{z_i}) \setminus M(l_{z_i}),$$ $$s_{q_{j-1}} \in M(l_{z_i}) \setminus M^*(l_{z_i}) \mbox{ and } s_{q_{j}} \in M^*(l_{z_i}) \setminus M(l_{z_i}).$$ Since $s_{q_{i}} \neq s_{q_{j-1}}$ and $l_{z_{i}}$ is full in $M^*$, $l_{z_{i}}$ should have been able to identify distinct students $s_{q_j}$ and $s_{q_{i+1}}$ corresponding to students $s_{q_{j-1}}$ and $s_{q_{i}}$ respectively, a contradiction. \end{enumerate}
This completes the induction step. As the sequence of distinct students and projects we are identifying is infinite, we reach an immediate contradiction. \qed \end{proof}
Lemmas \ref{lemma:super-pair-deletion-within} and \ref{lemma:super-pair-deletion-outside} immediately give rise to Lemma \ref{pair-deletion}. The next lemma will be used as a tool in the proof of the remaining lemmas.
\begin{restatable}[]{lemma}{lecturerundersubscribedtool}
Let $M$ be the assignment at the termination of Algorithm {\sf SPA-ST-super} and let $M^*$ be any super-stable matching in $I$. Let $l_k$ be an arbitrary lecturer: (i) if $l_k$ is undersubscribed in $M^*$ then every student who is assigned to $l_k$ in $M$ is also assigned to $l_k$ in $M^*$; and (ii) if $l_k$ is undersubscribed in $M$ then $l_k$ has the same number of assignees in $M^*$ as in $M$. \label{lemma:super-lecturer-undersubscribed-tool} \end{restatable} \begin{proof} Let $l_k$ be an arbitrary lecturer. First, we show that (i) holds. Suppose otherwise, then there exists a student, say $s_i$, such that $s_i \in M(l_k) \setminus M^*(l_k)$. Moreover, there exists some project $p_j \in P_k$ such that $s_i \in M(p_j) \setminus M^*(p_j)$. By Lemma \ref{pair-deletion}, $s_i$ cannot be assigned to a project that she prefers to $p_j$ in $M^*$. Also, by the super-stability of $M^*$, $p_j$ is full in $M^*$ and $l_k$ prefers the worst student/s in $M^*(p_j)$ to $s_i$.
Let $l_{z_0} = l_k$, $p_{t_0} = p_{j}$, and $s_{q_0} = s_{i}$. As $p_{t_0}$ is full in $M^*$ and no project is oversubscribed in $M$, there exists some student $s_{q_1} \in M^*(p_{t_0}) \setminus M(p_{t_0})$ such that $l_{z_0}$ prefers $s_{q_1}$ to $s_{q_0}$. Let $p_{t_1} = p_{t_0}$. By Lemma \ref{pair-deletion}, $s_{q_1}$ is assigned in $M$ to a project $p_{t_2}$ such that $s_{q_1}$ prefers $p_{t_2}$ to $p_{t_1}$. We note that $s_{q_1}$ cannot be indifferent between $p_{t_2}$ and $p_{t_1}$; for otherwise, as each student applies to projects at the head of her preference list, since $(s_{q_1}, p_{t_1}) \notin M$, that would mean $(s_{q_1}, p_{t_1})$ must have been deleted during the algorithm's execution, contradicting Lemma \ref{pair-deletion}. It follows that $s_{q_1} \in M(p_{t_2}) \setminus M^*(p_{t_2})$. Let $l_{z_1}$ be the lecturer who offers $p_{t_2}$. By the super-stability of $M^*$, either (i) or (ii) holds as follows:
\begin{enumerate}[(i)]
\item $p_{t_2}$ is full in $M^*$ and $l_{z_1}$ prefers the worst student/s in $M^*(p_{t_2})$ to $s_{q_1}$; \item $p_{t_2}$ is undersubscribed in $M^*$, $l_{z_1}$ is full in $M^*$, $s_{q_1} \notin M^*(l_{z_1})$ and $l_{z_1}$ prefers the worst student/s in $M^*(l_{z_1})$ to $s_{q_1}$. \end{enumerate}
Otherwise $(s_{q_1}, p_{t_2})$ blocks $M^*$. In case (i), there exists some student $s_{q_2} \in M^*(p_{t_2}) \setminus M(p_{t_2})$. Let $p_{t_3} = p_{t_2}$. In case (ii), there exists some student $s_{q_2} \in M^*(l_{z_1}) \setminus M(l_{z_1})$. We note that $l_{z_1}$ prefers $s_{q_2}$ to $s_{q_1}$. Now, suppose $M^*(s_{q_2}) = p_{t_3}$ (possibly $p_{t_3} = p_{t_2}$). It is clear that $s_{q_2} \neq s_{q_1}$. Applying similar reasoning as for $s_{q_1}$, student $s_{q_2}$ is assigned in $M$ to a project $p_{t_4}$ such that $s_{q_2}$ prefers $p_{t_4}$ to $p_{t_3}$. Let $l_{z_2}$ be the lecturer who offers $p_{t_4}$. We are identifying a sequence $\langle s_{q_i}\rangle_{i \geq 1}$ of students, a sequence $\langle p_{t_i}\rangle_{i \geq 1}$ of projects, and a sequence $\langle l_{z_i}\rangle_{i \geq 1}$ of lecturers, such that, for each $i \geq 1$
\begin{enumerate}
\item $s_{q_{i}}$ prefers $p_{t_{2i}}$ to $p_{t_{2i-1}}$, \item $(s_{q_i}, p_{t_{2i}}) \in M$ and $(s_{q_i}, p_{t_{2i - 1}}) \in M^*$, \item $l_{z_i}$ prefers $s_{q_{i+1}}$ to $s_{q_{i}}$; also, $l_{z_i}$ offers both $p_{t_{2i}}$ and $p_{t_{2i+1}}$ (possibly $p_{t_{2i}} = p_{t_{2i+1}}$). \end{enumerate}
Following a similar argument as in the proof of Lemma~\ref{lemma:super-pair-deletion-outside}, we can identify an infinite sequence of distinct students and projects, a contradiction. Hence, if $l_k$ is undersubscribed in $M^*$ then every student who is assigned to $l_k$ in $M$ is also assigned to $l_k$ in $M^*$.
Next, we show that (ii) holds. By the first claim, any lecturer who is full in $M$ is also full in $M^*$, and any lecturer who is undersubscribed in $M$ has at least as many assignees in $M^*$ as she has in $M$. Hence \begin{eqnarray} \label{ineq:undersubscribed-lecturer-1}
\sum_{l_k \in \mathcal{L}}{|M(l_k)|} \leq \sum_{l_k \in \mathcal{L}}{|M^*(l_k)|} \enspace. \end{eqnarray}
We note that if a student $s_{i}$ is unassigned in $M$, by Lemma \ref{pair-deletion}, $s_{i}$ is unassigned in $M^*$. Equivalently, if $s_{i}$ is assigned in $M^*$ then $s_{i}$ is assigned in $M$. Let $S_1$ denote the set of students who are assigned to at least one project in $M$, and let $S_2$ denote the set of students who are assigned to a project in $M^*$; it follows that $|S_2| \leq |S_1|$. Further, we have that \begin{eqnarray} \label{ineq:undersubscribed-lecturer-2}
\sum_{l_k \in \mathcal{L}}{|M^*(l_k)|} = |S_2| \leq |S_1| \leq \sum_{l_k \in \mathcal{L}}{|M(l_k)|}, \end{eqnarray}
From Inequalities \eqref{ineq:undersubscribed-lecturer-1} and \eqref{ineq:undersubscribed-lecturer-2}, it follows that $|M(l_k)| = |M^*(l_k)|$ for each $l_k \in \mathcal{L}$. \qed \end{proof} The next three lemmas deal with the case that Algorithm {\sf SPA-ST-super} reports the non-existence of a super-stable matching in $I$.
\begin{restatable}[]{lemma}{studentlemma} \label{lemma-super-multi-assignment} If a student is assigned to two or more projects at the termination of Algorithm {\sf SPA-ST-super} then $I$ admits no super-stable matching. \end{restatable} \begin{proof} Let $M$ be the assignment at the termination of the algorithm. Suppose for a contradiction that there exists a super-stable matching $M^*$ in $I$. Suppose that a student is assigned to two or more projects in $M$. Then either (a) any two of these projects are offered by different lecturers or (b) all of these projects are offered by the same lecturer.
Firstly, suppose (a) holds. Then some lecturer has fewer assignees in $M^*$ than in $M$. Suppose not, then \begin{eqnarray} \label{eqn-multiple-assignment-1}
\sum_{l_k \in \mathcal{L}}{|M^*(l_k)|} \geq \sum_{l_k \in \mathcal{L}}{|M(l_k)|}\enspace. \end{eqnarray}
Let $S_1$ and $S_2$ be as defined in the proof of Lemma \ref{lemma:super-lecturer-undersubscribed-tool}, it follows that $|S_2| \leq |S_1|$. Hence, \begin{eqnarray} \label{eqn-multiple-assignment-2}
\sum_{l_k \in \mathcal{L}}{|M^*(l_k)|} = |S_2| \leq |S_1| < \sum_{l_k \in \mathcal{L}}{|M(l_k)|}, \end{eqnarray} since some student in $S_1$ is assigned in $M$ to two or more projects offered by different lecturers. Inequality \eqref{eqn-multiple-assignment-2} contradicts Inequality \eqref{eqn-multiple-assignment-1}. Hence, our claim is established. As some lecturer $l_k$ has fewer assignees in $M^*$ than in $M$, it follows that $l_k$ is undersubscribed in $M^*$, since no lecturer is oversubscribed in $M$. In particular, there exists some project $p_j \in P_k$ and some student, say $s_i$, such that $p_j$ is undersubscribed in $M^*$ and $(s_i, p_j) \in M \setminus M^*$. Since $(s_i, p_j) \in M$, then $p_j$ must have been in the head of $s_i$'s preference list when $s_i$ applied to $p_j$ during the algorithm's execution. By Lemma \ref{pair-deletion}, either $s_i$ is unassigned in $M^*$ or $s_i$ prefers $p_j$ to $M^*(s_i)$ or $s_i$ is indifferent between them. Hence $(s_i, p_j)$ blocks $M^*$, a contradiction.
Next, suppose (b) holds. Then $|S_1| \leq \sum_{l_k \in \mathcal{L}} |M(l_k)|$. As in case (a), since $|S_2|\leq |S_1|$, it follows that
$$\sum_{l_k \in \mathcal{L}} |M^*(l_k)| \leq \sum_{l_k \in \mathcal{L}} |M(l_k)|\enspace.$$
Suppose first that $|M^*(l_k)| < |M(l_k)|$ for some $l_k \in \mathcal{L}$. Then $l_k$ has fewer assignees in $M^*$ than in $M$, and following a similar argument as in case (a) above, we reach an immediate contradiction. Hence, $|M^*(l_k)| = |M(l_k)|$ for all $l_k \in \mathcal{L}$. For each $l_k \in \mathcal{L}$, we claim that every student who is assigned to $l_k$ in $M$ is also assigned to $l_k$ in $M^*$. Suppose otherwise. Then there exist some lecturer $l_{z_1} \in \mathcal{L}$ and some student $s_{q_1} \in M(l_{z_1}) \setminus M^*(l_{z_1})$. Let $M(s_{q_1}) = p_{t_2}$. By Lemma \ref{pair-deletion}, $s_{q_1}$ is assigned in $M^*$ to a project $p_{t_1}$ such that $s_{q_1}$ prefers $p_{t_2}$ to $p_{t_1}$. Clearly, $p_{t_1}$ is not offered by $l_{z_1}$, since $s_{q_1} \in M(l_{z_1}) \setminus M^*(l_{z_1})$. We also note that $s_{q_1}$ cannot be indifferent between $p_{t_2}$ and $p_{t_1}$. Otherwise, the argument follows from (a), since $s_{q_1}$ is assigned in $M$ to two projects offered by different lecturers, and we reach an immediate contradiction. By the super-stability of $M^*$, either (i) or (ii) holds as follows: \begin{enumerate}[(i)]
\item $p_{t_2}$ is full in $M^*$ and $l_{z_1}$ prefers every student in $M^*(p_{t_2})$ to $s_{q_1}$; \item $p_{t_2}$ is undersubscribed in $M^*$, $l_{z_1}$ is full in $M^*$ and $l_{z_1}$ prefers every student in $M^*(l_{z_1})$ to $s_{q_1}$. \end{enumerate}
Otherwise, $(s_{q_1}, p_{t_2})$ blocks $M^*$. In case (i), there exists some student $s_{q_2} \in M^*(p_{t_2}) \setminus M(p_{t_2})$. Let $p_{t_3} = p_{t_2}$. In case (ii), there exists some student $s_{q_2} \in M^*(l_{z_1}) \setminus M(l_{z_1})$. We note that $l_{z_1}$ prefers $s_{q_2}$ to $s_{q_1}$, and clearly $s_{q_2} \neq s_{q_1}$. Let $M^*(s_{q_2}) = p_{t_3}$ (possibly $p_{t_3} = p_{t_2}$). Applying similar reasoning as for $s_{q_1}$, student $s_{q_2}$ is assigned in $M$ to a project $p_{t_4}$ such that $s_{q_2}$ prefers $p_{t_4}$ to $p_{t_3}$. We are identifying a sequence $\langle s_{q_i}\rangle_{i \geq 1}$ of students, a sequence $\langle p_{t_i}\rangle_{i \geq 1}$ of projects, and a sequence $\langle l_{z_i}\rangle_{i \geq 1}$ of lecturers, such that, for each $i \geq 1$
\begin{enumerate}
\item $s_{q_{i}}$ prefers $p_{t_{2i}}$ to $p_{t_{2i-1}}$, \item $(s_{q_i}, p_{t_{2i}}) \in M$ and $(s_{q_i}, p_{t_{2i - 1}}) \in M^*$, \item $l_{z_i}$ prefers $s_{q_{i+1}}$ to $s_{q_{i}}$; also, $l_{z_i}$ offers both $p_{t_{2i}}$ and $p_{t_{2i+1}}$ (possibly $p_{t_{2i}} = p_{t_{2i+1}}$). \end{enumerate}
Following a similar argument as in the proof of Lemma~\ref{lemma:super-pair-deletion-outside}, we can identify an infinite sequence of distinct students and projects, a contradiction.
Now, let $s_i$ be an arbitrary student such that $s_i$ is assigned in $M$ to two or more projects offered by the same lecturer, say $l_k$. Then $s_i \in M^*(l_k)$. Moreover, there exists some project $p_j \in P_k$ such that $(s_i, p_j) \in M \setminus M^*$. We claim that $p_j$ is undersubscribed in $M^*$. Suppose otherwise. Let $l_{z_0} = l_k$, $p_{t_0} = p_j$ and $s_{q_0} = s_i$. Then there exists some student $s_{q_1} \in M^*(p_{t_0}) \setminus M(p_{t_0})$, since $p_{t_0}$ is not oversubscribed in $M$ and $s_{q_0} \in M(p_{t_0}) \setminus M^*(p_{t_0})$. Again, by Lemma \ref{pair-deletion}, $s_{q_1}$ is assigned in $M$ to a project $p_{t_1}$ such that $s_{q_1}$ prefers $p_{t_1}$ to $p_{t_0}$. Let $l_{z_1}$ be the lecturer who offers $p_{t_1}$. Following a similar argument as in the proof of Lemma~\ref{lemma:super-pair-deletion-outside}, we can identify a sequence of distinct students and projects, and as this sequence is infinite, we reach a contradiction. Hence our claim holds, i.e., $p_j$ is undersubscribed in $M^*$. Finally, since $s_i$ cannot be assigned to any project that she prefers to $p_j$ in $M^*$ and since $s_i \in M^*(l_k)$, we have that $(s_i, p_j)$ blocks $M^*$, a contradiction. \qed \end{proof}
\begin{restatable}[]{lemma}{lecturerlemma} \label{lemma:super-lec-full-under} If some lecturer $l_k$ becomes full during some execution of Algorithm {\sf SPA-ST-super} and $l_k$ subsequently ends up undersubscribed at the termination of the algorithm, then $I$ admits no super-stable matching. \end{restatable}
\begin{proof} Let $M$ be the assignment at the termination of the algorithm. Suppose for a contradiction that there exists a super-stable matching $M^*$ in $I$.
Let $l_k$ be the lecturer who became full during some execution of the algorithm and subsequently ends up undersubscribed in $M$. By Lemma \ref{lemma:super-lecturer-undersubscribed-tool}, $|M(l_k)| = |M^*(l_k)|$ and thus $l_k$ is undersubscribed in $M^*$. At the point in the algorithm where $l_k$ became full (line 22), we note that none of the projects offered by $l_k$ is oversubscribed. Since $l_k$ ended up undersubscribed in $M$, it follows that there is some project $p_j \in P_k$ that has fewer assignees in $M$ at the termination of the algorithm than it had at some point during the algorithm's execution, thus $p_j$ is undersubscribed in $M$.
We claim that each project offered by $l_k$ has the same number of assignees in $M^*$ as in $M$. Suppose otherwise; then there is some project $p_t \in P_k$ such that $|M^*(p_t)| < |M(p_t)|$; thus $p_t$ is undersubscribed in $M^*$, since no project is oversubscribed in $M$. It follows that there exists some student $s_r \in M(p_t) \setminus M^*(p_t)$. By Lemma \ref{pair-deletion}, $s_r$ is either unassigned in $M^*$ or prefers $p_t$ to $M^*(s_r)$ or is indifferent between them. Since $l_k$ is undersubscribed in $M^*$, $(s_r, p_t)$ blocks $M^*$, a contradiction. Hence $|M^*(p_t)| \geq |M(p_t)|$. Moreover, since $|M(l_k)| = |M^*(l_k)|$, we have that $|M(p_t)| = |M^*(p_t)|$ for all $p_t \in P_k$.
Hence $p_j$ undersubscribed in $M$ implies that $p_j$ is undersubscribed in $M^*$. Moreover, there is some student $s_i$ who was provisionally assigned to $p_j$ at some point during the execution of the algorithm but $s_i$ is not assigned to $p_j$ in $M$. Thus, the pair $(s_i, p_j)$ was deleted during the algorithm's execution, so that $(s_i, p_j) \notin M^*$ by Lemma \ref{pair-deletion}. It follows that either $s_i$ is unassigned in $M^*$ or $s_i$ prefers $p_j$ to $M^*(s_i)$ or $s_i$ is indifferent between them. Hence, $(s_i, p_j)$ blocks $M^*$, a contradiction. \qed \end{proof}
\begin{restatable}[]{lemma}{projectlemma} \label{lemma-super-proj-full-under} If the pair $(s_i, p_j)$ was deleted during some execution of Algorithm {\sf SPA-ST-super}, and at the termination of the algorithm $s_i$ is not assigned to a project better than $p_j$, and each of $p_j$ and $l_k$ is undersubscribed (where $l_k$ is the lecturer who offers $p_j$), then $I$ admits no super-stable matching. \end{restatable} \begin{proof} Suppose for a contradiction that there exists a super-stable matching $M^*$ in $I$. Let $(s_i, p_j)$ be a pair that was deleted during an arbitrary execution $E$ of the algorithm. This implies that $(s_i, p_j) \notin M^*$ by Lemma \ref{pair-deletion}. Let $M$ be the assignment at the termination of $E$. By the hypothesis of the lemma, $l_k$ is undersubscribed in $M$. This implies that $l_k$ is undersubscribed in $M^*$, by Lemma~\ref{lemma:super-lecturer-undersubscribed-tool}. Since $p_j$ is offered by $l_k$, and $p_j$ is undersubscribed in $M$, it follows from the proof of Lemma \ref{lemma:super-lec-full-under} that $p_j$ is undersubscribed in $M^*$. Further, by the hypothesis of the lemma, either $s_i$ is unassigned in $M$, or $s_i$ prefers $p_j$ to $M(s_i)$ or is indifferent between them. By Lemma \ref{pair-deletion}, this is true for $s_i$ in $M^*$. Hence $(s_i, p_j)$ blocks $M^*$, a contradiction. \qed \end{proof}
The next lemma shows that the final assignment may be used to determine the existence, or otherwise, of a super-stable matching in $I$.
\begin{restatable}[]{lemma}{nosuperstablematchinglemma} \label{lemma-super-correctness} If at the termination of Algorithm {\sf SPA-ST-super}, the assignment $M$ is not super-stable in $I$ then no super-stable matching exists in $I$. \end{restatable} \begin{proof} Suppose $M$ is not super-stable in $I$. If some student $s_i$ is assigned to two or more projects in $M$ then $I$ admits no super-stable matching, by Lemma \ref{lemma-super-multi-assignment}. Hence every student is assigned to at most one project in $M$. Moreover, since no project or lecturer is oversubscribed in $M$, it follows that $M$ is a matching. Let $(s_i, p_j)$ be a blocking pair for $M$; then $s_i$ is either unassigned in $M$ or prefers $p_j$ to $M(s_i)$ or is indifferent between them. Whichever is the case, $(s_i, p_j)$ has been deleted. Let $l_k$ be the lecturer who offers $p_j$. In what follows, we will identify the point in the algorithm at which $(s_i, p_j)$ was deleted, and consequently, we will arrive at the conclusion that no super-stable matching exists.
Firstly, suppose $(s_i, p_j)$ was deleted as a result of $p_j$ being full or oversubscribed (on lines 12 or 21). Suppose $p_j$ is full in $M$. Then $(s_i, p_j)$ cannot block $M$ irrespective of whether $l_k$ is undersubscribed or full in $M$, since $l_k$ prefers the worst assigned student/s in $M(p_j)$ to $s_i$. Hence $p_j$ is undersubscribed in $M$. As $p_j$ was previously full, each pair $(s_t, p_u)$, for each $s_t$ that is no better than $s_i$ at the tail of $\mathcal{L}_k$ and each $p_u \in P_k \cap A_t$, would have been deleted on line 34 of the algorithm. Thus, if $l_k$ is full in $M$ then $(s_i, p_j)$ does not block $M$. Suppose $l_k$ is undersubscribed in $M$. If $l_k$ was full at some point during the execution of the algorithm then $I$ admits no super-stable matching, by Lemma \ref{lemma:super-lec-full-under}. Hence $l_k$ was never full during the algorithm's execution. Recall that each of $p_j$ and $l_k$ is undersubscribed in $M$. As $(s_i, p_j)$ is a blocking pair of $M$, $s_i$ cannot be assigned in $M$ to a project that she prefers to $p_j$. Hence $I$ admits no super-stable matching, by Lemma \ref{lemma-super-proj-full-under}.
Next, suppose $(s_i, p_j)$ was deleted as a result of $l_k$ being full or oversubscribed (on lines 16 or 26). Then $(s_i, p_j)$ could only block $M$ if $l_k$ is undersubscribed in $M$. If this is the case then $I$ admits no super-stable matching, by Lemma \ref{lemma:super-lec-full-under}.
Finally, suppose $(s_i, p_j)$ was deleted (on line 34) because some other project $p_{j'}$ offered by $l_k$ was previously full and ended up undersubscribed on line 28. Then $l_k$ must have identified the most-preferred student, say $s_r$, who was previously assigned to $p_{j'}$ but subsequently got rejected from $p_{j'}$. At this point, $s_i$ is at the tail of $\mathcal{L}_k$ and $s_i$ is no better than $s_r$ in $\mathcal{L}_k$. Moreover, every project offered by $l_k$ that $s_i$ finds acceptable would have been deleted from $s_i$'s preference list at the for loop iteration in line 34. If $p_j$ is full in $M$ then $(s_i,p_j)$ does not block $M$. Hence $p_j$ is undersubscribed in $M$. If $l_k$ is full in $M$ then $(s_i, p_j)$ does not block $M$, since $s_i \notin M(l_k)$ and $l_k$ prefers the worst student/s in $M(l_k)$ to $s_i$. Hence $l_k$ is undersubscribed in $M$. Again by Lemma \ref{lemma-super-proj-full-under}, $I$ admits no super-stable matching.
Since $(s_i, p_j)$ is an arbitrary pair, this implies that $I$ admits no super-stable matching. \qed \end{proof}
The next lemma shows that Algorithm {\sf SPA-ST-super} may be implemented to run in linear time.
\begin{restatable}[]{lemma}{lineartimelemma} \label{lemma-super-complexity} Algorithm {\sf SPA-ST-super} may be implemented to run in $O(L)$ time and $O(n_1n_2)$ space, where $n_1$, $n_2$, and $L$ are the number of students, number of projects, and the total length of the preference lists, respectively, in $I$. \end{restatable}
\begin{proof} The algorithm's time complexity depends on how efficiently we can execute the operation of a student applying to a project and the operation of deleting a (student, project) pair, each of which occurs at most once for any (student, project) pair. It turns out that both operations can be implemented to run in constant time, giving Algorithm {\sf SPA-ST-super} an overall complexity of $\Theta(L)$, where $L$ is the total length of all the preference lists. In what follows, we describe the non-trivial aspects of such an implementation. We remark that the data structures discussed here are inspired by, and extend, those detailed in \cite[Section 3.3]{AIM07} for Algorithm {\sf SPA}-student.
For each student $s_i$, build an array $\mathit{position}_{s_i}$, where $\mathit{position}_{s_i}(p_j)$ is the position of project $p_j$ in $s_i$'s preference list. For example, if $s_i$'s preference list is $(p_2 \; p_5 \; p_3) \; p_7 \; (p_6 \; p_1)$ then $\mathit{position}_{s_i}(p_5) = 2$ and $\mathit{position}_{s_i}(p_1) = 6$. In general, position captures the order in which the projects appear in the preference list when read from left to right, ignoring any ties. Represent $s_i$'s preference list by embedding doubly linked lists in an array $\mathit{preference}_{s_i}$. For each project $p_j \in A_i$, $\mathit{preference}_{s_i}(\mathit{position}_{s_i}(p_j))$ stores the list node containing $p_j$. This node contains two next pointers (and two previous pointers) -- one to the next project in $s_i$'s preference list (after deletions, this project may not be located at the next array position), and another pointer to the next project $p_{j'}$ in $s_i$'s preference list, where $p_{j'}$ and $p_j$ are both offered by the same lecturer. Construct the latter list by traversing through $s_i$'s preference list, using a temporary array to record the last project in the list offered by each lecturer. Use virtual initialisation (described in \cite[p.~149]{BB96}) for these arrays, since the overall $O(n_1 n_3)$ initialisation may be too expensive.
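The following Python sketch is illustrative only and does not reproduce our implementation verbatim; the helper name \texttt{build\_student\_arrays} and the arguments \texttt{pref\_list} and \texttt{lecturer\_of} are chosen here purely for exposition. It shows one way to build $\mathit{position}_{s_i}$ and the two embedded linked lists in time linear in the length of $s_i$'s preference list (virtual initialisation is omitted for brevity).

\begin{verbatim}
def build_student_arrays(pref_list, lecturer_of):
    # pref_list: s_i's acceptable projects read left to right, ties flattened
    # lecturer_of: maps each project to the lecturer who offers it
    position = {p: r + 1 for r, p in enumerate(pref_list)}
    # each node: [project, next, prev, next_same_lecturer, prev_same_lecturer]
    preference = [[p, None, None, None, None] for p in pref_list]
    for r in range(len(pref_list) - 1):        # next/prev pointers in list order
        preference[r][1] = r + 1
        preference[r + 1][2] = r
    last = {}                                  # last project seen per lecturer
    for r, p in enumerate(pref_list):          # link projects of the same lecturer
        k = lecturer_of[p]
        if k in last:
            preference[last[k]][3] = r
            preference[r][4] = last[k]
        last[k] = r
    return position, preference
\end{verbatim}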
To represent the ties in $s_i$'s preference list, build an array $\mathit{successor}_{s_i}$. For each project $p_j$ in $s_i$'s preference list, $\mathit{successor}_{s_i}(\mathit{position}_{s_i}(p_j))$ stores the \texttt{true} boolean if $p_j$ is tied with its successor in $A_i$ and \texttt{false} otherwise. After the deletion of any (student, project) pair, update the successor booleans. As an illustration, with respect to $s_i$'s preference list given in the previous paragraph, $\mathit{successor}_{s_i}$ is the array [\texttt{true, true, false, false, true, false}]. Now, suppose $p_3$ is deleted from $s_i$'s preference list. Since $\mathit{successor}_{s_i}(\mathit{position}_{s_i}(p_3))$ is \texttt{false} and $\mathit{successor}_{s_i}(\mathit{position}_{s_i}(p_5))$ is \texttt{true}, we set $\mathit{successor}_{s_i}(\mathit{position}_{s_i}(p_5))$ to \texttt{false} (since $p_5$ is the predecessor of $p_3$). Clearly, using these data structures, we can find the next project at the head of each student's preference list, find the next project offered by a given lecturer on each student's preference list, as well as delete a project from a given student's preference list in constant time.
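A minimal sketch of the constant-time delete operation, under the same assumptions as above (the helper name \texttt{delete\_project} is again ours): it splices the node out of the doubly linked list and updates the successor boolean of the predecessor exactly as described.

\begin{verbatim}
def delete_project(position, preference, successor, p):
    idx = position[p] - 1                 # 0-based array slot of p
    nxt, prv = preference[idx][1], preference[idx][2]
    if prv is not None:
        preference[prv][1] = nxt          # splice p out of the list
        if not successor[idx]:
            # p was not tied with its successor, so its predecessor is
            # no longer tied with the entry that now follows it
            successor[prv] = False
    if nxt is not None:
        preference[nxt][2] = prv
    # the same-lecturer pointers (slots 3 and 4) are spliced analogously
\end{verbatim}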
For each lecturer $l_k$, build two arrays $\mathit{preference}_{l_k}$ and $\mathit{successor}_{l_k}$, where $\mathit{preference}_{l_k}(s_i)$ is the position of student $s_i$ in $l_k$'s preference list, and $\mathit{successor}_{l_k}$ $(\mathit{preference}_{l_k}(s_i))$ stores the position of the first strict successor (with respect to position) of $s_i$ in $\mathcal{L}_k$ or a null value if $s_i$ has no strict successor\footnote{For example, if $l_k$'s preference list is $s_5 \; (s_3 \; s_1 \; s_6) \; s_7 \; (s_2 \; s_8)$ then $\mathit{successor}_{l_k}$ is the array $[2 \; 5 \; 5 \; 5 \; 6 \; 0 \; 0]$.}. Represent $l_k$'s preference list (i.e., $\mathcal{L}_k$) by the array $\mathit{preference}_{l_k}$, with an additional pointer, $\mathit{last}_{l_k}$. Initially, $\mathit{last}_{l_k}$ stores the index of the last position in $\mathit{preference}_{l_k}$. To represent the ties in $l_k$'s preference list, build an array $\mathit{predecessor}_{l_k}$. For each $s_i \in \mathcal{L}_k$, $\mathit{predecessor}_{l_k}(\mathit{preference}_{l_k}(s_i))$ stores the \texttt{true} boolean if $s_i$ is tied with its predecessor in $\mathcal{L}_k$ and \texttt{false} otherwise.
When $l_k$ becomes full, make $\mathit{last}_{l_k}$ equivalent to $l_k$'s worst assigned student through the following method. Perform a backward traversal through the array $\mathit{preference}_{l_k}$, starting at $\mathit{last}_{l_k}$, and continuing until $l_k$'s worst assigned student, say $s_{i'}$, is encountered (each student stores a pointer to their assigned project, or a special null value if unassigned). Deletions must be carried out in the preference list of each student who is worse than $s_{i'}$ on $l_k$'s preference list (precisely those students whose position in $\mathit{preference}_{l_k}$ is greater than or equal to that stored in $\mathit{successor}_{l_k}(\mathit{preference}_{l_k}(s_{i'}))$)\footnote{For efficiency, we remark that it is not necessary to make deletions from the preference lists of lecturers or projected preference lists of lecturers for each project the lecturer offers, since the while loop of Algorithm {\sf SPA-ST-super} involves students applying to projects in the head of their preference list.}.
When $l_k$ becomes oversubscribed, we can find and delete the students at the tail of $l_k$ by performing a backward traversal through the array $\mathit{preference}_{l_k}$, starting at $\mathit{last}_{l_k}$, and continuing until we encounter a student, say $s_{i'}$, such that $\mathit{predecessor}_{l_k}(\mathit{preference}_{l_k}(s_{i'}))$ stores the \texttt{false} boolean. If $l_k$ becomes undersubscribed after we break the assignment of students encountered on this traversal (including $s_{i'}$) to $l_k$, rather than update $\it{last}_{l_k}$ immediately, which could be expensive, we wait until $l_k$ becomes full again. The cost of these traversals taken over the algorithm's execution is thus linear in the length of $l_k$'s preference list.
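As a rough illustration of this backward traversal, consider the simplified sketch below (the array name follows the text, the helper name \texttt{tail\_of\_tie} is ours, and positions are treated as 0-based indices): it collects the tie at the tail of $\mathcal{L}_k$, stopping at the first student who is not tied with her predecessor.

\begin{verbatim}
def tail_of_tie(predecessor_lk, last_lk):
    # walk backwards from last_lk, collecting the tie at the tail of L_k
    pos, tail = last_lk, []
    while pos >= 0:
        tail.append(pos)
        if not predecessor_lk[pos]:   # first student of the tie reached
            break
        pos -= 1
    return tail   # positions of the students whose assignments are broken
\end{verbatim}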
For each project $p_j$ offered by $l_k$, build the arrays $\mathit{preference}_{p_j}$, $\mathit{successor}_{p_j}$ and $\mathit{predecessor}_{p_j}$ corresponding to $\mathcal{L}_k^j$, as described in the previous paragraph for $\mathcal{L}_k$. Represent the projected preference list of $l_k$ for $p_j$ (i.e., $\mathcal{L}_k^j$) by the array $\mathit{preference}_{p_j}$, with an additional pointer, $\mathit{last}_{p_j}$. These project preference arrays are used in much the same way as the lecturer preference arrays.
Since we only visit a student at most twice during these backward traversals, once for the lecturer and once for the project, the asymptotic running time remains linear.
\qed \end{proof}
\noindent Lemma \ref{pair-deletion} shows that there is an optimality property for each assigned student in any super-stable matching found by the algorithm, whilst Lemma \ref{lemma-super-correctness} establishes the correctness of Algorithm {\sf SPA-ST-super}. The following theorem collects together Lemmas \ref{pair-deletion}, \ref{lemma-super-correctness} and \ref{lemma-super-complexity}. \begin{theorem} \label{thrm:super-optimality} For a given instance $I$ of {\sc spa-st}, Algorithm {\sf SPA-ST-super} determines, in $O(L)$ time and $O(n_1n_2)$ space, whether or not a super-stable matching exists in $I$. If such a matching does exist, all possible executions of the algorithm find one in which each assigned student is assigned to the best project that she could obtain in any super-stable matching, and each unassigned student is unassigned in all super-stable matchings. \end{theorem}
Given the optimality property established by Theorem \ref{thrm:super-optimality}, we define the super-stable matching found by Algorithm {\sf SPA-ST-super} to be \textit{student-optimal}.
\subsection{Properties of super-stable matchings in {\sc spa-st}} \label{subsect:properties} In this section, we consider properties of the set of super-stable matchings in an instance of {\sc spa-st}. We show that the Unpopular Projects Theorem for {\sc spa-s} (see Theorem \ref{thrm:rural-spa-s}) holds for {\sc spa-st} under super-stability.
\begin{restatable}[]{theorem}{upt} \label{thrm:upt} For a given instance $I$ of {\sc spa-st}, the following holds: \begin{enumerate} \item each lecturer is assigned the same number of students in all super-stable matchings; \item exactly the same students are unassigned in all super-stable matchings; \item a project offered by an undersubscribed lecturer has the same number of students in all super-stable matchings. \end{enumerate} \end{restatable}
\begin{proof} Let $M$ and $M^*$ be two arbitrary super-stable matchings in $I$. Let $I'$ be an instance of {\sc spa-s} obtained from $I$ by breaking the ties in $I$ in some way. Then by Proposition \ref{proposition1}, each of $M$ and $M^*$ is stable in $I'$. Thus by Theorem \ref{thrm:rural-spa-s}, each lecturer is assigned the same number of students in $M$ and $M^*$, exactly the same students are unassigned in $M$ and $M^*$, and a project offered by an undersubscribed lecturer has the same number of students in $M$ and $M^*$. \qed\end{proof}
\begin{figure}
\caption{ \small Instance $I_2$ of {\sc spa-st}.}
\label{fig:spa-st-instance-2}
\end{figure}
To illustrate this, consider the {\sc spa-st} instance $I_2$ given in Fig.~\ref{fig:spa-st-instance-2}, which admits the super-stable matchings $M_1 = \{(s_3, p_3), (s_4, p_2), (s_5, p_3),$ $(s_6, p_2)\}$ and $M_2 = \{(s_3, p_3), (s_4, p_3), $ $(s_5, p_2), (s_6, p_2)\}$. Each of $l_1$ and $l_2$ is assigned the same number of students in both $M_1$ and $M_2$, illustrating part (1) of Theorem \ref{thrm:upt}. Also, each of $s_1$ and $s_2$ is unassigned in both $M_1$ and $M_2$, illustrating part (2) of Theorem \ref{thrm:upt}. Finally, $l_2$ is undersubscribed in both $M_1$ and $M_2$, and each of $p_3$ and $p_4$ has the same number of students in both $M_1$ and $M_2$, illustrating part (3) of Theorem \ref{thrm:upt}.
\section{Empirical Evaluation} \label{emprical-results} In this section, we evaluate an implementation of Algorithm {\sf SPA-ST-super}. We implemented our algorithm in Python\footnote{https://github.com/sofiatolaosebikan/spa-st-super}, and performed our experiments on a system with dual Intel Xeon CPU E5-2640 processors with 64GB of RAM, running Ubuntu 17.10. For our experiment, we were primarily concerned with the following question: how does the nature of the preference lists in a given {\sc spa-st} instance affect the existence of a super-stable matching?
\subsection{Datasets} When generating random datasets, there are clearly several parameters that can be varied, such as the number of students, projects and lecturers; the lengths of the students' preference lists as well as a measure of the density of ties present in the preference lists. We denote by $t_d$ the density of ties present in the preference lists. In each student's preference list, the tie density $t_{d_s} \; (0 \leq t_{d_s} \leq 1)$ is the probability that some project is tied to its successor. The tie density $t_{d_l}$ in each lecturer's preference list is defined similarly. At $t_{d_s} = t_{d_l} = 1$, each preference list comprises a single tie, while at $t_{d_s} = t_{d_l} = 0$, no ties exist in the preference lists, thus reducing the problem to an instance of {\sc spa-s}.
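For concreteness, ties can be injected into a strictly ordered list as in the following sketch (illustrative only; \texttt{add\_ties} is an assumed helper name): each entry other than the first is merged into its predecessor's tie with probability equal to the tie density.

\begin{verbatim}
import random

def add_ties(strict_list, tie_density):
    if not strict_list:
        return []
    ties, current = [], [strict_list[0]]
    for entry in strict_list[1:]:
        if random.random() < tie_density:
            current.append(entry)     # tie this entry with its predecessor
        else:
            ties.append(current)      # close the current tie
            current = [entry]
    ties.append(current)
    return ties                       # e.g. [[p2, p5, p3], [p7], [p6, p1]]
\end{verbatim}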
\subsection{Experimental Setup} For each range of values for the aforementioned parameters, we randomly generated a set of {\sc spa-st} instances, involving $n_1$ students (which we will henceforth refer to as the size of the instance), $0.5n_1$ projects, $0.2n_1$ lecturers and $1.5n_1$ total project capacity which was randomly distributed amongst the projects. The capacity for each lecturer $l_k$ was chosen uniformly at random to lie between the highest capacity of the projects offered by $l_k$ and the sum of the capacities of the projects that $l_k$ offers.\footnote{We remark that the parameter space was chosen to ensure that projects could typically accommodate more than one student, that the total capacity of the projects exceeded the number of students, and that each lecturer typically offered multiple projects, without reflecting any specific real-world application.} In each set, we measured the proportion of instances that admit a super-stable matching.
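A minimal sketch of how such parameters might be drawn follows; this is only one way to realise the description above, the helper names are ours, and the particular scheme used to spread the leftover project capacity is an assumption.

\begin{verbatim}
import random

def instance_dimensions(n1):
    n2, n3 = n1 // 2, n1 // 5              # 0.5*n1 projects, 0.2*n1 lecturers
    total_capacity = (3 * n1) // 2         # 1.5*n1 total project capacity
    caps = [1] * n2                        # give every project capacity >= 1
    for _ in range(total_capacity - n2):   # spread the remainder at random
        caps[random.randrange(n2)] += 1
    return n2, n3, caps

def lecturer_capacity(offered_caps):
    # uniformly between the largest and the sum of l_k's project capacities
    return random.randint(max(offered_caps), sum(offered_caps))
\end{verbatim}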
It is worth mentioning that when we varied the tie density on both the students' and lecturers' preference lists between $0.1$ and $0.5$, super-stable matchings were very elusive, even with an instance size of $100$ students. Thus, for the purpose of our experiment, we decided to choose a low tie density.
\subsubsection{Correctness testing} To test the correctness of our algorithm's implementation, we implemented an Integer Programming (IP) model for super-stability in {\sc spa-st} (see Appendix \ref{appendixA}) using the Gurobi optimisation solver in Python. We randomly generated $10,000$ {\sc spa-st} instances, each consisting of $100$ students and a constant ratio of projects, lecturers, project capacities and lecturer capacities as described above. Also, the length of each student's preference list was fixed at $10$, with a tie density of $0.1$. With this setup, we verified consistency between the outcomes of our implementation of Algorithm {\sf SPA-ST-super} and our implementation of the IP-based algorithm in terms of the existence or otherwise of a super-stable matching.
\subsubsection{Experiment 1} In our first experiment, we examined how the length of the students' preference lists affects the existence of a super-stable matching. We increased the number of students $n_1$ while maintaining a constant ratio of projects, lecturers, project capacities and lecturer capacities as described above. For various values of $n_1 \; (100 \leq n_1 \leq 1000)$ in increments of $100$, we varied the length $x$ of each student's preference list ($5 \leq x \leq 50$) in increments of $5$; and for each combination of these parameters, we randomly generated $1000$ instances. For all the preference lists, we set $t_{d_s} = t_{d_l} = 0.005$ (on average, $1$ out of $5$ students has a single tie of length $2$ in their preference list, and this holds similarly for the lecturers).
The result, which is displayed in Fig.~\ref{super-experiment1}, shows that as we varied the length of the preference list, there was no significant uplift in the number of instances that admitted a super-stable matching. In most cases, we observed that the proportion of instances that admit a super-stable matching is slightly higher when the preference list length is $50$ compared to when the preference list length is $5$. The result also shows that the proportion of instances that admit a super-stable matching decreases as the number of students increases. Further, we recorded the time taken for our algorithm's implementation to terminate, and as can be seen in Table \ref{fig:super-time-table}, for an instance size of $1000$ and preference list length $50$, the algorithm terminates in approximately $0.4$ seconds.
\begin{figure}
\caption{ Proportion of instances that admit a super-stable matching as the size of the instance increases while varying the length of the preference lists with tie density fixed at $0.005$ in both the students' and lecturers' preference lists.}
\label{super-experiment1}
\end{figure}
\begin{table}[H] \setlength{\tabcolsep}{0.4em} \renewcommand*{\arraystretch}{1.2} \centering \caption{Time (in seconds) for our algorithm's implementation to terminate, averaged over $1000$ instances for each instance size, with the length of each student's preference list fixed at $50$.} \label{fig:super-time-table}
\begin{tabular}{c|cccccccccc} \hline\noalign{\smallskip}
$n_1$ & $100$ & $200$ & $300$ & $400$ & $500$ & $600$ & $700$ & $800$ & $900$ & $1000$ \\ \noalign{\smallskip}\hline\noalign{\smallskip}
Time & $0.017$ & $0.046$ & $0.082$ & $0.120$ & $0.160$ & $0.203$ & $0.248$ & $0.298$ & $0.349$ & $0.399$ \\ \noalign{\smallskip}\hline
\end{tabular} \end{table}
\subsubsection{Experiment 2} In our second experiment, we investigated how the variation in tie density in both the students' and lecturers' preference lists affects the existence of a super-stable matching. To achieve this, we varied the tie density in the students' preference lists $t_{d_s} \; (0 \leq t_{d_s} \leq 0.05)$ and the tie density in the lecturers' preference lists $t_{d_l} \; (0 \leq t_{d_l} \leq 0.05)$, both in increments of $0.005$. For each pair of tie densities in $t_{d_s} \times t_{d_l}$, we randomly generated $1000$ {\sc spa-st} instances for various values of $n_1 \; (100 \leq n_1 \leq 1000)$ in increments of $100$. For each of these instances, we maintained the same ratio of projects, lecturers, project capacities and lecturer capacities as in Experiment 1. Considering our discussion from Experiment 1, we fixed the length of each student's preference list at $50$.
The result displayed in Fig.~\ref{super-experiment2} shows that increasing the tie density in both the students' and lecturers' preference lists reduces the proportion of instances that admit a super-stable matching. In fact, this proportion reduces further as the size of the instance increases. When ties occur only in the lecturers' preference lists, we found that a significantly higher proportion of instances admit a super-stable matching -- about $74\%$ of the randomly-generated {\sc spa-st} instances involving $1000$ students admitted a super-stable matching. The confidence interval for this value is $(0.71, 0.77)$. However, the reverse is the case when ties occur only in the students' preference lists. We have no explanation for this outcome.
\begin{figure}
\caption{ Result for Experiment 2. Each of the coloured square boxes represents the proportion of the $1000$ randomly-generated {\sc spa-st} instances that admit a super-stable matching, with respect to the tie density in the students' and lecturers' preference lists. See the colour bar transition, as this proportion ranges from dark ($100\%$) to light ($0\%$).}
\label{super-experiment2}
\end{figure}
\section{Discussions and Concluding Remarks} \label{section:conclusions} In this paper, we have described a linear-time algorithm to find a super-stable matching or report that no such matching exists, given an instance of {\sc spa-st}. We established that for instances that do admit a super-stable matching, our algorithm produces the student-optimal super-stable matching, in the sense that each assigned student has the best project that she could obtain in any super-stable matching. We leave open the formulation of a lecturer-oriented counterpart to our algorithm.
Further, we carried out an empirical evaluation of our algorithm's implementation. The purpose of our experiments was to investigate how the nature of the preference lists affects the existence (or otherwise) of super-stable matchings in an arbitrary instance of {\sc spa-st}. Based on the instances we generated randomly, the experimental results suggest that as we increase the size of the instance and the density of ties in the preference lists, the likelihood of a super-stable matching existing decreases. There was no significant uplift in this likelihood even as we increased the length of the students' preference lists. When the ties occur only in the lecturers' preference lists, we found that a significantly higher proportion of instances admit a super-stable matching. However, the reverse is the case when the ties occur only in the students' preference lists.
Given that there are typically more students than lecturers in practical applications, it could be that only lecturers are permitted to have some form of indifference over the students that they find acceptable, whilst each student might be able to provide a strict ordering over what may be a small number of projects that she finds acceptable. Further evaluation of our algorithm could investigate how other parameters (e.g., the popularity of some projects, or the position of the ties in the preference lists) affect the existence of a super-stable matching. It would also be interesting to examine the existence of super-stable matchings in real {\sc spa-st} datasets.
From a theoretical perspective, the following are other directions for future work. Let $I$ be an arbitrary instance of {\sc spa-st}. \begin{enumerate} \item Can we formalise the results on the probability of a super-stable matching existing in $I$? As mentioned in Section \ref{introduction}, this question has been partially explored for the Stable Roommates problem \cite{PI94}.
\item Is there a characterisation of the set of super-stable matchings in $I$ in terms of a lattice structure? It is known that the set of super-stable matchings in an instance of {\sc smt} forms a distributive lattice under the dominance relation \cite{Man02,Spi95}. To generalise this structural result for {\sc spa-st}, ideas from \cite{Man02,Spi95} would certainly be useful. \end{enumerate}
\begin{subappendices}
\renewcommand{\thesection}{\Alph{section}}
\section{An IP model for super-stability in {\small SPA-ST}} \label{appendixA} \subsection{Introduction} In this section, we describe an IP model for super-stability in {\sc spa-st}. Although a super-stable matching in an instance of {\sc spa-st} can be found in polynomial time (as illustrated by Theorem \ref{thrm:super-optimality}), our reason for constructing this model is purely experimental, namely to verify the correctness of our implementation of Algorithm {\sf SPA-ST-super}. Let $I$ be an instance of {\sc spa-st} involving a set $\mathcal{S} = \{s_1, s_2, \ldots, s_{n_1}\}$ of students, a set $\mathcal{P} = \{p_1, p_2, \ldots, p_{n_2}\}$ of projects and a set $\mathcal{L} = \{l_1, l_2, \ldots, l_{n_3}\}$ of lecturers. We construct an IP model $J$ of $I$ as follows. Firstly, we create binary variables $x_{i, j} \in \{0, 1\}$ $(1 \leq i \leq n_1, 1 \leq j \leq n_2)$ for each acceptable pair $(s_i, p_j) \in \mathcal{S} \times \mathcal{P}$ such that $x_{i, j}$ indicates whether $s_i$ is assigned to $p_j$ in a solution or not. Henceforth, we denote by $S$ a solution in the IP model $J$, and we denote by $M$ the matching derived from $S$ in the following natural way: if $x_{i,j} = 1$ under $S$ then $s_i$ is assigned to $p_j$ in $M$, otherwise $s_i$ is not assigned to $p_j$ in $M$.
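In a Gurobi/Python setting this construction might begin as in the sketch below. This is a sketch only: the index set \texttt{students} and the preference structure \texttt{A} (mapping each student to her acceptable projects) are assumed to be available from an instance reader and are not part of the model itself.

\begin{verbatim}
import gurobipy as gp
from gurobipy import GRB

model = gp.Model("spa-st-super")
# one binary variable per acceptable (student, project) pair
x = {(i, j): model.addVar(vtype=GRB.BINARY, name=f"x_{i}_{j}")
     for i in students for j in A[i]}
\end{verbatim}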
\subsection{Constraints} In this section, we give the set of constraints to ensure that the assignment obtained from a feasible solution in $J$ is a matching, and that the matching admits no blocking pair.
\paragraph{\textbf{Matching constraints.}} The feasibility of a matching can be ensured with the following three sets of constraints. \begin{align} \label{ineq:spa-st-ip-studentassignment} \sum\limits_{p_{j} \in A_{i}} x_{i,j} \leq 1 &\qquad (1 \leq i \leq n_1), \\ \label{ineq:spa-st-ip-projectcapacity} \sum\limits_{i = 1}^{n_1} x_{i,j} \leq c_j & \qquad (1 \leq j \leq n_2), \\ \label{ineq:spa-st-ip-lecturercapacity} \sum\limits_{i = 1}^{n_1} \; \sum\limits_{p_{j} \in P_k} x_{i,j} \leq d_k & \qquad (1 \leq k \leq n_3)\enspace. \end{align}
Note that Inequality \eqref{ineq:spa-st-ip-studentassignment} ensures that each student $s_i \in \mathcal{S}$ is not assigned to more than one project, while Inequalities \eqref{ineq:spa-st-ip-projectcapacity} and \eqref{ineq:spa-st-ip-lecturercapacity} ensure that the capacity of each project $p_j \in \mathcal{P}$ and each lecturer $l_k \in \mathcal{L}$ is not exceeded.
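Continuing the earlier sketch, these three sets of constraints might be added to the Gurobi model as follows; the index sets \texttt{projects} and \texttt{lecturers} and the dictionaries \texttt{c}, \texttt{d} and \texttt{P} (project capacities, lecturer capacities and offered projects) are assumptions of the sketch.

\begin{verbatim}
for i in students:        # each student assigned to at most one project
    model.addConstr(gp.quicksum(x[i, j] for j in A[i]) <= 1)
for j in projects:        # project capacities
    model.addConstr(gp.quicksum(x[i, j] for i in students if j in A[i]) <= c[j])
for k in lecturers:       # lecturer capacities
    model.addConstr(gp.quicksum(x[i, j] for j in P[k]
                                for i in students if j in A[i]) <= d[k])
\end{verbatim}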
Given an acceptable pair $(s_i, p_j)$, we define $\rank(s_i, p_j)$, the \textit{rank} of $p_j$ on $s_i$'s preference list, to be $r+1$, where $r$ is the number of projects that $s_i$ prefers to $p_j$. \label{rank} Clearly, projects that are tied together on $s_i$'s preference list have the same rank. Given a lecturer $l_k \in \mathcal{L}$ and a student $s_i \in \mathcal{L}_k$, we define $\rank(l_k, s_i)$, the \textit{rank} of $s_i$ on $l_k$'s preference list, to be $r+1$, where $r$ is the number of students that $l_k$ prefers to $s_i$. Similarly, students that are tied together on $l_k$'s preference list have the same rank. With respect to an acceptable pair $(s_i, p_j)$, we define $S_{i,j} = \{p_{j'} \in A_i: \rank(s_i, p_{j'}) < \rank(s_i, p_j)\}$, the set of projects that $s_i$ prefers to $p_j$. Let $l_k$ be the lecturer who offers $p_j$. We also define $T_{i,j,k} = \{s_{i'} \in \mathcal{L}_{k}^{j}: \rank(l_k, s_{i'}) < \rank(l_k, s_{i})\}$, the set of students that are better than $s_i$ on the projected preference list of $l_k$ for $p_j$. Finally, we define $D_{i,k} = \{s_{i'} \in \mathcal{L}_{k}: \rank(l_k, s_{i'}) < \rank(l_k, s_{i})\}$, the set of students that are better than $s_i$ on $l_k$'s preference list.
In what follows, we fix an arbitrary acceptable pair $(s_i, p_j)$ and we enforce constraints to ensure that $(s_i, p_j)$ does not form a blocking pair for the matching $M$. Henceforth, $l_k$ is the lecturer who offers $p_j$.
\paragraph{\textbf{Blocking pair constraints.}} First, we define $\theta_{i,j} = 1 - x_{i,j} - \sum\limits_{p_{j'} \in S_{i, j}}x_{i,j'}$. Intuitively, $\theta_{i,j} = 1$ if and only if $s_i$ is unassigned in $M$, or $s_i$ prefers $p_j$ to $M(s_i)$ or is indifferent between them. Henceforth, if $(s_i, p_j)$ forms a blocking pair for $M$ then we refer to $(s_i, p_j)$ as a blocking pair of type (i), type (ii) or type (iii), according as $(s_i, p_j)$ satisfies condition (i), (ii), or (iii) of Definition \ref{definition:super-stability}, respectively. We describe the constraints to avoid these types of blocking pair as follows.
\paragraph{\textbf{Type (i)}. \label{type-i}} First, we create a binary variable $\alpha_j$ in $J$ such that if $p_j$ is undersubscribed in $M$ then $\alpha_j = 1$. We enforce this condition by imposing the following constraint. \begin{eqnarray} \label{ineq:spa-st-ip-project-under} c_j \alpha_j \geq c_j - \sum\limits_{i' = 1}^{n_1} x_{i',j}, \end{eqnarray}
where $\sum_{i' = 1}^{n_1} x_{i',j} = |M(p_j)|$. If $p_j$ is undersubscribed in $M$ then the RHS of Inequality \eqref{ineq:spa-st-ip-project-under} is at least $1$ and this implies that $\alpha_j = 1$, otherwise $\alpha_j$ is not constrained. Next, we create a binary variable $\beta_k$ in $J$ such that if $l_k$ is undersubscribed in $M$ then $\beta_k = 1$. We enforce this condition by imposing the following constraint: \begin{eqnarray} \label{ineq:spa-st-ip-lecturerunder} d_k\beta_k \geq d_k - \sum\limits_{i' = 1}^{n_1} \; \sum\limits_{p_{j'} \in P_k} x_{i',j'}, \end{eqnarray}
where $\sum\limits_{i' = 1}^{n_1} \; \sum\limits_{p_{j'} \in P_k} x_{i',j'} = |M(l_k)|$. If $l_k$ is undersubscribed in $M$ then the RHS of Inequality \eqref{ineq:spa-st-ip-lecturerunder} is at least $1$ and this implies that $\beta_k = 1$, otherwise $\beta_k$ is not constrained. The following constraint ensures that $(s_i, p_j)$ does not form a type (i) blocking pair for $M$. \begin{align} \label{ineq:super-bp-type-i} \Aboxed{ \theta_{i,j} + \alpha_{j} + \beta_k \leq 2\enspace.} \end{align}
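In the same sketch, the type (i) constraints could be written as follows, where \texttt{lec\_of[j]} gives the lecturer offering $p_j$ and \texttt{S[i, j]} is the set $S_{i,j}$ (both assumed to be precomputed).

\begin{verbatim}
occ_p = {j: gp.quicksum(x[i, j] for i in students if j in A[i]) for j in projects}
occ_l = {k: gp.quicksum(occ_p[j] for j in P[k]) for k in lecturers}
alpha = {j: model.addVar(vtype=GRB.BINARY) for j in projects}
beta = {k: model.addVar(vtype=GRB.BINARY) for k in lecturers}
for j in projects:                  # p_j undersubscribed  =>  alpha_j = 1
    model.addConstr(c[j] * alpha[j] >= c[j] - occ_p[j])
for k in lecturers:                 # l_k undersubscribed  =>  beta_k = 1
    model.addConstr(d[k] * beta[k] >= d[k] - occ_l[k])

def theta(i, j):                    # 1 iff s_i would rather have p_j
    return 1 - x[i, j] - gp.quicksum(x[i, jp] for jp in S[i, j])

for (i, j) in x:                    # forbid type (i) blocking pairs
    model.addConstr(theta(i, j) + alpha[j] + beta[lec_of[j]] <= 2)
\end{verbatim}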
\paragraph{\textbf{Type (ii)}. \label{type-ii}} We create a binary variable $\eta_{k}$ in $J$ such that if $l_k$ is full in $M$ then $\eta_{k} = 1$. We enforce this condition by imposing the following constraint. \begin{eqnarray} \label{ineq:spa-st-ip-lecturerfull} d_k\eta_{k} \geq \left(1 + \sum\limits_{i' = 1}^{n_1} \; \sum\limits_{p_{j'} \in P_k} x_{i',j'}\right) - d_k\enspace. \end{eqnarray} If $l_k$ is full in $M$ then the RHS of Constraint \eqref{ineq:spa-st-ip-lecturerfull} is at least $1$ and this implies that $\eta_k = 1$, otherwise $\eta_k$ is not constrained. Next, we create a binary variable $\delta_{i,k}$ in $J$ such that if $s_i \in M(l_k)$, or $l_k$ prefers $s_i$ to a worst student in $M(l_k)$ or is indifferent between them, then $\delta_{i,k} = 1$. We enforce this condition by imposing the following constraint. \begin{eqnarray} \label{ineq:spa-st-ip-lecturerfull-student} d_k\delta_{i,k} \geq \sum\limits_{i' = 1}^{n_1} \; \sum\limits_{p_{j'} \in P_k} x_{i',j'} - \sum\limits_{s_{i'} \in D_{i,k}} \; \sum\limits_{p_{j'} \in P_k}x_{i',j'}\enspace. \end{eqnarray} Note that if $s_i \in M(l_k)$ or $l_k$ prefers $s_i$ to a worst student in $M(l_k)$ or $l_k$ is indifferent between them, then the RHS of Constraint \eqref{ineq:spa-st-ip-lecturerfull-student} is at least 1 and this implies that $\delta_{i,k} = 1$, otherwise $\delta_{i,k}$ is not constrained. The following constraint ensures that $(s_i, p_j)$ does not form a type (ii) blocking pair for $M$. \begin{align} \label{ineq:super-bp-type-ii} \Aboxed{ \theta_{i,j} + \alpha_{j} + \eta_{k} + \delta_{i,k} \leq 3\enspace.} \end{align}
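The type (ii) constraints follow the same pattern in the sketch (again illustrative only; \texttt{D[i, k]} denotes the precomputed set $D_{i,k}$).

\begin{verbatim}
eta = {k: model.addVar(vtype=GRB.BINARY) for k in lecturers}
for k in lecturers:                 # l_k full  =>  eta_k = 1
    model.addConstr(d[k] * eta[k] >= 1 + occ_l[k] - d[k])
delta = {}
for (i, j) in x:
    k = lec_of[j]
    if (i, k) not in delta:         # s_i at least as good as l_k's worst assignee
        delta[i, k] = model.addVar(vtype=GRB.BINARY)
        better = gp.quicksum(x[ip, jp] for ip in D[i, k]
                             for jp in P[k] if (ip, jp) in x)
        model.addConstr(d[k] * delta[i, k] >= occ_l[k] - better)
    model.addConstr(theta(i, j) + alpha[j] + eta[k] + delta[i, k] <= 3)
\end{verbatim}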
\paragraph{\textbf{Type (iii)}. \label{type-iii}} Next we create a binary variable $\gamma_{j}$ in $J$ such that if $p_j$ is full in $M$ then $\gamma_{j} = 1$. We enforce this condition by imposing the following constraint. \begin{eqnarray} \label{ineq:spa-st-ip-projectfull} c_j\gamma_{j} \geq \left( 1 + \sum\limits_{i' = 1}^{n_1} \; x_{i',j} \right) - c_j\enspace. \end{eqnarray}
where $\sum_{i' = 1}^{n_1} x_{i',j} = |M(p_j)|$. If $p_j$ is full in $M$ then the RHS of Inequality \eqref{ineq:spa-st-ip-projectfull} is at least $1$ and this implies that $\gamma_j = 1$, otherwise $\gamma_j$ is not constrained. Next, we create a binary variable $\lambda_{i,j,k}$ in $J$ such that if $l_k$ prefers $s_i$ to a worst student in $M(p_j)$ or is indifferent between them, then $\lambda_{i,j,k}=1$. We enforce this condition by imposing the following constraint. \begin{eqnarray} \label{ineq:spa-st-ip-projectfull-student} c_j\lambda_{i,j,k} \geq \sum\limits_{i' = 1}^{n_1} x_{i',j} - \sum\limits_{s_{i'} \in T_{i,j,k}} x_{i',j}\enspace. \end{eqnarray} Note that if $l_k$ prefers $s_i$ to a worst student in $M(p_j)$ or is indifferent between them, then the RHS of Inequality \eqref{ineq:spa-st-ip-projectfull-student} is at least 1 and this implies that $\lambda_{i,j,k} = 1$, otherwise $\lambda_{i,j,k}$ is not constrained. The following constraint ensures that $(s_i, p_j)$ does not form a type (iii) blocking pair for $M$. \begin{align} \label{ineq:super-bp-type-iii} \Aboxed{ \theta_{i,j} + \gamma_j + \lambda_{i,j,k} \leq 2\enspace.} \end{align}
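Likewise for type (iii) in the sketch (\texttt{T[i, j]} denotes the precomputed set $T_{i,j,k}$).

\begin{verbatim}
gamma = {j: model.addVar(vtype=GRB.BINARY) for j in projects}
for j in projects:                  # p_j full  =>  gamma_j = 1
    model.addConstr(c[j] * gamma[j] >= 1 + occ_p[j] - c[j])
lam = {}
for (i, j) in x:
    lam[i, j] = model.addVar(vtype=GRB.BINARY)
    better = gp.quicksum(x[ip, j] for ip in T[i, j] if (ip, j) in x)
    model.addConstr(c[j] * lam[i, j] >= occ_p[j] - better)
    model.addConstr(theta(i, j) + gamma[j] + lam[i, j] <= 2)
\end{verbatim}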
\subsection{Variables} \label{sect:spa-st-ip-variables} We define a collective notation for each set of variables involved in $J$ as follows: \begin{center} \begin{tabular}{p{5cm}p{0.2cm}p{6cm}} $A = \{ \alpha_{j}: 1 \leq j \leq n_2\}$, & & $\Gamma = \{ \gamma_{j}: 1 \leq j \leq n_2\}$, \\ $B = \{\beta_{k}: 1 \leq k \leq n_3\}$, & & $\Delta = \{ \delta_{i,k}: 1 \leq i \leq n_1, 1 \leq k \leq n_3\}$, \\ $N = \{\eta_{k}: 1 \leq k \leq n_3\}$, & & $X = \{ x_{i,j}: 1 \leq i \leq n_1, 1 \leq j \leq n_2\}$, \\ \multicolumn{3}{p{12cm}}{$\Lambda = \{\lambda_{i,j,k}: 1 \leq i \leq n_1, 1 \leq j \leq n_2, 1 \leq k \leq n_3 \}$\enspace.} \\
\end{tabular} \end{center}
\subsection{Objective function} On one hand, all super-stable matchings are of the same size, which nullifies the need for an objective function. On the other hand, optimization solvers require an objective function in addition to the variables and constraints in order to produce a solution. The objective function given below involves maximising the summation of all the $x_{i,j}$ binary variables. \begin{align} \label{ineq:super-objectivefunction} \Aboxed{\max \sum\limits_{i = 1}^{n_1} \; \sum\limits_{p_j \in A_i}x_{i,j}\enspace.} \end{align} Finally, we have constructed an IP model $J$ of $I$ comprising the set of integer-valued variables $A, B, N, X, \Gamma, \Delta, \mbox{ and } \Lambda$, the set of Inequalities \eqref{ineq:spa-st-ip-studentassignment} - \eqref{ineq:super-bp-type-iii} and an objective function \eqref{ineq:super-objectivefunction}. Note that $J$ can then be used to construct a super-stable matching in $I$, should one exist.
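In the sketch started above, the objective and the feasibility check would read:

\begin{verbatim}
model.setObjective(gp.quicksum(x.values()), GRB.MAXIMIZE)
model.optimize()
if model.Status == GRB.OPTIMAL:
    M = [(i, j) for (i, j), var in x.items() if var.X > 0.5]
else:                               # infeasible: no super-stable matching
    M = None
\end{verbatim}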
\subsection{Correctness of the IP model} Given an instance $I$ of {\sc spa-st} formulated as an IP model $J$ using the above transformation, we present the following lemmas regarding the correctness of $J$.
\begin{lemma} \label{lemma:super-solution-stability} A feasible solution $S$ to $J$ corresponds to a super-stable matching $M$ in $I$. \end{lemma} \begin{proof} Assume firstly that $J$ has a feasible solution $S$. Let $M = \{(s_i, p_j) \in \mathcal{S} \times \mathcal{P}: x_{i,j} = 1\}$ be the assignment in $I$ generated from $S$. We note that Inequality \eqref{ineq:spa-st-ip-studentassignment} ensures that each student is assigned in $M$ to at most one project. Moreover, Inequalities \eqref{ineq:spa-st-ip-projectcapacity} and \eqref{ineq:spa-st-ip-lecturercapacity} ensure that the capacity of each project and lecturer is not exceeded in $M$. Thus $M$ is a matching. We will prove that Inequalities \eqref{ineq:spa-st-ip-project-under} - \eqref{ineq:super-bp-type-iii} ensure that $M$ admits no blocking pair.
Suppose for a contradiction that there exists some acceptable pair $(s_i, p_j)$ that forms a blocking pair for $M$, where $l_k$ is the lecturer who offers $p_j$. This implies that either $s_i$ is unassigned in $M$ or $s_i$ prefers $p_j$ to $M(s_i)$ or is indifferent between them. Thus $\sum_{p_{j'} \in S_{i,j}} x_{{i},{j'}} = 0$. Moreover, since $s_i$ is not assigned to $p_j$ in $M$, we have that $x_{i,j} = 0$. Thus $\theta_{i,j} = 1$.
Now suppose $(s_i, p_j)$ forms a type (i) blocking pair for $M$. Then each of $p_j$ and $l_k$ is undersubscribed in $M$. Thus $\sum_{i' = 1}^{n_1} x_{i',j} < c_j$ and $\sum_{i' = 1}^{n_1} \; \sum_{p_{j'} \in P_k} x_{i',j'} < d_k$. This implies that the RHS of Inequality \eqref{ineq:spa-st-ip-project-under} and the RHS of Inequality \eqref{ineq:spa-st-ip-lecturerunder} are each strictly greater than $0$. Moreover, since $S$ is a feasible solution to $J$, $\alpha_j = \beta_k = 1$. Hence, the LHS of Inequality \eqref{ineq:super-bp-type-i} is strictly greater than $2$, a contradiction to the feasibility of $S$.
Now suppose $(s_i, p_j)$ forms a type (ii) blocking pair for $M$. Then $p_j$ is undersubscribed in $M$ and as explained above, $\alpha_j = 1$. Also, $l_k$ is full in $M$ and this implies that the RHS of Inequality \eqref{ineq:spa-st-ip-lecturerfull} is strictly greater than $0$. Since $S$ is a feasible solution, we have that $\eta_k = 1$. Furthermore, either $s_i \in M(l_k)$ or $l_k$ prefers $s_i$ to a worst student in $M(l_k)$ or $l_k$ is indifferent between them. In any of these cases, the RHS of Inequality \eqref{ineq:spa-st-ip-lecturerfull-student} is strictly greater than $0$. Thus $\delta_{i,k} = 1$, since $S$ is a feasible solution. Hence the LHS of Inequality \eqref{ineq:super-bp-type-ii} is strictly greater than 3, a contradiction to the feasibility of $S$.
Finally, suppose $(s_i, p_j)$ forms a type (iii) blocking pair for $M$. Then $p_j$ is full in $M$ and thus the RHS of Inequality \eqref{ineq:spa-st-ip-projectfull} is strictly greater than $0$. Since $S$ is a feasible solution, we have that $\gamma_j = 1$. In addition, $l_k$ prefers $s_i$ to a worst student in $M(p_j)$ or is indifferent between them. This implies that the RHS of Inequality \eqref{ineq:spa-st-ip-projectfull-student} is strictly greater than $0$. Thus $\lambda_{i,j,k} = 1$, since $S$ is a feasible solution. Hence the LHS of Inequality \eqref{ineq:super-bp-type-iii} is strictly greater than 2, a contradiction to the feasibility of $S$. Hence $M$ admits no blocking pair; and hence, $M$ is a super-stable matching in $I$. \qed \end{proof}
\begin{lemma} \label{lemma:super-stability-solution} A super-stable matching $M$ in $I$ corresponds to a feasible solution $S$ to $J$. \end{lemma} \begin{proof} Let $M$ be a super-stable matching in $I$. First we set all the binary variables involved in $J$ to $0$. For each $(s_i, p_j) \in M$, we set $x_{i,j} = 1$. Since $M$ is a matching, it is clear that Inequalities \eqref{ineq:spa-st-ip-studentassignment} - \eqref{ineq:spa-st-ip-lecturercapacity} are satisfied. For any acceptable pair $(s_i, p_j) \in (\mathcal{S} \times \mathcal{P}) \setminus M$ such that $s_i$ is unassigned in $M$ or $s_i$ prefers $p_j$ to $M(s_i)$ or is indifferent between them, we set $\theta_{i,j} = 1$. For any project $p_j \in \mathcal{P}$ such that $p_j$ is undersubscribed in $M$, we set $\alpha_j = 1$ and thus Inequality \eqref{ineq:spa-st-ip-project-under} is satisfied. For any lecturer $l_k \in \mathcal{L}$ such that $l_k$ is undersubscribed in $M$, we set $\beta_k = 1$ and thus Inequality \eqref{ineq:spa-st-ip-lecturerunder} is satisfied.
Now, for Inequality \eqref{ineq:super-bp-type-i} not to be satisfied, its LHS must be strictly greater than 2. This would only happen if there exists some $(s_i, p_j) \in (\mathcal{S} \times \mathcal{P}) \setminus M$, where $l_k$ is the lecturer who offers $p_j$, such that $\theta_{i,j} = 1$, $\alpha_j = 1$ and $\beta_k = 1$. This implies that either $s_i$ is unassigned in $M$ or $s_i$ prefers $p_j$ to $M(s_i)$ or is indifferent between them, and each of $p_j$ and $l_k$ is undersubscribed in $M$. Thus $(s_i, p_j)$ forms a type (i) blocking pair for $M$, a contradiction to the super-stability of $M$. Hence, Inequality \eqref{ineq:super-bp-type-i} is satisfied.
For any lecturer $l_k \in \mathcal{L}$ such that $l_k$ is full in $M$, we set $\eta_k = 1$. Thus Inequality \eqref{ineq:spa-st-ip-lecturerfull} is satisfied. Let $(s_i, p_j)$ be an acceptable pair such that $p_j \in P_k$ and $(s_i, p_j) \notin M$. If $s_i \in M(l_k)$ or $l_k$ prefers $s_i$ to a worst student in $M(l_k)$ or is indifferent between them, we set $\delta_{i,k} = 1$. Thus Inequality \eqref{ineq:spa-st-ip-lecturerfull-student} is satisfied. Suppose Inequality \eqref{ineq:super-bp-type-ii} is not satisfied. Then there exists $(s_i, p_j) \in (\mathcal{S} \times \mathcal{P}) \setminus M$, where $l_k$ is the lecturer who offers $p_j$, such that $\theta_{i,j} = 1$, $\alpha_j = 1$, $\eta_k = 1$ and $\delta_{i,k} = 1$. This implies that either $s_i$ is unassigned in $M$ or $s_i$ prefers $p_j$ to $M(s_i)$ or is indifferent between them. In addition, $p_j$ is undersubscribed in $M$, $l_k$ is full in $M$ and either $s_i \in M(l_k)$ or $l_k$ prefers $s_i$ to a worst student in $M(l_k)$ or is indifferent between them. Thus $(s_i, p_j)$ forms a type (ii) blocking pair for $M$, a contradiction to the super-stability of $M$. Hence Inequality \eqref{ineq:super-bp-type-ii} is satisfied.
Finally, for any project $p_j \in \mathcal{P}$ such that $p_j$ is full in $M$, we set $\gamma_j = 1$. Thus Inequality \eqref{ineq:spa-st-ip-projectfull} is satisfied. Let $l_k$ be the lecturer who offers $p_j$ and let $(s_i, p_j)$ be an acceptable pair. If $l_k$ prefers $s_i$ to a worst student in $M(p_j)$ or is indifferent between them, we set $\lambda_{i,j,k} = 1$. Thus Inequality \eqref{ineq:spa-st-ip-projectfull-student} is satisfied. Suppose Inequality \eqref{ineq:super-bp-type-iii} is not satisfied. Then there exists some $(s_i, p_j) \in (\mathcal{S} \times \mathcal{P}) \setminus M$ such that $\theta_{i,j} = 1$, $\gamma_j = 1$ and $\lambda_{i,j,k} = 1$. This implies that either $s_i$ is unassigned in $M$ or $s_i$ prefers $p_j$ to $M(s_i)$ or is indifferent between them. In addition, $p_j$ is full in $M$ and $l_k$ prefers $s_i$ to a worst student in $M(p_j)$ or is indifferent between them. Thus $(s_i, p_j)$ forms a type (iii) blocking pair for $M$, a contradiction to the super-stability of $M$. Hence, Inequality \eqref{ineq:super-bp-type-iii} is satisfied. Hence $S$, comprising the above assignments of values to the variables in $A \cup B \cup N \cup X \cup \Gamma \cup \Delta \cup \Lambda$, is a feasible solution to $J$. \qed \end{proof}
The following theorem is a consequence of Lemmas \ref{lemma:super-solution-stability} and \ref{lemma:super-stability-solution}. \begin{theorem} \label{theorem:super-stable-solution} Let $I$ be an instance of {\sc spa-st} and let $J$ be the IP model for $I$ as described above. A feasible solution to $J$ corresponds to a super-stable matching in $I$. Conversely, a super-stable matching in $I$ corresponds to a feasible solution to $J$. \end{theorem}
\end{subappendices}
\end{document}
\begin{document}
\title{On semi-vector spaces and semi-algebras}
\author{Giuliano G. La Guardia, Jocemar de Q. Chagas, Ervin K. Lenzi, Leonardo Pires \thanks{Giuliano G. La Guardia ({\tt \small [email protected]}), Jocemar de Q. Chagas ({\tt \small [email protected]}) and Leonardo Pires ({\tt \small [email protected]}) are with Department of Mathematics and Statistics, State University of Ponta Grossa (UEPG), 84030-900, Ponta Grossa - PR, Brazil. Ervin K. Lenzi ({\tt \small [email protected]}) is with Department of Physics, State University of Ponta Grossa (UEPG), 84030-900, Ponta Grossa - PR, Brazil. Corresponding author: Giuliano G. La Guardia ({\tt \small [email protected]}). }}
\maketitle
\begin{abstract} It is well-known that the theories of semi-vector spaces and semi-algebras -- which have not been studied much over time -- are applied in Fuzzy Set Theory in order to obtain extensions of the concept of fuzzy numbers as well as to provide new mathematical tools to investigate properties and new results on fuzzy systems. In this paper we investigate the theory of semi-vector spaces over the semi-field of nonnegative real numbers ${\mathbb R}_{0}^{+}$. We prove several results concerning semi-vector spaces and semi-linear transformations. Moreover, we introduce the concepts of eigenvalues and eigenvectors of a semi-linear operator, describing in some cases how to compute them. Topological properties of semi-vector spaces such as completeness and separability are also investigated. New families of semi-vector spaces derived from semi-metrics, semi-norms and semi-inner products, among others, are exhibited. Additionally, some results on semi-algebras are presented. \end{abstract}
\emph{keywords}: semi-vector space; semi-algebras; semi-linear operators
\section{Introduction}
The concept of semi-vector space was introduced by Prakash and Sertel in \cite{Prakash:1974}. Roughly speaking, semi-vector spaces are ``vector spaces'' where the scalars are in a semi-field. Although the concept of semi-vector space has been investigated over time, there exist few works in the literature dealing with such spaces \cite{Radstrom:1952,Prakash:1974,Prakash:1976,Pap:1980,Gahler:1999,Janyska:2007,Milfont:2021}. This is perhaps due to the limitations that such a concept brings, i.e., the non-existence of (additive) symmetric elements for some (or all) semi-vectors. A textbook on this topic of research is the book by Kandasamy~\cite{Kandasamy:2002}.
Although the seminal paper on semi-vector spaces is \cite{Prakash:1974}, the idea of such a concept was implicit in \cite{Radstrom:1952}, where Radstrom showed that a semi-vector space over the semi-field of nonnegative real numbers can be extended to a real vector space (see \cite[Theorem 1-B.]{Radstrom:1952}). In \cite{Prakash:1974}, Prakash and Sertel investigated the structure of topological semi-vector spaces. The authors were concerned with the study of the existence of fixed points in compact convex sets and also with generating min-max theorems in topological semi-vector spaces. In \cite{Prakash:1976}, Prakash and Sertel investigated properties of the topological semi-vector space consisting of nonempty compact subsets of a real Hausdorff topological vector space. In \cite{Pap:1980}, Pap investigated and formulated the concept of integrals of functions having, as codomain, complete semi-vector spaces. W. Gahler and S. Gahler \cite{Gahler:1999} showed that an (ordered) semi-vector space can be extended to an (ordered) vector space and an (ordered) semi-algebra can be extended to an (ordered) algebra. Moreover, they provided an extension of fuzzy numbers. Janyska et al.~\cite{Janyska:2007} developed the theory of semi-vector spaces further by proving useful results and defining the semi-tensor product of (semi-free) semi-vector spaces. They were also interested in proposing an algebraic model of physical scales. Canarutto~\cite{Canarutto:2012} explored the concept of semi-vector spaces to express aspects and to exploit nonstandard mathematical notions of basics of quantum particle physics on a curved Lorentzian background. Moreover, he dealt with the case of electroweak interactions. Additionally, in \cite{Canarutto:2016}, Canarutto provided a suitable formulation of the fundamental mathematical concepts with respect to quantum field theory. Such a paper presents a natural application of the concept of semi-vector spaces and semi-algebras. Recently, Bedregal et al. \cite{Milfont:2021} investigated (ordered) semi-vector spaces over a weak semi-field $K$ (i.e., both $(K, +)$ and $(K, \bullet)$ are monoids) in the context of fuzzy sets and applied the results in multi-criteria group decision-making.
In this paper we extend the theory of semi-vector spaces. The semi-field of scalars considered here is the semi-field of nonnegative real numbers. We prove several results in the context of semi-vector spaces and semi-linear transformations. We introduce the concepts of semi-eigenvalues and semi-eigenvectors of an operator and of a matrix, showing how to compute them in specific cases. We investigate topological properties such as completeness, compactness and separability of semi-vector spaces. Additionally, we present interesting new families of semi-vector spaces derived from semi-metrics, semi-norms, semi-inner products and metric-preserving functions, among others. Furthermore, we show some results concerning semi-algebras. Summarizing, we provide new results on semi-vector spaces and semi-algebras, although such theories are difficult to investigate due to the fact that vectors need not have additive symmetric elements. These new results can possibly be utilized in the theory of fuzzy sets in order to extend it or to generate new results concerning such a theory.
The paper is organized as follows. In Section~\ref{sec2} we recall some concepts on semi-vector spaces which will be utilized in this work. In Section~\ref{sec3} we present and prove several results concerning semi-vector spaces and semi-linear transformations. We introduce naturally the concepts of eigenvalue and eigenvector of a semi-linear operator and of matrices. Additionally, we exhibit and show interesting examples of semi-vector spaces derived from semi-metric, semi-norms, metric-preserving functions among others. Results concerning semi-algebras are also presented. In Section~\ref{sec3a} we show relationships between Fuzzy Set Theory and the theory of semi-vector spaces and semi-algebras. Finally, a summary of this paper is presented in Section~\ref{sec4}.
\section{Preliminaries}\label{sec2}
In this section we recall important facts on semi-vector spaces necessary for the development of this work. In order to define formally such concept, it is necessary to define the concepts of semi-ring and semi-field.
\begin{definition}\label{defSR} A semi-ring $(S, + , \bullet )$ is a set $S$ endowed with two binary operations, $+: S\times S\longrightarrow S$ (addition), $\bullet: S\times S\longrightarrow S$ (multiplication) such that: $\operatorname{(1)}$ $(S, +)$ is a commutative monoid; $\operatorname{(2)}$ $(S, \bullet)$ is a semigroup; $\operatorname{(3)}$ the multiplication $\bullet$ is distributive with respect to $+$: $\forall \ x, y, z \in S$, $(x + y)\bullet z = x\bullet z + y\bullet z$ and $x\bullet(y + z ) = x\bullet y + x\bullet z$. \end{definition}
We write $S$ instead of writing $(S, + , \bullet )$ if there is not possibility of confusion. If the multiplication $\bullet$ is commutative then $S$ is a commutative semi-ring. If there exists $1 \in S$ such that, $ \forall \ x \in S$ one has $1\bullet x = x\bullet 1 = x$, then $S$ is a semi-ring with identity.
\begin{definition}\cite[Definition 3.1.1]{Kandasamy:2002}\label{defSF} A semi-field is an ordered triple $(K, +, \bullet )$ which is a commutative semi-ring with unit satisfying the following conditions: $\operatorname{(1)}$ $\forall \ x, y \in K$, if $x+y=0$ then $x=y=0$; $\operatorname{(2)}$ if $x, y \in K$ and $x\bullet y = 0$ then $x=0$ or $y=0$. \end{definition}
Before proceeding further, it is interesting to observe that in \cite{Gahler:1999} the authors considered the additive cancellation law in the definition of semi-vector space. In \cite{Janyska:2007}, the authors did not assume the existence of the zero (null) vector.
In this paper we consider the definition of a semi-vector space in the context of that shown in \cite{Gahler:1999}, Sect.3.1.
\begin{definition}\label{defSVS} A semi-vector space over a semi-field $K$ is an ordered triple $(V,$ $+, \cdot)$, where $V$ is a set endowed with the operations $+: V\times V\longrightarrow V$ (vector addition) and $\cdot: K\times V\longrightarrow V$ (scalar multiplication) such that: \begin{itemize} \item [ $\operatorname{(1)}$] $(V, +)$ is an abelian monoid equipped with the additive cancellation law: $\forall \ u, v, w \in V$, if $u + v = u + w$ then $v = w$; \item [ $\operatorname{(2)}$] $\forall$ $\alpha\in K$ and $\forall$ $u, v \in V$, $\alpha (u+v)=\alpha u + \alpha v$; \item [ $\operatorname{(3)}$] $\forall$ $\alpha, \beta \in K$ and $\forall$ $v\in V$, $(\alpha + \beta)v= \alpha v + \beta v$; \item [ $\operatorname{(4)}$] $\forall$ $\alpha, \beta \in K$ and $\forall$ $v\in V$, $(\alpha\beta)v=\alpha (\beta v)$; \item [ $\operatorname{(5)}$] $\forall$ $v \in V$ and $1 \in K$, $1v=v$. \end{itemize} \end{definition}
Note that from Item~$\operatorname{(1)}$ of Definition~\ref{defSVS}, all semi-vector spaces considered in this paper are \emph{regular}, that is, the additive cancellation law is satisfied. The zero (or null) vector of $V$, which is unique, will be denoted by $0_{V}$. Let $v \in V$, $v\neq 0_{V}$. If there exists $u \in V$ such that $v + u = 0_{V}$ then $v$ is said to be \emph{symmetrizable}. A semi-vector space $V$ is said to be \emph{simple} if the unique symmetrizable element is the zero vector $0_{V}$. In other words, $V$ is simple if it has no nonzero symmetrizable elements.
\begin{definition}\cite[Definition 1.4]{Janyska:2007}\label{defSBasis} Let $V$ be a simple semi-vector space over ${\mathbb R}_{0}^{+}$. A subset $B \subset V$ is called a semi-basis of $V$ if every $v \in V$, $v\neq 0$, can be written in a unique way as $v = \displaystyle\sum_{i \in I_v}^{} v^{(i)} b_i$, where $v^{(i)} \in {\mathbb R}^{+}$, $b_i \in B$ and $I_v$ is a finite family of indices uniquely determined by $v$. The finite subset $B_v \subset B$ defined by $B_v := \{b_i \}_{i \in I_v }$ is uniquely determined by $v$. If a semi-vector space $V$ admits a semi-basis then it is said to be semi-free. \end{definition}
The concept of semi-dimension can be defined for semi-free semi-vector spaces in a way analogous to that of dimension for vector spaces, due to the next result.
\begin{corollary}\cite[Corollary 1.7]{Janyska:2007} Let $V$ be a semi-free semi-vector space. Then all semi-bases of $V$ have the same cardinality. \end{corollary} Therefore, the semi-dimension of a semi-free semi-vector space is the cardinality of a semi-basis (consequently, of all semi-bases) of $V$. We next present some examples of semi-vector spaces.
\begin{example}\label{ex1} All real vector spaces are semi-vector spaces, but they are not simple. \end{example}
\begin{example}\label{ex2} The set ${[{\mathbb R}_{0}^{+}]}^{n}=\underbrace{{\mathbb R}_{0}^{+} \times \ldots \times {\mathbb R}_{0}^{+}}_{n \operatorname{times}}$ endowed with the usual sum of coordinates and scalar multiplication is a semi-vector space over ${\mathbb R}_{0}^{+}$. \end{example}
\begin{example}\label{ex3} The set ${\mathcal M}_{n\times m}({\mathbb R}_{0}^{+})$ of $n \times m$ matrices whose entries are nonnegative real numbers, equipped with the sum of matrices and the multiplication of a matrix by a scalar (in ${\mathbb R}_{0}^{+}$, of course), is a semi-vector space over ${\mathbb R}_{0}^{+}$. \end{example}
\begin{example}\label{ex4} The set ${\mathcal P}_{n}[x]$ of polynomials with coefficients from ${\mathbb R}_{0}^{+}$ and degree less than or equal to $n$, equipped with the usual polynomial addition and scalar multiplication, is a semi-vector space over ${\mathbb R}_{0}^{+}$. \end{example}
\begin{definition}\label{semi-subspace} Let $(V, +, \cdot )$ be a semi-vector space over ${\mathbb R}_{0}^{+}$. We say that a non-empty subset $W$ of $V$ is a semi-subspace of $V$ if $W$ is closed under both addition and scalar multiplication of $V$, that is, \begin{itemize} \item [ $\operatorname{(1)}$] $\forall \ w_1 , w_2 \in W \Longrightarrow w_1 + w_2 \in W$; \item [ $\operatorname{(2)}$] $\forall \ \lambda \in {\mathbb R}_{0}^{+}$ and $\forall \ w \in W \Longrightarrow \lambda w \in W$. \end{itemize} \end{definition}
Note that, for each $\lambda \in {\mathbb R}_{0}^{+}$, one has $\lambda 0_{V} = \lambda (0_{V} + 0_{V}) = \lambda 0_{V} + \lambda 0_{V}$, so regularity gives $\lambda 0_{V} = 0_{V}$. Moreover, if $ v \in V$, it follows that $0 v = (0 + 0) v = 0 v + 0 v$; applying the regularity one obtains $0 v =0_{V}$. Therefore, from Item~$\operatorname{(2)}$, every semi-subspace contains the zero vector.
\begin{example}\label{ex4a} Let ${\mathbb Q}_{0}^{+}$ denote the set of nonnegative rational numbers. The semi-vector space ${\mathbb Q}_{0}^{+}$, regarded as a semi-vector space over ${\mathbb Q}_{0}^{+}$, is a semi-subspace of ${\mathbb R}_{0}^{+}$ regarded as a semi-vector space over ${\mathbb Q}_{0}^{+}$. \end{example}
\begin{example}\label{ex4b} For each positive integer $ i \leq n$, the subset ${\mathcal P}_{(i)}[x]\cup \{0_{p}\}$, where ${\mathcal P}_{(i)}[x]=\{p(x); \partial (p(x))=i \} $ and $0_{p}$ is the null polynomial, is a semi-subspace of ${\mathcal P}_{n}[x]$, shown in Example~\ref{ex4}. \end{example}
\begin{example}\label{ex4c} The set of diagonal matrices of order $n$ with entries in ${\mathbb R}_{0}^{+}$ is a semi-subspace of ${\mathcal M}_{n}({\mathbb R}_{0}^{+})$, where the latter is the semi-vector space of square matrices with entries in ${\mathbb R}_{0}^{+}$ (according to Example~\ref{ex3}). \end{example}
\begin{definition}\cite[Definition 1.22]{Janyska:2007}\label{semilineartrans} Let $V$ and $W$ be two semi-vector spaces and $T: V\longrightarrow W$ be a map. We say that $T$ is a semi-linear transformation if: $\operatorname{(1)}$ $\forall \ v_1, v_2 \in V$, $T(v_1 + v_2) = T(v_1) + T(v_2)$; $\operatorname{(2)}$ $\forall \lambda \in {\mathbb R}_{0}^{+}$ and $\forall \ v \in V$, $T(\lambda v) =\lambda T(v)$. \end{definition}
If $U$ and $V$ are semi-vector spaces then the set $\operatorname{Hom}(U, V)=\{ T:U\longrightarrow V; T \operatorname{is \ semi-linear} \}$ is also a semi-vector space.
\section{The New Results}\label{sec3}
We start this section with important remarks.
\begin{remark}\label{mainremark} \begin{itemize} \item [ $\operatorname{(1)}$] Throughout this section we always consider that the semi-field $K$ is the set of nonnegative real numbers, i.e., $K= {\mathbb R}_{0}^{+}={\mathbb R}^{+}\cup \{0\}$.
\item [ $\operatorname{(2)}$] In the whole section (except Subsection~\ref{subsec2}) we assume that the semi-vector spaces $V$ are simple, i.e., the unique symmetrizable element is the zero vector $0_{V}$.
\item [ $\operatorname{(3)}$] It is well known that a semi-vector space $(V, +, \cdot)$ can always be extended to a vector space by means of the equivalence relation on $V \times V$ defined by $(u_1 , v_1 ) \sim (u_2 , v_2 )$ if and only if $u_1 + v_2 = v_1 + u_2$ (see \cite{Radstrom:1952}; see also \cite[Section 3.4]{Gahler:1999}). However, our results are obtained without utilizing such a natural embedding. In other words, if one wants to compute, for instance, the eigenvalues of a matrix defined over ${\mathbb R}_{0}^{+}$, one cannot solve the problem in the associated vector space and then discard the negative ones. Put differently, all computations performed here are restricted to nonnegative real numbers and to the fact that no vector (with the exception of $0_V$) is symmetrizable. We will show that, even so, several results can be obtained. \end{itemize} \end{remark}
\begin{proposition}\label{prop1} Let $V$ be a semi-vector space over ${\mathbb R}_{0}^{+}$. Then the following hold: \begin{itemize} \item [ $\operatorname{(1)}$] let $ v \in V$, $ v \neq 0_{V}$, and $\lambda \in {\mathbb R}_{0}^{+}$; if $\lambda v = 0_{V}$ then $\lambda = 0$; \item [ $\operatorname{(2)}$] if $\alpha , \beta \in {\mathbb R}_{0}^{+}$, $v \in V$ and $ v \neq 0_{V}$, then the equality $\alpha v = \beta v$ implies that $\alpha = \beta$. \end{itemize} \end{proposition} \begin{proof} $\operatorname{(1)}$ If $\lambda \neq 0$ then there exists its multiplicative inverse ${\lambda}^{-1}$, hence $v = 1 v = ({\lambda}^{-1}\lambda) v = {\lambda}^{-1}(\lambda v) = {\lambda}^{-1} 0_{V}= 0_{V}$, a contradiction.\\ $\operatorname{(2)}$ If $\alpha \neq \beta$, assume w.l.o.g. that $\alpha > \beta$, i.e., there exists a positive real number $c$ such that $\alpha = \beta + c$. Thus, $\alpha v = \beta v$ implies $\beta v + c v = \beta v$. From the cancellation law we have $c v = 0_{V}$, and from Item~$\operatorname{(1)}$ it follows that $c = 0$, a contradiction. \end{proof}
We next introduce the concepts of eigenvalue and eigenvector of a semi-linear operator, which seem not to have been considered in the literature before.
\begin{definition}\label{eigenvector} Let $V$ be a semi-vector space and $T:V\longrightarrow V$ be a semi-linear operator. If there exist a non-zero vector $v \in V$ and a nonnegative real number $\lambda$ such that $T(v)=\lambda v$, then $\lambda$ is an eigenvalue of $T$ and $v$ is an eigenvector of $T$ associated with $\lambda$. \end{definition}
As is natural, the set of eigenvectors associated with a given eigenvalue, together with the zero vector, has a semi-subspace structure.
\begin{proposition}\label{eigenspace} Let $V$ be a semi-vector space over ${\mathbb R}_{0}^{+}$ and $T:V\longrightarrow V$ be a semi-linear operator. Then the set $V_{\lambda} = \{ v \in V ; T(v)=\lambda v \}\cup \{0_{V}\}$ is a semi-subspace of $V$. \end{proposition} \begin{proof} From hypotheses, $V_{\lambda}$ is non-empty. Let $u, v \in V_{\lambda}$, i.e., $T(u)=\lambda u $ and $T(v)=\lambda v $. Hence, $T(u + v )= T(u) + T(v)= \lambda (u + v )$, i.e., $u + v \in V_{\lambda}$. Further, if $\alpha \in {\mathbb R}_{0}^{+}$ and $u \in V_{\lambda}$, it follows that $T(\alpha u)=\alpha T(u)= \lambda (\alpha u)$, that is, $\alpha u \in V_{\lambda}$. Therefore, $V_{\lambda}$ is a semi-subspace of $V$. \end{proof}
The next natural step would be to introduce the characteristic polynomial of a matrix, as in standard Linear Algebra. However, how should one compute $\det (A -\lambda I)$ if $-\lambda$ can be a negative real number? Because of this we must be careful when computing the eigenvalues and eigenvectors of a matrix. In fact, the main tools to be utilized in computing eigenvalues/eigenvectors of a square matrix whose entries are nonnegative real numbers are the additive cancellation law in ${\mathbb R}_{0}^{+}$ and the fact that positive real numbers have multiplicative inverses. However, in many cases such tools are not sufficient to solve the problem. Let us see some cases in which it is possible to compute the eigenvalues/eigenvectors of a matrix.
\begin{example}\label{examatr1} Let us see how to obtain (if they exist) the eigenvalues/eigenvectors of a diagonal matrix $A \in {\mathcal M}_{2}({\mathbb R}_{0}^{+})$, \begin{eqnarray*} A= \left[\begin{array}{cc} a & 0\\ 0 & b\\ \end{array} \right], \end{eqnarray*} where $a \neq b$.
Let us assume first that $a, b > 0$. Solving the equation $A v = \lambda v$, that is, \begin{eqnarray*} \left[\begin{array}{cc} a & 0\\ 0 & b\\ \end{array} \right] \left[\begin{array}{c} x\\ y\\ \end{array} \right]= \left[\begin{array}{c} \lambda x\\ \lambda y\\ \end{array} \right], \end{eqnarray*} we obtain $\lambda = a$ with associated eigenvector $x(1, 0)$ and $\lambda = b$ with associated eigenvector $y(0, 1)$.
If $a\neq 0$ and $b = 0$, then $\lambda = a$ with eigenvectors $x(1, 0)$ and $\lambda = 0$ with eigenvectors $y(0, 1)$.
If $a = 0$ and $b \neq 0$, then $\lambda = b$ with eigenvectors $y(0, 1)$ and $\lambda = 0$ with eigenvectors $x(1, 0)$. \end{example}
\begin{example}\label{examatr2} Let $A \in {\mathcal M}_{2}({\mathbb R}_{0}^{+})$ be a matrix of the form \begin{eqnarray*} A= \left[\begin{array}{cc} a & b\\ 0 & a\\ \end{array} \right], \end{eqnarray*} where $a \neq b$ are positive real numbers. Let us solve the matrix equation:
\begin{eqnarray*} \left[\begin{array}{cc} a & b\\ 0 & a\\ \end{array} \right] \left[\begin{array}{c} x\\ y\\ \end{array} \right]= \left[\begin{array}{c} \lambda x\\ \lambda y\\ \end{array} \right]. \end{eqnarray*} If $ y \neq 0$, $\lambda = a$; hence $b y = 0$, which implies $b=0$, a contradiction. If $ y = 0$, $x \neq 0$; hence $\lambda = a$ with eigenvectors $(x, 0)$. \end{example}
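The same tools occasionally handle matrices which are neither diagonal nor triangular. For instance, take
\begin{eqnarray*} A= \left[\begin{array}{cc} 2 & 1\\ 1 & 2\\ \end{array} \right] \in {\mathcal M}_{2}({\mathbb R}_{0}^{+}). \end{eqnarray*}
The equation $Av=\lambda v$ reads $2x + y = \lambda x$ and $x + 2y = \lambda y$. If $x=0$ the first equation gives $y=0$, and if $y=0$ the second one gives $x=0$; hence $x, y > 0$. Adding the two equations yields $3(x+y)=\lambda (x+y)$ and, since $x+y>0$ has a multiplicative inverse, $\lambda = 3$. Substituting back, $2x + y = 3x = 2x + x$, and the cancellation law gives $y = x$. Therefore $\lambda = 3$ is the only eigenvalue, with eigenvectors $x(1, 1)$, $x > 0$; the remaining real eigenvalue of $A$, namely $\lambda = 1$, does not occur here because its eigenvectors necessarily have a negative entry.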
If $V$ and $W$ are semi-free semi-vector spaces then it is possible to define the matrix of a semi-linear transformation $T: V \longrightarrow W$ as in the usual case (vector spaces).
\begin{definition}\label{semi-free matrix} Let $T: V \longrightarrow W$ be a semi-linear transformation between semi-free semi-vector spaces with semi-bases $B_1$ and $B_2$, respectively. Then the matrix $[T]_{B_1}^{B_2}$, constructed as in the case of vector spaces, is called the matrix of the transformation $T$ with respect to $B_1$ and $B_2$. \end{definition}
\begin{theorem}\label{diagonalmatrix} Let $V$ be a semi-free semi-vector space over ${\mathbb R}_{0}^{+}$ and let $T:V\longrightarrow V$ be a semi-linear operator. Then $V$ admits a semi-basis $B = \{ v_1 , v_2 , \ldots , v_n \}$ such that ${[T]}_{B}^{B}$ is diagonal if and only if $V$ admits a semi-basis consisting of eigenvectors of $T$. \end{theorem} \begin{proof} The proof is analogous to the case of vector spaces. Let $B=\{ v_1 , v_2 , \ldots ,$ $v_n \}$ be a semi-basis of $V$ whose elements are eigenvectors of $T$. We then have: \begin{eqnarray*} T(v_1)= {\lambda}_1 v_1 + 0 v_2 + \ldots + 0 v_n,\\ T(v_2)= 0 v_1 + {\lambda}_{2} v_2 + \ldots + 0 v_n,\\ \vdots\\ T(v_n)= 0 v_1 + 0 v_2 + \ldots + {\lambda}_{n} v_n, \end{eqnarray*} which implies that $[T]_{B}^{B}$ is of the form \begin{eqnarray*} [T]_{B}^{B}= \left[\begin{array}{ccccc} {\lambda}_1 & 0 & 0 & \ldots & 0\\ 0 & {\lambda}_2 & 0 & \ldots & 0\\ \vdots & \vdots & \vdots & \ldots & \vdots\\ 0 & 0 & 0 & \ldots & {\lambda}_{n}\\ \end{array} \right]. \end{eqnarray*} On the other hand, let $B^{*}= \{ w_1 , w_2 , \ldots , w_n \}$ be a semi-basis of $V$ such that $[T]_{B^{*}}^{B^{*}}$ is diagonal: \begin{eqnarray*} [T]_{B^{*}}^{B^{*}}=\left[\begin{array}{ccccc} {\alpha}_1 & 0 & 0 & \ldots & 0\\ 0 & {\alpha}_2 & 0 & \ldots & 0\\ \vdots & \vdots & \vdots & \ldots & \vdots\\ 0 & 0 & 0 & \ldots & {\alpha}_{n}\\ \end{array} \right]; \end{eqnarray*} thus,\\ \begin{eqnarray*} T(w_1)= {\alpha}_1 w_1 + 0 w_2 + \ldots + 0 w_n = {\alpha}_1 w_1,\\ T(w_2)= 0 w_1 + {\alpha}_{2} w_2 + \ldots + 0 w_n = {\alpha}_{2} w_2,\\ \vdots\\ T(w_n)= 0 w_1 + 0 w_2 + \ldots + {\alpha}_{n} w_n = {\alpha}_{n} w_{n}. \end{eqnarray*} This means that the $w_i$ are eigenvectors of $T$ with corresponding eigenvalues ${\alpha}_{i}$, for all $i = 1, 2, \ldots , n$. \end{proof}
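To illustrate, in Example~\ref{examatr1} with $a, b > 0$ the canonical semi-basis $B=\{(1, 0), (0, 1)\}$ of ${[{\mathbb R}_{0}^{+}]}^{2}$ consists of eigenvectors of the semi-linear operator $v \mapsto Av$, and the associated matrix ${[T]}_{B}^{B}$ is precisely the diagonal matrix $A$.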
\begin{definition}\label{kernel} Let $T: V \longrightarrow W$ be a semi-linear transformation. The set $\operatorname{Ker}(T)=\{ v \in V ; T(v)=0_{W}\}$ is called the kernel of $T$. \end{definition}
\begin{proposition}\label{subkernel} Let $T: V \longrightarrow W$ be a semi-linear transformation. Then the following hold: \begin{itemize} \item [ $\operatorname{(1)}$] $\operatorname{Ker}(T)$ is a semi-subspace of $V$; \item [ $\operatorname{(2)}$] if $T$ is injective then $\operatorname{Ker}(T) = \{0_{V}\}$; \item [ $\operatorname{(3)}$] if $V$ has semi-dimension $1$ then $\operatorname{Ker}(T) = \{0_{V}\}$ implies that $T$ is injective. \end{itemize} \end{proposition} \begin{proof} $\operatorname{(1)}$ We have $T(0_{V})= T(0_{V})+T(0_{V})$. Since $W$ is regular, it follows that $T(0_{V})=0_{W}$, which implies $\operatorname{Ker}(T) \neq \emptyset$. If $u, v \in \operatorname{Ker}(T)$ and $\lambda \in {\mathbb R}_{0}^{+}$, then $u + v \in \operatorname{Ker}(T)$ and $\lambda v \in \operatorname{Ker}(T)$, which implies that $\operatorname{Ker}(T)$ is a semi-subspace of $V$.\\ $\operatorname{(2)}$ Since $T(0_{V})=0_{W}$, it follows that $\{0_{V}\}\subseteq \operatorname{Ker}(T)$. On the other hand, let $ u \in \operatorname{Ker}(T)$, that is, $T(u)=0_{W}$. Since $T$ is injective, one has $u = 0_{V}$. Hence, $\operatorname{Ker}(T) = \{0_{V}\}$.\\ $\operatorname{(3)}$ Let $B=\{ v_0 \}$ be a semi-basis of $V$. Assume that $T(u) = T(v)$, where $u, v \in V$ are such that $u = \alpha v_0$ and $v = \beta v_0 $. Hence, $\alpha T(v_0) = \beta T(v_0 )$. Since $\operatorname{Ker}(T) = \{0_{V}\}$ and $v_0 \neq 0$, it follows that $T(v_0) \neq 0$. From Item~$\operatorname{(2)}$ of Proposition~\ref{prop1}, one has $\alpha = \beta$, i.e., $u = v$. \end{proof}
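The hypothesis on the semi-dimension in Item~$\operatorname{(3)}$ cannot be dropped. For instance, consider the semi-linear transformation $T:{[{\mathbb R}_{0}^{+}]}^{2}\longrightarrow {\mathbb R}_{0}^{+}$ given by $T(x, y)= x + y$. By Item~$\operatorname{(1)}$ of Definition~\ref{defSF}, $x + y = 0$ forces $x = y = 0$, so $\operatorname{Ker}(T)=\{(0, 0)\}$; nevertheless $T(1, 0)=T(0, 1)=1$, so $T$ is not injective.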
\begin{definition}\label{image} Let $T: V \longrightarrow W$ be a semi-linear transformation. The image of $T$ is the set of all vectors $w \in W$ such that there exists $v \in V$ with $T(v)=w$, that is, $\operatorname{Im}(T)=\{ w \in W ; \exists \ v \in V \operatorname{with} T(v)=w\}$. \end{definition}
\begin{proposition}\label{subImage} Let $T: V \longrightarrow W$ be a semi-linear transformation. Then the image of $T$ is a semi-subspace of $W$. \end{proposition} \begin{proof} The set $\operatorname{Im}(T)$ is non-empty because $T(0_{V})=0_{W}$. It is easy to see that if $w_1 , w_2 \in \operatorname{Im}(T)$ and $\lambda \in {\mathbb R}_{0}^{+}$, then $ w_1 + w_2 \in \operatorname{Im}(T)$ and $\lambda w_1 \in \operatorname{Im}(T)$. \end{proof}
\begin{theorem}\label{isosemi} Let $V$ be a semi-free semi-vector space over ${\mathbb R}_{0}^{+}$ of semi-dimension $n$. Then $V$ is isomorphic to $({\mathbb R}_{0}^{+})^{n}$. \end{theorem} \begin{proof} Let $B = \{ v_1 , v_2 , \ldots , v_n \}$ be a semi-basis of $V$ and consider the canonical semi-basis $e_i = (0, 0, \ldots , $ $0, \underbrace{1}_{i}, 0, \ldots, 0)$ of $({\mathbb R}_{0}^{+})^{n}$, where $i=1, 2, \ldots , n$. Define the map $T:V \longrightarrow ({\mathbb R}_{0}^{+})^{n}$ as follows: for each $v = \displaystyle\sum_{i=1}^{n}a_i v_i \in V$, put $T(v) = \displaystyle\sum_{i=1}^{n}a_i e_i$. It is easy to see that $T$ is a bijective semi-linear transformation, i.e., $V$ is isomorphic to $({\mathbb R}_{0}^{+})^{n}$, as required. \end{proof}
\subsection{Complete Semi-Vector Spaces}\label{subsec1}
We here define and study complete semi-vector spaces, i.e., semi-vector spaces whose norm (inner product) induces a metric under which the space is complete.
\begin{definition}\label{semiBanach}
Let $V$ be a semi-vector space over ${\mathbb R}_{0}^{+}$. If there exists a norm $\| \ \|:V \longrightarrow {\mathbb R}_{0}^{+}$ on $V$ we say that $V$ is a normed semi-vector space (or normed semi-space, for short). If the norm defines a metric on $V$ under which $V$ is complete then $V$ is said to be a Banach semi-vector space. \end{definition}
\begin{definition}\label{semiHilbert} Let $V$ be a semi-vector space over ${\mathbb R}_{0}^{+}$. If there exists an inner product $\langle \ , \ \rangle:V\times V \longrightarrow {\mathbb R}_{0}^{+}$ on $V$ then $V$ is an inner product semi-vector space (or inner product semi-space). If the inner product defines a metric on $V$ under which $V$ is complete then $V$ is said to be a Hilbert semi-vector space. \end{definition}
The well-known norms on ${\mathbb R}^n$ are also norms on $[{\mathbb R}_{0}^{+}]^{n}$, as we show in the next propositions.
\begin{proposition}\label{R+1} Let $V = [{\mathbb R}_{0}^{+}]^{n}$ be the Euclidean semi-vector space
(over ${\mathbb R}_{0}^{+}$) of semi-dimension $n$ . Define the function $\| \ \|:V \longrightarrow
{\mathbb R}_{0}^{+}$ as follows: if $x = (x_1 , x_2 , \ldots ,$ $x_n ) \in V$, put $\| x \|=\sqrt{x_1^2 + x_2^2 + \ldots + x_n^2}$. Then $\| \ \|$ is a norm on $V$, called the Euclidean norm on $V$. \end{proposition}
\begin{proof}
It is clear that $\| x \| = 0$ if and only if $x=0$ and for all $\alpha \in {\mathbb R}_{0}^{+}$ and $x \in V$,
$\| \alpha x \| = |\alpha | \| x \|$. To show the triangle inequality it is sufficient to apply the Cauchy-Schwarz inequality in ${\mathbb R}_{0}^{+}$: if $x = (x_1 , x_2 , \ldots , x_n )$ and $y = (y_1 , y_2 , \ldots , y_n )$ are semi-vectors in $V$ then $\displaystyle\sum_{i=1}^{n} x_i y_i \leq {\left(\displaystyle\sum_{i=1}^{n} x_i^2 \right)}^{1/2} \cdot {\left(\displaystyle\sum_{i=1}^{n} y_i^2 \right)}^{1/2}$. \end{proof}
In the next result we show that the Euclidean norm on $[{\mathbb R}_{0}^{+}]^{n}$ generates the Euclidean metric on it.
\begin{proposition}\label{R+1a} Let $x = (x_1 , x_2 , \ldots ,x_n )$, $y = (y_1 , y_2 , \ldots , y_n )$ be semi-vectors in $V = [{\mathbb R}_{0}^{+}]^{n}$. Define the function $d:V \times V \longrightarrow {\mathbb R}_{0}^{+}$ as follows: for every fixed $i$, if $x_i = y_i$ put $c_i =0$; if $x_i \neq y_i$, put ${\varphi}_i = {\psi}_i + c_i$, where ${\varphi}_i =\max \{x_i, y_i \}$ and ${\psi}_i =\min \{ x_i , y_i\}$ (in this case, $c_i > 0$); then consider $d(x, y) = \sqrt{c_1^2 + \ldots + c_n^2}$. The function $d$ is a metric on $V$. \end{proposition}
\begin{remark} Note that in Proposition~\ref{R+1a} we could have defined $c_i$ simply by the nonnegative real number satisfying $\max \{x_i, y_i \}=\min \{x_i, y_i \} + c_i$. However, we prefer to separate the cases when $c_i=0$ and $c_i > 0$ in order to improve the readability of this paper. \end{remark}
\begin{proof} It is easy to see that $d(x, y)=0$ if and only if $x=y$ and $d(x, y)=d(y,x)$.
We will next prove the triangle inequality. To do this, let $x = (x_1 , x_2 , \ldots ,x_n )$, $y = (y_1 , y_2 , \ldots , y_n )$ and $z = (z_1 , z_2 , \ldots , z_n )$ be semi-vectors in $V = [{\mathbb R}_{0}^{+}]^{n}$. We look first at a fixed $i$. If $x_i = y_i = z_i$ or if two of them are equal then $d(x_i , z_i ) \leq d(x_i , y_i ) + d(y_i, z_i )$. Let us then assume that $x_i$, $y_i$ and $z_i$ are pairwise distinct. We have to analyze the six cases: $\operatorname{(1)}$ $x_i < y_i < z_i$; $\operatorname{(2)}$ $x_i < z_i < y_i$; $\operatorname{(3)}$ $y_i < x_i < z_i$; $\operatorname{(4)}$ $y_i < z_i < x_i$; $\operatorname{(5)}$ $z_i < x_i < y_i$; $\operatorname{(6)}$ $z_i < y_i < x_i$. In each of these cases one checks that $d(x_i , z_i ) \leq d(x_i , y_i ) + d(y_i, z_i )$, with equality precisely when $y_i$ lies between $x_i$ and $z_i$; hence it suffices to verify the triangle inequality in the worst cases, namely when for all $i=1, 2, \ldots , n$ we have $x_i < y_i < z_i$ or, symmetrically, $z_i < y_i < x_i$. Since both cases are analogous we only verify the (first) case $x_i < y_i < z_i$, for all $i$. In such cases there exist positive real numbers $a_i$, $b_i$, for all $i=1, 2, \ldots , n$, such that $y_i = x_i + a_i$ and $z_i = y_i + b_i$, which implies $z_i = x_i + a_i + b_i$. We need to show that $d(x, z) \leq d(x, y) + d(y, z)$, i.e., ${\left(\displaystyle\sum_{i=1}^{n}(a_i + b_i)^2\right)}^{1/2} \leq {\left(\displaystyle\sum_{i=1}^{n} a_i^2\right)}^{1/2} + {\left(\displaystyle\sum_{i=1}^{n} b_i^2\right)}^{1/2}$. The last inequality is equivalent to the inequality $\displaystyle\sum_{i=1}^{n} (a_i + b_i)^2 \leq \displaystyle\sum_{i=1}^{n} a_i^2 + \displaystyle\sum_{i=1}^{n} b_i^2 + 2{\left(\displaystyle\sum_{i=1}^{n} a_i^2 \right)}^{1/2} \cdot {\left(\displaystyle\sum_{i=1}^{n} b_i^2\right)}^{1/2}$. Again, the last inequality is equivalent to $\displaystyle\sum_{i=1}^{n} a_i b_i \leq {\left(\displaystyle\sum_{i=1}^{n} a_i^2\right)}^{1/2}\cdot {\left(\displaystyle\sum_{i=1}^{n} b_i^2\right)}^{1/2}$, which is the Cauchy-Schwarz inequality in ${\mathbb R}_{0}^{+}$. Therefore, $d$ satisfies the triangle inequality, hence it is a metric on $V$. \end{proof}
\begin{remark} Note that Proposition~\ref{R+1a} means that the Euclidean norm on $[{\mathbb R}_{0}^{+}]^{n}$ (see Proposition~\ref{R+1}) generates the Euclidean metric on $[{\mathbb R}_{0}^{+}]^{n}$. This result is analogous to the fact that every norm defined on vector spaces generates a metric on it. Further, a semi-vector space $V$ is Banach (see Definition~\ref{semiBanach}) if the norm generates a metric under which every Cauchy sequence in $V$ converges to an element of $V$. \end{remark}
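For a concrete illustration of the numbers $c_i$, take $x=(3, 1)$ and $y=(1, 4)$ in ${[{\mathbb R}_{0}^{+}]}^{2}$. For $i=1$ we have $\max\{3, 1\}=\min\{3, 1\} + c_1$, so $c_1 = 2$; for $i=2$ we have $\max\{1, 4\}=\min\{1, 4\} + c_2$, so $c_2 = 3$. Hence $d(x, y)=\sqrt{2^2 + 3^2}=\sqrt{13}$, which is exactly the usual Euclidean distance between the two points, computed without leaving ${\mathbb R}_{0}^{+}$.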
\begin{proposition}\label{R+1b} Let $V = [{\mathbb R}_{0}^{+}]^{n}$ and define the function $\langle \ , \ \rangle:V\times V \longrightarrow {\mathbb R}_{0}^{+}$ as follows: if $u = (x_1 , x_2 , \ldots , x_n )$ and $v = (y_1 , y_2 , \ldots , y_n )$ are semi-vectors in $V$, put $\langle u , v \rangle = \displaystyle\sum_{i=1}^{n}x_i y_i$. Then $\langle \ , \ \rangle$ is an inner product on $V$, called dot product. \end{proposition} \begin{proof} The proof is immediate. \end{proof}
\begin{proposition}\label{R+1c} The dot product on $V = [{\mathbb R}_{0}^{+}]^{n}$ generates the Euclidean norm on $V$. \end{proposition} \begin{proof} If $x= (x_1 , x_2 , \ldots , x_n ) \in V$, define the norm of $x$ by
$\| x \|=\sqrt{\langle x, x\rangle}$. Note that the norm is exactly the Euclidean norm given in Proposition~\ref{R+1}. \end{proof}
\begin{remark}
We observe that if an inner product on a semi-vector space $V$ generates a norm $\| \ \|$ and such a norm generates a metric $d$ on $V$, then $V$ is a Hilbert semi-vector space (according to Definition~\ref{semiHilbert}) if every Cauchy sequence in $V$ converges w.r.t. $d$ to an element of $V$. \end{remark}
\begin{proposition}\label{R+2}
Let $V = [{\mathbb R}_{0}^{+}]^{n}$ and define the function ${\| \ \|}_1:V
\longrightarrow {\mathbb R}_{0}^{+}$ as follows: if $x = (x_1 , x_2 , \ldots ,$ $x_n ) \in V$, ${\| x \|}_1=\displaystyle\sum_{i=1}^{n} x_i$. Then ${\| x \|}_1$ is a norm on $V$. \end{proposition} \begin{proof} The proof is direct. \end{proof}
\begin{proposition}\label{R+2a} Let $x = (x_1 , x_2 , \ldots ,x_n )$, $y = (y_1 , y_2 , \ldots , y_n )$ be semi-vectors in $V = [{\mathbb R}_{0}^{+}]^{n}$. Define the function $d_1:V \times V \longrightarrow {\mathbb R}_{0}^{+}$ in the following way. For every fixed $i$, if $x_i = y_i$, put $c_i =0$; if $x_i \neq y_i$, put ${\varphi}_i = {\psi}_i + c_i$, where ${\varphi}_i =\max \{x_i, y_i \}$ and ${\psi}_i =\min \{ x_i , y_i\}$. Let us consider that $d_1 (x, y) = \displaystyle\sum_{i=1}^{n} c_i $. Then the function $d_1$ is a metric on $V$ derived from the norm
${\| \ \|}_1$ shown in Proposition~\ref{R+2}. \end{proposition} \begin{proof} We only prove the triangle inequality. To avoid stress of notation, we consider the same that was considered in the proof of Proposition~\ref{R+1a}. We then fix $i$ and only investigate the worst case $x_i < y_i < z_i$. In this case, there exist positive real numbers $a_i$, $b_i$ for all $i=1, 2 , \ldots , n$, such that $y_i = x_i + a_i$ and $z_i = y_i + b_i$, which implies $z_i = x_i + a_i + b_i$. Then, for all $i$, $d_1 (x_i , z_i) \leq d_1 (x_i , y_i ) + d_1 (y_i , z_i)$; hence, $d_1 (x, z)=\displaystyle\sum_{i=1}^{n} d_1 (x_i , z_i) = \displaystyle\sum_{i=1}^{n} (a_i + b_i ) = \displaystyle\sum_{i=1}^{n} a_i + \displaystyle\sum_{i=1}^{n} b_i = \displaystyle\sum_{i=1}^{n} d_1 (x_i , y_i ) + \displaystyle\sum_{i=1}^{n} d_1 (y_i , z_i )= d_1 (x, y) + d_1 (y, z)$. Therefore, $d_1$ is a metric on $V$. \end{proof}
\begin{proposition}\label{R+3}
Let $V = [{\mathbb R}_{0}^{+}]^{n}$ be the Euclidean semi-vector space of semi-dimension $n$. Define the function ${\| \ \|}_2:V \longrightarrow
{\mathbb R}_{0}^{+}$ as follows: if $x = (x_1 , x_2 , \ldots ,$ $x_n ) \in V$, take ${\| x \|}_2=\displaystyle\max_{i} \{ x_i \}$. Then ${\| x \|}_2$ is a norm on $V$. \end{proposition}
\begin{proposition}\label{R+3a} Keeping the notation of Proposition~\ref{R+1a}, define the function $d_2:V \times V \longrightarrow {\mathbb R}_{0}^{+}$ such that $d_2 (x, y) = \max_{i} \{ c_i \}$. Then $d_2$ is a metric on $V$. Moreover, $d_2$
is obtained from the norm ${\| \ \|}_2$ exhibited in Proposition~\ref{R+3}. \end{proposition}
\begin{proposition}\label{R+4}
The norms $\| \ \|$, ${\| \ \|}_1$ and ${\| \ \|}_2$ shown in Propositions~\ref{R+1},~\ref{R+2} and \ref{R+3} are equivalent. \end{proposition} \begin{proof}
It is immediate to see that ${\| \ \|}_2 \leq \| \ \| \leq
{\| \ \|}_1 \leq n {\| \ \|}_2$. \end{proof}
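For instance, for $x = (1, 1, \ldots , 1) \in {[{\mathbb R}_{0}^{+}]}^{n}$ one has ${\| x \|}_2 = 1$, $\| x \| = \sqrt{n}$ and ${\| x \|}_1 = n = n {\| x \|}_2$, which shows that the constant $n$ in the last inequality cannot be improved.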
In a natural way we can define the norm of a bounded semi-linear transformation.
\begin{definition}\label{semibounded} Let $V$ and $W$ be two normed semi-vector spaces and let $T:V \longrightarrow W$ be a semi-linear transformation. We say that $T$ is bounded if there exists a real number $c > 0$
such that $\| T(v)\|\leq c \| v \|$ for all $v \in V$. \end{definition}
If $T:V \longrightarrow W$ is bounded and $v \neq 0$
we can consider the quotient $\frac{\| T(v)\|}{\| v \|}$. Since such a quotient is upper bounded by $c$, the supremum $\displaystyle
\sup_{v \in V, v\neq 0}\frac{\| T(v)\|}{\| v \|}$ exists and it is at most $c$. We then define
$$\| T \|= \displaystyle\sup_{v \in V, v\neq 0}\frac{\| T(v)\|}{\| v \|}.$$
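For instance, let $T:{[{\mathbb R}_{0}^{+}]}^{2}\longrightarrow {\mathbb R}_{0}^{+}$ be given by $T(x, y)= x + 2y$, where ${[{\mathbb R}_{0}^{+}]}^{2}$ carries the Euclidean norm and ${\mathbb R}_{0}^{+}$ the norm $\| t \| = t$. By the Cauchy-Schwarz inequality in ${\mathbb R}_{0}^{+}$, $x + 2y \leq \sqrt{5}\, \sqrt{x^2 + y^2}$, so $T$ is bounded and $\| T \| \leq \sqrt{5}$; since equality is attained at the semi-vector $\frac{1}{\sqrt{5}}(1, 2)$, which has Euclidean norm $1$, we conclude that $\| T \| = \sqrt{5}$.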
\begin{proposition}\label{R+5} Let $T: V \longrightarrow W$ be a bounded semi-linear transformation. Then the following hold: \begin{itemize} \item [ $\operatorname{(1)}$] $T$ sends bounded sets to bounded sets;
\item [ $\operatorname{(2)}$] the assignment $T \mapsto \| T \|$ defines a norm, called the norm of $T$;
\item [ $\operatorname{(3)}$] $\| T \|$ can be written in the form
$\| T \|= \displaystyle\sup_{v \in V, \| v \| = 1 } \| T(v) \|$. \end{itemize} \end{proposition} \begin{proof}
Items~$\operatorname{(1)}$~and~$\operatorname{(2)}$ are immediate. The proof of Item~$\operatorname{(3)}$ is analogous to the standard proof, but we present it here to guarantee that our mathematical tools are sufficient to perform it. Let $v\neq 0$ be a semi-vector with norm $\| v \|= a \neq 0$ and set $u=(1/a)v$. Then $\| u \| =1$ and, since $T$ is semi-linear, $\frac{\| T(v)\|}{\| v \|}= \frac{1}{a}\| T(v)\| = \| T( (1/a) v) \| = \| T(u) \|$. Taking the supremum over all $v \neq 0$ we obtain
$$\| T \|= \displaystyle\sup_{v \in V, v\neq 0}\frac{\| T(v)\|}{\| v \|}= \displaystyle\sup_{u \in V, \| u \| =1} \| T(u)\| = \displaystyle\sup_{v \in V, \| v \| =1} \| T(v)\|. $$ \end{proof}
\subsubsection{The Semi-Spaces ${l}_{+}^{\infty}$, ${l}_{+}^{p}$ and ${\operatorname{C}}_{+}[a, b]$}\label{subsubsec1}
In this subsection we investigate topological aspects, such as completeness and separability, of some semi-vector spaces over ${\mathbb R}_{0}^{+}$. More precisely, we study the spaces ${l}_{+}^{\infty}$, ${l}_{+}^{p}$ and ${\operatorname{C}}_{+}[a, b]$, which will be defined in the sequel.
We first study the space ${l}_{+}^{\infty}$, the set of all bounded sequences of nonnegative real numbers. Before studying such a space we must define a metric on it, since the usual metric on $l^{\infty}$, defined as $ d(x, y)=\displaystyle\sup_{i \in {\mathbb N}} | {x}_i - {y}_i |$, where $x = ({x}_i )$ and $y = ({y}_i )$ are sequences in $l^{\infty}$, is not available to us, because the difference ${x}_i - {y}_i$ has no meaning in ${\mathbb R}_{0}^{+}$ when ${y}_i > {x}_i$. We circumvent this problem by utilizing the total order of ${\mathbb R}$, as in Proposition~\ref{R+1a}. Let $x = ({\mu}_i )$ and $y = ({\nu}_i )$ be sequences in $l_{+}^{\infty}$. We fix $i$ and define $c_i$ as was done in Proposition~\ref{R+1a}: if ${\mu}_i = {\nu}_i $ then we put $c_i = 0$; if ${\mu}_i \neq {\nu}_i $, let ${\gamma}_i=\max \{{\mu}_i , {\nu}_i \}$ and ${\psi}_i= \min \{{\mu}_i , {\nu}_i \}$; then there exists a positive real number $c_i$ such that ${\gamma}_i = {\psi}_i + c_i$
and, in place of $| {\mu}_i - {\nu}_i |$, we put $c_i$. Thus, our metric becomes \begin{eqnarray}\label{lmetric} d(x, y) = \displaystyle\sup_{i \in {\mathbb N}} \{c_i \}. \end{eqnarray}
It is clear that $d(x, y)$ shown in Eq.~(\ref{lmetric}) defines a metric. However, we must show that the tools we have are sufficient to prove this fact, since we are working in ${\mathbb R}_{0}^{+}$.
\begin{proposition}\label{metricsup} The function $d$ shown in Eq.~(\ref{lmetric}) is a metric on ${l}_{+}^{\infty}$. \end{proposition} \begin{proof} It is clear that $d(x,y)\geq 0$ and $d(x,y)= 0 \Longleftrightarrow x=y$. Let $x = ({\mu}_i )$ and $y = ({\nu}_i )$ be two sequences in $l_{+}^{\infty}$. Then, for every fixed $i \in {\mathbb N}$, if $c_i= d({\mu}_i , {\nu}_i )=0$ then ${\mu}_i = {\nu}_i$, i.e., $d({\mu}_i , {\nu}_i )=d({\nu}_i , {\mu}_i )$. If $c_i > 0$ then $c_i= d({\mu}_i , {\nu}_i )$ is computed by ${\gamma}_i = {\psi}_i + c_i$, where ${\gamma}_i=\max \{{\mu}_i , {\nu}_i \}$ and ${\psi}_i= \min \{{\mu}_i , {\nu}_i \}$. Hence, $d({\nu}_i , {\mu}_i ) = c_i^{*}$ is computed by ${\gamma}_i^{*} = {\psi}_i^{*} + c_i^{*}$, where ${\gamma}_i^{*}=\max \{{\nu}_i, {\mu}_i \}$ and ${\psi}_i^{*}= \min \{{\nu}_i, {\mu}_i \}$, which implies $d({\mu}_i , {\nu}_i ) =d({\nu}_i , {\mu}_i )$. Taking the supremum over all $i$'s we have $d(x, y) = \displaystyle\sup_{i \in {\mathbb N}} \{c_i \}= \displaystyle\sup_{i \in {\mathbb N}} \{c_i^{*} \}=d(y, x)$.
To show the triangle inequality, let $x = ({\mu}_i )$, $y = ({\nu}_i )$ and $z=({\eta}_i)$ be sequences in $l_{+}^{\infty}$. For every fixed $i$, we will prove that $d({\mu}_i , {\eta}_i )\leq d({\mu}_i , {\nu}_i ) + d({\nu}_i , {\eta}_i )$. If ${\nu}_i = {\mu}_i = {\eta}_i$, the result is trivial. If two of them are equal, the result is also trivial. Assume that ${\mu}_i$, ${\nu}_i$ and ${\eta}_i$ are pairwise distinct. As in the proof of Proposition~\ref{R+1a}, we must investigate the six cases:\\ $\operatorname{(1)}$ ${\mu}_i < {\nu}_i < {\eta}_i$; $\operatorname{(2)}$ ${\mu}_i < {\eta}_i < {\nu}_i$; $\operatorname{(3)}$ ${\nu}_i < {\mu}_i < {\eta}_i$; $\operatorname{(4)}$ ${\nu}_i < {\eta}_i < {\mu}_i$; $\operatorname{(5)}$ ${\eta}_i < {\mu}_i < {\nu}_i$; $\operatorname{(6)}$ ${\eta}_i < {\nu}_i < {\mu}_i$. We only show $\operatorname{(1)}$ and $\operatorname{(2)}$.
To show $\operatorname{(1)}$, note that there exist positive real numbers $c_i$ and $c_i^{'}$ such that ${\nu}_i = {\mu}_i + c_i$ and ${\eta}_i = {\nu}_i + c_i^{'}$, which implies $\eta_i = \mu_i + c_i + c_i^{'}$. Hence, $d({\mu}_i , {\eta}_i )=c_i + c_i^{'}= d({\mu}_i , {\nu}_i ) + d({\nu}_i , {\eta}_i )$.
Let us show $\operatorname{(2)}$. There exist positive real numbers $b_i$ and $b_i^{'}$ such that ${\eta}_i = {\mu}_i + b_i$ and ${\nu}_i={\eta}_i + b_i^{'}$, so ${\nu}_i = {\mu}_i + b_i + b_i^{'}$. Therefore, $d({\mu}_i , {\eta}_i )=b_i < d({\mu}_i , {\nu}_i ) + d({\nu}_i , {\eta}_i )=b_i + 2b_i^{'}$.
Taking the supremum over all $i$'s we have $\displaystyle\sup_{i \in {\mathbb N}} \{d({\mu}_i , {\eta}_i ) \} \leq \displaystyle\sup_{i \in {\mathbb N}} \{d({\mu}_i , {\nu}_i )\} + \displaystyle\sup_{i \in {\mathbb N}} \{d({\nu}_i , {\eta}_i ) \}$, i.e., $d(x, z) \leq d(x, y) + d(y, z)$. Therefore, $d$ is a metric on ${l}_{+}^{\infty}$. \end{proof}
\begin{definition}\label{defl} The metric space ${l}_{+}^{\infty}$ is the set of all bounded sequences of nonnegative real numbers equipped with the metric $d(x, y) = \displaystyle\sup_{i \in {\mathbb N}} \{c_i \}$ given previously. \end{definition}
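For instance, take $x = (1, \tfrac{1}{2}, \tfrac{1}{3}, \ldots )$ and $y = (0, 0, 0, \ldots )$ in ${l}_{+}^{\infty}$. For each $i$ one has $\max\{\tfrac{1}{i}, 0\} = \min\{\tfrac{1}{i}, 0\} + c_i$, so $c_i = \tfrac{1}{i}$ and therefore $d(x, y)=\displaystyle\sup_{i \in {\mathbb N}} \{ \tfrac{1}{i} \} = 1$.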
We prove that ${l}_{+}^{\infty}$ equipped with the previous metric is complete.
\begin{theorem}\label{lcomplete} The space ${l}_{+}^{\infty}$ with the metric $d(x, y) = \displaystyle\sup_{i \in {\mathbb N}} \{c_i \}$ shown above is complete. \end{theorem} \begin{proof} The proof follows the same line as the standard proof of completeness of ${l}^{\infty}$; however it is necessary to adapt it to the metric (written above) in terms of nonnegative real numbers. Let $(x_n)$ be a Cauchy sequence in ${l}_{+}^{\infty}$, where $x_i = ({\eta}_{1}^{(i)}, {\eta}_{2}^{(i)}, \ldots )$. We must show that $(x_n )$ converges to an element of ${l}_{+}^{\infty}$. As $(x_n)$ is Cauchy, given $\epsilon > 0$, there exists a positive integer $K$ such that, for all $n, m > K$, $$d(x_n, x_m)=\displaystyle\sup_{j \in {\mathbb N}} \{c_j^{(n, m)} \} < \epsilon,$$ where $c_j^{(n, m)}$ is a nonnegative real number such that, if ${\eta}_{j}^{(n)}={\eta}_{j}^{(m)}$ then $c_j^{(n, m)}=0$, and if ${\eta}_{j}^{(n)} \neq {\eta}_{j}^{(m)}$ then $c_j^{(n, m)}$ is given by $\max \{{\eta}_{j}^{(n)}, {\eta}_{j}^{(m)}\} = \min \{{\eta}_{j}^{(n)}, {\eta}_{j}^{(m)}\} +c_j^{(n, m)}$. This implies that for each fixed $j$ one has \begin{eqnarray}\label{distCauchy1} c_j^{(n, m)} < \epsilon, \end{eqnarray} where $n, m > K$. Thus, for each fixed $j$, it follows that $({\eta}_{j}^{(1)}, {\eta}_{j}^{(2)}, \ldots )$ is a Cauchy sequence in ${\mathbb R}_{0}^{+}$. Since ${\mathbb R}_{0}^{+}$ is a complete metric space, the sequence $({\eta}_{j}^{(1)}, {\eta}_{j}^{(2)}, \ldots )$ converges to an element ${\eta}_{j}$ in ${\mathbb R}_{0}^{+}$. Hence, for each $j$, we form the sequence $x$ whose coordinates are the limits ${\eta}_{j}$, i.e., $x =({\eta}_{1}, {\eta}_{2}, {\eta}_{3}, \ldots )$. We must show that $x \in {l}_{+}^{\infty}$ and $x_n \longrightarrow x$.
To show that $x$ is a bounded sequence, let us consider the number $c_j^{(n, \infty)}$ defined as follows: if ${\eta}_{j} = {\eta}_{j}^{(n)}$ then $c_j^{(n, \infty)}=0$, and if ${\eta}_{j} \neq {\eta}_{j}^{(n)}$, define $c_j^{(n, \infty)}$ to be the positive real number satisfying $\max \{{\eta}_{j} , {\eta}_{j}^{(n)} \}= \min \{{\eta}_{j} , {\eta}_{j}^{(n)} \} + c_j^{(n, \infty)}$. From the inequality $(\ref{distCauchy1})$, letting $m \longrightarrow \infty$, one has
\begin{eqnarray}\label{distCauchy2} c_j^{(n, \infty)}\leq\epsilon . \end{eqnarray} Because ${\eta}_{j} \leq {\eta}_{j}^{(n)} + c_j^{(n, \infty)}$ and since $x_n = ({\eta}_{j}^{(n)})_{j} \in l_{+}^{\infty}$, it follows that the sequence $({\eta}_{j})_{j}$ is bounded. Hence, $x = ({\eta}_{1}, {\eta}_{2}, {\eta}_{3}, \ldots ) \in {l}_{+}^{\infty}$. From $(\ref{distCauchy2})$ we have $$\displaystyle\sup_{j \in {\mathbb N}} \{c_j^{(n, \infty)} \} \leq \epsilon,$$ which implies that $x_n \longrightarrow x$. Therefore, $l_{+}^{\infty}$ is complete. \end{proof}
Although $l_{+}^{\infty}$ is a complete metric space, it is not separable.
\begin{theorem}\label{lnotsep} The space ${l}_{+}^{\infty}$ with the metric $d(x, y) = \displaystyle\sup_{i \in {\mathbb N}} \{c_i \}$ is not separable. \end{theorem} \begin{proof} The proof is the same as shown in \cite[1.3-9]{Kreyszig:1978}, so it is omitted. \end{proof}
Let us define the space analogous to the space $l^p$.
\begin{definition}\label{deflp} Let $p \geq 1$ be a fixed real number. The set ${l}_{+}^{p}$ consists of all sequences $x =({\eta}_{1}, {\eta}_{2}, {\eta}_{3}, \ldots )$ of nonnegative real numbers such that $\displaystyle\sum_{i=1}^{\infty} ({\eta}_{i})^{p} < \infty$, with the metric defined by $ d(x, y)={\left[\displaystyle\sum_{i=1}^{\infty} {[c_{i}]}^{p}\right]}^{1/p}$, where $y =({\mu}_{1}, {\mu}_{2}, {\mu}_{3}, \ldots ) \in {l}_{+}^{p}$ and $c_i$ is defined as follows: $c_i = 0$ if ${\mu}_i = {\eta}_i $; if ${\mu}_i > {\eta}_i$ then $c_i > 0$ is such that ${\mu}_i = {\eta}_i + c_i$, and if ${\eta}_i > {\mu}_i$ then $c_i > 0$ is such that ${\eta}_i = {\mu}_i + c_i$. \end{definition}
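For instance, for $p = 2$ the sequence $x = (\tfrac{1}{2}, \tfrac{1}{4}, \tfrac{1}{8}, \ldots )$, with ${\eta}_i = 2^{-i}$, belongs to ${l}_{+}^{2}$ because $\displaystyle\sum_{i=1}^{\infty} ({\eta}_{i})^{2} = \displaystyle\sum_{i=1}^{\infty} 4^{-i} = \tfrac{1}{3}$, and its distance to the null sequence $y = (0, 0, 0, \ldots )$ is $d(x, y) = {\left(\tfrac{1}{3}\right)}^{1/2}$.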
\begin{theorem}\label{lp+complete} The space ${l}_{+}^{p}$ with the metric $ d(x,y)= {\left[\displaystyle\sum_{i=1}^{\infty} {[c_{i}]}^{p}\right]}^{1/p}$ exhibited above is complete. \end{theorem} \begin{proof} Recall that given two sequences $({\mu}_i)$ and $({\eta}_i )$ in ${l}_{+}^{p}$ the Minkowski inequality for sums reads as \begin{eqnarray*}
{\left[\displaystyle\sum_{i=1}^{\infty} {|{\mu}_i +
{\eta}_i |}^{p}\right]}^{1/p} \leq {\left[\displaystyle
\sum_{j=1}^{\infty} {|{\mu}_j|}^{p}\right]}^{1/p} + {\left[\displaystyle
\sum_{k=1}^{\infty} {|{\eta}_k|}^{p}\right]}^{1/p}. \end{eqnarray*} Applying the Minkowski inequality as per \cite[1.5-4]{Kreyszig:1978} with some adaptations, it follows that $d(x,y)$ is, in fact, a metric. In order to prove the completeness of ${l}_{+}^{p}$, we proceed similarly as in the proof of Theorem~\ref{lcomplete} with some adaptations. The main adaptation is performed according to the proof of completeness of $l^p$ in \cite[1.5-4]{Kreyszig:1978} replacing the last equality $x=x_m +( x - x_m) \in l^p$ (after Eq.~(5)) by two equalities in order to avoid negative real numbers. \begin{enumerate} \item [ $\operatorname{(1)}$] If the $i$-th coordinate $x^{(i)}- x_{m}^{(i)}$ of the sequence $x- x_m$ is positive, then define $c_{m}^{(i)} = x^{(i)}- x_{m}^{(i)}$ and write $x^{(i)} = x_{m}^{(i)} + c_{m}^{(i)}$. From Minkowski inequality, it follows that the sequence $(x^{(i)})_i$ is in $l_{+}^{p}$. \item [ $\operatorname{(2)}$] If $x^{(j)}- x_{m}^{(j)}$ is negative, then define $c_{m}^{(j)}= x_{m}^{(j)} - x^{(j)}$ and write $x_{m}^{(j)}= x^{(j)} + c_{m}^{(j)} $. Since $x_m \in l_{+}^{p}$, from the comparison criterion for positive series it follows that the sequence $(x^{(j)})_j$ is also in $l_{+}^{p}$. \end{enumerate} \end{proof}
\begin{theorem}\label{lp+separable} The space ${l}_{+}^{p}$ is separable. \end{theorem} \begin{proof} The proof follows the same line of \cite[1.3-10]{Kreyszig:1978}. \end{proof}
\begin{definition}\label{continon[a,b]} Let $I=[a, b]$ be a closed interval in ${\mathbb R}_{0}^{+}$, where $a\geq 0$ and $a < b$. Then ${\operatorname{C}}_{+}[a, b]$ is the set of all continuous nonnegative real valued functions on $I=[a, b]$, with the metric defined by $d(f, g) = \displaystyle\max_{t \in I} \{c(t)\}$, where, for each $t \in I$, $c(t)$ is given by $\max \{ f(t), g(t) \} =\min \{ f(t), g(t) \} + c(t)$. \end{definition}
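For instance, take $I=[0, 1]$, $f(t)=t$ and $g(t)=t^2$. For each $t \in [0, 1]$ one has $\max\{f(t), g(t)\}=t$ and $\min\{f(t), g(t)\}=t^2$, so $c(t)$ is the nonnegative number satisfying $t = t^2 + c(t)$, namely $c(t)=t(1-t)$; its maximum over $[0, 1]$ is attained at $t=\tfrac{1}{2}$, whence $d(f, g)=\tfrac{1}{4}$.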
\begin{theorem}\label{cont[a,b]complete} The metric space $({\operatorname{C}}_{+}[a, b], d)$, where $d$ is given in Definition~\ref{continon[a,b]}, is complete. \end{theorem} \begin{proof} The proof follows the same lines as the standard one with some modifications. Let $(f_{m})$ be a Cauchy sequence in ${\operatorname{C}}_{+}[a, b]$. Given $\epsilon > 0$ there exists a positive integer $N$ such that, for all $m, n > N$, it follows that \begin{eqnarray}\label{In1} d(f_{m} , f_{n}) = \displaystyle\max_{t \in I} \{c_{m, n} (t)\} < \epsilon, \end{eqnarray} where $\max \{ f_{m} (t) , f_{n} (t) \} = \min \{ f_{m} (t) , f_{n} (t) \} + c_{m, n}(t)$. Thus, for any fixed $t_0 \in I$ we have $c_{m, n} (t_0 ) < \epsilon$, for all $m, n > N$. This means that $(f_1 (t_0 ), f_2 (t_0 ), \ldots )$ is a Cauchy sequence in ${\mathbb R}_{0}^{+}$ which, since ${\mathbb R}_{0}^{+}$ is complete, converges to some limit, denoted $f(t_0 )$, as $m \longrightarrow \infty$. In this way we define a function $f: [a, b] \longrightarrow {\mathbb R}_{0}^{+}$ by assigning to each $t \in [a, b]$ the limit $f(t)$. Taking $n \longrightarrow \infty$ in (\ref{In1}) we obtain $\displaystyle\max_{t \in I} \{c_{m} (t)\} \leq \epsilon$ for all $m > N$, where $\max \{ f_{m} (t) , f(t) \} = \min \{ f_{m} (t) , f(t) \} + c_{m}(t)$, which implies $c_{m}(t)\leq \epsilon$ for all $t \in I$. This fact means that $(f_{m})$ converges to $f$ uniformly on $I$; hence $f \in {\operatorname{C}}_{+}[a, b]$, because the functions $f_{m}$ are continuous on $I$ and the uniform limit of continuous functions is continuous. Therefore, ${\operatorname{C}}_{+}[a, b]$ is complete, as desired. \end{proof}
\subsection{Interesting Semi-Vector Spaces}\label{subsec2}
In this subsection we exhibit semi-vector spaces over $K= {\mathbb R}_{0}^{+}$ derived from semi-metrics, semi-metric-preserving functions, semi-norms, semi-inner products and sub-linear functionals.
\begin{theorem}\label{teo1} Let $X$ be a semi-metric space and ${ \mathcal M}_{X}=\{ d: X \times X\longrightarrow {\mathbb R}; d$ $\operatorname{is \ a \ semi-metric \ on} X\}$. Then $({ \mathcal M}_{X}, +, \cdot )$ is a semi-vector space over ${\mathbb R}_{0}^{+}$, where $+$ and $\cdot$ are the addition and the scalar multiplication (in ${\mathbb R}_{0}^{+}$) pointwise, respectively. \end{theorem} \begin{proof} We first show that ${ \mathcal M}_{X}$ is closed under addition. Let $d_1 , d_2 \in { \mathcal M}_{X}$ and set $d:= d_1 + d_2$. It is clear that $d$ is a nonnegative real-valued function. Moreover, for all $x, y \in X$, $d(x, y) = d(y, x)$. Let $x \in X$; $d(x, x) = d_1(x, x) + d_2 (x,x) =0$. For all $x, y, z \in X$, $d(x, z)=d_1 (x, z) + d_2 (x, z)\leq [d_1 (x, y) + d_2 (x, y)]+ [d_1 (y, z) + d_2 (y, z)]= d(x, y) + d(y, z)$.
Let us show that ${ \mathcal M}_{X}$ is closed under scalar multiplication. Let $d_1 \in { \mathcal M}_{X}$ and define $d = \lambda d_1$, where $\lambda \in {\mathbb R}_{0}^{+}$. It is clear that $d$ is a nonnegative real-valued function and, for all $x, y \in X$, $d(x, y)=d(y, x)$. Moreover, if $x \in X$, $d(x, x)=0$. For all $x, y, z \in X$, $d(x, z)=\lambda d_1 (x, z)\leq \lambda [d_1 (x, y) + d_1 (y, z)]= d(x, y) + d(y, z)$. This means that ${ \mathcal M}_{X}$ is closed under scalar multiplication.
It is easy to see that $({ \mathcal M}_{X}, +, \cdot )$ satisfies the other conditions of Definition~\ref{defSVS}. \end{proof}
Let $(X, d)$ be a metric space. In~\cite{Corazza:1999}, Corazza investigated interesting functions $f:{\mathbb R}_{0}^{+}\longrightarrow {\mathbb R}_{0}^{+}$ such that the composite of $f$ with $d$, i.e., $X \times X \xrightarrow{d} {{\mathbb R}_{0}^{+}} \xrightarrow{f} {{\mathbb R}_{0}^{+}}$ also generates a metric on $X$. Let us put this concept formally.
\begin{definition}\label{metricprese} Let $f:{\mathbb R}_{0}^{+}\longrightarrow {\mathbb R}_{0}^{+}$ be a function. We say that $f$ is metric-preserving if for all metric spaces $(X, d)$, the composite $f \circ d$ is a metric. \end{definition}
For our purposes we will consider semi-metric-preserving functions, defined as follows.
\begin{definition}\label{semi-metricprese} Let $f:{\mathbb R}_{0}^{+}\longrightarrow {\mathbb R}_{0}^{+}$ be a function. We say that $f$ is semi-metric-preserving if for all semi-metric spaces $(X, d)$, the composite $f \circ d$ is a semi-metric. \end{definition}
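For instance, the function $f:{\mathbb R}_{0}^{+}\longrightarrow {\mathbb R}_{0}^{+}$ given by $f(t)=\frac{t}{1+t}$ is semi-metric-preserving. Indeed, $f$ is nondecreasing, $f(0)=0$ and, being concave with $f(0)=0$, $f$ is subadditive; hence, for any semi-metric $d$ on a set $X$, the composite $f\circ d$ is nonnegative, symmetric, vanishes on the diagonal and satisfies $f(d(x, z))\leq f(d(x, y) + d(y, z))\leq f(d(x, y)) + f(d(y, z))$ for all $x, y, z \in X$.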
We next show that the set of semi-metric preserving functions has a semi-vector space structure.
\begin{theorem}\label{teo1a} Let ${ \mathcal F}_{pres}=\{ f:{\mathbb R}_{0}^{+}\longrightarrow {\mathbb R}_{0}^{+}; f \operatorname{is \ semi-metric \ preserving} \}$. Then $({ \mathcal F}_{pres}, +, \cdot )$ is a semi-vector space over ${\mathbb R}_{0}^{+}$, where $+$ and $\cdot$ are the addition and the scalar multiplication (in ${\mathbb R}_{0}^{+}$) pointwise, respectively. \end{theorem} \begin{proof} We begin by showing that ${ \mathcal F}_{pres}$ is closed under addition and scalar multiplication pointwise.
Let $f, g \in { \mathcal F}_{pres}$. Given a semi-metric space $(X, d)$, we must prove that $(f + g)\circ d$ is also a semi-metric. We know that $[(f + g)\circ d] (x, y ) \geq 0$ for all $x, y \in X$. Let $x \in X$; then $[(f + g)\circ d ](x, x )= f(d(x, x)) + g (d(x, x)) = 0$. It is clear that $[(f + g ) \circ d](x, y)= [(f + g ) \circ d](y, x)$. Let $x, y, z \in X$. Since $f\circ d$ and $g\circ d$ are semi-metrics, one has: $[(f + g ) \circ d](x, y)= f(d(x, y)) + g(d(x, y))\leq [f(d(x, z))+ g(d(x, z))]+ [f(d(z, y))+ g(d(z, y))]= (f + g)(d(x, z)) + (f + g)(d(z, y))= [(f + g)\circ d](x, z) + [(f + g)\circ d](z, y) $.
We next show that for each $f \in { \mathcal F}_{pres}$ and $ \alpha \in {\mathbb R}_{0}^{+}$, it follows that $ \alpha f \in { \mathcal F}_{pres}$. We verify only the triangle inequality, since the other conditions are immediate: $[(\alpha f) \circ d](x, y)= \alpha f (d(x, y))\leq \alpha f (d(x, z)) + \alpha f (d(z, y)) = [(\alpha f) \circ d](x, z) + [(\alpha f) \circ d](z, y)$.
The null vector is the null function $0_{f}:{\mathbb R}_{0}^{+}\longrightarrow {\mathbb R}_{0}^{+}$. The other conditions are easy to verify. \end{proof}
\begin{theorem}\label{teo2} Let $V$ be a semi-normed real vector space and ${ \mathcal N}_{V}=
\{ \| \ \|: V\longrightarrow {\mathbb R}; \| \ \|$ $\operatorname{is \ a \ semi-norm \ on} V\}$. Then $({ \mathcal N}_{V}, +, \cdot )$ is a semi-vector space over ${\mathbb R}_{0}^{+}$, where $+$ and $\cdot$ are addition and scalar multiplication (in ${\mathbb R}_{0}^{+}$) pointwise, respectively. \end{theorem} \begin{proof}
From hypotheses, ${ \mathcal N}_{V}$ is non-empty. Let ${\| \ \|}_{1} ,
{\| \ \|}_{2} \in { \mathcal N}_{V}$ and set $\| \ \|:=
{\| \ \|}_{1} + {\| \ \|}_{2}$. For all $v \in V$, $\| v \|\geq 0$. If $v \in V$ and $\alpha \in {\mathbb R}$ then $\| \alpha v \|=|\alpha| \| v \|$. For every $u, v \in V$, it follows that $\| u + v \|:= {\| u + v \|}_{1} +
{\| u + v \|}_{2}\leq ({ \| u \|}_{1} + {\| u \|}_{2} ) +
({\| v \|}_{1} + {\| v \|}_{2})= \| u \| + \| v \|$. Hence, ${ \mathcal N}_{V}$ is closed under addition.
We next show that ${ \mathcal N}_{V}$ is closed under scalar multiplication. Let ${\| \ \|}_{1} \in { \mathcal N}_{V}$ and define
$\| \ \|:= \lambda {\| \ \|}_{1}$, where $\lambda \in {\mathbb R}_{0}^{+}$. For all
$v \in V$, $\| v \|\geq 0$. If $\alpha \in {\mathbb R}$ and $ v \in V$,
$ \| \alpha v \|= |\alpha |( \lambda {\| v\|}_{1})= |\alpha | \| v \|$. Let $u, v \in V$. Then $\| u + v \|\leq \lambda {\| u \|}_{1}+
\lambda {\| v \|}_{1}=\|u\| + \|v\|$. Therefore, ${ \mathcal N}_{V}$ is closed under addition and scalar multiplication over ${\mathbb R}_{0}^{+}$.
The zero vector is the null function $ \textbf{0}: V \longrightarrow {\mathbb R}$. The other conditions of Definition~\ref{defSVS} are straightforward. \end{proof}
\begin{remark}
Note that ${ \mathcal N}_{V}^{\diamond}=\{\| \ \|: V\longrightarrow {\mathbb R};
\| \ \|$ $\operatorname{is \ a \ norm \ on} V\}$ is also closed under both function addition and scalar multiplication pointwise. \end{remark}
\begin{lemma}\label{lemseminorm} Let $V$ and $W$ be real vector spaces and let $T:V\longrightarrow W$ be a linear transformation. \begin{itemize}
\item [ $\operatorname{(1)}$] If $\| \ \|:W\longrightarrow {\mathbb R}$ is a semi-norm on
$W$ then $\| \ \|\circ T: V \longrightarrow {\mathbb R}$ is a semi-norm on $V$.
\item [ $\operatorname{(2)}$] If $T$ is injective linear and $\| \ \|:
W\longrightarrow {\mathbb R}$ is a norm on $W$ then $\| \ \|\circ T$ is a norm on $V$. \end{itemize} \end{lemma} \begin{proof}
We only show Item~$\operatorname{(1)}$. It is clear that $[\| \ \|\circ T](v) \geq 0$ for all $v \in V$. For all $\alpha
\in {\mathbb R}$ and $v \in V$, $[\| \ \|\circ T](\alpha v)=
| \alpha | \| T(v) \| = | \alpha | [\| \ \|\circ T](v)$. Moreover, $ \forall \ v_1 , v_2 \in V$,
$[\| \ \|\circ T](v_1 + v_2)\leq [\| \ \|\circ T](v_1 )+ [\| \ \|\circ T](v_2 )$. Therefore, $\| \ \|\circ T$ is a semi-norm on $V$. \end{proof}
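For instance, let $T:{\mathbb R}^{2}\longrightarrow {\mathbb R}$ be the (non-injective) linear map $T(x, y)= x$ and let $| \cdot |$ be the usual norm on ${\mathbb R}$. Then $[\,| \cdot |\circ T](x, y)= |x|$ is a semi-norm on ${\mathbb R}^{2}$ which is not a norm, since it vanishes at $(0, 1)\neq (0, 0)$; this shows that the injectivity hypothesis in Item~$\operatorname{(2)}$ cannot be omitted.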
\begin{theorem}\label{teo2a} Let $V$ and $W$ be two semi-normed vector spaces and $T:V\longrightarrow W$ be a linear transformation. Then
$${ \mathcal N}_{V_{T}}=\{ \| \ \| \circ T:
V\longrightarrow {\mathbb R}; \| \ \| \operatorname{is \ a \ semi-norm \ on} W\}$$ is a semi-subspace of $({ \mathcal N}_{V}, +, \cdot )$. \end{theorem}
\begin{proof}
From the hypotheses, it follows that ${ \mathcal N}_{V_{T}}$ is non-empty. From Item~$\operatorname{(1)}$ of Lemma~\ref{lemseminorm}, it follows that $\| \ \|\circ T$ is a semi-norm on $V$. Let $f, g \in { \mathcal N}_{V_{T}}$, i.e.,
$f = {\| \ \|}_1 \circ T$ and $g = {\| \ \|}_2 \circ T$, where ${\| \ \|}_1$ and ${\| \ \|}_2$
are semi-norms on $W$. Then $f + g = [ {\| \ \|}_1 + {\| \ \|}_2 ]\circ T \in { \mathcal N}_{V_{T}}$. For every nonnegative real number $\lambda$ and $f \in { \mathcal N}_{V_{T}}$,
$\lambda f = \lambda [ \| \ \|\circ T] = (\lambda \| \ \| )\circ T \in { \mathcal N}_{V_{T}}$. \end{proof}
\begin{theorem}\label{teo2b} Let ${\mathcal N}$ be the class whose members are $\{{ \mathcal N}_{V}\}$, where the ${ \mathcal N}_{V}$ are given in Theorem~\ref{teo2}. Let $\operatorname{Hom}({\mathcal N})$ be the class whose members are the sets $$\operatorname{hom}({ \mathcal N}_{V}, { \mathcal N}_{W})=\{
F_T:{ \mathcal N}_{V}\longrightarrow { \mathcal N}_{W}; F_T ( {\| \ \|}_{V})= {\| \ \|}_{V} \circ T\},$$ where $T: W \longrightarrow V$ is a linear transformation and
${\| \ \|}_{V}$ is a semi-norm on $V$. Then $({\mathcal N}, \operatorname{Hom}({\mathcal N}), Id, \circ )$ is a category. \end{theorem} \begin{proof} The sets $\operatorname{hom}({ \mathcal N}_{V}, { \mathcal N}_{W})$ are pairwise disjoint. For each ${ \mathcal N}_{V}$, there exists $Id_{({ \mathcal N}_{V})}$ given by
$Id_{({ \mathcal N}_{V})} ({\| \ \|}_{V})={\| \ \|}_{V}={\| \ \|}_{V}\circ Id_{(V)}$. It is clear that if ${F}_{T}:{ \mathcal N}_{V}\longrightarrow { \mathcal N}_{W}$ then ${F}_{T}\circ Id_{({ \mathcal N}_{V})} = {F}_{T}$ and $Id_{({ \mathcal N}_{W})}\circ {F}_{T} = {F}_{T}$.
It is easy to see that for every $T:W\longrightarrow V$ linear transformation, the map $F_{T}$ is semi-linear, i.e.,
$F_{T}({\| \ \|}_{V}^{(1)} + {\| \ \|}_{V}^{(2)})=
F_{T}({\| \ \|}_{V}^{(1)}) + F_{T}({\| \ \|}_{V}^{(2)})$ and
$F_{T}(\lambda {\| \ \|}_{V})= \lambda F_{T}({\| \ \|}_{V})$, for every ${\| \ \|}_{V}, {\| \ \|}_{V}^{(1)}, {\| \ \|}_{V}^{(2)} \in { \mathcal N}_{V}$ and $\lambda \in {\mathbb R}_{0}^{+}$.
Let ${ \mathcal N}_{U}, { \mathcal N}_{V}, { \mathcal N}_{W}, { \mathcal N}_{X} \in {\mathcal N}$ and $F_{T_1} \in \operatorname{hom}({ \mathcal N}_{U}, { \mathcal N}_{V})$, $F_{T_2} \in \operatorname{hom}({ \mathcal N}_{V}, { \mathcal N}_{W})$, $F_{T_3} \in \operatorname{hom}({ \mathcal N}_{W}, { \mathcal N}_{X})$, i.e., $${ \mathcal N}_{U}\xrightarrow{F_{T_1}} { \mathcal N}_{V}\xrightarrow{F_{T_2}} { \mathcal N}_{W} \xrightarrow{F_{T_3}} { \mathcal N}_{X}.$$ The linear transformations are of the forms $$X\xrightarrow{T_3} W\xrightarrow{T_2} V \xrightarrow{T_1} U
\xrightarrow{{\| \ \|}_{U}} {\mathbb R}.$$ The associativity $(F_{T_3}\circ F_{T_2})\circ F_{T_1}=F_{T_3}\circ (F_{T_2}\circ F_{T_1})$ follows from the associativity of composition of maps. Moreover, the map $F_{T_3}\circ F_{T_2}\circ F_{T_1} \in \operatorname{Hom}({\mathcal N})$ because
$(F_{T_3}\circ F_{T_2}\circ F_{T_1})({\| \ \|}_{U}) = {\| \ \|}_{U}\circ (T_1\circ T_2\circ T_3)$ for every ${\| \ \|}_{U} \in { \mathcal N}_{U}$, that is, $F_{T_3}\circ F_{T_2}\circ F_{T_1} = F_{T_1\circ T_2\circ T_3}$, and $T_1\circ T_2\circ T_3$ is a linear transformation. Therefore, $({\mathcal N}, \operatorname{Hom}({\mathcal N}), Id, \circ )$ is a category, as required. \end{proof}
\begin{theorem}\label{teo3} Let $V$ be a real vector space endowed with a semi-inner product and let ${ \mathcal P}_{V}=\{ \langle \ , \ \rangle: V\times V\longrightarrow {\mathbb R}; \langle \ , \ \rangle$ $\operatorname{is \ a \ semi-inner \ product \ on} V\}$. Then $({ \mathcal P}_{V}, +, \cdot )$ is a semi-vector space over ${\mathbb R}_{0}^{+}$, where $+$ and $\cdot$ are addition and scalar multiplication (in ${\mathbb R}_{0}^{+}$) pointwise, respectively. \end{theorem} \begin{proof} The proof is analogous to that of Theorems~\ref{teo1}~and~\ref{teo2}. \end{proof}
\begin{proposition}\label{prop2} Let $V, W$ be two vector spaces and $T_1 , T_2:V\longrightarrow W$ be two linear transformations. Let us consider the map $T_1 \times T_2 : V \times V \longrightarrow W\times W$ given by $T_1 \times T_2 (u, v) = (T_1(u), T_2 (v))$. If $\langle \ , \ \rangle$ is a semi-inner product on $W$ then $\langle \ , \ \rangle \circ T_1 \times T_2$ is a semi-inner product on $V$. \end{proposition} \begin{proof} The proof is immediate, so it is omitted. \end{proof}
Let $V$ be a real vector space. Recall that a sub-linear functional on $V$ is a functional $t: V\longrightarrow {\mathbb R}$ which is sub-additive: $\forall \ u, v \in V$, $t(u + v)\leq t(u) + t(v)$; and positive-homogeneous: $\forall \ \alpha \in {\mathbb R}_{0}^{+}$ and $\forall \ v \in V$, $t(\alpha v ) =\alpha t(v)$.
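For instance, on $V={\mathbb R}$ the functional $t(x)=\max\{x, 0\}$ is sub-linear: it is positive-homogeneous, since $\max\{\alpha x, 0\}=\alpha \max\{x, 0\}$ for every $\alpha \in {\mathbb R}_{0}^{+}$, and sub-additive, since both $x + y$ and $0$ are bounded above by $\max\{x, 0\} + \max\{y, 0\}$; it is not linear, because $t(-1)=0$ while $-t(1)=-1$. Note also that every norm and, more generally, every semi-norm on $V$ is a sub-linear functional.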
\begin{theorem}\label{teo4} Let $V$ be a real vector space. Let us consider ${ \mathcal S}_{V}= \{ S: V\longrightarrow {\mathbb R};$ $S \operatorname{is} \operatorname{sub-linear} \operatorname{on} V\}$. Then $({ \mathcal S}_{V}, +, \cdot )$ is a semi-vector space over ${\mathbb R}_{0}^{+}$, where $+$ and $\cdot$ are addition and scalar multiplication (in ${\mathbb R}_{0}^{+}$) pointwise, respectively. \end{theorem} \begin{proof} The proof follows the same lines as those of Theorems~\ref{teo1}, \ref{teo2} and \ref{teo3}. \end{proof}
\subsection{Semi-Algebras}\label{subsec4}
We start this subsection by recalling the definitions of semi-algebra and semi-subalgebra. For more details the reader can consult \cite{Gahler:1999}. In \cite{Olivier:1995}, Olivier and Serrato investigated relation semi-algebras, i.e., semi-algebras that are both a Boolean algebra and an involutive semi-monoid and satisfy certain compatibility conditions (see page 2 of Ref.~\cite{Olivier:1995} for more details). Roy \cite{Roy:1970} studied the semi-algebras of continuous and monotone functions on compact ordered spaces.
\begin{definition}\label{semialgebra} A semi-algebra $A$ over a semi-field $K$ (or a $K$-semi-algebra) is a semi-vector space $A$ over $K$ endowed with a binary operation called multiplication of semi-vectors $\bullet: A \times A\longrightarrow A$ such that, $\forall \ u, v, w \in A$ and $\lambda \in K$: \begin{itemize} \item [ $\operatorname{(1a)}$] $ u \bullet (v + w)= (u \bullet v) + (u \bullet w)$ (left-distributivity); \item [ $\operatorname{(1b)}$] $ (u + v)\bullet w= (u \bullet w) + (v \bullet w)$ (right-distributivity); \item [ $\operatorname{(2)}$] $ \lambda (u \bullet v)= (\lambda u)\bullet v = u \bullet (\lambda v)$. \end{itemize} \end{definition}
A semi-algebra $A$ is \emph{associative} if $(u\bullet v)\bullet w=u\bullet (v\bullet w)$ for all $u, v, w \in A$; $A$ is said to be \emph{commutative} (or abelian) if the multiplication is commutative, that is, $\forall \ u, v \in A$, $u\bullet v= v\bullet u$; $A$ is called a semi-algebra with identity if there exists an element $1_A \in A$ such that $\forall \ u \in A$, $1_A \bullet u = u \bullet 1_A =u$; the element $1_A $ is called the identity of $A$. The identity element of a semi-algebra $A$ is unique (if it exists). If $A$ is a semi-free semi-vector space then the dimension of $A$ is defined as its semi-dimension regarded as a semi-vector space. A semi-algebra is \emph{simple} if it is simple as a semi-vector space.
\begin{example}\label{ex5} The set ${\mathbb R}_{0}^{+}$ is a commutative semi-algebra with identity $e=1$. \end{example}
\begin{example}\label{ex6} The set of square matrices of order $n$ whose entries are in ${\mathbb R}_{0}^{+}$, equipped with the sum of matrices, multiplication of a matrix by a scalar (in ${\mathbb R}_{0}^{+}$, of course) and by multiplication of matrices is an associative and non-commutative semi-algebra with identity $e=I_{n}$ (the identity matrix of order $n$), over ${\mathbb R}_{0}^{+}$. \end{example}
\begin{example}\label{ex7} The set ${\mathcal P}_{n}[x]$ of polynomials with coefficients from ${\mathbb R}_{0}^{+}$ and degree less than or equal to $n$, equipped with the usual polynomial addition and scalar multiplication, is a semi-vector space. \end{example}
\begin{example}\label{ex8} Let $V$ be a semi-vector space over a semi-field $K$. Then the set ${\mathcal L}(V, V)=\{T:V\longrightarrow V; T \operatorname{is \ a \ semi-linear \ operator}\}$ is a semi-vector space. If we define a vector multiplication as the composite of semi-linear operators (which is also semi-linear) then we have a semi-algebra over $K$. \end{example}
\begin{definition}\label{subsemialgebra} Let $A$ be a semi-algebra over $K$. We say that a non-empty set $S \subseteq A$ is a semi-subalgebra if $S$ is closed under the operations of $A$, that is, \begin{itemize} \item [ $\operatorname{(1)}$] $\forall \ u, v \in S$, $u + v \in S$; \item [ $\operatorname{(2)}$] $\forall \ u, v \in S$, $u \bullet v \in S$; \item [ $\operatorname{(3)}$] $\forall \ \lambda \in K$ and $\forall \ u \in S$, $\lambda u \in S$. \end{itemize} \end{definition}
\begin{definition}\label{A-homomorphism} Let $A$ and $B$ be two semi-algebras over $K$. We say that a map $T:A\longrightarrow B$ is a $K$-semi-algebra homomorphism if, $\forall \ u, v \in A$ and $\lambda \in K$, the following conditions hold: \begin{itemize} \item [ $\operatorname{(1)}$] $T(u + v) = T(u) + T(v)$; \item [ $\operatorname{(2)}$] $T(u \bullet v) = T(u) \bullet T(v)$; \item [ $\operatorname{(3)}$] $T(\lambda v ) = \lambda T(v)$. \end{itemize} \end{definition}
Definition~\ref{A-homomorphism} means that $T$ is both a semi-ring homomorphism and also semi-linear (as semi-vector space).
\begin{definition}\label{isomorphic} Let $A$ and $B$ be two $K$-semi-algebras. A $K$-semi-algebra isomorphism $T:A \longrightarrow B$ is a bijective $K$-semi-algebra homomorphism. If there exists such an isomorphism, we say that $A$ is isomorphic to $B$, written $A\cong B$. \end{definition}
The following results seem to be new, since semi-algebras over ${\mathbb R}_{0}^{+}$ have not been much investigated in the literature.
\begin{proposition}\label{propalghomo} Assume that $A$ and $B$ are two $K$-semi-algebras, where $K={\mathbb R}_{0}^{+}$ and $A$ has identity $1_A$. Let $T:A \longrightarrow B$ be a $K$-semi-algebra homomorphism. Then the following properties hold: \begin{itemize} \item [ $\operatorname{(1)}$] $T(0_A)= 0_B$; \item [ $\operatorname{(2)}$] If $ u\in A$ is invertible then its inverse is unique and $(u^{-1})^{-1}= u$; \item [ $\operatorname{(3)}$] If $T$ is surjective then $T(1_A) = 1_B$, i.e., $B$ also has identity; furthermore, if $u \in A$ is invertible then $T(u^{-1})= [T(u)]^{-1}$; \item [ $\operatorname{(4)}$] If $u, v \in A$ are invertible then $(u\bullet v )^{-1}= v^{-1}\bullet u^{-1}$; \item [ $\operatorname{(5)}$] The composite of $K$-semi-algebra homomorphisms is also a $K$-semi-algebra homomorphism; \item [ $\operatorname{(6)}$] If $T$ is a $K$-semi-algebra isomorphism then so is $T^{-1}:B \longrightarrow A$; \item [ $\operatorname{(7)}$] The relation $A \sim B$ if and only if $A$ is isomorphic to $B$ is an equivalence relation. \end{itemize} \end{proposition} \begin{proof} Note that Item~$\operatorname{(1)}$ holds because the additive cancellation law holds in the definition of semi-vector spaces (see Definition~\ref{defSVS}). We only show Item $\operatorname{(3)}$ since the remaining items are direct. Let $v \in B$; then there exists $u \in A$ such that $T(u)=v$. It then follows that $v \bullet T(1_A )= T(u\bullet 1_A)=v$ and $T(1_A ) \bullet v = T(1_A \bullet u)=v$, which means that $T(1_A)$ is the identity of $B$, i.e., $T(1_A) = 1_B$.
We have: $T(u) \bullet T(u^{-1})= T( u \bullet u^{-1})=T(1_A)=1_B$ and $T(u^{-1}) \bullet T(u)= T( u^{-1} \bullet u)=T(1_A)=1_B$, which implies $T(u^{-1})= [T(u)]^{-1}$. \end{proof}
\begin{proposition}\label{associunitsemi} If $A$ is an associative $K$-semi-algebra with identity $1_A$ then $A$ can be embedded in ${\mathcal L}(A, A)$, the semi-algebra of semi-linear operators on $A$. \end{proposition} \begin{proof} For every fixed $v \in A$, define $v^{*}:A \longrightarrow A$ as $v^{*}(x) = v\bullet x$. It is easy to see that $v^{*}$ is a semi-linear operator on $A$. Define $h: A \longrightarrow {\mathcal L}(A, A)$ by $h(v)= v^{*}$. We must show that $h$ is an injective $K$-semi-algebra homomorphism, where the product in ${\mathcal L}(A, A)$ is the composition of maps from $A$ into $A$. Fixing $u, v \in A$, we have: $[h(u + v)](x)= (u + v)^{*}(x)= (u + v)\bullet x = u\bullet x + v \bullet x = u^{*}(x) + v^{*}(x) = [h(u)](x) + [h(v)](x)$, hence $h(u + v)= h(u) + h(v)$. For $\lambda \in K$ and $v \in A$, it follows that $[h(\lambda v)](x) = (\lambda v)^{*}(x)= (\lambda v)\bullet x = \lambda (v\bullet x)= [\lambda h(v)](x)$, i.e., $h(\lambda v)= \lambda h(v)$. For fixed $u, v \in A$, $[h(u\bullet v)](x)= (u\bullet v)^{*}(x)= (u\bullet v)\bullet x = u\bullet (v\bullet x)=u\bullet v^{*}(x)=u^{*}(v^{*}(x))=[h(u)\circ h(v)](x)$, i.e., $h(u\bullet v)= h(u) \circ h(v)$. Assume that $h(u)=h(v)$, that is, $u^{*}=v^{*}$; hence, for every $x \in A$, $u^{*}(x) = v^{*}(x)$, i.e., $u\bullet x = v\bullet x$. Taking in particular $x=1_A$, it follows that $u = v$, which implies that $h$ is injective. Therefore, $A$ is isomorphic to $h(A)$, where $h(A)\subseteq {\mathcal L}(A, A)$. \end{proof}
\begin{definition}\label{semi-Liesemialgebra} Let $A$ be a semi-vector space over a semi-field $K$. Then $A$ is said to be a Lie semi-algebra if $A$ is equipped with a product $[ \ , \ ]: A \times A\longrightarrow A$ such that the following conditions hold: \begin{itemize} \item [ $\operatorname{(1)}$] $[ \ , \ ]$ is semi-bilinear, i.e., fixing the first (second) variable, $[ \ , \ ]$ is semi-linear w.r.t. the second (first) one;
\item [ $\operatorname{(2)}$] $[ \ , \ ]$ is anti-symmetric, i.e., $[v , v]=0$ $\forall \ v \in A$;
\item [ $\operatorname{(3)}$] $[ \ , \ ]$ satisfies the Jacobi identity: $\forall \ u, v, w \in A$, $[u, [v,w]]+ [w, [u,v]]+ [v, [w, u]]=0$. \end{itemize} \end{definition}
From Definition~\ref{semi-Liesemialgebra} we can see that a Lie semi-algebra can be non-associative, i.e., the product $[ \ , \ ]$ is not always associative.
Let us now consider the semi-algebra ${ \mathcal M}_n ({\mathbb R}_{0}^{+})$ of matrices of order $n$ with entries in ${\mathbb R}_{0}^{+}$ (see Example~\ref{ex6}). We know that ${ \mathcal M}_n ({\mathbb R}_{0}^{+})$ is simple, i.e., with the exception of the zero matrix (the zero vector), no matrix has an (additive) symmetric. Nevertheless, the product of two such matrices can be nonzero. In the case of a Lie semi-algebra $A$, however, if $A$ is simple then the only product $[ \ , \ ]$ that can be defined over $A$ is the zero product, as shown in the next result.
\begin{proposition}\label{semi-Lieabelian} If $A$ is a simple Lie semi-algebra over a semi-field $K$ then the semi-algebra is abelian, i.e., $[u, v]=0$ for all $u, v \in A$. \end{proposition} \begin{proof} Assume, by contradiction, that there exist $u, v \in A$ with $[u, v ] \neq 0$. From Items~$\operatorname{(1)}$~and~$\operatorname{(2)}$ of Definition~\ref{semi-Liesemialgebra}, it follows that $[u+v , u+v ] =[u, u] + [u, v] + [v, u] + [v, v]=0$, i.e., $[u, v] + [v, u]=0$. This means that the nonzero element $[u, v]$ has the additive symmetric $[v, u]$, contradicting the simplicity of $A$. \end{proof}
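For example, $({\mathbb R}_{0}^{+})^{n}$ is simple (if $u+v=0$ with nonnegative coordinates then $u=v=0$), so by Proposition~\ref{semi-Lieabelian} the only Lie semi-algebra structure it can carry is the trivial one, $[u,v]=0$ for all $u, v$.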
\begin{definition}\label{subLiesemi} Let $A$ be a Lie semi-algebra over a semi-field $K$. A Lie semi-subalgebra $B \subseteq A$ is a semi-subspace of $A$ which is closed under $[u, v ]$, i.e., for all $u, v \in B$, $[u, v] \in B$. \end{definition}
\begin{corollary} All semi-subspaces of a simple Lie semi-algebra $A$ are Lie semi-subalgebras of $A$. \end{corollary} \begin{proof} Apply Proposition~\ref{semi-Lieabelian}. \end{proof}
\section{Fuzzy Set Theory and Semi-Algebras}\label{sec3a}
The theory of semi-vector spaces and semi-algebras is a natural generalization of the corresponding theories of vector spaces and algebras. Since the scalars lie in semi-fields (weak semi-fields), some standard properties do not hold in this new context. However, as we have shown in Section~\ref{sec3}, even in the absence of symmetrizable elements, several results remain true. One application of the theory of semi-vector spaces is the investigation of Fuzzy Set Theory, which was introduced by Lotfali Askar-Zadeh \cite{Zadeh:1965}: the theory of semi-vector spaces fits naturally into the investigation and extension of results concerning fuzzy sets. Let us see an example.
Let $L$ be a linearly ordered complete lattice with distinct smallest and largest elements $0$ and $1$. Recall that a fuzzy number is a function $x:{\mathbb R}\longrightarrow L$ on the field of real numbers satisfying the following conditions (see \cite[Sect. 1.1]{Gahler:1999}): $\operatorname{(1)}$ for each $\alpha \in L_0$ the set $x_{\alpha}= \{\varphi \in {\mathbb R} \mid \alpha \leq x(\varphi)\}$ is a closed interval $[x_{\alpha l} , x_{\alpha r}]$, where $L_0= \{ \alpha \in L \mid \alpha > 0\}$; $\operatorname{(2)}$ the set $\{\varphi \in {\mathbb R} \mid 0 < x(\varphi)\}$ is bounded.
We denote by ${\mathbb R}_L$ the set of all fuzzy numbers; ${\mathbb R}_L$ can be equipped with a partial order in the following manner: $x \leq y $ if and only if $x_{\alpha l} \leq y_{\alpha l}$ and $x_{\alpha r} \leq y_{\alpha r}$ for all $\alpha \in L_0$. In this scenario, Gahler et al.\ showed that the concepts of semi-algebras can be utilized to extend the concept of fuzzy numbers, according to the following proposition: \begin{proposition}\cite[Proposition 19]{Gahler:1999} The set ${\mathbb R}_L$ is an ordered commutative semi-algebra. \end{proposition} Thus, a direct benefit of investigating the structures of semi-vector spaces and semi-algebras is the possibility of generating new and interesting results in Fuzzy Set Theory.
Another work relating semi-vector spaces and Fuzzy Set Theory is the paper by Bedregal et al. \cite{Milfont:2021}. In order to study the aggregation functions (geometric mean, weighted average, ordered weighted averaging, among others) w.r.t. an admissible order (a total order $\preceq$ on $L_n ([0, 1])$ such that for all $x, y \in L_n ([0, 1])$, $x \ {\leq}_{n}^{p} \ y \Longrightarrow x\preceq y$), the authors worked with semi-vector spaces over a weak semi-field.
Let $L_n ([0, 1]) = \{(x_1, x_2 , \ldots , x_n ) \in {[0, 1]}^{n} \mid x_1 \leq x_2 \leq \ldots \leq x_n \}$ and let $U= ([0, 1], \oplus , \cdot)$ be the weak semi-field defined as follows: for all $x, y \in [0, 1]$, $x \oplus y = \min\{ 1, x+y\}$ and $\cdot$ is the usual multiplication. The product order proposed by Shang et al.~\cite{Shang:2010} is given as follows: for all vectors $x= (x_1, x_2 , \ldots , x_n )$ and $y= (y_1, y_2 , \ldots , y_n )$ in $L_n ([0, 1])$, define $x \ {\leq}_{n}^{p} \ y \Longleftrightarrow {\pi}_{i}(x)\leq {\pi}_{i}(y) $ for each $i \in \{1, 2, \ldots , n\}$, where ${\pi}_i : L_n ([0, 1]) \longrightarrow [0, 1] $ is the $i$-th projection ${\pi}_i (x_1 , x_2 , \ldots , x_n ) = x_i$. With these concepts in mind, the authors showed two important results:
\begin{theorem}(see \cite[Theorem 1]{Milfont:2021})\label{mil21} ${\mathcal L}_{n} ([0, 1]) = (L_n ([0, 1]), \dotplus, \odot)$ is a semi-vector space over $U$, where, for $u=(x_1 , \ldots , x_n )$, $v=(y_1 , \ldots , y_n ) \in L_n ([0, 1])$ and $r \in [0, 1]$, $r \odot u = (rx_1 , \ldots , rx_n )$ and $u \dotplus v = (x_1 \oplus y_1 , \ldots , x_n \oplus y_n ) $. Moreover, $({\mathcal L}_{n} ([0, 1]), {\leq}_{n}^{p})$ is an ordered semi-vector space over $U$, where ${\leq}_{n}^{p}$ is the product order. \end{theorem}
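To fix ideas, consider an illustrative instance with $n=2$: for $u=(0.2,\,0.6)$ and $v=(0.3,\,0.7)$ in $L_2([0, 1])$ and $r=0.5$,
\[
u \dotplus v = (0.2\oplus 0.3,\; 0.6\oplus 0.7) = (0.5,\;1), \qquad
r \odot u = (0.1,\;0.3),
\]
and $u \ {\leq}_{2}^{p} \ v$ since $0.2\leq 0.3$ and $0.6\leq 0.7$; note that all the tuples involved have non-decreasing coordinates, as required for membership in $L_2([0, 1])$.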
\begin{proposition}(see \cite[Proposition 2]{Milfont:2021}) For any bijection $f: \{1, 2 , \ldots , n\} \longrightarrow \{1, 2 , \ldots , n\}$, the pair\\ $({\mathcal L}_{n}([0, 1]), {\preceq}_f)$ is an ordered semi-vector space over $U$, where ${\preceq}_f$, defined in \cite[Example 1]{Milfont:2021}, is an admissible order. \end{proposition} As a consequence of this investigation, the authors proposed an algorithm for a multi-criteria, multi-expert decision-making method.
Summarizing the ideas: the further the theory of semi-vector spaces is extended and developed, the more applications and results become available in Fuzzy Set Theory. It is therefore important to understand deeply the algebraic and geometric structures of semi-vector spaces, providing, in this way, support for the development of the theory itself as well as of other interesting theories such as, for example, Fuzzy Set Theory.
\section{Summary}\label{sec4}
In this paper we have extended the theory of semi-vector spaces, where the semi-field of scalars considered here is the set of nonnegative real numbers. We have proved several results in the context of semi-vector spaces and semi-linear transformations. We introduced the concepts of eigenvalues and eigenvectors of a semi-linear operator and of a matrix and showed how to compute them in specific cases. Topological properties of semi-vector spaces, such as completeness and separability, were also investigated. We have exhibited interesting new families of semi-vector spaces derived from semi-metrics, semi-norms and semi-inner products, among others. Additionally, some results concerning semi-algebras were presented. The results presented in this paper can possibly be utilized in the development and/or investigation of new properties of fuzzy systems and also in the study of related areas of research. \section*{Acknowledgment}
\small
\end{document}
\begin{document}
\title{Proposed experiment to test fundamentally binary theories}
\author{Matthias~Kleinmann} \email{[email protected]} \affiliation{Department of Theoretical Physics, University of the Basque Country UPV/EHU, P.O.~Box 644, E-48080 Bilbao, Spain}
\author{Tamás~Vértesi} \email{[email protected]} \affiliation{Institute for Nuclear Research, Hungarian Academy of Sciences, H-4001 Debrecen, P.O.~Box 51, Hungary}
\author{Adán~Cabello} \email{[email protected]} \affiliation{Departamento de Física Aplicada II, Universidad de Sevilla, E-41012 Sevilla, Spain}
\begin{abstract} Fundamentally binary theories are nonsignaling theories in which measurements
of many outcomes are constructed by selecting from binary measurements. They constitute a sensible alternative to quantum theory and have never been
directly falsified by any experiment. Here we show that fundamentally binary theories are experimentally testable
with current technology. For that, we identify a feasible Bell-type experiment on pairs of entangled
qutrits. In addition, we prove that, for any $n$, quantum $n$-ary correlations are not
fundamentally $(n-1)$-ary. For that, we introduce a family of inequalities that hold for fundamentally
$(n-1)$-ary theories but are violated by quantum $n$-ary correlations. \end{abstract}
\maketitle
\section{Introduction}
Quantum theory (QT) is the most successful theory physicists have ever devised. Still, there is no agreement on which physical reasons force its formalism
\cite{FS16}. It is therefore important to test ``close-to-quantum'' alternatives, defined as
those which are similar to QT in the sense that they have entangled states,
incompatible measurements, violation of Bell inequalities, and no experiment
has falsified them, and sensible in the sense that they are in some aspects
simpler than QT. Examples of these alternatives are theories allowing for almost quantum
correlations \cite{NGHA15}, theories in which measurements are fundamentally
binary \cite{KC16}, and theories allowing for a higher degree of
incompatibility between binary measurements \cite{BHSS13}.
Each of these alternatives identifies a particular feature of QT that we do not
fully understand and, as a matter of fact, may or may not be satisfied by
nature. For example, we still do not know which principle singles out the set of
correlations in QT \cite{Cabello15}. In contrast, the set of almost quantum correlations satisfies a list of
reasonable principles and is simple to characterize \cite{NGHA15}. Similarly, we do not know why in QT there are measurements that cannot be
constructed by selecting from binary measurements \cite{KC16}. However, constructing the set of measurements of the theory would be simpler if
this would not be the case. Finally, we do not know why the degree of incompatibility of binary
measurements in QT is bounded as it is, while there are theories that are not
submitted to such a limitation \cite{BHSS13}.
Unfortunately, we do not yet have satisfactory answers to these questions. Therefore, it is important to test whether nature behaves as predicted by QT
also in these particular aspects. However, this is not an easy task. Testing almost quantum theories is difficult because we still do not have a
well-defined theory; thus, there is not a clear indication on how we should
aim our experiments. Another reason, shared by theories with larger binary incompatibility, is that
the only way to test them is by proving that QT is wrong, which is, arguably,
very unlikely. The case of fundamentally binary theories is different. We have explicit theories \cite{KC16} and we know that fundamentally binary
theories predict supraquantum correlations for some experiments but subquantum
correlations for others. That is, if QT is correct, there are experiments that can falsify fundamentally
binary theories \cite{KC16}. The problem is that all known cases of subquantum correlations require
visibilities that escape the scope of current experiments.
This is particularly unfortunate now that, after years of efforts, we have
loophole-free Bell inequality tests \cite{HBD15,GVW15,SMC15,HKB16,W16}, tests
touching the limits of QT \cite{PJC15,CLBGK15}, and increasingly sophisticated
experiments using high-dimensional two-photon entanglement
\cite{VWZ02,GJVWZ06,DLBPA11}. Therefore, a fundamental challenge is to identify a feasible experiment
questioning QT beyond the local realistic theories \cite{Bell64}.
The main aim of this work is to present a feasible experiment capable of
excluding fundamentally binary theories. In addition, the techniques employed to identify that singular experiment will
allow us to answer a question raised in Ref.~\cite{KC16}, namely, whether or
not, for some $n$, quantum $n$-ary correlations are fundamentally $(n-1)$-ary.
\subsection{Device-independent scenario}
Consider a bipartite scenario where two observers, Alice and Bob, perform
independent measurements on a joint physical system. For a fixed choice of measurements $x$ for Alice and $y$ for Bob, $P(a,b|x,y)$
denotes the joint probability of Alice obtaining outcome $a$ and Bob obtaining
outcome $b$. We assume that both parties act independently in the sense that the marginal
probability for Alice to obtain outcome $a$ does not depend on the choice of
Bob's measurement $y$, i.e., $\sum_b P(a,b|x,y)\equiv
P(a,\omitted|x,\omitted)$, and analogously $\sum_a P(a,b|x,y)\equiv
P(\omitted,b|\omitted,y)$. These are the nonsignaling conditions, which are obeyed by QT whenever both
observers act independently, in particular, if the operations of the observers
are spacelike separated. However, QT does not exhaust all possible correlations subject to these
constraints \cite{PR94}.
The strength of this scenario lies in the fact that the correlations can be
obtained without taking into account the details of the experimental
implementation and hence it is possible to make statements that are
independent of the devices used. This device-independence allows us to test nature without assuming a particular
theory---such as QT---for describing any of the properties of the measurement
setup. This way, it is also possible to make theory-independent statements and, in
particular, to analyze the structure of any probabilistic theory that obeys
the nonsignaling conditions.
\subsection{Fundamentally binary theories}
One key element of the structure of any probabilistic theory was identified in
Ref.~\cite{KC16} and concerns how the set of measurements is constructed,
depending on the number of outcomes. According to Ref.~\cite{KC16}, it is plausible to assume that a theory
describing nature has, on a fundamental level, only measurements with two
outcomes while situations where a measurement has more outcomes are achieved
by classical postprocessing of one or several two-outcome measurements. To make this a consistent construction, it is also admissible that the
classical postprocessing depends on additional classical information and, in
the bipartite scenario, this classical information might be correlated between
both parties. The total correlations attainable in such a scenario are the binary nonsignaling
correlations, which are characterized by the convex hull of all nonsignaling
correlations obeying $P(a,\omitted|x,\omitted)= 0$ for all measurements $x$
and all but two outcomes $a$, and $P(\omitted,b|\omitted,y) = 0$ for all
measurements $y$ and all but two outcomes $b$. The generalization to $n$-ary nonsignaling correlations is straightforward.
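Schematically, and only as an illustration of this convex-hull characterization (the relabeling maps $g^{\lambda}_{x}$ and $h^{\lambda}_{y}$ below are our notation), a fundamentally binary correlation can be written as
\[
P(a,b|x,y)=\sum_{\lambda}p(\lambda)\sum_{a',b'}
\delta_{a,\,g^{\lambda}_{x}(a')}\,\delta_{b,\,h^{\lambda}_{y}(b')}\,
P_{\lambda}(a',b'|x,y),
\]
where $\lambda$ is shared classical information occurring with probability $p(\lambda)$, each $P_{\lambda}$ is a nonsignaling correlation in which, for every measurement, at most two outcomes occur with nonzero probability, and $g^{\lambda}_{x}$, $h^{\lambda}_{y}$ are local classical postprocessing (relabeling) maps.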
In Ref.~\cite{KC16}, it was shown that for no $n$ does the set of $n$-ary nonsignaling
correlations cover the whole set of quantum correlations. Although this is a general result, the proof in Ref.~\cite{KC16} has two
drawbacks: (i) It does not provide a test which is experimentally feasible. (ii) It does not allow us to answer whether or not quantum $n$-ary correlations
are still fundamentally $(n-1)$-ary. For example, the proof in Ref.~\cite{KC16} requires {10}-outcome quantum
measurements for excluding the binary case. In this work, we address both problems and provide (i') an inequality that holds for all binary nonsignaling correlations, but can
be violated using three-level quantum systems (qutrits) with current
technology, and (ii') a family of inequalities obeyed by $(n-1)$-ary nonsignaling correlations
but violated by quantum measurements with $n$ outcomes.
\section{Results}
\subsection{Feasible experiment to test fundamentally binary theories}
We first consider the case where Alice and Bob both can choose between two
measurements, $x=0,1$ and $y=0,1$, and each measurement has three outcomes
$a,b=0,1,2$. For a set of correlations $P(a,b|x,y)$, we define
\begin{equation}
I_a=\sum_{k,x,y=0,1} (-1)^{k+x+y}P(k,k|x,y), \end{equation}
where the outcomes with $k=2$ do not explicitly appear. With the methods explained in Sec.~\ref{polymeth}, we find that, up to
relabeling of the outcomes,
\begin{equation}\label{ineqa}
I_a\le 1 \end{equation}
holds for nonsignaling correlations if and only if the correlations are
fundamentally binary. However, according to QT, the inequality in Eq.~\eqref{ineqa} is violated, and
a value of
\begin{equation}\label{qvaluea}
I_a= 2(2/3)^{3/2}\approx 1.0887 \end{equation}
can be achieved by preparing a two-qutrit system in the pure state
\begin{equation}
\ket\psi=\frac{1}{2}(\sqrt{2}\ket{00}+ \ket{11}-\ket{22}) \end{equation}
and choosing the measurements $x,y=0$ as $M_{k|0}= V\proj{k}V^\dag$, and the
measurements $x,y=1$ as $M_{k|1}= U\proj{k}U^\dag$, where, in canonical matrix
representation,
\begin{equation}
V=\frac1{\sqrt{12}}\begin{pmatrix} 2 & 2 & 2 \\
-\sqrt{3}-1 & \sqrt{3}-1 & 2 \\
\sqrt{3}-1 & -\sqrt{3}-1 & 2 \end{pmatrix}, \end{equation}
and $U=\diag(-1,1,1)V$.
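As an illustrative numerical check (a sketch only; it assumes \texttt{numpy}, uses our own variable names, and takes both parties to measure the same projectors $M_{k|x}$, as described above), the following script evaluates $I_a$ for this state and these measurements and should return a value close to $2(2/3)^{3/2}\approx 1.0887$:
\begin{verbatim}
import numpy as np

s3 = np.sqrt(3.0)
V = np.array([[2.0, 2.0, 2.0],
              [-s3 - 1.0, s3 - 1.0, 2.0],
              [s3 - 1.0, -s3 - 1.0, 2.0]]) / np.sqrt(12.0)
U = np.diag([-1.0, 1.0, 1.0]) @ V

# |psi> = (sqrt(2)|00> + |11> - |22>)/2 in the product basis |3a+b>
psi = np.zeros(9)
psi[0], psi[4], psi[8] = np.sqrt(2.0) / 2.0, 0.5, -0.5

def M(k, x):
    # rank-one projector onto the k-th column of V (x = 0) or U (x = 1)
    w = (V if x == 0 else U)[:, k]
    return np.outer(w, w)

# I_a = sum over k,x,y in {0,1} of (-1)^(k+x+y) P(k,k|x,y)
I_a = sum((-1) ** (k + x + y) * psi @ np.kron(M(k, x), M(k, y)) @ psi
          for k in (0, 1) for x in (0, 1) for y in (0, 1))
print(I_a)
\end{verbatim}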
Using the second level of the Navascués--Pironio--Acín (NPA) hierarchy
\cite{NPA07}, we verify that the value in Eq.~\eqref{qvaluea} is optimal
within our numerical precision of $10^{-6}$. The visibility required to observe a violation of the inequality in
Eq.~\eqref{ineqa} is $91.7\%$, since the value for the maximally mixed state
is $I_a=0$. The visibility is defined as the minimal $p$ required to obtain a violation
assuming that the prepared state is a mixture of the target state and a
completely mixed state, $\rho_{\rm prepared} = p \proj\psi + (1-p) \rho_{\rm
mixed}$.
We show in Sec.~\ref{polymeth} that the inequality in Eq.~\eqref{ineqa} holds
already if only one of the measurements of either Alice or Bob is
fundamentally binary. Therefore, the violation of the inequality in Eq.~\eqref{ineqa} allows us to
make an even stronger statement, namely, that none of the measurements used is
fundamentally binary, thus providing a device-independent certificate of the
genuinely ternary character of all measurements in the experimental setup.
The conclusion at this point is that the violation of the inequality in
Eq.~\eqref{ineqa} predicted by QT could be experimentally observable even
achieving visibilities that have been already attained in previous
Bell-inequality experiments on qutrit--qutrit systems
\cite{VWZ02,GJVWZ06,DLBPA11}. It is important to point out that, in addition, a compelling experiment
requires that the local measurements are implemented as measurements with
three outcomes rather than measurements that are effectively two-outcome
measurements. That is, there should be a detector in each of the three possible outcomes of
each party. The beauty of the inequality in Eq.~\eqref{ineqa} and the simplicity of the
required state and measurements suggest that this experiment could be carried
out in the near future.
\subsection{Quantum $n$-ary correlations are not fundamentally $(n-1)$-ary}
If our purpose is to test whether or not one particular measurement is
fundamentally binary (rather than all of them), then it is enough to consider
a simpler scenario where Alice has a two-outcome measurement $x=0$ and a
three-outcome measurement $x=1$, while Bob has three two-outcome measurements
$y=0,1,2$. We show in Sec.~\ref{polymeth} that for the combination of correlations
\begin{equation}\label{ieb}
I_b=-P(0,\omitted|0,\omitted)+\sum_{k=0,1,2}[P(0,0|0,k)-P(k,0|1,k)], \end{equation}
up to relabeling of the outcomes and Bob's measurement settings,
\begin{equation}\label{ineqb}
I_b\le 1 \end{equation}
holds for nonsignaling correlations if and only if the correlations are
fundamentally binary. According to QT, this bound can be violated with a value of
\begin{equation}\label{qvalueb}
I_b=\sqrt{16/15}\approx 1.0328, \end{equation}
by preparing the state
\begin{equation}
\ket\psi=\frac1{\sqrt{(3\zeta+1)^2+2}}(\ket{00}+\ket{11}+\ket{22}+ \zeta\ket\phi\!\ket\phi), \end{equation}
where $\zeta= -\frac13+\frac16\sqrt{10\sqrt{15}-38}\approx -0.19095$,
$\ket\phi=\ket0+\ket1+\ket2$, and choosing Alice's measurement $x=0$ as
$A_{0|0}=\openone-A_{1|0}$, $A_{1|0}=\proj{\phi}/3$, and measurement $x=1$ as
$A_{k|1}=\proj k$, for $k=0,1,2$, and Bob's measurements $y=0,1,2$ as
$B_{0|y}=\openone-B_{1|y}$ and $B_{1|k}=\proj{\eta_k}/\braket{\eta_k|\eta_k}$,
where $\ket{\eta_k}=\ket{k}+\xi\ket\phi$, for $k=0,1,2$, and $\xi =
-\frac13+\frac16\sqrt{6\sqrt{15}+22}\approx 0.78765$. [Another optimal solution is obtained by flipping the sign before the
$(\frac16\sqrt{\,})$-terms in $\xi$ and $\zeta$, yielding $\xi\approx -1.4543$
and $\zeta\approx -0.47572$.]
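As a quick consistency check of the normalization: since $\ket\phi\!\ket\phi=\sum_{i,j}\ket{ij}$ has squared norm $9$ and overlap $3$ with $\ket{00}+\ket{11}+\ket{22}$,
\[
\bigl\|\ket{00}+\ket{11}+\ket{22}+\zeta\ket\phi\!\ket\phi\bigr\|^{2}
=3+6\zeta+9\zeta^{2}=(3\zeta+1)^{2}+2 .
\]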
We use the third level of the NPA hierarchy to confirm that, within our
numerical precision of $10^{-6}$, the value in Eq.~\eqref{qvalueb} is optimal. Notice, however, that the visibility required to observe a violation of the
inequality in Eq.~\eqref{ineqb} is $96.9\%$. This contrasts with the $91.7\%$ required for the inequality in
Eq.~\eqref{ineqa} and shows how a larger number of outcomes allows us to
certify more properties with a smaller visibility.
Nevertheless, what is interesting about the inequality in Eq.~\eqref{ineqb} is
that it is a member of a family of inequalities and this family allows us to
prove that, for any $n$, quantum $n$-ary correlations are not fundamentally
$(n-1)$-ary, a problem left open in Ref.~\cite{KC16}. For that, we modify the scenario used for the inequality in Eq.~\eqref{ineqb},
so that now Alice's measurement $x=1$ has $n$ outcomes, while Bob has $n$
measurements with two outcomes. We let $I_b^{(n)}$ be as $I_b$ defined in Eq.~\eqref{ieb}, with the only
modification that in the sum, $k$ takes values from $0$ to $n-1$. Then,
\begin{equation}\label{ineqc}
I_b^{(n)}\le n-2 \end{equation}
is satisfied for all fundamentally $(n-1)$-ary correlations. The proof is given in Sec.~\ref{proof}. Clearly, the value $I_b^{(n)}=n-2$ can already be reached by choosing the fixed
local assignments where all measurements of Alice and Bob always have outcome
$a,b=0$. According to QT, it is possible to reach values of $I_b^{(n)}> (n-2)+1/(4n^3)$,
as can be found by generalizing the quantum construction from above to
$n$-dimensional quantum systems with $\xi=\sqrt2$ and $\zeta=
-1/n+1/(\sqrt2n^2)$. Thus, the $(n-1)$-ary bound is violated already by $n$-ary quantum
correlations. Note that the maximal quantum violation is already very small for $n=4$, as the
bound from the third level of the NPA hierarchy is $I_b^{(4)}<2.00959$.
\section{Methods}
\subsection{Restricted nonsignaling polytopes}\label{polymeth}
We now detail the systematic method that allows us to obtain the inequalities
in Eqs.~\eqref{ineqa}, \eqref{ineqb}, and \eqref{ineqc}. We write $S=\bisc{a_1, a_2,\dotsc, a_n}{b_1, b_2,\dotsc, b_m}$ for the case
where Alice has $n$ measurements and the first measurement has $a_1$ outcomes,
the second $a_2$ outcomes, etc., and similarly for Bob and his $m$
measurements with $b_1$, $b_2$,\dots, outcomes. The nonsignaling correlations for such a scenario form a polytope $C(S)$. For another bipartite scenario $S'$ we consider all correlations $P'\in C(S')$
that can be obtained by local classical postprocessing from any $P\in C(S)$. The convex hull of these correlations is again a polytope and is denoted by
$C(S\rightarrow S')$.
The simplest nontrivial polytope of fundamentally binary correlations is then
$C(\bisc{2,2}{2,2}\rightarrow \bisc{3,3}{3,3})$. We construct the vertices of this polytope and compute the {468} facet
inequalities (i.e., tight inequalities for fundamentally binary correlations)
with the help of the Fourier-Motzkin elimination implemented in the software
\texttt{porta} \cite{porta}. We confirm the results by using the independent software \texttt{ppl}
\cite{ppl}. Up to relabeling of the outcomes, only the facet $I_a\le 1$ is not a face of
the set of nonsignaling correlations $C(\bisc{3,3}{3,3})$, which concludes
our construction of $I_a$. In addition, we find that
\begin{equation}\label{coneq} C(\bisc{2,3}{3,3})= C(\bisc{2,2}{2,2}\rightarrow \bisc{2,3}{3,3}), \end{equation}
and therefore the inequality in Eq.~\eqref{ineqa} holds for all nonsignaling
correlations where at least one of the measurements is fundamentally binary.
As a complementary question we consider the case where only a single
measurement has three outcomes. According to Eq.~\eqref{coneq}, the smallest scenarios where such a
verification is possible are $\bisc{2,3}{2,2,2}$ and $\bisc{2,2}{2,2,3}$. We first find that $C(\bisc{2,2}{3,3,3})= C(\bisc{2,2}{2,2,2}\rightarrow
\bisc{2,2}{3,3,3})$, i.e., even if all of Bob's measurements would be
fundamentally ternary, the correlations are always within the set of
fundamentally binary correlations. Hence, we investigate the polytope $C(\bisc{2,2}{2,2,2}\rightarrow
\bisc{2,3}{2,2,2})$ and its {126} facets. Up to symmetries, only the facet $I_b\le 1$ is not a face of
$C(\bisc{2,3}{2,2,2})$.
Our method also covers other scenarios. As an example we study the polytope $C(\bisc{2,4}{2,4}\rightarrow
\bisc{2,2,2}{2,2,2})$ with its {14052} facets. In this case, the four-outcome measurements have to be distributed to
two-outcome measurements (or the two-outcome measurement is used twice). Hence, this scenario is equivalent to the requirement that for each party at
least two of the three measurements are compatible. The polytope has, up to relabeling, {10} facets that are not a face of
$C(\bisc{2,2,2}{2,2,2})$. According to the fourth level of the NPA hierarchy, two of the facets may
intersect with the quantum correlations. While for one of them the required visibility (with respect to correlations
where all outcomes are equally probable) is at least $99.94\%$, the other
requires a visibility of at least $97.88\%$. This latter facet is $I_c\le 0$, where
\begin{multline}
I_c=-P(10|00)-P(00|01)-P(00|10)-P(00|11)\\
-P(10|12)-P(01|20)-P(01|21)+P(00|22). \end{multline}
For arbitrary nonsignaling correlations, $I_c\le 1/2$ is tight, while within
QT, $I_c< 0.0324$ must hold. We can construct a numeric solution for two qutrits which matches the bound
from the third level of the NPA hierarchy up to our numerical precision of
$10^{-6}$. The required quantum visibility then computes to $97.2\%$. The quantum optimum is reached for measurements $A_{0|k}=\proj{\alpha_k}$,
$A_{1|k}=\openone -A_{0|k}$, and $B_{0|k}=\proj{\beta_k}$, $B_{1|k}=\openone
-B_{0|k}$, where all $\ket{\alpha_k}$ and $\ket{\beta_k}$ are normalized and
$\braket{\alpha_0|\alpha_1}\approx 0.098$, $\braket{\alpha_0|\alpha_2}\approx
0.630$, $\braket{\alpha_1|\alpha_2}\approx 0.572$, and
$\braket{\beta_k|\beta_\ell}\approx 0.771$ for $k\ne \ell$. A state achieving the maximal quantum value is $\ket\psi\approx
0.67931\ket{00}+0.67605\ket{11}+0.28548\ket{22}$. Note that $I_c\approx 0.0318$ can still be reached according to QT when Alice
has only two incompatible measurements, by choosing
$\braket{\alpha_0|\alpha_1}= 0$. Curiously, the facet $I_c\le 0$ is equal to the inequality $M_{3322}$ in
Ref.~\cite{BGS05} and a violation of it has been observed recently by using
photonic qubits \cite{CLBGK15}. However, while $M_{3322}$ is the only nontrivial facet of the polytope
investigated in Ref.~\cite{BGS05}, it is just one of several nontrivial facets
in our case.
\subsection{Proof of the inequality in Eq.~\eqref{ineqc}}\label{proof}
Here, we show that for $(n-1)$-ary nonsignaling correlations, the inequality in
Eq.~\eqref{ineqc} holds. We start by letting for some fixed index $0\le \ell < n$,
\begin{subequations} \begin{align}
F&=-\sum_b R_{0,b|0,\ell} + \sum_k [ R_{0,0|0,k}-R_{k,0|1,k} ],\\
X_{1;a|x,y}&=\sum_b(R_{a,b|x,y}-R_{a,b|x,\ell}),\\
X_{2;b|x,y}&=\sum_a(R_{a,b|x,y}-R_{a,b|0,y}), \end{align} \end{subequations}
where all $R_{a,b|x,y}$ are linearly independent vectors from a real vector
space $V$. Clearly, for any set of correlations, we can find a linear function $\phi\colon
V\rightarrow {\mathbb R}$ with $\phi(R_{a,b|x,y})= P(a,b|x,y)$. For such a function, $I_b^{(n)}= \phi(F)$ holds and $\phi(X_\tau)= 0$ are all
the nonsignaling conditions. The maximal value of $I_b^{(n)}$ for $(n-1)$-ary nonsignaling correlations is
therefore given by
\begin{equation}\label{prim}\begin{split}
\textstyle\max_{\ell'}
\max\{ \phi(F) \mid\; & \phi\colon V\rightarrow {\mathbb R} \text{, linear,}\\
&\phi(X_\tau) = 0, \text{ for all } \tau, \\
& \phi(R_{\ell',b|1,y})= 0, \text{ for all } b,y,\\
& \textstyle\sum_\upsilon \phi(R_\upsilon)= 2n, \text{ and }\\
& \phi(R_\upsilon)\ge 0, \text{ for all } \upsilon\}. \end{split}\end{equation}
Since the value of the inner maximization does not depend on the choice of
$\ell$, we can choose $\ell=\ell'$. Equation~\eqref{prim} is a linear program, and the equivalent dual to this
program can be written as
\begin{equation}\label{dual}
\max_\ell
\min_{t,\boldsymbol\xi, \boldsymbol\eta}
\set{ t | t\ge \zeta_\upsilon \text{ for all } \upsilon}, \end{equation}
where $\boldsymbol\zeta$ is the solution of
\begin{equation}
2 n F - \sum_\tau \xi_\tau X_\tau -\sum_{b,y}\eta_{b,y} R_{\ell,b|1,y}=
\sum_\upsilon \zeta_\upsilon R_\upsilon. \end{equation}
To obtain an upper bound in Eq.~\eqref{dual}, we choose $\boldsymbol\eta\equiv
2n$ and all $\xi_\tau= 0$, but
$\xi_{1;a|0,k}=4$,
$\xi_{1;k|1,k}=-2n$,
$\xi_{2;b|1,\ell}=-3n+2$, and
$\xi_{2;b|1,k}=-(-1)^bn+2$, for $k\ne \ell$.
This yields $\max_\upsilon \zeta_\upsilon= n-2$ for all $\ell$ and hence the
$(n-1)$-ary nonsignaling correlations obey $I_b^{(n)}\le n-2$.
\section{Conclusions}
There was little chance to learn new physics from the recent loophole-free
Bell inequality tests \cite{HBD15,GVW15,SMC15,HKB16,W16}. Years of convincing experiments \cite{FC72,ADR82,WJSWZ98} allowed us to
anticipate the conclusions: nature cannot be explained by local realistic
theories \cite{Bell64}, there are measurements for which there is not a joint
probability distribution \cite{Fine82}, and there are states that are not a
convex combination of local states \cite{Werner89}.
Here we have shown how to use Bell-type experiments to gain insights into QT. In Ref.~\cite{KC16}, it was shown that QT predicts correlations that cannot be
explained by nonsignaling correlations produced by fundamentally binary
measurements (including Popescu--Rohrlich boxes \cite{PR94}). We proposed a feasible experiment which will allow us to either exclude all
fundamentally binary probabilistic theories or to falsify QT. If the results of the experiment violate the inequality in Eq.~\eqref{ineqa},
as predicted by QT, then we would learn that no fundamentally binary theory
can possibly describe nature. In addition, it would prove that all involved measurements are genuine
three-outcome measurements. If the inequality in Eq.~\eqref{ineqa} is not violated despite achieving visibilities
that would \emph{a priori} lead to such a violation, then we would have evidence
that QT is wrong at a fundamental level (although being subtle to detect in
experiments). We have also gone beyond Ref.~\cite{KC16} by showing that, for any $n$, already
$n$-ary quantum correlations are not fundamentally $(n-1)$-ary.
\begin{acknowledgments} This work is supported by Project No.~FIS2014-60843-P, ``Advanced Quantum Information'' (MINECO, Spain), with FEDER funds, the FQXi Large Grant ``The Observer Observed: A Bayesian Route to the Reconstruction of Quantum Theory'', the project ``Photonic Quantum Information'' (Knut and Alice Wallenberg Foundation, Sweden), the Hungarian National Research Fund OTKA (Grants No.~K111734 and No.~KH125096), the EU (ERC Starting Grant GEDENTQOPT), and the DFG (Forschungsstipendium KL~2726/2-1). \end{acknowledgments}
\end{document}
\begin{document}
\title{Quantitative estimates for simple zeros of $L$-functions} \author{Andrew R. Booker} \address{School of Mathematics, University of Bristol, Bristol, BS8 1TW, UK} \email{[email protected]} \author{Micah B. Milinovich} \address{Department of Mathematics, University of Mississippi, University, MS 38677 USA} \email{[email protected]} \author{Nathan Ng} \address{Department of Mathematics and Computer Science, University of Lethbridge, Lethbridge, AB Canada T1K 3M4} \email{[email protected]}
\thanks{Research of the first author was supported by EPSRC Grant \texttt{EP/K034383/1}. Research of the second author was supported by the NSA Young Investigator Grants \texttt{H98230-15-1-0231} and \texttt{H98230-16-1-0311}. Research of the third author was supported by NSERC Discovery Grant (RGPIN- 2015-05972). No data were created in the course of this study.}
\begin{abstract} We generalize a method of Conrey and Ghosh \cite{CG88} to prove quantitative estimates for simple zeros of modular form $L$-functions of arbitrary conductor. \end{abstract}
\subjclass[2010]{Primary 11F66, 11F11, 11M41}
\maketitle
\section{Introduction} Let $f\in S_k(\Gamma_1(N))$ be a classical holomorphic modular form of weight $k$ and level $N$. Assume that $f$ is \emph{primitive}, meaning that it is a normalized Hecke eigenform in the new subspace. Then it has a Fourier expansion of the shape $$ f(z)=\sum_{n=1}^\infty\lambda_f(n)n^{\frac{k-1}2}e^{2\pi inz}, $$
where the $\lambda_f(n)$ are multiplicative and satisfy the Ramanujan bound $|\lambda_f(n)|\le d(n)$. Let $\Lambda_f(s)=\Gamma_\mathbb{C}(s+\tfrac{k-1}2)L_f(s)$ denote the complete $L$-function of $f$, with analytic normalization, where $$ \Gamma_\mathbb{C}(s)=2(2\pi)^{-s}\Gamma(s) \quad\text{and}\quad L_f(s)=\sum_{n=1}^\infty\frac{\lambda_f(n)}{n^s}, $$ and let $$ N^s_f(T)=\#\bigl\{\rho\in\mathbb{C}:\Lambda_f(\rho)=0, \Lambda_f'(\rho)\ne0,
|\Im(\rho)|\le T\bigr\} $$ be the number of simple zeros of $\Lambda_f(s)$ with imaginary part in $[-T,T]$.
In \cite{MN14}, the second and third authors showed that if $\Lambda_f(s)$ satisfies the Generalized Riemann Hypothesis, then $$ N^s_f(T)\ge T(\log{T})^{-\varepsilon} $$ for any fixed $\varepsilon>0$ and all sufficiently large $T>0$. Unconditionally, when $N=1$ and $k=12$, Conrey and Ghosh \cite{CG88} showed that \begin{equation}\label{eq:cgestimate} \forall\varepsilon>0, \exists T\ge\varepsilon^{-1}\text{ such that } N^s_f(T)\ge T^{\frac16-\varepsilon}. \end{equation} Moreover, their proof works more generally for $N=1$ and arbitrary $k$, provided that $N^s_f(T)$ is not identically $0$. In light of the first author's result \cite{Boo16} that $N^s_f(T)\to\infty$ as $T\to\infty$, \eqref{eq:cgestimate} holds for all primitive $f$ of conductor $1$.
In this paper we aim to prove similar unconditional quantitative estimates of simple zeros for primitive forms of arbitrary conductor $N$. However, we encounter some obstacles that are reminiscent of the well-known difficulty of extending Hecke's converse theorem to arbitrary conductor, and are not present for $N=1$. Taking inspiration from Weil's generalization \cite{Wei67} of Hecke's converse theorem, we consider character twists. For a Dirichlet character $\chi\pmod*{q}$, let $f\otimes\chi$ denote the unique primitive form such that $\lambda_{f\otimes\chi}(n)=\lambda_f(n)\chi(n)$ for all $n$ coprime to $q$. \begin{theorem}\label{thm:twist} Let $f\in S_k(\Gamma_1(N))$ be a primitive form. Then there is a Dirichlet character $\chi$ such that \eqref{eq:cgestimate} holds with $f\otimes\chi$ in place of $f$. \end{theorem}
Next, for odd conductors we obtain a weaker but unconditional quantitative estimate for $N^s_f(T)$, without the twist. Moreover, we show that there is a sort of ``Deuring--Heilbronn phenomenon'' at play, so that if $N^s_f(T)$ is unexpectedly small then we can substantially improve our result for $N^s_{f\otimes\chi}(T)$. \begin{theorem}\label{thm:oddN} Let $f\in S_k(\Gamma_1(N))$ be a primitive form of odd conductor. Then $$ \forall\varepsilon>0, \exists T\ge\varepsilon^{-1}\text{ such that } N^s_f(T)\ge\begin{cases} \exp((\log T)^{\frac13-\varepsilon})&\text{if $k=1$ or $f$ is a CM form},\\ \log\log\log{T}&\text{otherwise}. \end{cases} $$ Further, if $N^s_f(T)\ll1+T^\varepsilon$ for every $\varepsilon>0$, then \begin{enumerate} \item[(i)] there is a Dirichlet character $\chi$ such that, $\forall\varepsilon>0, \exists T\ge\varepsilon^{-1}$ such that $\Lambda_{f\otimes\chi}(s)$ has at least $T^{\frac12-\varepsilon}$ simple zeros with real part $\frac12$ and imaginary part in $[-T,T]$; \item[(ii)] $\Lambda_f(s)$ has simple zeros with real part arbitrarily close to $1$. \end{enumerate} \end{theorem}
\begin{remarks}\ \begin{enumerate} \item The exponent $\frac16$ in \eqref{eq:cgestimate} is related to the best known subconvexity estimate for modular form $L$-functions in the $t$ aspect; it can be replaced by any $\delta>0$ such that
$L_f(\frac12+it)\ll_{f,\varepsilon}(1+|t|)^{\frac12-\delta+\varepsilon}$ holds for all primitive forms $f$ and all $\varepsilon>0$. In \cite{BMN19} we showed that $\delta=\frac16$ is admissible. Very recent work of Munshi \cite{Mun18} improves this to $\delta=\frac16+\frac1{1200}$ for forms of level $1$, with a corresponding improvement to \eqref{eq:cgestimate} in that case. \item In Theorem~\ref{thm:twist}, one can take the conductor of $\chi$ to be $1$ or a prime number bounded by a polynomial function of $N$. \item The proof of Theorem~\ref{thm:oddN} makes use of the idea originating with Conrey and Ghosh \cite{CG88} of twisting the coefficients of $L_f(s)$ by $(-1)^n$ to prevent the main terms of our estimate from cancelling out. This relies implicitly on the fact that there is no primitive Dirichlet character of conductor $2$, and is the ultimate reason for our restriction to odd $N$. \item The improved estimate in Theorem~\ref{thm:oddN} in the Galois and CM cases arises from Coleman's Vinogradov-type zero-free region for Hecke $L$-functions \cite{Col90}. \end{enumerate} \end{remarks}
\section{Dirichlet series} In order to establish the existence of simple zeros it is useful to study not only $L_f(s)$, but some related Dirichlet series and their additive twists. This is one of the central ideas in \cite{CG88}. A key role is played by the series $$ D_f(s)=L_f(s)\frac{d^2}{ds^2}\log L_f(s)=\sum_{n=1}^{\infty}c_f(n)n^{-s}, $$ which has a meromorphic continuation to $\mathbb{C}$ with poles precisely at the simple zeros of $L_f(s)$ (including the trivial zeros $s=\frac{1-k}2-n$ for $n=0, 1, 2, \ldots$).
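To see why only the simple zeros give rise to poles, note that if $L_f(s)=c(s-\rho)^{m}u(s)$ with $c\neq0$, $m\ge1$ and $u$ analytic and nonvanishing at $\rho$, then $\frac{d^2}{ds^2}\log L_f(s)=-\frac{m}{(s-\rho)^{2}}+(\log u)''(s)$ and hence
\[
D_f(s)=-mc\,(s-\rho)^{m-2}u(s)+c\,(s-\rho)^{m}u(s)\,(\log u)''(s),
\]
which is analytic at $\rho$ when $m\ge2$ and has a simple pole with residue $-c\,u(\rho)=-L_f'(\rho)$ when $m=1$.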
For $\alpha\in\mathbb{Q}^\times$ and $\chi$ a Dirichlet character, let $$ L_f(s,\alpha)=\sum_{n=1}^\infty\lambda_f(n)e(\alpha n)n^{-s} \quad\text{and}\quad L_f(s,\chi)=\sum_{n=1}^\infty\lambda_f(n)\chi(n)n^{-s}. $$ Likewise, define $$ D_f(s,\alpha)=\sum_{n=1}^\infty c_f(n)e(\alpha n)n^{-s} \quad\text{and}\quad D_f(s,\chi)=\sum_{n=1}^\infty c_f(n)\chi(n)n^{-s}. $$
Let $\xi$ denote the nebentypus character of $f$. Set $$ Q(N)=\{1\}\cup\{q\text{ prime}:q\nmid N\}, $$ and for each $q\in Q(N)$, define the rational functions $$ P_{f,q}(x)=\begin{cases} 1&\text{if }q=1,\\ 1-\lambda_f(q)x+\xi(q)x^2&\text{otherwise} \end{cases} $$ and $$ R_{f,q}(x)=\begin{cases} 0&\text{if }q=1,\\ \frac{q\log^2{q}}{q-1} \frac{x(\lambda_f(q)-4\xi(q)x+\lambda_f(q)\xi(q)x^2)} {P_{f,q}(x)} &\text{if }q\ne1. \end{cases} $$ These are such that, if $$ \chi_0(n)=\begin{cases} 1&\text{if }(n,q)=1,\\ 0&\text{otherwise} \end{cases} $$ denotes the trivial character mod $q$, then $$ L_f(s,\chi_0)=P_{f,q}(q^{-s})L_f(s) $$ and \begin{equation}\label{Dfchi0} D_f(s,\chi_0)=P_{f,q}(q^{-s})D_f(s)-\frac{q-1}{q}R_{f,q}(q^{-s})L_f(s). \end{equation}
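For instance, the first of these two identities is just the removal of the Euler factor at $q$: since $q\nmid N$, the Euler factor of $L_f(s)$ at $q$ is $P_{f,q}(q^{-s})^{-1}$, so that, for $\Re(s)>1$,
\[
L_f(s,\chi_0)=\sum_{(n,q)=1}\frac{\lambda_f(n)}{n^{s}}
=\bigl(1-\lambda_f(q)q^{-s}+\xi(q)q^{-2s}\bigr)L_f(s)
=P_{f,q}(q^{-s})L_f(s).
\]
The identity \eqref{Dfchi0} follows in the same way, removing the terms with $q\mid n$ from the Dirichlet series defining $D_f$ and using the (easily verified) identity $P_{f,q}(q^{-s})\,\frac{d^{2}}{ds^{2}}\log P_{f,q}(q^{-s})=-\frac{q-1}{q}R_{f,q}(q^{-s})$.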
For any $a\in\mathbb{Z}$ coprime to $q$, we define \begin{align*} D_{f,a,q}(s)&=D_f(s,\tfrac{a}q)-R_{f,q}(q^{-s})L_f(s) =\sum_{n=1}^{\infty}c_{f,a,q}(n)n^{-s},\\ D_{f,a,q}^*(s)&=D_{f,a,q}(s)+\psi'(s+\tfrac{k-1}2)L_f(s,\tfrac{a}{q}), \quad\text{where }\psi(s)=\frac{\Gamma'}{\Gamma}(s) \end{align*} and $$ D_{f,a,q}(s,\alpha)=\sum_{n=1}^{\infty}c_{f,a,q}(n)e(\alpha n)n^{-s} \quad\text{for }\alpha\in\mathbb{Q}^\times. $$
To each of $L_f$, $D_f$, $D_{f,a,q}$, $D_{f,a,q}^*$ and their twists, we define completed versions $\Lambda_f$, $\Delta_f$, $\Delta_{f,a,q}$,
$\Delta_{f,a,q}^*$ obtained by multiplying by $\Gamma_\mathbb{C}(s+\frac{k-1}2)$. By the Ramanujan bound $|\lambda_f(q)|\le 2$ and \cite[Proposition~3.1]{BK11}, $\Delta_f(s,a/q)-\Delta_{f,a,q}^*(s)$ is holomorphic for $\Re(s)>0$. In turn, the analytic properties of $\Delta_{f,a,q}^*(s)$ are described by the following proposition. \begin{proposition}\label{voronoi} Let $f\in S_k(\Gamma_0(N),\xi)$ be a primitive form, $q\in Q(N)$, and $a\in\mathbb{Z}$ coprime to $q$. Then $\Delta_{f,a,q}^*(s)$ is a ratio of entire functions of finite order, has at most simple poles, all of which are contained in the critical strip $\{s\in\mathbb{C}:\Re(s)\in(0,1)\}$, and satisfies the functional equation \begin{equation}\label{dstarfunceq} \Delta_{f,a,q}^*(s)=\epsilon\xi(q)(Nq^2)^{\frac12-s} \Delta_{\bar{f},-\overline{Na},q}^*(1-s), \end{equation} where $\bar{f}\in S_k(\Gamma_0(N),\overline{\xi})$ is the dual of $f$, $\epsilon\in\mathbb{C}^\times$ is the root number of $f$ and $\overline{Na}$ denotes a multiplicative inverse of $Na\pmod*{q}$. \end{proposition} \begin{proof} For $q=1$ the result follows immediately from \cite[(3.1)]{Boo16}, so we may assume that $q$ is prime. Let $\chi$ be a Dirichlet character of conductor $q$. Then the complete twisted $L$-function $\Lambda_f(s,\chi)$ satisfies the functional equation $$ \Lambda_f(s,\chi)=\epsilon\xi(q)\chi(N)\frac{\tau(\chi)^2}{q} (Nq^2)^{\frac12-s}\Lambda_{\bar{f}}(1-s,\overline{\chi}), $$ where $\epsilon\in\mathbb{C}^\times$ is the root number of $f$. Applying \cite[(3.1)]{Boo16} to $f\otimes\chi$, we thus have \begin{equation}\label{dfunceq1} \Delta_f(s,\chi) -\epsilon\xi(q)\chi(N)\frac{\tau(\chi)^2}{q} (Nq^2)^{\frac12-s}\Delta_{\bar{f}}(1-s,\overline{\chi}) =\Lambda_f(s,\chi)\bigl(\psi'(\tfrac{k+1}2-s)-\psi'(s+\tfrac{k-1}2)\bigr). \end{equation}
Next, we have $$ \Delta_f\!\left(s,\frac{a}q\right)=\Delta_f(s) -\frac{q}{q-1}\Delta_f(s,\chi_0) +\frac1{q-1}\sum_{\substack{\chi\pmod*{q}\\\chi\ne\chi_0}} \tau(\overline{\chi})\chi(a)\Delta_f(s,\chi), $$ where $\chi_0$ is the trivial character mod $q$. Combining this with \eqref{Dfchi0} we get \begin{equation}\label{Deltafaq} \begin{aligned} \Delta_{f,a,q}(s) =\left(1-\frac{q}{q-1}P_{f,q}(q^{-s})\right)\Delta_f(s) +\frac1{q-1}\sum_{\substack{\chi\;(\text{mod }q)\\\chi\ne\chi_0}} \tau(\overline{\chi})\chi(a)\Delta_f(s,\chi). \end{aligned} \end{equation} Note in particular that $\Delta_{f,a,q}(s)$ is a ratio of entire functions of finite order, and all of its poles in $\{s\in\mathbb{C}:\Re(s)>0\}$ are simple and located at simple zeros of either $\Lambda_f(s)$ or $\Lambda_f(s,\chi)$ for some $\chi\ne\chi_0$.
Note that $P_{f,q}$ satisfies the functional equation $$ 1-\frac{q}{q-1}P_{f,q}(q^{-s}) =\xi(q)q^{1-2s}\left(1-\frac{q}{q-1}P_{\bar{f},q}(q^{s-1})\right), $$ and thus, by \cite[(3.1)]{Boo16}, \begin{equation}\label{dfunceq2} \begin{aligned} \left(1-\frac{q}{q-1}P_{f,q}(q^{-s})\right)&\Delta_f(s) -\epsilon\xi(q)(Nq^2)^{\frac12-s} \left(1-\frac{q}{q-1}P_{\bar{f},q}(q^{s-1})\right) \Delta_{\bar{f}}(1-s)\\ &=\left(1-\frac{q}{q-1}P_{f,q}(q^{-s})\right) \Lambda_f(s)\bigl(\psi'(\tfrac{k+1}2-s)-\psi'(s+\tfrac{k-1}2)\bigr). \end{aligned} \end{equation} Thus, replacing $f$ by $\bar{f}$, $s$ by $1-s$, $a$ by $-\overline{Na}$ and $\chi$ by $\overline{\chi}$ in \eqref{Deltafaq}, we get \begin{align*} \Delta_{\bar{f},-\overline{Na},q}(1-s) =\left(1-\frac{q}{q-1}P_{\bar{f},q}(q^{s-1})\right)\Delta_{\bar{f}}(1-s) +\frac1{q-1}\sum_{\substack{\chi\pmod*{q}\\\chi\ne\chi_0}} \tau(\chi)\chi(-Na)\Delta_{\bar{f}}(1-s,\overline{\chi}). \end{align*} Applying the functional equations \eqref{dfunceq1} and \eqref{dfunceq2}, together with the relation $\tau(\chi)\tau(\overline{\chi})=\chi(-1)q$, we thus have \begin{align*} &\Delta_{f,a,q}(s) -\epsilon\xi(q)(Nq^2)^{\frac12-s} \Delta_{\bar{f},-\overline{Na},q}(1-s)\\ &=\Biggl[ \left(1-\frac{q}{q-1}P_{f,q}(q^{-s})\right)\Lambda_f(s) +\frac1{q-1}\sum_{\substack{\chi\pmod*{q}\\\chi\ne\chi_0}} \tau(\overline{\chi})\chi(a)\Lambda_f(s,\chi)\Biggr] \bigl(\psi'(\tfrac{k+1}2-s)-\psi'(s+\tfrac{k-1}2)\bigr)\\ &=\Lambda_f(s,\tfrac{a}{q}) \bigl(\psi'(\tfrac{k+1}2-s)-\psi'(s+\tfrac{k-1}2)\bigr). \end{align*} Applying the classical Voronoi formula \cite[p.~179, (A.10)]{KMV02} $$ \Lambda_f(s,\tfrac{a}{q}) =\epsilon\xi(q)(Nq^2)^{\frac12-s} \Lambda_{\bar{f}}\bigl(1-s,-\tfrac{\overline{Na}}q\bigr), $$ we arrive at \eqref{dstarfunceq}.
Finally, by \eqref{Deltafaq} and the nonvanishing of automorphic $L$-functions for $\Re(s)\ge1$ \cite{JS76}, $\Delta_{f,a,q}^*(s)$ is holomorphic for $\Re(s)\ge1$. This conclusion applies to $\Delta_{\bar{f},-\overline{Na},q}^*(s)$ as well, so by \eqref{dstarfunceq}, all poles of $\Delta_{f,a,q}^*(s)$ have real part in $(0,1)$. \end{proof}
Fix, for the remainder of this section, a choice of $f,a,q$ as in Proposition~\ref{voronoi}, and $\alpha\in\mathbb{Q}^\times$. We define $$
N^s_{f,a,q}(T)=\#\bigl\{\rho\in\mathbb{C}:|\Im(\rho)|\le T, \Res{s=\rho}\Delta_{f,a,q}^*(s)\ne0\bigr\} $$ and \begin{equation}\label{Sydefn} S_{f,a,q}(y,\alpha)= \sum_{\rho\in\mathbb{C}}\Res{s=\rho}\Delta_{f,a,q}^*(s)(y-i\alpha)^{-\rho-\frac{k-1}2} \quad\text{for }y\in\mathbb{R}_{>0}, \end{equation} where $(y-i\alpha)^{-\rho-\frac{k-1}2}$ is defined in terms of the principal branch of $\log(y-i\alpha)$. Our goal is to derive the following expression for the Mellin transform of $S_{f,a,q}(y,\alpha)$, up to a holomorphic function on $\{s\in\mathbb{C}:\Re(s)>0\}$: \begin{proposition}\label{prop:Mellin} Define \begin{equation}\label{eq:Hdef} H_{f,a,q,\alpha}(s)= \Delta_{f,a,q}(s,\alpha)-\epsilon\xi(q)(i\sgn\alpha)^k (Nq^2\alpha^2)^{s-\frac12}\Delta_{\bar{f},-\overline{Na},q} \!\left(s,-\frac1{Nq^2\alpha}\right) \end{equation} and $$
I_{f,a,q,\alpha}(s)=\int_0^{|\alpha|/4} S_{f,a,q}(y,\alpha)y^{s+\frac{k-1}2}\frac{dy}{y}. $$ Then $I_{f,a,q,\alpha}(s)-H_{f,a,q,\alpha}(s)$ has analytic continuation to $\Re(s)>0$. Moreover, if $$
\int_0^{|\alpha|/4}|S_{f,a,q}(y,\alpha)| y^{\sigma+\frac{k-1}2}\frac{dy}{y}<\infty $$ for some $\sigma\ge0$, then $H_{f,a,q,\alpha}(s)$ is holomorphic for $\Re(s)>\sigma$. \end{proposition} The proof will be carried out in several lemmas, and involves the following auxiliary functions defined on $\mathbb{H}=\{z\in\mathbb{C}:\Im(z)>0\}$: $$ F(z)=2\sum_{n=1}^\infty c_{f,a,q}(n)n^{\frac{k-1}{2}}e(nz), \quad\overline{F}(z)=2\sum_{n=1}^\infty c_{\bar{f},-\overline{Na},q}(n)n^{\frac{k-1}{2}}e(nz), $$ $$ A(z)=\frac1{2\pi i}\int_{\Re(s)=\frac{k}2}\Lambda_f(s,\tfrac{a}q) \big(\psi'(s+\tfrac{k-1}{2})+\psi'(s-\tfrac{k-1}{2})\big)(-iz)^{-s-\frac{k-1}2}\,ds, $$ and $$ B(z)=\frac1{2\pi i}\int_{\Re(s)=\frac{k}2}\Lambda_f(s,\tfrac{a}{q}) \frac{\pi^2}{\sin^2(\pi(s+\tfrac{k-1}2))}(-iz)^{-s-\frac{k-1}2}\,ds. $$
We first derive the following expression for $S_{f,a,q}$. \begin{lemma}\label{Sfaqz} For $z=\alpha+iy\in\mathbb{H}$, we have \begin{equation}\label{eq:Sfaqz} S_{f,a,q}(y,\alpha)=F(z) -\frac{\epsilon\xi(q)}{(-i\sqrt{N}qz)^k} \overline{F}\!\left(-\frac1{Nq^2z}\right)+A(z)-B(z). \end{equation} \end{lemma} \begin{proof} Let $0<\varepsilon<\frac{1}{2}$. For $z\in\mathbb{H}$ we define \[ I_R(z)=\frac1{2\pi i}\int_{\Re(s)=1+\varepsilon}\Delta_{f,a,q}(s)(-iz)^{-s-\frac{k-1}2}\,ds, \quad I_L(z)=\frac1{2\pi i}\int_{\Re(s)=-\varepsilon}\Delta_{f,a,q}(s)(-iz)^{-s-\frac{k-1}2}\,ds. \] For the remainder of the proof we let $z =\alpha+iy$.
Since $\Delta_{f,a,q}^*(s)$ is a ratio of entire functions of finite order with at most simple poles, by the calculus of residues we have \[ \Res{s=0}\Delta_{f,a,q}(s)(-iz)^{-s-\frac{k-1}2} +S_{f,a,q}(y,\alpha)=I_R(z)-I_L(z). \] Note that the residue term at $s=0$ vanishes unless $k=1$. We have \begin{align*} I_R(z)&=\frac1{2\pi i}\int_{\Re(s)=1+\varepsilon}\Gamma_\mathbb{C}(s+\tfrac{k-1}{2}) D_{f,a,q}(s)(-iz)^{-s-\frac{k-1}2}\,ds\\ &=2(-2\pi iz)^{-\frac{k-1}2}\sum_{n=1}^{\infty} c_{f,a,q}(n) \frac1{2\pi i}\int_{\Re(s)=1+\varepsilon}\Gamma(s+\tfrac{k-1}{2}) (-2\pi inz)^{-s}\,ds. \end{align*} Using the identity $$ \frac1{2\pi i}\int_{\Re(s)=1+\varepsilon}\Gamma(s+\tfrac{k-1}{2})z^{-s}\,ds= z^{\frac{k-1}{2}}e^{-z}\quad\text{for }\Re(z)>0, $$ it follows that \begin{equation}\label{IRidentity} I_R(z)=2\sum_{n=1}^\infty c_{f,a,q}(n)n^{\frac{k-1}{2}}e(nz)=F(z). \end{equation} By the functional equation, we have $$ \Delta_{f,a,q}(s)=\epsilon\xi(q)(Nq^2)^{\frac12-s} \Delta_{\bar{f},-\overline{Na},q}(1-s)+\Lambda_f(s,\tfrac{a}{q}) \big( \psi'(\tfrac{k+1}{2}-s)-\psi'(s+ \tfrac{k-1}{2}) \big), $$ so $I_L(z)=I_{L1}(z)+I_{L2}(z)$, where \begin{align}\label{IL1} I_{L1}(z)&=\frac1{2\pi i}\int_{\Re(s)=-\varepsilon} \epsilon\xi(q)(Nq^2)^{\frac{1}2-s} \Delta_{\bar{f},-\overline{Na},q}(1-s)(-iz)^{-s-\frac{k-1}2}\,ds, \\ \label{IL2} I_{L2}(z)&=\frac1{2\pi i}\int_{\Re(s)=-\varepsilon} \Lambda_f(s,\tfrac{a}{q}) \big(\psi'(\tfrac{k+1}{2}-s)-\psi'(s+\tfrac{k-1}{2})\big) (-iz)^{-s-\frac{k-1}2}\,ds. \end{align}
Making the substitution $s\mapsto 1-s$ in \eqref{IL1}, we get \begin{equation}\label{IL1identity} \begin{aligned} I_{L1}(z)&=\frac{1}{2\pi i} \int_{\Re(s)=1+\varepsilon} \epsilon\xi(q)(Nq^2)^{s-\frac{1}2} \Delta_{\bar{f},-\overline{Na},q}(s)(-iz)^{s-\frac{k+1}2}\,ds\\ &=2\epsilon\xi(q)(Nq^2)^{-\frac12}(-iz)^{-\frac{k+1}2} (2\pi)^{-\frac{k-1}2} \frac1{2\pi i}\int_{\Re(s)=1+\varepsilon} \Delta_{\bar{f},-\overline{Na},q}(s)\Big(\frac{2\pi}{-iNq^2z}\Big)^{-s}\,ds\\ &=2\epsilon\xi(q)(Nq^2)^{-\frac12}(-iz)^{-\frac{k+1}2}(2\pi)^{-\frac{k-1}2} \sum_{n=1}^\infty c_{\bar{f},-\overline{Na},q}(n) \Big(\frac{2\pi n}{-iNq^2z}\Big)^{\frac{k-1}{2}}e\Big(-\frac{n}{Nq^2z}\Big)\\ &=\frac{2\epsilon\xi(q)}{(-i\sqrt{N}qz)^k} \sum_{n=1}^\infty c_{\bar{f},-\overline{Na},q}(n)n^{\frac{k-1}{2}} e\Big(-\frac{n}{Nq^2z}\Big)\\ &=\frac{\epsilon\xi(q)}{(-i\sqrt{N}qz)^k} \overline{F}\!\left(-\frac1{Nq^2z}\right). \end{aligned} \end{equation}
Next, note that the integrand in \eqref{IL2} is holomorphic for $-\frac{k-1}{2}<\Re(s)<\frac{k+1}{2}$. Moving the contour to $\Re(s)=\frac{k}2$, we get a contribution from the pole at $s=0$ (present only when $k=1$) of $$ \Res{s=0}\Lambda_f(s,\tfrac{a}{q})\psi'(s+\tfrac{k-1}2)(-iz)^{-s-\frac{k-1}2} =-\Res{s=0}\Delta_{f,a,q}(s,\tfrac{a}{q})(-iz)^{-s-\frac{k-1}2}. $$ Thus \begin{align*} I_{L2}(z)+\Res{s=0}&\Delta_{f,a,q}(s,\tfrac{a}{q})(-iz)^{-s-\frac{k-1}2}\\ &=\frac1{2\pi i} \int_{\Re(s)=\frac{k}2} \Lambda_f(s,\tfrac{a}{q}) \big(\psi'(\tfrac{k+1}2-s)-\psi'(s+\tfrac{k-1}2)\big)(-iz)^{-s-\frac{k-1}2}\,ds. \end{align*} The reflection formula for $\Gamma$ implies that $\psi'(1-s)+\psi'(s)=\frac{\pi^2}{\sin^2(\pi s)}$, so \[ \psi'(\tfrac{k+1}{2}-s) -\psi'(s+\tfrac{k-1}{2}) =\frac{\pi^2}{\sin^2(\pi(s+\tfrac{k-1}{2}))} -\psi'(s+ \tfrac{k-1}{2})-\psi'(s-\tfrac{k-1}{2}). \] Therefore $I_{L2}(z)+\Res{s=0}\Delta_{f,a,q}(s,\tfrac{a}{q})(-iz)^{-s-\frac{k-1}2} =I_{L2B}(z)-I_{L2A}(z)$, where \begin{align*} I_{L2A}(z)&=\frac1{2\pi i}\int_{\Re(s)=\frac{k}2}\Lambda_f(s,\tfrac{a}{q}) \big(\psi'(s+\tfrac{k-1}{2})+\psi'(s-\tfrac{k-1}{2})\big)(-iz)^{-s-\frac{k-1}2}\,ds,\\ I_{L2B}(z)&=\frac1{2\pi i}\int_{\Re(s)=\frac{k}2} \Lambda_f(s,\tfrac{a}{q})\frac{\pi^2}{\sin^2(\pi(s+\tfrac{k-1}2))}(-iz)^{-s-\frac{k-1}2}\,ds. \end{align*} Hence $$ S_{f,a,q}(y,\alpha)=I_R(z)-I_{L1}(z)+I_{L2A}(z)-I_{L2B}(z). $$ By applying \eqref{IRidentity}, \eqref{IL1identity} and by setting $A(z)=I_{L2A}(z)$ and $B(z)= I_{L2B}(z)$, we establish Lemma \ref{Sfaqz}. \end{proof}
Next we evaluate
$\int_0^{|\alpha|/4}S_{f,a,q}(y,\alpha)y^{s+\frac{k-1}2}\frac{dy}{y}$, considering each term on the right-hand side of \eqref{eq:Sfaqz} in turn. \begin{lemma}\label{Flemma}
$\int_0^{|\alpha|/4}F(\alpha+iy)y^{s+\frac{k-1}2}\frac{dy}{y} -\Delta_{f,a,q}(s,\alpha)$ continues to an entire function of $s$. \end{lemma} \begin{proof} From the definition of $F$ we compute that $$ \int_0^\infty F(\alpha+iy)y^{s+\frac{k-1}2}\frac{dy}{y} =\Delta_{f,a,q}(s,\alpha). $$
Moreover, $F(\alpha+iy)$ decays exponentially as $y\to\infty$, so the contribution to the integral from $y>|\alpha|/4$ is entire. \end{proof}
\begin{lemma}\label{Fbarlemma} For any $M\in\mathbb{Z}_{\ge0}$, \begin{equation}\label{eq:fbarmellin} \begin{aligned}
&\int_0^{|\alpha|/4}\bigl(-i\sqrt{N}q(\alpha+iy)\bigr)^{-k} \overline{F}\!\left(-\frac1{Nq^2(\alpha+iy)}\right) y^{s+\frac{k-1}2}\frac{dy}{y}\\ &-(i\sgn\alpha)^k\sum_{m=0}^{M-1}(-i\alpha)^{-m} {{s+m-\frac{k+1}2}\choose{m}}(Nq^2\alpha^2)^{s-\frac12+m} \Delta_{\bar{f},-\overline{Na},q}\!\left(s+m,-\frac1{Nq^2\alpha}\right) \end{aligned} \end{equation} continues to a holomorphic function on $\{s\in\mathbb{C}:\Re(s)>1-M\}$. \end{lemma} \begin{proof}
As the proof of this lemma is very similar to that of \cite[Lemma~3.3]{Boo16}, we just provide a sketch and refer to the appropriate parts of loc.~cit.\ for the relevant details. Fix $y\in(0,|\alpha|/4]$, and set $z=\alpha+iy$, $\beta=-1/Nq^2\alpha$, and $u=y/\alpha$. It may be checked that \[
-\frac1{Nq^2z}=\beta+i|\beta u|-\frac{\beta u^2}{1+iu}. \] Therefore \begin{align*} &(-i\sqrt{N}qz)^{-k}\overline{F}\!\left(-\frac1{N q^2 z}\right)\\
&=2(-i\sqrt{N}q\alpha)^{-k}\sum_{n=1}^\infty c_{\bar{f},-\overline{Na},q}(n)n^{\frac{k-1}2}e(\beta n)e^{-2\pi n|\beta u|} (1+iu)^{-k}e\Big(-\frac{n\beta u^2}{1+iu}\Big). \end{align*} It was shown in \cite[p.~820]{Boo16} that \[ (1+iu)^{-k}e\Big(-\frac{n\beta u^2}{1+iu}\Big) =\sum_{m=0}^\infty(-iu)^m\sum_{j=0}^m\binom{m+k-1}{m-j}
\frac{(-2\pi n|\beta u|)^j}{j!}, \]
and for $M,K\in\mathbb{Z}_{\ge 0}$ and $|u|\le\frac14$, \[ \sum_{m=M}^\infty(-iu)^m\sum_{j=0}^m\binom{m+k-1}{m-j}
\frac{(-2\pi n|\beta u|)^j}{j!}
\ll_{\alpha,M,K}|u|^{M-K}n^{-K}e^{2\pi n|\beta u|}. \] Thus, we obtain \begin{equation}\label{Fbaridentity} \begin{aligned} &(-i\sqrt{N}qz)^{-k}\overline{F}\!\left(-\frac1{Nq^2z}\right)
=O_{M,K}\Big(y^{M-K}\sum_{n=1}^\infty|c_{\bar{f},-\overline{Na},q}(n)|n^{\frac{k-1}{2}-K}\Big) +2(-i\sqrt{N}q\alpha)^{-k}\\ &\times\sum_{m=0}^{M-1}\Big(-\frac{iy}{\alpha}\Big)^m \sum_{j=0}^m\binom{m+k-1}{m-j} \sum_{n=1}^\infty c_{\bar{f},-\overline{Na},q}(n)n^{\frac{k-1}2}e(\beta n) \frac1{j!}\Big(-\frac{2\pi ny}{Nq^2\alpha^2}\Big)^j e^{-\frac{2\pi ny}{Nq^2\alpha^2}}. \end{aligned} \end{equation} By the choice $K=\lfloor\frac{k-1}2\rfloor+2$, the error term converges and is $O_M(y^{M-K})$. For the other term note that \begin{align*} &2y^m\sum_{n=1}^\infty c_{\bar{f},-\overline{Na},q}(n)n^{\frac{k-1}2}e(\beta n) \frac1{j!}\Big(-\frac{2\pi ny}{Nq^2\alpha^2}\Big)^j e^{-\frac{2\pi ny}{Nq^2\alpha^2}}\\ &=2\frac{y^{j+m}}{j!}\frac{d^j}{dy^j} \sum_{n=1}^\infty c_{\bar{f},-\overline{Na},q}(n)n^{\frac{k-1}2}e(\beta n) e^{-\frac{2\pi ny}{Nq^2\alpha^2}}\\ &=\frac{y^{j+m}}{j!}\frac{d^j}{dy^j} \frac1{2\pi i}\int_{\Re(s)=m+2} (Nq^2\alpha^2)^{s+\frac{k-1}2}\Delta_{\bar{f},-\overline{Na},q}(s,\beta)y^{-s-\frac{k-1}2}\,ds\\ &=\frac1{2\pi i}\int_{\Re(s)=2} \binom{-s-\frac{k-1}2-m}{j} (Nq^2\alpha^2)^{s+\frac{k-1}2+m} \Delta_{\bar{f},-\overline{Na},q}(s+m,\beta)y^{-s-\frac{k-1}2}\,ds. \end{align*} Inserting this in the last term of \eqref{Fbaridentity} and using the Chu--Vandermonde identity \[ \sum_{j=0}^m\binom{m+k-1}{m-j}\binom{-s-\frac{k-1}2-m}{j}= \binom{-s+\frac{k-1}2}{m}=(-1)^m\binom{s+m-\frac{k+1}2}{m}, \] we arrive at \begin{align*} &(-i\sqrt{N}qz)^{-k}\overline{F}\!\left(-\frac1{Nq^2z}\right)\\ &=O_M(y^{M-\lfloor\frac{k+3}2\rfloor}) +(i\sgn\alpha)^k\sum_{m=0}^{M-1}\frac{(-i\alpha)^{-m}}{2\pi i}\int_{\Re(s)=2} {{s+m-\frac{k+1}2}\choose{m}} (Nq^2\alpha^2)^{s-\frac12+m}\\ &\hspace{8cm}\cdot \Delta_{\bar{f},-\overline{Na},q}\!\left(s+m,-\frac1{Nq^2\alpha}\right) y^{-s-\frac{k-1}2}\,ds. \end{align*}
We multiply both sides by $y^{s+\frac{k-1}2-1}$ and integrate over
$y\in(0,|\alpha|/4]$. The error term yields a holomorphic function for $\Re(s)>2-M$. As for the sum over $m$, by shifting the contour to the right, we see that each term decays rapidly as $y\to\infty$, so the integral over $(0,|\alpha|/4]$ differs from the full Mellin transform by an entire function. By Mellin inversion, it follows that \eqref{eq:fbarmellin} is holomorphic for $\Re(s)>2-M$. Finally, replacing $M$ by $M+1$ and discarding the final term of the sum concludes the proof of the lemma. \end{proof}
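The Chu--Vandermonde step in the proof above can be spot-checked numerically for particular values of $s$, $k$ and $m$; the sketch below (Python/\texttt{mpmath}, with arbitrary illustrative parameters) evaluates both sides of the binomial identity via the Gamma-function definition of the binomial coefficient.
\begin{verbatim}
# Spot-check (illustrative only) of
#   sum_{j=0}^{m} C(m+k-1, m-j) C(-s-(k-1)/2-m, j) = (-1)^m C(s+m-(k+1)/2, m).
from mpmath import mp, mpc, binomial

mp.dps = 20
k, m, s = 4, 5, mpc(0.7, 2.3)        # arbitrary test values
lhs = sum(binomial(m + k - 1, m - j) * binomial(-s - (k - 1)/2 - m, j)
          for j in range(m + 1))
rhs = (-1)**m * binomial(s + m - (k + 1)/2, m)
print(lhs)
print(rhs)                           # the two values agree
\end{verbatim}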
\begin{lemma}\label{Alemma}
$\Gamma_\mathbb{C}(s)^{-1}\int_0^{|\alpha|/4}A(\alpha+iy)y^s\frac{dy}y$ continues to an entire function of $s$. \end{lemma} \begin{proof} Let $\Phi(s)=\psi'(s+\tfrac{k-1}2)+\psi'(s-\tfrac{k-1}2)$. By the identity $\psi'(s)=\int_1^\infty\frac{\log x}{x-1}x^{-s}\,dx$, we have $\Phi(s)=\int_1^\infty\phi(x)x^{-s-\frac{k-1}2}\,dx$ for $\Re(s)>\frac{k-1}{2}$, where $\phi(x)=\frac{x^{k-1}+1}{x-1}\log{x}$. It follows that $$ \Phi(s)\Gamma(s+\tfrac{k-1}{2})=\int_1^\infty\phi(x)x^{-s-\frac{k-1}2}\,dx \int_0^\infty e^{-y}y^{s+\frac{k-1}2}\,\frac{dy}{y} =\int_1^\infty\phi(x)\int_0^\infty e^{-y} \Big(\frac{y}{x}\Big)^{s+\frac{k-1}{2}}\frac{dy}{y}\,dx. $$ By the variable change $y\mapsto xy$ we obtain $$ \Phi(s)\Gamma(s+\tfrac{k-1}{2})= \int_1^\infty\phi(x)\int_0^\infty e^{-xy}y^{s+\frac{k-1}{2}}\frac{dy}{y}\,dx =\int_0^\infty\Big(\int_1^\infty\phi(x)e^{-xy}\,dx\Big)y^{s+\frac{k-1}{2}}\frac{dy}{y}. $$ By Mellin inversion, \begin{equation}\label{mellininversion} \int_1^\infty\phi(x)e^{-xy}\,dx =\frac1{2\pi i}\int_{\Re(s)=2}\Phi(s)\Gamma(s+\tfrac{k-1}2)y^{-s-\frac{k-1}2}\,ds. \end{equation} Observe that $L_{\bar{f}}(s,-\frac{\overline{Na}}{q}) =\sum_{n=1}^\infty b_nn^{-s}$, where $b_n=\lambda_{\bar{f}}(n)e(-\frac{n\overline{Na}}{q})$. Thus for $z\in\mathbb{H}$, \begin{align*} A(z)&=2\sum_{n=1}^\infty b_n\cdot \frac1{2\pi i}\int_{\Re(s)=2}\Phi(s) \Gamma(s+\tfrac{k-1}{2})(-2\pi inz)^{-s-\frac{k-1}2}\,ds\\ &=2\sum_{n=1}^\infty b_n\int_1^\infty\phi(x)e^{2\pi inxz}\,dx, \end{align*} where the last step follows from \eqref{mellininversion}. For $z=\alpha+iy$ this simplifies to \begin{equation}\label{eq:Aalphaiy} A(\alpha+iy)=2\sum_{n=1}^\infty b_n\int_1^\infty\phi(x) e(\alpha nx)e^{-2\pi nxy}\,dx. \end{equation} Using this expression, it follows that \begin{equation}\label{Amellintransform} \begin{aligned} &\int_0^\infty A(\alpha+iy)y^s\frac{dy}{y} =2\sum_{n=1}^\infty b_n\int_1^\infty\phi(x)e(\alpha nx) \int_0^\infty e^{-2\pi nxy}y^s\frac{dy}{y}\,dx\\ &=\Gamma_\mathbb{C}(s)\sum_{n=1}^\infty b_nn^{-s} \int_1^\infty\phi(x)e(\alpha nx)x^{-s}\,dx. \end{aligned} \end{equation}
For $j=0,1,2,\ldots$, define the sequence of functions $\phi_j(x,s)$ by \[ \phi_0(x,s)=\phi(x), \quad \phi_{j+1}(x,s)=x\frac{\partial\phi_j}{\partial x}(x,s)-(s+j)\phi_j(x,s). \] Integrating by parts, \[ \int_1^\infty\phi_j(x,s)e(\alpha nx)x^{-s-j}\,dx =-\frac{e(\alpha n)\phi_j(1,s)}{2\pi i\alpha n} -\frac1{2\pi i\alpha n}\int_1^\infty\phi_{j+1}(x,s)e(\alpha nx) x^{-s-j-1}\,dx. \] Repeated application of this yields \begin{equation}\label{intparts} \begin{aligned} \int_1^\infty\phi(x)e(\alpha nx)x^{-s}\,dx &=e(\alpha n)\sum_{j=0}^{m-1}\frac{\phi_j(1,s)}{(-2\pi i\alpha n)^{j+1}}\\ &+(-2\pi i\alpha n)^{-m}\int_1^\infty\phi_m(x,s) e(\alpha nx)x^{-s-m}\,dx \end{aligned} \end{equation} for $m\in\mathbb{Z}_{\ge0}$. By \eqref{Amellintransform} and \eqref{intparts} it follows that \begin{align*} \frac1{\Gamma_\mathbb{C}(s)} \int_0^\infty A(\alpha+iy)y^s\frac{dy}{y} &=\sum_{j=0}^{m-1}\frac{\phi_j(1,s)}{(-2\pi i\alpha)^{j+1}} L_{\bar{f}}(s+j+1,-\tfrac{\overline{Na}}{q}+\alpha)\\ &+(-2\pi i\alpha)^{-m}\sum_{n=1}^\infty\frac{b_n}{n^{s+m}} \int_1^\infty\phi_m(x,s)e(\alpha nx)x^{-s-m}\,dx. \end{align*} Each term in the sum extends to an entire function of $s$, by
\cite[Proposition~3.1]{BK11}. Furthermore, it may be checked that $\phi_m(x,s)\ll_{m,k}(1+|s|)^mx^{k-1}$. Therefore the last integral is holomorphic for $\Re(s)>k-m$. Letting $m\to\infty$ shows that $\Gamma_\mathbb{C}(s)^{-1}\int_0^\infty A(\alpha+iy)y^s\frac{dy}y$ continues to an entire function.
Finally, from \eqref{eq:Aalphaiy} we see that $A(\alpha+iy)$ decays exponentially as $y\to\infty$, and hence
$\int_{|\alpha|/4}^\infty A(\alpha+iy)y^s\frac{dy}{y}$ is entire. This completes the proof. \end{proof}
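The two integral representations invoked at the start of the proof of Lemma~\ref{Alemma}, namely the formula for $\psi'(s)$ and the resulting formula for $\Phi(s)$, can also be verified numerically. The sketch below (Python/\texttt{mpmath}; the values of $s$ and $k$ are arbitrary illustrations) compares the integrals with mpmath's built-in polygamma function.
\begin{verbatim}
# Illustrative numerical check (not part of the argument) of
#   psi'(s) = int_1^oo log(x)/(x-1) x^{-s} dx
#   Phi(s)  = int_1^oo phi(x) x^{-s-(k-1)/2} dx,
#             phi(x) = (x^{k-1}+1) log(x)/(x-1),
# where Phi(s) = psi'(s+(k-1)/2) + psi'(s-(k-1)/2) and Re(s) > (k-1)/2.
from mpmath import mp, quad, log, psi, inf, mpf

mp.dps = 20
s, k = mpf('3.5'), 4                     # need s > (k-1)/2 for the second identity

trigamma = lambda w: psi(1, w)           # psi(1, .) is the trigamma function psi'
lhs1 = quad(lambda x: log(x)/(x - 1) * x**(-s), [1, inf])
print(lhs1, trigamma(s))

phi = lambda x: (x**(k - 1) + 1) * log(x) / (x - 1)
lhs2 = quad(lambda x: phi(x) * x**(-s - (k - 1)/2), [1, inf])
print(lhs2, trigamma(s + (k - 1)/2) + trigamma(s - (k - 1)/2))
\end{verbatim}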
\begin{lemma}\label{Blemma}
$\Gamma_\mathbb{C}(s)^{-1}\int_0^{|\alpha|/4}B(\alpha+iy)y^s\frac{dy}y$ continues to an entire function of $s$. \end{lemma} \begin{proof} Following the proof of \cite[Lemma~3.4]{Boo16}, we obtain $$ B(\alpha+iy)=\sum_{j=0}^{M-1}P_j(\alpha)y^j+O_M(y^M)
\quad\text{for all }M\in\mathbb{Z}_{\ge0}, y\in\bigl(0,\tfrac{|\alpha|}4\bigr], $$ where $$ P_j(\alpha)=\frac{(-i\alpha)^{-j}}{2\pi i}\int_{\Re(s)=\frac{k}2}
e^{i\frac{\pi}2\sgn(\alpha)(s+\frac{k-1}2)}|\alpha|^{-s-\frac{k-1}2} \binom{-s-\frac{k-1}2}{j}\Lambda_f(s,\tfrac{a}{q}) \frac{\pi^2}{\sin^2(\pi(s+\frac{k-1}2))}\,ds. $$ Hence, $$
\int_0^{|\alpha|/4}B(\alpha+iy)y^s\frac{dy}{y}
-\sum_{j=0}^{M-1}P_j(\alpha)\frac{|\alpha/4|^{s+j}}{s+j} $$ is holomorphic for $\Re(s)>-M$. Note that the sum over $j$ is entire apart from at most simple poles at the poles of $\Gamma_\mathbb{C}(s)$. Dividing by $\Gamma_\mathbb{C}(s)$ and taking $M\to\infty$ concludes the proof. \end{proof}
\begin{proof}[Proof of Proposition~\ref{prop:Mellin}] Combining Lemmas~\ref{Sfaqz}--\ref{Blemma} and taking $M=1$, we see that $I_{f,a,q,\alpha}(s)-H_{f,a,q,\alpha}(s)$ has analytic continuation to $\Re(s)>0$. If
$\int_0^{|\alpha|/4}|S_{f,a,q}(y,\alpha)| y^{\sigma+\frac{k-1}2}\frac{dy}{y}<\infty$ for some $\sigma\ge0$, then the integral defining $I_{f,a,q,\alpha}(s)$ converges absolutely for $\Re(s)>\sigma$, and hence $I_{f,a,q,\alpha}(s)$ is holomorphic in that region. \end{proof}
\section{Estimates for $N^s_{f,a,q}(T)$} Fix $f,a,q$ as in Proposition~\ref{voronoi}, and let $\alpha\in\mathbb{Q}^\times$. In this section, we derive estimates for $N^s_{f,a,q}(T)$ based on Proposition~\ref{prop:Mellin}. \begin{lemma}\label{GLprimebound} Let $f\in S_k(\Gamma_1(N))$ be a primitive form. For $\rho=\beta+i\gamma$ a zero of $\Lambda_f(s)$, we have \begin{equation}\label{compLfncboundrho} \Lambda_f'(\rho)\ll_f
(2+|\gamma|)^{\frac{k}{2}+\frac{|\beta-\frac12|}{3}-\frac16}\log^2(2+|\gamma|)
e^{-\frac{\pi}{2}|\gamma|}. \end{equation} \end{lemma} \begin{proof} We begin by establishing, for $s=\sigma+it$ and $\sigma\in[\frac12,1]$, \begin{equation}\label{compLfncbound} \Gamma_\mathbb{C}(s+\tfrac{k-1}{2})L_f'(s) \ll \tau^{\frac{k}{2}-\frac{1-\sigma}{3}}
e^{-\frac{\pi}{2}|t|}\log^2\tau, \end{equation}
where $\tau=|t|+2$. By \cite[Theorem 1.1]{BMN19}, we have $$ L_f(\tfrac12+it)\ll\tau^{\frac13}\log\tau. $$ By the Phragm\'{e}n--Lindel\"{o}f principle, using $$ L_f\!\left(-\frac1{\log\tau}+it\right)\ll\tau\log\tau \quad\text{and}\quad L_f\!\left(1+\frac1{\log \tau}+it\right)\ll\log\tau, $$ it follows that $L_f(\sigma+it)\ll\tau^{\frac13}\log\tau$
when $|\sigma-\tfrac12|\le1/\log\tau$. An application of the Cauchy integral formula then yields $L_f'(\tfrac12+it)\ll\tau^{\frac13}\log^2\tau$. By Cauchy's inequality and Rankin's estimate
$\sum_{n\le x}|\lambda_f(n)|^2\ll x$, we get $$
|L_f'(1+\varepsilon+it)|
\le\sum_{n=1}^\infty\frac{|\lambda_f(n)|\log n}{n^{1+\varepsilon}}
\le\Big(\sum_{n=1}^\infty\frac{|\lambda_f(n)|^2}{n^{1+\varepsilon}}\Big)^{\frac12} \zeta''(1+\varepsilon)^{\frac12} \ll\varepsilon^{-\frac12}\varepsilon^{-\frac32}=\varepsilon^{-2} $$ for $\varepsilon>0$. Another application of the Phragm\'{e}n--Lindel\"{o}f principle yields $$ L_f'(\sigma+it)\ll\tau^{\frac23(1-\sigma)}\log^2\tau $$ for $\sigma\in[\frac12,1]$. This, together with the Stirling formula estimate $$ \Gamma_\mathbb{C}(s+\tfrac{k-1}{2})\ll \tau^{\sigma+\tfrac{k}{2}-1}
e^{-\frac{\pi}{2}|t|}, $$ yields \eqref{compLfncbound}.
Setting $s=\rho=\beta+i\gamma$ with $\beta\ge\frac12$ in \eqref{compLfncbound} gives \begin{equation}\label{lambdafprimerhochi} \Lambda_f'(\rho) =\Gamma_\mathbb{C}(\rho+\tfrac{k-1}{2})L_f'(\rho)
\ll(2+|\gamma|)^{\frac{k}{2}-\frac{1-\beta}{3}}
\log^2(2+|\gamma|)e^{-\frac{\pi}{2}|\gamma|}. \end{equation} Now suppose $\beta<\frac12$. Differentiating the functional equation we obtain $$ \Lambda_f'(\rho)=-\epsilon N^{\frac12-\rho}\Lambda_{\bar{f}}'(1-\rho), $$
where $|\epsilon|=1$. Applying \eqref{lambdafprimerhochi} to $\Lambda_{\bar{f}}'(1-\rho)$ it follows that \begin{equation}\label{lambdafprimerhochi2} \Lambda_f'(\rho)
\ll(2+|\gamma|)^{\frac{k}{2}-\frac{\beta}{3}}
\log^2(2+|\gamma|)e^{-\frac{\pi}{2}|\gamma|}. \end{equation} Combining \eqref{lambdafprimerhochi} and \eqref{lambdafprimerhochi2} we obtain \eqref{compLfncboundrho}. \end{proof}
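The Stirling-type estimate for $\Gamma_\mathbb{C}$ used in the proof above can be illustrated numerically. The sketch below (Python/\texttt{mpmath}) assumes the usual normalisation $\Gamma_\mathbb{C}(s)=2(2\pi)^{-s}\Gamma(s)$, which is consistent with the computation leading to \eqref{Amellintransform} but is not restated in this section; the values of $k$, $\sigma$ and $t$ are arbitrary.
\begin{verbatim}
# Illustrative check of  Gamma_C(s+(k-1)/2) << tau^{sigma+k/2-1} exp(-pi|t|/2),
# tau = |t|+2, assuming Gamma_C(s) = 2 (2 pi)^{-s} Gamma(s)  (our assumption).
from mpmath import mp, mpc, gamma, exp, pi, mpf

mp.dps = 25
k, sigma = 4, mpf('0.75')
Gamma_C = lambda s: 2 * (2 * pi)**(-s) * gamma(s)

for t in [5, 20, 80, 320, 1280]:
    s = mpc(sigma, t)
    tau = abs(t) + 2
    ratio = abs(Gamma_C(s + (k - 1)/2)) / (tau**(sigma + k/2 - 1) * exp(-pi * t / 2))
    print(t, ratio)   # the ratios stay bounded (and approach a constant) as t grows
\end{verbatim}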
\begin{lemma}\label{lem:Nathan} For any fixed $\varepsilon>0$ and all $\sigma\in[\varepsilon,2]$, $$
\int_0^{\frac{|\alpha|}{4}}|S_{f,a,q}(y,\alpha)|y^{\sigma+\frac{k-1}{2}}\frac{dy}{y}\ll \sum_{\substack{\rho=\beta+i\gamma\\\text{a pole of }\Delta_{f,a,q}^*(s)}}
(2+|\gamma|)^{\frac{1+|\beta-\frac12|}3-\sigma}\log^2(2+|\gamma|). $$ \end{lemma} \begin{proof} Throughout this proof we let $\rho=\beta+i\gamma$ denote a pole of
$\Delta_{f,a,q}^*(s)$, and we set $\tau=2+|\gamma|$. Recalling \eqref{Sydefn}, observe that $(y-i\alpha)^{-\rho-\frac{k-1}2}=e^{i\frac{\pi}{2}\sgn(\alpha)(\rho+\frac{k-1}2)}
|\alpha|^{-\rho-\frac{k-1}2}(1+\frac{iy}{\alpha})^{-\rho-\frac{k-1}2}$ and \begin{equation}\label{1plusyalpha} \begin{aligned}
\bigl|(1+i\tfrac{y}{\alpha})^{-(\beta+i\gamma+\frac{k-1}2)}\bigr|
&=\bigl|e^{-(\frac12\log(1+(y/\alpha)^2)+i\arctan(y/\alpha))(\beta+i\gamma+\frac{k-1}2)}\bigr|\\ &=\bigl(1+(\tfrac{y}{\alpha})^2\bigr)^{-\frac{\beta}{2}-\frac{k-1}4} e^{\gamma\arctan(y/\alpha)}. \end{aligned} \end{equation} Therefore \begin{equation}\label{yalphabound} (y-i\alpha)^{-\rho-\frac{k-1}2}
\ll e^{\gamma\sgn(\alpha)(\arctan(y/|\alpha|)-\frac{\pi}2)}. \end{equation} Next, we treat the residue in \eqref{Sydefn}. By \eqref{Deltafaq}, the poles of $\Delta_{f,a,q}^*(s)$ arise from poles of $\Delta_f(s)$ and $\Delta_f(s,\chi)$ with $\chi\ne\chi_0$. The contribution of an individual term of \eqref{Deltafaq} to $\Res{s=\rho}\Delta_{f,a,q}^*(s)$, if nonzero, is of the form $$ -\Big(1-\frac{q}{q-1}P_{f,q}(q^{-\rho})\Big)\Lambda_f'(\rho) \quad\text{or}\quad -\frac{\tau(\overline{\chi})\chi(a)}{q-1}\Lambda_f'(\rho,\chi). $$ Applying Lemma~\ref{GLprimebound} (possibly replacing $f$ by $f\otimes\chi$) to each of these expressions, it follows that \begin{equation}\label{residue} \Res{s=\rho}\Delta_{f,a,q}^*(s)
\ll\tau^{\frac{k}2+\frac{|\beta-\frac12|}3-\frac16}(\log^2\tau)
e^{-\frac{\pi}{2}|\gamma|}. \end{equation} It follows from \eqref{Sydefn}, \eqref{yalphabound}, and \eqref{residue} that $$ S_{f,a,q}(y,\alpha)\ll\sum_\rho
\tau^{\frac{k}2+\frac{|\beta-\frac12|}3-\frac16}(\log^2\tau)
e^{|\gamma|[\sgn(\alpha\gamma)\arctan(y/|\alpha|)-\frac{\pi}2(1+\sgn(\alpha\gamma))]}. $$ By considering cases and using the bound $\arctan{u}\ge\frac{u}{2}$ for $0\le u\le\frac14$, we have $$ S_{f,a,q}(y,\alpha)\ll\sum_\rho
\tau^{\frac{k}2+\frac{|\beta-\frac12|}3-\frac16}(\log^2\tau)e^{-c|\gamma|y}
\quad\text{for }y\in\bigl(0,\tfrac{|\alpha|}4\bigr], $$
where $c=\frac1{2|\alpha|}>0$. We deduce from this $$
\int_0^{\frac{|\alpha|}{4}}|S_{f,a,q}(y,\alpha)| y^{\sigma+\frac{k-1}{2}}\frac{dy}{y} \ll\sum_\rho
\tau^{\frac{k}2+\frac{|\beta-\frac12|}3-\frac16}\log^2\tau
\int_0^{\frac{|\alpha|}{4}}e^{-c|\gamma|y}y^{\sigma+\frac{k-1}{2}}\frac{dy}{y}. $$ Now $$
\int_0^{\frac{|\alpha|}{4}}e^{-c|\gamma|y}y^{\sigma+\frac{k-1}{2}}\frac{dy}{y}
\ll\int_0^{\frac{|\alpha|}{4}}e^{-c\tau y}y^{\sigma+\frac{k-1}{2}}\frac{dy}{y} \le\int_0^\infty e^{-c\tau y}y^{\sigma+\frac{k-1}{2}}\frac{dy}{y}. $$ By the variable change $u=c\tau y$, the last integral equals $$ \frac1{(c\tau)^{\sigma+\frac{k-1}{2}}}\int_0^\infty e^{-u}u^{\sigma+\frac{k-1}{2}}\frac{du}{u} =\frac{\Gamma(\sigma+\frac{k-1}{2})}{(c\tau)^{\sigma+\frac{k-1}{2}}} \ll\tau^{-\sigma-\frac{k-1}2}, $$ and thus $$
\int_0^{\frac{|\alpha|}{4}}|S_{f,a,q}(y,\alpha)|y^{\sigma+\frac{k-1}{2}}\frac{dy}{y} \ll\sum_\rho
\tau^{\frac{1+|\beta-\frac12|}3-\sigma}\log^2\tau. $$ \end{proof}
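Two elementary steps in the proof just given, the bound $\arctan u\ge u/2$ on $[0,\tfrac14]$ and the evaluation of the Gamma integral after the change of variables, admit quick numerical sanity checks; in the sketch below (Python/\texttt{mpmath}) the values of $c$, $\tau$, $\sigma$ and $k$ are arbitrary illustrations.
\begin{verbatim}
# Quick sanity checks (illustrative values only) for two steps used above.
from mpmath import mp, atan, quad, exp, gamma, inf, mpf

mp.dps = 20

# (1) arctan(u) >= u/2 for 0 <= u <= 1/4.
assert all(atan(mpf(i)/400) >= mpf(i)/800 for i in range(101))

# (2) int_0^oo exp(-c*tau*y) y^{sigma+(k-1)/2} dy/y
#       = Gamma(sigma+(k-1)/2) / (c*tau)^{sigma+(k-1)/2}.
c, tau, sigma, k = mpf('0.5'), mpf('7'), mpf('1.2'), 4
e = sigma + (k - 1)/2
lhs = quad(lambda y: exp(-c*tau*y) * y**(e - 1), [0, inf])
rhs = gamma(e) / (c*tau)**e
print(lhs, rhs)    # the two values agree closely
\end{verbatim}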
For a meromorphic function $h$ on $\{s\in\mathbb{C}:\Re(s)>1\}$, define $$ \Theta(h)=\inf\bigl\{\theta\ge0:h\text{ continues analytically to } \{s\in\mathbb{C}:\Re(s)>\theta\}\bigr\}. $$ We also set $$ \theta_{f,a,q}(T)=\sup\bigl(\{0\}\cup\bigl\{\Re(\rho),1-\Re(\rho):
\rho\in\mathbb{C}, |\Im(\rho)|\le T, \Res{s=\rho}\Delta_{f,a,q}^*(s)\ne0\bigr\}\bigr) $$ and $$ \theta_{f,a,q}=\lim_{T\to\infty}\theta_{f,a,q}(T). $$ By Proposition~\ref{voronoi}, we have \begin{equation}\label{thetafaq} \theta_{f,a,q}=\max(\Theta(\Delta_{f,a,q}),\Theta(\Delta_{\bar{f},-\overline{Na},q})). \end{equation}
\begin{proposition}\label{NfaqTlowerbound} If $\Theta(H_{f,a,q,\alpha})>0$ then $\theta_{f,a,q}\ge\frac12$ and \begin{equation}\label{eq:omega1} N^s_{f,a,q}(T)= \Omega\bigl(T^{\frac13(1-\theta_{f,a,q})+\Theta(H_{f,a,q,\alpha}) -\frac12-\varepsilon}\bigr) \quad\text{for all }\varepsilon>0. \end{equation} Further, if $\Theta(H_{f,a,q,\alpha})=\frac12$ and $H_{f,a,q,\alpha}(s)$ has a pole with real part $\frac12$, then \begin{equation}\label{eq:omega2} N^s_{f,a,q}(T)=\Omega\!\left( \frac{T^{\frac13(1-\theta_{f,a,q}(T))}}{(1-\theta_{f,a,q}(T))\log^2{T}}\right), \end{equation} and there are arbitrarily large $T>0$ such that \begin{equation}\label{eq:omega3} N^s_{f,a,q}(T)\ge\log\log\log{T}. \end{equation} \end{proposition} \begin{proof}
Let $\beta_n+i\gamma_n$ run through the poles of $\Delta_{f,a,q}^*(s)$, in increasing order of $|\gamma_n|$. For brevity, we write $I(s)$, $H(s)$, $\Theta$, $S(y)$, $N(t)$, $\theta(t)$ and $\theta$ for $I_{f,a,q,\alpha}(s)$, $H_{f,a,q,\alpha}(s)$, $\Theta(H_{f,a,q,\alpha})$, $S_{f,a,q}(y,\alpha)$, $N^s_{f,a,q}(t)$, $\theta_{f,a,q}(t)$ and $\theta_{f,a,q}$, respectively. By Lemma~\ref{lem:Nathan}, we have \begin{align*}
\int_0^{|\alpha|/4}|S(y)|y^{\sigma+\frac{k-1}2}\frac{dy}{y}
&\ll\sum_{n\ge 1}(2+|\gamma_n|)^{\frac{1+|\beta_n-\frac12|}{3}-\sigma}
\log^2(2+|\gamma_n|)\\
&\le\sum_{n\ge 1}(2+|\gamma_n|)^{\frac{\theta(|\gamma_n|)}{3}
+\frac16-\sigma}\log^2(2+|\gamma_n|). \end{align*} If $\Theta>0$ then by Proposition~\ref{prop:Mellin}, the integral must diverge for sufficiently small $\sigma>0$, and thus the right-hand side has infinitely many terms. Thus $\Delta_{f,a,q}^*(s)$ has poles, so $\theta\ge\frac12$.
Suppose that \eqref{eq:omega1} does not hold. Then there exists $\varepsilon\in(0,\Theta)$ such that $N(t)=o(t^{\frac13(1-\theta)+\Theta-\frac12-\varepsilon})$. Choosing $\sigma=\Theta-\frac{\varepsilon}3$ and using the estimate
$\log^2(2+|\gamma_n|)\ll(2+|\gamma_n|)^{\frac{\varepsilon}{3}}$, we have \begin{align*}
\int_0^{|\alpha|/4}|S(y)|y^{\Theta-\frac{\varepsilon}3+\frac{k-1}2}\frac{dy}{y} &\ll\sum_{n\ge1}
(2+|\gamma_n|)^{\frac{\theta}{3}+\frac16-\Theta+\frac23\varepsilon} \ll1+\int_1^\infty t^{\frac{\theta}{3}+\frac16-\Theta+\frac23\varepsilon}\,dN(t)\\ &\ll1+\int_1^\infty t^{\frac{\theta}{3}+\frac16-\Theta+\frac23\varepsilon-1}N(t)\,dt \ll1+\int_1^\infty t^{-1-\frac{\varepsilon}3}\,dt \ll1. \end{align*} By Proposition~\ref{prop:Mellin}, it follows that $H(s)$ is holomorphic for $\Re(s)>\Theta-\frac{\varepsilon}3$. This is a contradiction, so \eqref{eq:omega1} must hold.
Next suppose that $\Theta=\frac12$, and let $\rho$ be a pole of $H(s)$ with $\Re(\rho)=\frac12$. Then for sufficiently small $\delta>0$, by Proposition~\ref{prop:Mellin}, we have $$
\delta^{-1}\ll|H(\rho+\delta)|\ll1+|I(\rho+\delta)|
\le1+\int_0^{|\alpha|/4}|S(y)|y^{\delta+\frac{k}2}\frac{dy}{y}, $$ where we understand the right-hand side to be $\infty$ if the integral diverges. Applying Lemma~\ref{lem:Nathan}, we thus have \begin{equation}\label{eq:gammasum} \delta^{-1}
\ll1+\sum_{n\ge 1}(2+|\gamma_n|)^{\frac{\theta(|\gamma_n|)-1}{3}-\delta}
\log^2(2+|\gamma_n|). \end{equation} In particular, the right-hand side must have infinitely many terms. Applying integration by parts, we get \begin{align*} \delta^{-1}&\ll1+\int_1^\infty t^{\frac{\theta(t)-1}{3}-\delta}\log^2{t}\,dN(t) =1-\int_1^\infty N(t)d(t^{\frac{\theta(t)-1}{3}-\delta}\log^2{t})\\ &\le1+\int_1^\infty N(t)\bigl(\tfrac{1-\theta(t)}{3}+\delta\bigr) t^{\frac{\theta(t)-1}{3}-\delta-1}\log^2{t}\,dt, \end{align*} where for the last inequality we have used the fact that $\theta(t)$ is nondecreasing and $$ d(t^{\frac{\theta(t)-1}{3}-\delta}\log^2{t}) =t^{\frac{\theta(t)-1}{3}-\delta-1}(\log{t}) \bigl[2-\bigl(\tfrac{1-\theta(t)}{3}+\delta\bigr)\log{t}\bigr]\,dt +\tfrac13 t^{\frac{\theta(t)-1}{3}-\delta}\log^3{t}\,d\theta(t). $$
Suppose that \eqref{eq:omega2} is false, so that the function $\varepsilon(t)=N(t)t^{\frac13(\theta(t)-1)}(1-\theta(t))\log^2{t}$ satisfies $\lim_{t\to\infty}\varepsilon(t)=0$. Then we have $$ \delta^{-1}\ll1+\int_1^\infty \left(\frac13+\frac{\delta}{1-\theta(t)}\right) \varepsilon(t)t^{-1-\delta}\,dt. $$ By the standard zero-free region \cite[Theorem~5.10]{IK04}, we have $$ \frac1{1-\theta(t)}\ll\log\max(t,2), $$ so that $$ \delta^{-1}\ll1+\int_1^\infty(1+\delta\log{t})\varepsilon(t)t^{-1-\delta}\,dt =1+\delta^{-1}\int_0^\infty\varepsilon(e^{u/\delta})(1+u)e^{-u}\,du =o(\delta^{-1}). $$ This is a contradiction, so \eqref{eq:omega2} holds.
Finally, suppose \eqref{eq:omega3} is false, so that $N(T)<\log\log\log{T}$ for all sufficiently large $T$. Then there exists $n_0\in\mathbb{Z}_{>0}$ such that
$|\gamma_n|>\exp\exp\exp{n}$ for all $n\ge n_0$. Since the terms from $n<n_0$ contribute a bounded amount to \eqref{eq:gammasum}, we have $$
1+\sum_{n=n_0}^\infty|\gamma_n|^{-\delta}\log^2|\gamma_n| \gg\delta^{-1} $$ for all sufficiently small $\delta>0$.
Next we claim that there are infinitely many $m\ge n_0$ such that \begin{equation}\label{eq:biggap}
\log\log|\gamma_{m+1}|\ge\tfrac{13}{5}\log\log|\gamma_m|. \end{equation} If not then there exists $n_1\ge n_0$ such that \eqref{eq:biggap} fails for all $m\ge n_1$, and by induction it follows that $$
\log\log|\gamma_n|\le(\tfrac{13}{5})^{n-n_1}\log\log|\gamma_{n_1}| =c(\tfrac{13}{5})^n \quad\text{for }n\ge n_1, $$
where $c=(13/5)^{-n_1}\log\log|\gamma_{n_1}|>0$. Hence, $$
n<\log\log\log|\gamma_n|\le\log{c}+n\log\tfrac{13}{5}. $$ Since $\log\frac{13}{5}<1$, this is false for sufficiently large $n$, proving the claim.
Choose a large $m\ge n_0$ satisfying \eqref{eq:biggap}, and set
$\delta_m=(\log|\gamma_m|)^{-\frac{12}{5}}$. Then using the trivial bound $e^{e^e}\le|\gamma_n|\le|\gamma_m|$ for $n_0\le n\le m$, we have $$
\sum_{n=n_0}^m|\gamma_n|^{-\delta_m}\log^2|\gamma_n|
\le m\log^2|\gamma_m|<(\log\log\log|\gamma_m|)\log^2|\gamma_m|
\le(\log|\gamma_m|)^{\frac{11}{5}} =\delta_m^{-\frac{11}{12}}, $$ since $\log\log{x}\le x^{\frac15}$ for all $x>1$.
To estimate the contribution from $n>m$ we apply integration by parts. Set $g(t)=t^{-\delta_m}\log^2{t}$. Then $g'(t)<0$ for
$t>e^{2/\delta_m}=\exp(2(\log|\gamma_m|)^{12/5})$; in particular, if $m$ is sufficiently large then, by \eqref{eq:biggap}, $g'(t)<0$
for $t\ge|\gamma_{m+1}|$. Hence, we have \begin{align*}
\sum_{n=m+1}^\infty g(|\gamma_n|)
&=\lim_{\varepsilon\to0^+}\int_{|\gamma_{m+1}|-\varepsilon}^\infty g(t)\,dN(t)
=\int_{|\gamma_{m+1}|}^\infty(-g'(t))(N(t)-m)\,dt\\
&\le\int_{|\gamma_{m+1}|}^\infty(-g'(t))(\log\log\log{t})\,dt
\le\delta_m\int_{|\gamma_{m+1}|}^\infty t^{-\delta_m-1}(\log{t})^{\frac{11}{5}}\,dt\\
&=\delta_m\int_{\log|\gamma_{m+1}|}^\infty e^{-\delta_mu}u^{\frac{11}5}\,du. \end{align*}
Applying integration by parts three times and using that $\delta_m\log|\gamma_{m+1}|\gg1$, we get $$
\delta_m\int_{\log|\gamma_{m+1}|}^\infty e^{-\delta_mu}u^{\frac{11}5}\,du
\ll|\gamma_{m+1}|^{-\delta_m}(\log|\gamma_{m+1}|)^{\frac{11}{5}}. $$
Note that $\delta_m=(\log|\gamma_m|)^{-\frac{12}{5}}
\ge(\log|\gamma_{m+1}|)^{-\frac{12}{13}}$, so
$|\gamma_{m+1}|^{-\delta_m}\le\exp(-(\log|\gamma_{m+1}|)^{\frac1{13}})$. Hence, we conclude that $$
\sum_{n=m+1}^\infty g(|\gamma_n|)\ll
\exp(-(\log|\gamma_{m+1}|)^{\frac1{13}})(\log|\gamma_{m+1}|)^{\frac{11}{5}} \ll 1. $$
Thus, altogether we have $$
\delta_m^{-1}\ll1+\sum_{n=n_0}^\infty|\gamma_n|^{-\delta_m}\log^2|\gamma_n| \ll1+\delta_m^{-\frac{11}{12}}. $$ This is false for sufficiently large $m$, so \eqref{eq:omega3} must hold for some arbitrarily large $T$. \end{proof}
\section{Proofs of Theorems~\ref{thm:twist} and \ref{thm:oddN}} We begin with an overview of the argument. By Proposition~\ref{NfaqTlowerbound}, $N^s_{f,a,q}(T)$ is sometimes large if there exists $\alpha\in\mathbb{Q}^\times$ for which $H_{f,a,q,\alpha}(s)$ has a pole with large real part. The main obstacle to showing this is that $H_{f,a,q,\alpha}(s)$ is defined as the difference of two functions (cf.~\eqref{eq:Hdef}), whose poles could in principle cancel out. However, as we show, there are some dependencies between $H_{f,a,q,\alpha}(s)$ for various choices of $(a,q,\alpha)$, from which it follows that there is a suitable pole for at least one choice of inputs. More specifically, in Lemma~\ref{lem:holo} we exhibit a relationship between $H_{f,1,1,a/p}(s)$ and $H_{f,a,q,-a/q}(s)$, where $p$ and $q$ are primes satisfying $pq\equiv-1\pmod*{Na}$. For any prime $p\nmid N$, we show that there is some choice of $a\in\mathbb{Z}$ for which this leads to poles at the simple zeros of $\Lambda_f(s)$, and thanks to \cite[Theorem~1.1]{Boo16}, those exist in abundance. Ultimately this implies that at least one of $N^s_f(T)$, $N^s_{f,a,p}(T)$, $N^s_{f,a,q}(T)$ is large, which yields Theorem~\ref{thm:twist}. Choosing $p=2$ and appealing to the second and third conclusions of Proposition~\ref{NfaqTlowerbound} yields Theorem~\ref{thm:oddN}.
Proceeding, given a prime $p$ and $a\in\mathbb{Z}$ coprime to $p$, define \begin{equation}\label{eq:Cdef} C_{f,a,p}(s)=\Delta_{f,a,p}(s)-\xi(p)p^{1-2s}\Delta_f(s). \end{equation} \begin{lemma}\label{lem:holo} Let $a\in\mathbb{Z}$, and let $p$ and $q$ be prime numbers such that $pq\equiv-1\pmod*{Na}$. Then \begin{enumerate} \item[(i)] $C_{f,a,p}(s)-\bigl(H_{f,1,1,a/p}(s)-\xi(p)p^{1-2s}H_{f,a,q,-a/q}(s)\bigr)$ is holomorphic for $\Re(s)>0$; \item[(ii)] $\displaystyle{\sum_{b=1}^{p-1}C_{f,b,p}(s)}=-P_{f,p}(p^{1-s})\Delta_f(s)$. \end{enumerate} \end{lemma} \begin{proof} We first consider $H_{f,a,q,\alpha}(s)$, where $\alpha=-a/q$. We have $$ \Delta_{f,a,q}(s,\alpha)-\Delta_f(s) =-R_{f,q}(q^{-s})\Lambda_f(s), $$ which is holomorphic for $\Re(s)>0$. Set $a'=-\frac{1+pq}{Na}$, so that $\frac{a'}{q}-\frac1{Nq^2\alpha}=-\frac{p}{Na}$. Let $r_{\bar{f},q}(j)$ be the numbers such that $$ R_{\bar{f},q}(x)=\sum_{j=1}^\infty r_{\bar{f},q}(j)x^j. $$ By Fourier inversion, we have $$ \sum_{\substack{j\ge1\\j\equiv{t}\;(\text{mod }\varphi(Na))}} r_{\bar{f},q}(j)x^j =\frac1{\varphi(Na)}\sum_{\ell=1}^{\varphi(Na)} e\!\left(-\frac{\ell{t}}{\varphi(Na)}\right) R_{\bar{f},q}\!\left(e\!\left(\frac\ell{\varphi(Na)}\right)x\right). $$ Thus, \begin{align*} \Delta_{\bar{f},a',q}&\!\left(s,-\frac1{Nq^2\alpha}\right) -\Delta_{\bar{f}}\!\left(s,-\frac{p}{Na}\right) =-\sum_{j=1}^\infty r_{\bar{f},q}(j)q^{-js} \Lambda_{\bar{f}}\!\left(s,\frac{q^{j-1}}{Na}\right)\\ &=-\sum_{t=1}^{\varphi(Na)}\Lambda_{\bar{f}}\!\left(s,\frac{q^{t-1}}{Na}\right) \sum_{\substack{j\ge1\\j\equiv{t}\;(\text{mod }\varphi(Na))}} r_{\bar{f},q}(j)q^{-js}\\ &=-\frac1{\varphi(Na)} \sum_{t=1}^{\varphi(Na)}\Lambda_{\bar{f}}\!\left(s,\frac{q^{t-1}}{Na}\right) \sum_{\ell=1}^{\varphi(Na)} e\!\left(-\frac{\ell{t}}{\varphi(Na)}\right) R_{\bar{f},q}\!\left(e\!\left(\frac\ell{\varphi(Na)}\right)q^{-s}\right), \end{align*} which is again holomorphic for $\Re(s)>0$. Hence, up to a holomorphic function, $H_{f,a,q,\alpha}(s)$ is $$ \Delta_f(s)-\epsilon\xi(q)(-i\sgn{a})^k(Na^2)^{s-\frac12} \Delta_{\bar{f}}\!\left(s,-\frac{p}{Na}\right). $$
Next note that $$ C_{f,a,p}(s)-H_{f,1,1,a/p}(s)= \epsilon(i\sgn{a})^k\left(\frac{Na^2}{p^2}\right)^{s-\frac12} \Delta_{\bar{f}}\!\left(s,-\frac{p}{Na}\right) -\xi(p)p^{1-2s}\Delta_f(s)-R_{f,q}(q^{-s})\Lambda_f(s). $$ Therefore, since $\xi(p)\xi(q)=\xi(-1)=(-1)^k$, we see that $$ C_{f,a,p}(s)-H_{f,1,1,a/p}(s)+\xi(p)p^{1-2s}H_{f,a,q,\alpha}(s) $$ is holomorphic for $\Re(s)>0$.
Finally, by \eqref{Deltafaq} we have \begin{align*} \sum_{b=1}^{p-1}C_{f,b,p}(s) &=(p-1)\left[1-\frac{p}{p-1}P_{f,p}(p^{-s})-\xi(p)p^{1-2s}\right]\Delta_f(s)\\ &=-P_{f,p}(p^{1-s})\Delta_f(s). \end{align*} \end{proof}
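The Fourier-inversion (roots-of-unity filter) step in the proof of Lemma~\ref{lem:holo} is a finite orthogonality computation, and can be checked numerically on any finite coefficient sequence. The sketch below is an illustration in Python/\texttt{mpmath}, with an arbitrary coefficient list and an arbitrary modulus $Q$ standing in for $\varphi(Na)$.
\begin{verbatim}
# Illustrative check of the filter
#   sum_{j >= 1, j = t mod Q} r_j x^j
#     = (1/Q) sum_{l=1}^{Q} e(-l t/Q) R(e(l/Q) x),  R(x) = sum_j r_j x^j,
# here with a finite, made-up coefficient list (r_1, ..., r_8).
from mpmath import mp, mpc, exp, pi

mp.dps = 15
Q, t = 6, 2
r = [0.3, -1.2, 0.7, 2.0, -0.4, 1.1, 0.05, -0.9]
x = mpc(0.4, 0.1)
e = lambda u: exp(2j * pi * u)

R = lambda y: sum(rj * y**j for j, rj in enumerate(r, start=1))
lhs = sum(rj * x**j for j, rj in enumerate(r, start=1) if j % Q == t % Q)
rhs = sum(e(-l * t / Q) * R(e(l / Q) * x) for l in range(1, Q + 1)) / Q
print(lhs)
print(rhs)    # the two values agree
\end{verbatim}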
In the following we shall make frequent use of the observation that for any pair $h_1,h_2$ of meromorphic functions, \begin{equation}\label{eq:thetah1h2} \Theta(h_1+h_2)\le\max(\Theta(h_1),\Theta(h_2)), \quad\text{with equality when }\Theta(h_1)\ne\Theta(h_2). \end{equation}
Fix a prime $p\nmid N$. By \cite[Theorem~1.1]{Boo16} and the functional equation, $\Delta_f(s)$ has a pole with real part $\ge\frac12$, and thus \begin{equation}\label{thetaf11lb} \Theta(\Delta_f)=\theta_{f,1,1}\ge\frac12. \end{equation} Since all zeros of $P_{f,p}(p^{1-s})$ have real part $1$, this is also true of $P_{f,p}(p^{1-s})\Delta_f(s)$. Hence, by Lemma~\ref{lem:holo}(ii), there exists $a\in\{1,\ldots,p-1\}$ such that $C_{f,a,p}(s)$ has a pole with real part $\ge\frac12$ and satisfies $\Theta(C_{f,a,p})\ge\theta_{f,1,1}$. By \eqref{eq:Cdef} and \eqref{eq:thetah1h2}, it follows that \begin{equation}\label{thetaCfap} \Theta(C_{f,a,p})=\max(\Theta(\Delta_{f,a,p}),\theta_{f,1,1}). \end{equation} Let $q$ be a prime satisfying $pq\equiv-1\pmod*{Na}$, and set $a'=-(1+pq)/(Na)$.
We aim to prove that \begin{equation}\label{eq:summary} \max\bigl(N^s_f(T),N^s_{f,a,p}(T),N^s_{f,a,q}(T)\bigr) =\Omega\bigl(T^{\frac16-\varepsilon}\bigr) \quad\text{for all }\varepsilon>0. \end{equation} To that end, we will show that at least one of the following inequalities holds for some $\alpha\in\mathbb{Q}^\times$: \begin{itemize} \item[(i)] $\max(\Theta(H_{f,1,1,\alpha}),\Theta(H_{\bar{f},1,1,\alpha})) \ge\theta_{f,1,1}\ge\frac12$; \item[(ii)] $\max(\Theta(H_{f,a,p,\alpha}),\Theta(H_{\bar{f},a',p,\alpha})) \ge\theta_{f,a,p}\ge\frac12$; \item[(iii)] $\max(\Theta(H_{f,a,q,\alpha}),\Theta(H_{\bar{f},a',q,\alpha})) \ge\theta_{f,a,q}\ge\frac12$. \end{itemize} To see that this suffices, suppose for instance that (iii) holds. By Proposition~\ref{voronoi}, we have $N^s_{f,a,q}(T)=N^s_{\bar{f},a',q}(T)$ and $\theta_{f,a,q}=\theta_{\bar{f},a',q}$. Thus, applying Proposition~\ref{NfaqTlowerbound} to either $(f,a,q)$ or $(\bar{f},a',q)$, we conclude that $$ N^s_{f,a,q}(T)=\Omega(T^{\beta-\varepsilon}), \quad\text{where } \beta\ge\frac13(1-\theta_{f,a,q})+\theta_{f,a,q}-\frac12 =\frac{2\theta_{f,a,q}}{3}-\frac16\ge\frac16. $$ If, instead, (i) or (ii) holds, then by a similar argument we find that $N^s_{f,1,1}(T)=\Omega(T^{\beta-\varepsilon})$ or $N^s_{f,a,p}(T)=\Omega(T^{\beta-\varepsilon})$ for some $\beta\ge\frac16$. Hence, \eqref{eq:summary} follows in any case.
Let us suppose that conditions (i) and (iii) are false for all $\alpha\in\mathbb{Q}^\times$ and show that this leads to (ii). Since (i) is false, in view of \eqref{thetaf11lb} we must have $\theta_{f,1,1}>\Theta(H_{f,1,1,a/p})$. In turn, by \eqref{thetaCfap} this implies that $\Theta(C_{f,a,p})>\Theta(H_{f,1,1,a/p})$. Hence, by Lemma~\ref{lem:holo}(i) and \eqref{eq:thetah1h2}, we have $\Theta(H_{f,a,q,-a/q})=\Theta(C_{f,a,p})$. By \eqref{thetaCfap}, this implies $\Theta(H_{f,a,q,-a/q})\ge\theta_{f,1,1}>0$, and thus $\theta_{f,a,q}\ge\frac12$, by Proposition~\ref{NfaqTlowerbound}.
Next, by \eqref{thetafaq} we have $\theta_{f,a,p}=\max(\Theta(\Delta_{f,a,p}),\Theta(\Delta_{\bar{f},a',p}))$. If \begin{equation}\label{cond1} \Theta(\Delta_{\bar{f},a',p}) \le\max(\Theta(\Delta_{f,a,p}),\theta_{f,1,1}) =\Theta(H_{f,a,q,-a/q}) \end{equation} then it follows that \begin{equation}\label{ThetaHf} \Theta(H_{f,a,q,-a/q})=\max(\theta_{f,a,p},\theta_{f,1,1}). \end{equation}
Suppose now that \eqref{cond1} is false. Then $\Theta(\Delta_{\bar{f},a',p}) >\max(\Theta(\Delta_{f,a,p}),\theta_{f,1,1})$, so that $$ \theta_{f,a,p}=\Theta(\Delta_{\bar{f},a',p})>\theta_{f,1,1}. $$ Since (i) is false, this implies that $\Theta(\Delta_{\bar{f},a',p}) >\max(\Theta(H_{\bar{f},1,1,a'/p}),\theta_{f,1,1})$. By \eqref{eq:Cdef} and Lemma~\ref{lem:holo}(i) with $(\bar{f},a')$ in place of $(f,a)$, it follows from \eqref{eq:thetah1h2} that \begin{equation}\label{ThetaHbarf} \Theta(H_{\bar{f},a',q,-a'/q})=\Theta(\Delta_{\bar{f},a',p}) =\max(\theta_{f,a,p},\theta_{f,1,1}). \end{equation}
Therefore, since at least one of \eqref{ThetaHf} and \eqref{ThetaHbarf} must hold, we have $$ \max(\Theta(H_{f,a,q,-a/q}),\Theta(H_{\bar{f},a',q,-a'/q})) \ge\max(\theta_{f,a,p},\theta_{f,1,1}). $$ Since (iii) is false, this implies that $\theta_{f,a,q}>\max(\theta_{f,a,p},\theta_{f,1,1})$. Hence, by \eqref{thetafaq}, either \begin{equation}\label{branch} \Theta(\Delta_{f,a,q}) >\max(\theta_{f,a,p},\theta_{f,1,1}) \quad\text{or}\quad \Theta(\Delta_{\bar{f},a',q}) >\max(\theta_{f,a,p},\theta_{f,1,1}). \end{equation}
Suppose that the first inequality in \eqref{branch} holds. Then by \eqref{eq:Cdef} (with $q$ in place of $p$) and \eqref{eq:thetah1h2}, we have $\Theta(C_{f,a,q})=\Theta(\Delta_{f,a,q})>\theta_{f,1,1}$. Since (i) is false, this implies $\Theta(C_{f,a,q})>\Theta(H_{f,1,1,a/q})$. On the other hand, by Lemma~\ref{lem:holo}(i) (with the roles of $p$ and $q$ reversed) and \eqref{eq:thetah1h2}, we have $$\Theta(H_{f,a,p,-a/p})=\Theta(C_{f,a,q}) =\Theta(\Delta_{f,a,q})>\theta_{f,a,p}.$$ This also implies that $\Theta(H_{f,a,p,-a/p})>0$, whence $\theta_{f,a,p}\ge\frac12$, by Proposition~\ref{NfaqTlowerbound}.
If, instead, the second inequality holds in \eqref{branch}, then running through the same argument with $(\bar{f},a')$ in place of $(f,a)$, we find that $$\Theta(H_{\bar{f},a',p,-a'/p})=\Theta(C_{\bar{f},a',q}) =\Theta(\Delta_{\bar{f},a',q})>\theta_{\bar{f},a',p}\ge\frac12. $$ Hence, in either case we see that (ii) holds, and this concludes the proof of \eqref{eq:summary}.
Now, by \eqref{eq:summary} and \eqref{Deltafaq}, it follows that there is a character $\chi$ of conductor $1$, $p$ or $q$ such that $N^s_{f\otimes\chi}(T)=\Omega(T^{\frac16-\varepsilon})$ for all $\varepsilon>0$. This implies Theorem~\ref{thm:twist}.
For the proof of Theorem~\ref{thm:oddN}, we may assume that $N^s_f(T)\ll1+T^\varepsilon$ for all $\varepsilon>0$, since the result is trivial otherwise. To avoid contradicting Proposition~\ref{NfaqTlowerbound}, it must therefore be the case that $\max(\Theta(H_{f,1,1,\alpha}),\Theta(H_{\bar{f},1,1,\alpha})) \le\frac12$ for all $\alpha\in\mathbb{Q}^\times$.
Since $N$ is odd, we can take $p=2$ and $a=1$ in the above, and choose any suitable prime $q$. Then by Lemma~\ref{lem:holo}(ii), we have $$ \Delta_{f,a,p}(s)=\bigl(\xi(p)p^{1-2s}-P_{f,p}(p^{1-s})\bigr)\Delta_f(s), $$ and it follows that $N^s_{f,a,p}(T)\le N^s_f(T)$ and $\max(\Theta(H_{f,a,p,\alpha}),\Theta(H_{\bar{f},a',p,\alpha})) \le\frac12$ for all $\alpha\in\mathbb{Q}^\times$. Thus, by \eqref{eq:summary}, $N^s_{f,a,q}(T)=\Omega(T^{\frac16-\varepsilon})$ for all $\varepsilon>0$. Therefore, by Proposition~\ref{voronoi}, at least one of $\Delta_{f,a,q}(s),\Delta_{\bar{f},a',q}(s)$ has a pole in the region $\{s\in\mathbb{C}:\Re(s)\ge\frac12\}$ that is not a pole of $\Delta_f(s)$. By \eqref{eq:Cdef} and Lemma~\ref{lem:holo}(i), the same applies to one of $H_{f,1,1,a/q}(s)$, $H_{f,a,p,-a/p}(s)$, $H_{\bar{f},1,1,a'/q}(s)$, or $H_{\bar{f},a',p,-a'/p}(s)$.
Since $$ N^s_{f,a,p}(T)=N^s_{\bar{f},a',p}(T)\le N^s_{f,1,1}(T)=N^s_{\bar{f},1,1}(T), $$ whichever function has the pole, we can apply Proposition~\ref{NfaqTlowerbound} to see that $N^s_f(T)$ satisfies the second and third conclusions. In particular, $N^s_f(T)\ge\log\log\log{T}$ for some arbitrarily large $T$, and if $k=1$ or $f$ is a CM form then Coleman's theorem \cite{Col90} implies that $$ 1-\theta_{f,a,p}(T)\ge 1-\theta_{f,1,1}(T)\gg (\log{T})^{-\frac23}(\log\log{T})^{-\frac13} \quad\text{for all }T\ge3, $$ whence $N^s_f(T)=\Omega(\exp((\log{T})^{\frac13-\varepsilon}))$ for all $\varepsilon>0$. Moreover, since $N^s_f(T)\ll1+T^\varepsilon$, we must have $\theta_{f,1,1}=1$, so $\Lambda_f(s)$ has simple zeros with real part arbitrarily close to $1$.
Finally, by Lemma~\ref{lem:holo}(ii) we have $\Theta(C_{f,a,p})=1$. Since $\Theta(H_{f,1,1,a/p})\le\frac12$, Lemma~\ref{lem:holo}(i) and \eqref{eq:thetah1h2} imply that $\Theta(H_{f,a,q,-a/q})=1$. Applying Proposition~\ref{NfaqTlowerbound}, it follows that $N^s_{f,a,q}(T)=\Omega(T^{\frac12-\varepsilon})$ for all $\varepsilon>0$. By Lemma~\ref{lem:holo}(i), $C_{f,a,q}(s)$ and $C_{\bar{f},a',q}(s)$ are holomorphic for $\Re(s)>\frac12$. Hence, by \eqref{eq:Cdef} and Proposition~\ref{voronoi}, all poles of $\Delta_{f,a,q}^*(s)$ that are not poles of $\Delta_f^*(s)$ lie on the line $\{s\in\mathbb{C}:\Re(s)=\frac12\}$. Since $N^s_f(T)\ll1+T^\varepsilon$, $\Delta_{f,a,q}^*(s)$ must have $\Omega(T^{\frac12-\varepsilon})$ poles with real part $\frac12$ and imaginary part in $[-T,T]$. By \eqref{Deltafaq}, the same applies to $\Delta_f(s,\chi)$ for some $\chi\pmod*{q}$.
\begin{comment} Consider the residue sum $$ S(y,\alpha)=\sum_{\rho}\Res{s=\rho}\bigl( \Delta_{f,a,q}^*(s)-\xi(q)q^{1-2s}\Delta_f^*(s)\bigr)(y-i\alpha)^{-\rho} =S_{f,a,q}(y,\alpha)-q\xi(q)S_{f,1,1}(q^2y,q^2\alpha). $$ By Proposition~\ref{prop:Mellin}, $$
\int_0^{|\alpha|/4}S(y,\alpha)y^{s+\frac{k-1}2}\frac{dy}{y} -\bigl(H_{f,a,q,\alpha}(s)-\xi(q)q^{2-2s-k}H_{f,1,1,q^2\alpha}(s)\bigr) $$ is holomorphic for $\Re(s)>0$. We take $\alpha=-a/q$, so that $$ \Theta\bigl(H_{f,a,q,\alpha}(s) -\xi(q)q^{2-2s-k}H_{f,1,1,q^2\alpha}(s)\bigr)=1. $$ Hence, following the proof of \eqref{eq:omega1} with the obvious modifications, we get $$ N^s_{f,a,q}(T)+N^s_{f,1,1}(T)=\Omega(T^{\frac23-\varepsilon}). $$ Since $N^s_{f,1,1}(T)\ll1+T^\varepsilon$, we see that $\Delta_{f,a,q}^*(s)$ has $\Omega(T^{\frac23-\varepsilon})$ poles with real part $\frac12$ and imaginary part in $[-T,T]$. \end{comment}
\end{document}
\begin{document}
\title{Large Galois images for Jacobian varieties of genus $3$ curves}
\begin{abstract} Given a prime number $\ell \geq 5$, we construct an infinite family of three-dimensional abelian varieties over $\mathbb{Q}$ such that, for any $A/\mathbb{Q}$ in the family, the Galois representation $\overline{\rho}_{A,\ell} \colon G_{\mathbb{Q}} \to \mathrm{GSp}_6(\mathbb{F}_{\ell})$ attached to the $\ell$-torsion of $A$ is surjective. Any such variety $A$ will be the Jacobian of a genus $3$ curve over $\mathbb{Q}$ whose respective reductions at two auxiliary primes we prescribe to provide us with generators of $\mathrm{Sp}_6(\mathbb{F}_{\ell})$. \end{abstract}
\section*{Introduction}
Let $\ell$ be a prime number. This paper is concerned with realisations of the general symplectic group $\mathrm{GSp}_6(\mathbb{F}_{\ell})$ as a Galois group over $\mathbb{Q}$, arising from the Galois action on the $\ell$-torsion points of three-dimensional abelian varieties defined over $\mathbb{Q}$.
More precisely, let $g \geq 1$ be an integer. One can exploit the theory of abelian varieties defined over $\mathbb{Q}$ as follows. If $A$ is an abelian variety of dimension $g$ defined over $\mathbb Q$, let $A[\ell] = A(\overline{\mathbb Q})[\ell]$ denote the $\ell$-torsion subgroup of $\overline{\mathbb Q}$-points of $A$. The natural action of the absolute Galois group $G_{\mathbb Q}=\text{Gal}(\overline{\mathbb Q}/\mathbb Q)$ on $A[\ell]$ gives rise to a continuous Galois representation $\overline{\rho}_{A,\ell}$ taking values in $\text{GL}(A[\ell]) \simeq \text{GL}_{2g}(\mathbb F_{\ell})$. If the abelian variety $A$ is moreover principally polarised, the image of $\overline{\rho}_{A,\ell}$ lies inside the general symplectic group $\text{GSp}(A[\ell])$ of $A[\ell]$ with respect to the symplectic pairing induced by the Weil pairing and the polarisation of~$A$; thus, we have a representation $$\overline{\rho}_{A,\ell} \: : \: G_{\mathbb Q} \longrightarrow \text{GSp}(A[\ell]) \simeq \text{GSp}_{2g}(\mathbb F_{\ell}),$$ providing a realisation of $\text{GSp}_{2g}(\mathbb F_{\ell})$ as a Galois group over $\mathbb Q$ if $\overline{\rho}_{A,\ell}$ is surjective.
The image of Galois representations attached to the $\ell$-torsion points of abelian varieties has been widely studied. For an abelian variety $A$ defined over a number field, the classical result of Serre ensures surjectivity for almost all primes $\ell$ when $\mathrm{End}_{\overline{\mathbb Q}}(A)=\mathbb{Z}$ and the dimension of $A$ is 2, 6 or odd (cf.~\cite{OeuvresSerre}). More recently, Hall \cite{Hall11} proves a result for any dimension, with the additional condition that $A$ has semistable reduction of toric dimension 1 at some prime. This result has been further generalised to the case of abelian varieties over finitely generated fields (cf.~\cite{AGP}).
We can use Galois representations attached to the torsion points of abelian varieties defined over $\mathbb{Q}$ to address the Inverse Galois Problem and its variations involving ramification conditions. For example, the Tame Inverse Galois Problem, proposed by Birch, asks if, given a finite group $G$, there exists a tamely ramified Galois extension $K/\mathbb{Q}$ with Galois group isomorphic to $G$. Arias-de-Reyna and Vila solved the Tame Inverse Galois problem for $\mathrm{GSp}_{2g}(\mathbb{F}_{\ell})$ when $g=1, 2$ and $\ell \geq 5$ is any prime number, by constructing a family of genus $g$ curves $C$ such that the Galois representation $\overline{\rho}_{\mathrm{Jac}(C), \ell}$ attached to the Jacobian variety $\mathrm{Jac}(C)$ is surjective and tamely ramified for every curve in the family (cf. \cite{SaraNuria09}, \cite{SaraNuria11}). For both $g=1$ and $g=2$, the strategy entails determining a set of local conditions at auxiliary primes, (that is to say, prescribing a finite list of congruences that the defining equation of $C$ should satisfy) which ensure the surjectivity of $\overline{\rho}_{\mathrm{Jac}(C), \ell}$, and a careful study of the ramification at $\ell$ in particularly favourable situations.
In fact, the strategy of ensuring surjectivity of the Galois representation attached to the $\ell$-torsion of an abelian variety by prescribing local conditions at auxiliary primes works in great generality. Given a $g$-dimensional principally polarised abelian variety $A$ over $\mathbb{Q}$, such that the Galois representation $\overline{\rho}_{A, \ell}$ is surjective, it is always possible to find some auxiliary primes $p$ and $q$ depending on $\ell$ such that any abelian variety $B$ defined over $\mathbb{Q}$ which is ``close enough'' to $A$ with respect to the primes $p$ and $q$ (in a sense that can be made precise in terms of $p$-adic, resp.~$q$-adic, neighbourhoods in moduli spaces of principally polarised $g$-dimensional abelian varieties with full level structure) also has a surjective $\ell$-torsion Galois representation $\overline{\rho}_{B,\ell}$. This is a consequence of Kisin's results on local constancy in $p$-families of Galois representations; the reader can find a detailed explanation of this aspect in \cite[Section 4.2]{AK13}.
In this paper we focus on the case $g=3$. Our aim is to find auxiliary primes $p$ and $q$ (depending on $\ell$), and explicit congruence conditions on polynomials defining genus~$3$ curves, which ensure that any curve $C$, defined by an equation over $\mathbb{Z}$ satisfying these congruences, will have the property that the image of $\overline{\rho}_{\mathrm{Jac}(C), \ell}$ coincides with $\mathrm{GSp}_{6}(\mathbb{F}_{\ell})$. In this way we obtain many distinct realisations of $\mathrm{GSp}_6(\mathbb{F}_{\ell})$ as a Galois group over $\mathbb{Q}$.
To state our main result, we introduce the following notation: we will say that a polynomial $f(x, y)$ in two variables is of \emph{3-hyperelliptic type} if it is of the form $f(x, y)=y^2-g(x)$, where $g(x)$ is a polynomial of degree $7$ or $8$ and of \emph{quartic type} if the total degree of $f(x, y)$ is $4$.
\begin{thm}\label{thm:main} Let $\ell\geq 13$ be a prime number. For all odd distinct prime numbers $p,q\neq \ell$, with $q>1.82\ell^2$, there exist $f_p(x, y), f_q(x, y)\in\mathbb Z[x,y]$ of the same type ($3$-hyperelliptic or quartic), such that for any $f(x, y)\in\mathbb Z[x,y]$ of the same type as $f_p(x, y)$ and $f_q(x, y)$ and satisfying \begin{equation*}f(x, y)\equiv f_q(x, y)\pmod{q} \quad \text{ and }\quad f(x, y)\equiv f_p(x, y)\pmod{p^3}, \end{equation*}
the image of the Galois representation $\overline{\rho}_{\mathrm{Jac}(C), \ell}$ attached to the $\ell$-torsion points of the Jacobian of the projective genus~$3$ curve $C$
defined over $\mathbb Q$ by the equation $f(x,y)=0$ is $\mathrm{GSp}_6(\mathbb{F}_{\ell})$.
Moreover, for $\ell\in\{5,7,11\}$ there exists a prime number $q\neq \ell$ for which the same statement holds for each odd prime number $p\neq q,\ell$. \end{thm}
In Section \ref{sec:4} we state and prove a refinement of this Theorem (cf.~Theorem \ref{thm:refined}). In fact, we have very explicit control of the polynomial $f_p(x, y)$. In general we can say little about $f_q(x, y)$, but for any fixed $\ell\geq 13$ and any fixed prime $q\neq\ell$ with $q>1.82\ell^2$ we can find suitable polynomials $f_q(x, y)$ by an exhaustive search as follows: there exist only finitely many polynomials $\bar{f}_q(x, y)\in\mathbb{F}_q[x, y]$ of $3$-hyperelliptic or quartic type with non-zero discriminant. For each of these, we can compute the characteristic polynomial of the action of the Frobenius endomorphism on the Jacobian of the curve defined by $\bar{f}_q(x, y)=0$ by counting the $\mathbb{F}_{q^r}$-points of this curve, for $r=1, 2, 3$, and check whether this polynomial is an ordinary $q$-Weil polynomial with non-zero middle coefficient which has non-zero trace modulo $\ell$ and is irreducible modulo $\ell$. Proposition \ref{irredmodell} ensures that the search will terminate. Then, any lift of $\bar{f}_q(x, y)$, of the same type, gives us a suitable polynomial $f_q(x, y)\in \mathbb{Z}[x, y]$. In Example \ref{ex:mainthm} we present some concrete examples obtained using \textsc{Sage} and \textsc{Magma}.
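A possible implementation of this search is sketched below for the $3$-hyperelliptic case; it is written for \textsc{Sage} (valid Python inside a Sage session). The sample primes, the random search strategy, and the use of the middle coefficient modulo $q$ as an ordinariness test (a standard criterion for abelian varieties over a prime field) are our illustrative choices, not the computation behind Example \ref{ex:mainthm}.
\begin{verbatim}
# Minimal Sage sketch (illustrative; not the authors' code) of the exhaustive
# search described above, in the 3-hyperelliptic case y^2 = g(x), deg g = 7.
# Sample primes; Theorem 1 needs q > 1.82*ell^2 and q != ell.
ell, q = 13, 331
Fq = GF(q)
R = PolynomialRing(Fq, 'x')
Fl = GF(ell)

def is_suitable(P):
    # P = characteristic polynomial of Frobenius (degree 6, integer coefficients)
    a = P.list()                    # [a0, a1, ..., a6]
    if a[3] % q == 0:               # middle coefficient non-zero and prime to q;
        return False                # for prime q this also tests ordinariness
    if Fl(-a[5]) == 0:              # trace of Frobenius = -a5, non-zero mod ell
        return False
    return P.change_ring(Fl).is_irreducible()

g = None
while g is None:
    h = R.random_element(degree=7)
    if h.degree() == 7 and h.discriminant() != 0:
        C = HyperellipticCurve(h)   # genus 3 since deg h = 7
        if is_suitable(C.frobenius_polynomial()):
            g = h

# Any lift of g to Z[x] of the same degree yields a suitable f_q(x, y) = y^2 - g(x).
print(g)
\end{verbatim}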
Note that the above result constitutes an explicit version of Proposition 4.6 of \cite{AK13} in the case of principally polarised $3$-dimensional abelian varieties. We can explicitly give the size of the neighbourhoods where surjectivity of $\overline{\rho}_{A, \ell}$ is preserved; in other words, we can give the powers of the auxiliary primes $p$ and $q$ such that any other curve defined by congruence conditions modulo these powers gives rise to a Jacobian variety with surjective $\ell$-torsion representation.
The proof of Theorem \ref{thm:main} is based on two main pillars: the classification of subgroups of $\mathrm{GSp}_{2g}(\mathbb{F}_{\ell})$ containing a non-trivial transvection, and the fact that one can force the image of $\overline{\rho}_{A, \ell}$ to contain a non-trivial transvection by imposing a specific type of ramification at an auxiliary prime. This strategy goes back to Le Duff \cite{LeDuff98} in the case of Jacobians of genus $2$ hyperelliptic curves, and has been extended to the general case by Hall in \cite{Hall11}, where he obtains a surjectivity result for $\overline{\rho}_{A, \ell}$ for almost all primes $\ell$.
We already followed this strategy in \cite{AAKRTV14} to formulate an explicit surjectivity result for $g$-dimensional abelian varieties (see Theorem 3.10 of loc.~cit.): let $A$ be a principally polarised $g$-dimensional abelian variety defined over $\mathbb{Q}$, such that the reduction of the N\'eron model of $A$ at some prime $p$ is semistable with toric rank 1, and the Frobenius endomorphism at some prime $q$ of good reduction for $A$ acts irreducibly and with trace $a\not=0$ on the reduction of the N\'eron model of $A$ at $q$. We proved that for each prime number $\ell\nmid 6pqa$, coprime with the order of the component group of the N\'eron model of $A$ at $p$, and such that the characteristic polynomial of the Frobenius endomorphism at $q$ is irreducible mod $\ell$, then the representation $\overline{\rho}_{A, \ell}$ is surjective.
Section~\ref{sectionone} collects some notations and tools that we will use in the rest of the paper. In Section~\ref{sec:2} we address the condition of semistable reduction of toric rank $1$ at a prime $p$; we obtain a congruence condition modulo $p^3$ (cf.~Proposition~\ref{jacobianthm}).
In Section~\ref{sec:3} we give conditions ensuring that the reduction of the N\'eron model of a Jacobian variety $A=\mathrm{Jac}(C)$ at a prime $q$ is an absolutely simple abelian variety over $\mathbb{F}_q$ such that the characteristic polynomial of the Frobenius endomorphism at $q$ is irreducible and has non-zero trace modulo $\ell$ (cf.~Theorem~\ref{A}). We make use of Honda-Tate Theory in the ordinary case, which relates so-called ordinary Weil polynomials to isogeny classes of ordinary abelian varieties defined over finite fields of characteristic $q$. First, we need to prove the existence of a suitable prime $q$ and a suitable ordinary Weil polynomial; this is the content of Proposition~\ref{irredmodell}, whose proof is postponed to Section~\ref{sectionirred}. This polynomial provides us with an abelian variety $A_q$ defined over $\mathbb{F}_q$; any abelian variety $A$ such that the reduction of the N\'eron model of $A$ at $q$ coincides with $A_q$ will satisfy the desired condition at $q$. At this point we use the fact that each indecomposable principally polarised $3$-dimensional abelian variety over $\mathbb{F}_q$ is the Jacobian of a genus $3$ curve, which can be defined over $\mathbb{F}_q$ up to a quadratic twist.
Once we have established congruence conditions at auxiliary primes $p$ and $q$, we need to check that any curve $C$ over $\mathbb{Z}$ whose defining equation satisfies these conditions will provide us with a Galois representation $\overline{\rho}_{\mathrm{Jac}(C), \ell}$ whose image is $\mathrm{GSp}_{6}(\mathbb{F}_{\ell})$. This is carried out in Section \ref{sec:4}.
David Zywina communicated to us that he has recently and independently developed a method for studying the image of Galois representations $\overline{\rho}_{\mathrm{Jac}(C), \ell}$ attached to the Jacobians of genus $3$ plane quartic curves $C$, for a large class of such curves (cf.~\cite{Zywina15}). In particular, for each prime $\ell$, he obtains a realisation of $\GSp_6(\mathbb{F}_{\ell})$ as a Galois group over $\mathbb{Q}$. Samuele Anni, Pedro Lemos and Samir Siksek also worked independently on this topic. In their paper \cite{ALS15}, they study semistable abelian varieties and provide an example of a hyperelliptic genus $3$ curve $C$ such that $\mathrm{Im}\overline{\rho}_{\mathrm{Jac}(C), \ell}=\GSp_6(\mathbb{F}_{\ell})$ for all $\ell\geq 3$. Both Zywina and Anni et al.~propose a method which, given a fixed genus $3$ curve $C$ satisfying suitable conditions, returns a finite list of primes such that the corresponding representation $\overline{\rho}_{\mathrm{Jac}(C), \ell}$ is surjective for any $\ell$ outside the list, generalising the approach of \cite{Dieulefait2002} for the case of genus $2$ to genus $3$. Both methods rely on Hall's surjectivity result \cite{Hall11} for the image of Galois representations attached to the torsion points of abelian varieties as the main technical tool. In our paper, however, we fix a prime $\ell\geq 5$ and give congruence conditions such that, for any genus $3$ curve $C$ satisfying them, we can ensure surjectivity of the attached Galois representation $\overline{\rho}_{\mathrm{Jac}(C), \ell}$. We also borrow some ideas from Hall's paper \cite{Hall11}, although formally we do not make use of his results.
\section{Geometric preliminaries}\label{sectionone}
In this section we recall some background from algebraic geometry and fix some notations.
\subsection{Hyperelliptic curves and curves of genus~3}\label{subsec:notation}
A smooth geometrically connected projective curve\footnote{In this article, we will say that a \emph{curve over a field $K$} is an algebraic variety over $K$ whose irreducible components are of dimension~$1$. (In particular, a curve can be singular.)} $C$ of genus $g\geq 1$ over a field $K$ is \emph{hyperelliptic} if there exists a degree $2$ finite separable morphism from $C_{\overline K} = C\times_{K} \overline K$ to $\mathbb P^1_{\overline K}$. If~$K$ is algebraically closed or a finite field, then such a curve $C$ has a \emph{hyperelliptic equation} defined over $K$\footnote{When $K$ is not algebraically closed nor a finite field, the situation can be more complicated (cf.~\cite[Section~4.1]{lercier_ritzenthaler}).}. That is to say, the function field of $C$ is $K(x)[y]$ under the relation $y^2+h(x)y=g(x)$ with $g(x), h(x)\in K[x]$, $\deg(g(x))\in \{2g+1, 2g+2\},$ and $\deg(h(x))\leq g$. Moreover, if $\mathrm{char}(K)\neq 2$, we can take $h(x)=0$. Indeed, in that case, the conic defined as the quotient of $C$ by the group generated by the hyperelliptic involution has a $K$-rational point, hence is isomorphic to $\mathbb P^1_{K}$ (see e.g.~\cite[Section~1.3]{lercier_ritzenthaler} for more details).
The curve $C$ is the union of the two affine open schemes \begin{equation*} \begin{aligned} U &=\mathrm{Spec} \left(K[x,y]/(y^2+h(x)y-g(x))\right)\quad \text{and}\\ V&=\mathrm{Spec} \left(K[t,w]/(w^2+t^{g+1}h(1/t)w-t^{2g+2}g(1/t))\right)\\\end{aligned}\end{equation*}
glued along $\mathrm{Spec}(K[x,y,1/x]/(y^2+h(x)y-g(x)))$ via the identifications $x=1/t, y=t^{-g-1}w$.
If $\mathrm{char}(K)\neq 2$, then any separable polynomial $g(x)\in K[x]$ of degree $2g+1$ or $2g+2$ gives rise to a hyperelliptic curve $C$ of genus $g$ defined over $K$ by glueing the open affine schemes $U$ and $V$ (with $h(x)=0$) as above. We will say that $C$ is \emph{given by the hyperelliptic equation $y^2=g(x)$.} We will also say, as in the introduction, that a polynomial in two variables is of \emph{$g$-hyperelliptic type} if it is of the form $y^2-g(x)$ with $g(x)$ a polynomial of degree $2g+1$ or $2g+2$.
In this article, we are especially interested in curves of genus~$3$. If $C$ is a smooth geometrically connected projective non-hyperelliptic curve of genus $3$ defined over a field $K$, then its canonical embedding $C\hookrightarrow \mathbb P_{K}^2$ identifies $C$ with a smooth plane quartic curve defined over $K$. This means that the curve $C$ has a model over $K$ given by $\mathrm{Proj}(K[X,Y,Z]/F(X,Y,Z))$ where $F(X, Y, Z)$ is a degree~$4$ homogeneous polynomial with coefficients in $K$. Conversely, any smooth plane quartic curve is the image by a canonical embedding of a non-hyperelliptic curve of genus $3$. If this curve is $\mathrm{Proj}(K[X,Y,Z]/F(X,Y,Z))$ where $F(X, Y, Z)$ is the homogenisation of a degree~$4$ polynomial $f(x, y)\in K[x,y]$, we will say that $C$ is the \emph{quartic plane curve defined by the affine equation $f(x,y)=0$}. We will say, as in the introduction, that a polynomial in two variables is of \emph{quartic type} if its total degree is $4$.
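Both normal forms can be checked to define genus $3$ curves directly in \textsc{Sage}; the snippet below (valid Python inside a Sage session, with arbitrarily chosen equations) is only meant as an illustration of the two types.
\begin{verbatim}
# Sage sketch: the two types of defining polynomial considered in this paper
# both give smooth projective curves of genus 3.  The equations are arbitrary.
R = PolynomialRing(QQ, 'x'); x = R.gen()

# 3-hyperelliptic type: y^2 = g(x) with deg g in {7, 8}.
C1 = HyperellipticCurve(x**7 - x - 1)
print(C1.genus())                        # 3

# Quartic type: a smooth plane quartic, here the Fermat quartic.
S = PolynomialRing(QQ, ['X', 'Y', 'Z']); X, Y, Z = S.gens()
C2 = Curve(X**4 + Y**4 + Z**4)
print(C2.genus())                        # 3
\end{verbatim}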
\subsection{Semistable curves and their generalised Jacobians}\label{subsec:jacobians}
We briefly recall the basic notions we need about semistable and stable curves, give the definition of the intersection graph of a curve and explain the link between this graph and the structure of their generalised Jacobian. The classical references we use are essentially \cite{Liu06} and \cite{BLR90}. For a nice overview which contains other references, the reader could also consult~\cite{romagny}.
A curve $C$ over a field $k$ is said to be \emph{semistable} if the curve $C_{\overline k}=C\times_{k} \overline k$ is reduced and has at most ordinary double points as singularities. It is said to be \emph{stable} if moreover $C_{\overline k}$ is connected, projective of arithmetic genus $\geq 2$, and if any irreducible component of $C_{\overline k}$ isomorphic to $\mathbb{P}^1_{\overline k}$ intersects the other irreducible components in at least three points. A proper flat morphism of schemes $\mathcal C\to S$ is said to be \emph{semistable} (resp. \emph{stable}) if it has semistable (resp. stable) geometric fibres.
Let $R$ be a discrete valuation ring with fraction field $K$ and residue field $k$. Let $C$ be a smooth projective geometrically connected curve over $K$. A \emph{model} of $C$ over $R$ is a normal scheme $\mathcal C/R$ such that $\mathcal C\times_{R} K\cong C$. We say that $C$ has \emph{semistable reduction} (resp. \emph{stable reduction}) if $C$ has a model $\mathcal C$ over $R$ which is a semistable (resp. stable) scheme over $R$. If such a stable model exists, it is unique up to isomorphism and we call it \emph{the stable model of $C$ over $R$} (cf.~\cite[Chap.10, Definition 3.27 and Theorem 3.34]{Liu06}).
If the curve $C$ has genus $g\geq 1$, then it admits a minimal regular model $\mathcal C_{min} $ over $R$, unique up to unique isomorphism. Moreover, $\mathcal C_{min}$ is semistable if and only if $C$ has semistable reduction, and if $g\geq 2$, this is equivalent to $C$ having stable reduction (cf.~\cite[Chap. 10, Theorem 3.34]{Liu06}, or \cite[Theorem 3.1.1]{romagny} when $R$ is strictly henselian).
Assume that $C$ is a smooth projective geometrically connected curve of genus $g\geq 2$ over $K$ with semistable reduction. Denote by $\mathcal C$ its stable model over $R$ and by $\mathcal C_{min}$ its minimal regular model over $R$. We know that the Jacobian variety $J=\mathrm{Jac}(C)$ of $C$ admits a N\'{e}ron model $\mathcal J$ over $R$ and the canonical morphism $\mathrm{Pic}^0_{\mathcal C/R}\to \mathcal J^0$ is an isomorphism (cf.~\cite[$\S 9.7$, Corollary 2]{BLR90}). Note that since $\mathcal C_{min}$ is also semistable, we have $\mathrm{Pic}^0_{\mathcal C_{min}/R}\cong \mathcal J^0$. Moreover, the abelian variety $J$ has semistable reduction, that is to say $\mathcal J^0_{k}\cong \mathrm{Pic}^0_{\mathcal C_{ k}/k}$ is canonically an extension of an abelian variety by a torus $T$. As we will see, the structure of the algebraic group $\mathcal J^0_{ k}$ (by which we mean the toric rank and the order of the component group of its geometric special fibre) is related to the intersection graphs of $\mathcal C_{\overline k}$ and $\mathcal C_{min,\overline k}$.
Let $X$ be a curve over $\overline{k}$. Consider the \emph{intersection graph} (or \emph{dual graph}) $\Gamma(X)$, defined as the graph whose vertices are the irreducible components of $X$, where two irreducible components $X_i$ and $X_j$ are connected by as many edges as there are points in the intersection $X_i\cap X_j$. In particular, if the curve $X$ is semistable, two components $X_i$ and $X_j$ are connected by one edge for each singular point lying on both $X_i$ and $X_j$. Here $X_i=X_j$ is allowed. The \emph{(intersection) graph without loops}, denoted by $\Gamma'(X)$, is the graph obtained by removing from $\Gamma(X)$ the edges corresponding to~$X_i=X_j$.
Next, we paraphrase \cite[$\S 9.2$, Example 8]{BLR90}, which gives the toric rank in terms of the cohomology of the graph $\Gamma(\mathcal C_{\overline k})$. \begin{pr}[\cite{BLR90}, $\S 9.2$, Ex.~8]\label{BLRExactSeq} The N\'{e}ron model $\mathcal J$ of the Jacobian of the curve $\mathcal{C}_k$ has semistable reduction. More precisely, let $X_1,\ldots, X_r$ be the irreducible components of $\mathcal C_{k}$, and let $\widetilde X_1,\dots, \widetilde X_r$ be their respective normalisations. Then the canonical extension associated to $\mathrm{Pic}^0_{\mathcal C_{k}/ k}$ is given by the exact sequence \[ 1\longrightarrow T\hookrightarrow \mathrm{Pic}^0_{\mathcal C_{k}/k}\xrightarrow{\pi^*}\prod_{i=1}^r \mathrm{Pic}^0_{\widetilde X_i/k} \longrightarrow 1 \] where the morphism $\pi^*$ is induced by the morphisms $\pi_i:\widetilde X_i\longrightarrow X_i$. The rank of the torus~$T$ is equal to the rank of the cohomology group $H^1(\Gamma(\mathcal C_{\overline{k}}),\mathbb Z). $ \end{pr} We will use the preceding result in Sections~\ref{sec:2} and \ref{sec:3}. Note that the toric rank does not change if we replace $\mathcal C$ by $\mathcal C_{min}$.
The intersection graph of $\mathcal C_{min,\overline k}$ also determines the order of the component group of the geometric special fibre $\mathcal J_{\overline k}$. Indeed, the scheme $\mathcal C_{min}\times R^{sh}$, where $R^{sh} $ is the strict henselisation of $R$, fits the hypotheses of \cite[$\S 9.6$, Proposition 10]{BLR90} which gives the order of the component group in terms of the graph of $\mathcal C_{min,\overline k}$; we reproduce it here for the reader's convenience. \begin{pr}[\cite{BLR90}, $\S 9.6$, Prop.~10]\label{prop:Phi} Let $X$ be a proper and flat curve over a strictly henselian discrete valuation ring $R$ with algebraically closed residue field $\overline k$. Suppose that $X$ is regular and has geometrically irreducible generic fibre as well as a geometrically reduced special fibre $X_{\overline k}$. Assume that $X_{\overline k}$ consists of the irreducible components $X_1,\dots, X_r$ and that the local intersection numbers of the $X_i$ are $0$ or $1$ (the latter is the case if different components intersect at ordinary double points). Furthermore, assume that the intersection graph without loops $\Gamma'(X_{\overline k})$ consists of $l$ arcs of edges $\lambda_1,\dots,\lambda_l$, starting at $X_1$ and ending at $X_r$, each arc $\lambda_i$ consisting of $m_{i}$ edges. Then the component group $\mathcal J(R^{sh})/\mathcal J^0(R^{sh})$ has order $\sum_{i=1}^l \prod_{j\neq i}m_{j}$. \end{pr}
We will use this result in the proof of Proposition~\ref{jacobianthm}.
\section{Local conditions at $p$}\label{sec:2}
Let $p>2$ be a prime number. Denote by $\mathbb{Z}_p$ the ring of $p$-adic integers and by $\mathbb{Q}_p$ the field of $p$-adic numbers.
\begin{defn}\label{polyn} Let $f(x,y)\in\mathbb Z_p[x,y]$ be a polynomial with $f(0,0)=0$ or $v_p(f(0,0))> 2$. We say that $f(x, y)$ is of type: \begin{enumerate} \item[(H)] if $f(x,y)=y^2-g(x)$, where $g(x)\in\mathbb Z_p[x]$ is of degree $7$ or $8$ and such that $$g(x)\equiv x(x-p)m(x)\bmod{p^2\mathbb Z_p[x]},$$ with $m(x)\in \mathbb{Z}_p[x]$ such that all the roots of its mod $p$ reduction are simple and non-zero;
\item[(Q)] if $f(x,y)$ is of total degree $4$ and such that $$f(x,y)\equiv px+x^2-y^2+x^4+y^4 \bmod{p^2\mathbb Z_p[x,y]}.$$ \end{enumerate} \end{defn}
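As an illustration of Definition~\ref{polyn} (this sketch is not part of the paper's argument), the conditions of type (H) can be tested mechanically. The Python code below relies on the \texttt{sympy} library; the choices of $p$, $m(x)$ and of the perturbation $h(x)$ are ours and purely illustrative.
\begin{verbatim}
# Illustrative check of the type (H) conditions; p, m(x), h(x) are sample
# choices, not taken from the paper.
from sympy import symbols, expand, Poly, factor_list

x = symbols('x')
p = 7
m = expand((x - 1)*(x - 2)*(x - 3)*(x - 4)*(x - 5))   # degree 5, roots 1..5
h = x**3 + x                                          # perturbation with h(0) = 0
g = expand(x*(x - p)*m + p**2*h)                      # candidate g(x), degree 7

# f(x, y) = y^2 - g(x) must satisfy f(0,0) = 0 or v_p(f(0,0)) > 2:
assert g.subs(x, 0) % p**3 == 0

# (i) g(x) is congruent to x(x - p)m(x) modulo p^2:
delta = Poly(expand(g - x*(x - p)*m), x)
assert all(int(c) % p**2 == 0 for c in delta.all_coeffs())

# (ii) the reduction of m(x) mod p has simple, non-zero roots:
_, fac = factor_list(m, x, modulus=p)
assert all(e == 1 for _, e in fac)                    # squarefree mod p
assert m.subs(x, 0) % p != 0                          # 0 is not a root mod p
print("g(x) is of type (H) for p =", p)
\end{verbatim}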
For $f(x,y) \in \mathbb Z_p[x,y]$ a polynomial of type (H) or (Q), we will consider the projective curve $C$ defined by $f(x, y)=0$ as explained in Subsection~\ref{subsec:notation} and the scheme $\mathcal C$ over $\mathbb Z_p$ defined, for each case of Definition~\ref{polyn} respectively, as follows: \begin{enumerate} \item[(H)] the union of the two affine subschemes $$U=\mathrm{Spec} (\mathbb Z_p[x,y]/(y^2- g(x))) \textrm{ and } V=\mathrm{Spec}(\mathbb Z_p[t,w]/(w^2-g(1/t)t^8))$$ glued along $\mathrm{Spec}(\mathbb Z_p[x,y,1/x]/(y^2-g(x)))$ via $x=1/t, y=t^{-4}w$; \item[(Q)] the scheme $\mathrm{Proj}(\mathbb Z_p[X,Y,Z]/(F(X, Y, Z)))$, where $F(X, Y, Z)$ is the homogenisation of $f(x, y)$. \end{enumerate} This scheme has generic fibre $C$.
\begin{pr}\label{prop:curve} Let $f(x,y) \in \mathbb Z_p[x,y]$ be a polynomial of type (H) or (Q) and $C$ be the projective curve defined by $f(x, y)=0$. The curve $C$ is a smooth projective and geometrically connected curve of genus $3$ over $\mathbb Q_p$ with stable reduction. Moreover, the scheme $\mathcal C$ is the stable model of $C$ over $\mathbb Z_p$ and the stable reduction is geometrically integral with exactly one singularity, which is an ordinary double point. \end{pr}
\begin{proof} With the description we gave in Subsection~\ref{subsec:notation} of what we called the \emph{projective curve defined by $f$}, smoothness over $\mathbb{Q}_p$ follows from the Jacobian criterion. Together with the degree conditions in Definition~\ref{polyn}, this implies that $C$ is a smooth projective curve of genus $3$.
The polynomials defining the affine schemes $U$ and $V$ and the quartic polynomial $F(X, Y, Z)$ are all irreducible over $\overline{\mathbb Q}_p$, hence over $\mathbb Z_p$. So the curve $C$ is geometrically integral (hence geometrically irreducible and geometrically connected) and $\mathcal C$ is integral as a scheme over $\mathbb Z_p$. It follows in particular that $\mathcal C$ is flat over $\mathbb Z_p$ (cf.~\cite[Chap.~4, Corollary 3.10]{Liu06}). Hence, $\mathcal C$ is a model of $C $ over~$\mathbb Z_p$.
We will show that $\mathcal{C}_{\mathbb F_p}$ is semistable (i.e.~reduced with only ordinary double points as singularities) with exactly one singularity.
Combined with flatness, semistability will imply that the scheme $\mathcal C$ is semistable over $\mathbb Z_p$. Since $C$ has genus greater than $2$, and $C=\mathcal C_{\mathbb Q_p}$ is smooth and geometrically connected, this is then equivalent to saying that $C$ has stable reduction at $p$ with stable model $\mathcal C$, as required (cf.~\cite[Theorem~3.1.1]{romagny}).
In what follows, we denote by $\bar{f}$ the reduction modulo $p$ of any polynomial $f$ with coefficients in~$\mathbb Z_p$. In Case (H), $\mathcal{C}_{\overline{\mathbb F}_p}$ is the union of the two affine subschemes $U'=\mathrm{Spec}({\overline{\mathbb F}_p}[x,y]/(y^2-x^2\bar m(x)))$ and $V'=\mathrm{Spec}({\overline{\mathbb F}_p}[t,w]/(w^2-\bar m(1/t)t^6))$, glued along $\mathrm{Spec}(\overline{\mathbb F}_p[x,y,1/x]/(y^2-\bar g(x)))$ via $x=1/t$ and $y=t^{-4}w$ (cf.~\cite[Chap.~10, Example 3.5]{Liu06}). In Case (Q), the geometric special fibre is $\mathrm{Proj}({\overline{\mathbb F}_p}[X,Y,Z]/(\bar{F}(X, Y, Z)))$. In both cases, the defining polynomials are irreducible over ${\overline{\mathbb F}_p}$. Hence, $\mathcal{C}_{\overline{\mathbb F}_p}$ is integral, i.e.~reduced and irreducible.
Next, we prove that $\mathcal C_{\overline{\mathbb F}_p}$ has only one ordinary double point as singularity. For Case (H), see e.g.~\cite[Chap.~10, Examples 3.4, 3.5 and 3.29]{Liu06}. For Case (Q), we proceed analogously: first consider the open affine subscheme of $\mathcal{C}_{\overline{\mathbb F}_p}$ defined by $U=\mathrm{Spec}(\overline{\mathbb F}_p[x,y]/(\bar{f}(x,y)))$, where $\bar{f}(x,y)=x^2-y^2+x^4+y^4\in \mathbb{F}_p[x, y]$. Since $\mathcal C_{\overline{\mathbb F}_p}\backslash U$ is smooth, it suffices to prove that $U$ has only ordinary double points as singularities. Let $u\in U$. The Jacobian criterion shows that $U$ is smooth at $u\neq (0,0)$. So suppose that $u=(0,0)$, and note that $\bar f(x,y)=x^2(1+x^2)-y^2(1-y^2)$. Since $2\in\overline{\mathbb F}_p^\times$, there exist $a(x)=1+xc(x)\in {\overline{\mathbb F}_p}[[x]]$ and $b(y)=1+yd(y)\in {\overline{\mathbb F}_p}[[y]]$ such that $1+x^2=a(x)^2$ and $1-y^2=b(y)^2$ (cf.~\cite[Chap.~1, Exercise 3.9]{Liu06}). Then we have $$\widehat{\mathcal O}_{U,u}\cong {\overline{\mathbb F}_p}[[x,y]]/\big((xa(x)+yb(y))(xa(x)-yb(y))\big)\cong {\overline{\mathbb F}_p}[[t,w]]/(tw).$$ It follows that $\mathcal C_{\overline{\mathbb F}_p}$ has only one singularity (at $[0:0:1]$), which is an ordinary double point. We have thus shown that $\mathcal C$ is the stable model of $C$ over $\mathbb Z_p$ and that its special fibre is geometrically integral with only one singularity, which is an ordinary double point.
\end{proof}
\begin{pr}\label{jacobianthm} Let $f(x,y) \in \mathbb Z_p[x,y]$ be a polynomial of type (H) or (Q) and $C$ be the projective curve defined by $f(x, y)=0$.
The Jacobian variety $\mathrm{Jac}(C)$ of the curve $C$ has a N\'eron model $\mathcal J$ over
$\mathbb Z_p$ which has semi-abelian reduction of toric rank $1$. The component group of the geometric
special fibre of $\mathcal J$ over $\overline{\mathbb F}_p$ has order $2$.
\end{pr}
\begin{proof} By Proposition~\ref{prop:curve}, the curve $C$ is a smooth projective geometrically connected curve of genus $3$ over $\mathbb Q_p$ with stable reduction and stable model $\mathcal C$ over $\mathbb Z_p$. Let $\mathcal{C}_{min}$ be the minimal regular model of $C$. As recalled in Subsection~\ref{subsec:jacobians}, $\mathrm{Jac}(C)$ admits a N\'eron model
$\mathcal J$ over $\mathbb Z_p$ and the canonical morphism $\mathrm{Pic}^0_{\mathcal C/\mathbb Z_p}\to \mathcal J^0$ is an isomorphism. In particular, $\mathcal J$ has semi-abelian reduction and $\mathcal J^0_{\mathbb F_p}\cong\mathrm{Pic}^0_{\mathcal C_{\mathbb F_p}/{\mathbb F_p}}$. Since $\mathcal C_{min}$ is also semistable, we have $\mathrm{Pic}^0_{\mathcal C_{min}/\mathbb Z_p}\cong \mathcal J^0$.
By Proposition \ref{BLRExactSeq}, the toric rank of $\mathcal J^0_{\overline{\mathbb F}_p}$ is equal to the rank of the cohomology group $H^1(\Gamma(\mathcal C_{\overline{\mathbb F}_p}),\mathbb Z)$ of the dual graph of $\mathcal C_{\overline{\mathbb F}_p}$. Since $\mathcal C_{\overline{\mathbb F}_p}$ is irreducible and has only one ordinary double point, the dual graph consists of one vertex and one loop, so the toric rank of $\mathcal J^0_{\overline{\mathbb F}_p}$ is $1$.
To determine the order of the component group of the geometric special fibre $\mathcal J_{\overline{\mathbb F}_p}$, we apply Proposition \ref{prop:Phi} to the minimal regular model $\mathcal C_{min}\times \mathbb Z_p^{sh}$, where $\mathbb Z_p^{sh}$ is the strict henselisation of $\mathbb Z_p$. This is still regular and semistable over $\mathbb Z_p^{sh}$ (cf.~\cite[Chap.~10, Proposition~3.15-(a)]{Liu06}). Let $e$ denote the thickness of the ordinary double point of $\mathcal C_{\overline{\mathbb F}_p}$ (as defined in \cite[Chap.~10, Definition 3.23]{Liu06}). Then by \cite[Chap.~10, Corollary 3.25]{Liu06}, the geometric special fibre $\mathcal C_{min,\overline{\mathbb F}_p}$ of $\mathcal C_{min}\times \mathbb Z_p^{sh}$ consists of a chain of $e-1$ projective lines over $\overline{\mathbb F}_p$ and one component of genus $2$ (where the latter corresponds to the irreducible component $\mathcal C_{\overline{\mathbb F}_p}$), which meet transversally at rational points. It follows from Proposition \ref{prop:Phi} that the order of the component group $\mathcal J(\mathbb Z_p^{sh})/\mathcal J^0(\mathbb Z_p^{sh})$ of the geometric special fibre is equal to the thickness~$e$.
We will now show that in both cases (H) and (Q), the thickness $e$ is equal to $2$, which will conclude the proof of Proposition~\ref{jacobianthm}. For this, in several places, we will use the well-known fact that every formal power series in $\mathbb Z_p[[x]]$ (resp. $\mathbb Z_p[[y]]$, $\mathbb Z_p[[x,y]]$) with constant term $1$ (or more generally a unit square in $\mathbb Z_p$) is the square of an invertible formal power series in $\mathbb Z_p[[x]]$ (resp. $\mathbb Z_p[[y]]$, $\mathbb Z_p[[x,y]]$).
Let $U$ denote the affine subscheme $\mathrm{Spec}(\mathbb Z_p[x,y]/(f(x,y)))$ which contains the ordinary double point $P=[0:0:1]$. Firstly, we claim that, possibly after a finite extension of scalars $R/\mathbb Z_p$ which splits the singularity, in both cases we may write in $R[[x,y]]$: \begin{equation}\label{formf}\pm f(x,y)=x^2a(x)^2-y^2b(y)^2+p\alpha x+p^2yg(x,y)+p^{r}\beta \end{equation} where $ a(x) \in R[[x]]^\times,b(y) \in R[[y]]^\times, g(x,y)\in\mathbb Z_p[x,y], \alpha\in\mathbb Z_p^\times$, $\beta\in\mathbb Z_p$. Moreover, from the assumptions on $f$, it follows that either $\beta=0$, or $\beta \in \mathbb Z_p^\times $ and $r=v_p(f(0,0))> 2$.
We prove the claim case by case: \begin{enumerate} \item[(H)] We have $f(x,y)=y^2-g(x)=y^2-x(x-p)m(x)+p^2h(x)$ for some $h(x)\in\mathbb Z_p[x]$. Since $h(x)=h(0)+xs(x)$ for some $s(x)\in\mathbb Z_p[x]$ and $m(x)+ps(x)=m(0)+p s(0)+xt(x)$ for some $t(x)\in\mathbb Z_p[x]$, we obtain \begin{eqnarray*} f(x,y) &=& y^2-x^2m(x)+px(m(x)+ps(x))+p^2h(0)\\
&=& y^2-x^2(m(x)-pt(x))+px(m(0)+ps(0))+p^2h(0). \end{eqnarray*}
Since $m(0)\not\equiv 0\pmod p$, we have $m(0)-pt(0)\in\mathbb Z_p^\times$; hence, if we extend the scalars to some finite extension $R$ of $\mathbb Z_p$ in which $m(0)-pt(0)$ is a square, we get that $m(x)-pt(x)$ is the square of some $a(x)\in R[[x]]^\times$. Then $-f(x,y)$ has the expected form.
Note that $R/\mathbb Z_p$ is unramified because $p\neq 2$ and $m(0)\not\equiv 0\pmod p$, so we still denote the maximal ideal of $R$ above $p\in\mathbb Z_p$ by $p$.
\item[(Q)] We have $f(x,y)=x^4+y^4+x^2-y^2+px+p^2h(x,y)$ for some $h(x,y)\in\mathbb Z_p[x,y]$. We may write $h(x,y)=\delta+x\gamma+x^2s(x)+yt(x,y)$ for some $\gamma,\delta\in\mathbb Z_p$, $s(x) \in\mathbb Z_p[x]$ and $t(x,y)\in\mathbb Z_p[x,y]$. We obtain \begin{equation*}\begin{aligned} f(x,y)&=x^2(1+x^2)-y^2(1-y^2)+px+p^2(\delta+x\gamma+x^2s(x)+yt(x,y))\\
& = x^2(1+x^2+p^2 s(x))-y^2(1-y^2)+px(1+p\gamma)+p^2yt(x,y)+p^2\delta. \end{aligned}\end{equation*} Since $1+x^2+p^2 s(x)$ and $1-y^2$ have constant terms which are squares in $\mathbb Z_p^\times$, the formal power series are squares in $\mathbb Z_p[[x]]$, resp. $\mathbb Z_p[[y]]$. So $f(x,y)$ again has the desired form.
\end{enumerate}
Next, we show that $e=2$ for $\pm f(x,y)$ of the form \eqref{formf}. In $R[[x,y]]$, we have $$\pm f(x,y)=\left(xa(x)+p\frac{\alpha}{2a(x)}\right)^2-\left(yb(y)-p^2\frac{g(x,y)}{2b(y)}\right)^2+p^2c(x,y),$$ where $c(x,y)=p^{r-2}\beta-\frac{\alpha^2}{4a(x)^2}+p^2\frac{g(x,y)^2}{4b(y)^2}$. Since either $\beta=0$ or $r>2$ and $\frac{\alpha^2}{4a(0)^2}\not \equiv 0\pmod p$, the constant term $\gamma$ of the formal power series $c(x, y)$ belongs to $R^\times$. It follows that $\gamma^{-1}c(x,y)$ is the square of some formal power series $d(x,y)\in R[[x,y]]^\times$. Defining the variables $$u=\frac{xa(x)}{d(x,y)}+p\frac{\alpha}{2a(x)d(x,y)}-\frac{yb(y)}{d(x,y)}+p^2\frac{g(x,y)}{2b(y)d(x,y)}$$ and $$v= \frac{xa(x)}{d(x,y)}+p\frac{\alpha}{2a(x)d(x,y)}+\frac{yb(y)}{d(x,y)}-p^2\frac{g(x,y)}{2b(y)d(x,y)},$$ we get $ \widehat O_{U\times R,P}\cong R[[u,v]]/(uv\pm p^2\gamma)$. Since $ \gamma\in R^\times$, it follows that $e=2$.
\end{proof}
\section{Local conditions at $q$}\label{sec:3}
This section is devoted to the proof of the following key result. In the statement, the two conditions on the characteristic polynomial, namely non-zero trace and irreducibility modulo $\ell$, are the ones appearing in Theorem~2.10 of~\cite{AAKRTV14} which is used to prove the main Theorem~\ref{thm:main}.
\begin{thm}\label{A} Let $\ell \geq 13$ be a prime number. For every prime number $q>1.82\ell^2$, there exists a smooth geometrically connected curve $C_q$ of genus $3$ over $\mathbb F_q$ whose Jacobian variety $\mathrm{Jac}(C_q)$ is a $3$-dimensional ordinary absolutely simple abelian variety such that the characteristic polynomial of its Frobenius endomorphism is irreducible modulo $\ell$ and has non-zero trace modulo $\ell$.
Moreover, for $\ell\in\{3,5,7,11\}$, there exists a prime number $q>1.82\ell^2$ such that the same statement holds. \end{thm}
For any integer $g \geq 1$, a $g$-dimensional abelian variety over a finite field $k$ with $q$ elements is said to be \emph{ordinary} if its group of $\mathrm{char}(k)$-torsion points has rank $g$.
The proof of Theorem~\ref{A} relies on Honda-Tate theory, which relates abelian varieties to Weil polynomials:
\begin{defn}\label{weilpol} A \emph{Weil $q$-polynomial}, or simply a \emph{Weil polynomial}, is a monic polynomial $P_q(X) \in \mathbb Z[X]$ of even degree $2g$ whose complex roots are all \emph{Weil $q$-numbers}, i.e., algebraic integers with absolute value $\sqrt{q}$ under all of their complex embeddings. Moreover, a Weil $q$-polynomial is said to be \emph{ordinary} if its middle coefficient is coprime to $q$. \end{defn} In particular, for $g=3$, every Weil $q$-polynomial of degree~6 is of the form \begin{equation*} P_q(X)=X^6+aX^5+bX^4+cX^3+qbX^2+q^2aX+q^3 \end{equation*} for some integers $a$, $b$ and $c$ (cf.~\cite[Proposition 3.4]{Howe95}). Such a Weil polynomial is ordinary if, moreover, $c$ is coprime to $q$.
Conversely, not every polynomial of this form is a Weil polynomial. However, we will prove in Proposition~\ref{boundqweil} that for $q > 1.82 \ell^2$,
every polynomial as above with $|a|,|b|,|c| <\ell$ is a Weil $q$-polynomial.
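In practice, the defining property can also be tested numerically for a given tuple $(q,a,b,c)$: a monic integer polynomial of the above shape is a Weil $q$-polynomial exactly when all of its complex roots have absolute value $\sqrt q$. The following Python sketch (illustrative only; the helper names are ours, \texttt{numpy} is used and floating-point roots make this a heuristic check rather than a proof) tests this, together with ordinarity, for the tuple $(q,a,b,c)=(313,1,0,4)$ appearing among the examples of Section~\ref{ex:section5}.
\begin{verbatim}
# Heuristic numerical test of the Weil property for
# P_q(X) = X^6 + aX^5 + bX^4 + cX^3 + qbX^2 + q^2 aX + q^3.
import numpy as np
from math import gcd, sqrt

def is_weil_numeric(q, a, b, c, tol=1e-6):
    roots = np.roots([1, a, b, c, q*b, q*q*a, q**3])
    return all(abs(abs(r)/sqrt(q) - 1) < tol for r in roots)

def is_ordinary(q, c):
    return gcd(c, q) == 1       # middle coefficient coprime to q

q, a, b, c = 313, 1, 0, 4       # example from Section 5 (l = 13)
print(is_weil_numeric(q, a, b, c), is_ordinary(q, c))
\end{verbatim}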
As an important example, the characteristic polynomial of the Frobenius endomorphism of an abelian variety over $\mathbb{F}_q$ is a Weil $q$-polynomial, by the Riemann hypothesis as proven by Deligne.\\
A variant of the Honda-Tate Theorem (cf.~\cite[Theorem~3.3]{Howe95}) states that the map which sends an ordinary abelian variety over $\mathbb F_q$ to the characteristic polynomial of its Frobenius endomorphism induces a bijection between the set of isogeny classes of ordinary abelian varieties of dimension $g \geq 1$ over $\mathbb F_q$ and the set of ordinary Weil $q$-polynomials of degree $2g$. Moreover, under this bijection, isogeny classes of simple ordinary abelian varieties correspond to irreducible ordinary Weil $q$-polynomials.\\
Hence, the proof of Theorem~\ref{A} consists in proving the existence of an irreducible ordinary Weil $q$-polynomial of degree 6 which gives rise to an isogeny class of simple ordinary abelian varieties of dimension 3. By Howe (cf.~\cite[Theorem~1.2]{Howe95}), such an isogeny class contains a principally polarised abelian variety $A$ over $\mathbb F_q$, which is the Jacobian variety of some curve defined over $\overline{\mathbb F}_q$ by results due to Oort and Ueno. If this abelian variety $A$ is moreover absolutely simple, the curve is geometrically irreducible and we can conclude by a Galois descent argument. Thus, it is a natural question whether the Weil $q$-polynomial determines whether the abelian varieties in the isogeny class are absolutely simple.
In \cite{HoweZhu02}, Howe and Zhu give a sufficient condition for an abelian variety over a finite field to be absolutely simple; for ordinary varieties, this condition is also necessary. Let $A$ be a simple abelian variety over a finite field, $\pi$ its Frobenius endomorphism and $m_A(X)\in\mathbb{Z}[X]$ the minimal polynomial of $\pi$. Since $A$ is simple, the subalgebra $\mathbb Q(\pi)$ of $\mathrm{End}(A)\otimes \mathbb Q$ is a field; it contains a filtration of subfields $\mathbb Q(\pi^d)$ for $d>1$. If moreover $A$ is ordinary, then the fields $\mathrm{End}(A)\otimes \mathbb Q=\mathbb Q(\pi) $ and $\mathbb Q(\pi^d)$ $(d>1)$ are all CM-fields, i.e., totally imaginary quadratic extensions of a totally real field. A slight reformulation of Howe and Zhu's criterion is the following (see Proposition~3 and Lemma~5 of \cite{HoweZhu02}):
\begin{pr}[Howe-Zhu criterion for absolute simplicity]\label{howe-zhu} Let $A$ be a simple abelian variety over a finite field $k$. If $\mathbb Q(\pi^d)=\mathbb Q(\pi)$ for all integers $d>0$, then $A$ is absolutely simple. If $A$ is ordinary, then the converse is also true, and if $\mathbb Q(\pi^d)\neq\mathbb Q(\pi)$ for some $d>0$, then $A$ splits over the degree $d$ extension of $k$.
Moreover, if $\mathbb Q(\pi^d)$ is a proper subfield of $\mathbb Q(\pi)$ such that $\mathbb Q(\pi^r)=\mathbb Q(\pi)$ for all $r<d$, then either $m_A(X)\in\mathbb Z[X^d] $, or $\mathbb Q(\pi)=\mathbb Q(\pi^d,\zeta_d)$ for a primitive $d$-th root of unity $\zeta_d$. \end{pr}
From this criterion, Howe and Zhu give elementary conditions for a simple $2$-dimensional abelian variety to be absolutely simple, see~\cite[Theorem~6]{HoweZhu02}. Elaborating on their criterion and inspired by \cite[Theorem~6]{HoweZhu02}, we prove the following for dimension 3:
\begin{pr}\label{abssimple} Let $A$ be an ordinary simple abelian variety of dimension $3$ over a finite field $k$ of odd cardinality $q$. Then either $A$ is absolutely simple or the characteristic polynomial of the Frobenius endomorphism of $A$ is of the form $X^6+cX^3+q^3$ with $c$ coprime to $q$ and $A$ splits over the degree~$3$ extension of $k$. \end{pr}
\begin{proof} Let $A$ be an ordinary simple but not absolutely simple abelian variety of dimension $3$ over~$k$. Since $A$ is simple, the characteristic polynomial of $\pi$ is $m_A(X)$. We apply Proposition~\ref{howe-zhu} to $A$: Let $d$ be the smallest integer such that $\mathbb Q(\pi^d)\neq\mathbb Q(\pi)$. Either $m_A(X)\in\mathbb Z[X^d]$ or there exists a $d$-th root of unity $\zeta_d$ such that $\mathbb Q(\pi)=\mathbb Q(\pi^d,\zeta_d)$.
We will prove by contradiction that $m_A(X)\in\mathbb Z[X^d]$. Since $m_A(X)$ is ordinary, the coefficient of degree $3$ is non-zero, and it will follow that $d=3$ and that $m_A(X)$ has the form $X^6+cX^3+q^3$, proving the proposition.
So, suppose that $m_A(X)\not\in\mathbb Z[X^d]$. The field $K=\mathbb Q(\pi)=\mathbb Q(\pi^d,\zeta_d)$ is a CM-field of degree $6$ over $\mathbb Q$, hence its proper CM-subfield $L=\mathbb Q(\pi^d)$ has to be a quadratic imaginary field. It follows that $\phi(d)=3$ or $6$, where $\phi$ denotes the Euler totient function.
However, $\phi(d)=3$ has no solution, so we must have $\phi(d)=6$, i.e. $d\in\{7,9,14,18\}$, and $K=\mathbb Q(\zeta_d)$. Note that $\mathbb Q(\zeta_7)=\mathbb Q(\zeta_{14})$ and $\mathbb Q(\zeta_9)=\mathbb Q(\zeta_{18})$, and they contain only one quadratic imaginary field; namely, $\mathbb Q(\sqrt{-7})$ for $d=7 $ (resp. $14$), and $\mathbb Q(\sqrt{-3})$ for $d=9$ (resp. $d=18$) (cf.~\cite{washington}). Let $\sigma$ be a generator of the (cyclic) group $\mathrm{Gal}(K/L)$ of order~$3$. In their proof of \cite[Lemma 5]{HoweZhu02}, Howe and Zhu show that we can choose $\zeta_d$ such that $\pi^\sigma=\zeta_d \pi$. Moreover, $\zeta_d^\sigma=\zeta_d^k$ for some integer $k$ (which can be chosen to lie in $[0,d-1]$). Since $\sigma$ is of order $3$, we have $\pi=\pi^{\sigma^3}=\zeta_d^{(k^2+k+1)}\pi$, which gives $k^2+k+1 \equiv 0\pmod d$. This rules out the case $d=9$ and $18$, because $-3$ is neither a square modulo $9$ nor a square modulo $18$. So $d=7$ or~$14$, $K=\mathbb Q(\zeta_7)$ and $\mathbb Q(\pi^d)=\mathbb Q(\sqrt{-7})$. It follows that the characteristic polynomial of $\pi^d$, which is of the form
\[
X^6+\alpha X^5+\beta X^4+\gamma X^3+\beta q^d X^2+\alpha q^{2d} X+q^{3d}\in\mathbb Z[X], \]
is the cube of a quadratic polynomial of discriminant $-7$. This is true if and only if
\begin{equation*}\label{quadequ} \alpha^2-36q^d+63=0,\quad\alpha^2-3\beta+9q^d=0\quad\mbox{and}\quad \alpha^3-27\gamma+54\alpha q^d=0,
\end{equation*}
that is,
\begin{equation*}\label{simplequadequ}
\alpha^2=9(4q^d-7),\quad \beta=3(5q^d-7)\quad\mbox{and}\quad 3\gamma=\alpha(10q^d-7).
\end{equation*} However, the first equation has no integer solution $\alpha$, because $q$ is odd. Indeed, suppose that $4q^d-7$ is a square, say $u^2$ for some integer $u$. Then $u$ is odd, say $u=1+2t$ for some integer $t$, hence $4q^d=u^2+7=8+4t(t+1)$; since $t(t+1)$ is even, $8$ divides $4q^d$, so $2$ divides $q$, which contradicts the hypothesis that $q$ is odd.
Hence, we obtain that $m_A(X)\in\mathbb Z[X^d]$ and Proposition~\ref{abssimple} follows. \end{proof}
Finally, the proof of Theorem~\ref{A} relies on Proposition~\ref{abssimple} and the following proposition, whose proof consists of counting arguments and is postponed to Section~\ref{sectionirred}:
\begin{pr}\label{irredmodell}
For any prime number $\ell\geq 13$ and any prime number $q>1.82\ell^2$,
there exists an ordinary Weil $q$-polynomial $P_q(X)=X^6+aX^5+bX^4+cX^3+qbX^2+q^2aX+q^3$,
with $a\not\equiv 0\pmod \ell$, which is irreducible modulo $\ell$. For $\ell\in\{3,5,7,11\}$,
there exists some prime number $q>1.82\ell^2$ and an ordinary Weil $q$-polynomial as above.
Moreover, for all $\ell \geq 3$, the coefficients $a,b,c$ can be chosen to lie in $\mathbb Z\cap [-(\ell-1)/2,(\ell-1)/2]$. \end{pr}
\begin{rmk}\label{remirred} Computations suggest that for $\ell\in\{5,7,11\}$ and \emph{any} prime number $q > 1.82\ell^2$, there still exist integers $a,b,c$ such that Proposition~\ref{irredmodell} holds. For $\ell=3$, this is no longer true: our computations indicate that if $q$ is such that $\legendre q\ell=-1$, then there are no suitable $a,b,c$, while if $q$ is such that $\legendre q\ell=1$, they indicate that there are $4$ suitable triples $(a,b,c)$. \end{rmk}
We now have all the ingredients to prove Theorem~\ref{A}.
\begin{proof}[Proof of Theorem~\ref{A}.] Let $\ell$ and $q$ be two distinct prime numbers as in Proposition~\ref{irredmodell} and let $P_q(X)$ be an ordinary Weil $q$-polynomial provided by this proposition. Since the polynomial $P_q(X)$ is irreducible modulo $\ell$, it is a fortiori irreducible over $\mathbb Z$. It is also ordinary and of degree $6$. Hence, by Honda-Tate theory, it defines an isogeny class $\mathcal A$ of ordinary simple abelian varieties of dimension $3$ over $\mathbb F_q$. By Proposition~\ref{abssimple}, since $a\neq 0$, the abelian varieties in $\mathcal A$ are actually absolutely simple. Moreover, according to Howe (cf.~\cite[Theorem~1.2]{Howe95}), $\mathcal A$ contains a principally polarised abelian variety $(A,\lambda)$.
Now, by the results of Oort-Ueno (cf.~\cite[Theorem~4]{OortUeno73}), there exists a so-called good curve $C$ defined over $\overline{\mathbb F}_q$ such that $(A,\lambda)$ is $\overline{\mathbb F}_q$-isomorphic to $(\mathrm{Jac}(C),\mu_0)$, where $\mu_0$ denotes the canonical polarisation on $\mathrm{Jac}(C)$. A curve over $\overline{\mathbb F}_q$ is a \emph{good curve} if it is either irreducible and non-singular or a non-irreducible stable curve whose generalised Jacobian variety is an abelian variety (cf.~\cite[Definition (13.1)]{Howe95}). In particular, the curve $C$ is stable, hence semistable. Since the generalised Jacobian variety $\mathrm{Jac}(C)\cong\mathrm{Pic}^0_{C}$ is an abelian variety, the torus appearing in the short exact sequence of Proposition~\ref{BLRExactSeq} is trivial. Hence, there is an isomorphism $\mathrm{Jac}(C) \cong \prod_{i=1}^r \mathrm{Pic}^0_{\widetilde{X_i}}$, where $\widetilde{X_1},\ldots,\widetilde{X_r}$ denote the normalisations of the irreducible components of $C$ over $\overline{\mathbb F}_q$. Since $\mathrm{Jac}(C)$ is absolutely simple, we conclude that $r=1$, i.e., the curve $C$ is irreducible, hence smooth.
We can therefore apply Theorem~9 of the appendix by Serre in~\cite{Lauter01} (see also the reformulation in \cite[Theorem~1.1]{Ritzenthaler10}) and conclude that the curve $C$ descends to $\mathbb F_q$. That is, there exists a smooth and geometrically irreducible curve $C_q$ defined over $\mathbb F_q$ which is isomorphic to $C$ over $\overline{\mathbb F}_q$. Moreover, either $(A,\lambda)$ or a quadratic twist of $(A,\lambda)$ is isomorphic to $(\mathrm{Jac}(C_q), \mu)$ over $\mathbb F_q$, where $\mu$ denotes the canonical polarisation of $\mathrm{Jac}(C_q)$. The characteristic polynomial of the Frobenius endomorphism of $\mathrm{Jac}(C_q)$ is $P_q(X)$ or $P_q(-X)$, since the twist may replace the Frobenius endomorphism with its negative.
Note that the polynomial $P_q(-X)$ is still an ordinary Weil polynomial which is irreducible modulo $\ell$ with non-zero trace, and $\mathrm{Jac}(C_q)$ is still ordinary and absolutely simple. This proves Theorem~\ref{A}. \end{proof}
\begin{rmk}\label{sek} In the descent argument above, the existence of a non-trivial quadratic twist may occur in the non-hyperelliptic case only. This obstruction for an abelian variety over $\overline{\mathbb{F}}_q$ to be a Jacobian over $\mathbb F_q$ was first stated by Serre in a Harvard course~\cite{Serre85}; it was derived from a precise reformulation of Torelli's theorem that Serre attributes to Weil~\cite{Weil57}. Note that Sekiguchi investigated the descent of the curve in~\cite{Sekiguchi81} and~\cite{Sekiguchi86}, but, as Serre pointed out to us, the non-hyperelliptic case was incorrect. According to MathSciNet review MR1002618 (90d:14032), together with Sekino, Sekiguchi corrected this error in~\cite{SekiSeki88}. \end{rmk}
\section{Proof of the main theorem}\label{sec:4}
The goal of this section is to prove Theorem \ref{thm:main}, by collecting together the results from Sections \ref{sec:2} and \ref{sec:3}. We keep the notation introduced in Subsection \ref{subsec:notation}; in particular, we will consider genus $3$ curves defined by polynomials which are of $3$-hyperelliptic or quartic type. We will prove the following refinement of Theorem \ref{thm:main}:
\begin{thm}\label{thm:refined} Let $\ell\geq 13$ be a prime number. For each prime number $q>1.82 \ell^2$, there exists $\bar{f}_q(x, y)\in \mathbb{F}_q[x, y]$ of $3$-hyperelliptic or
quartic type, such that if $f(x, y)\in \mathbb{Z}[x, y]$ is a lift of $\bar{f}_q(x, y)$, of the same type, satisfying the following two conditions for some prime number $p\not\in\{2, q, \ell\}$: \begin{enumerate}
\item $f(0, 0)=0$ or $v_p(f(0, 0))>2$;
\item $f(x,y)$ is congruent modulo $p^2$ to: $$\begin{cases} y^2 - x(x - p)m(x) &\text{ if } \bar{f}_q(x, y) \text{ is of hyperelliptic type}\\
x^4 + y^4 + x^2 -y^2 + px &\text{ if } \bar{f}_q(x, y) \text{ is of quartic type}\\
\end{cases}$$ for some $m(x) \in \mathbb{Z}_p[x]$ of degree 5 or 6 with simple non-zero roots modulo $p$;
\end{enumerate} \noindent then the projective curve $C$ defined over $\mathbb{Q}$ by the equation $f(x, y)=0$ is a smooth projective geometrically irreducible genus $3$ curve, such that the image of the Galois representation $\overline{\rho}_{\mathrm{Jac}(C), \ell}$ attached to the $\ell$-torsion of $\mathrm{Jac}(C)$ coincides with $\mathrm{GSp}_6(\mathbb{F}_{\ell})$.
Moreover, if $\ell\in \{5, 7, 11\}$, the statement is true, replacing ``For each prime number $q$'' by ``There exists an odd prime number $q$''. \end{thm}
\begin {rmk}\label{rem:CRT}
Let $\ell\geq 5$ be a prime number. Note that it is easy to construct infinitely many polynomials
$f(x,y)$ satisfying the conclusion of Theorem~\ref{thm:refined}:
choose a polynomial $f_p(x, y)$ satisfying the conditions in Definition~\ref{polyn}. Choose a prime $q>1.82 \ell^2$,
and find a polynomial $\bar{f}_q(x,y)$ that satisfies the conditions in Proposition \ref{irredmodell} (e.g.~by a computer search based on the method suggested after Theorem 0.1). Then
it suffices to choose each coefficient of $f(x, y)$ as a lift of the corresponding coefficient of $\bar{f}_q(x,y)$
to an element of $\mathbb{Z}$,
which is congruent mod $p^3$ to the corresponding coefficient of $f_p(x, y)$.
This also proves that Theorem~\ref{thm:main} follows from Theorem~\ref{thm:refined}. \end {rmk}
\begin{exmp}\label{ex:mainthm} \begin{enumerate} \item For $\ell=13$, we choose $p=7$, $q=313$. A computer search produces the polynomial $\bar{f}_q(x,y)=y^2-(x^7+x-1)$, which defines a hyperelliptic genus $3$ curve over $\mathbb{F}_q$. Let $f_p(x,y)=y^2-x(x-7)(x-1)(x-2)(x-3)(x-4)(x-5)$. Using the Chinese Remainder Theorem we construct the hyperelliptic curve over $\mathbb{Q}$ with equation $f(x,y)=0$, where \begin{multline*} f(x,y)=y^2 -( x^7-14085 x^6 + 33804x^5 -27231 x^4 \\ + 27231x^3 -35995 x^2 -33803x + 25039). \end{multline*} \item For $\ell=5$, we choose $p=3$, $q=97$. Through a computer search we find the quartic polynomial $\bar{f}_q(x,y)=x^4 + y^3+ x^3 y + x y^2 + 1\in\mathbb{F}_q[x, y]$. Take $f_p(x,y)=x^4+y^4+x^2-y^2+3x$. Then we obtain the plane quartic curve over $\mathbb{Q}$ with equation $f(x,y)=0$, where \begin{equation*} f(x,y)=x^4 + 486 x^3 y + y^4 + 486 x y^2 - 485 x^2 + 485 y^2 - 1455 x + 486. \end{equation*}
\end{enumerate} \end{exmp}
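The congruences behind the first polynomial of Example~\ref{ex:mainthm} can be re-checked directly. The following Python sketch (illustrative only, using \texttt{sympy}; the helper name is ours) verifies that the displayed degree-$7$ polynomial reduces to $x^7+x-1$ modulo $q=313$ and is congruent to $x(x-7)(x-1)(x-2)(x-3)(x-4)(x-5)$ modulo $p^3=343$, as prescribed by Remark~\ref{rem:CRT}.
\begin{verbatim}
# Check the congruences of Example (1): l = 13, p = 7, q = 313.
from sympy import symbols, expand, Poly

x = symbols('x')
p, q = 7, 313
g = (x**7 - 14085*x**6 + 33804*x**5 - 27231*x**4
     + 27231*x**3 - 35995*x**2 - 33803*x + 25039)    # f(x, y) = y^2 - g(x)
g_q = x**7 + x - 1                                   # target modulo q
g_p = expand(x*(x - 7)*(x - 1)*(x - 2)*(x - 3)*(x - 4)*(x - 5))  # target mod p^3

def congruent(u, v, n):
    return all(int(c) % n == 0 for c in Poly(expand(u - v), x).all_coeffs())

assert congruent(g, g_q, q)       # reduction mod q is y^2 - (x^7 + x - 1)
assert congruent(g, g_p, p**3)    # congruent to f_p modulo p^3
print("both congruences hold")
\end{verbatim}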
The rest of the section is devoted to the proof of Theorem \ref{thm:refined}. For the convenience of the reader, we recall the contents of Theorem 3.10 from \cite{AAKRTV14}:
Let $A$ be a principally polarised $n$-dimensional abelian variety defined over $\mathbb{Q}$. Assume that $A$ has semistable reduction of toric rank $1$ at some prime number $p$.
Denote by $\Phi_p$ the group of connected components of
the N\'eron model of $A$ at $p$.
Let $q$ be a prime of good reduction of $A$ and
$P_q(X)=X^{2n} + aX^{2n-1} + \cdots + q^n\in \mathbb{Z}[X]$ the characteristic polynomial of the
Frobenius endomorphism acting on the reduction of $A$ at $q$.
Then for all primes $\ell$ which do not divide $6pqa\vert \Phi_p\vert $
and such that the reduction of $P_q(X)$ mod $\ell$ is irreducible
in $\mathbb{F}_{\ell}[X]$, the image of $\overline{\rho}_{A, \ell}$
coincides with $\GSp_{2n}(\mathbb{F}_{\ell})$.
\begin{proof}[Proof of Theorem \ref{thm:refined}] Fix a prime $\ell\geq 5$. Let $q$ and $C_{q}$ be a prime, respectively a genus $3$ curve over $\mathbb{F}_{q}$, provided by Theorem~\ref{A}. The curve $C_q$ is either a plane quartic or a hyperelliptic curve. More precisely, it is defined by an equation $\bar{f}_q(x, y)=0$, where $\bar{f}_q(x, y)\in \mathbb{F}_q[x, y]$ is a quartic type polynomial in the first case and a $3$-hyperelliptic type polynomial otherwise (cf.~Subsection \ref{subsec:notation}). Note that if $f(x, y)\in\mathbb{Z}[x, y]$ is a quartic (resp.~$3$-hyperelliptic type) polynomial which reduces to $\bar{f}_q(x, y)$ modulo $q$, then it defines a smooth projective genus $3$ curve over $\mathbb{Q}$ which is geometrically irreducible.
Let now $p\not\in \{2, q, \ell\}$ be a prime. Assume that $f(x, y)\in \mathbb{Z}[x, y]$ is a polynomial of the same type as $\bar{f}_q(x,y)$ which is congruent to $\bar{f}_q(x, y)$ modulo $q$ and also satisfies the two conditions of the statement of Theorem \ref{thm:refined} for this $p$. We claim that the curve $C$ defined over $\mathbb{Q}$ by the equation $f(x, y)=0$ satisfies all the conditions of the explicit surjectivity result of (\cite[Theorem 3.10]{AAKRTV14}). Namely, Proposition \ref{prop:curve} implies that $C$ is a smooth projective geometrically connected curve of genus $3$ with stable reduction. Moreover, according to Proposition~\ref{jacobianthm}, the Jacobian $\mathrm{Jac}(C)$ is a principally polarised $3$-dimensional abelian variety over $\mathbb{Q}$, and its N\'eron model has semistable reduction at $p$ with toric rank equal to $1$. Furthermore, the component group $\Phi_p$ of the N\'eron model of $\mathrm{Jac}(C)$ at $p$ has order $2$. Finally, by the choice of $q$ and $C_q$ provided by Theorem~\ref{A}, $q$ is a prime of good reduction of $\mathrm{Jac}(C)$ such that the Frobenius endomorphism of the special fibre at $q$ has Weil polynomial $P_q(X)= X^6 + a X^5 + b X^4 + c X^3 + qbX^2 + q^2aX + q^3$, which is irreducible modulo $\ell$. Since the prime $\ell$ does not divide $6pqa\vert \Phi_p\vert $, we conclude that the image of the Galois representation $\overline{\rho}_{\mathrm{Jac}(C), \ell}$ attached to the $\ell$-torsion of $\mathrm{Jac}(C)$ coincides with $\mathrm{GSp}_6(\mathbb{F}_{\ell})$ by Theorem~3.10 from \cite{AAKRTV14}. \end{proof}
\section{Counting irreducible Weil polynomials of degree~$6$}\label{sectionirred}
In this section, we will prove Proposition~\ref{irredmodell} stated in Section~\ref{sec:3}. At the end of the section we present some examples.
This proof is based on Proposition~\ref{boundqweil} as well as Lemmas \ref{nnsquaresix} and \ref{redsix} below.
Let $\ell$ and $q$ be distinct prime numbers. Consider a polynomial of the form \begin{equation}\label{weilpolyn} P_q(X)=X^6+a X^5+bX^4+cX^3+qbX^2+q^2aX+q^3 \in\mathbb Z[X].
\tag{$\ast$} \end{equation}
Proposition~\ref{boundqweil} ensures that for $q\gg\ell^2$, every polynomial \eqref{weilpolyn} with coefficients in $]-\ell,\ell[$ is a Weil polynomial. Then Lemmas~\ref{nnsquaresix} and \ref{redsix} allow us to show that the number of such polynomials which are irreducible modulo $\ell$ is strictly positive.
\begin{pr}\label{boundqweil}Let $\ell$ and $q$ be two prime numbers. \begin{enumerate} \item Suppose that $q>1.67\ell^2$. Then every polynomial $$X^4+uX^3+vX^2+uqX+q^2 \in \mathbb Z[X]$$ with integers $u,v$ of absolute value $<\ell$ is a Weil $q$-polynomial. \item Suppose that $q>1.82\ell^2$. Then every polynomial \begin{equation*} P_q(X)=X^6+a X^5+bX^4+cX^3+qbX^2+q^2aX+q^3 \in\mathbb Z[X], \end{equation*} with integers $a,b,c$ of absolute value $<\ell$, is a Weil $q$-polynomial. \end{enumerate} \end{pr}
\begin{rmk} The power of $\ell$ is optimal, but the constants $1.67$ and $1.82$ are not. \end{rmk}
Let $D_6^{*-}$ be the number of polynomials of the form $P_q(X)=X^6+a X^5+bX^4+cX^3+qbX^2+q^2aX+q^3 \in\mathbb Z[X]$ with $a,b,c$ in $[-(\ell-1)/2,(\ell-1)/2]$, $a,c\neq 0$ and whose discriminant $\Delta_{P_q}$ is not a square modulo $\ell$, and $R_6$ the number of such polynomials which are Weil polynomials and are reducible modulo $\ell$. Denoting by $\legendre{.}{\ell}$ the Legendre symbol, we have: \begin{lm}\label{nnsquaresix} Let $\ell>3,$ then $D_6^{*-}\geq \frac 12 (\ell-1)^2\left(\ell-1-\legendre q\ell\right) +\frac 12 (\ell-1)\legendre q\ell\left(1-\legendre{-1}\ell\right)-\ell(\ell-1).$ \end{lm}
\begin{lm}\label{redsix} Let $\ell>3,$ then $R_6\leq \frac 38\ell^3 - \frac 5 8\ell^2\legendre q\ell - \ell^2 + \frac 32\ell\legendre q\ell + \frac 58\ell - \frac 38\legendre q\ell - \frac 12$. \end{lm}
We postpone the proofs of Proposition~\ref{boundqweil} as well as Lemmas \ref{nnsquaresix} and \ref{redsix} to the following subsections but now use those statements to prove Proposition~\ref{irredmodell}. Before that, let us recall a result of Stickelberger, as proven by Carlitz in \cite{Carlitz}, which will also be useful for proving Lemmas~\ref{nnsquaresix} and~\ref{redsix}: For any monic polynomial $P(X)$ of degree $n$ with coefficients in $\mathbb Z$, and any odd prime number $\ell$ not dividing its discriminant $\Delta_P$, the number $s$ of irreducible factors of $P(X)$ modulo $\ell$ satisfies \begin{equation}\label{eq:stickelberger} \left( \frac{\Delta_P}{\ell} \right) = (-1)^{n-s}. \end{equation}
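Relation~\eqref{eq:stickelberger} is straightforward to test on examples. The Python sketch below (illustrative only; it uses \texttt{sympy} and one of the Weil polynomials listed in Section~\ref{ex:section5}) compares the Legendre symbol of the discriminant with the parity of the number of irreducible factors modulo $\ell$.
\begin{verbatim}
# Sanity check of Stickelberger's relation (Delta_P / l) = (-1)^(n - s).
from sympy import symbols, discriminant, factor_list, degree
from sympy.ntheory import legendre_symbol

x = symbols('x')
ell = 13
P = x**6 + x**5 + 4*x**3 + 97969*x + 30664297   # the q = 313 example of Section 5

n = degree(P, x)
disc = int(discriminant(P, x))
assert disc % ell != 0                          # l must not divide the discriminant

_, factors = factor_list(P, x, modulus=ell)     # factorisation over F_l
s = len(factors)                                # number of irreducible factors

assert legendre_symbol(disc % ell, ell) == (-1)**(n - s)
print("Stickelberger check passed with s =", s)
\end{verbatim}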
\begin{proof}[Proof of Proposition~\ref{irredmodell}] Let $\ell>3$ be a prime number. It follows from Stickelberger's result that if $P_q(X)$ as in \eqref{weilpolyn} is irreducible modulo $\ell$, then $\left( \frac{\Delta_{P_q}}{\ell} \right) = -1.$ Hence by Proposition~\ref{boundqweil}, when $q>1.82\ell^2$, we find that $(D^{*-}_6-R_6)$ is exactly the number of degree $6$ ordinary Weil polynomials which have non-zero trace modulo $\ell$ and are irreducible modulo $\ell$.
By Lemmas~\ref{nnsquaresix} and \ref{redsix}, we have $$ D_6^{*-}-R_6\geq \frac 18 \ell^3 + \frac 18 \ell^2\legendre{q}\ell - \frac 12\ell\legendre{-q}\ell - \frac 32\ell^2 + \frac 12\legendre{-q}\ell+ \frac{15}8\ell - \frac 58\legendre q\ell,$$ which is strictly positive for all $q$, provided that $\ell\geq 13.$
For $\ell=3,5,7$ or $11$, direct computations of $(D_6^{*-}-R_6)$ using \textsc{Sage} show that $q=19$ for $\ell=3$, $q=47$ for $\ell = 5$, $q=97$ for $\ell=7$ and $q=223$ for $\ell=11$ satisfy the conditions of Proposition~\ref{irredmodell}. Actually, computations indicate that for $\ell=5,7,11$, $(D_6^{*-}-R_6)$ should be strictly positive for any prime number $q$, and that for $\ell=3$ it should be strictly positive for all prime numbers $q$ which are squares modulo $\ell$ (see Remark~\ref{remirred}). \end{proof}
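These counts are easy to reproduce with a short script. The Python sketch below (illustrative only; it plays the role of the \textsc{Sage} computation mentioned above and uses \texttt{sympy}) counts, for $\ell=5$ and $q=47>1.82\ell^2$, the triples $(a,b,c)$ with $a,c\neq 0$ for which $P_q(X)$ is irreducible modulo $\ell$; by Proposition~\ref{boundqweil} every such $P_q(X)$ is automatically an ordinary Weil $q$-polynomial, so this count equals $D_6^{*-}-R_6$.
\begin{verbatim}
# Count triples (a, b, c) with a, c != 0 in [-(l-1)/2, (l-1)/2] such that
# P_q(X) is irreducible modulo l, for l = 5 and q = 47.
from sympy import symbols, factor_list

x = symbols('x')
ell, q = 5, 47
half = (ell - 1) // 2

count = 0
for a in range(-half, half + 1):
    for b in range(-half, half + 1):
        for c in range(-half, half + 1):
            if a == 0 or c == 0:
                continue
            P = x**6 + a*x**5 + b*x**4 + c*x**3 + q*b*x**2 + q*q*a*x + q**3
            _, factors = factor_list(P, x, modulus=ell)
            if len(factors) == 1 and factors[0][1] == 1:    # irreducible mod l
                count += 1
print("suitable triples for l = 5, q = 47:", count)
\end{verbatim}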
\subsection{Proof of Proposition~\ref{boundqweil}}
Recall that $\ell$ and $q$ are two prime numbers.
We first consider degree $4$ polynomials. One can prove that a polynomial $X^4+uX^3+vX^2+uqX+q^2 \in \mathbb Z[X]$ is a $q$-Weil polynomial if and only if the integers $u,v$ satisfy the following inequalities:
\begin{enumerate}[(1)]
\item\label{ineq1} $|u|\leq 4\sqrt{q}$,
\item\label{ineq2} $2|u|\sqrt q-2q\leq v\leq \frac{u^2}4+2q$.
\end{enumerate}
Let $q>1.67\ell^2 $ and $Q(X)=X^4+uX^3+vX^2+uqX+q^2 \in \mathbb Z[X]$ with $|u|<\ell,|v|<\ell$. Then $q \geq \frac 1{16}\ell^2$ and, since $\ell\geq 2$, we have $q\geq \frac 14\ell^2\geq \frac12\ell$ so \eqref{ineq1} and the right hand side inequality in \eqref{ineq2} are satisfied. Finally, $q\geq \left(1+\frac{1}{2\sqrt 3}\right)^2\ell^2$ so $\sqrt q\geq \left(1+\frac 1{2\sqrt q}\right)\ell$ and the left hand side inequality in \eqref{ineq2} is satisfied. This proves that $Q(X)$ is a Weil polynomial and the first part of the proposition.
Now we turn to degree $6$ polynomials. The proof is similar to the degree $4$ case. According to Haloui \cite[Theorem~1.1]{Haloui10}, a degree $6$ polynomial of the form \eqref{weilpolyn} is a Weil polynomial if its coefficients satisfy the following inequalities: \begin{enumerate}[(1)]
\item\label{condhaloui1} $|a|<6\sqrt q$,
\item\label{condhaloui2} $4\sqrt q |a|-9q<b\leq \frac{a^2}3+3q$, \item\label{condhaloui3} $-\frac{2a^3}{27}+\frac{ab}3+qa-\frac{2}{27}(a^2-3b+9q)^{\frac 32} \leq c \leq -\frac{2a^3}{27}+\frac{ab}3+qa + \frac{2}{27}(a^2-3b+9q)^{\frac 32} $, \item\label{condhaloui4} $-2qa-2\sqrt q b-2q\sqrt q <c<-2qa+2\sqrt qb+2q\sqrt q$. \end{enumerate}
Let $q>1.82\ell^2$ and $P_q(X)$ a polynomial of the form \eqref{weilpolyn} with $|a|,|b|,|c|<\ell$. Then we note:
\begin{itemize}
\item We have $q>\frac{1}{36}\ell^2$, so $\ell < 6\sqrt q$ and \eqref{condhaloui1} is satisfied. \item The right hand side inequality of \eqref{condhaloui2} is satisfied since $\ell\leq 3q$.
Moreover, since $\ell\geq 2$, we have $4\ell^2(1+\sqrt{1+9/(4\ell)})^2/81\leq 4\ell^2(1+\sqrt{17/8})^2/81<0.3\,\ell^2<q$. Hence $9q-4\ell\sqrt q-\ell>0$ and the left hand inequality of \eqref{condhaloui2} is satisfied.
\item A sufficient condition to have both inequalities in \eqref{condhaloui3} is
$$ 2\ell^3+9\ell^2+27q\ell-2(-3\ell^2+9q)^{3/2}+27\ell\leq 0. $$ A computation shows that this inequality is equivalent to $A\leq B$, with \begin{align*} A=\ell^6\left(\frac{28}{729}+\frac{1}{81\ell}+\frac {7}{108\ell^2}+\frac 1{6\ell^3}+\frac1{4\ell^4}\right) \mbox{ and } B= q^3\left(1-\frac54\frac{\ell^2}{q}+\frac{\ell^4}{q^2}\left(\frac 8{27}-\frac 1{6\ell}-\frac{1}{2\ell^2}\right)\right). \end{align*}
Since $\ell\geq 2$, we have $A\leq \frac{4537}{46656} \ell^6$
and $B\geq q^3\left(1-\frac 54\frac{\ell^2}{q}+\frac{19}{216}\frac{\ell^4}{q^2}\right)$. Furthermore, since the polynomial $$\frac{4537}{46656}X^3-\frac{19}{216}X^2+\frac54 X-1$$ has only one real root with approximate value $0.805$, we find that $A\leq B$, because $q\geq 1.243 \ell^2.$
\item Since $q>1.82\ell^2$ and $\ell\geq 2$, we have $ \ell \left(\frac{1}{2q}+\frac{1}{\sqrt q}+1\right)\leq \ell\left(\frac 1{22}+\frac 1{\sqrt{11}}+1\right) <\sqrt q $. Hence, $-2q\ell - 2\sqrt q \ell + 2q\sqrt q-\ell >0 $ and \eqref{condhaloui4} is satisfied.
\end{itemize} This proves that $P_q(X)$ is a Weil polynomial and the second part of the proposition.
$\qed$
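For later experimentation, Haloui's conditions \eqref{condhaloui1}--\eqref{condhaloui4} quoted above can be bundled into a small test function. The Python sketch below is illustrative only: the function name is ours, floating-point arithmetic is used, and the sample input is the tuple $(q,a,b,c)=(313,1,0,4)$ of Section~\ref{ex:section5}; it is a quick numerical filter rather than a rigorous verification.
\begin{verbatim}
# Test of Haloui's sufficient conditions (1)-(4) for
# P_q(X) = X^6 + aX^5 + bX^4 + cX^3 + qbX^2 + q^2 aX + q^3.
from math import sqrt

def haloui_sufficient(q, a, b, c):
    sq = sqrt(q)
    if not (abs(a) < 6*sq):                                   # condition (1)
        return False
    if not (4*sq*abs(a) - 9*q < b <= a*a/3 + 3*q):            # condition (2)
        return False
    d = a*a - 3*b + 9*q
    if d < 0:
        return False
    centre = -2*a**3/27 + a*b/3 + q*a
    half_width = (2/27)*d**1.5
    if not (centre - half_width <= c <= centre + half_width):  # condition (3)
        return False
    return -2*q*a - 2*sq*b - 2*q*sq < c < -2*q*a + 2*sq*b + 2*q*sq  # condition (4)

print(haloui_sufficient(313, 1, 0, 4))    # one of the examples of Section 5
\end{verbatim}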
\subsection{Proofs of Lemmas~\ref{nnsquaresix} and \ref{redsix}}
In this section, $\ell>2$, $q\neq \ell$ are prime numbers and we, somewhat abusively, denote with the same letter an integer in $[-(\ell-1)/2,(\ell-1)/2]$ and its image in $\mathbb F_\ell$.
We will repeatedly use the following elementary lemma.
\begin{lm}\label{prelimun} Let $D\in\mathbb F_\ell^*$ and $\varepsilon\in\{-1,1\}.$ We have $$ \sharp \left\{x\in\mathbb F_\ell ;\legendre{x^2-D}{\ell} =\varepsilon\right\}=\frac 12\left(\ell-1-\varepsilon -\legendre D\ell\right); $$ and $$\sharp\left\{(x,y)\in\mathbb F_\ell^2; \legendre{x^2-Dy^2}\ell=\varepsilon\right\}=\frac{1}{2}(\ell-1)\left(\ell-\legendre D\ell\right).$$ \end{lm}
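Both counts can be confirmed by brute force for a small prime. The following Python sketch (illustrative only; it uses \texttt{sympy} for the Legendre symbol) does so for $\ell=11$ and every $D\in\mathbb F_\ell^*$.
\begin{verbatim}
# Brute-force check of the two counting formulas of the lemma for l = 11.
from sympy.ntheory import legendre_symbol

ell = 11

def leg(a):
    a %= ell
    return 0 if a == 0 else legendre_symbol(a, ell)

for D in range(1, ell):
    for eps in (-1, 1):
        lhs1 = sum(1 for x in range(ell) if leg(x*x - D) == eps)
        assert lhs1 == (ell - 1 - eps - leg(D)) // 2
        lhs2 = sum(1 for x in range(ell) for y in range(ell)
                   if leg(x*x - D*y*y) == eps)
        assert lhs2 == (ell - 1)*(ell - leg(D)) // 2
print("both formulas verified for l =", ell)
\end{verbatim}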
\subsubsection{Estimates on the number of degree 4 Weil polynomials modulo $\ell$}
\begin{pr}\label{weilfour} \begin{enumerate} \item For $\varepsilon\in\{-1,1\}$, we denote by $D_4^\varepsilon$ the number of degree $4$ polynomials of the form $X^4+uX^3+vX^2+uqX+q^2\in\mathbb F_\ell[X]$ with discriminant $\Delta$ such that $\legendre \Delta \ell=\varepsilon$. Then $$ D_4^-=\frac 12(\ell-1)\left(\ell-\legendre q\ell\right)\quad \mbox { and } \quad D_4^+=\frac 12(\ell-3)\left(\ell-\legendre q\ell\right)+1.$$
\item The number $N_4$ of degree $4$ Weil polynomials with coefficients in $[-(\ell-1)/2,(\ell-1)/2]$ which are irreducible modulo $\ell$ satisfies \begin{equation}\label{nfour}
N_4\leq\frac 14 (\ell+1)(\ell-1). \end{equation}
\item The number $T_4$ of degree $4$ Weil polynomials with coefficients in $[-(\ell-1)/2,(\ell-1)/2]$ with exactly two irreducible factors modulo $\ell$ satisfies \begin{equation}\label{tfour} T_4\leq\frac 14(\ell-3)\left(\ell-\legendre q\ell\right)+\frac 18(\ell-1)(\ell+1). \end{equation} \end{enumerate} \noindent Moreover, if $q>1.67\ell^2$, Inequalities (\ref{nfour}) and (\ref{tfour}) are equalities. \end{pr}
\begin{proof}
\begin{enumerate} \item First, we compute $D_4^{\varepsilon}$.
The polynomial $Q(X)=X^4+uX^3+vX^2+uqX+q^2$ has discriminant $$\Delta = q^2 \kappa ^2 \delta \quad \mbox{ where}\quad \kappa= -u^2 +4(v-2q) \quad\mbox{ and }\quad\delta =(v+2q)^2-4qu^2.$$
Since $q\in \mathbb F_\ell^*$, we have $\legendre{\Delta}{\ell}=\legendre\kappa\ell^2\legendre\delta\ell$. Moreover, notice that if $\kappa=0$ then $\delta=(v-6q)^2$.
It follows that $$D_4^- =\sharp\left\{(u,v)\in\mathbb F_\ell^2; \legendre{\delta}{\ell}=-1\right\}$$ and $$D_4^+ =\sharp\left\{(u,v)\in\mathbb F_\ell^2; \legendre{\delta}{\ell}=1\right\}-\sharp\left\{(u,v)\in\mathbb F_\ell^2; v\neq 6q \mbox{ and } u^2=4(v-2q)\right\}.$$
Since the map $(u,v)\mapsto (v+2q,2u)$ is a bijection on $\mathbb F_\ell^2$ (because $\ell\neq 2$), by Lemma~\ref{prelimun} we have $$\sharp\left\{(u,v)\in\mathbb F_\ell^2; \legendre{\delta}\ell =\varepsilon\right\}= \sharp\left\{(x,y)\in\mathbb F_\ell^2; \legendre{x^2-qy^2}\ell=\varepsilon\right\}=\frac{(\ell-1)}2\left(\ell-\legendre q\ell\right)$$ for any $\varepsilon\in\{\pm 1\}.$ This gives the result for $D_4^-$. The result for $D_4^+$ follows from: \begin{eqnarray*} \sharp\left\{(u,v); \; v\neq 6q \mbox{ and } u^2=4(v-2q)\right\}&=& \sharp\left\{(u,v);\; u^2=4(v-2q)\right\}-\sharp\{u\in\mathbb F_\ell; u^2=16q\}\\ &=&\ell-1-\legendre q\ell. \end{eqnarray*}
\item Next, we bound the quantity $N_4$. By Stickelberger's result (see \eqref{eq:stickelberger}),
a monic degree $4$ polynomial in $\mathbb Z[X]$ has non-square discriminant modulo $\ell$ if and only if it has one or three distinct irreducible factors in $\mathbb F_\ell[X]$. In the latter case,
the polynomial has the form $$(X-\alpha')(X-q/\alpha')(X^2-B'X+q)$$ with $X^2-B'X+q$ irreducible in $\mathbb F_\ell[X]$ and $\alpha'\neq q/\alpha'$ in $\mathbb F_\ell^*$. By Lemma~\ref{prelimun}, there are $$\frac 14\left(\ell-2-\legendre{q}{\ell}\right) \left(\ell-\legendre q\ell\right)$$ such polynomials with three irreducible factors. It follows that \[ N_4\leq D_4^- - \frac 14\left(\ell-2-\legendre{q}{\ell}\right) \left(\ell-\legendre q\ell\right)\leq\frac 14(\ell-1)(\ell+1). \]
\item Finally, we bound the quantity $T_4$. As above, Stickelberger's result implies that a degree $4$ Weil polynomial $Q(X)$ in $\mathbb Z[X]$ has exactly two distinct irreducible factors modulo $\ell$ if and only if $\legendre {\Delta_Q}{\ell}=1$ and $Q(X) \pmod \ell$ does not have four distinct roots in $\mathbb F_\ell$. By Lemma~\ref{prelimun}, there are $$\frac 18\left(\ell-\legendre q\ell-2\right)\left(\ell-\legendre q\ell-4\right)$$ Weil polynomials with coefficients in $[-(\ell-1)/2,(\ell-1)/2]$ whose reduction modulo $\ell$ has four distinct roots in $\mathbb F_\ell$. It follows that \begin{eqnarray*} T_4&\leq&D_4^+ - \frac 18 \left(\ell-\legendre q\ell-2\right)\left(\ell-\legendre q\ell-4\right)\\ &\leq& \frac 14(\ell-3)\left(\ell-\legendre q\ell\right)+\frac 18(\ell-1)(\ell+1). \end{eqnarray*} \end{enumerate}
When $q>1.67\ell^2$, these upper bounds for $N_4$ and $T_4$ are equalities, since in this case, by Proposition~\ref{boundqweil}, every polynomial of the form $X^4+uX^3+vX^2+uqX+q^2$ with $|u|,|v|<\ell$ is a Weil polynomial. \end{proof}
\subsubsection{Proof of Lemma~\ref{redsix}}
Recall that $R_6$ denotes the number of Weil polynomials $P_q(X)=X^6+aX^5+bX^4+cX^3+qbX^2+q^2aX+q^3$
with coefficients in $[-(\ell-1)/2,(\ell-1)/2]$, $a,c\neq 0$, non-square discriminant modulo $\ell$ and which are reducible modulo $\ell$. We may drop the conditions $a\neq 0, c\neq 0$ to bound $R_6$.
By Stickelberger's result (see \eqref{eq:stickelberger}), a monic degree $6$ polynomial in $\mathbb Z[X]$ with non-square discriminant modulo $\ell$ has $1,\ 3$ or $5$ distinct irreducible factors in $\mathbb F_\ell[X]$. Hence, the factorisation in $\mathbb F_{\ell}[X]$ of a polynomial $P_q(X)$ as above is of one of the following types (note that a root $\alpha$ of $P_q(X)$ in $\overline{\mathbb F}_\ell$ is in $\mathbb F_\ell$ if and only $q/\alpha$ is also in $\mathbb F_\ell$): \begin{enumerate} \item $P_q (X)\equiv (X-\alpha)(X-\frac q\alpha)(X-\beta)(X-\frac q\beta)(X^2-CX+q)$, with $C^2-4q$ non-square modulo $\ell$ and $\alpha\neq q/\alpha$, $\beta\neq q/\beta$ and $\{\alpha,q/\alpha\}\neq\{\beta,q/\beta\}$; equivalently $P_q(X)\equiv (X^2-AX+q)(X^2-BX+q)(X^2-CX+q)$ where the first two quadratic polynomials are distinct and both reducible and the third one is irreducible;\item $P_q (X)\equiv (X-\alpha)(X-\frac q\alpha) Q(X)$, where $\alpha\neq q/\alpha$ and the irreducible factor $Q(X)$ is the reduction of a degree $4$ Weil polynomial; \item $P_q(X)$ is the product of three distinct irreducible quadratic polynomials, i.e., $P_q(X) \equiv (X^2-CX+q)Q(X)$ where $X^2-CX+q$ is irreducible and $Q(X)$ is the reduction of a degree $4$ Weil polynomial which has two distinct irreducible factors, both of which are distinct from $X^2-CX+q$. \end{enumerate}
We will count the number of polynomials of each type.
\noindent\textbf{Type 1.} By Lemma~\ref{prelimun}, there are $\frac 12\left(\ell-\legendre q\ell\right)$ irreducible quadratic polynomials $X^2-CX+q$. Also by Lemma~\ref{prelimun}, there are $\frac 12\left(\ell-2-\legendre q\ell\right)$ choices for reducible $X^2-AX+q$ without a double root and then there are $\frac 12\left(\ell-2-\legendre q\ell\right)-1$ choices for reducible $X^2-BX+q$ without a double root and distinct from $X^2-AX+q$. It follows that there are $ \frac 1{16}\left(\ell-\legendre q\ell\right)\left(\ell-\legendre q\ell-2\right)\left(\ell-\legendre q\ell -4\right) $ such polynomials.
\noindent\textbf{Type 2.} By Proposition~\ref{weilfour} and Lemma~\ref{prelimun}, the number of polynomials with decomposition of this type is $$\frac 12\left(\ell-\legendre q\ell-2\right)N_4 \leq \frac 18 (\ell+1)(\ell-1)\left(\ell-\legendre q\ell-2\right).$$
\noindent\textbf{Type 3.} Proposition~\ref{weilfour} and Lemma~\ref{prelimun} imply that there are $$ \leq \frac 12\left(\ell-\legendre q\ell\right) T_4 \leq \frac 18\left(\ell-\legendre q\ell\right)^2(\ell-3)+\frac 1{16}(\ell-1)(\ell+1)\left(\ell-\legendre q\ell\right) $$ polynomials of this type. \footnote{The first inequality is due to the fact that we do not take into account that $X^2-CX+q$ has to be distinct from the factors of $Q(X)$.}
Summing these three upper bounds yields the lemma.
$\qed$
\subsubsection{Proof of Lemma~\ref{nnsquaresix}}
The discriminant of $P_q(X)$ is $\Delta_{P_q}= q^6\Gamma^2\delta $, where
$$\Gamma= 8q a^4 + 9q^2a^2 - 42qa^2 b + a^2b^2 - 4a^3c + 108q^3 - 108q^2b + 36qb^2 - 4b^3 + 54qac + 18abc - 27c^2$$ and
$\delta=(c+2aq)^2-4q(b+q)^2.$ Hence, we have
\begin{eqnarray*}
D_6^{*-}&=&\sharp\left\{(a,b,c); a,c\neq 0, \Gamma\not\equiv 0\bmod \ell \mbox{ and } \legendre \delta\ell=-1\right\}\\
&=&\sharp\left\{(a,b,c); a,c\neq 0, \legendre \delta\ell=-1\right\} -\sharp\left\{(a,b,c); a,c\neq0, \Gamma\equiv 0\bmod \ell \mbox{ and } \legendre \delta\ell=-1\right\}\\ &\geq&M-W,
\end{eqnarray*}
where $ M = \sharp\left\{(a,b,c); a,c\neq 0, \legendre \delta\ell=-1\right\}$ and $W = \sharp\left\{(a,b,c); a\neq0, \Gamma\equiv 0\bmod \ell \right\}$.
\paragraph{Computation of $M$.}
Since $\ell >2$ and $q\in\mathbb F_\ell^*$, for any fixed $c\in\mathbb F_\ell^\times$, the map $(a,b)\mapsto (c+2aq,b+q)$ is a bijection from $\mathbb F_\ell^*\times\mathbb F_\ell$ to $\mathbb F_\ell\backslash\{c\}\times\mathbb F_\ell$.
From this and Lemma~\ref{prelimun} we deduce that
\begin{eqnarray*}
M&=& \sum_{c\in\mathbb F_\ell^*} \sharp\left\{(x,y)\in\mathbb F_\ell^2; x\neq c,\; \legendre{x^2-4qy^2}{\ell}=-1\right\}\\
&=& \sum_{c\in\mathbb F_\ell^*} \sharp\left\{(x,y)\in\mathbb F_\ell^2; \legendre{x^2-4qy^2}{\ell}=-1\right\} - \sum_{c\in\mathbb F_\ell^*} \sharp\left\{y\in\mathbb F_\ell ; \legendre{c^2-4qy^2}{\ell}=-1\right\}\\
&=& \frac 12(\ell-1)^2\left(\ell-\legendre q\ell\right) - \sum_{c\in\mathbb F_\ell^*} M'_c,
\end{eqnarray*} where \begin{eqnarray*}
M'_c &=& \sharp\left\{y\in\mathbb F_\ell ; \legendre{c^2-4qy^2}{\ell}=-1\right\}\\
&=&\sharp\left\{y\in\mathbb F_\ell ; \legendre{y^2-(c^2/4q)}{\ell}=-\legendre{-q}\ell\right\} \\
&=& \frac 12\left(\ell-1-\legendre q\ell+\legendre{-q}\ell\right), \end{eqnarray*} the last equality following from Lemma~\ref{prelimun}.
This gives $$ M = \frac 12 (\ell-1)^2\left(\ell-1-\legendre q\ell\right) +\frac 12 (\ell-1)\legendre q\ell\left(1-\legendre{-1}\ell\right). $$
\paragraph{Computation of $W=\sharp\left\{(a,b,c)\in\mathbb F_\ell^3; a\neq0, \Gamma=0 \right\}.$}
The discriminant of $\Gamma$, viewed as a quadratic polynomial\footnote{More precisely, we have
$ \Gamma=-27c^2+G_1 c+G_0,\ (G_0,G_1\in \mathbb F_\ell[a,b])$ with
$G_1(a,b)=-2a (2a^2 - 27q -9b) $ and $G_0(a,b)=8qa^4 + 9q^2a^2 - 42qa^2b + a^2b^2 + 108q^3 - 108q^2b + 36qb^2 - 4b^3.$}
in $c$ is $\gamma=16(a^2-3(b-3q))^3.$
It follows that \begin{eqnarray*} W&=& 2\cdot\sharp\left\{(a,b)\in\mathbb F_\ell^2; a\neq 0, \legendre \gamma\ell=1\right\}+ \sharp\left\{(a,b)\in\mathbb F_\ell^2; a\neq 0, \gamma=0 \right\}\\ &=& 2\cdot\sharp\left\{(a,b)\in\mathbb F_\ell^2; a\neq 0, \legendre {a^2-3(b-3q)}\ell=1\right\}+ \sharp\left\{(a,b)\in\mathbb F_\ell^2; a\neq 0, a^2=3(b-3q) \right\}. \end{eqnarray*} Moreover, since $\ell>3$, the map $b\mapsto 3(b-3q)$ is a bijection on $\mathbb F_\ell$. So we have \begin{eqnarray*} W &=& 2\cdot\sharp\left\{(x,y)\in\mathbb F_\ell^2; x\neq 0, \legendre {x^2-y}\ell=1\right\}+ \sharp\left\{(x,y)\in\mathbb F_\ell^2; x\neq 0, x^2=y \right\}\\ &=& 2\cdot\sum_{y\in\mathbb F_\ell}\sharp\left\{x\in\mathbb F_\ell; \legendre{x^2-y}\ell=1\right\}-2\cdot\sharp\left\{y\in\mathbb F_\ell; \legendre{-y}{\ell}=1\right\}+\sum_{y\in\mathbb F_\ell^*}\sharp\{x\in\mathbb F_\ell^*; x^2=y\} \\ &=& \sum_{y\in\mathbb F_\ell^*}\left(\ell-2-\legendre y\ell\right)+2(\ell-1) - (\ell-1) + (\ell-1), \end{eqnarray*} using Lemma~\ref{prelimun} (the second term is the contribution of $y=0$). This yields $W=\ell(\ell-1)$ and computing $M-W$ concludes the proof. \qed
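The closed formula for $M$ obtained above is easy to confirm numerically. The Python sketch below (illustrative only; the sample values $\ell=11$ and $q=5$ are ours) compares a brute-force count with the formula.
\begin{verbatim}
# Brute-force check of the closed formula for M, for l = 11 and q = 5.
from sympy.ntheory import legendre_symbol

ell, q = 11, 5

def leg(a):
    a %= ell
    return 0 if a == 0 else legendre_symbol(a, ell)

M_brute = sum(1 for a in range(1, ell) for b in range(ell) for c in range(1, ell)
              if leg((c + 2*a*q)**2 - 4*q*(b + q)**2) == -1)

chi_q, chi_m1 = leg(q), leg(-1)
M_formula = ((ell - 1)**2*(ell - 1 - chi_q))//2 + ((ell - 1)*chi_q*(1 - chi_m1))//2
assert M_brute == M_formula
print("M =", M_brute)
\end{verbatim}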
\subsection{Examples}\label{ex:section5}
This section contains examples of Weil polynomials satisfying the conditions in Proposition \ref{irredmodell}. They were obtained using \textsc{Sage}. \begin{itemize} \item $\ell=3$, $q=19$: $P_q(X)=X^6 + X^5 + X^3 + 361X + 6859$; \item $\ell=5$, $q=47$: $P_q(X)=X^6 + X^5 + X^4 + X^3 + 47X^2 + 2209X + 103823$; \item $\ell=7$, $q=97$: $P_q(X)=X^6 + X^5 + 3X^3 + 9409X + 912673$; \item $\ell=11$, $q=223$: $P_q(X)=X^6 + X^5 + 5X^3 + 49729X + 11089567$; \item $\ell=13$: \begin{itemize} \item[] $q=311$: $P_q(X)=X^6 + X^5 + 3X^3 + 96721X + 30080231$; \item[] $q=313$: $P_q(X)=X^6 + X^5 + 4X^3 + 97969X + 30664297$; \item[] $q=317$: $P_q(X)=X^6 + X^5 + X^3 + 100489X + 31855013$; \item[] $q=331$: $P_q(X)=X^6 + X^5 + 3X^3 + 109561X + 36264691$. \end{itemize} \end{itemize}
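The stated properties of these polynomials can be re-checked independently. The following Python sketch (illustrative only; it uses \texttt{sympy} and \texttt{numpy}, and the Weil property is tested numerically via floating-point roots) verifies, for each pair $(\ell,q)$ above, irreducibility modulo $\ell$, non-zero trace modulo $\ell$, ordinarity, and that all complex roots have absolute value $\sqrt q$.
\begin{verbatim}
# Re-check the listed examples (coefficients (a, b, c) read off from above).
import numpy as np
from math import gcd, sqrt
from sympy import symbols, factor_list

x = symbols('x')
examples = [(3, 19, 1, 0, 1), (5, 47, 1, 1, 1), (7, 97, 1, 0, 3),
            (11, 223, 1, 0, 5), (13, 311, 1, 0, 3), (13, 313, 1, 0, 4),
            (13, 317, 1, 0, 1), (13, 331, 1, 0, 3)]

for ell, q, a, b, c in examples:
    P = x**6 + a*x**5 + b*x**4 + c*x**3 + q*b*x**2 + q*q*a*x + q**3
    _, factors = factor_list(P, x, modulus=ell)
    assert len(factors) == 1 and factors[0][1] == 1    # irreducible mod l
    assert a % ell != 0 and gcd(c, q) == 1             # non-zero trace, ordinary
    roots = np.roots([1, a, b, c, q*b, q*q*a, q**3])
    assert all(abs(abs(r)/sqrt(q) - 1) < 1e-6 for r in roots)
print("all listed examples verified")
\end{verbatim}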
\end{document}
\begin{document}
\title{From Unsupervised to Few-shot \\Graph Anomaly Detection: A Multi-scale Contrastive Learning Approach}
\raggedbottom \author{Yu Zheng, Ming Jin, Yixin Liu, Lianhua Chi*, Khoa T. Phan, Shirui Pan, Yi-Ping Phoebe Chen
\thanks{Y. Zheng, L. Chi, K. T. Phan, and Y-P. P. Chen are with Department of Computer Science and Information Technology, La Trobe University, Melbourne Australia. E-mail: \{yu.zheng, l.chi, k.phan, phoebe.chen\}@latrobe.edu.au. } \thanks{M. Jin, Y. Liu, S. Pan are with the Department of Data Science and AI, Faculty of IT, Monash University, Clayton, VIC 3800, Australia.
E-mail: \{ming.jin, yixin.liu, shirui.pan\}@monash.edu. } \thanks {Y. Zheng and M. Jin contributed equally to this work.} \thanks {* Corresponding Author} \thanks{Manuscript received Jan 3, 2022; revised xx xx, 202x.} }
\markboth{Journal of \LaTeX\ Class Files,~Vol.~14, No.~8, Jan~2022} {Zheng \MakeLowercase{\textit{et al.}}: Multi-scale contrastive learning}
\maketitle
\begin{abstract} Anomaly detection from graph data is an important data mining task in many applications such as social networks, finance, and e-commerce. Existing efforts in graph anomaly detection typically only consider the information in a single scale (view), thus inevitably limiting their capability in capturing anomalous patterns in complex graph data. To address this limitation, we propose a novel graph \underline{\textbf{AN}}omaly d\underline{\textbf{E}}tection framework with \underline{\textbf{M}}ulti-scale c\underline{\textbf{ON}}trastive l\underline{\textbf{E}}arning (\texttt{ANEMONE}\xspace in short). By using a graph neural network as a backbone to encode the information from multiple graph scales (views), we learn better representations for the nodes in a graph. By concurrently maximizing the agreement between instances at both the patch and context levels, we estimate the anomaly score of each node with a statistical anomaly estimator according to the degree of agreement from multiple perspectives. To further exploit a handful of ground-truth anomalies (few-shot anomalies) that may be collected in real-life applications, we propose an extended algorithm, \texttt{ANEMONE-FS}\xspace, to integrate this valuable information into our method. We conduct extensive experiments under purely unsupervised settings and few-shot anomaly detection settings, and we demonstrate that the proposed method \texttt{ANEMONE}\xspace and its variant \texttt{ANEMONE-FS}\xspace consistently outperform state-of-the-art algorithms on six benchmark datasets. \end{abstract}
\begin{IEEEkeywords} Anomaly detection, self-supervised learning, graph neural networks (GNNs), unsupervised learning, few-shot learning \end{IEEEkeywords}
\IEEEpeerreviewmaketitle
\section{Introduction} \label{sec:introduction} \IEEEPARstart{A}{S} a general data structure to represent inter-dependency between objects, graphs have been widely used in many domains including social networks, biology, physics, and traffic. Analyzing graph data for various tasks, and detecting anomalies from graph data in particular, has drawn increasing attention in the research community due to its wide and critical applications in e-commerce, cyber-security, and finance. For instance, by using anomaly detection algorithms in e-commerce, we can detect fraudulent sellers by jointly considering their properties and behaviors \cite{pourhabibi2020fraud}. Similarly, we can detect abnormal accounts (social bots) which spread rumors in social networks with graph anomaly detection systems \cite{latah2020detection}.
Different from conventional anomaly detection approaches for tabular/vector data, where the attribute information is the only factor to be considered, graph anomaly detection requires collectively exploiting both the graph structure and the attribute information associated with each node. This complexity imposes significant challenges on the task. Existing research to address these challenges can be roughly divided into two categories: (1) shallow methods, and (2) deep methods. The early shallow methods typically exploit mechanisms such as ego-network analysis \cite{amen_perozzi2016scalable}, residual analysis \cite{radar_li2017radar} or CUR decomposition \cite{anomalous_peng2018anomalous}. These methods are conceptually simple, but they may not be able to learn nonlinear representations from complex graph data, leading to sub-optimal anomaly detection performance. The deep model approaches, such as the graph autoencoder (GAE) \cite{dominant_ding2019deep, li2019specae}, learn non-linear hidden representations and estimate the anomaly score for each node based on the reconstruction error. These methods achieve considerable improvements over shallow methods. However, they do not capture the contextual information (e.g., the subgraph around a node) well for anomaly detection and still suffer from unsatisfactory performance.
Very recently, a contrastive learning mechanism has been used for graph anomaly detection \cite{cola_liu2021anomaly}, which shows promising performance in graph anomaly detection. The key idea used in the proposed algorithm, namely CoLA \cite{cola_liu2021anomaly}, is to construct pairs of instances (e.g., a subgraph and a target node) and to employ a contrastive learning method to learn the representation. Based on contrastive learning, anomaly scores can be further calculated according to predictions of pairs of instances. Despite its success, only a single scale of information is considered in CoLA. In practice, due to the complexity of graph data, anomalies are often hidden in different scales (e.g., node and subgraph levels). For example, in the e-commerce application, fraudulent sellers may interact with only a small number of other users or items (i.e., local anomalies); in contrast, other cheaters may hide in larger communities (i.e., global anomalies). Such complex scales require more fine-grained and intelligent anomaly detection systems.
Another limitation of existing graph anomaly detection methods is that these methods are designed in a purely unsupervised manner. In scenarios where the ground-truth anomalies are unknown, these methods can play an important role. However, in practice, we may collect a handful of samples (i.e., a few shots) of anomalies. As anomalies are typically rare in many applications, these few-shot samples provide valuable information and should be incorporated into anomaly detection systems \cite{pang2019deep}. Unfortunately, most existing graph anomaly detection methods \cite{dominant_ding2019deep, li2019specae,cola_liu2021anomaly} fail to exploit these few-shot anomalies in their design, leading to significant information loss.
To overcome these limitations, in this paper, we propose a graph \underline{\textbf{AN}}omaly d\underline{\textbf{E}}tection framework with \underline{\textbf{M}}ulti-scale c\underline{\textbf{ON}}trastive l\underline{\textbf{E}}arning (\texttt{ANEMONE}\xspace in short) to detect anomalous nodes in graphs. Our core idea is to construct multiple scales (views) from the original graph and employ contrastive learning at both the patch and context levels simultaneously to capture anomalous patterns hidden in complex graphs. Specifically, we first employ two graph neural networks (GNNs) as encoders to learn the representation for each node and a subgraph around the target node. Then we construct pairs of positive and negative instances at both the patch level and the context level, based on which the contrastive learning maximizes the similarity between positive pairs and minimizes the similarity between negative pairs. The anomaly score of each node is estimated via a novel anomaly estimator by leveraging the statistics of multi-round contrastive scores. Our framework is general and flexible in the sense that it can easily incorporate ground-truth anomalies (few-shot anomalies). Concretely, the labeled anomalies are seamlessly integrated into the contrastive learning framework as additional negative pairs for both patch-level and context-level contrastive learning, leading to a new algorithm, \texttt{ANEMONE-FS}\xspace, for few-shot graph anomaly detection. Extensive experiments on six benchmark datasets validate the effectiveness of our algorithm \texttt{ANEMONE}\xspace for unsupervised graph anomaly detection and the effectiveness of \texttt{ANEMONE-FS}\xspace for few-shot settings.
The main contributions of this work are summarized as follows:
\begin{itemize}
\item We propose a general framework, \texttt{ANEMONE}\xspace, based on contrastive learning for graph anomaly detection. Our method exploits multi-scale information at both patch level and context level to capture anomalous patterns hidden in complex graphs.
\item We present a simple approach based on the multi-scale framework, \texttt{ANEMONE-FS}\xspace, to exploit the valuable few-shot anomalies at hand. Our method essentially enhances the flexibility of contrastive learning for anomaly detection and facilitates broader applications.
\item We conduct extensive experiments on six benchmark datasets to demonstrate the superiority of our \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace for both unsupervised and few-shot graph anomaly detection. \end{itemize}
The remainder of the paper is structured as follows. We review the related works in Section \ref{sec:rw} and give the problem definition in Section \ref{sec:PD}. The proposed methods \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace are described in Section \ref{sec:methodology}. The experimental results are shown in Section \ref{sec:experiments}. We conclude this paper in Section \ref{sec:conclusion}.
\section{Related Works} \label{sec:rw} In this section, we survey the representative works in three related topics, including graph neural networks, graph anomaly detection, and contrastive learning.
\subsection{Graph Neural Networks} In recent years, graph neural networks (GNNs) have achieved significant success in dealing with graph-related machine learning problems and applications \cite{gcn_kipf2017semi,gat_velivckovic2018graph,gnn_survey_wu2021comprehensive,wu2021beyond}. Considering both attributive and structural information, GNNs can learn a low-dimensional representation for each node in a graph. Current GNNs can be categorized into two types: spectral and spatial methods. The former type of methods was initially developed on the basis of spectral theory \cite{bruna2013spectral,defferrard2016convolutional,gcn_kipf2017semi}. Bruna et al. \cite{bruna2013spectral} first extend the convolution operation to the graph domain using spectral graph filters. Afterward, ChebNet \cite{defferrard2016convolutional} simplifies spectral GNNs by introducing Chebyshev polynomials as the convolution filter. GCN \cite{gcn_kipf2017semi} further utilizes the first-order approximation of the Chebyshev filter to learn node representations more efficiently. The second type of methods adopts the message-passing mechanism in the spatial domain, which propagates and aggregates local information along edges, to perform the convolution operation \cite{hamilton2017inductive,gat_velivckovic2018graph,xu2019how}. GraphSAGE \cite{hamilton2017inductive} learns node representations by sampling and aggregating neighborhoods. GAT \cite{gat_velivckovic2018graph} leverages the self-attention mechanism to assign a weight to each edge when performing aggregation. GIN \cite{xu2019how} introduces a summation-based aggregation function to ensure that the GNN is as powerful as the Weisfeiler-Lehman graph isomorphism test. For a thorough review, we refer the readers to the recent survey \cite{gnn_survey_wu2021comprehensive}.
\subsection{Graph Anomaly Detection}
Anomaly detection is a conventional data mining problem aiming to identify anomalous data samples that deviate significantly from others \cite{gad_survey_ma2021comprehensive}. Compared to detecting anomalies from text/image data \cite{deepsad_ruff2019deep}, anomaly detection on graphs is often more challenging since the correlations among nodes should also be considered when measuring the abnormality of samples. To tackle this challenge, some traditional solutions use shallow mechanisms like ego-network analysis (e.g., AMEN\cite{amen_perozzi2016scalable}), residual analysis (e.g., Radar\cite{radar_li2017radar}), and CUR decomposition (e.g., ANOMALOUS \cite{anomalous_peng2018anomalous}) to model the anomalous patterns in graph data. Recently, deep learning has become increasingly popular for graph anomaly detection \cite{gad_survey_ma2021comprehensive}. As an example of unsupervised methods, DOMINANT \cite{dominant_ding2019deep} employs a graph autoencoder model to reconstruct the attribute and structural information of graphs, and the reconstruction errors are leveraged to measure node-level abnormality. CoLA \cite{cola_liu2021anomaly} considers a contrastive learning model that captures abnormal patterns via learning node-subgraph agreements. Among semi-supervised methods, SemiGNN \cite{semignn_wang2019semi} is a representative method that leverages hierarchical attention to learn from multi-view graphs for fraud detection. GDN \cite{gdn_ding2021few} adopts a deviation loss to train a GNN for few-shot node anomaly detection. Apart from the aforementioned methods for attributed graphs, some recent works also aim to identify anomalies in dynamic graphs \cite{dyn_gad_wang2019detecting,taddy_liu2021anomaly}.
\subsection{Graph Contrastive Learning}
Originating from visual representation learning \cite{he2020momentum,chen2020simple,grill2020bootstrap}, contrastive learning has become increasingly popular in addressing self-supervised representation learning problems in various areas. In graph deep learning, recent works based on contrastive learning show competitive performance in graph representation learning scenarios \cite{gssl_survey_liu2021graph,zheng2021towards}. DGI \cite{dgi_velickovic2019deep} learns by maximizing the mutual information between node representations and a graph-level global representation, which makes the first attempt to adapt contrastive learning to GNNs. GMI \cite{peng2020graph} jointly utilizes edge-level contrast and node-level contrast to discover high-quality node representations. GCC \cite{qiu2020gcc} constructs a subgraph-level contrastive learning model to learn structural representations. GCA \cite{zhu2021graph} introduces an adaptive augmentation strategy to generate different views for graph contrastive learning. MERIT \cite{Jin2021MultiScaleCS} leverages a bootstrapping mechanism and multi-scale contrastiveness to learn informative node embeddings for network data. Apart from learning effective representations, graph contrastive learning has also been applied to various applications, such as drug-drug interaction prediction \cite{wang2021multi} and social recommendation \cite{yu2021self}.
\section{Problem Definition} \label{sec:PD} In this section, we introduce and define the problems of unsupervised and few-shot graph anomaly detection. Throughout the paper, we use bold uppercase (e.g., $\mathbf{X}$), calligraphic (e.g., $\mathbfcal{V}$), and lowercase letters (e.g., $\mathbf{x}^{(i)}$) to denote matrices, sets, and vectors, respectively. We also summarize all important notations in Table \ref{table:notation}. In this work, we mainly focus on anomaly detection tasks on attributed graphs, which widely exist in the real world. Formally speaking, we define attributed graphs and graph neural networks (GNNs) as follows:
\begin{definition}[Attributed Graphs] Given an attributed graph $\mathcal{G}=(\mathbf{X},\mathbf{A})$, we denote its node attribute (i.e., feature) and adjacency matrices as $\mathbf{X} \in \mathbb{R}^{N \times D}$ and $\mathbf{A} \in \mathbb{R}^{N \times N}$, where $N$ and $D$ are the number of nodes and feature dimensions.
An attributed graph can also be defined as $\mathcal{G}=(\mathbfcal{V}, \mathbfcal{E}, \mathbf{X})$, where $\mathbfcal{V}=\{v_1, v_2, \cdots, v_N\}$ and $\mathbfcal{E}=\{e_1, e_2, \cdots, e_M\}$ are the node and edge sets. Thus, we have $N=|\mathbfcal{V}|$, the number of edges $M=|\mathbfcal{E}|$, and define $\mathbf{x}_i \in \mathbb{R}^{D}$ as the attributes of node $v_i$. To represent the underlying node connectivity, we let $\mathbf{A}_{ij}=1$ if there exists an edge between $v_i$ and $v_j$ in $\mathbfcal{E}$, otherwise $\mathbf{A}_{ij}=0$.
In particular, given a node $v_i$, we define its neighborhood set as $\mathbfcal{N}(v_i)=\{v_j \in \mathbfcal{V} | \mathbf{A}_{ij} \neq 0 \}$. \end{definition}
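For concreteness, the following toy example (a hypothetical four-node graph of our own, not one of the benchmark datasets used later) builds the matrices $\mathbf{A}$ and $\mathbf{X}$ and the neighborhood set $\mathbfcal{N}(v_i)$ in plain Python:
\begin{verbatim}
import numpy as np

# A toy attributed graph G = (X, A) with N = 4 nodes and D = 3 features
X = np.random.default_rng(0).normal(size=(4, 3))   # node attribute matrix
edges = [(0, 1), (1, 2), (2, 3)]                    # edge set E
A = np.zeros((4, 4), dtype=int)
for i, j in edges:
    A[i, j] = A[j, i] = 1                           # undirected adjacency

def neighborhood(A, i):
    """N(v_i) = { v_j : A_ij != 0 }"""
    return set(np.flatnonzero(A[i]).tolist())

print(neighborhood(A, 1))   # {0, 2}
\end{verbatim}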
\begin{definition}[Graph Neural Networks] Given an attributed graph $\mathcal{G}=(\mathbf{X},\mathbf{A})$, a parameterized graph neural network $GNN(\cdot)$ aims to learn the low-dimensional embeddings of $\mathbf{X}$ by considering the topological information $\mathbf{A}$, denoted as $\mathbf{H} \in \mathbb{R}^{N \times D'}$, where $D'$ is the embedding dimensions and we have $D' \ll D$. For a specific node $v_i \in \mathbfcal{V}$, we denote its embedding as $\mathbf{h}^{(i)} \in \mathbb{R}^{D'}$ where $\mathbf{h}^{(i)} \in \mathbf{H}$. \end{definition}
In this paper, we focus on two different anomaly detection tasks on attributed graphs, namely unsupervised and few-shot graph anomaly detection.
Firstly, we define the problem of unsupervised graph anomaly detection as follows:
\begin{definition}[Unsupervised Graph Anomaly Detection] Given an unlabeled attributed graph $\mathcal{G}=(\mathbf{X},\mathbf{A})$, we intend to train and evaluate a graph anomaly detection model $\mathcal{F}(\cdot): \mathbb{R}^{N \times D} \to \mathbb{R}^{N \times 1}$ across all nodes in $\mathbfcal{V}$, where we use $\mathbf{y}$ to denote the output node anomaly scores, and $y^{(i)}$ is the anomaly score of node $v_i$. \end{definition}
For graphs with limited prior knowledge (i.e., labeling information) on the underlying anomalies, we define the problem of few-shot graph anomaly detection as below:
\begin{definition}[Few-shot Graph Anomaly Detection]
For an attributed graph $\mathcal{G}=(\mathbf{X},\mathbf{A})$, we have a small set of labeled anomalies $\mathbfcal{V}^L$ and a remaining set of unlabeled nodes $\mathbfcal{V}^U$, where $|\mathbfcal{V}^L| \ll |\mathbfcal{V}^U|$, since labeling anomalies is relatively expensive in the real world and typically only very few labeled anomalies are available. Thus, our goal is to learn a model $\mathcal{F}(\cdot): \mathbb{R}^{N \times D} \to \mathbb{R}^{N \times 1}$ on $\mathbfcal{V}^L \cup \mathbfcal{V}^U$, which measures node abnormalities by calculating their anomaly scores $\mathbf{y}$. It is worth noting that during the evaluation, the well-trained model $\mathcal{F}^*(\cdot)$ is only tested on $\mathbfcal{V}^U$ to prevent potential information leakage. \end{definition}
\begin{table}[t]
\small
\centering
\caption{Summary of important notations.}
\begin{tabular}{ p{75 pt}<{\centering} | p{155 pt}}
\toprule[1.0pt]
Symbols & Description \\
\cmidrule{1-2}
$\mathcal{G}=(\mathbf{X}, \mathbf{A})$ & An attributed graph \\
$\mathbfcal{V}, \mathbfcal{E}$ & The node and edge set of $\mathcal{G}$ \\
$\mathbfcal{V^L}$ & The labeled node set where $\mathbfcal{V^L} \subseteq \mathbfcal{V}$ \\
$\mathbfcal{V^U}$ & The unlabeled node set where $\mathbfcal{V^U} \subseteq \mathbfcal{V}$ \\
$\mathbf{A} \in \mathbb{R}^{N \times N}$ & The adjacency matrix of $\mathcal{G}$ \\
$\mathbf{X} \in \mathbb{R}^{N \times D}$ & The node feature matrix of $\mathcal{G}$ \\
$\mathbf{x}^{(i)} \in \mathbb{R}^{D}$ & The feature vector of $v_i$ where $ \mathbf{x}^{(i)} \in \mathbf{X}$ \\
$\mathbfcal{N}(v_i)$ & The neighborhood set of node $v_i \in \mathcal{V}$ \\
\cmidrule{1-2}
$\mathcal{G}^{(i)}_p$, $\mathcal{G}^{(i)}_c$ & Two generated subgraphs of $v_i$ \\
$\mathbf{A}_{view}^{(i)} \in \mathbb{R}^{K \times K}$ & The adjacency matrix of $\mathcal{G}^{(i)}_{view}$ where $view \in \{p, c\}$ \\
$\mathbf{X}_{view}^{(i)} \in \mathbb{R}^{K \times D}$ & The node feature matrix of $\mathcal{G}^{(i)}_{view}$ where $view \in \{p, c\}$ \\
$y^{(i)}$ & The anomaly score of $v_i$ \\
\cmidrule{1-2}
$\mathbf{H}^{(i)}_p \in \mathbb{R}^{K \times D'}$ & The node embedding matrix of $\mathcal{G}^{(i)}_p$ \\
$\mathbf{H}^{(i)}_c \in \mathbb{R}^{K \times D'}$ & The node embedding matrix of $\mathcal{G}^{(i)}_c$ \\
$\mathbf{h}^{(i)}_p \in \mathbb{R}^{1 \times D'}$ & The node embeddings of masked node $v_i$ in $\mathbf{H}^{(i)}_p$ \\
$\mathbf{h}^{(i)}_c \in \mathbb{R}^{1 \times D'}$ & The contextual embeddings of $\mathcal{G}^{(i)}_c$ \\
$\mathbf{z}^{(i)}_p, \mathbf{z}^{(i)}_c \in \mathbb{R}^{1 \times D'}$ & The node embeddings of $v_i$ in patch-level and context-level networks \\
$s^{(i)}_p, \tilde{s}^{(i)}_p \in \mathbb{R}$ & The positive and negative patch-level contrastive scores of $v_i$ \\
$s^{(i)}_c, \tilde{s}^{(i)}_c \in \mathbb{R}$ & The positive and negative context-level contrastive scores of $v_i$ \\
$\mathbf{\Theta}, \mathbf{\Phi} \in \mathbb{R}^{D \times D'}$ & The trainable parameter matrices of two graph encoders \\
$\mathbf{W}_{p},\mathbf{W}_{c} \in \mathbb{R}^{D' \times D'}$ & The trainable parameter matrices of two bilinear mappings \\
\cmidrule{1-2}
$N^L, N^U, N$ & The number of labeled, unlabeled, and all nodes in $\mathcal{G}$ \\
$K$ & The number of nodes in subgraphs \\
$D$ & The dimension of node attributes in $\mathcal{G}$ \\
$D'$ & The dimension of node embeddings \\
$R$ & The number of evaluation rounds in anomaly scoring \\
\bottomrule[1.0pt]
\end{tabular}
\label{table:notation} \end{table}
\section{Methodology} \label{sec:methodology} In this section, we introduce the proposed \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace algorithms in detecting node-level graph anomalies in an unsupervised and few-shot supervised manner. The overall frameworks of our methods are shown in Figure \ref{fig:framework1} and \ref{fig:framework2}, which consist of four main components, namely the \textit{augmented subgraphs generation}, \textit{patch-level contrastive network}, \textit{context-level contrastive network}, and \textit{statistical graph anomaly scorer}. Firstly, given a target node from the input graph, we exploit its contextual information by generating two subgraphs associated with it. Then, we propose two general yet powerful contrastive mechanisms for graph anomaly detection tasks. Specifically, for an attributed graph without any prior knowledge on the underlying anomalies, the proposed \texttt{ANEMONE}\xspace method learns the patch-level and context-level agreements by maximizing (1) the mutual information between node embeddings in the patch-level contrastive network and (2) the mutual information between node embeddings and their contextual embeddings in the context-level contrastive network. The underlying intuition is that there are only a few anomalies, and thus our well-trained model can identify the salient attributive and structural mismatch between an abnormal node and its surrounding contexts by throwing a significantly higher contrastive score. On the other hand, if few labeled anomalies are available in an attributed graph, the proposed \texttt{ANEMONE-FS}\xspace variant can effectively utilize the limited labeling information to further enrich the supervision signals extracted by \texttt{ANEMONE}\xspace. This intriguing capability is achieved by plugging in a different contrastive route, where the aforementioned patch-level and context-level agreements are minimized for labeled anomalies while still maximized for unlabeled nodes as same as in \texttt{ANEMONE}\xspace.
Finally, we design a universal graph anomaly scorer to measure node abnormalities by statistically annealing the patch-level and context-level contrastive scores at the inference stage, which shares and works on both unsupervised and few-shot supervised scenarios.
In the rest of this section, we introduce the four primary components of \texttt{ANEMONE}\xspace in Subsection \ref{subsec: augmented subg generation}, \ref{subsec: patch-level}, \ref{subsec: context-level}, and \ref{subsec: scorer}. Particularly, in Subsection \ref{subsec: few-shot}, we discuss how \texttt{ANEMONE}\xspace can be extended to more competitive \texttt{ANEMONE-FS}\xspace in detail to incorporate the available supervision signals provided by a few labeled anomalies. In Subsection \ref{subsec: optimization}, we present and discuss the training objective of \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace, as well as their algorithms and time complexity.
\begin{figure*}
\caption{ The conceptual framework of \texttt{ANEMONE}\xspace. Given an attributed graph $\mathcal{G}$, we first sample a batch of target nodes, where their associated anonymized subgraphs are generated and fed into two contrastive networks. Then, we design a multi-scale (i.e., patch-level and context-level) contrastive network to learn agreements between node and contextual embeddings from different perspectives. During the model inference, two contrastive scores are statistically annealed to obtain the final anomaly score of each node in $\mathcal{G}$. }
\label{fig:framework1}
\end{figure*}
\subsection{Augmented Subgraphs Generation} \label{subsec: augmented subg generation} Graph contrastive learning relies on effective discrimination pairs to extract supervision signals from the rich graph attributive and topological information \cite{gssl_survey_liu2021graph}. Recently, graph augmentations, such as attribute masking, edge modification, subgraph sampling, and graph diffusion, have been widely applied to assist contrastive models in learning expressive graph representations \cite{Jin2021MultiScaleCS, zheng2021towards}. However, not all of them are directly applicable to anomaly detection tasks. For example, edge modification and graph diffusion can distort the original topological information, thus hindering the model from effectively distinguishing an abnormal node from its surrounding contexts. To avoid falling into this trap, we adopt an \textit{anonymized subgraph sampling} mechanism to generate graph views for our contrastive networks, which is based on two motivations: (1) The agreement between a node and its surrounding contexts (i.e., subgraphs) is typically sufficient to reflect the abnormality of this node \cite{jin2021anemone, zheng2021generative}; (2) Subgraph sampling provides adequate diversity of node surrounding contexts for robust model training and statistical anomaly scoring. Specifically, we explain the details of the proposed augmentation strategy for graph anomaly detection as follows:
\begin{enumerate}
\item \textbf{Target node sampling.}
As this work mainly focuses on node-level anomaly detection, we first sample a batch of target nodes from a given attributed graph. It is worth noting that a specific target node may or may not be labeled, which results in different contrastive routes, as shown in the middle red and green dashed boxes in Figures \ref{fig:framework1} and \ref{fig:framework2}. We discuss this in detail in the following subsections.
\item \textbf{Surrounding context sampling.}
Although several widely adopted graph augmentations are available \cite{gssl_survey_liu2021graph}, most of them are designed to slightly perturb the original graph attributive or topological information to learn robust and expressive node-level or graph-level representations, which conflicts with the motivations mentioned above and may introduce extra anomalies. Thus, in this work, we employ subgraph sampling as the primary augmentation strategy to generate augmented graph views for each target node based on the random walk with restart (RWR) algorithm \cite{tong2006fast}. Taking a target node $v_i$ for example, we generate its surrounding contexts by sampling subgraphs centred at it with a fixed size $K$, denoted as $\mathcal{G}^{(i)}_p = (\mathbf{A}^{(i)}_p, \mathbf{X}^{(i)}_p)$ and $\mathcal{G}^{(i)}_c = (\mathbf{A}^{(i)}_c, \mathbf{X}^{(i)}_c)$ for the patch-level and context-level contrastive networks. In particular, we let the first node in $\mathcal{G}^{(i)}_p$ and $\mathcal{G}^{(i)}_c$ be the starting (i.e., target) node.
\item \textbf{Target node anonymization.} Although the above-generated graph views can be directly fed into the contrastive networks, there is a critical limitation: the attributes of a target node are involved in calculating its patch-level and context-level embeddings, which results in information leakage during the multi-level contrastive learning. To prevent this issue and to construct harder pretext tasks that boost model training \cite{gssl_survey_liu2021graph}, we anonymize target nodes in their graph views by completely masking their attributes, i.e., $\mathbf{X}^{(i)}_p[1,:] \rightarrow \overrightarrow{0}$ and $\mathbf{X}^{(i)}_c[1,:] \rightarrow \overrightarrow{0}$ (a sketch of the whole sampling-and-anonymization procedure is given after this list). \end{enumerate}
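To make the above three steps concrete, the following is a minimal sketch of the RWR-based sampling and anonymization for one target node. It is our own simplification rather than the released implementation; the restart probability, the step budget, and the use of unweighted transitions are illustrative assumptions.
\begin{verbatim}
import numpy as np

def rwr_subgraph(A, X, target, K, restart_p=0.5, max_steps=200, rng=None):
    """Sample a (up to) K-node subgraph around `target` via random walk
    with restart, then anonymize the target by zeroing its attribute row."""
    rng = rng or np.random.default_rng()
    nodes, current = [target], target
    for _ in range(max_steps):
        if len(nodes) >= K:
            break
        if rng.random() < restart_p:
            current = target                    # restart at the target node
        else:
            neigh = np.flatnonzero(A[current])  # unweighted transition
            current = rng.choice(neigh) if len(neigh) else target
        if current not in nodes:
            nodes.append(current)
    nodes = nodes[:K]                           # may be < K for tiny components
    A_sub = A[np.ix_(nodes, nodes)].copy()      # induced adjacency matrix
    X_sub = X[nodes].copy()                     # induced attribute matrix
    X_sub[0, :] = 0.0                           # target node anonymization
    return A_sub, X_sub
\end{verbatim}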
\subsection{Patch-level Contrastive Network} \label{subsec: patch-level} The objective of patch-level contrastiveness is to learn the local agreement between the embedding of masked target node $v_i$ in its surrounding contexts $\mathcal{G}^{(i)}_p$ and the embedding of $v_i$ itself. The underlying intuition of the proposed patch-level contrastive learning is that the mismatch between a node and its directly connected neighbors is an effective measurement to detect \textit{local anomalies}, which indicates the anomalies that are distinguishable from their neighbors. For example, some e-commerce fraudsters are likely to transact with unrelated users directly, where patch-level contrastiveness is proposed to detect such anomalies in an attributed graph. As shown in Figure \ref{fig:framework1}, our patch-level contrastive network consists of two main components: Graph encoder and contrastive module.
\noindent \textbf{Graph encoder.} The patch-level graph encoder takes a target node and one of its subgraphs (i.e., the surrounding context $\mathcal{G}^{(i)}_p$) as the input. Specifically, the node embeddings of $\mathcal{G}^{(i)}_p$ are calculated as below:
\begin{equation} \begin{aligned} \mathbf{H}^{(i)}_{p} &= GNN_{\theta}\left(\mathcal{G}^{(i)}_p\right) = GCN\left(\mathbf{A}^{(i)}_p, \mathbf{X}^{(i)}_p ; \mathbf{\Theta} \right)\\
&= \sigma\left(\widetilde{{\mathbf{D}}^{(i)}_p}^{-\frac{1}{2}} \widetilde{{\mathbf{A}}^{(i)}_p} \widetilde{{\mathbf{D}}^{(i)}_p}^{-\frac{1}{2}} \mathbf{X}^{(i)}_p \mathbf{\Theta} \right), \end{aligned} \label{eq:gnn} \end{equation} where $\mathbf{\Theta} \in \mathbb{R}^{D \times D'}$ denotes the set of trainable parameters of the patch-level graph neural network $GNN_{\theta}(\cdot)$. For simplicity, and following \cite{jin2021anemone}, we adopt a single-layer graph convolutional network (GCN) \cite{gcn_kipf2017semi} as the backbone encoder, where $\sigma(\cdot)$ denotes the ReLU activation in a typical GCN layer, $\widetilde{{\mathbf{A}}^{(i)}_p} = \mathbf{A}^{(i)}_p + \mathbf{I}$, and $\widetilde{{\mathbf{D}}^{(i)}_p}$ is the degree matrix of $\mathcal{G}^{(i)}_p$ calculated by row-wise summing $\widetilde{{\mathbf{A}}^{(i)}_p}$. Alternatively, one may also replace GCN with other off-the-shelf graph neural networks to aggregate messages from nodes' neighbors to calculate $\mathbf{H}^{(i)}_{p}$.
In patch-level contrastiveness, our discrimination pairs are the masked and original target node embeddings (e.g., $\mathbf{h}^{(i)}_{p}$ and $\mathbf{z}^{(i)}_{p}$ for a target node $v_i$), where the former can be easily obtained via $\mathbf{h}^{(i)}_{p} = \mathbf{H}^{(i)}_{p}[1,:]$. To calculate the embeddings of original target nodes, e.g., $\mathbf{z}^{(i)}_{p}$, we only need to feed $\mathbf{x}^{(i)} = \mathbf{X}[i,:]$ into $GNN_{\theta}(\cdot)$ without the underlying graph structure, since there is only a single node $v_i$. In such a way, $GNN_{\theta}(\cdot)$ degrades to an MLP that is parameterized with $\mathbf{\Theta}$. We illustrate the calculation of $\mathbf{z}^{(i)}_{p}$ as follows:
\begin{equation} \mathbf{z}^{(i)}_{p} = GNN_{\theta}\left(\mathbf{x}^{(i)}\right) = \sigma\left(\mathbf{x}^{(i)} \mathbf{\Theta} \right), \label{eq:mlp} \end{equation} where adopting $\mathbf{\Theta}$ ensures $\mathbf{z}^{(i)}_{p}$ and $\mathbf{h}^{(i)}_{p}$ are mapped into the same latent space to assist the following contrasting.
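As an illustration of Eqs. \eqref{eq:gnn} and \eqref{eq:mlp}, a minimal PyTorch sketch of the shared single-layer GCN encoder could look as follows; this is a simplified re-implementation with our own naming (dense matrices are assumed for brevity), not the released code.
\begin{verbatim}
import torch
import torch.nn as nn

class OneLayerGCN(nn.Module):
    """Single-layer GCN encoder shared by the subgraph (Eq. gnn)
    and the raw target node (Eq. mlp)."""
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.theta = nn.Linear(in_dim, out_dim, bias=False)  # Theta

    def forward(self, X, A=None):
        if A is None:                 # a single node without structure
            return torch.relu(self.theta(X))
        A_tilde = A + torch.eye(A.size(0), device=A.device)  # self-loops
        d = A_tilde.sum(dim=1)
        D_inv_sqrt = torch.diag(d.pow(-0.5))
        A_hat = D_inv_sqrt @ A_tilde @ D_inv_sqrt             # normalization
        return torch.relu(A_hat @ self.theta(X))
\end{verbatim}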
\noindent \textbf{Patch-level Contrasting.} To measure the agreement between $\mathbf{h}^{(i)}_{p}$ and $\mathbf{z}^{(i)}_{p}$, we adopt a bilinear mapping to compute the similarity between them (i.e., the positive score in \texttt{ANEMONE}\xspace), denoted as $\mathbf{s}^{(i)}_{p}$:
\begin{equation} \mathbf{s}^{(i)}_{p} = Bilinear\left( \mathbf{h}^{(i)}_{p}, \mathbf{z}^{(i)}_{p} \right) = \sigma\left(\mathbf{h}^{(i)}_{p} \mathbf{W}_p {\mathbf{z}^{(i)}_{p}}^\top \right), \label{eq:patch-level anemone positive score} \end{equation} where $\mathbf{W}_p \in \mathbb{R}^{D' \times D'}$ is a set of trainable weighting parameters, and $\sigma(\cdot)$ denotes the Sigmoid activation in this equation.
Also, there is a \textit{patch-level negative sampling} mechanism to assist model training and avoid it being biased by merely optimizing on positive pairs. Specifically, we first calculate $\mathbf{h}^{(j)}_{p}$ based on the subgraph centred at an irrelevant node $v_j$, then we calculate the similarity between $\mathbf{h}^{(j)}_{p}$ and $\mathbf{z}^{(i)}_{p}$ (i.e., negative score) with the identical bilinear mapping:
\begin{equation} \tilde{\mathbf{s}}^{(i)}_{p} = Bilinear\left( \mathbf{h}^{(j)}_{p}, \mathbf{z}^{(i)}_{p} \right) = \sigma\left(\mathbf{h}^{(j)}_{p} \mathbf{W}_p {\mathbf{z}^{(i)}_{p}}^\top \right). \label{eq:patch-level anemone negative score} \end{equation}
In practice, we train the model in a mini-batch manner as mentioned in Subsection \ref{subsec: augmented subg generation}. Thus, $\mathbf{h}^{(j)}_{p}$ can be easily acquired by using other masked target node embeddings in the same mini-batch with size $B$. Finally, the patch-level contrastive objective of \texttt{ANEMONE}\xspace (under the context of unsupervised graph anomaly detection) can be formalized with the Jensen-Shannon divergence \cite{dgi_velickovic2019deep}:
\begin{equation} \mathcal{L}_{p}=-\frac{1}{2B}\sum_{i=1}^{B}\left(\log\left(\mathbf{s}^{(i)}_{p}\right)+\log\left(1-\tilde{\mathbf{s}}^{(i)}_{p}\right)\right). \label{eq:patch-level loss} \end{equation}
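The bilinear scoring of Eqs. \eqref{eq:patch-level anemone positive score} and \eqref{eq:patch-level anemone negative score} together with the loss of Eq. \eqref{eq:patch-level loss} can be sketched as follows. Pairing each target with the masked embedding of the previous node in the mini-batch as its negative is our own simplification of the in-batch negative sampling described above.
\begin{verbatim}
import torch
import torch.nn as nn

class PatchContrast(nn.Module):
    """Bilinear discriminator and JSD-style loss, patch level."""
    def __init__(self, dim):
        super().__init__()
        self.bilinear = nn.Bilinear(dim, dim, 1, bias=False)  # W_p

    def forward(self, h_masked, z_node):
        # h_masked, z_node: (B, D') masked and original target embeddings
        s_pos = torch.sigmoid(self.bilinear(h_masked, z_node)).squeeze(-1)
        h_neg = torch.roll(h_masked, shifts=1, dims=0)   # in-batch negatives
        s_neg = torch.sigmoid(self.bilinear(h_neg, z_node)).squeeze(-1)
        eps = 1e-8
        loss = -0.5 * (torch.log(s_pos + eps)
                       + torch.log(1.0 - s_neg + eps)).mean()
        return loss, s_pos, s_neg
\end{verbatim}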
\begin{figure*}
\caption{ The conceptual framework of \texttt{ANEMONE-FS}\xspace, which shares the similar pipeline of \texttt{ANEMONE}\xspace. Given an attributed graph $\mathcal{G}$, we first sample a batch of target nodes. After this, we design a different multi-scale contrastive network equipped with two contrastive routes, where the agreements between node and contextual embeddings are maximized for unlabeled node while minimized for labeled anomalies in a mini-batch. At the inference stage, it has the same graph anomaly detector to estimate the anomlay socre of each node in $\mathcal{G}$.}
\label{fig:framework2}
\end{figure*}
\subsection{Context-level Contrastive Network} \label{subsec: context-level} Different from the patch-level contrastiveness, the objective of context-level contrasting is to learn the global agreement between the contextual embedding of a masked target node $v_i$ in $\mathcal{G}^{(i)}_c$ and its own embedding obtained by mapping its attributes to the latent space. The intuition behind this is to capture the \textit{global anomalies} that are difficult to distinguish by directly comparing a node with its closest neighbors. For instance, fraudsters are also likely to camouflage themselves in large communities, resulting in a more challenging anomaly detection task. To enable the model to detect these anomalies, we propose the context-level contrastiveness with a multi-scale (i.e., node versus graph) contrastive learning schema. In the middle part of Figure \ref{fig:framework1}, we illustrate the conceptual design of our context-level contrastive network, which has three main components: a graph encoder, a readout module, and a contrastive module.
\noindent \textbf{Graph encoder and readout module.} The context-level graph encoder shares the identical neural architecture of the patch-level graph encoder, but it has a different set of trainable parameters $\mathbf{\Phi}$. Specifically, given a subgraph $\mathcal{G}^{(i)}_c$ centred at the target node $v_i$, we calculate the node embeddings of $\mathcal{G}^{(i)}_c$ in a similar way:
\begin{equation} \mathbf{H}^{(i)}_{c} = GNN_{\phi}\left(\mathcal{G}^{(i)}_c\right) = \sigma\left(\widetilde{{\mathbf{D}}^{(i)}_c}^{-\frac{1}{2}} \widetilde{{\mathbf{A}}^{(i)}_c} \widetilde{{\mathbf{D}}^{(i)}_c}^{-\frac{1}{2}} \mathbf{X}^{(i)}_c \mathbf{\Phi} \right). \label{eq:gnn2} \end{equation}
The main difference between the context-level and patch-level contrastiveness is that the former aims to contrast target node embeddings with subgraph embeddings (i.e., node versus subgraph), while the aforementioned patch-level contrasting learns the agreements between the masked and original target node embeddings (i.e., node versus node). To obtain the contextual embedding of target node $v_i$ (i.e., $\mathbf{h}^{(i)}_{c}$), we aggregate all node embeddings in $\mathbf{H}^{(i)}_{c}$ with an average readout function:
\begin{equation} \mathbf{h}^{(i)}_{c} = readout \left( \mathbf{H}^{(i)}_{c} \right) = \frac{1}{K}\sum_{j=1}^{K}\mathbf{H}^{(i)}_{c}[j,:], \label{eq:readout} \end{equation} where $K$ denotes the number of nodes in a contextual subgraph.
Similarly, we can also obtain the embedding of $v_i$ via a non-linear mapping:
\begin{equation} \mathbf{z}^{(i)}_{c} = GNN_{\phi}\left(\mathbf{x}^{(i)}\right) = \sigma\left(\mathbf{x}^{(i)} \mathbf{\Phi} \right), \label{eq:mlp2} \end{equation} where $\mathbf{z}^{(i)}_{c}$ and $\mathbf{h}^{(i)}_{c}$ are projected to the same latent space with a shared set of parameters $\mathbf{\Phi}$.
\noindent \textbf{Context-level contrasting.} We measure the similarity between $\mathbf{h}^{(i)}_{c}$ and $\mathbf{z}^{(i)}_{c}$ (i.e., the positive score in \texttt{ANEMONE}\xspace) with a different parameterized bilinear function:
\begin{equation} \mathbf{s}^{(i)}_{c} = Bilinear\left( \mathbf{h}^{(i)}_{c}, \mathbf{z}^{(i)}_{c} \right) = \sigma\left(\mathbf{h}^{(i)}_{c} \mathbf{W}_c {\mathbf{z}^{(i)}_{c}}^\top \right), \label{eq:context-level anemone positive score} \end{equation} where $\mathbf{W}_c \in \mathbb{R}^{D' \times D'}$ and $\sigma(\cdot)$ is the Sigmoid activation. Similarly, there is a \textit{context-level negative sampling} mechanism to avoid model collapse, where negatives $\mathbf{h}^{(j)}_{c}$ are obtained from other irrelevant surrounding contexts $\mathcal{G}^{(j)}_c$ where $j \neq i$. Thus, the negative score can be obtained via:
\begin{equation} \tilde{\mathbf{s}}^{(i)}_{c} = Bilinear\left( \mathbf{h}^{(j)}_{c}, \mathbf{z}^{(i)}_{c} \right) = \sigma\left(\mathbf{h}^{(j)}_{c} \mathbf{W}_c {\mathbf{z}^{(i)}_{c}}^\top \right). \label{eq:context-level anemone negative score} \end{equation}
Finally, the context-level contrastiveness is ensured by optimizing the following objective:
\begin{equation} \mathcal{L}_{c}=-\frac{1}{2B}\sum_{i=1}^{B}\left(\log\left(\mathbf{s}^{(i)}_{c}\right)+\log\left(1-\tilde{\mathbf{s}}^{(i)}_{c}\right)\right). \label{eq:context-level loss} \end{equation}
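The context-level module mirrors the patch-level sketch above; the only structural difference is the average readout of Eq. \eqref{eq:readout}, illustrated below with our own variable names.
\begin{verbatim}
import torch

def context_readout(H_c: torch.Tensor) -> torch.Tensor:
    """Average readout of Eq. (readout): H_c has shape (K, D'),
    the result is the contextual embedding h_c of the target node."""
    return H_c.mean(dim=0)

# The bilinear contrasting and the loss then follow the patch-level
# sketch, with parameters W_c and the pair (h_c, z_c) in place of
# (h_p, z_p).
\end{verbatim}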
\subsection{Few-shot Multi-scale Contrastive Network} \label{subsec: few-shot} The above patch-level and context-level contrastiveness are conceptually designed to detect attributive and structural graph anomalies in an unsupervised manner (i.e., the proposed \texttt{ANEMONE}\xspace method in Algorithm \ref{algo: anemone}). However, how to incorporate limited supervision signals remains unknown if there are a few available labeled anomalies. To answer this question, we propose an extension of \texttt{ANEMONE}\xspace named \texttt{ANEMONE-FS}\xspace to perform graph anomaly detection in a few-shot supervised manner without drastically changing the overall framework (i.e., Figure \ref{fig:framework2}) and training objective (i.e., Equations \ref{eq:patch-level loss}, \ref{eq:context-level loss}, and \ref{eq:loss}). This design further boosts the performance of \texttt{ANEMONE}\xspace significantly with only a few labeled anomalies, which can be easily acquired in many real-world applications. Specifically, given a mini-batch of target nodes $\mathbfcal{V}_B=\{\mathbfcal{V}^L_B, \mathbfcal{V}^U_B\}$ with labeled anomalies and unlabeled nodes, we design and insert a different contrastive route into the above patch-level and context-level contrastiveness, shown as the bottom dashed arrows (i.e., the so-called negative pairs) in the red and green dashed boxes in the middle part of Figure \ref{fig:framework2}.
\noindent \textbf{Few-shot patch-level contrasting.} For nodes in $\mathbfcal{V}^U_B$, we follow Equations \ref{eq:patch-level anemone positive score} and \ref{eq:patch-level anemone negative score} to compute positive and negative scores as in \texttt{ANEMONE}\xspace. However, for a target node $v_k$ in $\mathbfcal{V}^L_B$, we minimize the mutual information between its masked node embedding $\mathbf{h}^{(k)}_{p}$ and original node embedding $\mathbf{z}^{(k)}_{p}$, which is equivalent to enriching the patch-level negative set with an additional negative pair:
\begin{equation} \tilde{\mathbf{s}}^{(k)}_{p} = Bilinear\left( \mathbf{h}^{(k)}_{p}, \mathbf{z}^{(k)}_{p} \right) = \sigma\left(\mathbf{h}^{(k)}_{p} \mathbf{W}_p {\mathbf{z}^{(k)}_{p}}^\top \right). \label{eq:patch-level anemone-fs negative score} \end{equation}
The intuition behind this is two-fold. Firstly, we assume that most unlabeled nodes are not anomalies, so the mutual information between masked and original target node embeddings should be maximized for the model to distinguish normal nodes from the few anomalies in an attributed graph. Secondly, for a labeled target node (i.e., an anomaly), this mutual information should be minimized for the model to learn how anomalies differ from their surrounding contexts. As a result, there are two types of patch-level negative pairs in \texttt{ANEMONE-FS}\xspace: (1) $\mathbf{h}^{(j)}_{p}$ and $\mathbf{z}^{(i)}_{p}$ where $v_i \in \mathbfcal{V}^U_B$ and $i \neq j$; (2) $\mathbf{h}^{(k)}_{p}$ and $\mathbf{z}^{(k)}_{p}$ where $v_k \in \mathbfcal{V}^L_B$.
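One possible way to fold these extra negative pairs into the patch-level loss is sketched below. The boolean mask \texttt{is\_labeled}, the function name, and the normalization over all contrastive terms are our own illustrative choices; the original objective is still governed by Eqs. \eqref{eq:patch-level loss} and \eqref{eq:loss}.
\begin{verbatim}
import torch

def patch_loss_fs(s_pos, s_neg, s_self, is_labeled, eps=1e-8):
    """s_pos, s_neg: in-batch positive/negative scores for all nodes;
    s_self: sigma(h_p^(k) W_p z_p^(k)^T) for every node in the batch;
    is_labeled: True for the few labeled anomalies in the batch."""
    unl = ~is_labeled
    loss_unl = -(torch.log(s_pos[unl] + eps)
                 + torch.log(1 - s_neg[unl] + eps)).sum()
    # labeled anomalies: their own (masked, original) pair is a negative
    loss_lab = -torch.log(1 - s_self[is_labeled] + eps).sum()
    n_terms = (2 * unl.sum() + is_labeled.sum()).clamp(min=1)
    return (loss_unl + loss_lab) / n_terms
\end{verbatim}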
\noindent \textbf{Few-shot context-level contrasting.} Similarly, for a node $v_k$ in $\mathbfcal{V}^L_B$, we minimize the mutual information between its contextual embedding $\mathbf{h}^{(k)}_{c}$ and its own embedding $\mathbf{z}^{(k)}_{c}$ by treating them as a negative pair:
\begin{equation} \tilde{\mathbf{s}}^{(k)}_{c} = Bilinear\left( \mathbf{h}^{(k)}_{c}, \mathbf{z}^{(k)}_{c} \right) = \sigma\left(\mathbf{h}^{(k)}_{c} \mathbf{W}_c {\mathbf{z}^{(k)}_{c}}^\top \right). \label{eq:context-level anemone-fs negative score} \end{equation}
The intuition here is the same as in few-shot patch-level contrasting, and we also have two types of negatives in this module, which further assist the model in obtaining richer supervision signals from various perspectives. \\
In general, this proposed plug-and-play extension enhances the original self-supervised contrastive anomaly detection mechanism in \texttt{ANEMONE}\xspace by constructing extra negative pairs with the limited labeled anomalies available, enabling the model to achieve better performance in more realistic application scenarios.
\subsection{Statistical Graph Anomaly Scorer} \label{subsec: scorer} So far, we have introduced two contrastive mechanisms in \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace for different graph anomaly detection tasks. After the model is well-trained, we propose a universal statistical graph anomaly scorer for both \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace to calculate the anomaly score of each node in $\mathcal{G}$ during the model inference. Specifically, we first generate $R$ subgraphs centred at a target node $v_i$ for both contrastive networks. Then, we calculate the patch-level and context-level contrastive scores accordingly, i.e., $[\mathbf{s}^{(i)}_{p,1}, \cdots, \mathbf{s}^{(i)}_{p,R}, \mathbf{s}^{(i)}_{c,1}, \cdots, \mathbf{s}^{(i)}_{c,R},\tilde{\mathbf{s}}^{(i)}_{p,1}, \cdots, \tilde{\mathbf{s}}^{(i)}_{p,R}, \tilde{\mathbf{s}}^{(i)}_{c,1}, \cdots, \tilde{\mathbf{s}}^{(i)}_{c,R}]$. After this, we define the base patch-level and context-level anomaly scores of $v_i$ as follows:
\begin{equation} b^{(i)}_{view,j} = \tilde{\mathbf{s}}^{(i)}_{view,j} - \mathbf{s}^{(i)}_{view,j}, \label{eq:base score} \end{equation} where $j \in \{1, \cdots, R\}$, and ``view'' corresponds to $p$ or $c$ to denote the patch-level or context-level base score, respectively. If $v_i$ is a normal node, then $\mathbf{s}^{(i)}_{view,j}$ and $\tilde{\mathbf{s}}^{(i)}_{view,j}$ are expected to be close to 1 and 0, respectively, which brings $b^{(i)}_{view,j}$ close to $-1$. Otherwise, if $v_i$ is an anomaly, then $\mathbf{s}^{(i)}_{view,j}$ and $\tilde{\mathbf{s}}^{(i)}_{view,j}$ are close to 0.5 due to the mismatch between $v_i$ and its surrounding contexts, resulting in $b^{(i)}_{view,j} \rightarrow 0$. Therefore, $b^{(i)}_{view,j}$ typically lies in the range $[-1, 0]$.
Although we can directly use the base anomaly score $b^{(i)}_{view,j}$ to indicate whether $v_i$ is an anomaly, we design a more sophisticated statistical abnormality scorer to calculate the final patch-level and context-level anomaly scores $y^{(i)}_{view}$ of $v_i$ based on $b^{(i)}_{view,j}$:
\begin{equation} \begin{aligned} \bar{b}^{(i)}_{view} &={\sum_{j=1}^{R} b^{(i)}_{view,j} }/{R}, \\ y^{(i)}_{view} &= \bar{b}^{(i)}_{view} + \sqrt{{\sum_{j=1}^{R}\left(b^{(i)}_{view,j} - \bar{b}^{(i)}_{view}\right)^{2}}/{R}}. \end{aligned} \label{eq:final score v1} \end{equation}
The underlying intuitions behind the above equation are: (1) An abnormal node usually has a larger base anomaly score; (2) The base scores of an abnormal node are typically unstable (i.e., with a larger standard deviation) under $R$ evaluation rounds. Finally, we anneal $y^{(i)}_{p}$ and $y^{(i)}_{c}$ to obtain the final anomaly score of $v_i$ with a tunable hyper-parameter $\alpha \in [0, 1]$ to balance the importance of two anomaly scores at different scales:
\begin{equation} y^{(i)} = \alpha y^{(i)}_{c} + (1 - \alpha) y^{(i)}_{p}. \label{eq:final score v2} \end{equation}
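Putting Eqs. \eqref{eq:base score}--\eqref{eq:final score v2} together, the inference-time scorer for a single node reduces to a few lines; the value of $\alpha$ below is only a placeholder.
\begin{verbatim}
import torch

def anomaly_score(s_pos, s_neg, alpha=0.6):
    """s_pos, s_neg: (R, 2) positive/negative contrastive scores over R
    evaluation rounds; column 0 = patch level, column 1 = context level."""
    b = s_neg - s_pos                                  # Eq. (base score)
    y = b.mean(dim=0) + b.std(dim=0, unbiased=False)   # Eq. (final score v1)
    y_p, y_c = y[0], y[1]
    return alpha * y_c + (1 - alpha) * y_p             # Eq. (final score v2)
\end{verbatim}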
\begin{algorithm}[t]
\caption{The Proposed \texttt{ANEMONE}\xspace Algorithm}
\label{algo: anemone}
\textbf{Input}: Attributed graph $\mathcal{G}$ with a set of unlabeled nodes $\mathbfcal{V}$; Maximum training epochs $E$; Batch size $B$; Number of evaluation rounds $R$. \\
\textbf{Output}: Well-trained graph anomaly detection model $\mathcal{F}^{*}(\cdot)$. \\
\begin{algorithmic}[1]
\STATE Randomly initialize the trainable parameters $\mathbf{\Theta}$, $\mathbf{\Phi}$, $\mathbf{W}_p$, and $\mathbf{W}_c$;
\STATE $/*$ {\it Model training} $*/$
\FOR{$e \in 1,2,\cdots,E$}
\STATE $\mathbfcal{B} \leftarrow$ Randomly split $\mathbfcal{V}$ into batches with size $B$;
\FOR{batch $\widetilde{\mathbfcal{B}}=(v_{1},\cdots,v_{B}) \in \mathbfcal{B}$}
\STATE Sample two anonymized subgraphs for each node in $\widetilde{\mathbfcal{B}}$, i.e., $\{\mathcal{G}_p^{(1)},\cdots,\mathcal{G}_p^{(B)}\}$ and $\{\mathcal{G}_c^{(1)},\cdots,\mathcal{G}_c^{(B)}\}$;
\STATE Calculate the masked and original node embeddings via Eq. \eqref{eq:gnn}, \eqref{eq:mlp};
\STATE Calculate the masked node and its contextual embeddings via Eq. \eqref{eq:gnn2}, \eqref{eq:readout}, and \eqref{eq:mlp2};
\STATE Calculate the patch-level positive and negative scores for each node in $\widetilde{\mathbfcal{B}}$ via Eq. \eqref{eq:patch-level anemone positive score} and \eqref{eq:patch-level anemone negative score};
\STATE Calculate the context-level positive and negative scores for each node in $\widetilde{\mathbfcal{B}}$ via Eq. \eqref{eq:context-level anemone positive score} and \eqref{eq:context-level anemone negative score};
\STATE Calculate the loss $\mathcal{L}$ via Eq. \eqref{eq:patch-level loss}, \eqref{eq:context-level loss}, and \eqref{eq:loss};
\STATE Back propagate to update trainable parameters $\mathbf{\Theta}$, $\mathbf{\Phi}$, $\mathbf{W}_p$, and $\mathbf{W}_c$;
\ENDFOR
\ENDFOR
\STATE $/*$ {\it Model inference} $*/$
\FOR{$v_i \in \mathcal{V}$}
\FOR{evaluation round $r \in 1,2,\cdots,R$}
\STATE Calculate $b^{(i)}_p$ and $b^{(i)}_c$ via Eq. \eqref{eq:base score};
\ENDFOR
\STATE Calculate the final patch-level and context-level anomaly scores $y^{(i)}_p$ and $y^{(i)}_c$ over $R$ evaluation rounds via Eq. \eqref{eq:final score v1};
\STATE Calculate the final anomaly score $y^{(i)}$ of $v_i$ via Eq. \eqref{eq:final score v2};
\ENDFOR
\end{algorithmic} \end{algorithm}
\begin{algorithm}[t]
\caption{The Proposed \texttt{ANEMONE-FS}\xspace Algorithm}
\label{algo: anemone-fs}
\textbf{Input}: Attributed graph $\mathcal{G}$ with a set of labeled and unlabeled nodes $\mathbfcal{V}=\{\mathbfcal{V}^L, \mathbfcal{V}^U\}$ where $|\mathbfcal{V}^L| \ll |\mathbfcal{V}^U|$; Maximum training epochs $E$; Batch size $B$; Number of evaluation rounds $R$. \\
\textbf{Output}: Well-trained graph anomaly detection model $\mathcal{F}^{*}(\cdot)$. \\
\begin{algorithmic}[1]
\STATE Randomly initialize the trainable parameters $\mathbf{\Theta}$, $\mathbf{\Phi}$, $\mathbf{W}_p$, and $\mathbf{W}_c$;
\STATE $/*$ {\it Model training} $*/$
\FOR{$e \in 1,2,\cdots,E$}
\STATE $\mathbfcal{B} \leftarrow$ Randomly split $\mathbfcal{V}$ into batches with size $B$;
\FOR{batch $\widetilde{\mathbfcal{B}}=\{\mathbfcal{V}^L_B, \mathbfcal{V}^U_B\}=(v_{1},\cdots,v_{B}) \in \mathbfcal{B}$}
\STATE Sample two anonymized subgraphs for each node in $\widetilde{\mathbfcal{B}}$, i.e., $\{\mathcal{G}_p^{(1)},\cdots,\mathcal{G}_p^{(B)}\}$ and $\{\mathcal{G}_c^{(1)},\cdots,\mathcal{G}_c^{(B)}\}$;
\STATE Calculate the masked and original node embeddings via Eq. \eqref{eq:gnn} and \eqref{eq:mlp};
\STATE Calculate the masked node and its contextual embeddings via Eq. \eqref{eq:gnn2}, \eqref{eq:readout}, and \eqref{eq:mlp2};
\STATE Calculate the patch-level positive and negative scores for each node in $\mathbfcal{V}^U_B$ via Eq. \eqref{eq:patch-level anemone positive score} and \eqref{eq:patch-level anemone negative score};
\STATE Calculate the patch-level extra negative scores for each node in $\mathbfcal{V}^L_B$ via Eq. \eqref{eq:patch-level anemone-fs negative score};
\STATE Calculate the context-level positive and negative scores for each node in $\mathbfcal{V}^U_B$ via Eq. \eqref{eq:context-level anemone positive score} and \eqref{eq:context-level anemone negative score};
\STATE Calculate the context-level extra negative scores for each node in $\mathbfcal{V}^L_B$ via Eq. \eqref{eq:context-level anemone-fs negative score};
\STATE Calculate the loss $\mathcal{L}$ via Eq. \eqref{eq:patch-level loss}, \eqref{eq:context-level loss}, and \eqref{eq:loss};
\STATE Back propagate to update trainable parameters $\mathbf{\Theta}$, $\mathbf{\Phi}$, $\mathbf{W}_p$, and $\mathbf{W}_c$;
\ENDFOR
\ENDFOR
\STATE $/*$ {\it Model inference} $*/$
\FOR{$v_i \in \mathbfcal{V}^U$}
\FOR{evaluation round $r \in 1,2,\cdots,R$}
\STATE Calculate $b^{(i)}_p$ and $b^{(i)}_c$ via Eq. \eqref{eq:base score};
\ENDFOR
\STATE Calculate the final patch-level and context-level anomaly scores $y^{(i)}_p$ and $y^{(i)}_c$ over $R$ evaluation rounds via Eq. \eqref{eq:final score v1};
\STATE Calculate the final anomaly score $y^{(i)}$ of $v_i$ via Eq. \eqref{eq:final score v2};
\ENDFOR
\end{algorithmic} \end{algorithm}
\subsection{Model Training and Algorithms} \label{subsec: optimization}
\noindent \textbf{Model training.} By combining the patch-level and context-level contrastive losses defined in Equations \ref{eq:patch-level loss} and \ref{eq:context-level loss}, we obtain the overall training objective by minimizing the following loss:
\begin{equation} \mathcal{L}= \alpha \mathcal{L}_{c} + (1 - \alpha) \mathcal{L}_{p}, \label{eq:loss} \end{equation} where $\alpha$ is the same as in Equation \ref{eq:final score v2} and balances the importance of the two contrastive modules.
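In code, the joint objective of Eq. \eqref{eq:loss} is a one-line combination of the two losses sketched earlier; the default value of $\alpha$ here is merely a placeholder.
\begin{verbatim}
import torch

def total_loss(loss_p: torch.Tensor, loss_c: torch.Tensor,
               alpha: float = 0.6) -> torch.Tensor:
    """Eq. (loss): convex combination of the two contrastive losses."""
    return alpha * loss_c + (1 - alpha) * loss_p
\end{verbatim}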
The overall procedures of \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace are summarized in Algorithms \ref{algo: anemone} and \ref{algo: anemone-fs}. Specifically, in \texttt{ANEMONE}\xspace, we first sample a batch of nodes from the input attributed graph (line 5). Then, we calculate the positive and negative contrastive scores for each node (lines 6-10) to obtain the multi-scale contrastive losses, which are combined into the overall training loss (line 11) to update all trainable parameters (line 12). For \texttt{ANEMONE-FS}\xspace, the differences are two-fold. Firstly, it takes an attributed graph with a few available labeled anomalies as the input. Secondly, for labeled anomalies and unlabeled nodes in a batch, it has different contrastive routes in the patch-level and context-level contrastive networks (lines 9-12). During the model inference, \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace share the same anomaly scoring mechanism, where the statistical anomaly score for each node in $\mathbfcal{V}$ or $\mathbfcal{V}^U$ is calculated (lines 16-22 in Algorithm \ref{algo: anemone} and lines 18-24 in Algorithm \ref{algo: anemone-fs}).
\noindent \textbf{Complexity analysis.} We analyse the time complexity of \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace algorithms in this subsection. For the shared anonymized subgraph sampling module, the time complexity of using RWR algorithm to sample a subgraph centred at $v_i$ is $\mathcal{O}(Kd)$, where $K$ and $d$ are the number of nodes in a subgraph and the average node degree in $\mathcal{G}$. Regarding the two proposed contrastive modules, their time complexities are mainly contributed by the underlying graph encoders, which are $\mathcal{O}(K^2)$. Thus, given $N$ nodes in $\mathbfcal{V}$, the time complexity of model training is $\mathcal{O}\big(NK(d+K)\big)$ in both \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace. During the model inference, the time complexity of \texttt{ANEMONE}\xspace is $\mathcal{O}\big(RNK(d+K)\big)$, where $R$ denotes the total evaluation rounds. For \texttt{ANEMONE-FS}\xspace, its inference time complexity is $\mathcal{O}\big(RN^UK(d+K)\big)$, where $N^U = |\mathbfcal{V^U}|$ denotes the number of unlabeled nodes in $\mathcal{G}$.
\section{Experiments} \label{sec:experiments} \begin{table}[t]
\centering
\caption{The statistics of the datasets. The upper four datasets are citation networks, and the remaining two are social networks.}
\begin{tabular}{@{}c|c|c|c|c@{}}
\toprule
\textbf{Dataset} & \textbf{Nodes} & \textbf{Edges} & \textbf{Features} & \textbf{Anomalies} \\
\midrule
\textbf{Cora} \cite{sen2008collective} & 2,708 & 5,429 & 1,433 & 150 \\
\textbf{CiteSeer} \cite{sen2008collective} & 3,327 & 4,732 & 3,703 & 150 \\
\textbf{PubMed} \cite{sen2008collective} & 19,717 & 44,338 & 500 & 600 \\
\textbf{ACM} \cite{tang2008arnetminer} & 16,484 & 71,980 & 8,337 & 600 \\
\textbf{BlogCatalog} \cite{tang2009relational} & 5,196 & 171,743 & 8,189 & 300 \\
\textbf{Flickr} \cite{tang2009relational} & 7,575 & 239,738 & 12,407 & 450 \\
\bottomrule
\end{tabular}
\label{table:dataset} \end{table}
In this section, we conduct a series of experiments to evaluate the anomaly detection performance of the proposed \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace under both unsupervised and few-shot learning scenarios. Specifically, we address the following research questions through experimental analysis:
\begin{figure*}
\caption{The comparison of ROC curves on four datasets in unsupervised learning scenario.}
\label{subfig:parameter}
\label{fig:roc}
\end{figure*}
\begin{table*}[!htbp]
\small
\centering
\caption{The comparison of anomaly detection performance (i.e., AUC) in unsupervised learning scenario. The best performance is highlighted in \textbf{bold}.}
{
\begin{tabular}{p{85 pt}<{}|p{45 pt}<{\centering}p{45 pt}<{\centering}p{45 pt}<{\centering}p{45 pt}<{\centering}p{45 pt}<{\centering}p{45 pt}<{\centering}}
\toprule
Method & Cora & CiteSeer & PubMed & ACM & BlogCatalog & Flickr \\
\midrule
AMEN \cite{amen_perozzi2016scalable} & 0.6266 & 0.6154 & 0.7713 & 0.5626 & 0.6392 & 0.6573 \\
Radar \cite{radar_li2017radar} & 0.6587 & 0.6709 & 0.6233 & 0.7247 & 0.7401 & 0.7399 \\
ANOMALOUS \cite{anomalous_peng2018anomalous} & 0.5770 & 0.6307 & 0.7316 & 0.7038 & 0.7237 & 0.7434 \\
\midrule
DGI \cite{dgi_velickovic2019deep} & 0.7511 & 0.8293 & 0.6962 & 0.6240 & 0.5827 & 0.6237 \\
DOMINANT \cite{dominant_ding2019deep} & 0.8155 & 0.8251 & 0.8081 & 0.7601 & 0.7468 & 0.7442 \\
CoLA \cite{cola_liu2021anomaly} & 0.8779 & 0.8968 & 0.9512 & 0.8237 & 0.7854 & 0.7513 \\
\midrule
\texttt{ANEMONE}\xspace & \textbf{0.9057} & \textbf{0.9189} & \textbf{0.9548} & \textbf{0.8709} & \textbf{0.8067} & \textbf{0.7637} \\
\bottomrule
\end{tabular}
} \label{table:overall_unsup} \end{table*}
\begin{table*}[!htbp]
\small
\centering
\caption{The comparison of anomaly detection performance (i.e., AUC) in few-shot learning scenario. The best performance is highlighted in \textbf{bold}.}
{
\begin{tabular}{p{85 pt}<{}|p{45 pt}<{\centering}p{45 pt}<{\centering}p{45 pt}<{\centering}p{45 pt}<{\centering}p{45 pt}<{\centering}p{45 pt}<{\centering}}
\toprule
Method & Cora & CiteSeer & PubMed & ACM & BlogCatalog & Flickr \\
\midrule
AMEN \cite{amen_perozzi2016scalable} & 0.6257&0.6103&0.7725&0.5632&0.6358&0.6615\\
Radar \cite{radar_li2017radar} &0.6589&0.6634&0.6226&0.7253&0.7461&0.7357\\
ANOMALOUS \cite{anomalous_peng2018anomalous} &0.5698&0.6323&0.7283&0.6923&0.7293&0.7504\\
\midrule
DGI \cite{dgi_velickovic2019deep} &0.7398&0.8347&0.7041&0.6389&0.5936&0.6295\\
DOMINANT \cite{dominant_ding2019deep} &0.8202&0.8213&0.8126&0.7558&0.7391&0.7526\\
CoLA \cite{cola_liu2021anomaly} &0.8810&0.8878&0.9517&0.8272&0.7816&0.7581\\
\midrule
DeepSAD \cite{deepsad_ruff2019deep} & 0.4909& 0.5269& 0.5606& 0.4545& 0.6277& 0.5799\\
SemiGNN \cite{semignn_wang2019semi} & 0.6657& 0.7297& OOM& OOM& 0.5289& 0.5426\\
GDN \cite{gdn_ding2021few} &0.7577&0.7889&0.7166&0.6915&0.5424&0.5240\\
\midrule
\texttt{ANEMONE}\xspace & {0.8997} & {0.9191} & {0.9536} & {0.8742} & {0.8025} & {0.7671} \\
\texttt{ANEMONE-FS}\xspace & \textbf{0.9155} & \textbf{0.9318} & \textbf{0.9561} & \textbf{0.8955} & \textbf{0.8124} & \textbf{0.7781} \\
\bottomrule
\end{tabular}
} \label{table:overall_fs} \end{table*}
\begin{itemize}
\item \textit{RQ1:} How do the proposed \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace perform in comparison to state-of-the-art graph anomaly detection methods?
\item \textit{RQ2:} How does the performance of \texttt{ANEMONE-FS}\xspace change by providing different numbers of labeled anomalies?
\item \textit{RQ3:} How does the contrastiveness at the patch level and the context level influence the performance of \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace?
\item \textit{RQ4:} How do the key hyper-parameters impact the performance of \texttt{ANEMONE}\xspace? \end{itemize}
\subsection{Datasets}
We conduct experiments on six commonly used datasets for graph anomaly detection, including four citation network datasets \cite{sen2008collective,tang2008arnetminer} (i.e., Cora, CiteSeer, PubMed, and ACM) and two social network datasets \cite{tang2009relational} (i.e., BlogCatalog and Flickr). Dataset statistics are summarized in Table \ref{table:dataset}.
Since ground-truth anomalies are inaccessible for these datasets, we follow previous works \cite{dominant_ding2019deep,cola_liu2021anomaly} to inject two types of synthetic anomalies (i.e., structural anomalies and contextual anomalies) into the original graphs. For structural anomaly injection, we use the injection strategy proposed by \cite{anoinj_s_ding2019interactive}: several groups of nodes are randomly selected from the graph, and then we make the nodes within each group fully connected to each other. In this way, such nodes can be regarded as structural anomalies. To generate contextual anomalies, following \cite{anoinj_c_song2007conditional}, a target node along with $50$ auxiliary nodes are randomly sampled from the graph. Then, we replace the features of the target node with the features of the farthest auxiliary node (i.e., the auxiliary node whose features have the largest Euclidean distance to those of the target node). In this way, the target node becomes a contextual anomaly. We inject the two types of anomalies in equal quantities, and the total number is provided in the last column of Table \ref{table:dataset}.
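For reference, the following Python sketch illustrates the two injection strategies described above on an adjacency matrix and a feature matrix. The group counts and sizes in the toy usage are illustrative placeholders; in our experiments the number of auxiliary nodes is $50$ and the injected totals are those reported in Table \ref{table:dataset}.
\begin{verbatim}
import numpy as np

def inject_structural(adj, n_groups, group_size, rng):
    # Randomly pick groups of nodes and fully connect each group (clique).
    anomalies = []
    for _ in range(n_groups):
        group = rng.choice(adj.shape[0], size=group_size, replace=False)
        for u in group:
            for v in group:
                if u != v:
                    adj[u, v] = 1
        anomalies.extend(group.tolist())
    return anomalies

def inject_contextual(feat, n_targets, n_aux, rng):
    # Replace each target node's features with those of the farthest of
    # n_aux randomly sampled auxiliary nodes (largest Euclidean distance).
    anomalies = []
    for _ in range(n_targets):
        nodes = rng.choice(feat.shape[0], size=n_aux + 1, replace=False)
        target, aux = nodes[0], nodes[1:]
        dist = np.linalg.norm(feat[aux] - feat[target], axis=1)
        feat[target] = feat[aux[np.argmax(dist)]]
        anomalies.append(int(target))
    return anomalies

# toy usage on a random graph with 100 nodes and 16-dimensional features
rng = np.random.default_rng(0)
A = np.zeros((100, 100), dtype=int)
X = rng.normal(size=(100, 16))
structural = inject_structural(A, n_groups=2, group_size=5, rng=rng)
contextual = inject_contextual(X, n_targets=5, n_aux=50, rng=rng)
\end{verbatim}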
\subsection{Baselines}
We compare our proposed \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace with three types of baseline methods, including (1) shallow learning-based unsupervised methods (i.e., AMEN \cite{amen_perozzi2016scalable}, Radar \cite{radar_li2017radar}, and ANOMALOUS \cite{anomalous_peng2018anomalous}), (2) deep learning-based unsupervised methods (i.e., DGI \cite{dgi_velickovic2019deep}, DOMINANT \cite{dominant_ding2019deep}, and CoLA \cite{cola_liu2021anomaly}), and (3) semi-supervised methods (i.e., DeepSAD \cite{deepsad_ruff2019deep}, SemiGNN \cite{semignn_wang2019semi}, and GDN \cite{gdn_ding2021few}). Details of these methods are introduced as follows: \begin{itemize}
\item \textbf{AMEN} \cite{amen_perozzi2016scalable} is an unsupervised graph anomaly detection method which detects anomalies by analyzing the attribute correlation of the ego-networks of nodes.
\item \textbf{Radar} \cite{radar_li2017radar} identifies anomalies in graphs by residual and attribute-structure coherence analysis.
\item \textbf{ANOMALOUS} \cite{anomalous_peng2018anomalous} is an unsupervised method for attributed graphs, which performs anomaly detection via CUR decomposition and residual analysis.
\item \textbf{DGI} \cite{dgi_velickovic2019deep} is an unsupervised contrastive learning method for representation learning. In DGI, we use the score computation module in \cite{cola_liu2021anomaly} to estimate nodes' abnormality.
\item \textbf{DOMINANT} \cite{dominant_ding2019deep} is a deep graph autoencoder-based unsupervised method that detects anomalies by evaluating the reconstruction errors of each node.
\item \textbf{CoLA} \cite{cola_liu2021anomaly} is a contrastive learning-based anomaly detection method which captures anomalies with a GNN-based contrastive framework.
\item \textbf{DeepSAD} \cite{deepsad_ruff2019deep} is a deep learning-based anomaly detection method for non-structured data. We take node attributes as the input of DeepSAD.
\item \textbf{SemiGNN} \cite{semignn_wang2019semi} is a semi-supervised fraud detection method that uses an attention mechanism to model the correlation between different neighbors/views.
\item \textbf{GDN} \cite{gdn_ding2021few} is a GNN-based model that detects anomalies in few-shot learning scenarios. It leverages a deviation loss to train the detection model in an end-to-end manner. \end{itemize}
\subsection{Experimental Setting}
\mysubsubtitle{Evaluation Metric} We employ a widely used metric, AUC-ROC \cite{dominant_ding2019deep,cola_liu2021anomaly}, to evaluate the performance of different anomaly detection methods. The ROC curve plots the true positive rate against the false positive rate, and the AUC value is the area under the ROC curve. The value of AUC is within the range $[0,1]$, and a larger value represents stronger detection performance. To reduce the bias caused by randomness and to compare fairly \cite{cola_liu2021anomaly}, for all datasets, we conduct a $5$-run experiment and report the average performance.\\
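As an illustration of this protocol, the short Python snippet below averages the AUC over five runs using \texttt{scikit-learn}; the labels and scores here are random placeholders standing in for the model outputs.
\begin{verbatim}
import numpy as np
from sklearn.metrics import roc_auc_score

auc_runs = []
for seed in range(5):                                    # the 5-run protocol
    rng = np.random.default_rng(seed)
    y_true = rng.integers(0, 2, size=1000)               # 1 = anomaly, 0 = normal
    y_score = y_true + rng.normal(0.0, 0.8, size=1000)   # stand-in anomaly scores
    auc_runs.append(roc_auc_score(y_true, y_score))
print("mean AUC over 5 runs:", float(np.mean(auc_runs)))
\end{verbatim}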
\mysubsubtitle{Dataset Partition} In an unsupervised learning scenario, we use graph data $\mathcal{G}=(\mathbf{X},\mathbf{A})$ to train the models, and evaluate the anomaly detection performance on the full node set $\mathbfcal{V}$ (including all normal and abnormal nodes).
In the few-shot learning scenario, we train the models with $\mathcal{G}$ and $k$ anomalous labels (where $k$ is the size of the labeled node set $|\mathbfcal{V}^L|$), and use the remaining node set $\mathbfcal{V}^U$ to measure the models' performance. \\
\mysubsubtitle{Parameter Settings} In our implementation, the subgraph size $K$ and the dimension of embeddings are fixed to $4$ and $64$, respectively. The trade-off parameter $\alpha$ is searched in $\{0.2, 0.4, 0.6, 0.8, 1\}$. The number of testing rounds of the anomaly estimator is set to $256$. We train the model with the Adam optimizer with a learning rate of $0.001$. For Cora, Citeseer, and Pubmed datasets, we train the model for $100$ epochs; for ACM, BlogCatalog, and Flickr datasets, the numbers of epochs are $2000$, $1000$, and $500$, respectively.
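The settings above can be summarized in a single configuration dictionary, shown below for convenience; the value of $\alpha$ is an illustrative pick from the search grid rather than a fixed choice.
\begin{verbatim}
# Hyper-parameter configuration used in our experiments (values as stated above)
config = {
    "subgraph_size": 4,            # K
    "embedding_dim": 64,           # D'
    "alpha": 0.6,                  # illustrative; searched in {0.2, 0.4, 0.6, 0.8, 1}
    "eval_rounds": 256,            # R, testing rounds of the anomaly estimator
    "optimizer": "Adam",
    "learning_rate": 1e-3,
    "epochs": {"Cora": 100, "CiteSeer": 100, "PubMed": 100,
               "ACM": 2000, "BlogCatalog": 1000, "Flickr": 500},
}
\end{verbatim}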
\subsection{Performance Comparison (RQ1)}
We evaluate \texttt{ANEMONE}\xspace in the unsupervised learning scenario where labeled anomalies are unavailable, and evaluate \texttt{ANEMONE-FS}\xspace in the few-shot learning scenario ($k=10$) where $10$ annotated anomalies are known during model training. \\
\mysubsubtitle{Performance in Unsupervised Learning Scenario} In the unsupervised scenario, we compare \texttt{ANEMONE}\xspace with $6$ unsupervised baselines. The ROC curves on $4$ representative datasets are illustrated in Fig. \ref{fig:roc}, and the comparison of AUC values on all $6$ datasets is provided in Table \ref{table:overall_unsup}. From these results, we have the following observations. \begin{itemize}
\item \texttt{ANEMONE}\xspace consistently outperforms all baselines on six benchmark datasets. The performance gain is due to (1) the two-level contrastiveness successfully capturing anomalous patterns in different scales and (2) the well-designed anomaly estimator effectively measuring the abnormality of each node.
\item The deep learning-based methods significantly outperform the shallow learning-based methods, which illustrates the capability of GNNs in modeling data with complex network structures and high-dimensional features.
\item The ROC curves by \texttt{ANEMONE}\xspace are very close to the points in the upper left corner, indicating our method can precisely discriminate abnormal samples from a large number of normal samples. \end{itemize}
\begin{table}[t]
\small
\centering
\caption{Few-shot performance analysis of \texttt{ANEMONE-FS}\xspace.}
{
\begin{tabular}{l|cccc}
\toprule
Setting & Cora & CiteSeer & ACM & BlogCatalog \\
\midrule
Unsup. & 0.8997&0.9191&0.8742&0.8025\\
\midrule
1-shot &0.9058&0.9184&0.8858&0.8076\\
3-shot &0.9070&0.9199&0.8867&0.8125\\
5-shot &0.9096&0.9252&0.8906&0.8123\\
10-shot &0.9155&0.9318&0.8955&0.8124\\
15-shot &0.9226&0.9363&0.8953&0.8214\\
20-shot &0.9214&0.9256&0.8965&0.8228\\
\bottomrule
\end{tabular}
} \label{table:fewshot} \end{table}
\mysubsubtitle{Performance in Few-shot Learning Scenario} In the few-shot learning scenario, we consider both unsupervised and semi-supervised baseline methods for the comparison with our methods. The results are demonstrated in Table \ref{table:overall_fs}. As we can observe, \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace achieve consistently better performance than all baselines, which validates that our methods can handle the few-shot learning setting as well. Also, we find that \texttt{ANEMONE-FS}\xspace performs better than \texttt{ANEMONE}\xspace, meaning that our proposed solution for few-shot learning can further leverage the knowledge from a small number of labeled anomalies. In comparison, the semi-supervised learning methods (i.e., DeepSAD, SemiGNN, and GDN) do not show a competitive performance, indicating their limited capability in exploiting the label information.
\subsection{Few-shot Performance Analysis (RQ2)}
\begin{figure}
\caption{Anomaly detection performance with different selection of trade-off parameter $\alpha$ in unsupervised and few-shot learning scenarios.}
\label{fig:ablation}
\end{figure}
\begin{figure*}
\caption{Parameter sensitivities of \texttt{ANEMONE}\xspace w.r.t. three hyper-parameters on six benchmark datasets.}
\label{subfig:round}
\label{subfig:subg}
\label{subfig:dim}
\label{fig:param}
\end{figure*}
In order to verify the effectiveness of \texttt{ANEMONE-FS}\xspace in different few-shot anomaly detection settings, we change the number $k$ of anomalous samples for model training to form $k$-shot learning settings for evaluation. We perform experiments on four datasets (i.e., Cora, CiteSeer, ACM, and BlogCatalog) and select $k$ from $\{1,3,5,10,15,20\}$. The experimental results are demonstrated in Table \ref{table:fewshot} where the performance in unsupervised setting (denoted as ``Unsup.'') is also reported as a baseline.
The results show that \texttt{ANEMONE-FS}\xspace can achieve good performance even when only one labeled anomaly is provided (i.e., the 1-shot setting). A representative example is the result on the ACM dataset, where a single labeled anomaly brings a $1.16\%$ performance gain. Such an observation indicates that \texttt{ANEMONE-FS}\xspace can effectively leverage the knowledge from scarce labeled samples to better model the anomalous patterns. Another finding is that the anomaly detection performance generally increases with the growth of $k$, especially when $k\leq15$. This finding demonstrates that \texttt{ANEMONE-FS}\xspace can further optimize the anomaly detection model when more labeled anomalies are given.
\subsection{Ablation Study (RQ3)}
In this experiment, we investigate the contribution of patch- and context-level contrastiveness to the anomaly detection performance of \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace. Concretely, we adjust the value of the trade-off parameter $\alpha$, and the results are illustrated in Fig. \ref{fig:ablation}. Note that $\alpha = 0$ and $\alpha = 1$ mean that the model only considers patch-level and context-level contrastive learning, respectively.
As we can observe in Fig. \ref{fig:ablation}, \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace achieve the highest AUC values when $\alpha$ is between $0.2$ and $0.8$, and the best selections of $\alpha$ for each dataset are quite different. Accordingly, we conclude that jointly considering the contrastiveness at both levels consistently brings the best detection performance. We also notice that on some datasets (i.e., Cora, CiteSeer, and PubMed) the context-level contrastive network performs better than the patch-level one, while on the remaining datasets the patch-level contrastiveness brings better results. This suggests that the two types of contrastiveness have unique contributions in identifying anomalies from network data with diverse properties.
\subsection{Parameter Sensitivity (RQ4)}
To study how our method is impacted by the key hyper-parameters, we conduct experiments for \texttt{ANEMONE}\xspace with different selections of evaluation rounds $R$, subgraph size $K$, and hidden dimension $D'$.
\mysubsubtitle{Evaluation Rounds} To explore the sensitivity of \texttt{ANEMONE}\xspace to the number of evaluation rounds $R$, we tune $R$ from $1$ to $512$ on six datasets, and the results are demonstrated in Fig. \ref{subfig:round}. As we can find in the figure, the detection performance is relatively poor when $R<4$, indicating that too few evaluation rounds are insufficient to represent the abnormality of each node. When $R$ is between $4$ and $256$, we can witness a significant growing trend of AUC following the increase of $R$, which demonstrates that adding evaluation rounds within a certain range can significantly enhance the performance of \texttt{ANEMONE}\xspace. An over-large $R$ ($R=512$) does not boost the performance further but brings a heavier computational cost. Hence, we fix $R=256$ in our experiments to balance performance and efficiency.
\mysubsubtitle{Subgraph Size} In order to investigate the impact of the subgraph size, we search the node number $K$ of the contextual subgraph in the range of $\{2,3,\cdots,10\}$. We plot the results in Fig. \ref{subfig:subg}. We find that \texttt{ANEMONE}\xspace is not sensitive to the choice of $K$ on all datasets except Flickr, which verifies the robustness of our method. For citation networks (i.e., Cora, CiteSeer, PubMed, and ACM), a suitable subgraph size between $3$ and $5$ results in the best performance. Differently, BlogCatalog requires a larger subgraph to consider more contextual information, while Flickr needs a smaller augmented subgraph for contrastive learning.
\mysubsubtitle{Hidden Dimension} In this experiment, we study the selection of the hidden dimension $D'$ in \texttt{ANEMONE}\xspace. We alter the value of $D'$ from $2$ to $256$, and the effect of $D'$ on AUC is illustrated in Fig. \ref{subfig:dim}. As shown in the figure, when $D'$ is within $[2,64]$, there is a significant boost in anomaly detection performance with the growth of $D'$. This observation indicates that node embeddings with a larger length can help \texttt{ANEMONE}\xspace capture more complex information. We also find that the performance gain becomes marginal when $D'$ is further enlarged. Consequently, we set $D'=64$ in our main experiments.
\section{Conclusion} \label{sec:conclusion} In this paper, we investigate the problem of graph anomaly detection. By jointly capturing anomalous patterns from multiple scales with both patch-level and context-level contrastive learning, we propose a novel algorithm, \texttt{ANEMONE}\xspace, to learn the representation of nodes in a graph. With a statistical anomaly estimator that captures the agreement from multiple perspectives, we predict an anomaly score for each node so that anomaly detection can be conducted subsequently. As a handful of ground-truth anomalies may be available in real applications, we further extend our method to \texttt{ANEMONE-FS}\xspace, which utilizes labeled anomalies to handle the few-shot graph anomaly detection setting. Experiments on six benchmark datasets validate the performance of the proposed \texttt{ANEMONE}\xspace and \texttt{ANEMONE-FS}\xspace.
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{figs/yuzheng.jpg}}]{Yu Zheng} received the B.S. and M.S. degrees in computer science from Northwest A\&F University, China, in 2008 and 2011, respectively. She is currently pursuing her Ph.D. degree in computer science at La Trobe University, Melbourne, Australia. Her research interests include image classification, data mining, and machine learning. \end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{figs/mingjin.jpg}}]{Ming Jin} received the B.Eng. degree from the Hebei University of Technology, Tianjin, China, in 2017, and the M.Inf.Tech. degree from the University of Melbourne, Melbourne, Australia, in 2019. He is currently pursuing his Ph.D. degree in computer science at Monash University, Melbourne, Australia. His research focuses on graph neural networks (GNNs), time series analysis, data mining, and machine learning. \end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{figs/yixin-bio.jpg}}]{Yixin Liu} received the B.S. degree and M.S. degree from Beihang University, Beijing, China, in 2017 and 2020, respectively. He is currently pursuing his Ph.D. degree in computer science at Monash University, Melbourne, Australia. His research concentrates on data mining, machine learning, and deep learning on graphs. \end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{figs/lianhua.jpg}}]{Lianhua Chi} received the dual Ph.D. degrees in computer science from the University of Technology Sydney, Australia, and the Huazhong University of Science and Technology, Wuhan, China, in 2015. She was a Post-Doctoral Research Scientist in IBM Research Melbourne. Dr. Chi was a recipient of the Best Paper Award in PAKDD in 2013. Currently, she is a Lecturer with the Department of Computer Science and Information Technology at La Trobe University since 2018. Her current research interests include data mining, machine learning and big data hashing. \end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{figs/phan.jpg}}]{Khoa T. Phan} received the B.Eng. degree in telecommunications (First Class Hons.) from the University of New South Wales (UNSW), Sydney, NSW, Australia, in 2006, the M.Sc. degrees in electrical engineering from the University of Alberta, Edmonton, AB, Canada, and the California Institute of Technology (Caltech), Pasadena, CA, USA, in 2008 and 2009, respectively, and the Ph.D. degree in electrical engineering from McGill University, Montreal, QC, Canada, in 2017.
He is currently a Senior Lecturer and Australia Research Council (ARC) Discovery Early Career Researcher Award (DECRA) Fellow with the Department of Computer Science and Information Technology, La Trobe University, Victoria, Australia. His current research interests are broadly design, control, optimization, and operation of 5G mobile communications networks with applications in the Internet of Things (IoT), satellite communications, machine-type communications (MTC), smart grids, and cloud computing.
\end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{figs/shirui-bio.jpg}}]{Shirui Pan} received a Ph.D. in computer science from the University of Technology Sydney (UTS), Ultimo, NSW, Australia. He is an ARC Future Fellow (2022-2025) and Senior Lecturer with the Faculty of Information Technology, Monash University, Australia. His research interests include data mining and machine learning. To date, Dr Pan has published over 130 research papers in top-tier journals and conferences, including TPAMI, TKDE, TNNLS, ICML, NeurIPS, KDD, AAAI, IJCAI, WWW, and ICDM. His research has attracted over 7600 citations. He is a recipient of the Best Student Paper Award of IEEE ICDM 2020. His survey paper on \textit{``A Comprehensive Survey on Graph Neural Networks''} in TNNLS-21 has been cited over 2500 times. He is recognised as one of the AI 2000 AAAI/IJCAI Most Influential Scholars in Australia (2021). \end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,clip,keepaspectratio]{figs/phoebe.jpg}}]{Yi-Ping Phoebe Chen} received the B.Inf.Tech. (First Class Hons.) and the Ph.D. degrees in Computer Science from the University of Queensland, Brisbane, Australia. She is currently a Professor and Chair of the Department of Computer Science and Information Technology, La Trobe University, Melbourne, Australia. She is also the Chief Investigator of the ARC Center of Excellence in Bioinformatics. She is the Steering Committee Chair of the Asia Pacific Bioinformatics Conference (founder) and Multimedia Modeling. She has been involved in research on bioinformatics, health informatics, multimedia, and artificial intelligence. She has published over 250 research papers, many of which appeared in top journals and conferences.
\end{IEEEbiography}
\end{document}
\begin{document}
\pagestyle{plain}
\title{The Lie Algebra of S-unitary Matrices, Twisted Brackets and Quantum Channels}
\author{Clarisson Rizzie Canlubo}
\maketitle
\begin{abstract} A dimension formula was given in \cite{caalim} in order to partially classify the Lie algebras of $S$-unitary type. The natural question of when $\mathfrak{u}_{S}$ and $\mathfrak{u}_{T}$ are isomorphic is left unanswered. In this article, we will give an answer to this question using the notion of quantum channels and their Kraus representation. In line with this, we will also discuss linearly twisted versions of the usual commutator bracket and its relation to the standard Lie algebra structure on $M_{n}(\mathbb{C})$. Finally, we will mention some problems that are still unanswered in relation to $S$-unitary type matrices and twisted brackets. \end{abstract}
\section{Introduction} \label{intro}
Let $S\in M_{n}(\mathbb{C})$. Then, the subspace $\mathfrak{u}_{S}=\left\{ X\in M_{n}(\mathbb{C}) | SX^{\ast}=-XS \right\}$ is a Lie algebra with respect to the usual bracket of matrices, given as $[A,B]=AB-BA$ for any $A,B\in \mathfrak{u}_{S}$. The subset $U_{S}=\left\{ X\in M_{n}(\mathbb{C}) | SX^{\ast}=X^{-1}S \right\}$ of $M_{n}(\mathbb{C})$ is a Lie subgroup of $GL_{n}(\mathbb{C})$ whose Lie algebra is $\mathfrak{u}_{S}$.
In \cite{caalim}, the dimension of $\mathfrak{u}_{S}$ is given in terms of the spectral properties of $S$. Hence, if $S$ and $T$ are unitarily similar, the Lie algebras $\mathfrak{u}_{S}$ and $\mathfrak{u}_{T}$ have the same dimension. Although this is not enough to conclude whether $\mathfrak{u}_{S}$ is isomorphic to $\mathfrak{u}_{T}$ as a Lie algebra, this turns out to be the case according to the following proposition.
\begin{prop}\label{P1} If $S$ and $T$ are unitarily similar then $\mathfrak{u}_{S}\cong\mathfrak{u}_{T}$ as Lie algebras. \end{prop}
\begin{prf} Suppose $S=V^{\ast}TV$ for some unitary $V$. Then, for any $X\in\mathfrak{u}_{S}$ we have
\[ V^{\ast}TVX^{\ast}=SX^{\ast}=-XS=-XV^{\ast}TV \]
\noindent and so, we have
\[ T(VXV^{\ast})^{\ast}=-(VXV^{\ast})T. \]
\noindent Thus, $\mathfrak{u}_{S}\stackrel{\phi_{V}}{\longrightarrow}\mathfrak{u}_{T}, X\mapsto VXV^{-1}$ gives the desired isomorphism. $\blacksquare$
\end{prf}
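For the reader who wishes to experiment, the following small \texttt{numpy} script is a sanity check of Proposition (\ref{P1}) in dimension two; the particular choices of $S$, of the element $X\in\mathfrak{u}_{S}$, and of the random unitary $V$ are illustrative only.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

# an element X of u_S for S = diag(1, -1):  S X^* = -X S
S = np.diag([1.0, -1.0]).astype(complex)
X = np.array([[0.7j, 0.3 - 0.2j],
              [0.3 + 0.2j, -1.1j]])
assert np.allclose(S @ X.conj().T, -X @ S)

# a random unitary V and the unitarily similar matrix T with S = V^* T V
V, _ = np.linalg.qr(rng.standard_normal((2, 2)) + 1j * rng.standard_normal((2, 2)))
T = V @ S @ V.conj().T

# phi_V(X) = V X V^{-1} = V X V^* should lie in u_T:  T phi_V(X)^* = -phi_V(X) T
Y = V @ X @ V.conj().T
print(np.allclose(T @ Y.conj().T, -Y @ T))   # True (up to rounding)
\end{verbatim}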
The isomorphism $\phi_{V}$ given in the proof of Proposition (\ref{P1}) turns out to be the most general one as indicated in the following theorem.
\begin{thm}\label{T1} If $\mathfrak{u}_{S}\stackrel{\phi}{\longrightarrow}\mathfrak{u}_{T}$ is a Lie algebra isomorphism then $\phi(X)=VXV^{-1}$ for some invertible $V$. \end{thm}
Also, a partial converse of Proposition (\ref{P1}) is a corollary of Theorem (\ref{T1}), as indicated in the next theorem.
\begin{thm}\label{T2} If the Lie algebras $\mathfrak{u}_{S}$ and $\mathfrak{u}_{T}$ are isomorphic then the stabilizers of $S$ and $T$ under the conjugation action of $GL_{n}(\mathbb{C})$ on $M_{n}(\mathbb{C})$ are conjugate subgroups. \end{thm}
\noindent We will prove Theorems (\ref{T1}) and (\ref{T2}) in section [\ref{proof}]. Note that although the entries of the matrices in $\mathfrak{u}_{S}$ are complex numbers, the Lie algebra $\mathfrak{u}_{S}$ is strictly a \textit{real} Lie algebra. Whether $\mathfrak{u}_{S}$ is a complex Lie algebra depends on the existence of a complex structure $J$ (an endomorphism $J$ such that $J^{2}=-I$) which bilinearly commutes with $[,]$, i.e. $[J(X),Y]=J[X,Y]=[X,J(Y)]$ for any $X,Y\in \mathfrak{u}_{S}$.
\section{Twisted Lie Brackets}\label{twisted}
Using a linear map $M_{n}(\mathbb{C})\stackrel{\psi}{\longrightarrow}M_{n}(\mathbb{C})$, one can define a bilinear form $[,]_{\psi}$ on $M_{n}(\mathbb{C})$ as follows. For any $X,Y\in M_{n}(\mathbb{C})$, define $[,]_{\psi}$ by $[X,Y]_{\psi}=X\psi(Y)-Y\psi(X)$. Clearly, $[,]_{\psi}$ is skew-symmetric for any linear map $\psi$. In the event that $[,]_{\psi}$ defines a Lie bracket on $M_{n}(\mathbb{C})$, we will call $[,]_{\psi}$ the $\psi$-twisted Lie bracket on $M_{n}(\mathbb{C})$. However, the Jacobi identity is satisfied only for certain linear maps $\psi$. In general, any linear map $M_{n}(\mathbb{C})\stackrel{\psi}{\longrightarrow}M_{n}(\mathbb{C})$ takes the form $\psi(X)=\sum\limits_{i=1}^{m}A_{i}XB_{i}^{\ast}$ for some matrices $A_{i}, B_{i}\in M_{n}(\mathbb{C})$. If $B_{i}=I$ for all $i=1,\dots, m$ then $\psi(X)=AX$ for all $X\in M_{n}(\mathbb{C})$, where $A=\sum\limits_{i=1}^{m}A_{i}$. In this case, we have
\begin{eqnarray*} \sum\limits_{\circlearrowleft} [X,[Y,Z]_{\psi}]_{\psi} &=& X\psi(Y\psi(Z))-X\psi(Z\psi(Y))-Y\psi(Z)\psi(X)+Z\psi(Y)\psi(X)\\ &+& Y\psi(Z\psi(X))-Y\psi(X\psi(Z))-Z\psi(X)\psi(Y)+X\psi(Z)\psi(Y)\\ &+& Z\psi(X\psi(Y))-Z\psi(Y\psi(X))-X\psi(Y)\psi(Z)+Y\psi(X)\psi(Z)\\ & & \\ &=& XAYAZ-XAZAY-YAZAX+ZAYAX\\ &+& YAZAX-YAXAZ-ZAXAY+XAZAY\\ &+& ZAXAY-ZAYAX-XAYAZ+YAXAZ\\ &=& 0 \end{eqnarray*}
\noindent where the leftmost sum indicates the cyclic sum over $X,Y$ and $Z$. Thus, we have proven the following proposition.
\begin{prop} If $\psi(X)=AX$ for some $A\in M_{n}(\mathbb{C})$ then $[,]_{\psi}$ defines a Lie bracket on $M_{n}(\mathbb{C})$. \end{prop}
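The computation above is easy to confirm numerically; the following short \texttt{numpy} check of the Jacobi identity for $[,]_{\psi}$ with $\psi(X)=AX$ uses random matrices of an illustrative size.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
n = 4
A = rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n))

def bracket(X, Y):
    # twisted bracket [X, Y]_psi with psi(X) = A X
    return X @ (A @ Y) - Y @ (A @ X)

X, Y, Z = (rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n))
           for _ in range(3))
jacobi = (bracket(X, bracket(Y, Z)) + bracket(Y, bracket(Z, X))
          + bracket(Z, bracket(X, Y)))
print(np.abs(jacobi).max())   # numerically zero
\end{verbatim}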
The essential idea of the Kraus representation of a linear map $M_{n}(\mathbb{C})\stackrel{\psi}{\longrightarrow}M_{n}(\mathbb{C})$ is that for any $X\in M_{n}(\mathbb{C})$, we have
\[ \psi(X)=\sum\limits_{i=1}^{m}A_{i}XB_{i}, \]
\noindent where $m$ can be taken to be the rank of the Choi matrix $J(\psi)$ of $\psi$. In the event that the rank of $J(\psi)$ is one, i.e. the sum above consists of only one summand, the following proposition gives a sufficient condition for $[,]_{\psi}$ to define a Lie bracket on $M_{n}(\mathbb{C})$.
\begin{prop} Let $\psi(X)=AXB$ for some $A,B\in M_{n}(\mathbb{C})$. Suppose the image of the map $M_{n}(\mathbb{C})\longrightarrow M_{n}(\mathbb{C}), X\mapsto AX$ is a subspace of $C(B)$, the commuting ring of $B$. Then $[,]_{\psi}$ defines a Lie bracket on $M_{n}(\mathbb{C})$. In particular, if $A$ is invertible then $\psi$ is of the form $\psi(X)=MX$ for some invertible matrix $M$. \end{prop}
\begin{prf} For any $X\in M_{n}(\mathbb{C})$, the assumption that $AX$ belongs to $C(B)$ implies that $(AX)B=B(AX)$. Thus, we have
\begin{eqnarray*} \sum\limits_{\circlearrowleft} [X,[Y,Z]_{\psi}]_{\psi} &=& X\psi(Y\psi(Z))-X\psi(Z\psi(Y))-Y\psi(Z)\psi(X)+Z\psi(Y)\psi(X)\\ &+& Y\psi(Z\psi(X))-Y\psi(X\psi(Z))-Z\psi(X)\psi(Y)+X\psi(Z)\psi(Y)\\ &+& Z\psi(X\psi(Y))-Z\psi(Y\psi(X))-X\psi(Y)\psi(Z)+Y\psi(X)\psi(Z)\\ & & \\ &=& XAY(AZB)B-XAZ(AYB)B-Y(AZB)(AXB)\\ &+& Z(AYB)(AXB)+YAZ(AXB)B-YAX(AZB)B\\ &-& Z(AXB)(AYB)+X(AZB)(AYB)+ZAX(AYB)B\\ &-& ZAY(AXB)B-X(AYB)(AZB)+Y(AXB)(AZB)\\ & & \\ &=& XAY(AZB-BAZ)B-XAZ(AYB-BAY)B\\ &+& YAZ(AXB-BAX)B-YAX(AZB-BAZ)B\\ &+& ZAX(AYB-BAY)B-ZAY(AXB-BAX)B\\ &=& 0 \end{eqnarray*}
\noindent for any $X,Y,Z\in M_{n}(\mathbb{C})$. This proves the first claim. Now, if $A$ is invertible then the image of $X\mapsto AX$ is $M_{n}(\mathbb{C})$. This implies that $B$ is necessarily a scalar matrix. Taking $M=BA$ proves the second claim. $\blacksquare$ \end{prf}
The canonical bracket on $M_{n}(\mathbb{C})$ restricts to a Lie bracket on the subspace $\mathfrak{u}_{S}$ described in section [\ref{intro}]. The following proposition gives a necessary and sufficient condition for this to hold for the twisted Lie bracket $[,]_{\psi}$.
\begin{prop}\label{P2} Let $\psi(X)=AX$ for some $A\in M_{n}(\mathbb{C})$. Then, $[,]_{\psi}$ restricts to a Lie bracket on $\mathfrak{u}_{S}$ if and only if $A$ is $S$-Hermitian. \end{prop}
\begin{prf} Suppose $A$ is $S$-Hermitian. Then, for any $X,Y\in\mathfrak{u}_{S}$, we have
\begin{eqnarray*} S[X,Y]_{\psi}^{\ast} &=& S(X\psi(Y)-Y\psi(X))^{\ast} \\ &=& SY^{\ast}A^{\ast}X^{\ast}-SX^{\ast}A^{\ast}Y^{\ast}\\ &=& -YSA^{\ast}X^{\ast}+XSA^{\ast}Y^{\ast}\\ &=& -YASX^{\ast}+XASY^{\ast}\\ &=& YAXS-XAYS\\ &=& (YAX-XAY)S\\ &=& -[X,Y]_{\psi}S \end{eqnarray*}
\noindent Thus, $[X,Y]_{\psi}\in \mathfrak{u}_{S}$, and so $[,]_{\psi}$ restricts to a Lie bracket on $\mathfrak{u}_{S}$.
Conversely, suppose $[,]_{\psi}$ restricts to a Lie bracket on $\mathfrak{u}_{S}$. Then, for any $X,Y\in \mathfrak{u}_{S}$, we have
\[ -YSA^{\ast}X^{\ast} + XSA^{\ast}Y^{\ast} = -YASX^{\ast} + XASY^{\ast} \]
\noindent from the above computation. Thus, we have
\[ Y(AS-SA^{\ast})X^{\ast} = X(AS-SA^{\ast})Y^{\ast} \]
\noindent for any $X,Y\in \mathfrak{u}_{S}$. Taking $Y=iI\in\mathfrak{u}_{S}$, we see that
\[ (AS-SA^{\ast})X^{\ast}=-X(AS-SA^{\ast}) \]
\noindent and taking $X=I$, we get $SA^{\ast}=AS$. $\blacksquare$
\end{prf}
It is a curiosity to know which linear maps $\psi$ whose brackets $[,]_{\psi}$ induce Lie algebra structures on $M_{n}(\mathbb{C})$ isomorphic to the canonical one. We partially answer this in the next proposition.
\begin{prop}\label{P5} If the Lie bracket $[,]_{\psi}$, with $\psi(X)=AXB^{\ast}$ for all $X\in M_{n}(\mathbb{C})$, coincides with the canonical Lie bracket on $M_{n}(\mathbb{C})$ then $B^{\ast}=A^{-1}$. \end{prop}
\begin{prf} For any $X\in M_{n}(\mathbb{C})$ we have
\[ 0= [X,I] = [X,I]_{\psi} = X\psi(I)-I\psi(X). \]
\noindent And so, we have $\psi(X)=X\psi(I)$ for all $X\in M_{n}(\mathbb{C})$. Since $[I,X]=0$, we also have $\psi(X)=\psi(I)X$ for all $X\in M_{n}(\mathbb{C})$. Thus, $\psi(I)=AB^{\ast}$ is central. Thus, for any $X,Y\in M_{n}(\mathbb{C})$ we have
\[ [X,Y] = [X,Y]_{\psi} = X\psi(Y)-Y\psi(X) = X\psi(I)Y-Y\psi(I)X = \psi(I)[X,Y] \]
\noindent and so, $\psi(I)=I$ from which the conclusion immediately follows. $\blacksquare$
\end{prf}
Using the scalar matrix $iI$ in place of $I$ in the proof of Proposition (\ref{P5}) we get the following corollary.
\begin{cor}\label{C6} If the Lie bracket $[,]_{\psi}$, with $\psi(X)=AX$ for all $X\in M_{n}(\mathbb{C})$, coincides with the canonical Lie bracket on $\mathfrak{u}_{S}$ then $A=I$. \end{cor}
\section{Quantum Channels and Kraus Operators} \label{kraus}
Quantum channels play a prominent role in quantum information theory; see \cite{chuang} for more details. They are used to encode operations in the set-up of quantum theory. Quantum channels, in the finite-dimensional case, are completely positive maps $M_{n}(\mathbb{C})\stackrel{\Phi}{\longrightarrow}M_{k}(\mathbb{C})$. In some literature, quantum channels are required to be trace-preserving. In this section, we will discuss aspects of quantum channels important for the purpose of this article.
A linear map $A\stackrel{\Phi}{\longrightarrow}B$ between $C^{\ast}$-algebras is said to be $m$-\textit{positive} if the induced map
\[ \Phi_{m}:=I_{m}\otimes\Phi:M_{m}(\mathbb{C})\otimes A\longrightarrow M_{m}(\mathbb{C})\otimes B \]
\noindent sends positive elements to positive elements relative to the natural $C^{\ast}$-algebra structures on the involved tensor products. If $\Phi$ is $m$-positive for all natural numbers $m$ then $\Phi$ is said to be \textit{completely positive}. Note that this notion makes sense for a general map $A\stackrel{\Phi}{\longrightarrow}B$ between $C^{\ast}$-algebras since the minimal and maximal tensor products coincide in the case of tensoring with $M_{n}(\mathbb{C})$. In the case when $A$ and $B$ are finite-dimensional matrix algebras, complete positivity is equivalent to $m$-positivity for some natural number $m$. This is a consequence of Choi's Theorem as stated below. For the proof, see for example \cite{choi} and \cite{mosonyi}.
\begin{thm}{(Choi's Theorem)}\\ \label{cho} Let $M_{n}(\mathbb{C})\stackrel{\Phi}{\longrightarrow}M_{k}(\mathbb{C})$ be linear. Then, the following are equivalent: \begin{enumerate}
\item[(a)] $\Phi$ is completely positive
\item[(b)] $\Phi$ is $n$-positive
\item[(c)] There exist $A_{1},\dots,A_{r}\in M_{k,n}(\mathbb{C})$ such that
\[ \Phi(X)=\sum\limits_{i=1}^{r}A_{i}XA_{i}^{\ast} \]
\noindent for all $X\in M_{n}(\mathbb{C})$. Moreover, the matrices $A_{1},\dots,A_{r}$ satisfy $\sum\limits_{i=1}^{r}A_{i}A_{i}^{\ast}=I$. \end{enumerate} \end{thm}
\noindent The representation given in part $(c)$ of the above theorem is called a \textit{Kraus representation} of $\Phi$ and the matrices $A_{1},\dots,A_{r}$ are called the associated \textit{Kraus operators}. The Kraus representation of a linear operator $\Phi$ is far from unique. However, the sets of Kraus operators arising from different Kraus representations of a given quantum channel $\Phi$ are related by a unitary matrix, as stated by the following theorem.
\begin{thm}\label{unitary} Let $A_{1},\dots,A_{r}$ and $B_{1},\dots,B_{s}$ be two sets of Kraus operators associated to two Kraus representations of a quantum channel $\Phi$. Then, there is a unitary $(U_{ij})\in M_{max\left\{r,s\right\}}(\mathbb{C})$ such that $B_{j}=\sum\limits_{i=1}^{r}U_{ji}A_{i}$. \end{thm}
\noindent For a proof, see pg. 95 of \cite{mosonyi}.
Linear maps that are not necessarily completely positive admit representations similar to the Kraus representation of a quantum channel. In the general case, a linear map $M_{n}(\mathbb{C})\stackrel{\Phi}{\longrightarrow}M_{k}(\mathbb{C})$ can be represented as
\[ \Phi(X)=\sum\limits_{i=1}^{r} A_{i}XB_{i}^{\ast} \]
\noindent for some matrices $A_{1},\dots,A_{r},B_{1},\dots,B_{r}\in M_{k,n}(\mathbb{C})$.
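As a numerical illustration of Choi's Theorem, the following \texttt{numpy} sketch builds the Choi matrix $J(\Phi)=\sum_{i,j}E_{ij}\otimes\Phi(E_{ij})$ of a map given in Kraus form and checks its positivity; the number and the entries of the Kraus operators are arbitrary illustrative choices.
\begin{verbatim}
import numpy as np

n = 3
rng = np.random.default_rng(0)
# two arbitrary Kraus operators defining Phi(X) = sum_i A_i X A_i^*
kraus = [rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n))
         for _ in range(2)]

def phi(X):
    return sum(A @ X @ A.conj().T for A in kraus)

# Choi matrix J(Phi) = sum_{i,j} E_ij (tensor) Phi(E_ij)
J = np.zeros((n * n, n * n), dtype=complex)
for i in range(n):
    for j in range(n):
        E = np.zeros((n, n), dtype=complex)
        E[i, j] = 1.0
        J += np.kron(E, phi(E))

# Choi's Theorem: Phi is completely positive iff J(Phi) is positive semidefinite
print("min eigenvalue of J:", np.linalg.eigvalsh(J).min())   # >= 0 up to rounding
print("rank of J:", np.linalg.matrix_rank(J))                # number of independent Kraus operators
\end{verbatim}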
\section{Proof of Theorems 1 and 2} \label{proof}
\textsc{Proof of Theorem 1:} Let $\Phi$ be a linear automorphism extending the Lie algebra isomorphism $\phi$ to the whole of $M_{n}(\mathbb{C})$. Let $\Psi$ be its inverse. Then, using the Kraus representation for linear maps $M_{n}(\mathbb{C})\stackrel{\Phi,\Psi}{\longrightarrow}M_{n}(\mathbb{C})$, there are $r$ matrices $A_{i},B_{i}$ and $s$ matrices $A^{\prime}_{j},B^{\prime}_{j}$ such that
\[ \Phi(X)=\sum\limits_{i=1}^{r} A_{i}XB_{i}^{\ast} \hspace{.5in} \text{and} \hspace{.5in} \Psi(X)=\sum\limits_{j=1}^{s} A^{\prime}_{j}X(B^{\prime}_{j})^{\ast} \]
\noindent for all $X\in M_{n}(\mathbb{C})$. Without loss of generality, we can assume that the matrices $A_{i}$ and $B_{i}$ form linearly independent sets of matrices. We assume the same for the matrices $A^{\prime}_{j}$ and $B^{\prime}_{j}$. Then, $\Phi\circ \Psi=id$ gives
\[ id(X)=\sum\limits_{i,j} A_{i}A^{\prime}_{j}X(B^{\prime}_{j})^{\ast}B^{\ast}_{i}.\]
\noindent Complete positivity implies that the matrices $A_{i}A^{\prime}_{j}$ and $B_{i}B^{\prime}_{j}$ constitute a set of Kraus operators for $id$. However, $id(X)=X$ is also a Kraus representation for $id$. Since the Kraus operators appearing in different Kraus representations of the same quantum channel are related by a unitary according to Theorem (\ref{unitary}), we must have $r=s=1$ and so, $\Phi(X)=AXB^{\ast}$. Since $\Phi$ is an isomorphism, the matrices $A$ and $B$ are necessarily invertible. The restriction $\phi$ of $\Phi$ to $\mathfrak{u}_{S}$ is given by $\phi(X)=AXB^{\ast}$. Thus, for any $X,Y\in \mathfrak{u}_{S}$ we have
\[ AXYB^{\ast}-AYXB^{\ast}=\phi[X,Y]=[\phi(X),\phi(Y)]=AXB^{\ast}AYB^{\ast}-AYB^{\ast}AXB^{\ast} \]
\noindent from which we immediately see that
\[ [X,Y]=XY-YX=XB^{\ast}AY-YB^{\ast}AX=[X,Y]_{\psi} \]
\noindent where $\psi(X)=B^{\ast}AX$ for all $X\in\mathfrak{u}_{S}$. Hence, by Corollary (\ref{C6}) we have $B^{\ast}A=I$ and so, $A^{-1}=B^{\ast}$. Taking $V=A$ proves the theorem. $\blacksquare$
\noindent \textsc{Proof of Theorem 2:} If $X\in \mathfrak{u}_{S}$ then
\[ T\phi(X)^{\ast}=-\phi(X)T \Longleftrightarrow V^{-1}TV^{-\ast}X^{\ast}=-XV^{-1}TV^{-\ast}. \]
\noindent Thus, $\mathfrak{u}_{S}= \mathfrak{u}_{V^{-1}TV^{-\ast}}$. This implies that the Lie groups $U_{S}$ and $U_{V^{-1}TV^{-\ast}}$ associated to $\mathfrak{u}_{S}$ and $\mathfrak{u}_{V^{-1}TV^{-\ast}}$, respectively, are the same. That is,
\[ SX^{\ast}=X^{-1}S \Longleftrightarrow V^{-1}TV^{-\ast}X^{\ast}=X^{-1}V^{-1}TV^{-\ast} \]
\noindent or equivalently,
\[ XSX^{\ast}=S \Longleftrightarrow (VXV^{-1})T(VXV^{-1})^{\ast}=T \]
\noindent for all $X\in U_{S}$. Thus,
\[ stab(S)=U_{S}=V^{-1}\cdot U_{T}\cdot V=V^{-1}\cdot stab(T)\cdot V \]
\noindent showing that $stab(S)$ and $stab(T)$ are conjugate subgroups of $GL_{n}(\mathbb{C})$. $\blacksquare$
\section{Unanswered Questions} \label{problems}
Relative to the usual commutator bracket $[,]$, the subspaces $\mathfrak{u}_{S}$ are Lie subalgebras of $M_{n}(\mathbb{C})$ for any $S\in M_{n}(\mathbb{C})$. However, as we have seen in Proposition (\ref{P2}), not all $\mathfrak{u}_{S}$ are Lie subalgebras of $M_{n}(\mathbb{C})$ relative to the twisted bracket $[,]_{\psi}$.
\begin{que} What are the Lie subalgebras of $M_{n}(\mathbb{C})$ under the Lie bracket $[,]_{\psi}$? \end{que}
The bracket $[,]_{\psi}$ for a linear map $M_{n}(\mathbb{C})\stackrel{\psi}{\longrightarrow}M_{n}(\mathbb{C})$ is a special case of the class of brackets of the form $[X,Y]_{B}:=B(X,Y)-B(Y,X)$ for some bilinear form $B$ on $M_{n}(\mathbb{C})$.
\begin{que} When is the bracket $[X,Y]_{B}=B(X,Y)-B(Y,X)$ a Lie bracket on $M_{n}(\mathbb{C})$? And on $\mathfrak{u}_{S}$? In particular, when does $[,]_{B}$ satisfy the Jacobi identity? \end{que}
In Theorem (\ref{T2}), a necessary condition for $\mathfrak{u}_{S}$ and $\mathfrak{u}_{T}$ to be isomorphic is the (unitary) conjugacy of the stabilizers subgroups of the matrices $S$ and $T$. This is much weaker than $S$ and $T$ being unitarily similar.
\begin{que} Using the orbit-stabilizer theorem for the $\ast$-conjugation action of $GL_{n}(\mathbb{C})$ on $M_{n}(\mathbb{C})$, what can we say about the matrices $S$ and $T$ if $stab(S)=stab(T)$? Or if they are only conjugate subgroups? \end{que}
The main goal of this article is to determine when the Lie algebras $\mathfrak{u}_{S}$ and $\mathfrak{u}_{T}$ are abstractly isomorphic. Another natural inquiry is to understand the lattice structure of the Lie algebras $\mathfrak{u}_{S}$ in terms of inclusions. In line with this, we have the following question.
\begin{que} If $\mathfrak{u}_{S}\leqslant\mathfrak{u}_{T}$, is it the case that $stab(T)$ is conjugate to a (possibly trivial) subgroup of $stab(S)$? \end{que}
More importantly, the author is very much interested in the following question.
\begin{que} Let $\psi(X)=AX$ for some $A\in M_{n}(\mathbb{C})$. What is the Lie group structure on $GL_{n}(\mathbb{C})$ so that its Lie algebra is $M_{n}(\mathbb{C})$ with bracket $[,]_{\psi}$? By Ado's Theorem, every finite dimensional real Lie algebra $\mathfrak{g}$ is the Lie algebra of a Lie subgroup $G$ of $GL_{R}(\mathbb{C})$ for some $R$. Since the bracket $[,]_{\psi}$ is a 'twist' of the usual commutator on the same vector space, it is reasonable to expect that its Lie group has the same underlying manifold as that of $GL_{n}(\mathbb{C})$ but with a 'twisted' multiplication. See \cite{procesi}. \end{que}
\hspace{1in}
\noindent\textsc{Clarisson Rizzie P. Canlubo}\\ University of the Philippines$-$Diliman\\ Quezon City, Philippines 1101\\ [email protected]
\end{document}
\begin{document}
\pagestyle{plain} \title{Representations of the Infinite-Dimensional Affine Group}
\date{} \author{
\textbf{Yuri Kondratiev}\\ Department of Mathematics, University of Bielefeld, \\ D-33615 Bielefeld, Germany,\\ Dragomanov University, Kyiv, Ukraine\\ } \begin{abstract} We introduce an infinite-dimensional affine group and construct its irreducible unitary representation. Our approach follows the one used by Vershik, Gelfand and Graev for the diffeomorphism group, but with modifications made necessary by the fact that the group does not act on the phase space. However it is possible to define its action on some classes of functions. \end{abstract}
\maketitle
\vspace*{3cm}
{\bf Key words: } affine group; configurations; Poisson measure; ergodicity
{\bf MSC 2010}. Primary: 22E66. Secondary: 60B15.
\section{Introduction}
Given a vector space $V$ the affine group can be described concretely as the semidirect product of $V$ by $\mathrm{GL}(V)$, the general linear group of $V$: $$
\mathrm{Aff} (V)=V \rtimes \mathrm{ GL} (V).
$$ The action of $\mathrm{GL}(V)$ on $V$ is the natural one (linear transformations are automorphisms), so this defines a semidirect product.
Affine groups play important role in the geometry and its applications, see, e.g., \cite{Ar,Ly}. Several recent papers \cite{AJO,AK,EH,GJ,Jo,Ze} are devoted to representations of the real, complex and $p$-adic affine groups and their generalizations, as well as diverse applications, from wavelets and Toeplitz operators to non-Abelian pseudo-differential operators and $p$-adic quantum groups.
In the particular case of the field $V= \X$, the group $\mathrm{Aff}(\X)$ is defined as follows.
Consider a function $b:\X \to \X$ which is a step function on $\X$. Take another matrix valued function $A:\X\to L(\X) $ s.t. $A(x)=\mathrm{Id} +A_0(x)$, $A(x)$ is invertible, and $A_0$ is a matrix valued step function on $\X$. Introduce the infinite dimensional affine group $\Aff (\X)$ as the set of all pairs $g=(A,b)$ with components satisfying the assumptions above. Define the group operation $$ g_2 g_1= (A_2,b_2) (A_1, b_1) = (A_1 A_2, b_1 +A_1 b_2). $$ The unity in this group is $e=(\mathrm{Id} ,0)$. For $g\in \Aff(\X)$ we have $g^{-1}= (A^{-1}, -A^{-1}b)$. It is clear that for the step mappings we use, these definitions are correct. Our aim is to construct irreducible representations of $\Aff (\X)$. As a rule, only special classes of irreducible representations can be constructed for infinite-dimensional groups. For various classes of such groups, special tools were invented; see \cite{Is,Ko} and references therein.
We will follow the approach of Vershik-Gelfand-Graev \cite{VGG75} proposed in the case of the group of diffeomorphisms. A direct application of this approach meets certain difficulties related to the impossibility of defining the action of the group $\Aff (\X)$ on a phase space as in \cite{VGG75}. A method to overcome this problem is the main technical step in the present paper. We would like to mention that a similar approach was already used in \cite{PAFF} for the construction of the representation of the p-adic infinite-dimensional affine group.
\section{Infinite dimensional affine group}
In our definitions and studies of vector and matrix valued functions on $\X$ we will use collections of step mappings as the basic functional spaces. This means that each such mapping is a finite sum of indicator functions with measurable bounded supports and constant vector/matrix coefficients. Such spaces of functions on $\X$ are rather unusual in the framework of infinite-dimensional groups, but we will try to show that their use is natural for the study of affine groups.
For $x\in\X$ consider the section $G_x= \{g(x)\; |\; g\in \Aff(\X)\}$. It is an affine group with constant coefficients. Note that, since the components of $g$ are step mappings, for a sufficiently large ball $B_N (0) \subset \X$ of radius $N$ centered at zero we have $g(x)= (1,0)$ for $x\in B^c_N(0)$.
Define the action of $g$ on a point $x\in\X$ as $$ gx= g(x)x = A(x)^{-1} (x+b(x)). $$
Denote the orbit $O_x=\{gx| g\in G_x\}\subset \X$. Actually, as a set $O_x=\X$ but elements of this set are parametrized by $g\in G_x$. For any element $y\in O_x$ and $h\in G_x$ we can define $hy= h(gx)= (hg)x\in O_x$. It means that we have the group $G_x$ action on the orbit $O_x$.
It gives
$$ (g_1g_2)(x) x= g_1(x)( g_2(x)x) $$
that corresponds to the group multiplication
$$
g_2 g_1= (A_2,b_2) (A_1, b_1) = (A_1 A_2, b_1 +A_1 b_2)
$$
considered in the given point $x$.
\begin{Remark} The situation we have is quite different w.r.t. the standard group of motions on a phase space. Namely, we have one fixed point $x\in\X$ and the section group $G_x$ associated with this point. Then we have the motion of $x$ under the action of $G_x$. It gives the group action on the orbit $O_x$. \end{Remark}
We will use the configuration space $\Ga(\X)$, i.e., the set of all locally finite subsets of $\X$.
Each configuration may be identified with the measure $$ \gamma(dx) = \sum_{x\in\gamma} \delta_x $$ which is a positive Radon measure on $\X$: $\gamma\in \M(\X)$. We define the vague topology on $\Ga(\X)$ as the weakest topology for which all mappings $$ \Ga(\X) \ni \ga \mapsto <f,\gamma>\in \R,\;\; f\in C_0(\X) $$ are continuous. The Borel $\sigma$-algebra for this topology denoted $\B(\Ga(\X))$.
For $\ga\in \Ga(\X)$, $\ga=\{x\}\subset \X$ define $g\gamma$ as a motion of the measure $\ga$:
$$ g\ga=\sum_{x\in\gamma} \delta_{g(x)x}\in \M(\X). $$ Here we have the group action of $\Aff(\X)$ produced by individual transformations of points from the configuration. Again, as above, we move a fixed configuration using previously defined actions of $G_x$ on $x\in\ga$.
Note that $g\gamma$ is no longer a configuration in general. More precisely, for some $B_N(0) $ the set $(g\ga)_N= g\ga\cap B_N^c(0)$ is a configuration in $B^c_N(0) $ but the finite part of $g\ga$ may include multiple points.
For any $f\in \mathcal D(\X,\C)$ we have corresponding cylinder function on $\Ga(\X)$: $$ L_f(\ga)= <f,\ga > = \int_{\X} f(x)\ga(dx) = \sum_{x\in \ga} f(x). $$ Denote ${\mathcal P}_{cyl}$ the set of all cylinder polynomials generated by such functions. More generally, consider functions of the form
\begin{equation} \label{cyl} F(\ga)= \psi(<f_1,\ga>,\dots, <f_n,\ga>),\; \ga\in\Ga(\X), f_j\in \mathcal D(\X), \psi\in C_b(\R^n). \end{equation}
These functions form the set $\mathcal F_b(\Ga(\X))$ of all bounded cylinder functions.
For any clopen set $\Lambda \in \mathcal{O}_b(\X)$ (also called a finite volume) denote $\Ga(\Lambda)$ the set of all (necessarily finite) configurations in $\La$. We have as before the vague topology on this space and the Borel $\sigma$-algebra $\B(\Ga(\La))$ is generated by functions
C(T)=\{\ga\in\Ga(\X)\;|\; \ga_{\La}=\ga \cap \La \in T\}. $$ Such sets form a $\sigma$-algebra $\B_{\La}(\Ga(\X))$ of cylinder sets for the finite volume $\La$. The set of bounded functions on $\Ga(\X)$ measurable w.r.t. $\B_{\La}(\Ga(\X))$ we denote $B_{\La}(\Ga(\X))$. That is a set of cylinder functions on $\Ga(\X)$. As a generating family for this set we can use the functions of the form $$ F(\ga)= \psi(<f_1,\ga>,\dots, <f_n,\ga>),\; \ga\in\Ga(\X), f_j\in C_0(\La), \psi\in C_b(\R^n). $$
For so-called one-particle functions $f:\X\to\R, f\in\mathcal D(\X)$ consider $$ (gf)(x)= f(g(x) x), x\in \X. $$
Then $gf\in \mathcal D(\X)$. Thus,
we have the group action
$$
\mathcal D(\X)\in f \mapsto gf\in \mathcal D(\X),\;\;g\in\Aff
$$
of the infinite dimensional group $\Aff$ in the space of functions
$\mathcal D(\X)$.
Note that due to our definition, we have $$ <f, g\ga> = <gf,\ga> $$ and it is reasonable to define for cylinder functions (\ref{cyl}) the action of the group $\Aff$ as $$ (V_g F)(\ga)= \psi(<gf_1,\ga>,\dots, <gf_n,\ga>). $$ Obviously $V_g: \mathcal F_b (\Ga(\X))\to \mathcal F_b(\Ga(\X))$.
Denote by $m(dx)$ the Haar measure on $\X$. The dual transformation to the one-particle motion is defined via the following relation $$ \int_{\X} f(g(x)x) m(dx)=\int_{\X} f(x) g^\ast m(dx) $$ provided such a measure $g^\ast m$ on $\X$ exists.
\begin{Lemma} \label{gm}
For each $g\in \Aff$ $$ g^\ast m(dx)= \rho_{g}(x) m(dx) $$ where $\rho_g = 1_{B_R^c(0) } + r_g^0,\;\; r_g^0\in \mathcal D(\X,\R_+).$ Here as above $$
B_R^c(0)= \{x\in\X\;|\; |x|_p \geq R\}. $$
\end{Lemma}
\begin{proof} We have following representations for coefficients of $g(x)$:
$$ b(x)= \sum_{k=1}^{n} b_k 1_{B_k}(x) , $$ $$ a(x)= \sum_{k=1}^{n} a_k 1_{B_k}(x) + 1_{B^c_R(0)}(x) $$ where $B_k$ are certain balls in $\X$. Then $$ \int_{\X} f(g(x)x) m(dx)= \sum_{k=1}^n \int_{B_k} f(\frac{x+b_k}{a_k}) m(dx) + \int_{B^c_R (0)} f(x) m(dx) = $$ $$
\sum_{k=1}^{n} \int_{C_k} f(y) |a_k|_p m(dy) + \int_{B^c_R(0)} f(y) m(dy), $$ where $$ C_k= a_k^{-1}(B_k + b_k). $$ Therefore,
$$g^\ast m= (\sum_{k=1}^n |a_k|_p 1_{C_k} + 1_{B^c_R(0)}) m. $$ Note that informally we can write $$ (g^\ast m)(dx) = dm(g^{-1}x). $$ \end{proof}
Note that by duality we have the group action on the measure $m$. Namely, for $f\in \mathcal D(\X)$ and $g_1, g_2\in \Aff$ $$ \int_{\X} (g_2 g_1) f(x) m(dx)= \int_{\X} g_1 f (x) (g_2^\ast m) (dx) = $$ $$ \int_{\X} f(x) (g_1^\ast g_2^\ast m)(dx)= \int_{\X} f(x) ((g_2 g_1)^\ast m)(dx). $$ In particular $$ (g^{-1})^\ast (g^\ast m)= m. $$
\begin{Lemma} Let $F\in B_\La (\Ga(\X))$ and $g\in\Aff $ has the form $g(x)=(1, h1_{B}(x))$ with certain $h\in \X$ and $B\in \mathcal{O}_b(\X)$ s.t. $\La\subset B$. Then $$ V_gF\in B_{\La -h} (\Ga(\X)). $$
\end{Lemma} \begin{proof} Due to the formula for the action $V_gF$ we need to analyze the support of the functions $f_j (x+h1_B(x))$ for $\supp f_j\subset \La$. If $x\in B^c$ then $x\in \La^c$ and therefore $f_j (x+h1_B(x))=f_j(x)=0$. For $x\in B$ we have $f_j(x+h)$, and this value may be nonzero only for $x+h\in \La$, i.e., $\supp g f_j \subset \La- h$.
\end{proof}
Denote $\pi_m$ the Poisson measure on $\Ga(\X)$ with the intensity measure $m$.
\begin{Lemma} \label{V} For all $F \in {\mathcal P}_{cyl}$ or $F\in \mathcal F_b (\Ga(\X))$ and $g\in \Aff $ holds $$ \int_{\Ga(\X)} V_g F d\pi_m = \int_{\Ga(\X)} Fd\pi_{g^\ast m} . $$
\end{Lemma}
\begin{proof} It is enough to show this equality for exponential functions $$ F(\ga)= e^{<f,\ga>},\;\; f\in\mathcal D(\X). $$
We have $$ \int_{\Ga(\X)} V_g F d\pi_m = \int_{\Ga(\X)} e^{<gf, \ga>} d\pi_m(\ga)= $$ $$ \exp[ \int_{\X} (e^{gf(x)} -1) dm(x)] = \exp[ \int_{\X} (e^{f(x)} -1) d(g^{\ast} m)(x)] = $$ $$ \int_{\Ga(\X)} F d\pi_{g^\ast m }. $$
\end{proof}
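For the reader who prefers a numerical illustration, the following Python sketch checks the Laplace-functional identity used in this proof for a Poisson point process on a real interval and a simple indicator-type test function; the real-line setting, the intensity and the constant $c$ are illustrative choices only.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
lam, L, c = 2.0, 1.0, -1.0        # intensity, window [0, L], and f = c * 1_[0, L]

# Monte Carlo estimate of E[exp(<f, gamma>)] for a Poisson process with intensity lam*dx:
# for the step function f, <f, gamma> = c * N with N the number of points in the window.
N = rng.poisson(lam * L, size=200000)
mc = np.mean(np.exp(c * N))

closed = np.exp(lam * L * (np.exp(c) - 1.0))   # exp( int (e^f - 1) dm )
print(mc, closed)                              # the two values agree up to MC error
\end{verbatim}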
\begin{Remark} For all functions $F,G\in \mathcal F(\Ga(\X))$ a similar calculation shows $$ \int_{\Ga(\X)} V_g F \; Gd\pi_m = \int_{\Ga(\X)} F \; V_{g^{-1}} G d\pi_{g^\ast m} . $$ \end{Remark} Let $\pi_m$ be the Poisson measure on $\Ga(\X)$ with the intensity measure $m$. For any $\La\in \mathcal{O}_b(\X)$ consider the distribution $\pi_m^\La$ of $\pi_m$ in $\Ga(\La)$ corresponding the projection $\ga\to \ga_\La$. It is again a Poisson measure $\pi_{m_\La}$ in $\Ga(\La)$ with the intensity $m_\La$ which is the restriction of $m$ on $\La$. Infinite divisibility of $\pi_m$ gives for $F_j\in B_{\La_j}(\Ga(\X)), j=1,2$ with $\La_1\cap \La_2=\emptyset$ $$ \int_{\Ga(\X)} F_1(\ga) F_2(\ga) d\pi_m(\ga)= \int_{\Ga(\X)} F_1(\ga) d\pi_m(\ga) \int_{\Ga(\X)} F_2(\ga) d\pi_m(\ga)= $$ $$ \int_{\Ga(\La_1)} F_1 d\pi^{\La_1}_m \int_{\Ga(\La_2)} F_2 d\pi^{\La_2}_m. $$
\begin{Lemma}
For any $F\in B_\La(\Ga(\X))$ and $g=(1, h1_B)\in \Aff $ with $\La \cap (B+h)=\emptyset$ holds $$ \int_{\Ga(\X)} (V_g F)(\ga) d\pi_m(\ga)= \int_{\Ga(\X)} F(\ga)d\pi_m(\ga). $$
\end{Lemma}
\begin{proof} Due to our calculations above we have $$ \int_{\Ga(\X)} (V_gF)(\ga) d\pi_m(\ga)= \int_{\Ga(\X)} F(\ga) d\pi_{g^{\ast}m}(\ga)= $$ $$ \int_{\Ga(\La)} F(\eta) d\pi^{\La}_{g^{\ast}m} (\eta) =\int_{\Ga(\La)} F(\eta) d\pi_{ (g^{\ast}m)_\La} (\eta). $$ But we have shown $$ (g^{\ast}m)(dx)= (1+ 1_{B+h}(x)) m(dx) = m(dx) $$ for $x\in \La$, i.e., $(g^{\ast}m)_\La =m$.
\end{proof}
\begin{Lemma} \label{prod} For any $F_1,F_2 \in \mathcal F_b(\Ga(\X))$ there exists $g\in\Aff$ such that $$ \int_{\Ga(\X)} F_1 \; V_g F_2 d\pi_m = \int_{\Ga(\X)} F_1 d\pi_m \int_{\Ga(\X)} F_2 d\pi_m . $$
\end{Lemma}
\begin{proof} By the definition, $F_j\in B_{\La_j}(\Ga(\X)), j=1,2$ for some $\La_1,\La_2 \in \mathcal{O} (\X)$.
Let us take $g=(1, h1_B)$ with the following assumptions:
$$
\La_2\subset B,\;\; \La_1\cap (\La_2-h) =\emptyset,\;\; \Lambda_2\cap (B+h) =\emptyset.
$$
Then accordingly to previous lemmas $$ \int_{\Ga(\X)} F_1 V_g F_2 d\pi_m = \int_{\Ga(\X)} F_1 d\pi_m \int_{\Ga(\X)} F_2 d\pi_m . $$
\end{proof}
\section{$\Aff$ and Poisson measures}
For $F\in {\mathcal P}_{cyl} $ or $F\in \mathcal F_b (\Ga(\X))$, we consider the motion of $F$ by $g\in \Aff$ given by the operator $V_g$. Operators $V_g$ have the group property defined point-wisely: for any $\ga \in \Ga(\X) $
$$ (V_h (V_gF))(\ga)= (V_{hg} F) (\ga). $$ This equality is the consequence of our definition of the group action of $\Aff$ on cylinder functions.
As above, consider $\pi_m$, the Poisson measure on $\Ga(\X)$ with the intensity measure $m$. For the transformation $V_g$ the dual object is defined as the measure $V^\ast_g \pi_m$ on $\Ga(\X)$ given by the relation $$ \int_{\Ga(\X)} (V_gF) (\ga) d\pi_m(\ga) =\int_{\Ga(\X)} F(\ga) d(V^\ast_g \pi_m)(\ga), $$ where $V^\ast_g \pi_m= \pi_{g^\ast m}$, see Lemma \ref{V}.
\begin{Corollary} For any $g\in \Aff$ the Poisson measure $V_g^\ast \pi_m$ is absolutely continuous
w.r.t. $\pi_m$ with the Radon-Nikodym derivative $$ R(g,\ga)= \frac{d\pi_{g^\ast m}(\ga)}{d\pi_{ m} (\ga)} \in L^1(\pi_m). $$
\end{Corollary}
\begin{proof} Note that the density $\rho_g = 1_{B_R^c(0) } + r_g^0,\;\; r_g^0\in \mathcal D(\X,\R_+)$ of $g^\ast m$ w.r.t. $m$ may be equal to zero on some part of $\X$ and, therefore, the considered Poisson measures need not be equivalent. Due to \cite{LS03}, the Radon-Nikodym derivative $$ R(g,\ga)= \frac{d\pi_{g^\ast m}(\ga)}{d\pi_{ m} (\ga)} $$ exists if $$
\int_{\X} |\rho_g(x)-1| m(dx)= \int_{B_R(0)} |1-r_g^0 (x)| m(dx) <\infty. $$ \end{proof}
\begin{Remark} As in the proof of Proposition 2.2 from \cite{AKR} we have an explicit formula for $R(g,\ga)$:
$$ R(g,\ga)= \prod_{x\in\ga} \rho_g (x) \exp\Big(\int_{\X} (1-\rho_g(x))\, m(dx)\Big). $$ The point-wise existence of this expression is obvious.
\end{Remark}
This fact gives us the possibility to apply the Vershik-Gelfand-Graev approach realized by these authors for the case of diffeomorphism group.
Namely, for $F\in {\mathcal P}_{cyl}$ or $F\in {\mathcal P}_{cyl}(\Ga(\X)$ and $g\in \Aff$ introduce operators $$ (U_g F)(\ga) = (R(g^{-1} ,\ga) )^{1/2} (V_gF)(\ga). $$
\begin{Theorem}
The operators $U_g$, $g\in \Aff$, are unitary in $L^2 (\Ga(\X), \pi_m)$ and give an irreducible representation of $\Aff$.
\end{Theorem}
\begin{proof} Let us check the isometry property of these operators. Using Lemmas \ref{V}, \ref{gm} we have $$
\int_{\Ga(\X)} |U_g F|^2 d\pi_m = \int_{\Ga(\X)} |V_g F|^2(\ga) d\pi_{(g^{-1})^\ast m} (\ga)= $$ $$
\int_{\Ga(\X)} |F(\ga)|^2 d\pi_{(gg^{-1})^\ast m}(\ga)= \int_{\Ga(\X)} |F(\ga)|^2 d\pi_{ m}(\ga). $$ From Lemma \ref{V} it follows that $U_g^\ast = U_{g^{-1}}.$
We need only to check irreducibility, which shall follow from the ergodicity of the Poisson measure \cite{VGG75}. But to this end we first of all need to define the action of
the group $\Aff$ on sets from $\B(\Ga(\X))$. As we pointed out above, we cannot define this
action pointwise. But we can define the action of the operators $V_g$ on the indicators $1_A(\ga)$ for
$A\in \B(\Ga(\X))$. Namely, for a given $A$ we take a sequence of cylinder sets $A_n, n\in \N$, such that
$$
\pi_{m}(A\Delta A_n) \to 0, n\to \infty.
$$
Then
$$
U_g 1_{A_n} =V_g 1_{A_n} (R(g^{-1} ,\cdot) )^{1/2} \to G (R(g^{-1} ,\cdot) )^{1/2} \in L^2(\pi_m), n\to\infty
$$
in $L^2(\pi_m)$. Each $V_g 1_{A_n} $ is an indicator of a cylinder set and
$$
V_g 1_{A_n} \to G \;\; \pi_m - a.s., n\to \infty.
$$
Therefore,
$G(\ga)\in\{0,1\}$ for $\pi_m$-a.e.\ $\ga$, i.e., $G$ is $\pi_m$-a.s.\ the indicator of a measurable set. We denote this function by $V_g 1_A$.
For the proof of the ergodicity of the measure $\pi_m$ w.r.t. $\Aff$ we need to show the following fact:
for any $A\in \B(\Ga(\X))$ such that $V_g 1_A = 1_A$ $\pi_m$-a.s.\ for all $g\in\Aff$, we have $\pi_m(A)= 0$
or $\pi_m(A)= 1$.
First of all, we will show that for any pair of sets $A_1, A_2 \in \B(\Ga(\X))$ with $\pi_m(A_1)>0,\;\;
\pi_m(A_2) >0$ there exists $g\in\Aff$ such that
\begin{equation}
\label{ineq}
\int_{\Ga(\X)} 1_{A_1} V_g 1_{A_2} d\pi_m \geq \frac{1}{2} \pi_m(A_1) \pi_m(A_2).
\end{equation}
Because any Borel set may be approximated by cylinder sets, it is enough to show this fact
for cylinder sets. But for such sets due to Lemma \ref{prod} we can choose $g\in \Aff$ such that $$
\int_{\Ga(\X)} 1_{A_1} V_g 1_{A_2} d\pi_m = \pi_m(A_1) \pi_m(A_2).
$$ Then using an approximation we will have (\ref{ineq}).
To finish the proof of the ergodicity, we consider any $A\in\B(\Ga(\X))$ such that
$$
\forall g\in \Aff\; V_g1_A = 1_A \;\;\pi_m - a.s.,\;\; \pi_m(A)>0.
$$
We will show that then $\pi_m(A)= 1$. Assume $\pi_m(\Ga\setminus A) >0$.
Due to the statement above, there exists $g\in \Aff$ such that
$$
\int_{\Ga(\X)} 1_{\Ga\setminus A}\, V_g 1_A \, d\pi_m >0.
$$
But due to the invariance of $1_A$ it means
$$
\int_{\Ga(\X)} 1_{\Ga\setminus A} 1_A d\pi_m >0
$$
which is impossible. \end{proof}
\end{document}
\begin{document}
\author[M. El Bachraoui and J. S\'{a}ndor]{Mohamed El Bachraoui and J\'{o}zsef S\'{a}ndor}
\address{Dept. Math. Sci,
United Arab Emirates University, PO Box 15551, Al-Ain, UAE}
\email{[email protected]}
\address{Babes-Bolyai University, Department of Mathematics and Computer Science, 400084 Cluj-Napoca, Romania}
\email{[email protected]}
\keywords{$q$-trigonometric functions; $q$-digamma function; transcendence.} \subjclass{33B15, 11J81, 33E05, 11J86}
\begin{abstract} We evaluate some finite and infinite sums involving $q$-trigonometric and $q$-digamma functions. Upon letting $q$ approach $1$, one obtains corresponding sums for the classical trigonometric and the digamma functions. Our key argument is a theta product formula of Jacobi and Gosper's $q$-trigonometric identities.
\end{abstract}
\date{\textit{\today}}
\maketitle
\section{Introduction}\label{sec-introduction}
Throughout we let $\tau$ be a complex number in the upper half plane and let $q=e^{\pi i\tau}$. Note that the assumption $\mathrm{Im}(\tau)>0$ implies that $|q|<1$. The $q$-shifted factorials of a complex number $a$ are defined by \[ (a;q)_0= 1,\quad (a;q)_n = \prod_{i=0}^{n-1}(1-a q^i),\quad (a;q)_{\infty} = \lim_{n\to\infty}(a;q)_n. \]
For convenience we write \[ (a_1,\ldots,a_k;q)_n = (a_1;q)_n\cdots (a_k;q)_n,\quad (a_1,\ldots,a_k;q)_{\infty} = (a_1;q)_{\infty} \cdots (a_k;q)_{\infty}. \] The $q$-gamma function is given by \[
\Gamma_q(z) = \dfrac{(q;q)_\infty}{(q^{z};q)_\infty} (1-q)^{1-z} \quad (|q|<1) \] and it is well-known that $\Gamma_q (z)$ is a $q$-analogue for the gamma function $\Gamma (z)$, see
\cite{Andrews-Askey-Roy, Askey, Gasper-Rahman, Jackson-1, Jackson-2} for details on the function $\Gamma_q(z)$. The digamma function $\psi(z)$ and the $q$-digamma function $\psi_q (z)$ are given by \[
\psi(z) = \big(\log\Gamma(z)\big)' = \frac{\Gamma'(z)}{\Gamma(z)} \quad\text{and\quad} \psi_q (z) = \big(\log\Gamma_q(z)\big)' = \frac{\Gamma_q '(z)}{\Gamma_q (z)}. \] By Krattenthaler and Srivastava~\cite{Krattenthaler-Srivastava} one has $\lim_{q\to 1} \psi_q(z) = \psi(z)$, showing that the function $\psi_q(z)$ is the $q$-analogue for
the function $\psi (z)$. Jacobi's first theta function is defined as follows: \[
\theta_1(z \mid \tau) = 2\sum_{n=0}^{\infty}(-1)^n q^{(2n+1)^2/4}\sin(2n+1)z = i q^{\frac{1}{4}}e^{-iz} (q^2 e^{-2iz},e^{2iz},q^2; q^2)_{\infty}. \] Jacobi theta functions have been extensively studied by mathematicians during the last two centuries with hundreds of properties and formulas as a result. Standard references on theta functions include Lawden~\cite{Lawden} and Whittaker~and~Watson~\cite{Whittaker-Watson}. Among the well-known properties of the function $\theta_1(z\mid\tau)$ which we need in this paper we have \begin{equation}\label{theta-cot}
\frac{\theta_1'(z|\tau)}{\theta_1(z|\tau)} = \cot z + 4\sum_{n=1}^{\infty}\frac{q^{2n}}{1-q^{2n}} \sin (2nz). \end{equation}
Gosper~\cite{Gosper} introduced $q$-analogues of $\sin z$ and $\cos z$ as follows \begin{equation}\label{sine-cosine-q-gamma} \begin{split} \sin_q \pi z &= q^{\frac{1}{4}} \Gamma_{q^2}^2\left(\frac{1}{2}\right) \frac{q^{z(z-1)}}{\Gamma_{q^2}(z) \Gamma_{q^2}(1-z)} \\ \cos_q \pi z &= \Gamma_{q^2}^2\left(\frac{1}{2}\right) \frac{q^{z^2}}{\Gamma_{q^2}\left(\frac{1}{2}-z \right) \Gamma_{q^2}\left(\frac{1}{2}+z\right)}. \end{split} \end{equation}
and proved that
\begin{equation}\label{sine-cosine-theta} \begin{split}
\sin_q (z) = \frac{\theta_1(z\mid \tau')}{\theta_1\left( \frac{\pi}{2}\bigm| \tau' \right)} \qquad \text{and \quad}
\cos_q (z) = \frac{\theta_1\left( z+\frac{\pi}{2} \bigm| \tau' \right)}
{\theta_1 \left( \frac{\pi}{2} \bigm| \tau' \right)} \quad \quad (\tau' = \frac{-1}{\tau}). \end{split} \end{equation} It can be shown that $\lim_{q\to 1}\sin_q z = \sin z$ and $\lim_{q\to 1}\cos_q z = \cos z$. Moreover, from (\ref{sine-cosine-q-gamma}), one can easily verify by differentiating logarithms that $\sin_q' (z)$ is the $q$-analogue of $\sin' (z) = \cos z$ and that $\cos_q' (z)$ is the $q$-analogue of $\cos' (z) = -\sin z$. We mention that there are known other examples of $q$-analogues for the functions $\sin z$ and $\cos z$, see for instance the book by Gasper~and~Rahman~\cite{Gasper-Rahman}. A function which is very important for our current purpose is \begin{equation}\label{Cotan-q}
\Ct_q(z) = \frac{\sin_q' z}{\sin_q z} \end{equation}
for which we clearly have $\lim_{q\to 1} \Ct_q(z) = \cot z$. In addition, by taking in (\ref{sine-cosine-q-gamma}) logarithms and differentiating with respect to $z$ we get \begin{equation}\label{reflection} \psi_{q^2}(z)-\psi_{q^2}(1-z) = (2z-1)\log q - \pi\Ct_q (\pi z), \end{equation} which is the $q$-analogue of the well-known reflection formula \[ \psi(z)-\psi(1-z) = -\pi \cot(\pi z). \]
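Since all of the above objects are completely explicit, the reflection formula (\ref{reflection}) can be checked numerically directly from the definitions. The following Python/mpmath sketch is an illustration only; the values of $q$ and $z$ are arbitrary. It builds $\Gamma_q$, $\psi_q$, $\sin_q$ and hence $\Ct_q$ from (\ref{sine-cosine-q-gamma}) and compares the two sides of (\ref{reflection}).
\begin{verbatim}
# Numerical check of  psi_{q^2}(z) - psi_{q^2}(1-z) = (2z-1) log q - pi*Ct_q(pi z),
# with every function built from the q-gamma function; q, z are arbitrary values.
from mpmath import mp, mpf, qp, log, diff

mp.dps = 30
q, z = mpf('0.6'), mpf('0.3')

def gamma_q(x, base):
    # Gamma_q(x) = (q;q)_inf / (q^x;q)_inf * (1-q)^(1-x), with q replaced by base
    return qp(base, base) / qp(base**x, base) * (1 - base)**(1 - x)

def psi_q(x, base):
    return diff(lambda t: log(gamma_q(t, base)), x)

def sin_q_of_pi(x):
    # Gosper's sin_q(pi x) via the q-gamma representation
    return q**mpf('0.25') * gamma_q(mpf('0.5'), q**2)**2 * q**(x*(x - 1)) \
           / (gamma_q(x, q**2) * gamma_q(1 - x, q**2))

lhs = psi_q(z, q**2) - psi_q(1 - z, q**2)
# pi * Ct_q(pi z) equals d/dz log sin_q(pi z)
rhs = (2*z - 1) * log(q) - diff(lambda t: log(sin_q_of_pi(t)), z)
print(lhs, rhs)   # the two values should agree to high precision
\end{verbatim}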
Jacobi~\cite{Jacobi} proved that \begin{equation}\label{MainProd} \frac{(q^{2n};q^{2n})_{\infty}}{(q^2;q^2)_{\infty}^n}
\prod_{k=-\frac{n-1}{2}}^{\frac{n-1}{2}}\theta_1 \left(z+\frac{k\pi}{n} \bigm| \tau \right) = \theta_1(nz \mid n\tau), \end{equation} see also Enneper~\cite[p. 249]{Enneper}. This formula turns out to be equivalent to the following $q$-trigonometric identity of Gosper~\cite[p. 92]{Gosper}: \begin{equation}\label{SineProd} \prod_{k=0}^{n-1}\sin_{q^n}\pi \left(z+\frac{k}{n} \right) = q^{\frac{(n-1)(n+1)}{12}} \frac{(q;q^2)_{\infty}^2}{(q^n;q^{2n})_{\infty}^{2n}} \sin_q n\pi z \end{equation} which he apparently was not aware of as he stated the identity without proof or reference. Unlike many of Jacobi's results, the formula (\ref{MainProd}) seems not to have received much attention by mathematicians. This is probably due to the lack of applications. The authors recently in~\cite{Bachraoui-Sandor} offered a new proof for~(\ref{SineProd}) and as an application they established a $q$-analogue for the Gauss multiplication formula for the gamma function as well as for an identity of S\'{a}ndor~and~T\'{o}th~\cite{Sandor-Toth} for a short product on Euler gamma function. Our purpose in this note is to apply~(\ref{SineProd}) in order to evaluate finite and infinite sums involving the function $\Ct_q (z)$ along with
the functions $h_{q,M,a}(k)$ and $f_{q,M,a}(k)$ both defined on integers $k$ as follows: \begin{equation}\label{h-f} \begin{split} h_{q,M,a}(k) &= \frac{1}{\pi}\Big( (\log q)\frac{2k+a-2M}{2M}-\psi_q \big(\frac{2k+a}{2M} \big) - \psi_q \big(1-\frac{2k+a}{2M} \big) \Big) \\ f_{q,M,a}(k) &= \sum_{n=1}^{\infty}\frac{q^{\frac{2n}{M}}}{1-q^{\frac{2n}{M}}}\sin \frac{(2k+a)n\pi}{M}. \end{split} \end{equation} More specifically, we shall prove the following main results which are new, up to the authors' best knowledge. \begin{theorem}\label{thm-main-1} Let $M>1$ be an integer and let $a$ be an odd integer. Then
\noindent \emph{(a)\ } \[ \sum_{n=1}^{\infty} \frac{1}{n} \Ct_q\Big(\frac{(2n+a)\pi}{2M}\Big) = -\frac{1}{M} \sum_{k=1}^M \Ct_q\Big(\frac{(2k+a)\pi}{2M}\Big) \psi\big(\frac{k}{M}\big). \] \noindent \emph{(b)\ } The function $h_{q,M,a}(k)$ is periodic with period $M$ and we have \[ \sum_{n=1}^{\infty} \frac{h_{q,M,a}(n)}{n} = -\frac{1}{M} \sum_{k=1}^M h_{q,M,a}(k) \psi\big(\frac{k}{M}\big). \] \noindent \emph{(c)\ } The function $f_{q,M,a}(k)$ is periodic with period $M$ and we have \[ \sum_{n=1}^{\infty} \frac{f_{q,M,a}(n)}{n} = -\frac{1}{M} \sum_{k=1}^M f_{q,M,a}(k) \psi\big(\frac{k}{M}\big). \] \end{theorem}
\begin{theorem}\label{thm-main-2} Let $M$ be a positive integer and let $a$ be an odd integer. Then \begin{align*} \emph{(a)\quad } & \sum_{k=1}^M \Big( \psi_q \big( \frac{2k+a}{2M} \big) - \psi_q \big( 1- \frac{2k+a}{2M} \big) \Big) = \frac{a+1}{2} \log q. \\ \emph{(b)\quad } & \sum_{k=1}^{M}\Big( \psi_{q}\big( \frac{4k+a}{4M} \big) - \psi_{q}\big( 1-\frac{4k+a}{4M} \big) \Big) \\ & \quad = \frac{(a+2)\log q}{4} - M\pi \Ct_{q^{1/(2M)}}\big(\frac{a\pi}{4}\big) \\ & \quad = \begin{cases} \frac{(a+2)\log q}{4} -\frac{\log q}{4}\frac{\Pi_{q^{1/(4M)}}^2}{\Pi_{q^{1/(2M)}}} & \text{if\ } a\equiv 1,-3 \pmod{8} \\ \frac{(a+2)\log q}{4} +\frac{\log q}{4}\frac{\Pi_{q^{1/(4M)}}^2}{\Pi_{q^{1/(2M)}}} & \text{if\ } a\equiv -1,3 \pmod{8}, \end{cases} \\ \emph{(c)\quad } & \sum_{k=1}^{M}\Big( \psi_{q}\big( \frac{6k+a}{6M} \big) - \psi_{q}\big( 1-\frac{6k+a}{6M} \big) \Big) \\ & \quad = \frac{(a+3)\log q}{6} - M\pi \Ct_{q^{1/(2M)}}\big(\frac{a\pi}{6}\big) \\ & \quad = \begin{cases} \frac{(a+3)\log q}{6} +\frac{\log q}{3}\frac{\Pi_{q^{1/(6M)}}^{3/2}}{\Pi_{q^{1/(2M)}}^{1/2}} & \text{if\ } a\equiv 1,-5 \pmod{12} \\ \frac{(a+3)\log q}{6} -\frac{\log q}{3}\frac{\Pi_{q^{1/(6M)}}^{3/2}}{\Pi_{q^{1/(2M)}}^{1/2}} & \text{if\ } a\equiv -1,5 \pmod{12}. \\ \end{cases} \end{align*} \end{theorem}
\begin{remark}\label{rmk-main-1} By letting $q\to 1$ in Theorem~\ref{thm-main-1} and Theorem~\ref{thm-main-2} one gets related sums for the functions $\cot z$ and $\psi(z)$. For instance, from Theorem~\ref{thm-main-1}(a) we obtain for $M>1$ and odd integer $a$ \begin{equation}\label{q-to-1} \sum_{n=1}^{\infty} \frac{1}{n} \cot\Big(\frac{(2n+a)\pi}{2M}\Big) = -\frac{1}{M} \sum_{k=1}^M \cot \Big(\frac{(2k+a)\pi}{2M}\Big) \psi\big(\frac{k}{M}\big) \end{equation} and from Theorem~\ref{thm-main-2}(c) we deduce \[ \sum_{k=1}^M \Big( \psi \big( \frac{6k+a}{6M} \big) - \psi \big( 1- \frac{6k+a}{6M} \big) \Big)
= - M\pi \cot\big(\frac{a\pi}{6}\big) \] \[
= \qquad \begin{cases} - \sqrt{3} M\pi & \text{if\ } a\equiv 1,-5 \pmod{12} \\
\sqrt{3} M\pi & \text{if\ } a\equiv -1,5 \pmod{12} \\
0 & \text{if\ } a\equiv -3,3 \pmod{12}.
\end{cases}
\] \end{remark}
\begin{remark}\label{rmk-transcendence} From the well-known fact that $\cot r\pi$ is an algebraic number for any rational number $r$, together with the relation (\ref{q-to-1}), we deduce by a result of Adhikari~\emph{et al.}~\cite{Adhikari-et-al} that the sum $\sum_{n=1}^{\infty} \frac{1}{n} \cot\Big(\frac{(2n+a)\pi}{2M}\Big)$ is either zero or transcendental. A similar statement can be made about the $q$-analogue of the sum given in Theorem~\ref{thm-main-1}(b).
\end{remark} \noindent Blagouchine~\cite{Blagouchine} recently evaluated a variety of finite sums involving the digamma function and the trigonometric functions. For instance, he proved that for any positive integer $M$ \[ \sum_{k=1}^{M-1} \big(\cot\frac{k\pi}{M}\big) \psi\big(\frac{k}{M}\big) = -\frac{\pi(M-1)(M-2)}{6}. \] We have the following related contribution. \begin{theorem}\label{thm-main-3} For any integer $M>1$ and any odd integer $a$ we have \[ \sum_{k=1}^{M-1} \Big( \cot\frac{(2k+a)\pi}{2M} + \cot\frac{(2k-a)\pi}{2M} \Big) \psi\big(\frac{k}{M}\big) = - \sum_{k=1}^{M-1}\big(\cot\frac{k\pi}{M}\big) \cot\frac{(2k+a)\pi}{2M}. \] \end{theorem}
\noindent The rest of the paper is organized as follows. In Section~\ref{Sec:q-trig} we review Gosper's $q$-trigonometry and collect the facts which are needed for our discussion. In Section~\ref{sec:proof-main-1} we give the proof of Theorem~\ref{thm-main-1}, Section~\ref{sec:proof-main-2} is devoted to the proof of Theorem~\ref{thm-main-2}, and Section~\ref{sec:proof-main-3} to the proof of Theorem~\ref{thm-main-3}.
\section{Facts on Gosper's $q$-trigonometry}\label{Sec:q-trig} \noindent Just as for the functions $\sin z$ and $\cos z$, it is easy to verify that
\begin{align}\label{sine-cos-basics}
\sin_q (\frac{\pi}{2}-z)=\cos_q z,\ \sin_q \pi = 0,\ \sin_q \frac{\pi}{2} = \cos_q 0= 1, \\
\sin_q(z+\pi)= -\sin_q z=\sin_q(-z), \ \text{and\ } -\cos_q(z+\pi) = \cos_q z = \cos_q(-z), \nonumber \end{align} from which it follows that for any odd integer $a$, \begin{equation}\label{sin-cos-aux} \begin{split} \sin_{q}\frac{a\pi}{4} &= \begin{cases} \sin_q\frac{\pi}{4} & \text{if\ } a\equiv 1, 3 \pmod{8} \\ - \sin_q \frac{\pi}{4}& \text{if\ } a\equiv -1, -3 \pmod{8}, \end{cases} \\ \sin_{q}\frac{a\pi}{6} &= \begin{cases} \sin_q\frac{\pi}{6} & \text{if\ } a\equiv 1, 5 \pmod{12} \\ -\sin_q\frac{\pi}{6} & \text{if\ } a\equiv -1, -5 \pmod{12} \\ \\ 1 & \text{if\ } a\equiv 3 \pmod{12} \\ -1 & \text{if\ } a\equiv -3 \pmod{12}. \end{cases} \end{split} \end{equation} Also, by using (\ref{sine-cos-basics}) we have \begin{align}\label{special-deriv} \sin_q'\big(\frac{\pi}{2}-z \big)= -\cos_q' z,\ \cos_q'\big(z-\frac{\pi}{2}\big)= \sin_q' z,\\ - \sin_q'(\pi-z) = \sin_q' z,\ \text{and\ } -\cos_q'(\pi-z) = \cos_q' z \nonumber \end{align} where the derivatives here and in what follows are with respect to $z$. We can easily see from (\ref{special-deriv}) that for any odd integer $a$ we have \begin{equation}\label{q-sine-derive-2} \sin_q' \frac{a\pi}{2} = \cos_q' 0 = 0. \end{equation} The following $q$-constant appears frequently in Gosper's manuscript~\cite{Gosper} \[ \Pi_q = q^{\frac{1}{4}} \frac{(q^2;q^2)_{\infty}^2}{(q;q^2)_{\infty}^2}. \] Gosper stated many identities involving $\sin_q z$ and $\cos_q z$ which easily follow just from the definition and basic properties of other related functions. To mention an example, he derived that \begin{equation}\label{q-sine-derive-1} \sin_q' 0 =- \cos_q'\frac{\pi}{2} = \frac{-2 \log q}{\pi}\Pi_q. \end{equation}
On the other hand, Gosper~\cite{Gosper} using the computer facility \emph{MACSYMA} stated without proof a variety of identities involving $\sin_q z$ and $\cos_q z$ and he asked the natural question whether his formulas hold true. For instance, based on his conjectures, he stated \[ \label{q-Double-2} \tag{$q$-Double$_2$} \sin_q(2z) = \frac{\Pi_q}{\Pi_{q^2}} \sin_{q^2} z \cos_{q^2} z, \] \[ \label{q-Double-3} \tag{$q$-Double$_3$} \cos_q(2z) = (\cos_{q^2} z)^2 - (\sin_{q^2} z)^2, \] \[ \label{q-Triple-2} \tag{$q$-Triple$_2$} \sin_q(3z) = \frac{\Pi_q}{\Pi_{q^3}} (\cos_{q^3} z)^2 \sin_{q^3}z - (\sin_{q^3}z)^3, \] and \[ \label{q-Double-5} \tag{$q$-Double$_5$} \cos_q(2z) = (\cos_{q}z)^4- (\sin_{q}z)^4. \] \noindent A proof for (\ref{q-Double-2}) can be found in Mez\H{o}~\cite{Mezo-1} and proofs for (\ref{q-Double-2}) (\ref{q-Triple-2}), and (\ref{q-Double-5}) were obtained in~\cite{Bachraoui-1, Bachraoui-2, Bachraoui-3}. Proofs for other identities of Gosper can be found in~\cite{Touk-Houchan-Bachraoui, He-Zhai, He-Zhang}. Furthermore, Gosper deduced the following special values: \begin{equation}\label{sin-cos-values} \begin{split} \sin_{q^2} \frac{\pi}{4} &= \cos_{q^2} \frac{\pi}{4} = \frac{\Pi_{q^2}^{\frac{1}{2}}}{\Pi_{q}^{\frac{1}{2}}} \\ \left(\sin_{q^3}\frac{\pi}{3} \right)^3 &= \left(\cos_{q^3}\frac{\pi}{6} \right)^3 = \frac{ \left(\frac{\Pi_q}{\Pi_{q^3}} \right)^{\frac{3}{2}}}{\left(\frac{\Pi_q}{\Pi_{q^3}} \right)^2 -1} \\ \left(\sin_{q^3}\frac{\pi}{6} \right)^3 &= \left(\cos_{q^3}\frac{\pi}{3} \right)^3 = \frac{ 1}{\left(\frac{\Pi_q}{\Pi_{q^3}} \right)^2 -1}. \end{split} \end{equation} \noindent As to special values for derivatives we have the following list.
\begin{lemma} \label{lem-1-special} Let $a$ be an odd integer. Then we have \[ \begin{split} \emph{(a)\ } & \sin_{q^2}'\frac{a\pi}{4} = \begin{cases} \frac{\log q}{\pi} \frac{\Pi_{q}^{\frac{3}{2}}}{\Pi_{q^2}^{\frac{1}{2}}} & \text{if\ } a\equiv -1, 1 \pmod{8} \\ - \frac{\log q}{\pi} \frac{\Pi_{q}^{\frac{3}{2}}}{\Pi_{q^2}^{\frac{1}{2}}} & \text{if\ } a\equiv -3, 3 \pmod{8}. \end{cases} \\
\emph{(b)\ } & \sin_{q^3}'\frac{a\pi}{3} = \begin{cases} \frac{\log q}{\pi} \frac{\Pi_{q^3}^{\frac{1}{3}} (\Pi_q^2 - \Pi_{q^3}^2)^{\frac{2}{3}} (3\Pi_{q^3}^2-\Pi_q^2)} {\Pi_{q}^2 - \Pi_{q^3}^2} & \text{if\ } a\equiv -1, 1 \pmod{6} \\ -\frac{2\log q}{\pi} \Pi_q
& \text{if\ } a\equiv 3 \pmod{6} \end{cases} \\ \emph{(c)\ } & \sin_{q^3}'\frac{a\pi}{6} = \begin{cases} -\frac{2 \log q}{\pi} \frac{\Pi_{q}^{\frac{3}{2}} \Pi_{q^3}^{\frac{1}{6}} }{ (\Pi_q^2- \Pi_{q^3}^2)^{\frac{1}{3}}} & \text{if\ } a\equiv 1, -5 \pmod{12} \\ \frac{2 \log q}{\pi} \frac{\Pi_{q}^{\frac{3}{2}} \Pi_{q^3}^{\frac{1}{6}} }{ (\Pi_q^2- \Pi_{q^3}^2)^{\frac{1}{3}}} & \text{if\ } a\equiv -1, 5 \pmod{12} \\ 0 & \text{if\ } a\equiv -3, 3 \pmod{12}. \end{cases} \end{split} \] \end{lemma} \begin{proof} (a)\ From (\ref{special-deriv}) we have \begin{equation}\label{q-sine-derive-quarter} \sin_{q}' \frac{\pi}{4} = - \cos_{q}' \frac{\pi}{4}. \end{equation} On the other hand, from (\ref{q-Double-3}) we have \[ 2\cos_q' 2z = 2(\cos_{q^2} z) \cos_{q^2}' z - 2(\sin_{q^2} z) \sin_{q^2}'z, \] where if we let $z=\frac{\pi}{4}$ and use (\ref{q-sine-derive-quarter}) we deduce \[ 2\cos_q'\frac{\pi}{2} = -4 \sin_{q^2}\frac{\pi}{4} \sin_{q^2}'\frac{\pi}{4}. \] Now, combine the previous identity with (\ref{q-sine-derive-1}), (\ref{sin-cos-values}), and (\ref{q-sine-derive-quarter}) to obtain the desired identity for $\sin_{q^2}'\frac{\pi}{4}$. Finally, note that by (\ref{sine-cos-basics}) we have \[ \sin_q'\frac{a\pi}{4} = \begin{cases} \sin_q'\frac{\pi}{4} & \text{if\ } a\equiv \pm 1 \pmod{8}, \\ -\sin_q'\frac{\pi}{4} & \text{if\ } a\equiv \pm 3 \pmod{8} \end{cases} \] to complete the proof of part (a). \noindent As to part (b), from (\ref{special-deriv}), we easily find \begin{equation}\label{sixth-third} \cos_q'\frac{\pi}{6} = -\sin_q'\frac{\pi}{3} \ \text{and\ } \cos_q'\frac{\pi}{3} = -\sin_q'\frac{\pi}{6}. \end{equation} Now differentiating (\ref{q-Double-5}) we have \[ 2\cos_q' 2z = 4 (\cos_q z)^3 \cos_q'z - 4 (\sin_q z)^3 \sin_q'z. \] Then taking $z=\frac{\pi}{6}$ in the previous identity, using (\ref{sixth-third}), and simplifying yield \[ \big(2(\sin_q\frac{\pi}{6})^3 - 1 \big) \sin_q'\frac{\pi}{6} = 2 (\cos_q\frac{\pi}{6})^3 \cos_q'\frac{\pi}{6}, \] in other words, \begin{equation}\label{help1-lem-2-special} \cos_q'\frac{\pi}{6} = \frac{2(\sin_q\frac{\pi}{6})^3 - 1}{2 (\cos_q\frac{\pi}{6})^3} \sin_q'\frac{\pi}{6}. \end{equation} On the other hand, differentiate (\ref{q-Triple-2}) to derive \[ 3 \sin_q 3z = \frac{\Pi_q}{\Pi_{q^3}}\big(\sin_{q^3}'z (\cos_{q^3} z)^2 + 2\sin_{q^3}z\cos_{q^3}z\cos_{q^3}'z \big) - 3 (\sin_{q^3} z)^2 \sin_{q^3}'z, \] which for $z=\frac{\pi}{3}$ and after simplification gives \[ -\sin_q'0 = \Big(\frac{\Pi_q}{\Pi_{q^3}} (\cos_{q^3}\frac{\pi}{3})^2 - 3(\sin_{q^3}\frac{\pi}{3})^2 \Big) \sin_{q^3}'\frac{\pi}{3} + 2 \frac{\Pi_q}{\Pi_{q^3}} \sin_{q^3}\frac{\pi}{3}\cos_{q^3}\frac{\pi}{3}\cos_{q^3}'\frac{\pi}{3}. \] It follows by virtue of (\ref{help1-lem-2-special}) and with the help of (\ref{sin-cos-values}) that \[ \frac{6 \log q}{\pi}\Pi_q =\Big(\frac{\Pi_q}{\Pi_{q^3}} (\cos_{q^3}\frac{\pi}{3})^2 -3(\sin_{q^3}\frac{\pi}{3})^2 + \frac{4 \frac{\Pi_q}{\Pi_{q^3}} \big(\sin_{q^3}\frac{\pi}{3}\big)^4 \cos_{q^3}\frac{\pi}{3}} {2 \big(\sin_{q^3}\frac{\pi}{3}\big)^3 - 1} \Big) \sin_{q^3}'\frac{\pi}{3}. \] Now solving in the previous identity for $\sin_{q^3}'\frac{\pi}{3}$ and using (\ref{sin-cos-values}), after a long but straightforward calculation, we derive the desired formula for $\sin_{q^3}'\frac{\pi}{3}$. Finally, note from (\ref{sine-cos-basics}) that \[ \sin_q'\frac{a\pi}{3} = \begin{cases} \sin_q'\frac{\pi}{3} & \text{if\ } a\equiv \pm 1 \pmod{6}, \\ -\sin_q' 0 & \text{if\ } a\equiv \pm 3 \pmod{6} \end{cases} \] to complete the proof of part (b). 
The proof for part (c) is similar to the previous parts and it is therefore omitted. \end{proof}
\noindent By a combination of Lemma~\ref{lem-1-special} with (\ref{sin-cos-aux}) and (\ref{sin-cos-values}), we arrive at the main result of this section. \begin{corollary}\label{cor-special-C} Let $a$ be an odd integer. Then we have \[ \begin{split} \emph{(a)\quad } & \Ct_{q^2} \big(\frac{a\pi}{4}\big) = \begin{cases} \frac{\log q}{\pi} \frac{\Pi_{q}^2}{\Pi_{q^2}} & \text{if\ } a\equiv 1, -3 \pmod{8} \\ -\frac{\log q}{\pi} \frac{\Pi_{q}^2}{\Pi_{q^2}} & \text{if\ } a\equiv -1, 3 \pmod{8}. \end{cases} \\ \emph{(b)\quad } & \Ct_{q^3} \big(\frac{a\pi}{6}\big) = \begin{cases} -\frac{2 \log q}{\pi} \frac{\Pi_{q}^{\frac{3}{2}}}{\Pi_{q^3}^{\frac{1}{2}}} & \text{if\ } a\equiv 1, -5 \pmod{12} \\ \frac{2 \log q}{\pi} \frac{\Pi_{q}^{\frac{3}{2}}}{\Pi_{q^3}^{\frac{1}{2}}} & \text{if\ } a\equiv -1, 5 \pmod{12} \\ 0 & \text{if\ } a\equiv -3, 3 \pmod{12}. \end{cases} \end{split} \] \end{corollary}
\section{Proof of Theorem~\ref{thm-main-1}}\label{sec:proof-main-1} \noindent We need the following result of Ram~Murty~and~Saradha~\cite{Murty-Saradha} which we record as a lemma. \begin{lemma}\label{lem-MurtSara} Let $f$ be any function defined on the integers and with period $M>1$. \\ \noindent The infinite series $\sum_{n=1}^{\infty} \frac{f(n)}{n}$ converges if and only if $\sum_{k=1}^M f(k) = 0$. In case of convergence, we have \[ \sum_{n=1}^{\infty} \frac{f(n)}{n} = -\frac{1}{M} \sum_{k=1}^M f(k) \psi\big(\frac{k}{M}\big).
\]
\end{lemma} \noindent \emph{Proof of Theorem~\ref{thm-main-1}(a)}\ Let \[ f(k) = \Ct_q \Big( \frac{(2k+a)\pi}{2M} \Big) \] which is clearly well-defined on the integers and periodic with period $M$. Then, based on Lemma~\ref{lem-MurtSara}, all we need is to prove that $\sum_{k=1}^{M} f(k) = 0$. To do so, note that from (\ref{SineProd}) and the fact that $\sin_q(z+\pi) = -\sin_q z$ we find \begin{equation}\label{SineProd-2} \prod_{k=1}^{M}\sin_{q^M}\pi \left(z+\frac{k}{M} \right) = - q^{\frac{(M-1)(M+1)}{12}} \frac{(q;q^2)_{\infty}^2}{(q^M;q^{2M})_{\infty}^{2M}} \sin_q M\pi z. \end{equation} Take logarithms and differentiate with respect to $z$ to derive \[ \pi \sum_{k=1}^M \Ct_{q^M}\Big(\pi\big(z+\frac{k}{M}\big)\Big) = M\pi\Ct_q(M\pi z)= M\pi \frac{\sin_q' M\pi z}{\sin_q M\pi z}. \] Now replace $q^M$ with $q$, let $z=\frac{a}{2M}$, and use (\ref{q-sine-derive-2}) to deduce that \begin{equation}\label{sum-Ctq-zero} \pi \sum_{k=1}^M \Ct_q \Big( \frac{(2k+a)\pi}{2M} \Big) = 0, \end{equation} or equivalently, \[ \sum_{k=1}^{M} f(k) = 0, \] as desired.
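The vanishing sum (\ref{sum-Ctq-zero}), which is the key point of the above proof, can also be observed numerically. The Python/mpmath sketch below is an illustration only; $q$, $M$ and $a$ are arbitrary test values. It evaluates $\Ct_q$ from the $q$-gamma representation (\ref{sine-cosine-q-gamma}) and sums it over $k=1,\dots,M$.
\begin{verbatim}
# Numerical check of (sum-Ctq-zero):  sum_{k=1}^{M} Ct_q((2k+a)pi/(2M)) = 0,
# for the illustrative values q = 0.5, M = 4, a = 1.
from mpmath import mp, mpf, qp, diff, pi

mp.dps = 30
q, M, a = mpf('0.5'), 4, 1

def gamma_q(x, base):
    return qp(base, base) / qp(base**x, base) * (1 - base)**(1 - x)

def sin_q_of_pi(x):
    # Gosper's sin_q(pi x) via the q-gamma representation
    return q**mpf('0.25') * gamma_q(mpf('0.5'), q**2)**2 * q**(x*(x - 1)) \
           / (gamma_q(x, q**2) * gamma_q(1 - x, q**2))

def Ct_q_of_pi(x):
    # Ct_q(pi x) = sin_q'(pi x)/sin_q(pi x) = (1/pi) * d/dx sin_q(pi x) / sin_q(pi x)
    return diff(sin_q_of_pi, x) / (pi * sin_q_of_pi(x))

print(sum(Ct_q_of_pi(mpf(2*k + a) / (2*M)) for k in range(1, M + 1)))  # ~ 0
\end{verbatim}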
\noindent \emph{Proof of Theorem~\ref{thm-main-1}(b)}\ By the relation (\ref{sine-cosine-q-gamma}) we have \[ \sin_q\pi\big(z+\frac{k}{M}\big) = q^{\frac{1}{4}}\Gamma_{q^2}^2\left(\frac{1}{2}\right) \frac{q^{(z+\frac{k}{M})(z+\frac{k}{M} -1)}}{\Gamma_{q^2}\big(z+\frac{k}{M} \big) \Gamma_{q^2}\big(1-z-\frac{k}{M}\big)} \] which after taking logarithms and differentiating with respect to $z$ gives \[ \pi \Ct_q\pi\big(z+\frac{k}{M}\big) = (\log q) \big( 2\big(z+\frac{k}{M}\big)-1 \big) - \psi_{q^2}\big(z+\frac{k}{M}\big) - \psi_{q^2}\big(1-z-\frac{k}{M}\big). \] Replacing in the previous relation $q^2$ by $q$ and letting $z=\frac{a}{2M}$, we get \[ \Ct_{q^{1/2}}\big(\frac{(2k+a)\pi}{2M}\big) = \frac{1}{\pi}\Big( (\log q)\frac{2k+a-2M}{2M} - \psi_{q}\big(\frac{2k+a}{2M}\big) - \psi_{q}\big(1-\frac{2k+a}{2M}\big) \Big). \] As the left-hand side of the previous identity is evidently periodic with period $M$, the same holds for its right-hand side which is nothing else but $h_{q,M,a}(k)$. We now claim that $\sum_{k=1}^M h_{q,M,a}(k) = 0$. Indeed, apply~(\ref{sine-cosine-q-gamma}) to the factors in identity~(\ref{SineProd-2}), then take logarithms and finally differentiate with respect to $z$ to obtain \begin{align}\label{q-psi-key} M(\log q) & \Big(\sum_{k=1}^M 2\big(z+ \frac{k}{M}\big) -1 \Big) - \sum_{k=1}^M \Big(\psi_{q^{2M}}\big(z+\frac{k}{M}\big) - \psi_{q^{2M}}\big(1-z-\frac{k}{M}\big) \Big) \nonumber \\ & = M\pi \Ct_q(M\pi z) . \end{align} Next replace $q^{2M}$ by $q$, let $z=\frac{a}{2M}$, and use (\ref{q-sine-derive-2}) to deduce that \begin{equation}\label{help1-cor-psiq-1} \sum_{k=1}^M \Big( (\log q)\frac{2k+a-M}{2M} - \psi_{q}\big(\frac{2k+a}{2M}\big) - \psi_{q}\big(1-\frac{2k+a}{2M}\big) \Big) = 0. \end{equation} That is, \[ \sum_{k=1}^M h_{q,M,a}(k) = 0, \] and the claim is confirmed. Finally, apply Lemma~\ref{lem-MurtSara} to the function $h_q(M,a,k)$ to complete the proof of part (b).
\noindent \emph{Proof of Theorem~\ref{thm-main-1}(c)}\ Note that the function $f_{q,M,a}(k)$ is clearly periodic with period $M$. By virtue of~(\ref{sine-cosine-theta}) and~(\ref{SineProd-2}) and after taking logarithm and differentiating we find \[
\pi \sum_{k=1}^M \frac{\theta_1'\big(\pi z +\frac{k\pi}{M} | \frac{\tau'}{M}\big)}{\theta_1\big(\pi z +\frac{k\pi}{M} | \frac{\tau'}{M}\big)} = M\pi \Ct_q(M\pi z), \] which upon substituting $z$ by $\frac{a}{2M}$ and $\tau'$ by $\tau$ yields \[
\sum_{k=1}^M \frac{\theta_1'\big(\frac{(2k+a)\pi}{2M} | \frac{\tau}{M}\big)}{\theta_1\big(\frac{(2k+a)\pi}{2M} | \frac{\tau}{M}\big)} = 0. \] Now combine the foregoing identity with (\ref{theta-cot}) and the $q$-analogue of (\ref{sum-Ctq-zero}) to derive \[ \sum_{k=1}^M f_{q,M,a}(k) = \sum_{k=1}^M \sum_{n=1}^{\infty}\frac{q^{\frac{2n}{M}}}{1-q^{\frac{2n}{M}}}\sin \frac{(2k+a)n\pi}{M} =0. \] Finally apply Lemma~\ref{lem-MurtSara} to the function $f_{q,M,a}(k)$ to complete the proof.
\section{Proof of Theorem~\ref{thm-main-2}}\label{sec:proof-main-2} \noindent (a)\ If $M=1$, then the desired formula \[ \psi_q\big(1+\frac{a}{2}\big) - \psi_q\big(-\frac{a}{2}\big) = \frac{(a+1)\log q}{2} \] follows by virtue of (\ref{reflection}). If $M>1$, then an immediate consequence of (\ref{SineProd-2}) is \[ \sum_{k=1}^M \Big(\psi_{q}\big(\frac{2k+a}{2M}\big) - \psi_{q}\big(1-\frac{2k+a}{2M}\big)\Big) = \frac{(a+1) \log q}{2}, \] which is the desired relation.
\noindent (b)\ If $M=1$, the statement follows by (\ref{reflection}). Now suppose that $M>1$. Then, letting $z=\frac{a}{4M}$ in (\ref{q-psi-key}) and simplifying, we find \[ \frac{(a+2)M\log q}{2} - \sum_{k=1}^M \Big( \psi_{q^{2M}}\big(\frac{4k+a}{4M}\big) - \psi_{q^{2M}}\big(\frac{4(M-k)-a}{4M}\big)\Big) = M\pi \Ct_q\big(\frac{a\pi}{4}\big). \] Then, upon replacing $q^{2M}$ by $q$ and rearranging, this becomes \[ \sum_{k=1}^M \Big(\psi_{q}\big(\frac{4k+a}{4M}\big) - \psi_{q}\big(\frac{4(M-k)-a}{4M}\big)\Big) = \frac{(a+2)\log q}{4} - M\pi \Ct_{q^{1/(2M)}}\big(\frac{a\pi}{4}\big), \] which is the first identity of this part. As to the second formula, simply use Corollary~\ref{cor-special-C} to evaluate the right-hand side of the previous identity and rearrange in the appropriate way.
\noindent (c)\ If $M=1$, the statement follows by (\ref{reflection}). Now suppose that $M>1$. Let in (\ref{q-psi-key}) $z=\frac{a}{6M}$ and simplify to obtain \[ \frac{(a+3)M\log q}{3} - \sum_{k=1}^M \Big(\psi_{q^{2M}}\big(\frac{6k+a}{6M}\big) - \psi_{q^{2M}}\big(\frac{6(M-k)-a}{6M}\big)\Big) = M\pi \Ct_q\big(\frac{a\pi}{6}\big). \] Then replace in the foregoing formula $q^{2M}$ by $q$ and rearrange to get \[ \sum_{k=1}^M \Big(\psi_{q}\big(\frac{6k+a}{6M}\big) - \psi_{q}\big(\frac{6(M-k)-a}{6M}\big)\Big) = \frac{(a+3)\log q}{6} - M\pi \Ct_{q^{1/(2M)}}\big(\frac{a\pi}{6}\big). \] This establishes the first formula of this part. Finally apply Corollary~\ref{cor-special-C} to complete the proof.
\section{Proof of Theorem~\ref{thm-main-3}}\label{sec:proof-main-3} \noindent In our proof we shall appeal to a result of Weatherby~\cite{Weatherby}, for which we need the following notation. For any real number $\alpha$ and any positive integer $l$, let \[ A_{\alpha,l} := \frac{(-1)^{l-1} \big(\pi\cot(\pi \alpha)\big)^{(l-1)}}{\pi^l(l-1)!} \] and let \[ Z(l) = \begin{cases} 0 & \text{if $l$\ is odd,} \\ \frac{\zeta(l)}{\pi^l} & \text{otherwise.} \end{cases} \] Notice that $Z(l)\in\mathbb{Q}$ for every positive integer $l$.
\begin{lemma}\label{lem-Weatherby} Let $f$ be an algebraic valued function defined on the integers with period $M>1$ and let $l$ be a positive integer. Then \[ \sum_{n\in\mathbb{Z}\setminus\{0\}} \frac{f(n)}{n^l} = \Big(\frac{\pi}{M}\Big)^l \Big( \sum_{k=1}^{M-1} f(k) A_{\frac{k}{M},l}+ 2f(M) Z(l) \Big). \] \end{lemma}
\noindent \emph{Proof of Theorem~\ref{thm-main-3}.\ } Let \[ f(k) = \cot \frac{(2k+a)\pi}{2M}. \] Clearly $f(k)$ is well-defined on the integers since is $a$ is odd and it is periodic with period $M$. It is a well-known fact that for any rational number $r$ we have that $\cot\pi r$ is an algebraic number. Then by virtue of Lemma~\ref{lem-Weatherby} we get \begin{equation}\label{key-sum-main-3} \sum_{n\in\mathbb{Z}\setminus\{0\}} \frac{\cot\pi\big(\frac{2n+a}{2M}\big)}{n^l} =\Big(\frac{\pi}{M}\Big)^l \Big( \sum_{k=1}^{M-1} \big(\cot\pi\frac{2k+a}{2M}\big) A_{\frac{k}{M},l}+ 2f(M) Z(l) \Big), \end{equation} which for $l=1$ reduces to \begin{equation}\label{help2-cor-cot-1} \begin{split} \sum_{n\in\mathbb{Z}\setminus\{0\}} \frac{\cot \frac{(2n+a)\pi}{2M}}{n} &= \frac{\pi}{M}\sum_{k=1}^{M-1}A_{\frac{k}{M},1}\cot\frac{(2k+a)\pi}{2M} \\ &= \frac{\pi}{M}\sum_{k=1}^{M-1} \frac{1}{\pi} \big(\cot\frac{k\pi}{M} \big) \cot\frac{(2k+a)\pi}{2M}. \end{split} \end{equation} On the other hand, with the help of the $q$-analogue of Theorem~\ref{thm-main-1}(a) we deduce \[
\sum_{n=1}^{\infty}\frac{\cot \frac{(-2n+a)\pi}{2M}}{-n} = \sum_{n=1}^{\infty}\frac{\cot \frac{(2n-a)\pi}{2M}}{n} = -\frac{1}{M} \sum_{k=1}^{M} \Big(\cot\frac{(2k-a)\pi}{2M}\Big)\psi\big(\frac{k}{M}\big),
\] which implies that \[ \begin{split} \sum_{n\in\mathbb{Z}\setminus\{0\}} \frac{\cot \frac{(2n+a)\pi}{2M}}{n} &= \sum_{n=1}^{\infty}\frac{\cot \frac{(2n+a)\pi}{2M}}{n} + \sum_{n=1}^{\infty}\frac{\cot \frac{(2n-a)\pi}{2M}}{n} \\ &= -\frac{1}{M} \sum_{k=1}^{M} \psi\big(\frac{k}{M}\big)\Big(\cot\frac{(2k+a)\pi}{2M} + \cot\frac{(2k-a)\pi}{2M} \Big). \end{split} \] As \[\cot\frac{(2M+a)\pi}{2M} + \cot\frac{(2M-a)\pi}{2M} = \frac{\sin 2\pi} {\frac{1}{2}\big(\cos\frac{a\pi}{M} - \cos\frac{2M\pi}{M} \big)} = 0, \] the $M$-th term in the last summation vanishes and so we get \begin{equation}\label{help1-cor-cot-1} \sum_{n\in\mathbb{Z}\setminus\{0\}} \frac{\cot \frac{(2n+a)\pi}{2M}}{n} = -\frac{1}{M} \sum_{k=1}^{M-1} \psi\big(\frac{k}{M}\big)\Big(\cot\frac{(2k+a)\pi}{2M} + \cot\frac{(2k-a)\pi}{2M} \Big). \end{equation} Now combine (\ref{help1-cor-cot-1}) and (\ref{help2-cor-cot-1}) to obtain the desired formula.
\end{document}
\begin{document}
\title{Minimal set of local measurements and classical communication for two-mode Gaussian state entanglement quantification} \author{Luis F. Haruna, Marcos C. de Oliveira, and Gustavo Rigolin} \affiliation{Instituto de F\'\i sica ``Gleb Wataghin'', Universidade Estadual de Campinas, 13083-970, Campinas, S\~ao Paulo, Brazil.}
\begin{abstract} We develop the minimal requirements for the complete entanglement quantification of an arbitrary two-mode bipartite Gaussian state via local measurements and a classical communication channel. The minimal set of measurements is presented as a reconstruction protocol of local covariance matrices and no previous knowledge of the state is required but its Gaussian character. The protocol becomes very simple mostly when dealing with Gaussian states transformed to its standard form, since photocounting/intensity measurements define the whole set of entangled states. In addition, conditioned on some prior information, the protocol is also useful for a complete global state reconstruction. \end{abstract}
\pacs{03.67.-a, 03.67.Mn} \maketitle
Quantum communication protocols extend the information theoretical notion of channel \cite{Thomas} to the quantum domain by incorporating non-local entangled states. Those channels are generated by the preparation of a pair (or more) of quantum systems in an entangled state, which are then separated to establish non-local correlations \cite{EPR}, allowing several communication tasks otherwise unattainable via classical channels \cite{nielchu}. However, for most of the quantum protocols to work properly (deterministically) one has first to be able to prepare maximally pure entangled states and then to guarantee that those states stay pure or nearly pure during all the processing time. An important problem then arises in this whole process: One has to check the ``quality'' (the amount of entanglement and purity) of the quantum channel, while usually the only available tools for that are local measurements (operations) and one (or several) classical channel.
The quest for an optimal and general solution for this problem has generated a vast literature on the characterization of entangled states under local operations and classical communication (LOCC), either for qubits \cite{nielsen} or for continuous variable systems of the Gaussian type \cite{eisert1,eisert2}. Gaussian states (completely described by up to second order moments) are particularly important since they can be easily generated with radiation field modes. Moreover, operations that keep the Gaussian character (so-called Gaussian operations) are given by the transformations induced by linear (active and passive) optical devices (beam-splitters, phase-shifters, and squeezers) \cite{eisert2}. A particular result for this kind of state is that it is impossible to distill entanglement out of a set of Gaussian states through Gaussian operations \cite{eisert3}.
Assuming one is left with only Gaussian local operations and a classical channel (GLOCC), how is it possible to infer the quality of a quantum channel in use? For a two-mode Gaussian state one possibility is to access directly the entanglement properties of the system after a proper manipulation of the two modes \cite{marcos2,rigolin}. This procedure requires, however, that the two parties (modes) be recombined in a beam-splitter (non-local unitary operation) in which their entanglement content are transferred to local properties of one of the output modes. Another possible way is to completely reconstruct the bipartite quantum system, a resource demanding task \cite{laurat} which also requires global operations here forbidden.
In this Letter we demonstrate a minimal set of GLOCC to completely quantify the entanglement of a two-mode Gaussian state. As a bonus of this procedure one can also assess the purity of the Gaussian state and, for some particular classes of states, reconstruct the bipartite covariance matrix. The protocol consists mainly in the attainment, via local measurements, of all the symplectic invariants that allows, for example, one to test the separability of the system, to know its P-representability properties, and to quantify its entanglement content. We also show that for a particular class of Gaussian states belonging to the set of symmetric Gaussian states \cite{marcos1}, the Einstein-Podolsky-Rosen (EPR) states and general mixed squeezed states , the protocol becomes straightforward due to the relative easiness one obtains the correlation matrix elements from local measurement outcomes. Moreover, since P-representability and separability for these kind of states are equivalent, we show that for two-mode thermal squeezed states with internal noise \cite{daffer} it is possible to decide whether or not they are separable via local photon number measurements.
A two-mode Gaussian state $\rho_{12}$ is characterized by its Gaussian characteristic function $C({\bm\alpha})=e^{-\frac12{\bm{\alpha}^\dagger}{\bf V}{\bm{\alpha}} }$, where $\bm{\alpha}^\dagger=\left(\alpha_1^*, \alpha_1, \alpha_2^*, \alpha_2\right)$ are complex numbers and $a_1$ ($a_1^\dagger$) and $a_2$ ($a_2^\dagger$) the annihilation (creation) operators for parties 1 and 2, respectively \cite{comment1}. The covariance matrix \textbf{V} describing all the second order moments $V_{ij}=(-1)^{i+j}\langle v_i v_j^\dagger + v_j^\dagger v_i \rangle/2$, where $(v_1,v_2,v_3,v_4)=(a_1,a_1^\dagger,a_2,a_2^\dagger)$, is given by \begin{displaymath} \textbf{V}=\left( \begin{array}{cc} \textbf{V}_1 & \textbf{C} \\ \textbf{C}^\dagger & \textbf{V}_2 \end{array} \right) =\left(\begin{array}{cccc} n_1+ \frac{1}{2} & m_1 & m_s & m_c \\ m_1^* & n_1+\frac{1}{2} & m_c^* & m_s^* \\ m_s^* & m_c & n_2+\frac{1}{2} & m_2 \\ m_c^* & m_s & m_2^* & n_2+ \frac{1}{2} \\ \end{array} \right). \end{displaymath}
$\textbf{V}_1$ and $\textbf{V}_2$ are local Hermitian matrices while \textbf{C} is the correlation between the two parties.
Any covariance matrix must be positive semidefinite $\textbf{V}\geq\mathbf{0}$ and the generalized uncertainty principle, $\textbf{V}+(1/2)\textbf{E}\geq\mathbf{0}$, where ${\bf E}=\text{diag}(\mathbf{Z},\mathbf{Z})$ and ${\bm{Z}}=\text{diag}(1,-1)$, must hold \cite{englert}.
From local measurements on both modes of $\rho_{12}$, either through homodyne detection (see \cite{grangier} and references therein) or alternatively by employing single-photon detectors \cite{fiurasek2}, the local covariance matrices $\textbf{V}_1$ and
$\textbf{V}_2$ can be reconstructed. Remark that for the reconstruction of the global matrix \textbf{V}, and therefore the joint bipartite state, one has to obtain \textbf{C}. Obviously, global joint measurements achieved through recombination of the two parties in a beam-splitter followed by local homodyne detections are forbidden. Thus one has to deal only with local measurements whose results can be sent through classical communication channels to the other party. As we now show, there are minimal operations/measurements that can be performed locally on the system to attain $|\det\textbf{C}|$ and $\det$\textbf{V}. These quantities, together with $\det\textbf{V}_1$ and $\det\textbf{V}_2$, will be shown to be all that one needs to determine whether or not a two-mode Gaussian state is entangled as well as how much it is entangled. As it will become clear, the required set of operations is minimal in the sense that only two local measurement procedures are needed - one to characterize local covariance matrices and another to locally assess the parity of one of the modes.
First of all let us introduce an important result \cite{haruna1}. Given a two-mode Gaussian state with density operator $\rho_{12}$ and covariance matrix $\textbf{V}$ we can define the Gaussian operator
$\sigma_1 = Tr_2\left\{e^{i\pi a_2^\dagger a_2}\rho_{12}\right\},$
whose covariance matrix ${\bf\Gamma}_1$ is the Schur complement \cite{horn} of $\textbf{V}$ relative to $\textbf{V}_2$: \begin{equation} {\bf \Gamma}_1=\textbf{V}_1-\textbf{C}\textbf{V}_2^{-1}\textbf{C}^\dagger. \label{schur_rel1} \end{equation}
The meaning of $\sigma_1$ is best appreciated through a partial trace in the Fock basis:
$\sigma_1=\sum_{n_{even}} \!_2\langle n|\rho_{12}|n\rangle_2 -
\sum_{n_{odd}}\!_2\langle n|\rho_{12}|n\rangle_2=\rho_{1_{e}}-\rho_{1_{o}}$, i.e., the difference between Alice's mode states conditioned, respectively, on even and odd parity measurement results by Bob \cite{haruna1}. While $\rho_{1_e}$ and $\rho_{1_o}$ are not generally Gaussian, $\sigma_1$ is a Gaussian operator, and ${\bf\Gamma}_1$ can be built with only second order moments of these conditioned states.
Now suppose that Alice and Bob share many copies of a two-mode Gaussian state. The protocol works as follows: ({\it i}) Firstly, in a subensemble of the copies, each party performs a set of local measurements in such a manner as to obtain the covariance matrices $\textbf{V}_1$ and $\textbf{V}_2$, corresponding to the reduced operators $\rho_1=Tr_2\{\rho_{12}\}$ and $\rho_2=Tr_1\{\rho_{12}\}$; ({\it ii}) Then Bob informs Alice, via a classical communication channel, of the matrix elements of $\textbf{V}_2$; ({\it iii}) After that, for the remaining copies, Bob performs parity measurements on his mode, letting Alice know to which copies that operation corresponds and the respective outcomes, i.e. even parity (eigenvalue 1) or odd parity (eigenvalue -1); ({\it iv}) Alice then separates her copies into two groups, the even ($e$) and the odd ($o$) ones. The first group ($e$) contains all the copies conditioned on an even parity measurement on Bob's copies. The other one ($o$) contains all the remaining copies, namely those conditioned on an odd parity measurement at Bob's; ({\it v}) For each group, Alice measures the respective correlation matrices $\textbf{V}_{1e}$ and $\textbf{V}_{1o}$; ({\it vi}) Finally, she obtains $\bf\Gamma_1$ (Eq. (\ref{schur_rel1})) by subtracting the odd correlation matrix from the even one \cite{haruna1}: ${\bf\Gamma}_1=\textbf{V}_{1e} - \textbf{V}_{1o}$. Remarkably, with $\textbf{V}_1$, $\textbf{V}_2$ and ${\bf\Gamma}_1$ in hand, Alice is able to completely characterize the Gaussian state's entanglement content as well as its purity without any global or non-local measurements.
Remembering that a two-mode Gaussian state's purity $\mathcal{P}$ is equal to $1/(4\sqrt{\det\mathbf{V}})$ \cite{adesso} and using the identity \cite{horn}
\begin{equation} \det\textbf{V}=\det\textbf{V}_2\det{\bf\Gamma}_1, \label{detV} \end{equation}
Alice readily obtains the purity of the channel: $\mathcal{P}= 1/(4\sqrt{\det\mathbf{V}_2\det{\bf\Gamma}_1}).$
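The determinant identity (\ref{detV}) is the standard Schur complement factorization of a block matrix, and it can be confirmed numerically in a few lines. The sketch below is purely illustrative: the matrix is a random positive definite Hermitian matrix rather than a physical covariance matrix.
\begin{verbatim}
# Check of det(V) = det(V2) * det(Gamma_1),  Gamma_1 = V1 - C V2^{-1} C^dagger,
# for a random Hermitian positive definite 4x4 matrix (illustrative only).
import numpy as np

rng = np.random.default_rng(7)
A = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))
V = A @ A.conj().T + 4 * np.eye(4)            # Hermitian and positive definite

V1, C, V2 = V[:2, :2], V[:2, 2:], V[2:, 2:]
Gamma1 = V1 - C @ np.linalg.inv(V2) @ C.conj().T

print(np.linalg.det(V).real)
print((np.linalg.det(V2) * np.linalg.det(Gamma1)).real)   # equal up to rounding
\end{verbatim}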
Her next task is to decide whether or not she deals with an entangled two-mode Gaussian state. Using the Simon separability \cite{simon} test she knows that it is not entangled if, and only if,
\begin{equation}
I_1I_2 + \left( 1/4 - |I_3|\right)^2 - I_4 \geq (I_1 + I_2)/4, \label{separabilidade} \end{equation}
where $I_1=\det\mathbf{V_1}$, $I_2=\det\mathbf{V_2}$, $I_3=\det\mathbf{C}$, and $I_4=\text{tr}(\mathbf{V_1}\mathbf{Z}\mathbf{C}\mathbf{Z} \mathbf{V}_2\mathbf{Z}\mathbf{C^\dagger}\mathbf{Z})$.
These four quantities are the local symplectic invariants, belonging to the $Sp(2,R) \otimes Sp(2,R)$ group \cite{simon}, that characterize all the entanglement properties of a two-mode Gaussian state. Alice already has $I_1$ and $I_2$. We must show, however, how she can obtain $|I_3|$ and $I_4$. Since one can prove that \cite{rigolin}
\begin{equation}
I_4 = 2|I_3|\sqrt{I_1I_2}, \label{I4} \end{equation}
we just need to show how $|I_3|$ is obtained from $I_1$, $I_2$, and $I_V=\det\mathbf{V}$, the three pieces of information locally available to Alice. To achieve this goal we first note that a direct calculation gives $I_V = I_1I_2 - I_4 + I_3^2$. Using Eq.~(\ref{I4}) we see that $|I_3|$ follows from
$|I_3|^2 - 2 |I_3| \sqrt{I_1I_2} + I_1I_2 - I_V = 0.$
One of its roots is not acceptable since it implies $\mathbf{V}< 0$. Therefore, we are left with
\begin{equation}
|I_3|=\sqrt{I_1I_2} - \sqrt{I_V}. \label{I3} \end{equation}
Hence, substituting Eqs.~(\ref{I4}) and (\ref{I3}) in Eq.~(\ref{separabilidade}), Alice is able to unequivocally tell whether or not she shares an entangled two-mode Gaussian state with Bob.
Finally, if her state is entangled then $I_3<0$ \cite{simon} and, for a symmetric state ($I_1=I_2$), Alice can quantify its entanglement via the entanglement of formation ($E_f$) \cite{Gie03,Rig04}:
\begin{equation}
E_f(\rho_{12}) = f\left(2 \sqrt{I_1+|I_3|-\sqrt{I_4 + 2 I_1
|I_3|}}\right), \label{ef} \end{equation}
where $f(x)=c_+(x)\log_2(c_+(x)) - c_-(x)\log_2(c_-(x))$ and $c_{\pm}(x)=(x^{-1/2}\pm x^{1/2})^2/4$. For arbitrary two-mode Gaussian states ($I_1\neq I_2$) Alice can work with lower bounds for $E_f$ \cite{Rig04} or calculate its negativity or logarithmic negativity \cite{vidal}. These last two quantities are the best entanglement quantifiers for non-symmetric two-mode Gaussian states and are given as analytical functions \cite{adesso,adesso2} of the four invariants here obtained from local measurements:
$I_1$, $I_2$, $|I_3|=\sqrt{I_1I_2} - \sqrt{I_V}$, and
$I_4=2|I_3|\sqrt{I_1I_2}$, with $I_V=\det\mathbf{V}$ given by Eq.~(\ref{detV}). It is worth mentioning that $I_1$ ($I_2$) can easily be determined by the measurement of the purity (Wigner function at the origin of the phase space) of Alice's (Bob's) mode alone \cite{fiurasek2,ban}. This measurement is less demanding than the ones required to reconstruct ${\bf V}_1$ and $\mathbf{V}_2$ \cite{rigolin}.
Besides furnishing all the entanglement properties of an arbitrary two-mode Gaussian state, the previous local protocol can also be employed to reconstruct the covariance matrix for some particular types of Gaussian states. To see this, let ${\bf\Gamma}_1$ be explicitly written as
\begin{equation} {\bf \Gamma}_1=\left( \begin{array}{cc} \eta_1 + \frac{1}{2}& \mu_1 \\ \mu_1^* & \eta_1 + \frac{1}{2}\end{array} \right), \end{equation} where
\begin{eqnarray} \eta_1 &=& \langle a_1^\dagger a_1\rangle_{e}-\langle a_1^\dagger a_1\rangle_{o}, \label{eta1}
\\ \mu_1 &=& \langle a_1^2\rangle_{e}-\langle a_1^2\rangle_{o}, \quad \mu_1^*=\langle (a_1^\dagger)^2\rangle_{e}-\langle (a_1^\dagger)^2\rangle_{o}, \label{mi1} \end{eqnarray}
where $\langle \cdot \rangle_e$ and $\langle \cdot \rangle_o$ are the mean values for Alice's even and odd subensembles, respectively. From this identity it is clear that ${\bf\Gamma}_1$ does not necessarily represent a physical state, since $\eta_1$ can take negative values \cite{haruna1}. From Eq. (\ref{schur_rel1}) we obtain the following two relations,
\begin{eqnarray} n_1-\eta_1&=&\frac{1}{\left(n_2+\frac{1}{2}\right)^2-\vert m_2\vert^2} \left\{\!\!\left(\vert m_c\vert^2\!\!+\vert m_s\vert^2\right)\!\!\left(\!n_2\!+\!\frac{1}{2}\!\right)\right.\nonumber\\ &&\left. -2\Re e(m_2m_sm_c^*)\right\}, \label{eq1}\\ m_1-\mu_1&=&\frac{1}{\left(n_2+\frac{1}{2}\right)^2-\vert m_2\vert^2} \left\{2m_sm_c\left(n_2+\frac{1}{2}\right)\right.\nonumber\\ &&\left.-m_2^*m_c^2-m_2m_s^2\right\}. \label{eq2} \end{eqnarray}
Eqs.~(\ref{eq1}) and (\ref{eq2}) give the matrix elements of $\mathbf{\Gamma_{1}}$ as a function of the matrix elements of $\mathbf{V}$. If $m_c$ and $m_s$ are real (if either $m_c$ or $m_s$ is zero) Eqs.~(\ref{eq1}) and (\ref{eq2}) can be inverted to give $m_c$ and $m_s$ (either $m_s$ or $m_c$).
Let us explicitly solve the previous equations for an important case, namely the ones in which $\textbf{C}\textbf{C}^\dagger=\vert m_i\vert^2\textbf{I}$, where $i=c$ or $s$ and \textbf{I} is the identity matrix. The states comprehending this class are the ones where \textbf{C} has only diagonal or non diagonal elements, i.e., $m_s=0$ and $m_c\neq0$ or $m_c=0$ and $m_s\neq0$, reducing the unknown quantities to two, namely the absolute value and the phase of $m_s$ or $m_c$. Remark that if $i=s$ the system is separable, since $\det\textbf{C}=\vert m_s\vert^2\geq0$, i.e., the correlation between the two modes is strictly classical \cite{simon}. Otherwise, if $i=c$ the state is not necessarily separable, possibly being entangled, for in this case $\det\textbf{C}=-\vert m_c\vert^2\leq0$. This last case is more interesting since it represents a class of states that might show non-local features \cite{simon}.
From Eqs. (\ref{eq1}) and (\ref{eq2}) the diagonal (off-diagonal) elements of \textbf{C}, $m_i=\vert m_i\vert e^{i\phi_i}$, for $i=s$ ($i=c$), are
\begin{eqnarray} \vert m_i\vert^2&=&\frac{(n_1-\eta_1)}{n_2+1/2} \left[\left(n_2+ 1/2 \right)^2 -\vert m_2\vert^2\right], \label{mc}\\ e^{2i\phi_i}&=&\left(\frac{\mu_1-m_1}{n_1-\eta_1}\right) \frac{n_2+ 1/2}{m_{2i}}, \end{eqnarray} where $m_{2c}=m_2^*$ and $m_{2s}=m_2$.
Note that whenever $m_2=0$, $\phi_i$ becomes undetermined. This problem can be solved by locally (unitarily) transforming the two-mode state so that the new local block $V'_2$ has $m'_2\neq 0$, in which case $\phi'_i$ can be determined. Then, transforming back, we get $\phi_i$. Fortunately, there are various experimentally available bipartite Gaussian states in which all the parameters are real, $m_s(m_c)=m_1=m_2=0$, and $m_c(m_s)\neq 0$. For these states, Eq. (\ref{mc}) is sufficient to determine $\mathbf{C}$.
A natural and important example belonging to this class is the two-mode thermal squeezed state \cite{daffer}, which is generated in a nonlinear crystal with internal noise. Its covariance matrix is \begin{equation} \textbf{V}=\left( \begin{array}{cccc} n+ \frac{1}{2} & 0 & 0 & m_c \\ 0 & n+ \frac{1}{2} & m_c & 0 \\ 0 & m_c & n+ \frac{1}{2} & 0 \\ m_c & 0 & 0 & n+ \frac{1}{2} \\ \end{array} \right), \label{cov_esp} \end{equation}
where $n$ and $m_c$ are time dependent functions having as parameters the relaxation constant of the bath as well as the nonlinearity of the crystal \cite{daffer}. In this case the protocol involves only simple local measurements, i.e., those to get $n$, $\langle a_1^\dagger a_1\rangle_{e}$ and $\langle a_1^\dagger a_1\rangle_{o}$ (or equivalently $\eta_1$) by Alice, and the parity measurements by Bob. The classical communication corresponds to Bob informing Alice the instances he performs the parity measurement in his mode and the respective outcomes. Hence, Eq.~(\ref{mc}) reduces to
\begin{equation} m_c^2=(n-\eta_1)\left(n+ 1/2\right). \label{mc_esp} \end{equation}
Experimentally, $n$ and $\eta_1$ (Eq.~(\ref{eta1})) are readily obtained by photodetection, while the parity measurement is related to the determination of Bob's mode Wigner function at the origin of the phase-space \cite{davidovich}, or alternatively to his mode's purity, both of which can be measured by photocounting experiments \cite{fiurasek2,ban}.
We can also study the P-representability \cite{footnote2} for the state (\ref{cov_esp}), which in this case is equivalent to the Simon separability test \cite{simon,marcos1}. A two-mode Gaussian state is P-representable iff
$\textbf{V}-\frac{1}{2}\textbf{I}\geq0$,
where \textbf{I} is the $4\times 4$ identity matrix. Explicitly, this separability condition
in terms of the elements of (\ref{cov_esp}) is equivalent to $n\geq |m_c|$. From this inequality and Eq.~(\ref{mc_esp}) we see that for a given $n$ there exists a bound for $\eta_1$ below which the states are entangled (upper solid curve in Fig. \ref{fig1}):
\begin{equation} -\frac{n/2}{n+1/2}\le\eta_1\le\frac{n/2}{n+1/2}. \label{ineq} \end{equation}
The left bound in Eq. (\ref{ineq}) (lower solid curve in Fig. 1) is a consequence of the uncertainty principle, delimiting the set of all physical symmetric Gaussian states (SGS). This bound is marked by all the pure states and the upper curve bounds (from below) the subset of all separable (P-representable) states \cite{marcos1}. Thus, for the SGS class, photon number measurements, before and after Bob's parity measurements, are all Alice needs to discover whether or not her mode is entangled with Bob's. The exquisite symmetry of those two antagonistic bounds is quite surprising, and possibly valid only for the SGS class. There is another interesting feature for the SGS set that should be emphasized. Note that $\eta_1=0$ contains all the states where Bob has equal chances of getting even or odd outcomes for his parity measurements, delimiting two subsets (even and odd). The even subset contains all the states where Bob has greater probabilities of getting even outcomes while the odd subset contains all the states where he has greater probabilities of getting odd outcomes. The entanglement for states belonging to the SGS can be quantified through $E_f$ (Eq. (\ref{ef})) as depicted by the color scale in Fig.~\ref{fig1}. It is remarkable that the most entangled states (including the pure ones) are concentrated in the $\eta_1<0$ odd subset.
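The equivalence between the separability condition $n\geq|m_c|$ and the upper bound in (\ref{ineq}) is elementary algebra once Eq.~(\ref{mc_esp}) is used; a short symbolic check (illustrative only) is the following.
\begin{verbatim}
# With m_c^2 = (n - eta1)(n + 1/2), the condition n >= |m_c| is equivalent to
# eta1 >= (n/2)/(n + 1/2):  n^2 - m_c^2 = (n + 1/2)*(eta1 - (n/2)/(n + 1/2)).
import sympy as sp

n, eta1 = sp.symbols('n eta1', real=True)
mc2 = (n - eta1) * (n + sp.Rational(1, 2))
identity = n**2 - mc2 - (n + sp.Rational(1, 2)) * (eta1 - (n/2)/(n + sp.Rational(1, 2)))
print(sp.simplify(identity))   # prints 0
\end{verbatim}
Since $n\geq 0$, the condition $n\geq|m_c|$ is equivalent to $n^2\geq m_c^2$, and since $n+1/2>0$ the factorization above shows that this is equivalent to $\eta_1\geq (n/2)/(n+1/2)$.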
\begin{figure}
\caption{(Color online) Above the upper solid curve lie the separable states. Below it, entanglement is quantified via
$E_f$ (Eq.~(\ref{ef})) up to the lower curve, where the pure entangled states are located. Below this curve there exist no physical states.}
\label{fig1}
\end{figure}
In conclusion, we have presented the minimal set of local operations and classical communication that allows one to quantify the entanglement of an arbitrary two-mode Gaussian state. One important step towards the derivation of this protocol was the mathematical identity relating the two-mode covariance matrix determinant to the product of two local quantities, namely the determinants of the one-mode correlation matrix and its Schur complement. In addition, we have also shown that the Schur complement of one of the modes' covariance matrix is obtained via a set of parity measurements on the other one. We have also explicitly discussed how the protocol works for a particular class of Gaussian states belonging to the SGS set. Within this class, for states written in its standard form, we have shown that only photon number measurements (made before and after a parity measurement on the other mode) are needed to completely characterize the state's entanglement.
\begin{acknowledgments} This work is supported by FAPESP and CNPq. \end{acknowledgments}
\appendix
\section{Erratum}
Eq. (4), on page 2, of [Phys. Rev. Lett. 98, 150501 (2007)] is not as general as we previously thought. However, our scheme does not rely on it, as we show in what follows.
The experimental proposal we presented in our Letter allows one to locally obtain the matrices $\mathbf{V_1}, \mathbf{V_2}$, and $\mathbf{\Gamma_1}$, without assuming any particular form for the covariance matrix $\mathbf{V}$. We now show that with these three matrices we can determine the four invariants that completely characterize the entanglement content of a two-mode Gaussian state. The first two invariants are
\begin{equation} I_1=\text{det}(\mathbf{V_1}), \hspace{1cm} I_2=\text{det}(\mathbf{V_2}). \end{equation}
The third one is calculated remembering that
$ \mathbf{\Gamma_1} = \mathbf{V_1} - \mathbf{C}\mathbf{V_2^{-1}}\mathbf{C^{\dagger}}. $
Simple algebra applied to the previous expression gives
$ \mbox{det}\left( \mathbf{V_1} - \mathbf{\Gamma_1}\right)=\mbox{det}(\mathbf{C})\mbox{det}(\mathbf{V_2^{-1}}) \mbox{det}(\mathbf{C^{\dagger}}). $
But
$\mbox{det}(\mathbf{C}) = \mbox{det}(\mathbf{C^{\dagger}}) = I_3$ and $\mbox{det}(\mathbf{V_2^{-1}}) = 1/\mbox{det}(\mathbf{V_2}) = 1/I_2$.
Hence
\begin{eqnarray}
|I_3|&=&\sqrt{I_2\,\mbox{det}\left( \mathbf{V_1} - \mathbf{\Gamma_1}\right)}. \end{eqnarray}
Furthermore, $\mathbf{\Gamma_1}$ satisfies another mathematical identity,
$ I_V=\text{det}\mathbf{V} = \text{det}{\mathbf{V_2}}\text{det}{\mathbf{\Gamma_1}}. $
Therefore, since we have $\mathbf{\Gamma_1}$ and $\mathbf{V_2}$, we can also obtain $I_V$. But $I_V$ is related to the other four invariants by the following expression,
$ I_V = I_1I_2 - I_4 + I_3^2. $
Thus, the fourth invariant is simply
\begin{equation} I_4 = I_1I_2 + I_3^2 - I_2\,\text{det}{\mathbf{\Gamma_1}}. \end{equation}
Using $I_1$, $I_2$, $|I_3|$, and $I_4$, as obtained above with the knowledge of $\mathbf{V_1}$, $\mathbf{V_2}$, and $\mathbf{\Gamma_1}$, we can apply the Simon separability test (Eq. (3) of our Letter). If a two-mode Gaussian state is entangled we know for sure that $I_3<0$ and we can, therefore, fully quantify its entanglement either via the entanglement of formation or the negativity/logarithmic negativity, as discussed in our Letter.
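For concreteness, the recipe above can be condensed into a few lines of code. The following is only an illustrative sketch: the matrices below are arbitrary placeholder values rather than data from any experiment, and in the actual protocol $\mathbf{\Gamma_1}$ would be obtained from the local measurements rather than computed from $\mathbf{C}$ as done here.
\begin{verbatim}
import numpy as np

# Placeholder 2x2 blocks of a two-mode covariance matrix (illustrative only).
V1 = np.array([[1.5, 0.0], [0.0, 1.5]])
V2 = np.array([[1.5, 0.0], [0.0, 1.5]])
C  = np.array([[1.0, 0.0], [0.0, -1.0]])

# In the protocol Gamma1 is measured locally; here it is built for the example.
G1 = V1 - C @ np.linalg.inv(V2) @ C.T          # Schur complement of V2 in V

I1     = np.linalg.det(V1)
I2     = np.linalg.det(V2)
I3_abs = np.sqrt(I2 * np.linalg.det(V1 - G1))  # |I3| = sqrt(I2 det(V1 - Gamma1))
I_V    = I2 * np.linalg.det(G1)                # det V = det(V2) det(Gamma1)
I4     = I1 * I2 + I3_abs**2 - I_V             # from I_V = I1 I2 - I4 + I3^2

print(I1, I2, I3_abs, I4)
\end{verbatim}
The four numbers produced in this way are exactly the input needed for the separability test mentioned above.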
Finally, we must emphasize that the main result of our Letter remains unchanged: it is possible to completely characterize via local operations and classical communication (LOCC) the entanglement content of an arbitrary two-mode Gaussian state. Furthermore, all the results presented in the Letter remain valid.
We want to thank Yang Yang, Fu-Li Li and Hong-Rong Li for also calling our attention to the problems related to Eq. (4) of our Letter while this erratum was being formulated.
\end{document}
\begin{document}
\begin{abstract} Let $X$ be a topological Hausdorff space together with a continuous action of a finite group $G$. Let $R$ be the ring of integers of a number field~$F$. Let $\calE$ be a $G$-sheaf of flat $R$-modules over $X$ and let $\Phi$ be a $G$-stable paracompactifying family of supports on $X$. We show that under some natural cohomological finiteness conditions the Lefschetz number of the action of $g \in G$ on the cohomology $ \com{H}_\Phi(X,\calE) \otimes_{R} F $ equals the Lefschetz number of the $g$-action on
$ \com{H}_{\Phi|X^G}(X^g, \calE_{|X^g}) \otimes_{R} F $, where $X^g$ is the set of fixed points of $g$ in $X$. More generally, the class $\sum_j (-1)^j [H^j_\Phi (X,\calE) \otimes_R F]$ in the character group equals a sum $\sum_{[H]} \sum_{\lambda \in \widehat{H}_F} m_\lambda [\ind^G_H (V_\lambda)] $ of representations induced from irreducible $F$-rational representations $\: V_\lambda \:$ of $\: H \:,$ where $[H]$ runs in the set of $G$-conjugacy classes of subgroups of $G$. The integral coefficients $m_\lambda$ are explicitly determined. \end{abstract}
\maketitle
\section{Introduction and main results}
The most elementary classical version of the Lefschetz fixed point formula says that the Lefschetz number $\calL(g)$ of a simplicial automorphism $g$ of finite order on a finite simplicial complex $X$ equals the Euler-Poincar\'e characteristic of the fixed point set $X^g \subset X $ of the $g$-action. Here $\calL(g)$ is computed on $ H^\ast (X,\bbQ)$. Brown \cite{Brown1982} (based on Zarelua \cite{Zarelua1969}) and independently Verdier \cite{Verdier1973} have extended this formula to more general spaces under the assumption of cohomological finiteness conditions. Verdier uses cohomology with compact supports.
The objective of this paper is to generalize this Lefschetz fixed point formula to Hausdorff spaces with a continuous action of a finite group $G$ and to cohomology of $G$-sheaves with a paracompactifying family of supports. For applications of the Lefschetz fixed point formula to cohomology of arithmetic groups see e.g.~\cite{Rohlfs1990}.
\subsection{Notation}
Throughout $F$ denotes an algebraic number field and $R$ denotes its ring of integers. Let $G$ be a finite group, then $\Gzero(F[G])$ denotes the Grothendieck group of finitely generated $F[G]$-modules. For every subgroup $H$ of $G$ there is the induction homomorphism $\ind_H^G: \Gzero(F[H]) \to \Gzero(F[G])$
which maps $[M]$ to $[F[G]\otimes_{F[H]}M]$.
For $ g \in G $ the trace of the $g$-action induces a morphism $\tr(g): \Gzero(F[G]) \longrightarrow R$.
Let $Y$ be a Hausdorff space and let $\Phi$ be a family of supports on $Y$. Let $k$ be a ring and $\calE$ be a sheaf of left $k$-modules on $Y$. If the cohomology $\com{H}_\Phi(Y,\calE)$ is finitely generated as $k$-module, then we say that the triple $(Y,\Phi,\calE)$ is of \emph{finite type} with respect to $k$.
Given a sheaf $\calE$ of $R$-modules on $Y$, we write $\chi_\Phi(Y,\calE; F)$ for the Euler-Poincar\'e characteristic of the graded $F$-vectorspace $\com{H}_\Phi(Y,\calE)\otimes_R F$ whenever it is \mbox{finite} dimensional. Similarly, if a finite group $G$ acts continuously on $Y$ and $\calE$ is $G$-equivariant, then we denote the Euler-Poincar\'e characteristic of the graded $F[G]$-module $\com{H}_\Phi(Y,\calE)\otimes_R F$ in the Grothendieck group $\Gzero(F[G])$ by $\chi_\Phi(Y,\calE; F[G])$.
The image of $\chi_\Phi (Y, \calE; F[G]) $ under the morphism $\tr(g)$ is the
\emph{Lefschetz number} $ \calL_\Phi (g, \calE; F) = \sum^\infty_{j = 0} (-1)^j \tr (g| H^j _\Phi (Y ,\calE) \otimes_R F) \:.$
\subsection{Statement of results} Denote by $X$ a topological Hausdorff space together with a continuous action of a finite group $G$. We fix a paracompactifying $G$-stable family of supports $\Phi$ on $X$. For a subgroup $H \leq G$ we denote the normalizer of $H$ in $G$ by $N_G(H)$ or $N(H)$. We write $X^H$ for the set of points in $X$ which are fixed by $H$ and we write $X_H$ for the set of points in $X$ whose stabilizer is exactly the group $H$. Note that $X^H$ is closed in $X$ and $X_H$ is open in $X^H$. Let $\cla(G)$ be the set of conjugacy classes of subgroups of $G$. The paracompactifying family $\Phi$ induces paracompactifying families on the locally closed subspaces $ X_H, X^H, X_C := \bigcup_{H \in C} X_H $ for $C \in \cla(G)$ and on the quotient space $ X_H/ N(H) $. For simplicity these families will be denoted also by $\Phi$.
By $ \widehat{H}_F$ we denote the set of equivalence classes of irreducible representations of $H$ on finite dimensional $F$-vectorspaces. If $\lambda \in \widehat{H}_F$, we write $V_\lambda$ for a representative of $\lambda$. We define $\deg V_\lambda := \dim_F \bigl(\Hom_{F[H]} (V_\lambda, V_\lambda )\bigr)$.
Let $\calE$ be a $G$-sheaf of $R$-modules on $X$.
We say that $\calE$ satisfies the finiteness condition \cF if
for every subgroup $H$ of $G$ the following hold:
\begin{enumerate}
\item The triple $(X_H,\Phi, \calE_{|X_H}) $ is of finite type w.r.t.~$R$, and
\item for any $\lambda \in \widehat{N(H)}_F$
there is an $N(H)$-invariant lattice $L_\lambda$ in $V_\lambda$ such that the triple
$(X_H,\Phi,\Hom_{R[H]}\bigl(L_\lambda, \calE_{|X_H} \bigr))$ is of finite type w.r.t.~$R$.
\end{enumerate} We comment on this condition in section \ref{sec:Comments}.
\begin{theorem} Let $X$, $G$, $\Phi$ be as above and assume that the cohomological $\Phi$-dimension of $X$ is finite. Let $ \calE$ be a $G$-sheaf of flat $R$-modules such that condition \cF holds. Then \begin{align*}
\chi_\Phi\bigl(X,\calE; F[G]\bigr) &=
\sum_{[H] \in \cla(G)} \frac{|H|}{|N(H)|} \ind_H^G\Bigl(\chi_{\Phi}\bigl(X_H, \calE_{|X_H}; F[H]\bigr)\Bigr)\\
& =
\sum_{[H] \in \cla(G)} \sum_{\lambda \in \widehat{H}_F}\frac{|H|\cdot e(\lambda)}{|N(H)| \cdot \deg(V_\lambda)} \ind_H^G\bigl([V_\lambda]\bigr), \end{align*}
where $e(\lambda)$ denotes the Euler characteristic
$\chi_{\Phi}\bigl(X_H, \Hom_{R[H]}(M_\lambda, \calE_{|X_H}); F\bigr)$ for any
$H$-stable $R$-lattice $M_\lambda \subset V_\lambda$. \end{theorem} A proof of the theorem will be given in the next section.
\begin{corollary}\label{cor:letg}
Let $G$ be the finite cyclic group generated by an element $g$. Under the assumptions of the theorem we obtain
an equality of Lefschetz numbers
\begin{equation*}
\calL_\Phi\bigl(g,\calE; F\bigr) = \calL_{\Phi}\bigl(g, \calE_{|X^G}; F \bigr).
\end{equation*}
\end{corollary} \begin{proof}
Use that for
$ G = \langle g \rangle $ we have $ \tr (g | \ind^G_H V) = 0 \:$ for all finite dimensional $ F[H]$-modules $V$ if
$ H \neq G $ and that $ X_G = X^G $. \end{proof}
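As an elementary sanity check of the corollary with constant coefficients (this example plays no role in what follows), take $X=S^2$, let $\calE = R$ be the constant sheaf, let $\Phi$ consist of all closed subsets, and let $g$ be a rotation of finite order about a fixed axis. The rotation preserves the orientation and therefore acts trivially on $H^0_\Phi(X,\calE)\otimes_R F$ and on $H^2_\Phi(X,\calE)\otimes_R F$, while $X^g$ consists of the two poles. Hence
\begin{equation*}
 \calL_\Phi(g,\calE;F) = 1 + 1 = 2 = \chi_\Phi\bigl(X^g,\calE_{|X^g};F\bigr) = \calL_\Phi\bigl(g,\calE_{|X^g};F\bigr),
\end{equation*}
in accordance with the corollary.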
\section{Proof of the Theorem}
This section is devoted to the proof of the theorem. We begin with the following general Lemma. \begin{lemma}\label{lem:EulerCharModp}
Let $p$ be a prime number.
Let $\calE$ be a sheaf of abelian groups on $X$ and assume that the stalks of $\calE$ have no $p$-torsion.
If the triple $(X,\Phi,\calE)$ is of finite type (w.r.t.~$\bbZ$) then the triple
$(X,\Phi,\calE\otimes_\bbZ \bbF_p)$ is of finite type w.r.t.~$\bbF_p$.
Moreover we have
\begin{equation*}
\chi_\Phi(X,\calE; \bbQ) = \chi_\Phi(X,\calE\otimes_\bbZ \bbF_p; \bbF_p).
\end{equation*} Here $\bbF_p$ denotes the finite field with $p$ elements. \end{lemma} \begin{proof}
Since $\calE$ is torsion-free, there is a short exact sequence of sheaves on $X$:
\begin{equation*}
0 \longrightarrow \calE \stackrel{p}{\longrightarrow} \calE \longrightarrow \calE\otimes_\bbZ \bbF_p \longrightarrow 0.
\end{equation*}
Consider the associated long exact sequence. We deduce that $(X,\Phi,\calE\otimes_\bbZ \bbF_p)$ is of finite type.
Further, write $H^i_\Phi(X,\calE) \cong \bbZ^{b_i} \oplus P^i \oplus T^i$, where $P^i$ is the subgroup of elements whose order is a power of $p$
and $T^i$ is the subgroup of elements of finite order prime to $p$.
Let $P^i_p$ denote the elements of order exactly $p$ and let $r_i = \dim_{\bbF_p} P^i_p$.
From the long exact sequence we obtain short exact sequences
\begin{equation*}
0 \longrightarrow H^i_\Phi(X,\calE)\otimes_\bbZ\bbF_p \longrightarrow H^i_\Phi(X,\calE\otimes\bbF_p)\longrightarrow P^{i+1}_p \longrightarrow 0
\end{equation*}
for every degree $i$. Thus $\dim_{\bbF_p} H^i_\Phi(X,\calE\otimes\bbF_p) = b_i + r_i + r_{i+1}$ and the second assertion follows
via alternating summation: the sum $\sum_i (-1)^i (r_i + r_{i+1})$ telescopes to $r_0$, and $r_0 = 0$ since $H^0_\Phi(X,\calE)$ is a group of sections of $\calE$ and therefore has no $p$-torsion. \end{proof}
Let $\pi: X \to X/G$ be the canonical projection.
Note that for a sheaf of abelian groups $\calE$ on $X$ there is a canonical isomorphism \begin{equation*}
\com{H}_\Phi(X,\calE) \isomorph \com{H}_\Phi(X/G,\pi_*(\calE)). \end{equation*}
In general, if $\calF$ is a sheaf of $R[G]$-modules on a space $Y$, then we write $\calF^G$ for the subsheaf of $G$-stable sections,
i.e.~$\calF^G(U) = \calF(U)^G$.
Let $\calE$ be a $G$-sheaf of $R$-modules on $X$. We write $\pi_*^G(\calE)$ for $\pi_*(\calE)^G$.
Note that the
triple $(X,\Phi, \calE)$ is of finite type w.r.t.~$R$ if and only if it is of finite type w.r.t.~$\bbZ$.
In this case we simply say that $(X,\Phi, \calE)$ is of finite type.
\begin{lemma}\label{lem:EulerCharCovering}
Suppose that $G$ is abelian and acts freely on $X$. Let $\calE$ be a flat $G$-sheaf of $R$-modules on $X$ such that
$(X,\Phi, \calE)$ is of finite type. In this case $(X/G, \Phi, \pi_*^G(\calE))$ is of finite type and
\begin{equation*}
\chi_\Phi(X,\calE; F) = |G| \chi_\Phi(X/G, \pi_*^G(\calE); F).
\end{equation*} \end{lemma} \begin{proof}
First note that $X/G$ has finite $\Phi$-dimension, since cohomological dimension
is a local property (cf.~ II.~4.14.1 in \cite{Godement1958}) and $\pi$ is a covering map.
Further, the triple $(X/G, \Phi , \pi_*^G(\calE))$ is of finite type due to the Grothendieck spectral sequence
\begin{equation*}
H^p(G, H^q_\Phi(X,\calE)) \implies H^{p+q}_\Phi(X/G,\pi_*^G(\calE))
\end{equation*}
which can be obtained for paracompactifying supports just as in \cite[Thm.~5.2.1]{Grothendieck1957}.
Now we prove the assertion about the Euler characteristic.
It is easy to check that $[F:\bbQ] \chi_\Phi(X, \calE; F) = \chi_\Phi(X, \calE; \bbQ)$, hence we can assume $R = \bbZ$.
By induction on the order of $G$ we can assume that $G$ is finite cyclic of prime order $p$.
The assertion follows from Lemma \ref{lem:EulerCharModp} and a Theorem of E.~E.~Floyd (based on a result of P. A. Smith),
see \cite[Thm.~19.7]{Bredon1997} or \cite[Thm.~4.2]{Floyd1952}. Here we use that $\pi_*^G(\calE)\otimes_\bbZ \bbF_p = \pi_*^G(\calE\otimes_\bbZ \bbF_p)$.
Note that we assumed the $\Phi$-dimension of $X$ to be finite, which
implies in particular that $\dim_{\Phi,\bbF_p} X$ is finite in the notation of \cite{Bredon1997}.
Further the pull-back sheaf $\pi^* (\pi_*^G(\calE))$ is isomorphic to $\calE$ (see~\cite[p.~199]{Grothendieck1957}). \end{proof}
\begin{lemma}
Let $G$ be a finite group which acts freely on $X$ and let $\calE$ be a flat $G$-sheaf of $R$-modules on $X$.
We assume that $(X,\Phi, \calE)$ is of finite type. Then for any $g \in G$ with $g \neq 1$ the Lefschetz number $\calL_\Phi(g,\calE;F)$ vanishes.
\end{lemma}
\begin{proof}
By taking a finite extension we can assume without loss of generality that $R$ contains all $|G|$-th roots of unity.
Further we can assume that $G$ is a finite cyclic group. Let $\psi: G \to R^\times$ be a character of $G$.
We can twist the $G$-sheaf $\calE$ with the character $\psi^{-1}$ to obtain a new $G$-sheaf $\calE\otimes \psi^{-1}$.
This sheaf is isomorphic to $\calE$ as a sheaf of $R$-modules, but not as $G$-sheaf.
Further we find that $\pi_*^G(\calE\otimes\psi^{-1})$ is the $\psi$-eigensheaf $\pi_*(\calE)_\psi$ in the
sheaf $\pi_*(\calE)$ of $R[G]$-modules, this means $\pi_*(\calE)_\psi$ is the subsheaf of
sections of $\pi_*(\calE)$ which transform with $\psi$ under the action of~$G$. From Lemma \ref{lem:EulerCharCovering}
we deduce that all the eigensheaves $\pi_*(\calE)_\psi$ have equal Euler characteristic.
However, since
\begin{equation*}
\calL_\Phi(g,\calE; F) = \sum_{\psi \in \widehat{G}} \psi(g) \chi_\Phi(X/G, \pi_*(\calE)_\psi; F)
\end{equation*}
the claim follows.
\end{proof}
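For a quick plausibility check of the Lemma with constant coefficients (again only an illustration), let $G=\bbZ/2\bbZ$ act on $X=S^1$ by the antipodal map, which is a free action, let $\calE = R$ carry its canonical $G$-structure and let $\Phi$ consist of all closed subsets. The antipodal map of $S^1$ is homotopic to the identity and therefore acts trivially on $H^0$ and $H^1$, so that for the generator $g$
\begin{equation*}
 \calL_\Phi(g,\calE;F) = 1 - 1 = 0,
\end{equation*}
as the Lemma predicts; this is also consistent with Corollary \ref{cor:letg}, since $X^g = \emptyset$.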
We shall frequently use the following Lemma.
\begin{lemma}\label{lem:FixInside}
Let $\calE$ be a sheaf of $R[G]$-modules on a space $X$ and let $\Phi$ be a system of supports on $X$.
The inclusion $\calE^G \to \calE$ induces an isomorphism of vectorspaces
\begin{equation*}
\com{H}_\Phi(X,\calE^G) \otimes_R F \isomorph \com{H}_\Phi(X,\calE)^G \otimes_R F.
\end{equation*} \end{lemma} \begin{proof}
Consider the functor $B: \calE \mapsto \calE^G$ from the category $\Sh_X(R[G])$ of sheaves of $R[G]$-modules
to the category $\Sh_X(R)$ of sheaves of $R$-modules. This functor is left exact and we consider its right derived functor
\begin{equation*}
\RR{B}: \Der^+(\Sh_X(R[G])) \to \Der^+(\Sh_X(R)).
\end{equation*}
Note that $B$ takes injective sheaves of $R[G]$-modules to
flabby sheaves (see Corollary to Prop.~5.1.3 in \cite{Grothendieck1957}).
As in Thm.~5.2.1 in \cite{Grothendieck1957} there is a convergent spectral sequence
\begin{equation*}
H^p_\Phi(X,\RR{B}^q(\calE))\otimes_R F \implies H^{p+q}_\Phi(X,\calE)^G\otimes_R F,
\end{equation*}
where we use that $F$ is a flat $R$-module.
In fact, the stalk at $x \in X$ of $\RR{B}^q(\calE)$ is the group cohomology $H^q(G,\calE_x)$ which is purely $|G|$-torsion
for all $q \geq 1$. Hence the spectral sequence collapses and the claim follows. \end{proof}
We obtain a refined version of Verdier's Lemma (cf.~\cite{Verdier1973}). \begin{lemma}\label{lem:VerdierLemma}
Let $G$ be a finite group which acts freely on $X$ and let $\calE$ be a flat $G$-sheaf of $R$-modules on $X$.
We assume that $(X,\Phi, \calE)$ is of finite type.
In this case we have
\begin{equation*}
\chi_\Phi(X, \calE; F[G]) = \chi_\Phi(X/G,\pi^G_*(\calE); F) \cdot F[G].
\end{equation*}
In particular, Lemma \ref{lem:EulerCharCovering} holds without the assumption that $G$ is abelian. \end{lemma} \begin{proof}
With the same argument as in Lemma \ref{lem:EulerCharCovering} we see that $(X/G, \Phi, \pi^G_*(\calE))$ is of finite type.
It suffices to compute the Lefschetz numbers of all elements of $G$ and compare them with the right hand side.
The vanishing of all Lefschetz numbers for $g \neq 1$ shows that $ \chi_\Phi(X, \calE; F[G])$ is a multiple of the regular representation.
The coefficient is the Euler characteristic of the graded $F$-vectorspace $\com{H}_\Phi(X,\calE)^G\otimes_{R} F$.
Since $\com{H}_\Phi(X,\calE) \cong \com{H}_\Phi(X/G,\pi_*(\calE))$, we can use Lemma \ref{lem:FixInside} to deduce the claim. \end{proof} \begin{remark}
Verdier uses the projection formula, the finite tor-amplitude criterion and a famous theorem of Swan
to obtain this lemma for cohomology with compact supports and constant coefficients. It is possible to extend his approach to the case
of families of supports
using a suitable replacement for the projection formula, see~\cite{Kionke2012}. \end{remark}
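A minimal example of Lemma \ref{lem:VerdierLemma}, included only as an illustration: let $X = G$ be the group itself with the discrete topology and the left translation action, let $\calE = R$ be the constant sheaf and let $\Phi$ consist of all subsets of $X$. Then $\com{H}_\Phi(X,\calE)\otimes_R F$ is concentrated in degree zero, where it is the regular representation $F[G]$, the quotient $X/G$ is a point, and $\chi_\Phi(X/G,\pi^G_*(\calE);F) = 1$, so both sides of the formula equal the class of $F[G]$ in $\Gzero(F[G])$.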
Finally we prove the main theorem. Recall that $X$ is a Hausdorff space with an action of a finite group $G$ and $\Phi$ is a $G$-invariant paracompactifying system of supports.
\begin{proof}[Proof of the Theorem]
Note that for every subgroup $H$ of $G$ the space $Y = X^H$ (resp.~$Y =X_H$) has
finite $\Phi$-dimension since $Y$ is (locally) closed and $\Phi$ is paracompactifying (cf.~II.~Rem.~4.14.1 in \cite{Godement1958}).
By condition \cFone the triple
$(X_H,\Phi, \calE_{|X_H})$ is of finite type for every $H \leq G$.
For $i=1, \dots, |G|$ we define the closed set
\begin{equation*}
X^i = \bigcup_{\substack{H \leq G \\ |H|\geq i}} X^H.
\end{equation*}
Then $X^1 = X$ and $X^{|G|} = X^G$ is the set of fixed points.
An element $x\in X$ is in $X^i\setminus X^{i+1}$ exactly if it has an isotropy group with
$i$ elements, hence
\begin{equation*}
X^i\setminus X^{i+1} = \bigcup_{\substack{H \leq G \\ |H|=i}} X_H =: \bigcup_{\substack{C \in \cla(G)_i}} X_C
\end{equation*}
where $\cla(G)_i$ is the set of conjugacy classes of subgroups with $i$ elements.
Note that these unions are topologically disjoint. Using the long exact sequences of the pairs $(X^i,X^{i+1})$
with supports in $\Phi$ (cf.~II.~4.10.1 in \cite{Godement1958}) we obtain
\begin{equation*}
\chi_\Phi\bigl(X,\calE; F[G]\bigr) = \sum_{C \in \cla(G)} \chi_{\Phi}\bigl(X_C, \calE_{|X_C}; F[G]\bigr).
\end{equation*}
Since $X_C$ is the disjoint union $\bigcup_{H \in C} X_H$ and $G$ acts transitively on the components
we obtain
\begin{equation*}
\chi_{\Phi}\bigl(X_C, \calE_{|X_C}; F[G]\bigr) = \ind^G_{N_G(H)}\chi_{\Phi}\bigl(X_H, \calE_{|X_H}; F[N_G(H)]\bigr)
\end{equation*}
for any representative $H \in C$.
We are now in the specific situation where $H$ acts trivially on $X_H$ and $N_G(H)/H$ acts freely on $X_H$.
For simplicity we write $N$ for the normalizer $N_G(H)$.
We prove the following identity
\begin{equation*}
\chi_{\Phi}\bigl(X_H, \calE_{|X_H}; F[N]\bigr) = \frac{|H|}{|N|} \ind_H^N\Bigl( \chi_\Phi(X_H, \calE_{|X_H}; F[H]) \Bigr) .
\end{equation*}
By Frobenius reciprocity a finite dimensional $F[N]$-module $V$ is induced from the $F[H]$-module $W$
if and only if
\begin{equation*}
\dim_F \Hom_{F[N]}(V_\lambda, V) = \dim_F \Hom_{F[H]}\bigl((V_\lambda)_{|H}, W\bigr)
\end{equation*}
for all $\lambda \in \widehat{N}_F$. We apply this principle at the level of Euler characteristics.
For $\lambda \in \widehat{N}_F$ we choose some lattice $L_\lambda \subset V_\lambda$ as in condition \cFtwo.
We obtain the $N$-sheaf $\Hom_R(L_\lambda, \calE_{|X_H})$ and the $N/H$-sheaf $\Hom_{R[H]}(L_\lambda, \calE_{|X_H})$.
For simplicity denote the canonical map $X_H \to X_H/N$ by $\pi$ as well.
Now we obtain
\begin{align*}
\chi\Bigl( \Hom_{F[N]}\bigl(V_\lambda, \com{H}_\Phi(X_H, &\calE_{|X_H})\otimes_R F\bigr) \Bigr) \\
&= \chi\Bigl( \Hom_{F[N]}\bigl(V_\lambda, \com{H}_\Phi(X_H/N,\pi_*(\calE_{|X_H}))\otimes_R F\bigr) \Bigr) \\
&= \chi\Bigl( \com{H}_\Phi(X_H/N,\Hom_R\bigl(L_\lambda, \pi_*(\calE_{|X_H})\bigr))^N\otimes_R F \Bigr) \\
(\text{by Lemma \ref{lem:FixInside}})\quad &= \chi\Bigl( \com{H}_\Phi(X_H/N,\pi^N_*\Hom_R\bigl(L_\lambda, \calE_{|X_H} \bigr))\otimes_R F \Bigr)\\
&= \chi\Bigl( \com{H}_\Phi(X_H/N,\pi^{N/H}_*\Hom_{R[H]}\bigl(L_\lambda, \calE_{|X_H} \bigr))\otimes_R F \Bigr)\\
(\text{by Lemma \ref{lem:VerdierLemma} and \cFtwo}) \quad &= \frac{|H|}{|N|}\chi\Bigl( \com{H}_\Phi(X_H,\Hom_{R[H]}\bigl(L_\lambda, \calE_{|X_H} \bigr))\otimes_R F \Bigr)\\
(\text{by Lemma \ref{lem:FixInside}}) \quad &= \frac{|H|}{|N|}\chi\Bigl( \Hom_{F[H]}\bigl((V_\lambda)_{|H},\com{H}_\Phi(X_H, \calE_{|X_H} )\otimes_R F\bigr) \Bigr).
\end{align*}
This proves the first equality in the theorem. Next we decompose the $F[H]$-module $\com{H}_\Phi (X_H, \calE_{|X_H} ) \otimes_R F$ into isotypical components. Let $V_\lambda$, with $\lambda \in \widehat{H}_F $, be an irreducible module and choose some $R[H]$-stable lattice $ M_\lambda \subset V_\lambda $. Put $ D_\lambda := \Hom_{F[H]} (V_\lambda, V_\lambda)$. Then $ D_\lambda$ is a division algebra and the multiplicity $m^j_\lambda$ of $V_\lambda$ in
$H^j_\Phi(X_H, \calE_{|X_H}) \otimes_R F $ equals \begin{align*}
m^j_\lambda & = \dim_{D_\lambda} \Hom_{F[H]} \bigl(V_\lambda, H^j_{\Phi} (X_H, \calE_{|X_H} ) \otimes_R F \bigr) \\
& = ( \deg V_\lambda)^{-1} \dim_F \Hom_{F[H]} ( V_\lambda , H^j_{\Phi} (X_H, \calE_{|X_H} ) \otimes_R F ) \\
& = (\deg V_\lambda)^{-1} \dim_F \bigl( H^j_\Phi (X_H, \Hom_{R[H]} (M_\lambda , \calE_{|X_H} )) \otimes_R F \bigr) . \end{align*} Recall that $\deg V_\lambda = \dim_F(D_\lambda)$. \end{proof}
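To see the first equality of the theorem in a simple case (a sanity check only, with constant coefficients), let $p$ be a prime, let $G = \bbZ/p\bbZ$ act on $X = S^2$ by rotations about a fixed axis, let $\calE = R$ and let $\Phi$ consist of all closed subsets. The conjugacy classes of subgroups are $[\{1\}]$ and $[G]$, where $X_G = X^G$ consists of the two poles and $X_{\{1\}} = X \setminus X^G$. The family induced by $\Phi$ on the open set $X_{\{1\}}$ is that of compact supports, so $\chi_\Phi\bigl(X_{\{1\}},\calE_{|X_{\{1\}}};F\bigr) = \chi(S^2) - 2 = 0$ and the term for $H = \{1\}$ vanishes. The term for $H = G$ equals $\chi_\Phi\bigl(X^G,\calE_{|X^G};F[G]\bigr) = 2\,[F]$, twice the trivial representation, which coincides with $\chi_\Phi\bigl(X,\calE;F[G]\bigr) = [H^0(X)\otimes_R F] + [H^2(X)\otimes_R F] = 2\,[F]$, since the rotations act trivially on the cohomology of $S^2$.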
\section{Further comments}\label{sec:Comments}
We add some remarks in order to clarify some assumptions for the main result.
\subsection{The finiteness condition} If $\calE$ is a constant sheaf on $X$, then condition \cFone implies \cFtwo. In general this need not be the case. This can already be seen in examples where the group acts trivially on the space. Let $X$ be the unit disc in $\bbC$ and $G = \bbZ / 2 \bbZ$. Then there exists a sheaf $\calE$ of $\bbZ[G]$-modules on $X$ such that $\com{H}(X, \calE)$ is finitely generated but $ H^2(X, \calE^G)$ is not finitely generated as $\bbZ$-module, i.e.~\cFone holds but \cFtwo fails.
It follows from a \v{C}ech cohomology argument that if $X$ and all $X^H$ are compact and homology locally connected (HLC), then condition \cF holds for every locally constant $G$-sheaf $\calE$ with finitely generated stalks (for the family of all supports).
\subsection{Sheaves of vectorspaces} If we replace $R$ by $F$ and start with a $G$-sheaf $\calE_F$ of $F$-vectorspaces over $X$, then both sides of the formula in the theorem make sense under suitable cohomological finiteness assumptions. However, there are examples which show that the theorem does not hold in this situation. For instance, let $X = S^1$ be the unit circle with the nontrivial action of $ G = \bbZ / 2 \bbZ $ by rotations. There is a $G$-sheaf of $F$-vectorspaces $\calE_F$ such that $\com{H}(S^1,\calE_F)$ is finite dimensional and $\chi(S^1, \calE_F )=1$. Then clearly
$| G | \chi ( X/G , \pi^G_* \calE_F ) \neq \chi (X, \calE_F )$
since the left hand side is an even number. In fact, most complications in the proof arise from the fact that we have to work with sheaves over the ring $R$.
\subsection{Cohomology of arithmetic groups} We indicate that the assumptions of the theorem hold for the cohomology of arithmetic groups. Let $A$ be a reductive algebraic group defined over $\bbQ$ ($A$ is not necessarily connected). By $\Gamma \subset A (\bbQ)$ we denote an arithmetic group. Assume that $G \subset \Aut_\bbQ(A)$ is a finite subgroup which acts on $\Gamma$ and on a finite dimensional rational representation $ \rho $ of $A$ on a $\bbQ$-vectorspace $ E $ such that
$g ( \rho (\eta) e) = \rho ( g(\eta)) ge$ for all $e \in E$, $\eta \in A(\bbQ)$, $g \in G$. Let $ Y $ be the symmetric space attached to $ A (\bbR)$. Then $ G $ and $ \Gamma $ act on $Y$. Put $ X := \Gamma \backslash Y $ as topological quotient and denote by $ f : Y \longrightarrow X $ the natural projection. We choose a $G$ and $\Gamma$-stable lattice $ L $ in $ E $. Put $ \calE = f ^\Gamma_\ast L_Y $, where $ L_Y $ is the constant sheaf with stalks $ L$ on $Y$. Then $ G $ acts on $ \com{H} ( X, \calE) \otimes_{\bbZ} \bbQ = \com{H}(\Gamma, L) \otimes_{\bbZ} \bbQ $. To see that the assumptions of the theorem hold one uses the Borel-Serre compactification for $ X $ and $X^H$, see \cite{BorelSerre1973}.
\subsection{} For a paracompactifying family of supports $\Phi$ on $X$ there is in general no equality of the form \begin{equation*}
\com{H}_\Phi(X,\calE)\otimes_R F = \com{H}_\Phi(X,\calE \otimes_R F) \end{equation*} for a sheaf of $R$-modules $\calE$ on $X$. For the family of compact supports this equality holds. For cohomology of arithmetic groups, we have for all $R[\Gamma]$-modules $M$ and all $j$ that $ H^j(\Gamma, M) \otimes_R F = H^j (\Gamma, M \otimes_R F) \:.$ This follows from the existence of a resolution $P_\bullet \longrightarrow \bbZ \longrightarrow 0 $ of $\bbZ $ by finitely generated free $\bbZ [\Gamma]$-modules and since $F$ is flat as $R$-module, see Thm.~11.4.4 in \cite{BorelSerre1973} and p.~193 in \cite{BrownBook1982}.
\end{document}
\begin{document}
\author[M. V. de Hoop]{Maarten V. de Hoop $^{\diamond}$}
\author[T. Saksala]{Teemu Saksala $^{\diamond,\: \ast}$}
\let\thefootnote\relax\footnote{ $^\diamond$ Department of Computational and Applied Mathematics, Rice University, USA
\\ $^\ast$ \textbf{[email protected]}}
\begin{abstract} We show that the travel time difference functions, measured on the boundary, determine a compact Riemannian manifold with smooth boundary up to Riemannian isometry, provided that the boundary satisfies a certain visibility condition. This corresponds to the inverse microseismicity problem.
The novelty of our paper is a new type of proof and a weaker assumption on the boundary than has appeared in the literature before. We also construct an explicit smooth atlas from the travel time difference functions. \end{abstract}
\maketitle
\section{Introduction}
\label{Se:motivation} Let $(N,g)$ be a complete, connected smooth Riemannian manifold. We split the manifold into two parts: a closed set $M$ with non-empty interior, and the closure of the exterior $F:=\overline{N \setminus M}$. We assume that the boundary ${\partial} M$ of $M$ is a smooth co-dimension one manifold. The set $F$ is the known observation domain and $M$ is the object of interest, for instance the Earth. The Riemannian metric $g$ can be seen as a proxy for the material parameters of $M$.
For any $p,q \in N$ we denote by $d_N(p,q)$ the length of a distance minimizing geodesic of $(N,g)$ that connects $p$ to $q$. We assume that the wave speed in $F$ is much slower than in $M$. In particular, if ${\partial} M$ is strictly convex, we may assume that distance minimizing geodesics of $(N,g)$ connecting $p$ to $q$ stay inside $M$ whenever $p,q\in M$. This implies \begin{equation} \label{eq:exterior_dist}
d_M(p,q)=d_{N}(p,q), \quad p,q \in M, \end{equation} where $d_M(p,q)$ is the distance from $p$ to $q$ in $M$, which is given as the infimum of lengths of curves from $p$ to $q$ that stay in $M$. For the moment we assume that \eqref{eq:exterior_dist} holds and we write $d_M=d_g$.
Suppose that there exists a Dirac point source $(p,s)\in M \times {\mathbb R}$ of a Riemannian wave equation, with zero Cauchy data. It follows from \cite{duistermaat1972fourier} and \cite{greenleaf1993recovering} that the singularities emitted from $(p,s)$ propagate along the geodesics of $(N,g)$ (see for instance \cite{LaSa} for more details). For every $z \in {\partial} M$ we define the \textit{arrival time} $\mathcal{T}_{p,s}(z)$ to be the infimum of times when a spherical wave emitted from $(p,s)$ is observed at $z$. Hence $\mathcal T_{p,s}(z)=d_g(p,z)+s$, and the \textit{travel time difference function} satisfies the equation \begin{equation} D_p(z_1,z_2):=d_g(p,z_1)-d_g(p,z_2) =\mathcal T_{p,s}(z_1)-\mathcal T_{p,s}(z_2), \quad z_1,z_2 \in {\partial} M. \label{eq:Relation of wave data and DDD} \end{equation} The important property of this function is that it is given as the difference of the arrival times.
The emission time $s$ and the origin of the wave remain unknown, but the function $D_p$ can be determined without knowledge of $s$.
This paper is devoted to the study of the inverse problem of travel time difference functions. This problem can be formulated as follows. Does the collection $$ \{D_p:p \in M^{int}\}, $$ determine the Riemannian manifold $(M,g)$ up to isometry?
Now we give our problem setting. Let $(M,g)$ be a compact connected $n$--dimensional Riemannian manifold with smooth boundary ${\partial} M$.
Since $M$ is compact, for any points $p,q \in M$ there exists a distance minimizing $C^1$--smooth curve $c$ from $p$ to $q$, see \cite{alexander1981geodesics}. Moreover, for any $t_0 \in [0,d_g(p,q)]$ such that the point $c(t_0)$ is an interior point of $M$ there exists $\epsilon>0$ such that $c|_{(t_0-\epsilon, t_0 +\epsilon)}$ is a geodesic. We denote the collection of all interior points of $M$ by $M^{int}$. We use the notation $SM$ for the unit sphere bundle of $(M,g)$. Each $(p,v) \in SM$ determines a unique maximal unit speed geodesic $\gamma_{p,v}$ of $(M,g)$.
For any $p \in M$ we define the corresponding \textit{travel time difference function}. \begin{equation} \label{eq:DDF} D_p:{\partial} M \times {\partial} M \to {\mathbb R}, \quad D_p(z_1,z_2):=d_g(p,z_1)-d_g(p,z_2). \end{equation}
Notice that the function $D_p$ is continuous. We assume that the following \textit{travel time difference data} \begin{equation} \label{eq:data} ({\partial} M, \: \{D_p: \: p \in M^{int}\}), \end{equation}
is given. That is, we assume that the $(n-1)$--dimensional smooth manifold ${\partial} M$ without boundary and the collection of functions $\{D_p:{\partial} M \times {\partial} M \to {\mathbb R} \: |\: p \in M^{int}\}$ are given. We emphasize that a priori the points $p$ related to $D_p$ are unknown.
The aim of this paper is to prove that travel time difference data determine $(M,g)$ up to isometry. Before stating our main theorem, we describe an additional geometric property for ${\partial} M$ under which we can prove the uniqueness of the inverse problem.
Let $(N,G)$ be any smooth closed Riemannian manifold that extends $(M,g)$, such that $g=G|_{M}$. We use the notation \[ \ell(x,v):=\inf \{t > 0: \gamma_{x,v}(t) \in N \setminus M\}, \quad (x,v)\in SM. \] Thus the domain of definition for $\gamma_{x,v}$ is $[-\ell(x,-v),\ell(x,v)]$. Moreover by Lemma 1 of \cite{stefanov2009}, $\ell(x,v)$ is independent of the extension. We note that $\gamma_{x,v}$ may intersect the boundary tangentially in many points.
\begin{definition} \label{eq:SU-cond-2} We say that $(M,g)$ satisfies the \textit{visibility} condition if the following holds: For every $z \in {\partial} M$ there exists $(z,\eta) \in {\partial} S M$ with $\ell(z,\eta) < \infty$ such that the geodesic $\gamma_{z,\eta}: [0,\ell(z,\eta)] \to M$ is a distance minimizer, $\gamma_{z,\eta}(\ell(z,\eta))$ is not a cut point of $z$, $\dot{\gamma}_{z,\eta}(\ell(z,\eta))$ is transversal to ${\partial} M$ and $\gamma_{z,\eta}((0,\ell(z,\eta))) \subset M^{int}$. \end{definition}
Next, we formulate our main Theorem. Let $(M_1,g_1)$ and $(M_2,g_2)$ be two smooth compact Riemannian manifolds with smooth boundaries ${\partial} M_1$ and ${\partial} M_2$. \begin{definition} \label{de:TTDD_agree} We say that the travel time difference data of $(M_1,g_1)$ and $(M_2,g_2)$ coincide, if there exists a diffeomorphism $\phi:{\partial} M_1 \to {\partial} M_2$ such that \begin{equation} \label{eq:equivalent_data} \{D_p(\phi^{-1}(\cdot),\phi^{-1}(\cdot)): p \in M_1^{int}\}=\{D_q: q \in M_2^{int}\}. \end{equation} \end{definition}
Our main result is the following. \begin{theorem} \label{th:main} Let $(M_i,g_i),\: i=1,2$ be compact, connected $n$--dimensional Riemannian manifolds with smooth boundaries ${\partial} M_i$. Suppose that $(M_1,g_1)$ satisfies the visibility condition \ref{eq:SU-cond-2}. If the travel time difference data of $(M_1,g_1)$ and $(M_2,g_2)$ coincide, then there exists a Riemannian isometry $\Psi:(M_1,g_1) \to (M_2,g_2)$ such that the restriction of $\Psi$ to ${\partial} M_1$ coincides with $\phi$. \end{theorem}
While preparing this paper for submission, the authors became aware that S. Ivanov very recently posted a preprint \cite{ivanov2018distance} on ArXiv with a result (Proposition 7.3.) related to the result presented here. Indeed, he proved a similar result for complete manifolds with boundary under the assumption that the boundary is nowhere concave. On the other hand by the proof of Lemma \ref{Le:jet}, the claim of Proposition 7.3. in \cite{ivanov2018distance} holds if the nowhere concave boundary condition is replaced with the visibility condition.
We give a different proof for Theorem \ref{th:main} (see Section \ref{Se:outline} for the outline of our proof) compared to one given in \cite{ivanov2018distance}. The proof given in \cite{ivanov2018distance} is based on distance comparison inequalities implied by Toponogov's theorem and minimizing geodesic extension property. The latter property provides a lower bound on the length of a minimizing extension of a geodesic beyond a non-cut point in terms of the length of a minimizing extension beyond the other endpoint.
We end this section by comparing the visibility condition to the nowhere concave boundary condition. Recall that the boundary ${\partial} M$ of a Riemannian manifold $(M,g)$ is nowhere concave if for every $z \in {\partial} M$ the second fundamental form of ${\partial} M$ at $z$, with respect to the inward-pointing normal vector, has at least one positive eigenvalue. If ${\partial} M$ is nowhere concave, then by the proof of Proposition 3.4. of \cite{zhou2012recovery} and Section 4.1. of \cite{sharafutdinov2012integral} it holds that $(M,g)$ satisfies the visibility condition. Notice that an annulus, contained in the Euclidean plane, satisfies the visibility condition, but not the nowhere concave boundary condition. Therefore the visibility condition is the more general of the two.
Finally, we give an example of a geometry that satisfies neither of these boundary conditions. Let $M\subset S^2$ be a spherical cap larger than the half--sphere. If $g$ is the round metric on $M$, then $(M,g)$ does not satisfy the visibility condition, since any $g$--distance minimizing curve between boundary points lies in ${\partial} M$ and therefore is not a geodesic of $S^2$. In this case ${\partial} M$ is also not nowhere concave. \subsection*{Background}
\subsubsection{Four geometric inverse problems related to the Riemannian wave equation}
In this section we assume that $N,\: M, \: F$ and $g$ are as in Section \ref{Se:motivation}. There are four different data sets that are all related to Riemannian wave equation with the Dirac point source $(p,s)\in M\times {\mathbb R}$ and zero Cauchy data.
The inverse problem of travel time functions has been considered in \cite{Katchalov2001,kurylev1997multidimensional}. The authors study the properties of the map $\mathcal{R}:M \to C({\partial} M)$, which maps a point $p \in M$ to the corresponding travel time function $r_p:{\partial} M \to {\mathbb R}$, given by the formula $$ r_p(z)=d_g(p,z), \quad z \in {\partial} M. $$ The authors show that the data $({\partial} M, \{r_p: p \in M\})$ determine a manifold $(M,g)$ up to isometry. They use the map $\mathcal R$ to construct an isometric copy of $M$ in $C({\partial} M)$. They do not pose any restrictions on the geometry.
In \cite{LaSa} the authors prove a result related to Theorem \ref{th:main}. In this paper it is assumed that the travel time difference function is given in the \textit{observation} set $F$ with non-empty interior $$ D_p:F \times F \to {\mathbb R}. $$ In addition they assume that the Riemannian structure of $(F,g)$ is known. The proof of the main theorem in \cite{LaSa} is very similar to the proof of Theorem \ref{th:main} presented in this paper and we will often refer to it for the details that are not presented in this paper.
In \cite{ivanov2018distance} S. Ivanov extends the result of \cite{LaSa} in the following setup. Let $M$ be any complete, connected Riemannian manifold without boundary. Let $F,U \subset M$ be open. If the topology and differential structure of the observation domain $F$ and $D_p, \: p \in U$ are given, then these data determine the geometry of the domain $(U,g_U)$ uniquely up to a Riemannian isometry. The sets $U$ and $F$ can be far away from each other, which is not the case in \cite{LaSa}, where it is assumed that $U=M$. Furthermore, S. Ivanov proves that the determination of $(M,g)$ from travel time difference functions $D_p$ is stable if the underlying manifold has a priori bounds on its diameter, curvature, and injectivity radius. In \cite{ivanov2018distance} a result similar to our Theorem \ref{th:main} is also provided for complete manifolds with nowhere concave boundary.
The inverse problem related to the set of exit directions \[ \Sigma_p=\{(\gamma_{p,v}(\ell(p,v)),\dot \gamma_{p,v}(\ell(p,v)))\in {\partial} SM: v \in S_pM\} \] of geodesics emitted from $p$ has been studied in \cite{lassas2018reconstruction}. Let $$ I(g,w,z,l):= \hbox{ number of $g$--geodesics of length $l$ connecting $w$ to $z$}, \quad w,z \in N, \: l >0. $$ The authors show that, if $(N,g)$ is a closed manifold such that \begin{equation} \label{eq:generic} \sup_{w,z,l} I(g,w,z,l) <\infty, \end{equation} $M$ is non-trapping and ${\partial} M$ is strictly convex, then the collection of exiting directions $$ \{\Sigma_p \subset {\partial} SM : p \in M^{int}\} $$ determines the manifold $(M,g)$ up to isometry. Assumption \eqref{eq:generic} is needed to show that each set $\Sigma_p$ is produced by a unique $p\in M$. To our understanding, it is not known whether \eqref{eq:generic} follows from the convexity of the boundary and the non-trapping property. On the other hand, in \cite{kupka2006focal} it is shown that \eqref{eq:generic} is a generic property in the space of all Riemannian metrics on $N$.
The final data set is related to a \textit{generalized sphere} of radius $r>0$, which is given by the formula \[
S(p,r):=\{\exp_p(v): v\in T_pM,\; \|v\|_g=r,\; \hbox{$\exp_p$ is not singular at $v$}\}. \] In \cite{deHoop1} the authors show that the spherical surface data $$ \{S(q,r)\cap F: q \in M, \: r >0 \} $$ determine the universal covering space of $N$. If a generalized sphere $S(p,r)$ is given, the authors show that there exists a specific coordinate structure in a neighborhood of any maximal normal geodesic to $S(p,r)$ such that in these coordinates the metric tensor $g$ can be determined. However this does not determine $g$ globally. The authors provide an example of two different metric tensors which produce the same spherical surface data.
\subsubsection{Microseismicity}
In this paper the results in \cite{LaSa} are adapted, in a fundamental way, to data available from actual seismic surveys. The point sources are microseismic events detected by dense arrays at the Earth's surface. In our theorem we show that the data determine the metric up to a change of coordinates. This implies that, for each event, one can locate the closest surface point and determine the corresponding travel time.
For the following we assume that $M \subset {\mathbb R}^m$ and $p \in M^{int}$. Recall that the arrival time function is $\mathcal{T}_{p,s}(z)=d_g(p,z)+s$, where $z\in {\partial} M$ is a receiver point and $s\in {\mathbb R}$ is the emission time. Since $\mathcal{T}_{p,s}(z)$ is a highly non-linear function of $p$, it is traditional in the seismological literature to study the linearization of $ \mathcal{T}_{p,s}(z)$ \cite{waldhauser2000double}. Let $p_0 \in M^{int}$ be a master event, i.e., an event for which $d_g(p_0,z)$ is known and $d_g(\cdot,z)$ is $C^1$--smooth near $p_0$. By the Taylor expansion of $d_g(\cdot,z)$ we have that the linearization \[
r^z_p:=\nabla d_g(\cdot,z)\bigg|_{p_0}\cdot (p-p_0) \approx d_g(p,z)-d_g(p_0,z), \] where $\nabla$ is the Euclidean gradient and $p$ is close to $p_0$. The \textit{double difference distance function} is $r^z_p-r^z_q$. This function is the difference of the linearized distances from two source points $p,q$, located in the interior of the manifold where the metric is unknown, to a (receiver) point $z$ on the boundary. The goal is to use these data to determine the travel time $d_g(p,z)$ of the second event and the relative distance $d_g(p,p_0)$ of the second event to the master event.
Event location based on this linearization is known as the DD earthquake location algorithm presented in \cite{waldhauser2000double}. This method assumes a flat earth model and is appropriate for local scale problems. In contrast to the seismological literature, we measure the difference of the arrival times $\mathcal{T}_{p,s}(z),\mathcal{T}_{p,s}(w)$ of the given event $p\in M^{int}$ at two receivers $z,w \in {\partial} M$. For our theorem it is not necessary to linearize the arrival times.
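The following sketch illustrates the linearization in the simplest possible toy model, a homogeneous medium with unit wave speed in the plane, so that $d_g$ is the Euclidean distance; all positions are made-up placeholder values and the code is not part of the reconstruction procedure of this paper.
\begin{verbatim}
import numpy as np

# Toy model: homogeneous medium with unit wave speed, so d_g is Euclidean distance.
z  = np.array([0.0, 0.0])   # receiver on the boundary (placeholder)
p0 = np.array([3.0, 4.0])   # master event, travel time d_g(p0, z) assumed known
p  = np.array([3.1, 3.9])   # nearby event

def d(x, y):
    return np.linalg.norm(x - y)

# Gradient of d_g(., z) at p0: the unit vector pointing from z towards p0.
grad = (p0 - z) / d(p0, z)

r_lin   = grad @ (p - p0)      # linearized travel time difference r^z_p
r_exact = d(p, z) - d(p0, z)   # exact difference

print(r_lin, r_exact)          # the two values agree to first order in |p - p0|
\end{verbatim}
The double difference distance function of two such events $p,q$ is then $r^z_p - r^z_q$, whereas the data \eqref{eq:DDF} used in this paper are the exact, non-linearized differences of arrival times.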
The travel time difference function, given in \eqref{eq:DDF}, is more closely related to applications in exploration seismology aimed at locating microseismic events, see Grechka \textit{et al.} \cite{grechka2015relative}. In that paper the authors assume that the travel time to the receivers and the location of the master event are known. Notice that our result does not recover the locations of the events in Cartesian coordinates.
In global seismology, the idea of using earthquake doublets, that is, two different events that are close to each other and produce nearly identical waveforms, to locate the events was introduced by Poupinet \textit{et al.} \cite{PoupinetEF-1984}. Zhang \& Thurber \cite{ZhangT-2006, ZhangT-2003} extend the double difference location method of Waldhauser \& Ellsworth \cite{waldhauser2000double} with an attempt to simultaneously solve for both the velocity structure and the seismic event locations. They develop regional DD seismic tomography methods that deal effectively with discontinuous velocity structures without knowing them a priori. Their methods also take the Earth's curvature into account.
\section{Proof of the Main theorem} In this section we prove Theorem \ref{th:main}. Whenever it is not necessary to distinguish manifolds $M_1$ and $M_2$ from one other we drop the subindices. In these cases we work with the data \eqref{eq:data}.
\subsection{Outline of the proof of the Main theorem} \label{Se:outline}
The proof consists of three steps. First we use the data \eqref{eq:data} to construct a mapping ${\mathcal D}$ from points of $M$ to continuous functions on ${\partial} M \times {\partial} M$. We show that this mapping is a topological embedding. Then we use the diffeomorphism $\phi:{\partial} M_1 \to {\partial} M_2$ and \eqref{eq:equivalent_data} to construct a homeomorphism $\Psi:M_1 \to M_2$ as in Theorem \ref{th:main} (see \eqref{eq:map_psi} for the definition). In the second part we show that this mapping is a diffeomorphism. We prove the existence of local coordinate maps that are determined by \eqref{eq:data}. In the third part we first prove that the data \eqref{eq:data} determine the images of geodesic segments that reach the boundary ${\partial} M$. Finally we use this information to prove the uniqueness of the Riemannian structure.
The outline of the proof of the main theorem is similar to that of the main theorem of \cite{LaSa}. The proof presented in this paper contains two key differences from the earlier result. The first one is the construction of the boundary coordinate system at the beginning of Section \ref{Se:smooth}. The determination of the boundary defining function (see \eqref{eq:func_f_p} and \eqref{eq:boundary_def_func}) only from the data \eqref{eq:data} has not been presented in the literature before. The second difference, which is considered at the beginning of Section \ref{Se:Riemannian}, is related to the construction of the metric tensor from the data \eqref{eq:data}.
In order to use similar techniques to those in \cite{LaSa} to prove that the metrics $g_1$ and $\Psi^{\ast}g_2$ coincide, we need to prove that the data \eqref{eq:data} determine the full Taylor expansion of the metric tensor on ${\partial} M$ in boundary normal coordinates. This makes it possible to extend $M_1$ to a closed manifold $N$ equipped with two smooth metric tensors $G$ and $\widetilde G$ that coincide in $F:=\overline{N\setminus M_1}$, $G|_{M_1}=g_1$ and $\widetilde G|_{M_1}=\Psi^{\ast}g_2$. Since we do not assume ${\partial} M$ to be strictly convex, we will also need to show that the travel time difference functions $D_p:F\times F \to {\mathbb R}, \: p\in N$ of $(N,G)$ and $(N,\widetilde G)$ coincide. For this last step we use the proof of Proposition 7.3 of \cite{ivanov2018distance} by S. Ivanov. The visibility condition of Definition \ref{eq:SU-cond-2} is needed to tackle these problems.
\subsection{Topology} We start by extending the data to the boundary. If $p,w \in {\partial} M$, then $D_q(p,w)=d_g(q,p)-d_g(q,w)\leq d_g(p,w)$ for every $q \in M^{int}$ by the triangle inequality, and letting $q$ tend to $w$ through interior points shows that \begin{equation} \label{eq:boundary_distance} d_g(p,w)=\sup_{q \in M^{int}}D_q(p,w). \end{equation} Thus the data \eqref{eq:data} determine $d_g:{\partial} M \times {\partial} M \to {\mathbb R}$ and the extended data \begin{equation} \label{eq:data_full} ({\partial} M, \{D_p: \: p \in M\}). \end{equation} Our first Lemma is
\begin{lemma} Let $(M_i,g_i),\: i=1,2$ be compact $n$--dimensional Riemannian manifolds with smooth boundaries ${\partial} M_i$. If the travel time difference data of $(M_1,g_1)$ and $(M_2,g_2)$ coincide, then \begin{equation} \{D_p(\phi^{-1}(\cdot),\phi^{-1}(\cdot)): p \in M_1\}=\{D_q: q \in M_2\}. \label{eq:equivalent_full_data} \end{equation} \end{lemma} \begin{proof} From \eqref{eq:equivalent_data} and \eqref{eq:boundary_distance} it follows that \begin{equation} \label{eq:boundary_dist_agree} d_1(\phi^{-1}(p),\phi^{-1}(q))=d_2(p,q), \quad p,q \in {\partial} M_2. \end{equation} Here, $d_i$ is the distance function of $g_i$ for $i \in \{1,2\}$. Therefore \eqref{eq:equivalent_full_data} holds. \end{proof} We study the properties of the mapping $$ {\mathcal D}:M \to C({\partial} M \times {\partial} M), \quad {\mathcal D}(p)=D_p, $$ where the target space is equipped with the $L^\infty$--norm.
\begin{lemma} \label{pr:topology} The mapping ${\mathcal D}$ is a topological embedding. \end{lemma} \begin{proof} Using the triangle inequality it is easy to see that ${\mathcal D}$ is $2$--Lipschitz.
Next we prove that $\mathcal{D}$ is one-to-one. To show this, assume that $x,y \in M$ are such that $D_x=D_y$. We first show that this implies that the set $\{z_x\}$ of closest boundary points of $x$ coincides with the set $\{z_y\}$ of closest boundary points of $y$. Let $w \in {\partial} M$ and define \begin{equation} \label{eq:func_f_w} f_{x,w}:{\partial} M \to {\mathbb R}, \quad f_{x,w}(z):=D_x(z,w). \end{equation} Then $\{z_x\}$ is the set of minimizers of function $f_{x,w}$. Since $f_{x,w}=f_{y,w}$, we have proven that $\{z_x\}=\{z_y\}$. We also use the function $f_{x,w}$ later when we construct a boundary defining function.
Let $z_0 \in \{z_x\}$ and denote $s_x=d_g(x,z_0)$ and $s_y=d_g(y,z_0)$. Without loss of generality, we can assume that $s_x \leq s_y$. Let $\nu$ be the inward pointing unit normal vector field to ${\partial} M$. Then $\gamma_{z_0,\nu}$ is the distance minimizing geodesic from ${\partial} M$ to $x$ and $y$. Moreover \begin{equation} \label{eq:dist_from_x_to_y} x=\gamma_{z_0,\nu}(s_x), \: y=\gamma_{z_0,\nu}(s_y) \hbox{ and } d(x,y)=s_y-s_x. \end{equation} If $z \in {\partial} M\setminus \{z_0\}$ is close to $z_0$, the distance minimizing geodesic $\gamma_x$ from $z$ to $x$ is not the same geodesic as $\gamma_{z_0,\nu}$, that is, the angle $\beta$ of the curves $\gamma_x$ and $\gamma_{z_0,\nu}$ at the point $x$ is strictly between $0$ and $\pi$. Let $\gamma_y$ be a distance minimizing geodesic from $y$ to $z$. We note that $D_x(z,z_0)=D_y(z,z_0)$ and \eqref{eq:dist_from_x_to_y} yields
$$
\mathcal{L}(\gamma_y)=d(y,z)=d(y,x)+d(x,z)=\mathcal{L}(\gamma_{z_0,\nu}|_{[s_x,s_y]})+\mathcal{L}(\gamma_x). $$ Thus the union $\mu$ of the curves $\gamma_{z_0,\nu}([s_x,s_y])$ and $ \gamma_x$ is a distance minimizing curve from $z$ to $y$, and hence it is a geodesic. However, as the angle $\beta$, defined above, is strictly between $0$ and $\pi$, the curve $\mu$ is not smooth at $x$, and hence it is not possible that $\mu$ is a geodesic unless $x=y$. Thus $x$ and $y$ have to be equal.
Since $M$ is compact and we just proved that ${\mathcal D}$ is continuous and one--to--one, we conclude that the mapping ${\mathcal D}$ is closed. Thus the claim is proven. \end{proof} Since the mapping $\phi$, given by Definition \ref{de:TTDD_agree}, is a diffeomorphism, the mapping $$ \Phi:C({\partial} M_1 \times {\partial} M_1) \to C({\partial} M_2 \times {\partial} M_2), \quad \Phi(F)=F(\phi^{-1}(\cdot),\phi^{-1}(\cdot)) $$ is an isometry. Let ${\mathcal D}_i, \: i\in \{1,2\}$ be defined as ${\mathcal D}$ on $(M_i,g_i)$. Now we are ready to define the mapping \begin{equation} \label{eq:map_psi} \Psi:M_1 \to M_2, \quad \Psi = {\mathcal D}_2^{-1} \circ \Phi\circ {\mathcal D}_1. \end{equation} \begin{proposition} \label{th:topology} Let $(M_i,g_i),\: i=1,2$ be compact $n$--dimensional Riemannian manifolds with smooth boundaries ${\partial} M_i$. If the travel time difference data of $(M_1,g_1)$ and $(M_2,g_2)$ coincide, then the mapping $\Psi$ given by \eqref{eq:map_psi} is a homeomorphism such that the restriction of $\Psi$ to ${\partial} M_1$ coincides with $\phi$. \end{proposition} \begin{proof} By \eqref{eq:equivalent_full_data} and Lemma \ref{pr:topology} it holds that the map $\Psi$ is a well-defined homeomorphism. If $p \in {\partial} M_1$, then by \eqref{eq:boundary_dist_agree} for any $z, w \in {\partial} M_2$ we have $$ \bigl({\mathcal D}_2(\phi(p))\bigr)(z,w)=d_2(\phi(p),z)-d_2(\phi(p),w)=d_1(p,\phi^{-1}(z))-d_1(p,\phi^{-1}(w))=\bigl((\Phi \circ \mathcal{D}_1)(p)\bigr)(z,w). $$ Applying ${\mathcal D}^{-1}_2$ to both sides of the equation above we obtain $\Psi(p)=\phi(p)$. \end{proof} \subsection{Smooth structure} \label{Se:smooth} In this part we show that the mapping $\Psi$ given in \eqref{eq:map_psi} is a diffeomorphism. We consider separately the boundary and the interior cases.
We start with the boundary case. Let $\sigma_{{\partial} M}$ be the collection of all boundary cut points, $$ \sigma_{{\partial} M}:=\{\gamma_{z,\nu}(\tau_{{\partial} M}(z)) \in M: \: z \in {\partial} M\}, \quad \tau_{{\partial} M}(z):=\sup\{t>0:d_g({\partial} M, \gamma_{z,\nu}(t))=t\}. $$ By Section III.4. of \cite{sakai1996riemannian} it holds that \begin{equation} \label{eq:boundary_cut_locus} \sigma_{{\partial} M}=\overline{\{p\in M: \#\{z\in{\partial} M: d_g(p,z)=d_g(p,{\partial} M)\}\geq 2\}}. \end{equation} Choose $w \in {\partial} M$. Then by \eqref{eq:boundary_cut_locus} and Lemma \ref{pr:topology} the data \eqref{eq:data_full} determine the set \begin{equation} \label{eq:complement_of_boundary_cut_locus} M \setminus \sigma_{{\partial} M}=\{p \in M : \hbox{ The map $f_{p,w}$ has precisely one minimizer.}\}^{int}, \end{equation} where $f_{p,w}$ is as in \eqref{eq:func_f_w}. \begin{lemma} \label{Le:equivalence_of_cut_loci} Let $(M_i,g_i),\: i=1,2$ be compact $n$--dimensional Riemannian manifolds with smooth boundaries ${\partial} M_i$. If the travel time difference data of $(M_1,g_1)$ and $(M_2,g_2)$ coincide, then $$ M_2 \setminus \sigma_{{\partial} M_2}=\Psi(M_1 \setminus \sigma_{{\partial} M_1}). $$ \end{lemma} \begin{proof} By the definition of the mapping $\Psi$ we have for any $p \in M_1$ and $w \in {\partial} M_1$ that $$ f^1_{p,w}(z)=f^2_{\Psi(p),\phi(w)}(\phi(z)), \quad z \in {\partial} M_1, $$ where $f^1_{p,w}$ and $f^2_{\Psi(p),\phi(w)}$ are defined as $f_{p,w}$ in \eqref{eq:func_f_w}. Therefore the claim follows from \eqref{eq:complement_of_boundary_cut_locus}. \end{proof}
Next we construct a boundary defining function on $M \setminus \sigma_{{\partial} M}$. Let $p \in M \setminus \sigma_{{\partial} M}$ and denote by $Z(p)$ the closest boundary point of $p$. The map $x \mapsto Z(x) \in {\partial} M$ is smooth on $M \setminus \sigma_{{\partial} M}$. Define a function \begin{equation} \label{eq:func_f_p} f_p(z):=d_g(z,Z(p))-D_p(z,Z(p)), \quad z \in {\partial} M. \end{equation} Notice that this function is determined by the data \eqref{eq:data_full}, and by the triangle inequality the function $f_p$ is non-negative. If $p \in {\partial} M$ then $f_p$ is the zero function. If $p \in M^{int} \setminus \sigma_{{\partial} M}$ then \begin{equation} \label{eq:func_f_p_outside_closes_bp} f_p(z)>0, \quad z \in {\partial} M \setminus \{Z(p)\}. \end{equation} If this is not true, then there exists ${\partial} M \ni z \neq Z(p)$ such that $$ d_g(p,z)=d_g(Z(p),z)+d_g(p,Z(p)). $$ This implies that there exists a distance minimizing curve from $p$ to $z$ that goes through $Z(p)$, but is not $C^1$ at $Z(p)$. By \cite{alexander1981geodesics} this is not possible. Thus \eqref{eq:func_f_p_outside_closes_bp} holds. Therefore we have proven the following \begin{equation} \label{eq:char_of_boundary} {\partial} M=\{p \in M \setminus \sigma_{{\partial} M}: f_p \equiv 0\}. \end{equation}
\begin{figure}
\caption{A schematic picture of the function $f_p$.}
\label{Fi:f_p}
\end{figure}
\begin{lemma} \label{Le:smoothnes_of_dist_func} Let $(M,g)$ be a smooth Riemannian manifold with smooth boundary for which the visibility condition \ref{eq:SU-cond-2} holds. Let $p \in {\partial} M$. Then there exist $q \in {\partial} M$ and neighborhoods $U,V \subset M$ of $p$ and $q$, respectively, such that $d_g:U\times V \to {\mathbb R}$ is smooth. The distance minimizing geodesic from $p$ to $q$ is transversal to ${\partial} M$ at $p$ and $q$. Moreover, any distance minimizing geodesic $\gamma$ from $U$ to $V$ is contained in $M^{int}$, if the start and end points are excluded. \end{lemma} \begin{proof}
We follow the proof of Theorem 1 of \cite{stefanov2009} and show that the visibility condition \ref{eq:SU-cond-2} implies the following claim: There exists $\eta \in S_pM$, transversal to ${\partial} M$ and with $0< \ell(p,\eta) < \infty$, such that $\gamma_{p,\eta}: [0,\ell(p,\eta)] \to N$ is a distance minimizer and $q:=\gamma_{p,\eta}(\ell(p,\eta))$ is not a cut point of $p$ along $\gamma_{p,\eta}$. The exit direction $\dot{\gamma}_{p,\eta}(\ell(p,\eta))$ is transversal to ${\partial} M$ and $\gamma_{p,\eta}((0,\ell(p,\eta))) \subset M^{int}$. Moreover $\ell(p,\eta)=d_{g}(p,q)$.
The claim of this lemma then follows from the implicit function theorem.
\end{proof} Let $p \in {\partial} M$. By Lemma \ref{Le:smoothnes_of_dist_func} there exist $w \in {\partial} M$ and $r>0$ such that the distance function $d_g$ is smooth in $B(p,r)\times B(w,r)$ and $B(p,r)\cap B(w,r)=\emptyset$. Let $r_{{\partial} M}>0$ be the minimum of $r$ and the boundary injectivity radius. Choose $$ z_0 \in {\partial} M \cap B(w,r) \hbox{ and }\delta \in (0,r_{{\partial} M}), $$ such that $z_0$ is not the closest boundary point for any $q \in B(p,\delta)$, $Z(q) \in B(p,r) $ and the distance minimizing geodesic from $z_0$ to $p$ is not normal to ${\partial} M$ at $p$. Then \begin{equation} \label{eq:boundary_def_func} E_{z_0}:B(p,\delta) \to [0,\infty), \quad E_{z_0}(q):=f_q(z_0)=d_g(z_0,Z(q))-D_q(z_0,Z(q)) \end{equation} is well-defined and smooth. Moreover, by \eqref{eq:func_f_p_outside_closes_bp} we have that $E_{z_0}(q)=0$ if and only if $q \in B(p,\delta)\cap {\partial} M$. Thus $E_{z_0}$ is a boundary defining function. Denote by $(t,Z)$ the boundary normal coordinates in $B(p,\delta)$, where $t(q)=d_g({\partial} M, q)$ and $Z(q)$ is the closest boundary point to $q \in B(p,\delta)$. Then the map \begin{equation} \label{eq:boundary_coordinates} W_{z_0}:B(p,\delta) \to [0,\infty) \times {\partial} M, \quad W_{z_0}(q):=(E_{z_0}(q),Z(q)), \end{equation} is smooth.
We show that the Jacobian of this map with respect to boundary normal coordinates is invertible at $p$. By the inverse function theorem this yields the existence of a neighborhood $V\subset M$ of $p$ such that the restriction of $W_{z_0}$ to $V$ is a coordinate map. The Jacobian of $W_{z_0}$ at $p$ is \begin{equation*} \left(\begin{array}{cc} \frac{{\partial}}{{\partial} t} E_{z_0}& \frac{{\partial}}{{\partial} t} Z \\ \\ \frac{{\partial}}{{\partial} Z} E_{z_0}&\frac{{\partial}}{{\partial} Z}Z \end{array}\right) = \left(\begin{array}{cc} \frac{{\partial}}{{\partial} t} E_{z_0}&\bar 0^T \\ \\ \frac{{\partial}}{{\partial} Z} E_{z_0}& Id_{n-1}. \end{array}\right) \end{equation*} Notice $$
\frac{{\partial}}{{\partial} t} E_{z_0}(t,Z)\bigg|_{(t,Z)=(0,p)}=1-g_p(\dot{\gamma}_{z_0,p}(d_g(p,z_0)),\nu)>0. $$ The last inequality holds since the distance minimizing geodesic $\gamma_{z_0,p}$ from $z_0$ to $p$ is not normal to the boundary at $p$. Thus the Jacobian of $W_{z_0}$ at $p$ is invertible.
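For convenience we record the short computation behind the formula for ${\partial}_t E_{z_0}$; it is not part of the original argument and uses the convention that $\nu={\partial}/{\partial} t$ is the inward unit normal at $p$. Since $D_q(z_0,Z(q))=d_g(q,z_0)-d_g(q,Z(q))$ and $d_g(q,Z(q))=t(q)$ in boundary normal coordinates, the definition \eqref{eq:boundary_def_func} can be rewritten as
\[
E_{z_0}(q)=d_g(z_0,Z(q))-d_g(q,z_0)+t(q).
\]
Along the normal geodesic $t\mapsto \gamma_{p,\nu}(t)$ we have $Z(\gamma_{p,\nu}(t))=p$, so the first term is constant in $t$, while the first variation formula gives ${\partial}_t\, d_g(\cdot,z_0)\big|_{p}=g_p(\dot{\gamma}_{z_0,p}(d_g(p,z_0)),\nu)$. Together these give the displayed derivative.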
We use coordinates similar to $W_{z_0}$ to show that $\Psi:M_1\to M_2$ is a diffeomorphism near the boundary of $M_1$. In order to do so we first prove the following lemma.
\begin{lemma} Let $(M_i,g_i),\: i=1,2$ be compact $n$--dimensional Riemannian manifolds with smooth boundaries ${\partial} M_i$. If the travel time difference data of $(M_1,g_1)$ and $(M_2,g_2)$ coincide, then \begin{equation} \label{eq:boundary_metric}
g_1|_{{\partial} M_1}=\phi^{\ast}(g_2|_{{\partial} M_2}). \end{equation} \end{lemma} \begin{proof} Since \eqref{eq:equivalent_data} implies \eqref{eq:boundary_dist_agree}, the proof of this lemma follows from the proof of Proposition 3.3 of \cite{zhou2012recovery}.
\end{proof}
Now we are ready to prove the following lemma.
\begin{lemma} \label{Le:boundary_coord} Let $(M_i,g_i),\: i=1,2$ be compact $n$--dimensional Riemannian manifolds with smooth boundaries ${\partial} M_i$, whose travel time difference data coincide. Assume that $(M_1,g_1)$ satisfies the visibility condition \ref{eq:SU-cond-2}. Let $p \in {\partial} M_1$. There exist a neighborhood $U$ of $p$ in $M_1$ and $z_0 \in {\partial} M_1$ such that on $U$ and $\Psi(U)$ the mappings $W^1_{z_0}(q_1)=(E^1_{z_0}(q_1),Z^1(q_1))$ and $W^2_{\phi(z_0)}(q_2)=(E^2_{\phi(z_0)}(q_2),Z^2(q_2))$ respectively, defined as in \eqref{eq:boundary_def_func} and \eqref{eq:boundary_coordinates}, are smooth local boundary coordinate maps. Moreover, with respect to these coordinates, the local representation of $\Psi$ is \begin{equation} \label{eq:local:rep_of_Psi_boundary} W^1_{z_0}(U)\ni (s,z) \mapsto (s,\phi(z)) \in W^2_{\phi(z_0)}(\Psi(U)). \end{equation}
\end{lemma} \begin{proof} By Lemma \ref{Le:equivalence_of_cut_loci} we have for any $q \in (M_1 \setminus \sigma_{{\partial} M_1})$ that the point $z \in {\partial} M_1$ is the closest boundary point to $q$ if and only if $\phi(z) \in {\partial} M_2$ is the closest boundary point to $\Psi(q) \in (M_2 \setminus \sigma_{{\partial} M_2})$. Thus $$ \phi(Z^1(q))=Z^2(\Psi(q)). $$ Therefore, using \eqref{eq:boundary_dist_agree} we have that for all $q \in (M_1 \setminus \sigma_{{\partial} M_1}), \: z \in {\partial} M_1$ \begin{equation} \label{eq:f_p_agree} f^1_q(z):=d_1(z,Z^1(q))-D_q(z,Z^1(q))=d_2(\phi(z),Z^2(\Psi(q)))-D_{\Psi(q)}(\phi(z),Z^2(\Psi(q)))=:f^2_{\Psi(q)}(\phi(z)). \end{equation}
We choose $w \in {\partial} M_1$ and neighborhoods $U'$ and $V$ of $p$ and $w$, respectively, as in Lemma \ref{Le:smoothnes_of_dist_func} for $(M_1,g_1)$. Then the function $(x,z) \mapsto d_1(x,z)$ is smooth in $(U' \cap {\partial} M_1) \times (V\cap {\partial} M_1)$. Let $\gamma$ be the unique distance minimizing geodesic from $p$ to $w$ that is transversal to ${\partial} M_1$ at $p$ and $w$. Since $\phi$ is a diffeomorphism, by \eqref{eq:boundary_dist_agree} and \eqref{eq:boundary_metric} it follows that \[
D\phi\;\bigg( \hbox{grad}'_1\;d_1(\cdot,w)\bigg|_{p}\bigg)= \hbox{grad}'_2\;d_2(\cdot,\phi(w))\bigg|_{\phi(p)} .
\]
Here $\hbox{grad}'_i$, $i\in \{1,2\}$, stands for the boundary gradient. Therefore, a $g_2$--distance minimizing unit speed curve $c$ from $\phi(p)$ to $\phi(w)$ is transversal to ${\partial} M_2$ at $\phi(p)$. Switching the order of $p$ and $w$ we also prove that $c$ is transversal to ${\partial} M_2$ at $\phi(w)$. Since $\gamma$ is the unique distance minimizing curve from $p$ to $w$ and $\gamma((0,d_1(p,w))) \subset M_1^{int}$ it holds by \eqref{eq:boundary_dist_agree} that $c((0,d_2(\phi(p),\phi(w)))) \subset M_2^{int}$. Therefore $c$ is a geodesic of $g_2$. Since $d_2(\phi(p),\cdot)|_{{\partial} M_2}$ is smooth at $\phi(w)$, $c$ is the unique distance minimizing curve of $(M_2,g_2)$ connecting $\phi(p)$ to $\phi(w)$. Moreover, due to the transversality of $c$ there exists a neighborhood of $\phi(w)$ such that any point in this neighborhood is connected to $\phi(p)$ via a unique distance minimizing geodesic. Since conjugate points of $\phi(p)$ in $(M_2,g_2)$ are accumulation points of those points $q\in M_2$ that can be connected to $\phi(p)$ via multiple distance minimizers, it holds that $\phi(w)$ is not a conjugate point of $\phi(p)$ along $c$ either. Therefore $\phi(w)$ is not a cut point of $\phi(p)$ along $c$. This proves that also $(M_2,g_2)$ satisfies the visibility condition.
By Lemma \ref{Le:smoothnes_of_dist_func} we have proved that there exists $r_{\min}>0$ smaller than the minimum of the boundary cut distances of $g_1$ and $g_2$, such that the functions \[ (q,z) \mapsto d_1(q,Z^1(q)), \:d_1(q,z), \:d_1(z,Z^1(q)), \quad (q,z) \in B_1(p,r_{\min}) \times (B_1(w,r_{\min})\cap {\partial} M_1) \] and \[ (q',z') \mapsto d_2(q',Z^2(q')), \:d_2(q',z'),\: d_2(z',Z^2(q')), \quad (q',z') \in B_2(\phi(p),r_{\min}) \times (B_2(\phi(w),r_{\min})\cap {\partial} M_2) \] are smooth. Since $\Psi$ is a homeomorphism, the existence of a set $U$ and a point $z_0 \in {\partial} M_1$ as in the claim of this lemma follows.
If $q \in U$ we obtain by \eqref{eq:f_p_agree} the following equation $$ E^1_{z_0}(q)=E^2_{\phi(z_0)}(\Psi(q)). $$ Therefore we have proven that the map given in \eqref{eq:local:rep_of_Psi_boundary} and the mapping $$ W^2_{\phi(z_0)}\circ \Psi\circ (W^1_{z_0})^{-1}:W^1_{z_0}(U) \to W^2_{\phi(z_0)}(\Psi(U)) $$ coincide.
\end{proof}
Next we consider the coordinates away from ${\partial} M$. Let $p \in M^{int}$ and choose any closest boundary point $z_p\in {\partial} M$ to $p$. By Lemma 2.15 of \cite{Katchalov2001} there exist neighborhoods $U \subset M^{int}$ of $p$ and $W \subset {\partial} M$ of $z_p$ such that the distance function $d_g:U\times W \to {\mathbb R}$ is smooth. Moreover for every $(q,w) \in U\times W$ the distance $d_g(q,w)$ is realized by the unique distance minimizing geodesic, contained in $M^{int}$, if the end point $w$ is excluded. We use the shorthand notation $v \in S_p M$ for the velocity $\dot \gamma_{z_p,\nu}(d_g(p,z_p))$. An argument similar to Lemma 2.6 of \cite{LaSa} yields the existence of a neighborhood $V \subset W$ of $z_p$ such that the set $$ \mathcal{V}=\{(z_i)_{i=1}^n \in V^n: \dim \hbox{span}((F(z_i)-v)_{i=1}^n)=n\} $$
is open and dense in $V^n:= V\times V \times \ldots \times V$. Here $F(q):=-\frac{(\exp_p)^{-1}(q)}{\|(\exp_p)^{-1}(q)\|_g}, \: q \in V$. Notice that this claim follows from Lemma 2.6 of \cite{LaSa} since $F(q)=\frac{(\exp_p)^{-1}(q')}{\|(\exp_p)^{-1}(q')\|_g}$ for some $q'\in M$ if and only if there exists $ 0<t<\tau(p,-F(q))$ such that $q'=\gamma_{p,-F(q)}(t)$.
Moreover for every $(z_i)_{i=1}^n \in \mathcal{V}$ there exists an open neighborhood $U' \subset U$ of $p$ such that $$
H:U'\rightarrow {\mathbb R}^n, \quad H(q)=(d_g(q,z_i)-d_g(q,z_p))_{i=1}^n $$
is a smooth coordinate mapping. This holds, since for any $(z_i)_{i=1}^n \in \mathcal{V}$ the Jacobian of $H$ at $p$ is invertible.
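The following short computation, included here for the reader's convenience and using only the smoothness established above, explains the invertibility claim. The gradient of $x\mapsto d_g(x,z)$ at $p$ is the unit vector at $p$ pointing away from $z$ along the unique minimizing geodesic, so $\hbox{grad}_g\, d_g(\cdot,z_i)\big|_{p}=F(z_i)$ and $\hbox{grad}_g\, d_g(\cdot,z_p)\big|_{p}=v$. Therefore
\[
dH\big|_{p}=\Big(g_p\big(F(z_i)-v,\:\cdot\:\big)\Big)_{i=1}^n,
\]
which is invertible exactly when $\dim \hbox{span}\big((F(z_i)-v)_{i=1}^n\big)=n$, that is, precisely when $(z_i)_{i=1}^n \in \mathcal{V}$.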
\begin{lemma} \label{Le:interior_coord} Let $(M_i,g_i),\: i=1,2$ be compact $n$--dimensional Riemannian manifolds with smooth boundaries ${\partial} M_i$. Suppose that the travel time difference data of $(M_1,g_1)$ and $(M_2,g_2)$ coincide. Let $p \in M_1^{int}$. Let $z_p$ be any closest boundary point to $p$. There exists a neighborhood $U$ of $p$ in $M_1^{int}$ and a neighborhood $W \subset {\partial} M_1$ of $z_p$ such that the distance functions $d_1:U \times W \to {\mathbb R}$ of $(M_1,g_1)$ and $d_2:\Psi(U) \times \phi(W) \to {\mathbb R}$ of $(M_2,g_2)$ are smooth.
Moreover there exist points $z_1, \ldots, z_n \in W$ and a neighborhood $V \subset U$ of $p$ such that $$ H_1:V\rightarrow {\mathbb R}^n, \quad H_1(x)=(d_1(x,z_i)-d_1(x,z_p))_{i=1}^n $$ and $$ H_2:\Psi(V)\rightarrow {\mathbb R}^n, \quad H_2(q)=(d_2(q,\phi(z_i))-d_2(q,\phi(z_p)))_{i=1}^n, $$ are smooth coordinate maps. We also have \begin{equation} \label{eq:local:rep_of_Psi_interior} H_1(V)= H_2(\Psi(V)) \hbox{ and } H_2 \circ \Psi\circ H_1^{-1} = Id_{H_1(V)}. \end{equation}
\end{lemma} \begin{proof} Since $\Psi$ is a homeomorphism, the first part of the claim follows from a construction similar to the one carried out before this lemma. The proof of the latter part is a modification of the proof of Theorem 2.7 of \cite{LaSa}. \end{proof}
\begin{proposition} \label{th:diffeo} Let $(M_i,g_i),\: i=1,2$ be compact $n$--dimensional Riemannian manifolds with smooth boundaries ${\partial} M_i$ whose travel time difference data coincide. If $(M_1,g_1)$ satisfies the visibility condition \ref{eq:SU-cond-2}, then the mapping $\Psi:M_1 \to M_2$, given in \eqref{eq:map_psi}, is a diffeomorphism. \end{proposition} \begin{proof} The claim follows from Proposition \ref{th:topology} and Lemmas \ref{Le:boundary_coord}--\ref{Le:interior_coord}. \end{proof}
\subsection{Riemannian structure} \label{Se:Riemannian} Since we have proven that the map $\Psi$ is a diffeomorphism, we can define the pull-back metric $\widetilde g:=\Psi^\ast g_2$ on $M_1$. From now on we only consider the manifold $M:=M_1$ with smooth boundary, equipped with the Riemannian metrics $g:=g_1$ and $\widetilde g$. We need to show that $g=\widetilde g$.
First we notice that by the definitions of the diffeomorphism $\Psi$ and metric $\widetilde g$ on $M$ we have by the data \eqref{eq:data_full} that \begin{equation} \label{eq:dist_dif_agree} D_p(z,w)=d_g(p,z)-d_g(p,w)=d_{\widetilde g}(p,z)-d_{\widetilde g}(p,w), \quad p \in M, \: z,w \in {\partial} M. \end{equation}
\begin{lemma} \label{Le:jet} Let $p \in {\partial} M$ and $(x^1,\ldots, x^n)$ be a boundary normal coordinate system of $g$ near $p$ and $\alpha\in {\mathbb N}^{n}$ any multi-index. Write $g=(g_{ij})_{i,j=1}^{n}$ and $\widetilde g=(\widetilde g_{ij})_{i,j=1}^{n}$. Then for all $i,j \in \{1, \ldots,n\}$ it holds that \begin{equation} \label{eq:jet}
\partial^\alpha g_{ij} |_{{\partial} M}= \partial^\alpha \widetilde g_{ij} |_{{\partial} M}, \quad {\partial}^\alpha:=\prod_{k=1}^n \left(\frac{{\partial}}{{\partial} x^k}\right)^{\alpha_k}.
\end{equation} \end{lemma} \begin{proof}
We prove that the local lens relations $(\ell_g,\sigma_g)$ and $(\ell_{\widetilde g},\sigma_{\widetilde g})$ of $g$ and $\widetilde g$ respectively coincide on some open set $\mathcal D \subset T{\partial} M$. After this the claim follows from the proof of Theorem 1 of \cite{stefanov2009}. For the definitions of local lens relations see \cite{stefanov2009}.
Choose $q \in {\partial} M$ and neighborhoods $U,V \subset M$ of $p$ and $q$ as in Lemma \ref{Le:smoothnes_of_dist_func} for the metric $g$. Let $\gamma$ be the unique distance minimizing geodesic of $g$ connecting $p$ to $q$.
Due to \eqref{eq:boundary_dist_agree}
and Lemma \ref{Le:smoothnes_of_dist_func} it holds that $d_{\widetilde g}$ is smooth on $(U \cap {\partial} M) \times (V \cap {\partial} M)$. Therefore we have for every $(x,y) \in (U \cap {\partial} M) \times (V \cap {\partial} M)$ that \begin{equation} \label{eq:boundary_gradients}
\hbox{grad}'_g \; d_g(\cdot,y)\bigg|_{x}=\hbox{grad}'_{\widetilde g} \; d_{\widetilde g}(\cdot,y)\bigg|_{x} \quad \hbox{ and } \quad \hbox{grad}'_g \; d_g(\cdot,x)\bigg|_{y}=\hbox{grad}'_{\widetilde g} \; d_{\widetilde g}(\cdot,x)\bigg|_{y}. \end{equation}
Denote $\dot{\gamma}(0)=:\eta$ and $\dot{\gamma}(d_g(p,q))=:v$. Then \eqref{eq:boundary_metric} and \eqref{eq:boundary_gradients} imply that $\dot{\widetilde \gamma}(0)=\eta$ and $\dot{\widetilde \gamma}(d_g(p,q))=v$, where $\widetilde \gamma$ is the unique distance minimizing geodesic of $\widetilde g$ from $p$ to $q$. By Lemma \ref{Le:smoothnes_of_dist_func} it holds that $\eta$ and $v$ are transversal to ${\partial} M$.
Therefore after possibly shrinking $U$ and $V$ we have by formula (10) of \cite{stefanov2009} and formulas \eqref{eq:boundary_metric} and \eqref{eq:boundary_gradients} that the local lens relations $(\ell_g,\sigma_g)$ and $(\ell_{\widetilde g},\sigma_{\widetilde g})$ coincide in the set \[
\mathcal{D}:=\{ \hbox{grad}'_g \;d_g(\cdot,y)\bigg|_{x}, \: \hbox{grad}'_g \; d_g(\cdot,x)\bigg|_{y} \in T{\partial} M: \: (x,y) \in (U \cap {\partial} M) \times (V \cap {\partial} M)\}. \] The set $\mathcal D$ is open since it is an image of an open map, given by the composition of the diffeomorphism \[
W_\eta\ni (x,v) \mapsto \big(\gamma_{x,v}(\ell(x,v)),\dot \gamma_{x,v}(\ell(x,v))\big) \in W_v \] and the orthogonal projection from ${\partial} SM$ to $T{\partial} M$. In the above $W_\eta\subset {\partial} SM$ is some open neighborhood of $(p,\eta)$ and $ W_v\subset {\partial} SM$ is some open neighborhood of $(q,v)$.
\end{proof}
Let $(N,G)$ be a smooth closed Riemannian manifold that is a smooth extension of $(M,g)$. We write $F:= N \setminus M^{int}$, as before. By Lemma \ref{Le:jet} $(N,\widetilde G)$ is a smooth extension of $(M,\widetilde g)$, if $\widetilde G$ is a Riemannian metric defined as \begin{equation} \label{eq:def_G_tilde}
\widetilde G|_F=G|_F, \quad \widetilde G|_{M^{int}}=\widetilde g. \end{equation} \begin{lemma} Let $N, F, G$ and $\widetilde G$ be as above. Then
\begin{equation} \label{eq:dist_dif_on_N} d_G(p,z)-d_G(p,w)=d_{\widetilde G}(p,z)-d_{\widetilde G}(p,w) \quad p \in N, \: z,w \in F. \end{equation} The functions $d_G, d_{\widetilde G}$ are the geodesic distances of $G$ and $\widetilde G$ respectively. \end{lemma} \begin{proof} If $p \in M$, we will soon give a proof for \begin{equation} \label{eq:dist_dif_on_N_2} d_G(p,z)-d_G(p,w)=d_{\widetilde G}(p,z)-d_{\widetilde G}(p,w), \quad z,w \in F. \end{equation} This proof is an adaptation of Proposition 7.3 in \cite{ivanov2018distance}. If \eqref{eq:dist_dif_on_N_2} holds for every $p\in M$ then \eqref{eq:dist_dif_on_N_2} holds also for the case $p \in F$. The latter proof is given in Proposition 1.2. of \cite{LaSa}. Therefore equation \eqref{eq:dist_dif_on_N} holds.
Let $p \in M$. Consider first the function $h_p(z)=d_{g}(p,z)-d_{\widetilde g}(p,z), \: z \in {\partial} M$. Let $w \in {\partial} M$. By \eqref{eq:dist_dif_agree} it holds that \[ h_p(z)=d_{g}(p,w)-d_{\widetilde g}(p,w). \] Thus $h_p$ is a constant function.
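Spelled out, this is just a rearrangement of \eqref{eq:dist_dif_agree}: for any $z,w \in {\partial} M$,
\[
h_p(z)-h_p(w)=\big(d_g(p,z)-d_g(p,w)\big)-\big(d_{\widetilde g}(p,z)-d_{\widetilde g}(p,w)\big)=D_p(z,w)-D_p(z,w)=0 .
\]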
We will prove that
\begin{equation} \label{eq:proof_of_(31)} d_G(p,z)=\inf\bigg\{d_g(p,y_0)+ \bigg(\sum_{j=1}^Nd_F(y_{j-1},x_j)+d_g(x_{j},y_j)\bigg)+d_F(y_{N},z)\bigg\}, \end{equation}
where $d_F$ is the distance function of the Riemannian manifold $(F,G|_F)$ and \\ $\{y_0, \ldots, y_N, x_1, \ldots, x_N\} \subset {\partial} M$. Notice that a similar formula holds for $d_{\widetilde G}$, when $d_g$ is replaced with $d_{\widetilde g}$. If \eqref{eq:proof_of_(31)} holds, then it follows from equation \eqref{eq:boundary_dist_agree} that \[ d_G(p,z)-d_{\widetilde G}(p,z)=h_p(z)=\hbox{constant with respect to $z$}. \] This implies \eqref{eq:dist_dif_on_N}, in the case when $p \in M$.
Finally we will prove \eqref{eq:proof_of_(31)}. Let $\epsilon>0$. Since ${\partial} M$ is a smooth co-dimension 1 submanifold of $N$, it follows from the definition of the Riemannian distance function $d_G$, that there exists a piecewise smooth curve $c$ from $p$ to $z$, that crosses the boundary finitely many times, and whose length is $\epsilon$--close to $d_G(p,z)$. Then \[ d_g(p,y_0)+ \bigg(\sum_{j=1}^Nd_F(y_{j-1},x_j)+d_g(x_{j},y_j)\bigg)+d_F(y_{N},z)\leq \mathcal{L}_G(c)\leq d_G(p,z)+\epsilon, \] where $\{y_0, \ldots, y_N, x_1, \ldots, x_N\} \subset {\partial} M$ are the points where $c$ crosses the boundary. Taking $\epsilon$ to $0$ implies \eqref{eq:proof_of_(31)}.
\end{proof}
Due to the previous lemma it follows from Section 2.4 of \cite{LaSa} that the metric tensors $G$ and $\widetilde G$ coincide. We will sketch here the main ideas of this proof.
First we prove that the geodesics of metrics $G$ and $\widetilde G$ agree up to reparametrization. Let $\tau_G:SN \to {\mathbb R}$ be the cut distance function of metric tensor $G$. By Lemma 2.9 of \cite{LaSa} the following equality holds for any $(z,v)\in SF^{int}$ \begin{equation} \label{eq:image_of_geo_1} \gamma^G_{z,-v}((0,\tau_G(z,-v)))=\{p \in N:D_p(\cdot,z) \hbox{ is smooth at $z$ and grad$_G D_p(\cdot,z)$ at $z$ is $v$} \}.
\end{equation} Here $\gamma^G_{z,-v}$ is the geodesic of $G$ with initial conditions $(z,-v)$. Since $G=\widetilde G$ on $F^{int}$, the formulas \eqref{eq:dist_dif_on_N} and \eqref{eq:image_of_geo_1} imply \begin{equation} \label{eq:image_of_geo_2} \gamma^G_{z,-v}((0,\tau_G(z,-v)))=\gamma^{\widetilde G}_{z,-v}((0,\tau_{\widetilde G}(z,-v))), \quad (z,v) \in SF^{int}, \end{equation} where $\tau_{\widetilde G}$ is the cut distance function of $\widetilde G$. Therefore, for any $(z,v) \in SF^{int}$ there exists a diffeomorphism $\alpha_{z,v}:(0,\tau_G(z,-v)) \to (0,\tau_{\widetilde G}(z,-v))$ such that \begin{equation} \label{eq:image_of_geo_3} \gamma^G_{z,-v}(t)=\gamma^{\widetilde G}_{z,-v}(\alpha_{z,v}(t)), \quad t \in (0,\tau_G(z,-v)), \:
(z,v) \in SF^{int}. \end{equation}
Let $p \in M^{int}$. We denote the exponential map of $G$ at $p$ by $\exp_p$. Then the following set is not empty, $$ \Omega_p:=\{rv\in T_pN: r>0, \: v=\exp_p^{-1}(z), \: p \in \sigma(z,v), \: (z,v) \in SF^{int}\}^{int}. $$ Moreover, if we denote the exponential map of $\widetilde G$ at $p$ by $\widetilde \exp_p$, then in view of \eqref{eq:image_of_geo_3} we have \begin{equation} \label{eq:geodesic_in_M} \Omega_p=\{rv\in T_pN: r>0, \: v=\widetilde \exp_p^{-1}(z), \: p \in \sigma(z,v), \: (z,v) \in SF^{int}\}^{int}. \end{equation}
Let $(U,x)$ be a local coordinate chart of $M^{int}$. We denote the Christoffel symbols of $G$ and $\widetilde G$ as $\Gamma$ and $\widetilde \Gamma$, respectively. By \eqref{eq:image_of_geo_3}, \eqref{eq:geodesic_in_M} and Proposition 2.13 of \cite{LaSa} there exists a smooth $1$--form $\beta$ on $U$ such that $$
\Gamma^k_{ij}(x)-\widetilde \Gamma^k_{ij}(x)=\delta^k_i\beta_j(x)+\delta^k_j\beta_i(x), $$ where $\delta^k_j$ is the Kronecker delta. This and Lemma 2.14 of \cite{LaSa} imply that the geodesics of metric tensors $G$ and $\widetilde G$ agree up to reparametrization. See also \cite{matveev2012geodesically} for a similar result. We arrive at the following.
\begin{lemma} Suppose that $N, F, G$ and $\widetilde G$ are as above. Then $G=\widetilde{G}$ in all of $N$. \label{Le:geodesic eq -> metrics are the same} \end{lemma} \begin{proof} Since geodesics of metric tensors $G$ and $\widetilde G$ agree up to reparametrization the main result of \cite{topalov2003geodesic} shows that the function \begin{equation} \label{Matveev formula} I_0((x,v))=\bigg(\frac{\det (G(x)) }{\det(\widetilde{G}(x))}\bigg)^{\frac{2}{n+1}} \widetilde{G}(x,v), \quad (x,v) \in TN, \end{equation} where $\widetilde {G}(x,v)=\widetilde{G}_{jk}(x)v^jv^k$, is constant on the geodesic flow of $G$. Note that the function $F(x):= \frac{\text{det} (G(x)) }{\text{det}(\widetilde{G}(x))}$ is coordinate invariant.
Let $\varphi_t:SN \to SN$, $t\in {\mathbb R}$ be the geodesic flow of $G$ and $\pi:TN \to N$ the projection onto the base point. Since $G=\widetilde G$ on $F^{int}$, we have $$
G(\varphi_0(z,v)) =\|v\|^2_G=I_0(\varphi_0(z,v)), \quad (z,v) \in TF^{int} . $$ Therefore for any $t \in {\mathbb R}$ and for any $(z, v) \in TF^{int} \setminus \{0\}$ the following holds $$
G(\varphi_t(z,v)) =\|v\|^2_G=I_0(\varphi_t(z,v))=F(\pi(\varphi_t(z,v)))\widetilde G(\varphi_t(z,v)). $$ This implies the claim. For more details, see Lemma 2.15 of \cite{LaSa}. \end{proof}
We conclude that the proof of Theorem \ref{th:main} follows from Propositions \ref{th:topology}, \ref{th:diffeo} and Lemma \ref{Le:geodesic eq -> metrics are the same}.
\end{document}
\begin{document}
\title{On the negativity of random pure states} \author{Animesh Datta}
\email{[email protected]}
\affiliation{Institute for Mathematical Sciences, 53 Prince's Gate, Imperial College, London, SW7 2PG, UK}
\affiliation{QOLS, The Blackett Laboratory, Imperial College London, Prince Consort Road, SW7 2BW, UK}
\date{\today} \begin{abstract} This paper deals with the entanglement, as quantified by the negativity, of pure quantum states chosen at random from the invariant Haar measure. We show that it is a constant ($0.72037$) multiple of the maximum possible entanglement. In line with the results based on the concentration of measure, we find evidence that the convergence to the final value is exponentially fast. We compare the analytically calculated mean and standard deviation with those calculated numerically for pure states generated via pseudorandom unitary matrices proposed by Emerson \emph{et al.} [Science, \textbf{302}, 3098, (2003)]. Finally, we draw some novel conclusions about the geometry of quantum states based on our result. \end{abstract}
\pacs{03.67.Mn, 02.30.Gp}
\maketitle
\section{Introduction}
Entanglement has come to be believed as one of the cornerstones of quantum information science. The necessity of entanglement in quantum computation~\cite{jl03} and information~\cite{masanes06a,pw09} tasks is well acknowledged. Substantial amounts of experimental effort are expended in the generation and manipulation of quantum entanglement. Nevertheless, the role of entanglement in quantum information science in general, and quantum computation in particular, is far from clear. Meyer has presented a version of the quantum search algorithm that requires no entanglement~\cite{meyer00a}, and instances are known of mixed-state quantum computation where exponential speedup is attained in the presence of only limited amounts of entanglement~\cite{dfc05}, and other quantities have been proposed as alternate resources for the speedup~\cite{dsc08,dg09}. Recent results have further illuminated the role of entanglement in pure-state quantum computation. It was already known, due to the Gottesman-Knill theorem~\cite{nielsen00a}, that entanglement is by no means sufficient for universal quantum computation. The new results~\cite{bmw09,gfe09} show that, in fact, almost all pure states are too entangled to be a universal resource for quantum computation. Though proved in the context of measurement-based quantum computation, and based on the geometric measure of entanglement, which is the absolute square of the inner product with the closest product state, these results drive home the point that implications along the lines of ``more entanglement implies more computational power'' are fallacious~\cite{gfe09}. The strategy employed for proving these results can generally be termed as ``concentration of measure''~\cite{hlw06}, by which a typical pure state, chosen at random from the left- and right-invariant Haar measure, is almost always close to maximally entangled across any bipartition.
Arguments based on the concentration of measure have been used to obtain the average values of measures of correlations and entanglement in typical quantum states. Concentration of measure is a very powerful concept from measure theory, which puts bounds on how much the values of certain smooth (Lipschitz) functions can vary from their mean value. This is a consequence of the remarkable fact that the uniform distribution on the $k$-sphere $\mathbb{S}^k$ is concentrated largely on the equator for large $k$, and any polar cap smaller than a hemisphere has a relative volume exponentially small in $k$. Examples in quantum information theory include the entropy of the reduced density matrix, entanglement of formation, distillable common randomness~\cite{hlw06}. The entropy of reduced density matrices of typical states has also been conjectured and calculated independently~\cite{page93,s96,fk94,ruiz95}, as has been their concurrence, purity and the linear entropy~\cite{scott03}. Not much is however known of one of the most common and computable measures of entanglement, the negativity~\cite{zhsl98,vw02}, in random Haar distributed pure states. In this paper, our endeavor will be to address this question. We show that the negativity of a random pure state taken from a Haar distribution is a constant multiple of the maximum possible. This entanglement can also be generated efficiently using two qubit gates~\cite{odp07}. We will evaluate this constant using techniques similar to those in Refs.~\cite{s96,scott03}, and confirm our results numerically using efficiently generated pseudorandom unitaries~\cite{emerson03a}. For simplicity, we will only present results for equal bipartitions, but the extension to unequal splits is straightforward.
That the negativity (defined in Eq. (\ref{E:neg})) of random pure states is less than maximal might seem to contradict the statement that random pure states in large enough Hilbert spaces are close to being maximally entangled. The latter statement, however, is not true in general. As shown in~\cite{hlw06}, for a state $\rho$ residing in a Hilbert space of dimension $d_A \times d_B$ with reduced state $\rho_A= {\rm{Tr }}_B(\rho),$ if $d_B$ is a large enough multiple of $d_A\log d_A/\epsilon^2,$ then
\begin{equation}
\label{E:cluster} (1-\epsilon)\frac{1}{d_A}\mathbb{I} \leq \rho_A \leq (1+\epsilon)\frac{1}{d_A}\mathbb{I}
\end{equation} If a state satisfies Eq.~(\ref{E:cluster}), then its negativity is evidently near-maximal. But as the condition for its validity shows, this is only true when the bipartite split is quite asymmetrical. Thus, for equal bipartite splits, which is often of interest in quantum information science, there is no \emph{a priori} reason to expect the negativity of random pure states to be close to maximal. This is the case we study here. Just to highlight the degree of asymmetry needed to have the negativity close to maximal, for $\epsilon = 0.1$ and $d_A=2,$ we require $d_B \gg 200,$ and for $d_A=16,$ $d_B \gg 6400.$
The outline of the paper is as follows. In Sec.~\ref{sec:mean}, we begin by deriving the expression of average negativity. It involves performing integrations over the probability simplex which are rewritten in terms of other nonconstrained variables, finally leaving us with a combination of hypergeometric functions. Sec.~\ref{sec:Var} derives the expressions for the variance in the negativity in terms of similar hypergeometric functions. These functions are explicitly evaluated in Sec.~\ref{sec:evals} numerically. This is necessary as the series we have is provably not summable in closed form, which we discuss in brief in Appendix~\ref{app:digress}. We obtain the final expression for the average negativity of Haar-distributed random pure states. We also compare our results with a numerical simulation using pseudorandom pure states generated from efficiently generated pseudorandom unitaries~\cite{emerson03a}, finding good agreement. We finally conclude in Sec.~\ref{sec:conclude} with discussions about the ramifications of our finding on the geometry of the set of quantum states. We also discuss the prospect of extending the present analysis to random mixed quantum states.
\section{Negativity of typical pure states} \label{sec:mean}
The negativity is an entanglement monotone which is based on the partial transpose test of detecting entanglement~\cite{p96}. Given a bipartite quantum state residing in $\mathcal{H}_A\otimes \mathcal{H}_B$ with dimensions $\mu$ and $\nu$, called $\rho_{AB},$ the negativity is defined as
\begin{equation}
\label{E:neg}
\mathcal{N}(\rho_{AB}) = \frac{||\rho_{AB}^{T_A}||-1}{2},
\end{equation} where $\rho_{AB}^{T_A}$ denotes the partial transpose with respect to subsystem $A$, and $||\sigma||$ denotes the trace norm, or sum of the absolute values of the eigenvalues of $\sigma,$ when $\sigma$ is Hermitian, as is the case with $\rho_{AB}^{T_A}.$ For pure states residing in the above space, it is always possible to write a Schmidt decomposition~\cite{nielsen00a}. This paper will only deal with the scenario $\mu=\nu$, the extension to the unequal case being tedious, but straightforward. The distribution of the Schmidt coefficients is given by (for $\mu = \nu$)~\cite{lp88}
\begin{equation} P(\mathbf{p}) \mathrm{d}\mathbf{p} = N \delta(1-\sum_{i=1}^\mu p_i) \prod_{1\leq i < j \leq \mu} (p_i-p_j)^2 \prod_{k=1}^\mu \mathrm{d}\mathbf{p}_k,
\end{equation} where $\delta(\cdot)$ is the Dirac delta function. The negativity for pure states is
\begin{equation} \mathcal{N} = \frac{1}{2}\left[\left(\sum_{i=1}^\mu\sqrt{p_i}\right)^2-1\right] =\frac{1}{2}\mathop{\sum_{i,j=1}}_{i\neq j}^{\mu}\sqrt{p_i p_j} \label{E:negdef}
\end{equation} and its mean is given by
\begin{equation} \avg{\mathcal{N}}= \frac{1}{2} \int \mathop{\sum_{i,j=1}}_{i\neq j}^{\mu}\sqrt{p_i p_j}P(\mathbf{p}) \mathrm{d}\mathbf{p}.
\end{equation} At the outset, it helps to change variables such that $q_i=r p_i$ which removes the hurdle of integrating over the probability simplex~\cite{s96,scott03}, whereby
\begin{equation} Q(\mathbf{q})\mathrm{d}\mathbf{q}\equiv \prod_{1\leq i<j\leq \mu}\left(q_i-q_j\right)^2 \prod_{k=1}^\mu e^{-q_k}\,\mathrm{d}q_k \label{Q}\\ =N\,e^{-r}r^{\mu^2-1}P(\mathbf{p})\,\mathrm{d}\mathbf{p}\,\mathrm{d}r\;.
\end{equation} The new variables $q_i$ take on values independently in the range $[0,\infty),$ and $r$ is a scaling factor given by $r =\sum_iq_i$. Integrating over all the values of the new variables, we find that the normalization constant is given by $N=\overline Q/\Gamma(\mu\nu)$, where $\overline{Q}\equiv\int Q(\mathbf{q})d\mathbf{q}$. Similarly, we find that \begin{equation} \int \sqrt{q_i q_j}Q(\mathbf{q})\mathrm{d}\mathbf{q} = \overline Q\, \frac{\Gamma(\mu^2+1)}{\Gamma(\mu^2)} \int \sqrt{p_i p_j}P(\mathbf{p})\,\mathrm{d}\mathbf{p}\;, \label{QtoP} \end{equation} with $\Gamma(\mu)=(\mu-1)!.$ Notice that the first product in Eq.~(\ref{Q}) is the square of the Van der Monde determinant~\cite{s96,scott03}
\begin{equation} \hspace{-2.0cm} \Delta(\mathbf{q}) \,\equiv\, \prod_{1\leq i<j\leq
\mu}\left(q_i-q_j\right) = \left| \begin{array}{ccc}
1 & \ldots & 1 \\
q_1 & \ldots & q_\mu \\
\vdots & \ddots & \vdots \\
q_1^{\mu-1} & \ldots & q_\mu^{\mu-1}
\end{array} \right|
= \left| \begin{array}{ccc}
L_0(q_1) & \ldots & L_0(q_\mu) \\
L_1(q_1) & \ldots & L_1(q_\mu) \\
\vdots & \ddots & \vdots \\ \Gamma(\mu) L_{\mu-1}(q_1) & \ldots & \Gamma(\mu)L_{\mu-1}(q_\mu)
\end{array} \right|\;.
\label{Van2} \end{equation} The second determinant in Eq.~(\ref{Van2}), follows from the basic property of invariance after adding a multiple of one row to another, and the polynomials $L_k(q)$ judiciously chosen to be Laguerre polynomials~\cite{gradshteyn}, satisfying the orthogonality relation \begin{equation} \int_0^\infty dq\,e^{-q} L_k(q)L_l(q) = \delta_{kl}\;. \label{L1} \end{equation} These facts in hand, we can evaluate \begin{eqnarray}
\overline{Q} &=& \int\Delta(\mathbf{q})^2\prod_{k=1}^\mu e^{-q_k}\,dq_k \nonumber\\
&=&\mathop{\sum_{T,R \in \mathcal{S}_{\mu}}} (-1)^{T+R}\prod_{k=1}^\mu\Gamma(T(k))\Gamma(R(k))\int dq_k\,e^{-q_k} L_{T(k)-1}(q_{k})L_{R(k)-1}(q_k) \nonumber \\
&=&\sum_{R \in \mathcal{S}_{\mu}}\prod_{k=1}^\mu\Gamma(R(k))^2=\mu!\prod_{k=1}^\mu\Gamma(k)^2\;, \end{eqnarray} with $T,R$ being elements of the permutation group $\mathcal{S}_{\mu}$ on $\mu$ elements. We can now calculate the integral over $\{q_1,\cdots,q_{\mu}\}$ in Eq. (\ref{QtoP}) as
\begin{eqnarray}
&&\mathop{\sum_{i,j=1}}_{i\neq j}^\mu \int \sqrt{q_i q_j}Q(\mathbf{q})\,\mathrm{d}\mathbf{q} \nonumber\\
&&=\mathop{\sum_{i,j=1}}_{i\neq j}^\mu \int \sqrt{q_i q_j}\prod_{m=1}^{\mu} dq_m e^{-q_m}\sum_{T,R \in \mathcal{S}_{\mu}}(-1)^{T+R}\prod_{m=1}^{\mu}\Gamma(T(m))\Gamma(R(m))L_{T(m)-1}(q_m)L_{R(m)-1}(q_m) \nonumber \\
&&=\overline{Q}\mathop{\sum_{k,l=0}}^{\mu-1}\sum_{R \in \mathcal{S}_2}(-1)^{R}\int\sqrt{q_k q_l}L_{R(k)-1}(q_k)L_{k-1}(q_k)L_{R(l)-1}(q_l)L_{l-1}(q_l)e^{-q_k-q_l}dq_kdq_l \nonumber\\
&&=\overline{Q}\sum_{k,l=0}^{\mu-1}\sum_{R \in \mathcal{S}_2} (-1)^{R}I_{k,R(k)}^{(1/2)}I_{l,R(l)}^{(1/2)}\nonumber\\
&&= \overline{Q} \sum_{k,l=0}^{\mu-1}\left|\begin{array}{cc}
I_{kk}^{(1/2)} & I_{kl}^{(1/2)} \\
I_{lk}^{(1/2)} & I_{ll}^{(1/2)} \\
\end{array} \right|, \end{eqnarray} where
\begin{equation} I_{kl}^{(\beta)} \equiv \int_0^{\infty}e^{-q}q^{\beta}\,L_k(q)L_l(q)\;\mathrm{d}q, \label{E:Int}
\end{equation}
$|\cdot|$ is the determinant and we have used the orthonormality condition in Eq. (\ref{L1}) in the first step of the evaluation. We thus have
\begin{equation}
\label{E:avgneg} \avg{\mathcal{N}}=\frac{1}{2\mu^2}\sum_{k,l=0}^{\mu-1}\left[ I_{kk}^{(1/2)}I_{ll}^{(1/2)}-\left(I_{kl}^{(1/2)}\right)^2\right]\;,
\end{equation} except that the integral needs to be evaluated.
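As a quick numerical illustration, which is not part of the derivation, Eq.~(\ref{E:avgneg}) can be evaluated by computing the integrals $I_{kl}^{(1/2)}$ of Eq.~(\ref{E:Int}) with generalized Gauss--Laguerre quadrature; since the remaining integrand $L_k(q)L_l(q)$ is a polynomial, the quadrature is exact. The minimal sketch below assumes Python with NumPy and SciPy available; dividing by the maximal negativity $(\mu-1)/2$ of a $\mu\times\mu$ pure state reproduces the ratios tabulated in Sec.~\ref{sec:evals}, e.g. $\avg{\mathcal{N}}=3\pi/32$ and a ratio of $\approx 0.589$ for $\mu=2$.
\begin{verbatim}
import numpy as np
from scipy.special import eval_laguerre, roots_genlaguerre

def mean_negativity(mu, beta=0.5):
    # <N> = (1/(2 mu^2)) * sum_{k,l} [ I_kk I_ll - I_kl^2 ]   (Eq. (E:avgneg))
    # with I_kl^(beta) = int_0^inf e^{-q} q^beta L_k(q) L_l(q) dq   (Eq. (E:Int))
    x, w = roots_genlaguerre(2 * mu, beta)   # nodes/weights for weight q^beta e^{-q}
    L = np.array([eval_laguerre(k, x) for k in range(mu)])   # L[k, i] = L_k(x_i)
    I = (L * w) @ L.T                                        # I[k, l] = I_kl^(beta)
    d = np.diag(I)
    return (np.outer(d, d) - I ** 2).sum() / (2 * mu ** 2)

for n in (2, 4, 6, 8):
    mu = 2 ** (n // 2)
    print(n, mean_negativity(mu) / ((mu - 1) / 2))   # 0.589..., 0.653..., 0.686..., 0.703...
\end{verbatim}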
\section{Variance in the negativity} \label{sec:Var}
Having calculated the mean of the negativity for random, Haar distributed pure states, we move on to calculate its variance. Based on the definition of negativity in Eq. (\ref{E:negdef}), we obtain the expression for the variance of the negativity as ($\sigma$ is the standard deviation)
\begin{equation} \sigma^2 = \frac{1}{4}\left[\avg{\left(\sum_{i=1}^\mu\sqrt{p_i}\right)^4} - \avg{\left(\sum_{i=1}^\mu\sqrt{p_i}\right)^2}^2 \right].
\end{equation} The second term has already been evaluated in the previous section, so we need only concern ourselves with the first term. We begin by expanding the fourth power above as
\begin{equation}
\hspace{-2.5cm} \left(\sum_{i=1}^\mu\sqrt{p_i}\right)^4=1+ 2\mathop{\sum_{i,j=1}}_{i\neq j}^{\mu} \sqrt{p_ip_j} + 2\mathop{\sum_{i,j=1}}_{i\neq j}^{\mu}p_ip_j + 4\mathop{\sum_{i,j,k=1}}_{i\neq j\neq k}^{\mu} p_i\sqrt{p_jp_k} + \mathop{\sum_{i,j,k,l=1}}_{i\neq j\neq k \neq l}^{\mu} \sqrt{p_ip_jp_kp_l}.
\end{equation} Each of these terms can now be individually evaluated, and omitting the details we just present the results as
\begin{eqnarray}
\mathop{\sum_{i,j=1}}_{i\neq j}^{\mu}p_ip_j &=& \frac{1}{\mu^2(\mu^2+1)}\sum_{k,l=0}^{\mu-1}\left|\begin{array}{cc}
I_{kk}^{(1)} & I_{kl}^{(1)} \\
I_{lk}^{(1)} & I_{ll}^{(1)} \\
\end{array} \right|, \\
\mathop{\sum_{i,j,k=1}}_{i\neq j\neq k}^{\mu} p_i\sqrt{p_jp_k} &=& \frac{1}{\mu^2(\mu^2+1)}\sum_{k,l,m=0}^{\mu-1} \left| \begin{array}{ccc}
I_{kk}^{(1)} & I_{kl}^{(1/2)} & I_{km}^{(1/2)} \\
I_{lk}^{(1)} & I_{ll}^{(1/2)} & I_{lm}^{(1/2)} \\
I_{mk}^{(1)} & I_{ml}^{(1/2)} & I_{mm}^{(1/2)} \\
\end{array}\right|, \\
\mathop{\sum_{i,j,k,l=1}}_{i\neq j\neq k \neq l}^{\mu}\sqrt{p_ip_jp_kp_l}&=&
\frac{1}{\mu^2(\mu^2+1)}\sum_{k,l,m,n=0}^{\mu-1} \left| \begin{array}{cccc}
I_{kk}^{(1/2)} & I_{kl}^{(1/2)} & I_{km}^{(1/2)} & I_{kn}^{(1/2)} \\
I_{lk}^{(1/2)} & I_{ll}^{(1/2)} & I_{lm}^{(1/2)} & I_{ln}^{(1/2)} \\
I_{mk}^{(1/2)} & I_{ml}^{(1/2)} & I_{mm}^{(1/2)} & I_{mn}^{(1/2)} \\
I_{nk}^{(1/2)} & I_{nl}^{(1/2)} & I_{nm}^{(1/2)} & I_{nn}^{(1/2)} \\
\end{array} \right|\!.
\end{eqnarray}
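The determinant sums above can be transcribed directly into code; the minimal sketch below (again assuming Python with NumPy and SciPy, and not part of the original text) sums the $2\times2$, $3\times3$ and $4\times4$ determinants over all index tuples, tuples with repeated indices contributing zero.
\begin{verbatim}
import numpy as np
from itertools import product
from scipy.special import eval_laguerre, roots_genlaguerre

def I_matrix(mu, beta):
    # I[k, l] = int_0^inf e^{-q} q^beta L_k(q) L_l(q) dq (exact Gauss-Laguerre)
    x, w = roots_genlaguerre(2 * mu + 2, beta)
    L = np.array([eval_laguerre(k, x) for k in range(mu)])
    return (L * w) @ L.T

def std_negativity(mu):
    A = I_matrix(mu, 0.5)                    # I^(1/2)
    B = I_matrix(mu, 1.0)                    # I^(1)
    c2 = mu ** 2 * (mu ** 2 + 1)
    dA, dB = np.diag(A), np.diag(B)
    S1 = (np.outer(dA, dA) - A ** 2).sum() / mu ** 2   # <sum_{i!=j} sqrt(p_i p_j)>
    T1 = (np.outer(dB, dB) - B ** 2).sum() / c2        # <sum_{i!=j} p_i p_j>
    idx = range(mu)
    T2 = sum(np.linalg.det(np.array([[B[k, k], A[k, l], A[k, m]],
                                     [B[l, k], A[l, l], A[l, m]],
                                     [B[m, k], A[m, l], A[m, m]]]))
             for k, l, m in product(idx, repeat=3)) / c2   # <sum p_i sqrt(p_j p_k)>
    T3 = sum(np.linalg.det(A[np.ix_(t, t)])
             for t in product(idx, repeat=4)) / c2         # <sum sqrt(p_i p_j p_k p_l)>
    fourth = 1 + 2 * S1 + 2 * T1 + 4 * T2 + T3             # <(sum_i sqrt(p_i))^4>
    second = 1 + S1                                        # <(sum_i sqrt(p_i))^2>
    return np.sqrt((fourth - second ** 2) / 4)

for n in (2, 4):
    mu = 2 ** (n // 2)
    print(n, std_negativity(mu) / ((mu - 1) / 2))          # sigma / N_max
\end{verbatim}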
\section{Evaluating the integrals} \label{sec:evals}
Having derived formal expressions for the mean and standard deviation of the negativity of a random pure state, we now need to evaluate the integral in Eq. (\ref{E:Int}). To that end, we use the generating function for Laguerre polynomials~\cite{gradshteyn}
\begin{equation}
(1-z)^{-1} e^{xz/(z-1)} = \sum_{l=0}^{\infty}
L_l(x)z^l, \qquad |z|<1,
\end{equation} and
\begin{equation} \hspace{-2.0cm}\int_0^{\infty}e^{-st}t^{\beta}\,L_n^{\alpha}(t)\;\mathrm{d}t= \frac{\Gamma(\beta+1)\,\Gamma(\alpha+n+1)}{n!\,\Gamma(\alpha+1)}s^{-\beta-1}F\left(-n,\beta+1;\alpha+1,\frac{1}{s}\right),
\end{equation} $F$ being the hypergeometric function such that
\begin{equation} F(a,b;c;z)= \sum_{n=0}^{\infty} \frac{(a)_n\,(b)_n}{(c)_n}\frac{z^n}{n!},
\end{equation} and $(a)_n = a(a+1)(a+2)...(a+n-1)$ is the Pochhammer symbol. Note that if $a$ is a negative integer, $(a)_n = 0$ for $n > |a|$ and the hypergeometric series terminates. Then,
\begin{eqnarray} \sum_{l=0}^{\infty} I_{kl}^{(\beta)} z^l &=&\int_0^{\infty}e^{-x}x^{\beta}\,L_k(x)(1-z)^{-1} e^{xz/(z-1)}\;\mathrm{d}x \nonumber\\ &=& s\int_0^{\infty}e^{-sx}x^{\beta}\,L_k(x)\;\mathrm{d}x\;\;\;\;\;\;\;\;\;\;\;\;\;\;s=1/(1-z) \nonumber\\ &=&s^{-\beta}\Gamma(\beta+1)F\left(-k,\beta+1;1;\frac{1}{s}\right)\nonumber\\ &=&\Gamma(\beta+1)\sum_{t=0}^{k}\frac{(-k)_t\,(\beta+1)_t}{(1)_t}\frac{1}{t!}(1-z)^{t+\beta}\nonumber\\ &=&\Gamma(\beta+1)\sum_{l=0}^{\infty}\sum_{t=0}^{k}\frac{(-1)^l}{l!}\frac{(-k)_t\,(\beta+1)_t}{(t!)^2}(t+\beta)_{\underline{l}}\,z^l,
\end{eqnarray} whereby
\begin{equation} I_{kl}^{(\beta)}=\Gamma(\beta+1)\frac{(-1)^l}{l!}\sum_{t=0}^{k}\frac{(-k)_t\,(\beta+1)_t}{(t!)^2}(t+\beta)_{\underline{l}}\;,
\end{equation} and $(a)_{\underline{n}}=a(a-1)(a-2)...(a-n+1)$ is the `falling factorial'. Using the following identities for the Pochhammer symbols
\begin{eqnarray} (x)_{\underline{n}}&=&(-1)^n (-x)_n,\\ (-x)_{n}&=& (-1)^n(x-n+1)_n, \\ (x)_n &=& \Gamma(x+n)/\Gamma(x),
\end{eqnarray} we have
\begin{eqnarray}
\label{E:Ikl}
I_{kl}^{(\beta)}&=&\frac{(-1)^l}{l!}\sum_{t=0}^{k}(-1)^{t} \left(\begin{array}{c}
k \\
t \\ \end{array}
\right)\frac{[\Gamma(t+\beta+1)]^2}{t!\,\Gamma(t-l+\beta+1)}\nonumber\\ &=&\frac{(-1)^l}{l!}\frac{\Gamma(1+\beta)^2}{\Gamma(1+\beta-l)} \; _3F_2\left(\{\beta+1,\beta+1,-k\};\{1,\beta+1-l\};1\right).
\end{eqnarray}
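As an aside, the terminating sum in the first line of Eq.~(\ref{E:Ikl}), with the alternating factor $(-1)^t$ written explicitly, is easy to code and to check against values obtained directly from the integral, e.g. $I_{00}^{(1/2)}=\Gamma(3/2)=\sqrt{\pi}/2$ and $I_{11}^{(1/2)}=7\sqrt{\pi}/8$. The minimal sketch below assumes Python and is written for $\beta=1/2$ only; for $\beta=1$ the Gamma function in the denominator can hit a pole, in which case the corresponding term vanishes and must be skipped.
\begin{verbatim}
import math

def I_closed(k, l, beta=0.5):
    # First line of Eq. (E:Ikl): terminating sum for I_{kl}^(beta), beta = 1/2
    total = sum((-1) ** t * math.comb(k, t)
                * math.gamma(t + beta + 1) ** 2
                / (math.factorial(t) * math.gamma(t - l + beta + 1))
                for t in range(k + 1))
    return (-1) ** l / math.factorial(l) * total

print(I_closed(0, 0), math.sqrt(math.pi) / 2)      # both 0.8862...
print(I_closed(1, 1), 7 * math.sqrt(math.pi) / 8)  # both 1.5509...
\end{verbatim}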
To get the final expression for the negativity in Eq.~(\ref{E:avgneg}), we substitute the expression for the integrals from Eq.~(\ref{E:Ikl}). The expressions are not very illuminating, and for the lack of an asymptotic expression, we present the numerical values in Table~(\ref{T:t1}), and plot them in Fig.~(\ref{negativitylimit}). See Appendix~\ref{app:digress} for a note on the summability of the series. Anticipating a scaling in proportion to that of a maximally entangled state, we divide the mean expressed in Eq.~(\ref{E:avgneg}) by the maximum possible negativity of a $\mu\times\mu$ system, which is $\mathcal{N}_{max}=(\mu-1)/2.$ As can be seen from Table~(\ref{T:t1}), the average value of the negativity saturates to a constant multiple of the maximum possible. This constant is found numerically, and in the asymptotic limit of large $n$, the negativity for an equal bipartition of a randomly chosen Haar-distributed pure state is
\begin{equation}
\label{E:result} \avg{\mathcal{N}} \sim 0.72037\left(\frac{2^{n/2}-1}{2}\right).
\end{equation} Though we have not proven this analytically, it is easily seen that the convergence is exponential. This can be concluded from the last column in the table, which shows the difference in the successive values of the third column. The value of $\Delta$ is progressively halved as the number of qubits $n$ goes up, and this shows that the negativity indeed saturates monotonically, and arguably, exponentially fast, to the value presented above. This is to be expected from the concentration of measure results~\cite{hlw06}, which means that the negativity of random states in large enough Hilbert spaces is close to their expectation value.
\begin{table} \begin{center}
\begin{tabular}{c|c|r@{.}l|r@{.}l}
\hline
$n$ & $\mu$ & \multicolumn{2}{c|}{$\avg{\mathcal{N}}/\mathcal{N}_{max}$} & \multicolumn{2}{c}{$\Delta$}\\
\hline
\hline
2 & 2 & 0&589049 \\
4 & 4 & 0&65368 & 0&0646309 \\
6 & 8 & 0&686614 & 0&0329346 \\
8 & 16 & 0&703378 & 0&0167641 \\
10 & 32 & 0&711878 & 0&0084994 \\
12 & 64 & 0&716171 & 0&0042932 \\
14 & 128 & 0&718332 & 0&0021611 \\
16 & 256 & 0&719417 & 0&0010851 \\
18 & 512 & 0&719961 & 0&0005439 \\
20 & 1024 & 0&720233 & 0&0002724 \\
22 & 2048 & 0&72037 & 0&0001366 \\
\hline \end{tabular} \end{center} \caption{Ratio of the negativity of random pure states to the maximal negativity for Haar-distributed states of $n$ qubits. For an equipartition of $n$ qubit states, $\mu=2^{n/2}.$ $\Delta$ is the difference between successive values in the third column, providing evidence for an exponential convergence of $\avg{\mathcal{N}}/\mathcal{N}_{max}$ with $n$.} \label{T:t1} \end{table}
\begin{figure}\caption{The ratio $\avg{\mathcal{N}}/\mathcal{N}_{max}$ of Table~\ref{T:t1} plotted as a function of the number of qubits $n$.}\label{negativitylimit}
\end{figure}
\subsection{Numerical verification}
\begin{figure}
\caption{Distribution of the negativity of $100000$ pseudorandom pure states, with $n=4$ (Left) and $n=8$ (Right). The pseudorandom unitaries used were generated via the techniques of \cite{emerson03a}, with $j=40$ interactions applied for each unitary. Also plotted is the gaussian distribution function with just the first two moments, as given by Eq.~(\ref{E:distfn}), as well as the analytically calculated mean (solid vertical line) and the standard deviations (dashed vertical lines). Although the convergence of the pseudorandom construction of~\cite{emerson03a} to the Haar measure is not obvious, it has been shown to do so~\cite{ell05,dop07}.}
\label{F:numerics}
\end{figure}
As a final corroboration of our results, we test our calculations against numerically generated pure states. These are pseudorandom rather than random Haar-distributed. They are generated by applying pseudorandom unitaries presented in Ref.~\cite{emerson03a} on fiducial pure states. The negativity of these pure states is calculated and plotted as a histogram in Fig.~(\ref{F:numerics}). We compare this to an approximation of the cumulant generating function, and the probability distribution function for the negativity itself $P(\mathcal{N})d\mathcal{N}$, given by
\begin{eqnarray} P(\mathcal{N})d\mathcal{N}&=&\frac{1}{2\pi}\int_{-\infty}^{\infty}d\omega \exp\left(-i\mathcal{N}\omega + \frac{\avg{\mathcal{N}}}{\mathcal{N}_{max}}i\omega + \frac{\sigma}{\mathcal{N}_{max}}\frac{(i\omega)^2}{2!}\right)d\mathcal{N} \nonumber\\ &=& \frac{1}{\sqrt{2\pi\sigma'^2}}e^{-\left(\mathcal{N}-\mathcal{N'}\right)^2/2\sigma'^2}d\mathcal{N}, \label{E:distfn}
\end{eqnarray} where $\mathcal{N'}=\avg{\mathcal{N}}/\mathcal{N}_{max}$ and $\sigma'=\sigma/\mathcal{N}_{max}.$ As is evident from Fig.~(\ref{F:numerics}), the distributions are very localized, and the gaussian distribution seems quite apt.
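The figure can also be reproduced without the pseudorandom circuits of Ref.~\cite{emerson03a}: exactly Haar-distributed pure states are obtained by normalizing complex Gaussian vectors, and the negativity follows from the Schmidt coefficients, i.e.\ the singular values of the reshaped amplitude matrix. The minimal sketch below (Python with NumPy assumed) produces samples whose mean and standard deviation can be compared with the analytic values above.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

def haar_pure_state(dim):
    # A normalized complex Gaussian vector is Haar-distributed
    v = rng.normal(size=dim) + 1j * rng.normal(size=dim)
    return v / np.linalg.norm(v)

def negativity(psi, mu):
    # Schmidt coefficients sqrt(p_i) = singular values of the mu x mu amplitude matrix
    s = np.linalg.svd(psi.reshape(mu, mu), compute_uv=False)
    return (np.sum(s) ** 2 - 1) / 2      # pure-state negativity, Eq. (E:negdef)

n = 8
mu = 2 ** (n // 2)
ratios = [negativity(haar_pure_state(mu * mu), mu) / ((mu - 1) / 2)
          for _ in range(2000)]
print(np.mean(ratios), np.std(ratios))   # mean close to 0.7034 for n = 8
\end{verbatim}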
\section{Concluding Discussions} \label{sec:conclude}
The negativity provides upper bounds on the teleportation capacity of a state and on its distillability, the latter via the logarithmic negativity. It is in these two contexts that our results on the negativity provide new insights, not achieved through other measures. To address the teleportation capacity, the singlet distance was introduced in~\cite{vw02}. It is defined as the closest distance any quantum state can get to the singlet (the ideal resource for teleportation) while undergoing only local operations. Mathematically,
\begin{equation}
\Delta(\ket{\Phi},\rho) = \inf_P||\proj{\Phi}-P(\rho)||_1
\end{equation} where $P$ is the set of all local protocols, and $\ket{\Phi}$ is the singlet residing in $\mathbb{C}^m\otimes \mathbb{C}^m$. Note that in our case $m=2^{n/2} = 2\mathcal{N}_{max}+1.$ The following result, also proved in~\cite{vw02}
\begin{equation}
\Delta(\ket{\Phi},\rho) \geq 2\left(1-\frac{2\mathcal{N}(\rho)+1}{m}\right)
\end{equation} then immediately leads to the conclusion that a pure quantum state $\ket{\psi}$, chosen at random from the Haar measure, will with high probability have a singlet distance satisfying (all $\approx$ signs here and henceforth apply to large $n$)
\begin{equation}
\label{E:singletbnd} \Delta(\ket{\Phi},\ket{\psi}) \geq 2\left(1-\frac{2\avg{\mathcal{N}}+1}{2^{n/2}}\right) \approx 2\left(1-\frac{\avg{\mathcal{N}}}{\mathcal{N}_{max}}\right) \approx 0.55926,
\end{equation} where we have used Eq. (\ref{E:result}), which is that $\frac{\avg{\mathcal{N}}}{\mathcal{N}_{max}} = 0.72037 =c < 1.$ This gives us a nontrivial lower bound on how close a typical pure state can be taken to a singlet by purely local operations. This can be recast in terms of an upper bound on the teleportation fidelity~\cite{hhh99,vw02} of random pure states as
\begin{equation}
\label{E:fid}
f_{opt} \equiv \max_{P}\bra{\Phi}P(\proj{\psi})\ket{\Phi} \leq \frac{2\avg{\mathcal{N}}+1}{m} \lesssim \frac{\avg{\mathcal{N}}}{\mathcal{N}_{max}} \approx 0.72037.
\end{equation}
Another application of our result can be found by using the logarithmic negativity~\cite{p05} as an upper bound on the entanglement of distillation $E_D(\rho)$. It was shown~\cite{vw02} that
\begin{equation}
E_D(\rho) \leq E_{\mathcal{N}}(\rho)
\end{equation} where $E_\mathcal{N}(\rho) = \log_2||\rho^{T_A}||_1 = \log_2(2\mathcal{N}(\rho)+1).$ Using this, we get (where c = 0.72037, as after Eq. (\ref{E:singletbnd}))
\begin{equation}
\avg{||\rho^{T_A}||_1} = c\;2^{n/2} + 1-c,
\end{equation} whereby for a pure state $\ket{\psi}$ chosen at random from the Haar measure, we can set the upper bound of distillable entanglement to be
\begin{equation}
\label{E:distent}
E_D(\ket{\psi}) \leq \log_2\left(\avg{||\rho^{T_A}||_1}\right) \approx \frac{n}{2} +\log_2c.
\end{equation} For the constant we present in this work, this provides us with a bound that is tighter by about half an ebit ($\log_2c \approx -0.47319$). Also note that we have taken a logarithm of the average, which is always greater than or equal to the average of the logarithm.
In addition to the obvious conclusions that the fidelity of teleportation and distillability of random pure states have nontrivial upper bounds, the above two mathematical results tell us a few things about the structure of the set of pure quantum states in general. Firstly, although a random pure state is very likely to be highly entangled (close to maximal), it is in no way close to the singlet state, at least in trace norm. This means that a nonzero fraction of these ``close to maximally entangled" states contain inequivalent types of entanglement which are not related by SLOCC operations to the canonical maximally entangled (singlet) state. A second, and probably stronger statement is that not only do random pure states lie in different inequivalent sets of maximally entangled states, but also that some of these classes have a greater ability to retain their entanglement under distillation protocols than others, resulting thereby in an overall lower distillation rate.
This paper shows that the negativity of $n$-qubit random pure states chosen from the Haar measure is a constant multiple of the maximum possible negativity, which goes as $2^{n/2}$ for an equal bipartition of the state. We also provide evidence that the convergence to the asymptotic value is monotonic and exponentially fast. The value of the constant was not evaluated in closed form, and we showed why this was the case. The expression for the negativity is a sum of hypergeometric terms, and the techniques of creative telescoping show that our particular series is not summable. Finally, we show that the results of our analytic calculation are borne out by random states generated by applying pseudorandom unitaries on fiducial states. We also show that the probability distribution for the negativity is well approximated by a gaussian distribution whose mean and variance we obtain analytically.
One issue that we have not addressed here is the extension of the above calculation to random quantum states that are mixed. This is made somewhat challenging by the fact that there does \emph{not} exist a unique measure on the space of mixed quantum states. Since any pure state can be generated by applying a unitary matrix on a fiducial state, a unique measure on the space of pure states can be derived from that on the space of unitary matrices, which is the rotationally invariant Haar measure. Mixed quantum states cannot be generated in a likewise manner, and therefore, it is not possible to capture the distribution of mixed states via the Haar measure. However, any mixed state can be diagonalized by a unitary matrix, and this motivates a product measures on the space of mixed states $\mathcal{M},$ which can be defined as $\mathcal{M}=\mathcal{E}\times P,$ where $P$ is the usual Haar measure that captures the distribution of eigenvectors of the states. $\mathcal{E}$ is meant to capture the distribution of eigenvalues, and there is no unique way of doing that. Attempts have been made~\cite{zs01}, and the mean entanglement, as quantified, for instance, by the purity has been calculated, as has been the logarithmic negativity for tripartite states using minimal purifications~\cite{dop07}. The calculation of the negativity for states of this form will be the subject of a future publication. This will provide us with information about the typical entanglement(negativity) content of random mixed states, which are more and more likely to be encountered as we move closer to realistic implementations of quantum technology.
\appendix
\section{A mathematical digression} \label{app:digress}
The final expression for the negativity, though seemingly compact, is, in fact a sum of exponentially many terms. This retards the evaluation of the quantities in Table~(\ref{T:t1}) drastically, unless a closed form is found for quantity in Eq. (\ref{E:Ikl}). Consequently, it would not only be interesting, but indeed essential to have a closed form of the above expression. For some special instances of $k,l$ and $\beta$, this is possible. Unfortunately, this is not possible for general values of $k$ and $l$ (this paper deals only with $\beta = 1/2, 1$). In fact, it can be shown that there exists \emph{no} closed form solution for the sum in Eq. (\ref{E:Ikl}). The arguments leading to this `tragic' conclusion are presented next.
\begin{theorem}[Zeilberger's algorithm or the method of creative telescoping~\cite{pwz97}] Let $F(n,k)$ be a proper hypergeometric term. Then $F$ satisfies a nontrivial recurrence of the form $$ \sum_{j=0}^J a_j(n)F(n+j,k)=G(n,k+1)-G(n,k), $$ in which $G(n,k)/F(n,k)$ is a rational function of $n$ and $k$. \end{theorem} That this theorem applies to the sum we have at hand is evident. The application of this algorithm to the expression in Eq.~(\ref{E:Ikl}) yields third order recurrences which can be solved using the Gosper-Petkov\v{s}ek algorithm~\cite{pwz97,petkovsek}. This algorithm (also called \texttt{Hyper}~\cite{footnote}) provides a complete solution to the problem in the sense that it either finds all hypergeometric solutions of the recurrence, or, by failing to find one, proves that the initial series \emph{cannot} be summed into a closed form. It is the latter that happens in our case, thereby proving that the series in Eq.~(\ref{E:Ikl}) is not summable in closed form.
\section*{Acknowledgments}
It is a pleasure to thank Colston Chandler, Anil Shaji, Adolfo del Campo and Miguel Navascu\'{e}s for several interesting discussions during the course of this work, and Martin B. Plenio for several comments on the manuscript. AD was supported by EPSRC (Grant No. EP/C546237/1), EPSRC QIP-IRC and the EU Integrated Project (QAP).
\end{document}
\begin{document}
\title{\Large Je\'{s}manowicz' conjecture and Fermat numbers} \author{\large Min Tang\thanks{Corresponding author. This work was supported by the National Natural Science Foundation of China, Grant No.10901002 and Anhui Provincial Natural Science Foundation, Grant No.1208085QA02. Email: [email protected]} and Jian-Xin Weng } \date{} \maketitle
\vskip -3cm \begin{center} \vskip -1cm { \small \begin{center} School of Mathematics and Computer Science, Anhui Normal University, \end{center} \begin{center} Wuhu 241003, China \end{center} }
\end{center}
{\bf Abstract.} Let $a,b,c$ be relatively prime positive integers such that $a^{2}+b^{2}=c^{2}.$ In 1956, Je\'{s}manowicz conjectured that for any positive integer $n$, the only solution of $(an)^{x}+(bn)^{y}=(cn)^{z}$ in positive integers is $(x,y,z)=(2,2,2)$. Let $k\geq 1$ be an integer and $F_k=2^{2^k}+1$ be a Fermat number. In this paper, we show that Je\'{s}manowicz' conjecture is true for Pythagorean triples $(a,b,c)=(F_k-2,2^{2^{k-1}+1},F_k)$.
{\bf Keywords:} Je\'{s}manowicz' conjecture; Diophantine equation; Fermat numbers
2010 {\it Mathematics Subject Classification}: 11D61
\section{Introduction} Let $a,b,c$ be relatively prime positive integers such that $a^{2}+b^{2}=c^{2}$ with $2\mid b.$ Clearly, for any positive integer $n$, the Diophantine equation
\begin{equation}\label{eqn1}(na)^{x}+(nb)^{y}=(nc)^{z}\end{equation}
has the solution $(x, y, z)=(2,2,2).$ In 1956, Sierpi\'{n}ski \cite{Sierpinski} showed there is no other solution when $n=1$ and $(a,b,c)=(3,4,5)$, and Je\'{s}manowicz \cite{Jesmanowicz} proved that when $n=1$ and $(a,b,c)=(5,12,13),(7,24,25),(9,40,41),(11,60,61),$ Eq.(\ref{eqn1}) has only the solution $(x,y,z)=(2,2,2).$ Moreover, he conjectured that for any positive integer $n,$ the Eq.(\ref{eqn1}) has no positive integer solution other than $(x,y,z)=(2,2,2).$ Let $k\geq 1$ be an integer and $F_k=2^{2^k}+1$ be a Fermat number.
Recently, the first author of this paper and Yang \cite{Tang} proved that if $1\leq k\leq 4$, then the Diophantine equation \begin{equation}\label{eqn2}((F_k-2)n)^{x}+(2^{2^{k-1}+1}n)^{y}=(F_kn)^{z}\end{equation}
has no positive integer solution other than $(x,y,z)=(2,2,2)$.
For related problems, see (\cite{Deng}, \cite{Miyazaki}, \cite{Miyazaki2}).
In this paper, we obtain the following result. \begin{theorem}\label{thm1} For any positive integer $n$ and Fermat number $F_k$, Eq.(\ref{eqn2}) has only the solution $(x,y,z)=(2,2,2)$. \end{theorem}
Throughout this paper, let $m$ be a positive integer and $a$ be any integer relatively prime to $m$. If $h$ is the least positive integer such that $a^{h}\equiv 1 \pmod m$, then $h$ is called the order of $a$ modulo $m$, denoted by $\textnormal{ord}_{m}(a)$.
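As a quick numerical illustration, which plays no role in the proofs below, both the order of $2$ modulo the prime factors of a Fermat number and the absence of small nontrivial solutions of Eq.(\ref{eqn2}) can be checked directly. The minimal sketch below assumes Python.
\begin{verbatim}
from math import gcd

def mult_order(a, m):
    # least h >= 1 with a^h = 1 (mod m); assumes gcd(a, m) = 1
    assert gcd(a, m) == 1
    h, x = 1, a % m
    while x != 1:
        x = (x * a) % m
        h += 1
    return h

# Any prime p | F_{k-1} satisfies 2^(2^(k-1)) = -1 (mod p), hence ord_p(2) = 2^k.
# Example with k = 6, using Euler's factorization F_5 = 641 * 6700417:
for p in (641, 6700417):
    print(p, mult_order(2, p))           # both print 64 = 2^6

# Brute-force check of Eq. (2) for k = 1, 2 and small n, x, y, z:
for k in (1, 2):
    a, b, c = 2 ** 2 ** k - 1, 2 ** (2 ** (k - 1) + 1), 2 ** 2 ** k + 1
    for n in range(1, 4):
        sols = [(x, y, z) for x in range(1, 7) for y in range(1, 7)
                for z in range(1, 7)
                if (a * n) ** x + (b * n) ** y == (c * n) ** z]
        print(k, n, sols)                # always [(2, 2, 2)]
\end{verbatim}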
\section{Lemmas}
\begin{lemma}\label{lem1}(\cite{Lu}) For any positive integer $m$, the Diophantine equation $(4m^{2}-1)^{x}+(4m)^{y}=(4m^{2}+1)^{z}$ has only the solution $(x,y,z)=(2,2,2).$\end{lemma}
\begin{lemma}\label{lem2}(See \cite[Lemma 2]{Deng}) If $z\geq \max\{x,y\},$ then the Diophantine equation $a^{x}+b^{y}=c^{z},$ where $a,b$ and $c$ are any positive integers (not necessarily relatively prime) such that $a^{2}+b^{2}=c^{2}$, has no solution other than $(x,y,z)=(2,2,2).$\end{lemma}
\begin{lemma}\label{lem3} (See \cite[Corollary 1]{Le}) If the Diophantine equation $(na)^{x}+(nb)^{y}=(nc)^{z}$(with $a^2+b^2=c^2$) has a solution $(x,y,z)\neq(2,2,2),$ then $x,y,z$ are distinct.\end{lemma}
\begin{lemma}\label{lem4}(See \cite[Lemma 2.3]{Deng2013}) Let $a,b,c$ be any primitive Pythagorean triple such that the Diophantine equation $a^{x}+b^{y}=c^{z}$ has the only positive integer solution $(x,y,z)=(2,2,2)$. Then (\ref{eqn1}) has no positive integer solution satisfying $x>y>z$ or $y>x>z$.
\end{lemma}
\begin{lemma}\label{lem5}Let $k$ be a positive integer and $F_k=2^{2^k}+1$ be a Fermat number. If $(x,y,z)$ is a solution of the Eq.(\ref{eqn2}) with $(x,y,z)\neq (2,2,2)$, then $x<z<y$. \end{lemma}
\begin{proof} By Lemmas \ref{lem2}-\ref{lem4}, it is sufficient to prove that the Eq.(\ref{eqn2}) has no solution $(x,y,z)$ satisfying $y<z<x$. By Lemma \ref{lem1}, we may suppose that $n\geq2$ and the Eq.(\ref{eqn2}) has a solution $(x,y,z)$ with $y<z<x$. Then we have
\begin{equation}\label{eqn9}2^{(2^{k-1}+1)y}=n^{z-y}\Big(F_k^{z}-(F_k-2)^{x}n^{x-z}\Big).\end{equation}
By \eqref{eqn9} we may write $n=2^{r}$ with $r\geq1$.
Noting that $$\gcd\Big(F_k^{z}-(F_k-2)^{x}2^{r(x-z)},2\Big)=1,$$
we have \begin{equation}\label{eqn10}F_k^{z}-(F_k-2)^{x}2^{r(x-z)}=1.\end{equation}
Since $k\geq 1$ we have $3\mid F_k-2$, so by (\ref{eqn10}) we get $F_k^{z}\equiv(-1)^{z}\equiv 1\pmod 3$, and hence $z\equiv 0\pmod 2.$
Writing $z=2z_{1}$, we have
\begin{equation}\label{eqn11}\Big(\prod\limits_{i=0}^{k-1}F_i\Big)^x2^{r(x-z)}=(F_k^{z_{1}}-1)(F_k^{z_{1}}+1).\end{equation}
Let $F_{k-1}=\prod\limits_{i=1}^tp_i^{\alpha_i}$ be the standard prime factorization of $F_{k-1}$ with $p_1<\cdots<p_t$. Note that $t=1$ is possible, for instance when $F_{k-1}$ is one of the known Fermat primes. Moreover,
\begin{equation}\label{eqn12}\textnormal{ ord}_{p_i}(2)=2^{k}, \quad i=1,\cdots,t.\end{equation}
Noting that $\gcd(F_k^{z_{1}}-1,F_k^{z_{1}}+1)=2,$ we know that $p_t$ divides only one of $F_k^{z_{1}}-1$ and $F_k^{z_{1}}+1$. Observe also that $p_i\mid F_{k-1}$ implies $2^{2^{k}}\equiv 1\pmod{p_i}$, so $F_k\equiv 2\pmod{p_i}$ for $i=1,\cdots,t$.
{\bf Case 1.} $p_t\mid F_k^{z_{1}}-1$. Then $F_k^{z_{1}}-1\equiv 2^{z_1}-1\equiv 0\pmod {p_t}$. Noting that $\textnormal{ ord}_{p_t}(2)=2^{k}$, we have $z_1\equiv 0\pmod{2^{k}}$. By (\ref{eqn12}) we have $$F_k^{z_{1}}-1\equiv 2^{z_1}-1\equiv 0\pmod {p_i}, \quad i=1,\cdots, t.$$ Since $\gcd(F_k^{z_{1}}-1,F_k^{z_{1}}+1)=2,$ by (\ref{eqn11}) we have $$F_k^{z_{1}}-1\equiv 0\pmod {p_i^{\alpha_ix}}, \quad i=1,\cdots, t.$$ Hence $F_{k-1}^x\mid F_k^{z_{1}}-1$.
{\bf Case 2.} $p_t\mid F_k^{z_{1}}+1$. Then $F_k^{z_{1}}+1\equiv 2^{z_1}+1\equiv 0\pmod {p_t}$. Noting that $\textnormal{ ord}_{p_t}(2)=2^{k}$, we have $2^{k-1}\mid z_1$, but $2^{k}\nmid z_1$. By (\ref{eqn12}) we have $$2^{2z_1}-1=(2^{z_1}+1)(2^{z_1}-1)\equiv 0\pmod {p_i}, \quad i=1,\cdots, t.$$ Thus $$F_k^{z_{1}}+1\equiv 2^{z_1}+1\equiv 0\pmod {p_i}, \quad i=1,\cdots, t.$$ Since $\gcd(F_k^{z_{1}}-1,F_k^{z_{1}}+1)=2,$ by (\ref{eqn11}) we have $$F_k^{z_{1}}+1\equiv 2^{z_1}+1\equiv 0\pmod {p_i^{\alpha_ix}}, \quad i=1,\cdots, t.$$ Hence $F_{k-1}^x\mid F_k^{z_{1}}+1$.
However, since $x>z=2z_{1}$, $$F_{k-1}^x=\Big(2^{2^{k-1}}+1\Big)^x>\Big(2^{2^{k-1}}+1\Big)^{2z_1}>F_k^{z_1}+1,$$ which is impossible.
This completes the proof of Lemma \ref{lem5}. \end{proof}
\section{Proof of Theorem \ref{thm1}}
By Lemma \ref{lem1} and Lemma \ref{lem5}, we may suppose that $n\geq2$ and the Eq.(\ref{eqn2}) has a solution $(x,y,z)$ with $x<z<y$. Then
\begin{equation}\label{eqn13a}\Big(\prod_{i=0}^{k-1}F_i\Big)^{x}=n^{z-x}\Big(F_k^{z}-2^{(2^{k-1}+1)y}n^{y-z}\Big).\end{equation} It is clear from \eqref{eqn13a} that $$\gcd\Big(n,\prod\limits_{i=0}^{k-1}F_i\Big)>1.$$ Let $\prod\limits_{i=0}^{k-1}F_i=\prod\limits_{i=1}^{t}p_i^{\alpha_i}$
be the standard prime factorization of $\prod\limits_{i=0}^{k-1}F_i$ and write $n=\prod\limits_{\nu=1}^{s}p_{i_\nu}^{\beta_{i_\nu}},$
where $\beta_{i_\nu}\geq1$, $\{i_1,\cdots,i_s\}\subseteq \{1,\cdots,t\}$. Let $T=\{1,2,\cdots, t\}\setminus \{i_1,\cdots,i_s\}$. If $T=\emptyset$, then let $P(k,n)=1$. If $T\neq\emptyset$, then let $$P(k,n)=\prod\limits_{i\in T}p_i^{\alpha_i}.$$
By (\ref{eqn13a}), we have
\begin{equation}\label{eqn14a}P(k,n)^x=F_k^{z}-2^{(2^{k-1}+1)y}\prod\limits_{\nu=1}^{s}p_{i_\nu}^{\beta_{i_\nu}(y-z)}.\end{equation} Since $y\ge 2$, it follows that \begin{equation}\label{eqn4.3}P(k,n)^x\equiv 1\pmod{2^{2^k}}.\end{equation} If $3\mid P(k,n)$, then $P(k,n)\equiv -1\pmod 4$, since every prime factor of $F_i$ with $i\geq 1$ is congruent to $1$ modulo $4$. By \eqref{eqn4.3}, this implies that $x$ is even. If $3\nmid P(k,n)$, then $P(k,n)\equiv 1\pmod 4$. Let $P(k,n)=1+2^vW$, $2\nmid W$. Then $v\ge 2$. Suppose that $x$ is odd; then $$P(k,n)^x=1+2^vW', \quad 2\nmid W'.$$ Thus $v\ge 2^k$ and $P(k,n)\ge F_k$, which contradicts $$ P(k,n)<\prod\limits_{i=0}^{k-1}F_i=F_k-2. $$ Therefore, $x$ is even. Write $x=2^uN$ with $2\nmid N$. Then $u\geq 1$.
{\bf Case 1.} $P(k,n)\equiv -1\pmod 4$. Let $P(k,n)=2^dM-1$ with $2\nmid M$. Then $d\geq 2$ and $$P(k,n)^x=1+2^{u+d}V, \quad 2\nmid V.$$ By (\ref{eqn4.3}) we have $u+d\geq 2^k$.
Choose a $\nu\in\{1,\cdots,s\}$, let $p_{i_\nu}=2^rt+1$ with $r\geq 1$, $2\nmid t$. Then $$2^{d+r-1}<(2^dM-1)(2^rt+1)=P(k,n)\cdot p_{i_\nu}\leq \prod\limits_{i=0}^{k-1}F_i=2^{2^k}-1.$$
Thus $d+r\leq 2^k$. Hence $u\geq r$.
By (\ref{eqn14a}) we have \begin{equation}\label{eqn3.4}P(k,n)^x\equiv 2^z\pmod{p_{i_\nu}}.\end{equation} Noting that $p_{i_\nu}-1\mid 2^ut$, we have \begin{equation}\label{eqn3.5}2^{tz}\equiv P(k,n)^{2^utN}\equiv 1\pmod {p_{i_\nu}}.\end{equation} Since $\textnormal{ord}_{p_{i_\nu}}(2)$ is even and $2\nmid t$, we have $z\equiv 0\pmod 2$.
{\bf Case 2.} $P(k,n)\equiv 1\pmod 4$. Let $P(k,n)=2^{d'}M'+1$ with $2\nmid M'$. Then $d'\geq 2$ and $$P(k,n)^x=1+2^{u+d'}V', \quad 2\nmid V'.$$ By (\ref{eqn4.3}) we have $u+d'\geq 2^k$.
Choose a $\mu\in\{1,\cdots,s\}$, let $p_{i_\mu}=2^{r'}t'+1$ with $r'\geq 1$, $2\nmid t'$. Then $$2^{d'+r'}<(2^{d'}M'+1)(2^{r'}{t'}+1)=P(k,n)\cdot p_{i_\mu}\leq \prod\limits_{i=0}^{k-1}F_i=2^{2^k}-1.$$
Thus $d'+r'<2^k$. Hence $u>r'$.
By (\ref{eqn14a}) we have \begin{equation}\label{eqn3.6}P(k,n)^x\equiv 2^z\pmod{p_{i_\mu}}.\end{equation} Noting that $p_{i_\mu}-1\mid 2^ut'$, we have \begin{equation}\label{eqn3.7}2^{t'z}\equiv P(k,n)^{2^ut'N}\equiv 1\pmod {p_{i_\mu}}.\end{equation} Since $\textnormal{ord}_{p_{i_\mu}}(2)$ is even and $2\nmid t'$, we have $z\equiv 0\pmod 2$.
Write $z=2z_{1}, x=2x_{1}$. By (\ref{eqn14a}), we have \begin{equation}\label{eqn15T}2^{(2^{k-1}+1)y}\prod\limits_{\nu=1}^{s}p_{i_\nu}^{\beta_{i_\nu}(y-z)}=\Big(F_k^{z_{1}}-P(k,n)^{x_1}\Big)\Big(F_k^{z_{1}}+P(k,n)^{x_1}\Big).\end{equation} Noting that $$\gcd\Big(F_k^{z_{1}}-P(k,n)^{x_1},F_k^{z_{1}}+P(k,n)^{x_1}\Big)=2,$$ we have \begin{equation}\label{eqn16T}2^{(2^{k-1}+1)y-1}\mid F_k^{z_{1}}-P(k,n)^{x_1},\quad 2\mid F_k^{z_{1}}+P(k,n)^{x_1},\end{equation}
or \begin{equation}\label{eqn17T}2\mid F_k^{z_{1}}-P(k,n)^{x_1},\quad 2^{(2^{k-1}+1)y-1}\mid F_k^{z_{1}}+P(k,n)^{x_1}.\end{equation} However, $$2^{(2^{k-1}+1)y-1}>2^{(2^{k-1}+1)2z_1}>(F_k+F_k-2)^{z_1}>F_k^{z_{1}}+P(k,n)^{x_1}>F_k^{z_{1}}-P(k,n)^{x_1}>0,$$ a contradiction in either case.
This completes the proof of Theorem \ref{thm1}.
\section{Acknowledgment} We sincerely thank Professor Yong-Gao Chen for his valuable suggestions and useful discussions. We would like to thank the referee for his/her helpful comments.
\end{document}
\begin{document}
\title{Sphere covering by minimal number of caps and short closed sets \thanks{{\it 1991 A M S Subject Classification.} 52A45 {\it Key words and phrases.} Sphere covering by closed sets.}} \author{A. B. N\'emeth}
\maketitle
\begin{abstract} A subset of the sphere is said to be short if it is contained in an open hemisphere. A short closed set which is geodesically convex is called a cap. The following theorem holds: 1. The minimal number of short closed sets covering the $n$-sphere is $n+2$. 2. If $n+2$ short closed sets cover the $n$-sphere then (i) their intersection is empty; (ii) the intersection of any proper subfamily of them is non-empty. In the case of caps, (i) and (ii) are also sufficient for the family to be a covering of the sphere. \end{abstract}
\section{Introduction and the main result}
Denote by $\mathbb R^{n+1}$ the $n+1$-dimensional Euclidean space endowed with a Cartesian reference system, with the scalar product $\langle\cdot,\cdot\rangle$ and with the topology it generates.
Denote by $S^n$ the $n$-dimensional unit sphere in $\mathbb R^{n+1}.$
A subset of the sphere $S^n$ is said to be \emph{short} if it is contained in an open hemisphere.
The subset $C\subset S^n$ is called {\it geodesically convex} if together with any two of its points it contains the arc of minimal length of the principal circle on $S^n$ through these points. $S^n$ itself is a geodesically convex set.
A short closed set which is geodesically convex is called a \emph{cap}.
We use the notation $\co A$ for the convex hull of $A$ and the notation $\sco A$ for the geodesical convex hull of $A\subset S^n$ (the union of the geodesical lines with endpoints in $A$). Further, $\dist(\cdot,\cdot)$ will denote the geodesical distance of points. Besides the standard notion of simplex we also use the notion of the spherical simplex $\Delta$ placed in the north hemisphere $S^+$ of $S^n$ such that its vertices are on the equator of $S^n$. In this case
$\|\Delta\|=S^+$.
Our main result is:
\begin{theo} \begin{enumerate}
\item The minimal number of short closed sets covering $S^n$ is $n+2$.
\item If a family $F_1,...F_{n+2}$ of short closed sets covers $S^n$, then:
(i) $\cap_{i=1}^{n+2} F_i = \emptyset$;
(ii) $\cap_{i\not=j}F_i\not= \emptyset,\;\forall \; j=1,...,n+2$;
(iii) if $a_j\in \cap_{i\not=j}F_i$, then the vectors $a_1,...,a_{n+2}$ are the vertices of an $n+1$-simplex containing $0$ in its interior.
\end{enumerate}
If the sets $F_i$ are caps, then (i) and (ii) are also sufficient for the family to be a cover of $S^n$. \end{theo}
Let $\Delta$ be an $n+1$-dimensional simplex with vertices in $S^n$ containing the origin in its interior. Then the radial projection from $0$ of the closed $n$-dimensional faces of $\Delta$ into $S^n$ furnishes $n+2$ caps covering $S^n$ and satisfying (i) and (ii).
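This construction is easy to test numerically. The following Python sketch (an illustration for $n=2$ only; the random simplex, sample size and tolerance are arbitrary choices, not part of the proof) checks by Monte Carlo sampling that the four projected caps cover $S^2$, that their total intersection is empty, and that each triple of them contains the radial projection of the corresponding vertex.
\begin{verbatim}
import numpy as np
rng = np.random.default_rng(0)

# a random 3-simplex with the origin at its centroid (hence in its interior)
B = rng.normal(size=(4, 3))
A = B - B.mean(axis=0)                     # vertices a_1,...,a_4

def in_cap(x, i):
    # x lies in the i-th projected cap iff x is in the cone spanned by a_j, j != i
    V = np.delete(A, i, axis=0).T
    return np.all(np.linalg.solve(V, x) >= -1e-12)

X = rng.normal(size=(20000, 3))
X /= np.linalg.norm(X, axis=1, keepdims=True)
hits = np.array([[in_cap(x, i) for i in range(4)] for x in X])

assert hits.any(axis=1).all()              # the four caps cover S^2
assert not hits.all(axis=1).any()          # (i): empty total intersection
for i in range(4):                         # (ii): a_i/|a_i| lies in C_j for all j != i
    v = A[i] / np.linalg.norm(A[i])
    assert all(in_cap(v, j) for j in range(4) if j != i)
\end{verbatim}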
A first version for caps of the above theorem is the content of the unpublished note \cite{nemeth2006}.
\begin{remark} We mention the formal relation, in the case of caps, with the \emph{Nerve Theorem} (\cite{hatcher2002}, Corollary 4G3). If we consider \emph{"open caps"} in place of caps, then the conclusion (ii) can be deduced from the mentioned theorem. Moreover, the conclusion holds for a "good" open cover of the sphere too, i.e., an open cover with contractible members and contractible finite intersections. In our theorem the covering with caps has the properties of a "good" covering in the sense of that theorem: the members of the covering together with their nonempty intersections are contractible, but the members are closed, a circumstance which seems rather difficult to surmount. (Thanks are due to Imre B\'ar\'any, who pointed out this possible connection to me.) \end{remark}
We shall use in the proofs the following (spherical) variant of Sperner's lemma (considered for simplices by Ky Fan \cite{FA}): \begin{lem}\label{sperner} If a collection of closed sets $F_1,...,F_{n+1}$ in $S^n$ covers the spherical simplex engendered by the points $a_1,...a_{n+1}\in S^n$ and $\sco \{a_1,...,a_{i-1},a_{i+1},...,a_{n+1}\}\subset F_i,\; i=1,...,n+1$ then $\cap_{i=1}^{n+1} F_i\not= \emptyset.$ \end{lem}
Our first goal is to present the proof for caps. (We mention that using the methods in \cite{KN} and \cite{KN1} the proof can be carried out in a purely geometric way in contrast with the proof in \cite{nemeth2006}, where we refer to the Sperner lemma.)
Using the variant of the theorem for caps and the Sperner lemma, we then prove the variant for short closed sets.
Except the usage of Lemma \ref{sperner}, our methods are elementary: they use repeatedly the induction with respect to the dimension.
\section{The proof of the theorem for caps}
1. Consider $n+1$ caps $C^1,...,C^{n+1}$ on $S^n$. Each $C^i$, being a cap, can be strictly separated from the origin by a hyperplane $$H_i= \{x\in \mathbb R^{n+1}:\,\langle a_i,x\rangle +\alpha_i =0\}.$$ We can suppose, without loss of generality, that the normals $a_i$ are linearly independent, since by slightly moving them we can achieve this without affecting the geometrical picture. If the normals $a_i$ are considered oriented toward $0$, this strict separation means that $\alpha_i >0,\;i=1,...,n+1.$ The vectors $a_i,\,i=1,...,n+1$ engender a reference system in $\mathbb R^{n+1}$. Let $x$ be a nonzero element of the positive orthant of this reference system. Then, for $t\geq 0$, one has $\langle a_i,tx \rangle \geq 0,\;\forall \,i=1,...,n+1.$
Hence, for each $t\geq 0$, $tx$ will be a solution of the system
$$\langle a_i,y\rangle +\alpha_i>0,\;i=1,...,n+1,$$ and thus $$(*)\qquad tx\in \cap_{i=1}^{n+1} H_i^+,\;\forall \, t\geq 0$$ with $$H_i^+=\{y \in \mathbb R^{n+1};\,\langle a_i,y\rangle +\alpha_i > 0\}.$$
Now, if $C^1,...,C^{n+1}$ cover $S^n$, then so does the union $\cup_{i=1}^{n+1} H_i^-$ of the halfspaces $$H_i^-=\{y \in \mathbb R^{n+1};\,\langle a_i,y\rangle +\alpha_i \leq 0\}.$$
Since $H_i^+$ is the complementary set of $H_i^-$ and $S^n\subset \cup_{i=1}^{n+1} H_i^-$, the set $\cap_{i=1}^{n+1} H_i^+$ is disjoint from $S^n$; being convex and containing $0$, it must lie inside the unit ball and hence be bounded. But (*) shows that $tx$ with $x\not= 0$ is in this set for any $t\geq 0$. The obtained contradiction shows that the family $C^1,...,C^{n+1}$ cannot cover $S^n$.
\begin{remark}
The proof of this item is also consequence of the Lusternik-Schnirelmann theorem \cite{LS} which asserts that if $S^n$ is covered by the closed sets $F_1,...,F_k$ with $F_i\cap (-F_i)=\emptyset,\,i=1,...,k,$ then $k\geq n+2.$
\end{remark}
2. Let $C^1,...,C^{n+2}$ be caps covering $S^n$.
(i) Then they cannot have a common point $x$, since in this case $-x$ could not be covered by any $C^i$. (No cap can contain diametrically opposite points of $S^n$.)
Hence, condition (i) must hold.
(ii) To prove that $\cap_{j\not= i} C^j \not= \emptyset, \;\forall \,i=1,...,n+2$ we proceed by induction.
For $S^1$, the circle, $C^i$ is an arc (containing its endpoints) of length $< \pi$, $i=1,2,3$. The arcs $C^1, C^2, C^3$ cover $S^1$. Hence, they cannot have common points, and each endpoint of each arc must be contained in exactly one of the other two arcs. Hence, $C^i$ meets $C^j$ for every $j\not= i.$ If $c_i\in C^j\cap C^k,\; j\not= i\not= k\not=j$, then $c_1, \,c_2,\,c_3$ are three pairwise distinct points on the circle, hence they are in general position and $0$ is an interior point of the triangle they span.
Suppose the assertions (ii) and (iii) hold for $n-1$ and let us prove them for $n$.
Take $C^{n+2}$ and let $H$ be a hyperplane through $0$ which does not meet $C^{n+2}.$ Then, $H$ determines the closed hemispheres $S^-$ and $S^+$. Suppose that $C^{n+2}$ is placed inside $S^-$ (in the interior of $S^-$ with respect to the topology of $S^n$). Hence, $C^1,...,C^{n+1}$ must cover $S^+$ and, denoting by $S^{n-1}$ the $n-1$-dimensional sphere $S^n\cap H$, these sets also cover $S^{n-1}.$ Now, $D^i= C^i\cap S^{n-1},\;i=1,...,n+1$ are caps in $S^{n-1}$ which cover this sphere. Thus, the induction hypothesis works for these sets.
Take the points $d_i\in \cap_{j\not= i}D^j$. Then, $d_1,...,d_{n+1}$ will be in general position and $0$ is an interior point of the simplex they span. By their definition, it follows that $d_k\in D^j, \; \forall k\not= j$ and hence $d_1,...,d_{j-1},d_{j+1}, ...,d_{n+1}\in D^j,\; j=1,2,...n+1. $
Consider the closed hemisphere $S^+$ to be endowed with a spherical simplex structure $\Delta$ whose vertices are the points $d_1,...,d_{n+1}$ .
Since $C^1,...,C^{n+1}$ cover $S^+$, and $d_1,...,d_{j-1},d_{j+1}, ...,d_{n+1}\in D^j\subset C^j\cap S^+,\; j=1,2,...n+1 $, Lemma \ref{sperner}
can be applied to the spherical simplex $\Delta$, yielding $$\cap_{j=1}^{n+1} C^j \supset\cap_{j=1}^{n+1} (S^+\cap C^j) \not= \emptyset.$$
This shows that each collection of $n+1$ sets $C^j$ have nonempty intersection and proves (ii) for $n$.
(If we prefer a purely geometric proof of this item, we can refer to the spherical analogue of the results in \cite{KN1}.)
From the geometric picture it is obvious that two caps meet if and only if their convex hulls meet. Hence, from the conditions (i) and (ii) for the caps $C^i$, it follows that these conditions hold also for $A^i=\co C^i,\;i=1,...,n+2.$
Take $$a_i\in \cap_{j\not= i}A^j,\;i=1,...,n+2.$$
Let us show that for an arbitrary $k\in \{1,...,n+2\}$, $$a_k\not\in
\aff \{a_1,...,a_{k-1},a_{k+1},...,a_{n+2}\}.$$
Assume the contrary. Denote
$$H=\aff \{a_1,...,a_{k-1},a_{k+1},...,a_{n+2}\}.$$ Thus, $\dim H\leq n.$ The points $a_i$ are all in the manifold $H$. Denote $$B^i=H\cap A^i.$$ Since $a_i\in \cap_{j\not= i}A^j$ and $a_i\in H$, it follows that $$a_i\in \cap_{j\not= i}A^j\cap H=\cap_{j\not= i} B^j,\;\forall\,i.$$ This means that the family of convex compact sets $\{B^j:\,j=1,...,n+2\}$ in $H$ possesses the property that every $n+1$ of its elements have nonempty intersection. Then, by Helly's theorem, they have a common point. But this would be a point of $\cap_{i=1}^{n+2} A^i $ too, which contradicts (ii) for the sets $A^i$.
Hence, any points $c_i\in \cap_{j\not= i} C^j \subset \cap_{j\not= i} A^j,\; i=1,...,n+2$ are in general position. Since $c_1,...,c_{i-1},c_{i+1},...,c_{n+2} \in C^i$, it follows that the open halfspace which is determined by the hyperplane they engender and which contains $0$ also contains the point $c_i$.
This proves (iii).
Suppose that the caps $C^1,...,C^{n+2}$ possess the properties in (i) and (ii). Then, the method in the above proof yields that the points
$$c_i\in \cap_{j\not= i} C^j,\;i=1,...,n+2$$ engender an $n+1$-simplex with $0$ in its interior and
$$c_1,...,c_{i-1},c_{i+1},...,c_{n+2} \in C^i,\; i=1,...,n+2.$$ The radial projections of the $n$-faces of this simplex into $S^n$ obviously cover $S^n$. The union of these projections is contained in $\cup_{i=1}^{n+2} C^i$.
This completes the proof for caps.
\section{The proof of the theorem for short closed sets}
We carry out the proof by induction.
Consider $n=1$ and suppose $F_1,F_2,F_3$ are short closed sets covering $S^1$.
If $a\in \cap_{i=1}^3 F_i$, then by the above hypothesis $-a\not \in \cup_{i=1}^3 F_i$, which is impossible. Hence, $$\cap_{i=1}^3 F_i= \emptyset$$ must hold.
Denote $C_3= \sco F_3$; then $C=\clo (S^1\setminus C_3)$ is a connected arc of $S^1$ covered by $F_1,F_2$. One must have $C\cap F_i\not=\emptyset , i=1,2$, since if for instance $C\cap F_2= \emptyset,$ then it would follow that the closed sets $F_1$ and $C_3$, both of geodesical diameter $< \pi$, cover $S^1$, which is impossible. Since $C$ is connected and $C\cap F_i,\; i=1,2$ are closed sets in $C$ covering this set, $F_1\cap F_2\supset (C\cap F_1)\cap (C\cap F_2) \not= \emptyset$ must hold.
The geodesically convex sets $C_i=\sco F_i,\;i=1,2,3$ cover $S^1$, hence applying the theorem for caps to $ a_j \in \cap_{i\not= j} F_i \subset \cap_{i\not= j} C_i,\; j=1,2,3$, we conclude that these points are in general position and the simplex engendered by them must contain $0$ as an interior point.
Suppose that the assertions hold for $n-1$ and prove them for $n$.
Suppose that $$S^n \subset \cup_{i=1}^{n+2} F_i,\; F_i\;\textrm{short, closed},\; i=1,...,n+2.$$
The assertion (i) is a consequence of the theorem for caps applied to $C_i =\sco F_i,\; i=1,...,n+2$ (or a consequence of the Lusternik-Schnirelmann theorem).
Suppose that $F_{n+2}$ is contained in the interior (with respect to the topology of $S^n$) of the south hemisphere $S^-$ and denote by $S^{n-1}$ the equator of $S^n$.
Now $S^{n-1} \subset \cup_{i=1}^{n+1} F'_i$ with $F'_i=(S^{n-1}\cap F_i ),\; i=1,...,n+1,$ and we can apply the induction hypothesis for $S^{n-1}$ and the closed sets $F'_i,\; i=1,...,n+1.$ Since $C'_i=\sco F'_i,\; i=1,...,n+1$ cover $S^{n-1}$, and they are caps, the theorem for caps applies and hence the points \begin{equation*} a_j \in \cap_{i=1,i\not=j}^{n+1} C'_i,\;j=1,...,n+1 \end{equation*} are in general position.
The closed sets \begin{equation*} A_i = C'_i\cup(F_i\cap S^+)= C'_i\cup (F_i\cap \inter S^+),\;i=1,...,n+1 \end{equation*}
cover $S^+$, the north hemisphere considered as a spherical simplex $\Delta$ engendered by $a_1,...,a_{n+1}$ $(\|\Delta\|=S^+$). (Here $\inter S^+$ is the interior of $S^+$ in the space $S^n$.) Further, $$\sco \{a_1,...,a_{k-1},a_{k+1},...,a_{n+1}\} \subset A_k,\; k=1,...,n+1.$$ Hence, we can apply Lemma \ref{sperner} to conclude that there exists a point $a$ in $\cap_{i=1}^{n+1} A_i \not= \emptyset.$
Since $$C'_i\cap (F_j\cap \inter S^+)=\emptyset,\;\forall \;i,\;j,$$ it follows that $$a \in \cap_{i=1}^{n+1} A_i= \Big(\cap_{i=1}^{n+1} C'_i\Big) \cup \Big(\cap_{i=1}^{n+1} (F_i\cap \inter S^+)\Big)=\cap_{i=1}^{n+1} F_i\cap \inter S^+,$$ because $\cap_{i=1}^{n+1} C'_i= \emptyset$ by the induction hypothesis and the theorem for caps. Thus, $$a\in \cap_{i=1}^{n+1} F_i,$$ and we have condition (ii) fulfilled for $n$.
The condition (iii) follows from the theorem for caps applied to $$C^i= \sco F_i,\;i=1,...,n+2.$$
\begin{remark} If $S^1$ is covered by the closed sets $F_1, F_2, F_3$ with the property $F_i\cap (-F_i)=\emptyset,\; i=1,2,3 $, then $F_i\cap F_j \not= \emptyset$ $\forall \;i, j.$
Indeed, assume that $F_1\cap F_2= \emptyset$. Then $\dist (F_1,F_2)=\varepsilon >0.$ If $a_i\in F_i$ are the points in $F_i, \; i=1,2$ with $\dist (a_1,a_2)= \varepsilon,$ then the closed arc $C\subset S^1$ of length $\varepsilon$ with the endpoints $a_1,\; a_2$ must be contained in $F_3$, and hence $-C\cap F_3= \emptyset$, and then $-C$ must be covered by $F_1\cup F_2$. Since $-a_1 \in -C$ cannot be in $F_1$, it must be in $F_2$, and $-a_2\in F_1$. Thus, $F_1\cap -C\not= \emptyset$ and $F_2\cap -C \not= \emptyset,$ while the last two sets cover $-C$. Since $-C$ is connected and the respective sets are closed, they must have a common point, contradicting the hypothesis $F_1\cap F_2= \emptyset$.
This way, we obtain (ii) fulfilled for $n=1$ for this more general case. We claim that the conditions also hold for $n$, that is, if the closed sets $F_1,...,F_{n+2}$ with $F_i\cap (-F_i)=\emptyset,\; i=1,...,n+2 $ cover $S^n$, then condition (ii) holds. (Condition (i) is a consequence of the definition of the sets $F_i$.) \end{remark}
\end{document}
\begin{document}
\begin{abstract}
We show that any two disjoint crooked planes in $\mathbb{R}^3$ are leaves of a crooked foliation. This answers a question asked by Charette and Kim \cite{foliations}. \end{abstract}
\title{Foliations between crooked planes in $3$-dimensional Minkowski space}
\section{Introduction} In 1983, answering a question of Milnor\cite{Milnor}, Margulis constructed the first examples of nonabelian free groups which act freely and properly discontinuously on $\mathbb{R}^3$ by affine transformations~\cite{Margulis}. In order to better understand these examples, Todd Drumm defined piecewise linear surfaces called \emph{crooked planes} which can bound fundamental domains for such actions~\cite{drummthesis}.
Crooked planes have proven to be very useful in the study of affine actions. Charette-Drumm-Goldman have used them in order to obtain a complete classification for free groups of rank two \cite{CDG1,CDG2,CDG3}. In particular, they show that every free and properly discontinuous affine action of a rank two free group on $\mathbb{R}^3$ admits a fundamental domain bounded by finitely many crooked planes (the \emph{crooked plane conjecture}). A consequence of this is the \emph{tameness conjecture}, that the quotient of $\mathbb{R}^3$ by one of these actions is homeomorphic to the interior of a compact manifold with boundary.
Building on this work, Danciger-Gu\'eritaud-Kassel showed in ~\cite{DGKArcComplex} that crooked planes have a natural interpretation in terms of the deformation theory of hyperbolic surfaces, and used this fact in order to prove the crooked plane conjecture in arbitrary rank, assuming that the linear part is convex cocompact in $\mathsf{O}(2,1)$.
One of the key aspects of the theory of crooked planes is their intersection properties. In particular, knowing when two crooked planes are disjoint is crucial. The \emph{Drumm-Goldman inequality} provides a necessary and sufficient criterion for two crooked planes to be disjoint ~\cite{DRUMM1999323}. This criterion was later expanded upon in ~\cite{halfspaces} and reinterpreted in terms of hyperbolic geometry in ~\cite{DGKArcComplex}.
As an application of the disjointness criterion, the first example of a \emph{crooked foliation}, a smooth $1$-parameter family of pairwise disjoint crooked planes, was given in ~\cite{halfspaces}. Charette-Kim \cite{foliations} investigated these foliations further and gave necessary and sufficient criteria for a one-parameter family of crooked planes to foliate a subset of $\mathbb{R}^3$. They ask the following question : given a pair of disjoint crooked planes in $\mathbb{R}^3$, can the region between them be foliated by crooked planes? We answer this question in the affirmative.
\begin{thm}\label{thm:mainthm}
Let $C,C'$ be a pair of disjoint crooked planes in $\mathbb{R}^3$. Then, there is a \emph{crooked foliation}, that is, a smooth family of pairwise disjoint crooked planes $C_t$, $0\leq t\leq 1$ with $C_0=C$ and $C_1=C'$. \end{thm}
After recalling some definitions from the theory of crooked planes in Minkowski $3$-space in Section 2, we will prove the main theorem in Section 3.\\
We are thankful to the referee for insightful comments and for suggesting an elegant way to shorten the proof of the main theorem.
\section{Definitions} \begin{defn} Lorentzian $3$-space $\mathbb{R}^{2,1}$ is the real three dimensional vector space $\mathbb{R}^3$ endowed with the following symmetric bilinear form of signature $(2,1)$: \[ \cdot : \mathbb{R}^3 \times \mathbb{R}^3 \rightarrow \mathbb{R}\] \[ (\mathbf{u},\mathbf{v}) \mapsto u_1 v_1 + u_2 v_2 - u_3 v_3.\] We fix the orientation given by the standard basis $e_1,e_2,e_3$ and we define the \emph{Lorentzian cross product} \[\mathbf{u}\times \mathbf{v} = (u_2 v_3 - u_3 v_2, u_3 v_1 - u_1 v_3, u_2 v_1 - u_1 v_2)\in\mathbb{R}^{2,1},\] for $\mathbf{u},\mathbf{v}\in\mathbb{R}^{2,1}$. \end{defn}
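For concreteness, here is a small Python sketch (illustrative only; the random vectors and tolerances are arbitrary) implementing the bilinear form and the Lorentzian cross product, and checking numerically that $\mathbf{u}\cdot(\mathbf{u}\times\mathbf{v})=\mathbf{v}\cdot(\mathbf{u}\times\mathbf{v})=0$ and $(\mathbf{u}\times\mathbf{v})\cdot\mathbf{w}=\det(\mathbf{u},\mathbf{v},\mathbf{w})$.
\begin{verbatim}
import numpy as np

def ldot(u, v):                    # Lorentzian scalar product of signature (2,1)
    return u[0]*v[0] + u[1]*v[1] - u[2]*v[2]

def lcross(u, v):                  # Lorentzian cross product as defined above
    return np.array([u[1]*v[2] - u[2]*v[1],
                     u[2]*v[0] - u[0]*v[2],
                     u[1]*v[0] - u[0]*v[1]])

u, v, w = np.random.default_rng(1).normal(size=(3, 3))
c = lcross(u, v)
assert abs(ldot(u, c)) < 1e-12 and abs(ldot(v, c)) < 1e-12
assert abs(ldot(c, w) - np.linalg.det(np.array([u, v, w]))) < 1e-10
\end{verbatim}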
A \emph{null frame} of $\mathbb{R}^{2,1}$ is a positively oriented basis $\mathbf{u},\mathbf{u}',\mathbf{u}''$ such that $\ldot{\mathbf{u}}{\mathbf{u}}=1$, $\ldot{\mathbf{u}'}{\mathbf{u}''}=-1$ and all other products between the three vectors vanish.
\begin{nota}
Any unit spacelike vector $\mathbf{u}$ can be extended to a null frame. This frame is unique up to scaling $\mathbf{u}'$ and $\mathbf{u}''$ by inverse scalars. As normalization we will choose $\mathbf{u}'$ and $\mathbf{u}''$ so that their third coordinates are positive and equal. Given $\mathbf{u}$, we will denote these two null vectors by $\mathbf{u}^-$ and $\mathbf{u}^+$, respectively. \end{nota}
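A minimal numerical sketch of this normalization follows (the explicit choice of orthogonal vectors below is an ad hoc construction for illustration, not taken from the text, and the labelling of the two null vectors as $\mathbf{u}^-$ or $\mathbf{u}^+$, which depends on the orientation, is not checked).
\begin{verbatim}
import numpy as np

ldot = lambda a, b: a[0]*b[0] + a[1]*b[1] - a[2]*b[2]

s, phi = 0.5, 0.7                  # an arbitrary unit spacelike vector u
u = np.array([np.sqrt(1 + s**2) * np.cos(phi),
              np.sqrt(1 + s**2) * np.sin(phi), s])

# two vectors Lorentz-orthogonal to u (one spacelike, one timelike);
# their sum and difference give the two null directions in the plane u-perp
b1 = np.array([-u[1], u[0], 0.0])
b2 = np.array([u[0]*u[2], u[1]*u[2], u[0]**2 + u[1]**2])
w1, w2 = b1 + b2, -b1 + b2         # null, with positive third coordinate

# rescale so that the third coordinates are equal and the mutual product is -1
lam1 = np.sqrt(-w2[2] / (w1[2] * ldot(w1, w2)))
lam2 = lam1 * w1[2] / w2[2]
up, um = lam1 * w1, lam2 * w2      # the pair {u^-, u^+}, up to labelling

for w in (up, um):
    assert abs(ldot(u, w)) < 1e-12 and abs(ldot(w, w)) < 1e-12
assert abs(ldot(up, um) + 1) < 1e-12 and abs(up[2] - um[2]) < 1e-12
\end{verbatim}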
We will denote by $\mathrm{Min}$ the pseudo-Euclidean affine space which is modeled on the vector space $\mathbb{R}^{2,1}$. In other words, $\mathrm{Min}$ is a topological space on which $\mathbb{R}^{2,1}$ acts simply transitively by homeomorphisms. For $\mathbf{v}\in \mathbb{R}^{2,1}$ and $p\in\mathrm{Min}$, we denote this action by $\mathbf{v}(p) = p + \mathbf{v}$. If $q=p+\mathbf{v}$, we will also write $q-p=\mathbf{v}$. A choice of origin $o\in \mathrm{Min}$ identifies $\mathrm{Min}$ with $\mathbb{R}^{2,1}$ via the map $\mathbf{v}\mapsto o + \mathbf{v}$.
We now recall the definition of a crooked plane. First, we define a \emph{stem}, which will be one of the three linear pieces of a crooked plane. \begin{defn} Let $\mathbf{u}\in \mathbb{R}^{2,1}$ be a unit spacelike vector. The \emph{stem} $S(\mathbf{u})$ is the set of causal vectors orthogonal to $\mathbf{u}$ :
\[S(\mathbf{u}) = \{ \mathbf{v} \in \mathbb{R}^{2,1} ~|~ \ldot{\mathbf{u}}{\mathbf{v}}=0 \text{ and } \ldot{\mathbf{v}}{\mathbf{v}} \leq 0 \}.\] A stem is the union of two opposite closed quadrants (see Figure \ref{fig:consistorient}). \end{defn}
\begin{defn} Let $\mathbf{u}\in \mathbb{R}^{2,1}$ be a unit spacelike vector. The \emph{linear crooked plane} $C(\mathbf{u})$ is the piecewise linear surface defined by:
\[ C(\mathbf{u}) := \{ \mathbf{v} \in \mathbb{R}^{2,1} ~|~ \lcross{v}{w} = k \mathbf{w} \textrm{ for some } \mathbf{w}\in S(\mathbf{u}) \text{ and } k\in \mathbb{R}_{\geq 0}\}.\] \end{defn} From this definition, we see that $S(\mathbf{u})\subset C(\mathbf{u})$ since $\mathbf{v}\times\mathbf{v}=0$ for all $\mathbf{v}\in \mathbb{R}^{2,1}$. The complement of the stem $C(\mathbf{u})-S(\mathbf{u})$ has two connected components which are called the \emph{wings} of the crooked plane. Each wing is a half-plane on which the Lorentzian bilinear form is degenerate, attached to the stem along its boundary (See Fig. \ref{fig:consistorient}). Note that $C(\mathbf{u})=C(-\mathbf{u})$.
\begin{defn} Let $p\in \mathrm{Min}$ and $\mathbf{u}\in \mathbb{R}^{2,1}$ unit spacelike. The \emph{crooked plane} $C(p,\mathbf{u})$ is the set $p + C(\mathbf{u}) \subset \mathrm{Min}$. The vector $\mathbf{u}$ is called a \emph{directing vector} of the crooked plane, and $p$ its \emph{vertex}. \end{defn}
In order to formally state the disjointness criteria from \cite{DRUMM1999323,foliations}, we need a normalization for pairs of unit spacelike vectors. \begin{defn}
Two unit spacelike vectors $\mathbf{u}_1,\mathbf{u}_2 \in \mathbb{R}^{2,1}$ are \emph{consistently oriented} if
\begin{itemize}
\item $\ldot{\mathbf{u}_1}{\mathbf{u}_2}\leq -1$, and
\item $\ldot{\mathbf{u}_i}{\mathbf{u}_j^\pm}\leq 0$ for $1\leq i,j\leq 2$.
\end{itemize} \end{defn}
Two consistently oriented unit spacelike vectors $\mathbf{u},\mathbf{u}'$ are called \emph{ultraparallel} if $\ldot{\mathbf{u}}{\mathbf{u}'}<-1$. They are called \emph{asymptotic} if $\ldot{\mathbf{u}}{\mathbf{u}'}=-1$ and $\mathbf{u}'\neq -\mathbf{u}$. Intersecting $\mathbf{u}^\perp$ and $\mathbf{u'}^\perp$ with the hyperboloid model of the hyperbolic plane defines a pair of hyperbolic geodesics, and the terminology comes from the relative position of these geodesics. Choosing one of the unit vectors $\pm\mathbf{u}$ endows the geodesic in the hyperboloid model of $\mathbb{H}^2$ defined by $\mathbf{u}^\perp$ with a transverse orientation. Two unit spacelike vectors are consistently oriented when the corresponding transversely oriented geodesics are disjoint with transverse orientations pointing away from each other (see Figure \ref{fig:consistorient}).
Whenever there exists a choice of directing vectors $\mathbf{u},\mathbf{u}'$ which are consistently oriented, we will also call a pair of crooked planes $C(p,\mathbf{u}),C(p',\mathbf{u}')$ ultraparallel or asymptotic accordingly.
\begin{figure}
\caption{Consistent orientations, stems and crooked planes.}
\label{fig:consistorient}
\end{figure}
We will use two disjointness criteria for crooked planes, one for pairs of crooked planes and one for foliations. Both depend on the following notion: \begin{defn}
The \emph{stem quadrant} associated to a unit spacelike vector $\mathbf{u}$ is the set
\[\mathsf{V}(\mathbf{u}) := \{a \mathbf{u}^- - b\mathbf{u}^+ : a,b\geq 0\}\backslash\{0\}.\]
Note that $\mathsf{V}(-\mathbf{u})=-\mathsf{V}(\mathbf{u})$. \end{defn}
The following disjointness criterion is a restatement of the \emph{Drumm-Goldman inequality} ~\cite{DRUMM1999323}. \begin{thm}[Burelle-Charette-Goldman \cite{halfspaces}]\label{DrummgoldmanDisjoint}
Let $C=C(p,\mathbf{u})$, $C'=C(p',\mathbf{u}')$ be crooked planes and assume that $\mathbf{u},\mathbf{u}'$ are consistently oriented. Then, $C$ and $C'$ are disjoint if and only if
\[p'-p \in \mathsf{A}(\mathbf{u},\mathbf{u}'):=\mathrm{int}(\mathsf{V}(\mathbf{u}')-\mathsf{V}(\mathbf{u})).\] \end{thm}
\begin{rmk}
It is also shown in \cite{DRUMM1999323} that if there is no choice of sign for $\mathbf{u},\mathbf{u}'$ making them consistently oriented, then $C(p,\mathbf{u})$ and $C(p',\mathbf{u}')$ necessarily intersect. Therefore, the above theorem is a characterization of disjoint crooked planes. \end{rmk}
We will use the following straightforward consequence of the \emph{Charette-Kim criterion} for crooked foliations (foliations of $\mathbb{R}^{2,1}$ by crooked planes) : \begin{thm}[Charette-Kim \cite{foliations}]\label{charettekimUltrap}
Let $(\mathbf{u}_t)_{t\in \mathbb{R}}$ be a path of pairwise ultraparallel or asymptotic unit spacelike vectors such that $-\mathbf{u}_t,\mathbf{u}_s$ are consistently oriented for all $t<s$. Suppose $(p_t)_{t\in \mathbb{R}}$ is a regular curve such that for every $t\in\mathbb{R}$,
\[\dot{p_t} \in \mathrm{int}(\mathsf{V}(\mathbf{u}_t)).\]
Then, $C(p_t,\mathbf{u}_t)$ is a crooked foliation. \end{thm}
\section{Foliations between crooked planes}
We now prove Theorem \ref{thm:mainthm} : there exists a crooked foliation containing any pair of disjoint crooked planes. The theorem is a consequence of the following stronger result :
\begin{prop}
Let $(\mathbf{u}_t)_{t\in[0,1]}$ be a smooth path of unit spacelike vectors which are pairwise ultraparallel or asymptotic. Let $p_0,p_1\in \mathrm{Min}$ such that $C(p_0,\mathbf{u}_0)$ and $C(p_1,\mathbf{u}_1)$ are disjoint crooked planes. Then, there exists a path $(p_t)_{t\in [0,1]}$ starting at $p_0$ and ending at $p_1$ such that $C(p_t,\mathbf{u}_t)$ is a smooth crooked foliation.
\begin{proof}
Since we assume that $\mathbf{u}_s$ are pairwise ultraparallel or asymptotic, we have that $\ldot{\mathbf{u}_t}{\mathbf{u}_s}\geq 1$ for all $t\leq s$. Changing the path $\mathbf{u}_s$ to $-\mathbf{u}_s$ if needed (both paths define the same linear crooked planes) we may also assume that $-\mathbf{u}_t,\mathbf{u}_s$ are consistently oriented for all $t<s$.
For any pair of smooth functions $f,g : [0,1]\rightarrow \mathbb{R}^{>0}$, define \[\mathbf{v}_{f,g}(s) := f(s)\mathbf{u}_s^- - g(s)\mathbf{u}_s^+.\] Then, the path of vertices $p_{f,g}(t) := p_0 + \int_0^t \mathbf{v}_{f,g}(s)\,\mathrm{d} s$ satisfies the hypotheses of Theorem \ref{charettekimUltrap} since its derivative \[\dot{p}_{f,g}(t) = \mathbf{v}_{f,g}(t)\] lies in the interior of $\mathsf{V}(\mathbf{u}_t)$ by definition.
Let $\mathsf{D}$ denote the collection of displacement vectors $p_{f,g}(1)-p_0$:
\[\mathsf{D} = \left\{\left. \int_0^1 \mathbf{v}_{f,g}(s)\,\mathrm{d} s ~\right|~ f,g :[0,1]\rightarrow \mathbb{R}^{>0}\right\}.\] Then $\mathsf{D}$ is a convex cone since $k\mathbf{v}_{f,g}=\mathbf{v}_{kf,kg}$ for $k\in \mathbb{R}^{>0}$ and $\mathbf{v}_{f_1,g_1} + \mathbf{v}_{f_2,g_2} = \mathbf{v}_{f_1+f_2,g_1+g_2}$. Moreover, since by Theorem \ref{charettekimUltrap} the crooked planes $C(p_{f,g}(t),\mathbf{u}_t)$ define crooked foliations, the initial and final crooked planes are disjoint and so $\mathsf{D}\subset\mathsf{A}(-\mathbf{u}_0,\mathbf{u}_1)$. Since the cone $\mathsf{A}(-\mathbf{u}_0,\mathbf{u}_1)$ is the interior of the convex hull of the four rays generated by $\mathbf{u}_0^-,-\mathbf{u}_0^+,\mathbf{u}_1^-,-\mathbf{u}_1^+$, to show equality of the cones it suffices to show that these rays can be approximated by vectors in $\mathsf{D}$.
Consider the sequences $f_n(s) = ne^{-n s}$ and $g_n(s)=e^{-n}$. Integrating by parts we get \[\int_0^1 f_n(s)\mathbf{u}_s^-\,\mathrm{d} s = \mathbf{u}_0^- - e^{-n}\mathbf{u}_1^- + \int_0^1 e^{-n s}\dot{\mathbf{u}}_s^- \,\mathrm{d} s.\] Therefore, as $\mathbf{u}_s$ is smooth and so $\mathbf{u}^+_s$ and $ \dot{\mathbf{u}}^-_s$ are bounded on $[0,1]$, \[\lim_{n\rightarrow \infty} \int_0^1\mathbf{v}_{f_n,g_n}(s)\,\mathrm{d} s = \mathbf{u}^-_0.\] We conclude that $\mathsf{D}$ contains vectors arbitrarily close to the ray $\mathbb{R}^{>0}\mathbf{u}_0^-$.
Similarly, if $f$ is concentrated near $s=1$ and $g$ is small we can approximate the ray $\mathbf{u}_1^-$, and exchanging the roles of $f$ and $g$ we approximate the other two rays on the boundary of the convex cone $\mathsf{A}(-\mathbf{u}_0,\mathbf{u}_1)$. \end{proof} \end{prop}
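To illustrate the limit computed in the proof (purely as a numerical check, with an ad hoc choice of path), one can take the pairwise ultraparallel family $\mathbf{u}_s=(\cosh s,0,\sinh s)$, whose normalized null vectors are $(\sinh s,\pm1,\cosh s)/\sqrt 2$ (which of the two is $\mathbf{u}_s^-$ depends on the orientation convention and is immaterial here), and verify that $\int_0^1 f_n(s)\,\mathbf{u}_s^-\,\mathrm{d} s$ approaches $\mathbf{u}_0^-$ as $n$ grows.
\begin{verbatim}
import numpy as np

s = np.linspace(0.0, 1.0, 200001)
W = np.stack([np.sinh(s), np.ones_like(s), np.cosh(s)], axis=1) / np.sqrt(2)
ds = s[1] - s[0]

for n in (5, 20, 100, 500):
    f = n * np.exp(-n * s)                     # f_n(s) = n e^{-n s}
    I = ds * np.sum(f[:, None] * W, axis=0)    # Riemann sum for the integral
    print(n, np.linalg.norm(I - W[0]))         # distance to w(0); tends to 0
\end{verbatim}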
The previous proposition has the following interpretation : given any geodesic foliation $\mathcal{F}$ of the region between two geodesics $\ell_0,\ell_1$ of $\mathbb{H}^2$ and basepoints $p_0,p_1\in \mathrm{Min}$ such that the crooked planes with vertices $p_i$ and stems corresponding to $\ell_i$ are disjoint, $\mathcal{F}$ can be lifted to a foliation by crooked planes of the region between the crooked planes.
{}
\end{document}
\begin{document}
\title{Quantum thermal machines with single nonequilibrium environments}
\author{Bruno Leggio} \affiliation{Laboratoire Charles Coulomb, UMR 5221 Universit\'{e} de Montpellier and CNRS, F- 34095 Montpellier, France}
\author{Bruno Bellomo} \affiliation{Laboratoire Charles Coulomb, UMR 5221 Universit\'{e} de Montpellier and CNRS, F- 34095 Montpellier, France}
\author{Mauro Antezza} \affiliation{Laboratoire Charles Coulomb, UMR 5221 Universit\'{e} de Montpellier and CNRS, F- 34095 Montpellier, France} \affiliation{Institut Universitaire de France, 103 Boulevard Saint-Michel, F-75005 Paris, France}
\newcommand{\ket}[1]{\displaystyle{|#1\rangle}}
\newcommand{\bra}[1]{\displaystyle{\langle #1|}}
\date{\today}
\begin{abstract} We propose a scheme for a quantum thermal machine made of atoms interacting with a single non-equilibrium electromagnetic field. The field is produced by a simple configuration of macroscopic objects held at thermal equilibrium at different temperatures. We show that these machines can deliver all thermodynamic tasks (cooling, heating and population inversion), and they do so by establishing quantum coherence with the body on which they act. Remarkably, this system allows one to reach efficiencies at maximum power very close to the Carnot limit, far closer than in existing models. Our findings offer a new paradigm for efficient quantum energy flux management, and can be relevant for both experimental and technological purposes. \end{abstract}
\pacs{}
\maketitle \section{Introduction} Recent years have seen a growing interest in thermodynamics at atomic scale \cite{GemmerBook, Blicke2012, Brunner2012, Horodecki2013} due to the latest-generation manipulation of a few, or even single, elementary quantum systems \cite{Blicke2012, Haroche2013}. In particular, out-of-equilibrium thermodynamics of quantum systems represents one of the most active research areas in the field \cite{Esposito2009, Deffner2011, Leggio2013a, Leggio2013b,Abah2014}. In this context, triggered by vast technological outcomes \cite{Scully2010, Haenggi2009}, the concept of quantum absorption thermal machine \cite{Scovil1959} has been reintroduced \cite{Linden2010, Levy2012, Correa2014, Venturelli2013, Correa2013, Brunner2014}. These machines are particularly convenient since they function without external work, extracting heat from thermal reservoirs through single atomic transitions to provide thermodynamic tasks (e.g., refrigeration).
Nonetheless, fundamental issues remain unsolved. A first one is the connection of a single atomic transition to given thermal reservoirs, which poses serious obstacles to practical realizations of such machines. A second, more theoretical issue concerns the role of quantumness. Indeed, in typical models quantum features are not required \cite{Scovil1959,Correa2013}, and only recently the advantages of quantum properties in thermal reservoirs have been pointed out \cite{Correa2014}. The role of quantum features in the machine itself is debated \cite{Correa2013, Brunner2014}, so that the advantages of quantum machines over standard ones remain partially unclear. \begin{figure}
\caption{(a) A slab of thickness $\delta$ at temperature $T_S$ is placed in the blackbody radiation of some walls at temperature $T_W$. Two atoms are placed in the resulting OTE electromagnetic field, at a distance $z$ from the slab and at a distance $r$ from each other. (b) Stationary heat fluxes between the OTE environment $E$ and each atomic transition. The OTE field also mediates an effective atomic interaction producing an energy exchange $Q_r$ between resonant atomic transitions. Each flux contribution corresponds to a term in the atomic master equation \eqref{METQ}.}
\label{model}
\label{scheme}
\label{system}
\end{figure}
In this paper we address both of these open problems by introducing a new quantum thermal machine setting, based on an out-of-thermal-equilibrium (OTE) electromagnetic bath naturally (i) coupling to each single atomic transition, and (ii) creating quantum features in the machine. The field is produced by macroscopic objects, and acts on each atomic transition as a different thermal bath at an effective temperature, hence providing all the elements needed for quantum absorption tasks.
This paper is structured as follows: the physical system is introduced in Section \ref{system}, along with the master equation governing its dynamics, while Section \ref{thermodynamics} is devoted to the introduction of thermodynamic quantities characterising the heat exchanges happening between atoms and field. In Section \ref{tasks} the first part of the results is given, concerning the action of the machine, the different tasks it can produce and their intrinsic quantum origin. The second part of the results of this work, about the machine efficiency and its Carnot limit, is given in Section \ref{efficiency}. Finally, remarks and conclusions are drawn in Section \ref{conclusions}.
\section{Physical system}\label{system} The setup of this paper is schematically depicted in Fig. \ref{system}, where a slab of thickness $\delta$ at temperature $T_S$ is placed in the blackbody radiation emitted by some walls at temperature $T_W\neq T_S$. The total electromagnetic field embedding the space between the slab and the walls is therefore given by the sum of four contribution: the direct blackbody radiation of the walls, the radiation emitted naturally by the slab and the walls' radiation after being either reflected or transmitted by the slab. Such an OTE field has been studied in the context of Casimir-Lifshitz force and heat transfer \cite{Antezza2004, Antezza2005, Antezza2006, Messina2011}, where its properties have been characterised in terms of the field correlators through a scattering matrix approach. The slab and the walls, macroscopic objects, are here the only ones directly connected to thermal baths. In addition, a three-level atom $M$ (machine) and a two-level atom $B$ (target body) are placed at the same distance $z$ from the surface of the slab and spatially separated by a distance $r$. The atomic open system involves then four transitions: the body transition labeled as $B$ and the three machine transitions labeled as $1,2,3$. Transition $1$ connects the two lowest-lying energy eigenstates (red transition in Fig. \ref{scheme}) and transition $2$ the two highest ones (green transition in Fig. \ref{scheme}). The OTE field interacts with them through the Hamiltonian $H_I=-\sum_i\mathbf{d}_i \cdot \mathbf{E}(\mathbf{R}_i)$, where $\mathbf{d}_i$ is the dipole moment of the $i$-th transition of the atomic system and $\mathbf{E}(\mathbf{R}_i)$ is the electromagnetic field at its position $\mathbf{R}_i$. The total Hamiltonian of the system is \begin{equation} H_{tot}=H_M+H_B+H_{\mathrm{field}}+H_I, \end{equation} where $H_{\mathrm{field}}$ is the Hamiltonian of the OTE field. In the following we will not need the explicit expression of $H_{\mathrm{field}}$ since only the field correlations will enter the master equation describing the dynamics of the atoms. The free atomic Hamiltonians $H_M$ and $H_B$ have expressions \begin{eqnarray} H_B&=&\big(\omega_B +\Delta S(\omega_B)\big)\sigma^{\dag}\sigma=\widetilde{\omega}_B\sigma^{\dag}\sigma,\label{HQ}\\ H_M&=&\big(\omega_1 +\Delta S(\omega_1)+S^-(\omega_2)-S^-(\omega_3)\big)\kappa_1^{\dag}\kappa_1\nonumber \\ &+&\big(\omega_3 +\Delta S(\omega_3)+S^+(\omega_2)-S^-(\omega_1)\big)\kappa_3^{\dag}\kappa_3\nonumber\\ &=&\widetilde{\omega}_1\kappa_1^{\dag}\kappa_1+\widetilde{\omega}_3\kappa_3^{\dag}\kappa_3,\label{HT} \end{eqnarray} being $\sigma$ ($\sigma^{\dag}$) the lowering (raising) operator of the body $B$ and $\kappa_n$, $\kappa_n^{\dag}$ ($n=1,2,3$) the lowering and raising operators of $M$. $S^{\pm}(\omega_i)$ here represents a shift of the energy of each level in the $i-$th transition due to the local interaction with the field and $\Delta S(\omega)=S^+(\omega)-S^-(\omega)$. In the third equality the renormalised transition frequencies $\widetilde{\omega}_i$ for $M$ and $B$ have been introduced to account for the effects of the shifts $S^{\pm}(\omega_i)$. Throughout this work we will always assume the physical consequences of such frequency renormalisation to be negligible, such that $\widetilde{\omega}_i=\omega_i\,\,\forall i$. This assumption has been fully confirmed by extended numerical simulations, having always detected the relative error introduced by neglecting these shifts to be less than $1\%$.
It is worth stressing here that, differently from previous works on atomic-scale thermal machines \cite{Linden2010, Levy2012, Correa2014, Correa2013, Brunner2014}, each atomic transition interacts here with \textit{the same} electromagnetic field, which embeds all the space where the atoms are placed. As we will show in what follows, there is then no need to conceive different environments, each interacting with a single atomic transition: a single non-equilibrium electromagnetic field is here able to produce all the physics needed for quantum thermodynamic tasks. \subsection{The master equation} In \cite{Bellomo2013} the master equation (ME) for two emitters in such a field has been derived under the Markovian limit as \begin{equation}\label{METQ} \frac{d\rho}{d t}=-\frac{i}{\hbar}\big[H_T,\rho\big]+D_{B}(\rho)+\sum_{n=1}^3D_n(\rho)+D_d(\rho), \end{equation} where $H_T=H_M+H_B+H_{MB}$. $H_{MB}=\hbar\Lambda(\omega_B)(\sigma^{\dag}\kappa_r+\sigma \kappa_r^{\dag})$ is an effective \textit{field-mediated} dipole interaction coupling resonant atomic transitions. Here we assume $B$ and $M$ to be resonant through transitions at frequency $\omega_B$, and all their dipoles to have the same magnitude and to lie along the line joining the two atoms, and oriented from $B$ to $M$. $\hbar\Lambda(\omega)$ is the effective interaction strength and $\sigma$ ($\kappa_r$) is the lowering operator of the body (of the resonant transition of the machine). $H_{MB}$ originates from the correlations of the fluctuations of atomic dipoles due to the common field.
The derivation of the master equation \eqref{METQ} has been performed under the Markovian and rotating wave approximations. It involves the average photon number $n(\omega, T)=1/\big(e^{\hbar \omega/k_B T}-1\big)$ at frequency $\omega$ and temperature $T$ and the two functions $\alpha_{W(S)}$ which encompass all the properties of the environment, such as the dielectric properties of the slab and the correlation functions of the field. For their explicit expressions we refer the interested reader to \cite{Bellomo2013}. The dissipative effects due to the atom-field coupling are accounted for by the dissipators $D_k$ with expressions \begin{eqnarray} D_B(\rho)&=&\Gamma_B^+(\omega_B)\Big(\sigma\rho\sigma^{\dag}-\frac{1}{2}\big\{\sigma^{\dag}\sigma,\rho\big\}\Big)\nonumber \\ &+&\Gamma_B^-(\omega_B)\Big(\sigma^{\dag}\rho\sigma-\frac{1}{2}\big\{\sigma\sigma^{\dag},\rho\big\}\Big),\label{DQ} \end{eqnarray} \begin{eqnarray} D_n(\rho)&=&\Gamma_n^+(\omega_n)\Big(\kappa_n\rho\kappa_n^{\dag}-\frac{1}{2}\big\{\kappa_n^{\dag}\kappa_n,\rho\big\}\Big)\nonumber \\ &+&\Gamma_n^-(\omega_n)\Big(\kappa_n^{\dag}\rho\kappa_n-\frac{1}{2}\big\{\kappa_n\kappa_n^{\dag},\rho\big\}\Big),\label{DT} \end{eqnarray} \begin{eqnarray}\label{Dnonloc} D_d(\rho)&=&\Gamma_d^+(\omega_B)\Big(\kappa_{r}\rho\sigma^{\dag}-\frac{1}{2}\big\{\sigma^{\dag}\kappa_{r},\rho\big\}\Big)\nonumber \\ &+&\Gamma_d^-(\omega_B)\Big(\kappa_{r}^{\dag}\rho\sigma-\frac{1}{2}\big\{\sigma\kappa_{r}^{\dag},\rho\big\}\Big)+h.c.\label{DTQ} \end{eqnarray} where $\omega_d=\omega_B$. One recognises standard local dissipation terms ($D_B$ and $D_n$), each associated to the degrees of freedom of a well-identified atom, and non-local dissipation ($D_d$) which describes energy exchanges at frequency $\omega_B$ of the atomic system \textit{as a whole} with its OTE environment, not separable in machine or body contributions, its action involving degrees of freedom of both atoms in a symmetric way. The parameters $\Gamma_i^{\pm}(\omega_i)$ (the rates of the dissipative processes of absorption and emission of photons through local or non-local interactions) depend on local or non-local correlations of the field in the atomic positions, which in turn are functions of the temperatures $T_S$ and $T_W$ and the dielectric properties of the slab $S$ as \begin{eqnarray} \frac{\Gamma^+_i(\omega)}{\Gamma^0_i(\omega)}&=&\big[1+n(\omega,T_W)\big]\alpha_W^i(\omega)\nonumber \\ &+&\big[1+n(\omega,T_S)\big]\alpha_S^i(\omega),\\ \frac{\Gamma^-_i(\omega)}{\Gamma^0_i(\omega)}&=&n(\omega,T_W)\alpha_W^i(\omega)^*+n(\omega,T_S)\alpha_S^i(\omega)^*, \end{eqnarray}
where $\Gamma^0_i(\omega)=|\mathbf{d}_i|^2\omega^3/(3\hbar \pi \varepsilon_0 c^3)$, for $i=1,2,3,B$, is the vacuum spontaneous emission rate of the $i$-th atomic transition having a dipole moment $\mathbf{d}_i$, and $\Gamma^0_d(\omega)=\sqrt{\Gamma^0_B(\omega)\Gamma_{r}^0(\omega)}$. Thanks to the functional dependence of these parameters on the frequency and on the position of the atom, and to the critical behaviour shown in correspondence to the resonance frequency $\omega_S$ of the slab material, thermodynamic tasks become achievable. To simplify the notation, in the rest of this work the explicit $\omega$-dependence in all the $\Gamma$s will be omitted.
\section{Thermodynamics of the system}\label{thermodynamics} After having introduced all the dynamic effects characterizing the atomic system, in this Section we introduce some quantities which will characterize the machine tasks and functioning. \subsection{Environmental and population temperatures} In order to describe the machine thermodynamics, it is convenient to introduce two kinds of temperatures. A first one characterizes the action of the field on the atoms: it has been shown \cite{Bellomo2012} that the atom-field interaction can be effectively rewritten as if each atomic transition felt a local \textit{equilibrium} environment whose temperature depends on the transition frequency, on the properties of the slab and on the slab-atom distance $z$. These effective \emph{environmental temperatures} depend on the rates $\Gamma_n^{\pm}$ as \begin{equation}\label{Tn} T_n=T(\omega_n)=\frac{\hbar \omega_n}{k_B \ln(\Gamma_n^+/\Gamma_n^-)}, \end{equation}
with $n=1,2,3,B,d$. It is important to stress here that, despite these effective environments can be characterised by a temperature, their spectra are not simply blackbody spectra as they have their own transition-dependent Purcell factor \cite{Bellomo2012}.\linebreak In this framework we study thermodynamic effects of \textit{stationary} heat fluxes between $M$ and $B$, mediated and sustained by the OTE environment. To characterize the effects of these fluxes a second kind of temperature has to be introduced. Indeed, as much as the environmental temperatures characterise the thermodynamics of the OTE field, we need a second parameter to describe the energetics of atoms. In particular, atoms exchange energy under the form of heat with their surroundings by emitting photons through one of their transitions. This means that the possibility of such heat exchanges is related to the distribution of population in each atomic level. Note that, from the very definition of $T_n$, the environmental temperature depends on how the field tends to distribute atomic population in each pair of levels, due to the presence of the ratio $\Gamma_n^+/\Gamma_n^-$. A transition is therefore in equilibrium with its effective local environment if and only if its two levels $|a\rangle$ and $|b\rangle$ are populated such that $p_a/p_b=\Gamma^+/\Gamma^-$. If not, the field and the atom will exchange heat along such a transition until such a ratio is reached. This suggests to introduce a second temperature, hereby referred to as \textit{population temperature}, which for a transition of frequency $\omega_n$ ($n=1,2,3,B$) is defined as \begin{equation}\label{thetai} \theta_n=\frac{\hbar \omega_n}{k_B \ln(p^a_n/p^b_n)}, \end{equation} $p^a_n$ ($p^b_n$) being the stationary population of the ground (excited) state of the $n$-th transition. The result of a stationary thermodynamic task on the body, be it refrigeration, heating or population inversion, is then to modify its population temperature $\theta_B$. \subsection{Heat fluxes} The condition $T_i=\theta_i$ is satisfied only if detailed balance ($p^a_i/p^b_i=\Gamma_i^+/\Gamma_i^-$) holds. It can be proven that detailed balance can be broken in a three level atom in OTE fields. As a consequence the machine $M$ produces non-zero stationary heat fluxes with $B$ and the field environment, one for each dissipative process $D_n$ in the ME \eqref{METQ}. These fluxes, following the standard approach in the framework of Markovian open quantum systems \cite{BreuerBook}, are given as $\dot{Q}_n=\mathrm{Tr}\big[H_{at} D_n \rho\big]$, where $\rho$ is here the stationary atomic state and $H_{at}$ is a suitable atomic Hamiltonian which can be $H_M$, $H_B$ or $H_M+H_B$ depending on which part of the atomic system the heat flows into. Note that this definition implies an outgoing heat flux to be negative.
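These two temperatures are straightforward to evaluate numerically. The following Python sketch (with hypothetical rates and an arbitrary optical frequency, chosen purely for illustration and not taken from the paper) computes $n(\omega,T)$, the environmental temperature of Eq. \eqref{Tn} and the population temperature of Eq. \eqref{thetai}, checks that detailed balance $p_a/p_b=\Gamma^+/\Gamma^-$ gives $\theta=T$, and verifies that the sign of the exact heat flux of Eq. \eqref{fluxtemp} below coincides with the sign of $T_n-\theta_n$.
\begin{verbatim}
import numpy as np
from scipy.constants import hbar, k as kB

def n_photon(omega, T):                # average photon number
    return 1.0 / np.expm1(hbar * omega / (kB * T))

def T_env(omega, g_plus, g_minus):     # environmental temperature, Eq. (Tn)
    return hbar * omega / (kB * np.log(g_plus / g_minus))

def theta_pop(omega, p_a, p_b):        # population temperature, Eq. (thetai)
    return hbar * omega / (kB * np.log(p_a / p_b))

omega = 2 * np.pi * 1.0e14             # illustrative optical frequency (rad/s)
gp, gm = 1.0e6, 2.5e5                  # hypothetical rates Gamma^+, Gamma^- (1/s)
print(n_photon(omega, 300.0))          # mean photon number at room temperature

T = T_env(omega, gp, gm)
p_b = 1.0 / (1.0 + gp / gm)            # excited-state population at detailed balance
p_a = 1.0 - p_b                        # ground-state population
print(T, theta_pop(omega, p_a, p_b))   # the two temperatures coincide

# sign of the exact heat flux (up to a positive prefactor) vs. sign of T - theta
for th in (0.9 * T, T, 1.1 * T):
    flux = np.exp(hbar * omega / (kB * th)) - np.exp(hbar * omega / (kB * T))
    print(np.sign(T - th), np.sign(flux))   # the signs agree
\end{verbatim}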
Following their definition, these heat fluxes depend both on the field properties (through the structure of the dissipators $D_n$) and on the properties of the atoms through their stationary state. This dependence, for the local dissipators, can be put under the very clear thermodynamic form \begin{equation}\label{fluxtemp} \dot{Q}_n=K_n\left(e^{\frac{\hbar \omega_n}{k_B \theta_n}}-e^{\frac{\hbar \omega_n}{k_B T_n}}\right)\simeq C_n(\theta_n)\big(T_n-\theta_n\big), \end{equation} where $K_n>0$, $C_n(\theta_n)$ is a positive function of $\theta_n$ (and of other parameters such as the frequency of the transition) and the second approximated equality holds in the limit $\theta_n\simeq T_n$. Equation \eqref{fluxtemp} shows that the direction of heat flowing is uniquely determined by the sign of the difference $T_n-\theta_n$, matching the thermodynamic expectation that heat flows naturally from the hotter to the colder body and strengthening the physical meaning of $\theta_n$.
There being no time-dependence in the Hamiltonian of the model, the first law of thermodynamics at stationarity for the total atomic system comprises only heat terms and assumes the form \begin{equation}\label{1stlaw} \dot{Q}_B+\sum_{n=1}^3\dot{Q}_n+\dot{Q}_d=0. \end{equation} In addition, energy is exchanged between the machine and the body thanks to their field-induced interaction $H_{MB}$. In Appendix A, following the general scheme developed in \cite{Weimer2008}, we show such an exchange to be under the form of heat. Seen by $M$ such a flux is $\dot{Q}_r=-i\mathrm{Tr}\big(H_M[H_{MB},\rho_s]\big)/\hbar$ while as expected $B$ sees the flux $-\dot{Q}_r$. By introducing the explicit expressions for $H_M$ and $H_{MB}$, one can obtain a particularly simple form for $\dot{Q}_r$ as \begin{equation}\label{Qrcorr} \dot{Q}_r=2\hbar\omega_B\Lambda(\omega_B)\langle \sigma^{\dag}\kappa_r \rangle_{-}, \end{equation} where $\langle \sigma^{\dag}\kappa_r \rangle_-=\frac{i}{2}\langle\sigma^{\dag}\kappa_r-\sigma\kappa_r^{\dag}\rangle$. In an analogous way, by employing Eq. \eqref{Dnonloc}, one can evaluate the change in internal energy of $M$ due to the non-local heat flux exchanged by the atomic system with the OTE environment, given by $\dot{Q}_d=\mathrm{Tr}\big[H_M D_d \rho\big]$. It is \begin{equation}\label{Qdexpl} \dot{Q}_d=-\hbar\omega_B\mathrm{Re}\Big\{ \langle \sigma\kappa_r^{\dag} \rangle\big[\Gamma_d^+-\left(\Gamma_d^-\right)^*\big]\Big\}. \end{equation} Finally, the change in the internal energy of $B$ due to the same effect, $\mathrm{Tr}\big[H_B D_d \rho\big]$, is given by same expression \eqref{Qdexpl}.
Fig. \ref{scheme} shows the full scheme of such heat fluxes for a particular configuration of the system. The two levels of $B$ will be labeled here as $|g\rangle$ and $|e\rangle$. Although the two-level assumption might seem specific, it has been shown in various contexts \cite{Brunner2012, DeLiberato2011} that quantum thermal machines only couple to some effective two-level subspaces in the Hilbert space of the body they are working on. A two-level system is therefore the fundamental building block of the functioning of quantum thermodynamic tasks.
\section{Coherence-driven machine tasks}\label{tasks} The main result of this paper is the possibility to drive the temperature $\theta_B$ of the body outside of the range defined by the external reservoirs at $T_W$ and $T_S$. The body, without the effect of the machine, would thermalise at the local environmental temperature ($\theta_B=T_B$), corresponding to $p_e/p_g=\Gamma_B^-/\Gamma_B^+$. This temperature is necessarily constrained within the range $[T_W,T_S]$ \cite{Bellomo2012}.\linebreak Due to the particular form of the master equation \eqref{METQ}, in which all collective atomic terms involve only resonant atomic transitions, in the non-resonant subspace the collective atomic state will be diagonal in the eigenbasis of $H_B+H_M$. This is due to the fact that local dissipation in Eq. \eqref{METQ} of Section \ref{system} induces a thermalisation with respect to the free atomic Hamiltonians. On the other hand, in the resonant atomic subspace of the eigenbasis of $H_B+H_M$ spanned by the states $|g\rangle, |e\rangle$ of $B$ and the two states $|0_r\rangle, |1_r\rangle$ of the transition of $M$ at frequency $\omega_B$, the most general form of the atomic stationary state is \begin{equation}\label{rhoX} \begin{split}\begin{pmatrix}
p_{e1_r} & 0 & 0 & 0\\
0 & p_{e0_r} & c_r & 0\\
0 & c_r^* & p_{g1_r} & 0\\
0 & 0 & 0 & p_{g0_r}\\ \end{pmatrix}. \end{split}\end{equation}
A coherence $c_r$ is present in the decoupled basis between the two atomic states $|g1_r\rangle$ and $|e0_r\rangle$ having the same energy.
Note that the temperature $\theta_B$ of the body increases monotonically with the ratio $p_e/p_g$. By tracing out the machine degrees of freedom from the master equation \eqref{METQ}, one obtains a diagonal state with stationary populations $p_g$ and $p_e$ of the body $B$. Let now $\dot{Q}_r^B$ denote the flux $\dot{Q}_r$ as seen by $B$. The expressions for the heat fluxes exchanged by $B$ with its surroundings are then \begin{eqnarray} \dot{Q}_r^B&=&-\frac{i}{\hbar}\big\langle[H_B,H_{MB}]\big\rangle,\label{dotQRQ}\\ \dot{Q}_d&=&\Gamma_{d}^+\Big[\big\langle\sigma^{\dag}H_B\kappa_{r}\big\rangle-\frac{1}{2}\big\langle\{H_B,\sigma^{\dag}\kappa_{r}\}\big\rangle\Big]\nonumber\\ &+&\Gamma_{d}^-\Big[\big\langle\sigma H_B\kappa_{r}^{\dag}\big\rangle-\frac{1}{2}\big\langle\{H_B,\sigma\kappa_{r}^{\dag}\}\big\rangle\Big]+c.c.\label{QQTQ},\\ \dot{Q}_B&=&\Gamma_B^+\Big[\big\langle \sigma^{\dag} H_B \sigma \big\rangle-\frac{1}{2}\big\langle \{ H_B,\sigma^{\dag}\sigma \} \big\rangle\Big]\nonumber \\ &+&\Gamma_B^-\Big[\big\langle \sigma H_B \sigma^{\dag} \big\rangle-\frac{1}{2}\big\langle \{ H_B,\sigma\sigma^{\dag} \} \big\rangle\Big],\label{QB} \end{eqnarray} where the mean values are evaluated over the stationary state of the total system. Exploiting the general form \eqref{rhoX} of the stationary state, all the mean values above can be evaluated straightforwardly. Requiring the sum of \eqref{dotQRQ}, \eqref{QQTQ} and \eqref{QB} to vanish (first law for $B$, analogous to Eq. \eqref{1stlaw}), one obtains \begin{equation}\label{popobj} \frac{p_e}{p_g}=\frac{\Gamma_B^--\Delta(\omega_B)}{\Gamma_B^++\Delta(\omega_B)}, \end{equation} where \begin{equation}\label{delta} \Delta(\omega_B)=2\Lambda(\omega_B)\mathrm{Im}\{c_r\}+\mathrm{Re}\Big\{c_r \big[\Gamma_d^+-\left(\Gamma_d^-\right)^*\big]\Big\}. \end{equation} Note now that, thanks to Eq. \eqref{rhoX}, $\langle \sigma\kappa_r^{\dag} \rangle=c_r$ and $\langle \sigma^{\dag}\kappa_r \rangle_{-}=\mathrm{Im}(c_r)$, such that the first term in $\Delta(\omega_B)$ stems from the resonant heat $-\dot{Q}_r=-2\hbar\omega_B\Lambda(\omega_B)\mathrm{Im}(c_r)$ exchanged with the machine, while the second is due to the non-local heat flux $\dot{Q}_d$. Eqs. \eqref{popobj} and \eqref{delta} show that the thermal machine works \textit{only if a stationary quantum coherence $c_r$ is present}. Remarkably, it can be shown \cite{Spehner2014} that quantum discord \cite{Ollivier2001} (a key measure of purely quantum correlations) is a monotonic function of the absolute value of the coherence $c_r$ in our system. In contrast to previous studies \cite{Correa2013}, here discord between $M$ and $B$ is a necessary condition for any thermodynamic task, and represents a resource the machine can use through the two different processes $\dot{Q}_r$ and $\dot{Q}_d$. Eq. \eqref{popobj} means that a quantum coherence between machine and body modifies the stationary temperature of the body with respect to $T_B$. This modification is reported in Fig. \ref{temp1}, where the behaviour of $\theta_B$ as a function of the slab-atoms distance $z$ is shown for two different slab thicknesses $\delta$. Four possible regimes can be singled out: both during refrigeration ($\theta_B<T_B$) and heating ($\theta_B>T_B$), $\theta_B$ can be either driven outside of the range $[T_W,T_S]$ (strong tasks) or kept within it (light tasks). As a limiting case of strong heating, the body can be brought to infinite temperature ($p_e=p_g$) and, further on, to negative ones, producing population inversion. \begin{figure}
\caption{Stationary temperature $\theta_B$ of the body (solid black line), machine resonant temperature $\theta_M$ in the absence of $B$ (dotted pink line) and local-environment temperature $T_B$ felt by the body (dot-dashed green line) versus $z$. The slab is made of sapphire and kept at $T_S=500\,\mathrm{K}$ while $T_W=300\,\mathrm{K}$. The machine transition frequencies are $\omega_1=0.9\,\omega_S$, $\omega_B=\omega_2=0.1\,\omega_S$ and $\omega_3=\omega_S$, $\omega_S=0.81\cdot10^{14}\,\mathrm{rad}s^{-1}$ being the first resonance frequency of sapphire (the optical data for the dielectric permittivity of the slab material are taken from \cite{PalikBook}). The two atoms are placed at a distance $r=1\,\mu\mathrm{m}$ from each other. Panel (a): numerical data for a semi-infinite slab. Light and strong refrigeration are achieved in this configuration. Panel (b): same quantities for a slab of finite thickness. The plotted functions are in this case $-1/\theta_B$, $-1/\theta_M$ and $-1/T_B$ (left vertical scale), with the same color code as before. The population inversion corresponds to divergent $\theta_B$ and $\theta_M$. The corresponding value of temperature can be read on the right vertical scale. All tasks are in this case obtained.}
\label{temp1}
\end{figure} As one can easily see from Fig. \ref{temp1}, the physics behind the absorption tasks lies in the strong sensitivity of the population temperature $\theta_B$ of the body to the population temperature $\theta_M$ that the machine develops along the resonant transition when the body is not present. \subsection{Optimal conditions for thermodynamic tasks}\label{optimal}
It is shown in Fig. \ref{temp1} that the machine has a very high thermal inertia, such that the body, when put into thermal contact with the machine having a certain temperature $\theta_M$, thermalizes with it and $\theta_B\simeq\theta_M$. Fig. \ref{temprate} shows the mechanism the machine uses to modify its population temperature $\theta_M$ in the absence of the body, thanks to the different environmental temperatures each of its transitions feels. This drives $M$ out of the detailed-balance condition and allows $M$ to keep its resonant transition temperature almost constant. We label here the three transitions of the machine as high frequency ($\omega_h$), average frequency ($\omega_a$) and low frequency ($\omega_l$), one of which (suppose here $\omega_l$, connecting states $|0_r\rangle$ and $|1_r\rangle$) is resonant with $\omega_B$. For simplicity, let us focus on refrigeration only, which we suppose to happen through transition 2 (connecting the first and second excited states), since in this configuration the high-frequency transition $3$ is always used by an absorption refrigerator to dissipate heat into the environment \cite{Correa2014}. As shown in Fig. \ref{temp1}, to obtain a low $\theta_B$ the resonant machine transition must be made cold. This is achieved by reducing the ratio $p_{1_r}/p_{0_r}$, which in turn happens when: \\ (a) the effective environmental temperature $T_h$ felt by the high-frequency transition is very cold. In this way the environment contributes to increasing the population of the ground state of $M$ at the expense of the population of its most energetic state. The resonant transition necessarily involves one of these two levels, and in both cases the effect of the high-frequency transition helps reduce $p_{1_r}/p_{0_r}$;\\ (b) the effective environmental temperature felt by the average transition is very hot. This, following the same idea, means either reducing the population $p_1$ or increasing $p_0$, thus reducing $p_{1_r}/p_{0_r}$.\\ \begin{figure}
\caption{Conditions for refrigeration: effective rate temperatures $T_h$, $T_a$ and $T_l$ of the local environments felt by the three machine transitions, for $\omega_h=\omega_S$, $\omega_a=0.8\,\omega_S$ and $\omega_l=0.2\,\omega_S$ ($\omega_S=0.81\cdot 10^{14}$\,rad\,\,$\mathrm{s}^{-1}$) versus the machine-slab distance $z$ in the absence of the body. The slab and walls temperatures are $T_S=200\,$K and $T_W=300\,$K, and the slab thickness is $\delta=0.05\,\mu$m. As the plot shows, the transition having the same frequency as the slab resonance is much more strongly affected by the field emitted by the slab, such that its rate temperature is kept much lower than $T_a$. This produces a mechanism, shown in the inset, according to which excitations (yellow dots) are transferred to the intermediate level of the machine and removed from its upper one. This in turn drives the population temperature $\theta_l$ of the transition at $\omega_l$ (transition 2 in the example) to values lower than the one $T_l$ of its local environment, allowing the machine to refrigerate objects. In this configuration, introducing a body $B$ at $z=1\,\mu$m from the slab and $r=1\,\mu$m from $M$, one obtains $\theta_B=160\,$K $<T_S$.}
\label{temprate}
\end{figure} When these two conditions are met, the machine can always redistribute its populations such that the ratio $p_{1_r}/p_{0_r}$ can be kept low and almost unaffected by the presence of another atom. The advantage of the OTE field configuration is that the effective field temperatures can be manipulated through a wide set of parameters involving $z$, $\delta$, $T_W$ and $T_S$. In particular, the role of the resonance of the slab material is crucial \cite{Bellomo2012}, as explained in the caption of Fig. \ref{temprate}. In the case $T_S<T_W$, transitions strongly affected by the field emitted by the slab feel a cold local environment. Moreover, provided $\omega_a$ is far enough from $\omega_S$, one can at the same time have $T_a\simeq T_W$. By this mechanism, $M$ can change the temperature $\theta_B$, bringing it to values far outside the range $[T_S,T_W]$.
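This mechanism admits a simple idealised estimate, in the spirit of the standard three-level analysis \cite{Scovil1959}: assuming that the two non-resonant transitions are each equilibrated with their effective environments at $T_h$ and $T_a$ (and neglecting the weaker action of the local bath on the resonant transition), detailed balance gives
\[
\frac{p_{1_r}}{p_{0_r}}=e^{-\frac{\hbar\omega_h}{k_B T_h}}\,e^{\frac{\hbar\omega_a}{k_B T_a}}\equiv e^{-\frac{\hbar\omega_l}{k_B \theta_l}},
\qquad
\frac{\omega_l}{\theta_l}=\frac{\omega_h}{T_h}-\frac{\omega_a}{T_a},
\]
so that a cold $T_h$ and a hot $T_a$ both lower $\theta_l$, consistently with conditions (a) and (b) above.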
We stress here that the difference between light and strong tasks is a fundamental one: light tasks could in principle be matched or outperformed by directly connecting the body to one of the two real reservoirs at $T_S$ or $T_W$, while strong tasks cannot be achieved by simple thermal contact with anything in the system.\linebreak $\Delta(\omega_B)$ strongly depends on the distance $z$ between the slab and the atomic system and on the external temperatures through $c_r$, $\Lambda$ and $\Gamma_d^{\pm}$. One can thus engineer one or many of these regimes at will, as shown in the functioning-phase diagram of the machine in Fig. \ref{phases} for a fixed thickness $\delta=0.05\,\mu\mathrm{m}$. All the strong and light functioning phases of the machine are found as a function of both $T_W-T_S$ and $z$.
\begin{figure}
\caption{Functioning phases of the absorption machine versus the atoms-slab distance $z$ and $\Delta T=T_W-T_S$. The sapphire slab has a thickness $\delta=0.05\,\mu\mathrm{m}$ and its temperature is continuously changed in the range $[30\,\mathrm{K},570\,\mathrm{K}]$. The walls are at $T_W=300\,\mathrm{K}$. Strong refrigeration (blue areas), strong heating (red areas), light refrigeration (cyan), light heating (orange) and population inversion (green) can all be obtained.}
\label{phases}
\end{figure} \section{Efficiency and Carnot limit}\label{efficiency} Consider now the refrigerating regime in which the machine extracts heat from the body through transition $2$. The scheme of heat fluxes is then exactly the one depicted in Fig. \ref{scheme}. The efficiency of this process is \begin{equation}\label{eff} \eta_{ref}=\frac{\dot{Q}_r}{\dot{Q}_1+\dot{Q}_2}, \end{equation} due to the fact that $\dot{Q}_r$ is the power produced by the machine, which absorbs energy from its surroundings through transitions $1$ and $2$ (the equivalent of a work input) while using transition $3$ to dissipate part of the absorbed energy (the equivalent of the coil of an ordinary fridge). The corresponding Carnot limit $\eta^C_{ref}$ can be obtained by analysing the machine functioning in its reversible limit (zero entropy production). The instantaneous entropy production rate $\sigma$ for quantum systems is defined as \cite{BreuerBook} \begin{equation}\label{secondlaw}
\sigma=-\frac{\mathrm{d}}{\mathrm{d}t}S(\rho(t)||\rho^{st})\geq0, \end{equation}
where $S(\rho(t)||\rho^{st})$ is the so-called relative entropy \cite{Vedral2002}, never increasing in time under Markovian dynamics. Following \cite{Correa2014}, one can apply equation \eqref{secondlaw} term by term to each dissipator in the master equation thanks to the fact that they all have a Markovian form. One thus obtains \begin{equation}\label{secondlawlocal} \sum_k\mathrm{Tr}\Big[(D_k\rho^{st})\ln \rho_k^{st}\Big]\geq 0, \end{equation} where $\rho_k^{st}$, $k=1,2,3,B,d$ is the kernel (stationary state) of the single dissipator $D_k$. The three local dissipators $D_n$ of the machine and the local dissipator $D_B$ of the body induce stationary states of the standard Gibbs form at the corresponding effective environmental temperatures, diagonal in the free atomic Hamiltonian basis. The nonlocal dissipator $D_d$, in the case studied here where the dipoles of $B$ and $M$ lie along the line connecting the two atoms (and, more generally, when $\Gamma_d^{\pm}\in \mathbb{R}$), has a kernel of the same Gibbs form at the environmental temperature $T_d$, local in the degrees of freedom of $M$ and of $B$. Introducing these single-dissipator stationary states into equation \eqref{secondlawlocal}, one obtains \begin{equation}\label{2law} \frac{\dot{Q}_1}{T_1}+\frac{\dot{Q}_2}{T_2}+\frac{\dot{Q}_3}{T_3}+\frac{\dot{Q}_B}{T_B}+\frac{2\dot{Q}_d}{T_d}\leq 0, \end{equation}
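The passage from Eq.~\eqref{secondlawlocal} to Eq.~\eqref{2law} can be made explicit as follows. For a dissipator whose kernel is of Gibbs form at a temperature $T_k$ with respect to the relevant free Hamiltonian $H_k$, one has $\ln\rho_k^{st}=-H_k/(k_B T_k)-\ln Z_k$ and, since each dissipator is trace preserving, $\mathrm{Tr}\big[D_k\rho^{st}\big]=0$, so that, schematically,
\[
\mathrm{Tr}\Big[(D_k\rho^{st})\ln \rho_k^{st}\Big]=-\frac{1}{k_B T_k}\mathrm{Tr}\big[H_k\,D_k\rho^{st}\big]=-\frac{\dot{Q}_k}{k_B T_k}.
\]
The non-local dissipator $D_d$ contributes twice, once through $H_M$ and once through $H_B$, each term giving the same flux $\dot{Q}_d$ (see Section \ref{thermodynamics}), which produces the factor $2\dot{Q}_d/T_d$.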
Equation \eqref{2law} is a form of the second law at stationarity for our system. With the help of the first law in Eq. \eqref{1stlaw} of Section \ref{thermodynamics}, of the known property of three-level atomic heat fluxes \cite{Scovil1959}, $|\dot{Q}_n/\dot{Q}_m|=\omega_n/\omega_m$ $\forall \,m,n=1,2,3$ (where $\dot{Q}_n$ is the total flux along the $n$-th transition), of the fact that in refrigeration $T_3<T_2,T_d$ and $T_2,T_d<T_1$ (as commented in Section \ref{optimal}), and under the condition $\dot{Q}_d<0$ (other cases can be treated analogously), one obtains from \eqref{2law} another first-degree inequality. This has a non-trivial solution only if $T_d>T_2$, from which a bound on the efficiency in Eq. \eqref{eff} can be obtained as shown in Appendix B. Such a bound depends only on the three frequencies of the machine and the temperatures of the effective local and non-local environments. In the case of refrigeration along transition $2$ the Carnot efficiency assumes the form \begin{equation}\label{carnot} \eta^C_{ref}= \begin{cases} \frac{\omega_2}{2\omega_1}+\frac{1}{2}\frac{T_2T_d(T_1-T_3)}{T_1T_3(T_d-T_2)}+\frac{\omega_2}{2\omega_1}\frac{T_2(T_d-T_3)}{T_3(T_d-T_2)},\,\,\mathrm{if}\,\,T_d>T_2,\\ \frac{\omega_2}{\omega_1},\,\,\mathrm{if}\,\,T_d\leq T_2. \end{cases} \end{equation} A direct numerical transcription of this bound is sketched after Fig. \ref{effmax1}. \subsection{Efficiency at maximum power} An important figure of merit for the realistic functioning of any thermal machine is how close to its Carnot limit it works when delivering maximum power (i.e., when $\dot{Q}_r$ is maximised). Many bounds are known for different setups, limiting the efficiency at maximum power $\eta^m$ to some fractions of $\eta^C$ \cite{Correa2014, Curzon1975}. Remarkably, our structured OTE environment allows for refrigeration tasks with $\eta^m$ much closer to $\eta^C$ than the bound known for quantum absorption machines based on ideal blackbody reservoirs \cite{Correa2014}, which for our system reads $\eta^m<0.75\,\eta^C$. This is exemplified in Fig. \ref{effmax1} for a particular configuration of the model. The blue triangles (left vertical scale) represent the ratio $\eta_{ref}/\eta^C_{ref}$, plotted versus $\omega_3$ while keeping $\omega_2=\omega_B=0.1\,\omega_S$ fixed. The red dots (right vertical scale) are the power $\dot{Q}_r$ plotted versus the same quantity, while the red dashed line is the machine-body discord (right vertical scale). It is clear that the power is maximised at $\omega_3=1.05\,\omega_S$, corresponding to $\eta_{ref}^m\simeq0.89\,\eta^C_{ref}$. $\dot{Q}_r$ starts decreasing, as classically expected when the efficiency approaches $\eta^C$, around $\omega_3\simeq0.9\,\omega_S$, but suddenly increases again when $\omega_3$ approaches $\omega_S$. This behaviour is due to the fact that, when one atomic transition is resonant with the characteristic frequency of the slab material, the atomic populations are strongly affected by the field emitted by the slab. Hence the non-blackbody nature of the total field becomes crucial (e.g., the atomic decay rate is no longer proportional to $\omega^3$), allowing the machine to overcome bounds set by blackbody physics. The role of discord as a machine resource is clearly shown here: at resonance, discord has a sharp peak leading to the high-power performance of $M$. \begin{figure}
\caption{The ratio $\eta_{ref}/\eta^C_{ref}$ (blue triangles, left vertical axis), the power of the machine ($\dot{Q}_r$, red dots, right vertical axis in units of $10^{-14}\,\mu \mathrm{J}/s$) and machine-body discord (dashed red line, right vertical scale in units of $10^{-4}$) versus the scaled machine transition frequency $\omega_3/\omega_S$. The sapphire slab is semi-infinite and at $T_S=395\,\mathrm{K}$, while $T_W=125\,\mathrm{K}$ and $z=4.8\,\mu\mathrm{m}$. The transition frequency of the body is fixed as $0.1 \omega_S$ ($\omega_S=0.81\cdot10^{14}\,\mathrm{rad}s^{-1}$), resonant with transition $2$ of the machine. The maximum power is reached for $\omega_3=1.05\,\omega_S$, corresponding to $\eta_{ref}/\eta^C_{ref}\simeq 0.89$. Remarkably, discord shows a sharp resonance peak, similarly to $\dot{Q}_r$.}
\label{effmax1}
\end{figure}
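For reference, the bound of Eq.~\eqref{carnot} can be evaluated directly. The following minimal Python transcription is a sketch only: the input values below are illustrative assumptions and are not the parameters used in the figures.
\begin{verbatim}
# Direct transcription of the Carnot bound of Eq. (carnot) for
# refrigeration along transition 2. Input values are illustrative.
def eta_carnot_ref(omega1, omega2, T1, T2, T3, Td):
    if Td <= T2:
        return omega2 / omega1
    return (omega2 / (2.0 * omega1)
            + 0.5 * T2 * Td * (T1 - T3) / (T1 * T3 * (Td - T2))
            + omega2 / (2.0 * omega1)
              * T2 * (Td - T3) / (T3 * (Td - T2)))

# illustrative effective temperatures [K]; frequencies in arbitrary units
print(eta_carnot_ref(omega1=0.9, omega2=0.1,
                     T1=400.0, T2=280.0, T3=250.0, Td=300.0))
\end{verbatim}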
One could wonder whether such an exceptionally high efficiency at maximum power is only seldom attained for the kind of machines described here. To answer this question on a quantitative basis, we performed a random sampling of over $2\cdot10^4$ thermal machines, all delivering thermodynamic tasks on the same fixed body. In the simulations performed and reported in Fig. \ref{effmax2}, the machines work as quantum refrigerators delivering strong refrigeration using a semi-infinite slab. In this sampling, the machine-slab distance $z$ has been, for each machine, randomly drawn in the range $[0.9\,\mu \mathrm{m}, 100\,\mu \mathrm{m}]$, the wall temperature has been selected randomly in $T_W\in[50\,\mathrm{K}, 500\,\mathrm{K}]$ and, for each value of $T_W$, the slab temperature has been chosen at random in $T_S\in [T_W,T_W+500\,\mathrm{K}]$. The internal structure of the body is kept fixed during the simulations, with a frequency $\omega_B=0.1 \,\omega_S$ resonant with transition 2 of $M$. For each machine thus generated, we have then maximised the delivered power by modifying the two other machine frequencies over every possible value of $\omega_1 \in (\omega_2,\omega_S)$ and $\omega_3$ compatible with the condition $\omega_1+\omega_2=\omega_3$. Finally, once the configuration corresponding to the maximum power was obtained, we computed the efficiency of the process. A minimal sketch of this sampling procedure is given after Fig. \ref{effmax2}. Fig. \ref{effmax2} shows the histogram of the distribution of the ratio $\eta^m/\eta^C$ of efficiency at maximum power to the corresponding Carnot efficiency in the interval $[0,1]$ within these $2\cdot10^4$ random refrigerators. It is remarkable that around $50\%$ of these machines work at maximum power with efficiencies higher than the bound $0.75\,\eta^C$ in \cite{Correa2014} and that none of them have been found to work at maximum power with efficiencies lower than $0.6\,\eta^C$. Moreover, as can be clearly seen in Fig. \ref{effmax2}, a small but non-negligible fraction of them can reach $\eta^m\simeq0.98\eta^C$. \begin{figure}
\caption{Statistical occurrence of ratios $\eta^m/\eta^C$ for a random sampling of $2\cdot10^4$ thermal machines, always in resonance with the same body $B$. For each machine, $z$ has been randomly generated in the range $[0.9\,\mu \mathrm{m}, 100\,\mu \mathrm{m}]$, $T_W\in[50\,\mathrm{K}, 500\,\mathrm{K}]$ and, for each value of $T_W$, $T_S\in [T_W,T_W+500\,\mathrm{K}]$. The internal structure of the body is kept fixed during the simulations, with a frequency $\omega_B=0.1 \,\omega_S$ resonant with the transition 2 of $M$. The maximisation is performed over every possible value of $\omega_1 \in (\omega_2,\omega_S)$ and $\omega_3$ compatible with the condition $\omega_1+\omega_2=\omega_3$. Around $50\%$ of machines thus generated have $\eta^m>0.75\,\eta^C$.}
\label{effmax2}
\end{figure}
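The sampling procedure described above can be summarised by the following minimal Python sketch. The function \texttt{max\_power\_efficiency\_ratio} is a placeholder for the full OTE-field computation and the optimisation over $\omega_1$ and $\omega_3$, which is not reproduced here.
\begin{verbatim}
# Sketch of the random sampling of refrigerators described in the text.
# max_power_efficiency_ratio is a placeholder for the actual optimisation
# of the delivered power over omega_1 and omega_3 = omega_1 + omega_2,
# which requires the full OTE-field machinery and is not reproduced here.
import random

def max_power_efficiency_ratio(z, T_W, T_S):
    raise NotImplementedError("full OTE-field computation not included")

def sample_machines(n_machines=20000, seed=0):
    random.seed(seed)
    ratios = []
    for _ in range(n_machines):
        z = random.uniform(0.9e-6, 100e-6)     # machine-slab distance [m]
        T_W = random.uniform(50.0, 500.0)      # wall temperature [K]
        T_S = random.uniform(T_W, T_W + 500.0) # slab temperature [K]
        ratios.append(max_power_efficiency_ratio(z, T_W, T_S))
    return ratios
\end{verbatim}
A histogram of the returned ratios corresponds to the statistics reported in Fig. \ref{effmax2}.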
\section{Conclusions}\label{conclusions} This work introduces a new realization of a quantum thermal machine using atoms interacting with a single non-equilibrium electromagnetic field. By simply connecting two thermal reservoirs to \textit{macroscopic objects}, the field they radiate allows the atomic machine to achieve all quantum thermodynamic effects (heating, cooling, population inversion) without any direct external manipulation of atomic interactions. This overcomes the usual difficulty of connecting single transitions to thermal reservoirs, in a realistic and simple configuration where the field-mediated atomic interaction modifies stationary inter-atomic energy fluxes at will.
Despite the environmental dissipative effects, the atoms share steady quantum correlations \cite{Bellomo2013, Bellomo2014}, which we showed to be necessary for one atom to deliver a thermodynamic task on the other, uncovering a genuinely non-classical machine functioning. These features affect the task efficiency, which can be remarkably high even at maximum power, defying the known bounds for quantum machines based on ideal and independent blackbody reservoirs thanks to the fundamental role of the resonance with the real material of which the slab is made. Moreover, such a remarkably high efficiency at maximum power is strongly connected to the presence of a peak in the quantum correlations between the machine and the body, which represent the resource the machine uses for its tasks.
These results tackle major open problems on quantum thermal machines, paving the way for an efficient quantum energy management based on the potentialities of non-equilibrium and quantum features in atomic-scale thermodynamics.\\ \section*{Acknowledgments} The authors acknowledge fruitful discussions with N. Bartolo and R. Messina, and financial support from the Julian Schwinger Foundation.
\begin{appendix} \section{Resonant heat flux} In this appendix we demonstrate that the resonant energy exchange between $M$ and $B$ due to the field-mediated coherent interaction $H_{MB}$ consists only of heat. Following the approach of \cite{Weimer2008}, the dynamics of $M$ alone induced by the Hamiltonian interaction $H_{MB}$ comprises, in general, a Hamiltonian and a dissipative part and can be written as \begin{equation}\label{dotrhoa} \dot{\rho}_M=-\frac{i}{\hbar}\big[H_M+H_M^{\mathrm{eff}},\rho_M\big]+D_{MB}(\rho), \end{equation} where $D_{MB}$ is a non-unitary dissipative term for $M$ due to the interaction with $B$, which depends however on the total state $\rho=\rho_{MB}$ because, in general, the two subparts are correlated. $H_M^{\mathrm{eff}}$ is a renormalised free Hamiltonian of subsystem $M$ due to the interaction with $B$. Defining the two marginals $\rho_{M(B)}=\mathrm{Tr}_{B(M)}\rho$ and the correlation operator $C_{MB}=\rho-\rho_M\otimes\rho_B$, it is shown in \cite{Weimer2008} that \begin{eqnarray} H_M^{\mathrm{eff}}&=&\mathrm{Tr}_B\Big(H_{MB}(\mathbb{I}_M\otimes\rho_B)\Big),\label{HAeff}\\ D_{MB}(\rho)&=&-i \mathrm{Tr}_B\Big([H_{MB},C_{MB}]\Big).\label{DAB} \end{eqnarray} Introducing $H_{M1}^{\mathrm{eff}}$ as the part of $H_M^{\mathrm{eff}}$ that commutes with $H_M$ and $H_{M2}^{\mathrm{eff}}$ as the part that does not, directly from equation \eqref{dotrhoa} one has, for the internal energy of $M$, $U_M=\mathrm{Tr}\big((H_M+H_M^{\mathrm{eff}}) \rho\big)$, \begin{equation}\label{udot}\begin{split} &\dot{U}_M=\mathrm{Tr}_M\Big((H_M+H_{M1}^{\mathrm{eff}})D_{MB}(\rho)\Big)+\mathrm{Tr}_M\big(\dot{H}_{M1}^{\mathrm{eff}}\rho_M\big)\\ &-i\mathrm{Tr}_M\Big(\big[H_M+H_{M1}^{\mathrm{eff}},H_{M2}^{\mathrm{eff}}\big]\rho_M\Big). \end{split}\end{equation} It is customary to identify as heat the terms producing a change in the entropy of a subsystem; all the rest is identified as work $W$. Eq. \eqref{udot} can then be split into \begin{eqnarray} \dot{Q}_M&=&\mathrm{Tr}_M\Big((H_M+H_{M1}^{\mathrm{eff}})D_{MB}(\rho)\Big)\label{dotQA},\\ \dot{W}_M&=&\mathrm{Tr}_M\Big(\dot{H}_{M1}^{\mathrm{eff}}\rho_M-i\big[H_M+H_{M1}^{\mathrm{eff}},H_{M2}^{\mathrm{eff}}\big]\rho_M\Big).\label{dotWA} \end{eqnarray}
Introducing the symbols $c_M^{ij}=\langle i|\rho_M|j\rangle$ ($i\neq j$) for the coherences \textit{of the marginal} $\rho_M^{st}$ (thus different from the coherence $c_r$ introduced in Eq. \eqref{rhoX} of Section \ref{tasks}, which is a two-atom coherence), equation \eqref{HAeff} becomes \begin{equation} H_M^{\mathrm{eff}}\propto \mathrm{Re}(c_M^{10}). \end{equation} By tracing out the machine or the body degrees of freedom from equation \eqref{rhoX}, one can prove that the two stationary marginals $\rho_M^{st}$ and $\rho_B^{st}$ are always diagonal in the eigenbases of their respective free Hamiltonians, so that $c_M^{10}=0$. No renormalisation of the machine Hamiltonian therefore comes from the interaction with $B$, which means that equation \eqref{dotWA} vanishes, proving that no work is involved in machine-body energy exchanges. As for the heat, considering that $[H_{MB},\rho_M^{st}\otimes\rho_B^{st}]=0$, Eq. \eqref{dotQA} reduces to \begin{equation} \dot{Q}_M=-i\mathrm{Tr}_M\Big(H_M\mathrm{Tr}_B\big[H_{MB},\rho^{st}\big]\Big)=\dot{Q}_r, \end{equation} with the same $\dot{Q}_r$ given in Eq. \eqref{Qrcorr}. \section{Carnot limit}
In this appendix we deduce Eq. \eqref{carnot} of Section \ref{efficiency} for the Carnot efficiency in refrigeration along transition 2, and under the condition $\dot{Q}_d<0$. In addition to Eqs. \eqref{1stlaw} and \eqref{2law}, the condition $|\dot{Q}_n/\dot{Q}_m|=\omega_n/\omega_m$ gives for $n=1$ and $m=2$ the following \begin{equation}\label{ratio12} \frac{\dot{Q}_1}{\dot{Q}_2+\dot{Q}_r+\dot{Q}_d}=\frac{\omega_1}{\omega_2}. \end{equation} Solving Eqs. \eqref{1stlaw} and \eqref{ratio12} for $\dot{Q}_3$ and $\dot{Q}_d$ and using these solutions into \eqref{2law} one obtains for $\dot{Q}_r$ \begin{equation}\label{disqr} \dot{Q}_r\leq\dot{Q}_1\frac{T_d T_2}{T_d-T_2}\Bigg[\frac{1}{T_3}\Big(1+\frac{\omega_2}{\omega_1}\Big)-\frac{1}{T_1}-\frac{\omega_2}{\omega_1}\frac{1}{T_d}\Bigg]-\dot{Q}_2 \end{equation}
which, used in Eq. \eqref{eff} of Section \ref{efficiency}, gives a bound on $\eta_{ref}$ as a function of $\dot{Q}_1$ and $\dot{Q}_2$. Finally, using the fact that such a bound is a decreasing function of $\dot{Q}_2$, one obtains the Carnot efficiency as the limit for $\dot{Q}_2\rightarrow 0$, which turns out to be independent of $\dot{Q}_1$ and ultimately gives the first line of Eq. \eqref{carnot}. On the other hand, in the case $T_d\leq T_2$, one cannot obtain anything like Eq. \eqref{disqr}, and the only possibility for the machine to work without producing entropy is therefore to have vanishing heat flux from/to the body. This means $\dot{Q}_2=\dot{Q}_d=0$, which, inserted in the expression for the efficiency and using again $|\dot{Q}_n/\dot{Q}_m|=\omega_n/\omega_m$, leads to the second line of Eq. \eqref{carnot}. \end{appendix}
\end{document}
\begin{document}
\title{Laser-beam scintillations for weak and moderate turbulence}
\author{R.~A.~Baskov} \email{Email address: [email protected]} \affiliation{Institute of Physics of the National Academy of Sciences of Ukraine,\\
pr. Nauky 46, Kyiv-28, MSP 03028, Ukraine}
\author{O.~O.~Chumak}
\affiliation{Institute of Physics of the National Academy of Sciences of Ukraine,\\
pr. Nauky 46, Kyiv-28, MSP 03028, Ukraine}
\begin{abstract} The scintillation index is obtained for the practically important range of weak and moderate atmospheric turbulence. To study this challenging range, the Boltzmann-Langevin kinetic equation describing light propagation is derived from first principles of quantum optics, based on the technique of the photon distribution function (PDF) [G. P. Berman \textit{et al.}, Phys. Rev. A \textbf{74}, 013805 (2006)]. The paraxial approximation for laser beams reduces the collision integral for the PDF to a two-dimensional operator in the momentum space. Analytical solutions for the average value of the PDF as well as for its fluctuating constituent are obtained using an iterative procedure. The calculated scintillation index is considerably greater than that obtained within the Rytov approximation, even at moderate turbulence strength. An explanation of this behavior is proposed.
\end{abstract}
\maketitle
\section{Introduction}
Physics of light beam propagation in the Earth's atmosphere is of great interest for scientists and engineers, see \cite{Tatarskii1,Bar,Andrews,coro,fei}. This interest arises from applications in quantum and classical communications and remote sensing systems. The latest achievements in this field concern problems of quantum key distribution \cite{Capraro, Usenko}, propagation of entangled \cite{Ursin,Yin, Hosseinidehaj} and squeezed \cite{Peuntinger, Vasylyev} states, quantum nonlocality \cite{Semenov, Gumberidze}, quantum teleportation \cite{Ma, Ren}, tests of fundamental physical laws \cite{Rideout, Touboul}. In all these cases, random variations of the atmospheric refraction index distort the phase front of radiation causing intensity fluctuations (scintillations), beam wandering and increasing beam spreading. Scintillations are the most severe problem which manifests itself in a significant reduction of the signal-to-noise ratio (SNR) introducing degradation of the performance of laser communication systems.
A laser beam in the Earth's atmosphere is affected by turbulent eddies. Randomly distributed eddies act as sources of local index-of-refraction fluctuations. There are numerous beam-eddy ``collisions'' in the course of long-distance propagation. As a result, the radiation gradually acquires Gaussian statistics. The scintillation index, $\sigma^2$, which is defined in classical optics as the inverse SNR, asymptotically approaches the level $\sigma^2=1$. In this case, the intensity fluctuations are referred to as saturated \cite{Kravtsov}.
Scintillations are of importance for the design of reliable classical and quantum optical communication systems \cite{Andrews2, Erven}, remote sensing systems \cite{Rino,Churnside}, and adaptive optics \cite{Ribak}. This field of research also has applications in atmospheric physics, geophysics, ocean acoustics, planetary physics, and astronomy \cite{Tatarskii2}. The theoretical description of scintillation phenomena faces increasing computational complexity when one considers the parameter region of maximal optical beam intensity fluctuations. To overcome this problem, several phenomenological and semi-phenomenological approaches were developed, which utilize intensity distribution functions \cite{Jakeman}, phase screens \cite{Dashen-Wang}, and turbulence-spectrum approximations \cite{Marians}. The existing rigorous first-principles approaches, such as the method of smooth perturbations (the Rytov approximation) \cite{Tatarskii1}, the Huygens-Kirchhoff method \cite{banakh79}, and the path-integral method \cite{Das}, are applicable only to the asymptotic regimes of weak and strong optical turbulence. At the same time, maximum scintillations lie in the region of moderate turbulence.
The range of moderate turbulence is the most challenging for rigorous theoretical study. First, the transition from the statistics of a coherent laser beam to Gaussian statistics lies exactly in this region. Second, strong correlations of photon trajectories, which considerably enhance scintillations \cite{enha}, should also be taken into account here. The combined effect of these important factors can lead to maximal scintillations. Such an effect clearly manifests itself in various experiments, where this maximum may considerably exceed the saturation level \cite{Kravtsov,consortini,sedin}.
In the present paper we introduce a first-principles approach for the description of the weak and weak-to-moderate turbulence regimes, which remain the most challenging for analysis. The method is based on the technique of the photon distribution function (PDF) \cite{Chu}, which is derived from the first principles of quantum optics. This method is applicable to an arbitrary quantum state of light, including the coherent states that describe laser-radiation fields.
The PDF is an operator-valued function, $\hat{f}(\mathbf{r},\mathbf{q})$, of the position $\mathbf{r}$ and the wave vector $\mathbf{q}$. It retains the concept of the Wigner function \cite{Wigner} such that the integration with respect to $\mathbf{q}$ or $\mathbf{r}$ results in the field intensity operator $\hat{I}(\mathbf{r})$ or the photon-number operator $\hat{n}(\mathbf{q})$, respectively. The PDF can be found as a solution of the kinetic equation that accounts for random variations of the refractive index in the atmosphere. This approach has been originally introduced in the solid state physics (see, for example, Ref. \cite{chuZ}) and has also been successfully applied for a description of quantum radiation in waveguides \cite{sto,sto14}.
Application of the PDF method to the light propagation in the turbulent atmosphere has been considered in Refs. \cite{enha,Chu, Chumak wander}. It utilizes the approximation of the smoothly varying random force and is applicable only for restricted values of the turbulence parameters. In the present paper we derive a more general kinetic equation for the PDF introducing the collision integral and Langevin source of fluctuations. An approximate solution of this equation enables us to describe the beam characteristics beyond the Rytov approximation at the moderate range of turbulence, which was unreachable with the previous techniques.
To stress the significance of the present paper, it is worthwhile to recall the words of Dashen \cite{Das}. He considers ``the detailed behavior of the wave field at the boundaries between the unsaturated and saturated regimes'' to be ``the remaining problem'' in the physics of scintillation phenomena. We hope that our paper, as well as the previous one \cite{enha}, provides a deeper insight into the physics and the theoretical description of this important region.
The rest of this paper is organized as follows. In Sec. \ref{sec:pdf} we give a brief review of the PDF method. In Sec. \ref{sec:ble} and Appendix \ref{sec:appendix}, we explain the derivation of the collision integral and the corresponding Langevin source. In Sec. \ref{sec:scint_ind} and Appendix \ref{sec:appendix1}, we obtain an analytical formula for the scintillation index, which is represented by a multifold integral. In Sec. \ref{sec:discussion}, the results of numerical simulations are discussed. Concluding remarks are given in Sec. \ref{sec:conclusion}.
\section{Photon distribution function} \label{sec:pdf}
The photon distribution function is defined in analogy to the widely used solid state physics distribution functions \cite{chuZ} (the distributions for electrons, phonons, etc). This function is given by, see Ref. \cite{UJP}, \begin{equation}\label{1threee} \hat{f}({\bf r},{\bf q},t)=\frac 1V\sum_{\bf k}e^{-i{\bf kr}}b^\dag_{{\bf q}+ {\bf k}/2}b_{{\bf q}-{\bf k}/2}, \end{equation} where $b^\dag_{\bf q}$ and $b_{\bf q}$ are bosonic creation and annihilation operators of photons with the wave vector ${\bf q}$; $V\equiv L_xL_yL_z\equiv SL_z$ is the normalizing volume. We consider the laser beam propagating along the $z$ axis in the paraxial approximation. For this case the initial polarization of the beam remains almost undisturbed for a wide range of propagation distances, cf. Ref. \cite{stroh}.
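As a consistency check, integrating Eq. (\ref{1threee}) over the normalization volume and using $\frac 1V\int dV\, e^{-i{\bf kr}}=\delta_{{\bf k},0}$ gives the photon-number operator anticipated in the Introduction,
\[
\int dV\,\hat{f}({\bf r},{\bf q},t)=b^\dag_{\bf q}b_{\bf q}=\hat{n}({\bf q}),
\]
while the sum over ${\bf q}$ yields the intensity (photon-density) operator used below.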
The operator $\hat{f}({\bf r},{\bf q},t)$ describes the photon density in the phase space (the PDF in ${\bf r}{-}{\bf q}$ space). We consider the scenario in which the characteristic sizes of the spatial inhomogeneities of the radiation field are much greater than the optical wavelength $\lambda=(2\pi/q_0)$; here $q_0$ is the wave number corresponding to the central frequency of the radiation, $\omega _0=cq_0$. In this case, it is reasonable to restrict the sum in Eq. (\ref{1threee}) to the range of small $k$, i.e., $k< k_0$ with $k_0\ll q_0$. At the same time, the value of $k_0$ should be large enough to provide the desired accuracy for the description of the beam profile.
Evolution of the PDF $\hat{f}({\bf r},{\bf q},t)$ is governed by the Heisenberg equation \begin{equation}\label{2five} \partial_t \hat{f}({\bf r},{\bf q},t)=\frac 1{i\hbar }[\hat{f}({\bf r},{\bf q},t),\hat{H}], \end{equation} where \begin{equation}\label{3six} \hat{H}=\sum_{\bf q}\hbar\omega_{\bf q}b^\dag_{\bf q}b_{\bf q}-\sum_{\bf q,k}\hbar\omega_{\bf q}n_{\bf k}b^\dag_{\bf q}b_{\bf q+k} \end{equation} is the Hamiltonian of photons in a medium with a fluctuating refraction index $n({\bf r})=1+\delta n ({\bf r})$, where $\delta n ({\bf r})$ stands for the fluctuating part representing atmospheric inhomogeneities. The quantities $\hbar\omega_{\bf q}=\hbar cq$ and ${\bf c_q}=\frac{\partial \omega_{\bf q}}{\partial{\bf q}}$ are the photon energy and the photon velocity in vacuum, and $n_{\bf k}$ is the Fourier transform of the fluctuating refraction index
$\delta n({\bf r})$, defined by \begin{equation}\label{two} n_{\bf k}=\frac 1V\int dVe^{i{\bf kr}}\delta n({\bf r}). \end{equation}
By substituting the Hamiltonian \eqref{3six} into Eq. \eqref{2five}, the latter is rewritten as \begin{eqnarray}\label{5seven} \partial_t \hat{f}({\bf r},{\bf q},t)+{\bf c_q}\cdot\partial_{\bf r}\hat{f}({\bf r},{\bf q},t)-i\frac{\omega _0}{V}\sum_{{\bf k},{\bf k'}}e^{-i{\bf k\cdot r}}n_{{\bf k}^\prime}\nonumber\\ \times\big[b^\dag _{\bf q+ \frac {k}2}b_{\bf q-\frac{k}{2}+k^\prime}-b^\dag _{\bf q+ \frac {k}2-k^\prime}b_{\bf q-\frac{k}{2}} \big]=0. \end{eqnarray} The first two terms on the left-hand side describe the free-space propagation of a laser beam, and the last term arises from atmospheric inhomogeneity. The latter can be replaced by ${\bf F}({\bf r})\cdot\partial_{\bf q}\hat{f}({\bf r},{\bf q},t)$ if all three components of the turbulence wave vectors ${\bf k}^\prime$ are much smaller than the corresponding characteristic values of ${\bf q}$, i.e., if the difference of the functions in the square brackets of Eq. (\ref{5seven}) can be expressed through the corresponding derivative. The quantity ${\bf F}({\bf r})=\omega _0\partial_{\bf r}n({\bf r})$ is interpreted as a random force produced by atmospheric vortices \cite{Chu}. With this force, Eq. (\ref{5seven}) takes the form of the kinetic equation \begin{equation}\label{6seven1} \partial_t \hat{f}({\bf r},{\bf q},t)+{\bf c_q}\cdot\partial_{\bf r}\hat{f}({\bf r},{\bf q},t)+ {\bf F}({\bf r})\cdot\partial_{\bf q}\hat{f}({\bf r},{\bf q},t)=0. \end{equation} This equation resembles the collisionless Boltzmann equation with a smoothly varying momentum-independent force ${\bf F}({\bf r})$ acting on point-like particles.
The technique of the PDF (see Refs. \cite{Chu, enha, ChuSingle, Chumak wander, ChuPhase, sto, sto14}) is convenient for obtaining average parameters of the beam as well as for the description of wave-field fluctuations. The distribution function describes the photon density in the configuration-momentum phase space. A solution of the kinetic equation (\ref{6seven1}) with a smoothly varying fluctuating force has been obtained in Refs. \cite{Chu, enha, ChuSingle, Chumak wander, ChuPhase}. This simplified physical picture is justified only if the photon momentum \cite{name}, $\bf{q}$, is much greater than the inverse size of the eddies. All components of ${\bf q}$ should obey this requirement. In the paraxial approximation, the perpendicular components of the photon wave vector, $\bf q_\bot$, increase with the propagation time $t$ as $t^{1/2}$ \cite{Chu}, and the beam inevitably reaches the region of saturated scintillations as $t{\rightarrow}\infty$. This indicates that Refs. \cite{Chu, chuZ, Chumak wander, ChuSingle, ChuPhase} consider the strong turbulence regime, including the limiting case of saturation, rather than the regime of weak turbulence. The range where the random force can be considered a smoothly varying function extends towards smaller distances if a phase diffuser is used. The reason for this is that the phase diffuser increases the characteristic values of ${{\bf q}_\bot}$; see Refs. \cite{Ban55,Ban54,Chu} for more details.
\section{Boltzmann-Langevin equation} \label{sec:ble}
The scheme for the derivation of the kinetic equation (\ref{6seven1}), outlined in the previous section, can be justified when all components of the photon wave vector ${\bf q}$ are sufficiently large. The corresponding situation occurs for long-distance propagation or strong turbulence (see, for example, Sec. VI in \cite{Chu}). It should be emphasized that this is exactly the case in which direct computer simulation of beam propagation becomes problematic \cite{Gorshkov}. In what follows, we describe a more general approach that is free from this undesirable restriction.
In the kinetic equation (\ref{5seven}), the last term on the left-hand side describes the process of photon ``collisions'' with atmospheric inhomogeneities. The amplitude of this process is determined by $n_{\bf k^\prime}$, which is a random quantity with $\langle n_{\bf k^\prime}\rangle =0$. The two operators in the square brackets of Eq. (\ref{5seven}) also depend on ${\bf k}^\prime$. Their explicit dependence on the random refraction index can be obtained from the Heisenberg equations. One of them is given by
\begin{eqnarray}\label{7nine} \!\left\{\partial_t - i\left(\omega_{\bf q+\frac{k}{2}}-\omega_{\bf q-\frac{k}{2}+{k^\prime}}\right) \right\}b^\dag_{\bf q+\frac{k}{2}}b_{\bf q-\frac{k}{2}+k^\prime}=\nonumber\\ i\omega_0\sum_{{\bf {k}^{\prime\prime}}}n_{\bf k^{\prime\prime}}\bigg[b^\dag_{\bf q+\frac{k}{2}} b_{\bf q-\frac{k}{2}+k^{\prime}+{k}^{\prime\prime}}-b^\dag_{\bf q+\frac{k}{2}-{k}^{\prime\prime}}b_{\bf q-\frac{k}{2}+k^\prime}\bigg]. \end{eqnarray} Its solution can be written as
\begin{eqnarray}\label{8ten}
b^\dag_{\bf q+\frac{k}{2}}&&b_{\bf q-\frac{k}{2}+k^\prime}\bigg|_t=
e^{i(\omega_{\bf q+\frac{k}{2}}-\omega_{\bf q-\frac{k}{2}+{k^\prime}})(t-t_0)}\left(b^\dag_{\bf q+\frac{k}{2}}b_{\bf q-\frac{k}{2}+{k^\prime}}\right)\bigg|_{t_0}\nonumber\\ &&+i\omega_0\sum_{\bf {k}^{\prime\prime}}\int\limits_{t_0}^{t}dt^\prime e^{i(\omega_{\bf q+\frac{k}{2}}-\omega_{\bf q-\frac{k}{2}+{k'}})\left(t-t'\right)}n_{\bf {k}^{\prime\prime}}\nonumber\\
&&\times\bigg(b^\dag_{\bf q+\frac{k}{2}}b_{\bf q-\frac{k}{2}+{k'}+{k}^{\prime\prime}}-b^\dag_{\bf q+\frac{k}{2}-{k}^{\prime\prime}}b_{\bf q-\frac{k}{2}+{k'}}\bigg)\bigg|_{t^\prime}, \end{eqnarray}
where the subscripts $t_0$ and $t^\prime$ indicate the dependence of the corresponding operators on time.
In Eq. (\ref{8ten}) the interval $t-t_0$ is chosen to be large compared with the photon-eddy interaction time $\pi/ck^\prime$ and sufficiently short compared with the relaxation time $1/\nu$ caused by these interactions: \begin{equation}\label{9tenprime} \pi/ck^\prime \ll t- t_0\ll1/\nu . \end{equation}
Here $\nu$ is the collision frequency and the quantity $1/k^\prime$ describes the characteristic length of atmospheric inhomogeneities. In other words, the time hierarchy (\ref{9tenprime}) means that the duration of photon interaction with scatterers is much shorter than the time of free flight. This is a typical criterion ensuring applicability of the Boltzmann equation for the description of many-particle systems (see, for example, Ref. \cite{chuZ}) .
Substituting Eq. (\ref{8ten}) and a similar solution for the operator $b^\dag _{\bf q+ \frac {k}2-k^\prime}b_{\bf q-\frac{k}{2}}\big|_t$ into Eq. (\ref{5seven}), we obtain the kinetic equation for $\hat{f}({\bf r},{\bf q},t)$
\begin{equation}\label{10eleven} \partial_t \hat{f}({\bf r},{\bf {q}},t)+{\bf c_q}\cdot\partial_{\bf r}\hat{f}({\bf r},{\bf q},t)= \hat{K}({\bf r},{\bf q},t)- \hat{\nu}_{\bf q}\big \{ \hat{f}({\bf r},{\bf q},t)\} , \end{equation} where \begin{widetext} \begin{equation}\label{11elevven}
\hat{K}({\bf r},{\bf q},t){=}\frac{i\omega_0}{V}\sum_{{\bf k,k^\prime}}e^{-i{\bf k}\cdot{\bf r}}n_{\bf k^\prime}\big[e^{i(\omega_{\bf q+\frac{k}{2}}-\omega_{\bf q-\frac{k}{2}+k^\prime})(t-t_0)}\big(b^\dag_{\bf q{+}\frac{k}{2}}b_{\bf q{-}\frac{k}{2}{+}k'}\big){}\big|_{t=t_0}{-}
e^{i(\omega_{\bf q+\frac{k}{2}-k^\prime}-\omega_{\bf q-\frac{k}{2}})(t-t_0)}\big(b^\dag_{\bf q{+}\frac{k}{2}{-}k^\prime}b_{\bf q{-}\frac{k}{2}}\big)\big|_{t=t_0}\big], \end{equation} \begin{equation}\label{12twelwe} \hat{\nu}_{\bf q}\big \{ \hat{f}({\bf r},{\bf q},t)\}=\frac{2\pi\omega_{0}^{2}}{c}\int d{\bf k'_{\bot}}\psi({\bf k'_{\bot}})\big(\hat{f}({\bf r},{\bf q},t)-\hat{f}({\bf r},{\bf q+k'_{\bot}},t)\big). \end{equation} \end{widetext}
The notation $(_\bot)$ indicates components of the corresponding vector perpendicular to the $z$-axis, and $\psi ({\bf k'_{\bot}})=\frac V{(2\pi)^3}\langle|n_{\bf k'_{\bot}}|^2\rangle$. The value of $\psi ({\bf k})$ is given by the von Karman formula \begin{equation}\label{13twelwwe} \psi ({\bf k})=0.033C_n^2\frac {\exp(-(kl_0/2\pi
)^2)}{(k^2+L_0^{-2})^{11/6}}, \end{equation}
where the structure constant $C_n^2$ describes the strength of the index-of-refraction fluctuations, whereas $L_0$ and $l_0$ are usually referred to as the outer and inner radii of the turbulent eddies, respectively. These radii restrict the range of characteristic values of $\bf k'_{\bot}$. In atmospheric turbulence, $L_0$ may range from 1 to 100 m, and $l_0$ is on the order of a few millimeters. It is seen from Eqs. (\ref{10eleven})-(\ref{13twelwwe}) that the random quantity $\hat{K}({\bf r},{\bf q},t)$ depends linearly on $n_{\bf k^\prime}$, while $\hat{\nu}_{\bf q}$ depends only on the regular quantity $\langle|n_{\bf k'_{\bot}}|^2\rangle$. The contribution of the fluctuating part of $n_{\bf{k}'} n_{\bf{k}''}$ can be neglected (see Appendix \ref{sec:appendix} for more details).
The linear inhomogeneous equation (\ref{10eleven}) governs the evolution of the photon distribution in the phase space. The term $\hat{\nu}_{\bf q}\big \{ \hat{f}({\bf r},{\bf q},t)\}$ describes dissipation of the distribution function caused by the randomization of the photon wave vector ${\bf q_\bot}$. The term ``dissipation'' does not mean here that the total number of photons decreases. Actually, after summing up the collision term (\ref{12twelwe}) over $\bf q$, we get zero, which indicates that the photon number is conserved. The collision frequency $\nu$ can be estimated as $\frac{2\pi\omega_0^2}c\psi (k'_{\bot})k'^2_\bot$, where $ k'_\bot$ is the characteristic value of the momentum transfer.
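As a rough numerical illustration of this estimate, the following Python sketch evaluates it with the von Karman spectrum (\ref{13twelwwe}); all parameter values below are our own illustrative assumptions and are not taken from the text.
\begin{verbatim}
# Rough estimate of the collision frequency
#   nu ~ (2*pi*omega_0^2 / c) * psi(k_perp) * k_perp^2
# with the von Karman spectrum of Eq. (13). All values are assumptions.
import math

c = 3.0e8                      # speed of light [m/s]
lam = 1.55e-6                  # optical wavelength [m] (assumed)
omega0 = 2.0 * math.pi * c / lam
Cn2 = 1.0e-14                  # structure constant [m^(-2/3)] (assumed)
L0, l0 = 10.0, 5.0e-3          # outer and inner scales [m] (assumed)

def psi(k):
    return 0.033 * Cn2 * math.exp(-(k * l0 / (2.0 * math.pi))**2) \
           / (k**2 + L0**-2)**(11.0 / 6.0)

k_perp = 2.0 * math.pi / L0    # characteristic momentum transfer (assumed)
nu = 2.0 * math.pi * omega0**2 / c * psi(k_perp) * k_perp**2
print("nu ~ %.3g 1/s" % nu)
\end{verbatim}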
The Langevin source of fluctuations in Eq. (\ref{10eleven}) is represented by $\hat{K}({\bf r},{\bf q},t)$. Random photon-eddy ``collisions'' (see Refs. \cite{chuZ} and \cite{Kogan}) generate the Langevin source. Within the time interval restricted by the inequality (\ref{9tenprime}), the constituents on the right-hand side of Eq. (\ref{11elevven}) have a simple oscillating time dependence. Due to this favorable circumstance, the calculation of the two-time correlation function $\langle \hat{K}({\bf r},{\bf q},t)\hat{K}({\bf r}^\prime,{\bf q}^\prime,t^\prime)\rangle$ reduces to obtaining the average value of the operator products defined at the same time, $t_0$. The source vanishes after averaging of Eq. (\ref{10eleven}). Then the remaining homogeneous equation for $\langle \hat{f}({\bf r},{\bf q},t)\rangle$ can be used for obtaining parameters of the beam at any distance. [In what follows, we use the notation $f({\bf r},{\bf q},t)$ for $\langle \hat{f}({\bf r},{\bf q},t)\rangle$.] For long-distance propagation, where \begin{equation}\label{14twe}
q_{\bot}\gg k'_{\bot}, \end{equation} the collision integral reduces to the differential form \begin{equation}\label{15twell} \hat{\nu}_{\bf q}\big \{ \hat{f}({\bf r},{\bf q},t)\}=-\frac{\pi\omega_{0}^{2}}{c}\int d{\bf k'_{\bot}}\psi({\bf k'_{\bot}})\bigg(\frac \partial{\partial{\bf q}} {\bf k^\prime}_\bot \bigg)^2\hat{f}({\bf r},{\bf q},t), \end{equation} which describes a diffusion-like motion in the wave vector space.
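The passage from Eq. (\ref{12twelwe}) to Eq. (\ref{15twell}) is a second-order Taylor expansion in the momentum transfer, written here for convenience:
\[
\hat{f}({\bf r},{\bf q},t)-\hat{f}({\bf r},{\bf q+k'_{\bot}},t)\simeq
-\big({\bf k}'_\bot\cdot\partial_{\bf q}\big)\hat{f}({\bf r},{\bf q},t)
-\frac 12\big({\bf k}'_\bot\cdot\partial_{\bf q}\big)^2\hat{f}({\bf r},{\bf q},t);
\]
the term linear in ${\bf k}'_\bot$ vanishes upon integration over the isotropic spectrum $\psi({\bf k'_{\bot}})$, and the quadratic term reproduces the diffusion form (\ref{15twell}).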
The kinetic equation with $\hat{K}({\bf r},{\bf q},t)=0$ and the collision term, which is similar to (\ref{15twell}), was used in Refs. \cite{Yang51} and \cite{berman95} to investigate the propagation of relativistic charged particles through an inhomogeneous medium (for example, through a foil). The similarity arises from equivalence of the small-scattering-angle approximation, used in Refs. \cite{Yang51}, \cite{berman95}, and the paraxial approximation, used in this paper. Although the linear energy-momentum relationship holds for both the photons and ultrarelativistic particles, the microscopic scattering mechanisms are different for those cases.
\section{Scintillation index} \label{sec:scint_ind} Equation (\ref{10eleven}) can be used to study the effect of photon multiple scattering on their distribution in the phase space. Summation of $\hat{f}({\bf r},{\bf q},t)$ over ${\bf q}$ results in a spatio-temporal photon distribution \begin{equation} \label{16twel} \hat{I}({\bf r},t)=\sum_{\bf q}\hat{f}({\bf r},{\bf q},t),
\end{equation} which includes an average value, $\langle \hat{I}({\bf r},t)\rangle\equiv I({\bf r},t) $, and fluctuations, $\delta \hat{I}({\bf r},t)$, \begin{eqnarray} \label{17svnt}
\hat{I}({\bf r},t)&{=}& I({\bf r},t)+\delta \hat{I}({\bf r},t)\nonumber\\
&{=}&\sum_{\bf q} f({\bf r},{\bf q},t)+\sum_{\bf q}\delta \hat{f}({\bf r},{\bf q},t),
\end{eqnarray} where $\delta \hat{f}({\bf r},{\bf q},t)=\hat{f}({\bf r},{\bf q},t)-f({\bf r},{\bf q},t)$.
To obtain $I({\bf r},t)$, one needs to solve the averaged Eq. (\ref{10eleven}), accounting for the boundary conditions at the aperture plane and using $\langle \hat{K}({\bf r},{\bf q},t)\rangle=0$.
The scintillation index is defined by \begin{equation}\label{18fifte} \sigma^2=\frac {\langle : \delta \hat{I}^2({\bf r}):\rangle}{ I({\bf r})^2}=\frac {\langle : \hat{I}^2({\bf r}):\rangle-I({\bf r})^2}{ I({\bf r})^2},
\end{equation} where the symbol $\{:..:\}$ denotes the normal ordering of the creation and annihilation operators. The definition (\ref{18fifte}) does not include the contribution of shot noise. This noise enters the fluctuations of the detector counts and tends to be important in problems of quantum optics. The shot-noise term is linear in the photon density; it can easily be excluded from experimental data to facilitate the comparison with the theoretical calculation.
Calculation of Eq. (\ref{18fifte}) is more intricate. It follows from Eqs. (\ref{17svnt}) and (\ref{18fifte}) that $\sigma^2$ is a quadratic form of PDF fluctuations, $\langle\delta \hat{f}({\bf r},{\bf q},t)\delta \hat{f}({\bf r}^\prime,{\bf q}^\prime,t^\prime)\rangle$. Hence, the calculation of $\sigma^2$ is possible if the correlation function of photon distributions is known. To simplify the problem, we use an approximate iterative scheme.
\subsection{First order approximation} The approximation is based on the assumption that, close to the transmitter aperture, the collision term does not significantly perturb the PDF and can be omitted. In this case, the average value of the PDF satisfies the equation \begin{equation}\label{19ninn} (\partial_t+{\bf c_q}\cdot\partial_{\bf r})f_0({\bf r},{\bf q},t)=0. \end{equation}
The fluctuating part of $\delta \hat{f}({\bf r},{\bf q},t)$ is governed by the similar equation supplemented with the Langevin source $\hat{K}$
\begin{equation}\label{20nint}
(\partial_t+{\bf c_q}\cdot\partial_{\bf r})\delta \hat{f}({\bf r},{\bf q},t)=\hat{K}({\bf r},{\bf q},t).
\end{equation}
Equations (\ref{19ninn}) and (\ref{20nint}) follow from Eq. (\ref{10eleven}) after replacing $\hat{f}$ by $f_0+\delta \hat{f}$. The Langevin source linearly depends on $n_{{\bf k}_\bot}$ while the neglected collision integral is quadratic in $n_{{\bf k}_\bot}$. Therefore, Eqs. (\ref{19ninn}) and (\ref{20nint}) can be interpreted as the lowest-order expansions of Eq. (\ref{10eleven}) in powers of $n_{{\bf k}_\bot} $.
The general solution of Eq. (\ref{20nint}) is represented by two terms
\[\delta \hat{f}({\bf r},{\bf q},t)=\delta \hat{f}_0({\bf r_q}(t'),{\bf q},t')|_{t'=0}+\delta \hat{f}_1({\bf r,q},t),\]
where ${\bf r_q}(t^\prime)={\bf r}-{\bf c_q}(t-t')$ and \begin{equation}\label{21twnt}
\delta \hat{f}_1({\bf r,q},t)=\int\limits_{0}^{t}dt'\hat{K}({\bf r_q}(t'),{\bf q},t'). \end{equation}
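One can check directly that Eq. (\ref{21twnt}) solves Eq. (\ref{20nint}): since ${\bf r_q}(t')={\bf r}-{\bf c_q}(t-t')$ depends on ${\bf r}$ and $t$ only through the combination ${\bf r}-{\bf c_q}t$, the integrand is annihilated by $\partial_t+{\bf c_q}\cdot\partial_{\bf r}$ for every fixed $t'$, and the only surviving contribution comes from the upper limit of integration,
\[
(\partial_t+{\bf c_q}\cdot\partial_{\bf r})\,\delta \hat{f}_1({\bf r,q},t)=\hat{K}({\bf r_q}(t'),{\bf q},t')\big|_{t'=t}=\hat{K}({\bf r},{\bf q},t).
\]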
We consider the aperture plane as the starting point of the photon trajectories (at $t'=0$). The paraxial approximation imposes a set of restrictions on the wave vectors: $q_z{\sim}q_0{\gg}q_\bot,k_\bot,k_\bot^\prime$. Then $z_{\bf q}(t^\prime =0)=z-ct=0$.
The term, $\delta \hat{f}_0({\bf r_q}(t'),{\bf q},t')|_{t'=0}$, describes the evolution of PDF fluctuations in vacuum. In what follows, we neglect fluctuations of the incident light. In this case
$\delta \hat{f}_0({\bf r_q}(t'),{\bf q},t')|_{t'=0}=0$, and only the term $\delta \hat{f}_1({\bf r,q},t)$ is responsible for the non-zero value of the scintillation index $\sigma^2$ at small propagation times $t$. The scintillation index is then given by \begin{widetext}
\begin{equation}\label{22twnt1} \sigma^2=\frac {\sum_{\bf q, q'}\langle:\delta \hat{f}({\bf r},{\bf q},t)\delta \hat{f}({\bf r},{\bf q}^\prime,t):\rangle }{ (\sum_{\bf q}f_0({\bf r},{\bf q},t))^2}=\frac {\sum_{\bf q, q'}\int\limits_{0}^{t}\int\limits_{0}^{t}dt'dt''\langle :\hat{K}({\bf r_q}(t'),{\bf q},t')\hat{K}({\bf r_{q^\prime}}(t''),{\bf q'},t''):\rangle }{ (\sum_{\bf q}f_0({\bf r},{\bf q},t))^2}, \end{equation} where \begin{equation}\label{23twntx}
f_0({\bf r},{\bf q},t)= f_0({\bf r_q}(0),{\bf q},0),\quad
\sum_{\bf q} f_0({\bf r},{\bf q},t) \equiv I_0({\bf r},t)={1\over V} \sum_{\bf q,k}e^{-i{\bf k}({\bf r-c_q}t)}\langle b^\dag_{{\bf q+\frac k2}} b_{{\bf q-\frac k2}}\rangle |_{t=0}. \end{equation} \end{widetext} The first equation in (\ref{23twntx}) means that the left-hand-side term satisfies both the collisionless kinetic equation (\ref{19ninn}) and the boundary conditions at the aperture. The value of $ I_0({\bf r},t)$ is equal to photon density in the absence of turbulence.
The numerator in the right-hand side of Eq. (\ref{22twnt1}) can be calculated using the explicit term (\ref{11elevven}) for $\hat{K}({\bf r},{\bf q},t)$ and meeting boundary conditions (see App. \ref{sec:appendix1}). Then the scintillation index linearly depends on $\langle|n_{\bf k_\bot}|^{2}\rangle$ and reduces to \begin{equation}\label{35twnt66} \sigma ^2=\sigma _1^2L(z,\rho_0,\rho_1), \end{equation} where $\sigma _1^2=1.23C_n^2q_0^{7/6}z^{11/6}$ is the Rytov variance,
$\rho _{0,1}^2={r_{0,1}^2q_0}/z $, $r_0$ is the initial radius of the beam, $r^2_1=r_0^2/(1+2r_0 ^2\lambda _c^{-2})$, the quantity $\lambda _c$ describes the effect of the phase diffuser, and $L(z,\rho_0,\rho_1)$ is the double integral \begin{eqnarray}\label{36twnt7} L(z,\rho_0,\rho_1)&=&4.24\int\limits _0^1d\tau \int\limits _0^\infty d\chi\, \chi^{-8/3}\exp\Bigg\{-\chi^2\Bigg[\frac {q_0l_0^2}{4\pi ^2z}+\tau ^2\frac {\rho_0^2+\rho_1^2}{4+\rho_0^2\rho_1^2}\Bigg]\Bigg\}\nonumber\\ &&\times\sin^2\Bigg(\frac {\tau \chi^2}2-\frac {2\tau ^2\chi^2}{4+\rho _0^2\rho _1^2}\Bigg). \end{eqnarray}
Equations (\ref{35twnt66}) and (\ref{36twnt7}) were derived in \cite{Chu} using a different approach. It follows from these equations that, in the limit of a large initial beam radius ($\rho_0,\rho_1{\rightarrow}\,\infty$) and an infinitely small inner scale of turbulence ($l_0{\rightarrow}\,0$), we recover the result of the Rytov theory ($\sigma ^2=\sigma _1^2$), because $L{\rightarrow}\,1$.
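A direct numerical evaluation of Eqs. (\ref{35twnt66}) and (\ref{36twnt7}) is straightforward. The following Python sketch uses a simple two-dimensional trapezoidal quadrature; all parameter values are illustrative assumptions and are not those used later in the figures.
\begin{verbatim}
# Numerical sketch of sigma^2 = sigma_1^2 * L(z, rho_0, rho_1),
# Eqs. (35)-(36). Parameter values are illustrative assumptions.
import math
import numpy as np

lam = 1.55e-6                 # wavelength [m] (assumed)
q0 = 2.0 * math.pi / lam      # central wave number [1/m]
z = 1.0e3                     # propagation distance [m] (assumed)
Cn2 = 1.0e-14                 # structure constant [m^(-2/3)] (assumed)
l0 = 5.0e-3                   # inner scale [m] (assumed)
r0 = 0.01                     # initial beam radius [m] (assumed)
r1 = r0                       # no phase diffuser: r1^2 = r0^2

rho0_sq = r0**2 * q0 / z
rho1_sq = r1**2 * q0 / z
sigma1_sq = 1.23 * Cn2 * q0**(7.0/6.0) * z**(11.0/6.0)  # Rytov variance

tau = np.linspace(0.0, 1.0, 400)
chi = np.linspace(1.0e-6, 40.0, 4000)   # upper cut-off: integrand is
T, X = np.meshgrid(tau, chi, indexing='ij')  # exponentially suppressed
denom = 4.0 + rho0_sq * rho1_sq
a = q0 * l0**2 / (4.0 * math.pi**2 * z) + T**2 * (rho0_sq + rho1_sq) / denom
phase = T * X**2 / 2.0 - 2.0 * T**2 * X**2 / denom
F = X**(-8.0/3.0) * np.exp(-X**2 * a) * np.sin(phase)**2
L = 4.24 * np.trapz(np.trapz(F, chi, axis=1), tau)
print("L =", L, " sigma^2 =", sigma1_sq * L)
\end{verbatim}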
\subsection{Collision term in average intensity}
The numerator as well as the denominator in Eq. (\ref{22twnt1}) are derived using only the first non-vanishing iterative terms. Extension of the theory towards moderate turbulence requires accounting for the collision term $-\hat{\nu}\big \{ \hat{f}({\bf r},{\bf q},t)\}$. Following the iterative procedure, we substitute the approximate value of the PDF, given by Eq. (\ref{23twntx}), into the collision term of Eq. (\ref{10eleven}). Then the right-hand side of Eq. (\ref{10eleven}) is considered as a known function. After averaging the modified equation, we obtain \begin{equation}\label{24elevenx} (\partial_t +{\bf c_q}\cdot\partial_{\bf r}) f_1({\bf r},{\bf q},t)=-\hat{\nu}_{\bf q}\big \{ f_0({\bf r},{\bf q},t)\}, \end{equation} where $ f_1$ is the first non-vanishing term generated by the collision integral. The solution of Eq. (\ref{24elevenx}), obeying zero boundary conditions, is given by \begin{equation}\label{25therty}
f_1({\bf r},{\bf q},t)=-\int\limits _0^tdt^\prime\hat{\nu}_{\bf q}\{ f_0({\bf r_q}(t^\prime),{\bf q},t ^\prime)\}. \end{equation} The contribution of $ f_1({\bf r},{\bf q},t)$ into the total photon density is given by
\begin{eqnarray}\label{26twnt}
I_1({\bf r},t) \equiv \sum_{\bf q} f_1&&({\bf r},{\bf q},t)=-\frac{\omega_0^2t}{cS}\sum_{\bf q,k,k_\bot^\prime}\langle|n_{\bf k_\bot^\prime}|^2\rangle e^{-i{\bf k}({\bf r-c_q}t)}\nonumber\\
&&\times\bigg[1-\frac {\sin({\bf kc_{k_\bot ^\prime} }t)}{{\bf kc_{k_\bot ^\prime} }t}\bigg]\langle b^\dag_{{\bf q+\frac k2}} b_{{\bf q-\frac k2}}\rangle|_{t=0}. \end{eqnarray}
Equation (\ref{26twnt}) accounts for the beam broadening caused by atmospheric eddies. Averaging of each factor in the sum can be performed independently because of the absence of correlations between the source fluctuations and the refractive index fluctuations.
Two quantities, $ I_0({\bf r},t)$ and $ I_1({\bf r},t)$, are zeroth- and first-order terms of the development of average photon density in powers of $\langle|n_{\bf k_\bot}|^2\rangle$, respectively.
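As a consistency check (added here; it is only implicit in the text), one can verify that Eq. (\ref{25therty}) indeed solves Eq. (\ref{24elevenx}). The integrand in Eq. (\ref{25therty}) depends on ${\bf r}$ and $t$ only through ${\bf r_q}(t^\prime)={\bf r}-{\bf c_q}(t-t^\prime)$, which is annihilated by $(\partial_t +{\bf c_q}\cdot\partial_{\bf r})$, while the collision operator acts only on the momentum argument [cf. Eq. (\ref{29twnt9yy})]. Hence only the differentiation of the upper integration limit survives,
\[ (\partial_t +{\bf c_q}\cdot\partial_{\bf r})\, f_1({\bf r},{\bf q},t)=-\hat{\nu}_{\bf q}\big\{ f_0({\bf r_q}(t),{\bf q},t)\big\}=-\hat{\nu}_{\bf q}\big\{ f_0({\bf r},{\bf q},t)\big\}, \]
since ${\bf r_q}(t)={\bf r}$; the zero initial value $f_1|_{t=0}=0$ is manifest as well.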
\subsection{Second order $\delta \hat{f}_{2}$ and combined effect of fluctuations $\delta \hat{f}_{1}{\cdot}\delta \hat{f}_{2}$}
The second iterative term for the fluctuations of the PDF, $\delta \hat{f}_2$, obeys the equation \begin{equation}\label{27ninty} \partial_t \delta \hat{f}_2({\bf r},{\bf q},t)+{\bf c_q}\cdot\partial_{\bf r}\delta \hat{f}_2({\bf r},{\bf q},t)=-{\hat \nu}_{\bf q}\{\delta \hat{f}_1({\bf r_q},{\bf q},t)\}, \end{equation} where the function $\delta \hat{f}_1$, given by Eq. (\ref{21twnt}), enters the collision term. The solution of Eq. (\ref{27ninty}) is \begin{equation}\label{28twnt9y} \delta \hat{f}_2({\bf r},{\bf q},t)=-\int\limits_{0}^{t}dt'\hat{\nu}_{\bf q} \{\delta \hat{f}_1({\bf r_q}(t^\prime),{\bf q},t^\prime)\}, \end{equation} where the explicit form of the collision integral is given by \begin{eqnarray}\label{29twnt9yy}
&{{\hat \nu}_{\bf q}\{\delta \hat{f}_1({\bf r_q}(t^\prime),{\bf q},t')\} =\frac{L_z\omega_0^2}{c} \sum\limits_{{\bf k'_\bot}} \langle |n_{\bf k'_\bot}|^2\rangle}\nonumber\\ &\times\big[\delta \hat{f}_1({\bf r_q(t^\prime)},{\bf q},t')-\delta \hat{f}_1({\bf r_q(t^\prime)},{\bf q+k'_\bot},t')\big ]. \end{eqnarray}
To proceed, let us consider the combined effect of the fluctuations $\delta \hat{f}_{1,2}({\bf r},{\bf q},t)$ on $\sigma^2$. The contributions of $\delta \hat{f}_{1,2}$ to the photon density are given by $\sum\limits_{\bf q}(\delta \hat{f}_1({\bf r},{\bf q},t)+\delta \hat{f}_2({\bf r},{\bf q},t))$. This sum includes terms that are linear and cubic in $n_{\bf{k_\bot}}$. The average square of this sum includes the term
\begin{eqnarray}\label{37p}
&\sum_{{\bf q,q}_1}&\langle\delta\hat{f}_1({\bf r},{\bf q},t){\cdot}{\delta} \hat{f}_2({\bf r},{{\bf q}_1},t)+\delta \hat{f}_2({\bf r},{\bf q},t)\cdot\delta \hat{f}_1({\bf r},{{\bf q}_1},t)\rangle\nonumber\\
&=&{2\sum_{{\bf q,q}_1}\langle\delta \hat{f}_1({\bf r},{\bf q},t)\cdot\delta \hat{f}_2({\bf r},{{\bf q}_1},t)\rangle}
\end{eqnarray}
which is quadratic in $\langle|n_{{\bf k}_\bot}|^2 \rangle$. To obtain $\sigma^2$, we keep this term and neglect terms of order $O(\langle|n_{\bf k_\perp}|^2\rangle^3)$. Then, using Eqs. (\ref{21twnt}) and (\ref{28twnt9y}), we obtain the explicit expression for Eq. (\ref{37p}). It is given by
\begin{eqnarray}\label{38p}
2&\sum\limits_{{\bf q,q}_1}&\langle\delta \hat{f}_1({\bf r},{\bf q},t)\cdot\delta \hat{f}_2({\bf r},{{\bf q}_1},t)\rangle\nonumber\\
&=&\frac{2\omega_0^4}{c^2S^2}\sum_{\substack{{\bf q,k,k^\prime} \\ {\bf q_1,k_1,k^{\prime\prime}}}}\langle|n_{{\bf k}^\prime}|^2\rangle\langle| n_{{\bf k}^{\prime\prime}}|^2\rangle\int\limits _0^td\tau\nonumber\\
&\times&\int\limits _\tau^td\tau_1 e^{-i{\bf k\cdot(r-c_q\tau)}-i{\bf k}_1\cdot({\bf r-c_q}_1 \tau_1)}\nonumber\\
&\times&\big[1-e^{-i{\bf k\cdot c_{k^{\prime}}}\tau}\big]\big[1-e^{i{\bf k\cdot c_{k^{\prime\prime}}}\tau_1}\big]\big[1-e^{-i{\bf k_1\cdot c_{k^{\prime\prime}}}\tau_1}\big]\nonumber\\
&\times&\big\langle b^\dag_{{\bf q+k}/2} b^\dag_{{\bf q_1+k_1}/2} b_{{\bf q-k}/2+{\bf k^{\prime\prime}}} b_{{\bf q_1-k_1}/2-{\bf k^{\prime\prime}}}\big\rangle\big|_{t-\tau_1},
\end{eqnarray}
where the operators in the angle brackets depend on time as in the absence of turbulence.
The summation in Eq. (\ref{38p}) runs over the components of the vectors ${\bf q},{\bf q}_1,{\bf k},{\bf k}_1,{\bf k}^\prime,{\bf k}^{\prime\prime}$ which are perpendicular to the $z$-axis (the labels ($_\bot$) are omitted for brevity). The components parallel to the $z$-axis are given by
\begin{equation}\label{39p}
q_z=q_{1z}=q_0,\quad k_z=k_{1z}=k^\prime_z=k^{\prime\prime}_z=0.
\end{equation}
The relations (\ref{39p}) can be derived from Eq. (\ref{32twnt5xx}).
The conditions $k_z^\prime =k_{z}^{\prime\prime}=0$ are consistent with the Markov approximation \cite{Tatarskii1,Fante1} (which is not used here), in which the index-of-refraction fluctuations, $\delta n({\bf r})$, are assumed to be delta-function correlated in the direction of propagation:
\[\langle\delta n({\bf r}_\bot,z)\delta n({\bf r}^\prime_\bot,z^\prime)\rangle\sim \delta (z-z^\prime).\] In this case, the turbulent eddies look like flat disks oriented normally to the propagation path. At first sight, this representation of the correlation function seems unrealistic because the atmosphere is assumed to be statistically homogeneous and isotropic. The paradox is explained by the effect of relativistic length contraction (Lorentz contraction) of moving objects. The relative motion of the atmosphere towards photons results in a zero value of correlation length in the direction of motion.
The effect of turbulence comes only from the ``diagonal'' components $\langle|n_{{\bf k}^\prime_\bot}|^2\rangle$ and $\langle|n_{{\bf k}^{\prime\prime}_\bot}|^2\rangle$ of the correlation function. As before, this is the result of statistical homogeneity of the turbulent atmosphere.
The final result of this Section is represented by \begin{equation}\label{40p}
\sigma^2=\frac{\sum\limits_{{\bf q},{\bf q}_1}\langle\delta \hat{f}_1({\bf r},{\bf q},t)[\delta \hat{f}_1({\bf r},{\bf q}_1,t)+2\delta \hat{f}_2({\bf r},{\bf q}_1,t) ]\rangle}{\big(\sum\limits_{\bf q} f_0({\bf r},{\bf q},t){+} f_1({\bf r},{\bf q},t)\big)^2},
\end{equation}
where the numerator and denominator are defined by Eqs. (\ref{22twnt1})-(\ref{36twnt7}), (\ref{26twnt}), (\ref{37p})-(\ref{39p}) and (\ref{32twnt5xx})-(\ref{34twnt6x}). Bringing together analytical and numerical calculations, we obtain $\sigma^2$ for different experimental conditions. Also, it is possible to compare the scintillation index obtained by employing different numbers of iteration steps as described in this section.
\begin{figure}
\caption{(Color online) Scintillations as a function of the Rytov parameter. Results for different theoretical approaches are qualitatively compared with experimental data. Shown are the theoretical results of the current paper (solid line) [Eq. (\ref{40p})], the Rytov approach (dotted line) [Eq. (\ref{35twnt66})], the asymptotic formulas for the Huygens-Kirchhoff method \cite{banakh79} (dashed lines), and the results of the approach developed by the authors in Ref. \cite{enha} (dash-dotted line). The inset shows the typical experimental $\sigma^2$ for the considered atmospheric conditions (adopted from Ref. \cite{consortini} for $4\,\text{mm}<l_0\leq7\,\text{mm}$). Parameters for the theories: $l_0=6.3\,$mm, $q_0=1.29\times10^{7}\,\text{m}^{-1}$, $r_0=0.01\,\text{m}$, $z=1200\,\text{m}$. The shaded area shows the parameter region considered in the current article.}
\label{fig:Experiment}
\end{figure}
\begin{figure}
\caption{(Color online) Scintillation index for coherent beams vs. propagation distance $z$. In the upper graph, dash-dotted curves are obtained using the Rytov approach [Eq. (\ref{35twnt66})]; solid curves are obtained with the collision term taken into account [Eq. (\ref{40p})]; dashed curves display the results obtained in Ref. \cite{enha} (see their Figs. 1 and 2), where the correlation of photon trajectories is accounted for. The shaded area in the upper graph is enlarged in the lower graph. The inner turbulence scale is $\frac{l_0}{2\pi}=10^{-3}\,\text{m}$ and the optical wave number is $q_0=10^{7}\,\text{m}^{-1}$.\\}
\label{fig:IntCollvsCrossCorell}
\end{figure}
\section{Results and discussion} \label{sec:discussion}
A complete theory of scintillations does not exist yet. At the same time, there are well-justified solutions in the limiting cases of weak ($\sigma_1^2\ll1$) and strong ($\sigma_1^2\gg1$) turbulence. The kinetic equation, in which beam scattering is described by the collision integral, is applicable for any Rytov variance $\sigma_1^2$ with the exception of very short distances comparable to the typical eddy size. An exact solution of this equation is problematic. Therefore, we restrict the numerical solution to moderate values of $\sigma_1^2$ ($\sigma_1^2\leq 0.85$, see the shaded area in Fig. \ref{fig:Experiment}, and $\sigma_1^2\leq 0.75$ for the other figures) and use the iteration scheme described in Sec. \ref{sec:scint_ind}. At the same time, this range is appreciably wider than the validity range of the Rytov approach, $\sigma_1^2<0.3$ \cite{Fante1}.
Figure \ref{fig:Experiment} compares the scintillation index calculated within the Boltzmann-Langevin approach with other theoretical approaches and with typical experimental data adopted from Consortini \textit{et al.} \cite{consortini}. Although the original data of Ref. \cite{consortini} were collected for spherical waves while the theory deals with plane waves, we propose a qualitative comparison of the results to illuminate the peculiarities of scintillations and the advantages of our method for their description. Naturally, for small values of the Rytov parameter ($\sigma_1^2\leq 0.25$) our result coincides with the asymptotics of the Huygens-Kirchhoff method and of the Rytov-like method, but it differs dramatically for larger values, showing the same increasing tendency as the experimental data in the weak-to-moderate turbulence regime. For the sake of completeness, we also provide theoretical results from the side of large values of the Rytov parameter, calculated within the approach of Ref. \cite{enha} and the Huygens-Kirchhoff approach. We observe that the Huygens-Kirchhoff method provides only a limited description for strong turbulence, while the approach of Ref. \cite{enha} gives a better description of the scintillation index, extending deeper into the range of moderate turbulence. Moreover, the results of Ref. \cite{enha} show a tendency to mesh with the results of the current paper, plausibly reproducing the overall behavior of scintillations in the cited experiment. \begin{figure}
\caption{(Color online) Scintillation index for a coherent beam vs. propagation distance $z$ for different initial radii of the beam. The rest of the parameters are the same as in Fig. \ref{fig:IntCollvsCrossCorell}. The curves on the left side are obtained using the present approach [Eq. (\ref{40p})]; the curves on the right side are obtained using the approach developed in Ref. \cite{enha}.}
\label{fig:DiffRadius}
\end{figure}
To take a closer look at our results, we provide a comparison with the results of the Rytov approach under different configurations of the atmospheric channel (Fig. \ref{fig:IntCollvsCrossCorell}). Again, for small values of $\sigma^2$ there is good agreement between the data obtained within the two approaches (enlarged shaded area in the lower graph), while for greater values of $\sigma^2$ we see not only numerical discrepancies but also different growth tendencies of $\sigma^2(z,C_n^2)$ for the considered cases. The comparison with the results of the previous paper \cite{enha} for the moderate-to-strong turbulence regime displays a tendency toward matching in some intermediate region. It also demonstrates that the maximum of $\sigma^2$ should be situated at shorter distances $z$ if the structure constant, $C_n^2$, is larger. This result can easily be foreseen in view of the fact that a stronger photon-turbulence interaction brings the beam closer to the crossover to Gaussian statistics.
One more aspect taken under consideration is the dependence of scintillations on the initial radius of the laser beam. Figure \ref{fig:DiffRadius} illustrates the behavior of the scintillation index in the regions adjoining the extremum of $\sigma^2$. We can see that the initial growth of $\sigma^2$ is steeper in the case of smaller initial radii $r_0$. This is due to a stronger correlation of photon trajectories: the correlation is more pronounced for small $r_0$ \cite{enha}. This is easily explained: if the trajectories are closer to each other, then the probability for different photons to be scattered by the same eddy is greater. This is a case in which random scattering generates photon-photon correlations.
Figure \ref{fig:popravka} can be used to explain the physical mechanism responsible for the increase of $\sigma^2$ in the range $\sigma_1^2\leq{0.75}$. The solid lines are obtained using Eq. (\ref{40p}). The data shown by the dash-dotted line are obtained from the same expression with $\delta \hat{f}_2=0$. There is only a small difference between the corresponding pairs of curves. Therefore, the major part of the discrepancy between our results for $\sigma^2$ and the results based on the Rytov approximation is due to the decrease of the photon density caused by the turbulence. This decrease is described by the term $f_1$ in the denominator of (\ref{40p}).
\begin{figure}\label{fig:popravka}
\end{figure}
\section{Conclusion} \label{sec:conclusion}
For decades, the description of light propagation in a turbulent atmosphere has remained a challenging theoretical problem. The theoretically obtained interconnection between the initial and the detected signals is not sufficient for describing the efficiency of an atmospheric communication system. The point is that the detected signal retains a memory of the random scattering events that occurred in the course of propagation. Therefore, even for a statistically homogeneous and stationary atmosphere, the received signal varies (fluctuates) for different paths. The magnitude of these fluctuations is described by the scintillation index.
By definition, the scintillation index is expressed via the correlation functions of the photon distribution. The kinetic equation for the distribution function and its fluctuating part is derived here from first principles. Their solutions are obtained using the iteration procedure which is applicable for short propagation distances or small turbulence structure factors. In our analysis, we use the paraxial approximation for beams. This approximation reduces the problem to the case of a two-dimensional wave vector domain and simplifies the collision integral as well as correlation functions of the Langevin sources.
In conclusion, we think that further progress in the problem of scintillations lies in improving our ability to carry out complex multiple integrations.
\section{Acknowledgments} The authors thank A. Gabovych, G. Berman, D. Vasylyev and E. Stolyarov for useful discussions and comments.
\appendix \section{The collision integral} \label{sec:appendix} \numberwithin{equation}{section} \setcounter{equation}{0} The collision integral (\ref{12twelwe}) can be derived using the standard procedure. Nevertheless, some explanations are required. The derivation of Boltzmann-like kinetic equations is based on the assumption of a negligibly short interaction time of individual particles (photons) with scatterers. \begin{widetext} The corresponding criteria are given by Eq. (\ref{9tenprime}). The other point concerns the explicit form of the scattering probability. For our case, the collision process is described by the operator
\begin{equation}\label{A1} \hat{J}=-i\frac{\omega _0}{V}\sum_{{\bf k},{\bf k'}}e^{-i{\bf k\cdot r}}n_{{\bf k}^\prime} \big[b^\dag _{\bf q+ \frac {k}2}b_{\bf q-\frac{k}{2}+k^\prime}-b^\dag _{\bf q+ \frac {k}2-k^\prime}b_{\bf q-\frac{k}{2}} \big], \end{equation} (see Eq. (\ref{5seven})). Using the quantity $b^\dag _{\bf q+ \frac {k}2}b_{\bf q-\frac{k}{2}+k^\prime}$, given by Eq. (\ref{8ten}), we rewrite Eq. (\ref{A1}) as \begin{equation}\label{A2} \hat{J}=-\hat{K}({\bf r},{\bf q},t)+ \frac{\omega_0^2}V\sum_{\bf k,k^\prime, {k}^{\prime\prime}}n_{\bf {k}^{\prime}}n_{\bf {k}^{\prime\prime}}\int\limits_{t_0}^{t}dt^\prime e^{-i{\bf k \cdot r}} \bigg[e^{i(\omega_{\bf q+\frac{k}{2}}-\omega_{\bf q-\frac{k}{2}+{k'}})\left(t-t'\right)}\big(b^\dag_{\bf q+\frac{k}{2}}b_{\bf q-\frac{k}{2}+{k'}+{k}^{\prime\prime}}-b^\dag_{\bf q+\frac{k}{2}-{k}^{\prime\prime}}b_{\bf q-\frac{k}{2}+{k'}}\big) \end{equation}
\[ -e^{i(\omega_{\bf q+\frac{k}{2}-k^\prime}-\omega_{\bf q-\frac{k}{2}})\left(t-t'\right)}\big(b^\dag_{\bf q+\frac{k}{2}-k^\prime}b_{\bf q-\frac{k}{2}+{k}^{\prime\prime}}-b^\dag_{\bf q+\frac{k}{2}-k^\prime-{k}^{\prime\prime}}b_{\bf q-\frac{k}{2}}\big)\bigg]\bigg|_{t^\prime}=-\hat{K}({\bf r},{\bf q},t)+\hat{\tilde{J}},\] where the second term in square brackets is obtained from the first one by the replacement ${\bf q} \rightarrow{\bf q-k^\prime}$, and the interval $t-t_0$ satisfies the condition (\ref{9tenprime}).
Products of $n_{\bf {k}^{\prime}}n_{\bf {k}^{\prime\prime}}$ and $b^\dag b$ in Eq. (\ref{A2}) have a fluctuating nature. In what follows, we will neglect correlations between the corresponding subsystems. In this case we may consider them separately.
The quantity $n_{\bf {k}^{\prime}}n_{\bf {k}^{\prime\prime}}$ contains a nonzero average constituent and a fluctuating part. Let us consider the product $n_{\bf {k}^{\prime}}n_{\bf {k}^{\prime\prime}}$ in more detail. By definition \begin{equation}\label{A3} n_{\bf {k}^{\prime}}n_{\bf {k}^{\prime\prime}}=\frac 1{V^2}\int\int d{\bf r}d{\bf r}_1e^{i[{\bf k}^\prime\cdot{\bf r}+{\bf k}^{\prime\prime}\cdot{\bf r}_1]}\delta n({\bf r})\delta n({\bf r}_1)= \frac 1{V^2}\int\int d{\bf R}d{\bf s}e^{i({\bf k}^\prime+{\bf k}^{\prime\prime})\cdot{\bf R}+i({\bf k}^{\prime}- {\bf k}^{\prime\prime})\cdot{\bf s}/2} \delta n({\bf R}+\frac {\bf s}2)\delta n({\bf R}-\frac {\bf s}2 ), \end{equation}
where ${\bf R}=({\bf r}+{\bf r}_1)/2,\quad {\bf s}={\bf r}-{\bf r}_1$. The range $s\lesssim l_{corr}$, where the correlation length $l_{corr}$ is comparable with the eddy size, provides the dominant contribution to the average part of the integral (\ref{A3}). In spatially homogeneous media, the quantity ${\langle\delta n({\bf R}{+}\frac {\bf s}2)\delta n({\bf R}{-}\frac {\bf s}2 )}\rangle$ does not depend on $\bf R$ and the characteristic values of $|{\bf k}^{\prime}- {\bf k}^{\prime\prime}|$ are restricted by $ 1/l_{corr}$.
The characteristic value of $R$ is of the order of the system size $L$. In this case
$|{\bf k}^{\prime}+ {\bf k}^{\prime\prime}|{\sim}1/L$ tends to zero if $L{\rightarrow}\infty$. This means that the relation ${{\bf k}^{\prime}=-\bf k}^{\prime\prime}$ holds at any practically important values of ${\bf k}^{\prime}$ and ${\bf k}^{\prime\prime}$. Thus we have
\begin{eqnarray}\label{A4} \langle n_{\bf {k}^{\prime}}n_{\bf {k}^{\prime\prime}}\rangle &=& \frac 1V\delta_{{\bf k^\prime},-{\bf k^{\prime\prime}}}\int d{\bf s}\int \frac{d{\bf R}}Ve^{i{\bf k^\prime\cdot s}}\langle\delta n({\bf R}+\frac {\bf s}2)\delta n({\bf R}-\frac {\bf s}2 )\rangle\nonumber\\ &=&\delta_{{\bf k^\prime},-{\bf k^{\prime\prime}}}\int \frac {d{\bf s}}V e^{i{\bf k^\prime \cdot s}}\langle\delta n({\bf s})\delta n(0)\rangle\nonumber\\
&=&\delta_{{\bf k^\prime},-{\bf k^{\prime\prime}}}\langle n({\bf r})n(0)\rangle_{{\bf k}^\prime}=\delta_{{\bf k^\prime},-{\bf k^{\prime\prime}}}\langle |n_{\bf k^\prime}|^2 \rangle . \end{eqnarray} The angle brackets mean averaging over the volume $V$, which is assumed to be much greater than the correlation volume $l_{corr}^3$. Such averaging is equivalent to averaging over different configurations of turbulent atmosphere.
The substitution of $ \delta_{\bf {k}^\prime,-{\bf {k}}^{\prime\prime}}\langle|n_{\bf {k}^\prime}|^2\rangle$ for $n_{\bf {k}^{\prime}}n_{\bf {k}^{\prime\prime}}$ in Eq. (\ref{A2}) transforms the second term there to \begin{eqnarray}\label{A5}
{\hat{\tilde{J}}=\frac{\omega_0^2}V\sum_{\bf k,k^\prime }\langle |n_{\bf k^\prime}|^2 \rangle\int_{t_0}^{t}dt^\prime e^{-i{\bf k\cdot r}} \bigg[e^{i(\omega_{\bf q+\frac{k}{2}}-\omega_{\bf q-\frac{k}{2}+{k'}})\left(t-t'\right)}\big(b^\dag_{\bf q+\frac{k}{2}}b_{\bf q-\frac{k}{2}}-b^\dag_{\bf q+\frac{k}{2}+{k}^{\prime}}b_{\bf q-\frac{k}{2}+{k'}}\big)}\nonumber\\
-e^{i(\omega_{\bf q+\frac{k}{2}-k^\prime}-\omega_{\bf q-\frac{k}{2}})\left(t-t'\right)}\big(b^\dag_{\bf q+\frac{k}{2}-k^\prime}b_{\bf q-\frac{k}{2}-{k}^{\prime}}-b^\dag_{\bf q+\frac{k}{2}}b_{\bf q-\frac{k}{2}}\big)\bigg]\bigg|_{t^\prime} . \end{eqnarray} The rest of the terms with $n_{\bf {k}^{\prime}}n_{\bf {k}^{\prime\prime}}$, where $\bf {k}^{\prime}\neq-\bf {k}^{\prime\prime}$, have a random nature and should be added to the Langevin source $\hat{K}({\bf r},{\bf q},t)$. These terms contribute negligibly to $\hat{K}$ and can be neglected if Eq. (\ref{9tenprime}) holds true.
For the short interval $t-t_0$ [see (\ref{9tenprime})], the distribution function does not vary significantly and the evolution of operators $b^\dag_{\bf q+\frac{k}{2}}b_{\bf q-\frac{k}{2}}$ resembles the evolution in vacuum: \begin{equation}\label{A6}
b^\dag_{\bf q+\frac{k}{2}}b_{\bf q-\frac{k}{2}}|_{t^\prime}=e^{-i(\omega_{\bf q+\frac{k}{2}}-\omega_{\bf q-\frac{k}{2}})\left(t-t'\right)}b^\dag_{\bf q+\frac{k}{2}}b_{\bf q-\frac{k}{2}}|_{t}. \end{equation} The operators in the right side of Eq. (\ref{A6}) depend only on a fixed time $t$ and the integration in Eq. (\ref{A5}) concerns only the exponential functions \begin{eqnarray}\label{A7}
\int\limits_{t_0}^{t}dt^\prime e^{-i{\bf k\cdot r}} e^{i(\omega_{\bf q+\frac{k}{2}}-\omega_{\bf q-\frac{k}{2}+{k'}})\left(t-t'\right)}b^\dag_{\bf q+\frac{k}{2}}b_{\bf q-\frac{k}{2}}|_{t^\prime}
=b^\dag_{\bf q+\frac{k}{2}}b_{\bf q-\frac{k}{2}}|_t\int\limits_{t_0}^{t}dt^\prime e^{i(\omega_{\bf q-\frac{k}{2}}-\omega_{\bf q-\frac{k}{2}+{k'}})(t-t^\prime)}. \end{eqnarray} The condition (\ref{9tenprime}) enables the interval $t-t_0$ to be replaced by infinity
\begin{eqnarray}\label{A8} {\int\limits_{t_0}^{t}dt^\prime e^{i(\omega_{\bf q-\frac{k}{2}}-\omega_{\bf q-\frac{k}{2}+{k'}})(t-t^\prime)} \approx\int\limits_0^{\infty}d\tau e^{i(\omega_{\bf q-\frac{k}{2}}-\omega_{\bf q-\frac{k}{2}+{k'}}+i\eta)\tau}} =\frac i{\omega_{\bf q-\frac{k}{2}}-\omega_{\bf q-\frac{k}{2}+{k'}}+i\eta}, \end{eqnarray} \end{widetext} where $\eta\rightarrow +0$. Similar consideration is applicable to each term in Eq. (\ref{A5}). Then Eq. (\ref{A5}) reduces to
\begin{eqnarray}\label{A9}
\hat{\tilde{J}}&{=}&\frac{i\omega_0^2}V\sum_{\bf k,k^\prime }\langle |n_{\bf k^\prime}|^2 \rangle e^{-i{\bf k\cdot r}}\bigg[\frac {b^\dag_{\bf q+\frac{k}{2}}b_{\bf q-\frac{k}{2}}}{\omega_{\bf q-\frac{k}{2}}-\omega_{\bf q-\frac{k}{2}+{k'}}+i\eta}\nonumber\\ &&-\frac{b^\dag_{\bf q+\frac{k}{2}+{k}^{\prime}}b_{\bf q-\frac{k}{2}+{k'}}}{\omega_{\bf q+\frac{k}{2}}-\omega_{\bf q+\frac{k}{2}+{k'}}+i\eta}- \frac {b^\dag_{\bf q+\frac{k}{2}+k^\prime}b_{\bf q-\frac{k}{2}+k^\prime}}{\omega_{\bf q-\frac{k}{2}+k^\prime}-\omega_{\bf q-\frac{k}{2}}+i\eta}\nonumber\\
&&+\frac{b^\dag_{\bf q+\frac{k}{2}}b_{\bf q-\frac{k}{2}}}{\omega_{\bf q+\frac{k}{2}+k^\prime}-\omega_{\bf q+\frac{k}{2}}+i\eta}\bigg]\bigg|_t. \end{eqnarray}
In the last two terms, the value of ${\bf k^\prime}$ has been replaced by ${-\bf k^\prime}$. For the paraxial beams considered here, we can use the approximation $\omega_{\bf q}=cq\approx cq_z$, which implies a negligible contribution of the $q_{x,y}$ components. Then, using the relation \[\frac 1{ck^\prime_z-i\eta} -\frac 1{ck^\prime_z+i\eta}=\frac{2\pi i }c\delta(k^\prime_z)\] and integrating over $k^\prime_z$, Eq. (\ref{A9}) simplifies to
\begin{equation}\label{A10}
\hat{\tilde{J}}=\frac{2\pi\omega_{0}^{2}}{c}\int d{\bf k'_{\bot}}\psi({\bf k'_{\bot}})\big(\hat{f}({\bf r},{\bf q},t)-\hat{f}({\bf r},{\bf q+k'_{\bot}},t)\big),
\end{equation}
where the definition (\ref{1threee}) of the PDF was used. Equation (\ref{A10}) coincides with the collision integral $\hat{\nu}_{\bf q}\big \{ \hat{f}({\bf r},{\bf q},t)\}$ represented by Eq. (\ref{12twelwe}).
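For completeness (this step is standard and only used implicitly above), the delta-function relation employed before Eq. (\ref{A10}) follows from the limiting form of the Lorentzian,
\[ \frac 1{ck^\prime_z-i\eta} -\frac 1{ck^\prime_z+i\eta}=\frac{2i\eta}{(ck^\prime_z)^2+\eta^2}\;\longrightarrow\; 2\pi i\,\delta(ck^\prime_z)=\frac{2\pi i}c\,\delta(k^\prime_z), \qquad \eta\rightarrow +0, \]
where we used $\eta/[\pi(x^2+\eta^2)]\rightarrow\delta(x)$ and $\delta(cx)=\delta(x)/c$ for $c>0$.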
\section{Boundary conditions for the incident light} \label{sec:appendix1}
The calculation of concrete parameters of the laser radiation is possible if the boundary conditions for the incident light are specified. Usually, a Gaussian distribution of the laser field in the aperture plane is assumed, \begin{equation}\label{30twnt4} \Phi({\bf r}_\bot)=(2/\pi r_0^2)^{1/2}e^{-{r^2_\bot}/{r^2_0}}, \end{equation} where $r_0$ is the aperture radius. The laser field and the outgoing field should match in the aperture plane. This means that \begin{equation}\label{31twnt5x} \sum_{{\bf q}_{\bot},q_z}\bigg( \frac{2\pi\hbar\omega_{\bf q}}V\bigg)^{1/2}b_{\bf q}e^{-i\omega_{\bf q}t+i{\bf q}_{\bot}\cdot{\bf r}_{\bot}}=\alpha_Lb\Phi({\bf r}_{\bot})e^{-i\omega_0t}, \end{equation} where $b$ is the amplitude of the laser mode, and the coefficient $\alpha_L$ describes the penetration of this field through the aperture. As before, the paraxial approximation ($\omega_{\bf q}\approx cq_z$) can be used. Also, the requirement of synchronism of both fields restricts the sum on the left-hand side to terms with $q_z=\omega_0/c=q_0$. Then the explicit value of $b_{\bf q}$ follows from Eq. (\ref{31twnt5x}): \begin{equation}\label{32twnt5xx} b_{\bf q}=b\alpha_L\frac {r_0}{\sqrt{\hbar \omega_0}}\sqrt{\frac{L_z}S}e^{-q^2_{\bot}r_0^2/4}\delta_{q_z,q_0}, \end{equation}
which determines the boundary value of the PDF: \begin{equation}\label{33twnt5} f({\bf r_\bot},z{=}0,{\bf q},t)=\delta_ {q_z,q_0}b^\dag(t) b(t)\frac {2{\alpha_L}^2}{\pi S\hbar\omega_0}e^{-q_{\bot}^2r_0^2/2-{2r_{\bot}^2}/{r_0^2}}. \end{equation}
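The exponential factor in Eqs. (\ref{32twnt5xx}) and (\ref{33twnt5}) originates from the two-dimensional Fourier transform of the Gaussian profile (\ref{30twnt4}); as a brief sketch of this step (added here for clarity),
\[ \int d^2r_\bot\,\Phi({\bf r}_\bot)\,e^{-i{\bf q}_\bot\cdot{\bf r}_\bot}= \Big(\frac 2{\pi r_0^2}\Big)^{1/2}\int d^2r_\bot\, e^{-r_\bot^2/r_0^2-i{\bf q}_\bot\cdot{\bf r}_\bot}=\sqrt{2\pi}\,r_0\,e^{-q_\bot^2r_0^2/4}. \]
Projecting Eq. (\ref{31twnt5x}) onto $e^{i{\bf q}_\bot\cdot{\bf r}_\bot}$ over the aperture area $S$, and writing the quantization volume as $V=SL_z$ (the transverse area times the longitudinal length $L_z$), then reproduces Eq. (\ref{32twnt5xx}).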
The extension of Eq. (\ref{33twnt5}) to the case of a partially coherent beam is realized by substituting $\frac{q_\bot^2r_1 ^2}2$ for $\frac{q_\bot^2r_0^2}2$ \cite{Chu}. Here $r^2_1=r_0^2/(1+2r_0 ^2\lambda _c^{-2})$, and the quantity $\lambda _c$ describes the effect of the phase diffuser, which is used for the suppression of scintillations. This modification of the initial distribution expands the range of $q_\bot$ variation to values of the order of $\pi/r_1$ and does not affect the spatial distribution in the ${\bf r}_\bot$-domain. The diffuser influence vanishes in the limit $\lambda _c\rightarrow\infty$ because in this case ${ r}_1\rightarrow r_0$.
In the case of ${\bf r}_\bot=0$, the denominator in Eq. (\ref{22twnt1}) is given by \begin{equation}\label{34twnt6x}
\sum_{\bf q} f_0({\bf r},{\bf q},t)=\frac {\alpha_L^2r_1^2q_0\langle b^\dag b\rangle}{\pi^2\hbar c(4+\rho_0^2\rho_1^2)}, \end{equation} where the derivation of (\ref{34twnt6x}) was somewhat simplified by inserting $L_0^{-1}=0$ in Eq. (\ref{13twelwwe}); as before, $\rho _{0,1}^2={r_{0,1}^2q_0}/z$.
\end{document}
\begin{document}
\title{Graphs with bounded tree-width and large odd-girth are almost bipartite} \author{Alexandr V. Kostochka\thanks{ Department of Mathematics, University of Illinois, Urbana, IL 61801 and Institute of Mathematics, Novosibirsk 630090, Russia. E-mail: \texttt{[email protected]}. This author's work was partially supported by NSF grant DMS-0650784 and by grant 09-01-00244-a of the Russian Foundation for Basic Research.} \and Daniel Kr{\'a}l'\thanks{ Institute for Theoretical Computer Science, Faculty of Mathematics and Physics, Charles University, Malostransk{\'e} n{\'a}m{\v e}st{\'\i} 25, 118 00 Prague, Czech Republic. E-mail: \texttt{[email protected]}. The Institute for Theoretical Computer Science (ITI) is supported by Ministry of Education of the Czech Republic as project 1M0545. This research has also been supported by the grant GACR 201/09/0197.} \and Jean-S{\'e}bastien Sereni\thanks{ CNRS (LIAFA, Universit\'e Denis Diderot), Paris, France, and Department of Applied Mathematics (KAM), Faculty of Mathematics and Physics, Charles University, Prague, Czech Republic. E-mail: \texttt{[email protected]}.} \and Michael Stiebitz\thanks{ Technische Universit\"at Ilmenau, Institute of Mathematics, P.O.B. 100 565, D-98684 Ilmenau, Germany. E-mail: \texttt{[email protected]}.}} \date{} \maketitle \begin{abstract} We prove that for every $k$ and every $\varepsilon>0$, there exists $g$ such that every graph with tree-width at most $k$ and odd-girth at least $g$ has circular chromatic number at most $2+\varepsilon$. \end{abstract}
\section{Introduction}
It has been a challenging problem to prove the existence of graphs of arbitrarily high girth and chromatic number~\cite{Erd59}. On the other hand, graphs with large girth that avoid a fixed minor are known to have low chromatic number (in particular, this applies to graphs embedded on a fixed surface). More precisely, as Thomassen observed~\cite{Tho88}, a graph that avoids a fixed minor and has large girth is $2$-degenerate, and hence $3$-colorable. Further, Galluccio, Goddyn and Hell~\cite{bib-galluccio} proved the following theorem, which essentially states that graphs with large girth that avoid a fixed minor are almost bipartite. \begin{theorem}[Galluccio, Goddyn and Hell, 2001] \label{thm-ggh} For every graph $H$ and every $\varepsilon>0$, there exists an integer $g$ such that the circular chromatic number of every $H$-minor free graph of girth at least $g$ is at most $2+\varepsilon$. \end{theorem} A natural way to weaken the girth condition is to require the graphs to have high odd-girth (the \emph{odd-girth} is the length of a shortest odd cycle). However, Young~\cite{You96} constructed $4$-chromatic projective graphs with arbitrarily high odd-girth. Thus, the high odd-girth requirement is not sufficient to ensure $3$-colorability, even for graphs embedded on a fixed surface. Klostermeyer and Zhang~\cite{KlZh00}, though, proved that the circular chromatic number of every planar graph of sufficiently high odd-girth is arbitrarily close to $2$. In particular, the same is true for $K_4$-minor free graphs, i.e. graphs with tree-width at most $2$. We prove that the conclusion is still true for any class of graphs of bounded tree-width, which answers a question of Pan and Zhu~\cite[Question 6.5]{bib-pan} also appearing as Question 8.12 in the survey by Zhu~\cite{bib-zhu01}.
\begin{theorem}\label{thm-main} For every $k$ and every $\varepsilon>0$, there exists $g$ such that every graph with tree-width at most $k$ and odd-girth at least $g$ has circular chromatic number at most $2+\varepsilon$. \end{theorem}
Motivated by tree-width duality, Ne{\v s}et{\v r}il and Zhu~\cite{bib-nesetril} proved the following theorem. \begin{theorem}[Ne\v set\v ril and Zhu, 1996]\label{thm-twd} For every $k$ and every $\varepsilon>0$, there exists $g$ such that every graph $G$ with tree-width at most $k$ and homomorphic to a graph $H$ with girth at least $g$ has circular chromatic number at most $2+\varepsilon$. \end{theorem}
To see that Theorem~\ref{thm-main} implies Theorem~\ref{thm-twd}, observe that if $G$ has an odd cycle of length $g$, then $H$ has an odd cycle of length at most $g$.
\section{Notation}
A \emph{$(p,q)$-coloring} of a graph is a coloring of the vertices with colors from the set $\{0,\ldots,p-1\}$ such that the colors of any two adjacent vertices $u$ and $v$ satisfy $q\le |c(u)-c(v)|\le p-q$. The \emph{circular chromatic number $\chi_c(G)$} of a graph $G$ is the infimum (and it can be shown to be the minimum) of the ratios $p/q$ such that $G$ has a $(p,q)$-coloring. For every finite graph $G$, it holds that $\chi(G)=\lceil\chi_c(G)\rceil$ and there is a $(p,q)$-coloring of $G$ for every $p$ and $q$ with $p/q\ge\chi_c(G)$. In particular, the circular chromatic number of $G$ is at most $2+1/k$ if and only if $G$ is homomorphic to a cycle of length $2k+1$. The reader is referred to the surveys by Zhu~\cite{bib-zhu01,bib-zhu06} for more information about circular colorings.
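As a concrete illustration (added here; it is not part of the original argument), the odd cycle $C_{2k+1}$ with vertices $v_0,\ldots,v_{2k}$ admits the $(2k+1,k)$-coloring
\[ c(v_i)= ik \mod (2k+1), \]
in which the colors of consecutive vertices (indices taken modulo $2k+1$) differ by $k$ or $k+1$, so they satisfy $k\le |c(u)-c(v)|\le (2k+1)-k$. Hence $\chi_c(C_{2k+1})\le 2+1/k$, and any graph homomorphic to $C_{2k+1}$ inherits such a coloring by composing the homomorphism with $c$.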
A \emph{$p$-precoloring} is a coloring $\varphi$ of a subset $A$ of vertices of a graph $G$ with colors from $\{0,\ldots,p-1\}$, and its \emph{extension} is a coloring of the whole graph $G$ that coincides with $\varphi$ on $A$. The following lemma can be seen as a corollary of a theorem of Albertson and West~\cite[Theorem 1]{AlWe06}, and it is the only tool we use from this area.
\begin{lemma} \label{lm-extend} For every $p$ and $q$ with $2<p/q$, there exists $d$ such that any $p$-precoloring of vertices with mutual distances at least $d$ of a bipartite graph $H$ extends to a $(p,q)$-coloring of $H$. \end{lemma}
A \emph{$k$-tree} is a graph obtained from a complete graph of order $k+1$ by adding vertices of degree $k$ whose neighborhood is a clique. The \emph{tree-width} of a graph $G$ is the smallest $k$ such that $G$ is a subgraph of a $k$-tree. Graphs with tree-width at most $k$ are also called \emph{partial $k$-trees}.
A \emph{rooted partial $k$-tree} is a partial $k$-tree $G$ with $k+1$ distinguished vertices $v_1,\ldots,v_{k+1}$ such that there exists a $k$-tree $G'$ that is a supergraph of $G$ and the vertices $v_1,\ldots,v_{k+1}$ form a clique in $G'$. We also say that the partial $k$-tree is \emph{rooted} at $v_1,\ldots,v_{k+1}$. If $G$ is a partial $k$-tree rooted at $v_1,\ldots,v_{k+1}$ and $G'$ is a partial $k$-tree rooted at $v'_1,\ldots,v'_{k+1}$, then the graph $G\oplus G'$ obtained by identifying $v_i$ and $v'_i$ is again a rooted partial $k$-tree (identify the cliques in the corresponding $k$-trees).
Fix $p$ and $q$. If $G$ is a rooted partial $k$-tree, then $\F(G)$ is the set of all $p$-precolorings of the $k+1$ distinguished vertices of $G$ that can be extended to a $(p,q)$-coloring of $G$.
The next lemma is a standard application of results in the area of graphs of bounded tree-width~\cite{RoSe86}.
\begin{lemma} \label{lm-small} Let $k$ and $N$ be positive integers such that $N\ge k+1$. If $G$ is a partial $k$-tree with at least $3N$ vertices, then there exist partial rooted $k$-trees $G_1$ and $G_2$ such that $G$ is isomorphic to $G_1\oplus G_2$ and $G_1$ has at least $N+1$ and at most $2N$ vertices. \end{lemma}
If $G$ is a partial $k$-tree rooted at $v_1,\ldots,v_{k+1}$, then its \emph{type} is a $(k+1)\times (k+1)$ matrix $M$ such that $M_{ij}$ is the length of the shortest path between the vertices $v_i$ and $v_j$. If there is no such path, $M_{ij}$ is equal to $\infty$. Any matrix $M$ that is a type of a partial rooted $k$-tree satisfies the triangle inequality (setting $\infty+x=\infty$ for any $x$). A symmetric matrix $M$ whose entries are non-negative integers and $\infty$ (and zeroes only on the main diagonal) that satisfies the triangle inequality is a \emph{type}. A type is \emph{bipartite} if $M_{ij}+M_{jk}+M_{ik}\equiv0\;\mod\; 2$ for any three finite entries $M_{ij}$, $M_{jk}$ and $M_{ik}$. Two bipartite types $M$ and $M'$ are \emph{compatible} if $M_{ij}$ and $M'_{ij}$ have the same parity whenever both of them are finite. We define a binary relation on bipartite types as follows: $M\lm M'$ if and only if $M$ and $M'$ are compatible and $M_{ij}\leq M'_{ij}$ for every $i$ and $j$. Note that the relation $\lm$ is a partial order.
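For a small example (added here for illustration), let $k=2$ and consider the types
\[ M=\left(\begin{array}{ccc}0&1&2\\1&0&3\\2&3&0\end{array}\right),\qquad M'=\left(\begin{array}{ccc}0&3&2\\3&0&5\\2&5&0\end{array}\right). \]
Both are bipartite, since $1+2+3$ and $3+2+5$ are even; they are compatible because corresponding finite entries have the same parity, and $M\lm M'$. The type $M$ is realized, for instance, by the path $v_2v_1xv_3$ rooted at $v_1,v_2,v_3$.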
We finish this section with the following lemma. Its straightforward proof is included to help us in familiarizing with the just introduced notation.
\begin{lemma} \label{lm-type-glue} Let $G^1$ and $G^2$ be two bipartite rooted partial $k$-trees with types $M^1$ and $M^2$ such that there exists a bipartite type $M^0$ with $M^0\lm M^1$ and $M^0\lm M^2$. Then the types $M^1$ and $M^2$ are compatible, $G^1\oplus G^2$ is a bipartite rooted partial $k$-tree and its type $M$ satisfies $M^0\lm M$. \end{lemma}
\begin{proof} The types $M^1$ and $M^2$ are compatible: if both $M^1_{ij}$ and $M_{ij}^{2}$ are finite, then $M_{ij}^{0}$ is finite and has the same parity as $M^1_{ij}$ and $M_{ij}^{2}$. Hence, the entries $M^1_{ij}$ and $M^2_{ij}$ have the same parity.
Let $M$ be the type of $G^1\oplus G^2$. Note that it does not hold in general that $M_{ij}=\min\{M^1_{ij},M^2_{ij}\}$. We show that $M^0\lm M$ which will also imply that $G^1\oplus G^2$ is bipartite since $M^0$ is a bipartite type. Consider a shortest path $P$ between two distinguished vertices $v_i$ and $v_{i'}$ and split $P$ into paths $P_1,\ldots,P_\ell$ delimited by distinguished vertices on $P$. Note that $\ell\le k$ since $P$ is a path. Let $j_0=i$ and let $j_i$ be the index of the end-vertex of $P_i$ for $i\in\{1,\ldots,\ell\}$. In particular, $j_\ell=i'$. Each of the paths $P_1,\ldots,P_\ell$ is fully contained in $G^1$ or in $G^2$ (possibly in both if it is a single edge). Since $M^0\lm M^1$ and $M^0\lm M^2$, the length of $P_i$ is at least $M^0_{j_{i-1}j_i}$, and it has the same parity as $M^0_{j_{i-1}j_i}$. Since $M^0$ is a bipartite type (among others, it satisfies the triangle inequality), the length of $P$, which is $M_{ii'}$, has the same parity as $M^0_{j_0j_\ell}=M^0_{ii'}$ and is at least $M^0_{ii'}$. This implies that $M^0\lm M$. \end{proof}
\section{The Main Lemma}
In this section, we prove a lemma which forms the core of our argument. To this end, we first prove another lemma that asserts that for every $k$, $p$ and $q$, the set of types of all bipartite rooted partial $k$-trees forbidding a fixed set of $p$-precolorings from extending (and maybe some other precolorings, too) has always a maximal element. We formulate the lemma slightly differently to facilitate its application.
\begin{lemma} \label{lm-mainM} For every $k$, $p$ and $q$, there exists a finite number of (bipartite) types $M^1,\ldots,M^m$ such that for any bipartite rooted partial $k$-tree $G$ with type $M$, there exists a bipartite rooted partial $k$-tree $G'$ with type $M^i$ for some $i\in\{1,\ldots,m\}$ such that $\F(G')\subseteq\F(G)$ and $M\lm M^i$. \end{lemma}
\begin{proof} Let $d\ge 2$ be the constant from Lemma~\ref{lm-extend} applied for $p$ and $q$. Let $M^1,\ldots,M^m$ be all bipartite types with entries from the set $\{1,\ldots,D^{(k+1)^2}\}\cup\{\infty\}$ where $D=4d$. Thus, $m$ is finite and does not exceed $(D^{(k+1)^2}+1)^{k(k+1)/2}$.
Let $G$ be a bipartite rooted partial $k$-tree with type $M$. If $M$ is one of the types $M^1,\ldots,M^m$, then there is nothing to prove (just choose $i$ such that $M=M^i$). Otherwise, one of its entries is finite and exceeds $D^{(k+1)^2}$.
For $i\in\{1,\ldots,(k+1)^2\}$, let $J^i$ be the set of all positive integers between $D^{i-1}$ and $D^i-1$ (inclusively). Let $i_0$ be the smallest integer such that no entry of $M$ is contained in $J^{i_0}$. Since $M$ has at most $k(k+1)/2$ different entries, such an index $i_0$ exists. Note that if $i_0=1$, then Lemma~\ref{lm-extend} implies that $\F(G)$ contains all possible $p$-precolorings, and the sought graph $G'$ is the bipartite rooted partial $k$-tree composed of $k+1$ isolated vertices, with the all-$\infty$ type.
Two vertices $v_i$ and $v_j$ at which $G$ is rooted are \emph{close} if $M_{ij}$ is at most $D^{i_0-1}$. The relation $\approx$ of being close is an equivalence relation on $v_1,\ldots,v_{k+1}$. Indeed, it is reflexive and symmetric by the definition, and we show now that it is transitive. Suppose that $M_{ij}$ and $M_{jk}$ are both at most $D^{i_0-1}$. Then, the distance between $v_i$ and $v_k$ is at most $M_{ij}+M_{jk}\le2D^{i_0-1}-2\le D^{i_0}-1$ since $D\ge2$. Consequently, by the choice of $i_0$, the distance between $v_i$ and $v_k$ is at most $D^{i_0-1}-1$ and thus $v_i\approx v_k$.
Let $C_1,\ldots,C_{\ell}$ be the equivalence classes of the relation $\approx$. Note that $C_1,\ldots,C_{\ell}$ is a finer partition than that given by the equivalence relation of being connected.
Since $G$ is bipartite, we can partition its vertices into two color classes, say red and blue. For every $i\in\{1,\ldots,\ell\}$, contract the closed neighborhood of a vertex $v$ if $v$ is a blue vertex and its distance from any vertex of $C_i$ is at least $D^{i_0-1}$ and keep doing so as long as such a vertex exists. Observe that the resulting graph is uniquely defined. After discarding the components that do not contain the vertices of $C_i$, we obtain a bipartite partial $k$-tree $G_i$ rooted at the vertices of $C_i$: it is bipartite as we have always contracted closed neighborhoods of vertices of the same color (blue) to a single (red) vertex, and its tree-width is at most $k$ since the tree-width is preserved by contractions. Moreover, the distance between any two vertices of $C_i$ has not decreased since any path between them through any of the newly arising vertices has length at least $2D^{i_0-1}-2\ge D^{i_0-1}$.
Now, let $G'$ be the bipartite rooted partial $k$-tree obtained by taking the disjoint union of $G_1,\ldots,G_{\ell}$. The type $M'$ of $G'$ can be obtained from the type of $G$: set $M'_{ij}$ to be $M_{ij}$ if the vertices $v_i$ and $v_j$ are close, and $\infty$ otherwise. Thus, $M'$ is one of the types $M^1,\ldots,M^m$ and $M\lm M'$. It remains to show that $\F(G')\subseteq\F(G)$.
Let $c\in\F(G')$ be a $p$-precoloring that extends to $G'$, and recall that $D\ge4$. For $i\in\{1,\ldots,\ell\}$, let $A_i$ be the set of all red vertices at distance at most $D^{i_0-1}$ and all blue vertices at distance at most $D^{i_0-1}-1$ from $C_i$, and let $R_i$ be the set of all red vertices at distance $D^{i_0-1}-1$ or $D^{i_0-1}$ from $C_i$. Set $B_i=A_i\setminus R_i$ ($B_i$ is the ``interior'' of $A_i$ and $R_i$ its ``boundary''). The extension of $c$ to $G_i$ naturally defines a coloring of all vertices of $A_i$: $G_i$ is the subgraph of $G$ induced by $A_i$ with some red vertices of $R_i$ identified (two vertices of $R_i$ are identified if and only if they are in the same component of the graph $G-B_i$).
Let $H$ be the following auxiliary graph obtained from $G$: remove the vertices of $B=B_1\cup\cdots\cup B_{\ell}$ and, for $i\in\{1,\ldots,\ell\}$, identify every pair of vertices of $R_i$ that are in the same component of $G-B$. Let $R$ be the set of vertices of $H$ corresponding to some vertices of $R_1\cup\cdots\cup R_{\ell}$. Precolor the vertices of $R$ with the colors given by the colorings of $G_i$ (note that two vertices of $R_i$ in the same component of $G-B_i$ are also in the same component of $G-B$, so this is well-defined). The graph $H$ is bipartite as only red vertices have been identified. The distance between any two precolored vertices is at least $d$: consider two precolored vertices $r$ and $r'$ at distance at most $d-1$. Let $i$ and $i'$ be such that $r\in R_i$ and $r'\in R_{i'}$. If $i=i'$, then $r$ and $r'$ are in the same component of $G-B$ and thus $r=r'$. If $i\not=i'$ then by the definition of $R_i$ and $R_{i'}$, the vertex $r$ is in $G$ at distance at most $D^{i_0-1}$ from some vertex $v$ of $C_i$ and $r'$ is at distance at most $D^{i_0-1}$ from some vertex $v'$ of $C_{i'}$. So, the distance between $v$ and $v'$ is at most $2D^{i_0-1}+d<D^{i_0}-1$. Since $M$ has no entry from $J^{i_0}$, the vertices $v$ and $v'$ must be close and thus $i=i'$, a contradiction.
Since the distance between any two precolored vertices is at least $d$, the precoloring extends to $H$ by Lemma~\ref{lm-extend} and in a natural way it defines a coloring of $G$. We conclude that every $p$-precoloring that extends to $G'$ also extends to $G$ and thus $\F(G')\subseteq\F(G)$. \end{proof}
We now prove our main lemma, which basically states that there is only a finite number of bipartite rooted partial $k$-trees that can appear in a minimal non-$(p,q)$-colorable graph with tree-width $k$ and a given odd girth.
\begin{lemma} \label{lm-mainG} For every $k$, $p$ and $q$, there exist a finite number $m$ and bipartite rooted partial $k$-trees $G^1,\ldots,G^m$ with types $M^1,\ldots,M^m$ such that for any bipartite rooted partial $k$-tree $G$ with type $M$ there exists $i$ such that $\F(G^i)\subseteq \F(G)$ and $M\lm M^i$. \end{lemma}
\begin{proof} Let $M^1,\ldots,M^{m}$ be the types from Lemma~\ref{lm-mainM}. We define the graph $G^i$ as follows: for every $p$-precoloring $c$ that does not extend to a bipartite partial rooted $k$-tree with type $M^i$, fix any partial rooted $k$-tree $G^i_c$ with type $M^i$ such that $c$ does not extend to $G^i_c$. Set $G^i=\bigoplus_{c} G^i_c$, where $c$ runs over all such $p$-precolorings. If the above sum of partial $k$-trees is non-empty, then the type $M$ of $G^i$ is $M^i$. Indeed, $M\lm M^i$ by the definition of $G^i$, and Lemma~\ref{lm-type-glue} implies that $M^i\lm M$. If all the $p$-precolorings of the $k+1$ vertices in the root extend to each partial $k$-tree of type $M^i$, then let $G^i$ be the graph consisting of $k+1$ isolated vertices. This happens in particular for the all-$\infty$ type.
Let us verify the statement of the lemma. Let $G$ be a bipartite rooted partial $k$-tree and let $M$ be the type of $G$. If $\F(G)$ is composed of all $p$-precolorings, the sought graph $G^i$ is the one composed of $k+1$ isolated vertices. Hence, we assume that $\F(G)$ does not contain all $p$-precolorings, i.e., there are $p$-precolorings that do not extend to $G$. By Lemma~\ref{lm-mainM}, there exists a bipartite rooted partial $k$-tree $G'$ with type $M'$ such that $M\lm M'=M^i$ for some $i$ and $\F(G')\subseteq\F(G)$. For every $p$-precoloring $c$ that does not extend to $G'$ (and there exists at least one such $p$-precoloring $c$), some graph $G^i_c$ has been glued into $G^i$. Hence, $\F(G^i)\subseteq\F(G')\subseteq\F(G)$. Since the type of $G^i$ is $M^i$, the conclusion of the lemma follows. \end{proof}
\section{Proof of Theorem~\ref{thm-main}}
We are now ready to prove Theorem~\ref{thm-main}, which is recalled below.
\begin{thm2} For every $k$ and every $\varepsilon>0$, there exists $g$ such that every graph with tree-width at most $k$ and odd-girth at least $g$ has circular chromatic number at most $2+\varepsilon$. \end{thm2}
\begin{proof} Fix $p$ and $q$ such that $2<p/q\le 2+\varepsilon$. Let $G^1,\ldots,G^m$ be the bipartite partial $k$-trees from Lemma~\ref{lm-mainG} applied for $k$, $p$ and $q$. Set $N$ to be the largest order of the graphs $G^i$ and set $g$ to be $3N$. We assert that each partial $k$-tree with odd-girth $g$ has circular chromatic number at most $p/q$. Assume that this is not the case and let $G$ be a counterexample with the fewest vertices.
The graph $G$ has at least $3N$ vertices (otherwise, it has no odd cycles and thus it is bipartite). By Lemma~\ref{lm-small}, $G$ is isomorphic to $G_1\oplus G_2$, where $G_1$ and $G_2$ are rooted partial $k$-trees and the number of vertices of $G_1$ is between $N+1$ and $2N$. By the choice of $g$, the graph $G_1$ has no odd cycle and thus it is a bipartite rooted partial $k$-tree. By Lemma~\ref{lm-mainG}, there exists $i$ such that $\F(G^i)\subseteq\F(G_1)$ and $M_1\lm M^i$ where $M_1$ is the type of $G_1$ and $M^i$ is the type of $G^i$. Let $G'$ be the partial $k$-tree $G^i\oplus G_2$.
First, $G'$ has fewer vertices than $G$ since the number of vertices of $G^i$ is at most $N$ and the number of vertices of $G_1$ is at least $N+1$. Second, $G'$ has no $(p,q)$-coloring: if it had a $(p,q)$-coloring, then the corresponding $p$-precoloring of the $k+1$ vertices shared by $G^i$ and $G_2$ would extend to $G_1$ since $\F(G^i)\subseteq\F(G_1)$ and thus $G$ would have a $(p,q)$-coloring, too. Finally, $G'$ has no odd cycle of length at most $g$: if it had such a cycle, replace any path between vertices $v_j$ and $v_{j'}$ of the root of $G^i$ with a path of at most the same length between them in $G_1$ (recall that $M_1\lm M^i$). If such paths for different pairs of $v_j$ and $v_{j'}$ on the considered odd cycle intersect, take their symmetric difference. In this way, we obtain an Eulerian subgraph of $G=G_1\oplus G_2$ with an odd number of edges such that the number of its edges does not exceed $g$. Consequently, this Eulerian subgraph has an odd cycle of length at most $g$, which violates the assumption on the odd-girth of $G$. We conclude that $G'$ is a counterexample with fewer vertices than $G$, a contradiction. \end{proof}
We end by pointing out that the approach used yields an upper bound of $3(k+1)\cdot2^{2^{p^{k+1}}((4d)^{(k+1)^2}+1)^{k^2}}$ for the smallest $g$ such that all graphs with tree-width at most $k$ and odd-girth at least $g$ have circular chromatic number at most $p/q$, whenever $p/q>2$. More precisely, the value of $N$ cannot exceed $(k+1)\cdot2^{2^{p^{k+1}}((4d)^{(k+1)^2}+1)^{k^2}}$. To see this, we consider all pairs $P=(C,M)$ where $C$ is a set of $p$-precolorings of the root and $M$ is a type such that there is a bipartite rooted partial $k$-tree of type $M$ to which no coloring of $C$ extends. Let $n_P$ be the size of a smallest such partial $k$-tree. We obtain a sequence of at most $2^{p^{k+1}}\times\left((4d)^{(k+1)^{2}}+1\right)^{k^2}$ integers. The announced bound follows from the following fact: if the sequence is sorted in increasing order, then each term is at most twice the previous one.
Indeed, consider the tree-decomposition of the partial $k$-tree $G_P$ chosen for the pair $P$. If the bag containing the root has a single child, then we delete a vertex of the root, and set a vertex in the single child to be part of the root. We obtain a partial $k$-tree to which some $p$-precolorings of $C$ do not extend. Thus, $n_P\le1+n_{P'}$ for some pair $P'$ and $n_{P'}<n_P$. If the bag containing the root has more than one child, then $G_P$ can be obtained by identifying the roots of two smaller partial $k$-trees $G$ and $G'$. By the minimality of $G_P$, the orders of $G$ and $G'$ are $n_{P_1}$ and $n_{P_2}$ for two pairs $P_1$ and $P_2$ such that $n_{P_i}<n_P$ for $i\in\{1,2\}$. This yields the stated fact, which in turn implies the given bound, since the smallest element of the sequence is $k+1$.
\noindent \textbf{Acknowledgment.} This work was done while the first three authors were visiting the fourth at Technische Universit\"at Ilmenau. They thank their host for providing a perfect working environment.
\end{document}
\begin{document}
\title{Potentiality States: Quantum versus Classical Emergence} \author{\normalsize Diederik Aerts and Bart D'Hooghe \\
\small\itshape
Center Leo Apostel for Interdisciplinary Studies \\
\small\itshape
Department of Mathematics and Department of Psychology \\
\small\itshape
Brussels Free University, Brussels, Belgium \\
\small
Emails: \url{[email protected], [email protected]}
\\
} \date{} \maketitle
\begin{abstract} \noindent We identify emergence with the existence of states of potentiality related to relevant physical quantities. We introduce the concept of `potentiality state' operationally and show how it reduces to `superposition state' when standard quantum mechanics can be applied. We consider several examples to illustrate our approach, and define the potentiality states giving rise to emergence in each example. We prove that Bell inequalities are violated by the potentiality states in the examples, which, taking into account Pitowsky's theorem, experimentally indicates the presence of quantum structure in emergence. In the first example emergence arises because of the many ways water can be subdivided into different vessels. In the second example, we put forward a full quantum description of the Liar paradox situation, and identify the potentiality states, which in this case turn out to be superposition states. In the example of the soccer team, we show the difference between classical emergence as stable dynamical pattern and emergence defined by a potentiality state, and show how Bell inequalities can be violated in the case of highly contextual experiments. \end{abstract}
\section{Introduction} Many everyday life examples of emergence can be given. Let us consider for instance a set of soccer players. As long as it is just a set of soccer players no emergence takes place. Suppose however that the set of players starts to practice with the aim of forming a soccer team. The co-adaptation that takes place between the different soccer players during their trainings and matches results in the emergence of a soccer team. The soccer team is a new structure that has been formed out of the set of individual soccer players.
In physics emergent phenomena have been studied within the complexity and chaos approach. An example is given by the B\'{e}nard convection effect. This effect occurs when a viscous fluid is heated between two planes. In the pre-boiling stage and under suitable conditions, bubbles begin to rise and vortices --- called B\'{e}nard cells --- arise like little cylinders within which the fluid continuously streams up on the outside of the cylinder and back down through the middle of the cylinder. This motion occurs as the fluid heats up and before it starts to boil. The B\'{e}nard cells fit together in a hexagonal lattice of vortices, even without partitions to keep the boundaries between the vortices stable. The microscopic movements of the individual molecules of the fluid result in a macroscopic dynamical pattern of the fluid as a whole with the emergent B\'{e}nard cell structure.
The existing complexity and chaos models are classical physics models. In this paper we want to show that a classical physics approach has to be generalized in order to describe emergence in a complete way. The reason is that the emergent structure usually contains states that have a relation of `potential' with respect to relevant observable quantities. We will call such states `potentiality states'. These emergent potentiality states cannot be described in a classical physics approach but need a quantum-like formalism. We know that the appearance of potentiality states is a basic aspect of quantum mechanics. Therefore, in this paper we will demonstrate not only the importance of potentiality states for emergence, but also the advantage of a quantum-like description of emergence versus a classical one. In the examples we will indicate in which way the quantum-like formalism appears in the description. The relevance of quantum aspects in emergence confirms earlier results in which quantum mechanical aspects in the macroscopic world are identified in a more general way \cite{aerts82,AeBroGab2000}.
In physics, the state $q(t)$ of a physical entity $S$ at time $t$ represents the reality of this physical entity at that time $t$.\footnote{In physics `statistical states' are sometimes also just called states. We however will always refer with the concept state to what is called a `pure state'.} In the case of classical physics such a state is represented by a point in phase space, while for quantum physics it is represented by a unit vector in Hilbert space. In classical physics the state $q(t)$ of a physical entity $S$ determines the values for all observable quantities at this time $ t$. Hence it is common in classical physics to characterize the state $q(t)$ by the set of all values of the relevant observable quantities. For example, if the physical entity is a particle $S(classical \; particle)$, then the relevant observable quantities are its position $r(t)$ and momentum $p(t)$ at time $t$. And indeed, for each state $q(t)$ of $S(classical \; particle)$, it is the case that $r(t)$ and $p(t)$ have definite values, which makes it possible to represent $q(t)$ with the couple $(r(t),p(t))$ in phase space. In quantum physics the situation is different. The state $q(t)$ of a quantum particle $S(quantum \; particle)$ is represented by a unit vector $\psi (r,t)$ (the normalized wave function) in a Hilbert space $L_2(\mathbb{R}^3)$. Again, the relevant observable quantities are the position and the momentum. However, neither has definite values for the entity $S(quantum \; particle)$ being in a state $\psi (r,t)$. To be more specific, definite values of position exist only if the wave function is a delta function, and definite values of momentum if the wave function is a plane wave.\footnote{If one chooses as the basis of the Hilbert space an orthonormal set of eigenstates corresponding with the position operator, i.e. the state is expressed as a (wave) function $\Psi(x_1,\ldots,x_n;t)$ of variables given by the coordinates $x_i$.} Apart from the fact that in both cases the wave function is not an element of the Hilbert space $L_2(\mathbb{R}^3)$, and hence should be considered as limiting cases, they never can occur together. This means that a quantum particle never has simultaneously a definite position and a definite momentum, which is referred to as `quantum indeterminism'. It can also be expressed as follows: for a quantum particle in state $\psi (r,t)$ the values of position and momentum are potential. This means that the quantum particle has the potential of realizing them, but they are not actually realized in the state $\psi (r,t)$.
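As a standard textbook illustration of such a potentiality (added here for concreteness), consider an observable $A$ with two orthonormal eigenstates $\psi_1,\psi_2$ and distinct eigenvalues $a_1\neq a_2$. The superposition
\[ \psi=\frac 1{\sqrt 2}(\psi_1+\psi_2) \]
is not an eigenstate of $A$, so the value of $A$ is not actual but potential in the state $\psi$: a measurement of $A$ yields $a_1$ or $a_2$, each with probability $|\langle\psi_i|\psi\rangle|^2=1/2$.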
Previously it has been shown that whether a specific physical entity needs a quantum-like description --- and hence should be called a quantum entity --- or a classical description, depends on the nature of the entity and the relevant observable quantities, and not on the fact that it belongs to the microworld or to the macroworld \cite{aerts82,AeBroGab2000}.
Because of their importance for emergence we will concentrate on the presence of potentiality states and show how many situations containing this quantum aspect can easily be found in the macroscopic world. However, such situations cannot be described by classical physical theories, which identify emergent properties of a specific entity with stable dynamical patterns of that entity. The concept of potentiality state makes it possible to describe another type of emergence, one that is present on an ontological level and not on the dynamical level as in the case of classical emergence.
In standard quantum mechanics a potentiality state related to a specific observable quantity is a superposition state of the eigenstates of this observable quantity. Hence, our potentiality states are superposition states for a situation where standard quantum physics applies. To demonstrate the more general applicability of the concept of potentiality state, we will consider situations that have both quantum and classical aspects and hence are not purely quantum. However for such situations there does not necessarily exist a Hilbert space describing the set of states, as is the case in pure quantum situations, meaning that superposition is not well defined. That is the reason that we use the name potentiality states instead of superposition states for the states that we consider. In the second example of this paper (see section 3), the liar paradox, we will see that the description is fully quantum, such that in this case the potentiality states reduce to superposition states. In the last section we give the example of a team of soccer players to illustrate the difference between emergence due to potentiality states, which is on an ontological level, and classical emergence, defined by dynamical patterns.
\section{Bell Inequalities and Non-classical Emergence}
In this section we consider several examples and analyze in which way the concept of potentiality state appears. We also investigate in which way the presence of potentiality states is linked to the violation of Bell inequalities.
\subsection{Potentiality States in Connected Vessels of Water}
Suppose that we consider two vessels of which one contains 6 liters of water and the other one 14 liters of water. We have at our disposal a third vessel that is empty, but can contain more than 20 liters of water. The entity $ S(water)$ is the water, and we consider the physical quantity which is the volume of the water. Clearly this physical quantity has a definite value for the water that is contained in the two considered vessels, namely 6 liters and 14 liters. This means that the states of water that we consider in both vessels are not potentiality states related to the observable quantity which is the volume.
Suppose now that we take the two vessels and empty them in the third vessel. This third vessel will then contain 20 liters of water, which means that also for this new entity of water the physical quantity volume has a definite value. But by putting the water of the two vessels together, the new entity of water has lost the old properties of 6 liters and 14 liters as actual properties. We could however divide the water again and collect 6 liters in one vessel and 14 liters in the other vessel. This means that potentially the division in 6 liters and 14 liters is still present in the entity of 20 liters of water. But this new state of 20 liters of water contains many more possible subdivisions as potentialities. Also 8 liters and 12 liters, or 11 liters and 9 liters, or 2 liters and 18 liters,$\ldots$ are all potentialities of subdivisions. In general we can say that $x$ liters and $y$ liters of water, such that $x+y=20$ form an infinite continuous set of potential subdivisions. Therefore, from the third vessel, being in the state such that it contains 20 liters, the two subentities can be derived by dividing the 20 liters in the appropriate amounts. The state of 20 liters of water is a potentiality state related to measurements that divide the amount of water in two amounts. Taking into account the measurement that subdivides an amount of water in two, the 20 liters of water, that originated by putting together the original 6 liters and 14 liters has new emergent properties. These properties are described by the potentiality state, that allows the water to be subdivided in all these ways.
If we connect the two original vessels by a tube, we get such a third vessel (see Figure~\ref{mqg01}). We can show that the new properties that this entity has are emergent properties related to measurements that divide the water up again. A criterion that we can use to show the quantum nature of these emergent properties is the violation of Bell inequalities. \begin{figure}
\caption{The vessels of water example violating Bell inequalities. The entity $S$ consists of two vessels containing 20 liters of water that are connected by a tube.}
\label{mqg01}
\end{figure} We will recall in the next section Bell inequalities and then elaborate our connected vessels of water example with the necessary detail such that we can show that Bell inequalities are violated. The vessels of water example was introduced in \cite{aerts82} and elaborated in \cite{aerts85a,aerts85b,AeBroGab2000,aerts92}.
\subsection{Bell Inequalities and the Presence of Quantum Structure}
Violation of Bell inequalities related to the presence of potentiality states can happen in the macroworld as well as in the microworld, depending on the type of states and observable quantities that are considered. Let us first recall some of the most relevant historical results related to Bell inequalities, and show why the violation of Bell inequalities is an experimental indication for the presence of quantum aspects.
In the seventies, a sequence of experiments was carried out to test for the presence of nonlocality in the microworld described by quantum mechanics \cite{clauser76,kas70}, culminating in decisive experiments by Aspect and his team in Paris \cite{aspect81,aspect82}. They were inspired by three important theoretical results: the EPR Paradox \cite{epr35}, Bohm's thought experiment \cite{bohm51}, and Bell's theorem \cite{bell64}. Einstein, Podolsky and Rosen believed to have shown that quantum mechanics is incomplete, in that there exist elements of reality that cannot be described by it \cite{epr35,aerts84,aerts2000}. Bohm took their insight further with a simple example: the `coupled spin-${\frac 12}$ entity' consisting of two particles with spin-${\frac 12}$, of which the spins are coupled such that the quantum spin vector is a non-product vector representing a singlet spin state \cite{bohm51}. Bohm's example inspired Bell to formulate a condition that would test experimentally for incompleteness, the Bell inequalities \cite{bell64}. Bell's theorem states that statistical results of experiments performed on a certain physical entity satisfy his inequalities if and only if the reality in which this physical entity is embedded is local. Experiments performed to test for the presence of nonlocality confirmed the results as predicted by quantum mechanics, such that it is now commonly accepted that the micro-physical world is incompatible with local realism.
Bell inequalities are defined with the following experimental situation in mind. We consider a physical entity $S$, and four experiments $e_1$, $e_2$, $ e_3$, and $e_4$ that can be performed on the physical entity $S$. Each of the experiments $e_i,i\in \{1,2,3,4\}$ has two possible outcomes, respectively denoted $o_i(up)$ and $o_i(down)$. Some of the experiments can be performed together, which in principle leads to `coincidence' experiments $e_{ij},i,j\in \{1,2,3,4\}$. For example $e_i$ and $e_j$ together will be denoted $e_{ij}$. Such a coincidence experiment $e_{ij}$ has four possible outcomes, namely $(o_i(up),o_j(up))$, $(o_i(up),o_j(down))$, $ (o_i(down),o_j(up))$ and $(o_i(down),o_j(down))$. Following Bell \cite{bell64}, we define the expectation values $\hbox{$\mathbb{E}$}_{ij},i,j\in \{1,2,3,4\}$ for these coincidence experiments, as \begin{equation} \begin{array}{ll} \hbox{$\mathbb{E}$}_{ij}= & \left( +1\right) P(o_i(up),o_j(up))+\left( +1\right) P(o_i(down),o_j(down))+ \\ & \left( -1\right) P(o_i(up),o_j(down))+\left( -1\right) P(o_i(down),o_j(up)) \end{array} \end{equation} From the assumption that the outcomes are either $+1$ or $-1$, and that the correlation $\hbox{$\mathbb{E}$}_{ij}$ can be written as an integral over some hidden variable of a product of the two local outcome assignments, one derives Bell inequalities:
\begin{equation}
|\hbox{$\mathbb{E}$}_{13}-\hbox{$\mathbb{E}$}_{14}|+|\hbox{$\mathbb{E}$}_{23}+\hbox{$\mathbb{E}$}_{24}|\leq 2 \label{bellineq}
\end{equation}
To come to the point where we can use the violation of Bell inequalities as an experimental indication for the presence of quantum structure, we have to mention the work of Itamar Pitowsky. Pitowsky proved that if Bell inequalities are satisfied for a set of probabilities connected to the outcomes of the considered experiments, there exists a classical Kolmogorovian probability model. In such a model the probability can be explained as being due to a lack of knowledge about the precise state of the system. If however Bell inequalities are violated, Pitowsky proved that no such classical Kolmogorovian probability model exists \cite{pit89}. Hence violation of Bell inequalities shows that the probabilities that are involved are nonclassical. The only type of nonclassical probabilities that are well known in nature are the quantum probabilities. The probability structure that is present in our examples\footnote{ Except for the liar paradox example, where we derive a pure quantum description and hence the probability model is quantum.} is nonclassical and nonquantum, the classical and quantum probabilities being two special cases of this more general situation.
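
To keep track of these quantities in the examples that follow, the short sketch below (ours, in Python; the function names and the sample outcome probabilities are purely illustrative and are not taken from the cited references) computes the expectation values $\hbox{$\mathbb{E}$}_{ij}$ from the four coincidence probabilities and evaluates the left-hand side of inequality~(\ref{bellineq}); for a local deterministic assignment of outcomes the bound $2$ is respected.
\begin{verbatim}
# Illustrative sketch (ours): expectation values E_ij from coincidence
# probabilities, and the left-hand side of the Bell inequality.
def expectation(p_uu, p_ud, p_du, p_dd):
    # E_ij = (+1)P(up,up) + (+1)P(down,down) + (-1)P(up,down) + (-1)P(down,up)
    return p_uu + p_dd - p_ud - p_du

def bell_lhs(E13, E14, E23, E24):
    return abs(E13 - E14) + abs(E23 + E24)

# One local deterministic assignment: every experiment e_i always gives 'down',
# so each coincidence experiment yields (down,down) with probability 1.
E = expectation(0.0, 0.0, 0.0, 1.0)
print(bell_lhs(E, E, E, E))   # 2.0 <= 2: the inequality is respected
\end{verbatim}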
\subsection{Violation of Bell Inequalities for the Connected Vessels of Water Entity}
Let us consider again the entity $S(connected \; vessels)$ of two vessels connected by a tube and containing 20 liters of transparent water (see Figure~\ref{mqg01}). The entity is in an emergent potentiality state $s$ such that the vessel is placed in the gravitational field of the earth, with its bottom horizontal. To be able to check for the violation of Bell inequalities caused by this potentiality state we have to introduce four measurements, of which some can be performed together.
Let us introduce the experiment $e_1$ that consists of putting a siphon $K_1$ in the vessel of water at the left, taking out water using the siphon, and collecting this water in a reference vessel $R_1$ placed to the left of the vessel. If we collect more than 10 liters of water, we call the outcome $o_1(up)$, and if we collect 10 liters or less, we call the outcome $o_1(down)$. We introduce another experiment $e_2$ that consists of taking with a little spoon, from the left, a bit of the water, and determining whether it is transparent. We call the outcome $o_2(up)$ when the water is transparent and the outcome $o_2(down)$ when it is not. We introduce the experiment $e_3$ that consists of putting a siphon $K_3$ in the vessel of water at the right, taking out water using the siphon, and collecting this water in a reference vessel $R_3$ to the right of the vessel. If we collect 10 liters or more of water, we call the outcome $o_3(up)$, and if we collect less than 10 liters, we call the outcome $o_3(down)$. We also introduce the experiment $e_4$, which is analogous to experiment $e_2$, except that we perform it to the right of the vessel (see Figure~\ref{mqg01}).
The experiment $e_1$ can be performed together with experiments $e_3$ and $ e_4$, and we denote the coincidence experiments $e_{13}$ and $e_{14}$. Also, experiment $e_2$ can be performed together with experiments $e_3$ and $e_4$, and we denote the coincidence experiments $e_{23}$ and $e_{24}$. For the vessel in state $s$, the coincidence experiment $e_{13}$ always gives one of the outcomes $(o_1(up),o_3(down))$ or $(o_1(down),o_3(up))$, since more than 10 liters of water can never come out of the vessel at both sides. This shows that $\hbox{$\mathbb{E}$}_{13}=-1$. The coincidence experiment $e_{14}$ always gives the outcome $(o_1(up),o_4(up))$ which shows that $\hbox{$\mathbb{E}$} _{14}=+1$, and the coincidence experiment $e_{23}$ always gives the outcome $ (o_2(up),o_3(up))$ which shows that $\hbox{$\mathbb{E}$}_{23}=+1$. Clearly experiment $e_{24}$ always gives the outcome $(o_2(up),o_4(up))$ which shows that $\hbox{$\mathbb{E}$}_{24}=+1$. Let us now calculate the terms of Bell inequalities, \begin{equation} \begin{array}{ll}
|\hbox{$\mathbb{E}$}_{13}-\hbox{$\mathbb{E}$}_{14}|+|\hbox{$\mathbb{E}$}_{23}+\hbox{$\mathbb{E}$}_{24}| & =|-1-1|+|+1+1| \\ & =+2+2 \\ & =+4 \end{array} \end{equation} This shows that Bell inequalities are violated. The state $s$, which is a potentiality state related to the measurements that divide up the 20 liters of water in the two reference vessels, is at the origin of this violation.
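
The arithmetic above can be checked with the same kind of sketch (again only an illustration of ours):
\begin{verbatim}
# Deterministic coincidence outcomes of the connected-vessels entity,
# as derived in the text.
E13, E14, E23, E24 = -1.0, +1.0, +1.0, +1.0
print(abs(E13 - E14) + abs(E23 + E24))   # 4.0 > 2: Bell inequalities are violated
\end{verbatim}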
\section{States of Potentiality and the Liar Paradox}
\subsection{The Cognitive Entity of the Liar Paradox}
Another example of an entity for which a description with potentiality states is useful, is given by the double liar paradox. In fact, it will turn out that the double liar paradox entity $S(double \; liar)$ can be given a pure quantum mechanical description. As we shall show below, the potentiality state of this entity will be given by a superposition state of the states of the sub-entities which compose the double liar entity. Before discussing the double liar paradox, let us first consider some non-paradoxical situations: \begin{itemize} \item[(S 0.1)] The sum of two and two is four. \item[(S 0.2)] This page contains 1764 letters. \item[(S 0.3)] The square root of 1764 is 42. \item[(S 0.4)] 6 times 9 yields 42. \end{itemize}
\noindent Obviously, the truth-value of each sentence can be easily determined. The fact that $2+2=4$ can also be expressed by the set of following two sentences: \begin{itemize} \item[(S 1.1)] Sentence (S 1.2) is true. \item[(S 1.2)] The sum of two and two is four. \end{itemize}
\noindent The truth behavior of the entity, consisting of the set of sentences (S 1.1) and (S 1.2), can also be determined rather easily. The truth values of the sentences are coupled, but we do not encounter any paradoxical situation. Paradoxical situations do arise, however, when both sentences refer to the truth value of each other. Let us now consider the double liar paradox, which can be presented in the following way:
\begin{center}
\textsl{`Double Liar'}
(1) \ \ \ sentence (2) is false
(2) \ \ \ sentence (1) is true \end{center}
\noindent Let us describe a typical cognitive interaction that one goes through with these liar paradox sentences. Suppose we hypothesize that sentence (1) is true. We go to sentence (1) and read what is written there. It is written `sentence (2) is false'. From our hypothesis we can infer that sentence (2) is false. Let us go, using this knowledge about the status of sentence (2), to sentence (2). There is written `sentence (1) is true'. From the knowledge that sentence (2) is false, we can infer that sentence (1) is false. This means that from the hypothesis that sentence (1) is true we derive that sentence (1) is false, which is a contradiction. Similarly, starting from the hypothesis that sentence (1) is false one also obtains a contradiction.
In the examples presented in the previous section the presence of potentiality states gives rise to the violation of Bell inequalities, which implies that there is no classical, i.e., Kolmogorovian, representation possible \cite{pit89}. Therefore, in general, potentiality states are nonclassical states. In \cite{AeBroSme99,AeBroSme2000} the entity which is the liar sentence is studied from a similar perspective. In the description of the liar entity we interpret the interaction that a person has with this entity, as described in some detail in the foregoing sections, as a measurement. It is by introducing the concepts of entity and of measurement (or observable quantity) in this operational way that we can find out the nature of this liar entity. It turns out that the liar entity can be described using the formalism of standard quantum mechanics in a complex Hilbert space. Moreover, the self-referential circularity --- more precisely, the truth-value dynamics --- of the liar paradox can be described by the Schr\"{o}dinger equation.
\subsection{Quantum Representation for the Potentiality State of the Liar Paradox} A potentiality state is necessary to represent the state of the entity defined by the liar paradox since its state is neither an eigenstate of `truth' (i.e., the sentence(s) are true) nor an eigenstate of `falsehood' (i.e., the sentences are not true). Instead, the state of the liar entity can be regarded as a potentiality state related to the measurements introduced by the persons that interact cognitively with the liar entity. Since we find a full quantum description in this case, it follows that the potentiality state is a superposition state of both eigenstates, just like the singlet state in the case of two correlated spin-$\frac 12$ entities.
Let us discuss the Double Liar sentences in more detail by considering the following three situations: \[ \mathrm{A}\ \ \left\{ \begin{array}{ll} \mathrm{(1)\ } & \mathrm{sentence\ (2)\ is\ false} \\ \mathrm{(2)\ } & \mathrm{sentence\ (1)\ is\ true} \end{array} \right. \]
\[ \mathrm{B}\ \ \left\{ \begin{array}{ll} \mathrm{(1)\ } & \mathrm{sentence\ (2)\ is\ true} \\ \mathrm{(2)\ } & \mathrm{sentence\ (1)\ is\ true} \end{array} \right. \]
\[ \mathrm{C}\ \ \left\{ \begin{array}{ll} \mathrm{(1)\ } & \mathrm{sentence\ (2)\ is\ false} \\ \mathrm{(2)\ } & \mathrm{sentence\ (1)\ is\ false} \end{array} \right. \] Since truth value of a sentence is binary: either it is true or it is false, we can associate the outcomes of a dichotomic observable (e.g., the outcomes for a spin-$\frac 12$ outcome are either `spin up' or `spin down') with truth and falsehood. Let us make the convention that the `spin up' state $ \left( \begin{array}{c} 1 \\ 0 \end{array} \right) $ corresponds with truth of a sentence and `spin down' state $\left( \begin{array}{c} 0 \\ 1 \end{array} \right) $ with the falsehood of the respective sentence. It turns out that then for the paradoxes of type B and C the sentences can be represented by coupled $\Bbb{C}^2$ vectors. Indeed, since for each measurement the truth values of the two sentences are coupled (B1 true implies B2 true and B1 false implies B2 false, C1 true implies C2 false and vice versa), the dimensionality of {$\Bbb{C}$}$^2\otimes $\thinspace {$\Bbb{C}$}$^2$ is sufficient to represent these entities. Formally, the corresponding quantum mechanical representation would be by a `singlet state' and `triplet state' respectively. In the singlet state the two spin-1/2 particles are anti-alined and in an anti-symmetrical state (entity C), while in a triplet state the two spin-1/2 particles are alined and in a symmetrical state (entity B). In the specific case of (C), and taking into account the anti-symmetric spin analog, the state of the entity $\Psi $ is given by: \[ \frac 1{\sqrt{2}}\left\{ \left( \begin{array}{c} 1 \\ 0 \end{array} \right) \otimes \left( \begin{array}{c} 0 \\ 1 \end{array} \right) -\left( \begin{array}{c} 0 \\ 1 \end{array} \right) \otimes \left( \begin{array}{c} 1 \\ 0 \end{array} \right) \right\} \] Equivalently, the liar entity can be represented by other linear combinations of $\left( \begin{array}{c} 1 \\ 0 \end{array} \right) \otimes \left( \begin{array}{c} 0 \\ 1 \end{array} \right) $ and $\left( \begin{array}{c} 0 \\ 1 \end{array} \right) \otimes \left( \begin{array}{c} 1 \\ 0 \end{array} \right) ,$ provided the coefficients have equal amplitude and the squared amplitudes add to one (such that total probability of finding the entity in one of the two possible states after a measurement equals one). In a similar manner the liar paradox in the `symmetrical' case~(B) can be represented by the triplet state: \[ \frac 1{\sqrt{2}}\left\{ \left( \begin{array}{c} 1 \\ 0 \end{array} \right) \otimes \left( \begin{array}{c} 1 \\ 0 \end{array} \right) +\left( \begin{array}{c} 0 \\ 1 \end{array} \right) \otimes \left( \begin{array}{c} 0 \\ 1 \end{array} \right) \right\} \]
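
For readers who prefer explicit components, the following numerical sketch (ours) constructs the two states in $\Bbb{C}^2\otimes\Bbb{C}^2$ and verifies that they are unit vectors which are respectively anti-symmetric and symmetric under exchange of the two sentences.
\begin{verbatim}
import numpy as np

up, down = np.array([1.0, 0.0]), np.array([0.0, 1.0])        # 'true' / 'false'
psi_C = (np.kron(up, down) - np.kron(down, up)) / np.sqrt(2)  # entity (C)
psi_B = (np.kron(up, up) + np.kron(down, down)) / np.sqrt(2)  # entity (B)

# Operator that exchanges the two C^2 factors.
basis = [up, down]
swap = sum(np.outer(np.kron(a, b), np.kron(b, a)) for a in basis for b in basis)

print(np.linalg.norm(psi_C), np.linalg.norm(psi_B))  # 1.0 1.0
print(np.allclose(swap @ psi_C, -psi_C))             # True: anti-symmetric
print(np.allclose(swap @ psi_B, psi_B))              # True: symmetric
\end{verbatim}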
\noindent The projection operators which make sentence 1, respectively sentence 2, true are: \[ P_{1,true}=\left( \begin{array}{cc} 1 & 0 \\ 0 & 0 \end{array} \right) \otimes \mathbb{1}_2\ \ \ \ \ \ P_{2,true}=\mathbb{1}_1\otimes \left( \begin{array}{cc} 1 & 0 \\ 0 & 0 \end{array} \right) \]
\noindent The projection operators that make the sentences false are obtained by switching the elements $1$ and $0$ on the diagonal of the matrix. These four projection operators represent all possible `logical' interactions (measurement interactions) between the cognitive observer and the liar entity. During the measurement process carried out on the entities (B) and (C), the observer attributes truth-value to the sentences in a repetitive manner: in case of entity (B) it is a repetition of true-states (resp.~false-states) depending on whether an initial true (resp.~false) state was presupposed. In the case of entity (C) it will be an alternation between true-states and false states, no matter which state was presupposed.
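
A short numerical sketch (ours) makes this behavior explicit: with the projectors written above, the joint probability of `sentence 1 true, sentence 2 true' vanishes for entity (C) and equals $1/2$ for entity (B), while the reverse holds for `sentence 1 true, sentence 2 false'.
\begin{verbatim}
import numpy as np

up, down = np.array([1.0, 0.0]), np.array([0.0, 1.0])
I2 = np.eye(2)
P_true, P_false = np.outer(up, up), np.outer(down, down)
P1_true, P2_true = np.kron(P_true, I2), np.kron(I2, P_true)
P1_false, P2_false = np.kron(P_false, I2), np.kron(I2, P_false)

psi_C = (np.kron(up, down) - np.kron(down, up)) / np.sqrt(2)
psi_B = (np.kron(up, up) + np.kron(down, down)) / np.sqrt(2)

def joint_prob(state, P, Q):
    out = Q @ (P @ state)          # the two projectors act on different factors
    return float(out @ out)

print(joint_prob(psi_C, P1_true, P2_true), joint_prob(psi_C, P1_true, P2_false))
# 0.0 0.5 : for (C) the two sentences are never simultaneously true
print(joint_prob(psi_B, P1_true, P2_true), joint_prob(psi_B, P1_true, P2_false))
# 0.5 0.0 : for (B) the truth values are perfectly correlated
\end{verbatim}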
Finally, let us indicate the main points necessary in the derivation of a full description of the original double liar paradox, i.e., case (A), and see what pattern of truth assignments follows during the measurement (we refer to \cite{AeBroSme99} for a more detailed discussion of the derivation of the results). While for cases (B) and (C) the dimensionality of the coupled Hilbert space {$\Bbb{C}$}$^2\otimes $\thinspace {$\Bbb{C}$}$^2$ is sufficient, a space of higher dimension has to be used for case (A). This is due to the fact that no initial state can be found in the restricted space {$ \Bbb{C}$}$^2\otimes $\thinspace {$\Bbb{C}$}$^2$, such that application of the four true--false projection operators results in four orthogonal states respectively representing the four truth--falsehood states. The existence of such a superposition state --- with equal amplitudes of its components --- is required to describe the state of the entity before and after the measurement process. Since the truth-values of the two sentences in the paradox are no longer coupled as was the case for (B) and (C), the dynamical pattern of truth assignment by the observer is no longer a two-step process but a four-step process. Therefore, the entity should be described in a 4-dimensional Hilbert space for each sentence. The Hilbert space needed to describe the Double Liar~(A) is therefore $\Bbb{C}^4 \otimes \Bbb{C}^4$.
The initial unmeasured superposition state --- $\Psi _0$ --- of the Double Liar~(A) is given by the following superposition of the four true--false states:
{\small \[ \frac 12\left\{ \begin{array}{c} \left( \begin{array}{c} 0 \\ 0 \\ 1 \\ 0 \end{array} \right) \otimes \left( \begin{array}{c} 0 \\ 1 \\ 0 \\ 0 \end{array} \right) +\left( \begin{array}{c} 0 \\ 1 \\ 0 \\ 0 \end{array} \right) \otimes \left( \begin{array}{c} 0 \\ 0 \\ 0 \\ 1 \end{array} \right) +\left( \begin{array}{c} 0 \\ 0 \\ 0 \\ 1 \end{array} \right) \otimes \left( \begin{array}{c} 1 \\ 0 \\ 0 \\ 0 \end{array} \right) +\left( \begin{array}{c} 1 \\ 0 \\ 0 \\ 0 \end{array} \right) \otimes \left( \begin{array}{c} 0 \\ 0 \\ 1 \\ 0 \end{array} \right) \end{array} \right\} \] } Each term in this superposition state is the consecutive state which is reached in the course of time, when the paradox is reasoned through. The truth--falsehood values attributed to these states, refer to the chosen measurement projectors.
\noindent Making a sentence true or false in the act of measurement, is described by the appropriate projection operators in $\Bbb{C}^4 \otimes \Bbb{C}^4$. In the case we make sentence 1 (resp. sentence 2) true we get: \begin{eqnarray*} P_{1,true} &=&\left( \begin{array}{cccc} 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 0 \end{array} \right) \otimes \mathbb{1}_2 \\ P_{2,true} &=&\mathbb{1}_1\otimes \left( \begin{array}{cccc} 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 0 \end{array} \right) \end{eqnarray*} The projectors for the false-states are constructed by placing the $1$ on the final diagonal place: \begin{eqnarray*} P_{1,false} &=&\left( \begin{array}{cccc} 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 1 \end{array} \right) \otimes \mathbb{1}_2 \\ P_{2,false} &=&\mathbb{1}_1\otimes \left( \begin{array}{cccc} 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 1 \end{array} \right) \end{eqnarray*}
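
The following sketch (ours) verifies the statement that motivated the move to $\Bbb{C}^4 \otimes \Bbb{C}^4$: applying the four true--false projectors to $\Psi _0$ produces four mutually orthogonal states, each occurring with probability $1/4$.
\begin{verbatim}
import numpy as np

e = [np.eye(4)[i] for i in range(4)]     # standard basis e1,...,e4 of C^4
I4 = np.eye(4)

psi0 = 0.5 * (np.kron(e[2], e[1]) + np.kron(e[1], e[3])
              + np.kron(e[3], e[0]) + np.kron(e[0], e[2]))

P1_true  = np.kron(np.diag([0., 0., 1., 0.]), I4)
P2_true  = np.kron(I4, np.diag([0., 0., 1., 0.]))
P1_false = np.kron(np.diag([0., 0., 0., 1.]), I4)
P2_false = np.kron(I4, np.diag([0., 0., 0., 1.]))

projected = [P @ psi0 for P in (P1_true, P2_true, P1_false, P2_false)]
print([float(v @ v) for v in projected])                 # [0.25, 0.25, 0.25, 0.25]
print(max(abs(float(projected[i] @ projected[j]))
          for i in range(4) for j in range(i + 1, 4)))   # 0.0: mutually orthogonal
\end{verbatim}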
\noindent Starting by making one of the sentences of (A) either true or false, one runs, by logical inference, through the four consecutive states repeatedly. In \cite{AeBroSme99} a continuous parameter $t$ was introduced to reflect the `reasoning time' of the cognitive observer. This made it possible to give a time-ordered description of the cyclic change of state present in the measurement process of the liar paradox. A Hamiltonian $H$ can be constructed such that the unitary evolution operator $U(t)$ --- with $U(t)=e^{-iHt}$ --- describes this cyclic change.
\noindent The dynamical picture of the Double Liar cognitive entity (A) is therefore as follows: when submitted to measurement, the entity starts its truth--falsehood cycle; when left unmeasured the entity remains in the potentiality state. This follows immediately from the fact that the initial state $\Psi _0$ is left unchanged by the dynamical evolution $U(t)$: \[ \Psi _0(t)=\Psi _0 \] because $\Psi _0$, being an eigenstate of the Hamiltonian $H$, is time invariant. Because of this time-independence, the state $\Psi _0$ describes the cognitive entity of the liar paradox (A), regardless of the observer. The highly contextual nature of the Double Liar (A) --- its unavoidable dynamics induced by the measurement process --- implies that intrinsically it cannot expose its complete nature, analogous to the quantum entities of the micro-physical world.
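
The invariance of $\Psi _0$ can be illustrated with a toy check (ours; the unitary below is not the evolution operator constructed in \cite{AeBroSme99}, only a permutation with the same cyclic action on the four product states appearing in $\Psi _0$):
\begin{verbatim}
import numpy as np

e = [np.eye(4)[i] for i in range(4)]
terms = [np.kron(e[2], e[1]), np.kron(e[1], e[3]),
         np.kron(e[3], e[0]), np.kron(e[0], e[2])]
psi0 = 0.5 * sum(terms)

# Permutation unitary sending each term to the next one in the cycle.
U = (np.eye(16)
     - sum(np.outer(t, t) for t in terms)
     + sum(np.outer(terms[(k + 1) % 4], terms[k]) for k in range(4)))

print(np.allclose(U @ U.T, np.eye(16)))   # True: U is unitary (a permutation)
print(np.allclose(U @ psi0, psi0))        # True: Psi_0 is left unchanged by the cycle
\end{verbatim}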
\section{Potentiality States versus Classical Emergence}
\subsection{Potentiality State of a Soccer Team versus its Stable Classical Dynamical Pattern}
Classical emergent properties of a system are defined by dynamical patterns related to its subsystems. Each of these subsystems can be described in a state space, and therefore the emergent properties, which are identified with the collection of the subsystems, can be represented in essentially the same space. When potentiality states are involved, the emergent entity needs to be described in a higher-dimensional state space, as the example of the liar paradox shows, where the potentiality state is a superposition of tensor products of vectors representing the individual sentences. Crucial for the existence of a potentiality state is the contextuality in the measurement process, which causes a `collapse' of the potentiality state into one of the mutually exclusive possible eigenstates. Let us now clarify the main differences between a classical emergent pattern due to dynamical evolution of a complex system, and the new kind of emergence related to the existence of potentiality states, by means of the concrete example of a soccer team.
A simple example of an entity represented with a potentiality state is given by a soccer team, e.g. the team of R.F.C. Anderlecht of Belgium (abbreviated: the A-team), consisting of eleven soccer players. The classical emergent description of such soccer team would involve analyzing the team in terms of the individual motions of the players and would reflect the dynamical pattern which is present during a game: e.g., whether the team as a whole is attacking or defending at one moment in time. The properties of the soccer team are determined by the capabilities of each player (how fast each player can run, how good they can pass the ball etc.), and on the correlation which exists between the players, i.e., how the members of the team play together as a team. Hence, the whole soccer team can be identified with the eleven members of the team and the team can be represented in terms of the descriptions of the individual players: indeed, once the movement of each player is known, the movement of the team as a whole is known too. This is what we would call the team as a `classical' emergent entity. Notice, that when the eleven players have left the field after the game, the entity of the team stops to exist. Indeed, the movements of the eleven players become incoherent after the game: each player leaves to his own home, and until they have to play the next match, they have stopped to be, in classical emergent terms, part of the larger entity `a soccer team'. For instance, one of the players of the A-team could be a foreigner who has to play a game for his native country before the next match of the A-team. Obviously, while he's playing the match for his native country, he cannot be playing a match for the A-team. Therefore, while he's playing for his home country, the classical emergent entity `the A-team' defined by the eleven soccer players does not exhibit the typical dynamical patterns of a soccer team playing a match, and therefore the entity does not exist at that moment in time.
If we elaborate this example in terms of the potentiality state concept, we obtain the following. The state of the entity `the A-team' is defined by the possible instances of the team as a collection of the individual players. By this we mean that when the eleven players are on the (same) soccer field playing a game for the A-team, they are in fact actualizing a potential game, simply by the act of playing the game. The emergent concept of `the A-team' is more than just the collection of the movements of the eleven players. Even when the eleven players are not actually playing a game, they are still a `potential team'. The instance of the team playing a game is created at the moment the A-team is actually playing a game. To make this more clear, let us consider the following situation. The A-team has qualified for the final of the Belgian soccer cup in which they will have to play against the winner of the other semi-final match Bruges-Ghent which is played a day later than the A-team semi-final. Then `the A-team playing the cup final' can be considered as an emergent entity, defined by the eleven players who are preparing themselves for the final. A day later the match Bruges-Ghent will be actually played and the adversary of the A-team in the final will be known. At the moment the final is played, the instance `the A-team is playing' is activated, and potentiality of `the A-team playing a game' collapses into one of two mutually exclusive possibilities, i.e., a final against Bruges or a final against Ghent. Notice that even when the A-team is not actually playing, the emergent concept defined by the potentiality of letting the eleven players play a game is still present. As such, `the A-team playing the final' can be viewed as a potentiality state of two possible instances: one concerns the final against the (let's say defending) team of Bruges, and one against the (let's say attacking) team of Ghent. Depending on which team reaches the final, the A-team will behave differently because they have to choose between different tactics. Nevertheless, before the score of the semi-final Bruges-Ghent is determined, potentially both tactics could be followed. It is not just the state of the A-team which decides how the final will look like, also the adversary will be decisive, and the result of the game Bruges-Ghent is not influenced by the state of the A-team.
The differences between the classical concept of emergence and the emergent behavior which is established by the presence of potentiality states resemble the differences in dynamical evolution of a classical system versus a quantum system. The evolution of a classical system is continuous, and the act of measurement does not influence the results: the measurements are purely non-perturbative observations. As such, the dynamical pattern of the system as a whole is identified by the dynamical evolution of the sub-systems. In other words, the state of the compound system evolves continuously in time.
The dynamical evolution for a quantum particle, upon which no measurements are carried out, is given by the Schr\"{o}dinger equation, which also describes a continuous evolution of the state of the entity. However, standard quantum theory predicts a non-continuous evolution during the measurement process, such that the state of the system changes instantaneously towards one of the possible eigenstates corresponding with the observable. Similarly, the potentiality of the eleven players of the A-team to play a game against Bruges or a game against Ghent defines an emergent entity. Before the result of Bruges-Ghent is known, potentially the A-team can play a game against Ghent, but also potentially against Bruges. Nevertheless, only one of these possible finals can actually be played. Which final will be played, does not depend on the A-team alone, but also on the adversary who manages to qualify: Bruges or Ghent. This dependence on the context (i.e., the semi-final between Bruges and Ghent) causes the non-continuous evolution of `the A-team playing the final' from a potentiality of both games towards one of the specific instances. As such, one can regard the concept of potentiality state as a possible way to generalize the concept of classical emergence to cases where also contextuality is important. Eventually, this should lead to a general theory capable of describing on the one hand the continuous evolution of a system in the absence of contextual interaction and on the other hand the discontinuous evolution of the system during the interaction process with the environment during which in a highly contextual way one of the possibilities present in the potentiality state of the system is effectively actualized.
\subsection{Violation of Bell Inequalities for the Soccer Teams}
Let us now show that also in the case of a soccer team we can define a set of experiments for which Bell inequalities are violated, indicating the presence of a potentiality state. Depending on the context, only one of the mutually exclusive potential outcomes will actually be realized during the experiment. The entity that we consider is the set of 22 soccer players of two teams playing a cup final.
The first experiment $e_1$ consists in letting someone give money to a player of team A such that he will cause his team to lose the cup final. If his team actually loses the cup final, the experiment $e_1$ is said to yield the outcome $o_1(up)$, if team A wins the final, the experimental outcome is $o_1(down)$. Taking into account that the final is played until one of the two teams wins (e.g., in the case of a draw the final could be decided with a number of penalties), the experiment will always give one of these two possible outcomes. The second experiment $e_2$ consists in looking whether the referee gives a player of team A a yellow card or not. If he actually gives a yellow card to at least one player of team A, the outcome is $o_2(up) $, and $o_2(down)$ otherwise. The experiment $e_3$ consists in letting someone give money to a player of team B such that he will cause his team to lose the cup final. If team B actually loses the cup final, the experiment $ e_3$ is said to yield the outcome $o_3(up)$, if team B wins the final, the experimental outcome is $o_3(down)$. The fourth and last experiment $e_4$ consists in looking whether the referee gives a player of team B a yellow card. If he actually gives a yellow card to at least one player of team B, the outcome is $o_4(up)$, and $o_4(down)$ otherwise. Finally, we assume that the referee has a bad character such that during the final he will definitely give a yellow card to at least one player of team A and one player of team B. Let us now look at the coincidence experiments $e_{13}$, $ e_{14}$, $e_{23}$ and $e_{24}$. The experiment $e_{14}$ consists in giving a player of team A money with the aim of letting team A lose the final, and looking whether a player of team B has received a yellow card. Of course, even if the other players of team A are playing very good, the bribed player can make intentionally mistakes like own-goals etc.~such that even in that case team A loses the final. Therefore, the experiment $e_{14}$ gives the outcome $(o_1(up),o_4(up))$ and the expectation value $\hbox{$\mathbb{E}$} _{14}=+1.$ Similarly, we obtain that $\hbox{$\mathbb{E}$}_{23}=+1.$ Also, due to our assumption about the bad-character referee, we can deduce that the coincidence experiment $e_{24}$ yields always the outcome $(o_2(up),o_4(up))$ such that the expectation value is given by $\hbox{$\mathbb{E}$}_{24}=+1.$ Finally, let us look at the coincidence experiment $e_{13}.$ In this case, both a player of team A and a player of team B will receive money to let their respective team lose the cup final. However, only one of the two teams can actually lose the final. Therefore, the coincidence experiment $ e_{13}$ can only yield the outcome $(o_1(up),o_3(down))$ or $ (o_1(down),o_3(up)),$ resulting in an expectation value $\hbox{$\mathbb{E}$} _{13}=-1.$ Let us now calculate the terms of Bell inequalities,
\[ \begin{array}{ll}
|\hbox{$\mathbb{E}$}_{13}-\hbox{$\mathbb{E}$}_{14}|+|\hbox{$\mathbb{E}$}_{23}+\hbox{$\mathbb{E}$}_{24}| & =|-1-1|+|+1+1| \\ & =+2+2 \\ & =+4 \end{array} \] and it follows that Bell inequalities are violated. This violation is due to the explicit contextuality of the experimental outcomes. To make this more clear, we could also specify in the definition of the experiments $e_1$ and $e_3$ the amount of money which is given to the players. For instance, if we specify that in experiment $e_1$ the bribed player is very poor, and the amount of money is to be one billion dollars, then he will probably do everything possible in order to let his team lose the final. If, on the other hand, the player in experiment $e_3$ is already rich, and the amount of money is only 100,000 dollars, he will probably not do such extreme things like making a lot of own-goals etc., which the other, poor player will probably do to earn his billion dollars. This shows that also in the case of deterministic coincidence experiments ($e_{13}$ always yielding the outcome $(o_1(up),o_3(down))$) one can violate Bell inequalities.
The reason why Bell-inequalities are also violated in the deterministic case is situated in the fact that only one of two possible but mutually exclusive situations can occur (i.e., only one of the two teams can lose the cup final), and that before the experiments are actually performed both experimental outcomes are possible. It is the contextual nature of the experiments which defines which of the possible experimental results will actually occur. This shows that the emergent phenomenon of two teams playing the cup final should be described with a potentiality state, such that in one of the possible cases team A loses the final, and in the other team B loses the final. Depending on which experiment is actually performed (which player is given what amount of money), the actually played final will be different. Therefore, the final cannot be regarded only as the stable dynamical pattern exhibited by the two teams of soccer players playing a game.
\section{Conclusions}
The above examples --- the `vessels of water', `liar paradox' and `the soccer team' --- illustrate the various ways in which potentiality states can be identified in reality. The emergent properties which the potentiality states define depend on the particular nature of the entity. For the vessels of water example, the appearance of potentiality states indicates the presence of a particular quantum aspect, which can be demonstrated by the violation of Bell inequalities. For the liar paradox example we have worked out a full and detailed quantum description which shows the pure quantum mechanical nature of the example. The potentiality states in this case are superposition states. We mention that the example of the liar paradox shows that the reality of conceptual space is quite different from a large part of the macroscopic world. It seems that it contains `Hilbert space-like' features, which makes it quantum-like. We have shown with the example of a soccer team that the emergence due to potentiality states has a quite different status from that of the emergent dynamical patterns that are identified in classical physics. The emergent potentiality states are ontological states within the formalism, and not connected to dynamical patterns alone. Also in the case of the soccer teams playing a match, one can define experiments such that Bell inequalities are violated, indicating the contextual nature of the defined experiments for these entities.
\noindent {\bf Acknowledgments}
\noindent The authors would like to acknowledge the support by the Fund for Scientific Research--Flanders (Belgium)(F.W.O.--Vlaanderen).
\small
\end{document}
\begin{document}
\title{\bf On a double integral of a product of Legendre polynomials}
\begin{abstract} \noindent We calculate a double integral over a product of Legendre polynomials multiplied by a binomial raised to a power. \end{abstract}
During the calculation of the electromagnetic self-force of a uniformly charged spherical ball, we encountered the integral \begin{align}\label{1} I=\int_0^{\pi}d \theta \sin \theta \int_0^{\pi}d \theta' \sin \theta' (\cos \theta - \cos \theta')^{2n} P_l(\cos \theta) P_l(\cos \theta'), \end{align} where $n$ and $l$ are integer positive numbers and $P_l$ is a Legendre polynomial of order $l$. As far as we know, this integral was not calculated in closed form anywhere in the literature. We calculate it here.
After changing the variables $\cos \theta \rightarrow x$, $\cos \theta' \rightarrow y$ this integral becomes \begin{align}\label{2} I=\int_{-1}^1 dx \int_{-1}^1 dy \;(x-y)^{2n} P_l(x) P_l(y). \end{align} We first perform the integral $\int_{-1}^1 dx (x-y)^{2n} P_l(x).$ For this, we can use {\bf 7.228} and {\bf 8.703} from \cite{gra}. Combining these two equations that read \begin{align}
\frac{1}{2}\Gamma(1+\mu) \int_{-1}^1 P_l(x) (z-x)^{-\mu-1}= (z^2-1)^{- \frac{\mu}{2}} e^{-i \pi \mu} Q_l^{\mu}(z), l=0,1,\dots, |arg(z-1)|<\pi, \end{align} \begin{align} Q_{\nu}^{\mu}(x)= \frac{e^{i\pi \mu} \Gamma(\nu+\mu+1) \Gamma\left(\frac{1}{2}\right)}{2^{\nu+1} \Gamma\left( \nu + \frac{3}{2}\right)} (x^2-1)^{\frac{\mu}{2}} x^{-\nu-\mu-1} {}_2F_{1}\left(\frac{\nu+ \mu +2}{2}, \frac{\nu+\mu+1}{2}; \nu+\frac{3}{2}; \frac{1}{x^2}\right), \end{align} after using \[ \frac{\Gamma(l+\mu+1)}{\Gamma(\mu+1)} =(\mu +1)_l\; \text{and} \; \Gamma\left( l+\frac{3}{2}\right)= \frac{\sqrt{\pi} (l+1)_{l+1}}{2^{2l+1}},\] one obtains for $\mu=-1-2n$ \begin{align} \int_{-1}^1 dx \, (x-y)^{2n}P_l(x) = \frac{(-2n)_l \,2^{l+1} y^{2n-l}}{(l+1)_{l+1}} {}_2F_1 \left( \frac{l}{2}-n+\frac{1}{2}, \frac{l}{2}-n; l+\frac{3}{2}; \frac{1}{y^2} \right). \end{align} The same result given in Eq. (5) can be obtained by putting $a=1$, $m=0$ and $p=-2n$ in Eq. {\bf 2.17.4(5)} from \cite{pru} \begin{align} \int_{-a}^a dx \,\frac{(a^2-x^2)^{\frac{m}{2}}}{(x-y)^p} P_l^m \left( \frac{x}{a} \right)= \frac{2 (-1)^{m-1}(l+m)!}{(p-1)! (l-m)!} (y^2-a^2)^{\frac{m-p+1}{2}} Q_l^{p-m-1} \left( \frac{y}{a}\right), \end{align} although this equation is given in \cite{pru} as being valid only for $p=0,1, \dots$.
The same result given in Eq. (5) can be obtained by direct calculation, by using the finite series representation of the Legendre polynomials \cite{rai} \begin{align} P_l(x)= \sum_{k=0}^{[l/2]} \frac{(-1)^k \left( \frac{1}{2}\right)_{l-k} (2x)^{l-2k}}{k!(l-2k)!} \end{align} and the binomial expansion for $(x-y)^{2n}$ \begin{align} (x-y)^{2n}= \sum_{p=0}^{2n} \frac{(-1)^p (2n)!}{p!(2n-p)!} x^p y^{2n-p}, \end{align}
and integrating the resulting double sum term by term. For $l$ odd, after noting that the term by term integration gives a non-zero result only for $p$ odd and changing the summation index $p \rightarrow 2p+1$, one obtains \begin{align} \int_{-1}^1 dx \,(x-y)^{2n} P_l(x)= -2^{l+1} \sum_{k=0}^{[l/2]} \sum_{p=0}^{n-1} \frac{(-1)^k \left( \frac{1}{2} \right)_{l-k}(2n)! y^{2n-2p-1}}{k! (l-2k)!2^{2k}(2p+1)!(2n-2p-1)!}\nonumber \\ \cdot \frac{1}{(l-2k+2p+2)}. \end{align} Writing all the factorials in terms of Pochhammer symbols, the above summation over $k$ can be done as follows \begin{align} &\sum_{k=0}^{\frac{l-1}{2}} \frac{(-1)^k \left( \frac{1}{2}\right)_{l-k}}{k!(l-2k)!2^{2k}(l+2p+2-2k)} \nonumber \\ &= \frac{\left( \frac{1}{2}\right)_l}{\Gamma(1+l) (l+p+2)} {}_3F_2 \left( -\frac{l}{2}+\frac{1}{2}, -\frac{l}{2}, -\frac{l}{2}-p-1; \frac{1}{2}-l, -\frac{l}{2}-p,1\right)\nonumber \\ &= \frac{\left(\frac{1}{2}\right)_l \left( \frac{1-l}{2} \right)_{\frac{l-1}{2}} (-p)_{\frac{l-1}{2}}}{l! \,(l+2p+2) \left(\frac{1}{2}-l\right)_{\frac{l-1}{2}} \left(-\frac{l}{2}-p\right)_{\frac{l-1}{2}}}, \end{align} where, when we passed from the second to the third line of the above equation, we used equation {\bf 7.4.4 (81)} from \cite{pru}. We note that, because of the Pochhammer symbol $(-p)_{\frac{l-1}{2}}$, the r.h.s. of Eq. (10) is different from zero only for $p \ge \frac{l-1}{2}$. Introducing Eq. (10) in Eq. (9) and changing the summation index $p\rightarrow i, \, p-\frac{l-1}{2}=i$, the resulting summation over $i$ can be done immediately and one obtains again the result of Eq. (5). The case $l$ even can be considered similarly.
Returning now to Eq. (2), after using Eq. (5) one obtains \begin{align}\label{7} I=\frac{(-2n)_l \;2^{l+1}}{(l+1)_{l+1}} \int_{-1}^1 dy\; y^{2n-l} P_l(y)\; {}_2F_1 \left(\frac{l}{2}-n, \frac{l+1}{2}-n, l+\frac{3}{2};\frac{1}{y^2} \right). \end{align} Note that, for $l \le 2n$, the hypergeometric function in Eq.(\ref{7}) is, in fact, a finite series, because $l/2-n$ and $(l+1)/2-n$ are negative integers when $l$ is even and odd respectively. Again, we consider the cases $l$ even and $l$ odd separately.
For $l$ even, using the definition of the Gauss hypergeometic function, we have \begin{align}\label{8} {}_2F_1 \left(\frac{l}{2}-n, \frac{l+1}{2}-n, l+\frac{3}{2};\frac{1}{y^2} \right)= \sum_{k=0}^{n-\frac{l}{2}} \frac{ \left( \frac{l}{2}-n \right)_k \left(\frac{l+1}{2}-n \right)_k}{k! \left( l+\frac{3}{2} \right)_k} \frac{1}{y^{2k}}. \end{align} From Eqs. (\ref{7}), (\ref{8}), one obtains \begin{align}\label{9} I= \frac{2 (-2n)_l2^{l+1}}{(l+1)_{l+1}} \sum_{k=0}^{n-\frac{l}{2}} \frac{ \left( \frac{l}{2}-n \right)_k \left(\frac{l+1}{2}-n \right)_k}{k! \left( l+\frac{3}{2} \right)_k} \int_0^1 dy \; y^{2n-2k-l} P_l(y), \end{align} where we used the fact that the integrand in the r.h.s. of Eq. (\ref{9}) is an even function, because $P_l(-y)=(-1)^l P_l(y)$ \cite{gra}. The integral in Eq. (\ref{9}) can be performed using ({\bf 2.17.1(4)} from \cite{pru} or {\bf 7.126(2)} from \cite{gra} ) \begin{align}\label{p2} \int_0^1dx \; x^{\sigma} P_{\nu}(x)= \frac{\sqrt{\pi} \;2^{-\sigma-1} \Gamma(1+\sigma)}{\Gamma \left(1+\frac{\sigma-\nu}{2} \right) \Gamma \left( \frac{\sigma+\nu+3}{2} \right)}. \end{align} One obtains \begin{align}\label{11} I=\frac{\sqrt{\pi} \;(-2n)_l \;2^{2l-2n+1}}{(l+1)_{l+1}} \sum_{k=0}^{n-\frac{l}{2}} \frac{2^{2k} \left( \frac{l}{2}-n \right)_k \left( \frac{l+1}{2}-n \right)_k \Gamma(2n-2k-l+1)}{k! \left( l+\frac{3}{2} \right)_k \Gamma(1+n-k-l) \Gamma \left( n-k+\frac{3}{2} \right)}. \end{align} Using the definition of the Pochhammer symbol \cite{pru} \[ (a)_k= \frac{\Gamma(a+k)}{\Gamma(a)}= (-1)^k \frac{\Gamma(1-a)}{\Gamma(1-a-k)}, \] and \cite{pru} \[(a)_{2k}= \left( \frac{a}{2} \right)_k \left( \frac{a+1}{2} \right)_k 2^{2k}, \] we write the Gamma functions in Eq. (\ref{11}) as follows \begin{align} & \Gamma(2n-2k-l+1)= \frac{\Gamma(2n-l+1)}{\left( \frac{l}{2}-n \right)_k \left( \frac{l}{2}-n+\frac{1}{2}\right)_k2^{2k}}, \nonumber\\ &\Gamma(1+n-l-k)= (-1)^k\frac{\Gamma(1+n-l)}{(l-n)_k},\nonumber \\ &\Gamma \left(n+\frac{3}{2}-k \right)= (-1)^k \frac{\Gamma \left( n+\frac{3}{2}\right)}{\left( -n-\frac{1}{2}\right)_k}. \end{align} Introducing (16) in (15), one obtains \begin{align} I=\frac{\sqrt{\pi} \;(-2n)_l\; 2^{2l-2n+1}(2n-l)!}{ (l+1)_{l+1} (n-l)! \left( n+\frac{1}{2} \right)!} {}_2F_1 \left(-n-\frac{1}{2}, l-n; l+\frac{3}{2}; 1 \right). \end{align} But the Gauss hypergeometric function of unit argument can be written as ({\bf 7.3.5(2)} \cite{pru}) \begin{align} {}_2F_1(a,b;c;1)=\frac{\Gamma(c) \Gamma(c-a-b)}{\Gamma(c-a) \Gamma(c-b)}, \end{align} and we obtain \begin{align}\label{17} I=\frac{ \sqrt{\pi}\; (-2n)_l \;2^{2l-2n+1} (2n-l)! \left(l+\frac{1}{2}\right)! (2n+1)!}{ (l+1)_{l+1} (n-l)! \left(n+\frac{1}{2}\right)! (l+n+1)! \left(n+ \frac{1}{2} \right)!}, \end{align} where we use the notation $\Gamma(z)=(z-1)!$ both for integer and noninteger $z$. Using the definition of the Pochhammer symbol and \cite{pru} \begin{align} \frac{\Gamma(2z)}{\Gamma(z)}=\frac{2^{2z-1}}{\sqrt{\pi}} \Gamma \left( z+\frac{1}{2} \right), \end{align} we can write \begin{align}\label{19} &(-2n)_l= (-1)^l \frac{\Gamma(1+2n)}{\Gamma(1+2n-l)},\nonumber \\ & (l+1)_{l+1}=\frac{2^{2l+1}}{\sqrt{\pi}} \Gamma \left(l+\frac{3}{2} \right),\nonumber \\ & \frac{(2n)!}{\left( n+\frac{1}{2}\right)!}=\frac{2^{2n+1}\Gamma(n+1)}{\sqrt{\pi} (2n+1)},\\ & \frac{(2n+1)!}{\left( n+\frac{1}{2}\right)!}= \frac{2^{2n+1}\Gamma(n+1)}{\sqrt{\pi} }. \nonumber \end{align} Introducing (\ref{19}) in (\ref{17}), one obtains for $l$ even \begin{align} I= \frac{(-1)^l 2^{2n+2} (n!)^2}{(2n+1) (n-l)! (n+l+1)!}. 
\end{align} Note that, because of $(n-l)!$ from the denominator, this result is different from zero only for $n \ge l$. A similar calculation can be done for $l$ odd, and one obtains the same result. So, our final result for the integral (\ref{1}) is \begin{equation} I= \left\lbrace\begin{array}{c}
\frac{(-1)^l 2^{2n+2} (n!)^2}{(2n+1) (n-l)! (n+l+1)!}, n \ge l\\
0, n<l
\end{array}
\right.. \end{equation}
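
As a sanity check, the closed form above can be compared with direct numerical integration; the short script below (ours, using SciPy; the function names are of course arbitrary) reproduces the same values for small $n$ and $l$.
\begin{verbatim}
import math
from scipy.integrate import dblquad
from scipy.special import eval_legendre

def I_numeric(n, l):
    # Direct numerical evaluation of the double integral (2).
    f = lambda y, x: (x - y)**(2*n) * eval_legendre(l, x) * eval_legendre(l, y)
    val, _ = dblquad(f, -1.0, 1.0, lambda x: -1.0, lambda x: 1.0)
    return val

def I_closed(n, l):
    # The closed form derived above.
    if n < l:
        return 0.0
    return ((-1)**l * 2**(2*n + 2) * math.factorial(n)**2
            / ((2*n + 1) * math.factorial(n - l) * math.factorial(n + l + 1)))

for n in range(1, 4):
    for l in range(0, 4):
        print(n, l, round(I_numeric(n, l), 8), round(I_closed(n, l), 8))
\end{verbatim}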
\end{document}
\begin{document}
\title
[Chemical distance exponent]
{\large On the chemical distance exponent for the two-sided level-set of the 2D Gaussian free field} \author{Yifan Gao and Fuxi Zhang}
\address {Yifan Gao\\ School of Mathematical Sciences\\ Peking University\\ Beijing, China, 100871} \email{[email protected]}
\address {Fuxi Zhang\\
School of Mathematical Sciences\\
Peking University\\
Beijing, China, 100871} \email{[email protected]}
\subjclass[2010]{Primary 60K35, 60G60.}
\keywords{Gaussian free field, percolation, chemical distance.}
\begin{abstract}
In this paper we introduce the two-sided level-set for the two-dimensional discrete Gaussian free field. Then we investigate the chemical distance for the two-sided level-set percolation. Our result shows that the chemical distance should have dimension strictly larger than $1$, which in turn stimulates some tempting questions about the two-sided level-set. \end{abstract}
\maketitle
\section{Introduction}
The discrete Gaussian free field (DGFF) in $\mathbb Z^d, d\ge 3$ is a Gaussian random field with mean zero and covariance given by the Green's function. As a ``strongly'' correlated random field, the level-set percolation for the DGFF in three dimensions or higher has been extensively studied and shown to exhibit a non-trivial phase transition by a series of work \cite{MR914444,MR3053773,MR3339867,MR3843421,duminil2020equality}. More precisely, there exists a critical level $0<h_*(d)<\infty$ such that if $h<h_*(d)$, the level-set (a.k.a. excursion set, the random set of points whose value is greater than or equal to $h$) has a unique infinite cluster; if $h>h_*(d)$, the level-set has only finite clusters.
In this paper, we focus on the two-dimensional DGFF (also called harmonic crystal). However, in two dimensions, we cannot define the DGFF in the whole discrete plane since the two-dimensional Green's function blows up, while one can take the scaling limit (the lattice spacing is sent to $0$ while the domain is fixed) to get the continuum Gaussian free field. It is then not possible to investigate the level-set percolation in $\mathbb Z^2$ directly as in $\mathbb Z^d, d\ge 3$. The right way is to take a big discrete box $V_N$ of side length $N$ and define the two-dimensional DGFF on $V_N$ as a centered Gaussian process with covariance given by the Green's function on $V_N$. Then one can study the connectivity properties of the level-set on $V_N$ as $N$ goes to infinity. However, as shown in \cite{MR3800790}, for any level $h\in\mathbb R$, the level-set above $h$ crosses a macroscopic annulus in $V_N$ with non-vanishing probability as $N$ goes to infinity, which suggests that in some sense there is no non-trivial phase transition for the two-dimensional level-set percolation. Furthermore, the chemical distance (intrinsic distance) between the two boundaries of a macroscopic annulus is bounded from above by $N(\log N)^{1/4}$ \cite{MR3800790,MR4112719}. Roughly speaking, the chemical distance has dimension $1$. This inspires us to ask whether, once we truncate the DGFF from two sides to obtain the so-called two-sided level-set of this paper, the chemical distance will be of dimension strictly larger than $1$. Our main result below answers this affirmatively in the sense that if there exists a macroscopic (nearest-neighbor) path inside the two-sided level-set, its length must be greater than $N^{1+\varepsilon}$ for some $\varepsilon>0$. Although at present we are not able to show that there could exist some macroscopic path inside the two-sided level-set (this is not obvious, see Question~\ref{que:q1} below), our result suggests an expected fractal structure for the two-sided level-set, which is drastically different from the (one-sided) level-set.
Next, we introduce our model and then state our main result. For each positive integer $N$, let {$V_{N}=[-N/2,N/2]^2\cap \mathbb Z^2$}. Denote by $\{ \eta^{V_{2N}}(v): v\in V_{2N} \}$ the discrete Gaussian free field (DGFF) on $V_{2N}$ with Dirichlet boundary conditions, which is a mean-zero Gaussian process, vanishing on the boundary, with covariance given by \[ \mathbb E \eta^{V_{2N}} (u) \eta^{V_{2N}} (v) = G_{2N} (u,v) \quad \text{ for } u,v \in V_{2N}, \] where $G_{2N}(u,v)$ is the Green's function of the two-dimensional simple random walk in $V_{2N}$. As usual, we restrict ourselves to consider the DGFF $\eta^{V_{2N}}$ on $V_N$ to avoid boundary issues (This can be made more general, see Remark~\ref{rem:boundary-issue}). Suppose $\lambda > 0$. Let \begin{equation}
{\Lambda_{N,\lambda}} := \{ v\in V_N: |\eta^{V_{2N}}(v)| \le \lambda \}. \end{equation}
We say that a vertex $v\in V_N$ is $\lambda$-open if $\left|\eta^{V_{2N}}(v)\right|\le\lambda$, and interpret $\Lambda_{N, \lambda}$ as the ``two-sided'' level set. Let
\begin{equation} \label{Eq.defnPke}
{\mathcal{P}_{N}^{\kappa, \epsilon} }= \left\{P: P \text { is a path in } V_{N},\|P\| \geq \kappa N, \text { and }|P| \leq N^{1+\epsilon}\right\},
\end{equation}
where $\|P\|=\|x-y\|$ if $P$ is a path from $x$ to $y$ and $|P|$ is the length of $P$. We say that $P$ is $\lambda$-open if so is every vertex in $P$. Our main result is the following theorem.
\begin{thm}\label{thm:1.1}
For each $\lambda>0$, there exists $\epsilon=\epsilon(\lambda)>0$ such that for every $\kappa\in (0,{1})$,
\begin{equation}\label{eq:complement-event} \lim _{N \rightarrow \infty}\mathbb{P}\big( P \text { is } \lambda\text{-open for some } P \in {\mathcal{P}_{N}^{\kappa, \epsilon} }\big)=0. \end{equation}
\end{thm}
\begin{rem}\label{rem:lambda_0}\label{rem:boundary-issue}
The choice of working with $V_{2N}$ is for convenience; the above theorem holds with { $V_{2N}$ and $V_{N}$ being respectively replaced with $V_{N}$ and $V_{\delta N}$} for any fixed $0<\delta<1$. \end{rem}
\begin{rem}
Theorem~\ref{thm:1.1} also holds for the Gaussian free field on metric graphs since percolation on the metric graph is dominated by the percolation on the integer lattice. \end{rem}
\begin{rem}
Note that it suffices to show that \eqref{eq:complement-event} holds for large $\lambda$, since the event is increasing in $\lambda$. In fact, we also obtain some quantitative results: namely, one can take $\varepsilon(\lambda)=e^{-a\lambda^2}$ for some absolute constant $a>0$ (see \eqref{eq:eps-lambd}), and the probability in \eqref{eq:complement-event} decays faster than $N^{-c\lambda^{-2}}$ for some absolute constant $c>0$ (see \eqref{eq:decay-rate}). \end{rem}
\begin{rem}
Furthermore, our method is still effective if $\lambda$ depends on $N$. For example, taking $\lambda=\lambda_N=\sqrt{(2a)^{-1}\log\log N}$, Theorem~\ref{thm:1.1} shows that any $\lambda_N$-open path with macroscopic distance must have length at least $N(\log N)^{1/2}$. On the other hand, it has been shown in \cite[Theorem 2]{MR1880237} that the maximum of the DGFF is at most $2\sqrt{\frac{2}{\pi}}\log N$ with probability tending to $1$; see \cite{MR4043225} for more about the level-set at heights proportional to the absolute maximum. By symmetry, if we take $\lambda_N=2\sqrt{\frac{2}{\pi}}\log N$, then all points are $\lambda_N$-open with overwhelming probability. Our result raises the tempting question of determining the borderline with respect to $\lambda_N$ for linear growth of the chemical distance. \end{rem}
\begin{ques}\label{que:q1}
Does there exist a large constant $\lambda>0$ such that with non-vanishing probability there exists a $\lambda$-open path $P$ in $V_N$ with $\|P\| \geq \kappa N$? \end{ques}
Let $p(\lambda, N)$ denote the probability of the above event. Here, ``non-vanishing'' means that $\inf_{N}p(\lambda, N)>0$. It is easy to see that if $\lambda$ is sufficiently small that $\rho:=\mathbb P(|Z(0,4)|\le\lambda)<\frac14$, where $Z(0,4)$ is a Gaussian random variable with mean $0$ and variance $4$, then $p(\lambda, N)\le N^2(4\rho)^{\kappa N}$, which vanishes exponentially fast in $N$. However, showing that this is not the case for a large constant $\lambda$ is quite non-trivial. To the best of our knowledge, this question has not been answered yet. We expect that there exists a non-trivial ``phase transition'' for two-sided level-set percolation. We would like to mention that recently the authors of \cite{ding2020crossing} showed that the probability of the (one-sided) level-set crossing a rectangle is bounded away from $0$ and $1$. Another closely related work in this direction is \cite{MR3163210}, in which the ``two-sided'' level-set in $\mathbb Z^d$, $d\ge 3$, is defined as the random set of points whose absolute value is larger than $h$ (note that this is contrary to our convention for the two-sided level-set), and the associated critical value is proved to be finite for all $d\ge 3$.
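Returning to the elementary bound $p(\lambda, N)\le N^2(4\rho)^{\kappa N}$ quoted above, the following minimal numerical sketch (purely illustrative) evaluates how small $\lambda$ has to be for the condition $\rho=\mathbb P(|Z(0,4)|\le\lambda)<\frac14$ to hold, searching on a grid of step size $0.001$ (an arbitrary choice); the resulting threshold is roughly $0.64$.
\begin{verbatim}
from math import erf, sqrt

# rho(lam) = P(|Z(0,4)| <= lam) for a centered Gaussian with variance 4,
# i.e. standard deviation 2.
def rho(lam, std=2.0):
    return erf(lam / (std * sqrt(2.0)))

# Largest lambda on the grid with rho(lambda) < 1/4, so that the bound
# N^2 (4 rho)^{kappa N} above vanishes exponentially fast in N.
lam = 0.0
while rho(lam + 0.001) < 0.25:
    lam += 0.001
print(f"threshold ~ {lam:.3f},  rho = {rho(lam):.4f}")
\end{verbatim}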
\begin{ques}
Is there a way to take a scaling limit of the two-sided level-set? \end{ques} This might be reminiscent of the Schramm-Sheffield contour line of the DGFF in \cite{MR2486487}, which is shown to converge in distribution to $\mathrm{SLE}_4$, as well as of the bounded-type thin local sets (BTLS) constructed in \cite{MR3936643} and the first passage set (FPS) introduced in \cite{MR4091511}. Dynkin's isomorphism enables us to relate the absolute value of the DGFF to the occupation field generated by the random walk loop soup (see \cite{MR2815763,MR3502602}), so all the above questions can also be understood in terms of loop-soup percolation with respect to the occupation field (see \cite{MR2979861,MR3547746,MR3941462} for more related works on loop soups). We hope to find some appealing connections between the two-sided level-set and the objects mentioned above.
\subsection{Background}\label{subsec:background}
The two-dimensional Gaussian free field (GFF) is an important object in statistical physics and the theory of random surfaces \cite{MR2322706}. As the analog of Brownian motion with a two-dimensional time parameter, it exhibits fractal structures in many respects \cite{MR2486487,MR3947326,MR4019914,MR4076090}. From the perspective of level-set percolation of the DGFF, we will focus on the chemical distance, which plays a crucial role in the study of the fractal structure of clusters in percolation theory.
Roughly speaking, the chemical distance is the graph distance on the induced (random) subgraph in some probability models. For instance, the chemical distance for classical percolation models can be defined as the length of the shortest path inside open clusters \cite{MR750568,havlin1985chemical}. However, estimating the chemical distance is quite difficult and often requires a subtle analysis of the structure of the shortest path. In particular, for the two-dimensional critical Bernoulli percolation model, physicists expect that there exists an exponent $d_{\mathrm{min}}$ such that \begin{equation} \ell\sim r^{d_{\mathrm{min}}}, \end{equation} where $r$ and $\ell$ are respectively the Euclidean distance and the chemical distance between two vertices $x$ and $y$, and only the case where $x$ and $y$ lie in the same open cluster (so that $\ell<\infty$) is considered. However, giving a rigorous meaning to the equivalence above, i.e., to the precise meaning of ``$\sim$'', remains an open problem \cite{MR2334202,MR3698744}. It is expected in the physics community that $d_{\mathrm{min}}$ is universal in the sense that it does not depend on the choice of vertices or the type of lattice. In \cite{MR1712629}, Aizenman and Burchard show that $\ell\ge r^{\eta}$ for some $\eta>1$, which implies that $d_{\mathrm{min}}>1$. Furthermore, upper bounds on the chemical distance can be obtained by comparing the shortest horizontal crossing with the lowest crossing \cite{MR3698744,damron2017strict}. Specifically, for critical Bernoulli bond percolation on the edges of a box of side length $n$, the expected chemical distance between the left and right sides of the box is $O(n^{2-\delta}\pi_3(n))$ for some $\delta>0$, where $\pi_3(n)$ is the three-arm probability to distance $n$ \cite{damron2017strict}.
Additionally, in the subcritical and supercritical regimes of Bernoulli percolation in dimension $d\ge2$, the chemical distance is comparable to the Euclidean distance \cite{MR762034,MR1404543,MR1068308,MR2319709}. In the critical case in high dimensions, it has been shown that macroscopic connecting paths have dimension $2$ \cite{MR2551766,MR2748397,MR3224297}.
We next turn to some correlated percolation models, which have been intensively studied recently \cite{biskup2004scaling,MR2915665,MR3692311,MR3800790}. In the supercritical case, for a general class of percolation models on $\mathbb Z^d$ ($d\ge3$) with long-range correlations (e.g., random interlacements, the vacant set of random interlacements, the level sets of the GFF), the chemical distance behaves linearly, as in the case of Bernoulli percolation; see \cite{MR3390739} for details. However, the methods developed in three dimensions and higher break down in two dimensions, since the two-dimensional DGFF is log-correlated; this makes the level-set percolation of the DGFF in two dimensions considerably more delicate. Recently, it was shown in \cite{MR3800790} that for level-set percolation of the two-dimensional DGFF, the associated chemical distance between the two boundaries of a macroscopic annulus is $O(Ne^{(\log N)^{\alpha}})$ for any $\alpha>1/2$ with positive probability. Later, the order was improved to $O(N(\log N)^{1/4})$ with high probability, on metric graphs, conditionally on connectivity \cite{MR4112719}. These two results suggest that the (as yet undetermined) chemical distance exponent for level sets of the two-dimensional DGFF should equal $1$ at every level.
In this paper, we investigate the two-sided level-set clusters of the DGFF. By Theorem~\ref{thm:1.1}, with high probability two vertices $x$ and $y$ with $\|x-y\|\ge\kappa N$ have chemical distance at least $N^{1+\epsilon}$, provided that they are connected in the two-sided level-set. This indicates a fractal structure of the two-sided level-set clusters, in contrast to the afore-mentioned absence of fractality for the (one-sided) level-set clusters.
\subsection{Notation conventions}\label{subsec:notations} For the sake of the reader, we list some notations here.
For $x=(x_1,x_2), y=(y_1,y_2)\in\mathbb R^2$, let \[
\|x-y\|=\sqrt{|x_1-y_1|^2+|x_2-y_2|^2} \ \text{ and } \
|x-y|_{\infty}=|x_1-y_1|\vee|x_2-y_2|. \]
Let $d(x,B)=\inf_{y\in B}\|x-y\|$ and $d(B_1,B_2)=\inf_{x\in B_1}d(x,B_2)$. Similarly, we define $ d_{\infty}(x,B)=\inf_{y\in B}|x-y|_{\infty}$ and $d_{\infty}(B_1,B_2)=\inf_{x\in B_1}d_{\infty}(x,B_2)$.
For $x\in\mathbb R^2$ and $\ell>0$, let \[
B(x,\ell)=\{ y\in \mathbb R^2:\|x-y\|\le \ell \} \ \text{ and } \
B_{\infty}(x,\ell)=\{ y\in \mathbb R^2: |x-y|_\infty \le \ell \}. \] Denote \[ V_{\ell}(x)=B_{\infty}(x,\ell/2) \cap\mathbb Z^2. \]
For $a\in\mathbb R$, let $\lfloor a\rfloor$ be the greatest integer that is at most $a$. For $r\ge 1$, let $[r]=\{1,\cdots,\lfloor r\rfloor\}$. Throughout this paper, $C_1,C_2,\cdots>0$ denote universal constants. Let $K=2^k$, with $k$ a positive integer, be large but fixed (independent of $N$) and to be chosen later. Recall $\kappa\in (0,1)$ from Theorem \ref{thm:1.1}. Let $m\in\mathbb Z_+$ be such that \[ K^{m+1} \leq \kappa N<K^{m+2}. \] Note that $m\rightarrow\infty$ as $N\rightarrow\infty$, since $K$ and $\kappa$ are fixed.
Suppose $B$ is a box in $\mathbb R^2$ with $B\cap\mathbb Z^2\neq\emptyset$; we denote the lower-left corner of $B\cap\mathbb Z^2$ by $z_B$. For each path $P$ in $\mathbb Z^2$, denote by $x_P$ and $y_P$ the starting and ending vertices of $P$, respectively, and write $\|P\|=\|x_P-y_P\|$.
\subsection{Outline of the proof}
The general proof strategy we employ in this paper is multi-scale analysis, a classic and powerful method in percolation theory; see for instance \cite{MR1378847,MR1624084,MR3417515,MR3947326}. In order to prove \eqref{eq:complement-event}, we combine a contour argument analogous to \cite[Proposition 4]{MR3800790}, which serves as the starting point, with an induction analysis analogous to \cite[Lemma 4.4]{MR3947326}. The former is quite similar to \cite{MR3800790}, while the latter is the hard part of this paper. The main difficulty lies in devising a proper induction strategy and controlling the fluctuations of the harmonic functions at all scales.
Section \ref{sec:2-pre} is devoted to preliminaries, for the sake of the reader. We will list basic results about the DGFF and prove some facts required in later proofs. We will also review the tree structure of a path constructed in \cite{MR3947326}. Roughly speaking, a path $P$ in scale $K^j$, i.e., with $\| P \|$ comparable to $K^j$, is associated with a tree $\mathcal{T}_P$ of depth $j$. Nodes at level $r$ in $\mathcal T_P$ are identified with disjoint sub-paths of $P$ in scale $K^{j-r}$, and the parent/child relation between nodes corresponds to the path/sub-path relation. Tame paths are, roughly, those that look like straight lines, and untamed ones are those that look like curves (see Definition \ref{def:tame}). The key fact is that untamed nodes are rare in $\mathcal{T}_P$ for all $P \in \mathcal{P}^{\kappa, \epsilon} $ \cite[Proposition 3.6]{MR3947326}, where
\[
\mathcal{P}^{\kappa, \epsilon} = \left\{P: P \text { is a path in } V_{N},\|P\| \geq \kappa N, \text { and }|P| \leq N^{1+\epsilon}\right\}
\] is defined in \eqref{Eq.defnPke}; we drop the subscript $N$ for brevity in what follows. Therefore, it remains to show that it is unlikely that tame nodes are all $\lambda$-open, which is the essential ingredient of the proof of Theorem~\ref{thm:1.1}.
In Section \ref{sec:3-open-path}, we will deal with the contour argument. Concretely, we will show that the probability that there exists a tame and open path starting in a fixed box decays stretched-exponentially in $K$ (see Theorem \ref{thm:r=0}). To carry this out, note that the existence of a tame and open path implies that a parallelogram $D$ with aspect ratio $O(K)$ has an open crossing. Next, we cut $D$ into $O(\sqrt K)$ sections uniformly, and extract a sub-parallelogram $D_i$ with aspect ratio $O(1)$ from the middle of each section (see Figure \ref{fig:LDP}). Then, the stretched-exponential decay follows from the following two facts. One is that, with positive probability, a parallelogram with aspect ratio $O(1)$ has no open crossings (Lemma~\ref{lem:para-crossing}). The other is that the Gaussian values in different $D_i$'s are roughly independent.
In Section \ref{sec:Multi-scale analysis}, we will deal with the induction analysis. Recall that a path $P$ of scale $K^j$ corresponds to a tree $\mathcal T_P$ of depth $j$. Thus the ratio of tame and open leaves in $\mathcal T_P$ is the average of those in the $\mathcal T_{P^{(i)}}$'s, where the $P^{(i)}$'s are the children of $P$, of which there are at least $K$. Since $K$ is chosen large, one can apply a large deviation analysis (see Theorem \ref{thm:r=1} and Theorem \ref{thm:xi-bound}). However, we will encounter some technicalities along the way. Concretely, we need to control the fluctuations of harmonic functions at all scales in an efficient way, so that the open property can be translated into a demand on the GFF at every sub-scale. To this end, for each level $0\le r\le j$, corresponding to scale $K^{j-r}$, we choose the threshold $\varepsilon_r$ (see~\eqref{eq:epsilon}) large enough to ensure that the fluctuations of the harmonic functions at scale $K^{j-r}$ exceed $\varepsilon_r$ only with a probability decaying sufficiently fast (see Lemmas~\ref{lem:E0}, \ref{lem:E-1} and \ref{lem:E-r}), but at the same time not too large, in the sense that the sequence $(\varepsilon_r)$ is summable. The balance of these two requirements ensures that our strategy works.
\section{Preliminaries}\label{sec:2-pre} There are some basic facts about the two-dimensional discrete Gaussian free field which will be used intensively throughout this paper. For completeness, we will introduce them in Section \ref{subsec:DGFF}. In Section \ref{subsec:tree-structure}, we will collect the results we need from \cite{MR3947326}, including the tree structure of a path (Proposition \ref{prop:tree}) and the upper bound on the total flow through untamed nodes in the associated tree (Lemma \ref{lem:untame-flow}).
\subsection{Properties of two-dimensional DGFF}\label{subsec:DGFF}
In this section, we give a rigorous definition for the DGFF and review some standard estimates about the DGFF. Let $B\subseteq \mathbb Z^2$ be finite and non-empty. Denote by $\{ \eta^B(v): v\in B \}$ the DGFF on $B$ with Dirichlet boundary conditions. It is a mean-zero Gaussian process that vanishes on the boundary $\partial B=\{u\in B:\|u-v\|=1 \text{ for some } v\in B^c\}$, with covariance given by \[ \mathbb E \eta^B(u)\eta^B(v)=G_B(u,v) \quad \text{ for } u,v\in B, \]
where $G_B(u,v)$ is the Green's function associated with a simple random walk in $B$, i.e., the expected number of visits to $v$ before reaching $\partial B$ for a discrete simple random walk started at $u$. Without loss of generality, we always assume $\eta^B|_{B^c}=0$.
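For concreteness, the following minimal numerical sketch (purely illustrative and not used anywhere in the proofs) samples a field with exactly this covariance on the interior of a small box, by inverting $I-P$, where $P$ is the transition matrix of the simple random walk killed upon exiting the box, and then reports the fraction of $\lambda$-open vertices, i.e.\ the analogue of the two-sided level-set $\{v:|\eta(v)|\le\lambda\}$; the function name, the box size and the value of $\lambda$ below are arbitrary illustrative choices.
\begin{verbatim}
import numpy as np

def dgff_sample(n, rng=None):
    """Sample a DGFF on the n x n interior of a box with zero boundary.

    Convention as above: Cov(eta(u), eta(v)) = G(u, v), the expected number
    of visits to v, before exiting the box, of a simple random walk started
    at u; equivalently G = (I - P)^{-1} with P the killed transition matrix.
    """
    rng = np.random.default_rng() if rng is None else rng
    sites = [(i, j) for i in range(n) for j in range(n)]
    index = {v: a for a, v in enumerate(sites)}
    P = np.zeros((n * n, n * n))
    for (i, j), a in index.items():
        for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            if (i + di, j + dj) in index:   # steps leaving the box are killed
                P[a, index[(i + di, j + dj)]] = 0.25
    G = np.linalg.inv(np.eye(n * n) - P)    # Green's function of the killed walk
    eta = np.linalg.cholesky(G) @ rng.standard_normal(n * n)
    return eta.reshape(n, n), G

eta, _ = dgff_sample(20)                    # 20 and 1.0 are illustrative choices
print("fraction of lambda-open vertices:", (np.abs(eta) <= 1.0).mean())
\end{verbatim}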
To eliminate boundary issues, we will need to consider vertices that have at least an appropriate distance from the boundary. For this purpose, fix $\chi=\frac{1}{10}$, and if $B\subseteq\mathbb Z^2$ is a box of side length $L$, define the box $B^{\chi}:=\{z\in B:d_{\infty}(z,\partial B)>\chi L\}.$
The next lemma says that the DGFF is log-correlated; it can be found in \cite[Equation (4)]{MR3947326}. \begin{lem}\label{lem:log-corr} Suppose that $B\subseteq\mathbb Z^2$ is a box of side length $L$. There is a universal constant $C_1>0$ such that \[
\left|\mathbb E\left(\eta^B(u)\eta^B(v)\right)-\frac{2}{\pi}\log\frac{L}{|u-v|_{\infty}\vee 1}\right|\le C_1 \quad \text{for all } u,v \in B^{\chi}. \] \end{lem}
The next lemma is the well-known Markov property of the DGFF. A version can be found in \cite[Section 2.2]{MR3947326}. \begin{lem}\label{lem:DMP}
Let $D$ be a finite subset of $\mathbb Z^2$ and $B\subseteq D$. Let $\eta^D$ be the DGFF on $D$, and let $H^B$ be the conditional expectation of $\eta^D$ given $\eta^D|_{B^c\cup\partial B}$. Then
\[
\eta^B:=\eta^D-H^B
\]
is a version of the DGFF on $B$, and it is independent of $H^B$. In other words, $\eta^D=\eta^B\oplus H^B$ is an orthogonal decomposition. \end{lem}
For the next lemma, we quote a version suited to our needs, which follows straightforwardly from the version in \cite[Lemma 3.10]{MR3433630}. \begin{lem}\label{lem:H^2}
In addition to the assumptions in Lemma \ref{lem:DMP}, we further assume that $B$ is a box of side length $L$. Then
\begin{equation}
\mathbb E\left( H^B(x)-H^B(y) \right)^2\le C_2\frac{|y-x|_{\infty}}{L}\quad \text{ for all } x,y \in B^{\chi},
\end{equation}
where $C_2>0$ is a universal constant. \end{lem}
Next, we estimate the differences of harmonic functions; this estimate will be used intensively in the rest of this paper. \begin{lem}\label{lem:fluct-H}
In addition to the assumptions in Lemma \ref{lem:H^2}, we further assume that $U$ is a box of side length $\ell$ in $B^{\chi}$. There exists a universal constant $C_3>0$ such that if $\varepsilon\ge C_3\sqrt{\frac{\ell}{L}}$, then for all $z\in U$,
\[
\mathbb P\Big(\left|H^B(x)-H^B(z)\right|\ge \varepsilon \text{ for some } x\in U \Big)\le
4\exp\left\{-\frac{\varepsilon^2 L}{8C_2\ell}\right\}.
\] \end{lem}
We need the following two lemmas to prove Lemma \ref{lem:fluct-H}.
\begin{lem}[{Dudley's inequality, \cite[Lemma 4.1]{MR1088478}}]\label{lem:dudley}
Let $U\subseteq \mathbb Z^2$ be a box of side length $\ell$ and $\{G_w: w\in U\}$ be a mean zero Gaussian field satisfying
\[
\mathbb{E}\left(G_{z}-G_{w}\right)^{2} \leq|z-w|_{\infty} / \ell \quad \text { for all } z, w \in U.
\]
Then $\mathbb E\max_{w\in U}G_w\le C_4$, where $C_4>0$ is a universal constant. \end{lem}
\begin{lem}[{Borell–Tsirelson inequality, \cite[Lemma 7.1]{MR3184689}}]\label{lem:borell}
Let $\{ G_z: z\in X\}$ be a Gaussian field on a finite index set $X$. Set $\sigma^2=\max_{z\in X}\mathrm{Var}(G_z)$. Then
\[
\mathbb{P}\left(\left|\max _{z \in X} G_{z}-\mathbb{E} \max _{z \in X} G_{z}\right| \geq a\right) \leq 2 e^{-\frac{a^{2}}{2 \sigma^{2}}} \quad \text { for all } a>0.
\] \end{lem}
\begin{proof}[Proof of Lemma \ref{lem:fluct-H}]
Note that $U\subseteq B^{\chi}\subseteq B\subseteq D$. Fix $z\in U$. For $x\in U$, let $G_x=H^B(x)-H^B(z)$. By Lemma \ref{lem:H^2}, for $x,y\in U$,
\begin{gather}
\mathbb{E}\left(G_{x}-G_{y}\right)^{2} \leq C_2\frac{|y-x|_{\infty}}{L}
\le \frac{C_2\ell}{L}\cdot\frac{|y-x|_{\infty}}{\ell}, \label{eq:Gx-Gy}\\
\mathrm{Var}(G_x)\le C_2\frac{|x-z|_{\infty}}{L}\le \frac{C_2\ell}{L}. \label{eq:Gx}
\end{gather}
By \eqref{eq:Gx-Gy} and Lemma \ref{lem:dudley}, we have $\mathbb E\max_{x\in U} G_x\le \frac{C_3}{2}\sqrt{\frac{\ell}{L}}$,
where $C_3=2C_4\sqrt{C_2}$. Combining this with the symmetry of the Gaussian distribution, we have
\begin{align*}
&\mathbb P\Big(\left|H^B(x)-H^B(z)\right|\ge \varepsilon \text{ for some } x\in U \Big)\\
\le&2\mathbb P\Big(G_x\ge \varepsilon \text{ for some } x\in U \Big)
\le 2\mathbb P\left( \max _{x \in U} G_{x}-\mathbb{E} \max _{x \in U} G_x\ge \varepsilon-\frac{C_3}{2}\sqrt{\frac{\ell}{L}} \right).
\end{align*}
Noting that $\varepsilon\ge C_3\sqrt{\frac{\ell}{L}}$ and applying Lemma \ref{lem:borell}, we obtain
\begin{align*}
&\mathbb P\Big(\left|H^B(x)-H^B(z)\right|\ge \varepsilon \text{ for some } x\in U \Big)\\
\le&2\mathbb P\left( \max _{x \in U} G_{x}-\mathbb{E} \max _{x \in U} G_x\ge \frac{\varepsilon}{2}\right) \le 4 \exp\left\{-\frac{\varepsilon^{2}}{8 \sigma^{2}}\right\},
\end{align*}
where $\sigma^2=\max_{x\in U}\mathrm{Var}(G_x)\le\frac{C_2\ell}{L}$ by \eqref{eq:Gx}. This concludes the lemma. \end{proof}
We will need the following standard estimates on simple random walks. We refer the reader to \cite[Lemma 1]{MR3800790} for a similar derivation. \begin{lem}\label{lem:green's}
Let $\ell_1>0$, $\ell_2\ge\ell_1+2$ and $z\in \mathbb Z^2$. Suppose $V_{\ell_1}(z)\subseteq D\subseteq V_{\ell_2}(z)$. Then for all $u\in\partial V_{\ell_1}(z)$,
\[
\sum_{v\in \partial V_{\ell_1}(z)} G_{D}(u,v)\le\sum_{v\in \partial V_{\ell_1}(z)} G_{V_{\ell_2}(z)}(u,v)\le 2(\ell_2-\ell_1).
\] \end{lem}
\subsection{The tree structure associated with a path}\label{subsec:tree-structure} In this section, we briefly recall some facts from \cite[Section 3]{MR3947326}. For an integer $r\ge 1$, let
\[ \mathcal{B D}_{r}=\left\{\left[a r-\frac{1}{2},(a+1) r-\frac{1}{2}\right] \times\left[b r-\frac{1}{2},(b+1) r-\frac{1}{2}\right]: a, b \in \mathbb{Z}\right\}. \] Note that the sets $B\cap\mathbb Z^2$, $B\in \mathcal{B D}_{r}$, partition $\mathbb Z^2$. Define the sets of paths
\[
{\mathcal{S} \mathcal{L}_{0}:= \mathbb{Z}^{2}}, \ \mathcal{S} \mathcal{L}_{j}:=\left\{P: 1 \leq \frac{1}{K^{j}}\|P\| \leq 1+\frac{1}{K}, P \subseteq B\left(x_{P},\|P\|\right)\right\} \quad \text { for all } j \geq 1,
\] recalling that $x_P$ and $y_P$ are the two endpoints of $P$. If $P\in \mathcal{SL}_j$, we say that $P$ is in scale $K^j$.
\begin{defi}\label{def:tame} For $j\ge0$ and each $P\in \mathcal{SL}_{j+1}$, let
\centerline{$E(P):=\left\{z \in \mathbb{R}^{2}:\left\|x_{P}-z\right\|+\left\|y_{P}-z\right\| \leq\left(1+\frac{2}{K^{2}}\right)\|P\|\right\},$} \centerline{and $\tilde{E}(P)=\left\{z \in \mathbb{R}^{2}: d(z, E(P)) \leq 4 K^{j}\right\}$.}
\noindent A path $P$ is said to be tame if $P\subseteq \tilde{E}(P)$ and untamed otherwise.
\end{defi} Note that a path can be called tame or untamed only when it is in scale $K^j$ with $j\ge 1$.
\begin{prop}[{\cite[Proposition 3.1]{MR3947326}}]\label{prop:tree} Suppose that $j\in[m-2]$ and $P\in\mathcal{SL}_{j+1}$. Then, there exist $\ell\in[K^j,(1+\frac1K)K^j]$, a positive integer $d$, and disjoint child-paths $P^{(i)}$ of $P$ for $i\in [d]$ such that the following hold. \begin{itemize}
\item[(a)] $d\ge K$.
\item[(b)] Each box in $\mathcal{BD}_{K^j}$ is visited by at most $12$ sub-paths of the form $P^{(i)}, i \in[d]$.
\item[(c)] $P^{(i)} \in \mathcal{S} \mathcal{L}_{j}$ for each $i \in[d]$. \end{itemize}
Furthermore, $d\ge \frac12\|P\|$ for $j=0$; and one can extract $d_0$ disjoint sub-paths in $\mathcal{S} \mathcal{L}_{m-1}$ from $P$ with $\|P\| \geq \kappa N$ such that (b) holds with $j=m-1$, where $ d_{0}:=\left\lfloor\frac{\kappa N}{K^{m-1}}\right\rfloor \geq K. $ \end{prop}
Fix $P\in\mathcal{SL}_{j}$. The tree $\mathcal{T}_P$ associated with $P$ is constructed as follows. The nodes of $\mathcal{T}_P$ correspond to a family of sub-paths of $P$, where the parent/child relation in $\mathcal{T}_P$ corresponds to the path/sub-path relation in the plane, via Proposition \ref{prop:tree}. In particular, the root, denoted by $\rho$, corresponds to $P$, and the leaves, denoted by $\mathcal{L}$, correspond to vertices on $P$. Denote the level of a node $u$ by $L(u)$, with $L(\rho)=0$, and identify $u$ with a sub-path in $\mathcal{SL}_{j-L(u)}$ having $d_u$ children. Each node, viewed as a path, enjoys the properties of Proposition \ref{prop:tree}.
In particular, a path $P$ with $\|P\| \geq \kappa N$ is associated with a tree $\mathcal{T}_P$ of depth $m$. Let $\theta_P$ be the unit uniform flow on $\mathcal{T}_P$ from $\rho$ to $\mathcal{L}$, with $\theta_{P}(\rho)=1$ and $\theta_{P}(v)=\frac{1}{d_{u}} \theta_{P}(u)$ if $v$ is a child of $u$ (a minimal computational illustration of this flow is given below). For $\delta\in(0,1)$, let \begin{equation}\label{eq:kap-delta-K}
\mathcal{P}^{\kappa, \delta, K}:=\left\{P: P \text { is a path in } V_{N},\|P\| \geq \kappa N \text { and }|P| \leq N^{1+\frac{\delta}{K^{2} k}}\right\}. \end{equation}
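The following small sketch (purely illustrative; the dictionary \texttt{children} and the node names are hypothetical) computes the unit uniform flow $\theta_P$ on a tree described by its child lists, exactly as in the recursive definition above.
\begin{verbatim}
# Unit uniform flow on a tree: theta(root) = 1 and theta(v) = theta(u)/d_u
# whenever v is a child of u with d_u children.
def uniform_flow(children, root):
    theta = {root: 1.0}
    stack = [root]
    while stack:
        u = stack.pop()
        for v in children.get(u, []):
            theta[v] = theta[u] / len(children[u])
            stack.append(v)
    return theta

# Toy example: a root with three children, one of which has two children.
children = {"rho": ["u1", "u2", "u3"], "u1": ["v1", "v2"]}
print(uniform_flow(children, "rho"))
\end{verbatim}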
\begin{lem}[{\cite[Proposition 3.6]{MR3947326}}]\label{lem:untame-flow}
For each $P\in\mathcal{P}^{\kappa, \delta, K}$,
\[
\sum_{u: 1 \leq L(u) \leq m-1} \theta_{P}(u) 1_{ \{u \text{ is untamed}\}} \leq 2 \delta m.
\] \end{lem}
At the end of this section, we give some definitions that are similar to those in \cite{MR3947326}. Define \[ \mathcal{P}_{j}\left(B\right) :=\left\{P \in \mathcal{S} \mathcal{L}_{j} : x_{P} \in B \right\}, \] \[ T_j(B):=\left\{ P\in\mathcal{P}_{j}(B): P\text{ is tame} \right\}, \] \[ \mathrm{END}_{j} :=\big\{B: B \in \mathcal{B} \mathcal{D}_{K^{j-2}}\text{ and }B\cap V_{N} \neq \emptyset\big\}. \] For $B, B' \in \mathrm{END}_j$, define $T_j(B,B'):=\{ P\in T_j(B): y_P\in B' \}$.
\section{Tame paths are unlikely to be open}\label{sec:3-open-path} In this section, we will use a contour argument to show that the probability that there exists a tame and open path starting in a fixed box decays stretched-exponentially in $K$ (see Theorem \ref{thm:r=0}). To this end, we start by showing that, with positive probability, a parallelogram with aspect ratio $O(1)$ has no open crossings (Lemma \ref{lem:para-crossing}) in Section \ref{subsec:good-para}. Then, in Section \ref{subsec:proof-r=0}, we give the proof of Theorem \ref{thm:r=0} by using Lemma \ref{lem:para-crossing} together with the estimates on harmonic functions from Lemma \ref{lem:fluct-H}.
Recall that a vertex $x\in V_N$ is called $\lambda$-open if $\big|\eta^{V_{2N}}(x)\big|\le \lambda$. Next, we extend this definition slightly. For $V\subseteq V_{2N}$ and $\alpha\in\mathbb R$, we say that a vertex $x\in V$ is $(V, \lambda, \alpha)$-open if $|\eta^V(x)+\alpha|\le \lambda$. A path $P\subseteq V$ is said to be $(V, \lambda, \alpha)$-open if every vertex in $P$ is $(V, \lambda, \alpha)$-open. For brevity, we will occasionally write ``open'' to mean $\lambda$-open or $(V, \lambda, \alpha)$-open, according to the context.
\begin{thm}\label{thm:r=0}
For any $\lambda_0>0$, let $\lambda\ge\lambda_0$. There exists $c=c(\lambda_0)>0$ such that the following holds for all $K\ge K_0(\lambda):=e^{c\lambda^2}$. Suppose that $j\in [m-1]$, $B\in \mathrm{END}_j$, $V_{4K^j}(z_{B})\subseteq V\subseteq V_{2N}$, and $\alpha\in\mathbb R$. Then,
\begin{equation}
\mathbb P\big(P \text{ is } (V, \lambda,\alpha)\text{-open for some } P\in T_j(B)\big)
\le e^{-0.01\sqrt{K}}.
\end{equation} \end{thm}
\subsection{Good parallelograms}\label{subsec:good-para} In this section, we consider a closed parallelogram $D$ with corners $(a,b)$, $(a+l,b+h)$, $(a+l,b+h+w)$ and $(a,b+w)$, where $(a,b)\in\mathbb R^2$, $l\ge w\ge 10$ (here $10$ is a somewhat arbitrary choice), and $l\ge h\ge 0$. In particular, we say that $D$ is \emph{good} if $a,l\in\mathbb Z$ and $l=16 w$. We call $l$, $w$, $\theta=\arctan \frac{h}{l}$, and \[ v_0=\left(\left\lfloor\frac{a+h+l-7w\sin^2\theta}{2}\right\rfloor , \left\lfloor \frac{b+h-l+7w\sin\theta\cos\theta}{2} \right\rfloor\right) \] respectively the length, width, angle and anchor of $D$. Note that $\theta\in [0,\frac{\pi}{4}]$ and $v_0\in\mathbb Z^2$. By a crossing of a good parallelogram $D$ we mean a path in $D$ connecting the left and right sides of $D$ (see Figure \ref{fig:para}).
Let $V$ be a finite subset of $\mathbb Z^2$ with $D\cap\mathbb Z^2\subseteq V$, and let $\mathcal{A}(D,V,\lambda,\alpha)$ be the event that there exists a $(V,\lambda,\alpha)$-open crossing of $D$. The reasoning of the following lemma is analogous to that of \cite[Proposition 4]{MR3800790}.
\begin{figure}
\caption{$D$ and its crossing.}
\label{fig:para}
\end{figure}
\begin{lem}\label{lem:para-crossing}
For any $\lambda_0>0$, let $\lambda\ge\lambda_0$. There exists $c'=c'(\lambda_0)>0$ such that if
\begin{equation}\label{eq:L-w}
L/w\ge e^{c'\lambda^2},
\end{equation}
then for any good parallelogram $D$ with width $w$ and anchor $v_0$, and any $\alpha\in\mathbb{R}$, we have
\begin{equation}\label{eq:A}
\mathbb P\Big( \mathcal{A}\big(D,V_L(v_0),\lambda,\alpha\big) \Big)\le \frac78.
\end{equation} \end{lem}
\begin{proof}
For $i=0,1,2,3$, rotate $D$ around $v_0$ counterclockwise by $i\pi/2$ and denote the resulting parallelogram by $D_i$, noting that $D_0=D$. Since $D$ is good, by our choice of the anchor $v_0$, $\cup_{i=0}^{3}D_i$ forms an annulus $R$ centered at $v_0$ in $V_{4l}(v_0)$, surrounding $V_{2w}(v_0)$ (see Figure \ref{fig:rotation}). Let $\mathfrak{C}$ be the collection of all contours in $R$. Here, by a contour we mean a path whose two endpoints coincide. We consider a natural partial order on $\mathfrak{C}$: $\mathbf C_1\preceq \mathbf C_2$ if $\mathbf C_1^*\subseteq \mathbf C_2^*$, where $\mathbf C^*$ is the collection of vertices that are surrounded by $\mathbf C$.
\begin{figure}
\caption{$R$ is the (green) annulus. (Red) curves are open crossings of $D_i$'s.}
\label{fig:rotation}
\end{figure}
Denote $V:=V_L(v_0)$ and $\mathcal{A}_i:=\mathcal{A}\big(D_i,V,\lambda,\alpha\big)$, where, correspondingly, by a crossing of $D_i$ for odd $i$ we mean a path in $D_i$ connecting the top and bottom sides of $D_i$. On the event $\cap_{i=0}^3 \mathcal{A}_i$, we can find at least one $(V,\lambda,\alpha)$-open contour in $R$. Let $\mathscr{C}$ be the random subset of $\mathfrak{C}$ consisting of all open contours in $R$. Then, on the event $\{\mathscr{C}\neq\emptyset\}$, the partial order above yields a well-defined outermost (maximum) contour in $\mathscr{C}$, which is denoted by $\mathcal C$. Hence, to prove the lemma, it remains to show
\begin{equation}\label{eq:C*}
\mathbb P\big( \mathscr{C}\neq\emptyset\big)\le \frac12.
\end{equation}
Assuming \eqref{eq:C*} holds, noting $\cap_{i=0}^3 \mathcal{A}_i\subseteq\{ \mathscr{C}\neq\emptyset \}$ and $\mathbb P\left( \mathcal{A}_i\right)=\mathbb P\left( \mathcal{A}_0 \right) $ by rotation invariance, one has $4\big(1-\mathbb P(\mathcal{A}_0)\big)\ge 1-\mathbb P\big(\cap_{i=0}^3 \mathcal{A}_i\big)\ge1/2$, completing the proof.
Next, we will prove \eqref{eq:C*}. Denote
\[
X:=\frac{1}{|\partial V_{2w}(v_0)|}\sum_{u\in\partial V_{2w}(v_0)}\left(\eta^{V}(u)+\alpha\right).
\]
By Lemma \ref{lem:log-corr}, if \eqref{eq:L-w} is satisfied for sufficiently large $c'$, then
\[
\mathbb E\eta^{V}(u)\eta^{V}(v)\ge \frac{2}{\pi}\log\left(\frac{L}{2w}\right)-C_1\ge\frac{1}{\pi}\log\left(\frac{L}{2w}\right) \ \text{ for all } u,v\in \partial V_{2w}(v_0).
\]
It follows that
\begin{equation}\label{eq:var-X}
\mathrm{Var}(X)\ge \frac{1}{\pi}\log\left(\frac{L}{2w}\right).
\end{equation}
For a deterministic contour $\mathbf C\in\mathfrak{C}$, let $\hat{\mathbf {C}}=(V\backslash\mathbf C^*)\cup\mathbf C$ be the set of points outside $\mathbf C$ but within $V$. Denote
$
\mathcal{F}_{\hat{\mathbf C}}:=\sigma\big\{ \eta^{V}(x): x\in \hat{\mathbf C}\big\}
$
and $Y:=X-\mathbb E\big(X|\mathcal{F}_{\hat{\mathbf C}}\big)$.
Then,
\begin{equation}\label{eq:Var-Y}
\mathrm{Var}(Y)=\frac{1}{|\partial V_{2w}(v_0)|^2}\sum_{u,v\in\partial V_{2w}(v_0)}G_{\mathbf C^*}(u,v)\le 16,
\end{equation}
where we have used
$
\sum_{v\in\partial V_{2w}(v_0)}G_{\mathbf C^*}(u,v)\le 2(4l-2w)
$
by setting $D=\mathbf C^*, \ell_1=2w,\ell_2=4l$ in Lemma \ref{lem:green's}.
Note that for each $u\in\partial V_{2w}(v_0)$,
\begin{equation}\label{eq:S-tau}
\mathbb E\big(\eta^{V}(u)|\mathcal{F}_{\hat{\mathbf C}}\big)=\sum_{v\in \mathbf C}\mathbb P^u(S_{\tau}=v)\cdot\eta^{V}(v),
\end{equation}
where $\{S_n\}$ is a simple random walk on $\mathbb Z^2$ started from $u$, and $\tau$ is the first time it hits $\mathbf C$.
By the definition that $\mathcal{C}$ is the outermost open contour in $\mathscr{C}$, one has $\{ \mathcal{C}=\mathbf C \}\in \mathcal{F}_{\hat{\mathbf C}}$.
On the event $\{\mathcal{C}=\mathbf C\}$, we have $|\eta^{V}(v)+\alpha| \le \lambda$ for all $v\in \mathbf C$. Combined with \eqref{eq:S-tau}, it gives that
\[
\Big|\mathbb E\left(\eta^{V}(u)+\alpha\big|\mathcal{F}_{\hat{\mathbf C}}\right)\Big|
\le \sum_{v\in \mathbf C}\mathbb P^u(S_{\tau}=v)\cdot \big|\eta^{V}(v)+\alpha\big| \le \lambda \ \text{ for all } u\in \partial V_{2w}(v_0),
\]
implying
$
\big|\mathbb E\big(X|\mathcal{F}_{\hat{\mathbf C}}\big)\big|
\le \lambda.
$
Consequently, $|Y|\le\lambda$ implies $|X|=\big|\mathbb E\big(X|\mathcal{F}_{\hat{\mathbf C}}\big)+Y\big|\le 2\lambda$.
Noting that $Y$ and $\mathcal{F}_{\hat{\mathbf C}}$ are independent,
\begin{equation*}
\mathbb P\big(|X|\le 2\lambda\big| \mathcal{C}=\mathbf C\big)
\ge\mathbb P\big( |Y|\le\lambda\big|\mathcal{C}=\mathbf C \big)
=\mathbb P\big(|Y|\le\lambda\big).
\end{equation*}
It follows that
\begin{equation}\label{eq:cal-C}
\mathbb P\big(\mathscr C\neq\emptyset\big)
=\sum_{\mathbf C\in\mathfrak{C}}\mathbb P\big(\mathcal C=\mathbf C\big)
=\sum_{\mathbf C\in\mathfrak{C}}\frac{\mathbb P(|X|\le 2\lambda, \mathcal{C}=\mathbf C)}{\mathbb P(|X|\le 2\lambda | \mathcal{C}=\mathbf C)}\le \frac{\mathbb P(|X|\le 2\lambda)}{\mathbb P(|Y|\le \lambda)}.
\end{equation}
Let $\phi_{\sigma^2}$ be the probability density function of a centered Gaussian random variable with variance $\sigma^2$. Set $\sigma_1^2:=\frac{1}{\pi}\log\left(\frac{L}{2w}\right)$. By \eqref{eq:var-X}, \eqref{eq:Var-Y} and \eqref{eq:cal-C},
\[
\mathbb P\big(\mathscr C\neq\emptyset\big)\le \frac{\phi_{\sigma_1^2}(0)\cdot4\lambda}{\phi_{16}(\lambda_0)\cdot2\lambda_0}.
\]
Choose $c'=c'(\lambda_0)$ large enough that $\sigma_1$ is sufficiently large to make the right-hand side above less than $\frac12$.
This completes the proof of the lemma. \end{proof}
\subsection{Proof of Theorem \ref{thm:r=0}}\label{subsec:proof-r=0} To prove Theorem \ref{thm:r=0}, it suffices to prove the following proposition.
\begin{prop}\label{prop:BB'}
Let $K\ge K_0(\lambda)$. For all $B, B' \in \mathrm{END}_j$ such that $T_j(B,B')\neq\emptyset$,
\begin{equation}
\mathbb P\Big(P \text{ is } (V,\lambda,\alpha)\text{-open for some } P\in T_j(B,B')\Big)
\le e^{-0.015\sqrt{K}}.
\end{equation} \end{prop}
\begin{proof}[Proof of Theorem \ref{thm:r=0}, assuming Proposition \ref{prop:BB'}]
Note that for $B\in\mathrm{END}_j$, there are at most $K^5$ boxes $B'\in \mathrm{END}_j$ such that $T_j(B,B')\neq\emptyset$. By a union bound, for $K\ge K_0(\lambda)$,
\begin{equation*}
\mathbb P\Big(P \text{ is } (V,\lambda,\alpha)\text{-open for some } P\in T_j(B)\Big)
\le K^5e^{-0.015\sqrt{K}}\le e^{-0.01\sqrt{K}}.
\end{equation*}
This completes the proof. \end{proof}
We formulate the ingredients needed to prove Proposition \ref{prop:BB'} in the remainder of this section. In what follows, we will always assume that $B, B' \in \mathrm{END}_j$ and $T_j(B,B')\neq\emptyset$. Let $(x,y)$ and $(x',y')$ be the lower-left corners of $B$ and $B'$, respectively. Without loss of generality, suppose that $x'-x\ge y'-y\ge 0$. Then, it is not hard to show that the following geometric facts hold for $K\ge 2^{32}$ (see Figure \ref{fig:LDP}). \begin{enumerate}
\item[(G1)]
One can find a parallelogram $D$ with width $w=20K^{j-1}$ and length $K^j/4$ such that every path in $T_j(B,B')$ contains a crossing of $D$, recalling Definition \ref{def:tame} for the tame path and noting $B, B' \in \mathrm{END}_j$;
\item[(G2)]
One can extract good $D_i$'s from $D$ for $i\in[\sqrt{K}/8]$ with width $w=20K^{j-1}$ and length $l=16w$ such that $D_i\subseteq V_{4l}(v_i)\subseteq V_i$ for each $i$, where $v_i$ is the anchor of $D_i$, $L=K^{j-1/2}$, $V_i:=V_L(v_i)$, and $V_i$'s are disjoint.
\item[(G3)]
Let $U=\cup_i V_i$. Then $U\subseteq V_{4K^j}(z_{B})\subseteq V$, where $V$ is the set in the statement of Theorem \ref{thm:r=0}. \end{enumerate}
Set $\mathcal{F}_\partial:=\sigma\big\{ \eta^{V}(z):z\in (V\backslash U)\cup\partial U\big\}$. Let $H_{\partial}(z):=\mathbb E\left(\eta^{V}(z)\big|\mathcal{F}_\partial\right)$ and $ \eta^{V_i}(z):=\eta^{V}(z)-H_{\partial}(z) \text{ for all } z\in V_i. $ By the Markov property (Lemma \ref{lem:DMP}), $ \eta^{V_i}=\{\eta^{V_i}(z):z\in V_i \} $ is a DGFF on $V_i$ for each $i\in [\sqrt{K}/8]$; moreover, the $\eta^{V_i}$'s are mutually independent by (G2), and they are independent of $H_{\partial}$. Set $ \varepsilon_0=100\sqrt{C_2}, $ where $C_2$ is defined in Lemma \ref{lem:H^2}. Denote \begin{equation}
\mathcal{E}_0=\Big\{ \big|H_{\partial}(z)-H_{\partial}(v_{i})\big|\ge \varepsilon_0 \text{ for some } i\in [\sqrt{K}/8] \text{ and }z\in D_i \Big\}. \end{equation} Set $ C_5=(2\vee C_4)^{32}, $ where $C_4$ is defined in Lemma \ref{lem:dudley}.
\begin{lem}\label{lem:E0}
Let $K\ge C_5$. Then, $\mathbb P\big(\mathcal{E}_0\big)\le e^{-0.5\sqrt{K}}$. \end{lem}
\begin{proof}
Recall that $w=20K^{j-1}$, $l=16w$, $L=K^{j-1/2}$, and $C_3=2C_4\sqrt{C_2}$. For $K\ge C_5$, we have $C_3\sqrt{\frac{4l}{L}}\le\varepsilon_0$.
Setting $\ell=4l$ and $\varepsilon=\varepsilon_0$ in Lemma \ref{lem:fluct-H}, we have
\begin{align*}
&\mathbb P\Big(\big|H_{\partial}(z)-H_{\partial}(v_{i})\big|\ge \varepsilon_0 \text{ for some } z\in D_i \Big)\\
\le&\mathbb P\Big(\big|H_{\partial}(z)-H_{\partial}(v_{i})\big|\ge \varepsilon_0 \text{ for some } z\in V_{4l}(v_i)\Big)
\le 4\exp\left\{-\frac{\varepsilon_0^2 L}{32C_2 l} \right\}\le e^{-0.9\sqrt{K}},
\end{align*}
where we have used (G2) in the first inequality, and $K\ge C_5\ge 2^{32}$ in the last inequality. By a union bound,
$
\mathbb P\big(\mathcal{E}_0\big)\le \frac18 \sqrt{K}e^{-0.9\sqrt{K}}\le e^{-0.5\sqrt{K}}.
$ \end{proof}
\begin{figure}
\caption{$D$ is the (red) parallelogram, with aspect ratio $O(K)$. The parallelograms with (black) shading are $D_i$'s, with aspect ratio $O(1)$. The (blue) squares are $V_i$'s.}
\label{fig:LDP}
\end{figure}
\begin{proof}[Proof of Proposition \ref{prop:BB'}]
Let $w=20K^{j-1}$ and $L=K^{j-1/2}$ as above.
Let $c=c(\lambda_0)$ be a constant such that
\begin{equation}\label{eq:K_0}
K_0(\lambda):=e^{c\lambda^2}\ge 400e^{2c'(\lambda+\varepsilon_0)^2}\vee C_5.
\end{equation}
For $K\ge K_0(\lambda)$, we have
$
L/w\ge e^{c'(\lambda+\varepsilon_0)^2}.
$
Then by (G2) and Lemma \ref{lem:para-crossing}, for each $\alpha$,
\begin{equation}\label{eq:first-term}
\mathbb P\big( \mathcal A\left(D_i,V_i,\lambda+\varepsilon_0,\alpha\right) \big)\le \frac78 \quad \text{ for all } i,
\end{equation}
recalling that $\mathcal A\left(D_i,V_i,\lambda+\varepsilon_0,\alpha\right)$ is the event that there exists a $\left(V_i,\lambda+\varepsilon_0,\alpha\right)$-open crossing of $D_i$ in $V_i$.
Note that for $z\in D_i$,
\[
\eta^{V}(z)=\eta^{V_i}(z)+H_{\partial}(z)
=\big(\eta^{V_i}(z)+H_{\partial}(v_i)\big)+\big(H_{\partial}(z)-H_{\partial}(v_i)\big).
\]
By the triangle inequality, if $|H_{\partial}(z)-H_{\partial}(v_i)|\le\varepsilon_0$ for all $z\in D_i$, then $\left|\eta^{V}(z)+\alpha\right|\le\lambda$
implies
$
\big|\eta^{V_i}(z)+\alpha+H_{\partial}(v_i)\big|\le\lambda+\varepsilon_0.
$
Thus, on the event $\mathcal{E}_0^c$,
\[
\mathcal A\subseteq \bigcap_{i\in [\sqrt{K}/8]}\mathcal A\big(D_i,V,\lambda,\alpha\big)
\subseteq \bigcap_{i\in [\sqrt{K}/8]}\mathcal A_i,
\]
where $\mathcal A:=\mathcal A\big(D,V,\lambda,\alpha\big)$, $\mathcal A_i:=\mathcal A\big(D_i,V_i,\lambda+\varepsilon_0,\alpha+H_{\partial}(v_{i})\big)$, and $H_{\partial}(v_{i})$ is regarded as a constant with respect to the DGFF $\eta^{V_i}$ for independence. Therefore,
\begin{equation}\label{eq:mathcal-A}
\mathbb P\big( \mathcal A \big)\le \mathbb P\Big( \bigcap_{i}\mathcal A_i \Big)+\mathbb P\big( \mathcal{E}_0 \big),
\end{equation}
where the intersection is over $i\in [\sqrt{K}/8]$. By \eqref{eq:first-term},
\begin{equation}\label{eq:intersection}
\mathbb P\Big( \bigcap_{i}\mathcal A_i \Big)
\le \mathbb E\Big(\mathbb P\Big(\bigcap_i\mathcal A_i\Big| \mathcal{F}_\partial\Big)\Big)
\le \mathbb E \prod_i \mathbb P\big(\mathcal A_i\big| \mathcal{F}_\partial\big)
\le \left(\frac78\right)^{\lfloor\sqrt{K}/8\rfloor},
\end{equation}
where we have used the conditional independence of $\mathcal A_i$ given $\mathcal{F}_\partial$.
Combining (G1), \eqref{eq:mathcal-A}, \eqref{eq:intersection} and Lemma \ref{lem:E0}, for $K\ge K_0(\lambda)\ge C_5\ge 2^{32}$,
\[
\mathbb P\Big(P \text{ is } (V,\lambda,\alpha)\text{-open for some } P\in T_j(B,B')\Big)
\le \left(\frac78\right)^{\lfloor\sqrt{K}/8\rfloor}+e^{-0.5\sqrt{K}} \le e^{-0.015\sqrt{K}}.
\]
This completes the proof. \end{proof}
\section{Multi-scale analysis on the hierarchical structure of the path}\label{sec:Multi-scale analysis} In this section, we will prove Theorem \ref{thm:1.1}. It suffices to prove the theorem for $\lambda\ge\lambda_0$ with $\lambda_0>0$ fixed; see Remark~\ref{rem:lambda_0}. Note that if there is a $\lambda$-open path $P$ in $\mathcal{P}^{\kappa,\delta,K}$, then all nodes in $\mathcal{T}_P$ are $\lambda$-open. We will prove that this event has probability tending to $0$ as the depth $m$ of the tree tends to infinity, by showing that tame and open nodes are rare (Theorem \ref{thm:xi-bound} below); recall that untamed nodes are rare by Lemma \ref{lem:untame-flow}.
Let $j\in[m-1], B\in \mathrm{END}_j$. For each $P\in\mathcal P_j(B)$, recall that $\mathcal T_P$ is a tree of depth $j$, associated with $P$. Each node $u$ of $\mathcal T_P$ is identified with a sub-path of $P$, which is also denoted by $u$ to lighten notation. Let $\mathcal T_{P,r}$ be the collection of nodes of level $r$. Note that the root has level $0$. For each $u\in \mathcal T_{P,r}$, there is a unique starting box $B_u\in \mathrm{END}_{j-r}$ containing the starting point of $u$. Let $\mathscr A$ be the collection of real functions defined on $\cup_{j\in[m-1]}\mathrm{END}_{j}$, i.e., on all end-boxes. We always assume that $\bar\alpha$ is a real function in $\mathscr A$. Note that for any $P$, $\bar\alpha$ induces a function on $\mathcal T_P$ by setting $\bar\alpha_u:=\bar\alpha\big(B_u\big)$ for each $u\in \mathcal T_{P}$. Let $\theta_P$ be the unit uniform flow on $\mathcal T_P$ from $\rho$ to $\mathcal{L}$ (the definition is just before \eqref{eq:kap-delta-K}), where $\rho$ is the root and $\mathcal{L}$ is the set of leaves. For $\lambda>0, V_{4K^j}(z_{B})\subseteq V\subseteq V_{2N}$, define \begin{gather} Y_{P,r,\lambda,\bar\alpha}:=\sum_{u\in \mathcal T_{P,r}}\theta_P(u)1_{\{u \text{ is tame and } (V,\lambda,\bar\alpha_u)\text{-open}\}}, \label{eq:Y-Pr}\\ \xi_{r,\lambda,\bar\alpha,j,B}:=\max\big\{Y_{P,r,\lambda,\bar\alpha}: P\in \mathcal P_j(B)\big\}.\label{eq:xi-Pr} \end{gather}
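Note that, since $\theta_P$ is a unit flow from the root to the leaves and every leaf of $\mathcal T_P$ lies at depth $j$, the total flow through each level of $\mathcal T_P$ equals $1$; hence
\[
0\le Y_{P,r,\lambda,\bar\alpha}\le \sum_{u\in \mathcal T_{P,r}}\theta_P(u)=1
\qquad\text{and}\qquad 0\le \xi_{r,\lambda,\bar\alpha,j,B}\le 1 .
\]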
Recall $\lambda\ge\lambda_0$ and $K_0(\lambda)=e^{c\lambda^2}$ as a function of $\lambda$ for some $c=c(\lambda_0)>0$. Noting that
\[
\xi_{0,\lambda,\bar\alpha,j,B}=1_{\left\{P \text{ is } (V,\lambda,\bar\alpha_{\rho})\text{-open for some } P\in T_j(B) \right\}},
\] the next corollary restates Theorem \ref{thm:r=0}.
\begin{cor}\label{cor:r=0}
Suppose $K\ge K_0(\lambda)$, $j\in[m-1]$, $B\in \mathrm{END}_j$, $V_{4K^j}(z_{B})\subseteq V\subseteq V_{2N}$, and $\bar\alpha\in\mathscr A$. Then,
\begin{equation}
\mathbb P(\xi_{0,\lambda,\bar\alpha,j,B}>0)\le e^{-0.01\sqrt{K}}.
\end{equation} \end{cor}
As for $r=1$, we have a similar result. Set $ \varepsilon_{1}=8\sqrt{C_2}$, $ K_1(\lambda)=K_0(\lambda+\varepsilon_{1}). $ We will prove the following theorem in Section \ref{subsec:4.1}.
\begin{thm}\label{thm:r=1}
Suppose $K\ge K_1(\lambda)$, $j\in[2,m-1]\cap\mathbb Z$, $B\in \mathrm{END}_j$, $V_{4K^j}(z_{B})\subseteq V\subseteq V_{2N}$, and $\bar\alpha\in\mathscr A$. Then,
\[
\mathbb P(\xi_{1,\lambda,\bar\alpha,j,B}>\delta)
\le e^{-K^{1/8}} \quad \text{ for all } \delta\ge\delta_1:=\frac12.
\] \end{thm}
Before generalizing Theorem \ref{thm:r=1}, let us set our conventions for constants. Set \begin{equation}\label{eq:beta-c_r} \beta=2^{-9}\ \text{ and }\ c_r=(\beta K)^r. \end{equation} Define $\{\delta_r : r\ge0\}$ to be \begin{gather} \delta_0=0, \quad \delta_1=\frac12; \quad \delta_{r+1}=\delta_r+\Delta_r \ \text{ for all } r\ge 1,\label{eq:delta}\\ \text{ where } \Delta_1=\frac{9\log K}{\beta K^{1/8}}; \quad \Delta_{r+1}=\frac{\log(1+2c_{r})+9\beta^{-1}\log K}{c_{r}}\ \text{ for all } \ r\ge 1.\label{eq:Delta} \end{gather} Set \begin{gather} \varepsilon_0=100\sqrt{C_2}, \quad \varepsilon_1=8\sqrt{C_2}; \quad \varepsilon_{r+1}=4\sqrt{C_2}\beta^{r/2}\ \text{ for all } \ r\ge 1, \label{eq:epsilon}\\ K_{r+1}(\lambda)=K_r(\lambda+\varepsilon_{r+1})=K_0\left(\lambda+\sum_{i=1}^{r+1}\varepsilon_i\right) \ \text{ for all } r\ge 0.\label{eq:Kr} \end{gather}
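We record an elementary consequence of \eqref{eq:epsilon} and \eqref{eq:Kr}, stated here for the reader's convenience: since $\sqrt\beta=2^{-9/2}$,
\[
\sum_{r\ge 1}\varepsilon_{r+1}=4\sqrt{C_2}\sum_{r\ge 1}\beta^{r/2}
=\frac{4\sqrt{C_2}\,\sqrt{\beta}}{1-\sqrt{\beta}}<\sqrt{C_2},
\]
so that $\sum_{i\ge 1}\varepsilon_i<9\sqrt{C_2}$ and hence $K_r(\lambda)\le K_0\big(\lambda+9\sqrt{C_2}\big)$ for all $r\ge0$; in particular, the requirement $K\ge K_r(\lambda)$ appearing below can be met uniformly in $r$.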
We will prove the following theorem by induction on admissible pair $(r,j)$ in Section \ref{subsec:4.2}. \begin{thm}\label{thm:xi-bound}
The following holds for any pair $(r,j)$ satisfying $r\in[2,m-2]\cap\mathbb Z$ and $j\in[r+1,m-1]\cap\mathbb Z$. For all $K\ge K_r(\lambda)$, $B\in \mathrm{END}_j$, $V_{4K^j}(z_{B})\subseteq V\subseteq V_{2N}$, and $\bar\alpha\in\mathscr A$, we have
\[
\mathbb{P}(\xi_{r,\lambda,\bar\alpha,j,B}>\delta)
\le 2e^{-c_{r-1}(\delta-\delta_r)}
\quad \text{for all } \delta\ge\delta_r.
\] \end{thm}
In other words, with $\delta_r$ chosen as the threshold for the total flow through level $r$, the excess above $\delta_r$ has an exponentially decaying probability, uniformly in the other parameters. Furthermore, as in Lemma \ref{lem:E0}, we will use $\{\varepsilon_r : r\ge 1\}$ to bound the fluctuations of the harmonic functions at the different levels, in Lemma \ref{lem:E-1} ($r=1$) and Lemma \ref{lem:E-r} ($r\ge2$), respectively.
\subsection{Proof of Theorem \ref{thm:r=1}}\label{subsec:4.1} We assume $j\in[2,m-1]\cap\mathbb Z$, $B\in \mathrm{END}_j$, $V_{4K^j}(z_{B})\subseteq V\subseteq V_{2N}$, and $\bar\alpha\in\mathscr A$ in this section. Define $ \mathcal{P}_{j, d}(B):=\big\{P \in \mathcal{P}_{j}\left(B\right) : d_{P}=d\big\}. $ Denote the child-paths of $P$ by $\{P^{(i)}\}_{i\in[d]}$ if $P\in\mathcal{P}_{j, d}(B)$. Note that $d\ge K$ always holds by (a) of Proposition \ref{prop:tree}. Define \[
\mathrm{END}_{j-1, d} :=\left\{\left\{B_i\right\}_{i \in[d]} \subseteq \mathrm{END}_{j-1} :\left|\left\{i : B_{i} \subseteq \tilde B\right\}\right| \leq 12 \text { for each } \tilde B \in \mathcal{B} \mathcal{D}_{K^{j-1}}\right\}, \] and for each sequence $\mathcal{S} :=\left\{ B_i \right\}_{i \in[d]}\in\mathrm{END}_{j-1, d}$, define \[ \mathcal{P}_{j, \mathcal{S}}(B) :=\big\{P \in \mathcal{P}_{j, d}(B) : P^{(i)} \in \mathcal{P}_{j-1}\left(B_i\right) \text { for all } i \in[d]\big\}. \] Furthermore, define \begin{equation*} \mathrm{END}_{j-1, d}(B):=\big\{\mathcal S\in \mathrm{END}_{j-1,d}: \mathcal{P}_{j, \mathcal{S}}(B)\neq\emptyset \big\}. \end{equation*} For the remainder of this paper, we always assume that $\mathcal{S} :=\left\{ B_i \right\}_{i \in[d]}\in\mathrm{END}_{j-1, d}(B)$ and $d\ge K$. Denote for brevity \begin{equation}\label{eq:z-V} z_i:=z_{B_{i}} \ \text{ and } \ V_i:=V_{K^{j-7/8}}(z_i) \ \text { for all } i \in[d]. \end{equation}
Note that $V_i\subseteq V_{4K^j}(z_{B})\subseteq V$ for all $i\in[d]$. Let $H_i$ be the conditional expectation of $\eta^V$ given $\eta^V\big|_{ V_i^c\cup\partial V_i }$. By Lemma \ref{lem:DMP}, $\eta^{V_i}:=\eta^V-H_i$ is a DGFF on $V_i$ for each $i\in[d]$. Recall $\varepsilon_{1}=8\sqrt{C_2}$ and define \begin{equation}\label{eq:E_S}
\mathcal{E}_{\mathcal S}=\bigcup_{i\in[d]}\Big\{ \big|H_i(x)-H_i(z_i)\big|\ge\varepsilon_{1} \text{ for some } x\in V_{4K^{j-1}}(z_i)\Big\}. \end{equation}
For each $P\in\mathcal{P}_{j, \mathcal{S}}(B)$, note that $P^{(i)}\subseteq V_{4K^{j-1}}(z_i)$ for all $i\in [d]$, and that $\eta^{V}(x)=\left(\eta^{V_i}(x)+H_i(z_i)\right)+\big( H_i(x)-H_i(z_i) \big)$ for all $x\in P^{(i)}$. Hence, on the event $\mathcal{E}_{\mathcal S}^c$, by the triangle inequality, if $P^{(i)}$ is $(V,\lambda,\alpha_i)$-open then it is $\big(V_i,\lambda+\varepsilon_{1},\alpha_i+H_i(z_i)\big)$-open, where $\alpha_i=\bar\alpha(B_i)$ and $H_i(z_i)$ is regarded as a deterministic number with respect to the field $\eta^{V_i}$. Therefore, on the event $\mathcal{E}_{\mathcal S}^c$, \[ Y_{P, 1,\lambda, \bar\alpha}\le \frac1d \sum_{i=1}^{d} Y'_{i,0,\lambda+\varepsilon_{1}} \] for all $P\in\mathcal{P}_{j, \mathcal{S}}(B)$, where $Y'_{i,0,\lambda+\varepsilon_{1}}$ is the indicator function of the event that $P^{(i)}$ is tame and $\left(V_i,\lambda+\varepsilon_{1},\alpha_i+H_i(z_i)\right)$-open. This implies that \begin{equation*} \zeta_{1,\mathcal{S}}\le \frac1d \sum_{i=1}^{d}\xi'_{i} \ \text{ on the event } \mathcal{E}_{\mathcal S}^c, \end{equation*} where $ \zeta_{1,\mathcal{S}} :=\max \big\{Y_{P, 1,\lambda, \bar\alpha}: P \in \mathcal{P}_{j, \mathcal{S}}(B)\big\}, $ and
\begin{equation}\label{eq:xi'0} \xi'_{i}:=1_{ \big\{\text{ there exists a } (V_i,\lambda+\varepsilon_{1},\alpha_i+H_i(z_i))\text{-open path in } T_{j-1}\left(B_i\right) \big\} }. \end{equation} It follows that for all $\delta>0$ and $\mathcal S\in\mathrm{END}_{j-1, d}(B)$, \begin{equation}\label{eq:1S-delta}
\mathbb P\big(\{\zeta_{1,\mathcal{S}} >\delta\}\cap\mathcal{E}_{\mathcal S}^c\big)\le \mathbb P\left(\frac1d \sum_{i=1}^{d}\xi'_i>\delta\right). \end{equation}
Based on an argument similar to \cite[Lemma 4.4]{MR3947326}, we obtain the following lemma.
\begin{lem}\label{lem:average-level0}
Let $K\ge K_1(\lambda)$. For each $\mathcal{S} :=\left\{ B_i \right\}_{i \in[d]}\in\mathrm{END}_{j-1, d}(B)$, we have
\[
\mathbb P\left(\frac1d \sum_{i=1}^{d}\xi'_i>\delta\right)\le e^{ -10^{-4}K^{1/4}\delta d } \quad \text{ for all } \delta\ge\delta_{1}=\frac12.
\] \end{lem}
\begin{proof} Let $\beta_K=(48K^{1/4})^{-1}$. We will classify $B_i$'s into $\beta_K^{-1}$ groups in the following procedure, such that $V_i$'s in each group are disjoint. Note that if $d_{\infty}(B_{i},B_{i'})\ge 1.5K^{j-7/8}$, then $d_{\infty}(V_i,V_{i'})\ge K^{j-1}$. First, we classify $\mathcal{BD}_{K^{j-1}}$ into $4K^{1/4}=\left( 2K^{j-7/8}/K^{j-1} \right)^2$ families $\tilde{\mathcal G}_s, s\in [4K^{1/4}]$, where $\tilde{\mathcal G}_1$ consists of boxes respectively containing $(2aK^{j-7/8},2bK^{j-7/8})$, $a,b\in\mathbb Z$ and other $\tilde{\mathcal G}_s$'s are its shifts. Let \[ \mathcal{G}_{s}:=\big\{B_{i}: i \in[d],\text{ and } B_{i} \subseteq \tilde B \text{ for some } \tilde B \in \tilde{\mathcal G}_s\big\}. \] Then, by (b) of Proposition \ref{prop:tree}, we can classify each $\mathcal{G}_{s}$ into $12$ groups $\mathcal{G}_{s,t}, t\in [12]$, such that for each $s,t$, a box in $\tilde{\mathcal G_s}$ contains at most one $B_{i}$ in $\mathcal G_{s,t}$. Thus, $V_i$'s in each group $\mathcal G_{s,t}$ are disjoint.
Let $V_{s,t}=\cup_i V_i$ be the union of $V_i$'s with $i$ such that $B_{i}\in \mathcal{G}_{s,t}$. Define the $\sigma$-field generated by the information outside $V_{s,t}$ by \[ \mathcal F_{s,t}:=\big\{ \eta^{V} (x): x\in (V \backslash V_{s,t})\cup\partial V_{s,t} \big\}. \] Then, conditioned on $\mathcal F_{s,t}$, $\xi'_i$'s in each group $\mathcal{G}_{s,t}$ are mutually independent. Denote \[W_{s,t}:=\prod_{B_{i}\in \mathcal{G}_{s,t}}e^{a\beta_K(\xi'_i-\delta)}, \] where $\delta\ge\delta_1=\frac12$ and $a$ is a positive number to be set. Then, we have \begin{equation}\label{eq:W-st} \mathbb{E}W_{s,t}^{1/\beta_K} =\mathbb{E}\prod_{B_{i}\in \mathcal{G}_{s,t}}e^{a(\xi'_i-\delta)}
=\mathbb{E}\prod_{B_{i}\in \mathcal{G}_{s,t}}\mathbb{E}\left(e^{a(\xi'_i-\delta)} \big| \mathcal F_{s,t}\right). \end{equation}
Next, we will estimate $\mathbb{E}\left(e^{a(\xi'_i-\delta)} | \mathcal F_{s,t}\right)$. Since $K\ge K_1(\lambda)= K_0(\lambda+\varepsilon_{1})$, by Corollary \ref{cor:r=0}, $\xi'_i$ is a Bernoulli random variable with $
\mathbb{P}(\xi'_i=1| \mathcal F_{s,t})\le e^{-0.01\sqrt{K}}=:g(K). $ Consequently, \[
\mathbb{E}\left(e^{a(\xi'_i-\delta)} \big| \mathcal F_{s,t}\right)\le e^{a (1-\delta)}g(K)+e^{-a\delta}. \] Set $a=\log\left(\frac{\delta}{1-\delta}g(K)^{-1} \right)$ to optimize the above bound, noting $a\ge\log\left( \frac{\delta_1}{1-\delta_1}g(K)^{-1} \right)=0.01\sqrt{K}>0$. It follows that \begin{equation}\label{eq:mathod-1}
\mathbb{E}\left(e^{a(\xi'_i-\delta)} \big| \mathcal F_{s,t}\right)\le f(\delta)g(K)^{\delta}\le 2g(K)^{\delta}, \end{equation} where $f(\delta):=\left( \frac{\delta}{1-\delta} \right)^{1-\delta}+\left( \frac{\delta}{1-\delta} \right)^{-\delta}\le 2$. Combined with \eqref{eq:W-st}, this yields \begin{equation}\label{eq:W-st-2} \mathbb{E}W_{s,t}^{1/\beta_K}\le \prod_{B_{i}\in \mathcal{G}_{s,t}} \left(2g(K)^{\delta} \right). \end{equation} By the Cauchy-Schwarz inequality, \begin{equation}\label{eq:C-S} \mathbb E e^{a\beta_K\sum_i(\xi'_i-\delta)} =\mathbb{E}\prod_{s=1}^{4K^{1/4}}\prod_{t=1}^{12}W_{s,t} \le \prod_{s=1}^{4K^{1/4}}\prod_{t=1}^{12}\left(\mathbb{E}W_{s,t}^{1/\beta_K}\right)^{\beta_K}. \end{equation} Combining \eqref{eq:W-st-2} and \eqref{eq:C-S}, we obtain \begin{align} \mathbb P\left(\frac1d \sum_{i=1}^{d}\xi'_i>\delta\right) &\le \mathbb E e^{a\beta_K\sum_i(\xi'_i-\delta)} \le\prod_{s=1}^{4K^{1/4}}\prod_{t=1}^{12}\prod_{B_i\in \mathcal{G}_{s,t}}\left(2g(K)^{\delta}\right)^{\beta_K}\nonumber\\ &\le\left(2g(K)^{\delta}\right)^{\beta_Kd} \le e^{ -10^{-4}K^{1/4}\delta d },\label{eq:method-2} \end{align} where the last inequality follows from $d\ge K\ge 2^{32}$ and $\delta\ge \frac12$. \end{proof}
Recall \eqref{eq:E_S} for the definition of $\mathcal{E}_{\mathcal S}$ and define the event \begin{equation}\label{eq:E1} \mathcal{E}_1:= \bigcup_{d\ge K}\bigcup_{\mathcal S\in\mathrm{END}_{j-1, d}(B)}\mathcal{E}_{\mathcal S}. \end{equation} To prove Theorem \ref{thm:r=1}, we need to estimate $\mathbb P\big(\mathcal{E}_1 \big)$ in addition. The argument is quite similar to Lemma \ref{lem:E0}.
\begin{lem}\label{lem:E-1}
Let $K\ge C_5$. Then
$
\mathbb P\big(\mathcal{E}_1 \big)
\le e^{-1.5K^{1/8} }.
$ \end{lem} \begin{proof}
There are at most $K^7$ boxes in $\mathcal{BD}_{K^{j-3}}$ intersecting with some paths in $\mathcal{P}_j(B)$. Denote them by $B_t$'s. For each $B_t$, denote for brevity
\[
z_t=z_{B_t}\ \text{ and } \ V_t=V_{K^{j-7/8}}(z_t).
\]
Let $H_t$ be the conditional expectation of $\eta^V$ given $\eta^V\big|_{ V_t^c\cup\partial V_t }$. Setting $\ell=4K^{j-1}, L=K^{j-7/8}$ in Lemma \ref{lem:fluct-H}, recalling $C_5=(2\vee C_4)^{32}$, for $K\ge C_5$, we have
$
C_3\sqrt{\frac{\ell}{L}}=C_3\sqrt{\frac{4}{K^{1/8}}}\le\varepsilon_1,
$
and for all $t$,
\[
\mathbb P\Big( \big|H_t(x)-H_t(z_t)\big|\ge\varepsilon_{1} \text{ for some } x\in V_{4K^{j-1}}(z_t) \Big)\le 4e^{ -2K^{1/8}}.
\]
Note that $\mathcal{E}_1 $ implies that the fluctuation of $H_t$ in $V_{4K^{j-1}}(z_t)$ is greater than $\varepsilon_{1}$ for some $t$. Thus, we obtain
$
\mathbb P\left(\mathcal{E}_1 \right)
\le 4K^7e^{ -2K^{1/8}}
\le e^{ -1.5K^{1/8}},
$
completing the proof. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm:r=1}]
It can be seen from the definition that
\begin{equation}\label{eq:xi1-delta}
\mathbb P\big(\xi_{1}>\delta\big)
\le \sum_{d=K}^\infty\sum_{\mathcal S\in\mathrm{END}_{j-1,d}(B)} \mathbb P\big(\{\zeta_{1,\mathcal S}>\delta \}\cap\mathcal{E}_{\mathcal S}^c\big)+\mathbb P\big(\mathcal{E}_1\big).
\end{equation}
Note that there are at most $K^7$ boxes in $\mathcal{BD}_{K^{j-3}}$ intersecting with some path in $\mathcal{P}_j(B)$. Therefore, there are at most $K^{7d}$ sequences in $\mathrm{END}_{j-1,d}(B)$.
Combined with \eqref{eq:1S-delta}, Lemma \ref{lem:average-level0} and Lemma \ref{lem:E-1}, and using a union bound, this yields
\[
\mathbb P(\xi_{1}>\delta)
\le\sum_{d=K}^{\infty}K^{7d}e^{ -10^{-4}K^{1/4}\delta d }+e^{ -1.5K^{1/8}}
\le e^{ -K^{1/8}},
\]
where in the last inequality we have used $\sum_{d=K}^{\infty}K^{7d}e^{ -10^{-4}K^{1/4}\delta d }
\le e^{-K}$ for $\delta\ge\frac12$ and $K\ge 2^{32}$.
This completes the proof of the theorem. \end{proof}
\subsection{Proof of Theorem \ref{thm:xi-bound}}\label{subsec:4.2} Assume $r\in[2,m-2]\cap\mathbb Z$, $j\in[r+1,m-1]\cap\mathbb Z$, $B\in \mathrm{END}_j,V_{4K^j}(z_{B})\subseteq V\subseteq V_{2N}$, and $\bar\alpha\in\mathscr A$ in this section. The reasoning of the proof of Theorem \ref{thm:xi-bound} is similar to that of Theorem \ref{thm:r=1}. Recall $\mathcal{S} :=\left\{B_{i}\right\}_{i \in[d]}\in\mathrm{END}_{j-1, d}(B)$. Compared with \eqref{eq:z-V}, here we set \[ z_i:=z_{B_{i}} \ \text{ and } \ V_i:=V_{4K^{j-1}}(z_i) \ \text { for all } i \in[d]. \]
Noting that $V_i\subseteq V$ for all $i$, let $H_i$ be the conditional expectation of $\eta^V$ given $\eta^V\big|_{ V_i^c\cup\partial V_i }$. By Lemma \ref{lem:DMP}, $\eta^{V_i}:=\eta^V-H_i$ is a DGFF on $V_i$ for all $i$. Recall $\beta=2^{-9}$ set in \eqref{eq:beta-c_r} and $\varepsilon_{r+1}=4\sqrt{C_2}\beta^{r/2}$ set in \eqref{eq:epsilon}. Analogously to \eqref{eq:E_S} and \eqref{eq:E1}, we define the events
\begin{equation}\label{eq:Ei}
\mathcal{E}_{r+1,i}:=\left\{\begin{array}{c}
\text { there exists a box } B'\in \mathrm{END}_{j-r-1}\text{ such that } B'\subseteq V_{3K^{j-1}}(z_i) \\
\text{ and }\big|H_i(x)-H_i(z_{B'})\big|\ge\varepsilon_{r+1} \text{ for some } x\in V_{4K^{j-r-1}}(z_{B'})
\end{array}\right\},
\end{equation}
\[
\mathcal{E}_{r+1,\mathcal S}:=\bigcup_{i\in [d]}\mathcal{E}_{r+1,i}, \ \text{ and } \
\mathcal{E}_{r+1}:= \bigcup_{d\ge K}\bigcup_{\mathcal S\in\mathrm{END}_{j-1, d}(B)}\mathcal{E}_{r+1,\mathcal S} \ \text{ for all } r\ge 1.
\]
For $P\in \mathcal{P}_{j, \mathcal{S}}(B), i\in [d], u\in \mathcal{T}_{P^{(i)},r}$, we have $B_u\in \mathrm{END}_{j-r-1}, B_u\subseteq V_{3K^{j-1}}(z_i)$ and $u\subseteq V_{4K^{j-r-1}}(z_{u})$ with $z_u:=z_{B_u}$. Noting that $\eta^{V}(x)=\left(\eta^{V_i}(x)+H_i(z_u)\right)+\left( H_i(x)-H_i(z_u) \right)$ for all $x\in u$, on the event $\mathcal{E}_{r+1,i}^c$ the triangle inequality shows that if $u$ is $(V,\lambda,\bar\alpha_u)$-open, then it is $(V_i, \lambda+\varepsilon_{r+1}, \bar\alpha_u+H_i(z_u))$-open.
Thus, by reasoning analogous to that leading to \eqref{eq:1S-delta}, for all $\delta>0$ and $\mathcal S\in\mathrm{END}_{j-1, d}(B)$,
\begin{equation}\label{eq:r+1S-delta}
\mathbb P\left(\{\zeta_{r+1,\mathcal{S}} >\delta\}\cap\mathcal{E}_{r+1,\mathcal S}^c\right)\le \mathbb P\left(\frac1d \sum_{i=1}^{d}\xi'_{i,r,\lambda+\varepsilon_{r+1},j-1}>\delta\right),
\end{equation}
where
$
\zeta_{r+1,\mathcal{S}} :=\max \big\{Y_{P, r+1,\lambda, \bar\alpha}: P \in \mathcal{P}_{j, \mathcal{S}}(B)\big\},
$ and
\begin{equation}\label{eq:xi'_i}
\xi'_{i,r,\lambda+\varepsilon_{r+1},j-1}:=\max_{P' \in \mathcal{P}_{j-1}(B_i)}\sum_{u\in \mathcal T_{P',r}}\theta_{P'}(u)1_{\big\{ u \text{ is tame and } (V_i,\lambda+\varepsilon_{r+1},\bar\alpha_u+H_i(z_{u}))\text{-open}\big\}}.
\end{equation}
In addition, the following lemma is analogous to Lemma \ref{lem:E-1}. \begin{lem}\label{lem:E-r}
Let $K\ge C_5$. Then
$
\mathbb P\left(\mathcal{E}_{r+1} \right)
\le e^{-c_r},
$
where $c_r=(\beta K)^r$ is set in \eqref{eq:beta-c_r}. \end{lem}
\begin{proof}
There are at most $K^7$ boxes in $\mathrm{END}_{j-1}$ that intersect some path in $\mathcal{P}_j(B)$. Denote them by $B_t$'s. For each $B_t$, denote $z_t:=z_{B_t}, V_t:=V_{4K^{j-1}}(z_t)$, and by $H_t$ the conditional expectation of $\eta^V$ given $\eta^V\big|_{ V_t^c\cup\partial V_t }$. Let $\mathcal{E}_{r+1,t}$ be the event as in \eqref{eq:Ei} with $t$ in place of $i$.
Note that $\mathcal{E}_{r+1}$ implies $\mathcal{E}_{r+1,t}$ for some $t$. It suffices to estimate the probability of $\mathcal{E}_{r+1,t}$ for all $t$.
Setting $\ell=4K^{j-r-1}$ and $L=4K^{j-1}$ in Lemma \ref{lem:fluct-H}, for $K\ge C_5$, we have
$
C_3\sqrt{\frac{\ell}{L}}=C_3 K^{-\frac r2}\le \varepsilon_{r+1},
$
and for any box $B'\in \mathrm{END}_{j-r-1}$ with $B'\subseteq V_{3K^{j-1}}(z_t)$, we have $V_{4K^{j-r-1}}(z_{B'})\subseteq V_t^{\chi}$; therefore
\[
\mathbb P\Big( \big|H_t(x)-H_t(z_t)\big|\ge\varepsilon_{r+1} \text{ for some } x\in V_{4K^{j-r-1}}(z_{B'}) \Big)\le
4\exp\left\{-\frac{\varepsilon_{r+1}^2}{8C_3}K^r \right\}\le 4e^{-2c_r}.
\]
Note that there are at most $\left( \frac{3K^{j-1}}{K^{j-r-3}} \right)^2\le 9K^{2r+4}$ boxes $B'$'s in $\mathrm{END}_{j-r-1}$ such that $B'\subseteq V_{3K^{j-1}}(z_t)$. By a union bound,
$
\mathbb P\left(\mathcal{E}_{r+1} \right)
\le K^{7}\cdot 9K^{2r+4} \cdot 4e^{-2c_r}
\le e^{-c_r},
$
completing the proof. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm:xi-bound}]
We will apply induction on $r$, similar to the proof of \cite[Lemma 4.4]{MR3947326}. To this end, we will prove that the following hold for all $r\in[2,m-2]\cap\mathbb Z$ and $j\in [r+1,m-1]\cap\mathbb Z$.
(i) Suppose $K\ge K_r(\lambda)$, $B\in \mathrm{END}_j$, $V_{4K^j}(z_{B})\subseteq V\subseteq V_{2N}$, and $\bar\alpha\in\mathscr A$. Then,
\[
\mathbb{P}(\xi_{r,\lambda,\bar\alpha,j,B}>\delta)
\le 2 e^{-c_{r-1}(\delta-\delta_r)}
\quad \text{for all } \delta\ge\delta_r.
\]
(ii) Suppose $K\ge K_{r+1}(\lambda)$, $B\in \mathrm{END}_{j+1}$, $d\ge K$, $\left\{B_{i}\right\}_{i \in[d]} \in \mathrm{END}_{j, d}(B)$, and denote $\xi'_i:=\xi'_{i,r,\lambda+\varepsilon_{r+1},j}, i\in [d]$, defined in \eqref{eq:xi'_i}. Then,
\[
\mathbb P\left(\frac1d \sum_{i=1}^{d}\xi'_{i}>\delta\right)
\le \Big(K^{-9} e^{-\beta c_{r-1}(\delta-\delta_{r+1})}\Big)^d \text{ for all } \delta\ge\delta_{r+1}.
\]
In Step 1, we will show that (i) implies (ii). In Step 2, we will show (i) for $r+1$ and all $j \in[r+2, m-1] \cap \mathbb{Z}$, provided that (ii) holds for all \(j \in[r+1, m-1] \cap \mathbb{Z}\). In Step 3, we will show (i) holds for $r=2$ and $j\in [3,m-1]\cap\mathbb Z$.
\textbf{Step 1.} Suppose (i) holds. We will prove (ii).
We can classify $\{B_{i}\}_{i\in [d]}$ into $432(\le 2^9=\beta^{-1})$ groups $\mathcal G_t$'s such that $V_i$'s in each group are disjoint, where $V_i=V_{4K^{j-1}}(z_i)$. Let $V_t$ be the union of $V_i$'s with $i$ such that $B_i\in\mathcal G_t$. Define
$
\mathcal F_{t}:=\big\{ \eta^{V} (x): x\in (V \backslash V_{t})\cup\partial V_{t} \big\}.
$
Conditioned on $\mathcal F_{t}$, $\xi'_i$'s in each group $\mathcal G_t$ are mutually independent.
Next, we will estimate $\mathbb{E}\left(e^{a(\xi'_i-\delta)} \big| \mathcal F_{t}\right)$, where $\delta\ge\delta_{r+1}$ and $a>0$. For each $i\in [d]$, applying (i) with $\lambda+\varepsilon_{r+1}$ in place of $\lambda$, we have, for $K\ge K_{r+1}(\lambda):=K_r(\lambda+\varepsilon_{r+1})$,
\begin{equation}\label{eq:xi'i}
\mathbb P\left(\xi'_i>\delta\big| \mathcal F_{t}\right)\le 2e^{-c_{r-1}(\delta-\delta_r)} \quad \text{ for all } \delta\ge\delta_r.
\end{equation}
Note that $0\le\xi'_i\le1$. It follows that for each $a>0$,
\begin{small}
\begin{equation*}
\mathbb E \left(e^{a\xi'_i}\Big| \mathcal F_{t}\right)
\le e^{a\delta_r}+\int_{\delta_r}^{1} \mathbb P\left(\xi'_i>z\big| \mathcal F_{t}\right)ae^{az} dz\\
\le e^{a\delta_r}\left( 1+2a\int_{0}^{1-\delta_r}e^{(a-c_{r-1})z}dz \right).
\end{equation*} \end{small}
Taking $a=c_{r-1}$, we obtain $\mathbb E \left(e^{a\xi'_i}\big| \mathcal F_{t}\right)\le (1+2c_{r-1})e^{c_{r-1}\delta_r}$. Hence, for all $\delta\ge\delta_r$,
\[
\mathbb E \left(e^{a\left(\xi'_i-\delta\right)}\Big| \mathcal F_{t}\right)\le (1+2c_{r-1})e^{-c_{r-1}(\delta-\delta_r)}.
\]
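As a quick numerical sanity check of the last two displays (illustrative only and not part of the proof), one can integrate the worst-case tail allowed by \eqref{eq:xi'i} against the layer-cake formula $\mathbb E\, e^{aX}=1+\int_0^1\mathbb P(X>z)\,ae^{az}dz$ for a random variable $X\in[0,1]$; the concrete values of $c$ and $\delta$ below are placeholders for $c_{r-1}$ and $\delta_r$.
\begin{verbatim}
# Hedged sanity check: if X takes values in [0,1] and
# P(X > z) <= min(1, 2*exp(-c*(z - delta))) for z >= delta, then
# E[exp(c*X)] <= (1 + 2c) * exp(c*delta).  Placeholder values of c, delta.
import math

def worst_case_mgf(c, delta, n=200000):
    total = 1.0                      # the "+1" from the layer-cake formula
    h = 1.0 / n
    for k in range(n):
        z = (k + 0.5) * h
        tail = 1.0 if z < delta else min(1.0, 2.0 * math.exp(-c * (z - delta)))
        total += tail * c * math.exp(c * z) * h
    return total

for c, delta in [(2.0, 0.5), (8.0, 0.6), (32.0, 0.55)]:
    lhs, rhs = worst_case_mgf(c, delta), (1 + 2 * c) * math.exp(c * delta)
    print(c, delta, lhs <= rhs, round(lhs, 3), round(rhs, 3))
\end{verbatim}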
Using the same argument from \eqref{eq:mathod-1} to \eqref{eq:method-2} as in Lemma \ref{lem:average-level0}, we obtain
\begin{equation}\label{eq:4.3-1}
\mathbb P\left(\frac1d \sum_{i=1}^{d}\xi'_i>\delta\right)
\le \Big( (1+2c_{r-1})e^{-c_{r-1}(\delta-\delta_r)} \Big)^{\beta d}.
\end{equation}
Recall $\Delta_r=\delta_{r+1}-\delta_r$ from \eqref{eq:Delta}. We get
$
\Big( (1+2c_{r-1})e^{-c_{r-1}(\delta_{r+1}-\delta_r)} \Big)^{\beta}\le K^{-9}.
$
Combined with \eqref{eq:4.3-1}, this implies (ii).
\textbf{Step 2.} Assuming that (ii) holds for all \(j \in[r+1, m-1] \cap \mathbb{Z}\), we will show (i) for $r+1$ and all $j \in[r+2, m-1] \cap \mathbb{Z}$. Similar to \eqref{eq:xi1-delta}, we have
\begin{equation}\label{eq:xi-r+1-delta}
\mathbb P\big(\xi_{r+1}>\delta\big)
\le \sum_{d=K}^\infty\sum_{\mathcal S\in\mathrm{END}_{j-1,d}(B)} \mathbb P\big(\{\zeta_{r+1,\mathcal S}>\delta \}\cap\mathcal{E}_{r+1,\mathcal S}^c\big)+\mathbb P\big(\mathcal{E}_{r+1}\big).
\end{equation}
Note that $r+2\le j\le m-1$ implies $j-1\in[r+1,m-1]\cap\mathbb Z$; hence, for $K\ge K_{r+1}(\lambda)$, we can apply (ii) with $j-1$ in place of $j$ and obtain
\[
\mathbb P\left(\frac1d \sum_{i=1}^{d}\xi'_{i,r,\lambda+\varepsilon_{r+1},j-1}>\delta\right)\le
\Big(K^{-9} e^{-\beta c_{r-1}(\delta-\delta_{r+1})}\Big)^d \ \text{ for all } \delta\ge\delta_{r+1}.
\]
Combined with \eqref{eq:r+1S-delta}, this gives that for each $\mathcal S\in\mathrm{END}_{j-1,d}(B)$,
\begin{equation}
\mathbb P\big(\{\zeta_{r+1,\mathcal S}>\delta \}\cap\mathcal{E}_{r+1,\mathcal S}^c\big)
\le \Big(K^{-9} e^{-\beta c_{r-1}(\delta-\delta_{r+1})}\Big)^d \ \text{ for all } \delta\ge\delta_{r+1}.
\end{equation}
Note that there are at most $K^{7d}$ sequences in $\mathrm{END}_{j-1,d}(B)$. By a union bound, the first term on the right hand side of \eqref{eq:xi-r+1-delta} is less than
\begin{equation}\label{eq:3/2}
\sum_{d=K}^{\infty}\Big(K^{-2}e^{-\beta c_{r-1}(\delta-\delta_{r+1})}\Big)^d
\le \frac32e^{-c_{r}(\delta-\delta_{r+1})},
\end{equation}
since $K^{-2}e^{-\beta c_{r-1}(\delta-\delta_{r+1})}\le K^{-2}\le\frac13$ for $\delta\ge \delta_{r+1}$.
Moreover, since $\xi_{r+1}\le1$ we may assume $\delta\le1$, and $\delta_{r+1}\ge\frac12$ then gives $\delta-\delta_{r+1}\le\frac12$; hence, by Lemma \ref{lem:E-r},
\begin{equation}\label{eq:1/2}
\mathbb P\big(\mathcal{E}_{r+1}\big)\le e^{-c_r}\le \frac12e^{-c_{r}/2}\le \frac12e^{-c_{r}(\delta-\delta_{r+1})}.
\end{equation}
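The elementary bookkeeping behind \eqref{eq:3/2} and \eqref{eq:1/2} can be double-checked numerically; the snippet below is illustrative only and uses nothing beyond $\beta=2^{-9}$, $K\ge 2^{32}$ and $c_r=(\beta K)^r$.
\begin{verbatim}
# Hedged bookkeeping check (not part of the proof).
import math

beta, K = 2.0 ** -9, 2.0 ** 32
assert K ** -2 <= 1 / 3                      # ratio of the geometric series
for r in range(1, 5):
    c_r = (beta * K) ** r
    # exp(-c_r) <= 0.5*exp(-c_r/2), compared on the log scale
    assert -c_r <= math.log(0.5) - c_r / 2
for q in (0.05, 0.2, 1 / 3):
    # sum_{d >= d0} q^d = q^d0/(1-q) <= 1.5*q^d0 whenever q <= 1/3
    tail = sum(q ** d for d in range(10, 2000))
    assert tail <= 1.5 * q ** 10
print("bookkeeping checks passed")
\end{verbatim}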
Plugging \eqref{eq:3/2} and \eqref{eq:1/2} into \eqref{eq:xi-r+1-delta}, we obtain
$
\mathbb P(\xi_{r+1}>\delta) \le 2e^{-c_{r}(\delta-\delta_{r+1})}.
$
That is, (i) holds for $r+1$ and all $j \in[r+2, m-1] \cap \mathbb{Z}$.
\textbf{Step 3.} We will show that (i) holds for $r=2$ and $j\in [3,m-1]\cap\mathbb Z$. This follows the lines of Step 1. We write $\xi'_i=\xi'_{i,1,\lambda+\varepsilon_2,j-1}, i\in [d]$ for brevity. Note that $j-1\in [2,m-1]\cap\mathbb Z$. Then, applying Theorem \ref{thm:r=1} with $\lambda+\varepsilon_2$ in place of $\lambda$, for $K\ge K_2(\lambda)=K_1(\lambda+\varepsilon_2)$ we obtain
\[
\mathbb P\left(\xi'_i>\delta\big| \mathcal F_{t}\right) \le \exp\{ -K^{1/8} \} \ \text{ for all } \delta\ge\delta_{1},
\]
playing the role of \eqref{eq:xi'i}.
Consequently, for $a=K^{1/8}$, arguing as in Step 1 (note that $0\le\xi'_i\le1$),
\begin{align*}
\mathbb E \left(e^{a\xi'_i}\Big| \mathcal F_{t}\right)
&\le e^{a\delta_1}+\int_{\delta_1}^{1} \mathbb P\left(\xi'_i>z\big| \mathcal F_{t}\right)ae^{az} dz\\
&\le e^{a\delta_1}+\int_{\delta_1}^{1} e^{-K^{1/8}}ae^{az} dz \le e^{a\delta_1}+e^{a-K^{1/8}}=e^{a\delta_1}+1\le 2e^{a\delta_1}.
\end{align*}
Hence, it holds that
\[
\mathbb E \left(e^{a\left(\xi'_i-\delta\right)}\Big| \mathcal F_{t}\right)
\le 2e^{-K^{1/8}(\delta-\delta_1)}.
\]
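The moment bound above can also be checked numerically in the worst case allowed by the tail estimate; the snippet is illustrative only, and the choices $a=16$ (i.e.\ $K=2^{32}$) and $\delta_1=\frac12$ are placeholders.
\begin{verbatim}
# Hedged check of E[exp(a*X)] <= exp(a*delta1) + 1 <= 2*exp(a*delta1)
# when X is in [0,1] and P(X > z) <= exp(-a) for z >= delta1.
import math

a, delta1 = 16.0, 0.5                      # placeholder values
worst = 1.0 + (math.exp(a * delta1) - 1.0) \
            + math.exp(-a) * (math.exp(a) - math.exp(a * delta1))
bound = 2.0 * math.exp(a * delta1)
print(worst, bound, worst <= bound)
\end{verbatim}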
Then, by the same argument as in Step 1 (using $2^{\beta}\le 2$), we have
\[
\mathbb P\left(\frac1d \sum_{i=1}^{d}\xi'_i>\delta\right)
\le \left( 2e^{-\beta K^{1/8}(\delta-\delta_1)}\right)^d,
\]
as the counterpart of \eqref{eq:4.3-1}.
Recall that $\delta_2-\delta_1=\Delta_1=\frac{9\log K}{\beta K^{1/8}}$, thus for $\delta\ge\delta_2$,
\[
\mathbb P\left(\frac1d \sum_{i=1}^{d}\xi'_i>\delta\right)
\le \left( 2K^{-9}e^{-\beta K^{1/8}(\delta-\delta_2)}\right)^d.
\]
Consequently,
\[
\mathbb{P}(\xi_{2}>\delta)
\le \sum_{d=K}^{\infty}\Big(2K^{-2}e^{-\beta K^{1/8}(\delta-\delta_{2})}\Big)^d+e^{-c_1},
\]
as the counterpart of \eqref{eq:xi-r+1-delta}.
With estimates similar to \eqref{eq:3/2} and \eqref{eq:1/2} (now with $c_1$, $\delta_2$ in place of $c_r$, $\delta_{r+1}$, and noting $2K^{-2}\le\frac13$), we conclude that for $K\ge K_2(\lambda)$,
\[
\mathbb{P}(\xi_{2}>\delta)
\le 2 e^{-c_{1}(\delta-\delta_2)}
\]
for all $\delta\ge\delta_2,$
completing the proof. \end{proof}
\subsection{Proof of Theorem \ref{thm:1.1} }
Recall that $v\in V_{2N}$ is $\lambda$-open if $\left| \eta^{V_{2N}}(v) \right|\le\lambda$, i.e., $(V_{2N},\lambda,0)$-open. Define $ \tilde Y_{P,r,\lambda}:=\sum_{u\in \mathcal T_{P,r}}\theta_P(u)1_{\{u \text{ is tame and } \lambda\text{-open}\}}, $ and \[ \tilde \xi_{r,\lambda,j,B}:=\max\big\{\tilde Y_{P,r,\lambda}: P\in \mathcal P_j(B)\big\}\ \text{ for all } B\in\mathrm{END}_j. \] For $j\in [3,m-1]\cap\mathbb Z$ and $2\le r\le j-1$, let $K\ge K_{r}(\lambda)$. Applying Theorem \ref{thm:xi-bound} to $V=V_{2N}$, $\bar\alpha\equiv 0$ and $B\in\mathrm{END}_j$, we get
\begin{equation}\label{eq:tilde-xi-bound}
\mathbb{P}\left(\tilde\xi_{r,\lambda,j,B}>\delta\right)
\le 2 e^{-c_{r-1}(\delta-\delta_r)} \ \text{ for all } \delta\ge\delta_{r+1},
\end{equation} recalling \eqref{eq:beta-c_r}, \eqref{eq:delta} for the definition of $c_r, \delta_r$. Recall $\mathcal{P}^{\kappa, \delta, K}$ from \eqref{eq:kap-delta-K}. For each $P\in\mathcal{P}^{\kappa, \delta, K}$, let $\big\{P^{(i)}:i\in[d_0]\big\}$ be the child-paths of $P$ in $\mathcal{SL}_{m-1}$ from Proposition \ref{prop:tree}. Recall that $L(u)$ is the depth of $u$ with $L(\rho)=0$. For a sub-path $u$ of $P^{(i)}$ in $\mathcal T_P$, denote by $L_i(u):=L(u)-1$ the level of $u$ in $\mathcal T_{P^{(i)}}$. By Lemma \ref{lem:untame-flow}, \begin{equation*} \sum_{i=1}^{d_0}\sum_{u: 0 \leq L_i(u) \leq m-2}\frac{1}{d_0} \theta_{P^{(i)}}(u) 1_{\{u \text{ is untamed}\}}= \sum_{u: 1 \leq L(u) \leq m-1} \theta_{P}(u) 1_{\{u \text{ is untamed}\}}\leq 2 \delta m. \end{equation*} This implies that there is at least one child-path $P^{(i_0)}$ such that \begin{equation}\label{eq:thm1-untamed} \sum_{u: 0 \leq L_{i_0}(u) \leq m-2} \theta_{P^{(i_0)}}(u) 1_{\{u \text{ is untamed}\}}\leq 2 \delta m. \end{equation} Thus if there is a $\lambda$-open path $P$ in $\mathcal{P}^{\kappa, \delta, K}$, there would exist a $\lambda$-open path $\tilde P$ in $\mathcal P_{m-1}(B)$ for some $B\in\mathrm{END}_{m-1}$ such that \eqref{eq:thm1-untamed} holds with $P^{(i_0)}$ replaced with $\tilde P$ and $L_{i_0}(u)$ replaced with $\tilde L(u)$, the depth of $u$ in $\mathcal T_{\tilde P}$. Note that if $\tilde P$ is $\lambda$-open, then all the sub-paths are $\lambda$-open, which leads to \begin{align*} m-1 &=\sum_{u: 0 \leq\tilde L(u) \leq m-2} \theta_{\tilde P}(u) 1_{\{u \text{ is } \lambda\text{-open}\}}\\ &=\sum_{r=0}^{m-2} \tilde Y_{\tilde P,r,\lambda}+\sum_{u: 0 \leq \tilde L(u) \leq m-2} \theta_{\tilde P}(u) 1_{\{u \text{ is untamed}\}} \le \sum_{r=0}^{m-2}\tilde \xi_{r,\lambda,m-1,B}+2\delta m. \end{align*} By the above inequality, in order to prove that for some $\delta>0, K>0$, \begin{equation}\label{eq:P-kap-delta-K} \lim _{N \rightarrow \infty}\mathbb{P}\big( P \text { is } \lambda\text{-open for some } P \in \mathcal{P}^{\kappa, \delta, K} \big)=0, \end{equation} it is sufficient to show that there exists $\delta>0, K(\lambda,\delta)\in (0,\infty)$ such that for $K\ge K(\lambda,\delta)$, \begin{equation}\label{eq:sum-r} \lim_{m\rightarrow\infty}\mathbb P\left( \sum_{r=0}^{m-2}\tilde \xi_{r,\lambda,m-1,B}\ge m-1-2\delta m \text{ for some } B\in\mathrm{END}_{m-1} \right)=0. \end{equation}
Recall that $\{\varepsilon_r: r\ge 0\}$ is set in \eqref{eq:epsilon} and $K_r(\lambda)=K_0\left(\lambda+\sum_{i=1}^{r}\varepsilon_i\right)$ is defined in \eqref{eq:Kr}. Noting that $\sum_{i=1}^{\infty}\varepsilon_i<\infty$ and $K_0(\cdot)$ is a increasing function, one has $K_{\infty}(\lambda):=K_0\left(\lambda+\sum_{i=1}^{\infty}\varepsilon_i\right)<\infty$. Recall \eqref{eq:Delta} for the definition of $\Delta_r$. There exists $K(\lambda,\delta)\ge K_{\infty}(\lambda)$ such that the following inequality holds for all $K\ge K(\lambda,\delta)$, \[ \sum_{r=1}^{\infty}\Delta_r=\frac{9\log K}{\beta K^{1/8}}+\sum_{r=1}^{\infty}\frac{\log(1+2c_{r})+9\beta^{-1}\log K}{c_{r}}\le \delta. \] Consequently, for $K\ge K(\lambda,\delta)$, we have $\delta_r\le \frac12+\delta$ for all $r\ge 0$. As $m\ge \delta^{-1}$, \begin{equation}\label{eq:sum-r-} \sum_{r=0}^{m-2}\left(\delta_r+\frac{1}{2^{r+2} } \left(1-8\delta\right)m\right) < m-1-2\delta m, \end{equation} where we set $\delta\in(0,\frac18)$. Since $\kappa N<K^{m+2}$, there are at most $(K^6/\kappa)^2$ boxes in $\mathrm{END}_{m-1}$. By a union bound and \eqref{eq:sum-r-}, for $K\ge K(\lambda,\delta)$ and $m\ge \delta^{-1}$, \begin{equation}\label{eq:sum-r-2} \mathbb P\left( \sum_{r=0}^{m-2}\tilde \xi_{r,\lambda,m-1,B}\ge m-1-2\delta m \text{ for some } B\in\mathrm{END}_{m-1} \right) \le \frac{K^{12}}{\kappa^2}\sum_{r=0}^{m-2}p_{m,r}, \end{equation} where $ p_{m,r}=\mathbb P\left(\tilde \xi_{r,\lambda,m-1,B}>\delta_r+\frac{1}{2^{r+2} } \left(1-8\delta\right)m\right). $ As $m\ge \frac{8}{1-8\delta}$, for $r=0,1$, we have $\frac{1}{2^{r+2} } \left(1-8\delta\right)m\ge 1$, implying $p_{m,r}=0$. Applying \eqref{eq:tilde-xi-bound} to $j=m-1$, then for all $2\le r\le m-2$, \[ p_{m,r}\le 2\exp\left\{ -\frac18\left(1-8\delta\right)\left(\frac{\beta K}{2}\right)^{r-1}m \right\}. \] Furthermore, $\beta K/2\ge 2^{22}$ implies that $\frac18\left(\frac{\beta K}{2}\right)^{r-1}\ge r$ for all $r\ge 2$. Thus, \begin{equation}\label{eq:decay-rate} \sum_{r=0}^{m-2}p_{m,r}\le 2\sum_{r=2}^{m-2}e^{ -\left(1-8\delta\right)mr}\le \frac{2}{e^{\left(1-8\delta\right)m}-1}, \end{equation} which converges to $0$ as $m\rightarrow\infty$. Combined with \eqref{eq:sum-r-2}, this implies \eqref{eq:sum-r}.
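As an illustration of how large $K(\lambda,\delta)$ has to be taken for the displayed bound $\sum_{r\ge1}\Delta_r\le\delta$ with $\delta=\frac1{16}$, one may evaluate the left-hand side numerically. The snippet below is a rough check only; it assumes that $\log$ denotes the natural logarithm and uses $\beta=2^{-9}$, $c_r=(\beta K)^r$ as above.
\begin{verbatim}
# Hedged numerical evaluation of sum_r Delta_r; not part of the proof.
import math

def delta_sum(K, beta=2.0 ** -9, max_terms=60):
    s = 9.0 * math.log(K) / (beta * K ** 0.125)
    for r in range(1, max_terms + 1):
        log_cr = r * math.log(beta * K)
        if log_cr > 500.0:                 # remaining terms are negligible
            break
        c_r = math.exp(log_cr)
        s += (math.log(1.0 + 2.0 * c_r) + 9.0 * math.log(K) / beta) / c_r
    return s

for K in (2.0 ** 32, 2.0 ** 64, 2.0 ** 128, 2.0 ** 256):
    print(int(math.log2(K)), delta_sum(K), delta_sum(K) <= 1 / 16)
\end{verbatim}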
In particular, set $\delta=\frac{1}{16}$. Then for $\lambda\ge\lambda_0$, there exists $K(\lambda)=e^{b\lambda^2}\ge K(\lambda,1/16)$ for some $b=b(\lambda_0)>0$ such that for $\delta=\frac{1}{16}$ and $K\ge K(\lambda)$, \eqref{eq:P-kap-delta-K} holds. Let
\begin{equation}\label{eq:eps-lambd}
\epsilon(\lambda)=\frac{1}{16K(\lambda)^2k(\lambda)},
\end{equation}
so that $\mathcal{P}_{N}^{\kappa,\epsilon(\lambda)}=\mathcal{P}^{\kappa, 1/16, K(\lambda)}$ and \eqref{eq:P-kap-delta-K} implies \eqref{eq:complement-event}. This concludes the proof of Theorem \ref{thm:1.1}.
\noindent {\bf Acknowledgments:} This work is supported by NSF of China 11771027. We would like to thank Jian Ding for his suggestions and helpful discussions.
\end{document}
\begin{document}
\title[Asymptotic formulas for the gamma function]{Asymptotic formulas for the gamma function constructed by bivariate means} \author{Zhen-Hang Yang} \address{Power Supply Service Center, ZPEPC Electric Power Research Institute, Hangzhou, Zhejiang, China, 310007} \email{[email protected]} \date{July 19, 2014} \subjclass[2010]{Primary 33B15, 26E60; Secondary 26D15, 11B83} \keywords{Stirling's formula, gamma function, mean, inequality, polygamma function} \thanks{This paper is in final form and no version of it will be submitted for publication elsewhere.}
\begin{abstract} Let $K,M,N$ denote three bivariate means. In this paper, the author proves that asymptotic formulas for the gamma function can be constructed in the form
\begin{equation*}
\Gamma \left( x+1\right) \thicksim \sqrt{2\pi }M\left( x+\theta ,x+1-\theta \right) ^{K\left( x+\epsilon ,x+1-\epsilon \right) }e^{-N\left( x+\sigma ,x+1-\sigma \right) }
\end{equation*}
or
\begin{equation*}
\Gamma \left( x+1\right) \thicksim \sqrt{2\pi }M\left( x+\theta ,x+\sigma \right) ^{K\left( x+\epsilon ,x+1-\epsilon \right) }e^{-M\left( x+\theta ,x+\sigma \right) }
\end{equation*}
as $x\rightarrow \infty $, where $\epsilon ,\theta ,\sigma $ are fixed real numbers. This idea can be extended to the psi and polygamma functions. As examples, some new asymptotic formulas for the gamma function are presented. \end{abstract}
\maketitle
\section{Introduction}
Stirling's formula
\begin{equation}
n!\thicksim \sqrt{2\pi n}n^{n}e^{-n}:=s_{n}  \label{S}
\end{equation}
has important applications in statistical physics, probability theory and number theory. Due to its practical importance, it has attracted the interest of many mathematicians and has motivated a large number of research papers concerning various generalizations and improvements.
Burnside's formula \cite{Burnside-MM-46-1917}
\begin{equation}
n!\thicksim \sqrt{2\pi }\left( \frac{n+1/2}{e}\right) ^{n+1/2}:=b_{n}  \label{B}
\end{equation}
slightly improves (\ref{S}). Gosper \cite{Gosper-PNAS-75-1978} replaced $\sqrt{2\pi n}$ by $\sqrt{2\pi \left( n+1/6\right) }$ in (\ref{S}) to get
\begin{equation}
n!\thicksim \sqrt{2\pi \left( n+\tfrac{1}{6}\right) }\left( \frac{n}{e}\right) ^{n}:=g_{n},  \label{G}
\end{equation}
which is better than (\ref{S}) and (\ref{B}). In the recent paper \cite{Batir-P-27(1)-2008}, N. Batir obtained an asymptotic formula similar to (\ref{G}):
\begin{equation}
n!\thicksim \frac{n^{n+1}e^{-n}\sqrt{2\pi }}{\sqrt{n-1/6}}:=b_{n}^{\prime },  \label{Batir1}
\end{equation}
which is stronger than (\ref{S}) and (\ref{B}). A more accurate approximation for the factorial function
\begin{equation}
n!\thicksim \sqrt{2\pi }\left( \frac{n^{2}+n+1/6}{e^{2}}\right) ^{n/2+1/4}:=m_{n}  \label{M}
\end{equation}
was presented in \cite{Mortici-CMI-19(1)-2010} by Mortici.
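As a quick numerical illustration (not needed for the sequel), the approximations $s_{n},b_{n},g_{n},b_{n}^{\prime },m_{n}$ quoted above can be compared with $n!$ directly; the following Python snippet prints their relative errors for a few sample values of $n$.
\begin{verbatim}
# Relative errors of the classical approximations of n! quoted above.
import math

def approximations(n):
    s = math.sqrt(2 * math.pi * n) * (n / math.e) ** n
    b = math.sqrt(2 * math.pi) * ((n + 0.5) / math.e) ** (n + 0.5)
    g = math.sqrt(2 * math.pi * (n + 1 / 6)) * (n / math.e) ** n
    bp = n ** (n + 1) * math.exp(-n) * math.sqrt(2 * math.pi / (n - 1 / 6))
    m = math.sqrt(2 * math.pi) * ((n * n + n + 1 / 6) / math.e ** 2) ** (n / 2 + 0.25)
    return s, b, g, bp, m

for n in (5, 10, 20):
    exact = math.factorial(n)
    print(n, ["%.2e" % (abs(a - exact) / exact) for a in approximations(n)])
\end{verbatim}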
The classical Euler's gamma function $\Gamma $ may be defined by \begin{equation} \Gamma \left( x\right) =\int_{0}^{\infty }t^{x-1}e^{-t}dt \label{Gamma} \end{equation} for $x>0$, and its logarithmic derivative $\psi \left( x\right) =\Gamma ^{\prime }\left( x\right) /\Gamma \left( x\right) $ is known as the psi or digamma function, while $\psi ^{\prime }$, $\psi ^{\prime \prime }$, ... are called polygamma functions (see \cite{Anderson-PAMS-125(11)-1997}).
The gamma function is closely related to Stirling's formula, since $\Gamma (n+1)=n!$ for all $n\in \mathbb{N}$. This has inspired many authors to look for better approximations for the gamma function. For example, Ramanujan \cite[p. 339]{Ramanujan-SB-1988} gave the double inequality
\begin{equation}
\sqrt{\pi }\left( \tfrac{x}{e}\right) ^{x}\left( 8x^{3}+4x^{2}+x+\tfrac{1}{100}\right) ^{1/6}<\Gamma \left( x+1\right) <\sqrt{\pi }\left( \tfrac{x}{e}\right) ^{x}\left( 8x^{3}+4x^{2}+x+\tfrac{1}{30}\right) ^{1/6}  \label{R}
\end{equation}
for $x\geq 1$. Batir \cite{Batir-AM-91-2008} showed that for $x>0$,
\begin{eqnarray}
&&\sqrt{2}e^{4/9}\left( \frac{x}{e}\right) ^{x}\sqrt{x+\frac{1}{2}}\exp \left( -\tfrac{1}{6\left( x+3/8\right) }\right)   \label{Batir2} \\
&<&\Gamma \left( x+1\right) <\sqrt{2\pi }\left( \frac{x}{e}\right) ^{x}\sqrt{x+\frac{1}{2}}\exp \left( -\tfrac{1}{6\left( x+3/8\right) }\right) .  \notag
\end{eqnarray}
Mortici \cite{Mortici-AM-93-2009-1} proved that for $x\geq 0$,
\begin{eqnarray}
\sqrt{2\pi e}e^{-\omega }\left( \frac{x+\omega }{e}\right) ^{x+1/2} &<&\Gamma \left( x+1\right) \leq \alpha \sqrt{2\pi e}e^{-\omega }\left( \frac{x+\omega }{e}\right) ^{x+1/2},  \label{Ml} \\
\beta \sqrt{2\pi e}e^{-\varsigma }\left( \frac{x+\varsigma }{e}\right) ^{x+1/2} &<&\Gamma \left( x+1\right) \leq \sqrt{2\pi e}e^{-\varsigma }\left( \frac{x+\varsigma }{e}\right) ^{x+1/2}  \label{Mr}
\end{eqnarray}
where $\omega =\left( 3-\sqrt{3}\right) /6$, $\alpha =1.072042464...$ and $\varsigma =\left( 3+\sqrt{3}\right) /6$, $\beta =0.988503589...$.
More results on asymptotic formulas for the factorial and gamma functions can be found in \cite{Shi-JCAM-195-2006}, \cite{Guo-JIPAM-9(1)-2008}, \cite{Mortici-MMN-11(1)-2010}, \cite{Mortici-CMA-61-2011}, \cite{Zhao-PMD-80(3-4)-2012}, \cite{Mortici-MCM-57-2013}, \cite{Qi-JCAM-268-2014}, \cite{Lu-RJ-35(1)-2014} and the references cited therein.
Mortici \cite{Mortici-BTUB-iii-3(52)-2010} presented the idea of improving a given approximation formula for the factorial by replacing an under-approximation and an over-approximation of the factorial function by their geometric mean. In fact, by observing and analyzing these asymptotic formulas for the factorial or gamma function, we find that they share the common form
\begin{equation}
\ln \Gamma \left( x+1\right) \thicksim \frac{1}{2}\ln 2\pi +P_{1}\left( x\right) \ln P_{2}\left( x\right) -P_{3}\left( x\right) +P_{4}\left( x\right) ,  \label{g-form}
\end{equation}
where $P_{1}\left( x\right) ,P_{2}\left( x\right) $ and $P_{3}\left( x\right) $ are all means of $x$ and $\left( x+1\right) $, while $P_{4}\left( x\right) $ satisfies $P_{4}\left( \infty \right) =0$. For example, (\ref{S})--(\ref{M}) can be written as
\begin{eqnarray*}
&&\ln n!\thicksim \frac{1}{2}\ln 2\pi +\left( n+\frac{1}{2}\right) \ln n-n, \\
&&\ln n!\thicksim \frac{1}{2}\ln 2\pi +\left( n+\frac{1}{2}\right) \ln \left( n+\frac{1}{2}\right) -\left( n+\frac{1}{2}\right) , \\
&&\ln n!\thicksim \frac{1}{2}\ln 2\pi +\left( n+\frac{1}{2}\right) \ln n-n+\frac{1}{2}\ln \left( 1+\tfrac{1}{6n}\right) , \\
&&\ln n!\thicksim \frac{1}{2}\ln 2\pi +\left( n+\frac{1}{2}\right) \ln n-n-\frac{1}{2}\ln \left( 1-\tfrac{1}{6n}\right) , \\
&&\ln n!\thicksim \frac{1}{2}\ln 2\pi +\left( n+\frac{1}{2}\right) \ln \sqrt{\frac{n^{2}+4n\left( n+1\right) +\left( n+1\right) ^{2}}{6}}-\left( n+\frac{1}{2}\right) .
\end{eqnarray*}
Inequalities (\ref{R})--(\ref{Mr}) imply that
\begin{eqnarray*}
&&\ln \Gamma \left( x+1\right) \thicksim \frac{1}{2}\ln 2\pi +\left( x+\frac{1}{2}\right) \ln x-x+\frac{1}{6}\ln \left( 1+\frac{1}{2x}+\frac{1}{8x^{2}}+\frac{1}{240x^{3}}\right) , \\
&&\ln \Gamma \left( x+1\right) \thicksim \frac{1}{2}\ln 2\pi +\left( x+\frac{1}{2}\right) \ln x-x+\frac{1}{2}\ln \left( 1+\frac{1}{2x}\right) -\tfrac{1}{6\left( x+3/8\right) }, \\
&&\ln \Gamma \left( x+1\right) \thicksim \frac{1}{2}\ln 2\pi +\left( x+\frac{1}{2}\right) \ln \left( \left( 1-a\right) x+a\left( x+1\right) \right) -\left( \left( 1-a\right) x+a\left( x+1\right) \right) ,
\end{eqnarray*}
where $a=\omega =(3-\sqrt{3})/6$ or $a=\varsigma =(3+\sqrt{3})/6$.
The aim of this paper is to prove the validity of the form (\ref{g-form}), which offers a new way to construct asymptotic formulas for the Euler gamma function in terms of bivariate means. Our main results are given in Section 2. Some new examples are presented in the last section.
\section{Main results}
Before stating and proving our main results, we recall some knowledge on means. Let $I$ be an interval on $\mathbb{R}$. A bivariate real valued function $M:I^{2}\rightarrow \mathbb{R}$ is said to be a bivariate mean if \begin{equation*} \min \left( a,b\right) \leq M\left( a,b\right) \leq \max \left( a,b\right) \end{equation*} for all $a,b\in I$. Clearly, each bivariate mean $M$ is reflexive, that is, \begin{equation*} M\left( a,a\right) =a \end{equation*} for any $a\in I$. $M$ is symmetric if \begin{equation*} M\left( a,b\right) =M\left( b,a\right) \end{equation*} for all $a,b\in I$, and $M$ is said to be homogeneous (of degree one) if \begin{equation} M\left( ta,tb\right) =tM\left( a,b\right) \label{M-h} \end{equation} for any $a,b\in I$ and $t>0$.
The following lemma is crucial for proving our results.
\begin{lemma}[{\protect\cite[Theorems 1, 2, 3]{Toader.MIA.5.2002}}] \label{Lemma M}If $M:I^{2}\rightarrow \mathbb{R}$ is a differentiable mean, then for $c\in I$,
\begin{equation*}
M_{a}^{\prime }\left( c,c\right) ,M_{b}^{\prime }\left( c,c\right) \in \left( 0,1\right) \text{ \ and \ }M_{a}^{\prime }\left( c,c\right) +M_{b}^{\prime }\left( c,c\right) =1\text{.}
\end{equation*}
In particular, if $M$ is symmetric, then
\begin{equation*}
M_{a}^{\prime }\left( c,c\right) =M_{b}^{\prime }\left( c,c\right) =1/2.
\end{equation*}
\end{lemma}
Now we are in a position to state and prove our main results.
\begin{theorem} \label{MT-p2><p3}Let $M:\left( 0,\infty \right) \times \left( 0,\infty \right) \rightarrow \left( 0,\infty \right) $ and $N:\left( -\infty ,\infty \right) \times \left( -\infty ,\infty \right) \rightarrow \left( -\infty ,\infty \right) $ be two symmetric, homogeneous and differentiable means and let $r$ be defined on $\left( 0,\infty \right) $ satisfying $ \lim_{x\rightarrow \infty }r\left( x\right) =0$. Then for fixed real numbers $\theta ,\theta ^{\ast },\sigma ,\sigma ^{\ast }$ with $\theta +\theta ^{\ast }=\sigma +\sigma ^{\ast }=1$ such that $x>-\min \left( 1,\theta ,\theta ^{\ast }\right) $, we have \begin{equation*} \Gamma \left( x+1\right) \thicksim \sqrt{2\pi }M\left( x+\theta ,x+\theta ^{\ast }\right) ^{x+1/2}e^{-N\left( x+\sigma ,x+\sigma ^{\ast }\right) }e^{r\left( x\right) }\text{, as }x\rightarrow \infty . \end{equation*} \end{theorem}
\begin{proof} Since $\lim_{x\rightarrow \infty }r\left( x\right) =0$, the desired result is equivalent to \begin{equation*} \lim_{x\rightarrow \infty }\left( \ln \Gamma \left( x+1\right) -\ln \sqrt{ 2\pi }-\left( x+\frac{1}{2}\right) \ln M\left( x+\theta ,x+\theta ^{\ast }\right) +N\left( x+\sigma ,x+\sigma ^{\ast }\right) \right) =0. \end{equation*} Due to $\lim_{x\rightarrow \infty }r\left( x\right) =0$ and the known relation \begin{equation*} \lim_{x\rightarrow \infty }\left( \ln \Gamma \left( x+1\right) -\left( x+ \frac{1}{2}\right) \ln \left( x+\frac{1}{2}\right) +\left( x+\frac{1}{2} \right) \right) =\frac{1}{2}\ln 2\pi , \end{equation*} it suffices to prove that \begin{eqnarray*} D_{1} &:&=\lim_{x\rightarrow \infty }\left( x+\frac{1}{2}\right) \ln \frac{ M\left( x+\theta ,x+\theta ^{\ast }\right) }{x+1/2}=0, \\ D_{2} &:&=\lim_{x\rightarrow \infty }\left( N\left( x+\sigma ,x+\sigma ^{\ast }\right) -\left( x+\frac{1}{2}\right) \right) =0. \end{eqnarray*}
Letting $x=1/t$, using the homogeneity of $M$, that is, (\ref{M-h}), and utilizing L'Hospital rule give \begin{eqnarray*} D_{1} &=&\lim_{t\rightarrow 0^{+}}\frac{1+t/2}{t}\ln \frac{M\left( 1+\theta t,1+\theta ^{\ast }t\right) }{1+t/2} \\ &=&\lim_{t\rightarrow 0^{+}}\frac{\ln M\left( 1+\theta t,1+\theta ^{\ast }t\right) -\ln \left( 1+t/2\right) }{t} \\ &=&\lim_{t\rightarrow 0^{+}}\left( \frac{\theta M_{x}\left( 1+\theta t,1+\theta ^{\ast }t\right) +\theta ^{\ast }M_{y}\left( 1+\theta t,1+\theta ^{\ast }t\right) }{M\left( 1+\theta t,1+\theta ^{\ast }t\right) }-\frac{1}{ 2+t}\right) \\ &=&\frac{\theta M_{x}\left( 1,1\right) +\theta ^{\ast }M_{y}\left( 1,1\right) }{M\left( 1,1\right) }-\frac{1}{2}=0, \end{eqnarray*} where the last equality holds due to Lemma \ref{Lemma M}.
Similarly, we have \begin{eqnarray*} D_{2} &=&\lim_{x\rightarrow \infty }\left( N\left( x+\sigma ,x+\sigma ^{\ast }\right) -\left( x+\frac{1}{2}\right) \right) \\ &&\overset{1/x=t}{=\!=\!=}\lim_{t\rightarrow 0^{+}}\frac{N\left( 1+\sigma t,1+\sigma ^{\ast }t\right) -\left( 1+t/2\right) }{t} \\ &=&\lim_{t\rightarrow 0^{+}}\left( \sigma N_{x}\left( 1+\sigma t,1+\sigma ^{\ast }t\right) +\sigma ^{\ast }N_{y}\left( 1+\sigma t,1+\sigma ^{\ast }t\right) -\frac{1}{2}\right) \\ &=&\frac{\sigma +\sigma ^{\ast }}{2}-\frac{1}{2}=0, \end{eqnarray*} which proves the desired result. \end{proof}
\begin{theorem} \label{MT-p2=p3}Let $M:\left( 0,\infty \right) \times \left( 0,\infty \right) \rightarrow \left( 0,\infty \right) $ be a mean and let $r$ be defined on $\left( 0,\infty \right) $ satisfying $\lim_{x\rightarrow \infty }r\left( x\right) =0$. Then for fixed real numbers $\theta ,\sigma $ such that $x>-\min \left( 1,\theta ,\sigma \right) $, we have \begin{equation*} \Gamma \left( x+1\right) \thicksim \sqrt{2\pi }M\left( x+\theta ,x+\sigma \right) ^{x+1/2}e^{-M\left( x+\theta ,x+\sigma \right) }e^{r\left( x\right) } \text{, as }x\rightarrow \infty . \end{equation*} \end{theorem}
\begin{proof} Since $\lim_{x\rightarrow \infty }r\left( x\right) =0$, the desired result is equivalent to \begin{equation*} \lim_{x\rightarrow \infty }\left( \ln \Gamma \left( x+1\right) -\ln \sqrt{ 2\pi }-\left( x+\frac{1}{2}\right) \ln M\left( x+\theta ,x+\sigma \right) +M\left( x+\theta ,x+\sigma \right) \right) =0. \end{equation*} Similarly, it suffices to prove that \begin{eqnarray*} D_{3} &:&=\lim_{x\rightarrow \infty }\left( \left( x+\frac{1}{2}\right) \ln \frac{M\left( x+\theta ,x+\sigma \right) }{x+1/2}-\left( M\left( x+\theta ,x+\sigma \right) -\left( x+\frac{1}{2}\right) \right) \right) \\ &=&\lim_{x\rightarrow \infty }\left( \left( M\left( x+\theta ,x+\sigma \right) -\left( x+\frac{1}{2}\right) \right) \times \left( \frac{1}{L\left( y,1\right) }-1\right) \right) =0, \end{eqnarray*} where $L\left( a,b\right) $ is the logarithmic mean of positive $a$ and $b$, $y=M\left( x+\theta ,x+\sigma \right) /\left( x+1/2\right) $.
Now we first show that \begin{equation*} D_{4}:=M\left( x+\theta ,x+\sigma \right) -\left( x+\frac{1}{2}\right) \end{equation*} is bounded. In fact, by the property of mean we see that \begin{equation*} x+\min \left( \theta ,\sigma \right) -\left( x+\frac{1}{2}\right) <D_{4}<x+\max \left( \theta ,\sigma \right) -\left( x+\frac{1}{2}\right) \end{equation*} that is, \begin{equation*} \min \left( \theta ,\sigma \right) -\frac{1}{2}<D_{4}<\max \left( \theta ,\sigma \right) -\frac{1}{2}. \end{equation*} It remains to prove that \begin{equation*} \lim_{x\rightarrow \infty }D_{5}:=\lim_{x\rightarrow \infty }\left( \frac{1}{ L\left( y,1\right) }-1\right) =0. \end{equation*} Since \begin{equation*} \frac{x+\min \left( \theta ,\sigma \right) }{x+1/2}<y=\frac{M\left( x+\theta ,x+\sigma \right) }{x+1/2}<\frac{x+\max \left( \theta ,\sigma \right) }{x+1/2 }, \end{equation*} so we have $\lim_{x\rightarrow \infty }y=1$. This together with \begin{equation*} \min \left( y,1\right) \leq L\left( y,1\right) \leq \max \left( y,1\right) \end{equation*} yields $\lim_{x\rightarrow \infty }L\left( y,1\right) =1$, and therefore, $ \lim_{x\rightarrow \infty }D_{5}=0$.
This completes the proof. \end{proof}
\begin{theorem} \label{MT-p2=p3=x+1/2}Let $K:\left( -\infty ,\infty \right) \times \left( -\infty ,\infty \right) \rightarrow \left( -\infty ,\infty \right) $ be a symmetric, homogeneous and twice differentiable mean and let $r$ be defined on $\left( 0,\infty \right) $ satisfying $\lim_{x\rightarrow \infty }r\left( x\right) =0$. Then for fixed real numbers $\epsilon ,\epsilon ^{\ast }$ with $\epsilon +\epsilon ^{\ast }=1$, we have \begin{equation*} \Gamma \left( x+1\right) \thicksim \sqrt{2\pi }\left( x+\frac{1}{2}\right) ^{K(x+\epsilon ,x+\epsilon ^{\ast })}e^{-\left( x+1/2\right) }e^{r\left( x\right) }\text{, as }x\rightarrow \infty \end{equation*} \end{theorem}
\begin{proof} Due to $\lim_{x\rightarrow \infty }r\left( x\right) =0$, the result in question is equivalent to
\begin{equation*}
\lim_{x\rightarrow \infty }\left( \ln \Gamma \left( x+1\right) -\ln \sqrt{2\pi }-K\left( x+\epsilon ,x+\epsilon ^{\ast }\right) \ln \left( x+\frac{1}{2}\right) +\left( x+\frac{1}{2}\right) \right) =0.
\end{equation*}
Clearly, we only need to prove that
\begin{equation*}
D_{6}:=\lim_{x\rightarrow \infty }\left( K\left( x+\epsilon ,x+\epsilon ^{\ast }\right) -\left( x+\frac{1}{2}\right) \right) \ln \left( x+\frac{1}{2}\right) =0.
\end{equation*}
By the homogeneity of $K$, we get
\begin{eqnarray*}
&&D_{6}\!\overset{1/x=t}{=\!=\!=}\lim_{t\rightarrow 0^{+}}\frac{K\left( 1+\epsilon t,1+\epsilon ^{\ast }t\right) -\left( 1+t/2\right) }{t}\left( \ln \left( 1+\frac{t}{2}\right) -\ln t\right) \\
&=&\lim_{t\rightarrow 0^{+}}\frac{K\left( 1+\epsilon t,1+\epsilon ^{\ast }t\right) -\left( 1+t/2\right) }{t^{2}}\lim_{t\rightarrow 0^{+}}\left( t\ln \left( 1+\frac{t}{2}\right) -t\ln t\right) =0,
\end{eqnarray*}
where the first limit, by L'Hospital's rule, is equal to
\begin{eqnarray*}
&&\lim_{t\rightarrow 0^{+}}\frac{\epsilon K_{x}\left( 1+\epsilon t,1+\epsilon ^{\ast }t\right) +\epsilon ^{\ast }K_{y}\left( 1+\epsilon t,1+\epsilon ^{\ast }t\right) -1/2}{2t} \\
&=&\lim_{t\rightarrow 0^{+}}\frac{\epsilon ^{2}K_{xx}\left( 1+\epsilon t,1+\epsilon ^{\ast }t\right) +2\epsilon \epsilon ^{\ast }K_{xy}\left( 1+\epsilon t,1+\epsilon ^{\ast }t\right) +\epsilon ^{\ast 2}K_{yy}\left( 1+\epsilon t,1+\epsilon ^{\ast }t\right) }{2} \\
&=&\frac{\epsilon ^{2}K_{xx}\left( 1,1\right) +2\epsilon \epsilon ^{\ast }K_{xy}\left( 1,1\right) +\epsilon ^{\ast 2}K_{yy}\left( 1,1\right) }{2}=-\frac{\left( 2\epsilon -1\right) ^{2}}{2}K_{xy}\left( 1,1\right) ,
\end{eqnarray*}
while the second one is clearly equal to zero.
The proof ends. \end{proof}
By the above three theorems, the following assertions are immediate.
\begin{corollary} \label{MCg-form1}Suppose that
(i) the function $K:\mathbb{R}^{2}\rightarrow \mathbb{R}$ is a symmetric, homogeneous and twice differentiable mean;
(ii) the functions $M:\left( 0,\infty \right) \times \left( 0,\infty \right) \rightarrow \left( 0,\infty \right) $ and $N:\mathbb{R}^{2}\rightarrow \mathbb{R}$ are two symmetric, homogeneous, and differentiable means;
(iii) the function $r:\left( 0,\infty \right) \rightarrow \left( -\infty ,\infty \right) $ satisfies $\lim_{x\rightarrow \infty }r\left( x\right) =0$.
Then for fixed real numbers $\epsilon ,\epsilon ^{\ast },\theta ,\theta ^{\ast },\sigma ,\sigma ^{\ast }$ with $\epsilon +\epsilon ^{\ast }=\theta +\theta ^{\ast }=\sigma +\sigma ^{\ast }=1$ such that $x>-\min \left( 1,\theta ,\theta ^{\ast }\right) $, we have \begin{equation*} \Gamma \left( x+1\right) \thicksim \sqrt{2\pi }M\left( x+\theta ,x+\theta ^{\ast }\right) ^{K\left( x+\epsilon ,x+\epsilon ^{\ast }\right) }e^{-N\left( x+\sigma ,x+\sigma ^{\ast }\right) }e^{r\left( x\right) },\text{ as }x\rightarrow \infty . \end{equation*} \end{corollary}
\begin{corollary} \label{MCg-form2}Suppose that
(i) the function $K:\left( -\infty ,\infty \right) ^{2}\rightarrow \left( -\infty ,\infty \right) $ is a symmetric, homogeneous and twice differentiable mean;
(ii) the functions $M,N:\left( 0,\infty \right) ^{2}\rightarrow \left( 0,\infty \right) $ are two means;
(iii) the function $r:\left( 0,\infty \right) \rightarrow \left( -\infty ,\infty \right) $ satisfies $\lim_{x\rightarrow \infty }r\left( x\right) =0$.
Then for fixed real numbers $\epsilon ,\epsilon ^{\ast },\theta ,\sigma $ with $\epsilon +\epsilon ^{\ast }=1$ such that $x>-\min \left( 1,\theta ,\sigma \right) $, we have \begin{equation*} \Gamma \left( x+1\right) \thicksim \sqrt{2\pi }M\left( x+\theta ,x+\sigma \right) ^{K\left( x+\epsilon ,x+\epsilon ^{\ast }\right) }e^{-M\left( x+\theta ,x+\sigma \right) }e^{r\left( x\right) },\text{ as }x\rightarrow \infty . \end{equation*} \end{corollary}
Further, it is clear that our idea of constructing asymptotic formulas for the gamma function in terms of bivariate means can be extended to the psi and polygamma functions.
\begin{theorem} Let $M:\left( 0,\infty \right) ^{2}\rightarrow \left( 0,\infty \right) $ be a mean and let $r$ be defined on $\left( 0,\infty \right) $ satisfying $ \lim_{x\rightarrow \infty }r\left( x\right) =0$. Then for fixed real numbers $\theta $, $\sigma $ such that $x>-\min \left( 1,\theta ,\sigma \right) $, the asymptotic formula for the psi function \begin{equation*} \psi \left( x+1\right) \thicksim \ln M\left( x+\theta ,x+\sigma \right) +r\left( x\right) \end{equation*} holds as $x\rightarrow \infty $. \end{theorem}
\begin{proof} It suffices to prove
\begin{equation*}
\lim_{x\rightarrow \infty }\left( \psi \left( x+1\right) -\ln M\left( x+\theta ,x+\sigma \right) \right) =0.
\end{equation*}
Since $M$ is a mean, we have $x+\min \left( \theta ,\sigma \right) \leq M\left( x+\theta ,x+\sigma \right) \leq x+\max \left( \theta ,\sigma \right) $, and so
\begin{equation*}
\psi \left( x+1\right) -\ln \left( x+\max \left( \theta ,\sigma \right) \right) <\psi \left( x+1\right) -\ln M\left( x+\theta ,x+\sigma \right) <\psi \left( x+1\right) -\ln \left( x+\min \left( \theta ,\sigma \right) \right) ,
\end{equation*}
which yields the required result, since
\begin{equation*}
\lim_{x\rightarrow \infty }\left( \psi \left( x+1\right) -\ln \left( x+\max \left( \theta ,\sigma \right) \right) \right) =\lim_{x\rightarrow \infty }\left( \psi \left( x+1\right) -\ln \left( x+\min \left( \theta ,\sigma \right) \right) \right) =0.
\end{equation*} \end{proof}
\begin{theorem} Let $M:\left( 0,\infty \right) ^{2}\rightarrow \left( 0,\infty \right) $ be a mean and let $r$ be defined on $\left( 0,\infty \right) $ satisfying $ \lim_{x\rightarrow \infty }r\left( x\right) =0$. Then for fixed real numbers $\theta ,\sigma $ such that $x>-\min \left( 1,\theta ,\sigma \right) $, the asymptotic formula for the polygamma function \begin{equation*} \psi ^{(n)}\left( x+1\right) \thicksim \frac{\left( -1\right) ^{n-1}\left( n-1\right) !}{M^{n}\left( x+\theta ,x+\sigma \right) }+r\left( x\right) \end{equation*} holds as $x\rightarrow \infty $. \end{theorem}
\begin{proof} It suffices to show
\begin{equation*}
\lim_{x\rightarrow \infty }\left( \left( -1\right) ^{n-1}\psi ^{(n)}\left( x+1\right) -\frac{\left( n-1\right) !}{M^{n}\left( x+\theta ,x+\sigma \right) }\right) =0.
\end{equation*}
For this purpose, we utilize the known double inequality, proved by Guo and Qi in \cite[Lemma 3]{Guo-BKMS-47(1)-2010}, that for $k\in \mathbb{N}$
\begin{equation*}
\frac{(k-1)!}{x^{k}}+\frac{k!}{2x^{k+1}}<\left( -1\right) ^{k+1}\psi ^{(k)}\left( x\right) <\frac{(k-1)!}{x^{k}}+\frac{k!}{x^{k+1}}
\end{equation*}
holds on $(0,\infty )$, to get
\begin{equation*}
\frac{k!}{2x^{k+1}}<\left( -1\right) ^{k+1}\psi ^{(k)}\left( x\right) -\frac{(k-1)!}{x^{k}}<\frac{k!}{x^{k+1}}.
\end{equation*}
This implies that
\begin{equation}
\lim_{x\rightarrow \infty }\left( \left( -1\right) ^{k-1}\psi ^{(k)}\left( x\right) -\frac{(k-1)!}{x^{k}}\right) =0.  \label{GQ}
\end{equation}
On the other hand, without loss of generality, we assume that $\theta \leq \sigma $. By the property of mean, we see that
\begin{equation*}
x+\theta \leq M\left( x+\theta ,x+\sigma \right) \leq x+\sigma ,
\end{equation*}
and so
\begin{eqnarray*}
\left( -1\right) ^{n-1}\psi ^{(n)}\left( x+1\right) -\frac{\left( n-1\right) !}{\left( x+\theta \right) ^{n}} &<&\left( -1\right) ^{n-1}\psi ^{(n)}\left( x+1\right) -\frac{\left( n-1\right) !}{M^{n}\left( x+\theta ,x+\sigma \right) } \\
&<&\left( -1\right) ^{n-1}\psi ^{(n)}\left( x+1\right) -\frac{\left( n-1\right) !}{\left( x+\sigma \right) ^{n}}.
\end{eqnarray*}
Then, by (\ref{GQ}), for $a=\theta ,\sigma $, we get
\begin{eqnarray*}
&&\left( -1\right) ^{n-1}\psi ^{(n)}\left( x+1\right) -\frac{\left( n-1\right) !}{\left( x+a\right) ^{n}} \\
&=&\left( \left( -1\right) ^{n-1}\psi ^{(n)}\left( x+1\right) -\frac{(n-1)!}{\left( x+1\right) ^{n}}\right) +\left( \frac{(n-1)!}{\left( x+1\right) ^{n}}-\frac{\left( n-1\right) !}{\left( x+a\right) ^{n}}\right) \\
&\rightarrow &0+0=0\text{, as }x\rightarrow \infty ,
\end{eqnarray*}
which gives the desired result.
Thus we complete the proof. \end{proof}
\section{Examples}
In this section, we will list some examples to illustrate applications of Theorems \ref{MT-p2><p3} and \ref{MT-p2=p3}. To this end, we first recall the arithmetic mean $A$, geometric mean $G$, and identric (exponential) mean $I$ of two positive numbers $a$ and $b$ defined by \begin{eqnarray*} A\left( a,b\right) &=&\frac{a+b}{2}\text{, \ \ \ }G\left( a,b\right) =\sqrt{ ab}, \\ \mathcal{I}\left( a,b\right) &=&\left( b^{b}/a^{a}\right) ^{1/\left( b-a\right) }/e\text{ if }a\neq b\text{ and }I\left( a,a\right) =a, \end{eqnarray*} (see \cite{Stolarsky-MM-48-1975}, \cite{Yang-MPT-4-1987}). Clearly, these means are symmetric and homogeneous. Another possible mean is defined by \begin{equation} H_{^{p_{k};q_{k}}}^{n,n-1}\left( a,b\right) =\frac{ \sum_{k=0}^{n}p_{k}a^{k}b^{n-k}}{\sum_{k=0}^{n-1}q_{k}a^{k}b^{n-1-k}}, \label{H^n,n-1} \end{equation} where \begin{equation} \sum_{k=0}^{n}p_{k}=\sum_{k=0}^{n-1}q_{k}=1. \label{pk-qk1} \end{equation} It is clear that $H_{^{p_{k};q_{k}}}^{n,n-1}\left( a,b\right) $ is homogeneous and satisfies $H_{^{p_{k};q_{k}}}^{n,n-1}\left( a,a\right) =a$.
When $p_{k}=p_{n-k}$ and $q_{k}=q_{n-1-k}$, we denote $ H_{^{p_{k};q_{k}}}^{n,n-1}\left( a,b\right) $ by $S_{^{p_{k};q_{k}}}^{n,n-1} \left( a,b\right) $, which can be expressed as \begin{equation} S_{^{p_{k};q_{k}}}^{n,n-1}\left( a,b\right) =\frac{\sum_{k=0}^{[n/2]}p_{k} \left( ab\right) ^{k}\left( a^{n-2k}+b^{n-2k}\right) }{\sum_{k=0}^{[\left( n-1\right) /2]}q_{k}\left( ab\right) ^{k}\left( a^{n-1-2k}+b^{n-1-2k}\right) }, \label{S^n,n-1} \end{equation} where $p_{k}$ and $q_{k}$ satisfy \begin{equation} \sum_{k=0}^{[n/2]}\left( 2p_{k}\right) =\sum_{k=0}^{[\left( n-1\right) /2]}\left( 2q_{k}\right) =1, \label{pk-qk2} \end{equation} $[x]$ denotes the integer part of real number $x$. Evidently, $ S_{^{p_{k};q_{k}}}^{n,n-1}$ is symmetric and homogeneous, and $ S_{^{p_{k};q_{k}}}^{n,n-1}\left( a,a\right) =a$. But $ H_{^{p_{k};q_{k}}}^{n,n-1}\left( a,b\right) $ and $ S_{^{p_{k};q_{k}}}^{n,n-1}\left( a,b\right) $ are not always means of $a$ and $b$. For instance, when $p=2/3$, \begin{equation*} S_{^{p;1/2}}^{2,1}\left( a,b\right) =\frac{pa^{2}+pb^{2}+\left( 1-2p\right) ab}{\left( a+b\right) /2}=\frac{2}{3}\frac{2a^{2}+2b^{2}-ab}{a+b}>\max (a,b) \end{equation*} in the case of $\max (a,b)>4\min \left( a,b\right) $. Indeed, it is easy to prove that $S_{^{p;1/2}}^{2,1}\left( a,b\right) $ is a mean if and only if $ p\in \lbrack 0,1/2]$.
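The last two remarks are easy to illustrate numerically; the snippet below (illustrative only, not a proof) evaluates $S_{p;1/2}^{2,1}$ at a few sample points.
\begin{verbatim}
# Hedged illustration: S^{2,1}_{p;1/2}(a,b) for p = 2/3 exceeds max(a,b)
# once max(a,b) > 4*min(a,b), while for p in [0, 1/2] it stays between
# min(a,b) and max(a,b) on the sampled pairs.
def S21(a, b, p):
    return (p * a * a + p * b * b + (1 - 2 * p) * a * b) / ((a + b) / 2)

print("p = 2/3, (a, b) = (1, 5):", S21(1.0, 5.0, 2 / 3))   # > 5
for p in (0.0, 0.25, 0.5):
    ok = all(min(a, b) <= S21(a, b, p) <= max(a, b)
             for a in (1.0, 2.0, 7.0) for b in (1.0, 3.0, 30.0))
    print("p =", p, "mean inequality on samples:", ok)
\end{verbatim}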
Secondly, we recall the so-called completely monotone functions. A function $ f$ is said to be completely monotonic on an interval $I$ , if $f$ has derivatives of all orders on $I$ and satisfies
\begin{equation} (-1)^{n}f^{(n)}(x)\geq 0\text{ for all }x\in I\text{ and }n=0,1,2,.... \label{cm} \end{equation}
If the inequality (\ref{cm}) is strict, then $f$ is said to be strictly completely monotonic on $I$. It is known (Bernstein's Theorem) that $f$ is completely monotonic on $(0,\infty )$ if and only if
\begin{equation*}
f(x)=\int_{0}^{\infty }e^{-xt}d\mu \left( t\right) ,
\end{equation*}
where $\mu $ is a nonnegative measure on $[0,\infty )$ such that the integral converges for all $x>0$; see \cite[p. 161]{Widder-PUPP-1941}.
\begin{example} Let
\begin{eqnarray*}
K\left( a,b\right) &=&N\left( a,b\right) =A\left( a,b\right) =\frac{a+b}{2}, \\
M\left( a,b\right) &=&A^{2/3}\left( a,b\right) G^{1/3}\left( a,b\right) =\left( \frac{a+b}{2}\right) ^{2/3}\left( \sqrt{ab}\right) ^{1/3}
\end{eqnarray*}
and $\theta =\sigma =0$ in Theorem \ref{MT-p2><p3}. Then we can obtain an asymptotic formula for the gamma function as follows.
\begin{eqnarray*}
\ln \Gamma (x+1) &\thicksim &\frac{1}{2}\ln 2\pi +\left( x+\frac{1}{2}\right) \ln \left( \left( x+\frac{1}{2}\right) ^{2/3}\left( \sqrt{x\left( x+1\right) }\right) ^{1/3}\right) -\left( x+\frac{1}{2}\right) \\
&=&\frac{1}{2}\ln 2\pi +\frac{2}{3}\left( x+\frac{1}{2}\right) \ln \left( x+\frac{1}{2}\right) +\frac{1}{6}\left( x+\frac{1}{2}\right) \ln x \\
&&+\frac{1}{6}\left( x+\frac{1}{2}\right) \ln \left( x+1\right) -\left( x+\frac{1}{2}\right) ,\text{ as }x\rightarrow \infty .
\end{eqnarray*} \end{example}
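Before turning to the monotonicity property below, a rough numerical illustration (which proves nothing) may be helpful: the remainder of the above formula can be evaluated with \texttt{math.lgamma} and should tend to $0$.
\begin{verbatim}
# Hedged numerical illustration of the asymptotic formula in the example:
# the remainder should tend to 0 (in fact it is positive and decreasing,
# see the proposition below).
import math

def remainder(x):
    logM = ((2 / 3) * math.log(x + 0.5)
            + (1 / 6) * math.log(x) + (1 / 6) * math.log(x + 1))
    approx = 0.5 * math.log(2 * math.pi) + (x + 0.5) * logM - (x + 0.5)
    return math.lgamma(x + 1) - approx

for x in (1.0, 5.0, 20.0, 100.0):
    print(x, remainder(x))
\end{verbatim}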
Further, we can prove
\begin{proposition} For $x>0$, the function \begin{eqnarray*} f_{1}(x) &=&\ln \Gamma (x+1)-\frac{1}{2}\ln 2\pi -\frac{2}{3}\left( x+\frac{1 }{2}\right) \ln \left( x+\frac{1}{2}\right) -\frac{1}{6}\left( x+\frac{1}{2} \right) \ln x \\ &&-\frac{1}{6}\left( x+\frac{1}{2}\right) \ln \left( x+1\right) +\left( x+ \frac{1}{2}\right) \end{eqnarray*} is a completely monotone function. \end{proposition}
\begin{proof} Differentiating and utilizing the relations \begin{equation} \psi (x)=\int_{0}^{\infty }\left( \frac{e^{-t}}{t}-\frac{e^{-xt}}{1-e^{-t}} \right) dt\text{ \ and \ }\ln x=\int_{0}^{\infty }\frac{e^{-t}-e^{-xt}}{t}dt \label{psi-ln} \end{equation} yield \begin{eqnarray*} f_{1}^{\prime }(x) &=&\psi \left( x+1\right) -\frac{1}{6}\ln \left( x+1\right) -\frac{1}{6}\ln x-\frac{2}{3}\ln \left( x+\frac{1}{2}\right) + \frac{1}{12\left( x+1\right) }-\frac{1}{12x} \\ &=&\int_{0}^{\infty }\left( \frac{e^{-t}}{t}-\frac{e^{-\left( x+1\right) t}}{ 1-e^{-t}}\right) dt-\int_{0}^{\infty }\frac{e^{-t}-e^{-xt}}{6t} dt-\int_{0}^{\infty }\frac{e^{-t}-e^{-\left( x+1\right) t}}{6t}dt \\ &&-\int_{0}^{\infty }\frac{2\left( e^{-t}-e^{-\left( x+1/2\right) t}\right) }{3t}dt+\frac{1}{12}\int_{0}^{\infty }e^{-\left( x+1\right) t}dt-\frac{1}{12} \int_{0}^{\infty }e^{-xt}dt \\ &=&\int_{0}^{\infty }e^{-xt}\left( \frac{1}{6t}+\frac{e^{-t}}{6t}+\frac{ 2e^{-t/2}}{3t}-\frac{e^{-t/2}}{1-e^{-t}}+\frac{1}{12}\left( e^{-t}-1\right) \right) dt \\ &=&\int_{0}^{\infty }e^{-xt}e^{-t/2}\left( \frac{\cosh \left( t/2\right) }{3t }+\frac{2}{3t}-\frac{1}{2\sinh \left( t/2\right) }-\frac{1}{6}\sinh \frac{t}{ 2}\right) dt \\ &:&=\int_{0}^{\infty }e^{-xt}e^{-t/2}u\left( \frac{t}{2}\right) dt, \end{eqnarray*} where \begin{equation*} u\left( t\right) =\frac{\cosh t}{6t}+\frac{1}{3t}-\frac{1}{2\sinh t}-\frac{1 }{6}\sinh t. \end{equation*} Factoring and expanding in power series lead to \begin{eqnarray*} u\left( t\right) &=&-\frac{t\cosh 2t-\sinh 2t-4\sinh t+5t}{12t\sinh t} \\ &=&-\frac{\sum_{n=1}^{\infty }\frac{2^{2n-2}t^{2n-1}}{\left( 2n-2\right) !} -\sum_{n=1}^{\infty }\frac{2^{2n-1}t^{2n-1}}{\left( 2n-1\right) !} -4\sum_{n=1}^{\infty }\frac{t^{2n-1}}{\left( 2n-1\right) !}+5t}{12t\sinh \left( t/2\right) } \\ &=&-\frac{\sum_{n=3}^{\infty }\frac{\left( 2n-3\right) 2^{2n-2}-4}{\left( 2n-1\right) !}t^{2n-1}}{12t\sinh t}<0 \end{eqnarray*} for $t>0$. This reveals that $-f_{1}^{\prime }$ is a completely monotone function, which together with $f_{1}(x)>\lim_{x\rightarrow \infty }f_{1}(x)=0 $ leads us to the desired result. \end{proof}
Using the decreasing property of $f_{1}$ on $\left( 0,\infty \right) $ and notice that \begin{equation*} f_{1}(1)=\ln \frac{2^{3/4}e^{3/2}}{3\sqrt{2\pi }}\text{ \ and \ } f_{1}(\infty )=0 \end{equation*} we immediately get
\begin{corollary} For $n\in \mathbb{N}$, it is true that \begin{equation*} \sqrt{2\pi }\left( \frac{(n+1/2)^{4}n\left( n+1\right) }{e^{6}}\right) ^{\left( n+1/2\right) /6}<n!<\frac{2^{3/4}e^{3/2}}{3}\left( \frac{ (n+1/2)^{4}n\left( n+1\right) }{e^{6}}\right) ^{\left( n+1/2\right) /6}, \end{equation*} with the optimal constants $\sqrt{2\pi }\approx 2.5066$ and $ 2^{3/4}e^{3/2}/3\approx 2.5124$. \end{corollary}
\begin{example} Let \begin{eqnarray*} K\left( a,b\right) &=&N\left( a,b\right) =A\left( a,b\right) =\frac{a+b}{2}, \\ M\left( a,b\right) &=&\mathcal{I}\left( a,b\right) =\left( b^{b}/a^{a}\right) ^{1/\left( b-a\right) }/e\text{ if }a\neq b\text{ and } I\left( a,a\right) =a \end{eqnarray*} and $\theta =0$ in Theorem \ref{MT-p2><p3}. Then we get the asymptotic formulas: \begin{equation*} \ln \Gamma (x+1)\thicksim \frac{1}{2}\ln 2\pi +\left( x+\frac{1}{2}\right) \left( (x+1)\ln (x+1)-x\ln x-1\right) -\left( x+\frac{1}{2}\right) , \end{equation*} as $x\rightarrow \infty $. \end{example}
And, we have
\begin{proposition} For $x>0$, the function \begin{equation*} f_{2}(x)=\ln \Gamma (x+1)-\frac{1}{2}\ln 2\pi -\left( x+\frac{1}{2}\right) \left( (x+1)\ln (x+1)-x\ln x-1\right) +x+\frac{1}{2} \end{equation*} is a completely monotone function. \end{proposition}
\begin{proof} Differentiation gives
\begin{eqnarray*}
f_{2}^{\prime }(x) &=&\psi \left( x+1\right) -\left( 2x+\frac{3}{2}\right) \ln \left( x+1\right) +\left( 2x+\frac{1}{2}\right) \ln x+2, \\
f_{2}^{\prime \prime }(x) &=&\psi ^{\prime }\left( x+1\right) -2\ln \left( x+1\right) +2\ln x+\frac{1}{2\left( x+1\right) }+\frac{1}{2x}.
\end{eqnarray*}
Applying the relations (\ref{psi-ln}), we can express $f_{2}^{\prime \prime }(x)$ as
\begin{eqnarray*}
f_{2}^{\prime \prime }(x) &=&\int_{0}^{\infty }t\frac{e^{-\left( x+1\right) t}}{1-e^{-t}}dt-2\int_{0}^{\infty }\frac{e^{-xt}-e^{-\left( x+1\right) t}}{t}dt+\frac{1}{2}\int_{0}^{\infty }\left( e^{-\left( x+1\right) t}+e^{-xt}\right) dt \\
&=&\int_{0}^{\infty }e^{-xt}\left( \frac{te^{-t}}{1-e^{-t}}-2\frac{1-e^{-t}}{t}+\frac{1}{2}\left( e^{-t}+1\right) \right) dt \\
&=&\int_{0}^{\infty }e^{-xt}e^{-t/2}\left( \frac{t}{2\sinh \left( t/2\right) }-4\frac{\sinh \left( t/2\right) }{t}+\cosh \frac{t}{2}\right) dt \\
&:&=\int_{0}^{\infty }e^{-xt}e^{-t/2}v\left( \tfrac{t}{2}\right) dt,
\end{eqnarray*}
where
\begin{equation*}
v\left( t\right) =\frac{t}{\sinh t}-2\frac{\sinh t}{t}+\cosh t.
\end{equation*}
Employing the hyperbolic version of the Wilker inequality proved in \cite{Zhu-MIA-10(4)-2007} (also see \cite{Zhu-AAA-485842-2009}, \cite{Yang-JIA-2014-166})
\begin{equation*}
\left( \frac{t}{\sinh t}\right) ^{2}+\frac{t}{\tanh t}>2,
\end{equation*}
we get
\begin{equation*}
\frac{t}{\sinh t}v\left( t\right) =\left( \frac{t}{\sinh t}\right) ^{2}+\frac{t}{\tanh t}-2>0,
\end{equation*}
and so $f_{2}^{\prime \prime }(x)$ is completely monotone for $x>0$. Hence, $f_{2}^{\prime }(x)<\lim_{x\rightarrow \infty }f_{2}^{\prime }(x)=0$, and then $f_{2}(x)>\lim_{x\rightarrow \infty }f_{2}(x)=0$, which indicates that $f_{2}$ is completely monotone for $x>0$.
This completes the proof. \end{proof}
The decreasing property of $f_{2}$ on $\left( 0,\infty \right) $ and the facts that
\begin{equation*}
f_{2}\left( 0^{+}\right) =\ln \frac{e}{\sqrt{2\pi }}\text{, \ }f_{2}\left( 1\right) =\ln \frac{e^{3}}{8\sqrt{2\pi }}\text{, \ }f_{2}\left( \infty \right) =0
\end{equation*}
give the following
\begin{corollary} For $x>0$, the sharp double inequality \begin{equation*} \sqrt{2\pi }e^{-2x-1}\frac{(x+1)^{(x+1)\left( x+1/2\right) }}{x^{x\left( x+1/2\right) }}<\Gamma (x+1)<e^{-2x}\frac{(x+1)^{(x+1)\left( x+1/2\right) }}{ x^{x\left( x+1/2\right) }} \end{equation*} holds.
For $n\in \mathbb{N}$, it holds that \begin{equation*} \sqrt{2\pi }e^{-2n-1}\frac{(n+1)^{(n+1)\left( n+1/2\right) }}{n^{n\left( n+1/2\right) }}<n!<\frac{e^{3}}{8}e^{-2n-1}\frac{(n+1)^{(n+1)\left( n+1/2\right) }}{n^{n\left( n+1/2\right) }} \end{equation*} with the best constants $\sqrt{2\pi }\approx 2.5066$ and $e^{3}/8\approx 2.5107$. \end{corollary}
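A quick numerical check of the second double inequality (illustrative only) can be done on the logarithmic scale. Note that at $n=1$ the right-hand bound is attained with equality, which is consistent with the optimality of the constant $e^{3}/8$; the snippet therefore checks strictness for $n\ge 2$.
\begin{verbatim}
# Hedged numerical check of the double inequality above, on the log scale.
import math

for n in range(2, 11):
    core = -(2 * n + 1) + (n + 1) * (n + 0.5) * math.log(n + 1) \
           - n * (n + 0.5) * math.log(n)
    lower = 0.5 * math.log(2 * math.pi) + core
    upper = 3 - math.log(8) + core          # log(e^3/8)
    print(n, lower < math.lgamma(n + 1) < upper)
\end{verbatim}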
\begin{example} \label{E-M3,2}Let \begin{eqnarray*} K\left( a,b\right) &=&N\left( a,b\right) =A\left( a,b\right) =\frac{a+b}{2}, \\ M\left( a,b\right) &=&M_{^{p;q}}^{3,2}\left( a,b\right) =\frac{ pa^{3}+pb^{3}+\left( 1/2-p\right) a^{2}b+\left( 1/2-p\right) ab^{2}}{ qa^{2}+qb^{2}+(1-2q)ab} \\ &=&\frac{a+b}{2}\frac{2pa^{2}+2pb^{2}+\left( 1-4p\right) ab}{ qa^{2}+qb^{2}+\left( 1-2q\right) ab} \end{eqnarray*} and $\theta =0$ in Theorem \ref{MT-p2><p3}, where $p$ and $q$ are parameters to be determined. Then, we have \begin{eqnarray*} K\left( x,x+1\right) &=&N\left( x,x+1\right) =x+\frac{1}{2}, \\ M\left( x,x+1\right) &=&S_{^{p;q}}^{3,2}\left( x,x+1\right) =\left( x+1/2\right) \frac{x^{2}+x+2p}{x^{2}+x+q}. \end{eqnarray*} Straightforward computations give \begin{eqnarray*} \lim_{x\rightarrow \infty }\tfrac{\ln \Gamma (x+1)-\ln \sqrt{2\pi }-\left( x+1/2\right) \ln M_{p;q}^{3,2}\left( x,x+1\right) +x+1/2}{x^{-1}} &=&q-2p- \frac{1}{24}, \\ \lim_{x\rightarrow \infty }\tfrac{\ln \Gamma (x+1)-\ln \sqrt{2\pi }-\left( x+1/2\right) \ln M_{p;2p+1/24}^{3,2}\left( x,x+1\right) +x+1/2}{x^{-3}} &=&- \frac{160}{1920}\left( p-\frac{23}{160}\right) , \end{eqnarray*} and solving the equation set \begin{equation*} q-2p-\frac{1}{24}=0\text{ and }-\frac{160}{1920}\left( p-\frac{23}{160} \right) =0 \end{equation*} leads to \begin{equation*} p=\frac{23}{160},q=\frac{79}{240}. \end{equation*} And then, \begin{equation*} M\left( x,x+1\right) =\left( x+\frac{1}{2}\right) \frac{x^{2}+x+\frac{23}{80} }{x^{2}+x+\frac{79}{240}}. \end{equation*} It is easy to check that $S_{^{p;q}}^{3,2}\left( a,b\right) $ is a symmetric and homogeneous mean of positive numbers $a$ and $b$ for $p=23/160$, $ q=79/240$. Hence, by Theorem \ref{MT-p2><p3}, we have the optimal asymptotic formula for the gamma function \begin{equation*} \ln \Gamma (x+1)\thicksim \frac{1}{2}\ln 2\pi +\left( x+\frac{1}{2}\right) \ln \tfrac{\left( x+1/2\right) \left( x^{2}+x+23/80\right) }{x^{2}+x+79/240} -\left( x+\frac{1}{2}\right) , \end{equation*} as $x\rightarrow \infty $, and \begin{equation*} \lim_{x\rightarrow \infty }\tfrac{\ln \Gamma (x+1)-\ln \sqrt{2\pi }-\left( x+1/2\right) \ln \tfrac{\left( x+1/2\right) \left( x^{2}+x+23/80\right) }{ x^{2}+x+79/240}+x+1/2}{x^{-5}}=-\tfrac{18\,029}{29\,030\,400}. \end{equation*} \end{example}
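A rough numerical illustration (not a proof) of the claimed $x^{-5}$ rate: the scaled remainder $x^{5}f(x)$, where $f$ denotes the expression inside the last limit, can be compared with $-18\,029/29\,030\,400\approx -6.21\times 10^{-4}$.
\begin{verbatim}
# Hedged check of the x^{-5} rate claimed in the last display.
import math

def f(x):
    M = (x + 0.5) * (x * x + x + 23 / 80) / (x * x + x + 79 / 240)
    return (math.lgamma(x + 1) - 0.5 * math.log(2 * math.pi)
            - (x + 0.5) * math.log(M) + (x + 0.5))

target = -18029 / 29030400
for x in (5.0, 10.0, 20.0):
    print(x, x ** 5 * f(x), target)
\end{verbatim}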
Moreover, this asymptotic formula has a nice property.
\begin{proposition} For $x>-1/2$, the function $f_{3}$ defined by \begin{equation} f_{3}\left( x\right) =\ln \Gamma (x+1)-\frac{1}{2}\ln 2\pi -\left( x+\frac{1 }{2}\right) \ln \tfrac{\left( x+1/2\right) \left( x^{2}+x+23/80\right) }{ x^{2}+x+79/240}+\left( x+\frac{1}{2}\right) . \label{f3} \end{equation} is increasing and concave. \end{proposition}
\begin{proof} Differentiation gives \begin{eqnarray*} f_{3}^{\prime }\left( x\right) &=&\psi \left( x+1\right) +\ln \left( x^{2}+x+ \frac{79}{240}\right) -\ln \left( x^{2}+x+\frac{23}{80}\right) \\ &&-\ln \left( x+\frac{1}{2}\right) -2\frac{\left( x+1/2\right) ^{2}}{ x^{2}+x+23/80}+2\frac{\left( x+1/2\right) ^{2}}{x^{2}+x+79/240}, \end{eqnarray*} \begin{eqnarray*} f_{3}^{\prime \prime }\left( x\right) &=&\psi ^{\prime }\left( x+1\right) +6 \frac{x+1/2}{x^{2}+x+79/240}-6\frac{x+1/2}{x^{2}+x+23/80} \\ &&-\frac{1}{x+1/2}+4\frac{\left( x+1/2\right) ^{3}}{\left( x^{2}+x+23/80\right) ^{2}}-4\frac{\left( x+1/2\right) ^{3}}{\left( x^{2}+x+79/240\right) ^{2}}. \end{eqnarray*} Denote by $x+1/2=t$ and make use of recursive relation \begin{equation} \psi ^{\left( n\right) }(x+1)-\psi ^{\left( n\right) }(x)=\left( -1\right) ^{n}\frac{n!}{x^{n+1}} \label{psi-rel.} \end{equation} yield \begin{eqnarray*} &&f_{3}^{\prime \prime }(t+\frac{1}{2})-f_{3}^{\prime \prime }(t-\frac{1}{2}) \\ &=&-\tfrac{1}{\left( t+1/2\right) ^{2}}+6\tfrac{t+1}{\left( t+1\right) ^{2}+19/240}-6\tfrac{t+1}{\left( t+1\right) ^{2}+3/80}-\frac{1}{\left( t+1\right) }+4\tfrac{\left( t+1\right) ^{3}}{\left( \left( t+1\right) ^{2}+3/80\right) ^{2}} \\ &&-4\tfrac{\left( t+1\right) ^{3}}{\left( \left( t+1\right) ^{2}+19/240\right) ^{2}}-\left( 6\tfrac{t}{t^{2}+19/240}-6\tfrac{t}{ t^{2}+3/80}-\frac{1}{t}+4\tfrac{t^{3}}{\left( t^{2}+3/80\right) ^{2}}-4 \tfrac{t^{3}}{\left( t^{2}+19/240\right) ^{2}}\right) \\ &=&\frac{f_{31}\left( t\right) }{t\left( t+1\right) \left( t+\frac{1}{2} \right) ^{2}\left( t^{2}+2t+83/80\right) ^{2}\left( t^{2}+3/80\right) ^{2}\left( t^{2}+2t+259/240\right) ^{2}\left( t^{2}+19/240\right) ^{2}}, \end{eqnarray*} where \begin{eqnarray*} f_{31}\left( t\right) &=&\tfrac{18\,029}{138\,240}t^{12}+\tfrac{18\,029}{ 23\,040}t^{11}+\tfrac{83\,674\,657}{41\,472\,000}t^{10}+\tfrac{24\,178\,957}{ 8294\,400}t^{9}+\tfrac{34\,366\,211\,867}{13\,271\,040\,000}t^{8}+\tfrac{ 4894\,651\,067}{3317\,760\,000}t^{7} \\ &&+\tfrac{74\,296\,657\,243}{132\,710\,400\,000}t^{6}+\tfrac{ 20\,147\,292\,749}{132\,710\,400\,000}t^{5}+\tfrac{297\,092\,035\,417}{ 9437\,184\,000\,000}t^{4}+\tfrac{66\,777\,391\,051}{14\,155\,776\,000\,000} t^{3} \\ &&+\tfrac{295\,012\,866\,563}{566\,231\,040\,000\,000}t^{2}+\tfrac{ 3972\,595\,981}{188\,743\,680\,000\,000}t+\tfrac{166\,825\,684\,249}{ 60\,397\,977\,600\,000\,000} \\ &>&0\text{ for }t=x+1/2>0\text{.} \end{eqnarray*}
This shows that $f_{3}^{\prime \prime }(t+\frac{1}{2})-f_{3}^{\prime \prime }(t-\frac{1}{2})>0$, that is, $f_{3}^{\prime \prime }(x+1)-f_{3}^{\prime \prime }(x)>0$, and so
\begin{equation*}
f_{3}^{\prime \prime }(x)<f_{3}^{\prime \prime }(x+1)<f_{3}^{\prime \prime }(x+2)<...<f_{3}^{\prime \prime }(\infty )=0.
\end{equation*}
This reveals that $f_{3}$ is concave on $\left( -1/2,\infty \right) $, and we conclude that $f_{3}^{\prime }(x)>\lim_{x\rightarrow \infty }f_{3}^{\prime }(x)=0$, which proves the desired result.
As a consequence of the above proposition, we have
\begin{corollary} For $x>0$, the double inequality \begin{equation*} \sqrt{\tfrac{158e}{69}}\left( \tfrac{x+1/2}{e}\tfrac{x^{2}+x+23/80}{ x^{2}+x+79/240}\right) ^{x+1/2}<\Gamma (x+1)<\sqrt{2\pi }\left( \tfrac{x+1/2 }{e}\tfrac{x^{2}+x+23/80}{x^{2}+x+79/240}\right) ^{x+1/2} \end{equation*} holds true, where $\sqrt{158e/69}\approx 2.4949$ and $\sqrt{2\pi } \approx 2.5066$ are the best.
For $n\in \mathbb{N}$, the double inequality \begin{equation*} \left( \tfrac{1118e}{1647}\right) ^{3/2}\left( \tfrac{n+1/2}{e}\tfrac{ n^{2}+n+23/80}{n^{2}+n+79/240}\right) ^{n+1/2}<n!<\sqrt{2\pi }\left( \tfrac{ n+1/2}{e}\tfrac{n^{2}+n+23/80}{n^{2}+n+79/240}\right) ^{n+1/2} \end{equation*} holds true with the best constants $\left( 1118e/1647\right) ^{3/2}\approx 2.5065$ and $\sqrt{2\pi }\approx 2.5066$. \end{corollary}
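To illustrate how tight these factorial bounds are, the following short Python sketch (ours, purely illustrative, standard library only) tabulates both sides of the inequality for small $n$.
\begin{verbatim}
from math import e, sqrt, pi, factorial

lower = (1118*e/1647)**1.5      # best lower constant from the corollary
upper = sqrt(2*pi)              # best upper constant

def g(n):
    # ((n+1/2)/e * (n^2+n+23/80)/(n^2+n+79/240))^(n+1/2)
    return ((n + 0.5)/e*(n*n + n + 23/80)/(n*n + n + 79/240))**(n + 0.5)

for n in range(1, 11):
    print(n, lower*g(n), factorial(n), upper*g(n))
\end{verbatim}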
\begin{example} \label{E-N3,2}Let \begin{eqnarray*} K\left( a,b\right) &=&M\left( a,b\right) =A\left( a,b\right) =\frac{a+b}{2}, \\ N\left( a,b\right) &=&S_{^{p;q}}^{3,2}\left( a,b\right) =\frac{ pa^{3}+pb^{3}+\left( 1/2-p\right) ab^{2}+\left( 1/2-p\right) a^{2}b}{ qa^{2}+qb^{2}+\left( 1-2q\right) ab} \\ &=&\frac{a+b}{2}\frac{2pa^{2}+2pb^{2}+\left( 1-4p\right) ab}{ qa^{2}+qb^{2}+\left( 1-2q\right) ab} \end{eqnarray*} and $\sigma =0$ in Theorem \ref{MT-p2><p3}, where $p$ and $q$ are parameters to be determined. Direct computations give \begin{eqnarray*} \lim_{x\rightarrow \infty }\tfrac{\ln \Gamma (x+1)-\frac{1}{2}\ln 2\pi -\left( x+1/2\right) \ln \left( x+1/2\right) +\left( x+1/2\right) \frac{ x^{2}+x+2p}{x^{2}+x+q}}{x^{-1}} &=&2p-q-\frac{1}{24}, \\ \lim_{x\rightarrow \infty }\tfrac{\ln \Gamma (x+1)-\frac{1}{2}\ln 2\pi -\left( x+1/2\right) \ln \left( x+1/2\right) +\left( x+1/2\right) \frac{ x^{2}+x+2p}{x^{2}+x+2p-1/24}}{x^{-3}} &=&\frac{7}{480}-\frac{1}{12}p. \end{eqnarray*} Solving the simultaneous equations \begin{eqnarray*} 2p-q-\frac{1}{24} &=&0, \\ \frac{7}{480}-\frac{1}{12}p &=&0 \end{eqnarray*} leads to $p=7/40$, $q=37/120$. And then, \begin{equation*} N\left( x,x+1\right) =\left( x+1/2\right) \frac{x^{2}+x+7/20}{x^{2}+x+37/120} . \end{equation*}
An easy verification shows that $S_{^{p;q}}^{3,2}\left( a,b\right) $ is a symmetric and homogeneous mean of positive numbers $a$ and $b$ for $p=7/40$, $q=37/120$. Hence, by Theorem \ref{MT-p2><p3} we get the best asymptotic formula for the gamma function \begin{equation*} \ln \Gamma (x+1)\thicksim \frac{1}{2}\ln 2\pi +\left( x+\frac{1}{2}\right) \ln \left( x+\frac{1}{2}\right) -\left( x+\frac{1}{2}\right) \frac{ x^{2}+x+7/20}{x^{2}+x+37/120}, \end{equation*} as $x\rightarrow \infty $. And we have \begin{equation*} \lim_{x\rightarrow \infty }\tfrac{\ln \Gamma (x+1)-\frac{1}{2}\ln 2\pi -\left( x+1/2\right) \ln \left( x+1/2\right) +\left( x+1/2\right) \frac{ x^{2}+x+7/20}{x^{2}+x+37/120}}{x^{-5}}=-\frac{1517}{2419\,200}. \end{equation*} \end{example}
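The ``easy verification'' mentioned above can also be spot-checked numerically. The Python sketch below is an illustration of ours (not a proof): it samples random positive pairs and checks the mean property, symmetry and degree-one homogeneity of $S_{p;q}^{3,2}$ with $p=7/40$, $q=37/120$.
\begin{verbatim}
import random

P, Q = 7/40, 37/120

def S32(a, b):
    num = P*a**3 + P*b**3 + (0.5 - P)*a*b**2 + (0.5 - P)*a**2*b
    den = Q*a**2 + Q*b**2 + (1 - 2*Q)*a*b
    return num/den

random.seed(0)
for _ in range(1000):
    a = random.uniform(0.1, 10.0)
    b = random.uniform(0.1, 10.0)
    t = random.uniform(0.1, 10.0)
    m = S32(a, b)
    assert min(a, b) <= m <= max(a, b)                  # mean property
    assert abs(S32(b, a) - m) <= 1e-12*abs(m)           # symmetry
    assert abs(S32(t*a, t*b) - t*m) <= 1e-12*t*abs(m)   # homogeneity of degree one
print("mean property, symmetry and homogeneity hold on all samples")
\end{verbatim}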
Now we prove the following assertion related to this asymptotic formula.
\begin{proposition} Let the function $f_{4}$ be defined on $\left( -1/2,\infty \right) $ by \begin{equation*} f_{4}(x)=\ln \Gamma (x+1)-\tfrac{1}{2}\ln 2\pi -\left( x+\tfrac{1}{2}\right) \ln (x+\tfrac{1}{2})+\left( x+\tfrac{1}{2}\right) \frac{x^{2}+x+7/20}{ x^{2}+x+37/120}. \end{equation*} Then $f_{4}$ is increasing and concave on $\left( -1/2,\infty \right) $. \end{proposition}
\begin{proof} Differentiation gives \begin{eqnarray*} f_{4}^{\prime }(x) &=&\psi \left( x+1\right) -\ln \left( x+\frac{1}{2} \right) +\frac{1}{24}\frac{1}{x^{2}+x+37/120}-\frac{1}{12}\frac{\left( x+1/2\right) ^{2}}{\left( x^{2}+x+37/120\right) ^{2}}, \\ f_{4}^{\prime \prime }(x) &=&\psi ^{\prime }\left( x+1\right) -\frac{1}{x+1/2 }-\frac{1}{4}\frac{x+1/2}{\left( x^{2}+x+37/120\right) ^{2}}+\frac{1}{3} \frac{\left( x+\frac{1}{2}\right) ^{3}}{\left( x^{2}+x+37/120\right) ^{3}}. \end{eqnarray*} Setting $x+1/2=t$ and making use of the recursive relation (\ref{psi-rel.}) yield \begin{eqnarray*} &&f_{4}^{\prime \prime }(t+\frac{1}{2})-f_{4}^{\prime \prime }(t-\frac{1}{2}) \\ &=&-\tfrac{1}{\left( t+1/2\right) ^{2}}-\frac{1}{t+1}-\frac{1}{4}\frac{t+1}{ \left( \left( t+1\right) ^{2}+7/120\right) ^{2}}+\frac{1}{3}\frac{\left( t+1\right) ^{3}}{\left( \left( t+1\right) ^{2}+7/120\right) ^{3}} \\ &&-\left( -\frac{1}{t}-\frac{1}{4}\frac{t}{\left( t^{2}+7/120\right) ^{2}}+ \frac{1}{3}\frac{t^{3}}{\left( t^{2}+7/120\right) ^{3}}\right) \\ &=&\frac{f_{41}\left( t\right) }{t\left( t+1\right) \left( t+1/2\right) ^{2}\left( t^{2}+7/120\right) ^{3}\left( t^{2}+2t+127/120\right) ^{3}}, \end{eqnarray*} where \begin{eqnarray*} f_{41}\left( t\right) &=&\frac{1517}{11\,520}t^{8}+\frac{1517}{2880}t^{7}+ \frac{161\,087}{192\,000}t^{6}+\frac{387\,883}{576\,000}t^{5}+\frac{ 39\,563\,149}{138\,240\,000}t^{4} \\ &&+\frac{4462\,549}{69\,120\,000}t^{3}+\frac{67\,788\,161}{8294\,400\,000} t^{2}+\frac{2794\,421}{8294\,400\,000}t+\frac{702\,595\,369}{ 11\,943\,936\,000\,000} \\ &>&0\text{ for }t=x+1/2>0. \end{eqnarray*} This implies that $f_{4}^{\prime \prime }(t+\frac{1}{2})-f_{4}^{\prime \prime }(t-\frac{1}{2})>0$, that is, $f_{4}^{\prime \prime }(x+1)-f_{4}^{\prime \prime }(x)>0$, and so \begin{equation*} f_{4}^{\prime \prime }(x)<f_{4}^{\prime \prime }(x+1)<f_{4}^{\prime \prime }(x+2)<...<f_{4}^{\prime \prime }(\infty )=0. \end{equation*} This shows that $f_{4}$ is concave on $\left( -1/2,\infty \right) $, and therefore $f_{4}^{\prime }(x)>\lim_{x\rightarrow \infty }f_{4}^{\prime }(x)=0$, which proves the desired result. \end{proof}
By the increasing property of $f_{4}$ on $\left( -1/2,\infty \right) $ and the facts \begin{equation*} f_{4}\left( 0\right) =\ln \frac{e^{21/37}}{\sqrt{\pi }}\text{, \ } f_{4}\left( 1\right) =\ln \frac{2e^{423/277}}{3\sqrt{3\pi }}\text{, \ } f_{4}\left( \infty \right) =0, \end{equation*} we have
\begin{corollary} For $x>0$, the double inequality \begin{equation*} e^{21/37}\sqrt{2}\left( \tfrac{x+1/2}{\exp \left( \frac{x^{2}+x+7/20}{ x^{2}+x+37/120}\right) }\right) ^{x+1/2}<\Gamma (x+1)<\sqrt{2\pi }\left( \tfrac{x+1/2}{\exp \left( \frac{x^{2}+x+7/20}{x^{2}+x+37/120}\right) } \right) ^{x+1/2} \end{equation*} holds, where $e^{21/37}\sqrt{2}\approx 2.4946$ and $\sqrt{2\pi }\approx 2.5066$ are the best.
For $n\in \mathbb{N}$, the double inequality \begin{equation*} e^{423/277}\tfrac{2\sqrt{2}}{3\sqrt{3}}(\tfrac{n+1/2}{e})^{n+1/2}\exp \left( -\tfrac{1}{24}\tfrac{n+1/2}{n^{2}+n+37/120}\right) <n!<\sqrt{2\pi }(\tfrac{ n+1/2}{e})^{n+1/2}\exp \left( -\tfrac{1}{24}\tfrac{n+1/2}{n^{2}+n+37/120} \right) \end{equation*} holds true with the best constants $2\sqrt{2}e^{423/277}/\left( 3\sqrt{3} \right) \approx 2.5065$ and $\sqrt{2\pi }\approx 2.5066$. \end{corollary}
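The closed-form constants in this corollary come from the values $f_{4}(0)$ and $f_{4}(1)$ listed before it, and they are easy to confirm numerically. The following lines are a minimal Python check of ours using only the standard library.
\begin{verbatim}
from math import lgamma, log, exp, sqrt, pi

def f4(x):
    return (lgamma(x + 1) - 0.5*log(2*pi) - (x + 0.5)*log(x + 0.5)
            + (x + 0.5)*(x*x + x + 7/20)/(x*x + x + 37/120))

print(exp(f4(0)), exp(21/37)/sqrt(pi))              # e^{f_4(0)} = e^{21/37}/sqrt(pi)
print(exp(f4(1)), 2*exp(423/277)/(3*sqrt(3*pi)))    # e^{f_4(1)} = 2 e^{423/277}/(3 sqrt(3 pi))
print(sqrt(2*pi)*exp(f4(0)), exp(21/37)*sqrt(2))    # lower constant for x > 0
print(sqrt(2*pi)*exp(f4(1)), 2*sqrt(2)*exp(423/277)/(3*sqrt(3)))  # lower constant for n >= 1
\end{verbatim}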
\begin{example} \label{E-N4,3}Let \begin{eqnarray*} K\left( a,b\right) &=&M\left( a,b\right) =A\left( a,b\right) =\frac{a+b}{2}, \\ N\left( a,b\right) &=&S_{^{p,q;r}}^{4,3}\left( a,b\right) =\frac{ pa^{4}+pb^{4}+qa^{3}b+qab^{3}+\left( 1-2p-2q\right) a^{2}b^{2}}{ ra^{3}+rb^{3}+\left( 1/2-r\right) a^{2}b+\left( 1/2-r\right) ab^{2}} \end{eqnarray*} and $\sigma =0$ in Theorem \ref{MT-p2><p3}. In a similar way, we can determine that the best parameters satisfy \begin{equation*} r=2p+\frac{1}{2}q-\frac{7}{48}\text{, \ }p=\frac{21}{40}-\frac{7}{4}q\text{, \ }q=\frac{7303}{35\,280}, \end{equation*} which imply \begin{equation*} p=\frac{3281}{20\,160},\quad q=\frac{7303}{35\,280},\quad r=\frac{111}{392}. \end{equation*} Then, \begin{equation} N\left( x,x+1\right) =x+\tfrac{1}{2}+\tfrac{1517}{44\,640}\tfrac{1}{x+1/2}+ \tfrac{343}{44\,640}\tfrac{x+1/2}{x^{2}+x+111/196}:=N_{4/3}\left( x,x+1\right) . \label{N4/3} \end{equation} In this case, we easily check that $S_{^{p,q;r}}^{4,3}\left( a,b\right) $ is a mean of $a$ and $b$. Consequently, from Theorem \ref{MT-p2><p3} the following best asymptotic formula for the gamma function \begin{equation*} \ln \Gamma (x+1)\sim \frac{1}{2}\ln 2\pi +\left( x+1/2\right) \ln (x+1/2)-N_{4/3}\left( x,x+1\right) \end{equation*} holds true as $x\rightarrow \infty $. And, we have \begin{equation*} \lim_{x\rightarrow \infty }\tfrac{\ln \Gamma (x+1)-\frac{1}{2}\ln 2\pi -\left( x+1/2\right) \ln \left( x+1/2\right) +N_{4/3}\left( x,x+1\right) }{ x^{-7}}=\tfrac{10\,981}{31\,610\,880}. \end{equation*} \end{example}
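The parameter values quoted in this example follow from the three displayed relations by elementary elimination; the following exact-arithmetic Python snippet of ours confirms the arithmetic.
\begin{verbatim}
from fractions import Fraction as F

q = F(7303, 35280)
p = F(21, 40) - F(7, 4)*q        # p = 21/40 - (7/4) q
r = 2*p + q/2 - F(7, 48)         # r = 2p + q/2 - 7/48

print(p == F(3281, 20160))   # True
print(q == F(7303, 35280))   # True
print(r == F(111, 392))      # True
\end{verbatim}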
We now present the monotonicity and convexity properties related to this asymptotic formula.
\begin{proposition} Let the function $f_{5}$ be defined on $\left( -1/2,\infty \right) $ by \begin{equation*} f_{5}(x)=\ln \Gamma (x+1)-\frac{1}{2}\ln 2\pi -\left( x+1/2\right) \ln (x+1/2)+N_{4/3}\left( x,x+1\right) , \end{equation*} where $N_{4/3}\left( x,x+1\right) $ is defined by (\ref{N4/3}). Then $f_{5}$ is decreasing and convex on $\left( -1/2,\infty \right) $. \end{proposition}
\begin{proof} Differentiation gives \begin{eqnarray*} f_{5}^{\prime }(x) &=&\psi \left( x+1\right) -\ln \left( x+\frac{1}{2} \right) -\frac{1517}{44\,640\left( x+1/2\right) ^{2}} \\ &&+\frac{343}{44\,640\left( x^{2}+x+111/196\right) }-\frac{343}{22\,320} \frac{\left( x+1/2\right) ^{2}}{\left( x^{2}+x+111/196\right) ^{2}}, \end{eqnarray*} \begin{eqnarray*} f_{5}^{\prime \prime }(x) &=&\psi ^{\prime }\left( x+1\right) -\frac{1}{x+1/2 }+\frac{1517}{22\,320\left( x+1/2\right) ^{3}} \\ &&-\frac{343}{7440}\frac{x+1/2}{\left( x^{2}+x+111/196\right) ^{2}}+\frac{343 }{5580}\frac{\left( x+1/2\right) ^{3}}{\left( x^{2}+x+111/196\right) ^{3}}. \end{eqnarray*} Setting $x+1/2=t$ and making use of the recursive relation (\ref{psi-rel.}) yield \begin{eqnarray*} &&f_{5}^{\prime \prime }(t+\frac{1}{2})-f_{5}^{\prime \prime }(t-\frac{1}{2}) \\ &=&-\tfrac{1}{\left( t+1/2\right) ^{2}}-\tfrac{1}{\left( t+1\right) }+\tfrac{ 1517}{22\,320\left( t+1\right) ^{3}}-\tfrac{343}{7440}\tfrac{t+1}{\left( \left( t+1\right) ^{2}+31/98\right) ^{2}}+\tfrac{343}{5580}\tfrac{\left( t+1\right) ^{3}}{\left( \left( t+1\right) ^{2}+31/98\right) ^{3}} \\ &&-\left( -\tfrac{1}{t}+\tfrac{1517}{22\,320t^{3}}-\tfrac{343}{7440}\tfrac{t }{\left( t^{2}+31/98\right) ^{2}}+\tfrac{343}{5580}\tfrac{t^{3}}{\left( t^{2}+31/98\right) ^{3}}\right) \\ &=&-\frac{f_{51}\left( t\right) }{80\left( t+1/2\right) ^{2}t^{3}\left( t+1\right) ^{3}\left( t^{2}+2t+129/98\right) ^{3}\left( t^{2}+31/98\right) ^{3}}, \end{eqnarray*} where \begin{eqnarray*} f_{51}\left( t\right) &=&\tfrac{10\,981}{784}t^{10}+\tfrac{54\,905}{784} t^{9}+\tfrac{21\,028\,039}{134\,456}t^{8}+\tfrac{27\,614\,911}{134\,456} t^{7}+\tfrac{294\,820\,517}{1647\,086}t^{6}+\tfrac{739\,744\,471}{6588\,344} t^{5}+ \\ &&\tfrac{138\,266\,105\,451}{2582\,630\,848}t^{4}+\tfrac{25\,165\,604\,049}{ 1291\,315\,424}t^{3}+\tfrac{2726\,271\,884\,261}{506\,195\,646\,208}t^{2}+ \tfrac{574\,150\,150\,569}{506\,195\,646\,208}t+\tfrac{347\,724\,739\,077}{ 3543\,369\,523\,456} \\ &>&0\text{ for }t=x+1/2>0\text{.} \end{eqnarray*} This implies that $f_{5}^{\prime \prime }(t+\frac{1}{2})-f_{5}^{\prime \prime }(t-\frac{1}{2})<0$, that is, $f_{5}^{\prime \prime }(x+1)-f_{5}^{\prime \prime }(x)<0$, and so \begin{equation*} f_{5}^{\prime \prime }(x)>f_{5}^{\prime \prime }(x+1)>f_{5}^{\prime \prime }(x+2)>...>f_{5}^{\prime \prime }(\infty )=0. \end{equation*} This shows that $f_{5}$ is convex on $\left( -1/2,\infty \right) $, and therefore $f_{5}^{\prime }(x)<\lim_{x\rightarrow \infty }f_{5}^{\prime }(x)=0$, which proves the desired statement. \end{proof}
Employing the decreasing property of $f_{5}$ on $\left( -1/2,\infty \right)$, we obtain
\begin{corollary} For $x>0$, the double inequality \begin{eqnarray*} &&\sqrt{2\pi }\left( \tfrac{x+1/2}{e}\right) ^{x+1/2}\exp \left( -\tfrac{1517 }{44\,640}\tfrac{1}{x+1/2}-\tfrac{343}{44\,640}\tfrac{x+1/2}{x^{2}+x+111/196} \right) \\ &<&\Gamma (x+1)<e^{2987/39960}\sqrt{2e}\left( \tfrac{x+1/2}{e}\right) ^{x+1/2}\exp \left( -\tfrac{1517}{44\,640}\tfrac{1}{x+1/2}-\tfrac{343}{ 44\,640}\tfrac{x+1/2}{x^{2}+x+111/196}\right) \end{eqnarray*} holds, where $\sqrt{2\pi }\approx 2.5066$ and $e^{2987/39960}\sqrt{2e} \approx 2.5126$ are the best constants.
For $n\in \mathbb{N}$, it holds that \begin{eqnarray*} &&\sqrt{2\pi }\left( \tfrac{n+1/2}{e}\right) ^{n+1/2}\exp \left( -\tfrac{1517 }{44\,640}\tfrac{1}{n+1/2}-\tfrac{343}{44\,640}\tfrac{n+1/2}{n^{2}+n+111/196} \right) \\ &<&n!<\frac{2\sqrt{6}}{9}\exp \left( \tfrac{829\,607}{543\,240}\right) \left( \tfrac{n+1/2}{e}\right) ^{n+1/2}\exp \left( -\tfrac{1517}{44\,640} \tfrac{1}{n+1/2}-\tfrac{343}{44\,640}\tfrac{n+1/2}{n^{2}+n+111/196}\right) \end{eqnarray*} with the best constants $\sqrt{2\pi }\approx 2.5066$ and $2\sqrt{6}\exp \left( \tfrac{829\,607}{543\,240}\right) /9\approx 2.5067$. \end{corollary}
Lastly, we give an application example of Theorem \ref{MT-p2=p3}.
\begin{example} Let \begin{equation*} M\left( a,b\right) =H_{p,q;r}^{2,1}\left( a,b\right) =\frac{ pb^{2}+qa^{2}+(1-p-q)ab}{rb+(1-r)a} \end{equation*} and $\theta =0,\sigma =1$ in Theorem \ref{MT-p2=p3}. Then, by the same method as before, we can derive two optimal parameter triples \begin{eqnarray*} \left( p_{1},q_{1},r_{1}\right) &=&\left( \frac{129-59\sqrt{3}}{360},\frac{ 129+59\sqrt{3}}{360},\frac{90-29\sqrt{3}}{180}\right) , \\ \left( p_{2},q_{2},r_{2}\right) &=&\left( \frac{129+59\sqrt{3}}{360},\frac{ 129-59\sqrt{3}}{360},\frac{90+29\sqrt{3}}{180}\right) . \end{eqnarray*} Then, \begin{eqnarray} H_{p_{1},q_{1};r_{1}}^{2,1}\left( x,x+1\right) &=&\frac{x^{2}+\frac{180-59 \sqrt{3}}{180}x+\frac{129-59\sqrt{3}}{360}}{x+\frac{90-29\sqrt{3}}{180}} :=M_{1}\left( x,x+1\right) , \label{M1} \\ H_{p_{2},q_{2};r_{2}}^{2,1}\left( x,x+1\right) &=&\frac{x^{2}+\frac{180+59 \sqrt{3}}{180}x+\frac{129+59\sqrt{3}}{360}}{x+\frac{90+29\sqrt{3}}{180}} :=M_{2}\left( x,x+1\right) . \label{M2} \end{eqnarray} It is easy to check that $H_{p,q;r}^{2,1}\left( a,b\right) $ is a mean of $a$ and $b$ for $\left( p,q,r\right) =\left( p_{1},q_{1},r_{1}\right) $ and for $\left( p,q,r\right) =\left( p_{2},q_{2},r_{2}\right) $. Thus, an application of Theorem \ref{MT-p2=p3} implies that the following two asymptotic formulas \begin{equation*} \ln \Gamma (x+1)\sim \frac{1}{2}\ln 2\pi +\left( x+1/2\right) \ln M_{i}\left( x,x+1\right) -M_{i}\left( x,x+1\right) \text{, }i=1,2 \end{equation*} are valid as $x\rightarrow \infty $. And, we have \begin{eqnarray*} \lim_{x\rightarrow \infty }\tfrac{\ln \Gamma (x+1)-\frac{1}{2}\ln 2\pi -\left( x+1/2\right) \ln M_{1}\left( x,x+1\right) +M_{1}\left( x,x+1\right) }{x^{-4}} &=&-\tfrac{1481\sqrt{3}}{2332\,800}, \\ \lim_{x\rightarrow \infty }\tfrac{\ln \Gamma (x+1)-\frac{1}{2}\ln 2\pi -\left( x+1/2\right) \ln M_{2}\left( x,x+1\right) +M_{2}\left( x,x+1\right) }{x^{-4}} &=&\tfrac{1481\sqrt{3}}{2332\,800}. \end{eqnarray*} \end{example}
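As an elementary cross-check (ours, purely numerical), one can verify that $H_{p,q;r}^{2,1}(x,x+1)$ with the two parameter triples above reproduces the expressions $M_{1}(x,x+1)$ and $M_{2}(x,x+1)$ in (\ref{M1}) and (\ref{M2}); the Python sketch below assumes nothing beyond the standard library.
\begin{verbatim}
from math import sqrt, isclose

s3 = sqrt(3.0)

def H(a, b, p, q, r):
    return (p*b**2 + q*a**2 + (1 - p - q)*a*b)/(r*b + (1 - r)*a)

def M1(x):
    return (x**2 + (180 - 59*s3)/180*x + (129 - 59*s3)/360)/(x + (90 - 29*s3)/180)

def M2(x):
    return (x**2 + (180 + 59*s3)/180*x + (129 + 59*s3)/360)/(x + (90 + 29*s3)/180)

p1, q1, r1 = (129 - 59*s3)/360, (129 + 59*s3)/360, (90 - 29*s3)/180
p2, q2, r2 = (129 + 59*s3)/360, (129 - 59*s3)/360, (90 + 29*s3)/180

for x in [0.5, 1.0, 3.0, 10.0]:
    assert isclose(H(x, x + 1, p1, q1, r1), M1(x), rel_tol=1e-12)
    assert isclose(H(x, x + 1, p2, q2, r2), M2(x), rel_tol=1e-12)
print("H^{2,1}(x,x+1) matches M_1 and M_2 at the sampled points")
\end{verbatim}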
The above two asymptotic formulas also have well properties.
\begin{proposition} Let $f_{6},f_{7}$ be defined on $\left( 0,\infty \right) $ by \begin{eqnarray*} f_{6}(x) &=&\ln \Gamma (x+1)-\frac{1}{2}\ln 2\pi -\left( x+1/2\right) \ln M_{1}\left( x,x+1\right) +M_{1}\left( x,x+1\right) , \\ f_{7}(x) &=&\ln \Gamma (x+1)-\frac{1}{2}\ln 2\pi -\left( x+1/2\right) \ln M_{2}\left( x,x+1\right) +M_{2}\left( x,x+1\right) , \end{eqnarray*} where $M_{1}$ and $M_{2}$ are defined\ by (\ref{M1}) and (\ref{M2}), respectively. Then $f_{6}\ $is increasing and concave on $\left( 0,\infty \right) $, while $f_{7}$ is decreasing and convex on $\left( 0,\infty \right) $. \end{proposition}
\begin{proof} Differentiation gives \begin{eqnarray*} f_{6}^{\prime }\left( x\right) &=&\psi (x+1)-\ln \frac{x^{2}+\frac{180-59 \sqrt{3}}{180}x+\frac{129-59\sqrt{3}}{360}}{x+\frac{90-29\sqrt{3}}{180}}- \frac{\left( x+\frac{1}{2}\right) \left( 2x+\frac{180-59\sqrt{3}}{180} \right) }{x^{2}+\frac{180-59\sqrt{3}}{180}x+\frac{129-59\sqrt{3}}{360}} \\ &&+\frac{x+\frac{1}{2}}{x+\frac{90-29\sqrt{3}}{180}}+\frac{2x+\frac{180-59 \sqrt{3}}{180}}{x+\frac{90-29\sqrt{3}}{180}}-\frac{x^{2}+\frac{180-59\sqrt{3} }{180}x+\frac{129-59\sqrt{3}}{360}}{\left( x+\frac{90-29\sqrt{3}}{180} \right) ^{2}}, \end{eqnarray*} \begin{eqnarray*} f_{6}^{\prime \prime }\left( x\right) &=&\psi ^{\prime }(x+1)-\frac{2x+\frac{ 180-59\sqrt{3}}{180}}{x^{2}+\frac{180-59\sqrt{3}}{180}x+\frac{129-59\sqrt{3} }{360}}+\frac{1}{x+\frac{90-29\sqrt{3}}{180}} \\ &&+\frac{59\sqrt{3}}{180}\frac{x^{2}+\frac{59-26\sqrt{3}}{59}x+\frac{43}{120} -\frac{13\sqrt{3}}{59}}{\left( x^{2}+\frac{180-59\sqrt{3}}{180}x+\frac{129-59 \sqrt{3}}{360}\right) ^{2}} \\ &&-\frac{7\sqrt{3}}{45}\frac{1}{\left( x+\frac{90-29\sqrt{3}}{180}\right) ^{2}}-\frac{\sqrt{3}}{180}\frac{x-\frac{629\sqrt{3}-90}{180}}{\left( x+\frac{ 90-29\sqrt{3}}{180}\right) ^{3}}. \end{eqnarray*} Employing the recursive relation (\ref{psi-rel.}) and factoring reveal that \begin{equation*} f_{6}^{\prime \prime }\left( x+1\right) -f_{6}^{\prime \prime }\left( x\right) =\frac{1481\sqrt{3}}{19\,440}\frac{f_{61}\left( x\right) }{ f_{62}\left( x\right) }, \end{equation*} where \begin{eqnarray*} f_{61}\left( x\right) &=&x^{9}+\left( 9-\tfrac{337\,153}{266\,580}\sqrt{3} \right) x^{8}+\left( \tfrac{991\,207\,423}{26\,658\,000}-\tfrac{674\,306}{ 66\,645}\sqrt{3}\right) x^{7} \\ &&+\left( \tfrac{2459\,907\,961}{26\,658\,000}-\tfrac{169\,081\,132\,727}{ 4798\,440\,000}\sqrt{3}\right) x^{6}+\left( \tfrac{4335\,292\,090\,469}{ 28\,790\,640\,000}-\tfrac{55\,797\,724\,727}{799\,740\,000}\sqrt{3}\right) x^{5} \\ &&+\left( \tfrac{956\,621\,902\,709}{5758\,128\,000}-\tfrac{ 148\,442\,768\,304\,491}{1727\,438\,400\,000}\sqrt{3}\right) x^{4} \\ &&+\left( \tfrac{229\,288\,958\,388\,788\,929}{1865\,633\,472\,000\,000}- \tfrac{29\,135\,013\,047\,291}{431\,859\,600\,000}\sqrt{3}\right) x^{3} \\ &&+\left( \tfrac{36\,305\,075\,316\,164\,929}{621\,877\,824\,000\,000}- \tfrac{55\,416\,459\,045\,055\,111\,861}{1679\,070\,124\,800\,000\,000}\sqrt{ 3}\right) x^{2} \\ &&+\left( \tfrac{179\,958\,708\,278\,174\,628\,611}{11\,193\,800\,832\,000 \,000\,000}-\tfrac{7731\,435\,289\,282\,423\,861}{839\,535\,062\,400\,000 \,000}\sqrt{3}\right) x \\ &&+\left( \tfrac{21\,826\,051\,463\,638\,680\,611}{11\,193\,800\,832\,000 \,000\,000}-\tfrac{5586\,677\,417\,732\,710\,687}{4975\,022\,592\,000\,000 \,000}\sqrt{3}\right) , \end{eqnarray*} \begin{eqnarray*} f_{62}\left( x\right) &=&\left( x+1\right) ^{2}\left( x^{2}+\tfrac{180-59 \sqrt{3}}{180}x+\tfrac{129-59\sqrt{3}}{360}\right) ^{2}\left( x^{2}+\tfrac{ 540-59\sqrt{3}}{180}x+\tfrac{283-59\sqrt{3}}{120}\right) ^{2} \\ &&\times \left( x+\tfrac{270-29\sqrt{3}}{180}\right) ^{3}\left( x+\tfrac{ 90-29\sqrt{3}}{180}\right) ^{3}. \end{eqnarray*} By direct verifications we see that all coefficients of $f_{61}$ and $f_{62}$ are positive, so $f_{61}\left( x\right) $, $f_{62}\left( x\right) >0$ for $ x>0$. Therefore, we get $f_{6}^{\prime \prime }\left( x+1\right) -f_{6}^{\prime \prime }\left( x\right) >0$, which yields \begin{equation*} f_{6}^{\prime \prime }(x)<f_{6}^{\prime \prime }(x+1)<f_{6}^{\prime \prime }(x+2)<...<f_{6}^{\prime \prime }(\infty )=0. 
\end{equation*} It shows that $f_{6}$ is concave on $\left( 0,\infty \right) $, and therefore, $f_{6}^{\prime }(x)>\lim_{x\rightarrow \infty }f_{6}^{\prime }(x)=0$, which proves the monotonicity and concavity of $f_{6}$.
In the same way, we can prove the monotonicity and convexity of $f_{7}$ on $ \left( 0,\infty \right) $; the details are omitted. \end{proof}
As direct consequences of the previous proposition, we have
\begin{corollary} For $x>0$, the double inequality \begin{eqnarray*} &&\delta _{0}\sqrt{2\pi }\left( \tfrac{x^{2}+\frac{180-59\sqrt{3}}{180}x+ \frac{129-59\sqrt{3}}{360}}{x+\frac{90-29\sqrt{3}}{180}}\right) ^{x+1/2}\exp \left( -\tfrac{x^{2}+\frac{180-59\sqrt{3}}{180}x+\frac{129-59\sqrt{3}}{360}}{ x+\frac{90-29\sqrt{3}}{180}}\right) \\ &<&\Gamma (x+1)<\sqrt{2\pi }\left( \tfrac{x^{2}+\frac{180-59\sqrt{3}}{180}x+ \frac{129-59\sqrt{3}}{360}}{x+\frac{90-29\sqrt{3}}{180}}\right) ^{x+1/2}\exp \left( -\tfrac{x^{2}+\frac{180-59\sqrt{3}}{180}x+\frac{129-59\sqrt{3}}{360}}{ x+\frac{90-29\sqrt{3}}{180}}\right) \end{eqnarray*} holds, where $\delta _{0}=\exp f_{6}\left( 0\right) \approx 0.96259$ and $1$ are the best constants.
For $n\in \mathbb{N}$, it holds that \begin{eqnarray*} &&\delta _{1}\sqrt{2\pi }\left( \tfrac{n^{2}+\frac{180-59\sqrt{3}}{180}n+ \frac{129-59\sqrt{3}}{360}}{n+\frac{90-29\sqrt{3}}{180}}\right) ^{n+1/2}\exp \left( -\tfrac{n^{2}+\frac{180-59\sqrt{3}}{180}n+\frac{129-59\sqrt{3}}{360}}{ n+\frac{90-29\sqrt{3}}{180}}\right) \\ &<&n!<\sqrt{2\pi }\left( \tfrac{n^{2}+\frac{180-59\sqrt{3}}{180}n+\frac{ 129-59\sqrt{3}}{360}}{n+\frac{90-29\sqrt{3}}{180}}\right) ^{n+1/2}\exp \left( -\tfrac{n^{2}+\frac{180-59\sqrt{3}}{180}n+\frac{129-59\sqrt{3}}{360}}{ n+\frac{90-29\sqrt{3}}{180}}\right) \end{eqnarray*} with the best constants $\delta _{1}=\exp f_{6}\left( 1\right) \approx 0.99965$ and $1$. \end{corollary}
\begin{corollary} For $x>0$, the double inequality \begin{eqnarray*} &&\sqrt{2\pi }\left( \tfrac{x^{2}+\frac{180+59\sqrt{3}}{180}x+\frac{129+59 \sqrt{3}}{360}}{x+\frac{90+29\sqrt{3}}{180}}\right) ^{x+1/2}\exp \left( - \tfrac{x^{2}+\frac{180+59\sqrt{3}}{180}x+\frac{129+59\sqrt{3}}{360}}{x+\frac{ 90+29\sqrt{3}}{180}}\right) \\ &<&\Gamma (x+1)<\tau _{0}\sqrt{2\pi }\left( \tfrac{x^{2}+\frac{180+59\sqrt{3} }{180}x+\frac{129+59\sqrt{3}}{360}}{x+\frac{90+29\sqrt{3}}{180}}\right) ^{x+1/2}\exp \left( -\tfrac{x^{2}+\frac{180+59\sqrt{3}}{180}x+\frac{129+59 \sqrt{3}}{360}}{x+\frac{90+29\sqrt{3}}{180}}\right) \end{eqnarray*} holds, where $\tau _{0}=\exp f_{7}\left( 0\right) \approx 1.0020$ and $1$ are the best constants.
For $n\in \mathbb{N}$, it holds that \begin{eqnarray*} &&\sqrt{2\pi }\left( \tfrac{n^{2}+\frac{180+59\sqrt{3}}{180}n+\frac{129+59 \sqrt{3}}{360}}{n+\frac{90+29\sqrt{3}}{180}}\right) ^{n+1/2}\exp \left( - \tfrac{n^{2}+\frac{180+59\sqrt{3}}{180}n+\frac{129+59\sqrt{3}}{360}}{n+\frac{ 90+29\sqrt{3}}{180}}\right) \\ &<&n!<\tau _{1}\sqrt{2\pi }\left( \tfrac{n^{2}+\frac{180+59\sqrt{3}}{180}n+ \frac{129+59\sqrt{3}}{360}}{n+\frac{90+29\sqrt{3}}{180}}\right) ^{n+1/2}\exp \left( -\tfrac{n^{2}+\frac{180+59\sqrt{3}}{180}n+\frac{129+59\sqrt{3}}{360}}{ n+\frac{90+29\sqrt{3}}{180}}\right) \end{eqnarray*} with the best constants $\tau _{1}=\exp f_{7}\left( 1\right) \approx 1.0001$ and $1$. \end{corollary}
\section{Open problems}
Inspired by Examples \ref{E-M3,2}--\ref{E-N4,3}, we propose the following problems.
\begin{problem} Let $S_{p_{k};q_{k}}^{n,n-1}\left( a,b\right) $ be defined by (\ref{S^n,n-1}). Find $p_{k}$ and $q_{k}$ such that the asymptotic formula for the gamma function \begin{equation*} \ln \Gamma (x+1)\sim \frac{1}{2}\ln 2\pi +\left( x+\frac{1}{2}\right) \ln S_{p_{k};q_{k}}^{n,n-1}\left( x,x+1\right) -\left( x+\frac{1}{2}\right) :=F_{1}\left( x\right) \end{equation*} holds as $x\rightarrow \infty $ with \begin{equation*} \lim_{x\rightarrow \infty }\frac{\ln \Gamma (x+1)-F_{1}\left( x\right) }{ x^{-2n+1}}=c_{1}\neq 0,\pm \infty . \end{equation*} \end{problem}
\begin{problem} Let $S_{p_{k};q_{k}}^{n,n-1}\left( a,b\right) $ be defined by (\ref{S^n,n-1}). Find $p_{k}$ and $q_{k}$ such that the asymptotic formula for the gamma function \begin{equation*} \ln \Gamma (x+1)\sim \frac{1}{2}\ln 2\pi +\left( x+\frac{1}{2}\right) \ln \left( x+\frac{1}{2}\right) -S_{p_{k};q_{k}}^{n,n-1}\left( x,x+1\right) :=F_{2}\left( x\right) \end{equation*} holds as $x\rightarrow \infty $ with \begin{equation*} \lim_{x\rightarrow \infty }\frac{\ln \Gamma (x+1)-F_{2}\left( x\right) }{ x^{-2n+1}}=c_{2}\neq 0,\pm \infty . \end{equation*} \end{problem}
\begin{problem} Let $H_{p_{k};q_{k}}^{n,n-1}\left( a,b\right) $ be defined by (\ref{H^n,n-1}). Find $p_{k}$ and $q_{k}$ such that the asymptotic formula for the gamma function \begin{equation*} \ln \Gamma (x+1)\sim \frac{1}{2}\ln 2\pi +\left( x+\frac{1}{2}\right) \ln H_{p_{k};q_{k}}^{n,n-1}\left( x,x+1\right) -H_{p_{k};q_{k}}^{n,n-1}\left( x,x+1\right) :=F_{3}\left( x\right) \end{equation*} holds as $x\rightarrow \infty $ with \begin{equation*} \lim_{x\rightarrow \infty }\frac{\ln \Gamma (x+1)-F_{3}\left( x\right) }{ x^{-2n}}=c_{3}\neq 0,\pm \infty . \end{equation*} \end{problem}
\end{document}
\begin{document}
\setlength{\abovedisplayskip}{8pt} \setlength{\belowdisplayskip}{8pt} \setlength{\abovedisplayshortskip}{4pt} \setlength{\belowdisplayshortskip}{8pt}
\begin{titlepage}
\title{{\bf{\Huge Long-Term Factorization of Affine Pricing Kernels}} \thanks{This paper is based on research supported by the grant CMMI-1536503 from the National Science Foundation.}} \author{Likuan Qin\thanks{[email protected]} } \author{Vadim Linetsky\thanks{[email protected]} } \affil{\emph{Department of Industrial Engineering and Management Sciences}\\ \emph{McCormick School of Engineering and Applied Sciences}\\ \emph{Northwestern University}} \date{} \end{titlepage}
\maketitle
\begin{abstract} This paper constructs and studies the long-term factorization of affine pricing kernels into discounting at the rate of return on the long bond and the martingale component that accomplishes the change of probability measure to the long forward measure. The principal eigenfunction of the affine pricing kernel germane to the long-term factorization is an exponential-affine function of the state vector with the coefficient vector identified with the fixed point of the Riccati ODE. The long bond volatility and the volatility of the martingale component are explicitly identified in terms of this fixed point. A range of examples from the asset pricing literature is provided to illustrate the theory.
\end{abstract}
\section{Introduction}
The stochastic discount factor (SDF) is a fundamental object in arbitrage-free asset pricing models. It assigns today's prices to risky future payoffs at alternative investment horizons. It accomplishes this by simultaneously discounting the future and adjusting for risk. A familiar representation of the SDF is a factorization into discounting at the risk-free interest rate and a martingale component adjusting for risk. This martingale accomplishes the change of probabilities to the risk-neutral probability measure. More recently \citet{alvarez_2005using}, \citet{hansen_2008consumption}, \citet{hansen_2009} and \citet{hansen_2012} introduce and study an alternative {\em long-term factorization} of the SDF. The {\em transitory component} in the long-term factorization discounts at the rate of return on the pure discount bond of asymptotically long maturity (the {\em long bond}). The {\em permanent component} is a martingale that accomplishes a change of probabilities to the {\em long forward measure}. \citet{linetsky_2014long} study the long-term factorization and the long forward measure in the general semimartingale setting.
The long-term factorization of the SDF is particularly convenient in applications to the pricing of long-lived assets and to theoretical and empirical investigations of the term structure of the risk-return trade-off. In addition to the references above, the growing literature on the long-term factorization and its applications includes \citet{hansen_2012pricing}, \citet{hansen_2013}, \citet{borovicka_2014mis}, \citet{borovivcka2011risk}, \citet{borovivcka2016term}, \citet{bakshi_2012}, \citet{bakshia2015recovery}, \citet{christensen2014nonparametric}, \citet{christensen_2013estimating}, \citet{linetsky_2014_cont}, \citet{linetsky2016bond}, \citet{backus2015term}, \citet{filipovic2016linear}, \citet{filipovic2016relation}. Empirical investigations in this literature show that the martingale component in the long-term factorization is highly volatile and economically significant (see, in particular, \citet{bakshi_2012} for results based on pricing kernel bounds, \citet{christensen2014nonparametric} for results based on structural asset pricing models connecting to the macro-economic fundamentals, and \citet{linetsky2016bond} for results based on explicit parameterizations of the pricing kernel, where, in particular, the relationship among the measures ${\mathbb P}$, ${\mathbb Q}$ and ${\mathbb L}$ is empirically investigated).
The focus of the present paper is on the analysis of long-term factorization in affine diffusion models, both from the perspective of providing a user's guide to constructing long-term factorization in affine asset pricing models, as well as employing affine models as a convenient laboratory to illustrate the theory of the long-term factorization. Affine diffusions are work-horse models in continuous-time finance due to their analytical and computational tractability (\citet{vasicek_1977equilibrium}, \citet{cox_1985_2}, \citet{duffie_1996}, \citet{duffie_2000}, \citet{dai_2000}, \citet{duffie_2003}). In this paper we show that the principal eigenfunction of \citet{hansen_2009} that determines the long-term factorization, if it exists, is necessarily in the exponential-affine form in affine models, with the coefficient vector in the exponential identified with the fixed point of the corresponding Riccati ODE. This allows us to give a fully explicit treatment and illustrate dynamics of the long bond, the martingale component and the long-forward measure in affine models. In particular, we explicitly verify that when the Riccati ODE associated with the affine pricing kernel possesses a fixed point, the affine model satisfies the sufficient condition in Theorem 3.1 of \citet{linetsky_2014long} so that the long-term limit exists.
In Section \ref{Brownian} we review and summarize the long-term factorization in Brownian motion-based models. In Section \ref{exist_affine} we present general results on the long-term factorization of affine pricing kernels. The main results are given in Theorem \ref{affine_long}, where the market price of Brownian risk is explicitly decomposed into the market price of risk under the long forward measure identified with the volatility of the long bond and the remaining market price of risk determining the martingale component accomplishing the change of probabilities from the data-generating to the long forward measure. The latter component is determined by the fixed point of the Riccati ODE. In Section \ref{examples} we study a range of examples of affine pricing kernels from the asset pricing literature.
\section{Long-Term Factorization in Brownian Environments} \label{Brownian}
We work on a complete filtered probability space $(\Omega,{\mathscr F},({\mathscr F}_{t})_{t\geq 0},{\mathbb P})$. We assume that all uncertainty in the economy is generated by an $n$-dimensional Brownian motion $W_t^{\mathbb{P}}$ and that
$(\mathscr{F}_t)_{t\geq 0}$ is the (completed) filtration generated by $W_t^{\mathbb{P}}$. We assume absence of arbitrage and market frictions, so that there exists a strictly positive pricing kernel process in the form of an It\^{o} semimartingale. More precisely, we assume that the pricing kernel follows an It\^{o} process ($\cdot$ denotes vector dot product) $$ dS_t=-r_tS_tdt-S_t \lambda_t \cdot dW_t^{\mathbb{P}} $$
with $\int_0^t |r_s|ds<\infty$ and the market price of Brownian risk vector $\lambda_t$ such that the process $$
M_t^0=e^{-\int_0^t \lambda_s\cdot dW_s^{\mathbb{P}}-\frac{1}{2}\int_0^t \|\lambda_s\|^2 ds} $$
is a martingale (Novikov's condition ${\mathbb E}^{\mathbb P}[e^{\frac{1}{2}\int_0^t \|\lambda_s\|^2 ds}]<\infty$ for each $t>0$ suffices). Under these assumptions the pricing kernel has the risk-neutral factorization \begin{equation} S_t=\frac{1}{A_t}M_t^0=e^{-\int_0^t r_s ds}M_t^0 \eel{rnf} into discounting at the risk-free short rate $r_t$ determining the risk-free asset (money market account) $A_t=e^{\int_0^t r_s ds}$ and the exponential martingale $M^0_t$ with the market price of Brownian risk $\lambda_t$ determining its volatility. We also assume that ${\mathbb E}^{\mathbb{P}}[S_T/S_t]<\infty$ for all $T>t\geq 0$. The integrability of the SDF $S_T/S_t$ for any two dates $T>t$ ensures that the zero-coupon bond price processes $$P_t^T:=\mathbb{E}_t^\mathbb{P}[S_T/S_t], \quad t\in [0,T]$$
are well defined for all maturity dates $T>0$ ($\mathbb{E}_t[\cdot]=\mathbb{E}[\cdot|{\mathscr F}_{t}]$).
Since for each $T$ the $T$-maturity zero coupon bond price process $P_t^T$ can be written as $P_t^T=M_t^TP_0^T/S_t$, where $M_t^T=S_t P_t^T/P_0^T= \mathbb{E}_t^\mathbb{P}[S_T]/\mathbb{E}_0^\mathbb{P}[S_T]$ is a positive martingale on $t\in [0,T]$, we can apply the Martingale Representation Theorem to claim that $$dM_t^T=-M_t^T \lambda_t^T \cdot dW^{\mathbb P}_t$$ with some $\lambda_t^T$, and further claim that the bond price process has the representation $$ dP_t^T=(r_t+ \sigma^T_t\cdot \lambda_t) P_t^Tdt+P_t^T \sigma^T_t \cdot dW_t^\mathbb{P} $$ with the volatility process $\sigma^T_t=\lambda_t-\lambda_t^T$.
Following \citet{linetsky_2014long}, for each fixed $T>0$ we define a self-financing trading strategy that rolls over investments in $T$-maturity zero-coupon bonds as follows.
Fix $T$ and consider a self-financing roll-over strategy that starts at time zero by investing one unit of account in $1/P_{0}^T$ units of the $T$-maturity zero-coupon bond. At time $T$ the bond matures, and the value of the strategy is $1/P_{0}^T$ units of account. We roll the proceeds over by re-investing into $1/(P_{0}^T P_{T}^{2T})$ units of the zero-coupon bond with maturity $2T$. We continue with the roll-over strategy, at each time $kT$ re-investing the proceeds into the bond $P_{kT}^{(k+1)T}$. We denote the valuation process of this self-financing strategy $B_t^T$: \[ B_t^T = \left(\prod_{i=0}^k P_{iT}^{(i+1)T}\right)^{-1} P_{t}^{(k+1)T},\quad t\in [kT,(k+1)T),\quad k=0,1,\ldots. \]
For each $T>0$, the process $B_t^T$ is defined for all $t\geq 0$. The process $S_t B_t^T$ extends the martingale $M_t^T$ to all $t\geq 0$. It thus defines the $T$-{\em forward measure} ${\mathbb Q}^T|_{{\mathscr F}_{t}}=M_t^T {\mathbb P}|_{{\mathscr F}_{t}}$
on ${\mathscr F}_{t}$ for each $t\geq 0$, where $T$ now has the meaning of the length of the compounding interval. Under the $T$-forward measure ${\mathbb Q}^T$ extended to all ${\mathscr F}_{t}$, the roll-over strategy $(B_t^T)_{t\geq 0}$ with the compounding interval $T$ serves as the numeraire asset. Following \citet{linetsky_2014long}, we continue to call the measure extended to all ${\mathscr F}_{t}$ for $t\geq 0$ the $T$-forward measure and use the same notation, as it reduces to the standard definition of the forward measure on ${\mathscr F}_{T}$.
Since the roll-over strategy $(B^T_t)_{t\geq 0}$ and the positive martingale $M_t^T=S_t B_t^T$ are defined for all $t\geq 0$, we can write the $T$-{\em forward factorization} of the pricing kernel for all $t\geq 0$: \begin{equation} S_t = \frac{1}{B_t^T}M_t^T. \eel{Tfactorization}
We now recall the definitions of the {\em long bond} and the {\em long forward measure} from \citet{linetsky_2014long}. \begin{definition}{\bf (Long Bond)} \label{def_longbond} If the wealth processes $(B^T_t)_{t\geq 0}$ of the roll-over strategies in $T$-maturity bonds converge to a strictly positive semimartingale $(B_t^\infty)_{t\geq 0}$ uniformly on compacts in probability as $T\rightarrow \infty$, i.e. for all $t>0$ and $K>0$ \[
\lim_{T\rightarrow \infty} {\mathbb P}(\sup_{s\leq t}|B_s^T-B_s^\infty|>K)=0, \] we call the limit the {\em long bond}. \end{definition}
\begin{definition}{\bf (Long Forward Measure)} \label{def_longforward} If there exists a measure $\mathbb{Q}^\infty$ equivalent to $\mathbb{P}$ on each ${\mathscr F}_t$ such that the $T$-forward measures converge strongly to ${\mathbb Q}^\infty$ on each ${\mathscr F}_t$, i.e. \[ \lim_{T\rightarrow \infty}{\mathbb Q}^T(A)={\mathbb Q}^\infty(A) \] for each $A\in {\mathscr F}_t$ and each $t\geq 0$, we call the limit the {\em long forward measure} and denote it ${\mathbb L}$. \end{definition} The following theorem, proved in \citet{linetsky_2014long}, gives a sufficient condition that ensures convergence to the long bond in the semimartingale topology, which is stronger than the ucp convergence in Definition \ref{def_longbond}, and convergence of $T$-forward measures to the long forward measure in total variation, which is stronger than the strong convergence in Definition \ref{def_longforward} (we refer to \citet{linetsky_2014long} and the on-line appendix for proofs and details).
\begin{theorem}{\bf (Long Term Factorization and the Long Forward Measure)} \label{implication_L1} Suppose that for each $t>0$ the ratio of the ${\mathscr F}_t$-conditional expectation of the pricing kernel $S_T$ to its unconditional expectation converges to a positive limit in $L^1$ as $T\rightarrow \infty$ (under ${\mathbb P}$), i.e. for each $t>0$ there exists an almost surely positive ${\mathscr F}_t$-measurable random variable which we denote $M_t^\infty$ such that \begin{equation} \frac{{\mathbb E}^{\mathbb P}_t[S_T]}{{\mathbb E}^{\mathbb P}[S_T]} \xrightarrow{\rm L^1} M_t^\infty\quad \text{as} \quad T\rightarrow \infty. \eel{PKL1} Then the following results hold:\\ (i) The collection of random variables $(M_t^\infty)_{t\geq0}$ is a positive ${\mathbb P}$-martingale, and the family of martingales $(M_t^T)_{t\geq 0}$ converges to the martingale $(M_t^\infty)_{t\geq0}$ in the semimartingale topology.\\ (ii) The long bond valuation process $(B_t^\infty)_{t\geq0}$ exists, and the roll-over strategies $(B_t^T)_{t\geq 0}$ converge to the long bond $(B_t^\infty)_{t\geq 0}$ in the semimartingale topology.\\ (iii) The pricing kernel possesses the long-term factorization \begin{equation} S_t=\frac{1}{B_t^\infty}M_t^\infty. \eel{ltf} (iv) $T$-forward measures ${\mathbb Q}^T$ converge to the long forward measure ${\mathbb L}$ in total variation on each ${\mathscr F}_t$, and ${\mathbb L}$ is equivalent to ${\mathbb P}$ on ${\mathscr F}_t$ with the Radon-Nikodym derivative $M_t^\infty$. \end{theorem}
The process $B_t^\infty$ has the interpretation of the gross return earned starting from time zero up to time $t$ on holding the zero-coupon bond of asymptotically long maturity. The long bond is the numeraire asset under the long forward measure $\mathbb{L}$ since the pricing kernel becomes $1/B_t^\infty$ under $\mathbb{L}$. The long-term factorization of the pricing kernel \eqref{ltf} decomposes it into discounting at the rate of return on the long bond and a martingale component encoding a further risk adjustment.
Suppose the condition \eqref{PKL1} in Theorem \ref{implication_L1} holds in the Brownian setting of this paper. Then the long bond valuation process is an It\^{o} semimartingale with the representation $$ dB_t^\infty=(r_t+ \sigma^\infty_t \cdot \lambda_t) B_t^\infty dt + B_t^\infty \sigma^\infty_t \cdot dW_t^\mathbb{P} $$ with some volatility process $\sigma^\infty_t$ such that the process $M_t^\infty=S_t B_t^\infty$ satisfying $$ dM_t^\infty=-M_t^\infty \lambda_t^\infty \cdot dW_t^\mathbb{P} $$ with $\lambda_t^\infty=\lambda_t - \sigma^\infty_t$ is a martingale (the permanent component in the long-term factorization). Thus, the long-term factorization Eq.\eqref{ltf} in the Brownian setting yields a decomposition of the market price of Brownian risk \begin{equation} \lambda_t=\sigma^\infty_t + \lambda_t^\infty \eel{mprdecomposition} into the volatility of the long bond $\sigma_t^\infty$ and the volatility $\lambda_t^\infty$ of the martingale $M_t^\infty$. The change of probability measure from the data-generating measure ${\mathbb P}$ to the long forward measure ${\mathbb L}$ is accomplished via Girsanov's theorem with the ${\mathbb L}$-Brownian motion $W_t^\mathbb{L}=W_t^\mathbb{P}+ \int_0^t \lambda_s^\infty ds.$
\section{Long Term Factorization of Affine Pricing Kernels} \label{exist_affine}
We assume that the underlying economy is described by a Markov process $X$. We further assume $X$ is an affine diffusion and the pricing kernel $S$ is exponential affine in $X$ and the time integral of $X$. Affine diffusion models are widely used in continuous-time finance due to their analytical tractability (\citet{vasicek_1977equilibrium}, \citet{cox_1985_2}, \citet{duffie_1996}, \citet{duffie_2000}, \citet{dai_2000}, \citet{duffie_2003}). We start with a brief summary of some of the key facts about affine diffusions. We refer the reader to \citet{filipovic_2009} for details, proofs and references to the literature on affine diffusion.
The process we work with solves the following SDE on the state space $E=\mathbb{R}_+^m\times\mathbb{R}^n$ for some $m,n\geq 0$ with $m+n=d$, where $\mathbb{R}_+^m=\big\{x\in \mathbb{R}^m : x_i\geq 0$ for $i=1,...,m\big\}$: \begin{equation} d X_t=b(X_t)dt+\sigma(X_t)d W^{\mathbb{P}}_t,\quad X_0=x, \eel{affinesde} where $W^{\mathbb P}$ is a $d$-dimensional standard Brownian motion and the diffusion matrix $\alpha(x)=\sigma(x)\sigma(x)^\dagger$ (here $^\dagger$ denotes matrix transpose to differentiate it from superscript $^T$) and the drift vector $b(x)$ are both affine in $x$: \begin{equation} \alpha(x)=a+\displaystyle{\sum_{i=1}^d}x_i\alpha_i,\quad b(x)=b+\displaystyle{\sum_{i=1}^d}x_i\beta_i=b+Bx \end{equation} for some $d\times d$-matrices $a$ and $\alpha_i$ and $d$-dimensional vectors $b$ and $\beta_i$, where we denote by $B=(\beta_1,...,\beta_d)$ the $d\times d$-matrix with $i$-th column vector $\beta_i$, $1\leq i\leq d$. The first $m$ coordinates of $X$ are CIR-type and are non-negative, while the last $n$ coordinates are OU-type. Define the index sets $\emph{I}=\{1,...,m\}$ and $\emph{J}=\{m+1,...,m+n\}$. For any vector $\mu$ and matrix $\nu$, and index sets $\emph{M},\emph{N}\in \{I,J\}$, we denote by $\mu_\emph{M}=(\mu_i)_{i\in \emph{M}},$ $\nu_{\emph{M}\emph{N}}=(\nu_{ij})_{i\in \emph{M},j\in \emph{N}}$ the respective sub-vector and sub-matrix. To ensure the process stays in the domain $E={\mathbb R}_+^m\times {\mathbb R}^n$, we need the following assumption (cf. \citet{filipovic_2009}) \begin{assumption}{\bf (Admissibility)}\\ (1) $a_{JJ}$ and $\alpha_{i,JJ}$ are symmetric positive semi-definite for all $i=1,2,...,m$,\\ (2) $a_{II}=0,$ $a_{IJ}=a_{JI}^\dagger=0$,\\ (3) $\alpha_j=0$ for $j\in J$,\\ (4) $\alpha_{i,kl}=\alpha_{i,lk}=0$ for $k\in I\backslash \{i\}$ for all $1\leq k,l\leq d,$\\ (5) $b_I\geq 0$, $B_{IJ}=0$, and $B_{II}$ has non-negative off-diagonal elements. \label{admi_and_nonde} \end{assumption} The condition $b_I\geq 0$ on the constant term in the drift of the CIR-type components ensures that the process stays in the state space $E$. Making a stronger assumption $b_I>0$ ensures that the process instantaneously reflects from the boundary $\partial E$ and re-enters the interior of the state space ${\rm int}E=\mathbb{R}_{++}^m\times\mathbb{R}^n,$ where $\mathbb{R}_{++}^m=\big\{x\in \mathbb{R}^m : x_i> 0$ for $i=1,...,m\big\}$. For any parameters satisfying Assumption \ref{admi_and_nonde}, there exists a unique strong solution of the SDE \eqref{affinesde} (cf. Theorem 8.1 of \citet{filipovic_2009}). Denote by ${\mathbb P}_x$ the law of the solution $X^x$ of the SDE \eqref{affinesde} for $x\in E$, ${\mathbb P}_x(X_t\in A):={\mathbb P}(X^x_t\in A)$. Then $P_t(x,A)={\mathbb P}_x(X_t\in A)$ defined for all $t\geq 0$, Borel subsets $A$ of $E$, and $x\in E$ defines a Markov transition semigroup $(P_t)_{t\geq 0}$ on the Banach space of Borel measurable bounded functions on $E$ by $P_tf(x):=\int_E f(y)P_t(x,dy)$. As shown in \citet{duffie_2003}, this semigroup is {\em Feller}, i.e., it leaves the space of continuous functions vanishing at infinity invariant. Thus, the Markov process $((X_t)_{t\geq 0},({\mathbb P}_x)_{x\in E})$ is a {\em Feller process} on $E$. It has continuous paths in $E$ and has the strong Markov property (cf. \citet{yamada_1971}, Corollary 2, p.162). Thus, it is a Borel right process (in fact, a Hunt process).
We make the following assumption about the pricing kernel. \begin{assumption}{\bf (Affine Pricing Kernel)}\label{assumption_affine_PK} We assume that the pricing kernel is exponential-affine in $X$ and its time integral: \begin{equation} S_t=e^{-\gamma t-u^\dagger (X_t-X_0)-\int_0^t \delta^\dagger X_s ds}, \eel{affine_pk} where $\gamma$ is a scalar and $u$ and $\delta$ are $d$-vectors and $^\dagger$ denotes matrix transpose. \end{assumption}
The pricing kernel in this form is a positive multiplicative functional of the Markov process $X$. The associated pricing operator ${\mathscr P}_t$ is defined by $$ {\mathscr P}_tf(x)={\mathbb E}^{\mathbb P}_x[S_t f(X_t)] $$ for a payoff $f$ of the Markov state. We refer the reader to Qin and Linetsky (2016a) for a detailed treatment of Markovian pricing operators. The pricing kernel in the form \eqref{affine_pk} is called affine due to the following key result that shows that the term structure of pure discount bond yields is affine in the state vector $X$ (cf. \citet{filipovic_2009} Theorem 4.1). \begin{proposition} \label{affine_ZCB} Let $T_0>0$. The following statements are equivalent: \\ (i) ${\mathbb E}^{\mathbb P}[S_{T_0}]<\infty$ for all fixed initial states $X_0=x\in {\mathbb R}_+^m\times {\mathbb R}^n$. \\ (ii) There exists a unique solution $(\Phi(\cdot),\Psi(\cdot)):[0,T_0]\rightarrow {\mathbb R}\times {\mathbb R}^d$ of the following Riccati system of equations up to time $T_0$: \begin{equation} \begin{split} &\Phi^\prime(t)=-\frac{1}{2}\Psi_J(t)^\dagger a_{JJ}\Psi_J(t)+b^\dagger\Psi(t)+\gamma, \quad \Phi(0)=0,\\ &\Psi_i^\prime(t)=-\frac{1}{2}\Psi(t)^\dagger \alpha_{i}\Psi(t)+\beta_i^\dagger\Psi(t)+\delta_i,\quad i\in\emph{I},\\ &\Psi_J^\prime(t)=B_{JJ}^\dagger\Psi_J(t)+\delta_J,\quad \Psi(0)=u.\\ \end{split} \eel{riccati_d} In either case, the pure discount bond valuation processes (with unit payoffs) are exponential-affine in $X$: \begin{equation} P_t^T=\mathbb{E}^{\mathbb P}_t[ S_T/S_t]= ({\mathscr P}_{T-t}1)(x)= P(T-t,X_t)=e^{-\Phi(T-t)-(\Psi(T-t)-u)^\dagger X^x_t} \eel{representation} for all $0\leq t\leq T\leq t+T_0$ and the SDE initial condition $x\in {\mathbb R}_+^m\times {\mathbb R}^n$. \end{proposition} Since in this paper our standing assumption is that ${\mathbb E}^{\mathbb P}[S_t]<\infty$ for all $t$, in this case the Riccati ODE system has solutions $\Psi(t)$ and $\Phi(t)$ for all $t$, and the bond pricing function entering the expression \eqref{representation} for the zero-coupon bond process \begin{equation} P(t,x)=({\mathscr P}_t1)(x)=e^{-\Phi(t)-(\Psi(t)-u)^\dagger x} \eel{bondfunction} is defined for all $t\geq 0$ and $x\in E$.
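To illustrate how Proposition \ref{affine_ZCB} is used in computations, the following Python sketch of ours integrates the Riccati system by a classical fourth-order Runge--Kutta scheme for a one-factor CIR-type specification ($m=1$, $n=0$) and evaluates zero-coupon bond prices via Eq.\eqref{bondfunction}. The parameter values are illustrative choices and are not taken from the paper; for simplicity we set $u=0$, $\gamma=0$ and $\delta=1$, so that the short rate is $r(x)=x$.
\begin{verbatim}
from math import exp

# One-factor CIR-type affine model (m=1, n=0): dX = (b0 - kappa*X)dt + sigma*sqrt(X) dW^P.
# Pricing kernel parameters (gamma, u, delta); illustrative values only.
b0, kappa, sigma = 0.06, 0.5, 0.2
gamma_, u, delta = 0.0, 0.0, 1.0        # then r(x) = x

def rhs(Phi, Psi):
    dPhi = b0*Psi + gamma_                              # Phi' = b Psi + gamma
    dPsi = -0.5*sigma**2*Psi**2 - kappa*Psi + delta     # Psi' = -(1/2) sigma^2 Psi^2 - kappa Psi + delta
    return dPhi, dPsi

def riccati(T, steps=2000):
    """Classical RK4 integration of the Riccati pair (Phi, Psi) with Phi(0)=0, Psi(0)=u."""
    h, Phi, Psi = T/steps, 0.0, u
    for _ in range(steps):
        k1 = rhs(Phi, Psi)
        k2 = rhs(Phi + 0.5*h*k1[0], Psi + 0.5*h*k1[1])
        k3 = rhs(Phi + 0.5*h*k2[0], Psi + 0.5*h*k2[1])
        k4 = rhs(Phi + h*k3[0], Psi + h*k3[1])
        Phi += h*(k1[0] + 2*k2[0] + 2*k3[0] + k4[0])/6
        Psi += h*(k1[1] + 2*k2[1] + 2*k3[1] + k4[1])/6
    return Phi, Psi

x0 = 0.03                                # current level of the state (here, the short rate)
for T in [1.0, 5.0, 10.0, 30.0]:
    Phi, Psi = riccati(T)
    P = exp(-Phi - (Psi - u)*x0)         # zero-coupon bond price P(T, x0)
    print(T, P, (Phi + (Psi - u)*x0)/T)  # price and continuously compounded yield
\end{verbatim}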
We next show that an affine pricing kernel always possesses the risk-neutral factorization with the affine short rate function. \begin{theorem}{\bf (Risk-Neutral Factorization of Affine Pricing Kernels)}\label{RN_affine} Suppose $X$ satisfies Assumption \ref{admi_and_nonde} and the pricing kernel satisfies Assumption \ref{assumption_affine_PK} together with the assumption that ${\mathbb E}^{\mathbb P}_x[S_t]<\infty$ for all $t\geq 0$ and every fixed initial state $X_0=x\in {\mathbb R}_+^m\times {\mathbb R}^n$.\\ (i) Then the pricing kernel admits the risk-neutral factorization $$S_t=e^{-\int_0^t r(X_s)ds}M^0_t$$ with the affine short rate \begin{equation} r(x)=g+h^\dagger x,\, \eel{affineshortr} with \begin{equation} g=\gamma-\frac{1}{2}u_J^\dagger a_{JJ} u_J+ b^\dagger u, \, h_i=\delta_i-\frac{1}{2}u^\dagger\alpha_i u+\beta_i^\dagger u,\, i\in I, \, h_J=\delta_J+B_{JJ}^\dagger u_J \eel{gh} and the martingale
$$M^0_t=e^{-\int_0^t\lambda_s^\dagger dW_s^{\mathbb P}-\frac{1}{2}\int_0^t \|\lambda_s\|^2ds}$$ with the market price of Brownian risk (column $d$-vector) \begin{equation} \lambda_t = \sigma(X_t)^\dagger u, \eel{mprvaffine}
where $\sigma(x)$ is the volatility matrix of the state variable $X$ in the SDE \eqref{affinesde} and $$\|\lambda_t\|^2=\lambda_t^\dagger\lambda_t=u^\dagger \alpha(X_t)u.$$ \\ (ii) Under the risk-neutral measure ${\mathbb Q}$ defined by the martingale $M^0$, the dynamics of $X$ reads \begin{equation} d X_t=(b(X_t)-\alpha(X_t)u) dt+\sigma(X_t)d W^{\mathbb{Q}}_t, \eel{affineq} where $W^{\mathbb Q}_t=W_t^{\mathbb P} +\int_0^t \lambda_s ds$ is the standard Brownian motion under ${\mathbb Q}$. \end{theorem} \begin{proof} (i) Define a process $M_t^0:=S_te^{\int_0^t r(X_s)ds}$. It is also in the form of Eq.\eqref{affine_pk} with $\gamma$ replaced by $\gamma-g$ and $\delta$ replaced by $\delta-h$. Thus, Proposition \ref{affine_ZCB} also holds if we replace $S_t$ with $M_t^0$, replace $\gamma$ with $\gamma-g$ and replace $\delta$ with $\delta-h$, i.e. $\mathbb{E}_t^\mathbb{P}[M_T^0/M_t^0]=e^{-\Phi(T-t)-(\Psi(T-t)-u)^\dagger X^x_t},$ where \begin{equation} \begin{split} &\Phi^\prime(t)=-\frac{1}{2}\Psi_J(t)^\dagger a_{JJ}\Psi_J(t)+b^\dagger\Psi(t)+\gamma-g, \quad \Phi(0)=0,\\ &\Psi_i^\prime(t)=-\frac{1}{2}\Psi(t)^\dagger \alpha_{i}\Psi(t)+\beta_i^\dagger\Psi(t)+\delta_i-h_i,\quad i\in\emph{I},\\ &\Psi_J^\prime(t)=B_{JJ}^\dagger\Psi_J(t)+\delta_J-h_J,\quad \Psi(0)=u.\\ \end{split}
\end{equation} With the choice of $g$ and $h$ in Eq.\eqref{gh}, the solution to the above ODE system is $\Phi(t)=0$ and $\Psi(t)=u$ for all $t\geq 0$, which implies $\mathbb{E}_t^\mathbb{P}[M_T^0/M_t^0]=1$. This shows that $M_t^0$ is a martingale. Furthermore, using the SDE for the affine state $X$, we can cast $M_t^0$ in the exponential martingale form $e^{-\int_0^t \lambda_s^\dagger dW_s^{\mathbb{P}}-\frac{1}{2}\int_0^t \|\lambda_s\|^2ds}$ with $\lambda_t$ given in \eqref{mprvaffine}.
\noindent(ii) The SDE for $X$ under $\mathbb{Q}$ follows from Girsanov's Theorem. $\Box$ \end{proof}
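As a small worked illustration of Theorem \ref{RN_affine} (our own toy numbers, a single CIR-type factor with $m=1$, $n=0$), the affine short-rate coefficients in Eq.\eqref{gh}, the market price of Brownian risk \eqref{mprvaffine} and the risk-neutral drift adjustment \eqref{affineq} can be computed directly:
\begin{verbatim}
# Illustrative one-factor model: dX = (b0 - kappa*X) dt + sigma*sqrt(X) dW^P,
# pricing kernel S_t = exp(-gamma*t - u*(X_t - X_0) - delta * integral of X).
b0, kappa, sigma = 0.05, 0.6, 0.3
gamma_, u, delta = 0.02, 0.8, 1.1

g = gamma_ + b0*u                          # g = gamma + b'u (no OU block here)
h = delta - 0.5*sigma**2*u**2 - kappa*u    # h = delta - (1/2) u' alpha u + beta' u, with beta = -kappa
kappa_Q = kappa + sigma**2*u               # drift under Q: b(x) - alpha(x) u = b0 - kappa_Q x

print("short rate r(x) = %.6f + %.6f x" % (g, h))
print("market price of risk lambda_t = (sigma*u) sqrt(X_t), sigma*u =", sigma*u)
print("mean-reversion rate under Q:", kappa_Q)
\end{verbatim}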
We next turn to the long term factorization of the affine pricing kernel. \begin{theorem}{\bf (Long Term Factorization of Affine Pricing Kernels)} \label{affine_long} Suppose the solution $\Psi(t)$ of the Riccati ODE \eqref{riccati_d} converges to a fixed point $v\in {\mathbb R}^d$: \begin{equation} \lim_{t\rightarrow\infty}\Psi(t)=v. \eel{psi_converge} Then the following results hold.\\ (i) Condition Eq.\eqref{PKL1} is satisfied and, hence, all results in Theorem \ref{implication_L1} hold. \\ (ii) The long bond is given by \begin{equation} B_t^\infty=e^{\lambda t}\frac{\pi(X_t)}{\pi(X_0)}, \eel{long_bond_affine} where \begin{equation} \pi(x)=e^{(u-v)^\dagger x} \eel{affineeigen} is the positive exponential-affine eigenfunction of the pricing operator ${\mathscr P}_t$ $$ {\mathscr P}_t \pi(x)=e^{-\lambda t}\pi(x) $$ with the eigenvalue $e^{-\lambda t}$ with \begin{equation} \lambda=\gamma-\frac{1}{2}v_J^\dagger a_{JJ}v_J+ b^\dagger v \eel{affineeigenv} interpreted as the limiting long-term zero-coupon yield: \begin{equation} \lim_{t\rightarrow \infty}\frac{-\ln P(t,x)}{t}=\lambda \eel{asymptyield} for all $x$.\\ (iii) The long bond has the ${\mathbb P}$-measure dynamics: \begin{equation} dB_t^\infty = (r(X_t)+(\sigma^\infty_t)^\dagger \lambda_t )B_t^\infty dt + B_t^\infty (\sigma_t^\infty)^\dagger dW_t^{\mathbb P}, \end{equation} where the (column vector) volatility of the long bond is given by: \begin{equation} \sigma_t^\infty=\sigma(X_t)^\dagger(u-v). \eel{sigmainf} (iv) The martingale component in the long-term factorization of the PK $M^\infty_t=S_t B_t^\infty$ can be written in the form
\begin{equation} M^\infty_t=e^{-\int_0^t (\lambda_s^\infty)^\dagger dW_s^{\mathbb P}-\frac{1}{2}\int_0^t \|\lambda_s^\infty\|^2 ds}, \eel{minfty} where \begin{equation} \lambda^\infty_t=\lambda_t - \sigma_t^\infty=\sigma(X_t)^\dagger v. \eel{gammainfty} (v) The long-term decomposition of the market price of Brownian risk is given by: \begin{equation} \lambda_t = \sigma_t^\infty + \lambda^\infty_t, \end{equation} where $\sigma_t^\infty$ is the volatility of the long bond \eqref{sigmainf} and $\lambda_t^\infty$ given in \eqref{gammainfty} defines the martingale \eqref{minfty}.\\ (vi) Under the long forward measure $\mathbb{L}$ the state vector $X_t$ solves the following SDE \begin{equation} dX_t=(b(X_t)-\alpha(X_t)v)dt+\sigma(X_t)dW_t^\mathbb{L}, \eel{affinel} where $W_t^\mathbb{L}=W_t^\mathbb{P}+\int_0^t \lambda_s^\infty ds$ is the d-dimensional Brownian motion under $\mathbb{L}$, and the long bond has the ${\mathbb L}$-measure dynamics:
\begin{equation} dB_t^\infty = (r(X_t)+\|\sigma_t^\infty\|^2 )B_t^\infty dt + B_t^\infty (\sigma_t^\infty)^\dagger dW_t^{\mathbb L}. \end{equation} \end{theorem} \begin{proof} Since the solution $\Psi(t)$ of the Riccati ODE converges to a constant as $t\rightarrow \infty$, the right-hand side of Eq.\eqref{riccati_d}, and hence $\Psi'(t)$, also converges to a constant. This limiting constant must vanish, since otherwise $\Psi(t)$ could not converge. It follows that $\Psi(t)=v$ is a stationary solution of the Riccati equation \eqref{riccati_d}. Applying Proposition \ref{affine_ZCB} to the affine kernel of the form $1/B_t^\infty$, where $B_t^\infty$ is the process defined in \eqref{long_bond_affine}, it then follows that $\pi(x)$ defined in Eq.\eqref{affineeigen} is an eigenfunction of the pricing operator with the eigenvalue \eqref{affineeigenv}. We can then verify that $$M_t^\infty := S_t e^{\lambda t}\frac{\pi(X_t)}{\pi(X_0)}$$ is a martingale (with $M_0^\infty=1$). We can use it to define a new probability measure
$$\mathbb{Q}^\pi|_{\mathscr{F}_t}:=M_t^\infty\mathbb{P}|_{\mathscr{F}_t}$$ associated with the eigenfunction $\pi(x)$. The dynamics of $X_t$ under $\mathbb{Q}^\pi$ follows from Girsanov's Theorem. We stress that $\pi(x)$ is the eigenfunction of the pricing semigroup operator, rather than merely an eigenfunction of the generator. It is generally possible for an eigenfunction of the generator to fail to be an eigenfunction of the semigroup. That case will lead to a mere local martingale. In our case, $\pi(x)$ is an eigenfunction of the semigroup by construction, and the process $M_t^\infty$ is a martingale, rather than a mere local martingale.
We now show that the condition \eqref{PKL1} holds under the assumptions of Theorem \ref{affine_long}. We first re-write it under the probability measure $\mathbb{Q}^{\pi}$: \begin{equation}
\lim_{T\rightarrow\infty}\mathbb{E}^{\mathbb{Q}^\pi}\left[\left|\frac{P_t^T}{P_0^TB_t^\infty}-1\right|\right]=0. \eel{BHS_L1} We will now verify that this indeed holds under our assumptions. First observe that by Eq.\eqref{representation} and Eq.\eqref{long_bond_affine}: \begin{equation} \frac{P_t^T}{P_0^TB_t^\infty}=e^{-\lambda t-(\Phi(T-t)-\Phi(T))-(\Psi(T-t)-v)^\dagger X_t+(\Psi(T)-v)^\dagger X_0}. \end{equation} Since $\lim_{T\rightarrow\infty}\Psi(T)=v$ and $\lim_{T\rightarrow\infty}\Phi^\prime(T)=\lambda$, we have that
$$\lim_{T\rightarrow\infty}\frac{P_t^T}{P_0^TB_t^\infty}=1$$ almost surely. Next, we show $L^1$ convergence. First, we observe that for any $\epsilon>0$ there exists $T_0$ such that for all $T>T_0$ $$|\Psi_i(T-t)-v_i|\leq\epsilon$$ for all $1\leq i\leq d$ and $$e^{-\lambda t-(\Phi(T-t)-\Phi(T))+(\Psi(T)-v)^\dagger X_0}\leq 1+\epsilon.$$
Thus, $$\left|\frac{P_t^T}{P_0^TB_t^\infty}-1\right|\leq1+\left|\frac{P_t^T}{P_0^TB_t^\infty}\right|\leq1+(1+\epsilon)\sum_{k_i=\pm\epsilon}e^{k^\dagger X_t}.$$ Since $X_t$ remains affine under $\mathbb{Q}^\pi$, by Theorem 4.1 of \citet{filipovic_2009} there exists $\epsilon>0$ such that $e^{k^\dagger X_t}$ is integrable under $\mathbb{Q}^\pi$ for all vectors $k$ such that $k_i=\pm\epsilon$. Thus, by the Dominated Convergence Theorem, Eq.\eqref{BHS_L1} holds. This proves (i) and (ii) (Eq.\eqref{asymptyield} follows from Eq.\eqref{bondfunction} and the fact that $\Phi'(t)\rightarrow\lambda$ as $t\rightarrow\infty$). (iii) follows from Eq.\eqref{long_bond_affine} and It\^{o}'s formula.
To prove (iv), we note that by Theorem \ref{implication_L1} $M_t^\infty$ is a martingale. By It\^{o}'s formula, its volatility is $-\lambda_t^\infty$. This proves (iv). Part (v) follows from Eq.\eqref{gammainfty}. To prove (vi), first note that Eq.\eqref{minfty} and Girsanov's theorem implies that $W_t^\mathbb{L}=W_t^\mathbb{P}+\int_0^t\lambda_s^\infty ds$ is an $\mathbb{L}$-Brownian motion. The dynamics of $X_t$ and $B_t^\infty$ under $\mathbb{L}$ then follows. $\Box$ \end{proof}
The economic meaning of Theorem \ref{affine_long} is that the existence of a fixed point $v$ of the solution to the Riccati equation is sufficient for the existence of the long-term limit. The fixed point $v$ itself identifies the volatility of the long bond in Eq.\eqref{sigmainf} and the long-term zero-coupon yield in Eq.\eqref{asymptyield} via the principal eigenvalue \eqref{affineeigenv}.
We note that the condition in Theorem 3.2 of \citet{linetsky_2014long} is automatically satisfied in affine models. Indeed, when the Riccati equation has a fixed point $v$, Eq.\eqref{bondfunction} and Theorem \ref{affine_long} yield $$ \lim_{T\rightarrow \infty}\frac{P(T-t,x)}{P(T,x)}=e^{\lambda t}, $$ and we can write $P(t,x)=e^{-\lambda t}L_x(t)$, where $L_x(t)=e^{\lambda t}P(t,x)$ is a slowly varying function of time $t$ for each $x$. By Eq.\eqref{asymptyield}, the eigenvalue $\lambda$ is identified with the asymptotic long-term zero-coupon yield.
We note that since $\Psi(t)=v$ is a stationary solution of the Riccati ODE \eqref{riccati_d}, the vector $v$ satisfies the following {\em quadratic vector equation}: $$-\frac{1}{2}v^\dagger \alpha_{i}v+\beta_i^\dagger v+\delta_i=0,\quad i\in\emph{I},\quad B_{JJ}^\dagger v_J+\delta_J=0.$$ However, in general this quadratic vector equation may have multiple solutions leading to multiple exponential-affine eigenfunctions. In order to determine the solution that defines the long-term factorization, if it exists, it is essential to verify that $v$ is the limiting solution of the Riccati ODE, i.e. that Eq.\eqref{psi_converge} holds. In this regard, we recall that \citet{linetsky_2014_cont} identified the unique {\em recurrent eigenfunction} $\pi_R$ of an affine pricing kernel with the {\em minimal} solution of the quadratic vector equation (see Appendix F in the on-line e-companion to \citet{linetsky_2014_cont}). We recall that, for a Markovian pricing kernel $S$ (see \citet{hansen_2009} and \citet{linetsky_2014_cont}), we can associate a martingale $$M^\pi_t=S_t e^{\lambda t}\frac{\pi(X_t)}{\pi(X_0)}$$ with {\em any} positive eigenfunction $\pi(x)$. In general, positive eigenfunctions are not unique. \citet{linetsky_2014_cont} proved uniqueness of a recurrent eigenfunction $\pi_R$ defined as a positive eigenfunction of the pricing kernel $S$, i.e. $${\mathbb E}_x^{\mathbb P}[S_t \pi(X_t)]=e^{-\lambda t}\pi(x)$$ for some $\lambda$, such that, under the locally equivalent probability measure (eigen-measure) ${\mathbb Q}^{\pi_R}$ defined by using the associated martingale $M_t^{\pi_R}$ as the Radon-Nikodym derivative, the Markov state process $X$ is recurrent. However, in general, without additional assumptions, the recurrent eigenfunction $\pi_R$ associated with the minimal solution to the quadratic vector equation may or may not coincide with the eigenfunction $\pi_L$ germane to the long-term limit and, thus, the long forward measure may or may not coincide with the recurrent eigenmeasure (the fixed point $v$ of the Riccati ODE may or may not be the minimal solution of the quadratic vector equation). Under additional exponential ergodicity assumptions the fixed point of the Riccati ODE is necessarily the minimal solution of the quadratic vector equation and $\pi_R=\pi_L$. If the exponential ergodicity assumption is not satisfied, they may differ, or one may exist, while the other does not exist. We refer the reader to \citet{linetsky_2014_cont} and \citet{linetsky_2014long} for the exponential ergodicity assumption. Analytical tractability of affine models allows us to provide fully explicit examples to illustrate these theoretical possibilities. In the next section we give a range of examples.
\section{Examples} \label{examples}
\subsection{Cox-Ingersoll-Ross Model} \label{example_cir}
Suppose the state follows a CIR diffusion (\citet{cox_1985_2}): \begin{equation} dX_t=(a -\kappa_{\mathbb P} X_t)dt+\sigma\sqrt{X_t}dW^{\mathbb{P}}_t, \eel{cir} where $a>0$, $\sigma>0$, $\kappa_{\mathbb P}\in {\mathbb R}$, and $W^{\mathbb{P}}$ is a one-dimensional standard Brownian motion (in this case $m=d=1$ and $n=0$). Consider the CIR pricing kernel in the form \eqref{affine_pk}. The short rate is given by \eqref{affineshortr} with $g=\gamma+au$ and $h=\delta-u\kappa_{\mathbb P}-u^2\sigma^2/2$. For simplicity we choose $\gamma=-au$ and $\delta=1+u\kappa_{\mathbb P}+u^2\sigma^2/2,$ so that the short rate can be identified with the state variable, $r_t=X_t$. The market price of Brownian risk is $\lambda_t=\sigma u \sqrt{X_t}$. Under ${\mathbb Q}$ the short rate follows the process \eqref{affineq}, which is again a CIR diffusion, but with a different rate of mean reversion: \begin{equation} \kappa_{\mathbb Q}=\kappa_{\mathbb P}+\sigma^2u. \end{equation}
The fixed point $v$ of the Riccati ODE $$\Psi'(t)=-\frac{1}{2}\sigma^2\Psi^2(t)-\kappa_{\mathbb P}\Psi(t)+\delta$$ with the initial condition $\Psi(0)=u$ can be readily determined. Since $-\frac{1}{2}u^2\sigma^2-u\kappa_{\mathbb P}+\delta=1>0$, we know that $\Psi(0)=u$ is between the two roots of the quadratic equation $-\frac{1}{2}\sigma^2 x^2-\kappa_{\mathbb P} x+\delta=0$. This immediately implies that $\Psi(t)$ converges to the larger root, i.e. \begin{equation} \lim_{t\rightarrow\infty}\Psi(t)=\frac{\sqrt{\kappa_{\mathbb P}^2+2\sigma^2\delta}-\kappa_{\mathbb P}}{\sigma^2}=\frac{\sqrt{\kappa_{\mathbb Q}^2+2\sigma^2}-\kappa_{\mathbb P}}{\sigma^2}=\frac{\kappa_{\mathbb L}-\kappa_{\mathbb P}}{\sigma^2}=:v, \end{equation} where we introduce the following notation: $$ \kappa_{\mathbb L}=\sqrt{\kappa_{\mathbb Q}^2+2\sigma^2}. $$ Thus, the long bond in the CIR model is given by \begin{equation} B_t^\infty=e^{\lambda t-\frac{\kappa_{\mathbb L}-\kappa_{\mathbb Q}}{\sigma^2}(X_t-X_0)} \eel{cirlongbond} with \begin{equation} \lambda=\frac{a(\kappa_{\mathbb L}-\kappa_{\mathbb Q})}{\sigma^2} \eel{cireigenvalue} and the long bond volatility $$ \sigma_t^\infty=-\frac{\kappa_{\mathbb L}-\kappa_{\mathbb Q}}{\sigma} \sqrt{X_t}. $$ Under the long forward measure the state follows the process \eqref{affinel}, which is again a CIR diffusion, but with the different rate of mean reversion $\kappa_{\mathbb L}>\kappa_{\mathbb Q}$. The fixed point $v$ is proportional to the difference between the rate of mean reversion under the long forward measure ${\mathbb L}$ and the data generating measure ${\mathbb P}$. It defines the market price of risk under ${\mathbb L}$ via $\lambda_t^\infty = v\sigma \sqrt{X_t}$.
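For completeness, the second equality in the display defining $v$ is a direct consequence of the parameter choices made above:
$$
\kappa_{\mathbb P}^2+2\sigma^2\delta=\kappa_{\mathbb P}^2+2\sigma^2\Big(1+u\kappa_{\mathbb P}+\frac{u^2\sigma^2}{2}\Big)=(\kappa_{\mathbb P}+\sigma^2 u)^2+2\sigma^2=\kappa_{\mathbb Q}^2+2\sigma^2,
$$
so the two expressions under the square root coincide.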
We note that if one selects $u=(-\kappa_\mathbb{P}\pm\sqrt{\kappa_\mathbb{P}^2-2\sigma^2})/\sigma^2$ in the specification of the pricing kernel, then $v=0$ and $\lambda_t^\infty=0$, so the martingale component in the long-term factorization is degenerate, and the pricing kernel is in the transition independent form. In this case, $\kappa_{\mathbb P}=\kappa_{\mathbb L}$ so that the data-generating measure coincides with the long-forward measure. This is the condition of Ross' recovery theorem (see \citet{linetsky_2014_cont} for more details).
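This choice of $u$ can be recovered as follows. The condition $v=0$ requires $\kappa_{\mathbb L}=\kappa_{\mathbb P}$, i.e. $\kappa_{\mathbb Q}^2+2\sigma^2=\kappa_{\mathbb P}^2$ (which also forces $\kappa_{\mathbb P}>0$, since $\kappa_{\mathbb L}>0$). Substituting $\kappa_{\mathbb Q}=\kappa_{\mathbb P}+\sigma^2 u$ gives the quadratic equation
$$
\sigma^2 u^2+2\kappa_{\mathbb P}u+2=0,
$$
whose roots are exactly $u=(-\kappa_{\mathbb P}\pm\sqrt{\kappa_{\mathbb P}^2-2\sigma^2})/\sigma^2$; real roots, and hence a transition independent specification, exist only if $\kappa_{\mathbb P}^2\geq 2\sigma^2$.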
Since the closed form solution for the CIR zero-coupon bond pricing function is available (\citet{cox_1985_2}), these results can also be recovered by directly calculating the limit $$\lim_{T\rightarrow \infty}\frac{P(T-t,y)}{P(T,x)}=e^{\lambda t}\frac{\pi(y)}{\pi(x)}$$ with the eigenvalue $\lambda$ given by Eq.\eqref{cireigenvalue} and the eigenfunction $\pi(x)=e^{-\frac{\kappa_{\mathbb L}-\kappa_{\mathbb Q}}{\sigma^2}x}$.
\begin{remark} \citet{borovicka_2014mis} in their Example 4 on p.2513 also consider an exponential-affine pricing kernel driven by a single CIR factor. However, their specification of the PK is in a special form such that $h=0$ in Eq.(4) for the short rate (which corresponds to the choice $\delta=u\kappa_{\mathbb P}+u^2\sigma^2/2$ in our parameterization). Thus, all dependence on the CIR factor is contained in the martingale component in the risk-neutral factorization of their PK, with the short rate being constant. In this special case the long bond is deterministic and the long forward measure is simply equal to the risk-neutral measure since the short rate is independent of the state variable. In this special case the pricing operator has two distinct positive eigenfunctions. One of the eigenfunctions is constant. This eigenfunction defines the risk-neutral measure, which coincides with the long forward measure in this case due to independence of the short rate and the eigenfunction of the state variable. The second eigenfunction (Eq.(19) in \citet{borovicka_2014mis}) defines a probability measure, which is distinct from the risk-neutral measure and, hence, distinct from the long forward measure as well. Depending on the specific parameter values of the CIR process, either one of the two eigenfunctions may serve as the recurrent eigenfunction. The eigenmeasure associated with the other eigenfunction will not be recurrent, as the CIR process will have a non-mean reverting drift under that measure. \end{remark}
\subsection{CIR Model with Absorption at Zero: ${\mathbb L}$ Exists, ${\mathbb Q}^{\pi_R}$ Does Not Exist} \label{example_absorb} We next consider a degenerate CIR model \eqref{cir} with $a=0$,
$\sigma>0$, and $\kappa_{\mathbb P}\in {\mathbb R}$. When $a$ vanishes, the diffusion has an absorbing boundary at zero, i.e. there is a positive probability to reach zero in finite time and, once reached, the process stays at zero with probability one for all subsequent times. Consider a pricing kernel in the form of Eq.\eqref{affine_pk}. The short rate is given by \eqref{affineshortr} with $g=\gamma$ and $h=\delta-u\kappa_\mathbb{P}-\frac{1}{2}u^2\sigma^2$. We assume $\gamma=0$ and $\delta=1+u\kappa_\mathbb{P}+\frac{1}{2}u^2\sigma^2>0$, so that the short rate $r_t$ takes values in $\mathbb{R}_+$. The market price of Brownian risk is $\lambda_t=\sigma u \sqrt{X_t}$, and under ${\mathbb Q}$ the short rate follows the process \eqref{affineq}, which is again a CIR diffusion with an absorbing boundary at zero, but with a different rate of mean reversion $\kappa_\mathbb{Q}=\kappa_\mathbb{P}+\sigma^2 u$.
It is clear that under any locally equivalent measure, zero remains absorbing and thus no recurrent eigenfunction exists. Nevertheless, we can proceed in the same way as in our analysis of the CIR model to show that $$B_t^\infty=e^{-\frac{\kappa_\mathbb{L}-\kappa_\mathbb{Q}}{\sigma^2}(X_t-X_0)}$$ with $\kappa_\mathbb{L}=\sqrt{\kappa_\mathbb{Q}^2+2\sigma^2}$ is the long bond and $X_t$ solves the CIR SDE \eqref{cir} with $a=0$ and mean-reverting rate $\kappa_\mathbb{L}$ under ${\mathbb L}$. In fact, the treatment of the long bond and the long forward measure is exactly the same as in the non-degenerate example with $a>0$, even though this case is transient with absorption at zero. The eigenvalue degenerates in this case, $\lambda=0$, and the asymptotic long-term zero-coupon yield vanishes, corresponding to the eventual absorption of the short rate at zero.
\subsection{Vasicek Model} \label{example_ou} Our next example is the \citet{vasicek_1977equilibrium} model with the state variable following the OU diffusion: \[ dX_t=\kappa(\theta_\mathbb{P}-X_t)dt+\sigma dW^{\mathbb{P}}_t \] with $\kappa>0$, $\sigma>0$ (in this case $m=0$, $n=d=1$). Consider the pricing kernel in the form \eqref{affine_pk}. The short rate is given by \eqref{affineshortr} with $g=\gamma+u\kappa\theta_\mathbb{P}-\frac{1}{2}u^2\sigma^2$ and $h=\delta-u\kappa$. For simplicity we choose $\gamma=-u\kappa\theta_\mathbb{P}+\frac{1}{2}u^2\sigma^2$ and $\delta=1+u\kappa,$ so that the short rate is identified with the state variable, $r_t=X_t$. The market price of Brownian risk is constant in this case, $\lambda_t=\sigma u$. Under ${\mathbb Q}$ the short rate follows the process \eqref{affineq}, which in this case is again the OU diffusion, but with a different long run mean $$\theta_\mathbb{Q}=\theta_\mathbb{P}-\frac{\sigma^2 u}{\kappa}$$ (the rate of mean reversion $\kappa$ remains the same). The explicit solution to the ODE $\Psi'(t)=-\kappa\Psi(t)+\delta$ with the initial condition $\Psi(0)=u$ is $$\Psi(t)=\Big(u-\frac{\delta}{\kappa}\Big)e^{-\kappa t}+\frac{\delta}{\kappa},$$ and the limit yields the fixed point $\lim_{t\rightarrow\infty}\Psi(t)=\frac{\delta}{\kappa}=:v.$ Thus, the long bond in the Vasicek model is given by $$B_t^\infty=e^{\lambda t-\frac{1}{\kappa}(X_t-X_0)}$$ with the long-term yield $$ \lambda=\theta_\mathbb{Q}-\frac{\sigma^2}{2\kappa^2} $$ and the long bond volatility $$\sigma_t^\infty=-\frac{\sigma}{\kappa}.$$ Under the long forward measure the short rate follows the process \eqref{affinel}, which is again the OU diffusion, but with a different long run mean $$\theta_\mathbb{L}=\theta_\mathbb{Q}-\frac{\sigma^2}{\kappa^2}$$ (the rate of mean reversion remains the same).
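As a consistency check on the displayed solution of the ODE above, differentiating gives $\Psi'(t)=\kappa\big(\frac{\delta}{\kappa}-u\big)e^{-\kappa t}$, while
$$
-\kappa\Psi(t)+\delta=-\kappa\Big(u-\frac{\delta}{\kappa}\Big)e^{-\kappa t}-\delta+\delta=\kappa\Big(\frac{\delta}{\kappa}-u\Big)e^{-\kappa t},
$$
so the ODE and the initial condition $\Psi(0)=u$ are indeed satisfied, and $\Psi(t)\rightarrow\delta/\kappa$ as $t\rightarrow\infty$ since $\kappa>0$.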
\subsection{Non-mean-reverting Gaussian Model: $\mathbb{Q}^{\pi_R}$ Exists, $\mathbb{L}$ Does not Exist}\label{L_no_exist}
Suppose $X_t$ is a Gaussian diffusion with affine drift and constant volatility \begin{equation} dX_t=\kappa(\theta-X_t)dt+\sigma dW^{\mathbb{P}}_t, \end{equation} but now with $\kappa<0$, so that the process is not mean-reverting. Consider a risk-neutral pricing kernel that discounts at the rate $r_t=X_t$, i.e. $S_t=e^{-\int_0^t X_s ds}$. Then the pure discount bond price is given by $P_t^T=P(X_t,T-t)$ with \begin{equation} P(x,t)=A(t)e^{-x B(t)}, \end{equation} \begin{equation} B(t)=\frac{1-e^{-\kappa t}}{\kappa},\enskip A(t)=\exp\Big\{(\theta-\frac{\sigma^2}{2\kappa^2})(B(t)-t)-\frac{\sigma^2}{4\kappa}B^2(t)\Big\}. \eel{bp_ou} It is easy to see that the ratio $P(y,T-t)/P(x,T)$ does not have a finite limit as $T\rightarrow \infty$ and, hence, $P_t^T/P_0^T$ does not converge as $T\rightarrow \infty$. Thus, the long bond and the long forward measure $\mathbb{L}$ do not exist in this case. However, the recurrent eigenfunction $\pi_R$ and the recurrent eigen-measure $\mathbb{Q}^{\pi_R}$ do exist in this case and are explicitly given in Section 6.1.3 of \citet{linetsky_2014_cont}. Under $\mathbb{Q}^{\pi_R}$, $X_t$ is the OU process with mean reversion (since $\kappa<0$): \begin{equation} dX_t=(\sigma^2/\kappa-\kappa\theta+\kappa X_t)dt+\sigma dW_t^{\mathbb{Q}^{\pi_R}}. \end{equation}
\subsection{ Breeden Model} Our next example is a special case of \citet{breeden_1979intertemporal} consumption CAPM considered in Example 3.8 of \citet{hansen_2009}. There are two independent factors, a stochastic volatility factor $X_t^v$ evolving according to the CIR process \begin{equation} dX_t^v=\kappa_v(\theta_v-X_t^v)dt+\sigma_v\sqrt{X_t^v} dW_t^{v,\mathbb{P}} \end{equation} and a mean-reverting growth rate factor $X_t^g$ evolving according to the OU process \[ dX_t^g=\kappa_g(\theta_g-X_t^g)dt+\sigma_g dW_t^{g,\mathbb{P}}. \] Here it is assumed that $\kappa_v,\kappa_g>0$, $\theta_v,\theta_g>0$, $\sigma_g>0$, $\sigma_v<0$ (so that a positive increment to $W^v$ reduces volatility), and $2\kappa_v\theta_v\geq \sigma_v^2$ (so that volatility stays strictly positive). Suppose that equilibrium consumption evolves according to \begin{equation} dc_t=X_t^g dt+\sqrt{X_t^v} dW_t^{v,\mathbb{P}}+\sigma_c dW_t^{g,\mathbb{P}}, \end{equation} where $c_t$ is the logarithm of consumption $C_t$. Thus, $X^g$ models predictability in the growth rate and $X^v$ models predictability in volatility. Suppose also that the representative consumer's preferences are given by \begin{equation} \mathbb{E}\left[\int_0^\infty e^{-b t}\frac{C_t^{1-a}-1}{1-a}dt\right] \end{equation} for $a,b>0$. Then the implied pricing kernel $S_t$ is \begin{equation} S_t=e^{-bt}C_t^{-a}=\exp\left(-a\int_0^t X_s^g ds-b t-a\int_0^t \sqrt{X_s^v} dW_s^{v,\mathbb{P}}-a\int_0^t \sigma_c dW_s^{g,\mathbb{P}}\right). \end{equation} Using the SDEs for $X^g$ and $X^v$ it can be cast in the affine form \eqref{affine_pk}: \begin{equation} \begin{array}{ll} S_t & =\exp\left( -\gamma t-\frac{a}{\sigma_v}(X_t^v-X_0^v)-\frac{a\sigma_c}{\sigma_g}(X_t^g-X_0^g)\right. \\
& \left.\quad-\frac{a\kappa_v}{\sigma_v}\int_0^t X_s^v ds-(a+\frac{a\sigma_c\kappa_g}{\sigma_g})\int_0^t X_s^gds\right),\\ \end{array} \end{equation} where $\gamma=b-\frac{a\kappa_v\theta_v}{\sigma_v}-\frac{a\sigma_c\kappa_g\theta_g}{\sigma_g}$.
\begin{proposition} If $\kappa_g>0$ (mean-reverting growth rate) and $\kappa_v+\sqrt{\kappa_v^2+2a\kappa_v\sigma_v}+a\sigma_v>0$, Eq.\eqref{psi_converge} holds and, thus, Theorem \ref{affine_long} applies. The long bond is given by \begin{equation} B_t^\infty=\exp\left(\lambda t+(\frac{a}{\sigma_v}-v_1)(X_t^v-X_0^v)+(\frac{a\sigma_c}{\sigma_g}-v_2)(X_t^g-X_0^g)\right), \end{equation} where $\lambda=\gamma-\frac{1}{2}\sigma_g^2v_2^2+\kappa_v\theta_v v_1+\kappa_g\theta_g v_2$, $v_1=(\sqrt{\kappa_v^2+2a\kappa_v\sigma_v}-\kappa_v)/\sigma_v^2$, $v_2=a(1/\kappa_g+\sigma_c/\sigma_g)$, and the state variables have the following dynamics under ${\mathbb L}$: \begin{equation} dX_t^v=\left(\kappa_v\theta_v-\sqrt{\kappa_v^2+2a\kappa_v\sigma_v}X_t^v\right)dt+\sigma_v\sqrt{X_t^v}dW_t^{v,\mathbb{L}}, \end{equation} \begin{equation} dX_t^g=\kappa_g\left(\theta_g-\frac{a\sigma_g^2}{\kappa_g^2}-\frac{a\sigma_c\sigma_g}{\kappa_g}-X_t^g\right)dt+\sigma_g dW_t^{g,\mathbb{L}}. \end{equation} \end{proposition} \begin{proof} In this model Eq.\eqref{riccati_d} reduces to \begin{equation} \begin{split} &\Phi^\prime(t)=-\frac{1}{2}\sigma_g^2\Psi_2(t)^2 +\kappa_v\theta_v\Psi_1(t)+\kappa_g\theta_g\Psi_2(t)+\gamma, \quad \Phi(0)=0,\\ &\Psi_1^\prime(t)=-\frac{1}{2}\sigma_v^2\Psi_1(t)^2 -\kappa_v\Psi_1(t)+\frac{a\kappa_v}{\sigma_v},\quad \Psi_1(0)=\frac{a}{\sigma_v},\\ &\Psi_2^\prime(t)=-\kappa_g\Psi_2(t)+a+\frac{a\sigma_c\kappa_g}{\sigma_g},\quad \Psi_2(0)=\frac{a\sigma_c}{\sigma_g}.\\ \end{split} \end{equation} In this special case $\Psi_1(t)$ and $\Psi_2(t)$ are separated and thus can be analyzed independently. It is easy to see that if $\kappa_g>0$ then $\Psi_2(t)$ converges to $v_2$. When $\kappa_v+\sqrt{\kappa_v^2+2a\kappa_v\sigma_v}+a\sigma_v>0$, $\frac{a}{\sigma_v}$ is greater than the smaller root of the second order equation $-\frac{1}{2}\sigma_v^2\Psi_1(t)^2 -\kappa_v\Psi_1(t)+\frac{a\kappa_v}{\sigma_v}$, which implies that $\Psi_1(t)$ converges to the larger root of the second-order equation for $v_1$. The eigenvalue and the dynamics of the state variable can be computed accordingly. $\Box$. \end{proof}
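To make the last step of the proof explicit, the fixed points are obtained by equating the right-hand sides of the $\Psi_1$ and $\Psi_2$ equations to zero:
$$
-\kappa_g v_2+a+\frac{a\sigma_c\kappa_g}{\sigma_g}=0\quad\Longleftrightarrow\quad v_2=a\Big(\frac{1}{\kappa_g}+\frac{\sigma_c}{\sigma_g}\Big),
$$
$$
-\frac{1}{2}\sigma_v^2x^2-\kappa_v x+\frac{a\kappa_v}{\sigma_v}=0\quad\Longleftrightarrow\quad x=\frac{-\kappa_v\pm\sqrt{\kappa_v^2+2a\kappa_v\sigma_v}}{\sigma_v^2},
$$
and the limit of $\Psi_1(t)$ is the larger of the two roots, which is the value $v_1$ stated in the proposition.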
The proof essentially combines the proofs in Examples \ref{example_cir} and \ref{example_ou}. Similar to these examples, we observe that the rate of mean reversion of the volatility factor is higher under the long forward measure, $\sqrt{\kappa_v^2+2a\kappa_v\sigma_v}>\kappa_v$, while the rate of mean reversion of the growth rate remains the same, but its long run level is lower under ${\mathbb L}$.
\subsection{ \citet{borovicka_2014mis} Continuous-Time Long-Run Risks Model}
Our next example is a continuous-time version of the long-run risks model of \citet{bansal_2004risks} studied by \citet{borovicka_2014mis}. It features growth rate predictability and stochastic volatility in the aggregate consumption and recursive preferences. The model is calibrated to the consumption dynamics in \citet{bansal_2004risks}. The two-dimensional state modeling growth rate predictability and stochastic volatility follows the affine dynamics: {\small\begin{equation} d\begin{bmatrix} X^1_t\\ X^2_t\\ \end{bmatrix} =\left( \begin{bmatrix} 0.013\\ 0 \end{bmatrix}+ \begin{bmatrix} -0.013&0\\ 0&-0.021\\ \end{bmatrix} \begin{bmatrix} X^1_t\\X^2_t \end{bmatrix}\right)dt+\sqrt{X_t^1} \begin{bmatrix} -0.038&0\\ 0&0.00034 \\ \end{bmatrix} d \begin{bmatrix} W^{1,\mathbb{P}}_t\\ W^{2,\mathbb{P}}_t\\ \end{bmatrix}, \end{equation}} where $W^{i,\mathbb{P}}_t,$ $i=1,2,$ are two independent Brownian motions. Here $X^1_t$ is the stochastic volatility factor following a CIR process and $X^2_t$ is an OU-type mean-reverting growth rate factor with stochastic volatility. The aggregate consumption process $C_t$ in this model evolves according to \begin{equation} d\log C_t=0.0015dt+X^2_tdt+\sqrt{X^1_t} 0.0078 dW^{3,\mathbb{P}}_t, \end{equation} where $W^{3,\mathbb{P}}$ is a third independent Brownian motion modeling direct shocks to consumption. Numerical parameters are from \citet{borovicka_2014mis} and are calibrated to monthly frequency (here time is measured in months). The representative agent in this model is endowed with recursive homothetic preferences and a unitary elasticity of substitution. \citet{borovicka_2014mis} solve for the pricing kernel: \[ d\log S_t=-0.0035dt-0.0118X^1_tdt-X^2_t dt-\sqrt{X^1_t}\Big[0.0298\quad0.1330\quad0.0780\Big]dW^{\mathbb{P}}_t, \] where the three-dimensional Brownian motion $W^{\mathbb{P}}_t=(W^{i,\mathbb{P}}_t)_{i=1,2,3}$ is viewed as a column vector.
We now cast this model specification in the {\em three-dimensional} affine form of Assumption \ref{assumption_affine_PK}. To this end, we introduce a third factor $X^3_t=\log S_t$. We can then write the pricing kernel in the exponential affine form $S_t=e^{X_t^3}$, where the state vector $(X^1_t,X^2_t,X^3_t)$ follows a three-dimensional affine diffusion driven by a three-dimensional Brownian motion: \begin{equation} dX_t =\left( b+ B X_t\right)dt+\sqrt{X_t^1} \rho dW^{\mathbb{P}}_t, \end{equation} where the numerical values for entries of the three-dimensional vector $b$ and $3\times 3$-matrices $B$ and $\rho$ are given above.
We can now directly apply our general results for affine pricing kernels. First, by Theorem \ref{RN_affine}, the short rate is $r(X_t)=0.0035-0.00057798 X^1_t+X^2_t$ and depends only on the factors $X^1$ and $X^2$ and is independent of $X^3$. The risk-neutral (${\mathbb Q}$-measure) dynamics is given by: \begin{equation} d\begin{bmatrix} X^1_t\\ X^2_t\\ X^3_t\\ \end{bmatrix} =\left( \begin{bmatrix} 0.013\\0\\-0.0035 \end{bmatrix}+ \begin{bmatrix} -0.0119&0&0\\ -0.00004522&-0.021&0\\ 0.0129&-1&0\\ \end{bmatrix} \begin{bmatrix} X^1_t\\X^2_t\\X^3_t \end{bmatrix}\right)dt+\sqrt{X_t^1}\rho dW^{\mathbb{Q}}_t, \end{equation} where \begin{equation} \rho=\begin{bmatrix} -0.038&0&0\\0&0.00034&0\\-0.0298&-0.1330&-0.0780\\ \end{bmatrix}. \end{equation}
The vector $\Psi(t)=(\Psi_1(t),\Psi_2(t),\Psi_3(t))^\dagger$ solves the ODE (here $\alpha:=\rho\rho^\dagger$): $$ \Psi_1^\prime(t)=-\frac{1}{2}\Psi(t)^\dagger \alpha\Psi(t)+B_{11}\Psi_1(t)+B_{21}\Psi_2(t)+B_{31}\Psi_3(t), $$ $$ \Psi_2^\prime(t)=B_{22}\Psi_2(t)+B_{32}\Psi_3(t),\quad \Psi_3^\prime(t)=0 $$ with $\Phi(0)=\Psi_1(0)=\Psi_2(0)=0, \Psi_3(0)=-1$. It is immediate that $$\Psi_3(t)\equiv-1\quad \text{and}\quad \Psi_2(t)=\frac{B_{32}}{B_{22}}(1-e^{B_{22}t})$$ and, since $B_{22}<0$, $$\lim_{t\rightarrow\infty}\Psi_2(t)=B_{32}/B_{22}=47.6191=:v_2.$$ To see that $\Psi_1(t)$ converges, notice that we can write $-\frac{1}{2}\Psi(t)^\dagger \alpha\Psi(t)+B_{11}\Psi_1(t)+B_{21}\Psi_2(t)+B_{31}\Psi_3(t)=c_1(\Psi_1(t))^2 + c_2 \Psi_1(t) + c_3 (\Psi_2(t))^2 + c_4 \Psi_2(t) + c_5$, where $c_1, c_2, c_3, c_4, c_5<0$. Since $\Psi_1(0)=\Psi_2(0)=0$, we have $\Psi_1'(0)<0$, and since $\Psi_2(t)>0$, it is easy to see that $\Psi_1(t)<0$ for $t>0$. Since $\Psi_2(t)<v_2$, we have $c_1(\Psi_1(t))^2 + c_2 \Psi_1(t) + c_3 (\Psi_2(t))^2 + c_4 \Psi_2(t) + c_5>c_1(\Psi_1(t))^2 + c_2 \Psi_1(t) + c_3 v_2^2+c_4 v_2+c_5$. We can check that the equation $c_1(\Psi_1(t))^2 + c_2 \Psi_1(t) + c_3 v_2^2+c_4 v_2+c_5=0$ has two negative roots. Denoting the larger root by $v_1$, we see that $\Psi_1(t)>v_1$. Combining these facts, we see that $\Psi_1(t)$ converges to $v_1$. The exact value of $v_1$ has to be determined numerically. The numerical solution yields $$v_1=\lim_{t\rightarrow\infty}\Psi_1(t)=-0.2449.$$
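In the present specification the limit $v_1$ admits an explicit characterization: setting $\Psi_1^\prime(t)=0$ with $\Psi_2=v_2$ and $\Psi_3=-1$ in the ODE above, and noting that $\alpha_{12}=0$ (the first two rows of $\rho$ are orthogonal) and $B_{21}=0$, the fixed point $v_1$ solves the scalar quadratic
$$
-\frac{1}{2}\big(\alpha_{11}v_1^2-2\alpha_{13}v_1+\alpha_{22}v_2^2-2\alpha_{23}v_2+\alpha_{33}\big)+B_{11}v_1-B_{31}=0,
$$
which is the equation $c_1v_1^2+c_2v_1+c_3v_2^2+c_4v_2+c_5=0$ discussed above; the limit reported in the text is its larger (negative) root.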
In Figure \ref{phipsi}, we plot the functions $\Psi_1(t)$ and $\Psi_2(t)$, as well as the gross return $B_t^{t+T}$ on the $T$-bond over the period $[0,t]$ as a function of $T$. In this numerical example we take $t=12$ months, so we are looking at the one-year holding period return, and assume that the initial state $X_0$ and the state $X_t$ are both equal to the stationary mean under $\mathbb{P}$. We observe that in this model specification $\Psi(t)$ and $B_t^{t+T}$ are already very close to the fixed point for $t$ around 30 years (360 months). \begin{figure}
\caption{Plot of $\Psi_1(t)$, $\Psi_2(t)$ and $B_t^{t+T}$. Time is measured in months.}
\label{phipsi}
\end{figure}
By Theorem \ref{affine_long}, the eigenfunction determining the long bond is $\pi(x)=e^{-v_1 x^1-v_2 x^2},$ corresponding to the eigenvalue (note this is not annualized yield since time unit is in month) $$\lambda=b_1v_1+b_2v_2-b_3=0.0003163,$$ the long bond is given by $$B_t^\infty=e^{\lambda t - v_1(X_t^1-X_0^1)-v_2(X_t^2-X_0^2)},$$ the martingale component is given by $$M_t^\infty=e^{\lambda t - v_1(X_t^1-X_0^1)-v_2(X_t^2-X_0^2)+X_t^3},$$ and the state vector $(X^1_t, X_t^2, X^3_t)$ has the following dynamics under the long forward measure ${\mathbb L}$: \begin{equation} d\begin{bmatrix} X^1_t\\ X^2_t\\ X^3_t\\ \end{bmatrix} =\left( \begin{bmatrix} 0.013\\0\\-0.0035 \end{bmatrix}+ \begin{bmatrix} -0.0115&0&0\\ -0.00005074&-0.021&0\\ 0.0153&-1&0\\ \end{bmatrix} \begin{bmatrix} X^1_t\\X^2_t\\X^3_t \end{bmatrix}\right)dt+\sqrt{X_t^1}\rho dW^{\mathbb{L}}_t. \end{equation}
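As a quick arithmetic check of the reported eigenvalue, with $b=(0.013,0,-0.0035)^\dagger$ and the limits $v_1=-0.2449$, $v_2=47.6191$ one gets
$$
\lambda=b_1v_1+b_2v_2-b_3=0.013\times(-0.2449)+0-(-0.0035)=0.0003163
$$
per month, in agreement with the value stated above.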
As already observed by \citet{borovicka_2014mis}, in this model the state dynamics under the long forward measure ${\mathbb L}$ is close to the state dynamics under the risk-neutral measure ${\mathbb Q}$ and is substantially distinct from the dynamics under the data-generating measure ${\mathbb P}$ due to the volatile martingale component $M_t^\infty$. However, our approach to the analysis of this model is different from the analysis of \citet{borovicka_2014mis}. We cast it as a three-factor affine model and directly apply our Theorem \ref{affine_long} for affine models that is, in turn, a consequence of our Theorem \ref{implication_L1} for semimartingale models. We only need to determine the fixed point \eqref{psi_converge} of the Riccati equation. Existence of the long bond, the long term factorization of the pricing kernel, and the long forward measure then immediately follow from Theorem \ref{affine_long}, without any need to verify ergodicity. In fact, the three-factor affine process $(X^1_t,X_t^2,X_t^3)$ is not ergodic, and not even recurrent, as is immediately seen from the dynamics of $X^3$. In contrast, the approach in \citet{borovicka_2014mis} relies on the two-dimensional mean-reverting affine diffusion $(X^1_t,X_t^2)$. Namely, since the Perron-Frobenius theory of \citet{hansen_2009} requires ergodicity to single out the principal eigenfunction and ascertain its relevance to the long-term factorization, \citet{borovicka_2014mis} implicitly split the pricing kernel into the product of two sub-kernels, a multiplicative functional of the two-dimensional Markov process $(X^1_t,X_t^2)$ and the additional factor in the form $e^{-\int_0^t 0.0780\sqrt{X_s^1}dW_s^{3,{\mathbb P}}}$. The Perron-Frobenius theory of \citet{hansen_2009} is then applied to the multiplicative functional of the two-dimensional Markov process $(X^1_t,X_t^2)$. In contrast, in our approach we do not require ergodicity and work directly with the non-ergodic three-dimensional process and verify that the Riccati ODE possesses a fixed point, which is already sufficient for existence of the long-term factorization in affine models by Theorem \ref{affine_long}.
\section{Conclusion}
This paper constructs and studies the long-term factorization of affine pricing kernels into discounting at the rate of return on the long bond and the martingale component that accomplishes the change of probability measure to the long forward measure. It is shown that the principal eigenfunction of the affine pricing kernel germane to the long-term factorization is an exponential-affine function of the state vector with the coefficient vector identified with the fixed point of the Riccati ODE. The long bond volatility and the volatility of the martingale component are explicitly identified in terms of this fixed point. When analyzing a given affine model, a researcher needs to establish whether the Riccati ODE possesses a fixed point. If the fixed point is determined, the long-term factorization then follows. It is shown how the long-term factorization plays out in a variety of asset pricing models, including single factor CIR and Vasicek models, a two-factor version of Breeden's CCAPM, and the three-factor long-run risks model studied in \citet{borovicka_2014mis}.
\end{document}
\begin{document}
\title{Hochschild cohomology of the algebra of conformal endomorphisms}
\section{Introduction}
The notion of a conformal (Lie) algebra emerged in \cite{KacValgBeginners} as a tool in the theory of vertex algebras, which goes back to mathematical physics \cite{BPZ1983} and representation theory (see, e.g., \cite{Borch}). From the algebraic point of view, the structure of a vertex algebra is a blend of two structures: a differential left-symmetric algebra and a Lie conformal algebra \cite{BK-Field2002}.
The structure theory of (finite) Lie conformal algebra was developed in \cite{DK1998}, irreducible representations of simple and indecomposable semisimple finite Lie conformal algebras were described in \cite{ChengKac}. Given a finite conformal module $M$ over a Lie conformal algebra $L$, the representation of $L$ on $M$ is a homomorphism from $L$ to the Lie conformal algebra of conformal endomorphisms $\mathrm{gc}\,(M)$, see \cite[Ch.~2]{KacValgBeginners}. The latter is an analogue of the ``ordinary'' Lie algebra $\mathrm{gl}\,(V)$ of a linear space $V$ in the category of conformal algebras. As in ordinary algebras, $\mathrm{gc}\,(M)$ is the commutator algebra of an {\em associative} conformal algebra $\mathop {\fam 0 Cend} \nolimits (M)$. Thus the study of associative conformal algebras (and $\mathop {\fam 0 Cend} \nolimits (M)$, in particular) is essential for representation theory of Lie conformal algebras and, as a corollary, for vertex algebras theory. A systematic study of $\mathop {\fam 0 Cend} \nolimits (M)$ was performed in \cite{BKL2003}, its simple subalgebras were described in \cite{Kol2006Adv}. The most interesting case is when $M$ is a free $H$-module of rank $k$, then $\mathop {\fam 0 Cend} \nolimits (M)$ is denoted $\mathop {\fam 0 Cend} \nolimits_k$. This system plays the same role in the theory of conformal algebras as the matrix algebra $M_k(\Bbbk )$ does in the ordinary algebra.
The homological study of conformal algebras started with the paper \cite{BKV}. Conceptually, to define (co)chains, (co)cycles, and (co)boundaries for a particular class of algebras over a field $\Bbbk $, one needs to know what a multilinear mapping is, how to combine such mappings, and how symmetric groups act on multilinear mappings. All these notions have their analogues in the category of modules over cocommutative bialgebras, that is, these are pseudo-tensor categories \cite{BDK}. In particular, the definition of Hochschild cohomologies for an associative algebra in the pseudo-tensor category over the polynomial bialgebra $H=\Bbbk [\partial ]$, where $\partial $ is a primitive element, coincides with the definition of Hochschild cohomology of associative conformal algebras in \cite{BKV}.
It is well-known since \cite{Hoch1943} that for the associative algebra $\mathop {\fam 0 End} \nolimits (V)$ of linear transformations of a finite-dimensional space $V$ all $n$th Hochschild cohomology groups are trivial for $n\ge 1$. The problem of description of conformal Hochschild cohomologies of $\mathop {\fam 0 Cend} \nolimits (M)$ for a finite $H$-module $M$ was stated in \cite{BKV}. In \cite{Dolg2009}, it was shown that the second Hochschild cohomology group of $C=\mathop {\fam 0 Cend} \nolimits (M)$ is trivial for all conformal bimodules over~$C$, which was a partial solution of the problem from \cite{BKV}. The purpose of this paper is to complete the solution of this problem and prove that all $n$th Hochschild cohomology groups of $\mathop {\fam 0 Cend} \nolimits (M)$ for $n\ge 2$ with coefficients in all conformal bimodules over $\mathop {\fam 0 Cend} \nolimits (M)$ are trivial. Note that the classical argument (see \cite{Hoch1943}) based on the isomorphism $\mathrm H^n(A,M)\simeq \mathrm H^{n-1}(A, \mathrm{Hom}\,(A,M))$ does not work for conformal algebras since
the analogue of $\mathrm{Hom}$ denoted $\mathrm{Chom}$ (see \cite{KacValgBeginners}) does not carry a structure of conformal bimodule due to locality issues.
As shown in \cite{BKV}, the calculation of conformal Hochschild cohomology $\mathrm H^\bullet (C,M)$ of an associative conformal algebra $C$ with coefficients in a conformal bimodule $M$ over $C$ is based on the ordinary Hochschild cohomology $\mathrm H^\bullet (\mathcal A_+(C), M)$, where $\mathcal A_+(C)$ is the positive part of the coefficient algebra of~$C$.
For $C=\mathop {\fam 0 Cend} \nolimits_k$, the positive part $\mathcal A_+(\mathop {\fam 0 Cend} \nolimits_k)$ of its coefficient algebra is isomorphic to the matrix algebra over the first Weyl algebra $W_1$, i.e., the unital associative algebra generated by two elements $p$, $q$ such that $qp-pq=1$.
The series of Weyl algebras (and, in particular, the first one) is under intensive study in various areas of mathematics. Homological properties of these algebras were considered, for example, in \cite{GHL, Rine, Hart}. For instance, the global dimension of the Weyl algebra $W_n$, $n\ge 1$, essentially depends on the characteristic of the base field. One of the by-products of this paper is an explicit computation of the 3rd Hochschild cohomology group of the first Weyl algebra by means of the Anick resolution. We apply the Morse matching method to transform a bar-resolution of the first Weyl algebra into its Anick resolution and calculate explicitly $\mathrm H^3(W_1, M)$ for an arbitrary $W_1$-bimodule~$M$.
As a result, we solve a problem stated in \cite{BKV} on the computation of Hochschild cohomologies of the conformal algebra $\mathop {\fam 0 Cend} \nolimits_k$: we prove $\mathrm H^n(\mathop {\fam 0 Cend} \nolimits_k, M)=0$ for all $n\ge 2$ and for all conformal bimodules $M$ over $\mathop {\fam 0 Cend} \nolimits_k$.
\section{Morse matching method for constructing the Anick resolution}\label{sec:MorseMatching}
The idea of D. Anick on the construction of a relatively small free resolution for an augmented algebra has shown its effectiveness in a series of applications \cite{AK2020,A2021,A2022-Cn, A2022, Akl, lopatkin}. Let us briefly observe the main points of this construction and its application to the computation of Hoch\-schild cohomologies of associative algebras. Suppose $\Lambda $ is a unital associative algebra equipped with a homomorphism $\varepsilon: \Lambda \to \Bbbk $, $\varepsilon(1)=1$ (augmentation). Denote by $A$ the cokernel $\Lambda/\Bbbk $ of the inverse embedding $\eta : \Bbbk \to \Lambda $ and consider the two-sided bar resolution of free $\Lambda$-bimodules \[ 0\leftarrow \Bbbk \leftarrow \mathrm{B}_{0} \leftarrow \mathrm{B}_1 \leftarrow \dots \leftarrow \mathrm{B}_n \leftarrow \mathrm{B}_{n+1} \leftarrow \dots , \] where $\mathrm{B}_0 = \Lambda\otimes \Lambda $, $\mathrm{B}_n = \Lambda \otimes A^{\otimes n}\otimes \Lambda $ for $n\ge 1$.
We will denote a tensor $a_1\otimes \dots \otimes a_n \in A^{\otimes n}$ as $[a_1|\ldots |a_n]$ and omit the tensor product signs between $\Lambda $ and $A^{\otimes n}$. The arrows $d_{n+1}: \mathrm{B}_{n+1} \to \mathrm{B}_n$ are $\Lambda $-bimodule homomorphisms given by \begin{equation}\label{eq:Bar-Differential}
d_{n+1}[a_1|\ldots |a_{n+1}]
= a_1[a_2|\ldots |a_{n+1}]
+\sum\limits_{i=1}^n (-1)^i[a_1| \ldots |a_ia_{i+1}|\ldots |a_{n+1}]
+ (-1)^{n+1} [a_1|\ldots |a_n] a_{n+1}, \end{equation} for $n>0$, and \[ d_1: [a]\mapsto a\otimes 1 - 1\otimes a, \quad d_0: a\otimes b \mapsto \varepsilon(ab). \] If $M$ is an arbitrary unital $\Lambda $-bimodule then \[ \Hom_{\Lambda{-}\Lambda} (\mathrm B_n, M) \simeq \Hom (A^{\otimes n}, M) \] as linear spaces, and for every $\varphi \in \Hom_{\Lambda{-}\Lambda} (\mathrm B_n, M)$ the composition $\varphi d_{n+1}: \mathrm B_{n+1} \to M$ corresponds to the $\Bbbk $-linear map $\Delta^n (\varphi ): A^{\otimes (n+1)}\to M$ which is given by the Hochschild differential formula.
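Explicitly, evaluating $\varphi $ on \eqref{eq:Bar-Differential} shows that $\Delta^n (\varphi )$ acts by
\[
(\Delta^n\varphi )(a_1,\ldots ,a_{n+1})
 = a_1\varphi (a_2,\ldots ,a_{n+1})
 +\sum\limits_{i=1}^{n}(-1)^i \varphi (a_1,\ldots ,a_ia_{i+1},\ldots ,a_{n+1})
 +(-1)^{n+1}\varphi (a_1,\ldots ,a_n)a_{n+1},
\]
which is the usual Hochschild differential.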
Therefore, if we start with an associative algebra $A$ and adjoin an external unit to get $\Lambda = A\oplus \Bbbk 1$ with $\varepsilon(A)=0$, then the cochain complex \[ \big ( \Hom_{\Lambda{-}\Lambda} (\mathrm B_\bullet , M), \Delta^\bullet \big ) \] coincides with the Hochschild complex $\mathrm {C}^\bullet (A,M)$.
The bar resolution $(\mathrm B_\bullet , d_\bullet)$ is easy to construct but it is too large for particular computations. Therefore, it is reasonable to replace $(\mathrm B_\bullet , d_\bullet)$ with a smaller but homotopy equivalent resolution, e.g., the {\em Anick resolution} $(\mathrm A_\bullet, \delta_\bullet)$, \[ 0\leftarrow \Bbbk \leftarrow \mathrm A_0 \leftarrow \mathrm A_1 \leftarrow \dots \leftarrow \mathrm A_n \leftarrow \mathrm A_{n+1} \leftarrow \dots, \quad \delta_{n+1}: \mathrm A_{n+1}\to \mathrm A_n. \] Then, given an $A$-bimodule (hence, a unital $\Lambda $-bimodule), the cohomologies of the complex \[ \big ( \Hom_{\Lambda{-}\Lambda} (\mathrm A_\bullet, M), \Delta^\bullet \big ), \quad \Delta^{n}\varphi = \varphi \delta_{n+1}, \ \varphi \in \Hom_{\Lambda{-}\Lambda} (\mathrm A_n, M), \] coincide with the Hochschild cohomologies $\mathrm H^\bullet (A,M)$.
Suppose $X$ is a set of generators of the algebra $A$. Denote by $X^*$ the set of nonempty words in $X$, and let $\Bbbk \langle X\rangle $ stand for the linear span of $X^*$, this is the free associative algebra generated by~$X$.
Let $\Sigma \subset \Bbbk \langle X\rangle $ be a Gr\"obner--Shirshov basis of $A$ relative to an appropriate monomial order (e.g., deg-lex order). We will denote by $V = \overline \Sigma $ the set of principal parts of relations from~$\Sigma $ (called {\em obstructions}). Recall that $\mathrm A_0 = \mathrm B_0 = \Lambda \otimes \Lambda $, $\mathrm A_n = \Lambda \otimes \Bbbk V^{(n-1)}\otimes \Lambda $, where $V^{(k)}$ stands for the set of {\em Anick $k$-chains}. By definition (see \cite{Anick1983}), $V^{(0)}=\{[x] \mid x\in X\}$,
$V^{(1)} = \{[x|s] \mid x\in X, s\in X^*, xs\in V\}$, and for $k\ge 2$ the set $V^{(k)}$ is constructed on the words in $X^*$ obtained by consecutive ``hooking'' of the words from $\overline \Sigma $.
This definition becomes transparent in the case when the defining relations $\Sigma $ contain at most quadratic monomials, so that all words in $V $ are of length two. For $n \ge 1$, an Anick $n$-chain is a word
$v=[x_{0}|\ldots |x_{n}] \in X^{*}$ such that $x_ix_{i+1}\in V$ for $i=0,\ldots,n-1$.
\begin{example}\label{exmp:UnivEnvelope} Let $\mathfrak g$ be a Lie algebra over $\Bbbk $ with a linearly ordered basis $X$. Denote $[x,y]\in \Bbbk X$, $x,y\in X$, the Lie product in $\mathfrak g$. Set $\Sigma = \{ xy-yx-[x,y] \mid x,y\in X, x>y \}$, $A=\Bbbk \langle X\rangle /(\Sigma )$. Then $\Lambda = A\oplus \Bbbk 1 $ is exactly the universal
enveloping associative algebra $U(\mathfrak g)$. Then $V^{(k)} = \{[x_0|x_1|\ldots |x_k] \mid x_0>x_1>\dots >x_k, x_i\in X \}$. The elements of $V^{(k)}$ are in obvious one-to-one correspondence with the basis of $\wedge^{k+1}\mathfrak g$. \end{example}
The Anick differentials $\delta_{n+1}:\mathrm A_{n+1}\to \mathrm A_n$ were computed in \cite{Anick1983} by means of a complicated inductive procedure. In order to make this computation easier, in \cite{JollWelker} and, independently, in \cite{Skoldb}, it was proposed to use algebraic discrete Morse theory developed in \cite{formancell, formanguide} to construct a smaller complex (of free modules) which is homotopy equivalent to a given one. In particular, given a bar resolution of an augmented algebra $\Lambda $, the resulting complex is the Anick resolution.
The Morse matching method for computing the Anick resolution \cite{JollWelker}, \cite{Skoldb} is also described in \cite{lopatkin, A2022}. In a few words, the problem is to choose a set of edges in the weighted directed graph describing the structure of the bar resolution. Then one has to transform the graph by means of inverting the matched edges. Inverting means not only switching the direction, but also replacing the weight $c$ of the matched edge with $-c^{-1}$. In the resulting graph, the non-matched vertices (critical cells) are exactly the Anick chains. Finally, in order to calculate the Anick differential $\delta_{n+1}$ on a chain $w$ from $V^{(n)}$ one has to track all paths from $w$ to vertices from $V^{(n-1)}$. The weight of each path is equal to the product of the weights of all its edges.
\begin{example}\label{exmp:Heisen-3} Let $\mathfrak g = H_3$ be the Heisenberg Lie algebra. The universal enveloping algebra $U(H_3)$ is generated by the elements $x,y,z$, relative to the following relations: \[ xy=yx+z,\quad xz=zx,\quad yz=zy. \] Assume $x>y>z$. Then the Anick $n$-chains are: \[
V^{(1)}=\{[x|y],[x|z],[y|z]\},
\quad V^{(2)}=\{[x|y|z]\}, \quad V^{(n)}=\emptyset,\ n\ge 3. \]
In order to compute $\delta_3[x|y|z]$, consider a fragment of the bar resolution graph and choose a Morse matching (dashed edges on Fig.~\ref{Fig1}). Tracking the paths and collecting similar terms lead to the following answer: \[
\delta_3[x|y|z]=x[y|z]-[y|z]x+[x|z]y -y[x|z]+z[x|y]-[x|y]z. \] \begin{figure}
\caption{Calculating the Anick differential of $[x|y|z]$ for $U(H_3)$}
\label{Fig1}
\end{figure} \end{example}
\begin{remark} The differential in Example~\ref{exmp:Heisen-3} corresponds to ``two-sided'' resolution. The restriction to the left module case (i.e., when multiplication by $x,y,z$ from the right is zero) leads us exactly to the Chevalley--Eilenberg differential for the Lie algebra $H_3$. This is a general observation: given a Lie algebra $\mathfrak g$, the ``left'' Anick resolution for $U(\mathfrak g)$ coincides with the Chevalley--Eilenberg resolution for~$\mathfrak g$. \end{remark}
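For instance, for the Heisenberg algebra of Example~\ref{exmp:Heisen-3}, setting the right multiplications by $x,y,z$ to zero in the differential computed above leaves
\[
\delta_3[x|y|z]= x[y|z]-y[x|z]+z[x|y],
\]
which is precisely the Chevalley--Eilenberg differential of $H_3$ under the identification $[x|y|z]\leftrightarrow x\wedge y\wedge z$ (the term coming from $[x,y]=z$ vanishes, since $z\wedge z=0$).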
When applied to the settings of Example~\ref{exmp:UnivEnvelope}, the Anick differential for $U(\mathfrak g)$ coincides with the Chevalley--Eilenberg differential for the Lie algebra~$\mathfrak g$.
\section{Conformal endomorphisms and the 1st Weyl algebra}
From now on, $\Bbbk $ is a field of characteristic zero, $H=\Bbbk [\partial ]$ is the polynomial algebra in one variable.
Suppose $V$ and $M$ are two $H$-modules. A {\em conformal homomorphism} \cite{KacValgBeginners} from $V$ to $M$ is a $\Bbbk $-linear map \[ \varphi _\lambda : V\to M[\lambda ]=\Bbbk [\partial,\lambda ]\otimes _H M \] such that \[ \varphi_\lambda (f(\partial ) v) = f(\partial+\lambda )\varphi_\lambda (v) \] for all $v\in V$, $f=f(\partial) \in H$.
If $M=V$ then the space of all conformal homomorphisms from $V$ to $M$ is denoted $\mathop {\fam 0 Cend} \nolimits (V)$. This is also an $H$-module: \[ (\partial \varphi)_\lambda = -\lambda \varphi_\lambda , \] and if $V$ is a finitely generated $H$-module then $\mathop {\fam 0 Cend} \nolimits (V)$ is an {\em associative conformal algebra} \cite{KacValgBeginners}: for every $\varphi,\psi \in \mathop {\fam 0 Cend} \nolimits(V)$ we have \[ (\varphi \oo\lambda \psi ) \in \mathop {\fam 0 Cend} \nolimits (V) \] defined by the rule \[ (\varphi \oo\lambda \psi )_\mu = \varphi_\lambda \psi _{\mu-\lambda }. \] If $V$ is a free $H$-module of rank $k\in \mathbb N$ then $\mathop {\fam 0 Cend} \nolimits(V)$ is denoted $\mathop {\fam 0 Cend} \nolimits_k$.
Up to an isomorphism (see \cite{BKL2003, Kol2006Adv}), one may identify $\mathop {\fam 0 Cend} \nolimits_k$ with the space of all $(k\times k)$-matrices over the polynomial ring $\Bbbk [\partial , x]$ equipped with the operation \[ f(\partial, x)\oo\lambda g(\partial , x) = f(-\lambda , x)g(\partial+\lambda , x+\lambda ), \] $f,g\in \Bbbk [\partial, x]$. For matrices, the operation $(\cdot\oo\lambda \cdot)$ is extended by the ordinary row-column rule.
Let $H$ act from the right on the Laurent polynomials $\Bbbk [t,t^{-1}]$ in such a way that $\partial = -d/dt$. For every conformal algebra $C$ in the sense of \cite{KacValgBeginners}, one may define the {\em coefficient algebra} $\mathcal A(C)$ as the linear space $\Bbbk[t,t^{-1}]\otimes _H C$ equipped with the multiplication
\begin{equation}\label{eq:CoeffProd} a(n)b(m) = \sum\limits_{s\ge 0} \binom{n}{s} (a\oo{s} b)(n+m-s) \end{equation}
where $t^n\otimes _H a = a(n)$ for $a\in C$, $n\in \mathbb Z$, and $(a\oo s b) $ stands for the coefficient at $\lambda^s/s!$ of $(a\oo\lambda b)$, $a,b\in C$. For polynomials from $\mathop {\fam 0 Cend} \nolimits_1$, for example, we have \[ f(x)\oo{s} g(x) = f(x)\dfrac{d^s}{dx^s} g(x) \] by the Taylor formula.
The subspace of $\mathcal A(C)$ spanned by all $a(n)$, $n\ge 0$, $a\in C$, is a subalgebra of $\mathcal A(C)$ denoted $\mathcal A_+(C)$. For instance, $\mathcal A(\mathop {\fam 0 Cend} \nolimits_1) = \Bbbk [t,t^{-1},x]$ as a linear space, the isomorphism identifies $t^n\otimes _H x^m$, $n\in \mathbb Z$, $m\in \mathbb Z_+$, with $x^m t^n \in \Bbbk [t,t^{-1},x]$. The product of two such monomials is calculated via \eqref{eq:CoeffProd}. For example, \[ t^n \cdot xt^m
= (1\oo{0} x)t^{n+m} + n (1\oo{1} x) t^{n+m-1}
= x t^{n+m} + n t^{n+m-1}, \] so $tx = xt +1$, $t^{-1}x = xt^{-1} - t^{-2}$, etc. Hence, $\mathcal A(\mathop {\fam 0 Cend} \nolimits_1)$ is isomorphic to the localization of the first Weyl algebra $W_1 = \Bbbk \langle p,q\mid qp-pq=1\rangle $ relative to the multiplicative set $\{q^s\mid s\ge 0\}$. The positive part $\mathcal A_+(\mathop {\fam 0 Cend} \nolimits_1)$ is isomorphic to the Weyl algebra itself, so $\mathcal A_+(\mathop {\fam 0 Cend} \nolimits_k) \simeq M_k(W_1)$.
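Under this isomorphism one may take $t\mapsto q$ and $x\mapsto p$: the relation computed above then reads
\[
qp = pq+1,
\]
i.e. $qp-pq=1$, and the multiplicative set $\{t^s\mid s\ge 0\}$ corresponds to $\{q^s\mid s\ge 0\}$.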
Let $C$ be an associative conformal algebra, and let $M$ be a conformal bimodule over~$C$. Then $M$ is a bimodule over the ordinary associative algebra $A=\mathcal A_+(C)$, the action is given by \[ a(n)\cdot u = a\oo{n} u,\quad u \cdot a(n) = \{u\oo{n} a\} = \sum\limits_{s\ge 0} (-1)^{n+s} \dfrac{1}{s!} \partial^s (u\oo{n+s} a), \] for $u\in M$, $a\in C$, $n\in \mathbb Z_+$.
The {\em basic Hochschild complex} \cite{BKV} of $C$ with coefficients in~$M$ is isomorphic to the Hochschild complex of $A=\mathcal A_+(C)$ with coefficients in the same bimodule~$M$. There is a linear map \[ D_n : \C^n( A,M) \to \C^n( A ,M) \] given by \[ (D_n f)(a_1(m_1),\ldots, a_n(m_n) ) = \partial f(a_1(m_1),\ldots, a_n(m_n)) + \sum\limits_{i=1}^n m_i f(a_1(m_1),\ldots, a_i(m_i-1), \ldots, a_n(m_n)), \] for $f\in \C^n( A ,M)$. The maps $D_n$ are induced by the derivation $\partial: a(m)\mapsto -ma(m-1)$ on the algebra $A$. Since $D_{n+1}\Delta^n = \Delta^n D_n$, the image $D_\bullet \C^\bullet (A,M)$ is a subcomplex of $\C^\bullet (A,M)$, and the quotient
\begin{equation}\label{eq:RestrictedComplex} \overline{\C}^\bullet(A,M) = \C^\bullet (A,M) / D_\bullet \C^\bullet (A,M) \end{equation}
is isomorphic to the {\em reduced Hochschild complex} of the conformal algebra $C$ (see \cite[Theorem 6.1, Corollary 6.1]{BKV}).
\begin{proposition}\label{prop:MainTool} If $C$ is an associative conformal algebra, $A = \mathcal A_+(C)$, $M$ is a conformal bimodule over $C$, and $\mathrm H^q(A,M)=0$ for all $q\ge 3$, then $\mathrm H^q(\overline{\C}^\bullet (A,M)) = 0$ for all $q\ge 3$. \end{proposition}
\begin{proof} The short exact sequence \[ 0\to D_\bullet \C^\bullet (A,M) \to
\C^\bullet (A,M) \to \overline{\C}^\bullet(A,M) \to 0 \] gives rise to the long exact sequence of cohomologies \[ \begin{aligned} \dots \to{}& \mathrm H^q (D_\bullet \C^\bullet (A,M)) \to \mathrm H^q (\C^\bullet (A,M)) \to \mathrm H^q (\overline{\C}^\bullet (A,M)) \\
\to{}& \mathrm H^{q+1} (D_\bullet \C^\bullet (A,M)) \to \mathrm H^{q+1} (\C^\bullet (A,M)) \to \mathrm H^{q+1} (\overline{\C}^\bullet (A,M)) \to \dots \end{aligned} \] By \cite[Proposition 2.1]{BKV}, the complexes $\C^\bullet =\C^\bullet (A,M)$ and $D_\bullet \C^\bullet $ are isomorphic in positive degrees. Hence, under the conditions of the statement, $\mathrm H^q (\overline{\C}^\bullet (A,M))$, $q\ge 3$, is clamped between zeros, thus it is zero itself. \end{proof}
\section{Two-sided Anick resolution for the first Weyl algebra}
In this section, we apply the Morse matching method described in Section \ref{sec:MorseMatching} to compute the 3rd Hochschild cohomology of the first Weyl algebra with coefficients in an arbitrary bimodule.
The Weyl algebra $W_1$ is generated by the elements $q,p,e$, relative to the following relations: \[ qp=pq+e,\quad pe=p,\quad qe=q,\quad eq=q,\quad ep=p,\quad ee=e. \] Assume $q>p>e$. Then the sets of Anick $n$-chains for $n=1,2,3$ are easy to find: \[ \begin{aligned}
V^{(1)}= {} & \{ [q|p],[q|e],[p|e],[e|q],[e|p],[e|e]\}, \\
V^{(2)}={} & \{ [q|p|e],[e|q|p],[q|e|p],[p|e|q],[q|e|e],[p|e|e],[e|e|q],[e|e|p], [e|q|e],\\
& [e|p|e],[q|e|q],[p|e|p],[e|e|e]\}, \\
V^{(3)}= {}& \{ [q|p|e|e],[e|q|p|e],[q|e|p|e],[p|e|q|e],[q|e|e|e],[p|e|e|e],\\
& [e|q|e|e], [e|p|e|e],[q|e|q|e],[p|e|p|e],[e|e|e|e],[e|e|q|p],[e|q|e|p],\\
& [e|p|e|q],[e|e|e|q],[e|e|e|p], [e|e|q|e], [e|e|p|e],[e|q|e|q],\\
& [e|p|e|p],[q|e|e|p],[p|e|e|q],[q|e|e|q],[p|e|e|p], [q|p|e|q], [q|p|e|p]\}. \end{aligned} \] In order to compute $\mathrm H^3(W_1, M)$ for an arbitrary $W_1$-bimodule $M$ we need to know the Anick differentials on $V^{(2)}$ and $V^{(3)}$.
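One can check that the defining relations above form a Gr\"obner--Shirshov basis with respect to the deg-lex order with $q>p>e$: every composition of the leading words $qp$, $pe$, $qe$, $eq$, $ep$, $ee$ resolves. For instance, the overlap $qpe$ gives
\[
(qp)e=(pq+e)e=p(qe)+ee=pq+e, \qquad q(pe)=qp=pq+e,
\]
and the remaining overlaps are treated in the same way.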
For example, consider a fragment of the graph constructed from the bar resolution of $\Lambda = W_1\oplus \Bbbk 1$
with the vertex $[q|p|e]$ with a matched edge $[p|q|e]\to [pq|e]$, see Fig.~\ref{Fig2}\,a.
Note that $[p|q]$ is not an Anick chain and thus should not be a critical cell. Indeed, the vertex $[p|q]$ belongs to another matched edge $[p|q]\to [pq]$, which also appears in the bar resolution graph, see Fig.~\ref{Fig2}\,b. In a similar way, we construct a fragment with the vertex $[e|q|p]$ on Fig.~\ref{Fig3}\,a:
all ending vertices of this fragment are either Anick chains or $[p|q]$ which is already matched.
Note that the vertices $[e|p|q]$ and $[q|p|e]$ belong to
matched edges. As a final example, consider the fragment with $[e|q|p|e]$ (Fig.~\ref{Fig3}\,b): all ending vertices of this graph are either Anick chains or already matched ones.
In the sequel, we will often omit symbols $|$ in the elements of $V^{(n)}$.
\begin{figure}
\caption{Calculating the Anick differential of $[q|p|e]$ and $[q|p]$}
\label{Fig2}
\end{figure}
\begin{figure}
\caption{Calculating the Anick differential of $[e|q|p]$ and $[e|q|p|e]$}
\label{Fig3}
\end{figure}
In the same way, one may compute Anick differentials on the other chains from $V^{(2)}$ and $V^{(3)}$. As a result, we get the following statements.
\begin{lemma}\label{lem:DiffV2} The mapping $\delta _3: \mathrm A_3\to \mathrm A_2$ is defined by \[ \begin{aligned}
\delta_3[qpe]= {}&q[pe]-p[qe]-[ee]+[qp]-[qp]e, \\
\delta_3[eqp]={}&e[qp]-[qp]+[ep]q+[ee]-[eq]p,\\
\delta_3[qep]={}& q[ep]-[qe]p,\\
\delta_3[peq]={}& p[eq]-[pe]q,\\
\delta_3[qee]={}& q[ee]-[qe]e,\\
\delta_3[pee]={}& p[ee]-[pe]e,\\
\delta_3[eeq]={}& e[eq]-[ee]q,\\
\delta_3[eep]={}& e[ep]-[ee]p,\\
\delta_3[eqe]={}& e[qe]-[qe]+[eq]-[eq]e,\\
\delta_3[epe] ={}& e[pe]-[pe]+[ep]-[ep]e,\\
\delta_3[qeq]={}& q[eq]-[qe]q,\\
\delta_3[pep]={}& p[ep]-[pe]p,\\
\delta_3[eee] ={}& e[ee]-[ee]e. \end{aligned} \] \end{lemma}
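As an illustration of how these formulas arise, the expression for $\delta_3[qep]$ follows from the bar differential \eqref{eq:Bar-Differential} alone:
\[
d_3[q|e|p]=q[e|p]-[qe|p]+[q|ep]-[q|e]p=q[ep]-[q|p]+[q|p]-[qe]p=q[ep]-[qe]p;
\]
here the two middle terms cancel, so no Morse paths have to be followed, while the entries involving the obstruction $qp$ require tracking paths as in Fig.~\ref{Fig2}.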
\begin{lemma}\label{lem:DiffV3} The mapping $\delta _4: \mathrm A_4\to \mathrm A_3$ is defined by \[ \begin{aligned}
\delta_4[qpee]={}&q[pee]-p[qee]-[eee]+[qpe]e, \\
\delta_4[qeep] ={}& q[eep]-[qep]+[qee]p,\\
\delta_4[peeq]={}& p[eeq]-[peq]+[pee]q,\\
\delta_4[qeee] ={}& q[eee]-[qee]+[qee]e,\\
\delta_4[peee]={}& p[eee]-[pee]+[pee]e,\\
\delta_4[eeeq]={}& e[eeq]-[eeq]+[eee]q,\\
\delta_4[eeep]={}& e[eep]-[eep]+[eee]p,\\
\delta_4[eeqe]={}& e[eqe]-[eeq]+[eeq]e,\\
\delta_4[eepe]={}& e[epe]-[eep]+[eep]e,\\
\delta_4[qeeq]={}& q[eeq]-[qeq]+[qee]q,\\
\delta_4[peep]={}& p[eep]-[pep]+[pee]p,\\
\delta_4[eeqp]={}& e[eqp]-[eep]q-[eee]+[eeq]p,\\
\delta_4[eqpe]={}& e[qpe]-[qpe]+[eee]-[eqp]+[eqp]e,\\
\delta_4[qepe]={}& q[epe]-[qep]+[qep]e,\\
\delta_4[peqe]={}& p[eqe]-[peq]+[peq]e,\\
\delta_4[eqee]={}& e[qee]-[qee]+[eqe]e,\\
\delta_4[epee]={}& e[pee]-[pee]+[epe]e,\\
\delta_4[qeqe]={}& q[eqe]-[qeq]+[qeq]e,\\
\delta_4[pepe]={}& p[epe]-[pep]+[pep]e,\\
\delta_4[eeee]={}& e[eee]-[eee]+[eee]e,\\
\delta_4[eqep]={}& e[qep]-[qep]+[eqe]p,\\
\delta_4[epeq]={}& e[peq]-[peq]+[epe]q,\\
\delta_4[epep]={}& e[pep]-[pep]+[epe]p,\\
\delta_4[eqeq]={}& e[qeq]-[qeq]+[eqe]q,\\
\delta_4[qpeq]={}& q[peq]-[eeq]-p[qeq]+[qpe]q,\\
\delta_4[qpep]={}& q[pep]-[eep]-p[qep]+[qpe]p. \end{aligned} \] \end{lemma}
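Again as an illustration, the formula for $\delta_4[qeee]$ is obtained from \eqref{eq:Bar-Differential} without following any Morse paths:
\[
d_4[q|e|e|e]=q[e|e|e]-[qe|e|e]+[q|ee|e]-[q|e|ee]+[q|e|e]e
=q[eee]-[qee]+[qee]-[qee]+[qee]e,
\]
which equals $q[eee]-[qee]+[qee]e$; the entries involving the obstruction $qp$ are computed by tracking paths as in Fig.~\ref{Fig3}.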
\begin{theorem}\label{thm:WeylCohomology} For an arbitrary $W_1$-bimodule $M$, the Hochschild cohomology group $\mathrm H^3(W_1,M)$ is trivial. \end{theorem}
\begin{proof} It is enough to find the respective cohomology group of the complex $\Hom_{\Lambda{-}\Lambda} (\mathrm A_\bullet , M)$, where $\Lambda = W_1\oplus \Bbbk 1$, as above.
Note that an arbitrary bimodule $M$ over $W_1$ is a direct sum of four components: \[ M = M_{1,1}\oplus M_{0,1}\oplus M_{1,0}\oplus M_{0,0}, \] where the identity element $e\in W_1$ act on $M_{i,j}$ in such a way that $eu= iu$, $ue = ju$, for $u\in M_{i,j}$, $i,j\in \{0,1\}$. Hence, we may consider cohomologies with coefficients on the summands $M_{i,j}$ separately.
First, assume $M=M_{1,1}$, i.e., $eu=ue=u$ for all $u\in M$. Suppose $\varphi : \mathrm A_3 \to M$ is a cocycle, i.e., $\Delta^3(\varphi)=\varphi \delta_{4}=0$. Apply $\varphi $ to all relations in Lemma~\ref{lem:DiffV3}: since zero emerges in all right-hand sides, we get the following relations on the values of $\varphi $ on the basis of $\mathrm A_3$ as of a free $\Lambda $-bimodule:
\begin{equation}\label{eq:CocycleRelations} \begin{aligned} \varphi[qpe]={}& -q\varphi[pee]+p\varphi[qee]+\varphi[eee], \\ \varphi[qep]={}& q\varphi[eep]+\varphi[qee]p,\\ \varphi[peq] ={}&p\varphi[eeq]+\varphi[pee]q,\\
q\varphi[eee]={}& p\varphi[eee] = \varphi[eee]q
= \varphi[eee]p = \varphi[eqe] = \varphi[epe]=0,\\
\varphi[qeq]&=q\varphi[eeq]+\varphi[qee]q,\\ \varphi[pep]&=p\varphi[eep]+\varphi[pee]p,\\ \varphi[eqp]&=\varphi[eep]q+\varphi[eee]-\varphi[eeq]p. \end{aligned} \end{equation}
As a corollary, \[ \varphi[eee] =e\varphi[eee]=q(p\varphi[eee])-p(q\varphi[eee])=0. \] Hence, $\varphi $ is completely determined by its values \[ \varphi[eeq],\ \varphi[eep],\ \varphi[qee],\ \varphi[pee]. \] Let us define $\psi\in \Hom_{\Lambda{-}\Lambda }(\mathrm A_2,M)$ in such a way that \[ \psi[eq]=\varphi[eeq], \ \psi[ep]=\varphi[eep], \ \psi[qe]=-\varphi[qee], \ \psi[pe]=-\varphi[pee], \] and $ \psi[ee]=\psi[qp] = 0$. Then $\Delta^2(\psi ) = \psi \delta_3$ is a coboundary, and \[ \begin{aligned} (\psi \delta_3)[eeq]&=e\psi[eq]-\psi[ee]q=\varphi[eeq]+0=\varphi[eeq],\\ (\psi \delta_3)[eep]&=e\psi[ep]-\psi[ee]p=\varphi[eep]+0=\varphi[eep], \\ (\psi \delta_3)[qee]&=q\psi[ee]-\psi[qe]e=0+\varphi[qee]=\varphi[qee],\\ (\psi \delta_3)[pee]&=p\psi[ee]-\psi[pe]e=0+\varphi[pee]=\varphi[pee]. \end{aligned} \] Hence, $\Delta^2(\psi )= \varphi $, i.e., every 3-cocycle is a coboundary, so $\mathrm H^3(W_1,M)=0$ for every bimodule $M$ over~$W_1$.
Next, assume $M= M_{1,0}$, i.e., $eu=u$ and $ue=0$ for all $u\in M$. Applying $\varphi $ to the relations of Lemma~\ref{lem:DiffV3} as above, we now obtain
\begin{equation}\label{eq:CocycleRelations left} \begin{gathered} q\varphi[pee] -p\varphi[qee]-\varphi[eee]=0, \quad \varphi[qep]= q\varphi[eep],\quad \varphi[peq] = p\varphi[eeq],\\ \varphi[qee]= q\varphi[eee],\quad \varphi[pee]= p\varphi[eee],\quad \varphi[eqe]= \varphi[eeq],\quad \varphi[epe]= \varphi[eep],\\ \varphi[eee]q=\varphi[eee]p=0,\quad \varphi[qeq]= q\varphi[eeq],\quad \varphi[pep]= p\varphi[eep],\quad \varphi[eqp]=\varphi[eee]. \end{gathered} \end{equation}
Therefore, $\varphi $ is completely determined by its values $\varphi[eeq]$, $\varphi[eep]$, $\varphi[eee]$, $\varphi[qpe]$. Let us define $\psi\in \Hom_{\Lambda }(\mathrm A_2,M)$ in such a way that \begin{gather*} \psi[eq]=\varphi[eeq], \ \psi[ep]=\varphi[eep], \ \psi[qe]=\varphi[qee],\\ \ \psi[pe]=\varphi[pee], \ \psi[ee]=\varphi[eee], \ \psi[qp]=\varphi[qpe]. \end{gather*} Then $\Delta^2(\psi ) = \psi \delta_3$ is a coboundary, and \[ \begin{aligned} (\psi \delta_3)[eeq]&=e\psi[eq]=\psi[eq]=\varphi[eeq],\\ (\psi \delta_3)[eep]&=e\psi[ep]=\psi[ep]=\varphi[eep], \\ (\psi \delta_3)[eee]&=e\psi[ee]=\varphi[eee]=\varphi[eee],\\ (\psi \delta_3)[qpe]&=q\psi[pe]-p\psi[qe]-\psi[ee]+\psi[qp]\\ &=q\varphi[pee]-p\varphi[qee]-\varphi[eee]+\varphi[qpe]\\ &=0+\varphi[qpe]=\varphi[qpe]. \end{aligned} \] Hence, $\Delta^2(\psi )= \varphi $, i.e., every 3-cocycle is a coboundary, so $\mathrm H^3(W_1,M)=0$.
The cases of right-unital ($M_{0,1}$) and trivial ($M_{0,0}$) modules are completely analogous. \end{proof}
Since for every associative algebra $A$ and for every $A$-bimodule $M$ we have $\mathrm H^{n+1}(A,M) = \mathrm H^{n}(A, \Hom(A,M))$,
all higher cohomologies (for $n\ge 3$) also vanish.
\begin{corollary} For every $n\ge 3$ we have $\mathrm H^n(W_1,M)=0$. \end{corollary}
{ The Hochschild cohomology is invariant under Morita equivalence of algebras, and it is known that an algebra $A$ is Morita equivalent to the algebra of matrices $M_n(A)$ \cite{Keller}, \cite[Chapter~7]{Lam}, \cite[Chapter~1]{Loday} so $\mathrm H^n(M_k(W_1),M)=\mathrm H^n(W_1,M)=0$. }
As a corollary, we obtain the following description of conformal Hochschild cohomologies of the associative conformal algebra $\mathop {\fam 0 Cend} \nolimits_k$.
\begin{theorem} Let $M$ be a conformal bimodule over $\mathop {\fam 0 Cend} \nolimits_k$, $k\ge 1$. Then $\mathrm H^n( \mathop {\fam 0 Cend} \nolimits_k,M )=0$ for $n\ge 2$. \end{theorem}
\begin{proof} Proposition~\ref{prop:MainTool} immediately implies $\mathrm H^n( \mathop {\fam 0 Cend} \nolimits_k,M )=0$ for $n\ge 3$. For $n=2$, the result was obtained in \cite{Dolg2009}. \end{proof}
\subsection*{Acknowledgments} The work was supported by Russian Science Foundation, project 23-21-00504.
\input biblio
\end{document}
\begin{document}
\selectlanguage{english}
\begin{center}\begin{large}\textbf{A Principal--Agent Model of Trading Under Market Impact\footnote{The research leading to these results has received funding from the ERC (grant agreement 249415-RMAC), from the Swiss Finance Institute project {\sl Systemic Risk and Dynamic Contract Theory}, as well as the SFB 649 {\sl Economic Risk}, and it is gratefully acknowledged.} \\ -Crossing networks interacting with dealer markets-}\end{large}
Jana Bielagk\footnote{Department of Mathematics, Humboldt-University Berlin, Unter den Linden 6, 10099 Berlin, Germany. \\ \hspace{1cm} [email protected]}, Ulrich Horst\footnote{Department of Mathematics, Humboldt-University Berlin, Unter den Linden 6, 10099 Berlin, Germany. \\[email protected]} \& Santiago Moreno--Bromberg\footnote{ Center for Finance and Insurance, Department of Banking and Finance, University of Zurich, Plattenstr. 14, 8032 Zurich, Switzerland. [email protected]} \end{center}
\begin{abstract} We use a principal--agent model to analyze the structure of a book--driven \textit{dealer market} when the dealer faces competition from a crossing network or dark pool. The agents are privately informed about their \textit{types} (e.g. their portfolios), which is something that the dealer must take into account when engaging his counterparties. Instead of trading with the dealer, the agents may choose to trade in a \textit{crossing network}. We show that the presence of such a network results in more types being serviced by the dealer and that, under certain conditions and due to reduced adverse selection effects, the book's \textit{spread} shrinks. We allow for the pricing on the dealer market to determine the structure of the crossing network and show that the same conditions that lead to a reduction of the spread imply the existence of an equilibrium book/crossing network pair.
\noindent\textit{AMS Classification}: 49K30; 65K10; 91A13; 91B24.
\noindent\textit{Keywords}: Asymmetric information; crossing networks; dealer markets; non--linear pricing; principal--agent games. \end{abstract}
\section{\large{Introduction}}
Recently, the analysis of optimal trading under market impact has received considerable attention. Starting with the contribution of~\cite{AlmgrenChriss00}, the existence of optimal trading strategies under illiquidity has been established by many authors, including~\cite{Forsyth2012}, \cite{GatheralSchied11}, \cite{KratzSchoeneborn13} and~\cite{SchiedSchoenebornTehranchi10}, just to name a few. The literature on trading under illiquidity typically assumes that block trading takes place under some (exogenous) pricing schedule, which describes the liquidity available for trading at different price levels. This article studies the impact of a crossing network (CN) on a dealer market (DM) within the scope of principal--agent models under hidden information (adverse selection). This asymmetric--information approach is a significant departure from the settings of the articles mentioned above. Specifically, we consider a one--period model where block trading is modeled via a risk--neutral dealer or market--maker who provides liquidity to a heterogeneous (in terms of idiosyncratic characteristics or ``types'') group of privately--informed investors or traders. Extending the seminal work on asset pricing under asymmetric information in~\cite{BMR}, we assume that each investor has an outside option that provides him with a type--dependent reservation utility that the dealer may not be able to match without making a loss. We allow the dealer to abstain from trading with investors whose outside options would be too costly to match. The fact that the dealer may choose between excluding agents, matching their outside options (which in some cases yields him strictly positive profits) or offering them contracts that result in utilities that strictly dominate their reservation ones, implies that a rich structure (in terms of the partition of the type space) may emerge in equilibrium. For instance, in Example~\ref{RichStructure} we analyze a scenario where the type space is partitioned into two intervals where the agents' outside options are matched, one where they are excluded and three where they earn positive rents. In more mundane terms, within a portfolio--liquidation framework, we may think of traders who need to unwind portfolios whose sizes are private information and who can either trade in a DM or a CN, the latter providing some of them with trading options that the dealer may be unable to improve upon without suffering losses. To the best of our knowledge, such adverse--selection models have thus far only been considered by~\cite{BJ:03} and~\cite{Page}. The latter analyzes, in quite a general setting where the set of consumer types is a Polish space and the contract space an arbitrary compact metric space, the problem of a monopolist who faces both an adverse--selection problem (as in the work at hand) as well as a moral--hazard one relative to contract performance. \cite{BJ:03}, on the other hand, only studies the adverse--selection problem in a finite-dimensional setting. This allows him to find a quasi--explicit representation of the optimal contract using Lagrange--multiplier techniques. He identifies conditions for the optimal contract to be separating, to be non--stochastic and to induce full participation. Furthermore, he also discusses the nature of the solution when bunching occurs. He does not, however, analyze the case where the dealer's choices may have an impact on the structure of the reservation--utility function, which in turn would influence his decisions.
Our study of such a feedback loop is novel and it is a crucial component in our analysis of the interaction between DMs and CNs, which is typically not unidirectional. To account for the fact that many off--exchange venues settle trades at prices taken from primary venues, we state sufficient conditions for the existence of an equilibrium pricing schedule. By this we mean that there exists a pricing schedule in the DM such that, if trades in the CN are settled at the best bid and ask prices from the DM, then the dealer's optimal pricing schedule is precisely that schedule.
In order to study the impact of a type--dependent outside option, we first analyze the benchmark case where the said option is trivial, i.e. all traders may abstain from engaging the dealer and in turn earn (or lose) nothing. In such a setting the dealer is able to match the traders' outside options by offering ``nothing in exchange for nothing'', which is costless. This analysis follows~\cite{BMR}. Next we look at the general case where the traders' reservation utilities are type dependent and the dealer need not be able to match them without incurring losses. It is well known that asymmetric information results, in equilibrium, in some traders being kept to their reservation utilities. This is due to the adverse--selection costs. Intuitively, these costs increase with the profitability of trading with high--type traders (e.g. investors with large portfolios). This suggests that when mostly high--type traders benefit from the outside option in terms of the latter strictly dominating what the dealer would have offered them in the benchmark case, then more low--type traders will be serviced in equilibrium. As a consequence of the reduced adverse--selection costs, more investors engage in trading, either in the DM or the CN. Our analysis further suggests that the presence of the CN is welfare improving even for investors for whom trading in the CN is not beneficial. We also provide sufficient conditions that guarantee that the competition from the CN results in a narrower spread in the DM. Overall, we propose a benchmark model of optimal block trading of privately--informed traders with an endogenous pricing schedule, analyze the impact of a CN on pricing schedules in DMs and prove an existence result of equilibria of best bid and ask prices in our trading game.
\subsection*{Related literature} \cite{HorstNaujokat13} and~\cite{KratzSchoeneborn13} were the first to allow orders to be simultaneously submitted both to a dealer market (DM) and to an off--exchange venue such as a crossing network (CN) or a dark pool (DP). These are alternative trading facilities that allow investors to reduce their market impact by submitting liquidity that is shielded from the public view. The downside is that trade execution is uncertain: trades take place only when the matching liquidity is or becomes available. In such a case, trades are typically settled at prices prevailing in an associated primary venue, which significantly reduces the cost of large trades if settled in a CN or in a DP. The aforementioned articles on optimal, simultaneous trading in DMs and CNs do not allow for an impact of off--exchange trading on the dynamics of the associated DM. Equilibrium models analyzing the impact of alternative trading venues on DMs and trading behavior have been extensively analyzed in the financial--economics literature; see, e.g.~\cite{Glosten} and~\cite{PS} and the references therein. To simplify the analysis of market impact, this literature typically assumes that the market participants trade only a single unit of the stock. For instance, in their seminal work, \cite{HM} derives conditions for the viability of the alternative trading institutions in a modeling framework where a random number of informed and liquidity traders, each buying or selling a single unit, chooses between a DM and a CN. In their model, dealers receive multiple single--unit orders and cannot distinguish between the informed and the liquidity orders. Hence, their bid--ask spread corresponds to each order's market impact. \cite{DDH} consider the allocation of order flow between a CN and a DM when trading in both markets takes place at exogenously given prices. They show that small differences in the traders' preferences generate a unique equilibrium, in which patient traders use the CN whereas impatient traders submit orders directly to the DM. Due to the fact that prices are exogenous, the equilibrium market share of the CN is fully determined by the price differential between the markets, together with the distribution of the traders' liquidity preferences. In contrast with the two preceding works, where interactions between DMs and CNs are studied, \cite{Buti} take an alternative approach and analyze a dynamic model with single--unit traders who may place market or limit orders in a limit--order book (LOB). Alternatively, should they have access to it, the agents may place an immediate--or--cancel order in a dark pool (DP). Agents differ in their valuation of the asset and their access to the DP. The authors find that, whenever the LOB is illiquid, the presence of a DP leads to widening spreads and to a decline in the book's depth; thus, to a deterioration of market quality and welfare. This, in spite of the fact that, on average, trade volume increases. These negative effects are generally decreasing in the depth of the LOB. The take--home message offered is that, when studying interacting LOBs and DPs, there is a trade--off between trade and volume creation on the one hand, and book depth and spread on the other one.
In terms of the aforementioned effects of the presence of the CN, whereas increases in the number of participating agents and welfare are generic, the narrowing of the spread does not seem to be so. For instance, in \cite{Buti}, the presence of a DP results in a migration of liquidity and hence an increasing spread --- an effect that cannot appear in our setting where all traders are liquidity takers. Contrastingly, \cite{Buti_Data} provides empirical evidence that high DP activity is associated with narrower spreads, but no causal link is established. In~\cite{Zhu2014}, asymmetric information divides agents into informed and (uninformed) liquidity traders. When a CN complements an existing DM, the spread widens because the liquidity traders move to the CN, whereas the informed ones, who tend to be on one side of the market, prefer the DM. In our setting, agent heterogeneity corresponds to different endowments or preferences, but there is no distinction at the level of access to information. Hence, the spread originates due to the adverse--selection problem faced by the dealer.
The remainder of this article is structured as follows. Our model and main results are presented in Section \ref{sec:Model}. Existence of a solution to the dealer's optimization problem is established in Section \ref{sec:ExistenceSol}. Section \ref{sec:ImpactSpread} studies the impact of a CN on the spread. Section \ref{sec:ExistenceEqui} establishes our result regarding the existence of equilibrium price schedules. A specific application to a portfolio--liquidation problem with dark--pool trading is analyzed in Section \ref{sec:DPtrading} and Section~\ref{sec:Conclusions} concludes.
\section{\large{Model and main results}}\label{sec:Model}
\noindent We consider a quote--driven market for an asset, in which a risk--neutral \textit{dealer} engages a group of privately--informed \textit{traders}\footnote{Our dealer is called the {\sc principal} in the contract--theory jargon and the traders are usually referred to as the {\sc agents}.}. The dealer market (DM for short) is described by a pricing schedule $T:\mathbb{R}\to\mathbb{R}.$ In other words, $q$ units of the asset are offered to be traded, on a take--it--or--leave--it basis, for the amount $T(q)$. For $q\in\mathbb{R},$ we refer to the pair $\big(q, T(q)\big)$ as a \textit{contract}. We assume that $T(0)=0$ and that $T$ is absolutely continuous. Thus, we may write \begin{equation*} T(q) = \int_0^q t(s)ds,\quad q\geq 0, \end{equation*} and analogously for negative values of $q.$ Here $t(s)$ is the marginal price at which the $s$--th unit is traded. As we shall see below, pricing schedules are, in general, not differentiable at zero. Hence, for a particular schedule $T$ the \textit{spread} is \begin{equation*}
\mathcal{S}(T) := |T'(0_+) - T'(0_-)|=|t(0_+) - t(0_-)|, \end{equation*} where $t(0_-)$ and $t(0_+)$ are the \textit{best--bid} and \textit{best--ask} prices, respectively. We denote by $C:\mathbb{R}\to\mathbb{R}$ the dealer's inventory or risk costs associated with a position $q$, e.g. the impact costs of unwinding a portfolio of size $q$ in a limit order book. We assume that the mapping $q\mapsto C(q)$ is strictly convex, coercive and that it satisfies $C(0) = 0.$
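As a purely illustrative specification (not used in the sequel), consider the marginal--price schedule $t(s)=c\,s+\frac{\varsigma}{2}\,\mathrm{sgn}(s)$ with constants $c,\varsigma>0.$ Then \begin{equation*} T(q)=\frac{c}{2}\,q^2+\frac{\varsigma}{2}\,|q|,\qquad t(0_+)=\frac{\varsigma}{2},\qquad t(0_-)=-\frac{\varsigma}{2}, \end{equation*} so that $\mathcal{S}(T)=\varsigma.$ Similarly, the quadratic inventory cost $C(q)=\frac{\lambda}{2}\,q^2$ with $\lambda>0$ (the examples of Section~\ref{sec:ImpactSpread} use $\lambda=1$) is strictly convex, coercive and satisfies $C(0)=0.$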
The traders' idiosyncratic characteristics are represented by the index $\theta$ that runs over a closed interval $\Theta:=[\underline{\theta}, \overline{\theta}],$ called the set of \textit{types}. We assume that zero belongs to the interior of $\Theta.$ Saying that a trader's type is $\theta$ means that if he trades $q$ shares for $T(q)$ dollars his utility is $u(\theta, q) - T(q),$ where \begin{equation*} u(\theta, q):=\theta \psi_1(q) + \psi_2(q) \end{equation*} and $\psi_1,\psi_2:\mathbb{R}\to\mathbb{R}$ are smooth functions that satisfy $\psi_1(0)=\psi_2(0)=0,$ $\psi_1$ is strictly increasing and $C(q)-\psi_2(q)\geq 0$ holds for all $q\in\mathbb{R}$. Thus far, with our choice of preferences the traders enjoy a type--independent \textit{reservation utility} of zero, should they decide to abstain from trading in the DM. Such an action is commonly referred to as the agents choosing their \textit{outside option}. As $C(0) = 0$, providing $\big(0, T(0)\big)$ is costless to the dealer and, since $\big(0, T(0)\big)$ yields all agents their reservation utility, in the absence of any other trading opportunity, we may equate the contract $(0,0)$ to the traders' outside option.
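For instance, the specifications used in Examples~\ref{MussaRosen} and~\ref{ex1} below, namely $\psi_1(q)=q$ with $\psi_2\equiv 0$ and $\psi_1(q)=q$ with $\psi_2(q)=\frac{1}{4}q^2,$ respectively, together with $C(q)=\frac{1}{2}q^2,$ satisfy these requirements: $\psi_1$ is strictly increasing, $\psi_1(0)=\psi_2(0)=0$ and $C(q)-\psi_2(q)$ equals $\frac{1}{2}q^2$ and $\frac{1}{4}q^2,$ respectively, hence is non--negative.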
Besides participating in the DM, each trader has the possibility to submit an order to a \textit{crossing network} (CN). The latter is an alternative trading venue where trades take place at fixed bid/ask prices $\pi:=\big(\pi_-, \pi_+\big)$, but where execution might not be guaranteed.\footnote{In other words, the crossing network presents agents with possibly better prices at the cost of an uncertain execution. CN trading often benefits agents who intend to unwind large positions, which might result in a price impact.} The possibility of trading in the crossing network modifies the traders' outside option to the extent that now they may choose between abstaining from all trading and earning zero or participating in the CN if the corresponding expected utility is non--negative. For a specific $\pi,$ the quantity $u_0(\theta; \pi)\geq 0$ represents the expected utility of the $\theta$--type investor who decides to take his (now extended) outside option. In the sequel we indulge in a slight abuse of the language and also refer to $u_0(\cdot; \pi)$ as the agents' outside option(s). Following \cite{DDH,HM} we focus on the case where a trader chooses exclusively between his outside option and trading in the DM, i.e. we do not allow for simultaneous participation in the DM and the CN. Initially we take $\pi$ as given, but later we analyze the case where it is endogenously determined through the interaction between the DM and the CN via the feedback of the spread in the former into the pricing in the latter. We work under the following assumption:\footnote{Once an assumption has been made, we consider it to be standing for the remainder of the paper.}
\begin{Assumption}\label{ass:cost of access} There is a fixed cost $\kappa>0$ of accessing the outside option such that, for all $\pi\in\mathbb{R}^2,$ the function $u_0(\cdot; \pi)$ can be written as $ u_0(\cdot; \pi) =\max\big\{\widetilde{u}_0(\cdot; \pi) - \kappa, 0\big\}, $ where $\widetilde{u}_0(0; \pi) = 0.$ \end{Assumption}
Trading over the DM is anonymous; the dealer is unable to determine a trader's type before he engages the latter. The only ex--ante information the dealer has is the distribution of the individual types over $\Theta,$ which is described by a density $f:\Theta\to\mathbb{R}_+.$ In the sequel we specify the traders' and the dealer's optimization problems and analyze the impact of the CN on the DM, especially on its spread.
\subsection{\large{The traders' problem}}\label{ssec:Agents}
Until further notice we consider $\pi$ to be fixed. The problem of a trader of type $\theta$ is to determine, for a given pricing schedule $T,$ \begin{equation*} q_m(\theta) := \text{argmax}_{q\in\mathbb{R}}\Big\{u(\theta, q) - T(q)\Big\} \end{equation*} and then choose, for $q_m\in q_m(\theta),$ between his \textit{indirect utility} $v(\theta):=u\big(\theta, q_m\big) - T\big(q_m\big)$ from trading in the DM and his outside option $u_0(\theta; \pi).$ As the supremum of affine functions, the indirect utility function is convex.
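Spelling the standard argument out: for each fixed $q\in\mathbb{R}$ the mapping $\theta\mapsto u(\theta,q)-T(q)=\theta\,\psi_1(q)+\psi_2(q)-T(q)$ is affine in $\theta,$ and \begin{equation*} v(\theta)=\sup_{q\in\mathbb{R}}\big\{\theta\,\psi_1(q)+\psi_2(q)-T(q)\big\}, \end{equation*} so $v$ is convex as a pointwise supremum of affine functions of $\theta.$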
The choice of a pricing schedule $T$ induces a partition of the type space. We say that a trader of type $\theta$ \textit{participates} in the DM if $ v(\theta)\geq u_0(\theta; \pi), $ assuming that ties are broken in the dealer's favor. Conversely, we say that a trader of type $\theta$ \textit{is excluded} from trading in the DM if $ v(\theta) < u_0(\theta; \pi). $ For a given schedule $T,$ we denote the set of excluded types by $\Theta_e(T;\pi).$ Observe that, in the absence of a CN, there is no loss of generality in assuming that all traders participate. We say that a trader of type $\theta$ is \textit{fully serviced} if he earns strictly positive profits from interacting with the dealer.
\subsection{\large{The dealer's problem}}\label{ssec:BenchmarkProblemMM}
The \textit{Revelation Principle} (see, e.g. Meyerson~\cite{Meyerson:91}) says that, when studying Nash--equilibrium outcomes in adverse-selection games such as ours, there is no loss of generality in focusing on direct--revelation mechanisms, i.e. those mechanisms where the set of types indexes the contracts. Furthermore, from the \textit{Taxation Principle} (see, e.g. Rochet~\cite{R:85}) there is also no loss of generality in writing $\tau(\theta)$ instead of $T(q(\theta)),$ where $\tau:\Theta\to\mathbb{R}$ is an absolutely continuous function. From this point on we shall, therefore, study our principal--agent game through books of the form $\big\{\big(q(\theta), \tau(\theta)\big),\theta\in\Theta\big\}$ and drop $T$ from the specification of the indirect--utility functions. We also write $\Theta_e(q,\tau;\pi)$ instead of $\Theta_e(T;\pi)$ for the set of excluded types.
At the onset, a trader of type $\theta$ could misrepresent his type by choosing a contract $\big(q(\widetilde{\theta}), \tau(\widetilde{\theta})\big),$ with $\widetilde{\theta}\neq \theta.$ The dealer strives to avoid this situation, since he wants to exploit the information contained in the density of types. This requires that he offers \textit{incentive--compatible} books, i.e. those that satisfy \begin{equation*} \max_{\widetilde{\theta}\in\Theta}\big\{u\big(\theta, q(\widetilde{\theta})\big) - \tau(\widetilde{\theta})\big\} = u\big(\theta, q(\theta)\big) - \tau(\theta). \end{equation*} In the presence of an incentive--compatible book, the contract that yields a trader of type $\theta$ his indirect utility is precisely the one the dealer has designed for him.
Since the dealer is risk neutral, his goal is to maximize his expected income from engaging the traders. Taking into account the impact of the CN on the traders' optimal actions, his problem is to devise $(q^*, \tau^*)$ so as to solve the problem \begin{equation*} \begin{array}{cc}
\mathcal{P}(\pi) := & \left\{
\begin{array}{l}
\sup_{(q, \tau)} \int_{\Theta_e^c(q, \tau;\pi)}\Big(\tau(\theta) - C\big(q(\theta)\big)\Big)f(\theta)d\theta\\
\text{s.t. }\\
(q(\theta), \tau(\theta))\in\text{argmax}_{\widetilde{\theta}\in\Theta}\big\{u\big(\theta, q(\widetilde{\theta})\big)-\tau(\widetilde{\theta})\big\},\\
\tau \text{ is absolutely continuous}.
\end{array}
\right. \end{array} \end{equation*}
Due to the \textit{Envelope Theorem}, if a contract $\big\{(q(\theta), \tau(\theta)\big),\theta\in\Theta\big\}$ is incentive compatible, then $\psi_1(q(\theta))$ belongs to the subdifferential $\partial v(\theta)$. Since for almost all $\theta\in\Theta$ it holds that $\partial v(\theta)=v'(\theta)$ and $\psi_1$ is strictly increasing, we have for almost all $\theta \in \Theta$ that \begin{equation}\label{eq:QualGrad} q(\theta) = \psi_1^{-1}\big(v'(\theta)\big). \end{equation} Therefore, starting from a convex indirect--utility function we can recover, for almost all types, the quantities in the incentive--compatible book that generated it. Furthermore, the indirect utility function may be written as \begin{equation}\label{eq:IndUt} \begin{split} v(\theta) & = \theta \psi_1\big(\psi_1^{-1}\big(v'(\theta)\big)\big) + \psi_2\big(\psi_1^{-1}\big(v'(\theta)\big)\big) -\tau(\theta) \\ & = \theta\,v'(\theta) + \psi\big(v'(\theta)\big) - \tau(\theta), \end{split} \end{equation} where $\psi:=\psi_2\circ \psi_1^{-1}.$ It follows from Eqs.~\eqref{eq:QualGrad} and~\eqref{eq:IndUt} that the traders' indirect utility function contains all the information about the quantities and the pricing schedule, which allows us to write $\Theta_e^c(v;\pi)$ instead of $\Theta_e^c(q, \tau;\pi).$ In particular, introducing the functions \begin{equation*}
\widetilde{K}(q):=C\big(\psi_1^{-1}(q)\big) - \psi_2\big(\psi_1^{-1}(q)\big) \quad\text{and}\quad i(\theta, v, q):=\theta\cdot q - v - \widetilde{K}(q) \end{equation*} and denoting by $\mathcal{C}$ the cone of all real--valued convex functions over $\Theta$, we can restate the dealer's problem as \begin{equation*}
\mathcal{P}(\pi) =\sup_{v\in\mathcal{C}} \int_{\Theta_e^c(v;\pi)} i\big(\theta,v(\theta),v'(\theta)\big)f(\theta) d\theta. \end{equation*} We prove in Theorem~\ref{thm:Main1} below that, under suitable assumptions, Problem $\mathcal{P}(\pi)$ admits a solution. The latter is, in fact, quasi--unique in the sense that on the set of participating types the solution is indeed unique. However, agents are excluded by offering their types any incentive--compatible, indirect--utility function that lies below $u_0.$ In other words, there is no uniqueness on the set of excluded types. From the agents' point of view there is no ambiguity: they either trade with the specialist or they take their outside option. The non--uniqueness is also a non--issue for the specialist, since it only appears in subdomains of the type space that he does not access. With this in mind, in the sequel we denote by $v(\cdot;\pi)$ ``the'' solution to Problem $\mathcal{P}(\pi)$.
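To fix ideas (an illustration only, anticipating the specification of Example~\ref{MussaRosen}): if $\psi_1(q)=q,$ $\psi_2\equiv 0$ and $C(q)=\frac{1}{2}q^2,$ then $\widetilde{K}(q)=\frac{1}{2}q^2$ and $i(\theta,v,q)=\theta q-v-\frac{1}{2}q^2,$ whose pointwise maximizer over $q,$ for fixed $(\theta,v),$ is $q=\theta$ with value $\frac{1}{2}\theta^2-v.$ The coupling $q(\theta)=\psi_1^{-1}\big(v'(\theta)\big)$ and the requirement that $v$ be convex are what turn this family of pointwise maximizations into a constrained variational problem.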
\begin{Assumption}\label{ass:qc} The functions $\psi_1, \psi_2$ and $C$ are such that $\widetilde{K}$ is strictly convex, coercive, continuously differentiable and it satisfies $\widetilde{K}'(0)=0.$ \end{Assumption}
Determining the set of types who do participate but who earn zero profits is essential to our analysis, since it is precisely at the \textit{boundary types} where $t(0_-)$ and $t(0_+)$ are determined. We prove in Lemma~\ref{lemma:tradingSB} that, by virtue of Assumption~\ref{ass:cost of access}, these limits are always well defined. For any $v\in\mathcal{C}$, we shall refer to \begin{equation*}
\Theta_0(v):=\big\{\theta\in\Theta\,|\,v(\theta)=0\big\} \end{equation*} as the set of \textit{reserved traders}. Whenever we refer to the reserved set corresponding to the solution $v(\cdot;\pi)$ to $\mathcal{P}(\pi)$ we write $\Theta_0(\pi).$ We prove in Lemma~\ref{lm:ZeroatZero} that there is no loss of generality in assuming that any feasible $v\in\mathcal{C}$ satisfies $v(0)=0;$ thus, $\Theta_0(v)\neq\emptyset.$
\begin{remark}\label{rmk:wellpossed} A well defined spread requires $\Theta_0(\pi)$ to be a proper interval $[\underline{\theta}_0(\pi), \overline{\theta}_0(\pi)],$ which will follow from Assumption~\ref{ass:cost of access}, and that there exists $\epsilon>0$ such that $(\underline{\theta}_0(\pi)-\epsilon, \underline{\theta}_0(\pi))$ and $(\overline{\theta}_0(\pi), \overline{\theta}_0(\pi)+\epsilon)$ belong to the set of fully--serviced traders. The existence of such an $\epsilon$ is proved in Lemma~\ref{lemma:tradingSB}. Economically, this condition means that the CN is not beneficial for low--type traders. We shall encounter several instances where the proofs of our results concern conditions on points to the left of $\underline{\theta}_0(\pi)$ or to the right of $\overline{\theta}_0(\pi)$ that are analogous. So as to streamline the said proofs, whenever we find ourselves in one of these ``either--or'' situations, we deal only with the positive case. \end{remark}
\noindent We are now ready to state the first main result of this paper, whose proof is given in Section~\ref{sec:ExistenceSol} below.
\begin{theorem} \label{thm:Main1} Problem $\mathcal{P}(\pi)$ admits a solution, which is unique on the set of participating types. \end{theorem}
Our second main result concerns the effect of the CN on the spread and the set of participating traders if, disregarding negative expected unwinding costs, the dealer can match the CN.
\begin{Assumption}\label{ass:matching} There exists an incentive compatible book $\big\{(q_c(\theta), \tau_c(\theta) \big), \theta\in\Theta\big\}$ such that for almost all $\theta\in\Theta$ it holds that $ u\big(\theta, q_c(\theta) \big) - \tau_c(\theta)= u_0(\theta; \pi). $ \end{Assumption}
Assumption~\ref{ass:matching} implies that $u_0(\cdot; \pi)$ is also a convex function. The case where $u_0(\cdot; \pi)$ is concave is somewhat simpler, since it boils down to exclusion without matching.
\noindent The following theorem analyzes the impact of the CN on the DM and the traders' welfare.
\begin{theorem} \label{thm:Main2} For a given price $\pi=(\pi_-, \pi_+)$ let $\mathcal{S}_m$ and $\mathcal{S}_o$ be the spreads with and without the presence of the crossing network and $v_o$ and $v(\cdot;\pi)$ the corresponding indirect--utility functions, respectively. In the presence of the crossing network \begin{enumerate}
\item fewer types are reserved, i.e. $\Theta_0(v_o)\supseteq \Theta_0(\pi).$ Furthermore, the inclusion is strict if there exists $\theta\in\Theta$ such that $u_0(\theta;\pi)>v_o(\theta);$
\item if the types are uniformly distributed ($f\equiv(\overline{\theta}-\underline{\theta})^{-1}$) the spread narrows, i.e. $\mathcal{S}_o \geq \mathcal{S}_m;$
\item the typewise welfare increases, i.e. $v_o(\theta)\leq v(\theta;\pi)$ for all $\theta\in\Theta.$
\end{enumerate} \end{theorem}
In the sequel we use the subindexes $``m"$ and $``o"$ to distinguish structures or quantities with and without a CN, respectively.
\subsection{\large{Equilibrium}}\label{ssec:Equilibrium}
It is natural to assume that pricing in the DM has an impact on the pricing schedule $\pi.$ For example, the CN could be a \textit{dark pool}, where trading takes place at the best--bid and best--ask prices of the primary market. We analyze such an example, within a portfolio--liquidation framework, in Section~\ref{sec:DPtrading}. The pecuniary interaction between the DM and the CN, however, is not unidirectional if the dealer anticipates the effect that his choice of book structure has on the CN. Our main focus is the impact of the CN on the spread in the DM. Specifically, if we denote by $t(0;\pi):=\big(t(0_-;\pi), t(0_+;\pi)\big)$ the best bid--ask prices in the DM for a given CN price schedule $\pi,$ then we call $\pi^*$ an \textit{equilibrium price} if $\pi^* = t(0;\pi^*). $
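A fixed point of the best--quote map can, at least in principle, be searched for by damped iteration. The following elementary sketch (included for illustration only; it is not part of the formal analysis) assumes the availability of a routine \texttt{best\_quotes} that solves $\mathcal{P}(\pi)$ numerically and returns $\big(t(0_-;\pi), t(0_+;\pi)\big);$ Theorem~\ref{thm:Main3} asserts the existence of a fixed point, not the convergence of this particular scheme.
\begin{verbatim}
# Damped fixed-point iteration for pi* = t(0; pi*).  Here `best_quotes`
# is a hypothetical placeholder for a numerical solver of the dealer's
# problem P(pi) that returns the pair (best bid t(0-), best ask t(0+)).
from typing import Callable, Tuple

Quotes = Tuple[float, float]

def equilibrium_quotes(best_quotes: Callable[[Quotes], Quotes],
                       pi0: Quotes, damping: float = 0.5,
                       tol: float = 1e-8, max_iter: int = 1000) -> Quotes:
    bid, ask = pi0
    for _ in range(max_iter):
        new_bid, new_ask = best_quotes((bid, ask))
        nxt_bid = (1 - damping) * bid + damping * new_bid
        nxt_ask = (1 - damping) * ask + damping * new_ask
        if max(abs(nxt_bid - bid), abs(nxt_ask - ask)) < tol:
            return nxt_bid, nxt_ask
        bid, ask = nxt_bid, nxt_ask
    return bid, ask  # best effort if the scheme has not converged
\end{verbatim}
The damping factor is included because the map $\pi\mapsto t(0;\pi)$ need not be a contraction.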
We make the following natural assumption on the impact of $\pi$ on the traders' outside option.
\begin{Assumption}\label{ass:monotone} Let $\pi_1\le \pi_2,$ where ``$\leq$'' is the lexicographic order in $\mathbb{R}^2,$ then for all $\theta\in\Theta$ it holds that $ u_0(\theta;\pi_1)\geq u_0(\theta;\pi_2). $ Furthermore, we assume that there exists $\big(\underline{\pi}_-, \overline{\pi}_+\big)\in\mathbb{R}^2$ such that $u_0(\cdot;\pi)\leq 0$ for all $(\pi_-,\pi_+)$ such that ${\pi}_-\leq\underline{\pi}_-$ and $\overline{\pi}_+\leq{\pi}_+.$ \end{Assumption}
\noindent The following is our main result on the existence of an equilibrium price.
\begin{theorem} \label{thm:Main3} If types are uniformly distributed, then the mapping $\pi\mapsto t(0; \pi)$ has a fixed point. \end{theorem}
Summarizing, we have that the dealer can correctly anticipate the movements in prices in the CN when he designs the optimal pricing schedule for the DM. Furthermore, the presence of the CN is beneficial in terms of liquidity, market participation and the traders' welfare.
\begin{remark} The uniformity of the distribution of types in Theorems~\ref{thm:Main2} and~\ref{thm:Main3} can be relaxed, which is something we postpone to the corresponding proofs, where the required notation is introduced. \end{remark}
\section{\large{Existence of a solution to Problem $\mathcal{P}(\pi)$}}\label{sec:ExistenceSol}
In this section we prove the existence of a solution to the dealer's problem in the presence of a CN. Even though, strictly speaking, this result is a particular case of Theorem 4.4 in~\cite{Page}, for the reader's convenience we present a proof in our simpler setting. Some of the arguments are somewhat standard, but we include them for completeness. The first important result that we require is that the dealer's optimal choices will lead to him never losing money on types that participate.
\begin{proposition}\label{prop:PosProf} If $(q^*, \tau^*):\Theta\to\mathbb{R}^2$ is an optimal allocation, then for all participating types it holds that $\tau^*(\theta) - C\big(q^*(\theta)\big)\geq 0.$ \end{proposition}
\noindent\begin{Proof} Assume the contrary, i.e. that the set \begin{equation*}
\widetilde{\Theta}:=\big\{\theta\,|\, v(\theta;\pi)\ge u_0(\theta;\pi), \tau^*(\theta)<C\big(q^*(\theta)\big)\big\}, \end{equation*} where $v(\theta;\pi)=u\big(\theta, q^*(\theta)\big) - \tau^*(\theta),$ has positive measure. Define a new pricing schedule via \begin{equation*} \widetilde{\tau}(\theta):=\max\big\{\tau^*(\theta), C\big(q^*(\theta)\big)\big\}. \end{equation*} The incentives for types in $\widetilde{\Theta}^c$ do not change, since their prices remain unchanged, whereas prices for others have increased. Profits corresponding to trading with types in $\widetilde{\Theta}$ increase to zero. As a consequence, the dealer's welfare strictly increases, which violates the optimality of $(q^*, \tau^*).$ \end{Proof}
Together with Assumption~\ref{ass:qc}, Proposition~\ref{prop:PosProf} allows us to restrict the feasible set of the dealer's problem to a compact one. We prove this in several steps.
\begin{lemma}\label{lm:ZeroatZero} If $v:\Theta\to\mathbb{R}$ is a non--negative, convex function that solves $\mathcal{P},$ then $v(0)=0.$ \end{lemma}
\noindent\begin{Proof} Assume that $v\in\mathcal{C}$ solves $\mathcal{P}$ and $v(0)>0.$ This implies that $\psi_2\big(q(0)\big) - \tau(0) \geq 0.$ Since, from Assumption~\ref{ass:cost of access}, a trader of type $\theta=0$ has no access to a profitable outside option, then he participates. From Proposition~\ref{prop:PosProf} it must then hold that $\tau(0)\geq C\big(q(0)\big)$ which in turn implies that $\psi_2\big(q(0)\big)\geq C\big(q(0)\big).$ This relation, however, can only hold for $q(0)=0,$ which implies that $\tau(0)=v(0)=0.$ \end{Proof}
\begin{lemma}\label{lm:BoundedQ}
There exists $\overline{q}\geq 0$ such that if $v$ is feasible, then $|\partial v|\leq\overline{q}.$ \end{lemma}
\noindent\begin{Proof} From Assumption~\ref{ass:qc} and the compactness of $\Theta$ we have that the mapping $ q\mapsto i(\theta, v, q) $
tends to $-\infty$ as $|q|\to\infty$ uniformly on $\Theta$ for $v\geq 0.$ From Proposition~\ref{prop:PosProf}, and since $i\big(\theta, v(\theta), v'(\theta)\big)=\tau(\theta)-C\big(q(\theta)\big)$ by Eq.~\eqref{eq:IndUt}, the term $i\big(\theta, v(\theta), v'(\theta)\big)$ must be non--negative for all participating types, which concludes the proof. \end{Proof}
From Lemmas~\ref{lm:ZeroatZero} and~\ref{lm:BoundedQ} we have that the quantity $\max_{\theta\in\Theta}\big\{u_0(\theta;\pi)\big\} + \overline{q}\|\Theta\|$ is an upper bound for any feasible choice of $v,$ which yields the following
\begin{corollary}\label{cor:UnifBound} The feasible set $\mathcal{A}\subset\mathcal{C}$ of Problem $\mathcal{P}$ is uniformly bounded and uniformly equicontinuous. \end{corollary}
\noindent\begin{Proof} A uniform bound is $\max_{\theta\in\Theta}\big\{u_0(\theta;\pi)\big\} + \overline{q}\|\Theta\|.$ Lemma~\ref{lm:BoundedQ} guarantees that for any $v\in\mathcal{A}$ it holds that $|\partial v|\leq\overline{q}.$ In other words, $\mathcal{A}$ is composed of convex functions whose subdifferentials are uniformly bounded, hence $\mathcal{A}$ is uniformly equicontinuous. \end{Proof}
Notice that, when it comes to determining quantities and prices for trader types who do participate, Proposition~\ref{prop:PosProf} results in the dealer having to solve the problem \begin{equation*} \begin{array}{cc}
\widetilde{\mathcal{P}}(\pi):= & \left\{
\begin{array}{ll}
\sup_{v\in\mathcal{A}} \int_{\Theta}\big(i\big(\theta, v(\theta), v'(\theta)\big)\big)_+ f(\theta)d\theta\\
\text{s.t. } v(\theta)\geq u_0(\theta;\pi)\text{ for all } \theta\in\Theta.
\end{array}
\right. \end{array} \end{equation*} The last auxiliary result that we need is the following proposition, whose proof is a direct consequence of Fatou's Lemma, together with Lemmas~\ref{lm:ZeroatZero} and~\ref{lm:BoundedQ}.
\begin{proposition}\label{prop:USC} The mapping $v\mapsto\int_{\Theta}\big(i(\theta, v(\theta), v'(\theta))\big)_+ f(\theta)d\theta$ is upper semi--continuous in $\mathcal{A}$ with respect to uniform convergence. \end{proposition}
\noindent We are now ready to prove our first main result:
\noindent\textbf{Proof of Theorem \ref{thm:Main1}:} Assume that $\mathcal{A}\bigcap\big\{v\in\mathcal{C} | v(\cdot)\geq u_0(\cdot ; \pi)\big\}$ is non--empty and consider a maximizing sequence $\big\{\widetilde{v}_n\big\}_{n\in\mathbb{N}}$ of Problem $\widetilde{\mathcal{P}}(\pi).$ From Corollary~\ref{cor:UnifBound} we have that, passing to a subsequence if necessary, there exists $\widetilde{v}\in\mathcal{A}$ such that $\widetilde{v}_n\to\widetilde{v}$ uniformly. A direct application of Proposition~\ref{prop:USC} yields that $\widetilde{v}$ is a solution to $\widetilde{\mathcal{P}}(\pi).$ To finalize the proof we must construct from $\widetilde{v}$ a solution to Problem $\mathcal{P}(\pi).$ To this end, let us define the sets \begin{equation*}
\Theta_-:=\big\{\theta\in\Theta | i\big(\theta, \widetilde{v}(\theta), \widetilde{v}'(\theta)\big)<0\big\}\quad\text{and}\quad \Theta_+:=\Theta_-^c. \end{equation*} It is well known that if a sequence of convex functions converges uniformly (to a convex function), then there is also uniform convergence of the derivatives wherever they exist, which is almost everywhere. This fact, together with the continuity of the mappings $\theta\mapsto\widetilde{v}(\theta)$ and $(\theta,v, q)\mapsto i(\theta,v, q),$ implies that $\Theta_-$ is the union of a disjoint set of open intervals: \begin{equation*} \Theta_-=\bigcup_{i=1}^{\infty}(a_i, b_i). \end{equation*} Define, for each $i\geq 1,$ \begin{equation*}
\widetilde{v}_{a,i}:=\inf\big\{q | q\in\partial \widetilde{v}(a_i)\big\}\quad\text{and}\quad \widetilde{v}_{b,i}:=\sup\big\{q | q\in\partial \widetilde{v}(b_i)\big\} \end{equation*} and consider the support lines to $\text{graph}\{\widetilde{v}\}$ at $a_i$ and $b_i$ given by \begin{equation*} l_i(\theta)=\widetilde{v}(a_i)+\widetilde{v}_{a,i}(\theta-a_i)\quad\text{and}\quad L_i(\theta)=\widetilde{v}(b_i)+\widetilde{v}_{b,i}(\theta-b_i), \end{equation*} respectively. Let $c_i\in(a_i, b_i)$ be, for each $i\geq 1,$ the unique solution to the equation $l_i(\theta)=L_i(\theta)$ and define on $(a_i, b_i)=:\Theta_i$ \begin{equation*} \begin{array}{cc}
v_i^*(\theta):= & \left\{
\begin{array}{ll}
l_i(\theta) & \theta\leq c_i;\\
L_i(\theta) & \theta> c_i.
\end{array}
\right. \end{array} \end{equation*} Finally define \begin{equation*} \begin{array}{cc}
v^*(\theta):= & \left\{
\begin{array}{ll}
\widetilde{v}(\theta) & \theta\in\Theta_+;\\
v_i^*(\theta) & \theta\in\Theta_i, i\in\mathbb{N},
\end{array}
\right. \end{array} \end{equation*} then $v^*$ is a solution to Problem $\mathcal{P}(\pi)$ and $\Theta_e(v^*)=\Theta_-,$ which concludes the proof.
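To illustrate the support--line construction on a single excluded interval (a stylized example with no particular economic content): if $\widetilde{v}(\theta)=\frac{1}{2}\theta^2$ and $\Theta_-=(a_1,b_1)=(-1,1),$ then $\widetilde{v}_{a,1}=-1,$ $\widetilde{v}_{b,1}=1,$ the support lines are $l_1(\theta)=-\theta-\frac{1}{2}$ and $L_1(\theta)=\theta-\frac{1}{2},$ which intersect at $c_1=0,$ so that $v_1^*(\theta)=|\theta|-\frac{1}{2}$ on $(-1,1).$ The one--sided slopes of $v^*$ at $\pm 1$ coincide with those of $\widetilde{v},$ so the pasted function is globally convex and satisfies $v^*\leq\widetilde{v}$ on $(-1,1).$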
\begin{remark}\label{rem:QuasiUniqueness} If the specialist can profitably match all agents' outside option, then the quasi--uniqueness of a solution to Problem $\mathcal{P}(\pi)$ is in fact uniqueness and it follows directly from Assumption~\ref{ass:qc}. Indeed, in such a case \begin{equation*} \big(i(\theta, v(\theta), v'(\theta))\big)_+=\big(i(\theta, v(\theta), v'(\theta))\big) \end{equation*}
and problem $\widetilde{\mathcal{P}}(\pi)$ is one of maximizing a strictly concave, coercive functional over a convex set that is closed with respect to uniform convergence. In the general case, we construct the quasi--unique solution in Section~\ref{sec:ModelCN}. Assumption~\ref{ass:qc} remains crucial, since it guarantees that the maximization problems through which we define the optimal quantities have unique maximizers. \end{remark}
\section{\large{The impact of a crossing network}}\label{sec:ImpactSpread}
{In this section we look at the impact that a CN has on the spread, on participation and on the traders' welfare. In order to do so, we provide a characterization of the solution to Problem $\mathcal{P}(\pi).$ It should be noted that, given the restriction of candidate solutions to $\mathcal{C},$ we cannot simply make use of the Euler--Lagrange equations to solve the variational problem, since the said equations are only satisfied when the constraints do not bind.}
\subsection{\large{A benchmark without a CN}}\label{sec:ModelBench}
We first analyze the benchmark case where the traders do not have access to a CN. The corresponding dealer's problem is denoted by $\mathcal{P}_o.$ Recall that all trader types have a zero reservation utility, which the dealer is able to match costlessly by offering the contract $(0, 0).$ The point of making this normalization is to simplify the constraints in the dealer's optimization problem. This will not be possible in the presence of a CN since, even if the dealer were able to match the utility that investors enjoy if they trade in the CN, this would in general not be costless.
We take a Lagrange--multiplier approach to provide a characterization of the solution to Problem $\mathcal{P}_o.$ To this end, let us introduce the following definition: \begin{equation*} I[v] := \int_{\Theta}i\big(\theta, v(\theta), v'(\theta)\big)f(\theta)d\theta. \end{equation*} Let $BV_+(\Theta)$ be the space of non--negative functions of bounded variation $\gamma:\Theta\to\mathbb{R}_+,$ which we place in duality with $C(\Theta, \mathbb{R}),$ the space of real--valued, continuous functions on $\Theta,$ via the standard pairing \begin{equation*} \langle v, \gamma\rangle :=\int_{\Theta} v(\theta)d\gamma(\theta) \end{equation*} for $v\in C(\Theta, \mathbb{R}),$ where $d\gamma$ is the distributional derivative of $\gamma.$ Furthermore, it follows from Pontryagin's Maximum Principle and the fact that $f$ is a probability density function that there is no loss of generality in assuming that $\gamma$ is absolutely continuous and that $\gamma(\overline{\theta}) = 1.$ The Lagrangian for the dealer's problem is \begin{equation*}\label{eq:Lagrangian} \mathcal{L}(v, \gamma) := I[v]+\langle v, \gamma\rangle,\quad v\in\mathcal{C}, \end{equation*} with corresponding Karush--Kuhn--Tucker conditions \begin{equation}\label{eq:KT} \langle v, \gamma\rangle = 0\quad\text{and}\quad d\gamma(\theta)=0\Rightarrow v(\theta)>0. \end{equation} The next result is the formalization of the \textit{vox populi} saying that ``quality does not jump''. Regularity properties of the solutions to variational problems subject to convexity constraints were studied by Carlier and Lachand--Robert in~\cite{CarLach}, and their methodology can be directly adapted to prove the following result.
\begin{proposition}\label{prop:NoJumps1} If $v\in\mathcal{C}$ is a stationary point of $\mathcal{L}(v, \gamma),$ then $v\in C^1(\Theta).$ \end{proposition}
The fact that, at the optimum, the mapping $\theta\mapsto v'(\theta)$ is continuous, implies that $q$ is also a continuous function of the types. This will prove to be extremely useful, specially in the presence of a crossing network. If we integrate by parts, then $\mathcal{L}(v, \gamma)$ can be transformed into \begin{equation*} \Sigma(q, \gamma):=\int_{\Theta}\bigg(\Big(\theta+\frac{F(\theta) -\gamma(\theta)}{f(\theta)}\Big)\psi_1\big(q(\theta)\big) - \widetilde{C}\big(q(\theta)\big)\bigg)f(\theta)d\theta, \end{equation*} where $q(\theta) =\psi_1^{-1}\big(v'(\theta)\big),$ as described above, and $\widetilde{C}(q):=C(q)-\Psi_2(q).$ The idea now is to maximize the mapping \begin{equation*} q \mapsto \sigma(\theta, q, \Gamma) := \Big(\theta +\frac{F(\theta) -\Gamma}{f(\theta)}\Big)\psi_1(q) - \widetilde{C}\big(q\big) \end{equation*} pointwise, for a given fixed $\Gamma$ (in the sequel we use $\Gamma$ whenever we are dealing with an arbitrary but fixed value of $\gamma$). From Assumption~\ref{ass:qc} it follows that we can write down the unique maximizer as \begin{equation*} l(\theta, \Gamma):=K^{-1}\Big(\frac{F(\theta) + \theta\,f(\theta) -\Gamma}{f(\theta)}\Big), \end{equation*} where $K(q):=\widetilde{C}'(q)/\Psi_1'(q).$ For each $\theta\in\Theta$ and $\Gamma\in [0, 1],$ the quantity $l(\theta, \Gamma)$ is a candidate for the optimal $q(\theta)$ and convexity (or incentive compatibility) is verified if the mapping $\theta\mapsto l(\theta, \Gamma)$ is increasing. The crux is then to determine the Lagrange multiplier $\gamma.$ In the sequel we denote $\Theta_o:=\Theta_0(v_o^*),$ where $v_o^*$ solves Problem $\mathcal{P}_o.$ In other words, if $\theta\in\Theta_o$, then $q(\theta)=T(\theta)=v(\theta)=0.$
From Lemma~\ref{lm:ZeroatZero} we have that, unless $v(\underline{\theta})=0,$ the quantity $q(\underline{\theta})<0$ and the complementary--slackness condition imply that $\gamma(\theta)=0$ for $\theta\in[\underline{\theta}, \widetilde{\theta})$ for some $\widetilde{\theta}>\underline{\theta}.$ The left endpoint $\underline{\theta}_0$ of $\Theta_o$ is then determined by solving the equation \begin{equation*} K^{-1}\Big(\theta + \frac{F(\theta)}{f(\theta)}\Big)=0. \end{equation*} Furthermore, since $v$ must be convex, once $v(\hat{\theta})>0$ then $v(\theta)>0$ for all $\theta>\hat{\theta}.$ This implies that the right endpoint $\overline{\theta}_0$ of $\Theta_o$ is determined by solving the equation \begin{equation*} K^{-1}\Big(\theta-\frac{1 - F(\theta)}{f(\theta)}\Big)=0. \end{equation*} The quantities $F(\theta)/f(\theta)$ and $(1 - F(\theta))/f(\theta)$ are known as the \textit{hazard rates}, and sufficient conditions for the mapping $\theta\mapsto l(\theta, \Gamma)$ to be non--decreasing are \begin{equation*} \frac{d}{d\theta}\left(\frac{F(\theta)}{f(\theta)}\right)\geq 0 \geq \frac{d}{d\theta}\Big(\frac{1 - F(\theta)}{f(\theta)}\Big), \end{equation*} see, e.g. Biais et al.~\cite{BMR} for a discussion on this condition.
Let us assume that we have determined $\Theta_o$. What remains is then to connect the participation constraint with the spread. Differentiating Eq.~\eqref{eq:IndUt} and noting that $v'(\theta)=\psi_1(q(\theta))$ we have that \begin{equation*} \tau'(\theta) = q'(\theta)\big(\theta\psi_1'(q(\theta)) + \psi_2'(q(\theta))\big). \end{equation*} Observe that $\tau'(\underline{\theta}_0)$ and $\tau'(\overline{\theta}_0)$ are in fact $T'(0_-)$ and $T'(0_+),$ since by construction $q(\underline{\theta}_0) = q(\overline{\theta}_0)=0.$ If we define $\phi_1:= \psi_1'(0)$ and $\phi_2:=\psi_2'(0)$, then we have that the spread is given by the expressions \begin{equation}\label{eq:Spread} t(0_-) = q'(\underline{\theta}_{0}-)\big(\underline{\theta}_0\phi_1 + \phi_2\big)\quad\text{and}\quad t(0_+) = q'(\overline{\theta}_{0}+)\big(\overline{\theta}_0\phi_1 + \phi_2\big). \end{equation} Our objective in Section~\ref{sec:ModelCN} is to compare the values above to those obtained in the presence of a crossing network.
Before we proceed we present two examples so as to illustrate the use of the methodology described hitherto. The first revisits Mussa \& Rosen \cite{MR:78}. The second is slightly more advanced. We shall use it below to illustrate the complex structure of equilibrium pricing schedules and utilities in the presence of CNs.
\begin{example} \label{MussaRosen} Let us assume that $\Theta = [-r,r]$ for some $r>0$, that types are uniformly distributed and that \begin{equation*}
u(\theta,q) = \theta q. \end{equation*} We also set $C(q)=0.5\,q^2.$ By direct computation we find that $\underline{\theta}_0 = -\frac{r}{2}$ and $\overline{\theta}_0 = \frac{r}{2}.$ Since a trader of type $\theta \in \Theta_o$ is brought down to reservation utility and hence trades $q(\theta)=0,$ the expression \begin{equation*} q(\theta) = \theta + \frac{F(\theta) - \gamma(\theta)}{f(\theta)}=2\theta+r-2r\gamma(\theta) \end{equation*} implies that the Lagrange multiplier is \begin{equation*}
\gamma(\theta) = \left\{ \begin{array}{ll} 0 & \theta < \underline{\theta}_0 \\
\frac{1}{2} + \frac{\theta}{r} & \theta \in \Theta_o \\
1 & \theta > \overline{\theta}_0
\end{array} \right. . \end{equation*} In particular, $q'(\underline{\theta}_{0}-) = q'(\overline{\theta}_{0}+) = 2$ and hence $t(0_-) = -r$ and $t(0_+) = r$. Thus, the spread increases linearly in the highest/lowest type. \end{example}
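For completeness, a direct computation in Example~\ref{MussaRosen} also yields the indirect utility: with the multiplier displayed there, $q(\theta)=2\theta+r-2r\gamma(\theta)$ equals $2\theta+r$ below $\Theta_o,$ vanishes on $\Theta_o$ and equals $2\theta-r$ above it, so that integrating $v'(\theta)=\psi_1\big(q(\theta)\big)=q(\theta)$ outward from the reserved set gives $v_o(\theta)=\big((|\theta|-r/2)_+\big)^2.$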
\begin{example} \label{ex1} Let us assume that the distribution of types over $\Theta=[-1,1]$ is given by $f(\theta)=(2\theta+3)/4$ for $\theta \in [-1,0)$ and $f(\theta)=(3-2\theta)/4$ for $\theta \in [0,1]$; that $C(q)=0.5\,q^2$ and that $u(\theta, q)-\tau = \theta\cdot q + 0.25\,q^2-\tau.$ It is straightforward to show that the conditions on the Hazard rates are satisfied and that \begin{equation*} K^{-1}\Big(\theta+\frac{F(\theta)}{f(\theta)}\Big)={2}\Big[\frac{3\theta^2+6\theta+2}{2\theta+3}\Big] \quad\text{and}\quad K^{-1}\Big(\theta-\frac{1 - F(\theta)}{f(\theta)}\Big)={2}\Big[\frac{3\theta^2-6\theta+2}{2\theta-3}\Big]. \end{equation*} Furthermore, $\Theta_o\approx\big[-0.423, 0.423\big].$ For the spread, we have that $t(0_-)=q'(\underline{\theta}_0)\underline{\theta}_0\approx -1.359$ and $t(0_+)=q'(\overline{\theta}_0)\overline{\theta}_0\approx 1.359.$ In order to obtain $v$ we integrate $q$ (since $\Psi_1(q)=q$) and take into account that $v\equiv 0$ over $\Theta_o.$ We plot graph$\{v_o\}$ in Figure~\ref{fig:Benchmark}, as well as the per--type profits of the dealer.
\begin{figure}
\caption{An example without a crossing network}
\label{fig:IndUt}
\label{fig:SpecProf}
\label{fig:Benchmark}
\end{figure} \end{example}
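The approximate values reported in Example~\ref{ex1} can be cross--checked numerically from the displayed expressions alone. The following short sketch (an illustration only) locates $\overline{\theta}_0$ by bisection and evaluates the best ask via Eq.~\eqref{eq:Spread}, using $\phi_1=\psi_1'(0)=1$ and $\phi_2=\psi_2'(0)=0$ for the specification of the example.
\begin{verbatim}
# Numerical cross-check of Example ex1: theta_bar solves
# K^{-1}(theta - (1 - F(theta)) / f(theta)) = 0 on [0, 1], and the best
# ask is t(0+) = q'(theta_bar+) * theta_bar  (phi_1 = 1, phi_2 = 0).
def q_upper(theta):
    return 2.0 * (3.0 * theta ** 2 - 6.0 * theta + 2.0) / (2.0 * theta - 3.0)

def bisect(fn, lo, hi, n_iter=100):
    f_lo = fn(lo)
    for _ in range(n_iter):
        mid = 0.5 * (lo + hi)
        if fn(mid) * f_lo > 0:
            lo = mid
        else:
            hi = mid
    return 0.5 * (lo + hi)

theta_bar = bisect(q_upper, 0.0, 1.0)
h = 1e-6
q_prime = (q_upper(theta_bar + h) - q_upper(theta_bar)) / h
print(round(theta_bar, 3), round(q_prime * theta_bar, 3))  # 0.423 1.359
\end{verbatim}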
\subsection{\large{Introducing a crossing network}}\label{sec:ModelCN}
Let us now analyze the dealer's problem when the market participants have access to a CN that yields a trader of type $\theta$ the expected utility $u_0(\theta; \pi).$ In this setting it is no longer without loss of generality to assume that all traders participate in the DM, given that enforcing participation (which can be done thanks to Assumption~\ref{ass:matching}) may result in losses to the dealer. The latter may, as a consequence, choose to abstain from trading with a set of types $\Theta_e(v)$ by offering an incentive--compatible book whose corresponding indirect--utility function lies strictly under $u_0(\theta; \pi)$ for $\theta\in\Theta_e(v)$. The resulting problem for the dealer would be \begin{equation*}
\mathcal{P}(\pi) = \sup_{v\in\mathcal{C}} \int_{\Theta}\Big(\theta\,v'(t) - v(t) - \widetilde{K}\big(v'(\theta)\big)\Big){\mbox{\rm{1}\hspace{-0.09in}\rm{1}\hspace{0.00in}}}_{\{\Theta_e^c(v)\}}(\theta)f(\theta)d\theta. \end{equation*} Dealing with the presence of the zero--one indicator function ${\mbox{\rm{1}\hspace{-0.09in}\rm{1}\hspace{0.00in}}}_{\{\Theta_e^c\}}$ is quite cumbersome (see, e.g. Horst \& Moreno--Bromberg~\cite{HoMo2}), since its domain of definition may change with different book choices. In contrast to the setting studied in~\cite{HoMo2}, however, here the CN is passive. This lack of non--cooperative--games component allows for an alternative way to proceed. To this end, we make use of the following \textit{accounting trick}, which was introduced by Jullien~\cite{BJ:03}: Let us assume that the dealer had access to a fictitious market such that the unwinding costs from trading in it, denoted in the sequel by $C_c,$ satisfy $C_c(q(\theta)) = \tau(\theta)$ for almost all $\theta\in\Theta.$ In this way, we may again assume that the dealer trades with all market participants, but now his costs of unwinding are given by the function $\mathbb{C}:\mathbb{R}\to\mathbb{R}$ defined as \begin{equation*} \mathbb{C}(q):=\min\big\{C(q), C_c(q)\big\},\quad q\in\mathbb{R}. \end{equation*} In terms of incentives, nothing is distorted by introducing the cost function $\mathbb{C},$ but we must identify the points where there is switching from using $C$ to using $C_c$ and vice versa. These switching points will determine the regions of market segmentation.
If we define, for any traded quantity $q,$ the function $\widetilde{\mathbb{C}}(q) := \mathbb{C}\big(q\big) - \psi_2\big(q\big),$ then we may re--use the machinery from Section~\ref{sec:ModelBench} with minor modifications;\footnote{Observe that Assumptions~\ref{ass:cost of access} and~\ref{ass:matching} imply that $\widetilde{\mathbb{C}}$ satisfies Assumption~\ref{ass:qc}.} namely, denoting by $\mathbb{I}$ the energy corresponding to the cost function $\mathbb{C},$ we may write the Lagrangian of the dealer's problem as \begin{equation*}\label{eq:LagrangianCN} \mathbb{L}(v, \gamma) := \mathbb{I}[v]+\langle v - u_0(\cdot; \pi), \gamma\rangle, \end{equation*} with the corresponding complementary--slackness conditions. From here we may proceed as in Section~\ref{sec:ModelBench} to find the quantities that the dealer will choose to offer. Strictly speaking we should find the pointwise maximizer in $q$ of the expression \begin{equation}\label{eq:VirtualSurplus} \Big(\theta +\frac{F(\theta) -\Gamma}{f(\theta)}\Big)\psi_1(q) - \mathbb{K}(q), \end{equation} where $\mathbb{K}(q):=\mathbb{C}(q)-\psi_2(q)=\widetilde{\mathbb{C}}(q).$ This may fortunately be avoided, given that whenever $\mathbb{C}(q)=C_c(q)$, the participation constraint binds and $q(\theta)=q_c(\theta).$ Before proceeding to the proof of Theorem~\ref{thm:Main2}, we study the mechanism used by the dealer to choose between excluding types, matching the CN and trading with them while offering strictly positive rents.
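For intuition (a hypothetical linear specification of the fictitious cost, used only as an illustration): if $C(q)=\frac{1}{2}q^2$ and $C_c(q)=p\,q$ on $q\geq 0$ for some $p>0,$ then $\mathbb{C}(q)=\frac{1}{2}q^2$ for $0\leq q\leq 2p$ and $\mathbb{C}(q)=p\,q$ for $q>2p;$ the switching point $q=2p$ separates the quantities for which the dealer's own unwinding cost is the relevant one from those for which the fictitious (matching) cost is.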
Whenever the participation constraint does not bind, the dealer selects the quantity to be chosen via the pointwise maximization of the mapping $q\mapsto \sigma(\theta, q, \Gamma).$ What makes the current problem trickier than the case without a CN is that now we must pay more attention to the evolution of the multiplier $\gamma.$ If we compare $l(\theta, 0)$ and $l(\theta, 1)$ to $q_c(\theta)$ we may pinpoint the set where the participation constraint may bind. Observe that $\big\{l(\theta, 1),\theta\in\Theta\big\}$ and $\big\{l(\theta, 0), \theta\in\Theta\big\}$ are the sets of the lowest and highest quantities the dealer may offer in an individually--rational way. Hence, as long as $l(\theta, 1)\leq q_c(\theta) \leq l(\theta, 0)$ there is the possibility of \textit{profitable matching}.
There might be instances where the participation constraint is binding for some type $\theta\in\Theta$, i.e. $\big(q(\theta), \tau(\theta)\big)=\big(q_c(\theta), \tau_c(\theta)\big),$ and $\tau_c(\theta) - C\big(q_c(\theta)\big)<0.$ In such cases $\mathbb{C}\big(q_c(\theta)\big)=C_c\big(q_c(\theta)\big)$ and $\theta\in\Theta_e(v)$ for the corresponding indirect utility function, and we say there is \textit{exclusion}.
\begin{remark}\label{rem:QuasiUniquenessReprise} It is at this point that the quasi--uniqueness mentioned in Remark~\ref{rem:QuasiUniqueness} can be addressed. The principal's problem $\mathbb{P}(\pi)$ using the cost function $\mathbb{C}$ results in the condition \begin{equation*} \big(i(\theta, v(\theta), v'(\theta))\big)_+=\big(i(\theta, v(\theta), v'(\theta))\big) \end{equation*}
being trivially satisfied. As a consequence, problem $\mathbb{P}(\pi)$ admits a unique solution. The latter coincides, by construction, with the solution to $\mathcal{P}(\pi)$ whenever $\mathbb{C}(q(\theta))=C(q(\theta))$. The caveat is that the solution to problem $\mathbb{P}(\pi)$ is blind towards what is offered to excluded types, since here their outside option is costlessly matched (they are effectively reserved). Constructing incentive compatible contracts for the excluded types is, thanks to the convexity of the indirect utility function, relatively simple. For instance, if an interval of types $(\theta_1, \theta_2)$ were excluded (but $\theta_1$ and $\theta_2$ participated), one could consider any two supporting lines to graph$\{v(\cdot;\pi)\}$ at $\big(\theta_1, v(\theta_1;\pi)\big)$ and $\big(\theta_2, v(\theta_2;\pi)\big)$. From the resulting indirect--utility function on $(\theta_1, \theta_2)$ one could extract the corresponding quantities and prices. The resulting global convexity of the indirect--utility function offered by the principal would imply that all incentives would remain unchanged. Whether the principal would suffer losses from the contracts offered to types on $(\theta_1, \theta_2)$ would be irrelevant, since the corresponding agents do not participate. \end{remark}
As mentioned above, here it is not necessary to determine $\gamma(\theta)$ in order to do likewise with $q(\theta).$ On the other hand, however, if we interpret $\gamma$ as the shadow cost of satisfying the participation constraint, we may wish to identify the multiplier so as to have a measure of the impact of the CN on the dealer's profits. The following result, which deals with points where there is switching between matching and fully servicing, extends Proposition~\ref{prop:NoJumps1}.
\begin{proposition}\label{prop:NoJumps2} For $\pi\in\mathbb{R}^2$ given, let $\widetilde{\theta}\in\Theta$ be such that there exists $\epsilon>0$ such that $v(\theta;\pi)=u_0(\theta;\pi)$ on $(\widetilde{\theta}-\epsilon, \widetilde{\theta}]$ and $v(\theta;\pi)>u_0(\theta;\pi)$ on $(\widetilde{\theta}, \widetilde{\theta}+\epsilon]$. Furthermore, assume that \begin{equation*} \int_{\widetilde{\theta}-\epsilon}^{\widetilde{\theta}}\big(\tau(\theta)-C(q(\theta))\big)f(\theta)d\theta>0, \end{equation*} where $\big\{\big( q(\theta), \tau(\theta) \big), \theta \in \Theta \big\}$ implements $v(\cdot;\pi).$ In other words, there is profitable matching on $(\widetilde{\theta}-\epsilon, \widetilde{\theta}]$ and the dealer fully services types on $(\widetilde{\theta}, \widetilde{\theta}+\epsilon].$ Then $\partial v(\widetilde{\theta};\pi)$ is a singleton. The result also holds if the order of the matching and full-servicing intervals is switched. \end{proposition}
\noindent The rationale behind Proposition~\ref{prop:NoJumps2} is that, as long as the dealer is able to match the traders' outside option without incurring a loss, it is possible to normalize the latter to zero and directly apply Proposition~\ref{prop:NoJumps1}. This is, naturally, not the case when matching $u_0$ results in losses. We put Proposition~\ref{prop:NoJumps2} to work in Example~\ref{RichStructure}.
Before moving on, we present below a modification to Example~\ref{ex1} that shows how even agents without access to a non--trivial outside option benefit from the presence of the CN and that the optimal Lagrange multiplier need not be continuous.
\begin{example}\label{ex2}
Let $f,$ $\Theta,$ $C$ and $u$ be as in Example \ref{ex1} and assume that the CN offers the traders the following expected profits: \begin{equation*} u_0(\theta; (3.2, 3.2))=\begin{cases}
-0.975\theta - 0.52, \text{ if } \theta\leq -\frac{8}{15};\\
0.975\theta - 0.52, \text{ if } \theta\geq \frac{8}{15};\\
\text{convex and negative for } \theta\in(-\frac{8}{15}, \frac{8}{15}).
\end{cases} \end{equation*} Matching this outside option would require the dealer to offer the contracts $(\pm 0.975, 0.52)$. This is profitable, hence the indirect utility never lies below $u_0$. To illustrate this, we have plotted the indirect--utility function in Figure~\ref{fig:IndUtNoEx}. It strictly dominates the one plotted in Figure~\ref{fig:IndUt} for all types who earn positive profits. The smooth pasting condition ($l(\theta,\gamma(\theta))=q_c(\theta)$ where $v$ touches $u_0$, i.e. in $\pm 0.675$) determines the optimal Lagrange multiplier, namely $\gamma(-1)=0$ and $\gamma \equiv 0.030$ on $(-1,-0.389]$. For positive types we obtain symmetrically $\gamma(1)=1$ and $\gamma \equiv 0.970$ on $[0.389,1)$. The new spread, given by $\big(t(0_-), t(0_+)\big)=(-1.282, 1.282)$, is strictly smaller than in the case without a CN.
\begin{figure}
\caption{An example without exclusion}
\label{fig:IndUtNoEx}
\label{fig:LMNoEx}
\label{fig:NoExclusion}
\end{figure} \end{example}
The following result will prove to be essential for the results in Section~\ref{sec:ExistenceEqui}. It guarantees, by virtue of Assumption~\ref{ass:cost of access}, that our notion of the spread is well defined in the presence of a CN, and it could be loosely summarized by saying that the first (in terms of moving away from $\theta=0$) types to earn positive utility trade in the DM.
\begin{lemma}\label{lemma:tradingSB} There exists $\epsilon=\epsilon(\pi)$ such that the types that belong to $(\underline{\theta}_0(\pi)-\epsilon,\underline{\theta}_0(\pi) )\cup (\overline{\theta}_0(\pi), \overline{\theta}_0(\pi)+\epsilon)$ are fully serviced. \end{lemma}
\noindent\begin{Proof} Let us denote by $\hat{\theta}$ the positive solution to the equation $u_0(\theta;\pi)=0.$ If there exists $\eta>0$ such that types on $(\hat{\theta}, \hat{\theta}+\eta)$ can be matched profitably, then the result follows either because $\overline{\theta}_0(\pi)<\hat{\theta}$ or because $\overline{\theta}_0(\pi)=\hat{\theta}$ and the types on $(\hat{\theta}, \hat{\theta}+\epsilon),$ for some $0<\epsilon\leq \eta,$ are fully serviced. Let us now assume that such an $\eta$ does not exist; we then claim that $\overline{\theta}_0(\pi)<\hat{\theta}$ must hold. Proceeding by way of contradiction, let us assume that $\overline{\theta}_0(\pi)=\hat{\theta}$ (which is equivalent to $\overline{\theta}_0(\pi)\geq\hat{\theta}$) and that there exists $\delta>0$ such that $(\hat{\theta}, \hat{\theta}+\delta)\subset\Theta_e(\pi).$ This configuration can be improved upon as follows: let $a>0$ be such that $\hat{\theta}-a>0.$ By construction $l(\hat{\theta}-a, \gamma(\hat{\theta}-a))=0.$ Let us fix $\gamma(\theta)\equiv \gamma(\hat{\theta}-a)=:\Gamma(a)$ for $\theta\in(\hat{\theta}-a, \theta_a),$ where $\theta_a$ is the solution to $v_a(\theta)=u_0(\theta;\pi)$ on $(\hat{\theta}-a,\overline{\theta}]$ if it exists, or $\theta_a=\overline{\theta}$ otherwise, and $v_a$ denotes the indirect--utility function corresponding to $\Gamma(a)$. In particular $\theta_a>\hat{\theta}$ and $l(\theta,\Gamma(a))>0$ for $\theta \in (\hat{\theta}-a, \theta_a)$.
We now have that types $\theta\in(\hat{\theta}-a, \theta_a)$ are fully serviced. By Assumption~\ref{ass:cost of access}, $v'_a(\hat{\theta}-a)=0<u_0'(\hat{\theta};\pi);$ therefore, there exists $a_1>0$ such that for all $a\leq a_1$ it holds that $\theta_a < \hat{\theta}+\delta.$ If we could show that there exists $a\leq a_1$ such that the principal could offer types in $(\hat{\theta}-a, \theta_a)$ the quantities $q_a(\theta)=l(\theta, \Gamma(a))$ at a profit, we would contradict the optimality of $\overline{\theta}_0(\pi)$ and the proof would be finalized, since incentives above $\theta_a$ would not be distorted and the principal's profits would strictly increase. In order to do so, observe that the principal's typewise profit when offering $q_a(\theta)$ is \begin{equation*} P(\theta):=\theta\Psi_1(q_a(\theta))+\Psi_2(q_a(\theta))-v_a(\theta)-C\big(q_a(\theta)\big). \end{equation*} In particular, $P(\hat{\theta}-a)=0$ and \begin{align*} P'(\hat{\theta}-a)& =\Psi_1(q_a(\hat{\theta}-a))+(\hat{\theta}-a)\Psi_1'(q_a(\hat{\theta}-a))q_a'(\hat{\theta}-a)+v_a'(\hat{\theta}-a)-\widetilde{C}'\big(q_a(\hat{\theta}-a)\big)q_a'(\hat{\theta}-a)\\
& =\Psi_1(0)+(\hat{\theta}-a)\Psi_1'(0)q_a'(\hat{\theta}-a)+v_a'(\hat{\theta}-a)-\widetilde{C}'\big(0\big)q_a'(\hat{\theta}-a)\\
& = (\hat{\theta}-a)\Psi_1'(0)q_a'(\hat{\theta}-a). \end{align*} The step from the second to the third equality follows because, by construction, $v_a'(\hat{\theta}-a)=0;$ by assumption $\Psi_1(0)=0$ and, from Assumption~\ref{ass:qc}, $\widetilde{C}'\big(0\big)=0.$ Furthermore, since $\Psi_1$ is strictly increasing and $q_a'(\hat{\theta}-a)>0,$ we have $P'(\hat{\theta}-a)>0.$ Therefore, there exists $b>0$ such that $P(\theta)> 0$ if $\theta\in(\hat{\theta}-a, \hat{\theta}-a+b).$ As a consequence, if $a<a_1$ is small enough, then $P(\theta)>0$ for $\theta\in(\hat{\theta}-a, \theta_a),$ as required.
\end{Proof}
\noindent We are now in a position to present the proof of our second main result.
\noindent\textbf{{Proof of Theorem~\ref{thm:Main2}.}} (1) Observe that if $\pi$ is such that $\big(\underline{\theta}_0(\pi), \overline{\theta}_0(\pi)\big)=\Theta_0(\pi)\subset \Theta_o,$ then the result follows immediately from Lemma~\ref{lemma:tradingSB}. If we reverse the inclusion, two situations are possible, since the addition of the CN--constraint to Problem $\mathcal{P}_o$ may or may not bind for some types. The latter case being trivial, let us look at the case where there is a point $\theta_a>\overline{\theta}_0$ at which it holds that $v_o(\theta_a)=u_0(\theta_a;\pi)$ and such that $v_o(\theta)>u_0(\theta;\pi)$ for $\theta<\theta_a$ and vice versa for $\theta>\theta_a.$ The Lagrange multiplier $\gamma_m$ is active on $(\theta_a, \overline{\theta}],$ which implies that $\gamma_m(\theta_a)<1.$ We know from~\cite{BJ:03}, p. 9, that for all $\theta$ such that $l(\theta, \Gamma)>0,$ the latter is decreasing in $\Gamma.$ As a consequence, the root of the equation \begin{equation*} K^{-1}\Big(\theta+\frac{F(\theta)-\gamma_m(\theta_a)}{f(\theta)}\Big) = 0 \end{equation*} is strictly smaller than that of $l(\theta,1)=0,$ which yields the desired result.
\noindent (2) Let us denote by $t_o(0_-)$ and $t_o(0_+)$ the best bid and ask prices without the presence of a CN and by $t_m(0_-)$ and $t_m(0_+)$ the corresponding marginal prices with one; thus, \begin{equation*} t_o(0_-)=q_o'(\underline{\theta}_{0,o-})\big(\underline{\theta}_{0,o}\phi_1 + \phi_2\big)\text{ and } t_o(0_+) = q_o'(\overline{\theta}_{0,o+})\big(\overline{\theta}_{0,o}\phi_1 + \phi_2\big) \end{equation*} and \begin{equation*} t_m(0_-)=q_m'(\underline{\theta}_{0,m-})\big(\underline{\theta}_{0,m}\phi_1 + \phi_2\big)\text{ and } t_m(0_+) = q_m'(\overline{\theta}_{0,m+})\big(\overline{\theta}_{0,m}\phi_1 + \phi_2\big). \end{equation*} From Part (1) we know that $\underline{\theta}_{0,o}\leq\underline{\theta}_{0,m}$ (both negative) and $\overline{\theta}_{0,m}\leq\overline{\theta}_{0,o}$ (both positive) and, since $\phi_1$ and $\phi_2$ do not depend on the presence of the CN, all we have left to do is show that \begin{equation*} q_m'(\underline{\theta}_{0,m-})\leq q_o'(\underline{\theta}_{0,o-})\text{ and }q_m'(\overline{\theta}_{0,m+})\leq q_o'(\overline{\theta}_{0,o+}). \end{equation*} Using the well--known relation $(f^{-1})'(a)=1/f'(f^{-1}(a))$ we have that \begin{align*}
q_m'(\underline{\theta}_{0,m-})& = \frac{1}{K'\Big(K^{-1}\big(\underline{\theta}_{0,m} - \frac{\gamma(\underline{\theta}_{0,m-})-F(\underline{\theta}_{0,m})}{f(\underline{\theta}_{0,m})}\big)\Big)}\frac{d}{d\theta}\big(\theta-\frac{\gamma(\theta)-F(\theta)}{f(\theta)}\big)\Big|_{\theta=\underline{\theta}_{0,m-}}\\
& = \frac{1}{K'\big(q_m(\underline{\theta}_{0,m})\big)}\frac{d}{d\theta}\big(\theta-\frac{\gamma(\theta)-F(\theta)}{f(\theta)}\big)\Big|_{\theta=\underline{\theta}_{0,m-}}\\
& = \frac{1}{K'(0)}\Big(1-\frac{d}{d\theta}\big(\frac{\gamma(\underline{\theta}_{0,m-})-F(\theta)}{f(\theta)}\big)\Big)\Big|_{\theta=\underline{\theta}_{0,m}}, \end{align*} where we have used the fact that $\gamma$ is constant on $(\underline{\theta}_{0,m}-\delta,\underline{\theta}_{0,m})$ for some $\delta>0.$ We may proceed analogously for the other three quantities. We have to show that \begin{align}\begin{split}\label{eq:condMonot}
\frac{1}{K'(0)}\frac{d}{d\theta}\Big(\frac{\gamma(\underline{\theta}_{0,m-})-F(\theta)}{f(\theta)}\Big)\Big|_{\theta=\underline{\theta}_{0,m}} & \geq\frac{1}{K'(0)}\frac{d}{d\theta}\Big(\frac{-F(\theta)}{f(\theta)}\Big)\Big|_{\theta=\underline{\theta}_{0,o}}\\
\frac{1}{K'(0)}\frac{d}{d\theta}\Big(\frac{\gamma(\overline{\theta}_{0,m+})-F(\theta)}{f(\theta)}\Big)\Big|_{\theta=\overline{\theta}_{0,m}} & \geq\frac{1}{K'(0)}\frac{d}{d\theta}\Big(\frac{1-F(\theta)}{f(\theta)}\Big)\Big|_{\theta=\overline{\theta}_{0,o}}, \end{split} \end{align} which hold with equality under the assumption that $f\equiv(\overline{\theta}-\underline{\theta})^{-1}.$
\noindent (3) It follows from Part (1) that, if $\theta$ participates in the presence of the CN, then $q_o(\theta)\leq q_m(\theta).$ Assume now that the inequality $v_o(\theta) > v(\theta; \pi)$ holds for all $\theta$ in a non--empty interval $(\theta_1, \theta_2)$ and $v_o(\theta_1) = v(\theta_1; \pi)$ and $v_o(\theta_2) = v(\theta_2; \pi).$ By the convexity of $v_o$ and $v(\cdot; \pi),$ this would imply the existence of $\theta_3\in(\theta_1, \theta_2)$ such that $v_o'(\theta) > v'(\theta; \pi)$ holds almost everywhere in $(\theta_1, \theta_3).$ However, $v_o'(\theta)=\psi_1(q_o(\theta)),$ $v'(\theta; \pi)=\psi_1(q_m(\theta))$ and $\psi_1$ is strictly increasing; hence, this would imply that $q_o(\theta)>q_m(\theta)$ for almost all $\theta\in(\theta_1, \theta_3),$ which is a contradiction.
$\Box$
We finalize this section with two examples that showcase the results obtained thus far. Example~\ref{MussaRosenEx} shows that, in the simple case where the outside option is such that the dealer will (only) exclude all high--enough (in absolute value) types, the results of Theorem~\ref{thm:Main2} follow trivially.
\begin{example}\label{MussaRosenEx} Let us revisit Example~\ref{MussaRosen} with an extremely steep outside option that will warrant exclusion, namely, for $r_0<r$ let \begin{equation*} u_0(\theta)=\begin{cases}
\infty,\text{ if }\, \theta\in [-r,-r_0)\bigcup (r_0, r];\\
0,\,\,\text{ otherwise}.
\end{cases} \end{equation*} Recall that, for a given value $\Gamma$ of the Lagrange multiplier, the corresponding quantity is \begin{equation*} q(\theta;\Gamma):=2\theta+r-2r\Gamma. \end{equation*} In Example~\ref{MussaRosen} the participation constraint does not bind for high types. In particular, $\gamma\equiv 0$ on $[-r,\underline{\theta}_0)$ and to find the left--hand endpoint of the reserved set we set $\Gamma=0$ and solve $2\theta+r=0.$ In the current setting, the participation constraint must bind for $\theta<-r_0$ and the multiplier will be constant on $(-r_0, \underline{\theta}_0(\Gamma)),$ where \begin{equation*} \underline{\theta}_0(\Gamma):=-\frac{r}{2}\big[1-2\Gamma\big]. \end{equation*} By construction, the choice of $\Gamma$ will bear no weight on the trader types that will be serviced to the left of $\theta=-r_0,$ but only on how many additional low types benefit from the presence of the outside option. By integrating $q(\theta;\Gamma)$ and noting that the corresponding indirect--utility function $v(\cdot;\Gamma)$ must satisfy $v(\underline{\theta}_0(\Gamma);\Gamma)=0,$ we have, for $\theta\in [-r_0,\underline{\theta}_0(\Gamma)]$ \begin{equation*}
v(\theta;\Gamma)=\theta^2+\theta r\big[1-2\Gamma\big]+\frac{r^2}{4}\big[1-2\Gamma\big]^2. \end{equation*} Since the indirect--utility function also satisfies $v(\theta;\Gamma)=\theta q(\theta;\Gamma)-\tau(\theta;\Gamma),$ we have that the dealer market on $[-r_0,\underline{\theta}_0(\Gamma)]$ is described by the quantity--price pairs $\big(q(\theta;\Gamma), \theta^2-\frac{r^2}{4}\big[1-2\Gamma\big]^2\big).$ As a consequence, the per--type profit is \begin{equation*} \Pi(\theta;\Gamma):=-\theta^2-\frac{3}{4}r^2\big[1-2\Gamma\big]^2-2\theta r\big[1-2\Gamma\big], \end{equation*} where the third term on the right--hand side is positive and dominates the first two. Finally, we have that each choice of $\Gamma$ will result in the dealer obtaining the aggregate profits from negative types \begin{equation*} P(\Gamma):=\frac{1}{2r}\int_{-r_0}^{\underline{\theta}_0(\Gamma)}\Pi(\theta;\Gamma)d\theta. \end{equation*} The mapping $\Gamma\mapsto P(\Gamma)$ is strictly concave and the first--order conditions yield that it is maximized at $\Gamma=(r-r_0)/(2r).$ As a result $\underline{\theta}_0(\Gamma)=-r_0/2$ and $v(\theta;\Gamma)=\theta^2+r_0\theta+r_0^2/4,$ which correspond to the boundary of the reserved set and the indirect--utility function for negative trader types in the problem without a CN on $[-r_0, r_0].$ \end{example}
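The first--order condition in Example~\ref{MussaRosenEx} is easy to check symbolically. The sketch below (ours, in Python with sympy) rebuilds $\Pi(\theta;\Gamma)$ and $P(\Gamma)$ exactly as displayed above and confirms that the profit--maximizing multiplier is $\Gamma=(r-r_0)/(2r)$, with $\underline{\theta}_0(\Gamma)=-r_0/2$.
\begin{verbatim}
# Symbolic check of the first-order condition in Example MussaRosenEx (sketch).
import sympy as sp

theta, Gamma = sp.symbols('theta Gamma', real=True)
r, r0 = sp.symbols('r r_0', positive=True)

theta_low = -(r/2)*(1 - 2*Gamma)            # \underline{theta}_0(Gamma)
Pi = -theta**2 - sp.Rational(3, 4)*r**2*(1 - 2*Gamma)**2 - 2*theta*r*(1 - 2*Gamma)
P = sp.integrate(Pi, (theta, -r0, theta_low))/(2*r)   # aggregate profit from negative types

crit = sp.solve(sp.diff(P, Gamma), Gamma)   # contains (r - r_0)/(2*r) and (r - 2*r_0)/(2*r)
print(crit)
G_star = (r - r0)/(2*r)
print(sp.simplify(sp.diff(P, Gamma, 2).subs(Gamma, G_star)))   # -r*r_0 < 0, so a maximum
print(sp.simplify(theta_low.subs(Gamma, G_star)))              # -r_0/2
\end{verbatim}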
\begin{example}\label{RichStructure} We stay with the basic setup of Examples \ref{ex1} and \ref{ex2}, but now assume that $u_0(\theta; \pi)=\Big(\frac{1- \pi_+}{3}\theta^{6/5}-0.001\Big)_+$ for $\theta\geq 0$ and $u_0(\theta; \pi)\equiv 0$ otherwise. For any type $\theta$ such that $u_0(\theta)>0$ it holds that \begin{equation*} \big(q_c(\theta), \tau_c(\theta)\big) =\Big(\frac{2}{5}(1-\pi_+)\theta^{1/5},\frac{2}{5}(1-\pi_+)\theta^{6/5} + \frac{1}{25}(1-\pi_+)^2\theta^{2/5} - \big(\frac13(1-\pi_+)\theta^{6/5}-0.001\big)_+\Big). \end{equation*} We assume $\pi=(0,1/2).$ The first thing to notice is that the dealer's per-type profit for offering $(q_c(\theta),\tau_c(\theta))$, i.e. $\tau_c(\theta)-C(q_c(\theta))=\theta^{6/5}/30 - \theta^{2/5}/100+0.001$, is negative for types $\theta\in(0.0035, 0.1667)$. On the other hand, the inequality $u_0(\theta; 1/2)\geq 0$ only holds for $\theta\geq 0.014.$ Combining both arguments we see that $\Theta_e(\pi)\subset(0.014, 0.1667).$ Next we observe that the inequality \begin{equation*} l(\theta,1) = K^{-1}\Big(\theta-\frac{1 - F(\theta)}{f(\theta)}\Big)\geq \frac{\sqrt[5]{\theta}}{5} \end{equation*} holds for all $\theta\in [0.4761, 1].$ Hence profitable matching may occur on the interval $(0.1667, 0.4761),$ over which $q(\theta)=q_c(\theta)$ and $\mathbb{C}\big(q(\theta)\big)=C\big(q(\theta)\big).$ Furthermore, Proposition~\ref{prop:NoJumps2} implies that the corresponding indirect utility function will be differentiable at $\theta= 0.4761.$ In order to obtain $v(\theta; \pi)$ for $\theta\in [0.4761, 1],$ we integrate $l(\cdot, 1)$ and determine the corresponding integration constant $c$ by equating \begin{equation*} 2\int_0^{0.4761}\left(\frac{3\theta^2-6\theta+2}{2\theta-3}\right)d\theta + c = \frac{1}{6}(0.4761)^{6/5} - 0.001. \end{equation*} We know from the example without a CN that $\gamma(t)=0$ for $\theta\in[-1, -0.423).$ On $[-0.423, 0)$ the multiplier must satisfy \begin{equation*} K^{-1}\Big(\theta-\frac{\gamma(\theta) - F(\theta)}{f(\theta)}\Big)=0, \end{equation*} which results in $\gamma(\theta)=(3\theta^2+6\theta+2)/4$ on the said interval. What remains to be determined is $\overline{\theta}_0$ and $\gamma(\overline{\theta}_0).$ To this end, we define the family of functions $v(\cdot;\Gamma)$ such that $v'(\theta;\Gamma)=l(\theta,\Gamma)$ whenever this quantity is positive and $v(\theta;\Gamma)=0$ for $\theta\in[0,\theta(\Gamma)],$ where $\theta(\Gamma)$ is the solution to the equation $l(\theta,\Gamma)=0.$ Since $\gamma(0)=0.5,$ we have that $\Gamma>0.5.$\footnote{Pasting when passing from servicing to excluding need not be smooth.} In fact, $\Gamma=\gamma(\overline{\theta}_0)=0.5105,$ $\overline{\theta}_0=0.007$ and the intersection of $v(\cdot;\Gamma)$ and $u_0(\cdot; 1/2)$ occurs at $\theta= 0.0159.$
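The integration constant $c$ above is only defined implicitly. The short numerical sketch below (ours, in Python) evaluates it from the displayed equation and, assuming (as the displayed integrand suggests) that $l(\theta,1)=2(3\theta^2-6\theta+2)/(2\theta-3)$ in this example, verifies that $l(\cdot,1)$ meets $q_c(\theta)=\sqrt[5]{\theta}/5$ near $\theta=0.4761$.
\begin{verbatim}
# Numerical evaluation of the integration constant c and of the pasting point (sketch).
from scipy.integrate import quad
from scipy.optimize import brentq

integrand = lambda t: (3*t**2 - 6*t + 2)/(2*t - 3)
theta_star = 0.4761

I, _ = quad(integrand, 0.0, theta_star)
c = theta_star**1.2/6.0 - 0.001 - 2.0*I   # from 2*int_0^{0.4761}(...) + c = u_0(0.4761; 1/2)
print(c)                                  # approximately 0.346

# assuming l(theta,1) = 2*(3*theta^2 - 6*theta + 2)/(2*theta - 3) in this example:
root = brentq(lambda t: 2*integrand(t) - t**0.2/5.0, 0.3, 0.6)
print(root)                               # approximately 0.476, consistent with the text
\end{verbatim}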
Summarizing, the types on $[-1,-0.423)\cup(0.007, 0.0159]\cup(0.1667, 1]$ are fully serviced, those on $[-0.423, 0.007]$ are reserved and the ones that lie on $(0.0159, 0.1667)$ are excluded. The left--hand side of the spread is the same as in the example without a CN, whereas the right--hand side is $t(0_+)=0.0281.$ This is significantly smaller than in Example \ref{ex1}.
Determining $\gamma(\theta)$ on $(0, 0.007]$ is relatively simple, as we again must solve $l(\theta,\gamma(\theta))=0,$ which results in $\gamma(\theta)=(-3\theta^2+6\theta+2)/4$. Finally, in order to determine $\gamma$ on $\Theta_e(\pi)$ we must rewrite the virtual surplus using $\mathbb{C}(q(\theta))=\tau_c(\theta),$ which results in ${\mathbb{C}(q)=(5^5/6)q^6-(1/4)q^2+0.001}.$ The pointwise maximization of the resulting virtual surplus must equal $q_c(\theta)=\sqrt[5]{\theta}/5.$ After some lengthy arithmetic that we choose to spare the reader from, we obtain \begin{equation*} \gamma(\theta)=F(\theta)-f(\theta)\bigg[5^5 q_c(\theta)^5-\theta\bigg] = F(\theta)\quad\text{for }\theta\in\Theta_e(\pi). \end{equation*} Finally, in the profitable--matching region we solve $l(\theta,\gamma(\theta))=\sqrt[5]{\theta}/5$ so as to find the multiplier, which yields \begin{equation*} \gamma(\theta)=F(\theta)-f(\theta)\bigg[\frac{1}{10}\theta^{1/5}-\theta\bigg]\quad\text{for }\theta\in [0.1667,0.4761). \end{equation*} or \begin{equation*} \gamma(\theta)=\frac{1}{10}\theta^{1/5}\cdot \frac{2\theta-3}{4} - \frac{3\theta^2-6\theta-2}{4} \quad\text{for }\theta\in [0.1667,0.4761). \end{equation*} Observe that, in contrast with Example~\ref{ex2}, here $\gamma(\theta)=1$ for types that are strictly smaller than one. This means that the rightmost types do not profit from introduction of CN via changes in the quantities they are offered, but rather from changes in the corresponding prices. Intuitively speaking this has to do with how steep the outside option is for large types and, as a consequence, whether or not it will be matched over a non-trivial interval.
We present in Figure~\ref{fig:IndUtOpt} the indirect utilities for positive types (the ones for negative ones being the same as in Figure~\ref{fig:IndUt}). The values of $\gamma$ have been plotted in Figure~\ref{fig:OptMult}. In Figure~\ref{fig:WithExZoom} we provide a magnification around small values of $\theta$ so as to highlight the switching between reservation, full servicing and exclusion. Observe the jump of the Lagrange multiplier at the boundary between fully--serviced and excluded types (Figure~\ref{fig:OptMultZoom}) and between excluded and matched ones (Figure~\ref{fig:OptMult}).
\begin{figure}
\caption{An example with exclusion}
\label{fig:IndUtOpt}
\label{fig:OptMult}
\label{fig:WithEx}
\end{figure}
\begin{figure}
\caption{An example with exclusion (magnified)}
\label{fig:IndUtOptZoom}
\label{fig:OptMultZoom}
\label{fig:WithExZoom}
\end{figure} \end{example}
\noindent We shall revisit this example in the upcoming section, where we look into the existence of equilibrium prices in the CN.
\section{\large{An equilibrium price in the crossing network}}\label{sec:ExistenceEqui}
{In this section we prove the existence of an equilibrium price $\pi^*.$ We first observe that, from Assumption~\ref{ass:monotone}, there is no loss of generality in assuming that $\pi^*$ belongs to some closed and bounded subset of $\mathbb{R}^2,$ which we denote by $\Pi.$ As a consequence we have that $t(0;\cdot):\Pi\to\Pi.$ The restriction of possible equilibrium prices to $\Pi,$ together with Assumptions~\ref{ass:cost of access} and~\ref{ass:monotone}, yields the next result.}
\begin{lemma}\label{lm:BondedParticipation} There exists a non--empty interval $[\epsilon_1, \epsilon_2]\subset\Theta$ such that \begin{enumerate} \item $0\in (\epsilon_1, \epsilon_2);$
\item $u_0(\theta; \pi) = 0$ for all $\theta\in[\epsilon_1, \epsilon_2]$ and all $\pi\in\Pi.$ \end{enumerate} \end{lemma} In the sequel we make use of the results obtained in Section~\ref{sec:ModelCN} to show that the mapping $\pi\mapsto t(0; \pi)$ has the required monotonicity properties so as to use the following result (see, e.g. Aliprantis \& Border~\cite{AB}):
\begin{theorem}\label{thm:Tarski}(Tarski's Fixed Point Theorem) Let $(X, \leq)$ be a non--empty, complete lattice. If $f:X\to X$ is order preserving, then the set of fixed points of $f$ is also a non--empty, complete lattice. \end{theorem}
\noindent We are now ready to give the proof of our third main result.
\noindent\textbf{Proof of Theorem \ref{thm:Main3}.} Lemmas~\ref{lemma:tradingSB} and ~\ref{lm:BondedParticipation} guarantee that we have a well--defined spread; thus, we may decompose the analysis of the mapping $\pi\mapsto t(0; \pi)$ into that of the mappings $\pi_-\mapsto t(0_-; \pi_-)$ and $\pi_+\mapsto t(0_+; \pi_+).$ In other words, for a given price $\pi,$ the dealer's optimal response to $u_0(\cdot;\pi)$ is, modulo a normalization of $\gamma,$ equivalent to the combination of his actions towards negative and positive types separately. We shall concentrate on the existence of a fixed point of the mapping $\pi_+\mapsto t(0_+; \pi_+).$
From Assumption~\ref{ass:monotone} we have that if $\pi_{1+}<\pi_{2+}$, then $u_0(\theta;\pi_{1+})>u_0(\theta;\pi_{2+})$ for all $\theta>0.$ If for $i=1,2$ it holds that $u_0(\theta;\pi_{i+})<v_o(\theta)$ for all $\theta>0$, then $v(\theta;\pi_{1+})=v(\theta;\pi_{2+})$ on the same domain and $t(0_+; \pi_{1+})=t(0_+; \pi_{2+}).$ Next assume that $u_0(\theta;\pi_{i+})\geq v_o(\theta)$ on a subset $\Theta_i$ of $(0, \overline{\theta}],$ for $i=1,2.$ Given that $u_0(\theta;\pi_{1+})>u_0(\theta;\pi_{2+})$ for all $\theta>0,$ we have $\overline{\theta}(\pi_1)<\overline{\theta}(\pi_2)$ and the first point $\widetilde{\theta}_1$ such that $v(\theta;\pi_{1+})=u_0(\theta;\pi_{1+})$ holds satisfies $\widetilde{\theta}_1<\widetilde{\theta}_2,$ where the latter is the analogue of $\widetilde{\theta}_1$ in the presence of $u_0(\theta;\pi_{2+}).$ The existence of $\widetilde{\theta}_1$ and $\widetilde{\theta}_2$ is guaranteed by the fact that in both cases the indirect--utility functions intersect the corresponding outside options. Arguing as in the proof of Theorem~\ref{thm:Main2}, Part (2), this also implies that $\overline{\theta}_0(\pi_{1})<\overline{\theta}_0(\pi_{2});$ hence $t(0_+; \pi_{1+})<t(0_+; \pi_{2+}).$ In other words, the mapping $\pi_{+}\mapsto t(0_+; \pi_{+})$ is order--preserving and, using Tarski's Fixed Point Theorem, we may conclude that it has a fixed point.
$\Box$
\begin{remark} The requirement of uniformly distributed types can be relaxed to the extent that if $f$ and $K$ are such that Conditions~\eqref{eq:condMonot} are satisfied, then the required monotonicity properties still apply. Unfortunately, these conditions cannot be verified ex--ante, since they include the end points of the set of reserved traders. \end{remark}
\begin{example} Let us go back to our example with exclusion, but introduce the feedback loop between the DM and the CN through the iteration $\pi_{i+1}= t(0;\pi_i).$ We initialize the recursion by setting $\pi_0=(0, 1/2)$ and $\kappa=0.001$, which are the parameters in the aforementioned example.
\begin{figure}
\caption{ The indirect--utility functions corresponding to the iteration $\pi_{i+1}=t(0; \pi_{i})$.}
\label{fig:EquilPriceZoomIn}
\end{figure}
We observe a very swift convergence. Indeed, it takes only four iterations to reach $\|v(\cdot;\pi_i)-v(\cdot; \pi_{i+1})\|_{\infty}\leq 10^{-5}$ and the indirect--utility functions in the third and fourth iteration are almost indistinguishable. The equilibrium price is $\pi^*=(0, 0.015)$. We present in Figure~\ref{fig:EquilPriceZoomIn} the plots of the first four iterates. It is evident that each iteration results in a smaller set of reserved traders and in a higher indirect utility for all types. The spreads, the right endpoints of the reserved regions, the Lagrange multipliers at the right endpoint of the reserved regions and the exclusion regions are provided in Table~\ref{table1}. It is interesting to observe that, as the spread decreases to its equilibrium level, the number of trader types that are reserved decreases and the sets of excluded types grow (in the sense of set inclusion). This last fact reflects that, when the traders have a more attractive outside option, it is harder for the dealer to match it profitably.
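The recursion is straightforward to organize around a numerical solver for the dealer's problem. The skeleton below (ours, in Python; \texttt{dealer\_response} is a hypothetical placeholder for such a solver) implements the update $\pi_{i+1}=t(0;\pi_i)$ together with the stopping rule $\|v(\cdot;\pi_i)-v(\cdot;\pi_{i+1})\|_\infty\leq 10^{-5}$ used above.
\begin{verbatim}
# Skeleton of the feedback loop pi_{i+1} = t(0; pi_i) with the stopping rule above (sketch).
import numpy as np

def cn_price_iteration(dealer_response, pi0, theta_grid, tol=1e-5, max_iter=20):
    """dealer_response(pi, theta_grid) is a hypothetical placeholder: it should solve the
    dealer's problem against u_0(.; pi) and return the pair t(0; pi) = (t(0_-), t(0_+))
    together with the indirect-utility values v(.; pi) on theta_grid."""
    pi, v = pi0, None
    for _ in range(max_iter):
        t0, v_new = dealer_response(pi, theta_grid)
        if v is not None and np.max(np.abs(v_new - v)) <= tol:   # ||v_i - v_{i+1}||_inf <= tol
            return t0, v_new
        pi, v = t0, v_new
    return pi, v

# usage sketch:
# pi_star, v_star = cn_price_iteration(my_solver, (0.0, 0.5), np.linspace(-1.0, 1.0, 1001))
\end{verbatim}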
\begin{table*}[ht!] \small \centering \caption{The numbers of the feedback loop}\label{table1}
\begin{tabular}{@{}cccc@{}}\toprule
$\pi_+$ & $\Theta_0(\pi_+)$ & $\Gamma$ & $\Theta_e(\pi_+)$ \\ \midrule
$ 1/2$ & [-0.423,0.0070] & 0.5105 & [0.0159, 0.1667]\\
$0.0281$ & [-0.423,0.0040] & 0.5061 & [0.0083, 0.4872]\\
$0.0161$ & [-0.423,0.0040] & 0.5060 & [0.0082, 0.4954]\\
$0.0158$ & [-0.423,0.0040] & 0.5060 & [0.0082, 0.4955]\\ \bottomrule \\ \end{tabular}
\end{table*} \end{example}
\section{\large{Portfolio liquidation and dark--pool trading}}\label{sec:DPtrading}
{In this section we present an application of our methodology to portfolio liquidation. We assume that the market participants' aim is to liquidate their current holdings on some traded asset. The sizes of the traders' portfolios are heterogeneous and saying that a trader's type is $\theta$ means that he holds $\theta$ shares of the asset prior to trading.} We set $\Theta=[-1, 1]$ and $f\equiv 1/2.$ If a trader of type $\theta$ trades $q$ shares for $\tau$ dollars, his utility is \begin{equation*} \hat{u}(\theta, q)- \tau:=-\alpha(\theta-q)^2-\tau, \end{equation*} where $0<\alpha$ denotes the traders' (homogeneous) sensitivity towards inventory holdings. Notice that $-\alpha\theta^2$ is the type--dependent reservation utility of a trader of type $\theta.$ If we ``normalize" the said utility to zero, we may write \begin{equation*} u(\theta ,q) - \tau = 2\alpha\theta q -\alpha q^2 -\tau. \end{equation*} In this example the crossing network takes the form of a \textit{dark pool} (DP). Choosing to trade in the latter entails two kinds of costs for the traders: On the one hand, there is a direct, fixed cost $\kappa>0$ of engaging in dark--pool trading. On the other hand, execution in the DP is not guaranteed. We denote by $p\in [0, 1]$ the probability that an order is executed where we assume for simplicity that the probability of order execution is independent of the order size. Pricing in the DP is linear. Namely, for a given execution price $\pi$, the utility that a trader of type $\theta$ extracts from submitting an order of $q$ shares to be traded in the DP is \begin{equation*} p\big[(2\theta\alpha - \pi)q - \alpha q^2\big] - \kappa, \end{equation*} where again we have normalized reservation utilities to zero. The problem of optimal submission to the DP for a $\theta$--type trader is \begin{equation*} \max_q \Big\{p\big[(2\theta\alpha - \pi)q - \alpha q^2\big]\Big\}, \end{equation*} which yields the optimal submission level \begin{equation*} q_d(\theta):= \theta-\frac{\pi}{2\alpha}. \end{equation*} We obtain that opting for the DP results in a trader of type $\theta$ enjoying the expected utility \begin{equation*} u_0(\theta;\pi) = \alpha p\left(\theta-\frac{\pi}{2\alpha}\right)^2 - \kappa. \end{equation*} We assume that $p\pi^2<4\alpha\kappa$ so as to keep the DP unattractive for small types.
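The optimal submission level and the resulting outside option can be recovered symbolically. The sketch below (ours, in Python with sympy) maximizes the trader's dark--pool objective over $q$ and substitutes the maximizer back.
\begin{verbatim}
# Symbolic derivation of q_d(theta) and u_0(theta; pi) for the dark pool (sketch).
import sympy as sp

theta, q, pi = sp.symbols('theta q pi', real=True)
alpha, p, kappa = sp.symbols('alpha p kappa', positive=True)

objective = p*((2*theta*alpha - pi)*q - alpha*q**2)   # expected gain from submitting q shares
q_d = sp.solve(sp.diff(objective, q), q)[0]           # first-order condition in q
print(sp.simplify(q_d - (theta - pi/(2*alpha))))      # 0, i.e. q_d = theta - pi/(2*alpha)

u0 = objective.subs(q, q_d) - kappa
print(sp.simplify(u0 - (alpha*p*(theta - pi/(2*alpha))**2 - kappa)))   # 0
\end{verbatim}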
We assume that the dealer's costs/profits of unwinding a portfolio of size $q$ are $C(q)=\epsilon\,q+\beta q^2$ where $\beta>0$ and $\epsilon$ is non--negative. Observe that, since $u_0(\cdot;\pi)$ does not satisfy Assumption~\ref{ass:cost of access}, some restrictions must be imposed on the problem's parameters so as to still have Lemma~\ref{lm:BondedParticipation}. Namely, it must hold that \begin{equation}\label{eq:RestParamDP} \pi<2\sqrt{\frac{\alpha\kappa}{p}}. \end{equation} Condition~\eqref{eq:RestParamDP} imposes a hard upper bound on possible equilibrium DP prices. It should be noted that Assumption~\ref{ass:monotone} is not satisfied by $u_0(\cdot;\pi),$ which, together with the way in which we shall define the pricing feedback loop from the DM to the DP, implies that our equilibrium result does not apply ``as is'' to the current setting.
\subsection{\large{The dealer market without a dark pool}}
In the absence of a dark pool, the dealer's optimal choices of quantities are, for negative types \begin{equation*} l(\theta, 0)=\frac{\alpha}{\alpha+\beta}\big(2\theta+1\big)-\frac{\epsilon}{2(\alpha+\beta)} \end{equation*} and for positive types \begin{equation*} l(\theta, 1)=\frac{\alpha}{\alpha+\beta}\big(2\theta-1\big)-\frac{\epsilon}{2(\alpha+\beta)}, \end{equation*} where the boundary of $\Theta_0$ is given by \begin{equation*} \underline{\theta}_0=\frac{1}{2}\Big(\frac{\epsilon}{2\alpha}-1\Big)\quad\text{and}\quad \overline{\theta}_0=\frac{1}{2}\Big(\frac{\epsilon}{2\alpha}+1\Big). \end{equation*} In order to guarantee that $\Theta_0\subset[-1,1]$ the condition $\epsilon<2\alpha$ must be imposed on the corresponding parameters. From the relation $v'(\theta)=\Psi_1\big(q(\theta)\big)$ we have that the indirect--utility function is \begin{equation*} v(\theta)=\begin{cases}
\frac{2\alpha^2}{\alpha+\beta}\theta^2 +\frac{\alpha}{\alpha+\beta}\big(2\alpha-\epsilon\big)\theta+c_1, & \theta\leq\underline{\theta}_0;\\
\frac{2\alpha^2}{\alpha+\beta}\theta^2 -\frac{\alpha}{\alpha+\beta}\big(2\alpha+\epsilon\big)\theta+c_2, & \theta\geq\overline{\theta}_0,
\end{cases} \end{equation*} where \begin{equation*} c_1=\frac{2\alpha^2}{4(\alpha+\beta)}\Big(\frac{\epsilon}{2\alpha}+1\Big)^2 +\frac{\alpha(2\alpha+\epsilon)}{2(\alpha+\beta)}\Big(\frac{\epsilon}{2\alpha}+1\Big)\text{ and } c_2=\frac{2\alpha^2}{4(\alpha+\beta)}\Big(\frac{\epsilon}{2\alpha}-1\Big)^2 -\frac{\alpha(2\alpha+\epsilon)}{2(\alpha+\beta)}\Big(\frac{\epsilon}{2\alpha}-1\Big). \end{equation*} When it comes to the spread, observe that $q'\equiv \frac{2\alpha}{\alpha+\beta},$ $\psi_1\equiv 2\alpha$ and $\psi_2\equiv0,$ which yields \begin{equation*} [t(0_-), t(0_+)]=\frac{4\alpha^2}{\alpha+\beta}[\underline{\theta}_0,\overline{\theta}_0]. \end{equation*} Below we analyze how the spread changes with the introduction of the DP.
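Before doing so, we note that the expressions above are easily verified symbolically. The sketch below (ours, in Python with sympy) checks that the stated endpoints solve $l(\underline{\theta}_0,0)=0$ and $l(\overline{\theta}_0,1)=0$ and recovers the spread from $t(0_{\pm})=q'\big(\theta_0\,\psi_1+\psi_2\big)$ with the values of $q'$, $\psi_1$ and $\psi_2$ given above.
\begin{verbatim}
# Symbolic check of the boundaries of Theta_0 and of the spread without a dark pool (sketch).
import sympy as sp

theta = sp.symbols('theta', real=True)
alpha, beta, eps = sp.symbols('alpha beta epsilon', positive=True)

l0 = alpha/(alpha + beta)*(2*theta + 1) - eps/(2*(alpha + beta))   # l(theta, 0)
l1 = alpha/(alpha + beta)*(2*theta - 1) - eps/(2*(alpha + beta))   # l(theta, 1)

th_low  = sp.Rational(1, 2)*(eps/(2*alpha) - 1)
th_high = sp.Rational(1, 2)*(eps/(2*alpha) + 1)
print(sp.simplify(l0.subs(theta, th_low)), sp.simplify(l1.subs(theta, th_high)))   # 0 0

qprime, psi1, psi2 = 2*alpha/(alpha + beta), 2*alpha, 0
print([sp.simplify(qprime*(t0*psi1 + psi2)) for t0 in (th_low, th_high)])
# both endpoints carry the factor 4*alpha**2/(alpha + beta)
\end{verbatim}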
\subsection{\large{The impact of a dark pool}}
We first take an exogenous execution price $\pi$ and determine, for each $\theta\in\Theta,$ what is the quantity--price pair $\big(q_c(\theta;\pi), \tau_c(\theta;\pi)\big)$ that the dealer must offer so as to match a DP with execution price $\pi.$ Using the relation $q_c(\theta;\pi)=u_0'(\theta;\pi)$ we obtain \begin{align}\begin{split} q_c(\theta;\pi)=& \ 2\alpha p\left(\theta-\frac{\pi}{2\alpha}\right) \text{ and}\\ \tau_c(\theta;\pi)=& \ \kappa+4\alpha^2 p(\theta -\alpha p)\left(\theta-\frac{\pi}{2\alpha}\right)-\alpha p\left(\theta-\frac{\pi}{2\alpha}\right)^2. \end{split}\end{align}
From the Envelope Theorem and the structure of $u(\theta,q)$ we have that the traders' indirect utility function satisfies \begin{equation}\label{eq:QualEnvel} \frac{v'(\theta)}{2\alpha}=l\big(\theta,\gamma(\theta)\big). \end{equation} In order to determine the spread in the presence of the DP we must determine $\underline{\theta}_{0, m}$ and $\overline{\theta}_{0, m}$ together with $\gamma\big(\underline{\theta}_{0, m}\big)$ and $\gamma\big(\overline{\theta}_{0, m}\big).$ For an arbitrary $\Gamma\in [0, 1]$ we have \begin{equation*} l(\theta,\Gamma)=\frac{\alpha}{\alpha+\beta}\big[2\theta+1-2\Gamma\big]-\frac{\epsilon}{2(\alpha+\beta)}. \end{equation*} Indexed by $\Gamma,$ the candidates for $\underline{\theta}_{0, m}$ are then given by \begin{equation*} \underline{\theta}_{0, m}(\Gamma)=\frac{1}{2}\Big(\frac{\epsilon}{2\alpha}+2\Gamma-1\Big). \end{equation*} Since it must hold that $\underline{\theta}_{0, m}(\Gamma)\leq 0$, then $\Gamma\leq 0.5(1-\epsilon/2\alpha).$ Integrating Expression~\eqref{eq:QualEnvel} we have that, on the interval $[\widetilde{\theta}_{m}(\Gamma), \underline{\theta}_{0, m}(\Gamma)],$ the traders' indirect utility is given by \begin{equation}\label{eq:IndUtGamma} v(\theta;\Gamma)=\frac{2\alpha^2}{\alpha+\beta}\theta^2+2\alpha\Big[\frac{\alpha}{\alpha+\beta}(1-2\Gamma)-\frac{\epsilon}{2(\alpha+\beta)}\Big]\theta+c_{1,m}, \end{equation} where $\widetilde{\theta}_{m}(\Gamma)$ is the first intersection to the left of $\underline{\theta}_{0, m}(\Gamma)$ of $v(\cdot; \Gamma)$ and $u_0(\cdot;\pi)$ and $c_{1,m}$ is determined by the equation \begin{equation*} v\big(\underline{\theta}_{0, m}(\Gamma);\Gamma\big)=0. \end{equation*} Unless the inequality $\Gamma\leq 0.5(1-\epsilon/2\alpha)$ is tight, in which case the types below $\widetilde{\theta}_{m}(\Gamma)$ are excluded, Proposition~\ref{prop:NoJumps2} implies that $\Gamma$ must be chosen so as to satisfy the smooth--pasting condition $u_0'\big(\widetilde{\theta}_{m}(\Gamma);\pi\big)=v'\big(\widetilde{\theta}_{m}(\Gamma);\pi\big),$ which is equivalent to \begin{equation*} \widetilde{\theta}_{m}(\Gamma)=\Big[\frac{2\alpha}{\alpha+\beta}-p\Big]^{-1} \Big[\frac{\epsilon}{2(\alpha+\beta)}-\frac{\alpha}{\alpha+\beta}(1-2\Gamma)-\frac{p\pi}{2\alpha}\Big]. \end{equation*} Observe that, besides the requirement $\Gamma\geq 0.5(1-\epsilon/2\alpha),$ the strategy to determine $\overline{\theta}_{0, m}$ is exactly the same as for $\underline{\theta}_{0, m}.$ Summarizing, from Eq.~\eqref{eq:IndUtGamma} we observe that, if $\Gamma_-$ and $\Gamma_+$ correspond to the optimal choices for the negative and positive endpoints of $\Theta_{0}(\pi),$ then \begin{equation*} q'\big(\underline{\theta}_{0, m}(\Gamma_-)\big)=\frac{1}{2\alpha}v''\big(\underline{\theta}_{0, m}(\Gamma_-);\Gamma_-\big)= \frac{1}{2\alpha}v''\big(\overline{\theta}_{0, m}(\Gamma_+);\Gamma_+\big)=q'\big(\overline{\theta}_{0, m}(\Gamma_+)\big)=\frac{2\alpha}{\alpha+\beta}. \end{equation*} The spread is then \begin{equation*} [t_m(0_-), t_m(0_+)]=\frac{4\alpha^2}{\alpha+\beta}[\underline{\theta}_{0, m}(\Gamma_-),\overline{\theta}_{0, m}(\Gamma_+)]\subset \frac{4\alpha^2}{\alpha+\beta}[\underline{\theta}_0,\overline{\theta}_0], \end{equation*} i.e. the presence of a dark pool strictly narrows the spread in the dealer's market.
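The expression for $\widetilde{\theta}_{m}(\Gamma)$ follows from equating $u_0'(\theta;\pi)$ with $v'(\theta)=2\alpha\,l(\theta,\Gamma)$; the sketch below (ours, in Python with sympy) carries out this computation.
\begin{verbatim}
# Solving the smooth-pasting condition u_0'(theta) = 2*alpha*l(theta, Gamma) for theta (sketch).
import sympy as sp

theta, Gamma, pi = sp.symbols('theta Gamma pi', real=True)
alpha, beta, eps, p = sp.symbols('alpha beta epsilon p', positive=True)

u0_prime = 2*alpha*p*(theta - pi/(2*alpha))
l_gamma  = alpha/(alpha + beta)*(2*theta + 1 - 2*Gamma) - eps/(2*(alpha + beta))

theta_tilde = sp.solve(sp.Eq(u0_prime, 2*alpha*l_gamma), theta)[0]
claimed = (eps/(2*(alpha + beta)) - alpha/(alpha + beta)*(1 - 2*Gamma) - p*pi/(2*alpha)) \
          / (2*alpha/(alpha + beta) - p)
print(sp.simplify(theta_tilde - claimed))   # 0
\end{verbatim}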
\subsection{\large{An equilibrium price}}
A standard (but not unique) way in which dark--pool prices are generated is by computing the average of some publicly available best--bid and best--ask prices. In the case of the US, this is usually the mid--quote of the National Best Bid and Offer (NBBO). Borrowing from this idea we define the price--iteration in the DP as follows: \begin{equation*} \pi_{i+1}=\frac{1}{2}\big(t_i(0_+)-t_i(0_-)\big),\quad i\in\mathbb{N}, \end{equation*} where $\{t_i(0_-), t_i(0_+)\}$ are the best bid and ask prices in the DM in the presence of a DP with execution price $\pi_i.$ We know from the previous section that the sequence $\{\pi_i, i\in\mathbb{N}\}\subset ((4\alpha^2)/(\alpha+\beta))[\underline{\theta}_0,\overline{\theta}_0];$ hence, by the Bolzano--Weierstrass Theorem it has at least one convergent subsequence. The limit of each of the said subsequences will be an equilibrium price. The (possible) non--uniqueness of these prices is due to the fact that by virtue of its definition, the sequence of dark--pool prices need not be monotonic. The problem of non-uniqueness of equilibria in models of competing DMs and CNs has been observed before. We refer to \cite{DDH} for a detailed discussion.
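The price recursion itself is simple to set down once a solver for the dealer's problem is available. The skeleton below (ours, in Python; \texttt{dealer\_spread} is a hypothetical placeholder for such a solver) implements the update and records the whole trajectory, any accumulation point of which is a candidate equilibrium price.
\begin{verbatim}
# Skeleton of the dark-pool price recursion pi_{i+1} = (t_i(0_+) - t_i(0_-))/2 (sketch).
from typing import Callable, List, Tuple

def dp_price_iteration(dealer_spread: Callable[[float], Tuple[float, float]],
                       pi0: float, n_iter: int = 100) -> List[float]:
    """dealer_spread(pi) is a hypothetical placeholder: it should solve the dealer's
    problem facing a DP with execution price pi and return (t(0_-), t(0_+))."""
    prices = [pi0]
    for _ in range(n_iter):
        t_bid, t_ask = dealer_spread(prices[-1])
        prices.append(0.5*(t_ask - t_bid))   # mid-quote-style update
    return prices   # bounded sequence: any accumulation point is a candidate equilibrium

# usage sketch: prices = dp_price_iteration(my_dealer_solver, pi0=0.0)
\end{verbatim}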
\section{\large{Conclusions}}\label{sec:Conclusions}
We have presented an adverse--selection model to study the structure of the limit--order book of a dealer who provides liquidity to traders of unknown preferences. Furthermore, we have established a link between the traders' indirect--utility function and the bid--ask spread in the DM. Making use of the aforementioned link, we have studied how the presence of a type--dependent outside option impacts the spread of the DM, as well as the set of trader types who participate in the DM and their welfare. In particular, we have shown, in a portfolio--liquidation setting, that the presence of a dark pool results in a shrinkage of the spread in the DM. Finally, we have established that, under certain conditions, the feedback loop introduced by the impact that the spread has on the structure of the outside option leads to an equilibrium price.
\end{document}
\begin{document}
\title{On Change of Variable Formulas for non-anticipative functionals
}
\author{M. Mania$^{1)}$ and R. Tevzadze$^{2)}$}
\date{~} \maketitle
\begin{center} $^{1)}$ A. Razmadze Mathematical Institute of Tbilisi State University, 6 Tamarashvili Str., Tbilisi 0177; and Georgian-American University, 8 Aleksidze Str., Tbilisi 0193, Georgia, \newline(e-mail: [email protected]) \\ $^{2)}$ Georgian-American University, 8 Aleksidze Str., Tbilisi 0193, Georgia, Georgian Technical University, 77 Kostava str., 0175, Institute of Cybernetics, 5 Euli str., 0186, Tbilisi, Georgia \newline(e-mail: [email protected]) \end{center}
\begin{abstract} {\bf Abstract.} For non-anticipative functionals, differentiable in Chitashvili's sense, the It\^o formula for cadlag semimartingales is proved. Relations between different notions of functional derivatives are established. \end{abstract}
\noindent {\it 2010 Mathematics Subject Classification. 90A09, 60H30, 90C39}
\
\noindent {\it Keywords}: The It\^o formula, semimartingales, non-anticipative functionals, functional derivatives
\section{Introduction}
The classical It\^o \cite{ito} formula shows that for a sufficiently smooth function\\ $(f(t,x), t\ge0, x\in R)$ the transformed process $f(t,X_t)$ is a semimartingale for any semimartingale $X$ and provides a decomposition of the process $f(t,X_t)$ as a sum of stochastic
integral relative to $X$ and a process of finite variation. This formula is applicable to functions of the current value of semimartingales, but in many applications, such as statistics of random processes, stochastic optimal control or mathematical finance, uncertainty affects through the whole history of the process and it is necessary to consider functionals of entire path of a semimartingale.
In 2009 Dupire \cite{Dupire} proposed a method to extend the It\^o formula for non-anticipative functionals
using naturally defined pathwise time and space derivatives. The space derivative measures the sensitivity of a functional $f:D([0,T], R)\to R$ to a variation in the endpoint of a path $\omega\in D([0,T], R)$ and is defined as a limit $$ \partial_\omega f(t,\omega)=\lim_{h\to 0}\frac{f(t,\omega+hI_{[t,T]})-f(t,\omega)}{h}, $$ if this limit exists, where $D([0,T])$ is the space of RCLL ( right continuous with left limits) functions. Similarly is defined the second order space derivative $\partial_{\omega\omega}f:= \partial_{\omega}(f_{\omega}).$
The definition of the time derivative is based on the flat extension of a path $\omega$ up to time $t+h$ and is defined as a limit $$ \partial_t f(t,\omega)=\lim_{h\to 0+}\frac{f(t+h,\omega^t)-f(t,\omega)}{h}, $$ whenever this limit exists, where $\omega^t=\omega(.\wedge t)$ is the path of $\omega$ stopped at time $t$.
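Both pathwise derivatives are easy to approximate by finite differences on a discretized path. The sketch below (ours, in Python) uses the illustrative functional $f(t,\omega)=\omega_t\int_0^t\omega_s ds$, for which $\partial_\omega f(t,\omega)=\int_0^t\omega_s ds$ and $\partial_t f(t,\omega)=\omega_t^2$.
\begin{verbatim}
# Finite-difference approximation of Dupire's pathwise derivatives (illustrative sketch).
import numpy as np

T, N = 1.0, 1000
dt = T/N
grid = np.linspace(0.0, T, N + 1)
omega = np.sin(2*np.pi*grid)                     # an arbitrary smooth test path

def f(k, w):
    """f(t_k, w) = w(t_k) * int_0^{t_k} w(s) ds (left Riemann sum on the grid)."""
    return w[k]*np.sum(w[:k])*dt

k, h = 3*N//10, 1e-6                             # evaluate the derivatives at t = 0.3

bumped = omega.copy(); bumped[k:] += h           # omega + h*1_{[t,T]}
d_space = (f(k, bumped) - f(k, omega))/h         # vertical (space) derivative

stopped = omega.copy(); stopped[k:] = omega[k]   # omega stopped at t
d_time = (f(k + 1, stopped) - f(k, omega))/dt    # horizontal (time) derivative

print(d_space, np.sum(omega[:k])*dt)             # both close to int_0^t omega_s ds
print(d_time, omega[k]**2)                       # both close to omega_t^2
\end{verbatim}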
If a continuous non-anticipative functional $f$ is from $C^{1,2}$ , i.e., if $\partial_t f, \partial_\omega f$, $\partial_{\omega\omega}f$ exist and are continuous
with respect to the metric $d_\infty$ (defined in section 2)
and $X$ is a continuous semimartingale, Dupire \cite{Dupire} proved that the process $f(t,X)$ is also a semimartingale and $$ f(t, X)=f(0, X)+\int_0^t\partial_t f(s,X)ds+\int_0^t\partial_\omega f(s,X)dX_s $$ \begin{equation}\label{itoc} +\frac{1}{2}\int_0^t\partial_{\omega\omega}f(s, X)d\langle X\rangle_s. \end{equation} For the special case of $f(t,X_t)$ these derivatives coincide with the usual space and time derivatives and the above formula
reduces to the standard It\^o formula. Earlier related works are those by Ahn \cite{ahn} and Tevzadze \cite{T2}, where It\^o's formula was derived in very particular cases of functionals that assume knowledge of the whole path without path-dependent dynamics. Further works extending this theory, and the corresponding references, can be found in \cite{CF1}, \cite{CF2}, \cite{LScS}, \cite{O}.
Motivated by applications in stochastic optimal control, before Dupire's work, Chitashvili (1983) defined differentiability of non-anticipative functionals in a different way and proved the corresponding It\^o formula for continuous semimartingales. His definition is based
on a ``hypothetical'' change of variable formula for continuous functions of finite variation.
We formulate Chitashvili's definition of differentiability and present his change of variable formula in a simplified form and for the one-dimensional case.
Let $C_{[0,T]}$ be the space of continuous functions on $[0,T]$ equipped with the uniform norm. Let $f(t,\omega)$ be a non-anticipative continuous mapping of $C_{[0,T]}$ into $C_{[0,T]}$ and denote by ${\cal V}_{[0,T]}$ the space of functions of finite variation on $[0,T]$.
A continuous non-anticipative functional $f$ is differentiable if there exist continuous functionals $f^0$ and $f^1$ such that for all $\omega\in C_{[0,T]}\cap {\cal V}_{[0,T]}$ \begin{equation}\label{chd} f(t,\omega)=f(0,\omega)+\int_0^tf^0(s,\omega)ds+\int_0^tf^1(s,\omega)d\omega_s. \end{equation} A functional $f$ is two times differentiable if $f^1$ is differentiable, i.e., if there exist continuous functionals $f^{1,0}$ and $f^{1,1}$ satisfying \begin{equation}\label{chd2} f^1(t,\omega)=f^1(0,\omega)+\int_0^tf^{1,0}(s,\omega)ds+\int_0^tf^{1,1}(s,\omega)d\omega_s. \end{equation}
for all $\omega\in C_{[0,T]}\cap {\cal V}_{[0,T]}$.
Here the functionals $f^0$, $f^1$ and $f^{1,1}$ play the roles of the time, space and second order space derivatives, respectively.
It was proved by Chitashvili \cite{Ch} that if the functional $f$ is two times differentiable then the process $f(t,X)$ is a semimartingale for any continuous semimartingale $X$ and is represented as $$ f(t, X)=f(0, X)+\int_0^tf^0(s,X)ds+\int_0^tf^1(s,X)dX_s $$ \begin{equation}\label{itoc} +\frac{1}{2}\int_0^tf^{1,1}(s, X)d\langle X\rangle_s. \end{equation}
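As a simple illustration of this formula, take $f(t,\omega)=\omega_t\int_0^t\omega_s ds$. For $\omega\in C_{[0,T]}\cap{\cal V}_{[0,T]}$ we have $df(t,\omega)=\omega_t^2dt+\big(\int_0^t\omega_s ds\big)d\omega_t$, so that $f^0(t,\omega)=\omega_t^2$, $f^1(t,\omega)=\int_0^t\omega_s ds$, $f^{1,0}(t,\omega)=\omega_t$ and $f^{1,1}=0$; the formula then reduces to the identity $X_tI_t=\int_0^tX_s^2ds+\int_0^tI_sdX_s$ with $I_t=\int_0^tX_sds$, which the sketch below (ours, in Python) checks by simulation for a Brownian motion.
\begin{verbatim}
# Monte Carlo check of the formula for f(t, omega) = omega_t * int_0^t omega_s ds (sketch).
import numpy as np

rng = np.random.default_rng(0)
T, N = 1.0, 20000
dt = T/N
dW = rng.normal(0.0, np.sqrt(dt), N)
W = np.concatenate(([0.0], np.cumsum(dW)))          # Brownian path on the grid

I = np.concatenate(([0.0], np.cumsum(W[:-1]*dt)))   # I_t = int_0^t W_s ds (left sums)

lhs = W[-1]*I[-1]                                   # f(T, W)
rhs = np.sum(W[:-1]**2*dt) + np.sum(I[:-1]*dW)      # int_0^T f^0 dt + int_0^T f^1 dW
print(lhs, rhs)                                     # agree up to discretization error
\end{verbatim}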
The idea of the proof of change of variable formula (\ref{itoc}) for semimartingales is to use the change of variable formula for functions of finite variations, first for the function $f$ and then for its derivative $f^1$, before approximating a continuous semimartingale $X$ by processes of finite variation.
In the paper by Ren et al.~\cite{RTZ} a wider class of $C^{1,2}$ functionals was proposed, which is based on the It\^o formula itself. We formulate this definition in an equivalent form and in the one-dimensional case.
The function $f$ belongs to $C^{1,2}_{RTZ}$, if $f$ is a continuous non-anticipative functional on $[0,T]\times C_{[0,T]}$ and there exist continuous non-anticipative functionals $\alpha, z, \gamma$, such that \begin{equation}\label{itoc2} f(t, X)=f(0, X)+\int_0^t\alpha(s,X)ds+\int_0^tz(s,X)dX_s +\frac{1}{2}\int_0^t\gamma(s, X)d\langle X\rangle_s \end{equation} for any continuous semimartingale $X$. The functionals $\alpha, z$ and $\gamma$ also play the role of time, first and second order space derivatives respectively.
Since any process of finite variation is a semimartingale and any deterministic semimartingale is a function of finite variation, it follows from $f\in C^{1,2}_{RTZ}$ that
$f$ is differentiable in the Chitashvili sense and \begin{equation}\label{ChT} \alpha=f^0,\;\;\;z=f^1. \end{equation} Besides, any $C^{1,2}$ process in the Dupire or Chitashvili sense is in $C^{1,2}_{RTZ}$, which is a consequence of the functional It\^o formula proved in \cite{Dupire} and \cite{Ch} respectively. Although the definition of the class $C^{1,2}_{RTZ}$ does not require that $\gamma$ be (in some sense) the derivative of $z$,
if $f\in C^{1,2}$ in the Chitashvili sense, then besides equality (\ref{ChT}) we also have that $\gamma=f^{1,1}$ (i.e., $\gamma=z^1$).
Our goal is to extend the formula (\ref{itoc}) for RCLL (or cadlag in French terminology) semimartingales and to establish how Dupire's, Chitashvili's and other derivatives are related.
Since the bumped path used in the definition of Dupire's vertical derivative is not continuous even if $\omega$ is continuous, to compare derivatives defined by (\ref{chd}) with Dupire's derivatives, one should either extend Chitashvili's definition to RCLL processes or modify Dupire's derivative in such a way that perturbations of continuous paths remain continuous.
The direct extension of Chitashvili's definition of differentiability for RCLL functions is the following:
A continuous functional $f$ is differentiable, if there exist continuous functionals $f^0$ and $f^1$ (continuous with respect to the metric $d_\infty$ defined by (\ref{rho})) such that $ f(\cdot,\omega)\in {\cal V}_{[0,T]}$ for all $\omega\in {\cal V}_{[0,T]}$ and \begin{equation}\label{xvii} f(t,\omega)=f(0,\omega)+\int_0^tf^0(s,\omega)ds+\int_0^tf^1(s-,\omega)d\omega_s \end{equation} $$ +\sum_{s\le t}\big[f(s,\omega)-f(s-,\omega)-f^1(s-,\omega)\Delta\omega_s\big], $$ for all $(t,\omega)\in [0,T]\times {\cal V}_{[0,T]}$.
In order to compare Dupire's derivatives with Chitashvili's derivatives, we introduce another type of
vertical derivative where, unlike Dupire's derivative $\partial_\omega f$, the path deformations of continuous paths are also continuous.
We say that a non-anticipative functional $f(t,\omega)$ is vertically differentiable and denote this differential by $D_\omega f(t,\omega)$, if the limit \begin{equation} D_\omega f(t,\omega):=\lim_{h\to0, h>0}\frac{f(t+h,\omega^{t}+\chi_{t,h})-f(t+h,\omega^{t})}{h}, \end{equation} exists for all $(t,\omega)\in [0,T]\times {D}_{[0,T]}$, where $$ \chi_{t,h}(s)=(s-t)1_{(t,t+h]}(s)+h1_{(t+h,T]}(s). $$ Let $f(t,\omega)$ be differentiable in the sense of (\ref{xvii}). Then, as proved in Proposition 1, \begin{equation} f^0(t,\omega)=\partial_t f(t,\omega)\;\;\;\;\text{and}\;\;\;\; f^1(t,\omega)=D_\omega f(t,\omega). \end{equation}
for all $(t,\omega)\in [0,T]\times {D}_{[0,T]}$.
Thus, $f^0$ coincides with Dupire's time derivative, but $f^1$ is equal to $D_\omega f$ which is different from Dupire's vertical derivative in general. The simplest counterexample is $f(t,\omega)=\omega_t-\omega_{t-}$. It is evident that in this case $\partial_\omega f=1$ and $D_\omega f=0$. In general, if $g(t,\omega):=f(t-,\omega)$ then $D_\omega g(t,\omega)=D_\omega f(t,\omega)$ and $\partial_\omega g(t,\omega)=0$ if corresponding derivatives of $f$ exist. However, under stronger conditions, e.g. if $f\in C^{1,1}$ in the Dupire sense, then $D_\omega f$ exists and $D_\omega f=f^1=\partial_\omega f.$
The paper is organized as follows: In section 2 we extend Chitashvili's change of variable formula to RCLL semimartingales and give an application of this formula to the convergence of ordinary integrals to stochastic integrals. In section 3 we establish relations between different types of derivatives for non-anticipative functionals.
\section{The It\^o formula according to Chitashvili for cadlag semimartingales}
Let $\Omega:= D([0,T], R)$ be the set of c\`{a}dl\`{a}g paths. Denote by $\omega$ the elements of $\Omega$, by $\omega_t$ the value of $\omega$ at time $t$ and let $\omega^t=\omega(\cdot\wedge t)$ be the path of $\omega$ stopped at $t$. Let $B$ be the canonical process defined by $B_t(\omega)=\omega_t$, $\mathbb{F}=(F_t,t\in[0,T])$ the corresponding filtration and let $\Lambda:= [0,T]\times\Omega$.
The functional $f:[0,T]\times D[0,T]\to R$ is non-anticipative if $$ f(t,\omega)=f(t,\omega^t) $$ for all $\omega\in D[0,T]$, i.e., the process $f(t,\omega)$ depends only on the path of $\omega$ up to time $t$ and is $\mathbb{F}$- adapted.
Following Dupire, we define semi-norms on $\Omega$ and a pseudo-metric on $\Lambda$ as follows: for any $(t, \omega), ( t', \omega') \in\Lambda$,
\begin{eqnarray} \label{rho}
\|\omega\|_{t}&:=& \sup_{0\le s\le t} |
\omega_s|,\nonumber\\[-8pt]\\[-8pt] d_\infty\bigl((t, \omega),\bigl(
t', \omega'\bigr) \bigr)&:=& \bigl|t-t'\bigr| +
\sup_{0\le s\le T} \bigl|\omega_{t\wedge s} - \omega'_{t'\wedge s}\bigr|.\nonumber \end{eqnarray}
Then $(\Omega, \|\cdot\|_{T})$ is a Banach space and $(\Lambda, d_\infty)$ is a complete pseudo-metric space. Let ${\cal V}={\cal V}[0,T]$ be the set of finite variation paths from $\Omega$. Note that, if $f\in C(\Lambda)$, then $\Delta \omega_t=0$ implies $f(t,\omega)-f(t-,\omega)=0$, since $d_\infty((t_n,\omega),(t,\omega))\to 0$ when $t_n\uparrow t$. Hence $f(t,\omega)-f(t-,\omega)\neq 0$ implies $\Delta \omega_t\neq 0$.
Note that any functional $f:[0,T]\times\Omega\to R$ continuous with respect to $d_\infty$ is non-anticipative. In this paper we consider only $d_\infty$-continuous, and hence non-anticipative, functionals.
{\bf {Definition 1.}} We say that a continuous functional $f\in C([0,T]\times \Omega)$ is differentiable , if there exist $f^0\in C([0,T]\times \Omega)$ and $f^1\in C([0,T]\times \Omega)$ such that for all $\omega\in {\cal V}$ the process $ f(t,\omega)$ is of finite variation and \begin{equation}\label{xv} f(t,\omega)=f(0,\omega)+\int_0^tf^0(s,\omega)ds+\int_0^tf^1(s-,\omega)d\omega_s \end{equation} $$ +\sum_{s\le t}\big[f(s,\omega)-f(s-,\omega)-f^1(s-,\omega)\Delta\omega_s\big], $$ for all $(t,\omega)\in [0,T]\times\cal V$.
A functional $f$ is two times differentiable if $f^1$ is differentiable, i.e., if there exist $f^{1,0}\in C([0,T]\times \Omega)$ and $f^{1,1}\in C([0,T]\times \Omega)$ such that for all $(t,\omega)\in [0,T]\times\cal V$ \begin{equation}\label{two} f^1(t,\omega)=f^1(0,\omega)+\int_0^tf^{1,0}(s,\omega)ds+\int_0^tf^{1,1}(s-,\omega)d\omega_s + V^1(t,\omega), \end{equation} where $$ V^1(t,\omega)=\sum_{s\le t}\big(f^1(s,\omega)-f^1(s-,\omega)-f^{1,1}(s-,\omega)\Delta\omega_s\big). $$
Now we give a generalization of Theorem 2 from Chitashvili \cite{Ch}
for general cadlag (RCLL) semimartingales.
\begin{thr}
Let $f$ be two times differentiable in the sense of Definition 1 and assume that for some $K>0$ \begin{equation}\label{v}
|f(t,\omega)-f(t-,\omega)-f^1(t-,\omega)\Delta\omega_t|\le K(\Delta\omega_t)^2,\;\; \forall\omega\in\cal V. \end{equation}
Then for any semimartingale $X$ the process $f(t,X)$ is a semimartingale and $$ f(t, X)=f(0, X)+\int_0^tf^0(s,X)ds+\int_0^tf^1(s-,X)dX_s $$ \begin{equation}\label{ito} +\frac{1}{2}\int_0^tf^{1,1}(s, X)d\langle X^c\rangle_s+\sum_{s\le t}\big[f(s,X)-f(s-,X)-f^1(s-,X)\Delta X_s\big]. \end{equation} \end{thr} {\it Proof.} Let us first assume that $X$ is a semimartingale with the decomposition \begin{equation}\label{dec0} X_t=A_t+M_t, t\in[0,T], \end{equation} where $M$ is a continuous local martingale and $A$ is a process of finite variation having only a finite number of jumps, i.e., the jumps of $A$ are exhausted by the graphs of a finite number of stopping times $(\tau_i, 1\le i\le l, l<\infty)$.
Let $X_t^n= A_t+M^n_t$ and \begin{equation} M^n_t= n\int_0^tM_s\exp(-n(\langle M\rangle_t-\langle M\rangle_s))d\langle M\rangle_s. \end{equation} It is proved in \cite{Ch} that \begin{equation}\label{mc}
\sup_{s\le t}|M^n_s-M_s|\to 0, \;\;\;as\;\;n\to\infty,\;\;\; a.s. \end{equation}
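The convergence (\ref{mc}) is easy to observe numerically. The sketch below (ours, in Python) simulates a Brownian path, so that $\langle M\rangle_s=s$, computes $M^n$ through the exponential smoothing recursion satisfied by the defining integral on a grid (with $M$ frozen on each grid cell), and reports the uniform error for increasing $n$.
\begin{verbatim}
# Numerical illustration of sup_{s<=t}|M^n_s - M_s| -> 0 for a Brownian path (sketch).
import numpy as np

rng = np.random.default_rng(1)
T, N = 1.0, 50000
dt = T/N
M = np.concatenate(([0.0], np.cumsum(rng.normal(0.0, np.sqrt(dt), N))))   # <M>_s = s

def smooth(M, n, dt):
    """Discretization of M^n_t = n*int_0^t M_s exp(-n(t-s)) ds (M frozen on each cell)."""
    Mn = np.zeros_like(M)
    a = np.exp(-n*dt)
    for k in range(len(M) - 1):
        Mn[k + 1] = a*Mn[k] + (1.0 - a)*M[k]
    return Mn

for n in (10, 100, 1000):
    print(n, np.max(np.abs(smooth(M, n, dt) - M)))   # the uniform error shrinks as n grows
\end{verbatim}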
Since $X^n$ is of bounded variation, $f$ is differentiable and $\Delta X^n_t=\Delta A_t=\Delta X_t$, it follows from (\ref{xv}) that $$ f(t,X ^n)=f(0, X)+\int_0^tf^0(s,X^n)ds $$ $$ +\int_0^tf^1(s-,X^n)dX_s +\int_0^tf^1(s-,X^n)d(M^n_s-M_s) $$ \begin{equation}\label{itod} +\sum_{s\le t}\big(f(s,X^n)-f(s-,X^n)-f^1(s-,X^n)\Delta X_s\big). \end{equation}
Since $X$ admits a finite number of jumps, by continuity of $f$ and $f^1$,
\begin{equation}\label{jumpb} \sum_{s\le t}\big(f(s,X^n)-f(s-,X^n)-f^1(s-,X^n)\Delta X_s\big)\to \end{equation} $$ \to\sum_{s\le t}\big(f(s,X)-f(s-,X)-f^1(s-,X)\Delta X_s\big) $$
The continuity of $f, f^0, f^1$ and relation (\ref{mc}) imply that \begin{equation}\label{fxn1a} f(t,X^n)\to f(t,X),\;\;\;as\;\;n\to\infty,\;\;\; a.s., \end{equation} \begin{equation}\label{fxn22a} \int_0^tf^0(s,X^n)ds\to\int_0^tf^0(s,X)ds\;\;\;as\;\;n\to\infty,\;\;\; a.s. \end{equation} by the dominated convergence theorem and \begin{equation}\label{fxn4a} \int_0^tf^1(s-,X^n)dX_s\to\int_0^tf^1(s-,X)dX_s\;\;\;as\;\;n\to\infty,\;\;\; a.s. \end{equation} by the dominated convergence theorem for stochastic integrals. Here we may use the dominated convergence theorem, since by continuity of $f^i$ $(i=0,1)$ the process
$\sup_{n, s\le t}|f^i(s-, X^n)|$ is locally bounded (see Lemma A1).
Let us show now that \begin{equation}\label{fx12aa} \int_0^tf^1(s-,X^n)d(M^n_s-M_s)\to\frac{1}{2}\int_0^tf^{1,1}(s,X)d\langle M\rangle_s. \end{equation} Integration by parts and (\ref{two}) give $$ \int_0^tf^1(s,X^n)d(M^n_s-M_s)=(M^n_t-M_t)f^1(t,X^n)- $$ $$ -\int_0^t(M^n_s-M_s)f^{1,0}(s,X^n)ds-\int_0^t(M^n_s-M_s)f^{1,1}(s-,X^n)dA_s $$ $$ -\int_0^t(M^n_s-M_s)f^{1,1}(s-,X^n)dX^n_s-\int_0^t(M^n_s-M^c_s)dV^1(s, X^n)= $$ \begin{equation}\label{i5} =I^1_t(n)+I^2_t(n)+I^3_t(n)+I^4_t(n) +I_t^5(n). \end{equation}
$I^1_t(n)\to 0$ (as $n\to\infty$, a.s.) by continuity of $f^1$ and (\ref{mc}).
$I^2_t(n)$ and $I_t^3(n)$ tend to zero (as $n\to\infty$, a.s.) by continuity of $f^{1,0}$ and $f^{1,1}$, relation (\ref{mc}) and by the dominated convergence theorem (using the same arguments as in (\ref{fxn22a})-(\ref{fxn4a})).
Moreover, since $A$ admits a finite number of jumps at $(\tau_i, 1\le i\le l)$ \begin{equation}\label{jump2} I_t^5(n)=\sum_{s\le t}(M^n_s-M_s)\big(f^1(s,X^n)-f^1(s-,X^n)-f^{1,1}(s-,X^n)\Delta A_s\big) \end{equation} $$ =\sum_{i\le l}(M^n_{\tau_i}-M_{{\tau_i}})\big(f^1(\tau_i,X^n)-f^1(\tau_i-,X^n)-f^{1,1}(\tau_i-,X^n)\Delta A_{\tau_i}\big) $$ $$
\le \sup_{s\le t}|M^n_s-M_s|\big(2l\sup_{n, s\le t}|f^1(s,X^n)|+\sup_{n, s\le t}|f^{1,1}(s,X^n)|\sum_{i\le l}|\Delta A_{\tau_i}|\big)\to 0, $$ as $n\to\infty$, since the continuity of $f^1, f^{1,1}$, relation (\ref{mc}) and Lemma A1 imply that
$\sup_{n, s\le t}|f^1(s,X^n)|+\sup_{n, s\le t}|f^{1,1}(s,X^n)|<\infty$ (a.s.)
Let us consider now the term $$ I_t^4(n)=\int_0^t(M_s-M^n_s)f^{1,1}(s,X^n)dM^n_s $$ Let $$ K^n_t=\int_0^t(M_s-M^n_s)dM^n_s. $$ Using the formula of integration by parts we have $$ K^n_t=-\frac{1}{2}(M_t^n)^2+M_t M_t^n-\int_0^tM_s^ndM_s $$ and it follows from (\ref{mc}), the dominated convergence theorem and equality $M_t^2=2\int_0^tM_sdM_s+\langle M\rangle_t$, that \begin{equation}\label{kn}
\sup_{s\le t}|K^n_s-\frac{1}{2}\langle M\rangle_s|\to 0, \;\;\;as\;\;n\to\infty,\;\;\; a.s. \end{equation} From the definition of $M^n$, using the formula of integration by parts, it follows that $M^n$ admits the representation $$ M^n_t=n\int_0^t(M_s-M^n_s)d\langle M\rangle_s. $$ Therefore $$ K^n_t=n\int_0^t(M_s-M^n_s)^2d\langle M\rangle_s. $$ This implies that $K^n$ is a sequence of increasing processes, which is stochastically bounded by (\ref{kn}) (i.e., it satisfies the condition UT from \cite{JMP}), and by Theorem 6.2 of \cite{JMP} (it follows also from Lemma 12 of \cite{CF1}) $$ \int_0^t(M_s-M^n_s)f^{1,1}(s,X^n)dM^n_s= $$ $$ =\int_0^tf^{1,1}(s,X^n)dK^n_s\to\frac{1}{2}\int_0^tf^{1,1}(s,X)d\langle M\rangle_s,\;\;\;n\to\infty, $$ which (together with (\ref{i5})) implies the convergence (\ref{fx12aa}). Therefore, the formula (\ref{ito}) for the process $X$ with decomposition (\ref{dec0}) follows by passage to the limit in (\ref{itod}) using relations (\ref{jumpb})-(\ref{fx12aa}). Note that in this case condition (\ref{v}) is not needed.
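The limit $K^n_t\to\frac{1}{2}\langle M\rangle_t$, which is the source of the factor $\frac{1}{2}$ in the It\^o correction, can also be observed numerically for a Brownian path, where it reads $K^n_T\to T/2$. A short sketch (ours, in Python; the grid must resolve the smoothing scale, i.e. $n\,dt\ll 1$):
\begin{verbatim}
# Numerical illustration of K^n_T = n*int_0^T (M_s - M^n_s)^2 d<M>_s -> <M>_T/2 (sketch).
import numpy as np

rng = np.random.default_rng(2)
T, N = 1.0, 100000
dt = T/N
M = np.concatenate(([0.0], np.cumsum(rng.normal(0.0, np.sqrt(dt), N))))

def smooth(M, n, dt):
    Mn = np.zeros_like(M)
    a = np.exp(-n*dt)
    for k in range(len(M) - 1):
        Mn[k + 1] = a*Mn[k] + (1.0 - a)*M[k]
    return Mn

for n in (100, 500, 2000):                            # keep n*dt small
    Kn_T = n*np.sum((M[:-1] - smooth(M, n, dt)[:-1])**2)*dt
    print(n, Kn_T)                                    # values close to <M>_T/2 = 0.5
\end{verbatim}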
Let us now consider the general case. Any semimartingale $X$ admits a decomposition $X_t=A_t+M_t$, where $A$ is a process of finite variation and $M$ is a locally square integrable martingale (such a decomposition is not unique, but the continuous martingale parts coincide for all such decompositions of $X$, which is sufficient for our goals); see \cite{J}. Let $M_t=M_t^c+M^d_t$, where $M^c$ and $M^d$ are the continuous and purely discontinuous martingale parts of $M$, respectively. Let $A_t=A_t^c+A_t^d$ be the decomposition of $A$, where $A^c$ and $A^d$ are continuous and purely discontinuous processes of finite variation, respectively. Note that $A^d$ is the sum of its jumps, whereas $M^d$ is the sum of the compensated jumps of $M$. So, we shall use the decomposition \begin{equation}\label{dec1} X_t=A_t^c+A_t^d+M_t^c+M_t^d \end{equation} for $X$ and, using localization arguments, without loss of generality, one can assume that $M^c$ and $M^d$ are square integrable martingales.
Let $M^d_t(n)$ be the compensated sum of jumps of $M$ of amplitude greater than $1/n$, which is a martingale of finite variation and is expressed as a difference \begin{equation}\label{jump} M^d_t(n)=B^n_t-\widetilde{ B_t^n}, \end{equation}
where $B^n_t=\sum_{s\le t}\Delta M_sI_{(|\Delta M_s|\ge 1/n)}$ and $\widetilde{B^n}$ is the dual predictable projection of $B^n$. It can be expressed also as compensated stochastic integral (see \cite{DM}) $$
M^d_t(n)=\int_0^tI_{(|\Delta M_s|>\frac{1}{n})}{}_{\overset{\bullet}C}dM_s, $$ where by $H{}_{\overset{\bullet}C}Y$ we denote the compensated stochastic integral. Since $$
M^d_t(n)-M_t^d=\int_0^tI_{(0<|\Delta M_s|\le\frac{1}{n})}{}_{\overset{\bullet}C}dM_s, $$ it follows from Doob's inequality and from \cite{DM} (theorem 33, Ch.VIII) that $$
E\sup_{s\le t}|M_s^d(n)-M_s^d|^2\le const E[M^d(n)-M^d]_t= const E[I_{(0<|\Delta M|\le\frac{1}{n})}{}_{\overset{\bullet}C}M] $$ $$
\le const E\int_0^tI_{(0<|\Delta M_s|\le\frac{1}{n})}d[M]_s\to 0, \;\;\;as\;\;n\to\infty $$ by dominated convergence theorem, since $E[M^d]_T<\infty$. Hence \begin{equation}\label{md}
\sup_{s\le t}|M^d_s(n)-M^d_s|\to 0, \;\;\;as\;\;n\to\infty,\;\;\; a.s. \end{equation} for some subsequence, for which we preserve the same notation.
Let $$
A_t^d(n)=\sum_{s\le t}I_{(|\Delta A_s|>\frac{1}{n})}\Delta A_s=\int_0^tI_{(|\Delta A_s|>\frac{1}{n})}dA_s. $$ Since $$
|A^d_t-A_t^d(n)|\le\int_0^tI_{(0<|\Delta A_s|\le\frac{1}{n})}|dA_s| $$ we have that \begin{equation}\label{ad}
\sup_{s\le t}|A^d_s(n)-A^d_s|\to 0, \;\;\;as\;\;n\to\infty,\;\;\; a.s. \end{equation}
Let $$ X^n_t= A^c_t+A_t^d(n)+M_t^d(n)+M_t^c. $$ Relations (\ref{md}) and (\ref{ad}) imply that \begin{equation}\label{x}
\sup_{s\le t}|X^n_s-X_s|\to 0, \;\;\;as\;\;n\to\infty,\;\;\; a.s. \end{equation} Thus, $X^n$ is the sum of the continuous local martingale $M^c$ and a process of finite variation $A^c_t+A_t^d(n)+M_t^d(n)$ which admits only a finite number of jumps for every $n\ge 1$.
Therefore, as already proved, $$ f(t,X^n)=f(0,X^n)+\int_0^tf^0(s,X^n)ds+\int_0^tf^1(s-,X^n)dX_s $$ $$ +\int_0^tf^1(s-,X^n)d(M^d_s(n)-M_s^d)+\int_0^tf^1(s-,X^n)d(A^d_s(n)-A_s^d) $$ $$ +\frac{1}{2}\int_0^tf^{1,1}(s, X)d\langle X^c\rangle_s $$ \begin{equation}\label{fxnv} +\sum_{s\le t}\big(f(s,X^n)-f(s-,X^n)-f^1(s-,X^n)\Delta X^n_s\big). \end{equation}
By the continuity of $f, f^0$ and $f^1$ \begin{equation}\label{fxn1} f(t,X^n)\to f(t,X),\;\;\;as\;\;n\to\infty,\;\;\; a.s., \end{equation} \begin{equation}\label{fxn22} \int_0^tf^0(s,X^n)ds\to\int_0^tf^0(s,X)ds\;\;\;as\;\;n\to\infty,\;\;\; a.s. \end{equation} by the dominated convergence theorem, and \begin{equation}\label{fxn4} \int_0^tf^1(s-,X^n)dX_s\to\int_0^tf^1(s-,X)dX_s\;\;\;as\;\;n\to\infty,\;\;\; a.s. \end{equation} by the dominated convergence theorem for stochastic integrals (using the same arguments as in (\ref{fxn22a})-(\ref{fxn4a})).
By properties of compensated stochastic integrals $$
\int_0^tf^1(s-,X^n)d(M^d_s(n)-M^d_s)=\int_0^tf^1(s-,X^n)I_{(0<|\Delta M_s|\le\frac{1}{n})}{}_{\overset{\bullet}C}dM_s $$ and using theorem 33, Ch. VIII from \cite{DM} $$
E\big(\int_0^tf^1(s-,X^n)I_{(0<|\Delta M_s|\le\frac{1}{n})}{}_{\overset{\bullet}C}dM_s\big)^2 $$ \begin{equation}\label{fx}
\le const E\int_0^t(f^1(s-,X^n))^2I_{(0<|\Delta M_s|\le\frac{1}{n})}d[M^d]_s\to 0\;\;\;as\;\;n\to\infty \end{equation} by the dominated convergence theorem, since
$\sup_{n, s\le t}(f^1(s,X^n))^2$ is locally bounded (by Lemma A1 from the appendix), $I_{(0<|\Delta M_s|\le\frac{1}{n})}\to 0$ and $E[M^d]_T<\infty$.
Similarly, $\int_0^tf^1(s-,X^n)d(A^d_s(n)-A_s^d)$ also tends to zero, since \begin{equation}\label{fxan}
\Big|\int_0^tf^1(s-,X^n)d(A^d_s(n)-A_s^d)\Big|\le \int_0^t|f^1(s-,X^n)|I_{(0<|\Delta A_s|\le\frac{1}{n})}|dA_s|\to 0. \end{equation}
From (\ref{jump}) $$
\Delta M^d_s(n)=\Delta M_sI_{(|\Delta M_s|\ge 1/n)} - \big( \Delta MI_{(|\Delta M|\ge 1/n)}\big)_s^p, $$ where $Y^p$ denotes the predictable projection of $Y$. Here we used the fact that the jump of the dual predictable projection of $B^n$ is the predictable projection of the jump, i.e. $\Delta\widetilde{B^n}_t=(\Delta B^n)_t^p$. Therefore, using condition (\ref{v}) we have that $$
|f(s,X^n)-f(s-,X^n)-f^1(s-,X^n)\Delta X^n_s|\le const. (\Delta X^n_s)^2 $$ $$
= const. \big(\Delta A_sI_{(|\Delta A_s|\ge 1/n)}+\Delta M_sI_{(|\Delta M_s|\ge 1/n)} - ( \Delta MI_{(|\Delta M|\ge 1/n)})_s^p\big)^2 $$ \begin{equation}\label{jump2} \le 3 const.\big( (\Delta A_s)^2+(\Delta M_s)^2+ E( (\Delta M_s)^2/F_{s-})\big). \end{equation}
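The last estimate combines the elementary inequality $(a+b+c)^2\le 3(a^2+b^2+c^2)$ with the conditional Jensen inequality, identifying, as implicitly done in (\ref{jump2}), the predictable projection evaluated at $s$ with the conditional expectation given $F_{s-}$ (this is only a sketch of the two bounds being used):
$$
\big(\Delta M_sI_{(|\Delta M_s|\ge 1/n)}\big)^2\le(\Delta M_s)^2,\qquad
\Big(E\big(\Delta M_sI_{(|\Delta M_s|\ge 1/n)}/F_{s-}\big)\Big)^2\le E\big((\Delta M_s)^2/F_{s-}\big).
$$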
Since it follows from (\ref{x}) and the continuity of $f$ and $f^1$ that $$ f(s,X^n)-f(s-,X^n)-f^1(s-,X^n)\Delta X^n_s\to f(s,X)-f(s-,X)-f^1(s-,X)\Delta X_s $$ and $$ \sum_{s\le t}\big ( (\Delta A_s)^2+(\Delta M_s)^2+ E( (\Delta M_s)^2/F_{s-})\big ) < \infty, $$ the dominated convergence theorem implies that $$ \sum_{s\le t}\big(f(s,X^n)-f(s-,X^n)-f^1(s-,X^n)\Delta X^n_s\big) $$ \begin{equation}\label{jump3} \to\sum_{s\le t}\big(f(s,X)-f(s-,X)-f^1(s-,X)\Delta X_s\big), \;\;\;as\;\;\;n\to\infty. \end{equation}
Therefore, passing to the limit in (\ref{fxnv}) it follows from (\ref{fxn1})-(\ref{jump3}) that (\ref{ito}) holds.\qed
Now we give one application of the change of variable formula (\ref{ito}) to the convergence of stochastic integrals. If $g(t,x)$, $t\ge0$, $x\in R$, is a function of two variables admitting continuous partial derivatives $\partial g(t,x)/\partial t$, $\partial g(t,x)/\partial x$ and $V^n$ is a sequence of processes of finite variation converging to the Wiener process, then it was proved by Wong and Zakai \cite{WZ} that the sequence of ordinary integrals $\int_0^tg(s,V^n_s)dV^n_s$ converges to the Stratonovich stochastic integral. The following assertion generalizes this result to non-anticipative functionals $g(t,\omega)$.
{\bf Corollary}. Assume that $f(t,\omega)$ is differentiable in the sense of Definition 1 and that there is a functional $F(t,\omega)$, continuous on $[0,T]\times D([0,T])$, such that \begin{equation}\label{str0} F(t,\omega)=\int_0^tf(s-,\omega)d\omega_s \end{equation} for all $\omega\in{\cal V}_{[0,T]}$. Let $X$ be a cadlag semimartingale and let $(V^n,n\ge1)$ be a sequence of processes of finite variation converging to $X$ uniformly on $[0, T]$. Then \begin{equation}\label{str} \lim_{n\to\infty}\int_0^tf(s-, V^n)dV^n_s= \int_0^tf(s-,X)dX_s +\frac{1}{2}\int_0^tf^{1}(s, X)d\langle X^c\rangle_s. \end{equation} {\it Proof}: By the continuity of $F$ and (\ref{str0}) \begin{equation}\label{str1} \lim_{n\to\infty}\int_0^tf(s-, V^n)dV^n_s=\lim_{n\to\infty}F(t,V^n)=F(t,X). \end{equation} It is evident that $$ F^1(t,\omega)=f(t,\omega),\;\; F^0(t,\omega)=0\;\;\text{and}\;\;\;F(t,\omega)-F(t-,\omega)-F^1(t-,\omega)\Delta\omega_t=0. $$ Thus, $F$ is two times differentiable in the sense of Definition 1 and condition (\ref{v}) is automatically satisfied. Therefore, by the It\^o formula (\ref{ito}) $$
F(t,X)=\int_0^tf(s-,X)dX_s +\frac{1}{2}\int_0^tf^{1}(s, X)d\langle X^c\rangle_s, $$ which, together with (\ref{str1}), implies the convergence (\ref{str}).\qed
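As an illustration (a standard special case; we do not verify the continuity hypothesis (\ref{str0}) here, and $g$, $W$ are auxiliary notations used only in this remark), take $f(t,\omega)=g(\omega_t)$ with $g\in C^1(R)$ and let $X=W$ be a Brownian motion. Then $f^1(t,\omega)=g'(\omega_t)$, $\langle X^c\rangle_t=t$ and (\ref{str}) formally reads
$$
\lim_{n\to\infty}\int_0^tg(V^n_{s-})dV^n_s=\int_0^tg(W_s)dW_s+\frac{1}{2}\int_0^tg'(W_s)ds,
$$
i.e. the limit is the Stratonovich integral $\int_0^tg(W_s)\circ dW_s$, recovering the Wong--Zakai result mentioned above.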
\section{The relations between various definitions of functional derivatives}
Following Dupire \cite{Dupire} we define the time and space derivatives, also called the horizontal and vertical derivatives, of non-anticipative functionals.
{\bf Definition 2}. A non-anticipative functional $f(t,\omega)$ is said to be horizontally differentiable at $(t,\omega)\in\Lambda$ if the limit \begin{equation} \label{hatpat} \partial_t f(t,\omega):= \lim_{h\to0, h>0} \frac {1}{h} \bigl[f (t+h,\omega^t )-f (t, \omega) \bigr],\qquad t<T, \end{equation} exists. If $ \partial_t f(t,\omega)$ exists for all $(t,\omega)\in\Lambda$, then the non-anticipative functional $\partial_t f$ is called the horizontal derivative of $f$.
A non-anticipative functional $f(t,\omega)$ is vertically differentiable at $(t,\omega)\in\Lambda$ if \begin{equation} \label{hatpax} \partial_{\omega} f(t,\omega):= \lim_{h\to0}\frac {1}{h} \bigl[ f(t,\omega+ h 1_{[t,T]}) - f(t,\omega) \bigr], \end{equation} exists. If $f$ is vertically differentiable at all $(t,\omega)\in\Lambda$ then the map $\partial _\omega f :\Lambda\to R$ defines a non-anticipative map,
called the vertical derivative of $f$.
Similarly one can define \begin{equation} \partial_{\omega\omega}f:= \partial_{\omega }(\partial_{\omega} f). \end{equation}
Define $C^{1,k}([0, T )\times \Omega)$ as the set of functionals $f$, which are \begin{itemize}
\item horizontally differentiable with $\partial_t f$ continuous at fixed times,
\item $k$ times vertically differentiable with continuous $\partial_{\omega}^k f$. \end{itemize} The following assertion follows from the generalized It\^o formula for cadlag semimartingales proved in \cite{CF1} (see also \cite{LScS}).
\begin{thr} Let $f\in C^{1,1}([0,T]\times \Omega)$. Then for all $(t,\omega)\in [0,T]\times \cal V$ \begin{eqnarray*} f(t,\omega)=f(0,\omega)+\int_0^t\partial_t f(s,\omega)ds+\int_0^t\partial_\omega f(s-,\omega)d\omega_s\\ +\sum_{s\le t}(f(s,\omega)-f(s-,\omega)-\partial_{\omega}f(s-,\omega)\Delta\omega_s) \end{eqnarray*} and $f(t,\omega)\in\cal V$ for all $\omega\in\cal V$. \end{thr} {\bf Corollary}. If $f\in C^{1,1}([0,T]\times \Omega)$, then $f$ is differentiable in the sense of Definition 1 and $$ \partial_tf=f^0,\;\;\;\;\partial _\omega f= f^1. $$
In order to compare Dupire's derivatives with Chitashvili's derivative (the derivative in the sense of Definition 1), we introduce another type of
vertical derivative where, unlike for Dupire's derivative $\partial_\omega f$, the path deformations of continuous paths remain continuous.
{\bf Definition 3}. We say that a non-anticipative functional $f(t,\omega)$ is vertically differentiable, with derivative denoted by $D_\omega f(t,\omega)$, if the limit \begin{equation} D_\omega f(t,\omega):=\lim_{h\to0, h>0}\frac{f(t+h,\omega^{t}+\chi_{t,h})-f(t+h,\omega^{t})}{h} \end{equation} exists for all $(t,\omega)\in [0,T]\times \Omega$, where $$ \chi_{t,h}(s)=(s-t)1_{(t,t+h]}(s)+h1_{(t+h,T]}(s). $$ The second order derivative is defined similarly: $$ D_{\omega\omega}f=D_\omega(D_\omega f). $$
Note that, if $f(t,\omega)=g(\omega_t)$ for any $\omega\in D[0,T]$, where $g=(g(x), x\in R)$ is a differentiable function, then $D_\omega f(t,\omega)$ (as well as $\partial _\omega f(t,\omega)$) coincides with $g'(\omega_t)$.
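As a further elementary illustration of the derivatives introduced above (it is not used in what follows), consider the running-integral functional $f(t,\omega)=\int_0^t\omega_s ds$. Directly from the definitions,
$$
\partial_t f(t,\omega)=\lim_{h\to0+}\frac{1}{h}\int_t^{t+h}\omega_t\,ds=\omega_t,\qquad
\partial_\omega f(t,\omega)=\lim_{h\to0}\frac{1}{h}\int_0^t h1_{[t,T]}(s)\,ds=0
$$
and
$$
D_\omega f(t,\omega)=\lim_{h\to0+}\frac{1}{h}\int_t^{t+h}(s-t)\,ds=\lim_{h\to0+}\frac{h}{2}=0,
$$
in agreement with Definition 1, for which $f^0(s,\omega)=\omega_s$ and $f^1=0$.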
\begin{prop}\label{11} Let $f\in C([0,T]\times \Omega)$ be differentiable in the sense of Definition 1, i.e., there exist $f^0, f^1\in C([0,T]\times \Omega)$, such that for all $(t,\omega)\in [0,T]\times\cal V$ \begin{equation}\label{xv22} f(t,\omega)=f(0,\omega)+\int_0^tf^0(s,\omega)ds+\int_0^tf^1(s-,\omega)d\omega_s + V(t,\omega), \end{equation} where $$ V(t,\omega):=\sum_{s\le t}\big[f(s,\omega)-f(s-,\omega)-f^1(s-,\omega)\Delta\omega_s\big] $$ is of finite variation for all $\omega\in {\cal V}$.
Then for all $(t,\omega)\in [0,T]\times D([0,T])$ \begin{equation} f^0(t,\omega)=\partial_t f(t,\omega)\;\;\;\;\text{and}\;\;\;\; f^1(t,\omega)=D_\omega f(t,\omega). \end{equation} \end{prop}
{\it Proof.} Since $\omega^t$ is constant on $[t,T]$ and $f(s,\omega^t)=f(s,\omega)$ for $s\le t$, from (\ref{xv22}) we have that for any $ \omega\in{\cal V}$
\begin{equation}\label{xv23} f(t+h,\omega^t)=f(0,\omega)+\int_0^tf^0(s,\omega)ds+\int_t^{t+h}f^0(s,\omega^t)ds+ \end{equation} $$ + \int_0^tf^1(s-,\omega)d\omega_s + V(t,\omega) $$ and \begin{equation}\label{xv24} f(t+h,\omega^t+\chi_{t,h})=f(0,\omega)+\int_0^tf^0(s,\omega)ds+ \int_0^tf^1(s-,\omega)d\omega_s+ \end{equation} $$ +\int_t^{t+h}f^0(s,\omega^t+\chi_{t,h})ds+ \int_t^{t+h}f^1(s,\omega^t+\chi_{t,h})ds+ V(t,\omega). $$ Therefore $$ \partial_t f(t,\omega)=\lim_{h\to0}\frac{f(t+h,\omega^{t})-f(t,\omega)}{h}= $$ $$ =\lim_{h\to0}\frac{1}{h}\int_{t}^{t+h}f^0(s,\omega^{t})ds= f^0(t,\omega) $$ by continuity of $f^0$.
It is evident that $\chi_{t,h}(s)\le h$ and $$\frac{\chi_{t,h}(s)-\chi_{t,0}(s)}{h}=\frac{\chi_{t,h}(s)}{h}\to 1_{[t,T]}(s)\;as\;h\to0+,\;\forall s\in [0,T].$$
Therefore, relations (\ref{xv24})-(\ref{xv23}) and the continuity of $f^1$ and $f^0$ imply that \begin{eqnarray*} D_\omega f(t,\omega)=\lim_{h\to0}\frac{f(t+h,\omega^{t}+\chi_{t,h})-f(t+h,\omega^{t})}{h}=\\ =\lim_{h\to0}\frac{1}{h}\int_{t}^{t+h}\big(f^0(s,\omega^{t}+\chi_{t,h})-f^0(s,\omega^{t})\big)ds\\ +\lim_{h\to0}\frac{1}{h}\int_{t}^{t+h}f^1(s,\omega^{t}+\chi_{t,h})ds= f^1(t,\omega) \end{eqnarray*} for any $\omega\in{\cal V}([0,T])$ and by continuity of $f^1$ this equality is true for all $\omega\in D([0,T])$.
{\bf Remark.} If $f\in C([0,T]\times \Omega)$ is two times differentiable in the sense of Definition 1, then similarly one can show that $$ f^{1,1}(t,\omega)=D_{\omega\omega}f(t,\omega). $$
{\bf Corollary 1.} Let $f\in C^{1,1}([0,T]\times \Omega)$. Then for all $(t,\omega)\in \Lambda$ \begin{eqnarray*} \partial_\omega f(t,\omega)=f^1(t,\omega)=D_\omega f(t,\omega). \end{eqnarray*}
In general $ \partial_\omega f(t,\omega)$ and $D_\omega f(t,\omega)$ are not equal.
{\bf Counterexample 1}. Let $g=(g(x),x\in R)$ be a bounded differentiable function and let $f(t,\omega)=g(\omega_t)-g(\omega_{t-})$. Then $\partial_\omega f(t,\omega)=g'(\omega_t)$ and \begin{eqnarray*} D_\omega f(t,\omega)=\lim_{h\to 0+}\frac{f(t+h,\omega^{t}+\chi_{t,h})-f(t+h,\omega^{t})}{h}=0,\;\;\text{since}\\ f(t+h,\omega^{t}+\chi_{t,h})-f(t+h,\omega^{t})=g(\omega_t+h)-g(\omega_t+h)-g(\omega_t)+g(\omega_t)=0. \end{eqnarray*} It is evident that $f\notin C^{1,1}(\Lambda)$, since $f\notin C(\Lambda)$ and $\partial_t f=\infty.$
The following assertion shows that if $f$ belongs to the class $C^{1,2}(\Lambda)$ of non-anticipative functionals, then $\partial_\omega f (t,\omega)$ and $\partial_{\omega\omega} f (t,\omega)$
are uniquely determined by the restriction of $f$ to continuous paths. This assertion is proved by Cont and Fournie \cite{CF} (see also \cite{BCC}) in a more involved way. We give a simple proof based on Proposition 1.
{\bf Corollary 2.} Let $f^1$ and $f^2$ belong to $C^{1,2}(\Lambda)$ in the Dupire sense and \begin{equation}\label{f1f2} f^1(t,\omega)=f^2(t,\omega)\;\;\;\;\text{for all}\;\;\;(t,\omega)\in [0,T]\times C([0,T]). \end{equation} Then \begin{equation}\label{f12} \partial_\omega f^1(t,\omega)=\partial_\omega f^2(t,\omega),\;\;\;\partial_{\omega\omega} f^1(t,\omega)=\partial_{\omega\omega}f^2(t,\omega) \end{equation} for all $(t,\omega)\in [0,T]\times C([0,T])$.
{\it Proof}. By Theorem 2 \begin{equation}\label{bv} f^i(t,\omega)=f^i(0,\omega)+\int_0^t\partial_t f^i(s,\omega)ds+\int_0^t\partial_\omega f^i(s,\omega)d\omega_s\;\;\;i=1,2, \end{equation}
for all $\omega\in C([0,T])\cap{\cal V}([0,T])$. It follows from Proposition 1 that $$ \partial_\omega f^i(t,\omega)=D_\omega f^i(t,\omega);\;\;\; i=1,2. $$
Since $\omega^{t}+\chi_{t,h}\in C([0,T])$ if $\omega\in C([0,T])$, by the definition of $D_\omega$ and equality (\ref{f1f2}) we have \begin{equation}\label{df12} D_\omega f^1(t,\omega)=D_\omega f^2(t,\omega)\;\;\;\;\text{for all}\;\;\;(t,\omega)\in [0,T]\times C([0,T]), \end{equation} which implies that \begin{equation} \partial_\omega f^1(t,\omega)=\partial_\omega f^2(t,\omega)\;\;\;\;\text{for all}\;\;\;(t,\omega)\in [0,T]\times C([0,T]). \end{equation} It is evident that $\partial_t f^1(t,\omega)=\partial_t f^2(t,\omega)$ for all $(t,\omega)\in [0,T]\times C([0,T])$. Therefore, comparing the It\^o formulas (\ref{itoc}) for $f^1(t,\omega)$ and $f^2(t,\omega)$ we obtain that $$ \int_t^u\partial_{\omega\omega}f^1(s,\omega)d\langle\omega\rangle_s=\int_t^u\partial_{\omega\omega}f^2(s,\omega)d\langle\omega\rangle_s $$ for any continuous semimartingale $\omega$. Dividing both parts of this equality by $\langle\omega\rangle_u-\langle\omega\rangle_t$ and passing to the limit as $u\to t$, we obtain that $\partial_{\omega\omega} f^1(t,\omega)=\partial_{\omega\omega}f^2(t,\omega)$ for any continuous semimartingale and, by continuity of $\partial_{\omega\omega} f^1(t,\omega)$ and $\partial_{\omega\omega}f^2(t,\omega)$, this equality holds for all $\omega\in C([0,T])$.
\begin{prop}\label{33} Let $f\in C([0,T]\times \Omega)$ be differentiable in the sense of Definition 1 and \begin{equation}\label{xv25}
\left|f(t,\omega)-f(t-,\omega)-\Delta\omega_tf^1(t-,\omega)\right|\le K|\Delta\omega_t|^2 \end{equation} for some $K>0$. Then $$ f^0(t,\omega)=\partial_t f(t,\omega), \;\;\;\;\forall(t,\omega)\in \Lambda, $$ $$ f^1(t,\omega)=\partial_\omega f(t,\omega),\;\;\;\;\forall \omega\in C[0,T] $$ (or for all $\omega$ continuous at $t$). \end{prop}
{\it Proof.} For $\omega\in D[0,T]$ let $\tilde\omega_s=\omega_s$ if $s<t$ and $\tilde\omega_s=\omega_{t-}+h$ if $s\ge t$, i.e. $\tilde\omega=\omega^{t-}+h1_{[t,T]}$, hence $\Delta\tilde\omega_t=h$.
Therefore, using condition (\ref{xv25}) for $\tilde\omega$ we have \begin{eqnarray*}
\left|\frac{f(t,\omega^{t-}+h1_{[t,T]})-f(t-,\omega)}{h}-f^1(t-,\omega)\right|\le K|h|,\;\forall h. \end{eqnarray*} It follows from here that $$ \lim_{h\to0}\frac{f(t,\omega^{t-}+h1_{[t,T]})-f(t-,\omega)}{h}=f^1(t-,\omega), $$ which implies that $f^1(t,\omega)=\partial_\omega f(t,\omega)$ if $\omega$ is continuous at $t$. The equality $f^0(t,\omega)=\partial_tf(t,\omega), \forall(t,\omega)\in \Lambda$, is proved in Proposition 1.\qed
Now we introduce a definition of space derivatives which can be calculated pathwise along differentiable paths. Using such derivatives, a change of variables formula for functions of finite variation is proved in Theorem 3 below, which gives sufficient conditions for the existence of derivatives in the Chitashvili sense.
{\bf Definition 4}. We say that a non-anticipative functional $f(t,\omega)$ is differentiable if the limits $f_t,\;f_\omega\in C(\Lambda)$ exist, where \begin{eqnarray*} f_t(t,\omega)=\lim_{h\to0, h>0}\frac{f(t+h,\omega^{t})-f(t,\omega)}{h}, \;\;\;\; \forall(t,\omega)\in [0,T]\times D[0,T],\\ f_{\omega}(t,\omega)=\lim_{h\to0, h>0}\frac{f(t+h,\omega)-f(t+h,\omega^{t})}{\omega_{t+h}-\omega_t},\;\;\;\;\forall(t,\omega)\in [0,T]\times C^1[0,T].\\ \end{eqnarray*}
\begin{prop}\label{22} Let $f$ be differentiable in the sense of Definition 4. Then $\forall(t,\omega)\in [0,T]\times C^1[0,T]$ \beq\label{itt} f(t,\omega)-f(0,\omega)=\int_0^tf_t(s,\omega)ds+\int_0^tf_\omega(s,\omega)d\omega_s. \eeq \end{prop} {\it Proof}. We have \begin{eqnarray*} \lim_{h\to0, h>0}\frac{f(t+h,\omega)-f(t,\omega)}{h}\\ = \lim_{h\to0+}\frac{f(t+h,\omega)-f(t+h,\omega^{t})}{{\omega_{t+h}-\omega_t}}\times \frac{{\omega_{t+h}-\omega_t}}{h} \\ +\lim_{h\to0+}\frac{f(t+h,\omega^{t})-f(t,\omega)}{h} =\omega'(t)f_\omega(t,\omega)+f_t(t,\omega),\\ \forall(t,\omega)\in [0,T]\times C^1[0,T]. \end{eqnarray*} Hence the right derivative of \begin{eqnarray*} f(t,\omega)-f(0,\omega)-\int_0^tf_t(s,\omega)ds-\int_0^tf_\omega(s,\omega)\omega'_sds \end{eqnarray*} is zero for each $\omega\in C^1$. By Lemma A2 of the appendix, formula (\ref{itt}) is satisfied.
\begin{thr} Let $f\in C(\Lambda)$ and let $f_t,f_\omega\in C(\Lambda)$ be the derivatives in the sense of Definition 4. Assume also that for any $\omega\in\cal V$ $$
\sum_{s\le t}|f(s,\omega)-f(s-,\omega)|<\infty. $$ Then \begin{eqnarray*} f(t,\omega)=f(0,\omega)+\int_0^tf_t(s,\omega)ds+\int_0^tf_\omega(s,\omega)d\omega_s^c\\ +\sum_{s\le t}(f(s,\omega)-f(s-,\omega)). \end{eqnarray*} \end{thr} {\it Proof}. For $\omega\in\cal V$ we have $\omega=\omega^c+\omega^d,\;\omega^d_t=\sum_{s\le t}\Delta\omega_s,\;\omega^c\in C$. Set
$$\omega^{d,n}_t=\sum_{s\le t,|\Delta\omega_s|>\frac1n}\Delta\omega_s,\;\omega^n=\omega^c+\omega^{d,n}.$$ It is evident that as $n\to \infty$ $$
|\omega^n-\omega|_T=|\omega^d-\omega^{d,n}|_T=\max_t\Big|\int_0^t1_{(|\Delta\omega_s|\le\frac1n)}d\omega_s^d\Big|\le\int_0^T1_{(|\Delta\omega_s|\le\frac1n)}dvar_s(\omega^d)\to 0. $$
We know that the discontinuity points of $f(\cdot,\omega)$ are also discontinuity points of $\omega$. Let $\{t_1<...<t_k\}=\{s:|\Delta\omega_s|>\frac1n\}\cup\{0,T\}$. Denote by $\omega^{\varepsilon}\in C^1$ a differentiable approximation of $\omega^c$, such that $var_T(\omega^\varepsilon-\omega^c)<\varepsilon$, and let $\omega^{n,\varepsilon}=\omega^\varepsilon+\omega^{d,n}$. Then by Proposition \ref{22} \begin{eqnarray*} f(t,\omega^{n,\varepsilon})-f(t_i,\omega^{n,\varepsilon})-\int_{t_i}^tf_t(s,\omega^{n,\varepsilon})ds-\int_{t_i}^tf_\omega(s,\omega^{n,\varepsilon})\omega^{'\varepsilon}_sds=0,\;t\in[t_i,t_{i+1}) \end{eqnarray*} and \begin{eqnarray*} f(T,\omega^{n,\varepsilon})-f(0,\omega^{n,\varepsilon})=\sum_{i\ge 1} \big(f(t_{i},\omega^{n,\varepsilon})-f(t_{i-1},\omega^{n,\varepsilon})\big)\\ =\sum \big (f(t_{i}-,\omega^{n,\varepsilon})-f(t_{i-1},\omega^{n,\varepsilon})\big )+\sum \big (f(t_i,\omega^{n,\varepsilon})-f(t_i-,\omega^{n,\varepsilon})\big )\\ =\sum\int_{t_{i-1}}^{t_i}f_t(s,\omega^{n,\varepsilon})ds+\sum\int_{t_{i-1}}^{t_i}f_\omega(s,\omega^{n,\varepsilon})\omega^{'\varepsilon}_sds+\sum \big (f(t_i,\omega^{n,\varepsilon})-f(t_i-,\omega^{n,\varepsilon})\big )\\ =\sum\int_{t_{i-1}}^{t_i}f_t(s,\omega^{n,\varepsilon})ds+\sum\int_{t_{i-1}}^{t_i}f_\omega(s,\omega^{n,\varepsilon})d\omega_s^{n,\varepsilon}\\ -\sum f_\omega(t_i-,\omega^{n,\varepsilon})\Delta\omega^{n,\varepsilon}_{t_i}+\sum \big (f(t_i,\omega^{n,\varepsilon})-f(t_i-,\omega^{n,\varepsilon})\big )\\ =\int_0^Tf_t(s,\omega^{n,\varepsilon})ds+\int_0^Tf_\omega(s,\omega^{n,\varepsilon})d\omega^{n,\varepsilon}_s\\ +\sum\big ( f(t_i,\omega^{n,\varepsilon})-f(t_i-,\omega^{n,\varepsilon})-f_\omega(t_i-,\omega^{n,\varepsilon})\Delta\omega^{n,\varepsilon}_{t_i}\big )\\ =\int_0^Tf_t(s,\omega^{n,\varepsilon})ds+\int_0^Tf_\omega(s,\omega^{n,\varepsilon})d\omega_s^\varepsilon+\sum\big ( f(t_i,\omega^{n,\varepsilon})-f(t_i-,\omega^{n,\varepsilon})\big ). \end{eqnarray*} Since $f(t,\omega^{n,\varepsilon})$ admits a finite number of jumps and $\sup_\varepsilon var_T\omega^\varepsilon<\infty$, passing to the limit as $\varepsilon\to0$ we get \begin{eqnarray*} f(T,\omega^{n})-f(0,\omega^{n})\\ =\int_0^Tf_t(s,\omega^{n})ds+\int_0^Tf_\omega(s,\omega^{n})d\omega_s^c+\sum\big ( f(t_i,\omega^n)-f(t_i-,\omega^n)\big ). \end{eqnarray*} By the continuity of the functionals $f,\;f_t,\;f_\omega$ and Lemma A1 from the appendix $$f(t,\omega^n)\to f(t,\omega),\;\int_0^tf_t(s,\omega^n)ds\to \int_0^tf_t(s,\omega)ds, $$ $$\int_0^tf_\omega(s,\omega^n)d\omega_s^c\to \int_0^tf_\omega(s,\omega)d\omega_s^c,\;as\;n\to\infty.$$
It remains to show the convergence of the sum. Since\\
$f^d(t,\omega)=\sum_{s\le t}(f(s,\omega)-f(s-,\omega))$ is of finite variation
\begin{eqnarray*} f^d(t,\omega)=\sum \big ( f(t_i,\omega^{n})-f(t_i-,\omega^{n})\big )-\sum \big (f(t_i,\omega)-f(t_i-,\omega)\big )\\
=\sum_{s\le t} (f(s,\omega)-f(s-,\omega))1_{(|\Delta\omega_s|\le\frac1n)}\\
=\int_0^t1_{(|\Delta\omega_s|\le\frac1n)}df^d(s,\omega)\to 0,\; as\;n\to\infty,
\end{eqnarray*} by the dominated convergence theorem.
{\bf Corollary.} If $f$ satisfies the conditions of Theorem 3, then $f$ is differentiable in the sense of Definition 1.
\section{Appendix}
The following lemma is a modification of lemma 6 of \cite{LScS}.
{\bf Lemma A1}. Let $X_n,X\in \Omega$ be a sequence of paths, such that $||X_n-X||_T\to 0$ as $n\to\infty$. Let $f\in C(\Lambda)$. Then
$$\sup_{t\le T}|f(t,X_n)-f(t,X)|\overset{n\to\infty}\to 0.$$
{\it Proof}. If not, then there exist $\varepsilon > 0$, a sequence of integers $n_k, k=1,2,...$, and a sequence $s_k\in [0, T ]$ such that \beq\label{uni}
|f(s_k, X_{n_k})-f(s_k, X)| \ge \varepsilon. \eeq By passing to a subsequence we can assume without loss of generality that either $s_k \to s^*,\;s_k\ge s^*$ or $s_k \to s^*,\;s_k< s^*$ for some $s^*\in[0, T]$. In the first case, by the continuity assumption, we get \begin{eqnarray*}
|f(s_k, X_{n_k})-f(s_k, X)|\le |f(s_k, X_{n_k})-f(s^*, X)|\\+|f(s_k, X)-f(s^*, X)| \to 0, \end{eqnarray*} since $d_\infty((s_k,X_{n_k}),(s^*, X))\to 0,\;d_\infty((s_k,X),(s^*, X))\to 0$ .
In the second case we have \begin{eqnarray*}
|f(s_k, X_{n_k})-f(s_k, X)|
\le |f(s_k, X_{n_k})-f(s^*, X^{s^*-})|\\
+|f(s_k, X)-f(s^*, X^{s^*-})|\to 0, \end{eqnarray*} since $d_\infty((s_k, X_{n_k}),(s^*, X^{s^*-}))\to 0,\;d_\infty((s_k, X),(s^*, X^{s^*-}))\to 0$ . This contradicts (\ref{uni}).
We shall also need the following assertion.
{\bf Lemma A2}. Let $f$ be a real-valued, continuous function, defined on an arbitrary interval $I$ of the real line. If $f$ is right (or left) differentiable at every point $a \in I$, which is not the supremum (infimum) of the interval, and if this right (left) derivative is always zero, then $f$ is a constant.
{\it Proof}. For a proof by contradiction, assume there exist $a < b$ in $I$ such that $f(a) \neq f(b)$. Then \begin{eqnarray*}
\varepsilon :={\frac {|f(b)-f(a)|}{2(b-a)}}>0. \end{eqnarray*} Define $c$ as the infimum of all those $x$ in the interval $(a,b]$ for which the difference quotient of $f$ exceeds $\varepsilon$ in absolute value, i.e. \begin{eqnarray*}
c=\inf\{\,x\in (a,b]\mid |f(x)-f(a)|>\varepsilon (x-a)\,\}.
\end{eqnarray*} Due to the continuity of $f$, it follows that $c < b$ and $|f(c)-f(a)|=\varepsilon(c-a)$. At $c$ the right derivative of $f$ is zero by assumption, hence there exists $d$ in the interval $(c,b]$
with $|f(x)-f(c)|\le\varepsilon(x-c)$ for all $x \in (c,d]$. Hence, by the triangle inequality, \begin{eqnarray*}
|f(x)-f(a)|\leq |f(x)-f(c)|+|f(c)-f(a)|\leq \varepsilon (x-a) \end{eqnarray*} for all $x$ in $(c,d]$, which contradicts the definition of $c$.
\end{document}
\begin{document}
\title{On $C^*$-algebras associated to actions of discrete subgroups of $\SL(2,\mathbb{R})$ on the punctured plane}
\author{Jacopo Bassi}
\maketitle
\begin{abstract} \noindent Dynamical conditions that guarantee stability for discrete transformation group $C^*$-algebras are determined. The results are applied to the case of some discrete subgroups of $\SL(2,\mathbb{R})$ acting on the punctured plane by means of matrix multiplication of vectors. In the case of cocompact subgroups, further properties of such crossed products are deduced from properties of the $C^*$-algebra associated to the horocycle flow on the corresponding compact homogeneous space of $\SL(2,\mathbb{R})$. \end{abstract}
\section{Introduction} Transformation group $C^*$-algebras represent a tool for the construction of examples of structure and classification theorems for $C^*$-algebras and provide a way to interpret dynamical properties on the $C^*$-algebraic level. Typical examples are the $C^*$-algebras associated to minimal homeomorphisms on infinite compact metric spaces with finite covering dimension (\cite{rieffel-ir,giordano-putnam-skau,toms-winter}), or more generally, free minimal actions of countable residually finite groups with asymptotically finite-dimensional box space on compact metric spaces with finite covering dimension (\cite{swz}). In these cases the structure is that of an $ASH$-algebra and classification is provided by the Elliott invariant. Moving to the non-unital setting, such classification results are still available when the $C^*$-algebra is stable and contains projections, assuming a suitable Rokhlin type property for the action. In these situations the resulting transformation group $C^*$-algebra is a stabilized $ASH$-algebra. Examples come from free and minimal actions of the real numbers on compact metric spaces admitting compact transversals (\cite{hsww}), where stability is reminiscent of freeness and the transversal produces a projection in the crossed product. On the other hand, stable simple $\mathcal{Z}$-stable projectionless $C^*$-algebras admit a description of the isomorphism classes of hereditary $C^*$-subalgebras and countably generated Hilbert $C^*$-modules in terms of Cuntz equivalence of positive elements (\cite{zstable_projless}).
Dynamical conditions which ensure stability of a transformation group $C^*$-algebra were given in \cite{green}, where it is proved that $C^*$-algebras arising from actions that are free and wandering on compacts are trivial fields of compact operators. For the case of more general $C^*$-algebras, other characterizations of stability are contained in \cite{rordam-fp,brttw}.\\
The present paper focuses on transformation group $C^*$-algebras associated to the action of discrete subgroups of $\SL(2,\mathbb{R})$ on the punctured plane, by means of matrix multiplication of vectors. Ergodic properties of such dynamical systems have been investigated in several places and the duality with the horocycle flow on the corresponding homogeneous spaces for $\SL(2,\mathbb{R})$ has been successfully employed in \cite{furstenberg,ledrappier, mau_weiss}. The study of such dynamical systems and their generalizations has a number of interesting applications, as observed in \cite{goro_weiss}, such as the quantitative Oppenheim conjecture, quantitative estimates of the denseness of certain projections associated to irreducible lattices and strengthenings of distribution results concerning actions of lattices by automorphisms.
The first part of this work focuses on the study of the distribution of orbits of compact sets on the punctured plane under the action of discrete subgroups of $\SL(2,\mathbb{R})$ containing two hyperbolic elements with different axes. Rather than studying the asymptotics of the distribution of such orbits under an increasing family of finite subsets in the lattice, as in \cite{nogueira2002}, \cite{nogueira2010} and \cite{guilloux}, we consider the possibility of finding, at every step, an element in the group that \textit{squeezes} the image of the compact set under the action of any element of the finite subset strongly enough. This property of the action reflects the fact that such discrete subgroups of $\SL(2,\mathbb{R})$ actually contain an abundance of hyperbolic elements and represents a weaker version of the wandering on compacts assumption considered in \cite{green}. This dynamical condition guarantees the existence of invertible approximants for the elements in the crossed product $C^*$-algebra. By appealing to \cite{rordam-fp}, we show that in the case of actions that are contractive in a suitable sense, this property is enough to ensure stability of the crossed product $C^*$-algebra. The ``dual'' approach is used in the last part to find properties of the crossed product arising from an action of a cocompact subgroup of $\SL(2,\mathbb{R})$ on $\mathbb{R}^2 \backslash \{0\}$ by establishing a $*$-isomorphism between this $C^*$-algebra and the $C^*$-algebra associated to the horocycle flow on the corresponding homogeneous space for $\SL(2,\mathbb{R})$.\\
\subsection{Notation} If $G$ is a locally compact group and $A$ is a $C^*$-algebra, by an action of $G$ on $A$ we mean a continuous group homomorphism from $G$ to the group $\Aut (A)$ of $*$-automorphisms of $A$, endowed with the topology of pointwise convergence. If $X$ is a locally compact Hausdorff space, by an action of $G$ on $X$ we mean a continuous map $G \times X \rightarrow X$ that is associative and such that the identity of the group leaves every point of the space fixed. If a locally compact group $G$ acts on a locally compact Hausdorff space $X$ by means of an action $\alpha : G \times X \rightarrow X$, we denote by $C_0 (X) \rtimes G$ the associated (full) transformation group $C^*$-algebra, that is the full crossed product $C^*$-algebra relative to the action $\hat{\alpha}_g (f)=f \circ g^{-1}$ for $g \in G$, $f \in C_0 (X)$. Similarly $C_0 (X) \rtimes_r G$ is the reduced transformation group $C^*$-algebra, that is the reduced crossed product relative to the same action. If $X$ and $Y$ are two Hilbert modules over a $C^*$-algebra, we write $X\Subset Y$ to mean that $X$ is compactly contained in $Y$ in the sense of \cite{cuntz_hm} Section 1. If $F \subset S$ is an inclusion of sets, we write $F\Subset S$ to mean that $F$ has finite cardinality. If $X$ is a topological space and $S\subset X$ a subset, we denote by $S^\circ$ its interior.
\section{Weak stable rank $1$} The concept of stable rank for $C^*$-algebras was introduced by Rieffel in \cite{rieffel} as a noncommutative analogue of the covering dimension of a space and the case of stable rank $1$ is of particular interest (see for example \cite{cuntz_hm} and \cite{open_proj}). Conditions under which a transformation group $C^*$-algebra has stable rank $1$ have been given in \cite{poon} for actions of the integers; for actions of other groups with finite Rokhlin dimension on compact spaces such conditions can be obtained by combining the results in \cite{hwz}, \cite{szabo} or \cite{swz} and \cite{rordam-sr}, under some other assumptions, as for example, the existence of an invariant measure. If $A$ is a $C^*$-algebra, it is said to have stable rank $1$ if every element in its minimal unitization $\tilde{A}$ can be approximated by invertible elements in $\tilde{A}$. We will consider a more restrictive (non-stable) approximation property, which was used in a crucial way in \cite{brttw}. The following definition was given by Hannes Thiel during a lecture about the Cuntz semigroup in the Winter semester 2016/2017 at the University of M{\"u}nster. \begin{defn} \label{defn2.0} Let $A$ be a $C^*$-algebra. Then $A$ has \textit{weak stable rank $1$}, $\wsr (A)=1$, if $A \subset \overline{GL(\tilde{A})}$. \end{defn} Another variation of the concept of stable rank $1$ is the following \begin{defn}[\cite{zstable_projless} Definition 3.1] Let $A$ be a $C^*$-algebra. Then $A$ has \textit{almost stable rank $1$}, $\asr (A)=1$, if $\wsr (B) =1$ for every hereditary $C^*$-subalgebra $B \subset A$. \end{defn}
A $C^*$-algebra $A$ is said to be stable if $A\otimes \mathbb{K} \simeq A$, where $\mathbb{K}$ denotes the $C^*$-algebra of compact operators on a separable Hilbert space. Stable $C^*$-algebras always have weak stable rank $1$ by Lemma 4.3.2 of \cite{brttw} and their multiplier algebra is properly infinite by \cite{rordam-fp} Lemma 3.4. The connection between stability and stable rank in the $\sigma$-unital case was already investigated in \cite{rordam-fp} Proposition 3.5 and Proposition 3.6. For our purpose, we need the following slight variation of the results contained in \cite{rordam-fp}: \begin{thm} \label{thm2.0} Let $A$ be a $\sigma$-unital $C^*$-algebra. The following are equivalent \begin{itemize} \item[(i)] $\wsr (A)=1$ and $M(A)$ is properly infinite; \item[(ii)] $A$ is stable. \end{itemize} If $A$ is simple, they are equivalent to \begin{itemize} \item[(iii)] $\wsr(A)=1$ and $M(A)$ is infinite. \end{itemize} \end{thm} \proof The proof of Lemma 3.2 of \cite{rordam-fp} applies under the hypothesis of weak stable rank $1$, hence if $\wsr (A) =1$ and $M(A)$ is properly infinite, then $A$ is stable by the considerations in the proof of \cite{rordam-fp} Proposition 3.6. As already observed, for any stable $C^*$-algebra $A$, $\wsr(A)=1$ and its multiplier algebra is properly infinite. In the simple case the result follows by an application of Lemma 3.3 of \cite{rordam-fp} and the proof is complete.\\
In order to obtain stability for a transformation group $C^*$-algebra, we introduce a certain dynamical condition and observe that it guarantees weak stable rank $1$; this is the content of the rest of this section. We will deduce infiniteness properties for the multiplier algebra by adapting the results contained in \cite{sth} to the locally compact case in the next section.
\begin{defn} \label{defn2.1} Let $G$ be a discrete group acting on a locally compact Hausdorff space $X$. The action is said to be \textit{squeezing} if for every $F\Subset G$ and every $C \subset X$ compact there exists $\gamma \in G$ such that \[ \gamma g \gamma h C \cap \gamma g C \cap C =\emptyset \nonumber \] for all $g,h \in F$. \end{defn} Note that Definition \ref{defn2.1} only makes sense for actions on locally compact non-compact spaces, since the space itself is globally fixed by any homeomorphism.
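For an elementary example (which is not needed in the sequel), consider the action of $\mathbb{Z}$ on $\mathbb{R} \backslash \{0\}$ generated by the dilation $x \mapsto 2x$, identifying $n \in \mathbb{Z}$ with the dilation $x \mapsto 2^n x$. If $C \subset \{ r_1 \leq |x| \leq r_2 \}$ with $0<r_1 \leq r_2$ and $F \subset \{-k,...,k\}$, then any $\gamma = n$ with $2^{n-k} r_1 > r_2$ satisfies \[ \gamma g C \subset \{ 2^{n+g} r_1 \leq |x| \leq 2^{n+g} r_2 \} \subset \{ |x| > r_2 \} \qquad \mbox{ for all } g \in F, \nonumber \] hence $\gamma g C \cap C = \emptyset$ for all $g \in F$ and a fortiori $\gamma g \gamma h C \cap \gamma g C \cap C = \emptyset$ for all $g,h \in F$, so this action is squeezing.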
\begin{pro} \label{prop2.1} Let $G$ be a discrete group acting on a locally compact Hausdorff space $X$ by means of a squeezing action. Then $\wsr (C_0 (X) \rtimes G) =1$. \end{pro} \proof
Every element in $C_0 (X) \rtimes G$ can be approximated by elements in $C_c (G, C_c (X))$, hence it is enough to prove that any element in $C_c (G, C_c (X))$ is the limit of invertible elements in $(C_0 (X) \rtimes G )^\sim$. Let $F \Subset G$ and $z=\sum_{g \in F} z_g u_g$ be such that $z_g \in C_c (X)$ for every $g \in F$. Define $C := \bigcup_{g \in F} \supp (z_g)$ and let $K \subset X$ be a compact subset such that $C \subsetneq K^\circ$. There exists a continuous function $f : X \rightarrow [0,1]$ such that $\supp (f) \subset K$, $f|_{C} =1$; furthermore, since the action is squeezing, there is a group element $\gamma \in G$ such that \[ \gamma g \gamma h K \cap \gamma g K \cap K = \emptyset \nonumber \qquad \mbox{ for all }\quad g,h \in F \cup F^{-1} \cup \{ e\}. \] From our choice of $f$, it follows that we can write $z = f z = (f u_{\gamma})(u_{\gamma^{-1}} z)$. Computing the third power of $u_{\gamma^{-1}} z$ we obtain \[ (u_{\gamma^{-1}} z)^3 = \sum_{g,g' , g'' \in F} (z_g \circ \gamma) (z_{g'} \circ (\gamma^{-1} g \gamma^{-1})^{-1}) (z_{g''} \circ (\gamma^{-1} g \gamma^{-1} g' \gamma^{-1})^{-1}) u_{\gamma^{-1} g \gamma^{-1} g' \gamma^{-1} g''}.
\nonumber \] For every $s \in G$ and $\phi \in C_c (X)$ we have $\supp (\phi \circ s^{-1}) = s \supp (\phi)$ and so, from our choice of $K$ and $\gamma$, we see that \[ \begin{split} &\supp (z_g \circ \gamma) \cap \supp (z_{g'} \circ (\gamma^{-1} g \gamma^{-1})^{-1}) \cap \supp (z_{g''} \circ (\gamma^{-1} g \gamma^{-1} g' \gamma^{-1})^{-1}) \\ &\subset \gamma^{-1} (K \cap g \gamma^{-1} K \cap g \gamma^{-1} g' \gamma^{-1} K) = \emptyset, \end{split} \nonumber \] since $K \cap g \gamma^{-1} K \cap g \gamma^{-1} g' \gamma^{-1} K =\emptyset$ if and only if $\gamma(g')^{-1} \gamma g^{-1} K \cap \gamma (g')^{-1} K \cap K = \emptyset$. Hence \[ (u_{\gamma^{-1}} z)^3 = 0 \nonumber \] and $u_{\gamma^{-1}} z$ is nilpotent. In the same way we obtain \[ (f u _{\gamma})^3 = f (f\circ \gamma^{-1}) (f\circ \gamma^{-2}) u_{\gamma^{3}}=0 \nonumber \] since $\gamma^2 K \cap \gamma K \cap K = \emptyset$. Hence $z$ is a product of nilpotent elements, thus it is the limit of invertible elements in $(C_0 (X) \rtimes G)^\sim$ (cf. \cite{rordam-uhf} 4.1) and the claim follows.
\begin{rem} \label{oss2.0} Natural variations of Definition \ref{defn2.1} lead to the same result as Proposition \ref{prop2.1}. The reason why we chose this form is that it fits the discussion of Section 4. \end{rem}
\begin{rem} \label{oss2.1} Proposition \ref{prop2.1} applies to the reduced crossed product as well. \end{rem}
\section{Contractive and paradoxical actions} In the last section we determined a condition on an action of a discrete group that guarantees weak stable rank $1$ for the transformation group $C^*$-algebra. In view of Theorem \ref{thm2.0} this section is devoted to find conditions that guarantee infiniteness properties for the multiplier algebra of the crossed product $C^*$-algebra.\\
If $A$ is any $C^*$-algebra and $G$ a discrete group acting on it, then $A\rtimes G$ is isomorphic to an ideal in $M(A) \rtimes G$, where the action of $G$ on $M(A)$ is the extension of the action on $A$. Then there is a unital $*$-homomorphism $\phi : M(A) \rtimes G \rightarrow M(A \rtimes G)$; if we identify $M(A\rtimes G)$ with the $C^*$-algebra of double centralizers on $A\rtimes G$ and $A\rtimes G$ with its isomorphic image in $M(A) \rtimes G$, $\phi (x) y = xy$ for any $x$ in $M(A) \rtimes G$ and $y$ in $A\rtimes G$. The same results apply to the reduced crossed product as well. This will be the framework for the following considerations.\\
In virtue of the above discussion, all the results we state in the rest of this section concerning full transformation group $C^*$-algebras hold true for the reduced transformation group $C^*$-algebras as well. The same applies to the results contained in the next section, where in order to prove the analogue of Proposition \ref{prop123} for the reduced crossed product, one can use the extension of the surjective $*$-homomorphism from the full crossed product to the reduced crossed product to the multiplier algebras.
The concept of contractive action (see below) was already considered in \cite{sth} page 22 and has to be compared with the more restrictive Definition 2.1 of \cite{delaroche}. \begin{defn} \label{defn3.1} Let $G$ be a discrete group acting on a locally compact Hausdorff space $X$. The action is said to be \textit{contractive} if there exist an open set $U \subset X$ and an element $t \in G$ such that $t \overline{U} \subsetneq U$. In this case $(U,t)$ is called a \textit{contractive pair} and $U$ a \textit{contractive set}. \end{defn} The notion of scaling element was introduced in \cite{blackadar-cuntz} and was used to characterize stable algebraically simple $C^*$-algebras. \begin{defn}[\cite{blackadar-cuntz} Definition 1.1] \label{defn3.3} Let $A$ be a $C^*$-algebra and $x$ an element in $A$. $x$ is called a \textit{scaling element} if $x^* x (xx^*) = xx^*$ and $x^* x \neq xx^*$. \end{defn} \begin{pro} \label{prop3.1} Let $G$ be a discrete group acting on a locally compact Hausdorff space $X$. Consider the following properties: \begin{itemize} \item[(i)] The action of $G$ on $X$ is contractive. \item[(ii)] There exists a scaling elementary tensor in $C_c (G, C_b (X))$. \end{itemize} Then $(ii) \Rightarrow (i)$. If $X$ is normal, then $(i) \Rightarrow (ii)$. \end{pro} \proof
$(ii) \Rightarrow (i)$: Let $x=u_t f$ be a scaling elementary tensor in $C_c (G, C_b (X))$ and $U$ the interior of $\supp(f)$. Since $x^* x = |f|^2$ and $xx^* = | f \circ t^{-1} |^2$, the condition $x^* x xx^* = xx^*$ implies $|f| |_{t\overline{U}} =1$; in particular $t\overline{U} \subset U$. Suppose that $t\overline{U} =U$. Then \[
|f| |_{U^c} =0, \quad |f||_U = |f||_{t\overline{U}} = 1|_{t\overline{U}} \nonumber \] and \[
|f\circ t^{-1} ||_{U^c} = |f\circ t^{-1} ||_{(t\overline{U})^c} =| f\circ t^{-1} ||_{t (U)^c}=0. \nonumber \] Since $G$ acts by homeomorphisms, $U$ is a clopen set and $t^{-1} U = t \overline{U}$, which entails \[
|f\circ t^{-1}||_U =1|_U = 1|_{t \overline{U}}. \nonumber \]
This would imply $|f|= |f\circ t^{-1}|$ and $x^* x=xx^*$. Hence $t\overline{U} \subsetneq U$.\\ Suppose now that $X$ is normal and let $(U,t)$ be a contractive pair. Take $\xi \in U \backslash (t\overline{U})$. By the Urysohn Lemma (using normality) there exists a continuous function $f : X \rightarrow [0,1]$ that is $0$ on $U^c$ and $1$ on $\{ \xi \} \cup (t\overline{U})$. The element $x := u_t f \in C_c (G, C_b (X))$ satisfies $x^* x = f^2$, $xx^* = (f\circ t^{-1} )^2$ and $x^* x (xx^*)=xx^*$. Since $\supp (f\circ t^{-1} ) \subsetneq \supp (f)$, we have $x^* x \neq xx^*$, completing the proof.
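To illustrate the construction used in the proof (a toy example, not needed later), consider again the dilation action of $\mathbb{Z}$ on $\mathbb{R} \backslash \{0\}$ generated by $t : x \mapsto 2x$, as in the example following Definition \ref{defn2.1}, together with the contractive pair given by $U = \{ |x|>1 \}$ and $t$, so that $t \overline{U} = \{ |x| \geq 2 \} \subsetneq U$. Taking for instance $f(x)= \min ( \max (|x|-1,0),1)$, the element $v:=u_t f \in C_c (\mathbb{Z}, C_b (\mathbb{R} \backslash \{0\}))$ satisfies \[ v^* v = f^2, \qquad vv^* = (f\circ t^{-1})^2, \qquad v^* v (vv^*)=vv^*, \qquad v^* v \neq vv^*, \nonumber \] since $f \circ t^{-1} (y)=f(y/2)$ is supported in $\{ |y| \geq 2\}$, where $f=1$; hence $v$ is a scaling element.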
\begin{cor} \label{cor3.3.1} Let $G$ be a group acting on a locally compact normal Hausdorff space $X$ by means of a contractive action. Then $M(C_0 (X) \rtimes G)$ is infinite. \end{cor} \proof Let $x$ be as in Proposition \ref{prop3.1}; we want to show that $\phi(x^*x) \neq \phi (xx^*)$ ($\phi$ is defined at the beginning of the section). To see this, take $\xi \in U \backslash (t\overline{U})$ and let $f \in C_c (X)$ be such that $f(\xi)=1$. Then $(x^*x f)(\xi) \neq 0$ and $(xx^* f)(\xi)=0$ and so $\phi (x^* x) \neq \phi (xx^*)$. As shown in \cite{blackadar-cuntz} Theorem 3.1, the element $\phi(x)+(1-\phi(x^* x))^{1/2}$ is a nontrivial isometry and the claim follows.\\
A variation of the concept of contractive action is the following (see \cite{sth} Lemma 2.3.2) and is a particular case of Definition 2.3.6 of \cite{sth}. \begin{defn} \label{defn3.33} Let $X$ be a locally compact Hausdorff space and $G$ a discrete group acting on it. We say that the action is \textit{paradoxical} if there are positive natural numbers $n$, $m$, group elements $t_1 ,..., t_{n+m}$ and non-empty open sets $U_1 ,..., U_{n+m}$ such that $\bigcup_{i=1}^n U_i = \bigcup_{i=n+1}^{n+m} U_i = X$, $\bigcup_{i=1}^{n+m} t_i (U_i) \subsetneq X$ and $t_i U_i \cap t_j U_j = \emptyset$ for every $i\neq j$. \end{defn}
Adapting the ideas (and methods) of \cite{sth} Lemma 2.3.7 to the locally compact case, we have the following \begin{pro} \label{prop3.2} Let $G$ be a discrete group acting on a locally compact normal Hausdorff space $X$. If the action is paradoxical, then $M(C_0 (X) \rtimes G)$ is properly infinite. \end{pro} \proof Let $n$, $m$, $t_1 ,..., t_{n+m}$ and $U_1 ,..., U_{n+m}$ be as in Definition \ref{defn3.33}. Taking unions and relabeling we can suppose $t_i \neq t_j$ for $i\neq j$. Let $F:= \{ t_1 ,..., t_n \}$, $F' := \{ t_{n+1} ,..., t_{n+m}\}$.\\ Since $X$ is normal we can take a partition of unity $\{\phi_t\}_{t \in F}$ subordinated to $\{U_i\}_{i=1}^n$ and a partition of unity $\{ \psi_{s}\}_{s \in F'}$ subordinated to $\{U_i\}_{i=n+1}^{n+m}$. Consider the extension of the action of $G$ to $C_b (X)$ and the associated crossed product $C^*$-algebra $C_b (X) \rtimes G$.\\ Define $x:= \sum_{t \in F} u_t \phi_t^{1/2}$ and $y:= \sum_{s \in F'} u_{s} \psi_{s}^{1/2}$. Then \[ x^* x = y^* y = 1. \nonumber \] Note now that \[ x^*y = \sum_{t \in F, s \in F'} \phi_t^{1/2} (\psi_s^{1/2} \circ s^{-1} t ) u_{t^{-1} s} =0 \nonumber \] and so $xx^* \perp yy^*$.\\ Let $\phi :C_b (X) \rtimes G \rightarrow M(C_0 (X) \rtimes G)$ be as at the beginning of this section. Take a positive function $f \in C_c(X)$ that takes the value $1$ at a point $\xi \in (\bigcup_{1\leq i \leq n} t_i U_i)^c$. Then \[ xx^* f = \sum_{t, t' \in F} (\phi_t^{1/2} \circ t^{-1}) (\phi_{t'}^{1/2} \circ t^{-1}) u_{t(t')^{-1}} f \nonumber \] entails $0=(xx^* f)(\xi) \neq f (\xi) =1$. Hence $xx^* f \neq f$ and $\phi (xx^*)\neq \phi (1)=1$. The same applies to $yy^*$ and so $1 \in M(C_0 (X) \rtimes G)$ is properly infinite, as claimed.\\
If a discrete group $G$ acts on a locally compact Hausdorff space $X$, the action is said to be \textit{topologically free} if for every $F \Subset G$ the set $\bigcap_{t \in F \backslash \{e\}} \{ x \in X \; | \; tx \neq x \}$ is dense in $X$ (\cite{archbold-spielberg} Definition 1). Combining Proposition \ref{prop3.2} with the results of Section $2$ we obtain \begin{thm} \label{thm3.1} Let $G$ be a discrete group acting on a locally compact metric space $X$ by means of an action that is paradoxical and squeezing. Then $C_0 (X) \rtimes G$ is stable. If the action is topologically free, minimal, squeezing and contractive, then $C_0 (X) \rtimes_r G$ is stable. \end{thm} \proof Since $X$ is second countable, $C_0 (X) \rtimes G$ is separable, hence $\sigma$-unital. The result follows from Theorem \ref{thm2.0}, Proposition \ref{prop3.2} and Proposition \ref{prop2.1}. If the action is topologically free and minimal, then $C_0 (X)\rtimes_r G$ is simple by \cite{archbold-spielberg}; hence Theorem \ref{thm2.0} applies also in this situation.
\section{The case of discrete subgroups of $\SL(2,\mathbb{R})$} A Fuchsian group $\Gamma$ is a discrete subgroup of $\PSL(2,\mathbb{R})$ (\cite{katok} Definition 2.2) and as such it acts on the hyperbolic plane $\mathbb{H}$ and on its boundary $\partial \mathbb{H} =\mathbb{R}\cup \{\infty\} \simeq \mathbb{R} \mathbb{P}^1$ by means of M{\"o}bius transformations. Let $G$ be a discrete subgroup of $\SL(2,\mathbb{R})$ acting on $\mathbb{R}^2 \backslash \{0\}$ by means of matrix multiplication of vectors. The quotient map $\pi : \mathbb{R}^2 \backslash \{0\} \rightarrow \mathbb{R} \mathbb{P}^1$ induces an action of $G$ on $\mathbb{R}\mathbb{P}^1$, which factors through the action of the corresponding Fuchsian group $p (G)$, where $p: \SL(2,\mathbb{R}) \rightarrow \PSL(2,\mathbb{R})$ is the quotient by the normal subgroup $\{-1,+1\}$ of $\SL(2,\mathbb{R})$.\\ If $\gamma$ is a hyperbolic element (\cite{katok} 2.1) in $\PSL(2,\mathbb{R})$ or $\SL(2,\mathbb{R})$ acting on $\mathbb{RP}^1$, we denote by $\gamma^{- (+)}$ its repelling (attracting) fixed point. For a subset of $\SL(2,\mathbb{R})$ or $\PSL(2,\mathbb{R})$ consisting of hyperbolic transformations, we say that its elements have different axes if the fixed-point sets for the action of the elements on $\mathbb{R} \mathbb{P}^1$ are pairwise disjoint. Note that in both $\SL(2,\mathbb{R})$ and $\PSL(2,\mathbb{R})$, discreteness of a subgroup $G$ implies that whenever two hyperbolic elements in $G$ have a common fixed point, then their axes coincide. \begin{lem} \label{lem5} Let $\Gamma$ be a Fuchsian group containing two hyperbolic elements with different axes. Then for every $F\Subset \Gamma$ there exists a hyperbolic element $\gamma \in \Gamma$ such that \[ g \gamma^+ \neq \gamma^- \qquad \forall g \in F. \nonumber \] The same is true if $\Gamma$ is a group generated by a hyperbolic element. \end{lem} \proof Let $F \Subset \Gamma$. If $\Gamma$ contains two hyperbolic elements with different axes, then it contains infinitely many, hence we can take $\eta$, $\delta$ hyperbolic with different axes and such that the fixed points of $\eta$ are not fixed by any non-trivial element in $F$. Suppose that $F$ is such that for every $n \in \mathbb{N}$ there is a $g \in F$ with $g \eta^n \delta^+ =g(\eta^n \delta \eta^{-n})^+ = (\eta^n \delta \eta^{-n})^- = \eta^n \delta^-$. Then, passing to a subsequence \[ \exists g \in F \quad \mbox{ s.t. } \quad g\eta^{n_k} \delta^+ = \eta^{n_k} \delta^-. \nonumber \] Both $\eta^{n_k} \delta^+$ and $\eta^{n_k} \delta^-$ converge to $\eta^+$ and so $\eta^+$ is fixed by $g$, a contradiction.
\begin{pro} \label{prop4.1} Let $G$ be a discrete subgroup of $\SL(2,\mathbb{R})$ such that $p(G)$ is a Fuchsian group containing two hyperbolic elements with different axes or a Fuchsian group generated by a hyperbolic element. Then the action of $G$ on $\mathbb{R}^2 \backslash \{0\}$ is squeezing. \end{pro} \proof Let $F \Subset G$ and let an orthonormal basis $\{e_1 , e_2\}$ of $\mathbb{R}^2$ be given. By Lemma \ref{lem5}, applied to $p(F)$, there is a $\gamma \in p (G)$ such that $p(g) \gamma^+ \neq \gamma^-$ for every $g \in F$. Let $h \in G$ be such that $p(h) = \gamma$. Hence $h$ is hyperbolic and is conjugate in $\SL(2,\mathbb{R})$ to a diagonal matrix: \[ h = u^{-1} \Lambda u =u^{-1} \left( \begin{array}{cc} \lambda & 0 \\
0 & \lambda^{-1} \end{array}\right)u, \qquad |\lambda| >1. \nonumber \] Let $g'$ and $g$ be elements in $F$ and suppose that the upper-left diagonal entry of the matrix $ug'u^{-1}$ vanishes: $(ug'u^{-1})_{1,1} =0$. This means that $\langle e_1, ug'u^{-1}e_1\rangle =0$, or equivalently, $ug'u^{-1} e_1 \in \mathbb{R} e_2$; hence, since the image of $u^{-1} e_1$ under the quotient map $\pi : \mathbb{R}^2 \backslash \{0\} \rightarrow \mathbb{R} \mathbb{P}^1$ is $\gamma^+$ and the image of $u^{-1} e_2$ under the same map is $\gamma^-$, looking at the action of $p(G)$ on $\mathbb{R} \mathbb{P}^1$ we obtain $p (g') \gamma^+ = \gamma^-$, contradicting the assumption. Hence $(ug'u^{-1})_{1,1}\neq 0$. Define $g_u := ugu^{-1}$, $g'_u := ug' u^{-1}$ and compute for $n \in \mathbb{N}$ \[ \Lambda^n g_u= \left(\begin{array}{cc} \lambda^n (g_{u})_{1,1} & \lambda^n (g_u)_{1,2} \\
\lambda^{-n} (g_{u})_{2,1} & \lambda^{-n} (g_{u})_{2,2} \end{array}\right), \nonumber \] and \[ \Lambda^n g'_u \Lambda^n g_u = \left( \begin{array}{cc} \lambda^{2n} (g'_u)_{1,1} (g_{u})_{1,1} + (g'_u)_{1,2} (g_u)_{2,1} & \lambda^{2n} (g'_u)_{1,1} (g_u)_{1,2} + (g'_u)_{1,2} (g_u)_{2,2} \\ (g'_u)_{2,1} (g_u)_{1,1} + \lambda^{-2n} (g'_u)_{2,2} (g_u)_{2,1} & (g'_u)_{2,1} (g_u)_{1,2} + \lambda^{-2n} (g'_u)_{2,2} (g_u)_{2,2} \end{array}\right). \nonumber \]
Let $C \subset \mathbb{R}^2 \backslash \{ 0 \}$ be a compact subset; take real positive numbers $r_1$ and $r_2$ such that the compact annulus $C_{r_1 , r_2} = \{ z \in \mathbb{R}^2 | r_1 \leq \| z \| \leq r_2\}$ contains $uC$. We want to show that there exists $n >0$ such that \[ \Lambda^n g'_u \Lambda^n g_u C_{r_1 , r_2} \cap \Lambda^n g'_u C_{r_1 , r_2} \cap C_{r_1,r_2} = \emptyset, \qquad \forall g,g' \in F. \nonumber \] Let $(x,y)^t \in \mathbb{R}^2$ be such that $\Lambda^n g'_u \Lambda^n g_u (x,y)^t$ belongs to $C_{r_1,r_2}$. In particular, this entails \[
\| \Lambda^n g'_u \Lambda^n g_u (x,y)^t \| \leq r_2 \nonumber \] and taking the first coordinate: \[
| \lambda^{2n} (g'_u)_{1,1} [(g_u)_{1,1} x + (g_u)_{1,2} y] + [ (g'_u)_{1,2} (g_u)_{2,1} x + (g'_u)_{1,2} (g_u)_{2,2} y ]| \leq r_2. \nonumber \] Hence \begin{equation} \label{eq4.1}
|(g_u)_{1,1} x + (g_u)_{1,2} y| \leq \frac{r_2 + | (g'_u)_{1,2} (g_u)_{2,1} x + (g'_u)_{1,2} (g_u)_{2,2} y |}{\lambda^{2n} | (g'_u)_{1,1}|} \end{equation} for every $(x,y)^t \in (\Lambda^n g'_u \Lambda^n g_u)^{-1} C_{r_1,r_2}$. Furthermore, if $(x,y)^t \in \mathbb{R}^2$ is such that $\Lambda^n g_u (x,y)^t $ belongs to $C_{r_1,r_2}$, then \begin{equation} \label{eq4.2} \begin{split} r_1^2 &\leq [\lambda^n (g_u)_{1,1} x + \lambda^n (g_u)_{1,2} y]^2 + [\lambda^{-n} (g_u)_{2,1} x + \lambda^{-n} (g_u)_{2,2} y ]^2\\ & = \lambda^{2n} [(g_u)_{1,1} x + (g_u)_{1,2} y]^2 + \lambda^{-2n}[ (g_u)_{2,1} x + (g_u)_{2,2} y ]^2. \end{split} \end{equation} Combining (\ref{eq4.1}) and (\ref{eq4.2}) we obtain \begin{equation} \label{eq4.3}
r_1^2 \leq \frac{[r_2 + | (g'_u)_{1,2} (g_u)_{2,1} x + (g'_u)_{1,2} (g_u)_{2,2} y |]^2}{\lambda^{2n} | (g'_u)_{1,1}|^2} + \lambda^{-2n} [(g_u)_{2,1} x + (g_u)_{2,2} y]^2 \end{equation} for every $(x,y)^t \in (\Lambda^n g'_u \Lambda^n g_u)^{-1} C_{r_1,r_2} \cap (\Lambda^n g_u)^{-1} C_{r_1,r_2}$. If $(x,y)^t$ belongs to $C_{r_1,r_2}$, then there is a constant $M >0$ such that \[
\frac{[r_2 + | (g'_u)_{1,2} (g_u)_{2,1} x + (g'_u)_{1,2} (g_u)_{2,2} y |]^2}{ | (g'_u)_{1,1}|^2} + [(g_u)_{2,1} x + (g_u)_{2,2} y]^2 \leq M \nonumber \] and this constant does not depend on the choice of $g$, $g'$ in $F$. So, by (\ref{eq4.3}), for $n$ large enough \[ (\Lambda^n g'_u \Lambda^n g_u)^{-1} C_{r_1,r_2} \cap (\Lambda^n g_u)^{-1} C_{r_1,r_2} \cap C_{r_1,r_2} = \emptyset, \nonumber \] which entails \[ C_{r_1,r_2} \cap \Lambda^n g'_u C_{r_1,r_2} \cap \Lambda^n g'_u \Lambda^n g_u C_{r_1,r_2}=\emptyset \nonumber \] and so \[
u^{-1}C_{r_1,r_2} \cap h^n g' u^{-1}C_{r_1,r_2} \cap h^n g' h^n g u^{-1}C_{r_1,r_2}=\emptyset. \nonumber \] The result follows since $C \subset u^{-1} C_{r_1 , r_2}$. \\
Hence we have determined a class of discrete subgroups of $\SL(2,\mathbb{R})$ whose action on $\mathbb{R}^2 \backslash \{0\}$ is squeezing. Conditions under which this action is contractive or paradoxical are the content of the following
\begin{pro} \label{prop4.2} Let $G$ be a discrete subgroup of $\SL(2,\mathbb{R})$ acting on $\mathbb{R}^2 \backslash \{ 0 \}$ by means of matrix multiplication of vectors. If $G$ contains a hyperbolic element, then the action is contractive. If $G$ contains at least two hyperbolic elements with different axes, then the action is paradoxical. \end{pro} \proof Suppose that $G$ contains a hyperbolic element; then the same is true for its image under the quotient map $p : \SL(2,\mathbb{R}) \rightarrow \PSL(2,\mathbb{R})$. Since the action of $\Gamma = p (G)$ on $\mathbb{R}\mathbb{P}^1$ is by homeomorphisms and every hyperbolic element in $\Gamma$ is conjugate in $\PSL(2,\mathbb{R})$ to a M{\"o}bius transformation of the form $z \mapsto \lambda^2 z$ for some $\lambda >1$, it follows that the action of $\Gamma$ on $\mathbb{R}\mathbb{P}^1$ is contractive. Hence there are an open set $U \subset \mathbb{R}\mathbb{P}^1$ and $\gamma \in \Gamma$ such that \begin{equation}
\gamma \overline{U} \subsetneq U. \nonumber \end{equation} In the case $G$ contains at least two hyperbolic elements with different axes, then the same is true for $\Gamma$ and as is well known, in this case $\Gamma$ contains a countable subset of hyperbolic elements with different axes. In order to see this, let $\gamma$, $\eta$ be hyperbolic elements in $\Gamma$ with different axes; then the elements in the sequence $\{ \eta^n \gamma \eta^{-n}\}_{n \in \mathbb{N}}$ are hyperbolic transformations with different axes. In particular, for every $n, m \geq 2$ natural numbers there are group elements $\gamma_1 ,..., \gamma_{n+m}$ and contractive open sets $U_1 ,..., U_{n+m}$, where for each $i=1,...,n+m$ $U_i$ contains the attracting fixed point $\gamma_i^+$ of $\gamma_i$, such that \begin{equation} \label{eq2} \bigcup_{i=1}^n U_i = \bigcup_{j=n+1}^{n+m} U_j = \mathbb{R}\mathbb{P}^1, \end{equation} \begin{equation} \label{eq3} \gamma_i U_i \cap \gamma_j U_j =\emptyset \qquad \forall i\neq j. \end{equation} Hence, we just need to observe that the same holds after replacing the sets $U_i$ with $\pi^{-1} (U_i)$ and the elements $\gamma_i$ with some representatives in $G$. Equation (\ref{eq2}) automatically holds for the sets $\pi^{-1} (U_i) \subset \mathbb{R}^2 \backslash \{0\}$. Choose a representative $g_i \in G$ for every $\gamma_i \in \Gamma$; since the action of $G$ on $\mathbb{R}\mathbb{P}^1$ factors through the action of $\Gamma$, equation (\ref{eq3}) can be replaced by \[ g_i U_i \cap g_j U_j =\emptyset \qquad \forall i\neq j. \nonumber \] By equivariance of the quotient map $\pi : \mathbb{R}^2 \backslash \{0\} \rightarrow \mathbb{R} \mathbb{P}^1$ it follows that \[ g_i (\pi^{-1} (U_i)) \cap g_j (\pi^{-1} (U_j)) = \emptyset \qquad \forall i\neq j. \nonumber \] We are left to check that the inverse image of a contractive open set is again a contractive open set. Since the map $\mathbb{R}^2 \backslash \{0\} \rightarrow \mathbb{R}\mathbb{P}^1$ is a quotient by a group action (the group is $\mathbb{R}^\times$), it is open and so the inverse image of the closure of a set is the closure of the inverse image of the same set; hence, if $(U, g)$ is a contractive pair with $U \subset \mathbb{R}\mathbb{P}^1$ and $g \in G$, then \[ g \overline{( \pi^{-1} (U))} = g \pi^{-1} (\overline{U}) = \pi^{-1} (g \overline{U}) \subsetneq \pi^{-1} (U). \nonumber \] The proof is complete.
\begin{cor} \label{cor4.1} Let $G$ be a discrete subgroup of $\SL(2,\mathbb{R})$ such that $p (G) \subset \PSL(2,\mathbb{R})$ is a Fuchsian group containing two hyperbolic elements with different axes. The transformation group $C^*$-algebra $C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes G$ is stable.\\ If $p(G)$ is generated by a hyperbolic transformation, then $\wsr (C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes G) =1$ and $M(C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes G)$ is infinite. \end{cor} \proof Follows from Proposition \ref{prop4.2}, Proposition \ref{prop4.1}, Theorem \ref{thm3.1} and Corollary \ref{cor3.3.1}.\\
Corollary \ref{cor4.1} applies to the case of discrete subgroups of $\SL(2,\mathbb{R})$ associated to Fuchsian groups of the first kind (\cite{katok} 4.5), hence in particular to the cocompact ones. Non-lattice subgroups to which Corollary \ref{cor4.1} applies are considered in \cite{semenova}.\\
In Proposition \ref{prop4.2} we deduced paradoxicality for the action of a discrete subgroup $G$ of $\SL(2,\mathbb{R})$ on $\mathbb{R}^2 \backslash \{0\}$ from paradoxicality of the action of the corresponding Fuchsian group on $\mathbb{R} \mathbb{P}^1$ and concluded from this fact that the multiplier algebra of $C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes G$ is properly infinite. It follows from \cite{glasner} Example VII.3.6 that if $\Gamma$ is a Fuchsian group of the first kind, then its action on $\mathbb{R} \mathbb{P}^1$ is extremely proximal (see \cite{glasner} page 96 for the definition). This property represents a stronger form of paradoxicality, so stronger infiniteness properties for the multiplier algebra of the transformation group $C^*$-algebra are to be expected in this case. Note that in \cite{boundary} an extremely proximal action is called a strong boundary action.\\ The next Proposition is a consequence of the results contained in \cite{boundary} and \cite{kra}.
\begin{lem} \label{lem4.4} Let $G$ and $H$ be locally compact groups and $A$, $B$ be $C^*$-algebras. Suppose $G$ acts on $A$ and $H$ acts on $B$ and that there is an equivariant involutive homomorphism $\phi : C_c (G,A) \rightarrow C_c (H,B)$ which is continuous for the $L^1$-norms. Then there is a $*$-homomorphism $\hat{\phi} : A\rtimes G \rightarrow B\rtimes H$. \end{lem} \proof
If $\rho: L^1 (H,B) \rightarrow \mathfrak{H}$ is a nondegenerate $L^1$-continuous involutive representation of $L^1 (H, B)$, then the composition $\rho \circ \phi : C_c (G, A) \rightarrow \mathfrak{H}$ is $L^1$-continuous as well. Hence $\| \phi (f) \| \leq \| f \|$ for every $f \in C_c (G,A)$ by \cite{williams} Corollary 2.46, so that $\phi$ extends by continuity to the desired $*$-homomorphism $\hat{\phi} : A\rtimes G \rightarrow B\rtimes H$.
\begin{pro} \label{prop123} Let $G$ be a discrete subgroup of $\SL(2,\mathbb{R})$ such that $p(G) \subset \PSL(2,\mathbb{R})$ is a finitely generated Fuchsian group of the first kind not containing elements of order $2$. Then $M(C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes G)$ contains a Kirchberg algebra in the UCT class as a unital $C^*$-subalgebra. \end{pro} \proof The quotient map $\pi : \mathbb{R}^2 \backslash \{0\} \rightarrow \mathbb{R} \mathbb{P}^1$ is surjective and equivariant with respect to the action of $G$, hence it induces a unital $*$-homomorphism $ C(\mathbb{R} \mathbb{P}^1) \rtimes G \rightarrow C_b (\mathbb{R}^2 \backslash \{0\} )\rtimes G$ which can be composed with the unital $*$-homomorphism $C_b (\mathbb{R}^2 \backslash \{0\})\rtimes G \rightarrow M(C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G)$ introduced at the beginning of Section 3 in order to obtain a unital $*$-homomorphism $\phi : C(\mathbb{R} \mathbb{P}^1)\rtimes G \rightarrow M(C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G)$. By \cite{kra}, finitely generated Fuchsian groups of the first kind not admitting elements of order $2$ lift to $\SL(2,\mathbb{R})$. Denote by $\kappa : \Gamma \rightarrow \kappa (\Gamma) \subset G$ a lift. Since the action of $G$ on $\mathbb{R}\mathbb{P}^1$ factors through the action of $\Gamma$, the map \[ \psi_c : C_c (\Gamma , C(\mathbb{R} \mathbb{P}^1)) \rightarrow C_c (\kappa (\Gamma) , C(\mathbb{R}\mathbb{P}^1)) \nonumber \] \[ f \mapsto f \circ \kappa^{-1} \nonumber \] is an involutive homomorphism and preserves the $L^1$-norm, as does the inclusion $C_c (\kappa (\Gamma), C(\mathbb{R} \mathbb{P}^1)) \rightarrow C_c (G, C(\mathbb{R} \mathbb{P}^1))$. By Lemma \ref{lem4.4} there is a (unital) $*$-homomorphism $\psi : C(\mathbb{R}\mathbb{P}^1)\rtimes \Gamma \rightarrow C(\mathbb{R}\mathbb{P}^1) \rtimes G$. Hence $\phi \circ \psi: C(\mathbb{R} \mathbb{P}^1 ) \rtimes \Gamma \rightarrow M(C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes G)$ is a unital $*$-homomorphism. By \cite{boundary} Theorem 5 the $C^*$-algebra $C(\mathbb{R} \mathbb{P}^1 ) \rtimes \Gamma$ is a unital Kirchberg algebra in the UCT class, hence $\phi \circ \psi$ is injective and the result follows.
\section{Cocompact subgroups of $\SL(2,\mathbb{R})$}
Consider the one-parameter subgroup of $\SL(2,\mathbb{R})$ \[
N:= \{ n(t) \in \SL(2,\mathbb{R}) \; | \; n(t)=\left( \begin{array}{cc} 1 & t \\
0 & 1 \end{array}\right), \quad t \in \mathbb{R} \}. \nonumber \] Given a discrete subgroup $G$ of $\SL(2,\mathbb{R})$, one can define a flow on the corresponding homogeneous space $G \backslash \SL(2,\mathbb{R})$ by $Gg \mapsto Gg n(-t)$; this is called the \textit{horocycle flow} (\cite{ew} 11.3.1). The stabilizer of the point $(1,0)^t$ in $\mathbb{R}^2 \backslash \{0\}$ for the action of $\SL(2,\mathbb{R})$ is $N$ and so the quotient $\SL(2,\mathbb{R})/N$, endowed with the action of $\SL(2,\mathbb{R})$ given by left multiplication, is isomorphic, as a dynamical system, to $\mathbb{R}^2 \backslash \{0\}$. The interplay between the action of $G$ on $\mathbb{R}^2 \backslash \{0\}$ and the horocycle flow on $G \backslash \SL(2,\mathbb{R})$ is employed in the following
\begin{pro} \label{prop2.1} Let $G$ be a discrete cocompact subgroup of $\SL(2,\mathbb{R})$. The transformation group $C^*$-algebra $C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G$ is simple, separable, stable, $\mathcal{Z}$-stable, with a unique lower semicontinuous $2$-quasitrace and it has almost stable rank $1$. In particular it satisfies the hypothesis of \cite{io} Theorem 3.5. \end{pro} \proof Since $G$ is countable and $\mathbb{R}^2 \backslash \{0\}$ is a locally compact second countable Hausdorff space, $C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G$ is separable.\\ As already observed in the discussion after Corollary \ref{cor4.1}, $C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G$ is stable when $G$ is cocompact. Since the action of $G$ on $\mathbb{R}^2 \backslash \{0\}$ is free and minimal (\cite{ergtopdyn} Theorem IV.1.9), $C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G$ is simple (\cite{archbold-spielberg}). By simplicity, the non-trivial lower semicontinuous traces on $C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G$ are semifinite (\cite{dixmier} 6.1.3) and so, by virtue of \cite{green2} Proposition 25 and Proposition 26, the restriction map sets up a bijection with the lower semicontinuous semifinite $G$-invariant traces on $C_0 (\mathbb{R}^2 \backslash \{0\})$. Every such trace is uniquely given by integration against a $G$-invariant Radon measure. By a theorem of Furstenberg (\cite{furstenberg}) there is exactly one such non-trivial measure. Hence $C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G$ admits a unique non-trivial lower semicontinuous trace. Since the action of $G$ on $\mathbb{R}^2 \backslash \{0\}$ is amenable, $C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G$ is nuclear (\cite{delaroche2} Theorem 3.4). By exactness it admits a unique non-trivial lower semicontinuous $2$-quasitrace (\cite{kirchberg}).\\ By virtue of Corollary 9.1 and Corollary 6.7 of \cite{hsww} the $C^*$-algebra $C(G \backslash \SL(2,\mathbb{R})) \rtimes N$ is stable; hence it follows from Green's imprimitivity Theorem (\cite{williams} Corollary 4.11) that $C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G \simeq C(G \backslash \SL(2,\mathbb{R})) \rtimes N$. By \cite{hsww} Corollary 9.1 and Theorem 3.5, $C(G \backslash \SL(2,\mathbb{R}) )\rtimes N$ has finite nuclear dimension; hence, \cite{tikusis} Corollary 8.7 entails $\mathcal{Z}$-stability.\\ As observed in \cite{noncommgeom} page 129, $C(G\backslash \SL(2,\mathbb{R})) \rtimes N$ is projectionless; hence, \cite{zstable_projless} Corollary 3.2 applies and $\asr (C(G\backslash \SL(2,\mathbb{R})) \rtimes N) =1$.\\ The result follows since the Cuntz semigroup of a stable $\mathcal{Z}$-stable $C^*$-algebra is almost unperforated (\cite{rordam-sr} Theorem 4.5).\\
\begin{rem} The stability of $C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G$ in Proposition \ref{prop2.1} can also be established directly from that of $C(G \backslash \SL(2,\mathbb{R})) \rtimes N$. In fact, the rest of the proof shows that $C(G \backslash \SL(2,\mathbb{R})) \rtimes N$ satisfies the hypothesis of \cite{io} Theorem 3.5. Since $C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G$ is a hereditary $C^*$-subalgebra of $C(G \backslash \SL(2,\mathbb{R})) \rtimes N$, it is then enough to prove that the non-trivial lower semicontinuous trace on $C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G$ is unbounded. But this follows since it is induced by the Lebesgue measure. \end{rem}
As a consequence we obtain the following properties for the $C^*$-algebra associated to the action of a cocompact discrete subgroup of $\SL(2,\mathbb{R})$ on $\mathbb{R}^2 \backslash\{0\}$.
\begin{cor} \label{horo_1} Let $G$ be a cocompact discrete subgroup of $\SL(2,\mathbb{R})$, $\tau$ the lower semicontinuous trace associated to the Lebesgue measure $\mu_L$ on $\mathbb{R}^2 \backslash \{0\}$ and $d_\tau$ the corresponding functional on the Cuntz semigroup $Cu (C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes G)$. Then \[
\Ped (C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G)=\{ x \in C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G\; : \; d_\tau ([|x|]) < \infty\}. \nonumber \] Every hereditary $C^*$-subalgebra of $C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G$ is either algebraically simple or isomorphic to $C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G$. \end{cor}
\begin{cor} \label{horo_3} Let $G$ be a cocompact discrete subgroup of $\SL(2,\mathbb{R})$. Every countably generated right Hilbert module for $C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G$ is isomorphic to one of the form \[ \overline{f \cdot (C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G)}, \qquad f \in C_0 (\mathbb{R}^2 \backslash \{0\}). \nonumber \] For two such Hilbert modules we have \[ \overline{f \cdot (C_0 (\mathbb{R}^2 \backslash \{0\}) \rtimes G)} \simeq \overline{g \cdot (C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes G)} \qquad \Leftrightarrow \qquad \mu_L (\supp (f)) = \mu_L (\supp (g)) \nonumber \] and there exists a Hilbert module $E$ such that \[ \overline{f \cdot (C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes G)} \simeq E \Subset \overline{g \cdot (C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes G)} \nonumber \] if and only if $\mu_L (\supp (f)) < \mu_L (\supp (g))$. \end{cor} \proof The Cuntz semigroup of the $C^*$-algebra $C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes G$ is stably finite by \cite{cuntz_t} Proposition 5.2.10, hence by \cite{cuntz_t} Proposition 5.3.16 it does not contain compact elements, since this $C^*$-algebra is projectionless. Hence the countably generated Hilbert modules correspond to soft elements and Cuntz equivalence of soft elements is implemented by the unique (up to scalar multiples) nontrivial functional associated to the unique (up to scalar multiples) lower semicontinuous trace $\tau$. It follows that all the possible values in the range of the dimension function are obtained by Cuntz equivalence classes of elements in $C_0 (\mathbb{R}^2 \backslash \{0\})$ since, for every $f \in C_0 (\mathbb{R}^2 \backslash \{0\})$, we have $d_\tau (f) = \mu_L (\supp (f))$. The result follows from Theorem 3.5 of \cite{io}.
\section{Final remarks} It follows from the results in the last section that if $G$ is a cocompact discrete subgroup of $\SL(2,\mathbb{R})$, the Cuntz classes of elements in the transformation group $C^*$-algebra $C_0 (\mathbb{R}^2 \backslash\{0\})\rtimes G$ are represented by continuous functions on the plane. It might be possible to derive this property directly from the dynamics.
It can be shown that if we restrict to discrete subgroups of $\SL(2,\mathbb{R})$ which are the inverse images under the quotient map $p: \SL(2,\mathbb{R}) \rightarrow \PSL(2,\mathbb{R})$ of fundamental groups of hyperbolic Riemann surfaces, the construction of the $C^*$-algebra associated to the horocycle flow on the corresponding homogeneous space of $\SL(2,\mathbb{R})$ induces a functor from a category whose objects are hyperbolic Riemann surfaces and whose morphisms are finite-sheeted holomorphic coverings to the usual category of $C^*$-algebras; this suggests that it might be possible to detect the holomorphic structure at the $C^*$-algebraic level. Observe that, if $\mathcal{M}_g$ is a compact Riemann surface of genus $g$, after identifying $p^{-1} (\pi_1 (\mathcal{M}_g)) \backslash \SL(2,\mathbb{R})$ with the unit tangent bundle $T_1 (\mathcal{M}_g)$, the Thom-Connes isomorphism (\cite{thom-connes}) gives a way to compute the $K$-theory of $C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes p^{-1}(\pi_1 (\mathcal{M}_g)) \simeq C(T_1 (\mathcal{M}_g))\rtimes \mathbb{R}$ and it reads \[ K_0 (C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes p^{-1}(\pi_1 (\mathcal{M}_g))) = \mathbb{Z}^{2g+1}, \nonumber \] \[ K_1 (C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes p^{-1}(\pi_1 (\mathcal{M}_g)))= \mathbb{Z}^{2g+1} \oplus \mathbb{Z} /(2g-2) . \nonumber \] Both the order and the scale in $K_0$ are trivial since $C_0 (\mathbb{R}^2 \backslash \{0\})\rtimes p^{-1}(\pi_1 (\mathcal{M}_g))$ is projectionless and stable.\\ Furthermore, by \cite{thom-connes} Corollary 2 the range of the pairing between $K_0$ and the unique trace is determined by the Ruelle-Sullivan current associated to this flow (see \cite{noncommgeom} 5-$\alpha$), which is trivial by \cite{paternain}. Thus the Elliott invariant contains information only about the genus, or equivalently, the homeomorphism class of the Riemann surface. In particular, if the Elliott conjecture holds true for this class of $C^*$-algebras and if it is possible to detect the holomorphic structure at the level of the $C^*$-algebras, this should correspond to something finer than the $C^*$-algebraic structure. \section{Acknowledgements} The author thanks Prof. Wilhelm Winter for the hospitality at the Westf\"alische Wilhelms-Universit\"at of M\"unster and Prof. Roberto Longo for the hospitality at the Universit\`a degli Studi di Roma Tor Vergata for the period of this research. Many thanks go to Prof. Ludwik D\k abrowski who carefully read and gave his important feedback on the parts of this paper that are contained in the author's PhD thesis. The author also thanks the anonymous referee for the valuable comments on a previous version of the manuscript which led to an improved exposition. This research is partially supported by INdAM.
\end{document}
\begin{document}
\title{ Reduced Free Products of Finite Dimensional $C^*$-Algebras } \par \author{ Nikolay A. Ivanov } \date{\today}
\address{\hskip-\parindent Nikolay Ivanov \\ Department of Mathematics \\ Texas A\&M University \\ College Station TX 77843-3368, USA} \email{[email protected]}
\begin{abstract} We find necessary and sufficient conditions for the simplicity and uniqueness of trace for reduced free products of finite families of finite dimensional $C^*$-algebras with specified traces on them. \end{abstract}
\maketitle
\section{Introduction and Definitions}
The notion of reduced free product of a family of $C^*$-algebras with specified states on them was introduced independently by Avitzour
(\cite{A82}) and Voiculescu (\cite{V85}). We will recall this notion and some of its properties here. \par
\begin{defi}
The couple $(A,\phi)$, where $A$ is a unital $C^*$-algebra and $\phi$ a state is called a $C^*$-noncommutative probability space or $C^*$-NCPS. \end{defi}
\par
\begin{defi}
Let $(A,\phi)$ be a $C^*$-NCPS and $\{ A_i | i \in I \}$ be a family of $C^*$-subalgebras of $A$, s.t. $1_A \in A_i$, $\forall i\in I$, where $I$ is an index set. We say that the family $\{ A_i |i \in I \}$ is free if $\phi(a_1...a_n)=0$, whenever $a_j \in A_{i_j} $ with $i_1\neq i_2\neq ... \neq i_n$ and $\phi(a_j)=0$, $\forall j \in \{ 1,...n \}$.
A family of subsets $\{ S_i | i \in I \}$ $\subset$ $A$ is $*$-free if
$\{ C^*(S_i \cup \{ 1_A \} ) | i \in I \}$ is free. \end{defi}
Let $\{ (A_i,\phi_i) | i \in I \}$ be a family of $C^*$-NCPS such that the GNS representations of $A_i$ associated to $\phi_i$ are all faithful. Then there is a unique $C^*$-NCPS $(A,\phi) \overset{def}{=} \underset{i \in I}{*} (A_i,\phi_i)$ with unital embeddings $A_i \hookrightarrow A$, s.t. \\
(1) $\phi|_{A_i}=\phi_i$ \\
(2) the family $\{ A_i | i \in I \}$ is free in $(A,\phi)$ \\ (3) $A$ is the $C^*$-algebra generated by $\underset{i \in I}{\bigcup}A_i$ \\ (4) the GNS representation of $A$ associated to $\phi$ is faithful. \\ And also: \\ (5) If $\phi_i$ are all traces then $\phi$ is a trace too (\cite{V85}). \\ (6) If $\phi_i$ are all faithful then $\phi$ is faithful too (\cite{D98}).
\par In the above situation $A$ is called the reduced free product algebra and $\phi$ is called the free product state. Also the construction of the reduced free product is based on defining a free product Hilbert space, which turns out to be $\mathfrak{H}_A$ - the GNS Hilbert space for $A$, associated to $\phi$. \par
\begin{example}
If $\{ G_i | i \in I \}$ is a family of discrete groups and $C^*_r(G_i)$ are the reduced group $C^*$-algebras, corresponding to the left regular representations of $G_i$ on $l^2(G_i)$ respectively, and if $\tau_i$ are the canonical traces on $C^*_r(G_i)$, $i \in I$, then we have $\underset{i \in I}{*} (C^*_r(G_i), \tau_i)=(C^*_r(\underset{i \in I}{*}G_i), \tau)$, where $\tau$ is the canonical trace on the group $C^*$-algebra $C^*_r(\underset{i \in I}{*} G_i)$. \end{example}
Reduced free products satisfy the following property:
\begin{lemma}[\cite{DR98}]
Let $I$ be an index set and let $(A_i,\phi_i)$ be a $C^*$-NCPS ($i \in I$), where each $\phi_i$ is faithful. Let $(B,\psi)$ be a $C^*$-NCPS with $\psi$ faithful. Let
\begin{center}
$(A,\phi) = \underset{i\in I}{*} (A_i,\phi_i)$.
\end{center}
Given unital $*$-homomorphisms $\pi_i : A_i \rightarrow B$ such that $\psi \circ \pi_i = \phi_i$ and $\{ \pi_i(A_i) \}_{i\in I}$ is free in $(B, \psi)$, there is a $*$-homomorphism $\pi : A \rightarrow B$ such that $\pi|_{A_i} = \pi_i$ and $\psi \circ \pi = \phi$.
\end{lemma}
\par
From now on we will be concerned only with $C^*$-algebras equipped with tracial states.
\par
The study of simplicity and uniqueness of trace for reduced free products of $C^*$-algebras, one can say, started with the paper of Powers \cite{P75}. In this paper Powers proved that the reduced $C^*$-algebra of the free group on two generators $F_2$ is simple and has a unique trace - the canonical one. In \cite{C79} Choi showed the same for the "Choi algebra" $C_r^*(\mathbb{Z}_2 * \mathbb{Z}_3)$ and then Paschke and Salinas in \cite{PS79} generalized the result to the case of $C_r^*(G_1 * G_2)$, where $G_1, G_2$ are discrete groups, such that $G_1$ has at least two and $G_2$ at least three elements. After that Avitzour in \cite{A82} gave a sufficient condition for simplicity and uniqueness of trace for reduced free products of $C^*$-algebras, generalizing the previous results. He proved:
\begin{thm}[\cite{A82}]
Let
\begin{equation*} (\mathfrak{A}, \tau) = (A, \tau_A) * (B, \tau_B), \end{equation*} where $\tau_A$ and $\tau_B$ are traces and $(A,\tau_A)$ and $(B,\tau_B)$ have faithful GNS representations. Suppose that there are unitaries $u,v \in A$ and $w \in B$, such that $\tau_A(u) = \tau_A(v) = \tau_A(u^* v) = 0$ and $\tau_B(w) = 0$. Then $\mathfrak{A}$ is simple and has a unique trace $\tau$.
\end{thm}
{\em Note:} It is clear that $uw$ satisfies $\tau((uw)^n) = 0$, $\forall n \in \mathbb{Z} \backslash \{ 0 \}$. We define unitaries with this property below.
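For completeness, here is the short verification (it uses only the freeness of $A$ and $B$ in $(\mathfrak{A},\tau)$ and the fact that $\tau$ is a state): for $n \geq 1$,
\begin{equation*}
\tau((uw)^n) = \tau(\underbrace{u w\, u w \cdots u w}_{2n\ \mathrm{letters}}) = 0
\end{equation*}
by freeness, since the letters alternate between the trace-zero elements $u \in A$ and $w \in B$; for $n \leq -1$ one has $\tau((uw)^n) = \overline{\tau((uw)^{-n})} = 0$.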
\section{Statement of the Main Result and Preliminaries}
We adopt the following notation: \\ If $A_0$, ... , $A_n$ are unital $C^*$-algebras equipped with traces $\tau_0$, ... , $\tau_n$ respectively, then $A=\underset{\alpha_0}{\overset{p_0}{A_0}} \bigoplus \underset{\alpha_1}{\overset{p_1}{A_1}} \bigoplus ... \bigoplus \underset{\alpha_n}{\overset{p_n}{A_n}}$ will mean that the $C^*$-algebra $A$ is isomorphic to the direct sum of $A_0$, ... , $A_n$, and is such that $A_i$ are supported on the projections $p_i$. Also $A$ comes with a trace (let's call it $\tau$) given by the formula $\tau=\alpha_0\tau_0 + \alpha_1\tau_1 + ... + \alpha_n\tau_n$. Here of course $\alpha_0$, $\alpha_1$, ... , $\alpha_n > 0$ and $\alpha_0 + \alpha_1 + ... + \alpha_n = 1$.
\begin{defi}
If $(A,\tau)$ is a $C^*$-NCPS and $u\in A$ is a unitary with $\tau(u^n)=0$, $\forall n \in \mathbb{Z} \backslash \{ 0 \}$, then we call $u$ a Haar unitary.
\par
If $1_A \in B \subset A$ is a unital abelian $C^*$-subalgebra of $A$ we call $B$ a diffuse abelian $C^*$-subalgebra of $A$ if $\tau|_B$ is given by an atomless measure on the spectrum of $B$. We also call $B$ a unital diffuse abelian $C^*$-algebra.
\end{defi}
From Proposition 4.1(i), Proposition 4.3 of \cite{DHR97} we can conclude the following:
\begin{prop}
If $(B,\tau)$ is a $C^*$-NCPS with $B$ abelian, then $B$ is diffuse abelian if and only if $B$ contains a Haar unitary.
\end{prop}
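The standard example, recorded here only as an illustration: let $B = C(\mathbb{T})$ and let $\tau$ be integration against the normalized Haar (arc-length) measure. Then the canonical generator $z$ satisfies
\begin{equation*}
\tau(z^n) = \frac{1}{2\pi}\int_0^{2\pi} e^{in\theta}\, d\theta = 0, \qquad n \in \mathbb{Z} \backslash \{ 0 \},
\end{equation*}
so $z$ is a Haar unitary (whence the name) and $(C(\mathbb{T}),\tau)$ is diffuse abelian.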
$C^*$-algebras of the form $(\underset{\alpha}{\overset{p}{\mathbb{C}}} \bigoplus \underset{1-\alpha}{\overset{1-p}{\mathbb{C}}})*(\underset{\beta}{\overset{q}{\mathbb{C}}}\bigoplus \underset{1-\beta}{\overset{1-q}{\mathbb{C}}})$ have been described explicitly in \cite{ABH91} (see also \cite{D99LN}):
\begin{thm}
Let $1 > \alpha \geqq \beta \geqq \frac{1}{2}$ and let
\begin{center}
$( A,\tau ) = ( \underset{\alpha }{\overset{p}{\mathbb{C}}} \oplus \underset{1-\alpha }{\overset{1-p}{\mathbb{C}}} ) * ( \underset{\beta }{\overset{q}{\mathbb{C}}}\oplus \underset{1-\beta}{\overset{1-q}{\mathbb{C}}} ) $.
\end{center}
If $\alpha > \beta$ then
\begin{equation*} A=\underset{\alpha -\beta }{\overset{p\wedge (1-q)}{\mathbb{C}}}\oplus C([a,b], M_2(\mathbb{C}))\oplus \underset{\alpha + \beta -1}{\overset{p\wedge q}{\mathbb{C}}} , \end{equation*} for some $0 < a < b < 1$. Furthermore, in the above picture
\begin{center} $p=1 \oplus \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \oplus 1 ,$ \end{center}
\begin{equation*} q=0\oplus \begin{pmatrix} t & \sqrt{t(1-t)} \\ \sqrt{t(1-t)} & 1-t \end{pmatrix} \oplus 1 , \end{equation*} and the faithful trace $\tau$ is given by the indicated weights on the projections $p\wedge (1-q)$ and $p\wedge q$, together with an atomless measure, whose support is $[a,b]$.
\par
If $\alpha =\beta > \frac{1}{2}$ then
\begin{equation*}
A=\{\ f:[0,b]\rightarrow M_2(\mathbb{C}) |\ f\ is\ continuous\ and\ f(0)\ is\ diagonal\ \} \oplus \underset{\alpha + \beta -1}{\overset{p\wedge q}{\mathbb{C}}}, \end{equation*} for some $0 < b < 1$. Furthermore, in the above picture
\begin{center} $p= \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \oplus 1,$ \end{center}
\begin{equation*} q= \begin{pmatrix} {t} & {\sqrt{t(1-t)}} \\ {\sqrt{t(1-t)}} & {1-t} \end{pmatrix} \oplus 1, \end{equation*} and the faithful trace $\tau$ is given by the indicated weight on the projection $p\wedge q$, together with an atomless measure on $[0,b]$.
\par
If $\alpha = \beta = \frac{1}{2}$ then
\begin{equation*}
A=\{\ f:[0,1]\rightarrow M_2(\mathbb{C}) |\ f\ is\ continuous\ and\ f(0)\ and\ f(1)\ are\ diagonal\ \}. \end{equation*}
Furthermore in the above picture
\begin{center} $p= \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} ,$ \end{center}
\begin{equation*} q= \begin {pmatrix} t & \sqrt{t(1-t)} \\ \sqrt{t(1-t)} & 1-t \end{pmatrix} , \end{equation*} and the faithful trace $\tau$ is given by an atomless measure, whose support is $[0,1]$.
\end{thm}
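As a quick sanity check on the weights in the case $\alpha > \beta$ (a bookkeeping remark of ours, not used later): the two atoms account for
\begin{equation*}
(\alpha - \beta) + (\alpha + \beta - 1) = 2\alpha - 1
\end{equation*}
of the total trace, so the summand $C([a,b], M_2(\mathbb{C}))$ carries the remaining mass $2(1-\alpha)$. In particular the atom at $p \wedge (1-q)$ disappears exactly when $\alpha = \beta$ and the atom at $p \wedge q$ exactly when $\alpha = \beta = \frac{1}{2}$, in agreement with the second and third cases of the theorem.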
The question of describing the reduced free product of a finite family of finite dimensional abelian $C^*$-algebras was studied by Dykema in \cite{D99}. He proved the following theorem:
\begin{thm}[\cite{D99}]
Let
\begin{equation*} (\mathfrak{A},\phi )=(\underset{\alpha_0}{\overset{p_0}{A_0}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_n}{\overset{p_n}{\mathbb{C}}})*(\underset{\beta_0}{\overset{q_0}{B_0}} \oplus \underset{\beta_1}{\overset{q_1}{\mathbb{C}}} \oplus ... \oplus \underset{\beta_m}{\overset{q_m}{\mathbb{C}}}), \end{equation*}
where $\alpha_0 \geq 0$ and $\beta_0 \geq 0$ and $A_0$ and $B_0$ are equipped with traces $\phi(p_0)^{-1} \phi|_{A_0}$, $\phi(q_0)^{-1}
\phi|_{B_0}$ and $A_0$ and $B_0$ have diffuse abelian $C^*$-subalgebras, and where $n \geq 1$, $m \geq 1$ (if $\alpha_0 = 0$ or $\beta_0 = 0$, or both, then, of course, we don't impose any conditions on $A_0$ or $B_0$, or both respectively). Suppose also that $\dim(A) \geq 2$, $\dim(B) \geq 2$, and $\dim(A) + \dim(B) \geq 5$.
\par
Then
\begin{equation*} \mathfrak{A} = \overset{r_0}{\mathfrak{A}_0} \oplus \underset{(i,j)\in L_+}{\bigoplus} \underset{\alpha_i + \beta_j -1}{\overset{p_i \wedge q_j}{\mathbb{C}}}, \end{equation*}
where $L_+ = \{ (i,j)| 1 \leq i \leq n$, $1 \leq j \leq m$ and $\alpha_i + \beta_j > 1 \}$, and where $\mathfrak{A}_0$ has a unital, diffuse abelian subalgebra supported on $r_0 p_1$ and another one supported on $r_0 q_1$.
\par
Let $ L_0 = \{(i,j)| 1 \leq i \leq n$, $1 \leq j \leq m$ and $\alpha_i + \beta_j = 1 \} .$
\par
If $L_0$ is empty then $\mathfrak{A}_0$ is simple and $\phi(r_0)^{-1} \phi|_{\mathfrak{A}_{0}}$ is the unique trace on $\mathfrak{A}_0.$
\par
If $L_0$ is not empty, then for each $(i,j) \in L_0$ there is a $*$-homomorphism $\pi_{(i,j)}: \mathfrak{A}_0 \rightarrow \mathbb{C}$ such that $\pi_{(i,j)}(r_0 p_i) = 1 = \pi_{(i,j)}(r_0 q_j).$ Then: \\
(1) $\mathfrak{A}_{00} \overset{def}{=} \underset{(i,j)\in L_0}{\bigcap} \ker (\pi_{(i,j)})$ \\
is simple and nonunital, and $\phi(r_0)^{-1} \phi|_{\mathfrak{A}_{00}}$ is the unique trace on $\mathfrak{A}_{00}.$ \\
(2) For each $i\in \{1,...n \}, \ r_0 p_i$ is full in $\mathfrak{A}_0 \cap \underset{i' \neq i}{\underset{(i',j) \in L_0}{\bigcap}} \ker (\pi_{(i',j)}).$ \\
(3) For each $j \in \{ 1, ... , m \}, \ r_0 q_j$ is full in $\mathfrak{A}_{0} \cap \underset{j' \neq j}{\underset{(i,j') \in L_0}{\bigcap}} \ker (\pi_{(i,j')}).$
\end{thm}
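As an illustration (not needed in what follows), Theorem 2.4 recovers the simplicity and uniqueness of trace of the Choi algebra recalled in the Introduction. Indeed, with respect to the canonical traces one has $C^*_r(\mathbb{Z}_2) \cong \underset{\frac{1}{2}}{\mathbb{C}} \oplus \underset{\frac{1}{2}}{\mathbb{C}}$ and $C^*_r(\mathbb{Z}_3) \cong \underset{\frac{1}{3}}{\mathbb{C}} \oplus \underset{\frac{1}{3}}{\mathbb{C}} \oplus \underset{\frac{1}{3}}{\mathbb{C}}$, so Example 1.3 gives
\begin{equation*}
(C^*_r(\mathbb{Z}_2 * \mathbb{Z}_3), \tau) = (\underset{\frac{1}{2}}{\mathbb{C}} \oplus \underset{\frac{1}{2}}{\mathbb{C}}) * (\underset{\frac{1}{3}}{\mathbb{C}} \oplus \underset{\frac{1}{3}}{\mathbb{C}} \oplus \underset{\frac{1}{3}}{\mathbb{C}}).
\end{equation*}
Here the two factors have dimensions $2$ and $3$, and $\alpha_i + \beta_j = \frac{1}{2} + \frac{1}{3} = \frac{5}{6} < 1$ for all $i,j$, so $L_+ = L_0 = \emptyset$ and Theorem 2.4 shows that $C^*_r(\mathbb{Z}_2 * \mathbb{Z}_3)$ is simple with a unique trace.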
One can define von Neumann algebra free products, similarly to reduced free products of $C^*$-algebras. We will denote by $\mathbb{M}_n$ the $C^*$-algebra (von Neumann algebra) of $n \times n$ matrices with complex coefficients. \par Dykema studied the case of von Neumann algebra free products of finite dimensional (von Neumann) algebras:
\begin{thm}[\cite{D93}]
Let
\begin{equation*} A = \underset{\alpha_0}{\overset{p_0}{L(F_s)}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{M}_{n_1}}} \oplus ... \oplus \underset{\alpha_k}{\overset{p_k}{\mathbb{M}_{n_k}}} \end{equation*} and
\begin{equation*} B = \underset{\beta_0}{\overset{q_0}{L(F_r)}} \oplus \underset{\beta_1}{\overset{q_1}{\mathbb{M}_{m_1}}} \oplus ... \oplus \underset{\beta_l}{\overset{q_l}{\mathbb{M}_{m_l}}}, \end{equation*} where $L(F_s), L(F_r)$ are interpolated free group factors, $\alpha_0, \beta_0 \geq 0$, and where $\dim(A) \geq 2$, $\dim(B) \geq 2$ and $\dim(A) + \dim(B)\geq 5$. Then for the von Neumann algebra free product we have:
\begin{equation*} A*B = L(F_t) \oplus \underset{(i,j) \in L_+}{\bigoplus} \underset{\gamma_{ij}}{\overset{f_{ij}}{\mathbb{M}_{N(i,j)}}}, \end{equation*}
where $L_+ = \{(i,j) | 1 \leq i \leq k, 1 \leq j \leq l, (\frac{\alpha_i}{n_i^2}) + (\frac{\beta_j}{m_j^2}) > 1 \}$, $N(i,j) = max(n_i, m_j)$, $\gamma_{ij} = N(i,j)^2 \cdot (\frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2} - 1)$, and $f_{ij} \leq p_i \wedge q_j$.
\end{thm}
{\em Note:} $t$ can be determined from the other data, which makes sense only if the interpolated free group factors are all different. We will use only the fact that $L(F_t)$ is a factor. For definitions and properties of interpolated free group factors see \cite{Ra94} and \cite{D94}. \par In this paper we will extend the result of Theorem 2.4 to the case of reduced free products of finite dimensional $C^*$-algebras with specified traces on them. We will prove:
\begin{thm}
Let
\begin{equation*} (\mathfrak{A},\phi )=(\underset{\alpha_0}{\overset{p_0}{A_0}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{M}_{n_1}}} \oplus ... \oplus \underset{\alpha_k}{\overset{p_k}{\mathbb{M}_{n_k}}})*(\underset{\beta_0}{\overset{q_0}{B_0}} \oplus \underset{\beta_1}{\overset{q_1}{\mathbb{M}_{m_1}}} \oplus ... \oplus \underset{\beta_l}{\overset{q_l} {\mathbb{M}_{m_l}}}), \end{equation*}
where $\alpha_0, \beta_0 \geq 0$, $\alpha_i > 0$, for $i=1,...,k$ and $\beta_j > 0$, for $j=1,...,l$, and where $\phi(p_0)^{-1} \phi|_{A_0}$ and
$\phi(q_0)^{-1} \phi|_{B_0}$ are traces on $A_0$ and $B_0$ respectively. Suppose that $\dim(A) \geq 2$, $\dim(B) \geq 2$, $\dim(A) + \dim(B) \geq 5$, and that both $A_0$ and $B_0$ contain unital, diffuse abelian $C^*$-subalgebras (if $\alpha_0 > 0$, respectively $\beta_0 > 0$). Then
\begin{equation*} \mathfrak{A}= \underset{\gamma}{\overset{f}{\mathfrak{A}_0}} \oplus \underset{(i,j)\in L_+}{\bigoplus} \underset{\gamma_{ij}}{\overset{f_{ij}}{\mathbb{M}_{N(i,j)}}}, \end{equation*}
where $L_+ = \{ (i,j)| \frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2} > 1 \}$, $N(i,j) = max(n_i,m_j)$, $\gamma_{ij} = N(i,j)^2(\frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2} -1)$, $f_{ij} \leq p_i \wedge q_j$. There is a unital, diffuse abelian $C^*$-subalgebra of $\mathfrak{A}_0$, supported on $f p_1$ and another one, supported on $f q_1$.
\par
If $L_0 = \{ (i,j)| \frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2} = 1 \}$ is empty, then $\mathfrak{A}_0$ is simple with a unique trace. If $L_0$ is not empty, then $\forall (i,j) \in L_0 ,\ \exists \pi_{(i,j)} : \mathfrak{A}_{0} \rightarrow \mathbb{M}_{N(i,j)}$ a unital $*$-homomorphism, such that $\pi_{(i,j)}(f p_i) = \pi_{(i,j)}(f q_j) = 1$. Then: \\ (1) $\mathfrak{A}_{00} \overset{def}{=} \underset{(i,j)
\in L_0}{\bigcap} \ker (\pi_{(i,j)})$ is simple and nonunital, and has a unique trace $\phi(f )^{-1} \phi |_{\mathfrak{A}_{00}}$. \\ (2) For each $i \in \{ 1, ..., k \}$, $f p_i$ is full in $\mathfrak{A}_0 \cap \underset{i' \neq i}{\underset{(i',j) \in L_0}{\bigcap}} \ker(\pi_{(i',j)})$. \\ (3) For each $j \in \{ 1, ..., l \}$, $f q_j$ is full in $\mathfrak{A}_0 \cap \underset{j' \neq j}{\underset{(i,j') \in L_0}{\bigcap}} \ker(\pi_{(i,j')})$.
\end{thm}
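Before turning to the proof, here is a small numerical illustration of the statement (the weights $0.9$ and $0.1$ are chosen arbitrarily). Let
\begin{equation*}
(\mathfrak{A},\phi) = (\underset{0.9}{\overset{p_1}{\mathbb{C}}} \oplus \underset{0.1}{\overset{p_2}{\mathbb{C}}}) * (\mathbb{M}_2, tr_2),
\end{equation*}
so that $k=2$, $n_1 = n_2 = 1$, $l=1$, $m_1 = 2$, $\beta_1 = 1$ and $\alpha_0 = \beta_0 = 0$. Then $\frac{\alpha_1}{n_1^2} + \frac{\beta_1}{m_1^2} = 0.9 + \frac{1}{4} > 1$ while $\frac{\alpha_2}{n_2^2} + \frac{\beta_1}{m_1^2} = 0.35 < 1$, so $L_+ = \{ (1,1) \}$, $L_0 = \emptyset$, $N(1,1) = 2$ and $\gamma_{11} = 4(0.9 + \frac{1}{4} - 1) = 0.6$. The theorem thus gives $\mathfrak{A} = \underset{0.4}{\overset{f}{\mathfrak{A}_0}} \oplus \underset{0.6}{\overset{f_{11}}{\mathbb{M}_2}}$ with $\mathfrak{A}_0$ simple and with a unique trace. This agrees with the case $\alpha_m > 1 - \frac{1}{n^2}$ treated in the next section, where the matrix summand of $(\underset{\alpha_1}{\mathbb{C}} \oplus ... \oplus \underset{\alpha_m}{\mathbb{C}}) * (\mathbb{M}_n, tr_n)$ has weight $n^2 \alpha_m - n^2 + 1$ ($= 0.6$ here).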
\section{Beginning of the Proof - A Special Case}
In order to prove this theorem we will start with a simpler case. We will study first the $C^*$-algebras of the form $(A,\tau) \overset{def}{=} $ $ ( \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_m}{\overset{p_m}
{\mathbb{C}}})*(\mathbb{M}_n, tr_n)$ with $0 < \alpha_1 \leq ... \leq \alpha_m$. We choose a set of matrix units for $\mathbb{M}_n$ and denote them by $\{ e_{ij}|i,j \in \{1,...,n \} \} $ as usual. Let's take the (trace zero) permutation unitary $$ u \overset{def}{=} \begin{pmatrix} 0 & 1 & ... & 0 \\ . & . & . & . \\ 0 & 0 & ... & 1 \\ 1 & 0 & ... & 0 \end{pmatrix} \in \mathbb{M}_n.$$ \\
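It is convenient to record how $u$ acts on the standard basis vectors $\xi_1, ..., \xi_n$ of $\mathbb{C}^n$ (this notation is used only in the present remark): $u \xi_1 = \xi_n$ and $u \xi_i = \xi_{i-1}$ for $2 \leq i \leq n$, so that, since $e_{ii} = \xi_i \xi_i^*$,
\begin{equation*}
u e_{ii} u^* = (u \xi_i)(u \xi_i)^*, \qquad 1 \leq i \leq n.
\end{equation*}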
We see that $\Ad(u)(e_{11}) = u e_{11} u^* = e_{nn}$ and for $2 \leq i \leq n$, $\Ad(u)(e_{ii}) = u e_{ii} u^* = e_{(i-1) (i-1)}$. \par It's clear that $$A = C^*(\{p_1, ..., p_m \}, \{ e_{ii}\}_{i=1}^n, u).$$ Then it is also clear that
$$A = C^*(\{ u^ip_1u^{-i} , ... , u^ip_mu^{-i} \}_{i=0}^{n-1}, \{ e_{ij} \}_{i=1}^{n}, u).$$ We want to show that the family
$$\{ \{ \mathbb{C} \cdot u^ip_1u^{-i} \oplus ... \oplus \mathbb{C} \cdot u^ip_mu^{-i} \}_{i=0}^{n-1},\ \{ \mathbb{C} \cdot e_{11} \oplus ... \oplus \mathbb{C} \cdot e_{nn} \} \}$$ is free.
We will prove something more general. We denote $$B \overset{def}{=} C^*( \{ u^kp_1u^{-k}, ... , u^kp_mu^{-k} \}_{k=0}^{n-1}, \{ e_{11}, ... ,e_{nn} \} ).$$
Let $l$ be an integer and $l|n$, $1 < l < n$ (if such $l$ exists). Let $$E \overset{def}{=} C^*( \{ \{ u^kp_1u^{-k}, ... , u^kp_mu^{-k} \}_{k=0}^{l-1}, \{ e_{11} , ... , e_{nn} \}, \{ u^l, u^{2l}, ... , u^{n-l} \} \} ).$$ It's easy to see that $$C^* ( \{ e_{11}, ... , e_{nn} \}, \{ u^l, u^{2l}, ... , u^{n-l} \} )= \underbrace{\mathbb{M}_{ \frac{n}{l} } \oplus ... \oplus \mathbb{M}_{ \frac{n}{l} }}_{l-times} \subset \mathbb{M}_n.$$ We will adopt the following notation from \cite {D99LN}: \par Let $(D, \varphi)$ be a $C^*$-NCPS and $1_D \in D_1, ..., D_k \subset D$ be a family of unital
$C^*$-subalgebras of $D$, having a common unit $1_D$. We denote $D^{\circ} \overset{def}{=} \{ d\in D | \varphi(d)=0 \}$ (analogously for $D_1$, ..., etc). We denote by $\Lambda^{\circ}(D_1^{\circ}, D_2^{\circ} , ..., D_k^{\circ})$ the set of all words $d_1 d_2 \cdots d_j$ of nonzero length, where $d_t \in D_{i_t}^{\circ}$, for some $1 \leq i_t \leq k$ and $i_t \neq i_{t+1}$ for any $1 \leq t \leq j-1$. \\ \par We have the following
\begin{lemma}
If everything is as above, then:
(i) The family $\{ \{ u^kp_1u^{-k} , ... , u^kp_mu^{-k} \}_{k=0}^{n-1},$ $\{ e_{11} , ... ,e_{nn} \} \}$ is free in $(A,\tau)$. And more generally if $$\omega \in \Lambda^{\circ}( C^*(p_1, ..., p_m)^{\circ}, ..., C^*(u^{n-1}p_1u^{1-n}, ..., u^{n-1}p_mu^{1-n})^{\circ}, C^*(e_{11}, ..., e_{nn})^{\circ}),$$ then $\tau(\omega u^r)=0$ for all $0 \leq r \leq n-1$.
(ii) The family $\{ \{ u^kp_1u^{-k} , ... , u^kp_mu^{-k} \}_{k=0}^{l-1},$ $\{ e_{11} , ... , e_{nn} , u^l, u^{2l}, ..., u^{n-l} \} \}$ is free in $(A,\tau)$. And more generally if $$\omega \in \Lambda^{\circ}(C^*(p_1,..., p_m)^{\circ},..., C^*(u^{l-1}p_1u^{1-l},..., u^{l-1}p_mu^{1-l})^{\circ}, C^*( e_{11}, ..., e_{nn}, u^l,..., u^{n-l})^{\circ}),$$ then $\tau(\omega u^r)=0$ for all $0 \leq r \leq l-1$.
\end{lemma}
\begin{proof}
Each letter $\alpha \in C^*( \{ u^kp_1u^{-k}, ... , u^kp_mu^{-k} \})$ with $\tau(\alpha) = 0$ can be represented as $\alpha = u^k \alpha' u^{-k}$ with $\tau(\alpha') = 0$, and $\alpha' \in C^*( \{ p_1, ..., p_m \} )$. \par
Case (i): \\ \par Each $$\omega \in \Lambda^{\circ}( C^*(p_1, ..., p_m)^{\circ}, ..., C^*(u^{n-1}p_1u^{1-n}, ..., u^{n-1}p_mu^{1-n})^{\circ}, C^*(e_{11}, ..., e_{nn})^{\circ})$$ is of one of the four following types:
\begin{equation} \omega = \alpha_{11} \alpha_{12} \cdots \alpha_{1i_1} \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2 \alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1} \alpha_{t1} \cdots \alpha_{ti_t}, \end{equation}
\begin{equation} \omega = \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2 \alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1} \alpha_{t1} \cdots \alpha_{ti_t}, \end{equation}
\begin{equation} \omega = \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2 \alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1}, \end{equation}
\begin{equation} \omega = \alpha_{11} \alpha_{12} \cdots \alpha_{1i_1} \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2 \alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1}, \end{equation} where $\alpha_{ij} \in C^*(u^{k_{ij}}p_1u^{-k_{ij}}, ..., u^{k_{ij}}p_mu^{-k_{ij}})^{\circ}$ with $0 \leq k_{ij} \leq n-1$, $k_{ij} \neq k_{i(j+1)}$ and $\beta_i \in C^*(e_{11}, ..., e_{nn})^{\circ}$. \\ \par
We consider the following two cases: \\
(a) We look at $\alpha_{ji} \alpha_{ji+1}$ with $\alpha_{jc}$ $\in$ $C^*(\{ u^{k_{c}}p_1u^{-k_{c}}, ... , u^{k_{c}}p_mu^{-k_{c}} \} )^{\circ}$ for $c=i, i+1$. We write $\alpha_{jc} = u^{k_{c}} \alpha'_{jc} u^{-k_{c}}$ with $\alpha'_{jc} \in C^*( \{ p_1, ... , p_m \} )^{\circ}$ for $c = i, i+1$. So $\alpha_{ji} \alpha_{ji+1} =$ \\ $u^{k_i} \alpha'_{ji} u^{k_{i+1} - k_i} \alpha'_{ji+1} u^{-k_{i+1}}$. Here $\alpha'_{ji}$ and $\alpha'_{ji+1}$ are free from $u^{k_{i+1} - k_i}$ in $(A,\tau)$ (Notice that we have $k_{i+1} - k_i \neq 0$). \\
(b) We look at $\alpha_{ji_j} \beta_j \alpha_{(j+1) 1}$ with $\beta_j \in C^*( \{e_{11} , ... , e_{nn} \} )^{\circ},$ \\ $\alpha_{(j+1)1} \in C^*( \{ u^{k_{j+1}}p_1u^{-k_{j+1}} , ... , u^{k_{j+1}} p_m u^{-k_{j+1}} \} )^{\circ},$ \\ $\alpha_{ji_j} \in C^*( \{u^{k_j} p_1 u^{-k_j} , ... , u^{k_j} p_m u^{-k_j} \} )^{\circ}$. Now we write $\alpha_{ji_j} = u^{k_j} \alpha'_{ji_j}
u^{-k_j}$ and $\alpha_{(j+1)1} = u^{k_{j+1}} \alpha'_{(j+1)1} u^{-k_{j+1}}$ with
$\alpha'_{ji_j} , \alpha'_{(j+1)1} \in
C^*( \{ p_1 , ..., p_m \} )^{\circ}$. We see that $\alpha_{ji_j} \beta_j \alpha_{(j+1)1} =$ $u^{k_j} \alpha'_{ji_j} u^{-k_j} \beta_j u^{k_{j+1}} \alpha'_{(j+1)1} u^{-k_{j+1}}$. If $k_j = k_{j+1}$ then $\tau(u^{-k_j} \beta_j u^{k_{j+1}}) = \tau(u^{k_{j+1}} u^{-k_j} \beta_j) = \tau(\beta_j) = 0$ since $\tau$ is a trace. If $k_j \neq k_{j+1}$ then $\tau(u^{-k_j} \beta_j u^{k_{j+1}}) = \tau(u^{k_{j+1}} u^{-k_j} \beta_j)$ and $u^{k_{j+1} - k_j} \beta_j \in \mathbb{M}_n$ is a linear combination of off-diagonal elements, so $\tau(u^{k_{j+1}} u^{-k_j} \beta_j) = 0$ also. Notice that $\alpha'_{ji_j}$ and $\alpha'_{(j+1)1}$ are free from $u^{-k_j} \beta_j u^{k_{j+1}}$ in $(A,\tau)$. \\
Now we expand all the letters in the word $\omega$ according to the cases (a) and (b). We see that we obtain a word, consisting of letters of zero trace, such that any two consecutive letters come from different algebras: one from $C^*( \{p_1, ..., p_m \} )$ and the other from $\mathbb{M}_n$. So $\tau(\omega) = 0$. It only remains to look at the case of the word $\omega u^r$, which is the word $\omega$, but ending in $u^r$. There are two principally different cases for $\omega u^r$ among all four possible choices for $\omega$: \\
In cases (1) and (2) $\alpha_{ti_t} = u^k \alpha'_{ti_t} u^{-k}$ for some $0 \leq k \leq n-1$ with $\alpha'_{ti_t} \in C^*( \{ p_1 , ..., p_m \} )^{\circ}$. So the word will end in $u^k \alpha'_{ti_t} u^{r-k}$. If $r = k$ then $\alpha'_{ti_t}$ will be the last letter with trace zero and everything else will be the same as for $\omega$, so the whole word will have trace $0$. If $k \neq r$ then $\tau(u^{r-k}) = 0$ and $u^{r-k}$ is free from $\alpha'_{ti_t}$ so the word in this case will be of zero trace too. \\
In cases (3) and (4) if $\beta_{t-1} u^{r}$ is the whole word then $\beta_{t-1} u^{r}$ is a linear combination of off-diagonal elements of $\mathbb{M}_n$, and so its trace is $0$. If not then $\alpha_{(t-1)i_{t-1}} = u^k \alpha'_{(t-1)i_{t-1}} u^{-k}$ with $\alpha'_{(t-1)i_{t-1}} \in C^*( \{ p_1 , ... , p_m \} )^{\circ}$. So the word ends in \\ $ u^k \alpha'_{(t-1)i_{t-1}} u^{-k} \beta_{t-1} u^{r} $. Similarly as above we see that $\tau(u^{-k} \beta_{t-1} u^{r}) = 0$ for all values of $k$ and $r$. The rest of the word we treat as above and conclude that it's of zero trace in this case too. \\
So in all cases $\tau( \omega u^r) = 0$ just what we had to show. \\ \par Case (ii): \\ \par As in case (i) $$\omega \in \Lambda^{\circ}(C^*(p_1,..., p_m)^{\circ},..., C^*(u^{l-1}p_1u^{1-l},..., u^{l-1}p_mu^{1-l})^{\circ}, C^*( e_{11},..., e_{nn}, u^l,..., u^{n-l})^{\circ})$$ is of one of the following types: \\
\begin{equation} \omega = \alpha_{11} \alpha_{12} \cdots \alpha_{1i_1} \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2 \alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1} \alpha_{t1} \cdots \alpha_{ti_t}, \end{equation}
\begin{equation} \omega = \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2 \alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1} \alpha_{t1} \cdots \alpha_{ti_t}, \end{equation}
\begin{equation} \omega = \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2 \alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1}, \end{equation}
\begin{equation} \omega = \alpha_{11} \alpha_{12} \cdots \alpha_{1i_1} \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2 \alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1}, \end{equation} where $\alpha_{ij} \in C^*(u^{k_{ij}}p_1u^{-k_{ij}}, ..., u^{k_{ij}}p_mu^{-k_{ij}})^{\circ}$ with $0 \leq k_{ij} \leq l-1$ and $k_{ij} \neq k_{i(j+1)}$ and $\beta_i \in C^*(e_{11}, ..., e_{nn}, u^l, u^{2l}, ..., u^{n-l})^{\circ}$. \\ \par Similarly to case (i), we consider two cases: \\
(a) We look at $\alpha_{ji} \alpha_{ji+1}$ with $\alpha_{jc}$ $\in$ $C^*(\{ u^{k_{c}}p_1u^{-k_{c}}, ... , u^{k_{c}}p_mu^{-k_{c}} \} )$, and $0 \leq k_c \leq l-1$ for $c=i, i+1$. We write $\alpha_{jc} = u^{k_c} \alpha'_{jc} u^{-k_c}$ with $\alpha'_{jc} \in C^*( \{ p_1, ... , p_m \} )^{\circ}$ for $c = i, i+1$. It follows $\alpha_{ji} \alpha_{ji+1} =$ $u^{k_i} \alpha'_{ji} u^{k_{i+1} - k_i} \alpha'_{ji+1} u^{-k_{i+1}}$. Here $\alpha'_{ji}$ and $\alpha'_{ji+1}$ are free from $u^{k_{i+1} - k_i}$ in $(A,\tau)$ (and again $k_{i+1} - k_i \neq 0$). \\
(b) We look at $\alpha_{ji_j} \beta_j \alpha_{(j+1) 1}$ with $\beta_j \in C^*( \{e_{11} , ... , e_{nn} \} , \{ u^l, u^{2l}, ..., u^{n-l} \} )^{\circ},$ \\ $\alpha_{(j+1)1} \in C^*( \{ u^{k_{j+1}}p_1u^{-k_{j+1}} , ... , u^{k_{j+1}} p_m u^{-k_{j+1}} \} )^{\circ},$ \\ $\alpha_{ji_j} \in C^*( \{u^{k_j} p_1 u^{-k_j} , ... , u^{k_j} p_m u^{-k_j} \} )^{\circ}$, where in this case $k_j, k_{j+1} \in \{ 0, ..., l-1 \}$. Again we write $\alpha_{ji_j} = u^{k_j} \alpha'_{ji_j} u^{-k_j}$ and $\alpha_{(j+1)1} = u^{k_{j+1}} \alpha'_{(j+1)1} u^{-k_{j+1}}$ with $\alpha'_{ji_j} , \alpha'_{(j+1)1} \in C^*( \{ p_1 , ... , p_m \} )^{\circ}$. We have $\alpha_{ji_j} \beta_j \alpha_{(j+1)1} =$ $u^{k_j} \alpha'_{ji_j} u^{-k_j} \beta_j u^{k_{j+1}} \alpha'_{(j+1)1} u^{-k_{j+1}}$. \\ We only need to show that $\tau(u^{-k_j} \beta_j u^{k_{j+1}}) = 0$. $\tau(u^{-k_j} \beta_j u^{k_{j+1}}) = \tau(u^{k_{j+1}} u^{-k_j} \beta_j) = \tau(u^{k_{j+1} - k_j} \beta_j)$. The case $ k_{j+1} = k_j$ is clear. Notice that if $ k_{j+1} \neq k_j$ then $0 < |k_{j+1} - k_j| \leq l-1$. It is clear that $u^{k_{j+1} - k_j} \cdot \Span ( \{ e_{11}, ..., e_{nn} \}) \subset \mathbb{M}_n$ consists of linear combinations of off-diagonal elements. The same is clear for $u^{k_{j+1} - k_j} \cdot \Span( \{ u^l, u^{2l} , ..., u^{n-l} \} ) \subset \mathbb{M}_n $. It's not difficult to see then that $$u^{k_{j+1} - k_j} \cdot \Alg ( \{ e_{11}, ..., e_{nn} \}, \{ u^l, u^{2l}, ..., u^{n-l} \} )$$ will consist of the linear span of the union of the off-diagonal entries among
$\{ e_{ij} | 1 \leq i,j \leq n \}$ present in $u^{k_{j+1} - k_j} \cdot \Span( \{e_{11}, ..., e_{nn} \})$ and the ones present in \\ $u^{k_{j+1} - k_j} \cdot \Span( \{ u^l, u^{2l}, ..., u^{n-l} \} )$. This shows that $u^{k_{j+1} - k_j} \beta_j$ will also be a linear combination of off-diagonal entries in $\mathbb{M}_n$ and will have trace $0$. So $\tau(u^{-k_j} \beta_j u^{k_{j+1}}) = 0$. In this case also $\alpha'_{ji_j}$ and $\alpha'_{(j+1)1}$ are free from $u^{-k_j} \beta_j u^{k_{j+1}}$ in $(A,\tau)$. \\
We expand all the letters of the word $\omega$ and see that it is of trace $0$ similarly as in case (i). For the word $\omega u^r$ with $0 \leq r \leq l-1$ we argue similarly as in case (i). Again there are two principally different cases: \\
In cases (5) and (6) $\alpha_{ti_t} = u^k \alpha'_{ti_t} u^{-k}$ for some $0 \leq k \leq l-1$ with $\alpha'_{ti_t} \in C^*( \{ p_1 , ..., p_m \} )^{\circ}$. So the word will end in $u^k \alpha'_{ti_t} u^{r-k}$. If $r = k$ then $\alpha'_{ti_t}$ will be the last letter with trace zero and everything else will be the same as for $\omega$, so the whole word will have trace $0$. If $k \neq r$ then $\tau(u^{r-k}) = 0$ and $u^{r-k}$ is free from $\alpha'_{ti_t}$ so the word in this case will be of zero trace too.
In cases (7) and (8), if $\beta_{t-1} u^r$ is the whole word, then it is a linear combination of off-diagonal elements, as we showed in case (ii)-(b). If not, we write $\alpha_{(t-1)i_{t-1}} = u^k \alpha'_{(t-1)i_{t-1}} u^{-k}$ with $0 \leq k \leq l-1$ and $\alpha'_{(t-1)i_{t-1}} \in C^*( \{ p_1, ..., p_m \} )^{\circ}$. So the word that we are looking at will end in $u^k \alpha'_{(t-1)i_{t-1}} u^{-k} \beta_{t-1} u^{r} $. Since $0 \leq k,r \leq l-1$, we see, as in case (ii)-(b), that $\tau(u^{-k} \beta_{t-1} u^{r}) = 0$. We treat the remaining part of the word as above and conclude that in this case the word has trace $0$. \\ \par So in all cases $\tau(\omega u^r) = 0$, which is just what we had to show. \\ \par This proves the lemma.
\end{proof}
From properties (5) and (6) of the reduced free product it follows that $\tau$ is a faithful trace. From Lemma 1.4 it follows that $$B = (\mathbb{C} \cdot e_{11} \oplus ... \oplus \mathbb{C} \cdot e_{nn}) * (\underset{k=0}{\overset{n-1}{*}} (\mathbb{C} \cdot u^k p_1 u^{-k} \oplus ... \oplus \mathbb{C} \cdot u^k p_m u^{-k})),$$ $$\cong ( \underset{\frac{1}{n}}{\mathbb{C}} \oplus ... \oplus \underset{\frac{1}{n}}{\mathbb{C}} ) * (\underset{k=0}{\overset{n-1}{*}} (\underset{\alpha_1}{\mathbb{C}} \oplus ... \oplus \underset{\alpha_m}{\mathbb{C}}))$$ and that $$E = C^*( \{ e_{11}, ..., e_{nn}, u^l, u^{2l}, ..., u^{n-l} \} ) * (\underset{k=0}{\overset{l-1}{*}} (\mathbb{C} \cdot u^k p_1 u^{-k} \oplus ... \oplus \mathbb{C} \cdot u^k p_m u^{-k})),$$ $$\cong (\underset{\frac{l}{n}}{\mathbb{M}_{\frac{n}{l}}} \oplus ... \oplus \underset{\frac{l}{n}}{\mathbb{M}_{\frac{n}{l}}}) * (\underset{k=0}{\overset{l-1}{*}} (\underset{\alpha_1}{\mathbb{C}} \oplus ... \oplus \underset{\alpha_m}{\mathbb{C}})).$$
\begin{cor}
If everything is as above:
(1) For $b \in B$ and $0 < k \leq n-1$ we have $\tau(b u^k) = 0$, so also $\tau(u^k b) = 0$. \par (2) For $e \in E$ and $0 < k \leq l-1$ we have $\tau(e u^k) = 0$, so also $\tau(u^k e) = 0$.
\end{cor}
For $(B, \tau|_B)$ and $(E, \tau|_E)$ we have that $\mathfrak{H}_B \subset \mathfrak{H}_E \subset \mathfrak{H}_A$. If $a \in A$ we will denote by $\hat{a} \in \mathfrak{H}_A$ the vector in $\mathfrak{H}_A$, corresponding to $a$ by the GNS construction. We will show that
\begin{cor}
If everything is as above: \\
(1) $u^{k_1} \mathfrak{H}_B \bot u^{k_2} \mathfrak{H}_B$ for $k_1 \neq k_2$, $0 \leq k_1, k_2 \leq n-1$. \par (2) $u^{k_1} \mathfrak{H}_E \bot u^{k_2} \mathfrak{H}_E$ for $k_1 \neq k_2$, $0 \leq k_1, k_2 \leq l-1$.
\end{cor}
\begin{proof}
(1) Take $ b_1, b_2 \in B $. We have $\langle u^{k_1} \hat{b_1} , u^{k_2} \hat{b_2} \rangle = \tau(u^{k_2} b_2 b_1^* u^{-k_1}) = \tau(b_2 b_1^* u^{k_2 - k_1}) = 0,$ by the above Corollary. \\ (2) Similarly take $e_1, e_2 \in E$, so $\langle u^{k_1} \hat{e_1}, u^{k_2} \hat{e_2} \rangle = \tau(u^{k_2} e_2 e_1^* u^{-k_1}) = \tau(e_2 e_1^* u^{k_2 - k_1}) = 0,$ again by the above Corollary.
\end{proof}
Now $\mathfrak{H}_A$ can be written in the form $\mathfrak{H}_A = \underset{i=0}{\overset{n-1}{\bigoplus}} u^i \mathfrak{H}_B$ as a Hilbert space because of the Corollary above. Denote by $P_i$ the projection $P_i : \mathfrak{H}_A \rightarrow \mathfrak{H}_A$ onto the subspace $u^i \mathfrak{H}_B$. Now it's also true that $A = \underset{i=0}{\overset{n-1}{\bigoplus}} u^i B$ as a Banach space. To see this we notice that $\Span\{u^iB,\ i=0, ..., n-1\}$ is dense in $A$, also that $u^i B,\ 0 \leq i \leq n-1$ are closed in $A$. Now take a sequence $\{ \sum_{i=0}^{n-1} u^i b_{mi} \}_{m=1}^{\infty}$ converging to an element $a \in A$ ($b_{mi} \in B$). Then for each $j$ we have $\{ P_j \sum_{i=0}^{n-1} u^i b_{mi} P_0 \}_{m=1}^{\infty} = \{ P_j u^j b_{mj} P_0 \}_{m=1}^{\infty}$ converges (to $P_j a P_0$), consequently the sequence $\{ b_{mj} \}_{m=1}^{\infty}$ converges to an element $b_j$ in $B$ $\forall 0 \leq j \leq n-1$. So $a = \sum_{i=0}^{n-1} u^i b_i$. Finally the fact that $u^{i_1} B \cap u^{i_2} B = 0$, for $i_1 \neq i_2$ follows easily from $u^{i_1} \mathfrak{H}_B \cap u^{i_2} \mathfrak{H}_B = 0$, for $i_1 \neq i_2$ and the fact that the trace $\tau$ is faithful. We also have $A = \underset{i=0}{\overset{n-1}{\bigoplus}} B u^i$. \par Let $C$ be a $C^*$-algebra and $\Gamma$ a discrete group with a given action $\alpha : \Gamma \rightarrow Aut(C)$ on $C$. By $C \rtimes \Gamma$ we will denote the reduced crossed product of $C$ by $\Gamma$. It will be clear what group action we take. \par Let's denote by $G$ the multiplicative group generated by the automorphism $\Ad(u)$ of $B$. Then $G \cong \mathbb{Z}_n$ and by what we proved above $\mathfrak{H}_A \cong L^2(G,\mathfrak{H}_B)$.
\begin{lemma}
$A \cong B \rtimes G$
\end{lemma}
\begin{proof}
We have to show that the action of $A$ on $\mathfrak{H}_A$ "agrees" with the crossed product action. Take $a= \underset{k=0}{\overset{n-1}{\sum}} b_k u^k \in A$, $b_k \in B, k=0, 1, ..., n-1$ and take $\xi = \underset{k=0}{\overset{n-1}{\sum}} u^k \hat{b'_k} \in \mathfrak{H}_A$, $b'_k \in B, k=0, 1, ..., n-1$. Then $$a(\xi) = \underset{k=0}{\overset{n-1}{\sum}} \underset{m=0}{\overset{n-1}{\sum}} b_k u^k u^m \hat{b'_m} = \underset{k=0}{\overset{n-1}{\sum}} \underset{m=0}{\overset{n-1}{\sum}} u^{k+m} . (u^{-k-m} b_k u^{k+m} ) \hat{b'_m},$$ $$= \underset{s=0}{\overset{n-1}{\sum}} \underset{k=0}{\overset{n-1}{\sum}} (u^s . \Ad(u^{-s})(b_k) ) (\widehat{b'_{s-k(mod\ n)}}).$$ This shows that the action of $A$ on $\mathfrak{H}_A$ is the crossed product action.
\end{proof}
To study simplicity in this situation, we can invoke Theorem 4.2 from \cite{O75} and Theorem 6.5 from \cite{OP78}, or with the same success, use the following result from \cite{K81}:
\begin{thm}[\cite{K81}]
Let $\Gamma$ be a discrete group of automorphisms of $C^*$-algebra $\mathfrak{B}$. If $\mathfrak{B}$ is simple and if each $\gamma$ is outer for the multiplier algebra $M(\mathfrak{B})$ of $\mathfrak{B}$, $\forall \gamma \in \Gamma \backslash \{ 1 \} $, then the reduced crossed product of $\mathfrak{B}$ by $\Gamma$, $\mathfrak{B} \rtimes \Gamma$, is simple.
\end{thm}
An automorphism $\omega$ of a $C^*$-algebra $\mathfrak{B}$, contained in a $C^*$-algebra $\mathfrak{A}$, is outer for $\mathfrak{A}$ if there doesn't exist a unitary $w \in \mathfrak{A}$ with the property $\omega = \Ad(w)$. \par A representation $\pi$ of a $C^*$-algebra $\mathfrak{A}$ on a Hilbert space $\mathfrak{H}$ is called non-degenerate if there doesn't exist a vector $\xi \in \mathfrak{H}$, $\xi \neq 0$, such that $\pi(\mathfrak{A}) \xi = 0$. \par The idealizer of a $C^*$-algebra $\mathfrak{A}$ in a $C^*$-algebra $\mathfrak{B}$ ($\mathfrak{A} \subset \mathfrak{B}$) is the largest $C^*$-subalgebra of $\mathfrak{B}$ in which $\mathfrak{A}$ is an ideal. \\ We will not give a definition of the multiplier algebra of a $C^*$-algebra. Instead we will give the following property from \cite{APT73}, which we will use (see \cite{APT73} for more details on multiplier algebras):
\begin{prop}[\cite{APT73}]
Each nondegenerate faithful representation $\pi$ of a $C^*$-algebra $\mathfrak{A}$ extends uniquely to a faithful representation of $M(\mathfrak{A})$, and $\pi(M(\mathfrak{A}))$ is the idealizer of $\pi(\mathfrak{A})$ in its weak closure.
\end{prop}
Suppose that we have a faithful representation $\pi$ of a $C^*$-algebra $\mathfrak{A}$ on a Hilbert space $\mathfrak{H}$. If confusion is impossible we will denote by $\bar{\mathfrak{A}}$ (in $\mathfrak{H}$) the weak closure of $\pi(\mathfrak{A})$ in $\mathbb{B}(\mathfrak{H})$. \par To study uniqueness of trace we invoke a theorem of B\'edos from \cite{B93}. \par Let $\mathfrak{A}$ be a simple, unital $C^*$-algebra with a unique trace $\varphi$ and let $(\pi_{\mathfrak{A}}, \mathfrak{H}_{\mathfrak{A}}, \widehat{1_{\mathfrak{A}}})$ denote the GNS-triple associated to $\varphi$. The trace $\varphi$ is faithful by the simplicity of $\mathfrak{A}$ and $\mathfrak{A}$ is isomorphic to $\pi_{\mathfrak{A}}(\mathfrak{A})$. Let $\alpha \in Aut(\mathfrak{A})$. The trace $\varphi$ is $\alpha$-invariant by the uniqueness of $\varphi$. Then $\alpha$ is implemented on $\mathfrak{H}_{\mathfrak{A}}$ by the unitary operator $U_{\alpha}$ given by $U_{\alpha}(\hat{a}) = \alpha(a) \cdot \widehat{1_{\mathfrak{A}}}$, $a \in \mathfrak{A}$. Then we denote the extension of $\alpha$ to the weak closure $\bar{\mathfrak{A}}$ (in $\mathfrak{H}_{\mathfrak{A}}$) of $\pi_{\mathfrak{A}}(\mathfrak{A})$ in $\mathbb{B}(\mathfrak{H}_{\mathfrak{A}})$ by $\tilde{\alpha} \overset{def}{=} \Ad(U_{\alpha})$. We will say that $\alpha$ is $\varphi$-outer if $\tilde{\alpha}$ is outer for $\bar{\mathfrak{A}}$.
\begin{thm}[\cite{B93}]
Suppose $\mathfrak{A}$ is a simple unital $C^*$-algebra with a unique trace $\varphi$ and that $\Gamma$ is a discrete group with a representation $\alpha : \Gamma \rightarrow Aut(\mathfrak{A})$, such that $\alpha_{\gamma}$ is $\varphi$-outer $\forall \gamma \in \Gamma \backslash \{ 1 \}$. Then the reduced crossed product $\mathfrak{A} \rtimes \Gamma$ is simple with a unique trace $\tau$ given by $\tau = \varphi \circ E$, where $E$ is the canonical conditional expectation from $\mathfrak{A} \rtimes \Gamma$ onto $\mathfrak{A}$.
\end{thm}
Let's now return to the $C^*$-algebra $(A,\tau) = ( \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_m}{\overset{p_m}{\mathbb{C}}})*(\mathbb{M}_n, tr_n)$, with $\alpha_1 \leq \alpha_2 \leq ... \leq \alpha_m$. If $B \subset E \subset A$ are as in the beginning of this section, then the representations of $B$, $E$ and $A$ on $\mathfrak{H}_A$ are all nondegenerate. Also we have the following:
\begin{lemma}
The weak closure of $B$ in $\mathbb{B}(\mathfrak{H}_B)$ and the one in $\mathbb{B}(\mathfrak{H}_A)$ are the same (or $\bar{B}$ (in $\mathfrak{H}_B$) $\cong$ $\bar{B}$ (in $\mathfrak{H}_A$)). Analogously, $\bar{E}$ (in $\mathfrak{H}_E$) $\cong$ $\bar{E}$ (in $\mathfrak{H}_A$).
\end{lemma}
\begin{proof}
For $b \in B \subset A$ we have $b(u^t h) = u^t (\Ad(u^{-t})(b))(h)$ for $h \in \mathfrak{H}_B$ and $0 \leq t \leq n-1$. Taking a weak limit in $\mathbb{B}(\mathfrak{H}_B)$ we obtain the same equation $\forall \bar{b} \in \bar{B}$ (in $\mathfrak{H}_B$): $\bar{b}(u^th) = u^t(\Ad(u^{-t})(\bar{b}))(h)$, which shows, of course, that $\bar{b}$ has a unique extension to $\mathbb{B}(\mathfrak{H}_A)$. Conversely if $\tilde{b} \in \bar{B}$ (in $\mathfrak{H}_A$), then since $\mathfrak{H}_B$ is invariant for $B$ it will be invariant for $\tilde{b}$ also. So the restriction of $\tilde{b}$ to $\mathfrak{H}_B$ is the element we are looking for. \par Analogously if $e \in E$ and if $h_0 + u^l h_1 + ... + u^{n-l} h_{\frac{n}{l}-1} \in \mathfrak{H}_E$, then for $0 \leq t \leq l-1$ we have $e(u^t(h_0 + u^l h_1 + ... + u^{n-l} h_{\frac{n}{l}-1})) = u^t(\Ad(u^{-t})(e))(h_0 + u^l h_1 + ... + u^{n-l} h_{\frac{n}{l}-1})$. And again for an element $\bar{e} \in \bar{E}$ (in $\mathfrak{H}_E$) we see that $\bar{e}$ has a unique extension to an element of $\bar{E}$ (in $\mathfrak{H}_A$). Conversely an element $\tilde{e} \in \bar{E}$ (in $\mathfrak{H}_A$) has $\mathfrak{H}_E$ as an invariant subspace, so we can restrict it to $\mathfrak{H}_E$ to obtain an element in $\bar{E}$ (in $\mathfrak{H}_E$).
\end{proof}
We will state the following theorem from \cite{D99}, which we will frequently use:
\begin{thm}[\cite{D99}]
Let $\mathfrak{A}$ and $\mathfrak{B}$ be unital $C^*$-algebras with traces $\tau_{\mathfrak{A}}$ and $\tau_{\mathfrak{B}}$ respectively, whose GNS representations are faithful. Let
\begin{center}
$(\mathfrak{C}, \tau) = (\mathfrak{A}, \tau_{\mathfrak{A}}) * (\mathfrak{B}, \tau_{\mathfrak{B}})$.
\end{center}
Suppose that $\mathfrak{B} \neq \mathbb{C}$ and that $\mathfrak{A}$ has a unital, diffuse abelian $C^*$-subalgebra $\mathfrak{D}$ ($1_{\mathfrak{A}} \in \mathfrak{D} \subseteq \mathfrak{A}$). Then $\mathfrak{C}$ is simple with a unique trace $\tau$.
\end{thm}
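For instance (this illustration is not needed below), Theorem 3.9 recovers the simplicity and uniqueness of trace of $C^*_r(F_2)$ from the Introduction: by Example 1.3,
\begin{equation*}
(C^*_r(F_2), \tau) = (C^*_r(\mathbb{Z}), \tau_{\mathbb{Z}}) * (C^*_r(\mathbb{Z}), \tau_{\mathbb{Z}}) \cong (C(\mathbb{T}), \tau_{\mathrm{Haar}}) * (C(\mathbb{T}), \tau_{\mathrm{Haar}}),
\end{equation*}
and the first factor is a unital, diffuse abelian $C^*$-algebra (with respect to the Haar trace the generator $z$ is a Haar unitary), while the second one is different from $\mathbb{C}$.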
Using Theorem 2.4 repeatedly we see that $$B = (\mathbb{C} \cdot e_{11} \oplus ... \oplus \mathbb{C} \cdot e_{nn}) * (\underset{k=0}{\overset{n-1}{*}} (\mathbb{C} \cdot u^k p_1 u^{-k} \oplus ... \oplus \mathbb{C} \cdot u^k p_m u^{-k})),$$ $$\cong (U \oplus \underset{max \{ n\alpha_m - n + 1,\ 0 \} }{\overset{\tilde{p}}{\mathbb{C}}}) * (\underset{\frac{1}{n}}{\overset{e_{11}}{\mathbb{C}}} \oplus ... \oplus \underset{\frac{1}{n}}{\overset{e_{nn}}{\mathbb{C}}}),$$ where $U$ has a unital, diffuse abelian $C^*$-subalgebra, and where $\tilde{p} = \underset{i=0}{\overset{n-1}{\wedge}} u^i p_m u^{-i}$. \par We will consider the following 3 cases, for $\alpha_1 \leq \alpha_2 \leq ... \leq \alpha_m$: \\ \par (I) $\alpha_m < 1-\frac{1}{n^2}$. \par (II) $\alpha_m = 1-\frac{1}{n^2}$. \par (III) $\alpha_m > 1-\frac{1}{n^2}$. \\ \par We will organize these cases into a few lemmas: \par \par
(I)
\begin{lemma}
If $A$ is as above, then for $\alpha_m < 1-\frac{1}{n^2}$ we have that $A$ is simple with a unique trace.
\end{lemma}
\begin{proof}
We consider: \\ (1) $\alpha_m \leq 1-\frac{1}{n}$. \\ Then $B \cong U * (\underset{\frac{1}{n}}{\overset{e_{11}}{\mathbb{C}}} \oplus ... \oplus \underset{\frac{1}{n}}{\overset{e_{nn}}{\mathbb{C}}})$ with $U$ containing a unital, diffuse abelian $C^*$-subalgebra (from Theorem 2.4). From Theorem 3.9 we see that $B$ is simple with a unique trace. \\ (2) $1-\frac{1}{n} < \alpha_m < 1-\frac{1}{n^2}$. \\ Then $B \cong (U \oplus \underset{n\alpha_m - n + 1}{\overset{\tilde{p}}{\mathbb{C}}}) * (\underset{\frac{1}{n}}{\overset{e_{11}}{\mathbb{C}}} \oplus ... \oplus \underset{\frac{1}{n}}{\overset{e_{nn}}{\mathbb{C}}})$ with $U$ having a unital, diffuse abelian $C^*$-subalgebra. Using Theorem 2.4 one more time we see that $B$ is simple with a unique trace in this case also. \par We know that $A = B \rtimes G$, where $G = \langle \Ad(u) \rangle \cong \mathbb{Z}_n$. Since $B$ is unital, the multiplier algebra $M(B)$ coincides with $B$. We note also that since $\bar{B}$ (in $\mathfrak{H}_B$) is isomorphic to $\bar{B}$ (in $\mathfrak{H}_A$), to prove that some element of $Aut(B)$ is $\tau_B$-outer it's enough to prove that this automorphism is outer for $\bar{B}$ (in $\mathfrak{H}_A$) (and it will be outer for $M(B) = B$ also). Making these observations and using Theorem 3.5 and Theorem 3.7 we see that if we prove that $\Ad(u^i)$ is outer for $\bar{B}$ (in $\mathfrak{H}_A$), $\forall 0 < i \leq n-1$, then it will follow that $A$ is simple with a unique trace. We will show that $\Ad(u^i)$ is outer for $\bar{B}$ (in $\mathfrak{H}_A$) (we will write just $\bar{*}$ for $\bar{*}$ (in $\mathfrak{H}_A$) and omit writing $\mathfrak{H}_A$ - all the closures will be in $\mathbb{B}(\mathfrak{H}_{\mathfrak{A}})$) for the case $\alpha_m \leq 1-\frac{1}{n^2}$. \par Fix $0 < k \leq n-1$. Since $u^k \mathfrak{H}_B \perp \mathfrak{H}_B$ it follows that $u^k \notin \bar{B}$ (in $\mathfrak{H}_A$). Suppose $\exists w \in \bar{B}$, such that $\Ad(u^k) = \Ad(w)$ on $\bar{B}$. Then $u^k w u^{-k} = w w w^* = w$ and $u^k w^* u^{-k} = w w^* w^* = w^*$ and this implies that $u^k$, $u^{-k}$, $w$ and $w^*$ commute, so it follows $u^k w^*$ commutes with $\overline{C^*(B, u^k)}$, so it belongs to its center. Let $d = \gcd(k,n)$; since $u^k$ and $u^d$ generate the same subgroup of the cyclic group $\langle u \rangle \cong \mathbb{Z}_n$, we have $C^*(B, u^k) = C^*(B, u^d)$. If $d = 1$ then $\overline{C^*(B, u^k)} = \bar{A}$ and by Theorem 2.5 $\bar{A}$ (in $\mathfrak{H}_A$) is a factor, so $u^k w^*$ is a multiple of $1_A$, which contradicts the fact $u^k \notin \bar{B}$. If $d = l > 1$ (so that $l \mid n$ and $l < n$), then $\overline{C^*(B, u^k)} = \bar{E}$ and $\bar{E}$ (in $\mathfrak{H}_A$) $\cong$ $\bar{E}$ (in $\mathfrak{H}_E$) is a factor too (by Theorem 2.5), so this implies again that $u^k w^*$ is a multiple of $1_A = 1_E$, so this is a contradiction again and this proves that $\Ad(u^k)$ are outer for $\bar{B}$, $\forall 0 < k \leq n-1$. This concludes the proof.
\end{proof}
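For orientation only (this numerical illustration plays no role in the argument): take $n = 2$, so that $1-\frac{1}{n} = \frac{1}{2}$ and $1-\frac{1}{n^2} = \frac{3}{4}$, and take, say, $(\alpha_1, \alpha_2) = (\frac{1}{3}, \frac{2}{3})$. Then $\frac{1}{2} < \alpha_2 = \frac{2}{3} < \frac{3}{4}$, so we are in subcase (2) of the proof, and the summand $\overset{\tilde{p}}{\mathbb{C}}$ appears with weight
$$ n\alpha_m - n + 1 = 2\cdot\tfrac{2}{3} - 2 + 1 = \tfrac{1}{3}. $$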
(III)
\begin{lemma}
If $A$ is as above, then for $\alpha_m > 1-\frac{1}{n^2}$ we have $A = A_0 \oplus \underset{n^2 \alpha_m - n^2 + 1}{\mathbb{M}_n}$, where $A_0$ is simple with a unique trace.
\end{lemma}
\begin{proof}
In this case $B \cong (U \oplus \underset{n\alpha_m - n + 1}{\overset{\tilde{p}}{\mathbb{C}}}) * (\underset{\frac{1}{n}}{\overset{e_{11}}{\mathbb{C}}} \oplus ... \oplus \underset{\frac{1}{n}}{\overset{e_{nn}}{\mathbb{C}}})$, where $U$ has a unital, diffuse abelian $C^*$-subalgebra. From Theorem 2.4 we see that $B \cong \overset{\tilde{p}_0}{B_0} \oplus \underset{n \alpha_m - n + \frac{1}{n}}{\overset{e_{11} \wedge \tilde{p}}{\mathbb{C}}} \oplus ... \oplus \underset{n \alpha_m - n + \frac{1}{n}}{\overset{e_{nn} \wedge \tilde{p}}{\mathbb{C}}}$ with $\tilde{p}_0 = 1- e_{11} \wedge
\tilde{p} - ... - e_{nn} \wedge \tilde{p}$, and $B_0$ being a unital, simple and having a unique trace. It's easy to see that $\Ad(u)$ permutes $\{ e_{ii} | 1 \leq i \leq n \}$ and that $\Ad(u)$ permutes
$\{ u^i p_j u^{-i} | 0 \leq i \leq n-1 \}$ for each $1 \leq j \leq m$. But since $\tilde{p} = \underset{i=0}{\overset{n-1}{\wedge}} u^i p_m u^{-i}$ we see that $\Ad(u)(\tilde{p}) = \tilde{p}$. This shows that $\Ad(u)$ permutes
$\{ e_{ii} \wedge \tilde{p} | 1 \leq i \leq n \}$. This shows that $\Ad(\tilde{p}_0 u)$ is an automorphism of $B_0$ and that $\Ad((1-\tilde{p}_0) u)$ is an automorphism of $\overset{e_{11} \wedge \tilde{p}}{\mathbb{C}} \oplus ... \oplus \overset{e_{nn} \wedge \tilde{p}}{\mathbb{C}}$. If we denote $G_1 = \langle \Ad(\tilde{p}_0 u) \rangle$ and $G_2 = \langle \Ad((1-\tilde{p}_0) u) \rangle$, then we have $A = B_0 \rtimes G_1 \oplus (\overset{e_{11} \wedge \tilde{p}}{\mathbb{C}} \oplus ... \oplus \overset{e_{nn} \wedge \tilde{p}}{\mathbb{C}}) \rtimes G_2$. Now it is easy to see that $(\overset{e_{11} \wedge \tilde{p}} {\mathbb{C}} \oplus ... \oplus \overset{e_{nn} \wedge \tilde{p}}{\mathbb{C}}) \rtimes G_2 = C^*(\{ e_{11} \wedge \tilde{p}, ..., e_{nn} \wedge \tilde{p} \}, (1-\tilde{p}_0) u) = (1-\tilde{p}_0).C^*( \{ e_{11}, ..., e_{nn} \}, u) \cong \mathbb{M}_n$ (because $\tilde{p}_0$ is a central projection). To study $A_0 \overset{def}{=} B_0 \rtimes G_1$ we have to consider the automorphisms $\Ad(\tilde{p}_0 u)$. From Lemma 3.8 we see that $$\overline{B_0 \oplus \overset{e_{11} \wedge\tilde{p}}{\mathbb{C}} \oplus ... \oplus \overset{e_{nn} \wedge \tilde{p}}{\mathbb{C}}}\ (in\ \mathfrak{H}_B) \cong \overline{B_0 \oplus \overset{e_{11} \wedge\tilde{p}}{\mathbb{C}} \oplus ... \oplus \overset{e_{nn} \wedge \tilde{p}}{\mathbb{C}}}\ (in\ \mathfrak{H}_A).$$ This implies $\bar{B}_0$ (in $\mathfrak{H}_{B_0}$) $\cong$ $\bar{B}_0$ (in $\mathfrak{H}_{A_0}$). This is because $\mathfrak{H}_{A_0} = \tilde{p}_0 \mathfrak{H}_A$ and $\mathfrak{H}_{B_0} = \tilde{p}_0 \mathfrak{H}_B$ (which is clear, since $\mathfrak{H}_{A_0}$ and $\mathfrak{H}_{B_0}$ are direct summands in $\mathfrak{H}_A$ and $\mathfrak{H}_B$, respectively). For some
$l \mid n$, if we denote $E_0 \overset{def}{=} \tilde{p}_0 E$, then by the same reasoning as above $$E = E_0 \oplus (1-\tilde{p}_0). C^*(\{ e_{11}, ..., e_{nn} \}, u^l) \cong E_0 \oplus (\underbrace{\mathbb{M}_{\frac{n}{l}} \oplus ... \oplus \mathbb{M}_{\frac{n}{l}}}_{l-times}).$$ So we similarly have $\bar{E_0}$ (in $\mathfrak{H}_{E_0}$) $\cong$ $\bar{E_0}$ (in $\mathfrak{H}_{A_0}$). We use Theorem 2.5 and see that $\bar{A} \cong L(F_t) \oplus \mathbb{M}_n$ and that $$\bar{E} \cong L(F_{t'}) \oplus (\underbrace{\mathbb{M}_{\frac{n}{l}} \oplus ... \oplus \mathbb{M}_{\frac{n}{l}}}_{l-times}),$$ for some $1 < t, t' < \infty$. This shows that $\bar{A_0}$ and $\bar{E_0}$ are both factors. Now for $\Ad(\tilde{p_0} u^k)$, $1 \leq k \leq n-1$, we can argue in the same way as in case (I) to show that $\Ad(\tilde{p_0} u^k)$ are all outer for $\bar{B_0}$, $\forall 1 \leq k \leq n-1$. Now we use Theorem 3.5 and Theorem 3.7 to finish the proof. Notice that the trace of the support projection of $\mathbb{M}_n$, $e_{11} \wedge \tilde{p} + ... + e_{nn} \wedge \tilde{p}$, is $n^2 \alpha_m - n^2 + 1$.
\end{proof}
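As a quick check of the last assertion of the proof (a routine computation): by the decomposition of $B$ displayed above, each projection $e_{ii} \wedge \tilde{p}$ has trace $n\alpha_m - n + \frac{1}{n}$, and the support projection of the $\mathbb{M}_n$ summand is the sum of $n$ of them, so
$$ \tau(e_{11} \wedge \tilde{p} + ... + e_{nn} \wedge \tilde{p}) = n \Big( n\alpha_m - n + \frac{1}{n} \Big) = n^2 \alpha_m - n^2 + 1. $$
For instance, the illustrative values $n = 2$ and $\alpha_m = \frac{7}{8}$ give $4\cdot\frac{7}{8} - 4 + 1 = \frac{1}{2}$.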
(II) \\ \par We already proved that $\Ad(u^k)$ are outer for $\bar{B}$, $\forall 1 \leq k \leq n-1$. Using Theorem 2.4 we see that $B \cong (U \oplus \underset{1-\frac{1}{n}}{\overset{\tilde{p}}{\mathbb{C}}}) * (\underset{\frac{1}{n}}{\overset{e_{11}}{\mathbb{C}}} \oplus ... \oplus \underset{\frac{1}{n}}{\overset{e_{nn}}{\mathbb{C}}})$ with $U$ having a unital, diffuse abelian $C^*$-subalgebra. There are $*$-homomorphisms $\pi_i : B \rightarrow \mathbb{C}$, $1 \leq i \leq n$, with $\pi_i(\tilde{p}) = \pi_i(e_{ii}) = 1$, and such that $B_0 \overset{def}{=} \underset{i=1}{\overset{n}{\bigcap}} \ker(\pi_i)$ is simple with a unique trace. Now if $1 \leq k \leq n-1$, then $B_0 \cap \Ad(u^k)(B_0)$ is either $0$ or $B_0$, because $B_0$ and $\Ad(u^k)(B_0)$ are simple ideals in $B$. The first possibility is impossible for dimension reasons (both ideals have finite codimension in $B$, so their intersection cannot be zero), so this shows that $B_0$ is invariant for $\Ad(u^k)$, $1 \leq k \leq n-1$. In other words $\Ad(u^k) \in Aut(B_0)$. As in Lemma 3.4, it can be shown that
$$A_0 \overset{def}{=} C^*(B_0 \oplus B_0 u \oplus ... \oplus B_0 u^{n-1}) \cong B_0 \rtimes \{ \Ad(u^k) | 0 \leq k \leq n-1 \} \subset A.$$
\begin{lemma}
We have a short split-exact sequence:
\begin{center}
$0 \rightarrow A_0 \hookrightarrow A \overset{\curvearrowleft}{\rightarrow} \mathbb{M}_n \rightarrow 0$.
\end{center}
\end{lemma}
\begin{proof}
It's clear that we have the short exact sequence
\begin{equation*} 0 \rightarrow B_0 \hookrightarrow B \overset{\pi}{\longrightarrow} \underbrace{\mathbb{C} \oplus ... \oplus \mathbb{C}}_{n-times} \rightarrow 0, \end{equation*} where $\pi \overset{def}{=} (\pi_1, ..., \pi_n)$. We think of $\pi$ as a map from $B$ to $diag(\mathbb{M}_n)$, defined by $$\pi(b) = \begin{pmatrix} \pi_1(b) & 0 & ... & 0 \\ 0 & \pi_2(b) & ... & 0 \\ . & . & . & . \\ 0 & 0 & ... & \pi_n(b) \end{pmatrix} .$$ Now since $\pi_i(\tilde{p}) = \pi_i(e_{ii}) = 1$ and $\Ad(u)(e_{11}) = u e_{11} u^* = e_{nn}$ and for $2 \leq i \leq n$, $\Ad(u)(e_{ii}) = u e_{ii} u^* = e_{(i-1) (i-1)}$, then $\pi_i \circ \Ad(u)(e_{(i+1) (i+1)}) = \pi_i \circ \Ad(u)(\tilde{p}) = 1$ for $1 \leq i \leq n-1$ and $\pi_n \circ \Ad(u)(e_{1 1}) = \pi_n \circ \Ad(u)(\tilde{p}) = 1$. So, since two $*$-homomorphisms of a $C^*$-algebra which coincide on a set of generators of the $C^*$-algebra are identical, we have $\pi_i \circ \Ad(u) = \pi_{i+1}$ for $1 \leq i \leq n-1$ and $\pi_n \circ \Ad(u) = \pi_1$. Define $\tilde{\pi} : A \rightarrow \mathbb{M}_n$ by $\underset{k=0}{\overset{n-1}{\sum}} b_ku^k \mapsto \underset{k=0}{\overset{n-1}{\sum}} \pi(b_k) W^k$ (with $b_k \in B$), where $W \in \mathbb{M}_n$ is the matrix which represents $u \in \mathbb{M}_n \subset A$, namely $$W \overset{def}{=} \begin{pmatrix} 0 & 1 & ... & 0 \\ . & . & . & . \\ 0 & 0 & ... & 1 \\ 1 & 0 & ... & 0 \end{pmatrix} .$$
We will show that if $b \in B$ and $0 \leq k \leq n-1$, then $\pi(u^k b u^{-k}) = W^k \pi(b) W^{-k}$. For this it's enough to show that $\pi(u b u^{-1}) = W \pi(b) W^{-1}$. For the matrix units $\{ E_{ij} | 1 \leq i,j \leq n \}$ we have as above $W E_{ii} W^* = E_{(i-1) (i-1)}$ for $2 \leq i \leq n-1$ and $W E_{11} W^* = E_{nn}$. So $$W \begin{pmatrix} \pi_1(b) & 0 & ... & 0 \\ 0 & \pi_2(b) & ... & 0 \\ . & . & . & . \\ 0 & 0 & ... & \pi_n(b) \end{pmatrix} W^* = \begin{pmatrix} \pi_2(b) & 0 & ... & 0 \\ 0 & \pi_3(b) & ... & 0 \\ . & . & . & . \\ 0 & 0 & ... & \pi_1(b) \end{pmatrix} ,$$ $$ = \begin{pmatrix} \pi_1(\Ad(u)(b)) & 0 & ... & 0 \\ 0 & \pi_2(\Ad(u)(b)) & ... & 0 \\ . & . & . & . \\ 0 & 0 & ... & \pi_n(\Ad(u)(b)) \end{pmatrix} = \pi(\Ad(u)(b)),$$ just what we wanted. \par Now for $b \in B$ and $0 \leq k \leq n-1$ we have $$\tilde{\pi}((b u^k)^*) = \tilde{\pi}(u^{-k} b^*) = \tilde{\pi}(u^{-k} b^* u^k u^{-k}) = \pi(u^{-k} b^* u^k) W^{-k} = W^{-k} \pi(b^*) W^k W^{-k} , $$ $$ = W^{-k} \pi(b)^* = (\pi(b) W^k)^* = (\tilde{\pi}(b u^k))^*.$$ Also if $b, b' \in B$ and $0 \leq k, k' \leq n-1$, then $$\tilde{\pi}((b' u^{k'}).(b u^k)) = \tilde{\pi}(b'(u^{k'} b u^{-k'}) u^{k+k'}) =
\pi(b'(u^{k'} b u^{-k'})) W^{k+k'},$$
$$= \pi(b') \pi(u^{k'} b u^{-k'}) W^{k+k'} = \pi(b') W^{k'} \pi(b) W^{-k'} W^{k+k'} = \tilde{\pi}(b' u^{k'}) \tilde{\pi}(b u^k).$$ This proves that $\tilde{\pi}$ is a $*$-homomorphism. Continuity follows from the continuity of $\pi$ and the Banach space representation $A = \underset{i=0}{\overset{n-1}{\bigoplus}} Bu^i$. \par Clearly $A_0 = \underset{i=0}{\overset{n-1}{\bigoplus}} B_0 u^i$ as a Banach space. It is also clear by the definition of $\tilde{\pi}$ that $A_0 \subset \ker(\tilde{\pi})$. Since $A_0$ has Banach space codimension $n^2$ in $A$, and so does $\ker(\tilde{\pi})$, we must have $A_0 = \ker(\tilde{\pi})$. \par From the construction of the map $\tilde{\pi}$ we see that $\tilde{\pi}(e_{ii}) = E_{ii}$, since $\pi(e_{ii}) = E_{ii}$ and also
$\tilde{\pi}(u^k) = W^k$. Since $\{e_{ii} | 1 \leq i \leq n \} \cup \{ W^k | 0 \leq k \leq n-1 \}$ generate $\mathbb{M}_n$, then we have $\tilde{\pi}(e_{ij}) = E_{ij}$, so the inclusion map $s: \mathbb{M}_n \rightarrow A$ given by $E_{ij} \mapsto e_{ij}$ is a right inverse for $\tilde{\pi}$.
\end{proof}
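The codimension count used in the proof can be spelled out as follows (a bookkeeping remark): by the short exact sequence at the beginning of the proof, $B/B_0 \cong \underbrace{\mathbb{C} \oplus ... \oplus \mathbb{C}}_{n-times}$, and $A = \underset{i=0}{\overset{n-1}{\bigoplus}} Bu^i$ as a Banach space, so
$$ A/A_0 \cong \underset{i=0}{\overset{n-1}{\bigoplus}} (B/B_0) u^i, \qquad \dim(A/A_0) = n \cdot n = n^2 = \dim \mathbb{M}_n, $$
which is exactly the codimension of $\ker(\tilde{\pi})$.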
From this lemma it follows that we can write $A = A_0 \oplus \mathbb{M}_n$ as a Banach space.
\begin{lemma}
If $\eta$ is a trace on $A_0$, then the linear functional $\tilde{\eta}$ on $A$, defined by $\tilde{\eta}(a_0 \oplus M) = \eta(a_0) + tr_n(M)$ for $a_0 \in A_0$ and $M \in \mathbb{M}_n$, is a trace, and $\tilde{\eta}$ is the unique extension of $\eta$ to a trace on $A$ (of norm 1).
\end{lemma}
\begin{proof}
The functional $\eta$ can be extended in at most one way to a tracial state on $A$, because of the requirement $\tilde{\eta}(1_A) = 1$, the fact that $\mathbb{M}_n$ sits as a subalgebra in $A$, and the uniqueness of the trace on $\mathbb{M}_n$. Since $\tilde{\eta}(1_A) = 1$, to show that $\tilde{\eta}$ is a trace we need to show that $\tilde{\eta}$ is positive and satisfies the trace property. For the trace property: if $x, y \in A$ then we need to show $\tilde{\eta}(xy) = \tilde{\eta}(yx)$. It is easy to see that to prove this it is enough to prove that if $a_0 \in A_0$ and $M \in \mathbb{M}_n$, then $\eta(a_0 M) = \eta(M a_0)$. Since $\eta$ is linear and $a_0$ is a linear combination of 4 positive elements we can assume, without loss of generality, that $a_0 \geq 0$. Then $a_0 = a_0^{1/2} a_0^{1/2}$ and $M a_0^{1/2}, a_0^{1/2} M \in A_0$, so since $\eta$ is a trace on $A_0$, we have $\eta(M a_0) = \eta((M a_0^{1/2}) a_0^{1/2}) = \eta(a_0^{1/2}(M a_0^{1/2})) = \eta((a_0^{1/2} M) a_0^{1/2}) = \eta(a_0^{1/2}(a_0^{1/2} M)) = \eta(a_0 M).$ This shows that $\tilde{\eta}$ satisfies the trace property. It remains to show positivity. Suppose $a_0 \oplus M \geq 0$. We must show $\tilde{\eta}(a_0 \oplus M) \geq 0$. Write $M = \underset{i=1}{\overset{n}{\sum}} \underset{j=1}{\overset{n}{\sum}} m_{ij} e_{ij}$ and $a_0 = \underset{i=1}{\overset{n}{\sum}} \underset{j=1}{\overset{n}{\sum}} e_{ii} a_0 e_{jj}$. Since $\tilde{\eta}$ is a trace, if $i \neq j$ then $\tilde{\eta}(e_{ii} a_0 e_{jj}) = \tilde{\eta}(e_{jj} e_{ii} a_0) = 0$, so this shows that $\tilde{\eta}(a_0 \oplus M) = \underset{i=1}{\overset{n}{\sum}} (\frac{m_{ii}}{n} + \eta(e_{ii} a_0 e_{ii}))$. Clearly $a_0 \oplus M \geq 0$ implies that $e_{ii} (a_0 \oplus M) e_{ii} \geq 0$ for all $1 \leq i \leq n$. So to show positivity we only need to show that $\tilde{\eta}(e_{ii}(a_0 \oplus M)e_{ii}) \geq 0$ for all $1 \leq i \leq n$, given that $m_{ii} e_{ii} + e_{ii} a_0 e_{ii} \geq 0$ for all $1 \leq i \leq n$. Suppose that for some $i$, $m_{ii} < 0$. Then it follows that $e_{ii} a_0 e_{ii} \geq -m_{ii} e_{ii}$, so $e_{ii} a_0 e_{ii} \in e_{ii} A_0 e_{ii}$ is invertible, which implies $e_{ii} \in A_0$, which is not true. So this shows that $m_{ii} \geq 0$, and $m_{ii} e_{ii} \geq -e_{ii} a_0 e_{ii}$. If $\{ \epsilon_{\gamma} \}$ is an approximate unit for $A_0$, then positivity of $\eta$ implies
$1 = \| \eta \| = \underset{\gamma}{\lim}\ \eta(\epsilon_{\gamma})$. Since $\eta$ is a trace we have $\underset{\gamma}{\lim}\ \eta(\epsilon_{\gamma} e_{ii}) = \frac{1}{n}$. Since $\forall \gamma,\ m_{ii}\epsilon_{\gamma}^{1/2} e_{ii} \epsilon_{\gamma}^{1/2} \geq - \epsilon_{\gamma}^{1/2} e_{ii} a_0 e_{ii} \epsilon_{\gamma}^{1/2}$, then $$tr_n(m_{ii} e_{ii}) = \frac{m_{ii}}{n} = \underset{\gamma}{\lim}\ \eta(m_{ii} e_{ii} \epsilon_{\gamma}) = \underset{\gamma}{\lim}\ \eta(m_{ii} \epsilon_{\gamma}^{1/2} e_{ii} \epsilon_{\gamma}^{1/2}) \geq \underset{\gamma}{\lim}\ \eta(\epsilon_{\gamma}^{1/2} e_{ii} a_0 e_{ii} \epsilon_{\gamma}^{1/2}),$$ $$ = \underset{\gamma}{\lim}\ \eta(e_{ii} a_0 e_{ii} \epsilon_{\gamma}) = \eta(e_{ii} a_0 e_{ii}).$$ This finishes the proof of positivity and the proof of the lemma.
\end{proof}
\begin{remark}
We will show below that $\tau|_{A_0}$ is the unique trace on $A_0$. Since we have $A = A_0 \oplus \mathbb{M}_n$ as a Banach space, then
clearly the free product trace $\tau$ on $A$ is given by $\tau(a_0 \oplus M) = \tau|_{A_0}(a_0) + tr_n(M)$, where $a_0 \oplus M \in A_0
\oplus \mathbb{M}_n = A$. All tracial positive linear functionals of norm $\leq 1$ on $A_0$ are of the form $t\tau|_{A_0}$, where $0 \leq t \leq 1$.
Then there will be no traces on $A$ other than the family $\lambda_t \overset{def}{=} t \tau|_{A_0} \oplus tr_n$. To show that these are indeed traces, we can use the above lemma (it remains true even though the norm of $t \tau|_{A_0}$ can be less than one), or we can represent them as a convex linear combination $\lambda_t = t \tau + (1-t)\mu$ of the free product trace $\tau$ and the trace $\mu$, defined by $\mu(a_0 \oplus M) = tr_n(M) = tr_n(\tilde{\pi}(a_0 \oplus M))$.
\end{remark}
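The convex-combination description in the remark can be checked directly: for $0 \leq t \leq 1$ and $a_0 \oplus M \in A_0 \oplus \mathbb{M}_n$ we have
$$ t\tau(a_0 \oplus M) + (1-t)\mu(a_0 \oplus M) = t(\tau|_{A_0}(a_0) + tr_n(M)) + (1-t)tr_n(M) = t\tau|_{A_0}(a_0) + tr_n(M) = \lambda_t(a_0 \oplus M). $$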
\begin{lemma}
$\bar{B_0}$ (in $\mathfrak{H}_A$) $=$ $\bar{B}$ (in $\mathfrak{H}_A$).
\end{lemma}
\begin{proof}
Let's take $D \overset{def}{=} ( \overset{1-\tilde{p}}{\mathbb{C}} \oplus \overset{\tilde{p}}{\mathbb{C}} ) * ( \overset{e_{11}}{\mathbb{C}} \oplus \overset{e_{22} + ... + e_{nn}}{\mathbb{C}}) \subset B$. Denote $D_0 \overset{def}{=} D\cap B_0$. From Theorem 2.3 it follows that
$D \cong \{ f: [0,b] \rightarrow \mathbb{M}_2 | f$ is continuous and $f(0)$ - diagonal$\}$ $\oplus \overset{\tilde{p}\wedge (1-e_{11})}
{\mathbb{C}}$, where $0 < b < 1$ and $\tau|_D$ is given by an atomless measure $\mu$ on $\{ f: [0,b] \rightarrow \mathbb{M}_2 | f$ is continuous and $f(0)$ - diagonal $\}$, $\tilde{p}$ is represented by $\begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \oplus 1$, and $e_{11}$ is represented by $\begin{pmatrix} 1-t & \sqrt{t(1-t)} \\ \sqrt{t(1-t)} & t \end{pmatrix} \oplus 0$. A $*$-homomorphism, defined on the generators of a $C^*$-algebra can be extended in at most one way to the whole $C^*$-algebra. This observation, together with $\pi_1(e_{11}) =
\pi_1(\tilde{p}) = 1$ and $\pi_i(e_{22} + ... + e_{nn}) = \pi_i(\tilde{p}) = 1$ implies that $\pi_1|_D(f \oplus c) = f_{11}(0)$ and
$\pi_i|_D(f \oplus c) = c$ for $2 \leq i \leq n$. This means that $D_0 = \{ f: [0,b] \rightarrow \mathbb{M}_2 | f$ is continuous and $f_{11}(0) = f_{12}(0) = f_{21}(0) = 0 \} \oplus 0$. Now we see that $\bar{D_0}$ (in $\mathfrak{H}_D$) $\cong$ $\mathbb{M}_2 \otimes L^{\infty}([0,b], \mu) \oplus 0$, so $e_{11} \in \bar{D_0}$ (in $\mathfrak{H}_D$). So we can find a sequence $\{ \varepsilon_n \}$ of self-adjoint elements (functions) of $D_0$, supported on $e_{11}$, weakly converging to $e_{11}$ on $\mathfrak{H}_D$ and such that $\{ \varepsilon_n^2 \}$ also converges weakly to $e_{11}$ on $\mathfrak{H}_D$. Now take $a_1, a_2 \in A$. In $\mathfrak{H}_A$ we have $\langle \widehat{a_1}, (\varepsilon_n^2 - e_{11})\widehat{a_2} \rangle =
\tau( (\varepsilon_n^2 - e_{11}) a_2 a_1^*) = \tau((\varepsilon_n - e_{11}) a_2 a_1^* (\varepsilon_n - e_{11})) \leq 4 \| a_2 a_1^* \| \tau(\varepsilon_n^2 - e_{11})$ (the last inequality is obtained by representing $a_2 a_1^*$ as a linear combination of 4 positive elements and using the Cauchy-Bunyakovsky-Schwarz inequality). This shows that $e_{11} \in \bar{D_0}$ (in $\mathfrak{H}_A$) $\subset \bar{B_0}$ (in $\mathfrak{H}_A$). Analogously $e_{ii} \in \bar{B_0}$ (in $\mathfrak{H}_A$), so this shows $\bar{B_0} = \bar{B}$ (in $\mathfrak{H}_A$).
\end{proof}
It easily follows now that
\begin{cor}
$\bar{A_0}$ (in $\mathfrak{H}_A$) $=$ $\bar{A}$ (in $\mathfrak{H}_A$).
\end{cor}
The representation of $B_0$ on $\mathfrak{H}_A$ is faithful and nondegenerate, and we can use Proposition 3.6, together with Theorem 3.5 and the fact that $\Ad(u^k)$ are outer for $\bar{B} = \bar{B_0}$ to get:
\begin{lemma}
$A_0 = B_0 \rtimes G$ is simple.
\end{lemma}
For the uniqueness of the trace we need to modify slightly the proof of Theorem 3.7 (which is Theorem 1 in \cite{B93}, stated for ``nontwisted'' crossed products).
\begin{lemma}
$A_0 = B_0 \rtimes G$ has a unique trace, $\tau|_{A_0}$.
\end{lemma}
\begin{proof}
Above we already proved that $\{ \Ad(u^k) | 1 \leq k \leq n-1 \}$ are
$\tau|_{B_0}$-outer for $B_0$. \par
Suppose that $\eta$ is a trace on $A_0$. We will show that $\tau|_{A_0} = \eta$. We consider the GNS representation of $B$, associated
to $\tau|_B$. By repeating the proof of Lemma 3.13 we see that $\bar{B_0}$ (in $\mathfrak{H}_B$) $=$ $\bar{B}$ (in $\mathfrak{H}_B$). The simplicity of $B_0$
allows us to identify $B_0$ with $\pi_{\tau|_B}(B_0)$. We will also identify $B_0$ with its canonical copy in $A_0$. $A_0$ is
generated by $\{ b_0 \in B_0 \} \cup \{ u^k | 0 \leq k \leq n-1 \}$ and $\{ \Ad(u^k) | 0 \leq k \leq n-1 \}$ extend to $\bar{B_0}$ (in $\mathfrak{H}_A$), so also to $\bar{B_0}$ (in $\mathfrak{H}_B$) ( $\cong \bar{B}$ (in $\mathfrak{H}_A$)). Now we can form the von Neumann algebra crossed product $\tilde{A} \overset{def}{=} \bar{B_0} \rtimes
\{ \Ad(u^k) | 0 \leq k \leq n-1 \} \cong \bar{B} \rtimes \{ \Ad(u^k) | 0 \leq k \leq n-1 \}$, where the weak closures are in
$\mathfrak{H}_B$. Clearly $\tilde{A} \cong \bar{A}$ (in $\mathfrak{H}_A$). Denote by $\widetilde{\tau_{B_0}}$ the extension of $\tau|_{B_0}$ to $\bar{B_0}$ (in $\mathfrak{H}_A$), given by $\widetilde{\tau_{B_0}}(x) = \langle x(\widehat{1_A}), \widehat{1_A} \rangle_{\mathfrak{H}_A}$. By Proposition 3.19 of Chapter V in \cite{T79}, $\widetilde{\tau_{B_0}}$ is a faithful normal trace on $\bar{B_0}$ (in $\mathfrak{H}_A$). Now from the fact that $\bar{B_0}$ (in $\mathfrak{H}_A$) is a factor and using Lemma 1 from \cite{L81} we get that $\widetilde{\tau_{B_0}}$ is unique on
$\bar{B_0}$ (in $\mathfrak{H}_A$). By the same argument we have that the extension $\widetilde{\tau_{A_0}}$ of $\tau|_{A_0}$ to $\bar{A_0}$ (in $\mathfrak{H}_{A}$) $\cong$ $\bar{A}$ (in $\mathfrak{H}_A$) is unique, since $\bar{A_0}$ (in $\mathfrak{H}_{A}$) $\cong$ $\bar{A}$ (in $\mathfrak{H}_A$) is a factor. \par We take the unique extension of $\eta$ to $A$. We will call it again $\eta$ for convenience. \\
We denote by $\mathfrak{H}'_{C}$ the GNS Hilbert space for $C$, corresponding to $\eta|_C$ (for $C$ $=$ $A$, $B$, $B_0$, $A_0$).
Since $\eta|_{B_0} = \tau|_{B_0}$ it follows that $\bar{B_0}$ (in $\mathfrak{H}'_{B_0}$) $\cong$ $\bar{B}$ (in $\mathfrak{H}'_B$) and of course $\mathfrak{H}'_{B_0} = \mathfrak{H}'_B$. Then similarly as in Lemma 3.12 we get that $\bar{A_0}$ (in $\mathfrak{H}'_{A_0}$) $\cong$ $\bar{A}$ (in $\mathfrak{H}'_{A}$), so
$\mathfrak{H}'_{A_0} = \mathfrak{H}'_{A}$ (this can be done, since $\tau|_{B_0} = \eta|_{B_0}$). Now again by Proposition 3.19 of Chapter V in \cite{T79} we have that $\tilde{\eta}(x) \overset{def}{=} \langle x(\widehat{1_A}), \widehat{1_A} \rangle_{\mathfrak{H}'_A}$ (by abuse of notation, $\widehat{1_A}$ here denotes the element of $\mathfrak{H}'_A$ corresponding to $1_A$) defines a faithful normal trace on $\overline{\pi'_{A}(A)}$ (in $\mathfrak{H}'_A$).
In particular $\tilde{\eta}|_{\overline{\pi'_A(B)}}$ is a faithful normal trace on $\overline{\pi'_A(B)}$ (in $\mathfrak{H}'_{A}$).
By uniqueness of $\tau|_{B_0}$ we have $\tau|_{B_0} = \eta|_{B_0}$, so for $b_0 \in B_0$ we have $\tilde{\tau} (b_0) = \tau(b_0) = \eta(b_0) = \langle \pi'_{A}(b_0)(\widehat{1_A}), \widehat{1_A} \rangle_{\mathfrak{H}'_A} = \tilde{\eta}(\pi'_{A}(b_0))$.
\par Since $B_0$ is simple, it follows that $\pi'_{A}|_{B_0}$ is a $*$-isomorphism from $B_0$ onto $\pi'_{A}(B_0)$ and from Exercise 7.6.7 in
\cite{KR86} it follows that $\pi'_{A}|_{B_0}$ extends to a $*$-isomorphism from $\bar{B_0}$ (in $\mathfrak{H}_A$) $\cong$ $\bar{B}$ (in $\mathfrak{H}_A$) onto $\overline{\pi'_{A}(B_0)}$ (in $\mathfrak{H}'_A$) $\cong$ $\overline{\pi'_{A}(B)}$ (in $\mathfrak{H}'_A$). We will denote this $*$-isomorphism by $\theta$. We set $w \overset{def}{=} \pi'_A(u)$, $\beta \overset{def}{=} \theta \Ad(u) \theta^{-1} \in Aut(\overline{\pi'_A(B)}$ (in $\mathfrak{H}'_A$)). For $b_0 \in B_0$ we have $w \pi'_A(b_0) w^* = \pi'_A(u b_0 u^*) = \pi'_A((\Ad(u))(b_0)) = \beta (\pi'_A(b_0))$. So by weak continuity it follows that $\beta = \Ad(w)$ on $\overline{\pi'_A(B)}$ (in $\mathfrak{H}'_A$). Since $\bar{B}$
(in $\mathfrak{H}_A$) is a factor and $\{ \Ad(u^k) | 1 \leq k \leq n-1 \}$ are all outer, Kallman's Theorem
(Corollary 1.2 in \cite{Ka69}) gives us that $\{ \Ad(u^k) | 1 \leq k \leq n-1 \}$ act freely on $\bar{B}$ (in $\mathfrak{H}_A$). Namely, if $\bar{b} \in \bar{B}$ (in $\mathfrak{H}_A$), and if $\forall \bar{b}' \in \bar{B}$ (in $\mathfrak{H}_A$), $\bar{b} \bar{b}' = \Ad(u^k)(\bar{b}') \bar{b}$, then $\bar{b} = 0$.
Then by the above settings it is clear that $\{ \Ad(w^k) | 1 \leq k \leq n-1 \}$ also act freely on $\overline{\pi'_A(B)}$ (in $\mathfrak{H}'_A$). \par Since $\tilde{\eta}$ is a faithful normal trace on $\overline{\pi'_A(A)}$ (in $\mathfrak{H}'_A$), then by Proposition 2.36 of Chapter V in \cite{T79} there exists a faithful conditional expectation $P: \overline{\pi'_A(A)} \rightarrow \overline{\pi'_A(B)}$ (both weak closures are in $\mathfrak{H}'_A$). $\forall x \in \overline{\pi'_A(B)}$ (in $\mathfrak{H}'_A$), and $\forall 1 \leq k \leq n-1$, $\Ad(w^k)(x) w^k = w^k x$. Applying $P$ we get $\Ad(w^k)(x)(P(w^k)) = P(w^k) x$, so by the free action of $\Ad(w^k)$ we get that $P(w^k) = 0$, $\forall 1 \leq k \leq n-1$.
It's clear that $\{ \overline{\pi'_A(B)} \} \cup \{ w^k | 1 \leq k \leq n-1 \}$ generates $\overline{\pi'_A(A)}$ (in $\mathfrak{H}'_A$) as a von Neumann algebra. Now we use Proposition 22.2 from \cite{S81}. It gives us a $*$-isomorphism $\Phi : \overline{\pi'_A(A)}$ (in $\mathfrak{H}'_A$) $\rightarrow \bar{B} \rtimes
\{ \Ad(u^k) | 0 \leq k \leq n-1 \} \cong \bar{A}$ (the last two weak closures are in $\mathfrak{H}_A$) with $\Phi(\theta(x)) = x$, $x\in \bar{B}$ (in $\mathfrak{H}_A$), and $\Phi(w) = u$. Since $\bar{A}$ (in $\mathfrak{H}_A$) is a finite factor, so is $\overline{\pi'_A(A)}$ (in $\mathfrak{H}'_A$), and so its trace $\tilde{\eta}$ is unique. Hence, $\tilde{\eta} = \tilde{\tau} \circ \Phi$, and so $\forall b \in B$ and $\forall 1 \leq k \leq n-1$ we have $\eta(b u^k) = \tilde{\eta}(\pi'_A(b) \pi'_A(u^k)) = \tilde{\tau}(\Phi(\pi'_A(b)) \Phi(\pi'_A(u^k))) = \tilde{\tau}(\Phi(\theta(b)) \Phi(w^k)) = \tilde{\tau}(b u^k) = \tau(b u^k)$. By continuity and linearity of both traces we get $\eta = \tau$, which is exactly what we wanted.
\end{proof}
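Collecting the results of case (II) for concrete numbers (an illustration only): for $n = 2$ the boundary value is $1-\frac{1}{n^2} = \frac{3}{4}$, so for $(\alpha_1, \alpha_2) = (\frac{1}{4}, \frac{3}{4})$ the algebra
$$ A = (\underset{\frac{1}{4}}{\overset{p_1}{\mathbb{C}}} \oplus \underset{\frac{3}{4}}{\overset{p_2}{\mathbb{C}}}) * (\mathbb{M}_2, tr_2) $$
fits into a short exact sequence $0 \rightarrow A_0 \rightarrow A \rightarrow \mathbb{M}_2 \rightarrow 0$, with $A_0$ nonunital, simple and with unique trace $\tau|_{A_0}$.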
We conclude this section by proving the following
\begin{prop}
Let
\begin{center}
$(A,\tau) \overset{def}{=} ( \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_m}{\overset{p_m} {\mathbb{C}}})*(\mathbb{M}_n, tr_n)$,
\end{center}
where $\alpha_1 \leq \alpha_2 \leq ... \leq \alpha_m$. Then: \par (I) If $\alpha_m < 1-\frac{1}{n^2}$, then $A$ is unital, simple with a unique trace $\tau$. \par (II) If $\alpha_m = 1-\frac{1}{n^2}$, then we have a short exact sequence $0 \rightarrow A_0 \rightarrow A \rightarrow \mathbb{M}_n
\rightarrow 0$, where $A$ has no central projections, and $A_0$ is nonunital, simple with a unique trace $\tau|_{A_0}$. \par (III) If $\alpha_m > 1-\frac{1}{n^2}$, then $A = \underset{n^2 - n^2 \alpha_m}{\overset{f}{A_0}} \oplus \underset{n^2 \alpha_m - n^2 + 1}{\overset{1-f}{\mathbb{M}_n}}$, where $1-f \leq p_m$, and where $A_0$ is unital, simple and has a unique trace
$(n^2 - n^2 \alpha_m)^{-1} \tau|_{A_0}$. \par Let $f$ denote the identity projection in cases (I) and (II). Then in all cases, for each of the projections $f p_1, ..., f p_m$, we have a unital, diffuse abelian $C^*$-subalgebra of $A$, supported on it.
\par In all the cases $p_m$ is a full projection in $A$.
\end{prop}
\begin{proof}
We only have to prove the second part of the proposition, since the first part follows from Lemma 3.10, Lemma 3.11, Lemma 3.12, Lemma 3.17 and Lemma 3.18. From the discussion above we see that in all cases we have $fA = fB \rtimes \{ \Ad(f u^k f) | 0 \leq k \leq n-1 \}$, where $B$
and $\{ \Ad(f u^k f) | 0 \leq k \leq n-1 \}$ are as above. So the existence of the unital, diffuse abelian $C^*$-subalgebras follows from Theorem 2.4, applied to $B$. \par In case (I) $p_m$ is clearly full, since $A$ is simple. In case (III) it is easy to see that $p_m \wedge f \neq 0$ and $p_m \geq (1-f)$, so, since $A_0$ and $\mathbb{M}_n$ are simple in this case, $p_m$ is full in $A$. In case (II) it follows from Theorem 2.4 that $p_m$ is full in $B$, and consequently in $A$.
\end{proof}
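For a concrete reading of the trichotomy (an illustration only): fix $n = 3$, so the critical value is $1-\frac{1}{n^2} = \frac{8}{9}$. If $\alpha_m < \frac{8}{9}$, then $A$ is simple with a unique trace; if $\alpha_m = \frac{8}{9}$, we get the extension of case (II); and if, say, $\alpha_m = \frac{17}{18}$, then
$$ A = \underset{\frac{1}{2}}{A_0} \oplus \underset{\frac{1}{2}}{\mathbb{M}_3}, \qquad n^2\alpha_m - n^2 + 1 = 9\cdot\tfrac{17}{18} - 9 + 1 = \tfrac{1}{2}, $$
with $A_0$ unital, simple and with a unique trace.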
\section{ The General Case}
In this section we prove the general case of Theorem 2.6, using the result from the previous section (Proposition 3.19). The proof of the general case involves techniques from \cite{D99}, so we will need two technical results from there. \par The first one is Proposition 2.8 in \cite{D99} (see also \cite{D93}):
\begin{prop}
Let $A = A_1 \oplus A_2$ be a direct sum of unital $C^*$-algebras and let $p = 1 \oplus 0 \in A$. Suppose $\phi_A$ is a state on $A$ with $0 < \alpha \overset{def}{=} \phi_A(p) < 1$. Let $B$ be a unital $C^*$-algebra with a state $\phi_B$ and let $(\mathfrak{A}, \phi) = (A, \phi_A) * (B, \phi_B)$. Let $\mathfrak{A}_1$ be the $C^*$-subalgebra of $\mathfrak{A}$ generated by $(0 \oplus A_2) + \mathbb{C} p \subseteq A$, together with $B$. In other words
\begin{equation*}
(\mathfrak{A}_1, \phi|_{\mathfrak{A}_1}) = (\underset{\alpha}{\overset{p}{\mathbb{C}}} \oplus \underset{1-\alpha}{\overset{1-p}{A_2}}) * (B, \phi_B). \end{equation*}
Then $p \mathfrak{A} p$ is generated by $p \mathfrak{A}_1 p$ and $A_1 \oplus 0 \subset A$, which are free in $(p \mathfrak{A} p,
\frac{1}{\alpha} \phi|_{p \mathfrak{A} p})$. In other words
\begin{equation*}
(p \mathfrak{A} p, \frac{1}{\alpha} \phi|_{p \mathfrak{A} p}) \cong
(p \mathfrak{A}_1 p,\frac{1}{\alpha} \phi|_{p \mathfrak{A}_1 p}) *
(A_1, \frac{1}{\alpha} \phi_A|_{A_1}). \end{equation*}
\end{prop}
\begin{remark}
This proposition was proved for the case of von Neumann algebras in \cite{D93}. It is true also in the case of $C^*$-algebras.
\end{remark}
The second result is Proposition 2.5 (ii) of \cite{D99}, which is easy, and we include its proof:
\begin{prop}
Let $A$ be a $C^*$-algebra. Take $h \in A$, $h \geq 0$, and let $B$ be the hereditary subalgebra $\overline{hAh}$ of $A$ (the bar denotes norm closure). Suppose that $B$ is full in $A$. If $B$ has a unique trace, then $A$ has at most one tracial state.
\end{prop}
\begin{proof}
It's easy to see that $\Span \{ xhahy | a,x,y \in A \}$ is norm dense in $A$. If $\tau$ is a tracial state on $A$ then $\tau(xhahy) = \tau(h^{1/2} ahyx h^{1/2})$. Since $h^{1/2} ahyx h^{1/2} \in B$, $\tau$ is uniquely determined by $\tau|_B$.
\end{proof}
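Written out, the identity used in the proof is just the trace property applied twice:
$$ \tau(xhahy) = \tau((hahy)x) = \tau(h^{1/2}(h^{1/2}ahyx)) = \tau(h^{1/2}ahyx\, h^{1/2}). $$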
It is clear that Proposition 3.19 agrees with Theorem 2.6; it is a special case of it. \par As a next step we look at a $C^*$-algebra of the form
\begin{equation*} (M, \tau) = (\underset{\alpha_0'}{\overset{p_0'}{A_0}} \oplus \underset{\alpha_1'}{\overset{p_1'}{\mathbb{M}_{m_1}}} \oplus ... \oplus \underset{\alpha_k'}{\overset{p_k'}{\mathbb{M}_{m_k}}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_l}{\overset{p_l}{\mathbb{C}}}) * (\mathbb{M}_n, tr_n), \end{equation*} where $A_0$ comes with a specified trace and has a unital, diffuse abelian $C^*$-subalgebra with unit $p'_0$. Also we suppose that $\alpha'_0 \geq 0$, $0 < \alpha_1' \leq ... \leq \alpha_k'$, $0 < \alpha_1 \leq ... \leq \alpha_l$, $m_1, ..., m_k \geq 2$, and either $\alpha'_0 > 0$ or $k \geq 1$, or both. Let's denote $p_0 \overset{def}{=} p_0' + p_1' + ... + p_k'$, $B_0 \overset{def}{=} \underset{\alpha_1'}{\overset{p_1'}{\mathbb{M}_{m_1}}} \oplus ... \oplus \underset{\alpha_k'}{\overset{p_k'}{\mathbb{M}_{m_k}}}$, and $\alpha_0 \overset{def}{=} \alpha'_0 + \alpha'_1 + ... + \alpha'_k = \tau(p_0)$. \\ Let's have a look at the $C^*$-subalgebras $N$ and $N'$ of $M$ given by
\begin{equation*}
(N, \tau|_N) = (\underset{\alpha_0}{\overset{p_0}{\mathbb{C}}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_l}{\overset{p_l}{\mathbb{C}}}) * (\mathbb{M}_n, tr_n) \end{equation*} and
\begin{equation*}
(N', \tau|_{N'}) = (\underset{\alpha_0'}{\overset{p_0'}{\mathbb{C}}} \oplus \underset{\alpha_1'}{\overset{p_1'}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_k'}{\overset{p_k'}{\mathbb{C}}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_l}{\overset{p_l}{\mathbb{C}}}) * (\mathbb{M}_n, tr_n). \end{equation*}
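Before analyzing $N$ and $N'$, let us record explicitly how Proposition 4.1 will be applied in this setting (this is only bookkeeping for the identification used repeatedly below): in the notation of Proposition 4.1, take $A_1 = A_0 \oplus B_0$, $A_2 = \mathbb{C} p_1 \oplus ... \oplus \mathbb{C} p_l$, $p = p_0$, $B = \mathbb{M}_n$ and $\mathfrak{A} = M$; then $\mathfrak{A}_1 = N$ and Proposition 4.1 yields
$$ (p_0 M p_0, \tfrac{1}{\alpha_0} \tau|_{p_0 M p_0}) \cong (p_0 N p_0, \tfrac{1}{\alpha_0} \tau|_{p_0 N p_0}) * (A_0 \oplus B_0, \tfrac{1}{\alpha_0} \tau|_{A_0 \oplus B_0}). $$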
We studied $C^*$-algebras of the form of $N$ and $N'$ in the previous section. A brief description is as follows: \par If $\alpha_0, \alpha_l < 1-\frac{1}{n^2}$, then $N$ is simple with a unique trace and $N'$ is also simple with a unique trace. For each of the projections $p_0', p_1', ..., p_k', p_1, ..., p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $N'$, supported on it. \par If $\alpha_0$ or $\alpha_l$ equals $1-\frac{1}{n^2}$, then $N$ has no central projections, and we have a short exact sequence $0 \rightarrow N_0 \rightarrow N \rightarrow \mathbb{M}_n \rightarrow 0$, with $N_0$ being simple with a unique trace. Moreover $p_0$ or $p_l$, respectively, is full in $N$. For each of the projections $p_0', p_1', ..., p_k', p_1, ..., p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $N'$, supported on it. \par If $\alpha_0$ or $\alpha_l$ $> 1-\frac{1}{n^2}$, then $N = \overset{q}{N_0} \oplus \mathbb{M}_n$, with $N_0$ being simple and having a unique trace. \par We consider 2 cases: \par (I) $\alpha_l \geq \alpha_0$. \par (1) $\alpha_l < 1-\frac{1}{n^2}$. \par In this case $N$ and $N'$ are simple and have unique traces, and $p_0$ is full in $N$, and consequently $1_M = 1_N$ is contained in $\langle p_0 \rangle_N$, the ideal of $N$ generated by $p_0$. Since $\langle p_0 \rangle_N \subset \langle p_0 \rangle_M$ it follows that $p_0$ is full also in $M$. From Proposition 4.1 we get $p_0 M p_0 \cong (A_0 \oplus B_0) * p_0 N p_0$. Then from Theorem 3.9 it follows that $p_0 M p_0$ is simple and has a unique trace. Since $p_0$ is a full projection, Proposition 4.3 tells us that $M$ is simple and $\tau$ is its unique trace. For each of the projections $p_0', p_1', ..., p_k', p_1, ..., p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $M$, supported on it, and coming from $N'$. \par (2) $\alpha_l = 1-\frac{1}{n^2}$. \par In this case it is also true that for each of the projections $p_0', p_1', ..., p_k', p_1, ..., p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $M$, supported on it, and coming from $N'$. It is easy to see that $M$ is the linear span of $p_0 M p_0$, $p_0 M (1- p_0) N (1- p_0)$, $(1 -p_0) N p_0 M p_0$, $(1- p_0) N p_0 M p_0 N (1- p_0)$ and $(1- p_0) N (1- p_0)$. We know that we have a $*$-homomorphism $\pi : N \rightarrow \mathbb{M}_n$, such that $\pi(p_l) = 1$. Then it is clear that $\pi(p_0) = 0$, so we can extend $\pi$ to a linear map $\tilde{\pi}$ on $M$, defining it to equal $0$ on $p_0 M p_0$, $p_0 M (1- p_0) N (1- p_0)$, $(1 -p_0) N p_0 M p_0$ and $(1- p_0) N p_0 M p_0 N (1- p_0)$. It is also clear then that $\tilde{\pi}$ will actually be a $*$-homomorphism. Since $\ker(\pi)$ is simple in $N$ and $p_0 \in \ker(\pi)$, $p_0$ is full in $\ker(\pi) \subset N$, so by the above representation of $M$ as a linear span we see that $p_0$ is full in $\ker(\tilde{\pi})$ also. From Proposition 4.1 it follows that $p_0 M p_0 \cong (A_0 \oplus B_0) * (p_0 N p_0)$. Since $p_0 N p_0$ has a unital, diffuse abelian $C^*$-subalgebra with unit $p_0$, it follows from Theorem 3.9 that $p_0 M p_0$ is simple and has a unique trace (to make this conclusion we could use Theorem 1.5 instead). Now since $p_0 M p_0$ is full and hereditary in $\ker(\tilde{\pi})$, from Proposition 4.3 it follows that $\ker(\tilde{\pi})$ is simple and has a unique trace. \par (3) $\alpha_l > 1-\frac{1}{n^2}$.
\par In this case $N = \underset{n^2 - n^2 \alpha_l}{\overset{q}{N_0}} \oplus \underset{n^2 \alpha_l - n^2 + 1}{\overset{1-q}{\mathbb{M}_n}}$ and also $N' = \underset{n^2 - n^2 \alpha_l}{\overset{q}{N'_0}} \oplus \underset{n^2 \alpha_l - n^2 + 1}{\overset{1-q}{\mathbb{M}_n}}$ with $N_0$ and $N'_0$ being simple with unique traces. For each of the projections $q p_0', q p_1' , ..., q p_k', q p_1, ..., q p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $M$, supported on it, and coming from $N'_0$. \par Since $p_0 \leq q$ we can write $M$ as a linear span of $p_0 M p_0$, $p_0 M p_0 N_0 (1- p_0)$, $(1- p_0) N_0 p_0 M p_0$, $(1- p_0) N_0 p_0 M p_0 N_0 (1- p_0)$, $(1- p_0) N_0 (1- p_0)$ and $\mathbb{M}_n$. So we can write $M = \underset{n^2 - n^2 \alpha_l}{\overset{q}{M_0}} \oplus \underset{n^2 \alpha_l - n^2 + 1}{\overset{1-q}{\mathbb{M}_n}}$, where $M_0 \overset{def}{=} q M q \supset N_0$. We know that $p_0$ is full in $N_0$, so as before we can write $1_{M_0} = 1_{N_0} \in \langle p_0 \rangle_{N_0} \subset \langle p_0 \rangle_{M_0}$, so $\langle p_0 \rangle_{M_0} = M_0$. Because of Proposition 4.1, we can write $p_0 M_0 p_0 \cong (A_0 \oplus B_0) * (p_0 N_0 p_0)$. Since $p_0 N_0 p_0$ has a unital, diffuse abelian $C^*$-subalgebra with unit $p_0$, then from Theorem 3.9 (or from Theorem 1.5) it follows that $p_0 M_0 p_0$ is simple with a unique trace. Since $p_0 M_0 p_0$ is full and hereditary in $M_0$, Proposition 4.3 yields that $M_0$ is simple with a unique trace. \par (II) $\alpha_0$ $>$ $\alpha_l$. \par (1) $\alpha_0 \leq 1- \frac{1}{n^2}$. \par In this case $p_0$ is full in $N$ and also in $N'$, so $1_M = 1_N \in \langle p_0 \rangle_N$, which means $p_0$ is full in $M$ also. $p_0 M p_0$ is a full hereditary $C^*$-subalgebra of $M$ and $p_0 M p_0 \cong (A_0 \oplus B_0) * p_0 N p_0$ by Proposition 4.1. Since $p_0 N p_0$ has a diffuse abelian $C^*$-subalgebra, Theorem 3.9 (or Theorem 1.5) shows that $p_0 M p_0$ is simple with a unique trace and then by Proposition 4.3 follows that the same is true for $M$. For each of the projections $p_0', p_1', ..., p_k', p_1, ..., p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $M$, supported on it, comming from $N'$. \par (2) $\alpha_0$ $> 1-\frac{1}{n^2}$. \\ We have 3 cases: \par (2$'$) $\alpha'_0 > 1-\frac{1}{n^2}$. \par In this case $N \cong \overset{q}{N_0} \oplus \mathbb{M}_n$ and $N' \cong \overset{q'}{N'_0} \oplus \mathbb{M}_n$, where $q \leq q'$, with $N_0$ and $N'_0$ being simple and having unique traces. It is easy to see that $p'_1, ..., p'_k, p_1, ..., p_l \leq q'$, so for each of the projections $p_1', ..., p_k', p_1, ..., p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $N'$, supported on it. So those $C^*$-subalgebras live in $M$ also. We have a unital, diffuse abelian $C^*$-subalgebra of $A_0$, supported on $1_{A_0}$, which yields a unital, diffuse abelian $C^*$-subalgebra on $M$, supported on $p'_0$. It is clear that $p_0$ is full in $N$, so as before, $1_M = 1_N \in \langle p_0 \rangle_N$, so $p_0$ is full in $M$ also, so $p_0 M p_0$ is a full hereditary $C^*$-subalgebra of $M$. From Proposition 4.1 we have $p_0 M p_0 \cong (A_0 \oplus B_0) * ( p_0 N_0 p_0 \oplus \mathbb{M}_n)$. It is easy to see that $\mathbb{M}_n$, for $n \geq 2$ contains two $tr_n$-orthogonal zero-trace unitaries. Since also $p_0 N_0 p_0$ has a
unital, diffuse abelian $C^*$-subalgebra, supported on $1_{N_0}$, it is easy to see (using Proposition 2.2) that it also contains two $\tau|_{N_0}$-orthogonal, zero-trace unitaries. Then the conditions of Theorem 1.5 are satisfied. This means that $p_0 M p_0$ is simple with a unique trace and Proposition 4.3 implies that $M$ is simple with a unique trace also. \par (2$''$) $\alpha'_k > 1-\frac{1}{n^2}$. \par Let's denote $$N'' = (\underset{\alpha_0'}{\overset{p_0'}{A_0}} \oplus \underset{\alpha_1'}{\overset{p_1'}{\mathbb{M}_{m_1}}} \oplus ... \oplus \underset{\alpha_{k-1}'}{\overset{p_{k-1}'}{\mathbb{M}_{m_{k-1}}}} \oplus \underset{\alpha_{k}'}{\overset{p_k'}{\mathbb{C}}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_l}{\overset{p_l}{\mathbb{C}}}) * (\mathbb{M}_n, tr_n).$$ Then $N''$ satisfies the conditions of case (I,3) and so $N'' \cong \overset{q}{N''_0} \oplus \mathbb{M}_n$. Clearly $p_0', p_1', ..., p_{k-1}', p_1, ..., p_l \leq q$, so for each of the projections $p_0', p_1', ..., p_{k-1}', p_1, ..., p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $N''_0$, supported on it. Those $C^*$-algebras live in $M$ also. From case (I,3) we have that $p'_k$ is full in $N''$ and as before $1_M = 1_{N''} \in \langle p'_k \rangle_{N''}$ implies that $p'_k$ is full in $M$ also. From Proposition 4.1 it follows that $p'_k M p'_k \cong (p'_k N''_0 p'_k \oplus \mathbb{M}_n) * \mathbb{M}_{m_k}$. Since $N''_0$ has a unital, diffuse abelian $C^*$-subalgebra, supported on $q p'_k$, an argument similar to the one we made in case (II, 2$'$) allows us to apply Theorem 1.5 to get that $p'_k M p'_k$ is simple with a unique trace. By Proposition 4.3 it follows that the same is true for $M$. The unital, diffuse abelian $C^*$-subalgebra of $M$ supported on $p'_k$ can be obtained by applying the note after Theorem 1.5 to $p'_k M p'_k \cong (p'_k N''_0 p'_k \oplus \mathbb{M}_n) * \mathbb{M}_{m_k}$. \par (2$'''$) $\alpha'_0$ and $\alpha'_k$ $\leq 1-\frac{1}{n^2}$. \par In this case $N \cong \overset{q}{N_0} \oplus \mathbb{M}_n$, with $N_0$ being simple and having a unique trace. Moreover $N'$ has no central projections and for each of the projections $p'_0, p_1', ..., p_k', p_1, ..., p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $N'$, supported on it. So those $C^*$-subalgebras live in $M$ also. It is clear that $p_0$ is full in $N$, so as before $1_M = 1_N \in \langle p_0 \rangle_N$, so $p_0$ is full in $M$ also, so $p_0 M p_0$ is a full hereditary $C^*$-subalgebra of $M$. From Proposition 4.1 we have $p_0 M p_0 \cong (A_0 \oplus B_0) * ( p_0 N_0 p_0 \oplus \mathbb{M}_n)$. Since $A_0$ and $p_0 N_0 p_0$ both have unital, diffuse abelian $C^*$-subalgebras, supported on their units, it is easy to see (using Proposition 2.2) that the conditions of Theorem 1.5 are satisfied. This means that $p_0 M p_0$ is simple with a unique trace and Proposition 4.3 yields that $M$ is simple with a unique trace also. \par We summarize the discussion above in the following
\begin{prop}
Let
\begin{equation*} (M,\tau) \overset{def}{=} (\underset{\alpha_0'}{\overset{p_0'}{A_0}} \oplus \underset{\alpha_1'}{\overset{p_1'}{\mathbb{M}_{m_1}}} \oplus ... \oplus \underset{\alpha_k'}{\overset{p_k'}{\mathbb{M}_{m_k}}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_l}{\overset{p_l}{\mathbb{C}}}) * (\mathbb{M}_n, tr_n), \end{equation*} where $n \geq 2$, $\alpha'_0 \geq 0$, $\alpha'_1 \leq \alpha'_2 \leq ... \leq \alpha'_k$, $\alpha_1 \leq ... \leq \alpha_l$, $m_1, ..., m_k \geq 2$, and $\overset{p'_0}{A_0} \oplus 0$ has a unital, diffuse abelian $C^*$-subalgebra, having $p'_0$ as a unit. Then: \par (I) If $\alpha_l < 1-\frac{1}{n^2}$, then $M$ is unital, simple with a unique trace $\tau$. \par (II) If $\alpha_l = 1-\frac{1}{n^2}$, then we have a short exact sequence $0 \rightarrow M_0 \rightarrow M \rightarrow \mathbb{M}_n
\rightarrow 0$, where $M$ has no central projections and $M_0$ is nonunital, simple with a unique trace $\tau|_{M_0}$. \par (III) If $\alpha_l > 1-\frac{1}{n^2}$, then $M = \underset{n^2 - n^2 \alpha_l}{\overset{f}{M_0}} \oplus \underset{n^2 \alpha_l - n^2 + 1}{\overset{1-f}{\mathbb{M}_n}}$, where $1-f \leq p_l$, and where $M_0$ is unital, simple and has a unique trace
$(n^2 - n^2 \alpha_l)^{-1} \tau|_{M_0}$. \par Let $f$ means the identity projection for cases (I) and (II). Then in all cases for each of the projections $f p_0', f p_1', ..., f p_k', f p_1, ..., f p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $M$, supported on it. \par In all the cases $p_l$ is a full projection in $M$.
\end{prop}
To prove Theorem 2.6 we will use Proposition 4.4. First let's check that Proposition 4.4 agrees with the conclusion of Theorem 2.6. We can write $$(M,\tau) \overset{def}{=} (\underset{\alpha_0'}{\overset{p_0'}{A_0}} \oplus \underset{\alpha_1'}{\overset{p_1'} {\mathbb{M}_{m_1}}} \oplus ... \oplus \underset{\alpha_k'}{\overset{p_k'}{\mathbb{M}_{m_k}}} \oplus \underset{\alpha_1}{\overset{p_1} {\mathbb{C}}} \oplus ... \oplus \underset{\alpha_l}{\overset{p_l}{\mathbb{C}}}) * \underset{\beta_1}{\overset{q_1}{\mathbb{M}_n}},$$ where
$q_1 = 1_M$ and $\beta_1 = 1$. It is easy to see that $L_0 = \{ (l,1) | \frac{\alpha_l}{1^2} + \frac{1}{n^2} = 1 \} = \{ (l,1) | \alpha_l =
1-\frac{1}{n^2} \}$, which is not empty if and only if $\alpha_l = 1-\frac{1}{n^2}$. Also $L_+ = \{ (l,1) | \frac{\alpha_l}{1^2} +
\frac{1}{n^2} > 1 \} = \{ (l,1) | \alpha_l > 1-\frac{1}{n^2} \}$, and here $L_+$ is not empty if and only if $\alpha_l > 1-\frac{1}{n^2}$. If both $L_+$ and $L_0$ are empty, then $M$ is simple with a unique trace. If $L_0$ is not empty, then clearly $L_+$ is empty, so we have no central projections and a short exact sequence $0 \rightarrow M_0 \rightarrow M \rightarrow \mathbb{M}_n \rightarrow 0$, with $M_0$ being simple with a unique trace. In this case all nontrivial projections are full in $M$. If $L_+$ is not empty, then clearly $L_0$ is empty and so $M = \underset{n^2 -n^2 \alpha_l}{\overset{q}{M_0}} \oplus \underset{n^2(\frac{\alpha_l}{1^2} + \frac{1}{n^2} - 1)}{\overset{1-q}{\mathbb{M}_n}}$, where $M_0$ is simple with a unique trace. $p_l$ is full in $M$. \\
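A numerical instance of this bookkeeping (for illustration only): take $n = 2$. Then $L_0 \neq \emptyset$ exactly when $\alpha_l = \frac{3}{4}$, and $L_+ \neq \emptyset$ exactly when $\alpha_l > \frac{3}{4}$, in which case the matrix summand carries trace
$$ n^2(\tfrac{\alpha_l}{1^2} + \tfrac{1}{n^2} - 1) = 4\alpha_l - 3, \qquad \textrm{e.g. } \alpha_l = \tfrac{7}{8} \textrm{ gives } 4\cdot\tfrac{7}{8} - 3 = \tfrac{1}{2}, $$
in agreement with the value $n^2\alpha_l - n^2 + 1$ from Proposition 4.4.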
\par
$Proof\ of\ Theorem\ 2.6:$ \\
\par
Now to prove Theorem 2.6 we start with
\begin{equation*} (\mathfrak{A},\phi )=(\underset{\alpha_0}{\overset{p_0}{A_0}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{M}_{n_1}}} \oplus ... \oplus \underset{\alpha_k}{\overset{p_k}{\mathbb{M}_{n_k}}})*(\underset{\beta_0}{\overset{q_0}{B_0}} \oplus \underset{\beta_1}{\overset{q_1}{\mathbb{M}_{m_1}}} \oplus ... \oplus \underset{\beta_l}{\overset{q_l}{\mathbb{M}_{m_l}}}), \end{equation*} where $A_0$ and $B_0$ have unital, diffuse abelian $C^*$-subalgebras, supported on their units (we allow $\alpha_0 = 0$ or/and $\beta_0 = 0$). The case where $n_1 = ... = n_k = m_1 = ... = m_l = 1$ is treated in Theorem 2.5. The case where $\alpha_0 = 0$, $k = 1$, and $n_k > 1$ was treated in Proposition 4.4. So we can suppose without loss of generality that $n_k \geq 2$ and either $k > 1$ or $\alpha_0 > 0$ or both. To prove that the conclusions of Theorem 2.6 takes place in this case we will use induction on
$\card \{ i | n_i \geq 2 \} + \card \{ j | m_j \geq 2 \}$, having Theorem 2.5 ($\card \{ i | n_i \geq 2 \} +
\card \{ j | m_j \geq 2 \} = 0$) as first step of the induction. We look at
\begin{equation*}
(\mathfrak{B},\phi|_\mathfrak{B})=(\underset{\alpha_0}{\overset{p_0}{A_0}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{M}_{n_1}}} \oplus ... \oplus \underset{\alpha_{k-1}}{\overset{p_{k-1}}{\mathbb{M}_{n_{k-1}}}} \oplus \underset{\alpha_k}{\overset{p_k}{\mathbb{C}}}) * (\underset{\beta_0}{\overset{q_0}{B_0}} \oplus \underset{\beta_1}{\overset{q_1}{\mathbb{M}_{m_1}}} \oplus ... \oplus \underset{\beta_l}{\overset{q_l}{\mathbb{M}_{m_l}}}) \subset (\mathfrak{A},\phi). \end{equation*}
We suppose that Theorem 2.6 is true for $(\mathfrak{B},\phi|_\mathfrak{B})$ and we will prove it for $(\mathfrak{A},\phi )$. This will be the induction step and will prove Theorem 2.6.
\par Denote $L_0^{\mathfrak{A}} \overset{def}{=} \{ (i,j)| \frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2} = 1 \}$, $L_0^{\mathfrak{B}}
\overset{def}{=} \{ (i,j)| i \leq k-1$ and $\frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2} = 1 \} \cup \{ (k,j) | \frac{\alpha_k}{1^2} +
\frac{\beta_j}{m_j^2} = 1 \}$ and similarly $L_+^{\mathfrak{A}} \overset{def}{=} \{ (i,j)| \frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2}
> 1 \}$, and $L_+^{\mathfrak{B}} \overset{def}{=} \{ (i,j)| i \leq k-1$ and $\frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2} > 1 \} \cup \{ (k,j) | \frac{\alpha_k}{1^2} + \frac{\beta_j}{m_j^2} > 1 \}$. Clearly $L_0^{\mathfrak{A}} \cap \{ 1 \leq i \leq k-1 \} = L_0^{\mathfrak{B}} \cap \{ 1 \leq i \leq k-1 \}$ and similarly $L_+^{\mathfrak{A}} \cap \{ 1 \leq i \leq k-1 \} = L_+^{\mathfrak{B}} \cap \{ 1 \leq i \leq k-1 \}$. Let $N_{\mathfrak{A}}(i,j) = max(n_i, m_j)$ and let $N_{\mathfrak{B}}(i,j) = N_{\mathfrak{A}}(i,j), 1 \leq i \leq k-1$, and $N_{\mathfrak{B}}(k,j) = m_j$.
By assumption
\begin{equation*} \mathfrak{B}= \underset{\delta}{\overset{g}{\mathfrak{B}_0}} \oplus \underset{(i,j)\in L_+^{\mathfrak{B}}}{\bigoplus} \underset{\delta_{ij}}{\overset{g_{ij}}{\mathbb{M}_{N_{\mathfrak{B}}(i,j)}}}. \end{equation*}
We want to show that
\begin{equation} \mathfrak{A} = \underset{\gamma}{\overset{f}{\mathfrak{A}_0}} \oplus \underset{(i,j)\in L_+^{\mathfrak{A}}}{\bigoplus} \underset{\gamma_{ij}}{\overset{f_{ij}}{\mathbb{M}_{N_{\mathfrak{A}}(i,j)}}}. \end{equation}
We can represent $\mathfrak{A}$ as the span of $p_k \mathfrak{A} p_k$, $p_k \mathfrak{A} p_k \mathfrak{B} (1-p_k)$, $(1-p_k) \mathfrak{B} p_k \mathfrak{A} p_k$, $(1-p_k) \mathfrak{B} p_k \mathfrak{A} p_k \mathfrak{B} (1-p_k)$, and $(1-p_k) \mathfrak{B} (1-p_k)$. From the fact that $g_{kj} \leq p_k$ and $g_{ij} \leq 1-p_k, \forall 1 \leq i \leq k-1$ we see that $p_k \mathfrak{B} (1-p_k) = p_k \mathfrak{B}_0 (1-p_k)$, $(1-p_k) \mathfrak{B} p_k = (1-p_k) \mathfrak{B}_0 p_k$, and $(1-p_k) \mathfrak{B} (1-p_k) = (1-p_k) \mathfrak{B}_0 (1-p_k) \oplus \underset{i \neq k}{\underset{(i,j) \in L_+^{\mathfrak{B}}}{\bigoplus}} \mathbb{M}_{N(i,j)}$. All this tells us that we can represent $\mathfrak{A}$ as the span of $p_k \mathfrak{A} p_k$, $p_k \mathfrak{A} p_k \mathfrak{B}_0 (1-p_k)$, $(1-p_k) \mathfrak{B}_0 p_k \mathfrak{A} p_k$, $(1-p_k) \mathfrak{B}_0 p_k \mathfrak{A} p_k \mathfrak{B}_0 (1-p_k)$, $ (1-p_k) \mathfrak{B}_0 (1-p_k)$, and $\underset{i \neq k}{\underset{(i,j)\in L_+^{\mathfrak{B}}}{\bigoplus}} \underset{\delta_{ij}}{\overset{g_{ij}}{\mathbb{M}_{N(i,j)}}}$. \par In order to show that $\mathfrak{A}$ has the form (9), we need to look at $p_k \mathfrak{A} p_k$. From Proposition 4.1 we have $$p_k \mathfrak{A} p_k \cong (p_k \mathfrak{B} p_k) * \mathbb{M}_{n_k} \cong (\underset{\frac{\delta}{\alpha_k}}{\overset{g}{p_k \mathfrak{B}_0 p_k}} \oplus \underset{(k,j)\in L_+^{\mathfrak{B}}}{\bigoplus} \underset{\frac{\delta_{kj}}{\alpha_k}}{\overset{g_{kj}}{\mathbb{M}_{N(k,j)}}}) * \mathbb{M}_{n_k}.$$ Since by assumption $p_k \mathfrak{B}_0 p_k$ has a unital, diffuse abelian $C^*$-subalgebra, supported on $1_{p_k \mathfrak{B}_0 p_k}$, we can use Proposition 4.4 to determine the form of $p_k \mathfrak{A} p_k$. \par Thus $p_k \mathfrak{A} p_k$: \par (i) Is simple with a unique trace if whenever for all $1 \leq r \leq l$ with $N(k,r) = 1$ we have $\frac{\delta_{kr}}{\alpha_k} < 1 - \frac{1}{n_k^2}$. \par (ii) Is an extension $0 \rightarrow I \rightarrow p_k \mathfrak{A} p_k \rightarrow \mathbb{M}_{n_k} \rightarrow 0$ if $\exists 1 \leq r \leq l$, with $N(k,r) = 1$, and $\frac{\delta_{kr}}{\alpha_k} = 1 - \frac{1}{n_k^2}$. Moreover $I$ is simple with a unique trace and has no central projections. \par (iii) Has the form $p_k \mathfrak{A} p_k = I \oplus \underset{n_k^2(\frac{\delta_{kr}}{\alpha_k} - 1 + \frac{1}{n_k^2})}{\mathbb{M}_{n_k}}$, where $I$ is unital, simple with a unique trace whenever $\exists 1 \leq r \leq l$ with $N(k,r) = 1$, and $\frac{\delta_{kr}}{\alpha_k} > 1 - \frac{1}{n_k^2}$. \par By assumption $\delta_{ij} = N(i,j)^2 (\frac{\alpha_i}{n_i^2} +\frac{\beta_j}{m_j^2} - 1)$, so when $r$ satisfies the conditions of case (iii) above, then $m_r = 1$ and $n_k^2(\frac{\delta_{kr}}{\alpha_k} - 1 + \frac{1}{n_k^2}) = n_k^2(\frac{\alpha_k + \beta_r - 1}{\alpha_k} + \frac{1}{n_k^2} -1) = \frac{n_k^2}{\alpha_k}(\frac{\alpha_k}{n_k^2} + \frac{\beta_r}{1^2} - 1)$, just what we needed to show. Defining $\mathfrak{A}_0 \overset{def}{=} (1-(\underset{(i,j) \in L_+^{\mathfrak{A}}}{\oplus} f_{ij})) \mathfrak{A} (1-(\underset{(i,j) \in L_+^{\mathfrak{A}}}{\oplus} f_{ij}))$, we see that $\mathfrak{A}$ has the form (9). \par We need to study $\mathfrak{A}_0$ now. Since clearly $g \leq f$, we see that $\mathfrak{A} p_k \mathfrak{B}_0 = \mathfrak{A} p_k g \mathfrak{B}_0 = \mathfrak{A} g p_k \mathfrak{B}_0 = \mathfrak{A}_0 p_k \mathfrak{B}_0$ and similarly $\mathfrak{A} p_k \mathfrak{B}_0 = \mathfrak{A}_0 p_k \mathfrak{B}_0$. From this and from what we proved above follows that:
\begin{gather} \mathfrak{A}_0 \text{ is the span of } p_k \mathfrak{A}_0 p_k,\ (1-p_k) \mathfrak{B}_0 p_k \mathfrak{A}_0 p_k, \\ \notag p_k \mathfrak{A}_0 p_k \mathfrak{B}_0 (1-p_k),\ (1-p_k) \mathfrak{B}_0 p_k \mathfrak{A}_0 p_k \mathfrak{B}_0 (1-p_k), \text{ and } (1-p_k) \mathfrak{B}_0 (1-p_k). \end{gather}
We need to show that for each of the projections $f p_s$, $0 \leq s \leq k$ and $f q_t$, $1 \leq t \leq l$, we have a unital, diffuse abelian $C^*$-subalgebra of $\mathfrak{A}_0$, supported on it. The ones, supported on $f p_s$, $1 \leq s \leq k-1$ come from $(1-p_k) \mathfrak{B}_0 (1-p_k)$ by the induction hypothesis. The one with unit $f p_k$ comes from the representation $p_k \mathfrak{A} p_k \cong (p_k \mathfrak{B} p_k) * \mathbb{M}_{n_k}$ and Proposition 4.4. For $1 \leq s \leq l$ we have
\begin{gather} q_s \mathfrak{A} q_s \cong \underset{\frac{\gamma}{\beta_s}}{\overset{f q_s}{{q_s \mathfrak{A}_0 q_s}}} \oplus \underset{1 \leq i \leq k-1}{\underset{(i,s) \in L_+^{\mathfrak{A}}}{\bigoplus}} \underset{\frac{\gamma_{is}}{\beta_s}}{\overset{f_{is}}{\mathbb{M}_{N_{\mathfrak{A}}(i,s)}}} \oplus \underset{\frac{\gamma_{ks}}{\beta_s}}{\overset{f_{ks}}{\mathbb{M}_{N_{\mathfrak{A}}(k,s)}}} \end{gather}
and
\begin{gather} q_s \mathfrak{B} q_s \cong \underset{\frac{\delta}{\beta_s}}{\overset{g q_s}{q_s \mathfrak{B}_0 q_s}} \oplus \underset{1 \leq i \leq k-1}{\underset{(i,s) \in L_+^{\mathfrak{B}}}{\bigoplus}} \underset{\frac{\delta_{is}}{\beta_s}}{\overset{g_{is}}{\mathbb{M}_{N_{\mathfrak{B}}(i,s)}}} \oplus \underset{\frac{\delta_{ks}}{\beta_s}}{\overset{g_{ks}}{\mathbb{M}_{N_{\mathfrak{B}}(k,s)}}}. \end{gather}
From what we showed above follows that for $1 \leq i \leq k-1$ we have $\gamma_{is} = \delta_{is}$ and $f_{is} = g_{is}$. If $(k,s) \notin L_+^{\mathfrak{B}}$, (or $\alpha_k < 1 - \frac{\beta_s}{m_s^2}$), then $(k,s) \notin L_+^{\mathfrak{A}}$ and by (11) and (12) we see that $gq_s = fq_s$ and so in $\mathfrak{A}_0$ we have a unital, diffuse abelian $C^*$-subalgebra with unit $gq_s = fq_s$, which comes from $\mathfrak{B}_0$. If $(k,s) \in L_+^{\mathfrak{B}}$, then $gq_s \lvertneqq fq_s$ and since we have a unital, diffuse abelian $C^*$-subalgebra of $\mathfrak{A}_0$, supported on $gq_s$, comming from $\mathfrak{B}_0$, we need only to find a unital, diffuse abelian $C^*$-subalgebra of $\mathfrak{A}_0$, supported on $fq_s - gq_s$ and its direct sum with the one supported on $gq_s$ will be a unital, diffuse abelian $C^*$-subalgebra of $\mathfrak{A}_0$, supported on $fq_s$. But from the form (11) and (12) it is clear that $fq_s - gq_s \leq g_{ks}$, since from (11) and (12) $(f_{1s} + ... + f_{(k-1)s}) q_s \mathfrak{A} q_s (f_{1s} + ... + f_{(k-1)s}) = (g_{1s} + ... + g_{(k-1)s}) q_s \mathfrak{B} q_s (g_{1s} + ... + g_{(k-1)s})$. It is also clear then that $ fq_s - gq_s = f g_{ks} \leq p_k$, since $gq_s \perp g_{ks}$. We look for this $C^*$-subalgebra in $$p_k \mathfrak{A} p_k = \underset{\frac{\gamma}{\alpha_k}}{\overset{fp_k}{ p_k \mathfrak{A}_0 p_k }} \oplus \underset{(k,j)\in L_+^{\mathfrak{A}}}{\bigoplus} \underset{\frac{\gamma_{kj}}{\alpha_k}}{\overset{f_{kj}} {\mathbb{M}_{N_{\mathfrak{A}}(k,j)}}} \cong (p_k \mathfrak{B} p_k) * \mathbb{M}_{n_k},$$ $$ \cong (\underset{\frac{\delta}{\alpha_k}}{\overset{g}{p_k \mathfrak{B}_0 p_k}} \oplus \underset{(k,j)\in L_+^{\mathfrak{B}}}{\bigoplus} \underset{\frac{\delta_{kj}}{\alpha_k}}{\overset{g_{kj}}{\mathbb{M}_{N_{\mathfrak{B}}(k,j)}}}) * \mathbb{M}_{n_k}.$$ Proposition 4.4 gives us a unital, diffuse abelian $C^*$-subalgebra of $p_k \mathfrak{A}_0 p_k$, supported on $(f p_k) g_{ks} = f g_{ks} = fq_s -gq_s$. This proves that we have a unital, diffuse abelian $C^*$-subalgebra of $\mathfrak{A}_0$, supported on $fq_s$. \par Now we have to study the ideal structure of $\mathfrak{A}_0$, knowing by the induction hypothesis, the form of $\mathfrak{B}$. We will use the "span representation" of $\mathfrak{A}_0$ (10). \par For each $(i,j) \in L_0^{\mathfrak{B}}$ we know the existance of $*$-homomorphisms $\pi_{(i,j)}^{\mathfrak{B}_0} : \mathfrak{B}_0 \rightarrow \mathbb{M}_{N_{\mathfrak{B}}(i,j)}$. For $i \neq k$ we can write those as $\pi_{(i,j)}^{\mathfrak{B}_0} : \mathfrak{B}_0 \rightarrow \mathbb{M}_{N_{\mathfrak{A}}(i,j)}$ and since the support of $\pi_{(i,j)}^{\mathfrak{B}_0}$ is contained in $(1-p_k)$, using (10), we can extend linearly $\pi_{(i,j)}^{\mathfrak{B}_0}$ to $\pi_{(i,j)}^{\mathfrak{A}_0} : \mathfrak{A}_0 \rightarrow \mathbb{M}_{N_{\mathfrak{A}}(i,j)}$, by defining it to be zero on $p_k \mathfrak{A}_0 p_k$, $(1-p_k) \mathfrak{B}_0 p_k \mathfrak{A}_0 p_k$, $p_k \mathfrak{A}_0 p_k \mathfrak{B}_0 (1-p_k)$, and $(1-p_k) \mathfrak{B}_0 p_k \mathfrak{A}_0 p_k \mathfrak{B}_0 (1-p_k)$. Clearly $\pi_{(i,j)}^{\mathfrak{A}_0}$ is a $*$-homomorphism also. 
\par By the induction hypothesis we know that $g p_k$ is full in $\underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{B}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{B}_0}) \subset \mathfrak{B}_0$ and by (10), and the way we extended $\pi_{(i,j)}^{\mathfrak{B}_0}$, we see that $f p_k$ is full in $\underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0}) \subset \mathfrak{A}_0$. Then $p_k \mathfrak{A}_0 p_k$ is full and hereditary in $\underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$, so by the Rieffel correspondence from \cite{R82}, we have that $p_k \mathfrak{A}_0 p_k$ and $\underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$ have the same ideal structure. \par Above we saw that
\begin{gather} p_k \mathfrak{A} p_k = \underset{\frac{\gamma}{\alpha_k}}{\overset{fp_k}{ p_k \mathfrak{A}_0 p_k }} \oplus \underset{(k,j)\in L_+^{\mathfrak{A}}}{\bigoplus} \underset{\frac{\gamma_{kj}}{\alpha_k}}{\overset{f_{kj}} {\mathbb{M}_{N_{\mathfrak{A}}(k,j)}}} \cong (p_k \mathfrak{B} p_k) * \mathbb{M}_{n_k} \cong \\ \notag \cong (\underset{\frac{\delta}{\alpha_k}}{\overset{gp_k}{p_k \mathfrak{B}_0 p_k}} \oplus \underset{(k,j)\in L_+^{\mathfrak{B}}}{\bigoplus} \underset{\frac{\delta_{kj}}{\alpha_k}}{\overset{g_{kj}}{\mathbb{M}_{N_{\mathfrak{B}}(k,j)}}}) * \mathbb{M}_{n_k}. \end{gather}
From Proposition 4.4 it follows that $p_k \mathfrak{A}_0 p_k$ is not simple if and only if $\exists 1 \leq s \leq l$, such that $(k,s) \in L_+^{\mathfrak{B}}, m_s = 1$ with $\frac{\delta_{ks}}{\alpha_k} = 1-\frac{1}{n_k^2}$, where $\delta_{ks} = \alpha_k + \beta_s -1$. This means that $\frac{\alpha_k + \beta_s -1}{\alpha_k} = 1 - \frac{1}{n_k^2}$, which is equivalent to $\frac{\beta_s}{1^2} + \frac{\alpha_k}{n_k^2} = 1$, so this implies $(k,s) \in L_0^{\mathfrak{A}}$. If this is the case, then (13), together with Proposition 4.4, gives us a $*$-homomorphism $\pi'_{(k,s)} : p_k \mathfrak{A}_0 p_k \rightarrow \mathbb{M}_{n_k}$, such that $\ker(\pi'_{(k,s)}) \subset p_k \mathfrak{A}_0 p_k$ is simple with a unique trace. Using (10) we extend $\pi'_{(k,s)}$ linearly to a linear map $\pi_{(k,s)}^{\mathfrak{A}_0} : \mathfrak{A}_0 \rightarrow \mathbb{M}_{n_k}$, by defining $\pi_{(k,s)}^{\mathfrak{A}_0}$ to be zero on $(1-p_k) \mathfrak{B}_0 p_k \mathfrak{A}_0 p_k$, $p_k \mathfrak{A}_0 p_k \mathfrak{B}_0 (1-p_k)$, $(1-p_k) \mathfrak{B}_0 p_k \mathfrak{A}_0 p_k \mathfrak{B}_0 (1-p_k)$, and $(1-p_k) \mathfrak{B}_0 (1-p_k)$. Similarly as before, $\pi_{(k,s)}^{\mathfrak{A}_0}$ turns out to be a $*$-homomorphism. By the Rieffel correspondence of the ideals of $p_k \mathfrak{A}_0 p_k$ and $\underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$, it is easy to see that the simple ideal $\ker(\pi'_{(k,s)}) \subset p_k \mathfrak{A}_0 p_k$ corresponds to the ideal $\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0}) \subset \underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$, so $\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$ is simple. To see that $\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$ has a unique trace we notice that from the construction of $\pi_{(i,j)}^{\mathfrak{A}_0}$ we have $\ker(\pi'_{(k,s)}) = p_k \ker(\pi_{(k,s)}^{\mathfrak{A}_0}) p_k = p_k \underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0}) p_k$ (the last equality is true because $p_k \mathfrak{A}_0 p_k \subset \underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$). Now we argue similarly as in the proof of Proposition 4.3, using the fact that $\ker(\pi'_{(k,s)})$ has a unique trace: Suppose that $\rho$ is a trace on $\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}
\ker(\pi_{(i,j)}^{\mathfrak{A}_0})$. It is easy to see that $\Span \{ x p_k a p_k y | x, y, a \in \underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0}), a \geq 0 \}$ is dense in $\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$, since $\ker(\pi'_{(k,s)})$ is full in $\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$. Then since $p_k a p_k \geq 0$ we have $\rho(x p_k a p_k y) = \rho((p_k a p_k) y x) = \rho((p_k a p_k)^{1/2} y x (p_k a p_k)^{1/2})$ and since $(p_k a p_k)^{1/2} y x (p_k a p_k)^{1/2}$ is supported on $p_k$, it follows that $(p_k a p_k)^{1/2} y x (p_k a p_k)^{1/2} \in p_k \underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0}) p_k = \ker(\pi'_{(k,s)})$,
so $\rho$ is uniquely determined by $\rho|_{\ker(\pi'_{(k,s)})}$ and hence $\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$ has a unique trace. \par If there is no $1 \leq s \leq m$ with $(k,s) \in L_0^{\mathfrak{A}}$, it follows from what we said above that $p_k \mathfrak{A}_0 p_k$ is simple with a unique trace. But since $p_k \mathfrak{A}_0 p_k$ is full and hereditary in $\underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0}) = \underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$, it follows that $\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$ is simple with a unique trace in this case too. \par We already showed that $f p_k$ is full in $\underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$. Now let $1 \leq r \leq k-1$. We need to show that $f p_r$ is full in $\underset{i \neq r}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$. From (11) and (12) it follows that $f-g \leq p_k$. So $f p_r = g p_r$ for all $1 \leq r \leq k-1$. From the way we constructed $\pi_{(i,j)}^{\mathfrak{A}_0}$ it is clear that $f p_r \in \underset{i \neq r}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$. It is also true that $f p_r \notin \ker(\pi_{(r,j)}^{\mathfrak{A}_0})$ for any $1 \leq j \leq l$. So the smallest ideal of $\mathfrak{A}_0$ that contains $f p_r$ is $\underset{i \neq r}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$, meaning that we must have $\langle f p_r \rangle_{\mathfrak{A}_0} = \underset{i \neq r}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$. \par Finally, we need to show that for all $1 \leq s \leq l$ we have that $f q_s$ is full in $\underset{j \neq s}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$. Let $(i,j) \in L_0^{\mathfrak{A}}$ with $i \neq k$, $j \neq s$. Since $g q_s \in \ker(\pi_{(i,j)}^{\mathfrak{B}})$ and since $(f-g)q_s \leq p_k$, the way we extended $\pi_{(i,j)}^{\mathfrak{B}}$ to $\pi_{(i,j)}^{\mathfrak{A}}$ shows that $f q_s \in \ker(\pi_{(i,j)}^{\mathfrak{A}})$. Let $(i,s) \in L_0^{\mathfrak{A}}$ and $i \neq k$. Then we know that $g q_s \notin \ker(\pi_{(i,s)}^{\mathfrak{B}})$, which implies $f q_s \notin \ker(\pi_{(i,s)}^{\mathfrak{A}})$. Suppose $(k,s) \in L_0^{\mathfrak{A}}$. Then $m_s = 1$ and (13), Proposition 4.4, and the way we extended $\pi'_{(k,s)}$ to $\pi_{(k,s)}^{\mathfrak{A}_0}$ show that $f g_{ks} = fq_s - gq_s$ is full in $p_k \mathfrak{A}_0 p_k$, meaning that $fq_s -gq_s$, and consequently $fq_s$, is not contained in $\ker(\pi_{(k,s)}^{\mathfrak{A}_0})$. Finally, let $j \neq s$ and suppose $(k,j) \in L_0^{\mathfrak{A}}$. This means that $(k,j) \in L_+^{\mathfrak{B}}$ and also that the trace of $q_j$ is so large that $(i,s) \notin L_+^{\mathfrak{B}}$ and $(i,s) \notin L_0^{\mathfrak{B}}$ for any $1 \leq i \leq k$. Then (12) shows that $q_s \leq g$. The way we defined $\pi_{(k,j)}^{\mathfrak{A}_0}$ using (13) and Proposition 4.4 shows us that $\mathfrak{B}_0 \subset \ker(\pi_{(k,j)}^{\mathfrak{A}_0})$ in this case. This shows $q_s = g q_s = fq_s \in \ker(\pi_{(k,j)}^{\mathfrak{A}_0})$.
All this tells us that the smallest ideal of $\mathfrak{A}_0$ containing $fq_s$ is $\underset{j \neq s}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$, and therefore $\langle fq_s \rangle_{\mathfrak{A}_0} = \underset{j \neq s}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$. \par This concludes the proof of Theorem 2.6.
\qed
{\em Acknowledgements.} I would like to thank Ken Dykema, my advisor, for the many helpful conversations I had with him, for the moral support and for reading the first version of this paper. I would also like to thank Ron Douglas and Roger Smith for some discussions.
\end{document}
\begin{document}
\title{Numerical homogenization of H(curl)-problems}
\author{Dietmar Gallistl\footnotemark[2] \and Patrick Henning\footnotemark[3]\and Barbara Verf\"urth\footnotemark[4]} \date{} \maketitle
\renewcommand{\thefootnote}{\fnsymbol{footnote}} \footnotetext[2]{Institut f\"ur Angewandte und Numerische Mathematik, Karlsruher Institut f\"ur Technologie, Englerstr. 2, D-76131 Karlsruhe, Germany} \footnotetext[3]{Department of Mathematics, KTH Royal Institute of Technology, Lindstedtsv\"agen 25, SE-100 44 Stockholm, Sweden} \footnotetext[4]{Applied Mathematics, Westf\"alische Wilhelms-Uni\-ver\-si\-t\"at M\"unster, Einsteinstr. 62, D-48149 M\"unster, Germany} \renewcommand{\thefootnote}{\arabic{footnote}}
\begin{Abstract} If an elliptic differential operator associated with an $\mathbf{H}(\mathrm{curl})$-problem involves rough (rapidly varying) coefficients, then solutions to the corresponding $\mathbf{H}(\mathrm{curl})$-problem admit typically very low regularity, which leads to arbitrarily bad convergence rates for conventional numerical schemes. The goal of this paper is to show that the missing regularity can be compensated through a corrector operator. More precisely, we consider the lowest order N{\'e}d{\'e}lec finite element space and show the existence of a linear corrector operator with four central properties: it is computable, $\mathbf{H}(\mathrm{curl})$-stable, quasi-local and allows for a correction of coarse finite element functions so that first-order estimates (in terms of the coarse mesh-size) in the $\mathbf{H}(\mathrm{curl})$ norm are obtained provided the right-hand side belongs to $\mathbf{H}(\mathrm{div})$. With these four properties, a practical application is to construct generalized finite element spaces which can be straightforwardly used in a Galerkin method. In particular, this characterizes a homogenized solution and a first order corrector, including corresponding quantitative error estimates without the requirement of scale separation. \end{Abstract}
\begin{keywords} multiscale method, wave propagation, Maxwell's equations, finite element method, a priori error estimates \end{keywords}
\begin{AMS} 35Q61, 65N12, 65N15, 65N30, 78M10 \end{AMS}
\section{Introduction} Electromagnetic wave propagation plays an essential role in many physical applications, for instance, in the large field of wave optics. In recent years, multiscale and heterogeneous materials have been studied with great interest, e.g., in the context of photonic crystals \cite{JJWM08phc}. These materials can exhibit unusual and astonishing (optical) properties, such as band gaps, perfect transmission or negative refraction \cite{CJJP02negrefraction, EP04negphC, LS15negindex}.
These problems are modeled by Maxwell's equations, which involve the curl-operator and the associated Sobolev space $\VH(\curl)$. Additionally, the coefficients in the problems are rapidly oscillating on a fine scale in the context of photonic crystals and metamaterials. The numerical simulation and approximation of the solution is then a challenging task for the following three reasons. 1.\ As with all multiscale problems, a direct treatment with standard methods is infeasible in many cases because it needs grids which resolve all discontinuities or oscillations of the material parameters. 2.\ Solutions to $\VH(\curl)$-problems with discontinuous coefficients in Lip\-schitz domains can have arbitrarily low regularity, see \cite{BGL13regularitymaxwell, CDN99maxwellinterface, Cost90regmaxwellremark}. Hence, standard methods (see e.g., \cite{Monk} for an overview) suffer from bad convergence rates and fine meshes are needed to have a tolerably small error. 3.\ Due to the large kernel of the curl-operator, we cannot expect that the $L^2$-norm is of lower order than the full $\VH(\curl)$-norm (the energy norm). Thus, it is necessary to consider dual norms or the Helmholtz decomposition to obtain improved a priori error estimates.
In order to deal with the rapidly oscillating material parameters, we consider multiscale methods and thereby aim at a feasible numerical simulation. In general, these methods try to decompose the exact solution into a macroscopic contribution (without oscillations), which can be discretized on a coarse mesh, and a fine-scale contribution. Analytical homogenization for locally periodic $\VH(\curl)$-problems shows that there exists such a decomposition, where the macroscopic part is a good approximation in $H^{-1}$ and an additional fine-scale corrector leads to a good approximation in $L^2$ and $\VH(\curl)$, cf.\ \cite{CH15homerrormaxwell, HOV15maxwellHMM, Well2}. Based on these analytical results, multiscale methods have been developed, e.g., the Heterogeneous Multiscale Method in \cite{HOV15maxwellHMM, CFS17hmmmaxwell} and asymptotic expansion methods in \cite{CZAL10maxwell}. The question is now to what extent such considerations can be extended beyond the (locally) periodic case.
The main contribution of this paper is the numerical homogenization of $\VH(\curl)$-elliptic problems -- beyond the periodic case and without assuming scale separation. The main findings can be summarized as follows. We show that the exact solution can indeed be decomposed into a coarse and fine part, using a suitable interpolation operator. The coarse part gives an optimal approximation in the $H^{-1}$-norm, the best we can hope for in this situation. In order to obtain optimal $L^2$ and $\VH(\curl)$ approximations, we have to add a so called fine-scale corrector or corrector Green's operator. This corrector shows exponential decay and can therefore be truncated to local patches of macroscopic elements, so that it can be computed efficiently.
This technique of numerical homogenization is known as Localized Orthogonal Decomposition (LOD) and it was originally proposed by M{\aa}lqvist and Peterseim \cite{MP14LOD} to solve elliptic multiscale problems through an orthogonalization procedure with a problem-specific \quotes{multiscale} inner product. The LOD has been extensively studied in the context of Lagrange finite elements \cite{HM14LODbdry, HP13oversampl}, where we particularly refer to the contributions written on wave phenomena \cite{AH17LODwaves, BrG16, bgp2017, GP15scatteringPG, OV16a, P15LODhelmholtz, PeS17}. Aside from Lagrange finite elements, an LOD application in Raviart-Thomas spaces was given in \cite{HHM16LODmixed}.
A crucial ingredient for numerical homogenization procedures in the spirit of LODs is the choice of a suitable interpolation operator. As we will see later, in our case we require it to be computable, $\VH(\curl)$-stable and (quasi-)local, and to commute with the curl-operator. Constructing an operator that enjoys such properties is a very delicate task, and many operators have been suggested -- with different backgrounds and applications in mind. The nodal interpolation operator, see e.g.\ \cite[Thm.\ 5.41]{Monk}, and the interpolation operators introduced in \cite{DB05maxwellpintpol} are not well-defined on $\VH(\curl)$ and hence lack the required stability. Various (quasi-)interpolation operators are constructed as a composition of smoothing and some (nodal) interpolation, such as \cite{Chr07intpol, CW08intpol, DH14aposteriorimaxwell, EG15intpol, Sch05multilevel,Sch08aposteriori}. For all of them, the kernel of the operator is hard or even impossible to compute in practice, and they fulfill only the projection \emph{or} the locality property. Finally, we mention the interpolation operator of \cite{EG15intpolbestapprox}, which is local and a projection but does not commute with the exterior derivative. A suitable candidate (and to the authors' best knowledge, the only one) that enjoys all required properties was proposed by Falk and Winther in \cite{FalkWinther2014}.
This paper thereby also shows the applicability of the Falk-Winther operator. In this context, we mention two results which may be of independent interest: a localized regular decomposition of the interpolation error (in the spirit of \cite{Sch08aposteriori}), and a practical implementation of the Falk-Winther operator as a matrix. The last point admits the efficient implementation of our numerical scheme and we refer to \cite{EHMP16LODimpl} for general considerations.
The paper is organized as follows. Section \ref{sec:setting} introduces the general curl-curl-problem under consideration and briefly mentions its relation to Maxwell's equations. In Section \ref{sec:motivation}, we give a short motivation of our approach from periodic homogenization. Section \ref{sec:intpol} introduces the necessary notation for meshes, finite element spaces, and interpolation operators. We introduce the Corrector Green's Operator in Section \ref{sec:LODideal} and show its approximation properties. We localize the corrector operator in Section \ref{sec:LOD} and present the main apriori error estimates. The proofs of the decay of the correctors are given in Section \ref{sec:decaycorrectors}. Details on the definition of the interpolation operator and its implementation are given in Section \ref{sec:intpolimpl}.
The notation $a\lesssim b$ is used for $a\leq Cb$ with a constant $C$ independent of the mesh size $H$ and the oversampling parameter $m$. It will be used in (technical) proofs for simplicity and readability.
\section{Model problem} \label{sec:setting} Let $\Omega\subset \mathbb{R}^3$ be an open, bounded, contractible domain with polyhedral Lipschitz boundary. We consider the following so-called curl-curl-problem: Find $\Vu:\Omega\to\mathbb{C}^3$ such that \begin{equation} \label{eq:curlcurl} \begin{split} \curl(\mu\curl\Vu)+\kappa\Vu&=\Vf\quad\text{in }\Omega,\\ \Vu\times \Vn&=0\quad\text{on }\partial \Omega \end{split} \end{equation} with the outer unit normal $\Vn$ of $\Omega$. Precise assumptions on the parameters $\mu$ and $\kappa$ and the right-hand side $\Vf$ are given in Assumption~\ref{asspt:sesquiform} below, but we implicitly assume that the above problem is a multiscale problem, i.e.\ the coefficients $\mu$ and $\kappa$ are rapidly varying on a very fine scale.
Such curl-curl-problems arise in various formulations and reductions of Maxwell's equations, and we briefly give a few examples. In all cases, our coefficient $\mu$ equals $\tilde{\mu}^{-1}$, where $\tilde{\mu}$ is the magnetic permeability, a material parameter. The right-hand side $\Vf$ is related to (source) current densities. One possible example is Maxwell's equations in a linear conductive medium, subject to Ohm's law, together with the so-called time-harmonic ansatz $\hat{\Vpsi}(x,t)=\Vpsi(x)\exp(-i\omega t)$ for all fields. In this case, one obtains the above curl-curl-problem with $\Vu=\VE$, the electric field, and $\kappa=i\omega\sigma-\omega^2\varepsilon$ related to the electric permittivity $\varepsilon$ and the conductivity $\sigma$ of the material. Another example is given by implicit time-step discretizations of eddy current simulations, where the above curl-curl-problem has to be solved in each time step. In that case $\Vu$ is the vector potential associated with the magnetic field and $\kappa\approx\sigma/\tau$, where $\tau$ is the time-step size. Coefficients with multiscale properties can, for instance, arise in the context of photonic crystals.
Before we define the variational problem associated with our general curl-curl-problem \eqref{eq:curlcurl}, we need to introduce some function spaces. In the following, bold face letters will indicate vector-valued quantities and all functions are complex-valued, unless explicitly mentioned. For any bounded subdomain $G\subset \Omega$, we define the space
\[\VH(\curl, G):=\{ \Vv\in L^2(G, \mathbb{C}^3)|\curl\Vv\in L^2(G, \mathbb{C}^3)\}\] with the inner product $(\Vv, \Vw)_{\VH(\curl, G)}:=(\curl\Vv, \curl\Vw)_{L^2(G)}+(\Vv, \Vw)_{L^2(G)}$ with the complex $L^2$-inner product. We will omit the domain $G$ if it is equal to the full domain $\Omega$. The restriction of $\VH(\curl, \Omega)$ to functions with a zero tangential trace is defined as
\[\VH_0(\curl, \Omega):=\{\Vv\in \VH(\curl, \Omega)|\hspace{3pt} \Vv\times \Vn \vert_{\partial \Omega} =0\}. \] Similarly, we define the space
\[\VH(\Div, G):=\{\Vv\in L^2(G, \mathbb{C}^3)|\Div \Vv\in L^2(G, \mathbb{C})\}\] with corresponding inner product $(\cdot, \cdot)_{\VH(\Div, G)}$. For more details we refer to \cite{Monk}.
We make the following assumptions on the data of our problem. \begin{assumption} \label{asspt:sesquiform} Let $\Vf\in \VH(\Div, \Omega)$ and let $\mu\in L^\infty(\Omega, \mathbb{R}^{3 \times 3})$ and $\kappa\in L^\infty(\Omega, \mathbb{C}^{3 \times 3})$. For any open subset $G\subset\Omega$, we define the sesquilinear form $\CB_{G}: \VH(\curl,G)\times \VH(\curl,G)\to \mathbb{C}$ as \begin{equation} \label{eq:sesquiform} \CB_{G}(\Vv, \Vpsi):=(\mu\curl \Vv, \curl\Vpsi)_{L^2(G)}
+(\kappa\Vv, \Vpsi)_{L^2(G)}, \end{equation} and set $\CB:=\CB_\Omega$. The form $\CB_{G}$ is obviously continuous, i.e.\ there is $C_B>0$ such that \begin{equation*}
|\CB_{G}(\Vv, \Vpsi)|\leq C_B\|\Vv\|_{\VH(\curl,G)}\|\Vpsi\|_{\VH(\curl,G)}
\quad\text{for all }\Vv,\Vpsi\in\VH(\curl,G). \end{equation*} We furthermore assume that $\mu$ and $\kappa$ are such that $\CB: \VH_0(\curl)\times \VH_0(\curl)\to \mathbb{C}$ is $\VH_0(\curl)$-elliptic, i.e.\ there is $\alpha>0$ such that \[
|\CB(\Vv, \Vv)|\geq \alpha\|\Vv\|^2_{\VH(\curl)}
\quad\text{for all }\Vv\in\VH_0(\curl) . \] \end{assumption}
We now give a precise definition of our model problem for this article. Let Assumption \ref{asspt:sesquiform} be fulfilled. We look for $\Vu\in \VH_0(\curl, \Omega)$ such that \begin{equation} \label{eq:problem} \CB(\Vu, \Vpsi)=(\Vf, \Vpsi)_{L^2(\Omega)} \quad\text{for all } \Vpsi\in \VH_0(\curl, \Omega). \end{equation} Existence and uniqueness of a solution to \eqref{eq:problem} follow from the Lax-Milgram-Babu{\v{s}}ka theorem \cite{Bab70fem}.
Assumption \ref{asspt:sesquiform} is fulfilled in the following two important examples mentioned at the beginning: (i) a strictly positive real function in the identity term, i.e.\ $\kappa\in L^\infty(\Omega, \mathbb{R})$, as it occurs in the time-step discretization of eddy-current problems; (ii) a complex $\kappa$ with strictly negative real part and strictly positive imaginary part, as it occurs for time-harmonic Maxwell's equations in a conductive medium. Further possibilities of $\mu$ and $\kappa$ yielding an $\VH(\curl)$-elliptic problem are described in \cite{FR05maxwell}.
\begin{remark} The assumption of contractibility of $\Omega$ is only required to ensure the existence of local regular decompositions later used in the proof of Lemma \ref{lem:localregulardecomp}. We note that this assumption can be relaxed by assuming that $\Omega$ is simply connected in certain local subdomains formed by unions of tetrahedra (i.e. in patches of the form $\UN(\Omega_P)$, using the notation from Lemma \ref{lem:localregulardecomp}). \end{remark}
\section{Motivation of the approach} \label{sec:motivation}
For the sake of the argument, let us consider model problem \eqref{eq:curlcurl} for the case that the coefficients $\mu$ and $\kappa$ are replaced by parametrized multiscale coefficients $\mu_{\delta}$ and $\kappa_\delta$, respectively. Here, $0<\delta \ll 1$ is a small parameter that characterizes the roughness of the coefficient or respectively the speed of the variations, i.e.\ the smaller $\delta$, the faster the oscillations of $\mu_{\delta}$ and $\kappa_\delta$. If we discretize this model problem in the lowest order N{\'e}d{\'e}lec finite element space $\mathring{\CN}(\CT_H)$, we have the classical error estimate of the form \begin{align*}
\inf_{\mathbf{v}_H \in \mathring{\CN}(\CT_H)} \| \Vu_{\delta} - \mathbf{v}_H \|_{\VH(\curl)} \le C H \left( \| \Vu_{\delta} \|_{H^1(\Omega)} + \| \curl \Vu_{\delta} \|_{H^1(\Omega)} \right). \end{align*} However, if the coefficients $\mu_{\delta}$ and $\kappa_\delta$ are discontinuous the necessary regularity for this estimate is not available, see \cite{Cost90regmaxwellremark, CDN99maxwellinterface, BGL13regularitymaxwell}.
On the other hand, if $\mu_{\delta}$ and $\kappa_\delta$ are sufficiently regular but $\delta$ small, then we face the blow-up with $\| \Vu_{\delta} \|_{H^1(\Omega)} + \| \curl \Vu_{\delta} \|_{H^1(\Omega)}\rightarrow \infty$ for $\delta \rightarrow 0$, which makes the estimate useless in practice, unless the mesh size $H$ becomes very small to compensate for the blow-up. This does not change if we replace the $\VH(\curl)$-norm by the $L^2(\Omega)$-norm since both norms are equivalent in our setting.
To understand if there exist any meaningful approximations of $\Vu_{\delta}$ in $\mathring{\CN}(\CT_H)$ (even on coarse meshes), we make a short excursus to classical homogenization theory. For that we assume that the coefficients $\mu_{\delta}(x)=\mu(x/\delta)$ and $\kappa_\delta(x)=\kappa(x/\delta)$ are periodically oscillating with period $\delta$. In this case it is known (cf.\ \cite{CFS17hmmmaxwell, HOV15maxwellHMM, Well2}) that the sequence of exact solutions $\Vu_{\delta}$ converges weakly in $\VH_0(\curl)$ to a \emph{homogenized} function $\Vu_{0}$. Since $\Vu_0 \in \VH_0(\curl)$ is $\delta$-independent and slow, it can be well approximated in $\mathring{\CN}(\CT_H)$. Furthermore, there exists a \emph{corrector} $\mathcal{K}_{\delta}(\Vu_0)$ such that \[\Vu_{\delta} \approx (\id + \mathcal{K}_{\delta})\Vu_0 \] is a good approximation in $\VH(\curl)$, i.e.\ the error converges strongly to zero with \[
\| \Vu_{\delta} -( \Vu_0 + \mathcal{K}_{\delta}(\Vu_0)) \|_{\VH(\curl)} \rightarrow 0 \qquad \mbox{for } \delta \rightarrow 0. \] Here the nature of the corrector is revealed by two estimates. In fact, $\mathcal{K}_{\delta}(\Vu_0)$ admits a decomposition into a gradient part and a part with small amplitude (cf. \cite{HOV15maxwellHMM, CH15homerrormaxwell, Well2}) such that \[
\mathcal{K}_{\delta}(\Vu_0) = \Vz_{\delta} + \nabla \theta_{\delta} \] with \begin{align} \label{hom-corrector-est-1}
\delta^{-1} \| \Vz_{\delta} \|_{L^2(\Omega)} + \| \Vz_{\delta} \|_{\VH(\curl)} &\le C\| \Vu_0 \|_{\VH(\curl)}\\ \label{hom-corrector-est-2}
\text{and}\qquad\delta^{-1} \| \theta_{\delta} \|_{L^2(\Omega)} + \| \nabla \theta_{\delta} \|_{L^2(\Omega)} &\le C \| \Vu_0 \|_{\VH(\curl)}, \end{align} where $C=C(\alpha,C_B)$ only depends on the constants appearing in Assumption \ref{asspt:sesquiform}. First, we immediately see that the estimates imply that $\mathcal{K}_{\delta}(\Vu_0)$ is $\VH(\curl)$-stable in the sense that it holds \begin{align*}
\| \mathcal{K}_{\delta}(\Vu_0) \|_{\VH(\curl)} \le C \| \Vu_0 \|_{\VH(\curl)}. \end{align*} Second, and more interestingly, we see that from the above properties alone we can conclude that $\Vu_0$ \emph{must} be a good approximation of the exact solution in the space $H^{-1}(\Omega,\mathbb{C}^3)$. In fact, using \eqref{hom-corrector-est-1} and \eqref{hom-corrector-est-2} we have for any
$\mathbf{v}\in H^1_0(\Omega,\mathbb{C}^3)$ with $\| \mathbf{v} \|_{H^1(\Omega)}=1$ that \begin{align*}
\left|\int_{\Omega} \mathcal{K}_{\delta}(\Vu_0) \cdot \mathbf{v} \right|=
\left|\int_{\Omega} \Vz_{\delta} \cdot \mathbf{v} - \int_{\Omega} \theta_{\delta} \hspace{2pt} (\nabla \cdot \mathbf{v}) \right| \le
\| \Vz_{\delta} \|_{L^2(\Omega)} + \| \theta_{\delta} \|_{L^2(\Omega)}
\le C \delta \| \Vu_0 \|_{\VH(\curl)}. \end{align*} Consequently we have strong convergence in $H^{-1}(\Omega)$ with \begin{align*}
\| \Vu_{\delta} - \Vu_0 \|_{H^{-1}(\Omega)}
\le \| \Vu_{\delta} - ( \Vu_0 + \mathcal{K}_{\delta}(\Vu_0))\|_{H^{-1}(\Omega)} + \| \mathcal{K}_{\delta}(\Vu_0) \|_{H^{-1}(\Omega)} \overset{\delta \rightarrow 0}{\longrightarrow} 0. \end{align*} We conclude two things. Firstly, even though the coarse space $\mathring{\CN}(\CT_H)$ does not contain good $\VH(\curl)$- or $L^2$-approximations, it still contains meaningful approximations in $H^{-1}(\Omega)$. Secondly, the fact that the coarse part $\Vu_0$ is a good $H^{-1}$-approximation of $\Vu_{\delta}$ is an intrinsic conclusion from the properties of the correction $\mathcal{K}_{\delta}(\Vu_0)$.
In this paper we are concerned with the question of whether the above considerations can be transferred to a discrete setting beyond the assumption of periodicity. More precisely, defining a coarse level of resolution through the space $\mathring{\CN}(\CT_H)$, we ask whether it is possible to find a coarse function $\Vu_H$ and an (efficiently computable) $\VH(\curl)$-stable operator $\mathcal{K}$, such that \begin{align} \label{motivation:int-estimates}
\| \Vu_{\delta} - \Vu_H \|_{H^{-1}(\Omega)} \le C H \qquad \mbox{and} \qquad \| \Vu_{\delta} - (I+\mathcal{K})\Vu_H \|_{\VH(\curl)} \le CH, \end{align} with $C$ being independent of the oscillations in terms of $\delta$. A natural ansatz for the coarse part is $\Vu_H=\pi_H( \Vu_{\delta} )$ for a suitable projection $\pi_H : \VH(\curl) \rightarrow \mathring{\CN}(\CT_H)$. However, from the considerations above we know that $\Vu_H=\pi_H( \Vu_{\delta} )$ can only be a good $H^{-1}$-approximation if the error fulfills a discrete analog to the estimates \eqref{hom-corrector-est-1} and \eqref{hom-corrector-est-2}. Since $\Vu_{\delta} - \pi_H( \Vu_{\delta} )$ is nothing but an interpolation error, we can immediately derive a sufficient condition for our choice of $\pi_H$: we need that, for any $\Vv\in \VH_0(\curl, \Omega)$, there are $\Vz\in \VH^1_0(\Omega)$ and $\theta\in H^1_0(\Omega)$ such that \[\Vv-\pi_H \Vv=\Vz+\nabla \theta\] and \begin{equation} \label{motivation:properties-pi-H} \begin{split}
H^{-1}\|\Vz\|_{L^2(\Omega)}+\|\nabla \Vz\|_{L^2(\Omega)} &\leq C \|\curl\Vv\|_{L^2(\Omega)},\\
H^{-1}\|\theta\|_{L^2(\Omega)}+\|\nabla \theta\|_{L^2(\Omega)}&\leq C \|\curl\Vv\|_{L^2(\Omega)}. \end{split} \end{equation} This is a sufficient condition for $\pi_H$. Note that the above properties are not fulfilled by, e.g., the $L^2$-projection. This reflects the fact that the $L^2$-projection typically does not yield a good $H^{-1}$-approximation in our setting.
We conclude this paragraph by summarizing that if we have a projection $\pi_H$ fulfilling \eqref{motivation:properties-pi-H}, then we can define a coarse scale numerically through the space $\mathring{\CN}(\CT_H) = \mbox{im}(\pi_H)$. On the other hand, to ensure that the corrector inherits the desired decomposition with estimates \eqref{motivation:int-estimates}, it needs to be constructed such that it maps into the kernel of the projection operator, i.e. $\mbox{im}(\mathcal{K})\subset\mbox{ker}(\pi_H)$.
\section{Mesh and interpolation operator} \label{sec:intpol}
In this section we introduce the basic notation for establishing our coarse scale discretization and we will present a projection operator that fulfills the sufficient conditions derived in the previous section.
Let $\CT_H$ be a regular partition of $\Omega$ into tetrahedra, such that $\cup\CT_H=\overline{\Omega}$ and any two distinct $T, T'\in \CT_H$ are either disjoint or share a common vertex, edge or face. We assume the partition $\CT_H$ to be shape-regular and quasi-uniform. The global mesh size is defined as $H:=\max\{ \diam(T)|T\in \CT_{H}\}$. $\CT_H$ is a coarse mesh in the sense that it does not resolve the fine-scale oscillations of the parameters.
Given any (possibly disconnected) subdomain $G\subset \overline{\Omega}$, we define its neighborhood via
\[\UN(G):=\Int(\cup\{T\in \CT_{H}|T\cap\overline{G}\neq \emptyset\})\] and for any $m\geq 2$ the patches \[\UN^1(G):=\UN(G)\qquad \text{and}\qquad\UN^m(G):=\UN(\UN^{m-1}(G)),\] see Figure \ref{fig:patch} for an example. The shape regularity implies that there is a uniform bound $C_{\ol, m}$ on the number of elements in the $m$-th order patch
\[\max_{T\in \CT_{H}}\operatorname{card}\{K\in \CT_{H}|K\subset\overline{\UN^m(T)}\}\leq C_{\ol, m}\] and the quasi-uniformity implies that $C_{\ol, m}$ depends polynomially on $m$. We abbreviate $C_{\ol}:=C_{\ol, 1}$.
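For illustration purposes only, the following small Python sketch indicates how the element indices of a patch $\UN^m(T)$ can be computed from a purely combinatorial description of the mesh. It assumes that the coarse mesh is given as an element-to-vertex index array; all names are placeholders and not part of an actual implementation. In accordance with the definition of $\UN(G)$ above, two elements are considered neighbors whenever they share at least one vertex.
\begin{verbatim}
import numpy as np

def build_vertex_to_elements(elements):
    # Map each vertex index to the set of element indices containing it.
    v2e = {}
    for elem_idx, verts in enumerate(elements):
        for v in verts:
            v2e.setdefault(int(v), set()).add(elem_idx)
    return v2e

def patch(elements, v2e, seed_elements, m):
    # Indices of all elements in the m-th order patch N^m of the seed
    # elements (m >= 1): repeatedly add every element sharing a vertex.
    current = set(seed_elements)
    for _ in range(m):
        grown = set(current)
        for elem_idx in current:
            for v in elements[elem_idx]:
                grown |= v2e[int(v)]
        current = grown
    return current

# Toy example: four tetrahedra described by their vertex indices.
elements = np.array([[0, 1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5], [4, 5, 6, 7]])
v2e = build_vertex_to_elements(elements)
print(sorted(patch(elements, v2e, {0}, 1)))   # N^1(T_0): [0, 1, 2]
print(sorted(patch(elements, v2e, {0}, 2)))   # N^2(T_0): [0, 1, 2, 3]
\end{verbatim}
Since the number of elements in $\UN^m(T)$ grows only polynomially in $m$, these patches remain small compared to the full mesh for moderate $m$.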
\begin{figure}
\caption{Triangle $T$ (in black) and its first and second order patches (additional elements for $\UN(T)$ in dark gray and additional elements for $\UN^2(T)$ in light gray).}
\label{fig:patch}
\end{figure}
The space of $\CT_H$-piecewise affine and continuous functions is denoted by $\CS^1(\CT_H)$. We denote the lowest order N{\'e}d{\'e}lec finite element, cf.\ \cite[Section 5.5]{Monk}, by \[
\mathring{\CN}(\CT_H):=\{\Vv\in \VH_0(\curl)|\forall T\in \CT_H: \Vv|_T(\Vx)=\Va_T\times\Vx+\Vb_T \text{ with }\Va_T, \Vb_T\in\mathbb{C}^3\} \] and the space of Raviart--Thomas fields by \[
\mathring{\CR\CT}(\CT_H):=\{\Vv\in \VH_0(\Div)|\forall T\in \CT_H: \Vv|_T(\Vx)=\Va_T\cdot\Vx+\Vb_T \text{ with }\Va_T\in \mathbb{C}, \Vb_T\in\mathbb{C}^3\}. \] As motivated in Section \ref{sec:motivation} we require an $\VH(\curl)$-stable interpolation operator $\pi_H^E:\VH_0(\curl)\to \mathring{\CN}(\CT_H)$ that allows for a decomposition with the estimates such as \eqref{motivation:properties-pi-H}. However, from the view point of numerical homogenization where corrector problems should be localized to small subdomains, we also need that $\pi_H^E$ is local and (as we will see later) that it fits into a commuting diagram with other stable interpolation operators for lowest order $H^1(\Omega)$, $\VH(\Div)$ and $L^2(\Omega)$ elements. As discussed in the introduction, the only suitable candidate is the Falk-Winther interpolation operator $\pi_H^E$ \cite{FalkWinther2014}. We postpone a precise definition of $\pi_H^E$ to Section \ref{sec:intpolimpl} and just summarize its most important properties in the following proposition. \begin{proposition}\label{p:proj-pi-H-E} There exists a projection $\pi_H^E:\VH_0(\curl)\to \mathring{\CN}(\CT_H)$ with the following local stability properties: For all $\Vv\in \VH_0(\curl)$ and all $T\in \CT_H$ it holds that \begin{align} \label{eq:stabilityL2}
\|\pi_H^E(\Vv)\|_{L^2(T)}&\leq C_\pi \bigl(\|\Vv\|_{L^2(\UN(T))}+H\|\curl\Vv\|_{L^2(\UN(T))}\bigr),\\* \label{eq:stabilitycurl}
\|\curl\pi_H^E(\Vv)\|_{L^2(T)}&\leq C_\pi \|\curl\Vv\|_{L^2(\UN(T))}. \end{align} Furthermore, there exists a projection $\pi_H^F:\VH_0(\Div)\to \mathring{\mathcal{RT}}(\CT_H)$ to the Raviart-Thomas space such that the following commutation property holds \[\curl\pi_H^E(\Vv)=\pi_H^F(\curl \Vv).\] \end{proposition} \begin{proof}
See \cite{FalkWinther2014} for a proof, which can be adapted to
the present case of homogeneous boundary values. \end{proof}
As explained in the motivation in Section \ref{sec:motivation}, we also require that $\pi_H^E$ allows for a regular decomposition in the sense of \eqref{motivation:properties-pi-H}. In general, regular decompositions are an important tool for the study of $\VH(\curl)$-elliptic problems and state that a vector field $\Vv\in \VH_0(\curl)$ can be split -- in a non-unique way -- into a gradient and a (regular) remainder in $\VH^1$, see \cite{Hipt02FEem, PZ02Schwarz}. In contrast to the Helmholtz decomposition, this splitting is not orthogonal with respect to the $L^2$-inner product. If the function $\Vv\in \VH_0(\curl)$ is additionally known to be in the kernel of a suitable quasi-interpolation, a modified decomposition can be derived that is localized and $H$-weighted. In particular, the weighting with $H$ allows for estimates similar to the one stated in \eqref{motivation:properties-pi-H}. The first proof of such a modified decomposition was given by Sch\"oberl \cite{Sch08aposteriori}. In the following we shall use his results and the locality of the Falk-Winther operator to recover a similar decomposition for the projection $\pi_H^E$. More precisely, we have the following lemma, which is crucial for our analysis.
\begin{lemma} \label{lem:localregulardecomp} Let $\pi_H^E$ denote the projection from Proposition \ref{p:proj-pi-H-E}. For any $\Vv\in \VH_0(\curl, \Omega)$, there are $\Vz\in \VH^1_0(\Omega)$ and $\theta\in H^1_0(\Omega)$ such that \[\Vv-\pi_H^E(\Vv)=\Vz+\nabla \theta\] with the local bounds for every $T\in \CT_H$ \begin{equation} \label{eq:regulardecomp} \begin{split}
H^{-1}\|\Vz\|_{L^2(T)}+\|\nabla \Vz\|_{L^2(T)}&\leq C_z\|\curl\Vv\|_{L^2(\UN^3(T))},\\
H^{-1}\|\theta\|_{L^2(T)}+\|\nabla \theta\|_{L^2(T)}&\leq C_\theta\bigl(\|\Vv\|_{L^2(\UN^3(T))}+H\|\curl\Vv\|_{L^2(\UN^3(T))}\bigr), \end{split} \end{equation} where $\nabla \Vz$ stands for the Jacobian matrix of $\Vz$. Here $C_z$ and $C_\theta$ are generic constants that only depend on the regularity of the coarse mesh. \end{lemma} Observe that \eqref{eq:regulardecomp} implies the sufficient condition \eqref{motivation:properties-pi-H} formulated earlier.
\begin{proof} Let $\Vv\in \VH_0(\curl, \Omega)$. Denote by $I_H^S:\VH_0(\curl,\Omega)\to \mathring{\CN}(\CT_H)$ the quasi-interpolation operator introduced by Sch\"oberl in \cite{Sch08aposteriori}. It is shown in \cite[Theorem 6]{Sch08aposteriori} that there exists a decomposition \begin{equation} \label{eq:schoeberlstab-p1}
\Vv-I_H^S(\Vv) =
\sum_{\substack{P \text{ vertex}\\ \text{of }\CT_H}} \Vv_P \end{equation} where, for any vertex $P$, $\Vv_P\in \VH_0(\curl, \Omega_P)$ and $\Omega_P$ the support of the local hat function associated with $P$. Moreover, \cite[Theorem 6]{Sch08aposteriori} provides the stability estimates \begin{equation}\label{eq:schoeberlstab}
\| \Vv_P \|_{L^2(\Omega_P)} \lesssim \|\Vv\|_{L^2(\UN(\Omega_P))} \quad\text{and}\quad
\|\curl \Vv_P \|_{L^2(\Omega_P)}
\lesssim \|\curl \Vv\|_{L^2(\UN(\Omega_P))} \end{equation} for any vertex $P$. With these results we deduce, since $\pi_H^E$ is a projection onto the finite element space, that \begin{align*} \Vv-\pi_H^E(\Vv) =\Vv-I_H^S(\Vv)-\pi_H^E(\Vv-I_H^S\Vv) =\sum_{\substack{P \text{ vertex}\\ \text{of }\CT_H}}(\id-\pi_H^E)(\Vv_P). \end{align*} Due to the locality of $\pi_H^E$, we have $(\id-\pi_H^E)(\Vv_P)\in \VH_0(\curl, \UN(\Omega_P))$. The local stability of $\pi_H^E$, \eqref{eq:stabilityL2} and \eqref{eq:stabilitycurl}, and the stability \eqref{eq:schoeberlstab} imply \begin{align*}
\|(\id-\pi_H^E)(\Vv_P)\|_{L^2(\UN(\Omega_P))}&\lesssim \|\Vv\|_{L^2(\UN(\Omega_P))}+H\|\curl\Vv\|_{L^2(\UN(\Omega_P))},\\*
\|\curl(\id-\pi_H^E)(\Vv_P)\|_{L^2(\UN(\Omega_P))}&\lesssim \|\curl\Vv\|_{L^2(\UN(\Omega_P))}. \end{align*} We can now apply the regular splitting to $\Vv_P$ (cf.\ \cite{PZ02Schwarz}), i.e.\ there are $\Vz_P\in \VH^1_0(\UN(\Omega_P))$, $\theta_P\in H^1_0(\UN(\Omega_P))$ such that $\Vv_P=\Vz_P+\nabla \theta_P$ with the estimates \begin{align*}
H^{-1}\|\Vz_P\|_{L^2(\UN(\Omega_P))}+\|\nabla \Vz_P\|_{L^2(\UN(\Omega_P))}&\lesssim \|\curl((\id-\pi_H^E)(\Vv_P))\|_{L^2(\UN(\Omega_P))},\\*
H^{-1}\|\theta_P\|_{L^2(\UN(\Omega_P))}+\|\nabla \theta_P\|_{L^2(\UN(\Omega_P))}&\lesssim \|(\id-\pi_H^E)(\Vv_P)\|_{L^2(\UN(\Omega_P))}. \end{align*} Set $\Vz=\sum_P\Vz_P$ and $\theta=\sum_P\theta_P$, which is a regular decomposition of $\Vv-\pi_H^E(\Vv)$. The local estimates follow from the foregoing estimates for $\Vv_P$ and the decomposition \eqref{eq:schoeberlstab-p1}, which yields \begin{align*}
H^{-1}\|\Vz\|_{L^2(T)}+\|\nabla \Vz\|_{L^2(T)}&\leq \sum_{\substack{P \text{ vertex}\\ \text{of } T}} \left(
H^{-1}\| \Vz_P \|_{L^2(\Omega_P)}+\|\nabla \Vz_P \|_{L^2(\Omega_P)} \right)\\ &\lesssim
\sum_{\substack{P \text{ vertex}\\ \text{of } T}} \|\curl (\id-\pi_H^E)(\Vv_P)\|_{L^2(\UN(\Omega_P))}
\lesssim \|\curl\Vv\|_{L^2(\UN^3(T))}. \end{align*} The local estimate for $\theta$ follows analogously. \end{proof}
\section{The Corrector Green's Operator} \label{sec:LODideal}
In this section we introduce an ideal \emph{Corrector Green's Operator} that allows us to derive a decomposition of the exact solution into a coarse part (which is a good approximation in $H^{-1}(\Omega,\mathbb{C}^3)$) and two different corrector contributions. For simplicity, we let from now on $\mathcal{L} : \VH_0(\curl) \rightarrow \VH_0(\curl)^{\prime}$ denote the differential operator associated with the sesquilinear form $\CB(\cdot,\cdot)$, i.e. $\mathcal{L}(v)(w)=\CB(v,w)$.
Using the Falk-Winther interpolation operator $\pi_H^E$ for the N{\'e}d{\'e}lec elements, we split the space $\VH_0(\curl)$ into the finite-dimensional coarse space $\mathring{\CN}(\CT_H)=\mbox{im}(\pi_H^E)$ and a corrector space given as the kernel of $\pi_H^E$, i.e.\ we set $\VW:=\ker (\pi_H^E)\subset \VH_0(\curl)$. This yields the direct sum splitting $\VH_0(\curl)=\mathring{\CN}(\CT_H)\oplus\VW$. Note that $\VW$ is closed since it is the kernel of a continuous (i.e. $\VH(\curl)$-stable) operator. With this, the ideal Corrector Green's Operator is defined as follows.
\begin{definition}[Corrector Green's Operator] For $\mathbf{F} \in \VH_0(\curl)^\prime$, we define the Corrector Green's Operator \begin{align} \label{cor-greens-op} \mathcal{G}: \VH_0(\curl)^{\prime} \rightarrow \VW \hspace{40pt} \mbox{by} \hspace{40pt} \CB(\mathcal{G}(\mathbf{F}) , \Vw )=\mathbf{F}(\Vw)\qquad \mbox{for all } \Vw\in \VW. \end{align} It is well-defined by the Lax-Milgram-Babu{\v{s}}ka theorem, which is applicable since $\CB(\cdot,\cdot)$ is $\VH_0(\curl)$-elliptic and since $\VW$ is a closed subspace of $\VH_0(\curl)$. \end{definition} Using the Corrector Green's Operator we obtain the following decomposition of the exact solution.
\begin{lemma}[Ideal decomposition] \label{lemma:ideal-decompos} The exact solution $\Vu\in\VH_0(\curl)$ to \eqref{eq:problem} and $\Vu_H:=\pi_H^E(\Vu)$ admit the decomposition \[ \Vu = \Vu_H - (\mathcal{G} \circ \mathcal{L})(\Vu_H) + \mathcal{G}(\Vf). \] \end{lemma} \begin{proof} Since $\VH_0(\curl)=\mathring{\CN}(\CT_H)\oplus\VW$, we can write $\Vu$ uniquely as \[ \Vu = \pi_H^E(\Vu) + (\id - \pi_H^E)(\Vu) = \Vu_H + (\id - \pi_H^E)(\Vu), \] where $(\id - \pi_H^E)(\Vu) \in \VW$ by the projection property of $\pi_H^E$. Using the differential equation for test functions $\Vw\in \VW$ yields that \begin{align*} \CB( (\id - \pi_H^E)(\Vu) , \Vw )= - \CB( \Vu_H , \Vw ) + (\Vf, \Vw)_{L^2(\Omega)} = - \CB( (\mathcal{G} \circ \mathcal{L})(\Vu_H) , \Vw ) + \CB( \mathcal{G}(\Vf) , \Vw ). \end{align*} Since this holds for all $\Vw\in \VW$ and since $\mathcal{G}(\Vf) - (\mathcal{G} \circ \mathcal{L})(\Vu_H) \in \VW$, we conclude that \[ (\id - \pi_H^E)(\Vu) = \mathcal{G}(\Vf) - (\mathcal{G} \circ \mathcal{L})(\Vu_H), \] which finishes the proof. \end{proof} The Corrector Green's Operator has the following approximation and stability properties, which reveal that its contribution is always negligible in the $\VH(\Div)^\prime$-norm, and also negligible in the $\VH(\curl)$-norm if it is applied to a function in $\VH(\Div)$.
\begin{lemma}[Ideal corrector estimates] \label{lemma:corrector-props} Any $\mathbf{F} \in \VH_0(\curl)^{\prime}$ satisfies \begin{align} \label{green-est-Hcurl-1}
H \| \mathcal{G}(\mathbf{F}) \|_{\VH(\curl)} + \| \mathcal{G}(\mathbf{F}) \|_{\VH(\Div)^{\prime}} \le C H \alpha^{-1} \| \mathbf{F} \|_{\VH_0(\curl)^{\prime}}. \end{align} If $\mathbf{F} = \mathbf{f} \in \VH(\Div)$ we even have \begin{align} \label{green-est-Hdiv-1}
H \| \mathcal{G}(\mathbf{f}) \|_{\VH(\curl)} + \| \mathcal{G}(\mathbf{f}) \|_{\VH(\Div)^{\prime}} \le C H^2 \alpha^{-1} \| \mathbf{f} \|_{\VH(\Div)}. \end{align} Here, the constant $C$ depends only on the maximum number of neighbors of a coarse element and on the generic constants appearing in Lemma \ref{lem:localregulardecomp}. \end{lemma}
Note that this result is still valid if we replace the $\VH(\Div)^{\prime}$-norm by the $H^{-1}(\Omega,\mathbb{C}^3)$-norm.
\begin{proof}
The stability estimate $\| \mathcal{G}(\mathbf{F}) \|_{\VH(\curl)} \le \alpha^{-1} \| \mathbf{F} \|_{\VH_0(\curl)^{\prime}}$ is obvious. Next, with $\mathcal{G}(\mathbf{F})\in\VW$ and Lemma \ref{lem:localregulardecomp} we have \begin{equation}\label{green-est-Hdiv-1-proof} \begin{aligned}
\| \mathcal{G}(\mathbf{F}) \|_{\VH(\Div)^{\prime}} &=
\underset{\| \mathbf{v} \|_{\VH(\Div)}=1}{\sup_{\mathbf{v}\in \VH(\Div)}} \left|\int_{\Omega} \Vz \cdot \mathbf{v} - \int_{\Omega} \theta (\nabla \cdot \mathbf{v}) \right| \\ & \le
( \| \Vz \|_{L^2(\Omega)}^2 + \| \theta \|_{L^2(\Omega)}^2 )^{1/2}
\le C H \| \mathcal{G}(\mathbf{F}) \|_{\VH(\curl)} \le C H \alpha^{-1} \| \mathbf{F} \|_{\VH_0(\curl)^{\prime}}, \end{aligned} \end{equation} which proves \eqref{green-est-Hcurl-1}. Note that this estimate exploited $\theta \in H^{1}_0(\Omega)$, which is why we do not require the function $\mathbf{v}$ to have a vanishing normal trace. Let us now consider the case that $\mathbf{F} = \mathbf{f} \in \VH(\Div)$. We have by \eqref{green-est-Hdiv-1-proof} that \begin{align*}
\alpha \| \mathcal{G}( \mathbf{f} ) \|_{\VH(\curl)}^2 \le \| \mathcal{G}( \mathbf{f}) \|_{\VH(\Div)^{\prime}}
\| \mathbf{f} \|_{\VH(\Div)} \le C H
\| \mathcal{G}(\mathbf{f}) \|_{\VH(\curl)} \| \mathbf{f} \|_{\VH(\Div)}. \end{align*}
We conclude $\| \mathcal{G}( \mathbf{f} ) \|_{\VH(\curl)} \le C H \alpha^{-1} \| \mathbf{f} \|_{\VH(\Div)}$. Finally, we can use this estimate again in \eqref{green-est-Hdiv-1-proof} to obtain \begin{align*}
\| \mathcal{G}(\Vf) \|_{\VH(\Div)^{\prime}} \le C H \| \mathcal{G}(\Vf) \|_{\VH(\curl)} \le C H^2 \alpha^{-1} \| \mathbf{f} \|_{\VH(\Div)}. \end{align*} This finishes the proof. \end{proof} An immediate conclusion of Lemmas \ref{lemma:ideal-decompos} and \ref{lemma:corrector-props} is the following.
\begin{conclusion} \label{conclusion-ideal-corr-est} Let $\Vu$ denote the exact solution to \eqref{eq:curlcurl} for $ \mathbf{f} \in \VH(\Div)$. Then with the coarse part $\Vu_H:=\pi_H^E(\Vu)$ and corrector operator $\mathcal{K} := - \mathcal{G} \circ \mathcal{L}$ it holds \begin{align*}
H^{-1}\| \Vu - (\id + \mathcal{K})\Vu_H \|_{\VH(\Div)^{\prime}} +
\| \Vu - (\id + \mathcal{K})\Vu_H \|_{\VH(\curl)} + \| \Vu - \Vu_H \|_{\VH(\Div)^{\prime}} \le C H \| \mathbf{f} \|_{\VH(\Div)} . \end{align*} Here, $C$ only depends on $\alpha$, the mesh regularity and on the constants appearing in Lemma \ref{lem:localregulardecomp}. \end{conclusion} \begin{proof}
The estimates for $\Vu - (\id + \mathcal{K})\Vu_H =\mathcal{G}(\Vf)$ directly follow
from \eqref{green-est-Hdiv-1}.
For the estimate of $\Vu - \Vu_H =\mathcal{K}\Vu_H + \mathcal{G} \Vf$, observe that \eqref{green-est-Hcurl-1} and
Proposition~\ref{p:proj-pi-H-E} imply
\begin{equation*}
\| \mathcal{K}\Vu_H \|_{\VH(\Div)^{\prime}}
\lesssim H
\| \CL\Vu_H \|_{\VH_0(\curl)^{\prime}}
\lesssim
H
\| \Vu_H \|_{\VH(\curl)}
=
H
\| \pi_H^E \Vu \|_{\VH(\curl)}
\lesssim
H
\| \Vu \|_{\VH(\curl)} . \end{equation*}
Thus, the proof follows from the stability of the problem
and the triangle inequality. \end{proof}
It only remains to derive an equation that characterizes $(\id + \mathcal{K})\Vu_H$ as the unique solution of a variational problem. This is done in the following theorem.
\begin{theorem} We consider the setting of Conclusion \ref{conclusion-ideal-corr-est}. Then $\Vu_H=\pi_H^E(\Vu) \in \mathring{\CN}(\CT_H)$ is characterized as the unique solution to \begin{align} \label{ideal-lod} \CB( \hspace{2pt} (\id + \mathcal{K})\Vu_H , (\id + \mathcal{K}^{\ast})\Vv_H \hspace{1pt} ) = ( \Vf, (\id + \mathcal{K}^{\ast})\Vv_H )_{L^2(\Omega)} \qquad \mbox{for all } \Vv_H \in \mathring{\CN}(\CT_H). \end{align} Here, $\mathcal{K}^{\ast}$ is the adjoint operator to $\mathcal{K}$. The sesquilinear form $\CB( \hspace{1pt} (\id + \mathcal{K})\hspace{3pt}\cdot \hspace{2pt}, (\id + \mathcal{K}^{\ast})\hspace{2pt}\cdot \hspace{2pt} )$ is $\VH(\curl)$-elliptic on $\mathring{\CN}(\CT_H)$. \end{theorem} Observe that we have the simplification $\mathcal{K}^{\ast}=\mathcal{K}$ if the differential operator $\mathcal{L}$ is self-adjoint, as is typically the case for $\VH(\curl)$-problems.
\begin{proof} Since Lemma \ref{lemma:ideal-decompos} guarantees $\Vu = \Vu_H - (\mathcal{G} \circ \mathcal{L})(\Vu_H) + \mathcal{G}(\Vf)$, the weak formulation \eqref{eq:problem} yields \begin{align*} \CB( \Vu_H - (\mathcal{G} \circ \mathcal{L})(\Vu_H) + \mathcal{G}(\Vf) , \Vv_H ) = ( \Vf, \Vv_H )_{L^2(\Omega)} \qquad \mbox{for all } \Vv_H \in \mathring{\CN}(\CT_H). \end{align*} We observe that by definition of $\mathcal{G}$ we have \begin{align*} \CB( \mathcal{G}(\Vf) , \Vv_H ) = ( \Vf , (\mathcal{G} \circ \mathcal{L})^{\ast}\Vv_H )_{L^2(\Omega)} \end{align*} and \begin{align*} \CB( \Vu_H - (\mathcal{G} \circ \mathcal{L})(\Vu_H) , (\mathcal{G} \circ \mathcal{L})^{\ast}\Vv_H ) = 0. \end{align*} Combining the three equations shows that $(\id + \mathcal{K})\Vu_H$ is a solution to \eqref{ideal-lod}. The uniqueness follows from the following norm equivalence \begin{align*}
\| \Vu_H \|_{\VH(\curl)} = \| \pi_H^E((\id + \mathcal{K})\Vu_H) \|_{\VH(\curl)} \le C \| (\id + \mathcal{K})\Vu_H \|_{\VH(\curl)}
\le C \| \Vu_H \|_{\VH(\curl)}. \end{align*} This is also the reason why the $\VH(\curl)$-ellipticity of $\CB( \cdot, \cdot)$ implies the $\VH(\curl)$-ellipticity of $\CB( \hspace{1pt} (\id + \mathcal{K})\hspace{3pt}\cdot \hspace{2pt}, (\id + \mathcal{K}^{\ast})\hspace{2pt}\cdot \hspace{2pt} )$ on $\mathring{\CN}(\CT_H)$. \end{proof}
\textbf{Numerical homogenization}. Let us summarize the most important findings and relate them to (numerical) homogenization. We defined a \emph{homogenization scale} through the coarse FE space $\mathring{\CN}(\CT_H)$. We proved that there exists a numerically homogenized function $\Vu_H \in \mathring{\CN}(\CT_H)$ which approximates the exact solution well in $\VH(\Div)^{\prime}$ with \begin{align*}
\| \Vu - \Vu_H \|_{\VH(\Div)^{\prime}} \le C H \| \mathbf{f} \|_{\VH(\Div)}. \end{align*} From the periodic homogenization theory (cf. Section \ref{sec:motivation}) we know that this is the best we can expect and that $\Vu_H$ is typically not a good $L^2$-approximation due to the large kernel of the curl-operator. Furthermore, we showed the existence of an $\VH(\curl)$-stable corrector operator $\mathcal{K}: \mathring{\CN}(\CT_H) \rightarrow \VW$ that corrects the homogenized solution in such a way that the approximation is also accurate in $\VH(\curl)$ with \begin{align*}
\| \Vu - (\id + \mathcal{K})\Vu_H \|_{\VH(\curl)} \le C H \| \mathbf{f} \|_{\VH(\Div)}. \end{align*} Since $\mathcal{K} = - \mathcal{G} \circ \mathcal{L}$, we know that we can characterize $\mathcal{K} (\Vv_H) \in \VW$ as the unique solution to the (ideal) corrector problem \begin{align} \label{ideal-corrector-problem} \CB( \mathcal{K} (\Vv_H) , \Vw )=- \CB( \Vv_H , \Vw ) \qquad \mbox{for all } \Vw\in \VW. \end{align} The above result shows that $(\id + \mathcal{K})\Vu_H$ approximates the analytical solution with linear rate without any assumptions on the regularity of the problem or the structure of the coefficients that define $\CB(\cdot,\cdot)$. Also it does not assume that the mesh resolves the possible fine-scale features of the coefficient. On the other hand, the ideal corrector problem \eqref{ideal-corrector-problem} is global, which significantly limits its practical usability in terms of real computations.
However, as we will see next, the corrector Green's function associated with problem \eqref{cor-greens-op} shows an exponential decay measured in units of $H$. This property will allow us to split the global corrector problem \eqref{ideal-corrector-problem} into several smaller problems on subdomains, similar to how we encounter it in classical homogenization theory. We show the exponential decay of the corrector Green's function indirectly through the properties of its corresponding Green's operator $\mathcal{G}$. The localization is established in Section \ref{sec:LOD}, whereas we prove the decay in Section \ref{sec:decaycorrectors}.
\section{Quasi-local numerical homogenization} \label{sec:LOD}
In this section we describe how the ideal corrector $\mathcal{K}$ can be approximated by a sum of local correctors, without destroying the overall approximation order. This is of central importance for efficient computability. Furthermore, it reveals that the new corrector is a quasi-local operator, which is in line with homogenization theory.
We start with quantifying the decay properties of the Corrector Green's Operator in Section \ref{subsec:idealapprox}. In Section \ref{subsec:LODlocal} we apply the result to our numerical homogenization setting and state the error estimates for the \quotes{localized} corrector operator. We close with a few remarks on a fully discrete realization of the localized corrector operator in Section \ref{subsec:discreteLOD}.
\subsection{Exponential decay and localized corrector} \label{subsec:idealapprox}
The property that $\mathcal{K}$ can be approximated by local correctors is directly linked to the decay properties of the Green's function associated with problem \eqref{cor-greens-op}. These decay properties can be quantified explicitly by measuring distances between points in units of the coarse mesh size $H$. We have the following result, which states -- loosely speaking -- at which distance from the support of a source term $\mathbf{F}$ the $\VH(\curl)$-norm of $\mathcal{G}(\mathbf{F})$ becomes negligibly small. For that, recall the definition of the element patches from the beginning of Section \ref{sec:intpol}, where $\UN^m(T)$ denotes the patch that consists of a coarse element $T \in \CT_H$ and $m$ layers of coarse elements around it. A proof of the following proposition is given in Section \ref{sec:decaycorrectors}.
\begin{proposition} \label{prop:decaycorrector1} Let $T\in \CT_H$ denote a coarse element and $m\in \mathbb{N}$ a number of layers. Furthermore, let $\mathbf{F}_T \in \VH_0(\curl)^{\prime}$ denote a local source functional in the sense that $\mathbf{F}_T(\Vv)=0$ for all $\Vv \in \VH_0(\curl)$ with $\supp(\Vv) \subset \Omega \setminus T$. Then there exists $0<\tilde{\beta}<1$, independent of $H$, $T$, $m$ and $\mathbf{F}_T$, such that \begin{equation} \label{eq:decaycorrector1}
\| \mathcal{G}(\mathbf{F}_T) \|_{\VH(\curl, \Omega\setminus \UN^m(T))}\lesssim \tilde{\beta}^m\| \mathbf{F}_T \|_{\VH_0(\curl)^{\prime}}. \end{equation} \end{proposition}
In order to use this result to approximate $\mathcal{K}(\Vv_H) = - (\mathcal{G} \circ \mathcal{L})\Vv_H$ (which has a nonlocal argument), we introduce, for any $T\in\CT_H$, localized differential operators $\CL_T:\VH(\curl,T)\to\VH(\curl,\Omega)'$ with \[\langle \mathcal{L}_T(\Vu), \Vv \rangle := \CB_T(\Vu, \Vv ),\] where $\CB_T(\cdot, \cdot )$ denotes the restriction of $\CB(\cdot, \cdot )$ to the element $T$. By linearity of $\mathcal{G}$ we have that \[\mathcal{G} \circ \mathcal{L} = \sum_{T \in \CT_H} \mathcal{G} \circ \mathcal{L}_T\] and consequently we can write \[ \mathcal{K}( \Vv_H ) = \sum_{T \in \CT_H} \mathcal{G}( \mathbf{F}_T ), \qquad \mbox{with } \mathbf{F}_T:= - \mathcal{L}_T(\Vv_H). \] Obviously, $\mathcal{G}( \mathbf{F}_T )$ fits into the setting of Proposition \ref{prop:decaycorrector1}. This suggests truncating the individual computations of $\mathcal{G}( \mathbf{F}_T )$ to a small patch $\UN^m(T)$ and then collecting the results to construct a global approximation for the corrector. Typically, $m$ is referred to as the \emph{oversampling parameter}. The strategy is detailed in the following definition.
\begin{definition}[Localized Corrector Approximation] \label{de:loc-correctors} For an element $T\in \CT_H$ we define the element patch $\Omega_T:=\UN^m(T)$ of order $m\in \mathbb{N}$. Let $\mathbf{F} \in \VH_0(\curl)^{\prime}$ be the sum of local functionals with $\mathbf{F} =\sum_{T\in \CT_H} \mathbf{F}_T$, where $\mathbf{F}_T \in \VH_0(\curl)^{\prime}$ is as in Proposition \ref{prop:decaycorrector1}. Furthermore, let $\VW(\Omega_T)\subset \VW$ denote the space of functions from $\VW$ that vanish outside $\Omega_T$, i.e.
\[\VW(\Omega_T)=\{\Vw\in\VW|\Vw=0 \text{ \textrm{outside} }\Omega_T\}.\] We call $\mathcal{G}_{T,m}( \mathbf{F}_T ) \in \VW(\Omega_T)$ the \emph{localized corrector} if it solves \begin{equation} \label{eq:correctorlocal} \CB( \mathcal{G}_{T,m}( \mathbf{F}_T ) , \Vw )=\mathbf{F}_T(\Vw)\qquad \mbox{for all } \Vw\in \VW(\Omega_T). \end{equation} With this, the global corrector approximation is given by \begin{align*} \mathcal{G}_{m}(\mathbf{F}) := \sum_{T\in \CT_H} \mathcal{G}_{T,m}( \mathbf{F}_T ). \end{align*} \end{definition} Observe that problem \eqref{eq:correctorlocal} is only formulated on the patch $\Omega_T$ and that it admits a unique solution by the Lax-Milgram-Babu{\v{s}}ka theorem.
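Although the implementation of the method is not the topic of this section (details on the required interpolation matrix are given in Section \ref{sec:intpolimpl}, and we refer to \cite{EHMP16LODimpl} for general implementation aspects), it may help the reader to see how a problem of the form \eqref{eq:correctorlocal} can be treated algebraically. One common way to realize the kernel constraint defining $\VW(\Omega_T)$ is a saddle-point formulation in which the matrix of the interpolation operator acts as constraint matrix. The following Python/SciPy sketch is purely schematic: the matrices are stand-ins that are assumed to be already assembled on a fine discretization of the patch, and all names are placeholders.
\begin{verbatim}
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

def solve_local_corrector(A_patch, P_patch, rhs_patch):
    # Solve B(w, v) = F_T(v) for all v with P_patch @ v = 0 via the
    # saddle-point system [[A, P^H], [P, 0]] [w; lam] = [rhs; 0].
    # A_patch:   stand-in matrix of B on the fine space of the patch,
    # P_patch:   stand-in matrix of the interpolation (coarse x fine DOFs),
    # rhs_patch: load vector representing F_T.
    n_fine, n_coarse = A_patch.shape[0], P_patch.shape[0]
    K = sp.bmat([[A_patch, P_patch.conj().T],
                 [P_patch, None]], format="csc")
    b = np.concatenate([rhs_patch, np.zeros(n_coarse)])
    sol = spla.spsolve(K, b)
    return sol[:n_fine]      # the corrector; sol[n_fine:] are multipliers

# Tiny synthetic stand-in (no actual finite element assembly):
A = 2.0 * sp.identity(6, format="csc")
P = sp.csc_matrix(np.array([[1.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 1.0, 1.0]]))
f = np.ones(6)
w = solve_local_corrector(A, P, f)
print(np.allclose(P @ w, 0.0))  # the computed corrector lies in ker(P)
\end{verbatim}
In an actual realization, only the fine degrees of freedom located in $\Omega_T=\UN^m(T)$ enter these matrices, which is what keeps the local problems small and independent of each other.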
Based on decay properties stated in Proposition \ref{prop:decaycorrector1}, we can derive the following error estimate for the difference between the exact corrector $\mathcal{G}(\mathbf{F})$ and its approximation $\mathcal{G}_{m}(\mathbf{F})$ obtained by an $m$th level truncation. The proof of the following result is again postponed to Section \ref{sec:decaycorrectors}.
\begin{theorem} \label{thm:errorcorrectors} We consider the setting of Definition \ref{de:loc-correctors} with ideal Green's Corrector $\mathcal{G}(\mathbf{F})$ and its $m$th level truncated approximation $\mathcal{G}_{m}(\mathbf{F})$. Then there exist constants $C_{d}>0$ and $0<\beta<1$ (both independent of $H$ and $m$) such that \begin{align} \label{eq:errorcorrector}
\| \mathcal{G}(\mathbf{F}) - \mathcal{G}_{m}(\mathbf{F}) \|_{\VH(\curl)}&\leq C_{d} \sqrt{C_{\ol,m}}\,\beta^m \left( \sum_{T\in \CT_H} \| \mathbf{F}_T \|_{\VH_0(\curl)^{\prime}}^2 \right)^{1/2} \end{align} and \begin{align} \label{eq:errorcorrector-2}
\| \mathcal{G}(\mathbf{F}) - \mathcal{G}_{m}(\mathbf{F}) \|_{\VH(\Div)^{\prime}}&\leq C_{d} \sqrt{C_{\ol,m}}\, \beta^m H \left( \sum_{T\in \CT_H} \| \mathbf{F}_T \|_{\VH_0(\curl)^{\prime}}^2 \right)^{1/2}. \end{align} \end{theorem} As a direct conclusion from Theorem \ref{thm:errorcorrectors} we obtain the main result of this paper that we present in the next subsection.
\subsection{The quasi-local corrector and homogenization} \label{subsec:LODlocal}
Following the above motivation we split the ideal corrector $\mathcal{K}(\Vv_H) =- (\mathcal{G} \circ \mathcal{L})\Vv_H$ into a sum of quasi-local contributions of the form $\sum_{T \in \CT_H} (\mathcal{G} \circ \mathcal{L}_T)\Vv_H$. Applying Theorem \ref{thm:errorcorrectors}, we obtain the following result.
\begin{conclusion} \label{conclusion-main-result} Let $\mathcal{K}_m := - \sum_{T \in \CT_H} (\mathcal{G}_{T,m} \circ \mathcal{L}_T): \mathring{\CN}(\CT_H) \rightarrow \VW$ denote the localized corrector operator obtained by truncation of $m$th order. Then it holds \begin{align} \label{conclusion-main-result-est}
\inf_{\mathbf{v}_H \in \mathring{\CN}(\CT_H)} \| \Vu - (\id + \mathcal{K}_m)\mathbf{v}_H \|_{\VH(\curl)} \le
C \left( H + \sqrt{C_{\ol,m}} \beta^m \right) \| \Vf \|_{\VH(\Div)}. \end{align} \end{conclusion} Note that even though the ideal corrector $\mathcal{K}$ is a non-local operator, we can approximate it by a quasi-local corrector $\mathcal{K}_m$. Here, the quasi-locality is seen by the fact that, if $\mathcal{K}$ is applied to a function $\Vv_H$ with local support, the image $\mathcal{K}(\Vv_H)$ will typically still have a global support in $\Omega$. On the other hand, if $\mathcal{K}_m$ is applied to a locally supported function, the support will only increase by a layer with thickness of order $mH$. \begin{proof}[Proof of Conclusion \ref{conclusion-main-result}] With $\mathcal{K}_m = - \sum_{T \in \CT_H} (\mathcal{G}_{T,m} \circ \mathcal{L}_T)$ we apply Conclusion~\ref{conclusion-ideal-corr-est} and Theorem \ref{thm:errorcorrectors} to obtain \begin{equation*} \begin{aligned} &
\inf_{\mathbf{v}_H \in \mathring{\CN}(\CT_H)} \| \Vu - (\id + \mathcal{K}_m)\mathbf{v}_H \|_{\VH(\curl)} \le
\| \Vu - (\id + \mathcal{K})\mathbf{u}_H \|_{\VH(\curl)} + \|(\mathcal{K} - \mathcal{K}_m)\mathbf{u}_H \|_{\VH(\curl)}\\ & \qquad\qquad\qquad\qquad
\le C H \| \Vf \|_{\VH(\Div)} + C \sqrt{C_{\ol,m}}\, \beta^m \left( \sum_{T\in \CT_H} \| \mathcal{L}_T(\mathbf{u}_H) \|_{\VH_0(\curl)^{\prime}}^2 \right)^{1/2}, \end{aligned} \end{equation*}
where we observe with $\| \mathcal{L}_T(\mathbf{v}_H) \|_{\VH_0(\curl)^{\prime}} \le C \| \mathbf{v}_H \|_{\VH(\curl,T)}$ that \begin{align*}
\sum_{T\in \CT_H} \| \mathcal{L}_T(\mathbf{u}_H) \|_{\VH_0(\curl)^{\prime}}^2 \le C \| \mathbf{u}_H \|_{\VH(\curl)}^2
= C \| \pi_H^E(\Vu) \|_{\VH(\curl)}^2 \le C \| \Vu \|_{\VH(\curl)}^2 \le C \| \Vf \|_{\VH(\Div)}^2. \end{align*} \end{proof}
Conclusion \ref{conclusion-main-result} has immediate implications from the computational point of view. First, we observe that $\mathcal{K}_m$ can be computed by solving local decoupled problems. Considering a basis $\{ \boldsymbol{\Phi}_k | \hspace{3pt} 1 \le k \le N \}$ of $\mathring{\CN}(\CT_H)$, we need to determine $\mathcal{K}_m(\boldsymbol{\Phi}_k)$. For that, we consider all $T \in \CT_H$ with $T \subset \supp(\boldsymbol{\Phi}_k)$ and solve for $\mathcal{K}_{T,m}(\boldsymbol{\Phi}_k) \in \VW(\hspace{1pt}\UN^m(T)\hspace{1pt})$ with \begin{align} \label{loc-corrector-problems} \CB_{\UN^m(T)}( \mathcal{K}_{T,m}(\boldsymbol{\Phi}_k), \Vw ) = - \CB_{T}( \boldsymbol{\Phi}_k , \Vw ) \qquad \mbox{for all } \Vw \in \VW(\hspace{1pt}\UN^m(T)\hspace{1pt}). \end{align} The global corrector approximation is now given by \[ \mathcal{K}_m(\boldsymbol{\Phi}_k) = \underset{ T \subset \supp(\boldsymbol{\Phi}_k) }{\sum_{ T \in \CT_H }}
\mathcal{K}_{T,m}(\boldsymbol{\Phi}_k). \] Next, we observe that selecting the localization parameter $m$ such that \[
m\gtrsim \lvert \log H\rvert \big/ \lvert \log \beta\rvert, \] we have with Conclusion \ref{conclusion-main-result} that \begin{align}
\label{curl-est-m-logH}\inf_{\mathbf{v}_H \in \mathring{\CN}(\CT_H)} \| \Vu - (\id + \mathcal{K}_m)\mathbf{v}_H \|_{\VH(\curl)} \le
C H \| \Vf \|_{\VH(\Div)}, \end{align} which is of the same order as for the ideal corrector $\mathcal{K}$. Consequently, we can consider the Galerkin finite element method, where we seek $\Vu_{H,m} \in \mathring{\CN}(\CT_H)$ such that \begin{align*} \CB( (\id + \mathcal{K}_m)\Vu_{H,m} , (\id + \mathcal{K}_m)\mathbf{v}_H ) = (\mathbf{f} , (\id + \mathcal{K}_m)\mathbf{v}_H )_{L^2(\Omega)} \qquad \mbox{for all } \Vv_H \in \mathring{\CN}(\CT_H). \end{align*} Since a Galerkin method yields the $\VH(\curl)$-quasi-best approximation of $\Vu$ in the space \linebreak[4]$(\id + \mathcal{K}_m)\mathring{\CN}(\CT_H)$, we have with \eqref{curl-est-m-logH} that \begin{align*}
\| \Vu - (\id + \mathcal{K}_m)\Vu_{H,m} \|_{\VH(\curl)} \le C H \| \Vf \|_{\VH(\Div)} \end{align*} and we have with \eqref{green-est-Hcurl-1}, \eqref{eq:errorcorrector-2} and the $\VH(\curl)$-stability of $\pi_H^E$ that \begin{align*}
\| \Vu - \Vu_{H,m} \|_{\VH(\Div)^{\prime}} \le C H \| \Vf \|_{\VH(\Div)}. \end{align*} This result is a homogenization result in the sense that it yields a coarse function $\Vu_{H,m}$ that approximates the exact solution in $\VH(\Div)^{\prime}$. Furthermore, it yields an appropriate (quasi-local) corrector $\mathcal{K}_m(\Vu_{H,m})$ that is required for an accurate approximation in $\VH(\curl)$.
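For a rough quantitative illustration of this choice of the localization parameter (an illustrative example of ours, with numbers that are not taken from the analysis above): for $\beta=0.5$ and $H=2^{-5}$ the condition reads $m\gtrsim \lvert\log 2^{-5}\rvert/\lvert\log 0.5\rvert=5$, so patches consisting of roughly five layers of coarse elements already suffice (up to the constants hidden in the notation) to retain the linear rate in \eqref{curl-est-m-logH}.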
\begin{remark}[Refined estimates] With a more careful proof, the constants in the estimate of Conclusion \ref{conclusion-main-result} can be specified as \begin{eqnarray*} \label{eq:errorLOD-refined}
\lefteqn{\inf_{\mathbf{v}_H \in \mathring{\CN}(\CT_H)} \| \Vu - (\id + \mathcal{K}_m)\mathbf{v}_H \|_{\VH(\curl)}}\\
\nonumber&\leq& \alpha^{-1}(1+H)\bigl(H\max\{C_z, C_\theta\} \sqrt{C_{\ol,3}}+C_d C_\pi C_B^2\sqrt{C_{\ol,m}C_{\ol}}\, \beta^m\bigr)\|\Vf\|_{\VH(\Div)}, \end{eqnarray*} where $\alpha$ and $C_B$ are as in Assumption \ref{asspt:sesquiform}, $C_{d}$ is the constant appearing in the decay estimate \eqref{eq:errorcorrector}, $C_\pi$ is as in Proposition \ref{p:proj-pi-H-E}, $C_z$ and $C_\theta$ are from \eqref{eq:regulardecomp} and $C_{\ol,m}$ as detailed at the beginning of Section \ref{sec:intpol}. Note that if $m$ is large enough so that $\UN^m(T)=\Omega$ for all $T \in \CT_H$, we have as a refinement of Conclusion \ref{conclusion-ideal-corr-est} that \begin{eqnarray*}
\inf_{\mathbf{v}_H \in \mathring{\CN}(\CT_H)} \| \Vu - (\id + \mathcal{K})\mathbf{v}_H \|_{\VH(\curl)} \leq \alpha^{-1}(1+H)\bigl(H\max\{C_z, C_\theta\} \sqrt{C_{\ol,3}} \bigr)\|\Vf\|_{\VH(\Div)}. \end{eqnarray*} \end{remark}
\subsection{A fully discrete localized multiscale method} \label{subsec:discreteLOD} The procedure described in the previous section is not yet \quotes{ready to use} for a practical computation, as the local corrector problems \eqref{loc-corrector-problems} involve the infinite-dimensional spaces $\VW(\Omega_T)$. Hence, we require an additional fine-scale discretization of the corrector problems (just as the cell problems in periodic homogenization theory can typically not be solved analytically).
For a fully discrete formulation, we introduce a second shape-regular partition $\CT_h$ of $\Omega$ into tetrahedra. This partition may be non-uniform and is assumed to be obtained from $\CT_H$ by at least one global refinement. It is a fine discretization in the sense that $h<H$ and that $\CT_h$ resolves all fine-scale features of the coefficients. Let $\mathring{\CN}(\CT_h)\subset\VH_0(\curl)$ denote the space of N{\'e}d{\'e}lec elements with respect to the partition $\CT_h$. We then introduce the space
\[\VW_h(\Omega_T):=\VW(\Omega_T)\cap\mathring{\CN}(\CT_h)=\{\Vv_h\in\mathring{\CN}(\CT_h)|\Vv_h=0\text{ outside }\Omega_T, \pi_H^E(\Vv_h)=0\}\] and discretize the corrector problem
\eqref{loc-corrector-problems} with this new space. The corresponding correctors are denoted by $\mathcal{K}_{T,m,h}$ and $\mathcal{K}_{m,h}$. With this modification we can prove analogously to the error estimate \eqref{conclusion-main-result-est} that it holds \begin{align} \label{conclusion-main-result-est-h}
\inf_{\mathbf{v}_H \in \mathring{\CN}(\CT_H)} \| \Vu_h - (\id + \mathcal{K}_{m,h})\mathbf{v}_H \|_{\VH(\curl)} \le
C \left( H + \sqrt{C_{\ol,m}} \tilde{\beta}^m \right) \| \Vf \|_{\VH(\Div)}, \end{align} where $\Vu_h$ is the Galerkin approximation of $\Vu$ in the discrete fine space $\mathring{\CN}(\CT_h)$. If $\CT_h$ is fine enough, we can assume that $\Vu_h$ is a good $\VH(\curl)$-approximation to the true solution $\Vu$. Consequently, it is justified to formulate a fully discrete (localized) multiscale method by seeking $\Vu_{H,h,m}^{\ms}:=(\id+\mathcal{K}_{m,h})\Vu_H$ with $\Vu_H\in \mathring{\CN}(\CT_H)$ such that \begin{equation} \label{eq:discreteLOD} \CB(\Vu_{H,h,m}^{\ms}, (\id+\mathcal{K}_{m,h})\Vv_H)=(\Vf, (\id+\mathcal{K}_{m,h})\Vv_H)_{L^2(\Omega)}\qquad\mbox{for all } \Vv_H\in\mathring{\CN}(\CT_H). \end{equation} As before, we can conclude from \eqref{conclusion-main-result-est-h} together with the choice $m\gtrsim \lvert \log H\rvert/\lvert\log \beta\rvert$, that it holds \begin{align*}
\| \Vu_h - \Vu_{H,h,m}^{\ms} \|_{\VH(\curl)} +
\| \Vu_h - \pi_H^E \Vu_{H,h,m}^{\ms} \|_{\VH(\Div)^{\prime}}
\le C H \| \Vf \|_{\VH(\Div)}. \end{align*} Thus, the additional fine-scale discretization does not affect the overall error estimates and we therefore concentrate in the proofs (for simplicity) on the semi-discrete case as detailed in Sections \ref{subsec:idealapprox} and \ref{subsec:LODlocal}. Compared to the fully-discrete case, only some small modifications are needed in the proofs for the decay of the correctors. These modifications are outlined at the end of Section \ref{sec:decaycorrectors}. Note that $\Vu_h$ is not needed in the practical implementation of the method.
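To indicate how the fully discrete method could be organized in practice, we add the following Python sketch. It is a schematic illustration of ours, not the implementation used for this paper: the fine-scale system matrix \texttt{B\_h} (representing $\CB$ on $\mathring{\CN}(\CT_h)$), the constraint matrix \texttt{C} realizing $\pi_H^E(\Vv_h)=0$ (cf.\ Section \ref{sec:intpolimpl}), the prolongation \texttt{I\_Hh} of the coarse N\'ed\'elec basis to the fine mesh, the load vector \texttt{f\_h} and the patch data are all assumed to be provided by a finite element library, and every name is a placeholder.
\begin{verbatim}
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

def solve_local_corrector(B_h, C, rhs, patch):
    """One localized corrector problem; 'patch' lists the fine degrees of
    freedom that vanish outside U^m(T); the constraint pi_H^E = 0 is
    imposed with Lagrange multipliers (saddle-point formulation)."""
    Bp = B_h[patch, :][:, patch]
    Cp = C[:, patch].tocsr()
    active = np.unique(Cp.nonzero()[0])       # drop inactive constraints
    Cp = Cp[active, :]
    n, m = Bp.shape[0], Cp.shape[0]
    K = sp.bmat([[Bp, Cp.conj().T], [Cp, None]], format="csc")
    b = np.concatenate([rhs[patch], np.zeros(m, dtype=complex)])
    x = spla.spsolve(K, b)
    q = np.zeros(B_h.shape[0], dtype=complex)
    q[patch] = x[:n]
    return q

def lod_solve(B_h, C, I_Hh, f_h, patches):
    """Localized multiscale method: patches[k] lists, for the coarse basis
    function Phi_k, pairs (patch_dofs, rhs) with rhs = -B_{h,T} Phi_k
    assembled element-wise (cf. the local corrector problems above)."""
    n_h, n_H = I_Hh.shape
    K_mh = sp.lil_matrix((n_h, n_H), dtype=complex)
    for k in range(n_H):
        corr = np.zeros(n_h, dtype=complex)
        for patch_dofs, rhs in patches[k]:
            corr += solve_local_corrector(B_h, C, rhs, patch_dofs)
        K_mh[:, k] = corr.reshape(-1, 1)
    V = (I_Hh + K_mh).tocsr()                 # multiscale space (id + K_{m,h})
    A_H = (V.conj().T @ B_h @ V).toarray()
    b_H = V.conj().T @ f_h
    u_H = np.linalg.solve(A_H, b_H)
    return u_H, V @ u_H                       # coarse part and u_{H,h,m}^ms
\end{verbatim}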
\section{Proof of the decay for the Corrector Green's Operator} \label{sec:decaycorrectors} In this section, we prove Proposition \ref{prop:decaycorrector1} and Theorem \ref{thm:errorcorrectors}. Since the latter one is based on the first result, we start with proving the exponential decay of the Green's function associated with $\mathcal{G}$. Recall that we quantified the decay indirectly through estimates of the form \begin{equation*}
\| \mathcal{G}(\mathbf{F}_T) \|_{\VH(\curl, \Omega\setminus \UN^m(T))}\lesssim \tilde{\beta}^m\| \mathbf{F}_T \|_{\VH_0(\curl)^{\prime}}, \end{equation*} where $\mathbf{F}_T$ is a $T$-local functional and $0<\tilde{\beta}<1$.
\begin{proof}[Proof of Proposition \ref{prop:decaycorrector1}]
Let $\eta\in \CS^1(\CT_H)\subset H^1(\Omega)$ be a scalar-valued, piece-wise linear and globally continuous cut-off function with \begin{equation*} \eta=0\qquad \text{in}\quad \UN^{m-6}(T)\qquad \qquad\qquad \eta=1\qquad \text{in}\quad \Omega\setminus\UN^{m-5}(T). \end{equation*}
Denote $\CR=\supp(\nabla \eta)$ and $\Vphi:=\mathcal{G}(\mathbf{F}_T) \in \VW$. In the following we use $\UN^k(\CR)=\UN^{m-5+k}(T)\setminus \UN^{m-6-k}(T)$. Note that $\|\nabla \eta\|_{L^\infty(\CR)}\sim H^{-1}$. Furthermore, let $\Vphi=\Vphi-\pi_H^E\Vphi=\Vz+\nabla \theta$ be the splitting from Lemma \ref{lem:localregulardecomp}. We obtain with $\eta\leq 1$, the coercivity, and the product rule that \begin{align*}
\alpha\|\Vphi\|^2_{\VH(\curl, \Omega\setminus\UN^m(T))}&\leq \bigl|(\mu\curl\Vphi, \eta\curl\Vphi)_{L^2(\Omega)}+(\kappa\Vphi, \eta\Vphi)_{L^2(\Omega)}\bigr|\\
&=\bigl|(\mu\curl\Vphi, \eta\curl\Vz)_{L^2(\Omega)}+(\kappa\Vphi, \eta\nabla\theta+\eta\Vz)_{L^2(\Omega)}\bigr|\\ &\leq M_1 +M_2+M_3+M_4+M_5, \end{align*} where \begin{align*}
& M_1:=\Bigl|\bigl(\mu\curl\Vphi, \curl(\id-\pi_H^E)(\eta\Vz)\bigr)_{L^2(\Omega)} &&\hspace{-23pt}+\enspace \bigl(\kappa \Vphi, (\id-\pi_H^E)
(\eta\Vz+\nabla(\eta\theta))\bigr)_{L^2(\Omega)}\Bigr|, \\
&M_2:=\Bigl|\bigl(\mu\curl\Vphi, \curl\pi_H^ E(\eta\Vz)\bigr)_{L^2(\Omega)}\Bigr|, &&
M_3:=\Bigl|\bigl(\kappa \Vphi,\pi_H^E(\eta\Vz+\nabla(\eta\theta))\bigr)_{L^2(\Omega)}\Bigr|, \\ &
M_4:=\Bigl|\bigl(\mu\curl \Vphi, \nabla \eta\times \Vz\bigr)_{L^2(\Omega)}\Bigr|, &&
M_5:=\Bigl|\bigl(\kappa\Vphi, \theta\nabla \eta\bigr)_{L^2(\Omega)}\Bigr|. \end{align*} We used the product rule $\curl(\eta\Vz)=\nabla\eta\times \Vz+\eta\curl\Vz$ here.
We now estimate the five terms separately. Let $\Vw:=(\id-\pi_H^E)(\eta\Vz+\nabla(\eta\theta))$ and note that (i) $\curl\Vw=\curl(\id-\pi_H^E)(\eta\Vz)$, (ii) $\Vw\in \VW$, (iii) $\supp\Vw\subset\Omega\setminus T$. Using the definition of the Corrector Green's Operator in \eqref{cor-greens-op} and the fact that $\mathbf{F}_T(\Vw)=0$ yields $M_1=0$.
For $M_2$, note that the commuting property of the projections $\pi^E$ and $\pi^F$ implies $\curl\pi_H^E(\Vz)=\pi_H^F(\curl \Vz)=\pi_H^F(\curl\Vphi)=\curl\pi_H^E\Vphi=0$ because $\Vphi\in \VW$. Using the stability of $\pi_H^E$ \eqref{eq:stabilitycurl} and Lemma \ref{lem:localregulardecomp}, we can estimate $M_2$ as \begin{align*}
M_2&\lesssim \|\curl\Vphi\|_{L^2(\UN(\CR))}\|\curl\pi_H^E(\eta\Vz)\|_{L^2(\UN(\CR))}\lesssim \|\curl\Vphi\|_{L^2(\UN(\CR))}\|\curl(\eta\Vz)\|_{L^2(\UN^2(\CR))}\\
&\lesssim \|\curl\Vphi\|_{L^2(\UN(\CR))}\Bigl(\|\nabla\eta\|_{L^\infty(\CR)}\|\Vz\|_{L^2(\CR)}
+\|\eta\|_{L^\infty(\UN^2(\CR))}\|\curl\Vz\|_{L^2(\UN^{m-3}(T)\setminus \UN^{m-6}(T))}\Bigr)\\
&\lesssim \|\curl\Vphi\|_{L^2(\UN(\CR))}\|\curl\Vphi\|_{L^2(\UN^{m}(T)\setminus \UN^{m-9}(T))}. \end{align*}
In a similar manner, we obtain for $M_3$ that \begin{align*}
M_3&\lesssim\|\Vphi\|_{L^2(\UN(\CR))}\Bigl(\|\eta \Vz\|_{L^2(\UN^2(\CR))}+\|\nabla(\eta\theta)\|_{L^2(\UN^2(\CR))}+H\|\curl(\eta\Vz)\|_{L^2(\UN^2(\CR))}\Bigr)\\
&\lesssim \|\Vphi\|_{L^2(\UN(\CR))}\Bigl(\|\Vphi\|_{L^2(\UN^{m}(T)\setminus \UN^{m-9}(T))}+H\|\curl\Vphi\|_{L^2(\UN^{m}(T)\setminus \UN^{m-9}(T))}\Bigr). \end{align*}
Simply using Lemma \ref{lem:localregulardecomp}, we deduce for $M_4$ and $M_5$ \begin{align*}
M_4&\lesssim \|\curl\Vphi\|_{L^2(\CR)}\|\curl\Vphi\|_{L^2(\UN^3(\CR))}, \\
M_5&\lesssim \|\Vphi\|_{L^2(\CR)}
(\|\Vphi\|_{L^2(\UN^3(\CR))}
+ H\|\curl \Vphi\|_{L^2(\UN^3(\CR))}). \end{align*} All in all, this gives \begin{equation*}
\|\Vphi\|^2_{\VH(\curl, \Omega\setminus \UN^m(T))}\leq
\tilde{C} \|\Vphi\|^2_{\VH(\curl, \UN^{m}(T)\setminus \UN^{m-9}(T) )} \end{equation*} for some $\tilde{C}>0$. Moreover, it holds that \begin{equation*}
\|\Vphi\|^2_{\VH(\curl, \Omega\setminus \UN^m(T))} =
\|\Vphi\|^2_{\VH(\curl, \Omega\setminus \UN^{m-9}(T))}
- \|\Vphi\|^2_{\VH(\curl, \UN^m(T)\setminus \UN^{m-9}(T))}. \end{equation*} Thus, we obtain finally with $\tilde{\beta}_{\mathrm{pre}}:=(1+\tilde{C}^{-1})^{-1}<1$, a re-iteration of the above argument, and Lemma~\ref{lemma:corrector-props} that \begin{equation*}
\|\Vphi\|^2_{\VH(\curl, \Omega\setminus \UN^m(T))}\lesssim \tilde{\beta}_{\mathrm{pre}}^{\lfloor m/9\rfloor}\|\Vphi\|^2_{\VH(\curl)}\lesssim \tilde{\beta}_{\mathrm{pre}}^{\lfloor m/9\rfloor}\| \mathbf{F}_T \|^2_{\VH_0(\curl)^\prime}. \end{equation*} Algebraic manipulations give the assertion. \end{proof}
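For the reader's convenience we indicate one way to carry out the final algebraic step (a sketch of ours; constants are not tracked): taking square roots and using $\lfloor m/9\rfloor\ge m/9-1$ together with $0<\tilde{\beta}_{\mathrm{pre}}<1$ gives
\begin{align*}
\|\Vphi\|_{\VH(\curl, \Omega\setminus \UN^m(T))} \lesssim \tilde{\beta}_{\mathrm{pre}}^{\lfloor m/9\rfloor /2}\, \| \mathbf{F}_T \|_{\VH_0(\curl)^\prime} \le \tilde{\beta}_{\mathrm{pre}}^{-1/2} \bigl( \tilde{\beta}_{\mathrm{pre}}^{1/18} \bigr)^{m}\, \| \mathbf{F}_T \|_{\VH_0(\curl)^\prime},
\end{align*}
so that $\tilde{\beta}:=\tilde{\beta}_{\mathrm{pre}}^{1/18}<1$ is an admissible choice of the decay rate in Proposition \ref{prop:decaycorrector1}.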
\begin{proof}[Proof of Theorem \ref{thm:errorcorrectors}] We start by proving the following local estimate \begin{align} \label{eq:errorcorrectorlocal}
\| \mathcal{G}( \mathbf{F}_T )-\mathcal{G}_{T,m}( \mathbf{F}_T ) \|_{\VH(\curl)}&\leq C_1 \tilde{\beta}^m \| \mathbf{F}_T \|_{\VH_0(\curl)^{\prime}} \end{align} for some constant $C_1>0$ and $0<\tilde{\beta}<1$. Let $\eta\in \CS^1(\CT_H)$ be a piece-wise linear and globally continuous cut-off function with \begin{align*} \eta=0 \qquad \text{in} \quad \Omega\setminus \UN^{m-1}(T)\qquad\qquad\qquad\eta=1\qquad\text{in}\quad \UN^{m-2}(T). \end{align*} Due to C{\'e}a's Lemma we have \begin{align*}
\| \mathcal{G}( \mathbf{F}_T ) - \mathcal{G}_{T,m}( \mathbf{F}_T ) \|_{\VH(\curl)}\lesssim \inf_{\Vw_{T,m}\in \VW(\Omega_T)}\| \mathcal{G}( \mathbf{F}_T ) -\Vw_{T,m}\|_{\VH(\curl)}. \end{align*} We use the splitting of Lemma \ref{lem:localregulardecomp} and write $\mathcal{G}( \mathbf{F}_T )=(\id-\pi_H^E)(\mathcal{G}( \mathbf{F}_T ))=\Vz+\nabla\theta$. Then we choose $\Vw_{T,m}=(\id-\pi_H^E)(\eta\Vz+\nabla(\eta\theta))\in \VW(\Omega_T)$ and derive with the stability of $\pi_H^E$ and \eqref{eq:regulardecomp} \begin{align*}
\|\mathcal{G}( \mathbf{F}_T )-\mathcal{G}_{T,m}( \mathbf{F}_T )\|_{\VH(\curl)}&\lesssim \|(\id-\pi_H^E)(\mathcal{G}( \mathbf{F}_T )-\eta\Vz - \nabla(\eta\theta))\|_{\VH(\curl)}\\
&=\|(\id-\pi_H^E)((1-\eta)\Vz+\nabla((1-\eta)\theta))\|_{\VH(\curl)}\\
&\lesssim \|(1-\eta)\Vz\|_{L^2(\Omega\setminus\{\eta=1\})}+\|\nabla((1-\eta)\theta)\|_{L^2(\Omega\setminus\{\eta=1\})}\\*
&\quad+(1+H)\|\curl((1-\eta)\Vz)\|_{L^2(\Omega\setminus\{\eta=1\})}\\
&\lesssim (1+H)\,\| \mathcal{G}( \mathbf{F}_T )\|_{\VH(\curl, \UN^3(\Omega\setminus\{\eta=1\}))}. \end{align*} Combination with Proposition \ref{prop:decaycorrector1} gives estimate \eqref{eq:errorcorrectorlocal}.
To prove the main estimate of Theorem \ref{thm:errorcorrectors}, i.e.\ estimate \eqref{eq:errorcorrector}, we define, for a given simplex $T\in\CT_H$, the piece-wise linear, globally continuous cut-off function $\eta_T\in \CS^1(\CT_H)$ via \begin{align*} \eta_T=0\qquad \text{in}\quad \UN^{m+1}(T)\qquad\qquad\qquad \eta_T=1\qquad\text{in}\quad \Omega\setminus\UN^{m+2}(T). \end{align*} Denote $\Vw:=(\mathcal{G}-\mathcal{G}_m)(\mathbf{F})=\sum_{T \in \CT_H} \Vw_T$ with $\Vw_T:=(\mathcal{G}-\mathcal{G}_{T,m})(\mathbf{F}_T)$ and split $\Vw$ according to Lemma \ref{lem:localregulardecomp} as $\Vw=\Vw-\pi_H^E(\Vw)=\Vz+\nabla\theta$. Due to the ellipticity of $\CB$ and its sesquilinearity, we have \begin{align*}
\alpha\|\Vw\|^2_{\VH(\curl)} \leq
\Bigl|\sum_{T\in\CT_H}\CB(\Vw_T,\Vw)\Bigr|\leq \sum_{T\in \CT_H}|\CB(\Vw_T,\Vz+\nabla\theta )| \leq \sum_{T\in \CT_H} (A_T + B_T) \end{align*} where, for any $T\in\CT_H$, we abbreviate \begin{equation*}
A_T:=|\CB(\Vw_T,(1-\eta_T)\Vz+\nabla((1-\eta_T)\theta))|
\quad\text{and}\quad
B_T:=|\CB(\Vw_T,\eta_T\Vz+\nabla(\eta_T\theta))| . \end{equation*}
For the term $A_T$, we derive by using the properties of the cut-off function and the regular decomposition \eqref{eq:regulardecomp} \begin{align*}
A_T&\lesssim\|\Vw_T\|_{\VH(\curl)}\|(1-\eta_T)\Vz+\nabla((1-\eta_T)\theta)\|_{\VH(\curl, \{\eta_T\neq 1\})}\\
&\leq \|\Vw_T\|_{\VH(\curl)}\,(1+H)\,\|\Vw\|_{\VH(\curl, \UN^3(\{\eta_T\neq 1\}))}. \end{align*} The term $B_T$ can be split as \begin{align*}
B_T\leq |\CB(\Vw_T,(\id-\pi_H^E)(\eta_T\Vz+\nabla(\eta_T\theta)))|+|\CB(\Vw_T,\pi_H^E(\eta_T\Vz+\nabla(\eta_T\theta)))|. \end{align*} Denoting $\Vphi:=(\id-\pi_H^E)(\eta_T\Vz+\nabla(\eta_T\theta))$, we observe $\Vphi\in \VW$ and $\supp\Vphi\subset\Omega\setminus \UN^m(T)$. Because $\Vphi\in\VW$ with support outside $T$, we have $\CB(\mathcal{G}(\mathbf{F}_T),\Vphi)=\mathbf{F}_T(\Vphi)=0$. Since $\Vphi$ has support outside $\UN^m(T)=\Omega_T$, but $\mathcal{G}_{T,m}(\mathbf{F}_T)\in \VW(\Omega_T)$, we also have $\CB(\mathcal{G}_{T,m}(\mathbf{F}_T),\Vphi)=0$. All in all, this means $\CB(\Vw_T , \Vphi )=0$. Using the stability of $\pi_H^E$ \eqref{eq:stabilityL2}, \eqref{eq:stabilitycurl} and the regular decomposition \eqref{eq:regulardecomp}, we obtain \begin{align*}
B_T &\leq |\CB(\Vw_T,\pi_H^E(\eta_T\Vz+\nabla(\eta_T\theta)))|\\*
&\lesssim\|\Vw_T\|_{\VH(\curl)}\bigl(\|\eta_T\Vz+\nabla(\eta_T\theta)\|_{L^2(\UN^2(\{\eta_T\neq 1\}))}+(1+H)\|\curl(\eta_T\Vz)\|_{L^2(\UN^2(\{\eta_T\neq 1\}))}\bigr)\\
&\lesssim \|\Vw_T\|_{\VH(\curl)}(1+H)\,\|\Vw\|_{\VH(\curl, \UN^5(\{\eta_T\neq 1\}))}. \end{align*} Combining the estimates for $A_T$ and $B_T$ and observing that $\{\eta_T\neq 1\} =\UN^{m+2}(T)$, we deduce \begin{align*}
\alpha\|\Vw\|_{\VH(\curl)}^2&\lesssim \sum_{T\in\CT_H}\|\Vw_T\|_{\VH(\curl)}\,\|\Vw\|_{\VH(\curl, \UN^{m+7}(T))}
\lesssim \sqrt{C_{\ol, m}}\, \|\Vw\|_{\VH(\curl)}\sqrt{\sum_{T\in\CT_H}\|\Vw_T\|_{\VH(\curl)}^2}. \end{align*} Combination with estimate \eqref{eq:errorcorrectorlocal} finishes the proof of \eqref{eq:errorcorrector}. Finally, estimate \eqref{eq:errorcorrector-2} follows with \begin{align*}
\|\Vw\|_{\VH(\Div)^{\prime}} \leq C H \|\Vw\|_{\VH(\curl)}. \end{align*} \end{proof}
\textbf{Changes for the fully discrete localized method.}\hspace{2pt} Let us briefly consider the fully-discrete setting described in Section \ref{subsec:discreteLOD}. Here we note that, up to a modification of the constants, Theorem \ref{thm:errorcorrectors} also holds for the difference
$(\mathcal{G}_h - \mathcal{G}_{h,m})(\mathbf{F})$, where $\mathcal{G}_h(\mathbf{F})$ is the Galerkin approximation of $\mathcal{G}(\mathbf{F})$ in the discrete space $\VW_h:=\{\Vv_h\in\mathring{\CN}(\CT_h)|\pi_H^E(\Vv_h)=0\}$ and where $\mathcal{G}_{h,m}(\mathbf{F})$ is defined analogously to $\mathcal{G}_{m}(\mathbf{F})$ but where $\VW_h(\Omega_T):=\{ \Vw_h \in \VW_h| \hspace{3pt} \Vw_h \equiv 0 \mbox{ in } \Omega \setminus \Omega_T \}$ replaces $\VW(\Omega_T)$ in the local problems. Again, the central observation is a decay result similar to Proposition \ref{prop:decaycorrector1}, but now for $\mathcal{G}_{h}(\mathbf{F}_T)$. A few modifications to the proof have to be made, though: The product of the cut-off function $\eta$ and the regular decomposition $\Vz+\nabla\theta$ does not lie in $\mathring{\CN}(\CT_h)$. Therefore, an additional interpolation operator into $\mathring{\CN}(\CT_h)$ has to be applied. Here it is tempting to just use the nodal interpolation operator and its stability on piece-wise polynomials, since $\eta \hspace{2pt} \mathcal{G}_{h}(\mathbf{F}_T)$ is a piece-wise (quadratic) polynomial. However, the regular decomposition employed is no longer piece-wise polynomial and we hence have to use the Falk-Winther operator $\pi_h^E$ onto the fine space $\mathring{\CN}(\CT_h)$ here. This means that we have the following modified terms in the proof of Proposition \ref{prop:decaycorrector1}: \begin{align*}
\tilde{M}_1&:=\Bigl|\bigl(\mu\curl\Vphi, \curl(\id-\pi_H^E)\pi_h^E(\eta\Vz)\bigr)_{L^2(\Omega)} &&\hspace{-17pt}+\enspace
\bigl(\kappa \Vphi, (\id-\pi_H^E)\pi_h^E(\eta\Vz+\nabla(\eta\theta))\bigr)_{L^2(\Omega)}\Bigr|, \\
\tilde{M}_2&:=\Bigl|\bigl(\mu\curl\Vphi, \curl\pi_H^ E\pi_h^E(\eta\Vz)\bigr)_{L^2(\Omega)}\Bigr|, &&
\tilde{M}_3:=\Bigl|\bigl(\kappa \Vphi,\pi_H^E\pi_h^E(\eta\Vz+\nabla(\eta\theta))\bigr)_{L^2(\Omega)}\Bigr|. \end{align*} They can be treated similarly to $M_1$, $M_2$ and $M_3$, using in addition the stability of $\pi_h^E$. Note that the additional interpolation operator $\pi_h^E$ will enlarge the patches slightly, so that we should define $\eta$ via \begin{align*} \eta=0\qquad \text{in}\quad \UN^{m-8}(T)\qquad\qquad\qquad\eta=1\qquad\text{in}\quad \Omega\setminus \UN^{m-7}(T). \end{align*} The terms $M_4$ and $M_5$ remain unchanged, and we moreover get the terms \begin{align*}
\tilde{M}_6:=\Bigl|\bigl(\mu\curl\Vphi, \curl(\id-\pi_h^E)(\eta\Vz)\bigr)_{L^2(\Omega)}\Bigr|, \qquad
\tilde{M}_7:=\Bigl|\bigl(\kappa \Vphi, (\id-\pi_h^E)(\eta\Vz+\nabla(\eta\theta))\bigr)_{L^2(\Omega)}\Bigr|. \end{align*} These can be estimated simply using the stability of $\pi_h^E$, the properties of $\eta$ and the regular decomposition \eqref{eq:regulardecomp}.
\section{Falk--Winther interpolation} \label{sec:intpolimpl}
This section briefly describes the construction of the bounded local cochain projection of \cite{FalkWinther2014} for the present case of $\VH(\curl)$-problems in three space dimensions. The two-dimensional case is thoroughly described in the gentle introductory paper \cite{FalkWinther2015}. After giving the definition of the operator, we describe how it can be represented as a matrix. This is important because the interpolation operator is part of the algorithm and not a mere theoretical tool and therefore required in a practical realization.
\subsection{Definition of the operator} Let $\Delta_0$ denote the set of vertices of $\CT_H$ and let $\mathring{\Delta}_0:=\Delta_0\cap\Omega$ denote the interior vertices. Let $\Delta_1$ denote the set of edges and let $\mathring{\Delta}_1$ denote the interior edges, i.e., the elements of $\Delta_1$ that are not a subset of $\partial\Omega$. The space $\mathring{\CN}(\CT_H)$ is spanned by the well-known edge-oriented basis $(\Vpsi_E)_{E\in\mathring{\Delta}_1}$ defined for any $E\in\mathring{\Delta}_1$ through the property \begin{equation*} \fint_E \Vpsi_E\cdot \Vt_E\,ds = 1 \quad\text{and}\quad \fint_{E'} \Vpsi_E\cdot \Vt_E\,ds = 0 \quad\text{for all }E'\in\mathring{\Delta}_1\setminus\{E\}. \end{equation*} Here $\Vt_E$ denotes the unit tangent to the edge $E$ with a globally fixed sign. Any vertex $z\in\Delta_0$ possesses a nodal patch (sometimes also called macroelement) \begin{equation*} \omega_z:=\Int\Big(\bigcup\{T\in\CT_H : z\in T\}\Big). \end{equation*} For any edge $E\in\Delta_1$ shared by two vertices $z_1,z_2\in\Delta_0$ such that $E=\operatorname{conv}\{z_1,z_2\}$, the extended edge patch reads \begin{equation*} \omega_E^{\mathit{ext}} := \omega_{z_1}\cup\omega_{z_2}. \end{equation*} The restriction of the mesh $\CT_H$ to $\omega_E^{\mathit{ext}}$ is denoted by $\CT_H(\omega_E^{\mathit{ext}})$. Let $\CS^1(\CT_H(\omega_E^{\mathit{ext}}))$ denote the (scalar-valued) first-order Lagrange finite element space with respect to $\CT_H(\omega_E^{\mathit{ext}})$ and let $\CN(\CT_H(\omega_E^{\mathit{ext}}))$ denote the lowest-order N\'ed\'elec finite element space over $\CT_H(\omega_E^{\mathit{ext}})$. The operator \[
Q^1_E:
\VH(\curl, \omega_E^{\mathit{ext}})
\to
\CN(\CT_H(\omega_E^{\mathit{ext}})) \] is defined for any $\Vu\in \VH(\curl, \omega_E^{\mathit{ext}})$ via \begin{equation*} \begin{aligned}
(\Vu-Q^1_E \Vu, \nabla \tau) &= 0 \quad
&&\text{for all } \tau\in \CS^1(\CT_H(\omega_E^{\mathit{ext}}))
\\
(\curl (\Vu-Q^1_E \Vu),\curl \Vv) &=0
&&\text{for all } \Vv\in \CN(\CT_H(\omega_E^{\mathit{ext}})). \end{aligned} \end{equation*}
Given any vertex $y\in\Delta_0$, define the piecewise constant function $z^0_y$ by \begin{equation*} z^0_y = \begin{cases}
(\operatorname{meas}(\omega_y))^{-1} &\text{in } \omega_y \\
0 &\text{in } \Omega\setminus\omega_y
\end{cases} \end{equation*} Given any edge $E\in\Delta_1$ shared by vertices $y_1,y_2\in\Delta_0$ such that $E=\operatorname{conv}\{y_1,y_2\}$, define \begin{equation*} (\delta z^0)_E :=
z^0_{y_2} - z^0_{y_1} . \end{equation*}
Let $E\in\Delta_1$ and denote by $\mathring{\mathcal{RT}}(\CT_H(\omega_E^{\mathit{ext}}))$ the lowest-order Raviart--Thomas space with respect to $\CT_H(\omega_E^{\mathit{ext}})$ with vanishing normal trace on the boundary $\partial (\omega_E^{\mathit{ext}})$. Let for any $E\in\Delta_1$ the field $\Vz_E^1\in\mathring{\mathcal{RT}}(\CT_H(\omega_E^{\mathit{ext}}))$ be defined by \begin{equation*} \begin{aligned}
\Div \Vz_E^1 &=-(\delta z^0)_E \quad &&
\\
(\Vz_E^1,\curl\Vtau) &= 0
&&\text{for all }
\Vtau\in\mathring{\CN}(\CT_H(\omega_E^{\mathit{ext}})) \end{aligned} \end{equation*} where $\mathring{\CN}(\CT_H(\omega_E^{\mathit{ext}}))$ denotes the N\'ed\'elec finite element functions over $\CT_H(\omega_E^{\mathit{ext}})$ with vanishing tangential trace on the boundary $\partial(\omega_E^{\mathit{ext}})$. The operator $M^1:L^2(\Omega;\mathbb{C}^3)\to\mathring{\CN}(\CT_H)$ maps any $\Vu\in L^2(\Omega;\mathbb{C}^3)$ to \begin{equation*} M^1\Vu := \sum_{E\in\mathring{\Delta}_1}
(\operatorname{length}(E))^{-1}
\int_{\omega_E^{\mathit{ext}}} \Vu\cdot \Vz_E^1\,dx\, \Vpsi_E. \end{equation*}
The operator \[
Q^1_{y,-} : \VH(\curl,\omega_E^{\mathit{ext}})
\to
\CS^1(\CT_H(\omega_E^{\mathit{ext}})) \] is the solution operator of a local discrete Neumann problem. For any $\Vu\in \VH(\curl, \omega_E^{\mathit{ext}})$, the function $ Q^1_{y,-} \Vu $ solves \begin{equation*} \begin{aligned} (\Vu-\nabla Q^1_{y,-} \Vu,\nabla v) &= 0
\quad&&\text{for all } v\in \CS^1(\CT_H(\omega_E^{\mathit{ext}})) \\ \int_{\omega_E^{\mathit{ext}}} Q^1_{y,-} \Vu\,dx & = 0. && \end{aligned} \end{equation*} Define now the operator $S^1:\VH_0(\curl,\Omega)\to \mathring{\CN}(\CT_H)$ via \begin{equation}\label{e:S1def1} S^1 \Vu := M^1 \Vu + \sum_{y\in\mathring{\Delta}_0}
(Q^1_{y,-}\Vu)(y)\nabla \lambda_y . \end{equation} The second sum on the right-hand side can be rewritten in terms of the basis functions $\Vpsi_E$. The inclusion $\nabla \mathring{\CS}^1(\CT_H)\subseteq \mathring{\CN}(\CT_H)$ follows from the principles of finite element exterior calculus \cite{ArnoldFalkWinther2006,ArnoldFalkWinther2010}. Given an interior vertex $z\in\mathring{\Delta}_0$, the expansion in terms of the basis $(\Vpsi_E)_{E\in\mathring{\Delta}_1}$ reads \begin{equation*} \nabla\lambda_z = \sum_{E\in\mathring{\Delta}_1} \fint_E \nabla\lambda_z\cdot \Vt_E\,ds\,\Vpsi_E = \sum_{E\in\Delta_1(z)}
\frac{\operatorname{sign}(\Vt_E\cdot\nabla\lambda_z)}{\operatorname{length}(E)}
\Vpsi_E \end{equation*} where $\Delta_1(z)\subseteq\mathring{\Delta}_1$ is the set of all edges that contain $z$. Thus, $S^1$ from \eqref{e:S1def1} can be rewritten as \begin{equation}\label{e:S1def2} S^1 \Vu := M^1 \Vu + \sum_{E\in\mathring{\Delta}_1}
(\operatorname{length}(E))^{-1} \big((Q^1_{y_2(E),-}\Vu)(y_2(E)) - (Q^1_{y_1(E),-}\Vu)(y_1(E))\big) \Vpsi_E \end{equation} where $y_1(E)$ and $y_2(E)$ denote the endpoints of $E$ (with the orientation convention $\Vt_E = (y_2(E)-y_1(E))/\operatorname{length}(E)$). Finally, the Falk-Winther interpolation operator $\pi_H^E:\VH_0(\curl, \Omega)\to\mathring{\CN}(\CT_H)$ is defined as \begin{equation}\label{e:R1def} \pi_H^E \Vu := S^1 \Vu + \sum_{E\in\mathring{\Delta}_1}
\fint_E
\big((\id-S^1)Q^1_E \Vu\big)\cdot \Vt_E\,ds
\,\Vpsi_E . \end{equation}
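For orientation, we briefly recall (in the form in which they are used earlier in this paper) the two structural properties of $\pi_H^E$: it is a projection onto the coarse space, and it commutes with the $\curl$-operator via the companion projection $\pi_H^F$ (cf.\ Section \ref{sec:decaycorrectors}), i.e.,
\begin{align*}
\pi_H^E \Vv_H = \Vv_H \quad \mbox{for all } \Vv_H\in\mathring{\CN}(\CT_H) \qquad \mbox{and} \qquad \curl\, \pi_H^E \Vu = \pi_H^F \curl \Vu \quad \mbox{for all } \Vu\in\VH_0(\curl,\Omega).
\end{align*}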
\subsection{Algorithmic aspects}
Given a mesh $\CT_H$ and a refinement $\CT_h$, the linear projection $\pi_H : \mathring{\CN}(\CT_h)\to \mathring{\CN}(\CT_H)$ can be represented by a matrix $\mathsf{P}\in\mathbb R^{\dim \mathring{\CN}(\CT_H)\times\dim \mathring{\CN}(\CT_h)}$. This subsection briefly sketches the assembling of that matrix. The procedure involves the solution of local discrete problems on the macroelements. It is important to note that these problems are of small size because the mesh $\CT_h$ is a refinement of $\CT_H$.
Given an interior edge $E\in\mathring{\Delta}_1^H$ of $\CT_H$ and an interior edge $e\in\mathring{\Delta}_1^h$ of $\CT_h$, the interpolation $\pi_H \Vpsi_e$ has an expansion \begin{equation*}
\pi_H \Vpsi_e= \sum_{E'\in\mathring{\Delta}_1^H} c_{E'} \Vpsi_{E'} \end{equation*} for real coefficients $(c_{E'})_{E'\in\mathring{\Delta}_1^H}$. The coefficient $c_E$ is zero whenever $e$ is not contained in the closure of the extended edge patch $\overline{\omega}_E^{\mathit{ext}}$. The assembling can therefore be organized in a loop over all interior edges in $\mathring{\Delta}_1^H$. Given a global numbering of the edges in $\mathring{\Delta}_1^H$, each edge $E\in\mathring{\Delta}_1^H$ is equipped with a unique index $I_H(E)\in\{1,\dots,\operatorname{card}(\mathring{\Delta}_1^H)\}$. Similarly, the numbering of edges in $\mathring{\Delta}_1^h$ is denoted by $I_h$.
The matrix $\mathsf{P}=\mathsf{P_1}+\mathsf{P_2}$ will be composed as the sum of matrices $\mathsf{P_1}$, $\mathsf{P_2}$ that represent the two summands on the right-hand side of \eqref{e:R1def}. Those will be assembled in loops over the interior edges. Matrices $\mathsf{P_1}$, $\mathsf{P_2}$ are initialized as empty sparse matrices.
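Before presenting the two loops in detail, we indicate how the assembly could be organized in code. The following Python sketch is our illustration and is not taken from the authors' code: it only fixes the bookkeeping, while the per-edge row computations are delegated to callables \texttt{p1\_row} and \texttt{p2\_row} that stand for the local computations described in the two loops below; the edge objects with attributes \texttt{index} and \texttt{fine\_edge\_indices} are a hypothetical interface to the mesh data structure.
\begin{verbatim}
import scipy.sparse as sp

def assemble_falk_winther_matrix(coarse_edges, n_H, n_h, p1_row, p2_row):
    """Assemble P = P1 + P2 row by row (schematic driver).

    coarse_edges : interior coarse edges E; each E carries its global index
                   I_H(E) ('index') and the indices of the fine edges inside
                   its extended patch ('fine_edge_indices').
    p1_row, p2_row : callables returning the local row vectors for E; they
                   stand in for the per-edge computations sketched below.
    """
    P1 = sp.lil_matrix((n_H, n_h))
    P2 = sp.lil_matrix((n_H, n_h))
    for E in coarse_edges:
        cols = E.fine_edge_indices          # [I_h(e_1), ..., I_h(e_N)]
        P1[E.index, cols] = p1_row(E)       # row of length N
        P2[E.index, cols] = p2_row(E)
    return (P1 + P2).tocsr()
\end{verbatim}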
\subsubsection{Operator $\mathsf{P_1}$}
\noindent \textbf{for} $E\in\mathring{\Delta}_1^H$ \textbf{do}
Let the interior edges in $\mathring{\Delta}_1^h$ that lie inside $\overline{\omega}_E^{\mathit{ext}}$ be denoted with $\{e_1,e_2,\dots,e_N\}$ for some $N\in\mathbb N$. The entries $\mathsf{P}_1(I_H(E),[I_h(e_1)\dots I_h(e_N)])$ of the matrix $\mathsf{P}_1$ are now determined as follows. Compute $\Vz^1_E \in \mathring{\mathcal{RT}}(\CT_H({\omega}_E^{\mathit{ext}}))$. The matrix $\mathsf{M}_E\in\mathbb R^{1\times N}$ defined via \[
\mathsf{M}_E
:=
(\operatorname{length}(E))^{-1}
\left[
\int_{{\omega}_E^{\mathit{ext}}} \Vz^1_E\cdot\Vpsi_{e_j}\,dx
\right]_{j=1}^N \] represents the map of the basis functions on the fine mesh to the coefficient of $M^1$ contributing to $\Vpsi_E$ on the coarse mesh. Denote by $\mathsf{A}_{y_j(E)}$ and $\mathsf{B}_{y_j(E)}$ ($j=1,2$) the stiffness and right-hand side matrix representing the system for the operator $Q_{y_j(E),-}$ \begin{align*}
\mathsf{A}_{y_j(E)}
&:=
\left[
\int_{\omega_{y_j(E)}} \nabla \phi_y \cdot\nabla \phi_z\,dx
\right]_{y,z\in\Delta_0(\CT_H(\omega_{y_j(E)}))}, \\
\mathsf{B}_{y_j(E)}
&:=
\left[
\int_{\omega_{y_j(E)}} \nabla \phi_y \cdot\Vpsi_{e_k}\,dx
\right]_{\substack{y\in\Delta_0(\CT_H(\omega_{y_j(E)}))\\ k=1,\dots,N}}. \end{align*} After enhancing the system to $\tilde{\mathsf{A}}_{y_j(E)}$ and $\tilde{\mathsf{B}}_{y_j(E)}$ (with a Lagrange multiplier accounting for the mean constraint), it is uniquely solvable. Set $\tilde{\mathsf{Q}}_{y_j(E)} =
\tilde{\mathsf{A}}_{y_j(E)}^{-1}\tilde{\mathsf{B}}_{y_j(E)}$ and extract the row corresponding to the vertex $y_j(E)$ \[
\mathsf{Q}_j:=
(\operatorname{length}(E))^{-1}
\tilde{\mathsf{Q}}_{y_j(E)}[y_j(E),:]
\in \mathbb R^{1\times N}. \] Set \[
\mathsf{P}_1(I_H(E),[I_h(e_1)\dots I_h(e_N)])
=
\mathsf{M}_E + \mathsf{Q}_2 -\mathsf{Q}_1 . \] \noindent \textbf{end}
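In code, the body of this loop could look as follows (a schematic Python sketch of ours, usable as the callable \texttt{p1\_row} from the driver sketch above once the per-edge data are unpacked). The local matrices $\mathsf{M}_E$, $\mathsf{A}_{y_j(E)}$, $\mathsf{B}_{y_j(E)}$, the vectors of basis-function integrals used for the mean-value constraint and the local vertex indices are assumed to be provided by a finite element library as small dense arrays; all names are placeholders.
\begin{verbatim}
import numpy as np

def p1_row(length_E, M_E, A_patches, B_patches, mean_vecs, vertex_pos):
    """Row of P1 for one coarse edge E (schematic).

    M_E        : 1D array of length N, the integrals of z^1_E against the
                 fine basis functions, already scaled by 1/length(E).
    A_patches  : [A_{y_1(E)}, A_{y_2(E)}], H^1 stiffness matrices on the two
                 vertex patches (first-order Lagrange basis).
    B_patches  : [B_{y_1(E)}, B_{y_2(E)}], couplings of the P1 gradients with
                 the fine Nedelec basis functions psi_{e_k}.
    mean_vecs  : integrals of the P1 basis functions over each patch, used
                 to impose the zero-mean constraint.
    vertex_pos : local indices of y_1(E), y_2(E) in their patch bases.
    """
    Q = []
    for j in (0, 1):
        A, B, m = A_patches[j], B_patches[j], mean_vecs[j]
        # enlarge by a Lagrange multiplier for the mean-value constraint
        A_t = np.block([[A, m.reshape(-1, 1)],
                        [m.reshape(1, -1), np.zeros((1, 1))]])
        B_t = np.vstack([B, np.zeros((1, B.shape[1]))])
        Q_t = np.linalg.solve(A_t, B_t)
        # row belonging to the vertex y_j(E), scaled by 1/length(E)
        Q.append(Q_t[vertex_pos[j], :] / length_E)
    # combine as in the formula for S^1: M_E + (Q_2 - Q_1)
    return M_E + Q[1] - Q[0]
\end{verbatim}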
\subsubsection{Operator $\mathsf{P_2}$}
\noindent \textbf{for} $E\in\mathring{\Delta}_1^H$ \textbf{do}
Denote the matrices -- where indices $j,k$ run from $1$ to $\operatorname{card}(\Delta_1(\CT_H({\omega}_E^{\mathit{ext}})))$, $y$ through \linebreak[4]$\Delta_0(\CT_H({\omega}_E^{\mathit{ext}}))$, and $\ell=1,\ldots, N$ -- \begin{equation*} \mathsf{S}_E
:=
\left[
\int_{{\omega}_E^{\mathit{ext}}}
\curl \Vpsi_{E_j} \cdot\curl\Vpsi_{E_k}\,dx
\right]_{j,k}, \qquad \mathsf{T}_E
:=
\left[
\int_{{\omega}_E^{\mathit{ext}}}
\Vpsi_{E_j} \cdot\nabla\lambda_{y}\,dx
\right]_{j,y} \end{equation*} and \begin{equation*} \mathsf{F}_E
:=
\left[
\int_{{\omega}_E^{\mathit{ext}}}
\curl \Vpsi_{E_j} \cdot\curl\Vpsi_{e_\ell}\,dx
\right]_{j,\ell}, \qquad \mathsf{G}_E
:=
\left[
\int_{{\omega}_E^{\mathit{ext}}}
\Vpsi_{e_\ell} \cdot\nabla\lambda_{y}\,dx
\right]_{y, \ell} . \end{equation*} Solve the saddle-point system \begin{equation*}
\begin{bmatrix}
\mathsf{S} & \mathsf{T}^* \\ \mathsf{T} & 0
\end{bmatrix} \begin{bmatrix}
\mathsf{U} \\ \mathsf{V}
\end{bmatrix} = \begin{bmatrix}
\mathsf{F} \\ \mathsf{G}
\end{bmatrix} . \end{equation*} (This requires an additional one-dimensional gauge condition because the sum of the test functions $\sum_y\nabla\lambda_y$ equals zero.) Assemble the operator $S^1$ (locally) as described in the previous step and denote this matrix by $\mathsf{P}_1^{\mathit{loc}}$. Compute $\mathsf{U}- \mathsf{P}_1^{\mathit{loc}} \mathsf{U}$ and extract the row $\mathsf{X}$ corresponding to the edge $E$
\[
\mathsf{P_2}(I_H(E),[I_h(e_1)\dots I_h(e_N)])
=
\mathsf{X} . \] \noindent \textbf{end}
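Analogously, the body of the second loop can be sketched in Python as follows. This is again our illustration and involves some interpretation of the bookkeeping: the patch matrices $\mathsf{S}_E$, $\mathsf{T}_E$, $\mathsf{F}_E$, $\mathsf{G}_E$ are assumed to be assembled by a finite element library (with $\mathsf{T}$ and $\mathsf{G}$ stored so that the constraint block reads $\mathsf{T}\mathsf{U}=\mathsf{G}$), the gauge condition is imposed by simply dropping one vertex row, and \texttt{p1\_local} denotes the local matrix of $S^1$ expressed in the coarse N\'ed\'elec basis of the patch, which is our reading of the step ``Compute $\mathsf{U}-\mathsf{P}_1^{\mathit{loc}}\mathsf{U}$'' above.
\begin{verbatim}
import numpy as np

def p2_row(S, T, F, G, p1_local, local_edge_index):
    """Row of P2 for one coarse edge E (schematic).

    S : curl-curl matrix of the coarse Nedelec basis on the patch.
    T : constraint matrix (rows: patch vertices, columns: coarse edges).
    F : curl-curl couplings with the fine basis functions e_1, ..., e_N.
    G : couplings of the fine basis functions with the gradients
        grad(lambda_y) (rows: patch vertices, columns: fine edges).
    p1_local : local S^1 matrix in the coarse Nedelec basis of the patch.
    local_edge_index : position of E among the coarse edges of the patch.
    """
    n_edges = S.shape[0]
    n_vert = T.shape[0]
    # gauge fixing: the test functions grad(lambda_y) sum to zero, so one
    # vertex row is redundant; drop it to obtain a regular system
    T_red, G_red = T[:-1, :], G[:-1, :]
    K = np.block([[S, T_red.conj().T],
                  [T_red, np.zeros((n_vert - 1, n_vert - 1))]])
    rhs = np.vstack([F, G_red])
    sol = np.linalg.solve(K, rhs)
    U = sol[:n_edges, :]               # coefficients of Q^1_E psi_e
    X = U - p1_local @ U               # (id - S^1) Q^1_E psi_e
    return X[local_edge_index, :]
\end{verbatim}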
\section*{Conclusion} In this paper, we suggested a procedure for the numerical homogenization of $\VH(\curl)$-elliptic problems. The exact solution is decomposed into a coarse part, which is a good approximation in $\VH(\Div)^\prime$, and a corrector contribution by using the Falk-Winther interpolation operator. We showed that this decomposition gives an optimal order approximation in $\VH(\curl)$, independent of the regularity of the exact solution. Furthermore, the corrector operator can be localized to patches of macro elements, which allows for an efficient computation. This results in a generalized finite element method in the spirit of the Localized Orthogonal Decomposition which utilizes the bounded local cochain projection of Falk and Winther as part of the algorithm.
\section*{Acknowledgments} Main parts of this paper were written while the authors enjoyed the kind hospitality of the Hausdorff Institute for Mathematics in Bonn. PH and BV acknowledge financial support by the DFG in the project OH 98/6-1 ``Wave propagation in periodic structures and negative refraction mechanisms''. DG acknowledges support by the DFG through CRC 1173 ``Wave phenomena: analysis and numerics'' and by the Baden-W\"urttemberg Stiftung (Eliteprogramm f\"ur Postdocs) through the project ``Mehrskalenmethoden für Wellenausbreitung in heterogenen Materialien und
Metamaterialien''.
\end{document}
\begin{document}
\title[Bi-slant submersions]
{On bi-slant submersions in complex geometry}
\author[C. Sayar]{Cem Sayar$^1$} \address{$^1$Istanbul Technical University\\ Faculty of Science and Letters,\\ Department of Mathematics\\ 34469, Maslak /\.{I}stanbul Turkey} \email{[email protected]} \author[M. A. Akyol]{Mehmet Akif Akyol$^2$} \address{$^2$Bingol University\\ Faculty of Arts and Sciences,\\ Department of Mathematics\\ 12000, Bing\"{o}l, Turkey} \email{[email protected]} \author[R. Prasad]{Rajendra Prasad$^3$} \address{$^3$Lucknow University\\ Department of Mathematics and Astronomy\\ 226007, Uttar Pradesh, Lucknow, India} \email{[email protected]}
\subjclass{Primary 53C15, 53B20}
\keywords{Riemannian submersion, bi-slant submersion, horizontal distribution, Kaehler manifold}
\date{January 1, 2004}
\begin{abstract} In the present paper, we introduce bi-slant submersions from almost Hermitian manifolds onto Riemannian manifolds as a generalization of invariant, anti-invariant, semi-invariant, slant, semi-slant and hemi-slant Riemannian submersions. We mainly focus on bi-slant submersions from Kaehler manifolds. We provide a proper example of bi-slant submersion, investigate the geometry of foliations determined by vertical and horizontal distributions, and obtain the geometry of leaves of these distributions. Moreover, we obtain curvature relations between the base space, the total space and the fibres, and find geometric implications of these relations. \end{abstract}
\maketitle
\section{Introduction}
The notion of a slant submanifold was introduced by B.-Y. Chen in \cite{Chen0}, and the first results on slant submanifolds were collected in his book \cite{Chen2}. After this notion was introduced, many geometers were inspired by it and obtained numerous results on slant submanifolds in various ambient spaces. As a generalization, J. L. Cabrerizo et al. defined the notion of a bi-slant submanifold in \cite{Cabre}; see also \cite{Carri}.
On the other hand, as an analogue of isometric immersions (Riemannian submanifolds), the notion of a Riemannian submersion between two Riemannian manifolds was first introduced by B. O'Neill \cite{O} and A. Gray \cite{Gra}. This notion has applications in physics and in mathematics. More precisely, Riemannian submersions have applications in supergravity and superstring theories \cite{IV1,M}, Kaluza-Klein theory \cite{BL,IV} and Yang-Mills theory \cite{BL1,W1}. B. Watson \cite{Wat} considered submersions between almost Hermitian manifolds by taking into account the almost complex structure of the total manifold. In this case, the vertical and horizontal distributions are invariant. Afterwards, almost Hermitian submersions have been extensively studied for different subclasses of almost Hermitian manifolds; see, for example, \cite{Fa}.
Inspired by B. Watson's article, B. \c{S}ahin introduced anti-invariant submersions from almost Hermitian manifolds onto Riemannian manifolds \cite{Sah}. This notion has opened a new, original and effective area in the theory of Riemannian submersions, and that paper has been a source of inspiration for many geometers. For example, as a special case of anti-invariant submersions, Lagrangian submersions were studied by H. M. Tastan \cite{Ta}. Later, several new types of Riemannian submersions were defined and studied, such as semi-invariant submersions \cite{Akyol4,cem,Sa}, slant submersions \cite{Er, Gun, Gun1, Sa1}, hemi-slant submersions \cite{Ta3}, semi-slant submersions \cite{Akyol3, Park}, pointwise slant submersions \cite{Lee, Se}, quasi bi-slant submersions \cite{Prasad}, conformal slant submersions \cite{Akyol0,Akyol1} and conformal semi-slant submersions \cite{Akyol2}. Also, these kinds of submersions were considered for different kinds of structures, such as cosymplectic, Sasakian, Kenmotsu, nearly Kaehler, almost product and para-contact structures, among others. Recent developments in the theory of submersions can be found in the book \cite{baykit}.
Recently, the first author et al. defined generic submersions in the sense of G. B. Ronsse (see \cite{Rons}) in the complex setting in \cite{Cemp}. We are motivated to fill a gap in the literature by introducing the notion of bi-slant submersions, in which the fibres consist of two slant distributions. In the present paper, as a special case of the above notion and as a generalization of invariant, anti-invariant, semi-invariant, slant, semi-slant and hemi-slant Riemannian submersions, we introduce bi-slant submersions and investigate the geometry of the base space, the total space and the fibres.
The paper is organized as follows. Section 2 includes preliminaries. Section 3 contains the definition of bi-slant submersions, a proper example, the geometry of foliations determined by the vertical and horizontal distributions and the geometry of the leaves of these distributions. The last section of this paper includes curvature relations between the base space, the total space and the fibres, together with geometric implications of these relations.
\section{Riemannian submersions} In this section, we give necessary background for Riemannian submersions.\\
Let $(M,g)$ and $(N,g_{\text{\tiny$N$}})$ be Riemannian manifolds, where $\dim(M)$ is greater than $\dim(N)$. A surjective mapping $\pi:(M,g)\rightarrow(N,g_{N})$ is called a \emph{Riemannian submersion} \cite{O} if\\
\textbf{(S1)} $\pi$ has maximal rank, and \\
\textbf{(S2)} $\pi_{*}$, restricted to $\ker\pi_{*}^{\bot},$ is a linear isometry.\\
In this case, for each $q\in N$, $\pi^{-1}(q)$ is a $k$-dimensional submanifold of $M$ and called a \emph{fiber}, where $k=\dim(M)-\dim(N).$ A vector field on $M$ is called \emph{vertical} (resp. \emph{horizontal}) if it is always tangent (resp. orthogonal) to fibers. A vector field $X$ on $M$ is called \emph{basic} if $X$ is horizontal and $\pi$-related to a vector field $X_{*}$ on $N,$ i.e., $\pi_{*}X_{p}=X_{*\pi(p)}$ for all $p\in M.$ We will denote by $\mathcal{V}$ and $\mathcal{H}$ the projections on the vertical distribution $\ker\pi_{*}$, and the horizontal distribution $\ker\pi_{*}^{\bot},$ respectively. As usual, the manifold $(M,g)$ is called the \emph{total manifold} and the manifold $(N,g_{N})$ is called the \emph{base manifold} of the submersion $\pi:(M,g)\rightarrow(N,g_{N})$. The geometry of Riemannian submersions is characterized by O'Neill's tensors $\mathcal{T}$ and $\mathcal{A}$, defined as follows: \begin{equation}\label{testequationn} \mathcal{T}_{U}{V}=\mathcal{V}\nabla_{\mathcal{V}{U}}\mathcal{H}{V}+\mathcal{H}\nabla_{\mathcal{V}{U}}\mathcal{V}{V}, \end{equation} \begin{equation}\label{testequationnn} \mathcal{A}_{U}{V}=\mathcal{V}\nabla_{\mathcal{H}{U}}\mathcal{H}{ V}+\mathcal{H}\nabla_{\mathcal{H}{U}}\mathcal{V}{V} \end{equation} for any vector fields ${U}$ and ${V}$ on $M,$ where $\nabla$ is the Levi-Civita connection of $g$. It is easy to see that $\mathcal{T}_{{U}}$ and $\mathcal{A}_{{U}}$ are skew-symmetric operators on the tangent bundle of $M$ reversing the vertical and the horizontal distributions. We now summarize the properties of the tensor fields $\mathcal{T}$ and $\mathcal{A}$. Let $V,W$ be vertical and $X,Y$ be horizontal vector fields on $M$, then we have \begin{equation}\label{testequation111} \mathcal{T}_{V}W=\mathcal{T}_{W}V, \end{equation} \begin{equation}\label{testequation00} \mathcal{A}_{X}Y=-\mathcal{A}_{Y}X=\frac{1}{2}\mathcal{V}[X,Y]. \end{equation} On the other hand, from (\ref{testequationn}) and (\ref{testequationnn}), we obtain \begin{equation}\label{testequation09} \nabla_{V}W=\mathcal{T}_{V}W+\hat{\nabla}_{V}W, \end{equation} \begin{equation}\label{testequation11} \nabla_{V}X=\mathcal{T}_{V}X+\mathcal{H}\nabla_{V}X, \end{equation} \begin{equation}\label{testequation} \nabla_{X}V=\mathcal{A}_{X}V+\mathcal{V}\nabla_{X}V, \end{equation} \begin{equation}\label{testequation123} \nabla_{X}Y=\mathcal{H}\nabla_{X}Y+\mathcal{A}_{X}Y, \end{equation} where $\hat{\nabla}_{V}W=\mathcal{V}\nabla_{V}W$. If $X$ is basic, then \[\mathcal{H}\nabla_{V}X=\mathcal{A}_{X}V.\] \begin{rem}\label{remark1} In this paper, we will assume that all horizontal vector fields are basic vector fields. \end{rem} It is not difficult to observe that $\mathcal{T}$ acts on the fibers as the second fundamental form, while $\mathcal{A}$ acts on the horizontal distribution and measures the obstruction to the integrability of this distribution. For details on Riemannian submersions, we refer to O'Neill's paper \cite{O} and to the book \cite{Fa}.
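As a simple illustration of these tensors (an example added here for the reader): for the projection $\pi: M_{1}\times M_{2}\rightarrow M_{1}$ of a Riemannian product onto its first factor, the fibers $\{p\}\times M_{2}$ are totally geodesic and the horizontal distribution is integrable, so that $\mathcal{T}\equiv 0$ and $\mathcal{A}\equiv 0$; in general, $\mathcal{T}$ and $\mathcal{A}$ measure how far a Riemannian submersion deviates from this product situation.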
\section{Bi-slant Submersions}
A manifold $M$ is called an \textit{almost Hermitian manifold} \cite{Yan} if it admits a tensor field $J$ of type (1,1) on itself such that, for any $X,Y \in TM$, \begin{equation} \label{e9} J^{2}=-I,\quad g(X,Y)=g(JX,JY). \end{equation} An almost Hermitian manifold $M$ is called a \textit{Kaehler manifold} \cite{Yan} if, for all $X,Y \in TM$, \begin{equation} \label{e10} (\nabla_{X}J)Y=0, \end{equation} where $\nabla$ is the Levi-Civita connection with respect to the Riemannian metric $g$ and $I$ is the identity operator on the tangent bundle $TM$.\\ \begin{definition}\label{dfnbislant} Let $(M,g,J)$ be a Kaehler manifold and $(N,g_{\text{\tiny$N$}})$ be a Riemannian manifold. A Riemannian submersion $\pi : (M,g,J)\rightarrow (N,g_{N})$ is called a \textit{bi-slant submersion}, if there are two slant distributions $\mathcal{D}^{\theta_{1}}\subset ker\pi_{*}$ and $\mathcal{D}^{\theta_{2}}\subset ker\pi_{*}$ such that \begin{equation}\label{eqnbislant1}
ker\pi_{*}=\mathcal{D}^{\theta_{1}}\oplus \mathcal{D}^{\theta_{2}}, \end{equation} where $\mathcal{D}^{\theta_{1}}$ and $\mathcal{D}^{\theta_{2}}$ have slant angles $\theta_{1}$ and $\theta_{2}$, respectively. \end{definition}
Suppose that the dimension of the distribution $\mathcal{D}^{\theta_{1}}$ (resp. $\mathcal{D}^{\theta_{2}}$) is $m_1$ (resp. $m_2$). Then we easily see the following particular cases. \begin{enumerate} \item[(a)] If $m_1=0$ and $\theta_2=0$, then $\pi$ is an invariant submersion.
\item[(b)] If $m_1=0$ and $\theta_2=\frac{\pi}{2},$ then $\pi$ is an anti-invariant submersion.
\item[(c)] If $m_1\neq0$, $m_2\neq0,$ $\theta_1=0$ and $\theta_2=\frac{\pi}{2},$ then $\pi$ is a semi-invariant submersion.
\item[(d)] If $m_1=0$ and $0<\theta_2<\frac{\pi}{2},$ then $\pi$ is a proper slant submersion.
\item[(e)] If $m_1\neq0$, $m_2\neq0,$ $\theta_1=0$ and $0<\theta_2<\frac{\pi}{2},$ then $\pi$ is a semi-slant submersion.
\item[(f)] If $m_1\neq0$, $m_2\neq0,$ $\theta_1=\frac{\pi}{2}$ and $0<\theta_2<\frac{\pi}{2},$ then $\pi$ is a hemi-slant submersion. \end{enumerate}
If each slant angle is different from both zero and $\frac{\pi}{2}$, then the bi-slant submersion is called a \textit{proper bi-slant submersion}. Now, we present a non-trivial example of a bi-slant submersion and demonstrate that the method presented in this paper is effective.
\begin{rem} In the present paper, we assume that every bi-slant submersion is proper, i.e., the slant angles are different from both zero and $\frac{\pi}{2}$. \end{rem}
\begin{example} Let $\mathbb{R}^{8}$ be the $8$-dimensional Euclidean space. Then $(\mathbb{R}^{8},J,g)$ is a Kaehler manifold with the Euclidean metric $g$ on $\mathbb{R}^{8}$ and the canonical complex structure $J$. Consider the map $\pi : \mathbb{R}^{8} \rightarrow \mathbb{R}^{4}$ defined by \begin{equation*}
\pi(x_{1},x_{2},\ldots,x_{8})= \Bigl(\frac{-x_{1}+x_{4}}{\sqrt{2}},-x_{2},\frac{-\sqrt{3}x_{5}+x_{8}}{2},-x_{6}\Bigr). \end{equation*} Then the Jacobian matrix of $\pi$ has rank $4$, which means that $\pi$ is a submersion. Moreover, a direct calculation shows that \begin{equation*}
ker\pi_{*}=\mathcal{D}^{\theta_{1}}\oplus \mathcal{D}^{\theta_{2}}, \end{equation*} where \begin{equation*}
\mathcal{D}^{\theta_{1}}=span \{V_{1}=\frac{1}{\sqrt{2}}(\partial x_{1}+\partial x_{4}), V_{2}=\partial x_{3}\} \end{equation*} and \begin{equation*}
\mathcal{D}^{\theta_{2}}=span \{V_{3}=\frac{1}{2}\partial x_{5}+\frac{\sqrt{3}}{2}\partial x_{8}, V_{4}=\partial x_{7}\}. \end{equation*} Moreover, the slant angle of $\mathcal{D}^{\theta_{1}}$ is $\theta_{1}=\frac{\pi}{4}$ and the slant angle of $\mathcal{D}^{\theta_{2}}$ is $\theta_{2}=\frac{\pi}{3}$. \end{example} Let $\pi : (M,g,J)\rightarrow (N,g_{N})$ be a bi-slant submersion from a Kaehlerian manifold $M$ onto a Riemannian manifold $N$. Then, for any $V \in ker\pi_{*}$, we put \begin{equation}\label{decompvervec}
JV=PV+FV, \end{equation} where $PV \in ker\pi_{*}$ and $FV \in ker\pi_{*}^{\perp}$. Also, for any $\xi \in ker\pi_{*}^{\perp}$, we put \begin{equation}\label{decomphorvec}
J\xi=\phi \xi +\omega \xi, \end{equation} where $\phi \xi \in ker\pi_{*}$ and $\omega \xi \in ker\pi_{*}^{\perp}$. In this case, the horizontal distribution $ker\pi_{*}^{\perp}$ can be decomposed as follows \begin{equation}\label{eqnbislant2}
ker\pi_{*}^{\perp}=F\mathcal{D}^{\theta_{1}}\oplus F\mathcal{D}^{\theta_{2}} \oplus \mu, \end{equation} where $\mu$ is the orthogonal complement of $F\mathcal{D}^{\theta_{1}}\oplus F\mathcal{D}^{\theta_{2}}$ in $ ker\pi_{*}^{\perp}$, and it is invariant with respect to the complex structure $J$. \\ By using \eqref{decompvervec} and \eqref{decomphorvec}, we obtain the following. \begin{lemma} Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, we have \begin{equation*}
\textbf{(a)}P\mathcal{D}^{\theta_{1}}\subset \mathcal{D}^{\theta_{1}},\quad
\textbf{ (b)}P\mathcal{D}^{\theta_{2}}\subset \mathcal{D}^{\theta_{2}},\quad
\textbf{(c)}\phi \mu=\{0\},\quad
\textbf{(d)}\omega \mu = \mu. \end{equation*} \end{lemma} With the help of \eqref{e9}, \eqref{decompvervec} and \eqref{decomphorvec} we obtain the following Lemma. \begin{lemma}\label{general} Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, we have \begin{equation*}
\textbf{(a)}\, P^{2}X=-\cos^{2}\theta_{1}X, \quad \textbf{(b)}\, P^{2}U=-\cos^{2}\theta_{2}U, \end{equation*} \begin{equation*} \textbf{(c)}\, \phi FX=-\sin ^{2}\theta_{1}X, \quad \textbf{(d)}\, \phi FU=-\sin ^{2}\theta_{2}U, \end{equation*} \begin{equation*}
\textbf{(e)}\, P^{2}X+\phi FX=-X, \quad \textbf{(f)}\, P^{2}U+\phi FU=-U, \end{equation*} \begin{equation*}
\textbf{(g)}\, FPX+\omega FX=0, \quad \textbf{(h)}\, FPU+\omega FU=0, \end{equation*} for any vector field $X \in \mathcal{D}^{\theta_{1}}$ and $U \in \mathcal{D}^{\theta_{2}}$. \end{lemma} We investigate the relation between complex structure $J$ and O'Neill tensors $\mathcal{T}$ and $\mathcal{A}$. \begin{lemma}\label{lemmagenel} Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, we have \begin{equation}\label{eq1}
\phi \mathcal{T}_{X}Y+P\hat{\nabla}_{X}Y=\hat{\nabla}_{X}PY+\mathcal{T}_{X}FY, \end{equation} \begin{equation}\label{eq2}
\omega \mathcal{T}_{X}Y+F\hat{\nabla}_{X}Y=\mathcal{T}_{X}PY+\mathcal{A}_{FY}X, \end{equation} \begin{equation}\label{eq3}
P\mathcal{T}_{X}\xi+\phi \mathcal{A}_{\xi}X=\hat{\nabla}_{X}\phi \xi+\mathcal{T}_{X}\omega \xi, \end{equation} \begin{equation}\label{eq4}
F\mathcal{T}_{X}\xi+\omega \mathcal{A}_{\xi}X=\mathcal{T}_{X}\phi \xi+\mathcal{A}_{\omega \xi}X, \end{equation} \begin{equation}\label{eq5} \phi \mathcal{H}\nabla_{\xi}\eta+P\mathcal{A}_{\xi}\eta=\mathcal{V}\nabla_{\xi}\phi \eta+\mathcal{A}_{\xi}\omega \eta, \end{equation} \begin{equation}\label{eq6} \omega \mathcal{H}\nabla_{\xi}\eta+F\mathcal{A}_{\xi}\eta=\mathcal{A}_{\xi}\phi \eta+\mathcal{H}\nabla_{\xi}\omega \eta, \end{equation} for any $X,Y \in ker\pi_{*}$ and $\xi, \eta \in ker\pi_{*}^{\perp}$. \end{lemma} \begin{proof}
Let $U$ and $V$ be in $ker\pi_{*}$. Since $M$ is a Kaehlerian manifold, we have $J\nabla_{U}V=\nabla_{U}JV$. From \eqref{testequation09}, \eqref{testequation11}, \eqref{decompvervec} and \eqref{decomphorvec}, we obtain
\begin{eqnarray*}
J\nabla_{U}V&=&\nabla_{U}PV+\nabla_{U}FV\\
\Rightarrow J(\mathcal{T}_{U}V+\hat{\nabla}_{U}V)&=&\mathcal{T}_{U}PV+\hat{\nabla}_{U}PV\\
&+&\mathcal{T}_{U}FV+\mathcal{H}\nabla_{U}FV.
\end{eqnarray*}
\begin{eqnarray*}
\Rightarrow \phi \mathcal{T}_{U}V+\omega \mathcal{T}_{U}V+P\hat{\nabla}_{U}V+F\hat{\nabla}_{U}V&=& \mathcal{T}_{U}PV+\hat{\nabla}_{U}PV\\
&+&\mathcal{T}_{U}FV+\mathcal{H}\nabla_{U}FV.
\end{eqnarray*} Then, in view of Remark \ref{remark1}, considering the vertical and horizontal parts of the last equation gives us \eqref{eq1} and \eqref{eq2}. For the remaining equations, the same argument applies. \end{proof} Now, we obtain equations which play the role of the Gauss and Weingarten equations for bi-slant submersions. \begin{lemma}\label{GauWei} Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, for any $X,Y \in \mathcal{D}^{\theta_{1}}$ and $U,V \in \mathcal{D}^{\theta_{2}}$, we have \begin{equation}\label{GauWei1}
g(\nabla_{X}Y,U)=\csc^{2}\theta_{1}\,g(\mathcal{T}_{PU}FY-\mathcal{T}_{U}FPY+\mathcal{A}_{FU}FY,X), \end{equation} \begin{equation}\label{GauWei2}
g(\nabla_{U}V,X)=\csc^{2}\theta_{2}\,g(\mathcal{T}_{PX}FV-\mathcal{T}_{X}FPV+\mathcal{A}_{FX}FV,U). \end{equation} \end{lemma} \begin{proof}
Assume that $X,Y$ are in $\mathcal{D}^{\theta_{1}}$ and $U,V$ are in $\mathcal{D}^{\theta_{2}}$. Then, from \eqref{e9}, \eqref{e10} and \eqref{decompvervec}, we have
\begin{eqnarray*}
g(\nabla_{X}Y,U)&=&g(\nabla_{X}JY,JU)\\ &=&g(\nabla_{X}PY,JU)+g(\nabla_{X}FY,JU).
\end{eqnarray*}
With the help of \eqref{e9} and \eqref{decompvervec}, we obtain
\begin{eqnarray*}
\Rightarrow g(\nabla_{X}Y,U)&=&-g(\nabla_{X}P^{2}Y,U)-g(\nabla_{X}FPY,U)\\
&+&g(\nabla_{X}FY,PU)+g(\nabla_{X}FY,FU).
\end{eqnarray*}
By Lemma \ref{general}-(a), Remark \ref{remark1}, \eqref{testequation09} and \eqref{testequation11}, we get
\begin{eqnarray*} \Rightarrow g(\nabla_{X}Y,U)&=&\cos^{2}\theta_{1}\,g(\nabla_{X}Y,U)-g(\mathcal{T}_{X}FPY,U)\\ &+&g(\mathcal{T}_{X}FY,PU)+g(\mathcal{A}_{FY}X,FU).
\end{eqnarray*} Rearranging the last equation and taking into account the properties of the O'Neill tensors $\mathcal{T}$ and $\mathcal{A}$, we get \eqref{GauWei1}. To obtain \eqref{GauWei2}, the same idea can be used. \end{proof} \subsection{Integrability} In this section, we investigate the integrability of the distributions which appear in the definition of a bi-slant submersion. \begin{theorem}
Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, the slant distribution $\mathcal{D}^{\theta_{1}}$ is integrable if and only if
\begin{equation*}
g(\mathcal{T}_{PU}FY-\mathcal{T}_{U}FPY+\mathcal{A}_{FU}FY,X)=g(\mathcal{T}_{PU}FX-\mathcal{T}_{U}FPX+\mathcal{A}_{FU}FX,Y),
\end{equation*}
where $X,Y \in \mathcal{D}^{\theta_{1}}$ and $U \in \mathcal{D}^{\theta_{2}}$. \end{theorem} \begin{proof}
Let $X,Y \in \mathcal{D}^{\theta_{1}}$ and $U \in \mathcal{D}^{\theta_{2}}$. Then, by \eqref{GauWei1}, we get
\begin{eqnarray*} g([X,Y],U)&=&g(\nabla_{X}Y,U)-g(\nabla_{Y}X,U)\\ &=&\csc^{2}\theta_{1}\big\{ g(\mathcal{T}_{PU}FY-\mathcal{T}_{U}FPY+\mathcal{A}_{FU}FY,X)\\ &-&g(\mathcal{T}_{PU}FX-\mathcal{T}_{U}FPX+\mathcal{A}_{FU}FX,Y)\big\}.
\end{eqnarray*} Therefore, the slant distribution $\mathcal{D}^{\theta_{1}}$ is integrable if and only if $[X,Y] \in \mathcal{D}^{\theta_{1}}$, for any $X,Y \in \mathcal{D}^{\theta_{1}}$. So we obtain the assertion. \end{proof} \begin{theorem}
Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, the slant distribution $\mathcal{D}^{\theta_{2}}$ is integrable if and only if
\begin{equation*}
g(\mathcal{T}_{PX}FU-\mathcal{T}_{X}FPU+\mathcal{A}_{FX}FU,V)=g(\mathcal{T}_{PX}FV-\mathcal{T}_{X}FPV+\mathcal{A}_{FX}FV,U),
\end{equation*}
where $X \in \mathcal{D}^{\theta_{1}}$ and $U,V \in \mathcal{D}^{\theta_{2}}$. \end{theorem} \begin{proof}
Let $X \in \mathcal{D}^{\theta_{1}}$ and $U,V \in \mathcal{D}^{\theta_{2}}$. Then, from \eqref{GauWei2}, we get
\begin{eqnarray*} g([U,V],X)&=&g(\nabla_{U}V,X)-g(\nabla_{V}U,X)\\ &=&\csc^{2}\theta_{2}\big\{ g(\mathcal{T}_{PX}FV-\mathcal{T}_{X}FPV+\mathcal{A}_{FX}FV,U)\\ &-&g(\mathcal{T}_{PX}FU-\mathcal{T}_{X}FPU+\mathcal{A}_{FX}FU,V)\big\}.
\end{eqnarray*} So, the assertion is obtained. \end{proof} \subsection{Totally and Mixed Geodesicness} In this section, we investigate the geometry of the fibers, vertical distribution and horizontal distribution for a bi-slant submersion. \begin{theorem}\label{GEOD1}
Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, the slant distribution $\mathcal{D}^{\theta_{1}}$ defines a totally geodesic foliation on $ker\pi_{*}$ if and only if the following condition holds;
\begin{equation}\label{geod1}
g(\mathcal{T}_{PU}FY-\mathcal{T}_{U}FPY+\mathcal{A}_{FU}FY,X)=0,
\end{equation}
where $X,Y \in \mathcal{D}^{\theta_{1}}$ and $U \in \mathcal{D}^{\theta_{2}}$. \end{theorem} \begin{proof} Let $X,Y \in \mathcal{D}^{\theta_{1}}$ and $U \in \mathcal{D}^{\theta_{2}}$. From \eqref{testequation09} and \eqref{GauWei1}, we have \begin{eqnarray*} g(\hat{\nabla}_{X}Y,U)&=&g(\nabla_{X}Y,U)\\ &=&\csc^{2}\theta_{1}\,g(\mathcal{T}_{PU}FY-\mathcal{T}_{U}FPY+\mathcal{A}_{FU}FY,X). \end{eqnarray*} So, the slant distribution $\mathcal{D}^{\theta_{1}}$ defines a totally geodesic foliation on $ker\pi_{*}$ if and only if $\hat{\nabla}_{X}Y \in \mathcal{D}^{\theta_{1}}$, i.e., $g(\mathcal{T}_{PU}FY-\mathcal{T}_{U}FPY+\mathcal{A}_{FU}FY,X)=0$. \end{proof} \begin{theorem}\label{GEOD2}
Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, the slant distribution $\mathcal{D}^{\theta_{2}}$ defines a totally geodesic foliation on $ker\pi_{*}$ if and only if the following condition holds;
\begin{equation}\label{geod2}
g(\mathcal{T}_{PX}FV-\mathcal{T}_{X}FPV+\mathcal{A}_{FX}FV,U)=0,
\end{equation}
where $X \in \mathcal{D}^{\theta_{1}}$ and $U,V \in \mathcal{D}^{\theta_{2}}$. \end{theorem} \begin{proof} Let $X$ be in $\mathcal{D}^{\theta_{1}}$ and $U$ and $V$ be in $\mathcal{D}^{\theta_{2}}$. Thus, with the help of \eqref{testequation09} and \eqref{GauWei2}, we obtain \begin{eqnarray*} g(\hat{\nabla}_{U}V,X)&=&g(\nabla_{U}V,X)\\ &=&\csc^{2}\theta_{2}\,g(\mathcal{T}_{PX}FV-\mathcal{T}_{X}FPV+\mathcal{A}_{FX}FV,U). \end{eqnarray*} Therefore, we obtain the assertion. \end{proof} In view of Theorem \ref{GEOD1} and Theorem \ref{GEOD2}, we have the following result. \begin{corollary}
Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, the vertical distribution $ker\pi_{*}$ is a locally product $M_{\mathcal{D}^{\theta_{1}}}\times M_{\mathcal{D}^{\theta_{2}}}$ if and only if \eqref{geod1} and \eqref{geod2} hold, where $M_{\mathcal{D}^{\theta_{1}}}$ and $M_{\mathcal{D}^{\theta_{2}}}$ are integral manifolds of the distributions $\mathcal{D}^{\theta_{1}}$ and $\mathcal{D}^{\theta_{2}}$, respectively. \end{corollary} \begin{theorem}\label{VERGEODESIC}
Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, $ker\pi_{*}$ defines a totally geodesic foliation if and only if
\begin{equation}\label{vergeodesic} \omega (\mathcal{T}_{W}PZ+\mathcal{A}_{FZ}W)+F(\hat{\nabla}_{W}PZ+\mathcal{T}_{W}FZ)=0, \end{equation} where $W,Z \in ker\pi_{*}$. \end{theorem} \begin{proof}
Let $W$ and $Z$ be in $ker\pi_{*}$. Then, from \eqref{testequation09}, \eqref{testequation11}, \eqref{e9}, \eqref{decompvervec} and \eqref{decomphorvec}, we obtain
\begin{eqnarray*} \nabla_{W}Z&=&-J\nabla_{W}JZ=-J(\nabla_{W}PZ+\nabla_{W}FZ)\\ &=&-J(\mathcal{T}_{W}PZ+\hat{\nabla}_{W}PZ+\mathcal{T}_{W}FZ+\mathcal{A}_{FZ}W)\\ &=&-\phi \mathcal{T}_{W}PZ-\omega \mathcal{T}_{W}PZ-P\hat{\nabla}_{W}PZ-F\hat{\nabla}_{W}PZ\\ & &-P\mathcal{T}_{W}FZ+F\mathcal{T}_{W}FZ-\phi \mathcal{A}_{FZ}W-\omega \mathcal{A}_{FZ}W.
\end{eqnarray*}
It is known that $ker\pi_{*}$ defines a totally geodesic foliation if and only if $\nabla_{W}Z \in ker\pi_{*}$, i.e. if and only if the horizontal part of the expression above vanishes. So, we get the assertion. \end{proof} \begin{theorem}\label{HORGEODESIC}
Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, $ker\pi_{*}^{\perp}$ defines a totally geodesic foliation if and only if
\begin{equation}\label{horgeodesic}
\phi (\mathcal{A}_{\xi}\phi \eta +\mathcal{H}\nabla_{\xi}\omega \eta)+P(\mathcal{A}_{\xi}\omega \eta+\mathcal{V}\nabla_{\xi}\phi \eta)=0
\end{equation}
for any $\xi , \eta \in ker\pi_{*}^{\perp}$. \end{theorem} \begin{proof}
Let $\xi , \eta \in ker\pi_{*}^{\perp}$. With the help of the equations \eqref{testequation}, \eqref{testequation111}, \eqref{e9}, \eqref{decompvervec} and \eqref{decomphorvec}, we get
\begin{eqnarray*} \nabla_{\xi}\eta&=&-J\nabla_{\xi}J\eta=-J(\nabla_{\xi}\phi \eta+\nabla_{\xi}\omega \eta)\\ &=&-J(\mathcal{A}_{\xi} \phi \eta +\mathcal{V}\nabla_{\xi}\phi \eta +\mathcal{H}\nabla_{\xi}\omega \eta+\mathcal{A}_{\xi}\omega \eta)\\ &=& -\phi \mathcal{A}_{\xi} \phi \eta - \omega \mathcal{A}_{\xi} \phi \eta - P\mathcal{V}\nabla_{\xi}\phi \eta -F\mathcal{V}\nabla_{\xi}\phi \eta\\ &-& \phi \mathcal{H}\nabla_{\xi}\omega \eta - \omega \mathcal{H}\nabla_{\xi}\omega \eta - P \mathcal{A}_{\xi}\omega \eta- F \mathcal{A}_{\xi}\omega \eta.
\end{eqnarray*}
Therefore, from the last equation, $ker\pi_{*}^{\perp}$ defines a totally geodesic foliation if and only if $\phi (\mathcal{A}_{\xi}\phi \eta +\mathcal{H}\nabla_{\xi}\omega \eta)+P(\mathcal{A}_{\xi}\omega \eta+\mathcal{V}\nabla_{\xi}\phi \eta)=0$. \end{proof} In view of Theorem \ref{VERGEODESIC} and Theorem \ref{HORGEODESIC}, we give the following result. \begin{corollary} Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, the following three statements are equivalent: \begin{eqnarray*}
&\textbf{(i)}& \text{M is a locally product } M_{ker\pi_{*}} \times M_{ker\pi_{*}^{\perp}}, \\
&\textbf{(ii)}& \pi \text{ is a totally geodesic map}, \\
&\textbf{(iii)}& \eqref{vergeodesic} \text{ and } \eqref{horgeodesic} \text{ hold}, \end{eqnarray*} where $M_{ker\pi_{*}}$ and $M_{ker\pi_{*}^{\perp}}$ are integral manifolds of the distributions $ker\pi_{*}$ and $ker\pi_{*}^{\perp}$, respectively. \end{corollary} \subsection{Parallelism of Canonical Structures} In this section, we investigate the parallelism of the canonical structures for a bi-slant submersion.\\ Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, we define \begin{eqnarray} (\nabla_{W}P)Z&=&\hat{\nabla}_{W}PZ-P\hat{\nabla}_{W}Z, \label{pparallel}\\ (\nabla_{W}F)Z&=&\mathcal{H}\nabla_{W}FZ-F\hat{\nabla}_{W}Z,\label{fparallel}\\ (\nabla_{W}\phi)\xi &=&\hat{\nabla}_{W}\phi \xi-\phi \mathcal{H}\nabla_{W}\xi, \label{fiparallel}\\ (\nabla_{W}\omega)\xi &=&\mathcal{H}\nabla_{W}\omega \xi - \omega \mathcal{H}\nabla_{W}\xi, \label{omegaparallel} \end{eqnarray} where $W,Z \in ker\pi_{*}$ and $\xi \in ker\pi_{*}^{\perp}$. Then, it is said that \begin{itemize}
\item $P$ is \textit{parallel} $\Leftrightarrow$ $\nabla P\equiv 0$,
\item $F$ is \textit{parallel} $\Leftrightarrow$ $\nabla F\equiv 0$,
\item $\phi$ is \textit{parallel} $\Leftrightarrow$ $\nabla \phi \equiv 0$,
\item $\omega$ is \textit{parallel} $\Leftrightarrow$ $\nabla \omega \equiv 0$. \end{itemize} In the view of Lemma \ref{lemmagenel} and \eqref{pparallel}$\sim$\eqref{omegaparallel}, we have the following lemma. \begin{lemma}\label{paralleldefn} Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, for any $W,Z \in ker\pi_{*}$ and $\xi \in ker\pi_{*}^{\perp}$, we get \begin{eqnarray} (\nabla_{W}P)Z&=&\phi \mathcal{T}_{W}Z-\mathcal{T}_{W}FZ,\label{pparallel2}\\ (\nabla_{W}F)Z&=&\omega \mathcal{T}_{W}Z-\mathcal{T}_{W}PZ, \label{fparallel2}\\ (\nabla_{W}\phi)\xi &=&P\mathcal{T}_{W}\xi - \mathcal{T}_{W}\omega \xi,\label{fiparallel2}\\ (\nabla_{W}\omega)\xi &=& F \mathcal{T}_{W}\xi - \mathcal{T}_{W}\phi \xi\label{omegaparallel2}. \end{eqnarray} \end{lemma} \begin{theorem} Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, $F$ is parallel if and only if $\phi$ is parallel. \end{theorem} \begin{proof}
Let $F$ be parallel. Then, for any $W,Z \in ker\pi_{*}$ and $\xi \in ker\pi_{*}^{\perp}$, from \eqref{fparallel2} we have $\omega \mathcal{T}_{W}Z=\mathcal{T}_{W}PZ$. By using \eqref{e9}, \eqref{decompvervec} and the fundamental properties of the O'Neill tensor $\mathcal{T}$, we get
\begin{eqnarray*} g(P\mathcal{T}_{W}\xi,Z)&=&g(J\mathcal{T}_{W}\xi,Z)=-g(\mathcal{T}_{W}\xi,JZ)\\ &=&-g(\mathcal{T}_{W}\xi,PZ)=g(\mathcal{T}_{W}PZ, \xi).
\end{eqnarray*} Since $F$ is parallel, we obtain \begin{eqnarray*} g(P\mathcal{T}_{W}\xi,Z)&=&g(\mathcal{T}_{W}PZ, \xi)=g(\omega \mathcal{T}_{W}Z, \xi)\\ &=&g(J\mathcal{T}_{W}Z, \xi)=-g(\mathcal{T}_{W}Z, \omega \xi)=g(\mathcal{T}_{W} \omega \xi,Z). \end{eqnarray*} So, for any $Z \in ker\pi_{*}$ we have $g(P\mathcal{T}_{W}\xi,Z)=g(\mathcal{T}_{W} \omega \xi,Z)$, i.e. $(\nabla_{W}\phi)\xi=0$ by \eqref{fiparallel2}, and hence $\phi$ is parallel. The converse can be shown in the same way. \end{proof} Given any two distributions $\mathcal{D}^{\theta_{1}}$ and $\mathcal{D}^{\theta_{2}}$ defined on the fibers of a Riemannian submersion, the fibers are said to be \textit{$\mathcal{D}^{\theta_{1}}\!-\!\mathcal{D}^{\theta_{2}}$-mixed geodesic} if $\mathcal{T}_{X}U=0$ for any $X \in \mathcal{D}^{\theta_{1}}$ and $U \in \mathcal{D}^{\theta_{2}}$. \begin{theorem} Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$ with parallel canonical structure $F$. Then, the fibers are $\mathcal{D}^{\theta_{1}}\!-\!\mathcal{D}^{\theta_{2}}$-mixed geodesic. \end{theorem} \begin{proof}
Let $X$ be in $\mathcal{D}^{\theta_{1}}$ and $U$ in $\mathcal{D}^{\theta_{2}}$. Then, from Lemma \ref{general}-(b) and \eqref{fparallel2}, we obtain
\begin{equation*}
\omega^{2}\mathcal{T}_{X}U=\omega(\omega \mathcal{T}_{X}U)=\omega \mathcal{T}_{X}PU=\mathcal{T}_{X}P^{2}U=-\cos^{2}\theta_{2}\mathcal{T}_{X}U.
\end{equation*}
On the other hand, from Lemma \ref{general}-(a) and \eqref{fparallel2}, we get
\begin{equation*}
\omega^{2}\mathcal{T}_{X}U=\omega^{2}\mathcal{T}_{U}X=\omega(\mathcal{T}_{U}PX)=\mathcal{T}_{U}P^{2}X=-\cos^{2}\theta_{1}\mathcal{T}_{U}X.
\end{equation*}
Therefore, we obtain
\begin{equation*}
-\cos^{2}\theta_{2}\mathcal{T}_{X}U=-\cos^{2}\theta_{1}\mathcal{T}_{X}U.
\end{equation*} Since $\cos^{2}\theta_{2}\mathcal{T}_{X}U=\cos^{2}\theta_{1}\mathcal{T}_{X}U$ and $\theta_{1}\neq\theta_{2}$, we have $\mathcal{T}_{X}U=0$. That implies that the fibers are $\mathcal{D}^{\theta_{1}}\!-\!\mathcal{D}^{\theta_{2}}$-mixed geodesic. \end{proof} \section{Curvature Relations} In this section, the sectional curvatures of the total space, the base space and the fibers of a bi-slant submersion are investigated.\\
Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. We denote the Riemannian curvature tensors of $M$, $N$ and any fiber of the submersion with $R$, $R^{*}$ and $\hat{R}$, respectively. Also, we denote the sectional curvatures of $M$, $N$ and any fiber of the submersion with $K$, $K^{*}$ and $\hat{K}$, respectively. It is known that the sectional curvature for a Riemannian submersion is defined, for any pair of non-zero orthogonal vectors $U$ and $V$, by \cite{O} \begin{equation}\label{sectionalcurvature} K(U,V)= \frac{R(U,V,V,U)}{g(U,U)g(V,V)}. \end{equation} For any $e_{1},e_{2},e_{3},e_{4}\in ker\pi_{*}$ and $E_{1}, E_{2}, E_{3}, E_{4} \in ker\pi^{\perp}_{*}$ the Riemannian curvature tensor $R$ is given by \cite{O} \begin{eqnarray} R(e_{1},e_{2},e_{3},e_{4})&=&\hat{R}(e_{1},e_{2},e_{3},e_{4})-g(\mathcal{T}_{e_{1}}e_{4},\mathcal{T}_{e_{2}}e_{3})\nonumber \\ & &+g(\mathcal{T}_{e_{2}}e_{4},\mathcal{T}_{e_{1}}e_{3}), \label{r1} \end{eqnarray} \begin{eqnarray} R(e_{1},e_{2},e_{3},E_{1})&=&g((\nabla_{e_{1}}\mathcal{T})(e_{2},e_{3}),E_{1})-g((\nabla_{e_{2}}\mathcal{T})(e_{1},e_{3}),E_{1}),\label{r2} \end{eqnarray} \begin{eqnarray} R(E_{1},E_{2},E_{3},e_{1})&=&-g((\nabla_{E_{3}}\mathcal{A})(E_{1},E_{2}),e_{1})-g(\mathcal{A}_{E_{1}}E_{2},\mathcal{T}_{e_{1}}E_{3})\nonumber \\ & &+g(\mathcal{A}_{E_{2}}E_{3},\mathcal{T}_{e_{1}}E_{1})+g(\mathcal{A}_{E_{3}}E_{1},\mathcal{T}_{e_{1}}E_{2}),\label{r3} \end{eqnarray} \begin{eqnarray} R(E_{1},E_{2},E_{3},E_{4})&=&R^{*}(E_{1},E_{2},E_{3},E_{4})+2g(\mathcal{A}_{E_{1}}E_{2},\mathcal{A}_{E_{3}}E_{4})\nonumber \\ & &-g(\mathcal{A}_{E_{2}}E_{3},\mathcal{A}_{E_{1}}E_{4})+g(\mathcal{A}_{E_{1}}E_{3},\mathcal{A}_{E_{2}}E_{4}),\label{r4} \end{eqnarray} \begin{eqnarray} R(E_{1},E_{2},e_{1},e_{2})&=&-g((\nabla_{e_{1}}\mathcal{A})(E_{1},E_{2}),e_{2})+g((\nabla_{e_{2}}\mathcal{A})(E_{1},E_{2}),e_{1})\nonumber \\ & &-g(\mathcal{A}_{E_{1}}e_{1},\mathcal{A}_{E_{2}}e_{2})+g(\mathcal{A}_{E_{1}}e_{2},\mathcal{A}_{E_{2}}e_{1})\nonumber \\ & &+g(\mathcal{T}_{e_{1}}E_{1},\mathcal{T}_{e_{2}}E_{2})-g(\mathcal{T}_{e_{2}}E_{1},\mathcal{T}_{e_{1}}E_{2}),\label{r5} \end{eqnarray} \begin{eqnarray} R(E_{1},e_{1},E_{2},e_{2})&=&-g((\nabla_{E_{1}}\mathcal{T})(e_{1},e_{2}),E_{2})-g((\nabla_{e_{1}}\mathcal{A})(E_{1},E_{2}),e_{2})\nonumber \\ & &+g(\mathcal{T}_{e_{1}}E_{1},\mathcal{T}_{e_{2}}E_{2})-g(\mathcal{A}_{E_{1}}e_{1},\mathcal{A}_{E_{2}}e_{2}),\label{r6} \end{eqnarray} where $R$, $R^{*}$ and $\hat{R}$ are the Riemannian curvature tensors of $M$, $N$ and the fiber, respectively.\\ Furthermore, let $\pi$ be a Riemannian submersion from a Riemannian manifold $M$ onto a Riemannian manifold $N$. Then, the following relations hold \cite{O}: \begin{eqnarray}
K(e_{1},e_{2})=\hat{K}(e_{1},e_{2})-g(\mathcal{T}_{e_{1}}e_{1},\mathcal{T}_{e_{2}}e_{2})+\|\mathcal{T}_{e_{1}}e_{2}\|^{2},\label{curv1} \end{eqnarray} \begin{eqnarray}
K(E_{1},e_{1})=g((\nabla_{E_{1}}\mathcal{T})(e_{1},e_{1}),E_{1})+\|\mathcal{A}_{E_{1}}e_{1}\|^{2}-\|\mathcal{T}_{e_{1}}E_{1}\|^{2},\label{curv2} \end{eqnarray} \begin{eqnarray}
K(E_{1},E_{2})=K^{*}(E_{1},E_{2})-3\|\mathcal{A}_{E_{1}}E_{2}\|^{2},\label{curv3} \end{eqnarray} where $e_{1},e_{2}\in ker\pi_{*}$ and $E_{1}, E_{2} \in ker\pi^{\perp}_{*}$ are orthonormal vector fields.
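As a quick illustration of \eqref{curv1}$\sim$\eqref{curv3} (a special case added only as a consistency check, not needed in the sequel): if the fibers are totally geodesic, i.e. $\mathcal{T}\equiv 0$, and the horizontal distribution is integrable, i.e. $\mathcal{A}\equiv 0$, then these relations reduce to
\begin{equation*}
K(e_{1},e_{2})=\hat{K}(e_{1},e_{2}),\qquad K(E_{1},e_{1})=0,\qquad K(E_{1},E_{2})=K^{*}(E_{1},E_{2}),
\end{equation*}
so that the fibers and the base carry exactly the sectional curvatures of the corresponding planes in $M$ and every mixed plane is flat, as expected when $M$ is a local Riemannian product.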
\begin{theorem}
Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, we obtain
\begin{eqnarray}\label{sect1}
K(e_{1},e_{2})&=&\hat{K}(Pe_{1},Pe_{2})\|Pe_{1}\|^{-2}\|Pe_{2}\|^{-2}+K^{*}(F{e_1},Fe_{2})\|Fe_{1}\|^{-2}\|Fe_{2}\|^{-2}\nonumber\\
& &-g(\mathcal{T}_{Pe_{1}}Pe_{1},\mathcal{T}_{Pe_{2}}Pe_{2})+\|\mathcal{A}_{Fe_{1}}Pe_{2}\|^{2}\nonumber\\
& &+g((\nabla_{Fe_{2}}\mathcal{T})(Pe_{1},Pe_{2}),Fe_{2})-\|\mathcal{T}_{Pe_{1}}Fe_{2}\|^{2}\nonumber\\
& &-3\|\mathcal{A}_{Fe_{1}}Fe_{2}\|^{2}+\|\mathcal{T}_{Pe_{2}}Pe_{1}\|^{2},
\end{eqnarray}
\begin{eqnarray}\label{sect2}
K(e_{1},E_{1})&=&\hat{K}(Pe_{1},\phi E_{1})\|Pe_{1}\|^{-2}\|\phi E_{1}\|^{-2}+K^{*}(Fe_{1},\omega E_{1})\|Fe_{1}\|^{-2}\|\omega E_{1}\|^{-2}\nonumber\\
& &-\|\mathcal{T}_{\phi E_{1}}Pe_{1}\|^{2}-\|\mathcal{T}_{P e_{1}}\omega E_{1}\|^{2}-3\|\mathcal{A}_{Fe_{1}}\omega E_{1}\|^{2} \nonumber \\
& &+ \|\mathcal{A}_{\omega E_{1}}Pe_{1}\|^{2}-\|\mathcal{T}_{\phi E_{1}}Fe_{1}\|^{2}-g(\mathcal{T}_{Pe_{1}}Pe_{1},\mathcal{T}_{\phi E_{1}}\phi E_{1}) \nonumber \\
& & +\|\mathcal{A}_{Fe_{1}}\phi E_{1}\|^{2}+g((\nabla_{\omega E_{1}}\mathcal{T})(Pe_{1},Pe_{1}),\omega E_{1}) \nonumber \\ & & +g((\nabla_{Fe_{1}}\mathcal{T})(\phi E_{1},\phi E_{1}),Fe_{1}), \end{eqnarray} \begin{eqnarray}\label{sect3}
K(E_{1},E_{2})&=&\hat{K}(\phi E_{1},\phi E_{2})\|\phi E_{1}\|^{-2}\|\phi E_{2}\|^{-2}+K^{*}(\omega E_{1},\omega E_{2})\|\omega E_{1}\|^{-2}\|\omega E_{2}\|^{-2}\nonumber \\
& &+\|\mathcal{T}_{\phi E_{2}}\phi E_{1}\|^{2}-g(\mathcal{T}_{\phi E_{1}}\phi E_{1},\mathcal{T}_{\phi E_{2}}\phi E_{2}) \nonumber \\
& & +g((\nabla_{\omega E_{2}}\mathcal{T})(\phi E_{1},\phi E_{2}),\omega E_{2})-\|\mathcal{T}_{\phi E_{1}}\omega E_{2}\|^{2}\nonumber \\
& & +\|\mathcal{A}_{\omega E_{2}}\phi E_{1}\|^{2}+g((\nabla_{\omega E_{1}}\mathcal{T})(\phi E_{2},\phi E_{2}),\omega E_{1})\nonumber \\
& & -\|\mathcal{T}_{\phi E_{2}}\omega E_{1}\|^{2}+\|\mathcal{A}_{\omega E_{1}}\phi E_{2}\|^{2}-3\|\mathcal{A}_{\omega E_{1}}\omega E_{2}\|^{2}. \end{eqnarray} \end{theorem} \begin{proof} Let $e_{1},e_{2}\in ker\pi_{*}$ and $E_{1}, E_{2} \in ker\pi^{\perp}_{*} $ be orthonormal vector fields. Then, by the fact that $K(e_{1},e_{2})=K(Je_{1},Je_{2})$, \eqref{decompvervec} and \eqref{decomphorvec}, we get \begin{eqnarray*} K(e_{1},e_{2})=K(Je_{1},Je_{2})&=&K(Pe_{1},Pe_{2})+K(Pe_{1},Fe_{2})\\ & &+K(Fe_{1},Pe_{2})+K(Fe_{1},Fe_{2}). \end{eqnarray*} By the definition of the sectional curvature, we obtain \begin{eqnarray*} \Rightarrow K(e_{1},e_{2})&=& R(Pe_{1},Pe_{2},Pe_{2},Pe_{1})+R(Pe_{1},Fe_{2},Fe_{2},Pe_{1})\\ & &+R(Fe_{1},Pe_{2},Pe_{2},Fe_{1})+R(Fe_{1},Fe_{2},Fe_{2},Fe_{1}). \end{eqnarray*} Thus, with the help of \eqref{r1}$\sim$\eqref{r6}, we have \begin{eqnarray*}
\Rightarrow K(e_{1},e_{2})&=&\hat{R}(Pe_{1},Pe_{2},Pe_{2},Pe_{1})-g(\mathcal{T}_{Pe_{1}}Pe_{1},\mathcal{T}_{Pe_{2}}Pe_{2})+\|\mathcal{T}_{Pe_{1}}Pe_{2}\|^{2}\\
& & +g((\nabla_{Fe_{2}}\mathcal{T})(Pe_{1},Pe_{1}),Fe_{2})-\|\mathcal{T}_{Pe_{1}}Fe_{2}\|^{2}+\|\mathcal{A}_{Fe_{2}}Pe_{1}\|^{2}\\
& &+g((\nabla_{Fe_{1}}\mathcal{T})(Pe_{2},Pe_{2}),Fe_{1})-\|\mathcal{T}_{Pe_{2}}Fe_{1}\|^{2}+\|\mathcal{A}_{Fe_{1}}Pe_{2}\|^{2}\\
& & +R^{*}(Fe_{1},Fe_{2},Fe_{2},Fe_{1})-3\|\mathcal{A}_{Fe_{1}}Fe_{2}\|^{2}. \end{eqnarray*} Since \begin{equation*}
\hat{R}(Pe_{1},Pe_{2},Pe_{2},Pe_{1})=\hat{K}(Pe_{1},Pe_{2})\|Pe_{1}\|^{-2}\|Pe_{2}\|^{-2} \end{equation*} and \begin{equation*}
R^{*}(Fe_{1},Fe_{2},Fe_{2},Fe_{1})=K^{*}(Fe_{1},Fe_{2})\|Fe_{1}\|^{-2}\|Fe_{2}\|^{-2}, \end{equation*} \eqref{sect1} is obtained. \eqref{sect2} and \eqref{sect3} can be obtained in a similar way. \end{proof} Now, we give some inequalities for the sectional curvatures of the total manifold, the base manifold and the fibers. \begin{corollary}
Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, we have\\ \[
\begin{array}{ccc}
\hat{K}(Pe_{1},Pe_{2})\|Pe_{1}\|^{-2}\|Pe_{2}\|^{-2} & &g(\mathcal{T}_{Pe_{1}}Pe_{1},\mathcal{T}_{Pe_{2}}Pe_{2}) \\
+K^{*}(F{e_1},Fe_{2})\|Fe_{1}\|^{-2}\|Fe_{2}\|^{-2}& \leq & +\|\mathcal{T}_{Pe_{1}}Fe_{2}\|^{2}, \\
-\hat{K}(e_{1},e_{2}) & & \\
\end{array} \] \end{corollary} \begin{proof}
Let $e_{1},e_{2}\in ker\pi_{*}$ be orthonormal vector fields. Then, by \eqref{curv1} and \eqref{sect1}, we get
\begin{eqnarray*}
\hat{K}(e_{1},e_{2})-g(\mathcal{T}_{e_{1}}e_{1},\mathcal{T}_{e_{2}}e_{2})+\|\mathcal{T}_{e_{1}}e_{2}\|^{2}&=&\hat{K}(Pe_{1},Pe_{2})\|Pe_{1}\|^{-2}\|Pe_{2}\|^{-2}\nonumber \\
& &+K^{*}(F{e_1},Fe_{2})\|Fe_{1}\|^{-2}\|Fe_{2}\|^{-2} \nonumber \\ & & -g(\mathcal{T}_{Pe_{1}}Pe_{1},\mathcal{T}_{Pe_{2}}Pe_{2})\nonumber \\
& &+\|\mathcal{A}_{Fe_{1}}Pe_{2}\|^{2} -\|\mathcal{T}_{Pe_{1}}Fe_{2}\|^{2}\nonumber \\ & &+g((\nabla_{Fe_{2}}\mathcal{T})(Pe_{1},Pe_{2}),Fe_{2})\nonumber \\
& & -3\|\mathcal{A}_{Fe_{1}}Fe_{2}\|^{2}+\|\mathcal{T}_{Pe_{2}}Pe_{1}\|^{2}.
\end{eqnarray*} Thus, we obtain the assertion. \end{proof} \begin{corollary}
Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, \[
\begin{array}{ccc}
\hat{K}(Pe_{1},\phi E_{1}) & & g((\nabla_{E_{1}}\mathcal{T})(e_{1},e_{1}),E_{1})+\|\mathcal{A}_{E_{1}}e_{1}\|^{2}\\
+K^{*}(Fe_{1},\omega E_{1}) & \leq &+\|\mathcal{T}_{P e_{1}}\omega E_{1}\|^{2}+\|\mathcal{T}_{\phi E_{1}}Pe_{1}\|^{2} \\
& & +3\|\mathcal{A}_{Fe_{1}}\omega E_{1}\|^{2}+g(\mathcal{T}_{Pe_{1}}Pe_{1},\mathcal{T}_{\phi E_{1}}\phi E_{1}), \\
\end{array}
\]
where $e_{1}\in ker\pi_{*}$ and $E_{1} \in ker\pi^{\perp}_{*}$ are orthonormal vector fields. \end{corollary} \begin{proof}
Let $e_{1}\in ker\pi_{*}$ and $E_{1} \in ker\pi^{\perp}_{*} $ be orthonormal vector fields. Then, by \eqref{curv2} and \eqref{sect2}, we have
\begin{eqnarray*}
g((\nabla_{E_{1}}\mathcal{T})(e_{1},e_{1}),E_{1})+\|\mathcal{A}_{E_{1}}e_{1}\|^{2}&=&\hat{K}(Pe_{1},\phi E_{1})\|Pe_{1}\|^{-2}\|\phi E_{1}\|^{-2}\nonumber \\
& & +K^{*}(Fe_{1},\omega E_{1})\|Fe_{1}\|^{-2}\|\omega E_{1}\|^{-2} \nonumber \\
& & -\|\mathcal{T}_{\phi E_{1}}Pe_{1}\|^{2}-\|\mathcal{T}_{P e_{1}}\omega E_{1}\|^{2}\nonumber \\
& &-3\|\mathcal{A}_{Fe_{1}}\omega E_{1}\|^{2}+\|\mathcal{A}_{\omega E_{1}}Pe_{1}\|^{2} \nonumber \\
& &-\|\mathcal{T}_{\phi E_{1}}Fe_{1}\|^{2}-g(\mathcal{T}_{Pe_{1}}Pe_{1},\mathcal{T}_{\phi E_{1}}\phi E_{1})\nonumber \\
& &+\|\mathcal{A}_{Fe_{1}}\phi E_{1}\|^{2}+\|\mathcal{T}_{e_{1}}E_{1}\|^{2}\nonumber \\ & &+g((\nabla_{\omega E_{1}}\mathcal{T})(Pe_{1},Pe_{1}),\omega E_{1}) \nonumber \\ & &+g((\nabla_{Fe_{1}}\mathcal{T})(\phi E_{1},\phi E_{1}),Fe_{1}).
\end{eqnarray*} Therefore, the assertion is obtained. \end{proof} \begin{corollary} Let $\pi$ be a bi-slant submersion from a Kaehlerian manifold $(M,g,J)$ onto a Riemannian manifold $(N,g_{N})$. Then, we obtain \[
\begin{array}{ccc}
\hat{K}(\phi E_{1},\phi E_{2})\|\phi E_{1}\|^{-2}\|\phi E_{2}\|^{-2} & & g(\mathcal{T}_{\phi E_{1}}\phi E_{1},\mathcal{T}_{\phi E_{2}}\phi E_{2})+\|\mathcal{T}_{\phi E_{1}}\omega E_{2}\|^{2} \\
+K^{*}(\omega E_{1},\omega E_{2})\|\omega E_{1}\|^{-2}\|\omega E_{2}\|^{-2} & \leq & + \|\mathcal{T}_{\phi E_{2}}\omega E_{1}\|^{2}+3\|\mathcal{A}_{\omega E_{1}}\omega E_{2}\|^{2} \\
-K^{*}(E_{1},E_{2}) & & \\
\end{array} \] \end{corollary} \begin{proof}
Let $E_{1}, E_{2} \in ker\pi^{\perp}_{*} $ be orthonormal vector fields. From \eqref{curv3} and \eqref{sect3}, we get
\begin{eqnarray*}
K^{*}(E_{1},E_{2})-3\|\mathcal{A}_{E_{1}}E_{2}\|^{2}&=&\hat{K}(\phi E_{1},\phi E_{2})\|\phi E_{1}\|^{-2}\|\phi E_{2}\|^{-2}\nonumber \\
& &+K^{*}(\omega E_{1},\omega E_{2})\|\omega E_{1}\|^{-2}\|\omega E_{2}\|^{-2}\nonumber \\
& &+\|\mathcal{T}_{\phi E_{2}}\phi E_{1}\|^{2}-g(\mathcal{T}_{\phi E_{1}}\phi E_{1},\mathcal{T}_{\phi E_{2}}\phi E_{2})\nonumber \\
& &+g((\nabla_{\omega E_{2}}\mathcal{T})(\phi E_{1},\phi E_{2}),\omega E_{2})-\|\mathcal{T}_{\phi E_{1}}\omega E_{2}\|^{2}\nonumber \\
& &+\|\mathcal{A}_{\omega E_{2}}\phi E_{1}\|^{2}+g((\nabla_{\omega E_{1}}\mathcal{T})(\phi E_{2},\phi E_{2}),\omega E_{1}) \nonumber \\
& &-\|\mathcal{T}_{\phi E_{2}}\omega E_{1}\|^{2}+\|\mathcal{A}_{\omega E_{1}}\phi E_{2}\|^{2}-3\|\mathcal{A}_{\omega E_{1}}\omega E_{2}\|^{2}.
\end{eqnarray*} Hence, the assertion is obtained. \end{proof}
\end{document}
\begin{document}
\newcommand{\begin{equation}}{\begin{equation}} \newcommand{\end{equation}}{\end{equation}}
\title{Comment on ``On Visibility in the Afshar Two-Slit Experiment"}
\author{Tabish Qureshi} \institute{Department of Physics, Jamia Millia Islamia\\ New Delhi-110025, India.\\ \email{[email protected]}}
\maketitle
\begin{abstract}
Recently Kastner has analyzed the issue of visibility in a modified two-slit experiment carried out by Afshar et al, which has been a subject of much debate. Kastner describes a thought experiment which is claimed to show interference with one hundred percent visibility together with ``apparent'' which-slit information. We argue that this thought experiment does not show interference at all, and is thus not applicable to the Afshar experiment.
\keywords{Complementarity \and Two-slit experiment \and Wave-particle duality} \PACS{PACS 03.65.Ud ; 03.65.Ta} \end{abstract}
An experiment which claims to violate Bohr's complementarity principle, proposed and carried out by Afshar et al \cite{afsharfp}, is a subject of current debate. Basically, it consists of a standard two-slit experiment, with a converging lens behind the conventional screen for obtaining the interference pattern. If the screen is removed, the light passes through the lens and produces two images of the slits, which are captured on two detectors $D_A$ and $D_B$, respectively. Opening only slit $A$ results in only detector $D_A$ clicking, and opening only slit $B$ leads to only $D_B$ clicking. Afshar argues that the detectors $D_A$ and $D_B$ yield information about which slit, $A$ or $B$, the particle initially passed through. If one places a screen before the lens, the interference pattern is visible.
Conventionally, if one tries to observe the interference pattern, one cannot get the which-way information. Afshar has a clever scheme for establishing the existence of the interference pattern without actually observing it. First the exact location of the dark fringes are noted by observing the interference pattern. Then, thin wires are placed in the exact locations of the dark fringes. The argument is that if the interference pattern exists, sliding in wires through the dark fringes will not affect the intensity of light on the two detectors. If the interference pattern is not there, some photons are bound to hit the wires, and get scattered, thus reducing the photon count at the two detectors. This way, the existence of the interference pattern can be established without actually disturbing the photons in any way. Afshar et al carried out the experiment and found that sliding in wires in the expected locations of the dark fringes, doesn't lead to any significant reduction of intensity at the detectors. Hence they claim that they have demonstrated a violation of complementarity.
Recently, Kastner has addressed the issue of interference visibility in the Afshar experiment \cite{kastner09}. Kastner believes that the essence of the Afshar experiment is captured by a thought experiment discussed by Srikanth \cite{srikanth} in the context of complementarity. Kastner analyzed this two-slit experiment in which there is an additional internal degree of freedom of the detector elements
(which can be considered a “vibrational” component). The particle + detector state evolves from the slits to the final screen with initial detector state $|0\rangle$. The detector spatial basis states
$|\phi_x\rangle$ and vibrational basis states $|v_U\rangle$ and $|v_L\rangle$ (corresponding to the particle passing through the upper and lower slit, respectively) are activated. This evolution, from the initial state to the detected particle, is given by \begin{equation}
{1\over \sqrt{2}}(|U\rangle+|L\rangle)|0\rangle
\rightarrow \sum_x |x\rangle \left\{a_x |\phi_x\rangle|v_U\rangle +
b_x |\phi_x\rangle|v_L\rangle\right\}, \end{equation}
where amplitudes $a_x$ and $b_x$ depend on wave number, distance, and slit of origin, and $|x\rangle$ are final particle basis states. Upon detection at a particular location $x$, one term remains from the sum on the right-hand side of (1): \begin{equation}
|x\rangle \left\{a_x |\phi_x\rangle|v_U\rangle +
b_x |\phi_x\rangle|v_L\rangle\right\}. \end{equation}
Kastner argues that the result of this experiment is even more dramatic than that of the Afshar experiment, because the visibility is one hundred percent since a fully articulated interference pattern has been irreversibly recorded, not just indicated indirectly, and yet a measurement can be performed later that seems to reveal ``which slit'' the photon went through.
However, this argument is not correct, as can be seen from the following. Suppose there were no ``vibrational states''; then the term which remains from the sum in (1) would be given by \begin{equation}
|x\rangle \left\{a_x |\phi_x\rangle + b_x |\phi_x\rangle\right\}. \end{equation} The probability density of detecting the particle at position $x$ is then given by \begin{equation}
P(x) = \left\{|a_x|^2 + |b_x|^2 + a_x^*b_x + a_xb_x^* \right\}
\langle\phi_x|\phi_x\rangle, \end{equation} where the last two terms in the curly brackets denote interference.
On the other hand, the probability density of detecting the particle at position $x$, in the presence of the ``vibrational states'', is given by \begin{eqnarray}
P(x) &=& \{|a_x|^2\langle v_U|v_U\rangle + |b_x|^2\langle v_L|v_L\rangle
+ a_x^*b_x\langle v_U|v_L\rangle + a_xb_x^*\langle v_L|v_U\rangle \}
\langle\phi_x|\phi_x\rangle \nonumber\\
&=& \left\{|a_x|^2 + |b_x|^2 \right\} \langle\phi_x|\phi_x\rangle, \end{eqnarray}
where the interference terms are killed by the orthogonality of $|v_U\rangle$
and $|v_L\rangle$.
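The loss of visibility can also be illustrated numerically. The following minimal sketch is added only for illustration (the plane-wave form of the two-slit amplitudes $a_x$, $b_x$ and the sampled values of the overlap $\gamma=\langle v_U|v_L\rangle$ are assumptions of the sketch, not part of the argument above): it evaluates $P(x)\propto |a_x|^2+|b_x|^2+2\,\mathrm{Re}(a_x^{*}b_x\,\gamma)$ and prints the resulting fringe visibility.
\begin{verbatim}
import numpy as np

# Illustrative two-slit amplitudes at screen position x: equal moduli,
# relative phase growing linearly across the screen (assumed model).
x = np.linspace(-1.0, 1.0, 2001)
phase = 20.0 * np.pi * x
a = np.exp(+1j * phase / 2) / np.sqrt(2.0)
b = np.exp(-1j * phase / 2) / np.sqrt(2.0)

def detection_probability(gamma):
    # P(x) with vibrational-state overlap gamma = <v_U|v_L>
    return np.abs(a)**2 + np.abs(b)**2 + 2.0 * np.real(np.conj(a) * b * gamma)

for gamma in (1.0, 0.5, 0.0):   # gamma = 0: orthogonal |v_U>, |v_L>
    P = detection_probability(gamma)
    visibility = (P.max() - P.min()) / (P.max() + P.min())
    print("overlap = %.1f  ->  visibility = %.2f" % (gamma, visibility))
\end{verbatim}
The printed visibility equals $|\gamma|$: for orthogonal vibrational states the fringes disappear completely, while a partial overlap restores them only at the price of incomplete which-way information, in line with the discussion above.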
So, contrary to the claim in \cite{kastner09}, this experiment does not show any interference, although the ``vibrational states'' do provide which-way information. This is in perfect agreement with Bohr's complementarity principle. It can show interference if $|v_U\rangle$
and $|v_L\rangle$ are not strictly orthogonal. However, in that case one cannot extract any which-way information.
In conclusion, we have shown that the thought experiment described by Kastner does not show interference at all. What the experiment does show is that if which-way information exists in the state, there is no interference pattern on the screen, in agreement with Bohr's complementarity principle.
\end{document}
\begin{document}
\title{Effective Mass Dirac-Morse Problem with any $\kappa$-value}
\author{\small Altuğ Arda}
\email[E-mail: ]{[email protected]}\affiliation{Department of Physics Education, Hacettepe University, 06800, Ankara, Turkey} \author{\small Ramazan Sever} \email[E-mail: ]{[email protected]}\affiliation{Department of Physics, Middle East Technical University, 06531, Ankara, Turkey} \author{\small Cevdet Tezcan} \email[E-mail: ]{[email protected]}\affiliation{Faculty of Engineering, Başkent University, Bağlıca Campus, Ankara, Turkey} \author{\small H\"{u}seyin Akçay} \email[E-mail: ]{[email protected]}\affiliation{Faculty of Engineering, Başkent University, Bağlıca Campus, Ankara, Turkey}
\date{\today}
\begin{abstract}
The Dirac-Morse problem is investigated within the framework of an approximation to the term proportional to $1/r^2$ in the position-dependent mass formalism. The energy eigenvalues and corresponding wave functions are obtained by using the parametric generalization of the Nikiforov-Uvarov method for any $\kappa$-value. The approximate energy eigenvalues and corresponding wave functions are also studied in the constant-mass limit for the pseudospin and spin symmetry cases, respectively.\\ Keywords: generalized Morse potential, Dirac equation, Position-Dependent Mass, Nikiforov-Uvarov Method, Spin Symmetry, Pseudospin Symmetry \end{abstract}
\pacs{03.65.-w; 03.65.Ge; 12.39.Fd}
\maketitle
The investigation of the solutions for quantum mechanical systems having certain potentials in the case of position-dependent mass (PDM) [1, 2] has received great attention. Many authors have studied the solutions of different potentials for a spatially-dependent mass, such as hypergeometric type potentials [3], the Coulomb potential [4], and $PT$-symmetric kink-like and inversely linear plus linear potentials [5]. It is well known that the theory based on the effective-mass Schr\"{o}dinger equation is a useful ground for the investigation of physical systems such as semiconductor heterostructures [6], impurities in crystals [7-9], and the electric properties of quantum wells and quantum dots [10]. In the present work, we intend to solve the Dirac-Morse problem within the PDM formalism.
The pseudospin symmetry is an interesting result appearing in Dirac equation of a particle moving in an external scalar, and vector potentials in the case of it when the sum of the potentials is nearly zero. It was observed that the single particle states have a quasidegeneracy labeled with the quantum numbers $\tilde{\ell}$, and $\tilde{s}$, which are called the pseudo-orbital angular momentum, and pseudospin angular momentum quantum numbers, respectively [11-16]. The concept of pseudospin symmetry has received great attentions in nuclear theory because of being a ground to investigate deformation, and superdeformation in nuclei [17, 18], and to build an effective shell-model coupling scheme [19, 20]. The symmetry appears in that case, when the magnitude of scalar potential is nearly equal to the magnitude of vector potential with opposite sign [14, 21-25] and the Dirac equation has the pseudospin symmetry, when the sum of the vector, and scalar potentials is a constant, i.e., $\Sigma(r)=V_v(r)+V_s(r)=const.$ or $d\Sigma(r)/dr=0$ [16]. The spin symmetry is another important symmetry occurring in Dirac theory in the presence of external scalar, and vector potentials. The spin symmetry appears in the Dirac equation, when the difference of scalar, and vector potentials is a constant, i.e., $\Delta(r)=V_{v}(r)-V_{s}(r)=const.$ [14, 16].
Recently, the pseudospin and/or spin symmetry have been studied by many authors for various potentials, such as the Morse potential [26-28], the Woods-Saxon potential [29], the Coulomb [30] and harmonic potentials [31-33], the Eckart potential [34-36], the P\"{o}schl-Teller potential [37, 38], the Hulth\'{e}n potential [39], and the Kratzer potential [40]. In Ref. [41], the bound-state solutions of the Dirac equation are studied for the generalized Hulth\'{e}n potential with spin-orbit quantum number $\kappa$ in the position-dependent mass background. In this letter, we intend to show that the new scheme of the Nikiforov-Uvarov (NU) method can be used to find the energy spectra and the corresponding eigenspinors within the framework of an approximation to the term proportional to $1/r^2$ for an arbitrary spin-orbit quantum number $\kappa$, i.e. $\kappa\neq 0$, when the mass depends on position. The NU method is a powerful tool for solving a second order differential equation by turning it into a hypergeometric type equation [42].
Dirac equation for a spin-$\frac{1}{2}$ particle with mass $m$ moving in scalar $V_s(r)$, and vector potential $V_v(r)$ can be written as (in $\hbar=c=1$ unit) \begin{eqnarray} [\alpha\,.\,\textbf{P}+\beta(m+V_s(r))]\,\Psi_{n\kappa}(r)=[E-V_v(r)]\,\Psi_{n\kappa}(r)\,. \end{eqnarray} where $E$ is the relativistic energy of the particle, $\textbf{P}$ is three-momentum, $\alpha$ and $\beta$ are $4 \times 4$ Dirac matrices, which have the forms of $\alpha=\Bigg(\begin{array}{cc}
0 & \sigma \\ \sigma & 0 \end{array}\Bigg)$ and $\beta=\Bigg(\begin{array}{cc} I & 0 \\ 0 & -I \end{array}\Bigg)$, respectively [43]. Here, $\sigma$ is a three-vector whose components are the Pauli matrices and $I$ denotes the $2 \times 2$ unit matrix. $\textbf{J}$ denotes the total angular momentum, and $\hat{K}=-\beta(\sigma.\textbf{L}+1)$ corresponds to the spin-orbit operator of the Dirac particle in a spherically symmetric potential, where $\textbf{L}$ is the orbital angular momentum operator of the particle. The eigenvalues of the spin-orbit operator $\hat{K}$ are given as $\kappa=\pm(j+1/2)$, where $\kappa=-(j+1/2)<0$ corresponds to the aligned spin $j=\ell+1/2$, and $\kappa=(j+1/2)>0$ corresponds to the unaligned spin $j=\ell-1/2$. The total angular momentum quantum number of the particle is described as $j=\tilde{\ell}+\tilde{s}$\,, where $\tilde{\ell}=\ell+1$ is the pseudo-orbital angular momentum quantum number, and $\tilde{s}=1/2$ is the pseudospin angular momentum quantum number. For a given $\kappa=\pm1, \pm2, \ldots$, the relations between the spin-orbit quantum number $\kappa$ and the ``two'' orbital angular momentum quantum numbers are given by $\kappa(\kappa+1)=\ell(\ell+1)$, and $\kappa(\kappa-1)=\tilde{\ell}(\tilde{\ell}+1)$.
The Dirac spinor in spherically symmetric potential can be written in terms of upper and lower components as \begin{eqnarray} \Psi_{n \kappa}(r)=\,\frac{1}{r}\,\Bigg(\begin{array}{c} \,\chi_{n \kappa}\,(r)Y_{jm}^{\ell}(\theta,\phi) \\ i\phi_{n \kappa}\,(r)Y_{jm}^{\tilde{\ell}}(\theta,\phi) \end{array}\Bigg)\,, \end{eqnarray} where $Y_{jm}^{\ell}(\theta,\phi)$, and $Y_{jm}^{\tilde{\ell}}(\theta,\phi)$ are the spherical harmonics, and $\chi_{n \kappa}\,(r)/r$, and $\phi_{n \kappa}\,(r)/r$ are radial part of the upper and lower components. Substituting Eq. (2) into Eq. (1) enable us to write the Dirac equation as a set of two couple differential equations in terms of $\chi_{n \kappa}\,(r)$ and $\phi_{n \kappa}\,(r)$. By eliminating $\chi_{n \kappa}\,(r)$ or $\phi_{n \kappa}\,(r)$ in these coupled equations, we obtain \begin{eqnarray} \Big\{\,\frac{d^2}{dr^2}-\,\frac{\kappa(\kappa+1)}{r^2}\, +\,\frac{1}{M_{\Delta}(r)}\Big(\frac{dm(r)}{dr} -\frac{d\Delta(r)}{dr}\Big)\,(\frac{d}{dr}\,+\,\frac{\kappa}{r})\Big\}\chi_{n\kappa}(r)= M_{\Delta}(r)M_{\Sigma}(r)\chi_{n\kappa}(r)\,, \end{eqnarray} \begin{eqnarray} \Big\{\,\frac{d^2}{dr^2}-\,\frac{\kappa(\kappa-1)}{r^2}\, -\,\frac{1}{M_{\Sigma}(r)}\Big(\frac{dm(r)}{dr}+ \frac{d\Sigma(r)}{dr}\Big)\,(\frac{d}{dr}\,-\,\frac{\kappa}{r})\Big\}\phi_{n\kappa}(r)= M_{\Delta}(r)M_{\Sigma}(r)\phi_{n\kappa}(r)\,, \end{eqnarray} where $M_{\Delta}(r)=m+E_{n\kappa}-\Delta(r)$\,, $M_{\Sigma}(r)=m-E_{n\kappa}+\Sigma(r)$, and $\Delta(r)=V_{v}\,(r)-V_s\,(r)$, $\Sigma(r)=V_{v}\,(r)+V_s\,(r)$.
In the NU-method, the Schr\"{o}dinger equation is transformed by using an appropriate coordinate transformation \begin{eqnarray} \sigma^{2}(s)\Psi''(s)+\sigma(s)\tilde{\tau}(s) \Psi'(s)+\tilde{\sigma}(s)\Psi(s)=0\,, \end{eqnarray} where $\sigma(s)$, $\tilde{\sigma}(s)$ are polynomials, at most second degree, and $\tilde{\tau}(s)$ is a first degree polynomial. The polynomial $\pi(s)$, and the parameter $k$ are required in the method \begin{eqnarray} \pi(s)=\frac{1}{2}\,[\sigma^{\prime}(s)-\tilde{\tau}(s)]\pm \sqrt{\frac{1}{4}\,[\sigma^{\prime}(s)-\tilde{\tau}(s)]^2- \tilde{\sigma}(s)+k\sigma(s)}, \end{eqnarray} \begin{eqnarray} \lambda=k+\pi^{\prime}(s ), \end{eqnarray} where $\lambda$ is a constant. The function under the square root in the polynomial in $\pi(s)$ in Eq. (6) must be square of a polynomial in order that $\pi(s)$ be a first degree polynomial. Replacing $k$ into Eq. (6), we define \begin{eqnarray} \tau(s)=\tilde{\tau}(s)+2\pi(s). \end{eqnarray} where the derivative of $\tau(s)$ should be negative [42]. Eq. (5) has a particular solution with degree $n$, if $\lambda$ in Eq. (7) satisfies \begin{eqnarray} \lambda=\lambda_{n}=-n\tau^{\prime}-\frac{\left[n(n-1)\sigma^{\prime\prime}\right]}{2}, \quad n=0,1,2,\ldots \end{eqnarray} To obtain the solution of Eq. (5) it is assumed that the solution is a product of two independent parts as $\Psi(s)=\phi(s)~y(s)$, where $y(s)$ can be written as \begin{eqnarray} y_{n}(s)\sim \frac{1}{\rho(s)}\frac{d^{n}}{ds^{n}} \left[\sigma^{n}(s)~\rho(s)\right], \end{eqnarray} where the function $\rho(s)$ is the weight function, and should satisfy the condition \begin{eqnarray} \left[\sigma(s)~\rho(s)\right]'=\tau(s)~\rho(s)\,, \end{eqnarray} and the other factor is defined as \begin{eqnarray} \frac{1}{\phi(s)}\frac{d\phi(s)}{ds}=\frac{\pi(s)}{\sigma(s)}. \end{eqnarray} In order to clarify the parametric generalization of the NU method, let us take the following general form of a Schr\"{o}dinger-like equation written for any potential, \begin{eqnarray} \left\{\frac{d^{2}}{ds^{2}}+\frac{\alpha_{1}-\alpha_{2}s}{s(1-\alpha_{3}s)} \frac{d}{ds}+\frac{-\xi_{1}s^{2}+\xi_{2}s-\xi_{3}}{[s(1-\alpha_{3}s)]^{2}}\right\}\Psi(s)=0. \end{eqnarray} When Eq. (13) is compared with Eq. (5), we obtain \begin{eqnarray} \tilde{\tau}(s)=\alpha_{1}-\alpha_{2}s\,\,\,;\,\,\sigma(s)=s(1-\alpha_{3}s)\,\,\,;\,\, \tilde{\sigma}(s)=-\xi_{1}s^{2}+\xi_{2}s-\xi_{3}\,. \end{eqnarray} Substituting these into Eq. (6) \begin{eqnarray} \pi(s)=\alpha_{4}+\alpha_{5}s\pm\sqrt{(\alpha_{6}-k\alpha_{3})s^{2}+(\alpha_{7}+k)s+\alpha_{8}}\,, \end{eqnarray} where the parameter set are \begin{eqnarray} \begin{array}{lll} \alpha_{4}=\frac{1}{2}\,(1-\alpha_{1})\,, & \alpha_{5}=\frac{1}{2}\,(\alpha_{2}-2\alpha_{3})\,, & \alpha_{6}=\alpha_{5}^{2}+\xi_{1} \\ \alpha_{7}=2\alpha_{4}\alpha_{5}-\xi_{2}\,, & \alpha_{8}=\alpha_{4}^{2}+\xi_{3}\,. & \end{array} \end{eqnarray} In NU-method, the function under the square root in Eq. (15) must be the square of a polynomial [42], which gives the following roots of the parameter $k$ \begin{eqnarray} k_{1,2}=-(\alpha_{7}+2\alpha_{3}\alpha_{8})\pm2\sqrt{\alpha_{8}\alpha_{9}}\,, \end{eqnarray} where $\alpha_{9}=\alpha_{3}\alpha_{7}+\alpha_{3}^{2}\alpha_{8}+\alpha_{6}$\,. 
We obtain the polynomials $\pi(s)$ and $\tau(s)$ for $k=-(\alpha_{7}+2\alpha_{3}\alpha_{8})-2\sqrt{\alpha_{8}\alpha_{9}}$, respectively \begin{eqnarray} \pi(s)=\alpha_{4}+\alpha_{5}s-\left[(\sqrt{\alpha_{9}}+\alpha_{3}\sqrt{\alpha_{8}}\,)s-\sqrt{\alpha_{8}}\,\right]\,, \end{eqnarray} \begin{eqnarray} \tau(s)=\alpha_{1}+2\alpha_{4}-(\alpha_{2}-2\alpha_{5})s-2\left[(\sqrt{\alpha_{9}} +\alpha_{3}\sqrt{\alpha_{8}}\,)s-\sqrt{\alpha_{8}}\,\right]. \end{eqnarray} Thus, we impose the following for satisfying the condition that the derivative of the function $\tau(s)$ should be negative in the method \begin{eqnarray} \tau^{\prime}(s)&=&-(\alpha_{2}-2\alpha_{5})-2(\sqrt{\alpha_{9}}+\alpha_{3}\sqrt{\alpha_{8}}\,) \nonumber \\ &=&-2\alpha_{3}-2(\sqrt{\alpha_{9}}+\alpha_{3}\sqrt{\alpha_{8}}\,)\quad<0. \end{eqnarray} From Eqs. (7), (8), (19), and (20), and equating Eq. (7) with the condition that $\lambda$ should satisfy given by Eq. (9), we find the eigenvalue equation \begin{eqnarray} \alpha_{2}n-(2n+1)\alpha_{5}&+&(2n+1)(\sqrt{\alpha_{9}}+\alpha_{3}\sqrt{\alpha_{8}}\,)+n(n-1)\alpha_{3}\nonumber\\ &+&\alpha_{7}+2\alpha_{3}\alpha_{8}+2\sqrt{\alpha_{8}\alpha_{9}}=0. \end{eqnarray} We obtain from Eq. (11) the polynomial $\rho(s)$ as $\rho(s)=s^{\alpha_{10}-1}(1-\alpha_{3}s)^{\frac{\alpha_{11}}{\alpha_{3}}-\alpha_{10}-1}$ and substituting it into Eq. (10) gives \begin{eqnarray} y_{n}(s)=P_{n}^{(\alpha_{10}-1,\frac{\alpha_{11}}{\alpha_{3}}-\alpha_{10}-1)}(1-2\alpha_{3}s)\,, \end{eqnarray} where $\alpha_{10}=\alpha_{1}+2\alpha_{4}+2\sqrt{\alpha_{8}}$, $\alpha_{11}=\alpha_{2}-2\alpha_{5}+2(\sqrt{\alpha_{9}}+\alpha_{3}\sqrt{\alpha_{8}})$ and $P_{n}^{(\alpha,\beta)}(1-2\alpha_{3}s)$ are the Jacobi polynomials. From Eq. (12), one obtaines \begin{eqnarray} \phi(s)=s^{\alpha_{12}}(1-\alpha_{3}s)^{-\alpha_{12}-\frac{\alpha_{13}}{\alpha_{3}}}\,, \end{eqnarray} then the general solution $\Psi(s)=\phi(s)y(s)$ becomes \begin{eqnarray} \Psi(s)=s^{\alpha_{12}}(1-\alpha_{3}s)^{-\alpha_{12}-\frac{\alpha_{13}}{\alpha_{3}}} P_{n}^{(\alpha_{10}-1,\frac{\alpha_{11}}{\alpha_{3}}-\alpha_{10}-1)}(1-2\alpha_{3}s). \end{eqnarray} where $\alpha_{12}=\alpha_{4}+\sqrt{\alpha_{8}}$ and $\alpha_{13}=\alpha_{5}-(\sqrt{\alpha_{9}}+\alpha_{3}\sqrt{\alpha_{8}}\,)$. Let us study the case where the parameter $\alpha_3=0$. In this type of problems, the eigenfunctions become \begin{eqnarray} \Psi(s)=s^{\alpha_{12}}\,e^{\alpha_{13}s}\,L^{\alpha_{10}-1}_{n}(\alpha_{11}s)\,, \end{eqnarray} when the limits $lim_{\alpha_3 \rightarrow 0}\,P^{(\alpha_{10}-1\,,\frac{\alpha_{11}}{\alpha_{3}}-\alpha_{10}-1)}_{n}(1-\alpha_{3}s)= L^{\alpha_{10}-1}_{n}(\alpha_{11}s)$ and $lim_{\alpha_3 \rightarrow 0}\,(1-\alpha_{3}s)^{-\,\alpha_{12}-\frac{\alpha_{13}}{\alpha_{3}}}= e^{\alpha_{13}s}$ are satisfied and the corresponding energy spectrum is \begin{eqnarray} \alpha_{2}n-2\alpha_{5}n+(2n+1)(\sqrt{\alpha_{9}\,}&-&\alpha_{3}\sqrt{\alpha_{8}\,}\,)+n(n-1)\alpha_{3} +\alpha_{7}\nonumber\\&+&2\alpha_{3}\alpha_{8}-2\sqrt{\alpha_{8}\alpha_{9}\,}+\alpha_{5}=0\,. \end{eqnarray}
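For convenience, the parametric scheme can be coded directly. The short helper below is an illustrative sketch only (the function name and interface are ours and are not part of the NU literature): it evaluates $\alpha_{4},\ldots,\alpha_{13}$ from the constants $(\alpha_{1},\alpha_{2},\alpha_{3};\xi_{1},\xi_{2},\xi_{3})$ read off by comparing a given equation with Eq. (13), following Eq. (16) and the definitions given after Eqs. (17), (22) and (24).
\begin{verbatim}
import math

def nu_parameters(a1, a2, a3, xi1, xi2, xi3):
    """Parameters of the parametric Nikiforov-Uvarov method.

    Input: alpha_1..alpha_3 and xi_1..xi_3 identified from the
    Schroedinger-like equation written in the form of Eq. (13).
    Output: alpha_4..alpha_13 as defined in the text.
    """
    a4 = 0.5 * (1.0 - a1)
    a5 = 0.5 * (a2 - 2.0 * a3)
    a6 = a5**2 + xi1
    a7 = 2.0 * a4 * a5 - xi2
    a8 = a4**2 + xi3
    a9 = a3 * a7 + a3**2 * a8 + a6
    a10 = a1 + 2.0 * a4 + 2.0 * math.sqrt(a8)
    a11 = a2 - 2.0 * a5 + 2.0 * (math.sqrt(a9) + a3 * math.sqrt(a8))
    a12 = a4 + math.sqrt(a8)
    a13 = a5 - (math.sqrt(a9) + a3 * math.sqrt(a8))
    return {"alpha4": a4, "alpha5": a5, "alpha6": a6, "alpha7": a7,
            "alpha8": a8, "alpha9": a9, "alpha10": a10, "alpha11": a11,
            "alpha12": a12, "alpha13": a13}

print(nu_parameters(1.0, 0.0, 0.0, 4.0, 12.0, 9.0))  # generic numerical example
\end{verbatim}
With these parameters, the quantization condition, Eq. (21) [or Eq. (26) for $\alpha_{3}=0$], and the eigenfunctions, Eq. (24) [or Eq. (25)], follow by direct substitution; real values require $\alpha_{8}\geq 0$ and $\alpha_{9}\geq 0$.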
The generalized Morse potential is given by [44] \begin{eqnarray} V_M(x)=De^{-2\beta x}-2De^{-\beta x}\,, \end{eqnarray} where $x=(r/r_0)-1$\,,\,$\beta=\alpha r_0$\,,\,$D$ is the dissociation energy, $r_0$ is the equilibrium distance, and $\alpha$ is the potential width. The term proportional to $1/r^2$ in Eq. (4) can be expanded about $x=0$ [45] \begin{eqnarray} V_M(x)=\,\frac{\kappa(\kappa-1)}{r^2}=\,\frac{a_{0}}{(1+x)^2}=a_{0}(1-2x+3x^2+\ldots)\,;\,\, a_{0}=\,\frac{\kappa(\kappa-1)}{r_0^2}\,, \end{eqnarray} Instead, we now replace $V_M(x)$ by the potential [45] \begin{eqnarray} \tilde{V}_M(x)=a_{0}(a_{1}+a_{2}e^{-\beta x}+a_{3}e^{-2\beta x})\,, \end{eqnarray} Expanding the potential $\tilde{V}_M(x)$ around $x=0$, and combining equal powers with Eq. (28), one can find the arbitrary constants in the new form of the potential as \begin{eqnarray} a_{1}=1-\,\frac{3}{\beta}\,+\,\frac{3}{\beta^2}\,\,;\,\,\,a_{2}=\,\frac{4}{\beta}\,-\,\frac{6}{\beta^2}\,\,;\,\,\, a_{3}=-\,\frac{1}{\beta}\,+\,\frac{3}{\beta^2}\,. \end{eqnarray} Eq. (4) can not be solved analytically because of the last term in the equation, we prefer to use a mathematical identity such as $dm(r)/dr=-d\Sigma(r)/dr$ to eliminate this term. We obtain the mass function from the identity as \begin{eqnarray} m(x)=m_{0}+m_{1}e^{-\beta x}+m_{2}e^{-2\beta x}\,, \end{eqnarray} where $m_{0}$ corresponds to the integral constant, and the parameters $m_{1}$, and $m_{2}$ are $2D$, and $-D$, respectively. The parameter $m_{0}$ will denote the rest mass of the Dirac particle. By using the potential form given by Eq. (29) replaced by Eq. (28), inserting the mass function in Eq. (31), setting the "difference" potential $\Delta(r)$ to generalized Morse potential in Eq. (27) and using the new variable $s=e^{-\beta x}$, we have \begin{eqnarray} \Big\{\,\frac{d^2}{ds^2}\,+\,\frac{1}{s}\,\frac{d}{ds}\,&+&\frac{1}{s^2}\Big[ -\delta^2(a_{0}a_{1}+m^2_{0}-E^2)-\delta^2[a_{0}a_{2}+(m_{0}-E)(m_{1}+2D)] s\nonumber\\&-&\delta^2[a_{0}a_{3}+(m_{0}-E)(m_{2}-D]s^2\Big]\Big\}\phi_{n\kappa}(s)=0\,. \end{eqnarray} Comparing Eq. (32) with Eq. (13) gives the parameter set \begin{eqnarray} \begin{array}{ll} \alpha_1=1\,, & -\xi_1=-\delta^2[a_{0}a_{3}+(m_{0}-E)(m_{2}-D] \\ \alpha_2=0\,, & \xi_2=-\delta^2[a_{0}a_{2}+(m_{0}-E)(m_{1}+2D)] \\ \alpha_3=0\,, & -\xi_3=-\delta^2(a_{0}a_{1}+m^2_{0}-E^2) \\ \alpha_4=0\,, & \alpha_5=0 \\ \alpha_6=\xi_1\,, & \alpha_7=-\xi_2 \\ \alpha_8=\xi_3\,, & \alpha_9=\xi_1 \\ \alpha_{10}=1+2\sqrt{\xi_3}\,, & \alpha_{11}=2\sqrt{\xi_1} \\ \alpha_{12}=\sqrt{\xi_3}\,, & \alpha_{13}=-\sqrt{\xi_1} \end{array} \end{eqnarray} where $\delta=1/\alpha$. We write the energy eigenvalue equation of the generalized Morse potential by using Eq. (26) \begin{eqnarray} 2\delta\sqrt{a_{0}a_{1}+m^2_{0}-E^2\,} -\delta\,\frac{a_{0}a_{2}+(m_{0}-E)(m_{1}+2D)}{\sqrt{a_{0}a_{3}+(m_{0}-E)(m_{2}-D)\,}} =2n+1\,. \end{eqnarray} Since the negative energy eigenstates exist in the case of the pseudospin symmetry [14, 15, 16], so we choose the negative energy solutions in Eq. (46). In Table I, we give some numerical values of the negative bound state energies obtained from Eq. (46) for $CO$ molecule in atomic units, where we use the input parameter set as $D=11.2256$ eV, $r_{0}=1.1283$ $\AA$, $m_{0}=6.8606719$ amu, and $a=2.59441$ [46], and summarize our results for different $\tilde{\ell}$, and $n$ values. The corresponding lower spinor component can be written by using Eq. 
(25) \begin{eqnarray} \phi(s)=s^{w_{1}}\,e^{-\,w_{2}s}L^{2w_{1}}_{n}(2w_{2}s)\,, \end{eqnarray} where $w_{1}=\delta\sqrt{a_{0}a_{1}+m^2_{0}-E^2\,}$, and $w_{2}=\delta\sqrt{a_{0}a_{3}+(m_{0}-E)(m_{2}-D)\,}$.
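The energy eigenvalue equation obtained above, Eq. (34), is transcendental in $E$ and has to be solved numerically. The following sketch shows one way to do this; the parameter values are placeholders in dimensionless units (they are not the CO data of Table I, which require the unit conversions quoted there), and the scan window and grid are likewise illustrative.
\begin{verbatim}
import numpy as np
from scipy.optimize import brentq

# Illustrative placeholder parameters in dimensionless units.
D, r0, m0, alpha, kappa = 1.0, 1.0, 1.0, 0.5, -1

delta = 1.0 / alpha
beta = alpha * r0
a0 = kappa * (kappa - 1) / r0**2
a1 = 1.0 - 3.0 / beta + 3.0 / beta**2      # constants of Eq. (30)
a2 = 4.0 / beta - 6.0 / beta**2
a3 = -1.0 / beta + 3.0 / beta**2
m1, m2 = 2.0 * D, -D                       # mass-function parameters, Eq. (31)

def quantization(E, n):
    """LHS minus RHS of Eq. (34); a zero corresponds to a bound state."""
    r1 = a0 * a1 + m0**2 - E**2
    r2 = a0 * a3 + (m0 - E) * (m2 - D)
    if r1 <= 0.0 or r2 <= 0.0:             # outside the physical domain
        return np.nan
    return (2.0 * delta * np.sqrt(r1)
            - delta * (a0 * a2 + (m0 - E) * (m1 + 2.0 * D)) / np.sqrt(r2)
            - (2 * n + 1))

# Scan the negative-energy window (the negative solutions are selected in
# the pseudospin case) and refine every sign change with a bracketing solver.
grid = np.linspace(-0.999 * m0, -1.0e-3 * m0, 2000)
for n in range(20):
    f = np.array([quantization(E, n) for E in grid])
    for Ea, Eb, fa, fb in zip(grid[:-1], grid[1:], f[:-1], f[1:]):
        if np.isfinite(fa) and np.isfinite(fb) and fa * fb < 0.0:
            print("n =", n, " E =", brentq(quantization, Ea, Eb, args=(n,)))
\end{verbatim}
With physical molecular parameters the same procedure applies once all quantities are expressed in one consistent unit system.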
Let us study the two special limits, pseudospin and spin symmetry cases, respectively, in the case of the constant mass. \subsubsection{Pseudospin Case} The Dirac equation has the exact pseudospin symmetry if the "sum" potential could satisfy the condition that $d\Sigma(r)/dr=0$, i.e. $\Sigma(r)=A (const.)$ [14]. The parameters in our formalism become $m_{1}=m_{2}=0$. Setting the "difference" potential $\Delta(r)$ to the generalized Morse potential in Eq. (27), using Eq. (29) for the term proportional to $1/r^2$, and using the new variable $s=e^{-\beta x}$, we have from Eq. (4) \begin{eqnarray} \Big\{\,\frac{d^2}{ds^2}\,+\,\frac{1}{s}\,\frac{d}{ds}\,&+&\frac{1}{s^2}\Big[ -\delta^2[a_{0}a_{1}+M(m_{0}+E)]-\delta^2(2MD+a_{0}a_{2}) s\nonumber\\&+&\delta^2(MD-a_{0}a_{3})s^2\Big]\Big\}\phi(s)=0\,. \end{eqnarray} where $M=m_{0}+A-E$. By following the same procedure, the energy eigenvalue equation for the exact pseudospin symmetry in the case of constant mass is written \begin{eqnarray} 2\sqrt{a_{0}a_{1}+M(m_{0}+E)\,}=\frac{a_{0}a_{2}+2DM}{\sqrt{a_{0}a_{3}-DM\,}}+\alpha(2n+1)\,. \end{eqnarray} and the corresponding wave functions read as \begin{eqnarray} \phi^{m_{1}=m_{2}=0}(s)=s^{w'_{1}}\,e^{-\,w'_{2}s}L^{2w'_{1}}_{n}(2w'_{2}s)\,, \end{eqnarray} where $w\,'_{1}=\delta\sqrt{a_{0}a_{1}+M(m_{0}+E)\,}$\,, and $w\,'_{2}=\delta\sqrt{a_{0}a_{3}-DM\,}$\,. We must consideration the negative bound states solutions in Eq. (37) because there exist only the negative eigenvalues in the exact pseudospin symmetry [14, 15, 16]. \subsubsection{Spin Case} The spin symmetry appears in the Dirac equation if the condition is satisfied that $\Delta(r)=V_{v}(r)-V_{s}(r)=A(const.)$. In this case, we have from Eq. (3) \begin{eqnarray} \Big\{\frac{d^2}{dr^2}-\frac{\kappa(\kappa+1)}{r^2}-(m_{0}+E-A)(m_{0}-E-\Sigma(r))\Big\} \chi(r)=0\,, \end{eqnarray} where we set the "sum" potential as generalized Morse potential given in Eq. (27), and use approximation for the term proportional to $1/r^2$ in Eq. (29) [45] \begin{eqnarray} \tilde{V}_M(x)=b_{0}(b_{1}+b_{2}e^{-\beta x}+b_{3}e^{-2\beta x})\,, \end{eqnarray} where $b_{0}=\kappa(\kappa+1)/r^2_{0}$\,, and the parameters $b_{i} (i=1, 2, 3)$ are given in Eq. (30). Using the variable $s=e^{-\beta x}$, and inserting Eq. (40) into Eq. (39), we obtain \begin{eqnarray} \Big\{\,\frac{d^2}{ds^2}\,+\,\frac{1}{s}\,\frac{d}{ds}\,&+&\frac{1}{s^2}\Big[ -\delta^2[b_{0}b_{1}+M'(m_{0}-E)]+\delta^2(2DM\,'-b_{0}b_{2}) s\nonumber\\&-&\delta^2(b_{0}b_{3}+DM\,')s^2\Big]\Big\}\chi(s)=0\,. \end{eqnarray} where $M\,'=m_{0}+E-A$. We write the energy eigenvalue equation, and corresponding wave equations in the spin symmetry limit, respectively, \begin{eqnarray} \frac{\delta[2DM\,'-b_{0}b_{2}]}{\sqrt{b_{0}b_{3}+DM\,'\,}} +2\delta\sqrt{b_{0}b_{1}+M\,'(m_{0}-E)\,}=2n+1\,, \end{eqnarray} and \begin{eqnarray} \chi^{m_{1}=m_{2}=0}(s)=s^{w''_{1}}\,e^{-\,w''_{2}s}L^{2w''_{1}}_{n}(2w''_{2}s)\,, \end{eqnarray} where $w\,''_{1}=\delta\sqrt{b_{0}b_{1}+M'(m_{0}-E)\,}$\,, and $w\,''_{2}=\delta\sqrt{b_{0}b_{3}+DM'\,}$\,. We must take into account the positive energy solutions in Eq. (42) in the case of the exact spin symmetry [14, 15, 16].
In summary, we have approximately solved the effective mass Dirac equation for the generalized Morse potential for arbitrary spin-orbit quantum number $\kappa$ in the position-dependent mass background. We have found the eigenvalue equation, and corresponding two-component spinors in terms of Laguerre polynomials, by using the parametric NU-method within the framework of an approximation to the term proportional to $1/r^2$\,. We have also obtained the energy eigenvalue equations, and corresponding wave functions for the exact pseudospin, and spin symmetry limits in the case of constant mass. We have observed that our analytical results in the case of the pseudospin symmetry are in good agreement with the ones obtained in the literature.
\begin{table} \begin{ruledtabular} \caption{Energy eigenvalues for the $CO$ molecule for different values of $\tilde{\ell}$ and $(n,\kappa)$ in the case of position dependent mass.} \begin{tabular}{ccccc} $\tilde{\ell}$ & $n$ & $\kappa$ & state & $E<0$ \\ \hline 1 & 1 & -1 & $1s_{1/2}$ & 6.15913020 \\ 2 & 1 & -2 & $1p_{3/2}$ & 6.52968379 \\ 3 & 1 & -3 & $1d_{5/2}$ & 6.89146288 \\ 4 & 1 & -4 & $1f_{7/2}$ & 7.24974882 \\ \end{tabular} \end{ruledtabular} \end{table}
\end{document}
\begin{document}
\title{ Randomisation of Pulse Phases for Unambiguous and Robust Quantum Sensing }
\author{Zhen-Yu Wang$^{1,\dagger}$} \email{E-mail: [email protected]} \author{Jacob E. Lang$^{2}$} \thanks{These authors contributed equally to this work} \author{Simon Schmitt$^{3}$} \thanks{These authors contributed equally to this work} \author{Johannes Lang$^{3}$, \\Jorge Casanova$^{4,5}$, Liam McGuinness$^{3}$, Tania S. Monteiro$^{2}$, Fedor Jelezko$^{3}$} \author{Martin B. Plenio$^{1}$} \affiliation{1. Institut f\"ur Theoretische Physik und IQST, Albert-Einstein-Allee 11, Universit\"at Ulm, D-89069 Ulm, Germany} \affiliation{2. Department of Physics and Astronomy, University College London, Gower Street, London WC1E 6BT, United Kingdom} \affiliation{3. Institute of Quantum Optics, Albert-Einstein-Allee 11, Universit\"at Ulm, D-89069 Ulm, Germany} \affiliation{4. Department of Physical Chemistry, University of the Basque Country UPV/EHU, Apartado 644, 48080 Bilbao, Spain} \affiliation{5. IKERBASQUE, Basque Foundation for Science, Maria Diaz de Haro 3, 48013, Bilbao, Spain}
\begin{abstract} We develop theoretically and demonstrate experimentally a universal dynamical decoupling method for robust quantum sensing with unambiguous signal identification. Our method uses randomisation of control pulses to suppress simultaneously two types of errors in the measured spectra that would otherwise lead to false signal identification. These are spurious responses due to finite-width $\pi$ pulses, as well as signal distortion caused by $\pi$ pulse imperfections. For the cases of nanoscale nuclear spin sensing and AC magnetometry, we benchmark the performance of the protocol with a single nitrogen vacancy centre in diamond against widely used non-randomised pulse sequences. Our method is general and can be combined with existing multipulse quantum sensing sequences to enhance their performance. \end{abstract}
\maketitle
\emph{Introduction.--} The nitrogen-vacancy (NV) centre~\cite{doherty2013} in diamond has demonstrated excellent sensitivity and nanoscale resolution in a range of quantum sensing experiments~\cite{schirhagl2014nitrogen,rondin2014magnetometry,suter2016single,wu2016diamond}. In particular, under dynamical decoupling (DD) control~\cite{souza2012robust} the NV centre can be protected against environmental noise~\cite{ryan2010robust,deLange2010universal,naydenov2011dynamical} while at the same time being made sensitive to an AC magnetic field of a particular frequency~\cite{deLange2011single}. This makes the NV centre a highly promising probe for nanoscale nuclear magnetic resonance (NMR) and magnetic resonance imaging (MRI)~\cite{staudacher2013nuclear,rugar2015proton,deVience2015nanoscale,shi2015single,schmitt2017sub,boss2017quantum,glenn2018high,rosskopf2017quantum,pham2016nmr}. Moreover, NV centers under DD control can be used to detect, identify, and control nearby single nuclear spins~\cite{taminiau2012detection,kolkowitz2012sensing,zhao2012sensing,Muller2014,lang2015dynamical,sasaki2018determination,zopes2018nc,zopes2018prl,pfender2019high} and spin clusters~\cite{zhao2011atomic,shi2014sensing,wang2016positioning,wang2017delayed,abobeih2018one}, for applications in quantum sensing~\cite{degen2017quantum}, quantum information processing~\cite{casanova2016noise,casanova2017arbitrary}, quantum simulations~\cite{cai2013a}, and quantum networks~\cite{humphreys2018deterministic,perlin2019noise}.
Errors in the DD control pulses are unavoidable in experiments and limit performance especially for larger number of pulses. To compensate for detuning and amplitude errors in control pulses, robust DD sequences that include several pulse phases~\cite{gullion1990new,ryan2010robust,casanova2015robust,genov2017arbitrarily} were developed. However, these robust sequences still require good pulse-phase control and, more importantly, they introduce spurious harmonic response \cite{loretz2015spurious} due to the finite length of the control pulses. This spurious response leads to false signal identification, e.g. the misidentification of $^{13}$C nuclei for $^{1}$H nuclei, and hence impact negatively the reliability and reproducibility of quantum sensing experiments. Under special circumstances it is possible to control some of these spurious peaks~\cite{haase2016pulse,lang2017enhanced,shu2017unambiguous}. However, it is highly desirable to design a systematic and reliable method to suppress any spurious response and to improve robustness of all existing DD sequences, such as the routinely used XY family of sequences~\cite{gullion1990new}, the universally robust (UR) sequences~\cite{genov2017arbitrarily}, and other DD sequences leading to enhanced nuclear selectivity~\cite{casanova2015robust,haase2018soft}.
\begin{figure}
\caption{(a) The NV sensor qubit and experimental set-up; (b) a basic DD pulse unit of duration $T$ with $N$ $\pi$ pulses and the corresponding modulation functions; (c) standard repetition of the basic unit. (Schematic caption reconstructed from the references to Fig.~1 in the text; the original graphics are not reproduced here.)}
\label{Fig1}
\end{figure}
In this Letter, we demonstrate that phase randomisation upon repetition of a basic pulse unit of DD sequences is a generic tool that improves their robustness and eliminates spurious response whilst maintaining the desired signal. This is achieved by, firstly, adding a global phase to the applied $\pi$ pulses within one elemental unit and, secondly, randomly changing this phase each time the unit is repeated. Our method is universal, that is, it can be directly incorporated into arbitrary DD sequences and is applicable to any physical realisation of a qubit sensor.
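As a concrete illustration of the protocol, the pulse phases for $M$ repetitions of a randomised XY8 unit can be generated as follows. This is a minimal sketch added for clarity; the XY8 base phases are the standard X--Y--X--Y--Y--X--Y--X pattern, while the uniform sampling of the random global phase is an assumption of the sketch, since the protocol only requires the phase to change randomly between repetitions.
\begin{verbatim}
import numpy as np

def randomised_xy8_phases(M, rng=None):
    """Pulse phases (radians) for M repetitions of a randomised XY8 unit.

    Each repetition uses the standard XY8 pattern X-Y-X-Y-Y-X-Y-X and adds
    one global phase drawn at random anew for every repetition (uniform
    sampling over [0, 2*pi) is assumed here).
    """
    rng = np.random.default_rng() if rng is None else rng
    xy8 = np.array([0.0, 0.5, 0.0, 0.5, 0.5, 0.0, 0.5, 0.0]) * np.pi  # X=0, Y=pi/2
    phases = []
    for _ in range(M):
        phases.append(xy8 + rng.uniform(0.0, 2.0 * np.pi))  # one offset per unit
    return np.concatenate(phases)

print(randomised_xy8_phases(M=2, rng=np.random.default_rng(seed=1)))
\end{verbatim}
Because only a global phase is added within each unit, the relative phases that give the XY8 unit its robustness are untouched, while the unit-to-unit randomisation prevents the coherent build-up of spurious responses, as demonstrated below.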
\begin{figure}
\caption{Quantum spectroscopy with DD. (a) Simulated averaged population signal as a function of the DD frequency [$1/(2\tau)$ for pulse spacing $\tau$]. One $^{1}{\rm H}$ spin and one $^{13}{\rm C}$ spin are coupled to the NV centre via the hyperfine-field components~\cite{casanova2015robust,seeSM} $(A_{\perp},A_{\parallel})=2\pi\times(2,1)$ kHz and $2\pi\times(5,50)$ kHz, respectively. The orange dashed line (blue solid line) is the signal obtained by a standard XY8 (randomised XY8) sequence using rectangular $\pi$ pulses with a time duration of $200$ ns and $M=200$. The presence of the $^{13}{\rm C}$ distorts the proton spin signal centred at the proton spin frequency (see the vertical dashed lines indicating the target $^{1}{\rm H}$ and the spurious $^{13}{\rm C}$ resonance frequencies for a magnetic field 450~G). The randomised XY8 sequence significantly reduces the signal distortion due to non-instantaneous control and reveals the real proton signal (see the green dash-dotted line for the signal obtained by a perfect XY8 sequence). (b) As (a) but adding $5\%$ (in terms of the ideal Rabi frequency) of errors in both driving amplitude and frequency detuning to the $\pi$ pulses. (c) and (d) [(e) and (f)] are the same as (a) and (b) but for the YY8~\cite{shu2017unambiguous} [UR8~\cite{genov2017arbitrarily}] sequence. Despite the YY8 sequence - which uses single-axis control to mitigate the spurious peak in the $^{13}{\rm C}$ spectrum when there is no pulse error - the presence of the $^{13}{\rm C}$ still distorts the proton spin signal centred at the proton spin frequency. In all cases, the randomised protocol reduces the signal distortion due to non-instantaneous control and control errors.}
\label{FigH}
\end{figure}
\emph{DD-based quantum sensing.--}
Whilst our method is applicable to any qubit sensor, we illustrate it here with single NV centres. For all experiments in this work a bias magnetic field between 400 and 500 Gauss aligned with the NV-axis splits the degenerate $m_s=\pm 1$ spin states allowing the selective addressing of the $m_s=0 \leftrightarrow m_s=-1$ transition, which represents our sensor qubit with the qubit states $|0(1)\rangle$ [see Fig.~\ref{Fig1}(a) and \cite{seeSM} for details of the experimental set-up].
The interaction of the sensor qubit with its environment takes the general form $\hat{H}^\prime(t)=\frac{1}{2}\hat{\sigma}_{z}\hat{E}(t)$. Here $\hat{\sigma}_{z}=|0\rangle\langle 0|-|1\rangle\langle 1|$ is the Pauli operator of the sensor qubit, and $\hat{E}(t)$ is an operator that includes the target signal which oscillates at a particular frequency as well as the presence of noisy environmental fluctuations. In the case of nuclear-spin sensing, $\hat{E}(t)$ contains target and bath nuclear spin operators oscillating at their Larmor frequencies. For AC magnetometry, $\hat{E}(t)$ describes classical oscillating magnetic fields. The aim of quantum sensing is to detect a target such as a single nuclear spin via the control of the quantum sensor with a sequence of DD $\pi$ pulses. The latter often corresponds to a periodic repetition of a basic pulse unit which has a time duration $T$ and a number $N$ of pulses [see Fig.~\ref{Fig1}(b)]. The propagator of a single $\pi$ pulse unit in a general form reads $\hat{U}_{\rm unit}(\{\phi_{j}\}) = \hat{\mathtt{f}}_{N+1}\hat{P}(\phi_N)\hat{\mathtt{f}}_{N}\cdots \hat{P}(\phi_2)\hat{\mathtt{f}}_{2}\hat{P}(\phi_1)\hat{\mathtt{f}}_{1}$, where $\hat{\mathtt{f}}_{j}$ are the free evolutions separated by the control $\pi$ pulses with the propagator $\hat{P}(\phi_j)$. Errors in the control are included in $\hat{P}(\phi_j)$, while the different pulse phases $\phi_j$ are used by robust DD sequences to mitigate the effect of detuning and amplitude errors of the $\pi$ pulses. Using $M$ repetitions of the basic DD unit [see Fig.~\ref{Fig1}(c) for the case of a standard construction] allows for $M$-fold increased signal accumulation time $T_{\rm total}=M T$ which enhances the acquired contrast of the weak signal as $\propto M^2$~\cite{zhao2011atomic} and improves the fundamental frequency resolution to $\sim 1/T_{\rm total}$.
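As an illustration of this propagator structure, the following minimal Python sketch composes $\hat{U}_{\rm unit}$ from rectangular $\pi$-pulse propagators and free evolutions of the bare qubit (the XY8-like phase pattern, pulse spacing, Rabi frequency and detuning below are hypothetical placeholders, and pulse durations are not subtracted from the free-evolution intervals).
\begin{verbatim}
import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)

def u(H, t):
    """exp(-i H t) for a traceless Hermitian 2x2 matrix H."""
    a = np.sqrt(np.trace(H @ H).real / 2.0)
    if a == 0:
        return np.eye(2, dtype=complex)
    return np.cos(a * t) * np.eye(2) - 1j * np.sin(a * t) * H / a

def pi_pulse(phi, omega, delta=0.0):
    """Rectangular pi pulse of phase phi, Rabi frequency omega, detuning delta."""
    H = 0.5 * omega * (np.cos(phi) * sx + np.sin(phi) * sy) + 0.5 * delta * sz
    return u(H, np.pi / omega)          # duration of an ideal pi pulse

def free(tau, delta=0.0):
    """Free evolution of the bare qubit (only a detuning term)."""
    return u(0.5 * delta * sz, tau)

def unit_propagator(phases, tau, omega, delta=0.0):
    """U_unit = f_{N+1} P(phi_N) f_N ... P(phi_1) f_1 with CPMG-like spacing."""
    U = free(tau / 2, delta)                                        # f_1
    for phi in phases[:-1]:
        U = free(tau, delta) @ pi_pulse(phi, omega, delta) @ U
    return free(tau / 2, delta) @ pi_pulse(phases[-1], omega, delta) @ U

# Hypothetical example: XY8 phase pattern (X Y X Y Y X Y X)
xy8 = [0, np.pi / 2, 0, np.pi / 2, np.pi / 2, 0, np.pi / 2, 0]
print(np.round(unit_propagator(xy8, tau=1.0, omega=50.0, delta=0.5), 4))
\end{verbatim}
Setting \texttt{delta=0} recovers a perfect unit, for which the XY8 product of $\pi$ rotations reduces to the identity.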
To see how a target signal is sensed, we write the Hamiltonian $H^\prime(t)$ in the interaction picture of the DD control as~\cite{seeSM} \begin{equation}
\hat{H}(t) = \frac{1}{2}F_z(t) \hat{\sigma}_z \hat{E}(t) + \frac{1}{2}[F_{\perp}(t) \hat{\sigma}_{-}+{\rm H.c.}]\hat{E}(t), \label{HInt} \end{equation}
where $\hat{\sigma}_{-}=|0\rangle\langle 1|$. For ideal instantaneous $\pi$ pulses, $F_{\perp}(t)$ vanishes identically [see Fig.~\ref{Fig1}(b), which shows that $F_{\perp}(t)$ vanishes between the $\pi$ pulses] and the modulation function $F_{z}(t)$ is the stepped modulation function widely used in the literature, that is, $F_{z}(t)=(-1)^{m}$ when $m$ $\pi$-pulses have been applied up to the moment $t$. The role of a DD based quantum sensing sequence is to tailor $F_{z}(t)$ such that it oscillates at the same frequency as the target signal in $\hat{E}(t)$, allowing resonant coherent coupling between the sensor and the target.
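For reference, the stepped modulation function for ideal pulses can be generated with a small Python helper (the pulse times below are hypothetical, equally spaced values).
\begin{verbatim}
import numpy as np

def stepped_Fz(t, pulse_times):
    """F_z(t) = (-1)^m, with m the number of (instantaneous) pi pulses
    applied before time t."""
    m = np.searchsorted(np.sort(np.asarray(pulse_times)), t, side='right')
    return (-1.0) ** m

tau = 0.5                                   # hypothetical pulse spacing
pulse_times = tau / 2 + tau * np.arange(8)  # 8 equally spaced pi pulses
print([stepped_Fz(t, pulse_times) for t in np.linspace(0, 8 * tau, 9)])
\end{verbatim}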
\begin{figure}
\caption{Removing spurious response with the phase randomisation protocol. (a) In the measured spectrum of an AC magnetic field sensed by a standard repetition of the XY8 sequence (see orange diamonds), the non-instantaneous $\pi$ pulses produce spurious peaks at the frequencies $2\nu_{0}$ and $4\nu_{0}$. Repeating the XY8 sequence with phase randomisation (see blue bullets) preserves the desired signal centred at $\nu_{0}$ and efficiently suppresses all the spurious peaks. The XY8 unit was repeated $M=25$ times in the upper panel and $M=125$ in the lower panel for a longer sensing time. (b) Detection of proton spins using the XY8 sequence. For the measured spectrum obtained by the standard protocol, the $^{13}{\rm C}$ nuclear spins naturally in diamond produce a strong and wide spurious peak that hinders proton spin detection. Using the randomisation protocol, the spurious $^{13}{\rm C}$ peak has been suppressed, revealing the proton spin signal centred around a frequency of 1.9 MHz.}
\label{FigS}
\end{figure}
In realistic situations, where the $\pi$ pulses are not instantaneous due to limited control power, the function $F_{\perp}(t)$ has a non-zero value during $\pi$ pulse execution and $F_z(t)$ deviates from $\pm 1$~\cite{lang2017enhanced,lang2019non} [see Fig.~\ref{Fig1} (b) for the example of XY8 sequences]. While it is possible to eliminate the effect of deviation in $F_z(t)$ by pulse shaping techniques~\cite{casanova2018shaped}, the presence of non-zero $F_{\perp}(t)$ may still alter the expected signal or cause spurious peaks to appear~\cite{loretz2015spurious}. In general, an oscillating component with a frequency $k/T_{\rm total}$ ($k$ being an integer) in $\hat{E}(t)$, not resonant with $F_z(t)$, will create spurious response when the Fourier amplitude~\cite{lang2017enhanced,seeSM} \begin{equation} f^{\perp}_{k}=\frac{1}{T_{\rm total}}\int_{0}^{T_{\rm total}}F_{\perp}(t)\exp(-i 2\pi k t/T_{\rm total})dt \end{equation} of $F_{\perp}(t)$ is non-zero. This spurious response can cause false signal identification, e.g., a wrong conclusion on the detected nuclear species~\cite{loretz2015spurious}, as exemplified in Figs.~\ref{FigH} and \ref{FigS}. Suppressing the spurious response from $^{13}\rm{C}$ nuclei is especially critical, as it allows reliable nanoscale NMR or MRI without the use of hard-to-manufacture and consequently expensive, highly isotopically purified $^{12}$C diamond. However, as shown in Fig.~\ref{FigH}(c) and (d), even for a YY8 sequence (designed to remove spurious resonances~\cite{shu2017unambiguous}), the target proton signal is still perturbed by other nuclear species ($^{13}\rm{C}$ in this case). In the presence of amplitude and detuning errors, standard strategies perform even worse.
To remove all spurious peaks, one seeks to design a DD sequence that minimises the effect of $F_{\perp}(t)$ in a robust manner. We observe that by introducing a global phase to all the $\pi$ pulses, the form of $F_{z}(t)$ is unchanged but a phase factor is added to $F_{\perp}(t)$. This motivates the following method to preserve $F_{z}(t)$ and to suppress the effect of $F_{\perp}(t)$ by phase randomisation.
\emph{Phase randomisation.--} In the randomisation protocol, a random global phase $\Phi_{r,m}$ (where the subscript $r$ means a random value) is added to all the pulses within each unit $m$, as shown in Fig.~\ref{Fig1}(d). The propagator of $M$ DD units with independent global phases reads $\hat{U}_{r} = \prod_{m=1}^{M}\hat{U}_{\rm unit}(\{\phi_j+\Phi_{r,m}\})$. If one sets all the random phases $\Phi_{r,m}$ to the same value (e.g. zero) the original DD sequence can be recovered [Fig.~\ref{Fig1}(c)]. Since each of the global phases does not change the internal structure (i.e., the relative phases among $\pi$ pulses) of the basic unit, the robustness of the basic DD sequence is preserved. On the other hand, as we will show in the following, these random global phases prevent control imperfections from accumulating.
\emph{Universal suppression of spurious response.--} The randomisation protocol provides a universal method to suppress spurious response. For the sequence with randomisation, one can find that the Fourier amplitude reads $f^{\perp}_{k}=Z_{r,M} \tilde{f}^{\perp}_{k/M}$, where $\tilde{f}^{\perp}_{k/M}=\frac{1}{T}\int_{0}^{T}F_{\perp}(t)\exp(-i\frac{2\pi k t}{M T})dt$ is the Fourier component defined over a single period $T$~\cite{seeSM}. For random phases $\{\Phi_{r,m}\}$, the factor \begin{equation} Z_{r,M} =\frac{1}{M} \sum_{m=1}^{M} \exp(i \Phi_{r,m}), \label{eq:phaseAverage} \end{equation}
captures the effect of the randomisation protocol. Due to the random values of the phases $\Phi_{r,m}$, $Z_{r,M}$ becomes a (normalised) 2D random walk with $\langle|Z_{r,M}|^2\rangle=1/M$ thus suppressing the contrast of spurious response by a factor of $1/(2M)$ compared with the standard protocol~\cite{seeSM}. Here, we note that one can design a set of specific (i.e. not random) phases $\Phi_{r,m}$ that minimise a certain $f^{\perp}_{k}$ completely. However, this set of phases would be specific to one $k$-value (i.e. it does not suppress all spurious peaks simultaneously). In this respect, the power of our method is that it is simple to implement and fully universal, suppressing all spurious peaks produced by any sequence whilst still retaining the ideal signal, as shown in Fig.~\ref{FigH}.
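The scaling of the suppression factor can be verified with a short Monte Carlo estimate, independent of any specific pulse sequence (the sample size below is arbitrary).
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

def Z_samples(M, samples=10000):
    """Samples of Z_{r,M} = (1/M) * sum_m exp(i Phi_{r,m}),
    with Phi_{r,m} drawn uniformly from [0, 2*pi)."""
    phases = rng.uniform(0.0, 2.0 * np.pi, size=(samples, M))
    return np.mean(np.exp(1j * phases), axis=1)

for M in (10, 50, 200):        # arbitrary example values of M
    z2 = np.abs(Z_samples(M)) ** 2
    print(f"M={M:4d}  <|Z|^2>={z2.mean():.5f} (1/M={1/M:.5f})"
          f"  var={z2.var():.2e} ((M-1)/M^3={(M-1)/M**3:.2e})")
\end{verbatim}
The estimated mean and variance of $|Z_{r,M}|^{2}$ reproduce $1/M$ and $(M-1)/M^{3}$, respectively (see also the Supplemental Material~\cite{seeSM}).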
To experimentally benchmark the performance, we carried out nanoscale detection of a classical AC magnetic field [Fig.~\ref{FigS} (a)] and, separately, the nanoscale NMR detection of an ensemble of proton spins with a natural $^{13}$C abundance ($1.1\%$) diamond [Fig.~\ref{FigS} (b)]. The standard repetition of the XY8 sequence, which is widely used in various sensing and sensing-based applications (e.g., see Refs.~\cite{staudacher2013nuclear,rugar2015proton,deVience2015nanoscale,shi2015single,glenn2018high,abobeih2018one,humphreys2018deterministic,rosskopf2017quantum,loretz2015spurious,pham2016nmr}), produces spurious peaks when the duration of $\pi$ pulses is non-zero. In contrast, the randomisation protocol suppresses all the spurious peaks in the spectrum efficiently, and the spurious background noise from a $^{13}{\rm C}$ nuclear spin bath in diamond was removed while the desired proton signal was unaffected, demonstrating a clear and unambiguous proton spin detection without the use of $^{12}$C isotopically pure diamonds.
In the experiments, we have repeated the randomisation protocol with $K=10$ samples of the random phase sequences $\{\Phi_{r,m}\}$ and averaged the measured signals.
This reduces the fluctuations of the (suppressed) spurious peaks, introduced by the applied random phases, because the variance of $|Z_{r,M}|^2$ (which is $(M-1)/M^3$) is further reduced by a factor of $1/K$~\cite{seeSM}.
Removing the spurious response also improves the accuracy, for example, in measuring the depth of individual NV centres~\cite{pham2016nmr}. By falsely assuming that all the signal around $1.9$ MHz obtained by the standard XY8 sequences originates from hydrogen spins, the computed NV centre depth would be $5.88\pm0.52$~nm, instead of $7.62\pm0.29$~nm obtained by the randomised XY8 - a deviation of about 30~$\%$ [see Fig.~\ref{FigS} (b)].
\begin{figure}
\caption{Experimental enhancement of sequence robustness with the phase randomisation protocol. (a) The fidelity of XY8 sequences as a function of detuning and Rabi frequency errors for randomisation (upper panels) and standard (lower panels) protocols. The control errors are measured in terms of the ideal Rabi frequency $\Omega_{\rm ideal}= 2\pi\times 32.8 $ MHz. The sequences have inter-pulse spacing $200$ ns and $M=25$ XY8 units. (b) The fidelity of XY8 sequences with respect to a static phase error between the X and Y pulses and the inter-pulse time interval $\tau$, for randomised (upper panels) and standard (lower panels) protocols with $M=12$. Resonant microwave $\pi$ pulses are used with a Rabi frequency $\Omega_{\rm ideal}= 2\pi\times 66.6 $ MHz. }
\label{FigR}
\end{figure}
\emph{Enhancement on control robustness.--} As indicated in Fig.~\ref{FigH}, the randomisation protocol also enhances the robustness of the whole DD sequence. For simplicity, in the following discussion we neglect the effect of the environment and concentrate on static control imperfections. The latter introduce errors in the form of non-zero matrix elements
$\langle 0|\hat{U}_{\rm unit}|1\rangle = C \epsilon + O(\epsilon^2)$, where $\epsilon$ is a small parameter and $C$ is a prefactor depending on the explicit form of control (see~\cite{seeSM} for details). For the standard protocol where the same $\hat{U}_{\rm unit}$ block is repeated, the static errors accumulate coherently, yielding $\langle 0|(\hat{U}_{\rm unit})^{M}|1\rangle = MC \epsilon+ O(\epsilon^2)$. The random phases in the randomisation protocol avoid this coherent error accumulation and one can find $\langle 0|\hat{U}_{r}|1\rangle = Z_{r,M} MC \epsilon+ O(\epsilon^2)$, where the error is suppressed by the factor $Z_{r,M}$, which is given by Eq.~(\ref{eq:phaseAverage}) for random phases~\cite{seeSM}. Compared with the suppression of control imperfections by deterministic phases, the randomisation protocol is universal and achieves both suppression of spurious response and enhancement of robustness, without loss of sensitivity to target signals, as shown in Figs.~\ref{FigH} and \ref{FigS}.
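These two scalings can be illustrated with a toy numerical model in which each DD unit is represented directly by an exactly unitary matrix of the general form quoted above, with a hypothetical error amplitude $C\epsilon$ (this is a sketch of the error bookkeeping, not a simulation of a concrete pulse sequence).
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)

def unit(eps, Phi=0.0, C=1.0, varphi=0.0):
    """Exactly unitary 2x2 matrix with leading-order off-diagonal error C*eps
    and a global pulse phase Phi (hypothetical model of one DD unit)."""
    c = C * eps * np.exp(-1j * Phi)
    d = np.sqrt(1.0 - abs(c) ** 2)
    return np.array([[d * np.exp(1j * varphi), 1j * c],
                     [1j * np.conj(c), d * np.exp(-1j * varphi)]])

M, eps = 100, 1e-3                     # hypothetical values
U_std = np.linalg.matrix_power(unit(eps), M)   # standard: same unit repeated
U_rnd = np.eye(2, dtype=complex)
Phis = rng.uniform(0, 2 * np.pi, M)
for Phi in Phis:                        # randomisation: fresh phase per unit
    U_rnd = unit(eps, Phi) @ U_rnd
Z = np.mean(np.exp(1j * Phis))

print("standard   |<0|U|1>| =", abs(U_std[0, 1]), " ~ M*eps =", M * eps)
print("randomised |<0|U|1>| =", abs(U_rnd[0, 1]), " ~ |Z|*M*eps =", abs(Z) * M * eps)
\end{verbatim}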
In Fig.~\ref{FigR} (a), we show the robustness of the widely used XY8 sequence, with respect to amplitude bias and frequency detuning of the microwave pulses, for the randomisation and standard protocols. The simulation and experiment demonstrate robustness improvement after applying phase randomisation. As shown in Fig.~\ref{FigR} (b), the randomisation protocol also suppresses errors in pulse phases. The latter is especially relevant for digital pulsing devices where the signal from a microwave source is split up and the phase in one arm is shifted by suitable equipment. On top of errors due to the working accuracy of these devices, different cable lengths in the two arms can add up to errors in the relative phase.
\emph{Conclusion.--} We present a randomisation protocol for DD sequences that efficiently and universally suppresses spurious response whilst maintaining the expected signal. This method is simple to implement, only requiring additional random control-pulse phases, and is valid for all DD sequence choices. The protocol functions equally well for quantum and classical signals, allowing clear and unambiguous AC field and nuclear spin detection, e.g., with the widely used XY family of sequences. Furthermore, the protocol also enhances the robustness of the whole pulse sequence.
For sensing experiments with NV centres, the protocol reduces the reliance on hard-to-manufacture, expensive, highly isotopically purified diamond. The method has a general character being equally applicable to other quantum platforms and other DD applications. For example, it could be used to improve correlation spectroscopy~\cite{laraoui2013,ma2016proposal,wang2017delayed,rosskopf2017quantum} in quantum sensing and fast quantum gates in trapped ions~\cite{arrazola2018arrazola,manovitz2017fast} where DD has been used as an important ingredient.
\emph{Acknowledgements.--} M.~B.~P. and Z.-Y.~W. acknowledge support by the ERC Synergy grant BioQ (Grant No. 319130), the EU project HYPERDIAMOND and AsteriQs, the QuantERA project NanoSpin, the BMBF project DiaPol, the state of Baden-W{\"u}rttemberg through bwHPC, and the German Research Foundation (DFG) through Grant No. INST 40/467-1 FUGG. J.~E.~L. is funded by an EPSRC Doctoral Prize Fellowship. F.~J., S.~S., L.~M., and J.~L. acknowledge support of Q-Magine of the QUANTERA, DFG (FOR 1493, SPP 1923, JE 290/18-1 and SFB 1279), BMBF (13N14438, 16KIS0832, and 13N14810), ERC (BioQ 319130), VW Stiftung and Landesstiftung BW. J.~C. acknowledges financial support from Juan de la Cierva Grant No. IJCI-2016-29681.
\begin{thebibliography}{99} \bibitem{doherty2013} M. W. Doherty, N. B. Manson, P. Delaney, F. Jelezko, J. Wrachtrup, and L. C. L. Hollenberg, The nitrogen-vacancy colour centre in diamond, {\em Phys. Rep.} \textbf{528}, 1 (2013).
\bibitem{schirhagl2014nitrogen} R. Schirhagl, K. Chang , M. Loretz and C. L. Degen, Nitrogen-Vacancy Centers in Diamond: Nanoscale Sensors for Physics and Biology, {\em Annu. Rev. Phys. Chem.} \textbf{65}, 83 (2014).
\bibitem{rondin2014magnetometry} L. Rondin, J. P. Tetienne, T. Hingant, J. F. Roch, P. Maletinsky and V. Jacques, Magnetometry with nitrogen-vacancy defects in diamond, {\em Rep. Prog. Phys.} \textbf{77}, 056503 (2014).
\bibitem{suter2016single} D. Suter and F. Jelezko, Single-spin magnetic resonance in the nitrogen-vacancy center of diamond, {\em Prog. Nucl. Magn. Reson. Spectrosc.} \textbf{98-99}, 50 (2017).
\bibitem{wu2016diamond} Y. Wu, F. Jelezko, M. B. Plenio and T. Weil, Diamond Quantum Devices in Biology, {\em Angew. Chem. Int. Ed.} \textbf{55}, 6586 (2016).
\bibitem{souza2012robust} A. M. Souza, G. A. {\'A}lvarez, and D. Suter, Robust dynamical decoupling, {\em Phil. Trans. R. Soc. A} \textbf{370}, 4748 (2012).
\bibitem{ryan2010robust} C. A. Ryan, J. S. Hodges, and D. G. Cory, Robust Decoupling Techniques to Extend Quantum Coherence in Diamond, {\em Phys. Rev. Lett.} \textbf{105}, 200402 (2010).
\bibitem{deLange2010universal} G. de Lange, Z. Wang, D. Riste, V. Dobrovitski, and R. Hanson, Universal Dynamical Decoupling of a Single Solid-State Spin from a Spin Bath, {\em Science} \textbf{330}, 60 (2010).
\bibitem{naydenov2011dynamical} B. Naydenov, F. Dolde, L. T. Hall, C. Shin, H. Fedder, L. C. L. Hollenberg, F. Jelezko, and J. Wrachtrup, Dynamical Decoupling of a Single-Electron Spin at Room Temperature, {\em Phys. Rev. B} \textbf{83}, 081201 (2011).
\bibitem{deLange2011single} G. de Lange, D. Riste, V. V. Dobrovitski, and R. Hanson, Single-Spin Magnetometry with Multipulse Sensing Sequences, {\em Phys. Rev. Lett.} \textbf{106}, 080802 (2011).
\bibitem{staudacher2013nuclear} T. Staudacher, F. Shi, S. Pezzagna, J. Meijer, J. Du, C. A. Meriles, F. Reinhard, and J. Wrachtrup, Nuclear Magnetic Resonance Spectroscopy on a (5-Nanometer)$^3$ Sample Volume, {\em Science} \textbf{339}, 561 (2013).
\bibitem{rugar2015proton} D. Rugar, H. J. Mamin, M. H. Sherwood, M. Kim, C. T. Rettner, K. Ohno, and D. D. Awschalom, Proton magnetic resonance imaging using a nitrogen–vacancy spin sensor, {\em Nat. Nanotechnol.} \textbf{10}, 120 (2015).
\bibitem{deVience2015nanoscale}S. J. DeVience, L. M. Pham, I. Lovchinsky, A. O. Sushkov, N. Bar-Gill, C. Belthangady, F. Casola, M. Corbett, H. Zhang, M. Lukin, H. Park, A. Yacoby, and R. L. Walsworth, Nanoscale NMR spectroscopy and imaging of multiple nuclear species, {\em Nat. Nanotechnol.} \textbf{10}, 129 (2015).
\bibitem{shi2015single}F. Shi, Q. Zhang, P. Wang, H. Sun, J. Wang, X. Rong, M. Chen, C. Ju, F. Reinhard, H. Chen, J. Wrachtrup, J. Wang, and J. Du, Single-protein spin resonance spectroscopy under ambient conditions, {\em Science} \textbf{347}, 1135 (2015).
\bibitem{schmitt2017sub} S. Schmitt, T. Gefen, F. M. Stürner, T. Unden, G. Wolff, C. Müller, J. Scheuer, B. Naydenov, M. Markham, S. Pezzagna, J. Meijer, I. Schwarz, M. Plenio, A. Retzker, L. P. McGuinness, and F. Jelezko, Submillihertz magnetic spectroscopy performed with a nanoscale quantum sensor, {\em Science} \textbf{356}, 832 (2017).
\bibitem{boss2017quantum} J. M. Boss, K. S. Cujia, J. Zopes, and C. L. Degen, Quantum sensing with arbitrary frequency resolution, {\em Science} \textbf{356}, 837 (2017).
\bibitem{glenn2018high} D. R. Glenn, D. B. Bucher, J. Lee, M. D. Lukin, H. Park, and R. L. Walsworth, High-Resolution Magnetic Resonance Spectroscopy Using a Solid-State Spin Sensor, {\em Nature (London)} \textbf{555}, 351 (2018).
\bibitem{rosskopf2017quantum} T. Rosskopf, J. Zopes, J. M. Boss, and C. L. Degen, A quantum spectrum analyzer enhanced by a nuclear spin memory, {\em npj Quantum Inf.} \textbf{3}, 33 (2017).
\bibitem{pham2016nmr} L. M. Pham, S. J. DeVience, F. Casola, I. Lovchinsky, A. O. Sushkov, E. Bersin, J. Lee, E. Urbach, P. Cappellaro, H. Park, A. Yacoby, M. Lukin, and R. L. Walsworth, NMR technique for determining the depth of shallow nitrogen-vacancy centers in diamond, {\em Phys. Rev. B} \textbf{93}, 045425 (2016).
\bibitem{taminiau2012detection} T. H. Taminiau, J. J. T. Wagenaar, T. Van der Sar, F. Jelezko, V. V. Dobrovitski and R. Hanson, Detection and Control of Individual Nuclear Spins Using a Weakly Coupled Electron Spin, {\em Phys. Rev. Lett.} \textbf{109}, 137602 (2012).
\bibitem{kolkowitz2012sensing} S. Kolkowitz, Q. P. Unterreithmeier, S. D. Bennett and M. D. Lukin, Sensing Distant Nuclear Spins with a Single Electron Spin, {\em Phys. Rev. Lett.} \textbf{109}, 137601 (2012).
\bibitem{zhao2012sensing} N. Zhao, J. Honert, B. Schmid, M. Klas, J. Isoya, M. Markham, D. Twitchen, F. Jelezko, R. B. Liu, H. Fedder and J. Wrachtrup, Sensing single remote nuclear spins, {\em Nat. Nanotechnol.} \textbf{7}, 657 (2012).
\bibitem{Muller2014} C. M{\"u}ller, X. Kong, J.-M. Cai, K. Melentijevic, A. Stacey, M. Markham, J. Isoya, S. Pezzagna, J. Meijer, J. Du, M. B. Plenio, B. Naydenov, L. P. McGuinness, and F. Jelezko, Nuclear magnetic resonance spectroscopy with single spin sensitivity, {\em Nat. Commun.} \textbf{5}, 4703 (2014).
\bibitem{lang2015dynamical} J. E. Lang, R. B. Liu, and T. S. Monteiro, Dynamical-Decoupling-Based Quantum Sensing: Floquet Spectroscopy, {\em Phys. Rev. X} \textbf{5}, 041016 (2015).
\bibitem{sasaki2018determination} Kento Sasaki, Kohei M. Itoh, and Eisuke Abe, Determination of the position of a single nuclear spin from free nuclear precessions detected by a solid-state quantum sensor, {\em Phys. Rev. B} \textbf{98}, 121405(R) (2018).
\bibitem{zopes2018nc} J. Zopes, K. S. Cujia, K. Sasaki, J. M. Boss, K. M. Itoh, and C. L. Degen, Three-dimensional localization spectroscopy of individual nuclear spins with sub-Angstrom resolution, {\em Nat. Commun.} \textbf{9}, 4678 (2018).
\bibitem{zopes2018prl} J. Zopes, K. Herb, K. S. Cujia, and C. L. Degen, Three-Dimensional Nuclear Spin Positioning Using Coherent Radio-Frequency Control, {\em Phys. Rev. Lett.} \textbf{121}, 170801 (2018).
\bibitem{pfender2019high} M. Pfender, P. Wang, H. Sumiya, S. Onoda, W. Yang, D. B. R. Dasari, P. Neumann, X.-Y. Pan, J. Isoya, R.-B. Liu, J. Wrachtrup, High-resolution spectroscopy of single nuclear spins via sequential weak measurements, {\em Nat. Commun.} \textbf{10}, 594 (2019).
\bibitem{zhao2011atomic} N. Zhao, J.-L. Hu, S.-W. Ho, J. T. K. Wan, and R. B. Liu, Atomic-scale magnetometry of distant nuclear spin clusters via nitrogen-vacancy spin in diamond, {\em Nat. Nanotechnol.} \textbf{6}, 242 (2011).
\bibitem{shi2014sensing} F. Shi, X. Kong, P. Wang, F. Kong, N. Zhao, R. B. Liu, and J. Du, Sensing and atomic-scale structure analysis of single nuclear-spin clusters in diamond, {\em Nat. Phys.} \textbf{10}, 21 (2014).
\bibitem{wang2016positioning} Z-Y. Wang, J. F. Haase, J. Casanova and M. B. Plenio, Positioning nuclear spins in interacting clusters for quantum technologies and bioimaging, {\em Phys. Rev. B} \textbf{93}, 174104 (2016).
\bibitem{wang2017delayed} Z.-Y. Wang, J. Casanova, and M. B. Plenio, Delayed entanglement echo for individual control of a large number of nuclear spins. {\em Nat. Commun.} \textbf{8}, 14660 (2017).
\bibitem{abobeih2018one} M. H. Abobeih, J. Cramer, M. A. Bakker, N. Kalb, M. Markham, D. J. Twitchen, and T. H. Taminiau, One-second coherence for a single electron spin coupled to a multi-qubit nuclear-spin environment, {\em Nat. Commun.} \textbf{9}, 2552 (2018).
\bibitem{degen2017quantum} C. L. Degen, F. Reinhard, and P. Cappellaro, Quantum sensing, {\em Rev. Mod. Phys.} \textbf{89}, 035002 (2017).
\bibitem{casanova2016noise} J. Casanova, Z.-Y. Wang, and M. B. Plenio, Noise-Resilient Quantum Computing with a Nitrogen-Vacancy Center and Nuclear Spins, {\em Phys. Rev. Lett.} \textbf{117}, 130502 (2016).
\bibitem{casanova2017arbitrary} J. Casanova, Z.-Y. Wang, and M. B. Plenio, Arbitrary nuclear-spin gates in diamond mediated by a nitrogen-vacancy-center electron spin. {\em Phys. Rev. A} \textbf{96}, 032314 (2017).
\bibitem{cai2013a} J. M. Cai, A. Retzker, F. Jelezko, and M. B. Plenio, A large-scale quantum simulator on a diamond surface at room temperature, {\em Nat. Phys.} \textbf{9}, 168 (2013).
\bibitem{humphreys2018deterministic} P. C. Humphreys, N. Kalb, J. P. J. Morits, R. N. Schouten, R. F. L. Vermeulen, D.l J. Twitchen, M. Markham, and R. Hanson, Deterministic delivery of remote entanglement on a quantum network, {\em Nature (London)} \textbf{558}, 268 (2018).
\bibitem{perlin2019noise} M. A. Perlin, Z.-Y. Wang, J. Casanova, and M. B. Plenio, Noise-resilient architecture of a hybrid electron-nuclear quantum register in diamond, {\em Quantum Sci. Technol.} \textbf{4}, 015007 (2019).
\bibitem{gullion1990new} T. Gullion, D. B. Barker and M. S. Conradi, New, compensated Carr-Purcell sequences, {\em J. Magn. Reson.} \textbf{89}, 479 (1990).
\bibitem{casanova2015robust} J. Casanova, Z-Y. Wang, J. F. Haase, and M. B. Plenio, Robust dynamical decoupling sequences for individual-nuclear-spin addressing, {\em Phys. Rev. A} \textbf{92}, 042304 (2015).
\bibitem{genov2017arbitrarily} G. T. Genov, D. Schraft, N. V. Vitanov, and T. Halfmann, Arbitrarily Accurate Pulse Sequences for Robust Dynamical Decoupling, {\em Phys. Rev. Lett.} \textbf{118}, 133202 (2017).
\bibitem{loretz2015spurious} M. Loretz, J. M. Boss, T. Rosskopf, H. J. Mamin, D. Rugar and C. L. Degen, Spurious Harmonic Response of Multipulse Quantum Sensing Sequences, {\em Phys. Rev. X } \textbf{5}, 021009 (2015).
\bibitem{haase2016pulse} J. F. Haase, Z-Y. Wang, J. Casanova and M. B. Plenio, Pulse-phase control for spectral disambiguation in quantum sensing protocols, {\em Phys. Rev. A} \textbf{94}, 032322 (2016).
\bibitem{lang2017enhanced} J. E. Lang, J. Casanova, Z.-Y. Wang, M. B. Plenio, T. S. Monteiro, Enhanced Resolution in Nanoscale NMR via Quantum Sensing with Pulses of Finite Duration, {\em Phys. Rev. Applied} \textbf{7}, 054009 (2017).
\bibitem{shu2017unambiguous} Z. Shu, Z. Zhang, Q. Cao, P. Yang, M. B. Plenio, C. M{\"u}ller, J. Lang, N. Tomek, B. Naydenov, L. P. McGuinness, F. Jelezko, and J. Cai, Unambiguous nuclear spin detection using an engineered quantum sensing sequence, {\em Phys. Rev. A} \textbf{96}, 051402(R) (2017).
\bibitem{haase2018soft} J. F. Haase, Z.-Y. Wang, J. Casanova, M. B. Plenio, Soft Quantum Control for Highly Selective Interactions among Joint Quantum Systems, {\em Phys. Rev. Lett.} \textbf{121}, 050402 (2018).
\bibitem{seeSM} See Supplemental Material.
\bibitem{lang2019non} J. E. Lang, T. Madhavan, J.-P. Tetienne, D. A. Broadway, L. T. Hall, T. Teraji, T. S. Monteiro, A. Stacey, and L. C. L. Hollenberg, Nonvanishing effect of detuning errors in dynamical-decoupling-based quantum sensing experiments, {\em Phys. Rev. A} \textbf{99}, 012110 (2019).
\bibitem{casanova2018shaped} J. Casanova, Z.-Y. Wang, I. Schwartz, M. B. Plenio, Shaped Pulses for Energy-Efficient High-Field NMR at the Nanoscale, {\em Phys. Rev. Applied} \textbf{10}, 044072 (2018).
\bibitem{laraoui2013} A. Laraoui, F. Dolde, C. Burk, F. Reinhard, J. Wrachtrup, and C. A. Meriles, High-resolution correlation spectroscopy of $^{13}{\rm C}$ spins near a nitrogen-vacancy centre in diamond, {\em Nat. Commun.} \textbf{4}, 1651 (2013).
\bibitem{ma2016proposal} W.-L. Ma and R.-B. Liu, Proposal for Quantum Sensing Based on Two-Dimensional Dynamical Decoupling: NMR Correlation Spectroscopy of Single Molecules, {\em Phys. Rev. Applied} \textbf{6}, 054012 (2016).
\bibitem{arrazola2018arrazola} I. Arrazola, J. Casanova, J. S. Pedernales, Z.-Y. Wang, E. Solano, M. B. Plenio, Pulsed dynamical decoupling for fast and robust two-qubit gates on trapped ions. {\em Phys. Rev. A} \textbf{97}, 052312 (2018).
\bibitem{manovitz2017fast} T. Manovitz, A. Rotem, R. Shaniv, I. Cohen, Y. Shapira, N. Akerman, A. Retzker, and R. Ozeri, Fast Dynamical Decoupling of the M{\o}lmer-S{\o}rensen Entangling Gate, {\em Phys. Rev. Lett.} \textbf{119}, 220505 (2017).
\end{thebibliography}
\pagebreak{}
{}
\begin{center} \textbf{\large{{Supplemental Material}}}{\large{ }} \par\end{center}{\large \par}
\setcounter{equation}{0} \setcounter{figure}{0} \setcounter{table}{0} \setcounter{page}{1} \makeatletter \global\long\def\theequation{S\arabic{equation}}
\global\long\def\thefigure{S\arabic{figure}}
\global\long\def\bibnumfmt#1{[S#1]}
\global\long\def\citenumfont#1{S#1}
\section{Experimental methods}
\subsection{Diamonds}
All experiments were performed on single NV centres. For the nanoscale NMR experiments [Fig. 3(b) of the main text] a $^{13}$C natural abundance diamond was implanted with $^{15}$N ions using an energy of 1.5\,keV and a dose of $2 \times 10^9\mathrm{\frac{^{15}N^+}{cm^2}}$. Subsequent annealing in vacuum at 1000$^\circ$C for 3 hours created shallow single NV centres with depths around $5\pm 1$\,nm. For the experiments measuring the classical AC fields [Fig. 3(a)] we used a different diamond, which was polished into a solid immersion lens. In order to create NV centres in this diamond, the flat surface was overgrown with an approximately 100\,nm thick layer of isotopically enriched $^{12}$C (99.999$\%$) using the plasma-enhanced chemical vapor deposition method, with parameters as in \cite{sm:Osterkamp}. The same diamond was used for the experiments showing the improved robustness of the randomisation protocol (Fig. 4). The experiments presented in Sec.~\ref{add_exp} were measured with an approximately 4\,$\mu$m deep NV centre in a flat diamond with 0.1\% $^{13}$C content. Before experiments, all diamonds were boiled in a 1:1:1 tri-acid mixture (H$_2$SO$_4$:HNO$_3$:HClO$_4$) for 4 hours at 130$^{\circ}$C.
\subsection{Setup}
Read-out and initialisation (into the $|0\rangle$ spin state) of the NV centre were performed with a home-built confocal setup using a 532\,nm laser. The laser beam was chopped using an acousto-optical modulator into pulses of 3\,$\mu$s duration. The spin-dependent fluorescence from the NV spin states was detected using an avalanche photodiode. The first 500\,ns of every laser pulse yield the spin population while the fluorescence between 1.5\,$\mu$s and 2.5\,$\mu$s was used to normalise the data.
Magnetic bias fields between 400\,G and 500\,G were used to lift the degeneracy of the $|-1\rangle, |+1\rangle$ spin states and create an effective qubit.
Microwave pulses resonant with the NV centre spin were applied using a 20\,$\mu$m diameter copper wire placed on the diamond surface as an antenna. The pulses were generated with an Arbitrary Waveform Generator (Tektronix AWG70001A, sampling rate 50\,GSamples/s) and amplified to give Rabi frequencies between 5 and 70\,MHz. The same wire was used to apply classical radio-frequency fields generated by a Gigatronics 2520B signal generator. For the classical AC field detection, background magnetic noise at the frequencies detected was determined to be at least 100-fold weaker than the measured signals.
\subsection{Measurement protocol}
All experiments were performed using the QuDi software suite \cite{sm:qudi}. For the randomised protocols the standard versions were modified by adding a random global phase to all $\pi$ pulses in a basic unit, as described in the main text. These phases were generated using the Python package 'random' with a uniform distribution between 0 and 2$\pi$.
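For illustration, such phase lists could be generated as in the following sketch (the actual QuDi pulse-sequence generation and hardware interface are not reproduced here).
\begin{verbatim}
import math
import random

def random_global_phases(M, seed=None):
    """One uniformly distributed global phase in [0, 2*pi) per repetition
    of the basic DD unit (illustrative helper, not the QuDi implementation)."""
    rng = random.Random(seed)
    return [2.0 * math.pi * rng.random() for _ in range(M)]

# K = 10 independent phase sets for M = 25 repetitions of the basic unit
phase_sets = [random_global_phases(M=25, seed=k) for k in range(10)]
\end{verbatim}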
Before applying the dynamical decoupling protocols, the spin of the NV centre is initialised in a coherent superposition $\frac{1}{\sqrt{2}}(|0\rangle+|1\rangle)$. Therefore, in addition to the laser pulse, a $\pi/2$ pulse is applied. Also, before the readout the acquired sensor phase is mapped into a population difference by an additional $\pi/2$ pulse. For our experiments showing the improved robustness we intentionally introduce pulse errors to the $\pi$ pulses. Those errors were calibrated in terms of the real Rabi frequency. The two $\pi/2$ pulses were always applied error-free. Every experiment was repeated several times under identical experimental conditions, but with different sets of phases, and the resulting data were averaged.
\section{Hamiltonian under dynamical decoupling control }
As stated in the main text, the Hamiltonian without dynamical decoupling (DD) control has the general form \begin{equation} \hat{H}^{\prime}(t)=\frac{1}{2}\hat{\sigma}_{z}\hat{E}(t), \end{equation}
where $\hat{\sigma}_{z}=|0\rangle\langle0|-|1\rangle\langle1|$ is the Pauli operator of the sensor. The environment operator $\hat{E}(t)$ includes both the target signal to be sensed and environmental noise. For the relevant case of nuclear spin sensing, $\hat{E}(t)=\frac{1}{2}\sum_{n}\left[\left(A_{n}^{\perp}\hat{I}_{n}^{+}e^{-i\omega_{n}t}+{\rm h.c.}\right)+A_{n}^{\parallel}\hat{I}_{n}^{z}\right]$, where $\hat{I}_{n}^{\alpha}$ ($\alpha=x,y,z$) are spin operators for the $n$th nuclear spin. $A_{n}^{\perp}$ and $A_{n}^{\parallel}$ are components of the hyperfine field at the position of the nuclear spin. The nuclear spin precession frequency $\omega_{n}$ is the Larmor frequency of the nuclear spin shifted by the hyperfine field at the location of the nuclear spin. For the case of a classical AC field, $\hat{E}(t)$ takes the form $\sum_{n}b_{n}\cos(\omega_{n}t+\phi_{n})$.
A sequence of applied microwave pulses yields the control Hamiltonian \begin{equation} \hat{H}_{{\rm ctrl}}(t)=\frac{1}{2}\Omega(t)\left[\hat{\sigma}_{x}\cos\phi(t)+\hat{\sigma}_{y}\sin\phi(t)\right]. \end{equation} In the rotating frame with respect to the control $\hat{H}_{{\rm ctrl}}(t)$, the Hamiltonian $\hat{H}^{\prime}(t)$ becomes \begin{equation} \hat{H}(t)=\frac{1}{2}\hat{\sigma}(t)\hat{E}(t), \end{equation} where $\hat{\sigma}(t)$ is $\hat{\sigma}_{z}$ in the Heisenberg picture with respect to $\hat{H}_{{\rm ctrl}}(t)$. In the following, we derive $\hat{\sigma}(t)$ and hence Eq. (2) in the main text.
If a $\pi$ pulse is applied at time $t_j$, the evolution driven by $\hat{H}_{{\rm ctrl}}(t)$ reads $\hat{P}_{j}(\theta) = \exp\left[-i\frac{1}{2} \theta \left( \hat{\sigma}_{x} \cos\phi_{j} + \hat{\sigma}_{y} \sin\phi_{j} \right) \right]$, where $\theta=\theta(t)\in[0,\pi]$ is the angle of rotation and $\phi(t_j)=\phi_j$. Defining $\hat{P}_{j}(\pi)\equiv \hat{P}_{j}$ as the propagator of a single $\pi$ pulse, the propagator for $2n+1$ ($n=0,1,\ldots$) pulses reads \begin{align} \hat{U}_{2n+1} & =\hat{P}_{2n+1}\cdots\hat{P}_{2}\hat{P}_{1}\\
& =(-1)^{n+1}e^{i\frac{\pi}{2}}\exp\left(-i\varphi_{2n+1}\right)|0\rangle\langle1|+{\rm h.c.}, \end{align} and for $2n$ pulses \begin{align} \hat{U}_{2n} & =\hat{P}_{2n}\cdots\hat{P}_{2}\hat{P}_{1}\\
& =(-1)^{n}\exp\left(i\varphi_{2n}\right)|0\rangle\langle0|+(-1)^{n}\exp\left(-i\varphi_{2n}\right)|1\rangle\langle1|, \end{align} where $\varphi_{2n+1}=-\sum_{l=1}^{2n+1}(-1)^{l}\phi_{l}$ and $\varphi_{2n}=-\sum_{l=1}^{2n}(-1)^{l}\phi_{l}$. Using $\hat{U}_{2n+1}$ and $\hat{U}_{2n}$, we find $\hat{\sigma}_{z}$ in the rotating frame of the control during the $j$th pulse \begin{align} \hat{\sigma}(t) & =[\hat{P}_{j}(\theta)\hat{U}_{j-1}]^{\dagger}\hat{\sigma}_{z}[\hat{P}_{j}(\theta)\hat{U}_{j-1}]\\
& =F_{z}(t)\hat{\sigma}_{z}+\left[F_{\perp}(t)|1\rangle\langle0|+{\rm h.c.}\right], \end{align} where the modulation functions are \begin{equation} F_{z}(t)=(-1)^{j-1}\cos\theta\label{sm:eq:Fz} \end{equation} \begin{equation} F_{\perp}(t)=i(-1)^{j-1}\exp\left\{-i[2\sum_{l=1}^{j-1}(-1)^{l}\phi_{l}+(-1)^{j}\phi_{j}]\right\}\sin\theta.\label{sm:eq:Fx} \end{equation} Because $\theta=\theta(t)$ in Eqs. (\ref{sm:eq:Fz}) and (\ref{sm:eq:Fx}) is the pulse area that the $j$th pulse has rotated at the moment $t$, for instantaneous pulses $F_{\perp}(t)$ has no effect (because $\sin\theta=0$ for all time $t$) and $F_{z}(t) \in \{\pm 1\}$. For the realistic case that the pulses are not instantaneous, $F_{\perp}(t)$ is non-zero during the $\pi$ pulses.
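For rectangular pulses, Eqs.~(\ref{sm:eq:Fz}) and (\ref{sm:eq:Fx}) can be evaluated numerically as in the following Python sketch (the pulse timing, pulse duration and XY8-like phase pattern below are hypothetical placeholders).
\begin{verbatim}
import numpy as np

def modulation_functions(t, starts, phases, t_pulse):
    """F_z(t) and F_perp(t) for rectangular pi pulses of duration t_pulse
    (constant Rabi frequency pi/t_pulse); 'starts' must be sorted."""
    starts = np.asarray(starts)
    j = int(np.searchsorted(starts, t, side='right'))   # pulses started so far
    if j == 0:
        return 1.0, 0.0 + 0.0j
    theta = min(np.pi, np.pi * (t - starts[j - 1]) / t_pulse)  # rotated angle
    sign = (-1.0) ** (j - 1)
    Fz = sign * np.cos(theta)
    chi = 2 * sum((-1) ** l * phases[l - 1] for l in range(1, j)) \
          + (-1) ** j * phases[j - 1]
    Fperp = 1j * sign * np.exp(-1j * chi) * np.sin(theta)
    return Fz, Fperp

# Hypothetical example: XY8 unit, spacing tau = 1, 10% duty-cycle pulses
tau, t_p = 1.0, 0.1
starts = tau / 2 + tau * np.arange(8) - t_p / 2
phases = [0, np.pi/2, 0, np.pi/2, np.pi/2, 0, np.pi/2, 0]
for t in (0.2, 0.5, 1.0, 1.5):
    print(t, modulation_functions(t, starts, phases, t_p))
\end{verbatim}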
\section{Fourier amplitudes of the modulation functions} For DD sequences that are $M$ periodic repetitions of a basic pulse unit with period $T$ and $F_{\alpha}(t+T)=F_{\alpha}(t)$ ($\alpha=z,\perp$), the $k$th Fourier amplitude of the modulation functions (over the total sequence time $T_{{\rm total}}=MT$) is \begin{align} f^{\alpha}_{k} & \equiv\frac{1}{MT}\int_{0}^{MT}F_{\alpha}(t)\exp\left(-i\frac{2\pi kt}{MT}\right)dt\\
& =\frac{1}{MT}\sum_{m=1}^{M}\int_{(m-1)T}^{mT}F_{\alpha}(t)\exp\left(-i\frac{2\pi kt}{MT}\right)dt\\
& =c_{k,M}\tilde{f}^{\alpha}_{k/M}, \end{align} where \begin{equation} \tilde{f}^{\alpha}_{k/M} \equiv \frac{1}{T}\int_{0}^{T}F_{\alpha}(t)\exp\left(-i\frac{2\pi k t}{M T}\right) dt, \end{equation} and $c_{k,M}=\frac{1}{M}\sum_{m=1}^{M}\exp\left(-i\frac{2\pi k(m-1)}{M}\right)$. When $k/M$ is not an integer, $Mc_{k,M}$ is a sum over roots of unity, so it cancels to zero. When $k/M$ is an integer, however, the sum gives $c_{k,M}=1$. Therefore, for standard repetitions of a basic pulse unit, we obtain (for $k=1,2,\ldots$) \begin{equation} f^{\alpha}_{k}=\begin{cases} \tilde{f}^{\alpha}_{k/M} & {\rm if}\ k/M\in\mathbb{Z},\\ 0 & {\rm otherwise}. \end{cases} \end{equation}
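This selection rule is easily checked numerically, for example as follows.
\begin{verbatim}
import numpy as np

def c(k, M):
    """c_{k,M} = (1/M) * sum_{m=1}^{M} exp(-2*pi*i*k*(m-1)/M)."""
    m = np.arange(1, M + 1)
    return np.mean(np.exp(-2j * np.pi * k * (m - 1) / M))

M = 8   # arbitrary example value
print([np.round(abs(c(k, M)), 12) for k in range(1, 2 * M + 1)])
# |c_{k,M}| is 1 when k is a multiple of M and 0 otherwise
\end{verbatim}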
Under the randomisation protocol a random phase is added to all pulses in the $m$-th repetition of the basic unit, so a set of $M$ random phases is generated, $\{\Phi_{r,m}|m=1,\ldots,M\}$. This transformation does not affect $F_{z}(t)$ but alters $F_{\perp}(t)\rightarrow F_{\perp}(t)e^{i\Phi_{r,m}}$ for the $m$-th unit of the sequence. The Fourier amplitudes $f^{z}_{k}$ are thus unaffected but we have \begin{align} f^{\perp}_{k} & =\frac{1}{MT}\int_{0}^{MT}F_{\perp}(t)\exp\left(-i\frac{2\pi kt}{MT}\right)dt\\
& =\frac{1}{MT}\sum_{m=1}^{M}\int_{(m-1)T}^{mT}F_{\perp}(t)e^{i\Phi_{r,m}}\exp\left(-i\frac{2\pi kt}{MT}\right)dt\\
& =Z_{r,M}\tilde{f}^{\perp}_{k/M}, \end{align} where $Z_{r,M}=\frac{1}{M}\sum_{m=1}^{M}e^{i[\Phi_{r,m}-2\pi k(m-1)/M]}$. Because $\Phi_{r,m}$ is chosen randomly, $\Phi_{r,m}-2\pi k(m-1)/M$ is also random and we can write \begin{equation} Z_{r,M}=\frac{1}{M}\sum_{m=1}^{M}\exp(i\Phi_{r,m}), \end{equation}
which is Eq. (5) in the main text. Here $Z_{r,M}$ is a sum of random complex phases and represents a 2D random walk. It can be shown that $|Z_{r,M}|^2$ has the average $\langle|Z_{r,M}|^2\rangle=1/M$ and the variance $\langle(|Z_{r,M}|^2- \langle|Z_{r,M}|^2\rangle)^2\rangle =(M-1)/M^3$. For example, the average can be obtained as follows. By definition, \begin{align}
|Z_{r,M}|^2 & = \frac{1}{M^2}\sum_{m,n=1}^{M}\exp[i(\Phi_{r,m}-\Phi_{r,n})] \\ & = \frac{1}{M^2} \left[M + \sum_{m\neq n}^{M}e^{i(\Phi_{r,m}-\Phi_{r,n})}\right]. \end{align}
Therefore, $\langle|Z_{r,M}|^2\rangle=1/M$ because the average of independent random phases is zero. Similarly, one obtains the variance of $|Z_{r,M}|^2$.
Consider the signal of a single nuclear spin. The population signal of expected resonances is given by $P=\cos^2(\frac{1}{2} |f^{z}_{k}|A_{\perp}MT)$, where $A_\perp$ is the perpendicular coupling strength to a single spin-half~\cite{sm:lang2017enhanced}. When the signals are weak this can be approximated by $P=1- (\frac{1}{2}|f^{z}_{k}|A_{\perp}MT)^2$ thus the signal contrast is proportional to $M^2$. This is unaffected by the addition of the random phase as $F_z(t)$ is insensitive to the pulse phases.
The spurious signal of a nuclear spin is given by $P =1-\sin^2(\frac{1}{2}A_{\perp}|f^{\perp}_{k}|MT)\cos^2(\phi^{\perp}_{k})$, where $\phi^{\perp}_{k}$ is the complex phase of $f^{\perp}_{k}$~\cite{sm:lang2017enhanced}. For the standard protocol, we have $P= 1-\sin^2(\frac{1}{2}A_{\perp}|\tilde{f}^{\perp}_{k/M}|MT)\cos^2(\phi^{\perp}_{k})$. When the signal is weak this can be approximated by $P \approx 1-(\frac{1}{2}A_{\perp}|\tilde{f}^{\perp}_{k/M}|MT)^2\cos^2(\phi^{\perp}_{k})$ so when no random phase is added the spurious signal contrast is proportional to $M^2$.
When the random phase is added, the expected value of the signal contrast is given by $P \approx 1 - \frac{1}{8}M(T A_{\perp}|\tilde{f}^{\perp}_{k/M}|)^2$ (using $\langle |f^{\perp}_{k}|^2 \rangle = \langle |Z_{r,M}\tilde{f}^{\perp}_{k/M}|^2 \rangle = |\tilde{f}^{\perp}_{k/M}|^2/M$ and $\langle\cos^2(\phi_\perp^k)\rangle = 1/2$). Compared with standard repetitions of a basic pulse unit, this contrast only grows proportionally to $M/2$, thus providing a significant suppression of spurious signals completely independent of the used pulse sequence. As shown above, the variance of the spurious signal due to random phases is determined by the variance of $|Z_{r,M}|^2$ (which is $(M-1)/M^3$). When one repeats the randomisation protocol with $K$ realizations of the random phase sequences $\{\Phi_{r,m}\}$ and averages the measured signals, the variance is further reduced by a factor of $1/K$ according to the central limit theorem.
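As a simple numerical illustration of the two contrast scalings (with hypothetical values for $A_{\perp}$, $T$, $M$ and $|\tilde{f}^{\perp}_{k/M}|$, and omitting the $\cos^{2}\phi^{\perp}_{k}$ factor for simplicity, so that the expected suppression of the example below is $\approx M$ rather than $2M$):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)

# hypothetical parameters: coupling, unit duration, repetitions, |f_tilde|, averages
A_perp, T, M, f_tilde, K = 2 * np.pi * 2e3, 8e-6, 200, 0.02, 10

def spurious(Z):
    """sin^2(0.5 * A_perp * |f_k^perp| * M * T) with |f_k^perp| = |Z| * f_tilde."""
    return np.sin(0.5 * A_perp * abs(Z) * f_tilde * M * T) ** 2

standard = spurious(1.0)                                    # standard protocol: Z = 1
Zs = [np.mean(np.exp(1j * rng.uniform(0, 2 * np.pi, M))) for _ in range(K)]
randomised = np.mean([spurious(Z) for Z in Zs])             # averaged over K runs
print(f"standard: {standard:.4f}   randomised (K={K}): {randomised:.6f}")
\end{verbatim}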
\section{Enhancing sequence robustness} For simplicity, in the following discussion we neglect the effect of the environment and concentrate on static control imperfections.
\subsection{Evolution operator of a basic pulse unit}
The evolution driven by a single $\pi$ pulse with control errors takes the general form \begin{equation} \hat{U}_{\pi}(\phi)=\left(\begin{array}{cc} e^{-i\alpha}\sin\epsilon & ie^{-i(\beta+\phi)}\cos\epsilon\\ ie^{i(\beta+\phi)}\cos\epsilon & e^{i\alpha}\sin\epsilon \end{array}\right). \end{equation} We assume that each pulse has the same static errors, that is, $\alpha$, $\beta$, $\epsilon$ are the same for all pulses. The pulse phase $\phi$ determined by the initial phase of the driving field is a controllable parameter. When $\epsilon = 0$ and $\beta=0$, $\hat{U}_{\pi}(\phi)$ describes a perfect $\pi$ pulse.
Consider a basic unit with $N$ $\pi$ pulses applied at $t_{j}$ $(j=1,\ldots,N)$ with phases $\phi_{j}$. For simplicity, we use the transformation $t_{j+1}-t_{j}=\tau_{j}+\tau_{j+1}$ with $\tau_{0}\equiv0$. This transformation splits $t_{j+1}-t_{j}$ into two parts, where $\tau_{j}$ [$\tau_{j+1}$] is associated with the $j$th [$(j+1)$th] pulse. From the definition, we have \begin{align} \tau_{N+1} & =(t_{N+1}-t_{N})-\tau_{N}\\
& =(-1)^{N}\sum_{j=0}^{N}(-1)^{j}(t_{j+1}-t_{j}), \end{align} by recursively using $\tau_{j+1}=(t_{j+1}-t_{j})-\tau_{j}$. Because a basic DD unit is designed to eliminate static dephasing noise, the timing of the sequence satisfies $\sum_{j=0}^{N}(-1)^{j}(t_{j+1}-t_{j})=0$. In other words, $\tau_{N+1}=0$ for a basic pulse unit.
Using $\tau_{0}=\tau_{N+1}=0$ and noting that a detuning $\Delta$ of the control field introduces a control phase error $\Delta(t_{j+1}-t_{j})=\Delta(\tau_{j+1}+\tau_{j})$ between the times $t_{j}$ and $t_{j+1}$, the propagator of a basic pulse unit can be written as \begin{equation} \hat{U}_{{\rm unit}}=\hat{U}_{N}\hat{U}_{N-1}\cdots \hat{U}_{2}\hat{U}_{1}, \end{equation} where, combining the contribution of a $\pi$ pulse and the free evolution, we obtain \begin{align} \hat{U}_{j} & =\left(\begin{array}{cc} e^{-i[\alpha+(\tau_{j}+\tau_{j-1})\Delta]}\sin\epsilon & ie^{-i[\beta+\phi_{j}-(\tau_{j}+\tau_{j-1})\Delta]}\cos\epsilon\\ ie^{i[\beta+\phi_{j}-(\tau_{j}+\tau_{j-1})\Delta]}\cos\epsilon & e^{i[\alpha+(\tau_{j}+\tau_{j-1})\Delta]}\sin\epsilon \end{array}\right)\nonumber\\
& =\left(\begin{array}{cc} e^{-i[\alpha+(\tau_{j}+\tau_{j-1})\Delta]}\epsilon & ie^{-i[\beta+\phi_{j}-(\tau_{j}+\tau_{j-1})\Delta]}\\ ie^{i[\beta+\phi_{j}-(\tau_{j}+\tau_{j-1})\Delta]} & e^{i[\alpha+(\tau_{j}+\tau_{j-1})\Delta]}\epsilon \end{array}\right)+O(\epsilon^2), \end{align}
\begin{figure}
\caption{Spectra of a single NV centre coupled to both individual $^{13}$C spins and the background $^{13}$C spin bath. (a) The readout in the $x$ and $-x$ bases highlights the saturation feature typical for a bath. (b) Comparison of the standard XY8 sequence and its randomised version. The randomisation of the $\pi$-pulse phases suppresses the spurious signals efficiently.}
\label{fig_bath_single}
\end{figure}
For two pulses, we find \begin{equation} \hat{U}_{j+1}\hat{U}_{j}=\left(\begin{array}{cc} e^{i\varphi_{j}} & ic_{j}\epsilon\\ ic_{j}^{*}\epsilon & -e^{-i\varphi_{j}} \end{array}\right)+O(\epsilon^{2}), \end{equation} where \begin{equation} \varphi_{j}= \Delta(\tau_{j+1}-\tau_{j-1})-(\phi_{j+1}-\phi_{j})+\pi, \end{equation} and \begin{equation} c_{j}=e^{-i[\beta+\phi_{j}+\alpha+\Delta(\tau_{j+1}-\tau_{j-1})]}+e^{-i[\beta+\phi_{j+1}-\alpha-\Delta(\tau_{j+1}+2\tau_{j}+\tau_{j-1})]}, \end{equation} is a sum of phase factors where each term has a $\phi_{j}$ or $\phi_{j+1}$. Multiplying the $\hat{U}_{j}$ recursively and using $\tau_{0}=\tau_{N+1}=0$, we obtain for even $N$ \begin{equation} \hat{U}_{{\rm unit}}=\left(\begin{array}{cc} e^{i\varphi} & iC\epsilon\\ iC^{*}\epsilon & e^{-i\varphi} \end{array}\right)+O(\epsilon^{2}), \label{sm:eq:UunitEven} \end{equation} where \begin{equation} \varphi=\sum_{j=1}^{N/2}\left[\phi_{2j-1}-\phi_{2j}+\pi\right], \end{equation} and $C$ is a sum of phase factors where each term has an independent sum of the phases $\phi_{j}$. Indeed, Eq.~(\ref{sm:eq:UunitEven}) has the general form of a pulse sequence with an even number of $\pi$ pulses with respect to the leading order error $\epsilon$~\cite{sm:genov2017arbitrarily}.
Similarly, we have for odd $N$, \begin{equation} \hat{U}_{{\rm unit}}=\left(\begin{array}{cc} C^{\prime*}\epsilon & ie^{-i(\varphi+\beta)}\\ ie^{i(\varphi+\beta)} & C^{\prime}\epsilon \end{array}\right)+O(\epsilon^{2}),\label{sm:eq:UunitOdd} \end{equation} where \begin{equation} \varphi=\sum_{j=1}^{(N-1)/2} \left[\phi_{2j-1}-\phi_{2j}+\pi\right]+\phi_{N}, \end{equation} and $C^\prime$ is a sum of phase factors where each term has an independent sum of the phases $\phi_{j}$.
For the case that the lower-order errors of single $\pi$ pulses have been compensated by a robust sequence, one can still write the propagator in terms of the leading order error that has not been compensated by the sequence. The evolution operator of a single pulse sequence unit still has a general form given by Eq.~(\ref{sm:eq:UunitEven}) or (\ref{sm:eq:UunitOdd}), but may have another error $\epsilon_{\varphi}$ added to $\varphi$. For many sequences, such as the CP~\cite{sm:carr1954effects}, XY8~\cite{sm:gullion1990new}, AXY8~\cite{sm:casanova2015robust}, YY8~\cite{sm:shu2017unambiguous}, and UR-($4n+2$) ($n=1,2,\ldots$)~\cite{sm:genov2017arbitrarily} sequences, $\epsilon_{\varphi}$ is a higher-order error compared with $\epsilon$ and therefore can be neglected in the leading order error analysis.
\subsection{Standard protocol} It is obvious that the control errors coherently accumulate in the standard protocol where the basic pulse unit is repeated $M$ times as $\hat{U} = (\hat{U}_{\rm unit})^M$. For example, for even $N$ and $\varphi=0$, \begin{equation} \hat{U} = \left(\begin{array}{cc} 1 & i M C \epsilon \\ i M C^{*} \epsilon & 1 \end{array}\right)+O(\epsilon^{2}), \end{equation} where the error $MC\epsilon$ scales linearly with $M$.
\subsection{Randomisation protocol} When one adds a random global phase $\Phi_{r,m}$ to all the $\pi$ pulses in a basic DD unit, each $\hat{U}_{{\rm unit}}$ becomes \begin{equation} \hat{U}_{{\rm unit}}(\Phi_{r,m})=\left(\begin{array}{cc} e^{i\varphi} & iCe^{-i\Phi_{r,m}}\epsilon\\ iC^{*}e^{i\Phi_{r,m}}\epsilon & e^{-i\varphi} \end{array}\right)+O(\epsilon^{2}). \end{equation} For two $\hat{U}_{{\rm unit}}$, we have \begin{equation} \hat{U}_{{\rm unit}}(\Phi_{r,m+1})\hat{U}_{{\rm unit}}(\Phi_{r,m})=\left(\begin{array}{cc} e^{2i\varphi} & iZ_m\epsilon\\ iZ_m^{*}\epsilon & e^{-2i\varphi} \end{array}\right)+O(\epsilon^{2}), \end{equation} where $Z_m=e^{-i\varphi} C (e^{-i\Phi_{r,m+1}}+e^{-i(\Phi_{r,m}-2\varphi)})$ is a sum of two phase factors and can be equally written as $Z_m=e^{-i\varphi} C (e^{-i\Phi_{r,m+1}}+e^{-i\Phi_{r,m}})$ for random phases $\Phi_{r,m}$ and $\Phi_{r,m+1}$. By mathematical induction, the evolution operator of $M$ DD units with random phases $\{\Phi_{r,m}\}$ is \begin{align} \hat{U}_{M} & =\hat{U}_{{\rm unit}}(\Phi_{r,M})\cdots \hat{U}_{{\rm unit}}(\Phi_{r,2})\hat{U}_{{\rm unit}}(\Phi_{r,1}),\\
& =\left(\begin{array}{cc} e^{iM\varphi} & i Z_{r,M}MC\epsilon\\ iZ_{r,M}^{*}MC^{*}\epsilon & e^{-iM\varphi} \end{array}\right)+O(\epsilon^{2}), \end{align} where the error $MC\epsilon$ is suppressed by the factor $Z_{r,M}=\frac{1}{M}\sum_{m=1}^{M}\exp(i\Phi_{r,m})$ for the random phases $\{\Phi_{r,m}\}$. This result is valid for an odd number $N$ of pulses as well.
\section{Additional experiments} \label{add_exp} One of the most important advantages of quantum sensors is the possibility to measure quantum signals, such as hyperfine fields of single spins. This is highly relevant for the characterisation of quantum systems. The randomisation protocol efficiently suppresses spurious harmonics from a bath as well as from single spins. In Fig.~\ref{fig_bath_single}(a) we show the spectrum of an NV centre that couples to both individual $^{13}$C spins and the background $^{13}$C spin bath. The signal of the bath is centred around the bare Larmor frequency of $^{13}$C at this bias field and shows the typical saturation, highlighted by measuring the spectra for both $x$-basis and $-x$-basis readout. The signal of at least one strongly coupled spin is shifted to higher frequencies due to the hyperfine coupling and it overlaps for the different readout bases. In Fig.~\ref{fig_bath_single}(b) we compare the spectra measured with the standard XY8 sequence and its randomised version. The identical signal shape and amplitude of the non-spurious signals verify that the randomised version does not alter the signal accumulation. In order to amplify the spurious harmonics, we use a larger number of $\pi$ pulses ($M=60$ and $M=100$). We observe peaks at $2\nu_0$ and $4\nu_0$ for the standard XY8 method. For the same bias field the Larmor frequency of $^1$H is about 1.81\,MHz, which would make a differentiation very difficult. These spurious signals can be efficiently suppressed with the randomisation protocol.
\end{document}
\begin{document}
\begin{abstract} This work studies the Cauchy problem for the energy-critical inhomogeneous Hartree equation with inverse square potential
$$i\partial_t u-\mathcal K_\lambda u=\pm |x|^{-\tau}|u|^{p-2}(I_\alpha *|\cdot|^{-\tau}|u|^p)u, \quad \mathcal K_\lambda=-\Delta+\frac\lambda{|x|^2}$$
in the energy space $H_\lambda^1:=\{f\in L^2,\quad\sqrt{\mathcal{K}_\lambda}f\in L^2\}$. In this paper, we develop a well-posedness theory and investigate the blow-up of solutions in $H_\lambda^1$. Furthermore, we present a dichotomy between energy-bounded and non-global existence of solutions under the ground state threshold. To this end, we use Caffarelli-Kohn-Nirenberg weighted interpolation inequalities and some equivalent norms associated with $\mathcal K_\lambda$, which make it possible to control the non-linearity involving the singularity $|x|^{-\tau}$ as well as the inverse square potential. The novelty here is the investigation of the energy-critical regime, which has remained open, and the challenge is to deal with three technical problems: a non-local source term, an inhomogeneous singular term $|\cdot|^{-\tau}$, and the presence of an inverse square potential.
\end{abstract} \maketitle \tableofcontents
\renewcommand{\thesection.\arabic{equation}}{\thesection.\arabic{equation}}
\section{Introduction}
In this paper we are concerned with the Cauchy problem for the inhomogeneous generalized Hartree equation with inverse square potential \begin{equation} \begin{cases}\label{S}
i\partial_t u-\mathcal K_\lambda u=\epsilon |x|^{-\tau}|u|^{p-2}(I_\alpha *|\cdot|^{-\tau}|u|^p)u,\\
u(x,0)=u_0(x), \quad (x,t)\in \mathbb{R}^n \times \mathbb{R}, \end{cases} \end{equation}
where $p>2$, $\epsilon=\pm1$, and $\mathcal K_\lambda:=-\Delta+\frac\lambda{|x|^{2}}$ satisfying $\lambda>-\frac{(n-2)^2}{4}$. Here the case $\epsilon =1$ is \textit{defocusing}, while the case $\epsilon =-1$ is \textit{focusing}. The Riesz potential is defined on $\mathbb{R}^n$ by
$$I_\alpha:=\frac{\Gamma(\frac{n-\alpha}2)}{\Gamma(\frac\alpha2)\pi^\frac{n}22^\alpha}\,|\cdot|^{\alpha-n},\quad 0<\alpha<n.$$ The assumption on $\lambda$ comes from the sharp Hardy inequality \cite{abde}, \begin{equation}\label{prt}
\frac{(n-2)^2}4\int_{\mathbb{R}^n}\frac{|f(x)|^2}{|x|^2}\,dx\leq \int_{\mathbb{R}^n}|\nabla f(x)|^2 dx, \end{equation}
which guarantees that $\mathcal K_\lambda$ is a positive self-adjoint extension of $-\Delta+\lambda|x|^{-2}$. It is known that in the range $-\frac{(n-2)^2}4 <\lambda< 1-\frac{(n-2)^2}4$, the extension is not unique (see \cite{ksww,ect}). In such a case, one picks the Friedrichs extension (see \cite{ksww,pst}).
The problem \eqref{S} arises in various physical contexts. In the linear regime ($\epsilon=0$), the considered Schr\"odinger equation models quantum mechanics \cite{ksww,haa}. In the non-linear regime without potentials, namely $\lambda=0\neq\epsilon$, the equation \eqref{S} is of interest in the mean-field limit of large systems of non-relativistic bosonic atoms and molecules in a regime where the number of bosons is very large, but the interactions between them are weak \cite{fl,hs,pg,mpt}. The homogeneous problem associated to the considered problem \eqref{S}, specifically, when $\lambda=\tau=0$, has several physical origins such as quantum mechanics \cite{pg,pgl} and Hartree-Fock theory \cite{ll}. The particular case $p=2$ and $\lambda=\tau=0$ is called standard Hartree equation. It is a classical limit of a field equation describing a quantum mechanical non-relativistic many-boson system interacting through a two body potential \cite{gvl}.
Now, let us return to the mathematical aspects of the generalized Hartree equation \eqref{S}. Recall the critical Sobolev index. If $u(x,t)$ is a solution of \eqref{S}, so is the family $$u_\delta(x,t):=\delta^{\frac{2-2\tau+\alpha}{2(p-1)}} u(\delta x, \delta^2 t),$$ with the re-scaled initial data $u_{\delta,0}:=u_{\delta}(x,0)$ for all $\delta>0$. Then, it follows that \begin{equation*}
\|u_{\delta,0}\|_{\dot H^1}=\delta^{1-\frac n2 +\frac{2-2\tau+\alpha}{2(p-1)}}\|u_0\|_{\dot H^1}. \end{equation*} If $p=1+\frac{2-2\tau+\alpha}{n-2}$, the scaling preserves the $\dot H^1$ norm of $u_0$, and in this case, \eqref{S} is referred to as the energy-critical inhomogeneous generalized Hartree equation. Moreover, the solution to \eqref{S} satisfies the conservation of mass and energy, where the mass conservation is \begin{equation}
\mathcal M[u(t)]:=\int_{\mathbb{R}^n} |u(x,t)|^2 dx = \mathcal M [u_0], \end{equation} and the energy conservation is \begin{equation}
\mathcal E[u(t)]:= \int_{\mathbb{R}^n}\Big(|\nabla u|^2 +\lambda |x|^{-2} |u|^2\Big)\,dx+ \frac{\epsilon}{p}\mathcal P[u(t)]=\mathcal E[u_0], \end{equation} where the potential energy reads
$$\mathcal P[u(t)]:=\int_{\mathbb{R}^n} |x|^{-\tau}\big(I_\alpha *|\cdot|^{-\tau}|u|^p\big)|u|^p dx.$$
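Note that the critical power quoted above is exactly the one for which the exponent of $\delta$ in the scaling identity for $\|u_{\delta,0}\|_{\dot H^1}$ vanishes:
\begin{equation*}
1-\frac n2+\frac{2-2\tau+\alpha}{2(p-1)}=0
\quad\Longleftrightarrow\quad
p-1=\frac{2-2\tau+\alpha}{n-2}
\quad\Longleftrightarrow\quad
p=1+\frac{2-2\tau+\alpha}{n-2}.
\end{equation*}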
To the best of our knowledge, this paper is the first one dealing with the energy-critical inhomogeneous Hartree equation with inverse square potential, precisely \eqref{S} with $\lambda\neq0$.
The main contribution is to develop a local well-posedness theory in the energy-critical case, as well as to investigate the blow-up of the solution in energy space for the inhomogeneous generalized Hartree equation \eqref{S}. Precisely, the local theory is based on the standard contraction mapping argument via the availability of Strichartz estimates. More interestingly, we take advantage of some equivalent norms considering the operator $\mathcal{K}_\lambda$, namely $\|\sqrt{\mathcal{K}_\lambda}u\|_r\simeq\|u\|_{\dot W^{1,r}}$, which makes it possible to apply the contraction mapping principle without directly handling the operator. In the focusing regime ($\epsilon=-1$), we prove that the solution blows up in finite time without the classical assumptions such as radial symmetry or $|x|u_0 \in L^2$. The blow-up phenomenon is expressed in terms of the non-conserved potential energy, which may give a criterion in the spirit of \cite{vdd} and implies in particular the classical phenomena under the ground state threshold in the spirit of \cite{km}.
In this paper, we deal with three technical problems posed by the equation \eqref{S}: a non-local source term, the inhomogeneous singular term $|\cdot|^{-\tau}$, and the presence of an inverse square potential. Indeed, in order to deal with the singular term $|\cdot|^{-\tau}$ in Lebesgue spaces, the method used in the literature decomposes the integrals on the unit ball and its complement (see, for example, \cite{mt}).
However, this is no longer sufficient to conclude in the energy-critical case. For $\lambda=0$, the first author used some Lorentz spaces with the useful property $|\cdot|^{-\tau}\in L^{\frac{n}{\tau},\infty}$. To overcome these difficulties, we make use of some Caffarelli-Kohn-Nirenberg weighted interpolation inequalities, which is different from the existing approaches.
Before stating our results, we introduce some Sobolev spaces defined in terms of the operator $\mathcal K_\lambda$ as the completion of $C^\infty_0(\mathbb{R}^n)$ with respect to the norms \begin{align*}
\|u\|_{\dot W^{1,r}_\lambda}&:=\|\sqrt{\mathcal K_\lambda} u\|_{L^r} \quad \textnormal{and} \quad \|u\|_{W^{1,r}_\lambda}:=\|\langle \sqrt{\mathcal K_\lambda}\rangle u\|_{L^r}, \end{align*}
where $\langle \cdot\rangle:=(1+|\cdot|^2)^{1/2}$ and $L^r:=L^r(\mathbb{R}^n)$. We denote also the particular Hilbert cases $\dot W^{1,2}_\lambda=\dot H^1_\lambda$ and $W^{1,2}_\lambda=H^1_\lambda$. We note that by the definition of the operator $\mathcal K_\lambda$ and Hardy estimate \eqref{prt}, one has \begin{align*}
\|u\|_{\dot H^1_\lambda}&:=\|\sqrt{\mathcal K_\lambda}u\|=\big(\|\nabla u\|^2+\lambda\||x|^{-1}u\|^2\big)^\frac12\simeq \|u\|_{\dot H^1}, \end{align*}
where we write for simplicity $\|\cdot\|:=\|\cdot\|_{L^2(\mathbb{R}^n)}$.
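Let us also record one way to see the claimed equivalence (a standard argument, written here under the assumption that the Hardy estimate \eqref{prt} takes the usual form $\||x|^{-1}u\|^2\leq\frac{4}{(n-2)^2}\|\nabla u\|^2$): writing $\lambda=\lambda_+-\lambda_-$ with $\lambda_\pm\geq0$, one gets
\begin{equation*}
\Big(1-\frac{4\lambda_-}{(n-2)^2}\Big)\|\nabla u\|^2
\;\leq\;\|\nabla u\|^2+\lambda\||x|^{-1}u\|^2
\;\leq\;\Big(1+\frac{4\lambda_+}{(n-2)^2}\Big)\|\nabla u\|^2,
\end{equation*}
and both constants are positive precisely because $\lambda>-\frac{(n-2)^2}{4}$.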
\subsection{Well-posedness in the energy-critical case}
The theory of well-posedness for the inhomogeneous Hartree equation ($\lambda=0$ in \eqref{S}) has been extensively studied in recent years and is partially understood (see, for example, \cite{mt,sa, kls, sk} and references therein). For related results on the scattering theory, see also \cite{sx} for spherically symmetric data and \cite{cx} for the general case.
Our first result is the following well-posedness in the energy-critical case. \begin{thm}\label{loc}
Let $n\ge3$, $\lambda >-\frac{(n-2)^2}{4}$ and $2\kappa=n-2-\sqrt{(n-2)^2+4\lambda}$.
Assume that
\begin{equation}\label{1.6}
0<\alpha<n, \quad 2\kappa < n-2-\frac{2(n-2)}{3n-2+2\sqrt{9n^2+8n-16}}
\end{equation}
and
\begin{equation}\label{1.7}
\frac{\alpha}{2}-\frac{n+2+\sqrt{9n^2+8n-16}}{2}<\tau < \frac{\alpha}{2}-\max\{\frac{n-4}{2}, \frac{n-4}{n},\frac{\kappa}{n-2-2\kappa}-\frac{n}{4}\}.
\end{equation}
Then, for $u_0 \in H_{\lambda}^1(\mathbb{R}^n)$, there exist $T>0$ and a unique solution
$$u\in C([0,T]; H_\lambda^1) \cap L^q([0,T];W_{\lambda}^{1,r})$$
to \eqref{S} with $p=1+\frac{2-2\tau+\alpha}{n-2}$
for any admissible pair $(q,r)$ in Definition \ref{dms}.
Furthermore, the continuous dependence on the initial data holds. \end{thm}
We also provide the small data global well-posedness and scattering results as follows: \begin{thm}\label{glb}
Under the same conditions as in Theorem \ref{loc} and the smallness assumption on $\|u_0\|_{H_{\lambda}^1}$, there exists a unique global solution $$u\in C([0,\infty); H_\lambda^1) \cap L^q([0,\infty);W_{\lambda}^{1,r})$$ to \eqref{S} with $p=1+\frac{2-2\tau+\alpha}{n-2}$ for any admissible pair $(q,r)$.
Furthermore, the solution scatters in $H_\lambda^1$, i.e., there exists $\phi\in H_\lambda^1$ such that
$$\lim_{t\to\infty} \|u(t)-e^{-it\mathcal{K}_{\lambda}}\phi\|_{H_{\lambda}^1}=0.$$ \end{thm}
\subsection{Blow-up of energy solutions}
We now turn our attention to blow-up of the solution to \eqref{S} under the ground state threshold, in the focusing regime. A particular global solution of \eqref{S} with $\epsilon=-1$ is the stationary solution to \eqref{S}, namely \begin{equation}\label{E}
-\Delta \varphi+\frac{\lambda}{|x|^2}\varphi=|x|^{-\tau}|\varphi|^{p-2}(I_\alpha *|\cdot|^{-\tau}|\varphi|^p)\varphi,\quad 0\neq \varphi\in {H^1_\lambda}. \end{equation} Such a solution, called a ground state, plays an essential role in the focusing regime. The following result gives the existence of ground states to \eqref{E}. \begin{thm}\label{gag} {Let $n\geq3$, $\lambda>-\frac{(n-2)^2}4$ and $p=1+\frac{2-2\tau+\alpha}{n-2}$. Assume that \begin{equation}\label{as} 0<\alpha<n \quad \text{and} \quad 0<\tau< 1+\frac{\alpha}{n}. \end{equation} Then, the following inequality holds: \begin{equation}\label{gagg}
\int_{\mathbb{R}^n} |x|^{-\tau}|u|^p (I_\alpha \ast |\cdot|^{-\tau}|u|^p) \leq C_{N,\tau,\alpha,\lambda} \big\|\sqrt{\mathcal{K}_{\lambda} }u\big\|^{2p}.
\end{equation}} Moreover, there exists a ground state solution $\varphi\in H_{\lambda}^1$ to \eqref{E}, which is a minimizer of the problem \begin{equation}\label{min}
\frac1{C_{N,\tau,\alpha,\lambda}}=\inf\Big\{\frac{\|\sqrt{\mathcal K_\lambda} u\|^{2p}}{\mathcal P[u]},\quad0\neq u\in H^1_\lambda\Big\}.
\end{equation} \end{thm}
\begin{rems} \begin{enumerate} \item[1.] Theorem \ref{gag} does not require the assumption $p\geq2$; \item[2.] $C_{N,\tau,\alpha,\lambda}$ denotes the best constant in the inequality \eqref{gagg}; \item[3.] in contrast with the homogeneous regime $\tau=0$, where the infimum in \eqref{min} is never attained for $\lambda>0$, see \cite{kmvzz}. \end{enumerate} \end{rems}
Here and hereafter, we denote by $\varphi$ a ground state solution of \eqref{E} and define the scale invariant quantities \begin{align*} \mathcal{ME}[u_0]:=\frac{\mathcal E[u_0]}{\mathcal E[\varphi]},\quad
\mathcal{MG}[u_0]:=\frac{\|\sqrt{\mathcal K_\lambda} u_0\|}{\|\sqrt{\mathcal K_\lambda} \varphi\|},\quad \mathcal{MP}[u_0]:=\frac{\mathcal P[u_0]}{\mathcal P[\varphi]}. \end{align*}
The next theorem gives a blow-up phenomenon in the energy-critical focusing regime under the ground state threshold. \begin{thm}\label{t1} Under the assumptions in Theorem \ref{loc} and $\epsilon=-1$, let $\varphi$ be a ground state solution to \eqref{E} and $u\in C_{T^*}(H^1_\lambda)$ be a maximal solution of the focusing problem \eqref{S}. If \begin{equation} \label{ss'}
\sup_{t\in[0,T^*)}\mathcal I[u(t)]<0, \end{equation}
then $u$ blows up in finite or infinite time. Here, $\mathcal I[u]:=\|\sqrt{\mathcal K_\lambda} u\|^2-\mathcal{P}[u]$. \end{thm} \begin{rems}
\begin{enumerate}
\item[1.]
that $u$ blows up in infinite time means that it is global and that there is a sequence $t_n\to\infty$ such that $\|\sqrt{\mathcal K_\lambda} u(t_n)\|\to\infty$;
\item[2.]
the threshold is expressed in terms of the potential energy $\mathcal P[u]$, which is a non-conserved quantity;
\item[3.]
the theorem does not require classical assumptions such as spherically symmetric data or $|x|u_0\in L^2$;
\item[4.]
a direct consequence of the variance identity is that the energy solution to \eqref{S} blows up in finite time if $|x|u_0\in L^2$ and \eqref{ss'} is satisfied;
\end{enumerate} \end{rems}
The next result is a consequence of Theorem \ref{t1}. \begin{cor}\label{t2} Under the assumptions in Theorem \ref{loc} and $\epsilon=-1$, let $\varphi$ be a ground state of \eqref{E} and $u_0\in H^1_\lambda$ such that \begin{equation} \label{t11} \mathcal{ME}[u_0]<1. \end{equation} If we assume that \begin{equation}\label{t13}
\mathcal{MG}[u_0]>1,\end{equation} then the energy solution of \eqref{S} blows up in finite or infinite time. \end{cor}
\begin{rems} \begin{enumerate} \item[1.] The assumptions of the above result are simpler to check than \eqref{ss'}, because they are expressed in terms of conserved quantities; \item[2.] the above ground state threshold has had a deep influence in the NLS context since the pioneering papers \cite{km,Holmer};
\end{enumerate} \end{rems}
Finally, we close this subsection with an additional result which gives the boundedness of the energy solution. \begin{prop}\label{s} Under the assumptions in Theorem \ref{loc} and $\epsilon=-1$, let $\varphi$ be a ground state solution to \eqref{E} and $u\in C_{T^*}(H^1_\lambda)$ be a maximal solution of the focusing problem \eqref{S}. If \begin{equation} \label{ss}
\sup_{t\in[0,T^*)}\mathcal{MP}[u(t)]<1, \end{equation} then $u$ is bounded in $H^1_\lambda$. \end{prop} The next result is a consequence of Proposition \ref{s}. \begin{cor}\label{s2} Under the assumptions in Theorem \ref{loc} and $\epsilon=-1$, let $\varphi$ be a ground state of \eqref{E} and $u_0\in H^1_\lambda$ satisfying \eqref{t11}.
If \begin{equation}\label{t12}
\mathcal{MG}[u_0]<1, \end{equation} then the energy solution of \eqref{S} is bounded. \end{cor} \begin{rem} \begin{enumerate}
\item[1.] the global existence and energy scattering under the assumptions \eqref{ss} in Proposition \ref{s} or \eqref{t11}-\eqref{t12} in Corollary \ref{s2} are investigated in a paper in progress. \end{enumerate} \end{rem}
The rest of this paper is organized as follows. In Section 2 we introduce some useful properties that we need. Section 3 develops a local theory and a global one for small data. In Section 4, the existence of ground states is established. Section 5 establishes blow-up of solutions under the ground state threshold and the boundedness of energy solutions. In the appendix, a Morawetz-type estimate is proved.
Throughout this paper, the letter $C$ stands for a positive constant which may be different at each occurrence. We also write $A \lesssim B$ to mean $A \leq CB$ for some unspecified constant $C>0$.
\section{Preliminaries} In this section, we introduce some useful properties which will be utilized throughout this paper. We also recall the Strichartz estimates. Let us start with the Hardy-Littlewood-Sobolev inequality \cite{el} which is suitable for dealing with the non-local source term in \eqref{S}: \begin{lem}\label{hls} Let $n\geq1$ and $0 <\alpha < n$. \begin{enumerate} \item[1.] Let $s\geq1$ and $r>1$ be such that $\frac1r=\frac1s+\frac\alpha n$. Then,
$$\|I_\alpha*g\|_{L^s}\leq C_{n,s,\alpha}\|g\|_{L^r}.$$ \item[2.] Let $1<s,r,t<\infty$ be such that $\frac1r +\frac1s=\frac1t +\frac\alpha n$. Then,
$$\|f(I_\alpha*g)\|_{L^t}\leq C_{n,t,\alpha}\|f\|_{L^r}\|g\|_{L^s}.$$ \end{enumerate} \end{lem}
The following lemma is a weighted version of the Sobolev embedding, that is, a special case of Caffarelli-Kohn-Nirenberg weighted interpolation inequalities \cite{sgw,csl}: {\begin{lem}\label{ckn}
Let $n\geq1$ and
$$1< p\leq q<\infty, \quad -\frac nq<b\leq a<\frac n{p'} \quad \text{and} \quad a-b-1=n\Big(\frac1q-\frac1p\Big).$$
Then,
$$\||x|^{b}f\|_{L^q}\leq C\||x|^a\nabla f\|_{L^p}.$$ \end{lem}}
Now, we describe several properties related to the operator $\mathcal K_\lambda.$
Since $\|f\|_{H^1} \simeq \|f\|_{H_\lambda^1}$, one has the following compact Sobolev injection (\cite[Lemma 3.1]{cg}): \begin{lem}\label{compact} Let $n\geq3$, $0<\tau<2$ and $2<r<\frac{2(n-\tau)}{n-2}$. Then, the following injection is compact:
$$H^1_\lambda\hookrightarrow\hookrightarrow L^{r}(|x|^{-\tau}\,dx) .$$ \end{lem}
We also have the following equivalent norms to Sobolev ones, see \cite{kmvzz} and \cite[Remark 2.2]{cg}: \begin{lem}\label{2.2} Let $n\geq3$, $\lambda>-\frac{(n-2)^2}4$, $1<r<\infty$ and $2\kappa=n-2-\sqrt{(n-2)^2+4\lambda}$. Then,
\begin{enumerate}
\item[1.]
if $\frac{1+\kappa}n<\frac1r<\min\{1,1-\frac\kappa n\}$, then $\|f\|_{\dot W^{1,r}}\lesssim \|f\|_{\dot W_\lambda^{1,r}}$;
\item[2.]
if $\max\{\frac{1}n,\frac{\kappa}n\}<\frac1r<\min\{1,1-\frac\kappa n\}$, then $\|f\|_{\dot W_\lambda^{1,r}}\lesssim\|f\|_{\dot W^{1,r}}$.
\end{enumerate} \end{lem}
Finally, we recall the Strichartz estimates. As we shall see, the availability of these estimates plays a key role in the proof of Theorem \ref{loc}. \begin{defi}\label{dms} Let $n\ge3$. We say that $(q,r)$ is an admissible pair if it satisfies \begin{equation*}
2\le q \le \infty, \quad 2\le r \le \frac{2n}{n-2} \quad \text{and} \quad \frac2q+\frac{n}{r}=\frac{n}{2}.
\end{equation*} \end{defi} \begin{prop}\cite{bpst,zz,df}\label{str} Let $n\geq3$, $\lambda>-\frac{(n-2)^2}4$ and let $(q,r)$, $(\tilde q,\tilde r)$ be admissible pairs. Then, there exists $C>0$ such that \begin{enumerate} \item[1.]
$\|e^{-it\mathcal K_\lambda}f\|_{L_t^q(L_x^r)}\leq C\|f\|,$ \item[2.]
$\|\int_0^{t}e^{-i(t-t_1)\mathcal K_\lambda}F(\cdot,t_1)dt_1\|_{L_t^q(L_x^r)}\leq C\|F\|_{L_t^{\tilde q'}L_x^{\tilde r'}}.$ \end{enumerate} \end{prop}
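As an elementary illustration of Definition \ref{dms} (not needed in the sequel), the endpoint pairs $(q,r)=(\infty,2)$ and $(q,r)=\big(2,\frac{2n}{n-2}\big)$ are admissible, since
\begin{equation*}
\frac{2}{\infty}+\frac n2=\frac n2
\qquad\text{and}\qquad
\frac22+\frac{n(n-2)}{2n}=1+\frac{n-2}{2}=\frac n2.
\end{equation*}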
Finally, we give a classical Morawetz estimate, proved in the appendix. Let $\phi:\mathbb{R}^n\to\mathbb{R}$ be a smooth function and define the variance potential
$$V_\phi(t):=\int_{\mathbb{R}^n}\phi(x)|u(x,t)|^2\,dx,$$ and the Morawetz action $$M_\phi(t):=2\Im\int_{\mathbb{R}^n} \bar u\,(\nabla\phi\cdot\nabla u)\,dx.$$
\begin{prop}\label{mrwz}
Let $\phi:\mathbb{R}^N \rightarrow \mathbb{R}$ be a radial, real-valued multiplier, $\phi=\phi(|x|)$.
Then, for any solution $u\in C([0,T];H_\lambda^1)$ of the generalized Hartree equation \eqref{S} in the focusing case with initial data $u_0\in H_{\lambda}^{1}$, the following virial-type identities hold:
\begin{equation*}
V'_\phi(t)=
2\Im\int_{\mathbb{R}^n} \bar u\nabla\phi\cdot\nabla u dx
\end{equation*}
and
\begin{align*}
V''_\phi(t)=M_\phi'(t)&=4\sum_{k,l=1}^{N}\int_{\mathbb{R}^n}\partial_l\partial_k\phi\Re(\partial_ku\partial_l\bar u)dx-\int_{\mathbb{R}^n}\Delta^2\phi|u|^2dx+4\lambda\int_{\mathbb{R}^n}\nabla\phi\cdot x\frac{|u|^2}{|x|^4}dx\\
&\qquad-\frac{2(p-2)}{p}\int_{\mathbb{R}^n}\Delta\phi|x|^{-\tau}|u|^p(I_\alpha*|\cdot|^{-\tau}|u|^{p})dx\\
&\qquad\qquad -\frac{4\tau}p\int_{\mathbb{R}^n}x\cdot\nabla\phi|x|^{-\tau-2}|u|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)dx\\
&\qquad\qquad -\frac{4(N-\alpha)}p\sum_{k=1}^N\int_{\mathbb{R}^n}|x|^{-\tau}|u|^{p}\partial_k\phi(\frac{x_k}{|\cdot|^2}I_\alpha*|\cdot|^{-\tau}|u|^p)dx.
\end{align*} \end{prop}
{\section{Well-posedness in the energy space}}
In this section, we develop a local theory in the energy space $H_{\lambda}^1$ and prove Theorem \ref{loc}. Moreover, we prove Theorem \ref{glb} about the global theory for small data. Let us first denote the source term \begin{align*}
\mathcal N&:=\mathcal N[u]:=|x|^{-\tau}|u|^{p-2}(I_\alpha *|\cdot|^{-\tau}|u|^p)u. \end{align*}
\subsection{Nonlinear estimates} We first establish some nonlinear estimates for $\mathcal{N}[u]$. These nonlinear estimates will play an important role in proving the well-posedness results via the contraction mapping principle. Before stating the nonlinear estimates, we introduce some notation. We set $$\mathcal A=\{(q,r):(q,r) \,\,\text{is}\,\, \text{admissible}\},$$ and then define the norm
$$\|u\|_{\Lambda(I)}=\sup_{(q,r)\in \mathcal A}\|u\|_{L_t^q(I;L_x^r)}$$ and its dual norm
$$\|u\|_{\Lambda'(I)}=\inf_{(\tilde q,\tilde r)\in \mathcal A}\|u\|_{L_t^{\tilde q'}(I;L_x^{\tilde r'})}$$ for any interval $I\subset \mathbb{R}.$
\begin{lem}\label{non} Let $n\ge3$, $\lambda >-\frac{(n-2)^2}{4}$ and $p=1+\frac{2-2\tau+\alpha}{n-2}$. Assume that \begin{equation}\label{ass1} 0<\alpha<n, \quad 2\kappa < \frac{5n-4-\sqrt{9n^2+8n-16}}{2} \end{equation} and \begin{equation}\label{ass2} \frac{\alpha}{2}-\frac{n+2+\sqrt{9n^2+8n-16}}{2}<\tau < \frac{\alpha}{2}-\max\{\frac{n-4}{2}, \frac{n-4}{n},\frac{\kappa}{n-2-2\kappa}-\frac{n}{4}\}. \end{equation} Then, there exists an admissible pair $(q,r)$ such that, with $I:=(0,T)$, \begin{equation}\label{non1}
\|\sqrt{\mathcal{K}_{\lambda}}\,\mathcal{N}[u]\|_{\Lambda'(I)} \leq C \|u\|_{L_t^{q}(I;\dot W_{\lambda}^{1,r})}^{2p-1} \end{equation} and \begin{equation}\label{non2}
\|\mathcal{N}[u]-\mathcal{N}[v]\|_{\Lambda'(I)} \leq C (\|u\|_{L_t^q(I;\dot W_{\lambda}^{1,r})}^{2p-2} +\|v\|_{L_t^q(I;\dot W_{\lambda}^{1,r})}^{2p-2})\|u-v\|_{L_t^q(I;L_x^r)}. \end{equation} \end{lem}
\begin{proof} It is sufficient to show that there exists an admissible pair $(q,r)$ for which \begin{equation}\label{non11}
\|\nabla\mathcal{N}[u]\|_{L_t^2(I;L_x^{\frac{2n}{n+2}})} \leq C \|\nabla u\|_{L_t^{q}(I;L_x^r)}^{2p-1} \end{equation} and \begin{equation}\label{non22}
\|\mathcal{N}[u]-\mathcal{N}[v]\|_{L_t^2(I;L_x^{\frac{2n}{n+2}})} \leq C \|\nabla u\|_{L_t^q(I;L_x^r)}^{2p-2} \|u-v\|_{L_t^q(I;L_x^r)} \end{equation} hold for $\lambda, \alpha, \kappa, \tau, p$ given as in the lemma. Indeed, by applying the equivalent norms to Sobolev ones (see Lemma \ref{2.2}) we obtain the desired estimates \eqref{non1} and \eqref{non2} if \begin{equation}\label{ka} \max\{\frac1n,\frac{\kappa}n\}<\frac{n+2}{2n}<\min\{1,\frac{n-\kappa}n\}, \quad \frac{1+\kappa}{n}<\frac1{r}<\min\{1,\frac{n-\kappa}{n}\}, \end{equation}
Here, one can easily see that the first condition in \eqref{ka} is always satisfied.
Now we start to prove \eqref{non11}. Let us set \begin{equation}\label{st} \frac1q=\frac{1}{2(2p-1)}, \quad \frac{n-2}{2n}\leq \frac1r \leq \frac12, \quad \frac{2}q+\frac{n}{r}=\frac{n}{2}, \quad 0<\alpha<n. \end{equation} We first see that \begin{align*}
\|&\nabla\mathcal{N}[u]\|_{L_x^{\frac{2n}{n+2}}}\\
&\lesssim\big\||x|^{-\tau-1}|u|^{p-1}(I_\alpha\ast|\cdot|^{-\tau}|u|^p)\big\|_{L_x^{\frac{2n}{n+2}}}+\big\||x|^{-\tau}|u|^{p-1}(I_\alpha \ast|\cdot|^{-\tau-1}|u|^{p})\big\|_{L_x^{\frac{2n}{n+2}}}\\
&+\||x|^{-\tau}|u|^{p-2}|\nabla u|(I_\alpha\ast|\cdot|^{-\tau}|u|^p)\|_{L_x^{\frac{2n}{n+2}}}+\||x|^{-\tau}|u|^{p-1}(I_\alpha\ast|\cdot|^{-\tau}|u|^{p-1}\nabla u)\|_{L_x^{\frac{2n}{n+2}}}\\ &:=A_1+A_2+A_3+A_4. \end{align*} The first term $A_1$ is bounded by using Lemmas \ref{hls} and \ref{ckn},
in turn, \begin{align*}
\big\||x|^{-\tau-1}|u|^{p-1}|I_\alpha\ast|\cdot|^{-\tau}|u|^p|\big\|_{L_x^{\frac{2n}{n+2}}}
&\lesssim \||x|^{-\tau-1}|u|^{p-1}\|_{L_x^{a_1}} \||x|^{-\tau}|u|^p\|_{L_x^{b_1}} \\
&=\||x|^{-\frac{\tau+1}{p-1}}u\|^{p-1}_{L_x^{(p-1)a_1}}\||x|^{-\frac{\tau}{p}}u\|^{p}_{L_x^{pb_1}}\\
&\lesssim \|\nabla u\|_{L_x^r}^{2p-1}, \end{align*} if \begin{equation}\label{c7} 0<\frac1{a_1},\frac1{b_1}<1, \quad \frac1{a_1}+\frac1{b_1}=\frac{n+2}{2n}+\frac{\alpha}{n}, \end{equation} \begin{equation}\label{c8} 0<\frac{1}{(p-1)a_1}\leq \frac1r\leq1,\quad 0\leq\frac{\tau+1}{p-1}<\frac{n}{(p-1)a_1} ,\quad \frac{\tau+1}{p-1}-1=\frac{n}{(p-1)a_1}-\frac{n}{r} \end{equation} \begin{equation}\label{c9} 0<\frac{1}{pb_1}\leq \frac1r\leq 1,\quad 0 \leq \frac{\tau}{p}<\frac{n}{pb_1},\quad \frac{\tau}{p}-1=\frac{n}{pb_1}-\frac{n}{r}. \end{equation}
Similarly, \begin{align*}
A_2&=\big\||x|^{-\tau}|u|^{p-1}(I_\alpha \ast|\cdot|^{-\tau-1}|u|^{p})\big\|_{L_x^{\frac{2n}{n+2}}}\\
&\leq \||x|^{-\tau}|u|^{p-1}\|_{L_x^{a_2}}\||x|^{-\tau-1}|u|^p\|_{L_x^{b_2}}\\
&\lesssim \|\nabla u\|^{2p-1}_{L_x^r} \end{align*} if \begin{equation}\label{c10} 0<\frac1{a_2}, \frac{1}{b_2}<1, \quad \frac{1}{a_2}+\frac{1}{b_2}=\frac{n+2}{2n}+\frac{\alpha}{n}, \end{equation} \begin{equation}\label{c11} 0<\frac1{(p-1)a_2}\leq \frac1r\leq 1, \quad 0\leq \frac{\tau}{p-1}<\frac{n}{(p-1)a_2}, \quad \frac{\tau}{p-1}-1=\frac{n}{(p-1)a_2}-\frac{n}{r}, \end{equation} \begin{equation}\label{c12} 0<\frac{1}{pb_2}\leq \frac1r \leq 1, \quad 0\leq \frac{\tau+1}{p}<\frac{n}{pb_2}, \quad \frac{\tau+1}{p}-1=\frac{n}{pb_2}-\frac{n}{r}. \end{equation}
The third term $A_3$ is bounded by using Lemma \ref{hls}, H\"older's inequality and Lemma \ref{ckn}
in turn as \begin{align*}
\big\||x|^{-\tau}|u|^{p-2}|\nabla u||I_\alpha \ast|\cdot|^{-\tau}|u|^{p}|\big\|_{L_x^{\frac{2n}{n+2}}}
&\lesssim \||x|^{-\tau}|u|^{p-2}|\nabla u|\|_{L_x^{a_1}} \||x|^{-\tau}|u|^p\|_{L_x^{b_1}}\\
&\leq \||x|^{-\tau}|u|^{p-2}\|_{L_x^{a_3}}\|\nabla u\|_{L_x^r} \||x|^{-\tau}|u|^p\|_{L_x^{b_1}}\\
&=\||x|^{-\frac{\tau}{p-2}}u\|^{p-2}_{L_x^{(p-2)a_3}} \|\nabla u\|_{L_x^r} \||x|^{-\frac{\tau}{p}}u\|^p_{L_x^{pb_1}}\\
&\lesssim \|\nabla u\|_{L_x^r}^{2p-1} \end{align*} if \begin{equation}\label{c1} 0<\frac1{a_1}, \frac1{b_1}<1, \quad \frac{1}{a_1}+\frac{1}{b_1}=\frac{n+2}{2n}+\frac{\alpha}{n},\quad \frac{1}{a_1}=\frac{1}{a_3}+\frac{1}{r}, \end{equation} \begin{equation}\label{c2} 0<\frac{1}{(p-2)a_3} \leq \frac1r\leq 1, \quad 0\leq \frac{\tau}{p-2}<\frac{n}{(p-2)a_3}, \quad \frac{\tau}{p-2}-1=\frac{n}{(p-2)a_3}-\frac{n}{r}, \end{equation} \begin{equation}\label{c3} 0<\frac1{pb_1}\leq \frac1r\leq 1, \quad 0\leq \frac{\tau}{p}<\frac{n}{pb_1}, \quad \frac{\tau}{p}-1=\frac{n}{pb_1}-\frac{n}{r}. \end{equation}
Similarly, \begin{align*}
A_4&=\||x|^{-\tau}|u|^{p-1}(I_\alpha\ast|\cdot|^{-\tau}|u|^{p-1}\nabla u)\|_{L_x^{\frac{2n}{n+2}}}\\
&\lesssim \||x|^{-\tau}|u|^{p-1}\|_{L_x^{a_2}}\||x|^{-\tau}|u|^{p-1}\nabla u\|_{L_x^{b_2}}\\
&\leq \||x|^{-\frac{\tau}{p-1}}|u|\|^{p-1}_{L_x^{(p-1)a_2}}\||x|^{-\frac{\tau}{p-1}}u\|^{p-1}_{L_x^{(p-1)b_4}}\|\nabla u\|_{L_x^r}\\
&\lesssim \|\nabla u\|_{L_x^r}^{2p-1} \end{align*} if \begin{equation}\label{c4} 0<\frac1{a_2}, \frac{1}{b_2}<1,\quad \frac1{a_2}+\frac1{b_2}=\frac{n+2}{2n}+\frac{\alpha}{n},\quad \frac{1}{b_2}=\frac{1}{b_4}+\frac1r \end{equation} \begin{equation}\label{c5} 0<\frac{1}{(p-1)a_2}\leq \frac1r\leq 1, \quad 0\leq \frac{\tau}{p-1}\leq \frac{n}{(p-1)a_2}, \quad \frac{\tau}{p-1}-1=\frac{n}{(p-1)a_2}-\frac{n}{r}, \end{equation} \begin{equation}\label{c6} 0<\frac1{(p-1)b_4}\leq \frac1r\leq 1, \quad 0\leq \frac{\tau}{p-1}<\frac{n}{(p-1)b_4}, \quad \frac{\tau}{p-1}-1=\frac{n}{(p-1)b_4}-\frac{n}{r}. \end{equation}
On the other hand, in order to show \eqref{non22}, we first use the following simple inequality \begin{align*}
\big|\mathcal N[u]-\mathcal N[v]\big| &\lesssim \Big||x|^{-\tau}(|u|^{p-2}+|v|^{p-2})|u-v|(I_\alpha\ast|\cdot|^{-\tau}|u|^p)\Big|
\\&\qquad\quad+\Big||x|^{-\tau}|v|^{p-1}\big(I_\alpha \ast|\cdot|^{-\tau}(|u|^{p-1}+|v|^{p-1})|u-v|\big)\Big|. \end{align*} From this, we see that \begin{align*}
\|\mathcal N[u]-\mathcal N[v]\|_{L_x^{\frac{2n}{n+2}}} &\leq \||x|^{-\tau}(|u|^{p-2}+|v|^{p-2})|u-v|(I_\alpha\ast|\cdot|^{-\tau}|u|^p)\|_{L_x^{\frac{2n}{n+2}}}\\
&\qquad+\||x|^{-\tau}|v|^{p-1}\big(I_\alpha \ast|\cdot|^{-\tau}(|u|^{p-1}+|v|^{p-1})|u-v|\big)\|_{L_x^{\frac{2n}{n+2}}}\\ &:=B_1+B_2. \end{align*}
Replacing $\nabla u$ with $u-v$ in the process of dealing with $A_3$, we get \begin{align*}
B_1&=\||x|^{-\tau}(|u|^{p-2}+|v|^{p-2})|u-v|(I_\alpha\ast|\cdot|^{-\tau}|u|^p)\|_{L_x^{\frac{2n}{n+2}}}\\
&\lesssim \Big(\||x|^{-\tau}|u|^{p-2}|u-v|\|_{L_x^{a_1}}+\||x|^{-\tau}|v|^{p-2}|u-v|\|_{L_x^{a_1}}\Big)\||x|^{-\tau}|u|^p\|_{L_x^{b_1}}\\
&\lesssim \Big(\||x|^{-\frac{\tau}{p-2}}u\|_{L_x^{(p-2)a_3}}^{p-2}+ \||x|^{-\frac{\tau}{p-2}}v\|_{L_x^{(p-2)a_3}}^{p-2}\Big)\|u-v\|_{L_x^r}\||x|^{-\frac{\tau}{p}}u\|_{L_x^{pb_1}}^p\\
&\lesssim \big(\|\nabla u\|_{L_x^{r}}^{2p-2}+\|\nabla v\|_{L_x^{r}}^{2p-2}\big)\|u-v\|_{L_x^r} \end{align*} under the conditions \eqref{c1}, \eqref{c2} and \eqref{c3}. Similarly, replacing $\nabla u$ with $u-v$ in estimating $A_4$, we also have \begin{align*}
B_2&=\||x|^{-\tau}|v|^{p-1}(I_\alpha\ast|\cdot|^{-\tau}(|u|^{p-1}+|v|^{p-1})|u-v|)\|_{L_x^{\frac{2n}{n+2}}}\\
&\lesssim \||x|^{-\tau}|v|^{p-1}\|_{L_x^{a_2}}\||x|^{-\tau}(|u|^{p-1}+|v|^{p-1})|u-v|\|_{L_x^{b_2}}\\
&\lesssim \||x|^{-\frac{\tau}{p-1}}v\|_{L_x^{(p-1)a_2}}^{p-1} \Big(\||x|^{-\frac{\tau}{p-1}}u\|_{L_x^{(p-1)b_4}}^{p-1}+\||x|^{-\frac{\tau}{p-1}}v\|_{L_x^{(p-1)b_4}}^{p-1}\Big)\|u-v\|_{L_x^r}\\
&\lesssim \big(\|\nabla u\|_{L_x^{r}}^{2p-2}+\|\nabla v\|_{L_x^{r}}^{2p-2}\big)\|u-v\|_{L_x^r} \end{align*} under the conditions \eqref{c4}, \eqref{c5} and \eqref{c6}.
Now it remains to eliminate some redundant conditions and to show that there exists an admissible pair $(q,r)$ satisfying the assumptions in the lemma. The third conditions of \eqref{c8} and \eqref{c9} can be rewritten with respect to $a_1$ and $b_1$, respectively, as follows: \begin{equation}\label{c19} \frac{n}{a_1}=\frac{(p-1)n}{r}+\tau-p+2, \quad \frac{n}{b_1}=\frac{pn}{r}+\tau-p. \end{equation} Inserting these into the second condition of \eqref{c7} implies \begin{equation}\label{a} \frac{(2p-1)n}{r}=\alpha+2p-2\tau-1+\frac{n}{2}. \end{equation} Here, we note that this equation is equivalent to the second condition of \eqref{c10} by using \eqref{c18}. Inserting \eqref{c19} into the conditions in \eqref{c7}, \eqref{c8} and \eqref{c9}, these conditions are summarized as follows: \begin{equation}\label{c15} \frac{p-\tau-2}{p-1}<\frac{n}{r}<\frac{p-\tau-2+n}{p-1}, \quad \frac{p-\tau}{p}<\frac{n}{r}<\frac{p-\tau+n}{p}, \end{equation} \begin{equation}\label{c13} \tau-p+2\leq 0, \quad \frac{n}{r}\leq n, \quad \frac{p-\tau-2}{p-1} \leq 1 <\frac{n}{r} \end{equation} \begin{equation}\label{c14} \tau-p\leq 0 , \quad \frac{p-\tau}{p}\leq 1 < \frac{n}{r} \end{equation} Since $\tau>0$, the first inequalities of the last conditions in \eqref{c13} and \eqref{c14} are redundant. The first condition in \eqref{c14} is also redundant by the first one in \eqref{c13}.
Also, the third conditions of \eqref{c11} and \eqref{c12} can be rewritten as \begin{equation}\label{c18} \frac{n}{a_2}=\frac{(p-1)n}{r}+\tau-p+1, \quad \frac{n}{b_2}=\frac{pn}{r}+\tau-p+1 \end{equation} Inserting these into the conditions \eqref{c10}, \eqref{c11} and \eqref{c12}, these conditions are summarized as \begin{equation}\label{c16} \frac{p-\tau-1}{p-1}<\frac{n}{r}<\frac{p-\tau-1+n}{p-1}, \quad \frac{p-\tau-1}{p}<\frac{n}{r}<\frac{p-\tau-1+n}{p} \end{equation} \begin{equation}\label{c17} \tau-p+1\leq 0, \quad \frac{n}{r}\leq n, \quad \frac{p-\tau-1}{p-1}\leq 1 <\frac{n}{r}, \quad \frac{p-\tau-1}{p}\leq 1 < \frac{n}{r}. \end{equation} Here, since $\tau>0$, the first inequalities of the last two conditions in \eqref{c17} are redundant. The first condition in \eqref{c17} is also eliminated by the first one in \eqref{c13}.
Finally, the first two conditions of \eqref{c2} and \eqref{c6} are summarized by inserting the third conditions of \eqref{c2} and \eqref{c6} as \begin{equation}\label{c20} \frac{p-\tau-2}{p-2}<\frac{n}r\leq n, \quad \tau-p+2\leq0, \quad \frac{p-\tau-2}{p-2}\leq 1<\frac{n}{r}, \end{equation} \begin{equation}\label{c21} \frac{p-\tau-1}{p-1}<\frac{n}{r}\leq n, \quad \tau-p+1\leq 0, \quad \frac{p-\tau-1}{p-1}\leq 1 <\frac{n}{r}. \end{equation} Here, the second condition in \eqref{c21} is eliminated by the second one in \eqref{c20}. Since $p>2$ and $\tau>0$, all lower bounds of $n/r$ in \eqref{c15}, \eqref{c16}, \eqref{c20} and \eqref{c21} are eliminated by $1$. Moreover, by using $p>2$ the upper bounds of $n/r$ in the second condition of \eqref{c15} and the first one of \eqref{c16} are also eliminated by the upper one of $n/r$ in the second condition of \eqref{c16}. As a result, combining all the above conditions, we get \begin{equation}\label{rr} 1<\frac{n}{r}<\min\Big\{\frac{p-\tau-2+n}{p-1}, \frac{p-\tau-1+n}{p}\Big\}, \quad 0<\tau\leq p-2. \end{equation}
On the other hand, substituting the first condition into the third one in \eqref{st} implies \begin{equation}\label{r} \frac{n}{r}=\frac{n}{2}-\frac1{2p-1}. \end{equation} Note that \eqref{a} is exactly the same as $p=1+\frac{2-2\tau+\alpha}{n-2}$ by substituting \eqref{r} into \eqref{a}. Eliminating $r$ by inserting \eqref{r} into the second conditions of \eqref{ka} and \eqref{st}, the first one of \eqref{rr}, we then get \begin{equation}\label{c25} 1+\kappa < \frac{n}{2}-\frac1{2p-1}<\min\{n,n-\kappa\}, \quad \frac{n-2}{2}\leq \frac{n}{2}-\frac1{2p-1}\leq \frac{n}2, \end{equation} \begin{equation}\label{c22} p>\frac{n}{2(n-2)}, \quad \tau <n-1+\min\Big\{(p-1)\Big(\frac{2p}{2p-1}-\frac{n}{2}\Big), p\Big(\frac{2p}{2p-1}-\frac{n}{2}\Big)\Big\}. \end{equation} Here, the first condition in \eqref{c25} can be divided into two inequalities \begin{equation}\label{c26} p>\frac12+\frac{1}{n-2-2\kappa}, \quad \max\big\{-\frac{n}{2},-\frac{n}{2}+\kappa\big\}<\frac1{2p-1}, \end{equation} in which the second condition is redundant since the maximum value is always negative. Since $p>2$ and $n\ge3$, the last condition in \eqref{c25} and the first condition in \eqref{c22} are redundant. Moreover, since $\frac{2p}{2p-1}-\frac{n}{2}<0$, the last condition in \eqref{c22} is reduced to \begin{equation}\label{c23} \tau <n-1+ p\Big(\frac{2p}{2p-1}-\frac{n}{2}\Big). \end{equation}
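For the reader's convenience, the equivalence between \eqref{a}, \eqref{r} and the critical relation invoked above can be checked directly: substituting \eqref{r} into \eqref{a} gives
\begin{equation*}
(2p-1)\Big(\frac n2-\frac1{2p-1}\Big)=\alpha+2p-2\tau-1+\frac n2
\;\Longleftrightarrow\;
(p-1)n=\alpha+2p-2\tau
\;\Longleftrightarrow\;
(p-1)(n-2)=2-2\tau+\alpha.
\end{equation*}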
In order to eliminate $\alpha$, inserting $p=1+\frac{2-2\tau+\alpha}{n-2}$ into the last condition in \eqref{st}, we also have \begin{equation}\label{c24} \frac{n}{2}-\frac{(n-2)p}{2}<\tau<n-\frac{(n-2)p}{2}. \end{equation} Now we make the lower bounds of $\tau$ less than the upper ones of $\tau$ in \eqref{rr}, \eqref{c23} and \eqref{c24} to obtain \begin{align*} \nonumber \max\Big\{2, &\frac{5n-4-\sqrt{9n^2+8n-16}}{4(n-2)},\frac{n+4}{n}, \frac{n-2}{2(n-1)}\Big\} \leq \\ &\qquad\qquad\qquad\qquad\quad p \leq \min\Big\{\frac{2n}{n-2}, \frac{5n-4+\sqrt{9n^2+8n-16}}{4(n-2)} \Big\}, \end{align*} which is reduced to \begin{equation}\label{c27} \max\Big\{2,\frac{n+4}{n},\frac12+\frac1{n-2-2\kappa}\Big\} < p < \frac{5n-4+\sqrt{9n^2+8n-16}}{4(n-2)} \end{equation} by using $p>2$, $n\ge3$ and combining the first condition in \eqref{c26}.
The assumption \eqref{ass2} follows from inserting $p=1+\frac{2-2\tau+\alpha}{n-2}$ into \eqref{c27}. In fact, \eqref{c27} is expressed with respect to $\tau$, as follows: \begin{equation} \frac{\alpha}{2}-\frac{n-4+\sqrt{9n^2+8n-16}}{8}<\tau < \frac{\alpha}{2}-\max\{\frac{n-4}{2}, \frac{n-4}{n},\frac{\kappa}{n-2-2\kappa}-\frac{n}{4}\}. \end{equation} Finally, we make the lower bound of $\tau$ less than the upper ones of $\tau$ to deduce \begin{equation*} 2\kappa < \frac{5n-4-\sqrt{9n^2+8n-16}}{2}, \end{equation*}
which implies the assumption \eqref{ass1}.
Indeed, to obtain \eqref{ass1}, we can compute as follows: \begin{eqnarray*} &&\frac{n-4+\sqrt{9n^2+8n-16}}{8}>\frac{\kappa}{n-2-2\kappa}-\frac{n}{4}\\
&\Leftrightarrow&\frac{n-2-2\kappa}{8}>\frac{\kappa}{3n-4+\sqrt{9n^2+8n-16}}\times\frac{\sqrt{9n^2+8n-16}-(3n-4)}{\sqrt{9n^2+8n-16}-(3n-4)}\\
&\Leftrightarrow&n-2>\frac{\sqrt{9n^2+8n-16}+5n-4}{4(n-1)}\kappa. \end{eqnarray*} This is equivalent to \begin{align*} \kappa<\frac{4(n-1)(n-2)}{5n-4+\sqrt{9n^2+8n-16}}&=\frac{4(n-1)(n-2)\big\{5n-4-\sqrt{9n^2+8n-16}\big\}}{(5n-4)^2-9n^2-8n+16}\\
&=\frac{5n-4-\sqrt{9n^2+8n-16}}{4}. \end{align*} This ends the proof. \end{proof}
\subsection{Local well-posedness in the energy space}
By Duhamel's principle, we first write the solutions of the Cauchy problem \eqref{S} as fixed points of the map $$\Phi(u)=e^{-it\mathcal{K}_{\lambda}}u_0 + i\epsilon\int_0^t e^{-i(t-s)\mathcal{K}_{\lambda}} \mathcal N[u](s,\cdot) ds,$$
where $\mathcal N[u]=|x|^{-\tau}|u|^{p-2}(I_\alpha \ast |\cdot|^{-\tau}|u|^p)u$. For appropriate values of $T,M,N>0$, we shall show that $\Phi$ defines a contraction map on
$$X(T,M,N)=\{u \in C_t(I;H_\lambda^1) \cap L_t^{q}(I;W^{1,r}_{\lambda}): \sup_{t\in I} \|u\|_{H_{\lambda}^1}\leq M, \|u\|_{\mathcal W_{\lambda}(I)}\leq N\}$$ equipped with the distance
$$d(u,v)=\|u-v\|_{\Lambda(I)}.$$ Here, $I=[0,T]$ and $(q,r)$ is given as in Proposition \ref{str}. We also define
$$\|u\|_{\mathcal W_{\lambda}(I)}:= \|u\|_{\Lambda(I)} + \|\sqrt{\mathcal K_{\lambda}}u\|_{\Lambda(I)}$$ and
$$\|u\|_{\mathcal W{'}_{\lambda}(I)}:= \|u\|_{\Lambda'(I)} + \|\sqrt{\mathcal K_{\lambda}}u\|_{\Lambda'(I)}.$$
We now show that $\Phi$ is well defined on $X$. By Proposition \ref{str}, we get \begin{equation}\label{w1}
\|\Phi(u)\|_{\mathcal W_{\lambda}(I)}\leq C\|e^{-it\mathcal K_{\lambda}}u_0\|_{\mathcal W_{\lambda}(I)} +C\big\|\mathcal N[u]\big\|_{\mathcal W{'}_{\lambda}(I)} \end{equation} and \begin{equation*}
\sup_{t\in I}\|\Phi(u)\|_{H_{\lambda}^1}\leq \|u_0\|_{H_{\lambda}^1}+\sup_{t \in I}\Big\|\int_0^t e^{-i(t-s)\mathcal{K}_{\lambda}} \mathcal N[u](\cdot,s) ds\Big\|_{H_{\lambda}^1}. \end{equation*}
Here, for the second inequality we used the fact that $e^{it\mathcal K_{\lambda}}$ is unitary on $L^2$. Since $\|\langle \sqrt{\mathcal K_{\lambda}} \rangle u \|\lesssim \|u\| + \|\sqrt{\mathcal K_{\lambda}}u\|$, using again the fact that $e^{it\mathcal K_{\lambda}}$ is unitary on $L^2$, and then applying the dual estimate of the first one in Proposition \ref{str}, we see that
$$ \sup_{t\in I} \Big\| \int_0^t e^{-i(t-s)\mathcal{K}_{\lambda}} \mathcal N[u](\cdot,s) ds \Big\|_{H_{\lambda}^1} \lesssim \|\mathcal{N}[u]\|_{\Lambda'(I)} +\|\sqrt{\mathcal{K}_{\lambda}}\mathcal{N}[u]\|_{\Lambda'(I)}. $$ Hence, \begin{equation*}
\sup_{t\in I}\|\Phi(u)\|_{H_\lambda^1} \leq C \|u_0\|_{H_{\lambda}^1}+C\|\mathcal{N}[u]\|_{\mathcal{W}_{\lambda}'(I)}. \end{equation*} On the other hand, using Lemma \ref{non}, we get \begin{align}\label{w2} \nonumber
\|\mathcal{N}[u]\|_{\mathcal{W}_{\lambda}'(I)}&\leq C \|\sqrt{\mathcal K_{\lambda}}u\|_{\Lambda(I)}^{2p-1} + \|\sqrt{\mathcal K_{\lambda}}u\|_{\Lambda(I)}^{2p-2}\|u\|_{\Lambda(I)} \\ \nonumber
&\leq C \|\sqrt{\mathcal K_{\lambda}}u\|_{\Lambda(I)}^{2p-2}\|u\|_{\mathcal{W}_{\lambda}(I)}\\ &\leq C N^{2p-1} \end{align} if $u \in X$, and for some $\varepsilon>0$ small enough which will be chosen later we get \begin{equation}\label{sm}
\|e^{-it\mathcal{K}_{\lambda}}u_0\|_{\mathcal{W}_{\lambda}(I)}\leq \varepsilon \end{equation} which holds for a sufficiently small $T>0$ by the dominated convergence theorem. We now conclude that \begin{equation*}
\|\Phi(u)\|_{\mathcal{W}_{\lambda}(I)} \leq \varepsilon + CN^{2p-1} \quad \textnormal{and} \quad \sup_{t \in I}\|\Phi(u)\|_{H_{\lambda}^1} \leq C \|u_0\|_{H_\lambda^1} + CN^{2p-1}. \end{equation*} Hence we get $\Phi(u)\in X$ for $u \in X$ if \begin{equation}\label{w3}
\varepsilon + CN^{2p-1} \leq N \quad \textnormal{and} \quad C\|u_0\|_{H_{\lambda}^1} + CN^{2p-1} \leq M. \end{equation}
Next we show that $\Phi$ is a contraction on $X$. Using the same argument used in \eqref{w1}, we see \begin{equation*}
\|\Phi(u)-\Phi(v)\|_{\Lambda(I)} \leq C \|\mathcal{N}[u]-\mathcal{N}[v]\|_{\Lambda'(I)}. \end{equation*} By applying Lemma \ref{non} (see \eqref{non2}), we see \begin{align*}
\|\mathcal{N}[u]-\mathcal{N}[v]\|_{\Lambda'(I)}&\leq C\big(\|\sqrt{\mathcal{K}_{\lambda}} u\|_{\Lambda(I)}^{2p-2}+\|\sqrt{\mathcal{K}_{\lambda}} v\|_{\Lambda(I)}^{2p-2}\big)\|u-v\|_{\Lambda(I)} \\
&\leq C N^{2p-2}\|u-v\|_{\Lambda(I)} \end{align*}
as in \eqref{w2}. Hence, for $u,v \in X$ we obtain $d(\Phi(u), \Phi(v))\leq C N^{2p-2}d(u,v)$. Now by taking $M=2C\|u_0\|_{H_\lambda^1}$ and $N=2\varepsilon$ and then choosing $\varepsilon>0$ small enough so that \eqref{w3} holds and $CN^{2p-2}\leq 1/2$, it follows that $\Phi$ is a contraction on $X$. Therefore, we have proved that there exists a unique local solution $u \in C_t(I;H_{\lambda}^1) \cap L_t^q(I;W_{\lambda}^{1,r})$ for any admissible pair $(q,r)$.
\subsection{Global well-posedness in the energy space for small data}
Using the first estimate in Proposition \ref{str}, we observe that \eqref{sm} is also satisfied if $\|u_0\|_{H_{\lambda}^1}$ is sufficiently small, since \begin{equation*}
\|e^{-it\mathcal K_{\lambda}}u_0\|_{\mathcal{W}_\lambda(I)} \leq C \|u_0\|_{H_{\lambda}^1}\leq \varepsilon \end{equation*} from which one can take $T=\infty$ in the above argument to obtain a global unique solution.
The continuous dependence of the solution $u$ with respect to the initial data $u_0$ follows clearly in the same way: \begin{align*} d(u,v) &\lesssim d(e^{-it\mathcal{K}_{\lambda}}u_0,e^{-it\mathcal{K}_{\lambda}}v_0) + d\Big(\int_0^t e^{-i(t-s)\mathcal{K}_{\lambda}}\mathcal{N}[u]ds,\int_0^t e^{-i(t-s)\mathcal{K}_{\lambda}}\mathcal{N}[v]ds \Big) \\
&\lesssim \|u_0 - v_0\| + \frac12 d(u,v) \end{align*} which implies \begin{align*}
d(u,v) \lesssim \|u_0-v_0\|_{H_{\lambda}^1}. \end{align*} Here, $u,v$ are the corresponding solutions for initial data $u_0, v_0$, respectively.
\subsection{Scattering in the energy space for small data}
To prove the scattering property, we first note that \begin{align*}
\|e^{it_2 \mathcal{K}_{\lambda}}u(t_2)-e^{it_1 \mathcal{K}_{\lambda}}u(t_1)\|_{H_{\lambda}^1} = \Big\|\int_{t_1}^{t_2} e^{is\mathcal{K}_{\lambda}}\mathcal{N}[u]\,ds\Big\|_{H_{\lambda}^1}\\
\lesssim \|\mathcal{N}[u]\|_{\mathcal{W}_{\lambda}{'}([t_1 ,t_2])} \\
\lesssim \|u\|^{2p-1}_{\mathcal{W}_{\lambda}([t_1,t_2])} \quad \rightarrow\quad 0 \end{align*} as $t_1, t_2 \rightarrow {\infty}$. This implies that $\phi :=\lim_{t\rightarrow {\infty}} e^{it\mathcal{K}_{\lambda}}u(t)$ exists in $H_\lambda^1$. Furthermore, $$u(t)-e^{-it\mathcal{K}_{\lambda}}\phi=i\int_t^{\infty} e^{i(t-s)\mathcal{K}_{\lambda}}\mathcal{N}[u]ds,$$ and hence \begin{align*}
\|u(t)-e^{-it{\mathcal K}_\lambda}\phi\|_{H^1_\lambda}&=\Big\|\int_t^{\infty} e^{i(t-s)\mathcal{K}_{\lambda}}\mathcal{N}[u] ds\Big\|_{H_{\lambda}^1}\\
&\lesssim\|\mathcal{N}[u]\|_{\mathcal{W}_{\lambda}'([t,\infty])} \\
&\lesssim \|u\|^{2p-1}_{\mathcal{W}_{\lambda}([t,\infty])} \quad \rightarrow \quad 0 \end{align*} as $t\rightarrow {\infty}$. {The scattering is proved.}
\section{Ground states and Gagliardo-Nirenberg estimate}
In this section, we prove Theorem \ref{gag} dealing with the existence of ground states solutions to \eqref{E} and the Gagliardo-Nirenberg type estimate \eqref{gagg}.
\subsection{Gagliardo-Nirenberg estimate}
Using the Hardy-Littlewood-Sobolev inequality (Lemma \ref{hls}), we first see \begin{equation}\label{ie}
\int_{\mathbb{R}^n} |x|^{-\tau}|u|^p (I_\alpha \ast |\cdot|^{-\tau}|u|^p) dx \lesssim \big\||x|^{-\tau}|u|^p\big\|^2_{{\frac{2n}{\alpha+n}}} \end{equation} if $0<\alpha<n.$ Applying Lemma \ref{ckn} to the right-hand side of \eqref{ie} with $b=-\frac{\tau}{p}$, $q=\frac{2np}{\alpha+n}$, $a=0$ and the Lebesgue exponent of Lemma \ref{ckn} equal to $2$, we get \begin{equation}\label{5.100'}
\big\||x|^{-\tau}|u|^p\big\|^2_{{\frac{2n}{\alpha+n}}}= \big\||x|^{-\frac{\tau}{p}}u\big\|^{2p}_{{\frac{2np}{\alpha+n}}} \lesssim \|\nabla u\|^{2p} \end{equation} if \begin{equation}\label{iee}
0<\frac{\alpha+n}{2np} \leq \frac12 <1, \quad -\frac{\alpha+n}{2p}<-\frac{\tau}{p} \leq 0, \quad \frac{\tau}{p}-1=\frac{\alpha+n}{2p}-\frac{n}2. \end{equation} Finally, using the equivalent norm to the Sobolev one (see Lemma \ref{2.2}), we obtain the desired estimate \eqref{gagg} if $\frac{1+\kappa}{n}<\frac12<\min\{1, 1-\frac{\kappa}{n}\}$, which does not affect the assumptions in \eqref{as}.
Now it remains to derive the assumptions in \eqref{as}. We note that the last equality in \eqref{iee} is equivalent to $p=1+\frac{2-2\tau+\alpha}{n-2}$. Using $p=1+\frac{2-2\tau+\alpha}{n-2}$, the requirements \eqref{iee} can be written as \begin{equation}\label{ie1}
\alpha+n>0, \quad \alpha+n \ge n\tau, \quad 0\leq 2\tau<\alpha+n, \end{equation} which are reduced to $0<\tau\leq 1+\frac{\alpha}{n}$ since $\alpha+n\geq n\tau>2\tau>0$, as desired.
\subsection{Existence of ground states}
By using \eqref{gagg}, we first set $J(u)=\|\sqrt{\mathcal K_\lambda}u\|^{2p}/\mathcal P[u]$ and take a sequence $\{u_n\}_{n\in\mathbb{N}}$ in $H_{\lambda}^1$ such that \begin{align*}
\gamma := \frac1{C_{n,\tau,\alpha,\lambda}}
=\lim_{n\rightarrow \infty} \frac{\|\sqrt{\mathcal{K}_{\lambda}}u_n\|^{2p}}{\mathcal{P}[u_n]}. \end{align*} By the scaling $u(x) \mapsto u^{\delta,\mu}(x)=\delta u(\mu x)$ for $\delta, \mu \in \mathbb{R}$, we have \begin{align*}
\|u^{\delta,\mu}\|^2&=\delta^2 \mu^{-n}\|u\|^2 \\
\|\sqrt{\mathcal K_{\lambda}}u^{\delta,\mu}\|^2&= \|\nabla u^{\delta,\mu}\|^2+\lambda\big\|\frac{u^{\delta,\mu}}{|x|}\big\|^2 \\
&=\delta^2 \mu^{2-n}\Big(\|\nabla u\|^2+\lambda\big\|\frac{u}{|x|}\big\|^2\Big)=\delta^2 \mu^{2-n}\|\sqrt{\mathcal K_\lambda}u\|^2 \\
\int_{\mathbb{R}^n}|x|^{-\tau}|u^{\delta,\mu}|^p(I_\alpha \ast |\cdot|^{-\tau}|u^{\delta,\mu}|^p) dx &= \delta^{2p}\mu^{2\tau-n-\alpha}\int_{\mathbb{R}^n}|x|^{-\tau}|u|^p(I_\alpha \ast |\cdot|^{-\tau}|u|^p) dx, \end{align*} which implies that $J(u^{\delta,\mu})=J(u)$ by $p=1+\frac{2-2\tau+\alpha}{n-2}$. Let $\psi_n = u_n^{\delta_n,\mu_n}$ where
$$\delta_n= \frac{\|u_n\|^{\frac{n}2-1}}{\|\sqrt{\mathcal K_\lambda} u_n\|^{\frac{n}{2}}}, \quad \mu_n=\frac{\|u_n\|}{\|\sqrt{\mathcal K_\lambda} u_n\|}.$$ Then, we have
$$\|\psi_n\|=\|\sqrt{\mathcal K_\lambda} \psi_n\|=1 \quad \textnormal{and} \quad \gamma=\lim_{n\rightarrow\infty} J(\psi_n)=\lim_{n\rightarrow \infty} \frac1{\mathcal P[\psi_n]}.$$
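For completeness, the scale invariance $J(u^{\delta,\mu})=J(u)$ used in this construction can be verified directly from the above scaling relations:
\begin{equation*}
J(u^{\delta,\mu})
=\frac{\big(\delta^2\mu^{2-n}\big)^{p}\|\sqrt{\mathcal K_\lambda}u\|^{2p}}{\delta^{2p}\mu^{2\tau-n-\alpha}\,\mathcal P[u]}
=\mu^{(2-n)p+n+\alpha-2\tau}\,J(u),
\end{equation*}
and the exponent vanishes if and only if $p(n-2)=n+\alpha-2\tau$, that is, $p=1+\frac{2-2\tau+\alpha}{n-2}$.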
Now, up to a subsequence, we take $\psi \in H_\lambda^1$ such that $\psi_n \rightharpoonup \psi$ weakly in $H_\lambda^1$, and we will show that $$\frac{1}{\mathcal P[\psi_n]} \rightarrow \frac1{\mathcal P[\psi]} \quad \textnormal{as} \quad n\rightarrow \infty.$$ By using Lemma \ref{hls} via \eqref{5.100'}, we have \begin{align} \nonumber
&\int_{\mathbb{R}^n} |x|^{-\tau}|\psi_n|^p (I_\alpha \ast |\cdot|^{-\tau}|\psi_n|^p)- |x|^{-\tau}|\psi|^p (I_\alpha \ast |\cdot|^{-\tau}|\psi|^p)dx \\ \nonumber
&\quad =\int_{\mathbb{R}^n} |x|^{-\tau}|\psi|^p \big(I_\alpha \ast |\cdot|^{-\tau}(|\psi_n|^p - |\psi|^p)\big) dx\\ \nonumber
&\qquad \qquad \qquad+ \int_{\mathbb{R}^n}|x|^{-\tau}(|\psi_n|^p-|\psi|^p)(I_\alpha \ast |\cdot|^{-\tau}|\psi_n|^p)dx\\ \nonumber
&\quad\lesssim \Big(\big\||x|^{-\tau}|\psi|^p\big\|_{{\frac{2n}{\alpha+n}}} +\big\||x|^{-\tau}|\psi_n|^p\big\|_{{\frac{2n}{\alpha+n}}}\Big)
\big\||x|^{-\tau}(|\psi_n|^p-|\psi|^p)\big\|_{{\frac{2n}{\alpha+n}}}\\ \label{dif}
&\quad\lesssim \big\||x|^{-\tau}(|\psi_n|^p-|\psi|^p)\big\|_{{\frac{2n}{\alpha+n}}}. \end{align} Using the following simple inequality
$$|u|^p-|v|^p \lesssim |u-v|(|u|^{p-1}+|v|^{p-1}), \quad p\ge1$$ and H\"older's inequality, the last term in \eqref{dif} is bounded as \begin{align}\label{i} \nonumber
\big\||x|^{-\tau}(|\psi_n|^p-|\psi|^p)\big\|_{{\frac{2n}{\alpha+n}}}
&\lesssim (\|\psi_n\|_{{(p-1)a_1}}^{p-1}+\|\psi\|_{{(p-1)a_1}}^{p-1}) \||x|^{-\tau}|\psi-\psi_n|\|_{{a_2}}\\
&\lesssim (\|\psi\|_{H_\lambda^1}^{p-1}+\|\psi_n\|_{H_\lambda^1}^{p-1})\||x|^{-\tau}|\psi-\psi_n|\|_{{a_2}} \end{align} if $0<\tau<2$ and \begin{equation}\label{gagc}
\frac{\alpha+n}{2n}=\frac{1}{a_1}+\frac{1}{a_2}, \quad \frac{n-2}{2n}\leq\frac{1}{(p-1)a_1}\leq\frac12. \end{equation} {Indeed, for the last inequality we used the Sobolev embedding, $H^1(\mathbb{R}^n) \hookrightarrow L^q({\mathbb{R}^n})$ for $2\leq q \leq \frac{2n}{n-2}$ if $n\ge3$.} Thanks to the compactness of the Sobolev injection, Lemma \ref{compact}, under the condition \begin{equation}\label{gagcc}
\frac{n-2}{2(n-\tau)}<\frac{1}{a_2}<\frac12, \end{equation} we then get $1/{\mathcal P[\psi_n]} \rightarrow 1/{\mathcal P[\psi]}=\gamma$ as $n \rightarrow \infty$. We need to check that there exist $a_1$ and $a_2$ satisfying \eqref{gagc}, \eqref{gagcc} and the assumptions in Theorem \ref{gag}, but we will postpone this until the end of the proof.
By the lower semi-continuity of the norm, we see
$$\|\psi\|\leq 1 \quad \textnormal{and} \quad \|\sqrt{\mathcal K_\lambda} \psi\|\leq 1,$$
from which $J(\psi)\leq\gamma$; since $\gamma$ is the infimum, $J(\psi)=\gamma$ and hence $\|\psi\|=\|\sqrt{\mathcal K_\lambda}\psi\|=1.$ Consequently, $$\psi_n \rightarrow \psi \quad \textnormal{in} \quad H_\lambda^1 \quad \textnormal{and} \quad \gamma=J(\psi)=\frac1{\mathcal P[\psi]}.$$ Moreover, $\psi$ satisfies \eqref{E} because any minimizer satisfies the Euler--Lagrange equation
$$\partial_\epsilon J(\psi+\epsilon \eta)_{|\epsilon=0}=0, \quad \forall \eta \in C_0^{\infty} \cap H_\lambda^1.$$
It remains to check the existence of $a_1$ and $a_2$ satisfying the conditions \eqref{gagc}, \eqref{gagcc} under the assumptions in Theorem \ref{gag}. Substituting the first condition in \eqref{gagc} into the second one of \eqref{gagc} with $p=1+\frac{2-2\tau+\alpha}{n-2}$, we see
{\begin{equation}\label{gagc2}
\frac{\alpha+n}{2n}-\frac{2-2\tau+\alpha}{2(n-2)}\leq\frac1{a_2} \leq \frac{\alpha+n}{2n}-\frac{2-2\tau+\alpha}{2n}.
\end{equation}} To eliminate $a_2$, we make the lower bounds of $1/a_2$ of \eqref{gagcc} and \eqref{gagc2} less than the upper ones of $1/a_2$ of \eqref{gagcc} and \eqref{gagc2}.
{Indeed, starting the process from the lower bound in \eqref{gagc2}, we arrive at $n\tau<\alpha+n$ which is satisfied by the assumption \eqref{as}. Similarly from the lower bound in \eqref{gagcc}, we arrive at $0<\tau<\frac{n+2}{2}$, but this is eliminated by \eqref{as} using the facts that $n\ge3$ and $\tau<2$.}
\section{Blow-up of the energy solutions}
In this section, we prove Theorem \ref{t1} which provides a criterion for blow-up phenomena in the energy-critical focusing regime under the threshold of the ground state. As a consequence, we establish Corollary \ref{t2}. Moreover, we prove Proposition \ref{s} and Corollary \ref{s2} about energy bounded solutions.
\subsection{Criterion for blow-up}
In order to prove Theorem \ref{t1}, we argue by contradiction, relying on the following inequality, which will be proved below: \begin{equation}\label{qq}
V_{R}''\leq4\,\mathcal I[u]+\frac C{R^{2\tau}}+\frac C{R^2}, \end{equation}
where $\mathcal I [u] = \|\sqrt{\mathcal K_\lambda} u\|^2 - \mathcal P[u]$ and $R\gg1$. Indeed, taking $u_0\in H^1_\lambda$ with \eqref{ss'} and assuming that $u$ is global, for $R\gg1$, $$V_{R}''\leq4\,\mathcal I[u]+\frac C{R^{2\tau}}+\frac C{R^2}<-c<0$$
if there is no sequence $t_n\to\infty$ such that $\|\sqrt{\mathcal K_\lambda} u(t_n)\|\to \infty$, which is a contradiction.
Before starting to prove \eqref{qq}, we first define $\phi_R(\cdot):=R^2\phi(\frac{\cdot}{R})$, $R>0$, where the radial function $\phi\in C_0^\infty(\mathbb{R}^n)$ satisfies
$$\phi(|x|)=\phi(r):=\left\{ \begin{array}{ll}
\frac{r^2}2,\quad\mbox{if}\quad r\leq1 ;\\
0,\quad\mbox{if}\quad r\geq2,
\end{array} \right.\quad\mbox{and}\quad \phi''\leq1.$$ Then, $\phi_R$ satisfies $$\phi_R''\leq1,\quad \phi_R'(r)\leq r,\quad\Delta \phi_R\leq N$$
and, for $|x|\leq R$ \begin{align}\label{calc}
\nabla\phi_R(x)=x,\quad\Delta\phi_R(x)=N. \end{align}
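Indeed (an elementary verification of \eqref{calc}), for $|x|\leq R$ one has $\phi_R(x)=R^2\,\frac{|x|^2}{2R^2}=\frac{|x|^2}{2}$, so that
\begin{equation*}
\nabla\phi_R(x)=x,\qquad \Delta\phi_R(x)=N,
\end{equation*}
which is exactly the statement used below on the region $\{|x|<R\}$.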
By recalling the definition of $V(t)$ and $M(t)$ in Section 2, we denote the localized variance and Morawetz action as \begin{align*}
V_R(t):=\int_{\mathbb{R}^n}\phi_R(x)|u(x,\cdot)|^2\,dx, \quad V_R'(t)=M_R(t):=2\Im\int_{\mathbb{R}^n}\bar u\nabla \phi_R \cdot \nabla udx. \end{align*} By Proposition \ref{mrwz}, we divide $M_R'$ into two parts, $A$ and $B$, as $M_R'(t)=A + B$ where
$$A=4\sum_{k,l=1}^{N}\int_{\mathbb{R}^n}\partial_l\partial_k\phi_R\Re(\partial_ku\partial_l\bar u)dx-\int_{\mathbb{R}^n}\Delta^2\phi_R|u|^2dx+4\lambda\int_{\mathbb{R}^n}\nabla\phi_R\cdot x\frac{|u|^2}{|x|^4}dx$$ and \begin{align}
\nonumber
B&=-\frac{2(p-2)}{p}\int_{\mathbb{R}^n}\Delta\phi_R|x|^{-\tau}|u|^p(I_\alpha*|\cdot|^{-\tau}|u|^{p})dx\\
\nonumber
&\quad\qquad -\frac{4\tau}p\int_{\mathbb{R}^n}x\cdot\nabla\phi_R|x|^{-\tau-2}|u|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)dx\\
\nonumber
&\qquad\qquad\qquad -\frac{4(N-\alpha)}p\sum_{k=1}^N\int_{\mathbb{R}^n}|x|^{-\tau}|u|^{p}\partial_k\phi_R(\frac{x_k}{|\cdot|^2}I_\alpha*|\cdot|^{-\tau}|u|^p)dx\\
\label{123}
&=:B_1 + B_2+ B_3. \end{align}
Using the following radial relations \begin{equation}\label{''}
\partial_k=\frac{x_k}r\partial_r,\quad\partial_l\partial_k=\Big(\frac{\delta_{lk}}r-\frac{x_lx_k}{r^3}\Big)\partial_r+\frac{x_lx_k}{r^2}\partial_r^2 \end{equation} and the Cauchy-Schwarz inequality via the properties of $\phi$, it follows that \begin{align}
\nonumber
A&= 4\int_{\mathbb{R}^N} |\nabla u|^2 \frac{\phi_R'}{r}dx + 4 \int_{\mathbb{R}^N}
|x\cdot \nabla u|^2 \big(\frac{\phi_R''}{r^2}-\frac{\phi_R'}{r^3}\big) dx \\
\nonumber
&\qquad \qquad\qquad \qquad\qquad \qquad\quad-\int_{\mathbb{R}^N} \Delta^2\phi_R|u|^2dx + 4\lambda \int_{\mathbb{R}^N} \frac{|u|^2}{r^3}\phi_R' dx \\
\nonumber
&\leq4\int_{\mathbb{R}^n}|\nabla u|^2\frac{\phi_R'}r\,dx+4\int_{\mathbb{R}^n}\frac{|x\cdot\nabla u|^2}{r^2}\big(1-\frac{\phi_R'}r\big)dx\\
\nonumber
&\qquad \qquad \qquad \qquad\qquad \qquad\quad -\int_{\mathbb{R}^n}\Delta^2\phi_R|u|^2\,dx+4\lambda\int_{\mathbb{R}^n}\frac{|u|^2}{r^3}\phi_R'dx\nonumber\\
\label{(I)}
&\leq4\int_{\mathbb{R}^n}|\nabla u|^2dx-\int_{\mathbb{R}^n}\Delta^2\phi_R|u|^2dx+4\lambda\int_{\mathbb{R}^n}\frac{|u|^2}{r^2}dx. \end{align}
On the other hand, to handle the part $B$, we split the integrals in $B$ into the regions $|x|<R$ and $|x|>R$. Then, by \eqref{calc}, the first two terms in $B$ are written as \begin{align}
\nonumber
B_1+B_2&=\frac{2N(2-p)-4\tau}{p}\int_{|x|<R}|x|^{-\tau}|u|^p(I_\alpha*|\cdot|^{-\tau}|u|^{p})dx\\
\nonumber
&\qquad \qquad \qquad \qquad +O\bigg(\int_{|x|>R}|x|^{-\tau}|u|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)dx\bigg)\\
&=\frac{2(N(2-p)-2\tau)}p\mathcal{P}[u]+O\bigg(\int_{|x|>R}|x|^{-\tau}|u|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)dx\bigg).\label{12} \end{align} For the third term $B_3$, following the computations in \cite[Lemma 4.5]{st4}, we have \begin{align}
B_3&=\frac{2(\alpha-N)}{p}\int_{|y|<R}\int_{|x|<R}I_\alpha(x-y)|y|^{-\tau}|u(y)|^p|x|^{-\tau}|u(x)|^{p}\,dx\,dy\nonumber\\
&\qquad \qquad \qquad \qquad \qquad \qquad +O\bigg(\int_{|x|>R}(I_\alpha*|\cdot|^{-\tau}|u|^p)|x|^{-\tau}|u|^pdx\bigg)\nonumber\\
&=\frac{2(\alpha-N)}{p}\int_{|x|<R}|x|^{-\tau}|u(x)|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)dx \nonumber\\
&\qquad \qquad \qquad \qquad \qquad \qquad+O\bigg(\int_{|x|>R}(I_\alpha*|\cdot|^{-\tau}|u|^p)|x|^{-\tau}|u|^pdx\bigg)\nonumber\\
&=\frac{2(\alpha-N)}{p}\mathcal P[u]+O\bigg(\int_{|x|>R}|x|^{-\tau}|u|^p(I_\alpha*|\cdot|^{-\tau}|u|^p)dx\bigg).\label{372} \end{align} Combining \eqref{(I)}, \eqref{123}, \eqref{12} and \eqref{372}, we then obtain \begin{align}
M_R'&\leq-\int_{\mathbb{R}^n}\Delta^2\phi_R|u|^2\,dx+4\int_{\mathbb{R}^n}|\nabla u|^2+4\lambda\int_{\mathbb{R}^n}\frac{|u|^2}{r^2}\,dx
-4\mathcal P[u]\nonumber\\
&\qquad \qquad \qquad \qquad \qquad \qquad \quad +O\left(\int_{|x|>R}|x|^{-\tau}|u|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)\,dx\right)\nonumber\\
&\leq4\Big(\|\sqrt{\mathcal K_\lambda} u\|^2-\mathcal P[u]\Big)+O\left(\int_{|x|>R}|x|^{-\tau}|u|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)\,dx\right)+O(R^{-2}).\nonumber \end{align}
Here, for the last inequality we used the fact that $|\partial^{\nu}\phi_R|\lesssim R^{2-|\nu|}$.
Now, using Lemma \ref{hls} and Lemma \ref{ckn} with $b=-\frac{\tau}{p}$, $q=\frac{2Np}{\alpha+N}$, $a=0$ and the Lebesgue exponent of Lemma \ref{ckn} equal to $2$, we obtain \begin{eqnarray*}
\int_{|x|>R}|x|^{-\tau}|u|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)\,dx
&\lesssim& \||x|^{-\tau}|u|^{p}\|^2_{\frac{2N}{\alpha+N}}\\
&\lesssim& R^{-2\tau}\|u\|_{\frac{2Np}{\alpha+N}}^{2p}\\
&\lesssim& R^{-2\tau}\|\nabla u\|^{2p} \end{eqnarray*} if \begin{equation}\label{q}
0<\frac{\alpha+n}{2np}\leq \frac12<1 , \quad -\frac{\alpha+n}{2p}<-\frac{\tau}{p}\leq 0, \quad \frac{\tau}{p}-1=\frac{\alpha+n}{2p}-\frac{n}{2}. \end{equation} Here we note that the last equality in \eqref{q} is equivalent to $p=1+\frac{2-2\tau+\alpha}{n-2}$. Using $p=1+\frac{2-2\tau+\alpha}{n-2}$, the requirement \eqref{q} can be written as \eqref{ie1} which are reduced to $0<\tau\leq 1+\frac{\alpha}{n}$ since $\alpha+n\geq n\tau>2\tau>0$.
Consequently, for large $R\gg1$, using the equivalent norm to Sobolev one (see Lemma \ref{2.2}), we get \begin{equation*}
M_R'\leq4\,\mathcal I[u]+\frac C{R^{2\tau}}\|\sqrt{\mathcal K_\lambda} u\|^{2p}+\frac C{R^2}, \end{equation*} as desired.
\subsection{The boundedness of the energy solution}
Now, we prove Proposition \ref{s}.
Specifically, the energy bound is obtained by combining the conservation laws with the following coercivity (or energy trapping) lemma, which exploits the assumption \eqref{ss}. \begin{lem}\label{bnd'} Let $\varphi\in H_\lambda^1$ be a ground state solution to \eqref{E} and let $u\in H_\lambda^1$. Assume that there is $0<c<1$ satisfying $$\mathcal P[u]<c\, \mathcal P[\varphi].$$ Then there exists a constant $c_\varphi>0$ such that \begin{align*}
\|\sqrt{\mathcal K_\lambda} u\|^2&<c_\varphi\mathcal E[u]. \end{align*} \end{lem} \begin{proof} Thanks to \eqref{E}, we first see \begin{equation}\label{p}
\mathcal P[\varphi]:=\int_{\mathbb{R}^n} |x|^{-\tau}|\varphi|^p(I_\alpha *|\cdot|^{-\tau}|\varphi|^p)dx=\|\sqrt{\mathcal K_\lambda}\varphi\|^2. \end{equation}
Applying the Gagliardo-Nirenberg inequality, Theorem \ref{gag}, we have \begin{align*}
(\mathcal P[u])^p \leq (\mathcal P[u])^{p-1}\cdot C_{N,\tau,\alpha,\lambda} \|\sqrt{\mathcal K_\lambda} u\|^{2p}\leq \Big(\frac{\mathcal P[u]}{\mathcal P[\varphi]}\Big)^{p-1}\|\sqrt{\mathcal K_\lambda} u\|^{2p}. \end{align*} Here, for the second inequality, we used the fact that \begin{equation}\label{poh}
C_{N,\tau,\alpha,\lambda}=\frac{\mathcal P[\varphi]}{\|\sqrt{\mathcal K_\lambda}\varphi\|^{2p}}=({\mathcal P[\varphi]})^{1-p} \end{equation} by using \eqref{p}. Therefore, we obtain \begin{eqnarray*} \mathcal P[u]
&\leq&\Big(\frac{\mathcal P[u]}{\mathcal P[\varphi]}\Big)^\frac{p-1}p\|\sqrt{\mathcal K_\lambda} u\|^2, \end{eqnarray*} which implies that \begin{align*}
\mathcal E[u]&=\|\sqrt{\mathcal K_\lambda} u\|^2-\frac1p\mathcal P[u]\\
&\geq\Big(1-\frac{1}p\Big(\frac{\mathcal P[u]}{\mathcal P[\varphi]}\Big)^\frac{p-1}p\Big)\|\sqrt{\mathcal K_\lambda} u\|^2\\
&\geq\Big(1-\frac{c^{(p-1)/p}}p\Big)\|\sqrt{\mathcal K_\lambda} u\|^2. \end{align*} {This concludes the proof.} \end{proof}
\subsection{Energy bounded/non-global solutions} Finally, we prove Corollaries \ref{t2} and \ref{s2}, which present the dichotomy of energy bounded/non-global existence of solutions. \subsubsection{Energy bounded solutions}
First, Corollary \ref{s2} follows from the invariance of \eqref{t11} and \eqref{t12} under the flow of \eqref{S}. We first define the function $f:[0,\infty) \rightarrow \mathbb{R}$ as \begin{equation}\label{def}
f(t)= t- \frac{C_{N,\tau,\alpha,\lambda}}{p} t^{p}. \end{equation}
Since $p>1$, the function $f(t)$ has a maximum value $f(t_1)=\frac{p-1}{p}C_{N,\tau,\alpha,\lambda}^{-\frac1{p-1}}$ at $t_1= (C_{N,\tau,\alpha,\lambda})^{-\frac1{p-1}}$. We note here that $t_1=\|\sqrt{\mathcal K_\lambda} \varphi\|^2$ by \eqref{poh} and \eqref{p}.
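For completeness, here is the short computation behind these facts (using only \eqref{def}, \eqref{poh} and \eqref{p}): since $f'(t)=1-C_{N,\tau,\alpha,\lambda}\,t^{p-1}$, the function $f$ increases on $[0,t_1]$ and decreases afterwards, with
\begin{equation*}
t_1=(C_{N,\tau,\alpha,\lambda})^{-\frac1{p-1}}=\big(\mathcal P[\varphi]^{1-p}\big)^{-\frac1{p-1}}=\mathcal P[\varphi]=\|\sqrt{\mathcal K_\lambda}\varphi\|^2,
\qquad
f(t_1)=t_1\Big(1-\frac1p\,C_{N,\tau,\alpha,\lambda}\,t_1^{p-1}\Big)=\frac{p-1}{p}\,t_1.
\end{equation*}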
Using the Gagliardo-Nirenberg type inequality, Theorem \ref{gag}, we see \begin{align}
\mathcal E[u]=\|\sqrt{\mathcal K_\lambda} u\|^2 - \frac1p \mathcal P[u] &\geq\|\sqrt{\mathcal K_\lambda} u\|^2-\frac{C_{N,\tau,\alpha,\lambda}}{p}\|\sqrt{\mathcal K_\lambda} u\|^{2p}\label{xxx}\\
&=f\big(\|\sqrt{\mathcal K_\lambda} u\|^2\big).\nonumber \end{align} By the assumption \eqref{t11} with \eqref{poh} and \eqref{p}, we also see \begin{equation}\label{x}
\mathcal E[u_0]<\mathcal E[\varphi] =f(t_1), \end{equation} which implies \begin{equation}\label{xx}
f\big(\|\sqrt{\mathcal K_\lambda}u\|^2\big) \leq \mathcal E[u]= \mathcal E[u_0] <f(t_1). \end{equation}
Since $\|\sqrt{\mathcal K_\lambda}u_0\|^2<\|\sqrt{\mathcal K_\lambda}\varphi\|^2=t_1$ by the assumption \eqref{t12}, and the continuity in time with \eqref{xx}, we get
$$\|\sqrt{\mathcal K_\lambda}u(t)\|^2 < t_1, \quad \forall t \in [0,T^\ast),$$ which is equivalent to $$\mathcal{MG}[u(t)]<1, \quad \forall t \in [0,T^\ast).$$ Therefore \eqref{t11} and \eqref{t12} are invariant under the flow of \eqref{S}, and the solution remains bounded in $H^1_\lambda$ on $[0,T^\ast)$, which concludes the proof.
\subsubsection{Blow-up }
To prove Corollary \ref{t2}, we use the same function $f(t)$ defined in \eqref{def}. By the assumption \eqref{t13} with \eqref{poh} and \eqref{p}, we have
$$\|\sqrt{\mathcal K_\lambda} u_0\|^2>\|\sqrt{\mathcal K_\lambda}\varphi\|^2=t_1.$$ Thus, the continuity in time with \eqref{xx} gives
$$ \|\sqrt{\mathcal K_\lambda} u (t)\|^2>t_1,\quad \forall\, t\in [0,T^*) .$$
Hence, $\mathcal{MG}[u(t)]>1$ on $[0,T^*)$, and this together with \eqref{t11} is invariant under the flow of \eqref{S}.
Finally, by using $\mathcal{ME}[u(t)]<1$, $\mathcal{MG}[u(t)]>1$ and the identity $p\mathcal E[\varphi]=(p-1)\|\sqrt{\mathcal K_\lambda}\varphi\|^2$, we obtain for all $t\in[0,T^\ast)$ \begin{align*}
\mathcal I[u(t)]&=\|\sqrt{\mathcal K_\lambda} u\|^2-\mathcal P[u]\\
&=p\mathcal E[u]-(p-1)\|\sqrt{\mathcal K_\lambda} u\|^2\\
&< p\mathcal E[\varphi]-(p-1)\|\sqrt{\mathcal K_\lambda}\varphi\|^2<0, \end{align*} which concludes the proof by using Theorem \ref{t1}.
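For completeness, the two identities used in the last display follow directly from the definition of the energy in the focusing case $\epsilon=-1$ and from \eqref{p}: since $\mathcal E[u]=\|\sqrt{\mathcal K_\lambda}u\|^2-\frac1p\mathcal P[u]$, one has
\begin{equation*}
p\,\mathcal E[u]-(p-1)\|\sqrt{\mathcal K_\lambda}u\|^2=\|\sqrt{\mathcal K_\lambda}u\|^2-\mathcal P[u]=\mathcal I[u],
\qquad
p\,\mathcal E[\varphi]=p\,\mathcal P[\varphi]-\mathcal P[\varphi]=(p-1)\|\sqrt{\mathcal K_\lambda}\varphi\|^2.
\end{equation*}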
\section{Appendix: Morawetz estimate}
In this section, we present a virial identity (Proposition \ref{mrwz}) that exhibits the convexity property in time for certain quantities associated with solutions of the generalized Hartree equation \eqref{S}. This identity serves as the basis for studying blow-up phenomena. The virial identity for the free nonlinear Schr\"odinger equation was first established by Zakharov \cite{Za} and Glassey \cite{G}. When the free equation is perturbed by an electromagnetic potential, Fanelli and Vega \cite{FV} derived the corresponding virial identities for the linear Schr\"odinger and linear wave equations. The proof relies on the standard technique of Morawetz multipliers, introduced in \cite{M} for the Klein-Gordon equation. The identity we present here is the same as that in \cite{sx}, with the addition of a term corresponding to the contribution from the inverse square potential.
\begin{proof}[Proof of Proposition \ref{mrwz}]
Let $u \in C_t([0,T];H_\lambda^1)$ be a solution to the focusing case of equation \eqref{S} \begin{align}\label{S1}
i\partial_t u
&=-\Delta u + \frac\lambda{|x|^2}u -|x|^{-\tau}|u|^{p-2}\Big(I_\alpha*|\cdot|^{-\tau}|u|^p\Big)u \\ \nonumber
&=-\Delta u + \frac\lambda{|x|^2}u -\mathcal N. \end{align} Multiplying \eqref{S1} by $2 \bar u$ and taking imaginary parts, we obtain \begin{equation*}
-2 \Im{(\bar u \Delta u)} = \partial_t(|u|^2). \end{equation*} Using this, we can compute $$V_\phi'(t)=2\Im\int_{\mathbb{R}^n} \bar u \nabla\phi\cdot\nabla u dx=2\sum_{k=1}^N \Im\int_{\mathbb{R}^n} \bar u \partial_k\phi\cdot \partial_k u dx.$$
In order to consider the second derivative of $V_\phi$, we need to compute \begin{equation}\label{sec} V''_{\phi}(t)=2\sum_{k=1}^N \int_{\mathbb{R}^n} \partial_k\phi\cdot \partial_t \Im(\bar u \partial_k u) dx. \end{equation} Using \eqref{S1}, we have \begin{align} \partial_t\Im(\bar u \partial_k u ) &=\Re(i\partial_t u \partial_k\bar u )-\Re(i\bar{u} \partial_k \partial_t u )\nonumber\\
&=\Re\big(\partial_k\bar u (-\Delta u + \frac\lambda{|x|^2}u -\mathcal N)\big)-\Re\big(\bar u \partial_k(-\Delta u + \frac\lambda{|x|^2}u -\mathcal N)\big)\nonumber\\
&=\Re\big(\bar u \partial_k\Delta u -\Delta u\partial_k\bar u \big)+\Re\big(\bar u \partial_k\mathcal N-\mathcal N\partial_k\bar u \big)+\lambda\Re\big(\frac{u}{|x|^2}\partial_k\bar u-\bar u \partial_k(\frac{u }{|x|^2}) \big).\label{vr} \end{align} Here, for the last term, we see \begin{equation}\label{aa}
\Re\big(\bar u \partial_k(\frac{u }{|x|^2})-\frac{u}{|x|^2}\partial_k\bar u \big)=-2\frac{x_k}{|x|^4}{|u|^2}. \end{equation} For the first two terms, we will apply the following lemma, whose proof is omitted here and can be found in the proof of \cite[Proposition 2.12]{sx}. \begin{lem}\label{lem}
Let $\phi:\mathbb{R}^N \rightarrow \mathbb{R}$ be a radial, real-valued multiplier with $\phi=\phi(|x|)$.
Then, for $\mathcal N$ defined as $\mathcal N = -|x|^{-\tau}|u|^{p-2}\big(I_\alpha*|\cdot|^{-\tau}|u|^p\big)u$, we have \begin{align*} &\Re\int_{\mathbb{R}^N}(\bar u \partial_k\Delta u -\Delta u\partial_k\bar u )+(\bar u \partial_k\mathcal N-\mathcal N\partial_k\bar u ) dx\\
&\qquad\qquad =\sum_{l=1}^{N}2\int_{\mathbb{R}^n}\partial_l\partial_k\phi\,\Re(\partial_ku\partial_l\bar u)dx-\frac12\int_{\mathbb{R}^n}\Delta^2\phi|u|^2dx\\
&\qquad \qquad\qquad-\frac{(p-2)}{p}\int_{\mathbb{R}^n}\Delta\phi|x|^{-\tau}|u|^p(J_\alpha*|\cdot|^{-\tau}|u|^{p})dx\\
&\qquad\qquad \qquad \qquad -\frac{2\tau}p\int_{\mathbb{R}^n}x\cdot\nabla\phi|x|^{-\tau-2}|u|^{p}(J_\alpha*|\cdot|^{-\tau}|u|^p)dx\\
& \qquad \qquad\qquad \qquad \qquad -\frac{2(N-\alpha)}p\sum_{k=1}^{N}\int_{\mathbb{R}^N}\partial_k \phi\,|x|^{-\tau}|u|^{p}\Big(\frac{x_k}{|\cdot|^2}I_\alpha*|\cdot|^{-\tau}|u|^p\Big)dx. \end{align*} \end{lem} Therefore, combining \eqref{sec}, \eqref{vr}, Lemma \ref{lem} and \eqref{aa} finishes the proof. \end{proof}
\section{Declarations}
$\!\!\!\!\!\!\bullet$ The authors have no relevant financial or non-financial interests to disclose.\\ $\bullet$ The authors have no competing interests to declare that are relevant to the content of this article.\\ $\bullet$ All authors certify that they have no affiliations with or involvement in any organization or entity with any financial interest or non-financial interest in the subject matter or materials discussed in this manuscript.\\ $\bullet$ The authors have no financial or proprietary interests in any material discussed in this article.\\
$\bullet$ The data that support the findings of this study are available from the corresponding author upon reasonable request.
\end{document}
\begin{document}
\title{Generalized description of the spatio-temporal biphoton state in spontaneous parametric down-conversion}
\author{Baghdasar Baghdasaryan} \email{[email protected]} \affiliation{Theoretisch-Physikalisches Institut, Friedrich Schiller University Jena, 07743 Jena, Germany} \affiliation{Helmholtz-Institut Jena, 07743 Jena, Germany}
\author{Carlos Sevilla-Gutiérrez}
\affiliation{Fraunhofer Institute for Applied Optics and Precision Engineering IOF, 07745 Jena, Germany}
\author{Fabian Steinlechner} \email{[email protected]}
\affiliation{Fraunhofer Institute for Applied Optics and Precision Engineering IOF, 07745 Jena, Germany}
\affiliation{Abbe Center of Photonics, Friedrich Schiller University Jena, 07745 Jena, Germany}
\author{Stephan Fritzsche} \affiliation{Theoretisch-Physikalisches Institut, Friedrich Schiller University Jena, 07743 Jena, Germany} \affiliation{Helmholtz-Institut Jena, 07743 Jena, Germany}
\affiliation{Abbe Center of Photonics, Friedrich Schiller University Jena, 07745 Jena, Germany} \date{\today}
\begin{abstract} Spontaneous parametric down-conversion (SPDC) is a widely used source for photonic entanglement. Years of focused research have led to a solid understanding of the process, but a cohesive analytical description of the paraxial biphoton state has yet to be achieved. We derive a general expression for the spatio-temporal biphoton state that applies universally across common experimental settings and correctly describes the nonseparability of spatial and spectral modes. We formulate a criterion for decreasing the coupling between the spatial and spectral degrees of freedom by taking into account the Gouy phase of the interacting beams. This work provides new insights into the role of the Gouy phase in SPDC, and also into the preparation of engineered entangled states for multidimensional quantum information processing. \end{abstract}
\maketitle \section{Introduction}\label{introduction} Photon pairs generated via spontaneous parametric down-conversion (SPDC) have provided an experimental platform for fundamental quantum science \cite{doi:10.1063/5.0023103} and figure prominently in applications in quantum information processing, including recent milestone experiments in photonic quantum computing \cite{doi:10.1126/science.abe8770}.
Several works in recent years have addressed the challenge of tailoring the spectral and spatial properties of $signal$ and $idler$ photons generated via SPDC in theory and experiment. In the spatial domain, that is, the transverse momentum space, much of this work was motivated by the objective of improving fiber coupling efficiency \cite{PhysRevA.83.023810,srivastav2021characterising} or the dimensionality of spatial entanglement \cite{Krenn6243,PhysRevA.102.052412,PhysRevApplied.14.054069}. In the spectral domain, the motivation was usually to engineer pure spectral states, which are crucial for protocols based on multiphoton interference \cite{Caspani2017}. This has been performed either by tailoring the nonlinearity of the crystal \cite{Graffitti:18} or by using counterpropagating photon pair generation in periodically poled waveguides \cite{Luo:20}. The frequency degree of freedom (DOF) has also been used to generate entangled states via spatial shaping of the pump beam \cite{Francesconi2021} or by transferring polarization into color entanglement \cite{PhysRevLett.103.253601}. The spatial shaping of the pump beam has been also used in Hong-Ou-Mandel interference experiments, in order to control the two-photon interference behavior \cite{PhysRevLett.90.143601}.
Closed expressions for the state emitted by SPDC in bulk crystals have been derived using very special techniques and approximations, such as the narrowband \cite{PhysRevA.83.033816}, thin-crystal \cite{Yao_2011,PhysRevA.103.063508} or plane wave approximations \cite{PhysRevLett.99.243601}, where either the spectral or spatial biphoton state is considered. However, from the $X$-shaped spatio-temporal correlations \cite{PhysRevLett.102.223601,PhysRevLett.109.243901}, the spatial and spectral properties of SPDC have been known to be coupled. The $X$-shaped spatio-temporal correlation implies that if the twin photons are collected from different positions, they are detected with a certain time delay. In contrast, if the photons are detected at the same position, the time delay is very short (a few nanoseconds) \cite{Zhang2017}.
To date, models that address both spectrum and space together have been limited to approximate phase matching functions \cite{Osorio_2008} or numerical calculations \cite{PhysRevA.86.053803}. The work \cite{PhysRevLett.102.223601} investigated the quite general phase matching function, but the pump beam was limited to monochromatic plane wave.
Here, we present a simple-to-use closed expression for the biphoton state. The approach describes the full spectral and spatial properties of all interacting beams and applies to a wide range of experimental settings. The analytical treatment of the biphoton state decomposed into discrete Laguerre Gaussian (LG) modes also provides a deeper insight into the role of the Gouy phase in parametric down-conversion (PDC). In particular, the spectral response of spatial modes in SPDC is determined by the Gouy phase of the pump, signal and idler beams. We will also show that the Gouy phase can be used to control the coupling strength of spatial and spectral DOF in PDC. Besides providing an intuitive understanding, we also demonstrate the utility of the expression for quantum state engineering in spatial DOF for multidimensional quantum information processing.
\section{Theoretical Methods}
Let us start with the basic expressions of SPDC. We can make use of the paraxial approximation, since typical optical apparatuses support only paraxial rays about a central axis. In the paraxial regime, the longitudinal and transverse components of the wave vector can be treated separately $\bm{k}=\bm{q}+k_z(\omega)\bm{z}$. Consequently, the biphoton state in the momentum space can be represented by the following expression \cite{PhysRevA.62.043816,WALBORN201087,Karan_2020}
\begin{align}\label{SPDCstate}
\ket{\Psi} = \iint & d\bm{q}_s \: d\bm{q}_i \:d\omega_s \: d\omega_i\: \Phi(\bm{q}_s,\bm{q}_i,\omega_s,\omega_i)\nonumber\\&
\hat{a}^{\dagger}_s(\bm{q}_s,\omega_s)\:\hat{a}^{\dagger}_i(\bm{q}_i,\omega_i)\ket{vac}. \end{align}
Equation \eqref{SPDCstate} refers to the generation of photon pairs with energies $\omega_{s,i}$ and transverse momenta $\bm{q}_{s,i}$ from the vacuum state $\ket{vac}$. The biphoton mode function $\Phi(\bm{q}_s,\bm{q}_i,\omega_s,\omega_i)$ contains the rich high-dimensional spatio-temporal structure of SPDC that arises from the coupling between the wave vectors of the pump, signal, and idler beams.
\subsection{Biphoton state decomposed in Laguerre Gaussian basis}
The transverse spatial \cite{WALBORN201087,PhysRevA.83.052325} and frequency DOF \cite{PhysRevA.105.052429} have been successfully used in continuous variable information processing. However, in practical experimental settings, the continuous variable space is more often discretized using a set of modes. The proper choice of a set reduces the number of dimensions needed to describe the state. Moreover, discrete modes are easy to manipulate and detect using efficient experimental techniques \cite{Bolduc2013,Eckstein2011}. Since the projection of the orbital angular momentum (OAM) is conserved in SPDC \cite{Mair2001}, it is convenient to decompose the biphoton state into LG modes $\ket{p,\ell,\omega}=\int d\bm{q}\, \mathrm{LG}_{p}^{\ell}(\bm{q})\, \hat{a}^{\dagger}(\bm{q},\omega) \ket{vac} $, which are eigenstates of OAM \cite{ doi:10.1126/science.1227193}: \begin{align}\label{decomposition}
\ket{\Psi}= &\iint \:d\omega_s \: d\omega_i\: \nonumber \\& \sum_{p_s,p_i=0}^{\infty}\: \sum^{\infty}_{\ell_s,\ell_i=-\infty}C_{p_s,p_i}^{\ell_s,\ell_i} \ket{p_s,\ell_s,\omega_s}\ket{p_i,\ell_i,\omega_i}, \end{align}
where the coincidence amplitudes are calculated from the overlap integral $ C^{\ell_s,\ell_i}_{p_s,p_i} = \braket{p_s,\ell_s,\omega_s;p_i,\ell_i,\omega_i |\Psi}$,
\begin{align}\label{coe1}
C^{\ell_s,\ell_i}_{p_s,p_i}
= \iint d\bm{q}_s \: d\bm{q}_i \: \Phi(\bm{q}_s,\bm{q}_i,\omega_s,\omega_i)\:[\mathrm{LG}_{p_s}^{\ell_s}(\bm{q}_s)]^*\nonumber&\\
\times [\mathrm{LG}_{p_i}^{\ell_i}(\bm{q}_i)]^*. \end{align}
The angular distribution of an LG mode in the momentum space is given by
\begin{align} \label{LG}
\mathrm{LG}_{p}^{\ell}(\rho,\varphi)
=&e^{\frac{-\rho^2\,w^2}{4}}\,e^{i\ell\,\varphi}\,\sum_{u=0}^p\, T_u^{p,\ell}\, \rho^{2u+\abs{\ell}} \end{align} with $ T_u^{p,\ell}$ being
\begin{align*}
T_u^{p,\ell}= &\sqrt{\frac{p!\,(p+|\ell|)!}{\pi}}\,
\biggr(\frac{ w}{\sqrt{2}}\biggl)^{2u+|\ell|+1}\,\frac{(-1)^{p+u}(i)^{\ell}}{(p-u)!\,(\abs{\ell}+u)!\,u!}, \end{align*}
and where $\rho$ and $\varphi$ stand for the cylindrical coordinates $\bm{q}=(\rho,\varphi)$. The summations in Eq. \eqref{decomposition} run over the LG mode numbers $p$ and $\ell$ associated with the radial momentum and the OAM projection, respectively. Apart from the fact that we now deal with summations instead of integrations, this discretization will also help us to understand the coupling of spatial and spectral DOF in terms of the Gouy phase of LG modes. Note that we discretize only the transverse spatial DOF, but in principle, it is also possible to discretize the frequency DOF \cite{Gil-Lopez:21}.
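As a quick consistency check of Eq. \eqref{LG} (an illustrative special case): for the fundamental mode $p=\ell=0$ only the $u=0$ term contributes, with $T_0^{0,0}=w/\sqrt{2\pi}$, so that
\begin{equation*}
\mathrm{LG}_{0}^{0}(\rho,\varphi)=\frac{w}{\sqrt{2\pi}}\,e^{-\frac{\rho^2 w^2}{4}},\qquad
\int_0^{2\pi}\!\!\int_0^{\infty}\abs{\mathrm{LG}_{0}^{0}}^2\,\rho\,d\rho\,d\varphi
=\frac{w^2}{2\pi}\cdot 2\pi\cdot\frac{1}{w^2}=1,
\end{equation*}
i.e., the mode is normalized in the transverse momentum plane.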
The construction of the biphoton state reduces to the calculation of the coincidence amplitudes $C^{\ell_s,\ell_i}_{p_s,p_i}$, which in turn depend on the mode function $\Phi(\bm{q}_s,\bm{q}_i,\omega_s,\omega_i)$. A compact expression for the mode function can be derived if the experimental setup and geometry is fixed.
\subsection{Geometry and mode function} Here, we consider the scenario when a coherent laser beam propagates along the $z$ axis and is focused in the middle of a nonlinear crystal placed at $z=0$. Signal and idler fields propagate close to the pump direction, known as the quasicollinear regime. The crystal and the pump beam have typical transverse cross sections in the order of millimeters and micrometers, respectively. Hence, we assume that the crystal compared to the pump beam is infinitely extended in the transverse direction, which enforces the conservation of the transverse momentum, $\bm{q}_p=\bm{q}_{s}+\bm{q}_{i}$ \cite{PhysRevA.62.043816}. Taking into account also the energy conservation $\omega_p=\omega_s+\omega_i$, the mode function can be written as \cite{Karan_2020}
\begin{align} \label{phasefunction}
\Phi(\bm{q}_s,\bm{q}_i,\omega_s,\omega_i)=&N_0\,\mathrm{V}_p(\bm{q}_s+\bm{q}_i)\,\mathrm{S}_p(\omega_s+\omega_i)\nonumber\\&\times\int_{-L/2}^{L/2} dz\:\exp{\biggr[iz(k_{z,p}-k_{z,s}-k_{z,i})\biggl]},
\end{align}
where $N_0$ is the normalization constant, $\mathrm{V}_p(\bm{q}_p)$ is the spatial and $\mathrm{S}_p(\omega_p)$ the spectral distribution of the pump beam, and $L$ is the length of the nonlinear crystal along the $z$ axis.
The important component of the mode function \eqref{phasefunction} is the phase mismatch in the $z$ direction, $\Delta k_z=k_{z,p}-k_{z,s}-k_{z,i}$, which characterizes the differences in the energies and momenta of the signal and idler photons. Therefore, a careful calculation of $\Delta k_z$ is essential for the quantitative description of SPDC, and we carry out this calculation next.
Experimentally generated light fields are usually not monochromatic but have a frequency distribution. Therefore, besides the central frequencies that meet the energy conservation condition $\omega_{0,p}=\omega_{0,s}+\omega_{0,i}$, we allow for a deviation from them, $\omega=\omega_0+\Omega$ with the assumption $\Omega\ll\omega_0$. Furthermore, in the paraxial approximation, the transverse component of the momentum is much smaller than the longitudinal component, $|\bm{q}|\ll k$. Hence, we can Taylor expand $k_z$ in $|\bm{q}|/k$ (Fresnel approximation) and also in the small detuning $\Omega$:
\begin{equation*}
k_z=k(\Omega)\sqrt{1-\frac{|\bm{q}|^2}{k(\Omega)^2}}\approx k+\frac{\Omega}{u_g}+\frac{G\Omega^2}{2}-\frac{|\bm{q}|^2}{2k},
\end{equation*}
where $u_g=1/(\partial k/\partial \Omega)$ is the group velocity and $G=\partial/\partial \Omega \,(1/u_g)$ is the group velocity dispersion, evaluated at the respective central frequency. Here, we also assume that the propagation is along a principal axis of the crystal, so we can ignore the Poynting vector walk-off of extraordinary beams in the crystal. Next, we insert the corresponding $k_z$ of the pump, signal, and idler into the phase mismatch $\Delta k_z$ and arrive at
\begin{equation}\label{phaseMatching} \Delta k_z= \Delta_{\Omega}+\rho_{s}^2\frac{k_p-k_s}{2k_pk_s}+\rho_{i}^2\frac{k_p-k_i}{2k_pk_i} -\frac{\rho_{s}\rho_{i}}{k_p}\cos{(\varphi_i-\varphi_s)}, \end{equation}
where the frequency part $\Delta_{\Omega}$ is given by
\begin{align}\label{spectralStr} \Delta_{\Omega}=&\frac{\Omega_s+\Omega_i}{u_{g,p}}-\frac{\Omega_s}{u_{g,s}}-\frac{\Omega_i}{u_{g,i}}+\frac{G_p(\Omega_s+\Omega_i)^2}{2}\nonumber\\& -\frac{G_s\Omega_s^2}{2}-\frac{G_i\Omega_i^2}{2}. \end{align}
We used in Eq. \eqref{phaseMatching} the relation $\rho_p^2=\rho^2_s+\rho^2_i+2\rho_s\rho_i\cos{(\varphi_i-\varphi_s)}$ and assumed momentum conservation for central frequencies $\Delta k=k_p-k_s-k_i=0$. The condition $\Delta k=0$ ensures constructive interference in the crystal between the pump, signal, and idler beams, which is usually performed with birefringent crystals \cite{Karan_2020} or more recently by periodic poling along the crystal axis, $k_p-k_s-k_i-2 \pi /\Lambda=0$, where $\Lambda$ is the poling period \cite{doi:10.1063/1.123408}.
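To spell out the reduction leading to Eq. \eqref{phaseMatching} (a short intermediate step for the reader's convenience): inserting the Fresnel expansions of $k_{z,p}$, $k_{z,s}$ and $k_{z,i}$ into $\Delta k_z$ gives
\begin{equation*}
\Delta k_z \approx (k_p-k_s-k_i)+\Delta_{\Omega}-\frac{\rho_p^2}{2k_p}+\frac{\rho_s^2}{2k_s}+\frac{\rho_i^2}{2k_i},
\end{equation*}
which turns into Eq. \eqref{phaseMatching} after substituting $\rho_p^2=\rho^2_s+\rho^2_i+2\rho_s\rho_i\cos{(\varphi_i-\varphi_s)}$, setting $\Delta k=k_p-k_s-k_i=0$ (or its quasi-phase-matched analog) and using $\frac{1}{2k_s}-\frac{1}{2k_p}=\frac{k_p-k_s}{2k_pk_s}$, and analogously for the idler.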
The remaining components of the mode function \eqref{phasefunction} that we should still fix are the pump characteristics. We model the angular distribution of the pump with an LG beam. The advantage of this choice is that an arbitrary paraxial optical field can be expressed as a sum of LG beams $\sum_n a_n \mathrm{LG}_{p_n}^{\ell_n} $ with $\sum_n |a_n|^2=1$ by using their completeness relation. Thus, the theory developed for the LG pump can be easily extended to SPDC with a particular pump. The amplitudes \eqref{expression} can then be replaced by the revised amplitudes $\sum_n a_n C_n$, which follows from Eq. \eqref{coe1}. Finally, the temporal distribution is modeled with a Gaussian envelope of pulse duration $t_0$, $\mathrm{S}_p(\omega_p)=\exp{[-(\omega_p-\omega_{0,p})^2\,t_0^2/4]}\, t_0/\sqrt{\pi}$ \cite{PhysRevA.56.1627}, which can be extended to an arbitrary pump spectrum.
\subsection{Derivation of coincidence amplitudes} We can now substitute Eqs. \eqref{LG}-\eqref{spectralStr} into Eq. \eqref{coe1} and calculate the coincidence amplitudes: \begin{widetext} \begin{align} \label{expansionfirst}
C_{p,p_s,p_i}^{\ell,\ell_s,\ell_i}=N_0\sum_{u=0}^p\,\sum_{s=0}^{p_s}\,\sum_{i=0}^{p_i}\,T_u^{p,\ell}\: (T_s^{p_s,\ell_s})^* \:(T_i^{p_i,\ell_i})^*\: \int\,dz\,d\rho_s\,d\rho_i\,d\varphi_s\,d\varphi_i\,
\Theta(z,\rho_s,\rho_i,\varphi_i-\varphi_s)
e^{i\ell\varphi_s}\,e^{i(-\ell_s\varphi_s-\ell_i\varphi_i)}, \end{align}
where we used a revised notation for the coincidence amplitudes $C_{p,p_s,p_i}^{\ell,\ell_s,\ell_i}$ to indicate the mode numbers of the pump. The function $\Theta(z,\rho_s,\rho_i,\varphi_i-\varphi_s)$ is defined as
\begin{align}
\Theta(z,\rho_s,\rho_i,\varphi_i-\varphi_s)=&
[\rho^2_s+\rho^2_i+2\rho_s\rho_i\cos{(\varphi_i-\varphi_s)}]^\frac{2u+(\abs{\ell}-\ell)}{2}\,\rho_s^{\,\abs{\ell_s}+2s+1}\,
\rho_i^{\,\abs{\ell_i}+2i+1}\,(\rho_s+\rho_i\,e^{i(\varphi_i-\varphi_s)})^{\ell}
\nonumber\\&\times
\exp{\biggl[-\frac{[\rho^2_s+\rho^2_i+2\rho_s\rho_i\cos{(\varphi_i-\varphi_s)}]\,w^2}{4}-\frac{\rho_s^2\,w_s^2}{4}-\frac{\rho_i^2\,w_i^2}{4}\biggr]}\,\frac{t_0}{\sqrt{\pi}}e^{-\frac{t_0^2(\Omega_s+\Omega_i)^2}{4}}
\nonumber \\& \times
\exp{\biggr[iz\biggr(\Delta_{\Omega}+\rho_s^2\frac{k_p-k_s}{2k_pk_s}+\rho_i^2\frac{k_p-k_i}{2k_pk_i}-\cos{(\varphi_i-\varphi_s)}\frac{\rho_s\rho_i}{k_p}\biggl)\biggl]}\label{bigexpression}. \end{align} \end{widetext}
In Eq. \eqref{bigexpression}, the polar angle $\varphi$ of the pump beam has been expressed as a function of signal and idler coordinates, \begin{equation*} e^{i\,\ell\,\varphi}=(\cos{\varphi}+i\sin{\varphi})^{\ell}=\frac{e^{i\ell\varphi_s}}{\rho_p^{\ell}}\,(\rho_s+\rho_i\,e^{i(\varphi_i-\varphi_s)})^{\ell}, \end{equation*}
by taking into account the conservation of transverse momentum, \begin{equation*} \bm{q}_p=\bm{q}_{s}+\bm{q}_{i}= \begin{pmatrix} \rho_{s}\cos{\varphi_{s}}+\rho_{i}\cos{\varphi_{i}}\\ \rho_{s}\sin{\varphi_{s}}+\rho_{i}\sin{\varphi_{i}} \end{pmatrix}. \end{equation*} The presentation of the coincidence amplitudes $C_{p,p_s,p_i}^{\ell,\ell_s,\ell_i}$ in Eq. \eqref{expansionfirst} with the function $\Theta(z,\rho_s,\rho_i,\varphi_i-\varphi_s)$ serves to make the OAM conservation in SPDC explicit. To do so, we expand the function $\Theta(z,\rho_s,\rho_i,\varphi_i-\varphi_s)$ as a superposition of plane waves with the phases $\exp{[i\ell^{'}(\varphi_i-\varphi_s)]}$ (Fourier series with complex coefficients),
\begin{equation}\label{phasedif}
\Theta(z,\rho_s,\rho_i,\varphi_i-\varphi_s)=\sum_{\ell^{'}=-\infty}^{\infty}f_{\ell^{'}}(z,\rho_s,\rho_i)e^{i\ell^{'}(\varphi_i-\varphi_s)}.
\end{equation} We substitute expression \eqref{phasedif} into Eq. \eqref{expansionfirst} and perform the integration over the polar angles $\varphi_s$ and $\varphi_i$:
\begin{align}
\sum^\infty_{\ell^{'}=-\infty}f_{\ell^{'}}(z,\rho_s,\rho_i)\int_0^{2\pi}\int_0^{2\pi} e^{i\ell\varphi_s}\,e^{i(-\ell_s\varphi_s-\ell_i\varphi_i)}\nonumber\\
\times e^{i\ell^{'}(\varphi_i-\varphi_s)}d\varphi_sd\varphi_i \propto \delta_{\ell^{'},\ell-\ell_s}\delta_{\ell^{'},\ell_i}.\label{delta} \end{align}
As expected, Kronecker delta functions appear in Eq. \eqref{delta}, which enforce the conservation of OAM, $\ell-\ell_s=\ell_i$. This conservation does not hold outside the quasicollinear regime \cite{MOLINATERRIZA2003155} because of the spin-orbit angular momentum coupling in the nonparaxial regime \cite{PhysRevA.99.023403}. In the noncollinear regime, the total angular momentum should still be conserved, which could be a topic for future study.
Going back to expression \eqref{expansionfirst}, we now calculate the integration over polar coordinates $\varphi_{s,i}$ explicitly. For simplicity, we consider the coincidence amplitudes $ C_{p,p_s,p_i}^{\ell,\ell_s,\ell_i}$ for positive OAM number of the pump beam $\ell \geq 0$. The coincidence amplitude for $\ell< 0$ is then given by $ C_{p,p_s,p_i}^{\ell,\ell_s,\ell_i}= (C_{p,p_s,p_i}^{-\ell,-\ell_s,-\ell_i})^*$, which follows from Eq. \eqref{coe1}. Furthermore, the two brackets on the first line in Eq. \eqref{bigexpression} should be rewritten as finite sums by using the binomial formula. For instance, the first bracket is written as \begin{align*}
[\rho^2_s+\rho^2_i+2\rho_s\rho_i\cos{(\varphi_i-\varphi_s)}]^u=\sum_{m=0}^{u}
\binom{u}{m}(\rho^2_s+\rho^2_i)^{u-m}&\\
\times [2\rho_s\rho_i\cos{(\varphi_i-\varphi_s)}]^m. \end{align*}
The \textit{cosine} function can be expressed as the sum of two exponential functions by using Euler's formula, which should then again be expanded as a binomial sum. After this step, the angular integration takes the form of the integral representation of the Bessel function of the first kind \cite{YOUSIF1997199}
\begin{equation*} \frac{1}{2\pi}\int^{2\pi}_0 e^{i n\varphi \pm iz\cos{(\varphi-\varphi^{\prime})}
}d\varphi= (\pm i)^n e^{i n\varphi^{\prime}} J_n(z). \end{equation*} Next, the sum representation of the Bessel function should be used \begin{equation}
J_n(z)= \sum_{k=0}^{\infty} \frac{(-1)^k}{k!\, \Gamma (k+n+1)}\biggl(\frac{z}{2}\biggr)^{2k+n},\label{Bsum} \end{equation}
which transforms the integration over the radial coordinates into \begin{equation*}
\int_0^{\infty} d\rho\,\rho^n e^{-a \,\rho^2}=\frac{\Gamma(\frac{n+1}{2})}{2a^{\frac{n+1}{2}}}. \end{equation*} The final result is obtained by performing the summation over $k$ in Eq. \eqref{Bsum} using the definition of the \textit{regularized} hypergeometric function \cite{Hypergeometric2F1}. For $\ell \geq 0$, the coincidence amplitudes read
\begin{align} \label{expression}
C_{p,p_s,p_i}^{\ell,\ell_s,\ell_i}
= & N_0\,\pi^{3/2}\:t_0\: e^{-\frac{t_0^2(\Omega_s+\Omega_i)^2}{4}} \: \delta_{\ell,\ell_s+\ell_i} \nonumber\\
& \sum_{u=0}^{p}\sum_{s=0}^{p_s}\sum_{i=0}^{p_i} T_u^{p,\ell}\: (T_s^{p_s,\ell_s})^* \:(T_i^{p_i,\ell_i})^*\: \sum_{n=0}^{\ell}\sum_{m=0}^{u}\nonumber\\
&
\binom{\ell}{n}\binom{u}{m}\sum_{f=0}^{u-m}\sum_{v=0}^{m}\: \binom{u-m}{f}\binom{m}{v} \Gamma[h]\:\Gamma[b]\nonumber\\
&
\int_{-L/2}^{L/2}dz\:e^{i z\, \Delta_{\Omega}}\:\frac{D^{d}}{H^{h}\: B^{b}}\: {_2}{\Tilde{F}}_1\biggl[h,b, 1+d,\frac{D^2}{H \,B
}\biggl] \end{align}
and $ C_{p,p_s,p_i}^{\ell,\ell_s,\ell_i}= (C_{p,p_s,p_i}^{-\ell,-\ell_s,-\ell_i})^*$ for $\ell < 0$. The function ${_2}{\Tilde{F}}_1$ is known as the \textit{regularized} \textit{hypergeometric} function \cite{Hypergeometric2F1}. The missing coefficients of Eq. \eqref{expression} are given by
\begin{eqnarray*}
H &=& \frac{w_p^2}{4}+\frac{w_s^2}{4}-i z\frac{k_p-k_s}{2k_p k_s}, \qquad D = -\frac{ w_p^2}{4}-iz\frac{1}{2k_p}, \\[0.1cm]
B& = & \frac{w_p^2}{4}+\frac{w_i^2}{4}-i z\frac{k_p-k_i}{2k_p k_i}, \qquad d =\ell_i+m-n-2v, \\[0.1cm]
h & = & \frac{1}{2}(2+2s+\ell+\ell_i+2(-f+u)-2n-2v+\abs{\ell_s}), \\[0.1cm]
b &=& \frac{1}{2}(2+2f+2i+\ell_i+2m-2v+\abs{\ell_i}), \end{eqnarray*}
where $w_p$, $w_s$ and $w_i$ are the beam waists of the pump, signal and idler beams, respectively.
Expression \eqref{expression} for the coincidence amplitudes as a function of the pump mode constitutes the main result of this work. It allows the spatial and spectral emission profiles to be reconstructed mode by mode and is applicable in any experimental setting that exhibits cylindrical symmetry. It can be readily used to calculate many characteristics of SPDC: joint spectral density, photon bandwidths, pair-collection probability, heralding ratio, spectral and spatial correlation, etc. Previously, these could only be achieved through numerical calculations or for special cases with a limited scope of applicability. The experimental demonstration of Eq. \eqref{expression} has already been presented in Ref. \cite{carlos}, where we also showed how the coupling of spatial and spectral DOF deteriorates the spatial entanglement but can be compensated directly by a proper choice of the collection mode.
\subsection{Gouy phase and spatio-temporal coupling} The spatio-temporal coupling encoded in Eq. \eqref{expression} is a fundamental feature of SPDC. However, the usual applications in quantum optics utilize either the spatial or spectral DOF, neglecting the correlation between them. Nevertheless, this coupling remains a fundamental issue in many protocols based on entangled photon sources, where any distinguishability arising from not-considered DOF reduces the coherence of the state. Next, we will illustrate the utility of expression \eqref{expression} in the frame of possible decoupling of spatial and spectral DOF $\Phi(\bm{q}_s,\bm{q}_i,\omega_s,\omega_i)= \Phi_{\bm{q}}(\bm{q}_s,\bm{q}_i)\Phi_{\omega}(\omega_s,\omega_i)$. We will show that this decoupling is closely related to the Gouy phase of interacting beams.
The role of the Gouy phase in nonlinear processes has been investigated before. For instance, in SPDC, the change of the Gouy phase $\psi_G(z)=(N+1)\arctan(z/z_R)$ along the propagation distance has been used to control the relative phase of two different LG modes of the measurement basis \cite{PhysRevLett.101.050501,DEBRITO2021126989}. Here, $N$ is the combined LG mode number $N=2p+\abs{\ell}$ and $z_R$ is the Rayleigh length. In four-wave mixing (FWM), the conversion behavior between LG modes is strongly affected by the Gouy phase \cite{PhysRevA.103.L021502}. The authors observed that the existence of a relative Gouy phase between modes with different mode numbers $N$ leads to a reduced FWM efficiency.
Here, we have a similar situation: the pump, signal, and idler fields acquire different Gouy phases during propagation in the crystal due to their different mode numbers $N$, causing a reduced efficiency of mode down-conversion. We intuitively expect that the shape of the spectrum of spatial modes is affected by the relative Gouy phase of the interacting beams. At this point, however, this is still a conjecture and requires proof.
We consider for simplicity the scenario in which the Rayleigh lengths of the three beams are equal $z_{R,p}=z_{R,i}=z_{R,s}$ and fixed. This condition matches the Gouy angle $\arctan(z/z_R)$ for all beams. Hence, the relative Gouy phase can be written as \begin{equation*}
\psi_{G,p}-\psi_{G,s}-\psi_{G,i}=(N_p-N_s-N_i-1)\arctan(z/z_R). \end{equation*}
This implies that the Gouy phase is fully defined by the relative mode number $N_R= N_p-N_s-N_i$. If the Gouy phase is responsible for different spectral dependencies of the coincidence amplitudes $C_{p,p_s,p_i}^{\ell,\ell_s,\ell_i}(\Omega_s$,$\Omega_i)$, the shape of the spectrum should remain the same for fixed $N_R$. Assuming $k_p= 2 k_s$, Eq. \eqref{expression} transforms into
\begin{equation}
C_{p,p_s,p_i}^{\ell,\ell_s,\ell_i}(\Omega_s,\Omega_i)\propto \int_{-L/2}^{L/2}dz\:e^{i z\, \Delta_{\Omega}}\:\frac{(i2z+k_p w_p^2)^{N_R}}{(-i2z+k_p w_p^2)^{N_R+1}}. \label{gouy} \end{equation}
We see from Eq. \eqref{gouy} that the spectral response of $C_{p,p_s,p_i}^{\ell,\ell_s,\ell_i}(\Omega_s,\Omega_i)$ encoded only in the term $e^{i z\, \Delta_{\Omega}}$ remains unaffected up to a constant if $N_R$ is fixed. On the other hand, $N_R$ can be rewritten as
\begin{equation}\label{Rnumber}
N_R=\frac{ \psi_{G,R}}{\arctan(z/z_R)}+1, \end{equation}
where $\psi_{G,R}$ is the relative Gouy phase $\psi_{G,p}-\psi_{G,s}-\psi_{G,i}$. Therefore, it follows from Eqs. \eqref{gouy} and \eqref{Rnumber} that the spectral response of coincidence amplitudes is determined by the relative Gouy phase $\psi_{G,R}$ if the pump characteristics $z_R$, $w_p$, and $k_p$ are fixed. This is what we wanted to prove. Note that the simple form of Eq. \eqref{gouy} is due to the assumptions $k_p= 2 k_s$ and $z_{R,p}=z_{R,i}=z_{R,s}$. The analytical proof for the general case requires more effort, which we omit here.
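As an illustration of Eq. \eqref{gouy}, consider the lowest-order case $N_R=0$ together with a weakly focused pump, $k_pw_p^2=2z_R\gg L$ (the thin-crystal limit used below). The $z$ dependence of the last factor can then be neglected and
\begin{equation*}
C_{p,p_s,p_i}^{\ell,\ell_s,\ell_i}(\Omega_s,\Omega_i)\propto \frac{1}{k_pw_p^2}\int_{-L/2}^{L/2}dz\;e^{i z\, \Delta_{\Omega}}=\frac{L}{k_pw_p^2}\,\mathrm{sinc}\Big(\frac{L\,\Delta_{\Omega}}{2}\Big),
\end{equation*}
with $\mathrm{sinc}(x)=\sin(x)/x$, recovering the familiar sinc-shaped phase-matching spectrum of collimated SPDC.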
This proof brings us a step forward in the decoupling problem of spatial and spectral DOF: the decoupling can be achieved for a selected subspace of modes that possess the same relative Gouy phase. So, if a state is engineered that consists only of modes with constant $N_R$, assuming $z_{R,p}=z_{R,i}=z_{R,s}$, then the modes contributing to the state have the same spectrum, i.e., the state is separable. The question of decoupling of spatial and spectral DOF can now be reformulated: how do we engineer a state consisting only of modes with the same relative mode number $N_R$?
\begin{figure*}\label{fig1.pdf}
\end{figure*}
\section{Engineering high-dimensional entangled states in OAM basis}
The state engineering in spatial DOF has been investigated theoretically in the thin crystal regime \cite{PhysRevA.67.052313,https://doi.org/10.1002/qute.202100066} and also implemented experimentally \cite{PhysRevA.98.060301,PhysRevA.98.062316}. In particular, three-, four-, and five-dimensional entangled states in OAM basis have been generated in Ref. \cite{PhysRevA.98.060301} using a superposition of LG beams for the pump. The correct superposition for the pump has been determined with a simultaneous perturbation stochastic approximation algorithm \cite{705889}.
We show in this section how to calculate the correct superposition of LG beams with Eq. \eqref{expression}, in order to generate entangled states in the OAM basis, including the states from Ref. \cite{PhysRevA.98.060301}. Our method is very straightforward and requires no optimization algorithm. In comparison to Refs. \cite{PhysRevA.67.052313,https://doi.org/10.1002/qute.202100066}, our results can be directly implemented in a real experiment, since we do not rely on the thin crystal approximation. State engineering in the thin crystal regime is inefficient due to the infinite number of spatial modes generated in the down-conversion.
\subsection{Determination of pump beam} We consider the four-dimensional subspace $\ell_s,\ell_i=0,1,2,3$ and $p_s=p_i=0$, which we refer to as $S_4$, with associated notation $\ket{p_s=0,\ell_s,\omega_s}\ket{p_i=0,\ell_i,\omega_i}:=\ket{\ell_s(\omega_s),\ell_i(\omega_i)}$. The goal is to engineer a four-dimensional maximally entangled state in this subspace. We model the pump beam as a superposition of LG beams,
\begin{eqnarray*}
\mathrm{V}_p & = & \sum_{\ell}a_{\ell}\;\mathrm{LG}_{0}^{\ell}, \end{eqnarray*}
where the range of summation is determined with the possible minimal and maximal OAM values in the subspace, $\ell=[\mathrm{min}(\ell_s+\ell_i),\mathrm{max}(\ell_s+\ell_i)]$. The correct choice of the expansion amplitudes $a_{\ell}$ is now our task. Since the pump function appears in Eq. \eqref{decomposition} linearly, the corresponding state in $S_4$ is given by \begin{align*}
\ket{\Psi_4}
= \sum^6_{\ell=0}a_{\ell} \:\sum^{3}_{\ell_s,\ell_i=0}C_{0,0,0}^{\ell,\ell_s,\ell_i} \ket{\ell_s,\ell_i}. \end{align*}
The matrix representation of the state $\ket{\Psi_4}$ can clarify the right choice of the coefficients $a_{\ell}$. The matrix consists of $16$ elements and is given by the left-hand side of the following expression:
\begin{align} \begin{pmatrix} \textcolor{blue}{a_0}\,C_{0,0} & \textcolor{red}{a_1}\,C_{1,0} &\textcolor{blue}{a_2}\,C_{2,0}& a_3\,C_{3,0}\\ \textcolor{red}{a_1\,}C_{0,1} & \textcolor{blue}{a_2}\,C_{1,1} & a_3\,C_{2,1}& \textcolor{blue}{a_4}\,C_{3,1}\\ \textcolor{blue}{a_2}\,C_{0,2} & a_3\,C_{1,2} & \textcolor{blue}{a_4}\,C_{2,2}&\textcolor{red}{a_5}\,C_{3,2}\\ a_3\,C_{0,3} & \textcolor{blue}{a_4}\,C_{1,3} & \textcolor{red}{a_5}\,C_{2,3}& \textcolor{blue}{a_6}\,C_{3,3} \end{pmatrix}\rightarrow \begin{pmatrix} 0 & \textcolor{red}{1} &0&0\\ \textcolor{red}{1} & 0 & 0& 0\\ 0& 0 & 0&\textcolor{red}{1}\\ 0& 0 & \textcolor{red}{1}& 0 \end{pmatrix},\label{matrix} \end{align}
where we used the notation $ C_{0,0,0}^{\ell_s+\ell_i,\ell_s,\ell_i}= C_{\ell_i,\ell_s}$. The state becomes maximally entangled in this subspace if the matrix has exactly one entry of $1$ in each row and each column and $0$ elsewhere (permutation matrix). The right-hand side of expression \eqref{matrix} is such a state that can be engineered if we select $a_1=1/C_{0,1}\approx1/C_{1,0}$, $a_5=1/C_{2,3}\approx1/C_{3,2}$ and $a_0=a_2=a_3=a_4=a_6=0$, where we assumed degenerate SPDC $k_p\approx 2 k_s$. This choice leads to the state $\ket{\Psi_4}=\frac{1}{2}(\ket{0,1}+\ket{1,0}+\ket{2,3}+\ket{3,2})$. Thus, the state engineering is finished, where the coefficients of the pump superposition $\{a_1,a_5\}$ should be calculated with the expression \eqref{expression}. In the same way, the state $\ket{\Psi^{\prime}_4}=\frac{1}{2}(\ket{0,0}+\ket{1,1}+\ket{2,2}+\ket{3,3})$ from Ref. \cite{PhysRevA.98.060301} can also be engineered, if we select $\{a_0,a_2,a_4,a_6\}$ to be equal to $\{1/C_{0,0},1/C_{1,1},1/C_{2,2},1/C_{3,3}\}$ and $a_1=a_3=a_5=0$.
The states $\ket{\Psi^{\prime}_4}$ and $\ket{\Psi_4}$ are presented in Figs. \ref{fig1.pdf}(a) and \ref{fig1.pdf}(b) with blue-colored bars on top. As we can see, the modes contributing to the states $\ket{\Psi^{\prime}_4}$ and $\ket{\Psi_4}$ represent just a part of the full OAM emission (spiral bandwidth). Therefore, the postselection should be the final step in the engineering process, where undesirable modes are sorted out. Next, we should calculate the Schmidt number and the purity of the presented states, in order to evaluate the efficiency of the state preparation in the subspace $S_4$. We will use for all our calculations the same experimental parameters as in Ref. \cite{PhysRevA.98.060301}: $15$-$mm$-thick periodically poled $\mathrm{KTiOPO}_4$ crystal designed for a collinear frequency degenerate type-II phase matching, continuous-wave laser of wavelength $405$ \textit{nm} with beam waist $w_p=25$ $\mu m$ and detection modes of radius $w_{s,i}=33$ $\mu m$.
\subsection{Schmidt number and purity of subspace states}
We compare first the azimuthal Schmidt numbers of the states $\ket{\Psi_4}$ and $\ket{\Psi^{\prime}_4}$ in the subspace $S_4$. Obviously, the diagonal modes $\{\ket{0,2},\ket{2,0},\ket{1,3},\ket{3,1}\}$ in Fig. \ref{fig1.pdf} (a) are non-desirable and lead to a decrease of entanglement in $S_4$. Consequently, the state $\ket{\Psi^{\prime}_4}$ has an azimuthal Schmidt number less than $4$, $K=2.04$, while the Schmidt number of the state $\ket{\Psi_4}$ equals $4$. Therefore, the preparation of the state $\ket{\Psi_4}$ is more efficient in $S_4$ than for $\ket{\Psi^{\prime}_4}$. $K=4$ is necessary, but not a sufficient condition for a four-dimensional state to be maximally entangled. Additionally, the state should be pure. Hence, the state $\ket{\Psi_4}$ can be called maximally entangled in $S_4$, if it is also spatially pure.
In order to calculate the spatial purity of $\ket{\Psi_4}$, we need the reduced density matrix $\rho_{\bm{q}}$, which results from tracing over the frequency, $\rho_{\bm{q}}=\mathrm{Tr}_{\Omega}(\rho)$. For a continuous-wave laser, $\mathrm{S}_p(\omega_p)\propto\delta(\omega_p-\omega_{0,p})$, which sets the condition $\Omega_s=-\Omega_i:=\Omega$ and transforms Eq. \eqref{decomposition} into
\begin{align}
\ket{\Psi}= &\iint \:d\Omega\: \sum^{\infty}_{\ell_s,\ell_i=-\infty}C_{\ell_s,\ell_i}(\Omega) \ket{\ell_s,\Omega}\ket{\ell_i,-\Omega}. \end{align}
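As a side remark (an immediate consequence of Eq. \eqref{spectralStr}), under the condition $\Omega_s=-\Omega_i=\Omega$ the pump terms drop out of the spectral phase mismatch and
\begin{equation*}
\Delta_{\Omega}=\Omega\Big(\frac{1}{u_{g,i}}-\frac{1}{u_{g,s}}\Big)-\frac{(G_s+G_i)\,\Omega^2}{2},
\end{equation*}
so that the frequency dependence of the mode function enters only through the group-velocity mismatch and the dispersion of the signal and idler photons.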
Now, we calculate the density matrix $\rho=|\Psi\rangle\langle\Psi|$ and then trace over the spectral domain, which yields: \begin{equation}
\rho_{\bm{q}}=\sum_{\ell_s,\ell_i}\sum_{\Tilde{\ell_s},\Tilde{\ell_i}}A^{\Tilde{\ell_s},\Tilde{\ell_i}}_{\ell_s,\ell_i}|\ell_s,\ell_i\rangle\langle\Tilde{\ell_s},\Tilde{\ell_i}|,\label{density} \end{equation}
where $A^{\Tilde{\ell_s},\Tilde{\ell_i}}_{\ell_s,\ell_i}=\int d\Omega\: C_{\ell_s,\ell_i}(\Omega)\:[C_{\Tilde{\ell_s},\Tilde{\ell_i}}(\Omega)]^*$ is the overlap integral of the spectra of the OAM modes. Equation \eqref{density} is very useful to calculate the spatial purity in small subspaces.
We run summations in Eq. \eqref{density} over $\ell_s,\ell_i,\Tilde{\ell_s},\Tilde{\ell_i}=0,1,2,3$, renormalize the state, construct the density matrix of the subspace $\rho_{\bm{q},s}$ and calculate the purity $\mathrm{Tr}(\rho^2_{\bm{q},s})$. Here, the subscript $s$ indicates the consideration of the subspace $S_4$. In fact, the state $\ket{\Psi_4}$ is spatially pure, $\mathrm{Tr}(\rho^2_{\bm{q},s})=1$. The reason is very trivial: all modes that contribute to the state consist of only positive OAM numbers, which leads to the same $N_R=\abs{\ell}-\abs{\ell_s}-\abs{\ell_i}=0$ for all modes due to $\ell=\ell_s+\ell_i$. Moreover, the experimental parameters from Ref. \cite{PhysRevA.98.060301} satisfy the condition $z_{R,p}\approx z_{R,i}\approx z_{R,s}$. Hence, all modes have the same relative Gouy phase and consequently, the same spectrum, which is presented in Fig. \ref{fig1.pdf}(c) with the blue curve. This means that the spatial and spectral DOF are decoupled in $S_4$. Interestingly, even though the authors did not consider the spectral DOF, the prepared state from Ref. \cite{PhysRevA.98.060301} is also separable in space and frequency in the smaller subspace of only four modes $\{\ket{0,0},\ket{1,1},\ket{2,2},\ket{3,3}\}$. We suppose that the engineering of maximally entangled states in spatial DOF in a certain subspace enforces automatic decoupling in spatial and spectral DOF in that subspace.
\subsection{Purity and Schmidt number of the full biphoton state}
Obviously, the subspace $S_4$ is only a part of the full SPDC emission. The first OAM modes outside the subspace in Fig. \ref{fig1.pdf}(b), $\ket{2,-1}$ and $\ket{3,-2}$, possess different spectra in contrast to the modes in $S_4$, shown in Fig. \ref{fig1.pdf}(c) with dotted and dashed curves, respectively. The appearance of modes with distinguishable spectra indicates the inseparability of spatial and spectral DOF outside $S_4$. The more distinguishable modes contribute to the state, the stronger the spatio-temporal coupling. This, in turn, leads to a reduced purity of the spatial biphoton state. Usually, narrowband filters are placed in front of the detectors to increase the purity of the spatial state. On the one hand, the spectral filters improve the purity of the spatial state; on the other hand, they reduce the rate of entangled photons.
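To make the different spectral behavior plausible with a small worked example (using $N_R=N_p-N_s-N_i$ with $N=2p+\abs{\ell}$ and $p=0$ for all modes considered here): every mode of $\ket{\Psi_4}$ satisfies $N_R=\abs{\ell_s+\ell_i}-\abs{\ell_s}-\abs{\ell_i}=0$, e.g., $\ket{0,1}$ gives $1-0-1=0$ and $\ket{2,3}$ gives $5-2-3=0$, whereas the modes outside $S_4$ give
\begin{equation*}
N_R\big(\ket{2,-1}\big)=\abs{1}-\abs{2}-\abs{-1}=-2,\qquad
N_R\big(\ket{3,-2}\big)=\abs{1}-\abs{3}-\abs{-2}=-4 .
\end{equation*}
According to Eqs. \eqref{gouy} and \eqref{Rnumber}, these modes therefore acquire different relative Gouy phases and, consequently, different spectra.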
We calculated the spatial purity $\mathrm{Tr}(\rho^2_{\bm{q},\mathrm{full}})$ \cite{Osorio_2008} of the full biphoton state \eqref{SPDCstate} as a function of the filter bandwidth $\Delta \lambda$, in order to quantify the influence of spectral filters on the biphoton state. As the pump, we chose the same beam that leads to the state $\ket{\Psi_4}$. Very narrow filters are required to end up with a nearly pure state, as we can see from Fig. \ref{fig2}. For instance, a typical spectral filter with a bandwidth of $1$ \textit{nm} would leave the state in a mixed state of purity $0.33$.
The Schmidt number of the full spatio-temporal biphoton state is also different in comparison to the subspace state. The total Schmidt number can be calculated from the reduced density matrix in space and frequency for the signal by tracing over the idler $\rho_{\mathrm{signal}}=\mathrm{Tr}_{\mathrm{idler}}(\rho)$\cite{Osorio_2008}. The Schmidt number is then given by $K=1/\mathrm{Tr}(\rho^2_{\mathrm{signal}})$ \cite{computing}. The number of both spatial and spectral Schmidt modes in the range of frequencies $810 \pm 10$ \textit{nm} equals $140$, where $810$ \textit{nm} is the central frequency for signal and idler photons. In comparison, the number of Schmidt modes generated only at central frequency $810$ $nm$ equals $5.8$.
Finally, a small remark about the thin crystal regime: the spatio-temporal coupling is absent in the thin crystal regime $L \ll z_{R,p}$, since the biphoton state is then independent of the crystal features. The problem with this regime is that it gives rise to a huge number of spatial modes. Assume we keep all parameters the same as in Ref. \cite{PhysRevA.98.060301}, but change the crystal length to $L=1$ $\mu m$. The thin crystal regime is then well achieved according to Ref. \cite{PhysRevA.103.063508}. The state becomes spatially pure, but possesses a large number of Schmidt modes, $10^7$.
\begin{figure}
\caption{Purity of the spatial biphoton state depending on the bandwidth of spectral filter.}
\label{fig2}
\end{figure}
\section{CONCLUSION} In summary, we derived a closed analytical expression for the biphoton spatio-temporal state in terms of the LG mode amplitudes. The expression readily reveals the dependence of the modal decomposition on frequency and thus correctly describes spectral-spatial coupling, a quintessential feature of SPDC. The expression provides a new understanding of how the Gouy phase is related to the decoupling of spatial and spectral DOF: the relative Gouy phase of the interacting beams fully defines the shape of the spectrum of down-converted photons.
Tailoring the modal decomposition of the pump beam can be used to engineer high-dimensional OAM entanglement. State engineering can also be used to decrease the coupling between the spatial and spectral DOF, leading to an increase of the correlations stored in the spatial DOF. We thus hope that our expression will aid experimenters in the design and quantitative modeling of challenging experiments based on PDC.
The authors thank Egor Kovlakov and Darvin Wanisch for very helpful discussions.
\end{document}
\begin{document}
\title{Splitting the K\"unneth formula} \author{Laurence R. Taylor} \address{Department of Mathematics\newline \indent University of Notre Dame\newline \indent Notre Dame, IN 46556\newline \indent U.S.A.} \email{[email protected]} \begin{abstract} There is a description of the torsion product of two modules in terms of generators and relations given by Eilenberg and Mac Lane. With some additional data on the chain complexes there is a splitting of the map in the K\"unneth\ formula in terms of these generators. Different choices of this additional data determine a natural coset reminiscent of the indeterminacy in a Massey triple product. In one class of examples the coset actually is a Massey triple product.
The explicit formulas for a splitting enable proofs of results on the behavior of the interchange map and the long exact sequence boundary map on all the terms in the K\"unneth\ formula. Information on the failure of naturality of the splitting is also obtained. \end{abstract} \maketitle
\date{\today}
\section{Introduction} Fix a principal ideal domain $R$ and let $\complex[1]_\ast$ and $\complex[3]_\ast$ be two chain complexes of $R$ modules. The K\"unneth\ formula states that if $\complex[1]_\ast\tor[R]\complex[3]_\ast$ is acyclic then there is a short exact sequence \begin{xyMatrixLine} 0\to \displaystyle\mathop{\oplus}_{i+j=\totalInt} H_{i}(\complex[1]_\ast) \tensor[R] H_{j}(\complex[3]_\ast) \ar[r]^-{\cs{cross product}}& H_{\totalInt}(\complex[1]_\ast\tensor[R] \complex[3]_\ast) \ar[r]^-{\cs{to torsion product}}& \displaystyle\mathop{\oplus}_{i+j=\totalInt-1} H_{i}(\complex[1]_\ast)\tor[R] H_{j}(\complex[3]_\ast)\to0 \end{xyMatrixLine} which is natural for pairs of chain maps and which is split. For a proof in this generality see for example Dold \cite{Dold}*{VI, 9.13}.
Let $\cs{to torsion product}_{k,\ell}\colon H_{\totalInt}(\complex[1]_\ast\tensor[R] \complex[3]_\ast) \to H_{k}(\complex[1]_\ast)\tor[R] H_{\ell}(\complex[3]_\ast)$ denote $\cs{to torsion product}$ followed by projection. Say that a map $\sigma\colon H_{i}(\complex[1]_\ast)\tor[R] H_{j}(\complex[3]_\ast) \to H_{i+j+1}(\complex[1]_\ast\tensor[R] \complex[3]_\ast)$ \emph{splits the K\"unneth\ formula at $(i,j)$} provided $\cs{to torsion product}_{k,\ell}\circ \sigma = \identyMap{H_{i}(\complex[1]_\ast)\tor[R] H_{j}(\complex[3]_\ast)}$ if $(k,\ell)=(i,j)$ and is $0$ otherwise.
\section{The main idea}\sectionLabel{main idea} Suppose the $R$ modules in the complexes $\complex[1]_\ast$ and $\complex[3]_\ast$ are free, so the K\"unneth\ formula holds. The general case is discussed in \S \namedSection{general case}.
In \cite{Eilenberg-Mac Lane}*{\S11} Eilenberg and Mac Lane gave a generators and relations description of the torsion product: $\complex[1]\tor[R]\complex[3]$ is the free $R$ module on symbols $\cs{elementary tor}{\element[1]}{\ringElement}{\element[2]}$ where $\ringElement\in R$, $\element[1]\in \complex[1]$ with $\element[1]\moduleDot\ringElement = 0$ and $\element[2]\in \complex[3]$ with $\ringElement\moduleDot\element[2] = 0$ modulo four types of relations described below, (\ref{free cycle gives map}.1) -- (\ref{free cycle gives map}.4). The symbols $\cs{elementary tor}{\element[1]}{\ringElement}{\element[2]}$ will be called \emph{elementary tors}.
In what follows, given any complex $\complex[2]_{\ast}$, $\complexCycles[2]_\ast$ denotes the cycles and $\complexBoundaries[2]_\ast$ denotes the boundaries. Given any cycle $\elementCycle[4]$ of degree $\abs{\element[4]}$ in $\complex[2]_{\ast}$, write $\homologyClassOf{\elementCycle[4]}\in H_{\abs{\element[4]}}(\complex[2]_\ast)$ for the homology class $\elementCycle[4]$ represents. Let $\cyclesToHomology[2]\colon \complexCycles[2]_\ast \to H_\ast(\complex[2]_\ast)$ denote the canonical map.
Mac Lane \cite{Mac Lane}*{Prop.~V.10.6} describes a cycle in $H_{\totalInt}(\complex[1]_\ast\tensor[R]\complex[3]_\ast)$ representing a given elementary tor in the range of $\cs{to torsion product}$. Mac Lane's cycle is defined as follows. Lift $\element[1]$ to a cycle, $\elementCycle[1]$, and $\element[2]$ to a cycle $\elementCycle[3]$. Since $\element[1]\moduleDot \ringElement = 0$, $\elementCycle[1]\moduleDot \ringElement$ is a boundary. Choose $\liftToChain[1]\in \complex[1]_{\abs{\element[1]}+1}$ so that $\boundary[1]_{\abs{\element[1]}+1}(\liftToChain[1] ) = \elementCycle[1] \moduleDot\ringElement$. Choose $\liftToChain[3]$ so that $\boundary[3]_{\abs{\element[3]}+1}(\liftToChain[3]) = \ringElement\moduleDot \elementCycle[3]$. Up to sign and notation, Mac Lane's cycle is given by \namedNumber[Formula]{Mac Lane cycle A} \begin{equation*}\tag{\ref{Mac Lane cycle A}} \cs{tor cycles into}\bigl( {\elementCycle[1]}, \liftToChain[1]; \elementCycle[3], \liftToChain[3]\bigr) = (-1)^{\abs{\element[1]}+1} \elementCycle[1] \tensor \liftToChain[3] + \liftToChain[1] \tensor \elementCycle[3] \end{equation*} Mac Lane puts the sign in front of the other term but then gets a sign when evaluating $\cs{to torsion product}$. Mac Lane also writes (\ref{Mac Lane cycle A}) as a Bockstein.
The short exact sequence $\xyLine[@C10pt]{0\ar[r]&R \ar[rr]^-{\moduleDot\ringElement}&& R\ar[rr]^-{\rho^{\ringElement}}&&\ry{\ringElement}\ar[r]&0}$ gives rise to a long exact sequence whose boundary term is called the Bockstein associated to the sequence: \begin{math} \mathfrak b^{\ringElement}_{\totalInt}\colon H_{\totalInt}\bigl({\complex[2]_\ast} {\tensor[R]}\ry{\ringElement}\bigr) \to H_{\totalInt-1}({\complex[2]_{\ast}}) \end{math} In terms of the Bockstein and the pairing \begin{equation*} H_{k}\def\secondInt{\ell}\def\totalInt{n}\bigl(\complex[1]_\ast\tensor[R]\ry{\ringElement}\bigr) \cs{cross product} H_{\secondInt}\bigl(\complex[3]_\ast\tensor[R]\ry{\ringElement}\bigr) \to H_{k}\def\secondInt{\ell}\def\totalInt{n+\secondInt}\bigl(\complex[1]_\ast\tensor[R]\complex[3]_\ast \tensor[R]\ry{\ringElement}\bigr) \end{equation*} \vskip-10pt \namedNumber[Formula]{Mac Lane cycle B} \begin{equation*}\tag{\ref{Mac Lane cycle B}} \cs{tor cycles into}\bigl( {\elementCycle[1]}, \liftToChain[1]; \elementCycle[3], \liftToChain[3]\bigr) = (-1)^{\abs{\element[1]}+1}\mathfrak b^{\ringElement}_{\abs{\element[1]}+\abs{\element[3]}+2}\bigl( \liftToChain[1] \tensor \liftToChain[3] \bigr) \end{equation*}
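As a minimal check (using the standard convention $\partial(x\tensor y)=\partial x\tensor y+(-1)^{\abs{x}}x\tensor \partial y$ for the differential of $\complex[1]_\ast\tensor[R]\complex[3]_\ast$, written here simply as $\partial$), the element (\ref{Mac Lane cycle A}) is indeed a cycle:
\begin{equation*}
\partial\Bigl((-1)^{\abs{\element[1]}+1} \elementCycle[1] \tensor \liftToChain[3] + \liftToChain[1] \tensor \elementCycle[3]\Bigr)
= -\,\elementCycle[1] \tensor \bigl(\ringElement\moduleDot\elementCycle[3]\bigr)
+ \bigl(\elementCycle[1]\moduleDot\ringElement\bigr) \tensor \elementCycle[3]
= 0 ,
\end{equation*}
since $\elementCycle[1]$ and $\elementCycle[3]$ are cycles and $\elementCycle[1] \tensor \ringElement\moduleDot\elementCycle[3] = \elementCycle[1]\moduleDot\ringElement \tensor \elementCycle[3]$ in the tensor product over $R$.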
Given a different choice of cycle for $\elementCycle[1]$, say $\elementCycleB[1]$, $\elementCycleB[1] =\elementCycle[1] + \boundary[1]_{\abs{\element[1]}+1} (\liftDelta[1])$. Take $\liftToChainA[1]{1} = \liftToChain[1] + \liftDelta[1]\moduleDot \ringElement$. With a similar choice of lift on the right, $\cs{tor cycles into}\bigl( {\elementCycleB[1]}, \liftToChainA[1]{1}; \elementCycleB[3], \liftToChainA[3]{1}\bigr) - \cs{tor cycles into}\bigl( {\elementCycle[1]}, \liftToChain[1]; \elementCycle[3], \liftToChain[3]\bigr)$ is a boundary and so different choices of cycles give the same homology class.
\vskip10pt Indeterminacy comes from the choices of $\liftToChain[1]$ and $\liftToChain[3]$. With $\elementCycle[1]$ and $\elementCycle[3]$ fixed, $\liftToChain[1]$ is determined up to a cycle. Let $\liftToChainA[1]{1} = \liftToChain[1] + \liftCycle[1]$ and let $\liftToChainA[3]{1} = \liftToChain[3] + \liftCycle[3]$. Then
\begin{equation*} {\homologyClassOf[big]{ \cs{tor cycles into}\bigl( {\element[1]}, \liftToChainA[1]{1}; \element[2], \liftToChainA[3]{1}\bigr)}} = \homologyClassOf[big]{\cs{tor cycles into}\bigl( {\element[1]}, \liftToChain[1]; \element[2], \liftToChain[3]\bigr) + (-1)^{\abs{\liftCycle[1]}} \bigl(\element[1]\cs{cross product} \homologyClassOf{\liftCycle[3]}\bigr)} + \bigl(\homologyClassOf{\liftCycle[1]}\cs{cross product} \element[2]\bigr) \end{equation*} Since $\homologyClassOf{\liftCycle[1]}$ and $\homologyClassOf{\liftCycle[3]}$ can be chosen arbitrarily, any element in the coset $\fundamentalCoset{\element[1]}{\element[2]}{\complex[1]_\ast}{\complex[3]_\ast} {\abs{\element[1]}}{\abs{\element[2]}}$ can be realized. Let \namedNumber[Formula]{double coset} \begin{equation*}\tag{\ref{double coset}} \cosetTor[{\element[1]}]{\ringElement}{\element[2]} \subset H_{\abs{\element[1]}+\abs{\element[2]}+1} (\complex[1]_\ast\tensor[R]\complex[3]_\ast) \end{equation*}
\noindent denote the coset determined by any of the $\homologyClassOf[big]{ \cs{tor cycles into}\bigl( {\element[1]}, \liftToChainA[1]{1}; \element[2], \liftToChainA[3]{1}\bigr)}$.
The above discussion and Proposition V.10.6 of \cite{Mac Lane} shows the following. \begin{ThmS}[Mac Lane main lemma]{Lemma} For two complexes of free $R$ modules, $R$ a PID, the element $\homologyClassOf[big]{ \cs{tor cycles into}\bigl( {\element[1]}, \liftToChain[1]; \element[2], \liftToChain[3]\bigr)}$ determines $\cosetTor[{\element[1]}]{\ringElement}{\element[2]} $ a well-defined coset of $\fundamentalCoset{\element[1]}{\element[2]}{\complex[1]_\ast}{\complex[3]_\ast} {\abs{\element[1]}}{\abs{\element[2]}}$ such that \begin{equation*} \cs{to torsion product}_{s,t} \bigl(\cosetTor[{\element[1]}]{\ringElement}{\element[2]}\bigr) = \begin{cases} \cs{elementary tor}{\element[1]}{\ringElement}{\element[2]} & s=\abs{\element[1]}, t = \abs{\element[2]}\\ 0&\text{otherwise}\\ \end{cases} \end{equation*} \end{ThmS}
To get a splitting requires one more step. Since $R$ is a PID, the set of boundaries in a free chain complex is a free submodule and hence there is a splitting of the boundary maps. Choose splittings for the complexes being considered here: $\complexSplitting[1]\colon\complexBoundaries[1]\to \complex[1]_{\ast+1}$ and $\complexSplitting[3]\colon\complexBoundaries[3]\to \complex[3]_{\ast+1}$.
Define \namedNumber{torsion product cycle} \newCS{torsion product cycle 1}{{\ref{torsion product cycle}.1}} \newCS{torsion product cycle 2}{{\ref{torsion product cycle}.2}} \newCS{env:torsion product cycle 1}{{Formula}} \newCS{env:torsion product cycle 2}{{Formula}} \begin{align*}\tag{\cs{torsion product cycle 1}} \cs{tor cycles into}\bigl( {\elementCycle[1]}, \complexSplitting[1]; \elementCycle[3], \complexSplitting[3];\ringElement\bigr) =& (-1)^{\abs{\element[1]}+1} \elementCycle[1]\tensor \complexSplitting[3]\bigl(\ringElement \elementCycle[3]\bigr) + \complexSplitting[1]\bigl(\elementCycle[1] \ringElement\bigr) \tensor \elementCycle[3]\\ \tag{\cs{torsion product cycle 2}} \cs{tor cycles into}\bigl( {\elementCycle[1]}, \complexSplitting[1]; \elementCycle[3], \complexSplitting[3];\ringElement\bigr) =& (-1)^{\abs{\element[1]}+1}\mathfrak b^{\ringElement}_{\abs{\element[1]}+\abs{\element[3]}+2} \Bigl(\complexSplitting[1]\bigl(\elementCycle[1] \ringElement\bigr) \tensor \complexSplitting[3]\bigl(\ringElement \elementCycle[3]\bigr)\Bigr) \end{align*} \begin{ThmS}[free splitting is independent of cycles]{Lemma} The homology class $\homologyClassOf[big]{\cs{tor cycles into}\bigl( {\elementCycle[1]}, \complexSplitting[1]; \elementCycle[3], \complexSplitting[3];\ringElement\bigr)}$ is independent of the choice of cycles $\elementCycle[1]$ and $\elementCycle[3]$. \end{ThmS} \begin{proof} See the paragraph just below (\ref{Mac Lane cycle B}). \end{proof} Define \begin{equation*} \cs{homology splitting}[{\complexSplitting[1]}] {\complexSplitting[3]}_{\abs{\element[1]}, \abs{\element[2]}} \bigl( \cs{elementary tor}{\element[1]}{\ringElement}{\element[2]} \bigr) = \homologyClassOf[big]{\cs{tor cycles into}\bigl( {\elementCycle[1]}, \complexSplitting[1]; \elementCycle[3], \complexSplitting[3]; \ringElement\bigr)} \end{equation*}
\begin{ThmS}[free cycle gives map]{Theorem} For fixed splittings $\complexSplitting[1]$ and $\complexSplitting[3]$, the function $\cs{homology splitting}[{\complexSplitting[1]}] {\complexSplitting[3]}_{\abs{\element[1]}, \abs{\element[2]}}$ defined on elementary tors induces an $R$ module map \begin{equation*} \cs{homology splitting}[{\complexSplitting[1]}] {\complexSplitting[3]}_{\abs{\element[1]}, \abs{\element[2]}}\colon H_{\abs{\element[1]}}(\complex[1]_\ast)\tor[R] H_{\abs{\element[2]}}(\complex[3]_\ast) \to H_{\abs{\element[1]} + \abs{\element[2]}+1}(\complex[1]_\ast\tensor[R] \complex[3]_\ast) \end{equation*} which splits the K\"unneth\ formula at $\bigl(\abs{\element[1]},\abs{\element[2]}\bigr)$. \end{ThmS} \begin{proof} The splitting at $\bigl(\abs{\element[1]},\abs{\element[2]}\bigr)$ follows from \namedRef{Mac Lane main lemma}. Fix splittings and let $\localName{\element[1]}{\ringElement}{\element[2]} = \homologyClassOf{\cs{tor cycles into}\bigl( {\elementCycle[1]}, \complexSplitting[1]; \elementCycle[3], \complexSplitting[3];\ringElement\bigr)}$. By Eilenberg and Mac Lane \cite{Eilenberg-Mac Lane}*{\S 11}, to prove $\cs{homology splitting}$ is a module map, it suffices to prove the following
\vskip 10pt \noindent(\ref{free cycle gives map}.1) \enumline{$\localName{\element[1]_1}{\ringElement}{\element[2]} + \localName {\element[1]_2}{\ringElement}{\element[2]} = \localName {\element[1]_1+\element[1]_2}{\ringElement}{\element[2]}$} {$\element[1]_{i}\,\ringElement = 0$; $\ringElement\element[2]=0$} (\ref{free cycle gives map}.2) \enumline{$\localName {\element[1]}{\ringElement}{\element[2]_1} + \localName {\element[1]}{\ringElement}{\element[2]_2} = \localName {\element[1]}{\ringElement}{\element[2]_1+\element[2]_2}$} {$\element[1]\ringElement=0$; $\ringElement \element[2]_{i}=0$} (\ref{free cycle gives map}.3) \Enumline{$\localName {\element[1]}{\ringElement_1\cdot \ringElement_2}{\element[2]} = \localName {\element[1] \ringElement_1}{\ringElement_2}{\element[2]}$} {$\element[1] \ringElement_1 \ringElement_2 = 0$; $\ringElement_2\element[2]=0$} (\ref{free cycle gives map}.4) \Enumline{$\localName {\element[1]}{\ringElement_1\cdot \ringElement_2}{\element[2]} = \localName {\element[1]}{\ringElement_1}{\ringElement_2\element[2]}$} {$\element[1]\ringElement_1=0$; $\ringElement_1 \ringElement_2\element[2]=0$}
These formulas are easily verified at the chain level using (\cs{torsion product cycle 1}), \namedRef{free splitting is independent of cycles} and carefully chosen cycles. \end{proof}
\begin{DefS}{Remark} Eilenberg and Mac Lane work over $\mathbb Z$ but, as pointed out explicitly in \cite{Mac Lane slides}*{about the middle of page 285}, the proof uses nothing more than that submodules of free modules are free and that finitely generated modules are direct sums of cyclic modules. Hence the results are valid for PID's. \end{DefS}
\begin{DefS}{Remark} The data contained in a splitting is surely related to the structure introduced by Heller in \cite{Heller}. See also Section \namedSection{Bocksteins determine}. \end{DefS}
\section{Free Approximations}\sectionLabel{free approximations} A result attributed to Dold by Mac Lane \cite{Mac Lane}*{Lemma 10.5} is that given any chain complex over a PID there exists a free chain complex with a quasi-isomorphic chain map to the original complex. In this paper any such complex and quasi-isomorphism will be called a \emph{free approximation}. \begin{DefS*}{Warning} Some authors also require the chain map to be surjective. \end{DefS*} Here is a review of a construction of a free approximation, mostly to establish notation. Some lemmas needed later are also proved here.
\def\chainMap[1]+ \chainMap[2]{ } A \emph{weak splitting} of a chain complex $\complex[1]_\ast$ at an integer $\totalInt$ is a free resolution $\xyLine[@C20pt]{0\ar[r]& \freeBoundaries[1]_{\totalInt} \ar[r]^-{\iota^{\complex[1]}_{\totalInt}}& \freeCycles[1]_{\totalInt} \ar[rr]^-{\hat{\freeCyclesMap[0]_{ }}_{H_{\totalInt}(\complex[1]_\ast)}}&& H_{\totalInt}(\complex[1]_\ast)\ar[r]&0}$ and a pair of maps $\splitPair[1]_{\totalInt} = \{\freeCyclesMap[1]_{\totalInt}, \freeBoundariesMap[1]_{\totalInt} \}$ of the resolution into $\complex[1]_\ast$ where $\freeCyclesMap[1]_{\totalInt}\colon \freeCycles[1]_{\totalInt}\to \complexCycles[1]_{\totalInt}$ and $\freeBoundariesMap[1]_{\totalInt}\colon \freeBoundaries[1]_{\totalInt}\to \complex[1]_{\totalInt+1}$. It is further required that \\ \null\hskip5pt\begin{minipage}{0.30\textwidth} \begin{xyMatrix} \freeCycles[1]_{\totalInt}\ar[r]^-{\freeCyclesMap[1]_{\totalInt}} \ar[dr]_{\hat{\freeCyclesMap[0]_{ }}_{H_{\totalInt}(\complex[1]_\ast)}}& \ar[d]^-{\cyclesToHomology[1]_{\totalInt}} \complexCycles[1]_{\totalInt} \\ &H_{\totalInt}(\complex[1]_\ast) \end{xyMatrix} \end{minipage}\hbox to 0.8in{\hfil and\hfil} \begin{minipage}{0.35\textwidth} \begin{xyMatrix}[@C40pt] \freeBoundaries[1]_{\totalInt}\ar[r]^-{\iota^{\complex[1]}_{\totalInt}}\ar[d]^-{\freeBoundariesMap[1]_{\totalInt}} &\freeCycles[1]_{\totalInt}\ar[d]^-{\freeCyclesMap[1]_{\totalInt}}\\ \complex[1]_{\totalInt+1}\ar[r]^-{\boundary[1]_{\totalInt+1}}& \complexCycles[1]_{\totalInt}\\ \end{xyMatrix} \end{minipage} commute.
The complex is said to be \emph{weakly split} if it is weakly split at $\totalInt$ for all integers $\totalInt$. Any module over a PID has a free resolution and any complex has a weak splitting. If the complex is free, a splitting as in \S\namedSection{main idea} is a weak splitting.
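\begin{DefS*}{Example} For the complex of the previous example ($R=\mathbb Z$, with $\complex[1]_0=\mathbb Z/2$ and all other groups zero) a weak splitting at $\totalInt=0$ can be written down explicitly; the choices below are ours and only illustrate the definition. Take the free resolution $0\to\mathbb Z\xrightarrow{\;2\;}\mathbb Z\to\mathbb Z/2\to0$ of $H_0(\complex[1]_\ast)=\mathbb Z/2$, so that $\freeBoundaries[1]_0=\mathbb Z$, $\freeCycles[1]_0=\mathbb Z$ and $\iota^{\complex[1]}_0$ is multiplication by $2$. Let $\freeCyclesMap[1]_0\colon\mathbb Z\to\complexCycles[1]_0=\mathbb Z/2$ be reduction and $\freeBoundariesMap[1]_0\colon\mathbb Z\to\complex[1]_1=0$ the zero map. The triangle commutes since both composites are reduction mod $2$, and the square commutes since both composites $\freeBoundaries[1]_0\to\complexCycles[1]_0$ are zero. At every other $\totalInt$ the homology vanishes and the trivial resolution works, so the complex is weakly split.
\end{DefS*}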
\vskip 10pt
Given a weakly split complex, define a complex whose groups are $\naturalFreeComplex[1]_{\totalInt} = \freeBoundaries[1]_{\totalInt-1} \displaystyle\mathop{\oplus} \freeCycles[1]_{\totalInt}$ and whose boundary maps are the compositions
\noindent \resizebox{\textwidth}{!}{{$ \xymatrix@C30pt{ \complexFreeBoundaryMap[1]_{\totalInt}\colon \naturalFreeComplex[1]_{\totalInt} = \freeBoundaries[1]_{\totalInt-1} \displaystyle\mathop{\oplus} \freeCycles[1]_{\totalInt}\ar[r]& \freeBoundaries[1]_{\totalInt-1}\ar[r]^-{\iota^{\complex[1]}_{\totalInt-1}}& \freeCycles[1]_{\totalInt-1}\ar[r]& \freeBoundaries[1]_{\totalInt-2} \displaystyle\mathop{\oplus} \freeCycles[1]_{\totalInt-1} = \naturalFreeComplex[1]_{\totalInt-1} } $ }}
\noindent The submodule $0\displaystyle\mathop{\oplus} \freeBoundaries[1]_{\totalInt-1}\subset \freeBoundaries[1]_{\totalInt-2} \displaystyle\mathop{\oplus} \freeCycles[1]_{\totalInt-1} = \naturalFreeComplex[1]_{\totalInt-1}$ is the image of $\complexFreeBoundaryMap[1]_{\totalInt}$ so one choice of splitting, called the \emph{canonical splitting}, is the composition \begin{xyMatrix} \naturalFreeSplitting[1]\colon \mathbf{B}_{\totalInt-1}(\naturalFreeComplex[1]_\ast)=0 \displaystyle\mathop{\oplus} \freeBoundaries[1]_{\totalInt-1}\ar[r]& \freeBoundaries[1]_{\totalInt-1}\ar[r]& \freeBoundaries[1]_{\totalInt-1} \displaystyle\mathop{\oplus} \freeCycles[1]_{\totalInt} = \naturalFreeComplex[1]_{\totalInt} \end{xyMatrix}
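\begin{DefS*}{Example} Applied to the weak splitting of the previous example, the construction gives $\naturalFreeComplex[1]_1 = \freeBoundaries[1]_0 = \mathbb Z$, $\naturalFreeComplex[1]_0 = \freeCycles[1]_0 = \mathbb Z$ and zero in all other degrees, with boundary map $\iota^{\complex[1]}_0$, that is, multiplication by $2$. This recovers the free complex $0\to\mathbb Z\xrightarrow{\;2\;}\mathbb Z\to0$ used earlier, and the canonical splitting sends the boundary $2x\in\mathbf{B}_0(\naturalFreeComplex[1]_\ast)\subset\naturalFreeComplex[1]_0$ to $x\in\naturalFreeComplex[1]_1$.
\end{DefS*}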
\begin{ThmS}[free approximations]{Lemma} The map \begin{equation*} \naturalFreeMap[1]_{\totalInt}=\freeBoundariesMap[1]_{\totalInt-1}+ \freeCyclesMap[1]_{\totalInt}\colon \naturalFreeComplex[1]_{\totalInt} = \freeBoundaries[1]_{\totalInt-1} \displaystyle\mathop{\oplus} \freeCycles[1]_{\totalInt} \to \complex[1]_{\totalInt} \end{equation*} is a chain map which is a quasi-isomorphism. If $\freeCyclesMap[1]_\ast\colon\freeCycles[1]_\ast \to \complexCycles[1]_\ast$ is onto then $\naturalFreeMap[1]_{\ast}$ is onto. It is always possible to choose $\freeCyclesMap[1]_\ast$ to be onto. \end{ThmS} The proofs of the claimed results are standard. \begin{ThmS}[cover surjective chain maps]{Lemma} Let $\chainMap[1]_\ast\colon\complex[1]_\ast\to\complex[3]_\ast$ be a surjective chain map and let $\naturalFreeMap[3]_{\ast}\colon \freeApproximation[3]_\ast\to\complex[3]_\ast$ be a free approximation. Then there exist free approximations $\naturalFreeMap[1]_{\ast}\colon \freeApproximation[1]_\ast\to\complex[1]_\ast$ and surjective chain maps $\freeApproximationChainMap[1]_\ast\colon \freeApproximation[1]_\ast \to \freeApproximation[3]_\ast$ making \begin{xyMatrix} \freeApproximation[1]_\ast\ar[r]^-{\freeApproximationChainMap[1]_\ast} \ar[d]^-{\naturalFreeMap[1]_{\ast}}& \freeApproximation[3]_\ast \ar[d]^-{\naturalFreeMap[3]_{\ast}}\\ \complex[1]_\ast\ar[r]^-{\chainMap[1]_\ast}& \complex[3]_\ast \end{xyMatrix} commute. \end{ThmS} \begin{proof} Let $\xymatrix{ P_\ast\ar[r]^-{\hat{\chainMap[1]_{ }}_\ast} \ar[d]^-{\zeta_{\ast}}& \freeApproximation[3]_\ast \ar[d]^-{\naturalFreeMap[3]_{\ast}}\\ \complex[1]_\ast\ar[r]^-{\chainMap[1]_\ast}& \complex[3]_\ast }$
be a pull back. \def\chainMap[1]+ \chainMap[2]{P} Since $\chainMap[1]_\ast$ is onto, so is $\hat{\chainMap[1]_{ }}_\ast$ and the kernel complexes are isomorphic. By the 5 Lemma, $\zeta_\ast$ is a quasi-isomorphism. Let $\naturalFreeMap[10]_\ast\colon \freeApproximation[1]_\ast\to P_\ast$ be a surjective free approximation. Then $\naturalFreeMap[1]_{\ast} = \zeta_\ast\circ \naturalFreeMap[10]_{\ast}$ and $\freeApproximationChainMap[1]_\ast = \hat{\chainMap[1]_{ }}_\ast\circ \naturalFreeMap[10]_\ast$ are the desired maps. \end{proof} \begin{ThmS}[short exact free approximation]{Lemma} If $ \xymatrix@1@C10pt{ 0\ar[r]& \complex[1]_\ast\ar[rr]^-{\chainMap[1]_\ast}&& \complex[3]_\ast\ar[rr]^-{\chainMap[2]_\ast}&& \complex[2]_\ast\ \ar[r]& 0}$ is exact, there exist free approximations making the diagram below commute. \begin{xyMatrix}[@C10pt] 0\ar[r]&\freeApproximation[1]_\ast\ar[rr]^-{\freeMapApproximation[1]_\ast} \ar[d]^-{\vertMap{\complex[1]}_\ast}&& \freeApproximation[3]_\ast\ar[rr]^-{\freeMapApproximation[2]_\ast} \ar[d]^-{\vertMap{\complex[3]}_\ast}&& \freeApproximation[2]_\ast \ar[d]^-{\vertMap{\complex[2]}_\ast} \ar[r]& 0\\
0\ar[r]&\complex[1]_\ast\ar[rr]^-{\chainMap[1]_\ast}&& \complex[3]_\ast\ar[rr]^-{\chainMap[2]_\ast}&& \complex[2]_\ast\ar[r]&0 \end{xyMatrix} \end{ThmS} \begin{proof} Use \namedRef{cover surjective chain maps} to get $\freeMapApproximation[2]$. Let $\freeApproximation[1]_\ast$ be the kernel complex, hence free. There is a unique map $\vertMap{\complex[1]}_{\ast}$ making the diagram commute. By the 5 Lemma, $\vertMap{\complex[1]}_{\ast}$ is a quasi-isomorphism. \end{proof} \begin{ThmS}[Dold splitting]{Lemma} Suppose $\complex[1]_\ast\tor[R]\complex[3]_\ast$ is acyclic. Suppose $\naturalFreeMap[1]_{\ast}\colon \freeApproximation[1]_\ast\to\complex[1]_\ast$ and $\naturalFreeMap[3]_{\ast}\colon \freeApproximation[3]_\ast\to\complex[3]_\ast$ are free approximations. Then so is {\setlength\belowdisplayskip{-10pt} \begin{equation*} \naturalFreeMap[1]_{\ast} \tensor \naturalFreeMap[3]_{\ast}\colon \freeApproximation[1]_\ast\tensor[R]\freeApproximation[3]_\ast \to\complex[1]_\ast\tensor[R]\complex[3]_\ast \end{equation*} }\end{ThmS}\nointerlineskip \namedNumber{Dold splitting diagram} \begin{proof} The K\"unneth\ formula is natural for chain maps so
\noindent\resizebox{\textwidth}{!}{{$\xymatrix{ 0\to \displaystyle\mathop{\oplus}_{i}\def\secondIndex{j+\secondIndex=\totalInt} H_{i}\def\secondIndex{j}(\freeApproximation[1]_\ast)\tensor[R] H_{\secondIndex}(\freeApproximation[3]_\ast) \ar[r]^-{\cs{cross product}} \ar[d]^-{\hbox{\tiny$\displaystyle\mathop{\oplus}_{i}\def\secondIndex{j+\secondIndex=\totalInt} \naturalFreeMap[1]_\ast \tensor \naturalFreeMap[3]_\ast$}}_-{\hbox to 1in{(\ref{Dold splitting diagram})
}} & H_{\totalInt}(\freeApproximation[1]_\ast \tensor[R] \freeApproximation[3]_\ast) \ar[r]^-{\cs{to torsion product}} \ar[d]^-{(\naturalFreeMap[1]\tensor \naturalFreeMap[3])_\ast}& \displaystyle\mathop{\oplus}_{i}\def\secondIndex{j+\secondIndex=\totalInt-1} H_{i}\def\secondIndex{j}(\freeApproximation[1]_\ast)\tor[R] H_{\secondIndex}(\freeApproximation[3]_\ast) \to 0 \ar[d]^-{\hbox{\tiny$\displaystyle\mathop{\oplus}_{i}\def\secondIndex{j+\secondIndex=\totalInt-1} \naturalFreeMap[1]_\ast\tor \naturalFreeMap[3]_\ast$}}
\\
0\to \displaystyle\mathop{\oplus}_{i}\def\secondIndex{j+\secondIndex=\totalInt} H_{i}\def\secondIndex{j}(\complex[1]_\ast)\tensor[R] H_{\secondIndex}(\complex[3]_\ast) \ar[r]^-{\cs{cross product}} & H_{\totalInt}(\complex[1]_\ast\tensor[R] \complex[3]_\ast) \ar[r]^-{\cs{to torsion product}} & \displaystyle\mathop{\oplus}_{i}\def\secondIndex{j+\secondIndex=\totalInt-1} H_{i}\def\secondIndex{j}(\complex[1]_\ast)\tor[R] H_{\secondIndex}(\complex[3]_\ast) \to 0 }$}}
\noindent commutes. The left and right vertical maps are tensor and torsion products of isomorphisms and hence isomorphisms. The middle vertical map is an isomorphism by the 5 Lemma. \end{proof}
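\begin{DefS*}{Example} The acyclicity hypothesis in \namedRef{Dold splitting} cannot be dropped; the following illustration is ours. Take $R=\mathbb Z$ and $\complex[1]_\ast=\complex[3]_\ast$ the complex with $\mathbb Z/2$ in degree $0$ and zero elsewhere, so that $\complex[1]_\ast\tor[R]\complex[3]_\ast$ has homology $\operatorname{Tor}(\mathbb Z/2,\mathbb Z/2)=\mathbb Z/2$ in degree $0$ and is not acyclic. Both complexes have the free approximation $0\to\mathbb Z\xrightarrow{\;2\;}\mathbb Z\to0$ of the earlier examples, with generators $f_1$ and $f_0$. The tensor product of this free complex with itself has $H_1\cong\mathbb Z/2$, generated by the class of $f_1\otimes f_0 - f_0\otimes f_1$, whereas $\complex[1]_\ast\tensor[R]\complex[3]_\ast$ is $\mathbb Z/2$ concentrated in degree $0$ and has $H_1=0$. So the tensor product of these free approximations is not a free approximation of the tensor product; the extra class is exactly the torsion product which the K\"unneth\ formula for the free complexes sees and which the non-free tensor product cannot.
\end{DefS*}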
\section{The general case}\sectionLabel{general case} With notation and hypotheses as in \namedRef{Dold splitting}, applying $(\naturalFreeMap[1]\tensor \naturalFreeMap[3])_\ast$ to the cycle in (\cs{torsion product cycle 1}) gives
\namedNumber{torsion product cycle II} \newCS{torsion product cycle II 1}{{\ref{torsion product cycle II}.1}} \newCS{torsion product cycle II 2}{{\ref{torsion product cycle II}.2}} \newCS{env:torsion product cycle II 2}{{Formula}} \begin{equation*}\tag{\cs{torsion product cycle II 1}} \cs{tor cycles into}(\splitPair[1]_\ast, \splitPair[3]_\ast)\bigl(\elementCycle[1], \ringElement, \elementCycle[3]\bigr) = \epsilon\, \freeCyclesMap[1]_\ast(\elementCycle[1]) \tensor \freeBoundariesMap[3]_\ast\bigl(\ringElement \elementCycle[3]\bigr) + \freeBoundariesMap[1]_\ast \bigl(\elementCycle[1] \ringElement\bigr) \tensor
\freeCyclesMap[3]_\ast(\elementCycle[3]) \end{equation*}
\noindent where $\elementCycle[1]\in \freeCycles[1]_\ast$ satisfies $\freeHomologyMap[1]_\ast(\elementCycle[1]) = \element[1]$, $\elementCycle[3]\in \freeCycles[3]_\ast$ satisfies $\freeHomologyMap[3]_\ast(\elementCycle[3]) = \element[2]$ and $\epsilon=(-1)^{\abs{\element[1]}+1}$.
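\begin{DefS*}{Example} To see the formula in a familiar case (our illustration, in ordinary notation): let $R=\mathbb Z$ and let both complexes be $0\to\mathbb Z\xrightarrow{\;2\;}\mathbb Z\xrightarrow{\;0\;}\mathbb Z\to0$ with generators $e_2,e_1,e_0$ in degrees $2,1,0$ (the cellular chains of $\mathbb{RP}^2$), so $H_0=\mathbb Z$, $H_1=\mathbb Z/2$ and $H_2=0$. The complex is free, so the splitting maps may be taken to be the evident ones, with $e_1$ as the representing cycle in each factor and $e_2$ as the chain bounding $2e_1$. For $\element[1]=\element[2]=[e_1]$ and $\ringElement=2$ we have $\epsilon=(-1)^{1+1}=1$, and (\cs{torsion product cycle II 1}) produces the degree three cycle
\begin{equation*}
e_1\otimes e_2 + e_2\otimes e_1, \qquad \partial\bigl(e_1\otimes e_2 + e_2\otimes e_1\bigr) = -\,e_1\otimes 2e_1 + 2e_1\otimes e_1 = 0 .
\end{equation*}
Its class generates $H_3$ of the tensor product, which by the K\"unneth\ formula is $\operatorname{Tor}(\mathbb Z/2,\mathbb Z/2)\cong\mathbb Z/2$, since the tensor terms vanish in total degree three.
\end{DefS*}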
In general there is no analogue to (\cs{torsion product cycle 2}) because not all complexes have the necessary Bocksteins. If $\complex[1]_\ast$ and $\complex[3]_\ast$ are torsion free then the necessary Bocksteins exist and applying $(\naturalFreeMap[1]\tensor \naturalFreeMap[3])_\ast$ to (\cs{torsion product cycle 2}) gives \begin{align*} \tag{\cs{torsion product cycle II 2}} \cs{tor cycles into}(\splitPair[1]_\ast, \splitPair[3]_\ast)\bigl(\elementCycle[1], \ringElement, \elementCycle[3]\bigr) =& \epsilon\, \mathfrak b^{\ringElement}_{\abs{\element[1]}+\abs{\element[3]}+2}\Bigl( \freeBoundariesMap[1]_\ast\bigl(\elementCycle[1]\moduleDot\ringElement\bigr) \tensor \freeBoundariesMap[3]_\ast \bigl(\ringElement \moduleDot\elementCycle[3]\bigr) \Bigr) \end{align*}
\begin{ThmS}{Lemma} The homology class $\homologyClassOf[big]{\cs{tor cycles into}(\splitPair[1]_\ast, \splitPair[3]_\ast)(\elementCycle[1],\ringElement, \elementCycle[3])}$ is independent of the lifts $\elementCycle[1]$ and $\elementCycle[3]$. \end{ThmS} \begin{proof} The cycles $\elementCycle[1]$ and $\elementCycle[3]$ are cycles in $\naturalFreeComplex[1]_\ast$ and $\naturalFreeComplex[3]_\ast$ so the result is immediate from \namedRef{free splitting is independent of cycles} \end{proof} \begin{ThmS}[weak splittings give map]{Theorem} Assume $\complex[1]_\ast\tor[R]\complex[3]_\ast$ is acyclic. For fixed weak splittings $\splitPair[1]_\ast$ and $\splitPair[3]_\ast$ taking the homology class of $\cs{tor cycles into}(\splitPair[1]_\ast, \splitPair[3]_\ast)(\elementCycle[1],\elementCycle[3])$ yields a map \begin{equation*} \cs{homology splitting}[{\splitPair[1]_\ast}] {\splitPair[3]_\ast}_{i}\def\secondIndex{j,\secondIndex}\colon H_{i}\def\secondIndex{j}(\complex[1]_\ast)\tor[R] H_{\secondIndex}(\complex[3]_\ast) \to H_{i}\def\secondIndex{j+\secondIndex+1}(\complex[1]_\ast\tensor[R] \complex[3]_\ast) \end{equation*} which splits the K\"unneth\ formula at $(i}\def\secondIndex{j,\secondIndex)$. \end{ThmS} \begin{proof} The cycle \ref{torsion product cycle II}.1 is the image of the cycle \ref{torsion product cycle}.1 and so $\cs{homology splitting}$ is a map by \namedRef{free cycle gives map}. \namedNumberRef{Dold splitting} applies and (\ref{Dold splitting diagram}) has exact rows. The splitting result follows from \namedRef{free cycle gives map}. \end{proof} \begin{ThmS}[weak splitting map in correct coset]{Corollary} The map $\cs{homology splitting}[{\splitPair[1]_\ast}] {\splitPair[3]_\ast}_{i}\def\secondIndex{j,\secondIndex}$ will depend on the weak splittings. For any choices of weak splittings, $\cs{homology splitting}[{\splitPair[1]_\ast}] {\splitPair[3]_\ast}_{i}\def\secondIndex{j,\secondIndex}\bigl( \cs{elementary tor}{\element[1]}{\ringElement}{\element[2]} \bigr)$ is in the same coset of $\fundamentalCoset{\element[1]}{\element[2]}{\complex[1]_\ast}{\complex[3]_\ast} {i}\def\secondIndex{j}{\secondIndex}$. Denote this coset by $\cosetTor[{\element[1]}]{\ringElement}{\element[2]}$. \end{ThmS} \begin{proof} Suppose given two weak splittings, $\splitPair[1]_{i}\def\secondIndex{j} = \{\freeCyclesMap[1]_{i}\def\secondIndex{j}, \freeBoundariesMap[1]_{i}\def\secondIndex{j}\}$ and $\splitPairA[1]_{i}\def\secondIndex{j} = \{ \freeCyclesMapA[1]_{i}\def\secondIndex{j}, \freeBoundariesMapA[1]_{i}\def\secondIndex{j}\}$. Then $\freeCyclesMapA[1]_{i}\def\secondIndex{j} - \freeCyclesMap[1]_{i}\def\secondIndex{j} \colon\freeCycles[1]_{i}\def\secondIndex{j} \to \complexCycles[1]_{i}\def\secondIndex{j} \to H_{i}\def\secondIndex{j}(\complex[1]_\ast)$ is trivial so $\freeCyclesMapA[1]_{i}\def\secondIndex{j} - \freeCyclesMap[1]_{i}\def\secondIndex{j} \colon\freeCycles[1]_{i}\def\secondIndex{j} \to \complexBoundaries[1]_{i}\def\secondIndex{j}$. Since $\freeCycles[1]_{i}\def\secondIndex{j}$ is free, there exists a lift $\Psi_{i}\def\secondIndex{j}\colon \freeCycles[1]_{i}\def\secondIndex{j} \to \complex[1]_{i}\def\secondIndex{j+1}$. Next consider $\xyLine{ \boundary[1]_{i}\def\secondIndex{j+1}\bigl(\Psi_{i}\def\secondIndex{j} - (\freeBoundariesMapA[1]_{i}\def\secondIndex{j} - \freeBoundariesMap[1]_{i}\def\secondIndex{j}) \bigr) \colon \freeCycles[1]_{i}\def\secondIndex{j} \to \complex[1]_{i}\def\secondIndex{j+1} \ar[r]^-{\boundary[1]_{i}\def\secondIndex{j+1}}& \complex[1]_{i}\def\secondIndex{j}}$. 
This map is also trivial so there is a unique map $\freeCycles[1]_{i}\def\secondIndex{j} \to \complexCycles[1]_{i}\def\secondIndex{j+1}$ and hence a unique map $\Phi_{i}\def\secondIndex{j}\colon \freeCycles[1]_{i}\def\secondIndex{j} \to \complexCycles[1]_{i}\def\secondIndex{j+1} \to H_{i}\def\secondIndex{j+1}(\complex[1]_\ast)$. Then $\cs{homology splitting}[{\splitPairA[1]_\ast}] {\splitPair[3]_\ast}_{i}\def\secondIndex{j,\secondIndex}\bigl( \cs{elementary tor}{\element[1]}{\ringElement}{\element[2]} \bigr) - \cs{homology splitting}[{\splitPair[1]_\ast}] {\splitPair[3]_\ast}_{i}\def\secondIndex{j,\secondIndex}\bigl( \cs{elementary tor}{\element[1]}{\ringElement}{\element[2]} \bigr) = (-1)^{i}\def\secondIndex{j+1} \homologyClassOf[big]{\Phi_{i}\def\secondIndex{j}(\elementCycle[1]) \cs{cross product} \elementCycle[3]} \in H_{i}\def\secondIndex{j+1}(\complex[1]_\ast)\cs{cross product} \element[3]$. A similar calculation shows the variation in the other variable lies in $\element[1]\cs{cross product}H_{\secondIndex+1}(\complex[3]_\ast)$. \end{proof}
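\begin{DefS*}{Example} The indeterminacy allowed by the corollary can be non-trivial; the complexes below are our own illustration, over $R=\mathbb Z$. Let $\complex[3]_\ast$ be the complex $0\to\mathbb Z\xrightarrow{\;2\;}\mathbb Z\xrightarrow{\;0\;}\mathbb Z\to0$ with $H_1=\mathbb Z/2$, and let $\complex[1]_\ast$ be the direct sum of this complex and a copy of $\mathbb Z$ placed in degree $2$ with zero differential, so $H_1(\complex[1]_\ast)=\mathbb Z/2$ and $H_2(\complex[1]_\ast)=\mathbb Z$. For the generators $\element[1]$, $\element[2]$ of the two groups $H_1$ and for $\ringElement=2$, the subgroup $H_2(\complex[1]_\ast)\cs{cross product}\element[2] + \element[1]\cs{cross product}H_2(\complex[3]_\ast)$ appearing in the proof above is the image of $\mathbb Z\tensor[R]\mathbb Z/2\cong\mathbb Z/2$ under the injective cross product and is therefore non-zero, so in this case the coset $\cosetTor[{\element[1]}]{\ringElement}{\element[2]}$ is strictly larger than a single class.
\end{DefS*}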
\section{Splitting via Universal Coefficients}\sectionLabel{Bocksteins determine} In the torsion-free case, \cs{env:torsion product cycle II 2} \cs{torsion product cycle II 2} suggests another way to produce a splitting. The Universal Coefficients formula says that for a torsion-free complex $\complex[2]_\ast$ there is a short exact sequence, natural in $\complex[2]_\ast$, which splits, although not naturally:
\noindent\resizebox{\textwidth}{!}{{$\xymatrix@C18pt{ 0\ar[r]&H_{\totalInt}\bigl(\complex[2]_\ast\bigr) \tensor[R]\ry{\ringElement[4]}\ar[rr]&& H_{\totalInt}\bigl(\complex[2]_\ast \tensor[R]\ry{\ringElement[4]}\bigr)\ar[rr]^-{\universalCoefficientsMap{2}{4}{\totalInt}}&& \rtorsion{\ringElement[4]}{H_{\totalInt-1}(\complex[2]_{\ast})}\ar[r]&0 }$}}
\noindent where for a fixed $\ringElement$ in a PID $R$ and an $R$ module $P$, $\rtorsion{\ringElement}{P} = P\tor[R]\ry{\ringElement}$ denotes the submodule of elements annihilated by $\ringElement$.
\noindent The Bockstein $\mathfrak b^{\ringElement[4]}_{\totalInt}$ is the composition {\setlength{\abovedisplayskip}{0pt} \setlength{\belowdisplayskip}{-10pt} \begin{equation*} \xymatrix@C18pt{ H_{\totalInt}\bigl(\complex[2]_\ast \tensor[R]\ry{\ringElement[4]}\bigr)\ar[rr]^-{\universalCoefficientsMap{2}{4}{\totalInt}}&& \rtorsion{\ringElement[4]}{H_{\totalInt-1}(\complex[2]_{\ast})} \subset H_{\totalInt-1}(\complex[2]_{\ast}) } \end{equation*} } \begin{ThmS}[Bocksteins in correct coset]{Theorem} Let $\complex[1]_\ast$ and $\complex[3]_\ast$ be torsion-free complexes. Given $\element[1]\in H_{i}\def\secondIndex{j}(\complex[1]_\ast)$ pick $\elementR[1]\in H_{i}\def\secondIndex{j+1}\bigl(\complex[1]_\ast\tensor[R]\ry{\ringElement}\bigr)$ such that $\universalCoefficientsMap{1}{0}{i}\def\secondIndex{j+1}(\elementR[1]) = \element[1]$. Given $\element[3]\in H_{\secondIndex}(\complex[3]_\ast)$ pick $\elementR[3]\in H_{\secondIndex+1}\bigl(\complex[3]_\ast\tensor[R]\ry{\ringElement}\bigr)$ such that $\universalCoefficientsMap{3}{0}{\secondIndex+1}(\elementR[3]) = \element[3]$. On elementary tors $\cs{elementary tor} {\element[1]}{\ringElement}{\element[3]}$ define \begin{equation*} \splitByCrossProductOfBocksteins_{i}\def\secondIndex{j,\secondIndex}\bigl(\cs{elementary tor} {\element[1]}{\ringElement}{\element[3]}\bigr) = (-1)^{i}\def\secondIndex{j+1}\mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+2}\bigl( \elementR[1] \tensor \elementR[3] \bigr) \end{equation*} Then $\splitByCrossProductOfBocksteins_{i}\def\secondIndex{j,\secondIndex}\bigl(\cs{elementary tor} {\element[1]}{\ringElement}{\element[3]}\bigr) \in\cosetTor[{\element[1]}]{\ringElement}{\element[2]}$. \end{ThmS} \begin{proof} From \namedRef{weak splitting map in correct coset}, $(-1)^{i}\def\secondIndex{j+1}\mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+2}(\elementR[1]\tensor \elementR[3])$ lies in $\cosetTor[{\element[1]}]{\ringElement}{\element[2]}$ if the splittings used are ones from a weak splitting. Any other choice of splitting for $\complex[1]_\ast$ is of the form $\elementR[1] + X_{\element[1]}$ for $X_{\element[1]}\in H_{i}\def\secondIndex{j+1}(\complex[1]_\ast)$ and any other choice of splitting for $\complex[3]_\ast$ is of the form $\elementR[3] + X_{\element[3]}$ for $X_{\element[3]}\in H_{\secondIndex+1}(\complex[3]_\ast)$. Then \begin{equation*} \mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+2}\bigl( (\elementR[1]+ X_{\element[1]})\tensor( \elementR[3]+X_{\element[3]})\bigr) = \mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+2}(\elementR[1]\tensor \elementR[3]) + X_{\element[1]}\cs{cross product} \element[3] + (-1)^{i}\def\secondIndex{j+1} \element[1]\cs{cross product} X_{\element[3]} \end{equation*} The result follows. \end{proof}
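\begin{DefS*}{Example} A worked Bockstein, for orientation (our illustration, $R=\mathbb Z$, $\ringElement=2$): take $\complex[2]_\ast$ to be the torsion-free complex $0\to\mathbb Z\xrightarrow{\;2\;}\mathbb Z\xrightarrow{\;0\;}\mathbb Z\to0$ with generators $e_2,e_1,e_0$. Reducing mod $2$ kills the differentials, so $H_2\bigl(\complex[2]_\ast\tensor[R]\ry{2}\bigr)=\mathbb Z/2$ is generated by the class of $e_2$, and Universal Coefficients identifies this group with $\rtorsion{2}{H_1(\complex[2]_{\ast})}$ since $H_2(\complex[2]_\ast)=0$. To compute $\mathfrak b^{2}_{2}$ of the generator, lift it to the chain $e_2$; then $\boundary[2]_2(e_2)=2e_1$, so $\mathfrak b^{2}_{2}\bigl([e_2]\bigr)=[e_1]$, the generator of $\rtorsion{2}{H_1(\complex[2]_{\ast})}=\mathbb Z/2$.
\end{DefS*}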
If the Universal Coefficients splittings are chosen arbitrarily, the map on the elementary tors may not descend to a map on the torsion product. This problem is overcome as follows. A family of splittings \begin{equation*} \splitBocksteinHomology^{\complex[1],\ringElement}_{\totalInt}\colon \rtorsion{\ringElement}{ H_{\totalInt}(\complex[1]_{\ast})} \to H_{\totalInt+1}\bigl(\complex[1]_{\ast}\tensor[R] \ry{\ringElement}\bigr) \end{equation*} one for each non-zero $\ringElement\in R$, is \emph{a compatible family of splittings of $\complex[1]_\ast$ at $\totalInt$} provided that, for all non-zero elements $\ringElement[4]_1$, $\ringElement[4]_2\in R$, the diagram
\noindent\resizebox{\textwidth}{!}{{$\xymatrix{ \rtorsion{\ringElement[4]_2}{H_{\totalInt}(\complex[1]_\ast)} \ar[r]^-{\subset}\ar[d]^-{\splitBocksteinHomology^{\complex[1],\ringElement[4]_2}_\totalInt}& \rtorsion{\ringElement[4]_1\moduleDot\ringElement[4]_2}{H_{\totalInt}(\complex[1]_\ast)} \ar[r]^-{\moduleDot[1]\ringElement[4]_2}\ar[d]^-{\splitBocksteinHomology^{\complex[1],\ringElement[4]_1\moduleDot\ringElement[4]_2}_\totalInt}& \rtorsion{\ringElement[4]_1}{H_{\totalInt}(\complex[1]_\ast)} \ar[d]^-{\splitBocksteinHomology^{\complex[1],\ringElement[4]_1}_\totalInt}\\
H_{\totalInt+1}\bigl(\complex[1]_{\ast}\tensor[R] \ry{\ringElement[4]_2}\bigr) \ar[r]^-{\ringElement[4]_1\moduleDot[1]} \ar[d]^-{\universalCoefficientsMapA{1}{\ringElement[4]_2}{\totalInt+1}}& H_{\totalInt+1}\bigl(\complex[1]_{\ast}\tensor[R] \ry{\ringElement[4]_1\moduleDot\ringElement[4]_2}\bigr) \ar[r]^-{\rho^{\ringElement[4]_1}} \ar[d]^-{\universalCoefficientsMapA{1}{\ringElement[4]_1\moduleDot[1]\ringElement[4]_2}{\totalInt+1}}& H_{\totalInt+1}\bigl(\complex[1]_{\ast}\tensor[R] \ry{\ringElement[4]_1}\bigr) \ar[d]^-{\universalCoefficientsMapA{1}{\ringElement[4]_1}{\totalInt+1}}\\
\rtorsion{\ringElement[4]_2}{H_{\totalInt}(\complex[1]_\ast)} \ar[r]^-{\subset}& \rtorsion{\ringElement[4]_1\moduleDot\ringElement[4]_2}{H_{\totalInt}(\complex[1]_\ast)} \ar[r]^-{\moduleDot[1]\ringElement[4]_2}& \rtorsion{\ringElement[4]_1}{H_{\totalInt}(\complex[1]_\ast)} \\ }$}}
\noindent commutes, where the horizontal maps are induced from the short exact sequence of modules $\xyLine[@C30pt]{0\to\ry{\ringElement[4]_2}\ar[r]^-{\ringElement[4]_1\moduleDot[1]}& \ry{\ringElement[4]_1\moduleDot\ringElement[4]_2}\ar[r]^-{\rho^{\ringElement[4]_1}}& \ry{\ringElement[4]_1}} \to 0$ and the rows are exact. The diagram consisting of the bottom two rows always commutes, and the composites of the vertical maps from the first row to the third are the identity.
If the splittings come from a weak splitting of $\complex[1]_\ast$ then they are compatible for any $\totalInt$.
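\begin{DefS*}{Example} At the level of the torsion submodules the condition reads as follows (our illustration). For $H_{\totalInt}(\complex[1]_\ast)=\mathbb Z/4$ and $\ringElement[4]_1=\ringElement[4]_2=2$ the top row is the inclusion $\rtorsion{2}{\mathbb Z/4}=\{0,2\}\subset\rtorsion{4}{\mathbb Z/4}=\mathbb Z/4$ followed by multiplication by $2$, which carries $\mathbb Z/4$ back onto $\{0,2\}$, and compatibility asks that the chosen splittings $\splitBocksteinHomology^{\complex[1],2}_{\totalInt}$ and $\splitBocksteinHomology^{\complex[1],4}_{\totalInt}$ intertwine these two maps with the maps on $H_{\totalInt+1}\bigl(\complex[1]_{\ast}\tensor[R]\ry{\ringElement[4]}\bigr)$ induced by $\ry{2}\xrightarrow{\;2\;}\ry{4}\xrightarrow{\;\rho^{2}\;}\ry{2}$. If, say, $\complex[1]_\ast$ is $0\to\mathbb Z\xrightarrow{\;4\;}\mathbb Z\to0$ and $\totalInt=0$, then $H_1(\complex[1]_\ast)=0$, each Universal Coefficients sequence has a unique splitting, and the resulting family is automatically compatible; the condition only has content when the groups $H_{\totalInt+1}(\complex[1]_\ast)\tensor[R]\ry{\ringElement}$ are non-zero.
\end{DefS*}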
\begin{ThmS}{Theorem} Suppose $\complex[1]_\ast$ and $\complex[3]_\ast$ are torsion-free. Given a compatible family of splittings of $\complex[1]_\ast$ at $i}\def\secondIndex{j$ and a compatible family of splittings of $\complex[3]_\ast$ at $\secondIndex$, the formula
\begin{equation*} \splitByCrossProductOfBocksteinsA{\elementRr[1]{i}\def\secondIndex{j}}{\elementRr[3]{\secondIndex}}_{i}\def\secondIndex{j,\secondIndex}\bigl(\cs{elementary tor} {\element[1]}{\ringElement}{\element[3]}\bigr) = (-1)^{i}\def\secondIndex{j+1}\mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+2}\bigl( \elementRr[1]{i}\def\secondIndex{j} \cs{cross product} \elementRr[3]{\secondIndex} \bigr) \end{equation*} defines a map from $H_{i}\def\secondIndex{j}(\complex[1]_\ast) \tor[R] H_{{\secondIndex}}(\complex[3]_\ast)$ to $H_{{i}\def\secondIndex{j}+{\secondIndex}+1} (\complex[1]_\ast\tensor[R]\complex[3]_\ast)$ splitting the K\"unneth\ formula at $({i}\def\secondIndex{j},{\secondIndex})$. \end{ThmS} \begin{proof} It follows from \namedRef{Bocksteins in correct coset} that if $\splitByCrossProductOfBocksteins_{i}\def\secondIndex{j,\secondIndex}$ is a map then it splits the K\"unneth\ formula at $(i}\def\secondIndex{j,\secondIndex)$.
To show $\splitByCrossProductOfBocksteinsA{\elementRr[1]{i}\def\secondIndex{j}} {\elementRr[3]{\secondIndex}}_{i}\def\secondIndex{j,\secondIndex}$ is a map, it suffices to show that (\ref{free cycle gives map}.1)--(\ref{free cycle gives map}.4) hold. Equations (\ref{free cycle gives map}.1) and (\ref{free cycle gives map}.2) hold whether the splittings are compatible or not, since the cross product, and hence $\splitByCrossProductOfBocksteinsA{\elementRr[1]{i}\def\secondIndex{j}} {\elementRr[3]{\secondIndex}}_{i}\def\secondIndex{j,\secondIndex}$, is bilinear.
\newcommand{\elementT}[3][0]{\splitBocksteinHomology^{\complex[#1],#3}_{#2}(\element[#1])} \newcommand{\elementS}[4][0]{\splitBocksteinHomology^{\complex[#1],#3}_{#2}(#4)}
To verify (\ref{free cycle gives map}.3) it suffices to show \namedNumber{equal Bocksteins} \begin{equation*}\tag{\ref{equal Bocksteins}} \mathfrak b^{\ringElement_1 \moduleDot \ringElement_2}_{i}\def\secondIndex{j+\secondIndex+2}\bigl( \elementT[1]{i}\def\secondIndex{j}{\ringElement_1\moduleDot \ringElement_2} \cs{cross product} \elementT[3]{\secondIndex}{\ringElement_1\moduleDot \ringElement_2} \bigr) = \mathfrak b^{\ringElement_2}_{i}\def\secondIndex{j+\secondIndex+2}\bigl( \elementS[1]{i}\def\secondIndex{j}{\ringElement_2}{\element[1]\moduleDot \ringElement_1} \cs{cross product} \elementT[3]{\secondIndex}{\ringElement_2} \bigr) \end{equation*} To compute a Bockstein of a homology class, $\element[4]\in H_{\totalInt}\bigl( \complex[2]_\ast\tensor[R]\ry{\ringElement[4]}\bigr)$, first lift to a chain, $\hat{\element[4]}\in \complex[2]_{\totalInt}$ and then $\boundary[2]_{\totalInt}(\hat{\element[4]}) = \ringElement[4] \element[100]$. The class $\element[100]$ is unique because $\complex[2]_{\totalInt}$ is torsion-free and $\mathfrak b^{\ringElement[4]}_{\totalInt}(\element[4])$ is the homology class represented by $\element[100]$.
\newcommand{\elementTC}[3][0]{\splitBocksteinChains^{\complex[#1],#3}_{#2}(\element[#1])} \newcommand{\elementSC}[4][0]{\splitBocksteinChains^{\complex[#1],#3}_{#2}(#4)} There are four homology classes in (\ref{equal Bocksteins}). For uniform notation, given $\elementS[2]{\totalInt}{\ringElement[4]}{\element[4]}$, let $\elementSC[2]{\totalInt}{\ringElement[4]}{\element[4]}$ be a lift to a representing chain. The cross product of homology classes is represented by the tensor product of chains so $C_1 = \elementTC[1]{i}\def\secondIndex{j}{\ringElement_1\moduleDot \ringElement_2} \tensor \elementTC[3]{\secondIndex}{\ringElement_1\moduleDot \ringElement_2} $ is a chain to compute the left hand side of (\ref{equal Bocksteins}) and $C_2 = \elementSC[1]{i}\def\secondIndex{j}{\ringElement_2}{\element[1]\moduleDot \ringElement_1} \tensor \elementTC[3]{\secondIndex}{\ringElement_2} $
is a chain to compute the right hand side of (\ref{equal Bocksteins}).
Note $\homologyClassOf[Big]{\boundary[1]_{i}\def\secondIndex{j}\bigl( \elementTC[1]{i}\def\secondIndex{j}{\ringElement_1\moduleDot \ringElement_2} \bigr)} = \elementT[1]{i}\def\secondIndex{j}{\ringElement_1\moduleDot \ringElement_2} (\ringElement_1\moduleDot \ringElement_2) $ and $\homologyClassOf[Big]{\boundary[1]_{i}\def\secondIndex{j}\bigl( \elementSC[1]{i}\def\secondIndex{j}{\ringElement_2}{\element[1]\moduleDot\ringElement_1} \bigr)} = \elementS[1]{i}\def\secondIndex{j}{\ringElement_2}{\element[1]\moduleDot\ringElement_1} (\ringElement_1\moduleDot \ringElement_2) $. If the splittings are compatible, $\elementT[1]{i}\def\secondIndex{j}{\ringElement_1\moduleDot \ringElement_2} = \elementS[1]{i}\def\secondIndex{j}{\ringElement_2}{\element[1]\moduleDot\ringElement_1}$ so choose $\elementTC[1]{i}\def\secondIndex{j}{\ringElement_1\moduleDot \ringElement_2} = \elementSC[1]{i}\def\secondIndex{j}{\ringElement_2}{\element[1]\moduleDot\ringElement_1}$.
Also $\homologyClassOf[Big]{\boundary[3]_{i}\def\secondIndex{j}\bigl( \elementTC[3]{i}\def\secondIndex{j}{\ringElement_1\moduleDot \ringElement_2} \bigr)} = (\ringElement_1\moduleDot \ringElement_2) \elementT[3]{i}\def\secondIndex{j}{\ringElement_1\moduleDot \ringElement_2} $ whereas $\homologyClassOf[Big]{\boundary[3]_{i}\def\secondIndex{j}\bigl( \elementSC[3]{i}\def\secondIndex{j}{\ringElement_2}{\element[3]} \bigr)} = \ringElement_2\elementS[3]{i}\def\secondIndex{j}{\ringElement_2}{\element[3]} $. If the splittings are compatible, $\elementT[3]{i}\def\secondIndex{j}{\ringElement_1\moduleDot \ringElement_2} = \elementS[3]{i}\def\secondIndex{j}{\ringElement_2}{\element[3]}$ so choose $\elementTC[3]{i}\def\secondIndex{j}{\ringElement_1\moduleDot \ringElement_2} = \ringElement_1 \elementSC[3]{i}\def\secondIndex{j}{\ringElement_2}{\element[3]}$.
It follows that $C_1 = \ringElement_1\moduleDot C_2$. Since \begin{xyMatrix}[@C12pt] 0\ar[r]&R\ar[rr]^-{\ringElement_2} \ar[d]_-{\identyMap{R}} &&R\ar[rr]^-{\rho^{\ringElement_2}} \ar[d]^-{\ringElement_1\moduleDot[1]} &&\ry{\ringElement_2}\ar[r] \ar[d]^{\ringElement_1\moduleDot[1]} &0 \\ 0\ar[r]&R\ar[rr]^-{\ringElement_1\moduleDot[1]\ringElement_2}&&R\ar[rr]^-{\rho^{\ringElement_1\moduleDot[1]\ringElement_2}} &&\ry{\ringElement_1\moduleDot\ringElement_2}\ar[r]&0 \end{xyMatrix} commutes, $\mathfrak b^{\ringElement_1 \moduleDot \ringElement_2}_{i}\def\secondIndex{j+\secondIndex+2}(C_1) = \mathfrak b^{\ringElement_2}_{i}\def\secondIndex{j+\secondIndex+2}(\ringElement_1 C_2) $ as required. \end{proof}
\section{Naturality of the splitting}\sectionLabel{Naturality} \namedNumber{p0} \namedNumber{p1} \namedNumber{p2} Fix a chain map $\chainMap[1]_\ast\colon \complex[1]_\ast \to \complex[2]_\ast$ between two weakly split chain complexes. Pick a map $\freeCyclesChainMap[1]_{\totalInt}\colon \freeCycles[1]_{\totalInt} \to \freeCycles[2]_{\totalInt}$ satisfying \begin{equation*}\tag{\ref{p0}} \cyclesToHomology[2]_{\totalInt}\circ \freeCyclesMap[2]_{\totalInt}\circ \freeCyclesChainMap[1]_{\totalInt} = \chainMap[1]_{\totalInt}\circ \cyclesToHomology[1]_{\totalInt}\circ \freeCyclesMap[1]_{\totalInt}\colon \freeCycles[1]_{\totalInt} \to H_{\totalInt}(\complex[2]_\ast) \end{equation*} Since the right hand square in the diagram below commutes \begin{xyMatrix}[@C1pt] \freeBoundaries[1]_{\totalInt}\ \subset \ar@<-12pt>@{.>}[d]^-{\freeBoundariesChainMap[1]_{\totalInt}} & \ar@<-2pt>[d]^-{\freeCyclesChainMap[1]_{\totalInt}} \freeCycles[1]_{\totalInt} \ar[rrrrrrr]^-{\cyclesToHomology[1]_{\totalInt}\circ \freeCyclesMap[1]_{\totalInt}} &&&&&&& H_{\totalInt}(\complex[1]_\ast) \ar[d]^-{\chainMap[1]_{\totalInt}}\\ \freeBoundaries[2]_{\totalInt}\ \subset & \freeCycles[2]_{\totalInt} \ar[rrrrrrr]^-{\cyclesToHomology[2]_{\totalInt}\circ \freeCyclesMap[2]_{\totalInt}} &&&&&&& H_{\totalInt}(\complex[2]_\ast) \\ \end{xyMatrix}
\noindent there exists a unique map $\freeBoundariesChainMap[1]_{\totalInt} \colon \freeBoundaries[1]_{\totalInt} \to \freeBoundaries[2]_{\totalInt}$ making the left hand square commute.
The set of choices for $\freeCyclesChainMap[1]_{\totalInt}$ consists of any one choice plus any map $L_{\totalInt}\colon \freeCycles[1]_{\totalInt}\to \freeBoundaries[2]_{\totalInt}$. The corresponding restricted map on $\freeBoundaries[1]_{\totalInt}$ is $\freeBoundariesChainMap[1]_{\totalInt}$ plus the restriction of $L_{\totalInt}$.
\vskip10pt The maps $ \freeCyclesMap[2]_{\totalInt}\circ \freeCyclesChainMap[1]_{\totalInt}$ and $ \chainMap_{\totalInt}\circ \freeCyclesMap[1]_{\totalInt} $ have domain $\freeCycles[1]_{\totalInt}$ and range $\complexCycles[2]_{\totalInt}$ and they represent the same homology class. Hence $ \freeCyclesMap[2]_{\totalInt}\circ \freeCyclesChainMap[1]_{\totalInt} - \chainMap_{\totalInt}\circ \freeCyclesMap[1]_{\totalInt}$ lands in $\complexBoundaries[2]_{\totalInt}$. Since $\freeCycles[1]_{\totalInt}$ is free, there is a lift of this difference to a map $\weakMap[1]_{\totalInt}\colon\freeCycles[1]_{\totalInt} \to \complex[2]_{\totalInt+1}$ satisfying
\begin{equation*}\tag{\ref{p1}} \boundary[2]_{\totalInt+1}\circ \weakMap[1]_{\totalInt} = \freeCyclesMap[2]_{\totalInt}\circ \freeCyclesChainMap[1]_{\totalInt} - \chainMap_{\totalInt}\circ \freeCyclesMap[1]_{\totalInt} \end{equation*} If $\freeCyclesChainMap[1]_{\totalInt}$ is replaced by $\freeCyclesChainMap[1]_{\totalInt} + L_{\totalInt}$, a choice for the new $\weakMap[1]_{\totalInt}$ is $\weakMap[1]_{\totalInt} + \freeBoundariesMap[2]\circ L_{\totalInt}$.
The set of solutions to (\ref{p1}) consists of one solution, $\weakMap[1]_{\totalInt}$, plus any map of the form $\Lambda_{\totalInt}\colon \freeCycles[1]_{\totalInt} \to \complexCycles[2]_{\totalInt+1}\subset \complex[2]_{\totalInt+1}$.
Given a fixed solution to (\ref{p1}) consider \begin{equation*} \xi = \weakMap[1]_{\totalInt}\big\vert_{_{\scriptstyle\freeBoundaries[1]_{\totalInt}}} - \bigl( \freeBoundariesMap[2]_{\totalInt}\circ \freeBoundariesChainMap[1]_{\totalInt} - \chainMap_{\totalInt+1}\circ \freeBoundariesMap[1]_{\totalInt} \bigr)\colon \freeBoundaries[1]_{\totalInt} \to \complex[2]_{\totalInt+1} \end{equation*}
\noindent Notice if $\freeCyclesChainMap[1]_{\totalInt}$ is replaced by $\freeCyclesChainMap[1]_{\totalInt} + L_{\totalInt}$, the new $\xi$ is the same map as the old $\xi$. The image of $\xi$ is contained in the cycles of $\complex[2]_{\totalInt+1}$ and so gives a map \begin{equation*}\tag{\ref{p2}} \weakHomologyMap[1]_{\totalInt} = \bigl( \freeBoundariesMap[2]_{\totalInt}\circ \freeBoundariesChainMap[1]_{\totalInt} - \chainMap_{\totalInt+1}\circ \freeBoundariesMap[1]_{\totalInt} \bigr) - \weakMap[1]_{\totalInt}\big\vert_{_{\scriptstyle\freeBoundaries[1]_{\totalInt}}} \colon \freeBoundaries[1]_{\totalInt} \to H_{\totalInt+1}(\complex[2]_{\ast}) \end{equation*} which does not depend on the choice of $\freeCyclesChainMap[1]_\ast$.
The map $\weakHomologyMap[1]_\ast$ induces a map \begin{equation*} \weakTorsionHomologyMap[1]<1>_{\totalInt}\colon \rtorsion{\ringElement[1]}{H_{\totalInt}(\complex[1]_\ast)} \to H_{\totalInt+1}(\complex[2]_\ast)\tensor \ry{r} \end{equation*} defined as follows. Given $\element[1]\in {}\rtorsion{\ringElement[1]}{H_{\totalInt}(\complex[1]_\ast)}$ pick $\elementCycle[1]\in \freeCycles[1]_{\totalInt}$ so that $\homologyClassOf{\freeCyclesMap[1]_{\totalInt}(\elementCycle[1])} = \element[1]$. Then $\elementCycle[1]\moduleDot \ringElement[1] \in\freeBoundaries[1]_{\totalInt}$ so let $\weakTorsionHomologyMap[1]<1>_{\totalInt}(\element[1])$ be the homology class represented by $\weakHomologyMap[1]_{\totalInt}(\elementCycle[1]\moduleDot \ringElement[1])$ reduced mod $\ringElement[1]$.
\begin{ThmS}{Proposition} Given a chain map $\chainMap[1]_\ast\colon \complex[1]_\ast\to\complex[2]_\ast$ between two weakly split chain complexes over a PID $R$, the map \begin{equation*} \weakTorsionHomologyMap[1]<1>_{\totalInt} \colon \rtorsion{\ringElement[1]}{H_{\totalInt}(\complex[1]_\ast)} \to H_{\totalInt+1}(\complex[2]_\ast)\tensor \ry{r} \end{equation*} is well-defined regardless of the choices made in (\ref{p0}) and (\ref{p1}). \end{ThmS} \begin{proof} Any other choice of element in $\freeCycles[1]_{\totalInt}$ has the form $\elementCycle[1] + b$ for $b\in \freeBoundaries[1]_{\totalInt}$. Then ${ \weakHomologyMap[1]_{\totalInt}\Bigl(\bigl(\elementCycle[1] + b\bigr)\moduleDot \ringElement[1]\Bigr) = \weakHomologyMap[1]_{\totalInt}(\elementCycle[1]\moduleDot \ringElement[1] ) + \weakHomologyMap[1]_{\totalInt} \bigl(b\moduleDot \ringElement[1] \bigr) = \weakHomologyMap[1]_{\totalInt}(\elementCycle[1] \moduleDot \ringElement[1] ) + \weakHomologyMap[1]_{\totalInt}\bigl(b\bigr) \moduleDot \ringElement[1] }$ since \penalty-1000 $b\in\freeBoundaries[1]_{\totalInt}$. Hence $\weakHomologyMap[1]_{\totalInt}\Bigl(\bigl(\elementCycle[1] + b\bigr)\moduleDot \ringElement[1]\Bigr)$ and $\weakHomologyMap[1]_{\totalInt}(\elementCycle[1]\moduleDot \ringElement[1])$ represent the same element in $H_{\totalInt+1}(\complex[2]_\ast)\tensor \ry{r}$ and therefore $\weakTorsionHomologyMap[1]<1>_{\totalInt}$ is well-defined. Since $\weakHomologyMap[1]_{\totalInt}$ is an $R$ module map, so is $\weakTorsionHomologyMap[1]<1>_{\totalInt}$.
Given a second lift, it has the form $\weakMap[1]_{\totalInt} + \Lambda$ where $\Lambda\colon \freeCycles[1]_{\totalInt} \to \complexCycles[2]_{\totalInt+1}$ and the new $\weakHomologyMap$ is $\weakHomologyMap[1]_{\totalInt} - \Lambda$. Compute \begin{math} \bigl(\weakHomologyMap[1]_{\totalInt} - \Lambda\bigr) (\elementCycle[1] \moduleDot \ringElement[1] ) = \weakHomologyMap[1]_{\totalInt}(\elementCycle[1] \moduleDot \ringElement[1] ) - \Lambda(\elementCycle[1]\moduleDot \ringElement[1]) \end{math}
But $\Lambda$ is defined on all of $\freeCycles[1]_{\totalInt}$ so \begin{math} \bigl(\weakHomologyMap[1]_{\totalInt} - \Lambda\bigr) (\elementCycle[1]\moduleDot \ringElement[1] ) = \weakHomologyMap[1]_{\totalInt}(\elementCycle[1]\moduleDot \ringElement[1]) - \Lambda(\elementCycle[1])\moduleDot \ringElement[1] \end{math}
and $\weakTorsionHomologyMap[1]<1>_{\totalInt}$ is independent of the lift. \end{proof} \begin{DefS*}{Remark} A similar result holds for left $R$ modules. \end{DefS*}
\begin{DefS}[weak split chain map definition]{Definition} A \emph{weak split chain map} between two weakly split chain complexes $\{\complex[1]_\ast, \splitPair[1]_\ast\}$ and $\{\complex[2]_\ast, \splitPair[2]_\ast\}$ consists of a chain map $\chainMap[1]_\ast\colon \complex[1]_\ast \to \complex[2]_\ast$, a map $\freeCyclesChainMap[1]_\ast\colon \freeCycles[1]_\ast \to \freeCycles[2]_\ast$ satisfying (\ref{p0}) and a map $\weakMap[1]_{\totalInt}\colon\freeCycles[1]_{\totalInt} \to \complex[2]_{\totalInt+1}$ satisfying (\ref{p1}). From the above discussion, given any two weakly split chain complexes and a chain map between them, this data can be completed to a weakly split chain map. The map $\weakTorsionHomologyMap[1]<1>_{\ast}$ is independent of this completion. \end{DefS}
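\begin{DefS*}{Example} The map $\weakTorsionHomologyMap[1]<1>_{\ast}$ is genuinely non-trivial in examples; the following sketch is our own illustration, with signs as in (\ref{p2}), of the classical failure of naturality for the K\"unneth\ and Universal Coefficients splittings. Over $R=\mathbb Z$ let $\complex[1]_\ast$ be $0\to\mathbb Z\xrightarrow{\;2\;}\mathbb Z\xrightarrow{\;0\;}\mathbb Z\to0$ with generators $e_2,e_1,e_0$, let $\complex[2]_\ast$ have $\mathbb Z$ in degrees $2$ and $0$ with zero differentials and generators $e_2',e_0'$, and let $\chainMap[1]_\ast$ be the identity in degrees $0$ and $2$ and zero in degree $1$ (the cellular picture is the collapse $\mathbb{RP}^2\to S^2$). Weakly split $\complex[1]_\ast$ at $\totalInt=1$ by the resolution $\freeBoundaries[1]_1=\mathbb Z\xrightarrow{\;2\;}\freeCycles[1]_1=\mathbb Z$ with $\freeCyclesMap[1]_1$ the identity onto $\complexCycles[1]_1=\mathbb Z e_1$ and $\freeBoundariesMap[1]_1(1)=e_2$; since $H_1(\complex[2]_\ast)=0$ the trivial resolution weakly splits $\complex[2]_\ast$ at $\totalInt=1$, and one may take $\freeCyclesChainMap[1]_1=0$ and $\weakMap[1]_1=0$. Then (\ref{p2}) gives $\weakHomologyMap[1]_1(1)= -\,\bigl[\chainMap[1]_2\bigl(\freeBoundariesMap[1]_1(1)\bigr)\bigr] = -\,[e_2']$, so for the generator $\element[1]$ of $\rtorsion{2}{H_1(\complex[1]_\ast)}=\mathbb Z/2$ one gets $\weakTorsionHomologyMap[1]<1>_{1}(\element[1]) = -\,[e_2']\ne0$ in $H_{2}(\complex[2]_\ast)\tensor\ry{2}\cong\mathbb Z/2$.
\end{DefS*}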
\begin{ThmS}[deviation from naturality in Kunneth formula]{Theorem} Suppose given four weakly split complexes and weakly split chain maps $\chainMap[1]_\ast\colon \complex[1]_\ast \to \complex[2]_\ast$ and $\chainMap[2]_\ast\colon \complex[3]_\ast \to \complex[4]_\ast$.
If $\cs{elementary tor}{\element[1]}{\ringElement}{\element[2]}\in H_{i}\def\secondIndex{j}(\complex[1]_\ast)\tor[R] H_{\secondIndex}(\complex[3]_\ast)$ then \begin{align*} \cs{homology splitting}[{\splitPair[2]_\ast}]{\splitPair[4]_\ast}_{i}\def\secondIndex{j,\secondIndex} \bigl(\cs{elementary tor}{\chainMap[1](\element[1])}{\ringElement}{\chainMap[2](\element[2])} \bigr) &=\ \bigl(\chainMap[1]\tensor\chainMap[2]\bigr)_\ast\bigl( \cs{homology splitting}[{\splitPair[1]_\ast}]{\splitPair[3]_\ast}_{i}\def\secondIndex{j,\secondIndex} (\cs{elementary tor}{\element[1]}{\ringElement}{\element[2]}) \bigr) +\\ \noalign{\vskip 10pt}&\hskip-40pt (-1)^{i}\def\secondIndex{j} \chainMap[1](\element[1])\cs{cross product} \weakTorsionHomologyMap[2]<1>_{\secondIndex}(\element[2]) + \weakTorsionHomologyMap[1]<1>_{i}\def\secondIndex{j}(\element[1])\cs{cross product} \chainMap[2](\element[2]) \end{align*} \end{ThmS} \begin{DefS}{Remark} The $\weakTorsionHomologyMap$ maps take values in $H_\ast(\,\_\,)\tensor \ry{\ringElement[1]}$ but since the other factor in the cross product is $\ringElement[1]$-torsion, each cross product is well-defined in $H_{i}\def\secondIndex{j+\secondIndex+1}(\complex[2]_\ast\tensor[R] \complex[4]_\ast)$. \end{DefS} \begin{proof} It suffices to check the formula on elementary tors so fix $\cs{elementary tor}{\element[1]}{\ringElement}{\element[3]}$. The corresponding cycle \ref{torsion product cycle II} is \begin{equation*} X_0 = (-1)^{\abs{\element[1]}+1} \freeCyclesMap[1]_{i}\def\secondIndex{j}(\elementCycle[1]) \tensor \freeBoundariesMap[3]_{\secondIndex}\bigl(\ringElement \elementCycle[3]\bigr) + \freeBoundariesMap[1]_{i}\def\secondIndex{j} \bigl(\elementCycle[1] \ringElement\bigr) \tensor \freeCyclesMap[3]_{\secondIndex}(\elementCycle[3]) \end{equation*} Evaluating $\chainMap[1]\otimes\chainMap[2]$ on $X_0$ gives \begin{equation*} X_1=(-1)^{{i}\def\secondIndex{j}+1} \chainMap[1]_{i}\def\secondIndex{j}\bigl(\freeCyclesMap[1]_{{i}\def\secondIndex{j}}(\elementCycle[1])\bigr) \tensor \chainMap[2]_{\secondIndex+1} \Bigl(\freeBoundariesMap[3]_{\secondIndex} \bigl(\ringElement \elementCycle[3]\bigr)\Bigr) + \chainMap[1]_{i}\def\secondIndex{j+1}\Bigl(\freeBoundariesMap[1]_{i}\def\secondIndex{j} \bigl(\elementCycle[1] \ringElement\bigr)\Bigr) \tensor \chainMap[2]_{\secondIndex} \bigl(\freeCyclesMap[3]_{\secondIndex}(\elementCycle[3])\bigr) \end{equation*} and a chain representing $\cs{homology splitting}[{\splitPair[2]_\ast}] {\splitPair[4]_\ast}_{i}\def\secondIndex{j,\secondIndex}\bigl( \cs{elementary tor}{\chainMap[1](\element[1])} {\ringElement}{\chainMap[2](\element[2])} \bigr) $ is
\noindent\resizebox{\textwidth}{!}{{ $X_2 = (-1)^{i}\def\secondIndex{j+1} \freeCyclesMap[2]_{i}\def\secondIndex{j}\bigl(\freeCyclesChainMap[1]_{i}\def\secondIndex{j}(\elementCycle[1])\bigr) \tensor \Bigl(\freeBoundariesMap[4]_{\secondIndex} \bigl(\ringElement \freeCyclesChainMap[2]_{\secondIndex}(\elementCycle[3])\bigr)\Bigr) + \Bigl(\freeBoundariesMap[2]_{\secondIndex} \bigl(\freeCyclesChainMap[1]_{\secondIndex}(\elementCycle[1]) \ringElement\bigr) \Bigr) \tensor \freeCyclesMap[4]_{\secondIndex} \bigl(\freeCyclesChainMap[2]_{\secondIndex}(\elementCycle[3])\bigr) $}}
It suffices to prove the theorem for $\chainMap[1]_\ast\tensor \identyMap{\complex[3]_\ast}$ and then for $\identyMap{\complex[2]_\ast} \tensor\chainMap[2]_\ast$ and these calculations are straightforward. \end{proof} \begin{math check} \vskip10pt It suffices to prove the theorem for $\chainMap[1]_\ast\tensor \identyMap{\complex[3]_\ast}$ and then for $\identyMap{\complex[2]_\ast} \tensor\chainMap[2]_\ast$.
Here is the proof for $\chainMap[1]_\ast\tensor \identyMap{\complex[3]_\ast}$. In this special case, $X_1$ and $X_2$ become \begin{align*} Y_1=& (-1)^{i}\def\secondIndex{j+1} \chainMap[1]_{i}\def\secondIndex{j}\bigl(\freeCyclesMap[1]_{i}\def\secondIndex{j}(\elementCycle[1])\bigr) \tensor \freeBoundariesMap[3]_{\secondIndex}\bigl(\ringElement \elementCycle[3]\bigr) + \chainMap[1]_{i}\def\secondIndex{j+1}\Bigl(\freeBoundariesMap[1]_{i}\def\secondIndex{j} \bigl(\elementCycle[1] \ringElement\bigr)\Bigr) \tensor \freeCyclesMap[3]_{\secondIndex}(\elementCycle[3])\\ Y_2=& (-1)^{i}\def\secondIndex{j+1} \freeCyclesMap[2]_{i}\def\secondIndex{j}\bigl(\freeCyclesChainMap[1]_{i}\def\secondIndex{j}(\elementCycle[1])\bigr) \tensor \freeBoundariesMap[3]_{\secondIndex}\bigl(\ringElement \elementCycle[3]\bigr) + \freeBoundariesMap[2]_{i}\def\secondIndex{j} \bigl(\freeCyclesChainMap[1]_{i}\def\secondIndex{j}(\elementCycle[1]) \ringElement\bigr) \tensor \freeCyclesMap[3]_{\secondIndex}(\elementCycle[3]) \end{align*}
By (\ref{p1}) \begin{equation*} \freeCyclesMap[2]_{i}\def\secondIndex{j}\bigl(\freeCyclesChainMap[1]_{i}\def\secondIndex{j}(\elementCycle[1])\bigr) = \chainMap[1]_{i}\def\secondIndex{j}\bigl(\freeCyclesMap[1]_{i}\def\secondIndex{j}(\elementCycle[1])\bigr) + \boundary[2]_{i}\def\secondIndex{j+1}\bigl( \weakMap[1]_{i}\def\secondIndex{j}(\elementCycle[1])\bigr) \end{equation*} By (\ref{p2}) \begin{equation*} \freeBoundariesMap[2]_{i}\def\secondIndex{j} \bigl(\freeCyclesChainMap[1]_{i}\def\secondIndex{j}(\elementCycle[1]) \ringElement\bigr) = \freeBoundariesMap[2]_{i}\def\secondIndex{j} \bigl(\freeBoundariesChainMap[1]_{i}\def\secondIndex{j}(\elementCycle[1] \ringElement)\bigr) = \chainMap[1]_{i}\def\secondIndex{j+1}\Bigl(\freeBoundariesMap[1]_{i}\def\secondIndex{j} \bigl(\elementCycle[1] \ringElement\bigr)\Bigr) + \weakMap[1]_{i}\def\secondIndex{j}(\elementCycle[1] \ringElement) + \weakHomologyMap[1]_{i}\def\secondIndex{j} (\elementCycle[1]\moduleDot\ringElement[1]) \end{equation*} Hence \alignLine{ Y_2 - Y_1 =& (-1)^{i}\def\secondIndex{j+1} \Bigl(\boundary[2]_{i}\def\secondIndex{j+1} \bigl( \weakMap[1]_{i}\def\secondIndex{j}(\elementCycle[1])\bigr)\Bigr)\tensor \Bigl(\freeBoundariesMap[3]_{\secondIndex} \bigl(\ringElement \elementCycle[3]\bigr)\Bigr) + \Bigl(\weakMap[1]_{i}\def\secondIndex{j}(\elementCycle[1] \ringElement) + \weakHomologyMap[1]_{i}\def\secondIndex{j} (\elementCycle[1]\moduleDot\ringElement[1]) \Bigr)\tensor \freeCyclesMap[3]_{\secondIndex}(\elementCycle[3]) = \\& (-1)^{i}\def\secondIndex{j+1}\boundary[5]_{i}\def\secondIndex{j+\secondIndex+2}\bigl( \weakMap[1]_{i}\def\secondIndex{j}(\elementCycle[1])\tensor \freeBoundariesMap[3]_{\secondIndex}\bigl(\ringElement \elementCycle[3]\bigr) \bigr) + \weakTorsionHomologyMap[1]<1>_{i}\def\secondIndex{j}(\element[1])\tensor \freeCyclesMap[3]_{\secondIndex}(\elementCycle[3]) } since \begin{align*} \boundary[5]_{i}\def\secondIndex{j+\secondIndex+2}& \Bigl(\weakMap[1]_{i}\def\secondIndex{j}(\elementCycle[1])\tensor \freeBoundariesMap[3]_{\secondIndex} \bigl(\ringElement \elementCycle[3]\bigr)\Bigr) =\\& \Bigl(\boundary[2]_{i}\def\secondIndex{j+1} \bigl( \weakMap[1]_{i}\def\secondIndex{j}(\elementCycle[1])\bigr)\Bigr)\tensor \Bigl(\freeBoundariesMap[3]_{\secondIndex} \bigl(\ringElement \elementCycle[3]\bigr)\Bigr)+ (-1)^{i}\def\secondIndex{j+1} \Bigl(\weakMap[1]_{i}\def\secondIndex{j}(\elementCycle[1] )\Bigr)\tensor \Bigl(\ringElement\freeCyclesMap[3]_{\secondIndex}(\elementCycle[3])\Bigr)=\\& \Bigl(\boundary[2]_{i}\def\secondIndex{j+1} \bigl( \weakMap[1]_{i}\def\secondIndex{j}(\elementCycle[1])\bigr)\Bigr)\tensor \Bigl(\freeBoundariesMap[3]_{\secondIndex} \bigl(\ringElement \elementCycle[3]\bigr)\Bigr)+ (-1)^{i}\def\secondIndex{j+1} \Bigl(\weakMap[1]_{i}\def\secondIndex{j}(\elementCycle[1] \ringElement)\Bigr)\tensor \Bigl(\freeCyclesMap[3]_{\secondIndex}(\elementCycle[3])\Bigr) \end{align*}
\vskip10pt For the other case $X_1$ and $X_2$ become \begin{align*} Y_1=&(-1)^{i}\def\secondIndex{j+1} \freeCyclesMap[1]_{i}\def\secondIndex{j}(\elementCycle[1]) \tensor \chainMap[2]_{\secondIndex+1} \Bigl(\freeBoundariesMap[3]_{\secondIndex} \bigl(\ringElement \elementCycle[3]\bigr)\Bigr) + \freeBoundariesMap[1]_{i}\def\secondIndex{j} \bigl(\elementCycle[1] \ringElement\bigr) \tensor \chainMap[2]_{\secondIndex}\bigl(\freeCyclesMap[3]_{\secondIndex}(\elementCycle[3])\bigr)\\ Y_2=&(-1)^{i}\def\secondIndex{j+1} \freeCyclesMap[1]_{i}\def\secondIndex{j}(\elementCycle[1]) \tensor \freeBoundariesMap[4]_{\secondIndex} \bigl(\ringElement \freeCyclesChainMap[2]_{\secondIndex}(\elementCycle[3])\bigr) + \freeBoundariesMap[1]_{i}\def\secondIndex{j} \bigl(\elementCycle[1] \ringElement\bigr) \tensor \freeCyclesMap[4]_{\secondIndex} \bigl(\freeCyclesChainMap[2]_{\secondIndex}(\elementCycle[3])\bigr) \end{align*}
By (\ref{p1}) \begin{equation*} \freeCyclesMap[4]_{\secondIndex} \bigl(\freeCyclesChainMap[2]_{\secondIndex}(\elementCycle[3])\bigr) = \chainMap[2]_{\secondIndex} \bigl(\freeCyclesMap[3]_{\secondIndex}(\elementCycle[3])\bigr) + \boundary[4]_{\secondIndex+1} \bigl( \weakMap[2]_{\secondIndex}(\elementCycle[3])\bigr) \end{equation*} By (\ref{p2}) \begin{equation*} \freeBoundariesMap[4]_{\secondIndex} \bigl(\freeCyclesChainMap[2]_{\secondIndex}(\elementCycle[3]) \ringElement\bigr) = \freeBoundariesMap[4]_{\secondIndex} \bigl(\freeBoundariesChainMap[2]_{\secondIndex}(\elementCycle[3] \ringElement)\bigr) = \chainMap[2]_{\secondIndex+1}\Bigl(\freeBoundariesMap[3]_{\secondIndex} \bigl(\elementCycle[3] \ringElement\bigr)\Bigr) + \weakMap[2]_{\secondIndex}(\elementCycle[3] \ringElement) - \weakHomologyMap[2]_{\secondIndex} (\elementCycle[3]\moduleDot\ringElement[1]) \end{equation*}
Hence \begin{align*} Y_2-Y_1=& (-1)^{i}\def\secondIndex{j+1} \freeCyclesMap[1]_{i}\def\secondIndex{j}(\elementCycle[1])\tensor \bigl( \weakMap[2]_{\secondIndex}(\elementCycle[3] \ringElement) + \weakHomologyMap[2]_{\secondIndex} (\elementCycle[3]\moduleDot\ringElement[1]) \bigr) + \freeBoundariesMap[1]_{i}\def\secondIndex{j} \bigl(\elementCycle[1] \ringElement\bigr) \tensor \boundary[4]_{\secondIndex+1} \bigl( \weakMap[2]_{\secondIndex}(\elementCycle[3])\bigr) =\\& \boundary[6]_{i}\def\secondIndex{j+\secondIndex+2}\bigl(\freeBoundariesMap[1]_{i}\def\secondIndex{j} (\elementCycle[1] \ringElement) \tensor \weakMap[2]_{\secondIndex}(\elementCycle[3] \ringElement)\bigr) +(-1)^{i}\def\secondIndex{j+1} \freeCyclesMap[1]_{i}\def\secondIndex{j}(\elementCycle[1])\tensor \weakHomologyMap[2]_{\secondIndex} (\elementCycle[3]\moduleDot\ringElement[1] \end{align*} \end{math check}
\begin{ThmS}[naturality of cosets]{Corollary} Given chain maps $\chainMap[1]_\ast\colon \complex[1]_\ast \to \complex[2]_\ast$ and $\chainMap[2]_\ast\colon \complex[3]_\ast \to \complex[4]_\ast$
\begin{equation*} \bigl(\chainMap[1]_\ast\tensor \chainMap[2]_\ast\bigr)_\ast\bigl( \cosetTor[{\element[1]}]{\ringElement}{\element[2]} \bigr)\subset \cosetTor[{\chainMap[1]_\ast(\element[1])}]{\ringElement} {\chainMap[2]_\ast(\element[2])} \end{equation*} In words, the cosets are natural and do not depend on the weak splittings of the complexes. \end{ThmS} \begin{proof} First check that the $0$-cosets behave correctly:
\noindent\mathLine{ \bigl(\chainMap[1]_\ast\tensor \chainMap[2]_\ast\bigr)_\ast\Bigl( \fundamentalCoset{\element[1]}{\element[2]}{\complex[1]_\ast}{\complex[3]_\ast} {i}\def\secondIndex{j}{\secondIndex}\Bigr) \subset \fundamentalCoset{\chainMap[1]_\ast(\element[1])} {\chainMap[2]_\ast(\element[2])} {\complex[2]_\ast}{\complex[4]_\ast} {i}\def\secondIndex{j}{\secondIndex}} By \namedRef{deviation from naturality in Kunneth formula} $\cs{homology splitting}[{\splitPair[2]_\ast}]{\splitPair[4]_\ast}_{i}\def\secondIndex{j,\secondIndex} \bigl(\cs{elementary tor}{\chainMap[1](\element[1])}{\ringElement} {\chainMap[2](\element[2])} \bigr) \subset \cosetTor[{\chainMap[1]_\ast(\element[1])}]{\ringElement} {\chainMap[2]_\ast(\element[2])}$. One application of \namedRef{deviation from naturality in Kunneth formula} is to the case in which $\chainMap[1]_\ast$ is the identity but the weak splittings change. Hence changing the weak splittings does not change the cosets. The result follows. \end{proof}
\section{The interchange map and the K\"unneth\ formula} There are natural isomorphisms $I\colon \complex[1]\tensor[R] \complex[3] \cong \complex[3]\tensor[R] \complex[1]$ and $I\colon \complex[1]\tor[R] \complex[3] \cong \complex[3]\tor[R] \complex[1]$. On elementary tensors, $I(\element[1]\tensor \element[2]) = \element[2]\tensor \element[1]$ and $I(\cs{elementary tor} {\element[1]}{\ringElement}{\element[2]})=\cs{elementary tor} {\element[2]}{\ringElement}{\element[1]}$. Applying $I$ to the tensor product of two chain complexes is not a chain map: a sign is required. The usual choice is \begin{equation*} T\colon \complex[1]_\ast\tensor[R] \complex[3]_\ast \to \complex[3]_\ast\tensor[R] \complex[1]_\ast \end{equation*} defined on elementary tensors by $T(\element[1] \tensor \element[2]) = (-1)^{\abs{\element[1]}\abs{\element[2]}} \element[2] \tensor \element[1]$.
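\begin{DefS*}{Remark} That this sign makes $T$ a chain map is the standard check, recorded here for completeness in ordinary notation, with $x\in\complex[1]_p$ and $y\in\complex[3]_q$ homogeneous:
\begin{align*}
\partial\, T(x\tensor y) &= (-1)^{pq}\bigl(\partial y\tensor x + (-1)^{q}\, y\tensor \partial x\bigr),\\
T\,\partial(x\tensor y) &= (-1)^{(p-1)q}\, y\tensor\partial x + (-1)^{p+p(q-1)}\,\partial y\tensor x,
\end{align*}
and the two agree because $(-1)^{(p-1)q}=(-1)^{pq+q}$ and $(-1)^{p+p(q-1)}=(-1)^{pq}$.
\end{DefS*}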
It follows that the cross product map satisfies \begin{equation*} T_\ast(\element[1] \cs{cross product} \element[2]) = (-1)^{\abs{\element[1]}\abs{\element[2]}} \element[2] \cs{cross product} \element[1] \end{equation*} for all $\element[1]\in H_{\abs{\element[1]}}(\complex[1]_\ast)$ and $\element[2]\in H_{\abs{\element[2]}}(\complex[3]_\ast)$.
\begin{ThmS}[flip theorem I]{Theorem} For all $\element[1]\in H_{i}\def\secondIndex{j}(\complex[1]_\ast)$ and $\element[2]\in H_{\secondIndex}(\complex[3]_\ast)$ {\setlength\belowdisplayskip{-10pt} \begin{equation*} T_\ast\Bigl(\cs{homology splitting}[{\splitPair[1]}] {\splitPair[3]}_{i}\def\secondIndex{j+\secondIndex+1}\bigr( \cs{elementary tor}{\element[1]}{\ringElement}{\element[2]}\bigr)\Bigr) = (-1)^{i}\def\secondIndex{j\cdot\secondIndex+1} \cs{homology splitting}[{\splitPair[3]}]{\splitPair[1]}_{i}\def\secondIndex{j+\secondIndex+1} \bigl( \cs{elementary tor}{\element[2]}{\ringElement}{\element[1]}\bigr) \end{equation*} } \end{ThmS} \begin{proof} Apply $T$ to the cycle in \cs{torsion product cycle II 1}. \end{proof}
\begin{math check} $\epsilon\, \freeCyclesMap[1]_\ast(\elementCycle[1]) \tensor \freeBoundariesMap[3]_\ast\bigl(\ringElement \elementCycle[3]\bigr) + \freeBoundariesMap[1]_\ast \bigl(\elementCycle[1] \ringElement\bigr) \tensor \freeCyclesMap[3]_\ast(\elementCycle[3]) $.
\begin{align*} T\Bigl(& \epsilon\, \freeCyclesMap[1]_\ast(\elementCycle[1]) \tensor \freeBoundariesMap[3]_\ast\bigl(\ringElement \elementCycle[3]\bigr) + \freeBoundariesMap[1]_\ast \bigl(\elementCycle[1] \ringElement\bigr) \tensor \freeCyclesMap[3]_\ast(\elementCycle[3])\Bigr) = \\& (-1)^{i}\def\secondIndex{j(\secondIndex+1)}\Bigl( (-1)^{i}\def\secondIndex{j+1} \freeBoundariesMap[3]_\ast\bigl(\ringElement \elementCycle[3]\bigr) \tensor \freeCyclesMap[1]_\ast(\elementCycle[1]) \Bigr) + (-1)^{(i}\def\secondIndex{j+1)\secondIndex}\Bigl( \freeCyclesMap[3]_\ast(\elementCycle[3]) \tensor \freeBoundariesMap[1]_\ast \bigl(\elementCycle[1] \ringElement\bigr) \Bigr)=\\& (-1)^{i}\def\secondIndex{j\secondIndex+1}\Bigl( \freeBoundariesMap[3]_\ast\bigl(\ringElement \elementCycle[3]\bigr) \tensor \freeCyclesMap[1]_\ast(\elementCycle[1]) + (-1)^{i}\def\secondIndex{j\secondIndex+1}\Bigl( (-1)^{\secondIndex+1} \freeCyclesMap[3]_\ast(\elementCycle[3]) \tensor \freeBoundariesMap[1]_\ast \bigl(\elementCycle[1] \ringElement\bigr) \Bigr)=\\& (-1)^{i}\def\secondIndex{j\secondIndex+1}\Bigl( (-1)^{\secondIndex+1} \freeBoundariesMap[3]_\ast\bigl(\ringElement \elementCycle[3]\bigr) \tensor \freeCyclesMap[1]_\ast(\elementCycle[1]) + \freeCyclesMap[3]_\ast(\elementCycle[3]) \tensor \freeBoundariesMap[1]_\ast \bigl(\elementCycle[1] \ringElement\bigr) \Bigr) \end{align*} This cycle represents $(-1)^{i}\def\secondIndex{j\cdot\secondIndex+1} \cs{homology splitting}[{\splitPair[3]}]{\splitPair[1]}_{i}\def\secondIndex{j+\secondIndex+1} \bigl( \cs{elementary tor}{\element[2]}{\ringElement}{\element[1]}\bigr) $. \end{math check}
\begin{ThmS}[flip and Kunneth]{Corollary} If $R$ is a PID and if $\complex[1]_\ast\tor[R]\complex[3]_\ast$ is acyclic
\noindent\resizebox{\textwidth}{!}{{$\xymatrix{ 0\to \displaystyle\mathop{\oplus}_{i}\def\secondIndex{j+\secondIndex=\totalInt} H_{i}\def\secondIndex{j}(\complex[1]_\ast)\tensor[R] H_{\secondIndex}(\complex[3]_\ast) \ar[r]^-{\cs{cross product}} \ar[d]^-{\displaystyle\mathop{\oplus}_{i}\def\secondIndex{j+\secondIndex=\totalInt}(-1)^{i}\def\secondIndex{j \secondIndex} I} & H_{\totalInt}(\complex[1]_\ast\tensor[R] \complex[3]_\ast) \ar[r]^-{\cs{to torsion product}} \ar[d]^-{T_\ast}& \displaystyle\mathop{\oplus}_{i}\def\secondIndex{j+\secondIndex=\totalInt-1} H_{i}\def\secondIndex{j}(\complex[1]_\ast)\tor[R] H_{\secondIndex}(\complex[3]_\ast)\to0 \ar[d]\ar[d]_-{\displaystyle\mathop{\oplus}_{i}\def\secondIndex{j+\secondIndex=\totalInt-1} (-1)^{i}\def\secondIndex{j \secondIndex + 1} I}
\\
0\to \displaystyle\mathop{\oplus}_{i}\def\secondIndex{j+\secondIndex=\totalInt} H_{i}\def\secondIndex{j}(\complex[3]_\ast)\tensor[R] H_{\secondIndex}(\complex[1]_\ast) \ar[r]^-{\cs{cross product}}& H_{\totalInt}(\complex[3]_\ast\tensor[R] \complex[1]_\ast) \ar[r]^-{\cs{to torsion product}}& \displaystyle\mathop{\oplus}_{i}\def\secondIndex{j+\secondIndex=\totalInt-1} H_{i}\def\secondIndex{j}(\complex[3]_\ast)\tor[R] H_{\secondIndex}(\complex[1]_\ast)\to0 }$}}
\noindent commutes. Moreover, by \namedRef{flip theorem I} the splittings of the two rows can be chosen compatibly, so that the diagram still commutes when the splitting maps are included. \end{ThmS}
\section{The boundary map and the K\"unneth\ formula} The boundary map in question is the map associated with the long exact homology sequence for a short exact sequence of chain complexes. Before stating the result some preliminaries are needed. \begin{DefS}{Definition} A pair of composable chain maps $\xymatrix@1@C12pt{\complex[1]_\ast\ar[rr]^-{\chainMap[1]_\ast}&& \complex[3]_\ast}$ and $\xymatrix@1@C12pt{\complex[3]_\ast\ar[rr]^-{\chainMap[2]_\ast}&& \complex[2]_\ast}$ form \emph{a weak exact sequence} provided there exists a short exact sequence of free approximations and chain maps making (\ref{weak exact sequence diagram}) below commute. \namedNumber{weak exact sequence diagram} {\setlength\belowdisplayskip{-10pt} \begin{equation*}\tag{\ref{weak exact sequence diagram}} \xymatrix@C10pt{ 0\ar[r]&\freeApproximation[1]_\ast\ar[rr]^-{\freeApproximationChainMap[1]_\ast} \ar[d]^-{\vertMap{\complex[1]}_\ast}&& \freeApproximation[3]_\ast\ar[rr]^-{\freeApproximationChainMap[2]_\ast} \ar[d]^-{\vertMap{\complex[3]}_\ast}&& \freeApproximation[2]_\ast \ar[d]^-{\vertMap{\complex[2]}_\ast} \ar[r]& 0\\
& \complex[1]_\ast\ar[rr]^-{\chainMap[1]_\ast}&& \complex[3]_\ast\ar[rr]^-{\chainMap[2]_\ast}&& \complex[2]_\ast} \end{equation*} } \end{DefS} Given a weak exact sequence there is a long exact homology sequence coming from the long exact sequence of the top row of (\ref{weak exact sequence diagram}): \begin{equation*} \xymatrix@C28pt{\cdots\to H_{i}\def\secondIndex{j+1}(\complex[2]_\ast)\ar[r]^-{\boldsymbol{\partial}_{i}\def\secondIndex{j+1}}& H_{i}\def\secondIndex{j}(\complex[1]_\ast)\ar[r]^-{\chainMap[1]_\ast}& H_{i}\def\secondIndex{j}(\complex[3]_\ast)\ar[r]^{\chainMap[2]_\ast}& H_{i}\def\secondIndex{j}(\complex[2]_\ast)\ar[r]^-{\boldsymbol{\partial}_{i}\def\secondIndex{j}}&\cdots } \end{equation*} The boundary map is $\boldsymbol{\partial}_{i}\def\secondIndex{j+1} = \vertMap{\complex[1]}_\ast \circ \partial_{i}\def\secondIndex{j+1}\circ (\vertMap{\complex[2]}_\ast)^{-1}$, where $\partial_{i}\def\secondIndex{j+1}$ is the usual boundary in the long exact homology sequence for the free complexes. \begin{ThmS}{Lemma} A short exact sequence of chain complexes \begin{equation*} \xyLine[@C10pt]{ 0\ar[r]& \complex[1]_\ast\ar[rr]^-{\chainMap[1]_\ast}&& \complex[3]_\ast\ar[rr]^-{\chainMap[2]_\ast}&& \complex[2]_\ast\ \ar[r]& 0} \end{equation*} is weak exact. The boundary $\boldsymbol{\partial}_{i}\def\secondIndex{j+1}$ is the usual boundary map. \end{ThmS} \begin{proof} The commutative diagram of free approximations (\ref{weak exact sequence diagram}) is given by \namedRef{short exact free approximation}. The description of the boundary map is immediate. \end{proof}
\begin{ThmS}[weak exact is preserved by products]{Lemma} If $\complex[1]_\ast\tor[R]\complex[4]_\ast$, $\complex[3]_\ast\tor[R]\complex[4]_\ast$ and $\complex[2]_\ast\tor[R]\complex[4]_\ast$ are acyclic and if $\xymatrix@1{ \complex[1]_\ast\ar[r]^-{\chainMap[1]_\ast}& \complex[3]_\ast\ar[r]^-{\chainMap[2]_\ast}& \complex[2]_\ast }$ is weak exact, then so are {\setlength\abovedisplayskip{0pt} \setlength\belowdisplayskip{0pt} \begin{equation*} \xymatrix@C40pt@R10pt{ \complex[1]_\ast\tensor[R]\complex[4]_\ast \ar[r]^-{\chainMap[1]_\ast \tensor \identyMap{\complex[4]_\ast}}& \complex[3]_\ast\tensor[R]\complex[4]_\ast \ar[r]^-{\chainMap[2]_\ast \tensor \identyMap{\complex[4]_\ast}}& \complex[2]_\ast\tensor[R]\complex[4]_\ast \\ \complex[4]_\ast\tensor[R]\complex[1]_\ast \ar[r]^-{\identyMap{\complex[4]_\ast} \tensor \chainMap[1]_\ast}& \complex[4]_\ast\tensor[R]\complex[3]_\ast \ar[r]^-{\identyMap{\complex[4]_\ast}\tensor\chainMap[2]_\ast}& \complex[4]_\ast\tensor[R]\complex[2]_\ast }\end{equation*}} \end{ThmS} \begin{proof} Pick free approximations satisfying (\ref{weak exact sequence diagram}), $\vertMap{\complex[1]}_\ast$, $\vertMap{\complex[3]}_\ast$, $\vertMap{\complex[2]}_\ast$ and a free approximation $\vertMap{\complex[4]}_\ast$. By \namedRef{Dold splitting} the required free approximations are $\vertMap{\complex[1]}_{\ast} \tensor\vertMap{\complex[4]}_{\ast}$, $\vertMap{\complex[3]}_{\ast} \tensor\vertMap{\complex[4]}_{\ast}$, $\vertMap{\complex[2]}_{\ast} \tensor\vertMap{\complex[4]}_{\ast}$, or $\vertMap{\complex[4]}_{\ast} \tensor\vertMap{\complex[1]}_{\ast}$, $\vertMap{\complex[4]}_{\ast} \tensor\vertMap{\complex[3]}_{\ast}$, $\vertMap{\complex[4]}_{\ast} \tensor\vertMap{\complex[2]}_{\ast}$. \end{proof}
\begin{DefS*}{Warning} Even if $\xymatrix@1{ \complex[1]_\ast\ar[r]^-{\chainMap[1]_\ast}& \complex[3]_\ast\ar[r]^-{\chainMap[2]_\ast}& \, \complex[2]_\ast }$ is short exact, the pair $\chainMap[1]_\ast\tensor \identyMap{\complex[4]_\ast}$ and $\chainMap[2]_\ast\tensor \identyMap{\complex[4]_\ast}$ may only be weak exact. For them to be short exact requires that either $\complex[2]_\ast$ or $\complex[4]_\ast$ be torsion free. \end{DefS*}
\begin{ThmS}[boundary of elementary tor]{Theorem} Suppose $\complex[1]_\ast\tor[R]\complex[4]_\ast$, $\complex[3]_\ast\tor[R]\complex[4]_\ast$ and $\complex[2]_\ast\tor[R]\complex[4]_\ast$ are acyclic and suppose $\xymatrix@1{ \complex[1]_\ast\ar[r]^-{\chainMap[1]_\ast}& \complex[3]_\ast\ar[r]^-{\chainMap[2]_\ast}& \,\complex[2]_\ast }$ is weak exact. Then for $\element[1]\in H_{i}(\complex[2]_\ast)$ and $\element[2]\in H_{\secondIndex}(\complex[4]_\ast)$ {\setlength\belowdisplayskip{-10pt} \begin{equation*} \boldsymbol{\partial}_{i+\secondIndex+1} \bigl(\cosetTor[{\element[1]}]{\ringElement}{\element[2]}\bigr) \subset - \cosetTor[{\boldsymbol{\partial}_{i}(\element[1])}]{\ringElement}{\element[2]} \end{equation*} } \end{ThmS}\nointerlineskip \begin{proof} By \namedRef{weak exact is preserved by products} it may be assumed that the complexes are all free. Pick compatible splittings for $\complex[1]_\ast$, $\complex[2]_\ast$ and $\complex[4]_\ast$. Recall that Bocksteins and long exact sequence boundary maps anti-commute and that in short exact sequences of free chain complexes $\boldsymbol{\partial}_{i+\secondIndex}(\elementCycle[1]\otimes\elementCycle[4]) = \boldsymbol{\partial}_{i}(\elementCycle[1])\otimes\elementCycle[4] $. A routine calculation completes the proof. \end{proof}
\begin{math check} Then \begin{equation*} (-1)^{i}\def\secondIndex{j+1} \mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+2}\bigl( \splitBocksteinHomology^{\complex[2],\ringElement}_{i}\def\secondIndex{j}(\element[1]) \cs{cross product} \splitBocksteinHomology^{\complex[4],\ringElement}_{\secondIndex}(\element[2]) \bigr) \in\cosetTor[{\element[1]}]{\ringElement}{\element[2]} \end{equation*} Since $\boldsymbol{\partial}_{i}\def\secondIndex{j+\secondIndex+1} \circ \mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+2} = - \mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+1}\circ \boldsymbol{\partial}_{i}\def\secondIndex{j+\secondIndex+2}$ \begin{align*} \boldsymbol{\partial}_{i}\def\secondIndex{j+\secondIndex+1}\Bigl( (-1)^{i}\def\secondIndex{j+1}\mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+2}\bigl(& \splitBocksteinHomology^{\complex[2],\ringElement}_{i}\def\secondIndex{j}(\element[1]) \cs{cross product} \splitBocksteinHomology^{\complex[4],\ringElement}_{\secondIndex}(\element[2]) \bigr)\Bigr) =\\& (-1)^{i}\def\secondIndex{j} \biggl(\mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+1}\Bigr( \boldsymbol{\partial}_{i}\def\secondIndex{j+\secondIndex+2}\bigl( \splitBocksteinHomology^{\complex[2],\ringElement}_{i}\def\secondIndex{j}(\element[1]) \cs{cross product} \splitBocksteinHomology^{\complex[4],\ringElement}_{\secondIndex}(\element[2]) \bigr)\Bigr)\biggr)=\\& (-1)^{i}\def\secondIndex{j} \mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+1}\Bigr( \boldsymbol{\partial}_{i}\def\secondIndex{j+1}\bigl( \splitBocksteinHomology^{\complex[2],\ringElement}_{i}\def\secondIndex{j}(\element[1])\bigr) \cs{cross product} \splitBocksteinHomology^{\complex[4],\ringElement}_{\secondIndex}(\element[2]) \Bigr)\ . \end{align*} On the other side \begin{equation*} (-1)^{i}\def\secondIndex{j-1+1} \mathfrak b^{\ringElement}_{i}\def\secondIndex{j-1+\secondIndex+2}\Bigl( \splitBocksteinHomology^{\complex[1],\ringElement}_{i}\def\secondIndex{j-1}\bigl( \boldsymbol{\partial}_{i}\def\secondIndex{j}(\element[1])\bigr) \cs{cross product} \splitBocksteinHomology^{\complex[4],\ringElement}_{\secondIndex}(\element[2]) \Bigr) \in\cosetTor[{\boldsymbol{\partial}_{i}\def\secondIndex{j}(\element[1])}]{\ringElement}{\element[2]} \end{equation*}
Both $\splitBocksteinHomology^{\complex[1],\ringElement}_{i}\def\secondIndex{j-1}\bigl( \boldsymbol{\partial}_{i}\def\secondIndex{j}(\element[1])\bigr)$ and $\boldsymbol{\partial}_{i}\def\secondIndex{j+1}\bigl( \splitBocksteinHomology^{\complex[2],\ringElement}_{i}\def\secondIndex{j}(\element[1])\bigr) $ are chains in $\complex[1]_{i}\def\secondIndex{j}$ which are cycles in $\complex[1]_{i}\def\secondIndex{j}\tensor[R]\ry{\ringElement}$. Applying Bocksteins shows $\mathfrak b^{\ringElement}_{i}\def\secondIndex{j}\Bigl( \splitBocksteinHomology^{\complex[1],\ringElement}_{i}\def\secondIndex{j-1}\bigl( \boldsymbol{\partial}_{i}\def\secondIndex{j}(\element[1])\bigr)\Bigr) = \ringElement \boldsymbol{\partial}_{i}\def\secondIndex{j}(\element[1])$ and $\mathfrak b^{\ringElement}_{i}\def\secondIndex{j}\Bigl(\boldsymbol{\partial}_{i}\def\secondIndex{j+1}\bigl( \splitBocksteinHomology^{\complex[2],\ringElement}_{i}\def\secondIndex{j}(\element[1])\bigr) \Bigr) = -\boldsymbol{\partial}_{i}\def\secondIndex{j} \Bigl(\mathfrak b^{\ringElement}_{i}\def\secondIndex{j+1} \bigl(\splitBocksteinHomology^{\complex[2],\ringElement}_{i}\def\secondIndex{j}(\element[1]) \bigr)\Bigr) = -\boldsymbol{\partial}_{i}\def\secondIndex{j} (\ringElement \element[1]) = -\ringElement \boldsymbol{\partial}_{i}\def\secondIndex{j}(\element[1]) $. Hence $Z = \splitBocksteinHomology^{\complex[1],\ringElement}_{i}\def\secondIndex{j-1}\bigl( \boldsymbol{\partial}_{i}\def\secondIndex{j}(\element[1])\bigr) + \boldsymbol{\partial}_{i}\def\secondIndex{j+1}\bigl( \splitBocksteinHomology^{\complex[2],\ringElement}_{i}\def\secondIndex{j}(\element[1])\bigr) $ is a cycle.
Hence \begin{align*} &\boldsymbol{\partial}_{i}\def\secondIndex{j+\secondIndex+1}\Bigl( (-1)^{i}\def\secondIndex{j+1}\mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+2}\bigl( \splitBocksteinHomology^{\complex[2],\ringElement}_{i}\def\secondIndex{j}(\element[1]) \cs{cross product} \splitBocksteinHomology^{\complex[4],\ringElement}_{\secondIndex}(\element[2]) \bigr)\Bigr) =\\& \hskip 40pt(-1)^{i}\def\secondIndex{j} \mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+1}\biggl( \Bigl(Z - \splitBocksteinHomology^{\complex[1],\ringElement}_{i}\def\secondIndex{j-1}\bigl( \boldsymbol{\partial}_{i}\def\secondIndex{j}(\element[1])\bigr)\Bigr) \cs{cross product} \splitBocksteinHomology^{\complex[4],\ringElement}_{\secondIndex}(\element[2]) \biggr) =\\& (-1)^{i}\def\secondIndex{j+1} \mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+1} \Bigl(\splitBocksteinHomology^{\complex[1],\ringElement}_{i}\def\secondIndex{j-1}\bigl( \boldsymbol{\partial}_{i}\def\secondIndex{j}(\element[1])\bigr) \cs{cross product} \splitBocksteinHomology^{\complex[4],\ringElement}_{\secondIndex}(\element[2]) \Bigr)
+(-1)^{i}\def\secondIndex{j+1} \mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+1}\bigl( Z \cs{cross product} \splitBocksteinHomology^{\complex[4],\ringElement}_{\secondIndex}(\element[2]) \bigr)=\\& \hskip 40pt-(-1)^{i}\def\secondIndex{j} \mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+1} \Bigl(\splitBocksteinHomology^{\complex[1],\ringElement}_{i}\def\secondIndex{j-1}\bigl( \boldsymbol{\partial}_{i}\def\secondIndex{j}(\element[1])\bigr) \cs{cross product} \splitBocksteinHomology^{\complex[4],\ringElement}_{\secondIndex}(\element[2]) \Bigr)
+(-1)^{i}\def\secondIndex{j+1+i}\def\secondIndex{j} Z \cs{cross product} \element[2] \end{align*} and therefore \begin{equation*} \boldsymbol{\partial}_{i}\def\secondIndex{j+\secondIndex+1}\Bigl( (-1)^{i}\def\secondIndex{j+1}\mathfrak b^{\ringElement}_{i}\def\secondIndex{j+\secondIndex+2}\bigl( \splitBocksteinHomology^{\complex[2],\ringElement}_{i}\def\secondIndex{j}(\element[1]) \cs{cross product} \splitBocksteinHomology^{\complex[4],\ringElement}_{\secondIndex}(\element[2]) \bigr)\Bigr) \in -\cosetTor[{\boldsymbol{\partial}_{i}\def\secondIndex{j}(\element[1])}]{\ringElement}{\element[2]} \end{equation*}
Since one element of $\boldsymbol{\partial}_{i}\def\secondIndex{j+\secondIndex+1} \bigl(\cosetTor[{\element[1]}]{\ringElement}{\element[2]}\bigr) $ is in $-\cosetTor[{\boldsymbol{\partial}_{i}\def\secondIndex{j}(\element[1])}]{\ringElement}{\element[2]}$ and since \mathLine{\boldsymbol{\partial}_{i}\def\secondIndex{j+\secondIndex+1}\Bigl( \fundamentalCoset{\element[1]}{\element[2]}{\complex[1]_\ast}{\complex[4]_\ast} {i}\def\secondIndex{j}{\secondIndex}\Bigr) \subset \bigl(\boldsymbol{\partial}_{i}\def\secondIndex{j}(\element[1])\cs{cross product} H_{\secondIndex+1}(\complex[4]_\ast)\bigr) \displaystyle\mathop{\oplus} \bigl(H_{i}\def\secondIndex{j}(\complex[1]_\ast)\cs{cross product}\element[2]\bigr) } the result follows. \end{math check}
\begin{ThmS}{Corollary} With assumptions and notation as in \namedRef{boundary of elementary tor} {\setlength\belowdisplayskip{-10pt} \begin{equation*} \boldsymbol{\partial}_{i+\secondIndex+1} \bigl(\cosetTor[{\element[2]}]{\ringElement}{\element[1]}\bigr) \subset (-1)^{\secondIndex+1} \cosetTor[{\element[2]}]{\ringElement}{{\boldsymbol{\partial}_{i}(\element[1])}} \end{equation*}} \end{ThmS}\nointerlineskip \begin{proof} Apply the interchange map (\ref{flip theorem I}) to get to the situation of \namedRef{boundary of elementary tor} and then apply the interchange map again. \end{proof}
\begin{ThmS}[boundary and Kunneth]{Corollary} With assumptions and notation as in \namedRef{boundary of elementary tor} let $\boldsymbol{\partial}_{i}\tor \identyMap{H_{\secondIndex}(\complex[4]_\ast)} \colon H_{i}(\complex[2]_\ast)\tor[R] H_{\secondIndex}(\complex[4]_\ast) \to H_{i-1}(\complex[1]_\ast)\tor[R] H_{\secondIndex}(\complex[4]_\ast) $ be the map defined by $\boldsymbol{\partial}_{i}\tor \identyMap{H_{\secondIndex}(\complex[4]_\ast)}\bigl( \cs{elementary tor}{\element[1]}{\ringElement}{\element[2]}\bigr) = \cs{elementary tor}{\boldsymbol{\partial}_{i}(\element[1])}{\ringElement}{\element[2]} $. Then
\noindent\resizebox{\textwidth}{!}{{$\xymatrix@R30pt{ 0\to \displaystyle\mathop{\oplus}_{i+\secondIndex=\totalInt+1} H_{i}(\complex[2]_\ast)\tensor[R] H_{\secondIndex}(\complex[4]_\ast) \ar[r]^-{\cs{cross product}} \ar[d]^-{\hbox{\tiny{$\displaystyle\mathop{\oplus}_{i+\secondIndex=\totalInt+1} \boldsymbol{\partial}_{i}\tensor \identyMap{H_{\secondIndex}(\complex[4]_\ast)}$}}}& H_{\totalInt+1}(\complex[2]_\ast\tensor[R] \complex[4]_\ast) \ar[r]^-{\cs{to torsion product}} \ar[d]_-{\boldsymbol{\partial}_{\totalInt+1}}& \displaystyle\mathop{\oplus}_{i+\secondIndex=\totalInt} H_{i}(\complex[2]_\ast)\tor[R] H_{\secondIndex}(\complex[4]_\ast)\to0 \ar[d]_-{\hbox{\tiny{$\displaystyle\mathop{\oplus}_{i+\secondIndex=\totalInt} -\boldsymbol{\partial}_{i}\tor \identyMap{H_{\secondIndex}(\complex[4]_\ast)}$}}}
\\
0\to \displaystyle\mathop{\oplus}_{i+\secondIndex=\totalInt+1} H_{i-1}(\complex[1]_\ast)\tensor[R] H_{\secondIndex}(\complex[4]_\ast) \ar[r]^-{\cs{cross product}}& H_{\totalInt}(\complex[1]_\ast\tensor[R] \complex[4]_\ast) \ar[r]^-{\cs{to torsion product}}& \displaystyle\mathop{\oplus}_{i+\secondIndex=\totalInt} H_{i-1}(\complex[1]_\ast)\tor[R] H_{\secondIndex}(\complex[4]_\ast)\to0 }$}}
\noindent commutes. \end{ThmS} \begin{proof}The proof is immediate. \end{proof}
\section{The Massey triple product} Suppose $X$ and $Y$ are CW complexes with finitely many cells in each dimension. Then the cellular cochains are free $\mathbb Z$ modules and the K\"unneth\ formula plus the Eilenberg-Zilber chain homotopy equivalence yields a K\"unneth\ formula \mathLine{ \xymatrix{ 0\to \displaystyle\mathop{\oplus}_{i+\secondIndex=\totalInt} H^{i}(X)\otimes H^{\secondIndex}(Y) \ar[r]^-{\cs{cross product}}& H^{\totalInt}(X\times Y)\ar[r]^-{\cs{to torsion product}}& \displaystyle\mathop{\oplus}_{i+\secondIndex=\totalInt+1} H^{i}(X)\tor H^{\secondIndex}(Y)\to0 }} Given $u\in H^{i}(X)$ define $\secondU{u}\in H^{i}(X\times Y)$ by $\secondU{u}=p_X^\ast(u)$ where $p_X\colon X\times Y \to X$ is the projection. For $v\in H^{\secondIndex}(Y)$ define $\secondU{v}\in H^{\secondIndex}(X\times Y)$ similarly and recall $u\cs{cross product} v = \secondU{u} \cup \secondU{v}$ where $\cup$ denotes the cup product.
\begin{ThmS}{Theorem} With notation as above and non-zero $m\in \mathbb Z$ \begin{equation*} \cosetTor[u]{m}{v} = \boldsymbol{\langle} \secondU{u}, \secondU{(m)},\secondU{v} \boldsymbol{\rangle} \end{equation*} where $\boldsymbol{\langle} \secondU{u}, \secondU{(m)},\secondU{v} \boldsymbol{\rangle} $ is the Massey triple product of the indicated cohomology classes where $\secondU{(m)}$ is $m$ times the multiplicative identity in $H^0(X\times Y)$. \end{ThmS} The proof is immediate from \namedRef{Mac Lane cycle A} and the definition of the Massey triple product.
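\begin{DefS}{Remark} As a standard illustration, take $X=Y=\mathbb{RP}^2$ with integral coefficients, so $H^{0}(X)\cong\mathbb Z$, $H^{1}(X)=0$ and $H^{2}(X)\cong\mathbb Z/2$. In total degree $3$ the tensor terms of the K\"unneth\ formula vanish and the only torsion term is $H^{2}(X)\tor H^{2}(Y)\cong\mathbb Z/2$, so $H^{3}(X\times Y)\cong\mathbb Z/2$. If $u$ and $v$ denote the generators of $H^{2}(X)$ and $H^{2}(Y)$, then $2\secondU{u}=0=2\secondU{v}$, so the triple product $\boldsymbol{\langle} \secondU{u}, \secondU{(2)},\secondU{v} \boldsymbol{\rangle}$ is defined, and by the theorem it is the coset $\cosetTor[u]{2}{v}$, which here consists of the single nonzero class in $H^{3}(X\times Y)$. \end{DefS}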
\section{Weakly split chain complexes} Heller's category in \cite{Heller} carries much the same information as weak splittings.
\gdef\chainMap[1]+ \chainMap[2]{\chainMap[2]\circ\chainMap[1]} \begin{ThmS}{Proposition} If $\chainMap[1]_\ast\colon \complex[1]_\ast \to \complex[3]_\ast$ and $\chainMap[2]_\ast\colon \complex[3]_\ast \to \complex[2]_\ast$ are weakly split chain maps, then $\chainMap[2]\circ\chainMap[1]$ is weakly split by $\freeCyclesChainMap[10]_{\totalInt}= \freeCyclesChainMap[2]_{\totalInt}\circ \freeCyclesChainMap[1]_{\totalInt}$ and $\weakMap[10]_{\totalInt} = \chainMap[2]_{\totalInt+1}\circ \weakMap[1]_{\totalInt} + \weakMap[2]_{\totalInt} \circ \freeCyclesChainMap[1]_{\totalInt}$. With these choices \begin{equation*} \weakTorsionHomologyMap[10]<1>_{\totalInt} = (\chainMap[2]_{\totalInt+1}\tensor \identyMap{\ry{\ringElement}})\circ\weakTorsionHomologyMap[1]<1>_{\totalInt} + \weakTorsionHomologyMap[2]<1>_{\totalInt}\circ \chainMap[1]_{\totalInt} \end{equation*}
\end{ThmS} \begin{proof} Formula (\ref{p0}) is immediate. Formula (\ref{p1}) is a routine calculation. It is straightforward to check $\weakHomologyMap[10]_{\totalInt} = \chainMap[2]_{\totalInt+1}\circ \weakHomologyMap[1]_{\totalInt} + \weakHomologyMap[2]_{\totalInt} \circ \freeBoundariesChainMap[1]_{\totalInt} $ from which the formula for the $\weakTorsionHomologyMap$ follows. \end{proof}
\begin{DefS}{Remark} Composition can be checked to be associative. \def\chainMap[1]+ \chainMap[2]{\identyMap{{\complex[1]_{ }}_{\ast}}} The pair $\freeCyclesChainMap[10]_{\ast}=\chainMap[1]+ \chainMap[2]$ and $\weakMap[10]_{\ast}=0$ give the identity for any weak splitting of $\complex[1]_{\ast}$. Hence weakly split chain complexes and weakly split chain maps form a category. \end{DefS} \begin{math check} \begin{equation*} \boundary[2]_{\totalInt+1}\circ \weakMap[10]_{\totalInt} = \freeCyclesMap[2]_{\totalInt}\circ \freeCyclesChainMap[10]_{\totalInt} - \chainMap[2]_{\totalInt}\circ\chainMap[1]_{\totalInt} \circ \freeCyclesMap[1]_{\totalInt} \end{equation*}
\begin{align*} \boundary[2]_{\totalInt+1}\bigl(&\chainMap[4]_{\totalInt+1}\circ \weakMap[1]_{\totalInt}+ \weakMap[2]_{\totalInt} \circ \freeCyclesChainMap[1]_{\totalInt}\bigr) = \chainMap[4]_{\totalInt}\bigl(\boundary[3]_{\totalInt}\circ \weakMap[1]_{\totalInt}\bigr) + \boundary[2]_{\totalInt+1}\bigl(\weakMap[2]_{\totalInt} \circ \freeCyclesChainMap[1]_{\totalInt}\bigr) =\\& \chainMap[4]_{\totalInt}\bigl(\boundary[3]_{\totalInt}\circ \weakMap[1]_{\totalInt}\bigr) + \bigl(\freeCyclesMap[2]_{\totalInt}\circ \freeCyclesChainMap[2]_{\totalInt} - \chainMap[4]_{\totalInt}\circ \freeCyclesMap[3]_{\totalInt}\bigr)\circ \freeCyclesChainMap[1]_{\totalInt} =\\& \chainMap[4]_{\totalInt}\bigl(\boundary[3]_{\totalInt}\circ \weakMap[1]_{\totalInt}\bigr) + \bigl(\freeCyclesMap[2]_{\totalInt}\circ \freeCyclesChainMap[2]_{\totalInt}\circ \freeCyclesChainMap[1]_{\totalInt} \bigr) - \bigl(\chainMap[4]_{\totalInt}\circ \freeCyclesMap[3]_{\totalInt}\bigr)\circ \freeCyclesChainMap[1]_{\totalInt} =\\& \chainMap[4]_{\totalInt}\bigl(\boundary[3]_{\totalInt}\circ \weakMap[1]_{\totalInt}\bigr) + \bigl(\freeCyclesMap[2]_{\totalInt}\circ \freeCyclesChainMap[10]_{\totalInt}\bigr) - \bigl(\chainMap[4]_{\totalInt}\circ \freeCyclesMap[3]_{\totalInt}\bigr)\circ \freeCyclesChainMap[1]_{\totalInt} =\\& \bigl(\freeCyclesMap[2]_{\totalInt}\circ \freeCyclesChainMap[10]_{\totalInt}\bigr)+ \chainMap[4]_{\totalInt}\bigl(\boundary[3]_{\totalInt}\circ \weakMap[1]_{\totalInt}\bigr) - \bigl(\chainMap[4]_{\totalInt}\circ \freeCyclesMap[3]_{\totalInt}\bigr)\circ \freeCyclesChainMap[1]_{\totalInt} =\\& \bigl(\freeCyclesMap[2]_{\totalInt}\circ \freeCyclesChainMap[10]_{\totalInt}\bigr)+ \chainMap[4]\bigl(\boundary[3]_{\totalInt}\circ \weakMap[1]_{\totalInt} - \freeCyclesMap[3]_{\totalInt}\circ \freeCyclesChainMap[1]_{\totalInt}\bigr) =\\& \bigl(\freeCyclesMap[2]_{\totalInt}\circ \freeCyclesChainMap[10]_{\totalInt}\bigr)+ \chainMap[4]_{\totalInt}\bigl( -\chainMap_{\totalInt}\circ \freeCyclesMap[1]_{\totalInt}\bigr) = \bigl(\freeCyclesMap[2]_{\totalInt}\circ \freeCyclesChainMap[10]_{\totalInt}\bigr)- \chainMap[4]_{\totalInt}\circ \chainMap_{\totalInt}\circ \freeCyclesMap[1]_{\totalInt} \end{align*}
The required formula has been verified. \begin{equation*} \weakHomologyMap[10]_{\totalInt} = \weakMap[10]_{\totalInt}\big\vert_{_{\scriptstyle\freeBoundaries[1]_{\totalInt}}} - \bigl( \freeBoundariesMap[4]_{\totalInt}\circ \freeBoundariesChainMap[10]_{\totalInt} - (\chainMap[2]_{\ast}\circ \chainMap[1]_{\ast})_{\totalInt+1} \circ \freeBoundariesMap[1]_{\totalInt} \bigr)\colon \freeBoundaries[1]_{\totalInt} \to \complex[4]_{\totalInt+1} \end{equation*}
\begin{align*} \weakMap[10]_{\totalInt}\big\vert_{_{\scriptstyle\freeBoundaries[1]_{\totalInt}}} - \bigl( & \freeBoundariesMap[4]_{\totalInt}\circ \freeBoundariesChainMap[10]_{\totalInt} - (\chainMap[2]_{\ast}\circ \chainMap[1]_{\ast})_{\totalInt+1} \circ \freeBoundariesMap[1]_{\totalInt} \bigr) =\\& \bigl( \chainMap[2]_{\totalInt+1}\circ \weakMap[1]_{\totalInt} + \weakMap[2]_{\totalInt} \circ \freeCyclesChainMap[1]_{\totalInt}\bigr) \big\vert_{_{\scriptstyle\freeBoundaries[1]_{\totalInt}}} -\\&\hskip10pt \bigl( \freeBoundariesMap[4]_{\totalInt}\circ \freeBoundariesChainMap[2]_{\totalInt}\circ \freeBoundariesChainMap[1]_{\totalInt} - \chainMap[2]_{\totalInt+1}\circ \chainMap[1]_{\totalInt+1} \circ \freeBoundariesMap[1]_{\totalInt} \bigr) =\\& \chainMap[2]_{\totalInt+1}\Bigl(\weakMap[1]_{\totalInt} \big\vert_{_{\scriptstyle\freeBoundaries[1]_{\totalInt}}} -\bigl( \freeBoundariesMap[2]_{\totalInt}\circ \freeBoundariesChainMap[1]_{\totalInt} - \chainMap[1]_{\totalInt+1} \circ \freeBoundariesMap[1]_{\totalInt}\bigr)\Bigr) +\\& \weakMap[2]_{\totalInt} \circ \freeCyclesChainMap[1]_{\totalInt} \big\vert_{_{\scriptstyle\freeBoundaries[1]_{\totalInt}}} - \bigl( \freeBoundariesMap[4]_{\totalInt}\circ \freeBoundariesChainMap[2]_{\totalInt}\circ \freeBoundariesChainMap[1]_{\totalInt} - \chainMap[2]_{\totalInt+1}\circ \freeBoundariesMap[2]_{\totalInt}\circ \freeBoundariesChainMap[1]_{\totalInt} \bigr)=\\& \chainMap[2]_{\totalInt+1}\Bigl(\weakMap[1]_{\totalInt} \big\vert_{_{\scriptstyle\freeBoundaries[1]_{\totalInt}}} -\bigl( \freeBoundariesMap[2]_{\totalInt}\circ \freeBoundariesChainMap[1]_{\totalInt} - \chainMap[1]_{\totalInt+1} \circ \freeBoundariesMap[1]_{\totalInt}\bigr)\Bigr) +\\& \Bigl(\weakMap[2]_{\totalInt} \big\vert_{_{\scriptstyle\freeBoundaries[2]_{\totalInt}}} - \bigl( \freeBoundariesMap[4]_{\totalInt}\circ \freeBoundariesChainMap[2]_{\totalInt} - \chainMap[2]_{\totalInt+1}\circ \freeBoundariesMap[2]_{\totalInt}\bigr) \Bigr) \freeBoundariesChainMap[1]_{\totalInt}=\\& \chainMap[2]_{\totalInt+1}\circ \weakHomologyMap[1]_{\totalInt} + \weakHomologyMap[2]_{\totalInt} \circ \freeBoundariesChainMap[1]_{\totalInt} \end{align*} The result follows. \end{math check}
\begin{ThmS}{Proposition} Let $\chainMap[3]_\ast\colon \complex[1]_\ast \to \complex[3]_\ast$ be a weakly split chain map and suppose $\chainMap[4]_\ast\colon \complex[1]_\ast \to \complex[3]_\ast$ is a chain map chain homotopic to $\chainMap[3]$. Let $D_\ast\colon \complex[1]_\ast \to \complex[3]_{\ast+1}$ be a chain homotopy with \begin{equation*} \chainMap[4]_\ast - \chainMap[3]_\ast = \boundary[3]_{\ast+1}\circ D_\ast + D_{\ast-1}\circ \boundary[1]_\ast \end{equation*} Then $\chainMap[4]$ is weakly split by $\freeCyclesChainMap[2]_{\totalInt}=\freeCyclesChainMap[1]_{\totalInt}$ and \begin{equation*} \weakMap[2]_{\totalInt} = \weakMap[1] + D_{\totalInt} \circ \freeCyclesMap[1]_{\totalInt} + \boundary[3]_{\totalInt+2}\circ D_{\totalInt+1}\circ\freeBoundariesMap[1]_{\totalInt} \end{equation*} With these choices, $\weakTorsionHomologyMap[2]<1>_{\totalInt}=\weakTorsionHomologyMap[1]<1>_{\totalInt}$ \end{ThmS} \begin{proof} Since chain homotopic maps induce the same map in homology, it is possible to take $\freeCyclesChainMap[1]_{\totalInt}=\freeCyclesChainMap[2]_{\totalInt}$ and then $\freeBoundariesChainMap[1]_{\totalInt}=\freeBoundariesChainMap[2]_{\totalInt}$ The required verifications are straightforward. \end{proof}
\begin{math check} \begin{align*} \boundary[3]_{\totalInt+1}\circ \weakMap[2]_{\totalInt} = & \boundary[3]_{\totalInt+1}\bigl(\weakMap[1]_{\totalInt} + D_{\totalInt} \circ \freeCyclesMap[1]_{\totalInt} + \boundary[3]_{\totalInt+2}\circ D_{\totalInt+1}\circ\freeBoundariesMap[1]_{\totalInt} \bigr)=\\& \bigl(\freeCyclesMap[3]_{\totalInt}\circ \freeCyclesChainMap[1]_{\totalInt} - \chainMap_{\totalInt}\circ \freeCyclesMap[1]_{\totalInt}\bigr) + \bigl( \chainMap[4]_{\totalInt} - \chainMap[3]_{\totalInt} - D_{\totalInt-1}\boundary[1]_\ast \bigr)\circ \freeCyclesMap[1]_{\totalInt}=\\& \freeCyclesMap[3]_{\totalInt}\circ \freeCyclesChainMap[1]_{\totalInt} - \chainMap[4]_{\totalInt} \circ\freeCyclesMap[1]_{\totalInt} = \freeCyclesMap[3]_{\totalInt}\circ \freeCyclesChainMap[2]_{\totalInt} - \chainMap[4]_{\totalInt} \circ\freeCyclesMap[1]_{\totalInt} \end{align*}
\begin{align*} &\weakHomologyMap[2]_{\totalInt} = \weakMap[2]_{\totalInt}\big\vert_{_{\scriptstyle\freeBoundaries[1]_{\totalInt}}} -\bigl( \freeBoundariesMap[3]_{\totalInt}\circ \freeBoundariesChainMap[2]_{\totalInt} - \chainMap[2]_{\totalInt+1} \circ \freeBoundariesMap[1]_{\totalInt}\bigr) \bigr) = \\& \weakMap[1]_{\totalInt}\big\vert_{_{\scriptstyle\freeBoundaries[1]_{\totalInt}}} + D_{\totalInt} \circ \freeCyclesMap[1]_{\totalInt} + \boundary[3]_{\totalInt+2}\circ D_{\totalInt+1}\circ\freeBoundariesMap[1]_{\totalInt} -\bigl( \freeBoundariesMap[3]_{\totalInt}\circ \freeBoundariesChainMap[2]_{\totalInt} - \chainMap[2]_{\totalInt+1} \circ \freeBoundariesMap[1]_{\totalInt}\bigr) \bigr) = \\& \weakMap[1]_{\totalInt}\big\vert_{_{\scriptstyle\freeBoundaries[1]_{\totalInt}}} - \bigl(\freeBoundariesMap[3]_{\totalInt}\circ \freeBoundariesChainMap[1]_{\totalInt} - \chainMap[1]_{\totalInt+1} \circ \freeBoundariesMap[1]_{\totalInt} \bigr) + D_{\totalInt} \circ \freeCyclesMap[1]_{\totalInt} + \boundary[3]_{\totalInt+2}\circ D_{\totalInt+1}\circ\freeBoundariesMap[1]_{\totalInt} -\\&\hskip30pt \bigl( \chainMap[1]_{\totalInt+1} \circ \freeBoundariesMap[1]_{\totalInt} - \chainMap[2]_{\totalInt+1} \circ \freeBoundariesMap[1]_{\totalInt}\bigr) \bigr) =\\& \weakMap[1]_{\totalInt}\big\vert_{_{\scriptstyle\freeBoundaries[1]_{\totalInt}}} - \bigl(\freeBoundariesMap[3]_{\totalInt}\circ \freeBoundariesChainMap[1]_{\totalInt} - \chainMap[1]_{\totalInt+1} \circ \freeBoundariesMap[1]_{\totalInt} \bigr) + D_{\totalInt} \circ \freeCyclesMap[1]_{\totalInt} - D_{\totalInt}\circ \boundary[1]_{\totalInt+1}\circ \freeBoundariesMap[1]_{\totalInt} = \weakHomologyMap[1]_{\totalInt} \end{align*}
The required formulas have been verified. \end{math check}
The remaining results are routine verifications. \begin{ThmS}[weakly split direct sum]{Proposition} Given two weakly split chain complexes, $\{\complex[1]_\ast$, $\splitPair[1]_\ast\}$ and $\{\complex[2]_\ast$, $\splitPair[2]_\ast\}$, then $\complex[1]_\ast\displaystyle\mathop{\oplus} \complex[3]_\ast$ is weakly split by the following data:\\ \def\chainMap[1]+ \chainMap[2]{\complex[1]\oplus \complex[3]} $\freeCycles[100]_{\totalInt}= \freeCycles[1]_{\totalInt}\displaystyle\mathop{\oplus} \freeCycles[2]_{\totalInt}$, $\freeCyclesMap[100]_{\totalInt} = \freeCyclesMap[1]_{\totalInt}\displaystyle\mathop{\oplus}\freeCyclesMap[2]_{\totalInt} $. Then $\freeBoundaries[100]_{\totalInt}= \freeBoundaries[1]_{\totalInt}\displaystyle\mathop{\oplus} \freeBoundaries[2]_{\totalInt}$ so let $\freeBoundariesMap[100]_{\totalInt} = \freeBoundariesMap[1]_{\totalInt}\displaystyle\mathop{\oplus}\freeBoundariesMap[2]_{\totalInt} $. \end{ThmS}
\begin{ThmS}{Proposition} Given weakly split chain maps $\chainMap[1]_\ast\colon\complex[1]_\ast \to\complex[2]_\ast$ and $\chainMap[2]_\ast\colon\complex[3]_\ast \to\complex[4]_\ast$ then $\chainMap[1]_\ast\displaystyle\mathop{\oplus}\chainMap[2]_\ast$ is weakly split by \def\chainMap[1]+ \chainMap[2]{\chainMap[1]\oplus \chainMap[2]} $\freeCyclesChainMap[10]_{\totalInt}= \freeCyclesChainMap[2]_{\totalInt}\displaystyle\mathop{\oplus} \freeCyclesChainMap[1]_{\totalInt}$ and $\weakMap[10]_{\totalInt} = \weakMap[1]_{\totalInt} \displaystyle\mathop{\oplus} \weakMap[2]_{\totalInt}$. With these choices \begin{equation*} \weakTorsionHomologyMap[10]<1>_{\totalInt} = \weakTorsionHomologyMap[1]<1>_{\totalInt} \displaystyle\mathop{\oplus} \weakTorsionHomologyMap[2]<1>_{\totalInt} \end{equation*} \end{ThmS} \begin{DefS}{Remark} The zero complex with its evident splitting is a zero for the direct sum operation. The zero chain map between any two weakly split complexes is weakly split by letting \def\chainMap[1]+ \chainMap[2]{0_\ast} $\freeCyclesChainMap[10]_{\totalInt}$ and $\weakMap[10]_{\totalInt}$ be trivial. Then $\weakTorsionHomologyMap[10]<1>_{\totalInt}$ is also trivial. \end{DefS}
There is an internal sum result.
\begin{ThmS}{Proposition} Given weakly split chain maps $\chainMap[1]_\ast\colon\complex[1]_\ast \to\complex[2]_\ast$ and $\chainMap[2]_\ast\colon\complex[1]_\ast \to\complex[2]_\ast$ then $\chainMap[1]_\ast+\chainMap[2]_\ast$ is weakly split by \def\chainMap[1]+ \chainMap[2]{\chainMap[1]+ \chainMap[2]} $\freeCyclesChainMap[10]_{\totalInt}= \freeCyclesChainMap[2]_{\totalInt}+ \freeCyclesChainMap[1]_{\totalInt}$ and $\weakMap[10]_{\totalInt} = \weakMap[1]_{\totalInt} + \weakMap[2]_{\totalInt}$. With these choices \begin{equation*} \weakTorsionHomologyMap[10]<1>_{\totalInt} = \weakTorsionHomologyMap[1]<1>_{\totalInt} + \weakTorsionHomologyMap[2]<1>_{\totalInt} \end{equation*} \end{ThmS}
\begin{DefS}{Remark} Unlike the direct sum case (\ref{weakly split direct sum}), there does not seem to be an easy way to weakly split the tensor product. \end{DefS}
\begin{references} \bib{Dold}{book}{
author={Dold, Albrecht},
title={Lectures on algebraic topology},
series={Grundlehren der Mathematischen Wissenschaften [Fundamental
Principles of Mathematical Sciences]},
volume={200},
edition={2},
publisher={Springer-Verlag, Berlin-New York},
date={1980},
pages={xi+377},
isbn={3-540-10369-4},
review={\MR{606196 (82c:55001)}}, } \bib{Eilenberg-Mac Lane}{article}{
author={Eilenberg, Samuel},
author={Mac Lane, Saunders},
title={On the groups $H(\Pi,n)$. II. Methods of computation},
journal={Ann. of Math. (2)},
volume={60},
date={1954},
pages={49--139},
issn={0003-486X},
review={\MR{0065162 (16,391a)}},
} \bib{Heller}{article}{
author={Heller, Alex},
title={On the K\"unneth theorem},
journal={Trans. Amer. Math. Soc.},
volume={98},
date={1961},
pages={450--458},
issn={0002-9947},
review={\MR{0126479 (23 \#A3775)}},
}
\bib{Mac Lane slides}{article}{
author={Mac Lane, Saunders},
title={Slide and torsion products for modules},
journal={Univ. e Politec. Torino. Rend. Sem. Mat.},
volume={15},
date={1955--56},
pages={281--309},
review={\MR{0082488 (18,558b)}}, }
\bib{MacLane}{book}{
author={Mac Lane, Saunders},
title={Homology},
edition={1},
note={Die Grundlehren der mathematischen Wissenschaften, Band 114},
publisher={Springer-Verlag, Berlin-New York},
date={1967},
pages={x+422},
review={\MR{0349792 (50 \#2285)}},
} \end{references}
\end{document}
\begin{document}
\title{Some properties of the inverse error function} \author{Diego Dominici } \address{Department of Mathematics\\ State University of New York at New Paltz\\ 75 S. Manheim Blvd. Suite 9\\ New Paltz, NY 12561-2443\\ USA\\ Phone: (845) 257-2607\\ Fax: (845) 257-3571} \email{[email protected]} \thanks{This work was partially supported by a Provost Research Award from SUNY New Paltz.} \subjclass{Primary 33B20; Secondary 30B10, 34K25} \date{June 4, 2007} \keywords{Inverse error function, asymptotic analysis, discrete ray method, differential-difference equations, Taylor series}
\begin{abstract} The inverse of the error function, $\operatorname{inverf}(x),$ has applications in diffusion problems, chemical potentials, ultrasound imaging, etc. We analyze the derivatives $\left. \frac{d^{n}}{dz^{n}} \operatorname*{inverf}\left( z\right) \right\vert _{z=0}$, as $n\rightarrow \infty$ using nested derivatives and a discrete ray method. We obtain a very good approximation of $\operatorname{inverf}(x)$ through a high-order Taylor expansion around $x=0$. We give numerical results showing the accuracy of our formulas.
\end{abstract} \maketitle
\section{Introduction}
The error function $\operatorname{erf}(z),$ defined by \[ \operatorname{erf}(z)=\frac{2}{\sqrt{\pi}}\int\limits_{0}^{z}\exp\left( -t^{2}\right) dt, \] occurs widely in almost every branch of applied mathematics and mathematical physics, e.g., probability and statistics \cite{MR0034250}, data analysis \cite{MR999553}, heat conduction \cite{MR0016873}, etc. It plays a fundamental role in asymptotic expansions \cite{MR1429619} and exponential asymptotics \cite{MR990851}.
Its inverse, which we will denote by $\operatorname*{inverf}\left( z\right) ,$ \[ \operatorname*{inverf}\left( z\right) =\operatorname{erf}^{-1}(z), \] appears in multiple areas of mathematics and the natural sciences. A few examples include concentration-dependent diffusion problems \cite{MR0071876}, \cite{MR0281322}, solutions to Einstein's scalar-field equations \cite{PhysRevD.51.444}, chemical potentials \cite{MR2166352}, the distribution of lifetimes in coherent-noise models \cite{PhysRevE.59.R2512}, diffusion rates in tree-ring chemistry \cite{MR2142222} and $3D$ freehand ultrasound imaging \cite{san-joseMICCAI03}.
Although some authors have studied the function $\operatorname*{inverf}\left( z\right) $ (see \cite{MR1986919} and the references therein), little is known about its analytic properties, the major work having been done on developing algorithms for numerical calculations \cite{MR0341812}. Dan Lozier remarked on the need for new techniques in the computation of $\operatorname*{inverf}\left( z\right) $ \cite{MR1393742}.
In this paper, we analyze the asymptotic behavior of the derivatives $\left. \frac{d^{n}}{dz^{n}}\operatorname*{inverf}\left( z\right) \right\vert _{z=0}$ for large values of $n,$ using a discrete WKB method \cite{MR1373150}. In Section 2 we present some properties of the derivatives of $\operatorname*{inverf}\left( z\right) $ and review our previous work on nested derivatives. In Section 3 we study a family of polynomials $P_{n}(x)$ associated with the derivatives of $\operatorname*{inverf}\left( z\right) $, which were introduced by L. Carlitz in \cite{MR0153878}. Theorem \ref{theorem} contains our main result on the asymptotic analysis of $P_{n}(x).$ In Section 4 we give asymptotic approximations for $\left. \frac{d^{n}}{dz^{n} }\operatorname*{inverf}\left( z\right) \right\vert _{z=0}$ and some numerical results testing the accuracy of our formulas.
\section{Derivatives}
Let us denote the function $\operatorname*{inverf}\left( z\right) $ by $\mathfrak{I}(z)$ and its derivatives by \begin{equation} d_{n}=\left. \frac{d^{n}}{dz^{n}}\operatorname*{inverf}\left( z\right) \right\vert _{z=0},\quad n=0,1,\ldots. \label{dn} \end{equation} Since $\operatorname{erf}(z)$ tends to $\pm1$ as $z\rightarrow\pm\infty,$ it is clear that $\operatorname*{inverf}\left( z\right) $ is defined in the interval $\left( -1,1\right) $ and has singularities at the end points.
\begin{proposition} The function $\mathfrak{I}(z)$ satisfies the nonlinear differential equation \begin{equation} \mathfrak{I}^{\prime\prime}-2\mathfrak{I}\left( \mathfrak{I}^{\prime}\right) ^{2}=0 \label{ODE} \end{equation} with initial conditions \begin{equation} \mathfrak{I}(0)=0,\quad\mathfrak{I}^{\prime}(0)=\frac{\sqrt{\pi}}{2}. \label{d0d1} \end{equation}
\end{proposition}
\begin{proof} It is clear that $\mathfrak{I}(0)=0,$ since $\operatorname{erf}(0)=0.$ Using the chain rule, we have \[ \mathfrak{I}^{\prime}\left[ \operatorname{erf}(z)\right] =\frac {1}{\operatorname{erf}^{\prime}(z)}=\frac{\sqrt{\pi}}{2}\exp\left\{ \mathfrak{I}^{2}\left[ \operatorname{erf}(z)\right] \right\} \] and therefore \begin{equation} \mathfrak{I}^{\prime}=\frac{\sqrt{\pi}}{2}\exp\left( \mathfrak{I}^{2}\right) . \label{I'} \end{equation} Setting $z=0$ we get $\mathfrak{I}^{\prime}(0)=\frac{\sqrt{\pi}}{2}$ and taking the logarithmic derivative of (\ref{I'}) the result follows. \end{proof}
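The identity (\ref{I'}) is easy to test numerically. The following short Python sketch (an illustrative check using only the standard library; the bisection routine \texttt{inverf} is ours) compares a central-difference approximation of $\mathfrak{I}^{\prime}(z)$ with the right-hand side of (\ref{I'}):
\begin{verbatim}
# Illustrative numerical check of I'(z) = (sqrt(pi)/2) exp(I(z)^2).
# Not part of the derivation; erf is inverted by bisection and I'(z)
# is approximated by a central difference.
import math

def inverf(y, tol=1e-13):
    lo, hi = -10.0, 10.0              # erf(+-10) is +-1 to machine precision
    while hi - lo > tol:
        mid = 0.5 * (lo + hi)
        if math.erf(mid) < y:
            lo = mid
        else:
            hi = mid
    return 0.5 * (lo + hi)

for z in (0.0, 0.3, 0.6, 0.9):
    h = 1e-6
    numeric = (inverf(z + h) - inverf(z - h)) / (2 * h)
    closed = 0.5 * math.sqrt(math.pi) * math.exp(inverf(z) ** 2)
    print(z, numeric, closed)         # the two columns should agree closely
\end{verbatim}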
To compute higher derivatives of $\mathfrak{I}(z),$ we begin by establishing the following corollary.
\begin{corollary} The function $\mathfrak{I}(z)$ satisfies the nonlinear differential-integral equation \begin{equation} \mathfrak{I}^{\prime}(z)\int\limits_{0}^{z}\mathfrak{I}(t)dt=-\frac{1} {2}+\frac{1}{\sqrt{\pi}}\mathfrak{I}^{\prime}(z). \label{int diff} \end{equation}
\end{corollary}
\begin{proof} Rewriting (\ref{ODE}) as \[ \mathfrak{I}=\frac{1}{2}\frac{\mathfrak{I}^{\prime\prime}}{\left( \mathfrak{I}^{\prime}\right) ^{2}} \] and integrating, we get \[ \int\limits_{0}^{z}\mathfrak{I}(t)dt=\frac{1}{2}\left[ -\frac{1} {\mathfrak{I}^{\prime}(z)}+\frac{1}{\mathfrak{I}^{\prime}(0)}\right] =\frac{1}{2}\left[ -\frac{1}{\mathfrak{I}^{\prime}(z)}+\frac{2}{\sqrt{\pi} }\right] \] and multiplying by $\mathfrak{I}^{\prime}(z)$ we obtain (\ref{int diff}). \end{proof}
\begin{proposition} The derivatives of $\mathfrak{I}(z)$ satisfy the nonlinear recurrence \begin{equation} d_{n+1}=\sqrt{\pi}\sum\limits_{k=0}^{n-1}\binom{n}{k+1}d_{k}d_{n-k},\quad n=1,2,\ldots\label{recurrence} \end{equation} with $d_{0}=0$ and $d_{1}=\frac{\sqrt{\pi}}{2}.$ \end{proposition}
\begin{proof} Using \[ \mathfrak{I}(z)=\sum\limits_{n=0}^{\infty}d_{n}\frac{z^{n}}{n!} \] and $d_{1}=\frac{\sqrt{\pi}}{2}$ in (\ref{int diff}), we have \[ \left[ \frac{\sqrt{\pi}}{2}+\sum\limits_{n=1}^{\infty}d_{n+1}\frac{z^{n}} {n!}\right] \left[ \sum\limits_{n=1}^{\infty}d_{n-1}\frac{z^{n}}{n!} -\frac{1}{\sqrt{\pi}}\right] =-\frac{1}{2} \] or \[ \frac{\sqrt{\pi}}{2}\sum\limits_{n=1}^{\infty}d_{n-1}\frac{z^{n}}{n!} +\sum\limits_{n=2}^{\infty}\left[ \sum\limits_{k=0}^{n-2}\binom{n}{k+1} d_{k}d_{n-k}\right] \frac{z^{n}}{n!}-\frac{1}{\sqrt{\pi}}\sum\limits_{n=1} ^{\infty}d_{n+1}\frac{z^{n}}{n!}=0. \] Comparing powers of $z^{n},$ we get \[ \frac{\sqrt{\pi}}{2}d_{n-1}+\sum\limits_{k=0}^{n-2}\binom{n}{k+1}d_{k} d_{n-k}-\frac{1}{\sqrt{\pi}}d_{n+1}=0 \] or \[ \sum\limits_{k=0}^{n-1}\binom{n}{k+1}d_{k}d_{n-k}-\frac{1}{\sqrt{\pi}} d_{n+1}=0. \]
\end{proof}
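The recurrence (\ref{recurrence}) is easy to implement exactly: writing $d_{n}=e_{n}\pi^{n/2}$ turns it into the rational recurrence $e_{n+1}=\sum_{k=0}^{n-1}\binom{n}{k+1}e_{k}e_{n-k}$, with $e_{0}=0$ and $e_{1}=\frac{1}{2}$. A short illustrative Python sketch (standard library only):
\begin{verbatim}
# Illustrative implementation of the recurrence for e_n = d_n / pi^(n/2),
# computed exactly as rational numbers.
from fractions import Fraction
from math import comb

e = [Fraction(0), Fraction(1, 2)]     # e_0 = 0, e_1 = 1/2
for n in range(1, 9):
    e.append(sum(comb(n, k + 1) * e[k] * e[n - k] for k in range(n)))

print([str(e[n]) for n in range(1, 10, 2)])
# prints ['1/2', '1/4', '7/8', '127/16', '4369/32'],
# i.e. d_{2k+1} = e_{2k+1} * pi^{(2k+1)/2}
\end{verbatim}
The printed values agree with the exact derivatives $d_{1},d_{3},\ldots,d_{9}$ listed in Section 4.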
Although one could use (\ref{recurrence}) to compute the higher derivatives of $\operatorname*{inverf}\left( z\right) ,$ the nonlinearity of the recurrence makes it hard to analyze the asymptotic behavior of $d_{n}$ as $n\rightarrow \infty.$ Instead, we shall use an alternative technique that we developed in \cite{MR2031140}, which we called the method of ``nested derivatives''. The following theorem contains the main result presented in \cite{MR2031140}.
\begin{theorem} Let \[ H(x)=h^{-1}(x),\quad f(x)=\frac{1}{h^{\prime}(x)},\quad z_{0}=h(x_{0}),\text{ \ }\ \left\vert f(x_{0})\right\vert \in\left( 0,\infty\right) . \] $\ $ \ Then, \[ H(z)=x_{0}+f(x_{0})\sum\limits_{n=1}^{\infty}\mathfrak{D}^{n-1}[f]\,(x_{0} )\frac{(z-z_{0})^{n}}{n!}, \] where we define $\mathfrak{D}^{n}[f]$\thinspace$(x),$ \textit{the n}$^{th} $\textit{ nested derivative} of the function $f(x),$ by $\mathfrak{D} ^{0}[f]\,(x)=1$ and \begin{equation} \mathfrak{D}^{n+1}[f]\,(x)=\frac{d}{dx}\left[ f(x)\times\mathfrak{D} ^{n}[f]\,(x)\right] ,\quad n=0,1,\ldots. \label{nested} \end{equation}
\end{theorem}
The following proposition makes the computation of $\mathfrak{D} ^{n-1}[f]\,(x_{0})$ easier in some cases.
\begin{proposition} Let \begin{equation} \mathfrak{D}^{n}[f]\,(x)=\sum\limits_{k=0}^{\infty}A_{k}^{n}(x-x_{0})^{k},\qquad f(x)=\sum\limits_{k=0}^{\infty}B_{k}(x-x_{0})^{k}. \label{AB} \end{equation} Then, \begin{equation} A_{k}^{n+1}=\left( k+1\right) \sum\limits_{j=0}^{k+1}A_{k+1-j}^{n}B_{j}. \label{A} \end{equation}
\end{proposition}
\begin{proof} From (\ref{AB}) we have \begin{equation} f(x)\mathfrak{D}^{n}[f]\,(x)=\sum\limits_{k=0}^{\infty}\alpha_{k}^{n}(x-x_{0})^{k}, \label{Dfxf} \end{equation} with \begin{equation} \alpha_{k}^{n}=\sum\limits_{j=0}^{k}A_{k-j}^{n}B_{j}. \label{alpha} \end{equation} Using (\ref{AB}) and (\ref{Dfxf}) in (\ref{nested}), we obtain \[ \sum\limits_{k=0}^{\infty}A_{k}^{n+1}(x-x_{0})^{k}=\frac{d}{dx}\sum\limits_{k=0}^{\infty}\alpha_{k}^{n}(x-x_{0})^{k}=\sum\limits_{k=0}^{\infty}\left( k+1\right) \alpha_{k+1}^{n}(x-x_{0})^{k} \] and the result follows from (\ref{alpha}). \end{proof}
To obtain a linear relation between successive nested derivatives, we start by establishing the following lemma.
\begin{lemma} Let \begin{equation} g_{n}\left( x\right) =\frac{\mathfrak{D}^{n}[f]\,(x)}{f^{n}\left( x\right) }. \label{gn} \end{equation} Then, \begin{equation} g_{n+1}(x)=g_{n}^{\prime}\left( x\right) +\left( n+1\right) \frac {f^{\prime}(x)}{f(x)}g_{n}\left( x\right) ,\quad n=0,1,\ldots. \label{ddnested} \end{equation}
\end{lemma}
\begin{proof} Using (\ref{nested}) in (\ref{gn}), we have \begin{gather*} g_{n+1}\left( x\right) =\frac{\mathfrak{D}^{n+1}[f]\,(x)}{f^{n+1}\left( x\right) }=\frac{\frac{d}{dx}\left[ f(x)\times\mathfrak{D}^{n} [f]\,(x)\right] }{f^{n+1}\left( x\right) }\\ =\frac{\frac{d}{dx}\left[ g_{n}\left( x\right) f^{n+1}\left( x\right) \right] }{f^{n+1}\left( x\right) }=\frac{g_{n}^{\prime}\left( x\right) f^{n+1}\left( x\right) +g_{n}\left( x\right) (n+1)f^{n}\left( x\right) f^{\prime}(x)}{f^{n+1}\left( x\right) } \end{gather*} and the result follows. \end{proof}
\begin{corollary} Let \[ H(x)=h^{-1}(x),\quad f(x)=\frac{1}{h^{\prime}(x)},\quad z_{0}=h(x_{0}),\text{ \ }\ \left\vert f(x_{0})\right\vert \in\left( 0,\infty\right) . \] Then,$\ $ \begin{equation} \frac{d^{n}H}{dz^{n}}(z_{0})=\left[ f(x_{0})\right] ^{n}g_{n-1}(x_{0}),\quad n=1,2,\ldots. \label{deriv} \end{equation}
\end{corollary}
For the function $h(x)=\operatorname{erf}(x),$ we have \begin{equation} f(x)=\frac{1}{h^{\prime}(x)}=\frac{\sqrt{\pi}}{2}\exp\left( x^{2}\right) , \label{f} \end{equation} and setting $x_{0}=0$ we obtain $z_{0}=\operatorname{erf}(0)=0.$ Using the Taylor series \[ \frac{\sqrt{\pi}}{2}\exp\left( x^{2}\right) =\frac{\sqrt{\pi}}{2} \sum\limits_{k=0}^{\infty}\frac{x^{2k}}{k!} \] in (\ref{A}), we get \[ A_{k}^{n+1}=\frac{\sqrt{\pi}}{2}\left( k+1\right) \sum\limits_{j=0}^{\left\lfloor \frac{k+1}{2}\right\rfloor }\frac{A_{k+1-2j}^{n}}{j!}, \] with $A_{k}^{n}$ defined in (\ref{AB}). Using (\ref{f}) in (\ref{ddnested}), we have \begin{equation} g_{n+1}(x)=g_{n}^{\prime}\left( x\right) +2\left( n+1\right) xg_{n}\left( x\right) ,\quad n=0,1,\ldots, \label{g} \end{equation} while (\ref{deriv}) gives \begin{equation} d_{n}=\left( \frac{\sqrt{\pi}}{2}\right) ^{n}g_{n-1}(0),\quad n=1,2,\ldots. \label{deriv2} \end{equation}
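Formulas (\ref{g}) and (\ref{deriv2}) can be checked directly: the following illustrative Python sketch (standard library only) iterates (\ref{g}) on coefficient lists and evaluates (\ref{deriv2}), reproducing the values of $d_{n}$ obtained from (\ref{recurrence}):
\begin{verbatim}
# Illustrative check of (g) and (deriv2); not part of the derivation.
# g_n is a polynomial, stored as the list of its coefficients g[k] of x^k.
import math

def next_g(g, n):
    # g_{n+1} = g_n' + 2(n+1) x g_n
    deriv = [k * g[k] for k in range(1, len(g))]
    shift = [0] + [2 * (n + 1) * c for c in g]
    deriv += [0] * (len(shift) - len(deriv))
    return [a + b for a, b in zip(deriv, shift)]

g, g_at_zero = [1], []
for n in range(9):                    # record g_0(0), ..., g_8(0)
    g_at_zero.append(g[0])
    g = next_g(g, n)

for n in range(1, 10):                # d_n = (sqrt(pi)/2)^n g_{n-1}(0)
    print(n, (math.sqrt(math.pi) / 2) ** n * g_at_zero[n - 1])
# odd n reproduce d_1, d_3, ... (e.g. d_3 = pi^{3/2}/4 = 1.392...);
# even n give 0
\end{verbatim}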
In the next section we shall find an asymptotic approximation for a family of polynomials closely related to $g_{n}\left( x\right) $.
\section{The polynomials $P_{n}(x)$}
We define the polynomials $P_{n}(x)$ by $P_{0}(x)=1$ and \begin{equation} P_{n}(x)=g_{n}\left( \frac{x}{\sqrt{2}}\right) 2^{-\frac{n}{2}}. \label{Pngn} \end{equation} From (\ref{g}) it follows that they satisfy the differential-difference equation \begin{equation} P_{n+1}(x)=P_{n}^{\prime}(x)+\left( n+1\right) xP_{n}(x). \label{diffdiff} \end{equation} The first few $P_{n}\left( x\right) $ are \[ P_{1}(x)=x,\quad P_{2}(x)=1+2x^{2},\quad P_{3}(x)=7x+6x^{3},~\ldots~. \]
The following propositions describe some properties of $P_{n}\left( x\right) .$
\begin{proposition} Let \begin{equation} P_{n}(x)=\sum\limits_{k=0}^{\left\lfloor \frac{n}{2}\right\rfloor }C_{k} ^{n}x^{n-2k}, \label{Pcnk} \end{equation} where $\left\lfloor \cdot\right\rfloor $ denotes the integer part function. Then, \begin{equation} C_{0}^{n}=n! \label{C01} \end{equation} and \begin{equation} C_{k}^{n}=n!\sum\limits_{j_{k}=0}^{n-1}\sum\limits_{j_{k-1}=0}^{j_{k}-1} \cdots\sum\limits_{j_{1}=0}^{j_{2}-1}\prod_{i=1}^{k}\frac{j_{i}-2i+2}{j_{i} +1},\quad k=1,\ldots,\left\lfloor \frac{n}{2}\right\rfloor . \label{cnk} \end{equation}
\end{proposition}
\begin{proof} Using (\ref{Pcnk}) in (\ref{diffdiff}) we have \begin{gather*} \sum\limits_{0\leq2k\leq n+1}^{{}}C_{k}^{n+1}x^{n+1-2k}=\sum\limits_{0\leq 2k\leq n}^{{}}C_{k}^{n}\left( n-2k\right) x^{n-2k-1}+\sum\limits_{0\leq 2k\leq n}^{{}}\left( n+1\right) C_{k}^{n}x^{n+1-2k}\\ =\sum\limits_{2\leq2k\leq n+2}^{{}}C_{k-1}^{n}\left( n-2k+2\right) x^{n+1-2k}+\sum\limits_{0\leq2k\leq n}^{{}}\left( n+1\right) C_{k} ^{n}x^{n+1-2k}. \end{gather*} Comparing coefficients in the equation above, we get \begin{equation} C_{0}^{n+1}=C_{0}^{n}, \label{C0} \end{equation} \begin{equation} C_{k}^{n+1}=\left( n-2k+2\right) C_{k-1}^{n}+\left( n+1\right) C_{k} ^{n},\quad k=1,\ldots,\left\lfloor \frac{n}{2}\right\rfloor \label{Cn+1} \end{equation} and for $n=2m-1,$ \[ C_{m}^{2m}=C_{m-1}^{2m-1},\quad m=1,2,\ldots. \] From (\ref{C0}) we immediately conclude that $C_{0}^{n}=n!,$ while (\ref{Cn+1}) gives \begin{equation} C_{k}^{n}=n!\sum\limits_{j=0}^{n-1}\frac{j-2k+2}{\left( j+1\right) !} C_{k-1}^{j},\quad n,k\geq1. \label{Cnk1} \end{equation}
Setting $k=1$ in (\ref{Cnk1}) and using (\ref{C01}), we have \begin{equation} C_{1}^{n}=n!\sum\limits_{j=0}^{n-1}\frac{j}{\left( j+1\right) !}C_{0} ^{j}=n!\sum\limits_{j=0}^{n-1}\frac{j}{j+1}. \label{C1n} \end{equation} Similarly, setting $k=2$ in (\ref{Cnk1}) and using (\ref{C1n}), we get \[ C_{2}^{n}=n!\sum\limits_{j=0}^{n-1}\frac{j-2}{\left( j+1\right) !}\left[ j!\sum\limits_{i=0}^{j-1}\frac{i}{i+1}\right] =n!\sum\limits_{j=0}^{n-1} \sum\limits_{i=0}^{j-1}\frac{j-2}{j+1}\frac{i}{i+1} \] and continuing this way we obtain (\ref{cnk}). \end{proof}
\begin{proposition} The zeros of the polynomials $P_{n}(x)$ are purely imaginary for $n\geq1.$ \end{proposition}
\begin{proof} For $n=1$ the result is obviously true. Assuming that it is true for $n$ and that $P_{n}(x)$ is written in the form \begin{equation} P_{n}(x)=n!
{\displaystyle\prod\limits_{k=1}^{n}}
(x-z_{k}),\quad\operatorname{Re}(z_{k})=0,\quad1\leq k\leq n, \label{product} \end{equation} we have two possibilities for $z^{\ast}$ with $P_{n+1}(z^{\ast})=0$:
\begin{enumerate} \item $z^{\ast}=z_{k}$, for some $1\leq k\leq n.$
In this case, $\operatorname{Re}(z^{\ast})=0$ and the proposition is proved.
\item $z^{\ast}\neq z_{k}$, for all $1\leq k\leq n$.
From (\ref{diffdiff}) and (\ref{product}) we get \[ \frac{P_{n+1}(x)}{P_{n}(x)}=\frac{d}{dx}\ln\left[ P_{n}(x)\right] +(n+1)x=
{\displaystyle\sum\limits_{k=1}^{n}}
\frac{1}{x-z_{k}}+(n+1)x. \] Evaluating at $x=z^{\ast},$ we obtain \[ 0=
{\displaystyle\sum\limits_{k=1}^{n}}
\frac{1}{z^{\ast}-z_{k}}+(n+1)z^{\ast} \] and taking $\operatorname{Re}(\bullet),$ we have \begin{gather*} 0=\operatorname{Re}\left[
{\displaystyle\sum\limits_{k=1}^{n}}
\frac{1}{z^{\ast}-z_{k}}+(n+1)z^{\ast}\right] \\ =
{\displaystyle\sum\limits_{k=1}^{n}}
\frac{\operatorname{Re}\left( z^{\ast}-z_{k}\right) }{\left\vert z^{\ast }-z_{k}\right\vert ^{2}}+(n+1)\operatorname{Re}(z^{\ast})=\operatorname{Re} (z^{\ast})\left[
{\displaystyle\sum\limits_{k=1}^{n}}
\frac{1}{\left\vert z^{\ast}-z_{k}\right\vert ^{2}}+n+1\right] \end{gather*} which implies that \ $\operatorname{Re}(z^{\ast})=0.$ \end{enumerate} \end{proof}
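The proposition can also be observed numerically. The following illustrative Python sketch (it assumes that \texttt{numpy} is available) builds $P_{10}$ from (\ref{diffdiff}) and checks that the computed roots have negligible real part:
\begin{verbatim}
# Illustrative check: the roots of P_10 are purely imaginary.
import numpy as np

p = [1.0]                             # coefficients of P_0
for n in range(10):                   # P_{n+1} = P_n' + (n+1) x P_n
    deriv = [k * p[k] for k in range(1, len(p))]
    shift = [0.0] + [(n + 1) * c for c in p]
    deriv += [0.0] * (len(shift) - len(deriv))
    p = [a + b for a, b in zip(deriv, shift)]

roots = np.roots(p[::-1])             # np.roots expects highest degree first
print(max(abs(roots.real)))           # should be at round-off level
\end{verbatim}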
\subsection{Asymptotic analysis of $P_{n}(x)$}
We first consider solutions to (\ref{diffdiff}) of the form \begin{equation} P_{n}(x)=n!A^{\left( n+1\right) }(x), \label{Pn1} \end{equation} with $x>0.$ Replacing (\ref{Pn1}) in (\ref{diffdiff}) and simplifying the resulting expression, we obtain \[ A^{2}(x)=A^{\prime}(x)+xA(x), \] with solution \begin{equation} A(x)=\exp\left( -\frac{x^{2}}{2}\right) \left[ C-\sqrt{\frac{\pi}{2} }\operatorname{erf}\left( \frac{x}{\sqrt{2}}\right) \right] ^{-1}, \label{f1} \end{equation} for some constant $C.$ Note that (\ref{Pn1}) is not an exact solution of (\ref{diffdiff}), since it does not satisfy the initial condition $P_{0}(x)=1.$ To determine $C$ in(\ref{f1}), we observe from (\ref{C01}) that \begin{equation} P_{n}(x)\sim n!x^{n},\quad x\rightarrow\infty. \label{Pn4} \end{equation} As $x\rightarrow\infty,$ we get from (\ref{f1}) \[ \ln\left[ A(x)\right] \sim-\frac{x^{2}}{2}-\ln\left( C-\sqrt{\frac{\pi}{2} }\right) +\frac{\exp\left( -\frac{x^{2}}{2}\right) }{\left( C-\sqrt {\frac{\pi}{2}}\right) x},\quad x\rightarrow\infty, \] which is inconsistent with (\ref{Pn4}) unless $C=\sqrt{\frac{\pi}{2}}.$ In this case, we have \begin{equation} A(x)\sim x+\frac{1}{x},\quad x\rightarrow\infty, \label{A1} \end{equation} matching (\ref{Pn4}). Thus, \begin{equation} A(x)=\sqrt{\frac{2}{\pi}}\exp\left( -\frac{x^{2}}{2}\right) \left[ 1-\operatorname{erf}\left( \frac{x}{\sqrt{2}}\right) \right] ^{-1}. \label{psi2} \end{equation} Since (\ref{Pn1}) and (\ref{A1}) give \[ P_{n}(x)\sim n!x^{n+1},\quad x\rightarrow\infty, \] instead of (\ref{Pn4}), we need to consider \begin{equation} P_{n}(x)=n!A^{\left( n+1\right) }(x)B(x,n). \label{Pn2} \end{equation} Replacing (\ref{Pn2}) in (\ref{diffdiff}) and simplifying, we get \[ B(x,n+1)=B(x,n)+\frac{1}{A(x)(n+1)}\frac{\partial B}{\partial x}(x,n). \] Using the approximation \[ B(x,n+1)=B(x,n)+\frac{\partial B}{\partial n}(x,n)+\frac{1}{2}\frac {\partial^{2}B}{\partial n^{2}}(x,n)+\cdots, \] we obtain \[ \frac{\partial B}{\partial n}=\frac{1}{A(x)(n+1)}\frac{\partial B}{\partial x}, \] whose solution is \begin{equation} B(x,n)=F\left[ \frac{n+1}{1-\operatorname{erf}\left( \frac{x}{\sqrt{2} }\right) }\right] , \label{B1} \end{equation} for some function $F(u).$ Matching (\ref{Pn2}) with (\ref{Pn4}) requires \begin{equation} B(x,n)\sim\frac{1}{x},\quad x\rightarrow\infty. \label{B2} \end{equation} Since in the limit as $x\rightarrow\infty,$ with $n$ fixed we have \[ \ln\left[ \frac{n+1}{1-\operatorname{erf}\left( \frac{x}{\sqrt{2}}\right) }\right] \sim\frac{x^{2}}{2}, \] (\ref{B1})-(\ref{B2}) imply \[ F(u)=\frac{1}{\sqrt{2\ln(u)}}. \] Therefore, for $x>0,$ \begin{equation} P_{n}(x)\sim n!\Phi\left( x,n\right) ,\quad n\rightarrow\infty, \label{Pn5} \end{equation} with \[ \Phi\left( x,n\right) =\left[ \sqrt{\frac{2}{\pi}}\frac{\exp\left( -\frac{x^{2}}{2}\right) }{1-\operatorname{erf}\left( \frac{x}{\sqrt{2} }\right) }\right] ^{n+1}\left[ 2\ln\left( \frac{n+1}{1-\operatorname{erf} \left( \frac{x}{\sqrt{2}}\right) }\right) \right] ^{-\frac{1}{2}}. \]
From (\ref{Pcnk}) we know that the polynomials $P_{n}(x)$ satisfy the reflection formula \begin{equation} P_{n}(-x)=\left( -1\right) ^{n}P_{n}(x). \label{reflection} \end{equation} Using (\ref{reflection}), we can extend (\ref{Pn5}) to the whole real line and write \begin{equation} P_{n}(x)\sim n!\left[ \Phi\left( x,n\right) +\left( -1\right) ^{n} \Phi\left( -x,n\right) \right] ,\quad n\rightarrow\infty. \label{Pn6} \end{equation} In Figure \ref{P10} we compare the values of $P_{10}(x)$ with the asymptotic approximation (\ref{Pn6}).
\begin{figure}
\caption{Comparison of $P_{10}(x)$ with the asymptotic approximation (\ref{Pn6}).}
\label{P10}
\end{figure}
We see that the approximation is very good, even for small values of $n.$ We summarize our results of this section in the following theorem.
\begin{theorem} \label{theorem}Let the polynomials $P_{n}(x)$ be defined by \[ P_{n+1}(x)=P_{n}^{\prime}(x)+\left( n+1\right) xP_{n}(x), \] with $P_{0}(x)=1.$ Then, we have \begin{equation} P_{n}(x)\sim n!\left[ \Phi\left( x,n\right) +\left( -1\right) ^{n}\Phi\left( -x,n\right) \right] ,\quad n\rightarrow\infty, \label{Pasympt} \end{equation} where \begin{equation} \Phi\left( x,n\right) =\left[ \sqrt{\frac{2}{\pi}}\frac{\exp\left( -\frac{x^{2}}{2}\right) }{1-\operatorname{erf}\left( \frac{x}{\sqrt{2}}\right) }\right] ^{n+1}\left[ 2\ln\left( \frac{n+1}{1-\operatorname{erf}\left( \frac{x}{\sqrt{2}}\right) }\right) \right] ^{-\frac{1}{2}}. \label{Phi} \end{equation}
\end{theorem}
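As a quick numerical illustration of Theorem \ref{theorem} (a Python sketch using only the standard library, not needed for the sequel), one can compare the exact value $P_{10}(1)$, computed from (\ref{diffdiff}), with the right-hand side of (\ref{Pasympt}):
\begin{verbatim}
# Illustrative comparison of P_10(1) with the asymptotic formula (Pasympt).
import math

p = [1.0]                             # build P_10 from (diffdiff)
for n in range(10):
    deriv = [k * p[k] for k in range(1, len(p))]
    shift = [0.0] + [(n + 1) * c for c in p]
    deriv += [0.0] * (len(shift) - len(deriv))
    p = [a + b for a, b in zip(deriv, shift)]

def Phi(x, n):                        # formula (Phi)
    r = math.sqrt(2 / math.pi) * math.exp(-x * x / 2) \
        / (1 - math.erf(x / math.sqrt(2)))
    return r ** (n + 1) / math.sqrt(
        2 * math.log((n + 1) / (1 - math.erf(x / math.sqrt(2)))))

n, x = 10, 1.0
exact = sum(c * x ** k for k, c in enumerate(p))
approx = math.factorial(n) * (Phi(x, n) + (-1) ** n * Phi(-x, n))
print(exact, approx, approx / exact)  # the ratio should be close to 1
\end{verbatim}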
\section{Higher derivatives of $\operatorname*{inverf}\left( z\right) $}
From (\ref{deriv2}) and (\ref{Pngn}), it follows that \begin{equation} d_{n}=\frac{1}{\sqrt{2}}\left( \sqrt{\frac{\pi}{2}}\right) ^{n} P_{n-1}(0),\quad n=1,2,\ldots, \label{deriv3} \end{equation} where $d_{n}$ was defined in (\ref{dn}). Using Theorem \ref{theorem} in (\ref{deriv3}), we have \[ d_{n}\sim\frac{1}{\sqrt{2}}\left( \sqrt{\frac{\pi}{2}}\right) ^{n}\left( n-1\right) !\,\Phi\left( 0,n-1\right) \left[ 1+\left( -1\right) ^{n-1}\right] , \] as $n\rightarrow\infty.$ Using (\ref{Phi}), we obtain \begin{equation} \frac{d_{n}}{n!}\sim\frac{1}{2n\sqrt{\ln(n)}}\left[ 1+\left( -1\right) ^{n-1}\right] ,\quad n\rightarrow\infty. \label{deriv5} \end{equation} Setting $n=2N+1$ in (\ref{deriv5}), we have \begin{equation} \frac{d_{2N+1}}{\left( 2N+1\right) !}\sim\frac{1}{\left( 2N+1\right) \sqrt{\ln(2N+1)}},\quad N\rightarrow\infty. \label{derivodd} \end{equation}
\subsection{Numerical results}
In this section we demonstrate the accuracy of the approximation (\ref{deriv5}) and construct a high order Taylor series for $\operatorname*{inverf}\left( x\right) .$ In Figure \ref{compare1} we compare the logarithm of the exact values of $\left. \frac{d^{2n+1} }{dz^{2n+1}}\operatorname*{inverf}\left( x\right) \right\vert _{x=0}$ and our asymptotic formula (\ref{deriv5}). We see that there is a very good agreement, even for moderate values of $n$.
\begin{figure}
\caption{The logarithm of the exact values of $d_{2n+1}$ compared with the asymptotic formula (\ref{deriv5}).}
\label{compare1}
\end{figure}
Using (\ref{recurrence}), we compute the exact values \[ d_{1}=\frac{1}{2}\pi^{\frac{1}{2}},\quad d_{3}=\frac{1}{4}\pi^{\frac{3}{2}},\quad d_{5}=\frac{7}{8}\pi^{\frac{5}{2}},\quad d_{7}=\frac{127}{16}\pi^{\frac{7}{2}},\quad d_{9}=\frac{4369}{32}\pi^{\frac{9}{2}} \] and form the polynomial Taylor approximation \[ T_{9}(x)=\sum\limits_{k=0}^{4}d_{2k+1}\frac{x^{2k+1}}{\left( 2k+1\right) !}. \] In Figure \ref{compare2} we graph $\frac{T_{9}(x)}{\operatorname*{inverf}\left( x\right) }$ \ and \ $\frac{T_{9}(x)+R_{N}(x)}{\operatorname*{inverf}\left( x\right) },$ for $N=10,20,$ where \begin{equation} R_{N}(x)=\sum\limits_{k=5}^{N}\frac{x^{2k+1}}{\left( 2k+1\right) \sqrt{\ln(2k+1)}},\quad N=5,6,\ldots. \label{T1} \end{equation}
\begin{figure}
\caption{The ratios $T_{9}(x)/\operatorname*{inverf}(x)$ and $\left[ T_{9}(x)+R_{N}(x)\right] /\operatorname*{inverf}(x)$ for $N=10,20$.}
\label{compare2}
\end{figure}
The functions are virtually identical in most of the interval $\left( -1,1\right) $ except for values close to $x=\pm1.$ We show the differences in detail in Figure \ref{compare3}. Clearly, the additional terms in $R_{20}(x)$ give a far better approximation for $x\simeq1.$
\begin{figure}
\caption{Detail of Figure \ref{compare2} for $x$ close to $\pm1$.}
\label{compare3}
\end{figure}
In the table below we compare the exact value of $\operatorname*{inverf}\left( x\right) $ with the approximation $T_{9}(x)+R_{N}(x)$, using the optimal truncation order $N$, for several values of $x$: \[ \begin{tabular}
[c]{|c|c|c|c|}\hline $x$ & $\operatorname*{inverf}\left( x\right) $ & $T_{9}(x)+R_{N}(x)$ & $N$\\\hline $0.7$ & $.732869$ & $.732751$ & $6$\\\hline $0.8$ & $.906194$ & $.905545$ & $7$\\\hline $0.9$ & $1.16309$ & $1.16274$ & $11$\\\hline $0.99$ & $1.82139$ & $1.82121$ & $57$\\\hline $0.999$ & $2.32675$ & $2.32676$ & $423$\\\hline $0.9999$ & $2.75106$ & $2.75105$ & $3685$\\\hline \end{tabular} \ \ . \] Clearly, (\ref{T1}) is still valid for $x\rightarrow1,$ but at the cost of having to compute many terms in the sum. In this region it is better to use the formula \cite{MR1986919} \[ \operatorname*{inverf}\left( x\right) \sim\sqrt{\frac{1}{2} \operatorname*{LW}\left[ \frac{2}{\pi\left( x-1\right) ^{2}}\right] },\quad x\rightarrow1^{-}, \] where $\operatorname*{LW}(\cdot)$ denotes the Lambert-W function \cite{MR1414285}, which satisfies \[ \operatorname*{LW}(x)\exp\left[ \operatorname*{LW}(x)\right] =x. \]
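For example, the row $x=0.9$, $N=11$ of the table can be reproduced with a few lines of Python (an illustrative sketch, standard library only; \texttt{inverf} below is a bisection-based stand-in for the exact inverse):
\begin{verbatim}
# Illustrative reproduction of the x = 0.9 row of the table.
import math

def inverf(y):                        # bisection stand-in for the exact inverse
    lo, hi = 0.0, 10.0
    for _ in range(200):
        mid = 0.5 * (lo + hi)
        if math.erf(mid) < y:
            lo = mid
        else:
            hi = mid
    return 0.5 * (lo + hi)

d = {1: 1/2, 3: 1/4, 5: 7/8, 7: 127/16, 9: 4369/32}   # d_m = value * pi^(m/2)

def T9(x):
    return sum(v * math.pi ** (m / 2) * x ** m / math.factorial(m)
               for m, v in d.items())

def R(x, N):                          # the tail (T1)
    return sum(x ** (2 * k + 1)
               / ((2 * k + 1) * math.sqrt(math.log(2 * k + 1)))
               for k in range(5, N + 1))

x, N = 0.9, 11
print(inverf(x), T9(x) + R(x, N))     # tabulated: 1.16309 and 1.16274
\end{verbatim}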
\end{document}
\begin{document}
\title{The Moduli of Klein Covers of Curves} \author{Charles Siegel} \address[Charles Siegel]{Kavli Institute for the Physics and Mathematics of the Universe (WPI), Todai Institutes for Advanced Study, the University of Tokyo} \email[Charles Siegel]{[email protected]} \urladdr{http://db.ipmu.jp/member/personal/2754en.html} \date{} \subjclass{} \keywords{}
\begin{abstract} We study the moduli space $\KM{g}$ of Klein four covers of genus $g$ curves and its natural compactification. This requires the construction of a related space parametrizing covers together with a choice of basis for the Klein four group. This space has the interesting property that its two components intersect along a component of the boundary. Further, we carry out a detailed analysis of the boundary, determining its components and the degrees of these components over their images in $\overline{\mathcal{M}_g}$, and computing the canonical divisor of $\overline{\KM{g}}$. \end{abstract}
\maketitle
\tableofcontents
\section*{Introduction}
Ever since the nineteenth century, unramified double covers have been an essential tool for studying curves. They correspond to square roots of the trivial line bundle, which form a group. This formulation can be used to study surface groups and the mapping class group, and also theta characteristics, the square roots of the canonical line bundle. There are intricate relationships between double covers and theta characteristics in particular, and the difference between them only becomes completely clear after passing to moduli.
The moduli of double covers of curves has two components, one that is isomorphic to $\mathcal{M}_g$, where the double cover is a disconnected union of two copies of the base curve, and one where the double cover is nontrivial, traditionally denoted by $\mathcal{R}_g$. While the moduli of theta characteristics also has two components, neither one is isomorphic to $\mathcal{M}_g$. The components, $\mathcal{S}_g^\pm$, correspond to whether the theta characteristic has an even or odd dimensional space of global sections.\footnote{The notations come from the French for covering, rev\^etement and from the fact that curves with theta characteristics are often called spin curves, due to connections with the quantum mechanical notion of spin.}
Another, slightly more subtle, connection between the two moduli spaces is that the theta characteristics on a curve correspond to quadratic forms on the ($\mathbb{F}_2$-vector space of) points of order two on the curve. The quadratic form is given by, if $L$ is a theta characteristic, $\mu\mapsto h^0(L\otimes \mu)-h^0(L)\mod 2$, and induces a skew-symmetric bilinear form on the points of order two. This bilinear form is independent of the theta characteristic chosen and is called the Weil pairing. The Weil pairing, however, is really an invariant of a Klein four subgroup of the Jacobian, as $\langle \mu,\nu\rangle=\langle\mu,\mu+\nu\rangle$, and in fact, if $\{0,\mu_1,\mu_2,\mu_3\}$ is a Klein four subgroup of $\mathcal{J}(C)[2]$, then the Weil pairing on the group has the value $h^0(L\otimes\mathscr{O}_C)+h^0(L\otimes \mu_1)+h^0(L\otimes \mu_2)+h^0(L\otimes \mu_3)\mod 2$, which is manifestly symmetric in the group elements, suggesting that it could be clarified by studying the moduli of Klein covers.
The boundaries of these moduli spaces have been studied in detail, and the fibers of the natural projection to $\overline{\mathcal{M}_g}$ have been made very explicit. The approach originates in {\cite{MR1082361}} for $\overline{\mathcal{S}_g}^{\pm}$ and is pushed through in detail in {\cite{MR2007379}}, and adapted to $\overline{\mathcal{R}_g}$ in {\cite{MR2117416}}. More recently, this approach has been adapted to proving that pluricanonical forms extend to both $\overline{\mathcal{S}_g}^{\pm}$ {\cite{MR2551759}} and to $\overline{\mathcal{R}_g}$ {\cite{MR2639318}}, bringing the study of the birational geometry of these spaces into reach.
\subsection*{This paper}
In this paper, we extend the description of the boundary and pluricanonical forms to the moduli of Klein covers of curves. This, however, is difficult to do directly, so instead we introduce an intermediate moduli space, $\overline{\ZZM{g}}$, of pairs of Prym curves and study it, then use the relationship between it and the moduli of Klein covers $\overline{\KM{g}}$ to prove the results on this space. In fact, we use this relationship to define $\overline{\KM{g}}$.
In section 1 of this paper, we recall relevant facts about double covers. In particular, the classification of points in the fiber over a stable curve from {\cite{MR2117416}}, and the relationship between two competing notations for the components of the boundary of $\overline{\mathcal{R}_g}$, used in, for instance, {\cite{MR903385}} and \cite{MR2976944}.
In section 2, we initiate the study of $\overline{\ZZM{g}}$, focusing on the interior. We construct the space and show that there are two connected components, corresponding to Weil pairing 0, which was studied in \cite{1302.5946} under the notation $\mathcal{R}^2\mathcal{M}_g$, and Weil pairing 1, and we compute the degree of each component over $\mathcal{M}_g$, reproducing a result in {\cite{1206.5498}}, which holds in the degenerate case where the dihedral group has only four elements.
In section 3, we analyze the boundary of $\overline{\ZZM{g}}$ in detail, describing the fibers over $\overline{\mathcal{M}_g}$, then identify the boundary components and determine how many objects in each fiber lie in each component. Here, we note an interesting fact. Although $\ZZM{g}$ is an unramified covering of $\mathcal{M}_g$ and has two components, the natural compactification $\overline{\ZZM{g}}$ is in fact connected, and the boundaries of the two components intersect nicely along a single component.
In section 4, we proceed to analyze $\overline{\KM{g}}$ and its boundary. We do so by showing that the group action of $\PSL_2(\mathbb{F}_2)$ on $\ZZM{g}$ extends to the boundary of each of the two components separately, identifying several components of $\partial\overline{\ZZM{g}}$. This allows us to describe the (much simpler) boundary of $\overline{\KM{g}}$ and to show that the natural map $\overline{\KM{g}}\to \overline{\mathcal{M}_g}$ is simply ramified along a single boundary component.
In the last section, we follow \cite{MR2639318}, \cite{MR2551759} and \cite{MR664324} to extend the pluricanonical forms from the smooth locus to an arbitrary resolution of singularities. The main tool in this is the Reid--Shepherd-Barron--Tai criterion \cite{MR605348,MR763023}. We conclude with a slope criterion for $\overline{\KM{g}}^i$ to be of general type analogous to similar results for $\overline{\mathcal{M}}_g$ and $\overline{\mathcal{R}_g}$:
\begin{reptheorem}{maintheorem} For any $g$, $\overline{\KM{g}}^i$ has general type if there exists a single effective divisor $D\equiv a\lambda-\sum_T b_{\Delta_T}\Delta_T$ where $T$ runs over all boundary components, such that all the ratios $\frac{a}{b_T}$ are less than $\frac{13}{2}$ and the ratios $\frac{a}{b_{II,III,III}}$, $\frac{a}{b_{1,g-1,1:g-1}}$, $\frac{a}{b_{1,1,1}}$, $\frac{a}{b_{g-1,g-1,g-1}}$, $\frac{a}{b_{1,1:g-1,1:g-1}}$, $\frac{a}{b_{g-1,1:g-1,1:g-1}}$, and $\frac{a}{b_{1:g-1,1:g-1,1:g-1}}$ are less than $\frac{13}{3}$. \end{reptheorem}
\subsection*{Acknowledgments} I would like to thank Gavril Farkas, for suggesting that the birational geometry of this space might be interesting, as well as for conversations on the relationship between the Weil pairing, theta characteristics and Klein four curves. Also I would like to thank Angela Gibney, Joe Harris, Tyler Kelly and Angela Ortega for helpful discussions on the moduli of curves, coverings, the Weil pairing and birational geometry and Amir Aazami and Jesse Wolfson for comments on an earlier draft. This work was supported by World Premier International Research Center Initiative (WPI Initiative), MEXT, Japan.
\section{Background}
In this section, we will recall relevant facts about double covers and points of order two on curves. For $C$ a smooth projective curve over $\mathbb{C}$, we denote by $\mathcal{J}(C)$ the group of line bundles of degree $0$ on $C$. It has a natural subgroup $\mathcal{J}(C)[2]$ consisting of the elements whose square is trivial.
\begin{lemma} \label{lemma:prymdefs} The following data are equivalent:
\begin{enumerate}
\item $\tilde{C}\to C$ an irreducible \'etale double cover,
\item $\mu\in \mathcal{J}(C)[2]$ nonzero, and
\item $\tilde{C}\in\mathcal{M}_{2g-1}$ with $\iota:\tilde{C}\to\tilde{C}$ a fixed point free involution. \end{enumerate} \end{lemma}
\begin{proof} Given a point of order two, we get an unramified double cover by looking at $\underline{\Spec}(\mathscr{O}_C\oplus \mu)$. Conversely, given a double cover, there is a single point of order two that pulls back to zero.
To get between 1 and 3, we note that $C\cong \tilde{C}/\iota$. \end{proof}
By Lemma \ref{lemma:prymdefs}, we have an equivalence between $\mathcal{J}(C)[2]$ and the set of \'etale double covers, with 0 corresponding to the trivial double cover. This induces a group structure on double covers, and if $\tilde{C}_\mu,\tilde{C}_\nu$ correspond to $\mu,\nu$, then $\tilde{C}_{\mu+\nu}$ is given by $\tilde{C}_\mu\times_C\tilde{C}_\nu/(\iota_\mu,\iota_\nu)$.
\begin{definition}[quasistable curve] A genus $g\geq 2$ curve $X$ is \emph{quasistable} if every smooth rational component has at least two nodes and no two of these components intersect. We call the stable curve $C$ obtained by removing these rational components and gluing the nodes together the stabilization of $X$, and the nodes of $C$ obtained this way are the exceptional nodes and the rational components of $X$ over them exceptional components. \end{definition}
We define the nonexceptional curve to be the union of the nonexceptional components and denote it by $X_{ne}$.
\begin{definition}[Prym curve] A \emph{Prym curve} is a triple $(X,\eta,\beta)$ where $X$ is quasistable, $\eta\in\mathcal{J}(X)$ such that for all exceptional components, $E$, we have $\eta_E\cong\mathscr{O}_E(1)$, and $\beta:\eta^{\otimes 2}\to\mathscr{O}_X$ is a homomorphism that is generically nonzero on each nonexceptional component. \end{definition}
\begin{remark} In the notation we will use for other objects, a Prym curve would be called a $\mathbb{Z}/2\mathbb{Z}$ curve, but we will continue to refer to them as Prym curves. \end{remark}
\begin{definition}[Isomorphism of Prym Curves] An isomorphism of Prym curves $(X,\eta,\beta)$ and $(X',\eta',\beta')$ is an isomorphism $\sigma:X\to X'$ such that there exists an isomorphism $\tau:\sigma^*(\eta')\to \eta$ such that the diagram commutes:
\begin{center} \leavevmode \begin{xy} (0,0)*+{\sigma^*(\mathscr{O}_{X'})}="a"; (20,0)*+{\mathscr{O}_X}="b"; (0,20)*+{\sigma^*(\eta')^{\otimes 2}}="c"; (20,20)*+{\eta^{\otimes 2}}="d"; {\ar^{\sim} "a";"b"}; {\ar^{\tau^{\otimes 2}} "c";"d"}; {\ar_{\sigma^*(\beta')} "c";"a"}; {\ar^{\beta} "d";"b"}; \end{xy} \end{center} \end{definition}
Note that the definition of isomorphism of Prym curves does not depend on what $\tau$ is chosen, only on $\sigma$.
Any automorphism of a Prym curve which induces the identity on the stable model of $X$ will be called \textit{inessential}. The group of automorphisms will be denoted by $\Aut(X,\eta,\beta)$ and the inessential automorphisms will be $\Aut_0(X,\eta,\beta)$.
For the rest of this paper, for any curve, we denote by $\nu$ its normalization morphism and by $g^\nu$ the geometric genus of its normalization:
\begin{proposition}[{\cite[Proposition 11]{MR2117416}}] Let $X$ be a quasistable curve, $Z$ its stable model, $\Gamma_Z$ the dual graph of $Z$, and $\Delta_X$ the set of nodes not lying under an exceptional curve, and assume further that $\Delta_X^c$ is Eulerian. Then there are $2^{2g^\nu+b_1(\Delta_X)}$ Prym curves supported on $X$ and each has multiplicity $2^{b_1(\Gamma_Z)-b_1(\Delta_X)}$ in the fiber of $\mathcal{R}_g\to\mathcal{M}_g$. \end{proposition}
We will denote the moduli space of these curves by $\overline{\ZM{g}}$, and we note that it has two components: one, isomorphic to $\overline{\mathcal{M}}_g$, where the Prym curve has $\eta\cong \mathscr{O}_X$ over a stable base, and the other, $\overline{\mathcal{R}}_g$, parametrizing the nontrivial Prym curves.
\begin{remark}[Notation] The boundary components of the Prym moduli space have several competing notations in the literature. For the $2^{2g}$ objects, we always have 1 that is the disconnected double cover, and in this setting, it lies over the stable curve itself.
Additionally, Donagi \cite{MR903385} classified the nontrivial points of order two on an irreducible 1-nodal curve in terms of the vanishing cycle $\delta$. In his notation, $\Delta_I$ is the subset with the marked point $\mu$ being equal to $\delta$, $\Delta_{II}$ when $\langle \delta,\mu\rangle=0$ but $\mu\neq\delta$ and $\Delta_{III}$ being when $\langle \delta,\mu\rangle\neq 0$, under the Weil pairing, defined below. Alternately, these three components are denoted by $\Delta_0''$, $\Delta_0'$ and $\Delta_0^{ram}$ by Farkas \cite{MR2639318} and it is noted that $\Delta_{III}=\Delta_0^{ram}$ is precisely the set of Prym curves on the quasistable curve. In the rest of this article, however, we will follow Donagi's notation.
Over the 1-nodal reducible curves, the notation agrees, and the components are $\Delta_i$, $\Delta_{g-i}$ and $\Delta_{i:g-i}$, for the Prym curves supported on the component of genus $i$, $g-i$ or both, respectively. \end{remark}
\section{The space \texorpdfstring{$\overline{\ZZM{g}}$}{ZMg}}
\begin{definition}[Weil Pairing] Let $\mu,\nu\in\mathcal{J}(C)[2]$, and let $\kappa\in\Pic(C)$ such that $\kappa^{\otimes 2}\cong K_C$. Then the Weil pairing on the curve $C$ is given by \[\langle\mu,\nu\rangle=h^0(\kappa)+h^0(\kappa\otimes\mu)+h^0(\kappa\otimes \nu)+h^0(\kappa\otimes\mu\otimes\nu)\mod 2.\] \end{definition}
The Weil pairing is bilinear, skew-symmetric, and independent of the choice of $\kappa$, and therefore we can see that $\langle\mu,\nu\rangle=\langle\mu,\mu+\nu\rangle=\langle \nu,\mu+\nu\rangle$, and so it is an invariant of a rank 2 subgroup of $\mathcal{J}(C)[2]$.
\begin{lemma} \label{lemma:Z22Mg} The following data are equivalent:
\begin{enumerate}
\item A curve $C\in\mathcal{M}_g$ along with $\tilde{C}_i\to C$, $i=1,2$ irreducible, nonisomorphic unramified double covers,
\item A curve $C\in\mathcal{M}_g$ along with $\mu_1,\mu_2\in \mathcal{J}(C)[2]$, with $\mu_1\neq\mu_2$, and
\item A pair of curves $C,D\in \mathcal{M}_{2g-1}$ along with involutions $\sigma_C,\sigma_D$ that act freely on $C,D$ respectively and which are such that $C/\sigma_C$ and $D/\sigma_D$ are isomorphic, but the pairs $(C,\sigma_C)$ and $(D,\sigma_D)$ are not. \end{enumerate} \end{lemma}
\begin{proof} This is just an application of Lemma \ref{lemma:prymdefs}. \end{proof}
A related, but slightly different result is the following, where we do not choose a basis for the Klein four group.
\begin{lemma} \label{lemma:K2^2Mg} The following data are equivalent:
\begin{enumerate}
\item A curve $C\in \mathcal{M}_g$, and $\tilde{C}\to C$ an \'etale Klein $4$ cover,
\item A curve $C\in \mathcal{M}_g$, and $\phi:V_{4}\to \mathcal{J}(C)[2]$ an injective homomorphism, and
\item A curve $\tilde{C}\in \mathcal{M}_{4g-3}$ with a free action of $V_{4}$ on $\tilde{C}$. \end{enumerate} \end{lemma}
\begin{proof} This is again just an application of Lemma \ref{lemma:prymdefs}. \end{proof}
For the rest of this section, we will be working in the case with a basis, and later will return to the basis-free case.
\begin{definition}[$\mathbb{Z}_2^2$ curve] A $\mathbb{Z}_2^2$ curve is $(X_1,X_2,\eta_1,\eta_2,\beta_1,\beta_2)$ where $(X_i,\eta_i,\beta_i)$ is a Prym curve for $i=1,2$ and $X_1$ and $X_2$ have isomorphic stabilizations. \end{definition}
An isomorphism of $\mathbb{Z}_2^2$ curves is just a pair of isomorphisms of Prym curves that induce the same isomorphism on the stable model. Equivalently, an isomorphism can be seen by looking at the quasistable curve whose exceptional nodes are the union of the exceptional nodes of $X_1$ and $X_2$. From this viewpoint, an isomorphism is an isomorphism of such quasi-stable curves which induces isomorphisms on the pullbacks of $\eta_i$ and $\eta_i'$.
Given this, we can see that the moduli space of $\mathbb{Z}_2^2$ curves is just $\overline{\ZM{g}}\times_{\overline{\mathcal{M}}_g}\overline{\ZM{g}}$. It is easy to see that it has at least five components, depending on $\eta_1,\eta_2$. If both are trivial, we have a copy of $\overline{\mathcal{M}}_g$. If one is trivial but the other is nontrivial, then we get two copies of $\overline{\mathcal{R}}_g$. Additionally, when $\eta_1\cong \eta_2$ is nontrivial, we get another copy of $\overline{\mathcal{R}}_g$, leaving the remaining locus:
\begin{definition}[$\overline{\ZZM{g}}$] We denote by $\overline{\ZZM{g}}$ the closure in $\overline{\ZM{g}}\times_{\overline{\mathcal{M}}_g}\overline{\ZM{g}}$ of the locus of $\mathbb{Z}_2^2$ curves with $\eta_1\not\cong\eta_2$ both nontrivial. The locus of smooth curves in $\overline{\ZZM{g}}$ will be denoted by $\ZZM{g}$. \end{definition}
Geometrically, the most natural thing to study is the moduli space of Klein four subgroups, with no choice of basis. This, by Lemma \ref{lemma:K2^2Mg} is then the moduli space of Klein four covers of curves. However, it is much easier to construct the space with a choice of basis, which is $\overline{\ZZM{g}}$ (also note that the geometricity of the covers is less clear on the boundary). There is a natural $\PSL_2(\mathbb{F}_2)$ action on $\ZZM{g}$, permuting the ordered bases of the $\mathbb{Z}_2^2\subset \mathcal{J}(C)[2]$. Below, we will extend it to the boundary and construct $\overline{\KM{g}}$, and we will deduce many of its properties from those of $\overline{\ZZM{g}}$.
Here, we note that $\Aut(C,\eta_1,\eta_2)$ is just $\Aut(X_1,\eta_1,\beta_1)\times_{\Aut(C)}\Aut(X_2,\eta_2,\beta_2)$ where $(X_i,\eta_i,\beta_i)$ are the two Prym structures. Thus, the results of \cite{MR2117416} apply without difficulty, so we have good behavior of the universal deformation, we have a subgroup of inessential automorphisms, etc. We will recall those facts we need in the last section of the paper.
\begin{lemma} \label{DegZZM} The map $\ZZM{g}\to\mathcal{M}_g$ has degree $(2^{2g}-1)(2^{2g}-2)$, thus, so does $\overline{\ZZM{g}}\to\overline{\mathcal{M}_g}$. \end{lemma}
\begin{proof} We know that the degree of $\ZM{g}\times_{\mathcal{M}_g}\ZM{g}\to\mathcal{M}_g$ is $2^{4g}$ and that it breaks down as $\mathcal{M}_g\cup\mathcal{R}_g\cup\mathcal{R}_g\cup\mathcal{R}_g\cup\ZZM{g}$. Each component is dominant and equidimensional, so we can compute the degree on $\ZZM{g}$ as $2^{4g}-1-3(2^{2g}-1)=(2^{2g}-1)(2^{2g}-2)$. \end{proof}
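As a quick sanity check on this count, the identity $2^{4g}-1-3(2^{2g}-1)=(2^{2g}-1)(2^{2g}-2)$ can be verified numerically, for instance with the following short Python sketch (an illustration only).
\begin{verbatim}
# Check the degree identity 2^(4g) - 1 - 3*(2^(2g)-1) = (2^(2g)-1)(2^(2g)-2).
for g in range(2, 12):
    lhs = 2 ** (4 * g) - 1 - 3 * (2 ** (2 * g) - 1)
    rhs = (2 ** (2 * g) - 1) * (2 ** (2 * g) - 2)
    assert lhs == rhs, (g, lhs, rhs)
\end{verbatim}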
The space $\ZZM{g}$ is not irreducible. We can see that there must be at least two components because the Weil pairing is deformation invariant; we will write $\ZZM{g}^0$ and $\ZZM{g}^1$ for the two components, with Weil pairing respectively 0 and 1.
\begin{lemma} The spaces $\ZZM{g}^0$ and $\ZZM{g}^1$ are both irreducible. \end{lemma}
\begin{proof} For any curve $C$ of genus $g$, the action of $\Sp(2g,\mathbb{F}_2)$ on the space $\mathcal{J}(C)[2]\times \mathcal{J}(C)[2]$ has two orbits, pairs of points that are orthogonal and pairs that are nonorthogonal, and this is the monodromy of $\ZZM{g}\to \mathcal{M}_g$. \end{proof}
\begin{remark} Although slightly more complex, in the case where we look at points of order $n$ rather than points of order $2$, we get a similar theorem, where the number of components is indexed by $\mathbb{Z}_n$. Similarly, the Klein moduli space, which we will study below, will have components indexed by $\mathbb{Z}_n/\mathbb{Z}_n^\times$. In our case, both of these are $\mathbb{Z}_2$, and we will identify them with $\{0,1\}$. \end{remark}
Before moving on to a detailed analysis of the boundary, we compute the degrees of the maps $\ZZM{g}^i\to\mathcal{M}_g$.
\begin{proposition} \label{DegZZM01} We have natural maps to $\mathcal{M}_g$ forgetting the points of order two, and their degrees are:
\begin{enumerate}
\item $\ZZM{g}^0\to \mathcal{M}_g$ has degree $(2^{2g}-1)(2^{2g-1}-2)$
\item $\ZZM{g}^1\to \mathcal{M}_g$ has degree $(2^{2g}-1)2^{2g-1}$ \end{enumerate} \end{proposition}
\begin{proof} Fix a smooth curve $C$ of genus $g$.
Any element of $\ZZM{g}$ lying over $C$ is of the form $(C,\eta,\eta')$ where $\eta,\eta'\in\mathcal{J}(C)[2]$, which is an $\mathbb{F}_2$ vector space. To be in $\ZZM{g}^0$ they must be orthogonal under the Weil pairing, which is a nondegenerate form. Thus, for each $\eta$, we must choose $\eta'\in\eta^\perp$, a hyperplane in $\mathcal{J}(C)[2]$. However, as they must be linearly independent, we require that $\eta'\notin\{0,\eta\}$. Thus, we have $2^{2g}-1$ choices for $\eta$, and given one of those, we have $2^{2g-1}-2$ choices of $\eta'$ satisfying these conditions, which computes the degree of $\ZZM{g}^0\to\mathcal{M}_g$.
To determine the degree of $\ZZM{g}^1\to \mathcal{M}_g$, we note that we can again choose $\eta$ freely, and now $\eta'\in \mathcal{J}(C)[2]\setminus \eta^\perp$, giving us the degree claimed. \end{proof}
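These two counts can also be checked by brute force over $\mathbb{F}_2^{2g}$ equipped with the standard symplectic form; the following Python sketch (an illustration only, for small $g$) counts ordered pairs of distinct nonzero vectors that are orthogonal, respectively non-orthogonal.
\begin{verbatim}
# Brute-force check of the two degrees over F_2^{2g} with the standard
# symplectic form <u,v> = sum_i (u_i v_{g+i} + u_{g+i} v_i) mod 2.
from itertools import product

def pairing(u, v, g):
    return sum(u[i] * v[g + i] + u[g + i] * v[i] for i in range(g)) % 2

for g in [2, 3]:
    vecs = [v for v in product([0, 1], repeat=2 * g) if any(v)]
    orth = sum(1 for u in vecs for v in vecs
               if u != v and pairing(u, v, g) == 0)
    nonorth = sum(1 for u in vecs for v in vecs
                  if u != v and pairing(u, v, g) == 1)
    assert orth == (2 ** (2 * g) - 1) * (2 ** (2 * g - 1) - 2)
    assert nonorth == (2 ** (2 * g) - 1) * 2 ** (2 * g - 1)
\end{verbatim}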
\section{Geometry of the boundary}
\begin{proposition} Let $Z$ be a stable curve with dual graph $\Gamma_Z$. The fiber over $Z$ in the $\overline{\ZZM{g}}$ consists of the following objects:
\begin{enumerate}
\item $(2^{2g^\nu+b_1(\Gamma_Z)}-1)(2^{2g^\nu+b_1(\Gamma_Z)}-2)$ objects of multiplicity 1 where both Prym curves are supported on $Z$.
\item If $X$ is quasistable with stabilization $Z$ and $\Delta_X^c$ is Eulerian, then we get $2^{4g^\nu+b_1(\Delta_X)+b_1(\Gamma_Z)}-2^{2g^\nu+b_1(\Delta_X)}$ objects of multiplicity $2^{b_1(\Gamma_Z)-b_1(\Delta_X)}$ supported on each of $(X,Z)$ and $(Z,X)$.
\item If $X$ is quasistable with stabilization $Z$ and $\Delta_X^c$ is Eulerian, then we get two types of objects supported on $(X,X)$:
\begin{enumerate}
\item $2^{4g^\nu+2b_1(\Delta_X)}-2^{2g^\nu+b_1(\Delta_X)}$ objects of multiplicity $2^{2b_1(\Gamma_Z)-2b_1(\Delta_X)}$ with two distinct Prym structures
\item $2^{2g^\nu+b_1(\Delta_X)}$ objects of multiplicity $2^{2b_1(\Gamma_Z)-2b_1(\Delta_X)}-2^{b_1(\Gamma_Z)-b_1(\Delta_X)}$ with the same Prym structure.
\end{enumerate}
\item If $X_1$, $X_2$ are quasistable over $Z$ and $\Delta_{X_1}^c$ and $\Delta_{X_2}^c$ are Eulerian, then there exist $2^{4g^\nu+b_1(\Delta_{X_1})+b_1(\Delta_{X_2})}$ objects of multiplicity $2^{2b_1(\Gamma_Z)-b_1(\Delta_{X_1})-b_1(\Delta_{X_2})}$ on $(X_1,X_2)$ and again on $(X_2,X_1)$. \end{enumerate} \end{proposition}
\begin{proof} The fiber is a subscheme of $R_Z\times R_Z$ which is the complement of $M_g\cup R_g\cup R_g\cup R_g$, components which split off completely over smooth base curves. Away from the diagonal, we simply remove anything where one of the components is the trivial line bundle. On the diagonal, the situation is somewhat more intricate. We look at $\overline{\ZM{g}}\times_{\overline{\mathcal{M}_g}}\overline{\ZM{g}}\setminus(\overline{\mathcal{M}_g}\cup\overline{\ZM{g}}\cup\overline{\ZM{g}}\cup\overline{\ZM{g}})$ and then take the closure. This leaves us with some points on the diagonal, which we can see because on any degeneration, there will be classes of Prym curves that will degenerate to the same thing, which is seen by noting the multiplicity greater than 1.
Additionally, a straightforward computation summing these over all of the Euler paths gives us total degree $(2^{2g^\nu+2b_1(\Gamma_Z)}-1)(2^{2g^\nu+2b_1(\Gamma_Z)}-2)$, which is the degree of the moduli space, showing that nothing has been missed. \end{proof}
Now, applying the above to the general point on the boundary, we see that for a reducible 1-nodal curve, all Prym curves on it are supported on the stable curve itself, yielding $(2^{2g}-1)(2^{2g}-2)$ objects of multiplicity 1. The case of an irreducible curve is a bit more complex:
\begin{corollary} \label{deltairrlemma}
Let $Z$ be a 1-nodal irreducible stable curve of genus $g$ and $\nu:Z^\nu\to Z$ its normalization. There is only one unstable quasistable curve, $X=Z^\nu\cup_{x,y}\mathbb{P}^1$, where $x,y$ are the preimages of the node in $Z^\nu$. Then the fiber of $\overline{\ZZM{g}}\to \overline{\mathcal{M}}_g$ over $Z$ consists of the following objects:
\begin{enumerate}
\item On $(Z,Z)$, we have $(2^{2g-1}-1)(2^{2g-1}-2)$ objects of multiplicity 1.
\item On each $(Z,X)$ and $(X,Z)$, we have a total of $2^{4g-2}-2^{2g-1}$ objects of multiplicity 2
\item On $(X,X)$ we have two types of objects: \begin{enumerate}
\item $2^{4g-4}-2^{2g-2}$ objects of multiplicity 4 with non-isomorphic projections to $\overline{\ZM{g}}$.
\item $2^{2g-2}$ objects of multiplicity 2 with the same projections to $\overline{\ZM{g}}$. \end{enumerate} \end{enumerate} \end{corollary}
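Counted with multiplicity, these objects should again account for the full degree $(2^{2g}-1)(2^{2g}-2)$ of $\overline{\ZZM{g}}\to\overline{\mathcal{M}_g}$; a minimal Python sketch of this consistency check is the following.
\begin{verbatim}
# Fiber over an irreducible 1-nodal curve: objects times multiplicities
# should add up to the degree (2^(2g)-1)(2^(2g)-2).
for g in range(2, 12):
    total = ((2 ** (2 * g - 1) - 1) * (2 ** (2 * g - 1) - 2)  # (Z,Z), mult 1
             + 2 * (2 ** (4 * g - 2) - 2 ** (2 * g - 1))      # (Z,X), (X,Z) together, mult 2
             + 4 * (2 ** (4 * g - 4) - 2 ** (2 * g - 2))      # (X,X) distinct, mult 4
             + 2 * 2 ** (2 * g - 2))                          # (X,X) equal, mult 2
    assert total == (2 ** (2 * g) - 1) * (2 ** (2 * g) - 2), g
\end{verbatim}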
Now, we must compute the list of boundary divisors. We begin by looking at the boundary of $\overline{\ZM{g}}\times_{\overline{\mathcal{M}_g}}\overline{\ZM{g}}$. Points on the boundary can be classified into products $\Delta_a\times \Delta_b$ where $a,b$ are in $\{I,II,III\}$ over an irreducible 1-nodal curve and in $\{i,g-i,i:g-i\}$ over a reducible curve with components of genus $i$ and $g-i$. We denote the restrictions of these loci to $\overline{\ZZM{g}}$ by $\Delta_{a,b}$, and note that while some of these are irreducible, many are not.
The components with nonirreducible restrictions to $\overline{\ZZM{g}}^i$ are $\Delta_{II,II}$, $\Delta_{III,III}$, $\Delta_{i,i:g-i}$, $\Delta_{i:g-i,i}$, $\Delta_{g-i,i:g-i}$, $\Delta_{i:g-i,g-i}$, and $\Delta_{i:g-i,i:g-i}$. Specifically, they break up as \begin{eqnarray*} \Delta_{II,II} &=& \Delta_{II,II}^{\pm}+\Delta_{II,II}'\\ \Delta_{III,III} &=& \Delta_{III,III}^{diag}+\Delta_{III,III}'\\ \Delta_{i,i:g-i} &=& \Delta_{i,i:g-i}^i+\Delta_{i,i:g-i}'\\ \Delta_{i:g-i,i} &=& \Delta_{i:g-i,i}^i+\Delta_{i:g-i,i}'\\ \Delta_{g-i,i:g-i} &=& \Delta_{g-i,i:g-i}^{g-i}+\Delta_{g-i,i:g-i}'\\ \Delta_{i:g-i,g-i} &=& \Delta_{i:g-i,g-i}^{g-i}+\Delta_{i:g-i,g-i}'\\ \Delta_{i:g-i,i:g-i} &=& \Delta_{i:g-i,i:g-i}^i+\Delta_{i:g-i,i:g-i}^{g-i}+\Delta_{i:g-i,i:g-i}' \end{eqnarray*} where (with equality meaning equal to the closure of) \begin{eqnarray*}
\Delta_{II,II}^{\pm} &=& \{(\eta_1,\eta_2)|\nu^*\eta_1\cong\nu^*\eta_2\}\\
\Delta_{II,II}' &=& \{(\eta_1,\eta_2)|\nu^*\eta_1\not\cong\nu^*\eta_2\}\\
\Delta_{III,III}^{diag} &=& \{(\eta_1,\eta_2)|\eta_1\cong \eta_2\}\\
\Delta_{III,III}' &=& \{(\eta_1,\eta_2)|\eta_1\not\cong\eta_2\}\\
\Delta_{i,i:g-i}^i &=& \{(\eta_1,\eta_2)|\eta_1|_{C_1}\cong \eta_2|_{C_1}\}\\
\Delta_{i,i:g-i}' &=& \{(\eta_1,\eta_2)|\eta_1|_{C_1}\not\cong \eta_2|_{C_1}\}\\
\Delta_{i:g-i,i}^i &=& \{(\eta_1,\eta_2)|\eta_1|_{C_1}\cong \eta_2|_{C_1}\}\\
\Delta_{i:g-i,i}' &=& \{(\eta_1,\eta_2)|\eta_1|_{C_1}\not\cong \eta_2|_{C_1}\}\\
\Delta_{g-i,i:g-i}^{g-i} &=& \{(\eta_1,\eta_2)|\eta_1|_{C_2}\cong \eta_2|_{C_2}\}\\
\Delta_{g-i,i:g-i}' &=& \{(\eta_1,\eta_2)|\eta_1|_{C_2}\not\cong \eta_2|_{C_2}\}\\
\Delta_{i:g-i,g-i}^{g-i} &=& \{(\eta_1,\eta_2)|\eta_1|_{C_2}\cong \eta_2|_{C_2}\}\\
\Delta_{i:g-i,g-i}' &=& \{(\eta_1,\eta_2)|\eta_1|_{C_2}\not\cong \eta_2|_{C_2}\}\\
\Delta_{i:g-i,i:g-i}^i &=& \{(\eta_1,\eta_2)|\eta_1|_{C_1}\cong \eta_2|_{C_1}\}\\
\Delta_{i:g-i,i:g-i}^{g-i}&=& \{(\eta_1,\eta_2)|\eta_1|_{C_2}\cong \eta_2|_{C_2}\}\\
\Delta_{i:g-i,i:g-i}' &=& \{(\eta_1,\eta_2)|\eta_1|_{C_i}\not\cong \eta_2|_{C_i}\mbox{ for }i=1,2\} \end{eqnarray*}
Now that we have a list of all of the components, we compute which objects are in which, a straightforward computation:
\begin{proposition} \label{prop:1nidegrees} Over the locus of 1-nodal irreducible curves in $\overline{\mathcal{M}_g}$, the boundary components of $\overline{\ZZM{g}}$ consist of
\begin{eqnarray*} \Delta_{I,II} &=& 2^{2g-1}-2\mbox{ objects of multiplicity }1\\ \Delta_{II,I} &=& 2^{2g-1}-2\mbox{ objects of multiplicity }1\\ \Delta_{I,III} &=& 2^{2g-2}\mbox{ objects of multiplicity }2\\ \Delta_{III,I} &=& 2^{2g-2}\mbox{ objects of multiplicity }2\\ \Delta_{II,III} &=& 2^{2g-2}(2^{2g-1}-2)\mbox{ objects of multiplicity }2\\ \Delta_{III,II} &=& 2^{2g-2}(2^{2g-1}-2)\mbox{ objects of multiplicity }2\\ \Delta_{II,II}^{\pm} &=& 2^{2g-1}-2\mbox{ objects of multiplicity }1\\ \Delta_{II,II}' &=& (2^{2g-1}-2)(2^{2g-1}-4)\mbox{ objects of multiplicity }1\\ \Delta_{III,III}^{diag} &=& 2^{2g-2}\mbox{ objects of multiplicity }2\\ \Delta_{III,III}' &=& 2^{4g-4}-2^{2g-2}\mbox{ objects of multiplicity }4 \end{eqnarray*} \end{proposition}
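Again the degrees and multiplicities listed above should add up to the total degree $(2^{2g}-1)(2^{2g}-2)$; the following short Python sketch records this check.
\begin{verbatim}
# Boundary components over an irreducible 1-nodal curve, counted with
# multiplicity, should add up to (2^(2g)-1)(2^(2g)-2).
for g in range(2, 12):
    a, b = 2 ** (2 * g - 1), 2 ** (2 * g - 2)
    total = (2 * (a - 2)                   # Delta_{I,II} and Delta_{II,I}
             + 2 * 2 * b                   # Delta_{I,III} and Delta_{III,I}, mult 2
             + 2 * 2 * b * (a - 2)         # Delta_{II,III} and Delta_{III,II}, mult 2
             + (a - 2)                     # Delta_{II,II}^{pm}
             + (a - 2) * (a - 4)           # Delta_{II,II}'
             + 2 * b                       # Delta_{III,III}^{diag}, mult 2
             + 4 * (2 ** (4 * g - 4) - b)) # Delta_{III,III}', mult 4
    assert total == (2 ** (2 * g) - 1) * (2 ** (2 * g) - 2), g
\end{verbatim}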
\begin{remark} \label{Intersection} It is interesting to note that away from $\Delta_{III,III}'$, the Weil pairing is well-defined by continuity. However, on this divisor, this fails. Let us look at a simple example. Let $C$ be a smooth genus 2 curve with $p_1,\ldots,p_6$ the fixed points of the hyperelliptic involution. Then any point of order two is $p_i-p_j$ for $i\neq j$, and $p_i-p_j\equiv p_j-p_i$. If the vanishing cycle of the degeneration is $p_1-p_2$, then the pairs $(p_1-p_3,p_1-p_4)$ and $(p_1-p_3,p_2-p_4)$ both degenerate to the same point of $\Delta_{III,III}'$. We note that, for a genus 2 curve, the Weil pairing can be described as the cardinality, taken modulo 2, of the intersection of the sets of indices appearing in these representations. Thus, $\langle p_1-p_3,p_1-p_4\rangle=1$ and $\langle p_1-p_3,p_2-p_4\rangle=0$. Thus, the two components of this moduli space intersect on the boundary! \end{remark}
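This description of the genus 2 Weil pairing is easy to make explicit; the following minimal Python sketch (an illustration of the remark only, with a hypothetical function name) computes $\langle p_i-p_j,p_k-p_l\rangle$ as the parity of the number of common indices and recovers the two values above.
\begin{verbatim}
# Genus 2 Weil pairing in the hyperelliptic model of the remark:
# <p_i - p_j, p_k - p_l> = #({i,j} intersect {k,l}) mod 2.
def weil_pairing(pair1, pair2):
    return len(set(pair1) & set(pair2)) % 2

assert weil_pairing((1, 3), (1, 4)) == 1   # <p_1 - p_3, p_1 - p_4> = 1
assert weil_pairing((1, 3), (2, 4)) == 0   # <p_1 - p_3, p_2 - p_4> = 0
\end{verbatim}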
In fact, we can say a bit more about the intersection:
\begin{theorem} \label{thm:intersection} The intersection $\Delta_{III,III}'=\overline{\ZZM{g}}^0\cap\overline{\ZZM{g}}^1$ is transverse, in the sense that if $(C,\eta_1,\eta_2)\in\Delta_{III,III}'$, then we have \[T_{(C,\eta_1,\eta_2)}\overline{\ZZM{g}}\cong T_{(C,\eta_1,\eta_2)}\overline{\ZZM{g}}^0\oplus_{T_{(C,\eta_1,\eta_2)}\Delta_{III,III}'}T_{(C,\eta_1,\eta_2)}\overline{\ZZM{g}}^1.\] \end{theorem}
\begin{proof} To see that this is precisely the intersection, we look at a degeneration of Prym curves with vanishing cycle $\delta$. The fiber over the nodal curve only has points coming together over $\Delta_{III}$, which is the part of the fiber over the quasi-stable curve. There, every Prym curve structure is the limit of both $\eta$ and $\eta+\delta$ for $\eta$ some Prym curve structure on a smooth curve in the degeneration. The Weil pairing is well-defined on all components over $\Delta_{III}$ other than $\Delta_{III,III}'$, by linearity and the definitions of $\Delta_I$ and $\Delta_{II}$. However, on $\Delta_{III,III}$, we have $(\eta_1,\eta_2)$ two Prym structures giving a $\mathbb{Z}_2^2$ curve. This limit, as $\delta$ vanishes, is the same as the limit of $(\eta_1,\eta_2+\delta)$, and also of two other loci. However, the Weil pairing is linear, so $\langle\eta_1,\eta_2+\delta\rangle=\langle\eta_1,\eta_2\rangle+\langle\eta_1,\delta\rangle$, and because it is in $\Delta_{III}$, $\langle \eta_1,\delta\rangle=1$, so this point is a limit of families of Weil pairing both 0 and 1, and this holds for every point in $\Delta_{III,III}'$.
Transversality follows by looking at first order deformations of $(C,\eta_1,\eta_2)$. The Weil pairing determines which component $(C,\eta_1,\eta_2)$ is on, except along $\Delta_{III,III}'$ where it is indeterminate. So we describe all of the space:
\begin{eqnarray*} T_{(C,\eta_1,\eta_2)}\overline{\ZZM{g}} &=& \mbox{First order deformations with }\langle\eta_1,\eta_2\rangle\mbox{ undefined, } 0, \mbox{ or }1, \\
T_{(C,\eta_1,\eta_2)}\overline{\ZZM{g}}^0 &=& \mbox{First order deformations with }\langle\eta_1,\eta_2\rangle\mbox{ undefined, or } 0, \\ {T_{(C,\eta_1,\eta_2)}\Delta_{III,III}'} &=& \mbox{First order deformations with }\langle\eta_1,\eta_2\rangle\mbox{ undefined,} \\ T_{(C,\eta_1,\eta_2)}\overline{\ZZM{g}}^1 &=& \mbox{First order deformations with }\langle\eta_1,\eta_2\rangle\mbox{ undefined, or } 1, \end{eqnarray*}
From these descriptions, transversality follows immediately. \end{proof}
A similar computation to Proposition \ref{prop:1nidegrees} over reducible 1-nodal curves gives:
\begin{proposition} \label{prop:1nrdegrees} Over the locus of 1-nodal reducible curves that are a union of a genus $i$ and a genus $g-i$ curve in $\overline{\mathcal{M}_g}$, the boundary components of $\overline{\ZZM{g}}$ are of the following degrees (and all objects are multiplicity 1): \begin{eqnarray*} \Delta_{i,i} &=& (2^{2i}-1)(2^{2i}-2)\\ \Delta_{g-i,g-i} &=& (2^{2(g-i)}-1)(2^{2(g-i)}-2)\\ \Delta_{i,g-i} &=& (2^{2i}-1)(2^{2(g-i)}-1)\\ \Delta_{g-i,i} &=& (2^{2i}-1)(2^{2(g-i)}-1)\\ \Delta_{i,i:g-i}^i &=& (2^{2i}-1)(2^{2(g-i)}-1)\\ \Delta_{i,i:g-i}' &=& (2^{2i}-2)(2^{2i}-1)(2^{2(g-i)}-1)\\ \Delta_{i:g-i,i}^i &=& (2^{2i}-1)(2^{2(g-i)}-1)\\ \Delta_{i:g-i,i}' &=& (2^{2i}-2)(2^{2i}-1)(2^{2(g-i)}-1)\\ \Delta_{g-i,i:g-i}^{g-i} &=& (2^{2i}-1)(2^{2(g-i)}-1)\\ \Delta_{g-i,i:g-i}' &=& (2^{2(g-i)}-2)(2^{2i}-1)(2^{2(g-i)}-1)\\ \Delta_{i:g-i,g-i}^{g-i} &=& (2^{2i}-1)(2^{2(g-i)}-1)\\ \Delta_{i:g-i,g-i}' &=& (2^{2(g-i)}-2)(2^{2i}-1)(2^{2(g-i)}-1)\\ \Delta_{i:g-i,i:g-i}^i &=& (2^{2i}-1)(2^{2(g-i)}-1)(2^{2(g-i)}-2)\\ \Delta_{i:g-i,i:g-i}^{g-i}&=& (2^{2i}-1)(2^{2i}-2)(2^{2(g-i)}-1)\\ \Delta_{i:g-i,i:g-i}' &=& (2^{2i}-1)(2^{2(g-i)}-1)\\&&\times(2^{2i}-2)(2^{2(g-i)}-2) \end{eqnarray*} \end{proposition}
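As in the irreducible case, one can check that these degrees add up to $(2^{2g}-1)(2^{2g}-2)$, all multiplicities being 1; a short Python sketch of the check:
\begin{verbatim}
# Boundary components over a reducible 1-nodal curve of type (i, g-i):
# the listed degrees should add up to (2^(2g)-1)(2^(2g)-2).
for g in range(2, 10):
    for i in range(1, g):
        m, n = 2 ** (2 * i), 2 ** (2 * (g - i))
        total = ((m - 1) * (m - 2) + (n - 1) * (n - 2)    # (i,i), (g-i,g-i)
                 + 2 * (m - 1) * (n - 1)                  # (i,g-i), (g-i,i)
                 + 4 * (m - 1) * (n - 1)                  # the four pieces marked ^i or ^{g-i}
                 + 2 * (m - 2) * (m - 1) * (n - 1)        # (i,i:g-i)', (i:g-i,i)'
                 + 2 * (n - 2) * (m - 1) * (n - 1)        # (g-i,i:g-i)', (i:g-i,g-i)'
                 + (m - 1) * (n - 1) * (n - 2)            # (i:g-i,i:g-i)^i
                 + (m - 1) * (m - 2) * (n - 1)            # (i:g-i,i:g-i)^{g-i}
                 + (m - 1) * (n - 1) * (m - 2) * (n - 2)) # (i:g-i,i:g-i)'
        assert total == (2 ** (2 * g) - 1) * (2 ** (2 * g) - 2), (g, i)
\end{verbatim}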
\section{The moduli of Klein curves}
In this section, we extend the results of the previous section to a compactification of $\KM{g}=\ZZM{g}/\PSL_2(\mathbb{F}_2)$, that is, the moduli space where we do not choose a basis of the Deck transformations. We will do so by extending the group action to the compactification $\overline{\ZZM{g}}$, and defining the quotient to be $\overline{\KM{g}}$.
\begin{proposition} The group action $\PSL_2(\mathbb{F}_2)\times \ZZM{g}\to\ZZM{g}$ extends to each component of $\overline{\ZZM{g}}$. \end{proposition}
\begin{proof} Let $D_1=\Delta_{I,III}\cup\Delta_{III,I}\cup\Delta_{III,III}^{\diag}$ and $D_2=\Delta_{III,III}'\cup \Delta_{II,III}\cup\Delta_{III,II}$. Then the extension is actually straightforward over $\overline{\ZZM{g}}\setminus (D_1\cup D_2)$, as over this locus, the fibers are reduced, and the action is just by change of basis on a Klein 4 group. Only in the cases where multiplicities are no longer 1, namely $D_1$ and $D_2$, will these fail to just be Klein four groups.
Now, we take the orbit of a $\mathbb{Z}_2^2$-curve in the locus where we have the group action, and degenerate it to $D_1$. Then, the six objects of multiplicity 1 of this fiber degenerate to $(\eta,\eta)\in\Delta_{III,III}^{\diag}$, $(\eta,\mathscr{O}^-_X)\in \Delta_{III,I}$ and $(\mathscr{O}^-_X,\eta)\in\Delta_{I,III}$, where $\mathscr{O}^-_X$ is the Prym curve structure on $X$ lying in $\Delta_I\subset\overline{\ZM{g}}$. Each of these appears with multiplicity 2. Here, the group action can be seen most clearly by noting that $\PSL_2(\mathbb{F}_2)\cong S_3$ (and in fact, the change of basis on a Klein four group is just permuting the three nonzero elements) and seeing the action as being that of $S_3$ on the ordered set $(\mathscr{O}_X^-,\eta,\eta)$ followed by forgetting the last element. Deeper degeneration into the strata $D_1\setminus D_2$ can be handled in the same way, leaving only $D_2$ remaining.
To extend the action to $D_2$, we must first restrict to the individual irreducible components of $\overline{\ZZM{g}}$. This is because $\Delta_{III,III}'$ is the intersection of the two components, by Theorem \ref{thm:intersection}. So, by transversality the fiber multiplicity of elements in the intersection must be split evenly between the components.
Now, let $(\epsilon_1,\epsilon_2)\in\Delta_{III,III}'$. Then $\epsilon_2\otimes\epsilon^{-1}_1$ gives a Prym structure on the closure of the complement of the exceptional components. There are two different Prym structures on the stable curve that pull back to this under the stabilization map, but one of them lies on each component. We will denote by $\eta^i$ the Prym structure such that $(\eta^i,\epsilon_1),(\eta^i,\epsilon_2)\in\overline{\ZZM{g}}^i$. Then, the $\PSL_2(\mathbb{F}_2)\cong S_3$ action is given by permutation of $\epsilon_1,\epsilon_2,\eta^i$. \end{proof}
This extension allows us to take the quotient, which constructs, from the moduli of pairs of Prym structures (equivalently, the moduli of Klein four groups of Prym structures with a chosen basis), the moduli of Klein four groups of Prym covers without a choice of basis.
\begin{definition}[Moduli of Klein four covers] We define the space $\overline{\KM{g}}$ to be the quotient of $\overline{\ZZM{g}}$ by the relation described above, and we call it the \emph{moduli of Klein four covers of genus $g$ curves}. \end{definition}
Given an orbit $\{(C,\eta_1^i,\eta_2^i)\}$ where $i$ runs over the elements of the orbit, we will denote by $(C,\{\eta^i_j\}_{i,j})$ the corresponding point of $\overline{\KM{g}}$, with $i$ running over the orbit and $j=1,2$.
The boundary in the Klein moduli space simplifies significantly. Because the action of $\PSL_2(\mathbb{F}_2)$ exchanges some boundary components, we group them together and give names to their images (fixing the Weil pairing as either $0$ or $1$ in each case) in the following:
\begin{eqnarray*}
\Delta_{I,II}\cup \Delta_{II,I}\cup\Delta_{II,II}^\pm&\to&\Delta_{I,II,II}\\
\Delta_{I,III}\cup\Delta_{III,I}\cup\Delta_{III,III}^{diag}&\to&\Delta_{I,III,III}\\
\Delta_{II,III}\cup\Delta_{III,II}\cup\Delta_{III,III}'&\to&\Delta_{II,III,III}\\
\Delta_{II,II}'&\to&\Delta_{II,II,II}\\
\Delta_{i,g-i}\cup\Delta_{g-i,i}\cup\Delta_{i,i:g-i}^i\cup\Delta_{i:g-i,i}^i\cup\Delta_{g-i,i:g-i}^{g-i}\cup\Delta_{i:g-i,g-i}^{g-i}&\to&\Delta_{i,g-i,i:g-i}\\
\Delta_{i,i}&\to&\Delta_{i,i,i}\\
\Delta_{g-i,g-i}&\to&\Delta_{g-i,g-i,g-i}\\
\Delta_{i,i:g-i}'\cup \Delta_{i:g-i,i}' \cup\Delta_{i:g-i,i:g-i}^i&\to&\Delta_{i,i:g-i,i:g-i}\\
\Delta_{g-i,i:g-i}'\cup\Delta_{i:g-i,g-i}'\cup\Delta_{i:g-i,i:g-i}^{g-i}&\to&\Delta_{g-i,i:g-i,i:g-i}\\
\Delta_{i:g-i,i:g-i}'&\to&\Delta_{i:g-i,i:g-i,i:g-i} \end{eqnarray*}
Between the degrees computed in the previous section and the maps above all being $\PSL_2(\mathbb{F}_2)$ quotients, we find the following structure on the boundary
\begin{eqnarray*}
\Delta_{I,II,II} &=& 2^{2g-2}-1\mbox{ objects of multiplicity }1\\
\Delta_{I,III,III} &=& 2^{2g-2}\mbox{ objects of multiplicity }1\\
\Delta_{II,III,III} &=& 2^{2g-2}(2^{2g-2}-1)\mbox{ objects of multiplicity }2\\
\Delta_{II,II,II} &=& \frac{(2^{2g-1}-2)(2^{2g-1}-4)}{6}\mbox{ objects of multiplicity }1\\
\Delta_{i,g-i,i:g-i} &=& (2^{2i}-1)(2^{2(g-i)}-1)\mbox{ objects of multiplicity }1\\
\Delta_{i,i,i} &=& \frac{(2^{2i}-1)(2^{2i}-2)}{6}\mbox{ objects of multiplicity }1\\
\Delta_{g-i,g-i,g-i} &=& \frac{(2^{2(g-i)}-1)(2^{2(g-i)}-2)}{6}\mbox{ objects of multiplicity }1\\
\Delta_{i,i:g-i,i:g-i} &=& \frac{(2^{2i}-1)(2^{2i}-2)(2^{2(g-i)}-1)}{2}\mbox{ objects of multiplicity }1\\
\Delta_{g-i,i:g-i,i:g-i} &=& \frac{(2^{2(g-i)}-1)(2^{2(g-i)}-2)(2^{2i}-1)}{2}\mbox{ objects of multiplicity }1\\
\Delta_{i:g-i,i:g-i,i:g-i} &=& \frac{(2^{2i}-1)(2^{2(g-i)}-1)(2^{2i}-2)(2^{2(g-i)}-2)}{6}\mbox{ objects of}\\ && \mbox{multiplicity }1 \end{eqnarray*}
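Counted with multiplicity, these boundary data are consistent with the degree $\frac{(2^{2g}-1)(2^{2g}-2)}{6}$ of $\overline{\KM{g}}\to\overline{\mathcal{M}_g}$ over both types of 1-nodal curves; a Python sketch of this check:
\begin{verbatim}
# Consistency of the Klein boundary data with the degree of KM_g -> M_g.
for g in range(2, 10):
    deg = (2 ** (2 * g) - 1) * (2 ** (2 * g) - 2) // 6
    a, b = 2 ** (2 * g - 1), 2 ** (2 * g - 2)
    irr = ((b - 1)                      # Delta_{I,II,II}
           + b                          # Delta_{I,III,III}
           + 2 * b * (b - 1)            # Delta_{II,III,III}, mult 2
           + (a - 2) * (a - 4) // 6)    # Delta_{II,II,II}
    assert irr == deg, g
    for i in range(1, g):
        m, n = 2 ** (2 * i), 2 ** (2 * (g - i))
        red = ((m - 1) * (n - 1)                             # Delta_{i,g-i,i:g-i}
               + (m - 1) * (m - 2) // 6                      # Delta_{i,i,i}
               + (n - 1) * (n - 2) // 6                      # Delta_{g-i,g-i,g-i}
               + (m - 1) * (m - 2) * (n - 1) // 2            # Delta_{i,i:g-i,i:g-i}
               + (n - 1) * (n - 2) * (m - 1) // 2            # Delta_{g-i,i:g-i,i:g-i}
               + (m - 1) * (n - 1) * (m - 2) * (n - 2) // 6) # Delta_{i:g-i,i:g-i,i:g-i}
        assert red == deg, (g, i)
\end{verbatim}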
This gives us
\begin{proposition} The morphism $\overline{\KM{g}}\to\overline{\mathcal{M}_g}$ has degree $\frac{(2^{2g}-1)(2^{2g}-2)}{6}$ and is simply ramified along $\Delta_{II,III,III}$. \end{proposition}
Thus, we have
\begin{corollary} The canonical divisor of $\overline{\KM{g}}$ is \begin{eqnarray*} K_{\overline{\KM{g}}}&=&13\lambda-2\Delta_{I,II,II}-2\Delta_{I,III,III}-2\Delta_{II,II,II}-3\Delta_{II,III,III}\\ &&-\Delta_{1,g-1,1:g-1}-\Delta_{1,1,1}-\Delta_{g-1,g-1,g-1}\\ &&-\Delta_{1,1:g-1,1:g-1}-\Delta_{g-1,1:g-1,1:g-1}-\Delta_{1:g-1,1:g-1,1:g-1}\\ &&-2\sum_{i=1}^{\lfloor g/2\rfloor}(\Delta_{i,g-i,i:g-i}+\Delta_{i,i,i}+\Delta_{g-i,g-i,g-i}+\Delta_{i,i:g-i,i:g-i}\\ &&+\Delta_{g-i,i:g-i,i:g-i}+\Delta_{i:g-i,i:g-i,i:g-i}). \end{eqnarray*} \end{corollary}
\begin{proof} We use the Hurwitz formula, which tells us that $K_{\overline{\KM{g}}}=\pi^*K_{\overline{\mathcal{M}_g}}+\Delta_{II,III,III}$. The canonical divisor of $\overline{\mathcal{M}_g}$ is $13\lambda-2\delta_0-3\delta_1-2\delta_2-\ldots -2\delta_{\lfloor g/2\rfloor}$\cite{MR664324}.
We note that $\pi^*(\Delta_i)=\Delta_{i,g-i,i:g-i}+\Delta_{i,i,i}+\Delta_{g-i,g-i,g-i}+\Delta_{i,i:g-i,i:g-i}+\Delta_{g-i,i:g-i,i:g-i}+\Delta_{i:g-i,i:g-i,i:g-i}$ and $\pi^*\Delta_0=\Delta_{I,II,II}+\Delta_{I,III,III}+\Delta_{II,II,II}+2\Delta_{II,III,III}$ and $\pi^*\lambda=\lambda$. \end{proof}
Now we will work out some numerics of the two components $\overline{\KM{g}}^0$ and $\overline{\KM{g}}^1$ of $\overline{\KM{g}}$.
\begin{proposition} The degrees of the natural projection maps to $\overline{\mathcal{M}_g}$ are \begin{itemize}
\item for $\overline{\KM{g}}$, $\frac{(2^{2g}-1)(2^{2g}-2)}{6}$.
\item for $\overline{\KM{g}}^0$, $\frac{(2^{2g}-1)(2^{2g-1}-2)}{6}$.
\item for $\overline{\KM{g}}^1$, $\frac{(2^{2g}-1)2^{2g-1}}{6}$. \end{itemize} \end{proposition}
This follows directly from Proposition \ref{DegZZM01} and Lemma \ref{DegZZM}.
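In particular, these degrees are consistent: the degrees of the two components add up to that of $\overline{\KM{g}}$, as the following trivial Python sketch confirms.
\begin{verbatim}
# deg(KM_g^0 -> M_g) + deg(KM_g^1 -> M_g) = deg(KM_g -> M_g).
for g in range(2, 12):
    d0 = (2 ** (2 * g) - 1) * (2 ** (2 * g - 1) - 2) // 6
    d1 = (2 ** (2 * g) - 1) * 2 ** (2 * g - 1) // 6
    assert d0 + d1 == (2 ** (2 * g) - 1) * (2 ** (2 * g) - 2) // 6, g
\end{verbatim}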
As a final computation involving the degrees, we compute the content of the boundary divisors when restricted to $\overline{\KM{g}}^0$ and $\overline{\KM{g}}^1$.
\begin{theorem} The fiber over the generic element $C$ of the boundary of $\overline{\mathcal{M}}_g$ in $\overline{\KM{g}}$ is:
\begin{enumerate}
\item if $C$ is irreducible 1-nodal, the fiber in $\overline{\KM{g}}^0$ is
\begin{enumerate}
\item $2^{2g-2}-1$ elements of $\Delta_{I,II,II}^0$ with multiplicity $1$,
\item $\frac{(2^{2g-1}-2)(2^{2g-2}-4)}{6}$ elements of $\Delta_{II,II,II}^0$ with multiplicity $1$,
\item ${(2^{2g-1}-2)2^{2g-4}}$ elements of $\Delta_{II,III,III}^0$ with multiplicity $2$,
\end{enumerate}
\item if $C$ is irreducible 1-nodal, the fiber in $\overline{\KM{g}}^1$ is
\begin{enumerate}
\item $\frac{(2^{2g-1}-2)2^{2g-2}}{6}$ elements of $\Delta_{II,II,II}^1$ with multiplicity $1$,
\item ${(2^{2g-1}-2)2^{2g-4}}$ elements of $\Delta_{II,III,III}^1$ with multiplicity $2$,
\item ${2^{2g-2}}$ elements of $\Delta_{I,III,III}^1$ with multiplicity $1$,
\end{enumerate}
\item if $C$ is reducible with components of genus $i$ and $g-i$, the fiber in $\overline{\KM{g}}^0$ is
\begin{enumerate}
\item $(2^{2i}-1)(2^{2(g-i)}-1)$ elements of $\Delta_{i,g-i,i:g-i}^0$ with multiplicity $1$,
\item $\frac{(2^{2i}-1)(2^{2i-1}-2)}{6}$ elements of $\Delta_{i,i,i}^0$ with multiplicity $1$,
\item $\frac{(2^{2(g-i)}-1)(2^{2(g-i)-1}-2)}{6}$ elements of $\Delta_{g-i,g-i,g-i}^0$ with multiplicity $1$,
\item ${(2^{2i-1}-1)(2^{2i-2}-1)(2^{2(g-i)}-1)}$ elements of $\Delta_{i,i:g-i,i:g-i}^0$ with multiplicity $1$,
\item ${(2^{2(g-i)-1}-1)(2^{2(g-i)-2}-1)(2^{2i}-1)}$ elements of $\Delta_{g-i,i:g-i,i:g-i}^0$ with multiplicity $1$,
\item $\frac{(2^{2i}-1)(2^{2(g-i)}-1)((2^{2i-1}-2)(2^{2(g-i)-1}-2)+(2^{2i-1})(2^{2(g-i)-1}))}{6}$ elements of\\ $\Delta_{i:g-i,i:g-i,i:g-i}^0$ with multiplicity $1$,
\end{enumerate}
\item if $C$ is reducible with components of genus $i$ and $g-i$, the fiber in $\overline{\KM{g}}^1$ is
\begin{enumerate}
\item $\frac{(2^{2i}-1)2^{2i-1}}{6}$ elements of $\Delta_{i,i,i}^1$ with multiplicity $1$,
\item $\frac{(2^{2(g-i)}-1)2^{2(g-i)-1}}{6}$ elements of $\Delta_{g-i,g-i,g-i}^1$ with multiplicity $1$,
\item ${(2^{2i-1}-1)(2^{2i-1})(2^{2(g-i)}-2)}$ elements of $\Delta_{i,i:g-i,i:g-i}^1$ with multiplicity $1$,
\item ${(2^{2(g-i)-1}-1)(2^{2(g-i)-2})(2^{2i}-1)}$ elements of $\Delta_{g-i,i:g-i,i:g-i}^1$ with\\ multiplicity $1$,
\item $\frac{(2^{2i}-1)(2^{2(g-i)}-1)((2^{2i-1}-2)(2^{2i-1})+(2^{2(g-i)-1})(2^{2(g-i)-1}-2))}{6}$ elements of\\ $\Delta_{i:g-i,i:g-i,i:g-i}^1$ with multiplicity $1$.
\end{enumerate} \end{enumerate} \end{theorem}
\begin{proof} We will work out parts 1 and 3, parts 2 and 4 being analogous.
With the exception of $\Delta_{II,III,III}$, we can compute the Weil pairing by choosing any pair of elements in the group. For $\Delta_{II,III,III}$, we note that it must be divided evenly between the components. The groups consist of two elements from $\Delta_{III}$ and the one element of $\Delta_{II}$, and can be chosen to either be glued by $+1$ or $-1$ at the node. These will correspond to Weil pairing $0$ and $1$, thus dividing $\Delta_{II,III,III}$ evenly.
For $\Delta_{I,II,II}$, by definition, we must have $\Delta_{I,II,II}^0=\Delta_{I,II,II}$. Similarly, we can see that $\Delta_{I,III,III}^0=\emptyset$. We can finish by computing that $\Delta_{II,II,II}^0$ must be the correct size to, with the other components, add up to $\frac{(2^{2g}-1)(2^{2g-1}-2)}{6}$. However, we can compute this directly by choosing $\mu_1,\mu_2\in\Delta_{II}$ distinct and orthogonal. Then, if $\delta$ is the vanishing cycle, we have $\mu_1\in\delta^\perp\setminus(\delta)$ and $\mu_2\in (\delta,\mu_1)^\perp\setminus(\delta,\mu_1)$, which gives the appropriate number.
Over a reducible curve $C=C_i\cup C_{g-i}$, although the expressions are more complex, the situation is simpler. We begin by noting that everything in $\Delta_{i,g-i,i:g-i}$ must be in $\Delta_{i,g-i,i:g-i}^0$, because the generators have no common support curve. As for $\Delta_{i,i,i}$ and $\Delta_{g-i,g-i,g-i}$, they will be precisely the fibers of the lower genus maps $\KM{i}^0\to\mathcal{M}_i$ and $\KM{g-i}^0\to \mathcal{M}_{g-i}$. On $\Delta_{i,i:g-i,i:g-i}$, we can have any nonzero square trivial line bundle on the component $C_i$, and the second generator can have any restriction to $C_{g-i}$, but the restriction to $C_i$ must be orthogonal to the first, and here, we only divide by two choices in $\Delta_{i:g-i}$ that can be basis elements. The next component, $\Delta_{g-i,i:g-i,i:g-i}$, can be computed in a similar way. The final component, $\Delta_{i:g-i,i:g-i,i:g-i}$, starts with an arbitrary element of $\Delta_{i:g-i}$, and the second must either have both restrictions orthogonal to those of the first, or else both nonorthogonal, and then we divide by 6 from choices of basis, completing the computation. \end{proof}
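For the irreducible 1-nodal case, parts 1 and 2 can be checked against the degrees of $\overline{\KM{g}}^0$ and $\overline{\KM{g}}^1$ computed above; the following Python sketch records that consistency check.
\begin{verbatim}
# Irreducible 1-nodal case: fibers in KM_g^0 and KM_g^1, counted with
# multiplicity, recover the degrees of the two components.
for g in range(2, 12):
    a, b = 2 ** (2 * g - 1), 2 ** (2 * g - 2)
    fiber0 = ((b - 1)                            # Delta_{I,II,II}^0
              + (a - 2) * (b - 4) // 6           # Delta_{II,II,II}^0
              + 2 * (a - 2) * 2 ** (2 * g - 4))  # Delta_{II,III,III}^0, mult 2
    fiber1 = ((a - 2) * b // 6                   # Delta_{II,II,II}^1
              + 2 * (a - 2) * 2 ** (2 * g - 4)   # Delta_{II,III,III}^1, mult 2
              + b)                               # Delta_{I,III,III}^1
    assert fiber0 == (2 ** (2 * g) - 1) * (a - 2) // 6, g
    assert fiber1 == (2 ** (2 * g) - 1) * a // 6, g
\end{verbatim}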
\section{Pluricanonical forms}
In this section, we show that pluricanonical forms on $\overline{\KM{g}}$ extend to any smooth model, allowing us to compute the Kodaira dimension on $\overline{\KM{g}}$ itself, rather than having to work on the set of smooth models. As such, the goal of this section is to prove
\begin{theorem} \label{extensiontheorem} Fix $g\geq 4$ and $i\in\{0,1\}$, and let $\widehat{\ZZM{g}}^i\to\overline{\ZZM{g}}^i$ be any resolution of the singularities. Then every pluricanonical form defined on $\overline{\ZZM{g}}^{i,reg}$, the smooth locus, extends holomorphically to $\widehat{\ZZM{g}}^i$. Specifically, for all integers $\ell\geq 0$, we have isomorphisms \[H^0(\overline{\ZZM{g}}^{i,reg},K_{\overline{\ZZM{g}}^{i,reg}}^{\otimes\ell})\cong H^0(\widehat{\ZZM{g}}^i,K_{\widehat{\ZZM{g}}^i}^{\otimes\ell})\] \end{theorem}
Analogues of this theorem are known for all of the relevant related moduli spaces: $\overline{\mathcal{M}_g}$ is proved in \cite[Theorem 1]{MR664324}, $\overline{\mathcal{R}_g}$ is proved in \cite[Theorem 6.1]{MR2639318}, and the moduli of spin curves in \cite[Theorem 4.1]{MR2551759}. Our proof will very closely follow the one in \cite{MR2639318} for $\overline{\mathcal{R}_g}$, which can be expected as $\overline{\ZZM{g}}$ is two of the components of $\overline{\mathcal{R}_g}\times_{\overline{\mathcal{M}}_g}\overline{\mathcal{R}_g}$.
Before we can proceed, we need to make a few remarks about the versal deformations of an object $X=(X_1,X_2,\eta_1,\eta_2,\beta_1,\beta_2)\in\overline{\ZZM{g}}$. Let $\mathbb{C}_t^{3g-3}$ be the versal deformation space of $Z$, the stabilization of $X_i$ and $\mathbb{C}_\alpha^{3g-3}$ the versal deformation space of $X$. There are compatible decompositions \begin{eqnarray*} \mathbb{C}_\alpha^{3g-3}&\cong&\bigoplus_{p_i\in\Delta_{X_1}^c\cap\Delta_{X_2}}\mathbb{C}_{\tau_i}\oplus\bigoplus_{p_i\in\Delta_{X_2}^c\cap\Delta_{X_1}}\mathbb{C}_{\tau_i}\oplus\bigoplus_{p_i\in\Delta_{X_1}^c\cap\Delta_{X_2}^c}\mathbb{C}_{\tau_i}\\ &&\oplus\bigoplus_{p_i\in\Delta_{X_1}\cap\Delta_{X_2}}\mathbb{C}_{\tau_i}\oplus\bigoplus_{C_j\subset C}H^1(C_j^\nu,T_{C_j^\nu}(-D_j))\\ \mathbb{C}_t^{3g-3}&\cong&\bigoplus_{p_i\in\Sing(C)} \mathbb{C}_{t_i}\oplus\bigoplus_{C_j\subset C}H^1(C_j^\nu,T_{C_j^\nu}(-D_j)) \end{eqnarray*} where $D_j$ is the sum of the preimages of the nodes under the normalization map. There is a natural map from the versal deformation space of a $\mathbb{Z}_2^2$ curve to that of the underlying stable curve, given by $t_i=\alpha_i^2$ if $t_i=0$ is the locus where the exceptional node $p_i\in\Delta_{X_1}^c\cup \Delta_{X_2}^c$ persists and $t_i=\alpha_i$ otherwise. Similarly to the discussion in Section 1.2 of \cite{MR2117416}, we can blow up along all of the exceptional components and extend $\eta_1,\eta_2$ using only those in $\Delta_{X_1}^c$ and $\Delta_{X_2}^c$ respectively.
This description makes the rest of the work in Section 6 of \cite{MR2639318} relatively straightforward to generalize. Set $X_\Delta$ to be the quasi-stable curve with exceptional nodes $\Delta_{X_1}^c\cup\Delta_{X_2}^c$.
\begin{definition}[Elliptic tail] Let $X$ be a quasi-stable curve, a component $C_j$ is an elliptic tail if it has arithmetic genus 1 and intersects the rest of the curve in a single point. That point is called an elliptic tail node, and any automorphism of $X$ that is the identity away from $C_j$ is an elliptic tail automorphism. \end{definition}
\begin{proposition} Let $\sigma\in \Aut(X)$ be an automorphism in genus $g\geq 4$. Then $\sigma$ acts on $\mathbb{C}_\alpha^{3g-3}$ as a quasi-reflection if and only if $X_\Delta$ has an elliptic tail $C_j$ such that $\sigma$ is the elliptic tail involution with respect to $C_j$. \end{proposition}
The proof of this proposition follows from the proof of \cite[6.6]{MR2639318}. It implies that the smooth locus of $\overline{\ZZM{g}}$ is the locus where the automorphism group is generated by elliptic tail involutions. Now that we have determined the smooth locus, we must determine the non-canonical locus. If $G$ acts on a vector space $V$ by quasi-reflections, then $V/G\cong V$, so we let $H\subset\Aut(X_1,X_2,\eta_1,\eta_2,\beta_1,\beta_2)$ be generated by automorphisms acting as quasi-reflections, that is elliptic tail involutions. Then $\mathbb{C}^{3g-3}_\alpha/H\cong \mathbb{C}^{3g-3}_\nu$ where $\nu_i=\alpha_i^2$ if $p_i$ is an elliptic tail node and $\nu_i=\alpha_i$ else. On $\mathbb{C}^{3g-3}_\nu$, the automorphisms act without quasi-reflections, so the Reid--Shepherd-Barron--Tai criterion can be applied.
\begin{theorem}[Reid--Shepherd-Barron--Tai Criterion \cite{MR605348,MR763023}] Let $V$ be a vector space of dimension $d$, $G\subset \GL(V)$ a finite group and $V_0\subset V$ the open set where $G$ acts freely. Fix $g\in G$, and let $g$ be conjugate to a diagonal matrix with $\zeta^{a_i}$ for $i=1,\ldots,d$ on the diagonal for $\zeta$ a fixed $m^{th}$ root of unity and $0\leq a_i<m$. If for all $g$ and $\zeta$, we have $\frac{1}{m}\sum_{i=1}^d a_i\geq 1$, then any $n$-canonical form on $V_0/G$ extends holomorphically to a resolution $\widetilde{V/G}$. \end{theorem}
It is straightforward to check that for $g\geq 4$, we have a noncanonical singularity if $X_\Delta$ has an elliptic tail $C_j$ with $j$-invariant $0$ such that $\eta_1,\eta_2$ are both trivial on $C_j$. This goes as in \cite{MR2639318}, where the action of $\sigma$ is determined to be as the square of a sixth root of unity in two coordinates for an automorphism of order $6$ and as a cube root of unity in those two coordinates for an order $3$ element. Both of these fail the Reid--Shepherd-Barron--Tai criterion.
Now, assuming that we have a noncanonical singularity, then we have an automorphism $\sigma$ of order $n$ failing Reid--Shepherd-Barron--Tai. Our goal is to classify such things, and eventually show that only the examples above exist. Let $p_{i_0}$, $p_{i_1}=\sigma(p_{i_0})$,$\ldots$, $\sigma^{m-1}(p_{i_0})=p_{i_{m-1}}$ be distinct nodes of the stabilization, $C$, which are permuted by $\sigma$ and not elliptic tail nodes. The action on the subspace corresponding to these nodes is then given by a matrix \[\left(\begin{array}{cccc}0&c_1&&\\\vdots&&\ddots&\\0&&&c_{m-1}\\c_m&0&\ldots&0\end{array}\right)\] for some complex numbers $c_j$. We call the pair $(X,\sigma)$ \emph{singularity reduced} if $\prod_{j=1}^m c_j$ is not $1$.
By \cite{MR664324} and \cite[Proposition 3.6]{MR2551759}, we know that there is a deformation $X'$ of $X$ such that $\sigma$ deforms to $\sigma'$, an automorphism of $X'$ such that every cycle of nodes with $\prod_{j=1}^m c_j=1$ is smoothed and the action of $\sigma$ and $\sigma'$ on $\mathbb{C}_\nu^{3g-3}$ and $\mathbb{C}_{\nu'}^{3g-3}$ have the same eigenvalues. In particular, one will satisfy Reid--Shepherd-Barron--Tai if and only if the other does.
Now, we fix a pair $(X,\sigma)$ that is singularity reduced and fails the Reid--Shepherd-Barron--Tai inequality. On $C$, the stabilization, the induced automorphism $\sigma_C$ must either fix all of the nodes or else exchange a single pair of them. We look at what the action does on the components. In \cite[Proposition 6.9]{MR2639318} the proof of \cite[Proposition 3.8]{MR2551759} is adapted to the situation of $\overline{\ZM{g}}$, and this proof goes through verbatim, telling us that the action fixes each component of the stable model. Now, we recall that
\begin{theorem}[{\cite[Page 36]{MR664324}}] Assume that $(X,\sigma)$ is singularity reduced and fails the Reid--Shepherd-Barron--Tai inequality. Denote by $\varphi_j$ the induced automorphism on the normalization $C_j^\nu$ of the irreducible component $C_j$ of the stabilization $C$ of $X$. Then the pair $(C_j^\nu,\varphi_j)$ is one of the following: \begin{enumerate}
\item $C_j^\nu$ rational, and the order of $\varphi_j$ is 2 or 4,
\item $C_j^\nu$ elliptic, and the order of $\varphi_j$ is 2,4,3 or 6,
\item $C_j^\nu$ hyperelliptic of genus 2, and $\varphi_j$ is the hyperelliptic involution,
\item $C_j^\nu$ bielliptic of genus 2, and $\varphi_j$ is the associated involution,
\item $C_j^\nu$ hyperelliptic of genus 3, and $\varphi_j$ is the hyperelliptic involution, and
\item $C_j^\nu$ arbitrary, and $\varphi_j$ is the identity. \end{enumerate} \end{theorem}
As pointed out in \cite[Proposition 3.10]{MR2551759}, this rules out the possibility of nodes being exchanged, so the automorphism must fix all nodes and all components on the stable curve.
\begin{proposition}[{\cite[Proposition 6.12]{MR2639318}}] In the same situation as above, set $D_j$ to be the divisor of the marked points on $C_j^\nu$ that are preimages of nodes. Then the triples $(C_j^\nu,D_j,\varphi_j)$ are one of the following types, and the contribution to the left hand side of the Reid--Shepherd-Barron--Tai inequality are at least $w_j$: \begin{enumerate}
\item $C_j^\nu$ arbitrary, $\varphi_j$ is the identity, and $w_j=0$,
\item Elliptic tails: $C_j^\nu$ is elliptic, $D=p_1^+$ which is fixed by $\varphi_j$, $\varphi_j$ has order 2,3,4 or 6, and $w_j$ is, respectively, $0$, $\frac{1}{3}$, $\frac{1}{2}$ and $\frac{1}{3}$.
\item Elliptic ladder: $C_j^\nu$ is elliptic and $D=p_1^++p_2^+$, with both points fixed, the automorphism is of order $2$, $3$, or $4$ and $w_j$ is, respectively, $\frac{1}{2}$, $\frac{2}{3}$, and $\frac{3}{4}$
\item Hyperelliptic tail: $C_j^\nu$ has genus 2, $\varphi_j$ is the hyperelliptic involution, and $D_j=p_1^+$ fixed by $\varphi_j$. Then $w_j=\frac{1}{2}$. \end{enumerate} \end{proposition}
With a bit of case by case work, essentially \cite{MR2639318} Propositions 6.13, 6.14, 6.15 and 6.16, we can see that hyperelliptic tails, elliptic ladders, and elliptic tails of order 4 do not occur, and that there must, in fact, be at least one elliptic tail of order 3 or 6, giving us our restrictions on the curve. Now, we look to the line bundles. Because the automorphism must pull back the line bundle to itself on the elliptic curve, it must be trivial on the elliptic tail, and this must hold for both of the Prym line bundles. Thus, if we start with $(X,\sigma)$ failing Reid--Shepherd-Barron--Tai, then we can deform to a singularity reduced pair $(X',\sigma')$ such that the Reid--Shepherd-Barron--Tai value is constant. The pair $(X',\sigma')$ must have an elliptic tail with $j$ invariant $0$, the automorphism must be of order 3 or 6, and $\eta_1,\eta_2$ must both be trivial along it. Thus:
\begin{proposition}
Fix $g\geq 4$. A point $(X_1,X_2,\eta_1,\eta_2,\beta_1,\beta_2)\in\overline{\ZZM{g}}$ is a non-canonical singularity if and only if $X_\Delta$ has an elliptic tail $C_j$ with $j$-invariant $0$ and $\eta_1|_{C_j}\cong \eta_2|_{C_j}\cong \mathscr{O}_{C_j}$. \end{proposition}
\begin{proof}[Proof of Theorem \ref{extensiontheorem}]
Let $\omega$ be a pluricanonical form on $\overline{\ZZM{g}}^{i,reg}$. We want to show that it lifts to a desingularization of some neighborhood of any point $(X_1,X_2,\eta_1,\eta_2,\beta_1,\beta_2)\in \overline{\ZZM{g}}^i$. Because this can be done for canonical singularities, we assume that $(X_1,X_2,\eta_1,\eta_2,\beta_1,\beta_2)$ is a general non-canonical singularity, and thus $X_\Delta=C_1\cup_p C_2$ where $(C_1,p)\in \mathcal{M}_{g-1,1}$ and $(C_2,p)\in \mathcal{M}_{1,1}$ with $j(C_2)=0$. We also assume that $\eta_1|_{C_2}\cong \eta_2|_{C_2}\cong \mathscr{O}_{C_2}$ and $\eta_i|_{C_1}$ are two arbitrary line bundles on $C_1$, so that we are on a hypersurface in $\Delta_{g-1,g-1}$. We consider the pencil $\phi:\overline{\mathcal{M}_{1,1}}\to \overline{\ZZM{g}}^i$ given by $\phi(C,p)=C_1\cup_p C$ and line bundles $\eta'_i$ trivial on $C$ and isomorphic to $\eta_i|_{C_i}$ on $C_i$. As $\phi(\overline{\mathcal{M}_{1,1}})$ does not intersect the ramification locus, then just as in \cite{MR664324} pages 41-44, we can construct an open neighborhood of the pencil, $S$, such that the restriction of $\overline{\ZZM{g}}^i\to\overline{\mathcal{M}_g}$ to $S$ is an isomorphism and every pluricanonical form on the smooth locus extends to a resolution $\hat{S}$ of $S$. For the arbitrary case, with more than one node, $\omega$ will extend locally to a desingularization, just as in \cite[Theorem 4.1]{MR2551759}. \end{proof}
Then, Theorem \ref{extensiontheorem} in fact implies the same result for $\overline{\KM{g}}^i$. This is because $\overline{\ZZM{g}}^i\to\overline{\KM{g}}^i$ is a quotient by $\PSL_2(\mathbb{F}_2)$. The action is free except for along $\Delta_{I,III}\cup\Delta_{III,I}\cup\Delta_{III,III}^{\diag}$, where the stabilizer of a point is $\mathbb{Z}/2\mathbb{Z}$. Looking at the Reid--Shepherd-Barron--Tai criterion for $m=2$, we find that either the pluricanonical forms extend or we have a quasi-reflection, in which case the pluricanonical forms will also extend. So, either way, we can see that what we get are the invariants: $H^0(\overline{\KM{g}}^{i,reg},K^{\otimes\ell})\cong H^0(\overline{\ZZM{g}}^i,K^{\otimes \ell})^{\PSL_2(\mathbb{F}_2)}$, and so, because we can also do this for partial resolutions of $\overline{\ZZM{g}}^i$, we can do this for any resolution $\widehat{\KM{g}}^i$.
We conclude with a statement about the birational geometry of these moduli spaces, justified by the above discussion.
\begin{theorem} \label{maintheorem} For any $g$, $\overline{\KM{g}}^i$ has general type if there exists a single effective divisor $D\equiv a\lambda-\sum_T b_{\Delta_T}\Delta_T$ where $T$ runs over all boundary components, such that all the ratios $\frac{a}{b_T}$ are less than $\frac{13}{2}$ and the ratios $\frac{a}{b_{II,III,III}}$, $\frac{a}{b_{1,g-1,1:g-1}}$, $\frac{a}{b_{1,1,1}}$, $\frac{a}{b_{g-1,g-1,g-1}}$, $\frac{a}{b_{1,1:g-1,1:g-1}}$, $\frac{a}{b_{g-1,1:g-1,1:g-1}}$, and $\frac{a}{b_{1:g-1,1:g-1,1:g-1}}$ are less than $\frac{13}{3}$. \end{theorem}
This allows us to begin computing the classes of divisors on the Klein moduli space to determine its Kodaira dimension, and thus begin the study of the birational geometry of these spaces.
\end{document}
\begin{document}
\draft
\twocolumn[\hsize\textwidth\columnwidth\hsize\csname@twocolumnfalse\endcsname
\title{Thermal entanglement in three-qubit Heisenberg models} \author{Xiaoguang Wang$^{1,2}$, Hongchen Fu$^{3}$, and Allan I. Solomon$^{3}$} \address{1. Institute of Physics and Astronomy, University of Aarhus, DK-8000, Aarhus C, Denmark.} \address{2. Institute for Scientific Interchange (ISI) Foundation, Viale Settimio Severo 65, I-10133 Torino, Italy} \address{3. Quantum Processes Group, The Open University, Milton Keynes, MK7 6AA, United Kingdom.} \date{\today} \maketitle
\begin{abstract} We study pairwise thermal entanglement in three-qubit Heisenberg models and obtain analytic expressions for the concurrence. We find that thermal entanglement is absent from both
the antiferromagnetic $XXZ$ model and the ferromagnetic $XXZ$ model with anisotropy parameter $\Delta\ge 1$. Conditions for the existence of thermal entanglement are discussed in detail, as are the role of degeneracy and the effects of magnetic fields on thermal entanglement and the quantum phase transition. Specifically, we find that the magnetic field can induce entanglement in the antiferromagnetic $XXX$ model, but cannot induce entanglement in the ferromagnetic $XXX$ model. \end{abstract}
\pacs{PACS numbers: 03.65.Ud, 03.67.Lx, 75.10.Jm.}
]
\narrowtext
\section{Introduction}
\label{sec:intro}
Over the past few years much effort has been put into studying the entanglement of multipartite systems both qualitatively and quantitatively. Entangled states constitute a valuable resource in quantum information processing\cite{Bennett}. Quite recently, entanglement in quantum operations \cite {EO0,EO1,EO2} and entanglement in indistinguishable fermionic and bosonic systems\cite{EI1,EI2,EI3} have been considered. Entanglement in two-qubit states has been well studied in the literature, as have various kinds of three-qubit entangled states\cite {Dur,threeq,Rajagopal}. The three-qubit entangled states have been shown to possess
advantages over the two-qubit states in quantum teleportation\cite {tele}, dense coding\cite{dense} and quantum cloning\cite{clone}.
An interesting and natural type of entanglement, thermal entanglement, was introduced and analysed within the Heisenberg $XXX$\cite{Arnesen}, $XX$\cite{Wang1}, and $XXZ$\cite{Wang2} models as well as the Ising model in a magnetic field\cite{Ising}. The state of the system at thermal equilibrium is represented by the density operator $\rho (T)=\exp \left( -\frac H{kT}\right) /Z,$ where $Z=$tr$\left[ \exp \left( -\frac H{kT}\right) \right]$ is the partition function, $H$ the system Hamiltonian, $k$ is Boltzmann's constant which we henceforth take equal to 1, and $T$ the temperature. As $\rho (T)$ represents a thermal state, the entanglement in the state is called {\em thermal entanglement}\cite{Arnesen}. A complication in the analysis is that, although standard statistical physics is characterized by the partition function, determined by the eigenvalues of the system, thermal entanglement properties require in addition knowledge
of the eigenstates.
The Heisenberg model has been used to simulate a quantum computer\cite {Loss}, as well as quantum dots\cite{Loss}, nuclear spins\cite {Kane}, electronic spins\cite{Vrijen} and optical lattices\cite{Moelmer}. By suitable coding, the Heisenberg interaction alone can be used for quantum computation\cite{Loss2}. The entanglement in the ground state of the Heisenberg model has been discussed by O'Connor and Wootters\cite{Oconnor}.
In previous studies of thermal entanglement analytical results were only available for two-qubit quantum spin models. In this paper we analyze the three-qubit case, i.e. we consider pairwise thermal entanglement in three-qubit Heisenberg models.
A general 3-qubit Heisenberg $XYZ$ model in a non-uniform magnetic field {\bf $B$} is given by: \begin{eqnarray} H &=&H_{XYZ}+H_{\rm mag} \nonumber \\ H_{XYZ} &=&\sum_{n=1}^3\left( \frac{J_1}{2}\sigma _{n}^{x}\sigma _{n+1}^{x}+\frac{J_2}{2}\sigma _{n}^{y}\sigma _{n+1}^{y}+\frac{J_3}{2}\sigma _{n}^{z}\sigma _{n+1}^{z}\right) \nonumber \\ H_{\rm mag} &=&\sum_{n=1}^3 {B_n \sigma _n^z}. \label{eq:xyz} \end{eqnarray} We use the standard notation, detailed later, and assume a periodic boundary, identifying the subscript $4$ with $1$ in the above expressions. For the 3-qubit case even this most general scenario is susceptible to numerical analysis. However, in this paper we shall restrict ourselves to special cases of Eq.(\ref{eq:xyz}) for which we are able to provide a succinct analytic treatment.
The 3-site Heisenberg models we will study in this paper are the following: \begin{enumerate} \item The $XX$ model, corresponding to $J_1=J_2,\;\;\; J_3=0$ and ${\bf B}=0$. \item The $XXZ$ model, for which $J_1=J_2, J_3\neq 0$ and ${\bf B}=0$. \item The $XXZ$ model with uniform magnetic field ($B_1=B_2=B_3$). \end{enumerate} We start in Sec. II by examining the three-qubit $XX$ model. In Sec. III, IV, and V, we study thermal entanglement in the $XX$ model, the $XXZ$ model and the $XXZ$ model in a magnetic field, respectively.
During the course of the analysis it will become clear that degeneracy plays an important role in thermal entanglement, as does the presence of magnetic fields. We find the critical temperatures involved in the quantum phase transition associated with the existence of entanglement in these quantum spin models.
\section{Three-qubit $XX$ model and its solution}
The three-qubit $XX$ model is described by the Hamiltonian\cite{Lieb} \begin{eqnarray} H_{XX} &=&\frac J2\sum_{n=1}^3\left( \sigma _{n}^{x}\sigma _{n+1}^{x}+\sigma _{n}^{y}\sigma _{n+1}^{y}\right) \nonumber \\ &=&J\sum_{n=1}^3\left( \sigma _{n}^{+}\sigma _{n+1}^{-}+\sigma _{n}^{-}\sigma _{n+1}^{+}\right) , \label{xy1} \end{eqnarray} where $\sigma _n^\alpha $ $(\alpha =x,y,z)$ are the Pauli matrices of the $n$ -th qubit, $\sigma _n^{\pm }=\frac 12\left( \sigma _n^x\pm i\sigma _n^y\right) $ the raising and lowering operators, and $J$ is the exchange interaction constant. Positive (negative) $J$ corresponds to the antiferromagnetic (ferromagnetic) case. As signalled above, we adopt periodic boundary conditions; $\sigma_4^x=\sigma_1^x,$ $\sigma_4^y=\sigma_1^y.$ We are therefore considering a three-qubit Heisenberg ring. The $XX$ model was intensively investigated in 1960 by Lieb, Schultz, and Mattis\cite{Lieb}. More recently the $XX$ model has been realized in the quantum-Hall system\cite{Hall}, the cavity QED system\cite {Zheng} and quantum dot spins\cite{I} for a quantum computer.
In order to study thermal entanglement, the first step is to obtain all the eigenvalues and eigenstates of the Hamiltonian Eq.(\ref{xy1}). The eigenvalues themselves do not suffice to calculate the entanglement. The eigenvalue problem of the $XX$ model can be exactly solved by the Jordan-Wigner transformation\cite{JW}. In the three-qubit case the eigenvalues are more simply obtained as \cite{Wang1}
\begin{eqnarray} E_0 &=&E_7=0, \nonumber \\ E_1 &=&E_2=E_4=E_5=-J, \nonumber \\ E_3 &=&E_6=2J. \label{eq:eeeigen} \end{eqnarray} \newline and the corresponding eigenstates are explicitly given by
\begin{eqnarray}
|\psi _0\rangle &=&|000\rangle , \nonumber \\
|\psi _1\rangle &=&3^{-1/2}\left( q|001\rangle +q^2|010\rangle +|100\rangle \right) , \nonumber \\
|\psi _2\rangle &=&3^{-1/2}\left( q^2|001\rangle +q|010\rangle +|100\rangle \right) , \nonumber \\
|\psi _3\rangle &=&3^{-1/2}\left( |001\rangle +|010\rangle +|100\rangle \right) , \nonumber \\
|\psi _4\rangle &=&3^{-1/2}\left( q|110\rangle +q^2|101\rangle +|011\rangle \right) , \nonumber \\
|\psi _5\rangle &=&3^{-1/2}\left( q^2|110\rangle +q|101\rangle +|011\rangle \right) , \nonumber \\
|\psi _6\rangle &=&3^{-1/2}\left( |110\rangle +|101\rangle +|011\rangle \right) , \nonumber \\
|\psi _7\rangle &=&|111\rangle . \label{eq:estate} \end{eqnarray} with $q=\exp \left( i2\pi /3\right) $ satisfying
\begin{eqnarray} q^3 &=&1, \nonumber \\ q^2+q+1 &=&0. \end{eqnarray} This set (\ref{eq:estate}) of three-qubit states is itself interesting. Rajagopal and Rendell\cite{Rajagopal} have considered a similar set of three-qubit states which they have classified by means of
permutation symmetries. Here the states $|\psi _0\rangle ,|\psi _3\rangle ,$
$|\psi _6\rangle ,$ and $|\psi _7\rangle $ are symmetric in the permutation of any pair of particles. We define a cyclic shift operator $P$ by its action on the basis $|ijk\rangle $\cite{Schnack}
\begin{equation}
P|ijk\rangle =|kij\rangle . \end{equation}
Obviously the four states $|\psi _0\rangle ,|\psi _3\rangle ,$ $|\psi _6\rangle ,$ and $|\psi _7\rangle $ are the eigenstates of $P$ with eigenvalue 1. The other four states in the set (\ref{eq:estate}) are also eigenstates of $P$ as follows:
\begin{mathletters} \begin{eqnarray}
P|\psi _i\rangle &=&q^2|\psi _i\rangle \text{ }(i=1,4), \\
P|\psi _j\rangle &=&q|\psi _j\rangle \text{ }(j=2,5). \end{eqnarray} \end{mathletters} This is not surprising since the Hamiltonian $H_{XX}$, as well as the other Hamiltonians considered later, is invariant under the cyclic shift operator.
For $J>0$ ($J<0$) the ground state is four (two)-fold degenerate. We will see that the degeneracy of the system influences thermal entanglement greatly. There is no pairwise entanglement in the eigenstates $|\psi _0\rangle $ and $|\psi _7\rangle $. Pairwise entanglement exists in the states $|\psi _i\rangle $ ($i=1,2,...,6$), and the concurrence between any two different qubits is given by 2/3\cite{Dur,Koashi}.
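The spectrum (\ref{eq:eeeigen}) and its degeneracies are easily confirmed by direct diagonalization; the following minimal NumPy sketch (our own illustration, with $J=1$) builds the $8\times 8$ matrix of Eq.~(\ref{xy1}) on the three-qubit ring:
\begin{verbatim}
import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]])
si = np.eye(2, dtype=complex)

def op(single, site, n=3):
    # embed a one-qubit operator at position `site` of an n-qubit ring
    mats = [si] * n
    mats[site] = single
    out = mats[0]
    for m in mats[1:]:
        out = np.kron(out, m)
    return out

J = 1.0
H = sum(J / 2 * (op(sx, n) @ op(sx, (n + 1) % 3)
                 + op(sy, n) @ op(sy, (n + 1) % 3)) for n in range(3))

print(np.round(np.linalg.eigvalsh(H), 10))
# eigenvalue -J four times, 0 twice, and 2J twice
\end{verbatim}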
\section{Thermal entanglement in the $XX$ model}
We first recall the definition of {\em concurrence}\cite{Wootters1} between a pair of qubits. Let $\rho _{12}$ be the density matrix of the pair and it can be either pure or mixed. The concurrence corresponding to the density matrix is defined as \begin{equation} {\cal C}_{12}=\max \left\{ \lambda _1-\lambda _2-\lambda _3-\lambda _4,0\right\} , \label{eq:c1} \end{equation} where the quantities $\lambda _i$ are the square roots of the eigenvalues of the operator \begin{equation} \varrho _{12}=\rho _{12}(\sigma _1^y\otimes \sigma _2^y)\rho _{12}^{*}(\sigma _1^y\otimes \sigma_2^y) \label{eq:c2} \end{equation} in descending order. The eigenvalues of $\varrho _{12}$ are real and non-negative even though $\varrho _{12}$ is not necessarily Hermitian. The values of the concurrence range from zero, for an unentangled state, to one, for a maximally entangled state.
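The recipe of Eqs.~(\ref{eq:c1}) and (\ref{eq:c2}) is straightforward to implement numerically; a minimal NumPy sketch (our own helper function, shown only for illustration) is
\begin{verbatim}
import numpy as np

sy = np.array([[0, -1j], [1j, 0]])

def concurrence(rho):
    # Wootters concurrence of a two-qubit density matrix rho (4x4, unit trace)
    YY = np.kron(sy, sy)
    R = rho @ YY @ rho.conj() @ YY   # the (generally non-Hermitian) operator above
    lam = np.sort(np.sqrt(np.abs(np.linalg.eigvals(R).real)))[::-1]
    return max(0.0, lam[0] - lam[1] - lam[2] - lam[3])

# sanity check: the singlet state is maximally entangled
psi = np.array([0, 1, -1, 0]) / np.sqrt(2)
print(concurrence(np.outer(psi, psi)))  # -> 1.0 (up to rounding)
\end{verbatim}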
The state at thermal equilibrium is described by the density matrix \begin{eqnarray} \rho (T) &=&\frac 1Z\exp \left( -\beta H\right) , \nonumber \\
&=&\frac 1Z\sum_{k=0}^7\exp \left( -\beta E_k\right) |\psi _k\rangle \langle
\psi _k|. \label{eq:rhoo} \end{eqnarray} where $\beta =1/T.$ From Eq.(\ref{eq:eeeigen}), the partition function is obtained as
\begin{equation} Z=2+4e^{\beta J}+2e^{-2\beta J}. \label{eq:z} \end{equation} From Eqs.(\ref{eq:eeeigen}) and (\ref{eq:rhoo}), the density matrix can be written as
\begin{eqnarray}
\rho (T) &=&\frac 1Z[|\psi _0\rangle \langle \psi _0|+|\psi _7\rangle
\langle \psi _7| \nonumber \\
&&+e^{\beta J}(|\psi _1\rangle \langle \psi _1|+|\psi _4\rangle \langle \psi _4| \nonumber \\
&&+|\psi _2\rangle \langle \psi _2|+|\psi _5\rangle \langle \psi _5|) \nonumber \\
&&+e^{-2\beta J}(|\psi _3\rangle \langle \psi _3|+|\psi _6\rangle \langle
\psi _6|)]. \label{eq:re} \end{eqnarray}
In this paper we consider only pairwise thermal entanglement, and so we need to calculate the reduced density matrix $\rho_{12}(T)=\text{tr}_3(\rho(T))$.
We denote the reduced density matrix tr$_3[|\psi _{i_1}\rangle \langle
\psi _{i_1}|+...+|\psi _{i_N}\rangle \langle \psi _{i_N}|]$ by $\rho _{12}^{(i_1i_2...i_N)}.$ From Eq.(\ref{eq:estate}), we obtain
\begin{mathletters} \begin{eqnarray} \rho _{12}^{(07)} &=&\left( \begin{array}{llll} 1 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 1 \end{array} \right) , \label{eq:aaa}\\ \rho _{12}^{(1245)} &=&\frac 23\left( \begin{array}{llll} 1 & 0 & 0 & 0 \\ 0 & 2 & -1 & 0 \\ 0 & -1 & 2 & 0 \\ 0 & 0 & 0 & 1 \end{array} \right) , \label{eq:bbb}\\ \rho _{12}^{(36)} &=&\frac 23\left( \begin{array}{llll} \frac 12 & 0 & 0 & 0 \\ 0 & 1 & 1 & 0 \\ 0 & 1 & 1 & 0 \\ 0 & 0 & 0 & \frac 12 \end{array} \right),\label{eq:ccczzz}\\ \rho _{12}^{(012)} &=&\frac 23\left( \begin{array}{llll} \frac 52 & 0 & 0 & 0 \\ 0 & 1 & -\frac 12 & 0 \\ 0 & -\frac 12 & 1 & 0 \\ 0 & 0 & 0 & 0 \end{array} \right) ,\label{eq:ddd}\\ \rho _{12}^{(12)} &=&\frac 23\left( \begin{array}{llll} 1 & 0 & 0 & 0 \\ 0 & 1 & -\frac 12 & 0 \\ 0 & -\frac 12 & 1 & 0 \\ 0 & 0 & 0 & 0 \end{array} \right).\label{eq:eee} \end{eqnarray} The last two reduced density matrices will be used later.
\end{mathletters} From Eqs. (\ref{eq:re}) and (\ref{eq:aaa}-\ref{eq:ccczzz}), we obtain \begin{eqnarray} \rho _{12}(T) &=&\frac 1Z\left( \rho _{12}^{(07)}+e^{\beta J}\rho _{12}^{(1245)}+e^{-2\beta J}\rho _{12}^{(36)}\right) \nonumber \\ &=&\frac 2{3Z}\left( \begin{array}{llll} v & 0 & 0 & 0 \\ 0 & w & y & 0 \\ 0 & y & w & 0 \\ 0 & 0 & 0 & v \end{array} \right) \label{eq:vwy} \end{eqnarray} with \begin{eqnarray} v &=&\frac 32+e^{\beta J}+\frac 12e^{-2\beta J}, \nonumber \\ w &=&2e^{\beta J}+e^{-2\beta J}, \nonumber \\ y &=&e^{-2\beta J}-e^{\beta J}. \end{eqnarray}
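As a quick consistency check, these entries give a unit trace: using Eq.~(\ref{eq:z}), \[ {\rm tr}\,\rho _{12}(T)=\frac 2{3Z}\left( 2v+2w\right) =\frac 2{3Z}\left( 3+6e^{\beta J}+3e^{-2\beta J}\right) =\frac 2{3Z}\cdot \frac{3Z}2=1. \]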
The square roots of the four eigenvalues of the operator $\varrho _{12}$ are \begin{eqnarray} \lambda _1 &=&\frac{2(w-y)}{3Z},\text{ }\lambda _2=\frac{2(w+y)}{3Z}, \nonumber \\ \lambda _3 &=&\lambda _4=\frac{2v}{3Z}. \label{eq:lam} \end{eqnarray} From Eqs.(\ref{eq:c1}), (\ref{eq:z}), (\ref{eq:vwy}), and (\ref{eq:lam}), we obtain the concurrence\cite{Oconnor}
\begin{eqnarray}
{\cal C}&=&\frac 4{3Z}\max (|y|-v,0), \label{eq:ccc} \\
&=&\max \left[ \frac{2|e^{-2x}-e^x|-3-2e^x-e^{-2x}}{ 3(1+2e^x+e^{-2x})},0\right] \label{eq:cc1} \end{eqnarray} where $x \equiv \beta J = J/T$. The concurrence depends only on the {\em ratio} of $J$ and $T$. Due to symmetry under cyclic shifts, the value of the concurrence does not depend on the choice of the pair of qubits.
From (\ref{eq:cc1}) we see that entanglement appears only when \begin{equation}
\frac{2|z^{-2}-z|-3-2z-z^{-2}}{ 3(1+2z+z^{-2})} >0, \end{equation} or in other words \begin{equation} \label{fenmu}
2|z^{-2}-z|- 3 - 2 z- z^{-2} >0, \end{equation} where $z=\exp(x)$. We now consider two different cases:
{\it Case 1}. Antiferromagnetic system: $J>0$, $z^{-2}-z<0$. In this case relation (\ref{fenmu}) requires \begin{equation} z^{-2}<-1 \end{equation} which is impossible. So there is no entanglement when $J>0$.
{\it Case 2}. Ferromagnetic system: $J<0$, $z^{-2}-z > 0$. Relation (\ref{fenmu}) becomes \begin{equation} z^{-2}-4 z -3 >0, \end{equation} or, equivalently, \begin{equation} f(z) \equiv 4z^3+3z^2-1<0. \label{yfunction} \end{equation} The function $f(z)$ is an increasing function of the positive real argument $z$ and relation (\ref{yfunction}) is valid iff $0<z<z_c$, where the critical value $z_c$ determined by $f(z_c)=0$
is 0.4554; that is, $ x<-0.7866 $. For fixed $J$, we obtain the critical temperature $T_c=|J|/0.7866\approx 1.2713|J|$, above which there is no thermal entanglement. The critical temperature depends linearly on the absolute value of $J$.
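The numbers quoted here are easy to reproduce; a minimal sketch (our own, assuming NumPy and SciPy are available) is
\begin{verbatim}
import numpy as np
from scipy.optimize import brentq

f = lambda z: 4 * z**3 + 3 * z**2 - 1   # left-hand side of the cubic above
z_c = brentq(f, 0.01, 1.0)              # unique root in (0, 1)
x_c = np.log(z_c)                       # critical value of x = J/T
print(z_c, x_c, 1 / abs(x_c))           # ~0.4554, ~-0.7866, T_c/|J| ~ 1.2713
\end{verbatim}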
In the ferromagnetic case the concurrence \begin{equation} {\cal C}= \max\left[\frac{1-4z^3-3z^2}{3(1+2z^3+z^2)},0\right] \label{eq:cc111} \end{equation} reaches its maximum value of $1/3$ when $z \to 0$, that is when $x\to -\infty$. Since the entanglement is a monotonic increasing function of ${\cal C}$ this means that the entanglement attains its maximum value for zero temperature, when $J$ is finite and nonzero. For finite temperatures, this maximum is also attained when $J\to -\infty$.
In summary, we find that \\ \\ {\bf Theorem 1:} {\it The XX model is thermally entangled if and only if $J<-0.7866 T$; maximum entanglement is attained when $T \to 0$ or $J\to -\infty$.}
The above discussion shows that in our 3-qubit model pairwise thermal entanglement occurs only in the ferromagnetic case. This result differs from that for the two-qubit $XX$ model, for which thermal entanglement exists in both the antiferromagnetic and ferromagnetic cases\cite{Wang1}.
For the ferromagnetic case
the states $|\psi _3\rangle $ and $|\psi_6\rangle $ constitute a doubly-degenerate ground state. Eq.(\ref{eq:cc111}) shows that the concurrence ${\cal C}=1/3$
at zero temperature. As noted in the last section the concurrence for any two qubits in the state $|\psi _3\rangle $ or $|\psi _6\rangle $ is 2/3. Here the value 1/3 appears due to the degeneracy. In fact, at zero temperature, the thermal entanglement can be calculated from $\rho^{(36)}_{12}$ (\ref{eq:ccczzz}). After normalization it is easy to obtain the concurrence, which is just 1/3.
\section{The Anisotropic Heisenberg $XXZ$ model} We now consider a more general Heisenberg model, the anisotropic Heisenberg $XXZ$ model, which is described by the Hamiltonian\cite{A} \begin{equation} H_{XXZ}=H_{XX}+\frac{\Delta J}2\sum_{n=1}^3(\sigma _n^z\sigma _{n+1}^z-1), \end{equation} where $\Delta $ is the anisotropy parameter. The model reduces to the $XX$ model when $\Delta =0,$ and the isotropic Heisenberg $XXX$ model when $\Delta =1.$
It is straightforward to check that the added anisotropic term $H_{XXZ}-H_{XX}$ commutes with $H_{XX}$. Therefore the eigenstates of the $XXZ$ model are still given by Eq. (\ref{eq:estate}), now with the different eigenvalues \begin{eqnarray} E_0 &=&E_7=0, \nonumber \\ E_1 &=&E_2=E_4=E_5=-2J(\Delta +\frac 12), \nonumber \\ E_3 &=&E_6=-2J(\Delta -1). \end{eqnarray}
Following the procedure of the previous section, we obtain the concurrence, which is of the same form as Eq.(\ref{eq:ccc}) with however the parameters $v,w,y,$ and the partition function $Z$ now given by \begin{eqnarray} v &=&\frac 32+\frac 12z^{2\Delta }(2z+z^{-2}), \nonumber \\ w &=&z^{2\Delta }(2z+z^{-2}), \nonumber \\ y &=&z^{2\Delta }(z^{-2}-z), \nonumber \\ Z &=&2+2z^{2\Delta }(2z+z^{-2}). \label{eq:vvv} \end{eqnarray}
As in the last section, since $Z$ is always positive, we need only
consider \begin{eqnarray} f(\Delta,z)
&\equiv& |y|-v \nonumber \\
&=& z^{2\Delta}|z^{-2}-z|-\frac{3}{2}- z^{2\Delta+1}-\frac{1}{2}z^{2\Delta-2} \end{eqnarray} to determine whether entanglement occurs or not. Again, we have to consider two different cases:
{\em Case 1}. When $J>0$ ($z>1$), namely the antiferromagnetic $XXZ$ model, the condition on $f(\Delta,z)$ leads to \begin{equation} z^{2\Delta-2}=e^{2x(\Delta-1)}<-1, \end{equation} which is impossible. So there is no entanglement in this case, irrespective of $\Delta$.
{\em Case 2}. When $J<0$ ($z<1$), namely the ferromagnetic $XXZ$ model, the condition $f(\Delta,z)>0$ gives \begin{equation} \label{Jsmaller0} z^{2\Delta-2}-4 z^{2\Delta+1} -3 >0. \end{equation} We consider some special values of $\Delta$.
(1) $\Delta\ge 1$: For $\Delta=1$ the relation (\ref{Jsmaller0}) implies $z^3<-1/2$ which is impossible. So there is no entanglement in the $XXX$ model. We can further prove that there is no entanglement for $\Delta>1$. In fact, it is easy to see that \begin{equation} 2f(\Delta,z)=z^{2(\Delta-1)}-4z^{2\Delta+1}-3<z^{2(\Delta-1)}-3<0, \end{equation} where we have used the inequalities $z^{2\Delta+1}>0$ and $z^{2(\Delta-1)}<1$ for $\Delta>1$ and $z<1$. This means ${\cal C}=0$ and thus there is no entanglement.
(2) $\Delta=1/2$: In this case\cite{Plus} the entanglement condition is obtained as \begin{equation} 4z^3+3z-1<0, \end{equation} which is an increasing function of $z$. So the model is entangled iff $0<z<z_c\approx 0.298$, where $z_c$ is determined as a root of $4z^3+3z-1=0$.
(3) $\Delta=-1/2$: This is an interesting case whose importance has been emphasized recently\cite{Minus}.
From the eigenvalues we see that the excited state of the system is 6-fold degenerate when $\Delta=-1/2$. The function $2f(\Delta,z)$ now reduces to $z^{-3}-7$, from which the critical values $z_c$ and $T_c$ are obtained analytically as $z_c=7^{-1/3}, T_c={3}|J|/{\ln 7}\approx 1.5417|J|$.
(4) The limit case $\Delta\to -\infty$: The critical value $z_c$ is now determined by $z^{-2}-4z=0$, i.e., $z_c=4^{-1/3}$.
Therefore the critical temperature $T_c=3|J|/\ln 4\approx 2.164 |J|$.
Finally, for more general values of the anisotropy parameter we need to resort to numerical calculations. Fig.1 is a plot of the critical temperature as a function of the anisotropy parameter $\Delta$. From this we see that the critical temperature decreases as $\Delta$ increases, and reaches the asymptotic value $T_c=2.1640|J|$ as $\Delta \rightarrow -\infty .$
\begin{figure}
\caption{The critical temperature $T_c$ as a function of the anisotropy parameter $\Delta$. The exchange constant $J=-1$.}
\end{figure}
We now give further analytical results for the case $\Delta<1$ and $z<1$. Consider $f(\Delta, z)$ as a function of $\Delta$. Then, from \begin{eqnarray} \frac{\partial f(\Delta, z)}{\partial \Delta} &=&(\ln z) z^{2\Delta} (z^{-2}-4z) \nonumber\\ && \left\{\begin{array}{ll} =0, & \mbox{when } z=z_0\equiv 4^{-1/3}\approx 0.62996; \\ >0, & \mbox{when } z>z_0; \\ <0, & \mbox{when } z<z_0. \end{array} \right. \label{eq:gradients} \end{eqnarray} we see that $f(\Delta, z)$ is an increasing (decreasing) function when $z>z_0$ ($z<z_0$). We consider these cases separately.
{\em Case 2a}. When $z=z_0$, $f(\Delta, z_0)=-3<0$. So there is no entanglement in this case.
{\em Case 2b}. When $z>z_0$, the function $f(\Delta, z)$ is an increasing function which reaches its maximum when $\Delta\to 1$. Since we have seen that there is no entanglement when $\Delta=1$ \begin{equation} f(\Delta, z)<f(1, z)<0 \quad \mbox{for }z>z_0, \end{equation} which means that there is no entanglement when $z>z_0$.
{\em Case 2c}. The case $z<z_0$. Define the $z$-dependent point $\Delta_z$ by $f(\Delta_z, z)=0$ where \begin{equation} \Delta_z=\frac{1}{2\beta J} \ln \left[ \frac{3}{z^{-2}-4z}\right]<1. \label{eq:delta} \end{equation} Thus from Eq.(\ref{eq:gradients}) we know that $f(\Delta, z)>0$ when $\Delta<\Delta_z$ for all $z<z_0$, which is just the condition for entanglement.
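For completeness, we note that Eq.~(\ref{eq:delta}) follows directly from the above: for $z<1$ one has $2f(\Delta, z)=z^{2\Delta}(z^{-2}-4z)-3$, so the defining condition $f(\Delta_z, z)=0$ is equivalent to $z^{2\Delta_z}(z^{-2}-4z)=3$; taking logarithms and using $\ln z=x=\beta J$ gives Eq.~(\ref{eq:delta}), and the restriction $z<z_0$ ensures $z^{-2}-4z>0$, so that the logarithm is well defined.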
In Fig.\,2 we give plots of $f(\Delta, z)$ for $z=0.6295, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1$. Note that $\Delta_z$ is a decreasing function of $z$ and that \begin{equation} \Delta_z \to \left\{ \begin{array}{ll} -\infty & \mbox{when } z\to z_0; \\ 1 & \mbox{when } z\to 0, \end{array} \right. \end{equation} as indicated in Fig.\,2.
\begin{figure}
\caption{The function $f(\Delta, z)$ with respect $\Delta$ for $z=0.6295, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1$ (from left to right).}
\end{figure}
In summary, we have \\ \\ {\bf Theorem 2:} {\it The XXZ model exhibits thermal entanglement only when $z<z_0$, that is, $J<-0.4621T$, and $\Delta<\Delta_z$.}
Note that Theorem 2 is entirely consistent with Theorem 1, since $\Delta_z=0$ in Eq.(\ref{eq:delta}) corresponds to $J=-0.7866T.$
In Fig.3 we plot the concurrence as a function of the anisotropy parameter $\Delta $ and exchange constant $J$. The figure shows that there is no thermal entanglement for the antiferromagnetic ($J>0$) $XXZ$ model, nor for the ferromagnetic ($J<0$) $XXZ$ model when $\Delta \ge 1$.
\begin{figure}
\caption{The concurrence as a function of $\Delta$ and $J$. The temperature $T=1$.}
\end{figure}
To end this section we investigate the concurrence at zero temperature. Nonzero concurrence occurs for the case $\Delta<1, J<0$. In this case, the doubly-degenerate ground state consists of
$|\psi_3\rangle $ and $|\psi_6\rangle $. We may calculate the concurrence ${\cal C} = 1/3$ directly from the density matrix $\rho^{(36)}_{12}$.
\section{Effects of magnetic fields} In this section we consider the effect of magnetic fields on thermal entanglement. The $XXZ$ model with uniform magnetic field $B$ along the $z$ direction is given by \begin{equation} H_{XXZM}=H_{XXZ}+B\sum_{n=1}^3\sigma _n^z. \end{equation} It is easy to check that the added magnetic term commutes with the Hamiltonian $H_{XXZ}$. Therefore the eigenstates of the $XXZ$ model are given by Eq.(\ref{eq:estate}). The eigenvalues are now \begin{eqnarray} E_0 &=&-3B \nonumber \\ E_1 &=&E_2=-2J(\Delta +\frac 12)-B \nonumber \\ E_3 &=&-2J(\Delta -1)-B, \nonumber \\ E_4 &=&E_5=-2J(\Delta +\frac 12)+B, \nonumber \\ E_6 &=&-2J(\Delta -1)+B. \nonumber \\ E_7 &=&3B\label{eq:eigen} \end{eqnarray} We see that the magnetic field partly removes the degeneracy.
With a derivation completely analogous to that of Sec. III and Sec. IV, the reduced density operator is \begin{equation} \rho _{12}=\frac 2{3Z}\left( \begin{array}{llll} u & 0 & 0 & 0 \\ 0 & w & y & 0 \\ 0 & y & w & 0 \\ 0 & 0 & 0 & v \end{array} \right) \end{equation} with
\begin{eqnarray} u &=&\frac 32e^{3\beta B}+\frac 12 e^{\beta B}z^{2\Delta}\left( 2z+z^{-2}\right) , \nonumber \\ v &=&\frac 32e^{-3\beta B}+\frac 12e^{-\beta B}z^{2\Delta}\left( 2z+z^{-2}\right) , \nonumber \\ w &=&\cosh (\beta B)z^{2\Delta }\left( 2z+z^{-2}\right) , \nonumber \\ y &=&\cosh (\beta B)z^{2\Delta }\left(z^{-2}-z\right) , \nonumber \\ Z &=&2\cosh (3\beta B)+2\cosh (\beta B)z^{2\Delta} \nonumber \\ &&\times (2z+z^{-2}) \end{eqnarray}
The concurrence is then given by
\begin{equation}
{\cal C}=\frac 4{3Z}\max (|y|-\sqrt{uv},0). \end{equation} As an immediate consequence we see that the concurrence is an even function of the magnetic field.
As the quantities $Z, u, v$ are all positive, for convenience we consider the quantity $y^2-uv$ instead of $|y|-\sqrt{uv}$. Thermal entanglement occurs when \begin{equation} y^2-uv=h\cosh (2\beta B)-g>0, \end{equation} where \begin{eqnarray} &&g=\frac{1}{4}[9+z^{4(\Delta -1)}(2z^6+8z^3-1)], \nonumber \\ &&h=\frac{1}{2}z^{2\Delta }\left[ z^{2\Delta }(z^{-2}-z)^2-(6z+3z^{-2})\right] . \label{eq:yyuv} \end{eqnarray}
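These closed forms can be checked against a brute-force numerical computation; the following minimal sketch (NumPy only; the helper functions are our own illustration) diagonalizes the $8\times 8$ Hamiltonian $H_{XXZM}$, forms the thermal state, traces out the third qubit, and evaluates the concurrence of the remaining pair:
\begin{verbatim}
import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]])
sz = np.array([[1, 0], [0, -1]], dtype=complex)
si = np.eye(2, dtype=complex)

def op(single, site):
    mats = [si, si, si]
    mats[site] = single
    return np.kron(np.kron(mats[0], mats[1]), mats[2])

def concurrence(rho):
    YY = np.kron(sy, sy)
    lam = np.sort(np.sqrt(np.abs(np.linalg.eigvals(
        rho @ YY @ rho.conj() @ YY).real)))[::-1]
    return max(0.0, lam[0] - lam[1] - lam[2] - lam[3])

def thermal_concurrence(J, Delta, B, T):
    H = sum(J / 2 * (op(sx, n) @ op(sx, (n + 1) % 3)
                     + op(sy, n) @ op(sy, (n + 1) % 3))
            + Delta * J / 2 * (op(sz, n) @ op(sz, (n + 1) % 3) - np.eye(8))
            + B * op(sz, n)
            for n in range(3))
    e, v = np.linalg.eigh(H)
    w = np.exp(-e / T)
    rho = (v * w) @ v.conj().T / w.sum()              # exp(-H/T)/Z
    rho12 = np.trace(rho.reshape(2, 2, 2, 2, 2, 2),
                     axis1=2, axis2=5).reshape(4, 4)  # trace out qubit 3
    return concurrence(rho12)

# antiferromagnetic XXX model in a field (cf. Fig. 5): the field induces entanglement
print(thermal_concurrence(J=1.0, Delta=1.0, B=2.0, T=1.0))
\end{verbatim}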
We now consider the effect of a magnetic field on the thermal entanglement.
We first consider the $XXX$ model, $\Delta =1$, which does not exhibit thermal entanglement when $B=0$. One might expect that the magnetic field would induce thermal entanglement. It is easy to see that \begin{equation} 2(y^2-uv)=\cosh (2\beta B)\left( z^6-8z^3-2\right) -(z^3+2)^2. \end{equation} If $z<(4+3\sqrt{2})^{1/3}\approx 2.02$, $z^6-8z^3-2<0$ and thus $y^2-uv<0$ for any $B$. So for this range of $z$ values there is no thermal entanglement no matter how strong the magnetic field is. However, when $z>(4+3\sqrt{2})^{1/3}$, $z^6-8z^3-2>0$ and the condition for entanglement becomes \begin{equation} \cosh (2\beta B)>\frac{(z^3+2)^2}{z^6-8z^3-2} \end{equation} which can be fulfilled for strong enough $B$. So a magnetic field can induce entanglement in the $XXX$ model when $z>(4+3\sqrt{2})^{1/3}$.
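The threshold quoted here is simply the positive root of a quadratic in $u=z^3$: $z^6-8z^3-2=0$ reads $u^2-8u-2=0$, whose positive solution is $u=4+3\sqrt{2}$, so that $z=(4+3\sqrt{2})^{1/3}\approx 2.02$.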
Now consider the case $\Delta =-1/2$. From Eq.(\ref{eq:yyuv}) we obtain \begin{eqnarray} h &=&\frac 12(p^2-5p-5), \\ g &=&\frac 14(11+8p-p^2), \\ h-g &=&\frac 14(3p^2-18p-21), \end{eqnarray} which are parabolas in $p\equiv z^{-3}$, as shown in Fig.\,4. We consider three different cases:
{\em Case 1:} $p<p_1=5/2+3\sqrt{5}/2$. In this case $h<0$, $g>0$, $h-g<0$ and $y^2-uv<0$. So there is no thermal entanglement.
{\em Case 2:} $p_1<p<p_2=7$. In this case $h>0,g>0,h-g<0$. So $y^2-uv>0$, and so entanglement appears if the magnetic field is strong enough.
{\em Case 3:} $p_2<p$. In this case $h>0, h-g>0$ and $y^2-uv$ is always positive; that is, here the XXZ model exhibits thermal entanglement for any magnetic field. Note that $p_2 = z^{-3}_c$ where $z_c$ is the critical value given in last section.
The above two models show that the magnetic field can either induce entanglement in a non-entangled system or extend the entanglement range for an already entangled system.
\begin{figure}
\caption{The functions $h$, $g$, and $h-g$ in terms of $p=z^{-3}$.}
\end{figure}
In Fig.5 we plot the concurrence as a function of the magnetic field $B$ and exchange constant $J.$
At $B=0$ there is no thermal entanglement. The entanglement increases with the magnetic field $|B|$ until it reaches a maximum value, then decreases and gradually disappears. We can clearly see that there is no thermal entanglement for the ferromagnetic case, while thermal entanglement exists for the antiferromagnetic case. In other words, we can induce entanglement in the antiferromagnetic $XXX$ system by introducing a magnetic field, but cannot induce entanglement in the ferromagnetic $XXX$ system for any strength of magnetic field.
\begin{figure}
\caption{ Concurrence as a function of the magnetic field $B$ and the exchange constant $J$. The temperature $T=1$ and the anisotropy parameter $\Delta=1$. }
\end{figure}
\begin{figure}
\caption{ Concurrence as a function of $T$ for different magnetic fields $B=1$(solid line), 3/2(dashed line), and 2(circle point line). }
\end{figure}
Fig. 6 gives a plot of the concurrence as a function of the temperature for different magnetic fields. One can see that there exist critical temperatures above which the entanglement vanishes. It is also noteworthy that the critical temperature increases as the magnetic field $B$ increases. Consider the interesting case $B=2$. We observe that the concurrence is zero at zero temperature and there is a maximum value of concurrence at a finite temperature. The entanglement can be increased by increasing the temperature. The maximum value is due to the optimal mixing of all eigenstates in the system. When considering zero temperature we find that there are different limits for different magnetic fields. Actually a more general result exists \begin{eqnarray*} \lim_{T\rightarrow 0}{\cal C}(\Delta ,B,1,T) &=&\frac 13\text{ for }\Delta
>|B|-1/2, \\
&=&\frac 29\text{ for }\Delta =|B|-1/2, \\
&=&0\text{ for }\Delta <|B|-1/2. \end{eqnarray*}
The special point $T=0, \; \; \Delta=B-1/2$ ($B\ge 0$ is assumed without loss of generality), at which the entanglement undergoes a sudden change with adjustment of the parameters $\Delta$ and $B$, is the point of quantum phase transition\cite{QPT}. The quantum phase transition takes place at zero temperature due to the variation of interaction terms in the Hamiltonian. By examining the eigenvalues (\ref{eq:eigen}) we can understand the phase transition. When $\Delta=B-1/2$, the ground state contains
the three-fold degenerate states $|\psi_0\rangle,
|\psi_1\rangle$, and $|\psi_2\rangle$. One may calculate the thermal entanglement from the density matrix $\rho_{12}^{(012)}$(\ref{eq:ddd}) and find the concurrence to be $2/9$. When $\Delta>B-1/2$, the ground state contains the two-fold degenerate states
$|\psi_1\rangle$ and $|\psi_2\rangle$. The concurrence has the value $1/3$ as calculated from
$\rho_{12}^{(12)}$ (\ref{eq:eee}). When $\Delta<B-1/2$, the ground state is $|\psi_0\rangle$, which is non-degenerate, and the concurrence is zero in this case.
\section{Conclusions} Apart from being a fundamental property of quantum mechanics, it appears that entanglement may provide an important resource in quantum information processes. One source of entanglement is provided by magnetic systems, such as those modelled in this paper. Within the current state of knowledge, only measures for {\em pairwise} entanglement are available. Thus, in order to study the entanglement properties of systems more complex than those simply involving two qubits, it is necessary to adopt a procedure whereby one traces out a subsystem, leaving effectively only a two-qubit system for which we can calculate the {\em concurrence}, which in turn gives a measure of the entanglement. Using this procedure, we have studied pairwise thermal entanglement in the following Heisenberg models; the $XX$ model, the $XXZ$ model and the $XXZ$ model in a magnetic field. We obtained analytical expressions for the concurrence, which indicated no thermal entanglement for the antiferromagnetic $XXZ$ model, nor for the ferromagnetic $XXZ$ model when the anisotropy parameter $\Delta \ge 1$. Conditions for the existence of thermal entanglement were studied in detail. The effects of magnetic fields on entanglement were also considered. We found that the magnetic field can induce entanglement in the antiferromagnetic $XXX$ model, but cannot induce entanglement in the ferromagnetic $XXX$ model, no matter how strong the magnetic field is.
In this paper we have extended previous work on thermal entanglement from two-qubit models to three-qubit models, concentrating on those systems where the pairwise entanglement can be studied analytically. It would be an attractive proposition to extend further the investigation of such Heisenberg models to the $N$-qubit case; such extensions are under consideration. \acknowledgments
X. Wang thanks K. M\o lmer, A. S\o rensen, W. K. Wootters and Paolo Zanardi for many valuable discussions. He is supported by the Information Society Technologies Programme IST-1999-11053, EQUIP, action line 6-2-1 and European project Q-ACTA. H.\,Fu is supported in part by the National Natural Science Foundation of China (19875008), and A.\,I.\,Solomon acknowledges the hospitality of the Laboratoire de Physique Th\'{e}orique des Liquides, Paris University VI.
\begin{references}
\bibitem{Bennett}C. H. Bennett and D. P. DiVincenzo, Nature {\bf 404}, 247 (2000).
\bibitem{EO0} P. Zanardi, C. Zalka, and L. Faoro, Phys. Rev. A {\bf 62}, 030301(R) (2000); P. Zanardi, Phys. Rev. A{\bf \ 63}, 040304(R) (2001).
\bibitem{EO1} W. D\"{u}r , G. Vidal, J. I. Cirac, N. Linden and S. Popescu, quant-ph/0006034.
\bibitem{EO2} J. I. Cirac, W. D\"{u}r, B. Kraus and M. Lewenstein, Phys. Rev. Lett. {\bf 86}, 544 (2001).
\bibitem{EI1} J. Schliemann, J. I. Cirac, M. Ku\'{s}, M. Lewenstein, and D. Loss, Phys. Rev. A {\bf 64}, 022303 (2001).
\bibitem{EI2} Y. S. Li, B. Zeng, X. S. Liu, and G. L. Long, Phys. Rev. A {\bf 64}, 054302 (2001).
\bibitem{EI3} P. Zanardi, quant-ph/0104114.
\bibitem{Dur}W. D\"{u}r, G. Vidal, and J. I. Cirac, Phys. Rev. A {\bf 62}, 062314 (2000).
\bibitem{threeq} V. Coffman, J. Kundu, W. K. Wootters, Phys. Rev. A {\bf 61}, 052306 (2000); A. Sudbery, J. Phys. A: Math. Gen. {\bf 34}, 643 (2001); H. A. Carteret and A. Sudbery, quant-ph/0001091; T. A. Brun and O. Cohen, Phys. Lett. A {\bf 281}, 88 (2001); A. Ac\'{i}n, A. Andrianov, L. Costa, E. Jan\'{e}, J. I. Latorre, and R. Tarrach, Phys. Rev. Lett. {\bf 85}, 1560 (2000); A. Ac\'{i}n, A. Andrianov, E. Jan\'{e}, and R. Tarrach, quant-ph/0009107.
\bibitem{Rajagopal} A. K. Rajagopal and R. W. Rendell, quant-ph/0104122.
\bibitem{tele} A. Karlsson and M. Bourennane, Phys. Rev. A {\bf 58}, 4394 (1998); V. N. Gorbachev and A. I. Trubilko, JETP {\bf 91}, 894 (2000).
\bibitem{dense} J. C. Hao, C. F. Li, and G. C. Guo, Phys. Rev. A {\bf 63}, 054301 (2001).
\bibitem{clone} D. Bru\ss ,D. P. DiVincenzo, A. Ekert, C. A. Fuchs, C. Macchiavello, and J. A. Smolin, Phys. Rev. A {\bf 57}, 2368 (1998).
\bibitem{Arnesen} M. A. Nielsen, Ph.~D. thesis, University of New Mexico, 1998, quant-ph/0011036; M. C. Arnesen, S. Bose, and V. Vedral, quant-ph/0009060.
\bibitem{Wang1}X. Wang, Phys. Rev. A {\bf 64}, 012313 (2001).
\bibitem{Wang2}X. Wang, Phys. Lett. A {\bf 281}, 101 (2001).
\bibitem{Ising}D. Gunlycke, S. Bose, V. M. Kendon, and V. Vedral, Phys. Rev. A 64, 042302 (2001).
\bibitem{Loss} D. Loss and D. P. Divincenzo, \pra {\bf 57}, 120 (1998); G. Burkard, D. Loss and D. P. DiVincenzo,\prb {\bf 59}, 2070 (1999).
\bibitem{Kane} B. E. Kane, Nature {\bf 393}, 133 (1998).
\bibitem{Vrijen} R. Vrijen et al., quant-ph/9905096.
\bibitem{Moelmer} Anders S\o rensen and Klaus M\o lmer, Phys. Rev. Lett. {\bf 83}, 2274 (1999).
\bibitem{Loss2} D. A. Lidar, D. Bacon, and K. B.\ Whaley, Phys. Rev. Lett. {\bf 82}, 4556 (1999); D. P. DiVincenzo, D. Bacon, J. Kempe, G. Burkard, and K. B. Whaley, Nature {\bf 408}, 339 (2000)
\bibitem{Oconnor} K. M. O$^{\prime }$Connor, W. K. Wootters, Phys. Rev. A {\bf 63}, 052302 (2001).
\bibitem{Lieb} E. Lieb, T. Schultz, and D. Mattis, Ann. Phys. (N. Y.) {\bf 16}, 407 (1961).
\bibitem{Hall} V. Privman, I. D. Vagner and G. Kventsel, quant-ph/9707017.
\bibitem{Zheng} S. B. Zheng, G. C. Guo, Phys. Rev. Lett. {\bf 85}, 2392 (2000).
\bibitem{I} A. Imamo\={g}lu, D. D. Awschalom, G. Burkard, D. P. DiVincenzo, D. Loss, M. Sherwin, and A. Small, Phys. Rev. Lett. {\bf 83}, 4204 (1999).
\bibitem{JW} P. Jordan and E. Wigner, Z. Phys. {\bf 47}, 631 (1928).
\bibitem{Schnack} J. Schnack, cond-mat/0006317.
\bibitem{Koashi} M. Koashi, V. Bu\v {z}ek, and N. Imoto, Phys. Rev. A {\bf 62}, 050302 (2000).
\bibitem{Wootters1} S. Hill and W. K. Wootters, Phys. Rev. Lett. {\bf 78}, 5022 (1997); W. K. Wootters, Phys. Rev. Lett. {\bf 80}, 2245 (1998); V. Coffman, J. Kundu, and W. K. Wootters, \pra {\bf 61}, 052306 (2000).
\bibitem{A}R. Orbach, Phys. Rev. {\bf 112}, 309 (1958);L. R. Walker, Phys. Rev. {\bf 116}, 1089 (1959); J. des Cloizeaux and M. Gaudin, J. Math. Phys. {\bf 7}, 1384 (1966); C. N. Yang and C. P. Yang, Phys. Rev. {\bf 150}, 321 (1966)
\bibitem{Plus}V. Fridkin, Yu. Stroganov and D. Zagier, J. Phys. A: Math. Gen. {\bf 33}, L121 (2000).
\bibitem{Minus}Yu.G. Stroganov, J.Phys. A: Math.Gen. {\bf 34}, L179 (2001); A. V. Razumov and Yu. G. Stroganov, J. Phys. A: Math. Gen. {\bf 34} 3185 (2001); M. T. Batchelor, J. de. Gier, and B. Nienhuis, cond-mat/0101385.
\bibitem{QPT} S. Sachdev, Quantum phase transitions (Cambridge University Press, Cambridge, 1999).
\end{references}
\end{document}
\begin{document}
\def\N{{\mathbb N}} \def\C{{\mathbb C}} \def\R{{\mathbb R}} \def\E{{\mathbb E}}
\def\Id{{\rm Id}} \def\GG{{\cal G}} \def\cd{{\rm cd}} \def\mf{{\rm mf}}
\def\rkZ{{\rm rkZ}} \def\Ab{{\rm Ab}} \def\Ker{{\rm Ker}}
\title{\bf{Artin groups of spherical type up to isomorphism}}
\author{ \textsc{Luis Paris}}
\date{\today}
\maketitle
\begin{abstract} We prove that two Artin groups of spherical type are isomorphic if and only if their defining Coxeter graphs are the same. \end{abstract}
\noindent {\bf AMS Subject Classification:} Primary 20F36.
\section{Introduction}
Let $S$ be a finite set. Recall that a {\it Coxeter matrix} over $S$ is a matrix $M=(m_{s\,t})_{s,t \in S}$ indexed by the elements of $S$ such that $m_{s\,s}=1$ for all $s \in S$, and $m_{s\,t}=m_{t\,s} \in \{2, 3, 4, \dots, +\infty\}$ for all $s,t \in S$, $s \neq t$. A Coxeter matrix $M=(m_{s\,t})$ is usually represented by its {\it Coxeter graph}, $\Gamma$, which is defined as follows. The set of vertices of $\Gamma$ is $S$, two vertices $s,t$ are joined by an edge if $m_{s\,t}\ge 3$, and this edge is labelled by $m_{s\,t}$ if $m_{s\,t} \ge 4$. For $s,t \in S$ and $m \in {\mathbb Z}_{\ge 2}$, we denote by $w(s,t:m)$ the word $sts \dots$ of length $m$. The {\it Artin group} associated to $\Gamma$ is defined to be the group $G=G_\Gamma$ presented by $$
G= \langle S\ |\ w(s,t:m_{s\,t}) = w(t,s:m_{s\,t})\text{ for } s,t \in S,\ s \neq t\text{ and } m_{s\,t} < +\infty \rangle\,. $$ The {\it Coxeter group} $W=W_\Gamma$ associated to $\Gamma$ is the quotient of $G$ by the relations $s^2=1$, $s\in S$. We say that $\Gamma$ (or $G$) is of {\it spherical type} if $W$ is finite, that $\Gamma$ (or $G$) is {\it right-angled} if $m_{s\,t} \in \{2, +\infty \}$ for all $s,t \in S$, $s \neq t$, and that $G$ (or $W$) is {\it irreducible} if $\Gamma$ is connected. The
number $n=|S|$ is called the {\it rank} of $G$ (or of $W$).
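For instance, if $\Gamma$ is the Coxeter graph $A_2$ (two vertices $s,t$ joined by an unlabelled edge, so that $m_{s\,t}=3$), then $G= \langle s,t\ |\ sts=tst \rangle$ is the braid group on three strands, and $W$ is the symmetric group on three letters; this Artin group is irreducible and of spherical type.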
One of the main questions in the subject is the classification of Artin groups up to isomorphism (see \cite{Bes2}, Question 2.14). This problem is far from being completely solved as Artin groups are poorly understood in general. For example, we do not know whether all Artin groups are torsion free, and we do not know any general solution to the word problem for these groups. The only known results concerning this classification question are contained in a work by Brady, McCammond, M\"uhlherr, and Neumann \cite{BMMN}, where the authors determine a sort of transformation on Coxeter graphs which does not change the isomorphism class of the associated Artin groups, and a work by Droms \cite{Dro}, where it is proved that, if $\Gamma$ and $\Omega$ are two right-angled Coxeter graphs whose associated Artin groups are isomorphic, then $\Gamma= \Omega$. Notice that an Artin group is biorderable if and only if it is right-angled, hence a consequence of Droms' result is that, if $\Gamma$ is a right-angled Coxeter graph and $\Omega$ is any Coxeter graph, and if the Artin groups associated to $\Gamma$ and $\Omega$ are isomorphic, then $\Gamma=\Omega$. The fact that right-angled Artin groups are biorderable is proved in \cite{DuTh}. In order to show that the remaining Artin groups are not biorderable, one has only to observe that, if $2<m_{s\,t}< +\infty$, then $(st)^{m_{s\,t}} = (ts)^{m_{s\,t}}$ and $st \neq ts$, and that, in a biorderable group, two distinct elements cannot have a common $m$-th power for a fixed $m$.
In this paper we answer the classification question in the restricted framework of spherical type Artin groups. More precisely, we prove the following.
\begin{thm} Let $\Gamma$ and $\Omega$ be two spherical type Coxeter graphs, and let $G$ and $H$ be the Artin groups associated to $\Gamma$ and $\Omega$, respectively. If $G$ is isomorphic to $H$, then $\Gamma=\Omega$. \end{thm}
\noindent {\bf Remark.} I do not know whether a non spherical type Artin group can be isomorphic to a spherical type Artin group.
Artin groups were first introduced by Tits \cite{Tit2} as extensions of Coxeter groups. Later, Brieskorn \cite{Bri1} gave a topological interpretation of the Artin groups of spherical type in terms of complements of discriminantal varieties. Define a (real) {\it reflection group} of rank $n$ to be a finite subgroup $W$ of $GL(n,\R)$ generated by reflections. Such a group is called {\it essential} if there is no non-trivial subspace of $\R^n$ on which $W$ acts trivially. Let ${\cal A}$ be the set of reflecting hyperplanes of $W$, and, for $H \in {\cal A}$, let $H_\C$ denote the complexification of $H$, {\it i.e.} the complex hyperplane in $\C^n$ defined by the same equation as $H$. Then $W$ acts freely on $M(W)= \C^n \setminus \cup_{H \in {\cal A}} H_\C$, and, by \cite{Che}, $N(W)= M(W)/W$ is the complement in $\C^n$ of an algebraic variety, $D(W)$, called {\it discriminantal variety} of type $W$. Now, take a spherical type Coxeter graph $\Gamma$, and consider the associated Coxeter group $W=W_\Gamma$. By \cite{Cox}, the group $W$ can be represented as an essential reflection group in
$GL(n,\R)$, where $n=|S|$ is the rank of $W$, and, conversely, any essential reflection group of rank $n$ can be uniquely obtained in this way. By \cite{Bri1}, $\pi_1(N(W))$ is the Artin group $G=G_\Gamma$ associated to $\Gamma$.
So, a consequence of Theorem 1.1 is that $\pi_1(N(W))= \pi_1( \C^n \setminus D(W))$ completely determines the reflection group $W$ as well as the discriminantal variety $D(W)$.
Since the work of Brieskorn and Saito \cite{BrSa} and that of Deligne \cite{Del}, the combinatorial theory of spherical type Artin groups has been well studied. In particular, these groups are known to be biautomatic (see \cite{Cha2}, \cite{Cha}), and torsion-free. This last result is a direct consequence of \cite{Del} and \cite{Bri1}, it is explicitly proved in \cite{Deh}, and it shall be of importance in the remainder of the paper.
The first step in the proof of Theorem 1.1 consists of calculating some invariants for spherical type Artin groups (see Section 3). It actually happens that these invariants separate the irreducible Artin groups of spherical type (see Proposition 5.1). Afterwards, for a given isomorphism $\varphi: G \to H$ between spherical type Artin groups, we show that, up to some details, $\varphi$ sends each irreducible component of $G$ injectively into a unique irreducible component of $H$, and that both components have the same invariants. In order to do that, we first need to show that an irreducible Artin group $G$ cannot be decomposed as a product of two subgroups which commute, unless one of these subgroups lies in the center of $G$ (see Section 4).
From now on, $\Gamma$ denotes a spherical type Coxeter graph, $G$ denotes its associated Artin group, and $W$ denotes its associated Coxeter group.
\noindent {\bf Acknowledgments.} The idea of looking at centralizers of ``good'' elements in the proof of Proposition 4.2 is a suggestion of Benson Farb. I am grateful to him for this clever idea as well as for all his useful conversations. I am also grateful to Jean Michel who pointed out to me his work with Michel Brou\'e, and to John Crisp for so many discussions on everything concerning this paper.
\section{Preliminaries}
We recall in this section some well-known results on Coxeter groups and Artin groups.
For a subset $X$ of $S$, we denote by $W_X$ the subgroup of $W$ generated by $X$, and by $G_X$ the subgroup of $G$ generated by $X$. Let $\Gamma_X$ be the full Coxeter subgraph of $\Gamma$ whose vertex set is $X$. Then $W_X$ is the Coxeter group associated to $\Gamma_X$ (see \cite{Bou}), and $G_X$ is the Artin group associated to $\Gamma_X$ (see \cite{Lek} and \cite{Par1}). The subgroup $W_X$ is called {\it standard parabolic subgroup} of $W$, and $G_X$ is called {\it standard parabolic subgroup} of $G$.
For $w \in W$, we denote by ${\rm lg} (w)$ the word length of $w$ with respect to $S$. The group $W$ has a unique element of maximal length, $w_0$, which satisfies $w_0^2=1$ and $w_0 S w_0 =S$, and whose length is $m_1 + \dots + m_n$, where $m_1, m_2, \dots, m_n$ are the exponents of $W$.
The connected spherical Coxeter graphs are exactly the graphs $A_n$ ($n \ge 1$), $B_n$ ($n\ge 2$), $D_n$ ($n\ge 4$), $E_6$, $E_7$, $E_8$, $F_4$, $H_3$, $H_4$, $I_2(p)$ ($p \ge 5$) represented in \cite{Bou}, Ch. IV, \S 4, Thm. 1. (Here we use the notation $I_2(6)$ for the Coxeter graph $G_2$. We may also use the notation $I_2(3)$ for $A_2$, and $I_2(4)$ for $B_2$.)
Let $F:G \to W$ be the natural epimorphism which sends $s$ to $s$ for all $s \in S$. This epimorphism has a natural set-section $T: W \to G$ defined as follows. Let $w \in W$, and let $w=s_1s_2 \dots s_l$ be a reduced expression of $w$ ({\it i.e.} $l={\rm lg}(w)$). Then $T(w)=s_1s_2 \dots s_l \in G$. By Tits' solution to the word problem for Coxeter groups \cite{Tit}, the definition of $T(w)$ does not depend on the choice of the reduced expression.
Define the {\it Artin monoid} associated to $\Gamma$ to be the (abstract) monoid $G^+$ presented by $$
G^+= \langle S\ |\ w(s,t: m_{s\,t}) = w(t,s: m_{s\,t}) \text{ for } s\neq t \text{ and } m_{s\,t} <+\infty \rangle^+\,. $$ By \cite{BrSa}, the natural homomorphism $G^+ \to G$ which sends $s$ to $s$ for all $s \in S$ is injective. Note that this fact is always true, even if $\Gamma$ is not assumed to be of spherical type (see \cite{Par2}).
The {\it fundamental element} of $G$ is defined to be $\Delta= T(w_0)$, where $w_0$ denotes the element of $W$ of maximal length. For $X \subset S$, we denote by $w_X$ the element of $W_X$ of maximal length, and by $\Delta_X=T(w_X)$ the fundamental element of $G_X$.
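For instance, if $\Gamma=A_2$ and $S=\{s,t\}$, then $w_0=sts$ and $\Delta=T(w_0)=sts$, the usual half-twist of the braid group on three strands.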
The defining relations of $G^+$ being homogeneous, we can define two partial orders $\le_L$ and $\le_R$ on $G^+$ as follows.
$\bullet$ We set $a \le_L b$ if there exists $c \in G^+$ such that $b=ac$.
$\bullet$ We set $a \le_R b$ if there exists $c \in G^+$ such that $b=ca$.
Now, the following two propositions are a mixture of several well-known results from \cite{BrSa} and \cite{Del}.
\begin{prop} (1) $G^+$ is cancellative.
(2) $(G^+, \le_L)$ and $(G^+, \le_R)$ are lattices.
(3) $\{a \in G^+; a\le_L \Delta\} = \{a \in G^+; a\le_R \Delta \} = T(W)$.
$\square$ \end{prop}
Note that the fact that $G^+$ is cancellative is true even if $\Gamma$ is not of spherical type (see \cite{Mic}). The elements of $T(W)$ are called {\it simple elements}. We shall denote the lattice operations of $(G^+, \le_L)$ by $\vee_L$ and $\wedge_L$, and the lattice operations of $(G^+, \le_R)$ by $\vee_R$ and $\wedge_R$.
Define the {\it quasi-center} of $G$ to be the subgroup $QZ(G)=\{ a \in G; aSa^{-1} = S \}$.
\begin{prop} Assume $\Gamma$ to be connected.
(1) For $X \subset S$ we have $$ \vee_L \{s; s\in X\} = \vee_R \{s; s\in X\} = \Delta_X\,. $$ In particular, $$ \vee_L \{s; s\in S\} = \vee_R \{s; s\in S\} = \Delta\,. $$
(2) There exists a permutation $\mu: S \to S$ such that $\mu^2= \Id$ and $\Delta s = \mu(s) \Delta$ for all $s \in S$.
(3) The quasi-center $QZ(G)$ of $G$ is an infinite cyclic subgroup generated by $\Delta$.
(4) The center $Z(G)$ of $G$ is an infinite cyclic subgroup of $G$ generated either by $\delta= \Delta$ if $\mu=\Id$, or by $\delta= \Delta^2$ if $\mu\neq \Id$.
$\square$ \end{prop}
The generator $\delta$ of $Z(G)$ given in the above proposition shall be called the {\it standard generator} of $Z(G)$. Note also that the assumption ``$\Gamma$ is connected'' is not needed in (1) and (2). Let $\Gamma$ be connected. Then $\mu \neq \Id$ if and only if $\Gamma$ is either $A_n$, $n\ge 2$, or $D_{2n+1}$, $n\ge 2$, or $E_6$, or $I_2(2p+1)$, $p\ge 2$ (see \cite{BrSa}, Subsection 7.2).
Now, the following result can be found in \cite{Cha}.
\begin{prop}[Charney \cite{Cha}] Each $a \in G$ can be uniquely written as $a=bc^{-1}$ where $b,c \in G^+$ and $b \wedge_R c =1$.
$\square$ \end{prop}
The expression $a=bc^{-1}$ of the above proposition shall be called the {\it Charney form} of $a$.
An easy observation shows that, if $s_1s_2 \dots s_l$ and $t_1 t_2 \dots t_l$ are two positive expressions of a same element $a \in G^+$, then the sets $\{s_1, \dots, s_l\}$ and $\{t_1, \dots, t_l\}$ are equal. In particular, if $a \in G_X^+$, then all the letters that appear in any positive expression of $a$ lie in $X$. A consequence of this fact is the following.
\begin{lemma} Let $X$ be a subset of $S$, let $a\in G_X$, and let $a=bc^{-1}$ be the Charney form of $a$ in $G$. Then $b,c \in G_X^+$ and $a=bc^{-1}$ is the Charney form of $a$ in $G_X$. \end{lemma}
\paragraph{Proof.} Let $\vee_{X,R}$ and $\wedge_{X,R}$ denote the lattice operations of $(G_X^+,\le_R)$. The above observation shows that, if $a\le_R b$ and $b \in G_X^+$, then $a \in G_X^+$. This implies that $b \wedge_{X,R} c = b \wedge_R c$ for all $b,c \in G_X^+$. Now, let $a \in G_X$ and let $a=bc^{-1}$ be the Charney form of $a$ in $G_X$. We have $b,c \in G_X^+ \subset G^+$ and $b \wedge_R c = b \wedge_{X,R} c = 1$, thus $a=bc^{-1}$ is also the Charney form of $a$ in $G$.
$\square$
\begin{corollary} Let $X$ be a subset of $S$. Then $G_X \cap G^+ = G_X^+$.
$\square$ \end{corollary}
\begin{corollary} Let $X$ be a subset of $S$, $X \neq S$. Then $G_X \cap \langle \Delta \rangle = \{ 1 \}$. \end{corollary}
\paragraph{Proof.} Take $s \in S \setminus X$. By Proposition 2.2, we have $s \le_R \Delta$, thus $\Delta \not \in G_X^+ = G_X \cap G^+$.
$\square$
\section{Invariants}
The purpose of the present section is to calculate some invariants of the spherical type Artin groups.
The first invariant that we want to calculate is the cohomological dimension, denoted by $\cd (G)$. We assume the reader to be familiar with this notion, and we refer to \cite{Bro} for definitions and properties. Our result is the following.
\begin{prop}
Let $n=|S|$ be the rank of $G=G_\Gamma$. Then $\cd (G)=n$. \end{prop}
\paragraph{Proof.} Recall the spaces $M(W)$ and $N(W)$ defined in the introduction. Recall also that $\pi_1(N(W))=G$, that $W$ acts freely on $M(W)$, and that $N(W)=M(W)/W$. In particular, $\pi_1(M(W))$ is a subgroup of $\pi_1(N(W))=G$ (it is actually the kernel of the epimorphism $F: G \to W$). Finally, recall the well-known fact that, if $H_1$ is a subgroup of a given group $H_2$, then $\cd (H_1) \le \cd (H_2)$.
Deligne proved in \cite{Del} that $M(W)$ is aspherical, and Brieskorn proved in \cite{Bri2} that $H^n(M(W), {\mathbb Z})$ is a free abelian group of rank $\prod_{i=1}^n m_i \neq 0$, where $m_1, m_2, \dots, m_n$ are the exponents of $W$, thus $n \le \cd( \pi_1( M(W))) \le \cd(G)$. On the other hand, Salvetti has constructed in \cite{Sal} an aspherical CW-complex of dimension $n$ whose fundamental group is $G$, therefore $\cd (G) \le n$.
$\square$
The next invariant which interests us is denoted by $\mf (G)$ and is defined to be the maximal order of a finite subgroup of $G/Z(G)$, where $Z(G)$ denotes the center of $G$. Its calculation is based on Theorems 3.2 and 3.3 given below.
Recall the permutation $\mu: S \to S$ of Proposition 2.2. This extends to an isomorphism $\mu: G^+ \to G^+$ which permutes the simple elements. Actually, $\mu(a)= \Delta a \Delta^{-1}$ for all $a \in G^+$.
\begin{thm}[Bestvina \cite{Bes}] Assume $\Gamma$ to be connected. Let $\GG= G/ \langle \Delta^2 \rangle$, and let $H$ be a finite subgroup of $\GG$. Then $H$ is a cyclic group, and, up to conjugation, $H$ has one of the following two forms.
\noindent {\bf Type 1:} The order of $H$ is even, say $2p$, and there exists a simple element $a \in T(W)$ such that $a^p=\Delta$, $\mu(a)=a$, and $\overline{a}$ generates $H$, where $\overline{a}$ denotes the element of $\GG$ represented by $a$.
\noindent {\bf Type 2:} The order of $H$ is odd, say $2p+1$, and there exists a simple element $a \in T(W)$ such that $(a\, \mu(a))^{{p-1}\over 2} a = \Delta$ and $\overline{ a\,\mu(a)}$ generates $H$.
$\square$ \end{thm}
Now, recall the so-called {\it Coxeter number} $h$ of $W$ (see \cite{Hum}, Section 3.18). Recall also that this number is related to the length of $\Delta$ by the following formula $$ {{nh}\over 2} = m_1+ \dots +m_n= {\rm lg} (\Delta)\,, $$
where $n=|S|$ is the rank of $G$, and $m_1, \dots, m_n$ are the exponents of $W$.
\begin{thm}[Brieskorn-Saito \cite{BrSa}] Choose any order $S=\{s_1,s_2, \dots, s_n\}$ of $S$ and write $\pi=s_1s_2 \dots s_n \in G$. Let $h$ be the Coxeter number of $W$.
(1) If $\mu = \Id$, then $h$ is even and $\pi^{h \over 2} = \Delta$.
(2) If $\mu \neq \Id$, then $\pi^h= \Delta^2$.
$\square$ \end{thm}
Now, we can calculate the invariant $\mf (G)$.
\begin{prop} Assume $\Gamma$ to be connected, and let $h$ be the Coxeter number of $W$.
(1) If $\mu = \Id$, then $\mf (G)= h/2$.
(2) If $\mu \neq \Id$, then $\mf (G)=h$. \end{prop}
\paragraph{Proof.} Assume $\mu=\Id$. Let $\GG^0= G/Z(G) = G/ \langle \Delta \rangle$. First, observe that $\mf(G) \ge {h \over 2}$ by Theorem 3.3. So, it remains to prove that $\mf(G) \le {h \over 2}$,
namely, that $|H| \le {h \over 2}$ for any finite subgroup $H$ of $\GG^0$.
Let $H$ be a finite subgroup of $\GG^0$. Consider the exact sequence $$ 1 \to {\mathbb Z}/ 2{\mathbb Z} \to \GG \stackrel{\phi}{\rightarrow} \GG^0 \to 1\,, $$ where $\GG= G/ \langle \Delta^2 \rangle$, and set $\tilde H= \phi^{-1}(H)$. By Theorem 3.2, $\tilde H$ is a cyclic group and, up to conjugation, $\tilde H$ is either of Type 1 or of Type 2. The order of $\tilde H$ is even, say $2p$, thus $\tilde H$ is of Type 1, and there exists a simple element $a \in T(W)$ such that $a^p=\Delta$ and $\overline{a}$ generates $\tilde H$. Let $a=s_1 s_2 \dots s_r$ be an expression of $a$, and let $X=\{s_1, s_2, \dots, s_r\}$. We
have $\Delta= a^p \in G_X$, thus, by Corollary~2.6, $X=S$ and $r={\rm lg} (a) \ge |S|=n$. Finally, $$
|H|={|\tilde H| \over 2} = p = {{\rm lg} (\Delta) \over {\rm lg} (a)} = \left( {nh \over 2} \right) /r \le {h \over 2}\,. $$
Now, assume $\mu \neq \Id$. Let $\GG= G/Z(G)= G/\langle \Delta^2 \rangle$. First, observe that $\mf (G) \ge h$ by Theorem 3.3. So, it remains to prove that $\mf (G) \le h$, namely,
that $|H| \le h$ for any finite subgroup $H$ of $\GG$.
Let $H$ be a finite subgroup of $\GG$. By Theorem 3.2, $H$ is cyclic and, up to conjugation, $H$ is either of Type 1 or of Type 2. Let $p$ be the order of $H$. In both cases, Type 1 and Type 2, there exists an element $b \in G^+$ such that $b^p=\Delta^2$ and $\overline{b}$ generates $H$ (take $b=a$ if $H$ is of Type 1, and $b=a\, \mu(a)$ if $H$ is of Type 2). Let $b=s_1 s_2 \dots s_r$ be an expression of $b$, and let $X=\{s_1, s_2, \dots, s_r\}$. We have $\Delta^2=b^p
\in G_X$, thus, by Corollary~2.6, $X=S$ and $r={\rm lg} (b) \ge |S|=n$. It follows that $$
|H| = p = {{\rm lg} (\Delta^2) \over {\rm lg} (b)} = {nh \over r} \le h\,. $$
$\square$
The values of the Coxeter numbers of the irreducible Coxeter groups are well-known (see, for instance, \cite{Hum}, Section 3.18). Applying Proposition 3.4 to these values, one can easily compute the invariant $\mf (G)$ for each irreducible (spherical type) Artin group. The result is given in Table 1.
$$\vbox{
\begin{tabular}{|c||c|c|c|c|c|c|}
\hline
$\Gamma$ & $A_1$ & $A_n,\ n\ge 2$ & $B_n,\ n\ge 2$ & $D_n,\ n\ge 4$, $n$ even & $D_n,\ n\ge 5$, $n$ odd & $E_6$ \\
\hline
$\mf (G)$ & 1 & $n+1$ & $n$ & $n-1$ & $2n-2$ & 12 \\
\hline
\end{tabular}
\par
\begin{tabular}{|c||c|c|c|c|c|c|c|}
\hline
$\Gamma$ & $E_7$ & $E_8$ & $F_4$ & $H_3$ & $H_4$ & $I_2(p),\ p\ge 6$, $p$ even & $I_2(p),\ p\ge 5$, $p$ odd \\
\hline
$\mf (G)$ & 9 & 15 & 6 & 5 & 15 & $p/2$ & $p$ \\
\hline
\end{tabular}
}$$
\centerline{{\bf Table 1:} The invariant $\mf(G)$.}
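As a sanity check, the entries of Table 1 can be recomputed mechanically from Proposition 3.4 once the Coxeter number $h$ and the (non)triviality of $\mu$ are known; the short script below (Python) does this for a few sample types. The Coxeter numbers and the list of types with $\mu \neq \Id$ (namely $A_n$ with $n \ge 2$, $D_n$ with $n$ odd, $E_6$, and $I_2(p)$ with $p$ odd) are taken as input data from \cite{Hum} and Table 1 itself, not derived by the script.
\begin{verbatim}
# Recompute sample entries of Table 1 from Proposition 3.4:
# mf(G) = h/2 if mu = Id, and mf(G) = h otherwise.

def mf(h, mu_trivial):
    """Maximal order of a finite subgroup of G/Z(G)."""
    return h // 2 if mu_trivial else h

# type: (Coxeter number h, mu == Id ?)
examples = {
    "A_1":    (2,  True),
    "A_4":    (5,  False),   # h = n + 1
    "B_5":    (10, True),    # h = 2n
    "D_6":    (10, True),    # h = 2n - 2, n even
    "D_7":    (12, False),   # n odd
    "E_6":    (12, False),
    "E_7":    (18, True),
    "E_8":    (30, True),
    "F_4":    (12, True),
    "H_3":    (10, True),
    "H_4":    (30, True),
    "I_2(8)": (8,  True),    # h = p, p even
    "I_2(7)": (7,  False),   # p odd
}

for name, (h, mu_trivial) in examples.items():
    print(f"{name:7s}  h = {h:2d}   mf(G) = {mf(h, mu_trivial)}")
\end{verbatim}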
\noindent {\bf Remark.} Combining \cite{Bes}, Theorem 4.5, with \cite{BrMi}, Section 3, one can actually compute all the possible orders for a finite subgroup of $G/Z(G)$. The maximal order suffices for our purpose, thus we do not include this more complicated calculation in this paper.
The next invariant that we want to compute is the rank of the abelianization of $G$ that we denote by ${\rm rkAb} (G)$. This invariant can be easily computed using the standard presentation of $G$, and the result is as follows.
\begin{prop} Let $\Gamma_0$ be the (non-labelled) graph defined by the following data.
$\bullet$ $S$ is the set of vertices of $\Gamma_0$;
$\bullet$ two vertices $s,t$ are joined by an edge if $m_{s\,t}$ is odd.
\noindent Then the abelianization of $G$ is a free abelian group of rank ${\rm rkAb} (G)$, the number of connected components of $\Gamma_0$.
$\square$ \end{prop}
The last invariant which interests us is the rank of the center of $G$ that we denote by $\rkZ (G)$. The following proposition is a straightforward consequence of Proposition 2.2.
\begin{prop} The center of $G$ is a free abelian group of rank $\rkZ (G)$, the number of components of $\Gamma$.
$\square$ \end{prop}
\section{Irreducibility}
Throughout this section, we assume that $G$ is irreducible (namely, that $\Gamma$ is connected). Let $H_1, H_2$ be two subgroups of $G$. Recall that $[H_1, H_2]$ denotes the subgroup of $G$ generated by $\{a_1^{-1} a_2^{-1} a_1a_2; a_1 \in H_1\text{ and } a_2 \in H_2\}$. Our goal in this section is to show that $G$ cannot be expressed as $G= H_1 \cdot H_2$ with $[H_1, H_2] = \{1\}$, unless either $H_1 \subset Z(G)$ or $H_2 \subset Z(G)$. This implies that $G$ cannot be a non-trivial direct product.
Recall that $\delta$ denotes the standard generator of $Z(G)$. For $X \subset S$, we denote by $\delta_X$ the standard generator of $G_X$, and, for $a \in G$, we denote by $Z_G(a)$ the centralizer of $a$ in $G$.
\begin{lemma} Let $t \in S$ such that $\Gamma_{S \setminus \{t\}}$ is connected and $\mu(t) \neq t$ if $\mu \neq \Id$. Then $Z_G( \delta_{S \setminus \{t\}})$ is generated by $G_{S \setminus \{t\}} \cup \{ \delta \}$ and is isomorphic to $G_{S \setminus \{t\}} \times \{ \delta \}$. \end{lemma}
\paragraph{Proof.} Assume first that $\mu=\Id$ (in particular, $\delta=\Delta$). By \cite{Par1}, Theorem 5.2, $Z_G(\delta_{S\setminus \{t\}})$ is generated by $G_{S \setminus \{t\}} \cup \{ \Delta^2, \Delta \Delta_{S \setminus \{t\}}^{-1}\}$, thus $Z_G(\delta_{S \setminus \{t\}})$ is generated by $G_{S \setminus \{t\}} \cup \{ \delta \}$.
Now, assume $\mu \neq \Id$ (in particular, $\delta = \Delta^2$ and $\mu(t) \neq t$). By \cite{Par1}, Theorem 5.2, $Z_G(\delta_{S \setminus \{t\}})$ is generated by $G_{S \setminus \{t\}} \cup \{ \Delta^2, \Delta \Delta_{S \setminus \{\mu(t)\}}^{-1} \Delta \Delta_{S \setminus \{t\}}^{-1} \}$. Observe that \break $\Delta \Delta_{S \setminus \{\mu(t)\}}^{-1} \Delta \Delta_{S \setminus \{t\}}^{-1} = \Delta^2 \Delta_{S \setminus \{t\}}^{-2}$, thus $Z_G(\delta_{S \setminus \{t\}})$ is generated by $G_{S \setminus \{t\}} \cup \{\delta\}$.
By the above, we have an epimorphism $G_{S \setminus \{t\}} \times \langle \delta \rangle \to Z_G( \delta_{S \setminus \{t\}})$, and, by Corollary~2.6, the kernel of this epimorphism is $\{1\}$.
$\square$
\noindent {\bf Remark.} It is an easy exercise to show (under the assumption that $\Gamma$ is connected) that there always exists $t \in S$ such that $\Gamma_{S \setminus \{t\}}$ is connected and $\mu(t) \neq t$ if $\mu \neq \Id$.
\begin{prop} Let $H_1, H_2$ be two subgroups of $G$ such that $G=H_1 \cdot H_2$ and $[H_1,H_2]=\{1\}$. Then either $H_1 \subset Z(G)$ or $H_2 \subset Z(G)$. If, moreover, $H_1 \cap H_2 = \{1\}$, then either $H_1=\{1\}$ and $H_2=G$, or $H_1=G$ and $H_2= \{1\}$. \end{prop}
\paragraph{Proof.} We argue by induction on $n= |S|$. If $n=1$, then $\Gamma= A_1$ and $G={\mathbb Z}$, and the conclusion of the proposition is well-known.
Assume $n \ge 2$. For $i=1,2$, let $\tilde H_i$ denote the subgroup of $G$ generated by $H_i \cup \{ \delta\}$. We have $G= \tilde H_1 \cdot \tilde H_2$, $[\tilde H_1, \tilde H_2] = \{1\}$, $H_1 \subset \tilde H_1$, and $H_2 \subset \tilde H_2$. Observe also that $\tilde H_1 \cap \tilde H_2$ must be included in the center of $G$, and that $\delta \in \tilde H_1 \cap \tilde H_2$, thus $\tilde H_1 \cap \tilde H_2 = \langle \delta \rangle$. Take $t \in S$ such that $\Gamma_{S \setminus \{t\}}$ is connected and $\mu(t) \neq t$ if $\mu \neq \Id$, write $X=S \setminus \{t\}$, and choose $d_1 \in \tilde H_1$ and $d_2\in \tilde H_2$ such that $\delta_X=d_1d_2$.
Let $a \in G_X$. Choose $a_1 \in \tilde H_1$ and $a_2 \in \tilde H_2$ such that $a=a_1a_2$. We have $$ 1= a^{-1} \delta_X^{-1} a \delta_X = a_1^{-1} d_1^{-1} a_1d_1 a_2^{-1} d_2^{-1} a_2d_2\,, $$ thus $$ a_1^{-1} d_1^{-1} a_1d_1 = d_2^{-1} a_2^{-1} d_2a_2 \in \tilde H_1 \cap \tilde H_2 = \langle \delta \rangle \,. $$ Let $k \in {\mathbb Z}$ such that $a_1^{-1} d_1^{-1} a_1d_1 = \delta^k$. Consider the homomorphism $\deg: G \to {\mathbb Z}$ which sends $s$ to $1$ for all $s \in S$. Then $$ 0= \deg( a_1^{-1} d_1^{-1} a_1d_1) = \deg( \delta^k)= k\,{\rm lg} (\delta)\,, $$ thus $k=0$, hence $a_1$ and $d_1$ commute. Now, $a_1$ and $d_2$ also commute (since $a_1 \in \tilde H_1$ and $d_2 \in \tilde H_2$), thus $a_1$ commutes with $\delta_X = d_1d_2$. By Lemma 4.1, $a_1$ can be written as $a_1= b_1 \delta^{p_1}$, where $b_1 \in G_X$ and $p_1 \in {\mathbb Z}$. Note also that $b_1 = a_1 \delta^{-p_1} \in \tilde H_1$, since $\delta \in \tilde H_1$, thus $b_1 \in G_X \cap \tilde H_1$. Similarly, $a_2$ can be written as $a_2=b_2 \delta^{p_2}$ where $b_2 \in G_X \cap \tilde H_2$ and $p_2 \in {\mathbb Z}$. We have $\delta^{p_1+p_2} = a b_1^{-1} b_2^{-1} \in G_X \cap \langle \delta \rangle = \{1\}$ (by Corollary 2.6), thus $p_1+p_2=0$ and $a=b_1b_2$.
So, we have $$ G_X= (G_X \cap \tilde H_1) \cdot (G_X \cap \tilde H_2)\,. $$ Moreover, by Corollary 2.6, $$ (G_X \cap \tilde H_1) \cap (G_X \cap \tilde H_2) = G_X \cap \langle \delta \rangle = \{1\}\,. $$ By the inductive hypothesis, it follows that, up to permutation of 1 and 2, we have $G_X \cap \tilde H_1 = G_X$ (namely, $G_X \subset \tilde H_1$), and $G_X \cap \tilde H_2 = \{1\}$.
We turn now to show that $\tilde H_2 \subset \langle \delta \rangle = Z(G)$. Since $H_2 \subset \tilde H_2$, this shows that $H_2 \subset Z(G)$.
Let $a \in \tilde H_2$. Since $\delta_X \in G_X \subset \tilde H_1$, $a$ and $\delta_X$ commute. By Lemma 4.1, $a$ can be written as $a=b \delta^p$, where $b \in G_X$ and $p \in {\mathbb Z}$. Since $\delta \in \tilde H_2$, we also have $b = a \delta^{-p} \in \tilde H_2$, thus $b \in G_X \cap \tilde H_2 = \{1\}$, therefore $a = \delta^p \in \langle \delta \rangle$.
Now, assume that $H_1 \cap H_2=\{1\}$. By the above, we may suppose that $H_2 \subset Z(G)= \langle \delta \rangle$. In particular, there exists $k \in {\mathbb Z}$ such that $H_2 = \langle \delta^k \rangle$. Choose any order $S=\{s_1, \dots, s_n\}$ of $S$, and write $\pi=s_1s_2 \dots s_n \in G$. Let $b \in H_1$ and $p \in {\mathbb Z}$ such that $\pi= b \delta^{pk}$. Observe that $b \neq 1$ since $\pi$ is not central in $G$. Let $h$ denote the Coxeter number of $W$. By Theorem~3.3, $\pi^h= b^h \delta^{phk} \in Z(G)$, thus $b^h \in Z(G)$. Moreover, $b^h \neq 1$ since $G$ is torsion free and $b \neq 1$. This implies that $Z(H_1)\neq\{1\}$. Now, observe that $Z(H_1) \subset Z(G)= \langle \delta \rangle$, thus there exists $l >0$ such that $Z(H_1)=\langle \delta^l \rangle$. Finally, $\delta^{lk} \in H_1 \cap H_2 = \{1\}$, thus $kl=0$, therefore $k=0$ (since $l\neq 0$) and $H_2=\{1\}$. Then we also have $H_1=G$.
$\square$
\begin{prop}
Assume $n=|S| \ge 2$. Let $H$ be a subgroup of $G$ such that $G=H \cdot \langle \delta \rangle$. Then $\cd (H)= \cd (G)$, $\mf(H) = \mf(G)$, and ${\rm rkAb} (H)= {\rm rkAb}(G)$. \end{prop}
\paragraph{Proof.} For all $s \in S$, take $b_s \in H$ and $p_s \in {\mathbb Z}$ such that $s=b_s \delta^{p_s}$. We can and do suppose that $p_s=p_t$ if $s$ and $t$ are conjugate in $G$. Then the mapping $S \to H$, $s \mapsto b_s=s\delta^{-p_s}$ determines a homomorphism $\varphi: G \to H$.
We show that $\varphi: G \to H$ is injective. Observe that the mapping $S \to {\mathbb Z}$, $s \mapsto p_s$ determines a homomorphism $\eta: G \to {\mathbb Z}$, and that $\varphi(a)= a \delta^{-\eta (a)}$ for all $a \in G$. In particular, if $a \in \Ker \varphi$, then $a=\delta^{\eta(a)} \in Z(G)$. Choose any order $S=\{s_1, \dots, s_n\}$ of $S$, and write $\pi=s_1s_2 \dots s_n \in G$. Note that $\varphi(\pi) \neq 1$, since $\pi$ is not central in $G$, and that, by Theorem 3.3, there exists $k>0$ such that $\pi^k = \delta$. Let $a \in \Ker \varphi$. Then $a = \delta^{\eta (a)} = \pi^{k \eta(a)}$, thus $1=\varphi(a)= \varphi(\pi)^{k \eta(a)}$. We have $\varphi(\pi) \neq 1$ and $G$ is torsion free, hence $\eta(a) =0$ (since $k>0$) and $a=1$.
Now, recall that $\cd (H_1) \le \cd (H_2)$ if $H_1$ is a subgroup of a given group $H_2$. So, $$ \cd (G)= \cd (\varphi(G)) \le \cd(H) \le \cd (G)\,. $$
The equality $G=H \cdot \langle \delta \rangle = H \cdot Z(G)$ implies that $Z(H)= Z(G) \cap H$ and $G/Z(G)= H/Z(H)$. In particular, we have $\mf(H)= \mf(G)$.
Let ${\cal H}$ be a group, let $g$ be a central element in ${\cal H}$, and let $p>0$. Let $\GG= ({\cal H} \times {\mathbb Z})/ \langle (g,p) \rangle$. Then one can easily verify (using the Reidemeister-Schreier method, for example) that we have exact sequences $1 \to {\cal H} \to \GG \to {\mathbb Z}/p{\mathbb Z} \to 1$ and $1 \to \Ab ({\cal H}) \to \Ab (\GG) \to {\mathbb Z}/p{\mathbb Z} \to 1$, where $\Ab (\GG)$ (resp. $\Ab({\cal H})$) denotes the abelianization of $\GG$ (resp. ${\cal H}$).
Now, recall the equality $G=H \cdot \langle \delta \rangle$. By Proposition 4.2, we have $H \cap \langle \delta \rangle \neq \{1\}$. So, there exists $p>0$ such that $H \cap \langle \delta \rangle = \langle \delta^p \rangle$. Write $d=\delta^p \in H$. Then $d$ is central in $H$ and $G \simeq (H \times {\mathbb Z})/\langle (d,p) \rangle$. By the above observation, it follows that we have an exact sequence $1 \to \Ab (H) \to \Ab (G) \to {\mathbb Z}/p{\mathbb Z} \to 1$, thus $\Ab (H)$ is a free abelian group of rank ${\rm rkAb} (G)$.
$\square$
\section{Proof of the main theorem}
\begin{prop} Let $\Gamma$ and $\Omega$ be two connected spherical type Coxeter graphs, and let $G$ and $H$ be the Artin groups associated to $\Gamma$ and $\Omega$, respectively. If $\cd (G)= \cd (H)$, $\mf (G)= \mf (H)$, and ${\rm rkAb}(G)= {\rm rkAb}(H)$, then $\Gamma= \Omega$. \end{prop}
\paragraph{Proof.} Let $n$ and $m$ be the numbers of vertices of $\Gamma$ and $\Omega$, respectively. By Proposition~3.1, we have $n= \cd(G)= \cd(H) = m$.
Suppose $n=m=1$. Then $\Gamma= \Omega= A_1$.
Suppose $n=m \ge 3$. Then one can easily verify in Table 1 that the equality $\mf (G)= \mf (H)$ implies $\Gamma= \Omega$.
Suppose $n=m=2$. Let $p,q \ge 3$, such that $\Gamma= I_2(p)$ and $\Omega= I_2(q)$. By Proposition~3.5, either ${\rm rkAb}(G)={\rm rkAb}(H)=2$ and $p,q$ are both even, or ${\rm rkAb}(G)= {\rm rkAb}(H)=1$ and $p,q$ are both odd. If $p,q$ are both even, then, by Table 1, ${p \over 2} = \mf(G) = \mf(H)= {q \over 2}$, thus $p=q$ and $\Gamma=\Omega= I_2(p)$. If $p,q$ are both odd, then, by Table 1, $p= \mf (G)= \mf (H)=q$, thus $\Gamma=\Omega= I_2(p)$.
$\square$
\begin{corollary} Let $\Gamma$ and $\Omega$ be two connected spherical type Coxeter graphs, and let $G$ and $H$ be the Artin groups associated to $\Gamma$ and $\Omega$, respectively. If $G$ is isomorphic to $H$, then $\Gamma=\Omega$.
$\square$ \end{corollary}
\noindent {\bf Proof of Theorem 1.1.} Let $\Gamma$ and $\Omega$ be two spherical type Coxeter graphs, and let $G$ and $H$ be the Artin groups associated to $\Gamma$ and $\Omega$, respectively. We assume that $G$ is isomorphic to $H$ and turn to prove that $\Gamma=\Omega$.
Let $\Gamma_1, \dots, \Gamma_p$ be the connected components of $\Gamma$, and let $\Omega_1, \dots, \Omega_q$ be the connected components of $\Omega$. For $i=1, \dots, p$, we denote by $G_i$ the Artin group associated to $\Gamma_i$, and, for $j=1, \dots, q$, we denote by $H_j$ the Artin group associated to $\Omega_j$. We have $G= G_1 \times G_2 \times \dots \times G_p$ and $H=H_1 \times H_2 \times \dots \times H_q$. We may and do assume that there exists $x \in \{0,1, \dots, p\}$ such that $\Gamma_i \neq A_1$ for $i=1, \dots, x$, and $\Gamma_i=A_1$ for $i=x+1, \dots, p$. So, $G_1, \dots, G_x$ are non abelian irreducible Artin groups of rank $\ge 2$, and $G_{x+1}, \dots, G_p$ are all isomorphic to ${\mathbb Z}}\def\N{{\mathbb N}} \def\C{{\mathbb C}$. Similarly, we may and do assume that there exists $y \in \{0,1, \dots, q\}$ such that $\Omega_j \neq A_1$ for $j=1, \dots, y$, and $\Omega_j=A_1$ for $j=y+1, \dots, q$. We can also assume that $x \ge y$.
A first observation is, by Proposition 3.6, that $$ p= \rkZ(G)= \rkZ(H)= q\,. $$
Now, fix an isomorphism $\varphi: G \to H$. For $1 \le i\le p$, let $\iota_i: G_i \to G$ be the natural embedding, for $1 \le j\le p$, let $\kappa_j: H \to H_j$ be the projection on the $j$-th component, and, for $1 \le i,j \le p$, let $\varphi_{i\,j} = \kappa_j \circ \varphi \circ \iota_i: G_i \to H_j$.
Let $j\in \{1, \dots, y\}$. Observe that $H_j= \prod_{i=1}^p \varphi_{i\,j}(G_i)$, and that $[\varphi_{i\,j}(G_i), \varphi_{k\,j}(G_k)]=1$ for all $i,k \in \{1, \dots, p\}$, $i \neq k$. Let $\delta_j^H$ denote the standard generator of $Z(H_j)$, and, for $i\in\{1, \dots, p\}$, let $\tilde H_{i\,j}$ be the subgroup of $H_j$ generated by $\varphi_{i\,j}(G_i) \cup \{\delta_j^H\}$. By Proposition~4.2, there exists $\chi (j) \in \{1, \dots, p\}$ such that $H_j= \tilde H_{\chi(j)\,j}$, and $\tilde H_{i\,j} = Z(H_j)= \langle \delta_j^H \rangle$ for $i \neq \chi(j)$. Since $H_j$ is non abelian, $\chi(j)$ is unique and $\chi(j)\in \{1, \dots, x\}$.
We turn now to show that the map $\chi: \{1, \dots, y\} \to \{1, \dots, x\}$ is surjective. Since $x \ge y$, it follows that $x=y$ and $\chi$ is a permutation.
Let $i \in \{1, \dots, x\}$ such that $\chi(j) \neq i$ for all $j \in \{1, \dots, y\}$. Then $\varphi_{i\,j} (G_i) \subset Z(H_j)$ for all $j=1, \dots, p$, thus $\varphi(G_i) \subset Z(H)$. This contradicts the fact that $\varphi$ is injective and $G_i$ is non abelian.
So, up to renumbering the $\Gamma_i$'s, we can suppose that $\chi(i)=i$ for all $i\in \{1, \dots, x\}$.
We prove now that $\varphi_{i\,i}: G_i \to H_i$ is injective for all $i \in \{1, \dots, x\}$. Let $a \in \Ker \varphi_{i\,i}$. Since $\varphi_{i\,j} (a) \in Z(H_j)$ for all $j \neq i$, we have $\varphi(a) \in Z(H)$. Since $\varphi$ is injective, it follows that $a \in Z(G_i)$. Let $\{s_1, \dots, s_r\}$ be the set of vertices of $\Gamma_i$, and let $\pi= s_1s_2 \dots s_r \in G_i$. Observe that $\varphi_{i\,i}(\pi) \neq 1$ since $\pi$ is not central in $G_i$. Let $\delta_i^G$ be the standard generator of $Z(G_i)$. By Theorem 3.3, there exists $k >0$ such that $\pi^k= \delta_i^G$. On the other hand, since $a \in Z(G_i)$, there exists $l \in {\mathbb Z}$ such that $a= (\delta_i^G)^l= \pi^{kl}$. Now, $1= \varphi_{i\,i}(a)= \varphi_{i\,i}(\pi)^{kl}$, $H_i$ is torsion free, and $\varphi_{i\,i}(\pi) \neq 1$, thus $kl=0$ and $a=\pi^{kl}=1$.
Let $i \in \{1, \dots, x\}$. Recall that $\varphi_{i\,i}: G_i \to H_i$ is injective, and $H_i= \varphi_{i\,i} (G_i) \cdot \langle \delta_i^H \rangle$, where $\delta_i^H$ denotes the standard generator of $Z(H_i)$. By Proposition 4.3, it follows that $$ \cd(G_i)= \cd(H_i)\,,\quad \mf(G_i) =\mf(H_i)\,,\quad {\rm rkAb}(G_i)= {\rm rkAb}(H_i)\,, $$ thus, by Proposition 5.1, $\Gamma_i=\Omega_i$. Let $i \in \{x+1, \dots, p\}$. Then $\Gamma_i= \Omega_i = A_1$. So, $\Gamma= \Omega$.
$\square$
\noindent {\bf Remark.} In the proof above, the homomorphism $\varphi_{i\,i}$ is injective but is not necessarily surjective as we show in the following example.
Let $G_1= \langle s_1,s_2 | s_1s_2s_1= s_2s_1s_2 \rangle$ be the Artin group associated to $A_2$, let $G_2={\mathbb Z}= \langle t \rangle$, and let $G=G_1 \times G_2$. We denote by $\delta= (s_1s_2)^3$ the standard generator of $Z(G_1)$. Let $\varphi: G \to G$ be the homomorphism defined by $$ \varphi(s_1)= s_1 \delta t\,, \quad \varphi(s_2)= s_2 \delta t \,, \quad \varphi(t)= \delta t \,. $$ Then $\varphi$ is an isomorphism but $\varphi_{1\,1}$ is not surjective. The inverse $\varphi^{-1}: G \to G$ is determined by $$ \varphi^{-1} (s_1)= s_1 t^{-1}\,, \quad \varphi^{-1} (s_2)= s_2 t^{-1}\,, \quad \varphi^{-1} (t)= \delta^{-1} t^7\,. $$
\noindent \halign{#
\cr Luis Paris\cr Institut de Math\'ematiques de Bourgogne\cr Universit\'e de Bourgogne\cr UMR 5584 du CNRS, BP 47870\cr 21078 Dijon cedex\cr FRANCE\cr \noalign{
} \texttt{[email protected]}\cr}
\end{document}
\begin{document}
\title{Memory for Light as a Quantum Process}
\author{M. Lobino,$^1$ C. Kupchak,$^1$ E. Figueroa,$^{1,2}$ and A. I. Lvovsky$^{1,}$}
\email{[email protected]}
\affiliation{$^1$ Institute for Quantum Information Science, University of Calgary, Calgary, Alberta T2N 1N4, Canada}
\affiliation{$^2$ Max-Planck-Institut f{\"u}r Quantenoptik, Hans-Kopfermann-Str.\ 1, 85748 Garching, Germany}
\begin{abstract} We report complete characterization of an optical memory based on electromagnetically induced transparency. We recover the superoperator associated with the memory, under two different working conditions, by means of a quantum process tomography technique that involves storage of coherent states and their characterization upon retrieval. In this way, we can predict the quantum state retrieved from the memory for any input, for example, the squeezed vacuum or the Fock state. We employ the acquired superoperator to verify the nonclassicality benchmark for the storage of a Gaussian distributed set of coherent states. \end{abstract}
\pacs{42.50.Ex, 03.67.-a, 32.80.Qk, 42.50.Dv}
\maketitle
\paragraph{Introduction} Quantum memory for light is an essential technology for long distance quantum communication \cite{DLCZ} and for any future optical quantum information processor. Recently, several experiments have shown the possibility to store and retrieve nonclassical states of light such as the single photon \cite{KuzmichSingle,LukinSingle}, entangled \cite{KimbleEntang} and squeezed vacuum \cite{KozumaStorage,LvovskyStorage} states using coherent interactions with an atomic ensemble.
In order to evaluate the applicability of a quantum memory apparatus for practical quantum communication and computation, it is insufficient to know its performance for specific, however complex, optical states, because in different protocols, different optical states are used for encoding quantum information \cite{DLCZ,Lloyd}. Practical applications of memory require answering a more general question: how will an \emph{arbitrary} quantum state of light be preserved after storage in a memory apparatus?
Here we answer this question by performing complete characterization of the quantum process associated with optical memory based on electromagnetically induced transparency (EIT) \cite{FleischhauerReview}. Memory characterization is achieved by storing coherent states (i.e., highly attenuated laser pulses) of different amplitudes and subsequently measuring the quantum states of the retrieved pulses. Based on the acquired information, the retrieved state for any arbitrary input can be predicted and additionally, any theoretical benchmark on quantum memory performance can be readily verified.
\paragraph{Coherent state quantum process tomography} We can define complete characterization of an optical quantum memory as the ability to predict the retrieved quantum state $\hat{\mathcal{E}}(\hat\rho)$ when the stored input state $\hat\rho$ is known. This is a particular case of the quantum ``black box" problem, which is approached through a procedure called quantum process tomography (QPT) \cite{MohseniQPT}. \begin{figure}
\caption{(color online). Schematic of the experimental setup used
to characterize the process associated with the quantum memory.
PBS, polarizing beam splitter.}
\label{fig.1}
\end{figure} QPT is based on the fact that every quantum process (in our case, optical memory) is a linear map on the linear space $\mathbb{L}(\mathbb{H})$ of density matrices over the Hilbert space $\mathbb{H}$ on which the process is defined. The associated process can thus be characterized by constructing a spanning set of ``probe" states in $\mathbb{L}(\mathbb{H})$ and subjecting each of them to the action of the quantum ``black box". If we measure the process output $\hat{\mathcal{E}}(\hat\rho_i)$ for each member $\hat\rho_i$ of this spanning set, we can calculate the process output for any other state $\hat\rho=\sum_i a_i\hat\rho_i$ according to \begin{equation}
\hat{\mathcal{E}}(\hat\rho)=\sum_i a_i\hat{\mathcal{E}}(\hat\rho_i).
\label{Eq.linearity} \end{equation}
The challenge associated with this approach is the construction of the appropriate spanning set, given the infinite dimension of the optical Hilbert space and the lack of techniques for universal optical state preparation. For this reason, characterizing a memory for light, which is not limited to the qubit subspace, is much more difficult than characterizing a memory for superconducting qubits, which has been reported recently \cite{NeeleyQPT}. Our group has recently developed a process characterization technique that overcomes these challenges
\cite{LobinoQPT}. Any density matrix $\hat\rho$ of a quantum optical state can be written as a linear combination of density matrices of coherent states $|\alpha\rangle$ according to the optical equivalence theorem \begin{equation}
\hat\rho=2\int P_{\hat\rho}(\alpha)|\alpha\rangle\langle\alpha|d^2\alpha,
\label{Eq.Pfunction} \end{equation} where $P_{\hat\rho}(\alpha)$ is the state's Glauber-Sudarshan P-function and the integration is performed over the entire complex plane. Although the P-function is generally highly singular, any quantum state can be arbitrarily well approximated by a state with an infinitely smooth, rapidly decreasing P-function \cite{Klauder}. Therefore, by measuring how the process affects coherent states, one can predict its effect on any other state. The advantage of such approach (which we call coherent-state quantum process tomography or csQPT) is that it permits complete process reconstruction using a set of ``probe" states that are readily available from a laser.
\paragraph{Experimental setup} We performed csQPT on optical memory \cite{LvovskyStorage} realized in a warm rubidium vapor by means of electromagnetically-induced transparency (Fig.~\ref{fig.1}). The atoms are $^{87}$Rb and the vapor temperature is kept constant at 65$^\circ$C.
The signal field is resonant with the $|^5 S_{1/2}, F=1
\rangle\leftrightarrow|^5 P_{1/2}, F=1\rangle$ transition at 795 nm and is produced by a continuous-wave Ti:Sapphire laser. An external cavity diode laser, phase locked at 6834.68 MHz to the signal laser
\cite{AppelPhaselock} serves as the EIT control field source, and is resonant with the $|^5 S_{1/2}, F=2 \rangle\leftrightarrow|^5 P_{1/2}, F=1\rangle$ transition. The fields are red detuned from resonance by 630 MHz in order to improve the storage efficiency. The control field power is 5 mW and the beam spatial profile is mode matched with the signal beam to a waist of 0.6 mm inside the rubidium cell. Signal and control fields are orthogonally polarized; they are mixed and separated using polarizing beam splitters.
The two photon detuning $\Delta_2$ between the signal and control fields is modified by varying the frequency of the control field laser through the phase lock circuit, while an acousto-optical modulator (AOM) is used to switch on and off the control field intensity. We analyzed two different operative conditions characterized by $\Delta_2$ = 0 and 0.54 MHz.
The input pulse is obtained by chopping the continuous-wave signal beam via an AOM to produce 1 $\mu$s pulses [Fig.~\ref{fig.2}(c)] with a 100 kHz repetition rate. A second AOM is used to compensate for the frequency shift generated by the first. Transfer of the light state into the atomic ground state superposition (atomic spin wave) is accomplished by switching the control field off for the storage duration of $\tau=1\ \mu$s when the input pulse is inside the rubidium cell.
We performed full state reconstruction of both the input and retrieved fields by time domain homodyne tomography \cite{LvovskyReview}. A part of the Ti:Sapphire laser beam serves as a local oscillator for homodyne detection; while its phase is scanned via a piezoelectric transducer, the homodyne current is recorded with an oscilloscope. For every state, 50000 samples of phase and quadrature are measured and processed by the maximum likelihood algorithm \cite{LvovskyMaxLik,LvovskyMaxLik2}, estimating the state density matrix in the Fock basis.
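For completeness, a minimal sketch of the iterative maximum-likelihood reconstruction applied to such phase/quadrature samples is given below (Python/NumPy, using the $R\rho R$ iteration of the algorithm cited above). The Fock-space truncation, the seed and the iteration count are illustrative choices rather than the settings used for the data reported here; quadratures are scaled so that the vacuum variance equals $1/2$.
\begin{verbatim}
import numpy as np
from math import factorial, pi, sqrt
from numpy.polynomial.hermite import hermval

def fock_overlaps(x, theta, n_max):
    """Overlaps <n|x_theta> of Fock states with the quadrature eigenstate."""
    v = np.empty(n_max + 1, dtype=complex)
    for n in range(n_max + 1):
        coeff = np.zeros(n + 1)
        coeff[n] = 1.0
        h_n = hermval(x, coeff)                       # physicists' Hermite H_n(x)
        v[n] = (np.exp(1j * n * theta) * h_n * np.exp(-x**2 / 2)
                / sqrt(2.0**n * factorial(n) * sqrt(pi)))
    return v

def iterative_maxlik(samples, n_max=10, iterations=300):
    """Estimate rho from samples [(theta_1, x_1), ...] via rho -> N[R rho R]."""
    dim = n_max + 1
    rho = np.eye(dim, dtype=complex) / dim            # maximally mixed seed
    vs = [fock_overlaps(x, th, n_max) for th, x in samples]
    for _ in range(iterations):
        R = np.zeros((dim, dim), dtype=complex)
        for v in vs:
            prob = np.real(np.conj(v) @ rho @ v)      # Tr(rho |x,theta><x,theta|)
            R += np.outer(v, np.conj(v)) / max(prob, 1e-12)
        rho = R @ rho @ R
        rho /= np.real(np.trace(rho))
    return rho
\end{verbatim}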
\begin{figure}\label{fig.2}
\end{figure}
\paragraph{Tomography of quantum memory} In order to determine the coherent state mapping necessary for reconstructing the process, we measured 10 different coherent states
$|\alpha_i\rangle$ with mean photon numbers ranging from 0 to 285 along with their corresponding retrieved states
$\hat{\mathcal{E}}(|\alpha_i\rangle\langle\alpha_i|)$ [Fig.~\ref{fig.2}(a) and (b)]. Subsequently, we applied polynomial interpolation to determine the value of $\hat{\mathcal{E}}(|\alpha\rangle\langle\alpha|)$ for any value of $\alpha$
in the range 0 to 16.9. Performing tomographic reconstruction for these highly displaced states requires good phase stability between the signal and local oscillator. Phase fluctuations produce an artefact in the reconstruction in the form of amplitude dependent increase in the phase quadrature variance. In our measurements, the reconstructed input states $| \alpha_i \rangle$ resemble theoretical coherent states with a fidelity higher than $0.999$ for mean photon values up to 150 [Fig.~\ref{fig.2}(a) and (b)].
By inspecting the Wigner functions of the input and retrieved states, one can clearly notice the detrimental effects of the memory. First, there is attenuation of the amplitude by a factor of $0.41\pm0.01$ for the signal field in two-photon resonance with the control, which increases to a factor of $0.33\pm0.02$ when a two-photon detuning of $\Delta_2$ = 0.54 MHz is introduced. This corresponds to a mean photon number attenuation by factors of $0.17\pm0.02$ and $0.09\pm0.01$, respectively. Note that in the case of nonzero two-photon detuning, the attenuation is greater than the factor of 0.14 obtained in classical intensity measurement [Fig.~ \ref{fig.2}(c)]. This is because the temporal mode of the retrieved state is slightly chirped, and could not be perfectly matched to the mode of the local oscillator.
\begin{figure}
\caption{(color online).
The diagonal elements of the process tensor $\chi_{kk}^{mm}$,
measured by csQPT in the Fock basis for $\Delta_2$ = 0 (a) and 0.54 MHz (b).}
\label{fig.1n}
\end{figure}
Second, retrieved coherent states experience an increase in the phase quadrature variance that depends quadratically on the state amplitude. This effect produces an ellipticity in the retrieved state Wigner function [Fig.~\ref{fig.2}(a) and (b)] and can be attributed to the noise in the phase lock between the signal and control lasers \cite{AppelPhaselock}. Fluctuations $\Delta\phi$ of the relative phases between the two interacting fields randomize the phase of the retrieved signal field with respect to the local oscillator. Assuming a Gaussian distribution for $\Delta\phi$ with zero mean and variance $\sigma^2_\phi$ the variance of the phase quadrature can be expressed as: \begin{equation}
\sigma^2_q=\frac{1}{2}+\frac{q_0^2}{2}\left(1-e^{-2\sigma^2_\phi}\right),
\label{Eq.variance} \end{equation} where $q_0$ is the mean amplitude. We fit our experimental data with Eq.~(\ref{Eq.variance}) and estimate an $11^\circ$ standard deviation for $\Delta\phi$ [Fig.~\ref{fig.2}(d)], in agreement with independent estimates \cite{AppelPhaselock}.
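A quick numerical illustration of Eq.~(\ref{Eq.variance}) with the fitted $11^\circ$ phase jitter is given below (Python; the amplitude values are chosen arbitrarily for illustration, in units where the vacuum variance is $1/2$).
\begin{verbatim}
import numpy as np

sigma_phi = np.deg2rad(11.0)        # fitted standard deviation of the phase noise
q0 = np.array([0.0, 6.0, 12.0, 18.0, 24.0])   # illustrative mean amplitudes
var_q = 0.5 + 0.5 * q0**2 * (1.0 - np.exp(-2.0 * sigma_phi**2))
for a, v in zip(q0, var_q):
    print(f"q0 = {a:5.1f}   phase-quadrature variance = {v:7.3f}")
\end{verbatim}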
The third detrimental effect preventing the atomic ensemble from behaving as a perfect memory is the population exchange between atomic ground states
\cite{HetetMemory,FigueroaSlowlight}. Besides limiting the memory lifetime, this exchange generates spontaneously emitted photons in the signal field mode adding an extra noise that thermalizes the stored light by increasing the quadrature variance independently of the input amplitude and phase. We measured the extra noise from the quadrature variance of retrieved vacuum states $\hat{\mathcal{E}}(|0\rangle\langle 0|)$ and found it to equal 0.185 dB when both fields were tuned exactly at the two photon resonance, which corresponds to the mean photon number in the retrieved mode equal to $\overline{n}=Tr\left[\hat{n}
\hat{\mathcal{E}}(|0\rangle\langle 0|) \right]=0.022$. This noise is reduced to 0.05 dB (corresponding to $\overline{n}=0.005$ ) in the presence of two photon detuning. For this reason, it is beneficial to implement storage of squeezed light in the presence of two-photon detuning, in spite of higher losses.
\begin{figure}
\caption{(color online). Comparison of the experimentally
measured squeezed vacuum states retrieved from the quantum
memory and those predicted with csQPT. For each case, the
Wigner function and the quadrature variance as a function of
the local oscillator phase are shown. (a), Experimental
measurement \cite{LvovskyStorage} with $\Delta_2$ = 0.54 MHz. (b), Prediction with $\Delta_2$ = 0.54 MHz.
(c), Experimental measurement with $\Delta_2$ = 0. (d), Prediction with $\Delta_2$ = 0.
}
\label{fig.3}
\end{figure}
In the presence of the two-photon detuning, the evolution of the atomic ground state superposition brings about a phase shift of the retrieved state with respect to the input by $2\pi\Delta_2\tau$ = $200^\circ$ as is visible in Fig.~\ref{fig.2}(a).
Based on the information collected from the storage of coherent states, we reconstruct the memory process in the $\chi$-matrix representation, defined by \cite{NielsenBook,Chuang} \begin{equation}
\hat{\mathcal{E}}(\hat\rho)=\sum_{k,l,m,n}\chi^{n,m}_{k,l}A_{l,n}\hat\rho A_{m,k},
\label{Eq.chi} \end{equation} where $\chi^{n,m}_{k,l}$ is the rank 4 tensor comprising full information about the process and ${A_{i,j}}$ is a set of operators that form a basis in the space of operators on $\mathbb{H}$. Since
$\mathbb{H}$ is the Hilbert space associated with an electromagnetic oscillator, it is convenient to choose $A_{i,j}=|i\rangle\langle j|$, where $|i\rangle$ and $|j\rangle$ are the photon number states. The details of calculating the process tensor are described elsewhere \cite{LobinoQPT}; Fig.~\ref{fig.1n} displays the diagonal subset $\chi^{m,m}_{k,k}$ of the process tensor elements.
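With the basis choice $A_{i,j}=|i\rangle\langle j|$, applying the reconstructed tensor to an arbitrary Fock-basis density matrix reduces to a single index contraction, since $A_{l,n}\hat\rho A_{m,k}=\rho_{n,m}|l\rangle\langle k|$. A minimal sketch follows (Python/NumPy; the storage order of the indices of $\chi$ is an assumption of the sketch).
\begin{verbatim}
import numpy as np

def apply_process(chi, rho):
    """Apply the chi-matrix decomposition above, assuming chi[n, m, k, l].

    Since |l><n| rho |m><k| = rho[n, m] |l><k|, the output matrix is
    E(rho)[l, k] = sum over n, m of chi[n, m, k, l] * rho[n, m].
    """
    return np.einsum('nmkl,nm->lk', chi, rho)
\end{verbatim}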
\paragraph{Performance tests} In order to verify the accuracy of our process reconstruction, we have used it to calculate the effect of storage on squeezed vacuum with $\Delta_2$ = 0.54 MHz, as studied in a recent experiment of our group \cite{LvovskyStorage}, and with $\Delta_2$ = 0 MHz. We applied the superoperator tensor measured with csQPT to the squeezed vacuum produced by a subthreshold optical parametric amplifier with a noise reduction in the squeezed quadrature of $-1.86$ dB and noise amplification in the orthogonal quadrature of $5.38$ dB (i.e. the same state as used as the memory input in Ref.~\cite{LvovskyStorage}). In this way, we obtained a prediction for the state retrieved from the memory, which we then compared with the results of direct experiments. This comparison yields quantum mechanical fidelities of $0.9959\pm0.0002$ and $0.9929\pm0.0002$ for the two-photon detunings of $\Delta_2$ = 0.54 MHz and $\Delta_2$ = 0 respectively (Fig.~\ref{fig.3}).
As discussed above, zero detuning warrants lower losses (thus a higher amplitude of the retrieved state) and no phase rotation, but higher excess noise (thus no squeezing in the retrieved state). Nevertheless the two photon resonant configuration offers a better fidelity if the single photon state is stored \cite{KuzmichSingle,LukinSingle}, as evidenced by comparing the superoperator element $\chi_{1,1}^{1,1}$ of Fig. \ref{fig.1n} (a) and (b).
In addition to the ability to predict the output of the memory for any input state, our procedure can be used to estimate the performance of the memory according to any available benchmark. As an example, we analyze the performance of our memory with respect to the classical limit on average fidelity associated with the storage of coherent states with amplitudes distributed in phase space according to a Gaussian function of width $1/\lambda$ \cite{PolzikBenchmark}. This limit as a function of $\lambda$ is given by: \begin{equation}
F(\lambda)=2\lambda \int_0^{+\infty}\exp{(-\lambda\alpha^2)\langle\alpha|\hat{\mathcal{E}}(|\alpha\rangle\langle\alpha|)|\alpha\rangle}\alpha
d\alpha\leq\frac{1+\lambda}{2+\lambda}.
\label{Eq.benchmark} \end{equation} From csQPT data, we evaluate the average fidelity associated with our memory for both values of $\Delta_2$ (Fig.~\ref{fig.4}). Both configurations show nonclassical behavior. The higher value of average fidelity corresponds to $\Delta_2$ = 0 and is explained by a higher storage efficiency. \begin{figure}
\caption{(color online). Average fidelity of the
quantum memory for a Gaussian distributed set of coherent states. Blue empty (red filled) dots show
the average fidelity calculated from the csQPT experimental
data for
$\Delta_2$ = 0 (0.54 MHz). The experimental uncertainty is 0.0002.
The solid line shows the classical limit \cite{PolzikBenchmark}.
}
\label{fig.4}
\end{figure}
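To illustrate how the average fidelity of Eq.~(\ref{Eq.benchmark}) can be evaluated, the sketch below (Python/SciPy) computes $F(\lambda)$ for a simplified phase-insensitive model of the memory: amplitude transmission $t$ followed by $\bar n$ added thermal photons, for which $\langle\alpha|\hat{\mathcal{E}}(|\alpha\rangle\langle\alpha|)|\alpha\rangle$ takes a closed Gaussian form. This toy channel and the chosen parameters are illustrative assumptions only; the curves in Fig.~\ref{fig.4} are obtained from the full measured superoperator instead.
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

def overlap(alpha, t, nbar):
    """<alpha|E(|alpha><alpha|)|alpha> for a toy channel: the retrieved state is
    modelled as a thermal state with nbar photons displaced to t*alpha."""
    return np.exp(-(1.0 - t)**2 * alpha**2 / (1.0 + nbar)) / (1.0 + nbar)

def avg_fidelity(lam, t, nbar):
    """F(lambda) of the benchmark integral above, by numerical quadrature."""
    integrand = lambda a: np.exp(-lam * a**2) * overlap(a, t, nbar) * a
    value, _ = quad(integrand, 0.0, np.inf)
    return 2.0 * lam * value

lam = 1.0
F = avg_fidelity(lam, t=0.41, nbar=0.022)   # numbers quoted above for Delta_2 = 0
print(f"toy model: F = {F:.3f},  classical limit = {(1 + lam) / (2 + lam):.3f}")
\end{verbatim}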
\paragraph{Conclusion} In summary, we have demonstrated complete characterization of an EIT-based quantum memory by csQPT. This procedure allows one to predict the effect of the memory on an arbitrary quantum-optical state, and thus provides the ``specification sheet" of quantum-memory devices for future applications in quantum information technology. Furthermore, our results offer insights into the detrimental effects that affect the storage performance and provide important feedback for the device optimization. We anticipate this procedure to become standard in evaluating the suitability of a memory apparatus for practical quantum telecommunication networks.
\paragraph*{Acknowledgements} This work was supported by NSERC, iCORE, CFI, AIF, Quantum$Works$, iCORE (C.K.) and CIFAR (A.L.).
\end{document}
\begin{document}
\title[Convex ordering and quantification of quantumness]{Convex ordering and quantification of quantumness}
\author{J Sperling} \address{Arbeitsgruppe Theoretische Quantenoptik, Institut f\"ur Physik, Universit\"at Rostock, D-18051 Rostock, Germany} \ead{[email protected]} \author{W Vogel} \address{Arbeitsgruppe Theoretische Quantenoptik, Institut f\"ur Physik, Universit\"at Rostock, D-18051 Rostock, Germany} \ead{[email protected]}
\begin{abstract}
The characterization of physical systems requires a comprehensive understanding of quantum effects.
One aspect is a proper quantification of the strength of such quantum phenomena.
Here, a general convex ordering of quantum states will be introduced which is based on the algebraic definition of classical states.
This definition resolves the ambiguity of the quantumness quantification using topological distance measures.
Classical operations on quantum states will be considered to further generalize the ordering prescription.
Our technique can be used for a natural and unambiguous quantification of general quantum properties whose classical reference has a convex structure.
We apply this method to typical scenarios in quantum optics and quantum information theory to study measures which are based on the fundamental quantum superposition principle. \end{abstract}
\pacs{03.67.Mn, 42.50.-p, 02.40.Ft, 03.65.Fd}
\begin{indented}
\item[]\today,\submitto{\PS} \end{indented}
{\it Keywords}: Convex geometry, convex ordering, quantumness measures
\maketitle \ioptwocol
\section{Introduction}
Characterizing the differences between the quantum and classical domain of physics is of fundamental interest for uncovering the quantumness of nature.
Typically there are quantum counterparts to classical physics, such as coherent states in the system of the harmonic oscillator, or product states in the field of compound systems.
Using classical statistical mixing, these pure states may be generalized to mixed classical ones.
Thus, we obtain convex sets of states having a classical analogue with respect to a given physical property.
Different measures have been introduced for quantifying the amount of quantumness of states having no such classical correspondence.
These measures induce an ordering prescription enabling us to compare the quantumness of different states.
In the system of the harmonic oscillator, one of the early attempts to quantify the amount of nonclassicality has been given by the trace-distance of an arbitrary state to the set of all classical ones being mixtures of coherent states~\cite{TraceDist,TraceDist2}.
This led to a number of distance based nonclassicality probes, e.g., Hilbert-Schmidt-norm~\cite{HSDist,HSDist2} or the Bures distance~\cite{BuresDist} measures.
Some nonclassicality metrics are based on the amount of Gaussian noise which is needed for the elimination of any quantum interference within the corresponding phase-space representation~\cite{Gauss1,Gauss12,Gauss2} or they directly use the negativities within the quasiprobability distribution~\cite{NegWiegner,FM10} as an indicator of the amount of nonclassicality.
Another method for the quantification of nonclassicality is given via the potential of a state to generate entanglement~\cite{EntPot}.
This translates quantumness of a single-mode harmonic oscillator to the quantification of entanglement.
The axiomatic definition of general entanglement measure is given in~\cite{AxiomEntM,AxiomEntM2,AxiomEntM3}.
This definition is based on so-called local operations and classical communications mapping separable quantum states onto separable ones.
Under all examples of entanglement measures, there is one which is of particular interest for our considerations: the Schmidt number~\cite{SchmNummer,TerhalSN}.
It has been shown that this entanglement measure has some advantageous properties in relation to other measures~\cite{SchmidtUni}.
In particular, the degree of nonclassicality of a single mode system is directly transformed into the same Schmidt number using linear optics~\cite{UniQuant}.
For some applications not all states with the same amount of quantumness are equally useful.
For example, it has been shown that states can be too entangled for quantum computation~\cite{Eisert-QC}.
Consequently, operational nonclassicality and entanglement measures have been introduced~\cite{SchmidtUni,Gehrke}.
In particular quantum information protocols require information related measures of quantum effects.
For example, the Fisher information~\cite{Fischer,FKMMSV10} is such a proper operational probe.
More generally, entropic measures have been intensively studied~\cite{EntropyInformation,Context}.
It has been shown that entropic inequalities and tomographic information can determine quantum correlations~\cite{MM09, MM14a,MM14}.
In general, a given operational, distance-based, or entropic metric induces an ordering prescription, which yields a particular sorting of quantum states regarding their amount of quantumness for some applications.
In the current contribution we will use the inverse approach, i.e.: a convex ordering prescription of quantum states will imply a canonic measure.
It will be shown that distance measures are, in general, not completely suitable for ordering quantum states unambiguously.
Studying the algebraic implications of the definition of convex sets, we rigorously formulate an ordering procedure which does not depend on a distinct topological distance.
We expand this method to include classical operations being especially defined for a particular notion of quantumness under study.
The obtained sorting procedure induces a corresponding quantumness measures in a natural way.
We apply this method to basic examples, such as entanglement, nonclassicality, and quantum information, showing the importance of the quantum superposition principle for the quantification of different quantum features.
The paper is structured as follows.
In Sec.~\ref{Sec:Motivation} we motivate our treatment.
An unambiguous convex ordering prescription will be proposed in Section~\ref{Sec:Ordering}.
In Sec.~\ref{Sec:ClassicalOperations} we include classical operations to further enhance the ordering technique.
We introduce an axiomatic quantification and we study measures that count quantum superpositions in Sec.~\ref{Sec:Quantification}.
A summary and conclusions are given in Sec.~\ref{Sec:SumCon}.
\section{Motivation}\label{Sec:Motivation}
Let us consider the convex set of all (pure and mixed) quantum states, $\mathcal Q$, and a closed, non-empty, and convex subset $\mathcal C\subset\mathcal Q$.
The elements of $\mathcal C$ are supposed to be states with a given classical property, e.g.: separable states, $\mathcal C_{\rm sep}={\rm conv}\{|a\rangle\langle a|\otimes|b\rangle\langle b|:|a\rangle\in\mathcal H_A \wedge |b\rangle\in\mathcal H_B\}$, or coherent states, $\mathcal C_{\rm coh}={\rm conv}\{|\alpha\rangle\langle \alpha|:\alpha\in\mathbb C\}$.
The general task is the determination of the amount of quantumness of an arbitrary quantum state $\rho\in\mathcal Q$ with respect to the classical property under study.
The convexity of the set $\mathcal C$ guarantees that a mixing of two classical states remains classical.
This is important, because it ensures that statistical averaging cannot increase quantum correlations.
The closure of $\mathcal C$ is motivated by the argument that a convergent sequence of classical states should have its limit in the classical domain too.
These fundamental requirements ensure that a classical system remains classical employing classical operations and classical statistics.
Let us note that the property of quantum discord does not meet these conditions, since a non-zero discord can be obtained from a classical mixing of two zero discord states~\cite{DiscordReview}.
One way of ordering quantum states is given by the distance of these states to the set of classical states $\mathcal C$.
Here we will show that sorting quantum states by a distance cannot lead to one distinct order of states.
For the time being, let us assume a two dimensional convex set $\mathcal C$.
Using an appropriate coordinate transformation, this classical set $\mathcal C$ can be assumed to be a sphere -- with respect to the Euclidean norm $\|\,\cdot\,\|_2$ -- in the form:
\begin{eqnarray}
\mathcal C=\{x\in\mathcal Q:\, \|x\|_2\leq 1/2\}.
\end{eqnarray}
Now we may choose two nonclassical elements $y_1,y_2\in\mathcal Q\setminus\mathcal C$, which are given in the standard basis: $y_1=(1,0)^{\rm T}$ and $y_2=\frac{1}{\sqrt{2}}(1,1)^{\rm T}$.
The distance $d_p$ to the set of classical states $\mathcal C$ in $p$-norm is given by
\begin{eqnarray}
d_p(y,\mathcal C)=\inf_{x\in\mathcal C}\|y-x\|_p.
\end{eqnarray}
For all $p$-norms the minimal distance of $y_1$ and $y_2$ to the classical states is obtained for $x_1=\frac{1}{2}(1,0)^{\rm T}\in\mathcal C$ and $x_2=\frac{1}{2\sqrt 2}(1,1)^{\rm T}\in\mathcal C$, respectively, cf. Fig.~\ref{Fig:Norms}.
Thus, we can calculate $d_p(y_1,\mathcal C)$ and $d_p(y_2,\mathcal C)$ for different values of $p$,
\begin{eqnarray}
d_p(y_1,\mathcal C)=\|y_1-x_1\|_p=\left[\left(\frac{1}{2}\right)^p+0^p\right]^{1/p}=\frac{1}{2}\\
\nonumber d_p(y_2,\mathcal C)=\|y_2-x_2\|_p\\
\phantom{d_p(y_2,\mathcal C)}
=\left[\left(\frac{1}{2\sqrt 2}\right)^p+\left(\frac{1}{2\sqrt 2}\right)^p\right]^{1/p}
=\frac{2^{1/p}}{2\sqrt 2}.
\end{eqnarray}
This result displays the paradox of the quantification of quantumness with distance measures in Fig.~\ref{Fig:Norms}.
Depending on the choice of the norm, we can claim that: $y_1$ is more nonclassical than $y_2$ ($2<p\leq\infty$); or $y_1$ is less nonclassical than $y_2$ ($1\leq p<2$); or $y_1$ and $y_2$ have an equal nonclassicality ($p=2$).
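This ambiguity is elementary to check numerically; the short sketch below (Python/NumPy) approximates $d_p(y_1,\mathcal C)$ and $d_p(y_2,\mathcal C)$ by minimizing over a fine sampling of the disc of radius $1/2$. The grid resolution is an arbitrary choice of the sketch.
\begin{verbatim}
import numpy as np

# Sample the classical set C (the closed disc of radius 1/2) on a polar grid.
r = np.linspace(0.0, 0.5, 201)
t = np.linspace(0.0, 2.0 * np.pi, 721)
R, T = np.meshgrid(r, t)
C = np.stack([R * np.cos(T), R * np.sin(T)], axis=-1).reshape(-1, 2)

y1 = np.array([1.0, 0.0])
y2 = np.array([1.0, 1.0]) / np.sqrt(2.0)

for p in (1, 2, np.inf):
    d1 = np.min(np.linalg.norm(C - y1, ord=p, axis=1))
    d2 = np.min(np.linalg.norm(C - y2, ord=p, axis=1))
    print(f"p = {p}:  d_p(y1, C) = {d1:.4f}   d_p(y2, C) = {d2:.4f}")
# Expected: d_p(y1,C) = 1/2 for every p, while d_p(y2,C) = 2^(1/p)/(2*sqrt(2)).
\end{verbatim}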
\begin{figure}
\caption{(color online)
The dark gray area represents $\mathcal C$, and both gray areas depict $\mathcal Q$.
The upper point represents $y_1$, the other one represents $y_2$.
The blue circles are the spheres in $2$-norm showing the distance to $\mathcal C$.
The equal size of them implies an equal $2$-norm-distance for both points.
The green squares represent the spheres around the considered points in $1$-norm.
In the case of the $1$-norm, the square around $y_2$ is larger than those around $y_1$.
Whereas for the $\infty$-norm spheres (red squares) the relation is the other way around.
}
\label{Fig:Norms}
\end{figure}
Let us note that this particular two-dimensional cut already exhibits the ambiguity of the distance-measure approach for any dimension of convex sets.
Additionally, any monotonic function of a distance, for example entropies, will inherit this characteristic.
While those metrics can be useful in an operational sense, they are not suitable for an unambiguous quantification of the quantumness property itself.
In the following we will show that the convexity of the classical set serves as the key element to resolve this paradox.
\section{Ordering Quantum States}\label{Sec:Ordering}
A convex set $\mathcal C$ is characterized through its algebraic definition,
\begin{eqnarray}\label{Eq:Convexity}
\rho,\rho'\in\mathcal C \wedge \lambda\in[0,1] \Rightarrow \lambda \rho+(1-\lambda)\rho'\in\mathcal C.
\end{eqnarray}
The question whether a general element $\rho\in\mathcal Q$ is in the convex set $\mathcal C$, or not, is independent of the choice of a distance.
In addition, we show in~\ref{App:Normalization} that the normalization to ${\rm tr}\,\rho=1$ can be neglected from the mathematical point of view.
For the quantification, we start with the formulation of a preorder relation $\preceq$.
\begin{definition}\label{Def:Preorder}
Two quantum states $\rho,\rho'\in\mathcal Q$ can be compared by $\preceq$:
\begin{eqnarray*}
\rho\preceq \rho' \Leftrightarrow \exists \gamma\in\mathcal C\, \exists \lambda\in[0,1]: \rho=\lambda \rho'+(1-\lambda)\gamma.
\end{eqnarray*}
\end{definition}
This means a quantum state $\rho$ has less or equal nonclassicality compared with another state $\rho'$, if $\rho$ can be written as a classical statistical mixture of $\rho'$ and a classical state $\gamma$.
Let us prove that this relation fulfills the requirements of a preorder.
\paragraph*{Proof.}
$\preceq$ is reflexive: $\rho=1\rho+(1-1)\gamma\Rightarrow \rho\preceq \rho$;
$\preceq$ is transitive: $\rho_1\preceq \rho_2$ and $\rho_2\preceq \rho_3$ imply
\begin{eqnarray*}
&\rho_1=\lambda \rho_2+(1-\lambda)\gamma_1 \wedge \rho_2=\kappa \rho_3+(1-\kappa)\gamma_2 \Rightarrow\\
&\rho_1=\lambda\kappa \rho_3 +(1-\lambda\kappa)\gamma_3 \Rightarrow \rho_1\preceq \rho_3,
\end{eqnarray*}
with $\gamma_3=\frac{\lambda(1-\kappa)}{1-\lambda\kappa}\gamma_2+\frac{1-\lambda}{1-\lambda\kappa}\gamma_1$ and $\frac{\lambda(1-\kappa)}{1-\lambda\kappa}+\frac{1-\lambda}{1-\lambda\kappa}=1$.
In conclusion, $\preceq$ is a preorder.
$\blacksquare$
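The preorder of Definition~\ref{Def:Preorder} can also be tested algorithmically. If the classical set is replaced by the convex hull of a finite sample of classical states, the existence of a decomposition $\rho=\lambda\rho'+(1-\lambda)\gamma$ becomes a linear feasibility problem. The sketch below (Python with SciPy) implements this check; the finite sampling of $\mathcal C$ and the use of a linear program are simplifying assumptions of the illustration.
\begin{verbatim}
import numpy as np
from scipy.optimize import linprog

def is_below(rho, rho_prime, classical_states):
    """Check rho <= rho' in the sense of the preorder above, with the
    classical set approximated by conv(classical_states).

    Feasibility of  rho = lam * rho' + sum_i c_i * gamma_i,
    with lam, c_i >= 0 and lam + sum_i c_i = 1, is a linear program."""
    def vec(m):                       # stack real and imaginary parts
        return np.concatenate([m.real.ravel(), m.imag.ravel()])
    columns = [vec(rho_prime)] + [vec(g) for g in classical_states]
    A_eq = np.column_stack(columns)
    b_eq = vec(rho)
    A_eq = np.vstack([A_eq, np.ones(len(columns))])   # lam + sum c_i = 1
    b_eq = np.append(b_eq, 1.0)
    res = linprog(c=np.zeros(len(columns)), A_eq=A_eq, b_eq=b_eq,
                  bounds=[(0, 1)] * len(columns), method="highs")
    return res.success
\end{verbatim}
For classical sets that are not given as a finite convex hull (e.g. the separable states), the corresponding feasibility problem is of course much harder; the finite sample here is only a toy discretization.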
For generating an order from the preorder $\preceq$, we consider the following equivalence~$\cong$.
\begin{definition}\label{Def:Eqivalence}
Two quantum states $\rho,\rho'\in\mathcal Q$ have the same order of quantumness, if
\begin{eqnarray*}
\rho\cong \rho' \Leftrightarrow \rho\preceq \rho' \wedge \rho'\preceq \rho.
\end{eqnarray*}
\end{definition}
\paragraph*{Proof.}
$\cong$ is reflexive: $\rho\preceq\rho\wedge\rho\preceq\rho$;
$\cong$ is symmetric: $\rho\preceq\rho'\wedge\rho'\preceq\rho\Leftrightarrow\rho'\preceq\rho\wedge\rho\preceq\rho'$;
$\cong$ is transitive: $\rho_1\cong\rho_2$ and $\rho_2\cong\rho_3$ are equivalent to
\begin{eqnarray*}
\rho_1\preceq\rho_2\quad\wedge\quad\rho_2\preceq\rho_1\quad\wedge\quad\rho_3\preceq\rho_2\quad\wedge\quad\rho_2\preceq\rho_3.
\end{eqnarray*}
Using the transitivity of $\preceq$, we obtain $\rho_1\preceq\rho_3\wedge\rho_3\preceq\rho_1$.
Thus, $\cong$ is an equivalence relation.
$\blacksquare$
With respect to the equivalence $\cong$, the $\preceq$ preorder given in Definition~\ref{Def:Preorder} becomes an order.
The missing property is that $\preceq$ must be antisymmetric,
\begin{eqnarray}
\rho\preceq \rho'\wedge\rho'\preceq \rho\Rightarrow\rho\cong\rho',
\end{eqnarray}
which is true, cf. Definition~\ref{Def:Eqivalence}.
Thus, we have constructed a rigorous way to order quantum states.
\begin{proposition}\label{Lem:MinClass}
Classical states have a minimal and equal order, i.e.:
\begin{eqnarray*}
\gamma\in\mathcal C \wedge \rho\in\mathcal Q\Rightarrow \gamma\preceq\rho \mbox{ and }
\gamma,\gamma'\in\mathcal C\Rightarrow\gamma\cong\gamma'.
\end{eqnarray*}
Any state $\rho\in\mathcal Q$ with a minimal order, $\rho\preceq\gamma\in\mathcal C$, is classical, $\rho\in\mathcal C$.
\end{proposition}
\paragraph*{Proof.}
From $\gamma=0\rho+(1-0)\gamma$ and Definition~\ref{Def:Preorder} follows $\gamma\preceq\rho$.
Hence we find for all classical states $\gamma,\gamma'\in\mathcal C$: $\gamma\preceq\gamma'\wedge\gamma'\preceq\gamma$; and therefore $\gamma\cong\gamma'$.
If $\rho\preceq\gamma\in\mathcal C$, i.e. $\exists\gamma'\in\mathcal C,\lambda\in[0,1]:\rho=\lambda\gamma+(1-\lambda)\gamma'$ , then $\rho$ is a convex combination of classical states and therefore classical.
$\blacksquare$
The Definitions~\ref{Def:Preorder}~and~\ref{Def:Eqivalence} provide an order of quantum states, which is solely based on the convex structure of $\mathcal C$.
These definitions highlight the natural assumption that a statistical mixture of a nonclassical state with a classical one cannot become more nonclassical than the initial one, cf. Fig.~\ref{Fig:NonclOrder}.
Further on, in Proposition~\ref{Lem:MinClass} it has been shown that this order implies that all classical states are the only ones with a minimal nonclassicality.
The mixing property and the minimality property of classical states are essential for any quantification of nonclassicality.
\begin{figure}
\caption{(color online)
The inner green area represents $\mathcal C$, and the complete area represents $\mathcal Q$.
A nonclassical element $\rho$ is given.
All elements $\rho_1$ in the red (triangular) area above $\rho$ fulfill: $\rho\preceq\rho_1$.
All elements $\rho_2$ in the green and blue area below $\rho$ fulfill: $\rho_2\preceq\rho$.
}
\label{Fig:NonclOrder}
\end{figure}
\section{Classical Operations}\label{Sec:ClassicalOperations}
A classical quantum state may evolve in an experiment or propagate in a classical channel including noise effects.
Thus we have to deal with operations which map our state within the set $\mathcal Q$.
Operations with a classical counterpart must not increase the amount of quantumness.
Therefore, we study transformations mapping classical states onto each other.
\begin{definition}
We call a linear operation $\Lambda:\mathcal Q\to\mathcal Q$ a classical one, if $\forall\gamma\in\mathcal C: \Lambda(\gamma)\in\mathcal C$.
The set of all classical operations $\Lambda$ is denoted as $\mathcal{CO}$.
\end{definition}
\begin{proposition}\label{Prop:SemiGroup}
The set $\mathcal{CO}$ is convex and a semi-group.
\end{proposition}
\paragraph*{Proof.}
The convexity follows from the linearity of the operation space together with the convexity of the set of classical states,
\begin{eqnarray*}
(\lambda\Lambda_1+(1-\lambda)\Lambda_2)(\gamma)=\lambda\underbrace{\Lambda_1(\gamma)}_{\in\mathcal C}+(1-\lambda)\underbrace{\Lambda_2(\gamma)}_{\in\mathcal C}\in\mathcal C.
\end{eqnarray*}
The semi-group property is given by
\begin{eqnarray*}
\Lambda_1,\Lambda_2\in\mathcal{CO}: (\Lambda_1\circ\Lambda_2)(\gamma)=&\Lambda_1(\Lambda_2(\gamma))\in\mathcal C,
\end{eqnarray*}
with the identity ${\rm Id}(\gamma)=\gamma\in\mathcal C$, being classical.
$\blacksquare$
These classical operations or channels can be considered as quantum physical systems having a classical analogue.
This includes interactions which evolve states in a classical way, or mix them with classical noise.
For special quantum tasks it might also be useful to consider only sub-semi-groups of $\mathcal{CO}$, for example one-way classical communication for entanglement, or phase rotations and phase dispersion for coherent states.
Now, we have to verify that classical operations do not change the previously defined order.
Therefore we formulate the following proposition.
\begin{proposition}\label{Theo:ClassOp}
{\rm (i)} A classical operation does not change the order, $\rho\preceq\rho' \Rightarrow \Lambda(\rho)\preceq\Lambda(\rho')$.
{\rm (ii)} Mixing a quantum state with a classical one is a classical operation.
\end{proposition}
\paragraph*{Proof.}
For (i), let us consider two states with $\rho\preceq\rho'$, i.e. $\rho=\lambda\rho'+(1-\lambda)\gamma$, and a classical operation $\Lambda$; this implies $\Lambda(\rho)=\lambda\Lambda(\rho')+(1-\lambda)\Lambda(\gamma)$.
Together with $\Lambda(\gamma)\in\mathcal C$ and Definition~\ref{Def:Preorder} we obtain (i).
For claim (ii), we consider that a state $\rho$ is mixed with a classical state $\gamma$, $\lambda\in[0,1]$,
\begin{eqnarray*}
\rho'&=\lambda\rho+(1-\lambda)\gamma\\
&=\lambda{\rm Id}(\rho)+(1-\lambda)({\rm tr}\rho)\gamma\\
&=\left[\lambda{\rm Id}(\,\cdot\,)+(1-\lambda)({\rm tr}(\,\cdot\,))\gamma\right](\rho)=\Lambda(\rho).
\end{eqnarray*}
The identical transformation ${\rm Id}$ and $({\rm tr}(\,\cdot\,))\gamma$ are classical, i.e., $\forall \gamma' \in\mathcal C:({\rm tr}\,\gamma')\gamma\in\mathcal C$, and the convex structure of $\mathcal{CO}$ implies that $\Lambda\in\mathcal{CO}$.
$\blacksquare$
This means that classical operations are compatible with the order $\preceq$, and they cannot increase the quantumness of the initial state.
Therefore, the order given in Definition~\ref{Def:Preorder} can be generalized by using Proposition~\ref{Theo:ClassOp}.
\begin{definition}\label{Def:NO}
A quantum state $\rho$ has a lower or equal order of nonclassicality than the state $\rho'$, $\rho\preceq\rho'$, iff $\exists\Lambda\in\mathcal{CO}:\rho=\Lambda(\rho').$
They have the same nonclassicality, $\rho\cong\rho'$, if $\rho\preceq\rho'\wedge\rho'\preceq\rho$.
\end{definition}
This quantumness ordering prescription naturally generalizes the previous convex ordering with respect to $\mathcal C$ by including classical operations $\mathcal{CO}$.
Condition~(ii) in Proposition~\ref{Theo:ClassOp} proves that the ordering includes the previous Definition~\ref{Def:Preorder}.
In addition, the Definition~\ref{Def:NO} implies that all quantum states below a given state $\rho$ can be written as $\Lambda(\rho)$ for a classical operation $\Lambda$.
Therefore it simply follows
\begin{eqnarray}
\Lambda(\rho)\preceq\rho.
\end{eqnarray}
Let us stress again that the states of minimal order are exactly the classical ones.
Now we want to further study properties of classical operations.
A subgroup of $\mathcal{CO}$ is given by the classical invertible maps $\mathcal{CO}_{-1}$, defined by
\begin{eqnarray}
\Lambda\in\mathcal{CO}_{-1}\Leftrightarrow \Lambda\in\mathcal{CO}\wedge\exists \Lambda^{-1}\in\mathcal{CO}.
\end{eqnarray}
These are classical operations which can be reversed, and the inverse is again a classical operation.
This group always exists, since the identical transformation is its own inverse, ${\rm Id}\in\mathcal{CO}_{-1}$.
The importance of this group is that it yields classes of quantum states with an equivalent order.
Let us assume a classical invertible $\Lambda\in\mathcal{CO}_{-1}$ and an arbitrary state $\rho\in\mathcal Q$.
It follows from $\rho'=\Lambda(\rho)$ that $\rho=\Lambda^{-1}(\rho')$.
Together with Definitions~\ref{Def:Eqivalence}~and~\ref{Def:NO}, this yields
\begin{eqnarray}
\rho\preceq\rho'\wedge\rho'\preceq\rho \Leftrightarrow \rho\cong\rho'.
\end{eqnarray}
Hence, it is possible to identify quantum states with an equal order of quantumness applying the group $\mathcal{CO}_{-1}$.
\begin{proposition}
All quantum states $\rho,\rho'\in\mathcal Q$, with $\rho'=\Lambda(\rho)$ and $\Lambda\in\mathcal{CO}_{-1}$, have an equal order of quantumness, $\rho\cong\rho'$.
$\blacksquare$
\end{proposition}
For the sphere-shaped classical set sketched in Fig.~\ref{Fig:NonclOrder}, classical invertible maps can be visualized as rotations around the center.
In the generalized scenario, this structure will subsequently lead to nested sets with an increasing amount of quantum interferences.
\section{Axiomatic Quantification of Nonclassicality}\label{Sec:Quantification}
So far, we have introduced the algebraic quantumness ordering prescription $\preceq$ on arbitrary classical, convex sets $\mathcal C$ that are closed under classical statistical mixtures and operations.
Hence, a distance independent ordering technique is obtained.
Eventually, we will use this approach to quantify the amount of quantumness in a natural way.
Let us stress again that the standard approach is formulated in the opposite direction, i.e., a measure is proposed which implies a sorting of states.
In contrast, the approach under study starts from a convex geometric ordering.
Using the derived ordering, we can properly define quantumness measures.
This means that we can introduce functions $\mu$ which map a quantum state $\rho\in\mathcal Q$ to a real number $\mu(\rho)$.
\begin{definition}\label{Def:Measure}
A function $\mu:\mathcal Q\to\mathbb R$ is a quantumness measure, if $\rho\preceq\rho'\Leftrightarrow \mu(\rho)\leq\mu(\rho')$.
\end{definition}
The definition says that the measure quantifies the ordering, which is given by the algebraic sorting $\preceq$.
Since for all classical states $\gamma\in\mathcal C$ holds $\gamma\preceq\rho\in\mathcal Q$, we have $\mu(\rho)=\inf_{\gamma\in\mathcal C}\mu(\gamma)=:\mu_{\rm min}$ if and only if $\rho\in\mathcal C$, cf. Proposition~\ref{Lem:MinClass}.
Typically, one uses the convention $\mu_{\rm min}=0$.
From the definition also follows
\begin{eqnarray}
\mu(\rho)\geq\mu(\Lambda(\rho)),
\end{eqnarray}
for any classical operation $\Lambda\in\mathcal{CO}$.
Moreover, equally ordered quantum states, $\rho\cong\rho'$, have an equivalent amount of quantumness,
\begin{eqnarray}
\rho\preceq\rho'\wedge\rho'\preceq\rho\,\Leftrightarrow\,\mu(\rho)\leq\mu(\rho')\wedge\mu(\rho')\leq\mu(\rho).
\end{eqnarray}
The quantification of nonclassical quantum states considered here is based only on the most elementary notion of statistical averaging (convexity of $\mathcal C$) and the physical need for classical transformations, $\mathcal{CO}$.
We did not make any further assumption about the classical property itself.
In the case of entanglement, Definition~\ref{Def:Measure} is equivalent to the axiomatic definition of entanglement measures~\cite{AxiomEntM,AxiomEntM2,AxiomEntM3} adding the compatibility with local invertible transformations.
For nonclassicality in the notion of coherent states, Definition~\ref{Def:Measure} is equivalent to the algebraic approach in Refs.~\cite{UniQuant,Gehrke}.
Note that the quantification procedure loses its generality if only subsets of $\mathcal{CO}$ are considered, as is often done in entanglement theory by restricting the set of all separable operations to the operational subset of so-called local operations and classical communication~\cite{RMP-Horo}.
\subsection{Quantumness measures based on the quantum superposition principle}\label{Sec:Example}
As an example, we will consider in the following a measure which relies on the quantum superposition principle.
Superpositions are the origin of the most fundamental differences between classical and quantum physics.
Therefore, let us start with a set $\mathcal C_0$ of pure classical states, $|c\rangle\in\mathcal C_0$.
The elements of the convex set $\mathcal C$ of all classical states are given by
\begin{eqnarray}
\gamma=\int_{\mathcal C_0} dP_{\rm cl}(c) |c\rangle\langle c|,
\end{eqnarray}
for a classical probability distribution $P_{\rm cl}$.
Hence, a general classical state is a statistical mixture of pure classical ones.
For nonclassical states, $\rho\in\mathcal Q\setminus\mathcal C$, such a $P_{\rm cl}$ does not exist.
The typical situation in quantum physics is that a generalized $P$ exists, but it has negativities.
This scenario is relevant for the representations of both: expanding nonclassical states using coherent ones $\mathcal C_{0,\rm coh}=\{|\alpha\rangle:\, \alpha\in\mathbb C\}$ with the Glauber-Sudarshan representation~\cite{GSRep2,GSRep1}; and expanding entangled states by factorized ones $\mathcal C_{0,\rm sep}=\{|a\rangle\otimes|b\rangle:\,|a\rangle\in\mathcal H_A\wedge|b\rangle\in\mathcal H_B\}$ using optimized entanglement quasi-probabilities~\cite{EntRep}.
Let us consider a classical operation, which has the following form,
\begin{eqnarray}
\Lambda(\rho)=M\rho M^\dagger,\mbox{ with } M|c\rangle=g(c)|f(c)\rangle,
\end{eqnarray}
with a classical valued function $f$, i.e. $|f(c)\rangle\in\mathcal C_0$, and a complex valued function $g$.
This operation is a classical one,
\begin{eqnarray}
\nonumber\Lambda(\gamma)=\int_{\mathcal C_0} dP_{\rm cl}(c) M|c\rangle\langle c|M^\dagger\\
\phantom{\Lambda(\gamma)}=\int_{\mathcal C_0} dP_{\rm cl}(c) |g(c)|^2|f(c)\rangle\langle f(c)|,
\end{eqnarray}
which is again (neglecting normalization, see Appendix~\ref{App:Normalization}) a statistical mixture of pure classical states.
If $f$ is bijective and $g(c)\neq0$ for all $c$, we obtain a classical operation in $\mathcal{CO}_{-1}$,
\begin{eqnarray}
M^{-1}|c\rangle=\frac{1}{g(c)}|f^{-1}(c)\rangle.
\end{eqnarray}
Examples are local invertible maps $M=A\otimes B$ ($\exists A^{-1},B^{-1}$) for separable states, or, for coherent states,
\begin{eqnarray}
M=\exp[xa^\dagger a]\exp[ya]\exp[za^\dagger],
\end{eqnarray}
where $x,y,z\in\mathbb C$, and $a$ and $a^\dagger$ denote the annihilation and creation operators, respectively, and
\begin{eqnarray}
M|\alpha\rangle=\exp[xa^\dagger a]\exp[ya]\exp[za^\dagger]|\alpha\rangle\nonumber\\
\phantom{M|\alpha\rangle}=e^{\frac{|z+\alpha|^2-|\alpha|^2}{2}+y(z+\alpha)}|(\alpha+z)e^{x}\rangle\in\mathcal C_0.
\end{eqnarray}
It is worth noting that every element of the convex set of classical operations, $\Lambda\in\mathcal{CO}$, can be written in the form of an operator-sum decomposition~\cite{OpSumRep}, also called a Kraus decomposition,
\begin{eqnarray}\label{Eq:Krauss}
\Lambda(\rho)=\sum_i M_i\rho M_i^\dagger.
\end{eqnarray}
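As a purely illustrative sketch (not taken from the cited references), the operator-sum action in Eq.~(\ref{Eq:Krauss}) can be evaluated numerically; the particular Kraus pair used below, a single-qubit phase-damping channel with an assumed damping parameter $p$, is only a placeholder to show the structure $\Lambda(\rho)=\sum_i M_i\rho M_i^\dagger$.
\begin{verbatim}
import numpy as np

def apply_channel(rho, kraus_ops):
    """Operator-sum map: rho -> sum_i M_i rho M_i^dagger."""
    return sum(M @ rho @ M.conj().T for M in kraus_ops)

# Illustrative (assumed) Kraus pair: single-qubit phase damping,
# satisfying M0^+ M0 + M1^+ M1 = identity.
p = 0.3
M0 = np.sqrt(1 - p) * np.eye(2)
M1 = np.sqrt(p) * np.diag([1.0, -1.0])

rho_plus = 0.5 * np.array([[1, 1], [1, 1]], dtype=complex)   # |+><+|
print(apply_channel(rho_plus, [M0, M1]))                     # off-diagonals damped
\end{verbatim}
Whether such a map is classical in the sense of Sec.~\ref{Sec:ClassicalOperations} of course depends on the chosen set $\mathcal C_0$; the snippet only illustrates the operator-sum structure itself.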
Now we want to analyze a pure nonclassical state, which may be written as
\begin{eqnarray}\label{Eq:SuperPosClassical}
|\psi\rangle=\sum_{k=1}^r \psi_k |c_k\rangle,
\end{eqnarray}
with $|c_k\rangle\in\mathcal C_0$ and $r$ being the minimal number which allows this decomposition.
This representation is possible for any pure state, if $\mathcal C_0$ includes at least a basis of the Hilbert space.
Therefore, the state $|\psi\rangle$ is a superposition of $r$ classical states.
The classical operator $M$ acts like
\begin{eqnarray}
M|\psi\rangle=\sum_{k=1}^r \psi_k g(c_k)|f(c_k)\rangle.
\end{eqnarray}
It is important that $M$ can only decrease the number $r$, for example when $g(c_k)=0$ for some $k$ or when $f(c_k)=f(c_{k'})$ for $k\neq k'$.
If $M\,\cdot\,M^\dagger\in\mathcal{CO}_{-1}$, then $r$ even remains unchanged.
Therefore, let us define this minimal number $r$ of superimposed classical states as $r(\psi)$,
\begin{eqnarray}
r(\psi)=\inf\left\{r:|\psi\rangle=\sum_{k=1}^r \psi_k |c_k\rangle \wedge |c_k\rangle\in\mathcal C_0\right\}.
\end{eqnarray}
Obviously, this number is $1$ iff the state is an element of $\mathcal C_0$, and it is greater than one for a nonclassical pure state.
Now let us consider a mixed state $\rho\in\mathcal Q$.
This state can be written in various forms as a convex combination of pure states,
\begin{eqnarray}\label{Eq:Dec1}
\rho=\sum_i p_i|\psi_i\rangle\langle\psi_i|,
\end{eqnarray}
with $p_i>0$ and $\sum_i p_i=1$.
In this case, $\mu(\rho)$ can be obtained from a convex-roof construction based on $r(\psi)$~\cite{uhlmann}.
For a particular decomposition as given in Eq.~(\ref{Eq:Dec1}), the largest number of superpositions among the pure states $|\psi_i\rangle$ is $\sup_i\{r(\psi_i)\}$.
Among all decompositions of $\rho$, the desired one is the decomposition with the minimal number of needed superpositions.
Thus, $\mu(\rho)$ is given by
\begin{eqnarray}\label{eq:superpsoMeasure}
\mu(\rho)=\inf \left\{\sup_i\{r(\psi_i)\}:\rho=\sum_i p_i|\psi_i\rangle\langle\psi_i|\right\}-1.
\end{eqnarray}
This number is $0$ iff the mixed state is classical, and it is greater than zero for nonclassical states.
The number can become infinite if no finite number of superpositions yields the given state.
Let us highlight that states with an amount of quantumness up to $r$ define nested convex sets, $\mathcal C_{\mu\leq r}=\{\rho\in\mathcal Q:\mu(\rho)\leq r\}$ with $\mathcal C_{\mu\leq r}\subset \mathcal C_{\mu\leq r'}$ for $r\leq r'$.
For convenience, it is also possible to map $\mu(\rho)$ together with a monotonically increasing function to another measure $\mu'$, e.g.,
\begin{eqnarray}
\mu'(\rho)= 1-\exp(-\mu(\rho))\in[0,1].
\end{eqnarray}
We also point out that the measures $\mu$ and $\mu'$ are invariant under classical invertible maps, $\mathcal{CO}_{-1}$, which is important for being compatible with the unambiguous ordering prescription.
As we mentioned in Sec.~\ref{Sec:Motivation}, this is not true for a distance-based quantumness measure.
Since $\mathcal{CO}_{-1}$ maps can be considered as a transformation of the underlying metric, a distance is in general not preserved.
The function $\mu(\rho)$ in Eq.~(\ref{eq:superpsoMeasure}) is an example of a quantumness measure based on convex ordering, which additionally characterizes the fundamental quantum superposition principle.
In the case of coherent states it counts the minimal number of superpositions of (classical) coherent states needed to generate the state under study~\cite{UniQuant,Gehrke}.
In the case of entanglement it represents the Schmidt number~\cite{SchmidtUni}.
Hence, the given approach unifies and generalizes the previously considered methods.
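For the entanglement case just mentioned, where $\mu+1$ coincides with the Schmidt number of a pure state, this quantity can be read off from a singular value decomposition of the coefficient matrix. The following minimal numpy sketch is an illustration under assumed local dimensions and an ad hoc numerical tolerance; it is not part of the cited constructions.
\begin{verbatim}
import numpy as np

def schmidt_number(psi, dim_a, dim_b, tol=1e-12):
    """Number of nonzero Schmidt coefficients of a bipartite pure state."""
    coeffs = np.asarray(psi, dtype=complex).reshape(dim_a, dim_b)
    singular_values = np.linalg.svd(coeffs, compute_uv=False)
    return int(np.sum(singular_values > tol))

product = np.kron([1, 0], [1, 0])             # |00>, separable
bell = np.array([1, 0, 0, 1]) / np.sqrt(2)    # (|00> + |11>)/sqrt(2)
print(schmidt_number(product, 2, 2))          # 1, i.e. minimal order
print(schmidt_number(bell, 2, 2))             # 2, i.e. mu = 1
\end{verbatim}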
States with at most $r$ superpositions define nested, convex sets $\mathcal C_{\mu\leq r}$, which is advantageous for the construction of quantumness witnesses; cf.~\cite{MelWitness} and~\cite{SNWitness} for the construction of degree of nonclassicality witnesses and Schmidt number witnesses, respectively.
Let us note that the number of superpositions as a quantifier of quantumness in Eq.~(\ref{eq:superpsoMeasure}) may be further refined.
For example, the properties of the individual classical terms $|c_k\rangle$ in the superposition decomposition in Eq.~(\ref{Eq:SuperPosClassical}) could be taken into account.
For certain practical applications, such as special quantum teleportation protocols, also the weighting coefficients $\psi_k$ can play a significant role.
This, however, leads to operational quantumness measures, cf.~\cite{SchmidtUni}, which are important for quantifying the useful nonclassicality for particular applications.
It might also be useful to use the purity of a quantum state $\rho$ to further refine quantumness measures.
\subsection{Example: Bits versus qubits}
Another application of the superposition number is related to quantum information processing.
A classical sequence of $N$ bits $\boldsymbol i=(i_1,\dots,i_N)$, with truth values ``0'' and ``1'', has a classical counterpart in a compound qubit quantum system $(\mathbb C^{2})^{\otimes N}$ as
\begin{equation}
|\boldsymbol i\rangle=|i_1\rangle\otimes\dots\otimes|i_N\rangle\in\mathcal C_0,
\end{equation}
where $|0\rangle$ and $|1\rangle$ are the ground and excited state, respectively, of any two-level system being described by the individual Hamiltonians
\begin{equation}
H=\frac{\hbar\omega}{2} \sigma_{z}, \mbox{ with } \sigma_z=|1\rangle\langle 1|-|0\rangle\langle 0|.
\end{equation}
Using classical probabilities, we only have statistical mixtures of sequences of bits as
\begin{equation}
\gamma=\sum_{\boldsymbol i\in\{0,1\}^N} p_{\boldsymbol i} |\boldsymbol i\rangle\langle \boldsymbol i|\in\mathcal C.
\end{equation}
Classical computational operations are those which compute -- including statistical imperfections or errors -- from a given classical sequence $\boldsymbol i$ another classical string $\boldsymbol j$ of $N$ bits with the probability $p(\boldsymbol j|\boldsymbol i)$:
\begin{equation}
\Lambda(|\boldsymbol i\rangle\langle \boldsymbol i|)=\sum_{\boldsymbol j\in\{0,1\}^N} p(\boldsymbol j|\boldsymbol i)\, |\boldsymbol j\rangle\langle \boldsymbol j|.
\end{equation}
An example of a classical invertible map is the $N$-bit NOT operation, $\Lambda(\,\cdot\,)={\rm NOT}^{\otimes N}(\,\cdot\,){\rm NOT}^{\otimes N}$, with ${\rm NOT}={\rm NOT}^\dagger=\sigma_x=|1\rangle\langle 0|+|0\rangle\langle 1|$.
Note that the free unitary evolution with the given Hamiltonian also maps any classical string onto itself; see also~\cite{CMMV13}.
Having identified the classical regime, we may study the quantum regime.
Here, the pure states can be decomposed as
\begin{equation}
|\psi\rangle=\sum_{\boldsymbol i\in\{0,1\}^N}\psi_{\boldsymbol i}|\boldsymbol i\rangle,
\end{equation}
which is quantified by the superposition number
\begin{equation}
r(\psi)=|\{\psi_{\boldsymbol i}\neq0\}|,
\end{equation}
being the cardinality of the non-vanishing expansion coefficients $\psi_{\boldsymbol i}$.
For example, a coherent superposition in a GHZ-type configuration, $(|0,\dots,0\rangle +|1,\dots,1\rangle)/\sqrt 2$, has a quantumness of $r=2$.
This result, $r>1$, quantifies that such a state is beyond the classical information approach.
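A minimal sketch of this counting rule for the bit-string basis is given below; the three-qubit GHZ and bit-string examples are assumptions chosen only for illustration.
\begin{verbatim}
import numpy as np

def superposition_number(psi, tol=1e-12):
    """r(psi): number of nonzero coefficients in the classical bit-string basis."""
    return int(np.sum(np.abs(np.asarray(psi, dtype=complex)) > tol))

N = 3
dim = 2 ** N
ghz = np.zeros(dim, dtype=complex)
ghz[0] = ghz[-1] = 1 / np.sqrt(2)           # (|000> + |111>)/sqrt(2)
basis_string = np.zeros(dim, dtype=complex)
basis_string[5] = 1.0                       # |101>, a classical bit string
print(superposition_number(ghz))            # 2 -> nonclassical
print(superposition_number(basis_string))   # 1 -> classical
\end{verbatim}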
A particular effect which can destroy these quantum interferences is given by decoherence, being the map
\begin{equation*}
\Lambda_{\rm dc}(\rho)=\int_{-\pi}^{+\pi} \!\!\!d\varphi\, p(\varphi) (\exp[i\varphi\sigma_z])^{\otimes N}\rho(\exp[-i\varphi\sigma_z])^{\otimes N},
\end{equation*}
for a classical phase distribution $p(\varphi)$.
We observe that a full decoherence, i.e. a uniform distribution $p(\varphi)=1/(2\pi)$, maps any initial state onto the corresponding classical one,
\begin{equation}
\Lambda_{\rm dc}(|\psi\rangle\langle\psi|)=\sum_{\boldsymbol i\in\{0,1\}^N} |\psi_{\boldsymbol i}|^2|\boldsymbol i\rangle\langle \boldsymbol i|.
\end{equation}
Consistently our approach identifies that decoherence diminishes quantum properties.
In the case of full decoherence we have $\mu(\Lambda_{\rm dc}(\rho))=0$ for any state $\rho\in\mathcal Q$, cf. Eq.~(\ref{eq:superpsoMeasure}).
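The effect of the full-decoherence map on a GHZ-type state can also be checked numerically. The sketch below is an assumption-laden illustration: it replaces the uniform phase integral by a discrete average, which is exact for the integer phase differences occurring here as long as the number of sample phases exceeds $2N$.
\begin{verbatim}
import numpy as np

def dephase(rho, n_qubits, samples=64):
    """Average rho over collective exp(i*phi*sigma_z) rotations, phi uniform in [-pi, pi)."""
    dim = 2 ** n_qubits
    ones = np.array([bin(i).count("1") for i in range(dim)])
    m = 2 * ones - n_qubits                 # collective sigma_z eigenvalue of |i>
    out = np.zeros((dim, dim), dtype=complex)
    for phi in np.linspace(-np.pi, np.pi, samples, endpoint=False):
        u = np.diag(np.exp(1j * phi * m))   # (exp[i*phi*sigma_z])^{tensor N}
        out += u @ rho @ u.conj().T
    return out / samples

N = 3
ghz = np.zeros(2 ** N, dtype=complex)
ghz[0] = ghz[-1] = 1 / np.sqrt(2)
rho = np.outer(ghz, ghz.conj())
print(np.round(dephase(rho, N).real, 6))    # diagonal mixture of |000> and |111>
\end{verbatim}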
Therefore, our approach not only predicts an unambiguous order of quantumness in quantum information; it additionally characterizes the evolution of these quantum properties in realistic scenarios.
\section{Summary and conclusions}\label{Sec:SumCon}
We have studied the quantification of quantum properties with a convex classical reference.
It was outlined that distance-based measures, in general, lead to an ambiguous quantification.
The origin of this ambiguity lies in the fact that the nature of quantumness is an algebraic rather than a topological one:
The mixture of classical states yields a convex subset of all quantum states.
Based on the conservation of a classical feature under mixing, we have proposed a general convex ordering method.
For handling classical processes or channels, we have additionally considered classical operations.
We have shown that these transformations can be used to generalize our sorting procedure.
By quantifying this order, we have obtained quantumness measures in a canonic form.
In particular, quantumness probes based on the determination of quantum superpositions have been examined.
The technique has been applied to typical examples in quantum physics such as entanglement and nonclassicality in terms of the Glauber-Sudarshan representation.
Moreover, the embedding of classical information processing into the quantum domain led to a measure of the amount of quantumness in quantum information.
In case of decoherence, we consistently retrieved the classical domain through our quantification.
In conclusion, the number of quantum superpositions represents a vital measure to quantify the quantum nature of a system.
Known examples have been considered in this context and they have been generalized.
Ambiguities, as observed for other measures, do not occur and the role of reversible classical operations has been outlined.
Our approach characterizes the quantum nature of states in terms of the fundamental superposition principle, and it naturally relates classical correlations to statistical mixing of states.
We believe that this approach will be useful for characterizing even so-far unknown quantum effects in a broader context and for the general understanding of the strength of quantum effects in physical systems.
\appendix \section{Normalization}\label{App:Normalization}
Let us consider the normalization.
It is more convenient to use the following sets,
\begin{eqnarray}
\mathcal Q'=&\{\lambda\rho: \lambda\geq0 \wedge \rho\in\mathcal Q \},\\
\mathcal C'=&\{\lambda\rho: \lambda\geq0 \wedge \rho\in\mathcal C \},
\end{eqnarray}
instead of the normalized states, i.e. states with a unit trace: ${\rm tr}\,\rho=1$.
The sets $\mathcal Q'$ and $\mathcal C'$ represent a cone construction over the sets $\mathcal Q$ and $\mathcal C$, respectively.
According to these definitions, an element $\rho_3$ belongs to $\mathcal C'$ if it can be written as a non-negative ($\lambda_1,\lambda_2\geq0$) linear combination of elements $\rho_1,\rho_2\in\mathcal C$,
\begin{eqnarray}
\rho_3=\lambda_1\rho_1+\lambda_2\rho_2.
\end{eqnarray}
In general, this linear combination is neither composed of normalized states nor written in a convex form.
However, it can be rewritten in such a form.
With ${\rm tr}\,\rho_3=\lambda_1{\rm tr}\,\rho_1+\lambda_2{\rm tr}\,\rho_2$, we obtain
\begin{eqnarray*}
\frac{\rho_3}{{\rm tr}\,\rho_3}{=}\frac{\lambda_1{\rm tr}\,\rho_1}{\lambda_1{\rm tr}\,\rho_1+\lambda_2{\rm tr}\,\rho_2} \frac{\rho_1}{{\rm tr}\,\rho_1}
{+}\frac{\lambda_2{\rm tr}\,\rho_2}{\lambda_1{\rm tr}\,\rho_1+\lambda_2{\rm tr}\,\rho_2}\frac{\rho_2}{{\rm tr}\,\rho_2}.
\end{eqnarray*}
This is obviously a convex combination of normalized states.
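A quick numerical check of this renormalization identity is sketched below; the two diagonal states and the weights $\lambda_1,\lambda_2$ are arbitrary assumptions.
\begin{verbatim}
import numpy as np

rho1 = np.diag([0.7, 0.3])        # normalized states (assumed)
rho2 = np.diag([0.2, 0.8])
lam1, lam2 = 1.5, 0.4             # arbitrary non-negative weights

rho3 = lam1 * rho1 + lam2 * rho2  # element of the cone C'
t1, t2, t3 = np.trace(rho1), np.trace(rho2), np.trace(rho3)
w1 = lam1 * t1 / (lam1 * t1 + lam2 * t2)
w2 = lam2 * t2 / (lam1 * t1 + lam2 * t2)
print(np.allclose(rho3 / t3, w1 * rho1 / t1 + w2 * rho2 / t2))   # True
\end{verbatim}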
Therefore, without any loss of generality, we can neglect the normalization of the quantum states and perform the normalization at the end of our treatment.
\ack This work was supported by the Deutsche Forschungsgemeinschaft through SFB 652. The authors gratefully acknowledge many stimulating discussions with Margarita and Vladimir Man'ko.
\section*{References}
\end{document}
\begin{document}
\newcommand{(\mathcal{C}, ~\mathcal{F},~\textit{c},~\textit{f},~\textit{u}, ~\mathcal{B})}{(\mathcal{C}, ~\mathcal{F},~\textit{c},~\textit{f},~\textit{u}, ~\mathcal{B})} \newcommand{(\mathcal{C}, ~\mathcal{F},~\textit{c},~\textit{f},~\textit{u}, ~\mathcal{B})}{(\mathcal{C}, ~\mathcal{F},~\textit{c},~\textit{f},~\textit{u}, ~\mathcal{B})}
\newcommand{(\mathcal{C},~\mathcal{F},~\textit{c},~\textit{f},~\textit{u},~k)}{(\mathcal{C},~\mathcal{F},~\textit{c},~\textit{f},~\textit{u},~k)} \newcommand{(\mathcal{C},~\mathcal{F},~\textit{c},~\textit{f},~\textit{u},~k)}{(\mathcal{C},~\mathcal{F},~\textit{c},~\textit{f},~\textit{u},~k)} \newcommand{(\mathcal{C},~\mathcal{F},~\textit{c},~\textit{f},~\textit{u})}{(\mathcal{C},~\mathcal{F},~\textit{c},~\textit{f},~\textit{u})}
\newcommand{\starinst}[5]{\mathcal{#1}_{#2}(#2,~#3,~#4,~#5)}
\newcommand{\starinstflp}[4]{\mathcal{#1}_#2(#2,~#3,~#4)} \newcommand{\InstanceSTAR}[5]{\mathcal{#1}_#2(#2,~#3,~#4,~#5)}
\newcommand{\overbar}[1]{\mkern 1.5mu\overline{\mkern-1.5mu#1\mkern-1.5mu}\mkern 1.5mu}
\newcommand{\thetajjp}[2]{\theta({#1},{#2})}
\newcommand{\textbf{LP(UKMP)}}{\textbf{LP(UKMP)}}
\newcommand{\mathcal{F}}{\mathcal{F}} \newcommand{\mathcal{F}^1}{\mathcal{F}^1} \newcommand{\mathcal{F}^2}{\mathcal{F}^2} \newcommand{\mathcal{F}}{\mathcal{F}} \newcommand{\mathcal{A}}{\mathcal{A}} \newcommand{\mathcal{A}^2}{\mathcal{A}^2} \newcommand{\mathcal{T}}{\mathcal{T}}
\newcommand{\mathcal{C}}{\mathcal{C}} \newcommand{\mathcal{C}}{\mathcal{C}}
\newcommand{\si}[1]{\mathcal{S}_#1} \newcommand{\sset}[1]{\mathcal{S}_#1}
\newcommand{\mathcal{B}}{\mathcal{B}} \newcommand{\mathcal{B}^1}{\mathcal{B}^1} \newcommand{\mathcal{B}^2}{\mathcal{B}^2} \newcommand{\budgetset}[1]{\mathcal{B}_{#1}}
\newcommand{\textit{u}}{\textit{u}} \newcommand{\textit{U}}{\textit{U}}
\newcommand{\mathcal{C}_D}{\mathcal{C}_D} \newcommand{\mathcal{C}_S}{\mathcal{C}_S} \newcommand{\mathcal{C}_D}{\mathcal{C}_D} \newcommand{\mathcal{C}_S}{\mathcal{C}_S} \newcommand{\mathcal{C}'_S}{\mathcal{C}'_S} \newcommand{\mathcal{\tilde{C}}_S}{\mathcal{\tilde{C}}_S} \newcommand{\csparsegroup}[1]{\mathcal{C}_{S,#1}} \newcommand{\facilitysetgroup}[1]{\mathcal{F}_{#1}} \newcommand{\mathcal{C}_S^1}{\mathcal{C}_S^1} \newcommand{\mathcal{C}_S^2}{\mathcal{C}_S^2} \newcommand{\mathcal{C}_D^1}{\mathcal{C}_D^1}
\newcommand{\mathcal{C}^D_{Good}}{\mathcal{C}^D_{Good}} \newcommand{\mathcal{C}^D_{Bad}}{\mathcal{C}^D_{Bad}} \newcommand{\mathcal{\tilde{C}}}{\mathcal{\tilde{C}}} \newcommand{\mathcal{\tilde{C}_S}}{\mathcal{\tilde{C}_S}}
\newcommand{\textit{Good Dense}}{\textit{Good Dense}} \newcommand{\textit{Bad Dense}}{\textit{Bad Dense}} \newcommand{\textit{Sparse}}{\textit{Sparse}} \newcommand{\textit{Dense}}{\textit{Dense}}
\newcommand{\overline{\mathcal{C}}}{\overline{\mathcal{C}}} \newcommand{\mathcal{L}}{\mathcal{L}}
\newcommand{\ballofj}[1]{\textit{\mathcal{B}($ #1 $)}}
\newcommand{\C}[1]{\hat{C_{#1}}} \newcommand{\hatofC}[1]{\hat{C_{#1}}} \newcommand{\hatof}[2]{\hat{{#1}_{#2}}}
\newcommand{\bundle}[1]{\mathcal{N}_{#1}} \newcommand{\neighbor}[1]{\mathcal{N}_{#1}} \newcommand{\p}[1]{\mathcal{N}_{#1}}
\newcommand{\nnew}[1]{\mathcal{N}'_{#1}}
\newcommand{\Njincbar}[1]{\delta({#1})}
\newcommand{\Tp}[1]{\mathcal{T}'({#1})} \newcommand{\mathcal{TC}}{\mathcal{TC}}
\newcommand{\cen}[1]{G_{#1}} \newcommand{\sj}[1]{\mathcal{S}_{#1}} \newcommand{\mathcal{S}}{\mathcal{S}}
\newcommand{\dist}[2]{c(#1,~#2)} \newcommand{\dists}[2]{c_{s}(#1,~#2)}
\newcommand{\textit{$ f_i $}}{\textit{$ f_i $}} \newcommand{\textit{$ f_i $}}{\textit{$ f_i $}} \newcommand{\faccost}[1]{\textit{$ f_#1 $}}
\newcommand{\A}[2]{\mathcal{A}_{\rho^*}(#1,{#2})}
\newcommand{\x}[3]{x_{#1}(#2,#3)} \newcommand{\X}[2]{x_{#1#2}} \newcommand{\Xstar}[2]{x^*_{#1#2}}
\newcommand{\bard}[1]{{d_{#1}}}
\newcommand{\hatofbjc}[1]{\hat{b}^c_{#1}}
\newcommand{\sumlimits}[2]{\displaystyle\sum\limits_{#1}^{#2}} \newcommand{\unionlimits}[2]{\displaystyle\bigcup\limits_{#1}^{#2}}
\newcommand{\sumofx}[1]{\sum_{#1 \in \mathcal{C}} x_{i#1}} \newcommand{\sumxoverclients}[2]{\displaystyle\sum\limits_{#2 \in \mathcal{C}} x_{#1#2}}
\newcommand{\sumxioverclients}[1]{\displaystyle\sum\limits_{#1 \in \mathcal{C}} x_{i#1}} \newcommand{\sumofvaronf}[2]{\sum_{i \in \mathcal{F}'} #1_{#2j}}
\newcommand{\sumap}[1]{\sum_{#1}{}}
\newcommand{\price}[1]{b^c_{#1}} \newcommand{\pricef}[1]{b^f_{#1}} \newcommand{\budgett}[1]{b_{#1}}
\newcommand{\load}[1]{l_{#1}}
\newcommand{\mass}[2]{size(#1,#2)}
\newcommand{\Load}[1]{load(#1)} \newcommand{\massofbundle}[3]{\sum_{#1 \in #2}{} #3_#1} \newcommand{\FC}[1]{\sum_{\textit{i} \in \mathcal{F}} \textit{$ f_i $} #1_i} \newcommand{\FCofbundle}[3]{\sum_{#1 \in #2} f_#1 #3_#1} \newcommand{\CC}[1]{\sum_{\textit{i} \in \mathcal{F}} \sum_{j \in \mathcal{C}} \dist{i}{j} #1_{ij}} \newcommand{\CCa}{\sum_{i,j} \demandofj{j} \dist{i}{j}}
\newcommand{\CCofBundle}[1]{\sum_{i \in \mathcal{F}} \dist{#1}{j} x^*_{#1j}} \newcommand{\textit{k}}{\textit{k}}
\newcommand{\textit{j}}{\textit{j}} \newcommand{\textit{i}}{\textit{i}} \newcommand{\demand}[1]{\tilde{l_{#1}}} \newcommand{\hatofdemand}[1]{\hat{l_{#1}}} \newcommand{\zofi}[1]{z_#1} \newcommand{\hatzofi}[1]{\hat{z}_#1} \newcommand{\primezofi}[1]{z'_{#1}} \newcommand{\tildezofi}[1]{\tilde{z}_#1}
\newcommand{\clientsetbundle}[1]{\mathcal{C}^{#1}} \newcommand{\radiusballofj}{\dist{i}{j} \leq 2 \C{j} }
\newcommand{\demandofj}[1]{\textit{$ d_{#1} $}} \newcommand{\neighbourhood}[1]{\mathbb{N}(#1)} \newcommand{\cal{F}}{\cal{F}} \newcommand{\hat{\cal{F}}}{\hat{\cal{F}}} \newcommand{\CCLPtwo}[1]{\sum_{i \in \mathcal{F}',j \in \clientsetbundle{'}} \demandofj{j} \dist{i}{j} #1_{ij}} \newcommand{\priceofcorf}[2]{b_{#1}^{#2}} \newcommand{\hatofy}[1]{\hat{y_#1}} \newcommand{\hatofx}[2]{\hat{x}_{#1#2}} \newcommand{\bar{\mathcal{C}}}{\bar{\mathcal{C}}}
\newcommand{\obj}[1]{\mathcal{C}ostKM(#1)} \newcommand{\objflp}[1]{\mathcal{C}ostkFLP_{sp}(#1)} \newcommand{\objflpSD}[1]{\mathcal{C}ostkFLP(#1)}
\newcommand{\Costone}[1]{Cost^1(#1)} \newcommand{\Costtwo}[1]{Cost^2(#1)} \newcommand{\cost}[2]{Cost_{#1}(#2)} \newcommand{\bar{\sigma}}{\bar{\sigma}} \newcommand{\hat{\sigma}}{\hat{\sigma}} \newcommand{\costsingle}[1]{Cost(#1)} \newcommand{\costjprimeinc}[3]{Cost_{#1}(#2,~#3)} \newcommand{\loadjinc}[2]{\phi(#1,~#2)}
\newcommand{\costFLPThree}[3]{Cost_{#1}(#2,~#3)} \newcommand{\costtwo}[2]{Cost(#1,~#2)} \newcommand{\costthree}[3]{Cost_{#1}(#2,~#3)} \newcommand{\costFLPFour}[4]{Cost_{#1}(#2,~#3,~#4)} \newcommand{\cardTwo}[1]{Y^*(#1)} \newcommand{\totalcost}[2]{Cost(#1,~#2)}
\newcommand{\textit{et al}.}{\textit{et al}.} \newcommand{\textit{i}.\textit{e}.}{\textit{i}.\textit{e}.} \newcommand{\textit{e}.\textit{g}.}{\textit{e}.\textit{g}.} \newcommand{\Cost}[1]{Cost(#1)}
\newcommand{\soner}[1]{s^1_{#1}} \newcommand{\stwor}[1]{s^2_{#1}} \newcommand{\Soner}[1]{S^1_{#1}} \newcommand{\Stwor}[1]{S^2_{#1}}
\newcommand{\mcone}[1]{G^1_{#1}} \newcommand{\mctwo}[1]{G^2_{#1}} \newcommand{\denser}[1]{j_{d_{#1}}} \newcommand{j_d}{j_d} \newcommand{\sparseoner}[1]{j_{s1_{#1}}} \newcommand{j_{s}}{j_{s}} \newcommand{\sparsetwor}[1]{j_{s2_{#1}}} \newcommand{j_{s2}}{j_{s2}} \newcommand{\That}[1]{\hat{\tau}({#1})} \newcommand{\muj}[1]{\mu({#1})} \newcommand{\resj}[1]{res({#1})} \newcommand{\floordjbyu}[1]{\lfloor{d_{#1}/u}\rfloor}
\newcommand{\tauhat}[1]{\hat{\tau}({#1})}
\newcommand{S_1}{S_1} \newcommand{S_2}{S_2} \newcommand{S_3}{S_3} \newcommand{G_{r_\alpha}}{G_{r_\alpha}} \newcommand{G_{r_\beta}}{G_{r_\beta}}
\newcommand{\psi}{\psi} \newcommand{\sr}[1]{s_{#1}} \newcommand{\Sr}[1]{S_{#1}}
\newcommand{B}{B}
\newcommand{\mathcal{C}_r}{\mathcal{C}_r} \newcommand{\mathcal{C}_p}{\mathcal{C}_p} \newcommand{\mathcal{C}_o}{\mathcal{C}_o} \newcommand{\opt}[1]{LP_{opt}} \newcommand{\lp}[1]{LP_{#1}} \newcommand{Cost_s}{Cost_s} \newcommand{Cost_p}{Cost_p} \newcommand{Cost_f}{Cost_f}
\newcommand{Meta-Cluster }{Meta-Cluster } \newcommand{Meta-Clusters }{Meta-Clusters }
\newcommand{\facilityset_j}{\mathcal{F}_j} \newcommand{\facilityset_{j'}}{\mathcal{F}_{j'}} \newcommand{\distd}[2]{c'(#1,~#2)} \newcommand{\cliset_{full}}{\mathcal{C}_{full}} \newcommand{\cliset_{part}}{\mathcal{C}_{part}} \newcommand{\cliset^*}{\mathcal{C}^*} \newcommand{\mathcal{B}_j}{\mathcal{B}_j}
\newcommand{\mathcal{D}_{\ell_{j'}}}{\mathcal{D}_{\ell_{j'}}} \newcommand{\mathcal{T}}{\mathcal{T}}
\newcommand{\dlj}[1]{r\mathcal{F}_{j_{#1}}}
\newcommand{\rFj}[1]{rF_{j}_{#1}} \newcommand{\level}[1]{\rFj_#1}
\newcommand{\bar\rho}{\bar\rho} \newcommand{\lb}[1]{\mathcal{L}_{#1}} \newcommand{\AVG}[1]{\mathcal{A}_{#1}} \newcommand{\radj}[1]{\mathcal{R}_{#1}} \newcommand{\cluster}[1]{\mathcal{P}_{#1}} \newcommand{\F}[1]{\mathcal{F}_{#1}} \newcommand{i_j}{i_j} \newcommand{\N}[1]{\mathcal{N}_{#1}} \newcommand{\clientset_i}{\mathcal{C}_i} \newcommand{\Out}[1]{\mathcal{O}_{#1}} \newcommand{\R}[1]{\mathcal{R}_{#1}} \newcommand{rmax_j}{rmax_j} \newcommand{\T}[1]{\mathcal{T}_{#1}} \newcommand{\cli}[1]{j_{}} \newcommand{\ball}[1]{\mathcal{B}_{#1}} \newcommand{\floor}[1]{\lfloor{#1}\rfloor} \newcommand{\rtj}[1]{r\mathcal{T}_{j_{#1}}}
\title{On Variants of Facility Location Problem with Outliers}
\titlerunning{Variants of Facility Location Problem with Outliers}
\author{Rajni Dabas\inst{1} \and Neelima Gupta\inst{1}}
\authorrunning{R. Dabas and N. Gupta}
\institute{Department of Computer Science, University of Delhi, India\\ \email{[email protected], [email protected]}}
\maketitle \begin{abstract} In this work, we study the extension of two variants of the facility location problem (FL) to make them robust towards a few distantly located clients. First, $k$-facility location problem ($k$FL), a common generalization of FL and $k$-median problems, is a well studied problem in the literature. In the second variant, lower bounded facility location (LBFL), we are given a bound on the minimum number of clients that an opened facility must serve. Lower bounds are required in many applications like profitability in commerce and load balancing in transportation problems. In both cases, the cost of the solution may be increased grossly by a few distantly located clients, called the outliers. Thus, in this work, we extend $k$FL and LBFL to make them robust towards the outliers. For $k$FL with outliers ($k$FLO) we present the first (constant) factor approximation violating the cardinality requirement by +1. As a by-product, we also obtain the first approximation for FLO based on LP-rounding. For LBFLO, we present a tri-criteria solution with a trade-off between the violations in lower bounds and the number of outliers. With a violation of $1/2$ in lower bounds, we get a violation of $2$ in outliers.
\keywords{ Facility Location \and Outliers \and Approximation \and Lower Bound \and $k$-Facility Location \and $k$-Median.} \end{abstract}
\section{Introduction}
Consider an e-retail company that wants to open warehouses in a city for home delivery of essential items. Each store has an associated opening cost depending on the location in the city. The aim of the company is to open these warehouses at locations such that the cost of opening the warehouses plus the cost of servicing all the customers in the city from the nearest opened store is minimised. In the literature, such problems are called {\em facility location problems} (FL) where warehouses are the facilities and customers are the clients. Formally, in FL we are given a set $\mathcal{F}$ of $n$ facilities and a set $\mathcal{C}$ of $m$ clients. Each facility $i \in \mathcal{F}$ has an opening cost $f_i$ and the cost of servicing a client $j \in \mathcal{C}$ from a facility $i \in \mathcal{F}$ is $c(i,j)$ (we assume that the service costs are metric). The goal is to open a subset $\mathcal{F}' \subseteq \mathcal{F}$ of facilities such that the cost of opening the facilities and servicing the clients from the opened facilities is minimised. In a variant of FL, called $k$-facility location problem ($k$FL), we are given an additional bound $k$ on the maximum number of warehouses/facilities that can be opened, i.e. $|\mathcal{F}'| \le k$. In our example this requirement may be imposed to maintain the budget constraints or to comply with government regulations. In another variant of the problem, we are required to serve some minimum number of customers/clients from an opened facility. Such a requirement is natural to ensure profitability in our example.
This minimum requirement is captured as lower bounds in facility location problems. That is, in lower bounded FL (LBFL), we are also given a lower bound $\lb{i}$ on the minimum number of clients that an opened facility $i$ must serve.
In the above scenarios, a few distant customers/clients can increase the cost of the solution disproportionately; such clients are called {\em outliers}. The problem of outliers was first introduced by Charikar \textit{et al}.~\cite{charikar2001algorithms} for the facility location and the $k$-median problems. In this paper we extend $k$-facility location and lower bounded facility location to deal with the outliers and denote them by $k$FLO and LBFLO, respectively. Since FL is well known to be NP-hard, the NP-hardness of $k$FLO and LBFLO follows. We present the first (constant factor) approximation for $k$FLO opening at most $k+1$ facilities. In particular, we present the following result:
\begin{theorem} \label{thm_flo} There is a polynomial time algorithm that approximates $k$-facility location problem with outliers opening at most $(k+1)$ facilities within $11$ times the cost of the optimal solution. \end{theorem}
Our result is obtained using LP rounding techniques. As a by-product, we get the first constant factor approximation for FLO using LP rounding techniques. FLO is shown to have an unbounded integrality gap~\cite{charikar2001algorithms} with its standard LP. We get around this difficulty by guessing the most expensive facility opened in the optimal solution. In particular, we get the following:
\begin{corollary} \label{coro-flo} There is a polynomial time algorithm that approximates facility location problem with outliers within $11$ times the cost of the optimal solution. \end{corollary}
We reduce LBFLO to FLO and use any algorithm to approximate FLO to obtain a tri-criteria solution for the problem.
To the best of our knowledge, no result is known for LBFLO in literature. In particular, we present our result in Theorem~\ref{thm_LBkFLO} where a tri-criteria solution is defined as follows:
\begin{definition} A tri-criteria solution for LBFLO is an $(\alpha, \beta, \gamma)$-approximation solution $S$ that violates the lower bounds by a factor of $\alpha$ and the outliers by a factor of $\beta$ with cost no more than $\gamma\, OPT$, where $OPT$ denotes the cost of an optimal solution of the problem, $\alpha<1$ and $\beta>1$. \end{definition}
\begin{theorem} \label{thm_LBkFLO} A polynomial time $(\alpha,\frac{1}{1-\alpha}, \lambda(\frac{1+\alpha}{1-\alpha}))$-approximation can be obtained for the LBFLO problem, where $\alpha\in(0,1)$ is a constant and $\lambda$ is an approximation factor for the FLO problem. \end{theorem}
Theorem~\ref{thm_LBkFLO} presents a trade-off between the violations in the lower bounds and that in the number of outliers. Violation in outliers can be made arbitrarily small by choosing $\alpha$ close to $0$. And, violation in lower bounds can be chosen close to $1$ at the cost of increased violation in the outliers. Similar result can be obtained for LB$k$FLO with $+1$ violation in cardinality using Theorem~\ref{thm_flo}. The violation in the cardinality comes from that in $k$FLO.
\textbf{Our Techniques: } For $k$FLO, starting with an LP solution $\rho^* = <x^*, y^*>$, we first eliminate the $x^*_{ij}$ variables and work with an auxiliary linear programming (ALP) relaxation involving only $y_i$ variables. This is achieved by converting $\rho^*$ into a {\em complete solution} in which either $x^*_{ij}=y^*_{i}$ or $x^*_{ij}=0$. Using the ALP, we identify the set of facilities to open in our solution. The ALP is solved using an iterative rounding technique to give a pseudo-integral solution (a solution is said to be pseudo-integral if there are at most two fractional facilities). We open both the facilities at a $+1$ loss in cardinality and at a loss of factor $2$ in the cost by guessing the maximum opening cost of a facility in the optimal solution. Once we identify the set of facilities to open, we can greedily assign the first $m-t$ clients in increasing order of distance from the nearest opened facility. Thus, in the rest of the paper, we only focus on identifying the set of facilities to open.
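The greedy assignment step mentioned above is straightforward once the open facilities are fixed: serve the $m-t$ clients that are closest to an open facility and declare the rest outliers. The Python sketch below is only an illustration; the instance, the helper name \texttt{assign\_with\_outliers} and the (non-metric) toy costs are assumptions.
\begin{verbatim}
import numpy as np

def assign_with_outliers(dist, open_facilities, num_outliers):
    """Serve the m - t clients closest to an open facility; the rest become outliers.

    dist: (n_facilities x m_clients) array of service costs.
    Returns (assignment dict client -> facility, set of outlier clients).
    """
    m = dist.shape[1]
    nearest = {j: min(open_facilities, key=lambda i: dist[i, j]) for j in range(m)}
    order = sorted(range(m), key=lambda j: dist[nearest[j], j])
    served = order[: m - num_outliers]
    return {j: nearest[j] for j in served}, set(order[m - num_outliers:])

rng = np.random.default_rng(0)
costs = rng.integers(1, 20, size=(4, 10)).astype(float)   # toy costs, illustration only
assignment, outliers = assign_with_outliers(costs, open_facilities=[0, 2], num_outliers=3)
print(assignment, outliers)
\end{verbatim}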
For LBFLO, we construct an instance $I'$ of FLO by ignoring the lower bounds and defining new facility opening cost for each $i \in \mathcal{F}$. An approximate solution $AS'$ to $I'$ is obtained using any approximation algorithm for FLO. Facilities serving less than $\alpha\lb{i}$ clients are closed and their clients are either reassigned to the other opened facilities or are made outliers. This leads to violation in outliers that is bounded by $\frac{1}{1-\alpha}$. Facility opening costs in $I'$ are defined to capture the cost of reassignments.
\textbf{Related Work:} The problems of facility location and $k$-median with outliers were first defined by Charikar~\textit{et al}.~\cite{charikar2001algorithms}. Both the problems were shown to have unbounded integrality gap~\cite{charikar2001algorithms} with their standard LPs.
For FLO, they gave a $(3+\epsilon)$-approximation using primal dual technique by guessing the most expensive facility opened by the optimal solution. For a special case of the problem with uniform facility opening costs and doubling metrics, Friggstad~\textit{et al}.~\cite{FriggstadKR19} gave a PTAS using multiswap local search. For $k$MO, Charikar~\textit{et al}.~\cite{charikar2001algorithms} gave a $4(1+1/\epsilon)$-approximation with $(1+\epsilon)$-factor violation in outliers. Using local search techniques, Friggstad~\textit{et al}.~\cite{FriggstadKR19} gave $(3+\epsilon)$ and $(1+\epsilon)$-approximations with ($1+\epsilon$) violation in cardinality for general and doubling metric respectively. Chen~\cite{chen-kMO} gave the first true constant factor approximation for the problem using a combination of local search and primal dual. Their approximation factor is large and it was improved to $(7.081+\epsilon)$ by Krishnaswamy~\textit{et al}.~\cite{krishnaswamy-kMO} by strengthening the LP. They use iterative rounding framework and, their factor is the current best result for the problem.
Lower bounds in FL were introduced by Karger and Minkoff~\cite{Minkoff_LBFL} and Guha~\textit{et al}.~\cite{Guha_LBFL}. They independently gave constant factor approximations with violation in lower bounds. The first true constant factor($448$) approximation was given by Zoya Svitkina~\cite{Zoya_LBFL} for uniform lower bounds. The factor was improved to $82.6$ by Ahmadian and Swamy \cite{Ahmadian_LBFL}. Shi Li~\cite{Li_NonUnifLBFL} gave the first constant factor approximation for general lower bounds, with the constant being large ($4000$). Han~\textit{et al}.~\cite{Han_LBkM} studied the general lower bounded $k$-facility location (LB$k$FL) violating the lower bounds.
Same authors~\cite{Han_LBknapsackM} removed the violation in the lower bound for the $k$-Median problem.
The only work that deals with lower bound and outliers together is by Ahmadian and Swamy~\cite{ahmadian_lboutliers}. They have given constant factor approximation for lower-bounded min-sum-of-radii with outliers and lower-bounded k-supplier with outliers problems using primal-dual technique.
\textbf{Organisation of the paper:} A constant factor approximation for $k$FLO is given in Section~\ref{kFLPO} opening at most $(k+1)$ facilities. In Section~\ref{LBkFLO}, the tri-criteria solution for LBFLO is presented. Finally we conclude with future scope in Section~\ref{conclusion}.
\label{tri-criteria1} In this section we present a tri-criteria approximation for the LBFLO problem with a $1/4$-factor violation in the lower bounds and a $2$-factor violation in the outliers at a constant factor loss in cost. The following is the LP relaxation for the LBFLO problem, where for each $i \in \mathcal{F}$, $y_i$ indicates if facility $i$ is opened, for each $i \in \mathcal{F}$ and $j \in \mathcal{C}$, $x_{ij}$ indicates if client $j$ is assigned to facility $i$, and for each $j \in \mathcal{C}$, $z_j$ indicates if client $j$ is an outlier.
\label{{LBFLO}} $\text{Minimize}~\mathcal{C}ostLBFLO(x,y,z) = \sum_{j \in \mathcal{C}}\sum_{i \in \mathcal{F}}\dist{i}{j}x_{ij} + \sum_{i \in \mathcal{F}}f_iy_i $ \begin{eqnarray} \text{subject to} &\sum_{i \in \mathcal{F}}{} x_{ij} + z_j \geq 1 & \forall ~\textit{j} \in \mathcal{C} \label{LbkFLo_const1}\\ & \lb{i} y_i \leq \sum_{j \in \mathcal{C}}x_{ij} & \forall~ \textit{i} \in \mathcal{F} \label{LbkFLo_const2}\\
& \sum_{j \in \mathcal{C}} z_{j} \leq t & \label{LbkFLo_const4}\\ & x_{ij} \leq y_i & \forall~ \textit{i} \in \mathcal{F} , ~\textit{j} \in \mathcal{C} \label{LbkFLo_const5}\\ & y_i,x_{ij},z_j \in [0,1] \label{LPFLP_const5} \end{eqnarray}
Constraints~\ref{LbkFLo_const1} ensure that every client is either served or is an outlier. Constraints~\ref{LbkFLo_const2} and~\ref{LbkFLo_const4} enforce the lower bounds and the bound on the number of outliers, respectively. Constraints~\ref{LbkFLo_const5} are the standard facility location constraints saying that a client is assigned only to an open facility. Let $opt=<x^*,y^*,z^*>$ be the optimal solution of the above LP.
A solution is said to be an {\em integral open solution} if all the facilities are either fully opened or fully closed, i.e., $y_i \in \{0,1\}$ for all $i \in \mathcal{F}$. Next we will construct an integral open solution $S=<\bar{x}, \bar{y}, \bar{z}>$. The solution $S$ is created in two steps: ($1$) First we create a solution $S_{out}= <x', y', z'>$ by removing the clients that are outliers to a large extent ($\geq 1/\lambda$) in $opt$, where $\lambda$ is a parameter to be fixed later, ($2$) use clustering and rounding techniques to obtain an integral open solution for the remaining clients. Formally, in step 1, for all $j \in \mathcal{C}$ and $i \in \mathcal{F}$, set $z'_{j}=1$ and $x'_{ij}=0$ if $z^*_{j} \geq 1/\lambda$. Otherwise, set $z'_{j}=0$ and $x'_{ij}=x^*_{ij}$. For all $i \in \mathcal{F}$, $y'_{i}=y^*_{i}$. Note that in step 1 we incur at most a $\lambda$-factor violation in the number of outliers. Also, $CostLBFLO(S_{out}) \leq CostLBFLO(opt)$.
Next we will describe the Step 2 in detail. Let $\AVG{j}$ be the average connection cost for a client $j \in \mathcal{C}$ after Step 1, that is, $\AVG{j}=\sum_{i \in \mathcal{F}} \dist{i}{j}x'_{ij}/\sum_{i \in \mathcal{F}} x'_{ij}$. The clients are now considered in increasing order of radius $\lambda\AVG{j}$. Let $j$ be a client in this order, remove all the clients $k$ such that $c(j,k) \leq 2\lambda max \{ \AVG{j},\AVG{k} \}$ and repeat the process with the left over clients. Let $\mathcal{C}' \subset \mathcal{C}$ be the set of remaining clients after all the clients have been considered. Note that for any clients $j, k \in \mathcal{C}'$ the following property is satisfied: $c(j,k) > 2\lambda max \{ \AVG{j},\AVG{k} \}$.
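For concreteness, the greedy filtering just described can be sketched as follows; the instance data, the dictionary-based interface and the choice $\lambda=2$ are assumptions for illustration, and, as in standard filtering, the client $j$ that does the removing is itself kept in $\mathcal{C}'$.
\begin{verbatim}
def filter_clients(avg_cost, client_dist, lam):
    """Keep a subset C' of clients with pairwise c(j,k) > 2*lam*max(A_j, A_k)."""
    order = sorted(avg_cost, key=avg_cost.get)     # increasing radius lam * A_j
    kept, removed = [], set()
    for j in order:
        if j in removed:
            continue
        kept.append(j)
        for k in order:
            if k != j and k not in removed and \
               client_dist[j][k] <= 2 * lam * max(avg_cost[j], avg_cost[k]):
                removed.add(k)
    return kept

# Toy example (assumed numbers): three clients on a line at positions 0, 1 and 10.
pos = {0: 0.0, 1: 1.0, 2: 10.0}
A = {0: 1.0, 1: 1.5, 2: 0.5}
d = {j: {k: abs(pos[j] - pos[k]) for k in pos} for j in pos}
print(filter_clients(A, d, lam=2))   # [2, 0]: client 1 is dominated by client 0
\end{verbatim}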
The total extent up to which facilities are opened in $\F{j}$ after Step 1 is $\geq (1-1/\lambda)(\sum_{i \in \mathcal{F}} x'_{ij}) \geq (1-1/\lambda) 1/\lambda$, where the last inequality follows because every client is served to an extent of at least $1/\lambda$ after the first step. To obtain an integral open solution, for all $j \in \mathcal{C}'$, we open the cheapest (lowest opening cost) facility, say $i_j \in \F{j}$, and transfer all the assignments coming onto the facilities in $\cluster{j}$ to $i_j$. Formally, set $\bar{y}_{i_j}=1$, $\bar{x}_{i_j j} = \sum_{i \in \cluster{j}}x'_{ij}$ and $\bar{y}_{i}=0$, $\bar{x}_{ij}=0$ for all $j \in \mathcal{C}$ and $i \neq i_j$. Set $\bar{z}_{j} = z'_{j}$ for all $j \in \mathcal{C}$.
\begin{lemma} The integral open solution $S=<\bar{x},\bar{y},\bar{z}>$ violates lower bound by $\alpha=1/4$ and outliers by $\gamma=2$ at constant(?) factor loss in cost. \end{lemma}
\begin{proof}
Set $\lambda=1/2$. \end{proof}
\section{$(k+1)$ solution for $k$FLO} \label{kFLPO} The problem $k$FLO can be represented as the following integer program (IP):
\label{{k-FLPO}} $Minimize ~\mathcal{C}ostkFLO(x,y) = \sum_{j \in \mathcal{C}}\sum_{i \in \mathcal{F}}\dist{i}{j}x_{ij} + \sum_{i \in \mathcal{F}}f_iy_i $ \begin{eqnarray} subject~ to &\sum_{i \in \mathcal{F}}{} x_{ij} \leq 1 & \forall ~\textit{j} \in \mathcal{C} \label{LPFLP_const1}\\ & x_{ij} \leq y_i & \forall~ \textit{i} \in \mathcal{F} , ~\textit{j} \in \mathcal{C} \label{LPFLP_const2}\\ & \sum_{i \in \mathcal{F}}y_{i} \leq k & \label{LPFLP_const3}\\ & \sum_{j \in \mathcal{C}} \sum_{i \in \mathcal{F}}x_{ij} \geq m-t & \label{LPFLP_const4}\\ & y_i,x_{ij} \in \left\lbrace 0,1 \right\rbrace \label{LPFLP_const5} \end{eqnarray} where variable $y_i$ denotes whether facility $i$ is open or not and $x_{ij}$ indicates if client $j$ is served by facility $i$ or not. Constraints \ref{LPFLP_const1} ensure that the extent to which a client is served is no more than $1$. Constraints \ref{LPFLP_const2} ensure that a client is assigned only to an open facility. Constraint \ref{LPFLP_const3} ensures that the total number of facilities opened is at most $k$ and Constraint \ref{LPFLP_const4} ensures that the total number of clients served is at least $m-t$. The LP-relaxation of the problem is obtained by allowing the variables $y_i, x_{ij} \in [0, 1]$. Let us call it $LP$.
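The LP relaxation itself can be set up directly with an off-the-shelf solver. The sketch below uses \texttt{scipy.optimize.linprog} on a small random instance; the instance data and the helper name \texttt{solve\_kflo\_lp} are assumptions, and the snippet only produces the fractional solution $<x^*, y^*>$, not the rounding.
\begin{verbatim}
import numpy as np
from scipy.optimize import linprog

def solve_kflo_lp(f, c, k, t):
    """LP relaxation of kFLO.  f: opening costs (n,), c: service costs (n x m)."""
    n, m = c.shape
    nvar = n + n * m                       # y_0..y_{n-1}, then x_{ij} at n + i*m + j
    xi = lambda i, j: n + i * m + j
    obj = np.concatenate([f, c.ravel()])
    A, b = [], []
    for j in range(m):                     # sum_i x_ij <= 1
        row = np.zeros(nvar); row[[xi(i, j) for i in range(n)]] = 1
        A.append(row); b.append(1)
    for i in range(n):                     # x_ij <= y_i
        for j in range(m):
            row = np.zeros(nvar); row[xi(i, j)] = 1; row[i] = -1
            A.append(row); b.append(0)
    row = np.zeros(nvar); row[:n] = 1      # sum_i y_i <= k
    A.append(row); b.append(k)
    row = np.zeros(nvar); row[n:] = -1     # serve at least m - t clients
    A.append(row); b.append(-(m - t))
    return linprog(obj, A_ub=np.array(A), b_ub=np.array(b),
                   bounds=[(0, 1)] * nvar, method="highs")

rng = np.random.default_rng(1)
f = np.array([4.0, 6.0, 5.0])                        # toy instance (assumed data)
c = rng.integers(1, 10, size=(3, 5)).astype(float)
res = solve_kflo_lp(f, c, k=2, t=1)
print(res.fun, res.x[:3])                            # LP cost and fractional y values
\end{verbatim}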
Let $\rho^{*} = <x^*, y^*>$ denote the optimal solution of $LP$ and $\opt{}$ denote the cost of $\rho^*$. A solution is said to be a {\em complete solution} if either $x^*_{ij}= y^*_i$ or $x^*_{ij}=0$, $\forall i \in \mathcal{F}$ and $\forall j \in \mathcal{C}$. We first eliminate the $x$ variables from our solution $\rho^*$ by making it complete. This is achieved by the standard technique of splitting the openings and making collocated copies of facilities. For every client $j \in \mathcal{C}$, we define a bundle $\facilityset_j$ as the set of facilities that serve $j$ in our complete solution. Formally, $\facilityset_j = \{ i \in \mathcal{F} : x^*_{ij}>0\}$. Let $\dlj{} = \max_{i \in \facilityset_j}\distd{i}{j}$ be the distance of the farthest facility in $\facilityset_j$ from $j$. See Fig.~\ref{FIG_FLO1}($a$). Note that the complete solution $<x^*, y^*>$ satisfies the following properties: \begin{enumerate}
\item \label{prop1} $\sum_{i \in \facilityset_j} y^*_i \leq 1~\forall j \in \mathcal{C}$ as $\sum_{i \in \facilityset_j} y^*_i = \sum_{i \in \mathcal{F}}x^*_{ij} \leq 1$.
\item \label{prop2} $\sum_{i \in \mathcal{F}}y^*_i \leq k$
\item \label{prop3} $\sum_{j \in \mathcal{C}} \sum_{i \in \facilityset_j} y^*_i \geq m-t$ as $\sum_{i \in \facilityset_j} y^*_i = \sum_{i \in \mathcal{F}}x^*_{ij}$ and $\sum_{j \in \mathcal{C}} \sum_{i \in \mathcal{F}} x^*_{ij} \geq m-t$. \end{enumerate}
\subsection{Auxiliary LP (ALP)} \begin{figure}
\caption{ ($a$) Set $\facilityset_j$ corresponding to a client $j$, ($b$) Discretization of distances}
\label{FIG_FLO1}
\end{figure}
We first discretize our distances $c(i,j)$ by rounding them up to the nearest power of $2$. Let $\distd{i}{j} = 2^r$, where $r$ is the smallest integer such that $\dist{i}{j} \le 2^r$. See Fig.~\ref{FIG_FLO1}($b$).
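A one-line sketch of this rounding (assuming strictly positive distances):
\begin{verbatim}
import math

def discretize(c):
    """c'(i,j): round a positive distance up to the nearest power of two."""
    return 2 ** math.ceil(math.log2(c))

print([discretize(c) for c in (1, 3, 4, 5.5)])   # [1, 4, 4, 8]
\end{verbatim}
In particular, $c(i,j) \le c'(i,j) \le 2\,c(i,j)$, which is the bound used in Lemma~\ref{fs_kflo}.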
Next, we identify a set $\cliset_{full}$ of clients that are going to be served fully in our solution. Ideally, we would like to open at least one facility in $\facilityset_j$ for every $j \in \cliset_{full}$. If all the $\facilityset_j$'s ($j \in \cliset_{full}$) were pair-wise disjoint, an LP constraint like $\sum_{i \in \facilityset_j} w^*_i \geq 1$ for all $j \in \cliset_{full}$, along with constraints~\ref{LPALP_const3} (for partially served clients, say clients in $\cliset_{part}$),~\ref{LPALP_const0} (for cardinality) and~\ref{LPALP_const4} (for outliers), would be sufficient to get us a pseudo-integral solution. But this, in general, is not true.
Thus we further identify a set $\cliset^* \subseteq \cliset_{full}$ so that we open one facility in $\facilityset_j$ for every $j \in \cliset^*$ and
($i$) $\facilityset_j$'s ($j \in \cliset^*$) are pair-wise disjoint \textit{(disjointness property)}
($ii$) for every $\cli{f} \in \cliset_{full} \setminus \cliset^*$, there is a close-by (within constant factor of $\dlj{}$ distance from $\cli{f}$) client in $\cliset^*$.
On a close observation, we notice that instead of $ \facilityset_j$'s, we are rather interested in smaller sets: let $rmax_j$ be the (rounded) distance of the farthest facility in $\facilityset_j$ serving $j$ in our solution and $\T{j} = \{ i \in \facilityset_j : c'(i,j) \leq rmax_j\}$. Then we actually want $\T{j}$'s ($j \in \cliset^*$) to be pair-wise disjoint. As the distances are discretized, we have that $rmax_j$ is either $\dlj{}$ or is $\le \dlj{}/2$. Since we don't know $rmax_j$, once a client is identified to be in $\cliset_{full}$, we search for it by starting with $\T{j} = \facilityset_j$, $\rtj{} = \dlj{}$ and, shrinking it over iterations. Shrinking is done whenever, for $\mathcal{B}_j = \{ i \in \facilityset_j : c'(i,j) \leq rmax_j/2\}$ , we obtain $\sum_{i \in \mathcal{B}_j} w_i = 1$. Thus we add a constraint $\sum_{i \in \mathcal{B}_j} w_i \le 1$ in our ALP and arrive at the following auxiliary LP (ALP). Variable $w_i$ denotes whether facility $i$ is opened in the solution or not. Constraints (9) and (10) correspond to the requirements of cardinality and outliers. For $\cli{f}\in \cliset_{full}$, if the ALP doesn't open a facility within $\ball{\cli{f}}$, it bounds the cost of sending $\cli{f}$ up to a distance of $\rtj{}$.
$\text{Min}~CostALP(w) = \sum_{j \in \cliset_{part}}\sum_{i \in \T{j}}\distd{i}{j} w_i + \sum_{j \in \cliset_{full}} [ \sum_{i \in \mathcal{B}_j} \distd{i}{j} w_i + (1 - \sum_{i \in \mathcal{B}_j} w_i)\rtj{}] + \sum_{i \in \mathcal{F}}f_i w_i$ \begin{eqnarray} subject~ to &\sum_{i \in \T{j}} w_i = 1 & \forall ~ j \in \cliset^* \label{LPALP_const1}\\ &\sum_{i \in \mathcal{B}_j} w_i \leq 1 & \forall ~ j \in \cliset_{full} \label{LPALP_const2}\\ &\sum_{i \in \T{j}} w_i \leq 1 & \forall ~ j \in \cliset_{part} \label{LPALP_const3}\\ &\sum_{i \in \mathcal{F}} w_i \leq k & \label{LPALP_const0}\\
&|\cliset_{full}| + \sum_{j \in \cliset_{part}} \sum_{i \in \T{j}} w_i \geq m-t & \label{LPALP_const4}\\ & 0 \leq w_i \leq 1 &\label{LPALP_const5} \end{eqnarray}
The following lemma gives a feasible solution to the ALP whose cost is bounded by the LP optimal within a constant factor. \begin{lemma} \label{fs_kflo} A feasible solution $w'$ can be obtained to the ALP such that $CostALP(w') \leq 2 \opt{LP}$. \end{lemma}
\begin{proof} Let $w'_i = y^*_{i}$. \begin{enumerate}
\item {\em Feasibility:} Constraints \ref{LPALP_const1} and \ref{LPALP_const2} hold vacuously as $\cliset_{full}$ and hence $\cliset^*$ are empty.
Constraints \ref{LPALP_const3}, \ref{LPALP_const0} and \ref{LPALP_const4} hold by properties~\ref{prop1},~\ref{prop2} and~\ref{prop3} respectively.
\item {\em Cost Bound:}
As $\T{j}=\facilityset_j$,
$CostALP(w') = \sum_{j \in \mathcal{C}} \sum_{i \in \facilityset_j} \distd{i}{j} y^*_{i} + \sum_{i \in \mathcal{F}} f_i y^*_i \leq 2 \sum_{j \in \mathcal{C}} \sum_{i \in \facilityset_j} \dist{i}{j} x^*_{ij} + \sum_{i \in \mathcal{F}} f_i y^*_i = 2\sum_{j \in \mathcal{C}} \sum_{i \in \mathcal{F}} \dist{i}{j} x^*_{ij} + \sum_{i \in \mathcal{F}} f_i y^*_i \leq 2\opt{}$. The first inequality follows as $c'(i,j) \le 2c(i,j)$ and $x^*_{ij} = y^*_i$ for $i \in \facilityset_j$. \end{enumerate} \end{proof}
\subsection{Iterative Rounding} We next present an iterative rounding algorithm(IRA) for solving the ALP. In every iteration of IRA, we compute an extreme point solution $w^*$ to ALP and check whether any of the constraints \ref{LPALP_const2} or \ref{LPALP_const3} has become tight. If a constraint corresponding to $j \in \cliset_{part}$ gets tight, we move the client to $\cliset_{full}$ and remove it from $\cliset_{part}$. We also update $\cliset^*$ so that disjointness property is satisfied. If a constraint corresponding to $\cli{f} \in \cliset_{full}$ gets tight, we shrink $\T{\cli{f}}$ to $\ball{\cli{f}}$;
update $\ball{\cli{f}}$ and $\cliset^*$ accordingly. The algorithm is formally stated in Algorithm \ref{IRA}. For $j \in \cliset_{full}$, let $resp(j)$ be the client $j' \in \cliset^*$ who takes the responsibility of getting $j$ served. Whenever $j$ is added to $\cliset^*, resp(j)$ is set to $j$ and whenever it is removed because of another client $j' \in\cliset^*$, $resp(j)$ is set to $j'$. If $j$ was never added to $\cliset^*$, then there must be a $j'$ because of which it was not added to $\cliset^*$ in lines $4$ and $5$. Such a $j'$ takes the responsibility of $j$ in that case. Note that a client $j$ may be added and removed several times from $C^*$ over the iterations of the algorithm as $\T{j}$ and $\ball{j}$ shrink (see Fig.~\ref{fig_ALP} for illustration).
\begin{algorithm}
\footnotesize
\begin{algorithmic}[1]
\STATE $\cliset_{full} \leftarrow \phi$, $\cliset_{part} \leftarrow \mathcal{C}$, $\cliset^* \leftarrow \phi$, $\T{j}=\facilityset_j$, $\rtj{} = \dlj{}$\\
\WHILE {true}
\STATE Find an extreme point solution $w^*$ to ALP
\IF{there exists some $j \in \cliset_{part}$ such that $\sum_{i \in \T{j}} w^*_i$ = 1}
{\STATE $\cliset_{part} \leftarrow \cliset_{part} \setminus \{j\}, \cliset_{full} \leftarrow \cliset_{full} \cup \{ j\}, \mathcal{B}_j \leftarrow \{ i \in \T{j} : \distd{i}{j} \leq \floor{\rtj{}/2}\}$
\STATE $process-\cliset^*(j)$.}
\ENDIF
\IF{there exists $j \in \cliset_{full}$ such that $\sum_{i \in \mathcal{B}_j} w^*_i=1$}
{\STATE $\T{j} \leftarrow \mathcal{B}_j, \rtj{} = \floor{\rtj{} /2}, \mathcal{B}_j \leftarrow \{ i \in \T{j} : \distd{i}{j} \leq \floor{\rtj{}/2} \}$ \STATE $process-\cliset^*(j)$}
\ENDIF
\IF{no constraint became tight in this iteration} \STATE \textbf{break} \ENDIF
\ENDWHILE
\STATE Return $w^*$
\STATE $process-\cliset^*(j)$
\IF{there exists $j' \in \cliset^*$ with $\rtj{'} < \rtj{}$ and $\T{j} \cap \T{j'} \neq \phi$} {\STATE $resp(j) = j'$; if there is more than one such $j'$, choose one arbitrarily.}
\ELSE {\STATE \textbf{if} $j \in \cliset^*$ \textbf{then} update $\rtj{}$ to its new value} \STATE \ \ \ \ \ \ \ \ \ \ \ \ \ \textbf{else} Add $j$ to $\cliset^*$ with $\rtj{}$ and $resp(j) = j$.
{\STATE Remove all $j'$ from $\cliset^*$ for which $\rtj{} < \rtj{'}$ and $\T{j} \cap \T{j'} \neq \phi$, $resp(j') = j$. }
\ENDIF
\end{algorithmic}
\caption{Iterative Rounding Algorithm}
\label{IRA} \end{algorithm}
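The control flow of the IRA, including the bookkeeping done by $process$-$\cliset^*$, is summarized by the sketch below. The routine \texttt{solve\_alp\_extreme\_point} is a hypothetical placeholder for any solver that returns an extreme point of the current ALP, and, for brevity, all constraints found tight in one iteration are processed before re-solving; this is an illustrative reading of Algorithm~\ref{IRA}, not the authors' implementation.
\begin{verbatim}
# Sketch of the iterative rounding loop; solve_alp_extreme_point is a
# hypothetical helper returning an extreme point w (a dict facility -> value)
# of the ALP defined by the current C_part, C_full, C_star, T, B, r.
import math

def process_C_star(j, T, r, C_star, resp):
    """Keep the T_j's of clients in C_star pairwise disjoint."""
    clash = [jp for jp in C_star if jp != j and r[jp] < r[j] and T[j] & T[jp]]
    if clash:
        resp[j] = clash[0]                  # a smaller-radius client takes responsibility
        C_star.discard(j)
    else:
        C_star.add(j)                       # add j (or just refresh its radius)
        resp[j] = j
        for jp in [jp for jp in C_star if jp != j and r[j] < r[jp] and T[j] & T[jp]]:
            C_star.discard(jp)              # j takes responsibility for removed clients
            resp[jp] = j

def iterative_rounding(F, clients, dist, D, solve_alp_extreme_point):
    """dist[i, j]: discretized distance c'(i, j); D[j]: integer initial radius d_j."""
    C_part, C_full, C_star = set(clients), set(), set()
    T = {j: {i for i in F if dist[i, j] <= D[j]} for j in clients}
    B, r, resp = {}, dict(D), {}
    while True:
        w = solve_alp_extreme_point(F, C_part, C_full, C_star, T, B, r)
        tight_part = [j for j in C_part if math.isclose(sum(w[i] for i in T[j]), 1)]
        tight_full = [j for j in C_full if math.isclose(sum(w[i] for i in B[j]), 1)]
        if not tight_part and not tight_full:
            return w, C_full, C_star, T, r, resp
        for j in tight_part:                # a tight partial constraint: j joins C_full
            C_part.discard(j); C_full.add(j)
            B[j] = {i for i in T[j] if dist[i, j] <= r[j] // 2}
            process_C_star(j, T, r, C_star, resp)
        for j in tight_full:                # a tight full constraint: shrink T_j, halve r_j
            T[j] = B[j]
            r[j] = r[j] // 2
            B[j] = {i for i in T[j] if dist[i, j] <= r[j] // 2}
            process_C_star(j, T, r, C_star, resp)
\end{verbatim}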
\begin{figure}\label{fig_ALP}
\end{figure}
Lemmas~\ref{FS_ALP},~\ref{pseudo_integral}, and~\ref{neartocfull} help us analyse our algorithm. Lemma~\ref{FS_ALP} shows that the solution obtained in an iteration is feasible for the ALP of the next iteration.
We also prove that the cost of the solutions computed is non-increasing over iterations.
\begin{lemma} \label{FS_ALP} Let $ALP_{t}$ and $ALP_{t+1}$ be the auxiliary LPs before and after iteration $t$ of the IRA. Let $w^t$ be the extreme point solution obtained in the $t^{th}$ iteration. Then $(i)$ $w^t$ is a feasible solution to $ALP_{t+1}$, $(ii)$ $CostALP_{t+1}(w^{t}) \leq CostALP_{t}(w^{t})$ and hence $CostALP_{t+1}(w^{t+1}) \leq CostALP_{t}(w^{t})$. \end{lemma}
\begin{proof}
Note that the feasibility and the cost can change only when one of the constraints~(\ref{LPALP_const2}) or~(\ref{LPALP_const3}) becomes tight for $w^t$, that is, either the condition at step 4 or the condition at step 6 of the algorithm is true.
\begin{itemize}
\item[($i$)]
When one of the constraints~(\ref{LPALP_const3}) corresponding to a client $j$ becomes tight, i.e. $\sum_{i \in \T{j}} w^t_i =1$, we move client $j$ from $\cliset_{part}$ to $\cliset_{full}$ and
define the set $\mathcal{B}_j$. Thus,
$\sum_{i \in \mathcal{B}_j } w^t_i \leq \sum_{i \in \T{j} } w^t_i = 1$. Thus the new constraints added to~\ref{LPALP_const2} and~\ref{LPALP_const1} (if $j$ is added to $\cliset^*$) are satisfied. Constraint~(\ref{LPALP_const4}) holds as $|\cliset_{full}|$ increases by $1$ and $\sum_{j \in \cliset_{part}} \sum_{i \in \T{j}} w^t_i$ decreases by $1$. There is no change in constraint~\ref{LPALP_const0}.
Next, suppose one of the constraints~(\ref{LPALP_const2}) corresponding to a full client $j$ becomes tight, i.e. $\sum_{i \in \mathcal{B}_j} w^t_i =1$. Two things happen here: ($i$) we shrink $\T{j}$ to $\mathcal{B}_j$, hence $\sum_{i \in \T{j}} w^t_i=1$ and constraint~\ref{LPALP_const1} is satisfied if $j$ is added to $\cliset^*$; ($ii$) we shrink $\mathcal{B}_j$ to half its radius, thus $\sum_{i \in \mathcal{B}_j } w^t_i \leq \sum_{i \in \T{j} } w^t_i = 1$ and constraint~\ref{LPALP_const2} corresponding to $j$ continues to be satisfied with the shrunk $\mathcal{B}_j$.
There is no change in constraints~\ref{LPALP_const0} and~\ref{LPALP_const4}.
\item[($ii$)] For a client $j$, let $\dlj{}^{t}$, $\mathcal{B}_j^{t}$ and $\T{j}^{t}$ denote the quantities $\dlj{}$, $\mathcal{B}_j$ and $\T{j}$ for client $j$ in $ALP_{t}$, and let $\dlj{}^{t+1}$, $\mathcal{B}_j^{t+1}$ and $\T{j}^{t+1}$ be the respective values in $ALP_{t+1}$.
\begin{enumerate}
\item[a.] Suppose $\T{j}$ and $\mathcal{B}_j$ shrink because constraint~\ref{LPALP_const2} becomes tight for a client $j$. The cost paid by $j$ in $w^t$ in the $t^{th}$ iteration
$ = \sum_{i \in \mathcal{B}_j^{t}} c'(i,j) w^t_i $ because $\sum_{i \in \mathcal{B}_j^{t}} w^t_i=1$ in the $t^{th}$ iteration. Since $\mathcal{B}_j^{t}= \T{j}^{t+1}$, $ \sum_{i \in \mathcal{B}_j^{t}} c'(i,j) w^t_i = \sum_{i \in \T{j}^{t+1} : c'(i,j) \leq \dlj{}^{t+1}/2} c'(i,j) w^t_i + \sum_{i \in \T{j}^{t+1} : c'(i,j) = \dlj{}^{t+1}} c'(i,j) w^t_i = \\ \sum_{i \in \mathcal{B}_j^{t+1}} c'(i,j) w^t_i + (1- \sum_{i \in \mathcal{B}_j^{t+1}} w^t_i)\dlj{}^{t+1}=$ the cost paid by $j$ in $w^t$ in the $(t+1)^{th}$ iteration. Thus the change in cost is $0$.
\item[b.] Suppose a client $j$ is moved from $\cliset_{part}$ to $\cliset_{full}$ because constraint~\ref{LPALP_const3} becomes tight. The cost paid by $j$ in $w^t$ in the $t^{th}$ iteration $= \sum_{i \in \T{j}^{t}} c'(i,j) w^t_i = \\ \sum_{i \in \T{j}^{t} : c'(i,j) \leq \rtj{}^{t}/2} c'(i,j) w^t_i + \sum_{i \in \T{j}^{t} : c'(i,j) = \rtj{}^{t}} c'(i,j) w^t_i = \\ \sum_{i \in \mathcal{B}_j^{t+1}} c'(i,j) w^t_i + (1-\sum_{i \in \mathcal{B}_j^{t+1}} w^t_i) \rtj{}^{t+1}=$ the cost paid by $j$ in $w^t$ in the $(t+1)^{th}$ iteration. Thus the change in cost is $0$.
\end{enumerate} \end{itemize} \end{proof} \qed
Thus we have $CostALP_{t+1}(w^{t+1}) \le CostALP_{t+1}(w^{t}) = CostALP_{t}(w^{t})$, where the inequality follows because $w^{t+1}$ is an optimal extreme point solution of $ALP_{t+1}$ while $w^t$ is merely a feasible solution to $ALP_{t+1}$.
Hence, if $n$ is the number of iterations of the IRA, then $CostALP_n(w^n) \leq CostALP_{1}(w^1) \leq CostALP(w') \le 2\opt{LP}$, where the second inequality follows as $w^1$ is an optimal extreme point solution and $w'$ is a feasible solution for $ALP = ALP_1$, and the last inequality follows from Lemma~\ref{fs_kflo}. Let $w^*$ be the solution returned by the IRA; then $w^* = w^n$. Lemma~\ref{pseudo_integral} establishes that at the end of the IRA the solution $w^*$ is pseudo-integral.
\begin{lemma} \label{pseudo_integral} The solution $w^*$ returned by Algorithm \ref{IRA} has at most two fractionally opened facilities. \end{lemma} \begin{proof} At the termination of the algorithm, none of the constraints~\ref{LPALP_const2} and~\ref{LPALP_const3} is tight. Let $n_f$ be the number of fractional variables at the end of the algorithm. Then there are exactly $n_f$ linearly independent tight constraints among~(\ref{LPALP_const1}),~(\ref{LPALP_const0}) and~(\ref{LPALP_const4}). Let $X$ be the number of tight constraints of type~\ref{LPALP_const1}. There must be at least $2$ fractional variables corresponding to each of these constraints, and these sets of variables are disjoint because the $\T{j}$'s ($j \in \cliset^*$) are pair-wise disjoint. Also, there must be at least $2$ fractional variables corresponding to constraint~\ref{LPALP_const4}, different from those obtained from constraints~\ref{LPALP_const1}. Thus, $n_f \ge 2X + 2$, i.e. $X \le n_f/2 -1$. Also, the number of tight constraints is at most $X + 2$ and hence at most $n_f/2 + 1$, giving us $n_f \le n_f/2 + 1$, i.e. $n_f \le 2$. \end{proof} \qed
We open both the fractionally opened facilities at a loss of at most $f_{max}$ in the facility opening cost, where $f_{max}$ is the guess of the cost of the most expensive facility opened by the optimal solution. In Lemma~\ref{neartocfull} we show that for every client $j$ in $\cliset_{full} \setminus \cliset^*$ there is some client in $\cliset^*$ close to $j$, which guarantees at least one unit of open facility within distance $5\rtj{}$ of $j$.
\begin{lemma} \label{neartocfull} At the conclusion of the algorithm, for every $j \in \cliset_{full}$, there exists at least 1 unit of open facilities within distance $5 \rtj{}$ from $j$. Formally, $\sum_{i:c'(i,j) \leq 5\rtj{}} w^*_i \geq 1$. \end{lemma} \begin{proof}
Let $j \in \cliset_{full}$.
If $resp(j) = j$, then this means that $j$ was added to $\cliset^*$ and was present in $\cliset^*$ at the end of the algorithm. Then, one unit is open in $\T{j}$ i.e. within a distance of $\rtj{}$ of $j$.
If $resp(j) = j' (\ne j)$ then $j$ was either never added to $\cliset^*$ or was removed later. In either case, the responsibility of opening a facility in a close vicinity of $j$ was taken by $j'$. First we consider the case when $j$ was added to $\cliset^*$ but removed later. Let $j_0, j_1, \ldots, j_r$ be the sequence of clients such that $resp(j_i) = j_{i-1}$ for $i = 1, \ldots, r$, $resp(j_0) = j_0$ and $j_r = j$. Since $resp(j_0) = j_0$, one unit is open in $\T{j_0}$, i.e. within a distance of $\rtj{0}$ of $j_0$. Clearly, $\rtj{i-1} \le \rtj{i}/2$, and thus $\rtj{i} \le (1/2)^{r -i} \rtj{r}$ for all $i = 0, \ldots, r - 1$. Thus, $\distd{j_{r}}{j_{0}} \le \sum_{i = 1}^{r} \distd{j_{i}}{j_{i-1}} \le \sum_{i = 1}^{r} (\rtj{i} + \rtj{i - 1}) \le \rtj{0} + 2\sum_{i = 1}^{r-1} \rtj{i} + \rtj{r} \le \rtj{0} + 2\sum_{i = 1}^{r-1} (1/2)^{r -i} \rtj{r} + \rtj{r}$. Thus one unit of facility is open within a distance of $2\sum_{i = 0}^{r-1} (1/2)^{r -i} \rtj{r} + \rtj{r} = \sum_{t = 1}^{r} (1/2)^{r - t} \rtj{r} + \rtj{r} \le 3 \rtj{r}$ from $j$.
Next, suppose $j$ was never added to $\cliset^*$. Then, since $resp(j) = j' (\ne j)$, $j'$ was added to $\cliset^*$ at some point of time. Thus, from the argument above, one unit of facility is opened within distance $3\rtj{'}$ of $j'$. Also, $\distd{j}{j'} \le \rtj{} + \rtj{'} \le 2\rtj{}$. Thus, one unit of facility is opened within distance $5\rtj{}$ of $j$.
\end{proof} \qed
We run the algorithm for all the guesses of $f_{max}$ and select the one with the minimum cost.
\textbf{Combining Everything:} Let $\bar{w}$ be our final solution. Then $Cost(\bar{w}) \leq Cost(w^*)+f_{max} \leq 5 \cdot CostALP_n(w^*)+f_{max} \leq 5\cdot2\cdot\opt{LP} + f_{max} \leq 11\,OPT$, where $OPT$ is the cost of the optimal solution; the last inequality uses $\opt{LP} \le OPT$ and $f_{max} \le OPT$.
\section{Tri-criteria for LBFLO with $1/4$-Factor Violation in Lower Bounds} \label{tri-criteria1} In this section we present a tri-criteria approximation for the LBFLO problem with a $1/4$-factor violation in the lower bounds and a $2$-factor violation in outliers at a constant factor loss in cost. The following is the LP relaxation for the LBFLO problem, where for each $i \in \mathcal{F}$, $y_i$ indicates if facility $i$ is opened; for each $i \in \mathcal{F}$ and $j \in \mathcal{C}$, $x_{ij}$ indicates if client $j$ is assigned to facility $i$; and for each $j \in \mathcal{C}$, $z_j$ indicates if client $j$ is an outlier.
\label{LBFLO} $\text{Minimize}~CostLBFLO(x,y,z) = \sum_{j \in \mathcal{C}}\sum_{i \in \mathcal{F}}\dist{i}{j}x_{ij} + \sum_{i \in \mathcal{F}}f_iy_i $ \begin{eqnarray} \text{subject to} &\sum_{i \in \mathcal{F}} x_{ij} + z_j \geq 1 & \forall ~\textit{j} \in \mathcal{C} \label{LbkFLo_const1}\\ & \lb{i} y_i \leq \sum_{j \in \mathcal{C}}x_{ij} & \forall~ \textit{i} \in \mathcal{F} \label{LbkFLo_const2}\\
& \sum_{j \in \mathcal{C}} z_{j} \leq t & \label{LbkFLo_const4}\\ & x_{ij} \leq y_i & \forall~ \textit{i} \in \mathcal{F} , ~\textit{j} \in \mathcal{C} \label{LbkFLo_const5}\\ & y_i,x_{ij},z_j \in [0,1] \label{LPFLP_const5} \end{eqnarray}
Constraints~\ref{LbkFLo_const1} ensure that every client is either served or is an outlier. Constraints~\ref{LbkFLo_const2} and~\ref{LbkFLo_const4} enforce the lower bounds and the bound on the number of outliers, respectively. Constraints~\ref{LbkFLo_const5} are the standard facility location constraints saying that a client is assigned only to an open facility. Let $opt=<x^*,y^*,z^*>$ be the optimal solution of the above LP.
A solution is said to be an {\em integral open solution} if every facility is either fully opened or fully closed, i.e., $y_i \in \{0,1\}$ for all $i \in \mathcal{F}$. Next we construct an integral open solution $S=<\bar{x}, \bar{y}, \bar{z}>$. The solution $S$ is created in two steps: ($1$) first we create a solution $S_{out}= <x', y', z'>$ by removing the clients that are outliers to a large extent ($z^*_j \geq 1/\lambda$) in $opt$, where $\lambda$ is a parameter to be fixed later; ($2$) we use clustering and rounding techniques to obtain an integral open solution for the remaining clients. Formally, in Step 1, for all $j \in \mathcal{C}$ and $i \in \mathcal{F}$, set $z'_{j}=1$ and $x'_{ij}=0$ if $z^*_{j} \geq 1/\lambda$; otherwise, set $z'_{j}=0$ and $x'_{ij}=x^*_{ij}$. For all $i \in \mathcal{F}$, $y'_{i}=y^*_{i}$. Note that in Step 1 we incur at most a $\lambda$-factor violation in the number of outliers. Also, $CostLBFLO(S_{out}) \leq CostLBFLO(opt)$.
Next we describe Step 2 in detail. Let $\AVG{j}$ be the average connection cost of a client $j \in \mathcal{C}$ after Step 1, that is, $\AVG{j}=\sum_{i \in \mathcal{F}} \dist{i}{j}x'_{ij}/\sum_{i \in \mathcal{F}} x'_{ij}$. The clients are now considered in increasing order of radius $\lambda\AVG{j}$. Let $j$ be a client in this order; remove all the clients $k$ such that $c(j,k) \leq 2\lambda \max \{ \AVG{j},\AVG{k} \}$ and repeat the process with the left-over clients. Let $\mathcal{C}' \subset \mathcal{C}$ be the set of remaining clients after all the clients have been considered. Note that for any two clients $j, k \in \mathcal{C}'$ the following property is satisfied: $c(j,k) > 2\lambda \max \{ \AVG{j},\AVG{k} \}$.
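The greedy filtering just described can be summarized by the following short sketch; the input names are assumptions made only for illustration.
\begin{verbatim}
# Illustrative sketch of the greedy filtering in Step 2: scan clients in
# increasing order of lambda * avg_j and drop every later client within
# distance 2 * lambda * max(avg_j, avg_k) of a kept client.
def filter_clients(clients, avg, c, lam):
    """avg[j]: average connection cost A_j; c[j, k]: metric distance."""
    order = sorted(clients, key=lambda j: lam * avg[j])
    kept, removed = [], set()
    for idx, j in enumerate(order):
        if j in removed:
            continue
        kept.append(j)
        for k in order[idx + 1:]:
            if k not in removed and c[j, k] <= 2 * lam * max(avg[j], avg[k]):
                removed.add(k)
    return kept  # the set C' of pairwise well-separated clients
\end{verbatim}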
The total extent up to which facilities are opened in $\F{j}$ after Step 1 is $\geq (1-1/\lambda)(\sum_{i \in \mathcal{F}} x'_{ij}) \geq (1-1/\lambda) (1/\lambda)$, where the last inequality follows because every client is served to an extent of at least $1/\lambda$ after the first step. To obtain an integral open solution, for all $j \in \mathcal{C}'$ we open the cheapest (lowest opening cost) facility, say $i_j \in \F{j}$, and transfer all the assignments coming onto the facilities in $\cluster{j}$ to $i_j$. Formally, set $\bar{y}_{i_j}=1$, $\bar{x}_{i_j j} = \sum_{i \in \cluster{j}}x'_{ij}$ and $\bar{y}_{i}=0$, $\bar{x}_{ij}=0$ for all $j \in \mathcal{C}$ and $i \neq i_j$. Set $\bar{z}_{j} = z'_{j}$ for all $j \in \mathcal{C}$.
\begin{lemma} The integral open solution $S=<\bar{x},\bar{y},\bar{z}>$ violates the lower bounds by a factor $\alpha=1/4$ and the outliers by a factor $\gamma=2$ at a constant factor loss in cost. \end{lemma}
\begin{proof}
Set $\lambda=2$. Every client discarded in Step 1 has $z^*_j \geq 1/\lambda = 1/2$, and since $\sum_{j} z^*_j \leq t$, at most $2t$ clients are discarded, giving $\gamma = 2$. For every $j \in \mathcal{C}'$, the facilities in $\F{j}$ are opened to a total extent of at least $(1-1/\lambda)(1/\lambda)=1/4$ after Step 1, so transferring their assignments to $i_j$ satisfies the lower bound of $i_j$ to an extent of at least $\alpha = 1/4$. The loss in cost is bounded by a constant factor using the separation property $c(j,k) > 2\lambda \max\{\AVG{j},\AVG{k}\}$ of the clients in $\mathcal{C}'$. \end{proof}
\section{Tri-criteria for LBFLO} \label{LBkFLO} In this section, we present a tri-criteria solution for the LBFLO problem with an $\alpha$-factor violation in the lower bounds, for any $\alpha \in (0,1)$, and at most a $\beta=\frac{1}{1-\alpha}$-factor violation in outliers, at a $\lambda\frac{1+\alpha}{1-\alpha}$-factor loss in cost, where $\lambda$ is the approximation factor for FLO. Let $I$ be an instance of LBFLO. For a facility $i$, let $\N{i}$ be the set of the $\lb{i}$ clients nearest to $i$. We construct an instance $I'$ of FLO with the lower bounds ignored and the facility costs updated as follows: if a facility $i$ is opened in an optimal solution of $I$, then it serves at least $\lb{i}$ clients and hence pays at least $\sum_{j \in \N{i}}c(i,j)$ in connection cost. Therefore, we set $f'(i)=f(i)+\delta \sum_{j \in \N{i}}c(i,j)$, where $\delta$ is a tunable parameter. \begin{lemma} \label{fs} The cost of an optimal solution of $I'$ is bounded by $ (\delta+1)Cost_I(O)$, where $O$ is the optimal solution of $I$. \end{lemma}
\begin{proof}
Clearly $O$ is a feasible solution for $I'$.
Thus, the service cost is the same as that in $O$. Moreover, $\sum_{i \in O} f'(i) = \sum_{i \in O}[f(i)+ \delta \sum_{j \in \N{i}}c(i,j)] \leq \sum_{i \in O} f(i) + \delta\, Cost_I(O)$, since each facility opened in $O$ serves at least $\lb{i}$ clients and hence pays at least $\sum_{j \in \N{i}}c(i,j)$ in connection cost.
Therefore, $Cost_{I'}(O') \leq Cost_{I'}(O) \leq (\delta+1)Cost_I(O)$. \end{proof} \qed
Once we have the instance $I'$ of FLO, we use any algorithm for FLO to get a solution $AS'$ to $I'$ of cost no more than $\lambda Cost_{I'}(O')$, where $O'$ is the optimal solution of $I'$ and $\lambda$ is the approximation factor for FLO. Note that a facility $i$ opened in the solution $AS'$ might serve fewer than $\alpha\lb{i}$ clients, as we ignored the lower bounds in the instance $I'$. We close such facilities and do some reassignments so that every remaining open facility serves at least an $\alpha$ fraction of its lower bound;
in the process we make some violation in the number of outliers.
We convert the solution $AS'$ into a solution $AS$ of LBFLO. We close every facility $i$ that serves fewer than $\alpha\lb{i}$ clients in $AS'$ and either reassign its clients to other opened facilities or decide to leave them unserved. The cost of reassignment is charged to the facility opening costs of the closed facilities. Consider a facility $i$ opened in $AS'$ that serves fewer than $\alpha \lb{i}$ clients. Let $\clientset_i$ be the set of clients in $\N{i}$ assigned to $i$ in $AS'$ and let $\bar \clientset_i$ be the remaining clients in $\N{i}$.
Since $i$ serves $<\alpha \lb{i}$ clients, $|\bar \clientset_i| \ge (1-\alpha )\lb{i}$. Some of the clients in $\bar \clientset_i$ are outliers in $AS'$ and some are assigned to other facilities.
Let $\Out{i}$ be the clients in $\N{i}$ that are outliers and $\R{i}$ be the clients in $\N{i}$ assigned to some other facilities. See Fig.~\ref{division of clients}($a$).
If $\R{i} \ne \phi$, let $j' \in \R{i}$ be the client nearest to $i$. Then,
\begin{equation}
\label{eq1}
c(i,j') \leq \frac{\sum_{j \in \R{i}} c(i,j)}{|\R{i}|} \leq \frac{\sum_{j \in \N{i}} c(i,j)}{|\R{i}|}
\end{equation}
\begin{figure}
\caption{ ($a$) Division of clients in $\N{i}$ for a facility $i$ opened in $AS'$
($b$) $ c(j,i') \leq c(i,j) + c(i,j') + c(j',i')$
}
\label{division of clients}
\end{figure}
Clients in $\clientset_i$ are either assigned to the facilities serving the clients in $\R{i}$ or are made outliers, in proportion to $|\R{i}|$ and $|\Out{i}|$. That is, we assign $\frac{|\R{i}|}{|\R{i}|+|\Out{i}|} |\clientset_i|$ clients in $\clientset_i$ to the nearest facility $i' \ne i$ opened in $AS'$ and leave $\frac{|\Out{i}|}{|\R{i}|+|\Out{i}|} |\clientset_i|$ clients in $\clientset_i$ unserved. If $|\R{i}| \neq 0$, then the total cost of reassignment is $\sum_{j \in \clientset_i} c(j,i') \leq \sum_{j \in \clientset_i} (c(i,j) + c(i,j') + c(j',i')) \text{ (by the triangle inequality, see Fig.~\ref{division of clients}($b$))}$
$\leq \sum_{j \in \clientset_i}c(i,j) + ( \frac{|\R{i}|}{|\R{i}|+|\Out{i}|} |\clientset_i| \cdot 2 c(i,j'))$ (as $j'$ was assigned to $i'$ and not to $i$ in $AS'$)
$\leq \sum_{j \in \clientset_i}c(i,j) + ( \frac{2|\clientset_i|}{|\R{i}|+|\Out{i}|} \cdot \sum_{j \in \N{i}} c(i,j))$ (using~(\ref{eq1}))
$\leq \sum_{j \in \clientset_i}c(i,j) + ( \frac{2\alpha\lb{i}}{(1-\alpha)\lb{i}} \sum_{j \in \N{i}} c(i,j))$ (as $|\clientset_i| \leq \alpha\lb{i}$ and $|\R{i}|+|\Out{i}| \geq (1-\alpha)\lb{i}$)
$\leq \sum_{j \in \clientset_i}c(i,j) + f'(i)$ (for $\delta \geq \frac{2\alpha}{1-\alpha}$).
Thus the additional cost of reassignment of clients in $\clientset_i$ is bounded by the facility opening cost of $i$.
Violation in outliers is $\frac{|\Out{i}| + \frac{|\Out{i}|}{|\R{i}|+|\Out{i}|} |\clientset_i|}{|\Out{i}|} \leq 1+\frac{|\clientset_i|}{|\R{i}|+|\Out{i}|} \leq 1+\frac{\alpha}{1-\alpha} = \frac{1}{1-\alpha}$.
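The conversion of $AS'$ into $AS$ described above can be sketched as follows. All helper names are assumptions; the receiving facility is taken to be the facility of $AS'$ serving the client $j'\in\R{i}$ nearest to $i$, and the sketch makes a single pass over the facilities (it does not revisit facilities whose load changes due to reassignments).
\begin{verbatim}
# Illustrative single-pass sketch: close facilities serving fewer than
# alpha * lb_i clients; reassign their clients or make them outliers in
# proportion to |R_i| and |Out_i|.  Recall |R_i| + |Out_i| >= (1 - alpha) * lb_i > 0.
def close_underserved(open_facs, assign, outliers, lb, N, c, alpha):
    """open_facs: set of facilities open in AS'; assign[j]: facility serving j
    (None if j is an outlier); N[i]: the lb[i] clients nearest to i; c[i, j]: distance."""
    served = {i: [j for j, f in assign.items() if f == i] for i in open_facs}
    for i in sorted(open_facs):
        D_i = served[i]
        if len(D_i) >= alpha * lb[i]:
            continue                                 # facility i keeps enough clients
        R_i = [j for j in N[i] if assign[j] not in (None, i)]
        Out_i = [j for j in N[i] if assign[j] is None]
        open_facs.discard(i)                         # close the under-served facility
        n_move = round(len(D_i) * len(R_i) / (len(R_i) + len(Out_i)))
        if R_i:
            j_prime = min(R_i, key=lambda jj: c[i, jj])   # nearest reassigned client of N_i
            target = assign[j_prime]                      # its facility i' gets the moved clients
        for idx, j in enumerate(D_i):
            if idx < n_move:                         # n_move = 0 whenever R_i is empty
                assign[j] = target                   # reassign proportionally ...
            else:
                assign[j] = None                     # ... the rest become outliers
                outliers.add(j)
    return open_facs, assign, outliers
\end{verbatim}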
\textbf{Overall Cost Bound:} It is easy to see that $Cost_I(AS) \leq Cost_{I'}(AS')$, as the cost of the solution $AS$ is at most the sum of $(i)$ the original connection cost, which equals the connection cost of $AS'$, ($ii$) the additional cost of reassignment, which is paid in $AS'$ by the facilities that are closed in $AS$, and ($iii$) the facility cost of the remaining facilities.
Thus, $Cost_I(AS) \leq Cost_{I'}(AS') \leq \lambda Cost_{I'}(O') \leq \lambda (1+\delta)Cost_{I}(O) =
\lambda (\frac{1+\alpha}{1-\alpha})Cost_{I}(O)$ for $\delta=\frac{2\alpha}{1-\alpha}$. Using the $(3+\epsilon)$-approximation of Charikar \textit{et al}.~\cite{charikar2001algorithms} for FLO, i.e. $\lambda = 3+\epsilon$, we get a $(3+\epsilon)(\frac{1+\alpha}{1-\alpha})$-factor loss in cost for any $\epsilon>0$.
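As a concrete illustration of this trade-off (an example, not an additional claim), taking $\alpha = 1/2$ gives $\delta = \frac{2\alpha}{1-\alpha} = 2$ and hence a $\left(\frac{1}{2},\, 2,\, 3(3+\epsilon)\right)$-solution: every open facility serves at least $\lb{i}/2$ clients, at most twice the allowed number of clients are left as outliers, and the cost is within a factor $3(3+\epsilon) = 9 + 3\epsilon$ of the optimum.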
\section{Conclusion and Future Scope} \label{conclusion} In this paper, we first presented an $11$-factor approximation for the $k$-facility location problem with outliers, opening at most $k+1$ facilities. This also gives the first constant factor approximation for FLO using LP rounding techniques. Our result can be extended to the knapsack median problem with outliers with a $(1+\epsilon)$ violation in the budget using enumeration techniques.
We also gave a tri-criteria $(\alpha, \frac{1}{1-\alpha},(3+\epsilon)\frac{1+\alpha}{1-\alpha})$-solution for general LBFLO, where $\alpha \in (0,1)$ and $\epsilon>0$. It will be interesting and challenging to see if we can reduce the violation in outliers below $2$ while maintaining $\alpha >1/2$.
We believe that using pre-processing and strengthened LP techniques of Krishnaswamy \textit{et al}.~\cite{krishnaswamy-kMO} we can get rid of the $+1$ violation in cardinality for $k$FLO. This will also directly extend our tri-criteria solution to lower bounded $k$-facility location problem with outliers (LB$k$FLO).
\end{document}
\begin{document}
\title{Monotone Periodic Orbits for Torus Homeomorphisms} \author{Kamlesh Parwani}
\date{September 1, 2003.}
\maketitle \begin{abstract} Let $f$ be a homeomorphism of the torus isotopic to the identity and suppose that there exists a periodic orbit with a non-zero rotation vector $(\frac{p}{q},\frac{r}{q})$; then $f$ has a topologically monotone periodic orbit with the same rotation vector. \end{abstract}
\section*{Introduction}
In this article we prove a theorem about the existence of topologically monotone periodic orbits on the torus. The concept of monotone orbits on the annulus is certainly not new; it goes back to Aubry and Mather's proof of the existence of orbits whose radial order is preserved by an area-preserving twist map of the annulus. These orbits, that have their radial order preserved by the map, are called Birkhoff orbits (see \cite {Katok}) or monotone orbits.
This notion of monotone orbits inspired the definition of topologically monotone orbits in \cite{Boyland}, where Boyland proved that any homeomorphism of the annulus isotopic to the identity that has a periodic orbit with a non-zero rotation number $\frac{p}{q}$ also has a topologically monotone periodic orbit with the same rotation number. A topologically monotone periodic orbit has the property that the isotopy class of the map, keeping the periodic orbit fixed as a set, is of finite order. The main tool used in Boyland's proof is Nielsen-Thurston theory.
In \cite{Llibre&Mackay}, Llibre and Mackay asked whether a similar result was true for torus homeomorphisms. The goal of this paper is to answer that question by proving the same theorem on the torus.
\textbf{Main Theorem.} \textit{If }$f$\textit{\ is a torus homeomorphism isotopic to the identity that has a periodic orbit with a non-zero rotation vector }$ \left( \frac{p}{q},\frac{r}{q}\right) $\textit{, then }$f$\textit{\ also has a topologically monotone periodic orbit with the same rotation vector.}
There are some immediate complications one encounters while trying to generalize the theorem to the torus. First, the torus has rotation vectors instead of rotation numbers. Then on the annulus, under certain restrictions to the rotation number, we can only get the $pA$ (pseudoAnosov) isotopy class or the finite order isotopy class, but for the torus, there is the reducible isotopy class to deal with also. These concepts will be introduced in Section 1, and then in Section 2 we prove the main theorem of this paper.
It should be noted that LeCalvez has proved the existence of topologically monotone periodic orbits on the torus under the assumption that the maps are smooth by using variational techniques (see \cite{LeCalvez}). Since we are dealing with homeomorphisms, we rely solely on topological methods.
\section{Definitions and important results}
\subsection{Rotation vectors}
Let $f$ be a homeomorphism of the torus which is isotopic to the identity and let $F$ be its lift to the universal cover $\widetilde{T^{2}}$, the plane. Let $()_{1}$ and $()_{2}$ be the projections of a point in the plane to the $x$-axis and the $y$-axis respectively and let $x$ be a point on the torus $T^{2}$ with $\widetilde{x}$ as its lift. Then the \textbf{rotation vector} of $\widetilde{x}$, with respect to a lift $F$, is defined as following if the limit exists.
\begin{equation*} \rho (\widetilde{x},F)=\left( \lim_{n\to \infty }\left( \frac{F^{n}( \widetilde{x})-\widetilde{x}}{n}\right) _{1},\lim_{n\to \infty }\left( \frac{ F^{n}(\widetilde{x})-\widetilde{x}}{n}\right) _{2}\right) \end{equation*}
If $x$ is a periodic point, say of period $q$, then the rotation vector is always well defined and can be written as $(\frac{p}{q},\frac{r}{q})$ for some integers $p$ and $r$. In fact, the rotation vector for any point on the orbit of $x$ is the same, and so, we can associate the vector $(\frac{p}{q}, \frac{r}{q})$ to the periodic orbit. Periodic orbits with rotation vector $(\frac{p}{q},\frac{r}{q})$ and least period $q$ will be called $(p,r,q)$ orbits. Note that a $(p,r,q)$ orbit has the same rotation vector as a $(pt,rt,qt)$ orbit, where $t$ is some positive integer.
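As a purely numerical illustration of this definition (not needed for any argument below), the following sketch estimates the rotation vector of a point under a lift; the lift used here, that of the rigid translation by $(\frac{p}{q},\frac{r}{q})$, is an assumption chosen only because every one of its orbits is a $(p,r,q)$ orbit.
\begin{verbatim}
# Estimate the rotation vector of a point under a lift F by iterating F on the
# plane; F below is the lift of the rigid translation by (p/q, r/q), used
# purely to illustrate the definition.
def rotation_vector(F, x0, n_iter=3000):
    X, Y = x0
    for _ in range(n_iter):
        X, Y = F(X, Y)
    return ((X - x0[0]) / n_iter, (Y - x0[1]) / n_iter)

p, r, q = 1, 2, 3
F = lambda X, Y: (X + p / q, Y + r / q)   # lift of the translation by (1/3, 2/3)
print(rotation_vector(F, (0.25, 0.6)))    # approximately (1/3, 2/3)
\end{verbatim}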
The covering space of the torus comes naturally equipped with two important covering translations. Define $X(\widetilde{x})=\widetilde{x}+(1,0)$ and $Y( \widetilde{x})=\widetilde{x}+(0,1)$. Clearly, the rotation vector of a point depends on the lift, and the relationship is $\rho (\widetilde{x} ,Y^{m}X^{n}F)=\rho (\widetilde{x},F)+(n,m)$, where $n$ and $m$ are integers. So when we discuss periodic orbits with a certain rotation vector in this paper, we assume the existence of some lift for which that rotation vector is realized. Also, when we start with a periodic orbit, say $(p,r,q)$ orbit, and then prove that another $(p,r,q)$ orbit exists, it is to be understood that both rotation vectors are calculated with respect to the same lift.
\subsection{The Nielsen-Thurston classification theorem and braids}
Every orientation preserving homeomorphism of an orientable surface with negative Euler characteristic is isotopic to a homeomorphism $g$ such that either
a) $g$ is finite order, or
b) $g$ is pseudoAnosov ($pA$), or
c) $g$ is reducible.
A map $g$ is said to be \textit{reducible} if there is a disjoint collection $C$ of non-parallel, non-peripheral simple disjoint curves such that $g$ leaves invariant the union of disjoint regular neighborhoods of curves in $C$ , and the first return map on each complementary component is either of finite order or $pA$.
This classification theorem was first announced in \cite{Thurston} and the proofs appeared later in \cite{FLP} and \cite{Casson}.
The torus doesn't have negative Euler characteristic but, following Handel as in \cite{Handel}, we will examine the isotopy class relative to a periodic orbit; this will introduce punctures and provide the negative Euler characteristic to apply the Nielsen-Thurston Classification Theorem. When the isotopy class relative to a given periodic orbit is of finite order, the periodic orbit is called a \textit{finite order periodic orbit}, and \textit{ reducible} and $pA$ \textit{periodic orbits} are defined similarly. The isotopy class relative to a periodic orbit is also referred to as the \textit{braid} of the periodic orbit.
\begin{definition} Let $x$\ and $y$\ be two distinct periodic points of least period $n$\ for homeomorphisms $f$ and $g$ respectively of the same orientable surface $S$. Then the orbit of $x$\ ($O(x)$) and the orbit of $y$\ ($O(y)$) have the same \textit{braid} if there exists an orientation-preserving homeomorphism $h$\ of $S$ with the property that $h$\ maps $O(x)$\ onto $O(y)$ and the isotopy class of $h^{-1}fh$\ relative to the orbit of $y$\ is the same as the isotopy class of $g$\ relative to the orbit of $y$, that is, $ [h^{-1}fh]_{O(y)}=[f]_{O(y)}$. \end{definition}
A periodic orbit has a \textbf{trivial} braid if the isotopy class relative to the periodic orbit is of finite order, that is, there exists a homeomorphism $g$ isotopic to $f$, relative to the periodic orbit, such that $g^{n}=identity$ for some $n$. In other words, finite order periodic orbits have trivial braids. These periodic orbits are considered to be topologically monotone.
A periodic orbit has a \textbf{non-trivial} braid if the isotopy class relative to the periodic orbit is not of finite order. In other words, periodic orbits with non-trivial braids are either reducible periodic orbits or are $pA$ periodic orbits. These periodic orbits are not topologically monotone.
Boyland defined a natural partial order $(\vartriangleright)$ into these braids. If $\alpha $ and $\beta $ are two braids of periodic orbits, then $ \alpha \vartriangleright \beta $ if and only if the existence of a periodic orbit with braid $\alpha $ in any homeomorphism $f$ on a given surface implies the existence of a periodic orbit with braid $\beta $ for the same $f$. The proof of the fact that this is an actual partial order is not easy and is in \cite{Boyland}. He also proved that a $pA$ periodic orbit is strictly above (in the partial ordering) all other periodic orbits that are present in the $pA$ representative of the isotopy class relative to the $pA$ periodic orbit.
The existence of topologically monotone periodic orbits on the annulus in \cite{Boyland} is established by showing that periodic orbits with non-trivial braids force the existence of periodic orbits with trivial braids and the same rotation number (non-trivial $\vartriangleright $ trivial).
\begin{theorem}[Boyland] Let f be a homeomorphism of the annulus isotopic to the identity. If f has a periodic orbit with non-zero rotation number $\frac{p}{q}$, then f also has a topologically monotone periodic orbit with the same rotation number. \end{theorem}
Essentially, we follow the same strategy on the torus and the proof of the main theorem in Section 2 is broken into two parts, reducible $ \vartriangleright $ finite order and $pA$ $\vartriangleright $ finite order.
We will also need the following result which can be obtained from the arguments in \cite{Hall&Boyl} and is also proved in \cite{thesis}.
\begin{theorem} Let f be a homeomorphism of the torus that is isotopic to the identity and has a pA periodic orbit. Let g be the pA representative of the isotopy class relative to this orbit and let G be its lift to the plane that fixes the lifts of all the points in the pA orbit. Then G has a dense orbit. \end{theorem}
We will use this theorem in the next section to prove $pA$ $\vartriangleright $ trivial.
\section{Finite order periodic orbits on the torus}
In this section we prove the main theorem by showing that finite order periodic orbits are on the bottom in the partial ordering of periodic orbits for torus homeomorphism isotopic to the identity, that is, the reducible $ \vartriangleright $ finite order and $pA\vartriangleright $ finite order. We restrict our attention to periodic orbits with the same non-zero rotation vector, say $(\frac{p}{q},\frac{r}{q})$, and assume that there are no common factors between $p$, $r$, and $q$. Later we reduce the general case, in which there may be a common factor between $p$, $r$, and $q$, to the case of no factors. \begin{theorem} Let $f:T^{2}\rightarrow T^{2}$ be a homeomorphism isotopic to the identity. Suppose there exists a $(p,r,q)$ orbit such that $gcd(p,r,q)=1$, then there exists a finite order $(p,r,q)$ orbit. \end{theorem} \begin{proof} If the given periodic orbit is already of finite order type, then we're done. If not, the proof breaks down in to two cases---the periodic orbit is either of reducible type or it is of $pA$ type. These are handled separately below.
\textbf{Reducible Case}.
We assume that we obtain a reducible isotopy class, keeping the periodic orbit fixed. A reducing curve can be of two types---essential or non-essential (this is with respect to the unpunctured torus). A reducing curve cannot be non-essential for this would imply that there is a common factor between $p,r,q$. In fact, the common factor would be exactly the number of punctures contained in the disc bounded by the non-essential curve.
So any reducing curve must be essential. All the essential reducing curves are disjoint, and thus, split the torus into parallel annuli. These annuli have an equal number of punctures, say $n$ punctures, in their interiors and are permuted by the action of the map $g$, which represents this reducible isotopy class.
In this case, we will show that we can obtain a finite order isotopy class, keeping a periodic orbit with the same period and rotation vector (it may not be the same periodic orbit that we start with). Let $ A_{0},A_{1},A_{2},...,A_{m-1}$ be the annuli that $g$ permutes, numbered so that $g(A_{k})=A_{k+1}$ mod $m$ and $nm=q$.
\begin{figure}
\caption{The Reducible Case}
\end{figure}
CASE 1.
Suppose that the maps $g^{m}:A_{k}\rightarrow A_{k}$ relative to the punctures are all of finite order. It is easy to see that all these maps are conjugate to each other, and so if one is of finite order, then all are of finite order. Since all finite order maps on the annulus are conjugate to rotations, it follows that $g^{mn}$ (or $g^{q}$) is the identity in each annulus.
We will now argue that this implies that $g^{q}$ is isotopic to the identity on the entire torus relative to the $(p,r,q)$ orbit. The complements of the interiors of annuli containing the punctures (the $A_{k}$'s) are closed annuli that do not contain any punctures. Observe that $g^{q}$ fixes the boundary components of these unpunctured annuli since it fixes the boundary components of the $A_{k}$'s. So if $g^{q}$ is isotopic to the identity in all these unpunctured annuli relative to their boundaries (keeping the boundaries fixed throughout the isotopy) then $g^{q}$ is isotopic to the identity on the entire torus relative to the $(p,r,q)$ orbit, because we already know that $g^{q}$ is the identity on the annuli containing the punctures. Now suppose $g^{q}$ is not isotopic to the identity in one of these unpunctured annuli relative to its boundary components, then $g^{q}$ must be isotopic to some non-trivial Dehn twists. It is easy to see that the maps on all these annuli are all conjugate to each other so $g^{q}$ is isotopic to the same Dehn twists in each unpunctured annulus. However, $g^{q}$ is isotopic to identity on the entire torus when the punctures are allowed to move, because $g$ is isotopic to $f$ and $f$ is isotopic to the identity by assumption. If we have non-trivial Dehn twists that don't cancel each other out (because they are identical), $g^{q}$ is not isotopic to identity, which is a contradiction. It follows that $g^{q}$ is isotopic to the identity on the entire torus relative to the $(p,r,q)$ orbit.
To show that $(p,r,q)$ orbit is topologically monotone, we require a map isotopic to $g$ relative to the orbit that is of finite order. Such a map is guaranteed by Fenchel's solution to the Nielsen Realization problem for finite solvable groups (see Chapter 3 in \cite{Zieschang}) which provides a map $h$ isotopic to $g$, relative to the $ (p,r,q)$ orbit, such that $h^{q}$ is the identity. This shows that the $ (p,r,q)$ orbit is of finite order type, that is, it is topologically monotone.
CASE 2.
Suppose that the maps $g^{m}:A_{k}\rightarrow A_{k}$ relative to the punctures are all $pA$. It now follows from Boyland's proof of Theorem 1.2 in \cite{Boyland} that there exists a finite order periodic orbit with the same period and rotation number in each $A_{k}$. Since all these annulus maps are conjugate, the periodic orbits connect in the torus to give a periodic orbit with the same rotation vector and period as the originally punctured orbit. This reduces to Case 1 and we can find an isotopy relative to the new orbit such that the isotoped map is of finite order. Because the periodic orbits in the $pA$ components are unremovable (see \cite{Boyl:stability}), this periodic orbit existed in the original map $f$.
We have actually established a stronger result. If we do obtain a finite order periodic orbit which is distinct from the one we began with, then it is strictly below the original orbit in the partial order. This is because the only way for the original orbit to not be of a finite order type is for the reducible components to be $pA$, that is, the maps $g^{m}:A_{k}\rightarrow A_{k}$ are $pA$. And $pA$ orbits are strictly above all periodic orbits that are present in the $pA$ representative of the isotopy class (see \cite{Boyland}).
\textbf{\textit{pA} Case}.
In Lemma 2.3, we will prove that any $pA$ $(p,r,q)$ orbit forces another $(p,r,q)$ orbit. Boyland proved that this other periodic orbit is strictly below the $pA$ orbit in the partial order (see \cite{Boyland}). Furthermore, there are only finitely many periodic orbits of any given period in any $pA$ map. So consider a minimal $(p,r,q)$ orbit in the partial order. If it's $pA$, there is another $(p,r,q)$ orbit below it---so it's not minimal. If it's reducible and not of finite order, then it forces another $(p,r,q)$ finite order orbit (by the argument above for the reducible case). Thus, any minimal $(p,r,q)$ orbit must be of finite order and there is at least one minimal orbit. \end{proof}
It now remains to prove Lemma 2.3. We shall appeal to the following result in \cite{Fathi} for the existence of a fixed point of positive index. The proof is based on the ideas used to demonstrate the Brouwer Plane Translation Theorem (see \cite{Franks}).
\begin{theorem}[Fathi] Let $G:R^{2}\rightarrow R^{2}$\ be an orientation preserving homeomorphism which possesses a non-wandering point, then G has a fixed point. If G has only isolated fixed points, then it has a fixed point of positive index. \end{theorem}
\begin{lemma} Let $g:T^{2}\rightarrow T^{2}$\ be the $\mathit{pA}$\ representative obtained from $\mathit{f}$ keeping the $\mathit{(p,r,q)}$ orbit fixed throughout the isotopy. Then $\mathit{g}$\ has another $\mathit{ (p,r,q)}$\ periodic orbit. \end{lemma} \begin{proof} Let $g$ be the $pA$ representative obtained relative to the $(p,r,q)$ orbit. Let $G$ be the lift to the plane that realizes the rotation vector $(\frac{p}{q},\frac{r}{q})$ for the $pA$ orbit and then consider $X^{-p}Y^{-r}G^{q}$; call this map $H$. Then by Theorem 1.3, we know that $H$ has a dense orbit and so there is a non-wandering point. Since the periodic points (of any given period) are isolated in a $pA$ map (see \cite {FLP}), the fixed points of $H$ on the plane are also isolated. So, by Theorem 2.2, we have a fixed point with positive index for $H$, which is a $(p,r,q)$ orbit for $g$, and each point in the orbit is fixed with positive index for $g^{q}$.
The $pA$ $(p,r,q)$ orbit is the location of all the one-prongs or needles and these needles have index zero. Since there is a fixed point for $g^{q}$ with positive index and because the indices have to add up to zero, we have actually shown that the $pA$ periodic orbit forces at least two other $(p,r,q)$ periodic orbits. \end{proof}
\begin{proof}[Proof of the Main Theorem] Let $f$ be a homeomorphism of the torus isotopic to the identity and suppose there exists a periodic orbit with rotation vector $(\frac{p}{q},\frac{r}{q})$. Also assume that this is a $(pt,rt,qt)$ periodic orbit, where $t$ is a positive integer and there are no common factors between $p$, $r$, and $q$. Let $F$ be the lift to the plane and consider $X^{-p}Y^{-r}F^{q}$. We obtain a periodic point of period $t$ for $X^{-p}Y^{-r}F^{q}$. Then by Theorem 2.2, we also obtain a fixed point for $X^{-p}Y^{-r}F^{q}$, which has period $q$ for $f$ and rotation vector $(\frac{p}{q},\frac{r}{q})$, that is, it is a $(p,r,q)$ orbit. Thus, without loss of generality, we may assume that we have a $(p,r,q)$ orbit where there are no common factors between $p$, $r$, and $q$. Now, by Theorem 2.1, we obtain a $(p,r,q)$ orbit that is topologically monotone and it has the desired rotation vector $(\frac{p}{q},\frac{r}{q})$. \end{proof}
It is natural to ask if there is a similar theorem for non-periodic orbits with irrational rotation vectors. This question is unanswered even for the annulus. A similar theorem about the existence of monotone periodic orbits has been proved for periodic orbits on surfaces of higher genus (see \cite {Parwani}).
\section*{Acknowledgments}
The author would like to thank John Franks and Philip Boyland for several useful and stimulating conversations.
\end{document}
\begin{document}
\newdateformat{mydate}{\THEDAY~\monthname~\THEYEAR}
\title
[Vanishing viscosity: observations]
{Observations on the vanishing viscosity limit}
\author{James P. Kelliher}
\address{Department of Mathematics, University of California, Riverside, 900 University Ave., Riverside, CA 92521}
\curraddr{Department of Mathematics, University of California, Riverside, 900 University Ave., Riverside, CA 92521} \email{[email protected]}
\subjclass[2010]{Primary 76D05, 76B99, 76D10}
\keywords{Vanishing viscosity, boundary layer theory}
\begin{abstract} Whether, in the presence of a boundary, solutions of the Navier-Stokes equations converge to a solution to the Euler equations in the vanishing viscosity limit is unknown. In a seminal 1983 paper, Tosio Kato showed that the vanishing viscosity limit is equivalent to having sufficient control of the gradient of the Navier-Stokes velocity in a boundary layer of width proportional to the viscosity. In a 2008 paper, the present author showed that the vanishing viscosity limit is equivalent to the formation of a vortex sheet on the boundary. We present here several observations that follow on from these two papers. \Ignore{ We make several observations regarding the vanishing viscosity limit, primarily regarding the control of the total mass of vorticity and the conditions in Tosio Kato's seminal 1983 paper \cite{Kato1983} shown by him to be equivalent to the vanishing viscosity limit.
} \end{abstract}
\date{(compiled on {\dayofweekname{\day}{\month}{\year} \mydate\today)}}
\maketitle
\begin{small}
\begin{flushright}
Compiled on \textit{\textbf{\dayofweekname{\day}{\month}{\year} \mydate\today}}
\end{flushright} \end{small}
\renewcommand\contentsname{} \begin{small}
\tableofcontents \end{small}
\noindent The Navier-Stokes equations for a viscous incompressible fluid in a domain $\Omega \subseteq \ensuremath{\BB{R}}^d$, $d \ge 2$, with no-slip boundary conditions can be written, \begin{align*}
(NS)
\left\{
\begin{array}{rl}
\ensuremath{\partial}_t u + u \cdot \ensuremath{\nabla} u + \ensuremath{\nabla} p = \nu \Delta u + f
&\text{ in } \Omega, \\
\dv u = 0
&\text{ in } \Omega, \\
u = 0
&\text{ on } \Gamma := \ensuremath{\partial} \Omega.
\end{array}
\right. \end{align*} The Euler equations modeling inviscid incompressible flow on such a domain with no-penetration boundary conditions can be written, \begin{align*}
(EE)
\left\{
\begin{array}{rl}
\ensuremath{\partial}_t \overline{u} + \overline{u} \cdot \ensuremath{\nabla} \overline{u} + \ensuremath{\nabla} \overline{p} = \overline{f}
&\text{ in } \Omega, \\
\dv \overline{u} = 0
&\text{ in } \Omega, \\
\overline{u} \cdot \bm{n} = 0
&\text{ on } \Gamma.
\end{array}
\right. \end{align*} Here, $u = u_\nu$ and $\overline{u}$ are velocity fields, while $p$ and $\overline{p}$ are pressure (scalar) fields. The external forces, $f$, $\overline{f}$, are vector fields. (We adopt here the notation of Kato in \cite{Kato1983}.)
We assume throughout that $\Omega$ is bounded and $\Gamma$ has $C^2$ regularity, and write $\bm{n}$ for the outward unit normal vector.
The limit, \begin{align*}
(VV) \qquad
u \to \overline{u} \text{ in } L^\ensuremath{\infty}(0, T; L^2(\Omega)), \end{align*} we refer to as the \textit{classical vanishing viscosity limit}. Whether it holds in general, or fails in any one instance, is a major open problem in mathematical fluid mechanics.
In \cite{K2006Kato, K2008VVV} a number of conditions on the solution $u$ were shown to be equivalent to ($VV$). The focus in \cite{K2006Kato} was on the size of the vorticity or velocity in a layer near the boundary, while the focus in \cite{K2008VVV} was on the accumulation of vorticity on the boundary. The work we present here is in many ways a follow-on to \cite{K2006Kato, K2008VVV}, each of which, especially \cite{K2006Kato}, was itself an outgrowth of Tosio Kato's seminal paper \cite{Kato1983} on the vanishing viscosity limit, ($VV$).
This paper is divided into two themes. The first theme concerns the accumulation of vorticity---on the boundary, in a boundary layer, or in the bulk of the fluid. It explores the consequences of having control of the total mass of vorticity or, more strongly, the $L^1$-norm of the vorticity for solutions to ($NS$).
We re-express in a specifically 3D form the condition for vorticity accumulation on the boundary from \cite{K2008VVV} in \cref{S:3DVersion}. In \cref{S:LpNormsBlowUp}, we show that if ($VV$) holds then the $L^p$ norms of the vorticity for solutions to ($NS$) must blow up for all $p > 1$ as $\nu \to 0$ except in very special circumstances. This leaves only the possibility of control of the vorticity's $L^1$ norm. Assuming such control, we show in \cref{S:ImprovedConvergence} that when ($VV$) holds we can characterize the accumulation of vorticity on the boundary more strongly than in \cite{K2008VVV}.
In \cref{S:BoundaryLayerWidth}, we show that if we measure the width of the boundary layer by the size of the $L^1$-norm of the vorticity then the layer has to be wider than that of Kato if ($VV$) holds. We push this analysis further in \cref{S:OptimalConvergenceRate} to obtain the theoretically optimal convergence rate when the initial vorticity has nonzero total mass, as is generic for non-compatible initial data. We turn a related observation into a conjecture concerning the connection between the vanishing viscosity limit and the applicability of the Prandtl theory.
In \cref{S:SomeConvergence}, we show that the arguments in \cite{K2008VVV} lead to the conclusion that some kind of convergence of a subsequence of the solutions to ($NS$) always occurs in the limit as $\nu \to 0$, but not necessarily to a solution to the Euler equations.
The second theme more directly addresses Tosio Kato's conditions from \cite{Kato1983} that are equivalent to ($VV$). We also deal with the closely related condition from \cite{K2006Kato} that uses vorticity in place of the gradient of the velocity that appears in one of Kato's conditions.
We derive in \cref{S:EquivCondition} a condition on the solution to ($NS$) on the boundary that is equivalent in 2D to ($VV$), giving a number of examples to which this condition applies in \cref{S:Examples}.
In \cref{S:BardosTiti} we discuss some interesting recent results of Bardos and Titi that they developed using dissipative solutions to the Euler Equations. We show how weaker, though still useful, 2D versions of these results can be obtained using direct elementary methods.
We start, however, in \cref{S:Background} with the notation and definitions we will need, and a summary of the pertinent results of \cite{K2006Kato, K2008VVV, Kato1983}.
\section{Definitions and past results}\label{S:Background}
\noindent We define the classical function spaces of incompressible fluids, \begin{align*}
H &= \set{u \in (L^2(\Omega))^d: \dv u = 0 \text{ in } \Omega, \,
u \cdot \mathbf{n} = 0 \text{ on } \Gamma} \end{align*} with the $L^2$-norm and \begin{align*}
V &= \set{u \in (H_0^1(\Omega))^d: \dv u = 0 \text{ in } \Omega} \end{align*} with the $H^1$-norm. We denote the $L^2$ or $H$ inner product by $(\cdot, \cdot)$. If $v$, $w$ are vector fields then $(v, w) = (v^i, w^i)$, where we use here and below the common convention of summing over repeated indices. Similarly, if $M$, $N$ are matrices of the same dimensions then $M \cdot N = M^{ij} N^{ij}$ and \begin{align*}
(M, N)
= (M^{ij}, N^{ij})
= \int_\Omega M \cdot N. \end{align*}
We will assume that $u$ and $\overline{u}$ satisfy the same initial conditions, \begin{align*}
u(0) = u_0, \quad \overline{u}(0) = u_0, \end{align*} and that $u_0$ is in $C^{k + \ensuremath{\epsilon}}(\Omega) \cap H$, $\ensuremath{\epsilon} > 0$, where $k = 1$ for two dimensions and $k = 2$ for 3 and higher dimensions, and that $f = \overline{f} \in C^1_{loc}(\ensuremath{\BB{R}}; C^1(\Omega))$. Then as shown in \cite{Koch2002} (Theorem 1 and the remarks on p. 508-509), there is some $T > 0$ for which there exists a unique solution, \begin{align}\label{e:ubarSmoothness}
\overline{u}
\text{ in } C^1([0, T]; C^{k + \ensuremath{\epsilon}}(\Omega)), \end{align} to ($EE$). In two dimensions, $T$ can be arbitrarily large, though it is only known that some positive $T$ exists in three and higher dimensions.
With such initial velocities, we are assured that there are weak solutions to $(NS)$, unique in 2D. Uniqueness of these weak solutions is not known in three and higher dimensions, so by $u = u_\nu$ we mean any of these solutions chosen arbitrarily. We never employ strong or classical solutions to $(NS)$.
\Ignore{ It follows, assuming that $f$ is in $L^1([0, T]; L^2(\Omega))$, that for such solutions, \begin{align}\label{e:NSVariationalIdentity}
\begin{split}
&(u(t), \phi(t)) - (u(0), \phi(0)) \\
&\qquad= \int_0^t \brac{(u, u \cdot \ensuremath{\nabla} \phi)
- \nu (\ensuremath{\nabla} u, \ensuremath{\nabla}
\phi) + (f, \phi) + (u, \ensuremath{\partial}_t \phi))} \, dt
\end{split} \end{align} for all $\phi$ in $C^1([0, T] \times \Omega) \cap C^1([0, T]; V)$. }
We define $\gamma_\mathbf{n}$ to be the boundary trace operator for the normal component of a vector field in $H$ and write \begin{align}\label{e:RadonMeasures}
\Cal{M}(\overline{\Omega}) \text{ for the space of Radon measures on } \overline{\Omega}. \end{align} That is, $\Cal{M}(\overline{\Omega})$ is the dual space of $C(\overline{\Omega})$. We let $\mu$ in $\Cal{M}(\overline{\Omega})$ be the measure supported on $\Gamma$ for which $\mu\vert_\Gamma$ corresponds to Lebesgue measure on $\Gamma$ (arc length for $d = 2$, area for $d = 3$). Then $\mu$ is also a member of $H^1(\Omega)^*$, the dual space of $H^1(\Omega)$.
We define the vorticity $\omega(u)$ to be the $d \times d$ antisymmetric matrix, \begin{align}\label{e:VorticityRd}
\omega(u) = \frac{1}{2}\brac{\ensuremath{\nabla} u - (\ensuremath{\nabla} u)^T}, \end{align} where $\ensuremath{\nabla} u$ is the Jacobian matrix for $u$: $(\ensuremath{\nabla} u)^{ij} = \ensuremath{\partial}_j u^i$. When working specifically in two dimensions, we alternately define the vorticity as the scalar curl of $u$: \begin{align}\label{e:VorticityR2}
\omega(u) = \ensuremath{\partial}_1 u^2 - \ensuremath{\partial}_2 u^1. \end{align}
Letting $\omega = \omega(u)$ and $\overline{\omega} = \omega(\overline{u})$, we define the following conditions:
\begingroup \allowdisplaybreaks \begin{align*}
(A) & \qquad u \to \overline{u} \text{ weakly in } H
\text{ uniformly on } [0, T], \\
(A') & \qquad u \to \overline{u} \text{ weakly in } (L^2(\Omega))^d
\text{ uniformly on } [0, T], \\
(B) & \qquad u \to \overline{u} \text{ in } L^\ensuremath{\infty}([0, T]; H), \\
(C) & \qquad \ensuremath{\nabla} u \to \ensuremath{\nabla} \overline{u} - \innp{\gamma_\mathbf{n} \cdot, \overline{u} \mu}
\text{ in } ((H^1(\Omega))^{d \times d})^*
\text{ uniformly on } [0, T], \\
(D) & \qquad \ensuremath{\nabla} u \to \ensuremath{\nabla} \overline{u} \text{ in } (H^{-1}(\Omega))^{d \times d}
\text{ uniformly on } [0, T], \\
(E) & \qquad \omega \to \overline{\omega}
- \frac{1}{2} \innp{\gamma_\mathbf{n} (\cdot - \cdot^T),
\overline{u} \mu}
\text{ in }
((H^1(\Omega))^{d \times d})^*
\text{ uniformly on } [0, T], \\
(F) & \qquad \omega \to \overline{\omega}
\text{ in }
(H^{-1}(\Omega))^{d \times d}
\text{ uniformly on } [0, T].
\end{align*} \endgroup We stress that $(H^1(\Omega))^*$ is the dual space of $H^1(\Omega)$, in contrast to $H^{-1}(\Omega)$, which is the dual space of $H^1_0(\Omega)$.
The condition in $(B)$ is the classical vanishing viscosity limit of ($VV$).
We will make the most use of condition $(E)$, which more explicitly means that \begin{align}\label{e:EExplicit}
(\omega(t), M)
\to (\overline{\omega}(t), M) - \frac{1}{2}\int_{\Gamma}
((M - M^T) \cdot \mathbf{n}) \cdot \overline{u}(t)
\text{ in } L^\ensuremath{\infty}([0, T]) \end{align} for any $M$ in $(H^1(\Omega))^{d \times d}$.
In two dimensions, defining the vorticity as in \refE{VorticityR2}, we also define the following two conditions: \begin{align*}
(E_2) & \qquad \omega \to \overline{\omega} - (\overline{u} \cdot \BoldTau) \mu
\text{ in } (H^1(\Omega))^*
\text{ uniformly on } [0, T], \\
(F_2) & \qquad \omega \to \overline{\omega} \text{ in } H^{-1}(\Omega)
\text{ uniformly on } [0, T]. \end{align*} Here, $\BoldTau$ is the unit tangent vector on $\Gamma$ that is obtained by rotating the outward unit normal vector $\mathbf{n}$ counterclockwise by $90$ degrees.
\Ignore{ Condition ($E_2$) means that \begin{align*}
(\omega(t), f)
\to (\overline{\omega}(t), f) - \int_{\Gamma} (\overline{u}(t) \cdot \BoldTau) f
\text{ in } L^\ensuremath{\infty}([0, T]) \end{align*} for any $f$ in $H^1(\Omega)$. }
\refT{VVEquiv} is proved in \cite{K2008VVV} ($(A) \implies (B)$ having been proved in \cite{Kato1983}), to which we refer the reader for more details.
\begin{theorem}[\cite{K2008VVV}]\label{T:VVEquiv}
Conditions ($A$), ($A'$), ($B$), ($C$), ($D$), and ($E$) are equivalent
(and each implies condition ($F$)).
In two dimensions, condition ($E_2$) and, when $\Omega$ is simply connected, ($F_2$)
are equivalent to the other conditions.\footnote{The restriction that $\Omega$ be
simply connected for the equivalence of ($F_2$) was not, but should
have been, in the published version of \cite{K2008VVV}.} \end{theorem}
\cref{T:VVEquiv} remains silent about rates of convergence, but examining the proof of it in \cite{K2008VVV} easily yields the following: \begin{theorem}\label{T:ROC}
Assume that ($VV$) holds with
\begin{align*}
\norm{u - \overline{u}}_{L^\ensuremath{\infty}(0, T; L^2(\Omega))}
\le F(\nu)
\end{align*}
for some fixed $T > 0$. Then
\begin{align*}
\norm{(u(t) - \overline{u}(t), v)}_{L^\ensuremath{\infty}([0, T])}
\le F(\nu) \norm{v}_{L^2(\Omega)}
\text{ for all } v \in (L^2(\Omega))^d
\end{align*}
and
\begin{align*}
\norm{(\omega(t) - \overline{\omega}(t), \varphi)}_{L^\ensuremath{\infty}([0, T])}
\le F(\nu) \norm{\ensuremath{\nabla} \varphi}_{L^2}
\text{ for all } \varphi \in H_0^1(\Omega).
\end{align*} \end{theorem}
\begin{remark}\label{R:ROCOthers}
\cref{T:ROC} gives the rates of convergence for ($A$) and ($F_2$);
the rates for ($C$), ($D$), ($E$), and ($E_2$) are like those given for ($F_2$)
(though the test function, $\varphi$, will lie in different spaces). \end{remark}
In \cite{Kato1983}, Tosio Kato showed that ($VV$) is equivalent to \begin{align*}
\nu \int_0^T \norm{\ensuremath{\nabla} u(s)}_{L^2(\Omega)}^2 \, dt \to 0
\text{ as } \nu \to 0 \end{align*} and to \begin{align}\label{e:KatoCondition}
\nu \int_0^T \norm{\ensuremath{\nabla} u(s)}_{L^2(\Gamma_{c \nu})}^2 \, dt \to 0
\text{ as } \nu \to 0. \end{align} Here, and in what follows, $\Gamma_\delta$ is a boundary layer in $\Omega$ of width $\delta > 0$.
In \cite{K2006Kato} it is shown that in \cref{e:KatoCondition}, the gradient can be replaced by the vorticity, so ($VV$) is equivalent to \begin{align}\label{e:KellCondition}
\nu \int_0^T \norm{\omega(s)}_{L^2(\Gamma_{c \nu})}^2 \, dt \to 0
\text{ as } \nu \to 0. \end{align} Note that the necessity of \cref{e:KellCondition} follows immediately from \cref{e:KatoCondition}, but the sufficiency does not, since on the inner boundary of $\Gamma_{c \nu}$ there is no boundary condition of any kind.
We also mention the works \cite{TW1998, W2001}, which together establish conditions equivalent to \refE{KatoCondition}, with a boundary layer slightly larger than that of Kato, yet only involving the tangential derivatives of either the normal or tangential components of $u$ rather than the full gradient. These conditions will not be used in the present work, however.
\Ignore{ The setup and notation are that of \cite{K2008VVV, K2006Kato}, and is largely inherited from \cite{Kato1983}: Weak solutions to the Navier-Stokes equations in a bounded domain, $\Omega$, having $C^2$-boundary, $\Gamma$, are denoted by $u$, the viscosity, $\nu > 0$, being implied by context. Weak (or often strong) solutions to the Euler equations are denoted by $\overline{u}$. Except in \refS{NavierBCs}, we use homogeneous Dirichlet conditions ($u = 0$) for the Navier-Stokes equations and we in any case always use no-penetration conditions ($u \cdot \bm{n} = 0$) for the Euler equations. Here, $\bm{n}$ is the outward normal to the boundary. We use $\omega = \omega(u)$ to be the curl of $u$, defined to be $\ensuremath{\partial}_1 u^2 - \ensuremath{\partial}_2 u^1$ in $2D$ and the antisymmetric part of $\ensuremath{\nabla} u$ in higher dimensions. Similarly for $\overline{\omega} = \omega(\overline{u})$.
We denote the $L^2$-inner product by $(\cdot, \cdot)$, and write $V$ for the space of all divergence-free vector fields in $H_0^1(\Omega)$. We will also use the related function space $H$ of divergence-free vector fields $v$ in $L^2(\Omega)$ with $v \cdot \mathbf{n} = 0$ on $\Gamma$ in the sense of a trace.
See \cite{K2008VVV, K2006Kato} for more details.
}
\Part{Theme I: Accumulation of vorticity}
\section{A 3D version of vorticity accumulation on the boundary}\label{S:3DVersion}
\noindent In \cref{T:VVEquiv}, the vorticity is defined to be the antisymmetric gradient, as in \cref{e:VorticityRd}. When working in 3D, it is usually more convenient to use the language of three-vectors in condition ($E$). This leads us to the condition $(E')$ in \cref{P:EquivE}.
\begin{prop}\label{P:EquivE}
The condition (E) in \cref{T:VVEquiv} is equivalent to
\begin{align*}
(E') \qquad \curl u \to \curl \overline{u} + (\overline{u} \times \bm{n}) \mu
\text{ in } L^\ensuremath{\infty}(0, T; ((H^1(\Omega))^3)^*).
\end{align*} \end{prop} \begin{proof} If $A$ is an antisymmetric $3 \times 3$ matrix then \begin{align*}
A \cdot M
&= \frac{A \cdot M + A \cdot M}{2}
= \frac{A \cdot M + A^T \cdot M^T}{2}
= \frac{A \cdot M - A \cdot M^T}{2} \\
&= A \cdot \frac{M - M^T}{2}. \end{align*} Thus, since $\omega$ and $\overline{\omega}$ are antisymmetric, referring to \refE{EExplicit}, we see that ($E$) is equivalent to \begin{align*}
(\omega(t), M) \to (\overline{\omega}(t), M)
- \int_\Gamma (M \bm{n}) \cdot \overline{u}(t)
\text{ in } L^\ensuremath{\infty}([0, T]) \end{align*} for all \textit{antisymmetric} matrices $M \in (H^1(\Omega))^{3 \times 3}$.
Now, for any three-vector $\varphi$ define \begin{align*}
F(\varphi)
&= \tmatrix{0 & -\varphi_3 & \varphi_2}
{\varphi_3 & 0 & -\varphi_1}
{-\varphi_2 & \varphi_1 & 0}. \end{align*} Then $F$ is a bijection from the vector space of three-vectors to the space of antisymmetric $3 \times 3$ matrices. Straightforward calculations show that \begin{align*}
F(\varphi) \cdot F(\psi)
= 2 \varphi \cdot \psi, \qquad
F(\varphi) v
= \varphi \times v \end{align*} for any three-vectors, $\varphi$, $\psi$, $v$. Also, $F(\curl u) = 2 \omega$ and $F(\curl \overline{u}) = 2 \overline{\omega}$.
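For instance, writing $A \cdot M$ for the entrywise (Frobenius) product $\sum_{i,j} A_{ij} M_{ij}$, the first identity can be checked directly from the definition of $F$:
\begin{align*}
F(\varphi) \cdot F(\psi)
= \varphi_3 \psi_3 + \varphi_2 \psi_2
+ \varphi_3 \psi_3 + \varphi_1 \psi_1
+ \varphi_2 \psi_2 + \varphi_1 \psi_1
= 2 \varphi \cdot \psi,
\end{align*}
while the first component of $F(\varphi) v$ is $-\varphi_3 v_2 + \varphi_2 v_3 = (\varphi \times v)_1$, the other two components following in the same way.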
For any $\varphi \in (H^1(\Omega))^3$ let $M = F(\varphi)$. Then \begin{align*}
(\omega, M)
&= \frac{1}{2} \pr{F(\curl u), F(\varphi)}
= \pr{\curl u, \varphi}, \\
(\overline{\omega}, M)
&= \frac{1}{2} \pr{F(\curl \overline{u}), F(\varphi)}
= \pr{\curl \overline{u}, \varphi}, \\
(M \bm{n}) \cdot \overline{u}
&= (F(\varphi) \bm{n}) \cdot \overline{u}
= (\varphi \times \bm{n}) \cdot \overline{u}
= - (\overline{u} \times \bm{n}) \cdot \varphi. \end{align*} In the last equality, we used the scalar triple product identity $(a \times b) \cdot c = - a \cdot (c \times b)$. Because $F$ is a bijection, this gives the equivalence of ($E$) and ($E'$). \end{proof}
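For completeness, we note that the triple product identity used at the end of the proof follows from the familiar formula $(a \times b) \cdot c = \det(a, b, c)$ together with the fact that interchanging two arguments of the determinant changes its sign:
\begin{align*}
a \cdot (c \times b)
= (c \times b) \cdot a
= \det(c, b, a)
= - \det(a, b, c)
= - (a \times b) \cdot c.
\end{align*}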
\section{\texorpdfstring{$L^p$}{Lp}-norms of the vorticity blow up for \texorpdfstring{$p > 1$}{p > 1}}\label{S:LpNormsBlowUp}
\noindent
\begin{theorem}\label{T:VorticityNotBounded} Assume that $\overline{u}$ is not identically zero on $[0, T] \times \Gamma$. If any of the equivalent conditions of \cref{T:VVEquiv} holds then for all $p \in (1, \ensuremath{\infty}]$, \begin{align}\label{e:omegaBlowup}
\limsup_{\nu \to 0^+} \norm{\omega}_{L^\ensuremath{\infty}([0, T]; L^p)}
= \ensuremath{\infty}. \end{align} \end{theorem} \begin{proof} We prove the contrapositive. Assume that the conclusion is not true. Then for some $q' \in (1, \ensuremath{\infty}]$ it must be that for some $C_0 > 0$ and $\nu_0 > 0$, \begin{align}\label{e:omegaBoundedCondition}
\norm{\omega}_{L^\ensuremath{\infty}([0, T]; L^{q'})} \le C_0
\text{ for all } 0 < \nu \le \nu_0. \end{align} Since $\Omega$ is a bounded domain, if \cref{e:omegaBoundedCondition} holds for some $q' \in (1, \ensuremath{\infty}]$ it holds for all lower values of $q'$ in $(1, \ensuremath{\infty}]$, so we can assume without loss of generality that $q' \in (1, \ensuremath{\infty})$.
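The reduction to smaller exponents is just an instance of \Holder's inequality on the bounded domain $\Omega$: for any $1 < r \le q'$,
\begin{align*}
\norm{\omega}_{L^r(\Omega)}
\le \abs{\Omega}^{\frac{1}{r} - \frac{1}{q'}}
\norm{\omega}_{L^{q'}(\Omega)}.
\end{align*}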
Let $q = q'/(q' - 1) \in (1, \ensuremath{\infty})$ be \Holder conjugate to $q'$ and $p = 2/q + 1 \in (1, 3)$. Then $p, q, q'$ satisfy the conditions of \cref{C:TraceCor} with $(p -1) q = 2$.
Applying \cref{C:TraceCor} gives, for almost all $t \in [0, T]$,
\begingroup \allowdisplaybreaks \begin{align*}
&\norm{u(t) - \overline{u}(t)}_{L^p(\Gamma)}
\le C \norm{u(t) - \overline{u}(t)}_{L^2(\Omega)}
^{1 - \frac{1}{p}}
\norm{\ensuremath{\nabla} u(t) - \ensuremath{\nabla} \overline{u}(t)}_{L^{q'}(\Omega)}
^{\frac{1}{p}} \\
&\qquad
\le C \norm{u(t) - \overline{u}(t)}_{L^2(\Omega)}
^{1 - \frac{1}{p}}
\pr{\norm{\ensuremath{\nabla} u(t)}_{L^{q'}}
+ \norm{\ensuremath{\nabla} \overline{u}(t)}_{L^{q'}}}
^{\frac{1}{p}} \\
&\qquad
\le C \norm{u(t) - \overline{u}(t)}_{L^2(\Omega)}
^{1 - \frac{1}{p}}
\pr{C(q') \norm{\omega(t)}_{L^{q'}}
+ \norm{\ensuremath{\nabla} \overline{u}(t)}_{L^{q'}}}
^{\frac{1}{p}} \\
&\qquad
\le C \norm{u(t) - \overline{u}(t)}_{L^2(\Omega)}
^{1 - \frac{1}{p}} \end{align*} \endgroup for all $0 < \nu \le \nu_0$. Here we used \cref{e:omegaBoundedCondition} and the inequality, $\norm{\ensuremath{\nabla} u}_{L^{q'}(\Omega)} \le C(q') \norm{\omega}_{L^{q'}(\Omega)}$ for all $q' \in (1, \ensuremath{\infty})$ of Yudovich \cite{Y1963}. Hence, \begin{align*}
\norm{u - \overline{u}}_{L^\ensuremath{\infty}([0, T]; L^p(\Gamma))}
\le C \norm{u - \overline{u}}_{L^\ensuremath{\infty}([0, T]; L^2(\Omega))}
^{1 - \frac{1}{p}}
\to 0 \end{align*} as $\nu \to 0$. But, \begin{align*}
\norm{u - \overline{u}}_{L^\ensuremath{\infty}([0, T]; L^p(\Gamma))}
= \norm{\overline{u}}_{L^\ensuremath{\infty}([0, T]; L^p(\Gamma))}
\ne 0, \end{align*} so condition (B) cannot hold and so neither can any of the equivalent conditions in \cref{T:VVEquiv}. \end{proof}
\section{Improved convergence when vorticity bounded in \texorpdfstring{$L^1$}{L1}}\label{S:ImprovedConvergence}
\noindent In \cref{S:LpNormsBlowUp} we showed that if the classical vanishing viscosity limit holds then the $L^p$ norms of $\omega$ must blow up as $\nu \to 0$ for all $p \in (1, \ensuremath{\infty}]$---unless the Eulerian velocity vanishes identically on the boundary. This leaves open the possibility, however, that the $L^1$ norm of $\omega$ remains bounded while the classical vanishing viscosity limit still holds. This happens, for instance, for radially symmetric vorticity in a disk (Examples 1a and 3 in \cref{S:Examples}), as shown in \cite{FLMT2008}.
In fact, as we show in \cref{C:EquivConvMeasure}, when ($VV$) holds and the $L^1$ norm of $\omega$ remains bounded in $\nu$, the convergence in condition ($E$) is stronger; namely, weak-$*$ in measure (as in \cite{FLMT2008}). (See \cref{e:RadonMeasures} and the comments after it for the definitions of $\Cal{M}(\overline{\Omega})$ and $\mu$.)
\begin{cor}\label{C:EquivConvMeasure}
Suppose that $u \to \overline{u} \text{ in } L^\ensuremath{\infty}(0, T; H)$ and
$\curl u$ is bounded in $L^\ensuremath{\infty}(0, T; L^1(\Omega))$ uniformly in $\nu$.
Then in 3D,
\begin{align}\label{e:BetterConvergence}
\curl u \to \curl \overline{u} + (\overline{u} \times \bm{n}) \mu
\quad \weak^* \text{ in } L^\ensuremath{\infty}(0, T; \Cal{M}(\overline{\Omega})).
\end{align}
Similarly, ($C$), ($E$), and ($E_2$) hold with $\weak^*$ convergences
in $L^\ensuremath{\infty}(0, T; \Cal{M}(\overline{\Omega}))$ rather than uniformly in
$(H^1(\Omega))^*$. \end{cor} \begin{proof}
We prove \cref{e:BetterConvergence} explicitly for 3D solutions,
the results for ($C$), ($E$), and ($E_2$) following in the same way.
Let $\psi \in C(\overline{\Omega})^3$. What we must show is that
\begin{align*}
(\curl u(t) - \curl \overline{u}(t), \psi)
\to \int_\Gamma (\overline{u}(t) \times \bm{n}) \cdot \psi
\text{ in } L^\ensuremath{\infty}([0, T]).
\end{align*}
So let $\ensuremath{\epsilon} > 0$ and choose $\varphi \in H^1(\Omega)^3$ with
$\norm{\psi - \varphi}_{C(\overline{\Omega})} < \ensuremath{\epsilon}$. We can always find
such a $\varphi$ because smooth vector fields on $\overline{\Omega}$ lie in $H^1(\Omega)^3$ and are dense in $C(\overline{\Omega})^3$ in the uniform norm.
Let
\begin{align*}
M = \max \set{\norm{\curl u
- \curl \overline{u}}_{L^\ensuremath{\infty}(0, T; L^1(\Omega))},
\norm{\overline{u}}_{L^\ensuremath{\infty}([0, T] \times \Omega)}},
\end{align*}
which we note is finite, uniformly in $\nu$, since
$\norm{\curl u}_{L^\ensuremath{\infty}(0, T; L^1(\Omega))}$ is bounded uniformly in $\nu$
by hypothesis and $\norm{\curl \overline{u}}_{L^\ensuremath{\infty}(0, T; L^1(\Omega))}$ is
finite. Then
\begingroup
\allowdisplaybreaks
\begin{align*}
&\abs{(\curl u(t) - \curl \overline{u}(t), \psi)
- \int_\Gamma (\overline{u}(t) \times \bm{n}) \cdot \psi} \\
&\qquad
\le \abs{(\curl u(t) - \curl \overline{u}(t), \psi - \varphi)
- \int_\Gamma (\overline{u}(t) \times \bm{n}) \cdot
(\psi - \varphi)} \\
&\qquad\qquad
+ \abs{(\curl u(t) - \curl \overline{u}(t), \varphi)
- \int_\Gamma (\overline{u}(t) \times \bm{n}) \cdot \varphi} \\
&\qquad
\le C M \ensuremath{\epsilon}
+ \abs{(\curl u(t) - \curl \overline{u}(t), \varphi)
- \int_\Gamma (\overline{u}(t) \times \bm{n}) \cdot \varphi}.
\end{align*}
\endgroup
By \cref{P:EquivE}, we can make the last term above smaller
than, say, $\ensuremath{\epsilon}$, by choosing $\nu$ sufficiently small, which is sufficient
to give the result. \end{proof}
\begin{remark} Suppose that we have the slightly stronger condition that $\ensuremath{\nabla} u$ is bounded in $L^\ensuremath{\infty}(0, T; L^1(\Omega))$ uniformly in $\nu$. If we are in 2D, $W^{1, 1}(\Omega)$ is compactly embedded in $L^2(\Omega)$. This is sufficient to conclude that ($VV$) holds, as shown in \cite{GKLMN14}. \end{remark}
\section{Width of the boundary layer}\label{S:BoundaryLayerWidth}
\noindent Working in two dimensions, make the assumptions on the initial velocity and on the forcing in \cref{T:VVEquiv}, and assume in addition that the total mass of the initial vorticity does not vanish; that is, \begin{align}\label{e:NonzeroMass}
m := \int_\Omega \omega_0 = (\omega_0, 1) \ne 0. \end{align} (In particular, this means that $u_0$ is not in $V$.) The total mass of the Eulerian vorticity is conserved so \begin{align}\label{e:mEAllTime}
(\overline{\omega}(t), 1) = m \text{ for all } t \in \ensuremath{\BB{R}}. \end{align} The Navier-Stokes velocity, however, is in $V$ for all positive time, so its total mass is zero; that is, \begin{align}\label{e:mNSAllTime}
(\omega(t), 1) = 0 \text{ for all } t > 0. \end{align}
Let us suppose that the vanishing viscosity limit holds. Fix $\delta > 0$ and let $\varphi_\delta$ be a smooth cutoff function equal to $1$ on $\Gamma_\delta$ and equal to 0 on $\Omega \setminus \Gamma_{2 \delta}$. Then by ($F_2$) of \cref{T:VVEquiv} and using \cref{e:mEAllTime}, \begin{align*}
\abs{(\omega, 1 - \varphi_\delta) - m}
\to \abs{(\overline{\omega}, 1 - \varphi_\delta) - m}
= \abs{m - (\overline{\omega}, \varphi_\delta) - m}
\le C \delta, \end{align*} the convergence being uniform on $[0, T]$. Thus, for all sufficiently small $\nu$, \begin{align}\label{e:omega1phiLimit}
\abs{(\omega, 1 - \varphi_\delta) - m} \le C \delta. \end{align}
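One way to see the inequality $\abs{(\overline{\omega}, \varphi_\delta)} \le C \delta$ implicit in the last step, assuming the initial vorticity lies in $L^\ensuremath{\infty}$, is through the conservation of the $L^\ensuremath{\infty}$ norm of the vorticity for 2D Euler:
\begin{align*}
\abs{(\overline{\omega}, \varphi_\delta)}
\le \norm{\overline{\omega}}_{L^\ensuremath{\infty}} \abs{\Gamma_{2 \delta}}
= \norm{\overline{\omega}_0}_{L^\ensuremath{\infty}} \abs{\Gamma_{2 \delta}}
\le C \delta,
\end{align*}
the constant $C$ depending on the initial vorticity and on $\Gamma$.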
\Ignore { \begin{align}\label{e:E2VVV}
\omega \to \overline{\omega} - (\overline{u} \cdot \BoldTau) \mu
\text{ in } (H^1(\Omega))^*
\text{ uniformly on } [0, T]. \end{align}
Fix $\delta > 0$ let $\varphi_\delta$ be a smooth cutoff function equal to $1$ on $\Gamma_\delta$ and equal to 0 on $\Omega \setminus \Gamma_{2 \delta}$. Letting $\nu \to 0$, since $\varphi_\delta = 1$ on $\Gamma$, we have \begin{align*}
(\omega, \varphi_\delta)
&\to (\overline{\omega}, \varphi_\delta)
- \int_\Gamma \overline{u} \cdot \BoldTau
= (\overline{\omega}, \varphi_\delta)
+ \int_\Gamma \overline{u}^\perp \cdot \mathbf{n} \\
&= (\overline{\omega}, \varphi_\delta)
+ \int_\Omega \dv \overline{u}^\perp
= (\overline{\omega}, \varphi_\delta)
- \int_\Omega \overline{\omega} \\
&= (\overline{\omega}, \varphi_\delta)
- \int_\Omega \overline{\omega}_0
= (\overline{\omega}, \varphi_\delta) - m. \end{align*} The convergence here is uniform over $[0, T]$.
Now,
\begin{align*}
\abs{(\overline{\omega}, \varphi_\delta)}
\le \norm{\overline{\omega}}_{L^\ensuremath{\infty}} \abs{\Gamma_{2 \delta}}
= \norm{\overline{\omega}_0}_{L^\ensuremath{\infty}} \abs{\Gamma_{2 \delta}}
\le C \delta. \end{align*} Thus, for all sufficiently small $\nu$, \begin{align}\label{e:omegaphiLimit}
\abs{(\omega, \varphi_\delta) + m} \le C \delta. \end{align}
For $t > 0$, $u$ is in $V$ so the total mass of $\omega$ is zero for all $t > 0$; that is, \begin{align*}
\int_\Omega \omega = 0. \end{align*} It follows that for all sufficiently small $\nu$, \begin{align}\label{e:omega1phiLimit}
\abs{(\omega, 1 - \varphi_\delta) - m} \le C \delta. \end{align} This reflects one of the consequences of \cref{T:VVEquiv} that \begin{align*}
\omega \to \overline{\omega} \text{ in } H^{-1}(\Omega)
\text{ uniformly on } [0, T], \end{align*} which represents a kind of weak internal convergence of the vorticity. }
In \cref{e:omega1phiLimit} we must hold $\delta$ fixed as we let $\nu \to 0$, for that is all we can obtain from the weak convergence in ($F_2$). Or rather, it is all we can obtain without making some assumption on the rate of convergence, a matter to which we return in the next section.
Still, it is natural to ask whether we can set $\delta = c \nu$ in \cref{e:omega1phiLimit}, this being the width of the boundary layer in Kato's seminal paper \cite{Kato1983} on the subject. If this could be shown to hold it would say that outside of Kato's layer the vorticity for solutions to ($NS$) converges in a (very) weak sense to the vorticity for the solution to ($E$). The price for such convergence, however, would be a buildup of vorticity inside the layer to satisfy the constraint in \cref{e:mNSAllTime}.
In fact, however, this is not the case, at least not by a closely related measure of vorticity buildup near the boundary. The total mass of the vorticity (in fact, its $L^1$-norm) in any layer smaller than that of Kato goes to zero and, if the vanishing viscosity limit holds, then the same holds for Kato's layer. Hence, if there is a layer in which vorticity accumulates, that layer is at least as wide as Kato's and is wider than Kato's if the vanishing viscosity limit holds. This is the content of the following theorem.
\begin{theorem}\label{T:BoundaryLayerWidth} Make the assumptions on the initial velocity and on the forcing in \cref{T:VVEquiv}. For any positive function $\delta = \delta(\nu)$, \begin{align}\label{e:OmegaL1VanishGeneral}
\norm{\omega}_{L^2([0, T]; L^1(\Gamma_{\delta(\nu)}))}
\le C \pr{\frac{\delta(\nu)}{\nu}}^{1/2}. \end{align} If the vanishing viscosity limit holds and \begin{align*}
\limsup_{\nu \to 0^+} \frac{\delta(\nu)}{\nu} < \ensuremath{\infty} \end{align*} then \begin{align}\label{e:OmegaL1Vanish}
\norm{\omega}_{L^2([0, T]; L^1(\Gamma_{\delta(\nu)}))}
\to 0 \text{ as } \nu \to 0. \end{align} \end{theorem}
\begin{proof} By the Cauchy-Schwarz inequality, \begin{align*}
\norm{\omega}_{L^1(\Gamma_{\delta(\nu)})}
\le \norm{1}_{L^2(\Gamma_{\delta(\nu)})} \norm{\omega}_{L^2(\Gamma_{\delta(\nu)})}
\le C \delta^{1/2} \norm{\omega}_{L^2(\Gamma_{\delta(\nu)})} \end{align*} so \begin{align*}
\frac{C}{\delta} \norm{\omega}_{L^1(\Gamma_{\delta(\nu)})}^2
\le \norm{\omega}_{L^2(\Gamma_{\delta(\nu)})}^2 \end{align*} and \begin{align*}
\frac{C \nu}{\delta} \norm{\omega}_{L^2([0, T]; L^1(\Gamma_{\delta(\nu)}))}^2
\le \nu \norm{\omega}_{L^2([0, T]; L^2(\Gamma_{\delta(\nu)}))}^2. \end{align*} By the basic energy inequality for the Navier-Stokes equations, the right-hand side is bounded, giving \refE{OmegaL1VanishGeneral}, and if the vanishing viscosity limit holds, the right-hand side goes to zero by \cref{e:KellCondition}, giving \refE{OmegaL1Vanish}. \end{proof}
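For the reader's convenience, here is the bound used in the last step, stated with $f = 0$ for simplicity (a forcing term adds only a contribution that is bounded uniformly in $\nu$): since $\abs{\omega}^2 \le 2 \abs{\ensuremath{\nabla} u}^2$ pointwise,
\begin{align*}
\nu \int_0^T \norm{\omega}_{L^2(\Gamma_{\delta(\nu)})}^2
\le 2 \nu \int_0^T \norm{\ensuremath{\nabla} u}_{L^2(\Omega)}^2
\le \norm{u_0}_{L^2(\Omega)}^2.
\end{align*}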
\begin{remark} In \refT{BoundaryLayerWidth}, we do not need the assumption in \refE{NonzeroMass} nor do we need to assume that we are in dimension two. The result is of most interest, however, when one makes these two assumptions. \end{remark}
\begin{remark} \refE{OmegaL1Vanish} also follows from condition (iii'') in \cite{K2006Kato} using the Cauchy-Schwarz inequality in the manner above, but that is using a sledge hammer to prove a simple inequality. Note that \refE{OmegaL1Vanish} is necessary for the vanishing viscosity limit to hold, but is not (as far as we can show) sufficient. \end{remark}
\Ignore{
\begin{theorem}\label{T:BoundaryLayerWidth} Make the assumptions on the initial velocity and on the forcing in \cref{T:VVEquiv}. Assume that the vanishing viscosity limit holds. For any nonnegative function $\delta = \delta(\nu)$, \begin{align}\label{e:OmegaMassVanishGeneral}
\limsup_{\nu \to 0^+} \int_0^T \abs{\int_{\Gamma_{\delta(\nu)}} \omega}
\le C T \lim_{\nu \to 0} \frac{\delta(\nu)}{\nu}. \end{align} If \begin{align*}
\lim_{\nu \to 0} \frac{\delta(\nu)}{\nu} = 0 \end{align*} then \begin{align}\label{e:OmegaMassVanish}
\int_0^T \abs{\int_{\Gamma_{\delta(\nu)}} \omega}
\to 0 \text{ as } \nu \to 0. \end{align} \end{theorem} \begin{proof} \begin{align*}
\int_{\Gamma_\delta} \omega
= \int_{A_{L, \delta}} \omega
+ \int_{\Gamma_\delta \setminus A_{L, \delta}} \omega, \end{align*} where \begin{align*}
A_{L, \delta}= \set{x \in \Gamma_\delta \colon \abs{\omega} \ge L}. \end{align*} Thus, \begin{align*}
\int_{\Gamma_\delta} \omega
\le \int_{A_{L, \delta}} \omega
+ L \abs{\Gamma_\delta}
\le \int_{A_{L, \delta}} \omega
+ C \delta L. \end{align*}
Let $L$ vary with $\nu$ at a rate we will specify later. Then, \begin{align*}
\nu &\int_0^T \int_{\Gamma_\delta} \abs{\omega}^2
= \nu \int_0^T \int_{A_{L, \delta}} \abs{\omega}^2
+ \nu \int_0^T \int_{\Gamma_\delta \setminus A_{L, \delta}} \abs{\omega}^2 \\
&\ge \nu \int_0^T \int_{A_{L, \delta}} L \abs{\omega}
\ge L \nu \int_0^T \abs{\int_{A_{L, \delta}} \omega} \\
&\ge L \nu \brac{\int_0^T \abs{\int_{\Gamma_\delta} \omega}
- \int_0^T C \delta L}
= L \nu \int_0^T \abs{\int_{\Gamma_\delta} \omega}
- C T \nu \delta L^2. \end{align*}
Define \begin{align*}
M(\nu)
= \int_0^T \abs{\int_{\Gamma_{\delta(\nu)}} \omega}, \quad
M
= \limsup_{\nu \to 0^+} M(\nu). \end{align*} Letting $L = \nu^{-1}$, we have \begin{align*}
\limsup_{\nu \to 0^+} &\, \nu \int_0^T \int_{\Gamma_{\delta(\nu)}} \abs{\omega}^2
\ge \limsup_{\nu \to 0^+} \brac{L_k \nu M(\nu) - CT \nu \delta(\nu) L^2} \\
&= M - CT \limsup_{\nu \to 0^+} \frac{\delta(\nu)}{\nu}
= M. \end{align*} But because we have assumed that the vanishing viscosity limit holds, the left-hand side vanishes with $\nu$ regardless of how the function $\delta$ is chosen. Thus, \begin{align*}
M \le CT \limsup_{\nu \to 0^+} \frac{\delta(\nu)}{\nu}, \end{align*} giving \refE{OmegaMassVanishGeneral} and also \refE{OmegaMassVanish}. \end{proof} }
\section{Optimal convergence rate}\label{S:OptimalConvergenceRate}
\noindent Still working in two dimensions, let us return to \cref{e:omega1phiLimit}, assuming as in the previous section that the vanishing viscosity limit holds, but bringing the rate of convergence function, $F$, of \cref{T:ROC} into the analysis. We will now make $\delta = \delta(\nu) \to 0$ as $\nu \to 0$, and choose $\varphi_\delta$ slightly differently, requiring that it equal $1$ on $\Gamma_{\delta^*}$ and vanish outside of $\Gamma_\delta$ for some $0 < \delta^* = \delta^*(\nu) < \delta$. We can see from the argument that led to \cref{e:omega1phiLimit}, incorporating the convergence rate for ($F_2$) given by \cref{T:ROC}, that \begin{align*}
\abs{(\omega, 1 - \varphi_\delta) - m}
\le C \delta + \norm{\ensuremath{\nabla} \varphi_\delta}_{L^2(\Omega)} F(\nu). \end{align*} Because $\ensuremath{\partial} \Omega$ is $C^2$, we can always choose $\varphi_\delta$ so that $\abs{\ensuremath{\nabla} \varphi_\delta} \le C(\delta - \delta^*)^{-1}$. Then for all sufficiently small $\delta$, \begin{align*}
\norm{\ensuremath{\nabla} \varphi_\delta}_{L^2(\Omega)}
\le \pr{\int_{\Gamma_\delta \setminus \Gamma_{\delta^*}}
\pr{\frac{C}{\delta - \delta^*}}^2}^{\frac{1}{2}}
= C \frac{(\delta - \delta^*)^{\frac{1}{2}}}{\delta - \delta^*}
= C (\delta - \delta^*)^{-\frac{1}{2}}. \end{align*} We then have \begin{align}\label{e:mDiffEst}
\abs{(\omega, 1 - \varphi_\delta) - m}
\le C \brac{\delta + (\delta - \delta^*)^{-\frac{1}{2}} F(\nu)}. \end{align}
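In more detail, the unnumbered display preceding \cref{e:mDiffEst} follows from the splitting
\begin{align*}
\abs{(\omega, 1 - \varphi_\delta) - m}
&\le \abs{(\omega - \overline{\omega}, 1 - \varphi_\delta)}
+ \abs{(\overline{\omega}, \varphi_\delta)} \\
&\le F(\nu) \norm{\ensuremath{\nabla} \varphi_\delta}_{L^2(\Omega)}
+ C \delta,
\end{align*}
where we used \cref{e:mEAllTime}, applied the rate in \cref{T:ROC} for ($F_2$) to the first term (noting that $1 - \varphi_\delta$ lies in $H_0^1(\Omega)$ with $\ensuremath{\nabla}(1 - \varphi_\delta) = - \ensuremath{\nabla} \varphi_\delta$), and bounded the second term as in the previous section.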
For any measurable subset $\Omega'$ of $\Omega$, define \begin{align*}
\mathbf{M}(\Omega') = \int_{\Omega'} \omega, \end{align*} the total mass of vorticity on $\Omega'$. Then \begin{align*}
\mathbf{M}(\Gamma_\delta^C)
= (\omega, 1 - \varphi_\delta)
- \int_{\Gamma_\delta \setminus \Gamma_{\delta^*}} (1 - \varphi_\delta) \omega \end{align*} so \begin{align}\label{e:MDiffEst}
\begin{split}
\abs{(\omega, 1 - \varphi_\delta) - \mathbf{M}(\Gamma_\delta^C)}
&\le \norm{\omega}_{L^2(\Gamma_\delta \setminus \Gamma_{\delta^*})}
\norm{1 - \varphi_\delta}_{L^2(\Gamma_\delta \setminus \Gamma_{\delta^*})} \\
&\le C (\delta - \delta^*)^{\frac{1}{2}}
\norm{\omega}_{L^2(\Gamma_{\delta})}.
\end{split} \end{align}
\Ignore{
To obtain any reasonable control on the total mass of vorticity, we certainly need $\delta, \delta^* \to 0$ as $\nu \to 0$, but more important, as we can see from \cref{e:mDiffEst}, we need \begin{align}\label{e:LayerReq1}
(\delta - \delta^*)^{-\frac{1}{2}} F(\nu) \to 0
\text{ as } \nu \to 0. \end{align} In light of \cref{T:BoundaryLayerWidth} and its proof, we should also require at least that \begin{align}\label{e:LayerReq2}
(\delta - \delta^*)^{\frac{1}{2}}
\norm{\omega}_{L^2(0, T; L^2(\Gamma_\delta))} \to 0
\text{ as } \nu \to 0 \end{align} so that the bound in \cref{e:mDiffEst} will lead, via \cref{e:MDiffEst}, to a bound on the total mass of vorticity outside the boundary layer, $\Gamma_\delta$.
Now, as in the proof of \cref{T:BoundaryLayerWidth}, if we let $\delta - \delta^* = O(\nu)$ then the condition in \cref{e:LayerReq2} will hold by \cref{e:KellCondition}. Then the requirement in \cref{e:LayerReq1} becomes \begin{align*}
F(\nu)
= o \pr{(\delta - \delta^*)^{\frac{1}{2}}}
= o (\nu^{\frac{1}{2}}). \end{align*} }
From these observations and those in the previous section, we have the following: \begin{theorem}\label{T:VorticityMassControl}
\Ignore{
Assume that $\delta = \delta(\nu) \to 0$ as $\nu \to 0$ and define
\begin{align*}
M_\delta
= \norm{\int_\Omega \omega_0
- \int_{\Gamma_\delta^C} \omega(t)}_{L^2([0, T])}.
\end{align*}
If the classical vanishing viscosity limit in ($VV$) holds with a rate that is
$o(\nu^{\frac{1}{2}})$ then $M_{\delta(\nu)} \to 0$ as $\nu \to 0$.
}
Assume that the classical vanishing viscosity limit in ($VV$) holds with a rate
of convergence,
$F(\nu) = o(\nu^{1/2})$. Then in 2D
the initial mass of the vorticity must be zero. \end{theorem} \begin{proof}
From \cref{e:mDiffEst,e:MDiffEst},
\begin{align*}
M_\delta
&:= \abs{m - \mathbf{M}(\Gamma_\delta^C)}
\le \abs{m - (\omega, 1 - \varphi_\delta)}
+ \abs{(\omega, 1 - \varphi_\delta) - \mathbf{M}(\Gamma_\delta^C)} \\
&\le C \brac{\delta + (\delta - \delta^*)^{-\frac{1}{2}} F(\nu)}
+ C (\delta - \delta^*)^{\frac{1}{2}}
\norm{\omega}_{L^2(\Gamma_{\delta})}.
\end{align*}
Choosing $\delta(\nu) = \nu$, $\delta^*(\nu) = \nu/2$, we have
\begin{align*}
M_\nu
&\le C \brac{\nu + \nu^{-\frac{1}{2}} o(\nu^{\frac{1}{2}})}
+ C \nu^{\frac{1}{2}}
\norm{\omega}_{L^2(\Gamma_{\nu})},
\end{align*}
uniformly over $[0, T]$. Squaring, integrating in time, and applying Young's
inequality gives
\begin{align*}
\norm{M_\nu}_{L^2([0, T])}^2
= \int_0^T M_\nu^2
\le CT (\nu^2 + o(1))
+ C \nu \norm{\omega}_{L^2(0, T; L^2(\Gamma_\nu))}^2
\to 0
\end{align*}
as $\nu \to 0$ by \cref{e:KellCondition}.
Then,
\begin{align*}
\norm{m - \mathbf{M}(\Omega)}_{L^2([0, T])}
&\le \norm{m - \mathbf{M}(\Gamma_\nu^C)}_{L^2([0, T])}
+ \norm{\mathbf{M}(\Gamma_\nu)}_{L^2([0, T])} \\
&\le \norm{M_\nu}_{L^2([0, T])}
+ \norm{\omega}_{L^2([0, T]; L^1(\Gamma_{\nu}))}
\to 0
\end{align*}
as $\nu \to 0$ by \cref{T:BoundaryLayerWidth}.
But $u(t)$ lies in $V$ so $\mathbf{M}(\Omega) = 0$ for all $t > 0$.
Hence, the limit above is possible only if $m = 0$. \end{proof}
For non-compatible initial data, that is, for $u_0 \notin V$, the total mass of vorticity will generically not be zero, so $C \sqrt{\nu}$ should be considered the best rate of convergence that can be hoped for with non-compatible initial data. As we will see in \cref{R:ROC}, however, a rate of convergence even as good as $C \sqrt{\nu}$ is almost impossible to obtain unless the initial data is fairly smooth, and even then it would only occur in special circumstances.
Therefore, let us assume that the rate of convergence in ($VV$) is only $F(\nu) = C \nu^{1/4}$. As we will see in \cref{S:Examples}, this is a more typical rate of convergence for the simple examples for which ($VV$) is known to hold.
Now \cref{e:mDiffEst} still gives a useful bound as long as $\delta - \delta^*$ is slightly larger than the Prandtl layer width of $C \sqrt{\nu}$ (though \cref{e:MDiffEst} then fails to tell us anything useful). So let us set $\delta = 2 \nu^{1/2 - \ensuremath{\epsilon}}$, $\delta^* = \nu^{1/2 - \ensuremath{\epsilon}}$, $\ensuremath{\epsilon} > 0$ arbitrarily small. We are building here to a conjecture, so for these purposes we will act as though $\ensuremath{\epsilon} = 0$.
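To make this choice of widths concrete: with $F(\nu) = C \nu^{1/4}$, $\delta = 2 \nu^{1/2 - \ensuremath{\epsilon}}$, and $\delta^* = \nu^{1/2 - \ensuremath{\epsilon}}$, the two terms in \cref{e:mDiffEst} become
\begin{align*}
\delta + (\delta - \delta^*)^{-\frac{1}{2}} F(\nu)
= 2 \nu^{\frac{1}{2} - \ensuremath{\epsilon}}
+ C \nu^{-\frac{1}{4} + \frac{\ensuremath{\epsilon}}{2}} \nu^{\frac{1}{4}}
= 2 \nu^{\frac{1}{2} - \ensuremath{\epsilon}} + C \nu^{\frac{\ensuremath{\epsilon}}{2}},
\end{align*}
which tends to zero with $\nu$, though arbitrarily slowly as $\ensuremath{\epsilon} \to 0$.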
If the Prandtl theory is correct, then we should expect that $\mathbf{M}(\Gamma_\delta^C) \to m$ as $\nu \to 0$, since outside of the Prandtl layer $u$ matches $\overline{u}$. But the total mass of vorticity for all positive time is zero, and the total mass in the Kato Layer, $\Gamma_\nu$, goes to zero by \cref{T:BoundaryLayerWidth}. There would be no choice then but to have a total mass of vorticity between the Kato and Prandtl layers that approaches $-m$ as the viscosity vanishes. (Since the Kato layer is much smaller than the Prandtl layer, this does not require that there be any higher concentration of vorticity in any particular portion of the Prandtl layer, though.)
Now suppose that the rate of convergence is even slower than $C \nu^{1/4}$. Then \cref{e:mDiffEst} gives a measure of $\mathbf{M}(\Gamma_\delta^C)$ converging to $m$ well outside the Prandtl layer. This does not directly contradict any tenet of the Prandtl theory, but it suggests that for small viscosity the solution to the Navier-Stokes equations matches the solution to the Euler equations only well outside the Prandtl layer. This leads us to the following conjecture:
\begin{conj}\label{J:Prandtl}
If the vanishing viscosity limit in ($VV$) holds at a rate slower than
$C \nu^{\frac{1}{4}}$ in 2D then the Prandtl theory fails. \end{conj}
We conjecture no further, however, as to whether the Prandtl equations become ill-posed or whether the formal asymptotics fail to hold rigorously.
\section{Some kind of convergence always happens}\label{S:SomeConvergence}
\noindent Assume that $v$ is a vector field lying in $L^\ensuremath{\infty}([0, T]; H^1(\Omega))$. An examination of the proof given in \cite{K2008VVV} of the chain of implications in \cref{T:VVEquiv} shows that all of the conditions except (B) are still equivalent with $\overline{u}$ replaced by $v$. That is, defining
\begingroup \allowdisplaybreaks \begin{align*}
(A_v) & \qquad u \to v \text{ weakly in } H
\text{ uniformly on } [0, T], \\
(A'_v) & \qquad u \to v \text{ weakly in } (L^2(\Omega))^d
\text{ uniformly on } [0, T], \\
(B_v) & \qquad u \to v \text{ in } L^\ensuremath{\infty}([0, T]; H), \\
(C_v) & \qquad \ensuremath{\nabla} u \to \ensuremath{\nabla} v - \innp{\gamma_\mathbf{n} \cdot, v \mu}
\text{ in } ((H^1(\Omega))^{d \times d})^*
\text{ uniformly on } [0, T], \\
(D_v) & \qquad \ensuremath{\nabla} u \to \ensuremath{\nabla} v \text{ in } (H^{-1}(\Omega))^{d \times d}
\text{ uniformly on } [0, T], \\
(E_v) & \qquad \omega \to \omega(v)
- \frac{1}{2} \innp{\gamma_\mathbf{n} (\cdot - \cdot^T),
v \mu}
\text{ in }
((H^1(\Omega))^{d \times d})^*
\text{ uniformly on } [0, T], \\
(E_{2, v}) & \qquad \omega \to \omega(v) - (v \cdot \BoldTau) \mu
\text{ in } (H^1(\Omega))^*
\text{ uniformly on } [0, T], \\
(F_{2, v}) & \qquad \omega \to \omega(v) \text{ in } H^{-1}(\Omega)
\text{ uniformly on } [0, T], \end{align*} \endgroup we have the following theorem: \begin{theorem}\label{T:MainResultv}
Conditions ($A_v$), ($A'_v$), ($C_v$), ($D_v$), and ($E_v$) are equivalent.
In 2D, conditions ($E_{2,v}$) and, when $\Omega$ is simply connected,
($F_{2,v}$) are equivalent to the other conditions.
Also, $(B_v)$ implies all of the other conditions. Finally,
the same equivalences hold if we replace each
convergence above with the convergence of a subsequence. \end{theorem}
But we also have the following: \begin{theorem}\label{T:SubsequenceConvergence} There exists $v$ in $L^\ensuremath{\infty}([0, T]; H)$ such that a subsequence of $(u_\nu)$ converges weakly to $v$ in $L^\ensuremath{\infty}([0, T]; H)$. \end{theorem} \begin{proof} The argument for a simply connected domain in 2D is slightly simpler so we give it first. The sequence $(u_\nu)$ is bounded in $L^\ensuremath{\infty}([0, T]; H)$ by the basic energy inequality for the Navier-Stokes equations. Letting $\psi_\nu$ be the stream function for $u_\nu$ vanishing on $\Gamma$, it follows by the Poincar\'e inequality that $(\psi_\nu)$ is bounded in $L^\ensuremath{\infty}([0, T]; H_0^1(\Omega))$. Hence, there exists a subsequence, which we relabel as $(\psi_\nu)$, converging strongly in $L^\ensuremath{\infty}([0, T]; L^2(\Omega))$ and weak-* in $L^\ensuremath{\infty}([0, T]; H_0^1(\Omega))$ to some $\psi$ lying in $L^\ensuremath{\infty}([0, T]; H_0^1(\Omega))$. Let $v = \ensuremath{\nabla}^\perp \psi$.
Let $g$ be any element of $L^\ensuremath{\infty}([0, T]; H)$. Then \begin{align*}
(u_\nu, g)
&= (\ensuremath{\nabla}^\perp \psi_\nu, g)
= - (\ensuremath{\nabla} \psi_\nu, g^\perp)
= (\psi_\nu, - \dv g^\perp)
= (\psi_\nu, \omega(g)) \\
&\to (\psi, \omega(g))
= (v, g). \end{align*} In the third equality we used the membership of $\psi_\nu$ in $H_0^1(\Omega)$ and the last equality follows in the same way as the first four. The convergence follows from the weak-* convergence of $\psi_\nu$ in $L^\ensuremath{\infty}([0, T]; H_0^1(\Omega))$ and the membership of $\omega(g)$ in $H^{-1}(\Omega)$.
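Here, the membership of $\omega(g)$ in $H^{-1}(\Omega)$ is quantitative: up to a sign, $\omega(g)$ is the divergence of the $L^2$ vector field $g^\perp$, so for any $\varphi \in H_0^1(\Omega)$,
\begin{align*}
\abs{(\omega(g), \varphi)}
= \abs{(g^\perp, \ensuremath{\nabla} \varphi)}
\le \norm{g}_{L^2(\Omega)} \norm{\ensuremath{\nabla} \varphi}_{L^2(\Omega)}.
\end{align*}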
In dimension $d \ge 3$, let $M_\nu$ in $(H_0^1(\Omega))^{d \times d}$ satisfy $u_\nu = \dv M_\nu$; this is possible by Corollary 7.5 of \cite{K2008VVV}. Arguing as before it follows that there exists a subsequence, which we relabel as $(M_\nu)$, converging strongly in $L^\ensuremath{\infty}([0, T]; L^2(\Omega))$ and weak-* in $L^\ensuremath{\infty}([0, T]; (H_0^1(\Omega))^{d \times d})$ to some $M$ that lies in $L^\ensuremath{\infty}([0, T]; (H_0^1(\Omega))^{d \times d})$. Let $v = \dv M$.
Let $g$ be any element of $L^\ensuremath{\infty}([0, T]; H)$. Then \begin{align*}
(u_\nu, g)
&= (\dv M_\nu, g)
= -(M_\nu, \ensuremath{\nabla} g)
\to - (M, \ensuremath{\nabla} g)
= (v, g), \end{align*} establishing convergence as before. \end{proof}
It follows from \refTAnd{MainResultv}{SubsequenceConvergence} that all of the convergences in \cref{T:VVEquiv} hold except for $(B)$, though only for a subsequence of solutions, and the convergence is to some velocity field $v$ lying only in $L^\ensuremath{\infty}([0, T]; H)$ and not necessarily in $L^\ensuremath{\infty}([0, T]; H \cap H^1(\Omega))$. In particular, we do not know if $v$ is a solution to the Euler equations, and, in fact, there is no reason to expect that it is.
\Ignore{ \begin{lemma}\label{L:H1Dual}
$H^{-1}(\Omega)$ is the image under $\Delta$ of $H^1_0(\Omega)$ and the image
under $\dv$ of $(L^2(\Omega))^d$. \end{lemma} \begin{proof}
Let $w$ be in $H^{-1}(\Omega) = H^1_0(\Omega)^*$. By the density of $\Cal{D}(\Omega)$ in
$H^1_0(\Omega)$ the value of $(w, \varphi)_{H_0^1(\Omega), H_0^1(\Omega)^*}$ on
test functions $\varphi$ in $\Cal{D}(\Omega)$ is enough to uniquely determine
$w$. By the Riesz representation theorem there exists a $u$ in $H^1_0(\Omega)$
such that for all $\varphi$ in $H^1_0(\Omega)$ and hence in $\Cal{D}(\Omega)$,
\begin{align*}
(w, \varphi)_{H_0^1(\Omega), H_0^1(\Omega)^*}
&= \innp{u, \varphi}
= \int_\Omega \ensuremath{\nabla} u \cdot \ensuremath{\nabla} \varphi
= - \int_\Omega \Delta u \cdot \varphi \\
&= (-\Delta u, \varphi)_{\Cal{D}(\Omega), \Cal{D}(\Omega)^*},
\end{align*}
which shows that $w$ as a linear functional is equal to $- \Delta u$ as a
distribution, and the two can be identified.
Because the identification of $w$ and $u$ in the Riesz representation
theorem is bijective, $H^{-1}(\Omega) = \Delta H^1(\Omega)$.
Since $\Delta = \dv \ensuremath{\nabla}$, it also follows that $H^{-1}(\Omega) \subseteq
\dv (L^2(\Omega))^d$. To show the opposite containment, let $f$ be in
$(L^2(\Omega))^d$. Then by the Hodge decomposition, we can write
\begin{align*}
f = \ensuremath{\nabla} u + g
\end{align*}
with $u$ in $H^1(\Omega)$ and $g$ in $(L^2(\Omega))^d$ with $\dv g = 0$ as a
distribution. Then for any $\varphi$ in $\Cal{D}(\Omega)$,
\begin{align*}
&(\dv f, \varphi)_{\Cal{D}(\Omega), \Cal{D}(\Omega)^*}
= - (f, \ensuremath{\nabla} \varphi)_{\Cal{D}(\Omega), \Cal{D}(\Omega)^*} \\
&\qquad= - (\ensuremath{\nabla} u, \ensuremath{\nabla} \varphi)_{\Cal{D}(\Omega), \Cal{D}(\Omega)^*}
- (g, \ensuremath{\nabla} \varphi)_{\Cal{D}(\Omega), \Cal{D}(\Omega)^*} \\
&\qquad= - \innp{u, \varphi} + (\dv g, \varphi)_{\Cal{D}(\Omega), \Cal{D}(\Omega)^*}
= \innp{-u, \varphi}
= (w, \varphi)_{H_0^1(\Omega), H_0^1(\Omega)^*}
\end{align*}
for some $w$ in $H^1_0(\Omega)^*$ by the Riesz representation theorem. It
follows that $\dv f$ and $w$ can be identified, using the same identification
as before. What we have shown is that $\dv (L^2(\Omega))^d
\subseteq H^{-1}(\Omega)$, completing the proof. \end{proof} }
\Ignore{
\section{Convergence to another solution to the Euler equations?}
\noindent One could imagine that the solutions $u = u_\nu$ to the Navier-Stokes equations converge, in the limit, to a solution to the Euler equations, but one different from $\overline{u}$ and possibly with lower regularity. Since such solutions are determined by their initial velocity, this means that the vector $v$ to which $(u_\nu)$ converges has initial velocity $v^0 \ne \overline{u}^0$. (This conclusion would be true even if $v$ had so little regularity that it had not been determined uniquely by its initial velocity.)
Now, $\overline{u}(t)$ is continuous in $H$, since it is a strong solution, as too, if we restrict ourselves to two dimensions, is $u(t)$. If $v$ has bounded vorticity, say, then $v(t)$ is continuous in $H$ as well. It would seem ...... }
\Ignore{
\section{Physical meaning of the vortex sheet on the boundary?}
\noindent Calling the term $\omega^* := - (\overline{u} \cdot \BoldTau) \mu$ (in 2D) a \textit{vortex sheet} is misleading, and I regret referring to it that way in \cite{K2008VVV} without some words of explanation. The problem is that we cannot interpret $\omega^*$ as a distribution on $\Omega$ because applying it to any function in $\Cal{D}(\Omega)$ gives zero. And how could we recover the velocity associated to $\omega^*$?
One natural, if unjustified, way to try to interpret $\omega^*$ is to extend it to the whole space so that it is a measure supported along the curve $\Gamma$. To determine the associated velocity $v$, let $\Omega_- = \Omega$ and $\Omega_+ = \Omega^C$ with $v_\pm = v|_{\Omega_\pm}$, and let $[v] = v_+ - v_-$. Then as on page 364 of \cite{MB2002}, we must have \begin{align*}
[v] \cdot \mathbf{n} = 0, \quad [v] \cdot \BoldTau = - \overline{u} \cdot \BoldTau. \end{align*} That is, the normal component of the velocity is continuous across the boundary while the jump in the tangential component is the strength of the vortex sheet.
Now, let us assume that the vanishing viscosity limit holds, so that the limiting vorticity is $\overline{\omega} - (\overline{u} \cdot \BoldTau) \mu = \overline{\omega} - \omega^*$. Since $u \to \overline{u}$ strongly with $\omega(\overline{u}) = \overline{\omega}$, the term $\overline{\omega}$ has to account for all of the kinetic energy of the fluid. If the limit is to be physically meaningful, certainly energy cannot be \textit{gained} (though it conceivably could be lost to diffusion, even in the limit). Thus, we would need to have the velocity $v$ associated with $\omega^*$ vanish in $\Omega$; in other words, $v_- \equiv 0$. This leads to $\omega(v_+) = \dv v_+ = 0$ in $\Omega_+$, $v_+ \cdot \mathbf{n} = 0$ on $\Gamma$, $v_+ \cdot \BoldTau = \overline{u} \cdot \BoldTau$ on $\Gamma$, with some conditions on $v_+$ at infinity. But this is an overdetermined set of equations. In fact, if $\Omega$ is simply connected then $\Omega_+$ is an exterior domain, and if we ignore the last equation, then up to a multiplicative constant there is a unique solution vanishing at infinity. This cannot, in general, be reconciled with the need for the last equation to hold.
Actually, perhaps the correct physical interpretation of $\omega^*$ comes from the observation in the first paragraph of this section: that it has no physical effect at all since, as a distribution, it is zero. If the vanishing viscosity limit holds, it is reasonable to assume that if there is a boundary separation of the vorticity it weakens in magnitude as the viscosity vanishes and so contributes nothing in the limit.
Or, looked at another way, if in looking for the velocity $v$ corresponding to the vortex sheet $\omega$ as we did above we assume that $v$ is zero outside $\Omega$, we would obtain \begin{align*}
v \cdot \mathbf{n} = 0, \quad v \cdot \BoldTau = \overline{u} \cdot \BoldTau \end{align*} on the boundary. For a very small viscosity, then, $u$ has almost the same effect as $\overline{u}$ in the interior of $\Omega$, while the vortex sheet that is forming on the boundary as the viscosity vanishes has nearly the same effect as $\overline{u}$ on the boundary. }
\Part{Theme II: Kato's Conditions}
\section{An equivalent 2D condition on the boundary}\label{S:EquivCondition}
\noindent
\begin{theorem}\label{T:BoundaryIffCondition} For ($VV$) to hold in 2D it is necessary and sufficient that \begin{align}\label{e:BoundaryCondition2D}
\nu \int_0^T \int_\Gamma \omega \, \overline{u} \cdot \BoldTau
\to 0
\text{ as } \nu \to 0. \end{align} \end{theorem} \begin{proof} Since the solution is in 2D and $f \in L^2(0, T; H) \supseteq C^1_{loc}(\ensuremath{\BB{R}}; C^1(\Omega))$, Theorem III.3.10 of \cite{T2001} gives \begin{align}\label{e:RegTwoD}
\begin{split}
&\sqrt{t} u \in L^2(0, T; H^2(\Omega)) \cap L^\ensuremath{\infty}(0, T; V), \\
&\sqrt{t} \ensuremath{\partial}_t u \in L^2(0, T; H),
\end{split} \end{align} so $\omega(t)$ is defined in the sense of a trace on the boundary. This shows that the condition in \cref{e:BoundaryCondition2D} is well-defined.
For simplicity we give the argument with $f = 0$. We perform the calculations using the $d$-dimensional form of the vorticity in \cref{e:VorticityRd}, specializing to 2D only at the end. (The argument applies formally in higher dimensions; see \cref{R:BoundaryConditionInRd}.)
Subtracting ($EE$) from ($NS$), multiplying by $w = u - \overline{u}$, integrating over $\Omega$, using \cref{L:TimeDerivAndIntegration} for the time derivative, and $u(t) \in H^2(\Omega)$, $t > 0$, for the spatial integrations by parts, leads to \begin{align}\label{e:BasicEnergyEq}
\begin{split}
\frac{1}{2} \diff{}{t} &\norm{w}_{L^2}^2
+ \nu \norm{\ensuremath{\nabla} u}_{L^2}^2 \\
&= - (w \cdot \ensuremath{\nabla} \overline{u}, w)
+ \nu(\ensuremath{\nabla} u, \ensuremath{\nabla} \overline{u})
- \nu \int_\Gamma (\ensuremath{\nabla} u \cdot \mathbf{n}) \cdot \overline{u}.
\end{split} \end{align}
Now, \begin{align*}
\begin{split}
(\ensuremath{\nabla} u \cdot \mathbf{n}) \cdot \overline{u}
&= 2 (\frac{\ensuremath{\nabla} u - (\ensuremath{\nabla} u)^T}{2} \cdot \mathbf{n})
\cdot \overline{u}
+ ((\ensuremath{\nabla} u)^T \cdot \bm{n}) \cdot \overline{u} \\
&= 2 (\omega(u) \cdot \mathbf{n})
\cdot \overline{u}
+ ((\ensuremath{\nabla} u)^T \cdot \bm{n}) \cdot \overline{u}.
\end{split} \end{align*} But, \begin{align*}
\int_\Gamma &((\ensuremath{\nabla} u)^T \cdot \bm{n}) \cdot \overline{u}
= \int_\Gamma \ensuremath{\partial}_i u^j n^j \overline{u}^i
= \int_\Gamma \ensuremath{\partial}_i(u \cdot \bm{n}) \overline{u}^i \\
&= \int_\Gamma \ensuremath{\nabla} (u \cdot \bm{n}) \cdot \overline{u}
= 0, \end{align*} where the second equality uses that $u = 0$ on $\Gamma$ (so the term $u^j \ensuremath{\partial}_i n^j$ coming from an extension of $\bm{n}$ vanishes there), and the final equality holds since $u \cdot \bm{n} = 0$ on $\Gamma$ and $\overline{u}$ is tangent to $\Gamma$, so that $\ensuremath{\nabla}(u \cdot \bm{n}) \cdot \overline{u}$ is a tangential derivative of a function vanishing identically on $\Gamma$. Hence, \begin{align}\label{e:gradunuolEq}
\int_\Gamma (\ensuremath{\nabla} u \cdot \mathbf{n}) \cdot \overline{u}
= 2 \int_\Gamma (\omega(u) \cdot \mathbf{n})
\cdot \overline{u} \end{align} and \begin{align*}
\frac{1}{2} \diff{}{t} &\norm{w}_{L^2}^2
+ \nu \norm{\ensuremath{\nabla} u}_{L^2}^2 \\
&= - (w \cdot \ensuremath{\nabla} \overline{u}, w)
+ \nu(\ensuremath{\nabla} u, \ensuremath{\nabla} \overline{u})
- 2 \nu \int_\Gamma (\omega(u) \cdot \mathbf{n})
\cdot \overline{u}. \end{align*}
By virtue of \cref{L:TimeDerivAndIntegration}, we can integrate over time to give \begin{align}\label{e:VVArg}
\begin{split}
&\norm{w(T)}_{L^2}^2
+ 2 \nu \int_0^T \norm{\ensuremath{\nabla} u}_{L^2}^2
= - 2 \int_0^T (w \cdot \ensuremath{\nabla} \overline{u}, w)
+ 2 \nu \int_0^T (\ensuremath{\nabla} u, \ensuremath{\nabla} \overline{u}) \\
&\qquad - 4 \nu \int_0^T \int_\Gamma (\omega(u)
\cdot \mathbf{n}) \cdot \overline{u}.
\end{split} \end{align}
In two dimensions, we have (see (4.2) of \cite{KNavier}) \begin{align}\label{e:gradunomega}
(\ensuremath{\nabla} u \cdot \mathbf{n}) \cdot \overline{u}
= ((\ensuremath{\nabla} u \cdot \mathbf{n}) \cdot \BoldTau) (\overline{u} \cdot \BoldTau)
= \omega(u) \, \overline{u} \cdot \BoldTau, \end{align} and \cref{e:VVArg} can be written \begin{align}\label{e:VVArg2D}
\begin{split}
&\norm{w(T)}_{L^2}^2
+ 2 \nu \int_0^T \norm{\ensuremath{\nabla} u}_{L^2}^2
= - 2 \int_0^T (w \cdot \ensuremath{\nabla} \overline{u}, w)
+ 2 \nu \int_0^T (\ensuremath{\nabla} u, \ensuremath{\nabla} \overline{u}) \\
&\qquad - 2 \nu \int_0^T \int_\Gamma \omega(u) \, \overline{u} \cdot \BoldTau.
\end{split} \end{align}
The sufficiency of \refE{BoundaryCondition2D} for the vanishing viscosity limit ($VV$) to hold (and hence for the other conditions in \cref{T:VVEquiv} to hold) follows from the bounds, \begin{align*}
\abs{(w \cdot \ensuremath{\nabla} \overline{u}, w)}
&\le \norm{\ensuremath{\nabla} \overline{u}}_{L^\ensuremath{\infty}([0, T] \times \Omega)}
\norm{w}_{L^2}^2
\le C \norm{w}_{L^2}^2, \\
\nu \int_0^T \abs{(\ensuremath{\nabla} u, \ensuremath{\nabla} \overline{u})}
&\le \sqrt{\nu} \norm{\ensuremath{\nabla} \overline{u}}_{L^2([0, T] \times \Omega)}
\sqrt{\nu} \norm{\ensuremath{\nabla} u}_{L^2([0, T] \times \Omega)}
\le C \sqrt{\nu}, \end{align*} and Gronwall's inequality.
Proving the necessity of \refE{BoundaryCondition2D} is just as easy. Assume that $(VV)$ holds, so that $\norm{w}_{L^\ensuremath{\infty}([0, T]; L^2(\Omega))} \to 0$. Then by the two inequalities above, the first two terms on the right-hand side of \refE{VVArg2D} vanish with the viscosity as does the first term on the left-hand side. The second term on the left-hand side vanishes as proven in \cite{Kato1983} (it follows from a simple argument using the energy equalities for ($NS$) and ($E$)). It follows that, of necessity, \refE{BoundaryCondition2D} holds. \Ignore{ The reason this argument is formal is twofold. First, $w$ is not a valid test function in the weak formulation of the Navier-Stokes equations because it does not vanish on the boundary and because it varies in time. Beyond time zero the solution has as much regularity as the boundary allows \ToDo{But only up to a finite time; this is a factor to deal with}, so this is a problem only when trying to reach a conclusion after integrating in time down to time zero. This is the second reason the argument is formal: in obtaining \refE{VVArg} we act as though $w$ is strongly continuous in time down to time zero. This is true in 2D, where this part of the argument is not formal, but only weak continuity is known in higher dimensions. (This is also the reason we need assume no additional regularity for the initial velocity in 2D.)
To get around these difficulties, we derive \refE{VVArg} rigorously.
Choose a sequence $(h_n)$ of nonnegative functions in $C_0^\ensuremath{\infty}((0, T])$ such that $h_n \equiv 1$ on the interval $[n^{-1}, T]$ with $h_n$ strictly increasing on $[0, n^{-1}]$. Then $h'_n = g_n \ge 0$ with $g_n \equiv 0$ on $[n^{-1}, T]$. Observe that $\smallnorm{g_n}_{L^1([0, T])} = 1$.
Letting $w = u - \overline{u}$ as before, because $h_n w$ vanishes at time zero we can legitimately subtract ($EE$) from ($NS$), multiply by $h_n w$, and integrate over $\Omega$ to obtain, in place of \refE{BasicEnergyEq},
\begingroup \allowdisplaybreaks \begin{align*}
\begin{split}
\frac{1}{2} \diff{}{t} &\smallnorm{h_n^{1/2} w}_{L^2}^2
- \frac{1}{2} \int_\Omega h_n'(t) \abs{w}^2
+ \nu (\ensuremath{\nabla} u, \ensuremath{\nabla} (h_n u)) \\
&= - (w \cdot \ensuremath{\nabla} \overline{u}, h_n w)
- (u \cdot \ensuremath{\nabla} w, h_n w)
+ \nu(\ensuremath{\nabla} u, \ensuremath{\nabla} (h_n \overline{u})) \\
&\qquad\qquad
- \nu \int_\Gamma (\ensuremath{\nabla} u \cdot \mathbf{n})
\cdot (h_n \overline{u}) \\
&= - (w \cdot \ensuremath{\nabla} \overline{u}, h_n w)
+ \nu(\ensuremath{\nabla} u, \ensuremath{\nabla} (h_n \overline{u}))
- \nu \int_\Gamma (\ensuremath{\nabla} u \cdot \mathbf{n})
\cdot (h_n \overline{u}),
\end{split} \end{align*} \endgroup since $(u \cdot \ensuremath{\nabla} w, h_n w) = h_n(u \cdot \ensuremath{\nabla} w, w) = 0$. Integrating in time gives
\begingroup \allowdisplaybreaks \begin{align*}
&\smallnorm{w(T)}_{L^2}^2
- \smallnorm{h_n^{1/2} w(0)}_{L^2}^2
- \int_0^T \int_\Omega h_n' \abs{w}^2
+ 2 \nu \int_0^T (\ensuremath{\nabla} u, \ensuremath{\nabla} (h_n u)) \\
&\qquad
= - 2 \int_0^T (w \cdot \ensuremath{\nabla} \overline{u}, h_n w)
+ 2 \int_0^T \nu(\ensuremath{\nabla} u, \ensuremath{\nabla} (h_n \overline{u})) \\
&\qquad\qquad\qquad\qquad
- 2 \int_0^T \nu \int_\Gamma (\ensuremath{\nabla} u \cdot \mathbf{n})
\cdot (h_n \overline{u}). \end{align*} \endgroup The second term on the left hand side vanishes because $h_n(0) = 0$. For the four terms containing $h_n$ without derivative, the $h_n$ becomes $1$ in the limit as $n \to \ensuremath{\infty}$. This leaves the one term containing $h_n'$.
Now, $\overline{u}(t)$ is continuous in $H$ and in 2D $u(t)$ is also continuous in $H$. Hence, in 2D $w(t)$ is continuous in $H$. In 3D if we assume that $u_0 \in V$ then $u(t)$ is continuous in $H$ (in fact, in $V$) up to some finite time, $T^* > 0$. Hence, in 3D, $w(t)$ is continuous in $H$ on $[0, T^*)$; $T^*$ may depend on $\nu$, but we will take $n$ to 0 before taking $\nu$ to $0$, so this will not matter. Hence, $F(s) = \norm{w(s)}^2$ is continuous on $[0, T^*)$ with $T^* = T$ in 2D, so \ToDo{Does the $0 \le$ really hold? I don't think so.} \begin{align*}
0
&\le \lim_{n \to \ensuremath{\infty}} \int_0^T \int_\Omega h_n' \abs{w}^2
= \lim_{n \to \ensuremath{\infty}} \int_0^T g_n(s) F(s) \, ds \\
&= \lim_{n \to \ensuremath{\infty}} \int_0^{\frac{1}{n}} g_n(s) F(s) \, ds
\le \norm{g_n}_{L^1} \norm{F}_{L^\ensuremath{\infty} \pr{0, \frac{1}{n}}}
= 0. \end{align*} This gives us \refE{VVArg}. } \end{proof}
\begin{remark}\label{R:ROC}
It follows from the proof of \refT{BoundaryIffCondition} that in 2D,
\begin{align*}
\norm{u(t) - \overline{u}(t)}_{L^2}
\le C \brac{\nu^{\frac{1}{4}}
+ \abs{\nu \int_0^T \int_\Gamma \omega \, \overline{u}
\cdot \BoldTau}^{\frac{1}{2}}} e^{C t}.
\end{align*}
Suppose that $\overline{u}_0$ is smooth enough that
$\Delta \overline{u} \in L^\ensuremath{\infty}([0, T] \times \Omega)$.
Then before integrating to obtain \cref{e:BasicEnergyEq} we
can replace the term $\nu (\Delta u, w)$ with
$\nu (\Delta w, w) + \nu (\Delta \overline{u}, w)$.
Integrating by parts gives
\begin{align*}
\nu (\Delta w, w)
= - \nu \norm{\ensuremath{\nabla} w}_{L^2}^2
+ \nu \int_\Gamma (\ensuremath{\nabla} w \cdot \mathbf{n}) \cdot w,
\end{align*}
the boundary term being treated in the same manner as before,
and we also have,
\begin{align*}
\nu (\Delta \overline{u}, w)
\le \nu \norm{\Delta \overline{u}}_{L^2} \norm{w}_{L^2}
\le \frac{\nu^2}{2} \norm{\Delta \overline{u}}_{L^2}^2
+ \frac{1}{2} \norm{w}_{L^2}^2.
\end{align*}
This leads to the bound,
\begin{align*}
\norm{u(t) - \overline{u}(t)}_{L^2}
\le C \brac{\nu
+ \abs{\nu \int_0^T \int_\Gamma \omega \, \overline{u}
\cdot \BoldTau}^{\frac{1}{2}}} e^{C t}
\end{align*}
(and also $\norm{u - \overline{u}}_{L^2(0, T; H^1)} \le C \nu^{1/2} e^{Ct}$).
Thus, the bound we obtain on the rate of convergence in $\nu$ is never better
than $O(\nu^{1/4})$
unless the initial data is smooth enough, in which case it is never better
than $O(\nu)$. In any case, only in exceptional circumstances would the rate
not be determined by the integral coming from the boundary term. \end{remark}
\begin{remark}\label{R:BoundaryConditionInRd} Formally, the argument in the proof of \cref{T:BoundaryIffCondition} would give in any dimension the condition \begin{align*}
\nu \int_0^T \int_\Gamma (\omega(u) \cdot \mathbf{n})
\cdot \overline{u}
\to 0
\text{ as } \nu \to 0. \end{align*} In 3D, one has $\omega(u) \cdot \bm{n} = (1/2) \vec{\omega} \times \bm{n}$, so the condition could be written \begin{align*}
\nu \int_0^T \int_\Gamma (\vec{\omega} \times \bm{n})
\cdot \overline{u}
= - \nu \int_0^T \int_\Gamma \vec{\omega} \cdot
(\overline{u} \times \bm{n})
\to 0
\text{ as } \nu \to 0, \end{align*} where $\vec{\omega}$ is the 3-vector form of the curl of $u$. We can only be assured, however, that $u(t) \in V$ for all $t > 0$, which is insufficient to define $\vec{\omega}$ on the boundary. (The normal component could be defined, though, since both $\vec{\omega}(t)$ and $\dv \vec{\omega}(t) = 0$ lie in $L^2$.) Even assuming more compatible initial data in 3D, such as $u_0 \in V$, we can only conclude that $u(t) \in H^2$ for a short time, with that time decreasing to $0$ as $\nu \to 0$ (in the presence of forcing; see, for instance, Theorem 9.9.4 of \cite{FoiasConstantin1988}).
\end{remark}
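In the notation of the proof of \cref{P:EquivE}, the identity $\omega(u) \cdot \bm{n} = (1/2) \vec{\omega} \times \bm{n}$ used in the remark above is immediate from $F(\curl u) = 2 \omega(u)$ and $F(\varphi) v = \varphi \times v$:
\begin{align*}
\omega(u) \cdot \bm{n}
= \frac{1}{2} F(\vec{\omega}) \bm{n}
= \frac{1}{2} \vec{\omega} \times \bm{n}.
\end{align*}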
\Ignore{ \begin{remark}\label{R:BoundaryCondition2DRd} Since $\overline{u} \times \bm{n}$ is a tangent vector, the second form of the condition in \refE{BoundaryCondition3D} shows that it is only the tangential components of $\vec{\omega}$ that matter in this condition. More specifically, only the tangential component perpendicular to $\overline{u}$ matters.
\end{remark} }
There is nothing deep about the condition in \refE{BoundaryCondition2D}, but what it says is that there are two mechanisms by which the vanishing viscosity limit can hold: Either the blowup of $\omega$ on the boundary happens slowly enough that \begin{align}\label{e:nuL1Bound}
\nu \int_0^T \norm{\omega}_{L^1(\Gamma)}
\to 0
\text{ as } \nu \to 0 \end{align} or the vorticity for ($NS$) is generated on the boundary in such a way as to oppose the sign of $\overline{u} \cdot \BoldTau$. (This latter line of reasoning is followed in \cite{CKV2014}, leading to a new condition in a boundary layer slightly thicker than that of Kato.) In the second case, it could well be that vorticity for $(NS)$ blows up fast enough that \refE{nuL1Bound} does not hold, but cancellation in the integral in \refE{BoundaryCondition2D} allows that condition to hold.
\begin{lemma}\label{L:TimeDerivAndIntegration}
Assume that $v \in L^\ensuremath{\infty}(0, T; V)$ with $\ensuremath{\partial}_t v \in L^2(0, T; V')$ as well as
$\sqrt{t} \ensuremath{\partial}_t v \in L^2(0, T; H)$.
Then $v \in C([0, T]; H)$,
\begin{align*}
\frac{1}{2} \diff{}{t} \norm{v}_{L^2}^2
= (\ensuremath{\partial}_t v, v) \text{ in } \Cal{D}'((0, T))
\text{ with } \sqrt{t} (\ensuremath{\partial}_t v, v) \in L^1(0, T),
\end{align*}
and
\begin{align*}
\int_0^T \diff{}{t} \norm{v(t)}_{L^2}^2 \, dt
= \norm{v(T)}_{L^2}^2 - \norm{v(0)}_{L^2}^2.
\end{align*} \end{lemma} \begin{proof} Having $v \in L^2(0, T; V)$ with $\ensuremath{\partial}_t v \in L^2(0, T; V')$ is enough to conclude that $(\ensuremath{\partial}_t v, v) = (1/2) (d/dt) \norm{v}_{L^2}^2$ in $\Cal{D}'((0, T))$ and $v \in C([0, T]; H)$ (see Lemma III.1.2 of \cite{T2001}).
Let $T_0 \in (0, T)$. Our stronger assumptions also give $(d/dt) \norm{v}_{L^2}^2 = 2(\ensuremath{\partial}_t v, v) \in L^1(T_0, T)$. Hence, by the fundamental theorem of calculus for Lebesgue integration (Theorem 3.35 of \cite{Folland1999}) it follows that \begin{align*}
\int_{T_0}^T \diff{}{t} \norm{v}_{L^2}^2 \, dt
= \norm{v(T)}_{L^2}^2 - \norm{v(T_0)}_{L^2}^2. \end{align*} But $v$ is continuous in $H$ down to time zero, so taking $T_0$ to 0 completes the proof. \end{proof}
\section{Examples where the 2D boundary condition holds}\label{S:Examples}
\noindent All examples where the vanishing viscosity limit is known to hold have some kind of symmetry---in geometry of the domain or the initial data---or have some degree of analyticity.
Since \refE{BoundaryCondition2D} is a necessary condition, it holds for all of these examples. But though it is also a sufficient condition, it is not always practicable to apply it to establish the limit. We give here examples in which it is practicable. This includes all known 2D examples having symmetry. In all explicit cases, the initial data is a stationary solution to the Euler equations.
\Example{1} Let $\overline{u}$ be any solution to the Euler equations for which $\overline{u} = 0$ on the boundary. The integral in \refE{BoundaryCondition2D} then vanishes for all $\nu$. From \refR{ROC}, the rate of convergence (here, and below, in $\nu$) is $C \nu^{1/4}$ or, for smoother initial data, $C \nu$.
\Example{1a} Example 1 is not explicit, since we immediately encounter the question of what (nonzero) examples of such steady solutions there are. As a first example, let $D$ be the disk of radius $R > 0$ centered at the origin and let $\omega_0 \in L^\ensuremath{\infty}(D)$ be radially symmetric. Then the associated velocity field, $u_0$, is given by the Biot-Savart law. By exploiting the radial symmetry, $u_0$ can be written, \begin{align}\label{e:u0Circular}
u_0(x)
&= \frac{x^\perp}{\abs{x}^2}
\int_0^{\abs{x}} \omega_0(r) r \, dr, \end{align*} where we abuse notation a bit in writing $\omega_0(r)$ for the value of $\omega_0$ at any point a distance $r$ from the origin. Since $u_0$ is perpendicular to $\ensuremath{\nabla} \omega_0$, we have $u_0 \cdot \ensuremath{\nabla} \omega_0 = 0$, and it follows from the vorticity form of the Euler equations that $\overline{u} \equiv u_0$ is a stationary solution to the Euler equations.
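One can check directly (at least for smooth $\omega_0$, the general case following by approximation) that the field in \cref{e:u0Circular} has the required properties, using the convention $x^\perp = (-x_2, x_1)$: writing $u_0 = g(r) x^\perp$ with $r = \abs{x}$ and $g(r) = r^{-2} \int_0^r s \omega_0(s) \, ds$, we have, since $\dv x^\perp = 0$ and $x \cdot x^\perp = 0$,
\begin{align*}
\dv u_0
= g'(r) \frac{x \cdot x^\perp}{r}
= 0, \qquad
\ensuremath{\partial}_1 u_0^2 - \ensuremath{\partial}_2 u_0^1
= 2 g(r) + r g'(r)
= \omega_0(r),
\end{align*}
the last equality following by differentiating $r^2 g(r) = \int_0^r s \omega_0(s) \, ds$ in $r$. In particular, $u_0$ is a multiple of $x^\perp$, so $u_0 \cdot \bm{n} = 0$ on $\Gamma$.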
Now assume that the total mass of vorticity, \begin{align}\label{e:m}
m := \int_{\ensuremath{\BB{R}}^2} \omega_0, \end{align} is zero. We see from \refE{u0Circular} that on $\Gamma$,
$u_0 = \frac{m}{2 \pi R^2} x^\perp = 0$, giving a steady solution to the Euler equations with velocity vanishing on the boundary.
(Note that $m = 0$ is equivalent to $u_0$ lying in the space $V$ of divergence-free vector fields vanishing on the boundary.)
\Example{1b} Let $\omega_0 \in L^1 \cap L^\ensuremath{\infty}(\ensuremath{\BB{R}}^2)$ be a compactly supported radially symmetric initial vorticity for which the total mass of vorticity vanishes; that is, $m = 0$. Then the expression for $u_0$ in \refE{u0Circular}, which continues to hold throughout all of $\ensuremath{\BB{R}}^2$, shows that $u_0$ vanishes outside of the support of its vorticity.
If we now restrict such a radially symmetric $\omega_0$ so that its support lies inside a domain (even allowing the support of $\omega_0$ to touch the boundary of the domain) then the velocity $u_0$ will vanish on the boundary. In particular, $u_0 \cdot \bm{n} = 0$ so, in fact, $u_0$ is a stationary solution to the Euler equations in the domain, being already one in the whole plane. In fact, one can use a superposition of such radially symmetric vorticities, as long as their supports do not overlap, and one will still have a stationary solution to the Euler equations whose velocity vanishes on the boundary.
Such a superposition is called a \textit{superposition of confined eddies} in \cite{FLZ1999A}, where their properties in the full plane, for lower regularity than we are considering, are analyzed. These superpositions provide a fairly wide variety of examples in which the vanishing viscosity limit holds. It might be interesting to investigate the precise manner in which the vorticity converges in the vanishing viscosity limit; that is, whether it is possible to do better than the ``vortex sheet''-convergence in condition $(E_2)$ of \cite{K2008VVV}.
In \cite{Maekawa2013}, Maekawa considers initial vorticity supported away from the boundary in a half-plane. We note that the analogous result in a disk, even were it shown to hold, would not cover this Example 1b when the support of the vorticity touches the boundary.
\Example{2 [2D shear flow]} Let $\phi$ solve the heat equation, \begin{align}\label{e:HeatShear}
\left\{
\begin{array}{rl}
\ensuremath{\partial}_t \phi(t, z) = \nu \ensuremath{\partial}_{zz} \phi(t, z)
& \text{on } [0, \ensuremath{\infty}) \times [0, \ensuremath{\infty}), \\
\phi(t, 0) = 0
& \text{ for all } t > 0, \\
\phi(0) = \phi_0. &
\end{array}
\right. \end{align} Assume for simplicity that $\phi_0 \in W^{1, \ensuremath{\infty}}((0, \ensuremath{\infty}))$. Let $u_0(x) = (\phi_0(x_2), 0)$ and $u(t, x) = (\phi(t, x_2), 0)$.
Let $\Omega = [-L, L] \times (0, \ensuremath{\infty})$ be periodic in the $x_1$-direction. Then $u_0 \cdot \bm{n} = 0$ and $u(t) = 0$ for all $t > 0$ on $\ensuremath{\partial} \Omega$ and \begin{align*}
\ensuremath{\partial}_t u(t, x)
&= \nu(\ensuremath{\partial}_{x_2 x_2} \phi(t, x_2), 0)
= \nu \Delta u(t, x), \\
(u \cdot \ensuremath{\nabla} u)(t, x)
&=
\matrix{\ensuremath{\partial}_1 u^1 & \ensuremath{\partial}_1 u^2}
{\ensuremath{\partial}_2 u^1 & \ensuremath{\partial}_2 u^2}
\matrix{u^1}{u^2}
=
\matrix{0 & 0}{\ensuremath{\partial}_2 \phi(t, x_2) & 0}
\matrix{\phi(t, x_2)}{0} \\
&=
\matrix{0}{\ensuremath{\partial}_2 \phi(t, x_2) \phi(t, x_2)}
=
\frac{1}{2} \ensuremath{\nabla} \pr{\phi(t, x_2)^2}. \end{align*} It follows that $u$ solves the Navier-Stokes equations on $\Omega$ with pressure $p = - \frac{1}{2} \phi(t, x_2)^2$.
Similarly, letting $\overline{u} \equiv u_0$, we have $\ensuremath{\partial}_t \overline{u} = 0$ and $\overline{u} \cdot \ensuremath{\nabla} \overline{u} = \frac{1}{2} \ensuremath{\nabla} \pr{\phi_0^2}$, so $\overline{u} \equiv u_0$ is a stationary solution to the Euler equations.
Now, $\omega = \ensuremath{\partial}_1 u^2 - \ensuremath{\partial}_2 u^1 = - \ensuremath{\partial}_2 \phi(t, x_2)$ so \begin{align*}
\int_\Gamma \omega \, \overline{u} \cdot \BoldTau
&= - \int_\Gamma \ensuremath{\partial}_2 \phi(t, x_2)|_{x_2 = 0}
\phi_0(0)
= - \phi_0(0)\int_{-L}^L
\ensuremath{\partial}_{x_2} \phi(t, x_2)|_{x_2 = 0} \, d x_1 \\
&= -2L \phi_0(0) \ensuremath{\partial}_{x_2} \phi(t, x_2)|_{x_2 = 0}. \end{align*}
The explicit solution to \refE{HeatShear} is \begin{align*}
\phi(t, z)
&= \frac{1}{\sqrt{4 \pi \nu t}}
\int_0^\ensuremath{\infty} \brac{e^{-\frac{(z - y)^2}{4 \nu t}}
- e^{-\frac{(z + y)^2}{4 \nu t}}} \phi_0(y) \, dy \end{align*} (see, for instance, Section 3.1 of \cite{StraussPDE}). Thus,
\begingroup \allowdisplaybreaks \begin{align*}
\ensuremath{\partial}_z \phi(t, z)|_{z = 0}
&= \frac{2}{4 \nu t \sqrt{4 \pi \nu t}}
\int_0^\ensuremath{\infty} y \brac{e^{-\frac{y^2}{4 \nu t}}
+ e^{-\frac{y^2}{4 \nu t}}} \phi_0(y) \, dy \\
&= \frac{1}{\nu t \sqrt{4 \pi \nu t}}
\int_0^\ensuremath{\infty} y e^{-\frac{y^2}{4 \nu t}} \phi_0(y) \, dy
\\
&= \frac{1}{\nu t \sqrt{4 \pi \nu t}}
\int_0^\ensuremath{\infty} (- 2 \nu t) \diff{}{y}
e^{-\frac{y^2}{4 \nu t}} \phi_0(y) \, dy \\
&= -\frac{1}{\sqrt{\pi \nu t}}
\int_0^\ensuremath{\infty} \diff{}{y}
e^{-\frac{y^2}{4 \nu t}} \, \phi_0(y) \, dy \\
&= \frac{\phi_0(0)}{\sqrt{\pi \nu t}}
+ \frac{1}{\sqrt{\pi \nu t}}
\int_0^\ensuremath{\infty}
e^{-\frac{y^2}{4 \nu t}} \phi_0'(y) \, dy, \end{align*} \endgroup where we integrated by parts in the last step. Since $\int_0^\ensuremath{\infty} e^{-\frac{y^2}{4 \nu t}} \, dy = \sqrt{\pi \nu t}$ and $\phi_0 \in W^{1, \ensuremath{\infty}}((0, \ensuremath{\infty}))$, it follows that \begin{align*}
\abs{\ensuremath{\partial}_{x_2} \phi(t, x_2)|_{x_2 = 0}}
\le \frac{C}{\sqrt{\nu t}} \end{align*} for all $t \in (0, T]$ and $\nu \in (0, 1]$. We conclude that \begin{align*}
\abs{\nu \int_0^T \int_\Gamma \omega \, \overline{u} \cdot \BoldTau}
\le C \sqrt{\nu} \int_0^T t^{-1/2} \, dt
= C \sqrt{\nu T}. \end{align*} The condition in \refE{BoundaryCondition2D} thus holds (as does \cref{e:nuL1Bound}). From \refR{ROC}, the rate of convergence is $C \nu^{\frac{1}{4}}$ (even for smoother initial data).
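As a quick numerical sanity check (an illustrative sketch, not part of the argument; the constant profile $\phi_0 \equiv 1$ and the quadrature parameters are our own choices): for $\phi_0 \equiv 1$ the computation above gives $\ensuremath{\partial}_z \phi(t, 0) = 1/\sqrt{\pi \nu t}$ exactly, so the scaled value printed below should be close to $1$ for every $\nu$.
\begin{verbatim}
import numpy as np

# Sanity check for Example 2: for phi_0 = 1 the explicit formula gives
# d_z phi(t, 0) = 1 / sqrt(pi nu t), saturating the C / sqrt(nu t) bound.
# The profile phi_0 and the quadrature parameters are illustrative choices.

def dz_phi_at_0(t, nu, phi0, n=20001):
    L = 12.0 * np.sqrt(4.0 * nu * t)   # the Gaussian tail beyond L is negligible
    y = np.linspace(0.0, L, n)
    f = y * np.exp(-y**2 / (4.0 * nu * t)) * phi0(y)
    integral = 0.5 * (f[:-1] + f[1:]).sum() * (y[1] - y[0])   # trapezoid rule
    return integral / (nu * t * np.sqrt(4.0 * np.pi * nu * t))

phi0 = lambda y: np.ones_like(y)       # bounded data with phi0(0) != 0
t = 1.0
for nu in [1e-1, 1e-2, 1e-3, 1e-4]:
    d = dz_phi_at_0(t, nu, phi0)
    print(f"nu = {nu:7.1e}  d_z phi(1,0) = {d:9.4f}  "
          f"d * sqrt(pi nu t) = {d * np.sqrt(np.pi * nu * t):6.4f}")
\end{verbatim}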
\Example{3} Consider Example 1a of radially symmetric vorticity in the unit disk, but without the assumption that $m$ given by \refE{m} vanishes. This example goes back at least to Matsui in \cite{Matsui1994}. The convergence also follows from the sufficiency of the Kato-like conditions established in \cite{TW1998}, as pointed out in \cite{W2001}. A more general convergence result in which the disk is allowed to impulsively rotate for all time appears in \cite{FLMT2008}. A simple argument to show that the vanishing viscosity limit holds is given in Theorem 6.1 of \cite{K2006Disk}, though without a rate of convergence. Here we prove it with a rate of convergence by showing that the condition in \refE{BoundaryCondition2D} holds.
Because the nonlinear term disappears, the vorticity satisfies the heat equation, though with Dirichlet boundary conditions not on the vorticity but on the velocity: \begin{align}\label{e:RadialHeat}
\left\{
\begin{array}{rl}
\ensuremath{\partial}_t \omega = \nu \Delta \omega
& \text{in } \Omega, \\
u = 0
& \text{on } \Gamma.
\end{array}
\right. \end{align} Unless $u_0 \in V$, however, $\omega \notin C([0, T]; L^2)$, so we cannot easily make sense of the initial condition this way.
An orthonormal basis of eigenfunctions satisfying these boundary conditions is \begin{align*}
u_k(r, \theta)
&= \frac{J_1(j_{1k} r)}{\pi^{1/2}\abs{J_0(j_{1k})}}
\ensuremath{\widehat}{e}_\theta,
\quad
\omega_k(r, \theta)
= \frac{j_{1k} J_0(j_{1k} r)}{\pi^{1/2}\abs{J_0(j_{1k})}}, \end{align*} where $J_0$, $J_1$ are Bessel functions of the first kind and $j_{1k}$ is the $k$-th positive root of $J_1(x) = 0$. (See \cite{K2006Disk} or \cite{LR2002}.) The $(u_k)$ are complete in $H$ and in $V$ and are normalized so that\footnote{This differs from the normalization in \cite{K2006Disk}, where $\norm{u_k}_H = j_{1k}^{-1}$, $\norm{\omega_k}_{L^2} = 1$.} \begin{align*}
\norm{u_k}_H = 1,
\quad
\norm{\omega_k}_{L^2} = j_{1k}. \end{align*}
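One can check the stated normalization directly, using that $u_k \in V$, that $\norm{\omega(v)}_{L^2} = \norm{\ensuremath{\nabla} v}_{L^2}$ for $v \in V$ in 2D, and that $u_k$ is a Stokes eigenfunction with eigenvalue $j_{1k}^2$: \begin{align*}
\norm{\omega_k}_{L^2}^2
= \norm{\ensuremath{\nabla} u_k}_{L^2}^2
= j_{1k}^2 \norm{u_k}_H^2
= j_{1k}^2. \end{align*}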
Assume that $u_0 \in H \cap H^1$. Then \begin{align*}
u_0 = \sum_{k = 1}^\ensuremath{\infty} a_k u_k,
\quad
\smallnorm{u_0}_H^2
= \sum_{k = 1}^\ensuremath{\infty} a_k^2
< \ensuremath{\infty}. \end{align*} (But, \begin{align*}
\smallnorm{u_0}_V^2
= \sum_{k = 1}^\ensuremath{\infty} a_k^2 j_{1k}^2
= \ensuremath{\infty} \end{align*} unless $u_0 \in V$.) We claim that \begin{align*}
u(t)
= \sum_{k = 1}^\ensuremath{\infty} a_k e^{- \nu j_{1k}^2 t} u_k \end{align*} provides a solution to the Navier-Stokes equations, ($NS$). To see this, first observe that $u \in C([0, T]; H)$, so $u(0) = u_0$ makes sense as an initial condition. Also, $u(t) \in V$ for all $t > 0$. Next observe that \begin{align*}
\omega(t)
:= \omega(u(t))
= \sum_{k = 1}^\ensuremath{\infty} a_k e^{- \nu j_{1k}^2 t} \omega_k \end{align*} for all $t > 0$, this sum converging in $H^n$ for all $n \ge 0$. Since each term satisfies \cref{e:RadialHeat}, so does the sum. Taken together, this shows that $\omega$ satisfies \cref{e:RadialHeat} and thus $u$ solves ($NS$).
\Ignore{ \begin{align*}
\sum_{k = 1}^\ensuremath{\infty}
a_k^2 e^{- 2 \nu j_{1k}^2 t} \norm{\omega_k}_{L^2}^2
=
\sum_{k = 1}^\ensuremath{\infty}
a_k^2 j_{1k} e^{- 2 \nu j_{1k}^2 t}
< \ensuremath{\infty} \end{align*} for all $t > 0$ }
The condition in \refE{BoundaryCondition2D} becomes \begin{align*}
\nu \int_0^T & \int_\Gamma \omega \, \overline{u} \cdot \BoldTau
= \nu \sum_{k = 1}^\ensuremath{\infty} \int_0^T \int_\Gamma
a_k e^{- \nu j_{1k}^2 t} \omega_k
\, \overline{u} \cdot \BoldTau \, dt \\
&= \nu \sum_{k = 1}^\ensuremath{\infty} \int_0^T
a_k e^{- \nu j_{1k}^2 t} \omega_k|_{r = 1}
\int_\Gamma \overline{u} \cdot \BoldTau \, dt\\
&= m \nu \sum_{k = 1}^\ensuremath{\infty} a_k
\frac{j_{1k} J_0(j_{1k})}
{\pi^{1/2}\abs{J_0(j_{1k})}}
\int_0^T
e^{- \nu j_{1k}^2 t} \, dt. \end{align*} In the final equality, we used \begin{align*}
\int_\Gamma \overline{u} \cdot \BoldTau
= - \int_\Gamma \overline{u}^\perp \cdot \bm{n}
= - \int_\Omega \dv \overline{u}^\perp
= \int_\Omega \overline{\omega}
= m. \end{align*} (Because vorticity is transported by the Eulerian flow, $m$ is constant in time.)
Then,
\begingroup \allowdisplaybreaks \begin{align*}
&\abs{\nu \int_0^T \int_\Gamma \omega \, \overline{u} \cdot \BoldTau}
\le \abs{m} \nu \sum_{k = 1}^\ensuremath{\infty}
\frac{\abs{a_k}}{\pi^{1/2}} j_{1k}
\int_0^T
e^{- \nu j_{1k}^2 t} \, dt \\
&\qquad
= \abs{m} \nu \sum_{k = 1}^\ensuremath{\infty}
\frac{\abs{a_k}}{\pi^{1/2}} j_{1k}
\frac{1 - e^{- \nu j_{1k}^2 T}}{\nu j_{1k}^2} \\
&\qquad
\le \frac{\abs{m}}{\pi^{\frac{1}{2}}}
\pr{\sum_{k = 1}^\ensuremath{\infty} a_k^2}^{\frac{1}{2}}
\pr{\sum_{k = 1}^\ensuremath{\infty}
\frac{(1 - e^{- \nu j_{1k}^2 T})^2}{j_{1k}^2}}
^{\frac{1}{2}} \\
&\qquad
= \frac{\abs{m}}{\pi^{\frac{1}{2}}}
\smallnorm{u_0}_H
\pr{\sum_{k = 1}^\ensuremath{\infty}
\frac{(1 - e^{- \nu j_{1k}^2 T})^2}{j_{1k}^2}}
^{\frac{1}{2}}. \end{align*} \endgroup Classical bounds on the zeros of Bessel functions give $1 + k < j_{1k} \le \pi(\frac{1}{2} + k)$ (see, for instance, Lemma A.3 of \cite{K2006Disk}). Hence, with $M = (\nu T)^{-\ensuremath{\alpha}}$, $\ensuremath{\alpha} > 0$ to be determined, we have
\begingroup \allowdisplaybreaks \begin{align*}
\sum_{k = 1}^\ensuremath{\infty}
&\frac{(1 - e^{- \nu j_{1k}^2 T})^2}{j_{1k}^2}
\le C \sum_{k = 1}^\ensuremath{\infty}
\frac{(1 - e^{- \nu k^2 T})^2}{k^2} \\
&\le (1 - e^{- \nu T})^2
+ \int_1^M
\frac{(1 - e^{- \nu x^2 T})^2}{x^2} \, dx
+ \int_{M + 1}^\ensuremath{\infty}
\frac{(1 - e^{- \nu x^2 T})^2}{x^2} \, dx \\
&\le \nu^2 T^2
+ \nu^2 T^2 \int_1^M
\frac{x^4}{x^2} \, dx
+ \int_{M + 1}^\ensuremath{\infty}
\frac{1}{x^2} \, dx \\
&\le \nu^2 T^2
+ \nu^2 T^2 \frac{1}{3} \pr{M^3 - 1}
+ \frac{1}{M}
\le \nu^2 T^2
+ \nu^2 T^2 M^3
+ \frac{1}{M} \\
&= \nu^2 T^2
+ \nu^2 T^2 \nu^{-3 \ensuremath{\alpha}} T^{- 3 \ensuremath{\alpha}}
+ (\nu T)^\ensuremath{\alpha}
= \nu^2 T^2
+ (\nu T)^{2 - 3 \ensuremath{\alpha}}
+ (\nu T)^\ensuremath{\alpha} \end{align*} \endgroup as long as $\nu M^2 T \le 1$ (used in the third inequality); that is, as long as \begin{align}\label{e:albetaReq}
(\nu T)^{1 - 2 \ensuremath{\alpha}} \le 1. \end{align} Thus \cref{e:BoundaryCondition2D} holds (as does \cref{e:nuL1Bound}), so ($VV$) holds.
The rate of convergence in ($VV$) is optimized when $(\nu T)^{2 - 3 \ensuremath{\alpha}} = (\nu T)^\ensuremath{\alpha}$, which occurs when $\ensuremath{\alpha} = \frac{1}{2}$. The condition in \refE{albetaReq} is then satisfied with equality. \refR{ROC} then gives a rate of convergence in the vanishing viscosity limit of $C \nu^{\frac{1}{4}}$ (even for smoother initial data), except in the special case $m = 0$, which we note reduces to Example 1a.
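The scaling used above is easy to check numerically (an illustrative sketch, not part of the proof; the truncation length and the values of $\nu$ are our own choices): with $\ensuremath{\alpha} = \frac{1}{2}$ the truncated sum divided by $\sqrt{\nu T}$ should be roughly constant as $\nu$ decreases.
\begin{verbatim}
import numpy as np
from scipy.special import jn_zeros

# Check that sum_k (1 - exp(-nu j_{1k}^2 T))^2 / j_{1k}^2 behaves like
# C sqrt(nu T), consistent with the choice alpha = 1/2 above.
# The truncation length and the viscosities are illustrative choices.
T = 1.0
j1 = jn_zeros(1, 100000)            # first 10^5 positive zeros of J_1

def tail_sum(nu):
    return np.sum((1.0 - np.exp(-nu * j1**2 * T))**2 / j1**2)

for nu in [1e-2, 1e-3, 1e-4, 1e-5]:
    s = tail_sum(nu)
    print(f"nu = {nu:7.1e}  sum = {s:9.6f}  sum / sqrt(nu T) = {s / np.sqrt(nu * T):6.3f}")
\end{verbatim}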
\ReturnExample{1a} Let us apply our analysis of Example 3 to the special case of Example 1a, in which $u_0 \in V$. Now, on the boundary, \begin{align*}
(\ensuremath{\partial}_t u + u \cdot \ensuremath{\nabla} u + \ensuremath{\nabla} p) \cdot \BoldTau
= \nu \Delta u \cdot \BoldTau
= \nu \Delta u^\perp \cdot (- \bm{n})
= - \nu \ensuremath{\nabla}^\perp \omega \cdot \bm{n}. \end{align*} But every term on the left-hand side has vanishing tangential component on $\Gamma$: $u$ vanishes there for all time, so $\ensuremath{\partial}_t u$ and $u \cdot \ensuremath{\nabla} u$ vanish on $\Gamma$, while $\ensuremath{\nabla} p \cdot \BoldTau = 0$ because $p$ is radially symmetric. Hence, the vorticity satisfies homogeneous Neumann boundary conditions for positive time. (This is an instance of Lighthill's formula.) Since the nonlinear term in the vorticity equation vanishes for this radially symmetric flow, $\omega$ in fact satisfies the heat equation, $\ensuremath{\partial}_t \omega = \nu \Delta \omega$, with homogeneous Neumann boundary conditions and hence $\omega \in C([0, T]; L^2(\Omega))$.
Moreover, multiplying $\ensuremath{\partial}_t \omega = \nu \Delta \omega$ by $\omega$ and integrating gives \begin{align*}
\norm{\omega(t)}_{L^2}^2
+ 2 \nu \int_0^t \norm{\ensuremath{\nabla} \omega(s)}_{L^2}^2 \, ds
= \norm{\omega_0}_{L^2}^2. \end{align*} We conclude that the $L^2$-norm of $\omega$, and so the $L^p$-norms for all $p \le 2$, are bounded in time uniformly in $\nu$. (In fact, this holds for all $p \in [1, \ensuremath{\infty}]$. This conclusion is not incompatible with \refT{VorticityNotBounded}, since $\overline{u} \equiv 0$ on $\Gamma$.)
This argument for bounding the $L^p$-norms of the vorticity fails for Example 3 because the vorticity is no longer continuous in $L^2$ down to time zero unless $u_0 \in V$. It is shown in \cite{FLMT2008} (and see \cite{GKLMN14}) that such control is nonetheless obtained for the $L^1$ norm.
\section{On a result of Bardos and Titi}\label{S:BardosTiti}
\noindent Bardos and Titi \cite{BardosTiti2013a, Bardos2014Private}, also starting, essentially, from \refE{VVArg},
observe that, in fact, for the vanishing viscosity limit to hold it is necessary and sufficient that $\nu \omega$ (or, equivalently, $\nu [\ensuremath{\partial}_{\bm{n}} u]_{\BoldTau}$) converge to zero on the boundary in a weak sense. In their result, the boundary is assumed to be $C^\ensuremath{\infty}$, but the initial velocity is assumed only to lie in $H$. Hence, sufficiency does not follow immediately from \refE{VVArg}.
Their proof of sufficiency involves the use of dissipative solutions to the Euler equations. (The use of dissipative solutions for the Euler equations in a domain with boundaries was initiated in \cite{BardosGolsePaillard2012}. See also \cite{BSW2014}.) We present here the weaker version of their results in 2D that can be obtained without employing dissipative solutions. The simple and elegant proof of necessity is as in \cite{Bardos2014Private}, simplified further because of the higher regularity of our initial data.
\begin{theorem}[Bardos and Titi \cite{BardosTiti2013a, Bardos2014Private}]\label{T:BardosTiti} Working in 2D, assume that $\ensuremath{\partial} \Omega$ is $C^2$ and that $\overline{u} \in C^1([0, T]; C^1(\Omega))$. Then for $u \to \overline{u}$ in $L^\ensuremath{\infty}(0, T; H)$ to hold it is necessary and sufficient that \begin{align}\label{e:BardosNecCond}
\nu \int_0^T \int_\Gamma \omega \, \varphi \to 0
\text{ as } \nu \to 0
\text{ for any } \varphi \in C^1([0, T] \times \Gamma). \end{align} \end{theorem} \begin{proof}
Sufficiency of the condition follows immediately from setting $\varphi = (\overline{u} \cdot \BoldTau)|_\Gamma$ in \refT{BoundaryIffCondition}.
To prove necessity, let $\varphi \in C^1([0, T] \times \Gamma)$. We will need a divergence-free vector field $v_\delta \in C^1([0, T]; H \cap C^\ensuremath{\infty}(\Omega))$ such that $v_\delta \cdot \BoldTau = \varphi$ on $\Gamma$. Moreover, we require of $v_\delta$ that it satisfy the same bounds as the boundary layer corrector of Kato in \cite{Kato1983}; in particular, \begin{align}\label{e:vBounds}
\norm{\ensuremath{\partial}_t v_\delta}_{L^1([0, T]; L^2(\Omega))}
\le C \delta^{1/2}, \qquad
\norm{\ensuremath{\nabla} v_\delta}_{L^\ensuremath{\infty}([0, T]; L^2(\Omega))}
\le C \delta^{-1/2}. \end{align} This vector field can be constructed in several ways: we detail one such construction at the end of this proof.
The proof now proceeds very simply. We multiply the Navier-Stokes equations by $v_\delta$ and integrate over space and time to obtain \begin{align}\label{e:BardosNec}
\begin{split}
\int_0^T (\ensuremath{\partial}_t &u, v_\delta)
+ \int_0^T (u \cdot \ensuremath{\nabla} u, v_\delta)
+ \nu \int_0^T (\ensuremath{\nabla} u, \ensuremath{\nabla} v_\delta) \\
&= \nu \int_0^T \int_\Gamma (\ensuremath{\nabla} u \cdot \bm{n})
\cdot v_\delta
= \nu \int_0^T \int_\Gamma \omega \, v_\delta
\cdot \BoldTau
= \nu \int_0^T \int_\Gamma \omega \, \varphi.
\end{split} \end{align} Here, we used \refE{gradunomega} with $v_\delta$ in place of $\overline{u}$, and we note that no integrations by parts were involved.
Now, assuming that the vanishing viscosity limit holds, Kato shows in \cite{Kato1983} that setting $\delta = c \nu$---and using the bounds in \refE{vBounds}---each of the terms on the left-hand side of \refE{BardosNec} vanishes as $\nu \to 0$. The right-hand side must then vanish as well, giving the necessity of the condition in \refE{BardosNecCond}.
It remains to construct $v_\delta$. To do so, we place coordinates on a tubular neighborhood, $\Sigma$, of $\Gamma$ as in the proof of \cref{L:Trace}. In $\Sigma$, define \begin{align*}
\psi(s, r) = - r \varphi(s). \end{align*} Write $\ensuremath{\widehat}{r}$, $\ensuremath{\widehat}{s}$ for the unit vectors in the directions of increasing $r$ and $s$. Then $\ensuremath{\widehat}{r} \cdot \ensuremath{\widehat}{s} = 0$ and $\ensuremath{\widehat}{r} = - \bm{n}$ on $\Gamma$. Thus, on the boundary, \begin{align*}
\ensuremath{\nabla} \psi(s, r)
= -\varphi(s) \ensuremath{\widehat}{r} -r \varphi'(s) \ensuremath{\widehat}{s}. \end{align*} This gives \begin{align*}
\ensuremath{\nabla} \psi(s) \cdot \bm{n}
= -\varphi(s) \ensuremath{\widehat}{r} \cdot \bm{n}
= \varphi(s). \end{align*} It also gives $\ensuremath{\nabla} \psi \in C^1([0, T]; C(\Sigma))$ so that $\psi \in \varphi \in C^1([0, T] \times \Sigma)$.
We now follow the procedure in \cite{Kato1983}. Let $\zeta: [0, \ensuremath{\infty}) \to [0, 1]$ be a smooth cutoff function with $\zeta \equiv 1$ on $[0, 1/2]$ and $\zeta \equiv 0$ on $[1, \ensuremath{\infty})$. Define $\zeta_\delta(\cdot) = \zeta(\cdot/\delta)$ and \begin{align*}
v_\delta(x)
= \ensuremath{\nabla}^\perp (\zeta_\delta(\dist(x, \ensuremath{\partial} \Omega)) \psi(x)). \end{align*} Note that $v_\delta$ is supported in a boundary layer of width proportional to $\delta$. The bounds in \refE{vBounds} follow as shown in \cite{K2006Kato}. \end{proof}
To establish the necessity of the stronger condition in \refT{BardosTiti}, we used (based on Bardos's \cite{Bardos2014Private}) a vector field supported in a boundary layer of width $c \nu$, as in \cite{Kato1983}. We used it, however, to extend to the whole domain an arbitrary cutoff function defined on the boundary, rather than to correct the Eulerian velocity as in \cite{Kato1983}.
\begin{remark}
In this proof of \refT{BardosTiti}
the time regularity in the test functions could be weakened
slightly to assuming that
$\ensuremath{\partial}_t \varphi \in L^1([0, T]; C(\Gamma))$,
for this would still allow the first bound in
\refE{vBounds} to be obtained. \end{remark} \begin{remark}
Using the results of \cite{BardosTiti2013a, BSW2014} it is
possible to change the condition in \refE{BardosNecCond} to
apply to test functions $\varphi$ in
$C^1([0, T]; C^\ensuremath{\infty}(\Gamma))$ (\cite{Bardos2014Private}).
Moreover, this can be done
without assuming time or spatial regularity of the
solution to the Euler equations: only that the initial
velocity lies in $H$. \end{remark}
\Ignore{
\section{Speculation on another condition for the VV limit}
\noindent There is nothing deep about the condition in \refE{BoundaryCondition2D}, but what it says is that there are two mechanisms by which the vanishing viscosity limit can hold. First, the blowup of $\omega$ on the boundary can happen slowly enough that \begin{align}\label{e:nuL1Bound}
\nu \int_0^T \norm{\omega}_{L^1(\Gamma)}
\to 0
\text{ as } \nu \to 0 \end{align} or, second, the vorticity for ($NS$) can be generated on the boundary in such a way as to oppose the sign of $\overline{u} \cdot \BoldTau$. In the second case, it could well be that vorticity for $(NS)$ blows up fast enough that \refE{nuL1Bound} does not hold, but cancellation in the integral in \refE{BoundaryCondition2D} allows that condition to hold.
A natural question to ask is whether the condition, \begin{align*}
(G) \qquad
\nu \int_0^T \norm{\omega}_{L^1(\Gamma)}
\to 0
\text{ as } \nu \to 0 \end{align*} is equivalent to the conditions in \cref{T:VVEquiv}. The sufficiency of this condition follows immediately, since it implies that \refE{BoundaryCondition2D} holds.
To see why we might suspect that ($G$) is necessary for ($VV$) to hold, we start with the necessary and sufficient condition $(iii')$ of Theorem 1.2 of \cite{K2006Kato} that \begin{align*}
\nu \int_0^T \norm{\omega}_{L^2(\Gamma_\nu)}^2
\to 0
\text{ as } \nu \to 0, \end{align*} where $\Gamma_\nu = \set{x \in \Omega \colon \dist(x, \Gamma) < \nu}$. For sufficiently regular $u_\nu^0$, for all $t > 0$, $\omega(t)$ will lie in $H^2(\Omega) \supseteq C(\overline{\Omega})$, and one might expect to have \begin{align}\label{e:ApproxIntegral}
\nu \int_0^T \norm{\omega}_{L^2(\Gamma_\nu)}^2
&\cong \nu \int_0^T \int_0^\nu \norm{\omega}_{L^2(\Gamma)}^2
= \nu^2 \int_0^T \norm{\omega}_{L^2(\Gamma)}^2. \end{align} Then using \Holders inequality followed by Jensen's inequality, \begin{align}\label{e:HJBound}
\pr{\frac{\nu}{T^{3/2}} \int_0^T \norm{\omega}_{L^1(\Gamma)}}^2
\le \pr{\frac{\nu}{T} \int_0^T \norm{\omega}_{L^2(\Gamma)}}^2
\le \frac{\nu^2}{T} \int_0^T \norm{\omega}_{L^2(\Gamma)}^2. \end{align} But the left-hand side of \refE{ApproxIntegral} must vanish, and so too must the left-hand side of \refE{HJBound}, implying that $(G$) holds.
The problem with this argument, however, is that the best we can say rigorously is that from \refT{BoundaryLayerWidth} and the continuity of $\omega(t)$ for all $t > 0$, \begin{align*}
\nu \int_0^T \norm{\omega}_{L^1(\Gamma)}^2
&= \nu \int_0^T
\lim_{\delta \to 0} \frac{1}{\delta^2} \norm{\omega}_{L^1(\Gamma_{\delta})}^2
\le \nu \liminf_{\delta \to 0} \frac{1}{\delta^2}
\int_0^T \norm{\omega}_{L^1(\Gamma_{\delta})}^2 \\
&\le \nu \lim_{\delta \to 0} \frac{1}{\delta^2} \frac{C \delta}{\nu}
\le \ensuremath{\infty}, \end{align*} where in the first inequality we used Fatou's lemma.
If we could improve this inequality to show that $\nu \int_0^T \norm{\omega}_{L^1(\Gamma)}^2$ is $o(1/\nu)$, then using \Holders inequality followed by Jensen's inequality, \begin{align*}
\pr{\frac{\nu}{T} \int_0^T \norm{\omega}_{L^1(\Gamma)}}^2
\le \frac{\nu^2}{T} \int_0^T \norm{\omega}_{L^1(\Gamma)}^2
\to 0 \text{ as } \nu \to 0. \end{align*}
\Ignore{ Letting $f = \overline{\omega}$ in condition ($E_2$) of \cref{T:VVEquiv} gives \begin{align*}
(\omega, \overline{\omega})
\to \norm{\overline{\omega}}_{L^2}^2 - \int_\Gamma \overline{\omega} \, \overline{u} \cdot \BoldTau. \end{align*} But, \begin{align*}
(\omega, \overline{\omega})
&= (\ensuremath{\nabla} u, \ensuremath{\nabla} \overline{u})
= - (\Delta u, \overline{u})
+ \int_\Gamma (\ensuremath{\nabla} u \cdot \mathbf{n}) \cdot \overline{u}, \end{align*} where we used Lemma 6.6 of \cite{K2008VVV} for scalar vorticity (in which the factor of 2 in that lemma does not appear). By Equation (4.2) of \cite{KNavier}, \begin{align*}
(\ensuremath{\nabla} u \cdot \mathbf{n}) \cdot \overline{u}
= ((\ensuremath{\nabla} u \cdot \mathbf{n}) \cdot \BoldTau) (\overline{u} \cdot \BoldTau)
= \omega(u) \, \overline{u} \cdot \BoldTau. \end{align*} Thus, \begin{align*}
- \nu (\Delta u, \overline{u})
+ \nu \int_\Gamma \omega(u) \, \overline{u} \cdot \BoldTau
\to \nu \norm{\overline{\omega}}_{L^2}^2 - \nu \int_\Gamma \overline{\omega} \, \overline{u} \cdot \BoldTau. \end{align*} The right-hand side vanishes with $\nu$ since $\overline{u}$ is in $C^{1 + \ensuremath{\epsilon}}$, so \begin{align*}
\nu \int_\Gamma \omega(u) \, \overline{u} \cdot \BoldTau
\to - \nu (\Delta u, \overline{u}). \end{align*} It remains to show that the right-hand side vanishes with $\nu$.
Now, \begin{align*}
\nu (\Delta u, \overline{u})
= (\nu \Delta u, \overline{u})
= \nu (\ensuremath{\partial}_t u, \overline{u})
+ \nu (u \cdot \ensuremath{\nabla} u, \overline{u})
+ \nu (\ensuremath{\nabla} p, \overline{u}) \end{align*} }
\Ignore{ We make the assumptions on the initial velocity and on the forcing in \cref{T:VVEquiv}.
\begin{theorem} The vanishing viscosity limit holds over any finite time interval $[0, T]$ if and only if $A_\nu \to 0$ as $\nu \to 0$, where \begin{align}\label{e:Anu}
A_\nu = \nu \int_0^T \norm{\omega}_{L^1(\Gamma)}. \end{align} Moreover, \begin{align}\label{e:RateOfConvergence}
\norm{u(t) - \overline{u}(t)}_{L^2}^2
\le (C\nu + C A_\nu + \smallnorm{u_\nu^0 - \overline{u}^0}_{L^2}^2)^{1/2} e^{Ct} \end{align} for all sufficiently small $\nu > 0$, with $C$ depending only upon the initial velocities and $T$. \end{theorem} \begin{proof} Subtracting ($EE$) from ($NS$), multiplying by $w = u - \overline{u}$, and integrating over $\Omega$ leads to \begin{align*}
\frac{1}{2} \diff{}{t} &\norm{\omega}_{L^2}^2
+ \nu \norm{\ensuremath{\nabla} u}_{L^2}^2
= - (w \cdot \ensuremath{\nabla} \overline{u}, w)
+ \nu(\ensuremath{\nabla} u, \ensuremath{\nabla} \overline{u})
- \nu \int_\Gamma (\ensuremath{\nabla} u \cdot \mathbf{n}) \cdot \overline{u} \\
&= - (w \cdot \ensuremath{\nabla} \overline{u}, w)
+ \nu(\ensuremath{\nabla} u, \ensuremath{\nabla} \overline{u})
- \nu \int_\Gamma \omega \, \overline{u} \cdot \BoldTau. \end{align*} Here we used Equation (4.2) of \cite{KNavier} to conclude that \begin{align*}
(\ensuremath{\nabla} u \cdot \mathbf{n}) \cdot \overline{u}
= ((\ensuremath{\nabla} u \cdot \mathbf{n}) \cdot \BoldTau) (\overline{u} \cdot \BoldTau)
= \omega \, \overline{u} \cdot \BoldTau. \end{align*} Integrating over time gives \begin{align*}
\frac{1}{2} &\norm{w(t)}_{L^2}^2
+ \nu \int_0^t \norm{\ensuremath{\nabla} u}_{L^2}^2
= \norm{w(0)}_{L^2}^2
- \int_0^t (w \cdot \ensuremath{\nabla} \overline{u}, w)
+ \nu \int_0^t (\ensuremath{\nabla} u, \ensuremath{\nabla} \overline{u}) \\
&\qquad - \nu \int_0^t \int_\Gamma \omega \, \overline{u} \cdot \BoldTau. \end{align*}
Using the bounds, \begin{align*}
\abs{(w \cdot \ensuremath{\nabla} \overline{u}, w)}
&\le \norm{\ensuremath{\nabla} \overline{u}}_{L^\ensuremath{\infty}([0, T] \times \Omega)}
\norm{w}_{L^2}^2
\le C \norm{w}_{L^2}^2, \\
\nu \int_0^T \abs{(\ensuremath{\nabla} u, \ensuremath{\nabla} \overline{u})}
&\le \nu \norm{\ensuremath{\nabla} \overline{u}}_{L^2([0, T] \times \Omega)}
\norm{\ensuremath{\nabla} u}_{L^2([0, T] \times \Omega)} \\
&\le C \nu
\norm{\ensuremath{\nabla} u}_{L^2([0, T] \times \Omega)} \\
&\le C \nu + \frac{\nu}{2} \norm{\ensuremath{\nabla} u}_{L^2([0, T] \times \Omega)}^2 , \\
- \nu \int_0^t \int_\Gamma \omega \, \overline{u} \cdot \BoldTau
&\le
\nu \norm{\overline{u}}_{L^\ensuremath{\infty}} \int_0^T \norm{\omega}_{L^1(\Gamma)}
\le C \nu \int_0^T \norm{\omega}_{L^1(\Gamma)} \end{align*} gives \begin{align}\label{e:VVArg}
\begin{split}
&\norm{w(t)}_{L^2}^2
+ \nu \int_0^t \norm{\ensuremath{\nabla} u}_{L^2}^2
\le \norm{w(0)}_{L^2}^2
+ C \nu + C A_\nu + C \int_0^t \norm{w}_{L^2}^2.
\end{split} \end{align}
Applying Gronwall's inequality leads to \refE{RateOfConvergence} and shows that $A_\nu \to 0$ implies ($VV$).
\end{proof}
Let \begin{align*}
\Gamma_\delta = \set{x \in \Omega \colon \dist(x, \Gamma) < \delta}, \end{align*} where we always assume that $\delta > 0$ is sufficiently small that $\Gamma_\delta$ is a tubular neighborhood of $\Gamma$.
\begin{lemma}\label{L:BoundaryLayerWidth} For any sufficiently small $\delta > 0$ \begin{align}\label{e:OmegaL1VanishGeneral}
\norm{\omega}_{L^2([0, T]; L^1(\Gamma_{\delta}))}^2
\le C \frac{\delta}{\nu} \end{align} for all sufficiently small $\delta(\nu)$. \end{lemma} \begin{proof} By the Cauchy-Schwarz inequality, \begin{align*}
\norm{\omega}_{L^1(\Gamma_{\delta})}
\le \norm{1}_{L^2(\Gamma_{\delta})} \norm{\omega}_{L^2(\Gamma_{\delta})}
\le C \delta^{1/2} \norm{\omega}_{L^2(\Gamma_{\delta})} \end{align*} so \begin{align*}
\norm{\omega}_{L^1(\Gamma_{\delta})}^2
\le C \delta \norm{\omega}_{L^2(\Gamma_{\delta})}^2 \end{align*} and \begin{align*}
\frac{C \nu}{\delta} \norm{\omega}_{L^2([0, T]; L^1(\Gamma_{\delta}))}^2
\le \nu \norm{\omega}_{L^2([0, T]; L^2(\Gamma_{\delta}))}^2. \end{align*} By the basic energy inequality for the Navier-Stokes equations, the right-hand side is bounded, giving \refE{OmegaL1VanishGeneral}. \end{proof}
\begin{theorem}\label{T:Anu}
Assume that $\Gamma$ is $C^3$.
If the vanishing viscosity limit holds then $A_\nu \to 0$ as $\nu \to 0$. \end{theorem} \begin{proof} Impose at first the extra regularity condition that $u_\nu^0$ lies in $H^3(\Omega)$, so that the $u(t)$ lies in $H^3(\Omega)$ for all $t > 0$. Then for all $t > 0$, $\omega(t)$ is in $H^2(\Omega)$ and hence $\omega(t)$ is continuous up to the boundary by Sobolev embedding. Thus,\begin{align}\label{e:BoundaryIntegralLimit}
\norm{\omega(t)}_{L^1(\Gamma)}^2
= \lim_{\delta \to 0} \frac{1}{\delta^2}
\norm{\omega(t)}_{L^1(\Gamma_{\delta})}^2. \end{align} It follows from Fatou's lemma that \begin{align*}
\nu \int_0^T &\norm{\omega(t)}_{L^1(\Gamma)}^2 \, dt
= \nu \int_0^T \lim_{\delta \to 0} \frac{1}{\delta^2}
\norm{\omega(t)}_{L^1(\Gamma_{\delta})}^2 \, dt\\
&= \nu \int_0^T \liminf_{\delta \to 0} \frac{1}{\delta^2}
\norm{\omega(t)}_{L^1(\Gamma_{\delta})}^2 \, dt
\le \nu \liminf_{\delta \to 0} \int_0^T
\frac{1}{\delta^2} \norm{\omega(t)}_{L^1(\Gamma_{\delta})}^2 \, dt\\
&\le \nu \liminf_{\delta \to 0} \frac{1}{\delta^2} C \frac{\delta}{\nu}
= \liminf_{\delta \to 0} \frac{C}{\delta}. \end{align*} In the last inequality we used \refL{BoundaryLayerWidth}. \textbf{Of course, this is BAD!!!}
Using \Holders and Jensen's inequalities it follows that \begin{align*}
\pr{\frac{\nu}{T} \int_0^T \norm{\omega}_{L^1(\Gamma)}}^2
\le \frac{\nu^2}{T} \int_0^T \norm{\omega}_{L^1(\Gamma)}^2
\le \frac{C \nu}{T}, \end{align*} completing the proof. \end{proof}
\begin{remark} In higher dimensions, we could attempt the same argument using $\ensuremath{\nabla} u$ in place of $\omega$. A problem remains, though, in that we cannot conclude that $\ensuremath{\nabla} u$ has sufficient space regularity over a finite time interval independent of the viscosity so that $\omega(t)$ is continuous. Weak solutions do have sufficient regularity so that the left-hand side of \refE{BoundaryIntegralLimit} (with $\ensuremath{\nabla} u$ in place of $\omega$) makes sense, but there is no reason to suppose that equality with the right-hand side holds. \end{remark} }
}
\Ignore{
\section{An alternate derivation of Kato's conditions}\label{S:AlternateDerivation}
\noindent The argument that led to \refE{VVArg} in the proof of \refT{BoundaryIffCondition} is perhaps the first calculation that anyone who ever attempts to establish the vanishing viscosity limit makes. It is simple, direct, and natural. Because we were working in 2D it was easy to make the argument rigorous, but the essential idea is contained in the formal argument.
Kato's introduction of a boundary layer corrector, on the other hand, handles the rigorous proof of the necessity and sufficiency of his conditions in higher dimensions while at the same time striving to give the motivation for those very conditions. In this way, it obscures to some extent the nature of the argument, and appears somewhat unmotivated. That is to say, one can follow the technical details easily enough, but it is hard to see what the plan is at the outset. (Kato uses the energy inequality for the Navier-Stokes equations in a way that avoids treating $w = u - \overline{u}$ as though it were a test function for the Navier-Stokes equations. This now classical technique is clearly explained in Section 2.2 of \cite{IftimieLopeses2009}.)
We give a different derivation below, which starts with \refT{BoundaryIffCondition}. We give the formal argument, which is rigorous in two dimensions if we pay more attention to the regularity of the solutions.
\begin{theorem}
The condition in \cref{e:KellCondition} is necessary and sufficient for
($VV$) to hold. \end{theorem} \begin{proof} Let $v$ be the boundary layer velocity defined by Kato in \cite{Kato1983}, where $\delta = c \nu$: so $v$ is divergence-free, vanishes outside of $\Gamma_{c \nu}$, and $v = \overline{u}$ on $\Gamma$. (In all that follows, one can also refer to \cite{K2006Kato}, which gives Kato's argument using (almost) his same notation.) Since $v = \overline{u}$ on $\Gamma$, by \refT{BoundaryIffCondition}, and using \cref{e:gradunomega}, ($VV$) holds if and only if \begin{align*}
\nu \int_0^T \int_\Gamma (\ensuremath{\nabla} u \cdot \mathbf{n}) \cdot v
= \int_0^T (\nu \Delta u, v) + \nu \int_0^T (\ensuremath{\nabla} u, \ensuremath{\nabla} v)
\to 0 \end{align*} as $\nu \to 0$.
Using Lemma A.2 of \cite{K2006Kato}, \begin{align*}
\nu \int_0^T (\ensuremath{\nabla} u, \ensuremath{\nabla} v)
&= 2 \nu \int_0^T (\omega(u), \omega(v))
\le 2 \nu \int_0^T \norm{\omega(u)}_{L^2(\Gamma_{c \nu})} \norm{\omega(v)}_{L^2} \\
&\le \sqrt{\nu} \norm{\ensuremath{\nabla} v}_{L^2([0, T] \times \Omega)}
\sqrt{\nu} \norm{\omega(u)}_{L^2([0, T] \times \Gamma_{c \nu})} \\
&\le C \pr{\nu \int_0^T \norm{\omega(u)}_{L^2(\Gamma_{c \nu})}^2}^{1/2}, \end{align*} since $\norm{\ensuremath{\nabla} v}_{L^2([0, T] \times \Omega)} \le C \nu^{-1/2}$.
Also, \begin{align*}
\int_0^T (\nu \Delta u, v)
= \int_0^T \brac{(\ensuremath{\partial}_t u, v) + (u \cdot \ensuremath{\nabla} u, v) + (\ensuremath{\nabla} p, v) - (f, v)}. \end{align*} The integral involving the pressure disappears, while \begin{align*}
\int_0^T \abs{(f, v)}
\le C \nu^{1/2} \int_0^T \norm{f}_{L^2(\Gamma_{c \nu})}, \end{align*} using the bound on $\norm{v}_{L^\ensuremath{\infty}([0, T]; L^2)}$ in \cite{Kato1983} (Equation (3.1) of \cite{K2006Kato}). This vanishes with the viscosity since $f$ lies in $L^1([0, T]; L^2)$.
The integral involving $(u \cdot \ensuremath{\nabla} u, v)$ we bound the same way as in \cite{K2006Kato}. Using Lemma A.4 of \cite{K2006Kato},
\begingroup \allowdisplaybreaks \begin{align*}
&\abs{\int_0^t (u \cdot \ensuremath{\nabla} u, v)}
= 2 \abs{\int_0^t (v, u \cdot \omega(u))}
\\
&\qquad
\le 2 \norm{v}_{L^\ensuremath{\infty}([0, T] \times \Omega)}
\int_0^t \norm{u}_{L^2(\Gamma_{c \nu})}
\norm{\omega(u)}_{L^2(\Gamma_{c \nu})} \\
&\qquad
\le C \nu \int_0^t
\norm{\ensuremath{\nabla} u}_{L^2(\Gamma_{c \nu})}
\norm{\omega(u)}_{L^2(\Gamma_{c \nu})} \\
&\qquad
\le C \nu^{1/2}
\norm{\ensuremath{\nabla} u}_{L^2([0, T]; L^2(\Gamma_{c \nu}))}
\nu^{1/2} \norm{\omega(u)}_{L^2([0, T]; L^2(\Gamma_{c \nu}))} \\
&\qquad
\le C \pr{\nu \int_0^t
\norm{\omega(u)}_{L^2(\Gamma_{c \nu})}^2}^{1/2}. \end{align*} \endgroup
Finally, \begin{align*}
\int_0^T (\ensuremath{\partial}_t u, v)
= \int_0^T \int_\Omega \ensuremath{\partial}_t (u v) + \int_0^T (u, \ensuremath{\partial}_t v). \end{align*} As in \cite{Kato1983}, \begin{align*}
\abs{\int_0^t (u, \ensuremath{\partial}_t v)}
&\le \int_0^t \norm{u}_{L^2(\Omega)} \norm{\ensuremath{\partial}_t v}_{L^2(\Omega)}
\le C \nu^{1/2}. \end{align*} Also, \begin{align*}
\int_0^T \int_\Omega &\ensuremath{\partial}_t (u v)
= \int_0^T \diff{}{t} (u, v)
= (u(T), v(T)) - (u_\nu^0, v(0)) \\
&\le \norm{u(T)}_{L^2} \norm{v}_{L^2}
+ \smallnorm{u_\nu^0}_{L^2} \norm{v(0)}_{L^2} \\
&\le C \smallnorm{u_0}_{L^2} \norm{v}_{L^\ensuremath{\infty}([0, T]; L^2)}
\le C \sqrt{\nu}. \end{align*}
We conclude from all these inequalities that \begin{align*}
\nu \int_0^T \norm{\omega(u)}_{L^2(\Gamma_{c \nu})}^2
\to 0 \text{ as } \nu \to 0 \end{align*} is a sufficient condition for the vanishing viscosity limit to hold (as, too, is Kato's condition involving $\ensuremath{\nabla} u$ in place of $\omega(u)$). The necessity follows easily from the energy inequality.
\end{proof} }
\ifbool{IncludeNavierBCSection}{
\section{Navier boundary conditions in 2D}\label{S:NavierBCs}
\refT{VorticityNotBounded} says that if the vanishing viscosity limit holds, then there cannot be a uniform (in $\nu$) bound on the $L^2$-norm of the vorticity. This is in stark contrast to the situation in the whole space, where such a bound holds, or for Navier boundary conditions in 2D, where such a bound holds for $L^p$, $p > 2$, as shown in \cite{FLP} and \cite{CMR}. For Navier boundary conditions in 2D, then, as long as the initial vorticity is in $L^p$ for $p > 2$ there will be a uniform bound on the $L^2$-norm of the vorticity, since the domain is bounded.
In fact, for Navier boundary conditions in 2D the classical vanishing viscosity limit ($VV$)
does hold, even for much weaker regularity on the initial velocity than that considered here (see \cite{KNavier}). The argument in the proof of \refT{VorticityNotBounded} then shows that \begin{align}\label{e:VelocityGammaConvergence}
u \to \overline{u}
\text{ in } L^\ensuremath{\infty}([0, T]; L^2(\Gamma)). \end{align}
We also have weak$^*$ convergence of the vorticity in $\Cal{M}(\overline{\Omega})$, as we show in \cref{T:VorticityConvergenceNavier}.
\begin{theorem}\label{T:VorticityConvergenceNavier} Assume that the solutions to $(NS)$ are with Navier boundary conditions in 2D, and that the initial vorticity $\omega_0 = \overline{\omega}_0$ is in $L^\ensuremath{\infty}$ (slightly weaker assumptions as in \cite{KNavier} can be made). Then all of the conditions in \cref{T:VVEquiv} hold, but with the three conditions below replacing conditions $(C)$, $(E)$, and $(E_2)$, respectively: \begin{align*}
(C^N) & \qquad \ensuremath{\nabla} u \to \ensuremath{\nabla} \overline{u}
\quad
\weak^* \text{ in } L^\ensuremath{\infty}(0, T; \Cal{M}(\overline{\Omega})^{d \times d}), \\
(E^N) & \qquad \omega \to \overline{\omega}
\quad
\weak^* \text{ in } L^\ensuremath{\infty}(0, T; \Cal{M}(\overline{\Omega})^{d \times d}), \\
(E_2^N) & \qquad \omega \to \overline{\omega}
\quad
\weak^* \text{ in } L^\ensuremath{\infty}(0, T; \Cal{M}(\overline{\Omega})). \\ \end{align*} \end{theorem} \begin{proof} First observe that $(E^N)$ is just a reformulation of $(E_2^N)$ with vorticity viewed as a matrix. Also, it is sufficient to prove convergences in $(H^1(\Omega))^*$, using the same argument as in the proof of \cref{C:EquivConvMeasure}, since $\omega$ is bounded in all $L^p$ spaces, including $p = \ensuremath{\infty}$.
It is shown in \cite{KNavier} that condition $(B)$ holds, from which $(A)$ and $(A')$ follow immediately. Condition $(D)$ is weaker than $(C^N)$ and condition $(F_2)$ is weaker than condition $(E_2^N)$, so it remains only to show that $(C^N)$ and $(E_2^N)$ hold. We show this by modifying slightly the argument in the proof of \cref{T:VVEquiv} given in \cite{K2008VVV}.
\noindent $\mathbf{(A') \implies (C^N)}$: Assume that ($A'$) holds and let $M$ be in
$(H^1(\Omega))^{d \times d}$. Then
\begin{align*}
(\ensuremath{\nabla} u, M)
&= - (u, \dv M) + \int_\Gamma (M \cdot \mathbf{n}) \cdot u \\
&\to -(\overline{u}, \dv M) + \int_\Gamma (M \cdot \mathbf{n}) \cdot \overline{u}
\text{ in } L^\ensuremath{\infty}([0, T]).
\end{align*}
The convergence follows from condition $(A')$ and \refE{VelocityGammaConvergence}.
But,
\begin{align*}
-(\overline{u}, \dv M)
= (\ensuremath{\nabla} \overline{u}, M)
- \int_\Gamma (M \cdot \mathbf{n}) \cdot \overline{u},
\end{align*}
giving ($C^N$).
\noindent $\mathbf{(A') \implies (E_2^N)}$: Assume that ($A'$) holds and let $f$ be in
$H^1(\Omega)$. Then
\begin{align*}
(\omega, f)
&= - (\dv u^\perp, f)
= (u^\perp, \ensuremath{\nabla} f) - \int_\Gamma (u^\perp \cdot \mathbf{n}) f \\
&= - (u, \ensuremath{\nabla}^\perp f) + \int_\Gamma (u \cdot \BoldTau) f \\
&\to -(\overline{u}, \ensuremath{\nabla}^\perp f) + \int_\Gamma (\overline{u} \cdot \BoldTau) f
\text{ in } L^\ensuremath{\infty}([0, T])
\end{align*}
where $u^\perp = -\innp{u^2, u^1}$ and we used the identity $\omega(u) = - \dv u^\perp$
and \refE{VelocityGammaConvergence}.
But,
\begin{align*}
-(\overline{u}, &\ensuremath{\nabla}^\perp f)
= (\overline{u}^\perp, \ensuremath{\nabla} f)
= - (\dv \overline{u}^\perp, f)
+ \int_\Gamma (\overline{u}^\perp \cdot \mathbf{n}) f \\
&= - (\dv \overline{u}^\perp, f)
- \int_\Gamma (\overline{u} \cdot \BoldTau) f
= (\overline{\omega}, f)
- \int_\Gamma (\overline{u} \cdot \BoldTau) f,
\end{align*}
giving ($E_2^N$). \end{proof}
\begin{remark} If one could show that \refE{VelocityGammaConvergence} holds in dimension three then \refT{VorticityConvergenceNavier} would hold, with convergences in $(H^1(\Omega))^*$, in dimension three as well for initial velocities in $H^{5/2}(\Omega)$. This is because by \cite{IP2006} the vanishing viscosity limit holds for such initial velocities, and the argument in the proof of \refT{VorticityConvergenceNavier} would then carry over to three dimensions by making adaptations similar to those we made to the 2D arguments in \cite{K2008VVV}. Note that \refE{VelocityGammaConvergence} would follow, just as in 2D, from a uniform (in $\nu$) bound on the $L^p$-norm of the vorticity for some $p \ge 2$ if that could be shown to hold, though that seems unlikely. \end{remark} } { }
\Ignore{
\section{High friction limit}
\noindent Assume that $\overline{u}$ is a vector field lying in $L^\ensuremath{\infty}([0, T]; H)$ and let $u = u^\ensuremath{\alpha}$ be a vector field in $L^\ensuremath{\infty}([0, T]; H \cap H^1(\Omega)$ parameterized by $\ensuremath{\alpha}$, where $\ensuremath{\alpha} \to \ensuremath{\infty}$. This is the scenario that occurs in the high friction limit [\textbf{add references}], where $\overline{u}$ (which lies in $L^\ensuremath{\infty}([0, T]; V) \subseteq L^\ensuremath{\infty}([0, T]; H)$), a subject that we return to briefly at the end of this section.
Define the conditions \begin{align*}
(A_\ensuremath{\alpha}) & \qquad u \to \overline{u} \text{ weakly in } H
\text{ uniformly on } [0, T], \\
(A'_\ensuremath{\alpha}) & \qquad u \to \overline{u} \text{ weakly in } (L^2(\Omega))^d
\text{ uniformly on } [0, T], \\
(B_\ensuremath{\alpha}) & \qquad u \to \overline{u} \text{ in } L^\ensuremath{\infty}([0, T]; H), \\
(C_\ensuremath{\alpha}) & \qquad \ensuremath{\nabla} u \to \ensuremath{\nabla} \overline{u}
\text{ in } ((H^1(\Omega))^{d \times d})^*
\text{ uniformly on } [0, T], \\
(D_\ensuremath{\alpha}) & \qquad \ensuremath{\nabla} u \to \ensuremath{\nabla} \overline{u} \text{ in } (H^{-1}(\Omega))^{d \times d}
\text{ uniformly on } [0, T], \\
(E_\ensuremath{\alpha}) & \qquad \omega \to \omega(\overline{u})
\text{ in }
((H^1(\Omega))^{d \times d})^*
\text{ uniformly on } [0, T], \\
(E_{2, \ensuremath{\alpha}}) & \qquad \omega \to \omega(\overline{u})
\text{ in } (H^1(\Omega))^*
\text{ uniformly on } [0, T], \\
(F_{2, \ensuremath{\alpha}}) & \qquad \omega \to \omega(\overline{u}) \text{ in } H^{-1}(\Omega)
\text{ uniformly on } [0, T], \end{align*} we have the following theorem: \begin{theorem}\label{T:MainResultal}
Assume that $u \to \overline{u}$ in $L^\ensuremath{\infty}([0, t]; L^2(\Gamma))$.
Conditions ($A_\ensuremath{\alpha}$), ($A'_\ensuremath{\alpha}$), ($C_\ensuremath{\alpha}$), ($D_\ensuremath{\alpha}$), and ($E_\ensuremath{\alpha}$) are equivalent.
In two dimensions, conditions ($E_{2, \ensuremath{\alpha}}$) and ($F_{2, \ensuremath{\alpha}}$) are equivalent to the other conditions
when $\Omega$ is simply connected.
Also, $(B_\ensuremath{\alpha})$ implies all of the other conditions. Finally, the same equivalences hold if we replace each
convergence above with the convergence of a subsequence. \end{theorem} \begin{proof} $\mathbf{(A) \iff (A')}$: Let $v$ be in $(L^2(\Omega))^d$. By Lemma 7.3 of \cite{K2008VVV}, $v = w + \ensuremath{\nabla} p$, where $w$ is in $H$ and $p$ is in $H^1(\Omega)$. Then assuming $(A)$ holds, \begin{align*}
(u(t), v)
&
= (u(t), w)
\to (\overline{u}(t), w)
= (\overline{u}(t), v)
\end{align*} uniformly over $t$ in $[0, T]$, so $(A')$ holds. The converse is immediate.
\noindent $\mathbf{(B) \implies (A)}$:
This implication is immediate.
\noindent $\mathbf{(A') \implies (C)}$: Assume that ($A'$) holds and let $M$ be in
$(H^1(\Omega))^{d \times d}$. Then
\begin{align*}
(\ensuremath{\nabla} &u(t), M)
= - (u(t), \dv M) + \int_\Gamma (M \cdot \mathbf{n}) u(t) \\
&\to -(\overline{u}(t), \dv M) + \int_\Gamma (M \cdot \mathbf{n}) \overline{u} (t)
= (\ensuremath{\nabla} \overline{u}(t), M)
\text{ in } L^\ensuremath{\infty}([0, T]).
\end{align*}
But,
\begin{align*}
-(\overline{u}(t), \dv M)
= (\ensuremath{\nabla} \overline{u}(t), M)
- \int_\Gamma (M \cdot \mathbf{n}) \cdot \overline{u},
\end{align*}
giving ($C$).
\noindent $\mathbf{(C) \implies (D)}$: This follows simply because $H^1_0(\Omega) \subseteq H^1(\Omega)$.
\noindent $\mathbf{(D) \implies (A)}$: Assume ($D$) holds, and let $v$ be
in $H$. Then $v = \dv M$ for some $M$ in $(H^1_0(\Omega))^{d \times d}$ by
Corollary 7.5 of \cite{K2008VVV}, so
\begin{align*}
(u&(t), v)
= (u(t), \dv M)
= -(\ensuremath{\nabla} u(t), M) + \int_\Gamma (M \cdot \mathbf{n}) \cdot u(t) \\
& \to -(\ensuremath{\nabla} \overline{u}(t), M) + \int_\Gamma (M \cdot \mathbf{n}) \cdot \overline{u}(t)
= (\overline{u}(t), \dv M)
= (\overline{u}(t), v)
\end{align*}
uniformly over $[0, T]$.
from which ($A$) follows.
Now assume that $d = 2$.
\noindent $\mathbf{(A') \implies (E_2)}$: Assume that ($A'$) holds and let $f$ be in
$H^1(\Omega)$. Then
\begin{align*}
(\omega(t), f&)
= - (\dv u^\perp(t), f)
= (u^\perp(t), \ensuremath{\nabla} f) - \int_\Gamma (u^\perp \cdot \mathbf{n}) f \\
&= - (u(t), \ensuremath{\nabla}^\perp f) - \int_\Gamma (u^\perp \cdot \mathbf{n}) f
\to -(\overline{u}(t), \ensuremath{\nabla}^\perp f) - \int_\Gamma (\overline{u}^\perp \cdot \mathbf{n}) f \\
&= (\overline{u}^\perp(t), \ensuremath{\nabla} f) - \int_\Gamma (\overline{u}^\perp \cdot \mathbf{n}) f
= - (\dv \overline{u}^\perp(t), f)
= (\overline{\omega}(t), f)
\end{align*}
in $L^\ensuremath{\infty}([0, T])$, giving ($E_2$). Here we used $u^\perp = -\innp{u^2, u^1}$ the identity,
$\omega(u) = - \dv u^\perp$, and the fact that $\ensuremath{\nabla}^\perp f$ lies in $H$.
\noindent $\mathbf{(E_2) \implies (F_2)}$: Follows for the same reason that
$(C) \implies (D)$.
\noindent $\mathbf{(F_2) \implies (A)}$: Assume ($F_2$) holds, and let $v$ be
in $H$. Then $v = \ensuremath{\nabla}^\perp f$ for some $f$ in $H^1_0(\Omega)$ ($f$ is called
the stream function for $v$), and
\begin{align*}
(u(t), &v)
= (u(t), \ensuremath{\nabla}^\perp f)
= - (u^\perp(t), \ensuremath{\nabla} f)
= (\dv u^\perp(t), f) - \int_\Gamma (u^\perp(t) \cdot \mathbf{n}) f \\
&= - (\omega(t), f) - \int_\Gamma (u^\perp(t) \cdot \mathbf{n}) f
\to - (\overline{\omega}(t), f) - \int_\Gamma (\overline{u}^\perp(t) \cdot \mathbf{n}) f \\
&= (\dv \overline{u}^\perp(t), f) - \int_\Gamma (u^\perp(t) \cdot \mathbf{n}) f
= - (\overline{u}^\perp(t), \ensuremath{\nabla} f)
= (\overline{u}(t), \ensuremath{\nabla}^\perp f) \\
&= (\overline{u}(t), v)
\end{align*}
in $L^\ensuremath{\infty}([0, T])$, which shows that ($A$) holds.
What we have shown so far is that ($A$), ($A'$), ($B$), ($C$), and ($D$) are equivalent, as are $(E_2)$ and $(F_2)$ in two dimensions. It remains to show that $(E)$ is equivalent to these conditions as well. We do this by establishing the implications $(C) \implies (E) \implies (A)$.
\noindent $\mathbf{(C) \implies (E)}$: Follows directly from the vorticity being the antisymmetric gradient.
\noindent $\mathbf{(E) \implies (A)}$: Let $v$ be in $H$ and let $x$ be the vector field in $(H^2(\Omega) \cap H_0^1(\Omega))^d$ solving $\Delta x = v$ on $\Omega$ ($x$ exists and is unique by standard elliptic theory). Then, utilizing Lemma 7.6 of \cite{K2008VVV} twice (and suppressing the explicit dependence of $u$ and $\overline{u}$ on $t$), \begin{align}\label{e:EImpliesAEquality}
\begin{split}
(u, v)
&= (u, \Delta x)
= - (\ensuremath{\nabla} u, \ensuremath{\nabla} x) + \int_\Gamma (\ensuremath{\nabla} x \cdot \mathbf{n}) \cdot u \\
&= -2 (\omega(u), \omega(x)) - \int_\Gamma (\ensuremath{\nabla} u x) \cdot \mathbf{n}
+ \int_\Gamma (\ensuremath{\nabla} x \cdot \mathbf{n}) \cdot u \\
&= -2 (\omega(u), \omega(x)) + \int_\Gamma (\ensuremath{\nabla} x \cdot \mathbf{n}) \cdot u \\
&\to -2(\omega(\overline{u}), \omega(x))
+ \int_\Gamma (\ensuremath{\nabla} x \cdot \mathbf{n}) \cdot \overline{u} \\
&= -(\ensuremath{\nabla} \overline{u}, \ensuremath{\nabla} x)
+ \int_\Gamma (\ensuremath{\nabla} \overline{u} x) \cdot \mathbf{n}
+ \int_\Gamma (\ensuremath{\nabla} x \cdot \mathbf{n}) \cdot \overline{u} \\
&= -(\ensuremath{\nabla} \overline{u}, \ensuremath{\nabla} x)
+ \int_\Gamma (\ensuremath{\nabla} x \cdot \mathbf{n}) \cdot \overline{u}
= (\overline{u}, \Delta x)
= (\overline{u}, v),
\end{split} \end{align} giving $(A)$. \end{proof}
In the case of the high friction limit, at least in 2D, $(B_\ensuremath{\alpha})$ holds so all of the conditions hold. This means that the vorticities and gradients converge weakly in the sense of the conditions $(C_\ensuremath{\alpha})$ through $(F_{2, \ensuremath{\alpha}})$---convergence that does not include a vortex sheet on the boundary.
}
\addtocontents{toc}{\protect
}
\appendix
\section{Some Lemmas}
\noindent \refC{TraceCor}, which we used in the proof of \refT{VorticityNotBounded}, follows from \refL{Trace}.
\begin{lemma}[Trace lemma]\label{L:Trace}
Let $p \in (1, \ensuremath{\infty})$ and $q \in [1, \ensuremath{\infty}]$ be chosen
arbitrarily, and let $q'$ be \Holder conjugate to $q$.
There exists a constant $C = C(\Omega)$
such that for all $f \in W^{1, p}(\Omega)$,
\begin{align*}
\norm{f}_{L^p(\Gamma)}
\le C \norm{f}_{L^{(p - 1) q}(\Omega)}
^{1 - \frac{1}{p}}
\norm{f}_{W^{1, q'}(\Omega)}
^{\frac{1}{p}}.
\end{align*}
If $f \in W^{1, p}(\Omega)$ has mean zero or $f \in W^{1, p}_0(\Omega)$ then
\begin{align*}
\norm{f}_{L^p(\Gamma)}
\le C \norm{f}_{L^{(p - 1) q}(\Omega)}
^{1 - \frac{1}{p}}
\norm{\ensuremath{\nabla} f}_{L^{q'}(\Omega)}
^{\frac{1}{p}}.
\end{align*} \end{lemma} \begin{proof} We prove this for $f \in C^\ensuremath{\infty}(\Omega)$, the result following by the density of $C^\ensuremath{\infty}(\Omega)$ in $W^{1, p}(\Omega)$. We also prove it explicitly in two dimensions, though the proof extends easily to any dimension greater than two.
Let $\Sigma$ be a tubular neighborhood of $\Gamma$ of uniform width $\delta$, where $\delta$ is half of the maximum possible width. Place coordinates $(s, r)$ on $\Sigma$ where $s$ is arc length along $\Gamma$ and $r$ is the distance of a point in $\Sigma$ from $\Gamma$, with negative distances being inside of $\Omega$. Then $r$ ranges from $-\delta$ to $\delta$, with points $(s,0)$ lying on $\Gamma$. Also, because $\Sigma$ is only half the maximum possible width, $\abs{J}$ is bounded from below, where \begin{align*}
J = \det \frac{\ensuremath{\partial}(x, y)}{\ensuremath{\partial} (s, r)} \end{align*} is the Jacobian of the transformation from $(x, y)$ coordinates to $(s, r)$ coordinates.
Let $\varphi \in C^\ensuremath{\infty}(\Omega)$ equal 1 on $\Gamma$ and equal 0 on $\Omega \setminus \Sigma$. Then if $a$ is the arc length of $\Gamma$,
\begingroup \allowdisplaybreaks \begin{align*}
\norm{f}_{L^p(\Gamma)}^p
&= \int_0^a \int_{-\delta}^0 \pdx{}{r}
\brac{(\varphi f)(s, r)}^p \, dr \, ds \\
&\le \int_0^a \int_{-\delta}^0 \abs{\pdx{}{r}
\brac{(\varphi f)(s, r)}^p} \, dr \, ds \\
&\le \int_0^a \int_{-\delta}^0 \abs{\ensuremath{\nabla}
\brac{(\varphi f)(s, r)}^p} \, dr \, ds \\
&= \pr{\inf_{\supp \varphi} \abs{J}}^{-1}
\int_0^a \int_{-\delta}^0 \abs{\ensuremath{\nabla}
\brac{(\varphi f)(s, r)}^p}
\inf_{\supp \varphi} \abs{J}
\, dr \, ds \\
&\le \pr{\inf_{\supp \varphi} \abs{J}}^{-1}
\int_0^a \int_{-\delta}^0 \abs{\ensuremath{\nabla}
\brac{(\varphi f)(s, r)}^p}
\abs{J}
\, dr \, ds \\
&= C
\int_{\Sigma \cap \Omega} \abs{\ensuremath{\nabla}
\brac{(\varphi f)(x, y)}^p}
\, dx \, dy \\
&\le C
\norm{\ensuremath{\nabla} \brac{\varphi f}^p}_{L^1(\Omega)} \\
&= C p
\norm{(\varphi f)^{p - 1}
\ensuremath{\nabla} \brac{\varphi f}}_{L^1(\Omega)} \\
&\le C p
\norm{(\varphi f)^{p - 1}}_{L^q}
\norm{\ensuremath{\nabla} \brac{\varphi f}}_{L^{q'}(\Omega)} \\
&= C p
\brac{\int_{\Omega}{(\varphi f)^{{(p - 1)} q}}}
^{\frac{1}{q}}
\norm{\ensuremath{\nabla} \brac{\varphi f}}_{L^{q'}(\Omega)} \\
&= C p
\norm{\varphi f}_{L^{(p - 1) q}(\Omega)}
^{p - 1}
\norm{\varphi \ensuremath{\nabla} f + f \ensuremath{\nabla} \varphi}
_{L^{q'}(\Omega)} \\
&\le C p
\norm{f}_{L^{(p - 1) q}(\Omega)}
^{p - 1}
\norm{f}
_{W^{1, q'}(\Omega)}. \end{align*} \endgroup The first inequality then follows from raising both sides to the $\frac{1}{p}$ power and using $p^{1/p} \le e^{1/e}$. The second inequality follows from Poincar\'e's inequality. \end{proof}
\begin{remark}
The trace inequality in \refL{Trace} is a folklore result,
most commonly referenced in the special case where
$p = q = q' = 2$. We proved it for completeness, since we
could not find a proof (or even clear statement) in the literature.
We also note that a simple, but incorrect, proof of it
(for $p = q = q' = 2$) is
to apply the \textit{invalid} trace inequality from
$H^{\frac{1}{2}}(\Omega)$ to $L^2(\Gamma)$ then use
Sobolev interpolation.
\end{remark}
Note that in \cref{L:Trace} it could be that $(p - 1) q \in (0, 1)$, though in our application of it in \cref{S:LpNormsBlowUp}, via \cref{C:TraceCor}, we have $(p - 1) q = 2$. Also, examining the last step in the proof, we see that for $p = 1$ the lemma reduces to $\norm{f}_{L^1(\Gamma)} \le C \norm{f}_{W^{1, q'}(\Omega)}$, which is not useful.
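As a simple illustration of the case $p = q = q' = 2$ (a numerical sketch with our own choice of test family, not part of the proof), one can evaluate both sides of the first inequality on the unit disk for $f(r) = r^n$; the ratio printed below stays bounded (it tends to $\sqrt{2}$), consistent with the lemma.
\begin{verbatim}
import numpy as np

# Trace interpolation check on the unit disk with f(r) = r^n and
# p = q = q' = 2.  Closed-form norms (valid for this family only):
#   ||f||_{L^2(boundary)}^2 = 2 pi,  ||f||_{L^2(disk)}^2 = pi / (n + 1),
#   ||grad f||_{L^2(disk)}^2 = pi n.
for n in [1, 2, 5, 10, 50, 200, 1000]:
    lhs = np.sqrt(2.0 * np.pi)
    l2 = np.sqrt(np.pi / (n + 1.0))
    h1 = np.sqrt(np.pi / (n + 1.0) + np.pi * n)
    ratio = lhs / np.sqrt(l2 * h1)
    print(f"n = {n:5d}   ||f||_L2(Gamma) / (||f||_L2^1/2 ||f||_H1^1/2) = {ratio:6.3f}")
\end{verbatim}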
\begin{cor}\label{C:TraceCor}
Let $p, q, q'$ be as in \cref{L:Trace}.
For any $v \in H$,
\begin{align*}
\norm{v}_{L^p(\Gamma)}
\le C \norm{v}_{L^{(p - 1) q}(\Omega)}
^{1 - \frac{1}{p}}
\norm{\ensuremath{\nabla} v}_{L^{q'}(\Omega)}
^{\frac{1}{p}}
\end{align*}
and for any $v \in V \cap H^2(\Omega)$,
\begin{align*}
\norm{\curl v}_{L^p(\Gamma)}
\le C \norm{\curl v}_{L^{(p - 1) q}(\Omega)}
^{1 - \frac{1}{p}}
\norm{\ensuremath{\nabla} \curl v}_{L^{q'}(\Omega)}
^{\frac{1}{p}}.
\end{align*} \end{cor} \begin{proof}
If $v \in H$, then
\begin{align*}
\int_\Omega v^i
& = \int_\Omega v \cdot \ensuremath{\nabla} x_i
= - \int_\Omega \dv v \, x_i
+ \int_\Gamma (v \cdot \bm{n}) x_i
= 0.
\end{align*}
If $v \in V$ then
\begin{align*}
\int_\Omega \curl v
&= - \int_\Omega \dv v^\perp
= - \int_{\ensuremath{\partial} \Omega} v^\perp \cdot \bm{n}
= 0.
\end{align*}
Thus, \refL{Trace} can be applied to $v_1, v_2$, and $\curl v$, giving the result. \end{proof}
} {
}
\end{document}
\begin{document}
\begin{abstract} We propose the notion of GAS numerical semigroup which generalizes both almost symmetric and 2-AGL numerical semigroups. Moreover, we introduce the concept of almost canonical ideal which generalizes the notion of canonical ideal in the same way almost symmetric numerical semigroups generalize symmetric ones. We prove that a numerical semigroup with maximal ideal $M$ and multiplicity $e$ is GAS if and only if $M-e$ is an almost canonical ideal of $M-M$. This generalizes a result of Barucci about almost symmetric semigroups and a theorem of Chau, Goto, Kumashiro, and Matsuoka about 2-AGL semigroups. We also study the transfer of the GAS property from a numerical semigroup to its gluing, numerical duplication and dilatation. \end{abstract}
\keywords{Almost symmetric numerical semigroup, almost Gorenstein ring, 2-AGL semigroup, 2-AGL ring, canonical ideal.}
\title{Almost canonical ideals and GAS numerical semigroups}
\section*{Introduction}
The notion of Gorenstein ring has turned out to be of great importance in commutative algebra, algebraic geometry and other areas of mathematics, and in the last decades many researchers have developed generalizations of this concept, obtaining rings with similar properties in certain respects. With this aim, in 1997 Barucci and Fr\"oberg \cite{BF} introduced the notion of almost Gorenstein ring, inspired by numerical semigroup theory. We recall that a numerical semigroup $S$ is simply an additive submonoid of the natural numbers $\mathbb{N}$ with finite complement in $\mathbb{N}$. The simplest way to relate it to ring theory is to associate with $S$ the ring $k[[S]]=k[[t^s \mid s \in S]]$, where $k$ is a field and $t$ is an indeterminate. More generally, it is possible to associate a numerical semigroup $v(R)$ with every one-dimensional analytically irreducible ring $R$. In this case a celebrated result of Kunz \cite{K} ensures that $R$ is Gorenstein if and only if $v(R)$ is a symmetric semigroup; see also \cite[Theorem 4.4.8]{BH} for a proof in the particular case of $k[[S]]$. In \cite{BF} the notions of almost symmetric numerical semigroup and almost Gorenstein ring are introduced, the latter being limited to analytically unramified rings. It turns out that $k[[S]]$ is almost Gorenstein if and only if $S$ is almost symmetric.
More recently this notion has been generalized to one-dimensional local rings \cite{GMP} and to higher dimension \cite{GTT}. Moreover, in \cite{CGKM} the notion of $n$-AGL ring is introduced in order to stratify Cohen-Macaulay rings. Indeed, a ring is almost Gorenstein if and only if it is either $1$-AGL or $0$-AGL, with $0$-AGL equivalent to being Gorenstein. In this respect $2$-AGL rings are close to being almost Gorenstein and for this reason their properties have been studied in depth in \cite{CGKM,GIT}. The numerical semigroup case is also studied in \cite{CGKM}, where $2$-AGL numerical semigroups are close to being almost symmetric.
In this paper we introduce the class of {\em Generalized Almost Symmetric numerical semigroups}, briefly GAS numerical semigroups, which includes symmetric, almost symmetric and 2-AGL numerical semigroups, but not 3-AGL ones. Moreover, if $S$ has maximal embedding dimension and is GAS, then it is either almost symmetric or 2-AGL. Our original motivation for introducing this class is a result on 2-AGL numerical semigroups that partially generalizes a property of almost symmetric semigroups. More precisely, let $S$ be a numerical semigroup with multiplicity $e$ and let $M$ be its maximal ideal. In \cite[Corollary 8]{BF} it is proved that $M-M$ is symmetric if and only if $S$ is almost symmetric with maximal embedding dimension. If we do not assume that $S$ has maximal embedding dimension, it holds that $S$ is almost symmetric if and only if $M-e$ is a canonical ideal of $M-M$ (indeed $S$ has maximal embedding dimension exactly when $M-e=M-M$, see \cite[Theorem 5.2]{B}). In \cite[Corollary 5.4]{CGKM} it is shown that $S$ is 2-AGL if and only if $M-M$ is almost symmetric and not symmetric, provided that $S$ has maximal embedding dimension.
Hence, it is natural to investigate what happens to $M-M$, for a 2-AGL semigroup, if we do not make any assumption on its embedding dimension. It turns out that $M-e$ is an ideal of $M-M$ satisfying some equivalent conditions which are the analogue, for ideals, of the defining conditions of an almost symmetric semigroup (cf. Definition 2.1 and Proposition \ref{almost canonical ideal}); for this reason we call the ideals in this class \emph{almost canonical ideals}. However, the converse is not true: there exist numerical semigroups $S$ such that $M-e$ is an almost canonical ideal of $M-M$, but that are not 2-AGL. This fact led us to look for the numerical semigroups satisfying this property, and we found that they naturally generalize 2-AGL semigroups (this is evident if we look at $2K\setminus K$, where $K$ is the canonical ideal of $S$, cf. Proposition 3.1 and Definition 3.2); moreover, as we said above, this class coincides with the union of the classes of 2-AGL and almost symmetric semigroups if we assume maximal embedding dimension; hence, we call them Generalized Almost Symmetric (briefly GAS) semigroups. It turns out that GAS semigroups are interesting in many respects; for example, if $S$ is GAS, it is possible to control both the semigroup generated by its canonical ideal (which plays a fundamental role in \cite{CGKM}; cf. Theorem \ref{Livelli più alti}) and its pseudo-Frobenius numbers (cf. Proposition \ref{PF GAS}).
Hence, in this paper, after recalling the basic definitions and notations, we introduce, in Section 2, the concept of almost canonical ideal. We show in which respect they generalize canonical ideals and we notice that, similarly to the canonical case, a numerical semigroup $S$ is almost symmetric if and only if it is an almost canonical ideal of itself. Moreover, we prove several equivalent conditions for a semigroup ideal to be almost canonical (cf. Proposition \ref{almost canonical ideal}) and we show how to find all the almost canonical ideals of a numerical semigroup and how to count them (Corollary \ref{Number of almost canonical ideals}).
In Section 3 we develop the theory of GAS semigroups proving many equivalent conditions (see Proposition \ref{Characterizations GAS}), exploring their properties (cf. Theorem \ref{Livelli più alti} and Proposition \ref{PF GAS}) and relating them with other classes of numerical semigroups that have been recently introduced to generalize almost symmetric semigroups. The main result is Theorem \ref{T. Almost Canonical ideal of M-M}, where it is proved that $S$ is GAS if and only if $M-e$ is an almost canonical ideal of $M-M$.
Finally in Section 4 we study the transfer of the GAS property from $S$ to some numerical semigroup constructions: gluing in Theorem \ref{gluing}, numerical duplication in Theorem \ref{Numerical duplication S-<K>} and dilatation in Proposition \ref{dilatation}.
Several computations are performed by using the GAP system \cite{GAP} and, in particular, the NumericalSgps package \cite{DGM}.
\section{Notation and basic definitions}
A numerical semigroup $S$ is a submonoid of the natural numbers $\mathbb{N}$ such that $|\mathbb{N} \setminus S| < \infty$. Therefore, the maximum of $\mathbb{N} \setminus S$ exists; it is called the Frobenius number of $S$ and it is denoted by $\F(S)$. Given $s_1, \dots, s_{\nu} \in \mathbb{N}$ we set $\langle s_1, \dots, s_{\nu} \rangle=\{\lambda_1 s_1 + \dots + \lambda_{\nu} s_{\nu} \mid \lambda_1, \dots, \lambda_{\nu} \in \mathbb{N} \}$, which is a numerical semigroup if and only if $\gcd(s_1, \dots, s_{\nu})=1$. We say that $s_1, \dots, s_{\nu}$ are minimal generators of $\langle s_1, \dots, s_{\nu} \rangle$ if it is not possible to delete one of them and obtain the same semigroup. It is well-known that a numerical semigroup has a unique system of minimal generators, which is finite, and its cardinality is called the embedding dimension of $S$. The minimum non-zero element of $S$ is said to be the multiplicity of $S$ and we denote it by $e$. It is always greater than or equal to the embedding dimension of $S$ and we say that $S$ has maximal embedding dimension if they are equal. Unless otherwise specified, we assume that $S \neq \mathbb{N}$.
A set $I \subseteq \mathbb{Z}$ is said to be a relative ideal of $S$ if $I+S\subseteq I$ and there exists $z \in S$ such that $z+I \subseteq S$. If it is possible to choose $z=0$, i.e. $I \subseteq S$, we simply say that $I$ is an ideal of $S$. Two very important relative ideals are $M(S)=S\setminus \{0\}$, which is an ideal, called the maximal ideal of $S$, and $K(S)=\{x \in \mathbb{N} \mid \F(S)-x \notin S\}$. We refer to the latter as the standard canonical ideal of $S$ and we say that a relative ideal $I$ of $S$ is canonical if $I=x+K(S)$ for some $x \in \mathbb{Z}$. If the semigroup is clear from the context, we write $M$ and $K$ in place of $M(S)$ and $K(S)$. Given two relative ideals $I$ and $J$ of $S$, we set $I-J = \{x \in \mathbb{Z} \mid x+J \subseteq I\}$, which is a relative ideal of $S$. For every relative ideal $I$ it holds that $K-(K-I)=I$, in particular $K-(K-S)=S$. Moreover, an element $x$ is in $I$ if and only if $\F(S)-x \notin K-I$, see \cite[Hilfssatz 5]{J}. As a consequence we get that the cardinalities of $I$ and $K-I$ are equal.
Also, if $I \subseteq J$ are two relative ideals, then $|J \setminus I|=|(K-I)\setminus (K-J)|$. We now collect some important definitions that we are going to generalize in the next section.
\begin{definition} \label{Basic definitions} \rm Let $S$ be a numerical semigroup. \begin{enumerate} \item The {\it pseudo-Frobenius numbers} of $S$ are the elements of the set $\PF(S)=(S-M)\setminus S$.
\item The {\it type} of $S$ is $t(S)=|\PF(S)|$. \item $S$ is {\it symmetric} if and only if $S=K$. \item $S$ is {\it almost symmetric} if and only if $S-M=K \cup \{\F(S)\}$. \end{enumerate} \end{definition}
We note that $M-M=S \cup \PF(S)$. Given $0 \leq i \leq e-1$, let $\omega_i$ be the smallest element of $S$ that is congruent to $i$ modulo $e$. A fundamental tool in numerical semigroup theory is the so-called Ap\'ery set of $S$, defined as $\Ap(S)=\{\omega_0=0, \omega_1, \dots, \omega_{e-1}\}$. In $\Ap(S)$ we define the partial ordering $x \leq_S y$ if and only if $y= x+s$ for some $s \in S$ and we denote the maximal elements of $\Ap(S)$ with respect to $\leq_S$ by ${\rm Max}_{\leq_S}(\Ap(S))$. With this notation $\PF(S)=\{\omega -e \mid \omega \in {\rm Max}_{\leq_S}(\Ap(S)) \}$, see \cite[Proposition 2.20]{RG}. We also recall that $S$ is symmetric if and only if $t(S)=1$, which is also equivalent to saying that $k[[S]]$ has type $1$ for every field $k$, i.e. $k[[S]]$ is Gorenstein. Also for almost symmetric semigroups many useful characterizations are known; for instance, it is easy to see that our definition is equivalent to $M+K \subseteq M$, but see also \cite[Theorem 2.4]{N} for another useful characterization related to the Ap\'ery set of $S$ and its pseudo-Frobenius numbers.
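All of the invariants recalled in this section are determined by finitely many data, so they can be computed by brute force. The following Python sketch is only an illustration of the definitions (the helper names are ours and are not taken from the \texttt{NumericalSgps} package \cite{DGM}, which provides far more efficient routines): it computes the Frobenius number, the standard canonical ideal, the Ap\'ery set and the pseudo-Frobenius numbers of a semigroup given by generators with greatest common divisor $1$, and it checks the description of $\PF(S)$ through the maximal elements of $\Ap(S)$ recalled above.

\begin{verbatim}
# Brute-force illustration of the basic invariants (illustrative helper names).
def members(gens):
    # membership table of S = <gens> on [0, F(S)+e], assuming gcd(gens) = 1
    e = min(gens)
    mem, n, run = [True], 0, 1           # 0 lies in S
    while run < e:                       # stop after e consecutive elements of S
        n += 1
        in_s = any(n >= g and mem[n - g] for g in gens)
        mem.append(in_s)
        run = run + 1 if in_s else 0
    return mem                           # mem[i] == (i in S)

def frobenius(mem):
    gaps = [i for i, m in enumerate(mem) if not m]
    return max(gaps) if gaps else -1

def canonical(mem):                      # K(S) inside [0, F(S)]
    F = frobenius(mem)
    return {x for x in range(F + 1) if not mem[F - x]}

def apery(gens, mem):                    # smallest element of S in each class mod e
    e = min(gens)
    return [next(s for s in range(i, len(mem), e) if mem[s]) for i in range(e)]

def pseudo_frobenius(mem):               # PF(S) = (S - M) \ S
    F = frobenius(mem)
    M = [s for s in range(1, len(mem)) if mem[s]]
    return [x for x in range(1, F + 1) if not mem[x]
            and all(x + m > F or mem[x + m] for m in M)]

gens = [5, 6, 7]
mem = members(gens)
F, Ap, PF = frobenius(mem), apery(gens, mem), pseudo_frobenius(mem)
print(F, Ap, PF)                         # 9 [0, 6, 7, 13, 14] [8, 9]
max_ap = [w for w in Ap if not any(v > w and mem[v - w] for v in Ap)]
print(sorted(w - min(gens) for w in max_ap) == PF)           # True
\end{verbatim}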
\section{Almost canonical ideals of a numerical semigroup}
If $I$ is a relative ideal of $S$, the set $\mathbb{Z}\setminus I$ has a maximum that we denote by $\F(I)$. We set $\widetilde{I}=I+(\F(S)-\F(I))$, that is the unique relative ideal $J$ isomorphic to $I$ for which $\F(S)=\F(J)$, and we note that $\widetilde{I} \subseteq K \subseteq \mathbb{N}$ for every $I$. The following is a generalization of Definition \ref{Basic definitions}.
\begin{definition} \rm Let $I$ be a relative ideal of a numerical semigroup $S$. \begin{enumerate} \item The {\it pseudo-Frobenius numbers} of $I$ are the elements of the set $\PF(I)=(I-M)\setminus I$.
\item The {\it type} of $I$ is $t(I)=|\PF(I)|$. \item $I$ is {\it canonical} if and only if $\widetilde{I}=K$. \item $I$ is {\it almost canonical} if and only if $\widetilde{I}-M=K \cup \{\F(S)\}$. \end{enumerate} \end{definition}
\begin{remark} \rm \label{Rem as} {\bf 1.} $S$ is an almost canonical ideal of itself if and only if it is an almost symmetric semigroup. \\ {\bf 2.} $M$ is an almost canonical ideal of $S$ if and only if $S$ is an almost symmetric semigroup. Indeed, $M-M=S-M$, since $S \neq \mathbb{N}$. Moreover, $t(M)=t(S)+1$. \\ {\bf 3.} It holds that $K-M=K \cup \{\F(S)\}$. One containment is trivial, so let $x \in ((K-M) \setminus (K \cup \{\F(S)\}))$. Then $0 \neq \F(S)-x \in S$ and, thus, $\F(S)=(\F(S)-x)+x \in M+ (K-M) \subseteq K$ yields a contradiction. In particular, a canonical ideal is almost canonical. \\ {\bf 4.} Since $\F(S)=\F(\widetilde{I})$, it is always in $\widetilde{I}-M$. Moreover, we claim that $(\widetilde{I}-M) \subseteq K \cup \{\F(S)\}$. Indeed, if $x \in (\widetilde{I}-M)\setminus\{\F(S)\}$ and $x \notin K$, then $\F(S)-x \in M$ and $\F(\widetilde{I})=\F(S)=(\F(S)-x)+x \in \widetilde{I}$. In addition, $\widetilde{I}$ is always contained in $\widetilde{I}-M$ because it is a relative ideal of $S$. Hence, $I$ is an almost canonical ideal of $S$ if and only if $K \setminus \widetilde{I} \subseteq (\widetilde{I}-M)$. \end{remark}
Given a relative ideal $I$ of $S$, the Ap\'ery set of $I$ is $\Ap(I)=\{i \in I \mid i-e \notin I\}$. As in the semigroup case, in $\Ap(I)$ we define the partial ordering $x \leq_S y$ if and only if $y= x+s$ for some $s \in S$ and we denote by ${\rm Max}_{\leq_S}(\Ap(I))$ the maximal elements of $\Ap(I)$ with respect to $\leq_S$.
\begin{proposition} Let $I$ be a relative ideal of $S$. The following statements hold: \begin{enumerate} \item $\PF(I)= \{i-e \mid i \in {\rm Max}_{\leq_S}(\Ap(I)) \}$; \item $I$ is canonical if and only if its type is $1$. \end{enumerate} \end{proposition}
\begin{proof} (1) An integer $i \in I$ is in ${\rm Max}_{\leq_S}(\Ap(I))$ if and only if $i-e \notin I$ and $s+i \notin \Ap(I)$, i.e. $s+i-e \in I$, for every $s \in M$. This is equivalent to saying that $i-e \in (I-M)\setminus I=\PF(I)$. \\ (2) Since $\F(S)\in \widetilde I-M$, we have $t(\widetilde I)=t(I)=1$ if and only if $\widetilde{I}-M=\widetilde I \cup \{\F(S)\}$. Therefore, a canonical ideal has type 1 by Remark \ref{Rem as}.3. Conversely, assume that $t(\widetilde{I})=1$ and let $x \notin \widetilde{I}$. Since $\widetilde{I} \subseteq K$, we only need to prove that $x \notin K$. By (1), there is a unique maximal element in $\Ap(\widetilde{I})$ with respect to $\leq_S$ and, clearly, it is $\F(S)+e$. Let $0 \neq \lambda \in \mathbb{N}$ be such that $x+ \lambda e \in \Ap(\widetilde{I})$. Then, there exists $y \in S$ such that $x+\lambda e + y = \F(S)+e$ and $x=\F(S)-(y+(\lambda-1)e) \notin K$, since $y+(\lambda-1)e \in S$. \end{proof}
Let $g(S)=|\mathbb{N}\setminus S|$ denote the genus of $S$ and let $g(I)=|\mathbb{N}\setminus \widetilde{I}|$. We recall that $2g(S) \geq \F(S) + t(S)$ and the equality holds if and only if $S$ is almost symmetric, see, e.g., \cite[Proposition 2.2 and Proposition-Definition 2.3]{N}.
\begin{proposition} \label{almost canonical ideal} Let $I$ be a relative ideal of $S$. Then $g(I)+g(S) \geq \F(S) + t(I)$. Moreover, the following conditions are equivalent: \begin{enumerate} \item $I$ is almost canonical; \item $g(I)+g(S)=\F(S)+t(I)$; \item $\widetilde{I}-M=K-M$; \item $K-(M-M) \subseteq \widetilde{I}$; \item If $x \in \PF(I)\setminus \{\F(I)\}$, then $\F(I)-x \in \PF(S)$. \end{enumerate} \end{proposition}
\begin{proof}
Clearly, $t(I)=t(\widetilde{I})$ and $g(I)-t(\widetilde{I})=|\mathbb{N} \setminus \widetilde{I}|-|(\widetilde{I}-M)\setminus \widetilde{I}|=|\mathbb{N}\setminus (\widetilde{I}-M)|$. Moreover, since $\F(S)+1-g(S)$ is the number of elements of $S$ smaller than $\F(S)+1$, it holds that $\F(S)-g(S)=|\mathbb{N}\setminus K|-1=|\mathbb{N} \setminus (K \cup \{\F(S)\})|$. We have $\widetilde{I}-M \subseteq K \cup \{\F(S)\}$ by Remark \ref{Rem as}.4, then $g(I)-t(I) \geq \F(S) -g(S)$ and the equality holds if and only if $\widetilde{I}-M = K \cup \{\F(S)\}$, i.e. $I$ is almost canonical. Hence, (1) $\Leftrightarrow$ (2). \\ (1) $\Leftrightarrow$ (3). We have already proved that $K-M=K \cup \{\F(S)\}$ in Remark \ref{Rem as}.3. \\
(1) $\Rightarrow$ (4). The thesis is equivalent to $M-M \supseteq K-\widetilde{I}$. Let $x \in K-\widetilde{I}$ and assume by contradiction that there exists $m \in M$ such that $x+m \notin M$. Then, $\F(S)-x-m \in K \cup \{\F(S)\}=\widetilde{I}-M$ and, so, $\F(S)-x \in \widetilde{I}$. Since $x \in K-\widetilde{I}$, this implies $\F(S) \in K$, that is a contradiction. \\ (4) $\Rightarrow$ (1). Let $x \in K$. It is enough to prove that $x \in \widetilde{I}-M$. Suppose by contradiction that there exists $m \in M$ such that $x+m \notin \widetilde{I}\supseteq K-(M-M)$. In particular, $x+m\notin K-(M-M)$ and so $\F(S)-(x+m) \in M-M$. This implies $\F(S)-x \in M$, that is a contradiction because $x \in K$. \\ (1) $\Rightarrow$ (5) We notice that $\PF(\widetilde{I})=\{x+\F(S)-\F(I) \mid x \in \PF(I)\}$. Let $x \in \PF(I)\setminus \{\F(I)\}$ and let $y=x+\F(S)-\F(I) \in \PF(\widetilde I) \setminus \{ \F(S)\}$. We first note that $\F(S)-y \notin S$, otherwise $\F(S)=y+(\F(S)-y) \in \widetilde I$. Assume by contradiction that $\F(S)-y \notin \PF(S)$, i.e. there exists $m \in M$ such that $\F(S)-y+m \notin S$. This implies that $y-m \in K \subseteq \widetilde{I}-M$ by (1) and, thus, $y=(y-m)+m \in \widetilde I$ yields a contradiction. Hence, $\F(I)-x=\F(S)-y \in \PF(S)$. \\ (5) $\Rightarrow$ (4) Assume by contradiction that there exists $x \in (K-(M-M))\setminus \widetilde I$. It easily follows from the definition that there is $s \in S$ such that $x+s \in \PF(\widetilde{I})$. Then, $\F(S)-x-s \in \PF(S) \cup \{0\} \subseteq M-M$ by (5) and $\F(S)-s=x +(\F(S)-x-s) \in (K-(M-M)) + (M-M) \subseteq K$ gives a contradiction. \end{proof}
\begin{remark} \rm {\bf 1.} In \cite[Theorem 2.4]{N} it is proved that a numerical semigroup $S$ is almost symmetric if and only if $\F(S)-f \in \PF(S)$ for every $f \in \PF(S) \setminus \{\F(S)\}$. Hence, the last condition of Proposition \ref{almost canonical ideal} can be considered a generalization of this result. \\ {\bf 2.} Almost canonical ideals naturally arise characterizing the almost symmetry of the numerical duplication $S \! \Join^b \! I$ of $S$ with respect to the ideal $I$ and $b \in S$, a construction introduced in \cite{DS}. Indeed \cite[Theorem 4.3]{DS} says that $S \! \Join^b \! I$ is almost symmetric if and only if $I$ is almost canonical and $K-\widetilde{I}$ is a numerical semigroup. \\ {\bf 3.} Let $T$ be an almost symmetric numerical semigroup with odd Frobenius number (or, equivalently, odd type). Let $b$ be an odd integer such that $2b \in T$ and set $I=\{x \in \mathbb{Z} \mid 2x+b \in T\}$. Then, \cite[Proposition 3.3]{S} says that $T$ can be realized as a numerical duplication $T=S \! \Join^b \! I$, where $S=T/2=\{y \in \mathbb{Z} \mid 2y \in T\}$, while \cite[Theorem 3.7]{S} implies that $I$ is an almost canonical ideal of $S$. In general this is not true if the Frobenius number of $T$ is even. \end{remark}
Since $\F(K-(M-M))= \F(\widetilde{I})=\F(K)$ and $\widetilde{I} \subseteq K$ for every relative ideal $I$, Condition (4) of Proposition \ref{almost canonical ideal} allows one to find all the almost canonical ideals of a numerical semigroup. Clearly, it is enough to focus on the relative ideals with Frobenius number $\F(S)$.
\begin{corollary} \label{Number of almost canonical ideals} Let $S$ be a numerical semigroup with type $t$. If $I$ is almost canonical, then $t(I)\leq t+1$. Moreover, for every integer $i$ such that $1 \leq i \leq t+1$, there are exactly $\binom{t}{i-1}$ almost canonical ideals of $S$ with Frobenius number $\F(S)$ and type $i$. In particular, there are exactly $2^{t}$ almost canonical ideals of $S$ with Frobenius number $\F(S)$. \end{corollary}
\begin{proof}
Let $C=\{s \in S \mid s>\F(S)\}=K-\mathbb{N}$ be the conductor of $S$ and let $n(S)=|\{s \in S \mid s<\F(S)\}|$. It is straightforward to see that $g(S)+n(S)=\F(S)+1$. If $I$ is almost canonical, Proposition \ref{almost canonical ideal} implies that \begin{align*}
t(I)&=g(I)+g(S)-\F(S)\leq |\mathbb{N}\setminus (K-(M-M))|-n(S)+1 \\
&=|(M-M)\setminus (K-\mathbb{N})|-n(S)+1
=|(M-M)\setminus C|-n(S)+1\\
&=|(M-M)\setminus S|+|S \setminus C|-n(S)+1=t+n(S)-n(S)+1=t+1. \end{align*}
By Proposition \ref{almost canonical ideal} an ideal $I$ with $\F(I)=\F(S)$ is almost canonical if and only if $K-(M-M) \subseteq I \subseteq K$ and we notice that $|K \setminus (K-(M-M))|=|(M-M)\setminus S|=t$. Let $A \subseteq (K \setminus (K-(M-M)))$ and consider $I=(K-(M-M)) \cup A$. We claim that $I$ is an ideal of $S$. Indeed, let $x \in A$, $m \in M$ and $y \in (M-M)$. It follows that $m+y \in M$ and, then, $x+m+y \in K$, since $K$ is an ideal. Therefore, $x+m \in K-(M-M)$ and $I$ is an ideal of $S$. Moreover, by \cite[Lemma 4.7]{DS}, $t(I)=|(K-I)\setminus S|+1=|K\setminus I|+1=t+1-|A|$ and the thesis follows, because there are $\binom{t}{i-1}$ subsets of $K\setminus (K-(M-M))$ with cardinality $t+1-i$. \end{proof}
If $S$ is a symmetric semigroup, the only almost canonical ideals with Frobenius number $\F(S)$ are $M$ and $S$. In this case $t(M)=t(S)+1=2$. If $S$ is pseudo-symmetric, the four almost canonical ideals with Frobenius number $\F(S)$ are $M$, $S$, $M \cup \{\F(S)/2\}$ and $K$. In this case $t(M)=3$, $t(S)=t(M \cup \{\F(S)/2\})=2$ and $t(K)=1$.
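The construction used in the proof of Corollary \ref{Number of almost canonical ideals} is effective: every almost canonical ideal with Frobenius number $\F(S)$ is obtained from $K-(M-M)$ by adding a subset of $K \setminus (K-(M-M))$. The sketch below, which reuses the illustrative helpers \texttt{members}, \texttt{frobenius}, \texttt{canonical} and \texttt{pseudo\_frobenius} of the sketch in Section 1, enumerates these ideals for the pseudo-symmetric semigroup $\langle 3,4,5\rangle$ and checks the equality $g(I)+g(S)=\F(S)+t(I)$ of Proposition \ref{almost canonical ideal} for each of them.

\begin{verbatim}
from itertools import combinations

gens = [3, 4, 5]                    # pseudo-symmetric: PF(S) = {1, 2}, t(S) = 2
mem = members(gens)                 # helpers of the sketch in Section 1
F = frobenius(mem)
K = canonical(mem)                  # K(S) inside [0, F]; every x > F lies in K
PF = pseudo_frobenius(mem)
S = {s for s in range(F + 1) if mem[s]}
MM = sorted(S | set(PF))            # (M - M) inside [0, F]
Ms = [s for s in range(1, len(mem)) if mem[s]]    # M up to F + e

def in_ideal(x, I):                 # ideals are stored by their part in [0, F]
    return x > F or (0 <= x <= F and x in I)

def type_of(I):                     # t(I) = |(I - M) \ I|, here F(I) = F(S)
    return sum(1 for x in range(-min(gens), F + 1) if not in_ideal(x, I)
               and all(in_ideal(x + m, I) for m in Ms))

KmMM = {x for x in range(F + 1)     # K - (M - M), computed inside [0, F]
        if all(x + z > F or (x + z) in K for z in MM)}
free = sorted(K - KmMM)             # exactly t(S) elements

g_S = (F + 1) - len(S)
types = []
for r in range(len(free) + 1):
    for A in combinations(free, r):
        I = KmMM | set(A)           # an almost canonical ideal with F(I) = F(S)
        t_I = type_of(I)
        assert (F + 1 - len(I)) + g_S == F + t_I     # g(I) + g(S) = F(S) + t(I)
        types.append(t_I)
print(len(types), sorted(types))    # 4 [1, 2, 2, 3]: the ideals K, S, M u {F/2}, M
\end{verbatim}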
\section{GAS numerical semigroups}
In \cite{CGKM} the notion of $n$-almost Gorenstein local ring, briefly $n$-AGL ring, is introduced, where $n$ is a non-negative integer. These rings generalize almost Gorenstein ones, which are obtained when either $n=0$, in which case the ring is Gorenstein, or $n=1$. In particular, the case of 2-AGL rings, which are close to being almost Gorenstein, is studied in \cite{CGKM}; see also \cite{GIT}.
Given a numerical semigroup $S$ with standard canonical ideal $K$ we denote by $\langle K \rangle$ the numerical semigroup generated by $K$. Following \cite{CGKM} we say that $S$ is $n$-AGL if $|\langle K \rangle \setminus K|=n$. It follows that $S$ is symmetric if and only if it is 0-AGL, whereas it is almost symmetric and not symmetric if and only if it is 1-AGL.
It is easy to see that a numerical semigroup is 2-AGL if and only if $2K=3K$ and $|2K\setminus K|=2$, see \cite[Theorem 1.4]{CGKM} for a proof in a more general context. We now give another easy characterization that will lead us to generalize this class.
\begin{proposition} A numerical semigroup $S$ is 2-AGL if and only if $2K=3K$ and $2K \setminus K=\{\F(S)-x, \F(S)\}$ for a minimal generator $x$ of $S$. \end{proposition}
\begin{proof} One implication is trivial, so assume that $S$ is 2-AGL. Since $S$ is not symmetric, there exists $k \in \mathbb{N}$ such that $k$ and $\F(S)-k$ are in $K$ and so $\F(S) \in 2K \setminus K$. Let now $a \in (2K \setminus K) \setminus \{\F(S)\}$. Since $a \notin K$, we have $\F(S)-a \in S$. Assume that $\F(S)-a=s_1+s_2$ with $s_1,s_2 \in S \setminus \{0\}$. It follows that $\F(S)-s_1=a+s_2 \in 2K$, since $2K$ is a relative ideal, and by definition $\F(S)-s_1 \notin K$. Therefore, $\{a,\F(S)-s_1,\F(S)\} \subseteq 2K \setminus K$ and this is a contradiction, since $S$ is 2-AGL. Hence, $a=\F(S)-x$, where $x$ is a minimal generator of $S$. \end{proof}
In light of the previous proposition we propose the following definition.
\begin{definition} \rm We say that $S$ is a {\it generalized almost symmetric} numerical semigroup, briefly {\rm GAS} numerical semigroup, if either $2K=K$ or $2K \setminus K=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}$ for some $r \geq 0$ and some minimal generators $x_1, \dots, x_r$ of $S$ such that $x_i-x_j \notin \PF(S)$ for every $i,j$. \end{definition}
The last condition may seem less natural, but the semigroups satisfying it behave better. For instance, in Theorem \ref{Livelli più alti} we will see that this condition ensures that every element in $\langle K \rangle \setminus K$ can be written as $\F(S)-x$ for a minimal generator $x$ of $S$.
We recall that $S$ is symmetric if and only if $2K=K$ and it is almost symmetric exactly when $2K \setminus K \subseteq \{\F(S)\}$.
\begin{examples} \rm {\bf 1.} Let $S= \langle 9, 24, 39, 43, 77 \rangle$. Then, $\PF(S)=\{58, 73, 92, 107\}$ and $2K \setminus K=\{107-77,107-43,107-39,107-24,107-9,107\}$. Hence, $S$ is a GAS semigroup. \\ {\bf 2.} If $S=\langle 7,9,15 \rangle$, we have $2K=3K$ and $2K \setminus K=\{26-14,26-7,26\}$. Hence, $S$ is 3-AGL but it is not GAS because $14$ is not a minimal generator of $S$. \\ {\bf 3.} Consider the semigroup $S=\langle 8, 11, 14, 15, 17, 18, 20, 21 \rangle$. We have $2K \setminus K=\{13-11,13-8,13\}$, but $S$ is not GAS because $11-8 \in \PF(S)$. In this case $2K=3K$ and thus $S$ is 3-AGL. \end{examples}
The last example shows that in a numerical semigroup $S$ with maximal embedding dimension there could be many minimal generators $x$ such that $\F(S) -x \in 2K\setminus K$. This is not the case if we assume that $S$ is GAS.
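Both requirements of the definition can be tested by brute force. The sketch below (again relying on the illustrative helpers \texttt{members}, \texttt{frobenius}, \texttt{canonical} and \texttt{pseudo\_frobenius} of the sketch in Section 1; \texttt{min\_gens} and \texttt{is\_gas} are likewise names of our own) computes $2K\setminus K$ and checks the two conditions of the definition for the three semigroups of the examples above.

\begin{verbatim}
def min_gens(mem):                       # minimal generators: elements of M not in M + M
    M = [s for s in range(1, len(mem)) if mem[s]]
    return [s for s in M if not any(mem[a] and mem[s - a] for a in range(1, s))]

def is_gas(gens):
    mem = members(gens)
    F = frobenius(mem)
    K = canonical(mem)                   # K(S) inside [0, F]
    PF = set(pseudo_frobenius(mem))
    mg = set(min_gens(mem))
    D = sorted({a + b for a in K for b in K if a + b <= F} - K)   # 2K \ K
    if not D:
        return True                      # 2K = K, i.e. S is symmetric
    xs = [F - z for z in D if z != F]    # the candidates x_1, ..., x_r
    return all(x in mg for x in xs) and \
           not any(xi - xj in PF for xi in xs for xj in xs)

for gens in ([9, 24, 39, 43, 77], [7, 9, 15], [8, 11, 14, 15, 17, 18, 20, 21]):
    print(gens, is_gas(gens))            # True, False, False
\end{verbatim}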
\begin{proposition} \label{MED} If $S$ has maximal embedding dimension $e$ and it is {\rm GAS}, then it is either almost symmetric or {\rm 2-AGL} with $2K\setminus K=\{\F(S)-e,\F(S)\}$. \end{proposition}
\begin{proof} Assume that $S$ is not almost symmetric and let $\F(S)-x=k_1+k_2 \in 2K\setminus K$ with $x\neq 0$ and $k_1,k_2 \in K$. Let $x\neq e$ and consider $\F(S)-e=k_1+k_2+x-e$. Since $x-e \leq \F(S)-e < \F(S)$ and $S$ has maximal embedding dimension, $x-e \in \PF(S) \setminus \{\F(S)\} \subseteq K$ and, therefore, $\F(S)-e \in 3K \setminus K$. Moreover, $\F(S)-e$ cannot be in $2K$, because $S$ is GAS and $x-e \in \PF(S)$, then, $k_1+x-e \in 2K \setminus K$. Hence, we have $\F(S)-(\F(S)-k_1-x+e) \in 2K\setminus K$ and, thus, $\F(S)-k_1-x+e$ is a minimal generator of $S$. Since $S$ has maximal embedding dimension, this implies that $\F(S)-k_1-x \in \PF(S)$ and, then, $\F(S)-k_1 \in S$ yields a contradiction, since $k_1 \in K$. This means that $x=e$ and $2K\setminus K=\{\F(S)-e,\F(S)\}$.
Suppose by contradiction that $2K \neq 3K$ and let $\F(S)-y \in 3K \setminus 2K$. In particular, $\F(S)-y \notin K$ and, therefore, $y \in S$. If $\F(S)-y =k_1+k_2+ k_3$ with $k_i \in K$ for every $i$, then $k_1+k_2 \in 2K \setminus K$ and, thus, $k_1+k_2=\F(S)-e$. This implies that $\F(S)-e<\F(S)-y$, i.e. $y<e$, that is a contradiction. \end{proof}
In particular, we note that in a 2-AGL semigroup with maximal embedding dimension it always holds that $2K \setminus K=\{\F(S)-e, \F(S)\}$.
\begin{proposition} \label{Characterizations GAS} Given a numerical semigroup $S$, the following conditions are equivalent: \begin{enumerate} \item $S$ is {\rm GAS}; \item $x-y \notin (M-M)$ for every different $x,y \in M\setminus (S-K)$; \item either $S$ is symmetric or $2M \subseteq S-K \subseteq M$ and $M-M=((S-K)-M) \cup \{0\}$. \end{enumerate} \end{proposition}
\begin{proof} If $S$ is symmetric, then $M \subseteq S-K$ and both (1) and (2) are true, so we assume $S \neq K$. \\ $(1) \Rightarrow (2)$ Note that $K-S=K$ and $K-(S-K)=K-((K-K)-K)=K-(K-2K)=2K$. Thus, $x \in S \setminus (S-K)$ if and only if $\F(S)-x \in (K-(S-K))\setminus (K-S)=2K \setminus K$. Hence, if $S$ is GAS, then $x-y \notin S \cup \PF(S)=M-M$ for every $x,y \in M\setminus (S-K)$. \\ $(2) \Rightarrow (1)$ If $x$, $y \in M \setminus (S-K)$, then $\F(S)-x$, $\F(S)-y \in 2K \setminus K$ and $x-y \notin \PF(S)$, since it is not in $M-M$. We only need to show that $x$ is a minimal generator of $S$. If by contradiction $x=s_1+s_2$, with $s_1$, $s_2 \in M$, it follows that also $s_1$ is in $M \setminus (S-K)$. Therefore, $s_2=x-s_1 \in M$ yields a contradiction since $x-s_1 \notin M-M$ by hypothesis. \\ $(2) \Rightarrow (3)$ Since $S$ is not symmetric, $S-K$ is contained in $M$. Moreover, if $2M$ is not in $S-K$, then there exist $m_1, m_2 \in M$ such that $m_1+m_2 \in 2M \setminus (S-K)$. Clearly also $m_1$ is not in $S-K$ and $(m_1+m_2)-m_1=m_2 \in M \subseteq M-M$ yields a contradiction.
It always holds that $((S-K)-M) \cup \{0\} \subseteq M-M$, then given $x \in (M-M) \setminus \{0\}$ and $m \in M$, we only need to prove that $x+m \in S-K$. If $m \in M \setminus (S-K)$ and $x+m \notin S-K$, then $(x+m)-m=x \in M-M$ gives a contradiction. If $m \in (S-K) \setminus 2M$ and $k \in K$, then $0 \neq m+k \in S$ and, so, $x+m+k \in M$, that implies $x+m \in S-K$. Finally, if $m \in 2M$, then $x+m \in 2M \subseteq S-K$. \\ $(3) \Rightarrow (2)$ Let $x,y \in M \setminus (S-K)$ with $x \neq y$ and assume by contradiction that $x-y \in (M-M)=((S-K)-M)\cup \{0\}$. By hypothesis $y \in M$, then $x=(x-y)+y \in S-K$ yields a contradiction. \end{proof}
In the definition of GAS semigroup we required that $2K\setminus K$ contains only elements of the form $\F(S)-x$ with $x$ a minimal generator of $S$. In general, this does not imply that the elements of $3K\setminus 2K$ are of the same form. For instance, consider $S=\langle 8,12,17,21,26,27,30,31 \rangle$, where $2K \setminus K=\{23-21,23-17,23-12,23-8,23\}$ and $3K\setminus 2K=\{23-20,23-16\}$. However, by Proposition \ref{MED}, this semigroup is not GAS. In fact, this never happens in a GAS semigroup, as we are going to show in Theorem \ref{Livelli più alti}. First we need a lemma.
\begin{lemma} \label{Lemma livelli più alti} Assume that $2K \setminus K=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}$ with $x_1, \dots, x_r$ minimal generators of $S$. If $\F(S)-x \in nK \setminus (n-1)K$ for some $n>2$ and $x=s_1+s_2$ with $s_1$, $s_2 \in M$, then $\F(S)-s_1 \in (n-1)K$. \end{lemma}
\begin{proof} Let $\F(S)-(s_1+s_2)=k_1 + \dots + k_n \in nK \setminus (n-1)K$ with $k_i \in K$ for $1 \leq i \leq n$. Since $\F(S)-(s_1+s_2) \notin (n-1)K$, we have $\F(S) \neq k_1+k_2 \in 2K \setminus K$ and, then, $\F(S)-(k_1+k_2)$ is a minimal generator of $S$. Since $\F(S)-(k_1+k_2)=s_1+s_2+k_3+ \dots + k_{n}$, this implies that $s_1+k_3 + \dots + k_{n}\notin S$, that is $k_1+k_2+s_2 =\F(S)-(s_1+k_3+\dots + k_{n}) \in K$. Therefore, $\F(S)-s_1=(k_1+k_2+s_2)+k_3+\dots+ k_{n} \in (n-1)K$ and the thesis follows. \end{proof}
\begin{theorem} \label{Livelli più alti} Let $S$ be a {\rm GAS} numerical semigroup that is not symmetric. Then, $\langle K \rangle \setminus K=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}$ for some minimal generators $x_1, \dots, x_r$ with $r \geq 0$ and $x_i-x_j \notin \PF(S)$ for every $i$ and $j$. \end{theorem}
\begin{proof} We first prove that $x_i-x_j \notin \PF(S)$ for every $i$ and $j$ without assuming that $x_i$ and $x_j$ are minimal generators. We can suppose that $x_i=x_1$ and $x_j=x_2$.
Let $\F(S)-x_1=k_1+\dots +k_n \in nK \setminus (n-1)K$ with $k_i \in K$ for every $i$ and assume by contradiction that $x_1-x_2 \in \PF(S)$. We note that $\F(S)-x_2=k_1+\dots +k_n + (x_1-x_2)$ and $k_1+(x_1-x_2) \in K$. Indeed, if $\F(S)-k_1 -(x_1-x_2)=s \in S$, then $s \neq 0$ and $\F(S)-k_1=(x_1-x_2)+s \in S$ yields a contradiction. If $k_1+k_2+(x_1-x_2) \notin K$, then it is in $2K \setminus K$ and, since also $k_1+k_2 \in 2K \setminus K$, we get a contradiction because their difference is a pseudo-Frobenius number. Hence, $k_1+k_2+(x_1-x_2) \in K$.
We proceed by induction on $n$. If $n=2$, it follows that $\F(S)-x_2=k_1+k_2+(x_1-x_2) \in K$, that is a contradiction. So, let $n \geq 3$ and let $i$ be the minimum index for which $k_1+ \dots + k_i + (x_1-x_2) \notin K$. It follows that $k_1+ \dots + k_i + (x_1-x_2) \in 2K\setminus K$ and, since also $k_1+k_2 \in 2K \setminus K$, this implies that $k_3+ \dots + k_i +(x_1-x_2) \notin \PF(S)$. Moreover, it cannot be in $S$, because it is the difference of two minimal generators, since $S$ is GAS. Therefore, there exists $m \in M$ such that $k_3+ \dots + k_i +(x_1-x_2)+ m \notin S$, that means $\F(S)-(k_3+ \dots + k_i +(x_1-x_2)+m)=k' \in K$. Thus, $\F(S)-((x_1-x_2)+m)=k'+k_3+ \dots + k_i \in jK \setminus K$ for some $1< j < n$. Moreover, $\F(S)-m=k'+k_3+ \dots + k_i +(x_1-x_2)\in \langle K \rangle \setminus K$ and by induction $(x_1-x_2)+m-m \notin \PF(S)$, that is a contradiction. Hence, $x_1-x_2 \notin \PF(S)$.
Let now $h \geq 3$. To prove the theorem it is enough to show that, if $\F(S)-x \in hK \setminus (h-1)K$, then $x$ is a minimal generator of $S$. We proceed by induction on $h$. Using the GAS hypothesis, the case $h=3$ is very similar to the general case, so we omit it (the difference is that also $\F(S) \in 2K \setminus K$). Suppose by contradiction that $x=s_1+s_2$ and $\F(S)-(s_1+s_2)=k_1+ \dots +k_h \in hK \setminus (h-1)K$ with $k_1, \dots, k_h \in K$ and $s_1, s_2 \in M$. Clearly, $\F(S)-s_1 \notin K$ and by Lemma \ref{Lemma livelli più alti} we have $\F(S)-s_1 \in (h-1)K$; in particular, $s_1$ is a minimal generator of $S$ by induction. Let $1< i < h$ be such that $\F(S)-s_1 \in iK \setminus (i-1)K$. Since $\F(S)-(s_1+s_2) \notin (h-1)K$, we have $k_1+ \dots + k_i \in iK \setminus (i-1)K$ and, by induction, $\F(S)-(k_1+\dots+k_i)$ is a minimal generator of $S$ and $\F(S)-(k_1+\dots+k_i)-s_1 \notin \PF(S)$ by the first part of the proof. This means that there exists $s \in M$ such that $\F(S)-(k_1+\dots+k_i)-s_1+s \notin S$, i.e. $k_1+\dots+k_i+s_1-s \in K$. This implies that $\F(S)-(s_2+s)=(k_1 + \dots + k_i +s_1-s)+k_{i+1}+\dots+k_h \in (h-i+1)K$ and, since $h-i+1 <h$, the induction hypothesis yields a contradiction because $s_2+s$ is not a minimal generator of $S$. \end{proof}
We recall that in an almost symmetric numerical semigroup $\F(S)-f \in \PF(S)$ for every $f \in \PF(S)\setminus \{\F(S)\}$, see \cite[Theorem 2.4]{N}. The following proposition generalizes this fact.
\begin{proposition} \label{PF GAS} Let $S$ be a numerical semigroup with $2K \setminus K=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}$, where $x_i$ is a minimal generator of $S$ for every $i$. \begin{enumerate}
\item For every $i$, there exist $f_j, f_k \in \PF(S)$ such that $f_j+f_k=\F(S)+x_i$.
\item For every $f \in \PF(S)\setminus \{\F(S)\}$, it holds either $\F(S)-f \in \PF(S)$ or $\F(S)-f+x_i \in \PF(S)$ for some $i$. \end{enumerate} \end{proposition}
\begin{proof} Let $\F(S)-x_i=k_1+k_2 \in 2K\setminus K$ for some $k_1, k_2 \in K$ and let $s \in M$. Since $x_i+s \in S$, we have $\F(S)-x_i-s \notin K$ and then $\F(S)-x_i-s=k_1+k_2-s \notin 2K$ because $x_i+s$ is not a minimal generator of $S$. In particular, $k_1-s$ and $k_2-s$ are not in $K$. This means that $\F(S)-k_1+s$ and $\F(S)-k_2+s$ are in $S$ and, thus, $\F(S)-k_1, \F(S)-k_2 \in \PF(S)$. Moreover, $\F(S)-k_1+\F(S)-k_2=2\F(S)-(\F(S)-x_i)=\F(S)+x_i$ and (1) holds.
Let now $f \in \PF(S) \setminus \{\F(S)\}$ and assume that $\F(S)-f \notin \PF(S)$. Then, there exists $s \in M$ such that $\F(S)-f+s \in \PF(S)$. In particular, $f-s \in K$ and $\F(S)-s=(\F(S)-f)+(f-s) \in 2K \setminus K$; thus, $s$ has to be equal to $x_i$ for some $i$ and $\F(S)-f+x_i \in \PF(S)$. \end{proof}
\begin{examples} \rm \label{Examples} {\bf 1.} Let $S=\langle 28,40,63,79,88\rangle$. We have $2K \setminus K=\{281-28,281\}$ and $S$ is 2-AGL. In this case $\PF(S)=\{100,132,177,209,281\}$ and $100+209=132+177=281+28$. \\ {\bf 2.} Consider $S= \langle 67, 69, 76, 78, 86 \rangle$. Here $2K \setminus K=\{485-86,485\}$ and the semigroup is 2-AGL. Moreover, $\PF(S)=\{218, 226, 249, 259, 267, 322, 485 \}$, $218+267=226+259=485$ and $249+322=485+86$. \\ {\bf 3.} If $S=\langle 9,10,12,13 \rangle$, then $2K \setminus K=\{17-13,17-12,17-10,17-9,17\}$ and $\PF(S)=\{11,14,15,16,17\}$. Hence, $S$ is GAS and, according to the previous proposition, we have \begin{align*} \F(S)+9&=11+15 &\F(S)+12=14+15&\\ \F(S)+10&=11+16 &\F(S)+13=14+16&. \end{align*} {\bf 4.} Conditions (1) and (2) in Proposition \ref{PF GAS} do not imply that every $x_i$ is a minimal generator. For instance, if we consider the numerical semigroup $S=\langle 15,16,19,20,24\rangle$, we have $2K \setminus K=\{42-40,42-36,42-32,42-24,42-20,42-19,42-16,42-15,42\}$ and $\PF(S)=\{28,29,33,37,41,42\}$. Moreover, \begin{align*} \F(S)+40&=41+41 &\F(S)+20=29+33 \\ \F(S)+36&=37+41 &\F(S)+19=28+33 \\ \F(S)+32&=37+37 &\F(S)+16=29+29 \\ \F(S)+24&=33+33 &\F(S)+15=28+29 \end{align*} and, so, it is straightforward to see that the conditions in Proposition \ref{PF GAS} hold, but $32$, $36$ and $40$ are not minimal generators. \end{examples}
We recall that $\L(S)$ denotes the set of the gaps of the second type of $S$, i.e. the integers $x$ such that $x \notin S$ and $\F(S)-x \notin S$, i.e. $x \in K \setminus S$, and that $S$ is almost symmetric if and only if $\L(S) \subseteq \PF(S)$, see \cite{BF}.
\begin{lemma} \label{Lemma L(S)} Let $S$ be a numerical semigroup with $2K \setminus K=\{\F(S)-x_1, \dots, \F(S)-x_r,\F(S)\}$, where $x_i$ is a minimal generator of $S$ for every $i$. If $x \in \L(S)$ and $\F(S)-x \notin \PF(S)$, then both $x$ and $\F(S)-x+x_i$ are pseudo-Frobenius numbers of $S$ for some $i$. \end{lemma}
\begin{proof} Assume by contradiction that $x \notin \PF(S)$. Therefore, there exists $s \in M$ such that $x+s \notin S$ and, then, $\F(S)-x-s \in K$. Moreover, since $\F(S)-x \notin \PF(S)$, there exists $t \in M$ such that $\F(S)-x+t \notin S$ and then $x-t \in K$. Consequently, $\F(S)-s-t=(\F(S)-x-s)+(x-t) \in 2K$ and $\F(S)-s-t\notin K$, since $s+t \in S$. This is a contradiction, because $s+t$ is not a minimal generator of $S$. Hence, $x \in \PF(S)$ and, since $\F(S)-x \notin \PF(S)$, Proposition \ref{PF GAS} implies that $\F(S)-x+x_i \in \PF(S)$ for some $i$. \end{proof}
\begin{lemma} \label{difference} As ideal of $M-M$, it holds $\widetilde{M-e}=M-e$ and \[ K(M-M) \setminus (M-e) =\{x-e \mid x \in \L(S) \text{ and } \F(S) - x \notin \PF(S)\}. \] \end{lemma}
\begin{proof} We notice that $\F(S)-e \notin (M-M)$ and, if $y > \F(S)-e$ and $m \in M$, we have $y+m >\F(S)-e+m \geq \F(S)$. Therefore, $\F(M-M)=\F(S)-e=\F(M-e)$ and, then, $\widetilde{M-e}=M-e$.
We have $x-e \in K(M-M) \setminus (M-e)$ if and only if $x\notin M$ and $(\F(S)-e)-(x-e) \notin (M-M)$ that is in turn equivalent to $x \notin M$ and $\F(S)-x \notin S \cup \PF(S)$. Since $x \neq 0$, this means that $x \in \L(S)$ and $\F(S)-x \notin \PF(S)$. \end{proof}
The following corollary was proved in \cite[Theorem 5.2]{B} in a different way.
\begin{corollary} \label{canonical ideal} $S$ is almost symmetric if and only if $M-e$ is a canonical ideal of $M-M$. \end{corollary}
\begin{proof} By definition $M-e$ is a canonical ideal of $M-M$ if and only if $K(M-M) = (M-e)$. In light of the previous lemma, this means that there is no $x \in \L(S)$ such that $\F(S)-x \notin \PF(S)$, which is equivalent to saying that $\L(S)\subseteq \PF(S)$, i.e. $S$ is almost symmetric. \end{proof}
In \cite[Corollary 8]{BF} it was first proved that $S$ is almost symmetric with maximal embedding dimension if and only if $M-M$ is a symmetric semigroup. In general it holds $M-M \subseteq M-e \subseteq K(M-M)$ and the first inclusion is an equality if and only if $S$ has maximal embedding dimension, whereas the previous corollary says that the second one is an equality if and only if $S$ is almost symmetric. Moreover, if $S$ has maximal embedding dimension, in \cite[Corollary 5.4]{CGKM} it is proved that $S$ is 2-AGL if and only if $M-M$ is an almost symmetric semigroup which is not symmetric. If we want to generalize this result in the same spirit of Corollary \ref{canonical ideal}, it is not enough to consider the 2-AGL semigroups, but we need that $S$ is GAS. More precisely, we have the following result.
\begin{theorem} \label{T. Almost Canonical ideal of M-M} The semigroup $S$ is {\rm GAS} if and only if $M-e$ is an almost canonical ideal of the semigroup $M-M$. \end{theorem}
\begin{proof} In the light of Remark \ref{Rem as}.4 and Lemma \ref{difference}, $M-e$ is an almost canonical ideal of $M-M$ if and only if \begin{equation} \label{Eq.Canonical Ideal of M-M} K(M-M) \setminus (M-e) \subseteq ((M-e)-((M-M)\setminus \{0\})). \end{equation} Assume that $S$ is GAS with $2K \setminus K=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}$. By Lemma \ref{difference} the elements of $K(M-M) \setminus (M-e)$ can be written as $x-e$ with $x \in \L(S)$ and $\F(S)-x \notin \PF(S)$. In addition, Lemma \ref{Lemma L(S)} implies that both $x$ and $\F(S)-x+x_i$ are pseudo-Frobenius numbers of $S$ for some $i$. Let $0 \neq z\in (M-M)$. We need to show that $x-e+z\in M-e$, i.e. $x+z \in M$. Assume by contradiction $x+z \notin M$, which implies $\F(S)-x-z \in K$. Since $x+z \notin M$ and $x \in \PF(S)$, it follows that $z \notin M$ and, then, $z \in \PF(S)$; hence, $z+x_i \in M$ and $\F(S)-z-x_i \notin K$. We also have that $x-x_i \in K$, since $\F(S)-x+x_i \in \PF(S)$. Therefore, \[ \F(S)-z-x_i=(\F(S)-x-z)+(x-x_i) \in 2K \setminus K \] and this yields a contradiction because $(z+x_i)-x_i \in \PF(S)$ and $S$ is a GAS semigroup.
Conversely, assume that the inclusion (\ref{Eq.Canonical Ideal of M-M}) holds. An element in $2K\setminus K$ can be written as $\F(S)-s$ for some $s \in S$, since it is not in $K$. Assume by contradiction that $s \neq 0$ is not a minimal generator of $S$, i.e. $\F(S)-s_1-s_2=k_1+k_2 \in 2K\setminus K$ for some $s_1,s_2 \in M$ and $k_1, k_2 \in K$. It follows that $\F(S)-k_1-s_1=k_2 + s_2 \notin S$, otherwise $\F(S)-s_1 \in K$. Moreover, $k_1+s_1 \notin \PF(S) \cup S$, since $k_1+s_1+s_2 = \F(S)-k_2 \notin S$. Hence, Lemma \ref{difference} and our hypothesis imply that \[k_2+s_2-e=\F(S)-k_1-s_1-e \in ((M-e)-((M-M)\setminus \{0\})).\] Therefore, $\F(S)-k_1-e=(k_2+s_2-e)+s_1 \in M-e$ and, thus, $k_1 \notin K$ yields a contradiction. This means that $2K \setminus K=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}$ with $x_i$ minimal generator of $S$ for every $i$. Now, assume by contradiction that $z=x_i-x_j \in \PF(S)$ for some $i,j$ and let $\F(S)-x_i=\F(S)-x_j-z=k_1+k_2$ for some $k_1, k_2 \in K$. Since $k_2+z+x_j=\F(S)-k_1 \notin S$, it follows that $k_2+z \notin S \cup \PF(S)$. Moreover, $\F(S)-k_2-z \notin S$, otherwise $\F(S)-k_2\in S$. Therefore, Lemma \ref{difference} and inclusion (\ref{Eq.Canonical Ideal of M-M}) imply that $\F(S)-k_2-z-e \in ((M-e)-((M-M)\setminus \{0\}))$ and, since $z \in M-M$, it follows that $\F(S)-k_2 \in M$ which is a contradiction because $k_2 \in K$. \end{proof}
\begin{example} \rm Consider $S=\langle 9,13,14,15,19 \rangle$, that is a GAS numerical semigroup with $2K \setminus K=\{25-15,25-13,25-9,25\}$. Then, $M-9$ is an almost canonical ideal of $M-M$ by the previous theorem. In fact \begin{equation*} \begin{split} &M-M=\{0,9,13,14,15,17, \rightarrow\}, \\ &K(M-M)=\{0,4,5,6,8,9,10,11,12,13,14,15,17 \rightarrow\},\\ &M-9=\{0,4,5,6,9,10,13,14,15,17 \rightarrow\}, \\ &(M-9)-((M-M)\setminus \{0\})=K(M-M) \cup \{16\}=\{0,4,5,6,8 \rightarrow\}. \end{split} \end{equation*} \end{example}
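The sets displayed above, together with the inclusion used in the proof of Theorem \ref{T. Almost Canonical ideal of M-M}, can be recovered by a direct computation inside the window $[0,\F(S)]$, since every set involved contains all the integers larger than its Frobenius number. The following sketch is in the same spirit as the previous ones (it reuses the illustrative helpers \texttt{members} and \texttt{frobenius} of the sketch in Section 1).

\begin{verbatim}
gens = [9, 13, 14, 15, 19]
mem = members(gens)                      # helpers of the sketch in Section 1
F = frobenius(mem)                       # F(S) = 25
e = min(gens)
Ms = [s for s in range(1, len(mem)) if mem[s]]          # M up to F + e

MM = {x for x in range(F + 1)            # (M - M) inside [0, F]
      if all(x + m > F or mem[x + m] for m in Ms)}
FT = max(x for x in range(F + 1) if x not in MM)        # F(M-M) = F(S) - e = 16
KT = {x for x in range(FT + 1) if FT - x not in MM}     # K(M-M) inside [0, FT]
Me = {s - e for s in range(e, F + 1) if mem[s]}         # (M - e) inside [0, FT]
rhs = {x for x in range(FT + 1)          # (M-e) - ((M-M)\{0}) inside [0, FT]
       if all(x + z > FT or (x + z) in Me for z in MM if z != 0)}

print(sorted(MM))    # [0, 9, 13, 14, 15, 17, 18, ..., 25]
print(sorted(KT))    # [0, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15]
print(sorted(Me))    # [0, 4, 5, 6, 9, 10, 13, 14, 15]
print(sorted(rhs))   # [0, 4, 5, 6, 8, 9, 10, ..., 16]
print(KT - Me <= rhs)                    # the inclusion in the proof: True
\end{verbatim}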
\begin{remark} \rm If $S$ is {\rm GAS}, it is possible to compute the type of $M-e$ seen as an ideal of the semigroup $M-M$. In fact by Theorem \ref{T. Almost Canonical ideal of M-M} and Proposition \ref{almost canonical ideal} it follows that \begin{align*} t(M-e)&=g(M-e)+g(M-M)-\F(M-M)= \\ &=g(M)-e+g(S)-t(S)-\F(S)+e= 2g(S)+1-t(S)-\F(S). \end{align*} Moreover, we recall that $2g(S) \geq t(S)+\F(S)$ is always true and the equality holds exactly when $S$ is almost symmetric. Therefore, as $t(S)$ is a measure of how far $S$ is from being symmetric, $t(M-e)=t(M)$ (as ideal of $M-M$) can be seen as a measure of how far $S$ is from being almost symmetric. On the other hand, we note that the type of $M$ as an ideal of $S$ is simply $t(S)+1$. \end{remark}
If $S$ has type 2 and $\PF(S)=\{f,\F(S)\}$, in \cite[Theorem 6.2]{CGKM} it is proved that $S$ is 2-AGL if and only if $3(\F(S)-f) \in S$ and $\F(S)=2f-x$ for some minimal generator $x$ of $S$. In the next proposition we generalize this result to the GAS case.
\begin{proposition} \label{type 2}
Assume that $S$ is not almost symmetric and that it has type 2, i.e. $\PF(S)=\{f,\F(S)\}$. Then, $S$ is {\rm GAS} if and only if $\F(S)=2f-x$ for some minimal generator $x$ of $S$. In this case, if $n$ is the minimum integer for which $n(\F(S)-f) \in S$, then $|2K \setminus K|=2$, $|3K \setminus 2K|= \dots = |(n-1)K \setminus (n-2)K|=1$ and $nK=(n-1)K$. \end{proposition}
\begin{proof} Assume first that $S$ is GAS and let $\F(S)-x$, $\F(S)-y \in 2K \setminus K$. Proposition \ref{PF GAS} implies that $\F(S)+x=f_1+f_2$ and $\F(S)+y=f_3+f_4$ for some $f_1,f_2,f_3,f_4 \in \PF(S)$. Since $f_i$ has to be different from $\F(S)$ for all $i$, it follows that $\F(S)+x=\F(S)+y=2f$ and, then, $x=y$. In particular, $\F(S)=2f-x$.
Assume now that $\F(S)=2f-x$ for some minimal generator $x$ of $S$. Clearly, $\F(S)-x=2(\F(S)-f) \in 2K \setminus K$. Let $y \neq 0,x$ be such that $\F(S)-y \in 2K \setminus K$. Since $2K \setminus K$ is finite, we may assume that $y$ is maximal among such elements with respect to $\leq_S$, that is $\F(S)-(y+m) \notin 2K\setminus K$ for every $m \in M$. Let $\F(S)-y=k_1+k_2$ with $k_1$, $k_2 \in K$. Since $\F(S)-y-m=k_1+k_2-m \notin 2K \setminus K$, then $k_1-m$ and $k_2-m$ are not in $K$, which is equivalent to $\F(S)-k_1+m \in S$ and $\F(S)-k_2+m \in S$ for every $m \in M$. This means that $\F(S)-k_1$, $\F(S)-k_2 \in \PF(S)\setminus \{\F(S)\}$ which implies $\F(S)-y=2(\F(S)-f)=\F(S)-x$ and, thus, $x=y$. Therefore, $|2K \setminus K|=2$ and $S$ is GAS.
Moreover, if $S$ is GAS and $\F(S)-y =k_1+\dots+k_r \in rK \setminus (r-1)K$ with $r>2$ and $k_1, \dots, k_r \in K$, then $k_1= \dots=k_r=\F(S)-f$ because $k_i+k_j \in 2K \setminus K$ for every $i$ and $j$. Therefore, if $n(\F(S)-f) \in S$, then $nK=(n-1)K$. Assume that $r(\F(S)-f) \notin S$. Clearly, it is in $rK$ and we claim that it is not in $K$. In fact, if $r(\F(S)-f) \in K$, it follows that it is in $\L(S)$ and, if $\F(S)-r(\F(S)-f)=f$, then $(r-1)(\F(S)-f)=0 \in S$ yields a contradiction. Therefore, Lemma \ref{Lemma L(S)} implies that $\F(S)-r(\F(S)-f)+x =f$ and, again, $(r-1)(\F(S)-f)=x \in S$ gives a contradiction. This means that $r(\F(S)-f) \in rK \setminus K$. Moreover, if $r(\F(S)-f)=k_1+\dots+ k_{r'} \in r'K\setminus(r'-1)K$ with $1<r'<r$ and $k_1, \dots, k_{r'} \in K$, we get $k_1=\dots=k_{r'}=\F(S)-f$ as above, that is a contradiction. Hence, $|rK \setminus (r-1)K|=1$ for every $1<r<n$. \end{proof}
\begin{example} \rm \label{GAS tipo 2} Consider $S=\langle 5,6,7\rangle$. In this case $f=8$ and $\F(S)=9$. Therefore, the equality $\F(S)=2f-7$ implies that $S$ is GAS. With the notation of the previous proposition we have $n=5$ and, in fact, $2K \setminus K=\{2,9\}$, $3K \setminus 2K=\{3\}$ and $4K \setminus 3K=\{4\}$. \end{example}
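The chain $2K\setminus K$, $3K \setminus 2K, \dots$ of the example can be obtained by a direct computation: since every element of $rK\setminus K$ is at most $\F(S)$, it suffices to work inside the window $[0,\F(S)]$. The following sketch (with the same illustrative helpers as in Section 1) does this for $S=\langle 5,6,7\rangle$.

\begin{verbatim}
gens = [5, 6, 7]
mem = members(gens)                      # helpers of the sketch in Section 1
F = frobenius(mem)                       # F(S) = 9
K = canonical(mem)                       # K inside [0, F] = {0, 1, 5, 6, 7, 8}

power, r = K, 1                          # power = rK inside [0, F]
while True:
    nxt = power | {a + b for a in power for b in K if a + b <= F}
    if nxt == power:
        break
    r += 1
    print(r, sorted(nxt - power))        # 2 [2, 9] / 3 [3] / 4 [4]
    power = nxt
print(r + 1)                             # the powers stabilise at 5K = 4K, so n = 5
\end{verbatim}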
In \cite{HHS} another generalization of almost Gorenstein ring is introduced. More precisely a Cohen-Macaulay local ring admitting a canonical module $\omega$ is said to be {\it nearly Gorenstein} if the trace of $\omega$ contains the maximal ideal. In the case of numerical semigroups it follows from \cite[Lemma 1.1]{HHS} that $S$ is nearly Gorenstein if and only if $M \subseteq K+(S-K)$, see also the arXiv version of \cite{HHS}. It is easy to see that an almost symmetric semigroup is nearly Gorenstein, but in \cite{CGKM} it is noted that a 2-AGL semigroup is never nearly Gorenstein (see also \cite[Remark 3.7]{BS} for an easy proof in the numerical semigroup case). This does not happen for GAS semigroups.
\begin{corollary} Let $S$ be a {\rm GAS} semigroup, not almost symmetric, with $\PF(S)=\{f,\F(S)\}$. It is nearly Gorenstein if and only if $3f-2\F(S) \in S$. \end{corollary}
\begin{proof} We will use the following characterization proved in \cite{MS}: $S$ is nearly Gorenstein if and only if for every minimal generator $y$ of $S$ there exists $g \in \PF(S)$ such that $g+y-g' \in S$ for every $g' \in \PF(S)\setminus \{g\}$.
By Proposition \ref{type 2} it follows that $\F(S)=2f-x$ with $x$ a minimal generator of $S$. Let $y \neq x$ be another minimal generator of $S$ and assume by contradiction that $\F(S)+y-f \notin S$. Therefore, there exists $s \in S$ such that $\F(S)+y-f+s \in \PF(S)$. If it is equal to $\F(S)$, then $f=y+s \in S$ yields a contradiction. If $\F(S)+y-f+s=f$, then $y+s=2f-\F(S)=x$ by Proposition \ref{type 2} and this gives a contradiction, since $x \neq y$ is a minimal generator of $S$. Hence, $\F(S)+y-f \in S$ for every minimal generator $y \neq x$. On the other hand, $\F(S)+x-f=2f-x+x-f=f \notin S$ and, therefore, $S$ is nearly Gorenstein if and only if $f+x-\F(S)=3f-2\F(S)\in S$. \end{proof}
\begin{examples} \rm {\bf 1.} In Example \ref{GAS tipo 2} we have $3f-2\F(S)=6 \in S$ and, then, the semigroup is both GAS and nearly Gorenstein. \\ {\bf 2.} Consider $S=\langle 9,17,67\rangle$, which has $\PF(S)=\{59,109\}$. Since $2\cdot 59-109=9$ and $3\cdot 59-2\cdot 109=-41 \notin S$, the semigroup is GAS but not nearly Gorenstein. \\ {\bf 3.} If $S=\langle 10,11,12,25 \rangle$, we have $\PF(S)=\{38,39\}$ and $2\cdot 38-39=37$ is not a minimal generator; thus, $S$ is not GAS. On the other hand, it is straightforward to check that this semigroup is nearly Gorenstein. \end{examples}
\begin{remark} \rm In the literature there are two other generalizations of the notion of almost Gorenstein ring. One is given by the so-called rings with canonical reduction, introduced in \cite{R}: a ring with canonical reduction is a one-dimensional Cohen-Macaulay local ring $(R,\mathfrak m)$ possessing a canonical ideal $I$ that is a reduction of $\mathfrak m$. When $R=k[[S]]$ is a numerical semigroup ring, this definition gives a generalization of almost symmetric semigroups and $R$ has a canonical reduction if and only if $e+\F(S)-g \in S$ for every $g \in \mathbb{N} \setminus S$, see \cite[Theorem 3.13]{R}. This notion is unrelated to that of GAS semigroup; in fact, it is easy to see that $S=\langle 4,7,9,10 \rangle$ is GAS and does not have a canonical reduction, while $S=\langle 8,9,10,22 \rangle$ is not GAS, but has a canonical reduction.
Another generalization of the notion of almost Gorenstein ring is given by the so-called generalized Gorenstein ring, briefly GGL, introduced in \cite{GIKT,GK}. A Cohen-Macaulay local ring $(R,\mathfrak{m})$ with a canonical module $\omega$ is said to be GGL with respect to $\mathfrak{a}$ if either $R$ is Gorenstein or there exists an exact sequence of $R$-modules \[ 0 \xrightarrow{} R \xrightarrow{\varphi} \omega \xrightarrow{} C \xrightarrow{} 0 \] where $C$ is an Ulrich module of $R$ with respect to some $\mathfrak m$-primary ideal $\mathfrak a$ and $\varphi \otimes R/\mathfrak a$ is injective. We note that $R$ is almost Gorenstein and not Gorenstein if and only if it is GGL with respect to $\mathfrak m$. Let $S$ be a numerical semigroup and order $\PF(S)=\{f_1,f_2, \dots, f_t=\F(S)\}$ by the usual order in $\mathbb{N}$. Calling a numerical semigroup GGL if its associated ring is GGL, a useful characterization is proved in \cite{T}: $S$ is GGL if either it is symmetric or the following properties hold: \begin{enumerate}
\item there exists $x \in S$ such that $f_i+f_{t-i}=\F(S)+x$ for every $i=1, \dots, \lceil t/2 \rceil$;
\item $((c-M) \cap S) \setminus c=\{x\}$, where $c=S-\langle K \rangle$. \end{enumerate} Using this characterization it is not difficult to see that this notion, too, is unrelated to that of GAS semigroup. In fact, the semigroups in Examples \ref{Examples}.2 and \ref{Examples}.3 are GAS but do not satisfy (1), whereas the semigroup $S=\langle 5,9,12 \rangle$ is not GAS by Proposition \ref{type 2}, because $\PF(S)=\{13,16\}$, but it is easy to see that it is GGL with $x=10$. \end{remark}
\section{Constructing GAS numerical semigroups}
In this section we study the behaviour of the GAS property with respect to some constructions. In this way we will be able to construct many numerical semigroups satisfying this property.
\subsection{Gluing of numerical semigroups}
Let $S_1=\langle s_1, \dots, s_n \rangle$ and $S_2=\langle t_1, \dots, t_m \rangle$ be two numerical semigroups and assume that $s_1, \dots, s_n$ and $t_1, \dots, t_m$ are minimal generators of $S_1$ and $S_2$ respectively. Let also $a\in S_2$ and $b \in S_1$ be elements which are not minimal generators of $S_2$ and $S_1$ respectively, and assume that $\gcd(a,b)=1$. The numerical semigroup $\langle aS_1,bS_2 \rangle=\langle as_1, \dots, as_n, bt_1, \dots, bt_m \rangle$ is said to be the gluing of $S_1$ and $S_2$ with respect to $a$ and $b$. It is well-known that $as_1, \dots, as_n, bt_1, \dots, bt_m$ are its minimal generators, see \cite[Lemma 9.8]{RG}. Moreover, the pseudo-Frobenius numbers of $T=\langle aS_1,bS_2 \rangle$ are \[ \PF(T)=\{af_1+bf_2+ab \mid f_1 \in \PF(S_1), f_2 \in \PF(S_2)\}, \] see \cite[Proposition 6.6]{N}. In particular, $t(T)=t(S_1)t(S_2)$ and $\F(T)=a\F(S_1)+b\F(S_2)+ab$. Consequently, since $K(T)$ is generated by the elements $\F(T)-f$ with $f \in \PF(T)$, it is easy to see that $K(T)=\{ak_1+bk_2 \mid k_1 \in K(S_1), k_2 \in K(S_2) \}$.
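These formulas are easy to test numerically. In the sketch below (with the same illustrative helpers as in Section 1; the data $S_1=\langle 3,7,8\rangle$, $S_2=\langle 2,7\rangle$, $a=4$ and $b=11$ form a toy example of ours, chosen so that $a$ and $b$ are not minimal generators and $\gcd(a,b)=1$) we compare the Frobenius number and the pseudo-Frobenius numbers of the gluing with the formulas above.

\begin{verbatim}
g1, g2, a, b = [3, 7, 8], [2, 7], 4, 11   # a = 2+2 in S2, b = 3+8 in S1, gcd(a,b) = 1
gT = sorted({a * s for s in g1} | {b * s for s in g2})   # generators of <aS1, bS2>

m1, m2, mT = members(g1), members(g2), members(gT)
F1, F2, FT = frobenius(m1), frobenius(m2), frobenius(mT)
PF1, PF2, PFT = pseudo_frobenius(m1), pseudo_frobenius(m2), pseudo_frobenius(mT)

print(FT == a * F1 + b * F2 + a * b)      # True
print(sorted(PFT) ==
      sorted(a * f1 + b * f2 + a * b for f1 in PF1 for f2 in PF2))   # True
\end{verbatim}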
Since $t(T)=t(S_1)t(S_2)$, it follows that $T$ is symmetric if and only if both $S_1$ and $S_2$ are symmetric, so in the next theorem we exclude this case.
\begin{theorem} \label{gluing} Let $T$ be a gluing of two numerical semigroups and assume that $T$ is not symmetric. The following are equivalent: \begin{enumerate} \item $T$ is {\rm GAS}; \item $T$ is {\rm 2-AGL}; \item $T=\langle 2S, b \mathbb{N} \rangle$ with $b \in S$ odd and $S$ is an almost symmetric semigroup, but not symmetric. \end{enumerate} \end{theorem}
\begin{proof} (2) $\Rightarrow$ (1) True by definition. \\ (1) $\Rightarrow$ (3) Let $T=\langle aS_1, bS_2 \rangle$. Since $T$ is not symmetric, we can assume that $S_1$ is not symmetric and, then, $\F(S_1)=k_1+k_2$ for some $k_1$, $k_2 \in K(S_1)$. This implies that \[ \F(T)-b(\F(S_2)+a)=a\F(S_1)+b\F(S_2)+ab-b\F(S_2)-ab=ak_1+ak_2 \in 2K(T) \setminus K(T) \] because $\F(S_2)+a \in S_2$. Therefore, since $T$ is GAS, $\F(S_2)+a$ is a minimal generator of $S_2$. By definition of gluing, $a$ is not a minimal generator of $S_2$, so write $a=s+s'$ with $s$, $s' \in M(S_2)$. Since $\F(S_2)+s+s'$ is a minimal generator of $S_2$, we get $\F(S_2)+s=\F(S_2)+s'=0$, i.e. $\F(S_2)=-1$ and $a=s+s'=2$. This proves that $T=\langle 2S_1, b \mathbb{N} \rangle$. Clearly, $b$ is odd by definition of gluing, so we only need to prove that $S_1$ is almost symmetric. Assume by contradiction that it is not almost symmetric and let $s \in M(S_1)$ such that $\F(S_1)-s=k_1+k_2 \in 2K(S_1)\setminus K(S_1)$ with $k_1$, $k_2 \in K(S_1)$. Then \[ \F(T)-(2s+b)=2\F(S_1)-b+2b-2s-b=2k_1+2k_2 \in 2K(T) \setminus K(T) \] and $2s+b$ is not a minimal generator of $T$, contradiction. \\
(3) $\Rightarrow$ (2) Since $S$ is not symmetric, $\langle K(S) \rangle \setminus K(S)= 2K(S) \setminus K(S)=\{\F(S)\}$. Consider an element $z \in \langle K(T) \rangle \setminus K(T)$, that is $z=2k_1+b\lambda_1 + \dots + 2k_r + b\lambda_r = 2(k_1+\dots +k_r)+b(\lambda_1+ \dots +\lambda_r)$ for some $k_1, \dots, k_r \in K(S)$ and $\lambda_1, \dots, \lambda_r \in \mathbb{N}$. Since $z \notin K(T)$, then $k_1+ \dots +k_r \notin K(S)$ and so $k_1+\dots +k_r=\F(S)$. Therefore, $z=2\F(S)+b(\lambda_1+\dots + \lambda_r) \in 2K(T)\setminus K(T)$ and, since it is not in $K(T)$ and $\F(T)=2\F(S)+b$, it follows that either $z=2\F(S)$ or $z=2\F(S)+b$. Hence, $|\langle K(T) \rangle \setminus K(T)|=2$ and thus $T$ is 2-AGL. \end{proof}
\subsection{Numerical Duplication}
In the previous subsection we have shown that if a non-symmetric GAS semigroup is a gluing, then it can be written as $\langle 2S, b \mathbb{N}\rangle$. This kind of gluing can be seen as a particular case of another construction, the {\it numerical duplication}, introduced in \cite{DS}.
Given a numerical semigroup $S$, a relative ideal $I$ of $S$ and an odd integer $b \in S$, the numerical duplication of $S$ with respect to $I$ and $b$ is defined as $S \! \Join^b \! I=2\cdot S \cup \{2 \cdot I +b\}$, where $2\cdot X=\{2x \mid x\in X\}$ for every set $X$. This is a numerical semigroup if and only if $I+I+b \subseteq S$. This is always true if $I$ is an ideal of $S$ and, since in the rest of the subsection $I$ will always be an ideal, we ignore this condition. In this case, if $S$ and $I$ are minimally generated by $\{s_1, \dots, s_\nu\}$ and $\{i_1, \dots, i_\mu\}$ respectively, then $S \! \Join^b \! I=\langle 2s_1, \dots, 2s_\nu, 2i_1+b, \dots, 2i_\mu+b \rangle$ and these generators are minimal. It follows that $\langle 2S, b \mathbb{N} \rangle = S\! \Join^b \!S$.
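As a quick illustration (using once more the helpers of the sketch in Section 1; the data $S=\langle 3,7,8 \rangle$, $I=M$ and $b=7$ form a toy example of ours), the next sketch builds $S \! \Join^b \! M$ from the generators described above and checks that its elements are exactly $2\cdot S \cup (2\cdot M+b)$ and that its Frobenius number equals $2\F(M)+b=2\F(S)+b$.

\begin{verbatim}
gS, b = [3, 7, 8], 7                     # b is an odd element of S
mS = members(gS)                         # helpers of the sketch in Section 1
F = frobenius(mS)
in_S = lambda x: x >= 0 and (x > F or mS[x])

# I = M(S) is generated, as an ideal, by the minimal generators of S
gT = sorted({2 * s for s in gS} | {2 * s + b for s in gS})
mT = members(gT)
FT = frobenius(mT)
print(FT == 2 * F + b)                   # F(M) = F(S), hence F equals 2F(S)+b: True

# even elements come from 2.S, odd elements from 2.M + b
dup = {x for x in range(FT + 1)
       if (x % 2 == 0 and in_S(x // 2)) or
          (x % 2 == 1 and x >= b + 2 and in_S((x - b) // 2))}
print(dup == {x for x in range(FT + 1) if mT[x]})           # True
\end{verbatim}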
\begin{remark} \label{PF duplication} \rm The Frobenius number of $S \! \Join^b \! I$ is equal to $2\F(I)+b$. Moreover, the odd pseudo-Frobenius numbers of $S \! \Join^b \! I$ are $\{2\lambda+b \mid \lambda \in \PF(I)\}$, whereas the even elements in $\PF(S \! \Join^b \! I)$ are exactly the doubles of the elements in $((M-M) \cap (I-I)) \setminus S$; see the proof of \cite[Proposition 3.5]{DS}. In particular, if $2f \in \PF(S \! \Join^b \! I)$, then $f \in \PF(S)$. \end{remark}
In this subsection we write $K$ in place of $K(S)$. We note that $S-\langle K \rangle \subseteq S$ and $\F(S-\langle K \rangle)=\F(S)$.
\begin{lemma} \label{Lemma Numerical Duplication} Let $S$ be a numerical semigroup, $b \in S$ be an odd integer, $I$ be an ideal of $S$ with $\F(I)=\F(S)$ and $T=S \! \Join^b \! I$. The following hold: \begin{enumerate} \item If $k\in K$, then both $2k$ and $2k+b$ are in $K(T)$. In particular, if $\F(S)-x \in iK \setminus K$, then $\F(T)-2x \in iK(T)\setminus K(T)$; \item Let $k \in K(T)$. If $k$ is odd, then $\frac{k-b}{2} \in K$, otherwise $\F(S)-\frac{k}{2} \notin I$; \item If $I=S-\langle K \rangle$ and $k \in K(T)$ is even, then $\frac{k}{2} \in jK$ for some $j \geq 1$. \item Let $I=S-\langle K \rangle$. If $\F(T)-2i-b \in \langle K(T) \rangle \setminus K(T)$, then $\F(S)-i \in \langle K \rangle \setminus K$ for every $i \in I$. Moreover, $\F(S)-x \in \langle K \rangle \setminus K$ if and only if $\F(T)-2x \in \langle K(T) \rangle \setminus K(T)$. \end{enumerate} \end{lemma}
\begin{proof} (1) If $k \in K$, then $2k+b\in K(T)$, since $\F(T)-(2k+b) = 2(\F(S)-k)\notin 2 \cdot S$. Moreover, $\F(T)-2k=2(\F(S)-k)+b$ and $\F(S)-k \notin I$ because it is not in $S$, so $2k \in K(T)$. Therefore, if $\F(S)-x=k_1+\dots + k_i \in iK \setminus K$ with $k_1, \dots, k_i \in K$, then $\F(T)-2x=2k_1+ \dots +2k_{i-1}+(2k_i+b) \in iK(T)$ and, clearly, it is not in $K(T)$, since $2x \in T$. \\ (2) Let $k$ be odd. Since $2(\F(S)-\frac{k-b}{2})=2\F(S)+b-k=\F(T)-k \notin T$, it follows that $\F(S)-\frac{k-b}{2}\notin S$, i.e. $\frac{k-b}{2} \in K$. If $k$ is even, then $2(\F(S)-\frac{k}{2})+b=\F(T)-k \notin T$ and, thus, $\F(S)-\frac{k}{2}\notin I$.\\ (3) Since $\F(S)-\frac{k}{2} \notin S-\langle K \rangle$ by (2), there exist $i\geq 1$ and $a \in iK$ such that $\F(S)-\frac{k}{2}+a \notin S$, that is $\frac{k}{2}-a \in K$. Hence, $\frac{k}{2}=a+ (\frac{k}{2}-a) \in (i+1)K$. \\ (4) If $\F(T)-2i-b=k_1+ \dots + k_j + k_{j+1} + \dots + k_n \in \langle K(T) \rangle \setminus K(T)$ with $k_1, \dots, k_j \in K(T)$ even and $k_{j+1}, \dots, k_n \in K(T)$ odd, then $\F(S)-i=\frac{k_1}{2}+\dots + \frac{k_j}{2} + \frac{k_{j+1}-b}{2} + \dots + \frac{k_{n}-b}{2} + \frac{(n-j)}{2}b \in \langle K \rangle \setminus K$ by (2) and (3). Using (1) the other statement is analogous. \end{proof}
\begin{example} \rm \label{Example Numerical Duplication} {\bf 1.} In the previous lemma we cannot remove the hypothesis $\F(I)=\F(S)$. For instance, consider $S=\langle 3,10,11 \rangle$, $I=\langle 3,10 \rangle$ and $T=S \! \Join^3 \! I $. Then, $\F(I)=11\neq 8=\F(S)$ and we have $\F(S)-6 \in 2K \setminus K$, but $\F(T)-12 \notin \langle K(T) \rangle$. \\ {\bf 2.} In the third statement of the previous lemma, $j$ may be bigger than 1. For instance, consider $S=\langle 6,28,47,97\rangle$ and $T=S\! \Join^{47} \!(S-\langle K\rangle)=\langle 12,56,71,94,115,153,159,194,197,241 \rangle$. Then $88,126,170,182 \in K(T)$, while $44,63,91 \in 2K \setminus K$ and $85 \in 3K \setminus 2K$. \end{example}
\begin{corollary} \label{Numerical duplication 2-AGL} Let $b \in S$ be odd and let $I=S-\langle K \rangle$. The following hold: \begin{enumerate} \item If $S$ is not almost symmetric, then $S\! \Join^b \!M$ is not {\rm GAS}; \item $S$ is n-{\rm AGL} if and only if $S \! \Join^b \! I$ is n-{\rm AGL}. \end{enumerate} \end{corollary}
\begin{proof} (1) Let $T=S\! \Join^b \!M$ and let $x \neq 0$ be such that $\F(S)-x \in 2K \setminus K$. By Lemma \ref{Lemma Numerical Duplication} (1), $\F(T)-2x$ and $\F(T)-(2x+b)$ are in $2K(T) \setminus K(T)$. Even though $2x+b$ and $2x$ are minimal generators, their difference $b$ is a pseudo-Frobenius number of $T$ by Remark \ref{PF duplication}, because $0 \in \PF(M)$, hence $T$ is not GAS. \\ (2) Let $T=S \! \Join^b \! I$. By Lemma \ref{Lemma Numerical Duplication} (4) we have that $\F(S)-x \in \langle K\rangle \setminus K$ if and only if $\F(T)-2x \in \langle K(T) \rangle \setminus K(T)$. Moreover, if $\F(T)-(2i+b) \in \langle K(T) \rangle \setminus K(T)$, Lemma \ref{Lemma Numerical Duplication} (4) implies that $\F(S)-i \in \langle K \rangle$ and, since $i \in (S-\langle K \rangle)$, it follows that $\F(S) \in S$, that is a contradiction. Hence, $S$ is $n$-AGL if and only if $T$ is $n$-AGL. \end{proof}
\begin{remark} \rm If $S$ is almost symmetric with type $t$, then $M=K-(M-M)$ and, consequently, $S\! \Join^b \!M$ is almost symmetric with type $2t+1$ by \cite[Theorem 4.3 and Proposition 4.8]{DS}. \end{remark}
If $R$ is a one-dimensional Cohen-Macaulay local ring with a canonical module $\omega$ such that $R \subseteq \omega \subseteq \overline{R}$, in \cite[Theorem 4.2]{CGKM} it is proved that the idealization $R \ltimes (R:R[\omega])$ is 2-AGL if and only if $R$ is 2-AGL. The numerical duplication may be considered the analogue of the idealization in the numerical semigroup case, since they are both members of a family of rings that share many properties (see \cite{BDS}); therefore, Corollary \ref{Numerical duplication 2-AGL} (2) should not be surprising. In the following theorem we generalize this result to the GAS property.
\begin{theorem} \label{Numerical duplication S-<K>} Let $S$ be a numerical semigroup, let $b \in S$ be an odd integer and let $I=S-\langle K \rangle$. The semigroup $T=S \! \Join^b \! I$ is {\rm GAS} if and only if $S$ is {\rm GAS}. \end{theorem}
\begin{proof} Assume that $T$ is GAS and let $\F(S)-x \in 2K \setminus K$. By Lemma \ref{Lemma Numerical Duplication}, $\F(T)-2x\in 2K(T) \setminus K(T)$, so $2x$ is a minimal generator of $T$ and, thus, $x$ is a minimal generator of $S$. Now let $\F(S)-x$, $\F(S)-y \in 2K \setminus K$ and assume by contradiction that $x-y \in \PF(S)$. In particular, $S$ is not symmetric and, then, $I=M-\langle K \rangle$. Moreover, $\F(T)-2x$ and $\F(T)-2y$ are in $2K(T) \setminus K(T)$. We also notice that $x-y \in I-I$, indeed, if $i \in I$ and $a \in \langle K \rangle$, it follows that $(x-y)+i+a \in (x-y)+M \subseteq S$. Therefore, Remark \ref{PF duplication} implies that $2(x-y) \in \PF(T)$; contradiction.
Conversely, assume that $S$ is GAS and let $\F(T)-z=k_1+k_2 \in 2K(T) \setminus K(T)$ with $k_1$, $k_2 \in K(T)$. If $z=2i+b$ is odd and both $k_1$ and $k_2$ are odd, then $i\in I$ and $\F(S)-i=(k_1-b)/2+(k_2-b)/2+b \in 2K$ by Lemma \ref{Lemma Numerical Duplication}.(2); on the other hand, if $k_1$ and $k_2$ are both even, $\F(S)-i=k_1/2+k_2/2 \in \langle K \rangle$ by Lemma \ref{Lemma Numerical Duplication}.3. Since $i \in (S-\langle K \rangle)$, in both cases we get $\F(S) \in S$, that is a contradiction. Hence, $z=2x$ is even. If $k_1$ is even and $k_2$ is odd, Lemma \ref{Lemma Numerical Duplication} implies that $\F(S)-x=k_1/2 + (k_2-b)/2 \in (j+1)K \setminus K$ for some $j\geq 1$ and, therefore, by Theorem \ref{Livelli più alti} it follows that $x$ is a minimal generator of $S$, i.e. $z=2x$ is a minimal generator of $T$. Moreover, let $\F(T)-2x$, $\F(T)-2y \in 2K(T)\setminus K(T)$ and assume by contradiction that $2x-2y \in \PF(T)$. Remark \ref{PF duplication} implies that $x-y \in \PF(S) \subseteq K \cup \{\F(S)\}$. Thus, if $\F(T)-2x=k_1+k_2$ with $k_1$, $k_2 \in K(T)$ and $k_1$ even, then $\F(S)-x=k_1/2+(k_2-b)/2 \in \langle K(S) \rangle \setminus K(S)$ by Lemma \ref{Lemma Numerical Duplication} and, so, $\F(S)-y=k_1/2+(k_2-b)/2+(x-y) \in \langle K(S) \rangle \setminus K(S)$. Hence, Theorem \ref{Livelli più alti} yields a contradiction, because $x-y \in \PF(S)$. \end{proof}
\begin{example} \rm {\bf 1.} Consider the semigroup $S$ in Example \ref{Example Numerical Duplication}.2. It is GAS and, therefore, the previous theorem implies that $T=S\! \Join^{47} \!(S-\langle K\rangle)$ is GAS as well. However, we notice that $2K\setminus K=\{44,63,91\}$, $3K \setminus 2K=\{85\}$ and $4K=3K$, while $2K(T) \setminus K(T)=\{135,173,217,229\}$ and $2K(T)=3K(T)$. \\ {\bf 2.} Despite Theorem \ref{Numerical duplication S-<K>}, if $S \! \Join^b \! I$ is GAS for an ideal $I$ different from $S-\langle K \rangle$, it does not follow that $S$ is GAS. For instance, the semigroup $S$ in Example \ref{Example Numerical Duplication}.1 is not GAS, but $S\! \Join^3 \! I$ is. \end{example}
\subsection{Dilatations of numerical semigroups}
We conclude this section by studying the transfer of the GAS property under a construction recently introduced in \cite{BS}: given $a \in M-2M$, the numerical semigroup $S+a=\{0\} \cup \{m+a \mid m \in M\}$ is called the dilatation of $S$ with respect to $a$.
\begin{proposition} \label{dilatation} Let $a \in M-2M$. The semigroup $S+a$ is {\rm GAS} if and only if $S$ is {\rm GAS}. \end{proposition}
\begin{proof} We denote the semigroup $S+a$ by $T$. Recalling that $\F(T)=\F(S)+a$, it follows from \cite[Lemma 3.1 and Lemma 3.4]{BS} that $2K(T)=2K(S)$ and \begin{equation*} \begin{split} &2K(S) \setminus K(S)=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}, \\ &2K(T) \setminus K(T)=\{\F(T)-(x_1+a), \dots, \F(T)-(x_r+a), \F(T)\} \end{split} \end{equation*} for some $x_1, \dots, x_r \in M$.
Assume that $S$ is a GAS semigroup. Then, $x_i$ is a minimal generator of $S$ and it is straightforward to see that $x_i+a$ is a minimal generator of $T$. Moreover, if $(x_i+a)-(x_j+a) \in \PF(T)$, then for every $m \in M$ we have $x_i-x_j+m+a \in T$, i.e. $x_i-x_j+m \in M$, that is a contradiction, since $S$ is GAS.
Now assume that $T$ is GAS. Suppose by contradiction that $x_i$ is not a minimal generator of $S$, that is $x_i=s_1+s_2$ for some $s_1$, $s_2 \in M$. We have $\F(S)-(s_1+s_2) \in 2K(S)\setminus K(S)$ and so $\F(S)-s_1 \in 2K(S)\setminus K(S)$, since $2K(S)$ is a relative ideal. Hence, $s_1=x_j$ for some $j$ and $(x_i+a)-(x_j+a)=s_2 \in S$. Since $x_i+a$ is a minimal generator, we have that $s_2 \notin T$. Moreover, for every $m+a \in M(T)$ we clearly have $s_2+m+a \in M(T)$, because $s_2 \in S$. This yields a contradiction because $(x_i+a)-(x_j+a)=s_2 \in \PF(T)$ and $T$ is GAS. Finally, if $x_i-x_j \in \PF(S)$, it is trivial to see that $x_i-x_j \in \PF(T)$. \end{proof}
\begin{remark} \rm Suppose $2K(S+a) \setminus K(S+a)=\{\F(S+a)-(x_1+a), \dots, \F(S+a)-(x_r+a), \F(S+a)\}$ with $x_1+a, \dots, x_r+a$ minimal generators of $S+a$, but $S+a$ is not GAS. Then $2K(S) \setminus K(S)=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}$, but it is not necessarily true that $x_1, \dots, x_r$ are minimal generators of $S$. For instance, consider $S=\langle 7,9,11 \rangle$ and $S+7=\langle 14, 16, 18, 21, 23, 25, 27, 29, 38, 40 \rangle$. In this case $2K(S+7) \setminus K(S+7)=\{33-29,33-18,33\}$ and $2K(S) \setminus K(S)=\{26-22, 26-11, 26\}$. \end{remark}
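The sets displayed in this remark are again easy to recompute. The following Python sketch (an illustration only) does so directly from the definition of the dilatation given above and the convention $K(X)=\{z \mid \F(X)-z\notin X\}$, printing $2K\setminus K$ for $S=\langle 7,9,11\rangle$ and for $S+7$.

\begin{verbatim}
# Illustration only: recompute the sets 2K minus K displayed in the remark above.
BOUND = 200

def semigroup(gens):
    elems = {0}
    for n in range(1, BOUND + 1):
        if any(n - g in elems for g in gens if n - g >= 0):
            elems.add(n)
    return elems

def frobenius(X):
    return max(n for n in range(BOUND) if n not in X)

def two_K_minus_K(X):                         # the set 2K(X) \ K(X)
    F = frobenius(X)
    K = {z for z in range(BOUND) if F - z not in X}
    KK = {x + y for x in K for y in K if x + y <= BOUND}
    return sorted(KK - K)

S = semigroup([7, 9, 11])
Sa = {0} | {m + 7 for m in S if m != 0 and m + 7 <= BOUND}   # the dilatation S + 7
print("F(S) =", frobenius(S), " F(S+7) =", frobenius(Sa))    # 26 and 33
print("2K(S)   minus K(S)  :", two_K_minus_K(S))             # compare with {4, 15, 26}
print("2K(S+7) minus K(S+7):", two_K_minus_K(Sa))            # compare with {4, 15, 33}
\end{verbatim}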
\end{document}
\begin{document}
\newtheorem{lem}{Lemma} \newtheorem{lemma}[lem]{Lemma} \newtheorem{prop}{Proposition} \newtheorem{thm}{Theorem} \newtheorem{theorem}[thm]{Theorem}
\title{\sc On the size of the set $A(A+1)$}
\author{ {\sc Moubariz Z. Garaev} \\ {Instituto de Matem{\'a}ticas}\\ {Universidad Nacional Aut\'onoma de M{\'e}xico} \\ {C.P. 58089, Morelia, Michoac{\'a}n, M{\'e}xico} \\ {\tt [email protected]}\\ \and {\sc Chun-Yen Shen} \\ {Department of Mathematics}\\ {Indiana University} \\ {Rawles Hall, 831 East Third St.}\\ {Bloomington, IN 47405, USA}\\ {\tt [email protected]}}
\maketitle
\begin{abstract} Let $F_p$ be the field of prime order $p.$ For a subset $A\subset F_p$ we consider the product set $A(A+1).$ This set is the image of
$A\times A$ under the polynomial mapping $f(x,y)=xy+x:F_p\times F_p\to F_p.$ In the present note we show that if $|A|<p^{1/2},$ then $$
|A(A+1)|\ge |A|^{106/105+o(1)}. $$
If $|A|>p^{2/3},$ then we prove that $$
|A(A+1)|\gg \sqrt{p\, |A|} $$ and show that this bound is optimal, in general settings, up to the implied constant. We also estimate the cardinality of $A(A+1)$ when $A$ is a finite set of real numbers. We show that in this case one has the Elekes type bound $$
|A(A+1)|\gg |A|^{5/4}. $$ \end{abstract}
\footnotetext[1]{{\it 2000 Mathematics Subject Classification:}\, 11B75.} \footnotetext[2]{{\it Key words and phrases.}\, sums, products and expanding maps.} \section {Introduction}
Let $F_p$ be the field of residue classes modulo a prime number $p$
and let $A$ be a non-empty subset of $F_p.$ It is known from~\cite{BGK, BKT} that if $|A|<p^{1-\delta},$ where $\delta>0,$ then one has the sum-product estimate $$
|A+A|+|AA|\gg |A|^{1+\varepsilon}; \qquad \varepsilon=\varepsilon(\delta)>0. $$ This estimate and its proof have subsequently been quantified and simplified in~\cite{BG},~\cite{Gar1}--\cite{HIS},~\cite{KS}--\cite{Sh},~\cite{TV}. From the sum-product estimate and Ruzsa's triangle inequalities (see~\cite{R1} and~\cite{R2}) it follows that the polynomial $f(x,y,z)=xy+z:F_p^3\to F_p$ possesses an expanding property, in the sense that for any subsets $A,B,C$ with
$|A|\sim |B|\sim |C|\sim p^{\alpha},$ where $0<\alpha<1$ is fixed, the set $f(A,B,C)$ has cardinality greater than $p^{\beta}$ for some $\beta=\beta(\alpha)>\alpha.$ The problem raised by Wigderson asks for an explicit polynomial in two variables which satisfies the expanding condition. This problem was solved by Bourgain~\cite{B1}, who showed that one can take $f(x,y)=x^2+xy.$
Now consider the polynomial $f(x,y)=xy+x.$ This polynomial, of course, does not possess the expanding property in the way defined above. Nevertheless, from Bourgain's work~\cite{B1} it is known that if $|A|\sim p^{\alpha},$ where $0<\alpha<1,$ then $$
|f(A,A)|=|A(A+1)|\ge p^{\beta};\qquad \beta=\beta(\alpha)>\alpha. $$
In the present note we deal with explicit lower bounds for the size of the set $A(A+1).$ Our first result addresses the most nontrivial case $|A|<p^{1/2}.$
\begin{theorem}
\label{thm:106/105} Let $A\subset F_p$ with $|A|<p^{1/2}.$ Then $$
|A(A+1)| \ge |A|^{106/105+o(1)}. $$ \end{theorem}
Theorem~\ref{thm:106/105} will be derived from a Balog-Szemer\'edi-Gowers type estimate and a version of the sum-product estimate given in~\cite{BG}. We remark that the statement of Theorem~\ref{thm:106/105} remains true in a slightly wider range than $|A|<p^{1/2}.$ On the other hand, if $|A|>p^{2/3},$ then we have a bound that is optimal in general settings.
\begin{theorem} \label{thm:optimal} For any subsets $A, B, C\subset F_p^*$ the following bound holds: $$
|AB|\cdot|(A+1)C|\gg \min\Bigl\{p\,|A|,\,
\frac{|A|^2\cdot|B|\cdot|C|}{p}\Bigr\}. $$ \end{theorem}
Theorem~\ref{thm:optimal} can be compared with the following estimate from~\cite{Gar2}: $$
|A+B|\cdot |AC|\gg \min\Bigl\{p\, |A|,\,
\frac{|A|^2\cdot|B|\cdot|C|}{p}\Bigr\}. $$
Taking $B=A+1,\, C=A,$ Theorem~\ref{thm:optimal} implies $$
|A(A+1)|\gg \min\Bigl\{\sqrt{p\,|A|},\,
\frac{|A|^2}{p^{1/2}}\Bigr\}. $$
In particular, if $|A|>p^{2/3},$ then $$
|A(A+1)|\gg \sqrt{p\, |A|}. $$ Let us show that this bound is optimal, in general settings, up to the implied constant. Let $N<0.1p$ be a positive integer, $M=[2\sqrt{Np}]$ and let $g$ be a generator of $F_p^*.$ Consider the set $$ X=\{g^{n}-1:\, n=1,2,\ldots, M\}. $$ By the pigeon-hole principle, there is a number $L$ such that $$
|X\cap\{g^{L+1},\ldots, g^{L+M}\}|\ge \frac{M^2}{2p}\ge N. $$ Take $$ A=X\cap\{g^{L+1},\ldots, g^{L+M}\}. $$
Then we have $|A|\ge N$ and $$
|A(A+1)|\le 2M\le 4\sqrt{pN}. $$
Thus, it follows that for any positive integer $N<p$ there exists a set $A\subset F_p$ with $|A|=N$ such that $$
|A(A+1)|\ll\sqrt{p|A|}. $$ This observation illustrates the precision of our result for large subsets of $F_p.$
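The construction above is easily reproduced numerically. The following Python sketch (an illustration only; the prime $p=1009$ and the value $N=30$ are arbitrary choices) builds such a set $A$ and confirms the bound $|A(A+1)|\le 2M$.

\begin{verbatim}
# Illustration only: the extremal construction above, for p = 1009 and N = 30.
from math import isqrt

p, N = 1009, 30                      # arbitrary small prime and N < 0.1*p
M = 2 * isqrt(N * p)                 # M = [2 sqrt(Np)]

# a generator g of F_p^*: p - 1 = 1008 = 2^4 * 3^2 * 7
g = next(x for x in range(2, p)
         if all(pow(x, (p - 1) // q, p) != 1 for q in (2, 3, 7)))

X = {pow(g, n, p) - 1 for n in range(1, M + 1)}        # X = {g^n - 1 : 1 <= n <= M}

# pigeon-hole: one of the windows {g^(L+1), ..., g^(L+M)} meets X in >= N points
A = set()
for L in range(0, p - 1, M):
    window = {pow(g, L + j, p) for j in range(1, M + 1)}
    A = max(A, X & window, key=len)

products = {a * (b + 1) % p for a in A for b in A}     # the set A(A+1) in F_p
print("|A| =", len(A), ">=", N)
print("|A(A+1)| =", len(products), "<= 2M =", 2 * M)
\end{verbatim}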
When $|A|\cdot|B|\cdot|C|\approx p^2,$ Theorem~\ref{thm:optimal} implies that $$
|AB|\cdot|(A+1)C|\gg \sqrt{|A|^3\cdot |B|\cdot|C|}. $$ This coincides with the bound that one can get when $A,B,C$ are subsets of the set of real numbers $\mathbb{R}.$
\begin{theorem} \label{thm:5/4} Let $A,B,C$ be finite subsets of \, $\mathbb{R}\setminus\{0,-1\}.$ Then $$
|AB|\cdot|(A+1)C|\gg \sqrt{|A|^3\cdot |B|\cdot |C|}. $$ \end{theorem} In particular, taking $B=A+1,\, C=A,$ we obtain the bound $$
|A(A+1)| \gg |A|^{5/4}. $$ We mention Elekes' sum-product estimate~\cite{El} in the case of real numbers: $$
|A+A|+|AA|\gg |A|^{5/4}. $$ More generally Elekes' work implies that if $A,B,C$ are finite subsets of the set $\mathbb{R}\setminus\{0\},$ then $$
|AB|\cdot |A+C|\gg \sqrt{|A|^3\cdot |B|\cdot|C|}. $$
The best bound known to date in the ``pure'' sum-product problem for real numbers is $|A+A|+|AA|\gg |A|^{4/3+o(1)},$ due to Solymosi~\cite{Sol}.
\section{Proof of Theorem~\ref{thm:106/105}}
For $E\subset A\times B$ we write $$ A\,\, \substack{-\\ E}\,\, B=\{a-b: (a,b)\in E\}. $$ A basic tool in the proof of Theorem~\ref{thm:106/105} is the following explicit Balog-Szemer\'edi-Gowers type estimate given by Bourgain and Garaev~\cite{BG}.
\begin{lemma}
\label{lem:BG1} Let $A\subset F_p, \, B\subset F_p,\, E\subset A\times B$ be such that $|E|\ge |A||B|/K.$ There exists a subset
$A'\subset A$ such that $|A'|\ge 0.1 |A|/K$ and $$
|A\,\, \substack{-\\ E}\,\, B|^4\ge \frac{|A'-A'|\cdot|A|\cdot|B|^{2}}{10^4K^{5}}. $$ \end{lemma}
Theorem~\ref{thm:106/105} will be derived from the combination of Lemma~\ref{lem:BG1} with the following specific variation of the sum-product estimate from~\cite{BG}. \begin{lemma}
\label{lem:BG2} Let $A\subset F_p,\, |A|<p^{1/2}.$ Then, $$
|A-A|^8\cdot|A(A+1)|^4\ge |A|^{13+o(1)} $$ \end{lemma}
The proof of Lemma~\ref{lem:BG2} follows from a straightforward modification of the proof of Theorem 1.1 of~\cite{BG}, so we only sketch it. It suffices to show that $$
|A-A|^5\cdot|2A-2A|\cdot|A(A+1)|^4\ge |A|^{11+o(1)}. $$ Indeed, having this estimate established, one can apply it to large subsets of $A,$ iterate the argument of Katz and Shen~\cite{KS} several times and finish the proof; for more details, see~\cite{BG}.
We can assume that $A\cap \{0, -1\}=\emptyset$ and $|A|\ge 10.$ There exists a fixed element $b_0\in A$ such that $$
\sum_{a\in A}|(a+1)A\cap (b_0+1)A|\ge \frac{|A|^3}{|A(A+1)|}. $$ Decomposing into level sets, we get a positive integer $N$ and a subset $A_1\subset A$ such that \begin{equation}
\label{eqn:aAcapbAge} N\le |(a+1)A\cap (b_0+1)A|< 2N \quad {\rm for \quad any} \quad a\in A_1, \end{equation} \begin{equation}
\label{eqn:N|A1|} N|A_1|\ge \frac{|A|^3}{2|A(A+1)|\cdot\log|A|}. \end{equation} In particular, \begin{equation} \label{eqn:boundA1} N\ge
\frac{|A|^2}{2|A|\cdot|A(A+1)|\cdot\log|A|}. \end{equation}
We can assume that $|A_1|>1.$ Due to the observation of Glibichuk and Konyagin~\cite{GK}, either $$ \frac{A_1-A_1}{A_1-A_1}=F_p $$ or we can choose elements $b'_1,b'_2,b'_3,b'_4\in A_1$ such that $$ \frac{b'_1-b'_2}{b'_3-b'_4}-1\not\in \frac{A_1-A_1}{A_1-A_1}. $$ Using the step of Katz and Shen~\cite{KS}, we deduce that in either case there exist elements $b_1,b_2,b_3,b_4\in A_1$ such that \begin{equation}
\label{eqn:length4} \Bigl|(b_1-b_2)A+(b_3-b_4)A\Bigr|\gg
\frac{|A_1|^3}{|A-A|}. \end{equation}
To each element $x\in (b_1-b_2)A+(b_3-b_4)A$ we attach one fixed representation \begin{equation} \label{eqn:attach x} x=(b_1-b_2)a(x)+(b_3-b_4)a'(x),\quad a(x), a'(x)\in A. \end{equation} Denote $$ S=(b_1-b_2)A+(b_3-b_4)A,\quad S_i=(b_i+1)A\cap (b_0+1)A; \quad i=1,2,3,4. $$ As in~\cite{BG}, we consider the mapping $$ f: S\times S_1\times S_2 \times S_3\times S_4 \to (2A-2A)\times (A-A)\times(A-A)\times(A-A)\times(A-A) $$ defined as follows. Given $$ x\in S, \quad x_i\in S_i; \quad i=1,2,3,4, $$ we represent $x$ in the form~\eqref{eqn:attach x}, represent $x_i$ in the form $$ x_i=(b_i+1)a_i(x_i)=(b_0+1)a_i'(x_i),\quad a_i(x_i)\in A,\quad a_i'(x_i)\in A,\quad (i=1,2,3,4), $$ and define $$ f(x,x_1,x_2,x_3,x_4)=(u,u_1,u_2,u_3, u_4), $$ where $$ u=a_1'(x_1)-a_2'(x_2)+a_3'(x_3)-a_4'(x_4), $$ $$ u_1=a(x)-a_1(x_1), \quad u_2=a(x)-a_2(x_2), $$ $$ u_3=a'(x)-a_3(x_3),\quad u_4=a'(x)-a_4(x_4). $$ From the construction we have $$ x=(b_1+1)u_1-(b_2+1)u_2+(b_3+1)u_3-(b_4+1)u_4+(b_0+1)u. $$ Therefore, the vector $(u,u_1,u_2,u_3,u_4)$ determines $x$ and thus determines $a(x), a'(x)$ and consequently determines $a_1(x_1), a_2(x_2), a_3(x_3), a_4(x_4)$ which determines $x_1,x_2,x_3,x_4.$
Hence, since $|(b_i+1)A\cap (b_0+1)A|\ge N,$ we get that $$
|(b_1-b_2)A+(b_3-b_4)A|N^4\le |A-A|^4\cdot |2A-2A|. $$ Taking into account~\eqref{eqn:length4}, we get $$
|A-A|^4\cdot |2A-2A|\gg \frac{|A_1|^3N^4}{|A-A|}. $$ Using~\eqref{eqn:aAcapbAge}--\eqref{eqn:boundA1}, we conclude the proof of Lemma~\ref{lem:BG2}.
We proceed to prove Theorem~\ref{thm:106/105}. Denote $$ E=\{(x, x+xy):\,\, x\in A, \, y\in A\}\subset A\times A(A+1). $$ Then $$
|E|=|A|^2=\frac{|A|\cdot|A(A+1)|}{K},\quad K=\frac{|A(A+1)|}{|A|}. $$ Let $B=A(A+1).$ Observe that $$ -AA=A\,\, \substack{-\\ E}\,\, B, $$ since $x-(x+xy)=-xy$ for every pair $(x, x+xy)\in E$.
According to Lemma~\ref{lem:BG1} there exists $A'\subset A$ with \begin{equation}
\label{eqn:A'end} |A'|\gg \frac{|A|}{K}=\frac{|A|^2}{|A(A+1)|} \end{equation} such that $$
|AA|^4|A(A+1)|^3\gg |A'-A'||A|^6. $$
Raising to the eighth power and multiplying by $|A(A+1)|^4\ge
|A'(A'+1)|^4,$ we get $$
|AA|^{32}\cdot|A(A+1)|^{28}\gg |A'-A'|^8|A'(A'+1)|^4|A|^{48}. $$ Combining this with Lemma~\ref{lem:BG2} (applied to $A'$), we obtain $$
|AA|^{32}\cdot|A(A+1)|^{28}\gg |A'|^{13}|A|^{48+o(1)}. $$ Taking into account the inequality~\eqref{eqn:A'end}, we get $$
|AA|^{32}\cdot|A(A+1)|^{41}\ge |A|^{74+o(1)}. $$ From Ruzsa's triangle inequalities in multiplicative form, we have $$
|AA|\le\frac{|A(A+1)|\cdot|(A+1)A|}{|A+1|}=\frac{|A(A+1)|^2}{|A|}. $$ Putting the last two inequalities together, we conclude that $$
|A(A+1)|^{105}\ge |A|^{106+o(1)}. $$
\section{Proof of Theorem~\ref{thm:optimal}}
Let $J$ be the number of solutions of the equation $$ x^{-1}y(z^{-1}t-1)=1, \quad (x,y,z,t)\in AB\times B\times C\times (A+1)C. $$ Observe that for any given triple $(a,b,c)\in A\times B\times C$ the quadruple $(x,y,z,t)=(ab, \, b, \, c, \, (a+1)c)$ is a solution of this equation. Thus, \begin{equation}
\label{eqn:Jlower} J\ge |A|\cdot|B|\cdot|C|. \end{equation} On the other hand for any nonprincipal character $\chi$ modulo $p$ we have $$
\Bigl|\sum_{z\in C}\,\,\sum_{t\in (A+1)C}\chi(z^{-1}t-1)\Bigr|\le
\sqrt{p\,|C|\cdot |(A+1)C|}, $$ see, for example, the solution to exercise 8 of~\cite[Chapter V]{Vin}. Therefore, the method of solving multiplicative ternary congruences implies that $$ J=\frac{1}{p-1}\sum_{\chi}\sum_{x,y,z,t}\chi\Bigl(x^{-1}y(z^{-1}t-1)\Bigr)= $$ $$ =\frac{1}{p-1}\sum_{x,y,z,t}\chi_0\Bigl(x^{-1}y(z^{-1}t-1)\Bigr)+ \frac{1}{p-1}\sum_{\chi\not=\chi_0}\sum_{x,y,z,t}\chi(x^{-1})\chi(y)\chi(z^{-1}t-1) $$ $$
\le \frac{|AB|\cdot|B|\cdot |C|\cdot |(A+1)C|}{p-1}+\sqrt{p\,
|C|\cdot|(A+1)C|\cdot|AB|\cdot |B|}. $$ Comparing this with~\eqref{eqn:Jlower}, we conclude the proof.\\
{\bf Remark}. In Karatsuba's survey paper ~\cite{Kar} the interested reader will find many applications of character sums to multiplicative congruences.
\section{Proof of Theorem~\ref{thm:5/4}}
Recall that $A\cap \{0, -1\}=\emptyset;$ moreover, we can assume that $|A|$ is large. We will use the Szemer\'edi-Trotter incidence theorem, which asserts that if $\mathcal{P}$ is a finite set of points $(x,y)\in \mathbb{R}^2$ and $\mathcal{L}$ is a finite set of lines $\ell\subset \mathbb{R}^2,$ then $$ \#\Bigl\{\Bigl((x,y),\ell\Bigr)\in \mathcal{P}\times \mathcal{L}:\, (x,y)\in \ell\Bigr\}\ll
|\mathcal{P}|+|\mathcal{L}|+(|\mathcal{P}||\mathcal{L}|)^{2/3}. $$ We mention that this theorem was applied by Elekes in the above-mentioned work~\cite{El} to the sum-product problem for subsets of $\mathbb{R}.$ In application to our problem, we let $$ \mathcal{P}=\{(x,y):\, x\in AB,\, y\in (A+1)C\} $$ and let $\mathcal{L}$ be the family of lines $\{\ell=\ell(z,t): z\in C,\, t\in B\}$ given by the equation $$ y-\frac{z}{t}\,x-z=0. $$
|\mathcal{P}|=|AB|\cdot|(A+1)C|,\quad |\mathcal{L}|=|B||C|.
$$ Each line $\ell(z,t)\in \mathcal{L}$ contains $|A|$ distinct points $(x,y)\in \mathcal{P}$ of the form $$ (x,y)=(at,\,(a+1)z);\quad a\in A. $$ Thus, $$ \#\Bigl\{\Bigl((x,y),\ell\Bigr)\in \mathcal{P}\times \mathcal{L}:\,
(x,y)\in \ell\Bigr\}\ge |A||\mathcal{L}|=|A|\cdot |B|\cdot |C|. $$ Therefore, the Szemer\'edi-Trotter incidence theorem implies that $$
|A|\cdot |B|\cdot |C| \ll
|AB|\cdot|(A+1)C|+|B||C|+\Bigl(|AB|\cdot|(A+1)C|\cdot
|B|\cdot|C|\Bigr)^{2/3}. $$
Since $|A|$ is large and $|AB|\cdot|(A+1)C|\ge |A|^2,$ the result follows.
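Even for highly structured sets the bound is comfortably satisfied numerically; for instance, the following Python sketch (an illustration only, with the arbitrary choice $A=\{1,\dots,n\}$) compares $|A(A+1)|$ with $|A|^{5/4}$.

\begin{verbatim}
# Illustration only: |A(A+1)| versus |A|^{5/4} for A = {1, ..., n}.
for n in (100, 300, 1000):
    A = range(1, n + 1)
    products = {a * (b + 1) for a in A for b in A}
    print(n, len(products), round(n ** 1.25))
\end{verbatim}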
\end{document}
\begin{document}
\title[Betti numbers]{On the expected Betti numbers of the nodal set of random fields}
\author{Igor Wigman} \email{[email protected]} \address{Department of Mathematics, King's College London}
\date{\today}
\begin{abstract} This note concerns the asymptotics of the expected total Betti numbers of the nodal set for an important class of Gaussian ensembles of random fields on Riemannian manifolds. By working with the limit random field defined on the Euclidean space we were able to obtain a locally precise asymptotic result, though due to the possible positive contribution of large {\em percolating} components this does not allow us to infer a global result. As a by-product of our analysis, we refine the lower bound of Gayet-Welschinger for the important Kostlan ensemble of random polynomials and its generalisation to K\"{a}hler manifolds. \end{abstract}
\maketitle
\section{Introduction}
\subsection{Betti numbers for random fields: Euclidean case}
Let $F:\mathbb{R}^{d}\rightarrow\mathbb{R}$ be a centred stationary Gaussian random field, $d\ge 2$. The {\em nodal set of $F$} is its (random) zero set $$\mathcal{Z}_{F}:=F^{-1}(0)=\{x\in\mathbb{R}^{d}:\: F(x)=0\}\subseteq\mathbb{R}^{d};$$ assuming $F$ is sufficiently {\em smooth} and {\em non-degenerate} (or regular), its connected components (``nodal components of $F$") are a.s. either closed $(d-1)$-manifolds or smooth infinite hypersurfaces (``percolating components"). One way to study the topology of $\mathcal{Z}_{F}$, a central research thread in the recent few years, is by restricting $F$ to a large centred ball $B(R)=\{x\in\mathbb{R}^{d}:\: \|x\|<R \}$, and then investigate the restricted nodal set $\widetilde{\mathcal{Z}_{F}}(R):=F^{-1}(0)\cap B(R)$ as $R\rightarrow\infty$. The set $\widetilde{\mathcal{Z}_{F}}(R)$ consists of the union of the a.s. smooth closed nodal components of $\mathcal{Z}_{F}$ lying entirely in $B(R)$, and the fractions of nodal components of $F$ intersecting $\partial B(R)$; note that, by intersecting with $B(R)$, the components intersecting $\partial B(R)$, finite or percolating, might break into $2$ or more connected components, or fail to be closed.
It follows as a by-product of the precise analysis due to Nazarov-Sodin ~\cite{sodin_lec_notes,nazarov_sodin} that, under very mild assumptions on $F$ to be discussed below, mainly concerning its smoothness and non-degeneracy, with high probability {\em most} of the components of $\mathcal{Z}_{F}$ fall into the former, rather than the latter, category (see \eqref{eq:nod numb conv mean} below). That is, for $R$ large, with high probability, most of the components of $\mathcal{Z}_{F}$ intersecting $B(R)$ are lying entirely within $B(R)$. Setting $$\mathcal{Z}_{F}(R):=\bigcup\limits_{\gamma\subseteq B(R)}\gamma$$ to be the union of all the nodal components $\gamma$ of $F$ lying entirely in $B(R)$, the first primary concern of this note is in the topology of $\mathcal{Z}_{F}(R)$, and, in particular, the Betti numbers of $\mathcal{Z}_{F}(R)$ as $R\rightarrow\infty$, more precisely, the asymptotics of their expected values.
For $0\le i \le d-1$ the corresponding Betti number $b_{i}(\cdot )$ is the dimension of the $i$'th homology group, so that a.s. \begin{equation} \label{eq:betai def} \beta_{i}(R)=\beta_{F;i}(R):=b_{i}(\mathcal{Z}_{F}(R)) = \sum\limits_{\gamma\subseteq \mathcal{Z}_{F}(R)} b_{i}(\gamma), \end{equation} summation over all nodal components $\gamma$ lying in $\mathcal{Z}_{F}(R)$. For example, $\beta_{0}=:\mathcal{N}_{F}(R)$ is the total number of connected components $\gamma\subseteq \mathcal{Z}_{F}(R)$ (``nodal count") analysed by Nazarov-Sodin, and $$\beta_{i}(R) = \beta_{d-1-i}(R)$$ by Poincar\'{e} duality. To be able to state Nazarov-Sodin's results we need to introduce the following axioms; by convention they are expressed in terms of the spectral measure rather than $F$ or its covariance function.
\begin{definition}[Axioms $(\rho 1)-(\rho 4)$ on $F$] \label{def:axioms rho1-4}
Let $F:\mathbb{R}^{d}\rightarrow\mathbb{R}$ be a Gaussian stationary random field, $$r_{F}(x-y)=r_{F}(x,y):=\mathbb E[F(x)\cdot F(y)]$$ the covariance function of $F$, and $\rho=\rho_{F}$ be its spectral measure, i.e. the Fourier transform of $r_{F}$ on $\mathbb{R}^{d}$.
\begin{enumerate}
\item $F$ satisfies $(\rho 1)$ if the measure $\rho$ has no atoms.
\item $F$ satisfies $(\rho 2)$ if for some $p>6$, $$\int\limits_{\mathbb{R}^{d}}\|\lambda\|^{p}d\rho(\lambda)<\infty.$$
\item $F$ satisfies $(\rho 3)$ if the support of $\rho$ does not lie in a linear hyperplane of $\mathbb{R}^{d}$.
\item $F$ satisfies $(\rho 4)$ if the interior of the support of $\rho$ is non-empty.
\end{enumerate}
\end{definition}
Axioms $(\rho 1)$, $(\rho 2)$ and $(\rho 3)$ ensure that the action of translations on $\mathbb{R}^{d}$ is ergodic, a.s. sufficient smoothness of $F$, and non-degeneracy of $F$ understood in proper sense, respectively. Axiom $(\rho 4)$ implies that any smooth function belongs to the support of the law of $F$, which, in turn, will yield the positivity of the number of nodal components, and positive representation of every topological type of nodal components.
Recall that $\mathcal{N}_{F}(R)=\beta_{0}(R)$ is the number of nodal components of $F$ entirely lying in $B(R)$, and let $V_{d}$ be the volume of the unit $d$-ball, and $\operatorname{Vol} B(R)=V_{d}\cdot R^{d}$ be the volume of the radius $R$ ball in $\mathbb{R}^{d}$. Nazarov and Sodin ~\cite{sodin_lec_notes,nazarov_sodin} proved that if $F$ satisfies $(\rho 1)-(\rho 3)$, then there exists a constant $c_{NS}=c_{NS}(\rho_{F})$ (``Nazarov-Sodin constant") so that $\frac{\mathcal{N}_{F}(R)}{\operatorname{Vol} B(R)}$ converges to $c_{NS}$, both in mean and a.s. That is, as $R\rightarrow\infty$, \begin{equation} \label{eq:nod numb conv mean}
\mathbb E\left[\left|\frac{\mathcal{N}_{F}(R)}{\operatorname{Vol} B(R)} - c_{NS} \right|\right] \rightarrow 0, \end{equation} so that, in particular, \begin{equation} \label{eq:exp nod numb asymp} \mathbb E[\mathcal{N}_{F}(R)] = c_{NS}\cdot \operatorname{Vol} B(R)+o(R^{d}). \end{equation} They also showed that imposing $(\rho 4)$ is sufficient (but not necessary) for the strict positivity of $c_{NS}$, and found other very mild sufficient conditions on $\rho$, so that $c_{NS}>0$. The validity of the asymptotic \eqref{eq:exp nod numb asymp} for the expected nodal count was extended ~\cite{kurlberg2018variation} to hold without imposing the ergodicity axiom $(\rho 1)$, with $c_{NS}=c_{NS}(\rho_{F})$ appropriately generalised, also establishing a stronger estimate for the error term as compared to the r.h.s. of \eqref{eq:exp nod numb asymp}.
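Although $c_{NS}$ is not known explicitly in general, the convergence \eqref{eq:nod numb conv mean} is easy to observe empirically. The following Python sketch (an illustration only, not used anywhere in this paper) produces an approximately stationary smooth planar Gaussian field by smoothing white noise with a Gaussian kernel, and counts the connected components of the two sign domains as a crude proxy for the nodal count; the grid size and smoothing scale are arbitrary choices, and boundary and nesting effects are ignored.

\begin{verbatim}
# Illustration only: empirical density of nodal structures for a planar Gaussian
# field with (approximately) Gaussian covariance, obtained by smoothing white noise.
import numpy as np
from scipy.ndimage import gaussian_filter, label

rng = np.random.default_rng(1)
n, sigma = 1024, 8.0                      # grid size and smoothing scale (arbitrary)
F = gaussian_filter(rng.standard_normal((n, n)), sigma, mode="wrap")

# connected components of {F > 0} and {F < 0}: a crude proxy for the nodal count
pos = label(F > 0)[1]
neg = label(F < 0)[1]
print("sign components per (smoothing length)^2:", (pos + neg) / (n / sigma) ** 2)
\end{verbatim}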
One might think that endowing the ``larger" components with the same weight $1$ as the ``smaller" components might be ``discriminatory" towards the larger ones, so that separating the counts based on the components' topology ~\cite{sarnak_wigman16} or geometry ~\cite{beliaev2018volume} would provide an adequate response for the alleged discrimination. These nevertheless do not address the important question of the {\em total} Betti number $\beta_{i}$, the main difficulty being that the individual Betti number $b_{i}(\gamma)$ of a nodal component $\gamma$ of $F$ is not bounded, even under the assumption that $\gamma \subseteq B(R)$ is entirely lying inside a compact domain. Despite this, we will be able to resolve this difficulty by controlling from above the total Betti number via Morse Theory ~\cite{milnor1963morse}, an approach already pursued by Gayet-Welschinger ~\cite{gayet2016betti} (see \S\ref{sec:proofs outline} below for a more detailed explanation).
\begin{theorem} \label{thm:Betti asymp Euclid} Let $F:\mathbb{R}^{d}\rightarrow\mathbb{R}$ be a centred Gaussian random field, satisfying axioms $(\rho 2)$ and $(\rho 3)$ of Definition \ref{def:axioms rho1-4}, $d\ge 2$, and $0\le i \le d-1$. Then
\begin{enumerate}[a.]
\item There exists a number $c_{i}=c_{F;i}\ge 0$ so that \begin{equation} \label{eq:exp Betti R^d} \mathbb E[\beta_{i}(R)] = c_{i}\cdot\operatorname{Vol} B(R) + o_{R\rightarrow\infty}(R^{d}). \end{equation}
\item If, in addition, $F$ satisfies $(\rho 1)$, then convergence \eqref{eq:exp Betti R^d} could be extended to hold in mean, i.e. \begin{equation} \label{eq:conv Betti L1 R^d}
\mathbb E\left[\left| \frac{\beta_{i}(R)}{\operatorname{Vol} B(R)} - c_{i} \right|\right] \rightarrow 0 \end{equation} as $R\rightarrow\infty$.
\item \label{it:rho4=>cNS>0} Further, if $F$ satisfies the axiom $(\rho 4)$ (in addition to $(\rho 2)$ and $(\rho 3)$, but not necessarily $(\rho 1)$), then $c_{i}>0$. The same conclusion holds for Berry's important monochromatic isotropic random waves in arbitrary dimensions (``Berry's random wave model'').
\end{enumerate}
\end{theorem}
\subsection{Motivation and background}
The Betti numbers of both the nodal and the excursion sets of Gaussian random fields serve as their important topological descriptor, and are therefore addressed in both mathematics and experimental physics literature, in particular cosmology ~\cite{park2013betti}. From the complex geometry perspective Gayet and Welschinger ~\cite{gayet2016betti} studied the distribution of the total Betti numbers of the zero set for the Kostlan Gaussian ensemble of degree $n$ random homogeneous polynomials on the $d$-dimensional projective space, and their generalisation to K\"{a}hler manifolds, $n\rightarrow\infty$. In the projective coordinates $x=[x_{0}:\ldots : x_{d}]\in \mathbb{R} \mathcal P^{d}$ we may write \begin{equation} \label{eq:Pn Kostlan def}
P_{n}(x) = \sum\limits_{|j|=n}\sqrt{{n \choose j}} a_{j} x^{j}, \end{equation}
where $j=(j_{0},\ldots,j_{d})$, $|j|=\sum\limits_{i=0}^{d}j_{i}$, $x^{j}=x_{0}^{j_{0}}\cdot \ldots \cdot x_{d}^{j_{d}}$, $ {n \choose j} = \frac{n!}{j_{0}!\cdot \ldots\cdot j_{d}!}$, and $\{a_{j}\}$ are standard Gaussian i.i.d. By the homogeneity of $P_{n}$, its zero set makes sense on the projective space. The Kostlan (also referred to as ``Shub-Smale") ensemble is an important model of random polynomials, uniquely invariant w.r.t. unitary transformations on $\mathbb{C} \mathcal P^{d}$. Restricted to the unit sphere $\mathcal{S}^{d}\subseteq \mathbb{R}^{d+1}$, the random fields $P_{n}$ are defined by the covariance function \begin{equation*} \mathbb E[P_{n}(x)\cdot P_{n}(y)] = \langle x,y\rangle^{n} = \left(\cos(\theta(x,y))\right)^{n}, \end{equation*} where $x,y\in \mathcal{S}^{d}$, the inner product $\langle \cdot,\cdot\rangle$ is inherited from $\mathbb{R}^{d+1}$, and $\theta(\cdot,\cdot)$ is the angle between two points on $\mathbb{R}^{d+1}$.
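The covariance identity above is straightforward to confirm numerically. The following Python sketch (an illustration only, for $d=2$, a small degree $n$ and two arbitrary points of $\mathcal{S}^{2}$) samples Kostlan polynomials directly from \eqref{eq:Pn Kostlan def} and compares the empirical covariance with $\langle x,y\rangle^{n}$.

\begin{verbatim}
# Illustration only: Monte Carlo check of E[P_n(x) P_n(y)] = <x,y>^n on S^2 (d = 2).
import itertools, math
import numpy as np

rng = np.random.default_rng(0)
d, n, trials = 2, 6, 200000

# multi-indices j = (j_0, ..., j_d) with |j| = n, and the weights sqrt(n choose j)
J = [j for j in itertools.product(range(n + 1), repeat=d + 1) if sum(j) == n]
w = np.array([math.sqrt(math.factorial(n) / math.prod(map(math.factorial, j))) for j in J])

def monomials(x):                         # the vector (x^j)_j at a point x of S^d
    return np.array([math.prod(c ** e for c, e in zip(x, j)) for j in J])

x = np.array([1.0, 0.0, 0.0])
y = np.array([0.6, 0.8, 0.0])             # <x,y> = 0.6

mx, my = w * monomials(x), w * monomials(y)
a = rng.standard_normal((trials, len(J)))            # i.i.d. standard Gaussian a_j
Px, Py = a @ mx, a @ my                               # samples of P_n(x), P_n(y)
print("empirical covariance:", np.mean(Px * Py))
print("predicted <x,y>^n   :", 0.6 ** n)
\end{verbatim}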
Upon scaling by $\sqrt{n}$ (the meaning is explained in Definition \ref{def:loc lim} below), the Kostlan polynomials \eqref{eq:Pn Kostlan def} admit ~\cite[\S 2.5.4]{sodin_lec_notes}, locally uniformly, a (stationary isotropic) limit random field on $\mathbb{R}^{d}$, namely the Bargmann-Fock ensemble defined by the ``Gaussian" covariance kernel \begin{equation} \label{eq:kappa Gauss BF} \kappa(x):=e^{-x^{2}/2}, \end{equation} see also ~\cite{beffara2017percolation,beliaev2017russo}. This indicates that one should expect the Betti numbers to be of order of magnitude $\approx n^{d/2}$. That this is so is supported by Gayet-Welschinger's upper bounds ~\cite{gayet2016betti} \begin{equation*} \mathbb E[b_{i}(P_{n}^{-1}(0))] \le A_{i} n^{d/2} \end{equation*} with some semi-explicit $A_{i}>0$, and the subsequent lower bounds ~\cite{gayet2014lower} \begin{equation} \label{eq:GW lower bound} \mathbb E[b_{i}(P_{n}^{-1}(0))] \ge a_{i} n^{d/2}, \end{equation} $a_{i}>0$, but to our best knowledge the important question of the true asymptotic behaviour of $b_{i}(P_{n}^{-1}(0))$ is still open.
\subsection{Betti numbers for Gaussian ensembles on Riemannian manifolds}
Since $\kappa$ of \eqref{eq:kappa Gauss BF} (or, rather, its Fourier transform) easily satisfies all Nazarov-Sodin's axioms $(\rho 1)-(\rho 4)$ of Definition \ref{def:axioms rho1-4}, one wishes to invoke Theorem \ref{thm:Betti asymp Euclid} with the Bargmann-Fock field in place of $F$, and try to deduce the results analogous to \eqref{eq:exp Betti R^d} for the Betti numbers of the nodal set of $P_{n}$ in \eqref{eq:Pn Kostlan def}. This is precisely the purpose of Theorem \ref{thm:tot Betti numb loc} below, valid in a scenario of {\em local translation invariant limits}, far more general than merely the Kostlan ensemble, whose introduction is our next goal.
Let $\mathcal{M}$ be a compact Riemannian $d$-manifold, and $\{f_{L}\}_{L\in\mathcal{L}}$ be a family of {\em smooth} Gaussian random fields $f_{L}:\mathcal{M}\rightarrow\mathbb{R}$, where the index $L$ runs over a {\em discrete} set $\mathcal{L}$, and let $K_{L}(\cdot,\cdot)$ be the covariance function corresponding to $f_{L}$, so that $$K_{L}(x,y)=\mathbb E[f_{L}(x)\cdot f_{L}(y)];$$ the parameter $L$ should be thought of as the scaling factor, generalising the role of $\sqrt{n}$ for the Kostlan ensemble. We scale $f_{L}$ restricted to a sufficiently small neighbourhood of a point $x\in \mathcal{M}$, so that the exponential map $\exp_{x}(\cdot):T_{x}\mathcal{M}\rightarrow\mathcal{M}$ is well defined. We define \begin{equation} \label{eq:fx,L scal def} f_{x,L}(u):= f_{L}(\exp_{x}(u/L)), \end{equation} with covariance
$$K_{x,L}(u,v) := K_{L}(\exp_{x}(u/L),\exp_{x}(v/L))$$ for $|u|,|v|<L\cdot r$, where $r$ is sufficiently small, uniformly in $x\in\mathcal{M}$; this allows $u,v$ to grow as $L\rightarrow\infty$.
\begin{definition}[Local translation invariant limits, cf. ~{\cite[Definition 2 on p. 6]{nazarov_sodin}}] \label{def:loc lim}
We say that the Gaussian ensemble $\{f_{L}\}_{L\in\mathcal{L}}$ possesses local translation invariant limits, if for almost all $x\in \mathcal{M}$ there exists a positive definite function $K_{x}:\mathbb{R}^{d}\rightarrow\mathbb{R}$, so that for all $R>0$, \begin{equation} \label{eq:covar scal lim}
\lim\limits_{L\rightarrow\infty}\sup\limits_{|u|,|v|\le R}\left| K_{x,L}(u,v)-K_{x}(u-v)\right| = 0. \end{equation} \end{definition}
Important examples of Gaussian ensembles possessing translation invariant local limits include (but not limited to) Kostlan's ensemble \eqref{eq:Pn Kostlan def} of random homogeneous polynomials, and Gaussian band-limited functions ~\cite{sarnak_wigman16}, i.e. Gaussian superpositions of Laplace eigenfunctions corresponding to eigenvalues lying in an energy window. For manifolds with spectral degeneracy, such as the sphere and the torus (and $d$-cube with boundary), the {\em monochromatic} random waves (i.e. Gaussian superpositions of Laplace eigenfunctions belonging to the same eigenspace) are a particular case of band-limited functions; two of the most interesting cases are those of random spherical harmonics (random Laplace eigenfunctions on the round unit $d$-sphere) ~\cite{wigman2009distribution,wigman2010fluctuations}, and ``Arithmetic Random Waves" (random Laplace eigenfunctions on the standard $d$-torus) ~\cite{oravecz2008leray,krishnapur2013nodal}.
In all the said examples of Gaussian ensembles on manifolds of our particular interest the scaling limit $K_{x}$ (and the associate Gaussian random field on $\mathbb{R}^{d}$) was independent of $x$, and the limit in \eqref{eq:covar scal lim} is uniform, attained in a strong quantitative form, see the discussion in ~\cite[\S 2.1]{beliaev2019mean}. We will also need the following, more technical concepts of uniform smoothness and non-degeneracy for $\{f_{L}\}$, introduced in ~{\cite[definitions 2-3, p. 14-15]{sodin_lec_notes}}.
\begin{definition}[Smoothness and non-degeneracy]\ \\
\begin{enumerate}
\item We say that $\{f_{L}\}$ is $C^{3-}$ smooth if for every $0<R<\infty$, \begin{equation*}
\limsup\limits_{L\rightarrow\infty}\sup\left\{ |\partial_{u}^{i}\partial^{j}_{v} K_{x,L}(u,v)|:\: |i|,|j|\le 3;\;x\in \mathcal{M}, \|u\|,\|v\|\le R \right\}< \infty. \end{equation*}
\item We say that $\{f_{L}\}$ is non-degenerate if for every $0<R<\infty$ \begin{equation*}
\liminf\limits_{L\rightarrow\infty}\inf\left\{ \mathbb E\left[\partial_{\xi}f_{x,L}(u)^{2}\right]:\: \xi\in\mathcal{S}^{d-1},\, x\in \mathcal{M},\,\|u\|\le R \right\}>0. \end{equation*}
\end{enumerate}
\end{definition}
Let $\{f_{L}\}_{L\in\mathcal{L}}$ be a $C^{3-}$ smooth, non-degenerate, Gaussian ensemble possessing translation invariant local limits $K_{x}$, corresponding to Gaussian random fields on $\mathbb{R}^{d}$ with spectral measure $\rho_{x}$, satisfying axioms $(\rho 1)-(\rho 3)$. Denote by $\mathcal{N}(f_{L};x,R/L)$ the number of nodal components of $f_{L}$ lying entirely in the geodesic ball $B_{x}(R/L)\subseteq\mathcal{M}$, and by $\mathcal{N}(f_{L})$ the {\em total} number of nodal components of $f_{L}$ on $\mathcal{M}$. In this setting Nazarov-Sodin ~\cite{sodin_lec_notes,nazarov_sodin} proved that \begin{equation} \label{eq:nod comp loc Riemann} \lim\limits_{R\rightarrow\infty}\limsup\limits_{L\rightarrow\infty}
\mathbb E\left[ \left| \frac{\mathcal{N}(f_{L};x,R/L)}{\operatorname{Vol} B(R)} - c_{NS}(\rho_{x}) \right| \right] = 0, \end{equation} with $c_{NS}(\cdot)$ same as in \eqref{eq:nod numb conv mean}.
For the total number $\mathcal{N}(f_{L})$ they glued the local results \eqref{eq:nod comp loc Riemann} to deduce, on invoking a two-parameter analogue of Egorov's Theorem yielding the {\em almost uniform} convergence of \eqref{eq:nod comp loc Riemann} w.r.t. $x$, that \begin{equation} \label{eq:nod comp glob Riemann} \lim\limits_{L\rightarrow\infty}
\mathbb E\left[ \left| \frac{\mathcal{N}(f_{L})}{V_{d}L^{d}} - \nu \right| \right] = 0 \end{equation} holds, with $$\nu:=\int\limits_{\mathcal{M}}c_{NS}(\rho_{x})dx.$$ In particular, \eqref{eq:nod comp glob Riemann} yields \begin{equation} \label{eq:exp nod comp glob} \mathbb E[\mathcal{N}(f_{L})] = V_{d}\nu \cdot L^{d} + o(L^{d}). \end{equation}
As it was mentioned above, in practice, in many applications, the scaling limit $K_{x}(\cdot)\equiv K(\cdot)$ does not depend on $x$, so that, assuming w.l.o.g. that $\operatorname{Vol}(\mathcal{M})=1$, the asymptotic constant $\nu$ in \eqref{eq:nod comp glob Riemann} (and \eqref{eq:exp nod comp glob}) is $\nu=c_{NS}(\rho)$, where $\rho$ is the Fourier transform of $K$. In this situation, in accordance with Theorem \ref{thm:Betti asymp Euclid}\ref{it:rho4=>cNS>0}, $\nu=c_{NS}>0$ is positive, if $(\rho 4)$ is satisfied. The following result extends \eqref{eq:nod comp loc Riemann} to arbitrary Betti numbers.
\begin{theorem} \label{thm:tot Betti numb loc}
Let $\{f_{L}\}_{L\in\mathcal{L}}$ be a $C^{3-}$ smooth, non-degenerate, Gaussian ensemble, $x\in \mathcal{M}$ satisfying \eqref{eq:covar scal lim} with some $K_{x}$ satisfying axioms $(\rho 1)-(\rho 3)$, and $0 \le i \le d-1$. Denote $\beta_{i;L}(x,R/L)=\beta_{i}(f_{L};x,R/L)$ to be the total $i$'th Betti number of the union of all components of $f_{L}^{-1}(0)$ entirely contained in the geodesic ball $B_{x}(R/L)$. Then for every $\epsilon>0$ \begin{equation} \label{eq:betti loc prob conv ci} \lim\limits_{R\rightarrow\infty}\limsup\limits_{L\rightarrow\infty}
\mathcal{P}r\left\{ \left| \frac{\beta_{i;L}(x,R/L)}{\operatorname{Vol} B(R)}- c_{i} \right| > \epsilon \right\} = 0. \end{equation} where $c_{i}$ is the same as in \eqref{eq:conv Betti L1 R^d}, corresponding to the random field defined by $K_{x}$. \end{theorem}
Theorem \ref{thm:tot Betti numb loc} asserts that the random variables $\left\{\frac{\beta_{i;L}(x,R/L)}{\operatorname{Vol} B(R)}\right\}_{L\in\mathcal{L}}$ converge in probability to $c_{i}$, in the double limit $L\rightarrow\infty$, and then $R\rightarrow\infty$. One would be tempted to try to deduce the convergence in mean for the same setting, the main obstacle being that $\beta_{i;L}(x,R/L)$ is not bounded, and, in principle, a small probability event might contribute positively to the expectation of $\beta_{i;L}(x,R/L)$. While it is plausible (if not likely) that a handy bound on the variance (or the second moment), such as ~\cite{estrade2016number,muirhead2019second}, for the critical points number would rule this out and establish the desired $L^{1}$-convergence in this, or, perhaps, slightly more restrictive scenario, we will not pursue this direction in the present manuscript, for the sake of keeping it compact.
Theorem \ref{thm:tot Betti numb loc} applied on the Kostlan ensemble \eqref{eq:Pn Kostlan def} of random polynomials, in particular, recovers Gayet-Welschinger's later lower bound \eqref{eq:GW lower bound}, but, finer, with high probability, it prescribes the asymptotics of the total Betti numbers of all the components lying in geodesic balls of radius slightly above $1/\sqrt{n}$, and hence, in this case, one might think of Theorem \ref{thm:tot Betti numb loc} as a refinement of \eqref{eq:GW lower bound}. It would be desirable to determine the true asymptotic law of $\mathbb E[b_{i}(P_{n}^{-1}(0))]$ (hopefully, for the more general scenario), though the possibility of giant (``percolating") components is a genuine consideration, and, if our present understanding of this subtlety is correct ~\cite{beliaev2019mean}, then, to resolve the asymptotics of $\mathbb E[b_{i}(P_{n}^{-1}(0))]$ the question whether they consume a positive proportion of the total Betti numbers cannot be possibly avoided. In fact, it is likely that for $d\ge 3$, with high probability, there exists a single percolating component consuming a high proportion of the space, and contributing positively to the Betti numbers, as found numerically by Barnett-Jin (presented within ~\cite{sarnak_wigman16}), and explained by P. Sarnak ~\cite{Sa}, with the use of percolating vs. non-percolating random fields (see ~\cite[\S 1.2]{beliaev2019mean} for more details, and also the discussion in \S\ref{sec:proofs outline} below).
\section{Outline of the proofs and discussion} \label{sec:proofs outline}
\subsection{Outline of the proofs of the principle results}
The principal novel result of this manuscript is Theorem \ref{thm:Betti asymp Euclid}. With Theorem \ref{thm:Betti asymp Euclid} given, the proof of Theorem \ref{thm:tot Betti numb loc} does not differ significantly from the proof of ~\cite[Theorem 5]{sodin_lec_notes} given ~\cite[Theorem 1]{sodin_lec_notes}. The key observation here is that while passing from the Euclidean random field $F_{x}$ to its perturbed Riemannian version $f_{x,L}$ in the vicinity of $x\in\mathcal{M}$, the topology of its nodal set is preserved on a high probability {\em stable} event, to be constructed, and hence so is its $i$'th Betti number. In fact, this was the conclusion from the argument presented in ~\cite[Theorem 6.2]{sarnak_wigman16} which will be reconstructed in \S\ref{sec:proof loc Riem}, albeit briefly, for the sake of completeness.
\begin{figure}
\caption{Computer simulations by A. Barnett. Left: Giant percolating nodal components for $3$-dimensional monochromatic isotropic waves. Right: Analogous picture for the ``Real Fubini-Study" (a random ensemble of homogeneous polynomials, with different law as compared to Kostlan's ensemble).}
\label{fig:Barnett 3d giant}
\end{figure}
To address the asymptotic expected nodal count $\mathcal{N}_{F}(R) = \beta_{F;0}(R)$, Nazarov-Sodin have developed the so-called {\em Integral Geometric sandwich}. The idea is that one bounds $\mathcal{N}_{F}(R)$ from below using $\mathcal{N}_{\cdot}(r)$, of radii $0<r<R$ much smaller than $R$ (``fixed''), and $F$ translated (equivalently, a shifted radius-$r$ ball), and from above using a version of $\mathcal{N}_{F}(r)$, where, rather than counting nodal components lying entirely in $B(r)$ (or its shift), we also include those components intersecting its boundary $\partial B(r)$. By invoking ergodic methods one shows that both these bounds converge to the same limit, and this in turn yields automatically both the asymptotics for the expected nodal count and the convergence in mean.
Unfortunately, since we endow each nodal component $\gamma$ with the, possibly unbounded, weight $b_{i}(\gamma)$, the upper bound in the sandwich does not seem to yield a useful result. We bypass this major obstacle by using a global bound on the expected Betti numbers via Morse Theory (and the Kac-Rice method), and then establishing the asymptotics for the expected number. Rather than working with arbitrarily chosen ``fixed'' radii $r>0$, we only work with ``good'' radii, defined so that they ``almost maximise'' the expected Betti numbers, so that we can infer the same for all sufficiently large radii $R>r$ (see \eqref{eq:eta def limsup} and \eqref{eq:betai/r^d>eta-eps}). In hindsight, we interpret working with the good radii as ``miraculously'' eliminating the possible fluctuations in the contribution to the Betti numbers of the giant percolating domains. Once the asymptotics for the expected Betti number has been determined, we work with the good radii once again to also yield the convergence in mean, with the help of the ergodic assumption $(\rho 1)$.
Another possible strategy for proving results like Theorem \ref{thm:Betti asymp Euclid} is to observe that, by naturally extending the definition of $\beta_{i}$ to smooth domains $\mathcal{D}\subseteq\mathbb{R}^{d}$ as \begin{equation*} \beta_{i}(\mathcal{D})=\beta_{i;F}(\mathcal{D}):= \sum\limits_{\gamma\subseteq \mathcal{D}} b_{i}(\gamma), \end{equation*} with summation over the (random) nodal components of $F$ lying in $\mathcal{D}$, $\beta_{i}(\cdot)$ is made into a {\em super-additive random variable}, i.e. for all pairwise disjoint domains $\mathcal{D}_{1},\ldots,\mathcal{D}_{k}\subseteq \mathbb{R}^{d}$, the inequality \begin{equation*} \beta_{i}\left(\bigcup\limits_{j=1}^{k}\mathcal{D}_{j}\right)\ge \sum\limits_{j=1}^{k} \beta_{i}(\mathcal{D}_{j}) \end{equation*} holds. It might then be tempting to apply the superadditive ergodic theorem ~\cite[Theorem 2.14, page 210]{krengel_book} (and its finer version ~\cite[p.~165]{nguyen}) to $\beta_{i}$. However, in this manuscript we will present a direct and explicit treatment of this subject.
\subsection{Discussion}
As mentioned above, a straightforward application of Theorem \ref{thm:tot Betti numb loc} to Kostlan's ensemble of random homogeneous polynomials implies, in particular, the lower bound \eqref{eq:GW lower bound} for the total expected Betti number for this ensemble due to Gayet-Welschinger, and its generalisations for K\"{a}hler manifolds. Our argument is entirely different from Gayet-Welschinger's: rather than working with the finite degree polynomials ~\eqref{eq:Pn Kostlan def}, as in ~\cite{gayet2014lower}, we first prove the result for the limit Bargmann-Fock random field on $\mathbb{R}^{d}$ (Theorem \ref{thm:Betti asymp Euclid}), and then deduce the result by a perturbative procedure following Nazarov-Sodin (Theorem \ref{thm:tot Betti numb loc}).
It is crucial to determine whether the global asymptotics \begin{equation*} \mathbb E[\beta_{i;L}] \sim c_{i}\operatorname{Vol}(\mathcal{M})\cdot L^{d}, \end{equation*} expected from its local probabilistic version \eqref{eq:betti loc prob conv ci}, could be extended to hold for the total expected Betti number of $f_{L}^{-1}(0)$ in some scenario, inclusive of all the motivational examples. Such a result would indicate that there are no giant ``percolating'' components, not lying inside any {\em macroscopic} (or slightly bigger) geodesic ball, that contribute positively to the Betti numbers. In fact some numerics due to Barnett-Jin (presented within ~\cite{sarnak_wigman16}) support the contrary for $d\ge 3$, as argued by Sarnak ~\cite{Sa}, see Figure \ref{fig:Barnett 3d giant}, and also ~\cite[\S 2.1]{beliaev2019mean}. To the best of our knowledge, at this stage this question is entirely open, save for the results on $\beta_{0;L}$ (and $\beta_{d-1;L}$) due to Nazarov-Sodin.
\section{Proof of Theorem \ref{thm:Betti asymp Euclid}}
\subsection{Auxiliary lemmas}
Recall that $\beta_{i}(R)=b_{i}(\mathcal{Z}_{F}(R))$ is defined in \eqref{eq:betai def}, and for $x\in \mathbb{R}^{d}$, $R>0$, introduce \begin{equation} \label{eq:bi loc sum def} \beta_{i}(x;R)=\beta_{F;i}(x,R) := \sum\limits_{\gamma\subseteq \mathbb{Z}_{F}\cap B_{x}(R)} b_{i}(\gamma), \end{equation} summation over all nodal components of $F$ contained in the shifted ball $B_{x}(R)$, or, equivalently \begin{equation*} \beta_{F;i}(x,R) = \beta_{T_{x}F;i}(R), \end{equation*} where $T_{x}$ acts by translation $(T_{x}F)(\cdot)=F(\cdot-x)$.
\begin{lemma}[Integral-Geometric sandwich, lower bound; cf. ~{\cite[Lemma 1]{sodin_lec_notes}}] \label{lem:Int Geom sand} For every $0<r<R$ we have the following inequality \begin{equation} \label{eq:Int Geom sand} \frac{1}{\operatorname{Vol} B(r)}\int\limits_{B(R-r)}\beta_{i}(x;r)dx \le \beta_{i}(R). \end{equation}
\end{lemma}
\begin{proof} Since if a nodal component $\gamma$ of $F$ is contained in $B_{x}(r)$ for some $x\in B(R-r)$, then $\gamma \subseteq B(R)$, we may invert the order of summation and integration to write: \begin{equation*} \begin{split} \frac{1}{\operatorname{Vol} B(r)}\int\limits_{B(R-r)}\beta_{i}(x;r)dx &= \frac{1}{\operatorname{Vol} B(r)}\int\limits_{B(R-r)}\sum\limits_{\gamma\subseteq \mathcal{Z}(F)}\mathbbm{1}_{\gamma\subseteq B_{x}(r)}\cdot b_{i}(\gamma)dx \\&=\frac{1}{\operatorname{Vol} B(r)}\sum\limits_{\gamma\subseteq \mathcal{Z}(F)\cap B(R)} b_{i}(\gamma)\cdot \operatorname{Vol}\{x\in B(R-r):\: \gamma\subseteq B_{x}(r) \} \\&\le \sum\limits_{\gamma\subseteq \mathcal{Z}(F)\cap B(R)} b_{i}(\gamma)=\beta_{i}(R), \end{split} \end{equation*} since \begin{equation*} \{x\in B(R-r):\: \gamma\subseteq B_{x}(r) \} \subseteq \bigcap\limits_{y\in \gamma}B_{y}(r) \end{equation*} is of volume $\le \operatorname{Vol} B(r)$. \end{proof}
The intuition behind the inequality \eqref{eq:Int Geom sand} is, in essence, the convexity of the involved quantities. One can also establish the upper bound counterpart of \eqref{eq:Int Geom sand}, for which one would need to introduce the analogue $\beta^{*}_{\cdot}(\cdot;\cdot)$, where the summation range on the r.h.s. of \eqref{eq:bi loc sum def} is extended to nodal components $\gamma$ merely {\em intersecting} $B_{x}(R)$. However, since the contribution of a single nodal component to the total Betti number is not bounded, and is expected to be {\em huge} for percolating components, we did not find a useful way to exploit such an upper bound inequality. Instead we are going to seek a global bound, via a Kac-Rice estimate of a relevant local quantity.
\begin{lemma}[Upper bound] \label{lem:upper bnd loc} Let $F$ and $i$ be as in Theorem \ref{thm:Betti asymp Euclid}. Then \begin{equation} \label{eq:upper bnd loc} \limsup\limits_{R\rightarrow\infty} \frac{\mathbb E[\beta_{i}(R)]}{R^{d}} < \infty. \end{equation} \end{lemma}
\begin{proof}
We use Morse Theory to reduce bounding the expected Betti number $\mathbb E[\beta_{i}(R)]$ from above to a {\em local} computation, performed with the aid of Kac-Rice method, an approach already exploited by Gayet-Welschinger ~\cite{gayet2016betti}. Let $\gamma\subseteq \mathbb{R}^{d}$ be a compact closed hypersurface, and $g:\mathbb{R}^{d}\rightarrow\mathbb{R}$ a smooth function so that its restriction $g|_{\gamma}$ to $\gamma$ is a Morse function (i.e. $g|_{\gamma}$ has no degenerate critical points). Then, as a particular consequence of the Morse inequalities ~\cite[Theorem 5.2 (2) on p. 29]{milnor1963morse}, we have \begin{equation*}
b_{i}(\gamma) \le \mathcal{C}_{i}(g|_{\gamma}), \end{equation*}
where $\mathcal{C}_{i}(g|_{\gamma})$ is the number of critical points of $g|_{\gamma}$ of Morse index $i$. Under the notation of Theorem \ref{thm:Betti asymp Euclid} it follows that \begin{equation} \label{eq:betai<=tot Crit}
\mathbb E[\beta_{i}(R)] \le \mathbb E[\mathcal{C}_{i}(g|_{F^{-1}(0) \cap B(R)})]\le \mathbb E[\mathcal{C}(g|_{F^{-1}(0) \cap B(R)})], \end{equation} the r.h.s. of \eqref{eq:betai<=tot Crit} being the total number of critical points of $g$ restricted to the nodal set of $F$ lying in $B(R)$, a local quantity that could be evaluated with the Kac-Rice method.
Now we evaluate the r.h.s. of \eqref{eq:betai<=tot Crit}, where we have the freedom to choose the function $g$, so long as it is a.s. Morse restricted to $F^{-1}(0)$. As a concrete simple case, we nominate the function $$\mathbb{R}^{d}\ni x=(x_{1},\ldots, x_{d})\mapsto g(x)=\|x\|^{2}=\sum\limits_{j=1}^{d}x_{j}^{2},$$
or, more generally, the family of functions $g_{p}(x)=\|x-p\|^{2}$, $p\in \mathbb{R}^{d}$, with the burden of proving that for some $p\in\mathbb{R}^{d}$, the restriction $g_{p}|_{F^{-1}(0)}$ of $g_{p}$ to $F^{-1}(0)$ is Morse a.s. For this particular choice of the family $g_{p}$, a point $x\in F^{-1}(0)\setminus \{p\}$ is a critical point of $g_{p}$ if and only if $\nabla F(x)$ is collinear to
$x-p$. Normalising $v_{1}:=\frac{x-p}{\|x-p\|}$, this is equivalent to $\nabla F(x)\perp v_{j}$, $j=2,\ldots d$, where $\{v_{j}\}_{2\le j\le d}$ is any orthonormal basis of $v_{1}^{\perp}$, and it is possible to make a locally smooth choice for $\{v_{j}\}_{2\le j\le d}$ as a function of $x$ (or, rather $v_{1}$), since $\mathcal{S}^{d-1}$ admits orthogonal frames on a finite partition of the sphere into coordinate patches.
Now, by ~\cite[Lemma 6.3, Lemma 6.5]{milnor1963morse}, a critical point $x\in F^{-1}(0)$, of $g_{p}$ is degenerate, if and only if $p=x+K^{-1}\cdot v_{1}$, with $K$ one of the (at most $d-1$) principal curvatures of $F^{-1}(0)$ at $x$ in direction $v_{1}$, and, by Sard's Theorem \cite[Theorem 6.6]{milnor1963morse}, given a sample function $F_{\omega}$, where $\omega\in\Omega$ is a sample point in the underlying sample space $\Omega$, the collection $A_{\omega}\subseteq \mathbb{R}^{d}$ of all ``bad" $p$, so that $g_{p}|_{F^{-1}(0)}$ contains a degenerate critical point is of vanishing Lebesgue measure, i.e. \begin{equation} \label{eq:mu(Aomega)=0} \mu(A_{\omega})=0, \end{equation} a.s. We are aiming at showing that there exists $p\in\mathbb{R}^{d}$ so that a.s. $p\notin A_{\omega}$; in fact, by the above, we will be able to conclude, via Fubini, that $\mu$-almost all $p$ will do (and then, since, by stationarity of $F$, there is no preference of points in $\mathbb{R}^{d}$, we will be able to carry out the computations with the simplest possible choice $p=0$, though the computations are not significantly more involved with arbitrary $p$). To this end we introduce the set $$\mathcal{A}=\{(p,\omega):\: p\in A_{\omega}\}\subseteq \mathbb{R}^{d}\times \Omega$$ on the measurable space $\mathbb{R}^{d}\times \Omega$, equipped with the measure $d\lambda=d\mu(p) d\mathcal{P}r(\omega)$. Since there is no measurability issue here, an inversion of the integral \begin{equation*} \lambda(\mathcal{A}) = \int\limits_{\mathcal{A}}d\mu(p) d\mathcal{P}r(\omega) = 0, \end{equation*} by \eqref{eq:mu(Aomega)=0}, yields that for $\mu$-almost all $p\in\mathbb{R}^{d}$, \begin{equation} \label{eq:prob(p bad)=0} \mathcal{P}r\{p\in A_{\omega}\} = 0. \end{equation}
The above \eqref{eq:prob(p bad)=0} yields a point $p\in\mathbb{R}^{d}$, so that $g_{p}|_{F^{-1}(0)}$ is a.s. Morse, and, in particular \eqref{eq:betai<=tot Crit} holds a.s. with $g=g_{p}$; by the stationarity of
$F$, we may assume that $p=0$, and we take $g=g_{0}$. Next we plan to employ the Kac-Rice method for evaluating the expected number of critical points of $g|_{F^{-1}(0)} $ as on the r.h.s. of \eqref{eq:betai<=tot Crit}. Recall from above that, for this particular choice of $g$, a point $x\in F^{-1}(0)\setminus \{0\}$ is a critical point of $g$, if and only if $\nabla F(x)$ is collinear to
$v_{1}=v_{1}(x):=\frac{x}{\|x\|}$, or, equivalently, $\nabla F(x)\perp v_{j}$, $j=2,\ldots d$, where $\{v_{j}\}_{2\le j\le d}$ is any orthonormal basis of $v_{1}^{\perp}$.
Let \begin{equation} \label{eq:G(x) def} G(x)=\left(F(x),\langle v_{2},\nabla F(x)\rangle,\ldots, \langle v_{d},\nabla F(x)\rangle\right) \end{equation} be the Gaussian random vector, and $C_{G}(x)$ its $d\times d$ covariance matrix. That the joint Gaussian distribution of $G(x)$ is non-degenerate, is guaranteed by the axiom $(\rho 3)$, since this axiom yields ~\cite[\S 1.2.1]{sodin_lec_notes} the non-degeneracy of the distribution of $\nabla F(x)$ (and hence of any linear transformation of $\nabla F(x)$ of full rank), and $F(x)$ is statistically independent of $\nabla F(x)$. By the Kac-Rice formula ~\cite[Theorem 6.3]{azais_wschebor}, using the non-degeneracy of the distribution of $G(x)$ as an input, we conclude that for every $\epsilon>0$ \begin{equation} \label{eq:exp Crit KR excise}
\mathbb E[\mathcal{C}(g|_{F^{-1}(0) \cap (B(R)\setminus B(\epsilon))})] = \int\limits_{B(R)\setminus B(\epsilon)} K_{1}(x)dx, \end{equation} where for $x\ne 0$, the density is defined as the Gaussian integral \begin{equation} \label{eq:K1 density x}
K_{1}(x) = K_{1;F}(x) = \frac{1}{(2\pi)^{d/2}\sqrt{|\det C_{G}(x)|}}\cdot \mathbb E[ |\det H_{G}(x) | \big| G(x)=0 ], \end{equation} and $H_{G}(\cdot)$ is the Hessian of $G$. Next, applying the Monotone Convergence Theorem to \eqref{eq:exp Crit KR excise} as $\epsilon\rightarrow 0$, and bearing in mind that a.s. $x=0$ is not a zero of $F$, we obtain \begin{equation} \label{eq:exp Crit KR}
\mathbb E[\mathcal{C}(g|_{F^{-1}(0) \cap B(R)})] = \int\limits_{B(R)} K_{1}(x)dx, \end{equation} extending the definition of $K_{1}$ at $x=0$ arbitrarily.
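For orientation, note that, since $F(x)$ is statistically independent of $\nabla F(x)$, the covariance matrix $C_{G}(x)$ appearing in \eqref{eq:K1 density x} has a block structure; the following is only a sketch, under the additional normalisation $\mathbb E[F(x)^{2}]=1$ (an assumption on the scaling of $F$, not used elsewhere):
\begin{equation*}
C_{G}(x)=
\begin{pmatrix}
1 & 0\\
0 & \Sigma(x)
\end{pmatrix},
\qquad
\Sigma(x)_{jk}=\mathbb E\big[\langle v_{j},\nabla F(x)\rangle \langle v_{k},\nabla F(x)\rangle\big],\; 2\le j,k\le d,
\end{equation*}
so that $|\det C_{G}(x)|=\det\Sigma(x)$, which is positive precisely when the distribution of the projections $\{\langle v_{j},\nabla F(x)\rangle\}_{j\ge 2}$ is non-degenerate.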
In what follows we are going to show that $K_{1}(\cdot)$ is {\em bounded} on $\mathbb{R}^{d}$, which, in light of \eqref{eq:exp Crit KR} is sufficient to yield \eqref{eq:upper bnd loc}, via \eqref{eq:betai<=tot Crit}. To this end we observe that, since $F$ is stationary, the value of $K_{1}$ is defined intrinsically as a function of
$v_{1}\in \mathcal{S}^{d-1}$, no matter how $v_{j}$, $j\ge 2$ were determined, as long as they constitute an o.n.b. of $v_{1}^{\perp}$, i.e. $$K_{1}(x)=K_{1}(x/\|x\|) = K_{1}(v_{1}),$$ despite the fact that the law of $G(x)$ does, in general, depend on the choice of the vectors $\{v_{j}\}$ , $j\ge 2$.
The upshot is that, given $v_{1}\in \mathcal{S}^{d-1}$, one can choose $\{v_{j}\}_{2\le j\le d}$ locally continuously, and this determines the law of $G(x)$
in a locally continuous and non-degenerate way as a function of $v_{1}$, so that, in particular, $ |\det C_{G}(\cdot)|>0$. Hence $K_{1}(\cdot)$ in \eqref{eq:K1 density x} is a continuous function of $v_{1}\in \mathcal{S}^{d-1}$, and therefore it is bounded by a constant depending only on the law of $F$ (though not necessarily defined continuously {\em at} the origin). As was already mentioned, the boundedness of $K_{1}$ is sufficient to yield the statement \eqref{eq:upper bnd loc} of Lemma \ref{lem:upper bnd loc}.
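Explicitly (with $C_{0}$ and $V_{d}$ introduced here only for this computation: $C_{0}:=\sup_{v_{1}\in\mathcal{S}^{d-1}}K_{1}(v_{1})<\infty$ and $V_{d}$ the volume of the unit ball in $\mathbb{R}^{d}$), the identity \eqref{eq:exp Crit KR} gives
\begin{equation*}
\mathbb E[\mathcal{C}(g|_{F^{-1}(0) \cap B(R)})] = \int\limits_{B(R)} K_{1}(x)dx \le C_{0}\cdot \operatorname{Vol} B(R) = C_{0}V_{d}\, R^{d},
\end{equation*}
the value of $K_{1}$ at the origin being immaterial for the integral; combined with \eqref{eq:betai<=tot Crit}, this yields the asserted bound.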
\end{proof}
The following lemma is a restatement of ~\cite[Proposition 5.2]{sarnak_wigman} for random fields satisfying $(\rho 4)$, and of ~\cite[Theorem 1.3(i)]{canzani2019topology} for Berry's monochromatic isotropic waves in higher dimensions; its proof is therefore omitted here.
\begin{lemma} \label{lem:lower bound}
Let $F:\mathbb{R}^{d}\rightarrow\mathbb{R}$ be a Gaussian random field, $\mathcal{H}(d-1)$ the collection of all diffeomorphism classes of closed $(d-1)$-manifolds that have an embedding in $\mathbb{R}^{d}$, and for $H\in\mathcal{H}(d-1)$ denote $\mathcal{N}_{F,H}(R)$ the number of nodal components of $F$, entirely contained in $B(R)$ and diffeomorphic to $H$. Then if $F$ either satisfies $(\rho 4)$ or it is Berry's monochromatic isotropic waves, one has: \begin{equation*} \liminf\limits_{R\rightarrow\infty}\frac{\mathbb E[\mathcal{N}_{F,H}(R)]}{R^{d}} > 0. \end{equation*}
\end{lemma}
\subsection{Proof of Theorem \ref{thm:Betti asymp Euclid}}
\begin{proof}
First we aim at proving \eqref{eq:exp Betti R^d}, which will allow us to deduce \eqref{eq:conv Betti L1 R^d} with the help of \eqref{eq:Int Geom sand}. Take \begin{equation} \label{eq:eta def limsup} \eta:= \limsup\limits_{R\rightarrow\infty}\frac{\mathbb E[\beta_{i}(R)]}{R^{d}}. \end{equation} Then $\eta$ is necessarily finite, thanks to Lemma \ref{lem:upper bnd loc}. We claim that, in fact, the $\limsup$ in \eqref{eq:eta def limsup} is a limit, for which it is sufficient to show that \begin{equation} \label{eq:liminf = eta} \liminf\limits_{R\rightarrow\infty}\frac{\mathbb E[\beta_{i}(R)]}{R^{d}} \ge \eta. \end{equation} To this end we take $\epsilon >0$ to be an arbitrary positive number, and, by the definition of $\eta$ as a $\limsup$, we may choose $r=r(\epsilon)>0$ so that \begin{equation} \label{eq:betai/r^d>eta-eps} \frac{\mathbb E[\beta_{i}(r)]}{r^{d}} > \eta - \epsilon. \end{equation}
We now take $R>r$, and appeal to the Integral Geometric sandwich \eqref{eq:Int Geom sand}, so that taking an expectation of both sides of \eqref{eq:Int Geom sand} yields \begin{equation} \label{eq:E[bi] convexity} \mathbb E[\beta_{i}(R)] \ge \frac{1}{\operatorname{Vol} B(r)}\int\limits_{B(R-r)}\mathbb E[\beta_{i}(x;r)]dx = \frac{(R-r)^{d}}{r^{d}}\cdot \mathbb E[\beta_{i}(r)], \end{equation} by the stationarity of $F$. Substituting \eqref{eq:betai/r^d>eta-eps} into \eqref{eq:E[bi] convexity}, it follows that \begin{equation*} \mathbb E[\beta_{i}(R)] \ge (R-r)^{d}\cdot (\eta-\epsilon), \end{equation*} and hence, dividing by $R^{d}$, and taking $\liminf\limits_{R\rightarrow\infty}$ (note that $r$ is kept fixed), we obtain \begin{equation*} \liminf\limits_{R\rightarrow\infty}\frac{\mathbb E[\beta_{i}(R)]}{R^{d}} \ge \eta-\epsilon. \end{equation*} Since $\epsilon>0$ is arbitrary, this certainly implies \eqref{eq:liminf = eta}, which, as it was mentioned above, implies that $\eta$ in \eqref{eq:eta def limsup} is a limit, a restatement of \eqref{eq:exp Betti R^d} (with $c_{i} = \frac{\eta}{V_{d}}$).
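For completeness, the equality in \eqref{eq:E[bi] convexity} unwinds as follows (writing $V_{d}$ for the volume of the unit ball in $\mathbb{R}^{d}$, so that $\operatorname{Vol} B(\rho)=V_{d}\rho^{d}$):
\begin{equation*}
\frac{1}{\operatorname{Vol} B(r)}\int\limits_{B(R-r)}\mathbb E[\beta_{i}(x;r)]dx
= \frac{\operatorname{Vol} B(R-r)}{\operatorname{Vol} B(r)}\cdot\mathbb E[\beta_{i}(r)]
= \frac{(R-r)^{d}}{r^{d}}\cdot \mathbb E[\beta_{i}(r)],
\end{equation*}
since, by the stationarity of $F$, $\mathbb E[\beta_{i}(x;r)]=\mathbb E[\beta_{i}(r)]$ for every $x\in\mathbb{R}^{d}$.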
Next, having proved \eqref{eq:exp Betti R^d}, we are going to deduce the convergence in mean \eqref{eq:conv Betti L1 R^d}, this time assuming the axiom $(\rho 1)$, which yields that the action of the translations $\{T_{x}\}_{x\in\mathbb{R}^{d}}$ is ergodic, a fact proved independently by Fomin ~\cite{fomin}, Grenander ~\cite{grenander1950stochastic}, and Maruyama ~\cite{maruyama1949harmonic} (see also ~\cite[Theorem 3]{sodin_lec_notes}). Let $0<r<R$, and denote the random variable \begin{equation} \label{eq:Psi i def} \Psi_{i}(R,r)=\Psi_{i}(F;R,r) :=\frac{1}{\operatorname{Vol} B(r)}\int\limits_{B(R-r)}\beta_{i}(x;r)dx, \end{equation} so that the Integral Geometric sandwich \eqref{eq:Int Geom sand} reads \begin{equation} \label{eq:Psi<=beta} \Psi_{i}(R,r) \le \beta_{i}(R), \end{equation} and the aforementioned ergodic theorem asserts that, for $r$ fixed, as $R\rightarrow\infty$, \begin{equation*} \frac{1}{\operatorname{Vol} B(R-r)}\Psi_{i}(R,r) \rightarrow \frac{\mathbb E[\beta_{i}(r)]}{\operatorname{Vol} B(r)} \end{equation*} in mean (and a.s.), so that we may deduce the same for \begin{equation} \label{eq:Psi/B(R)->betai/B(r)} \frac{1}{\operatorname{Vol} B(R)}\Psi_{i}(R,r) \rightarrow \frac{\mathbb E[\beta_{i}(r)]}{\operatorname{Vol} B(r)}, \end{equation} in mean.
Now let $\epsilon>0$ be arbitrary, and use \eqref{eq:exp Betti R^d}, now at our disposal, to choose $r=r(\epsilon)$ sufficiently large (but fixed) so that \begin{equation} \label{eq:beta/B(r)-ci<eps/3}
\left|\frac{\mathbb E[\beta_{i}(r)]}{\operatorname{Vol} B(r)} - c_{i}\right| < \frac{\epsilon}{3}, \end{equation} and also, \begin{equation} \label{eq:exp(betai) Cauchy}
\left|\frac{\mathbb E[\beta_{i}(R)]}{\operatorname{Vol} B(R)}-\frac{\mathbb E[\beta_{i}(r)]}{\operatorname{Vol} B(r)}\right| < \frac{\epsilon}{4}, \end{equation} since the function $$r\mapsto \frac{\mathbb E[\beta_{i}(r)]}{\operatorname{Vol} B(r)}$$ is Cauchy as $r\rightarrow\infty$, its limit existing by \eqref{eq:exp Betti R^d}. Next, use \eqref{eq:Psi/B(R)->betai/B(r)} in order for the inequality \begin{equation} \label{eq:exp(Psii-betai)<eps/3}
\mathbb E\left[\left|\frac{1}{\operatorname{Vol} B(R)}\Psi_{i}(R,r) - \frac{\mathbb E[\beta_{i}(r)]}{\operatorname{Vol} B(r)}\right|\right] < \frac{\epsilon}{3}, \end{equation} to hold, provided that $R$ is sufficiently large (depending on $r$ and $\epsilon$). Note that, thanks to \eqref{eq:Psi<=beta}, we have \begin{equation} \label{eq:Ebetai-EPsii} \begin{split}
&0\le \mathbb E\left[\left| \frac{\beta_{i}(R)}{\operatorname{Vol} B(R)} - \frac{1}{\operatorname{Vol} B(R)}\Psi_{i}(R,r)\right| \right]= \mathbb E\left[ \frac{\beta_{i}(R)}{\operatorname{Vol} B(R)} - \frac{1}{\operatorname{Vol} B(R)}\Psi_{i}(R,r) \right] \\&=\frac{\mathbb E[\beta_{i}(R)]}{\operatorname{Vol} B(R)} - \frac{1}{\operatorname{Vol} B(R)}\mathbb E[\Psi_{i}(R,r)] = \frac{\mathbb E[\beta_{i}(R)]}{\operatorname{Vol} B(R)} - \frac{\operatorname{Vol} B(R-r)}{\operatorname{Vol} B(r) \operatorname{Vol} B(R)}\mathbb E[\beta_{i}(r)] \\&= \frac{\mathbb E[\beta_{i}(R)]}{\operatorname{Vol} B(R)} - (1+o_{R\rightarrow\infty}(1)) \cdot\frac{\mathbb E[\beta_{i}(r)]}{\operatorname{Vol} B(r)} < \frac{\epsilon}{3} \end{split} \end{equation} for $R$ sufficiently large, by \eqref{eq:Psi i def}, the stationarity of $F$, and \eqref{eq:exp(betai) Cauchy}. We consolidate all the above inequalities by using the triangle inequality to write \begin{equation*} \begin{split}
&\mathbb E\left[ \left|\frac{\beta_{i}(R)}{\operatorname{Vol} B(R)} -c_{i}\right| \right] \le \mathbb E\left[ \frac{\beta_{i}(R)}{\operatorname{Vol} B(R)} - \frac{1}{\operatorname{Vol} B(R)} \Psi_{i}(R,r) \right]
\\&+ \mathbb E\left[ \left|\frac{1}{\operatorname{Vol} B(R)}\Psi_{i}(R,r) - \frac{\mathbb E[\beta_{i}(r)]}{\operatorname{Vol} B(r)}\right| \right] +
\mathbb E\left[ \left|\frac{\mathbb E[\beta_{i}(r)]}{\operatorname{Vol} B(r)} - c_{i}\right| \right] < \epsilon, \end{split} \end{equation*} by \eqref{eq:beta/B(r)-ci<eps/3}, \eqref{eq:exp(Psii-betai)<eps/3} and \eqref{eq:Ebetai-EPsii}. Since $\epsilon>0$ was an arbitrary positive number, the mean convergence \eqref{eq:conv Betti L1 R^d} is now established. Finally, we observe that Theorem \ref{thm:Betti asymp Euclid}\ref{it:rho4=>cNS>0} is a direct consequence of Lemma \ref{lem:lower bound}. Theorem \ref{thm:Betti asymp Euclid} is now proved.
\end{proof}
\section{Proof of Theorem \ref{thm:tot Betti numb loc}} \label{sec:proof loc Riem}
Let $x\in\mathcal{M}$ be a point as postulated in Theorem \ref{thm:tot Betti numb loc}, $K_{x}$ the corresponding covariance kernel, and $F_{x}$ the centred Gaussian random field defined by $K_{x}$. Recall that $f_{x,L}(\cdot)$, defined in \eqref{eq:fx,L scal def} on $\mathbb{R}^{d}$ via the identification $T_{x}(\mathcal{M})\cong \mathbb{R}^{d}$, is the scaled version of $f_{L}$, converging, in the limit $L\rightarrow\infty$, to $F_{x}$, in accordance with \eqref{eq:covar scal lim}. By the manifold structure of $\mathcal{M}$, the exponential map $\exp_{x}:T_{x}\rightarrow\mathcal{M}$ is a diffeomorphism on a sufficiently small ball $B(r)\subseteq T_{x}$, with $r>0$ independent of $x$. Hence, for every $R>0$, the diffeomorphism types in $B(R)\subseteq\mathbb{R}^{d}\cong T_{x}(\mathcal{M})$ are preserved under the {\em scaled} exponential map $$\exp_{x;L}:u\mapsto \exp_{x}(u/L),$$ provided that $L$ is sufficiently large. In particular, if $\gamma\subseteq B(R)$ is a smooth hypersurface, then for every $0\le i\le d-1$ \begin{equation} \label{eq:bi exp scal preserve} b_{i}(\gamma)=b_{i}(\exp_{x;L}(\gamma)). \end{equation} Further, for $r>0$ sufficiently small $\exp_{x}$ maps $B(r)$ into the geodesic ball $B_{x}(r)$, so that, for every $R>0$, and $L$ sufficiently large, we have \begin{equation} \label{eq:exp map pres dist asymp} \exp_{x;L}(B(R)) = B_{x}(R/L). \end{equation}
We can then infer from \eqref{eq:bi exp scal preserve}, combined with \eqref{eq:exp map pres dist asymp}, that \begin{equation} \label{eq:beta exp scal perturb} \beta_{f_{x,L};i}(R)= \beta_{i}(f_{L};x,R/L) \end{equation} holds for every $R>0$ and $L$ sufficiently large. We observe that, by the assumption \eqref{eq:covar scal lim} of Theorem \ref{thm:tot Betti numb loc}, the Gaussian random fields $\{f_{x,L}\}$ converge in law to the Gaussian random field $F_{x}$. That alone does not ensure that one can compare the sample functions of $f_{x,L}$ to the sample functions of $F_{x}$ without {\em coupling} them in a particular way (i.e. defining both on the same probability space $\Omega$ so as to satisfy some postulated properties). Luckily, such a convenient coupling has already been constructed in ~\cite[Lemma 4]{sodin_lec_notes}, and we will reuse it for our purposes.
Our aim is to prove the following result, which, taking into account Theorem \ref{thm:Betti asymp Euclid} applied to $F_{x}$, together with \eqref{eq:beta exp scal perturb}, yields Theorem \ref{thm:tot Betti numb loc} at once. We denote by $\Omega$ the underlying probability space, on which all the random variables are going to be defined, and by $\mathcal{P}r$ the associated probability measure.
\begin{proposition} \label{prop:perturb Betti} Under the assumptions of Theorem \ref{thm:tot Betti numb loc}, there exists a coupling of $F_{x}$ and $\{f_{x,L}\}$ so that for every $R>0$ and $\delta>0$ there exists a number $L_{0}=L_{0}(R,\delta)\in\mathcal{L}$ sufficiently big, so that for all $L>L_{0}$ the following inequality holds outside an event of probability $<\delta$: \begin{equation} \label{eq:bi perturb} \beta_{F_{x};i}(R-1) \le \beta_{f_{x,L};i}(R) \le \beta_{F_{x};i}(R+1). \end{equation} \end{proposition}
In what follows we are going to exhibit a construction of the small exceptional event from ~\cite{sodin_lec_notes}, on which \eqref{eq:bi perturb} might fail, prove by way of construction that it is of arbitrarily small probability, and finally conclude this section with a proof that \eqref{eq:bi perturb} holds outside the exceptional event.
For $R>0$, $L\in \mathcal{L}$, $\alpha>0$ we denote the following ``bad" events in $\Omega$: \begin{equation*}
\Delta_{1}= \Delta_{1}(R,L,\alpha) = \left\{\|f_{x,L}- F_{x}\|_{C^{1}(\overline{B}(2R))} > \alpha\right\}, \end{equation*}
and the ``unstable" event \begin{equation*}
\Delta_{4} = \Delta_{4}(R,\alpha) = \left\{\min\limits_{y\in \overline{B}(2R)}\max\{ |F_{x}(y)|,|\nabla F_{x}(y)| \} < 2\alpha \right\}, \end{equation*} (with the more technical events $\Delta_{2},\Delta_{3}$ unnecessary for the purposes of this manuscript), and then set the exceptional event \begin{equation} \label{eq:Delta except} \Delta= \Delta(R,L,\alpha) :=\Delta_{1}\cup \Delta_{4}. \end{equation}
The following bounds for the bad events are due to Nazarov-Sodin ~\cite{sodin_lec_notes} (see also ~\cite{sarnak_wigman16,beliaev2018volume}).
\begin{lemma} \label{lem:Deltai small} There exists a coupling of $F_{x}$ and $\{f_{x,L}\}$ on $\Omega$, so that the following estimates hold. \begin{description}
\item[a. ~{\cite[Lemma 4]{sodin_lec_notes}}] For every $R>0$, $\alpha>0$ \begin{equation*} \limsup\limits_{L\rightarrow\infty} \mathcal{P}r\left(\Delta_{1}(R,L,\alpha)\right) = 0. \end{equation*}
\item[b. ~{\cite[Lemma 5]{sodin_lec_notes}}] For every $R>0$, \begin{equation*} \lim\limits_{\alpha\rightarrow 0}\mathcal{P}r\left(\Delta_{4}(R,\alpha)\right) = 0. \end{equation*}
\end{description}
\end{lemma}
The following lemma, due to Nazarov-Sodin, shows that if a function has no low lying critical points, then its nodal set is stable under small perturbations.
\begin{lemma}[~{\cite[Lemmas 6-7]{sodin_lec_notes}}, ~{\cite[Proposition 6.8]{sarnak_wigman16}}] \label{lem:func perturb comp}
Let $\alpha>0$, $R>1$, and let $f:B(R)\rightarrow\mathbb{R}$ be a $C^{1}$-smooth function on the open ball $B=B(R)\subseteq\mathbb{R}^{d}$, such that for every $y\in B(R)$, either $|f(y)|>\alpha$ or $|\nabla f(y)|>\alpha$. Let $g\in C^{1}(B)$ be such that
$\sup\limits_{y\in B}|f(y)-g(y)|<\alpha$. Then each nodal component $\gamma$ of $f^{-1}(0)$ lying in $B(R-1)$ generates a nodal component $\gamma'$ of $g$ diffeomorphic to $\gamma$ lying in $B(R)$. Moreover, the map $\gamma\mapsto \gamma'$ between the nodal components of $f$ lying in $B(R-1)$ and the nodal components of $g$ lying in $B(R)$ is injective. \end{lemma}
We are now ready to give a proof of Proposition \ref{prop:perturb Betti}.
\begin{proof}[Proof of Proposition \ref{prop:perturb Betti}]
Let $R>0$ and $\delta>0$ be given. An application of Lemma \ref{lem:Deltai small}b yields a number $\alpha=\alpha(R,\delta)$ so that \begin{equation*} \mathcal{P}r(\Delta_{4}(R,\alpha)) < \delta/2, \end{equation*} and subsequently we apply Lemma \ref{lem:Deltai small}a to obtain a number $L_{0}=L_{0}(R,\delta,\alpha)$ so that for all $L>L_{0}$, \begin{equation*} \mathcal{P}r(\Delta_{1}(R,L,\alpha)) < \delta/2. \end{equation*}
Defining the exceptional event as in \eqref{eq:Delta except}, the above shows that \begin{equation*} \mathcal{P}r(\Delta)<\delta. \end{equation*}
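For completeness, this is just the union bound applied to the definition \eqref{eq:Delta except} of $\Delta$:
\begin{equation*}
\mathcal{P}r(\Delta) \le \mathcal{P}r(\Delta_{1}(R,L,\alpha)) + \mathcal{P}r(\Delta_{4}(R,\alpha)) < \frac{\delta}{2}+\frac{\delta}{2}=\delta.
\end{equation*}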
We now claim that \eqref{eq:bi perturb} is satisfied on $\Omega\setminus\Delta$; by the above, this is sufficient to yield the statement of Proposition \ref{prop:perturb Betti}, and, as was previously mentioned, also of Theorem \ref{thm:tot Betti numb loc}. Outside of $\Delta$ we have both \begin{equation*}
\min\limits_{y\in \overline{B}(2R)}\max\{ |F_{x}(y)|,|\nabla F_{x}(y)| \} > 2\alpha \end{equation*} and \begin{equation*}
\|f_{x,L}- F_{x}\|_{C^{1}(\overline{B}(2R))} < \alpha \end{equation*} for $L>L_{0}$, and these two, via the triangle inequality applied at every $y\in\overline{B}(2R)$, also allow us to infer \begin{equation*}
\min\limits_{y\in \overline{B}(2R)}\max\{ |f_{x,L}(y)|,|\nabla f_{x,L}(y)| \} > \alpha \end{equation*} for $L>L_{0}$. The first inequality of \eqref{eq:bi perturb} now follows upon a straightforward application of Lemma \ref{lem:func perturb comp}, with $F_{x}$ and $f_{x,L}$ taking the roles of $f$ and $g$ respectively, whereas the second inequality of \eqref{eq:bi perturb} follows upon reversing the roles of $f$ and $g$. Proposition \ref{prop:perturb Betti} is now proved.
\end{proof}
\end{document}
\begin{document}
\baselineskip = 16pt
\newcommand \ZZ {{\mathbb Z}} \newcommand \NN {{\mathbb N}} \newcommand \RR {{\mathbb R}} \newcommand \PR {{\mathbb P}} \newcommand \AF {{\mathbb A}} \newcommand \GG {{\mathbb G}} \newcommand \QQ {{\mathbb Q}} \newcommand \CC {{\mathbb C}} \newcommand \bcA {{\mathscr A}} \newcommand \bcC {{\mathscr C}} \newcommand \bcD {{\mathscr D}} \newcommand \bcF {{\mathscr F}} \newcommand \bcG {{\mathscr G}} \newcommand \bcH {{\mathscr H}} \newcommand \bcM {{\mathscr M}} \newcommand \bcI {{\mathscr I}} \newcommand \bcJ {{\mathscr J}} \newcommand \bcK {{\mathscr K}} \newcommand \bcL {{\mathscr L}} \newcommand \bcO {{\mathscr O}} \newcommand \bcP {{\mathscr P}} \newcommand \bcQ {{\mathscr Q}} \newcommand \bcR {{\mathscr R}} \newcommand \bcS {{\mathscr S}} \newcommand \bcV {{\mathscr V}} \newcommand \bcU {{\mathscr U}} \newcommand \bcW {{\mathscr W}} \newcommand \bcX {{\mathscr X}} \newcommand \bcY {{\mathscr Y}} \newcommand \bcZ {{\mathscr Z}} \newcommand \goa {{\mathfrak a}} \newcommand \gob {{\mathfrak b}} \newcommand \goc {{\mathfrak c}} \newcommand \gom {{\mathfrak m}} \newcommand \gon {{\mathfrak n}} \newcommand \gop {{\mathfrak p}} \newcommand \goq {{\mathfrak q}} \newcommand \goQ {{\mathfrak Q}} \newcommand \goP {{\mathfrak P}} \newcommand \goM {{\mathfrak M}} \newcommand \goN {{\mathfrak N}} \newcommand \uno {{\mathbbm 1}} \newcommand \Le {{\mathbbm L}} \newcommand \Spec {{\rm {Spec}}} \newcommand \Gr {{\rm {Gr}}} \newcommand \Pic {{\rm {Pic}}} \newcommand \Jac {{{J}}} \newcommand \Alb {{\rm {Alb}}} \newcommand \Corr {{Corr}} \newcommand \Chow {{\mathscr C}} \newcommand \Sym {{\rm {Sym}}} \newcommand \Prym {{\rm {Prym}}} \newcommand \cha {{\rm {char}}} \newcommand \eff {{\rm {eff}}} \newcommand \tr {{\rm {tr}}} \newcommand \Tr {{\rm {Tr}}} \newcommand \pr {{\rm {pr}}} \newcommand \ev {{\it {ev}}} \newcommand \cl {{\rm {cl}}} \newcommand \interior {{\rm {Int}}} \newcommand \sep {{\rm {sep}}} \newcommand \td {{\rm {tdeg}}} \newcommand \alg {{\rm {alg}}} \newcommand \im {{\rm im}} \newcommand \gr {{\rm {gr}}} \newcommand \op {{\rm op}} \newcommand \Hom {{\rm Hom}} \newcommand \Hilb {{\rm Hilb}} \newcommand \Sch {{\mathscr S\! }{\it ch}} \newcommand \cHilb {{\mathscr H\! }{\it ilb}} \newcommand \cHom {{\mathscr H\! }{\it om}} \newcommand \colim {{{\rm colim}\, }} \newcommand \End {{\rm {End}}} \newcommand \coker {{\rm {coker}}} \newcommand \id {{\rm {id}}} \newcommand \van {{\rm {van}}} \newcommand \spc {{\rm {sp}}} \newcommand \Ob {{\rm Ob}} \newcommand \Aut {{\rm Aut}} \newcommand \cor {{\rm {cor}}} \newcommand \Cor {{\it {Corr}}} \newcommand \res {{\rm {res}}} \newcommand \red {{\rm{red}}} \newcommand \Gal {{\rm {Gal}}} \newcommand \PGL {{\rm {PGL}}} \newcommand \Bl {{\rm {Bl}}} \newcommand \Sing {{\rm {Sing}}} \newcommand \spn {{\rm {span}}} \newcommand \Nm {{\rm {Nm}}} \newcommand \inv {{\rm {inv}}} \newcommand \codim {{\rm {codim}}} \newcommand \Div{{\rm{Div}}} \newcommand \CH{{\rm{CH}}} \newcommand \sg {{\Sigma }} \newcommand \DM {{\sf DM}} \newcommand \Gm {{{\mathbb G}_{\rm m}}} \newcommand \tame {\rm {tame }} \newcommand \znak {{\natural }} \newcommand \lra {\longrightarrow} \newcommand \hra {\hookrightarrow} \newcommand \rra {\rightrightarrows} \newcommand \ord {{\rm {ord}}} \newcommand \Rat {{\mathscr Rat}} \newcommand \rd {{\rm {red}}} \newcommand \bSpec {{\bf {Spec}}} \newcommand \Proj {{\rm {Proj}}} \newcommand \pdiv {{\rm {div}}}
\newcommand \wt {\widetilde } \newcommand \ac {\acute } \newcommand \ch {\check } \newcommand \ol {\overline } \newcommand \Th {\Theta} \newcommand \cAb {{\mathscr A\! }{\it b}}
\newenvironment{pf}{\par\noindent{\em Proof}.}{
\framebox(6,6) \par
}
\newtheorem{theorem}[subsection]{Theorem} \newtheorem{conjecture}[subsection]{Conjecture} \newtheorem{proposition}[subsection]{Proposition} \newtheorem{lemma}[subsection]{Lemma} \newtheorem{remark}[subsection]{Remark} \newtheorem{remarks}[subsection]{Remarks} \newtheorem{definition}[subsection]{Definition} \newtheorem{corollary}[subsection]{Corollary} \newtheorem{example}[subsection]{Example} \newtheorem{examples}[subsection]{examples}
\title{Chow groups of conic bundles in $\PR^5$ and the Generalised Bloch's conjecture} \author{Kalyan Banerjee}
\address{Harish Chandra Research Institute, India}
\email{[email protected]}
\begin{abstract} Consider the Fano surface of a conic bundle embedded in $\PR^5$. Let $i$ denote the natural involution acting on this surface. In this note we provide an obstruction to the identity action of the involution on the group of algebraically trivial zero cycles modulo rational equivalence on the surface. \end{abstract} \maketitle
\section{Introduction}
One of the very important problems in algebraic geometry is to understand the Chow group of zero cycles on a smooth projective surface with geometric genus and irregularity equal to $0$. It was already proved by Mumford \cite{M} that for a smooth, projective complex surface of geometric genus greater than zero, the Chow group of zero cycles is infinite dimensional, in the sense that it cannot be ``parametrized'' by an algebraic variety. The conjecture due to Spencer Bloch asserts the converse, that is, for a surface of geometric genus and irregularity zero, the Chow group of zero cycles is isomorphic to the group of integers. Bloch's conjecture has been studied and proved in the case when the surface is not of general type by \cite{BKL}, and for surfaces of general type by \cite{B}, \cite{IM}, \cite{GP}, \cite{PW}, \cite{V},\cite{VC}. Inspired by Bloch's conjecture, the following generalisation has been formulated \cite{Vo}[conjecture 11.19].
\textit{ Conjecture : Let $S$ be a smooth projective surface over the field of complex numbers and let $\Gamma$ be a codimension two cycle on $S\times S$. Suppose that $\Gamma^*$ acts as zero on the space of globally holomorphic two forms on $S$, then $\Gamma_*$ acts as zero on the kernel of the albanese map from $\CH_0(S)$ to $Alb(S)$.}
This conjecture was studied in detail in \cite{GT}, \cite{HK},\cite{Voi} when the correspondence $\Gamma$ is $\Delta- Graph(i)$, where $i$ is a symplectic involution on a $K3$ surface. In the example of K3 surfaces the push-forward induced by the involution acts as the identity on the Chow group of zero cycles of degree zero.
Inspired by this conjecture we consider the following question in this article. Let $X$ be a smooth, cubic fourfold in $\PR^5$. Consider a line $l$ in $\PR^5$ contained in $X$. Considering the projection from the line $l$ to $\PR^3$, we have a conic bundle structure on the cubic $X$. Let $S$ be the discriminant surface of this conic bundle. Let $T$ be the double cover of $S$ inside the Fano variety of lines $F(X)$ of $X$, arising from the conic bundle structure. Then $T$ has a natural involution and we observe that the group of algebraically trivial zero cycles on $T$ modulo rational equivalence (denoted by $A_0(T)$) maps surjectively onto the group of algebraically trivial one cycles on $X$ modulo rational equivalence (denoted by $A_1(X)$). The action of the involution has invariant part equal to $A_0(S)$ and anti-invariant part equal to $A_1(X)$. The involution cannot act as $+1$ on the group $A_1(X)$, as it would then follow that all the elements of $A_1(X)$ are $2$-torsion, hence that $A_1(X)$ is weakly representable; this is not true by the main theorem of \cite{SC}. Now the question is: what is the obstruction to the $+1$ action of the involution, in terms of the geometry of $S$ and $T$?
\begin{theorem} \label{theorem3} Let $S$ be the discriminant surface as mentioned above. Then for any very ample line bundle $L$ on $S$ we cannot have the equality $$L^2-g+1=g+n$$ where $g$ is the genus of the curve in the linear system of $L$ and $n$ is a positive integer. \end{theorem}
This result motivates the following:
\begin{corollary} Suppose that we have a surface of general type $S$ with geometric genus zero and we have an involution $i$ on the surface $S$ having only finitely many fixed points. Suppose that there exists a very ample line bundle $L$, on the minimal desingularization of the quotient surface $S/i$ such that the following equality $$L^2-2g+1=n$$
is true, here $g$ is the genus of the smooth curves in the linear system $|L|$ and $n$ is some positive integer. Then the involution $i_*$ acts as identity on the group $A_0(S)$. \end{corollary}
For the proof of the above theorem and the corollary, we follow the approach of the proof for the example of K3 surfaces due to Voisin as in \cite{Voi}. The proof involves two steps. First, we invoke the notion of finite dimensionality in the sense of Roitman as in \cite{R1} and prove that the finite dimensionality of the image of a homomorphism from $A_0(T)$ to $A_1(X)$ (respectively from $A_0(S)$ to $A_0(S)$) implies that the homomorphism factors through the Albanese map $A_0(T)\to Alb(T)$ (or $A_0(S)\to Alb(S)$, respectively). The second step is to show that, if we have the equality as in Theorem \ref{theorem3}, then the image of the homomorphism induced by the difference of the diagonal and the graph of the involution from $A_0(T)$ to $A_1(X)$ (or from $A_0(S)$ to $A_0(S)$) is finite dimensional, yielding the $+1$ action of the involution on $A_1(X)$ or $A_0(S)$, respectively.
As an implication of the above corollary we obtain Bloch's conjecture for the Craighero-Gattazzo surface of general type with geometric genus zero, studied in \cite{CG},\cite{DW}. This class of surfaces is obtained as the minimal resolution of singularities of singular quintics in $\PR^3$ invariant under an involution and having four isolated, simple elliptic singular points.
{\small \textbf{Acknowledgements:} The author would like to thank IISER-Mohali for its hospitality while hosting this project. The author is indebted to Kapil Paranjape for some useful conversations relevant to the theme of the paper. The author would also like to thank Claire Voisin for her advice on the theme of the paper. The author is indebted to J.L.Colliot-Thelene, B.Poonen, and the anonymous referee for pointing out a crucial mistake in an earlier version of the manuscript.}
{\small Assumption: We work over the field of complex numbers.}
\section{Finite dimensionality in the sense of Roitman and one-cycles on cubic fourfolds}
Let $P$ be a subgroup of the group of algebraically trivial one cycles modulo rational equivalence on a smooth projective fourfold $X$, the latter is denoted by $A_1(X)$. Following \cite{R1}, we say that the subgroup $P$ is finite dimensional, if there exists a smooth projective variety $W$, and a correspondence $\Gamma$ on $W\times X$, of correct codimension, such that $P$ is contained in the set $\Gamma_*(W)$.
Let $X$ be a cubic fourfold. Consider a line $l$ on $X$ and project from $l$ onto $\PR^3$. Consider the blow up of $X$ along $l$. Then the blow up $X_l$ has a conic bundle structure over $\PR^3$. Let $S$ be the surface in $\PR^3$ such that for any closed point on $S$, the inverse image is a union of two lines. Let $T$ be the variety in $F(X)$ which is the double cover of $S$. Precisely, this means the following. Let us consider $$\bcU:=\{(l',x):x\in l', \pi_l(x)\in S \}$$ inside $F(X)\times X$. Then its projection to $F(X)$ is $T$, and we have a 2:1 map from $T$ to $S$, which is branched along finitely many points. So $T$ is a surface.
Now, for a hyperplane section $X_t$, let $l_1,l_2$ be two lines contained in $X_t$. By a general position argument these two lines can be taken disjoint from $l$ inside $X$ and they are contained in $\PR^2$, so under the projection from $l$ they are mapped to two rational curves in $\PR^2$. Thus by Bezout's theorem they must intersect at a point $z$, so the inverse image of $z$ under the projection consists of the two given lines $l_1,l_2$, which tells us that the map from $A_0(T_t)$ to $A_1(X_t)$ is onto; here $T_t$ is the double cover (for a general $t$) of $S_t$, where $S_t$ is the discriminant curve of the projection $\pi_l: X_t\to \PR^2$. This in turn says that the map from $A_0(T)$ to $A_1(X)$ is onto, because $A_1(X)$ is generated by the $A_1(X_t)$, where $t$ varies.
\begin{theorem} \label{theorem1} Let $Z$ be a correspondence supported on $T\times X$. Suppose that the image of $Z_*$ from $A_0(T)$ to $A_1(X)$ is finite dimensional. Then $Z_*$ factors through the albanese map of $T$. \end{theorem}
\begin{proof} The proof of this theorem follows the approach of \cite{Voi}[Theorem 2.3]. Since $Z_*$ has finite dimensional image, there exists a smooth projective variety $W$ and a correspondence $\Gamma$ supported on $W\times X$ such that image of $Z_*$ is contained in $\Gamma_*(W)$. Let $C$ inside $T$ be a smooth, hyperplane section (after fixing an embedding of $T$ into a projective space). Then by Lefschetz theorem on hyperplane sections we have that $J(C)$ maps onto $Alb(T)$. So the kernel is an abelian variety, denoted by $K(C)$. First we prove the following.
\begin{lemma} The abelian variety $K(C)$ is simple for a general hyperplane section $C$ of $T$. \end{lemma} \begin{proof} The proof of this lemma follows the approach of \cite{Voi}[Proposition 2.4]. Suppose, if possible, that there exists a non-trivial proper abelian subvariety $A$ inside $K(C)$. Now $K(C)$ corresponds to the Hodge structure $$\ker(H^1(C,\QQ)\to H^3(T,\QQ))\;.$$ Let $T\to D$ be a Lefschetz pencil such that a smooth fiber is $C$. Then the fundamental group $\pi_1(D\setminus \{0_1,\cdots,0_m\},t)$ acts irreducibly on the Hodge structure mentioned above \cite{Vo}[Theorem 3.27]. Here $t$ corresponds to the smooth fiber $C$. Now the abelian variety $A$ corresponds to a Hodge sub-structure $H$ inside the above mentioned Hodge structure. Let $A_D$ be the base change of $A$ over the spectrum of the function field $\CC(D)$. For convenience, let us continue to denote $A_D$ by $A$. Then consider a finite extension $L$ of $\CC(D)$ inside $\overline{\CC(D)}$, such that $A$, $K(C)$ are defined over $L$. Then we spread $A,K(C)$ over a Zariski open $U'$ in $D'$, where $\CC(D')=L$ and $D'$ is a smooth, projective curve which maps finitely onto $D$. Denote these spreads by $\bcA,\bcK$ over $U'$. By throwing out more points from $U'$ we get that $\bcA\to U', \bcK\to U'$ are fibrations of the underlying smooth manifolds. So the fundamental group $\pi_1(U',t')$ acts on $H$, which is the $(2d-1)$-th cohomology of $A$ ($d=\dim(A)$), and on $\ker(H^1(C,\QQ)\to H^3(T,\QQ))$. Since $U'$ maps finitely onto a Zariski open $U$ of $D$, we have that $\pi_1(U',t')$ is a finite index subgroup of $\pi_1(U,t)$. Now it is a consequence of the Picard-Lefschetz formula that $H$ is a $\pi_1(U,t)$-stable subspace of $\ker(H^1(C,\QQ)\to H^3(T,\QQ))$. The latter is irreducible under the action of $\pi_1(U,t)$. So we get that $H$ is either zero or all of $\ker(H^1(C,\QQ)\to H^3(T,\QQ))$. Therefore by the equivalence of abelian varieties and weight one polarized Hodge structures, $A$ is either zero or all of $K(C)$.
\end{proof}
Now consider sufficiently ample hyperplane sections of $T$, so that the dimension of $K(C)$ is arbitrarily large, and hence strictly greater than $\dim(W)$. Consider the subset $R$ of $K(C)\times W$, consisting of pairs $(k,w)$ such that $$Z_*j_*(k)=\Gamma_*(w)$$ here $j: C\to T$ is the closed embedding of $C$ into $T$. Since the image of $Z_*$ is finite dimensional, the projection from $R$ onto $K(C)$ is surjective. By the Mumford-Roitman argument on Chow varieties \cite{R}, $R$ is a countable union of Zariski closed subsets in the product $K(C)\times W$. By the uncountability of the field of complex numbers it follows that some component $R_0$ of $R$, dominates $K(C)$. Therefore we have that $$\dim(R_0)\geq \dim(K(C))>\dim (W)\;.$$ So the fibers of the map $R_0\to W$ are positive dimensional. Since the abelian variety $K(C)$ is simple, the fibers of $R_0\to W$ generate the abelian variety $K(C)$. So for any zero cycle $z$ supported on the fibers of $R_0\to W$, we have that $$Z_*j_*(z)=\deg(z)\Gamma_*(w)$$ since $z$ is of degree zero, it follows that $Z_*$ vanishes on the fibers of $R_0\to W$, which is positive dimensional, hence on all of $K(C)$, by the simplicity of $K(C)$.
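For the reader's convenience, the vanishing just used can be written out explicitly; this is only a sketch of the computation. If $z=\sum_j n_j k_j$ is a zero cycle of degree zero supported on a single fiber of $R_0\to W$ over a point $w\in W$, then, since $Z_*j_*(k_j)=\Gamma_*(w)$ for every $j$, $$Z_*j_*(z)=\sum_j n_j\, Z_*j_*(k_j)=\Big(\sum_j n_j\Big)\Gamma_*(w)=\deg(z)\cdot \Gamma_*(w)=0\;.$$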
Now to prove that the map $Z_*$ factors through $alb$, we consider a zero cycle $z$ of degree zero, which is given by a tuple of $2k$ points for a fixed positive integer $k$. Then we blow up $T$ along these points, denote the blow up by $\tau:T'\to T$. Let $E_i$'s be the exceptional divisor of the blow up, we choose $H$ in $\Pic(T)$, such that $L=\tau^*(H)-\sum_i E_i$ is ample (this can be obtained by Nakai Moisezhon-criterion for ampleness). Now consider a sufficiently large, very ample multiple of $L$, and apply the previous method to a general member $C'$ of the corresponding linear system. Then $K(C')$ is a simple abelian variety. Also $\tau(C')$ contains all the points at which we have blown up. Suppose that the corresponding cycle $z$ is annihilated by $alb_T$, then any of its lifts to $T'$ say $z'$, is annihilated by $alb_{T'}$ and is supported on $K(C')$. So applying the previous argument to the correspondence $Z'=Z\circ \tau$, we have that $$Z_*(z)=Z'_*(z')=0\;.$$
\end{proof}
Let $i$ be the involution on $T$, then this involution induces an involution on $A_1(X)$. Consider the homomorphism given by the difference of identity and the induced involution on $A_1(X)$, call it $Z_{1*}$. It is clear from \ref{theorem1} that the image of $Z_*Z_{1*}$ cannot be finite dimensional, otherwise the involution will act as $+1$ on $A_1(X)$, leading to the fact that $A_1(X)=\{0\}$. Now we prove the following:
\begin{theorem} \label{theorem2} Let $S$ be the discriminant surface, mentioned above. Then for any very ample line bundle $L$ on $S$ the equality $$L^2-g+1=g+n$$ cannot hold, where $g$ is the genus of a curve in the complete linear system of $L$ and $n$ is a positive integer. \end{theorem}
\begin{proof}
The proof of this theorem follows the approach of \cite{Voi}[Proposition 2.5]. The discriminant surface $S$ is a quintic, hence its irregularity is zero. Consider a very ample line bundle $L$ on the quintic $S$. Let $g$ be the genus of a smooth curve $C$ in the linear system $|L|$. Now we calculate the dimension of $|L|$. Consider the exact sequence $$0\to \bcO_S(-C)\to \bcO_S\to \bcO_C\to 0$$ and tensor it with $L=\bcO_S(C)$ to obtain
$$0\to \bcO_S\to L\to L|_C\to 0\;.$$ Taking sheaf cohomology we have
$$0\to \CC\to H^0(S,L)\to H^0(C,L|_C)\to 0$$
since the irregularity of the surface is zero. On the other hand, by the Nakai-Moishezon criterion, the intersection number $L.C$ is positive, so $L$ restricted to $C$ has positive degree; by Riemann-Roch this implies that
$$\dim(H^0(C,L|_C))=L^2-g+1\;,$$ provided that we have the equality $$L^2-g+1=g+n$$
for some positive integer $n$. Then the linear system of $L$ is of dimension $g+n$. Now consider the smooth, projective curves $C$ in this linear system $|L|$ and their double covers $\wt{C}$ (this is actually a covering for a general $C$, as the map $T\to S$ is branched along a finite set of points). By Bertini's theorem a general $\wt{C}$ is smooth. By the Hodge index theorem it follows that $\wt{C}$ is connected. If not, suppose that it has two components $C_1,C_2$. Since $C^2>0$, we have $C_i^2>0$ for $i=1,2$, and since $\wt{C}$ is smooth we have that $C_1.C_2=0$. Therefore the intersection form restricted to $\{C_1,C_2\}$ is semipositive. This can only happen when $C_1$, $C_2$ are proportional and $C_i^2=0$, for $i=1,2$, which is not possible.
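For the reader's convenience, we record how the dimension count above is assembled (a sketch only): the assumed equality $L^2-g+1=g+n$ with $n\ge 1$ gives $\deg(L|_C)=L^2=2g+n-1>2g-2$, hence $h^1(C,L|_C)=0$ and, by Riemann-Roch, $$h^0(C,L|_C)=\deg(L|_C)-g+1=L^2-g+1\;,$$ so that, by the cohomology sequence displayed above, $$\dim|L|=h^0(S,L)-1=\big(1+h^0(C,L|_C)\big)-1=L^2-g+1=g+n\;.$$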
Now let $(t_1,\cdots, t_{g+n})$ be a point on $T^{g+n}$, which gives rise to the tuple $(s_1,\cdots,s_{g+n})$ on $S^{g+n}$, under the quotient map. There exists a unique, smooth curve $C$ containing all these points (if the points are in general position). Let $\wt{C}$ be its double cover on $T$. Then $(t_1,\cdots,t_{g+n})$ belongs to $\wt{C}$. Consider the zero cycle $$\sum_i t_i-\sum_i i_*(t_i)$$ this belongs to the image of $P(\wt{C}/C)$ in $A_0(T)$, $P(\wt{C}/C)$ is the Prym variety corresponding to the double cover. So the image of $$\sum_i \left(Z_*(t_i)-i_*Z_*(t_i)\right)$$ is an element in the image of this Prym variety under the homomorphism $$A_0(T)\to A_1(X)\;.$$ So the map $$T^{g+n}\to A_1(X)$$ given by $$(t_1,\cdots,t_{g+n})\mapsto \sum_i Z_*(t_i)-i_*Z_*(t_i) $$ factors through the Prym fibration $\bcP(\wt {\bcC}/\bcC)$, given by $$(t_1,\cdots,t_{g+n})\mapsto alb_{\wt{C}}\left(\sum_i t_i-i(t_i)\right)$$
here $\bcC, \wt{\bcC}$ are the universal smooth curve and the universal double cover of $\bcC$ over $|L|_0$ parametrizing the smooth curves in the linear system $|L|$. By dimension count, the dimension of $\bcP(\wt {\bcC}/\bcC)$ is $2g+n-1$. On the other hand, the dimension of $T^{g+n}$ is $2g+2n$. So the map $$T^{g+n}\to \bcP(\wt {\bcC}/\bcC)$$ has positive dimensional fibers, and hence the map $$T^{g+n}\to A_1(X)$$ has positive dimensional fibers. So the general fiber of $$T^{g+n}\to A_1(X)$$ contains a curve. Let $H$ be the hyperplane bundle pulled back onto the quintic surface $S$. It is very ample. Pull it back further onto $T$, to get an ample line bundle on $T$. Call it $L'$. Then the divisor $\sum_i \pi_i^{-1}(L')$ is ample on $T^{g+n}$, where $\pi_i$ is the $i$-th co-ordinate projection from $T^{g+n}$ to $T$. Therefore the curves in the fibers of the above map intersect the divisor $\sum_i \pi_i^{-1}(L')$. So we get that there exist points in $F_s$ (the general fiber over a cycle $s$ in $A_1(X)$) contained in $C\times T^{g+n-1}$ where $C$ is in the linear system of $L'$. Then consider the elements of $F_s$ of the form $(c,s_1,\cdots,s_{g+n-1})$, where $c$ belongs to $C$. Considering the map from $T^{g+n-1}$ to $A_1(X)$ given by $$(s_1,\cdots,s_{g+n-1})\mapsto Z_*(\sum_i s_i+c-\sum_i i(s_i)-i(c))\;,$$ we see that this map factors through the Prym fibration and the map from $T^{g+n-1}$ to $\bcP(\wt{\bcC}/\bcC)$ has positive dimensional fibers, since $n$ is large. This means that, if we consider an element $(c,s_1,\cdots,s_{g+n-1})$ in $F_s$ and a curve through it, then it intersects the ample divisor given by $\sum_i \pi_i^{-1}(L')$, on $T^{g+n-1}$. Then some of the $s_i$ are contained in $C$. So, iterating this process, we get that the elements of $F_s$ are supported on $C^k\times T^{g+n-k}$, where $k$ is some natural number depending on $n$. Note that the genus of $C$ is fixed and equal to $11$, and it is less than $k$ for a choice of a large multiple of the very ample line bundle $L$. Thus the elements of $F_s$ are supported on $C^{n_0}\times T^{g+n-k}$. Therefore considering $\Gamma=Z_1\circ Z$, we get that $\Gamma_*(T^{g+n})=\Gamma_*(T^{m_0})$, where $m_0$ is strictly less than $g+n$.
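For the reader's convenience, the dimension count for the Prym fibration used above can be spelled out as follows; this is only a sketch, under the assumption that, for a general member $C\in|L|$, the double cover $\wt{C}\to C$ is \'etale (which is the situation above, since $T\to S$ is branched only at finitely many points). By Riemann-Hurwitz $g(\wt{C})=2g-1$, so $$\dim P(\wt{C}/C)=g(\wt{C})-g(C)=g-1\;,$$ and therefore $$\dim \bcP(\wt{\bcC}/\bcC)=\dim|L|_0+\dim P(\wt{C}/C)=(g+n)+(g-1)=2g+n-1\;,$$ as stated.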
Now we prove by induction that $\Gamma_*(T^{m_0})=\Gamma_*(T^m)$ for all $m\geq g+n$. So suppose that $\Gamma_*(T^k)=\Gamma_*(T^{m_0})$ for $k\geq g+n$, then we have to prove that $\Gamma_*(T^{k+1})=\Gamma_*(T^{m_0})$. So any element in $\Gamma_*(T^{k+1})$ can be written as $\Gamma_*(t_1+\cdots+t_{m_0})+\Gamma_*(t)$. Now let $k-m_0=m$, then $m_0+1=k-m+1$. Since $k-m<k$, we have $k-m+1\leq k$, so $m_0+1\leq k$, so we have the cycle $$\Gamma_*(t_1+\cdots+t_{m_0})+\Gamma_*(t)$$ supported on $T^k$, hence on $T^{m_0}$. So we have that $\Gamma_*(T^{m_0})=\Gamma_*(T^k)$ for all $k$ greater than or equal to $g+n$. Now any element $z$ in $A_0(T)$ can be written as a difference of two effective cycles $z^+,z^-$ of the same degree. Then we have $$\Gamma_*(z)=\Gamma_*(z^+)-\Gamma_*(z^-)$$ and $\Gamma_*(z^{\pm})$ belong to $\Gamma_*(T^{m_0})$. So let $\Gamma'$ be the correspondence on $T^{2m_0}\times T$ defined as $$\sum_{l\leq m_0}(pr_{l},pr_T)^*\Gamma-\sum_{m_0+1\leq l\leq 2m_0}(pr_l,pr_T)^* \Gamma$$ where $\pr_l$ is the $l$-th coordinate projection from $T^{2m_0}$ to $T$, and $\pr_T$ is from $T^{2m_0}\times T$ to the last copy of $T$. Then we have $$\im(\Gamma_*)=\Gamma'_*(T^{2m_0})\;.$$ This would imply that the image of $\Gamma_*$ is finite dimensional, so by Theorem \ref{theorem1} we have that the induced involution on $A_1(X)$ acts as the identity. The involution acts as $-\id$ on $A_1(X)$. Hence all elements of $A_1(X)$ are $2$-torsion. This contradicts the fact that $A_1(X)$ is infinite dimensional \cite{SC}. \end{proof}
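\begin{remark} For the reader's convenience, we spell out the last identification in the above proof; this is only a sketch of the computation. By the definition of $\Gamma'$, for a point $(t_1,\cdots,t_{2m_0})\in T^{2m_0}$ one has $$\Gamma'_*(t_1,\cdots,t_{2m_0})=\sum_{l\leq m_0}\Gamma_*(t_l)-\sum_{m_0+1\leq l\leq 2m_0}\Gamma_*(t_l)=\Gamma_*(t_1+\cdots+t_{m_0})-\Gamma_*(t_{m_0+1}+\cdots+t_{2m_0})\;,$$ so $\Gamma'_*(T^{2m_0})$ consists precisely of the differences $\Gamma_*(z^+)-\Gamma_*(z^-)$ with $z^{\pm}$ effective cycles supported on $T^{m_0}$, which is the image of $\Gamma_*$ by the preceding discussion. \end{remark}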
Now we proceed to the proof of the corollary stated in the introduction regarding the generalised Bloch conjecture on surfaces of general type with geometric genus zero and with an involution $i$. The result is as follows:
\begin{corollary} \label{cor1} Suppose that we have a surface of general type $S$ with geometric genus zero and we have an involution $i$ on the surface $S$ having only finitely many fixed points. Suppose that there exists a very ample line bundle $L$, on the minimal desingularization of the quotient surface $S/i$ (by the involution) such that the following equality $$L^2-2g+1=n$$
is true, here $g$ is the genus of the smooth, projective curves in the linear system $|L|$, and $n$ is some positive integer. Then the involution $i_*$ acts as identity on the group $A_0(S)$. \end{corollary}
\begin{proof}
Consider the resolution of singularities of the surface $S/i$. It is the quotient by the involution acting on the surface $\wt{S}$, obtained by blowing up the isolated fixed points of $i$ acting on $S$. Call this quotient $\wt{S}/i$. Since it is dominated by a surface of irregularity zero (namely $\wt{S}$), it has irregularity zero. Consider a very ample line bundle $L$ on $\wt{S}/i$. Let $g$ be the genus of a smooth, projective curve $C$ in the linear system $|L|$. Now we calculate the dimension of $|L|$. Consider the exact sequence $$0\to \bcO_{\wt{S}/i}(-C)\to \bcO_{\wt{S}/i}\to \bcO_C\to 0$$ and tensor it with $L=\bcO_{\wt{S}/i}(C)$ to obtain
$$0\to \bcO_{\wt{S}/i}\to L\to L|_C\to 0\;.$$ Taking sheaf cohomology we get
$$0\to \CC\to H^0(\wt{S}/i,L)\to H^0(C,L|_C)\to 0$$
since the irregularity of the surface $\wt{S}/i$ is zero. On the other hand, by the Nakai-Moishezon criterion, the intersection number $L.C$ is positive, so $L$ restricted to $C$ has positive degree; by Riemann-Roch this implies
$$\dim(H^0(C,L|_C))=L^2-g+1\;, $$ provided that we have the equality $$L^2-g+1=g+n$$
for some positive integer $n$. Then the linear system of $L$ is of dimension $g+n$. Now consider a smooth, projective curve $C$ in this linear system $|L|$ and its branched double cover $\wt{C}$, branched along the intersection of $\wt{C}$ with the $E_i$, where the $E_i$'s are the exceptional curves arising from the blow up $\wt{S}\to S$. By Bertini's theorem a general $\wt{C}$ is smooth. By the Hodge index theorem it follows that $\wt{C}$ is connected. If not, suppose that it has two components $C_1,C_2$. Since $C^2>0$, we have $C_i^2>0$ for $i=1,2$, and since $\wt{C}$ is smooth we have that $C_1.C_2=0$. Therefore the intersection form restricted to $\{C_1,C_2\}$ is semipositive. This can only happen when $C_1$, $C_2$ are proportional and $C_i^2=0$, for $i=1,2$, which is not possible as $C_1+C_2$ is ample on $\wt{S}$.
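In the dimension count below, the following Riemann-Hurwitz computation for the branched double cover $\wt{C}\to C$ will be used; it is only a sketch, with $m$ denoting, as in the text below, the number of branch points: $$2g(\wt{C})-2=2(2g-2)+m\;,\quad\text{so}\quad g(\wt{C})=2g-1+\frac{m}{2}\;,\qquad \dim P(\wt{C}/C)=g(\wt{C})-g=g-1+\frac{m}{2}\;,$$ and therefore the Prym fibration over $|L|_0$ has dimension $(g+n)+\left(g-1+\frac{m}{2}\right)=2g+n-1+\frac{m}{2}$.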
Now let $(t_1,\cdots, t_{g+n})$ be a point on $\wt{S}^{g+n}$, which gives rise to the tuple $(s_1,\cdots,s_{g+n})$ on $(\wt{S}/i)^{g+n}$, under the quotient map. There exists a unique, smooth curve $C$ containing all these points (if the points are in general position). Let $\wt{C}$ be its branched double cover of $C$ in $\wt{S}$. Then $(t_1,\cdots,t_{g+n})$ belongs to $\wt{C}$. Consider the zero cycle $$\sum_i t_i-\sum_i i_*(t_i)$$ this belongs to $P(\wt{C}/C)$, which is the Prym variety corresponding to the double cover $\wt{C}\to C$. So the image of $$\sum_i \left(t_i-i_*(t_i)\right)$$ under the push-forward $j_{\wt{C}*}$ is an element in the image under the homomorphism $$\id-i_*: A_0(\wt{S})\to A_0(\wt{S})$$
So the map $$\wt{S}^{g+n}\to A_0(\wt{S})$$ given by $$(t_1,\cdots,t_{g+n})\mapsto \sum_i (t_i-i_*(t_i)) $$ factors through the Prym fibration $\bcP(\wt {\bcC}/\bcC)$, given by $$(t_1,\cdots,t_{g+n})\mapsto alb_{\wt{C}}\left(\sum_i t_i-i(t_i)\right)$$
here $\bcC, \wt{\bcC}$ are the universal family of smooth curves in $|L|$ and the universal double cover of $\bcC$ respectively, over $|L|_0$ parametrizing the smooth curves in the linear system $|L|$. By dimension count the dimension of $\bcP(\wt {\bcC}/\bcC)$ is $2g+n-1+m/2$, where $m$ is the number of branch points on the curve $\wt{C}$ counted with multiplicities. On the other hand we have that dimension of ${\wt{S}}^{g+n}$ is $2g+2n$. So the map $${\wt{S}}^{g+n}\to \bcP(\wt {\bcC}/\bcC)$$ has fiber dimension equal to $$2g+2n-2g-n+1-m/2=n+1-m/2\;.$$ Considering a large multiple of the very ample line bundle $L$, we can assume that the above number is positive. Indeed we have $$L^2-2g+1=-L.K_{\wt{S}/i}-1=n>0$$ and $$K_{\wt{S/i}}=f^*(K_{S/i})+E$$ where $E$ is the exceptional divisor, $f$ is the regular map from $\wt{S}/i$ to $S/i$. Here we consider $\wt{S}$ is the blow up of $S$ along the unique fixed point of $i$. The calculation for finitely many fixed points greater than one is similar. Let $L$ be equal to $m'f^*(H)-m'E$ which is very ample, where $H$ is a very ample line bundle on $S/i$, after fixing an embedding into some projective space. Then we have to prove that
$$L.(-2K_{\wt{S}/i}-E)-2>0$$
that is
$$L.(-2f^*(K_{S/i})-3E)-2>0$$
putting the expression of $L$, the condition to be proven is
$$-(m'(f^*(H)-E)(2f^*(K_{S/i})+3E))-2=-2m'f^*(H).f^*(K_{S/i})-3m'-2>0$$
But by the adjunction formula on $\wt{S}/i$ we have
$$L^2-2g+1=-L.K_{\wt{S}/i}-1$$
on the other hand
$$L^2-2g+1>0$$
by the assumption of the theorem. Therefore
$$-m'f^*(H).f^*(K_{S/i})-m'-1=-m'f^*(H).f^*(K_{S/i})-(m'+1)>0$$
so
$$-2m'f^*(H).f^*(K_{S/i})> 2m'+2\;.$$
Therefore choosing $l>3$, such that $m'f^*(lH)-m'E$ is very ample, we have $$-2f^*(lm'H).f^*(K_{S/i})> 2l(m'+1)>3m'+2$$ for large values of $l$. Also note that for $L=m'(f^*(lH)-E)$, $l>1$ we have $$L^2-2g+1=-L.K_{\wt{S/i}}-1=-f^*(m'lH).f^*(K_{S/i})-m'-1=-m'lf^*(H).f^*(K_{S/i})-m'-1$$ we know that $$-m'f^*(H).f^*(K_{S/i})>m'+1$$ so $$-m'lf^*(H).f^*(K_{S/i})-m'-1>(m'l-1)(m'+1)> 0\;.$$ So for $L=f^*(m'lH)-m'E$ we have the equality $$L^2-2g+1=n$$ for some positive integer $n$.
So the fiber contains a curve. Let $H$ be the hyperplane bundle pulled back onto the surface $\wt{S}/i$, after fixing an embedding of $\wt{S}/i$ into some projective space. It is very ample. Pull it back further onto $\wt{S}$, to get an ample line bundle on $\wt{S}$. Call it $L'$. Then the divisor $\sum_i \pi_i^{-1}(L')$ is ample on $\wt{S}^{g+n}$, where $\pi_i$ is the $i$-th co-ordinate projection from $\wt{S}^{g+n}$ to $\wt{S}$. Therefore the curves in the fibers of the above map intersect the divisor $\sum_i \pi_i^{-1}(L')$. So there exist points in $F_s$ (the general fiber of $\wt{S}^{g+n}\to A_0(\wt{S})$ over a cycle $s$ in $A_0(\wt{S})$) contained in $C\times \wt{S}^{g+n-1}$ where $C$ is in the linear system of $L'$. Then consider the elements of $F_s$ of the form $(c,s_1,\cdots,s_{g+n-1})$, where $c$ belongs to $C$. Considering the map from $\wt{S}^{g+n-1}$ to $A_0(\wt{S})$ given by $$(s_1,\cdots,s_{g+n-1})\mapsto (\sum_i s_i+c-\sum_i i_*(s_i)-i_*(c))\;,$$ we see that this map factors through the Prym fibration and the map from $\wt{S}^{g+n-1}$ to $\bcP(\wt{\bcC}/\bcC)$ has positive dimensional fibers, by choosing $l$ and hence $n$ to be large. So, if we consider an element $(c,s_1,\cdots,s_{g+n-1})$ in $F_s$ and a curve through it, then it intersects the ample divisor given by $\sum_i \pi_i^{-1}(L')$, on $\wt{S}^{g+n-1}$. Then some of the $s_i$ are contained in $C$. So, iterating this process, we get that the elements of $F_s$ are supported on $C^k\times \wt{S}^{g+n-k}$, where $k$ is some natural number depending on $n$. Note that the genus of $C$ is fixed and it is less than $k$ for a choice of a very large multiple of the very ample line bundle $L$. Thus the elements of $F_s$ are supported on $C^{n_0}\times \wt{S}^{g+n-k}$. Therefore considering $\Gamma=\Delta_{\wt{S}}-Gr(i)$, we get that $\Gamma_*(\wt{S}^{g+n})=\Gamma_*(\wt{S}^{m_0})$, where $m_0$ is strictly less than $g+n$.
Now we prove by induction that $\Gamma_*(\wt{S}^{m_0})=\Gamma_*(\wt{S}^m)$ for all $m\geq g+n$. So suppose that $\Gamma_*(\wt{S}^k)=\Gamma_*(\wt{S}^{m_0})$ for $k\geq g+n$, then we have to prove that $\Gamma_*(\wt{S}^{k+1})=\Gamma_*(\wt{S}^{m_0})$. So any element in $\Gamma_*(\wt{S}^{k+1})$ can be written as $$\Gamma_*(t_1+\cdots+t_{m_0})+\Gamma_*(t)\;.$$ Now let $k-m_0=m$, then $m_0+1=k-m+1$. Since $k-m<k$, we have $k-m+1\leq k$, so $m_0+1\leq k$, so we have the cycle $$\Gamma_*(t_1+\cdots+t_{m_0})+\Gamma_*(t)$$ supported on $\wt{S}^k$, hence on $\wt{S}^{m_0}$. So we have $$\Gamma_*(\wt{S}^{m_0})=\Gamma_*(\wt{S}^k)$$ for all $k$ greater than or equal to $g+n$. Now any element $z$ in $A_0(\wt{S})$ can be written as a difference of two effective cycles $z^+,z^-$ of the same degree. Then we have $$\Gamma_*(z)=\Gamma_*(z^+)-\Gamma_*(z^-)$$ and $\Gamma_*(z^{\pm})$ belong to $\Gamma_*(\wt{S}^{m_0})$. So let $\Gamma'$ be the correspondence on $\wt{S}^{2m_0}\times \wt{S}$ defined as $$\sum_{l\leq m_0}(pr_{l},pr_{\wt{S}})^*\Gamma-\sum_{m_0+1\leq l\leq 2m_0}(pr_l,pr_{\wt{S}})^* \Gamma$$ where $\pr_l$ is the $l$-th coordinate projection from $\wt{S}^{2m_0}$ to $\wt{S}$, and $\pr_{\wt{S}}$ is from ${\wt{S}}^{2m_0}\times \wt{S}$ to the last copy of $\wt{S}$. Then we have $$\im(\Gamma_*)=\Gamma'_*(\wt{S}^{2m_0})\;.$$ This would imply that the image of $\Gamma_*$ is finite dimensional, so, as proved in \cite{Voi}[Theorem 2.3], the induced homomorphism $\Gamma_*$ on $A_0(\wt{S})$ factors through the Albanese variety of $\wt{S}$, which is trivial. Hence $i_*$ acts as the identity on $A_0(\wt{S})$. By the blow up formula $$A_0(\wt{S})\cong A_0(S)$$ hence the involution $i_*$ acts as the identity on $A_0(S)$. \end{proof}
\begin{remark} \label{rem1} Suppose in the above corollary \ref{cor1} we have the fixed locus of the involution consisting of finitely many isolated fixed points and one rational curve. Then on $\wt{S}/i$ we have to prove that the number $$L.(-2K_{\wt{S}/i}-\sum_j E_j-R)-2>0$$ Here $R$ is the strict transform of the rational curve component in the fixed locus, $E_j$ is the exceptional curve over the isolated fixed point $p_j$. Putting $$L=m(f^*(H)-\sum_j E_j)$$ we have to prove that $$-m(f^*(H)-\sum_j E_j)(2f^*(K_{S/i})+3\sum_j E_j+R)-2>0$$ So for simplicity let us assume that the number of isolated fixed point is one, so there is one exceptional divisor. Thus we have to prove that $$-m(f^*(H)- E)(2f^*(K_{S/i})+3E+R)-2>0$$ that is $$-2mf^*(H)f^*(K_{S/i})-3m-2-mf^*(H).R>0$$ Since $R=f^*(L)$ where $L$ is a line in $S/i$, we have $$f^*(H).f^*(L)=f^*(H.L)=f^*(p)=2p$$ Putting this in the above equation $$-2mf^*(H).f^*(K_{S/i})-3m-2-2m=-mf^*(H)f^*(K_{S/i})-5m-2$$ an it has to be greater than zero. By choosing as before $lH$ in place of $H$ and assuming that $f^*(lmH)-mE$ and $f^*(mH)-mE$ are both very ample, we have $$-2mf^*(lH).f^*(K_{S/i})>2l(m+1)$$ and $$2l(m+1)>5m+2$$ for high values of $l$. Therefore in this case also the argument of \ref{cor1} works and we get that the involution acts as identity on $A_0(S)$. \end{remark}
\begin{example} Let $F$ be a singular quintic, invariant under an involution on $\PR^3$ and having simple elliptic singularities at the points $$(1:0:0:0), (0:1:0:0), (0:0:1:0), (0:0:0:1)$$ as studied in \cite{DW}[section 2]. Let us consider the minimum desingularization of this surface $F$ and call it $V$. This surface $V$ is a smooth, projective surface of general type with $p_g=q=0$, equipped with an involution. The fixed locus of the involution on $F$ consists of a line and five isolated fixed points. These five points are different from the singular points of $F$. Let us consider the pre-images of these five points on $V$. They are the isolated fixed points of the involution on $V$. Consider the blow-up of $V$ at the five isolated fixed points of the involution on $V$. Denote it by $V'$. This surface $V'$ is equipped with an involution $i$. Then it is proven in \cite{DW}[proposition 3.1], that $V'/i$ is a non-singular, rational surface. So by the above remark, \ref{rem1}, the involution acts as identity on $A_0(V')$, provided that there exists a line bundle $L$ on $V'/i$ such that $$L.(-K_{V'/i})-1>0\;.$$ Following the discussion in \cite{DW}[discussion after proposition 3.1] we consider the minimal model of $V'/i$. Call it $S$, it is a minimal elliptic surface as mentioned in \cite{DW}[discussion after proposition 3.1]. For this $S$ we have $$K_S^2=0$$ then by Riemann-Roch $$h^0(-K_S)\geq K_{S}^2+\chi (\bcO_S)+1=1+1=2$$
as $h^0(2K_S)=-1$ ($S$ is rational, so $|2K_S|=\emptyset$) and $\chi(\bcO_S)=1$. Therefore for a very ample line bundle of large degree on $S$, we have $$-L.K_S-1>0\;.$$ Now by construction, as in \cite{DW}, the surface $S$ is a contraction of $V'/i$ along two elliptic curves of self-intersection $-1$. Let $\pi$ be the blow-down map from $V'/i$ to $S$. Therefore for a very ample line bundle $$L=\pi^*(L')-E_1-E_2$$ and $$K_{V'/i}=\pi^*(K_S)+E_1+E_2$$ on $V'/i$, we have $$-(\pi^*(L')-E_1-E_2)(\pi^*(K_S)+E_1+E_2)-1=-\pi^*(L'.K_S)-3>0$$ for some very ample line bundle of the form $$L=m\pi^*(L')-E_1-E_2\;.$$ Here $m$ is a very large positive integer. Thus we have $$-L.K_{V'/i}-1>0\;.$$ Therefore there exists a line bundle $L$ on $V'/i$ such that $$L^2-2g+1=n$$
for some positive integer $n$, here $g$ is the genus of a smooth curve in $|L|$, as required in the condition of the corollary \ref{cor1}. Since $V'/i$ is rational, the involution acts also as $-1$ resulting to the fact that every element in $A_0(V')$ is $2$-torsion and hence by Roitman's theorem $A_0(V')=\{0\}$ (as $q=0$ for $V'$). Since by the blow up formula $$A_0(V)\cong A_0(V')$$ we have $A_0(V)=\{0\}$. Thus the Bloch's conjecture holds on $V$. \end{example}
\subsection{Generalization of the above result}
The technique of the proof of \ref{theorem2} is more general, in the sense that we only use the conic bundle structure of the cubic fourfold and the conic bundle structure on the hyperplane sections of the cubic fourfold. Suppose that we consider a fourfold $X$, which is unirational, so contains sufficiently many lines. Now consider a fixed line $l$ on $X$, and project onto $\PR^3$ from this line. Suppose that the discriminant surface $S$ inside $\PR^3$ admits a double cover $T$ of $S$ branched along finitely many points, inside the Fano variety of lines $F(X)$ of $X$.
The proof of \ref{theorem2} tells us that we have the following theorem:
\begin{theorem} Let $X$ be a fourfold embedded in $\PR^5$, which admits a conic bundle structure. Let $S$ denote the discriminant surface of the conic bundle structure, and suppose that $S$ admits a double cover branched at finitely many points. Then for any very ample line bundle $L$ on $S$, we cannot have the equality $$L^2-g+1=g+n$$ where $g$ is the genus of a curve in the linear system of $L$ and $n$ is a positive integer. \end{theorem}
\end{document}
\begin{document}
\tikzset{
my tip/.style={
decoration={
markings,
mark=at position 1 with {\arrow[scale=.7]{#1}}
},
postaction=decorate
} }
\title{Constraint qualifications and strong global convergence properties of an augmented Lagrangian method on Riemannian manifolds} \author{Roberto Andreani \thanks{Department of Applied Mathematics, University of Campinas, Campinas-SP, Brazil. Email: [email protected]} \and Kelvin R. Couto\thanks{Federal Institute of Goi\'as, Goi\^ania-GO, Brazil and Department of Applied Mathematics. University of S\~ao Paulo, S\~ao Paulo-SP, Brazil. Email: [email protected]} \and Orizon P. Ferreira \thanks{Institute of Mathematics and Statistics, Federal University of Goi\'as. Goi\^ania-GO, Brazil. Email: [email protected]} \and Gabriel Haeser \thanks{Department of Applied Mathematics, University of S\~ao Paulo, S\~ao Paulo-SP, Brazil. Email: [email protected]} } \maketitle
\begin{abstract} In the past years, augmented Lagrangian methods have been successfully applied to several classes of non-convex optimization problems, inspiring new developments in both theory and practice. In this paper we bring most of these recent developments from nonlinear programming to the context of optimization on Riemannian manifolds, including equality and inequality constraints. Much research has been conducted on optimization problems on manifolds; however, only recently has the treatment of the constrained case been considered. In this paper we propose to bridge this gap with respect to the most recent developments in nonlinear programming. In particular, we formulate several well-known constraint qualifications from the Euclidean context which are sufficient for guaranteeing global convergence of augmented Lagrangian methods, without requiring boundedness of the set of Lagrange multipliers. Convergence of the dual sequence can also be assured under a weak constraint qualification. The theory presented is based on so-called sequential optimality conditions, which are a powerful tool in this context. The paper can also be read with the Euclidean context in mind, serving as a review of the most relevant constraint qualifications and global convergence theory of state-of-the-art augmented Lagrangian methods for nonlinear programming. \end{abstract}
\noindent {\bf Keywords:} constraint qualifications, global convergence, augmented Lagrangian methods, Riemannian manifolds.
\noindent {\bf AMS subject classification:} 49J52, 49M15, 65H10, 90C30.
\section{Introduction}
The problem of minimizing an objective function defined on a Riemannian manifold has received a lot of attention over the last twenty-five years. Several unconstrained algorithms on Euclidean spaces have been successfully adapted to this more general setting. These adaptations come from the fact that the Riemannian machinery, from a theoretical and practical point of view, allows treating several constrained optimization problems as unconstrained Riemannian problems. It is worth noting that the works on this subject involve more than merely a theoretical exercise in generalizing Euclidean space concepts to Riemannian manifolds, which is challenging in many different aspects. Unlike Euclidean spaces, Riemannian manifolds are nonlinear objects, making it challenging to develop a solid optimization theory in this setting. The most important thing to keep in mind is that these studies matter mainly because many problems are most effectively addressed from the point of view of Riemannian geometry. In fact, many optimization problems have an underlying Riemannian geometric structure that can be efficiently exploited with the goal of designing more effective methods to solve them; some references on this subject include \cite{AbsilBook2008,Boumal2022Book,Edelman1999}.
Although unconstrained Riemannian optimization is already somewhat well established, only a few works have appeared dealing with constrained Riemannian optimization (CRO) problems, that is, Riemannian optimization problems where equality and inequality constraints restrict the variables to a subset of the manifold itself. For instance, \cite{Yang_Zhang_Song2014} extended to the Riemannian context the Karush/Kuhn-Tucker (KKT) conditions and second-order optimality conditions under a strong assumption, while in \cite{BergmannHerzog2019} a very interesting intrinsic approach was presented for defining suitable KKT conditions. In \cite{Yamakawa_Sato2022} the Approximate-KKT (AKKT) sequential optimality condition was proposed to support the global convergence theory of an augmented Lagrangian method recently introduced in \cite{Liu_Boumal2020}. In \cite{Jiang2022} an exact penalty method for special problems on Stiefel manifolds was presented; the constraint qualifications and the first- and second-order optimality conditions supporting the method are also discussed. A manifold inexact augmented Lagrangian framework to solve a family of nonsmooth optimization problems on Riemannian submanifolds embedded in Euclidean space is proposed in \cite{Deng2022}. In \cite{Obara2022}, a Riemannian sequential quadratic optimization algorithm is proposed, which uses a line-search technique with an $\ell_1$-penalty function as an extension of the standard sequential quadratic optimization algorithm for constrained nonlinear optimization problems in Euclidean spaces. In \cite{IPriemann}, a Riemannian interior point algorithm is introduced.
It is worth mentioning that the theoretical tools needed to support constrained optimization methods in the Riemannian setting are still under development. In fact, only recently in \cite{BergmannHerzog2019} a full theory of constraint qualifications and optimality conditions has been developed, where definitions of weak constraint qualifications (CQs) such as Guignard's CQ and Abadie's CQ have been given. Although these conditions guarantee the existence of Lagrange multipliers, more robust applications of them are not known so far, even in the Euclidean setting. This is not the case of stronger conditions such as the linear independence CQ (LICQ) and Mangasarian-Fromovitz CQ (MFCQ), which give, respectively, uniqueness and compactness of the Lagrange multiplier set, together with boundedness of a typical sequence of approximate Lagrange multipliers generated by several primal-dual algorithms, guaranteeing global convergence to a stationary point. These results were discussed in the Riemannian setting in \cite{Yamakawa_Sato2022}.
In this paper our goal is to introduce several intrinsic weaker CQs in the Riemannian context, such as the constant rank CQ (CRCQ \cite{Janin_Robert_1984}), the constant positive linear independence CQ (CPLD \cite{Qi_Wei2000}), and their relaxed variants (RCRCQ \cite{Minchenko_Stakhovski_2011} and RCPLD \cite{Andreani_Haeser_Schuverdt_Silva2012RCPLD}). RCPLD is the weakest of these four conditions; however, all of them have their own set of applications, which we mention later. With the exception of CRCQ and RCRCQ, which are independent of MFCQ, all CQs presented are strictly weaker than MFCQ. Thus, despite the fact that no such condition guarantees boundedness of the set of Lagrange multipliers at a solution, they are still able to guarantee global convergence of primal-dual algorithms to a stationary point. In particular, we show that all feasible limit points of a safeguarded augmented Lagrangian algorithm satisfy the KKT conditions under all proposed conditions. Finally, we present two other conditions, the constant rank of the subspace component CQ (CRSC \cite{Andreani_Haeser_Schuverdt_Silva_CRSC_2012}) and the quasinormality CQ (QN \cite{hestenes}), which we also show to be enough for proving the global convergence result we mentioned. Although we do not pursue these results in the Riemannian setting, CRSC is expected to be strictly weaker than RCPLD, while these conditions (CRSC and QN) are the weakest ones known in the Euclidean setting such that an Error Bound condition is satisfied. That is, locally, the distance to the feasible set can be measured by means of the norm of the constraint violation \cite{Andreani_Haeser_Schuverdt_Silva_CRSC_2012,eb2}, which should be an interesting result to be extended to the Riemannian setting. Throughout the text we review several results known in the Euclidean setting in order to serve as a guide for future extensions to Riemannian manifolds. We chose to present in the Riemannian setting an interesting characteristic of QN which is related to the global convergence of the augmented Lagrangian method; namely, under QN, the sequence of approximate Lagrange multipliers generated by the algorithm is bounded, guaranteeing primal-dual convergence even when the set of Lagrange multipliers is itself unbounded, a result that first appeared in \cite{gnep} in the Euclidean setting. In order to do this, we will need to define a stronger sequential optimality condition known as Positive-AKKT (PAKKT \cite{Andreani_Fazzio_Schuverdt_Secchin2019}). Finally, the machinery of sequential optimality conditions we introduce is relevant due to the fact that it is easy to extend the global convergence results we present to other algorithms. We also want to draw attention to the fact that all of the findings obtained in this study are valid in Euclidean spaces; thus, this study may also be seen as a review of the recent developments in constraint qualifications and their connections with the global convergence of algorithms in the Euclidean setting.
This paper is organized as follows. Section~\ref{sec:aux} presents some definitions and preliminary results that are important throughout our study. In Section~\ref{sec:prel}, we state the CRO problem and also recall the KKT and AKKT conditions, together with the definitions of LICQ and MFCQ for CRO problems. Section~\ref{sec:nStricCQ} is devoted to introducing the new CQs for the CRO problem, namely, (R)CRCQ, (R)CPLD, and CRSC, where we present several examples and the proof that these conditions are indeed CQs associated with the global convergence of the augmented Lagrangian method. In Section~\ref{eq:PAKKT} we introduce the PAKKT condition and the quasinormality CQ, where we show that the Lagrange multiplier sequence generated by the augmented Lagrangian method is bounded under quasinormality. The last section contains some concluding remarks.
\subsection{Notations, terminology and basics results} \label{sec:aux}
In this section, we recall some notations and basic concepts of Riemannian manifolds used throughout the paper. They can be found in many books on Riemannian Geometry, see, for example, \cite{Lang1995,Sakai1996,Loring2011}.
Let ${\cal M}$ be an $n$-dimensional smooth Riemannian manifold. Denote the {\it tangent space} at a point $p$ by $T_p{\cal M}$, the {\it tangent bundle} by $T{\cal M}:= \bigcup_{p\in M}T_p{\cal M}$ and a {\it vector field} by a mapping $X\colon {\cal M} \to T{\cal M}$ such that $X(p) \in T_p{\cal M}$. Assume also that ${\cal M}$ has a {\it Riemannian metric} denoted by $\langle \cdot, \cdot \rangle$ and the corresponding {\it norm} by $\|\cdot\|$. For $f\colon U \to\mathbb{R}$ a differentiable function with derivative $d f(\cdot)$, where $U$ is an open subset of the manifold ${\cal M}$, the Riemannian metric induces the mapping $f\mapsto \grad f $ which associates its {\it gradient vector field} via the following rule $\langle \grad f(p),X(p)\rangle:=d f(p)X(p)$, for all $p\in U$. The {\it length} of a piecewise smooth curve $\gamma\colon[a,b]\rightarrow {\cal M}$ joining $p$ to $q$ in ${\cal M}$, i.e., $\gamma(a)= p$ and $\gamma(b)=q$ is denoted by $\ell(\gamma)$. The {\it Riemannian distance} between $p$ and $q$ is defined as $ d(p,q) = \inf_{\gamma \in \Gamma_{p,q}} \ell(\gamma), $ where $\Gamma_{p,q}$ is the set of all piecewise smooth curves in ${\cal M}$ joining points $p$ and $q$. This distance induces the original topology on ${\cal M}$, namely $({\cal M}, d)$ is a complete metric space and the bounded and closed subsets are compact. The {\it open} and {\it closed balls} of radius $r>0$, centered at $p$, are respectively defined by $B_{r}(p):=\left\{ q\in {\cal M}:~ d(p,q)<r\right\}$ and $ B_{r}[p]:=\left\{ q\in {\cal M} :~d(p,q)\leq r\right\}$. Let $\gamma$ be a curve joining the points $p$ and $q$ in ${\cal M}$ and let $\nabla$ be the Levi-Civita connection associated to $({\cal M}, \langle \cdot, \cdot \rangle)$. A vector field $Y$ along a smooth curve $\gamma$ in ${\cal M}$ is said to be {\it parallel} when $\nabla_{\gamma^{\prime}} Y=0$. If $\gamma^{\prime}$ itself is parallel, we say that $\gamma$ is a {\it geodesic}. A Riemannian manifold is {\it complete} if its geodesics $\gamma(t)$ are defined for any value of $t\in \mathbb{R}$. From now on, {\it ${\cal M}$ denotes an $n$-dimensional smooth and complete Riemannian manifold}. Owing to the completeness of the Riemannian manifold ${\cal M}$, the {\it exponential map} at $p$, $\exp_{p}\colon T_{p}{\cal M} \to {\cal M}$, can be given by $\exp_{p}v = \gamma(1)$, where $\gamma$ is the geodesic defined by its position $p$ and velocity $v$ at $p$ and $\gamma(t) = \exp_p(tv)$ for any value of $t$. For $p\in {\cal M}$, the {\it injectivity radius} of ${\cal M}$ at $p$ is defined by $$
r_{p}:=\sup\{ r>0:~{\exp_{p}}{\vert_{B_{r}(0_{p})}} \mbox{ is a diffeomorphism} \},
$$
where $B_{r}(0_{p}):=\lbrace v\in T_{p}{\cal M}:~\|v\| <r\rbrace$ and $0_{p}$ denotes the origin of $T_{p}{\cal M}$. Hence, for $0<\delta<r_{p}$, we have $\exp_{p}(B_{\delta}(0_{p})) = B_{\delta}({p})$ and the map $\exp^{-1}_{p}\colon B_{\delta}({p}) \to B_{\delta}(0_{p})$ is a diffeomorphism. Moreover, for all $q\in B_{\delta}({p})$, there exists a unique geodesic segment $\gamma$ joining $p$ to $q$, which is given by $\gamma_{p q}(t)=\exp_{p}(t \exp^{-1}_{p} {q})$, for all $t\in [0, 1]$. Furthermore, $d(q,p)\,=\|\exp^{-1}_{p}q\|$ and the map $ B_{\delta}({p})\ni q\mapsto \frac{1}{2}d({q}, p)^2$ is $C^{\infty}$ with gradient given by \[ \grad \frac{1}{2}d({q}, p)^2=-\exp^{-1}_{q}{p}, \]
see, for example, \cite[Proposition 4.8, p.108]{Sakai1996}.
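To make the objects above concrete, the following is a small numerical sketch (ours, and purely illustrative) of the exponential map, its inverse and the induced distance on the unit sphere $S^2\subset\mathbb{R}^3$, written in Python with \texttt{numpy}; it also checks the gradient identity above by projecting a finite-difference gradient of a Euclidean extension onto the tangent space.
\begin{verbatim}
import numpy as np

def exp_map(p, v):
    # Exponential map on the unit sphere: geodesic from p with initial velocity v in T_p S^2.
    t = np.linalg.norm(v)
    if t < 1e-14:
        return p
    return np.cos(t) * p + np.sin(t) * v / t

def log_map(p, q):
    # Inverse exponential map exp_p^{-1}(q), assuming q != -p.
    c = np.clip(np.dot(p, q), -1.0, 1.0)
    theta = np.arccos(c)                 # = d(p, q)
    if theta < 1e-14:
        return np.zeros_like(p)
    w = q - c * p                        # component of q orthogonal to p
    return theta * w / np.linalg.norm(w)

p = np.array([0.0, 0.0, 1.0])
q = np.array([1.0, 1.0, 1.0]) / np.sqrt(3.0)

# exp_p is inverted by log_p inside the injectivity radius.
print(np.allclose(exp_map(p, log_map(p, q)), q))                      # True

# grad of f(.) = (1/2) d(., p)^2 at q equals -exp_q^{-1}(p):
f = lambda x: 0.5 * np.arccos(np.clip(np.dot(x / np.linalg.norm(x), p), -1, 1)) ** 2
h = 1e-6
egrad = np.array([(f(q + h * e) - f(q - h * e)) / (2 * h) for e in np.eye(3)])
rgrad = egrad - np.dot(egrad, q) * q     # project onto T_q S^2
print(np.allclose(rgrad, -log_map(q, p), atol=1e-5))                  # True
\end{verbatim}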
Next we state some elementary facts on (positive-)linear dependence/independence of gradient vector fields, whose proofs are straightforward.
Let $h=(h_1,\dots,h_s)\colon {\cal M} \to {\mathbb R}^s$ and $g=(g_1,\dots,g_m)\colon {\cal M} \to {\mathbb R}^m$ be continuously differentiable functions on ${\cal M}$. Let us denote \begin{equation} \label{eq:ap} A(q, {\cal I}, {\cal J}):=\{\grad h_i(q) :~i\in{\cal I} \}\cup\{\grad g_j(q):~j \in {\cal J}\}, \qquad q\in {\cal M}, \end{equation} where ${\cal I} \subset \{1, \ldots, s\}$, ${\cal J}\subset \{1, \ldots, m\}$ while $\{\grad h_i(q) :~i\in{\cal I} \}\cup\{\grad g_j(q):~j \in {\cal J}\}$ is a multiset, that is, repetition of the same element is allowed.
\begin{definition}\label{d:positive-linearly-dependent}
Let $V = \{v_1, \dots , v_s\}$ and $W = \{w_1, \dots , w_m\}$ be two finite multisets on $T_{p}{\cal M}$. The pair $(V,W)$ is said to be positive-linearly dependent if there exist $\alpha=(\alpha_1, \ldots, \alpha_s) \in \mathbb{R}^{s}$ and $\beta=(\beta_1, \ldots, \beta_m)\in \mathbb{R}^{m}_+$ such that $(\alpha, \beta)\neq 0$ and \begin{equation*} \sum_{i=1}^s\alpha_iv_i+ \sum_{j=1}^m\beta_jw_j=0. \end{equation*} Otherwise, $(V,W)$ is said to be positive-linearly independent. When clear from the context, we refer to $V\cup W$ instead of $(V,W)$. \end{definition}
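In finite dimensions, positive-linear dependence can be tested numerically: $(V,W)$ is positive-linearly dependent exactly when $V$ alone is linearly dependent or when the linear program that maximizes $\sum_j\beta_j$ subject to $\sum_i\alpha_iv_i+\sum_j\beta_jw_j=0$ and $0\le\beta_j\le1$ has a positive optimal value. The following Python sketch implements this test (the function name and the tolerance are our own choices).
\begin{verbatim}
import numpy as np
from scipy.optimize import linprog

def is_positive_linearly_dependent(V, W):
    # V: (n, s) array with the vectors v_i as columns, W: (n, m) array with the w_j as columns.
    n, s = V.shape
    m = W.shape[1]
    # Case 1: V alone is linearly dependent (beta = 0, alpha != 0).
    if np.linalg.matrix_rank(V) < s:
        return True
    # Case 2: a witness with some beta_j > 0.  Maximize sum(beta) subject to
    # V alpha + W beta = 0, 0 <= beta <= 1, alpha free; optimum > 0 iff such a witness exists.
    c = np.concatenate([np.zeros(s), -np.ones(m)])      # linprog minimizes
    A_eq = np.hstack([V, W])
    bounds = [(None, None)] * s + [(0.0, 1.0)] * m
    res = linprog(c, A_eq=A_eq, b_eq=np.zeros(n), bounds=bounds, method="highs")
    return res.success and -res.fun > 1e-9

# Example: w_2 = -w_1 makes (V, W) positive-linearly dependent (take alpha = 0, beta = (1, 1)).
V = np.array([[1.0], [0.0]])                    # one "equality" gradient
W = np.array([[0.0, 0.0], [1.0, -1.0]])         # two "inequality" gradients
print(is_positive_linearly_dependent(V, W))          # True
print(is_positive_linearly_dependent(V, W[:, :1]))   # False
\end{verbatim}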
\begin{lemma} \label{lemma:LD} Let $p\in {\cal M}$ and assume that $A(p,{\cal I}, {\cal J} )$ is (positive-)linearly independent. Then, there exists $\epsilon>0$ such that $A(q,{\cal I}, {\cal J}) $ is also (positive-)linearly independent for all $q \in B_{\epsilon}(p)$. \end{lemma}
\begin{lemma}\label{cr:eqcrcq} The following two conditions are equivalent: \begin{enumerate} \item[(i)] There exists $\epsilon >0$ such that for all ${\cal I}\subset \{1, \ldots, s\}$ and ${\cal J}\subset \{1, \ldots, m\}$, whenever the set $A(p, {\cal I}, {\cal J})$ is linearly dependent, $A(q, {\cal I}, {\cal J})$ is also linearly dependent for all $q\in B_{\epsilon}(p)$. \item[(ii)] There exists $\epsilon >0$ such that for all ${\cal I}\subset \{1, \ldots, s\}$ and ${\cal J}\subset \{1, \ldots, m\}$ the rank of $A(q, {\cal I}, {\cal J})$ is constant for any $q\in B_{\epsilon}(p)$. \end{enumerate} \end{lemma}
\begin{lemma}[Carath\'eodory's Lemma \cite{Andreani_Haeser_Schuverdt_Silva2012RCPLD}]\label{l:Caratheodory} Let $u_1, \ldots , u_s, v_1, \ldots , v_m$ be vectors in a finite-dimensional vector space $V$ such that $\{u_1,\dots,u_s\}$ is linearly independent. Suppose $x \in V$ is such that there are real scalars $ \alpha_1, \ldots , \alpha_s, \beta_1, \ldots , \beta_m$, with $\beta_j \neq 0 $ for $j=1, \ldots , m$ and \begin{equation*} x= \sum_{i=1}^s\alpha_i u_i + \sum_{j=1}^m \beta_j v_j. \end{equation*} Then, there exist a subset ${\cal J} \subset \{1, \ldots , m\}$, and real scalars $ \bar{\alpha}_i, i=1,\dots,s$, and $\bar{\beta}_j\neq0, j\in{\cal J}$ such that \begin{equation*} x= \sum_{i=1}^s\bar{\alpha}_i u_i + \sum_{j \in {\cal J}} \bar{\beta}_j v_j, \end{equation*} $\bar{\beta}_j\beta_j>0$ for all $j\in{\cal J}$, and $\{u_i:i \in \{1,\dots,s\}\}\cup \{v_j :j\in {\cal J}\}$ is linearly independent. \end{lemma}
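The lemma admits a simple constructive implementation: while the family $\{u_i\}\cup\{v_j:~j\in{\cal J}\}$ is linearly dependent, one moves along a null-space direction just far enough to zero out one coefficient $\beta_j$ without changing the sign of the remaining ones. The Python sketch below (the helper name and the tolerances are ours, and it is meant only as an illustration of the argument) realizes this idea.
\begin{verbatim}
import numpy as np
from scipy.linalg import null_space

def caratheodory_reduce(U, V, alpha, beta, tol=1e-10):
    # U: (n, s) with linearly independent columns, V: (n, m), and
    # x = U @ alpha + V @ beta with all beta_j != 0.  Returns (alpha_bar, J, beta_bar)
    # with x = U @ alpha_bar + V[:, J] @ beta_bar, beta_bar * beta[J] > 0 and
    # [U, V[:, J]] of full column rank.
    J = list(range(V.shape[1]))
    alpha = alpha.astype(float).copy()
    b = beta.astype(float).copy()
    while J:
        A = np.hstack([U, V[:, J]])
        if np.linalg.matrix_rank(A) == A.shape[1]:
            break
        # Nontrivial null vector (gamma, delta); delta != 0 because U has full column rank.
        z = null_space(A)[:, 0]
        gamma, delta = z[:U.shape[1]], z[U.shape[1]:]
        # Step sizes t_j = beta_j / delta_j; the smallest positive (or largest negative)
        # one zeroes some coefficient while keeping the sign of every surviving beta_j.
        with np.errstate(divide="ignore"):
            t = np.where(np.abs(delta) > tol, b[J] / delta, np.inf)
        finite = t[np.isfinite(t)]
        pos = finite[finite > 0]
        t_star = pos.min() if pos.size else finite.max()
        alpha -= t_star * gamma
        b_J = b[J] - t_star * delta
        keep = np.abs(b_J) > tol
        b[J] = b_J
        J = [j for j, k in zip(J, keep) if k]
    return alpha, J, b[J]

# Tiny example in R^2: x = u1 + v1 + v2 with v2 = v1 is reduced to a single v.
U = np.array([[1.0], [0.0]])
V = np.array([[0.0, 0.0], [1.0, 1.0]])
alpha0, beta0 = np.array([1.0]), np.array([1.0, 1.0])
x = U @ alpha0 + V @ beta0
a_bar, J, b_bar = caratheodory_reduce(U, V, alpha0, beta0)
# One index survives with coefficient 2.0; the reconstruction check prints True.
print(J, b_bar, np.allclose(U @ a_bar + V[:, J] @ b_bar, x))
\end{verbatim}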
We end this section by stating some standard notations in Euclidean spaces. The set of all $m \times n$ matrices with real entries is denoted by $\mathbb{R}^{m \times n}$ and $\mathbb{R}^m\equiv \mathbb{R}^{m\times 1}$. For $M \in \mathbb{R}^{m\times n} $ the matrix $ M^{\top} \in \mathbb{R}^{n\times m}$ is the {\it transpose} of $M$. For all $x, y \in \mathbb{R}^m$,
$\min\{x,y\}\in\mathbb{R}^m$ is the component-wise minimum of $x$ and $y$. We denote by $[y]_+$ the Euclidean projection of $y$ onto the non-negative orthant $\mathbb{R}^m_+$, while $\left\|y\right\|_2$ and $\left\| y \right\|_{\infty}$ denote its Euclidean and infinity norms, respectively.
\section{Preliminaries} \label{sec:prel} In this paper we are interested in the following Constrained Riemannian Optimization (CRO) problem \begin{equation}\label{PNL} \begin{array}{l} \displaystyle\Min_{q\in {\cal M}}f(q),\\ \mbox{subject~to~}h(q)=0, ~ g(q)\leq 0, \end{array} \end{equation} where ${\cal M}$ is an $n$-dimensional smooth and complete Riemannian manifold, the functions $f\colon {\cal M} \to {\mathbb R}$, $h=(h_1,\dots,h_s)\colon {\cal M} \to {\mathbb R}^s$ and $g=(g_1,\dots,g_m)\colon {\cal M} \to {\mathbb R}^m$ are continuously differentiable on ${\cal M}$. The feasible set $\Omega\subset{\cal M}$ of problem~\eqref{PNL} is defined by \begin{equation} \label{eq:constset} \Omega:=\{q\in {\cal M}:~h(q)=0, ~ g(q)\leq 0\}, \end{equation} which is closed. For a given point $q\in \Omega$, let ${\cal A}(q)$ be the set of indices of active inequality constraints, that is, \begin{equation} \label{eq:actset} {\cal A}(q):=\left\{j\in \{1, \ldots, m\}:~g_j(q)= 0\right\}. \end{equation} We say that the Karush/Kuhn-Tucker (KKT) conditions are satisfied at $p\in\Omega$ when there exist so-called Lagrange multipliers $(\lambda, \mu) \in {\mathbb R}^s\times {\mathbb R}^m_{+}$ such that the following two conditions hold:
\begin{itemize} \item[(i)]$\grad L(p, \lambda, \mu)=0$, \item[(ii)]$\mu_j=0$, for all $j\notin {\cal A}(p)$, \end{itemize} where $L(\cdot, \lambda, \mu): {\cal M}\to \mathbb{R}$ is the {\it Lagrangian function} defined by \begin{equation*} L(q, \lambda, \mu):=f(q)+\sum_{i=1}^s\lambda_ih_i(q)+ \sum_{j=1}^m\mu_jg_j(q). \end{equation*} For $p\in\Omega$, the {\it linearized/linearization cone} ${\cal L} (p)$ is defined as \begin{equation*} {\cal L} (p) := \big\{ v \in T_p{\cal M}:~ \left\langle \grad h_i(p) , v\right\rangle =0,~i=1, \ldots s;~ \left\langle \grad g_j(p) , v \right\rangle \leq 0, ~ j \in {\cal A}(p) \big\}, \end{equation*} and its polar is given by \begin{equation}\label{eq:PolarConeLin} {\cal L} (p)^{\circ}= \Big\{v \in T_p{\cal M} :~ v= \sum_{i=1}^{s} \lambda_i \grad h_i (p) + \sum_{j \in {\cal A}(p)} \mu_j \grad g_j(p), ~\mu_j \geq 0, \lambda_i\in {\mathbb R}\Big\}. \end{equation} A constraint qualification (CQ) is a condition that refers to the analytic description of the feasible set and that guarantees that every local minimizer is also a KKT point. In \cite{BergmannHerzog2019} it was shown that when $p$ is a local minimizer of \eqref{PNL} that satisfies Guignard's CQ, that is, ${\cal L} (p)^{\circ}={\cal T} (p)^{\circ}$, where ${\cal T} (p)$ is the Bouligand tangent cone of $\Omega$ at $p$, then the KKT conditions are satisfied at $p$.
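Numerically, checking whether a feasible point $p$ is a KKT point amounts to checking whether $-\grad f(p)$ belongs to the polar cone ${\cal L}(p)^{\circ}$ in \eqref{eq:PolarConeLin}, which is a least-squares problem with sign constraints on the multipliers of the active inequalities. The following is a minimal Python sketch of this test (the function name and the toy problem are ours); on a manifold, all gradients are assumed to be expressed in a fixed basis of $T_p{\cal M}$.
\begin{verbatim}
import numpy as np
from scipy.optimize import lsq_linear

def kkt_residual(grad_f, grad_h, grad_g_active):
    # grad_f: (n,) gradient of f at p; grad_h: (n, s) and grad_g_active: (n, r) gradients
    # of the equality and of the active inequality constraints, all in the same basis.
    # Returns the distance from -grad_f to L(p)^o together with the multipliers found.
    s, r = grad_h.shape[1], grad_g_active.shape[1]
    A = np.hstack([grad_h, grad_g_active])
    lb = np.concatenate([-np.inf * np.ones(s), np.zeros(r)])
    res = lsq_linear(A, -grad_f, bounds=(lb, np.inf * np.ones(s + r)))
    return np.linalg.norm(A @ res.x + grad_f), res.x[:s], res.x[s:]

# Toy check in R^2: minimize x1 + x2 subject to x1^2 + x2^2 - 1 <= 0.
# At p = (-1,-1)/sqrt(2) the constraint is active and KKT holds with mu = 1/sqrt(2).
p = -np.ones(2) / np.sqrt(2.0)
grad_f = np.ones(2)
grad_g = (2.0 * p).reshape(2, 1)
residual, lam, mu = kkt_residual(grad_f, np.zeros((2, 0)), grad_g)
print(residual < 1e-6, np.round(mu, 4))   # True [0.7071]
\end{verbatim}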
In~\cite{Udriste_1988} the convex inequality constrained problem is studied, under a Slater CQ, on a complete Riemannian manifold. In this case, the objective and inequality constraints are convex along geodesics and the feasible set is described by a finite collection of inequality constraints. In this context KKT conditions are formulated. In \cite{Yang_Zhang_Song2014} it was shown that when $p$ is a local solution of \eqref{PNL} and LICQ holds at $p$, that is, the set $ \{\grad h_i(p):~i=1, \ldots , s \}\cup \{\grad g_j(p):~ j\in {\cal A}(p)\} $ is linearly independent, then the KKT conditions are satisfied at $p$.
Without CQs, an approximate version of the KKT conditions is known to be satisfied at local minimizers:
\begin{theorem} \label{def:AKKT} Let $p\in\Omega$ be a local minimizer of \eqref{PNL}. Then $p$ is an {\it Approximate-KKT} (AKKT) point, that is, there exist sequences $(p^k)_{k\in {\mathbb N}}\subset {\cal M}$, $(\lambda^k)_{k\in {\mathbb N}}\subset {\mathbb R}^s$ and $(\mu^k)_{k\in {\mathbb N}}\subset {\mathbb R}_+^m$ such that \begin{enumerate} \item[(i)] $\lim_{k\to \infty}p^k=p,$ \item[(ii)] $ \lim_{k\to \infty} \grad L(p^k, \lambda^k, \mu^k)=0,$ \item[(iii)] $ \mu^k_j=0$, for all $j\notin {\cal A}(p)$ and sufficiently large $k$. \end{enumerate} \end{theorem}
This result appeared in \cite{Yamakawa_Sato2022}, as an extension of the well known nonlinear programming version of this theorem \cite{AndreaniHaeserMatinez2011}. Any sequence $(p^k)_{k\in {\mathbb N}}\subset {\cal M}$ that satisfies $(i)$, $(ii)$ and $(iii)$ above is called a primal AKKT sequence for $p$, while the corresponding sequence $(\lambda^k,\mu^k)_{k\in\mathbb{N}}$ is its dual sequence. In the Euclidean setting, this notion has been shown to be crucial in developing new constraint qualifications and expanding global convergence results of several algorithms in different contexts; for instance, nonlinear programming \cite{Andreani_Haeser_Schuverdt_Silva2012RCPLD,ccp}, Nash equilibrium problems \cite{gnep}, quasi-equilibrium problems \cite{qep}, multi-objective optimization \cite{giorgi}, second-order cone programming \cite{seq-crcq-socp}, semidefinite programming \cite{AHV,seq-crcq-sdp,weaksparsecq}, Banach spaces \cite{Borgens}, equilibrium constraints \cite{mpecakkt}, cardinality constraints \cite{kanzowcard}, among several other applications and extensions. In \cite{Yamakawa_Sato2022} the following safeguarded augmented Lagrangian algorithm was defined and it was proved that its iterates are precisely AKKT sequences. In order to define it, we denote by $\mathcal{L}_\rho(\cdot,\lambda,\mu):{\cal M}\to\mathbb{R}$ the standard Powell-Hestenes-Rockafellar augmented Lagrangian function, defined by $$\mathcal{L}_\rho(q,\lambda,\mu):=f(q)+\frac{\rho}{2}\left(\left\|h(q)+\frac{\lambda}{\rho}\right\|^2+\left\|\left[g(q)+\frac{\mu}{\rho}\right]_+\right\|^2\right).$$
\begin{algorithm} {\bf Safeguarded augmented Lagrangian algorithm} \label{Alg:LAA} \begin{description}
\item[ Step 0.] Take $p^0 \in {\cal M}$, $ \tau \in \left[0 \, , \, 1\right) $, $\gamma >1$, $ \lambda_{\min} < \lambda_{\max}$, $\mu_{\max} >0$, and $\rho_1>0$. Take also initial Lagrange multiplier estimates $\bar{\lambda}^1 \in \left[\lambda_{\min}, \lambda_{\max}\right]^s$ and $\bar{\mu}^1 \in \left[0, \mu_{\max}\right]^m$, and a sequence of tolerance parameters $\left(\epsilon_k\right)_{k\in {\mathbb N}} \subset \mathbb{R}_{+}$ such that $\lim_{k\rightarrow \infty} \epsilon_k =0$. Set $k \leftarrow 1$.
\item[ Step 1.] (Solve the subproblem) Compute (if possible) $p^k$ such that \begin{equation}\label{step1 alg1}
\left\|\grad {\mathcal{L}}_{\rho_k} (p^k, \bar{\lambda}^k, \bar{\mu}^k) \right\| \leq \epsilon_k. \end{equation} If it is not possible, stop the execution of the algorithm, declaring failure.
\item[ Step 2.] (Estimate new multipliers) Compute \begin{equation}\label{step2 alg1} \lambda^{k}=\bar{\lambda}^k + \rho_kh(p^k), \qquad \qquad \mu^{k}=\left[ \bar{\mu}^k + \rho_k g(p^k)\right]_{+}. \end{equation}
\item[ Step 3.] (Update the penalty parameter) Define \begin{equation}\label{eq:step3_Vk} V^{k}=\frac{\mu^{k}-\bar{\mu}^k}{\rho_k}. \end{equation} If $k=1$ or \begin{equation}\label{eq: step3-2 alg}
\max\left\{\big\|h(p^{k})\big\|_2 \, , \, \big\|V^k\big\|_2\right\} \leq \tau \max\left\{\big\|h(p^{k-1}) \big\|_2 \, , \, \big\|V^{k-1}\big\|_2\right\}, \end{equation} choose $\rho_{k+1} = \rho_k$. Otherwise, define $\rho_{k+1}=\gamma \rho_k$.
\item[ Step 4.] (Update multiplier estimates) Compute $ \bar{\lambda}^{k+1} \in \left[ \lambda_{\min} \, , \, \lambda_{\max}\right]^s$ and $ \bar{\mu}^{k+1} \in \left[ 0 \, , \, \mu_{\max}\right]^m$.
\item[ Step 5.] (Begin a new iteration) Set $k \leftarrow k+1$ and go to {\bf Step 1}. \end{description} \end{algorithm}
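For concreteness, the following is a minimal Python sketch of Algorithm~\ref{Alg:LAA} in the Euclidean case ${\cal M}=\mathbb{R}^n$ (which is, of course, a particular complete Riemannian manifold). The inner solver used in Step 1 (\texttt{scipy.optimize.minimize} with its default tolerance, in place of an explicit sequence $\epsilon_k$), the parameter values and the toy test problem are our own choices and are not part of the statement of the algorithm.
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize

def safeguarded_al(f, h, g, x0, s, m, tau=0.5, gamma=10.0, rho=1.0,
                   lam_box=10.0, mu_max=10.0, outer_iters=30):
    # Minimal sketch of the safeguarded augmented Lagrangian method for
    # min f(x) s.t. h(x) = 0 (s components), g(x) <= 0 (m components), x in R^n.
    x = np.asarray(x0, dtype=float)
    lam_bar, mu_bar = np.zeros(s), np.zeros(m)
    V_old = np.inf
    for k in range(outer_iters):
        def L_rho(x):   # Powell-Hestenes-Rockafellar augmented Lagrangian
            return (f(x) + 0.5 * rho * (np.sum((h(x) + lam_bar / rho) ** 2)
                    + np.sum(np.maximum(g(x) + mu_bar / rho, 0.0) ** 2)))
        x = minimize(L_rho, x, method="BFGS").x                          # Step 1 (approximate)
        lam = lam_bar + rho * h(x)                                       # Step 2
        mu = np.maximum(mu_bar + rho * g(x), 0.0)
        V = max(np.linalg.norm(h(x)), np.linalg.norm((mu - mu_bar) / rho))   # Step 3
        if k > 0 and V > tau * V_old:
            rho *= gamma
        V_old = V
        lam_bar = np.clip(lam, -lam_box, lam_box)                        # Step 4 (safeguarding)
        mu_bar = np.clip(mu, 0.0, mu_max)
    return x, lam, mu

# Toy problem: min x1 + x2  s.t.  x1^2 + x2^2 - 1 <= 0; solution (-1,-1)/sqrt(2).
f = lambda x: x[0] + x[1]
h = lambda x: np.zeros(0)
g = lambda x: np.array([x[0] ** 2 + x[1] ** 2 - 1.0])
x, lam, mu = safeguarded_al(f, h, g, x0=[2.0, 0.0], s=0, m=1)
print(np.round(x, 4), np.round(mu, 4))   # approx [-0.7071 -0.7071] [0.7071]
\end{verbatim}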
In the algorithm, $(\lambda^k,\mu^k)_{k\in {\mathbb N}}$ is the dual sequence associated with $(p^k)_{k\in {\mathbb N}}$, which may be unbounded, while the safeguarded dual sequence $(\bar{\lambda}^k,\bar{\mu}^k)_{k\in {\mathbb N}}$ is bounded and used for defining the subproblems. A standard choice is considering $(\bar{\lambda}^{k+1},\bar{\mu}^{k+1})$ as the projection of $(\lambda^k,\mu^k)$ onto the corresponding box. It was shown in \cite{Yamakawa_Sato2022} that any limit point of a sequence $(p^k)_{k\in\mathbb{N}}$ generated by Algorithm~\ref{Alg:LAA} is stationary for the problem of minimizing an infeasibility measure, namely \begin{equation*}
\displaystyle \Min_{q\in {\cal M}} \frac{1}{2}\left\|h(q)\right\|_2^2 + \frac{1}{2}\left\|\left[g(q)\right]_{+}\right\|_2^2. \end{equation*} When the limit point is feasible, they showed that it is an AKKT point. The corresponding AKKT sequence is precisely the sequence $(p^k)_{k\in\mathbb{N}}$ of primal iterates, which can be attested by the dual sequence $(\lambda^k,\mu^k)_{k\in\mathbb{N}}$ generated by the algorithm.
Thus, in order to establish a standard global convergence result for Algorithm~\ref{Alg:LAA}, namely, by showing that its feasible limit points satisfy the KKT conditions, it is sufficient to consider any condition that guarantees that all AKKT points are in fact KKT points. Due to Theorem \ref{def:AKKT}, the said condition will necessarily be a CQ. Constraint qualifications with this additional property are sometimes called {\it strict} CQs, and only the following ones have been stated in the Riemannian context:
\begin{definition}
Let $\Omega$ be given by \eqref{eq:constset}, $p\in \Omega$ and ${\cal A}(p)$ be given by~\eqref{eq:actset}.
The point $p$ is said to satisfy: \begin{enumerate}
\item[(i)] the linear independence constraint qualification (LICQ) if $$ \{\grad h_i(p):~i=1, \ldots , s \}\cup \{\grad g_j(p):~ j\in {\cal A}(p)\} $$ is linearly independent; \item[(ii)] the Mangasarian-Fromovitz constraint qualification (MFCQ) if $$ \{\grad h_i(p):~i=1, \ldots , s \}\cup\{\grad g_j(p):~ j\in {\cal A}(p)\} $$ is positive-linearly independent.\end{enumerate} \end{definition} The definition of LICQ was presented in \cite{Yang_Zhang_Song2014} while MFCQ was introduced in~\cite{BergmannHerzog2019}, where it was shown that LICQ implies MFCQ. In the next section we will introduce several new weaker CQs and we will prove that they can still be used for proving global convergence to a KKT point of algorithms that generate AKKT sequences such as Algorithm~\ref{Alg:LAA}.
\section{New strict constraint qualifications} \label{sec:nStricCQ}
We will say that a property ${\cal P}$ of the constraints defining the feasible set $\Omega$ of \eqref{PNL} at a given point $p\in\Omega$ is a {\it strict} CQ for the necessary optimality condition ${\cal S}$ if at a point $p\in\Omega$ that satisfies both ${\cal P}$ and ${\cal S}$, it is necessarily the case that $p$ satisfies the KKT conditions, according to the definition given in~\cite{Birgin_Martinez_2014_SCQ}. Thus, after we present and discuss our conditions, we shall prove that they are all strict CQs with respect to the sequential optimality condition AKKT from Theorem \ref{def:AKKT}. The first conditions we propose are the following: \begin{definition} \label{def:NewCQS} Let $\Omega$ be given by \eqref{eq:constset}, $p\in \Omega$ and ${\cal A}(p)$ be given by~\eqref{eq:actset}. The point $p$ is said to satisfy: \begin{enumerate} \item[(i)] the constant rank constraint qualification (CRCQ) if there exists $\epsilon>0$ such that for all ${\cal I}\subset \{1, \ldots, s\}$ and ${\cal J}\subset {\cal A}(p)$ the rank of $\{\grad h_i(q):~i\in {\cal I} \}\cup \{\grad g_j(q) : j\in {\cal J}\}$ is constant for all $q\in B_{\epsilon}(p)$;
\item[(ii)] the constant positive linear dependence condition (CPLD), if for any ${\cal I}\subset \{1, \ldots, s\}$ and ${\cal J}\subset {\cal A}(p)$, whenever the set $\{\grad h_i(p):~i\in {\cal I} \}\cup \{\grad g_j(p) : j\in {\cal J}\}$ is positive-linearly dependent, there exists $\epsilon >0$ such that $\{\grad h_i(q):~i\in {\cal I} \}\cup \{\grad g_j(q):~ j\in {\cal J}\}$ is linearly dependent, for all $q\in B_{\epsilon}(p)$; \item[(iii)] the Relaxed-CRCQ (RCRCQ) if there exists $\epsilon >0$ such that for all ${\cal J}\subset {\cal A}(p)$ the rank of $\{\grad h_i(q):~i=1, \ldots , s \}\cup \{\grad g_j(q):~ j\in {\cal J}\}$ is constant for all $q\in B_{\epsilon}(p)$; \item[(iv)] the Relaxed-CPLD (RCPLD) if there exists $\epsilon >0$ such that the following two conditions hold: \begin{enumerate}
\item[(a)] the rank of $\{\grad h_i(q):~i=1, \ldots , s\}$ is constant for all $q\in B_{\epsilon}(p)$;
\item[(b)] Let ${\cal K} \subset \{1, \ldots , s\}$ be such that $\{\grad h_i(p):~ i \in {\cal K} \}$ is a basis for the subspace generated by $\{\grad h_i(p):~ i=1, \ldots, s \}$. For all ${\cal J} \subset {\cal A}(p)$, if $\{\grad h_i(p):~i\in {\cal K}\}\cup \{\grad g_j(p):~ j\in {\cal J}\}$ is positive-linearly dependent, then $\{\grad h_i(q):~i\in {\cal K}\}\cup \{\grad g_j(q):~ j\in {\cal J}\}$ is linearly dependent, for all $q\in B_{\epsilon}(p)$. \end{enumerate} \end{enumerate} \end{definition} These conditions are natural versions in the Riemannian setting of the existing conditions in the Euclidean setting. The following diagram shows the relationship among the conditions introduced so far, where an arrow represents strict implication, which shall be proved later in this section.
\begin{figure}
\caption{Strict constraint qualifications for problem~\eqref{PNL}.}
\label{relations}
\end{figure}
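Constant-rank type conditions can at least be probed numerically, by sampling points in a small ball around $p$ and monitoring the ranks of the relevant families of gradients; of course, a finite sample can only refute, never certify, a constant-rank property. The Python sketch below (the function name and the planar toy data are ours) does this for all subsets required by CRCQ, in the spirit of Example~\ref{ex:MFNotCR} below.
\begin{verbatim}
import itertools
import numpy as np

def crcq_probe(grad_h, grad_g_active, p, eps=1e-3, samples=200, seed=0):
    # grad_h(q) -> (n, s) array and grad_g_active(q) -> (n, r) array: gradients (as columns)
    # of the equality constraints and of the inequality constraints active at p.
    # Returns the CRCQ subsets (I, J) whose rank is NOT constant on the sampled ball.
    rng = np.random.default_rng(seed)
    s, r = grad_h(p).shape[1], grad_g_active(p).shape[1]
    points = [p] + [p + eps * rng.uniform(-1.0, 1.0, len(p)) for _ in range(samples)]
    subsets = lambda k: itertools.chain.from_iterable(
        itertools.combinations(range(k), j) for j in range(k + 1))
    bad = []
    for I in subsets(s):
        for J in subsets(r):
            if not I and not J:
                continue
            ranks = {np.linalg.matrix_rank(np.hstack(
                [grad_h(q)[:, list(I)], grad_g_active(q)[:, list(J)]])) for q in points}
            if len(ranks) > 1:
                bad.append((I, J))
    return bad

# Planar toy version of the phenomenon in Example ex:MFNotCR: for g1(q) = q1 and
# g2(q) = q1 + q2^2 the two gradients coincide at p = 0 but are independent nearby.
grad_h = lambda q: np.zeros((2, 0))
grad_g = lambda q: np.array([[1.0, 1.0], [0.0, 2.0 * q[1]]])
print(crcq_probe(grad_h, grad_g, p=np.zeros(2)))   # [((), (0, 1))]  -> CRCQ fails at p
\end{verbatim}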
The reason for introducing these CQs in the Riemannian setting is their several applications known in the Euclidean case, which we expect to extend also to the Riemannian setting. Although we shall only prove results concerning the global convergence of the safeguarded augmented Lagrangian method, let us briefly review some properties of these conditions in the Euclidean setting.
LICQ is equivalent to the uniqueness of the Lagrange multiplier for any objective function that assumes a constrained minimum at the point \cite{wachsmuth}. However, it is considered to be too stringent. For instance, it fails when the same constraint is repeated twice in the problem formulation. On the other hand, MFCQ, with its many equivalent statements, is the most prevalent CQ in the nonlinear programming literature, with several applications. In particular, it considers the correct sign of the Lagrange multiplier in its formulation, which can be thought of as making it a more adequate statement than LICQ. However, the simple trick of replacing an equality constraint $h(x)=0$ by two inequalities $h(x)\leq0$ and $-h(x)\leq0$ is enough for ensuring that MFCQ does not hold. This is due to the fact that under this very natural formulation, the set of Lagrange multipliers (if non-empty) is necessarily unbounded, while MFCQ is equivalent to the boundedness of this set \cite{wachsmuth}. Notice also that the case of linear constraints is not automatically covered by either of these two assumptions, which generally requires a separate analysis when one is assuming MFCQ or LICQ.
Condition CRCQ, on the other hand, gives more freedom to someone modeling an optimization problem, given that it is not tricked by repetition of a constraint or by splitting an equality constraint into two inequalities. It also subsumes the linear case, dispensing with a separate analysis. However, it does not consider the correct sign of Lagrange multipliers, being thus independent of MFCQ. The CPLD condition, on the other hand, corrects this issue, introducing the correct sign by considering positive linear dependence instead of standard linear dependence (see the alternative definition of CRCQ as given by Lemma \ref{cr:eqcrcq}), being then strictly weaker than both MFCQ and CRCQ together. This condition has been used mainly for showing global convergence of algorithms, firstly for an SQP method, when it was introduced in \cite{Qi_Wei2000}, and it was popularized for being the basis for the global convergence theory of the popular ALGENCAN software \cite{Birgin_Martinez_2014_SCQ}. However, other applications have emerged, such as in bilevel optimization \cite{bilevel1,bilevel2}, switching constraints \cite{switching}, exact penalty \cite{exactp}, among several others. On the other hand, CRCQ is more robust in terms of applications, since it was introduced in order to compute the derivative of the value function \cite{Janin_Robert_1984}. It has also found applications in the characterization of tilt stable minimizers \cite{gfrerer}. More interestingly, while MFCQ is still able to provide a second-order necessary optimality condition, the condition depends on the whole set of Lagrange multipliers, which limits its practical use. CRCQ, on the other hand, provides a strong second-order necessary optimality condition depending on a single Lagrange multiplier \cite{Andreani_Haeser_Schuverdt_Silva2012RCPLD}, a result which was recently extended to the context of conic optimization \cite{CRCQfaces}. This difference with respect to MFCQ in terms of the second-order results can somehow be explained by the fact that under CRCQ the value of the quadratic form defined by the Hessian of the Lagrangian evaluated at a direction in the critical cone is invariant to the Lagrange multiplier used to define it \cite{gfrerer}.
The relaxed versions of CRCQ \cite{Minchenko_Stakhovski_2011} and CPLD \cite{Andreani_Haeser_Schuverdt_Silva2012RCPLD} can in fact be thought of as the ``correct'' versions of these conditions, as they enjoy the very same properties previously described. In fact, there is no reason to consider all subsets of equality constraints, and this was already present in the very first results proved under CRCQ by Janin in \cite{Janin_Robert_1984}.
Let us now prove that the strict implications shown in Figure \ref{relations} hold for any Riemannian manifold ${\cal M}$ with dimension $n\geq2$. We do this by providing several examples that help illustrate the different conditions we propose; however, we only describe in detail the computations concerning the most relevant examples, the other ones being analogous. We start by showing in the next two examples that MFCQ and CRCQ are independent conditions, that is, in Example \ref{ex:CRNotMF}, CRCQ holds and MFCQ fails, while in Example \ref{ex:MFNotCR}, MFCQ holds and CRCQ fails.
\begin{example} \label{ex:CRNotMF} Consider problem~\eqref{PNL} with feasible set $\Omega:=\{q\in {\cal M}:~ h(q)\leq0, -h(q)\leq0\}$, where $h: {\cal M} \to {\mathbb R}$ is continuously differentiable on ${\cal M}$ with $\grad h(p)\neq0$ and $p\in\Omega$. Thus MFCQ fails at $p$ while CRCQ holds. \end{example}
To proceed with the examples let us define some auxiliary functions. Let ${\cal M}$ be an $n$-dimensional Riemannian manifold and $p\in {\cal M}$. Take $0<{\bar \delta}<r_p$, where $r_p$ is the injectivity radius, such that $B_{{\bar \delta}}(p)$ is a strongly convex neighborhood, which exists by~\cite[Proposition~4.2]{doCarmo1992}. Let $u, v\in T_{p}{\cal M}$ with $\|u\|=\|v\|=1$, $\langle v, u\rangle =0$, and define the geodesics $\gamma_u(t):=\exp_p(tu)$ and $\gamma_v(t):=\exp_p(tv)$. Take also $0<\delta< {\bar \delta}$ and define $p_1:= \gamma_u(-\delta)$, $p_2:= \gamma_u(\delta)$, and $p_3= \gamma_v(\delta)$. Note that $p_1, p_2, p_3 \in B_{{\bar \delta}}(p)$ with $p_1 \neq p_2$, $p_1\neq p_3$, and $p_2\neq p_3$. Define the following auxiliary functions \begin{equation} \label{defphi} \varphi_i(q)= \frac{1}{2} d(q, p_i)^2-\frac{1}{2}d(p, p_i)^2, \qquad i=1,2,3. \end{equation}
\begin{example} \label{ex:MFNotCR}
Define the functions $g_1(q):=\varphi_1(q)$, $g_2(q):=-\varphi_2(q)$ and consider a feasible set $\Omega:=\{q\in {\cal M}:~ g(q)\leq 0\}$, where $g:= (g_1, g_2)$. One can see that CRCQ is not valid at $p\in\Omega$ while MFCQ holds. \end{example}
In~\cite{Qi_Wei2000}, it was proved that CPLD is strictly weaker than both MFCQ and CRCQ together in the Euclidean setting. The next example shows that the same thing happens for any smooth Riemannian manifold ${\cal M}$ with dimension $n\geq2$.
\begin{example} \label{ex:CPLDNotCRCQ_MFCQ} Let ${\cal M}$ be an $n$-dimensional Riemannian manifold with $n\geq 2$. Take $p\in {\cal M}$ and $g:=(g_1, \ldots , g_4)\colon{\cal M}\to {\mathbb R}^4$ continuously differentiable functions satisfying the following conditions \begin{enumerate} \item[(i)] $g(p)=0$; \item[(ii)] $\grad g_1(p)= \grad g_2(p)\neq0$ and $\grad g_3(p)= - \grad g_4(p)\neq0$; \item[(iii)] for all $\epsilon >0$, there exists $q \in B_{\epsilon} (p)$ such that $\{\grad g_1({q}), \grad g_2({q})\}$ is linearly independent with $q \neq p$; \item[(iv)] $\{\grad g_1(p), \grad g_3(p)\}$ is linearly independent. \item[(v)] there exists $\epsilon>0$ such that $\{\grad g_3(q), \grad g_4(q)\}$ is linearly dependent, for all $q \in B_{\epsilon} (p)$. \end{enumerate} Consider the feasible set $\Omega:=\{q\in {\cal M}:~ g(q)\leq 0\}$. Then, by $(i)$, $p \in \Omega$. It follows from the first equality in condition $(ii)$ that the set $\{\grad g_1(p), \grad g_2(p)\}$ is linearly dependent. Thus, $(iii)$ implies that $p$ does not satisfy CRCQ. Furthermore, the second equality in condition $(ii)$ guarantees that $p$ does not satisfy MFCQ. We will now show that $p$ satisfies CPLD. It is easy to see that the set $\{\grad g_j (p):~j \in {\cal J}\subset {\cal A}(p) \}$ is positive-linearly dependent if, and only if, $\{3,4\}\subset{\cal J}$. Therefore, by $(v)$ we conclude that $p$ satisfies CPLD. In the following we build two concrete examples satisfying conditions $(i),(ii), (iii), (iv)$, and $(v)$. The first one considers ${\cal M}$ as the $2$-sphere while the second one considers an arbitrary $2$-dimensional manifold ${\cal M}$; however, it is easy to generalize the examples to an arbitrary dimension $n\geq2$. \begin{itemize} \item Consider the sphere ${\cal M}:=\{(x, y, z)\in {\mathbb R}^3:~x^2+y^2+z^2=1\}$ and take $p:=(0,0,1)$. The functions $g_1(x, y, z):=x$, $g_2(x, y, z):=x+y^2$, $g_3(x, y, z):=x+y$, $g_4(x, y, z):=-x-y$ satisfy conditions $(i)$, $(ii)$, $(iii)$, $(iv)$, and $(v)$. Indeed, for $q=(x,y,z)\in B_\epsilon(p)$ we have $\grad g_1(q)=\Pi_{T_q{\cal M}}(1,0,0)$, $\grad g_2(q)=\Pi_{T_q{\cal M}}(1,2y,0)$, $\grad g_3(q)=\Pi_{T_q{\cal M}}(1,1,0)$, and $\grad g_4(q)=\Pi_{T_q{\cal M}}(-1,-1,0)$, where $\Pi_{T_q{\cal M}}$ denotes the orthogonal projection onto $T_q{\cal M}$. Clearly, $(i)$ holds. To see that $(ii)$ and $(iv)$ hold it is enough to note that since $T_p{\cal M}=\{p\}^{\perp}$, at $q=p$ the projections coincide with the vectors being projected. We proceed to prove that $(iii)$ holds. Let $q:=(x,y,z)$ with $y\neq0$ and $z\neq0$, hence $u:=(1,0,0)$, $v:=(1,2y,0)$, and $q$ are linearly independent. Take $\alpha,\beta\in\mathbb{R}$ such that $\alpha\grad g_1(q)+\beta\grad g_2(q)=0$. Since $T_q{\cal M}=\{q\}^{\perp}$, this implies that $\alpha(u-r_uq)+\beta(v-r_vq)=0$ for some $r_u,r_v\in\mathbb{R}$, which in turn gives $\alpha u+\beta v+(-\alpha r_u-\beta r_v)q=0$, implying $\alpha=\beta=0$, hence, $(iii)$. We obtain $(v)$ by noting that $\grad g_3(q)=-\grad g_4(q)$ for all $q$.
\item Let ${\cal M}$ be a $2$-dimensional complete manifold. Let us show that the functions $g_1(q):=\varphi_1(q)$, $g_2(q):=-\varphi_2(q)$, $g_3(q):=\varphi_3(q)$, and $g_4(q):=-\varphi_3(q)$ satisfy conditions $(i)$, $(ii)$, $(iii)$, $(iv)$, and $(v)$, where these functions are defined in \eqref{defphi}. Indeed, $g(p)=0$, which gives $(i)$. Since $\grad g_1(p)=-\exp^{-1}_pp_1=\delta u\neq0$, $\grad g_2(p)=\exp^{-1}_pp_2=\delta u$, $\grad g_3(p)=-\exp^{-1}_pp_3=-\delta v\neq0$ and $\grad g_4(p)=\exp^{-1}_pp_3=\delta v$, then $g$ satisfies $(ii)$.
We proceed to show that $g_1$ and $g_2$ satisfy (iii). For that, take a point $q\in B_{{\bar \delta}}(p)$ with $q\neq p$ such that $d({q},p_2)=d({q},p_1) < d(p_2,p_1)$. Note that $\grad g_1({q})=-\exp^{-1}_{q}p_1$ and $\grad g_2({q})=\exp^{-1}_{q}p_2$. In addition, due to $d({q},p_2)=d({q},p_1)$, we have $\|\grad g_1({q})\|=\|\grad g_2({q})\|=d({q},p_1)$. Assume by contradiction that $\{\grad g_1({q}), \grad g_2({q})\}$ is linearly dependent. Since ${\cal M}$ is $2$-dimensional and $d({q},p_1) < d(p_2,p_1) $, we conclude that $\grad g_1({q})=\grad g_2({q})$. Consider the geodesic $$ \gamma(t)=\exp_{q}(-t\exp^{-1}_{q}p_1)= \exp_{q}(t\exp^{-1}_{q}p_2), $$ where the second equality holds because we are under the assumption $\grad g_1({q})=\grad g_2({q})$. Hence $\gamma(0)=q$, $\gamma(-1)=p_1$, and $\gamma(1)=p_2$. Considering that there exists a unique geodesic joining $p_1$ and $p_2$, we conclude that $\gamma_u=\gamma$. Thus, there exists ${\bar t}$ such that $\gamma({\bar t})=p$ and $$ \gamma'({\bar t})=P_{q\gamma({\bar t})}(-\exp^{-1}_{q}p_1). $$ We know that the parallel transport is an isometry and $\gamma'({\bar t})=\delta \gamma'_u(0)=-\exp^{-1}_pp_1$. Thus, using the last equality we conclude that $d(q, p_1)=d(p, p_1)$. Hence, considering that $q$, $p$ and $p_1$ belong to the same geodesic, we have $q=p$, which is a contradiction. Therefore, $\{\grad g_1({q}), \grad g_2({q})\}$ is linearly independent for $q \neq p$.
Due to $\grad g_1(p)=\delta u$, $\grad g_3(p)=-\delta v$ and $\langle v, u\rangle =0$, condition $(iv)$ is satisfied. Finally, due to $g_4=-g_3$ we have $\grad g_4({q})=-\grad g_3({q})$, and $(v)$ is also satisfied.
The situation in consideration is depicted in Figure~\ref{figura CPLD NotCRCQ_MFCQ}. \end{itemize}
\end{example}
\begin{figure}
\caption{ The scenario at point $p$.}
\caption{Case for $q$ in the neighborhood of $p$.}
\caption{Illustrative figure for Example~\ref{ex:CPLDNotCRCQ_MFCQ}, where CPLD holds while MFCQ and CRCQ fail. The rank of the gradients indexed by $\{1,2\}$ is not constant, but they are positive-linearly independent. In addition, the gradients in $\{3,4\}$ are positive-linearly dependent, which implies that MFCQ fails, and this remains the case in a neighborhood.}
\label{figura CPLD NotCRCQ_MFCQ}
\end{figure}
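The sphere instance of Example~\ref{ex:CPLDNotCRCQ_MFCQ} can also be verified numerically: the Riemannian gradients are obtained by projecting the Euclidean gradients onto $T_q{\cal M}=\{q\}^{\perp}$, after which conditions $(ii)$--$(v)$ reduce to rank computations. A short Python sketch (ours) follows.
\begin{verbatim}
import numpy as np

proj = lambda q, v: v - np.dot(v, q) * q   # orthogonal projection onto T_q S^2 = {q}^perp

# Euclidean gradients of g1 = x, g2 = x + y^2, g3 = x + y, g4 = -x - y on R^3.
egrads = lambda q: [np.array([1.0, 0.0, 0.0]), np.array([1.0, 2.0 * q[1], 0.0]),
                    np.array([1.0, 1.0, 0.0]), np.array([-1.0, -1.0, 0.0])]
rgrads = lambda q: [proj(q, v) for v in egrads(q)]

p = np.array([0.0, 0.0, 1.0])
q = np.array([0.1, 0.2, np.sqrt(1 - 0.1**2 - 0.2**2)])   # a nearby point on the sphere

G_p, G_q = rgrads(p), rgrads(q)
rank = lambda vs: np.linalg.matrix_rank(np.column_stack(vs))
print(rank([G_p[0], G_p[1]]), rank([G_q[0], G_q[1]]))              # 1 2   (CRCQ fails at p)
print(np.allclose(G_p[2], -G_p[3]), np.allclose(G_q[2], -G_q[3]))  # True True (MFCQ fails, CPLD survives)
print(rank([G_p[0], G_p[2]]))                                      # 2      (condition (iv))
\end{verbatim}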
As proved in~\cite{Minchenko_Stakhovski_2011}, RCRCQ is strictly weaker than CRCQ in the Euclidean context. This fact is also true for any smooth Riemannian manifold ${\cal M}$ with dimension $n\geq2$. In fact, the following example shows that RCRCQ does not imply CPLD.
\begin{example} \label{ex:CPLDNotRCRCQ}
Define the functions $h(q):=-\varphi_3(q)$, $g_1(q):=-\varphi_1(q)$, $g_2(q):=-\varphi_2(q)$ from \eqref{defphi} and consider a feasible set $\Omega:=\{q\in {\cal M}:~ h(q)=0, g(q)\leq 0\}$, where $g:= (g_1, g_2)$. The point $p \in \Omega$ does not satisfy CPLD, but it satisfies RCRCQ. \end{example}
Finally, let us show that RCRCQ implies RCPLD. We do this by providing the following equivalent description of RCRCQ:
\begin{proposition}\label{P:RCRCQ}
Let ${\cal K} \subset \{1, \ldots , s \}$ be such that $\{\grad h_i(p):~ i \in {\cal K}\}$ is a basis for the subspace generated by $\{\grad h_i(p):~ i=1, \ldots, s \}$. RCRCQ holds at $p\in \Omega$ if and only if there exists $\epsilon >0$ such that the following two conditions hold: \begin{enumerate}
\item[(i)] the rank of $\{\grad h_i(q):~i=1, \ldots , s\}$ is constant for all $q\in B_{\epsilon}(p)$;
\item[(ii)] for all ${\cal J} \subset {\cal A}(p)$, if $\{\grad h_i(p):~i\in {\cal K}\}\cup \{\grad g_j(p):~ j\in {\cal J}\}$ is linearly dependent, then $\{\grad h_i(q):~i\in {\cal K}\}\cup \{\grad g_j(q):~ j\in {\cal J}\}$ is linearly dependent for all $q\in B_{\epsilon}(p)$. \end{enumerate} \end{proposition} \begin{proof}
Assume first that $p \in \Omega$ satisfies RCRCQ. Taking ${\cal J} = \emptyset$ in the definition of RCRCQ, we obtain $(i)$. In order to obtain $(ii)$, let ${\cal J} \subset {\cal A}(p)$ such that $\{\grad h_i(p):~i\in {\cal K}\}\cup \{\grad g_j(p):~ j\in {\cal J}\}$ is linearly dependent. Since $\{\grad h_i(p):~ i \in {\cal K}\}$ is a basis for the subspace generated by $\{\grad h_i(p):~ i=1, \ldots, s \}$ and $(i)$, we have that there exists $\epsilon>0$ such that $\{\grad h_i(q):~ i \in {\cal K}\}$ is a basis for the subspace generated by $\{\grad h_i(q):~ i=1, \ldots, s \}$ for all $q\in B_{\epsilon}(p)$. Thus, in accordance with RCRCQ, the rank of $\{\grad h_i(q):~i\in {\cal K}\}\cup \{\grad g_j(q):~ j\in {\cal J}\}$ is constant for all $q\in B_{\epsilon}(p)$. Consequently, $(ii)$ holds.
To prove the reciprocal assertion, let ${\cal J} \subset {\cal A}(p)$. It is worth noting that, owing to Lemma ~\ref{lemma:LD}, the rank of the set cannot decrease in a neighborhood. Let us choose ${\cal J}_1 \subset {\cal J}$ such that $A(p, {\cal K}, {\cal J}_1)$ is a basis for $A(p, {\cal K}, {\cal J})$ -- see the notation introduced in \eqref{eq:ap}. The case ${\cal J}_1 = {\cal J}$ follows trivially. Consider the situation ${\cal J}_1 \neq {\cal J}$ and let $j \in {\cal J} $ with $j \notin {\cal J}_1 $. As a result of $(ii)$, $A(q, {\cal K}, {\cal J}_1) \cup \{\grad g_j(q)\}$ must continue to be linearly dependent for $q$ in a neighborhood of $p$. Therefore, rank $A(q, {\cal K}, {\cal J}) = \left|{\cal K}\right| + \left|{\cal J}_1\right| $ for all $q\in B_{\epsilon}(p)$ and sufficiently small $\epsilon>0$. Considering the definition of ${\cal K}$ and $(i)$ we must have that $A(q, \{1,\dots,s\}, {\cal J})$ has constant rank for $q\in B_{\epsilon}(p)$, which completes the proof. \end{proof}
Clearly, the equivalent definition of RCRCQ given by Proposition~\ref{P:RCRCQ} is independent of the choice of the index set ${\cal K}$. It is easy to see that the definition of RCPLD is also independent of this choice. This concludes the analysis of strict implications depicted in Figure \ref{relations}, where in particular we have that RCPLD is strictly weaker than CPLD and RCRCQ.
At this point, condition RCPLD is the weakest one among the ones we have presented. Thus, we will prove that RCPLD is a strict CQ with respect to the AKKT condition, which will be true for all other conditions that imply it. Before doing this, let us present yet another CQ called {\it Constant Rank of the Subspace Component} (CRSC \cite{Andreani_Haeser_Schuverdt_Silva_CRSC_2012}). Notice that, while RCRCQ improves upon CRCQ by observing that there is no reason to consider every subset of equality constraints, CRSC improves upon RCRCQ by observing that the same is true with respect to the inequality constraints. Namely, it is not the case that every subset of the active inequality constraints must be taken into account; only a particular fixed subset of the constraints maintaining the constant rank property is enough for guaranteeing the existence of Lagrange multipliers. The definition is as follows:
\begin{definition} \label{def:crsc} Let $\Omega$ be given by \eqref{eq:constset}, $p\in \Omega$, ${\cal A}(p)$ and ${\cal L} (p)^{\circ}$ be given by \eqref{eq:actset} and \eqref{eq:PolarConeLin}, respectively. Define the index set ${\cal J}_{-}(p) = \left\{ j \in {\cal A}(p):~-\grad g_j (p)\in {\cal L} (p)^{\circ} \right\}$. The point $p$ is said to satisfy CRSC if there exists $\epsilon >0$ such that the rank of $\{\grad h_i(q):~i=1, \ldots , s \}\cup \{\grad g_j(q):~ j\in {\cal J}_{-}(p)\}$ is constant for all $q\in B_{\epsilon}(p)$. \end{definition}
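The index set ${\cal J}_{-}(p)$ can be computed numerically: by \eqref{eq:PolarConeLin}, deciding whether $-\grad g_j(p)\in{\cal L}(p)^{\circ}$ is a least-squares feasibility problem with sign constraints, the same device used for the KKT test sketched in Section~\ref{sec:prel}. The following Python sketch (function names and tolerance are ours) computes ${\cal J}_{-}(p)$ for the sphere instance of Example~\ref{ex:CRSC_Not_RCPLD} below.
\begin{verbatim}
import numpy as np
from scipy.optimize import lsq_linear

def in_polar_cone(v, grad_h, grad_g_active, tol=1e-6):
    # True iff v = sum_i lambda_i grad h_i + sum_j mu_j grad g_j with mu >= 0,
    # i.e. v belongs to L(p)^o as written in (eq:PolarConeLin).
    s, r = grad_h.shape[1], grad_g_active.shape[1]
    A = np.hstack([grad_h, grad_g_active])
    lb = np.concatenate([-np.inf * np.ones(s), np.zeros(r)])
    res = lsq_linear(A, v, bounds=(lb, np.inf * np.ones(s + r)))
    return np.linalg.norm(A @ res.x - v) < tol

def J_minus(grad_h, grad_g_active):
    # Indices j (relative to the active set) such that -grad g_j(p) lies in L(p)^o.
    return [j for j in range(grad_g_active.shape[1])
            if in_polar_cone(-grad_g_active[:, j], grad_h, grad_g_active)]

# Sphere instance of Example ex:CRSC_Not_RCPLD at p = (0,0,1): in the basis (e1, e2) of
# T_p S^2 the Riemannian gradients of g1, g2, g3, g4 are (1,0), (-1,0), (0,1), (0,-1).
G = np.array([[1.0, -1.0, 0.0, 0.0], [0.0, 0.0, 1.0, -1.0]])
print(J_minus(np.zeros((2, 0)), G))   # [0, 1, 2, 3]  -> J_-(p) is the whole active set
\end{verbatim}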
It is clear that CRSC is weaker than RCRCQ and MFCQ, but its relation with RCPLD is not simple to establish. Somewhat surprisingly, CRSC is strictly weaker than RCPLD in the Euclidean setting. The proof is somewhat elaborate \cite{Andreani_Haeser_Schuverdt_Silva_CRSC_2012}, so we do not pursue it in the Riemannian setting, as it is not needed in our developments. Instead, we simply show that CRSC does not imply RCPLD in any Riemannian manifold ${\mathcal M}$ of dimension $n\geq2$. Thus, we shall prove the convergence of the augmented Lagrangian method under either of these conditions, even though we expect CRSC to be weaker than RCPLD. At this point, results under CRSC are at least independent of the ones where RCPLD is employed. We proceed with the example where CRSC holds and RCPLD fails:
\begin{example} \label{ex:CRSC_Not_RCPLD} Let ${\cal M}$ be an $n$-dimensional smooth Riemannian manifold with $n\geq 2$. Take $p\in {\cal M}$ and $g:=(g_1 , \ldots , g_{4})\colon{\cal M}\to {\mathbb R}^4$ continuously differentiable functions satisfying the following conditions: \begin{enumerate} \item[(i)] $g(p)=0$; \item[(ii)] $\grad g_1(p)= -\grad g_{2}(p)$ and $\grad g_3(p)= -\grad g_{4}(p)$; \item[(iii)] for all $\epsilon >0$, there exists $q \in B_{\epsilon} (p)$ such that the set $\{\grad g_1({q}), \grad g_2({q})\}$ is linearly independent with $q \neq p$; \item[(iv)] the set $\{\grad g_1(p), \grad g_3(p)\}$ is linearly independent; \item[(v)] there exists $\epsilon >0$ such that $ rank \{\grad g_j(q):~j=1, \ldots , 4\}=2$ for all $q \in B_{\epsilon} (p)$.
\end{enumerate} Consider a feasible set $\Omega:=\{q\in {\cal M}:~ g(q)\leq 0\}$ and $p \in \Omega$. It follows from condition $(ii)$ that the set $\{\grad g_1(p) , \grad g_2 (p)\}$ is positive-linearly dependent. Hence, using conditions $(i)$ and $(iii)$, we conclude that RCPLD does not hold at $p$. In order to see that CRSC is valid at $p$, it is enough to note $(v)$, together with the fact that $(i)$ and $(ii)$ imply that ${\cal J}_{-}(p) = \left\{ 1, 2, 3, 4 \right\}$. Next, we will present two examples in which conditions $(i)$, $(ii)$, $(iii)$, $(iv)$, and $(v)$ are satisfied. \begin{itemize} \item Consider the sphere ${\cal M}:=\{(x, y, z)\in {\mathbb R}^3:~x^2+y^2+z^2=1\}$ and take $p:=(0,0,1)$, $g_1(x, y, z):=x-y^2$, $g_2(x, y, z):=-x$, $g_3(x, y, z):=y-x^2$ and $g_4(x, y, z):=-y$, where clearly $(i)$ holds. Similarly to Example \ref{ex:CPLDNotCRCQ_MFCQ}, let $q:=(x,y,z)$ with $y\neq0$ and $z\neq0$, $u_1:=(1,-2y,0)$, $u_2:=(-1,0,0)$, $u_3:=(-2x,1,0)$, and $u_4:=(0,-1,0)$. Since $u_i\in\{p\}^{\perp}, i=1,2,3,4$, it is easy to see $(ii)$ and $(iv)$. In order to prove $(iii)$, notice that $\{q,u_1,u_2\}$ is linearly independent. Hence, since $\grad g_i(q)=\Pi_{\{q\}^{\perp}}(u_i)=u_i-r_{u_i}q, r_{u_i}\in\mathbb{R}$ for $i=1,2,3,4$, similarly to Example~\ref{ex:CPLDNotCRCQ_MFCQ} we have that $\{\grad g_1({q}), \grad g_2({q})\}$ is linearly independent. To see that $(v)$ holds, take $\epsilon>0$ such that $z\neq0$ for all $q=(x,y,z)\in B_\epsilon(p)$. Hence, $\{u_2, u_4, q\}$ is linearly independent, which implies that $\{\grad g_2({q}), \grad g_4({q})\}$ is linearly independent. From the fact that ${\cal M}$ is $2$-dimensional, $(v)$ holds.
\item Consider a $2$-dimensional complete manifold ${\mathcal M}$ and define the functions $g_1(q):=\varphi_1(q)$, $g_2(q):=\varphi_2(q)$, $g_3(q):=\varphi_3(q)$, and $g_4(q):=-\varphi_3(q)$ from \eqref{defphi}. Similarly to the computations in Example~\ref{ex:CPLDNotCRCQ_MFCQ}, one can prove that items $(i)$, $(ii)$, $(iii)$, and $(iv)$ are satisfied. By $(iv)$ and Lemma~\ref{lemma:LD}, we have that the rank of $\{\grad g_j(q):~j=1, \ldots , 4\}$ is at least $2$ for all $q \in B_{{\epsilon}}(p)$ and some $\epsilon>0$. Therefore, taking into account that ${\cal M}$ is $2$-dimensional, condition $(v)$ is also satisfied. Figure~\ref{fig:CRSC Not RCPLD} illustrates this example. \end{itemize}
\begin{figure}
\caption{ The scenario at point $p$.}
\caption{Case in a neighborhood of $p$.}
\caption{Illustrative figure for Example~\ref{ex:CRSC_Not_RCPLD} where RCPLD fails but CRSC is satisfied. Although the subset of gradients indexed by $\{1,2\}$ loses positive linear dependence for $q$ near $p$, all gradients remain to span a $2$-dimensional vector space (the whole tangent space) for $q$ near $p$.}
\label{fig:CRSC Not RCPLD}
\end{figure}
\end{example}
In our view, CRSC is the most interesting one of all previously defined conditions. Although we do not pursue its extensions to the Riemannian setting, we mention a few of its properties known in the Euclidean case. First, it has an elegant mathematical description. Also, the index set ${\cal J}_{-}(p)$ can be viewed as the index set of active inequality constraints that are treated as equality constraints in the polar of the linearized cone ${\cal{L}}^\circ(p)$ \eqref{eq:PolarConeLin}. Actually, this interpretation holds also for the cone ${\cal{L}}(p)$, since ${\cal J}_{-}(p)$ can be equivalently stated as the set of indexes $j\in{\cal A}(p)$ such that $\left\langle \grad g_j(p) , v \right\rangle = 0$ for all $v\in{\cal{L}}(p)$ \cite{positivity}. However, surprisingly, these interpretations are not by chance, since it was proved \cite{Andreani_Haeser_Schuverdt_Silva_CRSC_2012} that when $p\in\Omega$ satisfies CRSC, the constraints $g_j(q)\leq0, j\in{\cal J}_{-}(p)$ can only be satisfied as equalities for a feasible point $q$ in a small enough neighborhood around $p$. That is, one can safely replace the inequalities $g_j(q)\leq0, j\in{\cal J}_{-}(p)$ with equalities $g_j(q)=0, j\in{\cal J}_{-}(p)$ without locally altering the feasible set and in such a way that MFCQ holds. This result is connected to the one in \cite{shulu} which shows that whenever CRCQ is satisfied, there exists a local reformulation of the problem such that MFCQ holds. In fact, this procedure is well known in linear conic programming as {\it facial reduction}, that is, when the constraints are such that there is no interior point, there is an efficient procedure to replace the cone with one of its faces in such a way that a relative interior point exists. The extension of CRSC to the conic context and its connections with the facial reduction procedure are described in \cite{CRCQfaces2}, where they also show that CRSC may also provide the strong second-order necessary optimality condition depending on a single Lagrange multiplier by considering the constant rank property for all subsets of constraints that include ${\cal J}_{-}(p)$ and all equalities. Finally, we mention an additional property that holds in the Euclidean setting for all CQs discussed in this paper, that is, that they all imply that an error bound can be computed. We state this as the following conjecture in the Riemannian setting:
{\bf Conjecture:} Let ${\cal M}$ be a complete Riemannian manifold with dimension $n\geq 2$. Let $p\in\Omega$ be such that CRSC or RCPLD is satisfied. Then, there exists $\epsilon>0$ and $\alpha>0$ such that $$\inf_{w\in\Omega}d(q,w)\leq\alpha\max\{0,g_1(q),\dots,g_m(q),|h_1(q)|,\dots,|h_s(q)|\}$$ for all $q\in B_\epsilon(p)$.
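The conjecture can at least be sanity-checked on simple instances. The Python sketch below (ours) does this on the unit sphere for the single constraint $g(q)=q_3-\tfrac{1}{2}\le0$: for an infeasible $q$ the Riemannian distance to the feasible spherical cap is $\arccos(\tfrac{1}{2})-\arccos(q_3)$, and the printed value is only an empirical estimate of the smallest valid $\alpha$ on the sampled ball around a boundary point $p$ (at which MFCQ, and hence CRSC, holds).
\begin{verbatim}
import numpy as np

def dist_to_cap(q, c=0.5):
    # Geodesic distance on S^2 from q to the feasible cap {q : q_3 <= c} (0 if feasible).
    return max(0.0, np.arccos(c) - np.arccos(np.clip(q[2], -1.0, 1.0)))

def violation(q, c=0.5):
    return max(0.0, q[2] - c)

rng = np.random.default_rng(1)
p = np.array([np.sqrt(3.0) / 2.0, 0.0, 0.5])   # boundary point of the cap
ratios = []
for _ in range(5000):
    v = rng.standard_normal(3)
    v -= np.dot(v, p) * p                      # random tangent direction at p
    u = v / np.linalg.norm(v)
    t = rng.uniform(0.0, 0.3)                  # sample q in the geodesic ball B_0.3(p)
    q = np.cos(t) * p + np.sin(t) * u
    if violation(q) > 1e-12:
        ratios.append(dist_to_cap(q) / violation(q))
print(round(max(ratios), 3))   # empirical estimate of the smallest valid alpha on B_0.3(p)
\end{verbatim}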
Finally, let us prove that all conditions proposed so far are constraint qualifications. We do this by showing that they are strict CQs with respect to the necessary optimality condition AKKT from Theorem~\ref{def:AKKT}, since this gives us the main result of global convergence of Algorithm~\ref{Alg:LAA}. We start by showing that CRSC is a strict CQ, and we note that when the condition was introduced in the Euclidean setting \cite{Andreani_Haeser_Schuverdt_Silva_CRSC_2012}, only an indirect proof of this fact was presented. Thus, a clear direct proof was not available in the literature even in the Euclidean setting.
\begin{theorem} Suppose that $p\in\Omega$ satisfies CRSC. If $p$ is an AKKT point, then $p$ is a KKT point. \end{theorem} \begin{proof} As in Definition~\ref{def:crsc}, let ${\cal J}_{-}(p) := \left\{ j \in {\cal A}(p):~-\grad g_j(p) \in {\cal L} (p)^{ \circ} \right\}$ and we denote ${\cal J}_+(p):={\cal A}(p)\backslash{\cal J}_-(p)$. Since $p$ is an AKKT point of problem~\eqref{PNL}, there exist sequences $(p^k)_{k\in {\mathbb N}}\subset {\cal M}$, $(\lambda^k)_{k\in {\mathbb N}}\subset {\mathbb R}^s$, and $(\mu^k)_{k\in {\mathbb N}}\subset {\mathbb R}_+^m$ with $\mu^k_j=0$, for all $j\notin {\cal A}(p)$ such that $\lim_{k\to \infty}p^k=p$ and \begin{equation}\label{neweq:akktcrsc2} \grad f(p^k)+\sum_{i=1}^s\lambda_i^k\grad h_i(p^k)+ \sum_{\ell \in {\cal J}_{-}(p)} \mu_\ell^k \grad g_\ell(p^k)+\sum_{j \in {\cal J}_{+}(p)} \mu_j^k \grad g_j(p^k)=:\epsilon_k, \quad \forall k\in {\mathbb N}, \end{equation} where $\lim_{k\to \infty} \epsilon_k=0$. Let ${\cal I}\subset\{1,\dots,s\}$ and ${\cal J}\subset{\cal J}_{-}(p)$ be such that $A(p,{\cal I},{\cal J})$ is a basis for the subspace generated by $A(p,\{1,\dots,s\},{\cal J}_-(p))$ (here we use the notation introduced in \eqref{eq:ap}). Thus, \eqref{neweq:akktcrsc2} can be rewritten as \begin{equation}\label{aux000} \grad f(p^k)+\sum_{i\in{\cal I}}\tilde{\lambda}_i^k\grad h_i(p^k)+ \sum_{\ell \in {\cal J}} \tilde{\mu}_\ell^k \grad g_\ell(p^k)+\sum_{j \in {\cal J}_{+}(p)} \mu_j^k \grad g_j(p^k)=\epsilon_k, \quad \forall k\in {\mathbb N}, \end{equation} for suitable $\tilde{\lambda}_i^k\in\mathbb{R}, i\in{\cal I}$ and $\tilde{\mu}_{\ell}^k\in\mathbb{R}, \ell\in{\cal J}$. If all the sequences $(\tilde{\lambda}_i^k)_{k\in\mathbb{N}}, i\in{\cal I}$, $(\tilde{\mu}_{\ell}^k)_{k\in\mathbb{N}}, \ell\in{\cal J}$, and $(\mu_j^k)_{k\in\mathbb{N}}, j\in{\cal J}_+$ are bounded, we may take a suitable convergent subsequence $(\tilde{\lambda}_i^k)_{k\in K_1}\to\tilde{\lambda}_i\in\mathbb{R}, i\in{\cal I}$, $(\tilde{\mu}_{\ell}^k)_{k\in K_1}\to\tilde{\mu}_{\ell}\in\mathbb{R}, \ell\in{\cal J}$, and $(\mu_j^k)_{k\in K_1}\to\mu_{j}\in\mathbb{R}_+, j\in{\cal J}_+$ such that \begin{equation*} \grad f(p)+\sum_{i\in{\cal I}}\tilde{\lambda}_i\grad h_i(p)+ \sum_{\ell \in {\cal J}} \tilde{\mu}_\ell \grad g_\ell(p)+\sum_{j \in {\cal J}_{+}(p)} \mu_j \grad g_j(p)=0. \end{equation*}
Let us see that this implies that $p$ is a KKT point. First, note that ${\cal J}\subset{\cal J}_{-}(p)$ and ${\cal J}_{-}(p)\cup{\cal J}_{+}(p)={\cal A}(p)$. If some $\tilde{\mu}_{\ell_0}<0$ with $\ell_0\in{\cal J}$, then, by the definition of the set ${\cal J}_{-}(p)$, we have $\tilde{\mu}_{\ell_0}\grad g_{\ell_0}(p)\in{\mathcal L}^{\circ}(p)$. Since each of the remaining terms of the sum also belongs to ${\mathcal L}^{\circ}(p)$ by \eqref{eq:PolarConeLin}, and ${\mathcal L}^{\circ}(p)$ is closed under addition, we conclude that $-\grad f(p)\in{\mathcal L}^{\circ}(p)$, that is, $p$ is a KKT point. Otherwise, if not all sequences are bounded, take a subsequence $K_2\subset\mathbb{N}$ such that $\lim_{k\in K_2}M_k=+\infty$, where $M_k=\max\{|\tilde{\lambda}^k_i|, i\in{\cal I}; |\tilde{\mu}^k_{\ell}|, \ell\in{\cal J}; \mu^k_j, j\in{\cal J}_+(p)\}$. Dividing \eqref{aux000} by $M_k$ and taking the limit on a suitable subsequence $K_3\subset K_2$ such that $\lim_{k\in K_3}\frac{\tilde{\lambda}^k_i}{M_k}=\alpha_i\in\mathbb{R}$, $\lim_{k\in K_3}\frac{\tilde{\mu}^k_{\ell}}{M_k}=\beta_\ell\in\mathbb{R}$, and $\lim_{k\in K_3}\frac{\mu^k_j}{M_k}=\gamma_j\geq0$ with not all $\alpha_i, \beta_\ell, \gamma_j$ equal to zero, we arrive at $$ \sum_{i\in{\cal I}}\alpha_i\grad h_i(p)+ \sum_{\ell \in {\cal J}} \beta_\ell \grad g_\ell(p)+\sum_{j \in {\cal J}_{+}(p)} \gamma_j \grad g_j(p)=0. $$ However, by the definition of ${\cal J}_+(p)$, we must have $\gamma_j=0$ for all $j\in{\cal J}_+(p)$: otherwise, isolating $-\gamma_j\grad g_j(p)$ and handling the terms with $\beta_\ell<0$ as above, we would obtain $-\grad g_j(p)\in{\cal L}(p)^{\circ}$, contradicting $j\in{\cal J}_+(p)$. Hence, not all $\alpha_i,\beta_\ell$ vanish and $A(p,{\cal I},{\cal J})$ is linearly dependent. This contradicts the choice of the index sets ${\cal I}$ and ${\cal J}$.
\end{proof}
Similarly, we show that RCPLD is a strict CQ.
\begin{theorem} Suppose that $p\in\Omega$ satisfies RCPLD. If $p$ is an AKKT point, then $p$ is a KKT point. \end{theorem} \begin{proof} The proof is similar to the previous one, but without partitioning ${\cal A}(p)$. That is, consider the previous proof with ${\cal J}_{-}(p)$ replaced by $\emptyset$ and ${\cal J}_{+}(p)$ replaced by ${\cal A}(p)$. Analogously to \eqref{aux000}, we arrive at $$ \grad f(p^k)+\sum_{i\in{\cal I}}\tilde{\lambda}_i^k\grad h_i(p^k)+\sum_{j \in {\cal A}(p)} \mu_j^k \grad g_j(p^k)=\epsilon_k, \quad \forall k\in {\mathbb N}, $$ with $\tilde{\lambda}_i^k\in\mathbb{R}, i\in{\cal I}$, $\mu_j^k\geq0, j\in{\cal A}(p)$, $\lim_{k\in\mathbb{N}}p^k=p$, $\lim_{k\in\mathbb{N}}\epsilon_k=0$, and $\{\grad h_i(p): i\in{\cal I}\}$ linearly independent.
For every $k\in\mathbb{N}$, we apply Lemma \ref{l:Caratheodory} to arrive at \begin{equation}\label{aux001} \grad f(p^k)+\sum_{i\in{\cal I}}\bar{\lambda}_i^k\grad h_i(p^k)+\sum_{j \in {\cal J}_k} \bar{\mu}_j^k \grad g_j(p^k)=\epsilon_k, \end{equation}
for some $\bar{\lambda}_i^k\in\mathbb{R}, i\in{\cal I}$, $\bar{\mu}_j^k\geq0, j\in{\cal J}_k\subset{\cal A}(p)$, and all $k\in {\mathbb N}$, where $A(p^k,{\cal I},{\cal J}_k)$ is linearly independent. Let us take a subsequence such that ${\cal J}_k$ is constant, say, ${\cal J}_k\equiv{\cal J}$ for all $k\in K_1\subset\mathbb{N}$. The proof now follows similarly to the previous one considering $M_k:=\max\{|\bar{\lambda}_i^k|, i\in{\cal I};\bar{\mu}_j^k, j\in{\cal J}\}$. If $(M_k)_{k\in K_1}$ is bounded, one may take the limit in \eqref{aux001} for a suitable subsequence to see that $p$ is a KKT point. Otherwise, dividing \eqref{aux001} by $M_k$ we see that $A(p,{\cal I},{\cal J})$ is positive-linearly dependent, which contradicts the definition of RCPLD.
\end{proof}
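For concreteness, the following is a minimal Euclidean sketch (plain linear algebra in coordinates; the function name and interface are ours) of the kind of reduction provided by the Carath\'eodory-type Lemma~\ref{l:Caratheodory} used above: a combination $x=\sum_i\lambda_i v_i+\sum_j\mu_j w_j$ with $\mu\geq0$ and $\{v_i\}$ linearly independent is rewritten over a sub-collection of the $w_j$ so that all vectors kept are linearly independent and the inequality multipliers stay nonnegative. In the Riemannian setting the same argument is applied in the tangent space $T_{p^k}{\cal M}$.
\begin{verbatim}
import numpy as np

def caratheodory_reduce(V, lam, W, mu, tol=1e-10):
    """V: d x s matrix of equality gradients (independent columns),
    W: d x m matrix of inequality gradients, lam in R^s, mu in R_+^m.
    Returns (lam', W', mu', kept_indices) representing the same vector
    V lam + W mu with [V | W'] linearly independent and mu' >= 0."""
    lam, mu = np.array(lam, float), np.array(mu, float)
    idx = list(range(W.shape[1]))
    while True:
        M = np.hstack([V, W[:, idx]]) if idx else V
        _, sv, Vt = np.linalg.svd(M)          # null space via SVD
        if int((sv > tol).sum()) == M.shape[1]:
            return lam, W[:, idx], mu[idx], idx
        d = Vt[-1]                            # null direction, M d = 0
        dw = d[V.shape[1]:]                   # inequality part; nonzero in
        if dw.max() <= tol:                   # exact arithmetic since the
            d, dw = -d, -dw                   # columns of V are independent
        # largest step keeping inequality multipliers nonnegative
        t = min(mu[idx[j]] / dw[j] for j in range(len(idx)) if dw[j] > tol)
        lam -= t * d[:V.shape[1]]
        for j, jj in enumerate(idx):
            mu[jj] = max(0.0, mu[jj] - t * d[V.shape[1] + j])
        idx = [jj for jj in idx if mu[jj] > tol]   # drop vanished multipliers
\end{verbatim}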
We formalize our results in the following:
\begin{corollary} \label{cor} Let $p$ be a feasible limit point of a sequence $(p^k)_{k\in\mathbb{N}}$ generated by Algorithm~\ref{Alg:LAA} such that $p$ satisfies RCPLD or CRSC. Then $p$ satisfies the KKT conditions. \end{corollary}
Notice that, in contrast to the result under MFCQ, where the dual AKKT sequence $(\lambda^k,\mu^k)_{k\in\mathbb{N}}$ is necessarily bounded and thus dual convergence to a Lagrange multiplier is obtained, our result does not include convergence of the dual sequence. In the next section we prove that such convergence can be obtained under a condition known as quasinormality \cite{hestenes}, which is weaker than CPLD and independent of RCPLD. In order to do this, we extend to the Riemannian setting a stronger sequential optimality condition known as the Positive-AKKT condition (PAKKT \cite{Andreani_Fazzio_Schuverdt_Secchin2019}).
\section{A stronger sequential optimality condition} \label{eq:PAKKT}
The quasinormality constraint qualification (QN) was introduced in \cite{hestenes} and popularized in the book \cite{Bertsekas2016} in connection with the convergence of the external penalty method. Recently, it has been connected with the notion of so-called Enhanced KKT conditions, guaranteeing boundedness of the corresponding set of Enhanced Lagrange multipliers \cite{enhanced}. QN is a fairly weak CQ, being known to be strictly weaker than CPLD \cite{Andreani_Martinez_Schuverdt2005} while still implying the Error Bound property \cite{eb2} in the Euclidean setting. In this section we extend an important algorithmic property of QN that goes beyond what we have proved for RCPLD and CRSC: besides QN being a strict CQ with respect to the AKKT condition, so that global convergence of Algorithm~\ref{Alg:LAA} in the sense of Corollary~\ref{cor} is also valid under QN, we will show that the dual sequence generated by Algorithm~\ref{Alg:LAA} under QN is in fact bounded. In order to do this, we will show that QN is a strict CQ with respect to a stronger sequential optimality condition known as the Positive-AKKT (PAKKT) condition \cite{Andreani_Fazzio_Schuverdt_Secchin2019}.
We start by introducing the PAKKT condition in the Riemannian setting, showing that it is indeed a genuine necessary optimality condition for problem~\eqref{PNL}. Our definition considers a modification of the original one as suggested in \cite{Andreani_Haeser_Schuverdt_Secchin_Silva2022}.
\begin{definition}\label{def:pAKKT} The Positive-Approximate-KKT (PAKKT) condition for problem~\eqref{PNL} is satisfied at a point $p\in \Omega$ if there exist sequences $(p^k)_{k\in {\mathbb N}}\subset {\cal M}$, $(\lambda^k)_{k\in {\mathbb N}}\subset {\mathbb R}^s$ and $(\mu^k)_{k\in {\mathbb N}}\subset {\mathbb R}_+^m$ such that \begin{enumerate}
\item[(i)] $\lim_{k\to \infty}\ p^k = p$;
\item[(ii)] $\lim_{k\to \infty} \left\| \grad L(p^k, \lambda^k, \mu^k)\right\| = 0$;
\item[(iii)] $\mu_j^k=0$ for all $j\not\in{\cal A}(p)$ and sufficiently large $k$;
\item[(iv)] If $\gamma_k := \left\|(1,\lambda^k,\mu^k)\right\|_{\infty}\to+\infty$, then: \begin{equation}\label{df.PAKKTcontroleSinalRestIguald}
\lim_{k\to \infty} \frac{\left|\lambda_i^k\right|}{\gamma_k} > 0 \quad \Longrightarrow \quad \lambda_i^kh_i(p^k) >0, \, \forall k\in {\mathbb N}; \end{equation} \begin{equation}\label{df.PAKKTcontroleSinalRestDesiguald}
\lim_{k\to \infty} \frac{\mu_j^k}{\gamma_k} > 0 \quad \Longrightarrow \quad \mu_j^kg_j(p^k) >0, \, \forall k\in {\mathbb N}. \end{equation} \end{enumerate} \end{definition} A point $p$ satisfying Definition~\ref{def:pAKKT} is called a PAKKT point; the corresponding sequence $(p^k)_{k\in\mathbb{N}}$ is its associated primal sequence, while $(\lambda^k,\mu^k)_{k\in\mathbb{N}}$ is its associated dual sequence.
In order to present our results, we will make use of the following lemmas extended to the Riemannian setting in \cite{Yamakawa_Sato2022}:
\begin{lemma} \label{le:leAux} Let $p$ be a local minimizer of problem~\eqref{PNL} and $\alpha>0$. Then, for each $k\in {\mathbb N}$ and ${\rho_k}>0$, the following problem \begin{equation*} \begin{array}{l}
\displaystyle\Min_{q\in {\cal M}} f(q) + \frac{1}{2} d(q,p)^2 + \frac{\rho_k}{2} \left(\left\|h(q)\right\|_2^2 + \left\|g(q)_{+}\right\|_2^2\right),\\ \mbox{subject~to~} d(q,p)\leq \alpha, \end{array} \end{equation*} admits a solution $p^k$. Moreover, if $\lim_{k\to \infty}\ {\rho_k}=+\infty$ then $\lim_{k\to \infty}\ p^k = p$. \end{lemma}
\begin{lemma} \label{le:leAuxs} Let $\phi\colon {\cal M} \to {\mathbb R}$ be a differentiable function, $\alpha>0$ and $p_0\in {\cal M}$. Suppose that $p\in {\cal M} $ is an optimal solution of the following optimization problem: \begin{equation*} \begin{array}{l} \displaystyle\Min_{q\in {\cal M}} \phi (q), \\ \mbox{subject~to~} d(q,p_0)\leq \alpha. \end{array} \end{equation*} If $d(p,p_0)< \alpha$, then $\grad \phi (p) =0$. \end{lemma} We now show that PAKKT is a genuine necessary optimality condition for problem~\eqref{PNL}.
\begin{theorem}\label{T:PAKKTCondNec} Let $p\in\Omega$ be a local minimizer of \eqref{PNL}. Then, $p$ is a PAKKT point. \end{theorem} \begin{proof} Let $p$ be a local minimizer of problem~\eqref{PNL}. Thus, there is a sufficiently small parameter $\alpha >0$ such that the problem \begin{equation*} \begin{array}{l} \displaystyle\Min_{q\in {\cal M}}f(q)+ \frac{1}{2} d(q,p)^2,\\ \mbox{subject~to~}h(q)=0, ~ g(q)\leq 0,~ d(q,p)\leq \alpha, \end{array} \end{equation*} has $p$ as the unique global minimizer. For each $k\in {\mathbb N}$, take ${\rho_k}>0$ such that $\lim_{k\to \infty}\ {\rho_k}=+\infty$. Consider the penalized problem \begin{equation}\label{PNLaux2} \begin{array}{l}
\displaystyle\Min_{q\in {\cal M}} f(q) + \frac{1}{2} d(q,p)^2 + \frac{\rho_k}{2} \left(\left\|h(q)\right\|_2^2 + \left\|g(q)_{+}\right\|_2^2\right),\\ \mbox{subject~to~} d(q,p)\leq \alpha. \end{array} \end{equation}
It follows from Lemma~\ref{le:leAux} that there exists a sequence $(p^k)_{k\in {\mathbb N}}$ such that $p^k$ is a solution of \eqref{PNLaux2} and $\lim_{k \rightarrow \infty} p^k = p$. Thus, item~$(i)$ of Definition~\ref{def:pAKKT} is satisfied. Moreover, there exists an infinite index set ${K}_1$ such that $d(p^k,p)< \alpha$, for all $k\in {K}_1$. Consequently, using Lemma~\ref{le:leAuxs}, we conclude that \begin{equation*} \grad f(p^k) -\exp^{-1}_{p^k}{p}+\sum_{i=1}^s\rho_k h_i(p^k)\grad h_i(p^k)+ \sum_{j=1}^m\rho_k \max\{ 0, g_j(p^k)\}\grad g_j(p^k) = 0, \end{equation*} for all $k\in {K}_1$. Therefore, we have \begin{align*} \lim_{k \in {K}_1} \grad L(p^k, \lambda^k , \mu^k) &= \lim_{k \in {K}_1} \Big(\grad f(p^k) +\sum_{i=1}^s\lambda_i^k\grad h_i(p^k)+ \sum_{j=1}^m\mu_j^k\grad g_j(p^k)\Big)\\
&= \lim_{k \in {K}_1} -\exp^{-1}_{p^k}{p} = 0, \end{align*}
where for each $k \in {K}_1$, we denote $\lambda^k: = \rho_k h(p^k)$ and $\mu^k := \rho_k [g(p^k)]_+ \geq 0$. Therefore, $(ii)$ and $(iii)$ of Definition~\ref{def:pAKKT} are satisfied. We will now analyze the validity of~\eqref{df.PAKKTcontroleSinalRestIguald} and~\eqref{df.PAKKTcontroleSinalRestDesiguald}. Define $\gamma_k := \left\|(1,\lambda^k,\mu^k)\right\|_{\infty}$ for all $k\in {K}_1$, suppose that $\lim_{k\in K_1}\gamma_k=+\infty$, and assume that $\lim_{k\in {K}_1} ({\left|\lambda_i^k\right|}/{\gamma_k}) > 0$. Thus, ${\left|\lambda_i^k\right|}/{\gamma_k} > 0$ for sufficiently large $k\in {K}_1$, which implies that $h_i(p^k)\neq0$. Hence, $\lambda_i^kh_i(p^k)=\rho_kh_i(p^k)^2>0$ for all sufficiently large $k\in K_1$. Similarly, if $\lim_{k\in {K}_1} ({\mu_j^k}/{\gamma_k}) > 0$, then $\mu_j^k>0$ for sufficiently large $k\in K_1$, which implies $g_j(p^k)>0$ and hence $\mu_j^kg_j(p^k)>0$. Therefore, \eqref{df.PAKKTcontroleSinalRestIguald} and \eqref{df.PAKKTcontroleSinalRestDesiguald} are fulfilled. Consequently, $p$ satisfies Definition~\ref{def:pAKKT}, which concludes the proof. \end{proof}
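To make the construction in the proof concrete, the following minimal numerical sketch (our own illustration) runs it in the flat Euclidean case ${\cal M}=\mathbb{R}$ on the toy problem $\min\ x$ subject to $x^2\leq 0$, whose unique minimizer is $p=0$ and which admits no Lagrange multiplier there. The printed quantities exhibit a PAKKT sequence: the Lagrangian gradient vanishes in the limit while $\mu^k\to+\infty$ and $\mu^k g(p^k)>0$.
\begin{verbatim}
# Penalty construction from the proof, specialized to min x s.t. x^2 <= 0.
from scipy.optimize import minimize_scalar

f = lambda x: x
g = lambda x: x * x

for rho in [1e2, 1e4, 1e6, 1e8]:
    # penalized subproblem: f + proximal term (p = 0) + quadratic penalty
    phi = lambda x: f(x) + 0.5 * x**2 + 0.5 * rho * max(0.0, g(x))**2
    pk = minimize_scalar(phi, bounds=(-1.0, 1.0), method="bounded").x
    mu = rho * max(0.0, g(pk))            # mu_k = rho_k * [g(p_k)]_+
    gradL = 1.0 + mu * 2.0 * pk           # grad f + mu * grad g at p_k
    print(f"rho={rho:.0e}  p_k={pk:+.2e}  mu_k={mu:.2e}  "
          f"|gradL|={abs(gradL):.2e}  mu_k*g(p_k)={mu*g(pk):.2e}")
\end{verbatim}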
Let us now introduce QN in the Riemannian context. We will show that under QN the dual sequence $(\lambda^k,\mu^k)_{k\in\mathbb{N}}$ associated with any PAKKT sequence $(p^k)_{k\in\mathbb{N}}$ is bounded. Later, we will show that Algorithm~\ref{Alg:LAA} generates PAKKT sequences, which will provide the main algorithmic relevance of QN. \begin{definition}\label{def:QN} Let $\Omega$ be given by \eqref{eq:constset}, $p\in \Omega$ and ${\cal A}(p)$ be given by~\eqref{eq:actset}. The point $p$ satisfies the quasinormality constraint qualification (QN) if there are no $\lambda \in {\mathbb R}^s$ and $\mu \in {\mathbb R}_{+}^{m}$ such that \begin{enumerate}
\item[(i)] $\sum_{i=1}^{s} \lambda_i \grad h_i (p) + \sum_{j \in {\cal A}(p)} \mu_j \grad g_j(p)=0$;
\item [(ii)] $\mu_j=0$ for all $j \notin {\cal A}(p)$ and $(\lambda, \mu) \neq 0$;
\item [(iii)] for all $\epsilon >0$ there exists $q \in B_{\epsilon}(p)$ such that $\lambda_i h_i(q)>0$ for all $i \in\left\{1, \ldots , s \right\}$ with $\lambda_i \neq 0$ and $\mu_j g_j(q)>0$ for all $j \in {\cal A}(p)$ with $\mu_j>0$. \end{enumerate} \end{definition}
In the next example we show that QN holds, but both RCPLD and CRSC fail. \begin{example}\label{ex:QN_Not_RCPLD} Define the functions $h_1(q):=\varphi_1(q)e^{\varphi_2(q)}$ and $h_2(q):=\varphi_1(q)$, where $\varphi_1$ and $\varphi_2$ are the functions defined in \eqref{defphi}. Note that \begin{equation} \label{eq:igqn}
\grad h_1(q)=e^{\varphi_2(q)} \grad \varphi_1(q)+ \varphi_1(q)e^{\varphi_2(q)} \grad \varphi_2(q), \qquad \grad h_2(q)=\grad \varphi_1(q). \end{equation} The point $p \in \Omega$ satisfies QN. Indeed, first we note that $\varphi_1(p)=0$ and $\varphi_2(p)=0$. Moreover, we have $\grad h_1(p)=\grad h_2(p)=\grad \varphi_1(p)$. Consider a linear combination $\lambda_1 \grad h_1(p)+ \lambda_2\grad h_2(p)=0$ with $\lambda_1, \lambda_2 \in {\mathbb R}$. Thus, we have $ (\lambda_1 + \lambda_2)\grad \varphi_1(p)=0. $ Since $\grad \varphi_1(p)\neq 0$, we conclude that, unless $\lambda_1=\lambda_2=0$, we must have $\lambda_1 \lambda_2<0$. In this case, for any $q$ we have $h_1(q)h_2(q)=\varphi_1(q)^2e^{\varphi_2(q)}\geq0$, so $\lambda_1 h_1(q)$ and $\lambda_2 h_2(q)$ cannot both be positive; hence item~$(iii)$ of Definition~\ref{def:QN} fails, which implies that $p$ satisfies QN.
Now, we show that $p$ satisfies neither RCPLD nor CRSC. For that, we first note that the rank of $\{\grad h_1(p), \grad h_2(p) \}$ is equal to one. On the other hand, similarly to the computations in Example~\ref{ex:CPLDNotCRCQ_MFCQ}, one can prove that, for all $\epsilon >0$, there exists $q \in B_{\epsilon} (p)$ with $q \neq p$ such that $\{\grad \varphi_1({q}), \grad \varphi_2({q})\}$ is linearly independent. By the definition of $\varphi_1$, notice that $\varphi_1(q)\neq 0$ for all $q \in B_{\epsilon} (p)$ with $q\neq p$ and sufficiently small $\epsilon>0$; it then follows from \eqref{eq:igqn} that $\{\grad h_1({q}), \grad h_2({q})\}$ is also linearly independent. Therefore, $p$ satisfies neither RCPLD nor CRSC. \end{example} \begin{theorem}\label{T:PAKKT_QN+Bounded} Let $p\in\Omega$ be a PAKKT point with associated primal sequence $(p^k)_{k\in {\mathbb N}}$ and dual sequence $(\lambda^k,\mu^k)_{k\in\mathbb{N}}$. Assume that $p$ satisfies QN. Then $(\lambda^k,\mu^k)_{k\in {\mathbb N}}$ is a bounded sequence. In particular, $p$ satisfies the KKT conditions and any limit point of $(\lambda^k,\mu^k)_{k\in\mathbb{N}}$ is a Lagrange multiplier associated with $p$. \end{theorem} \begin{proof}
Let $p\in\Omega$ be a PAKKT point with primal sequence $(p^k)_{k\in\mathbb{N}}$ and dual sequence $(\lambda^k,\mu^k)_{k\in\mathbb{N}}$ and let us assume that the dual sequence is unbounded. Then, we will conclude that the point $p$ does not satisfy the quasinormality condition, i.e., we will prove the existence of $\lambda \in {\mathbb R}^s$ and $\mu \in {\mathbb R}_{+}^{m}$ such that items~$(i)$, $(ii)$ and $(iii)$ of Definition~\ref{def:QN} are satisfied. For that, set $\gamma_k = \left\|(1,\lambda^k,\mu^k)\right\|_{\infty}$ as in Definition~\ref{def:pAKKT} and take an infinite subsequence indexed by $K_1$ such that $\lim_{k\in K_1}\gamma_k=+\infty$. To simplify the notations let us define the following auxiliary sequence \begin{equation*} U^k:=(1,\lambda^k,\mu^k) \in {\mathbb R} \times {\mathbb R}^s \times {\mathbb R}_+^m, \qquad \forall k\in {\mathbb N}, \end{equation*}
with $\lim_{k \in K_1} \|U^k\|_2 = \infty$. Take an infinite subset $K_2\subset K_1$ such that the sequence $(U^k/\|U^k\|_2)_{k\in {K}_2}$ converges to some $(0,\lambda,\mu)\in\mathbb{R}\times\mathbb{R}^s\times\mathbb{R}^m_+$, with $\|(0,\lambda,\mu)\|=1$.
Thus, considering that $(p^k)_{k\in {\mathbb N}}$ is a primal PAKKT sequence, we conclude that \begin{equation*}
\lim_{k \in {K}_2}\frac{ \grad L(p^k, \lambda^k , \mu^k)}{\gamma_k} = \lim_{k \in {K}_2} \Big(\frac{\grad f(p^k)}{\gamma_k} +\sum_{i=1}^s\frac{\lambda_i^k}{\gamma_k}\grad h_i(p^k)+ \sum_{j=1}^m\frac{\mu_j^k}{\gamma_k}\grad g_j(p^k)\Big)= 0. \end{equation*}
Hence, taking into account that $\mu_j=0$ for $j \notin {\cal A}(p)$ (which follows from item~$(iii)$ of Definition~\ref{def:pAKKT}), we obtain that item~$(i)$ of Definition~\ref{def:QN} is satisfied at $p$. In addition, since $(\lambda, \mu)\neq 0$, item~$(ii)$ of Definition~\ref{def:QN} is also satisfied at $p$.
From \eqref{df.PAKKTcontroleSinalRestIguald} and \eqref{df.PAKKTcontroleSinalRestDesiguald}, we have that $\lambda_i^kh_i(p^k)>0$ whenever $\lambda_i\neq0$, and $\mu_j^kg_j(p^k)>0$ whenever $\mu_j>0$; since $\lambda_i^k$ and $\lambda_i$ have the same sign for all sufficiently large $k\in K_2$ and $p^k\to p$, this gives precisely item~$(iii)$ of Definition~\ref{def:QN}. Therefore, QN fails at $p$, a contradiction, and $(\lambda^k,\mu^k)_{k\in\mathbb{N}}$ is bounded. Finally, for any limit point $(\lambda^*,\mu^*)$ of the bounded dual sequence, passing to the limit in item~$(ii)$ of Definition~\ref{def:pAKKT} yields $\grad L(p,\lambda^*,\mu^*)=0$ with $\mu^*\geq0$ and $\mu^*_j=0$ for $j\notin{\cal A}(p)$, so $(\lambda^*,\mu^*)$ is a Lagrange multiplier associated with $p$ and, in particular, $p$ is a KKT point.
\end{proof}
Finally, it remains to show that Algorithm~\ref{Alg:LAA} generates PAKKT sequences, which gives its global convergence result under QN.
\begin{theorem} Assume Algorithm~\ref{Alg:LAA} generates an infinite sequence $(p^k)_{k\in {\mathbb N}}$ with a feasible accumulation point $p$, say, $\lim_{k\in K}p^k=p$. Then, $p$ is a PAKKT point with corresponding primal sequence $(p^k)_{k\in K}$ and dual sequence $(\lambda^k,\mu^k)_{k\in K}$ as generated by Algorithm~\ref{Alg:LAA}. In particular, if $p$ satisfies QN, then $p$ is a KKT point and any limit point of $(\lambda^k,\mu^k)_{k\in K}$ is a Lagrange multiplier associated with $p$. \end{theorem}
\begin{proof} By {\bf Step 1} and {\bf Step 2} of the algorithm, we have $$\lim_{k\in K}\grad L(p^k,\lambda^k,\mu^k)=\lim_{k\in K}\grad{\cal L}_{\rho_k}(p^k,\bar{\lambda}^k,\bar{\mu}^k)=0,$$ with $\mu_j^k=0$ for sufficiently large $k\in K$ if $j\not\in{\cal A}(p)$. To see this, note that when $(\rho_k)_{k\in K}$ is unbounded, this follows from the definition of $\mu_j^k$, the boundedness of $(\bar{\mu}_j^k)_{k\in K}$, and the fact that $\rho_kg_j(p^k)\to-\infty$. When $(\rho_k)_{k\in K}$ is bounded, we must have from {\bf Step 3} that $V^k\to0$. In particular, $\max\left\{0,\frac{\bar{\mu}^k_j}{\rho_k}+g_j(p^k)\right\}-\frac{\bar{\mu}_j^k}{\rho_k}\to0$. Since $g_j(p^k)$ is negative and bounded away from zero for $j\not\in{\cal A}(p)$ and all sufficiently large $k\in K$, we must have $\mu_j^k=0$ for sufficiently large $k\in K$. Thus $(i)$, $(ii)$, and $(iii)$ of Definition~\ref{def:pAKKT} hold.
To prove $(iv)$ of Definition~\ref{def:pAKKT}, let $\gamma_k := \left\|(1,\lambda^k,\mu^k)\right\|_{\infty}$ and assume that $\lim_{k\in K}\gamma_k=+\infty$. Let $i\in\{1,\dots,s\}$ be such that $\lim_{k\in K}\frac{\lambda_i^k}{\gamma_k}=\lambda_i\neq0$ and $j\in{\cal A}(p)$ be such that $\lim_{k\in K}\frac{\mu_j^k}{\gamma_k}=\mu_j>0$. This implies that $\lambda_i^k=\bar{\lambda}_i^k+\rho_kh_i(p^k)$ is unbounded for $k\in K$, with $\lambda_i^k\lambda_i>0$ for all sufficiently large $k\in K$. Since $(\bar{\lambda}_i^k)_{k\in K}$ is bounded, the only possibility is that $\rho_k\to+\infty$ and $\lambda_ih_i(p^k)>0$, and hence $\lambda_i^kh_i(p^k)>0$, for sufficiently large $k\in K$. Similarly, we have $g_j(p^k)>0$ and hence $\mu_j^kg_j(p^k)>0$ for sufficiently large $k\in K$, which completes the proof that $p$ is a PAKKT point; the remaining claims, under QN, follow from Theorem~\ref{T:PAKKT_QN+Bounded}.
\end{proof}
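For readers who wish to experiment with the primal and dual sequences discussed above, the following is a hedged Euclidean sketch (${\cal M}=\mathbb{R}^n$ with the flat metric, subproblems solved by an off-the-shelf unconstrained solver) of a standard safeguarded augmented Lagrangian loop of the kind that Algorithm~\ref{Alg:LAA} adapts to the Riemannian setting. The precise Steps~1--3 of Algorithm~\ref{Alg:LAA} are stated in an earlier section and may differ in detail; the multiplier estimates $\lambda^k=\bar\lambda^k+\rho_k h(p^k)$ and $\mu^k=\max\{0,\bar\mu^k+\rho_k g(p^k)\}$ and the infeasibility measure below are only meant to mirror the quantities appearing in the proofs of this section.
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize

def safeguarded_alm(f, h, g, p0, rho=10.0, gamma=10.0, tau=0.5,
                    lam_bound=1e6, mu_bound=1e6, outer_iters=20):
    """Euclidean safeguarded augmented Lagrangian sketch (not Algorithm LAA).
    f: objective, h: equality map, g: inequality map (g <= 0 feasible)."""
    p = np.asarray(p0, dtype=float)
    bar_lam = np.zeros_like(h(p))
    bar_mu = np.zeros_like(g(p))
    V_old = np.inf
    for _ in range(outer_iters):
        def L_rho(x):  # PHR augmented Lagrangian with safeguarded multipliers
            return (f(x)
                    + (rho / 2.0) * np.sum((h(x) + bar_lam / rho) ** 2)
                    + (rho / 2.0) * np.sum(np.maximum(0.0, g(x) + bar_mu / rho) ** 2))
        p = minimize(L_rho, p, method="BFGS").x        # approximate "Step 1"
        lam = bar_lam + rho * h(p)                     # dual estimates ("Step 2")
        mu = np.maximum(0.0, bar_mu + rho * g(p))
        # infeasibility/complementarity measure, as in the proof above
        V = np.max(np.abs(np.concatenate([h(p), np.maximum(g(p), -bar_mu / rho)])))
        if V > tau * V_old:                            # increase penalty ("Step 3")
            rho *= gamma
        V_old = V
        bar_lam = np.clip(lam, -lam_bound, lam_bound)  # safeguarded projection
        bar_mu = np.clip(mu, 0.0, mu_bound)
    return p, lam, mu

# toy use: min x1 + x2  s.t.  x1^2 + x2^2 - 1 = 0  and  -x1 <= 0
p, lam, mu = safeguarded_alm(lambda x: x[0] + x[1],
                             lambda x: np.array([x[0]**2 + x[1]**2 - 1.0]),
                             lambda x: np.array([-x[0]]),
                             p0=[1.0, 0.0])
print("p =", p, " lambda =", lam, " mu =", mu)
\end{verbatim}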
We conclude by providing another property of QN in connection with Algorithm~\ref{Alg:LAA}. Instead of considering \eqref{step1 alg1} in {\bf Step 1} of Algorithm~\ref{Alg:LAA}, one may consider a more flexible criterion for solving the corresponding subproblem. That is, instead of requiring the iterate $p^k$ to satisfy $\left\| \grad {\mathcal L}_{\rho_k}(p^k, \bar{\lambda}^k, \bar{\mu}^k)\right\| \leq\epsilon_k$, one may require the looser criterion $\left\| \frac{\grad {\mathcal L}_{\rho_k}(p^k, \bar{\lambda}^k, \bar{\mu}^k)}{\gamma_k}\right\| \leq\epsilon_k$, where $\gamma_k := \left\|(1,{\lambda}^k,{\mu}^k)\right\|_{\infty}$ with $\lambda^k$ and $\mu^k$ given as in {\bf Step 2} of the algorithm. That is, one trades some accuracy in the solution of the subproblem for a more easily computable iterate. For instance, this is the approach considered in the well-known interior-point method IPOPT \cite{ipopt}, even though it tends to generate unbounded dual sequences \cite{ye}. This modification gives rise to the so-called Scaled-PAKKT condition \cite{Andreani_Haeser_Schuverdt_Secchin_Silva2022}, which we present in the Riemannian setting as follows:
\begin{definition}\label{def:Scaled_PAKKT} The Scaled-PAKKT condition for problem~\eqref{PNL} is satisfied at a point $p\in \Omega$ if there exist sequences $(p^k)_{k\in {\mathbb N}}\subset {\cal M}$, $(\lambda^k)_{k\in {\mathbb N}}\subset {\mathbb R}^s$ and $(\mu^k)_{k\in {\mathbb N}}\subset {\mathbb R}_+^m$ such that: \begin{enumerate}
\item[(i)] $\lim_{k\to \infty}\ p^k = p$;
\item[(ii)] $\lim_{k\to \infty} \left\| \frac{\grad L(p^k, \lambda^k, \mu^k)}{\gamma_k}\right\| = 0$, where $\gamma_k := \left\|(1,\lambda^k,\mu^k)\right\|_{\infty}$;
\item[(iii)] $\mu_j^k=0$ for all $j\not\in{\cal A}(p)$ and sufficiently large $k$;
\item[(iv)] If $\gamma_k\to+\infty$, then \begin{equation*}
\lim_{k\to \infty} \frac{\left|\lambda_i^k\right|}{\gamma_k} > 0 \quad \Longrightarrow \quad \lambda_i^kh_i(p^k) >0, \, \forall k\in {\mathbb N}; \end{equation*} \begin{equation*}
\lim_{k\to \infty} \frac{\mu_j^k}{\gamma_k} > 0 \quad \Longrightarrow \quad \mu_j^kg_j(p^k) >0, \, \forall k\in {\mathbb N}. \end{equation*} \end{enumerate} \end{definition}
It is easy to see that Algorithm~\ref{Alg:LAA} with the looser criterion in {\bf Step 1} described previously generates Scaled-PAKKT sequences. Following the proof of Theorem~\ref{T:PAKKT_QN+Bounded}, one also sees that QN is still sufficient to guarantee boundedness of the dual Scaled-PAKKT sequence; we now show that QN is, in a sense, the weakest condition with this property.
\begin{theorem} If, for each continuously differentiable function $f\colon {\cal M} \rightarrow {\mathbb R}$ such that $p\in \Omega$ is a Scaled-PAKKT point, the KKT conditions also hold at $p$, then $p$ satisfies QN or ${\cal L} (p)^{\circ } = T_p{\cal M}$. \end{theorem} \begin{proof} Assume that the point $p$ does not satisfy the quasinormality condition and ${\cal L} (p)^{\circ} \neq T_p{\cal M}$. We will show that there exists a continuously differentiable function $f\colon {\cal M} \rightarrow {\mathbb R}$ such that $p$ is a Scaled-PAKKT point, but $p$ is not a KKT point. Since ${\cal L} (p)^{\circ} \neq T_p{\cal M}$, taking into account that ${\cal L} (p)^{\circ} \subset T_p{\cal M}$ and $0 \in {\cal L} (p)^{\circ}$, we conclude that there exists $v \in T_p{\cal M}$ with $v\neq 0$ such that $v \notin {\cal L} (p)^{\circ}$. Thus, by the definition of ${\cal L} (p)^{\circ}$ in \eqref{eq:PolarConeLin}, we have \begin{equation}\label{eq1:T:ScaledPAKKT} -v +\sum_{i=1}^{s} \bar{\lambda}_i \grad h_i (p) + \sum_{j \in {\cal A}(p)} \bar{\mu}_j \grad g_j(p) \neq 0, \quad \forall \bar{\mu}_j \geq 0, \, \forall \bar{\lambda}_i \in {\mathbb R}. \end{equation} To proceed, we take $0<\delta<r_{p}$, the injectivity radius at $p$, and define $f\colon B_{\delta}({p}) \rightarrow {\mathbb R}$ by $f(q):=\langle -v, \exp^{-1}_{p}{q}\rangle$, which is continuously differentiable, can be smoothly extended to all of ${\cal M}$, and satisfies $\grad f(p)=-v$. Hence, \eqref{eq1:T:ScaledPAKKT} implies that $p$ is not a KKT point. It remains to show that $p$ is a Scaled-PAKKT point. Since $p$ does not satisfy QN, there exist $\lambda \in {\mathbb R}^s$ and $\mu \in {\mathbb R}_{+}^{m}$ that satisfy items~$(i)$, $(ii)$, and $(iii)$ of Definition~\ref{def:QN}. In particular, by item~$(iii)$, we may take $(p^k)_{k \in {\mathbb N}} \subset {\cal M}$ such that $\lim_{k \rightarrow \infty} p^k = p$ and \begin{equation}\label{eq2:T:ScaledPAKKT} \lambda_i h_i(p^k)>0, \quad \forall i \in\left\{1, \ldots , s \right\}, \,\, \lambda_i \neq 0 \quad \text{and} \quad \mu_j g_j(p^k)>0, \quad \forall j \in {\cal A}(p), \,\, \mu_j>0. \end{equation} By the continuity of $\grad h$ and $\grad g$ and item~$(i)$ of Definition~\ref{def:QN}, we have \begin{equation}\label{eq3:T:ScaledPAKKT} \lim_{k \rightarrow \infty} \Big(\sum_{i=1}^{s} \lambda_i \grad h_i (p^k) + \sum_{j \in {\cal A}(p)} \mu_j \grad g_j(p^k)\Big)=0. \end{equation} Since $\grad f(p^k)$ is bounded, by the continuity of $\grad f$ and $\lim_{k \rightarrow \infty} p^k = p$, it follows from~\eqref{eq3:T:ScaledPAKKT} that \begin{equation}\label{eq4:T:ScaledPAKKT} \lim_{k \rightarrow \infty} \frac{1}{k}\Big(\grad f (p^k) + \sum_{i=1}^{s} k\lambda_i \grad h_i (p^k) + \sum_{j \in {\cal A}(p)} k\mu_j \grad g_j(p^k)\Big)=0. \end{equation}
Taking into account that any positive multiple of $(\lambda, \mu)$ also satisfies the three items of Definition~\ref{def:QN}, we can suppose without loss of generality that $\left\|(\lambda, \mu)\right\|_\infty =1$. Thus, setting $\lambda^k:= k \lambda$, $\mu^k := k \mu$, and $\gamma_k := \left\|(1,\lambda^k,\mu^k)\right\|_{\infty}$ we have $\gamma_k = k$. Hence, \eqref{eq4:T:ScaledPAKKT} becomes \begin{equation}\label{eq5:T:ScaledPAKKT2} \lim_{k \rightarrow \infty} \frac{1}{\gamma_k}\Big(\grad f (p^k) + \sum_{i=1}^{s} \lambda^k_i \grad h_i (p^k) + \sum_{j \in {\cal A}(p)} \mu^k_j \grad g_j(p^k)\Big)=0. \end{equation}
Items~$(iii)$ and~$(iv)$ of Definition~\ref{def:Scaled_PAKKT} follow from item~$(ii)$ of Definition~\ref{def:QN} and from \eqref{eq2:T:ScaledPAKKT}, respectively. We conclude that $p$ is a Scaled-PAKKT point, and the proof is complete. \end{proof}
\section{Conclusions}
In this paper we presented a detailed global convergence analysis of a safeguarded augmented Lagrangian method defined on a complete Riemannian manifold. In order to do this, we presented several weak constraint qualifications that can be used to obtain stationarity of all limit points of a primal sequence generated by the algorithm, despite the fact that the dual sequence may be unbounded. In doing so, we described several properties of these conditions well known in the Euclidean setting, which should foster further developments in the Riemannian setting. By means of a stronger sequential optimality condition, we were able to present a weak constraint qualification which guarantees boundedness of the dual sequence, even when the true set of Lagrange multipliers is unbounded. In presenting our conditions, we provided illustrative examples to prove that our conditions are strictly weaker than previously known ones in {\it any} complete Riemannian manifold with dimension $n\geq2$.
Note that when defining the sequential optimality conditions, we chose to present the simplest complementarity measure, namely, item ii) of Theorem~\ref{def:AKKT} and item iii) of Definition~\ref{def:pAKKT}, whereas \cite{Yamakawa_Sato2022} considers a slightly stronger complementarity measure known as the Approximate Gradient Projection. See \cite{mor} for a recent discussion of several different complementarity measures in the context of Euclidean conic optimization. We foresee significant progress on this topic in the near future; in particular, several stronger first- and second-order global convergence results for augmented Lagrangian methods and other algorithms can be expected to be extended to the Riemannian setting.
Finally, in the particular case where the manifold ${\cal M}$ can be embedded in a Euclidean space, one can treat the constraint $x\in{\cal M}$ as a subproblem (lower-level) constraint, as described in \cite{lowerlevel}. It is clear that one should exploit the Riemannian structure in order to solve the subproblems more efficiently, and it is also clear that an intrinsic formulation of the theory is well justified \cite{BergmannHerzog2019}; however, in this context, it is not clear whether the pure Euclidean theory differs from the one formulated in the Riemannian setting. This topic will be the subject of a forthcoming paper.
\end{document}
\begin{document}
\title{Segment Visibility Counting Queries in Polygons}
\begin{abstract} Let \(P\) be a simple polygon with \(n\) vertices, and let \(A\) be a set of \(m\) points or line segments inside \(P\). We develop data structures that can efficiently count the number of objects from \(A\) that are visible to a query point or a query segment. Our main aim is to obtain fast, \(\bO(\polylog nm)\), query times, while using as little space as possible. In case the query is a single point, a simple visibility-polygon-based solution achieves \(\bO(\log nm)\) query time using \(\bO(nm^2)\) space. In case \(A\) also contains only points, we present a smaller, \(\bO(n + m^{2 + \eps}\log n)\)-space, data structure based on a hierarchical decomposition of the polygon. Building on these results, we tackle the case where the query is a line segment and \(A\) contains only points. The main complication here is that the segment may intersect multiple regions of the polygon decomposition, and that a point may see multiple such pieces. Despite these issues, we show how to achieve \(\bO(\log n\log nm)\) query time using only \(\bO(nm^{2 + \eps} + n^2)\) space. Finally, we show that we can even handle the case where the objects in \(A\) are segments with the same bounds. \end{abstract}
\section{Introduction}\label{sec:intro} Let \(P\) be a simple polygon with \(n\) vertices, and let \(A\) be a set of \(m\) points or line segments inside \(P\). We develop efficient data structures for \emph{visibility counting queries} in which we wish to report the number of objects from \(A\) visible to some (constant-complexity) query object \(Q\). An object \(X\) in \(A\) is \emph{visible} from \(Q\) if there is a line segment connecting \(X\) and \(Q\) contained in \(P\). We are mostly interested in the case when \(Q\) is a point or a line segment. Our aim is to obtain fast, \(\bO(\polylog nm)\), query times, using as little space as possible. Our work is motivated by problems in movement analysis where we have sets of entities, for example, an animal species and their predators, moving in an environment, and we wish to determine if there is mutual visibility between the entities of different sets. We also want to quantify `how much' the sets can see each other. Assuming we have measurements at certain points in time, solving the mutual visibility problem between two such times reduces to counting visibility between line segments (for moving entities) and points (for static objects or entities).
\subparagraph*{Related work.} Computing visibility is a classical problem in computational geometry~\cite{ghosh07,orourke87}. Algorithms for efficiently testing visibility between a pair of points, for computing visibility polygons~\cite{elgindy81,joe87,lee83}, and for constructing visibility graphs~\cite{overmars88} have been a topic of study for over thirty years. There is even a host of work on computing visibility on terrains and in other three-dimensional environments~\cite{agarwal93rayshoot,berg94}. For many of these problems, the data structure version of the problem has also been considered. In these versions, the polygon is given up front, and the task is to store it so that we can efficiently query whether or not a pair of points \(p, q\) is mutually visible~\cite{chazelle89,guibas87,hershberger95}, or report the entire visibility polygon \(V(q)\) of \(q\)~\cite{aronov02}. In particular, when \(P\) is a simple polygon with \(n\) vertices, the former type of queries can be answered optimally---in \(\bO(\log n)\) time using linear space~\cite{hershberger95}. Answering the latter type of queries can be done in \(\bO(\log^2 n + \SetSize{V(q)})\) time using \(\bO(n^2)\) space~\cite{aronov02}. The visibility polygon itself has complexity \(\bO(n)\)~\cite{elgindy81}.
Computing the visibility polygon of a line segment has been considered, as well. When the polygon modelling the environment is simple, the visibility polygon, called a \emph{weak visibility polygon,} denoted \(V(\queryseg)\) for a line segment \(\queryseg\), still has linear complexity, and can be computed in \(\bO(n)\) time~\cite{guibas87}. Chen and Wang~\cite{chen15weak} consider the data structure version of the problem: they describe a linear-space data structure that can be queried in \(\bO(\SetSize{V(\queryseg)}\log n)\) time, and an \(\bO(n^3)\)-space data structure that can be queried in \(\bO(\log n + \SetSize{V(\queryseg)})\) time.
Computing the visibility polygon of a line segment \(\queryseg\) allows us to answer whether an entity moving along \(\queryseg\) can see a particular fixed point \(r\), i.e.\@ there is a time at which the moving entity can see \(r\) if and only if \(r\) lies inside \(V(\queryseg)\). If the point \(r\) may also move, it is not necessarily true that the entity can see \(r\) if the trajectory of \(r\) intersects \(V(\queryseg)\). Eades et al.~\cite{eades20} present data structures that can answer such queries efficiently. In particular, they present data structures of size \(\bO(n\log^5 n)\) that can answer such a query in time \(\bO(n^{\sfrac{3}{4}}\log^3 n)\). They present results even in case the polygon has holes. Aronov et al.~\cite{aronov02} show that we can also efficiently maintain the visibility polygon of an entity as it is moving.
Visibility counting queries have been studied before, as well. Bose et al.~\cite{bose02} studied the case where, for a simple polygon and a query point, the number of visible polygon edges is reported. The same problem has been considered for weak visibility from a query segment~\cite{bygi15}. For the case of a set of disjoint line segments and a query point, approximation algorithms exist~\cite{alipour15,gudmundsson10,suri86}. In contrast to these settings, we wish to count visible line segments where visibility is obstructed by a simple polygon rather than by the line segments themselves. Closer to our setting is the problem of reporting all pairs of visible points within a simple polygon~\cite{ben-moshe04}.
\subparagraph*{Results and organisation.} Our goal is to efficiently count the number of objects, in particular, line segments or points, in a set \(A\) that are visible to a query object \(Q\). We denote this number by \(C(Q, A)\). Given \(P\), \(A\), and \(Q\), we can easily compute \(C(Q, A)\) in optimal \(\bO(n + m\log n)\) time (see \cref{lem:algorithm_oneshot}). We are mostly interested in the data structure version of the problem, in which we are given the polygon \(P\) and the set \(A\) in advance, and we wish to compute \(C(Q, A)\) efficiently once we are given the query object \(Q\). We show that we can indeed answer such queries efficiently, that is, in polylogarithmic time in \(n\) and \(m\). The exact query times and the space usage and preprocessing times depend on the type of the query object and the type of objects in \(A\). See \cref{tab:results} for an overview.
\newcolumntype{Y}[1]{>{\hsize=#1\hsize\linewidth=\hsize}X}
\begin{table}[tb]
\centering
\caption{Results in this paper. \(\bullet\) and \(\slash\) denote points and line segments, respectively.}
\begin{tabularx}{\linewidth}{c c Y{.75} Y{1.5} Y{.75} c}
\toprule
\multirow{2}{*}{\(A\)} & \multirow{2}{*}{\(Q\)} & \multicolumn{3}{c}{Data structure} & \multirow{2}{*}{Section}\\
& & Space & Preprocessing & Query & \\
\midrule
\(\bullet\) & \(\bullet\) & \(\bO(nm^2)\) & \(\bO(nm\log n + nm^2)\) & \(\bO(\log nm)\) & \ref{sec:arrangement}\\
& & \(\bO(n + m^{2 + \eps} \log n)\) & \(\bO(n + m \log^2 n + m^{2 + \eps} \log n)\) & \(\bO(\log n \log nm)\) & \ref{sec:point_point}\\
\(\slash\) & \(\bullet\) & \(\bO(nm^2)\) & \(\bO(nm\log n + nm^2)\) & \(\bO(\log nm)\) & \ref{sec:arrangement}\\
\(\bullet\) & \(\slash\) & \(\bO(n^2 + nm^{2 + \eps})\) & \(\bO(n^2\log m + nm\log n + nm^{2 + \eps})\) & \(\bO(\log n\log nm)\) & \ref{sec:point_segment}\\
\(\slash\) & \(\slash\) & \(\bO(n^2 + nm^{2 + \eps})\) & \(\bO(n^2\log m + nm\log n + nm^{2 + \eps})\) & \(\bO(\log n\log nm)\) & \ref{sec:segment_segment}\\
\bottomrule
\end{tabularx}
\label{tab:results}
\end{table}
In \cref{sec:point}, we consider the case where the query object is a point. We show how to answer queries efficiently using the arrangement of all (weak) visibility polygons, one for each object in \(A\). As Bose et al.~\cite[Section~6.2]{bose02} argued, such an arrangement has complexity \(\Theta(nm^2)\) in the worst case.
\begin{restatable}{theorem}{theoremArrangement}\label{thm:arrangement} Let \(P\) be a simple polygon with \(n\) vertices, and let \(A\) be a set of \(m\) points or line segments inside \(P\). In \(\bO(nm^2 + nm\log n)\) time, we can build a data structure of size \(\bO(nm^2)\) that can report the number of points or segments in \(A\) visible from a query point \(q\) in \(\bO(\log nm)\) time. \end{restatable}
We then show that if the objects in \(A\) are points, we can significantly decrease the required space. We argue that we do not need to construct the visibility polygons of all points in \(A\), thus avoiding an \(\bO(nm)\) term in the space and preprocessing time. We use a hierarchical decomposition of the polygon and the fact that the visibility of a point \(a \in A\) in a subpolygon into another subpolygon is described by a single constant-complexity cone. We then obtain the following result. Here and in the rest of the paper, \(\eps > 0\) is an arbitrarily small constant.
\begin{restatable}{theorem}{theoremPointPoint}\label{thm:point_point} Let \(P\) be a simple polygon with \(n\) vertices, and let \(A\) be a set of \(m\) points inside \(P\). In \(\bO(n + m^{2 + \eps} \log n + m \log^2 n)\) time, we can build a data structure of size \(\bO(n + m^{2 + \eps} \log n)\) that can report the number of points from \(A\) visible from a query point \(q\) in \(\bO(\log n \log nm)\) time. \end{restatable}
In \cref{sec:point_segment}, we turn our attention to the case where the query object is a line segment \(\queryseg\) and the objects in \(A\) are points. One possible solution in this scenario would be to store the visibility polygons for the points in \(A\) so that we can count the number of such polygons stabbed by the query segment. However, since these visibility polygons have total complexity \(\bO(nm)\) and the query may have an arbitrary orientation, a solution achieving polylogarithmic query time will likely use at least \(\Omega(n^2m^2)\) space~\cite{agarwal93spacepart,agarwal96,gupta95}. So, we again use an approach that hierarchically decomposes the polygon to limit the space usage. Unfortunately, testing visibility between the points in \(A\) and the query segment is more complicated in this case. Moreover, the segment can intersect multiple regions of the decomposition, so we have to avoid double counting. All of this makes the problem significantly harder. We manage to overcome these difficulties using careful geometric arguments and an inclusion--exclusion-style counting scheme. This leads to the following result, saving at least a linear factor compared to an approach based on stabbing visibility polygons:
\begin{restatable}{theorem}{theoremPointSegment}\label{thm:point_segment} Let \(P\) be a simple polygon with \(n\) vertices, and let \(A\) be a set of \(m\) points inside \(P\). In time \(\bO(nm^{2 + \eps} + nm \log n + n^2\log m)\), we can build a data structure of size \(\bO(nm^{2 + \eps} + n^2)\) that can report the number of points from \(A\) visible from a query segment \(\queryseg\) in \(\bO(\log n \log nm)\) time. \end{restatable}
In \cref{sec:segment_segment}, we show that we can extend these arguments even further and solve the scenario where the objects in \(A\) are also line segments. Somewhat surprisingly, this does not impact the space or time complexity of the data structure. (Note that in this setting, visibility between segments does not represent visibility between moving points; refer to \cref{sec:extra} for a discussion of that problem.)
\begin{restatable}{theorem}{theoremSegmentSegment} \label{thm:segment_segment} Let \(P\) be a simple polygon with \(n\) vertices, and let \(A\) be a set of \(m\) segments inside \(P\). In time \(\bO(nm^{2 + \eps} + nm \log n + n^2\log m)\), we can build a data structure of size \(\bO(nm^{2 + \eps} + n^2)\) that can report the number of segments from \(A\) visible from a query segment \(\queryseg\) in \(\bO(\log n \log nm)\) time. \end{restatable}
Finally, in \cref{sec:extra}, we discuss some extensions of our results. We generalise the approach of \cref{sec:segment_segment} to the case where the objects in \(A\) are simple polygons. We consider some query variations and show that we can compute the pairwise visibility of two sets of objects---that is, solve the problem that motivated this work---in time subquadratic in the number of objects.
\section{Preliminaries}\label{sec:prelims} In this \lcnamecref{sec:prelims}, we review some basic terminology and tools we use to build our data structures.
\subparagraph*{Visibility in a simple polygon.} We refer to the parts of the polygon \(P\) that can be seen from some point \(p \in P\) as its \emph{visibility polygon,} denoted \(V(p)\). The visibility polygon has complexity \(\bO(n)\)~\cite{elgindy81}. We can also construct a visibility polygon for a line segment \(\queryseg\), denoted \(V(\queryseg)\), which is the union of the visibility polygons of all points on \(\queryseg\); it is referred to as a \emph{weak visibility polygon.} Such a polygon still has complexity \(\bO(n)\)~\cite{guibas87}.
\begin{lemma}\label{lem:algorithm_oneshot} Let \(P\) be a simple polygon with \(n\) vertices, and let \(A\) be a set of \(m\) points or line segments inside \(P\). We can compute the number \(C(Q, A)\) of objects from \(A\) visible to a point or line segment \(Q\) in time \(\bO(n + m\log n)\). \end{lemma} \begin{proof} If \(A\) is a set of points, it suffices to compute the visibility polygon of \(Q\) and preprocess it for \(\bO(\log n)\)-time point location queries. Both preprocessing steps take linear time~\cite{guibas87,kirkpatrick83}, and querying takes \(\bO(m\log n)\) time in total. In case \(A\) consists of line segments, we can similarly test whether a segment of \(A\) is visible when at least one of its endpoints is visible. It remains to count the visible segments both of whose endpoints lie outside of \(V(Q)\). We can do this within the same \(\bO(n + m \log n)\) time bound by computing a sufficiently large bounding box \(B\) and constructing an \(\bO(\log n)\)-time ray shooting data structure on \(B \setminus V(Q)\). This allows us to test whether a segment intersects \(V(Q)\) in \(\bO(\log n)\) time. Since \(B \setminus V(Q)\) has only a single hole, we can turn it into a simple polygon, build a ray shooting structure for simple polygons~\cite{hershberger95}, and answer a query with \(\bO(1)\) ray shooting queries. \end{proof}
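To make the point-counting step concrete, here is a minimal Python sketch (ours). It assumes a routine \texttt{visibility\_polygon(P, q)} implementing one of the linear-time visibility-polygon algorithms cited above; that routine is not provided here, and the \(\bO(m)\) even--odd crossing test below stands in for the \(\bO(\log n)\)-time point-location structure used in the lemma.
\begin{verbatim}
def point_in_polygon(pt, poly):
    """Even-odd crossing test; poly is a list of (x, y) vertices in order."""
    x, y = pt
    inside = False
    for (x1, y1), (x2, y2) in zip(poly, poly[1:] + poly[:1]):
        if (y1 > y) != (y2 > y):
            x_cross = x1 + (y - y1) * (x2 - x1) / (y2 - y1)
            if x < x_cross:
                inside = not inside
    return inside

def count_visible_points(P, A, q, visibility_polygon):
    """Count points of A inside V(q); visibility_polygon is assumed given."""
    Vq = visibility_polygon(P, q)            # O(n), e.g. Joe-Simpson style
    return sum(point_in_polygon(a, Vq) for a in A)
\end{verbatim}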
\begin{lemma}\label{lem:visibility-seg-intersect} Given a visibility polygon \(V(p) \subseteq P\) for some point \(p \in P\) and a line segment \(\seg{rs} \subset P\), if \(\seg{rs}\) and \(V(p)\) intersect, their intersection is a single line segment. \end{lemma} \begin{proof} Assume for contradiction that the intersection between \(V(p)\) and \(\seg{rs}\) consists of multiple, possibly degenerate, line segments, \(S_1, \dots, S_k\) for some \(k > 1\). Take some points \(q_i\) and \(q_{i + 1}\) on consecutive segments \(S_i\) and \(S_{i + 1}\). Consider the line segments \(\seg{pq_i}\) and \(\seg{pq_{i + 1}}\). By definition of the visibility polygon, these segments are inside \(P\). Since \(\seg{rs}\) is inside \(P\), the segment \(\seg{q_i q_{i + 1}}\) is also inside \(P\). Since \(P\) is simple, it must then hold that the interior of the triangle \(T\) with vertices \(p\), \(q_i\), and \(q_{i + 1}\) is also inside \(P\). More precisely, \(T\) cannot contain any of the boundary of \(P\). Now consider a line segment \(\seg{pq_o}\) for a point \(q_o\) between segments \(S_i\) and \(S_{i + 1}\) on \(\seg{rs}\). Since its endpoint \(q_o\) is outside \(V(p)\), the line segment must cross the boundary of \(P\) inside \(T\). This contradicts the previous claim that \(T\) is empty; thus, it must be that the intersection between \(V(p)\) and \(\seg{rs}\) is a line segment if they intersect. \end{proof}
A \emph{cone} is a subspace of the plane that is enclosed by two rays starting at some point \(p\), called the \emph{apex} of the cone. Let \(D\) be a diagonal of \(P\) and let \(P_L\) and \(P_R\) be the two subpolygons we obtain when splitting \(P\) with \(D\). Consider some point \(p\) in \(P_L\). We define the \emph{visibility cone} of \(p\) \emph{through} \(D\), denoted \(V(p, D, P_L)\), as the collection of rays starting at \(p\) that intersect \(D\) and do not intersect the boundary of \(P_L\), except at \(D\) (see \cref{fig:cone_illustration}).
\begin{figure}
\caption{The filled shape is the cone \(V(q, D, P_L)\).}
\label{fig:cone_illustration}
\end{figure}
\begin{corollary}\label{cor:cone_continuous} The intersection between the visibility cone \(V(p, D, P_L)\) and the diagonal \(D\) is a single line segment or empty. \end{corollary} \begin{proof} The visibility cone is clearly a subset of the visibility polygon \(V(p)\). Since the intersection between \(D\) and the visibility polygon is a single line segment (or empty) by \cref{lem:visibility-seg-intersect}, the same must hold for the visibility cone. \end{proof}
\subparagraph*{Cutting trees.} Suppose we want to preprocess a set \(\Lns\) of \(m\) lines in the plane so that given a query point \(q\), we can count the number of lines below the query point. Let \(r \in [1, m]\) be a parameter; then a \emph{\((\sfrac{1}{r})\)-cutting} of \(\Lns\) is a subdivision of the plane with the property that each cell is intersected by at most \(\sfrac{m}{r}\) lines~\cite{chazelle93}. If \(q\) lies in a certain cell of the cutting, we know, for all lines that do not cross the cell, whether they are above or below \(q\), and so we can store the count with the cell, or report the lines in a precomputed \emph{canonical subset;} for the lines that cross the cell, we can recurse. The data structure that performs such a query is called a \emph{cutting tree;} it can be constructed in \(\bO(m^{2 + \eps})\) time, uses \(\bO(m^{2 + \eps})\) space, and supports answering the queries in time \(\bO(\log m)\), for any constant \(\eps > 0\). Intuitively, the parameter \(r\) here determines the trade-off between the height of the recursion tree and the number of nodes for which a certain line in \(\Lns\) is relevant. If we pick \(r = m\), the \((\sfrac{1}{r})\)-cutting of \(\Lns\) is just the arrangement of \(\Lns\). The bounds above are based on picking \(r \in \bO(1)\), so the height of the recursion tree is \(\bO(\log m)\). This approach follows the work of Clarkson~\cite{clarkson87}, with Chazelle~\cite{chazelle93} obtaining the bounds above by improving the cutting construction.
An obvious benefit of this approach over just constructing the arrangement on \(\Lns\) and doing point location in that arrangement is that using cuttings, we can obtain \(\bO(\log m)\) canonical subsets and perform nested queries on them without an explosion in the required storage; the resulting data structure is called a \emph{multilevel cutting tree.} Specifically, we can query with \(k\) points and a direction associated with each point (above or below) and return the lines of \(\Lns\) that pass on the correct side (above or below) of all \(k\) query points. If we pick \(r \in \bO(1)\) and nest \(k\) levels in a \(k\)-level cutting tree, we get the same construction time and storage bounds as for a regular cutting tree; but the query time is now \(\bO(\log^k m)\). Chazelle et al.~\cite{chazelle92fast} show that if we set \(r = m^{\sfrac{\eps}{2}}\), each level of a multilevel cutting tree is a constant-height tree, so the answer to the query can be represented using only \(\bO(1)\) canonical subsets. The space used and the preprocessing time remain \(\bO(m^{2 + \eps})\).
\begin{lemma}\label{lem:multilevel-cutting-tree-2} Let \(\Lns\) be a set of \(m\) lines and let \(k\) be a constant. Suppose we want to answer the following query: given \(k\) points and associated directions (above or below), or \(k\) vertical rays, find the lines in \(\Lns\) that lie on the correct side of all \(k\) points (or intersect all \(k\) rays). In time \(\bO(m^{2 + \eps})\), we can construct a data structure using \(\bO(m^{2 + \eps})\) storage that supports such queries. The lines are returned as \(\bO(1)\) canonical subsets, and the query time is \(\bO(\log m)\). \end{lemma}
Dualising the problem in the usual way, we can alternatively report or count points from the set \(A\) that lie in a query half-plane; or in the intersection of several half-planes, using a multilevel cutting tree.
\begin{lemma}\label{lem:multilevel-cutting-tree-1} Let \(A\) be a set of \(m\) points and let \(k\) be a constant. In time \(\bO(m^{2 + \eps})\), we can construct a data structure using \(\bO(m^{2 + \eps})\) storage that returns \(\bO(1)\) canonical subsets with the points in \(A\) that lie in the intersection of the \(k\) query half-planes in time \(\bO(\log m)\). \end{lemma}
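As a quick sanity check of this duality (ours; there are several equivalent conventions, and the map below is one standard choice, not necessarily the one used later in the paper), the following snippet verifies that a point lies above a non-vertical line exactly when the dual point lies above the dual line, which is what turns half-plane range counting on \(A\) into the line-counting queries of \cref{lem:multilevel-cutting-tree-2}:
\begin{verbatim}
# Point (a, b) maps to the line y = a*x - b; line y = c*x + d maps to (c, -d).
import random

def point_above_line(p, line):         # line given as (slope c, intercept d)
    a, b = p; c, d = line
    return b > c * a + d

def dual_point(line):                  # y = c*x + d  ->  (c, -d)
    c, d = line
    return (c, -d)

def dual_line(p):                      # (a, b)  ->  y = a*x - b
    a, b = p
    return (a, -b)

random.seed(1)
for _ in range(1000):
    p = (random.uniform(-5, 5), random.uniform(-5, 5))
    ln = (random.uniform(-5, 5), random.uniform(-5, 5))
    assert point_above_line(p, ln) == point_above_line(dual_point(ln), dual_line(p))
\end{verbatim}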
We can use these basic results to resolve more complicated queries; the techniques are similar and are shown in \cref{lem:mutual_cones}. See \cref{fig:cutting_tree} for an illustration.
\begin{lemma}\label{lem:mutual_cones} Let \(A\) be a set of \(m\) points, each of them an apex of a cone; and let the query point be \(q\), again an apex of a cone \(C_q\). In time \(\bO(m^{2 + \eps})\), we can construct a data structure using \(\bO(m^{2 + \eps})\) storage that returns a representation of the points in \(A' \subseteq A\), such that for any \(p \in A'\), \(q\) lies in the cone of \(p\) and \(p \in C_q\). The points are returned as \(\bO(1)\) canonical subsets, and the query time is \(\bO(\log m)\). Alternatively, the points can be counted. \end{lemma} \begin{proof} We can construct a four-level cutting tree; the first two levels can select the nodes that represent points from \(A\) lying in \(C_q\). Note that to select the points that lie in \(C_q\), we need to perform two consecutive half-plane queries, as \(C_q\) is an intersection of two half-planes that meet at point \(q\). We can use \cref{lem:multilevel-cutting-tree-1} to handle these; note that at each level we obtain a constant number of canonical subsets, so any new point location queries can be done in \(\bO(\log m)\) time per level. After two levels, we get \(\bO(1)\) canonical subsets. The next two levels handle the other condition: select the points whose cones contain \(q\). This can be done by checking that \(q\) lies below the upper boundaries of the cones and that \(q\) lies above the lower boundaries of the cones. Again, we need to do point location queries on each level and for each canonical subset; we can use \cref{lem:multilevel-cutting-tree-2} to see that we still have a constant number of those. Overall, we do a constant number of point location queries and go down a four-level data structure, where every level is a constant-depth tree. Therefore, the query takes \(\bO(\log m)\) time overall. As stated previously, adding the levels does not increase the storage or the preprocessing time requirements. \end{proof}
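For reference, the following brute-force \(\bO(m)\) Python sketch (ours) spells out the predicate that the four-level cutting tree evaluates: each cone is given by its apex and two boundary direction vectors, with the convention that the opening angle is at most \(\pi\) (which holds for visibility cones through a diagonal), and membership is tested with two cross-product sign checks. The data structure of the lemma computes the same count in \(\bO(\log m)\) time via canonical subsets.
\begin{verbatim}
def cross(o, d, p):
    """Signed area test: > 0 if p is to the left of the ray o + t*d."""
    return d[0] * (p[1] - o[1]) - d[1] * (p[0] - o[0])

def in_cone(x, apex, d_right, d_left):
    """Cone = points left of the right boundary ray and right of the left
    boundary ray (counterclockwise sweep from d_right to d_left <= pi)."""
    return cross(apex, d_right, x) >= 0 and cross(apex, d_left, x) <= 0

def count_mutually_visible(q, cone_q, cones_A):
    """cone_q = (d_right, d_left) at apex q; cones_A: list of (a, dr, dl)."""
    return sum(in_cone(a, q, *cone_q) and in_cone(q, a, dr, dl)
               for (a, dr, dl) in cones_A)
\end{verbatim}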
\begin{figure}
\caption{A query in a multilevel cutting tree, top left to bottom right. The query point is red; the selected points of \(A\) are blue. Black outline shows the relevant part of the polygon. (a,~b)~We select points in \(A\) above (resp.\@ below) the right (resp.\@ left) cone boundary of \(q\). (c,~d)~We refine by taking points whose left (resp.\@ right) cone boundary is below (resp.\@ above) \(q\).}
\label{fig:cutting_tree}
\end{figure}
\begin{lemma}\label{lem:separated-cones-seg-ds} Let \(L\) be a vertical line and let \(A\) be a set of \(m\) cones starting left of \(L\) and whose top and bottom rays intersect \(L\). In time \(\bO(m^{2 + \eps})\), we can construct two two-level cutting trees for \(A\) of total size \(\bO(m^{2 + \eps})\), so that for a query segment \(\queryseg\) that is fully to the right of \(L\), we can count the number of cones that contain or intersect \(\queryseg\) in \(\bO(\log m)\) time. \end{lemma} \begin{proof} A cone \(C \in A\) partitions the space to the right of \(L\) into three regions: the regions above and below \(C\) and the region inside \(C\). Segment \(\queryseg\) does not intersect \(C\) if and only if it is contained in either the top or the bottom region. This is exactly the case when either both endpoints of \(\queryseg\) lie above the supporting line of the upper boundary of \(C\) or both lie below the supporting line of the lower boundary of \(C\). Hence, if we store the supporting lines of the upper and lower boundaries of \(A\) in two two-level cutting trees, similarly to \cref{lem:mutual_cones}, we can count the number of cones that \(\queryseg\) does not intersect. By storing the total number of cones, we can then determine the number of cones that contain or intersect \(\queryseg\). \end{proof}
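Again for reference, a direct \(\bO(m)\) restatement of this criterion in code (ours); each supporting line is stored as \((a, b, c)\) with \(ax + by + c = 0\) and the normal \((a, b)\) chosen to point upward, so that a positive sign means `above'. The two two-level cutting trees of the lemma replace the loop below by an \(\bO(\log m)\) query.
\begin{verbatim}
def side(line, p):
    """Sign of a*x + b*y + c; with the normal (a, b) pointing upward,
    a positive value means p lies above the line."""
    a, b, c = line
    return a * p[0] + b * p[1] + c

def count_cones_meeting_segment(r, s, cones):
    """cones: list of (upper_line, lower_line) supporting-line pairs;
    r, s: endpoints of a query segment entirely to the right of L."""
    missed = sum((side(up, r) > 0 and side(up, s) > 0) or
                 (side(lo, r) < 0 and side(lo, s) < 0)
                 for (up, lo) in cones)
    return len(cones) - missed
\end{verbatim}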
\begin{lemma}\label{lem:count-segment-line-intersections} Let \(\Lns\) be a set of \(m\) lines and \(\queryseg\) a query line segment. We can store \(\Lns\) in a multilevel cutting tree, using \(\bO(m^{2 + \eps})\) space and preprocessing time, so that we can count the number of lines in \(\Lns\) intersected by \(\queryseg\) in time \(\bO(\log m)\). \end{lemma}
\subparagraph*{Polygon decomposition.} For a simple polygon \(P\) on \(n\) vertices, Chazelle~\cite{chazelle82} shows that we can construct a balanced hierarchical decomposition of \(P\) by recursively splitting the polygon into two subpolygons of approximately equal size. The polygon is split on diagonals between two vertices of the polygon. The recursion stops when reaching triangles. The decomposition can be computed in \(\bO(n)\) time and stored using \(\bO(n)\) space in a balanced binary tree.
\subparagraph*{Hourglasses and the shortest path data structure.} An \emph{hourglass} for two segments \(\seg{pq}\) and \(\seg{rs}\) in a simple polygon \(P\) is the union of geodesic shortest paths in \(P\) from a point on \(\seg{pq}\) to a point on \(\seg{rs}\)~\cite{guibas89}. If the upper chain and lower chain of an hourglass share vertices, it is \emph{closed,} otherwise it is \emph{open.} A \emph{visibility glass} is a subset of the hourglass consisting of all line segments between a point on \(\seg{pq}\) and a point on \(\seg{rs}\)~\cite{eades20}.
Guibas and Hershberger~\cite{guibas89}, with later improvements~\cite{hershberger91}, describe a data structure to compute shortest paths in a simple polygon \(P\). They use the polygon decomposition by Chazelle~\cite{chazelle82} and also store hourglasses between the splitting diagonals of the decomposition. The data structure uses \(\bO(n)\) storage and preprocessing time and can answer the following queries in \(\bO(\log n)\) time: \begin{description} \item[Shortest path query.] Given points \(p, q \in P\), return the geodesic shortest path between \(p\) and \(q\) in \(P\) as a set of \(\bO(\log n)\) nodes of the decomposition. The shortest path between \(p\) and \(q\) is a concatenation of the polygonal chains of the boundaries of the (open or closed) hourglasses in these \(\bO(\log n)\) nodes together with at most \(\bO(\log n)\) segments called \emph{bridges} connecting two hourglass boundaries. \item[Segment location query.] Given a segment \(\seg{pq}\), return the two leaf triangles containing \(p\) and \(q\) in the decomposition and the \(\bO(\log n)\) pairwise disjoint open hourglasses such that the two leaf triangles and hourglasses fully cover \(\seg{pq}\). We refer to the returned structure as the \emph{polygon cover} of \(\seg{pq}\). \item[Cone query.] Given a point \(s\) and a line segment \(\seg{pq}\) in \(P\), return the visibility cone from \(s\) through \(\seg{pq}\). This can be done by getting the shortest paths from \(s\) to \(p\) and \(q\) and taking the two segments closest to \(s\) to extend them into a cone. \end{description}
\section{Point Queries}\label{sec:point} In this \lcnamecref{sec:point}, given a set \(A\) of \(m\) points in a simple polygon \(P\) on \(n\) vertices, we count the points of \(A\) that are in the visibility polygon of a query point \(q \in P\). We present two solutions: (i)~a simple arrangement-based approach that is also applicable to the case where \(A\) contains line segments, which achieves a very fast query time at the cost of large storage and preprocessing time; and (ii)~a cutting-tree-based approach with query times slower by a factor of \(\bO(\log n)\), but with much better storage requirements and preprocessing time.
\subsection{Point Location in an Arrangement}\label{sec:arrangement} We first consider a straightforward approach that relies on the following observation: the number of objects in \(A\) visible from a query point \(q\) is equal to the number of (weak) visibility polygons of the objects in \(A\) stabbed by \(q\). Hence, we can construct all (weak) visibility polygons of the objects in \(A\) and compute the arrangement \(\Arr\) of the edges of these polygons. For each cell \(C\) in the arrangement, we store the number of visibility polygons that contain \(C\). Then a point location query for \(q\) yields the number of visible objects in \(A\).
Computing the visibility polygons takes \(\bO(nm)\) time, and constructing the arrangement using an output-sensitive line segment intersection algorithm takes \(\bO(nm\log nm + \SetSize{\Arr})\) time~\cite{chazelle92intls}, where \(\SetSize{\Arr}\) is the number of vertices of \(\Arr\). Building a point location structure on \(\Arr\) for \(\bO(\log \SetSize{\Arr})\)-time point location queries takes \(\bO(\SetSize{\Arr})\) time~\cite{kirkpatrick83}. The space usage is \(\bO(\SetSize{\Arr})\). As Bose et al.~\cite{bose02} show, the complexity of \(\Arr\) is \(\Theta(nm^2)\) in the worst case.
\theoremArrangement*
\subsection{Hierarchical Decomposition}\label{sec:point_point} To design a data structure that uses less storage than that of \cref{sec:arrangement}, we observe that if we subdivide the polygon, we can count the number of visible objects by summing up the number of visible objects residing in the cells of the subdivision. To efficiently compute these counts, we use the polygon decomposition approach, as described in \cref{sec:prelims}. With each split in our decomposition, we store data structures that can efficiently count the number of visible objects in the associated subpolygon.
\subparagraph*{Cone containment.} Let us solve the following problem first. We are given a simple polygon \(P\) and a (w.l.o.g.) vertical diagonal \(D\) that splits it into two simple polygons \(P_L\) and \(P_R\). Furthermore, we are given a set \(A\) of \(m\) points in \(P_L\). Given a query point \(q\) in \(P_R\), we want to count the points in \(A\) that see \(q\). We base our approach on the following observation.
\begin{figure}
\caption{Visibility cones (coloured regions) of (coloured) points w.r.t.\@ some diagonal \(D\). (a)~Blue and red are mutually visible. (b)~Green and blue cannot see each other, nor can orange and blue.}
\label{fig:visibility-cone}
\end{figure}
\begin{lemma}\label{lem:cones} Given a simple polygon \(P\) split into two simple polygons \(P_L\) and \(P_R\) by a diagonal \(D\) between two vertices and two points \(p \in P_L\) and \(q \in P_R\), consider the visibility cones \(V(p, D, P_L)\) and \(V(q, D, P_R)\), i.e.\@ the cones from \(p\) and \(q\) through \(D\) into the other subpolygons. Point \(p\) sees \(q\) in \(P\) if and only if \(q \in V(p, D, P_L)\) and \(p \in V(q, D, P_R)\). \end{lemma} \begin{proof} First suppose that \(p \in V(q, D, P_R)\) and \(q \in V(p, D, P_L)\). We need to show that \(p\) and \(q\) see each other, that is, that the line segment \(\seg{pq}\) lies in \(P\). Observe that both \(p\) and \(q\) lie in \(V(p, D, P_L)\), and \(V(p, D, P_L)\) is convex, so \(\seg{pq}\) lies in \(V(p, D, P_L)\); symmetrically, \(\seg{pq}\) lies in \(V(q, D, P_R)\). Furthermore, note that since both cones are cones through \(D\), the segment \(\seg{pq}\) must cross \(D\) at some point \(r\). Then by construction of \(V(p, D, P_L)\), the segment \(\seg{pr}\) lies entirely in \(P_L\); similarly, \(\seg{rq}\) lies entirely in \(P_R\). As also the diagonal \(D\) lies in \(P\), we conclude that \(\seg{pq}\) lies in \(P\).
Now suppose that \(p\) and \(q\) see each other in \(P\). As they are on the opposite sides of the diagonal \(D\) and the polygon \(P\) is simple, the line segment \(\seg{pq}\) must cross \(D\) at some point \(r\). As \(\seg{pq}\) lies inside \(P\), clearly, \(\seg{pr}\) lies inside \(P_L\) and \(\seg{rq}\) lies inside \(P_R\). Then the visibility cone \(V(p, D, P_L)\) must include the ray from \(p\) through \(r\), and so \(q\) is in \(V(p, D, P_L)\); symmetrically, \(p\) is in \(V(q, D, P_R)\). \end{proof}
\Cref{lem:cones} shows that to count the number of points of \(A\) that see \(q\), it suffices to construct the cones from all points in \(A\) through \(D\) and the cone from \(q\) through \(D\) and count the number of points from \(A\) satisfying the condition of \cref{lem:cones} (see also \cref{fig:visibility-cone}). The cones from all \(p \in A\) into \(P_R\) can be precomputed, so only the cone from \(q\) into \(P_L\) needs to be computed at query time. The query of this type can be realised using a multilevel cutting tree, as explained in \cref{lem:mutual_cones}. We also still need to compute the cone \(V(q, D, P_R)\) at query time and precompute the cones \(V(p, D, P_L)\) for all \(p \in A\); we shall handle this later.
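Once the two cones are available, the test of \cref{lem:cones} reduces to four orientation checks. The following Python sketch spells this out; the cone representation (apex plus two bounding directions, with the left boundary counterclockwise of the right one) is a convention we assume for the illustration.
\begin{verbatim}
# Sketch of the mutual-cone test of the lemma; a cone is given as
# (apex, left_dir, right_dir), left boundary counterclockwise of the
# right one (an assumed convention).
def cross(o, a, b):
    return (a[0]-o[0]) * (b[1]-o[1]) - (a[1]-o[1]) * (b[0]-o[0])

def in_cone(cone, pt):
    apex, d_left, d_right = cone
    left_pt = (apex[0] + d_left[0], apex[1] + d_left[1])
    right_pt = (apex[0] + d_right[0], apex[1] + d_right[1])
    # pt must not be left of the left ray nor right of the right ray
    return cross(apex, left_pt, pt) <= 0 <= cross(apex, right_pt, pt)

def mutually_visible(cone_p, cone_q, p, q):
    return in_cone(cone_p, q) and in_cone(cone_q, p)

# Usage: p and q face each other through a vertical diagonal x = 0.
p, q = (-1.0, 0.0), (1.0, 0.0)
cone_p = (p, (1.0, 1.0), (1.0, -1.0))     # cone of p, pointing into P_R
cone_q = (q, (-1.0, -1.0), (-1.0, 1.0))   # cone of q, pointing into P_L
print(mutually_visible(cone_p, cone_q, p, q))   # True
\end{verbatim}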
\subparagraph*{Decomposition.} Let us return to the original problem. To solve it, we can use the balanced polygon decomposition~\cite{chazelle82}, as discussed in \cref{sec:prelims}. Following Guibas and Hershberger~\cite{guibas89,hershberger91}, we represent it as a binary tree (see \cref{fig:polygon-decomp}). Observe that as long as there is some diagonal \(D\) separating our query point from a subset of points of \(A\), we can use the approach above.
\begin{figure}
\caption{Augmented polygon decomposition following the approach by Chazelle~\cite{chazelle82}. Each node corresponds to the splitting diagonal (blue dashed line). Along the tree edges (blue lines), we store the multilevel cutting tree (red box) for the polygon in the child using the diagonal of the parent.}
\label{fig:polygon-decomp}
\end{figure}
Every node of the tree is associated with a diagonal, and the two children correspond to the left and the right subpolygon. With each node, we store two data structures described above: one for the query point to the left of the diagonal and one for the query to the right.
The query then proceeds as follows. Suppose the polygon \(P\) is triangulated, and the triangles correspond to the leaves in the decomposition. Given a query point \(q\), find the triangle it belongs to; then traverse the tree bottom up. In the leaf, \(q\) can see all the points of \(A\) that are in the same triangle, so we start with that count. As we proceed up the tree, we query the correct associated data structure---if \(q\) is to the right of the diagonal, we want to count the points to the left of the diagonal in the current subpolygon that see \(q\). It is easy to see that this way we end up with the total number of points in \(A\) that see \(q\), since the subsets of \(A\) that we count are disjoint as we move up the tree and cover the entire set \(A\).
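The following self-contained Python sketch mirrors this bottom-up query. The class and field names are our own, and the associated data structure is replaced by a brute-force count; in the actual structure that count is answered by the multilevel cutting tree in \(\bO(\log m + \log n)\) time.
\begin{verbatim}
# Schematic bottom-up query; node and field names are assumptions,
# and count_seeing is a brute-force stand-in for the cutting tree.
class Node:
    def __init__(self, left=None, right=None, points_in_triangle=()):
        self.left, self.right, self.parent = left, right, None
        self.points_in_triangle = list(points_in_triangle)  # leaves
        self.left_seers = []    # points of A left of the diagonal
        self.right_seers = []   # points of A right of the diagonal
        for child in (left, right):
            if child is not None:
                child.parent = self

def count_seeing(points, q, sees):
    return sum(1 for p in points if sees(p, q))

def query(q, leaf, sees):
    total = sum(1 for p in leaf.points_in_triangle if sees(p, q))
    node = leaf
    while node.parent is not None:
        parent = node.parent
        # q on the right side: count seers stored for the left side
        side = (parent.left_seers if node is parent.right
                else parent.right_seers)
        total += count_seeing(side, q, sees)
        node = parent
    return total

# Toy usage: two leaf triangles split by one diagonal; the predicate
# makes every pair mutually visible.
L1 = Node(points_in_triangle=[(0.0, 0.0)])
L2 = Node(points_in_triangle=[])
root = Node(L1, L2)
root.left_seers = [(0.0, 0.0)]
print(query((1.0, 0.0), L2, lambda p, q: True))   # 1
\end{verbatim}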
\theoremPointPoint* \begin{proof} The correctness follows from the considerations above; it remains to analyse the time and storage requirements. For the query time, we do point location of the query point \(q\) in the triangulation of \(P\) and make a single pass up the decomposition tree, making queries in the associated multilevel cutting trees. Clearly, the height of the tree is \(\bO(\log n)\). At every level of the decomposition tree, we need to construct the visibility cone from the query point to the current diagonal; this can be done in \(\bO(\log n)\) time using the shortest path data structure for \(P\)~\cite{guibas89,hershberger91}, as discussed in \cref{sec:prelims}. Then we need to query the associated data structure, except at the leaf, where we simply fetch the count. The query then takes time \[\sum_{i = 0}^{\bO(\log n)} \bO(\log m_i + \log n) = \bO(\log n \log m + \log^2 n)\,,\] where the sum of all \(m_i\) is at most \(m\). For the storage requirements, we need to store the associated data structures on in total \(m\) points at every level of the tree, as well as a single copy of the shortest path data structure, yielding overall \(\bO(n + m^{2 + \eps} \log n)\) storage. Finally, we analyse the preprocessing time. Triangulating a simple polygon takes \(\bO(n)\) time~\cite{chazelle91}. Constructing the decomposition can be done in additional \(\bO(n)\) time~\cite{guibas89}. Constructing the associated data structures takes time \(\bO(m^{2 + \eps})\) per level, so \(\bO(m^{2 + \eps} \log n)\) overall, after determining the visibility cones for the points of \(A\) to all the relevant diagonals, which can be done in time \(\bO(m \log^2 n)\), as each point of \(A\) occurs a constant number of times per level of the decomposition, and constructing the cone takes \(\bO(\log n)\) time. Overall we need \(\bO(n + m^{2 + \eps} \log n + m \log^2 n)\) time. \end{proof}
\begin{figure}
\caption{(a)~For the cone that describes visibility of \(\seg{pq}\) through \(D\), \cref{lem:cones} does not hold---there can be visibility without the apices of the cones seeing each other. (b)~The segment \(\seg{pq}\) intersects the cone of \(s\), and \(s\) is in the cone of \(\seg{pq}\), but they cannot see each other, so testing intersection between the objects and the cones also does not work directly.}
\label{fig:cone-lemma-fails-for-segments}
\end{figure}
\begin{remark}\label{rem:no_reuse} While this approach uses many of the ideas needed to tackle the setting with segment queries, \cref{lem:cones} does not apply---see \cref{fig:cone-lemma-fails-for-segments}. \end{remark}
\section{Segment Queries}\label{sec:point_segment} In this \lcnamecref{sec:point_segment}, given a simple polygon \(P\) and a set \(A\) of stationary entities (points) in \(P\), we construct a data structure that efficiently counts the points in \(A\) visible from a query segment \(\queryseg\). We cannot reuse the approach of \cref{sec:arrangement}, since \(\queryseg\) may intersect multiple arrangement cells, so the query time would depend on the number of visible entities, as we need to keep the list to avoid double counting; even if this issue were solved, we would need to sum up the values from the \(\Omega(n)\) cells we might cross. Therefore, we construct a new data structure using the insights of the hierarchical decomposition of \cref{sec:point_point}. That approach is also not directly usable, as discussed in \cref{rem:no_reuse}.
We use the data structure by Guibas and Hershberger~\cite{guibas89} (abbreviated \spds) on \(P\), discussed in \cref{sec:prelims}, as the foundation. For a given query \(\seg{pq}\), the \spds\ partitions \(P\) into four types of regions (\cref{fig:dead_monkeys_alive_query_overview}): hourglasses (orange); triangles that contain \(p\) or \(q\), denoted by \(T_L\) and \(T_R\) (blue); regions that have as a border the upper or the lower chain of an hourglass, referred to as \emph{side polygons} (green); and regions that have as a border one of the edges of \(T_L\) or \(T_R\), referred to as \emph{end polygons} (red). The number of visible objects in \(A\) is the sum of the counts of objects in at least one of the relevant hourglasses or triangles, plus the size of the set of objects contained in a side or an end polygon that are visible to \(\seg{pq}\). This allows us to subdivide the problem into tractable parts with strong assumptions.
Counting the visible objects inside the relevant hourglasses and triangles is easy, since all of them are visible. For the side polygons, we make an observation regarding the conditions for an object \emph{not} to be visible, and we subtract that count from the overall count of points in the relevant side polygons. Finally, for the end polygons, we make a case distinction on the way the visibility cones of the objects cross the adjacent triangles, and use inclusion--exclusion-style arguments to obtain the correct count.
\begin{figure}
\caption{Partitioning of the polygon based on the polygon cover of \(\seg{pq}\).}
\label{fig:dead_monkeys_alive_query_overview}
\end{figure}
\subsection{The Data Structure}\label{sec:ps_ds} In this \lcnamecref{sec:ps_ds}, we describe what our data structure stores and how to compute it. We start with some helpful observations, leading to a helper data structure.
\begin{lemma}\label{lem:ds_viscone} Let \(H\) be an hourglass bounding a side polygon \(S\). Denote the left diagonal of \(H\) by \(D_L\); and denote the polygon bounded by \(D_L\) by \(P_L\). Let \(\Rays\) be a set of visibility rays from objects in \(S\) into \(H\) that exit \(H\) through \(D_L\). (See \cref{fig:visibility-proof}.) In time \(\bO(\SetSize{\Rays}^{2 + \eps})\), we can compute a data structure of size \(\bO(\SetSize{\Rays}^{2 + \eps})\), so that given a query segment \(\queryseg \subset P_L\), we can count the number of rays in \(\Rays\) that are intersected by \(\queryseg\) in time \(\bO(\log \SetSize{\Rays})\). \end{lemma} \begin{proof} Suppose that some ray \(R \in \Rays\) intersects \(\queryseg\). Since \(R\) is a visibility ray into \(H\) that intersects \(D_L\), it extends into \(P_L\) and its apex is to the right of the supporting line of \(D_L\). Since \(\queryseg \subset P_L\), it follows that \(R\) can only intersect \(\queryseg\) left of the supporting line of \(D_L\).
Let \(\seg{ps}\) be the subsegment of \(\queryseg\) that is left of this line. We can compute \(\seg{ps}\) in constant time. The segment \(\seg{ps}\) intersects \(R\) if and only if it intersects the supporting line of \(R\). Testing for the intersection between a preprocessed set of lines and a query segment can be done with a two-level cutting tree (\cref{lem:multilevel-cutting-tree-2}), concluding the proof. \end{proof}
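A brute-force Python stand-in for this query is given below; the real structure is the two-level cutting tree of \cref{lem:multilevel-cutting-tree-2}, so the sketch only illustrates the reduction to supporting-line tests, and all names are ours.
\begin{verbatim}
# Count rays whose supporting lines are crossed by the clipped query
# segment ps; a linear scan stands in for the two-level cutting tree.
def side(a, b, p):
    """Sign of the cross product: side of the line a->b that p is on."""
    return (b[0]-a[0]) * (p[1]-a[1]) - (b[1]-a[1]) * (p[0]-a[0])

def crosses_supporting_line(p, s, a, b):
    """Does segment ps intersect the (infinite) line through a and b?"""
    return side(a, b, p) * side(a, b, s) <= 0

def count_crossed_rays(rays, p, s):
    """rays: list of (origin, second_point) pairs defining the lines."""
    return sum(1 for (a, b) in rays
               if crosses_supporting_line(p, s, a, b))

# Usage: one ray along the x-axis; the segment from (0, -1) to (0, 1)
# crosses its supporting line.
print(count_crossed_rays([((2, 0), (3, 0))], (0, -1), (0, 1)))   # 1
\end{verbatim}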
\begin{figure}
\caption{(a)~We want to count the blue rays intersecting the shortest path between \(q\) and \(v_L\). (b)~We store a multilevel cutting tree to query ray intersections with \(\seg{pq}\) and \(\seg{qv_L}\). (c)~We store a shortest path map to count the rays intersecting the shortest path from \(v_L\) to \(p\). In this case, we count \(\sfrac{1}{2}\cdot (3 + 1 + 4) = 4\).}
\label{fig:visibility-proof}
\end{figure}
\begin{lemma}\label{lem:sp_intersection_ds} Let \(H\) be an hourglass bounding a side polygon \(S\). Denote the left diagonal of \(H\) by \(D_L\); and denote the polygon bounded by \(D_L\) by \(P_L\). Let \(\Rays\) be a set of visibility rays from objects in \(S\) into \(H\) that exit \(H\) through \(D_L\). Finally, denote the leftmost vertex of the convex chain separating \(H\) from \(S\) by \(v_L\). (See \cref{fig:visibility-proof}.) Given a query point \(q \in P_L\) to the left of the supporting line of \(D_L\), whose shortest path to \(v_L\) in \(P_L\) forms an upwards convex chain, we wish to count the rays in \(\Rays\) that intersect this convex chain. In time \(\bO(\SetSize{\Rays}^{2 + \eps} + \SetSize{P_L} \log\SetSize{\Rays})\), we can compute a data structure of size \(\bO(\SetSize{\Rays}^{2 + \eps} + \SetSize{P_L})\) that can answer such queries in time \(\bO(\log\SetSize{\Rays}\SetSize{P_L})\). \end{lemma} \begin{proof} The shortest path from \(q\) to \(v_L\), together with \(\seg{qv_L}\), forms a convex polygon. A ray starting in \(S\) (and thus outside this convex polygon) that intersects an edge of the convex polygon must intersect exactly two edges of the polygon.
We store \(\Rays\) in the data structure of \cref{lem:ds_viscone}. We also store a \emph{shortest path map} on \(P_L\) with \(v_L\) as its root~\cite{guibas87}, computed in time \(\bO(\SetSize{P_L})\) and consisting of \(\bO(\SetSize{P_L})\) segments. For every edge in the map, we query the data structure of \cref{lem:ds_viscone} to obtain the number of rays in \(\Rays\) that intersect that edge. With every vertex \(s\) in the shortest path map, we store the total number of intersections between the rays in \(\Rays\) and the path from \(v_L\) to \(s\). Constructing this augmented data structure uses \(\bO(\SetSize{P_L})\) space and takes time \(\bO(\SetSize{P_L} \log\SetSize{\Rays})\).
Given this data structure, we answer a query as follows. The shortest path from \(q\) to \(v_L\) consists of a convex chain of segments of the shortest path map between \(v_L\) and some vertex \(p\), followed by a segment \(\seg{pq}\). In time \(\bO(\log\SetSize{P_L})\), we can identify the vertex \(p\). We query the vertex \(p\) for the (stored) total number of intersections with the rays in \(\Rays\) in \(\bO(1)\) time. Then we query the data structure of \cref{lem:ds_viscone} with segments \(\seg{pq}\) and \(\seg{qv_L}\) in \(\bO(\log\SetSize{\Rays})\) time and add all three counts together. This way, we count all the intersections of the rays in \(\Rays\) with the boundary of the convex polygon defined by \(\seg{qv_L}\) and the shortest path from \(q\) to \(v_L\). Each relevant ray is counted exactly twice this way; so we divide the total by two and return the result. Counting over the whole boundary is needed, since a ray can intersect the shortest path twice without intersecting \(\seg{qv_L}\). The query takes time \(\bO(\log\SetSize{\Rays} + \log\SetSize{P_L}) = \bO(\log\SetSize{\Rays}\SetSize{P_L})\). \end{proof}
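The query thus reduces to simple arithmetic over three counts: the stored prefix count at \(p\) and the two segment queries of \cref{lem:ds_viscone}. A tiny sketch, with the counts of \cref{fig:visibility-proof} plugged in by hand, is shown below.
\begin{verbatim}
# Query arithmetic of the lemma; the three counts are assumed to come
# from the shortest path map (stored at p) and two segment queries.
def count_rays_hitting_chain(stored_at_p, crossings_pq, crossings_qvL):
    """Rays crossing the chain from q to v_L: every such ray crosses
    the boundary of the convex region exactly twice, so halve."""
    return (stored_at_p + crossings_pq + crossings_qvL) // 2

# Usage mirroring the figure: 1/2 * (3 + 1 + 4) = 4 rays.
print(count_rays_hitting_chain(3, 1, 4))   # 4
\end{verbatim}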
We now introduce our Segment Query Data Structure (SQDS). It is based on the \spds, augmented with extra data for visibility queries. The \spds\ decomposes the polygon into hourglasses and triangles; we describe the data structures we store with each class.
\begin{figure}
\caption{(a)~Data structures per chain of an hourglass. (b)~Data structures in \DSPartRef{T2}, for cones from \(E(\seg{uv})\) restricted to pass through \(\seg{vw}\).}
\label{fig:sqds-hourglass-and-triangles}
\end{figure}
\subparagraph*{The data structure for hourglasses.} Consider an hourglass \(H\) bounded by diagonals \(D_L = \seg{v_Lu_L}\), \(D_R = \seg{v_Ru_R}\), and the upper and the lower chains \(\pi(v_L, v_R)\) and \(\pi(u_L, u_R)\) in the \spds\ (\cref{fig:sqds-hourglass-and-triangles}a). Let \(S_U\) be the side polygon of \(H\) that is incident to the upper chain, and let \(\C_U\) be the visibility cones of entities in \(S_U\) into \(H\). For the hourglass \(H\) itself, we store the number of objects in \(A\) that are contained in \(H\). For ease of exposition, we refer to the boundaries of a cone \(C \in \C_U\) as the \emph{left} and the \emph{right} boundary, when viewed from the apex of the cone in the direction of the cone. For the upper chain of \(H\), we store in SQDS: \begin{description} \item[H1.] The number of non-empty visibility cones in \(\C_U\). \item[H2.] The right cone boundaries of all cones in \(\C_U\) that fully exit \(H\) through \(D_R\) in the data structure of \cref{lem:sp_intersection_ds}. \item[H3.] The left cone boundaries of all cones in \(\C_U\) that fully exit \(H\) through \(D_L\) in the data structure of \cref{lem:sp_intersection_ds}. \end{description} We store symmetrical data structures for the bottom chain of \(H\) (i.e.\@ the left cone boundaries for the cones that exit through \(D_R\) and the right cone boundaries for the cones that exit through \(D_L\)).
\subparagraph*{The data structure for triangles.} Let \(T = uvw\) be a triangle in the polygon decomposition underlying the \spds. For \(T\), we store the number of objects in \(A\) that are contained in \(T\). In addition, consider an edge \(\seg{uv}\) of \(T\). To introduce our data structure, we assume that \(\seg{uv}\) is vertical. Denote the subpolygon (end polygon) adjacent to \(T\) and bounded by \(\seg{uv}\) by \(E(\seg{uv})\); for ease of exposition, assume \(T\) is to the right of \(E(\seg{uv})\). Let \(\C_{E(\seg{uv})}\) be the visibility cones into \(T\) of objects in \(A \cap E(\seg{uv})\). Let \(\C_{E(\seg{uv})}(\seg{vw})\) be the set of subcones of the cones in \(\C_{E(\seg{uv})}\) that intersect \(\seg{vw}\); similarly, define \(\C_{E(\seg{uv})}(\seg{uw})\) as the subcones intersecting \(\seg{uw}\). We store: \begin{description} \item[T1.] All cones \(\C_{E(\seg{uv})}\) in a data structure of \cref{lem:separated-cones-seg-ds}. \item[T2.] The subcones of \(\C_{E(\seg{uv})}(\seg{vw})\) in (\DSPartRef{T2.1})~a data structure of \cref{lem:separated-cones-seg-ds} and (\DSPartRef{T2.2})~the right and (\DSPartRef{T2.3})~the left boundaries of these cones in data structures of \cref{lem:sp_intersection_ds}. \item[T3.] Symmetric to \DSPartRef{T2}, but for \(\C_{E(\seg{uv})}(\seg{uw})\). \item[T4.] All the left cone boundaries that intersect \(\seg{vw}\) in a cutting tree. \item[T5.] All the right cone boundaries that intersect \(\seg{uw}\) in a cutting tree. \end{description} This list contains the data structures we store for \(\seg{uv}\); we store analogous data structures for edges \(\seg{vw}\) and \(\seg{uw}\) of \(T\). See also \cref{fig:sqds-hourglass-and-triangles}b.
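As a compact overview, the following Python dataclasses sketch a possible record layout of the SQDS per hourglass chain and per triangle edge. All field names are our own, and the \texttt{Any}-typed fields stand for the substructures of \cref{lem:sp_intersection_ds,lem:separated-cones-seg-ds} and the cutting trees.
\begin{verbatim}
# Illustrative record layout of the SQDS (field names are ours).
from dataclasses import dataclass, field
from typing import Any, List

@dataclass
class ChainRecord:                        # per upper/lower hourglass chain
    num_nonempty_cones: int = 0           # H1
    right_bounds_exit_right: Any = None   # H2: crossing-count structure
    left_bounds_exit_left: Any = None     # H3: crossing-count structure

@dataclass
class EdgeRecord:                         # per edge uv of triangle uvw
    all_cones: Any = None                 # T1
    subcones_vw: Any = None               # T2.1 (+ T2.2 / T2.3 boundaries)
    subcones_uw: Any = None               # T3
    left_bounds_hitting_vw: Any = None    # T4: cutting tree
    right_bounds_hitting_uw: Any = None   # T5: cutting tree

@dataclass
class TriangleRecord:
    num_points_inside: int = 0
    edge_records: List[EdgeRecord] = field(default_factory=list)
\end{verbatim}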
\begin{lemma} \label{lem:sqds-size-and-build-time} The SQDS requires \(\bO(nm^{2 + \eps} + n^2)\) space and can be constructed in time \(\bO(nm^{2 + \eps} + nm\log n + n^2\log m)\). \end{lemma} \begin{proof} During preprocessing, we construct the \spds\ and explicitly store all the hourglasses, requiring \(\bO(n\log n)\) space and time (\cref{sec:prelims}). We augment the decomposition with our data structures. There are \(\bO(n)\) hourglasses and \(\bO(n)\) triangles, so we need (by \cref{lem:sp_intersection_ds}) at most \(\bO(nm^{2 + \eps} + n^2)\) space.
For each of our data structures, we need the visibility cones to the diagonals of the triangles and hourglasses to compute the data structures. We compute these in \(\bO(m\log n)\) per triangle or hourglass, giving a total time of \(\bO(nm\log n)\). Constructing our data structures, given the visibility cones, takes additionally \(\bO(nm^{2 + \eps} + n^2\log m)\) time. In total, we construct the segment query data structure in time \(\bO(nm^{2 + \eps} + nm\log n + n^2\log m)\). \end{proof}
Next, we describe how to query the data structure, so that we can efficiently count visible entities. Given a query segment \(\queryseg\), we compute the number of visible entities in \(A\) from \(\queryseg\) using the SQDS. We obtain the start triangle \(T\) containing \(p\), \(\bO(\log n)\) hourglasses, and an end triangle containing \(q\). Since all entities of \(A\) inside the hourglasses and the two triangles are visible, we sum up their counts that we store in SQDS. It remains to count the objects in the end polygons bounded by the triangles and the two side polygons per hourglass.
\subsection{Counting Entities in the End Polygons}\label{sec:end} Let \(T = uvw\) be a triangle in our decomposition that contains \(p\). Assume without loss of generality that \(\seg{uv}\) is vertical with \(T\) to its right and \(v\) above \(u\) (see \cref{fig:triangle_cases}a). Let \(E(\seg{uv})\) be the end polygon adjacent to \(T\) left of \(\seg{uv}\), and let \(\C_{E(\seg{uv})}\) be the set of visibility cones \(V(s, \seg{uv}, E(\seg{uv}))\) over all points \(s \in A \cap E(\seg{uv})\). To shorten the phrasing, when we say that a cone \(C \in \C_{E(\seg{uv})}\) \emph{sees} a segment \(\queryseg\), we mean that there is mutual visibility between the apex of that cone and \(\queryseg\) in the complete polygon \(P\). A cone \(C \in \C_{E(\seg{uv})}\) sees \(\queryseg\) whenever \(\queryseg\) intersects \(C\) and there are no edges of \(P \setminus E(\seg{uv})\) that block visibility. First, consider the special case where \(\queryseg\) is contained in \(T\). Since there are no edges of \(P\) in \(T\), a cone \(C \in \C\) sees the segment if and only if \(C\) intersects it, and we conclude:
\begin{observation}\label{obs:enclosed-segment-ds} Using \DSPartRef{T1} stored per edge of the triangle \(T\), we can count the objects in all the end polygons of \(T\) that see a query segment \(\queryseg \subset T\) in \(\bO(\log m)\) time. \end{observation}
\subsubsection{From Segments to Piercing Segments}\label{sec:non_pierce} Now suppose that \(p \in T\) and \(q \notin T\). We assume that \(\queryseg\) pierces \(\seg{vw}\) (the case when \(\queryseg\) pierces \(\seg{uw}\) is symmetrical). In this and the next \lcnamecrefs{sec:non_pierce}, we consider a case distinction on the types of the cones, based on how they pass through \(T\) and some adjacent triangle. We argue that in all cases we can count the visible objects correctly.
We partition the set of cones \(\C\) into three classes using the vertex \(w\) of \(T\) (\cref{fig:triangle_cases}): \begin{itemize} \item blue cones \(\cblue\) pass completely above \(w\); \item red cones \(\cred\) pass completely below \(w\); and \item purple cones \(\cpurple\) contain \(w\). \end{itemize} Next, we argue that we can count the number of visible cones per class.
\begin{figure}
\caption{(a)~The three classes of cones. (b)~For the purple cones, it suffices to look at \(\conetop\). (c)~For the red cones, we test \(R_R\). (d)~For the blue cones, we need to look into the adjacent triangle.}
\label{fig:triangle_cases}
\end{figure}
\subparagraph*{Counting visible red cones.} Consider a cone \(C \in \cred\). Since \(C\) only intersects \(\seg{uv}\) and \(\seg{uw}\), and \(\queryseg\) exits the triangle through \(\seg{vw}\) by assumption, any part of \(\queryseg\) that \(C\) can see must be in \(T\) (see \cref{fig:triangle_cases}c). Hence, \(C\) sees \(\queryseg\) if and only if \(p\) lies below the left ray of \(C\).
\begin{lemma}\label{lem:count_red} Using \DSPartRef{T4} (resp.\@ \DSPartRef{T5}), we can count the number of red cones in \(\C_{E(\seg{uv})}\) that see \(\queryseg\), assuming \(\queryseg\) pierces \(\seg{vw}\) (resp.\@ \(\seg{uw}\)), in \(\bO(\log m)\) time. \end{lemma} \begin{proof} A red cone sees \(\queryseg\) if and only if it sees \(\seg{ps}\), the part of \(\queryseg\) contained in \(T\), where \(s\) is the point at which \(\queryseg\) exits \(T\). Moreover, red cones originate to the left of the supporting line of \(\seg{uv}\), and \(\seg{ps}\) is to the right of that line. Thus, we can employ the cutting tree we stored in \DSPartRef{T4} to count the cones that see \(\seg{ps}\). \end{proof}
\subparagraph*{Counting visible purple cones.} Consider a cone \(C \in \cpurple\); we show the following \lcnamecref{lem:top_sees_all}. \begin{lemma}\label{lem:top_sees_all} Let \(C \in \cpurple\) be bounded by the right and the left rays \(R_R\), \(R_L\), and let \(R_w\) be the ray from the apex of \(C\) through the vertex \(w\). The query segment \(\queryseg\) sees \(C\) if and only if it sees the cone \(\conetop\) bounded by \(R_L\) and \(R_w\). \end{lemma} \begin{proof} If \(\queryseg\) is visible to \(\conetop\), then it is visible to \(C\). It remains to show that if \(\queryseg\) is visible to \(C\), then it must be visible to \(\conetop\).
The point \(p\) is either contained in \(\conetop\), in the area of \(T\) above \(\conetop\), or in the area of \(T\) below \(\conetop\). If \(p\) is contained in the area below \(\conetop\), then, since \(\queryseg\) pierces \(\seg{vw}\), the segment \(\queryseg\) always intersects the ray from the apex of \(C\) through \(w\), and the segment is thus always visible to \(\conetop\) (and \(C\)). If \(p\) is contained in the area above \(\conetop\), then \(\queryseg\) intersects \(C\) if and only if it enters \(C\) through the left ray \(R_L\). If \(\queryseg\) intersects \(R_L\) in \(T\), then \(\queryseg\) and \(\conetop\) (and \(C\)) are always mutually visible, since there are no polygon vertices in \(T\) that may block the line of sight. If \(\queryseg\) intersects \(R_L\) outside of \(T\), then \(\queryseg\) cannot be visible to \(C \setminus \conetop\). Finally, if \(p\) is contained in \(\conetop\), then the segment \(\queryseg\) is always visible to \(\conetop\) (and \(C\)). \end{proof}
\Cref{lem:top_sees_all} implies that whenever \(\queryseg\) intersects \(\seg{vw}\), we can use \(\conetop \in \ctop\) instead of \(C \in \cpurple\), where \(\ctop\) is the collection of all such cones. These cones in \(\ctop\) match the definition of cones in \(\cblue\), and we store them together in \DSPartRef{T2}.
\subparagraph*{Counting visible blue and top cones.} Consider a cone \(C \in \ctop \cup \cblue\). Since \(C\) may see \(\queryseg\) outside triangle \(T\), we consider the triangle \(\tadj \neq T\) that is incident to \(\seg{vw}\). If \(q\) is inside \(\tadj\), we can use the data structure of \cref{lem:separated-cones-seg-ds} in \DSPartRef{T2} to count the number of visible blue cones, since visibility cannot be blocked in \(\tadj\). In the next \lcnamecref{sec:pierce}, we show that if \(q\) is outside \(\tadj\), we can count the visible blue cones in time \(\bO(\log nm)\) using \DSPartRef{T2} and \DSPartRef{T3}.
\subsubsection{Counting Visible Cones in an Adjacent Triangle}\label{sec:pierce} \begin{figure}
\caption{(a)~The three types of cones based on the intersection point \(s\). (b)~For the orange cones, we can test visibility along \(R_R\), and it can only be blocked by the upper chain \(\pi(v, q)\) of \(F\).}
\label{fig:piercing_case}
\end{figure}
We assume that \(\queryseg\) pierces \(T\) in the edge \(\seg{vw}\) and then passes through some triangle \(\tadj = wva\). Let \(s\) be the intersection point between \(\seg{vw}\) and \(\queryseg\).
Abusing (or overloading) notation, we again partition the set of cones \(\ctop \cup \cblue\) into three classes, but now using \(s\): the orange cones \(\corange\) pass above \(s\), the grey cones \(\cgrey\) pass below \(s\), and the green cones \(\cgreen\) contain \(s\) (\cref{fig:piercing_case}a). Since there are no polygon vertices in \(\tadj\), all the cones in \(\cgreen\) see \(\queryseg\). Next, we consider all the cones in \(\corange\)---all the cones in \(\cgrey\) are treated in a symmetrical fashion.
\begin{lemma}\label{lem:type_2_reduce_bottom} A cone \(C \in \corange\) sees \(\queryseg\) if and only if it sees \(\queryseg\) along the right cone boundary \(R_R\). \end{lemma} \begin{proof} Let \(R\) be a ray in \(C\) such that \(C\) sees \(\queryseg\) along \(R\). Let \(z\) be the intersection point of \(R\) with \(\queryseg\). Since the apex of \(C \in \corange\) lies left of \(\seg{vw}\), the slope of \(R\) and \(R_R\) must be lower than that of the supporting line of \(\queryseg\). By definition of the right cone boundary (and our chosen orientation of \(\seg{vw}\)), the slope of \(R\) is at least the slope of \(R_R\). Thus, if \(R\) intersects \(\queryseg\), then \(R_R\) intersects \(\queryseg\) left of that point of intersection. Since \(R\) realises mutual visibility between \(C\) and \(\queryseg\), the area bounded by \(R\) and \(\queryseg\) cannot contain any polygon vertices, and thus \(R_R\) must also realise mutual visibility between \(\queryseg\) and \(C\). \end{proof}
Let \(\B_c\) be the right rays of the cones of class \(c\), e.g.\@ \(\borange\) are the right rays of the cones in \(\corange\), and let \(\U_c\) be the left rays of the cones of class \(c\). Let \(N_c\) be the number of cones of class \(c\) that see \(\queryseg\). Similarly, define \(N_c(\seg{xy})\) as the number of cones of class \(c\) that see some segment \(\seg{xy}\). For some two points \(x\) and \(y\) and a class \(c\), denote the number of rays from \(\B_c\) along which we can see the segment \(\seg{xy}\) in \(P\) by \(N^\B_c(\seg{xy})\); and similarly, use \(N^\U_c(\seg{xy})\) for \(\U_c\).
\begin{lemma}\label{lem:visible} For the query segment \(\queryseg\), we have that \(\norange = \norange(\seg{ps}) + \norange^\B(\seg{sq})\). \end{lemma} \begin{proof} By \cref{lem:type_2_reduce_bottom}, an orange cone \(C\) can see \(\queryseg\) if and only if \(\queryseg\) is visible along \(R_R \subseteq C\). Hence, \(\norange = \norange^\B(\queryseg)\). Now observe that a ray in \(C\) cannot intersect both \(\seg{ps}\) and \(\seg{sq}\), since \(C\) is not a green cone. Finally, by \cref{lem:type_2_reduce_bottom}, \(\norange(\seg{ps}) = \norange^\B(\seg{ps})\). This implies the statement of the \lcnamecref{lem:visible}. \end{proof}
Therefore, we can count visibility of orange cones separately for \(\seg{ps}\) and \(\seg{sq}\). Let \(F\) be the funnel from \(q\) to \(\seg{vw}\).
\begin{lemma}\label{lem:only_top_chain_can_block} Let \(R\) be a ray in \(\borange\). Visibility of \(\seg{sq}\) along \(R\) is blocked if and only if \(R\) intersects the top boundary of the funnel \(F\) before intersecting \(\seg{sq}\). \end{lemma} \begin{proof} Note that \(\seg{pq}\) crosses \(\seg{vw}\), so \(\seg{sq} \subseteq F\). Consider the area bounded by the upper chain of \(F\) and \(\seg{sq}\). This area cannot contain any polygon edges or vertices. (See \cref{fig:piercing_case}b.) The point of intersection between \(R\) and \(\seg{sq}\) is contained within this area, if it exists. Moreover, the triangles \(\tadj\) and \(T\) immediately left of \(\seg{sq}\) also cannot contain any polygon vertices. Thus, either \(R\) realises mutual visibility, or \(R\) intersects a polygon edge that belongs to the funnel \(F\). \end{proof}
\begin{corollary}\label{cor:nb_orange} \(\norange^\B(\seg{sq}) = \SetSize{\borange} - X\), where \(X\) is the number of rays in \(\borange\) that intersect the upper boundary of the funnel. \end{corollary}
Now that we have established the necessary relations, we want to bring them together and present a way to count the blue and the top cones that see the query segment.
\begin{lemma}\label{lem:pierce_triangle_ds} Using \DSPartRef{T2}, we can compute the number of blue and top cones that see the segment \(\queryseg\), assuming \(p \in T\) and \(q \notin T \cup \tadj\), in time \(\bO(\log nm)\). \end{lemma} \begin{proof} Given \(\queryseg\), we compute \(s\) in \(\bO(1)\) time. We want to compute \(\ngreen + \ngrey + \norange\). Using the fact that the observations for the orange and the grey cones are symmetric, we can use \cref{lem:visible}. For the grey cones, the left cone boundaries matter instead of the right cone boundaries for the orange cones. We want to find \(\ngreen + \ngrey(\seg{ps}) + \norange(\seg{ps}) + \ngrey^\U(\seg{sq}) + \norange^\B(\seg{sq})\).
It is easier not to separate the classes when checking visibility for \(\seg{ps}\). Since \(\tadj\) and \(T\) contain no polygon vertices, we can find \(\ngreen + \ngrey(\seg{ps}) + \norange(\seg{ps})\) by using the data structure of \cref{lem:separated-cones-seg-ds} stored in \DSPartRef{T2} in \(\bO(\log m)\) time by querying visibility of \(\seg{ps}\).
Finally, we determine \(\ngrey^\U(\seg{sq})\) and \(\norange^\B(\seg{sq})\). We argue the query time for \(\norange^\B(\seg{sq})\); the query for \(\ngrey^\U(\seg{sq})\) can be done symmetrically.
The proof is illustrated by \cref{fig:piercing_case}b. We show that we can uniquely count all right rays that do not contribute to \(\norange^\B(\seg{sq})\). Again, we do not want to explicitly classify the cones into green, orange, and grey, so we need a way to filter out the green and grey cones, as well as the orange cones that do not see \(\seg{sq}\).
For any right ray \(R_R \in \bgreen \cup \bgrey\), we know that the point \(s\) is \emph{above} the supporting line of \(R_R\) and either \begin{itemize}
\item \(R_R\) intersects \(F\), or
\item \(\seg{sq}\) is above the supporting line of \(R_R\). \end{itemize} For any right ray \(R_R \in \borange\), we know, by \cref{lem:only_top_chain_can_block,cor:nb_orange}, that \(s\) is \emph{below} the supporting line of \(R_R\) and either \begin{itemize}
\item \(R_R\) intersects \(F\), or
\item the orange cone sees \(\seg{sq}\) along \(R_R\). \end{itemize}
It immediately follows that \(\norange^\B(\seg{sq})\) is equal to the total number of rays \(\SetSize{\bgreen \cup \bgrey \cup \borange}\) minus the number of right rays whose supporting line is below \(\seg{sq}\) and the number of right rays that intersect \(F\). We store the total count of the cones in \DSPartRef{T2}. We can compute the first count we subtract by reusing one part of the data structure of \cref{lem:separated-cones-seg-ds} we store in \DSPartRef{T2}. We can compute the second count using the data structure of \cref{lem:sp_intersection_ds} in \DSPartRef{T2}. Together, these queries require \(\bO(\log m + \log n) = \bO(\log nm)\) time. \end{proof}
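The counting step of this proof is summarised by the small Python sketch below; the two predicates are stand-ins for the queries to \DSPartRef{T2}, and the toy counts are arbitrary.
\begin{verbatim}
# Inclusion-exclusion over all stored right rays (green, grey and
# orange together); the predicates stand in for the T2 queries.
def visible_orange_count(right_rays, below_sq, crosses_funnel):
    """N^B_orange(sq) = |rays| - #(line below sq) - #(crossing F)."""
    total = len(right_rays)
    num_below = sum(1 for r in right_rays if below_sq(r))
    num_cross = sum(1 for r in right_rays if crosses_funnel(r))
    return total - num_below - num_cross

# Usage (toy counts): 10 rays, 4 pass below sq, 3 hit the funnel.
rays = list(range(10))
print(visible_orange_count(rays,
                           lambda r: r < 4,
                           lambda r: 4 <= r < 7))   # 3
\end{verbatim}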
Now that we have a procedure to count the blue and the top cones, we want to bring our results together to count all visible entities in end polygons. We now combine \cref{lem:count_red,lem:top_sees_all,lem:pierce_triangle_ds} into the following \lcnamecref{lem:triangle_count_ds}.
\begin{lemma}\label{lem:triangle_count_ds} Using \DSPartRef{T1--T5} of our SQDS, we can, given a query segment \(\seg{pq}\) with \(p \in T\), and funnels from \(q\) to the edges of \(T\), count the number of visible entities in the end polygons of \(T\) in time \(\bO(\log nm)\). \end{lemma} \begin{proof} With the query segment and the given funnels, we can determine which edges of \(T\) are incident to end polygons and thus should be queried. By \cref{lem:top_sees_all}, we can count the purple cones as the blue cones, and they are already stored in \DSPartRef{T2}. The red and the blue (or top) cones are queried separately, using \DSPartRef{T4--T5} and \DSPartRef{T2--T3}, respectively. Using \cref{lem:count_red,lem:pierce_triangle_ds}, we then query the data structures associated with the edges of \(T\) to count the cones of the different classes for each end polygon. This requires \(\bO(\log m)\) and \(\bO(\log nm)\) time for the different classes per edge, thus giving a total query time of \(\bO(\log nm)\). \end{proof}
\subsection{Counting Entities in Side Polygons} We now describe how to count entities in the side polygons of the hourglasses. Let \(H\) be an hourglass that covers a part of query segment \(\queryseg\) (see \cref{fig:hourglass-visibility}).
\begin{figure}
\caption{Cones entering from side polygon \(S_U\) that (a)~see or (b)~do not see \(\queryseg\).}
\label{fig:hourglass-visibility}
\end{figure}
\begin{lemma}\label{lem:hourglassvisibility} Let \(H\) be an hourglass with diagonals \(D_L = \seg{u_Lv_L}\) and \(D_R = \seg{u_Rv_R}\), let \(S_U\) be the side polygon bounded by the upper chain of \(H\), and let \(\seg{pq}\) be a segment that intersects both \(D_L\) and \(D_R\), with \(p\) to the left of \(D_L\) and \(q\) to the right of \(D_R\). Let \(a \in S_U\) be a point with a non-empty visibility cone \(C\) into \(H\). Then point \(a\) does not see \(\queryseg\) if and only if either: \begin{itemize}
\item the right boundary \(R_R\) of \(C\) intersects \(\pi(v_R, q)\), or
\item the left boundary \(R_L\) of \(C\) intersects \(\pi(v_L, p)\). \end{itemize} \end{lemma} \begin{proof} First, assume that \(a\) does not see \(\queryseg\). We argue that \(R_R\) intersects \(\pi(v_R, q)\) or \(R_L\) intersects \(\pi(v_L, p)\). Assume for the sake of contradiction that neither condition holds. Let \(I\) be the region bounded by \(\seg{v_Lv_R}\), \(\pi(v_R, q)\), \(\queryseg\), and \(\pi(p, v_L)\). Since \(C\) can see points in \(H\) along \(R_R\) and \(R_L\), \(R_R\) and \(R_L\) enter the region \(I\) through \(\seg{v_Lv_R}\), or \(a\) already lies inside \(I\). Since \(R_R\) is a ray, it must also exit \(I\), and by definition it cannot exit through \(\seg{v_Lv_R}\). It cannot exit \(I\) through \(\queryseg\), either, as that would mean \(a\) can see \(\queryseg\). Furthermore, by our assumption, \(R_R\) also does not intersect \(\pi(v_R, q)\). Hence, \(R_R\) intersects \(\pi(v_L, p)\). Using an analogous argument, \(R_L\) must intersect \(\pi(v_R, q)\). However, it now follows that the intersection point \(s = \queryseg \cap D_L\) lies inside the cone \(C\), and must therefore be visible to \(a\) (i.e.\@ nothing above \(H\) can intersect \(\seg{as}\), and inside \(H\) \(\seg{as}\) also does not intersect any polygon vertices). Hence, \(a\) sees \(\queryseg\). Contradiction.
Now assume that \(R_R\) intersects \(\pi(v_R, q)\) (the case that \(R_L\) intersects \(\pi(v_L, p)\) is symmetric). We now argue that \(a\) cannot see \(\queryseg\). Let \(I_R\) be the region bounded by \(\queryseg\), \(D_R\), and \(\pi(v_R, q)\). A point \(s\) on \(\queryseg\) is visible via a ray \(R\), entering via \(D_R\), if it first exits the region \(I_R\) via \(\queryseg\). Since \(R_R\) is not obstructed in \(H\), it must enter \(I_R\) via \(D_R\). In addition, by assumption, it first exits via \(\pi(v_R, q)\). If \(R_R\) intersects \(\pi(v_R, q)\) once, then by convexity of \(\pi(v_R, q)\), it follows that \(q\) is below \(R_R\) and thus below any ray \(R\) in the cone \(C\), thus it is not visible. If \(R_R\) intersects \(\pi(v_R, q)\) twice, there is a subsegment of \(\queryseg\) above \(R_R\). The ray \(R_R\) now partitions \(I_R\) into three regions: one below \(R_R\), containing points that cannot be visible, and two regions above \(R_R\). The right region contains the subsegment of \(\queryseg\) that is still above \(R_R\). Consider now any ray \(R\) that could be a visibility ray to a point \(x \in \queryseg\). This ray must be above \(R_R\) and must intersect \(\queryseg\) at \(x\). This means that it must traverse the region \(I_R\) from \(D_R\) to \(x\). But since \(R\) must be above \(R_R\), it follows that it must cross the two regions above \(R_R\), which are separated by a polygon boundary. Thus, no \(x \in \queryseg\) is visible from \(a\). \end{proof}
\begin{lemma}\label{lem:hourglass-visibility-ds} Using \DSPartRef{H1}, \DSPartRef{H2}, and \DSPartRef{H3} stored with each chain of hourglass \(H\) in our SQDS, we can count the visible objects in the side polygons of \(H\) in time \(\bO(\log nm)\). \end{lemma} \begin{proof} By \cref{lem:hourglassvisibility}, we can count the number of visible objects from the upper side polygon by taking the total number of objects with non-empty visibility cones from the upper side polygon and subtracting those for which either \(R_R\) intersects \(\pi(v_R, q)\) or \(R_L\) intersects \(\pi(v_L, p)\). Counting for the lower side polygon is symmetrical.
We store the number of entities with non-empty visibility cones from the side polygon in \DSPartRef{H1}. Then, we query our \DSPartRef{H2} and \DSPartRef{H3} data structures to count the number of visibility cones that exit through \(D_L\) and \(D_R\) that do not see \(\queryseg\). This requires \(\bO(\log m + \log n) = \bO(\log nm)\) query time per chain, yielding the total time. \end{proof}
We are now ready to bring all results together and prove \cref{thm:point_segment}.
\theoremPointSegment* \begin{proof} The size and preprocessing time follow from \cref{lem:sqds-size-and-build-time}. For the query, we first acquire the polygon cover of the query segment via the \spds\ in \(\bO(\log n)\) time (\cref{sec:prelims}). We sum up the number of entities contained in the \(\bO(\log n)\) hourglasses and \(\bO(1)\) triangles in \(\bO(\log n)\) time. In case the query segment is contained in a single triangle \(T\) of the polygon decomposition, we use \DSPartRef{T1} to add the entities visible in all end polygons of \(T\) in \(\bO(\log m)\) time (\cref{obs:enclosed-segment-ds}). Otherwise, for each of the hourglasses, we query the associated data structure of \cref{lem:hourglass-visibility-ds} to obtain the visible objects in the side polygons of the hourglass in time \(\bO(\log nm)\) per hourglass, which contributes \(\bO(\log n \log nm)\) to the query time. We compute the funnels to the edges of the triangles in \(\bO(\log^2 n)\) time and query the triangle data structures in \(\bO(\log nm)\) time, which is dominated by the hourglass query time. \end{proof}
\section{Segment Query for a Set of Segments}\label{sec:segment_segment} As a natural extension to the previous data structure, we now consider the problem where we have a set \(A\) of non-degenerate segments and want to determine the number of visible segments from query segment \(\queryseg\). As we show next, we can reuse the approach of the previous \lcnamecref{sec:point_segment} with some minor additions and answer this query in polylogarithmic time.
A difficulty that arises in this setting is that the entities in \(A\) are no longer partitioned by the polygon cover of \(\queryseg\), that is, segments in \(A\) may start or end in the polygon cover or pass through the cover. To be able to correctly count the visible cones, we propose instead to count the cones that we \emph{cannot} see and subtract this from the total count.
\subsection{Using Visibility Glasses} To compute visibility of the segments in \(A\), we use \emph{visibility glasses} (\cref{sec:prelims}) for our segments. Let \(\seg{ac}\) be a segment to the left of a (vertical) diagonal \(D = \seg{uv}\) in \(P\) (see \cref{fig:visibility-glass}). We now check what \(\seg{ac}\) sees in the subpolygon to the right of the diagonal. To do this, we construct the visibility glass \(L(\seg{ac}, D)\) between \(\seg{ac}\) and the diagonal. Eades et al.~\cite{eades20} show that \(L(\seg{ac}, D)\) is an hourglass defined by some subsegment \(\seg{or} \subseteq \seg{ac}\) and a subsegment \(\seg{wx} \subseteq D\) (potentially, \(o = r\) or \(w = x\)). We can now compute the lines connecting the opposite endpoints of the visibility glass that still provide visibility, that is, the lines through \(\seg{ox}\) and \(\seg{rw}\). Note that these lines define the most extreme slopes under which there can still be visibility. These two lines intersect in a single point \(i\). We now consider this point and the lines through it as a new cone that describes the visible region to the right of diagonal \(D\). We call this cone the \emph{visibility glass cone.} Note that, for points to the right of \(D\), the left and the right rays of the cone are actual visibility rays to the points \(o\) and \(r\) on the line segment.
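Extracting the visibility glass cone thus only needs one line-line intersection once the endpoints \(o\), \(r\), \(w\), \(x\) are known; a small Python sketch follows (the helper and the sample coordinates are our own, and parallel supporting lines are not handled).
\begin{verbatim}
# Apex of the visibility glass cone: intersect the lines ox and rw.
def line_intersection(p1, p2, p3, p4):
    """Intersection of the lines p1p2 and p3p4 (assumed non-parallel)."""
    x1, y1 = p1; x2, y2 = p2; x3, y3 = p3; x4, y4 = p4
    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / denom
    return (x1 + t * (x2 - x1), y1 + t * (y2 - y1))

# Usage: o = (-2, 1), r = (-2, -1) on the segment ac; x = (0, -1),
# w = (0, 1) on the diagonal D.  The apex i is (-1, 0).
pt_o, pt_r = (-2.0, 1.0), (-2.0, -1.0)
pt_x, pt_w = (0.0, -1.0), (0.0, 1.0)
print(line_intersection(pt_o, pt_x, pt_r, pt_w))   # (-1.0, 0.0)
\end{verbatim}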
\begin{figure}
\caption{(a)~The visibility glass (dark region) inside the hourglass (orange region) from segment \(\seg{ac}\) to diagonal \(D = \seg{uv}\). (b)~The intersection point \(i\) and the two rays from \(i\) through \(w\) and \(x\) form a new visibility region in the subpolygon to the right of \(D\).}
\label{fig:visibility-glass}
\end{figure}
\begin{lemma}\label{lem:vis_glass_cone} Consider a polygon \(P\), split into subpolygons \(P_L\) and \(P_R\) by a diagonal \(D = \seg{uv}\), and let \(\seg{ac}\) be a line segment in \(P_L\). Let \(C\) be the visibility glass cone of \(\seg{ac}\) into \(P_R\) through \(D\). If some point \(p \in P_R\) sees \(\seg{ac}\), it must be in \(C\). \end{lemma} \begin{proof} Assume w.l.o.g.\@ that \(v\) is above \(u\), and let \(\seg{wx}\), with \(x\) above \(w\), be the part of \(D\) inside \(C\). See \cref{fig:visibility-glass}. Suppose for a contradiction that \(p\) sees \(\seg{ac}\) but is not in \(C\). Let \(q\) be a visible point on \(\seg{ac}\) and let \(L\) be the line segment connecting \(p\) and \(q\). Since \(D\) separates \(P_L\) from \(P_R\), and \(L\) must be inside \(P\) to be a visibility line, \(L\) must cross \(D\). Suppose w.l.o.g.\@ that \(L\) crosses \(D\) above \(x\), that is, above the left ray \(R_L\) of \(C\). The left ray must intersect a reflex vertex of the upper chain of its associated hourglass \(H\), so there is a region above \(R_L\) bounded by the upper chain, \(R_L\), and \(\seg{xv}\). Since \(L\) enters this region, it must also exit the region. There can only be visibility if \(L\) is inside \(P\), hence, it must exit the region via the edge bounded by \(R_L\). Therefore, its slope is higher than the slope of \(R_L\). By definition of the visibility glass, it then cannot see \(\seg{ac}\), leading to a contradiction. Thus, \(p\) is in \(C\). \end{proof}
\begin{corollary} If \(p\) is visible, the ray from \(p\) through the apex \(i\) of \(C\) is a visibility line to \(\seg{ac}\). \end{corollary}
\Cref{lem:vis_glass_cone} shows that the visibility glass cones are functionally the same as the visibility cones of points, thus we can reuse parts of our data structures of \cref{sec:point_segment}.
\begin{observation}\label{obs:seg_vis_seg} If a segment \(\seg{ac} \in A\) cannot see \(\queryseg\), it must be fully contained in a side or an end polygon. \end{observation}
This follows easily from the fact that if a segment \(\seg{ac}\) is not contained in either an end or a side polygon, then it is in the polygon cover or intersects the boundary of the polygon cover. This then means that \(\seg{ac}\) sees \(\queryseg\). It now suffices to count the segments in the side and the end polygons that are not visible to determine the total number of entities invisible to \(\queryseg\), and thus determine the number of entities in \(A\) visible to \(\queryseg\).
Since our data structure of \cref{sec:point_segment} can already correctly count visible entities from the end and the side polygons, we can simply determine the number of invisible entities by subtracting the visible count from the total number of entities in the end or side polygon.
\subsection{Extended Segment Query Data Structure} We base our new data structure on the SQDS presented in \cref{sec:point_segment}. In our new data structure, for the upper chain of an hourglass \(H\) with the incident side polygon \(S_U\), we store: \begin{description} \item[H1--3.] The same data structures as for the SQDS, but constructed using visibility glass cones on line segments that are inside \(S_U\). \item[H4.] The number of segments inside \(S_U\) with empty visibility glass cones into \(H\). \end{description} We store symmetrical structures for the lower chain.
For a triangle \(T = uvw\) in the polygon decomposition, for the edge \(\seg{uv}\) with the incident end polygon \(E(\seg{uv})\), we store: \begin{description} \item[T1--T5.] The same data structures as for the SQDS, but constructed using visibility glass cones on line segments that are inside \(E(\seg{uv})\). \item[T6.] The number of segments inside \(E(\seg{uv})\) with empty visibility glass cones into \(T\). \item[T7.] The number of segments inside \(E(\seg{uv})\) with non-empty visibility glass cones into \(T\). \end{description}
We now query the data structure as follows: for a given \(\queryseg\), we acquire the polygon cover in \(\bO(\log n)\) time, giving us \(\bO(\log n)\) hourglasses and bridges and \(\bO(1)\) end triangles. For each side and end polygon, we now query the associated data structures to get the number of visible segments for each subpolygon. Let \(N_\textnormal{vis}\) be the total number of visible segments as reported for the side and end polygons. Over all the encountered side and end polygons during the query, let \(N_\textnormal{closed}\) be the total number of empty visibility glass cones, and let \(N_\textnormal{open}\) be the total number of non-empty visibility glass cones. The total number of visible segments is now given by \(m - (N_\textnormal{open} - N_\textnormal{vis}) - N_\textnormal{closed}\).
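The final aggregation is plain arithmetic over the four counts; a minimal sketch with made-up counts is given below.
\begin{verbatim}
# Final count: subtract the invisible open cones and the closed cones.
def total_visible_segments(m, n_vis, n_open, n_closed):
    invisible = (n_open - n_vis) + n_closed
    return m - invisible

# Usage (toy counts): 20 segments, 12 open cones of which 9 see pq,
# and 3 closed cones: 20 - (12 - 9) - 3 = 14 visible segments.
print(total_visible_segments(20, 9, 12, 3))   # 14
\end{verbatim}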
\theoremSegmentSegment* \begin{proof} Since we only store a constant amount of extra data in our data structure per hourglass chain and triangle edge, the storage requirements are the same as for the SQDS. For construction of the new data structure, we need to compute the visibility glass cones for each side polygon of an hourglass and for all end polygons of a triangle. Using the data structure by Eades et al.~\cite{eades20}, we can compute the visibility glasses to all diagonals in \(\bO(nm\log n + n \log^5 n)\) time and extract the visibility glass cones in constant time per visibility glass. Asymptotically, this does not change the preprocessing time. Since the query only does a constant number of extra operations per hourglass and triangle, the query time is the same as the original SQDS query time. \end{proof}
\section{Extensions and Future Work}\label{sec:extra} In this \lcnamecref{sec:extra}, we present some natural extensions to our work and discuss possible variations, as well as the obstacles in the way of obtaining results in those settings.
\subsection{Subquadratic Counting}\label{sec:count} Given our data structures, we can generalise the problem: given two sets of points or line segments \(A\) and \(B\), each of size \(m\), in a simple polygon \(P\) with \(n\) vertices, count the number of pairs in \(A \times B\) that see each other. Using the work by Eades et al.~\cite{eades20} and further work it is based on, we can solve this problem by checking the visibility for all pairs. If \(n \gg m\), this approach is optimal. In particular, if both sets \(A\) and \(B\) consist of points, this yields a solution with running time \(\bO(n + m^2\log n)\); if one of the sets contains only segments, we need \(\bO(n\log n + m^2\log n)\) time. However, when \(m \gg n\), we want to avoid the \(m^2\) factor. Furthermore, the setting of \cref{sec:segment_segment} is novel, so we consider the full spectrum of trade-offs.
The trick is to use the following well-known technique. Suppose we have a data structure for visibility counting queries with query time \(Q(m, n)\) and preprocessing time \(P(m, n)\). Pick \(k = m^s\) with \(0 \leq s \leq 1\). We split the set \(A\) into sets \(A_1, \dots, A_k\), with \(\sfrac{m}{k}\) objects each; then we construct a data structure for each set. Finally, with each point in \(B\), we query these \(k\) data structures and sum up the counts. It is easy to see that the count is correct; the time that this approach takes is \(\bO\bigl(k \cdot P(\sfrac{m}{k}, n) + mk \cdot Q(\sfrac{m}{k}, n)\bigr)\). We need to pick \(s\) to minimise \(\bO\bigl(m^s \cdot P(m^{1 - s}, n) + m^{1 + s} \cdot Q(m^{1 - s}, n)\bigr)\).
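A generic driver for this trick is sketched below in Python; \texttt{build} and \texttt{query} stand for any of the data structures in this paper, and the brute-force stand-ins in the usage example are assumptions for illustration.
\begin{verbatim}
import math

def count_visible_pairs(A, B, build, query, s):
    """Split A into k = ceil(|A|**s) groups, build one structure per
    group, and query every element of B against every group."""
    m = len(A)
    k = max(1, math.ceil(m ** s))
    groups = [A[i::k] for i in range(k)]     # k groups of size ~ m/k
    structures = [build(g) for g in groups]
    return sum(query(ds, b) for ds in structures for b in B)

# Usage with brute-force stand-ins: the "structure" is the group
# itself and the toy predicate makes every pair visible.
sees = lambda a, b: True
A, B = [1, 2, 3, 4], [5, 6]
print(count_visible_pairs(
    A, B,
    build=lambda g: g,
    query=lambda g, b: sum(1 for a in g if sees(a, b)),
    s=0.5))   # 8 = |A| * |B|
\end{verbatim}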
Let us show the results for the various settings. Suppose that both sets \(A\) and \(B\) contain points. First, let us consider the approach of \cref{sec:point_point}. We have \(P(m, n) = \bO(n + m^{2 + \eps}\log n + m\log^2 n)\) and \(Q(m, n) = \bO(\log^2 n + \log n \log m)\). The summands depending only on \(n\) come from preprocessing of the polygon that only needs to be done once; so we get \begin{align*} &\mathrel{\hphantom{=}}\bO\bigl(n + m^s \cdot (m^{(1 - s)(2 + \eps)}\log n + m^{1 - s}\log^2 n) + m^{1 + s} \log^2 n + m^{1 + s}\log n \log m^{1 - s}\bigr)\\ &= \bO(n + m^{(1 - s)(2 + \eps) + s}\log n + m^{1 + s} \log^2 n + m^{1 + s} \log n \log m)\,. \end{align*} Unless \(n \gg m\), we pick \(s\) such that \((1 - s)(2 + \eps) + s = 1 + s\); we find \(s = \sfrac{(1 + \eps)}{(2 + \eps)}\). Therefore, the running time is \(\bO(n + m^{\sfrac{3}{2} + \eps'} \log n \log nm)\) for this choice of \(s\), where \(\eps' > 0\) is an arbitrarily small constant.
Alternatively, we could apply the arrangement-based method of \cref{sec:arrangement}. We have \(P(m, n) \in \bO(nm^2 + nm \log n)\) and \(Q(m, n) \in \bO(\log nm)\). Using the formula above, we get \[\bO\bigl(m^s \cdot (nm^{2 - 2s} + nm^{1 - s} \log n) + m^{1 + s} \cdot \log(nm^{1 - s})\bigr) = \bO(nm^{2 - s} + nm\log n + m^{1 + s}\log nm)\,.\] If \(m \gg n\), we can pick \(s\) to balance the powers of \(m\) in the terms; so we set \(s = \sfrac{1}{2}\) to get \[\bO(m^{\sfrac{3}{2}}\cdot(n + \log n + \log m) + nm\log n) = \bO(nm^{\sfrac{3}{2}} + m^{\sfrac{3}{2}}\log m + nm\log n)\,.\]
If \(n \gg m\), it is best to use the pairwise testing approach; however, if \(m \gg n\), the arrangement-based approach performs best, and if \(m \approx n\), we obtain best results with the decomposition-based approach of \cref{sec:point_point}.
Now suppose that one of the sets contains points and the other set contains line segments. As it turns out, using the approach of \cref{sec:point_segment} is always inefficient here; if \(m \gg n\), we can use the approach of \cref{sec:arrangement}, making sure that we do point queries, and otherwise pairwise testing is fastest.
Finally, suppose both sets consist of line segments. We have \(P(m, n) \in \bO(n^2\log m + nm^{2 + \eps} + nm\log n)\) and \(Q(m, n) \in \bO(\log^2 n + \log n \log m)\). We get \begin{align*} &\mathrel{\hphantom{=}}\bO\bigl(m^s \cdot (n^2 \log m^{1 - s} + nm^{(1 - s)(2 + \eps)} + nm^{1 - s}\log n) + m^{1 + s} \log n \log nm^{1 - s}\bigr)\\ &= \bO(n^2m^s\log m + nm^{(1 - s)(2 + \eps) + s} + nm\log n + m^{1 + s} \log^2 n + m^{1 + s} \log n \log m)\,. \end{align*} For \(n \gg m\), the time is dominated by \(\bO(n^2m^s\log m)\), so we pick \(s = 0\) and get \(\bO(n^2\log m + nm^{2 + \eps} + nm\log n)\) time. For \(n \approx m\) or \(m \gg n\), we balance the powers by picking \(s = \sfrac{(1 + \eps)}{(2 + \eps)}\) to get \(\bO(n^2 m^{\sfrac{1}{2} + \eps'}\log m + nm\log n + nm^{\sfrac{3}{2} + \eps'} + m^{\sfrac{3}{2} + \eps'}\log n \log m)\) time for this choice of \(s\), where \(\eps' > 0\) is an arbitrarily small constant.
\subsection{Preprocessing Polygons}\label{sec:ccobjects} Instead of line segments in the set \(A\), we can extend our approach to polygons in \(A\). So consider now the setting where we have a query segment \(\queryseg\) and a set of polygons \(A\).
For this extension, we mainly have to show that an equivalent to \cref{lem:vis_glass_cone} holds---the rest then easily follows as for line segments in \cref{sec:segment_segment}. We first have to define the visibility glass and the visibility glass cone. The visibility glass between a diagonal \(D = \seg{uv}\) and a polygon \(S \in A\) is defined as the set of segments \(\seg{aw}\) that lie inside \(P\), where \(a \in S\) and \(w \in D\). Without loss of generality, assume that \(D\) is vertical and that it splits \(P\) into subpolygons \(P_L\) and \(P_R\), left and right of \(D\), respectively, and assume \(S \subseteq P_L\).
Consider the segment \(\seg{ay}\) of the visibility glass with the highest slope. In case of ties, take the shortest segment, so the intersection of \(\seg{ay}\) and \(S\) consists of only \(a\). Similarly, define \(\seg{cx}\) as the segment with the lowest slope. Let \(L_L\) and \(L_R\) denote the supporting lines of \(\seg{ay}\) and \(\seg{cx}\), respectively. The visibility glass cone of \(S\) through \(D\) is then the cone defined by \(L_L\) and \(L_R\) that passes through \(D\). We are now ready to prove an equivalent to \cref{lem:vis_glass_cone}.
\begin{figure}
\caption{The visibility glass cone \(C\) for a region \(S\) and a diagonal \(D = \seg{uv}\).}
\label{fig:region-cone}
\end{figure}
\begin{lemma}\label{lem:vis_glass_cone_region} Consider a polygon \(P\), split into subpolygons \(P_L\) and \(P_R\) by a diagonal \(D = \seg{uv}\) between two vertices \(u\) and \(v\), and let \(S\) be a simple polygon in \(P_L\). Let \(C\) be the visibility glass cone of \(S\) into \(P_R\) through \(D\). If some point \(p \in P_R\) is visible to \(S\), it must be in \(C\). \end{lemma} \begin{proof} For a contradiction, assume that there is a point \(p \in P_R\) that sees \(S\) but lies outside of \(C\). Without loss of generality, suppose that \(p\) is below \(C\). Let \(s \in S\) be a point that is visible to \(p\), and let \(r \in S\) be the point closest to \(p\) on the line segment \(\seg{sp}\)---see also \cref{fig:region-cone}. Let \(w\) be the intersection point of \(\seg{sp}\) and \(D\). We first note that \(s\) cannot lie above (or on) \(L_R\). If that were the case, then the line segment \(\seg{ws}\) would have a lower slope than \(L_R\) and would be in the visibility glass, contradicting the definition of the visibility glass cone. So we can assume that \(s\) is below \(L_R\). However, if \(s\) is below \(L_R\), then so is \(r\). Now consider the region defined by \(\seg{cx}\), \(\seg{xw}\), \(\seg{wr}\), and the path from \(c\) to \(r\) along the boundary of \(S\) in clockwise direction (green region in \cref{fig:region-cone}). None of these segments or the path can be intersected by the polygon boundary, so the region is empty. However, in that case, also the line segment \(\seg{cw}\) must be in the visibility glass and has a lower slope, again contradicting the definition of the visibility glass cone. From this contradiction we can conclude that any \(p \in P_R\) that sees \(S\) must be inside the visibility glass cone \(C\). \end{proof}
Using this \lcnamecref{lem:vis_glass_cone_region}, we can apply the same methods as in \cref{sec:segment_segment} for a set of segments.
\subsection{Moving Points}\label{sec:moving_points} In the context of moving objects, we may interpret a segment as a moving object that traverses the segment from start to end with a constant velocity. This applies both to the objects in a given set \(A\) and to the query object. More formally, consider objects \(p\), \(q\) that have trajectories \(p(t)\colon \R \to P \subseteq \R^2\) and \(q(t)\colon \R \to P \subseteq \R^2\) inside a polygon \(P\). We say that \(p\) and \(q\) are \emph{mutually visible} in \(P\) if and only if at some time \(t\), the line segment \(\seg{p(t)q(t)}\) is inside the polygon \(P\). In this case, we could be interested in counting how many objects can be seen by the query object at some point during their movement. Note that the settings we discuss in \cref{sec:point,sec:point_segment} lend themselves to this interpretation immediately, since either the query or the objects of \(A\) do not move. On the other hand, the setting of a query segment with a set of segments from \cref{sec:segment_segment} does not translate to moving objects.
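As a sanity check of this definition (and explicitly not as an efficient solution), mutual visibility of two moving points can be tested by brute force: sample the shared time interval and test whether the connecting segment lies inside \(P\). The sketch below assumes the Python library \texttt{shapely}; it may miss visibility windows shorter than the sampling step.
\begin{verbatim}
# Brute-force check of the mutual-visibility definition; illustrative only.
from shapely.geometry import Polygon, Point, LineString

def mutually_visible(polygon_coords, p0, p1, q0, q1, steps=1000):
    P = Polygon(polygon_coords)
    for i in range(steps + 1):
        t = i / steps
        p = (p0[0] + t * (p1[0] - p0[0]), p0[1] + t * (p1[1] - p0[1]))
        q = (q0[0] + t * (q1[0] - q0[0]), q0[1] + t * (q1[1] - q0[1]))
        seg = Point(p) if p == q else LineString([p, q])
        if P.covers(seg):  # covers() accepts segments touching the boundary
            return True
    return False
\end{verbatim}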
Eades et al.~\cite{eades20} present a data structure that supports determining whether two query objects see each other at some point in time by preprocessing only the polygon. There is no obvious extension to their data structure that also preprocesses the set of objects. A possible (slow) solution would be to track time as a third dimension and construct the visibility polygon of each point \(p \in A\) as it moves. Given a moving object as a query, we would then need to count the visibility polygons (that include a time dimension) that are pierced by the segment. It seems difficult to avoid double counting the points in this scenario; actually solving this problem would be an interesting continuation of the work presented in this paper.
\subsection{Query Variations}\label{sec:query_variations} There are many other settings that one could consider as extensions of this work. For instance, we could solve the simpler problem of testing visibility: given a query point \(q\) or line segment \(\queryseg\), check if it sees any object in the set \(A\). Surprisingly, it does not seem easy to simplify our approaches to answer this question more efficiently. We could also consider the reporting version of the problem rather than counting; this works immediately for the point query approaches of \cref{sec:point}, but our use of inclusion--exclusion arguments for segment queries in \cref{sec:point_segment,sec:segment_segment} prevents us from easily adapting those to reporting in time proportional to the number of reported segments. Finally, when considering segments, one can ask many other questions: how much of each segment is seen by a query segment and vice versa, for each segment or in total; these questions and more can also be considered for moving objects as in \cref{sec:moving_points}. All of these would be highly exciting directions for future work.
\end{document}
\begin{document}
\maketitle
\begin{abstract} We study the `Up the River' problem formulated by Aldous \cite{aldous02}, where a unit drift is distributed among a finite collection of Brownian particles on $ \mathbb{R} _+ $, which are annihilated once they reach the origin.
Starting $ K $ particles at $ x=1 $, we prove Aldous' conjecture \cite{aldous02} that the `push-the-laggard' strategy of distributing the drift asymptotically (as $ K\to\infty $) maximizes the total number of surviving particles, with approximately $ \frac{4}{\sqrt{\pi}} \sqrt{K} $ surviving particles.
We further establish the hydrodynamic limit of the particle density, in terms of a two-phase \ac{PDE} with a moving boundary, by utilizing certain integral identities and coupling techniques. \end{abstract}
\section{Introduction}\label{sect:intro}
In this paper we study the `Up the River' problem formulated by Aldous \cite{aldous02}.
That is, we consider $ K $ independent Brownian particles, which all start at $ x=1 $, and are absorbed (annihilated) once they hit $ x=0 $.
Granted a unit drift, we ask what is the optimal strategy of dividing and allocating the drift among all surviving particles in order to maximize the number of particles that survive forever.
More precisely, letting $ B_i(t) $, $ i=1,\ldots,K $, denote independent standard Brownian motions, we define the model as an $ \mathbb{R} ^K_+ $-valued diffusion $ ( X_i(t);t\geq 0)_{i=1}^K $, satisfying \begin{align}\label{eq:X:SDE}
X_i(t) = 1 + B_i(t\wedge\tau_i) + \int_0^{t\wedge\tau_i} \phi_i(s) ds. \end{align}
Here $ \tau_{i}:=\inf\{t>0:X_i(t)=0\} $ denotes the absorption time of the $ i $-th particle, and the strategy is any $ [0,1]^K $-valued, $ \{B_i(t)\}_{i=1}^K $-progressively measurable function $ (\phi_i(t);t\geq 0)_{i=1}^K $ such that $ \sum_{i=1}^K \phi_i(t) \leq 1 $, $ \forall t\geq 0 $.
Our goal is to maximize $ \widetilde{U}(\infty) $, where \begin{align*}
\widetilde{U}(\infty) := \lim_{t\to\infty} \widetilde{U}(t),
\quad
\widetilde{U}(t) := \# \{ i: X_i(t) >0 \}. \end{align*}
Here $ \widetilde{U}(t) $ actually depends on $ K $, but we \emph{suppress} the dependence in this notation, and reserve notations such as $ \widetilde{U}_K(t) $ for \emph{scaled} quantities.
Inspired by the `Up the River: Race to the Harbor' board game, this simple model serves as a natural optimization problem for a random environment with limited resources.
For $ K=2 $, \cite{mckean06} obtains an explicit expression of the law of $ \widetilde{U}(t) $, and for large $ K $, numerical results are obtained in \cite{han} for the discrete analog of \eqref{eq:X:SDE}.
Focusing on the asymptotic behavior as $ K\to\infty $, we prove that the optimal strategy is the na\"{\i}ve \textbf{push-the-laggard strategy}
\begin{align}\label{eq:pushlaggard}
\phi_i(t) := \ind_\Set{ X_i(t) = Z(t) },
\quad
\text{ where }
Z(t) := \min\{X_i(t) : X_i(t)>0\}, \end{align} which allocates all the unit drift to the \textbf{laggard} $ Z(t) $.
\begin{remark} Due to the recurrent nature of Brownian motions in one dimension, ties do occur in \eqref{eq:pushlaggard}, namely $ \operatorname{\mathbf{P}}(\#\{i: X_{i}(s)=Z(s)\} >1, \text{for some } s\leq t) > 0 $, for all large enough $ t $.
Here we break the ties in an \emph{arbitrarily} fixed manner. That is, any strategy $ (\phi_i(t))_{i=1}^K $ satisfying \begin{align}\label{eq:pushlaggard:}
\sum_{ i: X_i(t) =Z(t) } \phi_i(t) = 1 \end{align} is regarded as a push-the-laggard strategy.
As the analysis in this paper is independent of the exact choice of breaking the ties, hereafter we fix some arbitrary way of breaking the ties and refer to \eqref{eq:pushlaggard:} as \emph{the} push-the-laggard strategy. \end{remark}
\noindent Furthermore, we prove that, due to self-averaging, $ \widetilde{U}(\infty) $ is in fact deterministic to the leading order, under the push-the-laggard strategy. More explicitly, $ \widetilde{U}(\infty) \approx \frac{4}{\sqrt{\pi}} K^{1/2} $. Define the scaled process \begin{align*}
\widetilde{U}_K(t) := \tfrac{1}{\sqrt{K}} \widetilde{U}(tK). \end{align*} The following is our main result:
\begin{theorem}
\begin{enumerate}[label=(\alph*)]
\item[]
\item\label{enu:aldous:upbd}
Regardless of the strategy,
for any fixed $ n<\infty $ and $ \gamma \in (0,\frac14) $, we have
\begin{align}
\label{eq:aldous:upbd}
\operatorname{\mathbf{P}}\big( \widetilde{U}_K(\infty) \leq \tfrac{4}{\sqrt{\pi}} + K^{-\gamma} \big)
\geq 1 - CK^{-n}, \quad \forall K<\infty,
\end{align}
where $ C=C(n,\gamma)<\infty $ depends only on $ n $ and $ \gamma $,
not on the strategy.
\item\label{enu:aldous:optl}
Under the push-the-laggard strategy,
for any fixed $ \gamma\in(0,\frac{1}{96}) $ and $ n<\infty $, we have
\begin{align}
\label{eq:aldous:optl}
\operatorname{\mathbf{P}}\big( |\widetilde{U}_K(\infty) - \tfrac{4}{\sqrt{\pi}} |
\leq
K^{-\gamma} \big) \geq 1 - CK^{-n}, \quad \forall K<\infty,
\end{align}
where $ C=C(\gamma,n)<\infty $ depends only on $ \gamma $ and $ n $. \end{enumerate} \label{thm:aldous} \end{theorem}
\begin{remark} While the exponent $ \frac14^- $ of the error term in Theorem~\ref{thm:aldous}\ref{enu:aldous:upbd} (originating from the control on the relevant martingales) is optimal, the choice of exponent $ \gamma\in(0,\frac{1}{96}) $ in Theorem~\ref{thm:aldous}\ref{enu:aldous:optl} is purely technical.
The latter may be improved by establishing sharper estimates, which we do not pursue in this paper. \end{remark}
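Purely as an illustration (none of our arguments relies on it), the constant $ \tfrac{4}{\sqrt{\pi}} \approx 2.2568 $ of Theorem~\ref{thm:aldous}\ref{enu:aldous:optl} can be observed numerically from a crude Euler discretization of \eqref{eq:X:SDE} under the push-the-laggard strategy \eqref{eq:pushlaggard}; the time step, horizon and tie-breaking below are ad hoc, and the discrete-time treatment of the absorption introduces a bias.
\begin{verbatim}
# Illustrative Monte Carlo for the push-the-laggard strategy (ad hoc
# discretization).  Compare the output with 4/sqrt(pi) ~ 2.2568.
import numpy as np

def survivors_over_sqrtK(K, T_scaled=2.0, dt=0.01, seed=0):
    rng = np.random.default_rng(seed)
    x = np.ones(K)                   # unscaled positions, started at x = 1
    alive = np.ones(K, dtype=bool)
    for _ in range(int(T_scaled * K / dt)):  # unscaled horizon T_scaled*K
        idx = np.flatnonzero(alive)
        if idx.size == 0:
            break
        # independent Brownian increments for the surviving particles
        x[idx] += np.sqrt(dt) * rng.standard_normal(idx.size)
        # the whole unit drift goes to the laggard (ties broken arbitrarily)
        x[idx[np.argmin(x[idx])]] += dt
        # absorption at the origin
        dead = idx[x[idx] <= 0.0]
        alive[dead] = False
        x[dead] = 0.0
    return alive.sum() / np.sqrt(K)

print(survivors_over_sqrtK(K=100))
\end{verbatim}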
Theorem~\ref{thm:aldous} resolves Aldous' conjecture \cite[Conjecture~2]{aldous02} in a slightly different form.
The intuition leading to such a theorem, as well as the main ingredient of its proof, is the hydrodynamic limit picture given in \cite{aldous02}.
To be more precise, we consider the diffusively scaled process $ X^K_i(t) := \frac{1}{\sqrt{K}} X_i(tK) $ and let $ Z_K(t) := \tfrac{1}{\sqrt{K}} Z(tK) $ denote the scaled process of the laggard.
Consider further the scaled complementary distribution function \begin{align}\label{eq:Uc}
\widetilde{U}_K(t,x) := \tfrac{1}{\sqrt{K}} \# \big\{ X^K_i(t) >x \big\}, \end{align} and let $ p(t,x) := \frac{1}{\sqrt{2\pi t}} \exp(-\frac{x^2}{2t}) $ denote the standard heat kernel.
Under the push-the-laggard strategy, we expect $ (\widetilde{U}_K(t,x),Z_K(t)) $ to be well-approximated by $ (\widetilde{U}_{\star}(t,x), z_{\star}(t)) $.
Here $ \widetilde{U}_{\star}(t,x) $ and $ z_{\star}(t) $ are deterministic functions, which are defined in \emph{two separate phases} as follows.
For $ t \leq \frac12 $, the \textbf{absorption phase}, we define \begin{align}
\label{eq:Ucs:abs}
&
\widetilde{U}_{\star}(t,x) := 2 p(t,x) + \int_0^t 2p(t-s,x) ds,
\quad
\forall t \leq \tfrac12, \ x \geq 0, \\
&
\label{eq:z=0}
z_{\star}(t) :=0,
\quad
\forall t \leq \tfrac12. \end{align} For $ t > \frac12 $, the \textbf{moving boundary phase}, letting $ p^\text{N}(t,y,x) := p(t,y-x) + p(t,y+x) $ denote the Neumann heat kernel, we define \begin{align}
\label{eq:Ucs:move}
\widetilde{U}_{\star}(t,x) := 2 p(t,x) + \int_0^t p^\text{N}(t-s,z_{\star}(s),x) ds,
\quad
\forall t \geq \tfrac12, \ x \geq z_{\star}(t), \end{align} where $ z_{\star}(t) $ is the unique solution to the following integral equation: \begin{align}\label{eq:zeq}
\left\{\begin{array}{l@{}l}
z_{\star}(\Cdot+\tfrac12) \in \mathcal{C}( \mathbb{R} _+), \ \text{nondecreasing },
z_{\star}(\tfrac12) = 0,
\\
\displaystyle
\int_0^\infty p(t-\tfrac12,z_{\star}(t)-y)
\big( \widetilde{U}_{\star}(\tfrac12,0) - \widetilde{U}_{\star}(\tfrac12,y) \big) dy
\\
\displaystyle
\quad\quad\quad\quad\quad
= \int_\frac12^t p(t-s,z_{\star}(t)-z_{\star}(s)) ds,
\quad \forall t \in (\tfrac12,\infty),
\end{array}\right. \end{align} As we show in Section~\ref{sect:Stef}, the integral equation~\eqref{eq:zeq} admits a unique solution.
The pair $ (\widetilde{U}_{\star},z_{\star}) $, defined by \eqref{eq:Ucs:abs}--\eqref{eq:zeq}, is closely related to certain \ac{PDE} problems, as follows.
Let $ \widetilde{\Phi}(t,y) := \operatorname{\mathbf{P}}(B(t) > y) $ denote the Brownian tail distribution function. For $ t\leq\frac12 $, a straightforward calculation (see Remark~\ref{rmk:cal}) shows that the function $ \widetilde{U}_{\star}(t,x) $ in \eqref{eq:Ucs:abs} is written as the tail distribution function of $ u_1(t,x) $: \begin{align}
\label{eq:U1}
\widetilde{U}_{\star}(t,x)
=
\int_{x}^\infty u_1(t,y) dy,
\quad
\forall t\leq \tfrac12, \end{align} where $ u_1(t,x) $ is defined as \begin{align}
\label{eq:u1}
u_1(t,x) &:= -2\partial_x p(t,x) + 4 \widetilde{\Phi}(t,x). \end{align} It is straightforward to check that this density function $ u_1 $ solves the heat equation on $ x>0 $ with a boundary condition $ u_1(t,0)=2 $: \begin{subequations}\label{eq:PDE<}
\begin{align}
&
\label{eq:HE<}
\partial_t u_1 = \tfrac12 \partial_{xx} u_1 \quad \forall 0< t< \tfrac12, \ x> 0, \\
&
\label{eq:DiriBC<}
u_1(t,0) =2, \quad \forall 0< t<\tfrac12, \\
&
\label{eq:PDEic}
\lim_{t\downarrow 0} (u_1(t,x) + 2 \partial_x p(t,x)) =0, \quad\forall x\geq 0.
\end{align} \end{subequations}
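For instance, the boundary condition \eqref{eq:DiriBC<} can be read off directly from \eqref{eq:u1}: since $ \partial_x p(t,x) = -\frac{x}{t}p(t,x) $ vanishes at $ x=0 $ and $ \widetilde{\Phi}(t,0)=\frac12 $, we have $ u_1(t,0) = -2\partial_x p(t,0) + 4\widetilde{\Phi}(t,0) = 2 $.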
For $ t >\frac12 $, we consider the following \textbf{Stefan problem}, a \ac{PDE} with a \emph{moving boundary}: \begin{subequations}\label{eq:PDE>}
\begin{align}
&
\label{eq:z2}
z_2\in \mathcal{C}([\tfrac12,\infty)) \text{ nondecreasing}, \ z_2(\tfrac12)=0,
\\
&
\label{eq:HE>}
\partial_t u_2 = \tfrac12 \partial_{xx} u_2, \quad \forall t > \tfrac12, \ x > z_2(t)
\\
&
u_2(\tfrac12,x) = u_1(\tfrac12,x), \quad \forall x\geq 0,
\\
&
\label{eq:DiriBC>}
u_2(t,z_2(t)) = 2, \quad \forall t\geq \tfrac12,
\\
&
\label{eq:StefBC}
2 \tfrac{d~}{dt}z_2(t) + \tfrac12 \partial_x u_2(t,z_2(t)) = 0, \quad \forall t> \tfrac12.
\end{align} \end{subequations} As we show in Lemma~\ref{lem:StefInt}, for each sufficiently smooth solution $ (u_2,z_2) $ to \eqref{eq:PDE>}, the functions $ \widetilde{U}_{\star}(t,x) := \int_{x }^\infty u_2(t,y) \ind_{\{ y\geq z_{\star}(t)\}} dy $ and $ z_{\star}(t) := z_2(t) $ satisfy \eqref{eq:Ucs:move}--\eqref{eq:zeq} for $ t \geq \frac12 $.
\begin{remark}\label{rmk:cal} To see why \eqref{eq:U1} holds, differentiate \eqref{eq:Ucs:abs} in $ x $ to obtain $ \partial_x \widetilde{U}_{\star}(t,x) = 2\partial_x p(t,x) - 2 \int_0^t \frac{x}{t-s} p(t-s,x) ds $.
Within the last integral, performing the change of variable $ y:= \frac{x}{\sqrt{t-s}} $, we see that $ \int_0^t \frac{x}{t-s} p(t-s,x) ds = 2\widetilde{\Phi}(t,x) $. From this \eqref{eq:U1} follows. \end{remark}
\begin{remark}\label{rmk:Stefan} Note that for Equation~\eqref{eq:PDE>} to make sense classically, one needs $ u_2(t,x) $ to be $ \mathcal{C}^1 $ \emph{up to} the boundary $ \{(t,z_2(t)):t\geq 0\} $ and needs $ z_2(t) $ to be $ \mathcal{C}^1 $.
Here, instead of defining the hydrodynamic limit classically through \eqref{eq:PDE>}, we take the integral identity and integral equation \eqref{eq:Ucs:move}--\eqref{eq:zeq} as the \emph{definition} of the hydrodynamic limit equation.
This formulation is more convenient for our purpose, and in particular it requires neither the smoothness of $ u_{\star} $ up to the boundary nor the smoothness of $ z_{\star} $.
We note that, however, it should be possible to establish classical solutions to \eqref{eq:PDE>}, by converting \eqref{eq:PDE>} to a parabolic variational inequality. See, for example, \cite{friedman10}. We do not pursue this direction here. \end{remark}
Before stating the precise result on hydrodynamic limit, we explain the intuition of how \eqref{eq:PDE<}--\eqref{eq:PDE>} arise from the behavior of the particle system.
Indeed, the heat equations \eqref{eq:HE<} and \eqref{eq:HE>} model the diffusive behavior of $ (X^K_i(t))_i $ away from $ Z_K(t) $.
In view of the equilibrium measure of gaps of the infinite Atlas model \cite{pal08}, near $ Z_K(t) $ we expect the particle density to be $ 2 $ to balance the drift exerted on $ Z_K(t) $, yielding the boundary conditions \eqref{eq:DiriBC<} and \eqref{eq:DiriBC>}.
The function $ -2\partial_x p(t,x) $ is the average density of the system without the drift. (The singularity of $ -2\partial_x p(t,x) $ at $ t=0 $ captures the overabundance of particles at $ t=0 $ compared to the scaling $ K^{1/2} $.)
As the drift affects little of the particle density near $ t=0 $, we expect the entrance law~\eqref{eq:PDEic}.
The absorption phase ($ t\leq \frac12 $) describes the initial state of the particle system with a high density, where particles are constantly being absorbed, yielding a fixed boundary $ Z_K(t) \approx 0 $.
Under the push-the-laggard strategy, the system enters a new phase at $ t\approx \frac12 $, where the density of particles is low enough ($ \leq 2 $ everywhere) so that the drift carries all remaining particles away from $ 0 $.
This results in a moving boundary $ Z_K(t) $, with an additional boundary condition~\eqref{eq:StefBC}, which simply paraphrases the conservation of particles $ \frac{d~}{dt} \int_{z_2(t)}^\infty u_2(t,y) dy =0 $.
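To spell out the last paraphrase (formally, assuming that $ u_2 $ is smooth up to the boundary and that $ \partial_x u_2(t,x)\to 0 $ as $ x\to\infty $): by \eqref{eq:HE>} and \eqref{eq:DiriBC>},
\begin{align*}
0 = \frac{d~}{dt} \int_{z_2(t)}^\infty u_2(t,y) dy
= - \tfrac{d~}{dt}z_2(t)\, u_2(t,z_2(t)) + \int_{z_2(t)}^\infty \tfrac12 \partial_{yy} u_2(t,y) dy
= - 2 \tfrac{d~}{dt}z_2(t) - \tfrac12 \partial_x u_2(t,z_2(t)),
\end{align*}
which is exactly \eqref{eq:StefBC}.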
The following is our result on the hydrodynamic limit of $ (\widetilde{U}_K(t,x), Z_K(t)) $:
\begin{theorem}[hydrodynamic limit] \label{thm:hydro} Under the push-the-laggard strategy, for any fixed $ \gamma\in(0,\frac{1}{96}) $ and $ T,n<\infty $, there exists $ C=C(T,\gamma,n)<\infty $ such that \begin{align}
&
\label{eq:hydro:U}
\operatorname{\mathbf{P}} \Big(
\sup_{ t\in[0,T], x\in \mathbb{R} }
\big\{ |\widetilde{U}_K(t,x)-\widetilde{U}_{\star}(t,x)| t^{\frac{3}{4}} \big\} \leq CK^{-\gamma}
\Big)
\geq
1 - CK^{-n},
\quad
\forall K<\infty, \\
&
\label{eq:hydro:Z}
\operatorname{\mathbf{P}} \Big( \sup_{t\in[0,T]}| Z_K(t)-z_{\star}(t)| \leq CK^{-\gamma} \Big)
\geq
1 - CK^{-n},
\quad
\forall K<\infty.
\end{align} \end{theorem}
\begin{remark} The factor of $ t^{\frac{3}{4}} $ in \eqref{eq:hydro:U} is in place to regulate the singularity of $ \widetilde{U}_K(t,x) $ and $ \widetilde{U}_{\star}(t,x) $ near $ t= 0 $.
Indeed, with $ \widetilde{U}_{\star}(t,x) $ defined in \eqref{eq:U1} for $ t\leq\frac12 $, it is standard to verify that $ \sup_{x\in \mathbb{R} } \widetilde{U}_{\star}(t,x) $ diverges as $ \frac{2}{\sqrt{2\pi t}} $ as $ t\downarrow 0 $.
With $ \widetilde{U}_K(t,x) $ defined in \eqref{eq:Uc}, we have that $ \widetilde{U}_K(0,x) = \sqrt{K} \ind_{\{x<1/\sqrt{K}\}} $, which diverges at $ x=0 $ as $ K\to\infty $.
This singularity at $ t=0 $ of $ \widetilde{U}_K(t,x) $ propagates into $ t>0 $, resulting in a power law singularity of the form $ |t|^{-\frac12} $.
The choice of the exponent $ \frac34 $ in \eqref{eq:hydro:U} is technical, and may be sharpened to $ \frac12 $ as discussed in the preceding, but we do not pursue this direction here. \end{remark}
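In particular, for $ t\leq\frac12 $, since by \eqref{eq:U1} $ \widetilde{U}_{\star}(t,\Cdot) $ is nonincreasing on $ \mathbb{R} _+ $, \eqref{eq:Ucs:abs} gives
\begin{align*}
\sup_{x\geq 0} \widetilde{U}_{\star}(t,x) = \widetilde{U}_{\star}(t,0)
= 2p(t,0) + 2\int_0^t p(u,0) du
= \frac{2}{\sqrt{2\pi t}} + \frac{4\sqrt{t}}{\sqrt{2\pi}},
\end{align*}
which makes the $ \frac{2}{\sqrt{2\pi t}} $ divergence mentioned in the preceding remark explicit.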
Under the push-the-laggard strategy \eqref{eq:pushlaggard}, the process $ (X^K_i(t))_i $ is closely related to the Atlas model \cite{fernholz02}. The latter is a simple special case of diffusions with rank-dependent drift:
see \cite{banner05,chatterjee10,chatterjee11,ichiba11,ichiba10,ichiba13}, for their ergodicity and sample path properties, and \cite{dembo12,pal14} for their large deviations properties as the dimension tends to infinity.
In particular, the hydrodynamic limit and fluctuations of the Atlas-type model have been analyzed in \cite{cabezas15, dembo15, hernandez15}.
Here we take one step further and analyze the combined effect of rank-dependent drift and absorption, thereby demonstrating the two-phase behavior.
With the absorption at $ x=0 $, previous methods of analyzing the large scale behaviors of diffusions with rank-dependent drift do not apply.
In particular, the challenge of proving Theorem~\ref{thm:hydro} originates from the lack of invariant measure (for the absorption phase) and the singularity at $ t=0 $, where a rapid transition from $ K $ particles to an order of $ K^{1/2} $ particles occurs.
Here we solve the problem by adopting a \emph{new} method of exploiting certain integral identities of the particle system that mimic \eqref{eq:Ucs:abs}--\eqref{eq:zeq}.
Even though here we mainly focus on the push-the-laggard strategy under the initial condition $ X_i(0)=1 $, $ \forall i $, the integral identities apply to general rank-dependent drifts and initial conditions, and may be used for analyzing general models with both rank-dependent drifts and absorption.
\subsection*{Outline} In Section~\ref{sect:Int}, we develop certain integral identities of the particle system $ X $ that are crucial for our analysis, and in Section~\ref{sect:Stef}, we establish the necessary tools pertaining to the integral equation~\eqref{eq:zeq}.
Based on results obtained in Sections~\ref{sect:Int}--\ref{sect:Stef}, in Sections~\ref{sect:hydro} and \ref{sect:aldous} we prove Theorems~\ref{thm:hydro} and \ref{thm:aldous}, respectively.
\subsection*{Acknowledgment} We thank David Aldous for suggesting this problem for research.
WT thanks Jim Pitman for helpful discussion throughout this work, and Craig Evans for pointing out the relation between \eqref{eq:PDE>} and parabolic variational inequalities.
LCT thanks Amir Dembo for enlightening discussion at the early stage of this work. LCT was partially supported by the NSF through DMS-0709248.
We thank the anonymous reviewers for their careful reading of the manuscript.
\section{Integral Identities} \label{sect:Int} Recall that $ p^\text{N}(t,x,y) $ denotes the Neumann heat kernel, and let $ \Phi(t,x) := \operatorname{\mathbf{P}}(B(t)\leq x) = 1 - \widetilde{\Phi}(t,x) $ denote the Brownian distribution function.
With $ z_{\star}(t) $ as in \eqref{eq:z=0} and \eqref{eq:zeq}, we unify the integral identities \eqref{eq:Ucs:abs} and \eqref{eq:Ucs:move} into a single expression as \begin{align}\label{eq:Ucs}
\widetilde{U}_{\star}(t,x) = 2 p(t,x) + \int_0^t p^\text{N}(t-s,z_{\star}(s),x) ds,
\quad
\forall t >0, \ x \geq z_{\star}(t). \end{align}
Essential to our proof of Theorems~\ref{thm:aldous} and \ref{thm:hydro} are certain integral identities of the \emph{particle system} $ X=(X(t);t\geq 0) $ that mimic the integral identities~\eqref{eq:Ucs}.
This section is devoted to deriving such identities of the particle system, particularly Proposition~\ref{prop:intXY} in the following.
As it turns out, in addition to the particle system $ X $, it is helpful to consider also the Atlas models.
We say that $ Y=(Y_i(t); t\geq 0 )_{i=1}^m $ is an \textbf{Atlas model} with $ m $ particles if it evolves according to the following system of stochastic differential equations: \begin{equation} \label{eq:alt}
dY_i(t)= \ind_\Set{ Y_i(t)=W(t)} dt + dB_i(t) \quad
\text{ for } 1 \leq i \leq m,
\quad
W(t) := \min\{ Y_i(t) \}. \end{equation} We similarly define the scaled processes $ Y^K_i(t) := \frac{1}{\sqrt{K}} Y_i(tK) $ and $ W_K(t) := \frac{1}{\sqrt{K}} W(tK) $. Note that here $ K $ is just a scaling parameter, not necessarily related to the number of particles in $ Y $.
To state the first result of this section, we first prepare some notations. Define the scaled empirical measures of $ X $ and $ Y $ as: \begin{align}
\label{eq:em}
\mu^K_{t}(\Cdot)
&:=
\frac{1}{\sqrt{K}} \sum\nolimits_{ \{i: X^K_i(t) >0\} } \delta_{X^K_i(t)}(\Cdot). \\
\label{eq:emY}
\nu^K_{t}(\Cdot)
&:= \frac{1}{\sqrt{K}} \sum_{i} \delta_{Y^K_i(t)}(\Cdot). \end{align} For any fixed $ x\geq 0 $, consider the tail distribution function $ \Psi(t,y,x) := \operatorname{\mathbf{P}}( B^\text{ab}_x(t) >y ) $, $ y>0 $, of a Brownian motion $ B^\text{ab}_x $, starting at $ B^\text{ab}_x(0)=x $ and absorbed at $ 0 $. More explicitly, \begin{align}
\label{eq:testf}
\Psi(t,y,x) &:= \Phi(t,y-x) - \widetilde{\Phi}(t,y+x), \end{align} which is the unique solution to the following equation \begin{subequations} \label{eq:testf:Eq} \begin{align}
\label{eq:testf:HE}
\partial_t \Psi(t,y,x) &= \tfrac12 \partial_{yy} \Psi(t,y,x), \forall t,y>0, \\
\Psi(t,0,x) &= 0, \forall t >0, \\
\Psi(0,y,x) &= \ind_{(x,\infty)}(y), \ \forall y>0. \end{align} \end{subequations} Adopt the notations $ t_K:=t+\frac1K $, $ \tau^K_i := K^{-1}\tau_i $ and $ \phi^K_i(t) := \phi_i(Kt) $ hereafter.
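Note that the formula \eqref{eq:testf} indeed satisfies \eqref{eq:testf:Eq}: the heat equation in $ y $ is inherited from $ p $, while, by the symmetry of the Gaussian, $ \Psi(t,0,x) = \Phi(t,-x) - \widetilde{\Phi}(t,x) = 0 $, and, for $ y>0 $ and $ x\geq 0 $, $ \Psi(0+,y,x) = \ind_{\{y>x\}} - \ind_{\{y<-x\}} = \ind_{(x,\infty)}(y) $.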
\begin{lemma} \label{lem:int} \begin{enumerate}[label=(\alph*)] \item[] \item For the particle system $ (X(t);t\geq 0) $, under any strategy, we have the following integral identity: \begin{align} \label{eq:int:abs:e} \begin{split}
\langle \mu^K_{t}, &\Psi(\tfrac1K,\Cdot,x) \rangle
=
\widetilde{G}_K(t_K,x) \\
&+ \sum_{i=1}^K \int_{0}^{t\wedge\tau^K_i} \phi^K_i(s) p^\text{N}(t_K-s,X^K_i(s),x) ds
+ M_{K}(t,x),
\quad
\forall t \in \mathbb{R} _+, \ x\geq 0, \end{split} \end{align} where \begin{align}
\label{eq:Gc}
\widetilde{G}_K(t,x) &:=
\sqrt{K} \Psi(t,\tfrac{1}{\sqrt{K}},x), \\
\label{eq:mg}
M_{K}(t,x)
&:=
\frac{1}{\sqrt{K}} \sum_{i=1}^K \int_{0}^{t\wedge\tau^K_i} p^\text{N}(t_K-s,X^K_i(s),x) dB^K_i(s). \end{align} \item Let $ (Y_i(t);t\geq 0)_{i} $ be an Atlas model. We have the following integral identity: \begin{align} \begin{split}
\langle \nu^K_{t}, &\Phi(\tfrac{1}{K}, x-\Cdot) \rangle
=
\langle \nu^K_{0}, \Phi(t_K,x-\Cdot) \rangle \\
\label{eq:int:atl:K}
&
- \int_{0}^{t} p(t_K-s,x-W_K(s)) ds
- N_{K}(t,x),
\quad
\forall t\in \mathbb{R} _+, \ x\in \mathbb{R} , \end{split} \end{align} where \begin{align}
\label{eq:mgY}
N_{K}(t,x)
&:= \frac{1}{\sqrt{K}} \sum_{i} \int_{0}^t p(t_K-s,Y^K_i(s)-x) dB^K_i(s). \end{align} \end{enumerate} \end{lemma}
\begin{remark} \label{rmk:int:meaning} To motivate our analysis in the following, here we explain the meaning of each term in the integral identity~\eqref{eq:int:abs:e}.
From the definitions \eqref{eq:Uc} and \eqref{eq:em} of $ \widetilde{U}_K(t,x) $ and $ \mu^K_{t} $, we have that $ \lim_{\varepsilon\to 0 } \langle \mu^K_{t}, \Psi(\varepsilon,\Cdot,x) \rangle = \widetilde{U}_K(t,x) $, so it is reasonable to expect the term $ \langle \mu^K_{t}, \Psi(\tfrac1K,\Cdot,x) \rangle $ on the l.h.s.\ to approximate $ \widetilde{U}_K(t,x) $ as $ K\to\infty $.
Next, consider a system $ (X^{\text{ab}}_i(t);t\geq 0)_{i=1}^K $ of independent Brownian particles starting at $ x=1 $ and absorbed at $ x=0 $, \emph{without} drifts. Letting $ X^{\text{ab},K}_i(t) := \frac{1}{\sqrt{K}} X^{\text{ab}}_i(Kt) $ denote the diffusively scaled process, with the corresponding scaled tail distribution function \begin{align}
\label{eq:Ucab}
\widetilde{U}^{\text{ab}}_K(t,x) := \tfrac{1}{\sqrt{K}} \# \{ i: X^{\text{ab},K}_i(t) > x \}, \end{align} it is standard to show that \begin{align}
\label{eq:Ucab:Gc}
\Ex(\widetilde{U}^{\text{ab}}_K(t,x)) = \sqrt{K} \operatorname{\mathbf{P}}( X^\text{ab}_1(Kt) > \sqrt{K} x )
= \sqrt{K} \Psi(Kt,1,\sqrt{K}x)
= \widetilde{G}_K(t,x). \end{align} That is, the term $ \widetilde{G}_K(t,x) $ on the r.h.s.\ \eqref{eq:int:abs:e} accounts for the contribution (in expectation) of the \emph{absorption}.
Subsequently, the time integral term $ \sum_{i=1}^K \int_0^{\tau^K_i}(\ldots) ds $ arises from the contribution of the drifts $ (\phi_i(t))_{i=1}^K $ allocated to the particles, while the martingale term $ M_{K}(t,x) $ encodes the random fluctuations due to the Brownian nature of the particles. \end{remark}
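In \eqref{eq:Ucab:Gc}, the last equality is Brownian scaling: $ t\mapsto X^{\text{ab},K}_1(t) = \tfrac{1}{\sqrt{K}} X^{\text{ab}}_1(Kt) $ is again a Brownian motion, started at $ \tfrac{1}{\sqrt{K}} $ and absorbed at $ 0 $, so that $ \Psi(Kt,1,\sqrt{K}x) = \operatorname{\mathbf{P}}( X^{\text{ab}}_1(Kt) > \sqrt{K}x ) = \operatorname{\mathbf{P}}( X^{\text{ab},K}_1(t) > x ) = \Psi(t,\tfrac{1}{\sqrt{K}},x) $, which equals $ \tfrac{1}{\sqrt{K}}\widetilde{G}_K(t,x) $ by \eqref{eq:Gc}.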
\begin{proof} Under the diffusive scaling $ X^K_i(t) := \frac{1}{\sqrt{K}} X_i(tK) $, we rewrite the SDE~\eqref{eq:X:SDE} as \begin{align}\label{eq:XK:SDE}
dX^K_i(t) = \phi^K_i(t) \sqrt{K} d(t\wedge\tau^K_i) + d B^K_i(t\wedge\tau^K_i). \end{align}
Fixing arbitrary $ t<\infty $, $ x\geq 0 $, with $ \Psi $ solving~\eqref{eq:testf:HE}, we apply It\^{o}'s formula to $ F_i(s) := \Psi(t_K-s,X^K_i(s),x) $ using \eqref{eq:XK:SDE} to obtain \begin{align}\label{eq:int:abs:F}
F_i(t\wedge\tau^K_i) - F_i(0)
=
\sqrt{K} \int_{0}^{t\wedge\tau^K_i} \phi^K_i(s) p^\text{N}(t_K-s,X^K_i(s),x) ds
+
M_{i,K}(t,x), \end{align} where $ M_{i,K}(t,x) := \int_{0}^{t\wedge\tau^K_i} p^\text{N}(t_K-s,X^K_i(s),x) dB^K_i(s) $.
With $ \Psi(s,0,x)=0 $, we have $ F_i(t\wedge\tau^K_i) = \Psi(\frac1K,X^K_i(t),x) $. Using this in \eqref{eq:int:abs:F}, summing the result over $ i $, and dividing both sides by $ \sqrt{K} $, we conclude the desired identity~\eqref{eq:int:abs:e}.
Similarly, the identity~\eqref{eq:int:atl:K} follows by applying It\^{o}'s formula with the test function $ \Phi(t_K-s,x-y) $. \end{proof}
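For the reader's convenience, we record the computation behind the last step. Writing the scaled dynamics as $ dY^K_i(t) = \sqrt{K}\, \ind_\Set{ Y^K_i(t)=W_K(t) } dt + dB^K_i(t) $, and using that $ (s,y)\mapsto\Phi(t_K-s,x-y) $ satisfies $ (\partial_s + \tfrac12\partial_{yy})\Phi(t_K-s,x-y) = 0 $, It\^{o}'s formula gives
\begin{align*}
\Phi(\tfrac1K,x-Y^K_i(t)) - \Phi(t_K,x-Y^K_i(0))
=
-\sqrt{K} \int_0^t \ind_\Set{ Y^K_i(s)=W_K(s) }\, p(t_K-s,x-Y^K_i(s)) ds
- \int_0^t p(t_K-s,x-Y^K_i(s)) dB^K_i(s).
\end{align*}
Summing over $ i $ (ties occupy a Lebesgue-null set of times and hence do not affect the drift integral) and dividing by $ \sqrt{K} $ yields \eqref{eq:int:atl:K}.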
Based on the identities \eqref{eq:int:abs:e} and \eqref{eq:int:atl:K}, we proceed to establish bounds on the empirical measures $ \mu^K_{t} $ and $ \nu^K_{t} $.
Hereafter, we use $ C=C(\alpha,\beta,\ldots)<\infty $ to denote a generic deterministic finite constant that may change from line to line, but depends only on the designated variables.
In the following, we will use the following estimates of the heat kernel $ p(t,x) $. The proof is standard and we omit it here. \begin{align}
&
\label{eq:p:Holdx}
|p(t,x) - p(t,x')| \leq C(\alpha) |x-x'|^{\alpha} t^{-\frac{1+\alpha}{2}},&
& \alpha\in(0,1], \\
&
\label{eq:p:Holdt}
|p(t,x) - p(t',x)| \leq C(\alpha) |t-t'|^{\frac{\alpha}{2}} (t')^{-\frac{1+\alpha}{2}},& & \alpha\in(0,1], \ t'<t<\infty. \end{align}
We adopt the standard notations $ \Vert \xi \Vert_n := (\Ex|\xi|^n)^{\frac1n} $ for the $ L^n $-norm of a given random variable $ \xi $
and $ | f | _{L^\infty(\Omega)} := \sup_{x\in \Omega} |f(x)| $ for the uniform norm over the designated region $ \Omega $.
\begin{lemma}\label{lem:emYbd} Let $ (Y_i(t);t \geq 0)_{i} $ be an Atlas model. The total number $ \#\{Y_i(0)\} $ of particles may be random but is independent of $ \sigma(Y_i(t)-Y_i(0);t\geq 0, i=1,\ldots) $.
Let $ \nu^K_{t} $ be as in \eqref{eq:emY}.
Assume $ (Y^K_i(0))_i $ satisfies the following initial condition: given any $\alpha\in(0,1) $ and $ n <\infty $, there exist $ D_* ,D_{\alpha,n} <\infty $ such that \begin{align}
\label{eq:D*}
\operatorname{\mathbf{P}}\big( \#\{ Y_i(0) \} \leq K \big)
&\geq 1 - \exp(-\tfrac{1}{D_*}K^{\frac12}), \\
\label{eq:Dan}
\big\Vert \langle \nu^K_{0},\ind_{[a,b]}\rangle \big\Vert_n
&\leq
D_{\alpha,n}|b-a|^{\alpha},
\quad
\forall |b-a| \geq \tfrac{1}{\sqrt{K}}. \end{align} For any given $ T<\infty $, we have \begin{align}
\label{eq:emY:bd}
&
\Vert \langle \nu^K_{s},\ind_{[a,b]}\rangle \Vert_{n}
\leq
C |b-a|^{\alpha} \Big( \big( \tfrac{|b-a|}{\sqrt{s_K}} \big)^{1-\alpha} +1 \Big), &
&
\forall \tfrac{1}{\sqrt{K}}\leq |b-a|, \ s\leq T, \\
\label{eq:emPY:bd}
&
\Vert \langle \nu^K_{s}, p(t_K,\Cdot-x) \rangle \Vert_{n}
\leq
C t_K^{\frac{\alpha-1}{2}} \Big( \big( \tfrac{t_K}{s_K} \big)^{\frac{1-\alpha}{2}} +1 \Big), &
&
\forall x\in \mathbb{R} , \ s,t<T, \end{align} where $ C=C(T,\alpha,n,D_*, D_{\alpha,n})<\infty $. \end{lemma}
\begin{proof} Fixing such $ T,\alpha,n $ and $ [a,b] $, throughout this proof we use $ C=C(T,\alpha,n,D_*, D_{\alpha,n})<\infty $ to denote a generic finite constant.
To the end of showing \eqref{eq:emY:bd}, we begin by estimating $
\Vert\langle \nu^K_{s}, \ind_{[a,b]} \rangle \Vert_1
=\Ex(\langle \nu^K_{s}, \ind_{[a,b]} \rangle ). $
To this end, we set $ x=b,a $ in \eqref{eq:int:atl:K}, take the difference of the resulting equation, and take expectations of the result to obtain \begin{align}
\label{eq:int:atl:Ex:}
\Ex(\langle \nu^K_{s}, \Phi(\tfrac1K,b-\Cdot)-\Phi(\tfrac1K,a-\Cdot) \rangle )
=
\Ex( J_1 ) + \Ex(J_2), \end{align} where \begin{align*}
J_1 &:= \langle \nu^K_{0}, \Phi(s_K,b-\Cdot)-\Phi(s_K,a-\Cdot) \rangle, \\
J_2 &:= -\int_{0}^s \big( p(s_K-u,b-W_K(u)) - p(s_K-u,a-W_K(u)) \big) du. \end{align*}
Further, with $ |b-a|\geq K^{-\frac12} $, it is straightforward to verify that \begin{align}
\label{eq:erf:ind}
\Phi(\tfrac1K,b-y)-\Phi(\tfrac1K,a-y) \geq \tfrac{1}{C} \ind_{[a,b]}(y). \end{align} Combining \eqref{eq:erf:ind} and \eqref{eq:int:atl:Ex:} yields \begin{align}
\label{eq:int:atl:Ex}
\Vert \langle \nu^K_{s}, \ind_{[a,b]} \rangle \Vert_1
\leq
C\Ex( J_1 ) + C\Ex(J_2). \end{align}
With \eqref{eq:int:atl:Ex}, our next step is to bound $ \Ex(J_1) $ and $ \Ex(J_2) $. For the former, we use $ \Phi(s_K,b-y)-\Phi(s_K,a-y) = \int_{a}^{b} p(s_K,z-y) dz $ to write $ J_1 = \int_{a}^{b} \langle \nu^K_0,p(s_K,x-\Cdot) \rangle dx $. Taking the $ L^m $-norm of the last expression yields \begin{align}
\label{eq:J1:Ln:}
\Vert J_1 \Vert_m
\leq
\int_{a}^{b} \big\Vert \langle \nu^K_{0},p(s_K,x-\Cdot) \rangle \big\Vert_m dx,
\quad
\forall m \in \mathbb{N} . \end{align} Further, as the heat kernel $ p(t,y-x) = \frac{1}{ \sqrt{t} } p(1,\frac{y-x}{\sqrt{t}}) $
decreases in $ |y-x| $, letting $ I_j(t,x) := x+[j\sqrt{t},(j+1)\sqrt{t}] $
and $ j_* := |j| \wedge |j+1| $, we have \begin{align} \begin{split}
\Vert \langle \nu^K_{s}, p(t,\Cdot-x) \rangle \Vert_m
&\leq
\Big\Vert
\sum_{j\in \mathbb{Z} } \frac{1}{ \sqrt{t} } p(1,j_*)
\langle \nu^K_{s}, \ind_{I_j(t,x)} \rangle
\Big\Vert_m \\
\label{eq:onion:Ln}
&\leq
\sum_{j\in \mathbb{Z} } \frac{1}{ \sqrt{t} } p(1,j_*)
\Vert\langle \nu^K_{s}, \ind_{I_j(t,x)} \rangle \Vert_m. \end{split} \end{align}
Set $ m=n $, $ s=0 $ and $ t=s_K $ in \eqref{eq:onion:Ln}. Then, for each $ j $-th term within the sum, use \eqref{eq:Dan} to bound
$ \Vert \langle \nu^K_{0}, \ind_{I_j(s_K,x)} \rangle \Vert_n \leq C|\sqrt{s_K}|^{\alpha} $, followed by using $ \sum_{j} p(1,j_*) <\infty $. This yields \begin{align}
\label{eq:onion:Ln:}
\Vert \langle \nu^K_{0}, p(s_K,\Cdot-x) \rangle \Vert_n
\leq
C s_K^{\frac{\alpha-1}{2}}. \end{align} Inserting~\eqref{eq:onion:Ln:} into \eqref{eq:J1:Ln:}, we then obtain \begin{align}
\label{eq:J1:Ln}
\Vert J_1 \Vert_n
\leq
\int_{a}^{b} C s_K^{\frac{\alpha-1}{2}} dx
\leq
C |b-a| s_K^{\frac{\alpha-1}{2}}. \end{align} As for $ J_2 $, by \eqref{eq:p:Holdx} we have \begin{align}
\label{eq:J2:bd}
|J_2| \leq C \int_{0}^{s} |b-a|^{\alpha}(u_K)^{-\frac{1+\alpha}{2}} du
\leq
C |b-a|^{\alpha}. \end{align}
Inserting \eqref{eq:J1:Ln}--\eqref{eq:J2:bd} in \eqref{eq:int:atl:Ex}, we see that \eqref{eq:emY:bd} holds for $ n=1 $.
To progress to $ n>1 $, we use induction, and assume \eqref{eq:emY:bd} has been established for an index $ m\in[1,n) $.
To set up the induction, similarly to the preceding, we set $ x=b,a $ in \eqref{eq:int:atl:K}, take the difference of the resulting equations, and take the $ L^{m+1} $-norm of the result to obtain \begin{align*}
\Vert \langle \nu^K_{s}, \Phi(\tfrac1K,b-\Cdot)-\Phi(\tfrac1K,a-\Cdot) \rangle \Vert_{m+1}
\leq
\Vert J_1 \Vert_{m+1} + \Vert J_2 \Vert_{m+1} + \Vert J_3 \Vert_{m+1}, \end{align*} where $ J_3 :=N_K(s,b) - N_K(s,a) $. Further combining this with \eqref{eq:erf:ind} yields \begin{align}
\label{eq:atl:induc}
\Vert \langle \nu^K_{s}, \ind_{[a,b]} \rangle \Vert_{m+1}
\leq
C \Vert J_1 \Vert_{m+1} + C \Vert J_2 \Vert_{m+1} + C \Vert J_3 \Vert_{m+1}. \end{align} For $ \Vert J_1 \Vert_{m+1} $ and $ \Vert J_2 \Vert_{m+1} $ we have already established the bounds~\eqref{eq:J1:Ln}--\eqref{eq:J2:bd}, so it suffices to bound $ \Vert J_3 \Vert_{m+1} $. As $ J_3 $ is a martingale integral of quadratic variation $
\frac{1}{\sqrt{K}} \int_0^{s} \langle \nu^K_{u}, \widehat{p}^2(u,\Cdot) \rangle du, $ where $ \widehat{p}(u,y) := p(s_K-u,a-y) - p(s_K-u,b-y) $, we apply the~\ac{BDG} inequality to obtain \begin{align}
\label{eq:J3:bd1}
\Vert J_3 \Vert^2_{m+1}
\leq
\frac{C}{\sqrt{K}}
\int_{0}^{s}
\Vert \langle \nu^K_{u}, \widehat{p}^2(u,\Cdot) \rangle \Vert_{\frac{m+1}{2}} du. \end{align} The induction hypothesis asserts the bound \eqref{eq:emY:bd} for $ n=m $.
With this in mind, within the integral in~\eqref{eq:J3:bd1}, we use $ \frac{m+1}{2} \leq m $ to bound the $ \Vert\Cdot\Vert_{\frac{m+1}{2}} $ norm by the $ \Vert\Cdot\Vert_m $ norm, and write \begin{align}
\label{eq:J3:bd2}
\Vert \langle \nu^K_{u}, \widehat{p}^2(u,\Cdot) \rangle \Vert_{\frac{m+1}{2}}
\leq
\Vert \langle \nu^K_{u}, \widehat{p}^2(u,\Cdot) \rangle \Vert_{m}
\leq
| \widehat{p}(u,\Cdot) | _{L^\infty( \mathbb{R} )}
\Vert \langle \nu^K_{u}, \widehat{p}(u,\Cdot) \rangle \Vert_{m}. \end{align} To bound the factor $ | \widehat{p}(u,\Cdot) | _{L^\infty( \mathbb{R} )} $ on the r.h.s.\ of~\eqref{eq:J3:bd2}, fixing $ (2\alpha-1)_+<\beta<\alpha $, we use~\eqref{eq:p:Holdx} to write \begin{align}
\label{eq:J3:bd3}
| \widehat{p}(u,\Cdot) | _{L^\infty( \mathbb{R} )}
\leq C |b-a|^{\beta} (s_K-u)^{-\frac{1+\beta}{2}}. \end{align} Now, within the r.h.s.\ of~\eqref{eq:J3:bd2}, using \eqref{eq:J3:bd3}, \begin{align*}
|\langle \nu^K_{u}, \widehat{p}(u,\Cdot) \rangle|
\leq
\langle \nu^K_{u}, p(s_K-u,b-\Cdot) \rangle
+ \langle \nu^K_{u}, p(s_K-u,a-\Cdot) \rangle \end{align*} and \eqref{eq:onion:Ln}, we obtain \begin{align}
\notag
\Vert \langle \nu^K_{u}, & \widehat{p}^2(u,\Cdot) \rangle \Vert_{\frac{m+1}{2}}
\leq
C |b-a|^{\beta} (s_K-u)^{-\frac{1+\beta}{2}} \\
\label{eq:J3:bd4}
&\sum_{j\in \mathbb{Z} } \frac{1}{ \sqrt{s_K-u} } p(1,j_*)
\Big(
\sum_{x\in a,b}
\Vert \langle \nu^K_{u}, \ind_{I_j(s_K-u,x)} \rangle \Vert_{m}
\Big). \end{align} By the induction hypothesis, $
\Vert \langle \nu^K_{u}, \ind_{I_j(s_K-u,x)} \rangle \Vert_{m}
\leq
C (\sqrt{s_K-u})^{\alpha} ( (\frac{\sqrt{s_K-u}}{\sqrt{u_K}})^{1-\alpha}+1 ). $ Using this for $ x=a,b $ in \eqref{eq:J3:bd4}, and combining the result with \eqref{eq:J3:bd1}, followed by $ \sum_{j\in \mathbb{Z} } p(1,j_*)\leq C $, we obtain \begin{align}
\label{eq:J3:bd5}
\Vert \langle \nu^K_{u}, \widehat{p}^2(u,\Cdot) \rangle \Vert_{\frac{m+1}{2}}
\leq
C |b-a|^{\beta}
\Big(
(s_K-u)^{-\frac{1+\beta}{2}} {u_K}^{\frac{\alpha-1}{2}}
+ (s_K-u)^{-1+\frac{\alpha-\beta}{2}}
\Big). \end{align} Inserting this bound~\eqref{eq:J3:bd5} back into \eqref{eq:J3:bd1}, followed by using
$ \frac{1}{\sqrt{K}} \leq K^{-(2\alpha-\beta)/2} \leq |b-a|^{2\alpha-\beta} $, we arrive at \begin{align*}
\Vert J_3 \Vert^2_{m+1}
\leq
C |b-a|^{2\alpha}
\int_0^{s}
\big(
(s_K-u)^{-\frac{1+\beta}{2}} {u_K}^{\frac{\alpha-1}{2}}
+ (s_K-u)^{-1+\frac{\alpha-\beta}{2}}
\big)
du. \end{align*} Within the last expression, using the readily verified inequality: \begin{align}
\label{eq:int:power}
\int_0^s (s_K-u)^{-\delta_1} {u_K}^{-\delta_2} du \leq C(\delta_1,\delta_2) s_K^{1-\delta_1-\delta_2},
\quad
\forall \delta_1,\delta_2<1, \end{align} we obtain $
\Vert J_3 \Vert^2_{m+1}
\leq
C |b-a|^{2\alpha} s_K^{\frac{\alpha-\beta}{2}}
\leq
C |b-a|^{2\alpha}. $ Using this bound and the bounds \eqref{eq:J1:Ln}--\eqref{eq:J2:bd} in \eqref{eq:atl:induc}, we see that \eqref{eq:emY:bd} holds for the index $ m+1 $. This completes the induction and hence concludes~\eqref{eq:emY:bd}.
The bound~\eqref{eq:emPY:bd} follows by combining \eqref{eq:onion:Ln} and \eqref{eq:emY:bd}. \end{proof}
Next we establish bounds on $ \mu^K_{t} $.
\begin{lemma} \label{lem:embd} Let $ n,T<\infty $ and $ \alpha\in(0,1) $. Given any strategy, \begin{align}
\label{eq:em:bd}
&\Vert \langle \mu^K_{s}, \ind_{[a,b]} \rangle \Vert_{n}
\leq
C |b-a|^{ \alpha } s_K^{ -\frac{1+\alpha}{2} },
\quad
\forall [a,b]\subset \mathbb{R} _+ \text{ with } |b-a|\geq \tfrac{1}{\sqrt{K}},
\ \forall s\leq T, \\
\label{eq:emP:bd}
&\Vert \langle \mu^K_{s}, p(t_K,\Cdot-x) \rangle \Vert_{n}
\leq
C t_K^{-\frac{1-\alpha}{2}} s_K^{-\frac{1+\alpha}{2}} ,
\quad
\forall x\in \mathbb{R} , \ s,t\leq T, \end{align} where $ C=C(T,\alpha,n)<\infty $, which, in particular, is independent of the strategy. \end{lemma}
\begin{proof} With $ \Psi(\tfrac1K,y,x) $ defined as above, it is straightforward to verify that \begin{align}
\label{eq:testf:ind}
\tfrac1C \ind_{[a,b]}(y) \leq \Psi(\tfrac1K,y,a)-\Psi(\tfrac1K,y,b),
\quad
\forall [a,b] \subset [\tfrac{1}{\sqrt{K}},\infty),
\text{ satisfying } |b-a|\geq \tfrac{1}{\sqrt{K}}. \end{align}
The idea of the proof is to follow the same general strategy as in the proof of Lemma~\ref{lem:emYbd}. However, unlike \eqref{eq:erf:ind}, here the inequality~\eqref{eq:testf:ind} does \emph{not} hold for all desired intervals $ [a,b]\subset \mathbb{R} _+ $, but only for $ [a,b] \subset [\tfrac{1}{\sqrt{K}},\infty) $. This is due to the fact that $ \Psi(t,0,x)=0 $.
To circumvent the problem, we consider the \emph{shifted} process $ (\mathcal{X}^{m}_i(t); t \geq 0)_{i=1}^K $ \begin{align*}
\mathcal{X}^m_i(t) = 1+m + B_i(t\wedge\sigma^m_i) + \int_0^{t\wedge\sigma^m_i} \phi_i(s) ds,
\quad
\text{where } \sigma^m_i := \inf\{t : \mathcal{X}^m_i(t)=0 \}. \end{align*} That is, $ \mathcal{X}^{m}_i(t) $, $ i=1,\ldots,K $, are driven by the same Brownian motions as $ X(t) $, drifted under \emph{the same strategy} $ \phi(t) $ as $ X(t) $, and absorbed at $ x=0 $, but started at $ x=m+1 $ instead of $ x=1 $.
Define the analogous scaled variables as $ \mathcal{X}^{K,m}_i(t) := \frac{1}{\sqrt{K}} \mathcal{X}^m_i(Kt) $, $ \sigma^{K,m}_i := K^{-1}\sigma^m_i $, \begin{align*}
\mu^{K,m}_t(\Cdot)
:=
\frac{1}{\sqrt{K}} \sum_{\mathcal{X}^{K,m}_i(t)>0} \delta_{\mathcal{X}^{K,m}_i(t)}(\Cdot). \end{align*} We adopt the convention that $ \mathcal{X}^{K,0}_i(t) = X^K_i(t) $ and $ \mu^{K,0}_t = \mu^K_t $.
Under the preceding construction, we clearly have that $ \mathcal{X}^{m-1}_i(t) = \mathcal{X}^m_i(t)-1 $, $ \forall t\leq\sigma^{m-1}_i $, so in particular \begin{align}
\label{eq:mu:cmp}
\mu^{K,m-1}_t([a-\tfrac{1}{\sqrt{K}},b-\tfrac{1}{\sqrt{K}}]) \leq \mu^{K,m}_t([a,b]),
\quad
\forall [a,b] \subset \mathbb{R} _+. \end{align} For the shifted process $ \mathcal{X}^{K,m}(t) = (\mathcal{X}^{K,m}_i(t))_{i=1}^K $, by the same procedure of deriving \eqref{eq:int:abs:e}, we have the following integral identity: \begin{align} \label{eq:int:abs:e'} \begin{split}
\langle \mu^{K,m}_{t}, &\Psi(\tfrac1K,\Cdot,x) \rangle
=
\widetilde{G}_{K,m}(t_K,x) \\
&+ \sum_{i=1}^K \int_{0}^{t\wedge\sigma^{K,m}_i} \phi^K_i(s) p^\text{N}(t_K-s,\mathcal{X}^{K,m}_i(s),x) ds
+ M_{K,m}(t,x),
\quad
\forall t \in \mathbb{R} _+, \ x\geq 0, \end{split} \end{align} where \begin{align}
\label{eq:Gcn}
\widetilde{G}_{K,m}(t,x) &:=
\sqrt{K} \Psi(t,\tfrac{1+m}{\sqrt{K}},x), \\
\label{eq:mgn}
M_{K,m}(t,x)
&:=
\frac{1}{\sqrt{K}} \sum_{i=1}^K \int_{0}^{t\wedge\sigma^{K,m}_i} p^\text{N}(t_K-s,\mathcal{X}^{K,m}_i(s),x) dB^K_i(s). \end{align}
Having prepared the necessary notations, we now begin the proof of \eqref{eq:em:bd}.
Instead of proving~\eqref{eq:em:bd} directly, we show \begin{align} \begin{split}
\label{eq:em:bd:m}
\Vert \langle \mu^{K,n-m+1}_{s}, \ind_{[a,b]} \rangle \Vert_{m}
&\leq
C |b-a|^{ \alpha } s_K^{ -\frac{1+\alpha}{2} }, \\
&\forall [a,b]\subset[\tfrac{1}{\sqrt{K}},\infty) \text{ with } |b-a|\geq \tfrac{1}{\sqrt{K}},
\ \forall s\leq T, \end{split} \end{align} for all $ m=1,\ldots,n $. Once this is established, combining \eqref{eq:em:bd:m} for $ m=n $ and \eqref{eq:mu:cmp} for $ m=1 $, the desired result~\eqref{eq:em:bd} follows.
Fix $ [a,b]\subset[\frac1{\sqrt{K}},\infty) $ with $ |b-a|\geq \tfrac{1}{\sqrt{K}} $. We begin by settling \eqref{eq:em:bd:m} for $ m=1 $. Similarly to the procedure for obtaining \eqref{eq:int:atl:Ex}, using \eqref{eq:int:abs:e'} for $ m=n $ and \eqref{eq:testf:ind} in place of \eqref{eq:int:atl:K} and \eqref{eq:erf:ind}, respectively, here we have \begin{align}
\label{eq:int:abs:Ex}
\Vert \langle \mu^{K,n}_{s}, \ind_{[a,b]} \rangle \Vert_1
\leq
C \mathcal{J}^n_{1} + C\Ex(\mathcal{J}^n_{2}), \end{align} where $ \mathcal{J}^m_{1},\mathcal{J}^m_{2} $ is defined for $ m=1,\ldots, n $ as \begin{align*}
\mathcal{J}^m_{1} &:= \widetilde{G}_{K,m}(s_K,a)-\widetilde{G}_{K,m}(s_K,b), \\
\mathcal{J}^m_{2} &:= \sum_{i=1}^K \int_0^{\sigma^{K,m}_i\wedge s} \phi^K_i(u)
\big( p^\text{N}(s_K-u,\mathcal{X}^{K,m}_i(u),a)-p^\text{N}(s_K-u,\mathcal{X}^{K,m}_i(u),b) \big) du. \end{align*} As noted in Remark~\ref{rmk:int:meaning}, expressions of the type $ \mathcal{J}^m_{1} $ account for the contribution of the system with only absorption, while $ \mathcal{J}^m_2 $ encodes the contribution of the drifts $ \phi^K_i(s) ds $.
The singular behavior of the empirical measure $ \mu^{K,m}_s $ at $ s=0 $ (due to having $ K \gg \sqrt{K} $ particles) is entirely encoded in $ \mathcal{J}^m_1 $. In particular, recalling from \eqref{eq:Ucab} the notation $ \widetilde{U}^\text{ab}_K(t,x) $, by \eqref{eq:Ucab:Gc} we have \begin{align*}
\widetilde{G}_{K,m}(0,a)-\widetilde{G}_{K,m}(0,b)
= \tfrac{1}{\sqrt{K}} \# \{ X^{\text{ab},K}_i(0)+\tfrac{m}{\sqrt{K}}\in (a,b] \}
= \sqrt{K} \ind_{\frac{1+m}{\sqrt{K}}\in(a,b]}. \end{align*} While this expression diverges (when $ \frac{1+m}{\sqrt{K}}\in(a,b] $) as $ K\to\infty $, for any fixed $ s>0 $ the absorption mechanism remedies the divergence, resulting in a convergent expression for each fixed $ s>0 $.
To see this, with $ \widetilde{G}_{K,m}(t,x) $ defined in \eqref{eq:Gcn}, we use $ \partial_y\Psi(t,y,x)=p^\text{N}(t,y,x) $ and $ \Psi(t,0,x)=0 $ to write \begin{align}
\label{eq:Gc:pNm}
\widetilde{G}_{K,m}(t,x)
= \sqrt{K} \int_{0}^{\frac{1+m}{\sqrt{K}}}
p^\text{N}(t,y,x) dy. \end{align} Letting $ x=a,b $ in \eqref{eq:Gc:pNm}, taking the difference of the resulting equations, followed by applying the estimate \eqref{eq:p:Holdx}, we obtain the following bound on $ \mathcal{J}^m_1 $, which stays bounded as $ K\to\infty $ for any fixed $ s>0 $: \begin{align}
\notag
\mathcal{J}^m_1
&=
\sqrt{K} \int_{0}^{\frac{1+m}{\sqrt{K}}}
\big( p^\text{N}(s_K,y,a)-p^\text{N}(s_K,y,b)\big) dy
\leq
C \sqrt{K} \int_{0}^{\frac{1+m}{\sqrt{K}}} s_K^{-\frac{1+\alpha}{2}} |b-a|^{\alpha} dy \\
\label{eq:J1':bd}
&\leq
C s_K^{-\frac{1+\alpha}{2}} |b-a|^{\alpha},
\quad
\forall m=1,\ldots, n. \end{align} As for $ \mathcal{J}^m_2 $, similarly to \eqref{eq:J2:bd}, by using \eqref{eq:p:Holdx} and $ \sum_{i=1}^K \phi^K_i(s) \leq 1 $ here we have \begin{align}
\label{eq:J2':bd}
|\mathcal{J}^m_2| \leq
\sum_{i=1}^K \int_0^{\sigma^{K,m}_i\wedge s} \phi^K_i(u)
C|b-a|^{\alpha}(u_K)^{-\frac{1+\alpha}{2}} du
\leq
C |b-a|^{\alpha},
\quad
\forall m=1,\ldots, n. \end{align} Combining \eqref{eq:J1':bd}--\eqref{eq:J2':bd} with \eqref{eq:int:abs:Ex}, we conclude \eqref{eq:em:bd:m} for $ m=1 $.
Having established \eqref{eq:em:bd:m} for $ m=1 $, we use induction to progress, and assume \eqref{eq:em:bd:m} has been established for some index $ m\in[1,n) $.
Similarly to \eqref{eq:atl:induc}, here we have \begin{align}
\label{eq:abs:induc}
\Vert \langle \mu^{K,n-m}_{s}, \ind_{[a,b]} \rangle \Vert_{m+1}
\leq
C \mathcal{J}^{n-m}_1 + C \Vert \mathcal{J}^{n-m}_2 \Vert_{n-m} + C \Vert \mathcal{J}^{n-m}_3 \Vert_{m+1}. \end{align} where $ \mathcal{J}^{m}_3 :=M_{K,m}(s,b) - M_{K,m}(s,a) $. To bound $ \Vert \mathcal{J}^{n-m}_3 \Vert_{m+1} $, similarly to \eqref{eq:J3:bd1}--\eqref{eq:J3:bd2}, by using the~\ac{BDG} inequality here we have \begin{align}
\label{eq:J3':bd1}
\Vert \mathcal{J}^{n-m}_3 \Vert^2_{m+1}
&\leq
\frac{C}{\sqrt{K}}
\int_{0}^{s}
\Vert \langle \mu^{K,n-m}_{u}, \widehat{p}^\text{N}(u,\Cdot)^2 \rangle \Vert_{\frac{m+1}{2}} du, \end{align} where $ \widehat{p}^\text{N}(u,y) := p^\text{N}(s_K-u,a,y)-p^\text{N}(s_K-u,b,y) $. For the expression $ \Vert \langle \mu^{K,n-m}_{u}, \widehat{p}^\text{N}(u,\Cdot)^2 \rangle \Vert_{\frac{m+1}{2}} $, following the same calculations as in \eqref{eq:J3:bd2}--\eqref{eq:J3:bd4}, here we have \begin{align}
\notag
\Vert \langle \mu^{K,n-m}_{u}, & \widehat{p}^\text{N}(u,\Cdot)^2 \rangle \Vert_{\frac{m+1}{2}}
\leq
C |b-a|^{\beta} (s_K-u)^{-\frac{1+\beta}{2}} \\
\label{eq:J3':bd4}
&\sum_{j\in \mathbb{Z} } \frac{1}{ \sqrt{s_K-u} } p(1,j_*)
\Big(
\sum_{x=\pm a,\pm b}
\Vert \langle \mu^{K,n-m}_{u}, \ind_{I'_j(x)}\rangle \Vert_{m} \Big), \end{align} where $ \beta \in ((2\alpha-1)_+,\alpha) $ is fixed,
$ j_*:=|j|\wedge|j+1| $ and \begin{align*}
I'_j(x):= I_j(s_K-u,x) = [x+j\sqrt{s_K-u},x+(j+1)\sqrt{s_K-u}]. \end{align*} Since the empirical measure~$ \mu^{K,n-m}_{u} $ is supported on $ \mathbb{R} _+ $, letting\\ $ I''_j(x):=[(x+j\sqrt{s_K-u})_+,(x+j\sqrt{s_K-u})_++\sqrt{s_K-u}] $, we have \begin{align*}
\langle \mu^{K,n-m}_{u}, \ind_{I'_j(x)}\rangle
=
\langle \mu^{K,n-m}_{u}, \ind_{I'_j(x)\cap \mathbb{R} _+} \rangle
\leq
\langle \mu^{K,n-m}_{u}, \ind_{I''_j(x)}\rangle. \end{align*} Further using \eqref{eq:mu:cmp} yields \begin{align}
\label{eq:em:II'}
\langle \mu^{K,n-m}_{u}, \ind_{I'_j(x)}\rangle
\leq
\langle \mu^{K,n-m+1}_{u}, \ind_{\frac{1}{\sqrt{K}}+I''_j(x)}\rangle. \end{align} By the induction hypothesis, \begin{align}
\label{eq:em:induc}
\Vert \langle \mu^{K,n-m+1}_{u}, \ind_{\frac{1}{\sqrt{K}}+I''_j(x)} \rangle\Vert_{m}
\leq
C (\sqrt{s_K-u})^{\alpha} (u_K)^{-\frac{1+\alpha}{2}}. \end{align} Using~\eqref{eq:em:induc} for $ x=\pm a,\pm b $ in \eqref{eq:J3':bd4}, and combining the result with \eqref{eq:J3':bd1}, we arrive at \begin{align*}
\Vert \mathcal{J}^{n-m}_3 \Vert^2_{m+1}
\leq
C \frac{|b-a|^{\beta}}{\sqrt{K}}
\int_0^{s}
(s_K-u)^{\frac{-2-\beta+\alpha}{2}} {u_K}^{\frac{-1-\alpha}{2}}
du. \end{align*} Within the last expression, further using
$ \frac{1}{\sqrt{K}} \leq K^{-(2\alpha-\beta)/2} \leq |b-a|^{2\alpha-\beta} $ and \eqref{eq:int:power}, we obtain $
\Vert \mathcal{J}^{n-m}_3 \Vert^2_{m+1}
\leq
C |b-a|^{2\alpha} s_K^{-\frac{1+\beta}{2}}
\leq
C |b-a|^{2\alpha} s_K^{-\frac{1+\alpha}{2}}. $ Using this bound and the bounds \eqref{eq:J1':bd}--\eqref{eq:J2':bd} in \eqref{eq:abs:induc}, we see that \eqref{eq:em:bd:m} holds for the index $ m+1 $. This completes the induction and hence concludes~\eqref{eq:em:bd}.
The bound~\eqref{eq:emP:bd} follows by \eqref{eq:em:bd} and \eqref{eq:J3':bd4}. \end{proof}
Lemmas~\ref{lem:emYbd}--\ref{lem:embd} establish bounds that are `pointwise' in time, in the sense that they hold at a fixed time $ s $ within the relevant interval.
We next improve these pointwise bounds to bounds that hold for \emph{all} time within a relevant interval.
\begin{lemma}\label{lem:em} Let $ T,L,n<\infty $ and \begin{align}\label{eq:Ixa}
I_{x,\alpha} := [-K^{-\alpha}+x,x+K^{-\alpha}]. \end{align}
\begin{enumerate}[label=(\alph*)]
\item\label{enu:em}
For any given $ \gamma\in(0,\frac14) $, $ \alpha\in(2\gamma,\frac12] $
and any strategy,
\begin{align}
\label{eq:em:bdT}
\operatorname{\mathbf{P}} \Big(
\langle \mu^K_{t}, \ind_{I_{x,\alpha}} \rangle
\leq
t^{-\frac34} K^{-\gamma},
\
\forall t \leq T, |x| \leq L
\Big)
\geq 1 - CK^{-n},
\end{align}
where $ C=C(T,L,\alpha,\gamma,n)<\infty $,
which, in particular, is independent of the strategy.
\item\label{enu:emY}
Letting $ \nu^K_{t} $ be as in Lemma~\ref{lem:emYbd}, we have,
for any $ \alpha\in(\frac14,\frac12] $,
\begin{align}\label{eq:emY:bdT}
\operatorname{\mathbf{P}} \Big(
\langle \nu^K_{t}, \ind_{I_{x,\alpha}}\rangle \leq K^{-\frac14},
\
\forall t \leq T, |x| \leq L
\Big)
\geq 1 - CK^{-n},
\end{align}
where $ C=C(T,L,\alpha,n,D_*, D_{\alpha,n})<\infty $,
for $ D_*, D_{\alpha,n} $ as in \eqref{eq:D*}--\eqref{eq:Dan}.
\end{enumerate} \end{lemma}
\begin{proof}
We first prove Part~\ref{enu:emY}. Fixing $ L,T,n<\infty $ and $ \alpha\in(\frac14,\frac12] $, to simplify notations we use $ C(a_1,a_2,\ldots)<\infty $ to denote generic finite constants that may depend on $ L,T, \alpha, n, D_*, D_{\alpha,j} $ and the designated variables $ a_1,a_2,\ldots $.
To the end of proving \eqref{eq:emY:bdT}, we cover $ [-L,L] $ by intervals $ I_j $ of length $ K^{-\alpha} $: \begin{align*}
I_j := [jK^{-\alpha}, (j+1)K^{-\alpha}],
\quad
|j| \leq LK^{\alpha}. \end{align*} Indeed, each $ I_{x,\alpha} $ is contained in the union of three consecutive such intervals $ I_j $, so it suffices to prove \begin{align}\label{eq:emY:goal}
\operatorname{\mathbf{P}} \big(
\langle \nu^K_{t}, \ind_{I_{j}}\rangle
\leq
\tfrac13 K^{-\frac14},
\
\forall |j|\leq LK^{\alpha}, t\leq T
\big)
\geq
1- C K^{-n}. \end{align}
By \eqref{eq:emY:bd} we have, for any $ t\in [\frac1K,T] $, $ \beta\in(0,1) $ and $ k<\infty $, \begin{align}\label{eq:emY:norm}
\Vert \langle \nu^K_{t}, \ind_{I_{j}}\rangle \Vert_{k}
\leq
C(k,\beta) |I_j|^{\beta} \big( (|I_j|K^{\frac12})^{1-\beta} + 1 \big)
\leq
C(k,\beta) (K^{-\alpha+\frac{1-\beta}{2}}+ K^{-\alpha\beta}). \end{align}
With $ \alpha > \frac14 $, fixing $ \beta $ close enough to $ 1 $ we have $ \Vert \langle \nu^K_{t}, \ind_{I_{j}}\rangle \Vert_{k} \leq C(k) K^{-\frac14-\varepsilon} $, for some fixed $ \varepsilon >0 $.
With this, applying Markov's inequality we obtain \begin{align}\label{eq:Mrk:ineq}
\operatorname{\mathbf{P}}( \langle \nu^K_{t}, \ind_{I_{j}}\rangle \geq \tfrac19 K^{-\frac14} ) \leq C(k) K^{-k\varepsilon}. \end{align} Now, fixing $ k \geq (n+\alpha+2)\varepsilon^{-1} $ and taking the union bound of \eqref{eq:Mrk:ineq}
over $ |j| \leq LK^{\alpha} $ and $ t= t_{\ell} := \ell K^{-2} $, $ 1\leq \ell \leq TK^2 $, we arrive at \begin{align}\label{eq:emY:goal:}
\operatorname{\mathbf{P}}(
\langle \nu^K_{t_\ell}, \ind_{I_{j}}\rangle \leq \tfrac19 K^{-\frac14},
\ \forall |j|\leq LK^{\alpha}, 1\leq \ell\leq TK^{2}
)
\geq
1- C K^{-n}. \end{align} To move from the `discrete time' $ t_\ell $ to `continuous time' $ t\in[0,T] $, we need to control $ \nu^K_{s}(I_j) $ within each time interval $ s\in [t_{\ell-1},t_{\ell}] := J_\ell $. Within each $ J_\ell $, since each $ Y^K_i(s) $ evolves as a drifted Brownian motion with drift $ \leq \sqrt{K} $, we have that \begin{align} \label{eq:driftBMest}
\operatorname{\mathbf{P}}( |Y^K_i(s)-Y^K_i(t_\ell)| \leq K^{-\alpha}, \forall s\in J_{\ell} )
\geq
1- \exp(-\tfrac{1}{C} K^{1-\alpha})
\geq
1- C K^{-n-3}. \end{align} By \eqref{eq:D*}, we assume without loss of generality that the total number of $ Y $-particles is at most $ K $. Hence, taking the union bound of \eqref{eq:driftBMest} over $ \ell\leq TK^{2} $ and over all particles $ i = 1,2,\ldots \leq K $, we obtain \begin{align*}
\operatorname{\mathbf{P}}\Big(
\sup_{s\in J_\ell} |Y^K_i(s)-Y^K_i(t_\ell)|
\leq
K^{-\alpha}, \ \forall i, \forall 1 \leq \ell \leq TK^{2}
\Big)
\geq
1 - C K^{-n}. \end{align*} That is, with high probability, no particle travels farther
than distance $ |I_j| $ within each time interval $ J_{\ell} $. Therefore, \begin{align*}
\operatorname{\mathbf{P}}\Big(
\sup_{s\in J_\ell} \langle \nu^K_{s}, \ind_{I_{j}}\rangle
\leq
\langle \nu^K_{t_\ell}, \ind_{I_{j-1}\cup I_{j}\cup I_{j+1}}\rangle,
\
\forall 1 \leq \ell \leq TK^{2}
\Big)
\geq
1 - C K^{-n}. \end{align*} Combining this with \eqref{eq:emY:goal:} yields the desired result~\eqref{eq:emY:goal}.
Part~\ref{enu:em} is proven by a similar argument to the preceding. The only difference is that, instead of a moment bound of the form \eqref{eq:emY:norm}, we have from \eqref{eq:em:bd} the moment bound \begin{align}\label{eq:em:norm}
\Vert \langle \mu^K_{t}, \ind_{I_j} \rangle \Vert_{k}
\leq
C(k) |I_j|^\frac12 t^{-\frac34}
\leq
C(k) K^{-\frac{\alpha}{2}} t^{-\frac34}, \end{align} for all $ k<\infty $. With $\frac{\alpha}{2} > \gamma $, \eqref{eq:em:norm} yields \eqref{eq:em:bdT} by the same argument by which \eqref{eq:emY:norm} yielded \eqref{eq:emY:bdT}. \end{proof}
Equipped with Lemmas~\ref{lem:emYbd}--\ref{lem:em}, we proceed to the main goal of this section: to develop integral identities (in different forms from \eqref{eq:int:abs:e} and \eqref{eq:int:atl:K}) that are convenient for proving the hydrodynamic limits.
Recall from \eqref{eq:alt} that $ W(t) $ is the analogous laggard of the Atlas model $ (Y_i(t);t\geq 0)_i $ and that $ W_K(t) $ denotes the scaled process.
For any fixed $ t $, we define the scaled distribution function of $ Y $ as \begin{align}
\label{eq:V}
V_K(t,x) := \tfrac{1}{\sqrt{K}} \# \{ Y^K_i(t) \leq x \}
=
\langle \nu^K_t, \ind_{(-\infty,x]} \rangle. \end{align}
\begin{proposition} \begin{enumerate}[label=(\alph*)] \item [] \item \label{enu:intX} Let $ (\phi_i(t);t\geq 0)_{i=1}^K $ be any given strategy. The following integral identity holds for all $ t<\infty $ and $ x\geq 0 $: \begin{align}\label{eq:int:abs}
\widetilde{U}_K(t,x) = \widetilde{G}_K(t,x)
+ \sum_{i=1}^K \int_{0}^{t\wedge\tau_i^K} \phi^K_i(s) p^\text{N}(t-s,X^K_i(s),x) ds
+ R_K(t,x). \end{align} Here $ R_K(t,x) $ is a remainder term such that, for given any $ T,n<\infty $ and $ \gamma\in(0,\frac14) $, \begin{align}\label{eq:rd:bd}
\operatorname{\mathbf{P}} \Big(
|R_K(t,x)| \leq
K^{-\gamma} t^{-\frac34}, \ \forall t \leq T, \ x\in \mathbb{R}
\Big)
\geq 1 - CK^{-n}, \end{align} where $ C=C(T,\gamma,n)<\infty $, and is in particular independent of the strategy.
\item \label{enu:intY} Let $ (Y_i(t);t \geq 0)_{i} $ be an Atlas model, and let $ W_K(t) $ and $ V_K(t,x) $ be as in the preceding, and assume $ (Y^K_i(0))_i $ satisfies the conditions \eqref{eq:D*}--\eqref{eq:Dan}.
Then, the following integral identity holds for all $ t<\infty $ and $ x\in \mathbb{R} $: \begin{align}\label{eq:int:atl}
V_K(t,x)
=
\int_0^\infty p(t,x-y) V_K(0,y) dy
- \int_{0}^{t} p(t-s,x-W_K(s)) ds
+ R'_K(t,x). \end{align} Here $ R'_K(t,x) $ is a remainder term such that, given any $ T,n<\infty $ and $ \gamma\in(0,\frac14) $, \begin{align}\label{eq:rdY:bd}
\operatorname{\mathbf{P}} \Big(
|R'_K(t,x)|
\leq K^{-\gamma},
\
\forall t \leq T, \ x\in \mathbb{R}
\Big)
\geq 1 - CK^{-n}, \end{align} where $ C<\infty $ depends only on $ T,n $ and $ D_*, D_{\alpha,n} $. \end{enumerate} \label{prop:intXY} \end{proposition}
\noindent The proof of Proposition~\ref{prop:intXY} requires a Kolmogorov-type estimate, which we recall from \cite{kunita97} as follows.
\begin{lemma}[{\cite[Theorem~1.4.1]{kunita97}}] \label{lem:kol} Let $ T <\infty $, $ a\in \mathbb{R} $, and let $ F $ be a $ \mathcal{C}([0,\infty)\times \mathbb{R} ) $-valued process. If, for some $ \alpha_1,\alpha_2 $, $ k\in \mathbb{N} $ and $ C_1<\infty $ with $ \frac{1}{k\alpha_1}+\frac{1}{k\alpha_2}<1 $, \begin{align}
\label{eq:kol:0}
\Vert F(0,0) \Vert_{k} &\leq C_1, \\
\label{eq:kol:holder}
\Vert F(t,x)-F(t',x') \Vert_k &\leq C_1 (|t-t'|^{\alpha_1}+|x-x'|^{\alpha_2}), \end{align} $\forall t,t'\in [0,T]$, $x,x'\in[a,a+1]$, then $
\big\Vert | F | _{L^\infty([0,T]\times[a,a+1])} \big\Vert_k
\leq
C_2=C_2(C_1,T,\alpha_1,\alpha_2)<\infty. $ \end{lemma}
\noindent Note that, although the dependence of $ C_2 $ is not explicitly designated in \cite[Theorem~1.4.1]{kunita97}, under the present setting, it is clear from the proof of \cite[Lemma~1.4.2, Lemma~1.4.3]{kunita97} that $ C_2=C_2(C_1,T,\alpha_1,\alpha_2,k) $.
\begin{proof}[Proof of Proposition~\ref{prop:intXY}]
The first step of the proof is to rewrite \eqref{eq:int:abs:e} and \eqref{eq:int:atl:K} in a form similar to \eqref{eq:int:abs} and \eqref{eq:int:atl}.
To motivate this step, recall from Remark~\ref{rmk:int:meaning} that the term $ \langle \mu^K_{t}, \Psi(\frac1K,x,\Cdot) \rangle $ should approximate $ \widetilde{U}_K(t,x) $ as $ K\to\infty $. In view of this, we write \begin{align}
\notag
\langle \mu^K_{t}&, \Psi(\tfrac1K,x,\Cdot) \rangle
=
\widetilde{U}_K(t,x) +E_K(t,x), \\
\label{eq:emrd}
&\text{ where }
E_K(t,x)
:= \langle \mu^K_{t}, \Psi(\tfrac1K,x,\Cdot)-\Psi(0,x,\Cdot) \rangle
= \langle \mu^K_{t}, \Psi(\tfrac1K,x,\Cdot)-\ind_{(x,\infty)} \rangle. \end{align} Similarly, for the first two terms on the r.h.s.\ of \eqref{eq:int:abs:e}, we write \begin{align*}
&\widetilde{G}_K(t_K,x) = \widetilde{G}_K(t,x) + \big( \widetilde{G}_K(t_K,x)-\widetilde{G}_K(t,x) \big) \\
&\sum_{i=1}^K \int_{0}^{t} \phi^K_i(s) p^\text{N}(t_K-s,X^K_i(s),x) ds
=
\sum_{i=1}^K \int_{0}^{t} \phi^K_i(s) p^\text{N}(t-s,X^K_i(s),x) ds
+
Q_K(t,x), \end{align*} where \begin{align}
\label{eq:pNrd}
Q_K(t,x)
:=
\sum_{i=1}^K \int_{0}^{t} \phi^K_i(s)
\big( p^\text{N}(t-s,X^K_i(s),x)-p^\text{N}(t_K-s,X^K_i(s),x) \big) ds. \end{align} Under these notations, we rewrite \eqref{eq:int:abs:e} as \begin{align}
\label{eq:int:abs::}
\widetilde{U}_K(t,x) = \widetilde{G}_K(t,x) + \sum_{i=1}^K \int_{0}^{\tau^K_i\wedge t} \phi^K_i(s) p^\text{N}(t-s,X^K_i(s),x) ds
+ R_K(t,x), \end{align} where \begin{align}
\label{eq:rd}
R_K(t,x) := ( \widetilde{G}_K(t_K,x)-\widetilde{G}_K(t,x) ) - E_K(t,x)+Q_K(t,x)+M_K(t,x). \end{align}
Equation~\eqref{eq:int:abs::} gives the desired identity \eqref{eq:int:abs} with the explicit remainder $ R_K(t,x) $.
Similarly for the Atlas model $ Y $, we define \begin{align}
\label{eq:emrdY}
E'_K(u,t,x) &:= \langle \nu^K_{u}, \Phi(t_K,x-\Cdot)-\Phi(t,x-\Cdot) \rangle \\
\label{eq:prd}
Q'_K(t,x)
&:=
\int_{0}^{t}
\big( p(t-s,x-W_K(s))-p(t_K-s,x-W_K(s)) \big) ds. \\
\label{eq:rdY}
R'_K(t,x) &:= E'_K(0,t,x) - E'_K(t,0,x)-Q'_K(t,x)+N_K(t,x), \end{align} and rewrite \eqref{eq:int:atl:K} as \begin{align*}
V_K(t,x)
&= \langle\nu^K_{0}, \Phi(t,x-\Cdot)\rangle
- \int_{0}^{t} p(t-s,x-W_K(s)) ds + R'_K(t,x). \end{align*} Further using integration by parts: \begin{align*}
\langle\nu^K_{0}, \Phi(t,x-\Cdot)\rangle
=
\int_{ \mathbb{R} } \Phi(t,x-y) dV_K(0,y)
=
-\int_{ \mathbb{R} } V_K(0,y) \partial_y \Phi(t,x-y) dy, \end{align*} we write \begin{align}
\label{eq:int:atl::}
V_K(t,x) &= \int_{ \mathbb{R} } p(t,x-y) V_K(0,y) dy
- \int_{0}^{t} p(t-s,x-W_K(s)) ds + R'_K(t,x). \end{align}
Equations~\eqref{eq:int:abs::} and \eqref{eq:int:atl::} give the desired identities \eqref{eq:int:abs} and \eqref{eq:int:atl} with the explicit remainders $ R_K(t,x) $ and $ R'_K(t,x) $, as in \eqref{eq:rd} and \eqref{eq:rdY}.
With this, it suffices to show that these remainders do satisfy the bounds \eqref{eq:rd:bd} and \eqref{eq:rdY:bd}.
To this end, fixing arbitrary $ T,n < \infty $ and $ \gamma\in(0,\frac14) $, we let $ C(k)<\infty $ denote a generic constant depending only on $ T,n,\alpha,\gamma, D_*, D_{\alpha,n} $, and the designated variable $ k $.
We begin with a reduction. That is, in order to prove \eqref{eq:rd:bd} and \eqref{eq:rdY:bd}, we claim that it suffices to prove \begin{align}
\label{eq:rd:bd:}
&
\operatorname{\mathbf{P}} \Big(
|R_K(t,x)| \leq K^{-\gamma} t^{-\frac34}, \ \forall t \leq T, x\in [a,a+1]
\Big)
\geq 1 - CK^{-n}, \\
\label{eq:rdY:bd:}
&
\operatorname{\mathbf{P}} \Big(
|R'_K(t,x)| \leq K^{-\gamma}, \ \forall t \leq T, x\in [a,a+1]
\Big)
\geq 1 - CK^{-n}, \end{align} for all $ a\in \mathbb{R} $.
To see why such a reduction holds, we assume that \eqref{eq:rd:bd:} has been established, and take the union bound of \eqref{eq:rd:bd:} over $ a\in \mathbb{Z} \cap[-K,K] $ to obtain \begin{align}
\label{eq:rd:bd::}
&
\operatorname{\mathbf{P}} \Big(
|R_K(t,x)| \leq K^{-\gamma} t^{-\frac34}, \ \forall t \leq T, |x| \leq K
\Big)
\geq 1 - CK^{-n+1}. \end{align}
To cover the regime $ |x|>K $ that is left out by \eqref{eq:rd:bd::}, we use the fact that each $ X^K_i(t) $ evolves as a Brownian motion with drift at most $ \sqrt{K} $ (and absorption) to obtain \begin{align}
\operatorname{\mathbf{P}}(\Omega_K) \geq 1 - CK^{-n},
\quad
\Omega_K := \{ |X^K_i(t)| \leq \tfrac12 K, \ \forall t\leq T, \forall i \}. \end{align} That is, with a sufficiently high probability, each particle $ X^K_i(t) $ stays within $ [-\frac12K,\frac12K] $ for all time.
Use \eqref{eq:int:abs::} to express $ R_K(t,x) = \widetilde{U}_K(t,x) - f(t,x)- \widetilde{G}_K(t,x) $, where\\ $ f(t,x) := \sum_{i=1}^K \int_0^{t\wedge\tau^K_i} \phi^K_i(s) p^\text{N}(t-s,X^K_i(s),x) ds $.
On the event $ \Omega_K $, the function $ x\mapsto \widetilde{U}_K(t,x) $ remains constant on $ \mathbb{R} \setminus(-K,K) $; and, for all $ x>K $, \begin{align*}
| f(t,\pm x) - f(t,\pm K) |
&\leq
\sum_{i=1}^K \int_0^{t\wedge\tau^K_i} \phi^K_i(s) |p^\text{N}(t-s,X^K_i(s),\pm x)-p^\text{N}(t-s,X^K_i(s),\pm K)| ds \\
&\leq
\int_0^{t} 4|p(t-s,\tfrac{K}{2})| ds
\leq
\frac{C}{K}.
\end{align*} From these we conclude that, on $ \Omega_K $, \begin{align*}
\sup_{x\in \mathbb{R} } |R_K(t,x)|
\leq
\sup_{|x|\leq K} |R_K(t,x)| + \Big( \frac{C}{K} + \sup_{|x|\geq K} |\widetilde{G}_K(t,x)| \Big)
\leq
\sup_{|x|\leq K} |R_K(t,x)| + \frac{C}{K}. \end{align*} Combining this with \eqref{eq:rd:bd::} gives the desired bound \eqref{eq:rd:bd}. A similar argument shows that \eqref{eq:rdY:bd:} implies \eqref{eq:rdY:bd}.
Having shown that \eqref{eq:rd:bd:}--\eqref{eq:rdY:bd:} imply the desired results, we now return to proving \eqref{eq:rd:bd:}--\eqref{eq:rdY:bd:}.
This amounts to bounding each term on the r.h.s.\ of the explicit expressions \eqref{eq:rd} and \eqref{eq:rdY} of $ R_K(t,x) $ and $ R'_K(t,x) $.
To this end, fixing $ t \leq T $, $ a\in \mathbb{R} $ and $ x\in [a,a+1] $, we establish bounds on the following terms in turn. \begin{enumerate}[label=\itshape\roman*\upshape)]
\item $ |\widetilde{G}_K(t_K,x)- \widetilde{G}_K(t,x)| $;
\item $ Q_K(t,x) $ and $ Q'_K(t,x) $;
\item $ E_K(t,x) $, $ E'_K(0,t,x) $ and $ E'_K(t,0,x) $;
and
\item $ N_K(t,x) $ and $ M_K(t,x) $. \end{enumerate}
(\textit{i}) By \eqref{eq:Gc:pNm} for $ m=0 $, we have that \begin{align}
\label{eq:Gc:pN}
\widetilde{G}_{K}(t,x)
= \sqrt{K} \int_{0}^{\frac{1}{\sqrt{K}}}
p^\text{N}(t,y,x) dy. \end{align} Applying the bound~\eqref{eq:p:Holdt} for $ \alpha=\frac{1}{4} $ within \eqref{eq:Gc:pN}, we obtain \begin{align}\label{eq:Gc:bd}
|\widetilde{G}_K(t_K,x)- \widetilde{G}_K(t,x)|
\leq
C K^{-\frac14} t^{-\frac34}. \end{align}
(\textit{ii}) Applying \eqref{eq:p:Holdt} for $ \alpha=\frac12 $ in \eqref{eq:pNrd} and in \eqref{eq:prd} yields \begin{align}\label{eq:prd:bd}
|Q_K(t,x)|, \ |Q'_K(t,x)| \leq C K^{-\frac14}. \end{align}
(\textit{iii})
For the Brownian distribution function $ \Phi(t,y) = \operatorname{\mathbf{P}}(B(t)\leq y) $, it is standard to show that $ t\mapsto |\Phi(t_K,y) - \Phi(t,y)| $ decreases in $ t $, and that $ |\Phi(\frac1K,y) - \Phi(0,y)| \leq C \exp(-\sqrt{K}|y|) $. Further, fixing $ \alpha \in (2\gamma,\frac12) $ and letting $ I_{x,\alpha} $ be as in \eqref{eq:Ixa}, we write \begin{align*}
\exp(-\sqrt{K}|y-x|)
\leq
\ind_{I_{x,\alpha}}(y) + \ind_{ \mathbb{R} \setminus{}I_{x,\alpha}}(y) \exp(-\sqrt{K}|y-x|)
\leq
\ind_{I_{x,\alpha}}(y) + \exp(-K^{\frac12-\alpha}). \end{align*} From these bounds we conclude \begin{subequations} \label{eq:iii} \begin{align}
\label{eq:iii:0}
|\Phi(\tfrac1K,y-x) - \Phi(0,y-x)|
&\leq
C ( \ind_{I_{x,\alpha}}(y) + \exp(-K^{\frac12-\alpha}) ), \\
\label{eq:iii:1}
|\Phi(t_K,y-x) - \Phi(t,y-x)|
&\leq
C ( \ind_{I_{x,\alpha}}(y) + \exp(-K^{\frac12-\alpha}) ), \\
|\Psi(\tfrac{1}{K},y,x) - \Psi(0,y,x)|
\label{eq:iii:2}
&\leq
C ( \ind_{I_{x,\alpha}\cup I_{-x,\alpha}}(y) + \exp(-K^{\frac12-\alpha}) ). \end{align} \end{subequations} Recall the definition of $ E_K(t,x) $ and $ E'_K(u,t,x) $ from \eqref{eq:emrd} and \eqref{eq:emrdY}. Applying $ \langle \nu^K_{t}, \Cdot \rangle $, $ \langle \nu^K_{0}, \Cdot \rangle $ and $ \langle \mu^K_{t}, \Cdot \rangle $ to both sides of \eqref{eq:iii:0}--\eqref{eq:iii:2}, respectively, we obtain \begin{subequations} \label{eq:emrd:bd:} \begin{align}
|E'_K(t,0,x)|
&\leq
C \langle \nu^K_{t}, \ind_{I_{x,\alpha}} \rangle
+ C\exp(-K^{\frac12-\alpha}) \langle \nu^K_{t}, \ind_{ \mathbb{R} } \rangle, \\
|E'_K(0,t,x)|
&\leq
C \langle \nu^K_{0}, \ind_{I_{x,\alpha}} \rangle
+ C\exp(-K^{\frac12-\alpha}) \langle \nu^K_{0}, \ind_{ \mathbb{R} } \rangle, \\
|E_K(t,x)|
&\leq
C \langle \mu^K_{t}, \ind_{I_{x,\alpha}} \rangle
+ C \langle \mu^K_{t}, \ind_{I_{-x,\alpha}} \rangle
+ C\exp(-K^{\frac12-\alpha}) \langle \mu^K_{t}, \ind_{ \mathbb{R} } \rangle. \end{align} \end{subequations} On the r.h.s.\ of \eqref{eq:emrd:bd:} sit two types of terms: the `concentrated terms' that concentrate on the small intervals $ I_{\pm x,\alpha} $; and the `tail terms' with the factor $ \exp(-K^{\frac12-\alpha}) $.
For the tail terms, writing $
\langle \nu^K_{0}, \ind_{ \mathbb{R} } \rangle
=
\langle \nu^K_{t}, \ind_{ \mathbb{R} } \rangle
=
\frac{1}{\sqrt{K}}\#\{ Y^K_i(0) \} $ and $
\langle \mu^K_{t}, \ind_{ \mathbb{R} } \rangle
\leq
\frac{1}{\sqrt{K}}\#\{ X^K_i(0) \} $, and using the bound~\eqref{eq:D*} and $ \#\{ X^K_i(0) \}=K $, we bound the tail terms by $ C\sqrt{K}\exp(-K^{\frac12-\alpha}) $, with probability $ \geq 1-CK^{-n} $.
Further using $ \sqrt{K}\exp(-K^{\frac12-\alpha}) \leq CK^{-\gamma} $, we have \begin{subequations} \label{eq:emrd:bd::} \begin{align}
|E'_K(t,0,x)|
&\leq
C \langle \nu^K_{t}, \ind_{I_{x,\alpha}} \rangle
+ CK^{-\gamma}, \\
|E'_K(0,t,x)|
&\leq
C \langle \nu^K_{0}, \ind_{I_{x,\alpha}} \rangle
+ CK^{-\gamma}, \\
|E_K(t,x)|
&\leq
C \langle \mu^K_{t}, \ind_{I_{x,\alpha}} \rangle
+C \langle \mu^K_{t}, \ind_{I_{-x,\alpha}} \rangle
+ CK^{-\gamma}, \end{align} \end{subequations} with probability $ \geq 1-CK^{-n} $.
Next, to bound the concentrated terms, we consider the covering $
\mathcal{X} := \{ I_{y,\alpha} : |y|\leq a+1 \} $ of $ [-a-1,a+1] $. With $ x\in[a,a+1] $, we clearly have that $ I_{\pm x, \alpha} \in \mathcal{X} $, so by Lemma~\ref{lem:em} it follows that \begin{align*}
\langle \nu^K_{t}, \ind_{I_{x,\alpha}} \rangle,
\
\langle \nu^K_{0}, \ind_{I_{x,\alpha}} \rangle
\leq
C K^{-\gamma},
\quad
\langle \mu^K_{t}, \ind_{I_{\pm x,\alpha}} \rangle
\leq
C K^{-\gamma}t^{-\frac34}, \end{align*} with probability $ \geq 1-CK^{-n} $. Inserting this into \eqref{eq:emrd:bd::} gives \begin{subequations} \label{eq:emrd:bd} \begin{align}
&
\operatorname{\mathbf{P}}\big(
|E'_K(t,0,x)| \leq C K^{-\gamma},
\ \forall t\leq T, x\in [a,a+1]
\big)
\geq
1 - C K^{-n}, \\
&
\operatorname{\mathbf{P}}\big(
|E'_K(0,t,x)| \leq C K^{-\gamma},
\ \forall t\leq T, x\in [a,a+1]
\big)
\geq
1 - C K^{-n}, \\
&
\operatorname{\mathbf{P}}\big(
|E_K(t,x)| \leq C K^{-\gamma} t^{-\frac{3}{4}},
\ \forall t\leq T, x\in [a,a+1]
\big)
\geq
1 - C K^{-n}. \end{align} \end{subequations}
(\textit{iv}) The strategy is to apply Lemma~\ref{lem:kol} for $ F(t,x) := K^{1/4} N_K(t,x) $.
With $ N_K(t,x) $ defined as in \eqref{eq:mgY}, for such $ F $ we have $ F(0,0)=0 $, so the condition~\eqref{eq:kol:0} holds trivially. Turning to verifying the condition~\eqref{eq:kol:holder}, we fix $ t<t' $ and $ x,x'\in \mathbb{R} $. With $ N_K(t,x) $ defined as in \eqref{eq:mgY}, we telescope $ F(t,x) - F(t',x') $ into $ F_1 + F_2 - F_3 $, where \begin{align*}
F_1 &:= K^{-1/4} \sum_{i} \int_{0}^{t} f_1(s,Y^K_i(s)) dB^K_i(s),&
&
F_2 := K^{-1/4} \sum_{i} \int_{0}^{t} f_2(s,Y^K_i(s)) dB^K_i(s), \\
F_3 &:= K^{-1/4} \sum_{i} \int_{t}^{t'} f_3(s,Y^K_i(s)) dB^K_i(s), \end{align*} with $ f_1(s,y) := p(t_K-s,y-x) - p(t_K-s,y-x') $, $ f_2(s,y) := p(t_K-s,y-x') - p(t'_K-s,y-x') $ and $ f_3(s,y) := p( t'_K-s, y-x' ) $. Similarly to the way we obtained \eqref{eq:J3:bd1}, here by the \ac{BDG} inequality we have \begin{align*}
\Vert F_1 \Vert_{k}^2
&\leq
C(k)
\int_{0}^{t} \Vert \langle \nu^K_{s}, f_1(s,\Cdot)^2 \rangle \Vert_{k/2} ds,&
&
\Vert F_2 \Vert_{k}^2
\leq
C(k)
\int_{0}^{t} \Vert \langle \nu^K_{s}, f_2(s,\Cdot)^2 \rangle \Vert_{k/2} ds, \\
\Vert F_3 \Vert_{k}^2
&\leq
C(k)
\int_{t}^{t'} \Vert \langle \nu^K_{s}, f_3(s,\Cdot)^2 \rangle \Vert_{k/2} ds, \end{align*} for any fixed $ k>1 $.
On the r.h.s., the kernel functions $ f_1,f_2,f_3 $ appear in square (i.e.\ power of two). We use \eqref{eq:p:Holdx}--\eqref{eq:p:Holdt} to replace `one power' of them with
$ C(t-s)^{-\frac34} |x-x'|^{\frac12} $,
$ C(t-s)^{-\frac34} |t-t'|^{\frac14} $ and $ C(t'-s)^{-\frac12} $, respectively, and then use \eqref{eq:emPY:bd} for $ \alpha=\frac34 $ to bound $ \Vert \langle \nu^K_{s}, f_j(s,\Cdot) \rangle \Vert_{k/2} $, $ j=1,2,3 $, thereby obtaining \begin{align*}
\Vert F_1 \Vert_{k}^2
&\leq
C(k)
\int_{0}^{t} |x-x'|^{\frac12} ((t-s)^{-\frac78}+(t-s)^{-\frac34} s^{-\frac18}) ds
\leq
C(k) |x-x'|^{\frac12}, \\
\Vert F_2 \Vert_{k}^2
&\leq
C(k)
\int_{0}^{t} |t-t'|^{\frac14} ((t-s)^{-\frac78}+(t-s)^{-\frac34} s^{-\frac18}) ds
\leq
C(k) |t-t'|^{\frac14}, \\
\Vert F_3 \Vert_{k}^2
&\leq
C(k)
\int_{t}^{t'} ((t'-s)^{-\frac58}+(t'-s)^{-\frac12} s^{-\frac18}) ds
\leq
C(k) |t-t'|^{\frac38}. \end{align*} We have thus verified the condition~\eqref{eq:kol:holder} for $ (\alpha_1,\alpha_2) = (\frac18,\frac14) $. Now apply Lemma~\ref{lem:kol} to obtain $
\Vert | N_K | _{L^\infty([0,T]\times[a,a+1])} \Vert_{k}
\leq
C(k) K^{-\frac14}. $ From this and Markov's inequality, we conclude \begin{align}\label{eq:mgY:bd}
\operatorname{\mathbf{P}} \Big(
|N_K(t,x)| \leq K^{-\gamma}, \ \forall t\leq T, x\in[a,a+1]
\Big)
\geq
1 - C(k) K^{-k(\frac14-\gamma)}
\geq
1 - C K^{-n}. \end{align} The term $ M_K(t,x) $ is bounded by a procedure similar to the preceding one. The only difference is that the estimate \eqref{eq:emP:bd}, unlike \eqref{eq:emPY:bd}, introduces a singularity of $ M_K(t,x) $ as $ t\to 0 $, so we set $ F(t,x) := t^{\frac34} K^{1/4}M_K(t,x) $ (instead of $ F(t,x) := K^{1/4}M_K(t,x) $). The extra prefactor $ t^{\frac34} $ preserves the moment estimate \eqref{eq:kol:holder} since $ t^{\frac34} $ is $ \alpha $-H\"{o}lder continuous for all $ \alpha<\frac34 $. Consequently, following the preceding argument we obtain \begin{align}\label{eq:mg:bd}
\operatorname{\mathbf{P}} \Big(
\sup_{t\in[0, T],x\in[a,a+1]} (t^{\frac34}|M_K(t,x)|) \leq K^{-\gamma}
\Big)
\geq
1 - C K^{-n}. \end{align}
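In more detail, the Markov step behind \eqref{eq:mgY:bd} reads: for any fixed $ k $,
\begin{align*}
\operatorname{\mathbf{P}}\Big( \sup_{t\leq T,\, x\in[a,a+1]} |N_K(t,x)| > K^{-\gamma} \Big)
\leq
K^{\gamma k}\, \big\Vert\, | N_K | _{L^\infty([0,T]\times[a,a+1])} \big\Vert_{k}^{k}
\leq
C(k)^k K^{-k(\frac14-\gamma)},
\end{align*}
and choosing $ k $ so large that $ k(\frac14-\gamma) \geq n $ (recall $ \gamma<\frac14 $) yields the stated bound $ 1-CK^{-n} $; the same step, applied to $ t^{\frac34}M_K(t,x) $, gives \eqref{eq:mg:bd}.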
Now, combining the bounds \eqref{eq:Gc:bd}, \eqref{eq:prd:bd}, \eqref{eq:emrd:bd} and \eqref{eq:mgY:bd}--\eqref{eq:mg:bd} from (\textit{i})--(\textit{iv}), we conclude the desired results \eqref{eq:rd:bd} and \eqref{eq:rdY:bd}. \end{proof}
\section{The Stefan Problem} \label{sect:Stef}
In this section, we develop the necessary \ac{PDE} tools.
As stated in Remark~\ref{rmk:Stefan}, we take the integral identity and integral equations \eqref{eq:Ucs:move}--\eqref{eq:zeq}, instead of \eqref{eq:PDE>}, as the definition of the Stefan problem.
To motivate such a definition, we first prove the following:
\begin{lemma}\label{lem:StefInt} Let $ (u_2,z_2) $ be a classical solution to the following \ac{PDE}, i.e., \begin{subequations}\label{eq:Stef} \begin{align}
\notag
&z_2 \in \mathcal{C}^1((0,\infty)) \cap \mathcal{C}([0,\infty)),
\
\text{nondecreasing}, \ z_2(0)=0, \\
\notag
&u_2 \in L^\infty(\overline{\mathcal{D}})\cap L^1(\overline{\mathcal{D}}),
\text{ and has a } \mathcal{C}^2\text{-extension onto a neighborhood of } \overline{\mathcal{D}}, \\
&
\quad\quad\quad\text{where } \mathcal{D} := \{(t,x): t>0, x \geq z_2(t) \}, \\
\label{eq:StefHE}
&\partial_t u_2 = \tfrac12 \partial_{xx} u_2, \quad \forall~ t>0, \ x>z_2(t), \\
&
\label{eq:StefDiri}
u_2(t,z_2(t)) = 2, \quad \forall t\geq 0, \\
&
\label{eq:Stefmv}
2\tfrac{d~}{dt}z_2(t) + \tfrac12 \partial_x u_2(t,z_2(t)) = 0, \quad \forall t> 0, \end{align} \end{subequations} Define the tail distribution function of $ u_2 $ as $ \widetilde{U}_2(t,x) := \int_x^\infty u_2(t,y)dy $. We have \begin{align}
\label{eq:Uc2:int}
&\widetilde{U}_2(t,x)
=
\int_0^\infty p^\text{N}(t,y,x) \widetilde{U}_2(0,y) dy
+ \int_0^t p^\text{N}(t-s,z_2(s),x) ds,
\quad
\forall t,x \in \mathbb{R} _+, \\
\label{eq:Stef:z}
&\int_{0}^\infty p(t,z_2(t)-y)
\big( \widetilde{U}_2(0,0)-\widetilde{U}_2(0,y) \big) dy
=
\int_0^t p(t-s,z_2(t)-z_2(s)) ds. \end{align} \end{lemma}
\begin{proof}
Instead of the tail distribution function $ \widetilde{U}_2(t,x) $, let us first consider the distribution function $ U_2(t,x) := \int_{z_2(t)}^{x} u_2(t,y) dy $. We adopt the convention that $ U_2(t,x)|_{x<z_2(t)}:=0 $.
By \eqref{eq:StefHE}, \eqref{eq:StefDiri}--\eqref{eq:Stefmv}, $ U_2(s,y) $ solves the heat equation in $ \{(s,y): s>0, y>z_2(s)\} $. With this, for any fixed $ t>0 $ and $ x\in \mathbb{R} $, we integrate Green's identity \begin{align*}
\tfrac12 \partial_y((\partial_y p)U_2-p(\partial_y U_2)) + \partial_s(p U_2) =0,
\
\text{ where } p = p(t-s,x-y), \ U_2=U_2(s,y), \end{align*} over $ \{(s,y):\varepsilon <s<t-\varepsilon,y>z(s)+\varepsilon\} $. Letting $ \varepsilon\to 0 $, and combining the result with $ U_2(s,z_2(s))=0 $ and \eqref{eq:StefDiri}, we obtain \begin{align}
\label{eq:U2:int}
U_2(t,x) = \int_{0}^\infty p(t,x-y) U_2(0,y)dy -\int_0^t p(t-s,x-z_2(s)) ds,
\quad
\forall t\in \mathbb{R} _+, \ x\in \mathbb{R} . \end{align} Note that the preceding derivation of~\eqref{eq:U2:int} applies to \emph{all} $ x \in \mathbb{R} $, including $ x <z_2(t) $. Setting $ x=z_2(t) $ in \eqref{eq:U2:int}, on the l.h.s.\ we have $ U_2(t,z_2(t))=0 $.
Further using $ U_2(0,y) =\widetilde{U}_2(0,0)-\widetilde{U}_2(0,y) $, we see that \eqref{eq:Stef:z} follows.
We now turn to showing \eqref{eq:Uc2:int}. A straightforward differentiation, following by using~\eqref{eq:StefDiri}--\eqref{eq:Stefmv}, gives \begin{align*}
\partial_t U_2(t,\infty)
=
\partial_t \int_{z_2(t)}^\infty u_2(t,x) dx
&=
-z'_2(t) u_2(t,z_2(t)) + \int_{z_2(t)}^\infty \frac{1}{2} \partial_{xx} u_2(t,x) dx \\
&
=-2z'_2(t) - \frac{1}{2} \partial_{x} u_2(t,z_2(t)) =0, \end{align*} so in particular $ U_2(0,\infty)=U_2(t,\infty) $. Consequently, \begin{align}
\label{eq:U22}
\widetilde{U}_2(t,x) = U_2(0,\infty)-U_2(t,x). \end{align}
Further, as $ U_2(t,x)|_{x\leq 0} =0 $, \begin{align}
\notag
U_2(t,x)
&=
U_2(t,x) + U_2(t,-x) \\
\label{eq:U2:pN}
&=
\int_{0}^\infty p^\text{N}(t,y,x) U_2(0,y)dy -\int_0^t p^\text{N}(t-s,z_2(s),x) ds,
\quad
\forall t,x\in \mathbb{R} _+. \end{align} Inserting~\eqref{eq:U2:pN} into the last term in \eqref{eq:U22} yields \begin{align*}
\widetilde{U}_2(t,x)
=
U_2(0,\infty)
-\int_{0}^\infty p^\text{N}(t,y,x) U_2(0,y)dy + \int_0^t p^\text{N}(t-s,z_2(s),x) ds,
\quad
\forall t,x\in \mathbb{R} _+. \end{align*} Further using $ U_2(0,\infty) = \int_0^\infty p^\text{N}(t,y,x) U_2(0,\infty) dy $ to write \begin{align*}
U_2(0,\infty) - \int_{0}^\infty p^\text{N}(t,y,x) U_2(0,y)dy
=
\int_{0}^\infty p^\text{N}(t,y,x) \widetilde{U}_2(0,y)dy, \end{align*} we see that \eqref{eq:Uc2:int} follows. \end{proof}
We next turn to the well-posedness of \eqref{eq:Stef:z}.
The existence of a solution to \eqref{eq:Stef:z} will be established in Lemma~\ref{lem:W:hydro}, Section~\ref{sect:mvbdy}, as a by-product of establishing the hydrodynamic limit of certain Atlas models.
Here we focus on the uniqueness and stability of \eqref{eq:Stef:z}. To this end, we consider $ w\in \mathcal{C}([0,T]) $ that satisfies \begin{equation} \label{eq:pStef:z}
\int_{0}^\infty p(t, w(t)-y) \big( \widetilde{U}_{\star}(\tfrac12,0)-\widetilde{U}_{\star}(\tfrac12,y)\big) dy
=
f(t,w(t)) + \int_0^t p(t-s, w(t)-w(s)) ds, \end{equation} where $ f \in L^\infty([0,T]\times \mathbb{R} ) $ is a generic perturbation. Define a seminorm \begin{equation}
\label{eq:seminorm}
|w|'_{[0,T]} := \sup\{w(t)-w(t'),~0 \leq t \leq t' \leq T\} \end{equation} that measures how far the given function is from being nondecreasing; in particular, $ |w|'_{[0,T]}=0 $ whenever $ w $ is nondecreasing on $ [0,T] $.
\begin{lemma} \label{lem:pStef} Fixing $ T<\infty $ and $ f_1(t,x),f_2(t,x) \in L^\infty([0,T]\times \mathbb{R} ) $, we consider $ w_1 $ and $ w_2 $ satisfying \eqref{eq:pStef:z} for $ f=f_1 $ and $ f=f_2 $, respectively.
Let $ L := \sup \{ |w_1(t)|, |w_2(t)| : t \leq T \}+1 $.
There exists $C_1=C_1(T,L)<\infty$ such that \begin{align*}
\sup_{0\leq t \leq T}(w_1(t)-w_2(t))
\leq
C_1 \sum_{i=1,2}
\big( |w_i(0)|+ | f_i | _{L^\infty([0,T] \times \mathbb{R} )} +|w_i|'_{[0,T]} \big), \end{align*} for all $ f_1,f_2 \in L^\infty([0,T]\times \mathbb{R} ) $ satisfying $
\sum_{i=1,2}
\big( |w_i(0)|+ | f_i | _{L^\infty([0,T] \times \mathbb{R} )} +|w_i|'_{[0,T]} \big)
\leq
\frac{1}{C_1}. $ \end{lemma} \noindent Indeed, when $ f_1=f_2=0 $, Lemma~\ref{lem:pStef} yields
\begin{corollary}\label{cor:unique} The solution to \eqref{eq:zeq} is unique. \end{corollary}
\begin{proof}[Proof of Lemma~\ref{lem:pStef}]
To simplify notations, let
$ \varepsilon:= |f_1|_{L^\infty([0,T] \times \mathbb{R} )} + |f_2 |_{L^\infty([0,T] \times \mathbb{R} )} $,
$ \varepsilon':=|w_1|'_{[0,T]}+|w_2|'_{[0,T]} $
and $ \varepsilon'':= |w_1(0)|+|w_2(0)| $.
Let \begin{align}
\label{eq:LHSof}
\Lambda(t,z)
&:=
\int_0^{\infty} p(t,z-y) (\widetilde{U}_{\star}(\tfrac12,0)-\widetilde{U}_{\star}(\tfrac12,y)) dy \\
&=
\int_{-\infty}^{z} p(t,y) (\widetilde{U}_{\star}(\tfrac12,0)-\widetilde{U}_{\star}(\tfrac12,z-y)) dy \end{align} denote the expression on the l.h.s.\ of \eqref{eq:pStef:z}. From the explicit expressions \eqref{eq:U1}--\eqref{eq:u1}, we have that $ \partial_z (-\widetilde{U}_{\star}(\frac12,z-y)) = u_1(\frac12,z-y) >0 $, $ \forall y\leq z $, so $ \partial_z \Lambda(t,z) > 0$, $ \forall z \geq 0 $. Consequently, there exists $c_1 = c_1(T,L)>0$ such that \begin{equation} \label{eq:lbpartialF}
\partial_z \Lambda(t,z) \geq c_1 >0, \quad \forall 0 \leq z \leq L, \ 0 \leq t \leq T. \end{equation} Setting $ C_1:=\frac{4}{c_1}\vee 1 $ and $ \delta:=C_1 (\varepsilon+\varepsilon'+\varepsilon'') \leq 1 $, we write $ w_2^{\delta}(t):=w_2(t)+\delta $ to simplify notations, and consider the first time $t^{*}:=\inf\{t \leq T: w_1(t) \geq w_2^{\delta}(t) \} $ when $ w_1 $ hits $ w^\delta_2 $.
Indeed, since $ C_1 \geq 1 $, we have $ w_1(0) \leq w_2(0)+|w_2(0)-w_1(0)| < w_2(0)+\delta $, so in particular $ t^* >0 $. Taking the difference of \eqref{eq:pStef:z} for $ (t,f)=(t^*,f_1) $ and for $ (t,f)=(t^*,f_2) $, we obtain \begin{equation}\label{eq:ubdelta}
\Lambda(t^{*},w_1(t^{*})) - \Lambda(t^{*},w_2(t^{*}))
=
\Lambda(t^{*},w_2^{\delta}(t^{*})) - \Lambda(t^{*},w_2(t^{*}))
\leq
\varepsilon + \int_0^{t^{*}} g^{*}(s) ds, \end{equation} where $ g^{*}(s):=p(t^{*}-s,w_1(t^{*})-w_1(s)) - p(t^{*}-s,w_2(t^{*})-w_2(s)) $.
Next, using $ w_1(s) \leq w_2(s)+\delta $, $ \forall s\leq t^* $, we have \begin{align}
\label{eq:w12}
w_1(t^*)-w_1(s) = w_2(t^*)+\delta-w_1(s)
\geq
w_2(t^*) - w_2(s). \end{align} To bound the function $ g^*(s) $, we consider separately the cases \textit{i}) $ w_2(t^*) - w_2(s) \geq 0 $; and \textit{ii}) $ w_2(t^*) - w_2(s) < 0 $. For case (\textit{i}), by \eqref{eq:w12} we have
$ |w_1(t^{*})-w_1(s)| \geq |w_2(t^{*})-w_2(s)| $, so $ g^*(s) \leq 0 $. For case (\textit{ii}), using $ 0 > w_2(t^*) - w_2(s) \geq -\varepsilon' $ we have $ g^*(s) \leq p(t^*-s,0) -p(t^*-s, \varepsilon') $.
Combining these bounds with the readily verified identity \begin{align}
\label{eq:pId}
\int_0^t p(t-s,x) ds = 2 t p(t,x)- 2|x| \widetilde{\Phi}(t,|x|), \end{align} we obtain \begin{align}
\notag
\int_0^{t^*} g^{*}(s)ds
&\leq
\int_0^{t^*} (p(t^*-s,0) -p(t^*-s, \varepsilon')) ds
\leq
2t^*p(t^*,0) -2t^*p(t^*, \varepsilon') + 2\varepsilon' \\
\label{eq:estgstar}
&=
\sqrt{\tfrac{2t^*}{\pi}} (1-\exp(-\tfrac{{\varepsilon'}^{2}}{2t^*}))+2
\varepsilon'
<
4 \varepsilon', \end{align} where we used $ (1-e^{-\xi}) \leq 2\sqrt{\xi} $, $ \forall \xi \in \mathbb{R} _+ $, in the last inequality. Now, if $ t^* \leq T $, combining \eqref{eq:estgstar} with \eqref{eq:ubdelta} and \eqref{eq:lbpartialF} yields $ \delta c_1 < \varepsilon + 4 \varepsilon'$, leading to a contradiction. Consequently, we must have $ t^* >T $. \end{proof}
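\noindent For completeness, one way to check the `readily verified' identity \eqref{eq:pId} is the following (writing, as in \eqref{eq:pId}, $ \widetilde{\Phi}(t,y) := \int_y^\infty p(t,y')dy' $ for the Gaussian tail): both sides vanish as $ t\downarrow 0 $, and, using $ \partial_t p = \frac12\partial_{xx}p $ and $ \partial_t \widetilde{\Phi}(t,y) = -\frac12\partial_y p(t,y) = \frac{y}{2t}p(t,y) $,
\begin{align*}
\tfrac{d~}{dt}\big( 2tp(t,x) - 2|x| \widetilde{\Phi}(t,|x|) \big)
=
2p(t,x) + \big( \tfrac{x^2}{t}-1 \big)p(t,x) - \tfrac{x^2}{t}p(t,x)
=
p(t,x)
=
\tfrac{d~}{dt}\int_0^t p(t-s,x) ds,
\end{align*}
since $ \int_0^t p(t-s,x)ds = \int_0^t p(s,x)ds $.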
We next establish a property of $ (\widetilde{U}_{\star}(t,x),z_{\star}(t)) $ that will be used toward the proof of Theorems~\ref{thm:aldous} and~\ref{thm:hydro}.
\begin{lemma}\label{lem:Us} For any $ (\widetilde{U}_{\star}(t,x),z_{\star}(t); t\geq\frac12) $ satisfying \eqref{eq:Ucs:move}--\eqref{eq:zeq}, we have \begin{align}
\label{eq:Usc:cnsv}
\widetilde{U}_{\star}(t,z_{\star}(t)) = \tfrac{4}{\sqrt{\pi}},
\quad
\forall t\geq \tfrac12. \end{align} \end{lemma}
\begin{remark} As $ \widetilde{U}_{\star}(t,x) $ represents the hydrodynamic limit of the scaled tail distribution function~$ \widetilde{U}_K(t,x) := \frac{1}{\sqrt{K}}\#\{ X^K_i(t) >x \} $, equation~\eqref{eq:Usc:cnsv} is a statement of \emph{conservation of particles} within the moving boundary phase, in the hydrodynamic limit. \end{remark}
\begin{proof} Fixing such $ (\widetilde{U}_{\star}(t,x),z_{\star}(t)) $, we \emph{define} \begin{align*}
U_{\star}(t+\tfrac12,x)
:=
\int_0^\infty p(t,x-y) (\widetilde{U}_{\star}(\tfrac12,0)-\widetilde{U}_{\star}(\tfrac12,y)) dy
- \int_0^t p(t-s,x-z_{\star}(s+\tfrac12)) ds. \end{align*} From this expression, it is straightforward to verify that, for any fixed $ T<\infty $, $ U_{\star}(\Cdot+\frac12,\Cdot) \in \mathcal{C}([0,T]\times \mathbb{R} ) \cap L^\infty([0,T] \times \mathbb{R} ) $ solves the heat equation on $ \{(t,x) : t>0,\ x< z_{\star}(t+\frac12) \} $.
Further, $ U_{\star}(\frac12,x)|_{x\leq 0}=0 $, and, by \eqref{eq:zeq}, \begin{align}
\label{eq:Us:z=0}
U_{\star}(t+\tfrac12,z_{\star}(t+\tfrac12))=0. \end{align} From these properties of $ U_{\star}(t+\frac12,x) $, by the uniqueness of the heat equation on the domain $ \{(t,x):t>\frac12,\ x< z_{\star}(t)\} $,
we conclude that $ U_{\star}(t+\frac12,x)|_{x\leq z_{\star}(t+\frac12)}=0 $. Therefore, \begin{align}
\notag
&
U_{\star}(\tfrac12+t,x)
= U_{\star}(\tfrac12+t,x) +U_{\star}(\tfrac12+t,-x) \\
&
\notag
=
\int_0^\infty p^\text{N}(t,y,x) (\widetilde{U}_{\star}(\tfrac12,0)-\widetilde{U}_{\star}(\tfrac12,y)) dy
-
\int_0^t p^\text{N}(t-s,z_{\star}(s+\tfrac12),x) ds, \\
\label{eq:Us:refl}
&
=
\widetilde{U}_{\star}(\tfrac12,0)
-\int_0^\infty p^\text{N}(t,y,x) \widetilde{U}_{\star}(\tfrac12,y) dy
-
\int_0^t p^\text{N}(t-s,z_{\star}(s+\tfrac12),x) ds,
\quad
\forall x \in \mathbb{R} _+. \end{align}
Next, set $ t=\frac12 $ in \eqref{eq:Ucs:move}, and write $ 2p(\frac12,y)=p^\text{N}(\frac12,0,y) $ to obtain \begin{align}
\label{eq:Ucs:itr}
\widetilde{U}_{\star}(\tfrac12,y) = p^\text{N}(\tfrac12,0,y) + \int_0^\frac12 p^\text{N}(\tfrac12-s,z_{\star}(s),y) ds. \end{align} Inserting this expression~\eqref{eq:Ucs:itr} of $ \widetilde{U}_{\star}(\frac12,y) $ into \eqref{eq:Us:refl}, followed by using the semigroup property $ \int_0^\infty p^\text{N}(t,y,x)p^\text{N}(\tfrac12,z,y) dy = p^\text{N}(t+\frac12,z,x) $, we obtain \begin{align*}
U_{\star}(t+\tfrac12,x)
&=
\widetilde{U}_{\star}(\tfrac12,0)
- p^\text{N}(t+\tfrac12,0,x)
- \int_0^{t+\frac12} p^\text{N}(t+\tfrac12-s,z_{\star}(s),x) ds \\
&= \widetilde{U}_{\star}(\tfrac12,0) - \widetilde{U}_{\star}(t+\tfrac12,x),
\quad
\forall x \in \mathbb{R} _+. \end{align*} Setting $ x=z_{\star}(t+\frac12) $ and using \eqref{eq:Us:z=0} on the l.h.s., we conclude the desired identity~\eqref{eq:Usc:cnsv}. \end{proof}
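\noindent For concreteness, the constant $ \tfrac{4}{\sqrt\pi} $ can be read off from \eqref{eq:Ucs:itr} and \eqref{eq:pId}: since $ z_{\star}(s)=0 $ for all $ s\leq\frac12 $ (the absorption phase),
\begin{align*}
\widetilde{U}_{\star}(\tfrac12,0)
=
p^\text{N}(\tfrac12,0,0) + \int_0^{\frac12} p^\text{N}(\tfrac12-s,0,0) ds
=
2p(\tfrac12,0) + 2\int_0^{\frac12} p(\tfrac12-s,0) ds
=
4p(\tfrac12,0)
=
\tfrac{4}{\sqrt\pi},
\end{align*}
where the third equality uses \eqref{eq:pId} with $ x=0 $.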
As will be needed toward the proof of Theorems~\ref{thm:aldous} and~\ref{thm:hydro}, we next show that $ z_{\star}(t) $ grows quadratically near $ t=\frac12 $.
\begin{lemma}\label{lem:zqd} For any solution $ z_{\star}(\Cdot+\frac12) $ to the integral equation~\eqref{eq:zeq}, we have \begin{align}
\label{eq:zdq}
\lim_{t\downarrow 0} \{ t^{-2}z_{\star}(t+\tfrac12) \} = \tfrac{1}{\sqrt{\pi}}. \end{align} \end{lemma}
\begin{remark} For sufficiently smooth solutions to the \ac{PDE} \eqref{eq:PDE>}, one can easily calculate $
\frac{d^2~}{dt^2}z_{\star}(\frac12) = -\frac18 \partial_{xxx} u_1(\frac12,0)
= \frac{2}{\sqrt{\pi}} $ by differentiating \eqref{eq:StefBC} and \eqref{eq:HE>}; since $ z_{\star}(\frac12)=0 $ and $ \frac{d~}{dt}z_{\star}(\frac12)=0 $, this is consistent with \eqref{eq:zdq}, as $ \lim_{t\downarrow 0} t^{-2}z_{\star}(t+\frac12) = \frac12\frac{d^2~}{dt^2}z_{\star}(\frac12) = \frac{1}{\sqrt{\pi}} $.
Here, as we take the integral equation \eqref{eq:zeq} as the definition of the Stefan problem, we prove Lemma~\ref{lem:zqd} by a different, indirect method, which does not assume the smoothness of $ z_{\star} $. \end{remark}
\begin{proof} We begin by deriving a useful identity. Write $
\int_0^t p(t-s,x) ds
=
\int_0^t \int_{-\infty}^x \partial_{yy} \Phi(t-s,y)\, dy\, ds, $ use $ -\partial_{yy} \Phi(t-s,y) = 2\partial_s \Phi(t-s,y) $, swap the double integrals, and integrate over $ s\in[0,t] $. With $ \Phi(0,y) = \ind_{[0,\infty)}(y) $, we obtain \begin{align}
\label{eq:useful}
\int_0^t p(t-s,x) ds
=
2\int_{-\infty}^{x} (\Phi(t,y)-\ind_{[0,\infty)}(y)) dy
=
2\int_{-\infty}^{-|x|} \Phi(t,y) dy. \end{align}
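\noindent Note that \eqref{eq:useful} agrees with \eqref{eq:pId}: integrating by parts and using $ y\,p(t,y)=-t\,\partial_y p(t,y) $ together with $ \Phi(t,-|x|)=\widetilde{\Phi}(t,|x|) $,
\begin{align*}
2\int_{-\infty}^{-|x|} \Phi(t,y) dy
=
-2|x|\Phi(t,-|x|) - 2\int_{-\infty}^{-|x|} y\, p(t,y) dy
=
2t p(t,x)- 2|x| \widetilde{\Phi}(t,|x|).
\end{align*}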
We now begin the proof of \eqref{eq:zdq}. Let $ \Lambda(t,x) $ be as in \eqref{eq:LHSof}. Recall from \eqref{eq:U1} that $ \partial_y \widetilde{U}_{\star}(\frac12,y) = -u_1(\frac12,y) $, for $ u_1(\frac12,y) $ defined in \eqref{eq:u1}.
Integrating by parts followed by a change of variable $ y \mapsto \frac{y}{\sqrt{t}} $ yields \begin{align}
\notag
\Lambda(t,x)
&=
\int_0^\infty \partial_y\big(\widetilde{U}_{\star}(\tfrac12,0)-\widetilde{U}_{\star}(\tfrac12,y)\big) \Phi(t,x-y) dy \\
\label{eq:LHSof:}
&=
\sqrt{t} \int_0^\infty u_1(\tfrac12,\sqrt{t} y) \Phi(1,\tfrac{x}{\sqrt{t}}-y) dy. \end{align} From the explicit expression~\eqref{eq:u1} of $ u_1(\frac12,y) $, we see that $ u_1(\frac12,\Cdot) \in \mathcal{C}^\infty( \mathbb{R} _+) \cap L^\infty( \mathbb{R} _+) $, and that $ u_1(\frac12, 0)=2 $, $ \partial_y u_1(\frac12, 0) = \partial_{yy}u_1(\frac12,0) =0 $, and $ -\partial_{yyy} u_1(\frac12,0) = \frac{16}{\sqrt{\pi}} =: a_3 $.
Using these properties to Taylor-expand $ u_1(\frac12,\sqrt{t}y) $ in \eqref{eq:LHSof:} yields \begin{align}
\label{eq:LHS:Taylor}
\Lambda(t,x)
=
t^{\frac12} \Lambda_0(\tfrac{x}{\sqrt{t}})
- t^{2} \Lambda_3(\tfrac{x}{\sqrt{t}})
+ t^{\frac52} \Lambda_4(t,x), \end{align} where $ \Lambda_4(t,x) $ is a \emph{bounded} remainder function
in the sense that $ \lim\limits_{t\downarrow 0} \sup\limits_{x\in \mathbb{R} }|\Lambda_4(t,x)| <\infty $, and \begin{align}
\label{eq:Lambda0}
\Lambda_0(x) &:= 2 \int_0^\infty \Phi(1,x-y) dy
=
2 \int_{-\infty}^x \Phi(1,y) dy, \\
\label{eq:Lambda3}
\Lambda_3(x) &:= \frac{a_3}{6} \int_0^\infty y^3 \Phi(1,x-y) dy. \end{align}
Inserting the expression~\eqref{eq:LHS:Taylor} into \eqref{eq:zeq}, and writing $ w(t) := z_{\star}(t+\frac12) $ to simplify notations, we obtain \begin{align}
\label{eq:qd:ineq:}
t^{\frac12} \Lambda_0(\tfrac{w(t)}{\sqrt{t}})
- t^{2} \Lambda_3(\tfrac{w(t)}{\sqrt{t}})
+ t^{\frac52} \Lambda_4(t,w(t))
=
\int_0^t p(t-s,w(t)-w(s)) ds. \end{align}
The strategy of the proof is to extract upper and lower bounds on $ \frac{w(t)}{\sqrt{t}} $ from \eqref{eq:qd:ineq:}.
We begin with the upper bound. On the r.h.s.\ of \eqref{eq:qd:ineq:}, using $ p(t-s,w(t)-w(s)) \leq p(t-s,0) $, followed by applying the identity~\eqref{eq:useful}, we have that \begin{align}
\label{eq:qd:ineq}
t^{\frac12} \Lambda_0(\tfrac{w(t)}{\sqrt{t}})
- t^{2} \Lambda_3(\tfrac{w(t)}{\sqrt{t}})
+ t^{\frac52} \Lambda_4(t,w(t))
\leq
t^{\frac12} \Lambda_0(0). \end{align}
Dividing \eqref{eq:qd:ineq} by $ t^{\frac12} $ and letting $ t\downarrow 0 $, and noting that $ w(t)\geq 0 $ so that $ \Lambda_0(\frac{w(t)}{\sqrt{t}}) \geq \Lambda_0(0) $, we conclude that $ \lim_{t\downarrow 0} \Lambda_0(\frac{w(t)}{\sqrt{t}}) = \Lambda_0(0) $.
As $ x\mapsto \Lambda_0(x) $ is strictly increasing, we must have $ \lim_{t\downarrow 0} \frac{w(t)}{\sqrt{t}} = 0 $. Now, dividing both sides of \eqref{eq:qd:ineq} by $ t^2 $, and letting $ t\downarrow 0 $ using $ \lim_{t\downarrow 0} \frac{w(t)}{\sqrt{t}} = 0 $, we further deduce that \begin{align}
\label{eq:wLambda3<}
\lim_{t\downarrow 0} t^{-\frac32} \big(\Lambda_0(\tfrac{w(t)}{\sqrt{t}})-\Lambda_0(0) \big)
- \Lambda_3(0)
\leq 0. \end{align} From the explicit expression~\eqref{eq:Lambda0} of $ \Lambda_0(x) $, we have that \begin{align}
\label{eq:Lambda0:diff}
\tfrac{d~}{dx} \Lambda_0(0) =1. \end{align} Using~\eqref{eq:Lambda0:diff} to Taylor-expand the expression $ \Lambda_0(\tfrac{w(t)}{\sqrt{t}}) $ in \eqref{eq:wLambda3<} to the first order, we thus conclude the desired upper bound $
\limsup_{t\downarrow 0} \frac{w(t)}{t^2} \leq \Lambda_3(0) = \frac{a_3}{16} = \frac{1}{\sqrt{\pi}} $.
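Here the value of $ \Lambda_3(0) $ is computed from \eqref{eq:Lambda3}: integrating by parts and using $ \int_0^\infty y^4 p(1,y) dy = \frac32 $,
\begin{align*}
\int_0^\infty y^3 \Phi(1,-y) dy
=
\tfrac14 \int_0^\infty y^4 p(1,y) dy
=
\tfrac38,
\qquad
\text{so}
\quad
\Lambda_3(0) = \tfrac{a_3}{6}\cdot\tfrac38 = \tfrac{a_3}{16} = \tfrac{1}{\sqrt\pi}.
\end{align*}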
Having established the desired upper bound on $ \frac{w(t)}{t^2} $, we now turn to the lower bound. Let $ b:=\liminf_{t\downarrow 0} \frac{w(t)}{t^2} $. Since $ 0\leq b\leq \frac{1}{\sqrt{\pi}}<\infty $, there exists $ t_n\downarrow 0 $ such that \begin{align}
\label{eq:liminf:b}
|\tfrac{w(t_n)}{t_n^2} - b| <\tfrac1n,
\quad
\tfrac{w(s)}{s^2} > b-\tfrac1n, \ \forall s\in (0,t_n]. \end{align} As $ t\mapsto w(t) $ is non-decreasing, by \eqref{eq:liminf:b} we have \begin{align*}
|w(t_n)-w(s)|
=
w(t_n)-w(s)
\leq
(bt^2_n+\tfrac{t^2_n}{n}) - (bs^2-\tfrac{s^2}{n})
\leq
b (t^2_n-s^2) + \tfrac{2t^2_n}{n},
\quad
\forall s\leq t_n. \end{align*} Taking the square of the preceding inequality further yields \begin{align}
\label{eq:wts}
|w(t_n)-w(s)|^2
\leq
(\tfrac{2t_n^2}n)^2 + \tfrac{4bt_n^2(t^2_n-s^2)}n + b^2(t_n^2-s^2)^2,
\quad
\forall s\leq t_n. \end{align} Use this inequality~\eqref{eq:wts} to write \begin{align*}
p(&t_n-s,w(t_n)-w(s))
\geq
p(t_n-s,\tfrac{2t_n^2}n)
\exp\Big(
-\tfrac{1}{2(t_n-s)} \Big(\tfrac{4bt_n^2(t^2_n-s^2)}n + b^2(t^2_n-s^2)^2 \Big)
\Big) \\
&=
p(t_n-s,\tfrac{2t_n^2}n)
\exp\Big(
-\tfrac{2bt_n^2(t_n+s)}n \Big)
\exp\Big(
-\tfrac{b^2}{2}(t_n-s)(t_n+s)^2
\Big) \\
&\geq
p(t_n-s,\tfrac{2t_n^2}n)
\exp\Big( -\tfrac{2bt_n^2(2t_n)}n \Big)
\exp\Big( - \tfrac{b^2}{2}(t_n-s)(2t_n)^2 \Big). \end{align*} Within the last expression, using $ e^{-\xi} \geq 1-\xi $ for $ \xi=\tfrac{b^2}{2}(t_n-s)(2t_n)^2 $, and using $
p(t_n-s,\tfrac{2t_n^2}n)
\exp( -\tfrac{2bt_n^2(2t_n)}n )
\leq
\frac{1}{\sqrt{t_n-s}}, $ we obtain \begin{align}
\label{eq:pwts}
p(t_n-s,w(t_n)-w(s))
\geq
p(t_n-s,\tfrac{2t_n^2}n)
\exp\big(-\tfrac{2bt_n^2(2t_n)}n \big)
- \tfrac{b^2}{2}(t_n-s)^{\frac12}(2t_n)^2. \end{align} Now, integrate \eqref{eq:pwts} over $ s\in[0,t_n] $, using the identity~\eqref{eq:useful} to obtain \begin{align}
\notag
\int_0^{t_n} p(t_n-s,w(t_n)-w(s)) ds
&\geq
e^{-\frac{2bt_n^2(2t_n)}n}
\int_0^{t_n} p(t_n-s,\tfrac{2t_n^2}n) ds
-
4b^2(t_n)^{\frac52} \\
\notag
&=e^{-\frac{4bt_n^3}n} \sqrt{t_n} \Lambda_0( -\tfrac{2t_n^2}{n\sqrt{t_n}} )
-
4b^2(t_n)^{\frac52} \\
\label{eq:llllll}
&\geq
\sqrt{t_n} \Lambda_0( -\tfrac{2t_n^2}{n\sqrt{t_n}} ) - C(t_n)^{\frac52}, \end{align} for some constant $ C<\infty $.
Now, set $ t=t_n $ in \eqref{eq:qd:ineq:} and combine the result with \eqref{eq:llllll}. After dividing both sides of the result by $ t_n^2 $ and letting $ n\to\infty $, we arrive at \begin{align}
\label{eq:rrrrr}
\lim_{n\to\infty}
(t_n)^{-\frac32} \big(
\Lambda_0(\tfrac{w(t_n)}{\sqrt{t_n}})
-\Lambda_0(-\tfrac{2}n (t_n)^\frac32 )
\big)
-
\Lambda_3(0) \geq 0. \end{align} Using \eqref{eq:Lambda0:diff} to Taylor-expand the expressions $ \Lambda_0(\tfrac{w(t_n)}{\sqrt{t_n}}) $ and $ \Lambda_0(-\tfrac{2}n (t_n)^\frac32 ) $, with\\ $ \lim_{n\to\infty} \frac{w(t_n)}{t_n^2} = b $ (by~\eqref{eq:liminf:b}), we conclude the desired lower bound $ b\geq \Lambda_3(0) =\frac{1}{\sqrt{\pi}} $. \end{proof}
\section{Proof of Theorem~\ref{thm:hydro}} \label{sect:hydro} Equipped with the tools developed previously, in this section we prove Theorem~\ref{thm:hydro}.
To this end, throughout this section we specialize $ (\phi^K_i(t):t\geq 0) $ to the push-the-laggard strategy~\eqref{eq:pushlaggard}.
Recalling that $ \tau_i $ denotes the absorption time of the $ i $-th particle $ X_i(t) $, we let \begin{align}
\label{eq:extt}
\tau_\text{ext} := \max\nolimits_{i}{\tau_i},
\quad
\tau^K_\text{ext} := K^{-1}\tau_\text{ext} \end{align} denote the extinction times (unscaled and scaled). Under the push-the-laggard strategy, Proposition~\ref{prop:intXY}\ref{enu:intX} gives \begin{align}
\label{eq:int:abs:}
\widetilde{U}_K(t,x) =
\widetilde{G}_K(t,x) + \int_{0}^{t\wedge\tau^K_\text{ext}} p^\text{N}(t-s,Z_K(s),x) ds + R_K(t,x). \end{align} We first establish a lower bound on the extinction time.
\begin{lemma}\label{lem:extT} For any fixed $ T,n<\infty $, there exists $ C=C(T,n)<\infty $ such that \begin{align}
\label{eq:extT}
\operatorname{\mathbf{P}}( \tau^K_\text{ext} > T ) \geq 1 -CK^{-n}. \end{align} \end{lemma}
\begin{proof} Consider the modified process $ (X^\text{ab}_i(t);t \geq 0)_{1\leq i\leq K} $ consisting of $ K $ independent Brownian motions starting at $ x=1 $ and absorbed once they reach $ x=0 $, and let $ \tau'_\text{ext} := \inf \{ t : X^\text{ab}_i(t)=0,\ \forall i \} $ denote the corresponding extinction time.
Under the natural coupling of $ (X^\text{ab}_i(t))_i $ and $ (X_i(t))_i $ (by letting them share the underlying Brownian motions), we clearly have $ \tau_\text{ext} \geq \tau'_\text{ext} $.
For the latter, it is straightforward to verify that \begin{align*}
\operatorname{\mathbf{P}}( \tau'_\text{ext} \leq KT )
=
\Big( \operatorname{\mathbf{P}}\Big( \inf_{t\leq T} (B(Kt)+1) \leq 0 \Big) \Big)^K
\leq
\exp(-\tfrac{1}{C(T)}K^{1/2}), \end{align*} where $ B(\Cdot) $ denotes a standard Brownian motion. From this the desired result follows. \end{proof}
\noindent By Lemma~\ref{lem:extT}, toward proving Theorem~\ref{thm:hydro} we may, without loss of generality, remove the localization $ \Cdot\wedge\tau^K_\text{ext} $ in \eqref{eq:int:abs:}.
Next, using the expression \eqref{eq:Gc:pN} of $ \widetilde{G}_K(t,x) $, from the heat kernel estimate \eqref{eq:p:Holdx} we have \begin{align}
&
\label{eq:GK:est}
|\widetilde{G}_K(t,x) - 2p(t,x)| \leq C(\alpha) K^{-\frac{\alpha}{2}} t^{-\frac{1+\alpha}{2}},&
&
\forall\alpha\in(0,1), \\
&
\label{eq:pN:est}
\int_0^t|p^\text{N}(t-s,x,z)-p^\text{N}(t-s,x,z')| ds
\leq
C(\alpha') |z-z'|^{\alpha'} t^{\frac{1-\alpha'}{2}},&
&
\forall \alpha' \in (0,1). \end{align}
For any fixed $ \gamma\in (0,\frac14) $ and $ \alpha'\in(0,1) $, taking the difference of \eqref{eq:Ucs} and \eqref{eq:int:abs:}, followed by using the estimates \eqref{eq:GK:est}--\eqref{eq:pN:est} and \eqref{eq:rd:bd}, we obtain \begin{align*}
&|\widetilde{U}_K(t,x)-\widetilde{U}_{\star}(t,x)| \\
\leq&
|\widetilde{G}_K(t,x)-2p(t,x)|
+
\int_0^t|p^\text{N}(t-s,x,Z_K(s))-p^\text{N}(t-s,x,z_{\star}(s))| ds + |R_K(t,x)| \\
\leq&
C(\gamma) t^{-\frac34} K^{-\gamma}
+
C(\gamma,\alpha') \sup_{s\leq T}|Z_K(s)-z_{\star}(s)|^{\alpha'},
\quad
\forall x\in \mathbb{R} , t\leq T, \end{align*} with probability $ \geq 1-C(n,T)K^{-n} $. From this we see that the hydrodynamic limit~\eqref{eq:hydro:U} of $ \widetilde{U}_K(t,x) $ follows immediately from the hydrodynamic limit~\eqref{eq:hydro:Z} of $ Z_K $.
Focusing on proving \eqref{eq:hydro:Z} hereafter, in the following we settle \eqref{eq:hydro:Z} in the absorption phase and the moving boundary phase separately.
For technical reasons, instead of using $ t_{\star}=\frac12 $ as the separation of these two phases, in the following we use $ \frac{1}{2}+ \frac17 K^{-2\gamma} $, where $ \gamma\in(0,\frac{1}{96}) $ is fixed.
More precisely, the desired hydrodynamic result~\eqref{eq:hydro:Z} follows immediately from the following proposition (by setting $ \beta=\gamma $ in Part~\ref{enu:Zhy:abs}):
\begin{proposition} \label{prop:Zhy} For any fixed $ \gamma<\gamma_1\in(0,\frac{1}{96}) $ and $ n<\infty $, there exists $ C=C(\gamma,\gamma_1,n)<\infty $ such that \begin{enumerate}[label=(\alph*)] \item \label{enu:Zhy:abs} for all $ \beta \leq 4\gamma_1 $ and $ K<\infty $, \begin{align}
\label{eq:Zhydro:abs:}
\operatorname{\mathbf{P}} \Big(
|Z_K(t)-z_{\star}(t)| \leq CK^{-\beta},
\
\forall t \in[0, \tfrac{1}{2}+ \tfrac17 K^{-2\beta}]
\Big)
\geq 1 - CK^{-n}; \end{align} \item \label{enu:Zhy:mvbdy} for all $ K<\infty $, \begin{align} \label{eq:Zhy:mvbdy}
\operatorname{\mathbf{P}} \Big(
|Z_K(t)-z_{\star}(t)| \leq CK^{-\gamma},
\
\forall t\in[\tfrac12 + \tfrac17 K^{-2\gamma}, T]
\Big)
\geq 1 - CK^{-n}. \end{align} \end{enumerate} \end{proposition} \noindent We settle Proposition~\ref{prop:Zhy}\ref{enu:Zhy:abs}--\ref{enu:Zhy:mvbdy} in Sections~\ref{sect:absrb}--\ref{sect:mvbdy} in the following, respectively. To this end, we fix $ \gamma<\gamma_1\in(0,\frac{1}{96}) $, $ n<\infty $ and $ T<\infty $, and, to simplify notations,
use $ C<\infty $ to denote a generic constant that depends only on $ \gamma,\gamma_1,n,T $.
\subsection{Proof of Proposition~\ref{prop:Zhy}\ref{enu:Zhy:abs}} \label{sect:absrb}
Fix $ \beta \leq 4\gamma_1 $. We begin with a reduction.
Since $ z_{\star}(t)|_{t\leq\frac12}=0 $, by Lemma~\ref{lem:zqd}, we have $ \sup_{t \leq \frac{1}{2}+ \frac17 K^{-2\beta}} |z_{\star}(t)| \leq CK^{-4\beta} \leq CK^{-\beta} $. From this, we see that it suffices to prove \begin{align} \label{eq:Zhydro:abs}
\operatorname{\mathbf{P}} \Big(
Z_K(t) \leq K^{-\beta},
\
\forall t \leq \tfrac{1}{2}+ \tfrac17 K^{-2\beta}
\Big)
\geq 1 - CK^{-n}. \end{align}
To the end of showing \eqref{eq:Zhydro:abs}, we recall the following classical result from \cite{feller71}.
\begin{lemma}[{\cite[Chapter X.5, Example (c)]{feller71}}] \label{Felconf} Let $ (B(t); t \geq 0) $ be a standard Brownian motion (starting from $ 0 $), and let $ 0<a<b<\infty $. Defining $ \rho(t,a,b) := \operatorname{\mathbf{P}} ( 0< B(s)+a < b,\forall s \leq t) $, we have \begin{equation}\label{eigenexp}
\rho(t,a,b)
=
\sum_{n=0}^{\infty}
\frac{4}{(2n+1) \pi}
\sin \Big( \frac{(2n+1) \pi a}{b} \Big)
\exp \Big( -\frac{(2n+1)^2 \pi^2}{2b^2}t \Big). \end{equation} \end{lemma}
\noindent With $ \beta\leq4\gamma_1<\frac{1}{24} $, we have $ 4\beta<\frac12 - 8\beta $. Fixing $ \alpha \in (4\beta, \frac12 - 8\beta) \subset (0,\frac12) $, we begin with a short-time estimate:
\begin{lemma} \label{lem:smtime} There exists $ C<\infty $ such that \begin{align}
\label{eq:sm}
\operatorname{\mathbf{P}} \Big( Z_K(t) \leq K^{-\alpha}, \ \forall t \leq K^{-2\alpha} \Big)
\geq 1-C K^{-n}. \end{align} \end{lemma}
\begin{proof} We consider first the modified process $ (\widehat{X}^{\text{ab}}_i(t); t \geq 0)_{i=1}^K $, which consists of $ K $ independent Brownian motions starting at $ x=1 $, and absorbed at $ x=0 $ and $ x=\frac{1}{2} K^{\frac12-\alpha} $.
Let $ \widehat{X}^{K,\text{ab}}_i(t) := \frac{1}{\sqrt{K}} \widehat{X}^{\text{ab}}_i(Kt) $ denote the scaled process, and consider \begin{align}
\label{eq:hatN}
\widehat{N}^\text{ab}
:=
\# \set{ i : 0<\widehat{X}^{K,\text{ab}}_i(t)<\tfrac12 K^{-\alpha}, \ \forall t \leq K^{-2\alpha} }, \end{align} the number of surviving $ \widehat{X}^{K,\text{ab}} $-particles of up to time $ K^{-2\alpha} $.
Let \begin{align*}
\rho^*_K := \rho(K^{-2\alpha},K^{-\frac12},\tfrac12 K^{-\alpha})
=
\operatorname{\mathbf{P}}\big( 0<\tfrac{1}{\sqrt{K}}(B(Kt)+1)<\tfrac12 K^{-\alpha}, \ \forall t \leq K^{-2\alpha} \big). \end{align*} From the definition~\eqref{eq:hatN} we see that $ \widehat{N}^\text{ab} $ is the sum of $ K $ i.i.d.\ Bernoulli$(\rho^*_K)$ random variables. Hence, by the Chernoff bound we have \begin{equation}\label{Chernoff}
\operatorname{\mathbf{P}} (\widehat{N}^\text{ab} \geq \tfrac{1}{2}K \rho^*_K )
\geq
1- \exp \big( -\tfrac{1}{8}K \rho^*_K \big). \end{equation}
Specialize \eqref{eigenexp} at $ (t,a,b) = (K^{-2\alpha}, K^{-\frac12}, \frac12 K^{-\alpha}) $ to obtain \begin{align*}
\rho^*_K
=
\sum_{n=0}^{\infty} \rho'_{K,n}
\exp \big( -2(2n+1)^2 \pi^2 \big),
\
\text{ where }
\rho'_{K,n} := \frac{4}{(2n+1) \pi} \sin ( 2(2n+1)\pi K^{\alpha-\frac12} ). \end{align*}
With $ \alpha<\frac12 $, we have
$ \lim_{K\to\infty} (K^{\frac12-\alpha}\rho'_{K,n}) = 8 $ and $ |\rho'_{K,n}| \leq 8 K^{\alpha-\frac12} $, and it is straightforward to show that \begin{align*}
\lim_{K\to\infty} (K^{\frac12-\alpha}\rho^*_K)
=
8 \sum_{n=0}^{\infty} \exp \big( -2(2n+1)^2 \pi^2 \big) > 0. \end{align*} Consequently, $ \rho^*_K \geq \frac{1}{C} K^{\alpha-\frac{1}{2}} $. Inserting this into \eqref{Chernoff}, we arrive at \begin{align*}
\operatorname{\mathbf{P}} (\widehat{N}^\text{ab} \geq \tfrac{1}{C}K^{\alpha+\frac{1}{2}} )
\geq
1- \exp(-\tfrac{1}{C} K^{\alpha+\frac{1}{2}} )
\geq
1 - CK^{-n}. \end{align*}
Next, we consider the process $ (X^{\text{ab}}_i(t);t\geq 0)_{i=1}^K $, consisting of $ K $ independent Brownian motions starting at $ x= 1 $ and absorbed only at $ x=0 $, coupled to $ (\widehat{X}^{\text{ab}}_i(t))_i $ by the natural coupling under which the $ i $-th particles share the same underlying driving Brownian motion.
Let $ X^{\text{ab},K}_i(t) := \frac{1}{\sqrt{K}} X^{\text{ab}}_i(Kt) $ denote the scaled process, let $
\Gamma := \{X^{\text{ab},K}_i(K^{-2\alpha}) :
0<X^{\text{ab},K}_i(t)<\frac12 K^{-\alpha}, \forall t \leq K^{-2\alpha}\} $ denote the set of all $ X^{\text{ab},K} $-particles that stay within $ (0,\frac12 K^{-\alpha}) $ for all $ t \leq K^{-2\alpha} $, and let $ N^{\text{ab}} := \#\Gamma $. We clearly have $ N^{\text{ab}} \geq \widehat{N}^\text{ab} $, and therefore \begin{equation} \label{compfZ}
\operatorname{\mathbf{P}} (N^{\text{ab}} \geq \tfrac{1}{C} K^{\alpha+\frac{1}{2}}) \geq 1 - C K^{-n}. \end{equation}
Now, couple $ (X^{\text{ab},K}(t)) $ and $ (X^K(t)) $ by the aforementioned natural coupling.
On the event $\{N^{\text{ab}} \geq \frac{1}{C} K^{\alpha+\frac{1}{2}}\}$, to move all $ X^K $-particles in $ \Gamma $ to the level $ x=K^{-\alpha} $ requires at least a drift of $
N^{\text{ab}} (\frac{1}{2}K^{-\alpha}) \geq \tfrac{1}{C} K^{\frac{1}{2}}, $ while the total amount of (scaled) drift at our disposal is $ K^{-2\alpha+\frac{1}{2}} $. This is less than $ \tfrac{1}{C} K^{\frac{1}{2}} $ for all large enough $ K $. Consequently, the desired result \eqref{eq:sm} follows from \eqref{compfZ}. \end{proof}
Equipped with the short-time estimate~\eqref{eq:sm}, we now return to showing \eqref{eq:Zhydro:abs}.
Consider the threshold function \begin{align}
z^*(t) = K^{-\alpha} \ind_{\{t\leq K^{-2\alpha}\}}
+ (\sqrt{t} K^{-\beta}) \ind_{\{t>K^{-2\alpha}\}}, \end{align} and the corresponding hitting time $ \tau:=\inf\{t \in \mathbb{R} _+ : Z_K(t) \geq z^*(t) \} $.
It suffices to show $ \operatorname{\mathbf{P}}( \tau > \frac{1}{2} + \frac{1}{7} K^{-2\beta} ) \geq 1 - CK^{-n} $. To this end, by Lemma~\ref{lem:smtime}, without loss of generality we assume $ \tau \in (K^{-2\alpha},1) $.
As the trajectory of $Z_{K}$ is continuous except when it hits $ 0 $, we have $Z_K(\tau) \geq z^{*}(\tau)$. Hence at time $ \tau $, no particle exists between $ 0 $ and $ z^*(\tau) $, or equivalently $ \widetilde{U}_K(\tau,z^{*}(\tau))=\widetilde{U}_K(\tau,0). $
With this, taking the difference of \eqref{eq:int:abs:} at $ x=z^*(\tau) $ and at $ x=0 $, and multiplying the result by $ \sqrt{ \frac{\pi \tau}{2} } $, we obtain \begin{align*}
h_1 = h_2 + \sqrt{ \tfrac{\pi \tau}{2} }(R_K(\tau,z^{*}(\tau)) - R_K(\tau,0)), \end{align*} where $ h_1 := \sqrt{ \frac{\pi \tau}{2} }( \widetilde{G}_K(\tau,0)-\widetilde{G}_K(\tau,z^*(\tau))) $, $ h_2 := \sqrt{ \frac{\pi \tau}{2} } \int_0^{\tau} f_2(s,Z_K(s),z^*(\tau)) ds $, and \begin{align}\label{eq:h2}
f_2(s,z,z') := p^\text{N}(\tau-s,z,z')-p^\text{N}(\tau-s,z,0). \end{align}
Further using \eqref{eq:rd:bd}, for fixed $ \delta\in (0,\frac{1-2\alpha}{4} - 4\beta) $, to control the remainder term $ (R_K(\tau,z^{*}) - R_K(\tau,0)) $, with $ \tau \geq K^{-2\alpha} $, we have \begin{align}
\label{eq:lrc}
h_1 \leq h_2 + C K^{ \frac{2\alpha-1}{4}+\delta }, \end{align} with probability $ 1 -CK^{-n} $.
Given the inequality~\eqref{eq:lrc}, the strategy of the proof is to extract the bound $ \tau \geq \frac12 + \frac{1}{7} K^{-2\beta} $ from \eqref{eq:lrc}. To this end, we next derive a lower bound on $ h_1 $ and an upper bound on $ h_2 $.
With $ \widetilde{G}_K(t,x) $ defined as in \eqref{eq:Gc}, we have \begin{align*}
h_1 = h_1(K^{-\beta}),
\
\text{ where }
h_1(a)
=
\sqrt{K\tau}
\int_0^\frac{1}{\sqrt{K\tau}}
\big( e^{-\frac{y^2}{2}} - \tfrac12 e^{-\frac{(y+a)^2}{2}} - \tfrac12 e^{-\frac{(y-a)^2}{2}} \big) dy. \end{align*} Taylor-expanding $ h_1(a) $ to the fifth order gives $
h_1(a) \geq a^2 h_{12} + a^4 h_{14} - Ca^6, $ where $ h_{12} := \sqrt{K\tau} \int_0^\frac{1}{\sqrt{K\tau}} e^{-y^2/2}(\frac12-\frac12 y^2) dy $ and $ h_{14} := \sqrt{K\tau} \int_0^\frac{1}{\sqrt{K\tau}} e^{-y^2/2}(-\frac{1}{8}+\frac{1}{4}y^2-\frac{1}{24}y^4) dy $.
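In more detail, writing $ \varphi(y):=e^{-y^2/2} $, the symmetric expansion
\begin{align*}
\varphi(y) - \tfrac12\varphi(y+a) - \tfrac12\varphi(y-a)
=
-\tfrac{a^2}{2}\varphi''(y) - \tfrac{a^4}{24}\varphi''''(y) + O(a^6),
\end{align*}
together with $ \varphi''(y)=(y^2-1)\varphi(y) $ and $ \varphi''''(y)=(y^4-6y^2+3)\varphi(y) $, produces the kernels $ (\frac12-\frac12 y^2)\varphi(y) $ and $ (-\frac18+\frac14 y^2-\frac1{24}y^4)\varphi(y) $ appearing in $ h_{12} $ and $ h_{14} $.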
Further Taylor-expanding $ h_{12} $ and $ h_{14} $ in $ \frac{1}{\sqrt{K\tau}} $ yields $ h_{12} \geq \frac12 - \frac{C}{K\tau} $ and $ h_{14} \geq -\frac{1}{8} - \frac{C}{K\tau} $, and therefore \begin{align}\label{eq:h1bd}
h_1
\geq
\tfrac12 a^2 - \tfrac{1}{8} a^4 - C a^2 ( \tfrac{1}{K\tau} + a^4)
\geq
\tfrac12 a^2 - \tfrac{1}{8} a^4 - C a^2 ( K^{2\alpha-1} + a^4),
\text{ for }
a= K^{-\beta}. \end{align}
Turning to estimating $ h_2 $, we first observe that the function $ f_2(s,z,z') $ as in \eqref{eq:h2} increases in $ z $, $ \forall z \leq z' $, as is readily verified by taking derivative as follows: \begin{align*}
&
\sqrt{2\pi(\tau-s)^3} \partial_z f_2(s,z,z') \\
=& z' \big(e^{-\frac{(z-z')^2}{2(\tau-s)}}-e^{-\frac{(z+z')^2}{2(\tau-s)}}\big) - z \big(e^{-\frac{(z-z')^2}{2(\tau-s)}}+e^{-\frac{(z+z')^2}{2(\tau-s)}}\big) + 2z e^{-\frac{z^2}{2(\tau-s)}} \\
\geq&
z \big(e^{-\frac{(z-z')^2}{2(\tau-s)}}-e^{-\frac{(z+z')^2}{2(\tau-s)}}\big) - z \big(e^{-\frac{(z-z')^2}{2(\tau-s)}}+e^{-\frac{(z+z')^2}{2(\tau-s)}}\big) + 2z e^{-\frac{z^2}{2(\tau-s)}} \geq 0. \end{align*} Now, since $ t\mapsto z^*(t) $ is increasing for all $ t \geq K^{-2\alpha} $, to obtain an upper bound on $ h_2 $ we replace $ Z_K(s) $ with $ z^*(\tau) $ for $ s \geq K^{-2\alpha} $.
Further, with $ \int_0^{K^{-2\alpha}} p^\text{N}(\tau-s,z,z') ds \leq C K^{-\alpha} $, we obtain $ h_2 \leq C K^{-\alpha} + \sqrt{ \frac{\pi \tau}{2} } \int_0^{\tau} f_2(s,z^*(\tau),z^*(\tau)) ds $.
With $ z^*(\tau) = K^{-\beta} \sqrt{\tau} $, the last integral is evaluated explicitly by using \eqref{eq:pId}, yielding \begin{align*}
h_{2} &\leq
\tau h_2(K^{-\beta}) + CK^{-\alpha},
\quad
\text{ where }
h_2(a)
= 1 + e^{-2a^2}-2e^{-a^2/2}
+ 2 a \int_{a}^{2a} e^{-y^2/2} dy. \end{align*} Taylor-expanding $ h_2(a) $ to the fifth order, we further obtain \begin{align}\label{eq:h2bd}
h_{2} \leq \tau (a^2 - \tfrac{7}{12} a^4 + Ca^6) + C K^{-\alpha},
\quad
\text{ for } a= K^{-\beta}. \end{align}
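In more detail, the relevant expansions are $ e^{-2a^2} = 1-2a^2+2a^4+O(a^6) $, $ -2e^{-a^2/2} = -2+a^2-\frac{a^4}{4}+O(a^6) $ and
\begin{align*}
2a\int_a^{2a} e^{-y^2/2} dy
=
2a\big( a - \tfrac{7a^3}{6} + O(a^5) \big)
=
2a^2 - \tfrac{7a^4}{3} + O(a^6);
\end{align*}
adding these to the leading $ 1 $ in the definition of $ h_2(a) $ gives $ h_2(a) = a^2 + (2-\frac14-\frac73)a^4 + O(a^6) = a^2 - \frac{7}{12}a^4 + O(a^6) $.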
Now, combining \eqref{eq:lrc}--\eqref{eq:h2bd}, we arrive at \begin{align}
\label{eq:t*bd}
\tau \geq
\frac{
\frac12 - \tfrac{1}{8} a^2 - C ( K^{2\alpha-1} + a^4
+a^{-2}K^{-\alpha} + a^{-2}K^{\frac{2\alpha-1}{4}+\delta})
}{
1 - \tfrac{7}{12} a^2 + Ca^4
},
\
\text{ for } a=K^{-\beta}. \end{align} With $ \alpha $ and $ \delta $ chosen as in the preceding, it is now straightforward to check that, for $ a=K^{-\beta} $, \begin{align*}
&\frac{
\frac12 - \tfrac{1}{8} a^2 - C ( K^{2\alpha-1} + a^4
+a^{-2}K^{-\alpha} + a^{-2}K^{\frac{2\alpha-1}{4}+\delta})
}{
1 - \tfrac{7}{12} a^2 + Ca^4
} \\
=&
\frac{ \frac12 - \tfrac{1}{8} a^2 }{ 1 - \tfrac{7}{12} a^2 } + (\text{ higher order terms }) \\
=&
\tfrac12 + \tfrac{1}{6} K^{-2\beta} + (\text{ higher order terms }). \end{align*} From this we conclude the desired result: $ \tau>\frac12 + \frac{1}{7} K^{-2\beta} $, with probability $ \geq 1-CK^{-n} $.
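Here the leading-order expansion behind the last display reads
\begin{align*}
\frac{ \frac12 - \tfrac{1}{8} a^2 }{ 1 - \tfrac{7}{12} a^2 }
=
\big( \tfrac12 - \tfrac18 a^2 \big)\big( 1 + \tfrac{7}{12}a^2 + O(a^4) \big)
=
\tfrac12 + \big( \tfrac{7}{24}-\tfrac18 \big)a^2 + O(a^4)
=
\tfrac12 + \tfrac16 a^2 + O(a^4),
\end{align*}
and, for $ a=K^{-\beta} $, the correction $ \frac16 K^{-2\beta} $ dominates all the displayed error terms once $ K $ is large, whence the threshold $ \frac12+\frac17 K^{-2\beta} $.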
\subsection{Proof of Proposition~\ref{prop:Zhy}\ref{enu:Zhy:mvbdy}} \label{sect:mvbdy}
To simplify notations, we let $ \sigma_K := \frac12+\frac17 K^{-2\gamma} $. Define the scaled distribution function of surviving $ X $-particles as \begin{align}
\label{eq:U}
U_K(t,x) := \tfrac{1}{\sqrt{K}} \# \{ 0<X^K_i(t) \leq x \}
=
\langle \mu^K_t, \ind_{(0,x]} \rangle, \end{align} and, to simplify notations, we let \begin{align}
\label{eq:Uss}
U_{\star\star}(x) := \widetilde{U}_{\star}(\tfrac12,0) - \widetilde{U}_{\star}(\tfrac12,x) = \int_0^x u_1(\tfrac12,y) dy, \end{align} where $ u_1(t,y) $ is defined in \eqref{eq:u1}.
Recall that $ \gamma<\gamma_1\in(0,\frac{1}{96}) $ are fixed. Further fixing $ \gamma_3<\gamma_2 \in (\gamma,\gamma_1) $, we begin with an estimate on $ U_K(t,x) $:
\begin{lemma}\label{lem:Ubd} There exists $ C<\infty $ such that \begin{align}
\label{eq:Ubd12}
\operatorname{\mathbf{P}}\Big(
|U_K(\tfrac12,x)-U_{\star\star}(x)| \leq CK^{-4\gamma_2},
\
\forall x\in \mathbb{R}
\Big)
\geq 1 - CK^{-n}, \\
\label{eq:Ubd}
\operatorname{\mathbf{P}}\Big(
|U_K(\sigma_K,x)-U_{\star\star}(x)| \leq K^{-\gamma},
\
\forall x\in \mathbb{R}
\Big)
\geq 1 - CK^{-n}. \end{align} \end{lemma}
\begin{proof} With $ U_K(t,x)=\widetilde{U}_K(t,0)-\widetilde{U}_K(t,x) $ and $ U_{\star\star}(x) $ defined in \eqref{eq:Uss}, we have that $
|U_K(t,x)-U_{\star\star}(x)| \leq 2 \sup_{y\in \mathbb{R} } |\widetilde{U}_K(t,y)-\widetilde{U}_{\star}(\frac12,y)|. $ To bound the r.h.s., we take the difference of the integral identities \eqref{eq:int:abs:} and \eqref{eq:Ucs} to obtain \begin{align}
\notag
|\widetilde{U}_K(t,&x)-\widetilde{U}_{\star}(\tfrac12,x)| \\
\label{eq:Uc12:esti:1}
\leq&
|\widetilde{G}_K(t,x)-2p(t,x)| + |2p(t,x)-2p(\tfrac12,x)| \\
\label{eq:Uc12:esti:2}
&
+\int_0^{t} |p^\text{N}(t-s,Z_K(s),x) - p^\text{N}(t-s,z_{\star}(s),x) | ds
+
\int_\frac12^t p^\text{N}(t-s,z_{\star}(s),x) ds \\
\label{eq:Uc12:esti:3}
&
+ |R_K(t,x)|. \end{align} We next bound the terms in \eqref{eq:Uc12:esti:1}--\eqref{eq:Uc12:esti:3} in turn: \begin{itemize} \item[-] Using \eqref{eq:GK:est} for $ \alpha= 8\gamma_2 $ yields
$ |\widetilde{G}_K(t,x)-2p(t,x)| \leq CK^{-4\gamma_2} $; \item[-] Using \eqref{eq:p:Holdt} for $ \alpha=1 $ gives
$ |2p(t,x)-2p(\tfrac12,x)| \leq C|\tfrac12-t|, $
$ \forall t \geq \frac12 $; \item[-] Using \eqref{eq:pN:est} for $ \alpha'=\frac{\gamma_2}{\gamma_1} $
and \eqref{eq:Zhydro:abs:} for $ \beta=4\gamma_1 $, we have\\
$
\int_0^{t} |p^\text{N}(t-s,Z_K(s),x) - p^\text{N}(t-s,z_{\star}(s),x) | ds
\leq
C \sup_{s\leq t} |Z_K(s)-z_{\star}(s)|^{\alpha'}
\leq
C K^{-4\gamma_2},
$\\
with probability $ \geq 1-CK^{-n} $; \item[-] Using $ p^\text{N}(t-s,z_{\star}(s),s) \leq \frac{2}{\sqrt{2\pi(t-s)}} $, we obtain
$ \int_\frac12^t p^\text{N}(t-s,z_{\star}(s),x) ds \leq \sqrt{\frac{2}{\pi}|t-\frac12|} $.
\item[-] Using \eqref{eq:rd:bd}, we have
$ |R_K(t,x)| \leq CK^{-4\gamma_2} $, $ \forall t\in[\frac12,\sigma_K] $, $ x\in \mathbb{R} $,
with probability $ \geq 1-CK^{-n} $. \end{itemize} \noindent Combining these bounds yields \begin{align}
\label{eq:UcUcs}
|\widetilde{U}_K(t,x)-\widetilde{U}_{\star}(\tfrac12,x)|
\leq
C K^{-4\gamma_2}+C|t-\tfrac12| + 2\sqrt{\tfrac{2}{\pi}|t-\tfrac12|},
\quad
\forall t\in [\tfrac12,\sigma_K], \end{align} with probability $ \geq 1-CK^{-n} $.
Substituting $ t=\frac12 $ into \eqref{eq:UcUcs} yields \eqref{eq:Ubd12}.
Similarly, substituting $ t=\sigma_K $ into \eqref{eq:UcUcs}, with $ |\sigma_K-\tfrac12| = \frac{K^{-2\gamma}}7 $, we have, with probability $ \geq 1-CK^{-n} $, \begin{align*}
|\widetilde{U}_K(\sigma_K,x)-\widetilde{U}_{\star}(\tfrac12,x)|
\leq
C K^{-4\gamma_2}+CK^{-2\gamma} + 2\sqrt{\tfrac{2}{7\pi}} K^{-\gamma}
<
K^{-\gamma}, \end{align*} for all $ K $ large enough. This concludes \eqref{eq:Ubd}. \end{proof}
Recall the definition of Atlas models from the beginning of Section~\ref{sect:Int}. Our strategy of proving Proposition~\ref{prop:Zhy}\ref{enu:Zhy:mvbdy} is to \emph{reduce} the problem of the particle system $ (X(s);s\geq \sigma_K) $ to a problem of certain Atlas models $ ( \overline{Y} (t):t\geq 0) $ and $ ( \underline{Y} (t):t\geq 0) $, constructed as follows.
To construct such Atlas models, recalling the expression of $ u_1(\frac12,x) $ from \eqref{eq:u1}, we define \begin{align}
\label{eq:uup}
\overline{u} (x) &:=
\left\{\begin{array}{l@{,}l}
u_1(\tfrac12, x) & \text{ when } x\geq K^{-\gamma},
\\
0 & \text{ when } x < K^{-\gamma},
\end{array}\right. \\
\label{eq:ulw}
\underline{u} (x) &:=
\left\{\begin{array}{l@{,}l}
u_1(\tfrac12, x) & \text{ when } x>0,
\\
u_1(\tfrac12,0) = 2 & \text{ when } -K^{-4\gamma_3} \leq x \leq 0,
\\
0 & \text{ when } x < -K^{-4\gamma_3}.
\end{array}\right. \end{align} Adopting the notation $ \PPP(f(x)) $ for a Poisson point process on $ \mathbb{R} $ with density $ f(x) $, for each $ K<\infty $ we let $ ( \overline{Y} (t;K):t\geq 0) $ and $ ( \underline{Y} (t;K):t\geq 0) $ be Atlas models starting from the following initial conditions \begin{align}
\label{eq:Yic:}
( \overline{Y} _i(0;K))_{i} \sim \PPP\big( \overline{u} (\tfrac{x}{\sqrt{K}}) \big),
\quad
( \underline{Y} _i(0;K))_{i} \sim \PPP\big( \underline{u} (\tfrac{x}{\sqrt{K}}) \big), \end{align} and let $ \overline{W} (t;K) :=\min_{i} \overline{Y} _i(t;K) $ and $ \underline{W} (t;K) := \min_{i} \underline{Y} _i(t;K) $ denote the corresponding laggards.
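For the reader's convenience, the construction just described is straightforward to simulate. The following Python sketch is an illustration only: the stand-in density, the spatial truncation, the time step and all numerical values are placeholder choices (in particular, the actual $ u_1(\frac12,\cdot) $ from \eqref{eq:u1} should replace the stand-in density), and the finite truncation plays no role in the proofs. It samples the initial condition \eqref{eq:Yic:} by thinning and then evolves the Atlas dynamics by an Euler--Maruyama step in which only the current laggard receives unit drift.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
K, M = 400, 3.0        # system size and spatial truncation window (illustrative)
eps = 0.05             # stand-in for the small cutoff K^{-gamma}

def u_bar(x):
    # Placeholder for u_1(1/2, x) 1_{x >= K^{-gamma}}; any density bounded by 2
    # serves the purpose of this illustration.
    return np.where(x >= eps, 2.0 * np.exp(-0.5 * x ** 2), 0.0)

# Initial condition (eq:Yic:): PPP with density u_bar(x / sqrt(K)) on [0, M sqrt(K)],
# sampled by thinning a homogeneous PPP of rate 2 (an upper bound on u_bar).
width = M * np.sqrt(K)
props = rng.uniform(0.0, width, size=rng.poisson(2.0 * width))
Y = np.sort(props[rng.uniform(0.0, 2.0, size=props.size) < u_bar(props / np.sqrt(K))])

# Atlas dynamics: unit drift on the current laggard, independent Brownian noise.
dt, T = 0.01, 5.0
for _ in range(int(T / dt)):
    drift = np.zeros_like(Y)
    drift[np.argmin(Y)] = 1.0
    Y = Y + drift * dt + np.sqrt(dt) * rng.standard_normal(Y.size)

W_bar = Y.min()                                   # laggard at (unscaled) time T
V_bar = lambda x: np.sum(Y <= x) / np.sqrt(K)     # scaled distribution function
\end{verbatim}
For moderate $ K $ one may then compare the resulting scaled distribution function with $ U_{\star\star} $, or track the laggard over time.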
\begin{remark} \label{rmk:Y:Kdep} The notations $ \overline{Y} _i(t;K) $, etc., are intended to highlight the dependence on $ K $ of the processes, as is manifest from \eqref{eq:Yic:}.
To simplify notation, however, hereafter we omit the dependence, and write $ \overline{Y} _i(t;K)= \overline{Y} _i(t) $, etc., unless otherwise noted. \end{remark}
\noindent We let $ \overline{Y}^{K} _i(t) := \frac{1}{\sqrt{K}} \overline{Y} _i(Kt) $ denote the scaled process, and similarly for $ \underline{Y}^{K} _i(t)$, $ \overline{W} _K(t) $ and $ \underline{W} _K(t) $.
Under these notations, equation~\eqref{eq:Yic:} translates into \begin{align}
\label{eq:Yic}
( \overline{Y}^{K} _i(0))_{i} \sim \PPP\big( \overline{u} (x) \big),
\quad
( \underline{Y}^{K} _i(0))_{i} \sim \PPP\big( \underline{u} (x) \big). \end{align} We let $ \overline{V} _K(t,x) := \frac{1}{\sqrt{K}} \#\{ \overline{Y}^{K} _i(t) \leq x \} $ and $ \underline{V} _K(t,x) := \frac{1}{\sqrt{K}} \#\{ \underline{Y}^{K} _i(t) \leq x \} $ denote the corresponding scaled distribution functions.
Having introduced the Atlas models $ \overline{Y} $ and $ \underline{Y} $, we next establish couplings that \emph{relate} these models to the relevant particle system $ X $.
Recall the definition of the extinction time $ \tau^K_\text{ext} $ from \eqref{eq:extt}. We let \begin{align}
\label{eq:abs>12}
\tau^K_\text{abs} := \inf\{ t>\sigma_K : Z_K(t)=0 \} \end{align} denote the first absorption time (scaled by $ K^{-1} $) after $ \sigma_K $.
\begin{lemma} \label{lem:couple} There exists a coupling of $ (X^K(s+\frac12);s\geq 0) $ and $ ( \underline{Y} ^K(s);s\geq 0) $ under which \begin{align}
\label{eq:cplLw}
\operatorname{\mathbf{P}} \big(
\underline{W} _K(s) \leq Z_K(\tfrac{1}{2} + s),
\
\forall s+\tfrac12 < \tau^K_\text{ext}
\big)
\geq
1 - CK^{-n}. \end{align}
Similarly, there exists a coupling of $ (X^K(s+\sigma_K);s\geq 0) $ and $ ( \overline{Y} ^K(s);s\geq 0) $ under which \begin{equation}
\label{eq:cplUp}
\operatorname{\mathbf{P}} \big(
\overline{W} _K(s) \geq Z_K(s+\sigma_K),
\
\forall s+\sigma_K < \tau^K_\text{ext} \wedge \tau^K_\text{abs}
\big)
\geq
1 - CK^{-n}. \end{equation} \end{lemma}
\noindent The proof requires a coupling result from~\cite{sarantsev15}: \begin{lemma}[{\cite[Corollary~$3.9$]{sarantsev15}}] \label{lem:sar:cmp} Let $ (Y_i(s);s\geq 0)_{i=1}^{m} $ and $ (Y'_i(s);s\geq 0)_{i=1}^{m'} $ be Atlas models, and let $ W(s) $ and $ W'(s) $ denote the corresponding laggards. If $ Y'(0) $ dominates $ Y(0) $ componentwise in the sense that \begin{align}
\label{eq:domin}
m' \leq m,
\quad
Y'_{i}(0) \geq Y_i (0),
\
i=1,\ldots,m', \end{align} then there exists a coupling of $ Y $ and $ Y' $ (for $ s>0 $) such that the dominance continues to hold for $ s>0 $, i.e.\ $ Y'_{i}(s) \geq Y_i (s) $, $ i=1,\ldots,m' $. In particular, $ W'(s) \geq W(s) $. \end{lemma}
\begin{proof}[Proof of Lemma~\ref{lem:couple}] As it is notationally more convenient for this proof, we work with the \emph{unscaled} processes $ X(s+\frac12K) $, $ X(s+\sigma_KK) $ and $ Y(s) $, and construct the coupling accordingly.
We consider first $ \underline{Y} $ and prove \eqref{eq:cplLw}.
At $ s=0 $, order the particles as $ ( \underline{W} (0)= \underline{Y} _{1}(0) \leq \underline{Y} _{2}(0) \leq \cdots) $, and $ (Z(\frac12K)= X_1(\frac{1}{2}K) \leq X_2(\frac{1}{2}K) \leq \cdots) $.
We claim that, regardless of the coupling, the following holds with probability $ 1-CK^{-n} $: \begin{align}
\label{eq:desired}
\#\{ \underline{Y} _{i}(0)\} \geq \#\{X_i(\tfrac{1}{2}K)\},
\quad
\text{and }
\underline{Y} _{i}(0) \leq X_i (\tfrac{1}{2}K),
\
\forall 1\leq i \leq \#\{ X_j(\tfrac{1}{2}K) \}. \end{align}
Recalling from \eqref{eq:U} that $ U_K(t,x) $
denotes the \emph{scaled} distribution function of $ X(t) $, with $ U_K(t,x)|_{x<0}=0 $, we see that \eqref{eq:desired} is equivalent to the following \begin{align} \label{eq:gaol}
\operatorname{\mathbf{P}}\big(
\underline{V} _K(0,x) \geq U_K(\tfrac{1}{2},x),
\
\forall x\in \mathbb{R} _+
\big)
\geq 1-C K^{-n}. \end{align}
To see why \eqref{eq:gaol} holds, with $ ( \underline{Y}^{K} _i(0))_i $ distributed as in \eqref{eq:Yic}, we note that $ x \mapsto \sqrt{K} \underline{V} _K(0,x) $, $ x \in [-K^{-4\gamma_3},\infty) $ is an inhomogeneous Poisson process with density $ \sqrt{K} \underline{u} (x) $. From this, it is standard (using Doob's maximal inequality and the \ac{BDG} inequality) to show that \begin{align}
\label{eq:Vlw:bd:}
\Big\Vert \sup_{x\in \mathbb{R} }
\Big| \underline{V} _K(0,x) - \int_{-K^{-4\gamma_3}}^{x} \underline{u} (y) dy \Big|
\ \Big\Vert_m
\leq
C(m) K^{-\frac14},
\quad
\forall m \geq 2. \end{align} Further, with $ \underline{u} $ defined in \eqref{eq:ulw}, we have \begin{align*}
\int_{-K^{-4\gamma_3}}^{x} \underline{u} (y) dy
=
U_{\star\star}(x) + 2K^{-4\gamma_3},
\quad
\forall x \geq 0. \end{align*} Inserting this into \eqref{eq:Vlw:bd:}, followed by using Markov's inequality
$ \operatorname{\mathbf{P}}( |\xi| > K^{-\frac18} ) \leq K^{\frac{m}{8}}\Ex(|\xi|^m) $ for $ m=8n $, we arrive at \begin{align}
\label{eq:Vlw:bd}
\operatorname{\mathbf{P}} \Big(
| \underline{V} _K(0,x) - U_{\star\star}(x) - 2K^{-4\gamma_3} |
\leq
K^{-\frac18},
\
\forall x \in \mathbb{R} _+
\Big)
\geq 1-CK^{-n}. \end{align} Combining \eqref{eq:Vlw:bd} and \eqref{eq:Ubd12} yields \begin{align}
\label{eq:VlwU}
\underline{V} _K(0,x) - U_K(\tfrac12,x)
\geq
-K^{-\frac18}- CK^{-4\gamma_2}+2K^{-4\gamma_3},
\quad
\forall x \in \mathbb{R} _+, \end{align} with probability $ \geq 1-CK^{-n} $. With $ \gamma_3<\gamma_2<\frac1{96} $, the r.h.s.\ of \eqref{eq:VlwU} is positive for all $ K $ large enough, so \eqref{eq:gaol} holds.
Assuming the event~\eqref{eq:desired} holds, we proceed to construct the coupling for $ s>0 $.
Let $\tau_1:=\inf\{ t \geq \frac12K: Z(t)=0 \} $ be the first absorption time after time $ \frac12K $. For $ s \in [0,\tau_1-\frac12K) $, both processes $ \underline{Y} (s) $ and $ X(s+\frac12K) $ evolve as Atlas models. Hence, by Lemma~\ref{lem:sar:cmp} for $ (Y(s),Y'(s))=( \underline{Y} (s),X(s+\frac12K)) $, we have a coupling such that \begin{equation*}
\underline{Y} _{i}(s) \leq X_{i} (s+\tfrac12K)
\quad
\forall
1 \leq i \leq \#\{X_j(\tfrac{1}{2}K)\},
\
s \in [0,\tau_1-\tfrac12K). \end{equation*} At time $ t=\tau_1 $, the system $ X $ \emph{loses} a particle, so by reordering $ ( \underline{Y} _i(\tau_1-\frac12K))_i $ and $ (X_i(\tau_1))_i $, we retain the type of dominance as in \eqref{eq:desired}. Based on this we iterate the prescribed procedure up to the second absorption $ \tau_2 := \inf\{ s>\tau_1: Z(s) =0 \} $. As absorption occurs at most $ K $ times, the iteration procedure yields the desired coupling until the extinction time $ \tau_\text{ext} $. We have thus constructed a coupling of $ ( \underline{Y} (s);s\geq 0) $ and $ (X(s+\frac12K):s\geq 0) $ under which \eqref{eq:cplLw} holds.
We now turn to $ \overline{Y} $ and construct the analogous coupling of $ ( \overline{Y} (s);s\geq 0) $ and $ (X(s+\sigma_KK):s\geq 0) $. Similarly to \eqref{eq:Vlw:bd:}, for $ \overline{V} _K(0,x) $ we have that \begin{align}
\label{eq:Vlw:::}
\operatorname{\mathbf{P}}\Big(
\Big| \overline{V} _K(0,x) - \int_{K^{-\gamma}}^x u_1(\tfrac12,y) dy \Big|
\leq
K^{-\frac18},
\
\forall x \geq K^{-\gamma}
\Big)
\geq 1-CK^{-n}. \end{align} As seen from the expression~\eqref{eq:u1}, $ u_1(\frac12,0)=2 $ and $ x\mapsto u_1(\frac12,x) $ is smooth with bounded derivatives, so in particular \begin{align*}
\Big|
\int_{K^{-\gamma}}^x u_1(\tfrac12,y)dy
- (U_{\star\star}(x)-2K^{-\gamma})
\Big|
\leq
C K^{-2\gamma},
\quad
\forall x \geq K^{-\gamma}. \end{align*} Inserting this estimate into \eqref{eq:Vlw:::}, and combining the result with \eqref{eq:Ubd}, we obtain that, with probability $ \geq 1-CK^{-n} $, \begin{align*}
\overline{V} _K(0,x)
\leq
U_K(\sigma_K,x) - 2K^{-\gamma} + K^{-\gamma} + CK^{-2\gamma}
\leq
U_K(\sigma_K,x),
\quad
\forall x \geq K^{-\gamma}, \end{align*}
for all $ K $ large enough. This together with $ \overline{V} _K(0,x)|_{x<K^{-\gamma}}=0 $ yields the following dominance condition: \begin{align}
\label{eq:desired:}
\#\{ \overline{Y} _{i}(0)\} \leq \#\{X_i(\sigma_KK)\},
\quad
\text{and }
\overline{Y} _{i}(0) \geq X_i (\sigma_KK),
\
\forall 1\leq i \leq \#\{ \overline{Y} _j(0) \}, \end{align} with probability $ \geq 1 - CK^{-n} $. Based on this, we construct the coupling for $ \overline{Y} $ and $ X $ similarly to the preceding construction.
Unlike in the preceding case, however, when an absorption occurs, dominance properties of the type~\eqref{eq:desired:} may be destroyed. Hence here we obtain the coupling with the desired property only up to the first absorption time, as in \eqref{eq:cplUp}. \end{proof}
We see from Lemma~\ref{lem:couple} that $ \overline{W} _K $ and $ \underline{W} _K $ serve as suitable upper and lower bounds for $ Z_K $.
With this, we now turn our attention to the Atlas models $ \overline{Y} $ and $ \underline{Y} $, and aim at establishing the hydrodynamic limits of $ \overline{W} _K $ and $ \underline{W} _K $.
To this end, recalling from \eqref{eq:seminorm} the definition of $ |\Cdot|'_{[0,T]} $ and that $ T<\infty $ is fixed, we begin by establishing the following estimates on
$ | \overline{W} _K|'_{[0,T]} $ and $ | \underline{W} _K|'_{[0,T]} $.
\begin{lemma}\label{lem:Znondecr} There exists $ C<\infty $ such that \begin{align}
\label{eq:Wup:nond}
\operatorname{\mathbf{P}}( | \overline{W} _K|'_{[0,T]} \leq CK^{-\frac18} ) &\geq 1 -CK^{-n}, \\
\label{eq:Wlw:nond}
\operatorname{\mathbf{P}}( | \underline{W} _K|'_{[0,T]} \leq CK^{-\frac18} ) &\geq 1 -CK^{-n}. \end{align} \end{lemma}
\begin{proof} The proofs of \eqref{eq:Wup:nond} and \eqref{eq:Wlw:nond} are similar, and we work out only the former here.
At any given time $ s\in \mathbb{R} _+ $, let us \emph{order} the $ \overline{Y} $-particles as $ \overline{W} (s) = \overline{Y} _1(s) \leq \overline{Y} _2(s) \leq \ldots \leq \overline{Y} _{N}(s) $, where $ N:= \#\{ \overline{Y} _j(0)\} $, and let $ \overline{G} _i(s) := \overline{Y} _{i+1}(s)- \overline{Y} _{i}(s) $ denote the corresponding gap process.
We adopt the convention that $ \overline{G} _i(s) := \infty $ if $ i+1 > N $, so that $ \overline{G} (s) := ( \overline{G} _i(s))_{i=1}^\infty $ is $ [0,\infty]^\infty $-valued.
We begin with a stochastic comparison of the gap process $ \overline{G} (s) $. More precisely, given any $ [0,\infty]^\infty $-valued random vectors $ \xi $ and $ \zeta $, we say $ \xi $ stochastically dominates $ \zeta $, denoted $ \xi \succeq \zeta $, if there exists a coupling of $ \xi $ and $ \zeta $ under which $ \xi_i \geq \zeta_i $, $ i=1,2,\ldots $.
Since $ ( \overline{Y} _i(0)) $ is distributed as in \eqref{eq:Yic}, with $ \overline{u} (x) \leq 2 $, $ \forall x\in \mathbb{R} $, we have that \begin{align}
\label{eq:G0>G}
\overline{G} (0) \succeq \bigotimes_{i=1}^\infty \Exp(2). \end{align} By \cite[Theorem~4.7]{sarantsev14}, for any Atlas model satisfying the dominance property~\eqref{eq:G0>G}, the dominance will continue to hold for $ s>0 $, i.e., $ \overline{G} (s) \succeq \bigotimes_{i=1}^\infty \Exp(2) $.
(Theorem~4.7 of \cite{sarantsev14} does not state $ \overline{G} (s) \succeq \bigotimes_{i=1}^\infty \Exp(2) $ explicitly, but the statement appears in the first line of the proof, wherein $ \pi = \bigotimes_{i=1}^\infty \Exp(2) $, c.f., \cite[Example~1]{sarantsev14} and \cite{pal08}.)
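As a purely numerical illustration of this dominance (not used in the argument below), one may sample a Poisson point process whose density is bounded by $ 2 $ and compare the empirical survival function of its gaps with the $ \Exp(2) $ tail $ e^{-2t} $; stochastic dominance predicts that the former lies above the latter for every $ t $, up to Monte Carlo and boundary effects. In the Python sketch below the intensity is a placeholder standing in for the rescaled density of \eqref{eq:Yic:}.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)

def gaps_of_ppp(intensity, upper, width):
    # Gaps of an inhomogeneous PPP on [0, width], sampled by thinning a
    # homogeneous PPP of rate `upper` (an upper bound on `intensity`).
    pts = np.sort(rng.uniform(0.0, width, size=rng.poisson(upper * width)))
    pts = pts[rng.uniform(0.0, upper, size=pts.size) < intensity(pts)]
    return np.diff(pts)

intensity = lambda x: 2.0 / (1.0 + 0.02 * x)     # any density <= 2 will do here
gaps = np.concatenate([gaps_of_ppp(intensity, 2.0, 200.0) for _ in range(200)])

for t in [0.1, 0.5, 1.0, 2.0]:
    # Empirical P(gap > t) versus the Exp(2) tail exp(-2t); the former dominates.
    print(t, (gaps > t).mean(), np.exp(-2.0 * t))
\end{verbatim}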
Having established the stochastic comparison of $ \overline{G} (s) $, we now return to estimating $ | \overline{W} _K|'_{[0,T]} $.
The seminorm $ |\Cdot|'_{[0,T]} $, defined in \eqref{eq:seminorm}, measures the extent to which a given function fails to be nondecreasing. With the aim of bounding $ | \overline{W} _K|'_{[0,T]} $, we fix $ s_*\in[0,T] $, and begin by bounding the quantity \begin{align}
\label{eq:Wlw:nond:}
\sup_{s\in[s_*,T]} ( \overline{W} _K(s_*) - \overline{W} _K(s)). \end{align} We consider an \textbf{infinite Atlas model} $ (Y^*_i(s);t\geq 0)_{i=1}^\infty $, which is defined analogously to \eqref{eq:alt} via the following stochastic differential equations \begin{align}
\label{eq:infAtlas}
dY^*_i(s)= \ind_\Set{ Y^*_i(s)=W^*(s)} ds + dB_i(s), \quad i=1,2,\ldots,
\quad
W^*(s) := \min\nolimits_{i=1}^\infty\{ Y^*_i(s) \}, \end{align} with the following initial condition \begin{align}
\label{eq:infAtl:ic}
Y^*_1(0) := \overline{Y} _1(Ks_*),
\text{ and, independently }
(Y^*_{i+1}(0) - Y^*_{i}(0))_{i=1}^\infty \sim \bigotimes_{i=1}^\infty \Exp(2). \end{align} General well-posedness conditions for \eqref{eq:infAtlas} are studied in \cite{ichiba13,shkolnikov11}. In particular, the distribution~\eqref{eq:infAtl:ic} is an admissible initial condition, and is in fact a stationary distribution of the gaps \cite{pal08}.
Under such stationary gap distribution, the laggard $ W^*(s) $ remains very close to a constant under diffusive scaling. More precisely, letting $ W^*_K(s) := \frac{1}{\sqrt{K}} W^*(sK) $, by \cite[Proposition~$2.3$, Remark~$2.4$]{dembo15}, we have \begin{align}
\label{eq:DT}
\operatorname{\mathbf{P}} \Big( \sup_{ s\in[s_*, T] }|W^*_K(s)-W^*_K(0)| \leq K^{-\eta} \Big)
\geq 1-C(\eta)K^{-n-2}, \end{align} for any fixed $ \eta\in(0,\frac14) $.
In view of the bound \eqref{eq:DT}, the idea of bounding the quantity~\eqref{eq:Wlw:nond:} is to couple $ (Y^*(s);s\geq 0) $ and $ ( \overline{Y} (s+s_*K);s\geq 0) $.
As shown previously, $ \overline{G} (Ks_*) \succeq \bigotimes_{i=1}^\infty \Exp(2) $. With $ (Y^*_i(0))_i $ distributed as in \eqref{eq:infAtl:ic}, we couple $ ( \overline{Y} _i(Ks_*))_i $ and $ (Y^*_i(0))_i $ in such a way that \begin{align}
\label{eq:domin:}
Y^*_i(0) \leq \overline{Y} _i(Ks_*),
\quad
i=1,2,\ldots, \#\{ \overline{Y} _i(Ks_*) \}. \end{align} Equation~\eqref{eq:domin:} gives a generalization of the dominance condition~\eqref{eq:domin} to the case where $ m=\infty $.
For such a generalization we have the analogous coupling result from \cite[Corollary~$3.9$, Remark~$9$]{sarantsev15}, which gives a coupling of $ (Y^*(s);s\geq 0) $ and $ ( \overline{Y} (s+s_*K);s\geq 0) $ such that \begin{align}
\label{eq:Wcmp}
W^*(s) \leq \overline{W} (s+s_*K), \ \forall s \in \mathbb{R} _+. \end{align}
Combining \eqref{eq:DT} for $ \eta=\frac18 $ and \eqref{eq:Wcmp}, together with $ W^*(0)= \overline{W} (s_*K) $ (by \eqref{eq:infAtl:ic}), we obtain \begin{align}
\label{eq:nondecr:Z}
\operatorname{\mathbf{P}} \Big(
\sup_{s\in [s_*,T] }
( \overline{W} _K(s_*) - \overline{W} _K(s)) \leq K^{-\frac18 }
\Big)
\geq 1-CK^{-n-2}. \end{align}
Having established the bound \eqref{eq:nondecr:Z} for fixed $ s_*\in[0,T] $, we now take the union bound of \eqref{eq:nondecr:Z} over $ s_*=s_\ell:=K^{-2}T\ell $, $ 1 \leq \ell \leq K^{2} $, to obtain \begin{equation} \label{eq:discreteZK}
\operatorname{\mathbf{P}} \Big(
\sup_{s\in [s',T] }
( \overline{W} _K(s') - \overline{W} _K(s)) \leq K^{-\frac18 },
\
\forall s'=s_1,s_2,\ldots
\Big)
\geq
1- C K^{-n}. \end{equation} To pass from the `discrete time' $ s'=s_1,s_2,\ldots $ to $ s'\in[0,T] $, adopting the same procedure we used for obtaining \eqref{eq:driftBMest}, we obtain the following continuity estimate: \begin{align} \label{eq:gapZK}
\operatorname{\mathbf{P}} \Big(
\sup_{s \in [s_\ell,s_{\ell+1}]} | \overline{W} _K(s)- \overline{W} _K(s_\ell)|
\leq
K^{-\frac18},
\
1 \leq \ell \leq K^{2}
\Big)
\geq
1 -C K^{-n}. \end{align} Combining \eqref{eq:discreteZK}--\eqref{eq:gapZK} yields \begin{align*}
\operatorname{\mathbf{P}} \Big(
\sup_{s'<s\in[0,T]} ( \overline{W} _K(s')- \overline{W} _K(s))
\leq
2K^{-\frac18}
\Big)
\geq
1 -C K^{-n}. \end{align*} This concludes the desired result \eqref{eq:Wup:nond}. \end{proof}
We next establish upper bounds on $ | \overline{W} _K| $ and $ | \underline{W} _K| $.
\begin{lemma}\label{lem:Zupbd} There exists $ C<\infty $ and a constant $ L=L(T)<\infty $ such that \begin{align}
\label{eq:Wup:bd}
\operatorname{\mathbf{P}}( | \overline{W} _K(t)| \leq L, \ \forall t\leq T ) \geq 1 -CK^{-n}, \\
\label{eq:Wlw:bd}
\operatorname{\mathbf{P}}( | \underline{W} _K(t)| \leq L, \ \forall t\leq T ) \geq 1 -CK^{-n}. \end{align} \end{lemma}
\begin{proof} We first establish \eqref{eq:Wup:bd}.
The first step is to derive an integral equation for $ \overline{W} _K $. Recalling that $ \overline{V} _K(t,x) $ denotes the scaled distribution function of $ \overline{Y} $, we apply Lemma~\ref{lem:int}\ref{enu:intY} for $ Y= \overline{Y} $ to obtain the following integral identity \begin{align}
\label{eq:Yup:int}
\overline{V} _K(t,x)
=
\int_0^\infty p(t,x-y) \overline{V} _K(0,y) dy
- \int_{0}^{t} p(t-s,x- \overline{W} _K(s)) ds
+ R'_K(t,x). \end{align} Note that the conditions~\eqref{eq:D*}--\eqref{eq:Dan} hold for $ \overline{Y} (0) $, which is distributed as in \eqref{eq:Yic}.
Using the approximation \eqref{eq:Vlw:bd}, we have \begin{align}
\label{eq:Vupint}
\Big| \int_0^\infty p(t,x-y) \overline{V} _K(0,y) dy - \int_0^t p(t,x-y) U_{\star\star}(y) dy \Big|
\leq
C K^{-4\gamma_2}, \end{align} with probability $ \geq 1 - CK^{-n} $. Using \eqref{eq:Vupint} and \eqref{eq:rd:bd} in \eqref{eq:Yup:int}, we rewrite the integral identity as \begin{align}
\label{eq:Yup:int:}
\overline{V} _K(t,x)
=
\int_0^\infty p(t,x-y) U_{\star\star}(y) dy
- \int_{0}^{t} p(t-s,x- \overline{W} _K(s)) ds
+ \overline{F}'_K(t,x), \end{align} for some $ \overline{F}'_K(t,x) $ such that \begin{align}
\label{eq:Yup:Fk'}
\operatorname{\mathbf{P}}\Big( |\overline{F}'_K|_{L^\infty([0,T]\times \mathbb{R} )} \leq C K^{-4\gamma_2} \Big)
\geq 1-CK^{-n}. \end{align} Further, with $ ( \overline{Y} ^K_i(0)) $ distributed as in \eqref{eq:Yic}, it is standard to verify that \begin{align}
\label{eq:Yup:Wuploc}
\operatorname{\mathbf{P}}\Big( | \overline{W} _K(0)| \leq CK^{-4\gamma_3} \Big)
\geq 1-CK^{-n}. \end{align} By definition,
$
\overline{V} _K(t, \overline{W} _K(t))
= \frac{1}{\sqrt{K}} \#\big\{ \overline{Y}^{K} _i(t)\in(-\infty, \overline{W} _K(t)] \big\}
= \frac1{\sqrt{K}}, $
so setting $ x= \overline{W} _K(t) $ in \eqref{eq:Yup:int:} we obtain the following integral equation \begin{align}
\label{eq:Wup:intEq}
\int_0^\infty p(t, \overline{W} _{K}(t)-y) U_{\star\star}(y) dy
=
\int_{0}^{t} p(t-s, \overline{W} _{K}(t)- \overline{W} _{K}(s)) ds
+ \overline{F}_{K}(t, \overline{W} _{K}(t)), \end{align} where $ \overline{F}_K(t,x) := \frac{1}{\sqrt{K}}-\overline{F}'_K(t,x) $, which, by \eqref{eq:Yup:Fk'}, satisfies \begin{align}
\label{eq:Yup:Fk}
\operatorname{\mathbf{P}}\Big( |\overline{F}_K|_{L^\infty([0,T]\times \mathbb{R} )} \leq C K^{-4\gamma_3} \Big)
\geq 1-CK^{-n}. \end{align}
Having derived the integral equation~\eqref{eq:Wup:intEq} for $ \overline{W} _K $, we proceed to show \eqref{eq:Wup:bd} based on \eqref{eq:Wup:intEq}. To this end, we define $ w^*(t) := \overline{W} _K(0) + at $, for some $ a\in \mathbb{R} _+ $ to be specified later, and consider the first hitting time $ \tau := \inf \{ t: \overline{W} _K(t) \geq w^*(t) \} $.
As $ w \mapsto \int_0^\infty p(\tau,w-y) U_{\star\star}(y) dy $ is nondecreasing, by \eqref{eq:Yup:Wuploc} we have \begin{align}
\label{eq:f1t}
\int_0^\infty p(\tau, \overline{W} _K(0)+a\tau-y) U_{\star\star}(y) dy
&\geq
\int_0^\infty p(\tau,1+a\tau-y) U_{\star\star}(y) dy := f_1(\tau), \end{align} with probability $ \geq 1- CK^{-n} $. Using $ \overline{W} _K(\tau)- \overline{W} _K(s) \geq a(\tau-s) $, $ \forall s\leq \tau $, we obtain \begin{align}
\label{eq:f2a}
\int_{0}^{\tau} p(\tau-s, \overline{W} _K(\tau)- \overline{W} _K(s)) ds
&
\leq
\int_0^\infty p(s,as) ds := f_2(a). \end{align} For the functions $ f_1 $ and $ f_2 $, we clearly have $ \inf_{t\leq T} f_1(t) := f_*>0 $ and $ \lim_{a\to\infty} f_2(a) =0 $.
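For concreteness, if $ p(t,x) $ denotes the standard heat kernel $ (2\pi t)^{-1/2}e^{-x^2/(2t)} $ (which is what we assume in this illustration), then $ f_2(a)=\int_0^\infty p(s,as)\,ds=\frac1a $, so the decay of $ f_2 $ is explicit. The short Python snippet below is a numerical sanity check of this value only, and is not part of the argument.
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

def f2(a):
    # f_2(a) = \int_0^\infty p(s, a s) ds with p(t, x) = exp(-x^2/(2t)) / sqrt(2 pi t).
    # Substituting s = u^2 turns the integrand into (2/sqrt(2 pi)) exp(-a^2 u^2 / 2),
    # which removes the integrable singularity at s = 0.  The exact value is 1 / a.
    g = lambda u: 2.0 / np.sqrt(2.0 * np.pi) * np.exp(-a * a * u * u / 2.0)
    return quad(g, 0.0, np.inf)[0]

for a in [1.0, 2.0, 5.0, 10.0]:
    print(a, f2(a), 1.0 / a)   # the last two columns agree
\end{verbatim}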
With this, we now fix some large enough $ a $ with $ f_2(a) < \frac12 f_* $, and insert the bounds \eqref{eq:Yup:Fk}--\eqref{eq:f2a} into \eqref{eq:Wup:intEq} to obtain \begin{align*}
\operatorname{\mathbf{P}}\big( \{f_* \leq \tfrac12 f_* + K^{-4\gamma_3} \} \cap \{ \tau \geq T \} \big)
\geq
1-C K^{-n}. \end{align*} Since $ f_* >0 $, the event $ \{f_* \leq \tfrac12 f_* + K^{-4\gamma_3} \} $ is empty for all large enough $ K $, so \begin{align*}
\operatorname{\mathbf{P}}\Big( \overline{W} _K(t) \leq \overline{W} _K(0)+aT , \ \forall t\leq T \Big)
\geq 1 -CK^{-n}. \end{align*} This together with \eqref{eq:Yup:Wuploc} gives the upper bound $ \operatorname{\mathbf{P}}( \overline{W} _K(t)\leq L,\forall t\leq T) \geq 1-CK^{-n} $ for $ L:=1+aT $. A lower bound $ \operatorname{\mathbf{P}}( \overline{W} _K(t)\geq-L,\forall t\leq T) \geq 1-CK^{-n} $ follows directly from \eqref{eq:nondecr:Z} for $ s_*=0 $. From these we conclude the desired result~\eqref{eq:Wup:bd}.
Similarly to \eqref{eq:Wup:intEq}, for $ \underline{V} _K(t,x) $ we have \begin{align}
\label{eq:Wlw:intEq}
\int_0^\infty p(t, \underline{W} _{K}(t)-y) U_{\star\star}(y) dy
=
\int_{0}^{t} p(t-s, \underline{W} _{K}(t)- \underline{W} _{K}(s)) ds
+ \underline{F}_{K}(t, \underline{W} _{K}(t)), \end{align} for some $ \underline{F}_{K}(t,x) $ satisfying \begin{align}
\label{eq:Ylw:Fk}
\operatorname{\mathbf{P}}\Big( |\underline{F}_K|_{L^\infty([0,T]\times \mathbb{R} )} \leq C K^{-\gamma} \Big)
\geq 1-CK^{-n}. \end{align} From this, the same argument as in the preceding proof gives the desired bound~\eqref{eq:Wlw:bd} for $ L=1+aT $. \end{proof}
We now establish the hydrodynamic limit of $ \overline{W} _K $ and $ \underline{W} _K $.
\begin{lemma} \label{lem:W:hydro} There exists $ z_{\star}(\Cdot+\frac12)\in\mathcal{C}( \mathbb{R} _+) $ that solves \eqref{eq:zeq} (which is unique by Corollary~\ref{cor:unique}). Furthermore, for some $ C<\infty $, we have \begin{align}
\label{eq:Wlw:hydro}
\operatorname{\mathbf{P}}\Big(
| \underline{W} _K(s)-z_{\star}(\tfrac12+s)| \leq CK^{-4\gamma_3}, \ \forall s\in[0,T]
\Big)
&\geq
1 - CK^{-n}, \\
\label{eq:Wup:hydro}
\operatorname{\mathbf{P}}\Big(
| \overline{W} _K(s)-z_{\star}(s+\tfrac12)| \leq CK^{-\gamma}, \ \forall s\in[0,T]
\Big)
&\geq
1 - CK^{-n}. \end{align} \end{lemma}
\begin{proof}
The strategy of the proof is to utilize the fact that $ \overline{W} _K $ and $ \underline{W} _K $ satisfy the integral equations \eqref{eq:Wup:intEq} and \eqref{eq:Wlw:intEq}, respectively, and apply the stability estimate Lemma~\ref{lem:pStef} to show the convergence of $ \overline{W} _K $ and $ \underline{W} _K $.
Given the estimates \eqref{eq:Yup:Fk} and \eqref{eq:Ylw:Fk}, \eqref{eq:Wup:nond}--\eqref{eq:Wlw:nond}, and \eqref{eq:Wup:bd}--\eqref{eq:Wlw:bd}, the proofs of \eqref{eq:Wlw:hydro} and \eqref{eq:Wup:hydro} are similar, and we present only the former.
Such a $ z_{\star} $ will be constructed as the unique limit point of $ \overline{W} _K $. We begin by showing the convergence of $ \overline{W} _K $. To this end, we fix $ K_1<K_2 $, and consider the processes $ \overline{W} _{K_1} $ and $ \overline{W} _{K_2} $. Since they satisfy the integral equation~\eqref{eq:Wup:intEq}, together with the estimates \eqref{eq:Yup:Wuploc}, \eqref{eq:Yup:Fk}, \eqref{eq:Wup:nond} and \eqref{eq:Wup:bd}, we apply Lemma~\ref{lem:pStef} for $ (w_1,w_2)=( \overline{W} _{K_1}, \overline{W} _{K_2}) $ to obtain \begin{align}
\label{eq:Wup:cauchy}
\operatorname{\mathbf{P}}( | \overline{W} _{K_1}(s)- \overline{W} _{K_2}(s)| \leq CK_1^{-4\gamma_3}, \forall s\in[0,T] )
\geq
1 - C{K_1}^{-n}. \end{align} We now consider the subsequence $ \{ \overline{W} _{K_m}\}_{m=1}^\infty $, for $ K_m:=2^m $. Setting $ (K_1,K_2)=(2^{m},2^{m+j}) $ in \eqref{eq:Wup:cauchy}, and taking union bound of the result over $ j\in \mathbb{N} $, we obtain \begin{align}
\label{eq:Wup:cauchy:geo}
\operatorname{\mathbf{P}}\Big(
\sup_{t\in[0,T]}| \overline{W} _{K_{m}}(t)- \overline{W} _{K_{m'}}(t)| \leq CK_m^{-4\gamma_3}, \forall m'>m
\Big)
\geq
1 - CK_m^{-n}. \end{align} From this, we conclude that $ \{ \overline{W} _{K_m} \}_m $ is almost surely Cauchy in $ \mathcal{C}([0,T]) $, and hence converges to a possibly random limit $ W\in\mathcal{C}([0,T]) $.
Now, letting $ K\to\infty $ in \eqref{eq:Wup:intEq}, with \eqref{eq:Yup:Fk}, we see that $ W $ must solve \eqref{eq:zeq}. Further, by \eqref{eq:Wup:nond} and \eqref{eq:Yup:Wuploc}, $ t\mapsto W(t) $ is nondecreasing with $ W(0)=0 $.
Since, by Corollary~\ref{cor:unique}, the solution to \eqref{eq:zeq} is unique, $ W(t) =: z_{\star}(t+\frac12) $ must in fact be deterministic.
Now, letting $ m'\to\infty $ in \eqref{eq:Wup:cauchy:geo} yields \begin{align}
\label{eq:Wup:cauchy:geo:}
\operatorname{\mathbf{P}}\Big(
\sup_{s\in[0,T]}| \overline{W} _{K_{m}}(s)-z_{\star}(s+\tfrac12)| \leq CK_m^{-4\gamma_3}
\Big)
\geq
1 - CK_m^{-n}. \end{align} Combining \eqref{eq:Wup:cauchy} and \eqref{eq:Wup:cauchy:geo:}, we conclude the desired result~\eqref{eq:Wlw:hydro}. \end{proof}
Having established the hydrodynamic limit of the laggards $ \overline{W} _K $ and $ \underline{W} _K $ of the Atlas models $ \overline{Y} $ and $ \underline{Y} $, we now return to proving Proposition~\ref{prop:Zhy}\ref{enu:Zhy:mvbdy}, i.e.\ proving the hydrodynamic limit \eqref{eq:Zhy:mvbdy} of $ Z_K(t) $ for $ t \in[\sigma_K,T] $.
We recall from Lemma~\ref{lem:couple} that we have a coupling of $ Z_K $ and $ \underline{W} _K $ under which $ \underline{W} _K(t-\frac12) \leq Z_K(t) $, $ \forall t\in[\frac12,\tau^K_\text{ext}) $, with probability $ \geq 1-CK^{-n} $.
By using the lower bound \eqref{eq:extT} on the scaled extinction time $ \tau^K_\text{ext} $, we have that \begin{align*}
\operatorname{\mathbf{P}} \big(
\underline{W} _K(t-\tfrac12) \leq Z_K(t),
\
\forall t\in[\tfrac12,T]
\big)
\geq
1 - CK^{-n}. \end{align*}
Combining this with \eqref{eq:Wlw:hydro} yields \begin{align}
\label{eq:Zhy:lw}
\operatorname{\mathbf{P}}\Big(
Z_K(t) \geq z_{\star}(t) - CK^{-4\gamma_3}, \forall t\in[\tfrac12,T]
\Big)
\geq
1 - CK^{-n}. \end{align} Equation~\eqref{eq:Zhy:lw} gives the desired lower bound on $ Z_K $. Further, it provides a lower bound on the absorption time $ \tau^K_\text{abs} $ (as defined in \eqref{eq:abs>12}). To see this, we use \eqref{eq:Zhy:lw} to write \begin{align}
\label{eq:Zhy:lw:}
\operatorname{\mathbf{P}}\Big(
\inf_{t\in[\sigma_K,T]} Z_K(t) \geq \inf_{t\in[\sigma_K,T]} z_{\star}(t) - CK^{-4\gamma_3}
\Big)
\geq
1-CK^{-n}. \end{align} With $ \sigma_K=\frac12+\frac17K^{-2\gamma} $ and $ t\mapsto z_{\star}(t) $ being non-decreasing, the quadratic growth \eqref{eq:zdq} of $ z_{\star}(t) $ near $ t=\frac12 $ gives \begin{align}
\label{eq:zlw}
\inf_{t\in[\sigma_K,T]} z_{\star}(t) = z_{\star}(\sigma_K) \geq \tfrac{1}{C}K^{-4\gamma}. \end{align} Combining \eqref{eq:zlw} with \eqref{eq:Zhy:lw:}, followed by using $ \gamma<\gamma_3 $, we obtain \begin{align}
\label{eq:tabs:bd}
\operatorname{\mathbf{P}}\Big( \inf_{t\in[\sigma_K,T]} Z_K(t) >0 \Big)
=
\operatorname{\mathbf{P}}\big( \tau^K_\text{abs} > T \big)
\geq
1 -CK^{-n}. \end{align} Using the bounds \eqref{eq:tabs:bd} and \eqref{eq:extT} on $ \tau^K_\text{abs} $ and $ \tau^K_\text{ext} $ within the coupling \eqref{eq:cplUp}, we have that $ Z_K(t) \leq \overline{W} _K(t) $, $ \forall t\in[\sigma_K,T] $, with probability $ \geq 1-CK^{-n} $. From this and \eqref{eq:Wup:hydro}, we conclude \begin{align}
\label{eq:Zhy:up}
\operatorname{\mathbf{P}}\Big(
Z_K(t) \leq z_{\star}(t) + CK^{-\gamma}, \forall t\in[\sigma_K,T]
\Big)
\geq
1 - CK^{-n}. \end{align} As $ 4\gamma_3>\gamma $, the bounds \eqref{eq:Zhy:lw} and \eqref{eq:Zhy:up} conclude the desired hydrodynamic limit~\eqref{eq:Zhy:mvbdy} of $ Z_K(t) $.
\section{Proof of Theorem~\ref{thm:aldous}} \label{sect:aldous}
We first settle Part~\ref{enu:aldous:upbd}. To this end, we fix an arbitrary strategy $ \phi(t) = (\phi_i(t))_{i=1}^K $, fix $ \gamma\in(0,\frac14) $ and $ n<\infty $, and use $ C=C(\gamma,n)<\infty $ to denote a generic constant that depends only on $ \gamma,n $, and \emph{not} on the particular strategy.
Our goal is to establish an upper bound on $ \widetilde{U}_K(\infty) := \lim_{t\to\infty} \widetilde{U}_K(t,Z_K(0)) $, the total number of ever-surviving particles, scaled by $ \frac{1}{\sqrt{K}} $.
To this end, with $ \widetilde{U}_K(\infty) \leq \widetilde{U}_K(\frac12,0) $, we set $ t=\frac12 $ in \eqref{eq:int:abs} to obtain \begin{align}
\label{eq:int:aldous}
\widetilde{U}_K(\infty) \leq \widetilde{G}_K(\tfrac12,0)
+ \sum_{i=1}^K \int_{0}^{\frac12\wedge\tau_i^K} \phi^K_i(s) p^\text{N}(\tfrac12-s,X^K_i(s),0) ds
+ R_K(\tfrac12,0). \end{align} On the r.h.s.\ of \eqref{eq:int:aldous}, we \begin{itemize} \item[-] use \eqref{eq:Gc:bd} to approximate $ \widetilde{G}_K(\tfrac12,0) $ with $ 2p(\tfrac12,0) $; \item[-] use $ p^\text{N}(\tfrac12-s,X^K_i(s),0) \leq 2p(\tfrac12-s,0) $
and $ \sum_{i=1}^K \phi^K_i(s) \leq 1 $ to bound the integral term; \item[-] use \eqref{eq:rd:bd} to bound the remainder term $ R_K(\tfrac12,0) $. \end{itemize} \indent We then obtain \begin{align}
\label{eq:int:aldous:}
\widetilde{U}_K(\infty)
\leq
2p(\tfrac12,0) + \int_{0}^{\frac12} 2p(\tfrac12-s,0) ds
+ CK^{-\gamma}, \end{align} with probability $ \geq 1-CK^{-n} $. Comparing the r.h.s.\ of \eqref{eq:int:aldous:} with the r.h.s.\ of \eqref{eq:Ucs:abs}, followed by using $ \widetilde{U}_{\star}(\frac12,0)=\frac{4}{\sqrt{\pi}} $ (from \eqref{eq:Usc:cnsv}) we obtain \begin{align*}
\widetilde{U}_K(\infty)
\leq
\widetilde{U}_{\star}(\tfrac12,0) + CK^{-\gamma}
=
\tfrac{4}{\sqrt{\pi}} + CK^{-\gamma}, \end{align*} with probability $ \geq 1-CK^{-n} $. This concludes the desired result~\eqref{eq:aldous:upbd} of Part~\ref{enu:aldous:upbd}.
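(As a sanity check of the last step, and again assuming that $ p(t,x) $ is the standard heat kernel, one has $ 2p(\tfrac12,0)=\tfrac{2}{\sqrt{\pi}} $ and $ \int_0^{1/2}2p(\tfrac12-s,0)\,ds=\tfrac{2}{\sqrt{\pi}} $, whose sum is indeed $ \tfrac{4}{\sqrt{\pi}} $. The Python snippet below, for illustration only, confirms this numerically.)
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

# Assuming p(t, x) is the standard heat kernel.
p = lambda t, x: np.exp(-x * x / (2.0 * t)) / np.sqrt(2.0 * np.pi * t)

head = 2.0 * p(0.5, 0.0)                                   # = 2 / sqrt(pi)
tail = quad(lambda s: 2.0 * p(0.5 - s, 0.0), 0.0, 0.5)[0]  # = 2 / sqrt(pi)
print(head + tail, 4.0 / np.sqrt(np.pi))                   # both are ~2.2568
\end{verbatim}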
We now turn to the proof of Part~\ref{enu:aldous:optl}. Fix $ \gamma\in(0,\frac{1}{96}) $ and $ n<\infty $, and specialize $ \phi(t) $ to the push-the-laggard strategy hereafter. Using Theorem~\ref{thm:hydro} for $ T=1 $, with $ \widetilde{U}_K(t) := \widetilde{U}_K(t,Z_K(t)) $, we have that $
\sup_{t\in[\frac12,1]} |\widetilde{U}_K(t) - \widetilde{U}_{\star}(t,z_{\star}(t)) |
\leq
CK^{-\gamma}, $ with probability $ \geq 1 -CK^{-n} $. Combining this with \eqref{eq:Usc:cnsv} yields \begin{align}
\label{eq:aldous:hydro01}
\operatorname{\mathbf{P}}\Big(
|\widetilde{U}_K(t) -\tfrac{4}{\sqrt{\pi}}| \leq CK^{-\gamma} ,
\
\forall t\in [\tfrac12,1]
\Big)
\geq
1 - CK^{-n}. \end{align}
Having established \eqref{eq:aldous:hydro01}, we next establish that \begin{align}
\label{eq:Wlw>0}
\operatorname{\mathbf{P}} \Big( \inf_{t\in[1,\infty)} \underline{W} _K(t-\tfrac12) >0 \Big) \geq 1 -CK^{-n}. \end{align} We claim that \eqref{eq:Wlw>0} is the desired property in order to complete the proof. To see this, recall from Lemma~\ref{lem:couple} that we have a coupling under which \eqref{eq:cplLw} holds, and, by Lemma~\ref{lem:extT}, we assume without loss of generality that $ \tau^K_\text{ext} >1 $. Under this setup, the event in \eqref{eq:Wlw>0} implies $ Z_K(t) \geq \underline{W} _K(t-\frac12) >0 $, $ \forall t\in [1,\tau^K_\text{ext}) $, which then forces $ \tau^K_\text{ext}=\infty $.
That is, the statement~\eqref{eq:Wlw>0} implies $ \operatorname{\mathbf{P}}(Z_K(t) >0 , \forall t>1)\geq 1-CK^{-n} $, and hence $ \operatorname{\mathbf{P}}(\widetilde{U}_K(t)=\widetilde{U}_K(1),\forall t\geq 1) \geq 1-CK^{-n} $. This together with \eqref{eq:aldous:hydro01} concludes \eqref{eq:aldous:optl}.
Returning to the proof of \eqref{eq:Wlw>0}, we recall from Remark~\ref{rmk:Y:Kdep} that the Atlas model $ \underline{Y} (t) $ as well as its laggard $ \underline{W} (t) $ actually depend on $ K $, which we have omitted up to this point for notational simplicity. Here we restore such a dependence and write $ \underline{Y} (t;K) $ and $ \underline{W} (t;K) $, etc.
Recall that the initial condition of the Atlas model $ ( \underline{Y} _i(0;K))_i $ is sampled from the Poisson point process $ \PPP( \underline{u} (\frac{x}{\sqrt{K}})) $ in \eqref{eq:Yic:}. From the definition~\eqref{eq:ulw} of $ \underline{u} $ and the explicit formula \eqref{eq:u1} of $ u_1(\frac12,x) $, it is straightforward to verify that the density function $ x \mapsto \underline{u} (\frac{x}{\sqrt{K}}) $ is nonincreasing on its support $ [-K^{\frac12-4\gamma_3},\infty) $.
Consequently, fixing $ K_1<K_2 $, we have \begin{align*}
\underline{u} (\tfrac{1}{\sqrt{K_1}}(x+{K_1}^{\frac12-4\gamma_3}))
\leq \underline{u} (\tfrac{1}{\sqrt{K_2}}(x+{K_2}^{\frac12-4\gamma_3})),
\quad
\forall x\in \mathbb{R} . \end{align*} With this, it is standard to construct a coupling of $ \underline{Y} (0;K_1) $ and $ \underline{Y} (0;K_2) $ under which \begin{align*}
&\#\{ \underline{Y} _i(0;K_1) \} \leq \#\{ \underline{Y} _i(0;K_2) \}, \\
& \underline{Y} _i(0;K_2)+{K_2}^{\frac12-4\gamma_3}
\leq
\underline{Y} _i(0;K_1)+{K_1}^{\frac12-4\gamma_3},
\
\forall i=1,\ldots, \#\{ \underline{Y} _i(0;K_1) \} . \end{align*} By Lemma~\ref{lem:sar:cmp}, such a dominance coupling at $ s=0 $ is leveraged into a dominance coupling for all $ s>0 $, yielding \begin{align}
\label{eq:W:RG12}
\underline{W} (s;K_1)
\geq
\underline{W} (s;K_2)+{K_2}^{\frac12-4\gamma_3}-{K_1}^{\frac12-4\gamma_3}
\geq
\underline{W} (s;K_2),
\quad
\forall s \geq 0. \end{align} Now, fix $ K<\infty $ and consider the geometric subsequence $ L_m := K2^{m} $.
We use the union bound to write \begin{align*}
\operatorname{\mathbf{P}}\Big( \inf_{s\in[\frac12K,\infty)} \underline{W} (s;K) \leq 0 \Big)
\leq
\sum_{m=1}^\infty
\operatorname{\mathbf{P}}\Big( \inf_{s\in[L_{m-1},L_{m}]} \underline{W} (s;K) \leq 0 \Big). \end{align*} Within each $ m $-th term in the last expression, use the coupling \eqref{eq:W:RG12} for $ (K_1,K_2)=(L_{m-1},L_m) $ to obtain \begin{align}
\label{eq:Wlw:rg:1}
\operatorname{\mathbf{P}}\Big( \inf_{s\in[L_{m-1},L_{m}]} \underline{W} (s;K) \leq 0 \Big)
\leq
\operatorname{\mathbf{P}}\Big( \inf_{s\in[L_{m-1},L_{m}]} \underline{W} (s;L_m) \leq 0 \Big). \end{align} Next, set $ T=1 $ and $ K=L_m $ in \eqref{eq:Wlw:hydro} and rewrite the resulting equation in in the pre-scaled form as \begin{align}
\label{eq:Wlw:rg:2}
\operatorname{\mathbf{P}}\Big(
| \underline{W} (s;L_m) - \sqrt{L_m}z_{\star}(\tfrac{s}{L_m}+\tfrac12)| \leq CL_m^{\frac12-4\gamma_3},
\forall s\in[0,L_m]
\Big)
\geq
1 -CL_m^{-n}. \end{align} Further, by Lemma~\ref{lem:zqd} and the fact that $ t\mapsto z_{\star}(t) $ is nondecreasing, we have that \begin{align}
\label{eq:Wlw:rg:3}
\inf_{s\in[L_{m-1},L_m]} z_{\star}(\tfrac{s}{L_m}+\tfrac12)
=
z_{\star}(\tfrac{L_{m-1}}{L_m}+\tfrac12)
=
z_{\star}(1) >0. \end{align} Combining \eqref{eq:Wlw:rg:2}--\eqref{eq:Wlw:rg:3} yields $ \operatorname{\mathbf{P}}( \inf_{s\in[L_{m-1},L_{m}]} \underline{W} (s;L_m) \leq 0 ) \leq CL_m^{-n} $. Inserting this bound into \eqref{eq:Wlw:rg:1}, and summing the result over $ m $, we arrive at \begin{align*}
\operatorname{\mathbf{P}}\Big( \inf_{s\in[\frac12K,\infty)} \underline{W} (s;K) \leq 0 \Big)
\leq
C \sum_{m=1}^\infty
L_m^{-n}
=
C K^{-n}. \end{align*} This concludes~\eqref{eq:Wlw>0} and hence completes the proof of Part~\ref{enu:aldous:optl}.
\end{document}
\begin{document}
\author{Kohdai Kuroiwa} \email{[email protected]} \affiliation{Institute for Quantum Computing, University of Waterloo, Ontario, Canada, N2L 3G1} \affiliation{Department of Physics and Astronomy, University of Waterloo} \affiliation{Department of Combinatorics and Optimization, University of Waterloo} \affiliation{Perimeter Institute for Theoretical Physics, Ontario, Canada, N2L 2Y5}
\author{Debbie Leung} \email{[email protected]} \affiliation{Institute for Quantum Computing, University of Waterloo, Ontario, Canada, N2L 3G1} \affiliation{Department of Combinatorics and Optimization, University of Waterloo} \affiliation{Perimeter Institute for Theoretical Physics, Ontario, Canada, N2L 2Y5}
\date{\today}
\title{Example2: Note}
\maketitle
\section{Another proof} In this section, we prove that the two $2N$-dimensional ($N:$ odd) states \begin{widetext}
\begin{align*}
\sigma_1 &\coloneqq
\frac{1}{4N}\left(
\begin{array}{ccccc}
2 & \epsilon & 0 & \cdots & \epsilon \\
\epsilon & 2 & \epsilon & \cdots & 0 \\
\vdots &\ddots & \ddots & \ddots & \vdots\\
0 & \cdots & \epsilon& 2 & \epsilon \\
\epsilon & 0 & \cdots & \epsilon & 2
\end{array}
\right)
\\
\sigma_2 &\coloneqq
\frac{1}{4N}\left(
\begin{array}{cccccccc}
1+4\epsilon\tfrac{1}{N+1} &&&&&&& \\
& 1+4\epsilon\tfrac{2}{N+1} &&&&&& \\
&& \ddots &&&&& \\
&&& 1 +4\epsilon\tfrac{N}{N+1} &&&& \\
&&&& 3 -4\epsilon\tfrac{N}{N+1} &&& \\
&&&&& 3 -4\epsilon\tfrac{N-1}{N+1} && \\
&&&&&& \ddots & \\
&&&&&&& 3-4\epsilon\tfrac{1}{N+1}
\end{array}
\right) \end{align*} \end{widetext} with $0<\epsilon<1/4$ do not have redundant parts. For this purpose, we show that the trivial structure shown above is the maximal structure. The first condition is trivially satisfied by considering the single-block structure with no redundant part. Then, the third condition need not be considered because there is only one block. Therefore, we only have to care about the second condition. Since the trivial structure consists of a single block without redundant parts, it suffices to show that if $2N$-dimensional projection matrix $P$ satisfies \begin{align}
\label{eq:commute_1}
P\sigma_1 &= \sigma_1 P\\
\label{eq:commute_2}
P\sigma_2 &= \sigma_2 P, \end{align} then $P = I$ or $P = 0$, where $I$ is the $2N$-dimensional identity matrix. If $P = 0$, the statement holds, so suppose that $P \neq 0$. From Eq.~\eqref{eq:commute_1}, $P$ must be expressed as \begin{equation}~\label{eq:proj_sigma1}
P = \sum_{j} \ketbra{v_{j}}{v_{j}} \end{equation} using some orthonormal eigenvectors $\ket{v_{j}}$ of $\sigma_1$. Also, from Eq.~\eqref{eq:commute_2}, $P$ must be expressed as \begin{equation}~\label{eq:proj_sigma2}
P = \sum_{l} \ketbra{e_{l}}{e_{l}} \end{equation} using some orthonormal eigenvectors $\ket{e_{l}}$ of $\sigma_2$, where eigenvector $\ket{e_l}$ ($l = 1,2,\ldots, 2N$) is a vector whose $l$-th entry is one and other entries are zero. {\color{blue} (Since $0 < \epsilon < 1/4$, $\sigma_2$ is not degenerate. Need more discussion.) } To show $P = I$ by contradiction, we assume $P \neq I$. From Eq.~\eqref{eq:proj_sigma2}, since $P \neq I$, there exists an eigenvector $\ket{e_r}$ of $\sigma_2$ such that \begin{equation}
\bra{e_r}P\ket{e_r} = 0. \end{equation} Hence, from Eq.~\eqref{eq:proj_sigma1}, \begin{equation}~\label{eq:condition_proj}
\sum_{j} |\braket{e_r|v_j}|^2 = 0. \end{equation}
Thus, for all $j$ in the sum of Eq.~\eqref{eq:proj_sigma1}, $\braket{e_r|v_j} = 0$ must hold.
We now show that for any choice of $\ket{v_j}$ and $\ket{e_r}$, $\braket{e_r|v_j} \neq 0$. Observe that a normalized eigenvector $\ket{v_j}$ of $\sigma_1$ can be given as \begin{equation}
\frac{1}{\sqrt{2N}}
\left(
\begin{array}{c}
1 \\
1 \\
1\\
\vdots \\
1
\end{array}
\right), \,\,\,\,
\frac{1}{\sqrt{2N}}
\left(
\begin{array}{c}
1 \\
-1 \\
1 \\
-1 \\
1 \\
\vdots\\
-1
\end{array}
\right), \end{equation} or \begin{equation}
\lambda \frac{1}{\sqrt{2N}}\left(
\begin{array}{c}
1 \\
\omega^{k} \\
\omega^{2k} \\
\omega^{3k} \\
\vdots \\
\omega^{(2N-1)k}
\end{array}
\right)
+
(1-\lambda) \frac{1}{\sqrt{2N}}\left(
\begin{array}{c}
1 \\
\omega^{-k} \\
\omega^{-2k} \\
\omega^{-3k} \\
\vdots \\
\omega^{-(2N-1)k}
\end{array}
\right) \end{equation} with $0\leq \lambda \leq 1$ for $k = 1,2,\ldots,N-1$. {\color{blue} (Need to be discussed more.)} If \begin{equation}
\ket{v_j} =
\frac{1}{\sqrt{2N}}
\left(
\begin{array}{c}
1 \\
1 \\
1\\
\vdots \\
1
\end{array}
\right) \,\,\,\, \mathrm{or}\,\,\,\,
\frac{1}{\sqrt{2N}}
\left(
\begin{array}{c}
1 \\
-1 \\
1 \\
-1 \\
1 \\
\vdots\\
-1
\end{array}
\right), \end{equation}
then $\braket{e_r|v_j} = \pm 1/\sqrt{2N} \neq 0$. On the other hand, if \begin{equation}
\ket{v_j} =
\lambda \frac{1}{\sqrt{2N}}\left(
\begin{array}{c}
1 \\
\omega^{k} \\
\omega^{2k} \\
\omega^{3k} \\
\vdots \\
\omega^{(2N-1)k}
\end{array}
\right)
+
(1-\lambda) \frac{1}{\sqrt{2N}}\left(
\begin{array}{c}
1 \\
\omega^{-k} \\
\omega^{-2k} \\
\omega^{-3k} \\
\vdots \\
\omega^{-(2N-1)k}
\end{array}
\right), \end{equation} for some $0\leq \lambda \leq 1$ and $k = 1,2,\ldots,N-1$, \begin{equation} \begin{aligned}
\braket{e_r|v_j}
&= \frac{\lambda \omega^{rk} + (1-\lambda)\omega^{-rk}}{\sqrt{2N}} \\
&= \frac{1}{\sqrt{2N}} \left(\cos \frac{2\pi rk}{2N} + i(2\lambda - 1) \sin \frac{2\pi rk}{2N}\right). \end{aligned} \end{equation}
When $\lambda \neq 1/2$, $ \braket{e_r|v_j} \neq 0$ because $\cos \frac{2\pi rk}{2N}$ and $\sin \frac{2\pi rk}{2N}$ cannot be zero simultaneously. When $\lambda = 1/2$, \begin{equation} \begin{aligned}
\braket{e_r|v_j}
&= \frac{1}{\sqrt{2N}} \cos \frac{2\pi rk}{2N} \\
&= \frac{1}{\sqrt{2N}} \cos \frac{\pi rk}{N}. \end{aligned} \end{equation} Since $N$ is odd, there exists no integer $m$ such that \begin{equation}
\frac{rk \pi}{N} = \left(\frac{1}{2} + m\right)\pi. \end{equation}
Hence, $\cos \frac{rk \pi}{N} \neq 0$, and $\braket{e_r|v_j} \neq 0$. Since $\braket{e_r|v_j} \neq 0$ for any choice of $\ket{v_j}$ and $\ket{e_r}$,
Eq.~\eqref{eq:condition_proj} cannot be satisfied, which contradicts the assumption $P \neq I$; thus $P = I$.
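The following short numerical check is included for illustration only (indices run from $0$ rather than $1$, and $N=3$ is an arbitrary odd choice); it confirms that the overlaps $\braket{e_r|v_j}$ computed above stay bounded away from zero over a grid of $r$, $k$ and $\lambda$.
\begin{verbatim}
import numpy as np

N = 3                                   # any odd N; the dimension is 2N
omega = np.exp(2j * np.pi / (2 * N))
m = np.arange(2 * N)

min_abs = np.inf
for k in range(1, N):                   # k = 1, ..., N-1
    f_plus = omega ** (m * k) / np.sqrt(2 * N)
    f_minus = omega ** (-(m * k)) / np.sqrt(2 * N)
    for lam in np.linspace(0.0, 1.0, 101):
        v = lam * f_plus + (1.0 - lam) * f_minus
        min_abs = min(min_abs, np.abs(v).min())  # min over r of |<e_r|v>|

print(min_abs)                          # strictly positive, as argued above
\end{verbatim}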
\end{document}
\begin{document}
\linespread{1.5} \begin{frontmatter}
\title{Asymptotics for infinite server queues with fast/slow Markov switching and fat tailed service times}
\runtitle{Infinite server queues with switching and fat tailed service times}
\begin{aug} \author{\fnms{\Large{Landy}} \snm{\Large{Rabehasaina}} \ead[label=e2]{[email protected]}} \runauthor{L.Rabehasaina}
\address{\hspace*{0cm}\\ Laboratory of Mathematics, University Bourgogne Franche-Comt\'e,\\ 16 route de Gray, 25030 Besan\c con cedex, France.\\[0.2cm]
\printead{e2}}
\end{aug}
\begin{abstract} We study a general $k$-dimensional infinite-server queue process with Markov switching, Poisson arrivals and where the service times are fat tailed with index $\alpha\in (0,1)$. When the arrival rate is sped up by a factor $n^\gamma$, the transition probabilities of the underlying Markov chain are divided by $n^\gamma$ and the service times are divided by $n$, we identify two regimes (``fast arrivals'', when $\gamma>\alpha$, and ``equilibrium'', when $\gamma=\alpha$) in which we prove that a properly rescaled process converges pointwise in distribution to some limiting process. In a third ``slow arrivals'' regime, $\gamma<\alpha$, we show the convergence of the first two joint moments of the rescaled process.
\end{abstract} \begin{keyword}[class=AMS] \kwd[Primary ]{60G50} \kwd{60K30} \kwd{62P05} \kwd{60K25}
\end{keyword} \begin{keyword} Infinite server queues, Incurred But Not Reported (IBNR) claims, Markov modulation, Rescaled process \end{keyword}
\end{frontmatter}
\normalsize
\section{Introduction and notation}\label{sec:model} \subsection{Model and related work} The classical infinite server queue consists of a system where tasks or customers arrive according to a general arrival process and begin receiving service immediately. Such a model was studied extensively, under various assumptions on the interarrival and service time distributions, in \cite[Chapter 3, Section 3]{T62}. Several variants or extensions have been considered, in particular where arrivals and service times are governed by an external background Markovian process \cite{OP86, D08, FA09, BKMT14, MDT16, BDTM17}, or where customers arrive in batches \cite{MT02}. An extension to a network of infinite-server queues where arrival and service rates are Markov modulated is studied in \cite{JMDW19}.
We consider yet another generalization of this model with Markov switching described as follows. Let $\{ N_t,\ t\ge 0\}$ be a Poisson process with intensity $\lambda>0$, corresponding jump times $(T_i)_{i\in\mathbb{N}}$ satisfying $T_0=0$, such that $(T_i-T_{i-1})_{i\ge 1}$ is a sequence of independent and identically distributed (iid) random variables with same exponential distribution with parameter $\lambda>0$, denoted by ${\cal E(\lambda)}$. Let $(L_{ij})_{i\in \mathbb{N}, j=1,\ldots,k}$ be a sequence of independent random variables such that the sequence of vectors $\left((L_{i1},\ldots,L_{ik})\right)_{i\in \mathbb{N}}$ is iid (with entries $L_{i1}$,\ldots,$L_{ik}$ having different distributions for each $i$). Finally, for some $K$ and $k$ in $\mathbb{N}^*$ we consider the discrete set ${\cal S}=\{ 0,\ldots,K\}^k$ and a stationary finite Markov chain $(X_i)_{i\in\mathbb{N}}$ with state space ${\cal S}$. Then, for all $i$, $X_i$ is a vector of the form $X_i=(X_{i1},\ldots,X_{ik})$ with $X_{ij}\in \{ 0,\ldots,K\}$, $j=1,\ldots,k$. We then define the following $k$ dimensional process $\{Z(t)=(Z_1(t),\ldots,Z_k(t)),\ t\ge 0 \}$ with values in $\mathbb{N}^k$ as \begin{equation}\label{def_Z_t} Z_j(t)=\sum_{i=1}^{N_t}X_{ij} \mathbbm{1}_{[t<L_{ij}+T_i]}=\sum_{i=1}^{\infty}X_{ij} \mathbbm{1}_{[T_i\le t<L_{ij}+T_i]},\quad j=1,...,k. \end{equation}
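We note in passing that \eqref{def_Z_t} is easy to simulate directly. The Python sketch below is purely illustrative: the number of branches, the rate, the tail index of the delays, the transition matrix and the initial state are placeholder choices and are not used anywhere in the paper; the delays are drawn from a Pareto law so that $\mathbb{P}(L>s)= s^{-\alpha}$ for $s\geq 1$.
\begin{verbatim}
import numpy as np
from itertools import product

rng = np.random.default_rng(2)

# Placeholder parameters: k branches, K = 1 (so S = {0,1}^k), Poisson rate lam,
# Pareto(alpha) delays, and a uniform transition matrix on S.
k, lam, alpha, t = 2, 3.0, 0.5, 10.0
states = list(product((0, 1), repeat=k))
P = np.full((len(states), len(states)), 1.0 / len(states))

def simulate_Z(t):
    # Given N_t, the arrival times are uniform order statistics on [0, t].
    T = np.sort(rng.uniform(0.0, t, size=rng.poisson(lam * t)))
    x = rng.integers(0, len(states))                # X_0 (uniform, hence stationary here)
    Z = np.zeros(k, dtype=int)
    for Ti in T:
        x = rng.choice(len(states), p=P[x])         # Markov switching X_{i-1} -> X_i
        L = rng.uniform(size=k) ** (-1.0 / alpha)   # P(L > s) = s^{-alpha}, s >= 1
        Z += np.array(states[x]) * (Ti + L > t)     # claim still unreported at time t
    return Z

print(simulate_Z(t))
\end{verbatim}
Averaging such simulations over many independent runs provides Monte Carlo estimates of the quantities characterized analytically below.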
The process defined by \eqref{def_Z_t} has many applications, of which we list two most important ones: \begin{itemize} \item {\it incurred but not reported correlated claims: } in an actuarial context, $Z(t)=(Z_1(t),\ldots,Z_k(t))$ represents a set of branches where $Z_j(t)$ is the number of incurred but non reported (IBNR) claims in the $j$th branch of an insurance company. Here $X_{ij}$ is the number of such claims arriving in that branch at time $T_i$, and $L_{ij}$ is the related delay time before the claim $j$ is reported. From another point of view, $X_{ij}\in [0,+\infty)$ may also represent the amount (say, in euros) of the claim occurring at time $T_i$ in the $j$th branch, in which case $Z_j(t)$ is the total amount of undeclared claims which have occurred by time $t$. Another application is when $K=1$, in which case $X_{ij}=0$ means that the claim in branch $j$ occurring at time $T_i$ is reported and dealt with immediately by the policyholder, whereas $X_{ij}=1$ means that some effective lag in the report is observed. The Markovian nature of $(X_i)_{i\in\ensuremath{\mathbb{N}}}$ here is important from a practical point of view, as a claim amount at time $T_i$ may impact the one at time $T_{i+1}$, or because a policyholder may decide to grant a long report delay for the claim at time $T_{i+1}$ with high probability if the claim at time $T_i$ is reported immediately. \item {\it infinite server queues with batch arrivals and Markov switching: } $Z(t)=(Z_1(t),\ldots,Z_k(t))$ represents a set of $k$ correlated queues with an infinite number of servers, such that customers arrive at each time $T_i$, with $X_{ij}$ customers arriving in queue $j\in\{1,...,k\}$, with corresponding (same) service times $L_{ij}$ (as an example, the basic case where $X_{ij}=1$ for all $i\in \mathbb{N}$ and $j=1,...,k$ corresponds to $k$ customers arriving simultaneously at each instant $T_i$). $Z_j(t)$ can also be seen as the number of customers of class $j$ in a (single) infinite-server queue, as illustrated in \cite[Figure 1]{RW16}. Other infinite-server queue, such as one where the customers within a batch arriving at time $T_i$ have different service times, may be inferred from the model \eqref{def_Z_t} by choosing an appropriate value of $k$ and Markov chain $(X_i)_{i\in\ensuremath{\mathbb{N}}}$, see \cite[Section 6]{RW16}. Here, the Markov switching is a major novelty in the present model because it allows for some dependence between the successive number of incoming customers. One simple example is when $K=1$, so that $X_{ij}=0$ means that an incoming customer at time $T_i$ in queue $j$ is rejected from the system, whereas $X_{ij}=1$ means that it is accepted: a classical situation would then be that if a customer is rejected at time $T_i$ then the next one could be accepted with high probability, at time $T_{i+1}$. In other words, this Markov switching can help model traffic regulation mechanisms. \end{itemize} The present paper follows \cite{RW18}, which studies the transient or limiting distribution of a discounted version of $Z(t)$ of the form \begin{equation}\label{discounted_Z(t)} Z_j(t)=\sum_{i=1}^{N_t}X_{ij} e^{-a (L_{ij}+T_i)}\mathbbm{1}_{[t<L_{ij}+T_i]}=\sum_{i=1}^{\infty}X_{ij} e^{-a (L_{ij}+T_i)} \mathbbm{1}_{[T_i\le t<L_{ij}+T_i]},\quad j=1,...,k, \end{equation} for $t\ge 0$. The main difference with \cite{RW18} is that the latter has more general assumptions on the interarrival and service distributions, whereas we focus here on Poisson arrivals. 
Even though the assumptions are more restrictive than in \cite{RW18}, the goal here is different in that we are trying to exhibit different behaviours for the limiting models when the arrival rate is increased and the service times are decreased by suitable factors, whereas \cite{RW18} is more focused on analytical stochastic properties such as the moments of $Z(t)$ or its limiting distribution as $t\to\infty$. The discounting factor $a \ge 0$ in \eqref{discounted_Z(t)} is important in situations e.g. where, in an actuarial context, $X_{ij} e^{-a (L_{ij}+T_i)}$ represents the value of the claim amount at the actual realization time $L_{ij}+T_i$. Furthermore, the state space ${\cal S}=\{ 0,\ldots,K\}^k$, although seemingly artificially complex, allows in fact for some flexibility and enables us to retrieve some known models. In particular, consider a Markov-modulated infinite-server queue, i.e. a queueing process $\{{\cal Z}(t),\ t\ge 0\}$ of which interarrivals and service times are modulated by a background continuous time Markov chain $\{Y(t),\ t\ge 0\}$ with state space say $\{1,...,\kappa\}$, i.e. such that customers arrive on the switching times of the Markov chain, with service times depending on the state of the background process (see \cite{MT02}, \cite[Model II]{MDT16}). Then \cite[Section 6]{RW18} explains how this process $\{{\cal Z}(t),\ t\ge 0\}$ can be embedded in a process $\{Z(t),\ t\ge 0\}$ defined by \eqref{def_Z_t} with an appropriate choice of $k$, $K$ in function of $\kappa$, as well as of the Markov chain $(X_i)_{i\in\mathbb{N}}$ and the sequence $(L_{ij})_{i\in \mathbb{N}, j=1,\ldots,k}$ of service times. Thus, studying a general process $\{Z(t),\ t\ge 0\}$ in \eqref{def_Z_t} allows to study a broad class of infinite server queue models in a similar Markov modulated context.
We now proceed with some notation related to the model and used throughout the paper. Let $P=(p(x,x'))_{(x,x')\in {\cal S}^2}$ and $\pi=(\pi(x))_{x\in {\cal S}}$ (written as a row vector) be respectively the transition matrix and stationary distribution of the Markov chain. We next define for all $r\ge 0$ and $s=(s_1,\ldots,s_k)\in (-\infty,0]^k $,
\begin{align} \tilde{\pi}(s,r)&:= \displaystyle\mbox{diag}\left[ \mathbb{E} \left( \exp\left\{ \sum_{j=1}^k s_jx_j \mathbbm{1}_{[L_j>r]}\right\}\right),\ x=(x_1,\ldots,x_k)\in{\cal S}\right],\label{def_pi_Q_tilda}\\
\Delta_i&:=\mbox{diag} \left[ x_i,\ x=(x_1,\ldots,x_k)\in{\cal S}\right],\quad i=1,\ldots,k,\label{Di} \end{align}
where $P'$ denotes the transpose of matrix $P$. $I$ is the identity matrix, ${\bf 0}$ is a column vector with zeroes, and ${\bf 1}$ is a column vector with $1$'s, of appropriate dimensions. The Laplace Transform (LT) of the process $Z(t)$ jointly to the state of $X_{N_t}$ given the initial state of $X_0$ is denoted by \begin{equation}\label{def_mgf}
\psi(s,t):=\left[ \mathbb{E}\left( \left. e^{<s,Z(t)>}\mathbbm{1}_{[X_{N_t}=y]}\right| X_0=x\right)\right]_{(x,y)\in {\cal S}^2},\quad t\ge 0,\ s=(s_1,\ldots,s_k)\in (-\infty,0]^k \end{equation} where $<\cdot, \cdot>$ denotes the Euclidian inner product on $\mathbb{R}^k$. Note that $X_0$ has no direct physical interpretation here, as the claims sizes/customer batches are given by $X_i$, $i\ge 1$, and is rather introduced for technical purpose.
We finish this section with the following notation. For two sequences of random variables $(A_n)_{n\in\mathbb{N}}$ and $(B_n)_{n\in\mathbb{N}}$ and two random variables $A$ and $B$, the notation ${\cal D}\left(\left. A_n\right|B_n\right)\longrightarrow_{n\to\infty} {\cal D}\left(\left. A\right|B\right)$ indicates that, as $n\to \infty$, the conditional distribution of $A_n$ given $B_n$ converges weakly to the conditional distribution of $A$ given $B$.
\subsection{Rescaling}\label{sec:rescale} We arrive at the main topic of the paper, which is to be able to provide some information on the distribution of $Z(t)$ in \eqref{def_Z_t}. In the particular case of Poisson arrivals, and since $Z(t)$ in \eqref{def_Z_t} is a particular case of the process in \eqref{discounted_Z(t)} with discount factor $a=0$, the LT $\psi(s,t)$ defined in \eqref{def_mgf} is characterized by \cite[Proposition 4]{RW18}, which we rewrite here:
\begin{prop}\label{prop_Poisson_psi} When $\{ N_t,\ t\ge 0\}$ is a Poisson process with intensity $\lambda>0$, then $\psi(s,t)$ is the unique solution to the first order linear (matrix) differential equation \begin{equation}\label{Poisson_ODE} \partial_t \psi(s,t) =[\lambda (P-I) + \lambda P(\tilde{\pi}(s,t)-I)]\psi(s,t) \end{equation} with the initial condition $\psi(s,0)= I$. \end{prop} Unfortunately, the first order ordinary differential equation \eqref{Poisson_ODE} does not have an explicit expression in general, so that studying the (transient or stationary) distribution of the couple $(Z(t),X_{N_t})$ is difficult. In that case, as in \cite{BDTM17, BKMT14, MDT16}, it is appealing to study the process when the intensity of the Poisson process is sped up and the switching rates of the Markov chain are modified. Similarly to those papers, the goal of this paper is thus to study the behaviour of the queue/IBNR process in "extreme conditions" for the arrival rates, transition rates and delays, while trying to maintain minimal assumptions on the service time distributions. For this we will suppose that the rescalings of the parameters, denoted by ${\bf (S1)}$, ${\bf (S2)}$ and ${\bf (S3)}$ hereafter, are performed as follows: \begin{itemize} \item the arrival rate is multiplied by $n^\gamma$ for some $\gamma>0$, denoted by $${\bf (S1)}\quad \lambda_n= \lambda n^\gamma ,$$ with associated Poisson process $\{ N_t^{(n)},\ t\ge 0\}$ and jump times $(T_i^n)_{i\in\mathbb{N}}$, \item the transition probabilities $p(x,y)$ are divided by $n^\gamma$ when $x\neq y$, $x$, $y$ in ${\cal S}$, i.e. the new transition matrix is given by $${\bf (S2)} \quad P_n=P/n^\gamma + (1-1/n^\gamma) I,$$ with corresponding stationary Markov chain $(X_i^{(n)})_{i\in\mathbb{N}}$, having the same distribution $\pi$ as $(X_i)_{i\in\mathbb{N}}$. \end{itemize} Since the transition matrix $P_n$ verifies $P_n\longrightarrow_{n\to\infty} I$, such normalizing assumptions imply that, as $n\to\infty$, one is close to a model where the arriving customers or claims come in the $k$ queues in batches with same fixed size: those queues are nonetheless correlated because the customers arrive according to the same Poisson process. Also, observe that $\lambda_n (P_n-I)$ is the infinitesimal generator of the continuous time Markov chain $\left\{Y^{(n)}(t)=X^{(n)}_{N^{(n)}_t},\ t\ge 0\right\}$ of which embedded Markov chain is the underlying Markov chain i.e. $(Y^{(n)}(T_i^n))_{i\in\mathbb{N}}=(X_i^{(n)})_{i\in\mathbb{N}}$. Thus, since the rescaling is such that $$\lambda_n (P_n-I)=\lambda (P-I)$$ for all $n$ (a property which will be extensively used in the paper), we remark that the rescalings {\bf (S1)} and {\bf (S2)} for the arrival rate and the transition probabilities are such that the transition rates between the states of $\cal S$ of $\{Y^{(n)}(t),\ t\ge 0\}$ are independent from $n$, which allows for enough dynamics in the model that compensates the fact that $P_n$ tends to $I$, and yielding non trivial asymptotics in the convergence results in this paper as $n\to\infty$.
The assumptions for the service times/delays distribution are the following. We first suppose that the base model features fat tailed service times with the same index $\alpha \in (0,1)$, i.e. such that $$\mathbb{P}(L_j >t)\sim 1/t^\alpha,\quad t\to \infty,$$ for all $j=1,...,k$. This kind of distribution (included in the wider class of heavy tailed distributions) means that the service times are ``large''. In particular, those service times have {\it infinite expectation}. Furthermore, the rescaling for the service times is such that they are divided by $n$, denoted by $$ {\bf (S3)}\quad L_j^{(n)}=L_j/n. $$ Hence, the situation is the following: the arrivals are sped up by a factor $n^\gamma$, but this is compensated by the fact that the delay times are shrunk by a factor $n$, so that one expects one of three phenomena to occur at time $t$ for the limiting model: the arrivals occur faster than the customers can be served and the corresponding queue content $Z^{(n)}(t)$ grows large as $n\to\infty$, the arrivals occur more slowly and services are completed quickly so that $Z^{(n)}(t)$ tends to $0$ as $n\to\infty$, or an equilibrium is reached. Those three cases will be studied in the forthcoming sections. Some limiting behaviour was studied in \cite{BKMT14, MDT16}, where the authors identified three regimes for different scalings in a Markov modulating context and obtained a Central Limit Theorem for a renormalized process,
when the service times have a general distribution with finite expectation or are exponentially distributed. \cite{BDTM17} provides some precise asymptotics on the tail probability of the queue content for exponentially distributed service times. \cite{JMDW19} provides a diffusion approximation for a model with exponentially distributed service times. A novelty of this paper is that we restrict the class of distributions to that of fat tailed distributions in order to exhibit (under different scalings) a different behaviour and a different, non gaussian, limiting distribution. Also note that the class of fat tailed distributions is interesting in itself as, in actuarial practice, this corresponds to {\it latent claims}, i.e. very long delays which, incidentally, are often not observed in practice (as the case $\alpha\in(0,1)$ corresponds to the $L_j$'s having infinite expectation), see \cite[Section 6.6.1]{H17}. This motivates the convergence results in this paper, which feature the exponent $\alpha$ as the only information required on those delays. This in itself is a noticeable difference from the Central Limit Theorems obtained in \cite[Section 4]{BKMT14}, where the normalization and limiting distribution require the explicit cumulative distribution function of the service times. Moreover, in those references \cite{BKMT14, MDT16} the scaling is performed on the transition rates of the underlying continuous time Markov chain modulating the arrival and service rates, whereas here these are constant (as we saw that $\lambda_n (P_n-I)=\lambda (P-I)$ is independent of $n$) and the scaling is performed on the service times instead. When the service times are heavy tailed, this particular model can also be seen as a generalization of the {\it infinite source model}, see \cite[Section 2.2]{MRRS02}. Since the class of fat tailed distributions is a sub-class of the set of heavy tailed distributions, the normalizations {\bf (S1)} and {\bf (S3)} can be directly compared to \cite[Section 3.1]{MRRS02}, which studies limiting distributions of such normalized processes, and where the authors introduce the notion of so-called Slow and Fast Growth conditions when the arrival rate of customers is respectively negligible or dominant, compared to the service times. The reader is also referred to \cite{GK03} for a similar model where the interarrivals are heavy tailed. All in all, what is going to be studied hereafter is, when $t$ is fixed in say $[0,1]$ w.l.o.g., the limiting distribution as $n\to\infty$ of the $\mathbb{N}^k\times {\cal S}$ valued random vector $$ \left(Z^{(n)}(t),X^{(n)}_{N^{(n)}_t}\right) $$ under rescaling ${\bf (S1)}$, ${\bf (S2)}$ and ${\bf (S3)}$, or of a renormalized version of it in the ``fast'' or ``slow'' arriving customers case. Note that the convergence is proved on the interval $[0,1]$, but all proofs can be adapted to show the convergence on any interval $[0,M]$ for $M>0$. The corresponding joint Laplace Transform is given by \begin{equation}\label{def_LT_n}
\psi^{(n)}(s,t)=\left[ \mathbb{E}\left( \left. e^{<s,Z^{(n)}(t)>}\mathbbm{1}_{\left[X^{(n)}_{N^{(n)}_t}=y\right]}\right| \ X_0^{(n)}=x\right)\right]_{(x,y)\in {\cal S}^2},\ s=(s_1,...,s_k)\in (-\infty, 0]^k, \end{equation} where we recall that $(X^{(n)}_i)_{i\in \mathbb{N}}$ is the underlying Markov chain with transition matrix $P_n$, stationary distribution $\pi$, and $\left\{N^{(n)}_t,\ t\ge 0\right\}$ is a Poisson process representing the arrivals, with scaled intensity $\lambda_n$. We also introduce the first and second joint matrix moments defined by \begin{equation}\label{def_moments} \begin{array}{rcl}
M_j^{(n)}(t)&:= &\left[ \mathbb{E}\left(\left. Z^{(n)}_j(t) \mathbbm{1}_{\left[X^{(n)}_{N^{(n)}_t}=y\right]}\right| \ X_0^{(n)}=x\right) \right]_{(x,y)\in {\cal S}^2},\quad j=1,...,k,\\
M_{jj'}^{(n)}(t)&:= &\left[ \mathbb{E}\left(\left. Z^{(n)}_j(t)\ Z^{(n)}_{j'}(t) \mathbbm{1}_{\left[X^{(n)}_{N^{(n)}_t}=y\right]}\right| \ X_0^{(n)}=x\right) \right]_{(x,y)\in {\cal S}^2},\quad j,j'=1,...,k . \end{array} \end{equation} \section{Statement of results and organization of paper} The core results of the paper concerning the different regimes are given in the following two Theorems \ref{theo_regimes} and \ref{theo_slow_arrival}: \begin{theorem}\label{theo_regimes}
Let $\{{\cal X}^\alpha (t)=({\cal X}^\alpha_1 (t),...,{\cal X}^\alpha_k (t)),\ t\in[0,1] \}$ be a $\{0,...,K\}^k$ valued continuous time inhomogeneous Markov chain with infinitesimal generator matrix $\frac{1}{1-\alpha} (1-t)^{\frac{\alpha}{1-\alpha}}\lambda (P-I)$ with ${\cal X}^\alpha (0)\sim \pi$, and $\{\nu_j^\alpha(t),\ t\in [0,1] \}$, $j=1,...,k$, be $k$ independent Poisson processes with the same intensity $\frac{\lambda}{1-\alpha}$, independent of $\{{\cal X}^\alpha (t),\ t\in[0,1] \}$. Let $t\in[0,1]$ be fixed. \begin{itemize}
\item {\bf Fast arrivals: }If $ \gamma >\alpha$ then, as $n\to\infty$, \begin{multline}\label{convergence_fast}
{\cal D}\left(\left. \left(\frac{Z^{(n)}(t)}{n^{\gamma-\alpha}},X^{(n)}_{N^{(n)}_t} \right)\right|\ X^{(n)}_0\right)\\ \longrightarrow {\cal D}\left( \left. \left(\frac{\lambda}{1-\alpha} \int_{1-t^{1-\alpha}}^{1} {\cal X}^\alpha(v)\ dv,\ {\cal X}^\alpha (1)\right)
\right|\ {\cal X}^\alpha\left(1-t^{1-\alpha}\right) \right), \end{multline} \item {\bf Equilibrium: } If $\gamma =\alpha$ then, as $n\to\infty$, \begin{multline}\label{convergence_equilibrium}
{\cal D}\left(\left. \left(Z^{(n)}(t),X^{(n)}_{N^{(n)}_t} \right)\right|\ X^{(n)}_0\right)\\ \longrightarrow {\cal D}\left( \left. \left( \left(\int_{1-t^{1-\alpha}}^{1} {\cal X}^\alpha_j(v)\ \nu_j^\alpha(dv)\right)_{j=1,...,k},\ {\cal X}^\alpha (1)\right)
\right|\ {\cal X}^\alpha\left(1-t^{1-\alpha}\right) \right). \end{multline} \end{itemize} \end{theorem} We note that the terms in the limits on the right hand side of \eqref{convergence_fast} and \eqref{convergence_equilibrium} feature simple objects (in regards to the complexity of the original model) where the only characteristic parameters needed are $\lambda$, $P$ and $\alpha$; in particular, and apart from $\alpha$, characteristics of the service times $L_j$, $j=1,...,k$, such as their cumulative distribution functions, do not show up in the limiting distributions \eqref{convergence_fast} and \eqref{convergence_equilibrium}.
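As a small side computation (not needed in the sequel) illustrating how mild the time inhomogeneity of $\{{\cal X}^\alpha(t),\ t\in[0,1]\}$ is, note that its generator integrates, over the unit interval, to the ($n$ independent) generator $\lambda(P-I)$ of the modulating chain $\{Y^{(n)}(t),\ t\ge 0\}$: since $\frac{\alpha}{1-\alpha}+1=\frac{1}{1-\alpha}$,
$$
\int_0^1 \frac{1}{1-\alpha}(1-v)^{\frac{\alpha}{1-\alpha}}\lambda(P-I)\, dv = \frac{1}{1-\alpha}\cdot\frac{1}{\frac{\alpha}{1-\alpha}+1}\,\lambda(P-I)=\lambda(P-I).
$$
In other words, ${\cal X}^\alpha$ performs, on average over $[0,1]$, the same amount of switching per unit time, the inhomogeneity only concentrating it near the beginning of the interval.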
The convergences in distribution \eqref{convergence_fast} and \eqref{convergence_equilibrium} give some information on the convergence of the (possibly renormalized) joint distribution of the couple $\left(Z^{(n)}(t),X^{(n)}_{N^{(n)}_t}\right)$, $t\in [0,1]$, given the initial state $X_0^{(n)}$. Intuitively, for fixed $t\in [0,1]$, in the right hand sides of \eqref{convergence_fast} and \eqref{convergence_equilibrium} we may interpret the inhomogeneous continuous time Markov chain $\{{\cal X}^\alpha (v),\ v\in[1-t^{1-\alpha},1] \}$ as the limiting counterpart of the modulating process $\left\{ X^{(n)}_{N^{(n)}_v},\ v\in [0,t]\right\}$. On an even cruder level, we observe in the Fast arrivals case from \eqref{convergence_fast} that each entry of $Z^{(n)}(t)=(Z^{(n)}_1(t),...,Z^{(n)}_k(t))$ behaves roughly like $n^{\gamma-\alpha}t^{1-\alpha}$. The intuition behind this behaviour may be explained as follows. Within queue $j=1,...,k$, there are approximately $\lambda n^{\gamma} t$ arrivals in the interval $[0,t]$, each arriving customer having a service time distributed as $L^{(n)}_j$, so that we may very grossly consider that a customer is still present at time $t$ with probability $\mathbb{P} (L^{(n)}_j>t)=\mathbb{P} (L_j/n>t) $. Hence the number of customers in queue $j$ is approximately $$ Z^{(n)}_j(t)\approx \lambda n^{\gamma} t \times \mathbb{P} (L_j/n>t) = \lambda n^{\gamma} t \times\mathbb{P} (L_j>nt) \approx \lambda n^{\gamma} t \times \frac{1}{(nt)^\alpha}=\lambda n^{\gamma-\alpha}t^{1-\alpha}, $$ which is the expected order of growth $n^{\gamma-\alpha}t^{1-\alpha}$. Of course, such approximations are very crude; however, they justify the presence of the normalizing factor $n^{\gamma-\alpha}$ as well as the time dilated factor $t^{1-\alpha}$ in \eqref{convergence_fast}.
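The above heuristic can be made slightly more precise in a deliberately simplified, purely illustrative setting, close in spirit to the infinite source model recalled above: a single unmodulated $M/G/\infty$ queue with arrival rate $\lambda n^\gamma$, unit batches and service times distributed as $L_1^{(n)}=L_1/n$, for which the classical infinite server formula gives the mean content $\lambda n^\gamma\int_0^t\mathbb{P}(L_1>nv)\,dv$ at time $t$. Taking for instance the Pareto type tail $\mathbb{P}(L_1>v)=(1+v)^{-\alpha}$ (an arbitrary choice consistent with the fat tail assumption), one gets
$$
\lambda n^\gamma\int_0^t (1+nv)^{-\alpha}\,dv=\frac{\lambda n^{\gamma-1}}{1-\alpha}\left[(1+nt)^{1-\alpha}-1\right]\sim\frac{\lambda}{1-\alpha}\,n^{\gamma-\alpha}t^{1-\alpha},\qquad n\to\infty,
$$
which recovers not only the order $n^{\gamma-\alpha}t^{1-\alpha}$ but also the constant $\frac{\lambda}{1-\alpha}$ appearing in \eqref{convergence_fast}.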
In the case when $ \gamma <\alpha$, proving the convergence in distribution of an adequate normalization of $Z^{(n)}(t)$ seems more difficult. The following result shows that the first two moments of $Z^{(n)}(t)$ converge under the respective normalizations $n^{\alpha-\gamma}$ and $n^{(\alpha-\gamma)/2}$: \begin{theorem}[\bf Slow arrivals]\label{theo_slow_arrival} If $ \gamma <\alpha$ then the following convergences of the two joint moments hold as $n\to \infty$ \begin{eqnarray} n^{\alpha-\gamma} M_j^{(n)}(t)&\longrightarrow & \lambda e^{\lambda t (P-I)}\int_0^t \frac{1}{v^\alpha} e^{-\lambda v (P-I)} \Delta_j e^{\lambda v (P-I)} dv, \label{convergence_slow_M1}\\ n^{\alpha-\gamma} M_{jj}^{(n)}(t)&\longrightarrow & \lambda e^{\lambda t (P-I)}\int_0^t \frac{1}{v^\alpha} e^{-\lambda v (P-I)} \Delta_j^2 e^{\lambda v (P-I)} dv, \label{convergence_slow_M20}\\ n^{\alpha-\gamma} M_{jj'}^{(n)}(t)&\longrightarrow & 0, \quad j\neq j', \label{convergence_slow_M21} \end{eqnarray} for all $j$, $j'\neq j$, in $1,...,k$, $t\in [0,1]$, where we recall that $\Delta_j$ is defined in \eqref{Di}. \end{theorem} One interesting by-product of Theorem \ref{theo_regimes} is that it in particular gives some insight into the (unconditional) limiting distribution of $Z^{(n)}(t)$, with a limit in a somewhat simpler form. More precisely, the following corollary follows from the proofs of \eqref{convergence_fast} and \eqref{convergence_equilibrium}: \begin{cor}\label{rem:marginal} In the Fast arrivals case $\gamma >\alpha$, the following convergence holds \begin{equation}\label{remark_conv_distrib_simpler_fast} \frac{Z^{(n)}(t)}{n^{\gamma-\alpha}}\stackrel{\cal D}{\longrightarrow} \lambda \int_{0}^{t} \frac{{\cal Y}(v)}{v^\alpha}\ dv,\ n\to\infty, \quad t\in [0,1], \end{equation} where $\{{\cal Y}(t)=({\cal Y}_1(t),...,{\cal Y}_k(t)),\ t\in [0,1]\}$ is a (time homogeneous) stationary continuous time Markov chain on the state space ${\cal S}$, with infinitesimal generator matrix defined by \begin{equation}\label{generator_Y}
\lambda (\Delta_\pi^{-1} P' \Delta_\pi-I),\quad \Delta_\pi:=\mbox{\normalfont diag}(\pi(x),\ x\in {\cal S}). \end{equation} In the Equilibrium case $\gamma =\alpha$, one has \begin{equation}\label{remark_conv_distrib_simpler_equilibirum} Z^{(n)}(t)\stackrel{\cal D}{\longrightarrow}\left( \int_0^t {\cal Y}_j(v) \ \tilde{\nu}_j^\alpha(dv)\right)_{j=1,...,k},\ n\to\infty, \quad t\in [0,1]. \end{equation} Here $\{\tilde{\nu}_j^\alpha(t),\ t\in [0,1] \}$, $j=1,...,k$, are the inhomogeneous Poisson processes defined by $\tilde{\nu}_j^\alpha(t)=\nu_j^\alpha(t^{1-\alpha})$, $t\in [0,1]$, where $\{\nu_j^\alpha(t),\ t\in [0,1] \}$, $j=1,...,k$, are defined in Theorem \ref{theo_regimes}. \end{cor} As mentioned in Section \ref{sec:rescale}, \cite{MRRS02, GK03} introduced a notion of Fast and Slow growth similar to the Fast and Slow arrivals presented in Theorems \ref{theo_regimes} and \ref{theo_slow_arrival}, for a process of interest which is either a superposition of renewal processes with heavy tailed interarrivals or the cumulative input of an infinite source Poisson model with heavy tailed services. In those references, the process is shown to converge weakly or in finite dimensional distributions towards specific limit processes under appropriate scaling, see \cite[Theorem 1]{MRRS02} and \cite[Theorem 1]{GK03}. Here, the outline of the proof of Theorem \ref{theo_regimes} is the following: \begin{itemize} \item We will first expand the LT of the left hand side of \eqref{convergence_fast} and \eqref{convergence_equilibrium} as $n\to \infty$ and prove that the limit satisfies a particular ODE thanks to Proposition \ref{prop_Poisson_psi}, which will be referred to as Steps 1 and 2 in the proofs in the forthcoming Sections \ref{sec:fast} and \ref{sec:equilibrium}. \item Then, we will identify this limit as the LT of the right hand side of \eqref{convergence_fast} and \eqref{convergence_equilibrium} thanks to a proper use of the Feynman-Kac or Campbell formula. This step will be referred to as Step 3 in the proofs. \end{itemize} This is to be compared with the approach in \cite{BKMT14, MDT16}, where the authors derive ODEs for the limiting moment generating function and identify a gaussian limiting distribution for the normalized process.
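Before moving to the proofs, let us record an elementary check on the matrix appearing in \eqref{generator_Y} (all symbols are those already introduced, and the computation is classical): $\Delta_\pi^{-1}P'\Delta_\pi$ is a stochastic matrix, namely the transition matrix of the time reversal of the stationary chain $(X_i)_{i\in\mathbb{N}}$ (it is denoted by $P^{(r)}$ in Section \ref{sec:fast} below), since its entries are non negative and
$$
\sum_{y\in{\cal S}}\left(\Delta_\pi^{-1}P'\Delta_\pi\right)(x,y)=\sum_{y\in{\cal S}}\frac{\pi(y)P(y,x)}{\pi(x)}=\frac{\pi(x)}{\pi(x)}=1,\qquad x\in{\cal S},
$$
by stationarity of $\pi$. Hence $\lambda(\Delta_\pi^{-1}P'\Delta_\pi-I)$ in \eqref{generator_Y} is indeed a bona fide infinitesimal generator matrix.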
The paper is organized in the following way. Section \ref{main_proofs} is dedicated to the proofs of the main results, with Subsections \ref{sec:fast}, \ref{sec:equilibrium} and \ref{sec:slow} giving the proofs of the convergences in distribution of Theorem \ref{theo_regimes} in fast arrivals and equilibrium cases, and of Theorem \ref{theo_slow_arrival} in the slow arrivals case. The proof of Corollary \ref{rem:marginal} is included in Subsections \ref{sec:fast}, for the convergence \eqref{remark_conv_distrib_simpler_fast}, and \ref{sec:equilibrium}, for the convergence \eqref{remark_conv_distrib_simpler_equilibirum}. As a concluding remark, we will discuss in Section \ref{sec:remark_compute} some computational aspect for the limiting distributions mentioned in those different regimes in Theorem \ref{theo_regimes} in the particular case when $\alpha$ is a rational number lying in $(0,1)$.
\section{Proofs of Theorems \ref{theo_regimes}, \ref{theo_slow_arrival} and Corollary \ref{rem:marginal}}\label{main_proofs}
\subsection{Preliminary results}\label{sec:preliminary} We will repeatedly use the following general lemma in the proofs: \begin{lemm}\label{lemma_convergence}
Let $\left(t\in [0,1]\mapsto A_n(t) \right)_{n\in\mathbb{N}}$ be a sequence of continuous functions with values in $\mathbb{R}^{{\cal S}\times {\cal S}}$, and let us assume that there exists some continuous function $t\in [0,1]\mapsto A(t)\in \mathbb{R}^{{\cal S}\times {\cal S}}$ such that $\int_0^1 || A_n(v)-A(v)|| dv \longrightarrow 0$ as $n\to\infty$ for any matrix norm $||.||$. Let $y\in \mathbb{R}^{{\cal S}\times {\cal S}}$ and $t\in [0,1]\mapsto Y_n(t)\in \mathbb{R}^{{\cal S}\times {\cal S}}$ be the solution to the following differential equation \begin{equation}\label{lemma_EDOn} \left\{ \begin{array}{rcl} \frac{d}{dt}Y_n(t) &=& A_n(t) Y_n(t),\quad t\in [0,1],\\ Y_n(0)&=& y\in \mathbb{R}^{{\cal S}\times {\cal S}} , \end{array} \right. \quad n\in \mathbb{N} . \end{equation} Then one has $Y_n(t)\longrightarrow Y(t)$ uniformly in $t\in[0,1]$, as $n\to \infty$, where $t\in [0,1]\mapsto Y(t)\in \mathbb{R}^{{\cal S}\times {\cal S}}$ is the solution to the following differential equation \begin{equation}\label{lemma_EDO} \left\{ \begin{array}{rcl} \frac{d}{dt}Y(t) &=& A(t) Y(t),\quad t\in [0,1],\\ Y(0)&=& y . \end{array} \right. \end{equation} \end{lemm} \begin{proof} We first observe that, because of continuity of $t\in [0,1]\mapsto A_n(t)$ and $t\in [0,1]\mapsto A(t)$, \eqref{lemma_EDOn} and \eqref{lemma_EDO} read in integral form \begin{equation}\label{equ_diff_Y} Y_n(t)= y + \int_0^t A_n(v) Y_n(v) dv,\quad Y(t)= y + \int_0^t A(v) Y(v) dv \end{equation}
for all $t\in [0,1]$. Since the norm $||.||$ may be arbitrary, we pick a submultiplicative one on the set of ${\cal S}\times {\cal S}$ matrices. \eqref{equ_diff_Y} implies the following inequality $$
||Y_n(t)|| \le ||y||+ \int_0^t ||A_n(v)||.|| Y_n(v)|| dv,\quad \forall t\in [0,1] . $$
Gronwall's lemma thus implies that $ ||Y_n(t)|| \le ||y|| \exp\left( \int_0^t ||A_n(v)|| dv \right)$ for all $t\in [0,1]$. Since by assumption $\int_0^1 || A_n(v)-A(v)|| dv \longrightarrow 0$ as $n\to\infty$, one has that $\left(\int_0^1 ||A_n(v)|| dv\right)_{n\in \ensuremath{\mathbb{N}}}$ is a bounded sequence. We deduce the following finiteness \begin{multline*}
M_Y:= \sup_{n\in\ensuremath{\mathbb{N}}}\sup_{t\in [0,1]}||Y_n(t)|| \le \sup_{n\in\ensuremath{\mathbb{N}}}\sup_{t\in [0,1]} ||y|| \exp\left( \int_0^t ||A_n(v)|| dv \right)\\
\le ||y|| \exp\left( \sup_{n\in\ensuremath{\mathbb{N}}} \int_0^1 ||A_n(v)|| dv \right) <+\infty . \end{multline*}
Let us then introduce $M_A:= \sup_{v\in [0,1]}||A(v)||$, which is a finite quantity. Then one obtains that \begin{multline*}
||Y_n(t)-Y(t)||\le \int_0^t ||A_n(v)-A(v)||. ||Y_n(v)|| dv + \int_0^t ||A(v)||.||Y_n(v)-Y(v)||dv\\
\le M_Y \int_0^t ||A_n(v)-A(v)|| dv + M_A \int_0^t ||Y_n(v)-Y(v)||dv,\quad \forall t\in[0,1]. \end{multline*} Gronwall's lemma thus implies that, for all $t\in [0,1]$, \begin{multline*}
||Y_n(t)-Y(t)||\le M_Y \left[ \int_0^t ||A_n(v)-A(v)|| dv\right].\ e^{M_A t}\\ \le M_Y \left[ \int_0^1 ||A_n(v)-A(v)|| dv \right].\ e^{M_A}\longrightarrow 0 \mbox{ as } n\to \infty. \end{multline*} Since the right hand side of the above inequality is independent of $t\in [0,1]$, this proves the uniform convergence result. \end{proof} We finish this subsection by stating the differential equation satisfied by the Laplace Transform $\psi^{(n)}(s,t)$ of $\left(Z^{(n)}(t),X^{(n)}_{N^{(n)}_t}\right)$ defined in \eqref{def_LT_n}, which will be the central object studied in Subsections \ref{sec:fast} and \ref{sec:equilibrium}. Thanks to equation \eqref{Poisson_ODE} with the new parameters $\lambda_n$, $P_n$ instead of $\lambda$ and $P$ (and remembering that $\lambda_n (P_n-I)=\lambda(P-I)$), this reads here \begin{equation}\label{Poisson_ODEn} \left\{ \begin{array}{rcl} \partial_t \psi^{(n)}(s,t) &=& [\lambda (P-I) + \lambda n^\gamma P_n(\tilde{\pi}_n(s,t)-I)]\psi^{(n)}(s,t),\quad t\ge 0,\\ \psi^{(n)}(s,0)&=& I, \end{array} \right. \end{equation} for all $s=(s_1,...,s_k)\in (-\infty,0]^k$. And, from \eqref{def_pi_Q_tilda}, using the expansion $\prod_{j=1}^k (a_j+1)=1+ \sum_{I\subset \{1,...,k\}} \prod_{\ell \in I} a_\ell$ (where the sum runs over nonempty subsets $I$) for all real numbers $a_1,...,a_k$, we have the following expansion which will be useful later on: \begin{multline}\label{def_pi_n} \tilde{\pi}_n(s,t)-I=\mbox{diag} \left( \prod_{j=1}^k \left( (e^{s_j x_j}-1)\mathbb{P} \left[L_j^{(n)}>t\right]+1\right)-1, \ x=(x_1,...,x_k)\in {\cal S}\right)\\ = \mbox{diag} \left( \sum_{I\subset \{1,...,k\}} \prod_{\ell \in I}\left[(e^{s_\ell x_\ell}-1)\mathbb{P}\left[L_\ell^{(n)}>t\right]\right], \ x=(x_1,...,x_k)\in {\cal S}\right). \end{multline}
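To make the expansion \eqref{def_pi_n}, and the way it will be split according to $\mbox{Card}(I)$ in the next two subsections, completely explicit, here is the case $k=2$ (a purely illustrative instance): writing $a_\ell:=(e^{s_\ell x_\ell}-1)\mathbb{P}\left[L_\ell^{(n)}>t\right]$, $\ell=1,2$, the $x$th diagonal entry of $\tilde{\pi}_n(s,t)-I$ reads
$$
(a_1+1)(a_2+1)-1=\underbrace{a_1+a_2}_{\mbox{\scriptsize Card}(I)=1}+\underbrace{a_1 a_2}_{\mbox{\scriptsize Card}(I)=2}.
$$
In the proofs below, the terms with $\mbox{Card}(I)=1$ are the ones producing non trivial limits, while each product with $\mbox{Card}(I)\ge 2$ contains at least one extra factor $\mathbb{P}\left[L_\ell^{(n)}>t\right]$, which will make it negligible.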
\subsection{Case $\gamma>\alpha$: Fast arriving customers}\label{sec:fast} We now proceed to show convergence \eqref{convergence_fast} in Theorem \ref{theo_regimes}. In the present case, it is sensible to guess that $Z^{(n)}(t)$ tends to infinity as $n\to\infty$, hence it is natural to find a normalization under which convergence towards a proper distribution occurs. We renormalize the queue content by dividing it by $n^{\gamma-\alpha}$, i.e. we are here interested in $\left(Z^{(n)}(t)/n^{\gamma-\alpha},X^{(n)}_{N^{(n)}_t} \right)$, whose Laplace transform is given by $\psi^{(n)}(s/n^{\gamma-\alpha},t)$, $s=(s_1,...,s_k)\in (-\infty,0]^k$. In order to avoid cumbersome notation, we introduce the quantity $$ \beta:= \frac{1}{1-\alpha}\in (1,+\infty). $$ We observe then that \begin{equation}\label{time_transfo} t\in[0,1]\mapsto t^\beta \in [0,1] \end{equation} is a one to one mapping. Hence, studying the limiting distribution of $\left(Z^{(n)}(t)/n^{\gamma-\alpha},X^{(n)}_{N^{(n)}_t} \right)$ for all $t\in [0,1]$ amounts to studying the limiting distribution of \begin{equation}\label{fast_renormalized_beta} \left(Z^{(n)}(t^\beta)/n^{\gamma-\alpha},X^{(n)}_{N^{(n)}_{t^\beta }} \right) \end{equation} for all $t\in [0,1]$, and then changing the variable $t:=t^{1/\beta}$. The time transformation \eqref{time_transfo} may at this point look artificial, but this is a key step which will later on enable us to use the convergence result in Lemma \ref{lemma_convergence}. The LT of \eqref{fast_renormalized_beta} is given by $$\chi^{(n)}(s,t):= \psi^{(n)}(s/n^{\gamma-\alpha},t^\beta),\quad t\in [0,1].$$ From \eqref{Poisson_ODEn}, $\chi^{(n)}(s,t)$ satisfies \begin{equation}\label{Poisson_ODEn_fast} \left\{ \begin{array}{rcl} \partial_t \chi^{(n)}(s,t) &=& \beta t^{\beta-1}[\lambda (P-I) + \lambda n^\gamma P_n(\tilde{\pi}_n(s/n^{\gamma-\alpha},t^\beta)-I)] \chi^{(n)}(s,t),\quad t\in [0,1],\\ \chi^{(n)}(s,0)&=& I. \end{array} \right. \end{equation} The starting point for proving \eqref{convergence_fast} is the following: we will set out to prove that \begin{equation}\label{def_An_fast} A_n(s,t)=\beta t^{\beta-1}[\lambda (P-I) + \lambda n^\gamma P_n(\tilde{\pi}_n(s/n^{\gamma-\alpha},t^\beta)-I)] \end{equation} converges to some limit $A(s,t)$ as $n\to\infty$, use Lemma \ref{lemma_convergence}, then identify the limit $\chi(s,t):=\lim_{n\to\infty}\chi^{(n)}(s,t)$ as the Laplace Transform of a known distribution.\\ {\bf Step 1: Determining $A(s,t)$.} This step is dedicated to finding the limit function $t\in[0,1]\mapsto A(s,t)$ of \eqref{def_An_fast}. Recalling that $\lim_{n\to \infty}P_n=I$, studying the limit of \eqref{def_An_fast} amounts to studying that of $\beta t^{\beta-1}\lambda n^\gamma(\tilde{\pi}_n(s/n^{\gamma-\alpha},t^\beta)-I)$ as $n\to\infty$. In view of \eqref{def_pi_n}, the $x$th diagonal element of this latter term is \begin{equation}\label{term1_fast} \beta t^{\beta-1}\lambda n^\gamma \sum_{I\subset \{1,...,k\}} \prod_{\ell \in I}\left[(e^{s_\ell x_\ell/n^{\gamma-\alpha}}-1)\mathbb{P}\left[L_\ell^{(n)}>t^\beta\right]\right] \end{equation} of which we proceed to find the limit as $n\to \infty$. In order to study its convergence, we are going to isolate the terms in the sum \eqref{term1_fast} for which $\mbox{Card}(I)=1$ and $\mbox{Card}(I)\ge 2$, and show that the former admit a non zero limit and the latter tend to $0$.
We thus write \eqref{term1_fast} as \begin{eqnarray} && \beta t^{\beta-1}\lambda n^\gamma \sum_{I\subset \{1,...,k\}} \prod_{\ell \in I}\left[(e^{s_\ell x_\ell/n^{\gamma-\alpha}}-1)\mathbb{P}\left[L_\ell^{(n)}>t^\beta\right]\right]=J_n^1(s,t) + J_n^2(s,t),\quad \mbox{where}\nonumber\\ &&J_n^1(s,t)=J_n^1(s,t,x):= \beta t^{\beta-1}\lambda n^\gamma \sum_{\ell =1}^k (e^{s_\ell x_\ell/n^{\gamma-\alpha}}-1)\mathbb{P}\left[L_\ell^{(n)}>t^\beta\right],\label{def_J1n}\\ &&J_n^2(s,t)=J_n^2(s,t,x):= \beta t^{\beta-1}\lambda n^\gamma \sum_{\mbox{\tiny Card}(I)\ge 2} \ \prod_{\ell \in I}\left[(e^{s_\ell x_\ell/n^{\gamma-\alpha}}-1)\mathbb{P}\left[L_\ell^{(n)}>t^\beta\right]\right].\label{def_J2n} \end{eqnarray} Both terms $J_n^1(s,t)$ and $J_n^2(s,t)$ are studied separately. Using that $e^{s_\ell x_\ell/n^{\gamma-\alpha}}-1\sim s_\ell x_\ell/n^{\gamma-\alpha}$ as $n\to\infty$ and \begin{equation}\label{basic_fat_tail} \mathbb{P}\left[L_\ell^{(n)}>t^\beta\right]=\mathbb{P}\left[L_\ell>nt^\beta\right]\sim \frac{1}{n^\alpha t^{\beta \alpha}} \end{equation} when $t>0$, and since $\beta\alpha=\alpha/(1-\alpha)=\beta-1$, we arrive at $$ J_n^1(s,t)\sim \beta \lambda \sum_{\ell =1}^k t^{\beta-1} n^\gamma \frac{s_\ell x_\ell}{n^{\gamma-\alpha}} \frac{1}{n^\alpha t^{\beta \alpha}}\sim \beta \lambda \sum_{\ell =1}^k s_\ell x_\ell,\quad n\to \infty, $$ when $t>0$, and is $0$ when $t=0$.
Next we show that $J_n^2(s,t)$ tends to $0$ by showing that each term on the right hand side of \eqref{def_J2n} tend to $0$. So, if $I\subset \{1,...,k\}$ is such that $I=\{\ell_1,\ell_2\}$, i.e. $\mbox{Card}(I) =2$, then \begin{multline}\label{J2n_tends_zero}
\left| \beta t^{\beta-1}\lambda n^\gamma \prod_{\ell \in I}\left[(e^{s_\ell x_\ell/n^{\gamma-\alpha}}-1)\mathbb{P}\left[L_\ell^{(n)}>t^\beta\right]\right]\right|= \beta t^{\beta-1}\lambda n^\gamma \left|e^{s_{\ell_1} x_{\ell_1}/n^{\gamma-\alpha}}-1\right|\mathbb{P}\left[L_{\ell_1}>nt^\beta\right]\\
. \left|e^{s_{\ell_2} x_{\ell_2}/n^{\gamma-\alpha}}-1\right|\mathbb{P}\left[L_{\ell_2}>nt^\beta\right]\le \beta t^{\beta-1}\lambda n^\gamma |s_{\ell_1} x_{\ell_1}|.|s_{\ell_2} x_{\ell_2}| \frac{1}{n^{2(\gamma-\alpha)}}\mathbb{P}\left[L_{\ell_1}>nt^\beta\right]\\
= \beta t^{\beta-1}\lambda |s_{\ell_1} x_{\ell_1}|.|s_{\ell_2} x_{\ell_2}| \frac{1}{n^{\gamma-\alpha}} n^\alpha \mathbb{P}\left[L_{\ell_1}>nt^\beta\right], \end{multline}
where we used the inequality $|e^u-1|\le |u|$ for $u\le 0$ and $\mathbb{P}\left[L_{\ell_2}>nt^\beta\right]\le 1$. Thanks to \eqref{basic_fat_tail}, the right hand side of \eqref{J2n_tends_zero} thus tends to zero when $t\in (0,1]$. The case $\mbox{Card}(I) >2$ is dealt with similarly. Finally, all terms on the right hand side of \eqref{def_J2n} tend to $0$ as $n\to\infty$, i.e. $\lim_{n\to\infty}J_n^2(s,t)=0$ for all $t\in (0,1]$. When $t=0$, $J_n^2(s,0)=0$, so that the limit holds for all $t\in [0,1]$.\\ Hence we have that \eqref{term1_fast} tends to $\lim_{n\to\infty}J_n^1(s,t)+\lim_{n\to\infty}J_n^2(s,t)$, i.e. to $ \beta \lambda\sum_{\ell =1}^k s_\ell x_\ell$ when $t\in (0,1]$, and to $0$ when $t=0$. The candidate for the continuous function $A(s,t)$ is then \begin{equation}\label{candidate_Ast} t\in [0,1]\mapsto A(s,t):= \beta t^{\beta-1}\lambda (P-I) + \beta \lambda \sum_{\ell =1}^k s_\ell\Delta_\ell \end{equation} where we recall from \eqref{Di} that $\Delta_\ell=\mbox{diag} \left[ x_\ell ,\ x=(x_1,\ldots,x_k)\in{\cal S}\right]$. This is where the time transformation \eqref{time_transfo} described previously is important, as without it, it would not have been possible to exhibit the limit \eqref{candidate_Ast} for $A_n(s,t)$. Note that the limit when $t=0$ for $A_n(s,t)$ in \eqref{def_An_fast} differs from $A(s,0)=\beta \lambda \sum_{\ell =1}^k s_\ell\Delta_\ell$, as indeed a closer look at the limits of $J_n^1(s,t)$ and $J_n^2(s,t)$ would yield that $\lim_{n\to \infty}A_n(s,0)$ should rather be the $0$ matrix. This is due to the fact that the limit $t\in [0,1]\mapsto A(s,t)$ in Lemma \ref{lemma_convergence} has to be {\it continuous} so that the lemma holds.\\
{\bf Step 2: Determining $\chi(s,t)=\lim_{n\to\infty}\chi^{(n)}(s,t)$.} We now need to prove that $\int_0^1 || A_n(s,v)-A(s,v)|| dv \longrightarrow 0$ as $n\to\infty$ in order to apply Lemma \ref{lemma_convergence}. Thanks to \eqref{def_An_fast} and \eqref{candidate_Ast}, and by the definitions \eqref{def_J1n} and \eqref{def_J2n} of $J^1_n(s,t,x)$ and $J^2_n(s,t,x)$, we observe that $A_n(s,t)$ can be decomposed as \begin{multline*} A_n(s,t)=A(s,t) + P_n\ \mbox{diag}\left(J_n^1(s,t,x) - \beta \lambda \sum_{\ell =1}^k s_\ell x_\ell,\ x\in {\cal S}\right) \\ + P_n\ \mbox{diag}\left(J_n^2(s,t,x),\ x\in {\cal S}\right) + (P_n-I)\beta \lambda \sum_{\ell =1}^k s_\ell\Delta_\ell, \quad t\in[0,1]. \end{multline*}
Hence, since $\lim_{n\to \infty}P_n=I$, proving $\lim_{n\to\infty}\int_0^1 || A_n(s,v)-A(s,v)|| dv = 0$ amounts to proving that \begin{equation}\label{to_prove_limits_J12} \begin{array}{rcl}
\displaystyle\int_0^1 \left|J_n^1(s,v,x) - \beta \lambda \sum_{\ell =1}^k s_\ell x_\ell\right| dv & \longrightarrow & 0, \ \mbox{and}\\
\displaystyle\int_0^1 |J_n^2(s,v,x)|dv=\int_0^1 J_n^2(s,v,x)dv & \longrightarrow & 0, \end{array} \end{equation}
as $n\to\infty$, for each fixed $x\in {\cal S}$. Let us first focus on $\int_0^1 \left|J_n^1(s,v,x) - \beta \lambda \sum_{\ell =1}^k s_\ell x_\ell\right| dv$. We have \begin{eqnarray}
\int_0^1 \left|J_n^1(s,v,x) - \beta \lambda \sum_{\ell =1}^k s_\ell x_\ell\right| dv&\le & \sum_{\ell=1}^k (I^1_n(\ell) + I^2_n(\ell)), \mbox{ where, for all } \ell =1,...,k, \label{term3_fast}\\
I^1_n(\ell) &:=& \int_0^1 \lambda \beta v^{\beta-1}\left| n^\gamma(e^{s_\ell x_\ell/n^{\gamma-\alpha}}-1)- n^\alpha s_\ell x_\ell\right| \mathbb{P}\left[L_\ell^{(n)}>v^\beta\right]dv \nonumber\\
I^2_n(\ell) &:=& |s_\ell x_\ell| \int_0^1 \lambda \left|\beta v^{\beta-1} n^\alpha \mathbb{P}\left[L_\ell^{(n)}>v^\beta\right]-\beta \right|dv .\nonumber
\end{eqnarray}
Expanding the exponential function, one has that $|e^{s_\ell x_\ell/n^{\gamma-\alpha}}-1-s_\ell x_\ell/n^{\gamma-\alpha}|\le M_\ell/n^{2(\gamma-\alpha)}$ where $M_\ell>0$ only depends on $s_\ell$ and $x_\ell$. Thus, one deduces the following upper bounds for $I^1_n(\ell)$, $\ell =1,...,k$: \begin{eqnarray}
I^1_n(\ell) &=& \int_0^1 \lambda \beta v^{\beta-1}\left| n^\gamma(e^{s_\ell x_\ell/n^{\gamma-\alpha}}-1)- n^\alpha s_\ell x_\ell\right| \mathbb{P}\left[L_\ell> n v^\beta\right]dv\nonumber\\
&\le & n^\gamma \frac{M_\ell}{n^{2(\gamma-\alpha)}} \lambda \int_0^1 \beta v^{\beta-1} \mathbb{P}\left[L_\ell> n v^\beta\right]dv = \frac{M_\ell}{n^{\gamma-\alpha}} \beta \lambda \int_0^1 n^\alpha v^{\beta-1} \mathbb{P}\left[L_\ell> n v^\beta\right]dv \nonumber\\
&=& \frac{M_\ell}{n^{\gamma-\alpha}} \beta \lambda \int_0^1 (nv^{\beta})^\alpha \ \mathbb{P}\left[L_\ell> n v^\beta\right]dv,\label{term4_fast} \end{eqnarray} the last equality holding because $\beta-1=\beta \alpha$ implies that the integrand verifies $n^\alpha v^{\beta-1} =(nv^{\beta})^\alpha$. A consequence of the fact that $L_\ell$ is fat tailed with index $\alpha$ is that $\sup_{u\ge 0}u^\alpha \mathbb{P}(L_\ell>u)<+\infty$, from which one deduces immediately that \begin{equation}\label{bound_sup_L_l} \sup_{j\in\mathbb{N},\ v \in [0,1]} (jv^{\beta})^\alpha \ \mathbb{P}\left[L_\ell> j v^\beta\right]<+\infty \end{equation} (note that those two latter suprema are in fact equal). One then gets from \eqref{term4_fast} that \begin{equation}\label{term5_fast} I^1_n(\ell)\le \frac{M_\ell}{n^{\gamma-\alpha}} \beta \lambda \left[\sup_{j\in\mathbb{N},\ v \in [0,1]} (jv^{\beta})^\alpha \ \mathbb{P}\left[L_\ell> j v^\beta\right]\right]\longrightarrow 0,\quad n\to\infty . \end{equation} We now turn to $I^2_n(\ell)$, $\ell =1,...,k$. Using again $\beta-1=\beta \alpha$, one may write in the integrand of $I^2_n(\ell)$ that $v^{\beta-1} n^\alpha =(nv^{\beta})^\alpha$: hence
$$ I^2_n(\ell) = |s_\ell x_\ell| \int_0^1 \lambda \left|\beta (nv^{\beta})^\alpha \mathbb{P}\left[L_\ell>nv^\beta\right]-\beta \right|dv .$$ Since $L_\ell$ is fat tailed with index $\alpha$, estimates similar to the ones leading to the upper bound \eqref{term5_fast} for $I_n^1(\ell)$ yield that $$
\sup_{n\in\mathbb{N},\ v \in [0,1]}\left|\beta (nv^{\beta})^\alpha \mathbb{P}\left[L_\ell>nv^\beta\right]-\beta \right|<+\infty . $$
Furthermore, again because $L_\ell$ is fat tailed, one has $\mathbb{P}\left[L_\ell>nv^\beta\right] \sim 1/(nv^{\beta})^\alpha$ as $n\to \infty$ when $v>0$. Hence $\left|\beta (nv^{\beta})^\alpha \mathbb{P}\left[L_\ell>nv^\beta\right]-\beta \right|\longrightarrow 0 $ as $n\to \infty$ when $v\in (0,1]$, and is equal to $\beta$ when $v=0$. The dominated convergence theorem thus implies that \begin{equation}\label{term6_fast} I^2_n(\ell)\longrightarrow 0, \quad n\to\infty . \end{equation}
Gathering \eqref{term3_fast}, \eqref{term5_fast} and \eqref{term6_fast}, we thus deduce finally that $\int_0^1 \left|J_n^1(s,v,x) - \beta \lambda \sum_{\ell =1}^k s_\ell x_\ell\right| dv$ tends to $0$ as $n\to\infty$ for each $x\in{\cal S}$. \\ We now prove that $\int_0^1 J_n^2(s,v,x)dv\longrightarrow 0$ as $n\to \infty$. In view of the definition \eqref{def_J2n}, it suffices to prove that \begin{equation}\label{to_prove_J2}
\int_0^1\beta v^{\beta-1}\lambda n^\gamma \ \prod_{\ell \in I}\left[|e^{s_\ell x_\ell/n^{\gamma-\alpha}}-1|\ \mathbb{P}\left[L_\ell^{(n)}>v^\beta\right]\right]dv \end{equation}
tends to $0$ as $n\to\infty$ for $I\subset \{1,...,k\}$ such that $\mbox{ Card}(I)\ge 2$. Let us prove the convergence for $\mbox{ Card}(I)= 2$, i.e. for $I=\{\ell_1,\ell_2\}$ for some $\ell_1\neq\ell_2$ in $1,...,k$, the case $\mbox{ Card}(I)> 2$ being dealt with similarly. By the basic inequality $|e^u-1|\le |u|$ for $u\le 0$ we deduce that $|e^{s_{\ell_i} x_{\ell_i}/n^{\gamma-\alpha}}-1|\le |s_{\ell_i} x_{\ell_i}|/n^{\gamma-\alpha}$, $i=1,2$. Since $\mathbb{P}\left[L_{\ell_1}^{(n)}>v^\beta\right]\le 1$ for all $v\in [0,1]$, we then deduce that \eqref{to_prove_J2} is upper bounded by
$$\frac{|s_{\ell_1} x_{\ell_1}|}{n^{\gamma-\alpha}} |s_{\ell_2} x_{\ell_2}| \int_0^1 \beta v^{\beta-1}\lambda n^\alpha \mathbb{P}\left[L_{\ell_2}^{(n)}>v^\beta\right] dv. $$ As $v^{\beta-1} n^\alpha =(nv^{\beta})^\alpha$, and thanks to \eqref{bound_sup_L_l}, the latter quantity can in turn be rewritten and bounded as follows \begin{multline*}
\frac{|s_{\ell_1} x_{\ell_1}|}{n^{\gamma-\alpha}} |s_{\ell_2} x_{\ell_2}| \int_0^1 \beta \lambda (nv^{\beta})^\alpha \mathbb{P}\left[L_{\ell_2}^{(n)}>v^\beta\right] dv
= \frac{|s_{\ell_1} x_{\ell_1}|}{n^{\gamma-\alpha}} |s_{\ell_2} x_{\ell_2}| \int_0^1 \beta \lambda (nv^{\beta})^\alpha \mathbb{P}\left[L_{\ell_2}>nv^\beta\right] dv\\
\le \frac{|s_{\ell_1} x_{\ell_1}|}{n^{\gamma-\alpha}} |s_{\ell_2} x_{\ell_2}| \beta \lambda \left[\sup_{j\in\mathbb{N},\ v \in [0,1]} (jv^{\beta})^\alpha \ \mathbb{P}\left[L_{\ell_2}> j v^\beta\right] \right] \longrightarrow 0,\quad n\to\infty, \end{multline*} proving that \eqref{to_prove_J2} tends to $0$ as $n\to\infty$ when $I=\{\ell_1,\ell_2\}$.\\
Hence we have just proved \eqref{to_prove_limits_J12}, which implies $\int_0^1 || A_n(s,v)-A(s,v)|| dv \longrightarrow 0$. We may then use Lemma \ref{lemma_convergence} to deduce that $\chi^{(n)}(s,t)$ converges to $\chi(s,t)$ which satisfies \begin{equation}\label{Poisson_ODE_fast} \left\{ \begin{array}{rcl} \partial_t \chi(s,t) &=& A(s,t)\chi(s,t)=\left[\beta t^{\beta-1}\lambda (P-I) + \beta \lambda \sum_{\ell =1}^k s_\ell\Delta_\ell \right]\chi(s,t) ,\quad t\in [0,1],\\ \chi(s,0)&=& I. \end{array} \right. \end{equation} {\bf Step 3: Identifying the limit in distribution.} Let us note that \eqref{Poisson_ODE_fast} does not admit an explicit solution in general. However, since we purposely chose $s=(s_1,...,s_k)$ with $s_j\le 0$, $j=1,...,k$, one has that $\sum_{j=1}^k s_j \Delta_j = \sum_{j=1}^k s_j\ \mbox{diag}(x_j,\ x\in {\cal S})$ is a diagonal matrix with non positive entries. Let $\Delta_\pi:=\mbox{\normalfont diag}(\pi(x),\ x\in {\cal S})$ and let us introduce the matrix $P^{(r)}$ defined by $P^{(r)}=\Delta_\pi^{-1}P' \Delta_\pi\iff P=\Delta_\pi^{-1}P^{(r)'} \Delta_\pi$. It is standard that $P^{(r)}$ is the transition matrix of the reversed version of the stationary Markov chain $\{X_i,\ i\in\mathbb{N} \}$ with distribution $\pi$, and that $\beta t^{\beta-1}\lambda (P^{(r)}-I)$ is the infinitesimal generator matrix of an inhomogeneous Markov process \begin{equation}\label{def_U} \{U(t)=(U_j(t))_{j=1,...,k}\in {\cal S},\ t\in [0,1]\} \end{equation} with values in ${\cal S}$, and initial distribution $U(0)\sim\pi$.
In fact, it turns out that the conditional distribution of $U(t)$ given $U(0)$ is given by $\left[\mathbb{P}(U(t)=y|\ U(0)=x)\right]_{(x,y)\in {\cal S}^2}=\exp(t^\beta \lambda(P^{(r)}-I))$, which results in $U(t)\sim\pi$ for all $t\in[0,1]$, i.e. that $\{U(t),\ t\in [0,1]\}$ is stationary. Since $\sum_{j=1}^k s_j \Delta_j$ is diagonal, one checks easily that $A(s,t)=\Delta_\pi^{-1} \left[\beta t^{\beta-1}\lambda ({P^{(r)}}'-I)+\beta \lambda \sum_{j=1}^k s_j \Delta_j\right]\Delta_\pi$ and that $Y(t)=Y(s,t):=\Delta_\pi^{-1} \chi(s,t)'\Delta_\pi $ satisfies the differential equation $$ \left\{ \begin{array}{rcl} \partial_t Y(t) &=& Y(t)\left[\beta t^{\beta-1}\lambda ({P^{(r)}}-I) + \beta \lambda \sum_{\ell =1}^k s_\ell\Delta_\ell \right] ,\quad t\in [0,1],\\ Y(0)&=& I. \end{array} \right. $$
The Feynman-Kac formula ensures that one has the representation
$$Y(t)=Y(s,t)=\left[ \mathbb{E}\left[\left. {\bf 1}_{[U(t)=y]}\exp\left(\sum_{j=1}^k s_j \beta \lambda\int_0^t U_j(v) dv \right)\right| U(0)=x\right]\right]_{(x,y)\in {\cal S}^2},\quad \forall t\in [0,1],$$ see \cite[Chapter III, 19, p.272]{Rogers_Williams00} for the general theorem on this formula, or \cite[Section 5, Expression (5.2) and differential equation (5.3)]{BH96} for the particular case of a finite Markov chain, adapted here to an inhomogeneous Markov process. Also, the reversed process $\{U(1-t),\ t\in [0,1]\}$ admits ${\Delta_\pi}^{-1}\beta (1-t)^{\beta-1}\lambda ({P^{(r)}}'-I)\Delta_\pi= \beta (1-t)^{\beta-1}\lambda (P-I)$ as infinitesimal generator matrix, which is the generator of the process $\{{\cal X}^\alpha (t)=({\cal X}_1^\alpha (t),...,{\cal X}_k^\alpha (t))\in {\cal S},\ t\in[0,1] \}$ introduced in the statement of Theorem \ref{theo_regimes}, so that $\{{\cal X}^\alpha (t),\ t\in[0,1] \}\stackrel{\cal D}{=}\{U(1-t),\ t\in [0,1]\}$ pathwise. Hence, one obtains for all $x$ and $y$ in ${\cal S}$ that \begin{eqnarray}
&&\mathbb{E}\left[\left. {\bf 1}_{[U(t)=y]}\exp\left(\sum_{j=1}^k s_j \beta \lambda\int_0^t U_j(v) dv \right)\right| U(0)=x\right]\nonumber\\
&=& \mathbb{E}\left[\left. {\bf 1}_{[{\cal X}^\alpha (1-t)=y]}\exp\left(\sum_{j=1}^k s_j \beta \lambda\int_{1-t}^1 {\cal X}^\alpha_j (v) dv \right)\right|{\cal X}^\alpha (1)=x\right]\nonumber\\
&=& \mathbb{E}\left[\left. {\bf 1}_{[{\cal X}^\alpha (1)=x]}\exp\left(\sum_{j=1}^k s_j \beta \lambda\int_{1-t}^1 {\cal X}^\alpha_j (v) dv \right)\right| {\cal X}^\alpha (1-t)=y\right] \frac{\pi(y)}{\pi(x)},\label{relation_U_chi}
\end{eqnarray} the last line coming from the fact that $U(0)$, $U(t)$, $ {\cal X}^\alpha (1-t)$ and ${\cal X}^\alpha (1)$ all have same distribution $\pi$. Switching the role of $x$ and $y$ in the above results in the following relationship: \begin{eqnarray*}
&&\left[ \mathbb{E}\left[\left. {\bf 1}_{[{\cal X}^\alpha (1)=y]}\exp\left(\sum_{j=1}^k s_j \beta \lambda\int_{1-t}^1 {\cal X}^\alpha_j (v) dv \right)\right| {\cal X}^\alpha (1-t)=x\right] \right]_{(x,y)\in {\cal S}^2}\\
&=& \left[ \mathbb{E}\left[\left. {\bf 1}_{[U(t)=x]}\exp\left(\sum_{j=1}^k s_j \beta \lambda\int_0^t U_j(v) dv \right)\right| U(0)=y\right] \frac{\pi(y)}{\pi(x)}\right]_{(x,y)\in {\cal S}^2}\\ &=& \Delta_\pi^{-1} Y(t)'\Delta_\pi=\chi(s,t). \end{eqnarray*} Since we just proved that $\chi^{(n)}(s,t):= \psi^{(n)}(s/n^{\gamma-\alpha},t^\beta)$ converges as $n\to \infty$ towards $\chi(s,t)$, expressed above, for all $s=(s_1,...,s_k)\in (-\infty, 0]^k$, and identifying Laplace transforms, we obtained in conclusion that \begin{equation}\label{conv_fast_final}
{\cal D}\left(\left. \left(Z^{(n)}(t^\beta)/n^{\gamma-\alpha},X^{(n)}_{N^{(n)}_{t^\beta }} \right)\right|\ X^{(n)}_0 \right)\longrightarrow {\cal D}\left( \left. \left( \beta\lambda \int_{1-t}^{1} {\cal X}^\alpha(v)\ dv,\ {\cal X}^\alpha (1)\right)
\right|\ {\cal X}^\alpha(1-t) \right) \end{equation} as $n\to\infty$ for all $t\in[0,1]$. Changing $t$ into $t^{1/\beta}$ yields \eqref{convergence_fast}.\\ {\bf Proof of the convergence \eqref{remark_conv_distrib_simpler_fast} in Corollary \ref{rem:marginal}. }With the previous definitions of processes $\{U(t),\ t\in [0,1]\}$ in \eqref{def_U} and $\{{\cal X}^\alpha (t),\ t\in[0,1] \}$, \eqref{relation_U_chi} implies the following matrix equality \begin{multline}\label{relation_U_chi_matrix}
\left[ \mathbb{E}\left[\left. {\bf 1}_{[U(t)=y]}\exp\left(\sum_{j=1}^k s_j \beta \lambda\int_0^t U_j(v) dv \right)\right| U(0)=x\right] \right]_{(x,y)\in {\cal S}^2}\\
= \left[ \mathbb{E}\left[\left. {\bf 1}_{[{\cal X}^\alpha (1)=x]}\exp\left(\sum_{j=1}^k s_j \beta \lambda\int_{1-t}^1 {\cal X}^\alpha_j (v) dv \right)\right| {\cal X}^\alpha (1-t)=y\right] \frac{\pi(y)}{\pi(x)}\right]_{(x,y)\in {\cal S}^2} \end{multline}
Left-multiplying and right-multiplying \eqref{relation_U_chi_matrix} respectively by the row vector $(\pi(x))_{x\in {\cal S}}$ and the column vector ${\bf 1}$ results in the following equality of LTs \begin{multline}\label{LT_reverse_fast} \mathbb{E}\left[ \exp\left(\sum_{j=1}^k s_j \beta \lambda\int_0^t U_j(v) dv \right) \right]\\ = \mathbb{E}\left[\exp\left(\sum_{j=1}^k s_j \beta \lambda\int_{1-t}^1 {\cal X}^\alpha_j (v) dv \right)\right],\quad s=(s_1,...,s_k)\in (-\infty, 0]^k, \end{multline} which, combined with \eqref{conv_fast_final}, yields the convergence $\frac{Z^{(n)}\left(t^\beta\right)}{n^{\gamma-\alpha}}\stackrel{\cal D}{\longrightarrow} \beta\lambda \int_{0}^{t} U(v)\ dv$ as $n\to \infty$. Changing $t$ into $t^{1/\beta}$ and performing the change of variable $ v:=v^{1/\beta}=v^{1-\alpha} $, we obtain \begin{equation}\label{conv_distribution_reversed_fast} \frac{Z^{(n)}\left(t\right)}{n^{\gamma-\alpha}}\stackrel{\cal D}{\longrightarrow} \lambda \int_{0}^{t} \frac{U(v^{1-\alpha})}{v^\alpha}\ dv,\ n\to\infty, \quad t\in [0,1]. \end{equation} Since the ${\cal S}$ valued Markov process $\{U(t),\ t\in [0,1]\} $ admits $\beta t^{\beta-1}\lambda (P^{(r)}-I)$ as the infinitesimal generator matrix, the time changed Markov process $\{{\cal Y}(t):=U(t^{1-\alpha})=U(t^{1/\beta}),\ t\in [0,1]\} $ admits \eqref{generator_Y} as generator, so that \eqref{remark_conv_distrib_simpler_fast} follows from \eqref{conv_distribution_reversed_fast}.
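For completeness, here is the short computation behind the last assertion; it relies on the standard fact that, for a deterministic, increasing and differentiable time change $h$, the process $U(h(\cdot))$ is again an inhomogeneous Markov chain whose generator is $h'(t)$ times the generator of $U$ evaluated at time $h(t)$. With $h(t)=t^{1/\beta}=t^{1-\alpha}$ this gives, for ${\cal Y}(t)=U(h(t))$, the generator
$$
h'(t)\,\beta\, h(t)^{\beta-1}\,\lambda (P^{(r)}-I)=\frac{1}{\beta}\,t^{\frac{1}{\beta}-1}\cdot \beta\, t^{\frac{\beta-1}{\beta}}\,\lambda (P^{(r)}-I)=\lambda (P^{(r)}-I)=\lambda (\Delta_\pi^{-1} P' \Delta_\pi-I),
$$
since $\frac{1}{\beta}-1+\frac{\beta-1}{\beta}=0$; this is exactly \eqref{generator_Y}, and it also shows why the time change removes the inhomogeneity, making $\{{\cal Y}(t),\ t\in [0,1]\}$ time homogeneous as stated in Corollary \ref{rem:marginal}.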
\subsection{Equilibrium case $\gamma=\alpha$}\label{sec:equilibrium} We now proceed to show convergence \eqref{convergence_equilibrium} in Theorem \ref{theo_regimes}. Intuitively, we are in the critical case where customers should arrive just fast enough such that the queue at time $t$ converges as $n\to\infty$. We are here interested in the behaviour of ${\cal D}\left(
\left. \left(Z^{(n)}(t),X^{(n)}_{N^{(n)}_t} \right) \right|\ X^{(n)}_0 \right)$ as $n\to\infty$ when $t\in[0,1]$ is fixed. As in Section \ref{sec:fast}, we first consider $t^\beta$ instead of $t$ and let $$\chi^{(n)}(s,t):= \psi^{(n)}(s,t^\beta),\quad t\in [0,1],$$ be the corresponding Laplace transform, where $s=(s_1,...,s_k)\in (-\infty,0]^k$. $t\in [0,1]\mapsto \chi^{(n)}(s,t)$ then satisfies, thanks to \eqref{Poisson_ODEn}, the following differential equation \begin{equation}\label{Poisson_ODEn_equilibrium} \left\{ \begin{array}{rcl} \partial_t \chi^{(n)}(s,t) &=& \beta t^{\beta-1}[\lambda (P-I) + \lambda n^\gamma P_n(\tilde{\pi}_n(s,t^\beta)-I)] \chi^{(n)}(s,t),\quad t\in [0,1],\\ \chi^{(n)}(s,0)&=& I. \end{array} \right. \end{equation} The present case follows the same roadmap as Subsection \ref{sec:fast}: we will study the behaviour as $n\to\infty$ of $\lambda n^\gamma(\tilde{\pi}_n(s,t^\beta)-I)$ in order to obtain a limit as $n\to\infty$ of \begin{equation}\label{def_An_equilibirum} A_n(s,t)=\beta t^{\beta-1}[\lambda (P-I) + \lambda n^\gamma P_n(\tilde{\pi}_n(s,t^\beta)-I)] \end{equation} then getting a limiting matrix differential equation with solution $\chi(s,t)=\lim_{n\to\infty} \chi^{(n)}(s,t)$. Then we will identify $\chi(s,t)$ as the Laplace transform of a (conditional) distribution, yielding \eqref{convergence_equilibrium}.\\ {\bf Step 1: Determining $A(s,t)=\lim_{n\to \infty }A_n(s,t)$.} We recall from \eqref{def_pi_n} that the $(x,x)$th diagonal element of $\beta t^{\beta-1}\lambda n^\gamma(\tilde{\pi}_n(s,t^\beta)-I)$ is $\beta t^{\beta-1}\lambda n^\gamma\sum_{I\subset \{1,...,k\}} \prod_{\ell \in I}\left[(e^{s_\ell x_\ell}-1)\mathbb{P}\left[L_\ell>nt^\beta\right]\right]$, which we decompose as in Section \ref{sec:fast} as $K_n^1(s,t)+K_n^2(s,t)$ with \begin{eqnarray}
K_n^1(s,t)&=&K_n^1(s,t,x):= \beta t^{\beta-1}\lambda n^\gamma \sum_{\ell =1}^k (e^{s_\ell x_\ell}-1)\mathbb{P}\left[L_\ell >n t^\beta\right],\label{def_K1n}\\ K_n^2(s,t)&=&K_n^2(s,t,x):= \beta t^{\beta-1}\lambda n^\gamma \sum_{\mbox{\tiny Card}(I)\ge 2} \ \prod_{\ell \in I}\left[(e^{s_\ell x_\ell}-1)\mathbb{P}\left[L_\ell >nt^\beta\right]\right].\label{def_K2n} \end{eqnarray} The important point here is that, throughout this subsection, we have $\gamma=\alpha$ in the expressions \eqref{def_An_equilibirum}, \eqref{def_K1n} and \eqref{def_K2n}, which will impact on the convergences and limiting results we are going to prove. Using that $\mathbb{P}\left[L_\ell >n t^\beta\right]\sim \frac{1}{n^\alpha}\frac{1}{t^{\alpha\beta}}$, $n\to\infty$, when $t>0$, and since $\alpha\beta=\beta-1$, and $\gamma=\alpha$, one here finds that $$ K_n^1(s,t)=K_n^1(s,t,x)\longrightarrow \left\{ \begin{array}{cl} \beta\lambda \sum_{\ell =1}^k (e^{s_\ell x_\ell}-1), & t>0,\\ 0, & t=0, \end{array}\right.
\quad n\to \infty . $$ As to $K_n^2(s,t)$, one proves easily that it tends to $0$ as $n\to\infty$ for all $t\in [0,1]$, as the sum in \eqref{def_K2n} is over $\mbox{Card}(I)\ge 2$, and using the fat tailed property of the service times. The candidate for the continuous function is thus \begin{equation}\label{candidate_Ast_equilibrium} t\in [0,1]\mapsto A(s,t):= \beta t^{\beta-1}\lambda (P-I) + \beta \lambda \sum_{\ell =1}^k \mbox{diag}(e^{s_\ell x_\ell}-1,\ x=(x_1,...,x_k)\in {\cal S}). \end{equation}
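Before carrying out Steps 2 and 3, it may help to see, in a deliberately degenerate illustrative case, why Poisson processes appear in the limit: if ${\cal S}$ were reduced to a single state $x=(x_1,...,x_k)$, then $P-I=0$, the limiting equation $\partial_t \chi(s,t)=A(s,t)\chi(s,t)$ with $A(s,t)$ as in \eqref{candidate_Ast_equilibrium} would be scalar, and it would solve explicitly as
$$
\chi(s,t)=\exp\left(\beta\lambda t\sum_{\ell=1}^k\left(e^{s_\ell x_\ell}-1\right)\right),
$$
which is the joint Laplace transform of $(x_\ell N_\ell)_{\ell=1,...,k}$ for independent Poisson random variables $N_\ell$ with mean $\beta\lambda t$. This is consistent with the right hand side of \eqref{convergence_equilibrium}: in that degenerate case, and at time $t^\beta$ (recall the time transformation \eqref{time_transfo}), its $j$th coordinate reduces to $x_j\left(\nu_j^\alpha(1)-\nu_j^\alpha(1-t)\right)$, i.e. $x_j$ times a Poisson random variable with mean $\beta\lambda t$.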
{\bf Step 2: Determining $\chi(s,t)=\lim_{n\to\infty}\chi^{(n)}(s,t)$.} We now wish to apply Lemma \ref{lemma_convergence} and prove that $\int_0^1 || A_n(s,v)-A(s,v)|| dv \longrightarrow 0$ where $A_n(s,t)$ and $A(s,t)$ are defined in \eqref{def_An_equilibirum} and \eqref{candidate_Ast_equilibrium}. The method is very similar to the one used to prove \eqref{to_prove_limits_J12} in Step 2 of Section \ref{sec:fast}, as this is equivalent to proving for all $x\in {\cal S}$ that \begin{eqnarray}
\int_0^1 \left| K_n^1(s,v,x)- \beta\lambda \sum_{\ell =1}^k (e^{s_\ell x_\ell}-1)\right| dv &\longrightarrow & 0,\label{limits_K1n}\\ \int_0^1 K_n^2(s,v,x)dv &\longrightarrow & 0 \label{limits_K2n} \end{eqnarray} as $n\to\infty$. In view of the expression \eqref{def_K2n} of $K^2_n(s,t)$, \eqref{limits_K2n} is proved the same way as for proving that $\lim_{n\to\infty}\int_0^1 J_n^2(s,v,x)dv=0$ in Step 2 of Section \ref{sec:fast}. More precisely, it suffices from \eqref{def_K2n} to prove that \begin{equation}\label{proof_limit_K2n} \lim_{n\to \infty} \int_0^1 \beta t^{\beta-1}\lambda n^\alpha \prod_{\ell \in I} \mathbb{P}\left[L_\ell >nt^\beta\right] dt =0 \end{equation} for all $I\subset \{1,...,k\}$, $\mbox{Card}(I)\ge 2$. We prove it for $I=\{\ell_1,\ell_2\}$, $\ell_1\neq \ell_2$, the proof for $\mbox{Card}(I)> 2$ being very similar. The trick is again to use that $v^{\beta-1} n^\alpha =(nv^{\beta})^\alpha$ as well as the previously established upper bound \eqref{bound_sup_L_l}, resulting in \begin{multline*} \int_0^1 \beta t^{\beta-1}\lambda n^\alpha \mathbb{P}\left[L_{\ell_1} >nt^\beta\right] \mathbb{P}\left[L_{\ell_2} >nt^\beta\right]dt \\ \le \beta\lambda \left[\sup_{j\in\mathbb{N},\ v \in [0,1]} (jv^{\beta})^\alpha \ \mathbb{P}\left[L_{\ell_1}> j v^\beta\right] \right]\ \int_0^1 \mathbb{P}\left[L_{\ell_2} >nt^\beta\right]dt, \end{multline*}
which converges to zero as $n\to\infty$ by the dominated convergence theorem, proving \eqref{proof_limit_K2n} when $\mbox{Card}(I)= 2$. As to \eqref{limits_K1n}, this is proved, in view of the expression \eqref{def_K1n} of $K^1_n(s,t)$, by showing that $\int_0^1 \lambda \left|\beta v^{\beta-1} n^\alpha \mathbb{P}\left[L_\ell>nv^\beta\right]-\beta \right|dv$ tends to $0$ as $n\to\infty$ for all $\ell=1,...,k$, as again we have that $\gamma=\alpha$. However,
this was already proved in Step 2 of Section \ref{sec:fast} when proving that $\lim_{n\to\infty}I_n^2(\ell)=0$, $\ell=1,...,k$, see the arguments leading to the convergence \eqref{term6_fast}. All in all, one has the convergence $\int_0^1 || A_n(s,v)-A(s,v)|| dv \longrightarrow 0$, and Lemma \ref{lemma_convergence} is applicable so that $\chi^{(n)}(s,t)$ converges to $\chi(s,t)$ which satisfies \begin{equation}\label{Poisson_ODE_equilibrium} \left\{ \begin{array}{rcl} \partial_t \chi(s,t) &=& A(s,t)\chi(s,t)=\left[\beta t^{\beta-1}\lambda (P-I) \right.\\ &+&\left. \beta \lambda \sum_{\ell =1}^k \mbox{diag}(e^{s_\ell x_\ell}-1,\ x=(x_1,...,x_k)\in {\cal S}) \right]\chi(s,t) ,\quad t\in [0,1],\\ \chi(s,0)&=& I. \end{array} \right. \end{equation} {\bf Step 3: Identifying the limit in distribution.} With the same notation as in Step 3 of Section \ref{sec:fast} for the process $\{{\cal X}^\alpha (t)=({\cal X}_1^\alpha (t),...,{\cal X}_k^\alpha (t))\in {\cal S},\ t\in[0,1] \}$, one finds this time that \begin{equation}\label{equilibrium_chi}
\chi(s,t)= \left[ \mathbb{E}\left[\left. {\bf 1}_{[{\cal X}^\alpha (1)=y]}\exp\left(\sum_{j=1}^k \beta \lambda\int_{1-t}^1 \left(e^{s_j{\cal X}^\alpha_j (v)}-1\right) dv \right)\right| {\cal X}^\alpha (1-t)=x\right] \right]_{(x,y)\in {\cal S}^2} \end{equation} for all $s=(s_1,...,s_k)\in (-\infty,0]^k$. We recall the Campbell formula, which states that for every measurable function $f:t\in [0,+\infty) \mapsto f(t)\in \mathbb{R}$ such that $\int_0^\infty (e^{f(v)}-1) \xi \ dv$ is finite for some $\xi>0$, one has the identity $$ \exp \left(\int_0^\infty \left(e^{f(v)}-1\right) \xi \ dv \right) =\mathbb{E} \left[ \exp\left( \int_0^\infty f(v)\ \nu(dv)\right)\right], $$ where $\{ \nu(x),\ x\ge 0\}$ is a Poisson process with intensity $\xi$, see \cite[Section 3.2]{K93}. Conditioning on $\{{\cal X}^\alpha(v),\ v\in [0,1] \}$, this allows \eqref{equilibrium_chi} to be rewritten as $$
\chi(s,t)= \left[ \mathbb{E}\left[\left. {\bf 1}_{[{\cal X}^\alpha (1)=y]}\exp\left(\sum_{j=1}^k s_j\int_{1-t}^{1} {\cal X}^\alpha_j(v)\ \nu_j^\alpha(dv) \right)\right| {\cal X}^\alpha (1-t)=x\right] \right]_{(x,y)\in {\cal S}^2} $$ where $\{\nu_j^\alpha(t),\ t\ge 0 \}$, $j=1,...,k$, are independent Poisson processes with intensities $\beta\lambda=\lambda/(1-\alpha)$, and independent from $\{{\cal X}^\alpha(t),\ t\in[0,1] \}$. Identifying Laplace Transforms, we obtain in conclusion that \begin{multline}\label{conv_equilibrium_final}
{\cal D}\left(\left. \left(Z^{(n)}(t^\beta),X^{(n)}_{N^{(n)}_{t^\beta }} \right)\right|\ X^{(n)}_0 \right)\\ \longrightarrow {\cal D}\left( \left. \left( \left( \int_{1-t}^{1} {\cal X}^\alpha_j(v)\ d\nu_j^\alpha(v)\right)_{j=1,...,k},\ {\cal X}^\alpha (1)\right)
\right|\ {\cal X}^\alpha(1-t) \right) \end{multline} as $n\to\infty$ for all $t\in [0,1]$. Changing $t$ into $t^{1/\beta}$ completes the proof of \eqref{convergence_equilibrium}.\\ {\bf Proof of the convergence \eqref{remark_conv_distrib_simpler_equilibirum} in Corollary \ref{rem:marginal}. }This follows the same pattern as the proof of \eqref{remark_conv_distrib_simpler_fast}, to which we refer here. More precisely, one verifies this time that, from \eqref{equilibrium_chi}, the analog of \eqref{LT_reverse_fast} in the Fast arrival case is here \begin{multline}\label{LT_reverse_equilibrium} \mathbb{E}\left[ \exp\left(\sum_{j=1}^k s_j\int_0^t U_j(v)\ \nu_j^\alpha (dv) \right) \right]\\ = \mathbb{E}\left[\exp\left(\sum_{j=1}^k s_j\int_{1-t}^1 {\cal X}^\alpha_j(v)\ d\nu_j^\alpha(v) \right)\right],\quad s=(s_1,...,s_k)\in (-\infty, 0]^k, \end{multline} which, combined with \eqref{conv_equilibrium_final}, yields the convergence $Z^{(n)}\left(t^\beta\right)\stackrel{\cal D}{\longrightarrow} \left( \int_{0}^{t} U_j(v)\ \nu_j^\alpha (dv)\right)_{j=1,...k}$ as $n\to \infty$. Changing $t$ into $t^{1/\beta}$ and performing the change of variable $ v:=v^{1/\beta}=v^{1-\alpha} $, we obtain \begin{equation}\label{conv_distribution_reversed_equilibrium} Z^{(n)}\left(t\right)\stackrel{\cal D}{\longrightarrow} \left( \int_{0}^{t} U_j(v^{1-\alpha})\ \tilde{\nu}_j^\alpha(dv)\right)_{j=1,...,k},\ n\to\infty, \quad t\in [0,1], \end{equation} where $\{\tilde{\nu}_j^\alpha(t),\ t\in [0,1] \}$, $j=1,...,k$, are the inhomogeneous independent Poisson processes given by $\tilde{\nu}_j^\alpha(t)=\nu_j^\alpha(t^{1/\beta})=\nu_j^\alpha(t^{1-\alpha})$, i.e. Poisson processes with non constant intensity $\lambda t^{-\alpha}$. Arguing, as in the Fast arrival case, the time changed Markov process $\{{\cal Y}(t):=U(t^{1-\alpha})=U(t^{1/\beta}),\ t\in [0,1]\} $ admits \eqref{generator_Y} as generator, hence \eqref{remark_conv_distrib_simpler_equilibirum} follows from \eqref{conv_distribution_reversed_equilibrium}.
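For the record, the non constant intensity claimed for $\tilde{\nu}_j^\alpha$ follows from a one line computation (all symbols are as above): since $\nu_j^\alpha$ has constant intensity $\beta\lambda=\lambda/(1-\alpha)$,
$$
\mathbb{E}\left[\tilde{\nu}_j^\alpha(t)\right]=\mathbb{E}\left[\nu_j^\alpha\left(t^{1-\alpha}\right)\right]=\frac{\lambda}{1-\alpha}\,t^{1-\alpha}=\int_0^t \lambda v^{-\alpha}\,dv,\qquad t\in[0,1],
$$
so that $\tilde{\nu}_j^\alpha$ is indeed an inhomogeneous Poisson process with intensity $\lambda t^{-\alpha}$; this intensity blows up at $t=0$ but remains integrable there since $\alpha\in(0,1)$.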
\subsection{Proof of Theorem \ref{theo_slow_arrival}: Slow arriving customers.}\label{sec:slow} We now consider the case $\gamma<\alpha$. Section 4.2 of \cite{RW18} provides the first two joint moments of the $Z_j^{(n)}(t)$, $j=1,...,k$, for a general discount factor $a \ge 0$ (recall the notation \eqref{discounted_Z(t)} in Section \ref{sec:model} for the discounted counterpart of the queueing process \eqref{def_Z_t}). Recalling that the rescaling implies that $\lambda_n(P_n-I)=\lambda(P-I)$, we get from \cite[Theorems 14 and 15 with $a=0$ discount factor]{RW18} that those moments are given by \begin{eqnarray} M_j^{(n)}(t) &=& \lambda_n e^{\lambda t (P-I)}\int_0^t {\mathbb P} \left(L_j^{(n)}>v\right) e^{-\lambda v (P-I)} \Delta_j P_n e^{\lambda v (P-I)} dv, \label{m1_sec:slow}\\ M_{jj}^{(n)}(t) &=& \lambda_n e^{\lambda t (P-I)}\int_0^t {\mathbb P}\left(L_j^{(n)}>v\right) e^{-\lambda v (P-I)} \Delta_j^2 P_n e^{\lambda v (P-I)}\nonumber\\ & + & 2 {\mathbb P}\left(L_j^{(n)}>v\right) \Delta_j P_n M_{j}^{(n)}(v)dv,\label{m2_jj_sec:slow}\\ M_{jj'}^{(n)}(t) &=& \lambda_n e^{\lambda t (P-I)}\int_0^t {\mathbb P}\left(L_j^{(n)}>v\right){\mathbb P}\left(L_{j'}^{(n)}>v\right) e^{-\lambda v (P-I)} \Delta_j\Delta_{j'} P_n e^{\lambda v (P-I)} \nonumber\\ & + & {\mathbb P}\left(L_j^{(n)}>v\right) \Delta_j P_n M_{j'}^{(n)}(v)
+ {\mathbb P}\left(L_{j'}^{(n)}>v\right) \Delta_{j'} P_n M_{j}^{(n)}(v) dv, \label{m2_jj'_sec:slow} \end{eqnarray} for all $t\ge 0$ and $j\neq j'$, $j$ and $j'$ in $\{1,..,k\}$. We first show \eqref{convergence_slow_M1}. Since $\lambda_n=\lambda n^\gamma$, multiplying \eqref{m1_sec:slow} by $n^{\alpha-\gamma}$ yields for $j=1,...,k$ \begin{equation}\label{m1_sec:slow_proof} n^{\alpha-\gamma} M_j^{(n)}(t)= \lambda e^{\lambda t (P-I)}\int_0^t n^\alpha {\mathbb P} \left(L_j^{(n)}>v\right) e^{-\lambda v (P-I)} \Delta_j P_n e^{\lambda v (P-I)} dv . \end{equation}
By definition of $L_j^{(n)}$ and the fat tail property of $L_j$: $$n^\alpha {\mathbb P} \left(L_j^{(n)}>v\right)=n^\alpha {\mathbb P} \left(L_j>nv\right)\sim n^\alpha \frac{1}{(nv)^\alpha}=\frac{1}{v^\alpha},\quad v\in (0,t),\quad n\to \infty.$$ Now, since $\lim_{n\to\infty}P_n=I$ and \begin{multline}\label{m1_sec_argument} \sup_{n\in \mathbb{N}}n^\alpha {\mathbb P} \left(L_j^{(n)}>v\right)=\sup_{n\in \mathbb{N}}n^\alpha {\mathbb P} \left(L_j>nv\right)=\frac{\sup_{n\in \mathbb{N}}(nv)^\alpha {\mathbb P} \left(L_j>nv\right)}{v^\alpha}\\ \le \frac{\sup_{u\ge 0}u^\alpha {\mathbb P} \left(L_j>u\right)}{v^\alpha},\quad v\in(0,1), \end{multline} the dominated convergence theorem enables us to let $n\to\infty$ in \eqref{m1_sec:slow_proof} to get \eqref{convergence_slow_M1}. We now turn to \eqref{convergence_slow_M20}. Multiplying \eqref{m2_jj_sec:slow} by $n^{\alpha-\gamma}$ yields \begin{multline}\label{m2_jj_sec:slow_proof} n^{\alpha-\gamma} M_{jj}^{(n)}(t) = \lambda e^{\lambda t (P-I)}\int_0^t n^\alpha {\mathbb P}\left(L_j^{(n)}>v\right) e^{-\lambda v (P-I)} \Delta_j^2 P_n e^{\lambda v (P-I)}dv\\
+ 2 \lambda e^{\lambda t (P-I)}\int_0^t n^\alpha {\mathbb P}\left(L_j^{(n)}>v\right) \Delta_j P_n M_{j}^{(n)}(v)dv. \end{multline} Since \eqref{convergence_slow_M1} in particular implies that $\lim_{n\to \infty}M_{j}^{(n)}(v)=0$ for all $v\in(0,1)$, and thanks to the upper bound \eqref{m1_sec_argument}, a dominated convergence argument entails that the second integral on the right hand side of \eqref{m2_jj_sec:slow_proof} tends to $0$ as $n\to\infty$. We also conclude by a dominated convergence argument that the first integral on the right hand side of \eqref{m2_jj_sec:slow_proof} tends to the right hand side of \eqref{convergence_slow_M20}, and we are done. As to \eqref{m2_jj'_sec:slow}, we have for $j\neq j'$ \begin{multline}\label{m2_jj'_sec:slow_proof} n^{\alpha-\gamma}M_{jj'}^{(n)}(t) = \lambda e^{\lambda t (P-I)}\int_0^t n^\alpha {\mathbb P}\left(L_j^{(n)}>v\right){\mathbb P}\left(L_{j'}^{(n)}>v\right) e^{-\lambda v (P-I)} \Delta_j\Delta_{j'} P_n e^{\lambda v (P-I)} dv\\
+ \lambda e^{\lambda t (P-I)}\int_0^t \left\{n^\alpha {\mathbb P}\left(L_j^{(n)}>v\right) \Delta_j P_n M_{j'}^{(n)}(v)
+ n^\alpha{\mathbb P}\left(L_{j'}^{(n)}>v\right) \Delta_{j'} P_n M_{j}^{(n)}(v)\right\} dv. \end{multline} Similarly to the second integral on the right hand side of \eqref{m2_jj_sec:slow_proof}, we show that the second integral on the right hand side of \eqref{m2_jj'_sec:slow_proof} converges to $0$ as $n\to\infty$. As to the first integral, the fact that ${\mathbb P}\left(L_{j'}^{(n)}>v\right)={\mathbb P}\left(L_{j'}>nv\right)\longrightarrow 0$ as $n\to\infty$, combined with the upper bound \eqref{m1_sec_argument}, yields by the dominated convergence theorem that it tends to $0$ as $n\to\infty$, completing the proof of \eqref{convergence_slow_M21} and of the theorem.
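To get a feeling for the limits \eqref{convergence_slow_M1} and \eqref{convergence_slow_M20}, consider again the deliberately degenerate illustrative case where ${\cal S}$ is reduced to a single state $x=(x_1,...,x_k)$: then $P=I$, all matrix exponentials above reduce to the identity, $\Delta_j=x_j$, and the limits become
$$
n^{\alpha-\gamma} M_j^{(n)}(t)\longrightarrow \lambda x_j\int_0^t v^{-\alpha}\,dv=\frac{\lambda x_j}{1-\alpha}\,t^{1-\alpha},\qquad n^{\alpha-\gamma} M_{jj}^{(n)}(t)\longrightarrow \frac{\lambda x_j^2}{1-\alpha}\,t^{1-\alpha}.
$$
In particular, in the slow arrivals regime the mean queue content vanishes at rate $n^{\gamma-\alpha}$, with the same $t^{1-\alpha}$ time profile as in the heuristic discussion following Theorem \ref{theo_regimes}.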
\section{A remark on the computation of the limiting joint Laplace transform when $\alpha\in \mathbb{Q}$}\label{sec:remark_compute} We identified in Theorem \ref{theo_regimes} the different limiting regimes when $\gamma$ is larger than or equal to $\alpha$ by obtaining the corresponding limiting joint Laplace transform $\chi(s,t)$ in each case. Even though the distributional limits \eqref{convergence_fast} and \eqref{convergence_equilibrium} involve simple processes $\{{\cal X}^\alpha (t),\ t\in[0,1] \}$ and $\{\nu_j^\alpha(t),\ t\ge 0 \}$, $j=1,...,k$, it turns out that the Laplace transforms $\chi(s,t)$, which are solutions to the differential equations \eqref{Poisson_ODE_fast} and \eqref{Poisson_ODE_equilibrium}, are in general not explicit in the fast or equilibrium arriving cases. We now show that things are much simpler when $\alpha\in(0,1)$ is rational, say of the form $$\alpha=1-p/q$$ for some $p$ and $q\in\mathbb{N}^*$, with $p<q$. The idea here is quite simple and standard, and consists of expanding a transformation of the solution $t\in[0,1]\mapsto \chi(s,t)\in \mathbb{R}^{{\cal S}\times {\cal S}}$ into a power series with matrix coefficients, as explained in \cite[Section 1.1]{Balser00}. Let us focus on the fast arrival case in Section \ref{sec:fast}, although the method is of course applicable to the equilibrium case, and let us put $\check\chi(s,t):= \chi(s,t^p)$, $t\in[0,1]$. In that case, we deduce from \eqref{Poisson_ODE_fast} that $t\in[0,1]\mapsto \check\chi(s,t)$ satisfies the matrix differential equation
$$\left\{ \begin{array}{rcl} \partial_t \check\chi(s,t) &=& \left[(p+q) t^{q}\lambda (P-I) + pt^{p-1}\beta \lambda \sum_{\ell =1}^k s_\ell\Delta_\ell \right]\check\chi(s,t) ,\quad t\in [0,1],\\ &=& [Q_1 t^q + Q_2(s) t^{p-1}]\check\chi(s,t),\\ \check\chi(s,0)&=& I. \end{array} \right. $$
where $Q_1:=(p+q)\lambda (P-I)$ and $Q_2(s):=p\beta \lambda \sum_{\ell =1}^k s_\ell\Delta_\ell$, $s=(s_1,...,s_k)$. It is quite simple to check that $\check\chi(s,t)$ can then be expanded as \begin{equation}\label{expansion_chi} \check\chi(s,t)=\sum_{j=0}^\infty U_j(s) t^j,\quad t\in [0,1], \end{equation} where the sequence of matrices $ (U_j(s))_{j\in\mathbb{N}}$ is defined from \cite[Relation (1.4)]{Balser00} by $U_0(s)=I$ and
\begin{equation}\label{rel_U_j} U_j(s)= \left\{ \begin{array}{c l} 0, & 1\le j <p,\\ Q_2(s)U_{j-p}(s)/j, & p\le j<q+1,\\ \left[ Q_2(s)U_{j-p}(s)+Q_1 U_{j-q-1}(s) \right]/j, & j\ge q+1 , \end{array} \right. \end{equation} and that \eqref{expansion_chi} converges for all $t$, as proved in \cite[Lemma 1 p.2]{Balser00}. The final solution is then expressed in that case as $$ \chi(s,t)=\check\chi(s,t^{1/p})=\sum_{j=0}^\infty U_j(s) t^{j/p},\quad t\in [0,1]. $$ Since the $U_j(s)$, $j\in\mathbb{N}$, are given by the simple linear recurrence \eqref{rel_U_j}, this expansion for $\chi(s,t)$ is easy to handle in practice; for instance, it can be approximated by truncation.
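For concreteness, the following short Python sketch (ours, purely illustrative and not taken from the paper) implements this truncation; the matrices \verb|Q1| and \verb|Q2| below are small artificial stand-ins for $Q_1$ and $Q_2(s)$.
\begin{verbatim}
# Minimal illustration: truncate the expansion of check-chi via the recurrence
# for U_j, then evaluate chi(s,t) = sum_j U_j(s) t^{j/p}.
import numpy as np

def truncated_chi(Q1, Q2, p, q, t, N=200):
    dim = Q1.shape[0]
    U = [np.eye(dim)]                          # U_0 = I
    for j in range(1, N + 1):
        term = np.zeros((dim, dim))
        if j >= p:
            term += Q2 @ U[j - p]              # Q_2(s) U_{j-p}(s)
        if j >= q + 1:
            term += Q1 @ U[j - q - 1]          # Q_1 U_{j-q-1}(s)
        U.append(term / j)
    return sum(U[j] * t ** (j / p) for j in range(N + 1))

P = np.array([[0.5, 0.5], [0.3, 0.7]])         # toy transition matrix
lam, beta, s1, p_, q_ = 1.0, 1.0, 0.2, 1, 2    # alpha = 1 - p/q = 1/2
Q1 = (p_ + q_) * lam * (P - np.eye(2))
Q2 = p_ * beta * lam * s1 * np.diag([1.0, 0.0])   # one class, Delta_1 = diag(1,0)
print(truncated_chi(Q1, Q2, p_, q_, t=0.8))
\end{verbatim}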
\end{document}
\begin{document}
\title{A combinatorial proof of Buryak-Feigin-Nakajima}
\begin{abstract}
Buryak, Feigin and Nakajima computed a generating function for a family of partition statistics by using the geometry of the $\mathbb{Z}/c\mathbb{Z}$ fixed point sets in the Hilbert scheme of points on $\mathbb{C}^2$. Loehr and Warrington had already shown how a similar observation by Haiman using the geometry of $\operatorname{Hilb}_n(\mathbb{C}^2)$ could be made purely combinatorial. We extend Loehr and Warrington's techniques to also account for cores and quotients. As a consequence, we obtain a purely combinatorial proof of a generalisation of the result in \cite{BFN}.
More precisely, we define a family of partition statistics
$\{h_{x,c}^+, x\in [0,\infty)\}$ and give a combinatorial proof that for all $x$ and all positive integers $c$,
\begin{equation*}
\sum q^{|\lambda|}t^{h_{x,c}^+(\lambda)}=q^{|\mu|}\prod_{i\geq 1}\frac{1}{(1-q^{ic})^{c-1}}\prod_{j\geq 1}\frac{1}{1-q^{jc}t},
\end{equation*}
where the sum ranges over all partitions $\lambda$ with $c$-core $\mu$.
\end{abstract}
\section{Introduction}
A \textit{partition} $\lambda$ of a positive integer $n$ is a non-increasing sequence of positive integers $\lambda_1\geq \lambda_2\geq\ldots\geq\lambda_l$ such that $\lambda_1+\cdots+\lambda_l=n.$ We write $|\lambda|=n.$ We represent partitions as \textit{Young diagrams}, informally by drawing $\lambda_i$ unit squares one on top of the other starting with bottom left corner $(i-1,0).$ \begin{figure}
\caption{The Young diagram for the partition $(4,2,1)$ of $7$.}
\end{figure}
For a square $\square$ in a Young diagram, $a(\square)$ is the number of squares above $\square$ in the same column, and $l(\square)$ is the number of cells to the right in the same row. For example, the square with bottom left corner $(0,1)$ in Figure 1 has $a(\square)=2$, $l(\square)=1.$ We also define $h(\square)=a(\square)+l(\square)+1$ and let $h_{r,s}(\lambda)$ count the number of squares in the Young diagram of $\lambda$ such that $ (r+s)\mid h(\square)$ and $rl(\square)=s(a(\square)+1).$
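For concreteness, the following short Python sketch (our own illustration; the function names are not standard notation) computes $a(\square)$, $l(\square)$, $h(\square)$ and the statistic $h_{r,s}$ directly from this definition, reading $\lambda_i$ as the height of the $i$th column.
\begin{verbatim}
def boxes(la):
    """Yield (arm, leg, hook) for every box of the partition la."""
    for i, col in enumerate(la):                 # column i+1, of height col
        for j in range(1, col + 1):              # row j, counted from the bottom
            arm = col - j                        # squares above, same column
            leg = sum(1 for h in la[i + 1:] if h >= j)   # squares to the right
            yield arm, leg, arm + leg + 1

def h_rs(la, r, s):
    """Boxes with (r+s) | hook and r*leg == s*(arm+1)."""
    return sum(1 for a, l, h in boxes(la)
               if h % (r + s) == 0 and r * l == s * (a + 1))

# the square of (4,2,1) with bottom left corner (0,1) has arm 2 and leg 1:
assert (2, 1, 4) in set(boxes([4, 2, 1]))
\end{verbatim}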
Buryak, Feigin, and Nakajima gave a geometric proof of the following \cite[Corollary 1.3]{BFN} \begin{equation}\label{BFN cor}
\sum_{\lambda \in \operatorname{Par}} q^{|\lambda|}t^{h_{r,s}(\lambda)}=\prod_{\substack{i\geq 1\\ r+s\nmid i}}\frac{1}{1-q^{i}}\prod_{i\geq 1}\frac{1}{1-q^{i(r+s)}t} \end{equation} where $\operatorname{Par}$ denotes the set of all partitions. One result of this paper is a purely combinatorial proof of the same result.
We now explain the geometric significance of the generating function~\eqref{BFN cor}. The \textit{Hilbert scheme of $n$ points on $\mathbb{C}^2$}, $\operatorname{Hilb}_n(\mathbb{C}^2)$, parametrises ideals $I\subset\mathbb{C}[x,y]$ such that $\dim_{\mathbb{C}}\left(\mathbb{C}[x,y]/I\right)=n.$ $\operatorname{Hilb}_n(\mathbb{C}^2)$ admits a torus action by lifting the $\left(\mathbb{C}^*\right)^2$ action on $\mathbb{C}^2$ given by\begin{equation}(t_1,t_2)\cdot(x,y)=(t_1x,t_2y)\end{equation} to the action on ideals $I\subset\mathbb{C}[x,y]$ given by \begin{equation} (t_1,t_2)\cdot I=\{p(t_1^{-1}x,t_2^{-1}y)\mid p(x,y)\in I\}. \end{equation} Let \begin{equation}\Gamma_m=\left\langle\left(e^{\frac{2\pi i}{m}},e^{\frac{-2\pi i}{m}}\right) \right\rangle\end{equation} be a finite subgroup of $\left(\mathbb{C}^*\right)^2$ of order $m$ and let $T_{r,s}$ be the one-parameter subtorus of $\left(\mathbb{C}^*\right)^2$ given by \begin{equation}T_{r,s}=\{(t^r,t^s)\mid t\in\mathbb{C}^*\}.\end{equation}
Let $H_*^{\operatorname{BM}}(X;\mathbb{Q})$ denote the Borel-Moore homology of $X$ with rational coefficients and let \begin{equation}
P_t^{\operatorname{BM}}(X)=\sum_{i\geq 0} \dim H_i^{\operatorname{BM}}(X;\mathbb{Q})t^{\frac{i}{2}}. \end{equation}
Buryak, Feigin and Nakajima \cite[Theorem 1.2]{BFN} proved that, if $r,s$ are non-negative integers with $r+s\geq 1,$
\begin{equation}\label{BFN thm}\sum_{n\geq 0}P_t^{\operatorname{BM}}\left(\operatorname{Hilb}_n(\mathbb{C}^2)^{\Gamma_{r+s}\times T_{r,s}}\right)q^n=\prod_{\substack{i\geq 1\\ r+s\nmid i}}\frac{1}{1-q^{i}}\prod_{i\geq 1}\frac{1}{1-q^{i(r+s)}t},\end{equation}
where $\operatorname{Hilb}_n(\mathbb{C}^2)^{T_{r,s}\times \Gamma_{r+s}}$ is the fixed point locus of $\operatorname{Hilb}_n(\mathbb{C}^2)$ under the action of $T_{r,s}\times \Gamma_{r+s}.$ The proof is split into two results. One \cite[Lemma 3.1]{BFN} shows that the left hand side of \eqref{BFN thm} is dependent only on $r+s$. The other \cite[Lemma 3.2]{BFN} computes the left hand side of \eqref{BFN thm} in the case $s=0.$ Broadly speaking, Buryak, Feigin, and Nakajima compute the dimension of the Białynicki-Birula cells when the ``slope'' of the acting one parameter torus is very steep, and prove that the dimensions of the cells depend only on $r+s$, not on the slope itself.
Finally, using the methods of \cite{BB}, a cell decomposition of $\operatorname{Hilb}_n(\mathbb{C}^2)^{T_{r,s}\times \Gamma_{r+s}}$ shows that the left hand side of \eqref{BFN thm} in the Grothendieck ring of varieties is given by \begin{equation}
\sum_{\lambda \in \operatorname{Par}} q^{|\lambda|}t^{h_{r,s}(\lambda)}. \end{equation}
In \cite{LW}, Loehr and Warrington gave a bijective proof that the distribution of a partition statistic $h_{x}^+$ over partitions of $n$ is independent of the parameter $x$. In a similar vein to the above, Haiman observed that $h_{x}^+$ accounts for the distribution of the dimension of the Białynicki-Birula cells associated to the action of $(\mathbb{C}^*)^2$ on $\operatorname{Hilb}_n(\mathbb{C}^2),$ i.e. the case when $\Gamma_m$ is the trivial group.
We are interested in \begin{question}\label{main q} Is there a bijection proving \eqref{BFN cor}? \end{question} To answer this question, we also ask the following. \begin{question}\label{coresmash} Can we use Loehr and Warrington's methods to produce a related bijection that preserves the core of a partition? \end{question}
We provide an affirmative answer to Question~\ref{coresmash}, and use the bijection we produce to provide a partial answer to Question~\ref{main q}. In particular, we define a partition statistic $h_{x,c}^+$ where $x\in[0,\infty)$ and $c$ is a positive integer, and $h_{x,c}^+(\lambda)$ counts the number of squares $\square\in\lambda$ such that both \begin{itemize}
\item the hook length $h(\square)$ is divisible by $c$, and
\item if $a(\square)$ and $l(\square)$ denote the size of the arm and leg of $\square$ respectively, \begin{equation}
\frac{a(\square)}{l(\square)+1} \leq x < \frac{a(\square)+1}{l(\square)}.
\end{equation}
\end{itemize} In the case $c=1,$ we recover Loehr and Warrington's statistic $h_x^+$. We then exhibit a bijection proving a refinement (Theorem~\ref{main theorem}) of \cite[Lemma 3.1]{BFN}. The key ingredient is a bijection at rational slope showing that $h_{x,c}^+$ is equidistributed, over partitions with a fixed $c$-core, with the statistic $h_{x,c}^-,$ which counts boxes $\square$ in the Young diagram such that both \begin{itemize}
\item the hook length $h(\square)$ is divisible by $c$, and
\item if $a(\square)$ and $l(\square)$ denote the size of the arm and leg of $\square$ respectively, \begin{equation}
\frac{a(\square)}{l(\square)+1} < x \leq \frac{a(\square)+1}{l(\square)}.
\end{equation}
\end{itemize} \begin{customthm}{4.3} For all positive rational numbers $x$ and all integers $n\geq 0$, $$\sum t^{h_{x,c}^+(\lambda)}=\sum t^{h_{x,c}^-(\lambda)}$$ where both sums range over partitions $\lambda$ of $n$ with a fixed $c$-core $\mu$. \end{customthm} To do so, we adapt Loehr and Warrington's construction of a bijection $I_{r,s}$ \cite{LW} to give a new bijection $I_{r,s,c}$ which preserves the $c$-core of a partition and ``picks out'' whether or not $c$ divides the hook length of a cell contributing to a partition statistic. In the case $c=1$, $I_{r,s,c}$ specialises to $I_{r,s}$. To construct $I_{r,s,c}$, we refine Loehr and Warrington's multigraph $M_{r,s}$ to a multigraph $M_{r,s,c}$ which also sees the $c$-core of a partition. In order to do so, we recast the $c$-abacus construction in the language used to define $M_{r,s}$ and define an appropriate notion of homomorphism, taking $M_{r,s,c}$ to be the product of the $c$-abacus and $M_{r,s}$ with respect to these homomorphisms.
We then give a combinatorial proof of a result (Theorem~\ref{basic partition step}), computing the distribution of $h_{0,c}^+.$ This result in particular implies \cite[Lemma 3.2]{BFN}. Whilst our proof is combinatorial, it is not bijective, as we use a multi-counting argument. The map we define was previously defined by Walsh and Warnaar \cite[\S6]{Walsh}.
\begin{customthm}{3.43}\label{basecaseintro} For every positive integer $c$ and every $c$-core $\mu$,
$$\sum q^{|\lambda|}t^{h_{0,c}^+(\lambda)}=q^{|\mu|}\prod_{i\geq 1}\frac{1}{(1-q^{ic})^{c-1}}\prod_{j\geq 1}\frac{1}{1-q^{jc}t}$$ where the sum ranges over all partitions $\lambda$ with $c$-core $\mu$, henceforth denoted $\operatorname{Par}^c_{\mu}.$ \end{customthm}
Finally, our main theorem (Theorem~\ref{main theorem}) uses both Theorem~\ref{basic partition step} and the bijection $I_{r,s,c}$ to compute the following distribution, and we explain how \eqref{BFN cor} follows.
\begin{customthm}{4.2} For all $x$ in $[0,\infty),$
\begin{equation}\sum q^{|\lambda|}t^{h_{x,c}^+(\lambda)}=q^{|\mu|}\prod_{i\geq 1}\frac{1}{(1-q^{ic})^{c-1}}\prod_{j\geq1}\frac{1}{1-q^{jc}t}\end{equation} where the sum is taken over all partitions $\lambda$ with $c$-core $\mu$. \end{customthm}
\subsection{Organisation of the paper} Section 2 recalls some definitions from partition combinatorics. In particular, we recall the abacus construction first introduced in \cite{James} and recall some basic generating functions. The section builds up to proving Theorem~\ref{basic partition step}, which computes the distribution of $h_{0,c}^+$ over $\operatorname{Par}^c_{\mu},$ the set of partitions with $c$-core $\mu$.
Section 3 defines the main partition statistics of interest, $\operatorname{mid}_{x,c},$ $\operatorname{crit}_{x,c}^-,$ $\operatorname{crit}_{x,c}^+,$ $h^+_{x,c}$ and $h^{-}_{x,c}$ where $h^{\pm}_{x,c}=\operatorname{mid}_{x,c}+\operatorname{crit}_{x,c}^{\pm}$. Then, we introduce our main theorem, Theorem~\ref{main theorem}, which states that for all $x\in[0,\infty)$,
\begin{equation}\sum_{\lambda\in\operatorname{Par}^c_{\mu}}q^{|\lambda|}t^{h_{x,c}^+(\lambda)}=q^{|\mu|}\prod_{i\geq 1}\frac{1}{(1-q^{ic})^{c-1}}\prod_{j\geq1}\frac{1}{1-q^{jc}t}.\end{equation} In view of Theorem~\ref{basic partition step}, it remains to prove that the left hand side is independent of $x$. An argument analogous to that in ~\cite{LW} is then used to show that the independence of the left hand side from $x$ is implied by a symmetry property when $x$ is rational, \begin{equation}
\sum_{\lambda\in\operatorname{Par}^c_\mu}q^{|\lambda|}w^{h^+_{x,c}(\lambda)}y^{h_{x,c}^-(\lambda)}=\sum_{\lambda\in\operatorname{Par}^c_\mu}q^{|\lambda|}w^{h^{-}_{x,c}(\lambda)}y^{h_{x,c}^+(\lambda)}. \end{equation} Finally, the section concludes with a proof that the main result of \cite{BFN} is a consequence of Theorem~\ref{main theorem}.
Section 4 defines the multigraph $M_{r,s,c}(\lambda)$ corresponding to a rational $x=\frac{r}{s}$, and proves that the map $\lambda\mapsto M_{r,s,c}(\lambda)$ remembers the statistics $\operatorname{mid}_{x,c}(\lambda)$, $\operatorname{crit}_{x,c}^++\operatorname{crit}_{x,c}^-$, amongst others. The proof defines an ordering $<_{r,s,c}$ on partitions and multigraphs and a special family of partitions $\lambda_{r,s,k}.$ There are three useful facts related to the ordering $<_{r,s,c}$. The first is that if $\lambda<_{r,s,c}\mu$ at the level of partitions then $M_{r,s,c}(\lambda)<_{r,s,c}M_{r,s,c}(\mu)$ at the level of multigraphs. The second is that for any $\lambda,$ if $k$ is taken to be large enough, then $\lambda<_{r,s,c}\lambda_{r,s,k}.$ The third is that if $M_{\mu}=M_{\lambda_{r,s,k}}$ then $\mu=\lambda_{r,s,k}.$ This gives a framework for proving that $M_{r,s,c}$ remembers partition data. Since $M_{r,s,c}$ is injective at the $\lambda_{r,s,k},$ the map does not lose any data at all at these points, so the $\lambda_{r,s,k}$ form a family of base cases. We then study how each statistic changes when taking successors with respect to the ordering $<_{r,s,c}$.
Section 5 defines involutions $I_{r,s,c}:\operatorname{Par}^c_{\mu}\rightarrow \operatorname{Par}^c_{\mu}$ that preserve the multigraphs $M_{r,s,c}(\lambda)$. It also proves that $I_{r,s,c}$ exchanges the statistics $\operatorname{crit}_{x,c}^+$ and $\operatorname{crit}_{x,c}^-$. Together with the results of Section 4, this completes a combinatorial proof of Theorem~\ref{main theorem}.
\section{Background: partitions, cores, quotients}
In this section, we recall first definitions in partition combinatorics, including the abacus construction, cores and quotients. We take a nonstandard view of the $c$-core, and describe it as an equivalence class of complete circuits of a directed multigraph $M_c$. We take this approach so that we have descriptions of Loehr and Warrington's construction in \cite{LW} and the $c$-core in the same language, which allows us to formulate a simultaneous refinement of the two in Section 4. Once we have recalled this theory, we will recall a few standard generating functions and use them to give a combinatorial proof of Theorem~\ref{basic partition step}, which forms our base case. \stepcounter{essaypart}
\begin{defn}[Partition,Young diagram] A \textit{partition} of an integer $n\geq 0$ is a sequence of non-increasing positive integers $\lambda_1\geq\lambda_2\geq\ldots\geq \lambda_t$ with sum $n$. The \textit{size} of $\lambda$, denoted $|\lambda|,$ is $n$ and the \textit{length} of $\lambda$ is the number of summands, written $l(\lambda)=t.$ The \textit{Young diagram} of $\lambda$ consists of $t$ columns of $1\times 1$ boxes $\square$ in $\mathbb{R}^2$, with $\lambda_i$ boxes in the $i$th column for each $1\leq i\leq t$. The bottom left corner of the diagram sits at $(0,0)$. \end{defn}
\begin{ex} The partition $\mu=(12,12,10,8,7,4,1,1,1)$ of $56$ has the diagram given in Figure~\ref{first partition}. \end{ex}
Informally, the boundary of a partition $\lambda$ is the bi-infinite path traversing the $x$-axis from $+\infty$ until it hits a box of the partition, then follows the edge of the Young diagram until it hits the $y$-axis, before traversing the $y$-axis to $+\infty$. We split the boundary up into unit steps between lattice points, and view it as a directed multigraph where edges are additionally assigned a label indicating if they are north or west.
\begin{defn}[NW directed multigraph] A \textit{NW directed multigraph} $M=(V,E,s,t,d)$ consists of a vertex set $V$, an edge set $E$, and three maps $s:E\rightarrow V,$ $t:E\rightarrow V$ and $d:E\rightarrow\{N,W\},$ called source, target, and direction respectively. We say the edge $e$ \textit{departs from} the vertex $v$ if $s(e)=v$ and we say that $e$ \textit{arrives at} the vertex $w$ if $t(e)=w.$ We call $e$ a \textit{north edge} if $d(e)=N$ and a \textit{west edge} if $d(e)=W$. \end{defn}
\begin{defn}[Boundary graph] The \textit{boundary graph} $b(\lambda)$ of a partition $\lambda$ is an NW directed multigraph. The edge set is defined as follows. For natural numbers $x,y$ there is a west edge $e$ with $s(e)=(x+1,y),$ $t(e)=(x,y)$ if either \begin{itemize} \item $y=0$ and $x\geq l(\lambda)$, or \item $y>0$ and $\lambda_{x+1}=y.$ \end{itemize} There is a north edge $e$ with $s(e)=(x,y)$ and $t(e)=(x,y+1)$ if either \begin{itemize} \item $x=0$ and $y\geq \lambda_1$, or \item $x>0$ and $\lambda_{x+1}\leq y<\lambda_x .$ \end{itemize} The vertex set $V(b(\lambda))$ is the union of sources and targets of the edges. \end{defn}
\begin{ex} Let $\mu=(12,12,10,8,7,4,1,1,1)$. The boundary graph of $\mu$ is given in Figure~\ref{first partition}, the north edges being the upward arrows and the west edges being the left arrows.
\begin{figure}
\caption{The Young diagram and boundary graph of the partition $(12,12,10,8,7,4,1,1,1)$}
\label{first partition}
\end{figure} \end{ex}
Note that for any edge $e$ in the boundary graph, the value of $y-x$ at the target of $e$ is one greater than at the source, because taking a unit step north or west increases the value of $y-x$ by 1.
So, the value of $y-x$ at the target of an edge indexes an Eulerian tour, or complete circuit, of $b(\lambda).$ For clarity, we recall the definition of a complete circuit, but this is standard in the literature.
\begin{defn} Given a directed multigraph $M$, a \textit{complete circuit} of $M$ is an ordering of $E(M)$ such that if $e_i$ and $e_{i+1}$ are consecutive with respect to the ordering, then there is a vertex $v\in V(M)$ such that $t(e_i)=s(e_{i+1})$. \end{defn}
\begin{defn} If an edge $e\in E(b(\lambda))$ has target $(x,y),$ we say the \textit{index} of $e$ is $i(e)=y-x.$ The \textit{boundary tour} is the complete circuit of $b(\lambda)$ where the edges are ordered by index. We write the edges in this ordering as $(\ldots, e_{-2},e_{-1},e_0,e_1,e_2,\ldots).$ We say an edge $e_j$ \textit{occurs before} the edge $e_k$ if $j<k$. The \textit{boundary sequence} is the bi-infinite sequence $(d_i)_{i\in\mathbb{Z}}$ where $d_i=d(e_i).$
\end{defn}
\begin{ex} The partition $\mu=(12,12,10,8,7,4,1,1,1)$ has boundary sequence $$\ldots WWWWNWWWNNNWN_0NNWNWNNWNNWWNNN\ldots,$$ where $d_0$ is indicated with a $0$ subscript. \end{ex}
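The boundary sequence is straightforward to generate mechanically. The following Python sketch (ours; the helper names are not from the literature) walks the boundary of the column-convention diagram, records the direction of the edge whose target lies on the line $y-x=i$, and reproduces the word displayed above.
\begin{verbatim}
def boundary_word(la, lo, hi):
    """Directions d_lo ... d_hi, where the index of an edge is y - x at its target."""
    heights = list(la)
    x, y, word = max(len(heights), 1 - lo), 0, {}
    while y - x <= hi:
        if x >= 1 and y < (heights[x - 1] if x <= len(heights) else 0):
            y, step = y + 1, 'N'       # north edge up the right side of column x
        elif x >= 1:
            x, step = x - 1, 'W'       # west edge (top of a column, or the x-axis)
        else:
            y, step = y + 1, 'N'       # north edge along the y-axis
        word[y - x] = step             # record at the index of the target
    return ''.join(word[i] for i in range(lo, hi + 1))

mu = [12, 12, 10, 8, 7, 4, 1, 1, 1]
print(boundary_word(mu, -12, 15))      # the word displayed in the example above
\end{verbatim}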
\subsection{Anatomy of a Young Diagram} In this section, we recall some standard partition statistics and how they relate to the boundary sequence. \begin{defn}[Hand, foot, inversion] A box $\square\in\lambda$ can be specified by giving the row and column of the Young diagram that the box sits in. In particular, each box in the Young diagram corresponds to a pair of edges: one west, at the top of the column it $\square$ lies in, called the \textit{hand of $\square$}, and another north, at the extreme right of the row it lies in, called the \textit{foot of $\square$}, where the foot necessarily occurs before the hand. Conversely, given a north edge $s_1$ departing from $(x_1,y_1)$ and arriving at $(x_1,y_1+1)$ and a west edge $s_2$ departing from $(x_2,y_2)$ and arriving at $(x_2-1,y_2)$ such that $y_1-x_1<y_2-x_2$, there is a unique box $\square$ in the Young diagram with bottom left corner $(x_2-1,y_1)$ such that $s_1$ and $s_2$ are respectively the foot and hand of $\square$. We call such a pair of west and north edges an \textit{inversion}. Hence, we may identify a box in the Young diagram with its hand and foot in the boundary sequence.
The \textit{arm} of $\square$ consists of the boxes that lie strictly above $\square$ in the same column, and the $\textit{leg}$ of $\square$, consists of the boxes that lie strictly to the right of $\square$ in the same row. We denote the number of boxes in the arm of $\square$ by $a(\square)$ and the number of boxes in the leg of $\square$ by $l(\square)$. The \textit{hook length} of $\square$ is defined to be $ h(\square)=a(\square)+l(\square)+1$. \end{defn}
\begin{ex} The boxes in the arm and leg of the shaded box $\square$ in Figure~\ref{armlegdiagram} are labelled with the corresponding body part. The hand of $\square$ is the red arrow, and the foot is the blue arrow, and $a(\square)=5$ and $l(\square)=1$, so $ h(\square)=7$. \begin{figure}
\caption{the arm and leg of $\square$}
\label{armlegdiagram}
\end{figure} \end{ex}
\begin{prop}\label{c-hooks are c-inversions} A box $\square$ in the Young diagram of $\lambda$ with hook length $c$ corresponds to an inversion $(d_i,d_j)$ in the boundary sequence where $j=i+c.$ \end{prop} \begin{proof} Let $h$ and $f$ be the hand and foot of $\square$ in the boundary respectively. Consider the map from the arm of $\square$ to the boundary sending each box to its foot. The foot of any box in the arm of $\square$ is a north edge that occurs after $f$ and occurs before $h$. Conversely, each north edge that occurs after $f$ and occurs before $h$ is the foot of a box in the arm of $\square$. So, $a(\square)$ counts north edges that occur after $f$ and before $h$.
Analogously, $l(\square)$ counts west edges that occur after $f$ and before $h$. Thus, $a(\square)+l(\square)=h(\square)-1$ counts the total number of edges that occur after $f$ and before $h$, so if $f=e_i$ then $h=e_{i+h(\square)}=e_{i+c}$. \end{proof}
\begin{defn} A \textit{rimhook} $R$ of length $c$ is a connected set of $c$ boxes in $\lambda$ such that removing $R$ gives the Young diagram of a partition, and $R$ does not contain a $2\times 2$ box. \end{defn}
\begin{corol}\label{rimhookishook} Rimhooks of length $c$ are in bijection with boxes of hook length $c$. \end{corol} \begin{proof} Let $R$ be a rimhook of length $c$ in the diagram of a partition $\lambda$. Then, by the definition of a rimhook, for every box $\square\in R$ there is an edge in the boundary graph of $\lambda$ arriving at the top right corner of $\square.$ Let $e_i,e_{i+1},\ldots,e_{i+c-1}$ be the set of all such edges (since $R$ is connected these edges are consecutive in the boundary tour). Since $R$ is removable, the south-eastern most box of $R$ lies at the end of its row and the north-western most box of $R$ lies at the top of its column, so $d(e_i)=N$, and the next edge $e_{i+c}$, which traverses the top of the north-western most box, satisfies $d(e_{i+c})=W$. Therefore, by Proposition~\ref{c-hooks are c-inversions}, $e_i$ and $e_{i+c}$ are the foot and hand respectively of a box of hook length $c$.
Conversely, if $\square$ is a box of hook length $c$, with foot $e_i$ and hand $e_{i+c}$, then taking the boxes with top right corners at the targets of $e_i,e_{i+1},\ldots, e_{i+c-1}$ gives a rimhook of length $c$.\end{proof}
\begin{defn} A $c$-core of a partition $\lambda$ is a partition obtained by iteratively removing rimhooks of length $c$ from $\lambda$ until a partition with no rimhooks of length $c$ is obtained. A partition $\mu$ is called a $c$-core if $\mu$ has no rimhooks of length $c$. \end{defn}
Applying Corollary~\ref{rimhookishook} to $c$-cores gives the following.
\begin{corol} A partition $\lambda$ is a $c$-core if and only if $\lambda$ has no boxes of hook length $c$. \end{corol}
Our aim for now will be to redefine the $c$-core in the language we wish to use later, and then use it to see that the result of iteratively removing rimhooks of length $c$ is independent of the order in which rimhooks are removed. In order to do so, we need the notion of an NW directed multigraph homomorphism. Informally, these consist of two maps, one between edges, and another between vertices. We require that these maps preserve the direction (N or W) of the edges, and that they be compatible with the source and target maps.
\begin{defn}[NW directed multigraph homomorphism] Let $M_1=(V_1,E_1,s_1,t_1,d_1)$ and $M_2=(V_2,E_2,s_2,t_2,d_2)$ be NW directed multigraphs. A homomorphism of NW directed multigraphs $\varphi:M_1\rightarrow M_2$ is a pair of maps $\varphi_V:V_1\rightarrow V_2$ and $\varphi_E:E_1\rightarrow E_2$ such that for all edges $e\in E_1,$ \begin{align}
s_2(\varphi_E(e))&=\varphi_V(s_1(e))\\
t_2(\varphi_E(e))&=\varphi_V(t_1(e))\\
d_2(\varphi_E(e))&=d_1(e). \end{align} \end{defn}
In other words, $\varphi$ is a quiver homomorphism that preserves direction ($N$ or $W$).
\begin{ex} Let $M_1$ be the boundary graph of $\mu=(12,12,10,8,7,4,1,1,1)$ and let $\varphi_V$ be the map taking each vertex $(x,y)$ to $[y-x],$ the class of $y-x$ modulo 2. This map induces the homomorphism $q_2$ illustrated in Figure~\ref{fig:2 hom}, with north edges coloured red and west edges coloured blue.
For ease of reading, we draw the edges in the image of $q_2$ from left to right as $\ldots, q_2(e_{-2}), q_2(e_{-1}),q_2(e_0),q_2(e_1),q_2(e_2),\ldots$. \begin{figure}
\caption{A portion of $M_1$ and the corresponding edges in $q_2(M_1)$.}
\label{fig:2 hom}
\end{figure}
\end{ex}
We will always work with NW directed multigraph homomorphisms where the edge map $\varphi_E$ is bijective, so from now on we assume $\varphi_E$ is bijective for any homomorphism $\varphi.$ In particular, this assumption allows us to push complete circuits through homomorphisms.
\begin{prop} Let $\varphi:M_1\rightarrow M_2$ be an NW directed multigraph homomorphism. Let $(e_i)_{i\in I}$ be a complete circuit of $M_1$. Then $(\varphi_E(e_i))_{i\in I}$ is a complete circuit of $M_2$. \end{prop} \begin{proof} Since $\varphi_E$ is bijective, we need only check that $s_2(\varphi_E(e_{i+1}))=t_2(\varphi_E(e_{i}))$ for each $i\in I$. By definition,\begin{align} s_2(\varphi_E(e_{i+1}))&=\varphi_V(s_1(e_{i+1}))\\ &=\varphi_V(t_1(e_i))\\ &=t_2(\varphi_E(e_i)). \end{align} \end{proof}
We have seen already that rimhooks of length $c$ correspond to boxes of hook length $c$ which in turn correspond to inversions in the boundary sequence where, if the first term has index $i$, the second has index $i+c$. Intuitively enough, then, the homomorphism that captures all of this information is the following.
\begin{defn} Let $(z,w)\sim(x,y)$ if $w-z\equiv y-x\pmod{c}$. Then, $q_c:b(\lambda)\rightarrow M_c$ is the NW directed multigraph homomorphism induced by imposing the relation $\sim$ on the vertices of $b(\lambda)$. The complete circuit $(q_c(e_{i}))_{i\in\mathbb{Z}}$ of $M_c$ is called the $c$-abacus tour associated to $\lambda$. \end{defn}
Proposition~\ref{c-hooks are c-inversions} tells us that the number of boxes with hook length divisible by $c$ can be read off from the $c$-abacus tour by looking at edges that correspond to a hand and foot arriving at the same vertex $[i].$ So, it is sometimes useful to group the edges in a complete circuit by target. This leads us to arrival words.
\begin{defn}[arrival words, departure words] Fix an NW directed multigraph $M=(V,E,s,t,d)$ and a complete circuit $(e_i)_{i\in I}$ of $M$. For $v\in V$, let $I_v\subset I$ be the subset of indices $i$ such that $t(e_i)=v.$ The \textit{arrival word at $v$}, written $v_a$, is the sequence of directions $(d(e_i))_{i\in I_v}.$ The departure word at $v$ is defined analogously, replacing the target map with the source map. \end{defn}
\begin{notn} Given a sequence $S$ of $N$s and $W$s, we write $\operatorname{inv}(S)$ for the number of inversions in $S$. \end{notn}
\begin{prop}\label{c-hook count} Let $\lambda$ be a partition with boundary tour $(e_i)_{i\in\mathbb{Z}}$ and let $\{[0],[1],\ldots,[c-1]\}$ be the vertices of $M_c.$ Then, taking arrival words with respect to the complete circuit $(q_c(e_i))_{i\in\mathbb{Z}}$ \begin{equation}
|\{\square\in\lambda\mid c\mid h(\square)\}|=\sum_{i=0}^{c-1}\operatorname{inv}([i]_a). \end{equation} \end{prop} \begin{proof} Apply Proposition~\ref{c-hooks are c-inversions}. \end{proof}
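The following self-contained Python sketch (ours, purely as a numerical sanity check) verifies Proposition~\ref{c-hook count} on the running example by computing both sides independently.
\begin{verbatim}
def hooks(la):
    for i, col in enumerate(la):
        for j in range(1, col + 1):
            arm = col - j
            leg = sum(1 for h in la[i + 1:] if h >= j)
            yield arm + leg + 1

def boundary_word(la, lo, hi):
    heights, x, y, word = list(la), max(len(la), 1 - lo), 0, {}
    while y - x <= hi:
        if x >= 1 and y < (heights[x - 1] if x <= len(heights) else 0):
            y, step = y + 1, 'N'
        elif x >= 1:
            x, step = x - 1, 'W'
        else:
            y, step = y + 1, 'N'
        word[y - x] = step
    return word

def inversions(seq):
    """Pairs (N, W) with the N strictly before the W."""
    inv = norths = 0
    for s in seq:
        if s == 'N':
            norths += 1
        else:
            inv += norths
    return inv

def hooks_divisible_by_c_via_abacus(la, c):
    lo, hi = -(len(la) + 1), max(la) + 1       # window containing every inversion
    word = boundary_word(la, lo, hi)
    return sum(inversions([word[i] for i in range(lo, hi + 1) if i % c == r])
               for r in range(c))

mu = [12, 12, 10, 8, 7, 4, 1, 1, 1]
for c in (2, 3, 5):
    assert hooks_divisible_by_c_via_abacus(mu, c) == sum(1 for h in hooks(mu) if h % c == 0)
\end{verbatim}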
\subsection{Alignment and charge}
So far, we have associated to every partition a boundary sequence, a bi-infinite sequence of $N$s and $W$s such that if we travel far enough to the left in the sequence every entry is a $W$, and if we travel far enough to the right, every entry is an $N$. We will now study these sequences in general, and identify which of them arise as boundary sequences of a partition.
\begin{defn}\label{kcharge} Let $S$ be a bi-infinite sequence $\left(a_i\right)_{i\in\mathbb{Z}}$ with $a_i\in\{N,W\},$ such that for some $M\in\mathbb{N},$ $\forall m\geq M,$ $a_{-m}=W$ and $a_{m}=N$. Fix an integer $k$. Let $n_k$ be the number of $N$s in $S$ with index at most $k$, \begin{equation}
n_k=\left|\left\{j\leq k\mid a_j=N\right\}\right|. \end{equation} Similarly, let $w_k$ be the number of $W$s with index greater than $k$, \begin{equation}
w_k=\left|\left\{j> k\mid a_j=W\right\}\right|. \end{equation} Then, the $k$-charge of $S$, written $\operatorname{ch}_k(S)$, is $n_k-w_k-k.$ \end{defn}
\begin{prop} If $k$ and $l$ are integers, and $S$ is as in Definition~\ref{kcharge}, then $\operatorname{ch}_k(S)=\operatorname{ch}_l(S).$ \end{prop}
\begin{proof} We check that $\operatorname{ch}_{k+1}(S)=\operatorname{ch}_k(S).$ The proposition then follows by repeated application of the equality. Suppose $a_{k+1}=N.$ Then, $n_{k+1}=n_k+1$ and $w_{k+1}=w_k$. So, \begin{align}\operatorname{ch}_{k+1}(S)&=n_{k+1}-w_{k+1}-(k+1)\\ &=n_k+1-w_k-(k+1)\\ &=n_k-w_k-k\\ &=\operatorname{ch}_k(S). \end{align} Similarly, if $a_{k+1}=W,$ then $n_{k+1}=n_k$ and $w_{k+1}=w_k-1$,
so $\operatorname{ch}_{k+1}(S)=\operatorname{ch}_k(S)$. Therefore, $\operatorname{ch}_k(S)$ is independent of $k$.
\end{proof}
So, in place of $\operatorname{ch}_k(S),$ we may simply write $\operatorname{ch}(S)$.
\begin{prop} A sequence $S$ as in Definition~\ref{kcharge} is the boundary sequence of a partition if and only if $\operatorname{ch}(S)=0.$ \end{prop} \begin{proof} Suppose $S$ is the boundary sequence of a partition. Let $(x_1,y_1)$ be the point on the line $y-x=k$ on the boundary of a partition $\lambda.$ Since $x_1$ counts the number of west edges with index greater than $k$, and $y_1$ counts the number of north edges with index at most $k$, $\operatorname{ch}(S)=y_1-x_1-k=0.$
If $\operatorname{ch}(S)=0,$ then we may reconstruct $\lambda$ from $S$ by placing a point at $(w_k,n_k)$, and drawing the partition boundary in two halves: one as an infinite path departing from $(w_k,n_k)$ taking unit steps with orientations given by $(a_i)_{i>k}$ and the other as an infinite path arriving at $(w_k,n_k)$ taking unit steps with orientations given by $(a_i)_{i\leq k}.$ \end{proof}
\begin{defn} Let $\lambda$ and $\mu$ be partitions and let $[0]^{\lambda}_a,\ldots,[c-1]^{\lambda}_a$, respectively $[0]^{\mu}_a,\ldots,[c-1]^{\mu}_a$, be the arrival words taken from the $c$-abacus tours of $\lambda$, respectively $\mu.$ Define the relation $\lambda\sim_c\mu$ if, for all $i$ with $0\leq i\leq c-1,$ \begin{equation}
\operatorname{ch}([i]^{\lambda}_a)=\operatorname{ch}([i]^{\mu}_a). \end{equation} \end{defn} \begin{ex} Let $\mu=(12,12,10,8,7,4,1,1,1)$ and refer to Figure~\ref{fig:2 hom}. When $c=2,$ $[0]^{\mu}_a$ is given by
\begin{center} \begin{tikzpicture} \node at (-0.25,0.4) {$\cdots \text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }N\text{ }\text{ }W\text{ }\text{ }N\text{ }\text{ }N\text{ }\text{ }N\text{ }\mid N\text{ }\text{ }N\text{ }\text{ }N\text{ }\text{ }W\text{ }\text{ }N\text{ }\text{ }W\text{ }\text{ }N\text{ }\text{ }N\cdots$.}; \end{tikzpicture} \end{center}
where the bar separates terms corresponding to edges of non-positive index from those of positive index.
So, $\operatorname{ch}([0]^{\mu}_a)=4-2=2.$ Analogously, $[1]^{\mu}_a$ is
\begin{center} \begin{tikzpicture} \node at (0,0) {$\cdots\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }N\text{ }\text{ }W\text{ } \mid N\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }N\text{ }\text{ }N\text{ }\text{ }W\text{ }\text{ }N\text{ }\text{ }N\text{ }\cdots$.}; \end{tikzpicture} \end{center}
So, $\operatorname{ch}([1]^{\mu}_a)=1-3=-2.$
\end{ex}
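The charges can also be computed directly from the parts: under the column convention one checks that the west edges strictly above the $x$-axis occur exactly at the indices $\mu_k-k+1$, $k=1,\ldots,l(\mu)$, and that every remaining index down to $1-l(\mu)$ carries a north edge. The Python sketch below (ours) reproduces the two charges just computed.
\begin{verbatim}
def charges(la, c):
    n = len(la)
    west = {la[k] - k for k in range(n)}              # {mu_k - k + 1 : k = 1..n}
    ch = []
    for r in range(c):
        norths = sum(1 for i in range(1 - n, 1)       # north edges with index <= 0
                     if i % c == r and i not in west)
        wests = sum(1 for i in west if i > 0 and i % c == r)   # west edges, index > 0
        ch.append(norths - wests)
    return ch

assert charges([12, 12, 10, 8, 7, 4, 1, 1, 1], 2) == [2, -2]
assert charges([3, 2, 1], 2) == [2, -2]   # the 2-core of mu, found in the example below
\end{verbatim}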
\begin{prop}\label{removing rimhooks} If $\lambda$ is a partition containing a rimhook $R$ of length $c$ and $\lambda'$ is the partition obtained from $\lambda$ by removing $R$, then $\lambda\sim_c\lambda'$. \end{prop} \begin{proof} Let the boundary tours of $\lambda$ and $\lambda'$ be $(e_i)_{i\in\mathbb{Z}}$ and $(e_i')_{i\in\mathbb{Z}}$. First, we analyse how the boundary sequences $(d(e_i))$ and $(d(e_i'))$ differ. Let $R$ have north-western most box $\square_2$ and south-eastern most box $\square_1$. Let $e_j$ be the north edge traversing the right edge of $\square_1$, so that $e_{j+c}$ is the west edge traversing the top of $\square_2.$
\begin{center} \begin{tikzpicture}[scale=0.5] \draw[yellow, fill=yellow] (3,6)--(6,6)--(6,4)--(8,4)--(8,2)--(7,2)--(7,3)--(5,3)--(5,5)--(3,5)--(3,6); \draw[dashed] (3,6)--(3,5)--(5,5)--(5,3)--(7,3)--(7,2)--(8,2); \draw (0,6)--(6,6)--(6,4)--(8,4)--(8,2)--(9,2)--(9,1)--(10,1)--(10,0); \draw[->] (3.6,6)--(3.5,6); \draw[->] (3,5.4)--(3,5.5); \draw[->] (7.6,2)--(7.5,2); \draw[->] (8,2.4)--(8,2.5); \node[above] at (3.5,6) {$e_{j+c}$}; \node[left] at (3,5.4) {$e'_{j+c}$}; \node[right] at (8,2.4) {$e_{j}$}; \node[below] at (7.5,2) {$e'_{j}$}; \end{tikzpicture}\quad \begin{tikzpicture}[scale=0.5] \draw[yellow, fill=yellow] (3,6)--(6,6)--(6,4)--(8,4)--(8,2)--(7,2)--(7,3)--(5,3)--(5,5)--(3,5)--(3,6); \draw (0,6)--(3,6)--(3,5)--(5,5)--(5,3)--(7,3)--(7,2)--(8,2)--(9,2)--(9,1)--(10,1)--(10,0); \end{tikzpicture} \end{center}
Since we remove $\square_1$ and $\square_2$, $d(e_j)=N,$ $d(e_j')=W$, and $d(e_{j+c})=W$ and $d(e_{j+c}')=N$. Let $j=qc+r$ for $0\leq r\leq c-1$. Since the rimhook does not contain a $2\times 2$ box and is connected, the portion of the boundary of $\lambda'$ between the lines $y-x=j+1$ and $y-x=c+j-1$ is a translate of the original partition boundary by $(-1,-1)$, so $d(e_i)=d(e_i')$ for all $i\not\in\{j,j+c\}.$ So, for all $0\leq s\leq c-1$ with $s\not=r$, $[s]^{\lambda}_a=[s]^{\lambda'}_a,$ and the arrival word \begin{equation}([r]^{\lambda'}_a)_i=\left\{ \begin{array}{cc} ([r]^{\lambda}_a)_{q} & i=q+1 \\
([r]^{\lambda}_a)_{q+1} & i=q \\
([r]^{\lambda}_a)_i & \text{otherwise.} \end{array}\right. \end{equation} So, \begin{align}\operatorname{ch}([r]^{\lambda'}_a)&=\operatorname{ch}_{q-1}([r]^{\lambda'}_a)\\ &=\operatorname{ch}_{q-1}([r]^{\lambda}_a)\\ &=\operatorname{ch}([r]^{\lambda}_a). \end{align}
\end{proof}
\begin{corol} The $c$-core of $\lambda$ is unique, and $\lambda\sim_c\mu$ if and only if $\lambda$ and $\mu$ have the same $c$-core. \end{corol} \begin{proof} If $\lambda$ has $c$-core $\nu$, then $\nu$ is obtained from $\lambda$ by iteratively removing rimhooks of length $c$ from $R$, so by Proposition~\ref{removing rimhooks}, $\lambda\sim_c \nu.$ Every partition has at least one $c$-core, so it remains to check that if $\mu$ and $\nu$ are both $c$-cores with $\mu\sim_c\nu$ then $\mu=\nu.$ By Propositions 2.10 and 2.14, if $\mu$ and $\nu$ are both $c$-cores then for each $i$, the arrival words $[i]^\mu_a$ and $[i]^\nu_a$ do not contain any inversions. So, both consist of a string of $W$s up to some index, and a string of $N$s thereafter. Since $\mu\sim_c\nu$, the charge of both $[i]^\mu_a$ and $[i]^\nu_a$ must be the same, and therefore $[i]^\mu_a=[i]^\nu_a$. \end{proof}
\begin{corol}\label{keypreservecore} Let $\lambda$ and $\mu$ be partitions. Then $\lambda$ and $\mu$ have the same $c$-core if there is a value of $m$ with $c\mid m$ such that for each $[i],$ both of the following hold.
\begin{itemize}
\item the arrival words in the $c$-abacus of $\lambda$ and $\mu$ agree after the entry with index $m$;
\item the portion of $[i]^\lambda_a$ with index at most $m$ is a permutation of the portion of $[i]^\mu_a$ with index at most $m$. \end{itemize} \end{corol}
\begin{ex} We will calculate the 2-core $\lambda$ of $\mu=(12,12,10,8,7,4,1,1,1)$. By Corollary~\ref{keypreservecore} and the calculation in the previous example, $\lambda$ is the unique $2$-core with $\operatorname{ch}([0]^{\lambda}_a)=2$ and $\operatorname{ch}([1]^{\lambda}_a)=-2.$
So, placing a bar in the bi-infinite string with no inversions to separate edges with positive index from those with non-positive index, the arrival words in $M_2(\lambda)$ at 0 and 1 respectively, are \begin{center} \begin{tikzpicture}\node at (0,0) {$\cdots\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }N\text{ }\text{ }N\text{ }\text{ }\mid N\text{ }\text{ }N\text{ }\text{ }N\text{ }\text{ }N\text{ }\text{ }N\text{ }\text{ }N\text{ }\text{ }N\text{ }\text{ }N\cdots$};
\node at (0.2,-0.4) {$\cdots \text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ }\text{ }W\text{ } \text{ }\mid W\text{ }\text{ }W\text{ }\text{ }N\text{ }\text{ }N\text{ }\text{ }N\text{ }\text{ }N\text{ }\text{ }N\text{ }\text{ }N\cdots$}; \end{tikzpicture} \end{center}
So, the 2-core is (3,2,1)
\begin{center} \begin{tikzpicture}[scale=0.5] \draw (0,3)--(1,3)--(1,2)--(2,2)--(2,1)--(3,1)--(3,0); \draw (0,2)--(1,2)--(1,0); \draw (0,1)--(2,1)--(2,0); \draw (0,3)--(0,0)--(3,0); \end{tikzpicture} .\end{center}
\end{ex}
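The same calculation can be phrased in the classical beta-number language, which is equivalent to the arrival-word description used here. The following Python sketch (ours, and not the construction used in the proofs) recovers the $2$-core just found by pushing the beads on each runner of the $c$-abacus down as far as they will go.
\begin{verbatim}
def c_core(la, c):
    n = len(la)
    beta = [la[i] + (n - 1 - i) for i in range(n)]     # distinct beta-numbers
    counts = [sum(1 for b in beta if b % c == r) for r in range(c)]
    new_beta = sorted((j * c + r for r in range(c) for j in range(counts[r])),
                      reverse=True)                    # beads pushed down on each runner
    core = [b - (n - 1 - i) for i, b in enumerate(new_beta)]
    return [part for part in core if part > 0]

assert c_core([12, 12, 10, 8, 7, 4, 1, 1, 1], 2) == [3, 2, 1]
\end{verbatim}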
\begin{prop}\label{corelattice} There is a bijective map from $c$-core partitions to a free $\mathbb{Z}$-module of rank $c-1.$ \begin{proof}
Consider the $c$-abacus of a $c$-core partition. The charges $(\operatorname{ch}([0]),\ldots,\operatorname{ch}([c-1]))$ specify the $c$-core. A $c$-tuple of integers $(a_0,\ldots,a_{c-1})$ represents the charges of a partition if and only if $\sum_{i=0}^{c-1}a_i=0.$ So, sending a $c$-core to the $c$-tuple of charges gives a bijection with the $\mathbb{Z}$-module $M = \langle e_0,\ldots, e_{c-1}\mid \sum_{i=0}^{c-1}e_i=0\rangle$. \end{proof}
\end{prop} Fix a positive integer $c$, a $c$-core $\mu$, and a non-negative integer $n$. Let $\operatorname{Par}^c_\mu(n)$ denote the set of partitions of $n$ with $c$-core $\mu$. Let $\operatorname{Par}^c_\mu$ denote the set of all partitions with $c$-core $\mu$, and let $\operatorname{Par}$ denote the set of all partitions.
\begin{defn} The $c$-quotient of $\lambda$ is a $c$-tuple of partitions $(q_1(\lambda),q_2(\lambda),\ldots,q_c(\lambda))$, where $q_i(\lambda)$ is the partition with boundary sequence $[i]_a$, with the index shifted so that the charge is 0. \end{defn}
\begin{defn} The \textit{quotient map} $\phi:\operatorname{Par}^c_{\mu}\rightarrow(\operatorname{Par})^c$ sends a partition $\lambda$ to $(q_1(\lambda),\ldots,q_c(\lambda)).$ \end{defn} \begin{prop}\label{coresandquotients}
For $\lambda\in\operatorname{Par}^c_{\mu},$ \begin{equation} |\lambda|=|\mu|+c\sum_{i=1}^c|q_i(\lambda)|.\end{equation} \end{prop} \begin{proof}
By Proposition~\ref{c-hook count}, the number of boxes with hook length divisible by $c$ is given by $\sum_{i=1}^c\operatorname{inv}[i]_a$. Starting from the $c$-abacus tour of $\mu$, we can obtain the $c$-abacus tour of $\lambda$ by adding these inversions one at a time. Adding each inversion corresponds to adding a rimhook of length $c$ to the diagram, so contributes $c$ to $|\lambda|$. Finally, $\operatorname{inv}[i]_a=|q_i(\lambda)|$, since the boxes of a partition correspond to the inversions in its boundary sequence. \end{proof}
\subsection{The map $G_c$} Now we set about proving Theorem~\ref{basic partition step}. We first recall three standard generating functions. \begin{prop}
\begin{equation}\label{partitionswithparts} \sum_{\lambda\in\operatorname{Par}}q^{|\lambda|}t^{l(\lambda)}=\prod_{m\geq 1}\frac{1}{1-q^mt}
\end{equation}
\begin{equation}\label{justpartitions} \sum_{\lambda\in\operatorname{Par}}q^{|\lambda|}=\prod_{m\geq 1}\frac{1}{1-q^m}
\end{equation}
\begin{equation}\label{coresgen} \sum_{\lambda\in\operatorname{Par}^c_{\mu}}q^{|\lambda|}=q^{|\mu|}\prod_{m\geq 1}\frac{1}{(1-q^{mc})^c}
\end{equation}
\end{prop} \begin{proof} We may rewrite the right hand side of~\eqref{partitionswithparts} as $$\prod_{m\geq 1} \left(1+q^mt+q^{2m}t^2+q^{3m}t^3+\ldots\right),$$
so that picking a term $q^{km}t^k$ for each $m$ corresponds to declaring that $\lambda$ contains $k$ parts of size $m$, contributing $km$ to $|\lambda|$ and $k$ to $l(\lambda)$, giving the left hand side. Setting $t=1$ in~\eqref{partitionswithparts} gives~\eqref{justpartitions}.
For~\eqref{coresgen}, Proposition~\ref{coresandquotients} tells us that the map $\phi$ gives a bijection between $\lambda\in\operatorname{Par}^c_\mu$ and $c$-tuples of partitions $(q_1,\ldots,q_c)$ where $|\lambda|=|\mu|+c\sum_{i=1}^c|q_i(\lambda)|.$ The right hand side of~\eqref{coresgen} corresponds to all choices of $c$-tuples $q_1,\ldots,q_c\in \operatorname{Par},$ and the weighting by $c$ corresponds to each box in $q_i$ corresponding to $c$ boxes in $\lambda$. \end{proof}
Next, we define a partition statistic $\lambda_{\hrectangle}^{c*}$ that arises as a special case of one of the statistics that we study.
For a positive integer $d$, let $m_d(\lambda)$ denote the number of parts of $\lambda$ of size $d$, and for fixed $c$ let $\lambda_{\hrectangle}^{c*}$ denote the weighted sum \begin{equation}\lambda_{\hrectangle}^{c*}=\sum_{d=1}^{\infty} \left\lfloor\frac{m_d(\lambda)}{c}\right\rfloor.\end{equation} In words, $\lambda_{\hrectangle}^{c*}$ counts the rectangles in the diagram of $\lambda$ whose base lies on the $x$-axis, whose width is a positive multiple of $c$, and whose whole top edge, together with at least the top unit step of the right edge, lies on the boundary of $\lambda$.
\begin{ex} Let $c=3$. The partition $\lambda=(7,7,4,4,4,4,4,4,4,3,2,2,2,1)$ has $m_7=2$, $m_4=7$, $m_3=1$, $m_2=3$ and $m_1=1$. So, the only nonzero contributions to $\lambda_{\hrectangle}^{3*}$ are when $d=2$ and $d=4$, and \begin{equation*}\lambda_{\hrectangle}^{3*}=\left\lfloor\frac{m_2}{3}\right\rfloor+\left\lfloor\frac{m_4}{3}\right\rfloor=1+2=3.
\end{equation*}
\end{ex}
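Computationally the statistic depends only on the part multiplicities, as the following small Python sketch (ours) illustrates on the example above.
\begin{verbatim}
from collections import Counter

def rect_stat(la, c):
    """Sum over d of floor(m_d / c)."""
    return sum(m // c for m in Counter(la).values())

assert rect_stat([7, 7, 4, 4, 4, 4, 4, 4, 4, 3, 2, 2, 2, 1], 3) == 3
\end{verbatim}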
\begin{defn}
The map $G_c:\operatorname{Par}\rightarrow \operatorname{Par}\times K_c$, where $K_c=\{\lambda\in\operatorname{Par}\mid \lambda_{\hrectangle}^{c*}=0\}$ is the set of partitions with no parts repeated $c$ or more times, maps a partition $\lambda$ to $(\xi,\nu)$ where, writing $\pi_{\hrectangle}^d$ for the number of parts of size $d$ of a partition $\pi$ and $m_d=m_d(\lambda)$, for each $d\in \mathbb{N}$,
\begin{equation}\xi_{\hrectangle}^d=\left\lfloor\frac{m_d}{c}\right\rfloor,\end{equation} and
\begin{equation}\nu_{\hrectangle}^d=m_d-c\left\lfloor\frac{m_d}{c}\right\rfloor.\end{equation}
We write $(G_c)^{-1}$ for the inverse map $(G_c)^{-1}:K_c\times \operatorname{Par}\rightarrow \operatorname{Par}$ where, for each $d\in\mathbb{N}$ \begin{equation}
(G_c)^{-1}(\xi,\nu)_{\hrectangle}^d= c\xi_{\hrectangle}^d+\nu_{\hrectangle}^d.
\end{equation}
\end{defn}
\begin{ex} When $c=3$, the partition $\lambda=(7,7,4,4,4,4,4,4,4,3,2,2,2,1)$ has $$G_3(\lambda)=((4,4,2),(7,7,4,3,1)).$$
\begin{figure}
\caption{The partition $\lambda$ has $G_3(\lambda)=((4,4,2),(7,7,4,3,1))$}
\label{G3 figure}
\end{figure} \end{ex}
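In terms of part multiplicities the map $G_c$ and its inverse are immediate to implement; the following Python sketch (ours, with assumed helper names) reproduces the example.
\begin{verbatim}
from collections import Counter

def to_parts(mult):
    return sorted((d for d, k in mult.items() for _ in range(k)), reverse=True)

def G(la, c):
    m = Counter(la)
    xi = Counter({d: k // c for d, k in m.items() if k >= c})
    nu = Counter({d: k % c for d, k in m.items() if k % c})
    return to_parts(xi), to_parts(nu)

def G_inverse(xi, nu, c):
    m = Counter(nu)
    for d in xi:
        m[d] += c
    return to_parts(m)

la = [7, 7, 4, 4, 4, 4, 4, 4, 4, 3, 2, 2, 2, 1]
xi, nu = G(la, 3)
assert (xi, nu) == ([4, 4, 2], [7, 7, 4, 3, 1])
assert G_inverse(xi, nu, 3) == la
\end{verbatim}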
The next proposition establishes that the $c$-core of a partition $\lambda$ is also the $c$-core of the second component of $G_c(\lambda),$ so we may restrict $G_c$ to $\operatorname{Par}^c_\mu$ in a way that interacts sensibly with cores.
\begin{prop}\label{Glaisherrefinestocores} If $\lambda\in\operatorname{Par}^c_{\mu}$ and $G_c(\lambda)=(\xi,\nu)$, then $\nu\in\operatorname{Par}^c_{\mu}$. \end{prop} \begin{proof} Suppose the proposition is false for some $\lambda$ of minimal possible size. Then, we must have $\lambda\not=\nu$, so $\lambda$ must have some part of some size $d$ repeated at least $c$ times. Among the columns of $\lambda$ of height $d$, take the $c$ rightmost; the top row of the resulting rectangle of height $d$ and width $c$, whose whole top edge and topmost right edge lie in the boundary of $\lambda$, is a rimhook of length $c$. Let $\lambda'$ be the partition formed by deleting this rimhook. Then, $\lambda'$ has $c$-core $\mu$ and $G_c(\lambda')=(\xi',\nu)$ for some $\xi'.$ So, since $\lambda'$ is smaller than $\lambda$, $\nu\in\operatorname{Par}^c_{\mu}$, a contradiction. \end{proof}
Therefore, $G_c$ restricts to a bijection $G_c|_{\operatorname{Par}^c_{\mu}}:\operatorname{Par}^c_{\mu}\rightarrow\operatorname{Par}\times (K_c\cap \operatorname{Par}^c_{\mu})$. This allows us to use $G_c$ to prove the following.
\begin{prop}
For a positive integer $c$ and a $c$-core $\mu$, the following product formula holds. \begin{equation}\label{GenFunNots}\sum_{\lambda\in K_c\cap\operatorname{Par}^c_\mu}q^{|\lambda|}=q^{|\mu|}\prod_{m\geq 1}\frac{1}{(1-q^{mc})^{c-1}}.\end{equation} \end{prop} \begin{proof}
Let $\lambda\in\operatorname{Par}^c_\mu.$ Then $G_c$ bijectively maps $\lambda$ to a pair of partitions $(\xi,\nu)$, with $\xi\in\operatorname{Par}$ and $\nu\in K_c\cap\operatorname{Par}^c_{\mu}$, such that $|\lambda|=c|\xi|+|\nu|,$ because each part of $\xi$ corresponds to $c$ parts of $\lambda$ of the same size. So, \begin{equation}\label{Glaisher}
\sum_{\lambda\in\operatorname{Par}^c_{\mu}}q^{|\lambda|}=\sum_{\nu\in K_c\cap\operatorname{Par}^c_\mu}q^{|\nu|}\times \sum_{\xi\in\operatorname{Par}}q^{c|\xi|}.
\end{equation}
Substituting~\eqref{justpartitions} (with $q$ replaced by $q^c$) and~\eqref{coresgen} into~\eqref{Glaisher} gives
\begin{equation}
q^{|\mu|}\prod_{m\geq 1}\frac{1}{(1-q^{mc})^c}=\sum_{\nu\in K_c\cap\operatorname{Par}^c_\mu}q^{|\nu|}\times \prod_{m\geq 1}\frac{1}{(1-q^{mc})},
\end{equation}
which rearranges to give~\eqref{GenFunNots}.\end{proof}
We are now in a position to prove our base case. \begin{thm}\label{basic partition step} For a fixed positive integer $c$,
\begin{equation}\label{base case}\sum_{\lambda\in\operatorname{Par}_{\mu}^c} q^{|\lambda|}t^{\lambda_{\hrectangle}^{c*}}=q^{|\mu|}\prod_{i\geq 1}\frac{1}{(1-q^{ic})^{c-1}}\prod_{j\geq1}\frac{1}{1-q^{jc}t}.\end{equation} \end{thm} \begin{proof}
Let $\lambda\in\operatorname{Par}^c_\mu.$ Then $G_c$ bijectively maps $\lambda$ to a pair of partitions $(\xi,\nu)$ with $|\lambda|=c|\xi|+|\nu|$ and $\lambda_{\hrectangle}^{c*}=l(\xi),$ since each part of $\xi$ of size $d$ corresponds to a $d\times c$ rectangle in $\lambda$ contributing to $\lambda_{\hrectangle}^{c*}$. So,
\begin{equation}\label{decomposition1}\sum_{\lambda\in\operatorname{Par}_{\mu}^c} q^{|\lambda|}t^{\lambda_{\hrectangle}^{c*}}=\sum_{\nu\in K_c\cap\operatorname{Par}^c_\mu}q^{|\nu|}\times \sum_{\xi\in\operatorname{Par}}q^{c|\xi|}t^{l(\xi)}.\end{equation}
Substituting~\eqref{GenFunNots} and~\eqref{partitionswithparts} (with $q$ replaced by $q^c$) into~\eqref{decomposition1} gives~\eqref{base case}. \end{proof}
\section{Further partition statistics} \stepcounter{essaypart} In this section we define the main partition statistics of interest, $h_{x,c}^+$ and $h_{x,c}^{-},$ where $x$ is a real parameter and $c$ is a positive integer. The main aim of this paper is to compute the distribution of the statistics $h_{x,c}^+$ and $h_{x,c}^{-}$ over $\operatorname{Par}^c_{\mu}$, given in Theorem~\ref{main theorem}. The previous section computed the distribution of $\lambda_{\hrectangle}^{c*}$ over $\operatorname{Par}^c_{\mu}$, giving the right hand side in Theorem~\ref{main theorem}. In this section, we connect to $\lambda_{\hrectangle}^{c*}$ by observing that $\lambda_{\hrectangle}^{c*}=h^+_{0,c},$ and then sketch a framework for piecing together a family of involutions $I_{r,s,c}$ defined on $\operatorname{Par}^c_{\mu}$ to prove that the distribution $h_{x,c}^{\pm}$ over $\operatorname{Par}^c_{\mu}$ is independent of both $x$ and the sign. The rest of the paper will then construct the component bijections $I_{r,s,c}.$
In order to reduce the proof of Theorem~\ref{main theorem} to the construction of appropriate bijections $I_{r,s,c},$ we first prove that Theorem~\ref{main theorem} is implied by Theorem~\ref{h+ is h-}, which states that the $h_{x,c}^+$ and $h_{x,c}^-$ have the same distribution over $\operatorname{Par}^c_{\mu}$. Then, we introduce three other statistics $\operatorname{mid}_{x,c}$, $\operatorname{crit}_{x,c}^-$ and $\operatorname{crit}_{x,c}^+$ and decompose $h_{x,c}^+$ and $h_{x,c}^-$ in terms of these other statistics. Finally, we outline sufficient conditions for the bijections $I_{r,s,c}$ to prove Theorem~\ref{h+ is h-} in terms of these three statistics.
We conclude the section by explaining how the main result of \cite{BFN} follows from Theorem~\ref{main theorem}.
\begin{defn} For a partition $\lambda$, $x\in [0,\infty]$ and a fixed $c\in\mathbb{N}$,
\begin{equation} h_{x,c}^+(\lambda)=\left|\left\{\square \in \lambda \,\middle\vert\, c\mid h(\square) \text{ and } \frac{a(\square)}{l(\square)+1}\leq x<\frac{a(\square)+1}{l(\square)}\right\}\right|,\end{equation} and
\begin{equation}h_{x,c}^-(\lambda)=\left|\left\{\square \in \lambda \,\middle\vert\, c\mid h(\square) \text{ and } \frac{a(\square)}{l(\square)+1}< x\leq\frac{a(\square)+1}{l(\square)}\right\}\right|.\end{equation} We interpret a fraction with denominator $0$ as $+\infty$. \end{defn}
Note that a box $\square$ contributes to $h_{0,c}^+$ if and only if $a(\square)=0$ and $c\mid (l(\square)+1).$ That is, $\square$ is the topmost box in its column, and there is some $m$ such that the column containing $\square$ and exactly $mc-1$ columns to the right all have the same height. The number of such boxes is exactly $\lambda_{\hrectangle}^{c*}$.
Similarly, $h_{\infty,c}^-(\lambda)=\bar{\lambda}_{\hrectangle}^{c*},$ where $\bar{\lambda}$ is the partition conjugate to $\lambda$.
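For reference, the following Python sketch (ours) computes $h_{x,c}^{+}$ and $h_{x,c}^{-}$ directly from the definition, treating $x$ as an exact fraction and reading a fraction with denominator $0$ as $+\infty$; it also checks the identification $h_{0,c}^{+}=\lambda_{\hrectangle}^{c*}$ on the example of the previous section.
\begin{verbatim}
from fractions import Fraction

def arm_leg(la):
    for i, col in enumerate(la):
        for j in range(1, col + 1):
            yield col - j, sum(1 for h in la[i + 1:] if h >= j)

def h_plus(la, x, c):
    x = Fraction(x)
    return sum(1 for a, l in arm_leg(la)
               if (a + l + 1) % c == 0
               and Fraction(a, l + 1) <= x
               and (l == 0 or x < Fraction(a + 1, l)))   # (a+1)/0 read as +infinity

def h_minus(la, x, c):
    x = Fraction(x)
    return sum(1 for a, l in arm_leg(la)
               if (a + l + 1) % c == 0
               and Fraction(a, l + 1) < x
               and (l == 0 or x <= Fraction(a + 1, l)))

# h^+_{0,c} coincides with the rectangle statistic of the previous section:
assert h_plus([7, 7, 4, 4, 4, 4, 4, 4, 4, 3, 2, 2, 2, 1], 0, 3) == 3
\end{verbatim}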
We are now in a position to state our main result.
\begin{thm}\label{main theorem} For all $x\in[0,\infty)$ we have
\begin{equation}\sum_{\lambda\in\operatorname{Par}^c_{\mu}}q^{|\lambda|}t^{h_{x,c}^+(\lambda)}=q^{|\mu|}\prod_{i\geq 1}\frac{1}{(1-q^{ic})^{c-1}}\prod_{j\geq1}\frac{1}{1-q^{jc}t},\end{equation} and for all $x\in (0,\infty],$\begin{equation}\sum_{\lambda\in\operatorname{Par}^c_{\mu}}q^{|\lambda|}t^{h_{x,c}^-(\lambda)}=q^{|\mu|}\prod_{i\geq 1}\frac{1}{(1-q^{ic})^{c-1}}\prod_{j\geq1}\frac{1}{1-q^{jc}t}.\end{equation}\end{thm}
Proposition~\ref{4to3} shows that Theorem~\ref{main theorem} is a consequence of the following result.
\begin{thm}\label{h+ is h-} For all positive rational numbers $x$ and all integers $n\geq0$ we have \begin{equation}\sum_{\lambda\in\operatorname{Par}_\mu^c(n)}t^{h_{x,c}^+(\lambda)}=\sum_{\lambda\in\operatorname{Par}_\mu^c(n)}t^{h_{x,c}^-(\lambda)}.\end{equation} \end{thm}
\subsection{Reducing to Theorem~\ref{h+ is h-}} \begin{prop}\label{4to3} Theorem~\ref{h+ is h-} implies Theorem~\ref{main theorem}. \end{prop} \begin{proof} For $x\in[0,\infty]$, $c\in\mathbb{N}$ and $\delta\in\{+,-\}$ define $$H_{x,c}^\delta(n)=\sum_{\lambda\in\operatorname{Par}^c_{\mu}(n)}t^{h_{x,c}^\delta(\lambda)}.$$ Suppose $H_{x,c}^\delta(n)$ is independent of both $x$ and $\delta$. Then $$H_{x,c}^\delta(n)=H_{0,c}^+(n)=\sum_{\lambda\in\operatorname{Par}^c_{\mu}(n)} t^{h_{0,c}^+(\lambda)}=\sum_{\lambda\in\operatorname{Par}^c_{\mu}(n)} t^{\lambda_{\hrectangle}^{c*}}.$$ Theorem~\ref{main theorem} then follows immediately by multiplying by $q^n$, adding over all $n\geq 0$, and applying Theorem~\ref{basic partition step}. So, it suffices to prove that Theorem~\ref{h+ is h-} implies that $H_{x,c}^\delta(n)$ is independent of $x$ and $\delta$.
For an integer $n$, we call a positive rational number $r$ a \textit{critical rational for} $n$ if there is a partition $\lambda\in\operatorname{Par}(n)$ and a box $\square$ in the Young diagram of $\lambda$ such that $ h(\square)$ is divisible by $c$, and $\frac{a(\square)}{l(\square)+1}=r$ or $\frac{a(\square)+1}{l(\square)}=r.$ By convention, $0$ and $+\infty$ are regarded as critical rationals for all $n$.
We denote the set of all critical rationals for $n$ by $C(n).$ Since there are finitely many partitions of $n$ each containing finitely many boxes in their diagrams, $C(n)$ is finite for all $n$. For a fixed $n$, write $C(n)=\{0=r_0<r_1<\cdots<r_{k-1}<r_k=+\infty\}.$ Define open intervals $I_j=(r_{j-1},r_j)$ for each $1\leq j\leq k$. Then $[0,\infty]$ decomposes into a disjoint union $$[0,\infty]=I_1\cup I_2\cup\cdots\cup I_k\cup C(n).$$
Let $x,x'$ be two elements of the same interval $I_j$ and let $\delta,\delta'\in\{+,-\}.$ Suppose $\lambda$ is any partition of $n$. Since there are no critical rationals between $x$ and $x'$, $\square\in d(\lambda)$ contributes to $h_{x,c}^\delta(\lambda)$ if and only if it contributes to $h_{x',c}^{\delta'}(\lambda).$ So, $t^{h_{x,c}^\delta(\lambda)}=t^{h_{x',c}^{\delta'}(\lambda)}$. Adding over all $\lambda$, we see that if $x,x'\in I_j$, \begin{align} H_{x,c}^\delta(n)=H_{x',c}^{\delta'}(n).\end{align}
Similarly, for all $x\in I_j$, \begin{align}\label{two}H_{r_{j-1},c}^+(n)=H_{x,c}^\delta(n)=H_{r_j,c}^-(n).\end{align}
On the other hand, Theorem~\ref{h+ is h-} implies that \begin{align}\label{three}H_{r_j,c}^+(n)=H_{r_j,c}^-(n).\end{align}
Therefore, for $\delta,\delta'\in\{+,-\}$ and $y\geq y'$ by applying a chain of these equalities starting with $H_{y,c}^\delta(n),$ one can reduce $y$ to a critical rational and change $\delta$ to a $+$ using~\eqref{two}, or using~\eqref{three} if $y$ is already a critical rational. Then one may iteratively apply~\eqref{three} and~\eqref{two} to change $\delta$ to a $-$, and then reduce $y$ to the next lowest critical rational and change $\delta$ back to a $+$, until an equality $H_{y,c}^{\delta}(n)=H_{r_j,c}^-(n)$ is obtained for $r_{j-1}\leq y'\leq r_j$. Then, applying~\eqref{two} again with $x=y'$ (and~\eqref{three} to flip the sign of $\delta$ if $y=r_{j-1}$ and $\delta'=-$), one obtains $H_{y,c}^\delta(n)=H_{y',c}^{\delta'}(n).$ \end{proof}
\subsection{Reducing to a symmetry property}
In the case $x$ is rational, where $h_{x,c}^+$ and $h_{x,c}^-$ may differ, it is useful to separate the boxes that contribute to both statistics from those that contribute to just one. In order to do this, we define the following statistics.
\begin{defn} For a rational number $x=\frac{r}{s}$, with $r$ and $s$ coprime positive integers, we define
\begin{equation}\operatorname{crit}_{x,c}^+(\lambda)=\left|\left\{\square\in\lambda \middle\vert\ c\mid h(\square)\text{ and } \frac{a(\square)}{l(\square)+1}=x\right\}\right|,\end{equation}
\begin{equation}\operatorname{crit}_{x,c}^-(\lambda)=\left|\left\{\square\in\lambda \middle\vert\ c\mid h(\square)\text{ and } \frac{a(\square)+1}{l(\square)}=x\right\}\right|,\end{equation}
\begin{equation}\operatorname{mid}_{x,c}(\lambda)=\left|\left\{\square\in\lambda \middle\vert\ c\mid h(\square)\text{ and }-s<sa(\square)-rl(\square)<r
\right\}\right|.\end{equation} \end{defn}
The next proposition shows that a bijection satisfying some constraints on its behaviour with respect to these statistics will give a bijective proof of Theorem~\ref{h+ is h-}.
\begin{prop}\label{bijection properties} Let $r,s,c$ be positive integers with $(r,s)=1$ and let $x=\frac{r}{s}$. Suppose there exists a bijection $I_{r,s,c}:\operatorname{Par}^c_{\mu}\rightarrow\operatorname{Par}^c_\mu$ such that \begin{enumerate}
\item $|\lambda|=|I_{r,s,c}(\lambda)|$,
\item $\operatorname{mid}_{x,c}(\lambda)=\operatorname{mid}_{x,c}(I_{r,s,c}(\lambda))$,
\item $\operatorname{crit}_{x,c}^+(\lambda)+\operatorname{crit}_{x,c}^-(\lambda)=\operatorname{crit}_{x,c}^+(I_{r,s,c}(\lambda))+\operatorname{crit}_{x,c}^-(I_{r,s,c}(\lambda))$,
\item $\operatorname{crit}_{x,c}^+(\lambda)=\operatorname{crit}_{x,c}^-(I_{r,s,c}(\lambda)).$ \end{enumerate} Then, Theorem~\ref{h+ is h-} is true. \end{prop} \begin{proof} Assume that $I_{r,s,c}$ exists. Then, properties 3 and 4 together imply that \begin{equation}
\operatorname{crit}_{x,c}^-(\lambda)= \operatorname{crit}_{x,c}^+(I_{r,s,c}(\lambda)) \end{equation}
so $I_{r,s,c}$ exchanges $\operatorname{crit}_{x,c}^+$ and $\operatorname{crit}_{x,c}^-$ whilst preserving $|\lambda|$ and $\operatorname{mid}_{x,c}.$
Note that a box $\square$ contributes to $\operatorname{mid}_{x,c}$ if and only if \begin{equation}\label{middef}-s<sa(\square)-rl(\square)<r.\end{equation} Adding $s+rl(\square)$, the left inequality is equivalent to \begin{equation}\label{midequivalent}
rl(\square)<s(a(\square)+1). \end{equation} Equivalently, \begin{equation}
\frac{a(\square)+1}{l(\square)}>x. \end{equation} Similarly, the right inequality in~\eqref{middef} is equivalent to \begin{equation}
\frac{a(\square)}{l(\square)+1}<x. \end{equation}
So, $\square$ contributes to $\operatorname{mid}_{x,c}$ if and only if \begin{equation}\label{midequiv}
\frac{a(\square)}{l(\square)+1}<x<\frac{a(\square)+1}{l(\square)}. \end{equation} So, comparing the definitions of $\operatorname{crit}_{x,c}^-,$ $\operatorname{crit}_{x,c}^+,$ $h_{x,c}^{+},$ $h_{x,c}^{-}$ and ~\eqref{midequiv}, \begin{equation}h^+_{x,c}(\lambda)=\operatorname{mid}_{x,c}(\lambda)+\operatorname{crit}_{x,c}^+(\lambda)\end{equation} and \begin{equation}h^-_{x,c}(\lambda)=\operatorname{mid}_{x,c}(\lambda)+\operatorname{crit}_{x,c}^-(\lambda).\end{equation}
So, $I_{r,s,c}$ exchanges $h_{x,c}^+(\lambda)$ and $h_{x,c}^{-}(\lambda)$ whilst preserving $|\lambda|,$ and hence proves Theorem~\ref{h+ is h-}.\end{proof}
\subsection{Connecting to Buryak-Feigin-Nakajima} When $c$ is divisible by $r+s$, Theorem~\ref{main theorem} implies the following product formula. In the case $r+s=c$, this is the main combinatorial result of \cite{BFN}. \begin{corol} Let $r$ and $s$ be coprime positive integers, let $x=\frac{r}{s}$ and suppose that $r+s\mid c$. Then \begin{equation}
\sum_{\lambda\in\operatorname{Par}}q^{|\lambda|}t^{\operatorname{crit}_{x,c}^+(\lambda)}=\prod_{\substack{i\geq 1\\ c\nmid i}}\frac{1}{1-q^i}\prod_{i\geq 1}\frac{1}{1-q^{ic}t}. \end{equation} \end{corol} \begin{proof} First we show that, under the assumption that $r+s\mid c$, $\operatorname{mid}_{x,c}(\lambda)=0$ for any partition $\lambda$. If a box $\square$ were to contribute to $\operatorname{mid}_{x,c}(\lambda)$, then $\square$ would have to satisfy \begin{equation}
-s<sa(\square)-rl(\square)<r. \end{equation} Adding $rl(\square)+sl(\square)+s$ to each term, \begin{equation}
(r+s)l(\square)<s\left(a(\square)+l(\square)+1\right)<(r+s)\left(l(\square)+1\right). \end{equation} However, the upper and lower bounds are consecutive multiples of $r+s$, and therefore $s(a(\square)+l(\square)+1)=s\,h(\square)$ cannot be a multiple of $r+s$; since $r+s\mid c$, it cannot be a multiple of $c$ either. So $c\nmid h(\square)$, and $\square$ cannot contribute to $\operatorname{mid}_{x,c}(\lambda).$
So in this case $h_{x,c}^+(\lambda)=\operatorname{crit}_{x,c}^+(\lambda)$ and Theorem~\ref{main theorem} becomes \begin{equation}
\sum_{\lambda\in\operatorname{Par}^c_{\mu}}q^{|\lambda|}t^{\operatorname{crit}_{x,c}^+(\lambda)}=q^{|\mu|}\prod_{i\geq 1}\frac{1}{(1-q^{ic})^{c-1}}\frac{1}{1-q^{ic}t}. \end{equation} Summing both sides over all $c$-cores $\mu$ and applying Proposition~\ref{coresandquotients}, \begin{align}
\sum_{\lambda\in\operatorname{Par}}q^{|\lambda|}t^{\operatorname{crit}_{x,c}^+(\lambda)}&=\prod_{i\geq 1}\frac{(1-q^{ic})^c}{1-q^{i}}\frac{1}{(1-q^{ic})^{c-1}}\frac{1}{1-q^{ic}t}\\ &=\prod_{i\geq 1}\frac{(1-q^{ic})}{1-q^{i}}\frac{1}{1-q^{ic}t}\\ &=\prod_{\substack{i\geq 1\\ c\nmid i}}\frac{1}{1-q^{i}}\prod_{i\geq 1}\frac{1}{1-q^{ic}t}.\end{align} \end{proof}
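The corollary is also easy to test numerically for small degrees. The sketch below (which reuses the \texttt{statistics} helper from the earlier sketch; the truncation scheme and helper names are ours) compares the coefficients of $q^nt^k$ on both sides up to $q^N$.
\begin{verbatim}
def partitions_of(n, max_part=None):
    # all partitions of n as weakly decreasing lists
    if max_part is None or max_part > n:
        max_part = n
    if n == 0:
        yield []
        return
    for first in range(max_part, 0, -1):
        for rest in partitions_of(n - first, first):
            yield [first] + rest

def poly_mul(A, B, N):
    # multiply {(q_deg, t_deg): coeff} dictionaries, truncating above q^N
    C = {}
    for (n1, k1), v1 in A.items():
        for (n2, k2), v2 in B.items():
            if n1 + n2 <= N:
                C[(n1 + n2, k1 + k2)] = C.get((n1 + n2, k1 + k2), 0) + v1 * v2
    return C

def geometric(a, b, N):
    # truncation of 1/(1 - q^a t^b)
    return {(a * m, b * m): 1 for m in range(N // a + 1)}

def check_product_formula(r, s, c, N):
    # compare both sides of the corollary up to q^N; requires (r+s) | c
    lhs = {}
    for n in range(N + 1):
        for lam in partitions_of(n):
            k = statistics(lam, r, s, c)[0]        # crit^+_{x,c}(lam)
            lhs[(n, k)] = lhs.get((n, k), 0) + 1
    rhs = {(0, 0): 1}
    for i in range(1, N + 1):
        if i % c:
            rhs = poly_mul(rhs, geometric(i, 0, N), N)
    for i in range(1, N // c + 1):
        rhs = poly_mul(rhs, geometric(i * c, 1, N), N)
    return lhs == rhs

assert check_product_formula(1, 1, 2, 8)
assert check_product_formula(2, 1, 3, 8)
\end{verbatim}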
\section{The multigraph $M_{r,s,c}$} \stepcounter{essaypart} In this section we take our first key step in the construction of $I_{r,s,c}$. First, Proposition~\ref{motivate Mrsc} relates the statistics $\operatorname{mid}_{x,c},$ $\operatorname{crit}_{x,c}^-$ and $\operatorname{crit}_{x,c}^+$ to the boundary graph. We use this relationship to define a map from the boundary graph to a multigraph $M_{r,s,c}$ that picks out the information relevant to $\operatorname{mid}_{x,c}$ and $\operatorname{crit}_{x,c}^+$, much as the $c$-abacus tour does for the $c$-core. The rest of the section then proves that the multigraph $M_{r,s,c}$ determines the $c$-core, size, and values of $\operatorname{mid}_{x,c}$ and $\operatorname{crit}_{x,c}^-+\operatorname{crit}_{x,c}^+$ of a partition. When we define $I_{r,s,c}$ as a bijection on partitions, we build into the definition that $I_{r,s,c}$ preserves $M_{r,s,c}(\lambda)$ for any partition $\lambda.$ So, it is immediate from these results that $I_{r,s,c}$ satisfies hypotheses 1 to 3 in Proposition~\ref{bijection properties}.
\begin{prop}\label{motivate Mrsc} Let $\lambda$ be a partition and let $\square\in\lambda.$ Let $e_i$ be the foot of $\square$, departing from $(x_1,y_1-1)$ and arriving at $(x_1,y_1)$, and let $e_j$ be the hand of $\square$, departing from $(x_2+1,y_2)$ and arriving at $(x_2,y_2)$. Let $t=r(x_1-x_2)+s(y_1-y_2)$. Then \begin{enumerate} \item $\square$ contributes to $\operatorname{crit}_{x,c}^+$ if and only if $t=0$ and $y_1-x_1\equiv y_2-x_2\pmod{c}$; \item $\square$ contributes to $\operatorname{mid}_{x,c}$ if and only if $0<t<r+s$ and $y_1-x_1\equiv y_2-x_2\pmod{c}$; \item $\square$ contributes to $\operatorname{crit}_{x,c}^-$ if and only if $t=r+s$ and $y_1-x_1\equiv y_2-x_2\pmod{c}.$ \end{enumerate} \end{prop} \begin{proof} By the definition of index, $y_1-x_1=i$ and $y_2-x_2=j$. Let $\square \in \lambda$ have bottom left corner $(x_\square,y_\square)$. By Proposition~\ref{c-hooks are c-inversions}, $\square$ has hook length divisible by $c$ if and only if $j\equiv i \pmod{c}$, i.e. $y_1-x_1\equiv y_2-x_2\pmod{c}.$ So, assume that $\square$ does have hook length divisible by $c$.
\begin{figure}
\caption{The box $\square$ and the lines $rx+sy=k_1$ and $rx+sy=k_2$}
\end{figure}
Let $k_1=rx_1+sy_1$ and $k_2=rx_2+sy_2$. Then, \begin{equation}\label{tk1k1}t=k_1-k_2,\end{equation} \begin{equation}\label{armlegbox1}s(y_\square+1)+r(x_\square+l(\square)+1)=k_1,\end{equation} and \begin{equation}\label{armlegbox2}s(y_\square+a(\square)+1)+rx_\square=k_2.\end{equation} Subtracting ~\eqref{armlegbox1} from ~\eqref{armlegbox2}, and substituting in ~\eqref{tk1k1} \begin{equation}sa(\square)-rl(\square)=r-t.\end{equation}
By definition, $\square$ contributes to $\operatorname{crit}_{x,c}^+$ if and only if $sa(\square)-rl(\square)=r$, that is, when $t=0$, proving the first claim. Similarly, $\square$ contributes to $\operatorname{mid}_{x,c}$ if and only if $-s<sa(\square)-rl(\square)<r$, or equivalently $0<t<r+s,$ proving the second claim.
Finally, note $sa(\square)-rl(\square)=-s$ if and only if $t=r+s$. \end{proof}
We define the multigraph $M_{r,s,c}(\lambda)$ accordingly.
\begin{defn}[$M_{r,s,c}$, $(r,s,c)$-tour] For a partition $\lambda$ the NW directed multigraph $M_{r,s,c}(\lambda)$ is obtained from $b(\lambda)$ by imposing the relation $(x_1,y_1)\sim (x_2,y_2)$ if $rx_1+sy_1=rx_2+sy_2$ and $y_2-x_2\equiv y_1-x_1\pmod{c}$ on the vertices. Denote the equivalence class with $rx+sy=v$ and $y-x\equiv i\pmod{c}$ by $(v,[i]).$ Let $q_{r,s,c}:b(\lambda)\rightarrow M_{r,s,c}(\lambda)$ be the induced homomorphism. The $(r,s,c)$-tour of $M_{r,s,c}(\lambda)$ associated to $\lambda$ is $(q_{r,s,c}(e_i))_{i\in\mathbb{Z}}$.
At each vertex $(v,[i])$, we count the number of $N$ edges arriving at $(v,[i])$ in the $(r,s,c)$-tour and denote this quantity by $N_{in}(v,[i])$. Similarly, we count the number of $N$ edges departing from $(v,[i])$ in the $(r,s,c)$-tour and denote this quantity by $N_{out}(v,[i])$. We define $W_{in}(v,[i])$ and $W_{out}(v,[i])$ analogously. \end{defn}
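For experimentation it is convenient to have the arrival data of $M_{r,s,c}(\lambda)$ available by machine. The sketch below computes $N_{in}$ and $W_{in}$ at each vertex directly from the boundary, reading the parts of $\lambda$ as column heights as in the running examples and truncating the two infinite rays of the boundary after \texttt{extra} steps; the encoding is ours and only mirrors the definition above.
\begin{verbatim}
from collections import defaultdict

def boundary_edges(parts, extra=40):
    # boundary of the diagram (parts read as column heights), in tour
    # order: W steps along the x-axis, the profile of the diagram, then
    # N steps up the y-axis; each edge is (direction, arrival point),
    # and `extra` truncates the two infinite rays
    m = len(parts)
    edges = []
    for x in range(m + extra, m, -1):            # along the x-axis
        edges.append(('W', (x - 1, 0)))
    x, y = m, 0
    for i in range(m - 1, -1, -1):               # columns m, m-1, ..., 1
        while y < parts[i]:
            y += 1
            edges.append(('N', (x, y)))          # up the right side
        x -= 1
        edges.append(('W', (x, y)))              # across the top
    for _ in range(extra):                       # up the y-axis
        y += 1
        edges.append(('N', (0, y)))
    return edges

def multigraph_counts(parts, r, s, c, extra=40):
    # N_in and W_in at each vertex (v, [i]) of M_{r,s,c}(lambda)
    N_in, W_in = defaultdict(int), defaultdict(int)
    for d, (x, y) in boundary_edges(parts, extra):
        key = (r * x + s * y, (y - x) % c)
        (N_in if d == 'N' else W_in)[key] += 1
    return N_in, W_in
\end{verbatim}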
\begin{rmk} As with the boundary graph, but unlike the $c$-abacus, the direction of an edge in $M_{r,s,c}$ can be read off from its source and target. If an edge $e$ has $s(e)=(v,[i])$ and $t(e)=(w,[i+1])$ then either $d(e)=N$ and $w=v+s$ or $d(e)=W$ and $w=v-r.$ \end{rmk}
\begin{rmk} We saw earlier that the $c$-coloured box count of a partition carried the same information as its $c$-core and size, and also gave a correspondence with $\mathbb{Z}/c\mathbb{Z}$ representations. The $M_{r,s,c}$-multigraph carries the same information as a refinement of the $c$-content. If we act by $T\times \mathbb{Z}/c\mathbb{Z}$ where $T=\{(t^r,t^s)\mid t\in\mathbb{C}^*\}$ and colour boxes according to the weight of the corresponding monomial with respect to this representation, the analogous $(r,s,c)$-content carries the same information as the multigraph. \end{rmk}
The first property that we check is that $M_{r,s,c}(\lambda)$ determines the $c$-core of $\lambda$. \begin{prop}\label{independence core} If $\lambda$ and $\mu$ are partitions with $M_{r,s,c}(\lambda)=M_{r,s,c}(\mu)$ then $\lambda$ and $\mu$ have the same $c$-core. \end{prop} \begin{proof} Let $v$ be large enough so that $\left(0,\left\lceil\frac{v}{s}\right\rceil\right)$ is on the boundary of both $\lambda$ and $\mu$. Fix $m>\left\lceil\frac{v}{s}\right\rceil$ such that $c\mid m$. Then, in both the boundary tour of $\mu$ and the boundary tour of $\lambda$, every edge with index at least $m$ is a north edge. These edges account for every $N$ in an arrival word at a vertex $(w,[j])$ with $w\geq sm$.
For each $[i],$ the number of north edges with index less than $m$, for both $\lambda$ and $\mu$, is given by $$\sum_{w<sm}N_{in}(w,[i]).$$
Therefore $\lambda$, $\mu$ and $m$ satisfy the hypotheses of Corollary~\ref{keypreservecore}, and so $\lambda$ and $\mu$ have the same $c$-core. \end{proof} Next, we work towards showing that $M_{r,s,c}(\lambda)$ determines $\operatorname{crit}_{x,c}^+(\lambda)+\operatorname{crit}_{x,c}^-(\lambda)$ and $\operatorname{mid}_{x,c}(\lambda)$. Rephrasing the first part of Proposition~\ref{motivate Mrsc} in terms of the $(r,s,c)$-tour gives \begin{corol}\label{firstcritformulae} Let $\lambda$ be a partition. Then
\begin{equation}\operatorname{crit}_{x,c}^+(\lambda)=\sum_{(v,[i])\in M_{r,s,c}(\lambda)}\operatorname{inv}(v,[i])_a. \end{equation} \end{corol} A similar formula with the departure words holds for $\operatorname{crit}_{x,c}^-(\lambda).$ \begin{corol} \begin{equation}\operatorname{crit}_{x,c}^-(\lambda)=\sum_{(v,[i])\in M_{r,s,c}(\lambda)}\operatorname{inv}(v,[i])_d. \end{equation} \end{corol} \begin{proof}
By the third part of Proposition~\ref{motivate Mrsc}, $\square$ contributes to $\operatorname{crit}_{x,c}^-(\lambda)$ if and only if the foot and hand arrive at points $(x_1,y_1)$ and $(x_2,y_2)$ respectively with \begin{equation}\label{departures rs} r+s=r(x_1-x_2)+s(y_1-y_2)\end{equation} and \begin{equation}\label{departures c}y_1-x_1\equiv y_2-x_2\pmod{c}.\end{equation}
The foot and hand arrive at $(x_1,y_1)$ and $(x_2,y_2)$ respectively if and only if they depart from points $(x_1,y_1-1)$ and $(x_2+1,y_2)$ respectively. The condition ~\eqref{departures c} is equivalent to \begin{equation}(y_1-1)-x_1\equiv y_2-(x_2+1)\pmod{c}.\end{equation}
The condition~\eqref{departures rs} is equivalent to
\begin{equation} r+s=r(x_1-(x_2+1))+s((y_1-1)-y_2)+r+s,\end{equation}
so subtracting $r+s$ from both sides,
\begin{equation} r(x_1-(x_2+1))+s((y_1-1)-y_2)=0.\end{equation} That is, the foot and hand of $\square$ depart from points lying on the same vertex of $M_{r,s,c}(\lambda)$, and, arguing as for $\operatorname{crit}_{x,c}^+$, such pairs are exactly the inversions of the departure words. \end{proof}
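Corollary~\ref{firstcritformulae} can be checked in examples by comparing $\operatorname{crit}_{x,c}^+$ computed from the boxes with the total number of inversions of the arrival words, where we read an inversion of an arrival word as an $N$ followed by a later $W$. The sketch below does this, reusing \texttt{statistics} and \texttt{boundary\_edges} from the earlier sketches; the test partitions are arbitrary.
\begin{verbatim}
def arrival_inversions(parts, r, s, c, extra=40):
    # sum over vertices of the number of (N, W) pairs, N before W,
    # in the arrival words of the truncated boundary
    words = defaultdict(list)
    for d, (x, y) in boundary_edges(parts, extra):
        words[(r * x + s * y, (y - x) % c)].append(d)
    total = 0
    for w in words.values():
        n_seen = 0
        for letter in w:
            if letter == 'N':
                n_seen += 1
            else:
                total += n_seen
    return total

for lam, r, s, c in (([3], 2, 1, 1), ([2, 2], 1, 1, 2), ([5, 3, 3, 1], 3, 2, 2)):
    assert statistics(lam, r, s, c)[0] == arrival_inversions(lam, r, s, c)
\end{verbatim}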
We now outline a framework for inductive proofs that the statistics in hypotheses 1-3 of Proposition~\ref{bijection properties} are determined by the multigraph $M_{r,s,c},$ using an ordering $<_{r,s,c}$ on partitions and multigraphs. The key result in this direction is Proposition~\ref{acc points are useful}.
\subsection{The order $<_{r,s,c}$}
The proofs that $M_{r,s,c}(\lambda)$ determines each property of $\lambda$ will proceed by induction on $|\lambda|$, adding a box at each step. Since the structure of $M_{r,s,c}(\lambda)$ is somewhat delicate, we have to be careful when choosing which box to add. The following ordering on partitions gives us a framework for adding boxes.
If $(x_1,y_1)$ and $(x_2,y_2)$ are two points in $\mathbb{N}^2$, say $(x_1,y_1)<_{r,s,c}(x_2,y_2)$ if any of the following hold. \begin{itemize} \item $sy_1+rx_1<sy_2+rx_2$; \item $sy_1+rx_1=sy_2+rx_2$ and $y_1-x_1\equiv y_2-x_2\pmod{c}$, and $y_1-x_1<y_2-x_2$. \end{itemize}
The partial order $<_{r,s,c}$ on points in the plane induces a partial order $>_{r,s,c}$ on partitions as follows. Say that $\lambda'>'_{r,s,c}\lambda$ if $\lambda'$ can be obtained from $\lambda$ by adding a box whose bottom left corner $(x,y)$ is minimal with respect to $<_{r,s,c}$ over all possible bottom left corners of boxes that can be added to $\lambda$. Then for partitions $\mu,\lambda$ say that $\mu>_{r,s,c}\lambda$ if there is a sequence of partitions $\lambda=\lambda_0,\lambda_1,\lambda_2,\ldots,\lambda_m=\mu$ such that for each $i$, $\lambda_i<'_{r,s,c}\lambda_{i+1}.$ If $\mu>'_{r,s,c}\lambda$, we say that $\mu$ is a \textit{successor} of $\lambda$ with respect to $>_{r,s,c}.$ Every partition has a successor with respect to $>_{r,s,c}$, but successors are not necessarily unique.
\begin{ex}\label{rscexamples} Let $c=2$, $r=3$, and $s=2$. There are three boxes that could be added to the Young diagram of $(3,1)$ to give another partition. They have bottom left corners at $(0,3)$, $(1,1)$, and $(2,0)$, with values of $3x+2y$ of $6,5$ and $6$ respectively. So $(3,1)$ has a unique successor with respect to $<_{3,2,2}$, which is $(3,2)$. \begin{center} \begin{tikzpicture}[scale=0.4] \draw (0,4)--(0,0); \draw (1,3)--(1,0); \draw (2,1)--(2,0); \draw (0,3)--(1,3); \draw (0,2)--(1,2); \draw (0,1)--(2,1); \draw (0,0)--(3,0); \end{tikzpicture} \begin{tikzpicture}[scale=0.4] \draw (0,4)--(0,0); \draw (1,3)--(1,0); \draw (2,2)--(2,0); \draw (0,3)--(1,3); \draw (0,2)--(2,2); \draw (0,1)--(2,1); \draw (0,0)--(3,0); \end{tikzpicture} \end{center} For $(3,2)$, the boxes that could be added to the diagram have bottom left corners $(0,3)$, $(1,2)$ and $(2,0)$, with values of $3x+2y$ of $6, 7$ and $6$ respectively. The values of $y-x$ for $(0,3)$ and $(2,0)$ have different parity so $(2,0)\not<_{3,2,2}(0,3)$, and both $(4,2)$ and $(3,2,1)$ are successors of $(3,2)$. Note that $(4,1)\not >_{3,2,2} (3,1)$. \end{ex}
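The computation in Example~\ref{rscexamples} is easy to automate. The following sketch lists the $>'_{r,s,c}$-successors of a partition by locating the $<_{r,s,c}$-minimal addable corners (parts again read as column heights); it is a direct transcription of the definition and reproduces the successors found above.
\begin{verbatim}
def addable_corners(parts):
    # bottom left corners (x, y) of boxes that can be added to the
    # diagram (parts read as column heights)
    corners = []
    for i, p in enumerate(parts):
        if i == 0 or parts[i - 1] > p:
            corners.append((i, p))        # on top of column i+1
    corners.append((len(parts), 0))       # a new column of height 1
    return corners

def successors(parts, r, s, c):
    # all >'_{r,s,c}-successors of the partition
    corners = addable_corners(parts)
    def smaller(p, q):
        # the order <_{r,s,c} on points
        (x1, y1), (x2, y2) = p, q
        if r * x1 + s * y1 < r * x2 + s * y2:
            return True
        return (r * x1 + s * y1 == r * x2 + s * y2
                and (y1 - x1 - (y2 - x2)) % c == 0
                and y1 - x1 < y2 - x2)
    minimal = [p for p in corners if not any(smaller(q, p) for q in corners)]
    result = []
    for (x, y) in minimal:
        new = list(parts) + [0]
        new[x] += 1
        result.append([h for h in new if h > 0])
    return result

print(successors([3, 1], 3, 2, 2))   # [[3, 2]]
print(successors([3, 2], 3, 2, 2))   # [[4, 2], [3, 2, 1]]
\end{verbatim}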
Note that if $\mu>_{r,s,c}\lambda$, then all boxes of the Young diagram of $\lambda$ are also boxes of the Young diagram of $\mu$, but as Example~\ref{rscexamples} shows the converse is not true in general. \begin{defn} For a partition $\mu$ with the property that whenever $\mu$ strictly contains $\lambda$, we also have $\mu>_{r,s,c}\lambda$, we call $\mu$ an \textit{accumulation point for $>_{r,s,c}$}. \end{defn}
The next section describes a family of accumulation points and proves some key properties.
\subsection{The accumulation points $\lambda_{r,s,k}$}
\begin{defn} For a given natural number $k$, the partition $\lambda_{r,s,k}$ is the partition with Young diagram consisting of all boxes with top right corners on or below the line $sy+rx=k$. \end{defn}
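Concretely, column $i$ of $\lambda_{r,s,k}$ can be filled up to the largest height $y_i$ with $sy_i+ri\leq k$, that is $y_i=\lfloor(k-ri)/s\rfloor$, so the partition is generated by the following short sketch (ours).
\begin{verbatim}
def lambda_rsk(r, s, k):
    # column heights of lambda_{r,s,k}: column i has the largest
    # height y_i with s*y_i + r*i <= k
    parts = []
    i = 1
    while (k - r * i) // s > 0:
        parts.append((k - r * i) // s)
        i += 1
    return parts

print(lambda_rsk(3, 2, 54))
# [25, 24, 22, 21, 19, 18, 16, 15, 13, 12, 10, 9, 7, 6, 4, 3, 1]
\end{verbatim}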
\begin{ex} The Young diagram for $\lambda_{3,2,54}$ is given in Figure~\ref{3,2,54}. \begin{figure}
\caption{the Young diagram of $\lambda_{3,2,54}$}
\label{3,2,54}
\end{figure} \end{ex} \begin{prop}\label{accumulation point} Let $r,s,c,k$ be positive integers. Let $\mu$ be a partition with diagram strictly contained in the diagram of $\lambda_{r,s,k}$. Then, any successor $\mu^+$ of $\mu$ with respect to $>_{r,s,c}$ has diagram contained in the diagram of $\lambda_{r,s,k}$. In particular, $\lambda_{r,s,k}$ is an accumulation point for $>_{r,s,c}.$
\end{prop} \begin{proof} If $(x,y)$ is the top right corner of a box in $\mu$, then since the diagram for $\mu$ is contained in the diagram of $\lambda_{r,s,k},$ $sy+rx\leq k$. So, the bottom left corner of the same box is at $(x-1,y-1)$ with $s(y-1)+r(x-1)\leq k-r-s.$
Since the containment of $\mu$ in $\lambda_{r,s,k}$ is strict, there is at least one box of $\lambda_{r,s,k}$ not contained in the diagram of $\mu$; if its top right corner is $(x,y)$, its bottom left corner satisfies $s(y-1)+r(x-1)\leq k-r-s$. Moreover, since translating a box with top right corner $(x,y)$ left or down decreases $s(y-1)+r(x-1)$, there is a box $\square_1$ with bottom left corner $(x-1,y-1)$ that can be added to $\mu$ to give a valid partition diagram and that satisfies $s(y-1)+r(x-1)\leq k-r-s$. Now, if $\mu^+$ is not contained in $\lambda_{r,s,k}$, then $\mu^+$ contains some box $\square_2$ with top right corner $(z,w)$ such that $sw+rz>k$, so its bottom left corner $(z-1,w-1)$ satisfies $s(w-1)+r(z-1)>k-r-s.$ This is a contradiction, as $(z-1,w-1)>_{r,s,c}(x-1,y-1)$, and $\square_1$ can be added to $\mu$. \end{proof}
The accumulation points $\lambda_{r,s,k}$ will be extremely useful for two reasons. Firstly, as we check in Proposition~\ref{independence rsk}, $M_{r,s,c}(\lambda_{r,s,k})$ admits a unique $(r,s,c)$-tour whenever $rsc\mid k$, so that $M_{r,s,c}$ must determine any partition statistic in these cases, as it determines the partition itself. Secondly, as we check in Proposition~\ref{rskeventualsuccessor}, if we take successor with respect to $<_{r,s,c}$ iteratively on a given partition, we will eventually hit an accumulation point. This allows us to use the $\lambda_{r,s,k}$ as a base case for iterative proofs that statistics are independent of the choice of $(r,s,c)$-tour, and reduces the problem of understanding how a statistic interacts with $M_{r,s,c}$ to understanding how it behaves when we take successor.
\begin{prop}\label{rskeventualsuccessor} If the diagram of a partition $\mu$ is contained in the diagram of $\lambda_{r,s,k}$ for some $k$, then for any sequence $$\mu=\mu_0<'_{r,s,c}\mu_1<'_{r,s,c}\cdots<'_{r,s,c}\mu_m$$ where $m=|\lambda_{r,s,k}|-|\mu|$, we must have $\mu_m=\lambda_{r,s,k}$. \end{prop}
\begin{proof} Applying Proposition~\ref{accumulation point} to $\mu_0,\mu_1,\ldots,\mu_m,$ the diagram of $\mu_m$ must be contained in the diagram of $\lambda_{r,s,k}$, and $|\mu_m|=|\mu_0|+m=|\lambda_{r,s,k}|$, so $\mu_m=\lambda_{r,s,k}.$ \end{proof}
We now work towards proving that, in the case $rsc\mid k$, if $M_{r,s,c}(\lambda)=M_{r,s,c}(\lambda_{r,s,k}),$ then $\lambda=\lambda_{r,s,k}.$ First, we collect some restrictions on the arrival words that arise in the $(r,s,c)$-tour corresponding to $\lambda_{r,s,k}$. The condition that $rsc\mid k$ does not limit the usefulness of the $\lambda_{r,s,k}$ as base cases, since to contain the diagram of a given partition we only need $k$ to be large enough.
\begin{prop}\label{Multigraph rsk} Let $rsc\mid k$ and let $k_1=\frac{k}{rs}$. The vertices $(v,[i])$ in the multigraph of $\lambda_{r,s,k}$ all satisfy $v>k-r-s$. Moreover, we have the following constraints on the arrival words at a vertex $(v,[i]).$
\begin{itemize}
\item If $k-r-s<v \leq k-r$, then all letters in the arrival word are $W$s. \item If $k-r<v<k$, all letters in the arrival word are $N$s. \item If $v=k$, then the arrival word at $(k,[0])$ has first letter $W$ and all other letters $N$; for $[i]\not=[0]$, all letters in the arrival word at $(k,[i])$ are $N$s.
\end{itemize} \end{prop} \begin{proof} If a box $\square$ has top right corner $(x_1,y_1)$ with $rx_1+sy_1\leq k-r-s$, then the $2\times 2$ box with centre $(x_1,y_1)$ contains $\square$, along with three other boxes with top right corners $(x_1+1,y_1)$, $(x_1,y_1+1)$ and $(x_1+1,y_1+1)$. \begin{center} \begin{tikzpicture}[scale=0.9] \draw (0,0)--(2,0)--(2,2)--(0,2)--(0,0); \draw(1,0)--(1,2); \draw(0,1)--(2,1); \node[left] at (0,1) {$y_1$}; \node[left] at (0,2) {$y_1+1$}; \node[below] at (1,0) {$x_1$}; \node[below] at (2,0) {$x_1+1$}; \draw[red] (0,2.333)--(2.667,0); \node[left] at (0,2.4) {$rx+sy=k-r-s$}; \draw[red] (1.167,3)--(4.667,0); \node[left] at (1.167,3) {$rx+sy=k$}; \end{tikzpicture} \end{center} These points satisfy $rx_1+s(y_1+1)\leq k-r<k$, $r(x_1+1)+sy_1\leq k-s<k$, and $r(x_1+1)+s(y_1+1)\leq k$. Since $\lambda_{r,s,k}$ contains \textit{all} boxes with top right corners on or below the line $sy+rx=k$, the entire $2\times 2$ box with centre $(x,y)$ is contained in $\lambda_{r,s,k}$ so the boundary never visits $(x,y)$.
Suppose $k-r-s<v \leq k-r$. Any north letter in the arrival word at a vertex $(v,[i])$ is also a north letter in the departure word of some vertex $(v-s,[i-1])$, but $v-s\leq k-r-s$, so there is no such vertex.
Suppose now that $k-r < v \leq k$. Any west letter in the arrival word at a vertex $(v,[i])$ arriving at a point $(x_1,y_1)$ with $rx_1+sy_1=v$ is also a west letter in the departure word of some vertex $(v+r,[i-1])$. We have that $v+r>k$, so the west edge cannot be the top edge of a box in the Young diagram of $\lambda_{r,s,k}$ and must be along the $x$ axis. Therefore, $y_1=0$ and $v=rx_1$ is divisible by $r$. However, by assumption $k$ is divisible by $r$ and therefore $k-r$ and $k$ are consecutive multiples of $r$. So, this is only possible if $v=k$. Since the value of $rx$ decreases as the boundary progresses west along the $x$-axis, there is only one such edge, namely, the edge departing from $\left(\frac{k}{r}+1,0\right)$ and arriving at $\left(\frac{k}{r},0\right)$. \end{proof}
We are now in a position to check our base case. We will show that, if $rsc\mid k$ and $M_{r,s,c}(\mu)=M_{r,s,c}(\lambda_{r,s,k})$, then $\mu=\lambda_{r,s,k}.$ So, the accumulation point $\lambda_{r,s,k}$ acts as a base case for the claim that any statistic is independent of the choice of $(r,s,c)$-tour.
\begin{prop}\label{independence rsk} For fixed integers $r,s,c,k$ with $k=rsk_1$ and $c\mid k_1$, there is a unique $(r,s,c)$-tour of $M_{r,s,c}(\lambda_{r,s,k})$. \end{prop}
\begin{proof} Suppose we pick a different $(r,s,c)$-tour of $M_{r,s,c}(\lambda_{r,s,k})$ corresponding to a partition $\mu$. First, we will show that the partition boundary of $\mu$ must leave the $x$-axis earlier than the boundary of $\lambda_{r,s,k}$. Let $(v,[i])$ be the vertex with $v$ maximal such that the arrival word at $(v,[i])$ changes. Such a vertex certainly exists because any partition boundary differs in finitely many edges from the boundary of the empty partition. Let $(v,[i])_a^\lambda$ and $(v,[i])_a^\mu$ be the arrival words at $(v,[i])$ in the tour corresponding to $\lambda_{r,s,k}$ and $\mu$ respectively. Then, $(v,[i])_a^\mu$ must be a permutation of $(v,[i])_a^\lambda$, so since $(v,[i])_a^\lambda\not=(v,[i])_a^\mu,$ $(v,[i])_a^\lambda$ must contain both $N$s and $W$s. Proposition~\ref{Multigraph rsk} then tells us that either \begin{itemize}
\item $v>k$, in which case any letter in the arrival word at $(v,[i])$ must correspond to an edge on a co-ordinate axis. Since the value of $v$ decreases as the boundary steps west along the $x$-axis, and increases as it steps north along the $y$-axis, we must have $(v,[i])_a^\lambda=WN$.
\item $(v,[i])=(k,[0])$, in which case Proposition~\ref{Multigraph rsk} implies $(v,[i])_a^\lambda$ is a $W$ followed by a string of $N$s, where the $W$ corresponds to an edge on the $x$-axis. \end{itemize} In either case, $(v,[i])_a^\mu$ must begin with an $N$. So, the boundary of $\mu$ must step north off the $x$-axis before it hits the lattice point on the $x$-axis corresponding to $(v,[i])$; otherwise $(v,[i])_a^\mu$ would have first letter $W$. So, the boundary of $\mu$ does step north off the $x$-axis earlier than the boundary of $\lambda_{r,s,k}$. In particular, the boundary of $\mu$ never visits the point $(k_1s,0).$ However, the number of arrivals in $M_{r,s,c}(\lambda_{r,s,k})$ at $(k,[0])$ is the same as the number of lattice points $(x,y)$ with $x\geq 0,$ $y\geq 0$, $rx+sy=k$ and $y-x\equiv 0\pmod{c}.$ Hence, any partition boundary path resulting from a different choice of $(r,s,c)$-tour must visit all such points, including $(k_1s,0),$ a contradiction. \end{proof}
Next, we check that there is a sensible pullback of the ordering $>_{r,s,c}$ to $(r,s,c)$-multigraphs, so that taking a successor means something both at the level of the partition and at the level of the multigraph. We abuse notation and write $>_{r,s,c}$ for the ordering on both multigraphs and partitions.
\begin{prop}\label{westarrivalnorthdeparture} Given an $(r,s,c)$-multigraph $M$, let $S$ be the set of vertices $(w,[i])$ with at least one west edge arriving at $(w,[i])$. Let $(v,[i])\in S$ be such that $v$ is minimal. Then there is an edge from $(v,[i])$ to $(v+s,[i+1])$. \end{prop} \begin{proof}
At least one edge arrives at $(v,[i])$ so at least one edge departs from $(v,[i])$. Any west edge departing from $(v,[i])$ would arrive at $(v-r,[i+1]),$ so $(v-r,[i+1])$ would be in $S$, contradicting the minimality of $v$. Therefore at least one north edge departs from $(v,[i])$, and arrives at $(v+s,[i+1]).$ \end{proof}
\begin{defn} Given an $(r,s,c)$-multigraph $M$, let $(v,[i])\in S$ as in the previous proposition. Then we say $M^+$ is a \textit{successor} of $M$ if $M^+$ can be obtained from $M$ by deleting one west edge from $(v+r,[i-1])$ to $(v,[i])$ and one north edge from $(v,[i])$ to $(v+s,[i+1])$, and adding one north edge from $(v+r,[i-1])$ to $(v+r+s,[i])$ and one west edge from $(v+r+s,[i])$ to $(v+s,[i+1])$. Sometimes we emphasize the vertex $(v,[i])$ and say $M^+$ is a successor of $M$ that \textit{changes from $(v,[i])$}. \end{defn}
\begin{corol}\label{noWoutNin(l,[i])} If $\lambda$ is a partition with $M_{r,s,c}(\lambda)=M$ and $M^+$ is a successor of $M$ changing from $(v,[i])$, then in $M$, $N_{in}(v,[i])=W_{out}(v,[i])=0.$ \end{corol} \begin{proof}
Identical to the proof of Proposition~\ref{westarrivalnorthdeparture}. \end{proof} The next proposition shows that this definition of successors at the level of multigraphs aligns with our definition at the level of partitions.
\begin{prop}\label{successors pass} Let $\lambda$ be a partition with $M_{r,s,c}(\lambda)=M.$ If $M^+$ is a successor of $M$ that changes at $(v,[i])$, then there is a unique partition $\lambda^+$ such that $\lambda^+>'_{r,s,c}\lambda$ and $M^+=M_{r,s,c}(\lambda^+).$ \end{prop} \begin{proof} Let $\lambda'$ be any successor of $\lambda$. Then, the Young diagram of $\lambda'$ consists of all boxes in the Young diagram of $\lambda$ and one additional box $\square$. Let the bottom left corner of $\square$ have co-ordinate $(x_1,y_1)$, where $y_1-x_1\equiv i \pmod{c}$ and $rx_1+sy_1=l.$ Then by definition of a successor, if we take minima over the points $(x,y)$ in $b(\lambda)$, \begin{equation}
l=\min (rx+sy) \end{equation}and\begin{equation}\label{earliest}
y_1-x_1=\min_{\substack{rx+sy=l \\
[y-x]=[i]}}(y-x). \end{equation} Let $s_1$ and $s_2$ be the edges in $b(\lambda)$ arriving at and departing from $(x_1,y_1)$ respectively, and let $s_1'$ and $s_2'$ be the edges in $b(\lambda')$ arriving at and departing from $(x_1+1,y_1+1)$ respectively, as shown in Figure~\ref{super important}. \begin{figure}
\caption{a partition and its successor differ by replacing $s_1$ and $s_2$ with $s_1'$ and $s_2'$}
\label{super important}
\end{figure} Then, the multigraph of $\lambda'$ differs from the multigraph of $\lambda$ only in that one edge from $(l+r,[i-1])$ to $(l,[i])$, and one edge from $(l,[i])$ to $(l+s,[i+1])$ corresponding to $s_1$ and $s_2$ respectively, are deleted, and one edge from $(l+r,[i-1])$ to $(l+r+s,[i])$, and one edge from $(l+r+s,[i])$ to $(l+s,[i+1])$, corresponding to $s_1'$ and $s_2'$ respectively are added. That is, $M_{r,s,c}(\lambda')$ is the successor of $M$ changing from $(l,[i])$.
For uniqueness, given that $M^+$ changes from $M$ at $(l,[i]),$ any successor of $\lambda$ with multigraph $M^+$ must be $\lambda'$ by ~\eqref{earliest} because the value of $y-x$ increases by 1 at every consecutive point visited in the boundary.
For existence, if $M^+$ changes from $M$ at $(v,[j])$ then $v$ is minimal such that there is a west edge into $(v,[j])$ and a north edge out of $(v,[j])$. So, $v=\min_{(x,y)\in b(\lambda)}(rx+sy)$ and there is at least one point $(x,y)$ on the boundary with $rx+sy=v$ such that $[y-x]=[j].$ So, letting $(x_2,y_2)$ minimise $y-x$ over all such points, adding a box with bottom left corner $(x_2,y_2)$ gives a successor $\lambda^+$ of $\lambda$ with multigraph $M^+$. \end{proof}
We are now in a position to prove our key structural proposition.
\begin{prop}\label{acc points are useful} Let $f:\operatorname{Par}\rightarrow\mathbb{R}.$ Suppose there is a function $g:\{M_{r,s,c}(\lambda)\mid\lambda\in\operatorname{Par}\}^2\rightarrow \mathbb{R}$ such that, if $\lambda$ is a partition, and $\lambda^+$ is a successor of $\lambda,$ where $\lambda$ and $\lambda^+$ have $(r,s,c)$-multigraphs $M$ and $M^+$ respectively, \begin{equation} f(\lambda^+)-f(\lambda)=g\left(M^+,M\right).\end{equation}
Then, for any partitions $\mu_1$ and $\mu_2$ such that $M_{r,s,c}(\mu_1)=M_{r,s,c}(\mu_2)$, we have $f(\mu_1)=f(\mu_2).$ \end{prop} \begin{proof} Let $M=M_{r,s,c}(\mu_1)=M_{r,s,c}(\mu_2)$. There is a sequence of multigraphs $M=M_0,M_1,\ldots$ where $M_j$ is a successor of $M_{j-1}$ for each $j$. Set $\lambda_0=\mu_1$ and $\nu_0=\mu_2$. Then, by Proposition~\ref{successors pass} there are sequences of partitions $\lambda_0,\lambda_1,\ldots$ and $\nu_0,\nu_1,\ldots$ such that $M_j=M_{r,s,c}(\lambda_j)=M_{r,s,c}(\nu_j)$, \begin{equation}\lambda_0<'_{r,s,c}\lambda_1<'_{r,s,c}\ldots,\end{equation} and \begin{equation}\nu_0<'_{r,s,c}\nu_1<'_{r,s,c}\ldots.\end{equation} Let $k$ be divisible by $rsc$ and large enough so that all boxes in the Young diagrams of $\mu_1$ or $\mu_2$ lie below the line $rx+sy=k$. By Proposition~\ref{rskeventualsuccessor}, there is some $m$ such that $M_m=M_{r,s,c}(\lambda_{r,s,k})$. By Proposition~\ref{independence rsk}, $\lambda_m=\nu_m=\lambda_{r,s,k}$. Then, \begin{align} f(\mu_1)&=f(\lambda_{r,s,k})-\sum_{i=1}^m\left(f(\lambda_{i})-f(\lambda_{i-1})\right)\\ &=f(\lambda_{r,s,k})-\sum_{i=1}^m g(M_i,M_{i-1})\\ &=f(\lambda_{r,s,k})-\sum_{i=1}^m\left(f(\nu_{i})-f(\nu_{i-1})\right)\\ &=f(\mu_2). \end{align} \end{proof}
Armed with Proposition~\ref{acc points are useful}, checking that $M_{r,s,c}$ determines the area of a partition is particularly straightforward.
\begin{corol}\label{areacorol}
If $\lambda$ and $\mu$ are partitions with $M_{r,s,c}(\lambda)=M_{r,s,c}(\mu)$ then $|\lambda|=|\mu|.$ \end{corol}
\begin{proof} Apply Proposition~\ref{acc points are useful} with $g(M^+,M)=1.$\end{proof}
Next we check that $M_{r,s,c}$ determines $\operatorname{mid}_{x,c}$ and $\operatorname{crit}_{x,c}^++\operatorname{crit}_{x,c}^-.$ In the language of Proposition~\ref{acc points are useful}, Propositions~\ref{middifference} and~\ref{crittotdiff} calculate $g(M^+,M)$ for $f(\lambda)=\operatorname{mid}_{x,c}(\lambda)$ and $f(\lambda)=\operatorname{crit}_{x,c}^+(\lambda)+\operatorname{crit}_{x,c}^-(\lambda)$ respectively.
\subsection{$M_{r,s,c}$ determines $\operatorname{mid}_{x,c}$}
\begin{notn}\label{arrivalwordsuccessors} Let $\lambda$, $M$, $M^+$, $(x,y)$, $(l,[i])$, $s_1$, $s_2$, $s_1'$ and $s_2'$ be as in the proof of Proposition~\ref{successors pass}. For any edge $s$ in the boundary of $\lambda$, write $N_{in}^{\rightarrow s}(w,[j])$ for the number of $N$s in the arrival word at a vertex $(w,[j])$ that correspond to north edges in the boundary of $\lambda$ that occur before $s$. Define $W_{in}^{\rightarrow s}(w,[j])$ analogously for the number of $W$s. Write $N_{in}^{s\rightarrow}(w,[j])$ for the number of $N$s in the arrival word at a vertex $(w,[j])$ that correspond to north edges in the boundary of $\lambda$ that occur after $s$. Define $W_{in}^{s\rightarrow}(w,[j])$ analogously for the number of $W$s. Analogously define $W_{out}^{\rightarrow s}(w,[j])$, $N_{out}^{\rightarrow s}(w,[j])$, $W_{out}^{s\rightarrow}(w,[j])$, and $N_{out}^{s\rightarrow}(w,[j])$ for the departure words. We will use this notation with $s=s_1$ or $s=s_2$. Finally, write $N_{in}^+(w,[j])$ for the number of $N$s in the arrival word at $(w,[j])$ in $M^+$ and define analogously $W_{in}^+, N_{out}^+$ and $W_{out}^+$.
We work in the ring $R$ of functions $V(M)\rightarrow \mathbb{Z}.$ Practically, the only consequence of this is that we write $fg(v,[i])$ for the pointwise product $f(v,[i])g(v,[i])$ and $(f+g)(v,[i])$ for $f(v,[i])+g(v,[i]).$ There should be no confusion between composition and product of functions as functions from $V(M)$ to $\mathbb{Z}$ are not composable. \end{notn}
\begin{ex} Let $\lambda=(4,1),$ $r=3$, $s=1$ and $c=2.$ Then $\min_{(x,y)\in b(\lambda)}(3x+y)=4$ achieved at $(0,4)$ and $(1,1).$ Since $4-0\equiv 1-1 \pmod{2}$ and $1-1<4-0$, we have $(1,1)<_{3,1,2}(0,4)$, so $(4,2)$ is the only $(3,1,2)$-successor of $\lambda$. So, $(l,[i])=(4,[0]).$ \begin{figure}
\caption{the partition $\lambda$ with $(l,[i])=(4,[0])$ and $s_1,s_2,s_1'$ and $s_2'$ labelled}
\end{figure} Then $(N_{in}^{\rightarrow s_1}W_{out}^{s_1 \rightarrow}+N_{out}^+)(7,[1])=1\times1 +1=2.$ \end{ex}
\begin{prop}\label{middifference} Let $\lambda$ be a partition with $M_{r,s,c}(\lambda)=M$. Let $M^+$ be a successor of $M$ that changes from $(l,[i])$. If $\lambda^+>'_{r,s,c}\lambda$ and $M^+=M_{r,s,c}(\lambda^+),$ \begin{equation}\label{middifferenceformula}\operatorname{mid}_{x,c}(\lambda^+)-\operatorname{mid}_{x,c}(\lambda)=\sum_{w=l+1}^{l+s+r-1}\left(N_{out}-N_{in}\right)(w,[i]).\end{equation}
\end{prop} \begin{proof} By Proposition~\ref{successors pass}, the Young diagram of $\lambda^+$ is obtained from that of $\lambda$ by adding a box with bottom corner $(x_1,y_1)$ where $rx_1+sy_1=l$ and $[y_1-x_1]=[i]$.
Proposition~\ref{motivate Mrsc} gives a formula for $\operatorname{mid}_{x,c}(\lambda)$: it is the number of pairs of edges $e_n,e_w$ in the boundary sequence such that $e_n$ is a north edge arriving at a point $(x,y)$ satisfying $rx+sy=v$ and $[y-x]=[j]$, $e_w$ is a west edge occurring after $e_n$ arriving at a point $(x',y')$ satisfying $rx'+sy'=w$ and $[y'-x']=[j]$, where $w$ and $v$ satisfy $-s-r<w-v<0$. \begin{figure}
\caption{the diagrams of $\lambda$ and $\lambda^+$}
\label{one figure to rule them all}
\end{figure}
We account for the change in the number of such pairs when changing $s_1$ to $s_1'$ and $s_2$ to $s_2'$ below: the only changes to $\operatorname{mid}_{x,c}$ will be when $e_n\in \{s_2,s_1'\}$ or $e_w\in \{s_1,s_2'\}.$
By adding $s_1'$ we gain the number of west edges after $s_1$, arriving at points $(x,y)$ on lines $rx+sy=w$, such that $-s-r<w-(l+r+s)<0$ and $[y-x]=[i]$. By deleting $s_1$ we lose the number of north edges occurring before $s_1$ arriving at points $(x,y)$ on lines $rx+sy=v$ such that $-s-r<l-v<0$ and $[y-x]=[i]$. So, the contribution to $\operatorname{mid}_{x,c}(\lambda^+)-\operatorname{mid}_{x,c}(\lambda)$ from switching $s_1$ to $s_1'$ is $S_1$ where
\begin{equation}\label{S1} S_1=\sum_{w=l+1}^{l+r+s-1}\left(W_{in}^{s_1\rightarrow}-N_{in}^{\rightarrow s_1}\right)(w,[i]).\end{equation}
By adding $s_2'$ we gain the number of north edges before $s_2$, arriving at points $(x,y)$ on lines $rx+sy=v$, such that $-s-r<l+s-v<0$, and $y-x\equiv i+1\pmod{c}$. By deleting $s_2$ we lose the number of west edges occurring after $s_2$ arriving at points $(x,y)$ on lines $rx+sy=w$ such that $-s-r<w-(l+s)<0$ and $[y-x]=[i+1].$ So, the contribution to $\operatorname{mid}_{x,c}(\lambda^+)-\operatorname{mid}_{x,c}(\lambda)$ from switching $s_2$ to $s_2'$ is $S_2$ where \begin{equation}\label{S2}S_2=\sum_{v=l+s+1}^{l+r+2s-1}N_{in}^{\rightarrow s_2}(v,[i+1]) -\sum_{v=l-r+1}^{l+s-1}W_{in}^{s_2\rightarrow}(v,[i+1]).\end{equation}
So, \begin{equation}\label{midbreakdown}\operatorname{mid}_{x,c}(\lambda^+)-\operatorname{mid}_{x,c}(\lambda)=S_1+S_2.\end{equation} Now, note that a north edge into $(v,[i+1])$ is also a north edge out of $(v-s,[i])$, and a west edge into $(w,[i+1])$ is also a west edge out of $(w+r,[i])$. Applying this reasoning to \eqref{S2},
\begin{equation}\label{S2v2}S_2=\sum_{w=l+1}^{l+r+s-1}\left(N_{out}^{\rightarrow s_2} -W_{out}^{s_2\rightarrow}\right)(w,[i]).\end{equation} Substituting~\eqref{S1} and~\eqref{S2v2} into~\eqref{midbreakdown}, \begin{equation}\label{diffmid}\operatorname{mid}_{x,c}(\lambda^+)-\operatorname{mid}_{x,c}(\lambda)=\sum_{w=l+1}^{l+s+r-1}\left(W_{in}^{s_1\rightarrow}-N_{in}^{\rightarrow s_1}+N_{out}^{\rightarrow s_2}-W_{out}^{s_2\rightarrow}\right)(w,[i]).\end{equation} Since $s_2$ is a north edge occurring immediately after $s_1$, $W_{out}^{s_2\rightarrow}=W_{out}^{s_1\rightarrow}$, and since $s_1$ is a west edge immediately preceding $s_2$, $N_{out}^{\rightarrow s_2}=N_{out}^{\rightarrow s_1}.$ So, \begin{equation}\label{diffmid2}\operatorname{mid}_{x,c}(\lambda^+)-\operatorname{mid}_{x,c}(\lambda)=\sum_{w=l+1}^{l+s+r-1}\left(W_{in}^{s_1\rightarrow}-N_{in}^{\rightarrow s_1}+N_{out}^{\rightarrow s_1}-W_{out}^{s_1\rightarrow}\right)(w,[i]).\end{equation}
Now, note that at any vertex $(v,[j])$ except $(l,[i])$, we have that \begin{equation}\left(W_{in}^{s_1\rightarrow}+N_{in}^{s_1\rightarrow}\right)(v,[j])=\left(W_{out}^{s_1\rightarrow}+N_{out}^{s_1\rightarrow}\right)(v,[j]),\end{equation} because after $s_1$ we depart every vertex after we arrive at it, the left hand side counting arrivals at the vertex after $s_1$ and the right side counting departures. Rearranging gives \begin{equation}\label{afters1}\left(W_{in}^{s_1\rightarrow}-W_{out}^{s_1\rightarrow}\right)(v,[j])=\left(N_{out}^{s_1\rightarrow}-N_{in}^{s_1\rightarrow}\right)(v,[j]).\end{equation}
Substituting \eqref{afters1} into \eqref{diffmid2}, \begin{equation}\operatorname{mid}_{x,c}(\lambda^+)-\operatorname{mid}_{x,c}(\lambda)=\sum_{w=l+1}^{l+s+r-1}\left(N_{out}^{s_1\rightarrow}-N_{in}^{s_1\rightarrow}-N_{in}^{\rightarrow s_1}+N_{out}^{\rightarrow s_1}\right)(w,[i]).\end{equation} Since $s_1$ is a west edge, $N_{in}^{s_1\rightarrow}+N_{in}^{\rightarrow s_1}=N_{in}$ and $N_{out}^{s_1\rightarrow}+N_{out}^{\rightarrow s_1}=N_{out},$ so \begin{equation} \operatorname{mid}_{x,c}(\lambda^+)-\operatorname{mid}_{x,c}(\lambda)=\sum_{w=l+1}^{l+s+r-1}\left(N_{out}-N_{in}\right)(w,[i]). \end{equation}\end{proof}
\begin{corol}\label{midcorol} If $\lambda$ and $\mu$ are partitions with $M_{r,s,c}(\lambda)=M_{r,s,c}(\mu),$ then $\operatorname{mid}_{x,c}(\lambda)=\operatorname{mid}_{x,c}(\mu).$ \end{corol} \begin{proof}
Apply Proposition~\ref{acc points are useful} with \begin{equation}g(M^+,M)=\sum_{w=l+1}^{l+s+r-1}\left(N_{out}-N_{in}\right)(w,[i]), \end{equation} where $M^+$ is the successor of $M$ that changes from $(l,[i]).$ \end{proof}
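Proposition~\ref{middifference} can also be tested computationally on small cases. The sketch below reuses the earlier helpers, adds the analogous departure counts, and checks the formula for the successors computed in Example~\ref{rscexamples}; the corner passed in is the bottom left corner of the added box, which determines $(l,[i])$.
\begin{verbatim}
def multigraph_out_counts(parts, r, s, c, extra=40):
    # N_out and W_out at each vertex: an N edge arriving at (x, y)
    # departs from (x, y-1); a W edge arriving at (x, y) departs
    # from (x+1, y)
    N_out, W_out = defaultdict(int), defaultdict(int)
    for d, (x, y) in boundary_edges(parts, extra):
        dx, dy = (x, y - 1) if d == 'N' else (x + 1, y)
        key = (r * dx + s * dy, (dy - dx) % c)
        (N_out if d == 'N' else W_out)[key] += 1
    return N_out, W_out

def mid_difference_check(lam, lam_plus, corner, r, s, c):
    # corner = bottom left corner of the added box; it determines (l,[i])
    x1, y1 = corner
    l, i = r * x1 + s * y1, (y1 - x1) % c
    N_in, _ = multigraph_counts(lam, r, s, c)
    N_out, _ = multigraph_out_counts(lam, r, s, c)
    rhs = sum(N_out[(w, i)] - N_in[(w, i)] for w in range(l + 1, l + r + s))
    lhs = statistics(lam_plus, r, s, c)[2] - statistics(lam, r, s, c)[2]
    return lhs == rhs

# successors from the example above: (3,1) -> (3,2), (3,2) -> (4,2), (3,2,1)
assert mid_difference_check([3, 1], [3, 2], (1, 1), 3, 2, 2)
assert mid_difference_check([3, 2], [4, 2], (0, 3), 3, 2, 2)
assert mid_difference_check([3, 2], [3, 2, 1], (2, 0), 3, 2, 2)
\end{verbatim}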
\subsection{$M_{r,s,c}$ determines $\operatorname{crit}_{x,c}^++\operatorname{crit}_{x,c}^-$}
\begin{prop}\label{crittotdiff} Let $\lambda$ be a partition with $M_{r,s,c}(\lambda)=M$ and let $M^+$ be a successor of $M$ that changes from $(l,[i])$. Then, if $\lambda^+$ is the successor of $\lambda$ with multigraph $M^+,$ \begin{equation}
\operatorname{crit}_{x,c}^+(\lambda^+)+\operatorname{crit}_{x,c}^-(\lambda^+)-\operatorname{crit}_{x,c}^+(\lambda)-\operatorname{crit}_{x,c}^-(\lambda) \end{equation} is equal to \begin{equation}W_{in}(l,[i])-1+(W_{in}-W_{out})(l+r+s,[i]).\end{equation} \end{prop} \begin{proof}
First, we compute $\operatorname{crit}_{x,c}^+(\lambda^+)-\operatorname{crit}_{x,c}^+(\lambda)$. Corollary~\ref{firstcritformulae} implies that \begin{equation}\label{differenceplus}
\operatorname{crit}_{x,c}^+(\lambda^+)-\operatorname{crit}_{x,c}^+(\lambda)=\sum_{v\in M_{r,s,c}(\lambda^+)}\operatorname{inv}(v_a)-\sum_{v\in M_{r,s,c}(\lambda)}\operatorname{inv}(v_a) \end{equation}
We keep the notation of the previous proposition and reference Figure~\ref{one figure to rule them all} throughout. The only nonzero terms in the difference ~\eqref{differenceplus} come from $v\in\{(l,[i]), (l+r+s,[i]), (l+s,[i+1])\}$. We work case-by-case through these vertices. \begin{itemize} \item We delete the first arrival at $(l,[i])$, corresponding to deleting $s_1.$ All arrivals at $(l,[i])$ are $W$s by Corollary~\ref{noWoutNin(l,[i])}, so this does not affect $\operatorname{inv}(l,[i])_a.$ \item We add an $N$ to the arrival word at $(l+r+s,[i])$, corresponding to adding $s_1'.$ \begin{center}
\begin{tikzpicture}
\draw (0,0.5)--(0,0)--(3.95,0)--(3.95,0.5);
\draw (4.05,0.5)--(4.05,0)--(7,0)--(7,0.5);
\node[below] at (1.975,0) {before $s_1$};
\node[below] at (5.502,0) {after $s_1$};
\draw (-0.25,-0.5)--(-0.25,-1)--(3.7,-1)--(3.7,-0.5);
\draw (3.8,-0.5)--(3.8,-1)--(4.2,-1)--(4.2,-0.5);
\node at (4,-0.75) {$N$};
\draw (4.3,-0.5)--(4.3,-1)--(7.25,-1)--(7.25,-0.5);
\node[below] at (1.725,-1) {before $s_1'$};
\node[below] at (4,-1) {$s_1'$};
\node[below] at (5.752,-1) {after $s_1'$};
\end{tikzpicture} \end{center}
This $N$ is the first letter in an inversion with second letter any $W$ occurring after $s_1'$, so $(l+r+s,[i])$ contributes $W_{in}^{s_1\rightarrow}(l+r+s,[i])$ to~\eqref{differenceplus}. \item We replace the first $N$ in the arrival word at $(l+s,[i+1])$ (corresponding to $s_2$) with a $W$ (corresponding to $s_2'$). Therefore, we lose all inversions with the replaced $N$ edge as their first letter passing from $\lambda$ to $\lambda^+$. There are $W_{in}^{s_2\rightarrow}(l+s,[i+1])$ such inversions. \begin{center}
\begin{tikzpicture}
\draw (-0.25,0.5)--(-0.25,0)--(3.7,0)--(3.7,0.5);
\draw (3.8,0.5)--(3.8,0)--(4.2,0)--(4.2,0.5);
\node at (4,0.25) {$N$};
\draw (4.3,0.5)--(4.3,0)--(7.25,0)--(7.25,0.5);
\node at (1.8,0.25) {$WWWWW\ldots WWW$};
\node[below] at (1.725,0) {before $s_2$};
\node[below] at (4,0) {$s_2$};
\node[below] at (5.752,0) {after $s_2$};
\draw (-0.25,-0.5)--(-0.25,-1)--(3.7,-1)--(3.7,-0.5);
\draw (3.8,-0.5)--(3.8,-1)--(4.2,-1)--(4.2,-0.5);
\node at (4,-0.75) {$W$};
\node at (1.8,-0.75) {$WWWWW\ldots WWW$};
\draw (4.3,-0.5)--(4.3,-1)--(7.25,-1)--(7.25,-0.5);
\node[below] at (1.725,-1) {before $s_2'$};
\node[below] at (4,-1) {$s_2'$};
\node[below] at (5.752,-1) {after $s_2'$};
\end{tikzpicture} \end{center}
We gain no inversions from the new $W$ edge, because $s_2$ was the first north departure from $(l,[i])$ in the tour corresponding to $\lambda$. So, $(l+s,[i+1])$ contributes $-W_{in}^{s_2\rightarrow}(l+s,[i+1])$ to ~\eqref{differenceplus}. \end{itemize}
So, \begin{equation}\label{star2}\operatorname{crit}_{x,c}^+(\lambda^+)-\operatorname{crit}_{x,c}^+(\lambda)=W_{in}^{s_1\rightarrow}(l+r+s,[i])-W_{in}^{s_2\rightarrow}(l+s,[i+1]).\end{equation}
A west arrival before (respectively after) $s_2$ at $(l+s,[i+1])$ is a west departure before (respectively after) $s_2$ from $(l+r+s,[i]).$ Combining this logic with \eqref{star2},
\begin{equation}\label{star3} \operatorname{crit}_{x,c}^+(\lambda^+)-\operatorname{crit}_{x,c}^+(\lambda) = \left(W_{in}^{s_1\rightarrow}-W_{out}^{s_2\rightarrow}\right)(l+r+s,[i]).\end{equation}
We now analyse \begin{equation}\label{differenceminus}\operatorname{crit}_{x,c}^-(\lambda^+)-\operatorname{crit}_{x,c}^-(\lambda)=\sum_{v\in M_{r,s,c}(\lambda^+)}\operatorname{inv}(v_d)-\sum_{v\in M_{r,s,c}(\lambda)}\operatorname{inv}(v_d).\end{equation} The departure words at every vertex except for $(l,[i])$, $(l+r+s,[i])$, and $(l+r,[i-1])$ are unchanged so the only nonzero terms in~\eqref{differenceminus} come from $v\in\{(l,[i]),(l+r+s,[i]),(l+r,[i-1])\}.$ An analogous argument to the above shows that the contribution of $(l+r,[i-1])$ to~\eqref{differenceminus} is $\left(W_{out}^{s_1\rightarrow}-N_{out}^{\rightarrow s_1}\right)(l+r,[i-1])$, the contribution of $(l+r+s,[i])$ is $N_{out}^{\rightarrow s_2}(l+r+s,[i])$, and $(l,[i])$ does not contribute. So,
\begin{equation}\label{dagger2}\operatorname{crit}_{x,c}^-(\lambda^+)-\operatorname{crit}_{x,c}^-(\lambda)=\left(W_{out}^{s_1\rightarrow}-N_{out}^{\rightarrow s_1}\right)(l+r,[i-1])+N_{out}^{\rightarrow s_2}(l+r+s,[i]).\end{equation} A west departure from $(l+r,[i-1])$ is a west arrival at $(l,[i])$, and a north departure from $(l+r,[i-1])$ is a north arrival at $(l+r+s,[i]),$ so \begin{equation}\label{dagger3} \operatorname{crit}_{x,c}^-(\lambda^+)-\operatorname{crit}_{x,c}^-(\lambda)= W_{in}^{s_1\rightarrow}(l,[i])-\left(N_{in}^{\rightarrow s_1}-N_{out}^{\rightarrow s_2}\right)(l+r+s,[i]). \end{equation} Now, since $s_1$ is the first edge to arrive at $(l,[i]),$ \begin{equation}\label{s1firstwest} W_{in}^{s_1\rightarrow}(l,[i])=W_{in}(l,[i])-1.\end{equation} Since $s_1$ does not arrive at $(l+r+s,[i])$, we leave $(l+r+s,[i])$ before $s_1$ the same number of times as we arrive before $s_1$. So, \begin{equation}\label{arrivalsbeforearedeparturesafter}(N_{in}^{\rightarrow s_1}+W_{in}^{\rightarrow s_1})(l+r+s,[i])=(N_{out}^{\rightarrow s_2}+W_{out}^{\rightarrow s_2})(l+r+s,[i]).\end{equation} Rearranging, \begin{equation}\label{reexpressNin}N_{in}^{\rightarrow s_1}(l+r+s,[i])=(N_{out}^{\rightarrow s_2}+W_{out}^{\rightarrow s_2}-W_{in}^{\rightarrow s_1})(l+r+s,[i]).\end{equation} Substituting \eqref{reexpressNin} and \eqref{s1firstwest} into \eqref{dagger3}, \begin{equation}\label{dagger4}\operatorname{crit}_{x,c}^-(\lambda^+)-\operatorname{crit}_{x,c}^-(\lambda)=W_{in}(l,[i])-1+(W_{in}^{\rightarrow s_1}-W_{out}^{\rightarrow s_2})(l+r+s,[i]).\end{equation} Since $(l+r+s,[i])$ is not an endpoint of $s_1$ or $s_2$, \begin{equation}\label{westin} (W_{in}^{\rightarrow s_1}+W_{in}^{ s_1\rightarrow})(l+r+s,[i])=W_{in}(l+r+s,[i])\end{equation} and \begin{equation}\label{westout}(W_{out}^{\rightarrow s_2}+W_{out}^{ s_2\rightarrow})(l+r+s,[i])=W_{out}(l+r+s,[i]).\end{equation} Adding \eqref{dagger4} and \eqref{star3}, and then applying \eqref{westout} and \eqref{westin} completes the proof. \end{proof} \begin{corol}\label{crittotcorol} If $\lambda$ and $\mu$ are partitions such that $M_{r,s,c}(\mu)=M_{r,s,c}(\lambda)$ then \begin{equation}\operatorname{crit}_{x,c}^+(\lambda)+\operatorname{crit}_{x,c}^-(\lambda)=\operatorname{crit}_{x,c}^+(\mu)+\operatorname{crit}_{x,c}^-(\mu).\end{equation} \end{corol} \begin{proof} Apply Proposition~\ref{acc points are useful} with \begin{equation}g(M^+,M)=W_{in}(l,[i])-1+(W_{in}-W_{out})(l+r+s,[i]),\end{equation} where the calculations $W_{in}$ and $W_{out}$ are done with respect to the multigraph $M$, and $M^+$ is the successor of $M$ changing from $(l,[i]).$ \end{proof}
So, we know that $M_{r,s,c}(\lambda)$ determines the $c$-core of $\lambda$, $|\lambda|,$ $\operatorname{mid}_{x,c}(\lambda)$ and $\operatorname{crit}_{x,c}^+(\lambda)+\operatorname{crit}_{x,c}^-(\lambda),$ and that any bijection $I_{r,s,c}$ preserving $M_{r,s,c}$ therefore satisfies hypotheses 1-3 of Proposition~\ref{bijection properties}. It will be useful later when proving that $I_{r,s,c}$ satisfies the fourth criterion in Proposition~\ref{bijection properties} to have a formula for $\operatorname{crit}_{x,c}^+(\lambda)+\operatorname{crit}_{x,c}^-(\lambda)$ in terms of $M_{r,s,c}(\lambda).$ This is what Proposition~\ref{ctot formula} does.
\begin{prop}\label{ctot formula} Let $\lambda$ be a partition. If $k=rsk_1$ where $c\mid k_1$ and $\lambda <_{r,s,c} \lambda_{r,s,k},$ then
\begin{equation}\label{crittot}(\operatorname{crit}_{x,c}^++\operatorname{crit}_{x,c}^-)(\lambda)= \sum_{\substack{(v,[j]) \\ v\leq k}} N_{in}W_{in}(v,[j])-\left\lfloor\frac{k_1(s+r)}{\lcm(c,s+r)}\right\rfloor.\end{equation} \end{prop}
\begin{proof} First, we prove that~\eqref{crittot} holds when $\lambda=\lambda_{r,s,k}$.
We will show that for all boxes $\square\in\lambda_{r,s,k}$, $sa(\square)-rl(\square)\in(-s,r)$, and hence that the left hand side of ~\eqref{crittot} is zero at $\lambda_{r,s,k}$. We will then check that the right hand side of \eqref{crittot} is zero at $\lambda_{r,s,k}.$
The $i$th part of $\lambda_{r,s,k}$ corresponds to a column with top right corner $(i,y_i)$ where $y_i$ is maximal such that $sy_i+ri\leq k$. So, \begin{equation}y_i=\left\lfloor\frac{k-ri}{s}\right\rfloor=\left\lfloor\frac{k_1rs-ri}{s}\right\rfloor=k_1r-\left\lceil\frac{ri}{s}\right\rceil.\end{equation} Similarly, the number of parts of $\lambda_{r,s,k}$ of size at least $j$ corresponds to a row with top right corner $(x_j,j)$ where $x_j$ is maximal such that $sj+rx_j\leq k$, so \begin{equation}x_j=\left\lfloor\frac{k-sj}{r}\right\rfloor=\left\lfloor\frac{k_1rs-sj}{r}\right\rfloor=k_1s-\left\lceil\frac{sj}{r}\right\rceil.\end{equation}
Now, let $\square\in\lambda_{r,s,k}$ be a box with top right corner $(i,j)$. Then, the arm of $\square$ is given by $y_i-j$ and the leg of $\square$ is given by $x_j-i$. So, \begin{align}sa(\square)-rl(\square)&=s(y_i-j)-r(x_j-i)\\ &=k_1rs-s\left\lceil\frac{ri}{s}\right\rceil-sj-k_1rs+r\left\lceil\frac{sj}{r}\right\rceil+ri\\ &=\left(r\left\lceil\frac{sj}{r}\right\rceil-sj\right)-\left(s\left\lceil\frac{ri}{s}\right\rceil-ri\right). \end{align}
Now, consider the two bracketed quantities separately, setting $\alpha=\left(r\left\lceil\frac{sj}{r}\right\rceil-sj\right)$ and $\beta=-\left(s\left\lceil\frac{ri}{s}\right\rceil-ri\right)$. For the first bracket we have that \begin{equation}r\left(\frac{sj}{r}\right)\leq r\left\lceil \frac{sj}{r}\right\rceil< r\left(\frac{sj}{r}+1\right),\end{equation} so \begin{equation}0\leq r\left\lceil\frac{sj}{r}\right\rceil-sj< r.\end{equation}
Similarly for the second bracket, \begin{equation}-s< ri-s\left\lceil\frac{ri}{s}\right\rceil\leq 0.\end{equation}
So, $sa(\square)-rl(\square)$ can be written as $\alpha+\beta$ with $\alpha\in[0,r)$ and $\beta\in(-s,0]$, and therefore $sa(\square)-rl(\square)\in(-s,r).$
Therefore, \begin{equation}\operatorname{crit}_{x,c}^+(\lambda_{r,s,k})+\operatorname{crit}_{x,c}^-(\lambda_{r,s,k})=0.\end{equation}
Next we evaluate the right hand side of \eqref{crittot} at $\lambda_{r,s,k}$. Proposition \ref{Multigraph rsk} tells us that for all vertices $(v,[i])$ such that $0\leq v<k$, the arrival word at $(v,[i])$ in $M_{r,s,c}(\lambda_{r,s,k})$ does not contain both an $N$ and a $W$. So, for all such $(v,[i])$ we have $N_{in}W_{in}(v,[i])=0$. So, the right hand side of \eqref{crittot} simplifies to \begin{equation}\label{crittotrsk1}\sum_{i=0}^{c-1}N_{in}W_{in}(k,[i])-\left\lfloor\frac{k_1(s+r)}{\lcm(c,s+r)}\right\rfloor\end{equation}
Proposition~\ref{Multigraph rsk} also tells us that $W_{in}(k,[i])=0$ unless $[i]=[0]$, and that $W_{in}(k,[0])=1$, so we can rewrite \eqref{crittotrsk1} as
\begin{equation}\label{crittotrsk2}N_{in}(k,[0])-\left\lfloor\frac{k_1(s+r)}{\lcm(c,s+r)}\right\rfloor.\end{equation}
So, it suffices to show that $N_{in}(k,[0])=\left\lfloor\frac{k_1(s+r)}{\lcm(c,s+r)}\right\rfloor.$ The north edges in the boundary of $\lambda_{r,s,k}$ arriving at vertices $(k,[i])$ correspond to points $(x,y)$ with $y>0$ and $x\geq 0$ such that $rx+sy=k$. These points have coordinates $\{(s(k_1-1),r), (s(k_1-2),2r),\ldots, (s, (k_1-1)r), (0,k_1r)\}$. Now, $N_{in}(k,[0])$ counts the number of these points that also lie on a line $y-x=i$ for $[i]=[0]$. The set of values of $y-x$ for this set of points is $\{r+s-k_1s, 2(r+s)-k_1s,\ldots, k_1(r+s)-k_1s\}$. Letting $l(r+s)=\lcm(c,r+s)$, the values of $y-x$ that give us the same congruence class as $0$ when taken modulo $c$ are of the form $ml(r+s)-k_1s$ for some integer $m$. The number of values of this form in the given set is indeed $\left\lfloor\frac{k_1(s+r)}{\lcm(c,s+r)}\right\rfloor.$
Now suppose $\lambda<_{r,s,c}\lambda_{r,s,k}$ is maximal with respect to $>_{r,s,c}$ such that the proposition is false. In particular, the proposition holds for any successor $\lambda^+>'_{r,s,c}\lambda$. Let $M^+$ be a successor of $M$ that changes from $(l,[i]),$ and let $\lambda^+$ be the successor of $\lambda$ with multigraph $M^+$. Then, $(\operatorname{crit}_{x,c}^++\operatorname{crit}_{x,c}^-)(\lambda^+)-(\operatorname{crit}_{x,c}^++\operatorname{crit}_{x,c}^-)(\lambda)$ can be written as $\Delta_1$, where \begin{equation}\label{Delta}\Delta_1=W_{in}(l,[i])-1+(W_{in}-W_{out})(l+r+s,[i]).\end{equation}
By assumption, \begin{equation}\label{crittotlambdaplus}(\operatorname{crit}_{x,c}^++\operatorname{crit}_{x,c}^-)(\lambda^+)= \sum_{\substack{(v,[j]) \\ v\leq k}}N^+_{in}W^+_{in}(v,[j])-\left\lfloor\frac{k_1(s+r)}{\lcm(c,s+r)}\right\rfloor\end{equation} So, combining \eqref{Delta} and \eqref{crittotlambdaplus}, \begin{equation}(\operatorname{crit}_{x,c}^++\operatorname{crit}_{x,c}^-)(\lambda)= \sum_{\substack{(v,[j]) \\ v\leq k}}N^+_{in}W^+_{in}(v,[j])-\left\lfloor\frac{k_1(s+r)}{\lcm(c,s+r)}\right\rfloor-\Delta_1.\end{equation}
First, we note that a vertex $(v,[j])$ contributes the same to the sums $$\sum_{\substack{(v,[j]) \\ v\leq k}}N_{in}W_{in}(v,[j])$$ taken over the multigraphs $M$ or $M^+$ unless $(v,[j])\in\{(l,[i]),(l+r+s,[i]),(l+s,[i+1])\}$. In fact, since there are no north edges into $(l,[i])$ in $M$ or $M^+$, we only need consider terms with $(v,[j])\in\{(l+r+s,[i]),(l+s,[i+1])\}$. So, \begin{align*}(\operatorname{crit}_{x,c}^++\operatorname{crit}_{x,c}^-)(\lambda)&= \sum_{\substack{(v,[j]) \\ v\leq k}}N_{in}W_{in}(v,[j])-\left\lfloor\frac{k_1(s+r)}{\lcm(c,s+r)}\right\rfloor -\Delta_1+\Delta_2 \stepcounter{equation}\tag{\theequation}\label{differencefromM}\end{align*} where
\begin{equation}\Delta_2=(N^+_{in}W^+_{in}-N_{in}W_{in})(l+r+s,[i])+(N^+_{in}W^+_{in}-N_{in}W_{in})(l+s,[i+1]).\stepcounter{equation}\tag{\theequation}\label{Delta2}\end{equation}
Because $M^+$ changes from $M$ at $(l,[i])$, we have $N^+_{in}(l+s,[i+1])=N_{in}(l+s,[i+1])-1$, $W^+_{in}(l+s,[i+1])=W_{in}(l+s,[i+1])+1$, $N^+_{in}(l+r+s,[i])=N_{in}(l+r+s,[i])+1$ and $W^+_{in}(l+r+s,[i])=W_{in}(l+r+s,[i])$, so \eqref{Delta2} simplifies to \begin{equation}\label{Delta22}\Delta_2=N_{in}(l+s,[i+1])-W_{in}(l+s,[i+1])+W_{in}(l+r+s,[i])-1. \end{equation}
A west arrival at $(l+s,[i+1])$ is the same as a west departure from $(l+r+s,[i]),$ and a north arrival at $(l+s,[i+1])$ is the same as a north departure from $(l,[i]),$ so \begin{equation}\label{Delta23}\Delta_2=N_{out}(l,[i])-\left(W_{out}-W_{in}\right)(l+r+s,[i])-1. \end{equation} By Corollary~\ref{noWoutNin(l,[i])}, all edges leaving $(l,[i])$ are north edges and all edges arriving are west edges. The same number of edges arrive at and leave, so $N_{out}(l,[i])=W_{in}(l,[i]).$ So,
\begin{equation}\label{EqualDeltas}\Delta_2=(W_{in}-W_{out})(l+r+s,[i]) +W_{in}(l,[i])-1=\Delta_1.\end{equation} Substituting \eqref{EqualDeltas} into \eqref{differencefromM} completes the proof.\end{proof}
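Proposition~\ref{ctot formula} is straightforward to verify numerically: the sketch below evaluates the right hand side of~\eqref{crittot} from the truncated boundary and compares it with $\operatorname{crit}_{x,c}^++\operatorname{crit}_{x,c}^-$ computed from the boxes, reusing the earlier helpers; the choice of truncation and of test data is ours.
\begin{verbatim}
from math import lcm

def crit_total_formula(parts, r, s, c, k1):
    # right hand side of the formula, for k = r*s*k1 with c | k1
    k = r * s * k1
    N_in, W_in = multigraph_counts(parts, r, s, c, extra=k + 1)
    total = sum(n * W_in.get(key, 0) for key, n in N_in.items()
                if key[0] <= k)
    return total - (k1 * (r + s)) // lcm(c, r + s)

# the two sides agree whenever lambda fits inside lambda_{r,s,k}
for lam, r, s, c, k1 in (([2, 2], 1, 1, 2, 4), ([5, 3, 3, 1], 3, 2, 2, 4)):
    cp, cm, _ = statistics(lam, r, s, c)
    assert cp + cm == crit_total_formula(lam, r, s, c, k1)
\end{verbatim}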
\section{The involution $I_{r,s,c}$} \stepcounter{essaypart} In this section we construct the bijection $I_{r,s,c},$ check that it is well defined, and prove that it satisfies the fourth criterion in Proposition~\ref{bijection properties}.
\subsection{Recovering a partition from the arrival words} Thus far we have constructed $M_{r,s,c}(\lambda)$ and an $(r,s,c)$-tour from $b(\lambda)$. We will define $I_{r,s,c}$ as an involution that preserves $M_{r,s,c}$ but changes the $(r,s,c)$-tour, in fact by changing the order in which some of the letters appear in the arrival words. In order to check the result is well defined, we need to understand how to recover a boundary sequence from a family of arrival words, and indeed have a criterion for when it is possible to do so if the family of arrival words does not a priori arise from a partition.
If $v$ is minimal such that all boxes in the partition have top right corner on or below the line $rx+sy=v$, then we have that for all $w>v$, any arrival at a vertex $(w,[i])$ must be on a co-ordinate axis. So, \begin{equation}\label{rscboringvertex}(w,[i])_a=\begin{cases}
WN & \text{if }r\mid w, s\mid w, \frac{w}{s}\equiv \frac{-w}{r}\equiv i\pmod{c} \\
N & \text{if }s\mid w, c\mid\left(\frac{w}{s}-i\right)\text{ and either }r\nmid w\text{ or }c\nmid(\frac{-w}{r}-i)\\
W & \text{if }r\mid w, c\mid\left(\frac{-w}{r}-i\right)\text{ and either }s\nmid w\text{ or }c\nmid(\frac{w}{s}-i) \\
\emptyset &\text{otherwise.} \\
\end{cases}\end{equation} Moreover, $v$ is uniquely specified as the largest vertex where the arrival word at $(v,[i])$ does \textit{not} satisfy \eqref{rscboringvertex} for some $i\in\{1,2,\ldots,c\}$.
So, we can identify $v$ and fill in the co-ordinate axes above or to the right of the line $rx+sy=v$ as part of the partition boundary. \begin{ex}\label{fillinaxes} Suppose we have $r=3$, $s=2$, $c=2$, and the set of arrival words specified below \begin{center} \begin{tabular}{cccccccc} (20,[1])&W&(22,[0])&N&(23,[0])&W&(23,[1])&W\\ (24,[0])&W&(24,[1])&N&(25,[0])&N&(25,[1])&W\\ (26,[0])&NW&(26,[1])&WWN&(27,[0])&N&(27,[1])&WNW\\ (28,[0])&NN&(28,[1])&N&(29,[0])&NN&(29,[1])&N\\ (30,[0])&WN&(30,[1])&N&&&&\\ \end{tabular} \end{center} The arrival words are empty at all other vertices $(w,[j])$ with $w\leq30$. Then for $w>30$,
\begin{equation*}(w,[j])_a=\begin{cases}
WN & \text{if }6\mid w, \frac{w}{2}\equiv \frac{-w}{3}\equiv j\pmod{2} \\
N & \text{if }2\mid w, 2\mid\left(\frac{w}{2}-j\right)\text{ and either }3\nmid w\text{ or }2\nmid(\frac{-w}{3}-j)\\
W & \text{if }3\mid w, 2\mid\left(\frac{-w}{3}-j\right)\text{ and either }3\nmid w\text{ or }2\nmid(\frac{w}{2}-j) \\
\emptyset &\text{otherwise.} \\
\end{cases}\end{equation*}
Looking at the vertex $(30,[0])$, with $w=30$ and $j=0$ we have $\frac{w}{2}\not\equiv j\pmod{2}$, so $30$ is maximal such that there is a vertex $(30,[i])$ that does not satisfy~\eqref{rscboringvertex} for some $i$. So, $v=30$ and we draw a ray along the positive $y$-axis beginning at $(0,15),$ and a ray along the positive $x$-axis beginning at $(10,0).$ It then remains to fill in the boundary between the points $\left(0,\left\lceil\frac{v}{s}\right\rceil\right)$ and $\left(\left\lceil\frac{v}{r}\right\rceil,0\right).$ To do this, we look first at the arrival word at $\left(s\left\lceil\frac{v}{s}\right\rceil,\left[\left\lceil\frac{v}{s}\right\rceil\right]\right)$, $(30,[1])$ in our example, corresponding to the point on the $y$-axis at which the ray begins. The last letter of this word tells us what kind of edge we should add to the boundary to arrive at $\left(0,\left\lceil\frac{v}{s}\right\rceil\right)$, in this case an $N$, so we add an edge from $(0,14)$ to $(0,15)$, and delete the last $N$ from $(30,[1])_a$. The same logic allows the rest of the boundary to be filled out edge by edge, as in Figure~\ref{fillinaxesfigure}.
\begin{figure}
\caption{The arrival words in Example~\ref{fillinaxes} give the partition $(12,12,10,8,7,4,1,1,1)$}
\label{fillinaxesfigure}
\end{figure} \end{ex}
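The edge-by-edge reconstruction carried out in Example~\ref{fillinaxes} can be phrased as a short loop. The sketch below is ours: it assumes the arrival words are stored as mutable lists of letters keyed by vertex (including the formula-determined words at the axis vertices the walk visits), and it walks the boundary backwards from $\left(0,\left\lceil v/s\right\rceil\right)$ to $\left(\left\lceil v/r\right\rceil,0\right)$, consuming the last letter of the arrival word at each vertex it reaches.
\begin{verbatim}
from math import ceil

def recover_boundary(arrival, v, r, s, c):
    """Recover the boundary between (0, ceil(v/s)) and (ceil(v/r), 0).

    arrival: dict mapping a vertex (w, i mod c) to a list of letters 'N'/'W',
    consumed from the back (last arrival first).
    """
    x, y = 0, ceil(v / s)
    points = [(x, y)]
    while (x, y) != (ceil(v / r), 0):
        letter = arrival[(r * x + s * y, (y - x) % c)].pop()
        if letter == 'N':      # a north edge arrives from directly below
            y -= 1
        else:                  # 'W': a west edge arrives from the right
            x += 1
        points.append((x, y))
    return points[::-1]        # the boundary, read from the x-axis end to the y-axis end
\end{verbatim}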
\subsection{The first arrival tree} Next we lay out a criterion for a family of arrival words to arise from a partition. We saw in the previous section that any family of arrival words arising from a partition must satisfy~\eqref{rscboringvertex} for all sufficiently large $w$. We now give a criterion for the arrival words at the remaining vertices, those with $w\leq v$, to arise from a partition.
\begin{defn} Let $\lambda$ be a partition and let $M_{r,s,c}(\lambda)=M$. Let $V$ and $E$ be the vertex set and edge set of $M$ respectively. Let the $(r,s,c)$-tour of $M$ corresponding to $\lambda$ have arrival word $(v,[i])_a$ at each vertex $(v,[i])\in V$. Suppose there is another family of arrival words \begin{equation} S= \{(v,[i])'_a\mid (v,[i])\in V\},
\end{equation} such that for each $(v,[i])$, $(v,[i])'_a$ is a permutation of $(v,[i])_a$.
Denote the first letter of the arrival word $(v,[i])'_a$ by $(v,[i])'_1$, and let the first arrival edge $e_1(v,[i])$ with respect to $S$ be any edge $e$ with $t(e)=(v,[i])$ and $d(e)=(v,[i])'_1.$ Let $T_S$ be the subgraph of $M$ with vertex set $V$ and directed edge set \begin{equation}E(T_S)=\{e_1(v,[i])\mid (v,[i])\in V\}.\end{equation} In this case we call $T_S$ the \textit{first arrival graph with respect to $S$}. \end{defn}
\begin{defn} For an integer $k$, and an $(r,s,c)$-multigraph $M,$ let $M^{\leq k}$ be the induced subgraph of $M$ with vertex set $\{(v,[i])\mid (v,[i])\in V(M)\text{ and }v\leq k \}.$ For a family of arrival words $S$, let $T_S^{\leq k}$ be the induced subgraph of $T_S$ with vertex set $V(M^{\leq k}).$ \end{defn}
We require a small amount of preparation before proving Proposition~\ref{image works}, as we will make use of \cite[Thm 5]{vAE}. The proof is not hard, but would disrupt the flow of this paper, so we refer the interested reader to \cite{vAE} for a full proof. The notion we will need from \cite{vAE} is that of a $T$-graph. \begin{defn} A $T$-graph is a finite directed multigraph such that at each vertex, the number of edges arriving is the same as the number of edges departing. \end{defn}
Theorem 5a of~\cite{vAE} says that, given a complete circuit of a $T$-graph starting and ending at a vertex $v$, the set of edges given by the last departures from the vertices forms a spanning tree of the $T$-graph rooted at $v$. Equivalently, after reversing the direction of all edges, the first arrival graph arising from a complete circuit of a $T$-graph is a spanning tree. Conversely, \cite[Thm 5b]{vAE} says that any spanning tree rooted at $v$ gives rise to a complete circuit whose last departures (or equivalently, first arrivals) agree with the edges of the spanning tree.
In order to apply these theorems to our situation, we need to separate $M$ into a $T$-graph and a well-understood complement; this is how we prove Proposition~\ref{image works}.
\begin{prop}\label{image works} Let $\lambda$ be a partition and let $M_{r,s,c}(\lambda)=M$. Suppose there is a family of arrival words $S= \{(v,[i])'_a\mid (v,[i])\in V\}$ assigned to $M$. Let $T_S$ be the first arrival graph with respect to $S$. Then there is a partition $\mu$ with an $(r,s,c)$-tour having arrival words $S$ if and only if both of the following hold. \begin{enumerate}
\item There exists some $v$ such that for all $w>v$, and all $j$, $(w,[j])'_a$ satisfies~\eqref{rscboringvertex}.
\item $T_S$ is a spanning tree of $M$. \end{enumerate} \end{prop} \begin{proof} The first condition has already been shown to be necessary, so we prove that assuming the first condition holds, the second condition is equivalent to the existence of $\mu$. Fix $v$ such that for all $w\geq v$,~\eqref{rscboringvertex} holds for both $(w,[j])_a$ and $(w,[j])_a'.$ Let $k\geq v$ be such that $k=rsk_1$ for some integer $k_1$ with $c\mid k_1$. Let $M^{\leq k}$ and $M^{>k}$ be the induced subgraphs of $M$ with vertex sets given by \begin{equation}\label{Mleq k}
V(M^{\leq k})=\{(v,[i])\in V(M) \mid v\leq k\}, \end{equation}\begin{equation}
V(M^{> k})=\{(k,[0])\}\cup\{(v,[i])\in V(M) \mid v > k\}, \end{equation} and let $T_S^{\leq k}$ and $T_S^{>k}$ be the induced subgraphs of $T_S$ with vertex sets $V(M^{\leq k})$ and $V(M^{> k})$. Then~\eqref{rscboringvertex} implies that $T_S^{>k}$ is a spanning tree of $M^{>k}$. \begin{figure}
\caption{$T_S^{>k}$ in the case $(c,r+s)=1$}
\end{figure}
Let $y=(0,k_1r)$ and $x=(k_1s,0)$ be the indicated points on the boundary. Then, the edges in $M^{>k}$ correspond to the rays along the axes starting at $x$ and $y$. The $(r,s,c)$-tour corresponding to $\lambda$ restricted to $M^{\leq k}$ is a complete circuit starting and finishing at $(k,[0])$, and each edge corresponds to an edge in the boundary of $\lambda$ that occurs after the west edge arriving at $x$ and occurs before the north edge departing from $y$. So, $M^{\leq k}$ contains $k_1s$ west edges and $k_1r$ north edges. Therefore, there is a partition $\mu$ with arrival words $S$ if and only if there is a complete circuit of $M^{\leq k}$ such that the arrival words agree with $S$.
Assume that a complete circuit of $M^{\leq k}$ with arrival words as given in $S$ exists. The $(r,s,c)$-tour of $M$ corresponding to $\lambda$ consists of a circuit of $M^{>k}$ and $M^{\leq k},$ so the in-degree of any vertex $v$ of $M^{\leq k}$ is equal to the out-degree of $v$ in $M^{\leq k}$ and $M^{\leq k}$ is connected. In particular, $M^{\leq k}$ is a $T$-graph. So, ~\cite[Thm 5a]{vAE} implies that $T_S^{\leq k}$ is a tree rooted at $(k,[0]).$ Therefore, $T_S$ is a spanning tree of $M$.
Now assume that $T_S$ is a spanning tree of $M$. Then, $T_S^{\leq k}$ is a tree rooted at $(k,[0])$ and ~\cite[Thm 5b]{vAE} implies that there is a complete circuit of $M^{\leq k}$ with arrival words agreeing with $S$.\end{proof}
From now on, let $\lambda<_{r,s,c}\lambda_{r,s,k}$ where $k=rsk_1$ and $c\mid k_1$. Let $M_{r,s,c}(\lambda)=M$, let $S$ be the family of arrival words corresponding to $\lambda$. We will now refer to $T_S$ as the first arrival tree. Next, we give a useful way to draw $M_{r,s,c}(\lambda).$
\begin{prop}\label{lattice points} If there is a lattice point $(x,y)$ satisfying both $rx+sy=v$ and $y-x\equiv i\pmod{c},$ then for any real number $m$ there is exactly one such lattice point satisfying the inequality $m\leq y-x < m+\lcm(c,r+s)$. \end{prop} \begin{proof} First, note that translating a lattice point $(x,y)$ by $(-s,r)$ does not change the value of $rx+sy$. Moreover, there is no lattice point on the line $rx+sy=v$ between $(x,y)$ and $(x-s,y+r),$ since if $(x-l_1,y+l_2)$ were such a point, we would have $-rl_1+sl_2=0$, so since $r$ and $s$ are coprime, $s\mid l_1$ and $r\mid l_2$.
Secondly, note that translating by $(-s,r)$ changes the value of $y-x$ by $r+s$. So, the translations that preserve both the value of $rx+sy$ and the residue class of $[y-x]$ modulo $c$ are the translations by $(-as,ar)$ where $a(r+s)$ is divisible by $c$, that is, where $a(r+s)$ is divisible by $\lcm(c,r+s)$. Exactly one of these translates lies in the region $m\leq y-x < m+\lcm(c,r+s)$. \end{proof}
This proposition gives us a very useful way to visualize $M_{r,s,c}(\lambda)$. For a fixed integer $n$, we can draw the multigraph by taking the vertices to be lattice points in the portion of $\mathbb{R}^2$ in between the lines $y-x=n$ and $y-x=\lcm(c,r+s)+n$, with an identification along the boundary lines given by $$(x,y)\sim \left(x-\frac{s \lcm(c,r+s)}{r+s},y+\frac{r \lcm(c,r+s)}{r+s}\right).$$ We identify a lattice point $(x,y)$ with the vertex $(rx+sy,[y-x])$. Then, west edges in the multigraph from $(v,[i])$ to $(v-r,[i+1])$ are west edges between lattice points in the region described. Similarly, north edges from $(v,[i])$ to $(v+s,[i+1])$ are north edges between lattice points. Moreover, each vertex $(v,[i])$ corresponds to a unique lattice point in the region. We can view the $(r,s,c)$-tour as the \textit{cylindrical} lattice path tour obtained by collapsing the boundary of the partition onto this cylinder.
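The identification above is easy to compute with; the following small sketch (our own notation, purely illustrative) sends a lattice point to its vertex and to the representative of its equivalence class lying in the strip $n\leq y-x<n+\lcm(c,r+s)$.
\begin{verbatim}
from math import lcm

def vertex(x, y, r, s, c):
    """Lattice point (x, y) -> vertex (rx + sy, [y - x] mod c)."""
    return r * x + s * y, (y - x) % c

def cylinder_rep(x, y, r, s, c, n=0):
    """Representative of (x, y) under (x, y) ~ (x - s*L/(r+s), y + r*L/(r+s)),
    L = lcm(c, r+s), chosen so that n <= y - x < n + L."""
    L = lcm(c, r + s)
    step = L // (r + s)
    k = ((y - x) - n) // L     # index of the strip containing (x, y), relative to the chosen one
    return x + k * s * step, y - k * r * step

# e.g. with r = 3, s = 2, c = 2 (so L = 10), the points (7, -2) and (3, 4)
# are identified, and both correspond to the vertex (17, [1]).
\end{verbatim}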
\begin{ex} When $c=2$, $r=3$ and $s=2$ then we may draw the $(r,s,c)$-multigraph of $\mu=(12,12,10,8,7,4,1,1,1)$ as in Figure~\ref{Big multigraph 1}. \begin{figure}
\caption{$M_{3,2,2}(12,12,10,8,7,4,1,1,1)$}
\label{Big multigraph 1}
\end{figure} \end{ex}
When we have a drawing of $M_{r,s,c}(\lambda)$ on the cylinder defined in Proposition~\ref{lattice points}, and a directed path $p:(v,[i])\rightarrow (w,[j])$, we define the \textit{winding number of $p$} to be the number of times strictly after leaving $(v,[i])$ and before arriving at $(w,[j])$ that $p$ arrives at a vertex on the upper boundary strip. We will be particularly interested in the case where $(v,[i])=(k,[-k_1s])$ and $p$ is the unique path in the first arrival tree from $(k,[-k_1s])$ to $(w,[j]).$
\begin{defn} Given a partition $\lambda$ with $\lambda<_{r,s,c} \lambda_{r,s,k}$, and $(r,s,c)$-multigraph $M$, let $T$ denote the first arrival tree of $M$ corresponding to $\lambda$. Let $(v,[i])\in V(M)$ have $v\leq k$ and $(v,[i])\not=(k,[0]).$ Then $(v,[i])$ is a \textit{switch} if $(v+r,[i-1])$ and $(v-s,[i-1])$ are both vertices of $M$, and the distances in $T$ from the vertex $(k,[0])$ to $(v+r,[i-1])$, $(v-s,[i-1])$ are equal.
Now drop the condition that $v\leq k$ and $(v,[i])\not=(k,[0]).$ If $(v,[i])$ is not a switch and the first letter in the arrival word is $N$, we say that $(v,[i])$ is \textit{northern}, and let $\operatorname{No}$ be the set of all northern vertices $(v,[i])$ with $v\leq k$. If $(v,[i])$ is not a switch and the first letter in the arrival word is $W$, we say that $(v,[i])$ is \textit{western}, and let $\operatorname{We}$ be the set of all western vertices $(v,[i])$ with $v\leq k$. \end{defn}
\begin{ex} For $\mu=(12,12,10,8,7,4,1,1,1)$, the first arrival tree of $M_{3,2,2}(\mu)$ is as in Figure~\ref{Big tree 1} \begin{figure}
\caption{The first arrival tree in $M_{3,2,2}(12,12,10,8,7,4,1,1,1)$ with the northern vertices coloured blue, the western vertices coloured red, and the switches coloured green.}
\label{Big tree 1}
\end{figure} The paths in the first arrival tree from $(36,[0])$ to $(26,[0])$, $(23,[1])$, $(28,[1])$ and $(25,[0])$ have winding number $1$, whilst the paths in the first arrival tree from $(36,[0])$ to the other vertices $(v,[i])$ have winding number $0$. The switches are coloured green, the western vertices red, and the northern vertices blue (compare with Figure~\ref{Big multigraph 1} to verify the colouring). \end{ex}
We now make some straightforward but important observations about $M_{r,s,c}(\lambda)$ and winding numbers.
\begin{prop}\label{winding numbers}
Let $(v,[i])$ and $(w,[j])$ be two vertices of $M_{r,s,c}(\lambda)$, and let $p_1$ and $p_2$ be directed paths from $(v,[i])$ to $(w,[j])$. Suppose $p_1$ is given by the sequence of vertices $(v,[i])=(v_0,[i_0]),\ldots, (v_{|p_1|},[i_0+|p_1|])=(w,[j])$. Then, \begin{enumerate}
\item $|p_1|-|p_2|$ is divisible by $\lcm(c,r+s)$.
\item Let $(v,[i])$ be $m$ lattice steps above the lower boundary of the cylinder, and let $|p_1|=q\lcm(c,r+s)+u$ where $-m < u\leq \lcm(c,r+s)-m$. The winding number of $p_1$ is $q$. \end{enumerate} \end{prop}
\begin{proof} The first point follows from Proposition~\ref{lattice points}: $p_1$ and $p_2$ are lattice paths from points $(x_1,y_1)$ and $(x_1-as,y_1+ar)$ respectively to points $(x_2,y_2)$ and $(x_2-bs,y_2+br)$ respectively, where $\lcm(c,r+s)$ divides $a(r+s)$ and $b(r+s)$. We have that $|p_1|=x_1-x_2+y_2-y_1$ and $|p_2|=x_1-as-x_2+bs+y_2+br-y_1-ar$, so $|p_1|-|p_2|=(r+s)(a-b)$, which is divisible by $\lcm(c,r+s)$.
The second point follows because as we trace out a directed path, the value of $y-x$ moves cyclically through the residue classes modulo $\lcm(c,r+s)$, incrementing by 1 with each step. \end{proof}
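In particular, the second part of Proposition~\ref{winding numbers} determines the winding number from the length of the path and the height of its starting vertex alone. A one-line computation of $q$, together with a brute-force check of the defining inequality, is sketched below (the code and the function name are ours).
\begin{verbatim}
from math import lcm

def winding_number(path_length, m, r, s, c):
    """q such that path_length = q*lcm(c, r+s) + u with -m < u <= lcm(c, r+s) - m."""
    return (path_length + m - 1) // lcm(c, r + s)

# check the defining inequality with r = 3, s = 2, c = 2, so lcm(c, r+s) = 10
L = lcm(2, 3 + 2)
for length in range(1, 4 * L):
    for m in range(L):
        u = length - winding_number(length, m, 3, 2, 2) * L
        assert -m < u <= L - m
\end{verbatim}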
\begin{corol}\label{di-di-1} Let $(k,[0])=v_0,v_1,\ldots,v_{(r+s)k_1}=(k,[0])$ be the vertices visited, in order, by the $(r,s,c)$-tour, corresponding to the section of the boundary of $\lambda$ between $(k_1s,0)$ and $(0,k_1r)$. Let $d_i$ denote the distance in the first arrival tree $T$ from $(k,[0])$ to $v_i$. \begin{enumerate}
\item If $v_i$ is a switch, or if the edge $(v_{i-1},v_i)$ is in $T$, then $d_i-d_{i-1}=1$.
\item If $v_i$ is a northern vertex and $(v_{i-1},v_i)\not\in E(T)$, then $d_i-d_{i-1}=1+\lcm(c,r+s)$.
\item If $v_i$ is a western vertex and $(v_{i-1},v_i)\not\in E(T)$, then $d_i-d_{i-1}=1-\lcm(c,r+s)$. \end{enumerate} \end{corol} \begin{proof}
Write $p_i$ for the path in $T$ from $(k,[0])$ to $v_i$, so that $|p_i|=d_i.$ The first point follows immediately from the definition of a switch and the definition of $T$.
The winding numbers $\operatorname{wind}(p_i)$ and $\operatorname{wind}(p_{i-1})$ differ by at most 1. Moreover, if $\operatorname{wind}(p_i)=\operatorname{wind}(p_{i-1})$, then by the second part of Proposition~\ref{winding numbers}, $||p_i|-|p_{i-1}||<\lcm(c,r+s).$ Since there is a path of length 1 (not necessarily in $T$) connecting $v_{i-1}$ and $v_i$, then by the first part of Proposition~\ref{winding numbers}, $|p_i|-|p_{i-1}|\equiv 1\pmod{\lcm(c,r+s)}$. Therefore, $d_i-d_{i-1}=1,$ so $v_i$ is a switch or $(v_{i-1},v_{i})$ is an edge of $T$.
In the case that $v_i$ is northern, and $(v_{i-1},v_i)$ is not an edge of $T$, $(v_{i-1},v_i)$ must be a west edge. We cannot have $\operatorname{wind}(p_i)=\operatorname{wind}(p_{i-1})$, so $|\operatorname{wind}(p_i)-\operatorname{wind}(p_{i-1})|=1.$ Since the edge from $v_{i-1}$ to $v_i$ is a west edge, we have $\operatorname{wind}(p_i)=\operatorname{wind}(p_{i-1})+1.$ Let $q_i$ be the path obtained by extending $p_{i-1}$ by the west edge $(v_{i-1},v_i)$. Then $|q_i|=d_{i-1}+1.$ The second part of Proposition~\ref{winding numbers} tells us that $|p_i|$ and $|q_i|$ agree modulo $\lcm(c,r+s)$ and therefore $d_i=d_{i-1}+1+\lcm(c,r+s)$.
An analogous argument proves the third formula. \end{proof} \subsection{Definition of $I_{r,s,c}$} Given a partition $\lambda$ with $\lambda\leq_{r,s,c} \lambda_{r,s,k}$, with multigraph $M$ and first arrival tree $T$, we define the partition $I_{r,s,c}(\lambda)$ as follows. The multigraph of $I_{r,s,c}(\lambda)$ is also given by $M$.
Now, we obtain the $(r,s,c)$-tour of $I_{r,s,c}(\lambda)$ by, at each switch, reversing the arrival word, and at each vertex that is not a switch, fixing the first letter of the arrival word and reversing the rest of the arrival word.
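At the level of a single arrival word this is the following elementary operation (a sketch in our own notation, with arrival words written as strings); compare the change $WNW\mapsto WWN$ at the non-switch vertex $(6,[0])$ in the extended example below.
\begin{verbatim}
def toggle_word(word, is_switch):
    """Arrival word of I_{r,s,c}: reverse the word at a switch; otherwise
    fix the first letter and reverse the rest."""
    if is_switch:
        return word[::-1]
    return word[:1] + word[1:][::-1]

# toggle_word('WNW', False) == 'WWN';  toggle_word('WN', True) == 'NW'
\end{verbatim}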
To see that $I_{r,s,c}(\lambda)$ is well defined, we need to check that taking the first arrival at each vertex of $M$ gives a spanning tree. We do this by checking that in $T$, the move of deleting a north (respectively west) edge arriving at a switch $(v,[i])$ and adding a new west (respectively north) edge arriving at $(v,[i])$ gives another spanning tree $T'$. There are still edges arriving at every vertex we had edges arriving at before, but now the edge arriving at $(v,[i])$ might be departing from a different vertex. So, it suffices to check that $(v,[i])$ is still connected to each of $(v-s,[i-1])$ and $(v+r,[i-1])$, and that we have not introduced a cycle by adding the new edge. For the former, it suffices to check that $(v-s,[i-1])$ and $(v+r,[i-1])$ are still connected to each other. In $T$, $(v+r,[i-1])$ and $(v-s,[i-1])$ are both connected to $(k,[0])$ by paths. Moreover, the distance in $T$ to $(k,[0])$ strictly decreases with each step along the path we take, so $(v,[i])$ is not a vertex on either of these paths. So, both of these paths exist in $T'$, and $(v+r,[i-1])$ and $(v-s,[i-1])$ are connected to one another. To see that the new edge does not introduce a cycle, observe that if we had introduced a cycle, we would now have two distinct paths from $(k,[0])$ to $(v,[i])$. Since the only edge into $(v,[i])$ is from its new neighbour, we must have had two distinct paths from $(k,[0])$ to the new neighbour in $T$ originally. But then we had a cycle in $T$ originally, a contradiction.
Hence, we may permute the letters in any arrival word at any vertex and the result will still correspond to a partition as long as we do not change the first letter in the arrival word at a vertex that is not a switch. Since we defined $I_{r,s,c}(\lambda)$ to fix the first letter in the arrival word at any vertex that is not a switch, $I_{r,s,c}(\lambda)$ is well defined. Moreover, we can recover $\lambda$ from $I_{r,s,c}(\lambda)$ by doing the same operation again, as each operation is self-inverse and preserves switches.
Since $I_{r,s,c}$ does not change $M_{r,s,c}$, we can apply Corollaries~\ref{midcorol},~\ref{crittotcorol}, and~\ref{areacorol}, and Proposition~\ref{independence core}, respectively, to obtain \begin{equation}
\operatorname{mid}_{x,c}(I_{r,s,c}(\lambda))=\operatorname{mid}_{x,c}(\lambda), \end{equation} \begin{equation}
\operatorname{crit}_{x,c}^+(I_{r,s,c}(\lambda))+\operatorname{crit}_{x,c}^-(I_{r,s,c}(\lambda))=\operatorname{crit}_{x,c}^+(\lambda)+\operatorname{crit}_{x,c}^-(\lambda), \end{equation} \begin{equation}
|I_{r,s,c}(\lambda)|=|\lambda|, \end{equation} and \begin{equation}
\operatorname{core}_c(I_{r,s,c}(\lambda))=\operatorname{core}_c(\lambda). \end{equation} Moreover, the map sending $\lambda$ to $I_{r,s,c}(\lambda)$ is an involution: it is immediate from the definition that a vertex is a switch after this reassignment if and only if it was a switch before the reassignment.
It remains to check that $\operatorname{crit}_{x,c}^+(I_{r,s,c}(\lambda))=\operatorname{crit}_{x,c}^-(\lambda)$ and $\operatorname{crit}_{x,c}^-(I_{r,s,c}(\lambda))=\operatorname{crit}_{x,c}^+(\lambda)$.
\begin{prop} Let $\lambda$ be a partition. Then \begin{equation}\operatorname{crit}_{x,c}^+(I_{r,s,c}(\lambda))=\operatorname{crit}_{x,c}^-(\lambda)\end{equation} and \begin{equation}\operatorname{crit}_{x,c}^-(I_{r,s,c}(\lambda))=\operatorname{crit}_{x,c}^+(\lambda).\end{equation} \end{prop} \begin{proof}
We will check that $\operatorname{crit}_{x,c}^+(\lambda)=(\operatorname{crit}_{x,c}^++\operatorname{crit}_{x,c}^-)(M_{r,s,c}(\lambda))-\operatorname{crit}_{x,c}^+(I_{r,s,c}(\lambda)).$
Recall that $\operatorname{crit}_{x,c}^+$ counts the total number of inversions in the arrival words at the vertices of $M_{r,s,c}(\lambda).$ Suppose the arrival word at vertex $(v,[i])$ has $a$ west edges and $b$ north edges. If $(v,[i])$ is a switch, then $I$ reverses the arrival word at $(v,[i])$, so the pairs of $N,W$ edges that contribute to $\operatorname{crit}_{x,c}^+(I(\lambda))$ are exactly those that do not contribute to $\operatorname{crit}_{x,c}^+(\lambda)$, so the contributions over $I(\lambda)$ and $\lambda$ at $(v,[i])$ sum to $ab$.
Note that if $(v,[i])\in \operatorname{No}$ then $I(\lambda)$ has inversions in the arrival word at $(v,[i])$ using the first $N$ and any $W$ in the arrival word, and then any other pair of north and west edges contributes to $I(\lambda)$ if and only if it does not contribute to $\lambda$, so the two contributions sum to $ab+a.$ Similarly, if $(v,[i])\in \operatorname{We}$ then the contributions sum to $ab-b$. Hence, we have that the total $\operatorname{crit}_{x,c}^+(\lambda)+\operatorname{crit}_{x,c}^+(I_{r,s,c}(\lambda))$ can be written as $S_1+S_2+S_3$ where $$S_1 = \sum_{(v,[i])\text{ is a switch}}N_{in}(v,[i])W_{in}(v,[i]),$$ $$S_2=\sum_{(v,[i])\text{ northern}}\left(N_{in}(v,[i])W_{in}(v,[i])+W_{in}(v,[i])\right),$$ $$S_3=\sum_{(v,[i])\text{ western}}\left(N_{in}(v,[i])W_{in}(v,[i])-N_{in}(v,[i])\right).$$
Now, note first that no vertex $(v,[i])$ with $v>k$ contributes to any of these sums. Indeed, no such vertex is a switch, and the arrival word at any such $(v,[i])$ has length $0,1$ or $2$, containing at most one $W$ and at most one $N$. If the arrival word is empty there is nothing to prove. If the arrival word is $N$ then the vertex is northern, and $N_{in}(v,[i])W_{in}(v,[i])+W_{in}(v,[i])=0.$ If the arrival word is $W$ then the vertex is western and $N_{in}(v,[i])W_{in}(v,[i])-N_{in}(v,[i])=0.$ The only other possible arrival word is $WN$, in which case the vertex is western and $N_{in}(v,[i])W_{in}(v,[i])-N_{in}(v,[i])=1-1=0.$ So, we may restrict our sum to vertices $(v,[i])$ with $v\leq k$.
Proposition~\ref{crittotdiff} proves~\eqref{crittot}, $$(\operatorname{crit}_{x,c}^++\operatorname{crit}_{x,c}^-)(M_{r,s,c}(\lambda))=\sum_{v=0}^k\sum_{i=0}^{c-1}N_{in}(v,[i])W_{in}(v,[i])-\left\lfloor\frac{k_1(s+r)}{\lcm(c,r+s)}\right\rfloor.$$
We wish to show that~\eqref{crittot} is equal to $S_1+S_2+S_3$, and therefore it suffices to check that
\begin{equation}\label{toprove}\sum_{(v,[i])\text{ western}}N_{in}(v,[i])-\sum_{(v,[i])\text{ northern}}W_{in}(v,[i])=\left\lfloor\frac{k_1(s+r)}{\lcm(c,s+r)}\right\rfloor.\end{equation}
Note that the north edges entering western vertices and the west edges entering northern vertices are exactly the edges in $M_{r,s,c}(\lambda)$ arriving at non-switch vertices that are \textit{not} a copy of an edge in the first arrival tree $T$. Hence, if we let $n_0$ denote the number of edges $e$ entering vertices $(v,[i])$ with $v\leq k$ such that either \begin{itemize}
\item $(v,[i])$ is a switch, or \item $(v,[i])$ is not a switch and there is a copy of $e$ in the first arrival tree $T$, \end{itemize} then \begin{equation}\label{usetogglability}n_0+\sum_{(v,[i])\text{ western}}N_{in}(v,[i])+\sum_{(v,[i])\text{ northern}}W_{in}(v,[i])=k_1(r+s).\end{equation}
Now, let $(k,[-k_1s])=v_0,v_1,\ldots,v_{(r+s)k_1}= (k,[k_1r])$ be the vertices visited, in order, possibly with repetition, by the $(r,s,c)$-tour. Let $d_i$ denote the distance in the first arrival tree from $(k,[-k_1s])$ to $v_i$. Then, using the assumption that $c\mid (r+s)k_1$, we have that \begin{equation}\label{telescoper}0=d_{(r+s)k_1}=\sum_{i=1}^{(r+s)k_1} \left(d_i-d_{i-1}\right).\end{equation} Substituting the formulae for $d_i-d_{i-1}$ proven in Corollary~\ref{di-di-1} into~\eqref{telescoper} gives \begin{equation}\label{stratifybyvertextype}n_0+(1+\lcm(c,r+s))\sum_{(v,[i])\in\operatorname{No}}W_{in}(v,[i])+(1-\lcm(c,r+s))\sum_{(v,[i])\in\operatorname{We}}N_{in}(v,[i])=0.\end{equation}
Subtracting~\eqref{stratifybyvertextype} from~\eqref{usetogglability} gives \begin{equation}k_1(r+s)=\lcm(c,r+s)\left(\sum_{(v,[i])\in\operatorname{We}} N_{in}(v,[i])-\sum_{(v,[i])\in\operatorname{No}} W_{in}(v,[i])\right).\end{equation} Now, since $k$ is divisible by $rsc$, $k_1=\frac{k}{rs}$ is divisible by $c$, so $\lcm(c,r+s)$ divides $k_1(r+s)$. Therefore, $$\left\lfloor\frac{k_1(r+s)}{\lcm(c,r+s)}\right\rfloor=\frac{k_1(r+s)}{\lcm(c,r+s)}=\sum_{(v,[i])\in\operatorname{We}} N_{in}(v,[i])-\sum_{(v,[i])\in\operatorname{No}} W_{in}(v,[i]),$$ which is~\eqref{toprove}, completing the proof. \end{proof}
\subsection{Extended Example} Let $c=2$ and $n=7$, and $\mu=(2,1).$ Then \begin{equation}\operatorname{Par}^2_{\mu}(7)=\{(6,1),(4,3),(4,1,1,1),(2,2,2,1),(2,1,1,1,1,1)\}.\end{equation} \begin{figure}
\caption{The partitions in $\operatorname{Par}^2_{\mu}(7)$ with boxes of even hook length coloured yellow}
\label{extex}
\end{figure}
For the shaded cells, the values of $\frac{a(\square)}{l(\square)+1}$ are $\left\{3,1,0,\frac{1}{3}\right\}$ and the values of $\frac{a(\square)+1}{l(\square)}$ are $\left\{\infty,3,1,\frac{1}{3}\right\}$. So, the critical rationals are $\left\{0,\frac{1}{3},1,3,\infty\right\}.$
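These values can be recomputed directly from the arm and leg lengths. The following brute-force sketch is ours (partitions are given by their lists of parts); it reproduces the two sets of values above.
\begin{verbatim}
from fractions import Fraction

def arm_leg(parts):
    """Yield (arm, leg) for every box of the partition with the given parts."""
    conj = [sum(1 for p in parts if p >= j) for j in range(1, max(parts) + 1)]
    for i, p in enumerate(parts, start=1):
        for j in range(1, p + 1):
            yield p - j, conj[j - 1] - i

pars = [(6, 1), (4, 3), (4, 1, 1, 1), (2, 2, 2, 1), (2, 1, 1, 1, 1, 1)]
lower, upper = set(), set()
for lam in pars:
    for a, l in arm_leg(lam):
        if (a + l + 1) % 2 == 0:                      # box of even hook length
            lower.add(Fraction(a, l + 1))
            upper.add(Fraction(a + 1, l) if l else float('inf'))
print(sorted(lower))   # 0, 1/3, 1, 3
print(sorted(upper))   # 1/3, 1, 3, inf
\end{verbatim}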
In this example, we will verify that $$\sum_{\lambda\in\operatorname{Par}^2_{\mu}(7)}t^{h_{4,2}^+(\lambda)}=\sum_{\lambda\in\operatorname{Par}^2_{\mu}(7)}t^{\lambda^{2*}_{\hrectangle}}.$$ Recall
\begin{equation} h_{4,2}^+(\lambda)=\left|\left\{\square \in \lambda \,\middle\vert\, 2\mid h(\square) \text{ and } \frac{a(\square)}{l(\square)+1}\leq 4<\frac{a(\square)+1}{l(\square)}\right\}\right|.\end{equation}
From our computation of the critical rationals, for any box $\square$ with $2\mid h(\square)$ in a partition $\lambda\in\operatorname{Par}^2_{\mu}(7),$ $4<\frac{a(\square)+1}{l(\square)}$ if and only if $3< \frac{a(\square)+1}{l(\square)}$, and $\frac{a(\square)}{l(\square)+1}\leq 4$ if and only if $\frac{a(\square)}{l(\square)+1}\leq 3.$ So, $h_{4,2}^+(\lambda)=h_{3,2}^+(\lambda)$. Now we use $I_{3,1,2}:\operatorname{Par}^2_{\mu}(7)\rightarrow\operatorname{Par}^2_{\mu}(7)$. Because $\operatorname{mid}_{3,2}(\lambda)=\operatorname{mid}_{3,2}(I_{3,1,2}(\lambda))$ and $\operatorname{crit}^{\pm}_{3,2}(I_{3,1,2}(\lambda))=\operatorname{crit}^{\mp}_{3,2}(\lambda)$, $I_{3,1,2}$ is a bijection exchanging $h_{3,2}^+$ and $h_{3,2}^-,$ so $$\sum_{\lambda\in\operatorname{Par}^2_{\mu}(7)}t^{h_{3,2}^+(\lambda)}=\sum_{\lambda\in\operatorname{Par}^2_{\mu}(7)}t^{h_{3,2}^-(\lambda)}.$$
We now explicitly compute $I_{3,1,2}(\lambda)$ for $\lambda=(6,1).$
The diagram of $(6,1)$ lies below the line $3x+y=9.$ So, we choose the smallest value $k\geq 9$ such that $3\times2\times1\mid k,$ $k=12.$ Then, $k_1=\frac{12}{3}=4.$
The $(r,s,c)$-tour of $M_{3,1,2}((6,1))$ is defined by the following family of arrival words. \begin{center}
\begin{tabular}{cccccc} (4,[0])&W&(5,[1])&N&(6,[0])&WNW\\ (7,[1])&NNN&(8,[0])&NN&(9,[1])&WNN \end{tabular} \end{center} and for $w>9,$ \begin{equation}(w,[i])_a=\begin{cases}
WN & 3\mid w, w\equiv \frac{-w}{3}\equiv i\pmod{2} \\
N & 2\mid\left(w-i\right)\text{ and either }3\nmid w\text{ or }2\nmid(\frac{-w}{3}-i)\\
W & 3\mid w, 2\mid\left(\frac{-w}{3}-i\right), 2\nmid(w-i) \\
\text{empty} &\text{otherwise} \\
\end{cases}.\end{equation}
The multigraph is given in Figure~\ref{multex1} with the edges in the first arrival tree in bold.
\begin{figure}
\caption{$M_{3,1,2}((6,1))$ with the edges of the first arrival tree in bold}
\label{multex1}
\end{figure}
After applying $I_{3,1,2}$ the arrival words are \begin{center} \begin{tabular}{cccccc} (4,[0])&W&(5,[1])&N&(6,[0])&WWN\\ (7,[1])&NNN&(8,[0])&NN&(9,[1])&WNN \end{tabular} \end{center} with all arrival words at $(w,[i])$ with $w>9$ unchanged. These arrival words correspond to the partition $(4,3).$ So, $h_{3,2}^+((6,1))=h_{3,2}^-((4,3)).$
From our computation of the critical rationals, for any box $\square$ with $2\mid h(\square)$ in a partition $\lambda\in\operatorname{Par}^2_{\mu}(7),$ $3\leq\frac{a(\square)+1}{l(\square)}$ if and only if $1< \frac{a(\square)+1}{l(\square)}$, and $\frac{a(\square)}{l(\square)+1} < 3$ if and only if $\frac{a(\square)}{l(\square)+1}\leq 1.$ So, $h_{3,2}^-(\lambda)=h_{1,2}^+(\lambda)$ for all $\lambda\in\operatorname{Par}^2_{\mu}(7)$. Now, $I_{1,1,2}$ exchanges $h_{1,2}^+$ and $h_{1,2}^-$, and $I_{1,1,2}((4,3))=(2,2,2,1)$, so $h_{4,2}^+((6,1))=h_{1,2}^+((4,3))=h_{1,2}^-((2,2,2,1)).$ Using the same logic again, $h_{1,2}^-(\lambda)=h_{\frac{1}{3},2}^+(\lambda)$ for each $\lambda\in\operatorname{Par}^2_{\mu}(7).$ Using $I_{1,3,2}$, $I_{1,3,2}((2,2,2,1))=(2,1,1,1,1,1),$ so $h_{4,2}^+((6,1))=h_{\frac{1}{3},2}^{-}((2,1,1,1,1,1))$. Finally, for any partition $\lambda\in\operatorname{Par}^2_{\mu}(7),$ $\frac{1}{3}\leq\frac{a(\square)+1}{l(\square)}$ if and only if $0<\frac{a(\square)+1}{l(\square)}$, and $\frac{a(\square)}{l(\square)+1}< \frac{1}{3}$ if and only if $a(\square)=0,$ if and only if $\frac{a(\square)}{l(\square)+1}\leq 0,$ so $h^-_{\frac{1}{3},2}(\lambda)=h^+_{0,2}(\lambda).$
Therefore, since $$I_{1,3,2}\circ I_{1,1,2}\circ I_{3,1,2}((6,1))=(2,1,1,1,1,1),$$ we have that $h^+_{4,2}(6,1)=h^+_{0,2}(2,1,1,1,1,1).$ For the other partitions in $\operatorname{Par}^2_{\mu}(7),$ $$I_{1,3,2}\circ I_{1,1,2}\circ I_{3,1,2}(4,3)=I_{1,3,2}\circ I_{1,1,2}((6,1))=I_{1,3,2}(2,1,1,1,1,1)=(2,2,2,1),$$ $$I_{1,3,2}\circ I_{1,1,2}\circ I_{3,1,2}(4,1,1,1)=I_{1,3,2}\circ I_{1,1,2}((4,1,1,1))=I_{1,3,2}(4,1,1,1)=(4,1,1,1).$$ $$I_{1,3,2}\circ I_{1,1,2}\circ I_{3,1,2}(2,2,2,1)=I_{1,3,2}\circ I_{1,1,2}((2,2,2,1))=I_{1,3,2}(4,3)=(4,3).$$ $$I_{1,3,2}\circ I_{1,1,2}\circ I_{3,1,2}(2,1,1,1,1,1)=I_{1,3,2}\circ I_{1,1,2}((2,1,1,1,1,1))=I_{1,3,2}(6,1)=(6,1).$$
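As a sanity check on the chain of involutions above, the equidistribution of $h^+_{4,2}$ and $h^+_{0,2}$ on $\operatorname{Par}^2_{\mu}(7)$ that it implies can also be verified by brute force from the definition recalled earlier (the code is ours; the right-hand inequality is treated as vacuous when $l(\square)=0$).
\begin{verbatim}
def h_plus(parts, x, c):
    """Number of boxes with c | hook length and a/(l+1) <= x < (a+1)/l."""
    conj = [sum(1 for p in parts if p >= j) for j in range(1, max(parts) + 1)]
    count = 0
    for i, p in enumerate(parts, start=1):
        for j in range(1, p + 1):
            a, l = p - j, conj[j - 1] - i
            if (a + l + 1) % c == 0 and a <= x * (l + 1) and (l == 0 or x * l < a + 1):
                count += 1
    return count

pars = [(6, 1), (4, 3), (4, 1, 1, 1), (2, 2, 2, 1), (2, 1, 1, 1, 1, 1)]
print(sorted(h_plus(lam, 4, 2) for lam in pars))   # [0, 0, 1, 1, 2]
print(sorted(h_plus(lam, 0, 2) for lam in pars))   # [0, 0, 1, 1, 2]
\end{verbatim}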
\section{Further Work} We note here that Problem 8.9 in \cite{Walsh} may be amenable to similar techniques.
\begin{bibdiv} \begin{biblist} \bib{vAE}{article}{
author={van Aardenne-Ehrenfest, T.},
author={de Bruijn, N. G.},
title={Circuits and trees in oriented linear graphs},
journal={Simon Stevin},
volume={28},
date={1951},
pages={203--217},
issn={0037-5454},
review={\MR{47311}}, }
\bib{BB}{article}{
author={Bia\l ynicki-Birula, A.},
title={Some properties of the decompositions of algebraic varieties
determined by actions of a torus},
language={English, with Russian summary},
journal={Bull. Acad. Polon. Sci. S\'{e}r. Sci. Math. Astronom. Phys.},
volume={24},
date={1976},
number={9},
pages={667--674},
issn={0001-4117},
review={\MR{453766}}, }
\bib{BFN}{article}{
author={Buryak, Alexandr},
author={Feigin, Boris Lvovich},
author={Nakajima, Hiraku},
title={A simple proof of the formula for the Betti numbers of the
quasihomogeneous Hilbert schemes},
journal={Int. Math. Res. Not. IMRN},
date={2015},
number={13},
pages={4708--4715},
issn={1073-7928},
review={\MR{3439090}},
doi={10.1093/imrn/rnu076}, }
\bib{LW}{article}{
author={Loehr, Nicholas A.},
author={Warrington, Gregory S.},
title={A continuous family of partition statistics equidistributed with
length},
journal={J. Combin. Theory Ser. A},
volume={116},
date={2009},
number={2},
pages={379--403},
issn={0097-3165},
review={\MR{2475023}},
doi={10.1016/j.jcta.2008.07.001}, }
\bib{James}{article}{
author={James, G. D.},
title={Some combinatorial results involving Young diagrams},
journal={Math. Proc. Cambridge Philos. Soc.},
volume={83},
date={1978},
number={1},
pages={1--10},
issn={0305-0041},
review={\MR{463280}},
doi={10.1017/S0305004100054220}, }
\bib{Walsh}{article}{
author={Walsh, Adam},
author={Warnaar, S. Ole},
title={Modular Nekrasov-Okounkov formulas},
journal={S\'{e}m. Lothar. Combin.},
volume={81},
date={2020},
pages={Art. B81c, 28},
review={\MR{4097428}}, }
\end{biblist} \end{bibdiv}
\end{document}
\begin{document}
\title{Interferometric Phase Estimation Through Quantum Filtering in Coherent States} \author{John E.~Gough} \email{[email protected]}
\affiliation{Institute for Mathematics and Physics, Aberystwyth University, SY23 3BZ, Wales, United Kingdom}
\begin{abstract} We derive the form of the quantum filter equation describing the continuous observation of the phase of a quantum system in an arm of an interferometer via non-demolition measurements when the input field used for the indirect measurement is in a general coherent state. Both quadrature homodyne detection and photon-counting detection schemes are covered, and we solve the linearized filter for a specific application. \end{abstract}
\pacs{07.60Ly, 03.65Ta, 06.30Bp, 42.50Lc} \maketitle
\section{Introduction}
There has been a steady interest in the problem of ``collapse of the wavefunction'' amongst quantum physicists, particularly in relation to foundational issues. The dichotomy usually presented is between the unitary evolution under the Schr\"{o}dinger equation and the discontinuous change when a measurement is made. Clearly the collapse of the wavefunction is a form of conditioning the quantum state made by an instantaneous measurement. However, conditional probabilities are well known classically and have no such interpretational issues. Furthermore, the process of extraction of information from a classical system and the resulting conditioning of the state is well studied from the point of view of stochastic estimation. For continual measurements, there are standard results on nonlinear filtering, see \cite{DavisMarcus,Kush79,Kush80,Zak69}. What is not often appreciated in the theoretical physics community is that the analogue problem was formulated by Belavkin \cite{Bel80,Bel92a}, where a quantum theory of filtering based on non-demolition measurements of an output field is established: see also \cite{BouGutMaa04}-\cite{GJNC_PRA12}. Specifically, we must measure a particular feature of the field, for instance a field quadrature or the count of the field quanta, and this determines a self-commuting, therefore \emph{essentially classical}, stochastic process. The resulting equations have structural similarities with the classical analogues. They are also formally identical with the equations arising in quantum trajectory theory \cite{Carmichael93}; however, the stochastic master equations play different roles: in quantum filtering they describe the conditioned evolution of the state while in quantum trajectories they are a means of simulating a master equation.
There has been recent interest amongst the physics community in quantum filtering as an applied technique in quantum feedback and control \cite {AASDM02}- \cite{WM93}. An additional driver is the desire to go beyond the situation of a vacuum field and derive the filter for other physically important states such as thermal, squeezed, single photon states, etc. In a previous publication \cite{G_scat_PRA15} we derived a quantum Markovian model for an opto-mechanical system consisting of a quantum mechanical mirror interacting with quantum optical input fields via radiation pressure, and in particular were able to construct the quantum filter for the position of the mirror based on the continual monitoring of scattered photons. To obtain a non-trivial result, we had to place the input fields in a coherent state of non-zero intensity and rely on the filtering theory for coherent state inputs \cite{GK_COSA10}. In this note we wish to treat the problem of constructing the filter for non-demolition quadrature and photon-counting measurements of the output of a Mach-Zehnder interferometer with the purpose of estimating the phase difference between the two arms of the interferometer: see Figure \ref{fig:Interferometer}.
\begin{figure}
\caption{The model is fully quantum: we have a Mach-Zehnder interferometer in which there is a quantum mechanical phase associated with one of the arms (this may be due to one or more of the mirrors being an opto-mechanical system); the input fields are modelled as quantum input processes on the appropriate Boson Fock space (see Section \ref{sec:setup}).}
\label{fig:Interferometer}
\end{figure}
Here the phase is treated as a quantum mechanical object, so the problem is genuinely one of estimating the quantum state of the interferometer phase variable. As the interaction of the photons with the interferometer is purely scattering (so no emission or absorption), we must take one of the input fields to be in a coherent state with intensity function $\beta $. The model may be thought of as the continuous variable analogue of the discrete model examined recently by Harrell in \cite{Harrell}: indeed, it is reasonable to expect that the continuous time limit of this model leads to the results presented here by the type of arguments presented in \cite{G_Sob_2004}.
The paper is organized as follows. In Section \ref{sec:setup} we describe the model of a Mach-Zehnder interferometer with appropriate continuous-variable quantum inputs. A fully quantum stochastic model of the interferometer phase observable and the photon fields is presented in terms of quantum stochastic calculus \cite{HP}-\cite{GC85}. In Section \ref{sec:filter} we describe the basic estimation problem and state the main result, which is the form of the filters in the language of stochastic estimation: these may then be rewritten in terms of stochastic master equations, and we give the equivalent form for homodyning. In Section \ref{sec:derive_filters} we derive the filters using the characteristic function approach. Finally in Section \ref{sec:Applications} we solve the filter in a linearized regime, equivalent to a quantum Kalman-Bucy filter, and discuss the physical properties of the system including the ``collapse of the wave-function''.
\section{The Experimental Set Up} \label{sec:setup} We consider an interferometer as shown in Figure \ref{fig:Interferometer} where two continuous wave optical inputs $b_{1}^{\text{in}}$ and $b_{2}^{ \text{in}}$ are mixed in a 50-50 beam splitter and then recombined in a second 50-50 beam splitter. The second path in the interferometer has a phase $\theta $ relative to the first path. We treat $\theta $ as a quantum mechanical observable and we aim to estimate the corresponding state by measuring one of the output fields. We first consider a homodyne scheme where we measure the quadrature associated with the output $ b_{1}^{\text{out}}$. The problem would of course be trivial if both inputs were in the vacuum state, so we assume that one of the inputs, $b_{1}^{ \text{in}}$, is in a coherent state while the other is in the vacuum.
The scattering matrix $S$ relating the inputs processes to outputs is given by the product $S=TPT$ where $T=\frac{1}{\sqrt{2}}\left[ \begin{array}{cc} 1 & i \\ i & 1 \end{array} \right] $ and $P=\left[ \begin{array}{cc} 1 & 0 \\ 0 & -e^{i\theta } \end{array} \right] $ are the beam splitter matrix and interferometer path transfer matrix respectively. That is \begin{eqnarray} S &=&\left[ \begin{array}{cc} S_{11} & S_{12} \\ S_{21} & S_{22} \end{array} \right] \nonumber \\ &\equiv &\frac{1}{2}\left[ \begin{array}{cc} 1+e^{i\theta } & i\left( 1-e^{i\theta }\right) \\ i\left( 1-e^{i\theta }\right) & -\left( 1+e^{i\theta }\right) \end{array} \right] . \label{eq:S_interferometer} \end{eqnarray} Note that the entries $S_{jk}$ depend on the observable $\theta $ and are therefore operators on the associated Hilbert space $\mathfrak{h}$. We may additionally have a Hamiltonian $H$ leading to a non-trivial evolution of the observable $\theta $.
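Since every entry of $S$ is a function of the single observable $\theta $, the algebra above may be checked by treating $e^{i\theta }$ as a commuting unimodular scalar. The following \texttt{sympy} sketch is purely illustrative and not part of the model; it confirms $S=TPT$ and the unitarity of $S$ under this replacement.
\begin{verbatim}
from sympy import Matrix, I, sqrt, exp, symbols, simplify, eye

theta = symbols('theta', real=True)
T = Matrix([[1, I], [I, 1]]) / sqrt(2)
P = Matrix([[1, 0], [0, -exp(I * theta)]])
S = (T * P * T).applyfunc(simplify)

S_expected = Matrix([[1 + exp(I * theta), I * (1 - exp(I * theta))],
                     [I * (1 - exp(I * theta)), -(1 + exp(I * theta))]]) / 2

print((S - S_expected).applyfunc(simplify))    # zero matrix
print((S * S.H - eye(2)).applyfunc(simplify))  # zero matrix, i.e. S S^* = I
\end{verbatim}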
\subsection{Fully Quantum Model}
The inputs satisfy the singular commutation relations \begin{eqnarray*} \left[ b_{j}^{\text{in}}\left( t\right) ,b_{k}^{\text{in*}}\left( s\right) \right] =\delta _{jk}\delta \left( t-s\right) , \end{eqnarray*} and in the following we shall work with the processes \begin{eqnarray*} B_{k}\left( t\right) &=&\int_{0}^tb_{k}^{\text{in}}\left( s\right) ds, \\ B_{k}\left( t\right) ^{\ast } &=&\int_{0}^tb_{k}^{\text{in*}}\left( s\right) ds, \\ \Lambda _{jk}\left( t\right) &=&\int_{0}^tb_{j}^{\text{in*}}\left( s\right) b_{k}^{\text{in}}\left( s\right) ds, \end{eqnarray*} which correspond to well defined operators on a Fock space $\mathfrak{F}$. The Hudson-Parthasarathy theory of quantum stochastic calculus \cite{HP} gives an analogue of the It\={o} calculus for integrals with respect to these processes. We note the It\={o} table \begin{eqnarray} dB_{j}dB_{k}^{\ast } &=&\delta _{jk}dt, \nonumber \\ dB_{j}d\Lambda _{kl} &=&\delta _{jk}dB_{l}, \nonumber \\ d\Lambda _{jk}dB_{l}^{\ast } &=&\delta _{kl}dB_{j}^{\ast }, \nonumber \\ d\Lambda _{jk}d\Lambda _{lm} &=&\delta _{kl}d\Lambda _{jm}, \label{eq:QIT} \end{eqnarray} with other products of differentials vanishing.
The general class of unitary processes on $\mathfrak{h}\otimes \mathfrak{F} $ driven by these fundamental processes is given in \cite{HP} and involves coefficients $\left( S,L,H\right) $ corresponding to the scattering $S$, the coupling $L$ and the Hamiltonian $H$ for the non-field component. In our case there is no photo-emissive coupling of input fields with the interferometer, so we set $L=0$. The most general form then reduces to \begin{eqnarray*} dU_t=\left\{ \sum_{j,k}\left( S_{jk}-\delta _{jk}\right) \otimes d\Lambda _{jk}\left( t\right) -iH\otimes dt\right\} U_t, \end{eqnarray*} with $S=\left[ S_{jk}\right] $ unitary, that is \begin{eqnarray} \sum_{k}S_{ki}^{\ast }S_{kj}=\delta _{ij}I_{\mathfrak{h}}= \sum_{k}S_{ik}S_{jk}^{\ast }. \label{eq:S_unitary} \end{eqnarray}
\subsection{Internal Dynamics of the Interferometer}
For $X$ an arbitrary operator on the Hilbert space $\mathfrak{h}$ of the interferometer, its Heisenberg evolution is given by \begin{eqnarray*} j_t\left( X\right) =U_t^{\ast }\left( X\otimes I_{\mathfrak{F}}\right) U_t \end{eqnarray*} and from the quantum It\={o} calculus we find Langevin equation \begin{eqnarray*} dj_t\left( X\right) &=&\sum_{i,j,k}j_t\left( S_{ki}^{\ast }XS_{kj}-\delta _{ij}X\right) \otimes d\Lambda _{ij}\left( t\right) \\ &&-ij_t\left( \left[ X,H\right] \right) \otimes dt. \end{eqnarray*} For the specific case of the scattering matrix (\ref{eq:S_interferometer}) we find \begin{eqnarray} &&dj_t\left( X\right) =-ij_t\left( \left[ X,H\right] \right) \otimes dt \nonumber \\ &&+\frac{1}{2}j_t\left( e^{-i\theta }Xe^{i\theta }-X\right) \otimes \left( d\Lambda _{11}-id\Lambda _{12}+id\Lambda _{21}+d\Lambda _{22}\right) . \nonumber \\ \label{eq:Heisenberg} \end{eqnarray}
\subsection{Input-Output Relations}
The output fields $B_{j}^{\text{out}}\left( t\right) $ are defined by \begin{eqnarray*} B_{j}^{\text{out}}\left( t\right) \triangleq U_t^{\ast }\left( I_{ \mathfrak{h}}\otimes B_{j}^{\text{in}}\left( t\right) \right) U_t \end{eqnarray*} and again using the quantum It\={o} calculus we find \begin{eqnarray*} dB_{j}^{\text{out}}\left( t\right) =\sum_{k}j_t\left( S_{jk}\right) \otimes dB_{k}^{\text{in}}\left( t\right) . \end{eqnarray*}
\subsection{Homodyne Detection}
Our objective is to estimate the state of the interferometer at time $t$ based on the observations of the output quadrature $Y$ of the first output up to time $t$. Here we have \begin{eqnarray*} Y\left( t\right) &=&B_{1}^{\text{out}}\left( t\right) +B_{1}^{\text{out} }\left( t\right) ^{\ast }\nonumber \\ &=& U_t^{\ast }\left( I_{\mathfrak{h}}\otimes \left( B_{1}^{\text{in}}\left( t\right) +B_{1}^{\text{in}}\left( t\right) ^{\ast }\right) \right) U_t. \end{eqnarray*} We note that \begin{eqnarray} dY\left( t\right) =\sum_{k}j_t\left( S_{1k}\right) \otimes dB_{k}^{\text{in }}\left( t\right) +H.c. \label{eq:dY_quadrature} \end{eqnarray} The process $Y=Y^{\ast }$ is self-nondemolition by which we mean that $\left[ Y\left( t\right) ,Y\left( s\right) \right] =0$ for all times $t,s$. It furthermore satisfies the nondemolition property that $\left[ j_t\left( X\right) ,Y\left( s\right) \right] =0$ for all $t\geq s$, so that we may estimate present or future values of the observable $X$ in the Heisenberg picture based on the observations up to and including present time. We note that \begin{eqnarray*} \left( dY\right) ^{2}=dt, \end{eqnarray*} which follows from the quantum It\={o} table (\ref{eq:QIT}) and the unitarity condition (\ref{eq:S_unitary}).
Clearly the process $Y$ contains information about the scattering coefficients, however, it would possess the statistics of a standard Wiener process if we took the input fields to be in the vacuum state. It is for this reason we take the first input field to be in a coherent state corresponding to an intensity $\beta =\beta \left( t\right) $. The joint state is denoted as $\mathbb{E}_{\beta }$ and is the product state of the initial state of the interferometer (which may be a guess!) and the Gaussian state of the fields corresponding to input 1 in the coherent state with intensity $\beta $ and input 2 in the vacuum. Specifically, the Weyl operators have expectation \begin{multline*} \mathbb{E}_{\beta }\left[ e^{\sum_{k}\int f_{k}\left( t\right) dB_{k}^{\text{ in}}\left( t\right) ^{\ast }-H.c.}\right] =e^{-\frac{1}{2}\sum_{k}\int
|f_{k}\left( t\right) |^{2}dt} \\ \times e^{\int f_{1}\left( t\right) \beta \left( t\right) ^{\ast }dt-\int f_{1}\left( t\right) ^{\ast }\beta \left( t\right) dt}, \end{multline*} and so \begin{eqnarray} \mathbb{E}_{\beta }\left[ dY\left( t\right) \right] =\mathbb{E}_{\beta } \left[ j_t\left( S_{11}\right) \right] \beta \left( t\right) dt+\mathbb{E} _{\beta }\left[ j_t\left( S_{11}^{\ast }\right) \right] \beta \left( t\right) ^{\ast }dt \nonumber\\
\label{eq:E[dY]} \end{eqnarray} which is non-zero for $\beta \left( t\right) \neq 0$.
\subsection{Photon Counting Detection}
Alternatively we could count the number of photons at the first output channel. This time the measured process $Y$ is \begin{eqnarray*} Y\left( t\right) =U_t^{\ast }\left( I_{\mathfrak{h}}\otimes \Lambda _{11}^{ \text{out}}\left( t\right) \right) U_t \end{eqnarray*} and from the It\={o} calculus we obtain \begin{eqnarray} dY\left( t\right) &=&\sum_{j,k}j_t\left( S_{1j}^{\ast }S_{1k}\right) \otimes d\Lambda _{jk}^{\text{in}}\left( t\right) \nonumber \\ &=&j_t\left( \frac{1+\cos \theta }{2}\right) \otimes d\Lambda _{11}^{\text{ in}}\left( t\right) \nonumber \\ &&+j_t\left( \frac{\sin \theta }{2}\right) \otimes \left[ d\Lambda _{12}^{ \text{in}}\left( t\right) +d\Lambda _{21}^{\text{in}}\left( t\right) \right] \nonumber \\ &&+j_t\left( \frac{1-\cos \theta }{2}\right) \otimes d\Lambda _{22}^{\text{ in}}\left( t\right) . \label{eq:dY_photon_count} \end{eqnarray}
Here we note that \begin{eqnarray} \left( dY\right) ^{2}=j_t\left( \frac{1+\cos ^{2}\theta }{2}\right) \otimes d\Lambda _{11}^{\text{in}}\left( t\right) +\cdots \label{eq:dY^2_photon_count} \end{eqnarray} where the omitted terms are proportional to the increments $d\Lambda _{12}^{ \text{in}}\left( t\right) ,d\Lambda _{21}^{\text{in}}\left( t\right) $ and $ d\Lambda _{22}^{\text{in}}\left( t\right) $ which average to zero of the state $\mathbb{E}_{\beta }$.
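The trigonometric coefficients appearing in (\ref{eq:dY_photon_count}) follow from the explicit entries of $S$; since all of the operators involved are functions of $\theta $ and therefore commute, they may again be confirmed symbolically with $\theta $ treated as a real scalar (an illustrative check, not part of the derivation).
\begin{verbatim}
from sympy import I, exp, symbols, simplify, conjugate, cos, sin

theta = symbols('theta', real=True)
S11 = (1 + exp(I * theta)) / 2
S12 = I * (1 - exp(I * theta)) / 2

print(simplify(conjugate(S11) * S11 - (1 + cos(theta)) / 2))  # 0
print(simplify(conjugate(S11) * S12 - sin(theta) / 2))        # 0
print(simplify(conjugate(S12) * S11 - sin(theta) / 2))        # 0
print(simplify(conjugate(S12) * S12 - (1 - cos(theta)) / 2))  # 0
\end{verbatim}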
\section{Quantum Filtering} \label{sec:filter} Our goal is to derive the optimal estimate $\pi _t\left( X\right) $ for an observable $j_t\left( X\right) $ for the state $\mathbb{E}_{\beta }$ given the observations of $Y$ up to time $t$. To this end we set $\mathfrak{Y} _{t]} $ to be the (von Neumann) algebra generated by the family $\left\{ Y\left( s\right) :0\leq s\leq t\right\} $. As $Y$ is self-non-demolition, we have that $\mathfrak{Y}_{t]}$ is a commutative algebra, so the recorded measurement can be treated as an essentially classical stochastic process, as it should be. Every observable that commutes with $\mathfrak{Y}_{t]}$ will possess a well-defined joint (classical) statistical distribution with the measurements up to time $t$, and by the non-demolition property this includes $j_t\left( X\right) $. We therefore set \begin{eqnarray*}
\pi _t\left( X\right) =\mathbb{E}_{\beta }\left[ j_t\left( X\right) | \mathfrak{Y}_{t]}\right] \end{eqnarray*} which is the conditional expectation of $j_t\left( X\right) $ onto $ \mathfrak{Y}_{t]}$. The right hand side always exists since the algebra generated by $\mathfrak{Y}_{t]}$ and the additional element $j_t\left( X\right) $ is commutative, and so we exploit the fact that in classical probability theory conditional expectations always exist. This classical expectation is then understood as being a function of the commuting set $ \left\{ Y\left( s\right) :0\leq s\leq t\right\} $. It should be remembered that $\pi _t\left( X\right) $ can be defined in this way for an arbitrary observable $X$ of the interferometer system, and these $X$'s generally do not commute, so the construction is genuinely quantum in that regard. Note that while $\pi _t\left( X_{1}X_{2}\right) $ is generally different from $ \pi _t\left( X_{2}X_{1}\right) $, we will however have $\left[ \pi _t\left( X_{1}\right) ,\pi _t\left( X_{2}\right) \right] =0$ as we have conditioned onto the commutative algebra of operators $\mathfrak{Y}_{t]}$. Finally we mention that this estimate is optimal in the least squares sense. That is, for $X$ self-adjoint, we have \begin{eqnarray*} \mathbb{E}_{\beta }\bigg[\left( j_t\left( X\right) -\pi _t\left( X\right) \right) ^{2}\bigg]\leq \mathbb{E}_{\beta }\bigg[\left( j_t\left( X\right) -\hat{X}_t\right) ^{2}\bigg] \end{eqnarray*} for all $\hat{X}_t\in \mathfrak{Y}_{t]}$. In particular we have the ``orthogonality'' property
\begin{eqnarray*} \mathbb{E}_{\beta }\bigg[\left( j_t\left( X\right) -\pi _t\left( X\right) \right) C\left( t\right) \bigg]=0 \end{eqnarray*} for every $C\left( t\right) \in \mathfrak{Y}_{t]}$.
\subsection{The Filtering Equation} We will now state the main result. In both cases the filtering equation takes the form \begin{eqnarray} d\pi _t\left( X\right) &=&\left\{ \frac{1}{2}\pi _t\left( e^{-i\theta
}Xe^{i\theta }-X\right) |\beta \left( t\right) |^{2}-i\pi _t(\left[ X,H \right] )\right\} dt \nonumber \\ &&+\mathcal{H}_t\left( X\right) \,dI\left( t\right) . \label{eq:interf_filter} \end{eqnarray} The terms $\mathcal{H}_t(X)$ and the process $I(t)$ are specific to the physical mode of detection and we give these explicitly for the scheme of homodyne (quadrature) measurement and the photon counting scheme below.
\subsubsection{Quadrature Measurement} In this case we measure the quadrature of output field 1. Here we find that \begin{eqnarray} \mathcal{H}_t\left( X\right) &=&\frac{1}{2}\left[ \pi _t\left( Xe^{i\theta }\right) -\pi _t\left( X\right) \pi _t\left( e^{i\theta }\right) \right] \beta \left( t\right) \nonumber \\ &+& \frac{1}{2}\left[ \pi _t\left( e^{-i\theta }X\right) -\pi _t\left( e^{-i\theta }\right) \pi _t\left( X\right) \right] \beta \left( t\right) ^{\ast }. \nonumber \\ \label{eq:H_final_quad} \end{eqnarray} The innovations process $I$ is defined by \begin{eqnarray} dI\left( t\right) =dY\left( t\right) -\left[ \pi _t\left( S_{11}\right) \beta \left( t\right) +\pi _t\left( S_{11}^{\ast }\right) \beta \left( t\right) ^{\ast }\right] dt, \nonumber \\ \label{eq:Innovations} \end{eqnarray} and $I\left( 0\right) =0$. Statistically it has the distribution of a standard Wiener process.
We see that $dI\left( t\right) $ is the difference between the actual observed increment $dY\left( t\right) $ and the expected increment $\left[ \pi _t\left( S_{11}\right) \beta \left( t\right) +\pi _t\left( S_{11}^{\ast }\right) \beta \left( t\right) ^{\ast }\right] dt$ based on the filter. In stochastic estimation problems $I(t)$ is referred to as the innovations process.
\subsubsection{Photon Counting Measurement} If instead we count the photons coming out of output 1, we obtain \begin{eqnarray}
\mathcal{H}_t (X) &=& \frac{ 1 }{1+\pi _t\left( \cos ^{2}\theta \right) } \nonumber \\ && \times \Big\{ \frac{1}{2}\pi _t\left( e^{-i\theta }Xe^{i\theta }+e^{-i\theta }X+Xe^{i\theta }-X\right) \nonumber \\ && \quad \quad \qquad - \pi _t\left( X\right) \pi _t\left( \cos \theta \right) \Big\} . \label{eq:H_photon_count} \end{eqnarray} The innovations process is this time given by \begin{eqnarray} dI\left( t\right) =dY\left( t\right) -\pi _t\left( \frac{1+\cos \theta }{2}
\right) |\beta \left( t\right) |^{2}dt, \label{eq:Innovations_photon_count} \end{eqnarray} and $I\left( 0\right) =0$. This time the innovations have the statistical distribution of a compensated Poisson process. Once again, $dI(t)$ is the difference between the observed measurement increment $dY(t)$ and the expected increment $\mathbb{E}_\beta [ dY(t) ]$.
\subsubsection{Equivalent Stochastic Master Equation}
We may alternatively use the dual form for the filter where we express everything in terms of the conditional state $\varrho _t$ of the interferometer system based on the measurements so that \begin{eqnarray*} \pi _t\left( X\right) =\text{tr}\left\{ \varrho _tX\right\} . \end{eqnarray*} In the quadrature case, the filter equation thereby translates into the equivalent stochastic master equation (SME) for $\varrho _t$ \begin{eqnarray} d\varrho _t &=&\frac{1}{2}\left( e^{i\theta }\varrho _te^{-i\theta
}-\varrho _t\right) |\beta \left( t\right) |^{2}dt+i\left[ \varrho _t,H \right] dt \nonumber \\ &&+\frac{1}{2}\left( e^{i\theta }\varrho _t-\zeta _t\varrho _t\right) \beta \left( t\right) dI\left( t\right) \nonumber \\ &&+\frac{1}{2}\left( \varrho _te^{-i\theta }-\zeta _t^{\ast }\varrho _t\right) \beta \left( t\right) ^{\ast }dI\left( t\right) \label{eq:SME} \end{eqnarray} where \begin{eqnarray} \zeta _t=\text{tr}\left\{ \varrho _te^{i\theta }\right\} . \label{eq:zeta} \end{eqnarray}
The presence of the term (\ref{eq:zeta}) in the SME (\ref{eq:SME}) means that the equation is nonlinear. This filter is diffusive since the innovations are a standard Wiener process. An SME formulation may likewise be given for the photon counting: this again will be nonlinear, but this time will be driven by a jump process corresponding to the observation of a photon arrival at the detector.
If we choose to ignore the measurement record (a nonselective measurement), then we obtain the following master equation for $\bar{\rho}_t=\mathbb{E}_{\beta }\left[ \varrho _t\right]$: \begin{eqnarray} \frac{d\bar{\rho}_t}{dt}=\frac{1}{2}\left( e^{i\theta }\bar{\rho}
_te^{-i\theta }-\bar{\rho}_t\right) |\beta \left( t\right) |^{2}+i\left[ \bar{\rho}_t,H\right] . \label{eq:ME} \end{eqnarray} In the language of quantum trajectories, the SME (\ref{eq:SME}) is an ``unravelling'' of the master equation (\ref{eq:ME}). The same master equation is unravelled by the photon counting SME.
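To illustrate the averaged dynamics (\ref{eq:ME}), the following toy numerical sketch (entirely illustrative: $\theta $ is truncated to a three-level diagonal observable, $H$ is taken diagonal in the same basis, and $\beta $ is constant, all of which are our own assumptions) integrates the master equation with a simple Euler step. In the eigenbasis of $\theta $ the off-diagonal matrix elements of $\bar{\rho}_t$ decay, while the diagonal ones are preserved.
\begin{verbatim}
import numpy as np

theta = np.array([0.0, 0.4, 0.9])            # toy eigenvalues of the phase observable
U = np.diag(np.exp(1j * theta))              # e^{i theta}
H = np.diag([0.0, 0.5, 1.0])                 # free Hamiltonian, diagonal in the same basis
beta = 1.0                                   # constant coherent intensity
rho = np.full((3, 3), 1 / 3, dtype=complex)  # pure uniform superposition

dt, steps = 1e-3, 20000
for _ in range(steps):
    drho = 0.5 * (U @ rho @ U.conj().T - rho) * abs(beta) ** 2 \
           + 1j * (rho @ H - H @ rho)        # i [rho, H]
    rho = rho + drho * dt

print(np.round(np.diag(rho).real, 3))        # diagonal preserved: [0.333 0.333 0.333]
print(round(abs(rho[0, 1]), 3))              # off-diagonal coherence has decayed
\end{verbatim}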
\section{Derivation of the filters} \label{sec:derive_filters} In this section we derive the form of the filters given in Section \ref{sec:filter}. We will use a technique known as the \textit{characteristic function approach}. This is a direct method for calculating the filtered estimate $\pi _t\left( X\right) $, based on introducing a process $C\left( t\right) $ satisfying the QSDE \begin{eqnarray} dC\left( t\right) =f\left( t\right) C\left( t\right) dY\left( t\right) , \end{eqnarray} with initial condition $C\left( 0\right) =I$. Here we assume that $f$ is integrable, but otherwise arbitrary. The technique is to make an ansatz of the form \begin{eqnarray} d\pi _t\left( X\right) =\mathcal{F}_t\left( X\right) dt+\mathcal{H} _t\left( X\right) dY\left( t\right) \label{filter ansatz} \end{eqnarray} where we assume that the processes $\mathcal{F}_t\left( X\right) $ and $ \mathcal{H}_t\left( X\right) $ are adapted and lie in $\mathfrak{Y}_{t]}$. These coefficients may be deduced from the identity \begin{eqnarray*} \mathbb{E}\left[ \left( \pi _t\left( X\right) -j_t\left( X\right) \right) C\left( t\right) \right] =0 \end{eqnarray*} which is valid since $C\left( t\right) \in \mathfrak{Y}_{t]}$. We note that the It\={o} product rule implies $I+II+III=0$ where
\begin{eqnarray*} I &=&\mathbb{E}\left[ \left( d\pi _t\left( X\right) -dj_t\left( X\right) \right) C\left( t\right) \right] , \\ II &=&\mathbb{E}\left[ \left( \pi _t\left( X\right) -j_t\left( X\right) \right) dC\left( t\right) \right] , \\ III &=&\mathbb{E}\left[ \left( d\pi _t\left( X\right) -dj_t\left( X\right) \right) dC\left( t\right) \right] . \end{eqnarray*}
\subsection{The Quadrature Filter}
We now compute the filter when $Y$ is the measured quadrature of the first output channel.
\subsubsection{Term I}
Here we have (omitting $t$-dependence for ease of notation)
\begin{multline*} I=\mathbb{E}_{\beta }\left[ \mathcal{F}\left( X\right) C+\mathcal{H}\left( X\right) (j(S_{11})\beta +j(S_{11}^{\ast })\beta ^{\ast })C\right] dt \\ -\frac{1}{2}\mathbb{E}_{\beta }\left[ j\left( e^{-i\theta }Xe^{i\theta
}-X\right) C\right] |\beta |^{2}dt+i\mathbb{E}_{\beta }\left[ \left[ X,H \right] C\right] dt \end{multline*} where we use the fact that \begin{eqnarray*}
\mathbb{E}_{\beta }\left[ d\Lambda _{11}\left( t\right) \right] =|\beta
\left( t\right) |^{2}dt \end{eqnarray*} while $\mathbb{E}_{\beta }\left[ d\Lambda _{jk}\left( t\right) \right] =0$ otherwise.
\subsubsection{Term II}
From (\ref{eq:E[dY]}) we obtain
\begin{eqnarray*} II=f\,\mathbb{E}_{\beta }\left[ \left( \pi \left( X\right) -j(X)\right) C\left( t\right) \left( j(S_{11})\beta +j(S_{11}^{\ast })\beta ^{\ast }\right) \right] dt. \end{eqnarray*}
\subsubsection{Term III}
We have
\begin{multline*} III=f\,\mathbb{E}_{\beta }\left[ \mathcal{H}\left( X\right) C\right] dt \\ -f\,\frac{1}{2}\mathbb{E}_{\beta }\left[ j\left( e^{-i\theta }Xe^{i\theta }-X\right) \left( j\left( S_{11}^{\ast }\right) -ij\left( S_{12}^{\ast }\right) \right) \beta ^{\ast }C\right] dt \end{multline*} where we use the fact that $\left( dY\right) ^{2}=dt$ and from (\ref {eq:Heisenberg}) and (\ref{eq:dY_quadrature}) \begin{eqnarray*} dj\left( X\right) dY &=&\frac{1}{2}j_t\left( e^{-i\theta }Xe^{i\theta }-X\right) \\ &&\times \left( j\left( S_{11}^{\ast }\right) -ij\left( S_{12}^{\ast }\right) \right) dB_{1}^{\ast }+\cdots \end{eqnarray*} where the omitted terms average to zero. Note that we used the identities $ d\Lambda _{11}dB_{1}^{\ast }=d\Lambda _{12}dB_{2}^{\ast }=dB_{1}^{\ast }$.
\subsubsection{Computing the Filter} Now from the identity $I+II+III=0$ we may extract separately the coefficients of $f\left( t\right) C\left( t\right) $ and $C\left( t\right) $, since $f\left( t\right) $ is arbitrary, to deduce \begin{multline*} \pi \left( \left( \pi \left( X\right) -j(X)\right) \left( j(S_{11})\beta +j(S_{11}^{\ast })\beta ^{\ast }\right) \right) + \pi (\mathcal{H}\left( X\right)) \\ -\pi \left( \frac{1}{2}j\left( e^{-i\theta }Xe^{i\theta }-X\right) \left( j\left( S_{11}^{\ast }\right) -ij\left( S_{12}^{\ast }\right) \right) \beta ^{\ast }\right) =0, \end{multline*} and \begin{multline*} 0=\pi \left( \mathcal{F}\left( X\right) +\mathcal{H}\left( X\right) (j(S_{11})\beta +j(S_{11}^{\ast })\beta ^{\ast })\right) \\
-\frac{1}{2}\pi \left( e^{-i\theta }Xe^{i\theta }-X\right) |\beta |^{2}+i\pi (\left[ X,H\right] ). \end{multline*} Using the projective property of the conditional expectation $\left( \pi _t\circ \pi _t=\pi _t\right) $ and the assumption that $\mathcal{F} _t\left( X\right) $ and $\mathcal{H}_t\left( X\right) $ already lie in $ \mathfrak{Y}_{t]}$, we find after a little algebra that \begin{eqnarray} \mathcal{H}_t &=&\left[ \pi _t\left( XS_{11}\right) -\pi _t\left( X\right) \pi _t\left( S_{11}\right) \right] \beta \left( t\right) \nonumber \\ &&+[\frac{1}{2}\pi _t\left( \left( e^{-i\theta }Xe^{i\theta }+X\right) S_{11}^{\ast }\right) -\pi _t\left( X\right) \pi _t\left( S_{11}^{\ast }\right) \nonumber \\ &&-\frac{i}{2}\pi _t\left( \left( e^{-i\theta }Xe^{i\theta }-X\right) S_{12}^{\ast }\right) ]\beta \left( t\right) ^{\ast }, \label{eq:H_filter} \\ \mathcal{F}_t &=&\frac{1}{2}\pi _t\left( e^{-i\theta }Xe^{i\theta
}-X\right) |\beta |^{2}-i\pi _t(\left[ X,H\right] ) \nonumber \\ &&-\mathcal{H}_t\left( X\right) \left[ \pi _t\left( S_{11}\right) \beta \left( t\right) +\pi _t\left( S_{11}^{\ast }\right) \beta \left( t\right) ^{\ast }\right] . \label{eq:F_filter} \end{eqnarray}
Inserting the expressions $S_{11}\equiv \frac{1}{2}\left( 1+e^{i\theta }\right) $ and $S_{12}=\frac{i}{2}\left( 1-e^{i\theta }\right) $ into (\ref {eq:H_filter}) leads to the more symmetric form (\ref{eq:H_final_quad}).
Substituting the identity (\ref{eq:F_filter}) into the equation (\ref{filter ansatz}), $d\pi _t\left( X\right) =\mathcal{F}_t\left( X\right) dt +\mathcal{H}_t\left( X\right) dY\left( t\right)$, we arrive explicitly at (\ref{eq:interf_filter}) where $\mathcal{H}_t\left( X\right) $ is given by (\ref{eq:H_filter}) and the process $I(t)$ is defined as in (\ref{eq:Innovations}).
Comparing with (\ref{eq:E[dY]}), we see that the process $I$ is mean-zero for the state $\mathbb{E}_{\beta }$ and satisfies the property $\left( dI\right) ^{2}=dt$. By L\'{e}vy's characterization theorem, it is a Wiener process: see for instance \cite{Rogers_Williams} Theorem 33.1.
\subsection{The Photon Count Filter}
We now compute the filter when $Y$ is the measured photon count of the first output channel. Again we omit $t$-dependence for ease of notation.
\subsubsection{Term I}
This time, using (\ref{eq:dY_photon_count}) and (\ref{eq:E[dY]}), we have
\begin{multline*} I=\mathbb{E}_{\beta }\left[ \mathcal{F}\left( X\right) C+\mathcal{H}\left(
X\right) j(\frac{1+\cos \theta }{2})|\beta |^{2}C\right] dt \\ -\frac{1}{2}\mathbb{E}_{\beta }\left[ j\left( e^{-i\theta }Xe^{i\theta
}-X\right) C\right] |\beta |^{2}dt+i\mathbb{E}_{\beta }\left[ \left[ X,H \right] C\right] dt. \end{multline*}
\subsubsection{Term II}
From (\ref{eq:dY_photon_count}) and (\ref{eq:E[dY]}) we obtain
\begin{eqnarray*} II=f\,\mathbb{E}_{\beta }\left[ \left( \pi \left( X\right) -j(X)\right)
C\left( t\right) j(\frac{1+\cos \theta }{2})\right] |\beta |^{2}dt. \end{eqnarray*}
\subsubsection{Term III}
We have
\begin{multline*} III=f\,\mathbb{E}_{\beta }\left[ \mathcal{H}\left( X\right) j\left( \frac{
1+\cos \theta }{2}\right) C\right] \left| \beta \right| ^{2}dt \\ -f\,\frac{1}{4}\mathbb{E}_{\beta }\left[ j\left( e^{-i\theta }Xe^{i\theta
}+e^{-i\theta }X-Xe^{i\theta }-X\right) C\right] \left| \beta \right| ^{2}dt \end{multline*} where we now use (\ref{eq:dY^2_photon_count}), and the fact that from (\ref {eq:Heisenberg}) and (\ref{eq:dY_photon_count}) \begin{eqnarray*} dj\left( X\right) dY &=&\frac{1}{2}j_t\left( e^{-i\theta }Xe^{i\theta }-X\right) \\ &&\times \left( j\left( S_{11}^{\ast }\right) -ij\left( S_{12}^{\ast }\right) \right) dB_{1}^{\ast }+\cdots \end{eqnarray*} where the omitted terms average to zero.
\subsubsection{Computing the Filter}
Collecting the coefficients of $f\left( t\right) C\left( t\right) $ and $ C\left( t\right) $ from the identity $I+II+III=0$ (again using that $f\left( t\right) $ was arbitrary), we now obtain the expression (\ref{eq:H_photon_count}) for $\mathcal{H}_t\left( X\right)$ and \begin{eqnarray} \mathcal{F}_t (X) &=&\frac{1}{2}\pi _t\left( e^{-i\theta }Xe^{i\theta
}-X\right) |\beta |^{2}-i\pi _t(\left[ X,H\right] ) \nonumber \\ &&-\mathcal{H}_t\left( X\right) \pi _t\left( \frac{1+\cos \theta }{2}
\right) |\beta \left( t\right) |^{2}. \label{eq:F_filter_photon} \end{eqnarray}
Substituting this into the equation (\ref{filter ansatz}) gives the stated result.
\section{Collapse of the Wavefunction} \label{sec:Applications} We shall follow \cite{Harrell} and set \begin{eqnarray*} \theta =2kq+\pi \left( 2n+\frac{1}{2}\right) \end{eqnarray*} and for $k$ small we make the linearization \begin{eqnarray*} e^{i\theta }\approx i-2kq. \end{eqnarray*} Under this approximation the stochastic master equation becomes linear and we have \begin{eqnarray*} \mathcal{H}_t\left( X\right) &=&-k\left[ \pi _t\left( Xq\right) -\pi _t\left( X\right) \pi _t\left( q\right) \right] \beta \left( t\right) \\ &&-k\left[ \pi _t\left( qX\right) -\pi _t\left( X\right) \pi _t\left( q\right) \right] \beta \left( t\right) ^{\ast }. \end{eqnarray*} If we assume that the interferometer is internally static (that is, we take the Hamiltonian $H=0$), then for functions of the observable $q$ we get \begin{eqnarray*} d\pi _t\left( f\left( q\right) \right) &=& -k\left[ \pi _t\left( f\left( q\right) q\right) -\pi _t\left( f\left( q\right) \right) \pi _t\left( q\right) \right] \nonumber \\ &&\times \left( \beta \left( t\right) +\beta \left( t\right) ^{\ast }\right) dI\left( t\right) . \end{eqnarray*} So we find $d\pi _t\left( q\right) =-k\mathscr{V}_t\left( \beta \left( t\right) +\beta \left( t\right) ^{\ast }\right) dI\left( t\right)$, where \begin{eqnarray} \mathscr{V}_t\triangleq \pi _t\left( q^{2}\right) -\pi _t\left( q\right) ^{2}. \label{eq:var_q} \end{eqnarray} We note that $\mathscr{V}_t$ is the conditional variance of the observable $q$.
The filter equation for the observable $q$ is of Kalman-Bucy form. In such cases, if the initial state implies a Gaussian distribution for $q$, then classically one expects the Gaussianity to be maintained and that the variance $\mathscr{V}_t$ is deterministic. One will then have the property that all moments may be expressed in terms of first and second moments, and in particular third order moments of jointly Gaussian observables $X,Y,Z$ may be rewritten as \begin{multline} \pi _{t}(XYZ)\equiv \pi _{t}(X)\pi _{t}(YZ)+\pi _{t}(Y)\pi _{t}(XZ) \\ +\pi _{t}(Z)\pi _{t}(XY)-2\pi _{t}(X)\pi _{t}(Y)\pi _{t}(Z). \label{eq:3rd moments} \end{multline} We will now show that this applies in the present situation.
We see that \begin{eqnarray*} d\pi _t\left( q^{2}\right) &=&-k\left[ \pi _t\left( q^{3}\right) -\pi _t\left( q^{2}\right) \pi _t\left( q\right) \right] \\ &&\left( \beta \left( t\right) +\beta \left( t\right) ^{\ast }\right) dI\left( t\right) . \end{eqnarray*} However, if the conditional distribution is Gaussian, then we may use (\ref {eq:3rd moments}) to write the third moment $\pi _t\left( q^{3}\right) $ as \begin{eqnarray*} \pi _t\left( q^{3}\right) \equiv 3\pi _t\left( q\right) \pi _t\left( q^{2}\right) -2\pi _t\left( q\right) ^{3} \end{eqnarray*} so that $d\pi _t\left( q^{2}\right) \equiv -2k\mathscr{V}_t\pi _t\left( q\right) \left( \beta \left( t\right) +\beta \left( t\right) ^{\ast }\right) dI\left( t\right) $. Applying the It\={o} calculus, and recalling that $\left( dY\right) ^{2}=dt$ (hence $\left( dI\right) ^{2}=dt$ as well), we have \begin{eqnarray*} d\mathscr{V}_t &=&d\pi _t\left( q^{2}\right) -2\pi _t\left( q\right) d\pi _t\left( q\right) -\left( d\pi _t\left( q\right) \right) ^{2} \\ &\equiv &-\left( d\pi _t\left( q\right) \right) ^{2} \\ &=&-k^{2}\mathscr{V}_t^{2}\left( \beta \left( t\right) +\beta \left( t\right) ^{\ast }\right) ^{2}dt. \end{eqnarray*} The first two terms $d\pi _t\left( q^{2}\right) -2\pi _t\left( q\right) d\pi _t\left( q\right) $ cancel exactly, leaving a deterministic ODE for $\mathscr{V}_t$.
We therefore obtain the following equation for the estimated position observable: \begin{eqnarray} d\pi _t\left( q\right) =-k\mathscr{V}_t\left( \beta \left( t\right) +\beta \left( t\right) ^{\ast }\right) \,dI\left( t\right) , \label{eq:filter_q} \end{eqnarray} where the conditional variance $\mathscr{V}_t$ satisfies the deterministic ODE \begin{eqnarray} \frac{d\mathscr{V}_t}{dt}=-k^{2}\mathscr{V}_t^{2}\left( \beta \left( t\right) +\beta \left( t\right) ^{\ast }\right) ^{2}. \label{eq:filter_var_q} \end{eqnarray} Note that $\mathscr{V}_t$ is decreasing so long as Re$\beta \left( t\right) \neq 0$, and constant in any interval where Re$\beta \left( t\right) $ vanishes.
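For concreteness, the following is a minimal numerical sketch (not taken from the original derivation) of the conditioned dynamics (\ref{eq:filter_q})--(\ref{eq:filter_var_q}), written in Python. It assumes a constant real amplitude $\beta$, so that $\beta +\beta ^{\ast }=2\beta $, and it treats the innovations increments $dI$ as independent Wiener increments, in line with the L\'{e}vy characterization recalled above; all numerical parameter values are illustrative choices.
\begin{verbatim}
# Euler-Maruyama sketch of the conditional mean and variance of q:
#   d pi_t(q) = -k V_t (beta + beta*) dI_t,
#   dV_t/dt   = -k^2 V_t^2 (beta + beta*)^2,  with beta constant and real.
import numpy as np

rng = np.random.default_rng(0)
k, beta = 0.5, 1.0            # illustrative coupling and coherent amplitude
dt, n_steps = 1e-3, 5000
q_hat, V = 0.0, 1.0           # initial conditional mean and variance of q

for _ in range(n_steps):
    dI = np.sqrt(dt) * rng.standard_normal()   # innovations increment
    q_hat += -k * V * (2 * beta) * dI          # stochastic update of pi_t(q)
    V += -(k * 2 * beta * V) ** 2 * dt         # deterministic variance ODE

# compare with the closed-form solution V_t = 1/(V_0^{-1} + k^2 (2 beta)^2 t)
t_final = n_steps * dt
print(V, 1.0 / (1.0 + k ** 2 * (2 * beta) ** 2 * t_final))
\end{verbatim}
The variance update is deterministic, so the two printed values agree up to the time-discretization error, illustrating the collapse $\mathscr{V}_t\rightarrow 0$ discussed below.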
We may further specify that the initial state is one where both canonical coordinates $q$ and $p$ are jointly Gaussian. We may determine the filtered estimate $\pi _t\left( p\right) $: first note that $e^{-i\theta }pe^{i\theta }-p=2\hbar k$ and that \begin{eqnarray*} \mathcal{H}_t\left( p\right) =-k\left( \mathscr{C}_t-\frac{i}{2}\hbar \right) \beta \left( t\right) -k\left( \mathscr{C}_t+\frac{i}{2}\hbar \right) \beta \left( t\right) ^{\ast } \end{eqnarray*} where we introduce the symmetrized conditional covariance of $q$ and $p$ as \begin{eqnarray} \mathscr{C}_t \triangleq \frac{1}{2}\pi _t\left( qp+pq\right) -\pi _t\left( q\right) \pi _t\left( p\right) . \label{eq:C_bar} \end{eqnarray} Therefore \begin{eqnarray}
d\pi _t\left( p\right)& =&\hbar k|\beta \left( t\right) |^{2}dt \nonumber \\ &-&2k\mathscr{C}_t \text{Re}\beta \left( t\right) \,dI(t)-k\hbar \text{Im}\beta \left( t\right) \,dI\left( t\right) . \label{eq:pi_p} \end{eqnarray}
Unlike the case of $\pi _t\left( q\right) $, we find a drift term associated with $\pi _t\left( p\right) $ given by $\hbar k|\beta \left(
t\right) |^{2}dt$ which is interpreted as the momentum imparted by the coherent source over the time interval $t$ to $t+dt$. To compute $\pi _t\left( qp\right) $ we start with the filter equation for $X=qp$ which reads as \begin{eqnarray*}
d\pi _t\left( qp\right) =\hbar k\pi _t\left( q\right) |\beta (t)|^{2}dt+ \mathcal{H}_t\left( qp\right) dI\left( t\right) \end{eqnarray*} with \begin{eqnarray*} \mathcal{H}_t\left( qp\right) &=&-k\left[ \pi _t\left( qpq\right) -\pi _t\left( qp\right) \pi _t\left( q\right) \right] \beta \left( t\right) \\ &&-k\left[ \pi _t\left( q^{2}p\right) -\pi _t\left( qp\right) \pi _t\left( q\right) \right] \beta \left( t\right) ^{\ast } \end{eqnarray*} and once again we may use (\ref{eq:3rd moments}) to break down the third order moments. In fact, we obtain \begin{eqnarray*} \mathcal{H}_t\left( qp\right) &=&-k\left[ \pi _t\left( q\right) \left( \mathscr{C}_t-\frac{i}{2}\hbar \right) -\pi _t\left( p\right) \mathscr{V}_t\right] \beta \left( t\right) \\ &&-k\left[ \pi _t\left( q\right) \left( \mathscr{C}_t+\frac{i}{2}\hbar \right) -\pi _t\left( p\right) \mathscr{V}_t\right] \beta \left( t\right) ^{\ast }. \end{eqnarray*} From this we see that \begin{eqnarray*} &&d\left[ \pi _t\left( qp\right) -\pi _t\left( q\right) \pi _t\left( p\right) \right] \\ &=&d\pi _t\left( qp\right) -d\pi _t\left( q\right) \pi _t\left( p\right) \\ &&-\pi _t\left( q\right) d\pi _t\left( p\right) -d\pi _t\left( q\right) d\pi _t\left( p\right) \\ &\equiv &-k^{2}\mathscr{V}_t\left( \beta \left( t\right) +\beta \left( t\right) ^{\ast }\right) \\ &\times& \left( \left[ \pi _t\left( qp\right) -\pi _t\left( q\right) \pi _t\left( p\right) \right] \left( \beta \left( t\right) +\beta \left( t\right) ^{\ast }\right) -i\hbar \beta \left( t\right) \right) dt. \end{eqnarray*} Once again, the $dI\left( t\right) $ terms cancel and we are left with a deterministic ODE. Symmetrizing yields the deterministic equation \begin{eqnarray} \frac{d\mathscr{C}_t}{dt}=-k\mathscr{V}_t\mathscr{C}_t\left( \beta \left( t\right) +\beta \left( t\right) ^{\ast }\right) ^{2}+\hbar \text{Im}\beta \left( t\right) . \label{eq:filter_C} \end{eqnarray}
A similar computation works for the conditional uncertainty in the momentum \begin{eqnarray} \mathscr{W}_t \triangleq \pi_t (p^2 ) - \pi_t (p) ^2, \end{eqnarray} and we obtain the ODE \begin{eqnarray} \frac{ d \mathscr{W}_t }{dt} &=& (2 \hbar k)^2 ( \mathrm{Re} \beta (t) )^2 \nonumber\\ &&-(4k \mathscr{C}_t )^2 -16 \hbar k^2 \mathscr{C}_t \mathrm{Im} \beta (t) . \end{eqnarray}
Note that the covariances come from a quantum Gaussian state, and so we must have the inequality \begin{equation*} \left[ \begin{array}{cc} \mathscr{V}_{t} & \mathscr{C}_{t}+\frac{i\hbar }{2} \\ \mathscr{C}_{t}-\frac{i\hbar }{2} & \mathscr{W}_{t} \end{array} \right] \geq 0 \end{equation*} to be consistent with the Heisenberg uncertainty relations, see for instance Section 3.3.3 in \cite{Eisert}.
\section{Conclusions} We have derived the form of the filter (\ref{eq:interf_filter}) for the problem of estimating the quantum state of a phase observable in an interferometer based on detection of the output fields. As the photon fields do not interact directly with the interferometer other than by scattering in the arms and being split and recombined by the beam-splitters, we needed to place at least one of the inputs in a non-trivial coherent state. This, however, led to a practical estimation problem.
For the homodyne situation, we were able to work out the quantum Kalman-Bucy filter. Here the conditional variance $\mathscr{V}_t$ evolves deterministically (\ref{eq:filter_var_q}). If we make the modelling assumption that $\beta \left( t\right) =\beta $ (constant) over the time interval of interest, then we obtain the explicit solution for $\mathscr{V}_t$ as \begin{eqnarray*} \mathscr{V}_t=\frac{1}{V_{0}^{-1}+k^{2}\left( \beta +\beta ^{\ast }\right) ^{2}t} \end{eqnarray*} where $V_{0}$ is the variance of $q$ in the initial state $\rho _{0}$ assigned to the interferometer, i.e., $V_{0}=\mathrm{tr}\left\{ \rho _{0}q^{2}\right\} -\left( \text{tr}\left\{ \rho _{0}q\right\} \right) ^{2}$.
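For the reader's convenience, here is a direct check (not in the original text) that this formula solves (\ref{eq:filter_var_q}) when $\beta $ is constant: \begin{eqnarray*} \frac{d}{dt}\,\frac{1}{V_{0}^{-1}+k^{2}\left( \beta +\beta ^{\ast }\right) ^{2}t} =-\frac{k^{2}\left( \beta +\beta ^{\ast }\right) ^{2}}{\left( V_{0}^{-1}+k^{2}\left( \beta +\beta ^{\ast }\right) ^{2}t\right) ^{2}} =-k^{2}\mathscr{V}_t^{2}\left( \beta +\beta ^{\ast }\right) ^{2}, \end{eqnarray*} with $\mathscr{V}_0=V_{0}$.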
The principal qualitative observation is, of course, that $ \lim_{t\rightarrow \infty }\mathscr{V}_t=0$. In other words, the conditional variance converges to zero as we acquire more information through the quadrature measurement. What should happen in the long time asymptotic limit is that, for any interval $A$, the probability of the observed position $q$ settling down to a value in $A$ will be given by tr$\left\{ \rho _{0}P_{A}\right\} $ where $P_{A}$ is the projection operator \begin{eqnarray*} \left( P_{A}\xi \right) \left( x\right) =\left\{ \begin{array}{cc} \xi \left( x\right) , & x\in A; \\ 0, & x\notin A. \end{array} \right. \end{eqnarray*} If the initial state $\rho _{0}$ was pure, corresponding to a wavefunction $
\psi _{0}$, then the limit probability should be $\int_{A}|\psi _{0}\left(
x\right) |^{2}dx$. As far as we are aware, a rigorous proof of this assertion is lacking; however, it is well indicated for finite-dimensional systems with discrete eigenvalues, see for instance \cite{SvHM04} and \cite{vHSM05}.
\end{document}
\begin{document}
\setlength{\baselineskip}{5mm}
\begin{abstract} This paper is motivated by the study of Lyapunov functionals for four equations describing free surface flows in fluid dynamics: the Hele-Shaw and Mullins-Sekerka equations together with their lubrication approximations, the Boussinesq and thin-film equations. We identify new Lyapunov functionals, including some which decay in a convex manner (these are called strong Lyapunov functionals). For the Hele-Shaw equation and the Mullins-Sekerka equation,
we prove that the $L^2$-norm of the free surface elevation and the area of the free surface are Lyapunov functionals, together with parallel results for the thin-film and Boussinesq equations. The proofs combine exact identities for the dissipation rates with functional inequalities. For the thin-film and Boussinesq equations, we introduce a Sobolev inequality of independent interest which revisits some known results and exhibits strong Lyapunov functionals. For the Hele-Shaw and Mullins-Sekerka equations, we introduce a functional which controls the $L^2$-norm of three half spatial derivatives. Under a mild smallness assumption on the initial data, we show that the latter quantity is also a Lyapunov functional for the Hele-Shaw equation, implying that the area functional is a strong Lyapunov functional. Precise lower bounds for the dissipation rates are established, showing that these Lyapunov functionals are in fact entropies. Other quantities are also studied, such as Lebesgue norms or the Boltzmann's entropy. \end{abstract}
\title{Functional inequalities and strong Lyapunov functionals for free surface flows in fluid dynamics}
\section{Introduction} \subsection*{The equations} Consider a time-dependent surface $\Sigma$ given as the graph of some function $h$, so that at time $t\ge 0$, $$ \Sigma(t)=\{ (x,y) \in \mathbf{T}^{d}\times \mathbf{R}\,;\, y = h(t,x)\}, $$ where $\mathbf{T}^{d}$ denotes a $d$-dimensional torus. We are interested in several free boundary problems described by nonlinear parabolic equations. A free boundary problem is described by an evolution equation which expresses the velocity of $\Sigma$ at each point in terms of some nonlinear expressions depending on $h$. The most popular example is the \textbf{mean-curvature} equation, which stipulates that the normal component of the velocity of $\Sigma$ is equal to the mean curvature at each point. It follows that: \begin{equation}\label{defi:kappa}
\partial_t h+\sqrt{1+|\nabla h|^2}\kappa=0\quad\text{where}\quad
\kappa=-\cnx \left(\frac{\nabla h}{\sqrt{1+|\nabla h|^2}}\right). \end{equation} The previous equation plays a fundamental role in differential geometry. Many other free boundary problems appear in fluid dynamics. Among these, we are chiefly concerned with the equations modeling the dynamics of a free surface transported by the flow of an incompressible fluid evolving according to Darcy's law. We begin with the Hele-Shaw equations with or without surface tension. One formulation of this problem reads (see Appendix~\ref{appendix:HS}): \begin{equation}\label{HS} \partial_{t}h+G(h)(gh+\mu \kappa)=0, \end{equation} where $\kappa$ is as in~\eqref{defi:kappa}, $g$ and $\mu$ are real numbers in $[0,1]$ and $G(h)$ is the (normalized) Dirichlet-to-Neumann operator, defined as follows: For any functions $h=h(x)$ and $\psi=\psi(x)$, $$
G(h)\psi (x)=\sqrt{1+|\nabla h|^2}\partial_n\mathcal{H}(\psi) \big\arrowvert_{y=h(x)}, $$ where $\nabla=\nabla_x$, $\partial_n=n\cdot\nabla$ and $n$ is the outward unit normal to $\Sigma$ given by $$
n=\frac{1}{\sqrt{1+|\nabla h|^2}}\begin{pmatrix} -\nabla h\\ 1\end{pmatrix}, $$ and $\mathcal{H}(\psi)$ is the harmonic extension of~$\psi$ in the fluid domain, solution to \begin{equation}\label{defi:varphiintro} \left\{ \begin{aligned} &\Delta_{x,y}\mathcal{H}(\psi)=0\quad \text{in }\Omega\mathrel{:=}\{(x,y)\in \mathbf{T}^{d}\times \mathbf{R}\,:\, y<h(x)\},\\ &\mathcal{H}(\psi)\arrowvert_{y=h}=\psi. \end{aligned} \right. \end{equation} Hereafter, given a function $f=f(x,y)$, we use $f\arrowvert_{y=h}$ as a short notation for the function $x\mapsto f(x,h(x))$.
When $g=1$ and $\mu=0$, the equation~\eqref{HS} is called the Hele-Shaw equation without surface tension. Hereafter, we will refer to this equation simply as the \textbf{Hele-Shaw} equation. If $g=0$ and $\mu=1$, the equation is known as the Hele-Shaw equation with surface tension, also known as the \textbf{Mullins-Sekerka} equation. Let us record the terminology: \begin{alignat}{2} &\partial_{t}h+G(h)h=0\qquad &&(\text{Hele-Shaw}),\label{HSi}\\ &\partial_{t}h+ G(h)\kappa=0\qquad &&(\text{Mullins-Sekerka})\label{MSi}. \end{alignat} We are also interested in two equations which describe an asymptotic regime in the \textbf{thin-film} approximation. They are \begin{align} &\partial_t h-\cnx(h\nabla h)=0 \qquad&&(\text{Boussinesq}),\label{Bou}\\ &\partial_t h+\cnx(h\nabla \Delta h)=0\qquad &&(\text{thin-film}).\label{ThFi} \end{align} Equation \eqref{Bou} was derived from~\eqref{HSi} by Boussinesq~\cite{Boussinesq-1904} to study groundwater infiltration. Equation \eqref{ThFi} was derived from~\eqref{MSi} by Constantin, Dupont, Goldstein, Kadanoff, Shelley and Zhou in~\cite{Constantin1993droplet} as a lubrication approximation model of the interface between two immiscible fluids in a Hele-Shaw cell.
\subsection{Lyapunov functionals and entropies} Our main goal is to find some monotonicity properties for the previous free boundary flows, in a unified way. Before going any further, let us fix the terminology used in this paper. \begin{definition}\label{Defi:1.1} (a) Consider one of the evolution equations stated above and a function $$ I: C^\infty(\mathbf{T}^{d})\to [0,+\infty). $$ We say that $I$ is a \textbf{Lyapunov functional} if the following property holds: for any smooth solution $h$ in $C^\infty([0,T]\times \mathbf{T}^{d})$ for some $T>0$, we have $$ \forall t\in [0,T],\qquad \frac{\diff}{\dt} I(h(t))\leq 0. $$ The quantity $-\frac{\diff}{\dt} I(h)$ is called the \textbf{dissipation rate} of the functional $I(h)$.
(b) We say that a Lyapunov functional $I$ is an \textbf{entropy} if the dissipation rate satisfies, for some $C>0$, $$ -\frac{\diff}{\dt} I(h(t))\ge C I(h(t)). $$
(c) Eventually, we say that $I$ is a \textbf{strong Lyapunov functional} if $$ \frac{\diff}{\dt} I(h(t))\leq 0\quad\text{and}\quad\frac{\diff^2}{\dt^2} I(h(t))\ge 0. $$ This means that $t\mapsto I(h(t))$ decays in a convex manner. \end{definition} \begin{remark} $(i)$ The Cauchy problems for the previous free boundary equations have been studied by different techniques, for weak solutions, viscosity solutions or also classical solutions. We refer the reader to \cite{A-Lazar,AMS,ChangLaraGuillenSchwab,Chen-ARMA-1993,ChengCoutandShkoller-2014,Cheng-Belinchon-Shkoller-AdvMath,ChoiJerisonKim,CCG-Annals,Escher-Simonett-ADE-1997,FlynnNguyen2020,GG-JPS-AdvMaths-2019,Gunther-Prokert-SIAM-2006, Hadzic-Shkoller-CPAM2015,Kim-ARMA2003,Knupfer-Masmoudi-ARMA-2015,NPausader,Pruss-Simonett-book}. Thanks to the parabolic smoothing effect, classical solutions are smooth for positive times (the elevation $h$ belongs to $C^\infty((0,T]\times \mathbf{T}^{d})$). This is why we consider functionals $I$ defined only on smooth functions $C^\infty(\mathbf{T}^{d})$.
$(ii)$ Assume that $I$ is an entropy for an evolution equation and consider a global in time solution of the latter problem. Then the function $t\mapsto I(h(t))$ decays exponentially fast. In the literature, there are more general definitions of entropies for various evolution equations. The common idea is that entropy dissipation methods allow one to study the large time behavior or to prove functional inequalities (see~\cite{Bertozzi-NoticesAMS-1998,Carillo-Jungel-Markovich-Toscani-Unterreiter,Arnold-et-al-2004,Evans-2004,Villani-Oldandnew,Bolley-Gentil-JMPA-2010,Dolbeault-Toscani-AIHPNL-2013,Bodineau-Lebowitz-Mouhot-Villani,Zugmeyer-arxiv2020,Jungel-book-entropy}).
$(iii)$ To say that $I(h)$ is a strong Lyapunov functional is equivalent to saying that the dissipation rate $-\frac{\diff}{\dt} I(h)$ is also a Lyapunov functional. This notion was introduced in~\cite{Aconvexity} as a tool to find Lyapunov functionals which control higher order Sobolev norms. Indeed, in general, the dissipation rate is expected to be a higher order energy because of the smoothing effect of a parabolic equation. Notice that the idea of computing the second-order derivative in time is related to the celebrated work of Bakry and Emery~\cite{BakryEmmery-1985}. \end{remark}
\subsection{Examples}
Since we consider different equations, for the reader's convenience, we begin by discussing some examples which are well-known in certain communities.
\begin{example}\label{Example:heateq} Consider the heat equation $\partial_t h-\Delta h=0$. The energy identity $$ \frac{1}{2}\frac{\diff}{\dt} \int_{\mathbf{T}^{d}}h^2\diff \! x +\int_{\mathbf{T}^{d}}\left\vert \nabla h\right\vert^2\diff \! x=0,
$$ implies that the square of the $L^2$-norm is a Lyapunov functional. It is in addition a strong Lyapunov functional since, by differentiating the equation, the quantity $\int_{\mathbf{T}^{d}}\left\vert \nabla h\right\vert^2\diff \! x$ is also a Lyapunov functional. Furthermore, if one assumes that the mean value of $h(0,\cdot)$ vanishes, then the Poincar\'e's inequality implies that the square of the $L^2$-norm is an entropy. Now let us discuss another important property, which holds for positive solutions. Assume that $h(t,x)\ge 1$ and introduce the Boltzmann's entropy, defined by $$ H(h)=\int_{\mathbf{T}^{d}}h \log h \diff \! x. $$ Then $H(h)$ is a strong Lyapunov functional. This classical result (see Evans~\cite{Evans-BAMS-2004}) follows directly from the pointwise identities \begin{align*} &(\partial_t-\Delta)(h\log h)=-\frac{\left\vert \nabla h\right\vert^2}{h},\\ &(\partial_t -\Delta )\frac{\left\vert \nabla h\right\vert^2}{h}=-2h\left\vert \frac{\nabla^2 h}{h} -\frac{\nabla h \otimes \nabla h}{h^2}\right\vert^2. \end{align*} We will prove that the Boltzmann's entropy is also a strong Lyapunov functional for the Boussinesq equation~\eqref{Bou}, by using a functional inequality which controls the $L^2$-norm of $\left\vert \nabla h\right\vert^2/h$. Recall that the $L^1$-norm of $\left\vert \nabla h\right\vert^2/h$, called the Fisher's information, plays a key role in entropy methods and information theory (see~ Villani's lecture notes~\cite{Villani-Lecturenotes2008} and his book~\cite[Chapters 20, 21, 22]{Villani-Oldandnew}). \end{example} For later references and comparisons, we discuss some examples of Lyapunov functionals for the nonlinear equations mentioned above. \begin{example}[Mean-curvature equation]\label{example:MCF}
Consider the mean curvature equation $\partial_t h+\sqrt{1+|\nabla h|^2}\kappa=0$. If $h$ is a smooth solution, then \begin{equation}\label{MCF:n0} \frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)\leq 0 \quad\text{where}\quad
\mathcal{H}^{d}(\Sigma)=\int_{\mathbf{T}^{d}}\sqrt{1+|\nabla h|^2}\diff \! x. \end{equation} This is proved by an integration by parts argument: \begin{align*}
\frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)&=\int_{\mathbf{T}^{d}}\nabla_x (\partial_th) \cdot \frac{\nabla_x h}{\sqrt{1+|\nabla h|^2}}\diff \! x =\int_{\mathbf{T}^{d}} (\partial_t h)\kappa\diff \! x\\
&=-\int_{\mathbf{T}^{d}}\sqrt{1+|\nabla h|^2}\kappa^2\diff \! x\leq 0. \end{align*} In fact, the mean-curvature equation is a gradient flow for $\mathcal{H}^{d}(\Sigma)$, see~\cite{CMWP-BAMS-2015}. When the space dimension $d$ is equal to $1$, we claim that the following quantities are also Lyapunov functionals: $$ \int_\mathbf{T} h^2\diff \! x,\quad \int_\mathbf{T} (\partial_x h)^2\diff \! x,\quad \int_\mathbf{T} (\partial_t h)^2\diff \! x,\quad \int_\mathbf{T} (1+(\partial_xh)^2)\kappa^2\diff \! x. $$ To our knowledge, these results are new and we prove this claim in Appendix~\ref{Appendix:MCF}. We will also prove that $\int_\mathbf{T} h^2\diff \! x$ is a strong Lyapunov functional. \end{example} \begin{example}[Hele-Shaw equation]\label{example:Hele-Shaw} Consider the equation $\partial_{t}h+G(h)h=0$. Recall that $G(h)$ is a non-negative operator. Indeed, denoting by $\varphi=\mathcal{H}(\psi)$ the harmonic extension of $\psi$ given by~\eqref{defi:varphiintro}, it follows from Stokes' theorem that \begin{equation}\label{positivityDNintro} \int_{\mathbf{T}^{d}} \psi G(h)\psi\diff \! x=\int_{\partial\Omega}\varphi \partial_n \varphi\diff\mathcal{H}^{d}= \iint_{\Omega}\left\vert\nabla_{x,y}\varphi\right\vert^2\diff \! y \diff \! x\ge 0. \end{equation} Consequently, if $h$ is a smooth solution to $\partial_{t}h+G(h)h=0$, then $$ \frac{1}{2}\frac{\diff}{\dt} \int_{\mathbf{T}^{d}}h^2\diff \! x =-\int_{\mathbf{T}^{d}} hG(h)h\diff \! x\leq 0. $$ This shows that $\int_{\mathbf{T}^{d}} h^2\diff \! x$ is a Lyapunov functional. In \cite{AMS}, it is proved that in fact $\int_{\mathbf{T}^{d}} h^2\diff \! x$ is a strong Lyapunov functional and also an entropy. This result is generalized in \cite{Aconvexity} to functionals of the form $\int_{\mathbf{T}^{d}} \Phi(h)\diff \! x$ where $\Phi$ is a convex function whose derivative is also convex. \end{example} \begin{example}[Mullins-Sekerka]\label{example:Mullins-Sekerka} Assume that $h$ solves $\partial_{t}h+G(h)\kappa=0$ and denote by $\mathcal{H}^{d}(\Sigma)$ the area functional (see~\eqref{MCF:n0}). Then~\eqref{positivityDNintro} implies that $$ \frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)=\int_{\mathbf{T}^{d}} (\partial_t h)\kappa\diff \! x=-\int_{\mathbf{T}^{d}}\kappa G(h)\kappa\diff \! x\leq 0, $$ so $\mathcal{H}^{d}(\Sigma)$ is a Lyapunov functional. In fact, the Mullins-Sekerka equation is a gradient flow for $\mathcal{H}^{d}(\Sigma)$, see~\cite{Almgren-Physics-1996,Giacomelli-Otto-CVPDE-2001}. \end{example} \begin{example}[Thin-film equation]\label{exampleTF} The study of entropies plays a key role in the study of the thin-film equation (and its variants) since the works of Bernis and Friedman~\cite{Bernis-Friedman-JDE} and Bertozzi and Pugh~\cite{Bertozzi-Pugh-1996}. The simplest observation is that, if $h$ is a non-negative solution to $\partial_th+\partial_x(h\partial_x^3 h)=0$, then $$ \frac{\diff}{\dt} \int_\mathbf{T} h^2\diff \! x\leq 0, \qquad \frac{\diff}{\dt} \int_\mathbf{T} (\partial_x h)^2\diff \! x\leq 0. $$ (This can be verified by elementary integrations by parts.) To give an example of hidden Lyapunov functionals, consider, for $p\ge 0$ and a function $h> 0$, the functionals $$ H_p(h)=\int_\mathbf{T} \frac{h_x^2}{h^p}\diff \! x. $$ Laugesen discovered~(\cite{Laugesen-CPAA}) that, for $0\leq p\leq 1/2$, $H_p(h)$ is a Lyapunov functional. This result was complemented by Carlen and Ulusoy~(\cite{Carlen-Ulusoy-CMS}) who showed that $H_p(h)$ is an entropy when $0< p<(9 + 4\sqrt{15})/53$.
We also refer to \cite{BerettaBDP-ARMA-1995,DPGG-Siam-1998,BDPGG-ADE-1998,JungelMatthes-Nonlinearity-2006} for the study of entropies of the form $\int h^p\diff \! x$ with $1/2\leq p\leq 2$. \end{example}
\subsection{Main results and plan of the paper} We are now ready to introduce our main new results. To highlight the links between them, we begin by gathering in the following table the list of all the Lyapunov functionals that will be considered. This table includes known results, some of which have already been discussed and others will be reviewed later. Precise statements are given in the next section.
\begin{tabular}{@{}llllr@{}}
\toprule
Equations & \multicolumn{2}{c}{\textbf{Lyapunov functionals}} & See & Properties \\[1ex]
\toprule
\textbf{Heat} & {$\int h^2$ } &$(*)$ & Ex.~\ref{Example:heateq} &(S) \\[0.5ex]
\textbf{equation} & $\int h \log h$ & $(*)$& Ex.~\ref{Example:heateq} &(S), (GF) \\[0.5ex]
\midrule
\textbf{Mean} & $\int \sqrt{1+|\nabla h|^2}=\mathcal{H}^{d}(\Sigma)$
&$(*)$& Ex.~\ref{example:MCF} & (GF)\\[0.5ex]
\textbf{curvature}& \cellcolor[gray]{0.95}{$\int \left\vert\nabla h\right\vert^2$}& &Prop.~\ref{Prop:C1nabla} & \\[0.5ex]
& \cellcolor[gray]{0.95}{$\int h^2$} & $(d=1)$&Prop.~\ref{Prop:C1} & \ccell[gray]{0.95}{(S)} \\[0.5ex]
& \cellcolor[gray]{0.95}{$\int (\partial_th)^2$} &$(d=1)$ &Prop.~\ref{Prop:C1} &\\[0.5ex]
& \cellcolor[gray]{0.95}{$\int (1+(\partial_xh)^2)\kappa^2$}
&$(d=1)$ &Prop.~\ref{Prop:C1}& \\[0.5ex]
& \cellcolor[gray]{0.95}{$\int (\partial_xh)\arctan (\partial_xh)$} & $(d=1)$ &Prop.~\ref{Prop:C1}& \\[0.5ex]
\midrule
\textbf{Hele-Shaw} & $\int \Phi(h)$, $\Phi''\ge 0$
&$(*)$& Ex.~\ref{example:Hele-Shaw} & \\[0.5ex]
& $\int \Phi(h)$ , $\Phi'',\Phi'''\ge 0$ &$(*)$& Ex.~\ref{example:Hele-Shaw} & (S) \\[0.5ex]
& $\int h G(h)h$ &$(*)$& \S\ref{S213} & \\[0.5ex]
& \cellcolor[gray]{0.95}{$\int \sqrt{1+|\nabla h|^2}$} && Th.~\ref{T1} &\ccell[gray]{0.95}{(S)}\\[0.5ex]
& \cellcolor[gray]{0.95}{$\int \kappa G(h)h$} && Th.~\ref{Theorem:J(h)decays} &\\[0.5ex]
\midrule
\textbf{Mullins-} & $\int \sqrt{1+|\nabla h|^2}$
& $(*)$ &Ex.\ref{example:Mullins-Sekerka} & (GF) \\[0.5ex]
\textbf{Sekerka} & \cellcolor[gray]{0.95}{$\int h^2$} & &Th.~\ref{T1} & \\[1ex]
\midrule
\textbf{Thin-film} & $\int \left\vert\nabla h\right\vert^2$ & $(*)$& Prop.~\ref{prop:lubrik1n} & \\[0.5ex]
&$\int h^{-p}h_x^2\qquad 0\leq p\leq 1/2$ &$(*)$& Ex.~\ref{exampleTF}& \\[0.5ex]
& \cellcolor[gray]{0.95}{$\int h^{m}\qquad\quad \frac{1}{2} \leq m\leq 2$} & (**) &Prop.~\ref{positivity} &\\[0.5ex]
& $\int h\log h$ & &Prop.~\ref{positivity}& \\[0.5ex]\midrule
\textbf{Boussinesq} & {$\int h ^2$}& & Th.~\ref{Theo2bis} & \ccell[gray]{0.95}{(S)} \\[0.5ex]
& {$\int h\log h$} && Th.~\ref{Theo2bis} & \ccell[gray]{0.95}{(S)}\\[0.5ex]
& $\int h^{m+1}$ & (*) & Prop.~\ref{convexporoust} &\\[0.5ex]
& {$\int h^2\left\vert\nabla h\right\vert^2$}&($*$) &\S\ref{S:Boussinesq}& \\[0.5ex]
& \cellcolor[gray]{0.95}{$\int h^m\left\vert\nabla h\right\vert^2, ~ 0\leq m\leq \frac{1+\sqrt{7}}{2}$} &$(**)$ &Prop.~\ref{convexporous}& \\[0.5ex]
& \cellcolor[gray]{0.95}{$\int (\partial_xh)\arctan (\partial_xh)$} &$(d=1)$ &Prop.~\ref{prop:C2Boussinesq}& \\[0.5ex]
\bottomrule
\textbf{Legend:} & \multicolumn{4}{l} {The gray boxes point to the new results} \\
& \multicolumn{4}{l} { \small{$(*)$: already known}} \\
& \multicolumn{4}{l} { \small{$(**)$: improves previous exponents or simplifies the proof}} \\
& \multicolumn{4}{l} { \small{$(d=1)$: only in dimension one}} \\
& \multicolumn{4}{l} { \small{(S): is a strong Lyapunov functional}} \\
& \multicolumn{4}{l} { \small{(GF): is derived from a Gradient Flow structure.}} \\
\bottomrule \end{tabular}
To conclude this introduction, let us mention that in addition to Lyapunov functionals, maximum principles also play a key role in the study of these parabolic equations. One can think of the maximum principles for the mean-curvature equation obtained by Huisken~\cite{Huisken-JDE-1984} and Ecker and Huisken~(see \cite{Ecker-Huisken-Annals,Ecker-Regularity-Theory}), used to obtain a very sharp global existence result of smooth solutions. Many maximum principles exist also for the Hele-Shaw equations (see~\cite{Kim-ARMA2003,ChangLaraGuillenSchwab}). In particular, we will use the maximum principle for space-time derivatives proved in~\cite{AMS}. See also~\cite{ConstantinVicol-GAFA2012} for related models. For the thin-film equations of the form $\partial_th+\partial_x(f(h)\partial_x^3 h)=0$ with $f(h)=h^m$ and an exponent $m\ge 3.5$, in one space dimension, if the initial data $h_0$ is positive, then the solution $h(x,t)$ is guaranteed to stay positive (see~\cite{Bernis-Friedman-JDE,Bertozzi-et-al-1994} and~\cite{DPGG-Siam-1998,BDPGG-ADE-1998,ZhornitskayaBertozzi-2000,Bresch2018bd}).
\section{Statements of the main results}
Our main goal is to study the decay properties of several natural coercive quantities for the Hele-Shaw, Mullins-Sekerka, Boussinesq and thin-film equations, in a unified way.
\subsection{Entropies for the Hele-Shaw and Mullins-Sekerka equations} The first two coercive quantities we want to study are the $L^2$-norm and the area functional (that is the $d$-dimensional surface measure): \begin{equation}\label{L2Hm}
\left(\int_{\mathbf{T}^{d}}h(t,x)^2\diff \! x\right)^\frac{1}{2},\qquad \mathcal{H}^{d}(\Sigma)=\int_{\mathbf{T}^{d}}\sqrt{1+|\nabla h|^2}\diff \! x. \end{equation} Our first main result states that these are Lyapunov functionals for the Hele-Shaw and Mullins-Sekerka equations, in any dimension.
\begin{theorem}\label{T1} Let $d\ge 1$, $(g,\mu)\in [0,+\infty)^2$ and assume that $h$ is a smooth solution to \begin{equation}\label{n21} \partial_{t}h+G(h)(gh+\mu \kappa)=0. \end{equation} Then, \begin{equation}\label{n31} \frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^2\diff \! x\leq 0 \quad \text{and}\quad \frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)\leq 0. \end{equation} \end{theorem} \begin{remark} The main point is that this result holds uniformly with respect to $g$ and $\mu$. For comparison, let us recall some results which hold for the special cases where either $g=0$ or $\mu=0$.
$i)$ When $g=0$, the fact that the area-functional $\mathcal{H}^{d}(\Sigma)$ decays in time follows from a well-known gradient flow structure for the Mullins-Sekerka equation. However, the decay of the $L^2$-norm in this case is new.
$ii)$ When $\mu=0$, the decay of the $L^2$-norm follows from an elementary energy estimate. However, the proof of the decay of the area-functional $t\mapsto \mathcal{H}^{d}(\Sigma(t))$ requires a more subtle argument. It is implied (but only implicitly) by some computations by Antontsev, Meirmanov, and Yurinsky in \cite{Antontsev-al-2004}. The main point is that we shall give a different approach which holds uniformly with respect to $g$ and $\mu$. In addition, we will obtain a precise lower bound for the dissipation rate showing that $\mathcal{H}^{d}(\Sigma)$ is an entropy when $\mu=0$ and not only a Lyapunov functional. \end{remark} To prove these two uniform decay results, the key ingredient will be to study the following functional: $$ J(h)\mathrel{:=} \int_{\mathbf{T}^{d}} \kappa\, G(h)h\diff \! x. $$ It appears naturally when performing energy estimates. Indeed, by multiplying the equation~\eqref{n21} with $h$ or $\kappa$ and integrating by parts, one obtains \begin{align} &\frac{1}{2}\frac{\diff}{\dt}\int_{\mathbf{T}^{d}} h^2\diff \! x+g \int_{\mathbf{T}^{d}}hG(h)h\diff \! x+\mu J(h)=0,\notag\\ &\frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)+gJ(h)+\mu\int_{\mathbf{T}^{d}} \kappa G(h)\kappa\diff \! x=0.\label{J(h)dt} \end{align} We will prove that $J(h)$ is non-negative. Since the Dirichlet-to-Neumann operator is a non-negative operator~(see \eqref{positivityDNintro}), this will be sufficient to conclude that the $L^2$-norm and the area functional $\mathcal{H}^{d}(\Sigma)$ are non-increasing along the flow.
An important fact is that $J(h)$ is a nonlinear analogue of the homogeneous $H^{3/2}$-norm. A first way to give this statement a rigorous meaning consists in noticing that $G(0) h=\left\vert D_x\right\vert h=\sqrt{-\Delta_x}h$ and the linearized version of $\kappa$ is $-\Delta_x h$. Therefore, if $h=\varepsilon\zeta$, then $$ J(\varepsilon \zeta)=\varepsilon^2\int_{\mathbf{T}^{d}} \big( \left\vert D_x\right\vert^{3/2}\zeta\big)^2\diff \! x+O(\varepsilon^3). $$ We will prove a functional inequality (see Proposition~\ref{P:Positive2} below) which shows that $J(h)$ controls the $L^2(\Omega)$-norm of the Hessian of the harmonic extension $\mathcal{H}(h)$ of~$h$, given by~\eqref{defi:varphiintro} with $\psi=h$. Consequently, $J(h)$ controls three half-derivatives of $h$ in $L^2$ by means of a trace theorem.
\subsection{The area functional is a strong Lyapunov functional}\label{S213} As seen in Example~\ref{example:MCF}, for the mean-curvature equation in space dimension $d=1$, there exist Lyapunov functionals which control all the spatial derivatives of order less than $2$. Similarly, there are higher-order energies for the thin-film equations (see Theorem~\ref{Theo2}, the Laugesen functionals introduced in Example~\ref{exampleTF} and also \cite{ConstantinElgindi}). On the other hand, for the Hele-Shaw and Mullins-Sekerka equations, it is more difficult to find higher-order energies which control some derivatives of the solution. This is because it is harder to differentiate these equations. For the Mullins-Sekerka problem, one can quote two recent papers by Chugreeva--Otto--Westdickenberg~\cite{ChugreevaOttoWestdickenberg2019} and Acerbi--Fusco--Julin--Morini~\cite{AcerbiFuscoJulinMorini2019}. In both papers, the authors compute the second derivative in time of some coercive quantities to study the long time behavior of the solutions, in perturbative regimes. Here, we will prove a similar result for the Hele-Shaw equation. However, the analysis will be entirely different. On the one hand, it is easier in some sense to differentiate the Hele-Shaw equation. On the other hand, we will be able to exploit some additional identities and inequalities which allow us to obtain a result under a very mild smallness assumption.
Here, we consider the Hele-Shaw equation: \begin{equation}\label{HSJ} \partial_t h+G(h)h=0. \end{equation} It is known that the Cauchy problem for the latter equation is well-posed on the Sobolev spaces $H^s(\mathbf{T}^{d})$ provided that $s>1+d/2$, and moreover the critical Sobolev exponent is $1+d/2$ (see~\cite{Cheng-Belinchon-Shkoller-AdvMath,Matioc-APDE-2019,AMS,NPausader}). On the other hand, the natural energy estimate only controls the $L^2$-norm. It is thus natural to seek higher order energies, which are bounded in time and which control Sobolev norms $H^\mu(\mathbf{T}^{d})$ of order $\mu>0$. It was proved in~\cite{AMS,Aconvexity} that one can control one-half derivative of $h$ by exploiting some convexity argument. More precisely, it is proved in the previous references that \begin{equation}\label{n120} \frac{\diff}{\dt}\int_{\mathbf{T}^{d}}hG(h)h\diff \! x\leq 0. \end{equation} This inequality provides a higher order Lyapunov functional, of order $1/2$. Indeed, $$ \int_{\mathbf{T}^{d}}hG(h)h\diff \! x=\iint_{\Omega}\left\vert\nabla_{x,y}\mathcal{H}(h)\right\vert^2\diff \! y \diff \! x, $$ where $\mathcal{H}(h)$ is the harmonic extension of $h$ (solution to~\eqref{defi:varphiintro} where $\psi$ is replaced by $h$). Hence, by using a trace theorem, it follows that $\int_{\mathbf{T}^{d}}hG(h)h\diff \! x$ controls the $H^{1/2}$-norm of $h$.
The search for higher-order functionals leads to interesting new difficulties. Our strategy here is to try to prove that the area functional is a strong Lyapunov functional. This means that the function $t\mapsto\mathcal{H}^{d}(\Sigma(t))$ decays in a convex manner. This is equivalent to $\diff^2 \mathcal{H}^{d}(\Sigma)/\diff \! t^2\ge 0$. Now, remembering (cf \eqref{J(h)dt}) that $$ \frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)+J(h)=0\quad\text{where}\quad J(h)=\int_{\mathbf{T}^d}\kappa G(h)h\diff \! x, $$ the previous convexity argument amounts to proving that $\diff J(h)/\diff \! t\leq 0$, that is, that $J(h)$ is itself a Lyapunov functional. This gives us a very interesting higher-order energy since the functional $J(h)$ controls three half spatial derivatives of $h$ (as seen above, and as will be made precise in Proposition~\ref{P:Positive2}). The next result states that the previous strategy applies under a very mild smallness assumption on the first order derivatives of the elevation $h$ at time $0$.
\begin{theorem}\label{Theorem:J(h)decays} Consider a smooth solution to $\partial_t h+G(h)h=0$. There exists a universal constant $c_d$ depending only on the dimension $d$ such that, if initially \begin{equation}\label{esti:final6} \sup_{\mathbf{T}^d}\left\vert \nabla h_0\right\vert^2 \leq c_d,\qquad \sup_{\mathbf{T}^d}\left\vert G(h_0)h_0\right\vert^2 \leq c_d, \end{equation} then \begin{equation}\label{n124} \frac{\diff}{\dt} J(h)
+\frac{1}{2}\int_{{\mathbf{T}}^d}\frac{\big(|\nabla\nabla h|^2
+ |\nabla\partial_t h|^2\big)}{(1+|\nabla h|^2)^{3/2}}\diff \! x\leq 0. \end{equation} \end{theorem} \begin{remark} $i)$ The constant $c_d$ is the unique solution in $[0,1/4]$ to $$ 2c_d\left(d+\left(d+\sqrt{d}\right) c_d\right) + 4 \left(c_d\left(d+ (d+1) c_d\right)\left(\frac{12}{1-2c_d}+1\right)\right)^{\frac{1}{2}}= \frac{1}{2}. $$ $ii)$ Since $$ \frac{\diff}{\dt} J(h)=- \frac{\diff^2}{\dt^2} \mathcal{H}^{d}(\Sigma), $$ it is equivalent to saying that the area-functional $\mathcal{H}^{d}(\Sigma)$ is a strong Lyapunov functional. \end{remark}
\subsection{Entropies for the Boussinesq and thin-film equations} The previous theorems suggest to seek a similar uniform result for the thin-film and Boussinesq equations. In this direction, we will obtain various entropies and gather in the next result only the main consequences. \begin{theorem}\label{Theo2} Let $d\ge 1$, $(g,\mu)\in [0,+\infty)^2$ and $h$ be a smooth solution to \begin{equation}\label{n21B} \partial_{t}h-\cnx \big(gh\nabla h-\mu h\nabla \Delta h\big)=0. \end{equation} Then, \begin{equation}\label{n31B} \frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^2\diff \! x\leq 0 \quad \text{and}\quad \frac{\diff}{\dt} \int_{\mathbf{T}^{d}}\left\vert \nabla h\right\vert^2\diff \! x\leq 0. \end{equation} \end{theorem} \begin{theorem}\label{Theo2bis} Let $d\ge 1$, and assume that $h$ is a smooth solution to \begin{equation}\label{n21bis} \partial_{t}h-\cnx \big(h\nabla h\big)=0. \end{equation} Then the square of the $L^2$-norm and the Boltzmann's entropy are strong Lyapunov functionals: \begin{equation}\label{n31.5} \frac{\diff}{\dt} \int_{\mathbf{T}^{d}}h^2\diff \! x\leq 0\quad \text{and}\quad\frac{\diff^2}{\dt^2} \int_{\mathbf{T}^{d}}h^2\diff \! x\ge 0, \end{equation} together with \begin{equation}\label{n31.5log} \frac{\diff}{\dt} \int_{\mathbf{T}^{d}}h\log h \diff \! x\leq 0\quad \text{and}\quad\frac{\diff^2}{\dt^2} \int_{\mathbf{T}^{d}}h\log h\diff \! x\ge 0. \end{equation} \end{theorem} \begin{remark} We will study more general Lyapunov functionals of the form $\int_{\mathbf{T}^{d}} h^{m}\diff \! x$ and $\int_{\mathbf{T}^{d}} h^m \left\vert\nabla h\right\vert^2\diff \! x$. \end{remark}
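As a quick illustration of Theorem~\ref{Theo2bis} (not a substitute for the proof), the following Python sketch integrates the one-dimensional Boussinesq equation $\partial_t h=\partial_x(h\partial_x h)=\frac{1}{2}\partial_x^2(h^2)$ on the torus with a positive initial datum, and monitors the two quantities appearing in \eqref{n31.5} and \eqref{n31.5log}. The grid size, time step and initial datum are arbitrary illustrative choices.
\begin{verbatim}
# Explicit finite-difference check that int h^2 and int h log h decay
# for d/dt h = 0.5 * d^2/dx^2 (h^2) with periodic boundary conditions.
import numpy as np

N = 256
dx = 2 * np.pi / N
x = dx * np.arange(N)
h = 1.5 + 0.5 * np.cos(x)          # positive, smooth initial datum
dt = 1e-4                          # small enough for explicit stability

def lap(u):                        # periodic second difference
    return (np.roll(u, -1) - 2 * u + np.roll(u, 1)) / dx**2

l2, ent = [], []
for _ in range(2000):
    l2.append(np.sum(h**2) * dx)
    ent.append(np.sum(h * np.log(h)) * dx)
    h = h + dt * 0.5 * lap(h**2)

print(np.all(np.diff(l2) <= 1e-12), np.all(np.diff(ent) <= 1e-12))
\end{verbatim}
For this choice of parameters both flags come out \texttt{True}, consistent with the monotonicity statements; the convexity statements in \eqref{n31.5}--\eqref{n31.5log} could be tested in the same way.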
When $g=0$, the first half of \eqref{n31B} was already obtained by several authors. The study of the decay of Lebesgue norms was initiated by Bernis and Friedman~\cite{Bernis-Friedman-JDE} and continued by Beretta-Bertsch-Dal Passo~\cite{BerettaBDP-ARMA-1995}, Dal Passo--Garcke--Gr\"{u}n~\cite{DPGG-Siam-1998} and more recently by J\"ungel and Matthes~\cite{JungelMatthes-Nonlinearity-2006}, who performed a systematic study of entropies for the thin-film equation, by means of a computer assisted proof. Here we will proceed differently and give a short proof, obtained by computations inspired by functional inequalities of Bernis~\cite{Bernis-proc-1996} and Dal Passo--Garcke--Gr\"{u}n~\cite{DPGG-Siam-1998}. Namely, we will establish a Sobolev type inequality. Quite surprisingly, this inequality will in turn allow us to study the case with gravity $g>0$, which is in our opinion the most delicate part of the proof.
As we will see, the crucial ingredient to prove Theorems~\ref{Theo2} and~\ref{Theo2bis} is given by the following functional inequality. \begin{proposition}\label{theo:logSob} For any $d\ge 1$ and any positive function $\theta$ in $H^2(\mathbf{T}^{d})$, \begin{equation}\label{BmD}
\int_{\mathbf{T}^{d}} \big|\nabla \theta^{1/2}\big|^4\diff \! x\leq \frac{9}{16}\int_{\mathbf{T}^{d}} (\Delta \theta)^2 \diff \! x. \end{equation} \end{proposition} There is a short proof which can be explained here. \begin{proof} By integrating by parts, we obtain the classical observation that \begin{equation}\label{Deltanablanabla} \begin{aligned} \int_{\mathbf{T}^{d}}(\Delta \theta)^2\diff \! x&=\int_{\mathbf{T}^d}\sum_{i,j}(\partial_i^2\theta)(\partial_j^2\theta)\diff \! x\\ &=\int_{\mathbf{T}^d}\sum_{i,j}(\partial_{ij}\theta)(\partial_{ij}\theta)\diff \! x=\int_{\mathbf{T}^{d}}\left\vert \nabla\nabla \theta\right\vert^2\diff \! x. \end{aligned}
\end{equation} Now, introduce $I=16\int_{\mathbf{T}^{d}} \big|\nabla \theta^{1/2}\big|^4\diff \! x$. By an immediate computation, $$
I=\int_{\mathbf{T}^{d}} \theta^{-2}|\nabla \theta|^4\diff \! x
=-\int_{\mathbf{T}^{d}} \big(\nabla \theta^{-1} \cdot \nabla \theta\big) \, |\nabla\theta|^2\diff \! x. $$ By integrating by parts, one can rewrite $I$ under the form $$
I=\int_{\mathbf{T}^{d}} \theta^{-1} \Delta \theta |\nabla \theta|^2\diff \! x +2\int_{\mathbf{T}^{d}} \theta^{-1}[(\nabla \theta \cdot \nabla) \nabla \theta]\cdot\nabla \theta\diff \! x. $$ Since $\left\vert (\nabla \theta\cdot\nabla)\nabla \theta\right\vert \leq \left\vert\nabla \theta\right\vert\left\vert \nabla^2 \theta\right\vert$ (see~\eqref{n2001} for details), using~\eqref{Deltanablanabla} and the Cauchy-Schwarz inequality, we obtain $$ I \leq 3 \, I^{1/2} \bigg(\int_{\mathbf{T}^{d}} (\Delta \theta)^2\diff \! x\bigg)^{1/2}. $$ Thus we conclude that $$ I\leq 9 \int_{\mathbf{T}^{d}} (\Delta\theta)^2\diff \! x, $$ which is the wanted inequality. \end{proof} \begin{remark} \begin{enumerate}[(i)] \item See Proposition~\ref{P:refD.1v2} for a more general result. \item The inequality~\eqref{BmD} is a multi-dimensional version of an inequality of Bernis which holds in space dimension $d=1$ (see Theorem~$1$ in~\cite{Bernis-proc-1996}). In this direction, notice that a remarkable feature of~\eqref{BmD} is that the constant $9/16$ is dimension-independent.
\item The Bernis' inequalities in~\cite{Bernis-proc-1996} and similar ones (see Gr\"un~\cite{Grun-2001} and Dal Passo--Garcke--Gr\"{u}n~\cite{DPGG-Siam-1998}) have been used to study various problems in fluid dynamics. In the opinion of the authors, Proposition~\ref{theo:logSob} could have other applications in fluid dynamics. As an example, we show in Appendix~\ref{appendix:compressible} how to fully remove a technical obstruction in the construction of weak-solutions for compressible Navier-Stokes equations with viscosities depending on the density. \end{enumerate} \end{remark}
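Although the proof above is complete, inequality \eqref{BmD} is also easy to test numerically; the following Python sketch does so in dimension $d=1$ with spectral differentiation, for an arbitrary positive trial function (the choice of $\theta$ and the grid size are illustrative).
\begin{verbatim}
# Numerical check of int |(theta^(1/2))'|^4 <= (9/16) int (theta'')^2
# on the one-dimensional torus, using FFT differentiation.
import numpy as np

N = 512
dx = 2 * np.pi / N
x = dx * np.arange(N)
k = np.fft.fftfreq(N, d=dx) * 2 * np.pi          # integer wave numbers
theta = 2.0 + np.cos(x) + 0.5 * np.sin(2 * x)    # positive trial function

theta_hat = np.fft.fft(theta)
d_theta = np.real(np.fft.ifft(1j * k * theta_hat))
dd_theta = np.real(np.fft.ifft(-(k ** 2) * theta_hat))

# |(theta^(1/2))'|^4 = (theta')^4 / (16 theta^2)
lhs = np.sum(d_theta ** 4 / (16 * theta ** 2)) * dx
rhs = (9.0 / 16.0) * np.sum(dd_theta ** 2) * dx
print(lhs, rhs, lhs <= rhs)
\end{verbatim}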
\section{Uniform Lyapunov functionals for the Hele-Shaw and Mullins-Sekerka equations}\label{S:3}
In this section, we prove Theorem~\ref{T1}.
\subsection{Maximum principles for the pressure}\label{S:pressure} In this paragraph the time variable does not play any role and we ignore it to simplify notations.
We will need the following elementary result. \begin{lemma}\label{Lemma:decayinfty} Consider a smooth function $h$ in $C^\infty(\mathbf{T}^d)$ and set $$ \Omega=\{(x,y)\in\mathbf{T}^{d}\times\mathbf{R}\,:\,y<h(x)\}. $$ For any $\zeta$ in $C^\infty(\mathbf{T}^d)$, there is a unique function $\phi\in C^\infty(\overline{\Omega})$ such that $\nabla_{x,y}\phi\in L^2(\Omega)$, solution to the Dirichlet problem \begin{equation}\label{defi:varphi2-zero} \left\{ \begin{aligned} &\Delta_{x,y}\phi=0 \quad\text{in }\Omega,\\ &\phi(x,h(x))=\zeta(x) \text{ for all }x\in\mathbf{T}^{d}. \end{aligned} \right. \end{equation} Moreover, for any multi-index $\alpha\in\mathbf{N}^d$ and any $\beta\in \mathbf{N}$ with $\left\vert\alpha\right\vert+\beta>0$, one has \begin{equation}\label{decaytozero} \partial_x^\alpha\partial_y^\beta\phi\in L^2(\Omega)\quad \text{and}\quad \lim_{y\to-\infty}\sup_{x\in\mathbf{T}^{d}}\left\vert \partial_x^\alpha\partial_y^\beta\phi(x,y)\right\vert=0. \end{equation} \end{lemma} \begin{proof} The existence and smoothness of the solution $\phi$ is a classical elementary result. We prove only the property~\eqref{decaytozero}.
Let $y_0$ be an arbitrary real number such that $\mathbf{T}^{d}\times\{y_0\}$ is located underneath the boundary $\partial\Omega=\{y=h\}$ and then set $\psi(x)=\phi(x,y_0)$. This function belongs to $C^\infty(\mathbf{T}^{d})$ since $\phi$ belongs to $C^\infty(\overline{\Omega})$. Now, in the domain $\Pi\mathrel{:=}\{(x,y)\,;\,y<y_0\}$, $\phi$ coincides with the harmonic extension of $\psi$, by uniqueness of the harmonic extension. Since $\Pi$ is invariant by translation in~$x$, we can compute the latter function by using the Fourier transform in~$x$. It results that, \begin{equation}\label{n3000} \forall x\in \mathbf{T}^{d},\quad \forall y<y_0, \qquad \phi(x,y)=(e^{(y-y_0)\left\vert D_x\right\vert}\psi)(x). \end{equation} (Here, for $\tau<0$, $e^{\tau\left\vert D_x\right\vert}$ denotes the Fourier multiplier with symbol $e^{\tau\left\vert\xi\right\vert}$.) Indeed, the function $(e^{(y-y_0)\left\vert D_x\right\vert}\psi)(x)$ is clearly harmonic and is equal to $\psi$ on $\{y=y_0\}$. Then, for $\left\vert\alpha\right\vert+\beta>0$, it easily follows from~\eqref{n3000} and the Plancherel theorem that $\partial_x^\alpha\partial_y^\beta\phi$ belongs to $L^2(\Pi)$. On the other hand, on the strip $\{(x,y)\,;\, y_0<y<h(x)\}$, the function $\partial_x^\alpha\partial_y^\beta\phi$ is bounded and hence square integrable. By combining the two previous results, we obtain that $\partial_x^\alpha\partial_y^\beta\phi$ belongs to~$L^2(\Omega)$. To prove the second half of \eqref{decaytozero}, we use again the formula~\eqref{n3000} and the Plancherel theorem, to infer that $\partial_x^\alpha\partial_y^\beta\phi(\cdot,y)$ converges to $0$ in any Sobolev space $H^\mu(\mathbf{T}^{d})$ ($\mu\ge 0$) when $y$ goes to $-\infty$. The desired decay result now follows from the Sobolev embedding $H^\mu(\mathbf{T}^{d})\subset L^\infty(\mathbf{T}^{d})$ for $\mu>d/2$. \end{proof}
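To make formula~\eqref{n3000} concrete, here is a minimal Python sketch (in dimension $d=1$, with an arbitrary trace $\psi$) which evaluates the Fourier multiplier $e^{(y-y_0)\left\vert D_x\right\vert}$ and illustrates the decay~\eqref{decaytozero} of the derivatives as $y\to-\infty$; the grid size and the depths sampled are illustrative.
\begin{verbatim}
# Harmonic extension below the line {y = y0} via the multiplier
# exp((y - y0)|D_x|), computed with the FFT on the one-dimensional torus.
import numpy as np

N = 256
x = 2 * np.pi * np.arange(N) / N
k = np.fft.fftfreq(N, d=2 * np.pi / N) * 2 * np.pi   # integer wave numbers
y0 = 0.0
psi = np.cos(x) + 0.3 * np.sin(3 * x)                # trace on {y = y0}
psi_hat = np.fft.fft(psi)

def dx_phi(y):
    """x-derivative of the harmonic extension on the slice {y}, y <= y0."""
    return np.real(np.fft.ifft(1j * k * np.exp((y - y0) * np.abs(k)) * psi_hat))

# sup_x |d_x phi(x, y)| decays (here exponentially) as y -> -infinity
for y in [0.0, -1.0, -3.0, -6.0]:
    print(y, np.max(np.abs(dx_phi(y))))
\end{verbatim}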
Let us fix some notations used in the rest of this section. Now, we consider a smooth function $h=h(x)$ in $C^\infty(\mathbf{T}^d)$ and set $$ \Omega=\{(x,y)\in\mathbf{T}^{d}\times\mathbf{R}\,:\,y<h(x)\}. $$ We denote by $\varphi$ the harmonic extension of $h$ in $\overline{\Omega}$. This is the solution to \eqref{defi:varphi2-zero} in the special case where $\zeta=h$. Namely, $\varphi$ solves \begin{equation}\label{defi:varphi2} \left\{ \begin{aligned} &\Delta_{x,y}\varphi=0 \quad\text{in }\Omega,\\ &\varphi(x,h(x))=h(x) \text{ for all }x\in\mathbf{T}^{d}. \end{aligned} \right. \end{equation} Introduce $Q\colon \overline{\Omega}\to\mathbf{R}$ defined by $$ Q(x,y)=\varphi(x,y)-y. $$ We call $Q$ the pressure. In this paragraph we gather some results for the pressure which are all consequences of the maximum principle. For further references, the main result states that $\partial_y Q<0$ everywhere in the fluid.
\begin{proposition}\label{Prop:p3.2} \begin{enumerate}[i)] \item\label{regP1} On the free surface $\Sigma=\{y=h(x)\}$, the function $Q$ satisfies the following properties: \begin{equation}\label{n8} \partial_n Q=-\left\vert \nabla_{x,y} Q\right\vert \quad\text{and}\quad n=-\frac{\nabla_{x,y}Q}{\left\vert \nabla_{x,y} Q\right\vert}, \end{equation} where $n$ denotes the normal to $\Sigma$, given by \begin{equation}\label{n5}
n=\frac{1}{\sqrt{1+|\nabla h|^2}} \begin{pmatrix} -\nabla h \\ 1 \end{pmatrix}. \end{equation} Moreover, the Taylor coefficient $a$ defined by \begin{equation}\label{defi:Taylor} a(x)=-\partial_y Q(x,h(x)), \end{equation} satisfies $a(x)>0$ for all $x\in \mathbf{T}^{d}$. \item\label{regP2} For all $(x,y)$ in $\overline{\Omega}$, there holds \begin{equation}\label{n209} \partial_y Q(x,y)<0. \end{equation} Furthermore, \begin{equation}\label{n210} \inf_{\overline{\Omega}}(-\partial_y Q)\ge \min \Big\{ \inf_{x\in\mathbf{T}^{d}}a(x),1\Big\}. \end{equation}
\item\label{regP3} The function $\left\vert \nabla_{x,y} Q\right\vert$ belongs to $C^\infty(\overline{\Omega})$.
\item \label{regP4} We have the following bound: \begin{equation}\label{esti:final8}
\sup_{(x,y)\in\overline{\Omega}}\left\vert \nabla_{x,y}Q(x,y)\right\vert^2\leq \max_{\mathbf{T}^d}\frac{(1-G(h)h)^2}{1+|\nabla_xh|^2}. \end{equation} \end{enumerate} \end{proposition} \begin{remark}\label{Rema:final1} Consider the evolution problem for the Hele-Shaw equation $\partial_th+G(h)h=0$. Then in \cite{AMS} it is proved that $$ \inf_{x\in\mathbf{T}^{d}}a(t,x)\ge \inf_{x\in\mathbf{T}^{d}}a(0,x),\quad \sup_{x\in\mathbf{T}^{d}}\left\vert G(h)h(t,x)\right\vert\leq \sup_{x\in\mathbf{T}^{d}}\left\vert G(h)h(0,x)\right\vert. $$ Therefore, \eqref{n210} and \eqref{esti:final8} give two different controls of the derivatives of the pressure, which are uniform in time. \end{remark} \begin{proof} In this proof, it is convenient to truncate the domain $\Omega$ to work with a compact domain. Consider $\beta>0$ such that the line $\mathbf{T}^{d}\times \{-\beta\}$ is located underneath the free surface $\Sigma=\{y=h(x)\}$ and set $$ \Omega_\beta=\{(x,y)\in\mathbf{T}^{d}\times\mathbf{R}\,;\,-\beta<y<h(x)\}. $$ We will apply the maximum principle in $\Omega_\beta$ and then let $\beta$ go to $+\infty$.
$\ref{regP1})$ This point is well-known in certain communities, but we recall the proof for the reader's convenience. We begin by observing that, since $Q\arrowvert_{y=h}=0$, on the free surface we have $\left\vert \nabla_{x,y} Q\right\vert=\left\vert \partial_n Q\right\vert$. So to prove that $\partial_n Q=-\left\vert \nabla_{x,y} Q\right\vert$, it remains only to prove that $\partial_n Q\leq 0$. To do so, we begin by noticing that $Q$ is solution to the following elliptic problem $$ \Delta_{x,y}Q=0,\quad Q\arrowvert_{y=h}=0. $$ We will apply the maximum principle in $\Omega_\beta$ with $\beta$ large enough. In view of \eqref{decaytozero}, there is $\beta>0$ such that $$ \forall y\leq -\frac{\beta}{2},\qquad \left\Vert \partial_y\varphi(\cdot,y)\right\Vert_{L^\infty(\mathbf{T}^{d})}\leq \frac{1}{2}. $$ In particular, on $\{y=-\beta\}$, there holds \begin{equation}\label{Qincreases} \forall y\leq -\frac{\beta}{2},\quad\forall x\in\mathbf{T}^{d},\qquad \partial_y Q(x,y)=\partial_y\varphi(x,y)-1\leq -\frac{1}{2}. \end{equation} On the other hand, by using the classical maximum principle for harmonic functions in $\Omega_\beta$, we see that $Q$ reaches its minimum on the boundary $\partial\Omega_\beta$. In light of \eqref{Qincreases}, the minimum is not attained on $\{y=-\beta\}$, so it is attained on~$\Sigma$. Since $Q$ vanishes there, this means that $Q\ge 0$ in $\Omega_\beta$. This immediately implies the wanted result $\partial_n Q\leq 0$. In addition, since the boundary is smooth, we can apply the classical Hopf--Zaremba's principle to infer that $\partial_n Q<0$ on $\Sigma$.
Let us now prove that $a>0$. Recall that, by notation, $\nabla$ denotes the gradient with respect to the horizontal variable only, $\nabla=(\partial_{x_1},\ldots,\partial_{x_d})^{t}$. Since $Q$ vanishes on $\Sigma$, we have \begin{equation}\label{n3001} 0=\nabla\big(Q\arrowvert_{y=h}\big)=(\nabla Q)\arrowvert_{y=h}+(\partial_yQ)\arrowvert_{y=h}\nabla h, \end{equation} which implies that, on $y=h$ we have, \begin{align}
a&=-(\partial_y Q)\arrowvert_{y=h}=-\frac{1}{1+|\nabla h|^2} \Big(\partial_yQ\arrowvert_{y=h}-\nabla h\cdot (\nabla Q)\arrowvert_{y=h}\Big)\label{esti:final9}\\
&=-\frac{1}{\sqrt{1+|\nabla h|^2}}\, \partial_n Q.\notag \end{align} Since $\partial_n Q<0$ on $\Sigma$, this implies that $a$ is a positive function. Finally, remembering that
$n=\frac{1}{\sqrt{1+|\nabla h|^2}} \left(\begin{smallmatrix} -\nabla h \\ 1 \end{smallmatrix}\right)$ and using~\eqref{n3001}, we verify that $$ n=-\frac{\nabla_{x,y} Q}{\left\vert \nabla_{x,y} Q\right\vert}\cdot $$ This completes the proof of statement~$\ref{regP1})$.
$\ref{regP2})$ Since the function $-\partial_y Q$ is harmonic in $\Omega$, the maximum principle applied in $\Omega_\beta$ implies that $-\partial_yQ$ reaches its minimum on the boundary $\partial\Omega_\beta$, so $$ -\partial_y Q\ge \min\Big\{ \inf_{\Sigma} (-\partial_y Q),\inf_{\{y=-\beta\}}(-\partial_y Q)\Big\}. $$ By letting $\beta$ go to $+\infty$, we obtain~\eqref{n210} since $-\partial_y Q$ converges to $1$ (see~\eqref{decaytozero} applied with $\alpha=0$ and $\beta=1$). This in turn implies~\eqref{n209} in view of the fact that $a>0$, as proved in the previous point.
$\ref{regP3})$ Since we assume that $h$ is smooth, the function $Q$ belongs to $C^\infty(\overline{\Omega})$. As a consequence, to prove that $\left\vert \nabla_{x,y} Q\right\vert$ is smooth, it is sufficient to prove that $\left\vert\nabla_{x,y} Q\right\vert^2$ is bounded from below by a positive constant, which is an immediate consequence of~\eqref{n209} and~\eqref{n210}.
$\ref{regP4})$ Since $Q$ is a harmonic function, we have $$ \Delta_{x,y}\left\vert \nabla_{x,y}Q\right\vert^2=2\left\vert\nabla_{x,y}^2Q\right\vert^2\ge 0. $$ Consequently, the maximum principle for sub-harmonic functions implies that $$ \sup_{\overline{\Omega_\beta}}\left\vert \nabla_{x,y}Q\right\vert^2= \sup_{\partial\Omega_\beta}\left\vert \nabla_{x,y}Q\right\vert^2, $$ where $\Omega_\beta$ is as above. By letting $\beta$ go to $+\infty$, we obtain that \begin{equation}\label{esti:final8.1} \sup_{\overline{\Omega}}\left\vert \nabla_{x,y}Q\right\vert^2= \max\left\{\sup_{\Sigma}\left\vert \nabla_{x,y}Q\right\vert^2,1\right\}, \end{equation} where we used as above that $\left\vert\nabla_{x,y}Q\right\vert$ tends to $1$ when $y$ goes to $-\infty$. We are thus reduced to estimating $\left\vert \nabla_{x,y}Q\right\vert^2$ on $\Sigma$. To do so, observe that the identity~\eqref{n3001} implies that, on $\Sigma$, we have \begin{equation}\label{esti:final8.2}
\left\vert \nabla_{x,y}Q\right\vert^2=(1+|\nabla h|^2)(\partial_yQ)^2=(1+|\nabla h|^2)a^2. \end{equation} Using the computations already performed in~\eqref{esti:final9} and remembering that $Q=\varphi-y$, we obtain $$
a=-\frac{1}{1+|\nabla h|^2} \Big(-1+\partial_y\varphi\arrowvert_{y=h}-\nabla h\cdot (\nabla \varphi)\arrowvert_{y=h}\Big). $$ On the other hand, since $\varphi$ is the harmonic extension of $h$, by definition of the Dirichlet-to-Neumann operator $G(h)$, one has $$ G(h)h=\partial_y\varphi\arrowvert_{y=h}-\nabla h\cdot (\nabla \varphi)\arrowvert_{y=h}. $$ We conclude that $$
a=\frac{1-G(h)h}{1+|\nabla h|^2}, $$ which in turn implies that $$
(1+|\nabla h|^2)a^2=\frac{(1-G(h)h)^2}{1+|\nabla_xh|^2}. $$ By combining this with~\eqref{esti:final8.1} and \eqref{esti:final8.2}, we conclude the proof of statement~$\ref{regP4})$. \end{proof}
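As a simple consistency check, consider a flat interface $h\equiv 0$. Then the harmonic extension of $h$ is $\varphi=0$, so that $$ Q(x,y)=-y,\qquad \nabla_{x,y}Q=\begin{pmatrix} 0 \\ -1 \end{pmatrix},\qquad a\equiv 1, $$ and, since $G(0)0=0$, both \eqref{n210} and \eqref{esti:final8} hold with equality in this case.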
\subsection{The key functional identity}\label{S:J(h)}
Let us recall some notations: we denote by $\kappa$ the mean curvature \begin{equation}\label{n6}
\kappa=-\cnx \left(\frac{\nabla h}{\sqrt{1+|\nabla h|^2}}\right). \end{equation} Also, we denote by $\varphi=\varphi(x,y)$ the harmonic extension of $h$ in $\Omega$ given by~\eqref{defi:varphi2} and we use the notation $$ Q(x,y)=\varphi(x,y)-y. $$
\begin{proposition}\label{P:Positive2} Let $d\ge 1$, assume that $h\colon \mathbf{T}^{d}\to \mathbf{R}$ is a smooth function and set $$ J(h)\mathrel{:=} \int_{\mathbf{T}^{d}} \kappa\, G(h)h\diff \! x. $$ Then \begin{equation}\label{n11} J(h)=\iint_{\Omega}\frac{\left\vert \nabla_{x,y} Q\right\vert^2\left\vert \nabla_{x,y}^2Q\right\vert^2 -\left\vert \nabla_{x,y} Q\cdot \nabla_{x,y} \nabla_{x,y} Q\right\vert^2}{\left\vert \nabla_{x,y} Q\right\vert^3}\diff \! y \diff \! x\ge 0. \end{equation} \end{proposition} \begin{remark}\label{rema:34} \begin{enumerate}[i)] \item Since $\left\vert\nabla_{x,y}Q\right\vert\ge \left\vert\partial_yQ\right\vert$, it follows from~\eqref{n209} and the positivity of the Taylor coefficient $a$ (see statement $\ref{regP1})$ in Proposition~\ref{Prop:p3.2}) that $\left\vert \nabla_{x,y} Q\right\vert$ is bounded from below by a positive constant on $\overline{\Omega}$. On the other hand, directly from \eqref{decaytozero}, the function $\left\vert \nabla_{x,y} Q\right\vert^2\left\vert \nabla_{x,y}^2Q\right\vert^2-\left\vert \nabla_{x,y} Q\cdot \nabla_{x,y} \nabla_{x,y} Q\right\vert^2$ belongs to $L^1(\Omega)$. It follows that the right-hand side of \eqref{n11} is a well-defined integral. \item To clarify notations, set $\partial_i=\partial_{x_i}$ for $1\leq i\leq d$ and $\partial_{d+1}=\partial_y$. Then \begin{equation*} \left\{ \begin{aligned} &\left\vert \nabla_{x,y}^2 Q\right\vert^2=\sum_{1\leq i,j\leq d+1}(\partial_{i}\partial_{j}Q)^2,\\ &\left\vert \nabla_{x,y} Q\cdot\nabla_{x,y}\nabla_{x,y} Q\right\vert^2 =\sum_{1\leq i\leq d+1}\biggl(\sum_{1\leq j\leq d+1}(\partial_{j}Q)\partial_{i}\partial_{j}Q\biggr)^2. \end{aligned} \right. \end{equation*} So, it follows from the Cauchy-Schwarz inequality that \begin{equation}\label{n2001} \left\vert \nabla_{x,y} Q\cdot\nabla_{x,y}\nabla_{x,y} Q\right\vert^2\leq \left\vert\nabla_{x,y} Q\right\vert^2\left\vert \nabla_{x,y}^2 Q\right\vert^2. \end{equation} This shows that $J(h)\ge 0$. \item If $d=1$, then one can simplify the previous expression. Remembering that $\Delta_{x,y}Q=0$, one can verify that $$ J(h)=\frac{1}{2} \iint_\Omega\frac{\left\vert \nabla_{x,y}^2Q\right\vert^2}{\left\vert \nabla_{x,y} Q\right\vert}\diff \! y \diff \! x. $$ Notice that, for the Hele-Shaw equation, one has a uniform in time estimate for $\left\vert \nabla_{x,y} Q\right\vert$ as explained in Remark~\ref{Rema:final1}. Consequently, $J(h)$ controls the $L^2$-norm of the second-order derivative of $Q$. \end{enumerate} \end{remark} \begin{proof} To prove Proposition~\ref{P:Positive2}, the main identity is given by the following result. \begin{lemma} There holds \begin{equation}\label{n369} J(h)=\int_\Sigma \partial_n \left\vert \nabla_{x,y} Q\right\vert\diff \! \Hm, \end{equation} where $\Sigma=\{y=h(x)\}$. \end{lemma} \begin{proof} By definition of the Dirichlet-to-Neumann operator, one has $$
G(h)h=\sqrt{1+|\nabla h|^2}\partial_n \varphi\arrowvert_{y=h}, $$ so $$ \int_{\mathbf{T}^{d}} \kappa\, G(h)h\diff \! x
=\int_{\mathbf{T}^{d}} \kappa\,\partial_n \varphi\sqrt{1+|\nabla h|^2}\diff \! x. $$ Using the expression~\eqref{n5} for the normal $n$, we observe that $$
\partial_nQ=\partial_n \varphi-\frac{1}{\sqrt{1+|\nabla h|^2}}. $$ Directly from the definition~\eqref{n6} of $\kappa$, we get that $$ \int_{\mathbf{T}^{d}}\kappa \diff \! x=0. $$ So by combining the previous identities, we deduce that $$ J(h)
=\int_{\mathbf{T}^{d}} \kappa\,(\partial_n Q)\arrowvert_{y=h} \sqrt{1+|\nabla h|^2}\diff \! x, $$ which can be written under the form \begin{equation}\label{n358} J(h)=\int_{\Sigma}\kappa \, \partial_n Q \diff \! \Hm. \end{equation}
For this proof only, to simplify notations, we will write simply $\nabla$ and $\Delta$ instead of $\nabla_{x,y}$ and $\Delta_{x,y}$. Now, we recall from Proposition~\ref{Prop:p3.2} that, on the free surface $\Sigma$, we have \begin{equation}\label{n8bis} \partial_n Q=-\left\vert \nabla Q\right\vert \quad\text{and}\quad n=-\frac{\nabla Q}{\left\vert \nabla Q\right\vert}. \end{equation} It follows that $$ \kappa=-\cnx\left(\frac{\nabla Q}{\left\vert \nabla Q\right\vert}\right), $$ and \begin{equation}\label{n10} \int_{\Sigma}\kappa \, \partial_n Q \diff \! \Hm =\int_{\Sigma}\cnx\left(\frac{\nabla Q}{\left\vert \nabla Q\right\vert} \right)\left\vert \nabla Q\right\vert \diff \! \Hm. \end{equation} Remembering that $\cnx \nabla Q=0$, one can further simplify: \begin{align*} \cnx \left(\frac{\nabla Q}{\left\vert \nabla Q\right\vert} \right) \left\vert \nabla Q\right\vert &=\cnx\left(\frac{\nabla Q}{\left\vert \nabla Q\right\vert} \left\vert \nabla Q\right\vert \right) -\frac{\nabla Q}{\left\vert \nabla Q\right\vert}\cdot \nabla \left\vert\nabla Q\right\vert\\ &=\cnx \nabla Q-\frac{\nabla Q}{\left\vert \nabla Q\right\vert}\cdot \nabla \left\vert\nabla Q\right\vert\\ &=-\frac{\nabla Q}{\left\vert \nabla Q\right\vert}\cdot \nabla \left\vert\nabla Q\right\vert\cdot \end{align*} Now, we use again the identity $n=-\frac{\nabla Q}{\left\vert \nabla Q\right\vert}$ to infer that, on $\Sigma$, we have $$ \cnx\left(\frac{\nabla Q}{\left\vert \nabla Q\right\vert} \right) \left\vert \nabla Q\right\vert =n\cdot \nabla \left\vert\nabla Q\right\vert=\partial_n \left\vert \nabla Q\right\vert. $$ Consequently, it follows from~\eqref{n358} and \eqref{n10} that $$ J(h)=\int_{\Sigma} \partial_n\left\vert \nabla Q\right\vert\diff \! \Hm. $$ This completes the proof of the lemma. \end{proof} We have proved that $J(h)$ is equal to the integral over $\Sigma$ of $\partial_n\left\vert \nabla Q\right\vert$. This suggests applying Stokes' theorem. To do so, as in the proof of Proposition~\ref{Prop:p3.2}, it is convenient to truncate the domain $\Omega$ to work with a compact domain. Again, we consider $\beta>0$ such that the hyperplane $\{y=-\beta\}$ is located underneath the free surface $\Sigma$ and set $$ \Omega_\beta=\{(x,y)\in\mathbf{T}^{d}\times\mathbf{R}\,;\,-\beta<y<h(x)\}. $$ Let us check that the contribution from the fictitious bottom disappears as $\beta$ goes to $+\infty$. \begin{lemma} Denote by $\Gamma_\beta$ the bottom $\Gamma_\beta=\{(x,y)\in \mathbf{T}^{d}\times\mathbf{R}\,;\;y=-\beta\}$. Then \begin{equation}\label{n370} \lim_{\beta\to+\infty}\int_{\Gamma_\beta} \partial_n\left\vert \nabla Q\right\vert\diff \! \Hm=0. \end{equation} \end{lemma} \begin{proof} We have $$ \int_{\Gamma_\beta} \partial_n\left\vert \nabla Q\right\vert\diff \! \Hm =-\int_{\mathbf{T}^{d}}\partial_y \left\vert \nabla Q\right\vert\diff \! x =-\int_{\mathbf{T}^{d}}\frac{\nabla_x Q\cdot\nabla_x\partial_yQ+\partial_y Q\partial_y^2Q}{\left\vert \nabla Q\right\vert}\diff \! x. $$ As we have seen in Remark~\ref{rema:34}, the function $\left\vert \nabla Q\right\vert$ is bounded from below by a positive constant in $\Omega$. Consequently, it is bounded from below on $\Gamma_\beta$ uniformly with respect to $\beta$. On the other hand, it follows from \eqref{decaytozero} that $$ \lim_{\beta\to+\infty}\left\Vert (\nabla_x Q\cdot\nabla_x\partial_yQ +\partial_y Q\partial_y^2Q)(\cdot,-\beta)\right\Vert_{L^\infty(\mathbf{T}^{d})}=0. $$ This immediately gives the wanted result. \end{proof}
Now, we are in a position to conclude the proof. It follows from \eqref{n369} that $$ J(h)=\int_{\partial\Omega_\beta} \partial_n\left\vert \nabla Q\right\vert\diff \! \Hm-\int_{\Gamma_\beta} \partial_n\left\vert \nabla Q\right\vert\diff \! \Hm. $$ Now, remembering that $\left\vert \nabla Q\right\vert$ belongs to $C^\infty(\overline{\Omega})$ (see statement~$\ref{regP3})$ in Proposition~\ref{Prop:p3.2}), one may apply Stokes' theorem to infer that $$ J(h)=\int_{\Omega_\beta} \Delta\left\vert \nabla Q\right\vert\diff \! y \diff \! x-\int_{\Gamma_\beta} \partial_n\left\vert \nabla Q\right\vert\diff \! \Hm. $$ Since $\left\vert \nabla Q\right\vert$ is positive and belongs to $C^\infty(\overline{\Omega})$, one can compute $\Delta\left\vert\nabla Q\right\vert$. To do so, we apply the general identity $$ \Delta u^2=2u\Delta u+2\left\vert \nabla u\right\vert^2, $$ with $u=\left\vert\nabla Q\right\vert$. This gives that \begin{align*} \Delta \left\vert \nabla Q\right\vert&=\frac{1}{2\left\vert \nabla Q\right\vert}\Big(\Delta \left\vert \nabla Q\right\vert^2-2\left\vert \nabla \left\vert\nabla Q\right\vert\right\vert^2\Big)\\ &=\frac{1}{2\left\vert \nabla Q\right\vert}\bigg(\Delta \left\vert \nabla Q\right\vert^2-2\frac{\left\vert \nabla Q\cdot\nabla\nabla Q\right\vert^2}{\left\vert \nabla Q\right\vert^2}\bigg). \end{align*} On the other hand, since $\Delta Q=0$, one has $$ \Delta \left\vert \nabla Q\right\vert^2=\sum_{1\leq j,k\leq d+1}\partial_j^2(\partial_kQ)^2 =2\sum_{1\leq j,k\leq d+1}(\partial_j\partial_k Q)^2=2\left\vert \nabla^2Q\right\vert^2. $$ By combining the two previous identities, we conclude that \begin{align*} \Delta\left\vert\nabla Q\right\vert&=\frac{1}{\left\vert \nabla Q\right\vert^3}\Big(\left\vert \nabla Q\right\vert^2\left\vert \nabla^2Q\right\vert^2-\left\vert \nabla Q\cdot \nabla \nabla Q\right\vert^2\Big). \end{align*}
As we have seen in Remark~\ref{rema:34}, the previous term is integrable on $\Omega$. So, we can use the dominated convergence theorem and let $\beta$ go to $+\infty$. Then~\eqref{n370} implies that the contribution from the bottom disappears in the limit and we obtain the wanted result~\eqref{n11}. This completes the proof. \end{proof}
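In the flat case $h\equiv 0$ one has $Q=-y$ and $\nabla_{x,y}^2Q=0$, so that both sides of \eqref{n11} vanish, in agreement with the fact that $\kappa=0$.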
\subsection{Proof of Theorem~\ref{T1}}
We are now ready to prove Theorem~\ref{T1}. Let $(g,\mu)\in [0,+\infty)^2$ and assume that $h$ is a smooth solution to $$ \partial_{t}h+G(h)(gh+\mu \kappa)=0. $$ We want to prove that $$ \frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^2\diff \! x\leq 0 \quad \text{and}\quad \frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)\leq 0. $$
Multiplying the equation $\partial_{t}h+G(h)(gh+\mu\kappa)=0$ by $h$ and integrating over $\mathbf{T}^{d}$, one obtains that \begin{equation}\label{n157} \frac{1}{2}\frac{\diff}{\dt} \int_{\mathbf{T}^{d}}h^2\diff \! x =-g\int_{\mathbf{T}^{d}}hG(h)h\diff \! x-\mu\int_{\mathbf{T}^{d}}h G(h)\kappa\diff \! x. \end{equation} The first term in the right-hand side is non-positive since $G(h)$ is a non-negative operator. Indeed, as we recalled in the introduction, considering an arbitrary function $\psi$ and denoting by $\varphi$ its harmonic extension, it follows from Stokes' theorem that \begin{equation}\label{positivityDN} \int_{\mathbf{T}^{d}} \psi G(h)\psi\diff \! x=\int_{\partial\Omega}\varphi \partial_n \varphi\diff\mathcal{H}^{d}= \iint_{\Omega}\left\vert\nabla_{x,y}\varphi\right\vert^2\diff \! y \diff \! x\ge 0. \end{equation} This proves that $$ -g\int_{\mathbf{T}^{d}}hG(h)h\diff \! x\leq 0. $$ We now prove that the second term in the right-hand side of \eqref{n157} is also non-positive. To see this, we use~\eqref{n11} and the fact that~$G(h)$ is self-adjoint: $$ \int_{\mathbf{T}^{d}}h G(h)\kappa\diff \! x=\int_{\mathbf{T}^{d}}\kappa G(h)h\diff \! x =J(h)\ge 0. $$ This proves that $$ \frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^2\diff \! x\leq 0. $$
It remains to prove that $\frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)\leq 0$. Write
\begin{align*}
\frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)&=\frac{\diff}{\dt} \int_{\mathbf{T}^{d}}\sqrt{1+|\nabla h|^2}\diff \! x\\
&=\int_{\mathbf{T}^{d}}\nabla_x (\partial_th) \cdot \frac{\nabla_x h}{\sqrt{1+|\nabla h|^2}}\diff \! x\\ &=\int_{\mathbf{T}^{d}} (\partial_th)\kappa \diff \! x, \end{align*} to obtain $$ \frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)=-\mu\int_{\mathbf{T}^{d}}\kappa G(h)\kappa\diff \! x-g J(h)\leq 0, $$ where we used again~\eqref{n11} and the property~\eqref{positivityDN} applied with $\psi=\kappa$.
This completes the proof.
\section{Strong decay for the Hele-Shaw equation}\label{S:J(h)decays}
In this section we prove Theorem~\ref{Theorem:J(h)decays} about the monotonicity of $J(h)$ for solutions of the Hele-Shaw equation. Recall that, by notation, $$ J(h)=\int_{\mathbf{T}^{d}} \kappa G(h)h\diff \! x\quad\text{where}\quad
\kappa=-\cnx\left(\frac{\nabla h}{\sqrt{1+|\nabla h|^2}}\right). $$ We want to prove that $J(h)$ is non-increasing under a mild smallness assumption on $\nabla_{t,x}h$ at initial time.
\begin{proposition}\label{LJ(h)I1} Assume that $h$ is a smooth solution to the Hele-Shaw equation $\partial_t h+G(h)h=0$. Then \begin{equation}\label{n132}
\frac{\diff}{\dt} J(h)+\int_{\mathbf{T}^d}\frac{\left\vert\nabla\partial_t h\right\vert^2+\left\vert\nabla^2 h\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x -\int_{\mathbf{T}^d}\kappa \theta\diff \! x\leq 0, \end{equation} where \begin{equation}\label{defi:theta}
\theta=G(h)\left(\frac{\left\vert\nabla_{t,x}h\right\vert^2}{1+|\nabla h|^2}\right)
-\cnx\left(\frac{\left\vert\nabla_{t,x}h\right\vert^2}{1+|\nabla h|^2}\nabla h\right), \end{equation} with $\left\vert\nabla_{t,x}h\right\vert^2=(\partial_t h)^2+\left\vert \nabla h\right\vert^2$. In addition, if $d=1$ then \eqref{n132} is in fact an equality. \end{proposition} \begin{proof} If $h$ solves the Hele-Shaw equation $\partial_t h+G(h)h=0$, one can rewrite $J(h)$ under the form $$ J(h)=-\int_{\mathbf{T}^d} \kappa h_t \diff \! x, $$ where $h_t$ is a shorthand notation for $\partial_t h$. Consequently, \begin{equation}\label{esti:final10} \frac{\diff}{\dt} J(h)+\int_{\mathbf{T}^d}\kappa_t h_t\diff \! x+\int_{\mathbf{T}^d}\kappa h_{tt}\diff \! x=0. \end{equation} Let us compute the first integral. To do so, we use the Leibniz rule and then integrate by parts, to obtain \begin{align*} \int_{\mathbf{T}^d}\kappa_t h_t\diff \! x
&=-\int_{\mathbf{T}^d}\cnx\left(\frac{\nabla h_t}{\sqrt{1+|\nabla h|^2}}-\frac{\nabla h\cdot \nabla h_t}{(1+|\nabla h|^2)^{3/2}}\nabla h\right)h_t\diff \! x\\
&=\int_{\mathbf{T}^d}\frac{(1+|\nabla h|^2)\left\vert \nabla h_t\right\vert^2-(\nabla h\cdot\nabla h_t)^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x. \end{align*} Now, the Cauchy-Schwarz inequality implies that $$
(1+|\nabla h|^2)\left\vert \nabla h_t\right\vert^2-(\nabla h\cdot\nabla h_t)^2\ge \left\vert \nabla h_t\right\vert^2. $$ (Notice that this is an equality in dimension $d=1$.) It follows from~\eqref{esti:final10} that \begin{equation}\label{n131}
\frac{\diff}{\dt} J(h)+\int_{\mathbf{T}^d}\frac{\left\vert \nabla h_t\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x+\int_{\mathbf{T}^d}\kappa h_{tt}\diff \! x\leq 0. \end{equation}
We now move to the most interesting part of the proof, which is the study of the second term $\int \kappa h_{tt}$. The main idea is to use the fact that the Hele-Shaw equation can be written under the form of a modified Laplace equation. Let us pause to recall this argument introduced in~\cite{Aconvexity}. For the reader's convenience, we begin by considering the linearized equation, which reads $\partial_t h+G(0)h=0$. Since the Dirichlet-to-Neumann operator $G(0)$ associated to a flat half-space is given by $G(0)=\lvert D\rvert$, that is the Fourier multiplier defined by $\lvert D\rvert e^{ix\cdot\xi} =\lvert \xi\rvert e^{ix\cdot\xi}$, the linearized Hele-Shaw equation reads $$ \partial_t h+\left\vert D\right\vert h=0. $$ Since $-\left\vert D\right\vert^2=\Delta$, we find that $$ \Delta_{t,x}h=\partial_t^2 h+\Delta h=0. $$ The next result generalizes this observation to the Hele-Shaw equation.
\begin{theorem}[from~\cite{Aconvexity}]\label{proposition:elliptic} Consider a smooth solution $h$ to $\partial_t h+G(h)h=0$. Then \begin{equation}\label{n131A} \Delta_{t,x}h+B(h)^*\big( \left\vert \nabla_{t,x}h\right\vert^2\big)=0, \end{equation} where $B(h)^*$ is the adjoint (for the $L^2(\mathbf{T}^d)$-scalar product) of the operator defined by $$ B(h)\psi=\partial_y \mathcal{H}(\psi)\arrowvert_{y=h}, $$ where $\mathcal{H}(\psi)$ is the harmonic extension of $\psi$, solution to $$ \Delta_{x,y}\mathcal{H}(\psi)=0\quad \text{in }\Omega,\qquad \mathcal{H}(\psi)\arrowvert_{y=h}=\psi. $$ \end{theorem} We next replace the operator $B(h)^*$ by an explicit expression which is easier to handle. Directly from the definition of $B(h)$ and the chain rule, one can check that (see for instance Proposition~$5.1$ in \cite{AMS}), $$
B(h)\psi=\frac{G(h)\psi+\nabla h\cdot \nabla \psi}{1+|\nabla h|^2}\cdot $$ Consequently, $$
B(h)^*\psi=G(h)\left(\frac{\psi}{1+|\nabla h|^2}\right)
-\cnx\left(\frac{\psi}{1+|\nabla h|^2}\nabla h\right). $$ It follows that \begin{equation}\label{n131AA} B(h)^*\big( \left\vert \nabla_{t,x}h\right\vert^2\big)=\theta, \end{equation} where $\theta$ is as defined in the statement of Proposition~\ref{LJ(h)I1}.
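As an illustration, in the flat case $h=0$ both the definition of $B(h)$ and the explicit formula above give $B(0)\psi=G(0)\psi=\left\vert D\right\vert\psi$, so that $B(0)^*=\left\vert D\right\vert$, consistently with the discussion of the linearized equation recalled before Theorem~\ref{proposition:elliptic}.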
We now go back to the last term on the left-hand side of~\eqref{n131} and write that $$ \int_{\mathbf{T}^d}\kappa h_{tt}\diff \! x=\int_{\mathbf{T}^d}\kappa \Delta_{t,x}h\diff \! x-\int_{\mathbf{T}^d}\kappa \Delta h\diff \! x. $$ (To clarify notations, recall that $\Delta$ denotes the Laplacian with respect to the variable $x$ only.) By plugging this into~\eqref{n131} and using \eqref{n131A}--\eqref{n131AA}, we get $$
\frac{\diff}{\dt} J(h)+\int_{\mathbf{T}^d}\frac{\left\vert \nabla h_t\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x -\int_{\mathbf{T}^d}\kappa \theta\diff \! x -\int_{\mathbf{T}^d}\kappa \Delta h\diff \! x\leq 0. $$ As a result, to complete the proof of Proposition~\ref{LJ(h)I1}, it remains only to show that \begin{equation}\label{claim:kappahxx}
-\int_{\mathbf{T}^d}\kappa \Delta h\diff \! x\ge\int_{\mathbf{T}^d}\frac{|\nabla^2 h|^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x. \end{equation} Notice that in dimension $d=1$, we have $$ \kappa=-\frac{\partial_x^2 h}{(1+(\partial_xh)^2)^{3/2}}, $$ so~\eqref{claim:kappahxx} is in fact an equality. To prove~\eqref{claim:kappahxx} in arbitrary dimension, we begin by applying the Leibniz rule to write \begin{equation}\label{n4001}
-\kappa=\frac{\Delta h}{\sqrt{1+|\nabla h|^2}}-\frac{\nabla h\otimes \nabla h:\nabla^2 h}{(1+|\nabla h|^2)^{3/2}}, \end{equation} where we use the standard notations $\nabla h\otimes \nabla h=((\partial_ih)(\partial_j h))_{1\leq i,j\leq d}$, $\nabla ^2h=(\partial_i\partial_j h)_{1\leq i,j\leq d}$ together with $A:B=\sum_{i,j}a_{ij}b_{ij}$. So, \begin{equation}\label{n147} -\int_{\mathbf{T}^d}\kappa \Delta h\diff \! x=
\int_{\mathbf{T}^d}\frac{(\Delta h)^2}{\sqrt{1+|\nabla h|^2}}\diff \! x
-\int_{\mathbf{T}^d}\frac{(\Delta h)\nabla h\otimes \nabla h:\nabla^2 h}{(1+|\nabla h|^2)^{3/2}}\diff \! x. \end{equation} On the other hand, by integrating by parts twice, we get \begin{align*}
&\int_{\mathbf{T}^d}\frac{(\Delta h)^2}{\sqrt{1+|\nabla h|^2}}\diff \! x=\\
&\qquad=\sum_{i,j}\int_{\mathbf{T}^d}\frac{(\partial_i^2 h)(\partial_j^2h)}{\sqrt{1+|\nabla h|^2}}\diff \! x\\
&\qquad=\sum_{i,j}\int_{\mathbf{T}^d}\frac{(\partial_i\partial_j h)^2}{\sqrt{1+|\nabla h|^2}}\diff \! x\\ &\qquad\quad
+\sum_{i,j,k}\int_{\mathbf{T}^d}\frac{(\partial_ih)(\partial_k h)(\partial_j^2h)(\partial_i\partial_kh)-(\partial_ih)(\partial_kh)(\partial_{i}\partial_{j}h)(\partial_j\partial_kh)}{(1+|\nabla h|^2)^{3/2}}\diff \! x\\
&\qquad=\int_{\mathbf{T}^d}\frac{(1+|\nabla h|^2)\left\vert\nabla ^2h\right\vert^2+(\Delta h)\nabla h\otimes \nabla h:\nabla^2 h-(\nabla h\cdot \nabla^2h)^2
}{(1+|\nabla h|^2)^{3/2}}\diff \! x.\\ \end{align*} By combining this with~\eqref{n147} and simplifying, we obtain $$
-\int_{\mathbf{T}^d}\kappa \Delta h\diff \! x=\int_{\mathbf{T}^d}\frac{(1+|\nabla h|^2)\left\vert\nabla ^2h\right\vert^2-(\nabla h\cdot\nabla\nabla h)^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x. $$ Now, by using the Cauchy-Schwarz inequality in $\mathbf{R}^d$, we obtain the wanted inequality~\eqref{claim:kappahxx}, and the proposition follows. \end{proof}
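Note that in dimension $d=1$ the identity \eqref{n4001} reduces to $$ -\kappa=\frac{\partial_x^2 h}{(1+(\partial_xh)^2)^{1/2}}-\frac{(\partial_xh)^2\,\partial_x^2 h}{(1+(\partial_xh)^2)^{3/2}}=\frac{\partial_x^2 h}{(1+(\partial_xh)^2)^{3/2}}, $$ which is the formula used above to see that \eqref{claim:kappahxx} is an equality when $d=1$.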
In view of the previous proposition, to prove that $J(h)$ is non-increasing, it remains to show that the last term in the left-hand side of~\eqref{n132} can be absorbed by the second one. It does not seem feasible to get such a result by exploiting some special identity for the solutions, but, as we will see, we do have an inequality which holds under a very mild smallness assumption. We begin by making a smallness assumption on the space and time derivatives of the unknown~$h$. We will next apply a maximum principle to bound these derivatives in terms of the initial data only.
\begin{lemma}\label{Lemma:L938} Let $c_d<1/2$ and assume that \begin{equation}\label{assu:L938} \sup_{t,x}\left\vert \nabla h(t,x)\right\vert^2 \leq c_d,\quad \sup_{t,x} ( h_t(t,x))^2\leq c_d. \end{equation} Then \begin{equation}\label{n141} \int_{\mathbf{T}^d}\kappa \theta\diff \! x \leq \gamma_d \int_{\mathbf{T}^d}\frac{\left\vert\nabla h_t\right\vert^2
+\left\vert \nabla^2h\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x \end{equation} with $$ \gamma_d = 2 c_d \left(d+\left(d+\sqrt{d}\right) c_d\right) + 4 \left(c_d\left(d+ (d+1) c_d\right)\left(\frac{12}{1-2c_d}+1\right)\right)^{\frac{1}{2}}. $$ \end{lemma} \begin{proof} To shorten notations, let us set $$ H\mathrel{:=} \int_{\mathbf{T}^d}\frac{\left\vert\nabla h_t\right\vert^2
+\left\vert \nabla^2h\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x, $$ and $$
\zeta\mathrel{:=}\frac{\left\vert\nabla_{t,x}h\right\vert^2}{1+|\nabla h|^2}
=\frac{(\partial_th)^2+ \left\vert\nabla h\right\vert^2}{1+|\nabla h|^2}. $$ Then, by definition of $\theta$ (see~\eqref{defi:theta}), we have $$ \theta=G(h)\zeta-\cnx (\zeta\nabla h) = I_1 + I_2, $$ with $$ I_1= - \zeta \Delta h $$ and $$ I_2 = G(h)\zeta - \nabla \zeta\cdot \nabla h. $$ We will study the contributions of $I_1$ and $I_2$ to $\int \kappa \theta\diff \! x$ separately.
{1) \em Contribution of $I_1$.} We claim that \begin{equation}\label{n4002} -\int_{\mathbf{T}^d}\kappa \zeta \Delta h\diff \! x\leq
\int_{\mathbf{T}^d}\zeta \left(d+ (d+\sqrt d)|\nabla h|^2\right) \frac{|\nabla\nabla h|^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x. \end{equation} To see this, we use again \eqref{n4001}, to write $$ -\kappa\zeta \Delta h=
\zeta\frac{(\Delta h)^2}{\sqrt{1+|\nabla h|^2}}-\zeta
\frac{(\Delta h)\nabla h\otimes \nabla h:\nabla^2 h}{(1+|\nabla h|^2)^{3/2}}. $$ Then we recall that for all $v\colon\mathbf{R}^d \to \mathbf{R}^d$ $$ (\cnx v)^2 =\sum_i\sum_j \partial_i v_i \partial_j v_j \leq \sum_i\sum_j \frac{1}{2} \bigl((\partial_i v_i)^2 + (\partial_j v_j)^2\bigr) \leq d |\nabla v|^2, $$ and therefore \begin{equation}\label{div}
(\Delta h)^2 \leq d \,|\nabla\nabla h|^2. \end{equation} Then, by using the Cauchy-Schwarz inequality, we prove the claim~\eqref{n4002}.
Now, observe that, by definition of $\zeta$ we have $\zeta\leq \left\vert\nabla_{t,x}h\right\vert^2$. So, by assumption \eqref{assu:L938}, we deduce that \begin{align*}
\zeta\left(d+ (d+\sqrt d)|\nabla h|^2\right)&\leq \left\vert \nabla_{t,x} h\right\vert^2\left(d+ (d+\sqrt d)|\nabla h|^2\right)\\ &\leq 2c_d\left(d+ (d+\sqrt d)c_d\right). \end{align*} Therefore, it follows from \eqref{n4002} that \begin{equation}\label{n4002ter} -\int_{\mathbf{T}^d}\kappa \zeta \Delta h\diff \! x\leq 2c_d\left(d+ (d+\sqrt d)c_d\right) H. \end{equation}
{2) \em Contribution of $I_2$.} We now estimate the quantity \begin{equation}\label{n143} \int_{\mathbf{T}^d} \kappa\, \big( G(h)\zeta- \nabla \zeta \cdot \nabla h\big)\diff \! x. \end{equation} We will prove that the absolute value of this term is bounded by \begin{equation}\label{esti:final1} 4 \left(c_d\left(d+ (d+1) c_d\right)\left(\frac{12}{1-2c_d}+1\right)\right)^{\frac{1}{2}}H. \end{equation} By combining this estimate with \eqref{n4002ter}, this will imply the wanted inequality~\eqref{n141}.
To begin, we apply the Cauchy-Schwarz inequality to bound the absolute value of~\eqref{n143} by $$
\left(\int_{\mathbf{T}^d} (1+ |\nabla h|^2)^{3/2}\kappa^2\diff \! x\right)^\frac{1}{2} \left(\int_{\mathbf{T}^d} \frac{(G(h)\zeta
- \nabla \zeta\cdot \nabla h)^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x\right)^\frac{1}{2}. $$ We claim that \begin{equation}\label{esti:final2}
\int_{\mathbf{T}^d} (1+ |\nabla h|^2)^{3/2}\kappa^2\diff \! x\leq 2\big(d+(d+1)c_d\big) H, \end{equation} and \begin{equation}\label{esti:final3} \int_{\mathbf{T}^d} \frac{(G(h)\zeta
- \nabla \zeta\cdot \nabla h)^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x\leq 8c_d\left(\frac{12}{1-2c_d}+1\right)H. \end{equation} It will follow from these claims that the absolute value of~\eqref{n143} is bounded by~\eqref{esti:final1}, which in turn will complete the proof of the lemma.
We begin by proving~\eqref{esti:final2}. Recall from~\eqref{n4001} that \begin{equation}\label{n4001bis}
-\kappa=\frac{\Delta h}{\sqrt{1+|\nabla h|^2}}-\frac{\nabla h\otimes \nabla h:\nabla^2 h}{(1+|\nabla h|^2)^{3/2}},
\end{equation} and therefore, using the inequality $(\Delta h)^2 \leq d \,|\nabla\nabla h|^2$ (see~\eqref{div}), $$ \kappa^2\leq
2\big(d+(d+1)|\nabla h|^2\big)
\frac{|\nabla\nabla h|^2}{(1+|\nabla h|^2)^3}, $$ which implies~\eqref{esti:final2}, remembering that $\left\vert \nabla h\right\vert^2\leq c_d$, by assumption~\eqref{assu:L938}.
We now move to the proof of~\eqref{esti:final3}. Since $$ \frac{(G(h)\zeta
- \nabla \zeta\cdot \nabla h)^2}{(1+|\nabla h|^2)^{3/2}}\leq 2(G(h)\zeta)^2+2 \left\vert \nabla \zeta\right\vert^2, $$ it is sufficient to prove that \begin{equation}\label{n145} \int_{\mathbf{T}^d} (G(h)\zeta)^2\diff \! x
+\int_{\mathbf{T}^d} |\nabla \zeta|^2\diff \! x \leq 4c_d\left(\frac{12}{1-2c_d}+1\right)H. \end{equation} To establish~\eqref{n145}, the crucial point will be to bound the $L^2$-norm of $G(h)\zeta$ in terms of the $L^2$-norm of $\nabla \zeta$. Namely, we now want to prove the following estimate:
if $|\nabla h|^2\leq c_d$ with $c_d<1/2$, then \begin{equation}\label{d12} \int_{\mathbf{T}^d} (G(h)\zeta)^2\diff \! x\leq \frac{12}{1-2 c_d}
\int_{\mathbf{T}^d} |\nabla \zeta|^2\diff \! x. \end{equation} To do so, we use the following inequality\footnote{This inequality belongs to the family of Rellich type inequalities, which give a control on the boundary of the normal derivative in terms of the tangential one.} (proved in Appendix~\ref{A:Rellich}): \begin{equation}\label{d10} \int_{\mathbf{T}^d} (G(h)\zeta)^2\diff \! x \leq
\int_{\mathbf{T}^d} (1+|\nabla h|^2)|\nabla \zeta-\mathcal{B} \nabla h|^2 \diff \! x, \end{equation} where \begin{equation}\label{d11}
\mathcal{B}=\frac{G(h)\zeta+\nabla \zeta \cdot \nabla h}{1+|\nabla h|^2}. \end{equation} Then, by replacing $\mathcal{B}$ in \eqref{d10} by its expression \eqref{d11}, we obtain \begin{multline*} \int_{\mathbf{T}^d} (G(h)\zeta)^2\diff \! x \\
\leq\int_{\mathbf{T}^d} (1+|\nabla h|^2)
\left|\frac{(1+|\nabla h|^2){\rm Id}-\nabla h\otimes \nabla h}
{1+|\nabla h|^2}\nabla \zeta
-\frac{G(h)\zeta}{1+|\nabla h|^2}\nabla h\right|^2 \diff \! x. \end{multline*} So, expanding the right-hand side and simplifying, we get \begin{align*}
&\int_{\mathbf{T}^d} \frac{1-|\nabla h|^2}{1+|\nabla h|^2} (G(h)\zeta)^2\diff \! x\\ &\qquad\leq \int_{\mathbf{T}^d}
\frac{|((1+|\nabla h|^2){\rm Id} -\nabla h \otimes \nabla h) \nabla \zeta|^2}{1+|\nabla h|^2} \diff \! x\\ &\qquad\quad
-2\int_{\mathbf{T}^d}\nabla h \cdot \frac{\bigl(((1+|\nabla h|^2){\rm Id}-\nabla h \otimes \nabla h) \nabla \zeta\bigr)}{1+|\nabla h|^2} G(h)\zeta \diff \! x. \end{align*} Hence, by using the Young inequality, \begin{align*}
\int_{\mathbf{T}^d} \frac{1-|\nabla h|^2}{1+|\nabla h|^2} (G(h)\zeta)^2\diff \! x &\leq \int_{\mathbf{T}^d}
\frac{\bigl|(1+|\nabla h|^2){\rm Id} -\nabla h\otimes \nabla h\bigr|^2}{1+|\nabla h|^2}
|\nabla \zeta|^2\diff \! x\\
&\quad+\int_{\mathbf{T}^d} \frac{|\nabla h|^2}{1+|\nabla h|^2} (G(h)\zeta)^2\diff \! x\\
&\quad+\int_{\mathbf{T}^d} \frac{|(1+|\nabla h|^2){\rm Id} -\nabla h\otimes\nabla h|^2}
{1+|\nabla h|^2} |\nabla \zeta|^2\diff \! x. \end{align*} Now we write $$
\frac{|((1+|\nabla h|^2){\rm Id} -\nabla h \otimes \nabla h) |^2}{1+|\nabla h|^2}\leq
\frac{(1+2|\nabla h|^2)^2}{1+|\nabla h|^2}, $$ to obtain $$
\int_{\mathbf{T}^d} \frac{1-2|\nabla h|^2}{1+|\nabla h|^2} (G(h)\zeta)^2\diff \! x\leq 2\int_{\mathbf{T}^d}
\frac{(1+2|\nabla h|^2)^2}{1+|\nabla h|^2}|\nabla \zeta|^2\diff \! x. $$
Now, recalling that $|\nabla h|^2 \leq c_d < 1/2$, we get $$ \int_{\mathbf{T}^d} (G(h)\zeta)^2\diff \! x\leq 2 \frac{(1+c_d) (1+2c_d)^2 }{1-2 c_d}\int_{\mathbf{T}^d}
|\nabla \zeta|^2\diff \! x\leq \frac{12}{1-2 c_d}\int_{\mathbf{T}^d}
|\nabla \zeta|^2\diff \! x. $$
In view of~\eqref{d12}, to prove the wanted inequality~\eqref{n145}, we are reduced to establishing $$
\int_{\mathbf{T}^d} |\nabla \zeta|^2\diff \! x\leq 4 c_d
\int_{\mathbf{T}^d}\frac{|\nabla h_t|^2
+ |\nabla\nabla h|^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x. $$ Since $$
\nabla \zeta = 2\frac{h_t}{(1+|\nabla h|^2)^{1/4} }
\frac{\nabla h_t}{(1+|\nabla h|^2)^{3/4}}
+ 2\frac{(1-(h_t)^2)\nabla h}{(1+|\nabla h|^2)^{5/4}}
\cdot \frac{\nabla\nabla h}{(1+|\nabla h|^2)^{3/4}}, $$ the latter inequality will be satisfied provided that $$
\frac{\left((1-(h_t)^2)|\nabla h|\right)^2}{(1+|\nabla h|^2)^{5/2}}\leq c_d, \quad
\frac{\left( h_t\right)^2}{(1+|\nabla h|^2)^{1/2}}\leq c_d. $$ The latter two conditions are obviously satisfied when \begin{equation}\label{n150}
|\nabla h|^2\leq c_d,\quad |h_t|^2\leq c_d\quad\text{with}\quad c_d< \frac{1}{2}. \end{equation} This completes the proof of Lemma~\ref{Lemma:L938}. \end{proof}
We are now in position to complete the proof. Recall that Proposition~\ref{LJ(h)I1} implies that $$
\frac{\diff}{\dt} J(h)+\int_{\mathbf{T}^d}\frac{\left\vert\nabla\partial_t h\right\vert^2+\left\vert\nabla^2 h\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x \leq \int_{\mathbf{T}^d}\kappa \theta\diff \! x. $$ On the other hand, Lemma~\ref{Lemma:L938} implies that $$ \int_{\mathbf{T}^d}\kappa \theta\diff \! x\leq \gamma_d
\int_{\mathbf{T}^d}\frac{\left\vert\nabla\partial_t h\right\vert^2+\left\vert\nabla^2 h\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x, $$ with $$ \gamma_d = 2 c_d \left(d+\left(d+\sqrt{d}\right) c_d\right) + 4 \left(c_d\left(d+ (d+1) c_d\right)\left(\frac{12}{1-2c_d}+1\right)\right)^{\frac{1}{2}}, $$ provided \begin{equation}\label{esti:final4} \sup_{t,x}\left\vert \nabla h(t,x)\right\vert^2 \leq c_d,\quad \sup_{t,x} ( h_t(t,x))^2\leq c_d. \end{equation} We now fix $c_d\in [0,1/4]$ by solving the equation $\gamma_d=1/2$ (the latter equation has a unique solution since $c_d\mapsto \gamma_d$ is strictly increasing). It follows that, \begin{equation}\label{n152}
\frac{\diff}{\dt} J(h)+\frac{1}{2} \int_{\mathbf{T}^d}\frac{\left\vert\nabla\partial_t h\right\vert^2+\left\vert\nabla^2 h\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x\leq 0. \end{equation} The expected decay of $J(h)$ is thus seen to hold as long as the solution $h=h(t,x)$ satisfies the assumption~\eqref{esti:final4}. Consequently, to conclude the proof of Theorem~\ref{Theorem:J(h)decays}, it remains only to show that the assumption~\eqref{esti:final4} on the solution will hold provided that it holds initially. To see this, we use the fact that there is a maximum principle for the Hele-Shaw equation, for space {\em and} time derivatives (the maximum principle for spatial derivatives is well-known (see~\cite{Kim-ARMA2003,ChangLaraGuillenSchwab,AMS}), the one for the time derivative is given by Theorem~$2.11$ in~\cite{AMS}). This means that the assumption \eqref{n150} holds for all time $t\ge 0$ provided that it holds at time $0$. This concludes the proof of Theorem~\ref{Theorem:J(h)decays}.
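Although the precise value of $c_d$ is irrelevant for the argument, it can easily be computed numerically from the formula for $\gamma_d$. The following short Python script (given for illustration only; the function names are ours) evaluates by bisection the unique $c_d\in(0,1/4]$ such that $\gamma_d=1/2$; one finds for instance $c_1\approx 1.2\cdot 10^{-3}$.
\begin{verbatim}
# Illustration only: solve gamma_d(c) = 1/2 by bisection, where gamma_d is
# the constant of the lemma above (the formula requires c < 1/2).
import math

def gamma(c, d):
    return (2*c*(d + (d + math.sqrt(d))*c)
            + 4*math.sqrt(c*(d + (d + 1)*c)*(12/(1 - 2*c) + 1)))

def threshold(d, tol=1e-12):
    # gamma vanishes at c=0, is increasing, and exceeds 1/2 at c=1/4
    lo, hi = 0.0, 0.25
    while hi - lo > tol:
        mid = 0.5*(lo + hi)
        if gamma(mid, d) < 0.5:
            lo = mid
        else:
            hi = mid
    return 0.5*(lo + hi)

for d in (1, 2, 3):
    print(d, threshold(d))
\end{verbatim}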
\section{On the thin-film equation}
The aim of this section is two-fold. Firstly, for the reader's convenience, we collect various known results for the equation $$ \partial_th-\cnx\big(h\nabla(gh-\mu\Delta h)\big)=0. $$ Secondly, we study the decay of certain Lebesgue norms for the thin-film equation \begin{equation}\label{thinfilm-d} \partial_t h +\cnx (h\nabla \Delta h) = 0. \end{equation} Recall that we consider only smooth positive solutions. Then, since $$ \partial_t \int_{\mathbf{T}^{d}} h\diff \! x=0, $$
and since $h=|h|$, the $L^1$-norm is preserved and, obviously, it is a Lyapunov functional. We study more generally the decay of Lebesgue norms $\int h^p\diff \! x$ with $p>0$. The study of this question goes back to the work of Beretta, Bertsch and Dal Passo~\cite{BerettaBDP-ARMA-1995} and was continued by Dal Passo, Garcke and Gr\"{u}n~\cite{DPGG-Siam-1998}, Bertsch, Dal Passo, Garcke and Gr\"{u}n~\cite{BDPGG-ADE-1998}. In these papers, it is proved that $$ \int_{\mathbf{T}^d} h^{p}\diff \! x, $$ is a Lyapunov functional for $1/2< p< 2$ and $d=1$. More recently, J\"{u}ngel and Matthes performed in~\cite{JungelMatthes-Nonlinearity-2006} a systematic study of entropies for the thin-film equation, based on a computer assisted proof. They obtain the same result, allowing for the endpoints, that is for $1/2\leq p\leq 2$; they give a complete proof in space dimension $d=1$ and sketch the proof of the general case in Section $5$ of their paper. Here, we will not prove any new result, but we propose a new proof of the fact that $\int_{\mathbf{T}^d} h^p\diff \! x$ is a Lyapunov functional in any dimension $d\ge 1$ and for any $p$ in the closed interval $[1/2,2]$. Our proof is self-contained and elementary. This will allow us to introduce a functional inequality as well as some integration by parts arguments used later to study the Boussinesq equation.
\subsection{Classical Lyapunov functionals}
\begin{proposition}\label{prop:lubrik1} Let $(g,\mu)\in [0,+\infty)^2$ and assume that $h$ is a smooth positive solution to the thin-film equation $$ \partial_th-\partial_x\big(h\partial_x(gh-\mu\partial_x^2 h)\big)=0. $$ Then $$ \frac{\diff}{\dt} \int_{\mathbf{T}} h^2\diff \! x \leq 0\quad\text{and}\quad \frac{\diff}{\dt}\int_{\mathbf{T}} (\partial_xh)^2\diff \! x\leq 0. $$ \end{proposition} \begin{proof} Multiply the equation by $h$ and integrate by parts. Then $$ \frac{1}{2}\frac{\diff}{\dt}\int_\mathbf{T} h^2\diff \! x+g\int_\mathbf{T} hh_x^2\diff \! x-\mu\int_\mathbf{T} hh_xh_{xxx}\diff \! x=0. $$ Now notice that $$ \int_\mathbf{T} hh_xh_{xxx}\diff \! x=-\int_\mathbf{T} h_x^2h_{xx}\diff \! x-\int_\mathbf{T} hh_{xx}^2\diff \! x=-\int_\mathbf{T} hh_{xx}^2\diff \! x, $$ since $h_x^2h_{xx}=\partial_x(h_x^3/3)$ integrates to zero. Consequently, $$ \frac{1}{2}\frac{\diff}{\dt}\int_\mathbf{T} h^2\diff \! x+\int_\mathbf{T} h(gh_x^2+\mu h_{xx}^2)\diff \! x= 0. $$ Similarly, by multiplying the equation by $h_{xx}$ and integrating by parts, one obtains that $$ \frac{1}{2} \frac{\diff}{\dt} \int_\mathbf{T} h_x^2\diff \! x+\int_\mathbf{T} h\big( gh_{xx}^2+\mu h_{xxx}^2\big)\diff \! x=0. $$ Now, it follows directly from the assumption $h\ge 0$ that $$ \frac{\diff}{\dt} \int_\mathbf{T} h^2\diff \! x\leq 0 \quad\text{and}\quad \frac{\diff}{\dt} \int_\mathbf{T} (\partial_x h)^2\diff \! x\leq 0. $$ This completes the proof. \end{proof}
Half of the previous results can be generalized to the $d$-dimensional case in a straightforward way. \begin{proposition}\label{prop:lubrik1n} Let $d\ge 1$. If $h$ is a smooth positive solution to the thin-film equation \begin{equation}\label{TFwith} \partial_th+\cnx\big(h\nabla \Delta h\big)=0, \end{equation} then $$ \frac{\diff}{\dt}\int_{\mathbf{T}^{d}} \left\vert \nabla h\right\vert^2\diff \! x\leq 0. $$ If $h$ is a smooth positive solution to \begin{equation}\label{TFwithout} \partial_th-\cnx\big(h\nabla h\big)=0, \end{equation} then $$ \frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^2\diff \! x \leq 0. $$ \end{proposition} \begin{proof} For the first point, we multiply the equation by $-\Delta h$ and integrate by parts. For the second point, we multiply the equation by $h$ and integrate by parts. \end{proof}
This raises the question of proving the decay of the $L^2$-norm for \eqref{TFwith} (resp.\ the decay of the $L^2$-norm of $\nabla h$ for \eqref{TFwithout}) in arbitrary dimension. We study these questions in the rest of this section (resp.\ in Section~\ref{S:Boussinesq}).
\subsection{Decay of certain Lebesgue norms}
We begin by considering the special case of the $L^2$-norm. The interesting point is that, in this case, we are able to prove that it is a Lyapunov functional by means of a very simple argument. \begin{proposition}\label{prop:L2decaysagain} Let $d\ge 1$ and consider a smooth positive solution $h$ to~$\partial_t h +\cnx (h\nabla \Delta h) = 0$. Then \begin{equation}\label{decayL2TF}
\frac{1}{2}\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h(t,x)^2\diff \! x+\frac{2}{3}\int_{\mathbf{T}^{d}} h |\nabla\nabla h|^2 \diff \! x
+ \frac{1}{3} \int_{\mathbf{T}^{d}} h |\Delta h|^2\diff \! x = 0. \end{equation} \end{proposition} \begin{proof} The energy identity reads $$ \frac{1}{2} \frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h(t,x)^2\diff \! x+I=0 $$ where $$ I= - \int_{\mathbf{T}^{d}} h \nabla h \cdot \nabla \Delta h\diff \! x. $$ Integrating by parts we get $$
I= \int_{\mathbf{T}^{d}} h|\Delta h|^2 \diff \! x
+ \int_{\mathbf{T}^{d}} |\nabla h|^2 \Delta h \diff \! x= I_1+I_2. $$ We integrate by parts again to rewrite $I_2$ under the form $$ I_2 = - 2\int_{\mathbf{T}^{d}} ((\nabla h \cdot \nabla) \nabla h) \cdot \nabla h\diff \! x
= 2 \int_{\mathbf{T}^{d}} h |\nabla \nabla h|^2\diff \! x
- 2 I.$$ It follows that $$
I = \frac{2}{3}\int_{\mathbf{T}^{d}} h |\nabla\nabla h|^2 \diff \! x+ \frac{1}{3} \int_{\mathbf{T}^{d}} h |\Delta h|^2\diff \! x, $$ which is the wanted result. \end{proof}
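In dimension $d=1$ one has $\left\vert\nabla\nabla h\right\vert^2=\left\vert\Delta h\right\vert^2=h_{xx}^2$, so that \eqref{decayL2TF} reduces to $$ \frac{1}{2}\frac{\diff}{\dt}\int_{\mathbf{T}}h^2\diff \! x+\int_{\mathbf{T}}h\,h_{xx}^2\diff \! x=0, $$ in agreement with Proposition~\ref{prop:lubrik1} applied with $g=0$ and $\mu=1$.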
As explained in the paragraph at the beginning of this section, our main goal is to give a simple and self-contained proof of the fact that the quantities $\int_{\mathbf{T}^d} h^p\diff \! x$ are Lyapunov functionals for any $d\ge 1$ and any real number $p$ in $[1/2,2]$. To do so, the key ingredient will be a new functional inequality of independent interest which is given by the following \begin{proposition}\label{P:refD.1v2} For any $d\ge 1$, any real number $\mu$ and any bounded positive function $\theta$ in $H^2(\mathbf{T}^{d})$, \begin{equation}\label{youpi2}
\frac{\mu^2}{3}\int_{\mathbf{T}^{d}} \theta^{\mu-1}\big|\nabla \theta\big|^4\diff \! x \leq
\int_{\mathbf{T}^{d}} \theta^{\mu+1} (\Delta \theta)^2 \diff \! x+
2\int_{\mathbf{T}^{d}} \theta^{\mu+1} (\nabla\nabla \theta)^2 \diff \! x. \end{equation} \end{proposition} \begin{remark}\label{Rema:endpoint} Dal Passo, Garcke and Gr\"un proved in \cite[Lemma~$1.3$]{DPGG-Siam-1998} the following identity: \begin{align*} &\int_{\mathbf{T}^{d}}f'(\theta)\left\vert\nabla \theta\right\vert^2\Delta \theta\diff \! x\\ &\qquad=-\frac{1}{3}\int_{\mathbf{T}^{d}}f''(\theta)\left\vert \nabla \theta\right\vert^4\diff \! x\\ &\qquad\quad+\frac{2}{3}\left(\int_{\mathbf{T}^{d}}f(\theta)\left\vert \nabla^2\theta\right\vert^2\diff \! x-\int_{\mathbf{T}^{d}}f(\theta)(\Delta \theta)^2\diff \! x\right). \end{align*} Assuming that $\mu\neq -1$, by using this equality with $f(\theta)=\theta^{\mu+1}$, we obtain that \begin{multline}\label{youpi2-vDPGG}
\int_{\mathbf{T}^{d}} \theta^{\mu-1}\big|\nabla \theta\big|^4\diff \! x\\ \qquad\qquad\leq C(\mu)\left(
\int_{\mathbf{T}^{d}} \theta^{\mu+1} (\Delta \theta)^2 \diff \! x+
\int_{\mathbf{T}^{d}} \theta^{\mu+1} (\nabla\nabla \theta)^2 \diff \! x\right). \end{multline} So \eqref{youpi2} is a variant of the previous inequality which holds uniformly in $\mu$ (this means that we can consider the case $\mu=-1$ already encountered in Proposition~\ref{theo:logSob}, the latter case being important for the application since it is needed to control the $L^2$-norms). \end{remark} \begin{proof} The result is obvious when $\mu=0$ so we assume $\mu\neq 0$ in the sequel. We then proceed as in the proof of Proposition~\ref{theo:logSob}. We begin by writing that \begin{align*}
\int_{\mathbf{T}^{d}}\theta^{\mu-1}\big|\nabla \theta\big|^4\diff \! x &=\int_{\mathbf{T}^{d}}\frac{1}{\mu}\nabla \theta^\mu\cdot \nabla \theta \left\vert \nabla\theta\right\vert^2\diff \! x\\ &=-\frac{1}{\mu}\int_{\mathbf{T}^{d}}\theta^\mu (\Delta \theta)\left\vert \nabla \theta\right\vert^2\diff \! x\\ &\quad-\frac{2}{\mu}\int_{\mathbf{T}^{d}}\theta^\mu \nabla \theta\cdot\big[ (\nabla \theta\cdot\nabla )\nabla \theta\big]\diff \! x. \end{align*} Then, by using Cauchy-Schwarz arguments similar to the ones used in the proof of Proposition~\ref{theo:logSob}, we infer that \begin{multline*}
\int_{\mathbf{T}^{d}} \theta^{\mu-1}\big|\nabla \theta\big|^4\diff \! x\\ \qquad\qquad\leq \frac{1}{\mu^2}\left(
\left(\int_{\mathbf{T}^{d}} \theta^{\mu+1} (\Delta \theta)^2 \diff \! x\right)^{\frac{1}{2}}+
2\left(\int_{\mathbf{T}^{d}} \theta^{\mu+1} (\nabla\nabla \theta)^2 \diff \! x\right)^{\frac{1}{2}}\right)^2. \end{multline*} To conclude the proof, it remains only to use the elementary inequality $(x+2y)^2\leq 3(x^2+2y^2)$, which follows from $3(x^2+2y^2)-(x+2y)^2=2(x-y)^2\ge 0$. \end{proof}
We are now ready to give an elementary proof of the following result.
\begin{proposition}\label{positivity} Consider a real number $m$ in $[-1/2,0)\cup(0,1]$. Then, for every smooth positive solution to $\partial_t h +\cnx (h\nabla \Delta h) = 0$, \begin{equation}\label{wantedmn1} \frac{1}{m(m+1)}\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^{m+1}\diff \! x +C_m\int_{\mathbf{T}^{d}}h^{m-2}\left\vert\nabla h\right\vert^4\diff \! x\leq 0, \end{equation} where $$ C_m=\frac{1}{9}(-2m^2+m+1)\ge 0. $$ \end{proposition} \begin{proof} We begin by multiplying the equation by $\frac{1}{m}h^m$ and integrating by parts, to get \begin{align*} &\frac{1}{m(m+1)}\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^{m+1}\diff \! x+ P_m = 0\quad\text{where}\\ &P_m=-\frac{1}{m}\int_{\mathbf{T}^{d}}\nabla h^m \cdot \big( h\nabla \Delta h\big)\diff \! x =-\int_{\mathbf{T}^{d}}h^m\nabla h\cdot \nabla \Delta h\diff \! x. \end{align*} Now, we use the following trick: there are two possible integrations by parts to compute an integral of the form $\int f\nabla g\cdot \nabla \Delta h\diff \! x$. Indeed, with summation over the repeated indices $i$ and $j$, \begin{align*} &\int_{\mathbf{T}^{d}} f(\partial_i g)(\partial_i \partial_j^2 h)\diff \! x=-\int f (\partial_i^2 g)(\partial_j ^2h)\diff \! x -\int (\partial_if)(\partial_ig)\partial_j^2h\diff \! x\\ &\int_{\mathbf{T}^{d}} f\partial_i g \partial_i \partial_j^2 h\diff \! x=-\int f (\partial_i\partial_j g)(\partial_i\partial_jh)\diff \! x -\int (\partial_jf)(\partial_ig)\partial_i\partial_jh\diff \! x. \end{align*} Consequently, one has two different expressions for $P_m$: \begin{align} P_m&=\int_{\mathbf{T}^{d}}h^m(\Delta h)^2\diff \! x+m\int_{\mathbf{T}^{d}}h^{m-1}\left\vert\nabla h\right\vert^2 \Delta h\diff \! x,\label{Pm1}\\ P_m&=\int_{\mathbf{T}^{d}}h^m\left\vert\nabla^2 h\right\vert^2\diff \! x+m\int_{\mathbf{T}^{d}}h^{m-1}\nabla h\otimes \nabla h:\nabla^2 h\diff \! x.\label{Pm2} \end{align} To exploit the fact that there are two different identities for $P_m$, we need to figure out the most appropriate linear combination of \eqref{Pm1} and~\eqref{Pm2}. To do so, we will exploit the following cancellation \begin{equation}\label{identitysimplei}
\int \Big[f |\nabla \rho|^2\Delta \rho+2 f \nabla \nabla \rho : \nabla \rho \otimes \nabla \rho\Big] \diff \! x =-\int \left\vert \nabla \rho\right\vert^2\nabla f\cdot\nabla \rho\diff \! x, \end{equation} which is proved again by an integration by parts: $$ \int f (\partial_i\rho)^2\partial_j^2 \rho\diff \! x=-2\int f (\partial_i\rho)(\partial_i\partial_j\rho)\partial_j \rho\diff \! x -\int (\partial_j f)(\partial_i\rho)^2\partial_j \rho\diff \! x. $$ This suggests adding the right-hand side of \eqref{Pm1} to twice the right-hand side of \eqref{Pm2}. Using \eqref{identitysimplei} with $f=h^{m-1}$ and $\rho=h$, this implies that $$ 3P_m=\int_{\mathbf{T}^{d}}h^m \Big( 2\left\vert\nabla^2 h\right\vert^2+(\Delta h)^2\Big)\diff \! x-m(m-1)\int_{\mathbf{T}^{d}}h^{m-2}\left\vert\nabla h\right\vert^4\diff \! x. $$ Now, the functional inequality \eqref{youpi2} applied with $\mu=m-1$ implies that $$ \int_{\mathbf{T}^{d}}h^m \Big( 2\left\vert\nabla^2 h\right\vert^2+(\Delta h)^2\Big)\diff \! x\ge \frac{(m-1)^2}{3}\int_{\mathbf{T}^{d}}h^{m-2}\left\vert\nabla h\right\vert^4\diff \! x. $$ We thus obtain the wanted lower bound for the dissipation rate: $$ P_m\ge C_m\int_{\mathbf{T}^{d}}h^{m-2}\left\vert\nabla h\right\vert^4\diff \! x\quad\text{with}\quad C_m=\frac{1}{3}\Big( \frac{(m-1)^2}{3}-m(m-1)\Big). $$ Now, it remains only to observe that the above constant $C_m$ is non-negative when $-2m^2+m+1\ge 0$, that is for $m$ in $[-1/2,1]$. \end{proof}
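Observe that $C_m=\frac{1}{9}(1-m)(1+2m)$ vanishes precisely at the endpoints $m=1$ and $m=-1/2$. For $m=1$, \eqref{wantedmn1} thus reduces to the decay of $\int_{\mathbf{T}^{d}}h^2\diff \! x$ already obtained in Proposition~\ref{prop:L2decaysagain}, while for the remaining admissible values of $m$ it provides in addition a control of the dissipation term $\int_{\mathbf{T}^{d}}h^{m-2}\left\vert\nabla h\right\vert^4\diff \! x$.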
\section{The Boussinesq equation}\label{S:Boussinesq} In this section, we begin by studying Lyapunov functionals for the Boussinesq equation $$ \partial_th-\cnx(h\nabla h)=0. $$ By a straightforward integration by parts argument, one has the following \begin{proposition}\label{convexporoust} Consider a smooth positive solution to \begin{equation}\label{boussinesqpasdarcy} \partial_t h -\cnx (h \nabla h) = 0. \end{equation} For any real number $m$, \begin{equation}\label{estim1}
\frac{\diff}{\dt}\int_{\mathbf{T}^{d}} h^{m+1}\diff \! x + m(m+1)\int_{\mathbf{T}^{d}} h^m |\nabla h|^2\diff \! x = 0, \end{equation} and \begin{equation}\label{estim2}
\frac{\diff}{\dt}\int_{\mathbf{T}^{d}} h\log h \diff \! x+ \int_{\mathbf{T}^{d}} |\nabla h|^2\diff \! x = 0. \end{equation} \end{proposition}
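For instance, \eqref{estim1} follows by multiplying \eqref{boussinesqpasdarcy} by $(m+1)h^m$ and integrating by parts: $$ \frac{\diff}{\dt}\int_{\mathbf{T}^{d}} h^{m+1}\diff \! x=(m+1)\int_{\mathbf{T}^{d}} h^m\cnx(h\nabla h)\diff \! x =-m(m+1)\int_{\mathbf{T}^{d}} h^{m}\left\vert\nabla h\right\vert^2\diff \! x; $$ the identity \eqref{estim2} is obtained in the same way, multiplying the equation by $1+\log h$.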
We now seek strong Lyapunov functionals. Recall from Definition~\ref{Defi:1.1} that $I$ is a strong Lyapunov functional if it decays in a convex manner, in other words: $$ \frac{\diff}{\dt} I\leq 0\quad\text{and}\quad\frac{\diff^2}{\dt^2} I\ge 0. $$ In view of~\eqref{estim1} and~\eqref{estim2}, we have to find those $m$ for which $$ \frac{\diff}{\dt}\int_{\mathbf{T}^{d}}h^m\left\vert\nabla h\right\vert^2\diff \! x\leq 0. $$ As an example we recall that this property holds for $m=2$. Indeed, as explained by V\'azquez in his monograph (see~\cite[\S$3.2.4$]{Vazquez-PME-book}), by multiplying the equation by $\partial_t (h^2)$, one obtains that $$ \frac{\diff}{\dt}\int_{\mathbf{T}^{d}}h^2\left\vert\nabla h\right\vert^2\diff \! x\leq 0. $$ By combining this with~\eqref{estim1}, we see that $\int_{\mathbf{T}^{d}}h^3\diff \! x$ is a strong Lyapunov functional. We will complement this by considering $\int_{\mathbf{T}^{d}}h^{m+1}\diff \! x$ for $0\leq m\leq (1+\sqrt{7})/2$. The upper bound is quite technical. However, for the applications to the classical entropies, the important cases are the lower bound $m=0$ together with $m=1$ (this is because these are the two special results which will be used to prove that the square of the $L^2$-norm and the Boltzmann entropy are strong Lyapunov functionals).
We begin by considering the case $m=1$. In this case, an application of the functional inequality given by Proposition~\ref{theo:logSob} will allow us to give a very simple proof of the following \begin{proposition} For any smooth positive solution to \begin{equation}\label{boussinesqpasdarcy2} \partial_t h -\cnx (h \nabla h) = 0, \end{equation} there holds \begin{equation}\label{Boussinesq:L2dtn2} \frac{\diff}{\dt}\int_{\mathbf{T}^{d}} h\left\vert \nabla h\right\vert^2\diff \! x+ \int_{\mathbf{T}^{d}}\Big(\frac16 \left\vert\nabla h\right\vert^4 + \frac{1}{2} h^2(\Delta h)^2\Big)\diff \! x\leq 0. \end{equation} \end{proposition} \begin{remark}As already mentioned, it follows from~\eqref{estim1} and \eqref{Boussinesq:L2dtn2} that $$ \frac{\diff^2}{\dt^2} \int_{\mathbf{T}^{d}} h^2\diff \! x \ge 0. $$ This proves that the square of the $L^2$-norm is a strong Lyapunov functional for the Boussinesq equation. \end{remark} \begin{proof} The energy equation reads $$ \frac{1}{2} \frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^2\diff \! x + \int_{\mathbf{T}^{d}} h \left\vert \nabla h\right\vert^2\diff \! x = 0. $$
Let us study the time derivative of the dissipation rate $\int h|\nabla h|^2\diff \! x$. Since \begin{align*} \partial_t(h\left\vert \nabla h\right\vert^2) &= (\partial_t h)\left\vert \nabla h\right\vert^2 + 2 h \nabla h\cdot \nabla \partial_t h\\ &= \cnx(h \nabla h)\left\vert \nabla h\right\vert^2 + 2 h \nabla h \cdot\nabla \cnx(h\nabla h), \end{align*} we have $$ \frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h\left\vert \nabla h\right\vert^2\diff \! x = \int_{\mathbf{T}^{d}} \cnx (h \nabla h) \left\vert \nabla h\right\vert^2\diff \! x - 2 \int_{\mathbf{T}^{d}} (\cnx(h \nabla h))^2\diff \! x. $$ Remark that \begin{align*} &\cnx (h \nabla h) \left\vert \nabla h\right\vert^2
= |\nabla h|^4 + h (\Delta h)\left\vert \nabla h\right\vert^2,\\ &(\cnx(h \nabla h))^2=h^2(\Delta h)^2+\left\vert \nabla h\right\vert^4+2h(\Delta h)\left\vert \nabla h\right\vert^2. \end{align*} So we easily verify that \begin{align*} \frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h\left\vert \nabla h\right\vert^2\diff \! x &=
\frac{1}{2} \int_{\mathbf{T}^{d}} |\nabla h|^4 \diff \! x- \frac{3}{2} \int_{\mathbf{T}^{d}} ({\rm div}(h\nabla h))^2 \diff \! x \\ &\quad- \frac{1}{2} \int_{\mathbf{T}^{d}} h^2 (\Delta h)^2\diff \! x. \end{align*} Now, we use Proposition~\ref{theo:logSob} applied with $\theta=h^2$ to write that $$ \int (\cnx(h\nabla h))^2\diff \! x=\frac{1}{4}\int \big(\Delta h^2\big)^2\diff \! x\ge \frac{4}{9}\int \left\vert \nabla h\right\vert^4\diff \! x. $$ By combining the last two displays, we conclude that $$ \frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h\left\vert \nabla h\right\vert^2\diff \! x\leq \Big(\frac{1}{2}-\frac{3}{2}\cdot\frac{4}{9}\Big)\int_{\mathbf{T}^{d}} \left\vert\nabla h\right\vert^4\diff \! x-\frac{1}{2}\int_{\mathbf{T}^{d}} h^2(\Delta h)^2\diff \! x =-\frac{1}{6}\int_{\mathbf{T}^{d}} \left\vert\nabla h\right\vert^4\diff \! x-\frac{1}{2}\int_{\mathbf{T}^{d}} h^2(\Delta h)^2\diff \! x, $$ which is precisely \eqref{Boussinesq:L2dtn2}. This completes the proof. \end{proof}
Let us give now a more general result namely \begin{proposition}\label{convexporous} Consider a smooth positive solution to \begin{equation}\label{boussinesqpasdarcy3} \partial_t h -\cnx (h \nabla h) = 0, \end{equation} and a real number $m$ in $[0,(1+\sqrt{7})/2]$. Then \begin{equation}\label{estim3} \frac{\diff}{\dt}\int_{\mathbf{T}^{d}} h^m \left\vert \nabla h\right\vert^2\diff \! x+ I_m \leq 0, \end{equation} with $$
I_m=\frac{m}{m+1}\int_{\mathbf{T}^{d}} h^{m+1} |\Delta h|^2 \diff \! x+ C_m \int_{\mathbf{T}^{d}} h^{m-1} |\nabla h|^4\diff \! x, $$ where $$ C_m =\frac{m+2}{m+1}\, \frac{(m+3)^2}{36}- \frac{m^2- m +2}{4}\ge 0. $$ \end{proposition} \begin{remark} It follows from~\eqref{estim1} that for all $m$ in $[0,(1+\sqrt{7})/2]$, $$ \frac{\diff^2}{\dt^2} \int_{\mathbf{T}^{d}} h^{m+1}\diff \! x \ge 0. $$ Similarly, there holds $$ \frac{\diff^2}{\dt^2} \int_{\mathbf{T}^{d}}h\log h\diff \! x\ge 0. $$ \end{remark} \begin{proof} Starting from $$ \partial_t(h^m \left\vert \nabla h\right\vert^2) = (\partial_t h^m)\left\vert \nabla h\right\vert^2 + 2 h^m \nabla h\cdot \nabla \partial_t h, $$ and then using the equation, $$ \partial_t h^m - m h^{m-1} {\rm div}(h\nabla h) = 0, $$ we deduce that \begin{align*} \frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^m\left\vert \nabla h\right\vert^2\diff \! x
&= \int_{\mathbf{T}^{d}} m h^{m-1} {\rm div} (h\nabla h) |\nabla h|^2\diff \! x\\ &\quad + \int_{\mathbf{T}^{d}} 2 h^m \nabla h \cdot \nabla {\rm div}(h\nabla h)\diff \! x. \end{align*} Now we remark that \begin{align*} & h^{m-1}\cnx (h \nabla h) \left\vert \nabla h\right\vert^2
= h^{m-1} |\nabla h|^4 + h^m (\Delta h)\left\vert \nabla h\right\vert^2 \\
&\cnx(h^m\nabla h) \cnx(h \nabla h)= \Big(\cnx\big(h^{(m+1)/2}\nabla h\big)\Big)^2 - \frac{(m-1)^2}{4} h^{m-1} |\nabla h|^4. \end{align*} Consequently, \begin{align*} \frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^m\left\vert \nabla h\right\vert^2\diff \! x
& =\frac{m^2+1}{2}\int_{\mathbf{T}^{d}} h^{m-1} |\nabla h|^4 \diff \! x\\
&\quad+ m\int_{\mathbf{T}^{d}} h^m \Delta h |\nabla h|^2 \diff \! x\\ & \quad- 2 \int_{\mathbf{T}^{d}} \big(\cnx\big(h^{(m+1)/2}\nabla h\big)\big)^2\diff \! x. \end{align*} By integrating by parts twice, we verify that \begin{align*} (m+1)\int_{\mathbf{T}^{d}}h^m\left\vert \nabla h\right\vert^2\Delta h\diff \! x&=-\int_{\mathbf{T}^{d}}h^{m+1}(\Delta h)^2\, dx\\ &\quad +\int_{\mathbf{T}^{d}}\cnx\big(h^{m+1}\nabla h\big)\Delta h\, dx. \end{align*} Then, it follows from the equality $$\cnx\big(h^{m+1}\nabla h\big) \Delta h
= \Big(\cnx\big(h^{(m+1)/2}\nabla h\big)\Big)^2 - \frac{(m+1)^2}{4} h^{m-1} |\nabla h|^4, $$ that \begin{align*} \int_{\mathbf{T}^{d}} h^m\left\vert \nabla h\right\vert^2\Delta h\, dx &=-\frac{1}{m+1}\int_{\mathbf{T}^{d}} h^{m+1}(\Delta h)^2\, dx\\ &\quad+\frac{1}{m+1}\int_{\mathbf{T}^{d}} \left(\cnx \big(h^{(m+1)/2}\nabla h\big)\right)^2\, dx\\ &\quad-\frac{m+1}{4}\int_{\mathbf{T}^{d}} h^{m-1}\left\vert \nabla h\right\vert^4\diff \! x. \end{align*} As a result, \begin{align*} &\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^m\left\vert \nabla h\right\vert^2\diff \! x \\
&\qquad\qquad =\frac{m^2-m+2}{4} \int_{\mathbf{T}^d} h^{m-1} |\nabla h|^4\diff \! x \\ &\qquad\qquad \quad - \frac{m}{m+1} \int_{\mathbf{T}^d} h^{m+1} (\Delta h)^2\diff \! x\\ &\qquad\qquad \quad - \frac{m+2}{m+1} \int_{\mathbf{T}^d} \Big(\cnx\big(h^{(m+1)/2} \nabla h\big)\Big)^2 \diff \! x. \end{align*} Since $\cnx\big(h^{(m+1)/2}\nabla h\big)=\frac{2}{m+3}\Delta\big(h^{(m+3)/2}\big)$, Proposition~\ref{theo:logSob} applied with $\theta=h^{(m+3)/2}$ then implies that $$ \int_{\mathbf{T}^{d}} \Big(\cnx\big(h^{(m+1)/2} \nabla h\big)\Big)^2 \diff \! x
\ge \frac{(m+3)^2}{36}\int_{\mathbf{T}^{d}} h^{m-1} |\nabla h|^4\diff \! x . $$ Consequently, for any $m\ge 0$, $$ \frac{\diff}{\dt}\int_{\mathbf{T}^{d}} h^m \left\vert \nabla h\right\vert^2\diff \! x+ I_m \leq 0, $$ with $$
I_m=\frac{m}{m+1}\int_{\mathbf{T}^{d}} h^{m+1} (\Delta h)^2 \diff \! x+ C_m \int_{\mathbf{T}^{d}} h^{m-1} |\nabla h|^4\diff \! x, $$ where $$ C_m = \frac{m+2}{m+1}\cdot\frac{(m+3)^2}{36}-\frac{m^2-m+2}{4}. $$ By performing elementary computations, one verifies that $C_m\ge 0$ for all $m$ in $[0,(1+\sqrt 7 )/2]$, which completes the proof. \end{proof}
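The elementary computation alluded to at the end of the proof can also be double-checked numerically; the following Python lines (purely illustrative) verify that the constant $C_m$ above vanishes at $m=(1+\sqrt 7)/2$ and is nonnegative on $[0,(1+\sqrt 7)/2]$.
\begin{verbatim}
# Illustration only: numerical check of the sign of C_m.
import math

def C(m):
    return (m + 2)/(m + 1)*(m + 3)**2/36 - (m**2 - m + 2)/4

m_star = (1 + math.sqrt(7))/2                       # ~ 1.8229
print(abs(C(m_star)) < 1e-12)                       # C_m vanishes at the endpoint
print(all(C(k*m_star/1000) >= -1e-12
          for k in range(1001)))                    # C_m >= 0 on [0, m_star]
print(C(m_star + 0.01) < 0)                         # and is negative beyond it
\end{verbatim}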
\appendix
\section{An application to compressible fluid dynamics}\label{appendix:compressible}
The goal of this appendix is to show that the Sobolev inequality given by Proposition~\ref{theo:logSob} has an important application to the global existence of weak solutions of the compressible Navier-Stokes equations with density-dependent viscosities, namely \begin{equation*} \left\{ \begin{aligned} &\partial_t \rho + {\rm div}(\rho u)= 0, \\ &\partial_t (\rho u) + {\rm div}(\rho u\otimes u)
- 2 {\rm div} (\mu(\rho) D(u)) - \nabla (\lambda(\rho){\rm div} u ) + \nabla p(\rho)= 0, \end{aligned} \right. \end{equation*} with $D(u) = (\nabla u + {}^t\nabla u)/2$, $p(s)=a s^\gamma$ where $\gamma>1$, and the initial conditions $$ \rho\vert_{t=0} = \rho_0, \qquad \rho u\vert_{t=0} = m_0. $$ Recently, Bresch, Vasseur and Yu~\cite{BrVAYu19} obtained the first existence result for a large class of shear and bulk viscosities, respectively $s\mapsto \mu(s)$ and $s\mapsto\lambda(s)$, in a periodic domain $\Omega = {\mathbb T}^3$. More precisely, if the shear and bulk viscosities are taken to be \begin{equation} \mu(\rho)= \rho^\alpha, \qquad \lambda(\rho) = 2(\alpha-1) \rho^\alpha, \end{equation} then the authors obtained the existence of solutions under the assumption that $$ \frac{2}{3} < \alpha < 4. $$ The lower bound is a constraint coming naturally from a necessary coercivity property. The upper bound is a mathematical constraint due to Lemma $2.1$ in~\cite{BrVAYu19}, which reads as follows: there exist $C>0$ independent of $\alpha$ and $\varepsilon >0$ as small as we want such that \begin{equation} \begin{aligned} \nonumber
+\infty > \frac{C}{\varepsilon}\int \rho^\alpha |\nabla\nabla \rho^{\alpha-1}|^2 & \ge \frac{4}{(3\alpha-2)^2}
\int |\nabla^2 \rho^{(3\alpha-2)/2}|^2 \\ & + \left(\frac{1}{\alpha}- \frac{1}{4} - \varepsilon\right) \frac{4^4}{(3\alpha-2)^4}
\int |\nabla \rho^{(3\alpha-2)/4}|^4. \end{aligned} \end{equation} The constraint $\alpha<4$ ensures that the two terms on the right-hand side are positive and therefore yields appropriate controls on $\rho$, namely $$ \nabla^2 \rho^{(3\alpha-2)/2}\in L^2((0,T)\times {\mathbb T}^3)\quad\text{and}\quad \nabla\rho^{(3\alpha-2)/4} \in L^4((0,T)\times {\mathbb T}^3). $$ Proposition \ref{theo:logSob} allows one to compare the first and the second quantities and therefore to relax the constraint $\alpha <4.$ More precisely, using this estimate, it suffices to check that $$ \frac{1}{9} + \Bigl(\frac{1}{\alpha}-\frac{1}{4}\Bigr) \frac{4}{(3\alpha-2)^2} >0, $$ to get a positive quantity on the right-hand side controlling the $H^2$ derivatives. One can check that this holds for all $\alpha$ such that $2/3 < \alpha <+\infty$. This implies that the result by Bresch--Vasseur--Yu still holds for any $\mu$ and $\lambda$ such that $$ \mu(\rho) = \rho^\alpha, \qquad
\lambda(\rho) = 2 (\alpha-1) \rho^\alpha $$ with $2/3 < \alpha <+\infty$.
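The positivity condition displayed above can also be double-checked numerically. The following Python sketch (illustrative only; the grid and the cut-off $\alpha\le 200$ are ad hoc choices of ours) samples $f(\alpha)=\frac19+\bigl(\frac1\alpha-\frac14\bigr)\frac{4}{(3\alpha-2)^2}$; analytically, $f\ge \frac19$ for $2/3<\alpha\le 4$, while for $\alpha>4$ the negative term is bounded by $\frac14\cdot\frac{4}{(3\alpha-2)^2}\le\frac1{100}<\frac19$.
\begin{verbatim}
# Numerical sanity check that f(alpha) > 0 for all alpha > 2/3.
import numpy as np

alpha = np.linspace(2/3 + 1e-6, 200.0, 400_000)
f = 1/9 + (1/alpha - 1/4)*4/(3*alpha - 2)**2
print(f.min())   # positive on this grid (close to 1/9 for large alpha)
\end{verbatim}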
\section{Lyapunov functionals for the mean-curvature equation}\label{Appendix:MCF} \begin{proposition}\label{Prop:C1nabla} If $h$ is a smooth solution to the mean-curvature equation $$
\partial_th+\sqrt{1+|\nabla h|^2}\kappa=0\quad\text{with}\quad
\kappa=-\cnx\left(\frac{\nabla h}{\sqrt{1+|\nabla h|^2}}\right), $$ then \begin{equation}\label{nC10} \frac{\diff}{\dt} \int_{\mathbf{T}^d}\left\vert \nabla h\right\vert^2\diff \! x\leq 0. \end{equation} \end{proposition} \begin{proof} By multiplying the equation by $-\Delta h$ and integrating by parts, we find that $$
\frac{1}{2}\frac{\diff}{\dt} \int_{\mathbf{T}^d}\left\vert \nabla h\right\vert^2\diff \! x -\int_{\mathbf{T}^d}\sqrt{1+|\nabla h|^2}\kappa\Delta h\diff \! x=0. $$ Using the Leibniz rule, one has $$
-\sqrt{1+|\nabla h|^2}\kappa\Delta h
=(\Delta h)^2-\frac{\nabla h\cdot (\nabla h\cdot \nabla\nabla h)\Delta h}{1+|\nabla h|^2}. $$ It follows from the Cauchy-Schwarz inequality that $$
\left\vert \frac{\nabla h\cdot (\nabla h\cdot \nabla\nabla h)\Delta h}{1+|\nabla h|^2} \right\vert \leq \left\vert \nabla^2 h\right\vert\left\vert \Delta h\right\vert. $$ Consequently, $$
-\int_{\mathbf{T}^d}\sqrt{1+|\nabla h|^2}\kappa\Delta h\diff \! x \ge \int_{\mathbf{T}^d}\big((\Delta h)^2-\left\vert \nabla^2 h\right\vert\left\vert \Delta h\right\vert\big)\diff \! x. $$ Now we claim that the above term is non-negative, which in turn will imply the wanted result~\eqref{nC10}. To see this, we first use the Cauchy-Schwarz inequality to bound this term from below by $$ \int_{\mathbf{T}^d}(\Delta h)^2\diff \! x -\left(\int_{\mathbf{T}^d}(\Delta h)^2\diff \! x\right)^\frac{1}{2}\left( \int_{\mathbf{T}^d}\left\vert \nabla^2 h\right\vert^2\diff \! x\right)^\frac{1}{2}, $$ and then apply the classical identity (see~\eqref{Deltanablanabla}) $$ \int_{\mathbf{T}^d}(\Delta h)^2\diff \! x=\int_{\mathbf{T}^d}\left\vert \nabla^2 h\right\vert^2\diff \! x, $$ which can be verified by integrating by parts twice. \end{proof} \begin{proposition}\label{Prop:C1} If $h$ is a smooth solution to the mean-curvature equation in space dimension $d=1$: $$ \partial_t h+\sqrt{1+(\partial_x h)^2}\kappa=0\quad\text{with}\quad \kappa=-\partial_x\left(\frac{\partial_x h}{\sqrt{1+(\partial_x h)^2}}\right), $$ then the following quantities are Lyapunov functionals: $$ \int_\mathbf{T} h^2\diff \! x,\quad \int_\mathbf{T} (\partial_t h)^2\diff \! x,\quad \int_\mathbf{T} (1+(\partial_xh)^2)\kappa^2\diff \! x. $$ In addition, $\int_\mathbf{T} h^2\diff \! x$ is a strong Lyapunov functional. \end{proposition} \begin{proof} If the space dimension $d$ is equal to~$1$, we have $$ \sqrt{1+(\partial_x h)^2}\kappa=-\frac{\partial_{xx} h}{1+(\partial_x h)^2}. $$ Consequently, the one-dimensional version of the mean-curvature equation reads $$ \partial_t h-\frac{\partial_{xx}h}{1+(\partial_xh)^2}=0. $$ We may further simplify the mean curvature equation by noticing that \begin{equation}\label{MCFarctan} \partial_t h+\sqrt{1+(\partial_xh)^2}\kappa=\partial_th-\frac{\partial_{xx}h}{1+(\partial_xh)^2}= \partial_th-\partial_x \arctan (\partial_xh). \end{equation} This immediately implies that the square of the $L^2$-norm is a Lyapunov functional: \begin{equation}\label{n71} \frac{1}{2}\frac{\diff}{\dt} \int_{\mathbf{T}}h^2\diff \! x=-\int_\mathbf{T} (\partial_xh)\arctan (\partial_xh)\diff \! x \leq 0, \end{equation} since $u \arctan u\ge 0$ for all $u\in \mathbf{R}$.
It also follows from the previous $\arctan$-formulation that the unknown $\dot{h}=\partial_t h$ is a solution to $$ \partial_t\dot{h}-\partial_x\left( \frac{\partial_x \dot{h}}{1+(\partial_xh)^2}\right)=0. $$ Multiplying the previous equation by $\dot{h}$ and integrating by parts in $x$, we infer that $$ \frac{\diff}{\dt} \int_\mathbf{T}(\partial_th)^2\diff \! x\leq 0. $$ By using the equation for $h$, this is equivalent to $$ \frac{\diff}{\dt} \int_\mathbf{T} (1+(\partial_xh)^2) \kappa^2 \diff \! x\leq 0. $$
Now observe that $$ \partial_t \big((\partial_x h) \arctan (\partial_x h)\big)
= \partial_t\partial_x h \Bigl(\arctan (\partial_x h)
+ \frac{\partial_x h}{1+(\partial_x h)^2}\Bigr). $$ On the other hand, using the equation \eqref{MCFarctan}, we have $$ \partial_t\partial_x h=\partial_x\Big(\frac{\partial_{xx}h}{1+(\partial_xh)^2}\Big). $$ Therefore, integrating by parts, we conclude that \begin{align*} &\frac{\diff}{\dt} \int_\mathbf{T} (\partial_xh)\arctan (\partial_xh)\diff \! x \\ &\qquad\qquad=-\int_\mathbf{T} \frac{\partial_{xx}h}{1+(\partial_xh)^2} \partial_x \left(\arctan (\partial_x h)
+ \frac{\partial_x h}{1+(\partial_x h)^2}\right)\diff \! x \\
&\qquad\qquad=-\int_\mathbf{T} \frac{\partial_{xx}h}{1+(\partial_xh)^2} \cdot \frac{2\partial_{xx} h}{(1+(\partial_x h)^2)^2}\diff \! x. \end{align*} This proves that $$ \frac{\diff}{\dt} \int_\mathbf{T} (\partial_xh)\arctan (\partial_xh)\diff \! x=-2\int_\mathbf{T}\kappa^2\diff \! x \leq 0. $$ So, in view of \eqref{n71}, we conclude that $$ \frac{\diff^2}{\dt^2} \int_{\mathbf{T}}h^2\diff \! x\ge 0. $$ We thus have proved that $$ \frac{\diff}{\dt} \int_{\mathbf{T}}h^2\diff \! x\leq 0\quad\text{and}\quad \frac{\diff^2}{\dt^2} \int_{\mathbf{T}}h^2\diff \! x\ge 0. $$ By definition, this means that $\int_{\mathbf{T}}h^2\diff \! x$ is a strong Lyapunov functional for the mean-curvature equation. \end{proof}
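The elementary identity behind the last two computations, namely $\partial_x\bigl(\arctan(\partial_xh)+\frac{\partial_xh}{1+(\partial_xh)^2}\bigr)=\frac{2\partial_{xx}h}{(1+(\partial_xh)^2)^2}$, which is used again in the next proposition, can be verified with the following SymPy lines (a side check of ours, not part of the proof).
\begin{verbatim}
# Check: d/dx [arctan(h_x) + h_x/(1+h_x^2)] = 2*h_xx/(1+h_x^2)**2.
import sympy as sp

x = sp.symbols('x')
h = sp.Function('h')(x)
hx = sp.diff(h, x)

lhs = sp.diff(sp.atan(hx) + hx/(1 + hx**2), x)
rhs = 2*sp.diff(h, x, 2)/(1 + hx**2)**2
print(sp.simplify(lhs - rhs))   # expected: 0
\end{verbatim}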
The next proposition gives a somewhat surprising property of the Boussinesq equation, which is directly inspired by the $\arctan$-formulation used above for the mean-curvature equation.
\begin{proposition}\label{prop:C2Boussinesq} Consider the Boussinesq equation in space dimension $1$: $$ \partial_th-\partial_x(h\partial_x h)=0. $$ Then $$ \frac{\diff}{\dt} \int_{\mathbf{T}} (\partial_x h) \arctan (\partial_x h)\diff \! x \leq 0. $$ \end{proposition} \begin{proof} As already seen in the previous proof, $$ \partial_t \big((\partial_x h) \arctan (\partial_x h)\big) =\partial_t\partial_x h \left(\arctan (\partial_x h)+\frac{\partial_x h}{1+(\partial_x h)^2}\right). $$ Using the equation $$ \partial_t\partial_x h = \partial_x^2(h\partial_x h), $$ and then integrating by parts, we get $$ \frac{\diff}{\dt} \int_{\mathbf{T}} (\partial_x h) \arctan (\partial_x h)\diff \! x
= - 2\int_{\mathbf{T}} \partial_x(h\partial_x h)
\frac{\partial_x^2 h}{(1+(\partial_x h)^2)^2}\diff \! x
= I, $$ where $I$ reads $$ I=- \int_{\mathbf{T}}\frac{2h(\partial_x^2 h)^2}{(1+(\partial_x h)^2)^2}\diff \! x
- \int_{\mathbf{T}}\frac{2(\partial_x h)^2 \partial_x^2 h}{(1+(\partial_x h)^2)^2}\diff \! x. $$ Note that the second term vanishes since this is the integral of an exact derivative. So, $$ \frac{\diff}{\dt} \int_{\mathbf{T}} (\partial_x h) \arctan (\partial_x h)\diff \! x +\int_{\mathbf{T}}\frac{2h(\partial_x^2 h)^2}{(1+(\partial_x h)^2)^2}\diff \! x =0, $$ which implies the wanted conclusion. \end{proof}
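To make the exact-derivative claim explicit, observe that $\frac{2u^2}{(1+u^2)^2}=\frac{d}{du}\bigl(\arctan u-\frac{u}{1+u^2}\bigr)$, so the second term is $\partial_x\bigl(\arctan(\partial_xh)-\frac{\partial_xh}{1+(\partial_xh)^2}\bigr)$, whose integral over $\mathbf{T}$ vanishes. A one-line SymPy confirmation (editorial, not from the original text):
\begin{verbatim}
import sympy as sp
u = sp.symbols('u')
print(sp.simplify(sp.diff(sp.atan(u) - u/(1 + u**2), u)
                  - 2*u**2/(1 + u**2)**2))   # expected: 0
\end{verbatim}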
\section{A Rellich type estimate}\label{A:Rellich}
This appendix gives a proof of the inequality~\eqref{d10}. \begin{lemma} For any smooth functions $h$ and $\zeta$ in $C^\infty(\mathbf{T}^d)$, there holds \begin{equation}\label{d10-bisb} \int_{\mathbf{T}^d} (G(h)\zeta)^2\diff \! x \leq
\int_{\mathbf{T}^d} (1+|\nabla h|^2)|\nabla \zeta-\mathcal{B} \nabla h|^2 \diff \! x, \end{equation} where \begin{equation}\label{d11-bisb}
\mathcal{B}=\frac{G(h)\zeta+\nabla \zeta \cdot \nabla h}{1+|\nabla h|^2}. \end{equation} \end{lemma} \begin{remark} $i)$ This inequality extends to functions which are not smooth.
$ii)$ This generalizes an estimate proved in~\cite{A-stab-AnnalsPDE} for $d=1$, for the Dirichlet-to-Neumann operator associated with a domain of finite depth. The main difference is that, when $d=1$, the estimate is an identity (and not only an inequality). This comes from the fact that, in the proof below, to derive \eqref{esti:final7} we use the inequality
$(\nabla h\cdot \mathcal{V})^2\leq |\nabla h|^2 \cdot |\mathcal{V}|^2$, which is clearly an equality when $d=1$. \end{remark} \begin{proof} We follow the analysis in~\cite{A-stab-AnnalsPDE}. Set $$ \Omega=\{(x,y)\in\mathbf{T}^{d}\times\mathbf{R}\,;y<h(x)\}, $$ and denote by $\phi$ the harmonic function defined by \begin{equation}\label{m1} \left\{ \begin{aligned} &\Delta_{x,y}\phi=0\quad\text{in }\Omega=\{(x,y)\in \mathbf{T}^{d}\times \mathbf{R} \,;\, y<h(x)\},\\ &\phi(x,h(x)) = \zeta(x). \end{aligned} \right. \end{equation} As recalled in Lemma~\ref{Lemma:decayinfty}, this is a classical elliptic boundary-value problem, which admits a unique smooth solution. Moreover, it satisfies \begin{equation}\label{decaytozero-appendix} \lim_{y\to-\infty}\sup_{x\in\mathbf{T}^{d}}\left\vert \nabla_{x,y}\phi(x,y)\right\vert=0. \end{equation} Introduce the notations $$ \mathcal{V}=(\nabla\phi)_{\arrowvert y=h}, \qquad \mathcal{B}=(\partial_y\phi)_{\arrowvert y=h}. $$ (We parenthetically recall that $\nabla$ denotes the gradient with respect to the horizontal variables $x=(x_1,\ldots,x_d)$ only.) It follows from the chain rule that $$ \mathcal{V}=\nabla \zeta-\mathcal{B}\nabla h, $$ while $\mathcal{B}$ is given by \eqref{d11-bisb}. On the other hand, by definition of the Dirichlet-to-Neumann operator, one has the identity $$ G(h)\zeta=\big(\partial_y \phi-\nabla h\cdot \nabla \phi\big)_{\arrowvert y=h}, $$ so $$ G(h)\zeta=\mathcal{B}-\nabla h\cdot \mathcal{V}. $$ Squaring this identity yields $$ (G(h)\zeta)^2 =\mathcal{B}^2-2 \mathcal{B}\nabla h \cdot \mathcal{V} +(\nabla h\cdot \mathcal{V})^2. $$
Since $(\nabla h\cdot \mathcal{V})^2\leq |\nabla h|^2 \cdot |\mathcal{V}|^2$, this implies the inequality: \begin{equation}\label{esti:final7}
(G(h)\zeta)^2\leq \mathcal{B}^2-\left\vert\mathcal{V}\right\vert^2-2\mathcal{B}\nabla h\cdot \mathcal{V} +(1+|\nabla h|^2)\left\vert\mathcal{V}\right\vert^2. \end{equation} Integrating this gives $$ \int_{\mathbf{T}^d} (G(h)\zeta)^2\diff \! x \leq
\int_{\mathbf{T}^d} (1+|\nabla h|^2)\left\vert\mathcal{V}\right\vert^2 \diff \! x+R, $$ where $$ R=\int_{\mathbf{T}^d}\Big( \mathcal{B}^2-\left\vert\mathcal{V}\right\vert^2-2\mathcal{B}\nabla h\cdot \mathcal{V}\Big)\diff \! x. $$
Since $\left\vert\mathcal{V}\right\vert=|\nabla \zeta-\mathcal{B} \nabla h|$, we immediately see that, to obtain the wanted estimate~\eqref{d10-bisb}, it is sufficient to prove that $R=0$. To do so, we begin by noticing that $R$ is the flux associated to a vector field. Indeed, $$ R=\int_{\partial\Omega} X\cdot n\diff\mathcal{H}^{d} $$ where $X\colon \Omega\rightarrow \mathbf{R}^{d+1}$ is given by $$
X=(2(\partial_y\phi)\nabla \phi;(\partial_y\phi)^2-|\nabla \phi|^2). $$ Then the key observation is that this vector field satisfies $\cn_{x,y} X=0$ since $$
\partial_y \big( (\partial_y\phi)^2-|\nabla\phi|^2\big) +2\cnx \big((\partial_y\phi)\nabla\phi\big)= 2(\partial_y\phi) \Delta_{x,y}\phi=0, $$ as can be verified by an elementary computation. Now, we see that the cancellation $R=0$ comes from the Stokes' theorem. To rigorously justify this point, we truncate $\Omega$ in order to work in a smooth bounded domain. Given a parameter $\beta>0$, set $$ \Omega_\beta=\{(x,y)\in\mathbf{T}^{d}\times\mathbf{R}\,;-\beta<y<h(x)\}. $$ An application of the divergence theorem in $\Omega_\beta$ gives that $$ 0=\iint_{\Omega_\beta} \cn_{x,y}X\diff \! y \diff \! x=R+\int_{\{y=-\beta\}}X\cdot (-e_y)\diff \! x, $$ where $e_y$ is the vector $(0,\ldots,0,1)$ in $\mathbf{R}^{d+1}$. Sending $\beta$ to $+\infty$ and remembering that $X$ converges to $0$ uniformly when $y$ goes to $-\infty$ (see~\eqref{decaytozero-appendix}), we obtain the expected result $R=0$. This completes the proof. \end{proof}
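As a consistency check of the cancellation $R=0$ (illustrative only; the particular harmonic function below, taken with $d=1$, is an arbitrary choice of ours), one may verify symbolically that the vector field $X$ is divergence free for a harmonic $\phi$:
\begin{verbatim}
# d = 1 example: phi(x,y) = exp(k*y)*cos(k*x) is harmonic and
# X = (2*phi_y*phi_x, phi_y**2 - phi_x**2) is divergence free.
import sympy as sp

x, y, k = sp.symbols('x y k', real=True)
phi = sp.exp(k*y)*sp.cos(k*x)
assert sp.simplify(sp.diff(phi, x, 2) + sp.diff(phi, y, 2)) == 0

X1 = 2*sp.diff(phi, y)*sp.diff(phi, x)
X2 = sp.diff(phi, y)**2 - sp.diff(phi, x)**2
print(sp.simplify(sp.diff(X1, x) + sp.diff(X2, y)))   # expected: 0
\end{verbatim}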
\section{Darcy's law}\label{appendix:HS} In this appendix, we recall the derivation of the Hele-Shaw and Mullins-Sekerka equations. These equations dictate the dynamics of the free surface of an incompressible fluid evolving according to Darcy's law. Consider a time-dependent fluid domain $\Omega$ of the form: $$ \Omega(t)=\{ (x,y) \in \mathbf{T}^{d}\times \mathbf{R}\,;\, y < h(t,x)\}. $$ Darcy's law stipulates that the velocity $v\colon \Omega\rightarrow \mathbf{R}^{d+1}$ and the pressure $P\colon\Omega\rightarrow \mathbf{R}$ satisfy the following equations: $$ \cn_{x,y} v=0\quad\text{ and }\quad v=-\nabla_{x,y} (P+gy) \quad \text{in }\Omega, $$ where $g>0$ is the acceleration of gravity. In addition, one assumes that $$ \lim_{y\to-\infty}v=0 $$ and that, on the free surface $\partial\Omega$, the normal component of $v$ coincides with the normal component of the velocity of the free surface, which implies that $$
\partial_t h=\sqrt{1+|\nabla h|^2} \, v\cdot n\quad \text{on}\quad y=h, $$ where $\nabla=\nabla_x$ and $n$ is the outward unit normal to $\partial\Omega$, given by $$
n=\frac{1}{\sqrt{1+|\nabla h|^2}} \begin{pmatrix} -\nabla h \\ 1 \end{pmatrix}. $$ The final equation states that the restriction of the pressure to the free surface is proportional to the mean curvature: $$ P=\mu \kappa \quad \text{on}\quad\partial\Omega, $$ where the parameter $\mu$ belongs to $[0,1]$ and $\kappa$ is given by~\eqref{defi:kappa}.
Now we notice that $\Delta_{x,y}(P+gy)=-\cn_{x,y} v=0$, so $P+gy$ is the harmonic extension of $gh+\mu\kappa$. It follows that the Hele-Shaw problem is equivalent to \begin{equation*} \partial_{t}h+G(h)(gh+\mu \kappa)=0. \end{equation*}
\begin{flushleft} \textbf{Thomas Alazard}\\ Université Paris-Saclay, ENS Paris-Saclay, CNRS,\\ Centre Borelli UMR9010, avenue des Sciences, \\ F-91190 Gif-sur-Yvette\\
\textbf{Didier Bresch}\\ LAMA CNRS UMR5127, Univ. Savoie Mont-Blanc, \\ Batiment le Chablais, \\ F-73376 Le Bourget du Lac, France.
\end{flushleft}
\end{document}
\begin{document}
\title{Quantum steerability: Characterization, quantification, superactivation, and unbounded amplification}
\author{Chung-Yun Hsieh} \email{[email protected]} \affiliation{Department of Physics, National Tsing Hua University, Hsinchu 300, Taiwan} \author{Yeong-Cherng Liang} \email{[email protected]} \affiliation{Department of Physics, National Cheng Kung University, Tainan 701, Taiwan} \author{Ray-Kuang Lee} \affiliation{Department of Physics, National Tsing Hua University, Hsinchu 300, Taiwan} \affiliation{Physics Division, National Center for Theoretical Science, Hsinchu 300, Taiwan}
\date{\today}
\begin{abstract}
Quantum steering, also called Einstein-Podolsky-Rosen steering, is the intriguing phenomenon associated with the ability of spatially separated observers to {\em steer}---by means of local measurements---the set of conditional quantum states accessible by a distant party. In the light of quantum information, {\em all} steerable quantum states are known to be resources for quantum information processing tasks. Here, via a quantity dubbed {\em steering fraction}, we derive a simple, but general criterion that allows one to identify quantum states that can exhibit quantum steering (without having to optimize over the measurements performed by each party), thus making an important step towards the characterization of steerable quantum states. The criterion, in turn, also provides upper bounds on the largest steering-inequality violation achievable by arbitrary finite-dimensional maximally entangled states. For the quantification of steerability, we prove that a strengthened version of the steering fraction is a {\em convex steering monotone} and demonstrate how it is related to two other steering monotones, namely, steerable weight and steering robustness. Using these tools, we further demonstrate the {\em superactivation} of steerability for a well-known family of entangled quantum states, i.e., we show how the steerability of certain entangled, but unsteerable quantum states can be recovered by allowing joint measurements on multiple copies of the same state. In particular, our approach allows one to explicitly construct a steering inequality to manifest this phenomenon. Finally, we prove that there exist examples of quantum states (including some which are unsteerable under projective measurements) whose steering-inequality violation can be arbitrarily amplified by allowing joint measurements on as little as three copies of the same state. For completeness, we also demonstrate how the largest steering-inequality violation can be used to bound the largest Bell-inequality violation and derive, analogously, a simple sufficient condition for Bell-nonlocality from the latter. \end{abstract}
\maketitle
\section{Introduction} \label{Sec:Intro} From the famous Einstein-Podolsky-Rosen (EPR) paradox~\cite{EPR} to Bell's seminal discovery~\cite{Bell}, quantum theory has never failed to surprise us with its plethora of intriguing phenomena and mind-boggling applications~\cite{book,RMP-Bell}. Among those who made the bizarre nature of quantum theory evident was Schr\"odinger, who not only coined the term ``entanglement," but also pointed out that quantum theory allows for {\em steering}~\cite{Schrodinger}: through the act of local measurements on one-half of an entangled state, a party can {\em remotely} steer the set of (conditional) quantum states accessible by the other party.
Taking a quantum information perspective, the demonstration of steering can be viewed as the verification of entanglement involving an untrusted party~\cite{Wiseman}. Imagine that two parties Alice and Bob share some quantum state and Alice wants to convince Bob that the shared state is entangled, but Bob does not trust her. If Alice can convince Bob that the shared state indeed exhibits EPR steering, then Bob would be convinced that they share entanglement, as the latter is a prerequisite for steering. Note, however, that shared entanglement is generally insufficient to guarantee steerability. Interestingly, steerability is actually a necessary but generally insufficient condition for the demonstration of Bell nonlocality~\cite{Wiseman,Quintino}. Hence, steering represents a form of quantum inseparability in between entanglement and Bell nonlocality.
Apart from entanglement verification in a partially trusted scenario, steering has also found applications in the distribution of secret keys in a partially trusted scenario~\cite{steering-QKD}. From a resource perspective, the steerability of a quantum state $\rho$, i.e., whether $\rho$ is steerable and the extent to which it can exhibit steering, turns out to also provide an indication of the usefulness of $\rho$ in other quantum information processing tasks. For instance, steerability as quantified by steering robustness~\cite{Piani2015} is monotonically related to the probability of success in the problem of subchannel discrimination when one is restricted to local measurements aided by one-way communications.
The characterization of quantum states that are capable of exhibiting steering and the quantification of steerability are thus of relevance not just from a fundamental viewpoint, but also in quantum information. Surprisingly, very little is known about which quantum states are (un)steerable (see, however,~\cite{Wiseman,steering-ineq,Girdhar,Taddei,Bowles:PRA:2016,Werner,Barrett,Almeida}). Here, we derive some generic sufficient conditions for steerability that can be applied to quantum states of arbitrary Hilbert space dimension. Importantly, in contrast to the conventional approach of steering inequalities~\cite{steering-ineq} where an optimization over the many measurements that can be performed by each party is needed, our criteria only require the relatively straightforward computation of the fully entangled fraction~\cite{Horodecki-1}.
Given that some entangled quantum state $\rho$ cannot exhibit steering~\cite{Werner,Wiseman,Barrett,Almeida,Quintino,Bowles:PRA:2016}, a natural question that arises is whether the steerability of such a state can be {\em superactivated} by allowing joint measurements on multiple copies of $\rho$. In other words, is it possible that some $\rho$ that is not steerable becomes steerable if local measurements are performed instead on $\rho^{\otimes k}$ for some large enough $k$? Building on some recent results established for Bell nonlocality~\cite{Palazuelos,Cavalcanti-PRA}, we provide here an affirmative answer to the above question.
Note that even for a quantum state $\rho$ that is steerable, it is interesting to investigate how its steerability scales with the number of copies. For instance, is it possible to amplify the amount of steering-inequality violation by an {\em arbitrarily large} amount if only a small number of copies are available (see~\cite{Palazuelos} for analogous works in the context of Bell nonlocality)? Again, we provide a positive answer to this question, showing that an unbounded amount of amplification can be obtained by allowing joint measurements on as few as three copies of a quantum state that is barely steerable, or even unsteerable under projective measurements.
The rest of this paper is structured as follows. In Sec.~\ref{Sec:Prelim}, we give a brief overview of some of the basic notions in Bell nonlocality and EPR steering that we will need in subsequent discussions. There, inspired by the work of Cavalcanti {\it et al.}~\cite{Cavalcanti-PRA}, we also introduce the notion of {\em steering fraction} and {\em largest (steering-inequality) violation}, which are crucial quantities that lead to many of the findings mentioned above. For instance, in Sec.~\ref{Sec:Characterization}, we use these quantities to derive (1) a general sufficient condition for an arbitrary quantum state $\rho$ to be steerable and (2) upper bounds on the largest steering-inequality violation of an arbitrary finite-dimensional maximally entangled state as a function of its Hilbert space dimension $d$. Quantification of steerability using a strengthened version of steering fraction is discussed in Sec.~\ref{Sec:QuantifySteering} --- there, we also demonstrate how this steering monotone~\cite{Gallego} is related to the others, such as the steerable weight~\cite{SteerableWeight} and steering robustness~\cite{Piani2015}. In Sec.~\ref{Sec:SuperAmpli}, we show the superactivation of steerability, provide a procedure to construct a steering inequality for this purpose, and demonstrate unbounded amplification of steerability. We conclude in Sec.~\ref{Sec:Conclude} with a discussion and some open problems for future research.
\section{Preliminary notions} \label{Sec:Prelim}
Consider a Bell-type experiment between two parties Alice and Bob. The correlation between measurement outcomes can be succinctly summarized by a vector of joint conditional distributions ${\bf P} \coloneqq \{P(a,b|x,y)\}$, where $x$ and $a$ ($y$ and $b$) are, respectively, the labels of Alice's (Bob's) measurement settings and outcomes. The correlation admits a {\it local hidden-variable} (LHV) model if ${\bf P}$ is Bell-local~\cite{RMP-Bell}, i.e., can be decomposed for all $a,b,x,y$ as \begin{equation}\label{Eq:LHV}
P(a,b|x,y) = \int P_{\lambda} P(a|x,\lambda)P(b|y,\lambda)d\lambda, \end{equation}
for some {\em fixed} choice of $P_\lambda\ge0$ satisfying $\int P_\lambda\,d\lambda=1$ and single-partite distributions $\{P(a|x,\lambda)\}_{a,x,\lambda}$, and $\{P(b|y,\lambda)\}_{b,y,\lambda}$.
Any correlation that is not Bell-local (henceforth {\em nonlocal}) can be witnessed by the violation of some (linear) Bell inequality,\footnote{Bell inequalities that are not linear in ${\bf P}$, or which involve {\em complex} combinations of $P(a,b|x,y)$, have also been considered in the literature, but we will not consider them in this paper.} \begin{subequations}\label{Eq:BI} \begin{equation}\label{Eq:BellFunctional}
\sum_{a,b,x,y} B_{ab|xy}\, P(a,b|x,y)\stackrel{\mbox{\tiny LHV}}{\le} \omega({\bf B}), \end{equation}
specified by a vector of real numbers ${\bf B}\coloneqq\{ B_{ab|xy}\}_{a,b,x,y}$ (known as the Bell coefficients) and the {\em local bound} \begin{align}
\omega({\bf B}) &\coloneqq \sup_{{{\bf P}} \in \text{LHV}} \sum_{a,b,x,y} B_{ab|xy}\, P(a,b|x,y). \end{align} \end{subequations} In the literature, the left-hand side of Eq.~\eqref{Eq:BellFunctional} is also known as a {\em Bell polynomial}~\cite{Werner:PRA:2001} or a {\em Bell functional}~\cite{Buhrman}, as it maps any given correlation ${\bf P}$ into a real number.
To determine if a quantum state (and more generally if a given correlation ${\bf P}$) is nonlocal, one can, without loss of generality, consider Bell coefficients that are non-negative, i.e., $B_{ab|xy}\ge 0$ for all $a,b,x,y$. To see this, it suffices to note that any Bell inequality, Eq.~\eqref{Eq:BellFunctional}, can be cast in a form that involves only non-negative Bell coefficients, e.g., by using the identity $\sum_{a,b} P(a,b|x,y)=1$, which holds for all $x,y$.
Specifically, in terms of the {\em nonlocality fraction} $\Gamma$~\cite{Cavalcanti-PRA}, \begin{align}\label{Eq:NonlocalityFraction}
\Gamma ({\bf P},{\bf B}) &\coloneqq \frac{1}{\omega({\bf B})} \sum_{a,b,x,y} B_{ab|xy}\, P(a,b|x,y), \end{align} ${\bf P}$ violates the Bell inequality corresponding to ${\bf B}$ (and hence being nonlocal) if and only if $\Gamma ({\bf P}, {\bf B})>1$.
Importantly, nonlocal quantum correlation \begin{equation}\label{Eq:QCor}
P(a,b|x,y)=\text{tr}\left[\rho\,\left(E_{a|x} \otimes E_{b|y}\right)\,\right] \end{equation}
can be obtained~\cite{Bell} by performing appropriate local measurements on a certain entangled quantum state $\rho$, where $\mathbb{E}_{\rm A}\coloneqq\{E_{a|x}\}_{a,x}$ ($\mathbb{E}_{\rm B}\coloneqq\{E_{b|y}\}_{b,y}$) are the sets of positive-operator-valued measures (POVMs)~\cite{book} acting on Alice's (Bob's) Hilbert space. From now onward, we will use $\mathbb{E}_{\rm A}$ ($\mathbb{E}_{\rm B}$) to denote a set of POVMs on Alice's (Bob's) Hilbert space, and $\mathbb{E}$ to denote their union, i.e., $\mathbb{E}\coloneqq\mathbb{E}_{\rm A}\cup\mathbb{E}_{\rm B}$.
Whenever the measurement outcome corresponding to $E_{a|x}$ is observed on Alice's side, quantum theory dictates that the (unnormalized) quantum state \begin{equation}\label{Eq:Assemblage}
\sigma_{a|x}=\text{tr}_\text{A}[\rho\, (E_{a|x} \otimes \mathbb{I}_\text{B})] \end{equation}
is prepared on Bob's side, where $\text{tr}_\text{A}$ denotes the partial trace over Alice's subsystem and $\mathbb{I}_\text{B}$ is the identity operator acting on Bob's Hilbert space. An {\em assemblage}~\cite{Pusey:PRA:2013} of conditional quantum states ${\bm \sigma}:=\{\sigma_{a|x}\}_{a,x}$ is said to admit a {\em local-hidden-state} (LHS) model~\cite{Wiseman} if it is {\em unsteerable}, i.e., if it can be decomposed for all $a, x$ as \begin{eqnarray}
\sigma_{a|x} = \int P_{\lambda} P(a|x,\lambda)\sigma_{\lambda}\,d\lambda \end{eqnarray}
for some {\em fixed} choice of $P_\lambda\ge0$ satisfying $\int P_\lambda\,d\lambda=1$, single-partite density matrices $\{\sigma_\lambda\}_\lambda$, and single-partite distribution $\{P(a|x,\lambda)\}_{a,x,\lambda}$. Equivalently, a correlation ${\bf P}$ admits a LHS model if it can be decomposed as \begin{equation}\label{Eq:LHS}
P(a,b|x,y) = \int P_{\lambda} P(a|x,\lambda)\text{tr}\left(E_{b|y}\,\sigma_\lambda \right)d\lambda. \end{equation}
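As a concrete illustration of Eq.~\eqref{Eq:Assemblage} (an editorial example, not from the original text; the labels $x=0,1$ for $\sigma_z,\sigma_x$ measurements and the helper-function names are our own choices), the following Python snippet computes the assemblage created on Bob's side when Alice performs projective $\sigma_z$ or $\sigma_x$ measurements on the two-qubit maximally entangled state $\ket{\Psi^+_2}$; each member of the assemblage equals $E_{a|x}^{\rm T}/2$.
\begin{verbatim}
# Assemblage sigma_{a|x} = tr_A[rho (E_{a|x} (x) I)] for rho = |Psi^+_2><Psi^+_2|.
import numpy as np

ket = np.array([1, 0, 0, 1])/np.sqrt(2)          # (|00> + |11>)/sqrt(2)
rho = np.outer(ket, ket.conj())
I2 = np.eye(2)
pauli = {0: np.diag([1.0, -1.0]),                 # x = 0 : sigma_z
         1: np.array([[0.0, 1.0], [1.0, 0.0]])}   # x = 1 : sigma_x

def eigenprojectors(sigma):
    _, vecs = np.linalg.eigh(sigma)
    return [np.outer(vecs[:, i], vecs[:, i].conj()) for i in range(2)]

def conditional_state(E):                         # partial trace over Alice
    M = (np.kron(E, I2) @ rho).reshape(2, 2, 2, 2)
    return np.trace(M, axis1=0, axis2=2)

for x, sigma in pauli.items():
    for a, E in enumerate(eigenprojectors(sigma)):
        print(x, a, np.round(conditional_state(E), 3))   # equals E.T / 2
\end{verbatim}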
Conversely, an assemblage ${\bm \sigma}$ that is steerable can be witnessed by the violation of a steering inequality~\cite{steering-ineq}, \begin{subequations}\label{Eq:SteeringIneq} \begin{equation}\label{Eq:SteeringFunctional}
\sum_{a,x} \text{tr}(F_{a|x} \sigma_{a|x})\stackrel{\text{\tiny LHS}}{\le} \omega_s ({\bf F}), \end{equation}
specified by a set of Hermitian matrices ${\bf F} \coloneqq \{ F_{a|x}\}_{a,x}$ and the {\em steering bound} \begin{align}\label{Eq:SteeringBound}
\omega_s (F) &\coloneqq \sup_{\sigma \in \text{LHS}} \sum_{a,x} \text{tr}(F_{a|x} \sigma_{a|x}). \end{align} \end{subequations} In the literature, the left-hand side of Eq.~\eqref{Eq:SteeringFunctional} is also known as a {\em steering functional}~\cite{JPA15}, as it maps any given assemblage ${\bm \sigma}=\{\sigma_{a|x}\}_{a,x}$ to a real number.
As with Bell nonlocality, in order to determine if a given assemblage is steerable, one can consider, without loss of generality, steering functionals defined only by non-negative, or equivalently positive semidefinite $F_{a|x}$, i.e., $F_{a|x}\succeq 0$ for all $a$, $x$.\footnote{Here and after, the symbol $A\succeq0$ means that the matrix $A$ is positive semidefinite, i.e., having only non-negative eigenvalues.} To see this, it is sufficient to note that any steering inequality, Eq.~\eqref{Eq:SteeringFunctional}, can be rewritten in a form that involves only non-negative $F_{a|x}$, e.g., by using the identity $\sum_a\text{tr}\left(\sigma_{a|x}\right)=1$, which holds for all $x$.
Hereafter, we thus restrict our attention to ${\bf F}$ (${\bf B}$) having only non-negative $F_{a|x}$ ($B_{ab|xy}$).
In analogy with the nonlocality fraction, we now introduce the {\em steering fraction} \begin{align}\label{Eq:SteeringFraction}
\Gamma_s ({\bm \sigma}, {\bf F})&:= \frac{1}{ \omega_s ({\bf F})}\sum_{a,x} \text{tr}(F_{a|x} \sigma_{a|x} ), \end{align} to capture the steerability of an assemblage; an assemblage ${\bm \sigma}$ violates the steering inequality corresponding to ${\bf F}$ if and only if $\Gamma_s ({\bm \sigma}, {\bf F})>1$.
Whenever we want to emphasize the steerability of the underlying state $\rho$ giving rise to the assemblage ${\bm \sigma}$, we will write $\Gamma_s(\{\rho,\mathbb{E}_{\rm A}\},{\bf F})$ instead of $\Gamma_s({\bm \sigma},{\bf F})$ where $\mathbb{E}_{\rm A}=\{E_{a|x}\}_{a,x}$, ${\bm \sigma}:=\{\sigma_{a|x}\}_{a,x}$, and $\rho$ are understood to satisfy Eq.~\eqref{Eq:Assemblage}. In particular, $\rho$ is steerable with ${\bf F}$ if and only if the largest violation of the steering inequality corresponding to ${\bf F}$~\cite{JPA15}, \begin{eqnarray}\label{Eq:LV}
LV_s(\rho,{\bf F})\coloneqq\sup_{\mathbb{E}_{\rm A}}\Gamma_s(\{\rho,\mathbb{E}_{\rm A}\},{\bf F}), \end{eqnarray} is greater than 1.
As mentioned in Sec.~\ref{Sec:Intro}, Bell nonlocality is a stronger form of quantum nonlocality than quantum steering. Let us now illustrate this fact by using the quantities that we have introduced in this section.
For any given ${\bf B}=\{B_{ab|xy}\}_{a,b,x,y}$ and Bob's measurements specified by the POVMs $\mathbb{E}_{\rm B}=\{E_{b|y}\}_{b,y}$, one obtains an {\em induced} steering inequality specified by \begin{equation}\label{Eq:InducedF}
{\bf F}_{({\bf B}; \mathbb{E}_{\rm B})}\coloneqq\left\{\sum_{b,y} B_{ab|xy} E_{b|y} \right\}_{a,x}. \end{equation}
Using this equation, the definition of the steering bound $ \omega_s \sqp{{\bf F}_{({\bf B}; \mathbb{E}_{\rm B})}}$, the local bound $\omega({\bf B})$, and the fact that $\text{tr}(\sigma_{\lambda} E_{b|y})$ is only a {\em particular} kind of response function of the form $P(b|y,\lambda)$, one sees that \begin{eqnarray}\label{Eq:Bound:LvsS}
\omega_s \sqp{{\bf F}_{({\bf B}; \mathbb{E}_{\rm B})}}\le \omega ({\bf B}). \end{eqnarray} A geometrical representation of this fact can be found in Fig.~\ref{Fig1}.
\begin{figure}\label{Fig1}
\end{figure} Hence, for any correlation ${\bf P}$ derived by performing local measurements $\mathbb{E}_{\rm B}$ on Bob's side, and the local measurements $\mathbb{E}_{\rm A}$ on Alice's side when they share a bipartite state $\rho$, it follows from Eqs.~\eqref{Eq:NonlocalityFraction},~\eqref{Eq:QCor},~\eqref{Eq:SteeringFraction}, and~\eqref{Eq:Bound:LvsS} that \begin{equation}\label{Eq:BellvsSteering}
\Gamma({\bf P},{\bf B})\le \Gamma_s \sqp{\left\{\rho,\mathbb{E}_{\rm A}\right\},{\bf F}_{({\bf B}; \mathbb{E}_{\rm B})}}. \end{equation} From here, it is clear that whenever $\rho$ violates the Bell inequality specified by ${\bf B}$, i.e., $\Gamma({\bf P},{\bf B})>1$, it must also be the case that $\rho$ violates the steering inequality induced by ${\bf B}$ and $\mathbb{E}_{\rm B}$, cf. Eq.~\eqref{Eq:InducedF}.
\section{Sufficient condition for steerability and the largest steering-inequality violation} \label{Sec:Characterization}
Equipped with the tools presented above, our immediate goal now is to derive a sufficient condition for any quantum state $\rho$ acting on $\mathbb{C}^d\otimes\mathbb{C}^d$ to be steerable in terms of its {\em fully entangled fraction} (FEF)~\cite{Horodecki-1,Albererio} \begin{equation}\label{Eq:FEF} \begin{split}
\mathcal{F}(\rho)\coloneqq &\max_{\Psi}\langle\Psi|\rho|\Psi\rangle\\
=&\max_{U}\bra{\Psi^+_d}(U\otimes \mathbb{I}_B)\,\rho\, (U\otimes \mathbb{I}_B)^\dag\ket{\Psi^+_d}, \end{split} \end{equation} which is a quantity closely related to the teleportation~\cite{Teleportation} power of a quantum state. In the above definition, $\ket{\Psi^+_d}\coloneqq\frac{1}{\sqrt{d}}\sum_{i=0}^{d-1}\ket{i}\ket{i}$ is the generalized singlet, and the maximization is taken over all maximally entangled states $\ket{\Psi}$ in $\mathbb{C}^d\otimes\mathbb{C}^d$, or equivalently over all $d\times d$ unitary operators $U$. Note that in arriving at the second line of Eq.~\eqref{Eq:FEF}, we make use of the fact that $\ket{\Psi^+_d}$ is invariant under local unitary transformation of the form $U\otimes U^*$, where $^*$ denotes complex conjugation. Thus, any maximally entangled state in $\mathbb{C}^d\otimes\mathbb{C}^d$ can be obtained from $\ket{\Psi^+_d}$ by a local unitary transformation acting on Alice's Hilbert space alone. Alternatively, one may also make use of the identity $(A\otimes\mathbb{I}_{\rm B})\ket{\Psi_d^+}=(\mathbb{I}_A\otimes A^\text{\tiny T})\ket{\Psi_d^+}$ which holds for all normal operators $A$~\cite{Jozsa}, where $A^\text{\tiny T}$ is the transpose of $A$ (defined in the Schmidt basis of $\ket{\Psi^+_d}$).
Clearly, the FEF of a state $\rho$ is invariant under local unitary transformation but may decrease when subjected to the $(U\otimes U^*)$-twirling operation~\cite{Horodecki-2,Bennett-96} \begin{eqnarray}\label{Eq:QuantumTwirling}
T(\rho)\coloneqq \int_{U(d)}(U \otimes U^*)\rho(U \otimes U^*)^{\dagger}dU, \end{eqnarray} where $dU$ is the Haar measure over the group of $d\times d$ unitary matrices $U(d)$. Using a somewhat similar reasoning, one can establish the following lemma (whose proof can be found in Appendix~\ref{App:Proof:Lemma}). \begin{lemma}\label{Lemma:Twirling-SF}
For any given state $\rho$, local POVMs $\mathbb{E}_{\rm A}=\{E_{a|x}\}_{a,x}$, and ${\bf F}=\{ F_{a|x}\succeq0\}_{a,x}$ acting on $\mathbb{C}^d\otimes\mathbb{C}^d$, there exists another state $\rho'$ and unitary operators $U$ and $U'$ in $U(d)$ such that \begin{subequations}\label{Eq:Lemma:Conditions} \begin{gather}
\mathcal{F}[T({\rho}')]=\bra{\Psi^+_d}\rho'\ket{\Psi^+_d}=\mathcal{F}(\rho')=\mathcal{F}(\rho),\label{Eq:FEFSame}\\
\Gamma_s\smp{\bgp{\rho,\tilde{\mathbb{E}}_{\rm A}},\tilde{{\bf F}}} \ge\Gamma_s[\{T({\rho}'),\mathbb{E}_{\rm A}\},{\bf F}], \label{Eq:Gamma_s:Ineq} \end{gather} \end{subequations}
where $\tilde{\mathbb{E}}_{\rm A}\coloneqq\left\{U^\dag\,E_{a|x}\,U\right\}_{a,x}$ and $\tilde{{\bf F}}\coloneqq\left\{U'^\dag\,F_{a|x}\,U'\right\}_{a,x}$. \end{lemma}
While we shall be concerned, generally, only with non-negative ${\bf F}$ [we will say ${\bf F}$ (${\bf B}$) is non-negative if it is formed by non-negative $F_{a|x}$ ($B_{ab|xy}$) from now on], it is worth noting that Lemma~\ref{Lemma:Twirling-SF} also holds for ${\bf F}$ formed by arbitrary Hermitian (but not necessarily non-negative) $F_{a|x}$ if the steering fraction and the corresponding steering bound are defined with an absolute sign, i.e., $\Gamma_s ({\bm \sigma}, {\bf F})= \left|\frac{1}{ \omega_s ({\bf F})}\sum_{a,x} \text{tr}(F_{a|x} \sigma_{a|x} )\right|$ and $ \omega_s ({\bf F}) \coloneqq \sup_{\sigma \in \text{LHS}} \left|\sum_{a,x} \text{tr}(F_{a|x} \sigma_{a|x})\right|$.
Recall from~\cite{Horodecki-2} that the $\left(U\otimes U^*\right)$-twirling operation $T(\rho)$ always gives rise to an isotropic state \begin{eqnarray}\label{Eq:Isotropic}
\rho_{\text{iso}}(p)\coloneqq p|\Psi^+_d\rangle\langle\Psi^+_d| + (1-p)\frac{\mathbb{I}}{d^2}, \end{eqnarray} where $p\in[-\tfrac{1}{d^2-1},1]$ and $\mathbb{I}$ is the identity operator acting on the composite Hilbert space. In this case, it thus follows from Eq.~\eqref{Eq:Lemma:Conditions} that \begin{equation}\label{Eq:Estimate1}
\Gamma_s \smp{\bgp{\rho, \tilde{\mathbb{E}}_{\rm A}}, \tilde{{\bf F}}}\ge \mathcal{F}(\rho) \Gamma_s (\left\{\ket{\Psi_d^+}, \mathbb{E}_{\rm A}\right\}, {\bf F})+Z, \end{equation} where $Z\ge0$ is the contribution of $\mathbb{I}-\proj{\Psi_d^+}$ towards the steering fraction. Maximizing both sides over $\mathbb{E}_{\rm A}$ and dropping the contribution from the second term gives $LV_s(\rho, {\bf F})\ge \mathcal{F}(\rho)LV_s(\ket{\Psi_d^+},{\bf F})$. Recall from the definition of $LV_s$ that a steering inequality is violated if $LV_s>1$; thus a simple rearrangement gives the following sufficient condition for $\rho$ to be steerable. \begin{theorem}\label{Thm:SufficentSteerability}
Given a state $\rho$ and ${\bf F}=\{F_{a|x}\succeq 0\}_{a,x}$ acting on $\mathbb{C}^d\otimes\mathbb{C}^d$, a sufficient condition for $\rho$ to be steerable from Alice to Bob\footnote{Note that there exist quantum states that are steerable from Alice to Bob but not the other way around; see, e.g.,~\cite{Bowles:PRL:2015,Quintino}.} is \begin{eqnarray}\label{Eq:F-LV}
\mathcal{F}(\rho)>\frac{1}{LV_s(\ket{\Psi_d^+},{\bf F})}. \end{eqnarray} \end{theorem}
Since $\mathcal{F}(\rho^{\otimes k})\ge [\mathcal{F}(\rho)]^k$, a direct corollary of Theorem~\ref{Thm:SufficentSteerability} is that, when joint local measurements are allowed, $\rho^{\otimes k}$ becomes steerable if for some $k>1$: \begin{eqnarray}\label{Eq:F-LV:k-copy}
\mathcal{F}(\rho)>\left[\frac{1}{LV_s(\ket{\Psi_{d^k}^+},{\bf F})}\right]^{\frac{1}{k}}. \end{eqnarray}
It is worth noting that Theorem~\ref{Thm:SufficentSteerability} holds for general POVMs. If one restricts to projective measurements, it is clear that the corresponding largest violation, which we denote by $LV^\pi_s(\ket{\Psi_d^+},{\bf F})$, may be suboptimal, likewise for the threshold for $\mathcal{F}$ derived from Eq.~\eqref{Eq:F-LV}, i.e., $\mathcal{F}>1/LV^\pi_s(\ket{\Psi_d^+},{\bf F}) \ge 1/LV_s(\ket{\Psi_d^+},{\bf F})$. Using the fact mentioned between Eqs.~\eqref{Eq:FEF} and~\eqref{Eq:QuantumTwirling}, it is easy to see that $\ket{\Psi_d^+}$ in inequality~\eqref{Eq:F-LV} can be replaced by any other state that is local-unitarily equivalent to $\ket{\Psi_d^+}$. Note also that an exactly analogous treatment can be applied to Bell nonlocality, thereby giving a sufficient condition for bipartite Bell nonlocality in terms of the fully entangled fraction of a state $\rho$ (see Appendix~\ref{App:Bell} for details).
Let us also briefly comment on the tightness of the sufficient conditions derived from Theorem~\ref{Thm:SufficentSteerability}. Evidently, in order for the sufficient condition derived therefrom to be tight, the inequality in Eq.~\eqref{Eq:Gamma_s:Ineq} must be saturated and the non-negative term $Z$ that appears in Eq.~\eqref{Eq:Estimate1} must vanish. While the first of these conditions can be met, e.g., by choosing a state $\rho$ that is invariant under the $\left(U\otimes U^*\right)$ twirling (and hence being an isotropic state), the second of these conditions generally cannot be met at the same time. The relevance of Theorem~\ref{Thm:SufficentSteerability} thus lies in its simplicity and generality, as we now demonstrate with the following explicit examples.
\subsection{Explicit examples of sufficient condition}
As an application of our sufficient condition, consider the ${\bf F}_\text{\tiny MUB}:=\{\proj{\phi_{a|x}}\}_{a,x}$ induced by a set of $n$ mutually unbiased (orthonormal) bases (MUB) $\{\ket{\phi_{a|x}}\}_{a,x}$ in a $d$-dimensional Hilbert space~\cite{Marciniak15}. ${\bf F}_\text{\tiny MUB}$ is non-negative since it involves only rank-one projectors~\cite{Marciniak15}. It was shown in~\cite{Marciniak15} that \begin{equation}
LV_s(\ket{\Psi_d^+},{\bf F})\ge \max \left\{ \frac{n\sqrt{d}}{n+1+\sqrt{d}}, \frac{d\sqrt{n}}{\sqrt{n}+d-1}\right\}. \end{equation} Using this in Theorem~\ref{Thm:SufficentSteerability}, one finds that a sufficient condition for {\em any} bipartite state $\rho$ in $\mathbb{C}^d\otimes \mathbb{C}^d$ to be steerable is \begin{equation}\label{Eq:SteerableBound}
\mathcal{F}(\rho)>\min \left\{ \frac{n+1+\sqrt{d}}{n\sqrt{d}}, \frac{\sqrt{n}+d-1}{d\sqrt{n}}\right\}. \end{equation} When $d$ is a power of a prime number~\cite{Marciniak15,Wootters}, one can find $n=d+1$ MUB and the second of the two arguments in the right-hand side of Eq.~\eqref{Eq:SteerableBound} is smaller, thus simplifying the sufficient condition to \begin{equation}\label{Eq:F:Sufficient}
\mathcal{F}(\rho)>\frac{d-1+\sqrt{d+1}}{d\sqrt{d+1}}. \end{equation} This implies, for instance, (two-way) steerability of an {\em arbitrary} two-qubit state $\rho$ if $\mathcal{F}(\rho)>\tfrac{1+\sqrt{3}}{2\sqrt{3}}\approx 0.7887$ and an {\em arbitrary} two-qutrit state $\rho$ if $\mathcal{F}(\rho) > \tfrac{2}{3}$, etc. Asymptotically, when $d\to \infty$, the sufficient condition of Eq.~\eqref{Eq:F:Sufficient} simplifies to $\mathcal{F}(\rho)\gtrsim \frac{1}{\sqrt{d}}$, making it evident that this simple criterion becomes especially effective in detecting steerable states for large $d$. Nonetheless, it is worth noting that when $d=2^m$ and with $m\ge 24$, the sufficient condition of Bell nonlocality given in Eq.~\eqref{Eq:SufficientKV} (which is also a sufficient condition for steerability by the fact that any quantum state that is Bell nonlocal is also steerable) already outperforms the sufficient condition given in Eq.~\eqref{Eq:F:Sufficient}.
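For concreteness, the thresholds obtained from Eq.~\eqref{Eq:SteerableBound} with $n=d+1$ mutually unbiased bases (available when $d$ is a prime power) can be tabulated numerically; the short Python sketch below (an editorial illustration; the function name and the list of dimensions are our own choices) also displays the asymptotic value $1/\sqrt{d}$ for comparison.
\begin{verbatim}
# Steerability thresholds on the fully entangled fraction, Eq. (SteerableBound).
import numpy as np

def threshold(d, n):
    return min((n + 1 + np.sqrt(d))/(n*np.sqrt(d)),
               (np.sqrt(n) + d - 1)/(d*np.sqrt(n)))

for d in [2, 3, 4, 5, 7, 8, 9, 16]:      # prime powers, so n = d+1 MUB exist
    print(d, round(threshold(d, d + 1), 4), round(1/np.sqrt(d), 4))
\end{verbatim}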
\subsection{Upper bounds on the largest steering-inequality violation of $\ket{\Psi^+_d}$} \label{Sec:UB-LV}
Instead of sufficient conditions for steerability, Theorem~\ref{Thm:SufficentSteerability} can also be used to derive upper bounds on the largest steering-inequality violation by $\ket{\Psi^+_d}$ for arbitrary non-negative {\bf F}, as we now demonstrate.
Consider again the isotropic state given in Eq.~\eqref{Eq:Isotropic}, which is known to be entangled if and only if $p > p_{\text{ent}}\coloneqq 1/(d+1)$~\cite{Horodecki-2,RMP-Bell}. Moreover, $\rho_{\text{iso}}(p)$ is non steerable under general POVMs if~\cite{Almeida} $p\le\tilde{p}^{\phi}:=\tfrac{3d-1}{d^2-1}\left(1-\tfrac{1}{d}\right)^d$, but steerable with projective measurements if and only if~\cite{Wiseman} $p > p_{\text{steer}}\coloneqq (H_d-1)/(d-1)$ where $H_d\coloneqq \sum_{n=1}^{d}\frac{1}{n}$ is the $d$th Harmonic number. For $p\in[0,1]$, it is easy to see that \begin{equation}\label{Eq:F-rIso-p}
\mathcal{F}[\rho_{\text{iso}}(p)]=p+\tfrac{1-p}{d^2}, \end{equation} thus the critical value of FEF beyond which $\rho_{\text{iso}}(p)$ becomes steerable with projective measurements is $\mathcal{F}[\rho_{\rm{iso}}(p)]>\mathcal{F}_{\text{iso,} d}^{\text{steer}}:=\tfrac{H_{d}+H_{d} d-d}{d^2}$.
In order for this steerability criterion for isotropic state to be compatible with Theorem~\ref{Thm:SufficentSteerability}, we must have $\frac{1}{LV_s^\pi(\ket{\Psi_d^+},{\bf F})}\ge \frac{H_{d}+H_{d} d-d}{d^2}$. Otherwise, one would find $\rho_{\rm{iso}}(p)$ with $\mathcal{F}[\rho_{\text{iso}}(p)]<\mathcal{F}_{\text{iso,} d }^{\text{steer}}$ that is steerable according to Theorem~\ref{Thm:SufficentSteerability}, which is a contradiction. Thus, the above necessary and sufficient condition for steerability of $\rho_{\text{iso}}(p)$ with projective measurements implies the following upper bound on the largest steering-inequality violation of $\ket{\Psi^+_d}$.
\begin{theorem}
The largest steering-inequality violation of $\ket{\Psi^+_d}$ for all ${\bf F}=\{ F_{a|x}\succeq 0\}_{a,x}$ is upper bounded as \begin{eqnarray}\label{Eq:UpperBound}
LV_s^\pi(\ket{\Psi_d^+},{\bf F})\le \frac{d^2}{H_{d}+H_{d}d-d} \end{eqnarray} for projective measurements. \end{theorem}
To understand the asymptotic behavior of this upper bound, note that when $d\gg 1$ we have \begin{eqnarray}\label{Eq:AsymptoticFormUpperBound}
\frac{d}{H_{d}+H_{d}d-d}\approx \frac{1}{H_{d}}< \frac{1}{\ln{d}}. \end{eqnarray} This means that $LV_s^\pi(\ket{\Psi_d^+},F)$ scales as $\tfrac{d}{\ln{d}}$ for sufficiently large $d$. In particular, it can be shown\footnote{\label{fn:Upperbound}As $d$ increases from 1, the function $\frac{\ln d}{H_{d}+H_{d}d-d}$ increases monotonically until a maximum value at $d=48$ and decreases monotonically after that.} that $\frac{d^2}{H_{d}+H_{d}d-d}\le 1.0900 \tfrac{d}{\ln{d}}$. Thus our upper bound on $LV_s^\pi(\ket{\Psi_d^+},{\bf F})$ has an asymptotic scaling that improves over the result of Yin {\em at al.}~\cite{JPA15} by a factor of $\tfrac{1}{\ln d}$.\footnote{Their Proposition 2.17 implies an upper bound that scales as $\lesssim d$.}
In addition, by using the sufficient condition of non-steerability of isotropic states under general POVMs [i.e., $\rho_{\rm{iso}}(p)$ is unsteerable under general POVMs if $p\le\tilde{p}^\phi$], one can use Eq.~\eqref{Eq:F-rIso-p} and the same arguments to arrive at the following upper bound under general POVMs: \begin{eqnarray}\label{Eq:UpperBound_generalPOVM} LV_s(\ket{\Psi_d^+},F)\le\frac{d^2}{(d^2-1)\tilde{p}^\phi+1}. \end{eqnarray} When $d\gg1$~\cite{Almeida}, it can be shown that this upper bound scales as $\frac{ed}{3}$.
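The scalings quoted above are easily checked numerically; the following Python sketch (editorial, for illustration only; the sampled dimensions are our own choice) evaluates the projective-measurement bound of Eq.~\eqref{Eq:UpperBound} against $1.0900\,d/\ln d$ and the POVM bound of Eq.~\eqref{Eq:UpperBound_generalPOVM} against $ed/3$.
\begin{verbatim}
# Upper bounds on the largest steering-inequality violation of |Psi^+_d>.
import numpy as np

def H(d):                                   # d-th harmonic number
    return np.sum(1.0/np.arange(1, d + 1))

for d in [2, 10, 48, 100, 1000]:
    proj = d**2/(H(d)*(1 + d) - d)                      # Eq. (UpperBound)
    p_tilde = (3*d - 1)/(d**2 - 1)*(1 - 1/d)**d
    povm = d**2/((d**2 - 1)*p_tilde + 1)                # Eq. (UpperBound_generalPOVM)
    print(d, round(proj, 3), round(1.0900*d/np.log(d), 3),
          round(povm, 3), round(np.e*d/3, 3))
\end{verbatim}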
Let us also remark that since the upper bound of Eq.~\eqref{Eq:UpperBound} [Eq.~\eqref{Eq:UpperBound_generalPOVM}] holds for {\em all} linear steering inequalities specified by non-negative {\bf F}, it also serves as a legitimate upper bound on the largest Bell-inequality violation of $\ket{\Psi^+_d}$ with projective measurements (general POVMs) for all {\em linear Bell inequalities} with {\em non-negative} Bell coefficients. A proof of this can be found in Appendix~\ref{App:Bell}. For linear Bell inequalities specified by non-negative {\bf B}, our upper bound on the largest Bell-inequality violation of $\ket{\Psi^+_d}$ with projective measurements thus has the same scaling as the upper bound due to Palazuelos (see the last equation on page 1971 of \cite{Palazuelos-funct}), but strengthens his by more than a factor of 2. For $d=2$, such an upper bound on the largest Bell-inequality violation of $\ket{\Psi^+_d}$ can be improved further using results from~\cite{Acin,Vertesi}; see Appendix~\ref{App:Bell} for details.
\section{Quantifying Steerability} \label{Sec:QuantifySteering}
Evidently, as we demonstrate in Sec.~\ref{Sec:Characterization}, steering fraction is a very powerful tool for characterizing the steerability of quantum states. A natural question that arises is whether a maximization of steering fraction over all (non-negative) ${\bf F}$ leads to a proper {\em steering quantifier}, i.e., a {\em convex steering monotone}~\cite{Gallego}.
To this end, let us define, for any given assemblage ${\bm \sigma}=\{\sigma_{a|x}\}_{a,x}$, the {\em optimal steering fraction} as \begin{eqnarray}
\mathcal{S}_{\rm O}\left({\bm \sigma}\right)\coloneqq\text{max}\left\{ 0,\sup_{{\bf F}\succeq 0}\Gamma_s\left({\bm \sigma},{\bf F}\right)-1\right\}, \end{eqnarray} where the supremum $\sup_{{\bf F}\succeq 0}$ is taken over all non-negative {\bf F}. From here, one can further define the optimal steering fraction of a quantum state $\rho$ by optimizing over all assemblages that arise from local measurements on one of the parties. Superficially, such a quantifier for steerability bears some similarity to that defined in~\cite{Costa}, but, in the steering measure defined therein, there is a further optimization over all possible steering-inequality violations by {\em all possible quantum states}, which is not present in our definition.
In Appendix~\ref{App:SO_Proof}, we prove that $\mathcal{S}_{\rm O}$ is indeed a convex steering monotone, i.e., it satisfies the following conditions: \begin{enumerate}
\item $\mathcal{S}_{\rm O}({\bm \sigma})=0$ for all unsteerable assemblages ${\bm \sigma}$.
\item $\mathcal{S}_{\rm O}$ does not increase, on average, under deterministic {\em one-way local operations and classical communications} (1W-LOCCs).
\item For all convex decompositions of ${\bm \sigma} =\mu {\bm \sigma}'+(1-\mu) {\bm \sigma}''$ in terms of other assemblages ${\bm \sigma}'$ and ${\bm \sigma}''$ with $0\le\mu\le1$, $\mathcal{S}_{\rm O}({\bm \sigma})\le\mu \mathcal{S}_{\rm O}({\bm \sigma}')+(1-\mu)\mathcal{S}_{\rm O}({\bm \sigma}'')$. \end{enumerate} Moreover, quantitative relations between $\mathcal{S}_{\rm O}$ and two other convex steering monotones, namely, steerable weight ($\mathcal{S}_{\rm W}$)~\cite{SteerableWeight} and steering robustness ($\mathcal{S}_{\rm R}$)~\cite{Piani2015}, can be established, as we now demonstrate.
\subsection{Quantitative relation between optimal steering fraction $\mathcal{S}_{\rm O}$ and steerable weight $\mathcal{S}_{\rm W}$}
To begin with, we recall from~\cite{SteerableWeight} that for any assemblage ${\bm \sigma}=\{\sigma_{a|x}\}_{a,x}$, $\mathcal{S}_{\rm W}({\bm \sigma})$ is defined as the minimum non-negative real value $\nu$ satisfying $\sigma_{a|x}=(1-\nu)\sigma_{a|x}^\text{US}+\nu\sigma_{a|x}^\text{S}$ for all $a$ and $x$, where ${\bm \sigma}^\text{US}\coloneqq\{\sigma_{a|x}^{\text{US}}\}_{a,x}\in \text{LHS}$ and ${\bm \sigma}^\text{S}\coloneqq\{\sigma_{a|x}^{\text{S}}\}_{a,x}$ is a steerable assemblage. In other words, $\mathcal{S}_{\rm W}({\bm \sigma})$ is the minimum weight assigned to a steerable assemblage when optimized over all possible convex decompositions of ${\bm \sigma}$ into a steerable assemblage ${\bm \sigma}^\text{S}$ and an unsteerable assemblage ${\bm \sigma}^\text{US}$. In Appendix~\ref{App:SW}, we establish the following quantitative relations between $\mathcal{S}_{\rm O}$ and $\mathcal{S}_{\rm W}$.
\begin{proposition}\label{Prop:So-SW}
Given an assemblage ${\bm \sigma}$ with the decomposition $\sigma_{a|x}=[1-\mathcal{S}_{\rm W}({\bm \sigma})]\sigma_{a|x}^\text{\rm US}+\mathcal{S}_{\rm W}({\bm \sigma})\sigma_{a|x}^\text{\rm S}$, where ${\bm \sigma}^\text{\rm US}\in\text{\rm LHS}$ and ${\bm \sigma}^\text{\rm S}$ is steerable, we have \begin{eqnarray}\label{Eq:OSFandSteerableWeight}
\mathcal{S}_{\rm O}({\bm \sigma})\le\mathcal{S}_{\rm W}({\bm \sigma})\mathcal{S}_{\rm O}({\bm \sigma}^\text{\rm S})\le \mathcal{S}_{\rm O}({\bm \sigma})+2\left[1-\mathcal{S}_{\rm W}({\bm \sigma})\right].\quad \end{eqnarray} \end{proposition}
Note that if a given assemblage ${\bm \sigma}$ is steerable, and hence $\mathcal{S}_{\rm O}({\bm \sigma}^\text{S})\neq 0$, Eq.~\eqref{Eq:OSFandSteerableWeight} can be rearranged to give \begin{eqnarray}\label{SteerableWeight_and_OSF_Estimate}
\frac{\mathcal{S}_{\rm O}({\bm \sigma})}{\mathcal{S}_{\rm O}({\bm \sigma}^\text{S})}\le\mathcal{S}_{\rm W}({\bm \sigma})\le\frac{2+\mathcal{S}_{\rm O}({\bm \sigma})}{2+\mathcal{S}_{\rm O}({\bm \sigma}^\text{S})}. \end{eqnarray} This means that if ${\bm \sigma}$ and ${\bm \sigma}^\text{S}$ are both largely {\em steerable} so that $\mathcal{S}_{\rm O}({\bm \sigma}), \mathcal{S}_{\rm O}({\bm \sigma}^\text{S})\gg 1$,\footnote{This happens if there exist ${\bf F}_1$ and ${\bf F}_2$ such that $\Gamma_s({\bm \sigma}, {\bf F}_1)\gg 1$ and $\Gamma_s({\bm \sigma}^\text{S},{\bf F}_2)\gg 1$.} Eq.~\eqref{SteerableWeight_and_OSF_Estimate} leads to the following approximation: \begin{eqnarray}
\mathcal{S}_{\rm W}({\bm \sigma})\approx\frac{\mathcal{S}_{\rm O}({\bm \sigma})}{\mathcal{S}_{\rm O}({\bm \sigma}^\text{S})}, \end{eqnarray} which provides an estimate of $\mathcal{S}_{\rm W}({\bm \sigma})$ in terms of $\mathcal{S}_{\rm O}({\bm \sigma})$ and $\mathcal{S}_{\rm O}({\bm \sigma}^\text{S})$ when the two latter quantities are large.
\subsection{Quantitative relation between optimal steering fraction $\mathcal{S}_{\rm O}$ and steering robustness $\mathcal{S}_{\rm R}$}
For any given assemblage ${\bm \sigma}$, its steering robustness $\mathcal{S}_{\rm R}({\bm \sigma})$ is defined as the minimal value $\nu\in[0,\infty)$ such that the convex mixture $\frac{1}{1+\nu}{\bm \sigma}+\frac{\nu}{1+\nu}\tilde{{\bm \sigma}}$ is unsteerable for some assemblage $\tilde{{\bm \sigma}}$. In Appendix~\ref{App:SR}, we derive the following quantitative relations between $\mathcal{S}_{\rm O}$ and $\mathcal{S}_{\rm R}$.
\begin{proposition}\label{Prop:So-SR} For an assemblage ${\bm \sigma}$ giving the unsteerable decomposition ${\bm \sigma}^{\rm US}\coloneqq\frac{1}{1+\mathcal{S}_{\rm R}({\bm \sigma})}{\bm \sigma}+\frac{\mathcal{S}_{\rm R}({\bm \sigma})}{1+\mathcal{S}_{\rm R}({\bm \sigma})}\tilde{{\bm \sigma}}$ where $\tilde{{\bm \sigma}}$ is a legitimate assemblage, we have \begin{equation}\label{Eq:SR-So}
\mathcal{S}_{\rm R}({\bm \sigma})\mathcal{S}_{\rm O}(\tilde{{\bm \sigma}})-2\le\mathcal{S}_{\rm O}({\bm \sigma})\le \mathcal{S}_{\rm R}({\bm \sigma})\left[\mathcal{S}_{\rm O}(\tilde{{\bm \sigma}})+2\right]. \end{equation} \end{proposition} Note that if $\tilde{{\bm \sigma}}$ is steerable, then $\mathcal{S}_{\rm O}(\tilde{{\bm \sigma}})>0$, thus Eq.~\eqref{Eq:SR-So} can be rearranged to give \begin{eqnarray}\label{Eq:SteeringRobustness_and_OSF_Estimate}
\frac{\mathcal{S}_{\rm O}({\bm \sigma})}{\mathcal{S}_{\rm O}(\tilde{{\bm \sigma}})+2}\le\mathcal{S}_{\rm R}({\bm \sigma})\le\frac{\mathcal{S}_{\rm O}({\bm \sigma})+2}{\mathcal{S}_{\rm O}(\tilde{{\bm \sigma}})}. \end{eqnarray} As with the case of $\mathcal{S}_{\rm W}$, if $\mathcal{S}_{\rm O}({\bm \sigma}), \mathcal{S}_{\rm O}(\tilde{{\bm \sigma}})\gg 1$, Eq.~\eqref{Eq:SteeringRobustness_and_OSF_Estimate} implies the following approximation: \begin{eqnarray}
\mathcal{S}_{\rm R}({\bm \sigma})\approx\frac{\mathcal{S}_{\rm O}({\bm \sigma})}{\mathcal{S}_{\rm O}(\tilde{{\bm \sigma}})}, \end{eqnarray} which provides an estimate of $\mathcal{S}_{\rm R}({\bm \sigma})$ in terms of the optimal steering fraction $\mathcal{S}_{\rm O}$.
\section{Superactivation and Unbounded Amplification of Steerability} \label{Sec:SuperAmpli}
Let us now turn our attention to the superactivation and amplification of steerability.
\subsection{Superactivation of steerability} Following the terminology introduced by Palazuelos~\cite{Palazuelos} in the context of Bell nonlocality, we say that the steerability of a quantum state $\rho$ can be {\em superactivated} if it satisfies: \begin{subequations}\label{Eq:Superactivation} \begin{gather}
LV_s(\rho,{\bf F})\le 1\quad\forall\,\,{\bf F}, \\
\Gamma_s\left(\left\{\rho^{\otimes k},\mathbb{E}_{\rm A}\right\},{\bf F}'\right)>1\quad\text{for some $k$, $\mathbb{E}_{\rm A}$, and ${\bf F}'$}. \end{gather} \end{subequations} The possibility to superactivate Bell nonlocality---a question originally posed in~\cite{Liang:PRA:2006}---was first demonstrated by Palazuelos~\cite{Palazuelos} using a certain entangled isotropic state. Their result was soon generalized by Cavalcanti {\em et al.}~\cite{Cavalcanti-PRA} to show that the Bell nonlocality of all entangled states with $\text{FEF}>\tfrac{1}{d}$ can be superactivated. Since all Bell-nonlocal states are also steerable~\cite{Wiseman,Quintino}, while entangled $\rho_\text{iso}(p)$ are exactly those having FEF $>\tfrac{1}{d}$, the steerability of all entangled but unsteerable $\rho_\text{iso}(p)$ can be superactivated (e.g., those with $p_\text{ent}<p\le\tilde{p}^\phi$; see Sec.~\ref{Sec:UB-LV} on page~\pageref{Sec:UB-LV}).
For the benefit of subsequent discussion on the amplification of steerability, it is worth going through the key steps involved in the proof of this superactivation. To this end, let us first recall from~\cite{KV, Buhrman} the Khot-Vishnoi (KV) nonlocal game, which is parametrized by $\eta\in[0,\tfrac{1}{2}]$. Let us denote by $G=\{0,1\}^n$ the group of $n$-bit strings, with bitwise addition modulo 2 (denoted $\oplus$) as the group operation. Consider the (normal) Hadamard subgroup $H$ of $G$, which contains $n$ elements. The cosets of $H$ in $G$ give rise to the quotient group $\tfrac{G}{H}$ with $\tfrac{2^n}{n}$ elements. The KV game can then be written in the form of a Bell inequality, cf. Eq.~\eqref{Eq:BI}, with $\tfrac{2^n}{n}$ settings and $n$ outcomes:\footnote{Evidently, the KV game defined here only makes sense when the number of outputs is a power of 2. Generalization of this to the situation where $n$ can be an arbitrary positive integer has been considered, for example, in~\cite{Palazuelos-funct}.}
\begin{gather}
\sum_{x,y\in\tfrac{G}{H}}\sum_{a\in x,b\in y}\!\!\! B^{\text{\tiny {\rm KV}}}_{ab|xy}\ P(a,b|x,y) \stackrel{\mbox{\tiny LHV}}{\le} \omega({\bf B}^{\text{\tiny {\rm KV}}}),\nonumber\\
B^{\text{\tiny {\rm KV}}}_{ab|xy}\coloneqq \sum_{g\in G} \frac{n}{2^n} \eta^{w_g}(1-\eta)^{n-w_g}\delta_{a\oplus g,b}\delta_{x\oplus g,y},\label{Eq:BI:KV} \end{gather} where $w_g$ is the Hamming weight of $g\in G$ and $\delta_{i,j}$ is the Kronecker delta between $i$ and $j$
\footnote{Note that for $x,y\in\frac{G}{H}$ and $g\in G$, $\delta_{x\oplus g,y} = 1$ if and only if $y$ and $x\oplus g\coloneqq\{h\oplus g | h\in x \}$ are associated with the same coset in the quotient group $\frac{G}{H}$.} and ${\bf B}^\text{\tiny {\rm KV}}$ is the set of Bell coefficients defining the KV game.
An important feature of ${\bf B}^\text{\tiny {\rm KV}}$ given in Eq.~\eqref{Eq:BI:KV} is that $\omega({\bf B}^{\text{\tiny {\rm KV}}})\le n^{-\frac{\eta}{1-\eta}}$~\cite{Buhrman,Palazuelos} . For the specific choice~\cite{Palazuelos} of $\eta=\frac{1}{2}-\frac{1}{\ln n}$, which makes sense only for $n\ge 8$, this gives $\omega({\bf B}^{\text{\tiny {\rm KV}}})\le n^{-1+\frac{4}{2\ln n}}< \tfrac{C_u}{n}$ with $C_u=e^4$. In this case, performing judiciously chosen rank-1 projective measurements specified by $\mathbb{E}^{\text{\tiny {\rm KV}}}=\mathbb{E}_{\rm A}^{\text{\tiny {\rm KV}}}\bigcup\mathbb{E}_{\rm B}^{\text{\tiny {\rm KV}}}$, where $\mathbb{E}_{\rm A}^{\text{\tiny {\rm KV}}}:=\{E^\text{\tiny {\rm KV}}_{a|x}\}_{a,x}$ and $\mathbb{E}_{\rm B}^{\text{\tiny {\rm KV}}}:=\{E^\text{\tiny {\rm KV}}_{b|y}\}_{b,y}$, on $\ket{\Psi^+_D}$ (with $D=n$) gives rise to a correlation ${\bf P}^\text{\tiny {\rm KV}}$ with the following lower bound on the nonlocality fraction: \begin{equation}\label{Eq:KVestimate}
\Gamma \left({\bf P}^\text{\tiny {\rm KV}}, {\bf B}^{\text{\tiny {\rm KV}}}\right) > C \frac{D}{(\ln D )^2} \end{equation} where $C=4e^{-4}$.
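As a small arithmetic check (ours, not part of the original argument), the following Python sketch evaluates the classical-value bound $n^{-\frac{\eta}{1-\eta}}$ for the specific choice $\eta=\frac{1}{2}-\frac{1}{\ln n}$ and confirms numerically that it stays below $\tfrac{C_u}{n}$ with $C_u=e^4$ for several $n=2^m$ with $n\ge8$:
\begin{verbatim}
import math

# Check that, for eta = 1/2 - 1/ln(n), the classical value of the KV game
# obeys n^{-eta/(1-eta)} < C_u / n with C_u = e^4 (n a power of 2, n >= 8).
C_u = math.e ** 4
for m in range(3, 16):
    n = 2 ** m
    eta = 0.5 - 1.0 / math.log(n)
    omega_bound = n ** (-eta / (1.0 - eta))
    print(n, omega_bound, C_u / n, omega_bound < C_u / n)
\end{verbatim}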
Consider now the collection of non-negative matrices ${\bf F}^{\text{\tiny {\rm KV}}} \coloneqq \{ F^{\text{\tiny {\rm KV}}}_{a|x} = \sum_{b,y} B^{\text{\tiny {\rm KV}}}_{ab|xy}E^{\text{\tiny {\rm KV}}}_{b|y}\}_{a,x}$ induced by the KV game and Bob's optimal POVMs $\mathbb{E}_{\rm B}^\text{\tiny {\rm KV}}$ leading to the lower bound given in Eq.~\eqref{Eq:KVestimate}. An application of inequality~\eqref{Eq:BellvsSteering} to Eq.~\eqref{Eq:KVestimate} immediately leads to
\begin{align}
\Gamma_s \left(\left\{\ket{\Psi_{D}^+}, \mathbb{E}_{\rm A}^{\text{\tiny {\rm KV}}}\right\}, {\bf F}^{\text{\tiny {\rm KV}}}\right)&\ge \Gamma \left({\bf P}^\text{\tiny {\rm KV}}, {\bf B}^{\text{\tiny {\rm KV}}}\right)
>C \frac{D}{(\ln D )^2} \label{Steering_KV_Estimate} \end{align}
For any given state $\rho$, Lemma~\ref{Lemma:Twirling-SF}, Eq.~\eqref{Eq:Estimate1}, and Eq.~\eqref{Steering_KV_Estimate} together imply the existence of $\tilde{\mathbb{E}}^{\text{\tiny {\rm KV}}}_{\rm A}\coloneqq\{ \tilde{E}_{a|x}^{\text{\tiny {\rm KV}}}\}_{a,x}$ such that \begin{eqnarray}\label{Eq:Gamma-s:FEF}
\Gamma_s \smp{\bgp{\rho, \tilde{\mathbb{E}}^{\text{\tiny {\rm KV}}}_{\rm A}}, \tilde{{\bf F}}^{\text{\tiny {\rm KV}}}}\ge C\frac{\mathcal{F}(\rho)D}{(\ln D)^2}, \end{eqnarray} where \begin{eqnarray}\label{Eq:ActualSteeringFunctional}
\tilde{{\bf F}}^{\text{\tiny {\rm KV}}}\coloneqq\left\{\tilde{F}^{\text{\tiny {\rm KV}}}_{a|x}=\sum_{b,y} {U'}^\dagger B^{\text{\tiny {\rm KV}}}_ {ab|xy}\, E^{\text{\tiny {\rm KV}}}_{b|y}\, U'\right\}_{a,x} \end{eqnarray} again is non-negative.
Note that if $\rho = \rho_{\rm{iso}}(p)^{\otimes k}$, we have both $D=d^k$ and $\mathcal{F}(\rho)=\sqp{\mathcal{F}(\rho_{\rm{iso}}(p))}^k$ scaling exponentially with $k$, while $(\ln{D})^2=k^2(\ln{d})^2$ only increases quadratically with $k$. Thus, Eq.~\eqref{Eq:Gamma-s:FEF} implies that $\mathcal{F}(\rho_{\rm{iso}}(p))>\tfrac{1}{d}$ is a sufficient condition for $\rho_{\rm{iso}}(p)^{\otimes k}$ to be steerable for sufficiently large $k$. In other words, for all entangled $\rho_{\rm{iso}}(p)$, $\rho_{\rm{iso}}(p)^{\otimes k}$ is steerable for sufficiently large $k$. In particular, since $\rho_{\rm{iso}}(p)$ for $p_\text{ent}<p\le\tilde{p}^\phi$ is {\em not} single-copy steerable (see Sec.~\ref{Sec:UB-LV} on page~\pageref{Sec:UB-LV}), the steerability of $\rho_{\rm{iso}}(p)$ with $p$ in this interval can be superactivated.
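To see how quickly the exponential factor wins, the following Python sketch (an illustration based solely on the lower bound of Eq.~\eqref{Eq:Gamma-s:FEF}; the actual number of copies required for steerability may well be smaller) reports the smallest $k$ for which $C\,[\mathcal{F}(\rho_{\rm{iso}}(p))\,d]^k/(k\ln d)^2$ exceeds 1, using the standard isotropic-state fully entangled fraction $\mathcal{F}(\rho_{\rm{iso}}(p))=[p(d^2-1)+1]/d^2$:
\begin{verbatim}
import math

C = 4 * math.e ** (-4)

def fef_iso(p, d):
    # Fully entangled fraction of the isotropic state rho_iso(p) in dimension d.
    return (p * (d * d - 1) + 1) / (d * d)

def copies_needed(p, d, k_max=10**6):
    # Smallest k such that the lower bound C (F d)^k / (k ln d)^2 exceeds 1.
    F = fef_iso(p, d)
    if F * d <= 1.0:      # F <= 1/d: the isotropic state is separable
        return None
    for k in range(1, k_max):
        log_bound = math.log(C) + k * math.log(F * d) - 2 * math.log(k * math.log(d))
        if log_bound > 0:
            return k
    return None

# Example: a weakly entangled two-qubit isotropic state (F slightly above 1/2).
print(copies_needed(p=0.4, d=2))
\end{verbatim}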
\subsection{Unbounded amplification of steerability}
Given that joint measurements on an appropriately chosen quantum state $\rho$ can lead to the superactivation of steerability, one may ask, as with Bell nonlocality (see~\cite{Palazuelos}), if it is possible to obtain unbounded amplification of steerability of a quantum state with joint measurements. In particular, since it is easier to exhibit EPR steering than Bell nonlocality, can one achieve an unbounded steering-inequality violation (as quantified by the steering fraction) using fewer copies of the quantum state? Here, we show that unbounded amplification of steerability can indeed be achieved using as few as three copies of a quantum state, which improves on the unbounded amplification of Bell nonlocality achieved by Palazuelos~\cite{Palazuelos} with five copies. More precisely, our results are summarized in the following theorem.
\begin{theorem}\label{Thm:Amplification} For every $\epsilon>0$ and $\delta>0$, there exists an isotropic state $\rho_{\rm{iso}}$ with local dimension $d$ such that \begin{eqnarray}\label{Eq:Conditions:Amplification}
LV_s^{\pi}(\rho_{\rm{iso}},{\bf F})\le \epsilon + 1 \quad \& \quad LV_s(\rho_{\rm{iso}}^{\otimes 3},\tilde{{\bf F}}^{\text{\tiny {\rm KV}}})>\delta \end{eqnarray}
for all non-negative ${\bf F}=\{ F_{a|x}\succeq 0\}_{a,x}$, where $\tilde{{\bf F}}^{\text{\tiny {\rm KV}}}$ is defined in Eq.~\eqref{Eq:ActualSteeringFunctional} with local dimension $d^3$. Moreover, this $\rho_{\rm{iso}}$ can be chosen to be unsteerable under projective measurements whenever $\epsilon<1$, and steerable whenever $1\le\epsilon$. \end{theorem}
\begin{proof} The proof of this is similar to that given by Palazuelos~\cite{Palazuelos} for proving the unbounded amplification of Bell nonlocality using five copies of $\rho_{\rm{iso}}(p)$. First of all, note from Eqs.~\eqref{Eq:LV} and~\eqref{Eq:Isotropic} that \begin{align}
LV_s^{\pi}(\rho_{\text{iso}},{\bf F})&\le p LV_s^{\pi}(\ket{\Psi_d^+},{\bf F})+(1-p)LV_s^{\pi}\left(\frac{\mathbb{I}}{d^2},{\bf F}\right)\nonumber\\
&\le1+p[LV_s^{\pi}(\ket{\Psi_d^+},{\bf F})-1]. \end{align}
Recall from Eq.~\eqref{Eq:UpperBound} that $LV_s^{\pi}(\ket{\Psi_d^+},{\bf F})$ is upper bounded, thus $LV_s^{\pi}(\rho_{\text{iso}},{\bf F})$ is upper bounded by $1+\epsilon$ for any given $\epsilon>0$ if we set \begin{equation}\label{Eq:p}
p=\frac{\epsilon}{\frac{d^2}{H_{d}+H_{d}d-d}-1}. \end{equation} Evidently, for any given $\epsilon$, we still need to ensure that Eq.~\eqref{Eq:p} indeed gives a legitimate parameter for isotropic states such that $0\le p\le1$. Since the denominator in Eq.~\eqref{Eq:p} is positive for all $d\ge2$ and increases with $d$ (it approaches $\infty$ as $d\to\infty$), the $p$ defined above is always non-negative and, for any given $\epsilon>0$, can be made no larger than 1 by choosing $d$ sufficiently large. Using this, as well as Eq.~\eqref{Eq:F-rIso-p} in Eq.~\eqref{Eq:Gamma-s:FEF} with $\rho=\rho_{\rm{iso}}(p)^{\otimes k}$ and $\mathcal{F}(\rho)=[\mathcal{F}(\rho_{\rm{iso}}(p))]^k$, we obtain \begin{align}\label{Eq:Gamma-s:Amplify0}
\Gamma_s \smp{\bgp{\rho, \tilde{\mathbb{E}}^{\text{\tiny {\rm KV}}}_{\rm A}}, \tilde{{\bf F}}^{\text{\tiny {\rm KV}}}}\ge
\frac{C\,d^k}{(k\ln{d})^2}\left[-\epsilon + \frac{1 + \epsilon}{d^2} + \frac{(d-1) \epsilon}{d - H_d}\right]^k. \end{align} For $d\gg1$, \begin{align}\label{Eq:Gamma-s:Amplify}
\Gamma_s \smp{\bgp{\rho, \tilde{\mathbb{E}}^{\text{\tiny {\rm KV}}}_{\rm A}}, \tilde{{\bf F}}^{\text{\tiny {\rm KV}}}}\ge&\frac{C\,d^k\,\epsilon^k}{(k\ln{d})^2}\left( \frac{H_d-1}{d - H_d}\right)^k\nonumber\\
\approx&\frac{C\,\epsilon^k}{k^2}(\ln{d})^{k-2}. \end{align} Thus, for $k\ge3$, we see that $\Gamma_s \left(\left\{\rho_{\rm{iso}}(p)^{\otimes k}, \tilde{\mathbb{E}}^{\text{\tiny {\rm KV}}}_{\rm A}\right\}, \tilde{{\bf F}}^{\text{\tiny {\rm KV}}}\right)$ with $p$ defined in Eq.~\eqref{Eq:p} can become arbitrarily large if we make $d$ arbitrarily large. In particular, for any given $\epsilon$, $d$ must be large enough so that the $p$ defined in Eq.~\eqref{Eq:p} is larger than $\frac{1}{d+1}$, the critical value of $p$ below which $\rho_{\rm{iso}}(p)$ becomes separable.
Now, a direct comparison between Eq.~\eqref{Eq:p} and the threshold value of $p=p_\text{steer}=(H_d-1)/(d-1)$ [where the isotropic state becomes (un)steerable with projective measurements] shows that \begin{equation}
0<\epsilon\le \kappa_d\coloneqq\frac{(d-H_d)(d+1)(H_d-1)}{(H_d+dH_d-d)(d-1)}, \end{equation} if and only if the isotropic state with $p$ given in Eq.~\eqref{Eq:p} is unsteerable under projective measurements. It is easy to verify that (1) the quantity $\kappa_d$ satisfies $\kappa_d<1$ for all $d\ge2$, and (2) $\kappa_d$ rapidly approaches 1 when $d\to\infty$. Hence, for every $0<\epsilon<1$, there exists an isotropic state $\rho_{\rm{iso}}$ (with sufficiently large $d$) that is entangled but unsteerable with projective measurements, but which nevertheless attains arbitrarily large steering-inequality violation with $\rho_{\rm{iso}}^{\otimes 3}$. \end{proof}
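The quantities entering the proof are easy to evaluate. The Python sketch below (an illustration only) computes, for a fixed $\epsilon$, the parameter $p$ of Eq.~\eqref{Eq:p}, the threshold $\kappa_d$, and the $k=3$ lower bound of Eq.~\eqref{Eq:Gamma-s:Amplify0}; it confirms that $p$ corresponds to an entangled isotropic state, that $p\le p_\text{steer}$ precisely when $\epsilon\le\kappa_d$, and that $\kappa_2=0.3$ with $\kappa_d\to1$ as $d$ grows. In line with Eq.~\eqref{Eq:Gamma-s:Amplify}, the $k=3$ bound grows only logarithmically with $d$, so extremely large $d$ is needed before it exceeds a given $\delta$.
\begin{verbatim}
import math

C = 4 * math.e ** (-4)

def H(d):
    # d-th harmonic number, denoted H_d in the text.
    return sum(1.0 / i for i in range(1, d + 1))

def p_of(eps, d):
    # Isotropic-state parameter of Eq. (Eq:p).
    return eps / (d * d / (H(d) + H(d) * d - d) - 1.0)

def kappa(d):
    Hd = H(d)
    return (d - Hd) * (d + 1) * (Hd - 1) / ((Hd + d * Hd - d) * (d - 1))

def bound_k3(eps, d):
    # Right-hand side of Eq. (Eq:Gamma-s:Amplify0) for k = 3.
    Hd = H(d)
    bracket = -eps + (1 + eps) / d ** 2 + (d - 1) * eps / (d - Hd)
    return C * d ** 3 / (3 * math.log(d)) ** 2 * bracket ** 3

eps = 0.5
for d in [2, 10, 10 ** 2, 10 ** 3, 10 ** 4]:
    p, kd = p_of(eps, d), kappa(d)
    p_steer = (H(d) - 1) / (d - 1)
    print(d,
          1 / (d + 1) < p <= 1,            # p corresponds to an entangled state
          (p <= p_steer) == (eps <= kd),   # unsteerable (projective) iff eps <= kappa_d
          round(kd, 4),                    # kappa_2 = 0.3, kappa_d -> 1
          bound_k3(eps, d))
\end{verbatim}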
Remarks on the implications of Theorem~\ref{Thm:Amplification} are now in order. First, a direct observation shows that $\rho_{\rm{iso}}$ with $p$ given in Eq.~\eqref{Eq:p} is always unsteerable under projective measurements if $0\le\epsilon<\kappa_2=0.3$, where one can verify that $\kappa_d$ achieves its minimal value $0.3$ at $d=2$. This, however, is still not enough to guarantee that the given isotropic state $\rho_{\rm{iso}}$ is unsteerable under general POVMs, owing to the lack of an exact characterization of steerability under general POVMs.
Second, it is worth noting that the above results also hold if we replace steerability by Bell nonlocality. To see this, let us first recall that the largest Bell-inequality violation under projective measurements is also upper bounded by Eq.~\eqref{Eq:UpperBound} [see Eq.~\eqref{Eq:UpperBound_LV}]. Next, note that the lower bound on steering fraction that we have presented in Eq.~\eqref{Eq:Gamma-s:Amplify0} is actually inherited from a lower bound on the corresponding nonlocality fraction using the KV Bell inequality. Therefore, exactly the same calculation goes through if $\Gamma_s \smp{\bgp{\rho, \tilde{\mathbb{E}}^{\text{\tiny {\rm KV}}}_{\rm A}}, \tilde{{\bf F}}^{\text{\tiny {\rm KV}}}}$ is replaced by $\Gamma({\bf P},{\bf B}^\text{\tiny {\rm KV}})$ with ${\bf P}$ derived from Eq.~\eqref{Eq:QCor} assuming local POVMs that lead to Eq.~\eqref{Eq:KVestimate}. In other words, for sufficiently large $d$, one can always find entangled isotropic states $\rho_{\rm{iso}}$ that do not violate any Bell inequality with projective measurements, but which nevertheless attain arbitrarily large Bell-inequality violation with $\rho_{\rm{iso}}^{\otimes 3}$. This improves on the result of Palazuelos~\cite{Palazuelos}, which requires five copies for unbounded amplification.
\section{Discussion} \label{Sec:Conclude}
In this paper, we have introduced the tool of steering fraction $\Gamma_s$ and used it to establish novel results spanning across various dimensions of the phenomenon of quantum steering. Below, we briefly summarize these results and comment on some possibilities for future research.
First, we have derived a general sufficient condition for {\em any} bipartite quantum state $\rho$ to be steerable (Bell nonlocal) in terms of its fully entangled fraction, a quantity closely related to the usefulness of $\rho$ for teleportation~\cite{Teleportation}. As we briefly discussed in Sec.~\ref{Sec:Characterization}, we do not expect these sufficient conditions to detect all steerable (Bell-nonlocal) states. Nonetheless, let us stress that to determine if a quantum state is steerable (as with determining if a quantum state can exhibit Bell nonlocality; see, e.g.,~\cite{Horodecki,Liang:PRA:2007}) is a notoriously difficult problem, which often requires the optimization over the many parameters used to describe the measurements involved in a steering experiment (and/or the consideration of potentially infinitely many different steering inequalities).
In contrast, the general criterion that we have presented in Theorem~\ref{Thm:SufficentSteerability} for steerability (and Theorem~\ref{Theorem:Suff_Condi_Nonlocality} for Bell nonlocality) only requires a relatively straightforward computation of the fully entangled fraction of the state of interest. Given that these sufficient conditions are likely to be suboptimal, an obvious question that follows is whether one can find an explicit threshold $\mathcal{F}_\text{thr}$ that is smaller than that given by Theorem~\ref{Thm:SufficentSteerability} (Theorem~\ref{Theorem:Suff_Condi_Nonlocality}) such that $\mathcal{F}>\mathcal{F}_\text{thr}$ still guarantees steerability (Bell nonlocality). While this may seem like a difficult problem, recent advances~\cite{algorithm} in the algorithmic construction of local hidden-variable (-state) models may shed some light on this. More generally, it will be interesting to know if our sufficient condition can be strengthened while preserving its computability. In particular, it will be very useful to find analogous steerability (Bell-nonlocality) criteria that are tight.
On the other hand, the aforementioned sufficient condition has also enabled us to derive upper bounds---as functions of $d$---on the largest steering-inequality violation $LV_s$ ($LV_s^\pi$) achievable by the maximally entangled state $\ket{\Psi^+_d}$ under general POVMs (projective measurements). In particular, using the general connection between $LV_s$ and the largest Bell-inequality violation, $LV$, established in Appendix~\ref{App:Bell}, our upper bounds on $LV_s$ and $LV_s^\pi$ imply upper bounds on $LV$ and $LV^\pi$ by $\ket{\Psi^+_d}$ (for non-negative {\bf B}), respectively. Notably, our upper bound on $LV^\pi$ is somewhat tighter than that due to Palazuelos~\cite{Palazuelos-funct}. If any strengthened sufficient conditions for steerability, as discussed above, are found, it would also be interesting to see if they could lead to tighter (non asymptotic) upper bound(s) on the largest steering-inequality (and/or Bell-inequality) violation attainable by $\ket{\Psi^+_d}$.
The tool of steering fraction $\Gamma_s$, in addition, can be used to quantify steerability. In particular, we showed that if $\Gamma_s$ is optimized over all (non-negative) {\bf F}, the resulting quantity can be cast as a {\em convex steering monotone}~\cite{Gallego}, which we referred to as the {\em optimal steering fraction} $\mathcal{S}_{\rm O}$. We further demonstrated how this monotone is quantitatively related to two other convex steering monotones: steerable weight~\cite{SteerableWeight} and steering robustness~\cite{Piani2015}. In the light of quantum information, it would be desirable to determine an operational meaning of $\mathcal{S}_{\rm O}$, e.g., in the context of some quantum information tasks (cf. steering robustness~\cite{Piani2015}). Establishment of quantitative relations between $\mathcal{S}_{\rm O}$ and other convex steering monotones, such as the relative entropy of steering~\cite{Gallego}, would certainly be very welcome. In particular, it would be highly desirable to establish quantitative relations that allow one to estimate $\mathcal{S}_{\rm O}$ from other easily computable steering monotones, such as the steerable weight or the steering robustness.
Using the established sufficient condition for steerability, we have also demonstrated the superactivation of steerability, i.e., the phenomenon that a certain unsteerable quantum state $\rho$ becomes, for sufficiently large $k$, steerable when joint {\em local} measurements on $\rho^{\otimes k}$ are allowed. A drawback of the examples that we have presented here is that they derive directly from the superactivation of Bell nonlocality due to Palazuelos~\cite{Palazuelos} and Cavalcanti {\em et al.}~\cite{Cavalcanti-PRA}. An obvious question that follows is whether one can construct explicit examples for the superactivation of steerability using quantum states whose Bell nonlocality {\em cannot} be superactivated via joint measurements.
On the other hand, with joint local measurements, we showed that the steering-inequality (Bell-inequality) violation of certain barely steerable (Bell-nonlocal) $\rho$ [or even unsteerable (Bell-local) $\rho$ with projective measurements] can be arbitrarily amplified, in particular, giving an arbitrarily large steering-inequality (Bell-inequality) violation with $\rho^{\otimes 3}$. Could such unbounded amplification be achieved using joint measurements on two copies of the same state? Our proof technique, see Eq.~\eqref{Eq:Gamma-s:Amplify}, clearly requires a minimum of three copies for unbounded amplification to take place, but it is conceivable that a smaller number of copies suffices if some other steering (Bell) inequality is invoked, a problem that we shall leave for future research.
{\em Note added.} Recently, we became aware of the work of~\cite{Quintino:unpublished} who independently (1) derived a sufficient condition of steerability in terms of the fully entangled fraction and (2) demonstrated the superactivation of steering of the isotropic states. Moreover, after submission of this work, an anonymous referee of QIP2017 brought to our attention that for any given assemblage, its optimal steering fraction is actually identical to its steering robustness $\mathcal{S}_{\rm R}$, as can be seen from the dual semidefinite programming formulation of steering robustness given in Eq. (41) in~\cite{SDP} (see also~\cite{Piani2015}).
\begin{acknowledgments} The authors acknowledge useful discussions with Nicolas Brunner, Daniel Cavalcanti, Flavien Hirsch, and Marco T\'ulio Quintino and helpful suggestions from an anonymous referee of AQIS2016. This work is supported by the Ministry of Education, Taiwan, R.O.C., through ``Aiming for the Top University Project" granted to the National Cheng Kung University (NCKU), and by the Ministry of Science and Technology, Taiwan (Grants No. 104-2112-M-006-021-MY3 and No. 105-2628-M-007-003-MY4). \end{acknowledgments}
\appendix
\section{Proof of Lemma~\ref{Lemma:Twirling-SF}} \label{App:Proof:Lemma}
Here, we provide a proof of Lemma~\ref{Lemma:Twirling-SF}. \begin{proof}
For any given state $\rho'$, local POVMs $\mathbb{E}_{\rm A}$ and non-negative ${\bf F}$, let us note that \begin{align}
&\Gamma_s \left(\left\{T(\rho'),\mathbb{E}_{\rm A}\right\},{\bf F}\right)\coloneqq \frac{\sum_{a,x} \text{tr}\left[({E}_{a|x} \otimes {F}_{a|x})T(\rho')\right]}{ \omega_s ({{\bf F}})}\nonumber\\
&=\int_{U(d)} \frac{\sum_{a,x} \text{tr}\left[(U^\dag\,{E}_{a|x}\,U \otimes U^{*\dag}\,{F}_{a|x}\,U^{*}) \rho'\right]}{ \omega_s ({{\bf F}})}\,dU\nonumber\\ &= \int_{U(d)}\Gamma_s\left(\left\{\rho', {\mathbb{E}}_U\right\}, {{\bf F}}_U\right)dU\nonumber\\ &\le \max_{U\in U(d)}\Gamma_s\left(\left\{\rho', {\mathbb{E}}_U\right\},{{\bf F}}_U\right), \label{Eq:MaxGammas:U:Twirl} \end{align}
where ${\mathbb{E}}_U\coloneqq {\{ U^{\dag}{E}_{a|x}U \}}$ and ${{\bf F}}_U\coloneqq {\{ U^{*\dag}F_{a|x}U^* \}}$.
Denoting by $U_\Gamma$ the unitary operator achieving the maximum in Eq.~\eqref{Eq:MaxGammas:U:Twirl}, the above inequality implies that \begin{equation}\label{GammaS:1}
\Gamma_s\left(\left\{\rho', {\mathbb{E}}_{U_\Gamma}\right\},{{\bf F}}_{U_\Gamma}\right) \ge \Gamma_s \left(\left\{T(\rho'),{\mathbb{E}_{\rm A}}\right\},{{\bf F}}\right). \end{equation}
For any given state $\rho$, let us further denote by $U_\mathcal{F}$ the unitary operator that maximizes the FEF of $\rho$ in Eq.~\eqref{Eq:FEF}, i.e., \begin{equation}
\mathcal{F}(\rho)=\bra{\Psi^+_d}(U_\mathcal{F}\otimes \mathbb{I}_B)\,\rho\, (U_\mathcal{F}\otimes \mathbb{I}_B)^\dag\ket{\Psi^+_d}. \end{equation} Defining $\rho':=(U_\mathcal{F}\otimes \mathbb{I}_B)\,\rho\, (U_\mathcal{F}\otimes \mathbb{I}_B)^\dag$ and $\tilde{\mathbb{E}}'_{\rm A} = U_\mathcal{F}\, \tilde{\mathbb{E}}_{\rm A}\, U_\mathcal{F}^\dag$, we then have \begin{subequations}\label{GammaS:2} \begin{equation}
\Gamma_s\left(\left\{\rho,\tilde{\mathbb{E}}_{\rm A}\right\},\tilde{{\bf F}}\right) = \Gamma_s\left(\left\{\rho',\tilde{\mathbb{E}}'_{\rm A}\right\},\tilde{{\bf F}}\right) \end{equation} with \begin{equation}
\mathcal{F}(\rho)=\mathcal{F}(\rho')=\bra{\Psi^+_d}\rho'\ket{\Psi^+_d}=\mathcal{F}\left[T(\rho')\right], \end{equation} \end{subequations} where the last equality follows from the fact~\cite{Horodecki-1,Horodecki-2} that if $\mathcal{F}(\rho')$ is attained with $\ket{\Psi^+_d}$, then $\mathcal{F}\left[T(\rho')\right]$ is attained with $\ket{\Psi^+_d}$.
Combining Eqs.~\eqref{GammaS:1} and~\eqref{GammaS:2} by setting $\mathbb{E}_{U_\Gamma}=\tilde{\mathbb{E}}'_{\rm A}$ and ${\bf F}_{U_\Gamma}=\tilde{{\bf F}}$ then gives the desired inequality: \begin{eqnarray}
\Gamma_s\left(\left\{\rho,\tilde{\mathbb{E}}_{\rm A}\right\},\tilde{{\bf F}}\right)&&=\Gamma_s\left(\left\{\rho', {\mathbb{E}}_{U_\Gamma}\right\},{{\bf F}}_{U_\Gamma}\right) \nonumber\\
&&\ge \Gamma_s \left(\left\{T(\rho'),{\mathbb{E}_{\rm A}}\right\},{{\bf F}}\right), \end{eqnarray} with \begin{equation} \begin{split}
\tilde{\mathbb{E}}_{\rm A}=U_\mathcal{F}^\dag\,\tilde{\mathbb{E}}'_{\rm A}\, &U_\mathcal{F}=U_\mathcal{F}^\dag\,U_\Gamma^{\dag}{\mathbb{E}_{\rm A}}\,U_\Gamma\, U_\mathcal{F},\\
\tilde{{\bf F}}&=U_\mathcal{F}^{*\dag}\, {\bf F}\, U^*_\mathcal{F}, \end{split} \end{equation} which completes the proof. \end{proof}
As a remark, analogous steps but with $\max$ in Eq.~\eqref{Eq:MaxGammas:U:Twirl} replaced by $\min$ lead to the fact that for any given $\rho$, $\mathbb{E}_{\rm A}$, and non-negative ${\bf F}$, there exist $\tilde{\mathbb{E}}_{\rm A}$ and $\tilde{{\bf F}}$ such that $\Gamma_s (\{\rho, \tilde{\mathbb{E}}_{\rm A}\}, \tilde{{\bf F}}) \le \Gamma_s \left(\left\{T(\rho),\mathbb{E}_{\rm A}\right\},{\bf F}\right)$.
\section{Sufficient Condition of Bell nonlocality and Upper Bounds on the Largest Bell-inequality Violation of $\ket{\Psi^+_d}$} \label{App:Bell}
\subsection{Sufficient condition of Bell nonlocality}
For any given quantum state $\rho$, local POVMs $\mathbb{E}$, and (linear) Bell inequality Eq.~\eqref{Eq:BI}, the largest Bell-inequality violation is defined as~\cite{Palazuelos}: \begin{eqnarray}\label{Eq:LV-Bell}
LV(\rho,{\bf B})\coloneqq\sup_{\mathbb{E}}\Gamma\left(\left\{\rho,\mathbb{E}\right\},{\bf B}\right). \end{eqnarray} Using arguments exactly analogous to the proof of Theorem~\ref{Thm:SufficentSteerability}, one can establish the following sufficient condition for a bipartite quantum state $\rho$ to be Bell nonlocal. \begin{theorem}\label{Theorem:Suff_Condi_Nonlocality}
Given a state $\rho$ acting on $\mathbb{C}^d\otimes\mathbb{C}^d$ and ${\bf B}\coloneqq\{B_{ab|xy}\ge 0\}_{a,b,x,y}$, a sufficient condition for $\rho$ to violate the Bell inequality specified by ${\bf B}$ and hence be Bell nonlocal is \begin{eqnarray}\label{Eq:Bell:SufficientCondition}
\mathcal{F}(\rho)>\frac{1}{LV(\ket{\Psi_d^+},{\bf B})}. \end{eqnarray} \end{theorem} Note that unlike Theorem~\ref{Thm:SufficentSteerability}, there is no local unitary degree of freedom in the right-hand side of Eq.~\eqref{Eq:Bell:SufficientCondition}. Using again the fact that $\mathcal{F}\left(\rho^{\otimes k}\right)\ge \left[\mathcal{F}(\rho)\right]^k$, a direct corollary of Theorem~\ref{Theorem:Suff_Condi_Nonlocality} is the following sufficient condition for $\rho^{\otimes k}$ to be Bell nonlocal: \begin{eqnarray}
\mathcal{F}(\rho)>\left[\frac{1}{LV(\ket{\Psi_d^+},{\bf B})}\right]^{\frac{1}{k}}. \end{eqnarray}
\subsubsection{A sufficient condition based on the Collins-Gisin-Linden-Massar-Popescu Bell inequality}
As an explicit example, let us consider the family of two-setting, $d$-outcome Bell inequalities due to Collins, Gisin, Linden, Massar, and Popescu~\cite{CGLMP}. For each $d$, this inequality can be re-written in a form that involves only the following non-negative coefficients: \begin{equation}
B^\text{\tiny CGLMP}_{ab|xy}=
\left\{ \begin{array}{c@{\quad\quad}l}
2+\frac{2(a-b)}{d-1}, & b\ge a\,\,\, \text{and}\,\,\, x+y=0,\\
\frac{2(a-b-1)}{d-1}, & b<a\,\,\, \text{and}\,\,\, x+y=0,\\
\frac{2(b-a-1)}{d-1}, & b>a\,\,\, \text{and}\,\,\, x+y=1,\\
2+\frac{2(b-a)}{d-1}, & b\le a\,\,\, \text{and}\,\,\, x+y=1,\\
2-\frac{2(b-a-1)}{d-1}, & b>a\,\,\, \text{and}\,\,\, x+y=2,\\
\frac{2(a-b)}{d-1}, & b\le a\,\,\, \text{and}\,\,\, x+y=2,
\end{array} \right. \end{equation} such that
\begin{equation}\label{Eq:CGLMP}
\sum_{a,b=0}^{d-1}\sum_{x,y=0,1} B^\text{\tiny CGLMP}_{ab|xy}P(a,b|x,y) \stackrel{\mbox{\tiny LHV}}{\le} 6. \end{equation} A (tight) lower bound on the largest Bell-inequality violation of this inequality can be inferred from the result presented in~\cite{CGLMP} as \begin{eqnarray}\label{Eq:CHSH:LV} &&LV(\ket{\Psi_d^+},{\bf B}^\text{\tiny CGLMP}) \nonumber\\ &&\ge\frac{2}{3}\left\{ 1+d\sum_{k=0}^{\lfloor \frac{d}{2}\rfloor-1}\left(1-\frac{2k}{d-1}\right)\left[q_k-q_{-(k+1)}\right]\right\},\quad \end{eqnarray} where $q_k=\tfrac{1}{2d^3\sin^2\left[\pi(k+\frac{1}{4})/d\right]}$. Putting Eq.~\eqref{Eq:CHSH:LV} into Eq.~\eqref{Eq:Bell:SufficientCondition}, we thus see that \begin{equation}\label{Eq:SufficientCGLMP}
\mathcal{F}(\rho)>\frac{3}{2+2d\sum_{k=0}^{\lfloor \frac{d}{2}\rfloor-1}\left(1-\frac{2k}{d-1}\right)\left[q_k-q_{-(k+1)}\right]} \end{equation} is a sufficient condition for Bell nonlocality. For $d=2$, this can be evaluated explicitly to give \begin{equation}
\mathcal{F}(\rho)>\frac{3}{2+\sqrt{2}}\approx0.8787, \end{equation} whereas, in the asymptotic limit of $d\to\infty$, the sufficient condition becomes $\mathcal{F}(\rho)>0.8611$.
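The two thresholds quoted above are easy to reproduce numerically; the following Python sketch (a convenience check only) evaluates the right-hand side of Eq.~\eqref{Eq:SufficientCGLMP} for $d=2$, recovering $3/(2+\sqrt{2})\approx0.8787$, and for a large $d$, where it approaches $\approx0.861$:
\begin{verbatim}
import math

def q(k, d):
    # q_k = 1 / (2 d^3 sin^2[pi (k + 1/4) / d]).
    return 1.0 / (2 * d ** 3 * math.sin(math.pi * (k + 0.25) / d) ** 2)

def cglmp_threshold(d):
    # Right-hand side of Eq. (Eq:SufficientCGLMP).
    s = sum((1 - 2 * k / (d - 1)) * (q(k, d) - q(-(k + 1), d))
            for k in range(d // 2))
    return 3.0 / (2 + 2 * d * s)

print(cglmp_threshold(2))        # ~0.8787, i.e. 3/(2 + sqrt(2))
print(cglmp_threshold(10 ** 4))  # approaches ~0.861 in the large-d limit
\end{verbatim}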
\subsubsection{A sufficient condition based on the Khot-Vishnoi nonlocal game}
Although the sufficient condition of Eq.~\eqref{Eq:SufficientCGLMP} can be applied to an arbitrary Hilbert space dimension $d$, there is no reason to expect that it is optimal for all $d\ge 2$. Indeed, when $d$ is a power of 2 and when $d\ge 2^{10}$, a considerably stronger sufficient condition for Bell nonlocality can be established based on the known lower bound of $LV(\ket{\Psi_d^+},{\bf B}^\text{\tiny {\rm KV}})$ given in Eq.~\eqref{Eq:KVestimate}. Explicitly, applying Eq.~\eqref{Eq:KVestimate} to Eq.~\eqref{Eq:Bell:SufficientCondition} gives the following sufficient condition of Bell nonlocality: \begin{eqnarray}\label{Eq:SufficientKV}
\mathcal{F} (\rho) > \frac{e^4}{4}\frac{(\ln{d})^2}{d},\quad d=2^m,\quad m\in\mathbb{N}, \end{eqnarray} where $\mathbb{N}$ is the set of positive integers. This sufficient condition is non trivial (i.e., the lower bound given above is {\em less} than 1) only when $d \ge 2^{10}$. At this critical value of $d$, the sufficient condition of Eq.~\eqref{Eq:SufficientKV} becomes $\mathcal{F}(\rho)\gtrsim 0.6404$, which is considerably better than that given by Eq.~\eqref{Eq:SufficientCGLMP}. Notice also that, for $m\ge 3$, the right-hand side of Eq.~\eqref{Eq:SufficientKV} decreases monotonically with increasing $m$. Thus, when measured in terms of the fully entangled fraction, Eq.~\eqref{Eq:FEF}, we see that the fraction of the set of Bell-nonlocal states that can be detected by Eq.~\eqref{Eq:SufficientKV} whenever $d$ is a power of 2 increases monotonically with $d$.
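The numbers quoted in this paragraph can be reproduced with the short Python sketch below (again only an arithmetic check), which evaluates the right-hand side of Eq.~\eqref{Eq:SufficientKV} for $d=2^m$; it first drops below 1 at $m=10$, where it equals $\approx0.6404$, and decreases monotonically for larger $m$:
\begin{verbatim}
import math

def kv_threshold(m):
    # Right-hand side of Eq. (Eq:SufficientKV) for d = 2^m.
    d = 2 ** m
    return (math.e ** 4 / 4) * math.log(d) ** 2 / d

for m in range(3, 16):
    t = kv_threshold(m)
    print(m, 2 ** m, t, t < 1)
\end{verbatim}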
\subsection{Upper bounds on the largest Bell-inequality violation}
In this section, we demonstrate how the largest steering-inequality violation $LV_s$ is related to the largest Bell-inequality violation $LV$ for the case when ${\bf F}$ is induced from the given ${\bf B}$. \begin{theorem}\label{theorem:LV_Upper_bounded_by_LVs}
Given ${\bf B}\coloneqq\{B_{ab|xy}\ge 0\}_{a,b,x,y}$ and a state $\rho$ acting on $\mathbb{C}^d\otimes\mathbb{C}^d$, we have \begin{eqnarray}
LV(\rho,{\bf B})\le \sup_{\mathbb{E}_{\rm B}}LV_s\left[\rho,{\bf F}_{({\bf B}; \mathbb{E}_{\rm B})}\right], \end{eqnarray}
where the supremum is taken over all possible sets of Bob's POVMs $\mathbb{E}_{\rm B}:=\{ E_{b|y}\}_{b,y}$, and ${\bf F}_{({\bf B};\mathbb{E}_{\rm B})}$ is given by Eq.~\eqref{Eq:InducedF}. \end{theorem} \begin{proof}
Let $\mathbb{E}_{\rm A}:=\{ E_{a|x}\}_{a,x}$ be a generic set of Alice's POVMs and $\mathbb{E}$ be the union of $\mathbb{E}_{\rm A}$ and $\mathbb{E}_{\rm B}$. Suppose that the local POVMs $\mathbb{E}_{\rm A}$ and $\mathbb{E}_{\rm B}$ acting on the joint state $\rho$ of Alice and Bob give rise to the correlation ${\bf P}$; then Eq.~\eqref{Eq:BellvsSteering} implies that \begin{equation}
\Gamma \left(\left\{\rho,\mathbb{E}\right\},{\bf B}\right)\le \Gamma_s \sqp{\left\{\rho,\mathbb{E}_{\rm A}\right\},{\bf F}_{({\bf B};\mathbb{E}_{\rm B})}}, \end{equation} where we write $\Gamma \left({\bf P},{\bf B}\right):=\Gamma \left(\left\{\rho,\mathbb{E}\right\},{\bf B}\right)$ to make the dependence of ${\bf P}$ on $\rho$ and $\mathbb{E}$ explicit. Thus, \begin{eqnarray} &&LV(\rho,B)=\sup_{\mathbb{E}}\Gamma \left(\left\{\rho,\mathbb{E}\right\},{\bf B}\right)\nonumber\\ &&\le \sup_{\mathbb{E}_{\rm A}, \mathbb{E}_{\rm B}} \Gamma_s \sqp{\left\{\rho,\mathbb{E}_{\rm A}\right\},{\bf F}_{({\bf B};\mathbb{E}_{\rm B})}}\nonumber\\ &&\le \sup_{\mathbb{E}_{\rm B}} LV_s\left[\rho,{\bf F}_{({\bf B};\mathbb{E}_{\rm B})}\right], \end{eqnarray} which completes the proof. \end{proof} Theorem \ref{theorem:LV_Upper_bounded_by_LVs} implies that $LV(\rho,{\bf B})$ is upper bounded by the highest value of the largest steering-inequality violation of steering inequalities that can be induced by ${\bf B}$. In this sense, we can interpret $\sup_{\mathbb{E}_{\rm B}} LV_s[\rho,{\bf F}_{({\bf B};\mathbb{E}_{\rm B})}]$ as the largest steering-inequality violation arising from a given ${\bf B}$. In particular, the largest Bell-inequality violation achievable by a maximally entangled state for {\em any} non-negative ${\bf B}$ under projective measurements must also be {\em upper bounded} by Eq.~\eqref{Eq:UpperBound}: \begin{eqnarray}\label{Eq:UpperBound_LV}
LV^\pi(\ket{\Psi_d^+},{\bf B})\le \frac{d^2}{H_d+H_d d-d}. \end{eqnarray} Note that Eq.~\eqref{Eq:UpperBound_LV} {\em implies}---with the fact given in footnote \ref{fn:Upperbound}---Palazuelos' upper bound~\cite{Palazuelos-funct} on the largest Bell-inequality violation of maximally entangled states under projective measurements (for non-negative ${\bf B}$). Also, Eq.~\eqref{Eq:UpperBound_generalPOVM} implies the following upper bound: \begin{eqnarray} LV(\ket{\Psi_d^+},{\bf B})\le\frac{d^2}{(d^2-1)\tilde{p}^\phi+1}, \end{eqnarray} which scales as $d$ when $d\gg1$. It is worth noting that, in the case of general POVMs, Palazuelos' upper bound (Theorem 0.3 in \cite{Palazuelos-funct}) is better than ours by a scaling factor $\frac{1}{\sqrt{d}}$, but we have used a much simpler approach in our derivation (than the operator space theory approach of~\cite{Palazuelos-funct}).
A nice feature of the upper bounds on $LV^{\pi}(\ket{\Psi_d^+},{\bf B})$ and $LV(\ket{\Psi_d^+},{\bf B})$ presented above is that they apply to all dimensions $d$ and all non-negative ${\bf B}$. The drawback, however, is that they are generally not tight. For instance, for the two-qubit maximally entangled state, the inequality above gives $LV^{\pi}(\ket{\Psi_{d=2}^+},{\bf B})\le 1.6$, but if we make explicit use of the nonlocal properties of $\rho_{\rm{iso}}(p)$, then this bound can be tightened. Firstly, let us recall from~\cite{Acin} that the threshold value of $p$ above which $\rho_{\rm{iso}}(p)$ violates some Bell inequality by projective measurements is given by $p_c=\tfrac{1}{K_G(3)}$, where $K_G(3)$ is Grothendieck's constant of order 3~\cite{Finch}. Although the exact value of the constant is not known, it is known to satisfy the following bounds: \begin{equation}\label{Eq:KG}
1.4172 \le K_G(3)\le 1.5163, \end{equation} where the lower bound is due to V\'ertesi~\cite{Vertesi} and the upper bound is due to Krivine~\cite{Krivine}.
Note that for $d=2$, $p=p_c$ corresponds to a FEF of $\mathcal{F}_c=\tfrac{1}{4}(3p_c+1)$. Then, in order for Eq.~\eqref{Eq:Bell:SufficientCondition} to be consistent with this observation, we must have \begin{eqnarray}\label{Eq:GrothendickBound}
LV^{\pi}(\ket{\Psi_2^+},{\bf B})\le\frac{1}{\mathcal{F}_c}=\frac{4K_G(3)}{3+K_G(3)}\le 1.3430, \end{eqnarray} where the upper (Krivine) bound in Eq.~\eqref{Eq:KG} has been used to arrive at the last inequality, since $4K_G(3)/[3+K_G(3)]$ is increasing in $K_G(3)$.
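The arithmetic is immediate; for convenience, the following short Python check evaluates $4K_G(3)/[3+K_G(3)]$ at both ends of the interval in Eq.~\eqref{Eq:KG}:
\begin{verbatim}
def bound(K):
    # Evaluate 4 K / (3 + K), cf. Eq. (Eq:GrothendickBound).
    return 4 * K / (3 + K)

print(bound(1.4172), bound(1.5163))   # ~1.2833 and ~1.3430
\end{verbatim}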
\section{Proofs related to the properties of the optimal steering fraction} \label{App:So}
\subsection{Proof that $\mathcal{S}_{\rm O}$ is a convex steering monotone}\label{App:SO_Proof}
The proof proceeds in two parts. We first show that $\mathcal{S}_{\rm O}$ is a convex function that vanishes for unsteerable assemblages. We then show that it is a steering monotone~\cite{Gallego}, that is, non-increasing, on average, under one-way local operations and classical communications (1W-LOCCs). The first part of the proof follows from the following lemma. \begin{lemma}\label{Lemma:So_Convex}
$\mathcal{S}_{\rm O}({\bm \sigma})$ is convex in ${\bm \sigma}$ and satisfies $\mathcal{S}_{\rm O}({\bm \sigma})=0$ $\forall\,\,{\bm \sigma}\in\text{LHS}$. \end{lemma} \begin{proof}
From the definition of $\Gamma_s({\bm \sigma},{\bf F})$ and $\mathcal{S}_{\rm O}({\bm \sigma})$, it follows immediately that $\mathcal{S}_{\rm O}({\bm \sigma})=0$ if ${\bm \sigma} \in \text{LHS}$. Note that for any {\em convex} decomposition of the assemblage $\sigma_{a|x}=\mu\sigma_{a|x}'+(1-\mu)\sigma_{a|x}''$ with $\mu\in[0,1]$ and ${\bm \sigma}'$, ${\bm \sigma}''$ being two legitimate assemblages, we have \begin{equation*}
\sup_{{\bf F}\succeq 0}\Gamma_s({\bm \sigma},{\bf F})\le\mu\sup_{{\bf F}\succeq 0}\Gamma_s({\bm \sigma}',{\bf F})+(1-\mu)\sup_{{\bf F}\succeq 0}\Gamma_s({\bm \sigma}'',{\bf F}). \end{equation*} When $\mathcal{S}_{\rm O}({\bm \sigma})=0$, the convexity of $\mathcal{S}_{\rm O}({\bm \sigma})$ holds trivially. In the non trivial case when $\mathcal{S}_{\rm O}({\bm \sigma})=\sup_{{\bf F}\succeq 0}\Gamma_s({\bm \sigma},{\bf F})-1 > 0$, it follows from the above inequality for the steering fractions that $\mathcal{S}_{\rm O}({\bm \sigma})\le\mu \mathcal{S}_{\rm O}({\bm \sigma}')+(1-\mu)\mathcal{S}_{\rm O}({\bm \sigma}'')$, which completes the proof of the convexity of $\mathcal{S}_{\rm O}$. \end{proof}
Next, we shall demonstrate the monotonicity of $\mathcal{S}_{\rm O}$ by showing the following theorem. \begin{theorem}\label{Thm:Monotonic} $\mathcal{S}_{\rm O}$ does not increase, on average, under deterministic 1W-LOCCs, i.e.~\cite{Gallego}, \begin{equation}
\sum_\omega P(\omega)\, \mathcal{S}_{\rm O}\left(\frac{\mathcal{M}_\omega\left({\bm \sigma}\right)}{P(\omega)}\right) \le \mathcal{S}_{\rm O}\left({\bm \sigma}\right) \end{equation} for all assemblages ${\bm \sigma}=\{\sigma_{a|x}\}_{a,x}$, where $P(\omega)=\text{tr}\sqp{\mathcal{M}_\omega({\bm \sigma})}$ and $\mathcal{M}_\omega$ is the subchannel of the completely positive map $\mathcal{M}$ labeled by $\omega$, i.e., \begin{equation}
\mathcal{M}_\omega(\cdot)\coloneqq K_\omega\,\widetilde{\mathcal{W}}_\omega(\cdot)\, K_\omega^\dagger, \end{equation}
and $\widetilde{\mathcal{W}}_\omega$ is a {\em deterministic wiring map} that transforms a given assemblage ${\bm \sigma}=\{\sigma_{a|x}\}_{a,x}$ to another assemblage $\left\{\tilde{\sigma}_{a'|x'}\right\}_{a',x'}$ with setting $x'$ and outcome $a'$: \begin{equation*}
[ \widetilde{\mathcal{W}}_\omega({\bm \sigma})]_{a'|x'}\coloneqq \tilde{\sigma}_{a'|x'}=\sum_{a,x}P(x|x',\omega)P(a'|x',a,x,\omega)\sigma_{a|x}. \end{equation*} \end{theorem}
To appreciate the motivation of formulating 1W-LOCCs in the above manner and the definition of the trace of the assemblage $\mathcal{M}_\omega({\bm \sigma})$, we refer the readers to~\cite{Gallego}. Moreover, to ease notation, henceforth, we denote $P(x|x',\omega)P(a'|x',a,x,\omega)$ by $Q(a',x',a,x,\omega)$ and define \begin{eqnarray}\label{Eq:Domega}
\mathcal{D}_\omega ({\bm \sigma})\coloneqq\frac{K_\omega \widetilde{\mathcal{W}}_\omega ({\bm \sigma}) K_\omega^\dagger}{\text{tr}\sqp{\mathcal{M}_\omega ({\bm \sigma})}}. \end{eqnarray} To prove Theorem~\ref{Thm:Monotonic}, we shall make use of the following lemma. \begin{lemma}\label{Lemma:Nonincreasing_Lemma} For all $\omega$ and assemblages ${\bm \sigma}$, \begin{eqnarray}
\sup_{{\bf F}\succeq 0} \Gamma_s\sqp{\mathcal{D}_\omega ({\bm \sigma}),{\bf F}}\le\sup_{{\bf F}\succeq 0} \Gamma_s({\bm \sigma},{\bf F}). \end{eqnarray} \end{lemma} \begin{proof} From the definitions given in Eqs.~\eqref{Eq:SteeringFraction} and~\eqref{Eq:Domega}, we get \begin{eqnarray}
&&\sup_{{\bf F}\succeq 0} \Gamma_s\sqp{\mathcal{D}_\omega ({\bm \sigma}),{\bf F}}\nonumber\\
&&\coloneqq\sup_{{\bf F}\succeq 0} \sum_{a',x'}\frac{\text{tr}[F_{a'|x'}K_\omega \sum_{a,x}Q(a',x',a,x,\omega)\sigma_{a|x}
K_\omega^\dagger]}{ \omega_s ({\bf F}) \text{tr}\sqp{\mathcal{M}_\omega ({\bm \sigma})}}\nonumber\\
&&=\sup_{{\bf F}\succeq 0}\frac{1}{ \omega_s ({\bf F})}\sum_{a,x}\text{tr}(\check{F}_{a|x}\sigma_{a|x}), \end{eqnarray}
where $\check{{\bf F}}\coloneqq\{\check{F}_{a|x}\succeq 0\}_{a,x}$ is defined by \begin{eqnarray}
\check{F}_{a|x}\coloneqq\sum_{a',x'}\frac{Q(a',x',a,x,\omega)K_\omega^\dagger F_{a'|x'} K_\omega}{\text{tr}\sqp{\mathcal{M}_\omega ({\bm \sigma})}}. \end{eqnarray} Now we have \begin{eqnarray}
\omega_s (\check{{\bf F}}) :&&=\sup_{{\bm \sigma}\in\text{LHS}} \sum_{a,x}\text{tr}(\check{F}_{a|x}\sigma_{a|x})\nonumber\\
&&=\sup_{{\bm \sigma}\in\text{LHS}} \sum_{a',x'}\text{tr}\left(F_{a'|x'}[\mathcal{D}_\omega ({\bm \sigma})]_{a'|x'}\right)\nonumber\\
&&\le\sup_{{\bm \sigma}\in\text{LHS}} \sum_{a',x'}\text{tr}(F_{a'|x'}\sigma_{a'|x'})= \omega_s ({\bf F}), \end{eqnarray}
where the last inequality follows from the fact that ${\bm \sigma}\in\text{LHS}$ implies $\mathcal{D}_\omega ({\bm \sigma})\in\text{LHS}$~\cite{Gallego}, and thus $\left\{ \mathcal{D}_\omega({\bm \sigma})\ |\ {\bm \sigma}\in\text{LHS}\right\}$ is a subset of LHS. Combining the above results, we have $\sup_{{\bf F}\succeq 0} \Gamma_s\sqp{\mathcal{D}_\omega ({\bm \sigma}),{\bf F}}\le\sup_{{\bf F}\succeq 0} \Gamma_s({\bm \sigma},\check{{\bf F}})$. Since we have $\{\check{{\bf F}}\ |\ {\bf F}\succeq 0\}\subseteq\{ {\bf F}\succeq 0\}$, this means $\sup_{{\bf F}\succeq 0} \Gamma_s({\bm \sigma},\check{{\bf F}})\le\sup_{{\bf F}\succeq 0} \Gamma_s({\bm \sigma},{\bf F})$, and hence the lemma. \end{proof}
To complete the proof of Theorem~\ref{Thm:Monotonic}, it suffices to note, first, that when $\mathcal{S}_{\rm O}\sqp{\mathcal{D}_\omega ({\bm \sigma})}=0$, the inequality $\mathcal{S}_{\rm O}\sqp{\mathcal{D}_\omega ({\bm \sigma})}\le\mathcal{S}_{\rm O}({\bm \sigma})$ holds trivially, whereas when $\mathcal{S}_{\rm O}\sqp{\mathcal{D}_\omega ({\bm \sigma})}=\sup_{{\bf F}\succeq 0} \Gamma_s\sqp{\mathcal{D}_\omega ({\bm \sigma}),{\bf F}}-1 > 0$, Lemma~\ref{Lemma:Nonincreasing_Lemma} implies \begin{eqnarray}
\mathcal{S}_{\rm O}\sqp{\mathcal{D}_\omega ({\bm \sigma})}\le\sup_{{\bf F}\succeq 0} \Gamma_s({\bm \sigma},{\bf F})-1\le\mathcal{S}_{\rm O}({\bm \sigma}). \end{eqnarray} This means that $\mathcal{S}_{\rm O}\sqp{\mathcal{D}_\omega ({\bm \sigma})}\le\mathcal{S}_{\rm O}({\bm \sigma})$ in general. Since $\text{tr}\sqp{\mathcal{M}_\omega ({\bm \sigma})}\ge 0$ for all $\omega$ and $\sum_\omega \text{tr}\sqp{\mathcal{M}_\omega ({\bm \sigma})}\le1$ for (deterministic) 1W-LOCCs, we must have \begin{eqnarray}\label{LemmaF3_estimate}
\sum_\omega \text{tr}\sqp{\mathcal{M}_\omega ({\bm \sigma})}\mathcal{S}_{\rm O}\sqp{\mathcal{D}_\omega ({\bm \sigma})}\le\mathcal{S}_{\rm O}({\bm \sigma}), \end{eqnarray} which completes the proof of Theorem~\ref{Thm:Monotonic} by noting that $P(\omega)=\text{tr}\sqp{\mathcal{M}_\omega ({\bm \sigma})}$ holds by definition.
\subsection{Proof of quantitative relations between optimal steering fraction $\mathcal{S}_{\rm O}$ and steerable weight $\mathcal{S}_{\rm W}$} \label{App:SW}
Here, we give a proof of Proposition~\ref{Prop:So-SW}. \begin{proof}
First, note that the chain of inequalities holds trivially if ${\bm \sigma}$ is unsteerable, since $\mathcal{S}_{\rm W}({\bm \sigma})=0$ in this case. To prove that $\mathcal{S}_{\rm O}({\bm \sigma})\le\mathcal{S}_{\rm W}({\bm \sigma})\mathcal{S}_{\rm O}({\bm \sigma}^\text{S})$ holds in general, we thus assume that $\mathcal{S}_{\rm O}({\bm \sigma})=\sup_{{\bf F}\succeq0} \Gamma_s({\bm \sigma},{\bf F})-1 > 0$ and recall from the condition of Proposition~\ref{Prop:So-SW} that $\sigma_{a|x}=[1-\mathcal{S}_{\rm W}({\bm \sigma})]\sigma_{a|x}^\text{\rm US}+\mathcal{S}_{\rm W}({\bm \sigma})\sigma_{a|x}^\text{\rm S}$; then \begin{align}
\mathcal{S}_{\rm O}({\bm \sigma})=&\sup_{{\bf F}\succeq 0} \frac{1}{ \omega_s ({\bf F})}\sum_{a,x}\text{tr}(F_{a|x}\sigma_{a|x})-1\nonumber\\
\le&\sup_{{\bf F}\succeq 0}\frac{1-\mathcal{S}_{\rm W}({\bm \sigma})}{ \omega_s ({\bf F})}\sum_{a,x}\text{tr}(F_{a|x}\sigma_{a|x}^\text{US})\nonumber\\
&+\sup_{{\bf F}\succeq 0} \frac{\mathcal{S}_{\rm W}({\bm \sigma})}{ \omega_s ({\bf F})}\sum_{a,x}\text{tr}(F_{a|x}\sigma_{a|x}^\text{S})-1\nonumber\\
=&\,\left[1-\mathcal{S}_{\rm W}({\bm \sigma})\right]\left[\sup_{{\bf F}\succeq 0} \Gamma_s({\bm \sigma}^\text{US},{\bf F})-1\right]\nonumber\\
&+\mathcal{S}_{\rm W}({\bm \sigma})\left[\sup_{{\bf F}\succeq 0} \Gamma_s({\bm \sigma}^\text{S},{\bf F})-1\right]\nonumber\\
\le &\, \mathcal{S}_{\rm W}({\bm \sigma})\mathcal{S}_{\rm O}({\bm \sigma}^\text{S}), \end{align} where the last inequality follows from the fact that ${\bm \sigma}^\text{US}\in\text{LHS}$. This proves the first inequality.
To prove the second inequality, $\mathcal{S}_{\rm W}({\bm \sigma})\mathcal{S}_{\rm O}({\bm \sigma}^\text{S})\le\mathcal{S}_{\rm O}({\bm \sigma})+2\left[1-\mathcal{S}_{\rm W}({\bm \sigma})\right]$, we note that the triangle inequality implies \begin{equation*} \begin{split} \mathcal{S}_{\rm W}({\bm \sigma})\Gamma_s({\bm \sigma}^\text{S},{\bf F})\le\Gamma_s({\bm \sigma},{\bf F})+\left[1-\mathcal{S}_{\rm W}({\bm \sigma})\right]\Gamma_s({\bm \sigma}^\text{US},{\bf F}) \end{split} \end{equation*}
Maximizing both sides over all possible ${\bf F}\succeq 0$ and using the definition of optimal steering fraction gives \begin{align*}
\mathcal{S}_{\rm W}({\bm \sigma})\mathcal{S}_{\rm O}({\bm \sigma}^\text{S})&+\mathcal{S}_{\rm W}({\bm \sigma})\\
\le \mathcal{S}_{\rm O}({\bm \sigma})&+\left[1-\mathcal{S}_{\rm W}({\bm \sigma})\right]\mathcal{S}_{\rm O}({\bm \sigma}^\text{US})+2-\mathcal{S}_{\rm W}({\bm \sigma}) \end{align*} Since $\mathcal{S}_{\rm O}({\bm \sigma}^\text{US})=0$ by definition, simplifying the above inequality therefore leads to the desired inequality and completes the proof. \end{proof}
\subsection{Proof of quantitative relations between optimal steering fraction $\mathcal{S}_{\rm O}$ and steering robustness $\mathcal{S}_{\rm R}$} \label{App:SR}
Here, we give a proof of Proposition~\ref{Prop:So-SR}, which proceeds analogously with the proof of Proposition~\ref{Prop:So-SW}. \begin{proof} Again, we focus on the nontrivial scenario where $\mathcal{S}_{\rm O}({\bm \sigma})=\sup_{{\bf F}\succeq 0}\Gamma_s({\bm \sigma},{\bf F})-1 > 0$. To prove $\mathcal{S}_{\rm O}({\bm \sigma})\le \mathcal{S}_{\rm R}({\bm \sigma})\left[\mathcal{S}_{\rm O}(\tilde{{\bm \sigma}})+2\right]$, we note from the condition of the theorem ${\bm \sigma}^{\rm US}=\frac{1}{1+\mathcal{S}_{\rm R}({\bm \sigma})}{\bm \sigma}+\frac{\mathcal{S}_{\rm R}({\bm \sigma})}{1+\mathcal{S}_{\rm R}({\bm \sigma})}\tilde{{\bm \sigma}}$, the definitions of $\mathcal{S}_{\rm O}$, $\Gamma_s$, and the triangle inequality that \begin{align*} \mathcal{S}_{\rm O}({\bm \sigma})\le &\, \left[1+ \mathcal{S}_{\rm R}({\bm \sigma})\right]\sup_{{\bf F}\succeq 0}\Gamma_s({\bm \sigma}^\text{US},{\bf F})\\ &+\mathcal{S}_{\rm R}({\bm \sigma})\sup_{{\bf F}\succeq 0}\Gamma_s(\tilde{{\bm \sigma}},{\bf F})-1\\ \le &\, \left[1+\mathcal{S}_{\rm R}({\bm \sigma})\right]\mathcal{S}_{\rm O}({\bm \sigma}^\text{US})+\mathcal{S}_{\rm R}({\bm \sigma})\mathcal{S}_{\rm O}(\tilde{{\bm \sigma}})+2\mathcal{S}_{\rm R}({\bm \sigma})\\ =&\,\mathcal{S}_{\rm R}({\bm \sigma})\left[\mathcal{S}_{\rm O}(\tilde{{\bm \sigma}})+2\right], \end{align*} where the last equality follows from ${\bm \sigma}^\text{US}\in\text{LHS}$.
To show the other inequality, we note that the decomposition of ${\bm \sigma}^{\rm US}$ and the non-negativity of $\Gamma_s$ imply \begin{align*} \Gamma_s({\bm \sigma},{\bf F})+\left[1+\mathcal{S}_{\rm R}({\bm \sigma})\right]\Gamma_s({\bm \sigma}^\text{US},{\bf F})\ge\mathcal{S}_{\rm R}({\bm \sigma})\Gamma_s(\tilde{{\bm \sigma}},{\bf F}). \end{align*} Rearranging the terms, taking the supremum over ${\bf F}\succeq 0$ on both sides, and noting that $\mathcal{S}_{\rm O}({\bm \sigma}^\text{US})=0$, we thus obtain the desired inequality $\mathcal{S}_{\rm R}({\bm \sigma})\mathcal{S}_{\rm O}(\tilde{{\bm \sigma}})-2\le\mathcal{S}_{\rm O}({\bm \sigma}).$ \end{proof}
\end{document}
\begin{document}
\title[On Wigner's theorem]{On Wigner's theorem in smooth normed spaces}
\author{Dijana Ili\v{s}evi\'{c}}
\address{Department of Mathematics, University of Zagreb, Bijeni\v{c}ka 30, P.O. Box 335, 10002 Zagreb, Croatia}
\email{[email protected]}
\author{Aleksej Turn\v{s}ek}
\address{Faculty of Maritime Studies and Transport, University of Ljubljana, Pot pomor\-\v{s}\v{c}akov 4, 6320 Portoro\v{z}, Slovenia and Institute of Mathematics, Physics and Mechanics, Jadranska 19, 1000 Ljubljana, Slovenia}
\email{[email protected]} \thanks{This research was supported in part by the Ministry of Science and Education of Slovenia.}
\subjclass[2010]{39B05, 46C50, 47J05}
\keywords{Wigner's theorem, isometry, normed space}
\begin{abstract}
In this note we generalize the well-known Wigner unitary-anti\-unitary theorem. For $X$ and $Y$ smooth normed spaces and $f:X\to Y$ a surjective mapping such that $|[f(x),f(y)]|=|[x,y]|$, $x,y\in X$, where $[\cdot,\cdot]$ is the unique semi-inner product, we show that $f$ is phase equivalent to either a linear or an anti-linear surjective isometry. When $X$ and $Y$ are smooth real normed spaces and $Y$ strictly convex, we show that Wigner's theorem is equivalent to $\{\|f(x)+f(y)\|,\|f(x)-f(y)\|\}=\{\|x+y\|,\|x-y\|\}$, $x,y\in X$. \end{abstract}
\maketitle
\newtheorem{theorem}{Theorem}[section] \newtheorem{proposition}[theorem]{Proposition} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{definition}{Definition} \theoremstyle{definition} \newtheorem{example}[theorem]{Example} \newtheorem{xca}[theorem]{Exercise} \newtheorem{question}{Question}
\theoremstyle{remark} \newtheorem{remark}{Remark}[section]
\section{Introduction}
Let $(H,(\cdot,\cdot))$ and $(K,(\cdot,\cdot))$ be inner product spaces over $\mathbb F\in\{\mathbb R,\mathbb C\}$ and suppose that $f:H\to K$ is a mapping satisfying
\begin{equation}| (f(x),f(y))|=|(x,y)|,\quad x,y\in H.\end{equation} Then the famous Wigner's theorem says that $f$ is a solution of (1) if and only if it is phase equivalent to a linear or an anti-linear isometry, say $U$, that is, $$f(x)=\sigma(x) Ux,\quad x\in H,$$
where $\sigma: H\to\mathbb F$, $|\sigma(x)|=1$, $x\in H$, is a so-called phase function. This celebrated result plays a very important role in quantum mechanics and in representation theory in physics. There are several proofs of this result; see \cite{Bargmann, Freed, Geher, Gyory, Lomont, Ratz, Sharma1, Sharma3} for some of them. For generalizations to Hilbert $C^*$-modules see \cite{Bakic, Molnar}.
On each normed space $X$ over $\mathbb{F}$ there exists at least one semi-inner product (s.i.p.), see \cite{Giles, Lumer}, on $X$ which is a function $[\, \cdot, \cdot \,] \colon X\times X\to\mathbb{F}$ with the following properties: \begin{enumerate} \item $[x+y,z]=[x,z]+[y,z]$, $[\lambda x,y]=\lambda[x,y]$, $[x,\lambda y]=\overline{\lambda}[x,y]$ for all $\lambda\in\mathbb{F}$ and $x,y \in X$, \item $[x,x]>0$ when $x\ne0$,
\item $|[x,y]|\leq[x,x]^{1/2}[y,y]^{1/2}$ for all $x,y \in X,$ \end{enumerate}
and moreover, it is compatible with the norm in the sense that $[x,x]^{1/2}=\|x\|$.
Recall that $X$ is said to have a Gateaux differentiable norm at $x\ne0$ whenever
$$\lim_{t\to0,t\in\mathbb{R}}\frac{\|x+ty\|-\|x\|}{t}$$ exists for all $y\in X$.
Remember also that a support functional $\phi_x$ at $x\in X$ is a norm-one linear functional in $X^*$ such that $\phi_x(x) = \|x\|$. By the Hahn--Banach theorem there always exists at least one such functional for every $x\in X$.
A normed space $X$ is said to be smooth at $x$ if there exists a unique support functional at $x$. If $X$ is smooth at each one of its points then $X$ is said to be smooth. It is well known, see for instance \cite[Theorem 1, p.~22]{Diestel}, that a Banach space $X$ is smooth at $x$ if and only if the norm is Gateaux differentiable at $x$. Moreover, in this case, the real part $\text{Re}\,\phi_x$ of a unique support functional $\phi_x$ at $x$ is given by \begin{equation}\label{smooth}
\text{Re}\,\phi_x(y)=\lim_{t\to0, t\in\mathbb R}\frac{\|x+ty\|-\|x\|}{t}. \end{equation}
If $X$ is not smooth then there are many semi-inner products compatible with the norm. However, if $X$ is smooth then $[x,y]:=\|y\|\phi_y(x)$, where $\phi_y$ is the support functional at $y$, is the unique semi-inner product with $[x,x]^{1/2}=\|x\|$.
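For a concrete smooth example one may take real $l^p$, $1<p<\infty$, where the semi-inner product obtained in this way is known to be $[x,y]=\|y\|_p^{2-p}\sum_i x_i|y_i|^{p-1}\mathop{\rm sgn}(y_i)$. The following Python sketch (an informal numerical check, not part of the argument) compares this closed form with the Gateaux-derivative expression $\|y\|\,\phi_y(x)$ coming from (\ref{smooth}):
\begin{verbatim}
import math

p = 3.0
x = [0.5, -1.2, 2.0]
y = [1.0, 2.0, -0.5]

def norm_p(v):
    return sum(abs(t) ** p for t in v) ** (1.0 / p)

def sip_closed_form(u, v):
    # [u, v] = ||v||^{2-p} * sum_i u_i |v_i|^{p-1} sgn(v_i) on real l^p.
    return norm_p(v) ** (2 - p) * sum(
        ui * abs(vi) ** (p - 1) * math.copysign(1.0, vi) for ui, vi in zip(u, v))

def sip_from_derivative(u, v, t=1e-7):
    # [u, v] = ||v|| * phi_v(u), with phi_v(u) the Gateaux derivative of the norm at v.
    deriv = (norm_p([vi + t * ui for ui, vi in zip(u, v)]) - norm_p(v)) / t
    return norm_p(v) * deriv

print(sip_closed_form(x, y), sip_from_derivative(x, y))
print(sip_closed_form(y, y), norm_p(y) ** 2)  # [y, y] = ||y||^2
\end{verbatim}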
Now the following natural question arises: Let $X,Y$ be normed spaces and $f:X\to Y$ a mapping such that \begin{equation}\label{normedWigner}
|[f(x),f(y)]|=|[x,y]|,\quad x,y\in X. \end{equation} Is it true that $f$ satisfies (\ref{normedWigner}) if and only if it is phase equivalent to either a linear or an anti-linear isometry? Let us first check that, in general, not even all linear isometries satisfy (\ref{normedWigner}).
\begin{example} Let $T \colon (l_\infty^2,\mathbb R)\to(l_\infty^2,\mathbb R)$ be defined by $T(x,y)=(y,x)$ and let the semi-inner product for $x=(x_1,x_2)$ and $y=(y_1,y_2)$ be defined by $$[x,y]=\begin{cases}
x_1y_1&\text{if}\quad |y_1|>|y_2|\\
x_2y_2&\text{if}\quad|y_1|<|y_2|\\
\frac{3}{4}x_1y_1+\frac{1}{4}x_2y_2&\text{if}\quad |y_1|=|y_2|. \end{cases} $$ Then for $x=(1,0)$ and $y=(1,1)$ we get $[x,y]=\frac{3}{4}$ and $[Tx,Ty]=\frac{1}{4}$. \end{example}
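The computation in the example is easily verified numerically; the following Python sketch (purely a convenience check) implements the semi-inner product above and the isometry $T$ and reproduces the two values:
\begin{verbatim}
def sip(x, y):
    # The semi-inner product on the real space l_inf^2 from the example above.
    if abs(y[0]) > abs(y[1]):
        return x[0] * y[0]
    if abs(y[0]) < abs(y[1]):
        return x[1] * y[1]
    return 0.75 * x[0] * y[0] + 0.25 * x[1] * y[1]

def T(v):
    # The linear isometry T(x1, x2) = (x2, x1).
    return (v[1], v[0])

x, y = (1.0, 0.0), (1.0, 1.0)
print(sip(x, y), sip(T(x), T(y)))   # 0.75 and 0.25
\end{verbatim}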
However, if $X$ and $Y$ are smooth normed spaces, then a mapping phase equivalent to a linear or an anti-linear isometry satisfies (\ref{normedWigner}). Indeed, if $U$ is a linear or an anti-linear isometry, then $\|Uy+tUx\|=\|y+tx\|$, $t\in\mathbb R$, hence by (\ref{smooth}) $$\text{Re}\,\phi_{Uy}(Ux)=\text{Re}\,\phi_y(x)$$ and then also $[Ux,Uy]=[x,y]$. From $$[f(x),f(y)]=[\sigma(x)Ux,\sigma(y)Uy]=\sigma(x)\overline{\sigma(y)}[Ux,Uy]$$ the claim follows. In our main result Theorem \ref{main} we show that the converse also holds.
\section{Results} Throughout, for a normed space $(X, \Vert \cdot \Vert)$, by $[\, \cdot, \cdot \,]$ we denote a semi-inner product satisfying $\Vert x \Vert = [x,x]^{1/2}$. We denote by $\mathbb PX=\{\langle x\rangle : x \in X\}$ the set of all one-dimensional subspaces of a normed space $X$. If $M\subset X$ then $\langle M\rangle$ will denote the subspace generated by the set $M$. If $L\subseteq X$ is a two-dimensional subspace then $L=\langle L\rangle$ is called a projective line. Recall also that $A:X\to Y$ is semilinear if $A(x+y)=Ax+Ay$ and $A(\lambda x)=h(\lambda)Ax$, $x,y\in X$, $\lambda \in\mathbb{F}$, where $h:\mathbb{F}\to\mathbb{F}$ is a homomorphism. Next we state the fundamental theorem of projective geometry in the form in which it will be needed, see \cite[Theorem 3.1]{Faure}.
\begin{theorem}[Fundamental theorem of projective geometry]\label{projective} Let $X$ and $Y$ be vector spaces over $\mathbb{F}$ of dimensions at least three. Let $g: \mathbb PX\to\mathbb PY$ be a mapping such that \begin{itemize} \item[(i)] The image of $g$ is not contained in a projective line. \item[(ii)] $0\ne c\in \langle a,b\rangle, a\ne 0\ne b,$ implies $g(\langle c\rangle)\in \langle g(\langle a\rangle), g(\langle b\rangle)\rangle$. \end{itemize} Then there exists an injective semilinear mapping $A:X\to Y$ such that $$g(\langle x\rangle)=\langle Ax\rangle,\quad 0\ne x\in X.$$ Moreover, $A$ is unique up to a non-zero scalar factor. \end{theorem}
In the proof of the next theorem we will also need the notion of orthogonality in normed spaces. Remember that $x\in X$ is Birkhoff-James orthogonal to $y\in X$,
$$x\perp y\quad \text{if}\quad \|x+\lambda y\|\geq\|x\|\quad \text{for all }\lambda\in\mathbb{F}.$$
When $x\in X$ is a point of smoothness, $x\perp y$ if and only if $y$ belongs to the kernel of the unique support functional at~$x$, see \cite[Proposition 1.4.4.]{Fleming-Jamison}. An important consequence is that Birkhoff-James orthogonality is right additive in smooth spaces, that is, $x\perp y, x\perp z\Rightarrow x\perp y+z$. Also note that in this case $x\perp y$ if and only if $[y,x]=0$.
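As an informal numerical illustration of this characterization (again on real $l^p$, which is smooth, and with a vector $y$ chosen in the kernel of the support functional at $x$), the sketch below checks that $[y,x]=0$ and that $\|x+\lambda y\|\ge\|x\|$ over a grid of values of $\lambda$:
\begin{verbatim}
import math

p = 3.0
x = [1.0, 2.0]
y = [4.0, -1.0]   # chosen so that sum_i y_i |x_i|^{p-1} sgn(x_i) = 0

def norm_p(v):
    return sum(abs(t) ** p for t in v) ** (1.0 / p)

def sip(u, v):
    # Semi-inner product of real l^p: [u, v] = ||v||^{2-p} sum_i u_i |v_i|^{p-1} sgn(v_i).
    return norm_p(v) ** (2 - p) * sum(
        ui * abs(vi) ** (p - 1) * math.copysign(1.0, vi) for ui, vi in zip(u, v))

print(sip(y, x))  # ~0, so y lies in the kernel of the support functional at x
grid = [i / 10.0 for i in range(-50, 51)]
print(all(norm_p([xi + lam * yi for xi, yi in zip(x, y)]) >= norm_p(x) - 1e-12
          for lam in grid))
\end{verbatim}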
\begin{theorem}\label{main} Let $X$ and $Y$ be smooth normed spaces over $\mathbb{F}$ and suppose that $f \colon X\to Y$ is a surjective mapping satisfying
$$|[f(x),f(y)]|=|[x,y]|,\quad x,y\in X.$$ \begin{itemize} \item[(i)] If $\dim X\geq2$ and $\mathbb{F}=\mathbb{R}$, then $f$ is phase equivalent to a linear surjective isometry. \item[(ii)] If $\dim X\geq2$ and $\mathbb{F}=\mathbb{C}$, then $f$ is phase equivalent to a linear or conjugate linear surjective isometry. \end{itemize} \end{theorem} \begin{proof}
Let $\lambda\in\mathbb{F}$ and $x\in X$. We will show that $f(\lambda x)=\gamma f(x)$, where $\gamma=\gamma(\lambda,x)$ depends on $\lambda$ and on $x$, and $|\gamma|=|\lambda|$. The function \begin{equation}\label{min}
\xi\mapsto\|f(\lambda x)-\xi f(x)\| \end{equation}
is continuous and tends to infinity when $|\xi|$ tends to infinity. Hence there is at least one point, say $\gamma$, such that the function in (\ref{min}) achieves its global minimum. Thus
$$\min_{\xi\in\mathbb{F}}\|f(\lambda x)-\xi f(x)\|=\|f(\lambda x)-\gamma f(x)\|.$$ Note that
$$\|f(\lambda x)-\gamma f(x)+\mu f(x)\|\geq\|f(\lambda x)-\gamma f(x)\|$$ for all $\mu\in\mathbb{F}$, hence $f(\lambda x)-\gamma f(x)\perp f(x)$. Since $f$ is surjective, there is $z\in X$ such that $f(z)=f(\lambda x)-\gamma f(x)$. Then from $f(z)\perp f(x)$ we get $z\perp x$, and then $z\perp \lambda x$ and $f(z)\perp f(\lambda x)$. Since $Y$ is smooth, Birkhoff-James orthogonality is right additive, so from $f(z)\perp f(\lambda x)$ and $f(z)\perp f(x)$ we conclude $f(z)\perp f(\lambda x)-\gamma f(x)=f(z)$. Thus $f(z)=0$ and we have $f(\lambda x)=\gamma f(x)$. Furthermore,
$$\vert \lambda \vert \Vert x \Vert = \|\lambda x\|=\|f(\lambda x)\|=\|\gamma f(x)\|=\vert \gamma \vert \Vert f(x) \Vert = \vert \gamma \vert \Vert x \Vert,$$
which implies $|\gamma|=|\lambda|$.
Next, let $x,y\in X$ be linearly independent. We will show that
$f(x+y)=\alpha f(x)+\beta f(y)$, where $\alpha=\alpha(x,y)$, $\beta=\beta(x,y)$, and $|\alpha|=|\beta|=1$. Arguing as before, we obtain $\alpha, \beta\in\mathbb F$ such that
$$\min_{\xi,\eta\in\mathbb F}\|f(x+y)-\xi f(x)-\eta f(y)\|=\|f(x+y)-\alpha f(x)-\beta f(y)\|.$$ Furthermore, it is easy to see that $$f(x+y)-\alpha f(x)-\beta f(y)\perp f(x)\quad\text{and}\quad f(x+y)-\alpha f(x)-\beta f(y)\perp f(y).$$
Take $z\in X$ such that $f(z)=f(x+y)-\alpha f(x)-\beta f(y)$. Then $f(z)\perp f(x)$ implies $z\perp x$, $f(z)\perp f(y)$ implies $z\perp y$ and smoothness of $X$ implies $z\perp x+y$ and then $f(z)\perp f(x+y)$. Hence $f(z)\perp f(z)$ and $f(z)=0$. Let us show that $|\alpha|=1$. Let $\min_\lambda\|x+\lambda y\|=\|x+\lambda_0y\|$. Then $x+\lambda_0y\perp y$ and $x+\lambda_0y\not\perp x$. Indeed, suppose that $x+\lambda_0y\perp x$. Then by the right additivity we get $x+\lambda_0y\perp x+\lambda_0y$. This would mean that $x+\lambda_0y=0$, a contradiction because $x$ and $y$ are linearly independent. Denote $w=x+\lambda_0y$.
Since $w \perp y$ we also have $f(w) \perp f(y)$.
Then
\begin{eqnarray}
[f(x+y), f(w)]&=&\alpha[f(x), f(w)]+\beta[f(y), f(w)]\nonumber \\
&=&\alpha[f(x), f(w)],\nonumber
\end{eqnarray}
which implies
\begin{eqnarray}
\vert \alpha \vert \vert[x, w]\vert &=&\vert \alpha \vert \vert[f(x), f(w)]\vert=\vert[f(x+y), f(w)]\vert\nonumber \\
&=&\vert[x+y, w]\vert = \vert[x, w]\vert,\nonumber
\end{eqnarray}
hence $|\alpha|=1$. Similarly we get $|\beta|=1$.
Let us prove that $f$ induces a surjective mapping $\tilde{f} \colon \mathbb{P}X\to\mathbb{P}Y$ defined by $\tilde{f}(\langle x\rangle)=\langle f(x)\rangle$. Suppose $\langle x\rangle=\langle y\rangle$, that is $y=\lambda x$. Then $f(y)=f(\lambda x)=\gamma f(x)$ for some $\gamma\in\mathbb F$ and then $\langle f(y)\rangle=\langle f(x)\rangle$. So $\tilde{f}$ is well defined and surjective because $f$ is surjective.
Now suppose that $\dim X\geq3$ and let $x\in X$ be a unit vector. Choose a unit vector $y\in\ker\phi_x$, where $\phi_x$ is the support functional at $x$, and then choose a unit vector $z\in\ker\phi_x\cap\ker\phi_y$, where $\phi_y$ is the support functional at $y$. Then from $x\perp y$, $x\perp z$ and $y\perp z$ follows that $x,y,z$ are linearly independent. Indeed, $y$ and $z$ are linearly independent because $y\perp z$. From $x\perp y$ and $x\perp z$ it follows, using homogeneity and right additivity of Birkhoff-James orthogonality, that $x\perp\langle y,z\rangle$, hence $x,y,z$ are linearly independent. Now $f(x), f(y), f(z)$ are unit vectors such that $f(x)\perp f(y)$, $f(x)\perp f(z)$ and $f(y)\perp f(z)$. As before we conclude that $f(x), f(y)$ and $f(z)$ are linearly independent. So the image of $f$ is not contained in a two-dimensional subspace, thus the image of $\tilde{f}$ is not contained in a projective line. This shows that $\tilde{f}$ satisfies condition (i) of Theorem \ref{projective}. Furthermore, from $f(\lambda x)=\gamma f(x)$ and $f(x+y)=\alpha f(x)+\beta f(y)$ it follows that condition (ii) of Theorem \ref{projective} is also satisfied.
Thus by Theorem \ref{projective} we conclude that $\tilde{f}$ is induced by a bijective semilinear mapping $A:X\to Y$, that is, $$\tilde{f}(\langle x\rangle)=\langle Ax\rangle,\quad x\in X.$$ Fix a nonzero $x\in X$. Then $f(x)=\lambda Ax$ for some nonzero $\lambda\in\mathbb{F}$. Let $y\in X$ be such that $x$ and $y$ are linearly independent. Then $f(y)=\mu Ay$ and $f(x+y)=\nu A(x+y)$. Note also that $Ax$ and $Ay$ are linearly independent since $A$ is semilinear and bijective. Thus from $f(x+y)=\alpha f(x)+\beta f(y)=\alpha\lambda Ax+\beta\mu Ay$ we get
$\alpha\lambda=\nu$ and $\beta\mu=\nu$. Since $|\alpha|=|\beta|=1$ we get $|\lambda|=|\mu|=|\nu|$. Hence $f(z)=\lambda(z)Az$ with $\vert \lambda(z) \vert=\vert \lambda \vert$ for all $z\in X$. Let $U=\lambda A$ and $\sigma(z)=\lambda(z)/\lambda$ for every $z \in X$.
Then $\sigma \colon X \to \mathbb{F}$ is a phase function and
$$f(z)=\lambda(z)Az=\sigma(z)Uz, \quad z \in X.$$ If $\mathbb{F}=\mathbb{R}$ then $A$ (hence also $U$) is linear, because any nontrivial homomorphism $h \colon \mathbb{R}\to\mathbb{R}$ is identity. Suppose $\mathbb{F}=\mathbb{C}$. Let $\xi\in\mathbb{C}$. Then $$f(\xi z)=\lambda(\xi z)A(\xi z)=\lambda(\xi z)h(\xi)Az,$$
and on the other hand $f(\xi z)=\xi'f(z)=\xi'\lambda(z)Az$. Because $|\lambda(\xi z)|=|\lambda(z)|$ and $|\xi'|=|\xi|$ we get $|h(\xi)|=|\xi|$. Then $h$ is continuous at zero, hence continuous everywhere. A continuous homomorphism $h \colon \mathbb{C}\to\mathbb{C}$ is either the identity or complex conjugation. Therefore $A$, and also $U$, is linear or conjugate linear. It is now clear that $U$ is an isometry. It is surjective because $f$ is surjective. This completes the proof in the case $\dim X\geq3$.
Let us now suppose that $\dim{X}=2$.
Let us fix linearly independent $x_0, y_0 \in X$.
Let $A(x_0)=f(x_0)$.
For every $\mu \in \mathbb{F}$ there exist $\omega_1, \omega_2 \in \mathbb{F}$ such that
$f(x_0+\mu y_0)=\omega_1 f(x_0)+\omega_2 f(y_0)$,
with $\vert \omega_1 \vert =1$, $\vert \omega_2 \vert = \vert \mu \vert$.
Let $h(\mu)=\omega_2 / \omega_1$ and $A(\mu y_0) = h(\mu)f(y_0)$.
Note that $\vert h(\mu) \vert = \vert \mu \vert$.
Furthermore, let us define $A(x_0+\mu y_0)=A(x_0)+A(\mu y_0)$.
For $\lambda, \mu \in \mathbb{F}$,
$$f(x_0+(\lambda+\mu)y_0)=\omega_1 f(x_0) + \omega_1 h(\lambda+\mu)f(y_0),$$
and also
\begin{eqnarray}
f(x_0+(\lambda+\mu)y_0) &=& f((x_0+\lambda y_0)+\mu y_0) = \omega_2 f(x_0+\lambda y_0) + \omega_3 f(y_0) \nonumber \\
&=& \omega_4 f(x_0)+ \omega_4 h(\lambda)f(y_0)+\omega_3f(y_0). \nonumber
\end{eqnarray}
Since $f(x_0)$ and $f(y_0)$ are also linearly independent, $\omega_4=\omega_1$ and $\omega_4 h(\lambda)+\omega_3 = \omega_1 h(\lambda + \mu)$, with $\vert \omega_1 \vert = 1$ and $\vert \omega_3 \vert = \vert \mu \vert$.
Then \begin{eqnarray}\label{0A} h(\lambda + \mu)=h(\lambda) + \frac{\omega_3}{\omega_1}, \end{eqnarray}
which implies
$$\vert \lambda + \mu \vert = \vert h(\lambda+\mu) \vert = \Big{\vert} h(\lambda) + \frac{\omega_3}{\omega_1} \Big{\vert}$$
with $\vert h(\lambda) \vert = \vert \lambda \vert$ and $\vert \omega_3 / \omega_1 \vert = \vert \mu \vert$.
This yields
$$\Big{\vert} \frac{\lambda}{\mu}+1 \Big{\vert} = \Big{\vert} h(\lambda) \frac{\omega_1}{\omega_3}+1 \Big{\vert}$$
with $\vert \frac{\lambda}{\mu} \vert = \vert h(\lambda) \frac{\omega_1}{\omega_3} \vert$.
It can be easily verified that
$$\frac{\lambda}{\mu} = h(\lambda) \frac{\omega_1}{\omega_3} \quad \textup{or} \quad \frac{\overline{\lambda}}{\overline{\mu}} = h(\lambda) \frac{\omega_1}{\omega_3},$$ that is,
\begin{eqnarray}\label{1A}
\frac{\omega_3}{\omega_1}=h(\lambda)\frac{\mu}{\lambda}
\end{eqnarray}
or
\begin{eqnarray}\label{2A}
\frac{\omega_3}{\omega_1}=h(\lambda)\frac{\overline{\mu}}{\overline{\lambda}}.
\end{eqnarray} Let us fix $\eta \in \mathbb{F}$. If \eqref{1A} holds for $\lambda=1$ and $\mu=\eta-1$ then \eqref{0A} implies $$h(\eta)=h(1)\eta.$$ If \eqref{2A} holds for $\lambda=1$ and $\mu=\eta-1$ then \eqref{0A} implies $$h(\eta)=h(1)\overline{\eta}.$$ If $\mathbb{F}=\mathbb{R}$ we are done. Suppose that $\mathbb{F} = \mathbb{C}$. Note that \eqref{0A} becomes $$h(\lambda+\mu)=h(\lambda)+h(\lambda)\frac{\mu}{\lambda},$$ or $$h(\lambda+\mu)=h(\lambda)+h(\lambda)\frac{\overline{\mu}}{\overline{\lambda}}.$$ If for some $\lambda \in \mathbb{F} \setminus \mathbb{R}$ we have $h(\lambda)=h(1)\lambda$ and for some $\mu \in \mathbb{F} \setminus \mathbb{R}$ we have $h(\mu)=h(1)\overline{\mu}$ then $$h(\mu)=h(\lambda+(\mu-\lambda))=h(\lambda)+h(\lambda)\frac{\mu-\lambda}{\lambda}=h(1)\mu$$ or $$h(\mu)=h(\lambda+(\mu-\lambda))=h(\lambda)+h(\lambda)\frac{\overline{\mu}-\overline{\lambda}}{\overline{\lambda}}=h(1)\frac{\lambda}{\overline{\lambda}}\overline{\mu}.$$ In both cases we arrive at a contradiction with $\lambda, \mu \notin \mathbb{R}$. Hence $h(\lambda)=h(1)\lambda$ for every $\lambda \in \mathbb{C}$ or $h(\lambda)=h(1)\overline{\lambda}$ for every $\lambda \in \mathbb{C}$. Let $k=h(1)$ and let $A(y_0)=kf(y_0)$. Then $A(\mu y_0)=\mu A(y_0)$ or $\overline{\mu}A(y_0)$, and $A(x_0+\mu y_0)=A(x_0)+\mu A(y_0)$ or $A(x_0+\mu y_0)=A(x_0)+\overline{\mu}A(y_0)$, respectively. In the first case we extend $A$ to $X$ by $A(\lambda x_0+\mu y_0)= \lambda A(x_0+\frac{\mu}{\lambda}y_0)$, and in the second case by $\overline{\lambda} A(x_0+\frac{\mu}{\lambda}y_0)$. Such $A$ is linear or conjugate linear. From \begin{eqnarray} \Vert \lambda x_0 + \mu y_0 \Vert &=& \vert \lambda \vert \, \Vert f(x_0+\frac{\mu}{\lambda} y_0) \Vert = \vert \lambda \vert \, \Vert f(x_0)+h(\frac{\mu}{\lambda})f(y_0) \Vert \nonumber \\ &=& \vert \lambda \vert \, \Vert A(x_0+\frac{\mu}{\lambda} y_0) \Vert = \Vert A(\lambda x_0+\mu y_0) \Vert, \nonumber \end{eqnarray} we conclude that $A$ is an isometry. Finally, \begin{eqnarray} f(\lambda x_0 + \mu y_0) &=&\lambda' f(x_0+\frac{\mu}{\lambda} y_0) \nonumber \\ &=& \lambda' (\omega f(x_0) + \omega h(\frac{\mu}{\lambda}) f(y_0)) = \omega\frac{\lambda'}{\lambda} A(\lambda x_0 + \mu y_0) \nonumber \end{eqnarray} for some $\omega, \lambda' \in \mathbb{F}$ such that $\vert \omega \vert = 1$, $\vert \lambda' \vert = \vert \lambda \vert$. It remains to define $\sigma(\lambda x_0 + \mu y_0) = \omega \frac{\lambda'}{\lambda}$. \end{proof}
\begin{remark}
If $X$ is one-dimensional then $X$ is obviously smooth. Suppose that $Y$ is a smooth normed space and $f:X\to Y$ a mapping such that $|[f(x),f(y)]|=|[x,y]|$, $x,y\in X$. Let $\lambda\in\mathbb F$ and fix a unit vector $x\in X$. Analogously as in Theorem \ref{main}, we obtain $f(\lambda x)=\gamma f(x)$ for some $\gamma\in\mathbb F$, which depends on $\lambda$, and $|\gamma|=|\lambda|$. Now for $z=\lambda x$ define phase function $\sigma(z)=\gamma/\lambda$ and define a linear surjective isometry $U:X\to Y$ by $Uz=\lambda f(x)$. Then $f=\sigma U$ and we conclude that $f$ is phase equivalent to a linear surjective isometry. \end{remark}
Maksa and P\'{a}les, see \cite{Maksa}, showed that for a mapping $f:H\to K$, where $H$ and $K$ are real inner product spaces, Wigner's theorem is equivalent to the requirement that $f$ satisfies the following condition: \begin{equation}\label{phaseisometry}
\{\|f(x)+f(y)\|,\|f(x)-f(y)\|\}=\{\|x+y\|,\|x-y\|\},\quad x,y\in H. \end{equation} They asked for possible generalizations in the setting of real normed spaces, that is, if $X$ and $Y$ are real normed spaces and $f:X\to Y$ a mapping, is it true that $f$ satisfies (\ref{phaseisometry}) if and only if $f$ is phase equivalent to a linear isometry?
Recall that a normed space $X$ is said to be strictly convex whenever the unit sphere $S_X$ contains no non-trivial line segments, that is, each point of $S_X$ is an extreme point of the unit ball $B_X$.
The following proposition generalizes \cite[Theorem 2 (i) $\Leftrightarrow$ (iv) $\Leftrightarrow$ (v)]{Maksa}.
\begin{proposition} Let $X$, $Y$ be real smooth normed spaces, $Y$ strictly convex, $f:X\to Y$ surjective. The following assertions are equivalent: \begin{itemize}
\item[(i)] $|[f(x),f(y)]|=|[x,y]|$, $x,y\in X$. \item[(ii)] $f$ is phase equivalent to a linear surjective isometry.
\item[(iii)] $\{\|f(x)+f(y)\|,\|f(x)-f(y)\|\}=\{\|x+y\|,\|x-y\|\}$, $x,y\in X$. \end{itemize} \end{proposition} \begin{proof}
(i) $\Rightarrow$ (ii) is Theorem \ref{main}, and (ii) $\Rightarrow$ (iii) is obvious. It remains to prove (iii) $\Rightarrow$ (i). Setting $x=y$ in (iii), from $\{2\|f(x)\|,0\}=\{2\|x\|,0\}$ we get $\|f(x)\|=\|x\|$, $x\in X$. Inserting $2x$ and $x$ in (iii) we get
$$\{\|f(2x)+f(x)\|,\|f(2x)-f(x)\|\}=\{3\|x\|,\|x\|\},\quad x\in X.$$
Hence for $x\in X$ either $\|f(2x)+f(x)\|=3\|x\|$ or $\|f(2x)+f(x)\|=\|x\|$.
Suppose that $x\in X$ is such that $\|f(2x)+f(x)\|=3\|x\|$. Then from
$$3\|x\|=\|f(2x)+f(x)\|\leq\|f(2x)\|+\|f(x)\|=3\|x\|$$
and strict convexity of $Y$ we get $f(2x)=2f(x)$. If $\|f(2x)-f(x)\|=3\|x\|$, then, analogously, we get $f(2x)=-2f(x)$. Therefore $f(2x)=\pm 2f(x)$, $x\in X$. Let $n=2^m$. Then from $f(nx)=\pm nf(x)$, $x\in X$, we have \begin{eqnarray*}
n(\|f(x)+\tfrac{1}{n}f(y)\|-\|f(x)\|)= \|\pm f(nx)+f(y)\|-n\|f(x)\|\\
=\|nx\pm y\|-n\|x\|= n(\|x\pm\tfrac{1}{n}y\|-\|x\|), \quad y\in X. \end{eqnarray*}
Letting $n\to\infty$ and using the smoothness of $X$ and $Y$, we obtain $|[f(y),f(x)]|=|[y,x]|$ for all $x, y\in X$, and the proof is complete. \end{proof}
In the last part of the paper we consider mappings $f:X\to Y$ satisfying \begin{equation} [f(x),f(y)]=[x,y],\quad x,y\in X. \end{equation} Namely, it is easy to see that in the setting of inner product spaces any such mapping is necessarily a linear isometry.
\begin{proposition}\label{isometry} Let $X$ and $Y$ be normed spaces and $f \colon X\to Y$ a mapping such that $[f(x),f(y)]=[x,y]$, $x,y\in X$. \begin{itemize} \item[(i)] If $f$ is surjective then $f$ is a linear isometry. \item[(ii)] If $X=Y$ is a smooth Banach space then $f$ is a linear isometry. \end{itemize} \end{proposition} \begin{proof} (i). From \begin{align*} [f(\lambda x+\mu y),f(z)]&=[\lambda x+\mu y,z]=\lambda[x,z]+\mu[y,z]\\ &=\lambda[f(x),f(z)]+\mu[f(y),f(z)]=[\lambda f(x)+\mu f(y),f(z)] \end{align*} we conclude \begin{equation}\label{orth} [f(\lambda x+\mu y)-\lambda f(x)-\mu f(y),f(z)]=0 \end{equation} for all $x,y,z\in X$ and all $\lambda, \mu\in \mathbb{F}$. Since $f$ is surjective, we may choose $z$ so that $f(z)=f(\lambda x+\mu y)-\lambda f(x)-\mu f(y)$; then (\ref{orth}) gives $\|f(\lambda x+\mu y)-\lambda f(x)-\mu f(y)\|^2=0$, so $f$ is linear. Moreover, $\|f(x)\|^2=[f(x),f(x)]=[x,x]=\|x\|^2$, so $f$ is an isometry.
(ii). The proof is by contradiction. Let us denote $u=f(\lambda x+\mu y)-\lambda f(x)-\mu f(y)$ and suppose that $u\ne 0$. From (\ref{orth}) we get $f(z)\perp u$ for all $z\in X$ and because $X$ is smooth this is equivalent to $\phi_{f(z)}(u)=0$. Because of the homogeneity of orthogonality relation we may and do assume that $\|u\|=1$. From
$$\|\phi_u+\xi\phi_{f(z)}\|\geq|\phi_u(u)+\xi\phi_{f(z)}(u)|=|\phi_u(u)|=1=\|\phi_u\|$$ for all $\xi\in\mathbb{R}$ we conclude $\phi_u\perp\phi_{f(z)}$ for all $z\in X$. Homogeneity of Birkhoff-James orthogonality implies $\phi_u\perp\xi\phi_{f(z)}$ for all $z\in X$, $\xi \in \mathbb{R}$.
Furthermore,
$$\|f(z)\|\phi_{f(z)}(f(w))=[f(w),f(z)]=[w,z]=\|z\|\phi_z(w)$$ shows $$\phi_{f(z)}\circ f=\phi_z,\quad z\in X.$$
By the Bishop--Phelps theorem (see \cite{Bishop-Phelps} or a recent survey \cite{Aron-Lomonosov}), for given $\psi \in X^*$ and $\varepsilon > 0$ there exists $\theta \in X^*$, $\Vert \theta \Vert = \Vert \psi \Vert$ and $\Vert \psi - \theta \Vert < \varepsilon$, such that there exists $z \in S_X$ satisfying $\theta(z)=\Vert \theta \Vert$. Then $\pm\frac{1}{\|\theta\|}\theta$ is the support functional at $z\in S_X$. Thus $\theta=\pm\|\theta\|\phi_z\in\{\xi\phi_z: z\in X, \xi\in\mathbb{R}\}$. Hence $X^*$ is contained in the norm closure of $\{\xi\phi_z: z\in X, \xi\in\mathbb{R}\}$. Since the reverse inclusion is trivial we conclude that $X^*$ is equal to the norm closure of $\{\xi\phi_z: z\in X, \xi\in\mathbb{R}\}$. Then from $$\{\xi\phi_{f(z)}: z\in X, \xi\in\mathbb{R}\}\supseteq \{\xi\phi_{f(z)}\circ f: z\in X, \xi\in\mathbb{R}\}$$ and $\phi_{f(z)}\circ f=\phi_z$ for all $z\in X$ we conclude that $X^*$ is equal to the norm closure of $\{\xi\phi_{f(z)}: z\in X, \xi\in\mathbb{R}\}$. Then $\phi_u\perp \xi\phi_{f(z)}$ for all $z\in X$ and $\xi \in \mathbb{R}$ implies $\phi_u=0$. This shows that our assumption $u\ne 0$ is false and $f$ must be linear. This completes the proof. \end{proof}
\begin{corollary} Let $X$ and $Y$ be normed spaces, $X$ smooth and $f:X\to Y$ a mapping. If $f$ is surjective or $X=Y$ then the following assertions are equivalent: \begin{itemize} \item[(i)] $[f(x),f(y)]=[x,y]$, $x,y\in X$. \item[(ii)] $f$ is a linear isometry. \end{itemize} \end{corollary} \begin{proof} That (i)$\Rightarrow$(ii) follows by Proposition \ref{isometry}. Let us prove (ii)$\Rightarrow$(i). Take arbitrary $x,y\in X$ and set $u=f(x)$, $v=f(y)$. Then from
$$\frac{1}{t}(\|u+tv\|-\|u\|)=\frac{1}{t}(\|f(x)+tf(y)\|-\|f(x)\|)=\frac{1}{t}(\|x+ty\|-\|x\|)$$ it follows that $Y$ is also smooth and $[f(x),f(y)]=[x,y]$. \end{proof}
\end{document}
\begin{document}
\title{Asymptotics of descent functions} \date{August 2020}
\begin{abstract} In 1916, MacMahon showed that permutations in $S_n$ with a fixed descent set $I$ are enumerated by a polynomial $d_I(n)$. Diaz-Lopez, Harris, Insko, Omar, and Sagan recently revived interest in this \emph{descent polynomial}, and suggested the direction of studying such enumerative questions for other consecutive patterns (descents being the consecutive pattern $21$). Zhu studied this question for the consecutive pattern $321$. We continue this line of work by studying the case of any consecutive pattern of the form $k,k-1,\ldots,1$, which we call a \emph{$k$-descent}. In this paper, we reduce the problem of determining the asymptotic number of permutations with a certain $k$-descent set to computing an explicit integral. We also prove an equidistribution theorem, showing that any two sparse $k$-descent sets are equally likely.
Counting the number of $k$-descent-avoiding permutations while conditioning on the length $n$ and first element $m$ simultaneously, one obtains a number triangle $f_k(m,n)$ with some useful properties. For $k=3$, the $m=1$ and $m=n$ diagonals are OEIS sequences A049774 and A080635. We prove a $k$th difference recurrence relation for entries of this number triangle. This also leads to an $O(n^2)$ algorithm for computing $k$-descent functions.
Along the way to these results, we prove an explicit formula for the distribution of first elements of $k$-descent-avoiding permutations, as well as for the joint distribution of first and last elements. We also develop an understanding of discrete order statistics. In our approach, we combine algebraic, analytic, and probabilistic tools. A number of open problems are stated at the end.
\end{abstract}
\maketitle
\section{Introduction}
A permutation $w\in S_n$ is said to \emph{contain} the consecutive pattern $\pi\in S_k$ if there are consecutive indices $i,i+1,\ldots, i+k-1$ such that the relative ordering of $w(i),w(i+1),\ldots, w(i+k-1)$ is the same as the relative ordering of $\pi(1), \ldots, \pi(k)$. A permutation $w$ is said to \emph{avoid} the consecutive pattern $\pi$ if it does not contain $\pi$. The study of consecutive pattern avoidance was started by Elizalde and Noy in 2003 \cite{ELIZALDE2003110}, and has received a great amount of study since. In this paper, we are interested in a slightly different topic, namely the study of permutations containing a consecutive pattern at some fixed set of indices, continuing a line of inquiry started by MacMahon in 1916 for the case of descents \cite{macmahon}. In particular, we will be interested in the consecutive pattern $k,k-1, \ldots ,2,1$, which we call a \emph{$k$-descent}. For a permutation $w\in S_n$ and $k\geq 2$, we let $D_k(w)$ be the set of starting points of $k$-descents in $w$. By a starting point of a $k$-descent in $w$, we mean an index $i\in [n]$ such that $w(i)>w(i+1)>\cdots>w(i+k-1)$. For instance, for $k=3$ and $w=638541972\in S_9$, the set of starting points of $3$-descents in $w$ is $D_3(w)=\{3,4,7\}$. Our main objects of interest are defined as follows. \begin{definition} For $n\in \mathbb{Z}^+$ and $I\subseteq \mathbb{Z}^+$ a finite set, we let
\[\mathcal{D}_k(I,n)=\{w\in S_n\colon D_k(w)=I\}\hspace{5mm}\text{and}\hspace{5mm} d_k(I,n)=|\mathcal{D}_k(I,n)|.\] We call $d_k(I,n)$ the \emph{$k$-descent function}. \end{definition}
The case of $k=2$ has received a considerable amount of interest. MacMahon \cite{macmahon} proved that for fixed $I$, $d_k(I,n)$ is a polynomial in $n$ (for all $n$ sufficiently large); the function $d_k(I,n)$ is known as the \emph{descent polynomial}. Inspired by the work of Billey, Burdzy, and Sagan \cite{billey} on the adjacent topic of peak polynomials, which has received a large amount of further study \cite{MR3463566}\cite{DAVIS20183249}\cite{DIAZLOPEZ201721}, Diaz-Lopez, Harris, Insko, Omar, and Sagan \cite{diaz} recently revived interest in this descent polynomial. Their 2019 paper led to a number of other recent works on descent polynomials \cite{gaetz2019qanalogs}\cite{jiradilok2019roots}\cite{Oguz2019DescentPP}. They also suggested the direction of studying similar questions for other consecutive patterns (Section 6 part (1) \cite{diaz}). Zhu picked up this study \cite{zhu2019enumerating} for the case of the consecutive pattern $321$, i.e. $k=3$ in our notation.
In this paper, we will focus on the study of asymptotics of $d_k(I,n)$ for $k\geq 3$. For our purposes, it will turn out to be particularly useful to partition the set $\mathcal{D}_k(I,n)$ according to the value of the first element of the permutation. \begin{definition} For $m,n\in \mathbb{Z}^+$ with $1\leq m\leq n$ and $I\subseteq \mathbb{Z}^+$ a finite set, we let
\[\mathcal{D}_k(I,m,n)=\{w\in S_n\colon D_k(w)=I \text{ and } w(1)=m\}\hspace{5mm}\text{and}\hspace{5mm} d_k(I,m,n)=|\mathcal{D}_k(I,m,n)|.\] We call $d_k(I,m,n)$ the \emph{parametrized $k$-descent function}. \end{definition}
We will particularly care about the special case of $I=\emptyset$, which is exactly the case of consecutive pattern avoidance. We introduce the following shorthand notations to avoid notational clutter. \begin{definition} For $m,n\in \mathbb{Z}^+$ with $1\leq m\leq n$, we let \[f_k(n)=d_k(\emptyset, n)\hspace{5mm} \text{and}\hspace{5mm} f_k(m,n)=d_k(\emptyset, m,n).\]
\end{definition}
We now give an outline of our paper, stating our main results. In Section~\ref{sec:rec}, we prove a recurrence relation for $f_k(m,n)$ (Theorem~\ref{thm:mainrec}), which gives rise to a fast algorithm (thinking of $k$ as fixed, and $n,m$ as parameters) for computing $f_k(m,n)$. Along similar lines, we present a fast algorithm for computing $d_k(I,n)$ for any fixed $I$ (Theorem~\ref{thm:otherrec}). We also give a bivariate generating function for $f_3(m,n)$ (Proposition~\ref{prop:gen}), and discuss its generalization to other $k$. In Section~\ref{sec:nasy}, we review some results from the consecutive pattern avoidance literature on the asymptotics of $f_k(n)$. In Section~\ref{sec:mnasy}, we derive the asymptotics of $f_k(m,n)$, with the primary motivation being that this will be crucial in proving our other main results. However, this can also be seen as a statement about the distribution of the first element statistic among permutations avoiding $k$-descents, which in our opinion can be an interesting result in its own right. Perhaps surprisingly, in the following main theorem of Section~\ref{sec:mnasy}, we see that asymptotically, the distribution of $f_k(m,n)$ approaches an explicit smooth distribution $\varphi_k$ (under the right normalization).
\begin{reptheorem}{thm:fmnasy} For all $k\geq 3$, there is a constant $r_k$ so that for all $m,n\in \mathbb{Z}^+$ with $1\leq m\leq n$, \[\frac{nf_k(m,n)}{f_k(n)}=\varphi_k\left(\frac{m}{n}\right)\left(1+O_k\left(n^{-0.49}\right)\right),\] where \[\varphi_k\left(x\right)=\frac{1}{r_k}\left(1-\frac{(x/r_k)^{k-1}}{(k-1)!}+\frac{(x/r_k)^k}{k!}-\frac{(x/r_k)^{2k-1}}{(2k-1)!}+\frac{(x/r_k)^{2k}}{(2k)!}-\cdots \right).\] \end{reptheorem}
Using this, in Section~\ref{sec:dasy} we prove the following theorem on the asymptotics of $d_k(I,n)$ as a corollary of some more precise asymptotic results (Theorem~\ref{thm:precdasy} or Proposition~\ref{prop:niceform}):
\begin{reptheorem}{thm:dasy} For any $k\geq 3$ and finite $I\subseteq \mathbb{Z}^+$, there is a constant $c_{I,k}$ such that \[d_k(I,n)=c_{I,k} f_k(n)\left(1+O(n^{-0.49})\right).\] \end{reptheorem} In fact, the constant $c_{I,k}$ can be computed (or bounded) efficiently, as it is given by a certain integral formula. This directly settles a conjecture (Conjecture 6.5 \cite{zhu2019enumerating}) by Zhu, and lets us make partial progress towards Zhu's Down-Up-Down-Up Conjecture (Conjecture 6.2 \cite{zhu2019enumerating}). To summarize our approach to proving the above theorem in a few words, the two main ideas are that (1) counting permutations with a certain property is equivalent to finding the probability that a random permutation has a certain property, and that (2) for a certain property, this probability should approach a constant as $n\to \infty$. In Section~\ref{sec:joint}, we bootstrap from the results of Section~\ref{sec:mnasy} to get a description of the joint distribution of the first and last element of a $k$-descent avoiding permutation. Namely, the first and last element turn out to be (almost) independent -- see Theorem~\ref{thm:joint} for a precise statement. Finally, in Section~\ref{sec:equi}, we will use this joint distribution result to prove the following equidistribution theorem.
\begin{reptheorem}{thm:equi}
Fix $k\geq 3$ and $r\in \mathbb{Z}^+$. Let $n\in \mathbb{Z}^+$, $I_1,I_2\subseteq [n]$ with $|I_1|=|I_2|=r$, and no two elements of $I_1$ being closer to each other than $\sqrt{n}$, and similarly for $I_2$. Then \[\frac{d_k(I_1,n)}{d_k(I_2,n)}=1+O_{k,r}\left(n^{-\alpha}\right).\] \end{reptheorem}
Restated another way, the content of the above theorem is that for any two sparse enough $k$-descent sets $I_1,I_2$ of the same size, the number of permutations in $S_n$ with descent set $I_1$ is (almost) the same as the number of permutations with descent set $I_2$. This resolves a conjecture (Conjecture 6.1 \cite{zhu2019enumerating}) by Zhu, stated for the special case of $k=3$ and singleton $I$. On the way to proving this, we use binomial coefficient sum manipulation and the second moment method to prove a concentration result for discrete order statistics.
We finish this introduction with a remark on a simple extension of our results. \begin{remark} Taking complements (the complement of $w\in S_n$ is $w^c\in S_n$ defined by $w^c(i)=n+1-w(i)$), one obtains results analogous to Theorem~\ref{thm:fmnasy}, Theorem~\ref{thm:dasy}, Theorem~\ref{thm:joint}, and Theorem~\ref{thm:equi} for the consecutive pattern $1,2,\ldots, k$. \end{remark}
\section{A recurrence relation for descent functions}\label{sec:rec}
We begin by giving an outline of this section. In Subsection~\ref{subsec:simprec}, we state and prove Theorem~\ref{thm:mainrec}, giving a simple recurrence relation for $f_k(m,n)$. Next, in Subsection~\ref{subsec:fast}, we discuss how this recurrence allows fast computation of $f_k(m,n)$. In Subsection~\ref{subsec:heuristic}, we take some time off to have a strictly heuristic discussion of what one would expect for the distribution of $f_k(m,n)$ just from the recurrence in Theorem~\ref{thm:mainrec}.
We come back to the rigorous path in Subsection~\ref{subsec:general}, where we state and prove Theorem~\ref{thm:otherrec}, which is a generalization of Theorem~\ref{thm:mainrec} for $\mathcal{D}_k(I,m,n)$ with any $I$ (Theorem~\ref{thm:mainrec} is the case of $I=\emptyset$). As before, we show how this allows fast computation of $\mathcal{D}_k(I,m,n)$. We finish this section with some discussion of generating functions in Subsection~\ref{subsec:generating}.
\subsection{A recurrence relation for $f_k(m,n)$}\label{subsec:simprec} We start by defining a familiar function.
\begin{definition} Let $\mathcal{X}$ be the set of all finite length sequences of reals (including the empty sequence). We define the \emph{difference operator} $\Delta\colon \mathcal{X}\to \mathcal{X}$. For $n\geq 2$, $\Delta$ is given by \[\Delta((a_1,a_2,\ldots, a_n))=(a_2-a_1, a_3-a_2, \ldots, a_{n}-a_{n-1}),\] and we adopt the convention that for $n=1$ and $n=0$, $\Delta(A)=(\hspace{1mm})$, the empty sequence.
We say that the \emph{$k$th difference of $A$} is $\Delta^k(A)$, i.e., the $k$th iterate of the function $\Delta$ applied to $A$. \end{definition}
We define multiplication of sequences by constants and addition of sequences of the same length componentwise, i.e., like vectors. It will be useful to note for later that the $k$th difference is linear: \begin{itemize}
\item for any $A\in \mathcal{X}$ and scalar $c\in \mathbb{Z}$, $\Delta^k(cA)=c\Delta^k(A)$;
\item for any $A,B\in \mathcal{X}$ of the same length, $\Delta^k(A+B)=\Delta^k(A)+\Delta^k(B)$. \end{itemize}
We now state the main recurrence theorem. \begin{theorem}\label{thm:mainrec} For any integers $k\geq 2$ and $n\geq 1$, \[\Delta^k\left(\left(f_k(1,n+k),f_k(2,n+k),\ldots, f_k(n+k,n+k)\right)\right)=\left(f_k(1,n),f_k(2,n),\ldots,f_k(n,n)\right).\] \end{theorem}
For the proof, we start from the following more complicated recursive formula. \begin{proposition}\label{prop:sum} For any $k\geq 2$ and any integers $m,n\in \mathbb{Z}^+$ with $1\leq m\leq n+k$, \[f_k(m,n+k)=f_k(n+k-1)-\left(\sum_{u=1}^{n}f_k(u,n)\binom{m-1}{k-1}-\sum_{u=1}^{\min(m-1,n)}f_k(u,n)\binom{m-1-u}{k-1}\right).\] \end{proposition}
\begin{proof}
For $w\in \mathcal{D}_k(\emptyset,m,n+k)$, the first element of $w$ is $m$, and the restriction of $w$ to the last $n+k-1$ indices is an element of $\mathcal{D}_k\left(\emptyset, n+k-1\right)$. However, not every element of $\mathcal{D}_k(\emptyset,n+k-1)$ can be inserted here -- the elements that cannot be inserted are precisely those that start with a decreasing sequence of length $k-1$ starting from $m'<m$. Writing this out explicitly, we have that $f_k(m,n+k)=f_k(n+k-1)-|\mathcal{A}|$, where \[\mathcal{A}=\{v\in \mathcal{D}_k\left(\emptyset, n+k-1\right)\colon m>v(1)>v(2)>\cdots >v(k-1)\}.\]
Fixing some $u\in [n]$, we now consider the number of elements $v\in \mathcal{A}$ such that the restriction of $v$ to the last $n$ indices starts with $u$. We can construct any such element uniquely by choosing the restriction to the last $n$ indices, for which there are $f_k(u,n)$ options, and then choosing values for the initial decreasing subsequence of length $k-1$. If $u\geq m$, then any set of values strictly less than $m$ will be a suitable choice of values for the initial decreasing subsequence of length $k-1$, so the number of options for the subsequence is $\binom{m-1}{k-1}$. If $u<m$, then the number of options for values of the decreasing subsequence in $v$ such that $v(1)<m$ is still $\binom{m-1}{k-1}$, but out of those options, exactly $\binom{m-u-1}{k-1}$ give a decreasing subsequence of length $k$ in $v$ as well (here we use the fact that $v(k-1)>v(k)\iff v(k-1)> u$), and hence will give a permutation not in $\mathcal{A}$. Putting these cases together and summing over $u$, we get
\[|\mathcal{A}|=\sum_{u=1}^{n}f_k(u,n)\binom{m-1}{k-1}-\sum_{u=1}^{\min(m-1,n)}f_k(u,n)\binom{m-1-u}{k-1},\] from which \[f_k(m,n+k)=f_k(n+k-1)-\left(\sum_{u=1}^{n}f_k(u,n)\binom{m-1}{k-1}-\sum_{u=1}^{\min(m-1,n)}f_k(u,n)\binom{m-1-u}{k-1}\right),\] which is the desired formula. \end{proof}
Theorem~\ref{thm:mainrec} follows with some sum manipulation. \begin{proof}[Proof of Theorem~\ref{thm:mainrec}] By Proposition~\ref{prop:sum} and linearity of $\Delta^k$,
\begin{align*}
\Delta^k\left((f_k(m,n+k))_{m=1,\ldots,n+k}\right)&=\Delta^k\left((f_k(n+k-1))_{m=1,\ldots,n+k}\right)\\
&\hphantom{=}-\sum_{u=1}^{n}f_k(u,n)\Delta^k\left(\left(\binom{m-1}{k-1}\right)_{m=1,\ldots,n+k}\right)\\
&\hphantom{=}+\Delta^k\left(\left(\sum_{u=1}^{\min(m-1,n)}f_k(u,n)\binom{m-1-u}{k-1}\right)_{m=1,\ldots,n+k}\right). \end{align*}
The first term is a constant sequence, so its $k$th difference is the all $0$s sequence $\mathbf{0}$. The second term is $\mathbf{0}$ as well, since \[\left(\binom{m-1}{k-1}\right)_{m=1,\ldots n+k}\xrightarrow{\Delta}\left(\binom{m-1}{k-1}\right)_{m=1,\ldots n+k-1}\xrightarrow{\Delta}\cdots \xrightarrow{\Delta}\left(\binom{m-1}{0}\right)_{m=1,\ldots n+1}\xrightarrow{\Delta} \mathbf{0}.\] Here, we used the binomial coefficient identity $\binom{m}{k-1}=\binom{m-1}{k-1}+\binom{m-1}{k-2}$. For the third (final) sum, a similar thing happens for all terms except the last summand. We start by finding the first difference: \[\Delta\left(\left(\sum_{u=1}^{\min(m-1,n)}f_k(u,n)\binom{m-1-u}{k-1}\right)_{m=1,\ldots,n+k}\right)\]\[=\left(\left(\sum_{u=1}^{\min(m-1,n)}f_k(u,n)\binom{m-1-u}{k-2}\right)+\mathbbm{1}_{m\leq n}f_k(m,n)\binom{0}{k-1}\right)_{m=1,\ldots,n+k-1}.\] Note that $\binom{0}{k-1}=0$, so we are left with just the sum term. As we take successive differences (formally by induction), this pattern continues: after taking $\ell<k$ finite differences, we get the same sum with $k-1-\ell$ replacing $k-1$ in the binomial coefficients, as well as a leftover term of $\binom{0}{k-\ell}$, which is $0$. In particular, after doing this $k-1$ times, we have $k-1$ replaced by $0$ in the binomial coefficients, which means that the matching index terms cancel with the next $\Delta$. The only nonzero contribution comes from the leftover term of $f_k(m,n)\binom{0}{0}=f_k(m,n)$. Hence, we arrive at the desired \[\Delta^k\left((f_k(m,n+k))_{m=1,\ldots,n+k}\right)=\left(f_k(m,n)\right)_{m=1,\ldots,n}.\] \end{proof}
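Before turning to the computational use of Theorem~\ref{thm:mainrec}, we note that it is easy to confirm on small cases by brute force; the following minimal Python sketch (with ad hoc helper names) checks the case $k=3$, $n=4$.
\begin{verbatim}
from itertools import permutations

def row(k, n):
    # [f_k(1,n), ..., f_k(n,n)] by direct enumeration (small n only)
    counts = [0] * n
    for w in permutations(range(1, n + 1)):
        if any(all(w[i + j] > w[i + j + 1] for j in range(k - 1))
               for i in range(n - k + 1)):
            continue  # w contains a k-descent
        counts[w[0] - 1] += 1
    return counts

def delta(a):
    # the difference operator (a_2 - a_1, ..., a_n - a_{n-1})
    return [a[i + 1] - a[i] for i in range(len(a) - 1)]

k, n = 3, 4
a = row(k, n + k)          # row n + k of the triangle
for _ in range(k):
    a = delta(a)
print(a == row(k, n))      # True, as the theorem predicts
\end{verbatim}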
\subsection{Fast computation of $f_k(m,n)$}\label{subsec:fast} Theorem~\ref{thm:mainrec} lets us compute the values of $f_k(m,n)$ for all $m,n$ with $1\leq m\leq n\leq N$, in $O(k N^2)$ arithmetic operations. That is, we are computing $\Theta(N^2)$ numbers in $O_k(N^2)$ operations. We think of these $f_k(m,n)$ as forming a triangle of integers, with the first row containing $f_k(1,1)$, the second containing $f_k(1,2), f_k(2,2)$, and so on. See Figure~\ref{fig:triangle} for a picture of this layout, and Figure~\ref{fig:fmn} for this triangle for $k=3$. The first $k$ rows are easy to find; namely, for any $1\leq m\leq n\leq k$ except $n=m=k$, $f_k(m,n)=(n-1)!$, and $f_k(k,k)=(k-1)!-1$ (so for $n\leq k$, each row can be computed with one multiplication from the previous row). To find subsequent rows one by one, we use the recurrence from Theorem~\ref{thm:mainrec}. Namely, when finding the $(n+k)$th row, we look at the $n$th row, which forms its $k$th difference. We now compute $k$ antidifferences (inverse finite differences) successively. We think of this as first laying out (in a new triangle) the $n$th row, which is the $k$th difference. We now find the $(k-1)$th difference in the row below it, by noting that the last entry in the $(k-1)$th difference sequence is $0$ (one can observe this in the proof of Theorem~\ref{thm:mainrec}, noting that the sums cancel), and then filling out the rest of the terms using the row above which is its first difference. For instance, the $n$th element of the second row is the last element of the second row minus the $n$th entry in the row above; the $(n-1)$th element of the second row is the $n$th element of the second row minus the $(n-1)$th entry in the row above; and so on, filling the second row backwards. We then repeat for the third row, using the fact that the second row is its first difference, except now we start from the first term being $0$ -- this follows from the fact that $f_k(1,n+k)=f_k(2,n+k)=\cdots=f_k(k-1,n+k)=f(n+k-1)$. For the fourth row, the first element is again $0$, and we fill it out using the third row as its first difference. In fact, the first element is $0$ for the third up to $k$th row, and we fill these out one by one. Each time, we only need $\leq n+k$ additions to find the terms in a row. For the $(k+1)$th row, we have that the first element is $f_k(1,n+k)=f_k(n+k-1)$ (this is easy to see combinatorially, by noting that if the first element is $1$, the restriction to the last $n+k-1$ elements can be any permutation in $\mathcal{D}_k(\emptyset,n+k-1)$) which can be computed as the sum of the elements of the previous row of our triangle, but the rest of the procedure is the same as for previous rows. All in all, given previous rows of the triangle, we have computed the $f_k(1,n+k),f_k(2,n+k),\ldots, f_k(n+k,n+k)$ row of our triangle in $\Theta(nk)$ additional arithmetic operations. For the case $k=3$, this process is depicted in Figure~\ref{fig:comp}.
\begin{figure}
\caption{It can be convenient to think of $f_k(m,n)$ as occupying such a triangle.}
\label{fig:triangle}
\end{figure}
\begin{figure}
\caption{The triangle from Figure~\ref{fig:triangle} for $k=3$}
\label{fig:fmn}
\end{figure}
\begin{figure}
\caption{For $k=3$ and $n=5$, example computation of the $(n+3)$th row (row $8$ of the triangle) given the $n$th row and the sum of the $(n+2)$th row. For each row, the first entry that is assigned to it is in red. The rest of the entries are computed by taking the antidifference of the row above. This gives that the next row of the triangle in Figure~\ref{fig:fmn} is $2017,2017,1947,1824,1665,1485,1296,1107$.}
\label{fig:comp}
\end{figure}
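The procedure just described is short enough to implement directly; the following Python sketch (a minimal illustration, with ad hoc names) builds the triangle row by row. For $k=3$, the sums of rows $7$ and $8$ come out to $2017$ and $13358$, matching the row displayed in Figure~\ref{fig:comp}.
\begin{verbatim}
from math import factorial

def triangle(k, N):
    # rows[n] = [f_k(1,n), ..., f_k(n,n)] for n = 1..N (rows[0] unused)
    rows = [None] * (N + 1)
    for n in range(1, min(k, N) + 1):       # initial rows: (n-1)! everywhere,
        rows[n] = [factorial(n - 1)] * n    # except f_k(k,k) = (k-1)! - 1
    if k <= N:
        rows[k][k - 1] -= 1
    for n in range(1, N - k + 1):           # build row n+k from row n
        diff = rows[n]                      # k-th difference of row n+k
        prev = [0] * (n + 1)                # (k-1)-th difference: last entry 0,
        for i in range(n - 1, -1, -1):      # filled backwards
            prev[i] = prev[i + 1] - diff[i]
        diff = prev
        for _ in range(k - 2):              # lower differences: first entry 0,
            prev = [0] * (len(diff) + 1)    # filled forwards
            for i in range(len(diff)):
                prev[i + 1] = prev[i] + diff[i]
            diff = prev
        row = [sum(rows[n + k - 1])] + [0] * (n + k - 1)  # f_k(1,n+k) = f_k(n+k-1)
        for i in range(n + k - 1):
            row[i + 1] = row[i] + diff[i]
        rows[n + k] = row
    return rows

rows = triangle(3, 8)
print(sum(rows[7]), sum(rows[8]), rows[8])  # 2017 13358 [2017, 2017, 1947, ...]
\end{verbatim}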
\subsection{A heuristic calculation of the distribution of $f_k(m,n)$}\label{subsec:heuristic} In this subsection, we will make a non-rigorous calculation for the distribution of $f_k(m,n)$. For this subsection only, let's assume that $f_k(m,n)$ is asymptotically given by some distribution, in the sense that there is $\varphi_k\colon [0,1]\to \mathbb{R}$ such that $f_k(m,n)$ is close to $\frac{1}{n}\varphi_k\left(\frac{m}{n}\right) f_k(n)$. Note that if $f_k(m,n)$ is to approach some continuous distribution on $[0,1]$, then the normalization $\frac{1}{n}$ is needed in this statement, as the entire mass $f_k(n)$ is divided between the $n$ values $1,\ldots, n$ for $m$. Geometrically, if the values $\frac{f_k(m,n)}{f_k(n)}$ are supposed to approach areas of columns of width $\frac{1}{n}$ under some density function $\varphi_k$ supported on $[0,1]$, then the area of a column should be $\frac{1}{n}\varphi_k\left(\frac{m}{n}\right)$.
Under this assumption for the distribution of $f_k(m,n)$, let's see what we can heuristically make of the fact that this distribution should be stable under the finite difference recurrence in Theorem~\ref{thm:mainrec}. Each finite difference can (heuristically) be approximated by the derivative times the gap $\frac{1}{n}$. So heuristically and ignoring terms we anticipate to be lower order, e.g. treating $\frac{1}{n-k}$ as essentially $\frac{1}{n}$, we get \[\frac{d^k \varphi_k(x)}{(dx)^k}\frac{1}{n^k}f_k(n)\approx \varphi_k(x)f_k(n-k).\]
From previous work (see Section~\ref{sec:nasy} and Theorem~\ref{thm:nasy} in particular), it is known that $\frac{f_k(n)}{f_k(n-k)}\approx r_k^k n^k$ for some constant $r_k>0$. Plugging this into our heuristic calculation, we get \[\frac{d^k \varphi_k(x)}{(dx)^k}\approx \frac{1}{r_k^k}\varphi_k(x).\] Note that \[\frac{d^k \varphi_k(x)}{(dx)^k}= \frac{1}{r_k^k}\varphi_k(x)\] is a $k$th order differential equation. The boundary conditions of the recurrence given in Subsection~\ref{subsec:fast} suggest the boundary conditions $\varphi'(0)=0, \varphi''(0)=0,\ldots, \varphi^{(k-2)}(0)=0$, and $\varphi^{(k-1)}(1)=0$, and there is also the normalization $\int_{0}^1\varphi_k(x) dx=1$. We have a $k$th order equation with $k$ boundary conditions, so we would expect there to be a unique solution, although even this would require work to show rigorously.
But for instance for $k=3$, there is indeed a unique solution, and this turns out to be exactly what we will later (in Theorem~\ref{thm:fmnasy}) rigorously find to be the distribution of $f_3(m,n)$. For any $k$, what we will rigorously find to be the distribution of $f_k(m,n)$ satisfies this differential equation with these boundary conditions (so if the solution is unique, then the unique solution of this differential equation is indeed the distribution of $f_k(m,n)$). However, our proof of the distribution theorem (Theorem~\ref{thm:fmnasy}) in Section~\ref{sec:mnasy} is along very different lines. It remains open whether the approach described in this subsection can somehow be made rigorous. Perhaps one could define some operators on a suitable space of functions, and show convergence to a fixed point. However, we have not been able to carry this out.
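Although we do not make this heuristic rigorous, it is straightforward to check numerically that the explicit density $\varphi_k$ of Theorem~\ref{thm:fmnasy} is compatible with it. The following Python sketch (illustrative only) does this for $k=3$, using the value of $r_3$ from Section~\ref{sec:nasy}: it differentiates a high truncation of the series for $\varphi_3$ term by term and checks the differential equation, the boundary conditions, and the normalization up to truncation and quadrature error.
\begin{verbatim}
from math import factorial, sqrt, pi

K = 3
R = 3 * sqrt(3) / (2 * pi)      # r_3, so that 1/r_3 = 2*pi/(3*sqrt(3))

def phi(x, order=0, k=K, r=R, terms=15):
    # order-th derivative of a high truncation of the candidate density phi_k
    u = x / r
    exps = [0] + [e for j in range(1, terms) for e in (j * k - 1, j * k)]
    sgns = [1] + [s for j in range(1, terms) for s in (-1, 1)]
    tot = sum(s * u ** (e - order) / factorial(e - order)
              for e, s in zip(exps, sgns) if e >= order)
    return tot / r ** (order + 1)

# phi_k^{(k)} = phi_k / r_k^k at a few interior points (residual ~ 0):
print(max(abs(phi(x, K) - phi(x) / R ** K) for x in (0.1, 0.5, 0.9)))
# boundary conditions phi'(0) = 0 and phi^{(k-1)}(1) = 0 (up to truncation):
print(phi(0.0, 1), phi(1.0, K - 1))
# normalization: the integral over [0, 1] is 1 (midpoint rule):
M = 20000
print(sum(phi((i + 0.5) / M) for i in range(M)) / M)
\end{verbatim}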
\subsection{The case of general $I$}\label{subsec:general}
In fact, a similar line of reasoning as in Subsection~\ref{subsec:simprec} works for any $k$-descent set $I$, as long as the part of the permutation where the recurrence is derived is away from $I$. To make this work, we flip our permutations to bring all the $k$-descents to the end, and get the recurrence from the first $k$ indices. We start with the following definition.
\begin{definition} For $I=\{i_1,\ldots, i_\ell\}\subseteq \mathbb{Z}^+$ and $n\geq \max(I)+k-1$, we define the \emph{$n$-reverse of $I$} to be \[r_n(I)=\{n+2-k-i_1,\ldots, n+2-k-i_\ell\}.\] \end{definition}
We have the following simple observation, with the proof omitted. (Reminder: $D_k(w)$ is the set of starting indices of $k$-descents in $w$.)
\begin{observation} For $w\in S_n$ and letting $\mathrm{rc}(w)\in S_n$ denote the reverse-complement of $w$, i.e. $\left(\mathrm{rc}(w)\right)(i)=n+1-w(n+1-i)$, we have \[D_k(\mathrm{rc}(w))=r_n(D_k(w)).\] \end{observation}
The above observation together with the fact that $\mathrm{rc}\colon S_n\to S_n$ is a bijection prove the following remark. \begin{remark}\label{rmk:bij} For $k\in \mathbb{Z}^+$, finite $I\subseteq \mathbb{Z}^+$, and $n\geq \max(I)+k-1$, \[d_k(I,n)=d_k(r_n(I),n).\] \end{remark}
With this notation, we can state an analog of Theorem~\ref{thm:mainrec} for arbitrary $I$.
\begin{theorem}\label{thm:otherrec} For $k\geq 2$, finite $I\subseteq \mathbb{Z}^+$, and $n\geq \max(I\cup \{0\})+k-1$, \[\Delta^k\left(\left(d_k(r_{n+k}(I),1,n+k),d_k(r_{n+k}(I),2,n+k)\ldots, d_k(r_{n+k}(I),n+k,n+k)\right)\right)\]\[=\left(d_k(r_n(I),1,n),d_k(r_n(I),2,n),\ldots,d_k(r_n(I),n,n)\right).\] \end{theorem}
\begin{proof} The proof is essentially identical to the proof of Theorem~\ref{thm:mainrec}. \end{proof}
\subsection{Fast computation of $d_k(I,n)$}\label{subsec:genfast} Given Theorem~\ref{thm:otherrec}, one can repeat the argument in Subsection~\ref{subsec:fast} to get a $\Theta_{k,I}(N^2)$ algorithm for computing $d_k(r_n(I),n)$, and hence for computing $d_k(I,n)$ as well, as these are equal by Remark~\ref{rmk:bij}. However, with $t:=\max(I)+k-1$, Theorem~\ref{thm:otherrec} only allows for fast computation once all $d_k(r_n(I),m,n)$ are found for $t\leq n\leq t+k-1$. If these numbers were found naively by checking all permutations, it would take more than $t!$ time, which could be the bottleneck for practical purposes. However, the computation of these initial values can also be done much faster using a dynamic programming approach. Namely, one can start from $n=1$ and go up to $n=t+k-1$ doing the following. For each $n$, $m\leq n$, and $\ell\leq k$, we find the number of permutations in $S_n$ that start with $m$, have an initial decreasing sequence of length $\ell$ (and no initial decreasing sequence of length $\ell+1$), and do not violate the prescribed descent structure so far. These values for $n+1$ can each be found as a sum of values for $n$. For instance, the only way to have an initial decreasing sequence of length $\ell+1$ starting at $m$ is to concatenate a new larger value to the start of a decreasing sequence of length $\ell$ starting from $m'<m$ in the relative ordering on the last $n$ elements. We will leave the details of figuring out the general case to the interested reader, as it is our opinion that this is easier to understand by giving it some thought, rather than by reading a formal description (for instance, we would need to introduce some new notation just to formally say what it means for a permutation to not violate the $k$-descent structure $I$ for $n<t$).
\subsection{Generating functions for $f_k(m,n)$}\label{subsec:generating} In this subsection, we discuss generating functions. We are mainly interested in describing the ordinary generating function (o.g.f.) of $f_k(m,n)$. \begin{definition} For $k\geq 2$, we let $T_k(x,y)$ be the ordinary generating function for $f_k(m,n)$: \[T_k(x,y)=\sum_{m,n\geq 1}f_k(m,n)x^m y^n.\] \end{definition}
To translate Theorem~\ref{thm:mainrec} into the language of generating functions, we will use the following well-known lemma.
\begin{lemma} Let $k,n\in \mathbb{Z}^+$. Suppose $\Delta^k\left(\left(a_1,\ldots, a_{n+k}\right)\right)=\left(b_1, \ldots, b_n\right).$ Then for any $m\in [n]$, \[b_m=\sum_{i=0}^k (-1)^{k-i}\binom{k}{i}a_{m+i}.\] \end{lemma}
\begin{proof} This is true by induction on $k$. The base case $k=1$ is trivial, and the inductive step is just $\binom{k}{i}+\binom{k}{i-1}=\binom{k+1}{i}$. \end{proof}
Applying this to the expression in Theorem~\ref{thm:mainrec} and multiplying everything by $x^{m+k} y^{n+k}$, we arrive at the following identity.
\begin{lemma}\label{lem:gen} For any $k\geq 3$, \[f_k(m,n)x^{m+k} y^{n+k}=\sum_{i=0}^k (-1)^{k-i}\binom{k}{i}f_k(m+i,n+k)x^{m+k}y^{n+k}.\] \end{lemma}
Summing this over all pairs $(m,n)\in \mathbb{Z}^+\times\mathbb{Z}^+$ with $1\leq m\leq n$, we get $T_k(x,y)x^k y^k$ on the left-hand side. As for the terms appearing on the right-hand side, these have the form $\sum_{1\leq m\leq n} f_k(m+i,n+k)x^{m+k}y^{n+k}$. This is almost equal to $T_k(x,y)x^{k-i}$, except that all terms with $m\leq i$ or $n\leq k$ or $m\geq n-k+i+1$ are missing. To deal with these missing terms, we proceed with a few more definitions.
\begin{definition} For $k\geq 2$, we define $F_k(y)$ to be the ordinary generating function for $f_k(n)$: \[F_k(y)=\sum_{n\geq 1}f_k(n)y^n.\] We also define $G_{k,\ell}(z)$ to be the ordinary generating function for $f_k(n+1-\ell,n)$: \[G_{k,\ell}(z)=\sum_{n\geq \ell+1}f_k(n+1-\ell,n)z^{n-1}.\] \end{definition}
The choices of indices in the definition of $G_{k,\ell}(z)$ may look strange, but this will be a convenient choice for later. To see how all the missing terms can be written in terms of these generating functions, first recall from before that $f_k(1,n)=f_k(2,n)=\cdots=f_k(k-1,n)=f_k(n-1)$ and $f_k(k,n)=f_k(n-1)-f_k(n-k)$. For each fixed value of $m\leq k$, consider the sum of missing terms with this $m$. Because of the fact we just recalled, any such sum can be written as a polynomial in $x,y$ times $F_k(y)$. For the missing terms with $m\geq n-k+1$, one can write these as a polynomial in $x,y$ times $G_{k,n+1-m}(xy)$. As for the missing terms with $n\leq k$, there are only finitely many, so these can be subtracted as a polynomial. Carrying all this out explicitly (e.g. for some particular $k$, such as $k=5$) is a huge mess, as for instance one needs to make sure that terms with $n\leq k$ only get subtracted once (so really these need to be added back in according to how many times each term is overcounted by the $m\leq k$ and $m\geq n-k+1$ sums). Nevertheless, even without computing all the coefficients explicitly, this argument gives us a functional equation for $T_k(x,y)$ of the following form: \[T_k(x,y)\left(x^k y^k -(1-x)^k\right)=P_k(x,y)F_k(y)+\sum_{\ell=1}^k Q_{k,\ell}(x,y)G_{k,\ell}(xy)+R_k(x,y),\] where $P_k(x,y)$, $Q_{k,\ell}(x,y)$ (for any $\ell\in [k]$), and $R_k(x,y)$ are polynomials, and we used the fact that the coefficients of $(1-x)^k=\sum_{i=0}^k (-1)^{k-i}\binom{k}{i}x^{k-i}$ match those in Lemma~\ref{lem:gen}. As $x^k y^k -(1-x)^k$ is invertible, we get a functional equation for $T_k(x,y)$.
\begin{proposition}\label{prop:genf} For any $k\geq 3$, there are polynomials $P_k(x,y)$, $Q_{k,1}(x,y),\ldots, Q_{k,k}(x,y)$, and $R_k(x,y)$ so that \[T_k(x,y)=\frac{P_k(x,y)F_k(y)+\sum_{\ell=1}^k Q_{k,\ell}(x,y)G_{k,\ell}(xy)+R_k(x,y)}{x^k y^k-(1-x)^k}.\] \end{proposition}
We will now specialize to the case $k=3$. We let $g_3(n)$ be the number of permutations in $S_n$ with no $3$-descents and no initial descent. Noting that $f_3(n,n)=g_3(n-1)$ (since the $3$-descent-avoiding permutations starting with $n$ are precisely concatenations of $n$ with a permutation on $n-1$ elements that avoids $3$-descents and does not start with a descent), we get that $G_{3,1}$ defined before is also precisely the ordinary generating function for $g_3(n)$, which we will denote $G_3$ from now on for convenience:
\[G_3(z)=G_{3,1}(z)=\sum_{n\geq 1}g_3(n)z^n.\] This was the motivation for the choice of indexing before. As a side remark, the facts that $f_k(1,n)=f_k(n-1)$ and $f_3(n,n)=g_3(n-1)$ provide another reason to think that the triangle of numbers $f_k(m,n)$ is nice -- namely, for $k=3$, the diagonal of first elements of rows is the sequence $f_3(n)$ (sequence A049774 in OEIS), and the diagonal of last elements of rows is the sequence $g_3(n)$ (sequence A080635 in OEIS), both of which are well-studied.
The following lemma will help us get a more explicit equation for $T_3(x,y)$.
\begin{lemma} For any $n\geq 4$, $f_3(n,n)=g_3(n-1)$, $f_3(n-1,n)=g_3(n-1)+g_3(n-2)$, and $f_3(n-2,n)=g_3(n-1)+2g_3(n-2)$. \end{lemma}
\begin{proof} The first claim was proved earlier. As for $f_3(n-1,n)$, any permutation starting with $n-1$ and having a $3$-descent-avoiding restriction to the last $n-1$ indices that does not start with a descent is counted by $f_3(n-1,n)$, and there are $g_3(n-1)$ such permutations. The only other permutations counted by $f_3(n-1,n)$ start with $n-1$ and have $n$ as the second element, in which case there are $g_3(n-2)$ options for the restriction to the last $n-2$ elements. So $f_3(n-1,n)=g_3(n-1)+g_3(n-2)$.
For $f_3(n-2,n)$, any permutation starting with $n-2$ and having a $3$-descent-avoiding restriction to the last $n-1$ indices that does not start with a descent is counted by $f_3(n-2,n)$, and there are $g_3(n-1)$ such permutations. Any other permutation counted by $f_3(n-2,n)$ has either $n$ or $n-1$ as the second element (and the third element less than the second). The first case is counted by $g_3(n-2)$ as before. For the second case, the third element cannot be $n$, so the third and fourth element must not form a descent (and this is sufficient as well) so this case is also counted by $g_3(n-2)$. Hence, $f_3(n-2,n)=g_3(n-1)+2g_3(n-2)$.
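The identities in the lemma are also easy to confirm by direct enumeration for small $n$; the following Python sketch (illustrative only) does so for $4\leq n\leq 7$.
\begin{verbatim}
from itertools import permutations

def count(n, first=None, no_initial_descent=False):
    # permutations of [n] avoiding a consecutive 321, optionally with a fixed
    # first element (giving f_3(first, n)) or with no initial descent (g_3(n))
    total = 0
    for w in permutations(range(1, n + 1)):
        if first is not None and w[0] != first:
            continue
        if no_initial_descent and n >= 2 and w[0] > w[1]:
            continue
        if any(w[i] > w[i + 1] > w[i + 2] for i in range(n - 2)):
            continue
        total += 1
    return total

for n in range(4, 8):
    g1 = count(n - 1, no_initial_descent=True)   # g_3(n-1)
    g2 = count(n - 2, no_initial_descent=True)   # g_3(n-2)
    print(count(n, first=n) == g1,
          count(n, first=n - 1) == g1 + g2,
          count(n, first=n - 2) == g1 + 2 * g2)
\end{verbatim}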
The above lemma implies that for $k=3$, the $m\geq n-k+1$ missing terms can all be expressed in terms of $G_3$ alone. We worked our way through this calculation, figuring out these polynomials explicitly. The result is the following.
\begin{proposition}\label{prop:gen} \[T_3(x,y)=\frac{P(x,y)F_3(y)+Q(x,y)G_3(xy)+R(x,y)}{x^3 y^3-(1-x)^3},\]
where $P,Q,R$ are the following polynomials in $x$ and $y$: \[P(x,y)=xy(x^2y^2-(1-x)^2),\] \[Q(x,y)=(x-1)x^2y(xy+x-1),\]
\[R(x,y)=(x-1)xy\left((x-1)^2-x^2y^2\right).\] \end{proposition}
For $k>3$, it remains open whether it is possible to reduce $G_{k,\ell}$ to some small number of generating functions, and whether the coefficient polynomials appearing in Proposition~\ref{prop:genf} can be explicitly understood.
\section{Asymptotics of $f_k(n)$}\label{sec:nasy}
In this section, we discuss the asymptotics of $f_k(n)$ for $k$ fixed and $n\to \infty$. This section is mostly review of work by other authors and well-known methods. In later sections, we will mostly use the following theorem which is a special case of Corollary 1.4. in \cite{kitaev}.
\begin{theorem}[Ehrenborg-Kitaev-Perry \cite{kitaev}]\label{thm:nasy} For $k\in \mathbb{Z}$, $k\geq 2$, there are $c_k,r_k,\gamma_k\in \mathbb{R}$ with $0<r_k$, $0<c_k$, and $0\leq \gamma_k<1$, such that \[f_k(n)=n!c_k r_k^n\left(1+O_k(\gamma_k^n)\right).\] \end{theorem}
\subsection{The value of $r_3$}
In this subsection, we re-derive Theorem~\ref{thm:nasy} for the special case $k=3$, both to find the value of $r_3$ and as an exposition of a nice analytic method for finding asymptotics of generating functions. The reader is referred to \cite{flajolet} for a much more general overview of this method.
We start with the following exponential generating function (e.g.f.) for $f_3(n)$ (OEIS sequence A049774, e.g.f. given by Noam Elkies \cite{oeis}): \[B(x):=\frac{\sqrt{3}}{2}\frac{e^{x/2}}{\sin\left(\frac{\sqrt{3}}{2}x+\frac{2}{3}\pi\right)}.\]
That is, $f_3(n)$ is $n!$ times the $x^n$ coefficient of $B(x)$. Note that $B(x)$ is a meromorphic function with poles at $x=\frac{2\pi}{3\sqrt{3}}+\ell \frac{2\pi}{\sqrt{3}}$ for all $\ell\in \mathbb{Z}$, and these poles are simple. The two poles with smallest magnitudes are $x_1=\frac{2\pi}{3\sqrt{3}}$ and $x_2=\frac{-4\pi}{3\sqrt{3}}$. For reasons soon to be apparent, we will want to multiply $B(x)$ with a function that cancels out the pole at $x_1$, getting a function which is holomorphic in a disk around $0$ containing $x_1$. We define \[A_1(x)=\frac{\sqrt{3}}{2}\frac{e^{x/2}}{\sin\left(\frac{\sqrt{3}}{2}x+\frac{2}{3}\pi\right)}\left(\frac{2\pi}{3\sqrt{3}}-x\right).\]
Getting rid of the pole at $x_2$ as well, we further define \[A_2(x)=\frac{\sqrt{3}}{2}\frac{e^{x/2}}{\sin\left(\frac{\sqrt{3}}{2}x+\frac{2}{3}\pi\right)}\left(\frac{2\pi}{3\sqrt{3}}-x\right)\left(\frac{-4\pi}{3\sqrt{3}}-x\right).\]
We can use this to express $b_n$, the $x^n$ coefficient of $B(x)$, via $A_1(x_1)$. We pick some $R\in \mathbb{R}$ with $|x_2|>R>|x_1|$, so $A_1$ is holomorphic in the disk of radius $R$ around $0$. In this disk, we can write $A_1(x_1)$ as a power series: \[A_1(x_1)=\sum_{i=0}^\infty a_i x_1^i.\] On the other hand, we have the formal power series expansion \[\frac{1}{x_1-x}=\frac{1}{x_1}\left(1+\frac{x}{x_1}+\frac{x^2}{x_1^2}+\cdots\right),\] from which \[b_n=\frac{a_n}{x_1}+\frac{a_{n-1}}{x_1^2}+\cdots+\frac{a_0}{x_1^{n+1}}.\] Hence, \[b_n x_1^{n+1}=a_0+a_1 x_1+ a_2 x_1^2+\cdots +a_n x_1^n=A_1(x_1)-\sum_{i=n+1}^{\infty}a_i x_1^i,\] where we used the fact that the power series converges to $A_1(x_1)$ at $x_1$ in the last equality. Since as $n\to \infty$, $\sum_{i=n+1}^\infty a_i x_1^i\to 0$, we get that $b_n x_1^{n+1}\to A_1(x_1)$, from which \[b_n\sim \frac{A_1(x_1)}{x_1^{n+1}}.\] From here, we already get that in Theorem~\ref{thm:nasy}, $r_3=\frac{1}{x_1}=\frac{3\sqrt{3}}{2\pi}$, and $c_3=A_1(x_1)/x_1=\frac{3\sqrt{3}}{2\pi}e^{\frac{\pi}{3\sqrt{3}}}$. By iterating this procedure once more (with $A_1(x)$ in place of $B(x)$ and $A_2(x)$ in place of $A_1(x)$), we get that asymptotically in $i$, \[a_i\sim \frac{A_2(x_2)}{x_2^{i+1}}.\] Hence, there is some constant $C$, such that for all $i\geq 0$,
\[|a_i|\leq \frac{C}{|x_2|^{i+1}}.\]
Using this, we can bound the error term in our equation for $b_n$:
\[|b_n x_1^{n+1}-A_1(x_1)|=\left\lvert\sum_{i=n+1}^{\infty}a_i x_1^{i}\right\rvert\leq \sum_{i=n+1}^\infty \frac{C}{|x_2|^{i+1}}x_1^{i}=C' \sum_{i=n+1}^\infty \left(\frac{x_1}{|x_2|}\right)^i=\frac{C'}{2^n}.\]
Hence, we can pick $\gamma_3=\frac{1}{2}$ in Theorem~\ref{thm:nasy}. We will summarize what we just proved in the next proposition.
\begin{proposition} We define \[A_1(x)=\frac{\sqrt{3}}{2}\frac{e^{x/2}}{\sin\left(\frac{\sqrt{3}}{2}x+\frac{2}{3}\pi\right)}\left(\frac{2\pi}{3\sqrt{3}}-x\right)\] and $x_1=\frac{2\pi}{3\sqrt{3}}$. Then \[f_3(n)=n!\frac{A_1(x_1)}{x_1^{n+1}}\left(1+O\left(\frac{1}{2^n}\right)\right).\] \end{proposition}
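As a quick numerical sanity check (not needed in what follows), one can compare the exact values $f_3(7)=2017$ and $f_3(8)=13358$ (the row sums appearing in Figure~\ref{fig:comp}) with the main term $n!\,A_1(x_1)/x_1^{n+1}$; here $A_1(x_1)=e^{x_1/2}$, as follows from the expression for $c_3$ above.
\begin{verbatim}
from math import exp, factorial, pi, sqrt

x1 = 2 * pi / (3 * sqrt(3))
A1 = exp(x1 / 2)          # A_1(x_1); equivalently c_3 = A_1(x_1) / x1
for n, fn in ((7, 2017), (8, 13358)):
    print(n, fn / (factorial(n) * A1 / x1 ** (n + 1)))   # ratios close to 1
\end{verbatim}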
\subsection{Other $r_k$} We now consider the case of general $k\geq 3$. We start from the following well-known exponential generating function for $f_k(n)$, which appears as Exercise 23.(b) in Chapter $2$ of Stanley's Enumerative Combinatorics 1 \cite{10.5555/2124415}.
\begin{proposition}[\cite{genfunc}] For any $k\geq 3$, the following is an exponential generating function for $f_k(n)$: \[B_k(x)=\frac{1}{\sum_{\ell=0}^\infty \frac{x^{k\ell}}{(k\ell)!}-\frac{x^{k\ell+1}}{(k\ell+1)!}}.\] That is, with $b_{k,n}$ being the $x^n$ coefficient in the power series for $B_k(x)$, we have $f_k(n)=b_{k,n}n!$. \end{proposition} Warlimont \cite{warlimont} proves that this exponential generating function has a unique smallest magnitude pole and that this pole is simple. Together with an analogous standard argument to what we just showed for $k=3$, this implies that $r_k$ (in Theorem~\ref{thm:nasy}) is the reciprocal of the smallest magnitude root of $\sum_{\ell=0}^\infty \frac{x^{k\ell}}{(k\ell)!}-\frac{x^{k\ell+1}}{(k\ell+1)!}$. Warlimont also provides bounds on $r_k$. We state all of this in the next proposition.
\begin{proposition}[Warlimont \cite{warlimont}]\label{prop:war} For any $k\geq 4$, the reciprocal $1/r_k$ of the constant $r_k$ in Theorem~\ref{thm:nasy} is the unique smallest magnitude pole of $B_k$. Furthermore, we have the following bounds: \[1+\frac{1}{k!}\left(1-g(k)\right)\leq \frac{1}{r_k}\leq 1+\frac{1}{k!}\left(1+h(k)\right),\] where \[g(k)=\frac{k!+1}{(k+1)!+1},\hspace{5mm}h(k)=\frac{2(k+1)}{k!-2(k+1)}.\] \end{proposition}
The first three values are $\frac{1}{r_3}=\frac{2\pi}{3\sqrt{3}}=1.209199576\ldots$, $\frac{1}{r_4}=1.038415637\ldots$, and $\frac{1}{r_5}=1.007187547786\ldots$ (given by Kotesovec on OEIS sequences A049774, A117158, and A177523, respectively) \cite{oeis}.
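These values are easy to reproduce numerically: $1/r_k$ is the smallest positive root of $\sum_{\ell=0}^\infty \frac{x^{k\ell}}{(k\ell)!}-\frac{x^{k\ell+1}}{(k\ell+1)!}$, which can be located by bisection on a truncation of the series. The following minimal Python sketch does this; the bracket $[1,1.3]$ is chosen by inspection and works for the values of $k$ shown.
\begin{verbatim}
def series(x, k, terms=30):
    # truncation of sum_l x^{kl}/(kl)! - x^{kl+1}/(kl+1)!
    total, t, n = 0.0, 1.0, 0      # t = x^(k*l) / (k*l)!
    for _ in range(terms):
        total += t - t * x / (n + 1)
        for _ in range(k):          # advance t to the next block
            n += 1
            t *= x / n
    return total

def recip_r(k, lo=1.0, hi=1.3, iters=100):
    # series(lo) > 0 > series(hi); plain bisection for the root 1/r_k
    for _ in range(iters):
        mid = (lo + hi) / 2
        if series(mid, k) > 0:
            lo = mid
        else:
            hi = mid
    return (lo + hi) / 2

print([round(recip_r(k), 7) for k in (3, 4, 5)])
# [1.2091996, 1.0384156, 1.0071875]
\end{verbatim}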
We finish this section with another remark. There is a trick which lets us rewrite the aforementioned infinite sum in a finite form. We start from an identity which can be proven by expanding all terms on the RHS as infinite series: \[\sum_{\ell=0}^\infty \frac{x^{k\ell}}{(k\ell)!}=\frac{1}{k}\left(e^x+e^{\omega_k x}+\cdots+e^{\omega_k^{k-1}x}\right),\] where $\omega_k=e^{\frac{2\pi i}{k}}$. Integrating both sides from $0$ to $x$ (the constants of integration cancel because $\sum_{j=0}^{k-1}\omega_k^{-j}=0$), we get \[\sum_{\ell=0}^\infty \frac{x^{k\ell+1}}{(k\ell+1)!}=\frac{1}{k}\left(e^x+\frac{1}{\omega_k}e^{\omega_k x}+\cdots+\frac{1}{\omega_k^{k-1}} e^{\omega_k^{k-1}x}\right).\]
Subtracting the second from the first, we get \[\frac{1}{B_k(x)}=\frac{1}{k}\left(\left(1-\frac{1}{\omega_k}\right)e^{\omega_k x}+\left(1-\frac{1}{\omega_k^2}\right)e^{\omega_k^2 x}+\cdots+\left(1-\frac{1}{\omega_k^{k-1}}\right)e^{\omega_k^{k-1}x}\right).\]
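For example, for $k=3$ the right-hand side simplifies (as one can check directly from $\omega_3=e^{2\pi i/3}$) to \[\frac{1}{B_3(x)}=\frac{2}{\sqrt{3}}\,e^{-x/2}\sin\left(\frac{\sqrt{3}}{2}x+\frac{2\pi}{3}\right),\] which is consistent with the formula for $A_1(x)$ above, and whose smallest positive root is $x_1=\frac{2\pi}{3\sqrt{3}}$.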
\section{Asymptotics of $f_k(m,n)$}\label{sec:mnasy}
\subsection{Two propositions on $f_k(m,n)$} In this subsection, we present two propositions on $f_k(m,n)$. The first one is a nice fact about $f_k(m,n)$ which will be needed to finish the proof of Theorem~\ref{thm:fmnasy} later.
\begin{proposition}\label{prop:decr} For any $k\geq 2$ and $n\in \mathbb{Z}^+$, \[f_k(1,n)\geq f_k(2,n)\geq \cdots \geq f_k(n,n).\] \end{proposition}
\begin{proof} For $2\leq m\leq n$ and any $w\in \mathcal{D}_k(\emptyset,m,n)$, switching $m$ and $m-1$ in $w$ gives $w'\in \mathcal{D}_k(\emptyset,m-1,n)$, since no $k$-descent can be created by this operation: the values $m$ and $m-1$ are adjacent, so the only pairwise comparison that changes is the one between their two positions, and after the switch that pair is an ascent. Furthermore, $w\mapsto w'$ is injective because $w$ can be uniquely recovered from $w'$ by switching $m$ and $m-1$ in $w'$. Hence, $f_k(m,n)\leq f_k(m-1,n)$. \end{proof}
The second proposition will be crucial in deriving the asymptotic distribution of $f_k(m,n)$.
\begin{proposition}\label{prop:fmn} For any $k\geq 3$ and $m\leq n\in \mathbb{Z}^+$, \[f_k(m,n)=\binom{m-1}{0}f_k(n-1)-\binom{m-1}{k-1}f_k(n-k)+\binom{m-1}{k}f_k(n-k-1)-\binom{m-1}{2k-1}f_k(n-2k)+\cdots.\] \end{proposition}
For $k=3$, one can prove this by observing that in Proposition~\ref{prop:gen}, the $G(xy)$ and $R(x,y)$ terms will only contribute to coefficients of $x^m y^n$ with $m>n$. So $f_3(m,n)$ is just given by the coefficients of $\frac{Q(x,y)F(y)}{P(x,y)}$. For general $k$, we give the following combinatorial proof.
\begin{proof} Let us consider the right-hand side of the equation we want to prove. We think of the first term, $\binom{m-1}{0}f_k(n-1)$, as counting all permutations that start with $m$ and for which the restriction to the other $n-1$ indices is a permutation in $\mathcal{D}_k(\emptyset,n-1)$. We think of the second term, $\binom{m-1}{k-1}f_k(n-k)$, as counting all permutations that start with a decreasing sequence of $k$ elements beginning at $m$, and the restriction to the other $n-k$ indices is a permutation in $\mathcal{D}_k(\emptyset,n-k)$. We think of the next term, $\binom{m-1}{k}f_k(n-k-1)$ as the same except the initial sequence is now of length $k+1$, and so on. Let us consider how many times each permutation in $S_n$ gets counted, taking signs into account. If a permutation does not start with $m$, then it is clearly not counted by any term. If a permutation starts with $m$ and contains a $k$-descent somewhere not in an initial decreasing sequence, then it does not get counted by any term. If a permutation starts with $m$, avoids $k\ldots 1$ except in the initial decreasing sequence, and has an initial decreasing sequence of length $t\geq k$, it gets counted in exactly the terms with $\binom{m-1}{\ell}$ with the two maximal values $\ell<t$; since these have opposing signs, these counts cancel each other. If a permutation starts with $m$ and avoids $k$-descents, then it is counted exactly once, namely by just the first term. This covers all the cases. Hence, the right-hand side counts the number of permutations starting with $m$ and avoiding $k$-descents, which is equal to $f_k(m,n)$ by definition. So the right-hand side is equal to the left-hand side, completing the proof. \end{proof}
We remark that one can also derive the $k$th difference equation (Theorem~\ref{thm:mainrec}) from Proposition~\ref{prop:fmn}.
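As a small worked example of Proposition~\ref{prop:fmn}, take $k=3$, $n=4$, $m=3$: the right-hand side is $\binom{2}{0}f_3(3)-\binom{2}{2}f_3(1)=5-1=4$, matching a direct count of the $3$-descent-avoiding permutations of $[4]$ that start with $3$. The brute-force Python sketch below (a verification aid only; the choice $k=3$, $n=7$ is arbitrary) checks the identity for all $m$.
\begin{verbatim}
from itertools import permutations
from math import comb

def avoids(w, k):
    # True if w contains no k consecutive strictly decreasing entries
    return not any(all(w[i + j] > w[i + j + 1] for j in range(k - 1))
                   for i in range(len(w) - k + 1))

def f(k, n):           # f_k(n)
    return sum(avoids(w, k) for w in permutations(range(1, n + 1)))

def f_first(k, m, n):  # f_k(m, n)
    return sum(avoids(w, k) for w in permutations(range(1, n + 1)) if w[0] == m)

k, n = 3, 7
for m in range(1, n + 1):
    rhs, l, sign = 0, 1, 1
    while l <= n:      # l runs over 1, k, k+1, 2k, 2k+1, ... with alternating signs
        rhs += sign * comb(m - 1, l - 1) * f(k, n - l)
        l = l + (k - 1) if l % k == 1 else l + 1
        sign = -sign
    assert rhs == f_first(k, m, n)
print("Proposition verified for k = 3, n = 7.")
\end{verbatim}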
\subsection{The asymptotic distribution of $f_k(m,n)$} We now prove the following theorem which describes the asymptotic distribution of $f_k(m,n)$. The content of this theorem is that as $n\to \infty$, the mass of $f_k(n)$ is distributed among $f_k(1,n),\ldots, f_k(n,n)$ according to an explicit distribution $\varphi_k\left(\frac{m}{n}\right)$ -- crucially, this distribution does not depend on $n$ (after the appropriate normalization of $\frac{1}{n}$).
\begin{theorem}\label{thm:fmnasy} For all $k\geq 3$, with $r_k$ from Theorem~\ref{thm:nasy}, for all $m,n\in \mathbb{Z}^+$ with $1\leq m\leq n$, \[\frac{nf_k(m,n)}{f_k(n)}=\varphi_k\left(\frac{m}{n}\right)\left(1+O_k\left(n^{-0.49}\right)\right),\] where \[\varphi_k\left(x\right)=\frac{1}{r_k}\left(1-\frac{(x/r_k)^{k-1}}{(k-1)!}+\frac{(x/r_k)^k}{k!}-\frac{(x/r_k)^{2k-1}}{(2k-1)!}+\frac{(x/r_k)^{2k}}{(2k)!}-\cdots \right).\] \end{theorem}
We note that the exponent $-0.49$ is just chosen for clarity, and our proof really gives something slightly stronger. \begin{remark} In Theorem~\ref{thm:fmnasy}, the exponent $-0.49$ can be replaced by any $\alpha>-0.5$. \end{remark}
Given Proposition~\ref{prop:fmn} and Theorem~\ref{thm:nasy}, the proof of Theorem~\ref{thm:fmnasy} is just algebra and analysis.
\begin{proof}[Proof of Theorem~\ref{thm:fmnasy}] Plugging the expression for $f_k(n)$ from Theorem~\ref{thm:nasy} into the expression for $f_k(m,n)$ in Proposition~\ref{prop:fmn}, we get \[\frac{n}{f_k(n)}f_k(m,n)=\binom{m-1}{0}r_k^{-1}\left(1+O(\gamma_k^{n-1})\right) -\binom{m-1}{k-1}\frac{(n-k)!}{(n-1)!}r_k^{-k}\left(1+O(\gamma_k^{n-k})\right)\]\[+\binom{m-1}{k}\frac{(n-k-1)!}{(n-1)!}r_k^{-k-1} \left(1+O(\gamma_k^{n-k-1})\right)-\binom{m-1}{2k-1}\frac{(n-2k)!}{(n-1)!}r_k^{-2k}\left(1+O(\gamma_k^{n-2k})\right)+\cdots.\]
Our proof strategy will be to first show that the terms with $\ell\geq \log{n}$ are negligible, then estimate the terms with $\ell\leq \log{n}$ just for the case $m\geq \sqrt{n}$, and then complete the proof for the remaining $m<\sqrt{n}$ case using analytic arguments and Proposition~\ref{prop:decr}. We will state whenever we restrict to a particular case. We start by considering a general term; it has the following form: \[\binom{m-1}{\ell}\frac{(n-\ell-1)!}{(n-1)!}r_k^{-\ell-1}\left(1+O(\gamma_k^{n-\ell})\right)\] \[=\frac{(m-1)\cdots(m-\ell)}{(n-1)\cdots(n-\ell)}\frac{r_k^{-\ell-1}}{\ell!}\left(1+O(\gamma_k^{n-\ell})\right).\]
We use the fact that $\ell!\geq \left(\frac{\ell}{e}\right)^{\ell}$, the fact that $m\leq n$ implies that $\frac{m-i}{n-i}\leq 1$, and the fact that $\ell\leq n$ implies that $1+O(\gamma_k^{n-\ell})\leq C$ (where the constant is independent of $\ell$) to upper-bound such a term: \[\frac{(m-1)\cdots(m-\ell)}{(n-1)\cdots(n-\ell)}\frac{r_k^{-\ell-1}}{\ell!}\left(1+O(\gamma_k^{n-\ell})\right)\leq \frac{C}{r_k}\left(\frac{e/r_k}{\ell}\right)^{\ell}.\] For $\ell\geq \log n$, we further bound this: \[\frac{C}{r_k}\left(\frac{e/r_k}{\ell}\right)^{\ell}\leq\frac{C}{r_k}\left(\frac{e/r_k}{\log n}\right)^{\log n}=\frac{C}{r_k}n^{\log \frac{e/r_k}{\log n}}=\frac{C}{r_k}n^{-\log \frac{\log n}{e/r_k}}.\] Hence, the contribution of all terms with $\ell\geq \log n$ is at most $n$ times the contribution of one such term (since there are at most $n$ terms), totaling to \[O\left(n^{-\log \frac{\log n}{e/r_k}+1}\right).\] Let us now focus on the case $m\geq \sqrt{n}$ and $\ell<\log n$. We bring our attention back to a general term. Note that \[\binom{m-1}{\ell}=\frac{m^\ell}{\ell!}\left(1+O\left(\frac{\ell^2}{m}\right)\right)\] and \[\frac{(n-\ell-1)!}{(n-1)!}=n^{-\ell}\left(1+O\left(\frac{\ell^2}{n}\right)\right).\]
Hence, with these bounds on $\ell$ and $m$, a general term is \[\binom{m-1}{\ell}\frac{(n-\ell-1)!}{(n-1)!}r_k^{-\ell-1}\left(1+O(\gamma_k^{n-\ell})\right)=\frac{1}{r_k}\frac{\left(\frac{m}{n}\frac{1}{r_k}\right)^\ell}{\ell!}\left(1+O\left(n^{-0.49}\right)\right).\] Now, comparing the sum of the first $\log n$ terms in the initial sum for $f_k(m,n)$ with the sum \[\frac{1}{r_k}\left(1-\frac{\left(\frac{m}{n}\frac{1}{r_k}\right)^{k-1}}{(k-1)!}+\frac{\left(\frac{m}{n}\frac{1}{r_k}\right)^k}{k!}-\frac{\left(\frac{m}{n}\frac{1}{r_k}\right)^{2k-1}}{(2k-1)!}+\cdots\right),\] where the sum goes up to the largest $\ell<\log n$, we note that the difference is upper bounded by $O\left(n^{-0.49}\right)$ times the sum of absolute values of these terms. It is still upper bounded by the same thing with the finite sum replaced by an infinite sum, which is equal to some constant between $\frac{1}{r_k}$ and $\frac{1}{r_k}e^{\frac{1}{r_k}}$ (just by comparing terms). This observation together with our previous bound on the contribution of terms with $\ell\geq \log n$ implies that \[\frac{nf_k(m,n)}{f_k(n)}=\frac{1}{r_k}\left(1-\frac{\left(\frac{m}{n}\frac{1}{r_k}\right)^{k-1}}{(k-1)!}+\frac{\left(\frac{m}{n}\frac{1}{r_k}\right)^k}{k!}-\frac{\left(\frac{m}{n}\frac{1}{r_k}\right)^{2k-1}}{(2k-1)!}+\cdots\right)+O(n^{-0.49}),\] where we used the fact that $O\left(n^{-\log \frac{\log n}{e/r_k}+1}\right)=O(n^{-0.49})$. Now we proceed to bound the difference between the series cut off at $\log n$ and the corresponding infinite series. The sum of absolute values of tail terms (after $\log n$) can be upper-bounded by a geometric series with first term $\frac{1}{r_k}\frac{\left(\frac{m}{n}\frac{1}{r_k}\right)^{\log n}}{(\log n)!}$ and ratio $\frac{\frac{m}{n}\frac{1}{r_k}}{\log n}$. This is $O\left(\left(\frac{e/r_k}{\log n}\right)^{\log n}\right)=O(n^{-0.49})$, as argued before. Hence, \[\frac{nf_k(m,n)}{f_k(n)}=\varphi_k\left(\frac{m}{n}\right)+O(n^{-0.49}),\] with \[\varphi_k(x)=\frac{1}{r_k}\left(1-\frac{\left(x/r_k\right)^{k-1}}{(k-1)!}+\frac{\left(x/r_k\right)^k}{k!}-\frac{\left(x/r_k\right)^{2k-1}}{(2k-1)!}+\cdots\right)\] with the series being infinite now.
We still need to deal with the case $m<\sqrt{n}$. In order to do so, we analyze the function $\varphi_k(x)$. Note that for $x\in [0,1]$, $\varphi_k(x)$ is non-increasing. One can see this by showing that the derivative is non-positive by taking the derivative of the series, pairing up consecutive terms, and using the fact that $\frac{1}{r_k}<\sqrt{2}$, implied by Proposition~\ref{prop:war} and a manual computation for $k=3$, to show that each successive term is smaller in magnitude. A similar pairing argument gives that the second derivative is negative, and that $\varphi_k(1)>0$. We now return to the case $m\leq \sqrt{n}$. In that case, by Proposition~\ref{prop:decr}, the value of $n\frac{f_k(m,n)}{f_k(n)}$ is at most $\frac{nf_k(1,n)}{f_k(n)}=\frac{nf_k(n-1)}{f_k(n)}=\frac{1}{r_k}\left(1+O(\gamma_k^{n-1})\right)$, and at least $\frac{n f_k(\lceil\sqrt{n}\rceil,n)}{f_k(n)}$. By what we have already proved, this lower bound is $\varphi_k\left(\frac{\lceil\sqrt{n}\rceil}{n}\right)+O(n^{-0.49})$. Since the first and second derivatives of $\varphi_k(x)$ are both negative, the magnitude of the first derivative of $\varphi_k$ is upper-bounded by $|\varphi_k'(1)|$. This is a constant, so $\varphi_k\left(\frac{\lceil\sqrt{n}\rceil}{n}\right)+O(n^{-0.49})=\varphi_k(0)+O(n^{-0.49})=\frac{1}{r_k}+O(n^{-0.49})$. Also, for $m\leq \sqrt{n}$, $\varphi_k(\frac{m}{n})=\frac{1}{r_k}+O(n^{-0.5})$. By combining the upper and lower bounds with this estimate, we thus get $n\frac{f_k(m,n)}{f_k(n)}=\varphi_k\left(\frac{m}{n}\right)+O(n^{-0.49})$.
So far, we have proved that for all $m\leq n$, \[\frac{nf_k(m,n)}{f_k(n)}=\varphi_k\left(\frac{m}{n}\right)+O(n^{-0.49}).\] We can finish the proof by noting that there is a uniform lower bound on $\varphi_k(x)$, namely $\varphi_k(x)>\varphi_k(1)>0$. With this, we arrive at the desired result: \[\frac{nf_k(m,n)}{f_k(n)}=\varphi_k\left(\frac{m}{n}\right)\left(1+O(n^{-0.49})\right).\]
\end{proof}
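Although the error term in Theorem~\ref{thm:fmnasy} is only asymptotic, the approximation is already visible at small $n$. The following brute-force Python sketch (a rough illustration only; $n=9$ is an arbitrary choice, and at this size the discrepancy is still on the order of a percent) compares $n f_3(m,n)/f_3(n)$ with $\varphi_3(m/n)$, using the closed form for $\varphi_3$ given below.
\begin{verbatim}
from itertools import permutations
from math import exp, sin, sqrt, pi

def avoids3(w):
    return not any(w[i] > w[i + 1] > w[i + 2] for i in range(len(w) - 2))

def phi3(x):
    return (4 * pi / 9) * exp(-pi * x / (3 * sqrt(3))) * sin((x + 1) * pi / 3)

n = 9
perms = [w for w in permutations(range(1, n + 1)) if avoids3(w)]
f_n = len(perms)                      # f_3(9) = 99377
for m in range(1, n + 1):
    f_mn = sum(1 for w in perms if w[0] == m)
    print(m, round(n * f_mn / f_n, 4), round(phi3(m / n), 4))
\end{verbatim}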
Before moving on, we note that in analogy to what we did at the end of Section~\ref{sec:nasy}, one can instead write $\varphi_k(x)$ in the following finite form.
\begin{proposition} We let $\omega_k=e^{\frac{2\pi i}{k}}$. For any $k\geq 3$, \[\varphi_k(x)=\frac{1}{k r_k}\left((1-\omega_k)e^{\omega_k x/r_k}+\left(1-\omega_k^2\right)e^{\omega_k^2 x/r_k}+\cdots+\left(1-\omega_k^{k-1}\right)e^{\omega_k^{k-1}x/r_k}\right).\] \end{proposition}
By comparing power series expansions, one can write down something quite simple for the case $k=3$.
\begin{proposition} \[\varphi_3(x)=\frac{4\pi}{9}e^{-\pi x/ (3\sqrt{3})}\sin((x+1)\pi/3)\] \end{proposition} Figure~\ref{fig:plots} shows a plot of $\varphi_3$.
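A quick numerical comparison of this closed form with the defining series from Theorem~\ref{thm:fmnasy} is given by the Python sketch below (a sanity check only; the truncation at $30$ blocks of terms is arbitrary).
\begin{verbatim}
from math import exp, factorial, sin, sqrt, pi

inv_r3 = 2 * pi / (3 * sqrt(3))

def phi3_series(x, blocks=30):
    # (1/r_3) * (1 - y^2/2! + y^3/3! - y^5/5! + y^6/6! - ...) with y = x/r_3
    y = x * inv_r3
    total, sign = 1.0, -1
    for j in range(1, blocks):
        for e in (3 * j - 1, 3 * j):
            total += sign * y ** e / factorial(e)
            sign = -sign
    return inv_r3 * total

def phi3_closed(x):
    return (4 * pi / 9) * exp(-pi * x / (3 * sqrt(3))) * sin((x + 1) * pi / 3)

for x in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(x, phi3_series(x), phi3_closed(x))
\end{verbatim}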
We will now argue that as $k\to \infty$, the sequence of functions $\varphi_k$ converges pointwise (and uniformly) to the constant function $1$. One can prove this by using Warlimont's bounds on $r_k$ from Proposition~\ref{prop:war} to bound the contribution of all terms after the first term of the series expansion for $\varphi_k$ given in Theorem~\ref{thm:fmnasy}. Namely, all terms appearing in the series expansion for $\varphi_k(x)$ also appear in the series expansion for $e^{x/r_k}$, and as $\frac{1}{r_k}<\sqrt{2}$, the absolute value of each term is upper bounded by the corresponding term in the series expansion for $e^{\sqrt{2}}$. Since this series converges, its tail sums go to $0$, and as $k\to \infty$, all terms of the expansion for $\varphi_k$ other than the leading $1$ are contained in a tail sum starting further and further along, and hence have total contribution going to $0$. This is uniform over $x\in [0,1]$; alternatively, one can see that pointwise convergence implies uniform convergence from the fact that $\varphi_k(0)=\frac{1}{r_k}$ and $\varphi_k(x)$ is decreasing on $[0,1]$, so it suffices to show convergence to $\frac{1}{r_k}$ at $x=1$.
So $\lim_{k\to \infty}\left(\varphi_k-\frac{1}{r_k}\right)$ is $0$. Again, using Warlimont's bounds (or just that $\varphi_k$ has integral $1$), $\frac{1}{r_k}\to 1$, so we get that $\lim_{k\to\infty}\varphi_k$ is $1$. One would expect that avoiding $k$-descents says less and less about the first element of a permutation as $k$ becomes larger, so this result matches with intuition.
\begin{figure}
\caption{Plot of $\varphi_3$}
\label{fig:plots}
\end{figure}
\section{Asymptotics of $d_k(I,n)$}\label{sec:dasy} In this section, we prove that if we fix a finite set $I\subseteq\mathbb{Z}^+$, then $d_k(I,n)$ is asymptotically given by an integral formula, and that this allows for efficient determination of the asymptotics of $d_k(I,n)$. As a consequence, we will prove the following theorem.
\begin{theorem}\label{thm:dasy} For any $k\geq 3$ and finite $I\subseteq \mathbb{Z}^+$, there is a constant $c_{I,k}$ such that \[d_k(I,n)=c_{I,k} f_k(n)\left(1+O(n^{-0.49})\right).\] \end{theorem} Corollaries of this theorem include three conjectures by Zhu; these are Conjecture 3.2, Conjecture 6.4, and Conjecture 6.5 (which is a generalization of Conjecture 6.4) \cite{zhu2019enumerating}. Again, the exponent $-0.49$ can be replaced with anything strictly greater than $-0.5$, as will be evident from our proof (together with the fact that an analogous statement is true for Theorem~\ref{thm:fmnasy}).
Throughout this section, we will think of $I$ as being fixed, and we let $t=\max(I)+k-1$ (or in other words, $t$ is the index of the end of the last $k$-descent). We now start with discussion that will lead to a proof of Theorem~\ref{thm:dasy}.
Begin by noting that for any permutation $w\in\mathcal{D}_k(I,n)$, the restriction of $w$ to the first $t$ indices has $k$-descent set $I$: it is an element of $\mathcal{D}_k(I,t)$. In particular, note that it ends with a $k$-descent (this will be useful soon). The restriction of $w$ to the last $n-t$ indices has no $k$-descents: it is an element of $\mathcal{D}_k(\emptyset,n-t)$.
On the other hand, we can construct a (unique) permutation in $S_n$ by picking some $\tau\in \mathcal{D}_k(I,t)$ to be its restriction to the first $t$ indices; picking some $v\in \mathcal{D}_k(\emptyset,n-t)$ to be its restriction to the last $n-t$ indices; and picking a set of images for the first $t$ indices -- we are choosing a $t$-element subset of $[n]$, so there are $\binom{n}{t}$ choices for this last step. All in all, one can construct $d_k(I,t)\cdot f_k(n-t)\cdot \binom{n}{t}$ distinct permutations this way, and by what we argued before, all permutations in $\mathcal{D}_k(I,n)$ are among these.
Now, the first piece of bad news is that not every $\tau\in \mathcal{D}_k(I,t)$, $v\in \mathcal{D}_k(\emptyset,n-t)$, and choice of $t$ elements from $[n]$ give a permutation $w\in \mathcal{D}_k(I,n)$. The problem is that although all indices in $I$ are starts of $k$-descents in $w$, it is possible that $w$ has additional $k$-descents starting at some other indices; namely, $w$ could have a $k$-descent starting at $\max(I)+1$ or $\max(I)+2$ or $\ldots$ or $\max(I)+k-1=t$. The first piece of good news is that such an unprescribed $k$-descent occurs in a $w$ constructed this way if and only if $w(t)>w(t+1)$ (this is a consequence of $\tau$ ending with a $k$-descent). In fact, $w(t)>w(t+1)$ will turn out to be the sort of event whose probability we can find using our knowledge of the asymptotic distribution of $f_k(m,n)$, i.e. Theorem~\ref{thm:fmnasy}.
The second piece of bad news is that the permutations in $\mathcal{D}_k(I,t)$ might in general be hard to describe. To counteract this, we have our second piece of good news: as $I$ is fixed, there are only finitely many $\tau\in \mathcal{D}_k(I,t)$ (namely, no more than $t!$). We will first count the number of permutations $w\in \mathcal{D}_k(I,n)$ that start with a fixed $\tau\in \mathcal{D}_k(I,t)$, and sum over all $\tau\in \mathcal{D}_k(I,t)$ later. From now on, we will think of $\tau$ as being fixed, and we will let $s=s(\tau)$ be the last element of $\tau$, i.e. $s:=\tau(t)$. Now, we construct a random permutation $w\in S_n$ by picking a uniformly random set of $t$ elements from $[n]$ for the (unordered) set of values of $w(1),\ldots, w(t)$, setting their relative order in $w$ to be $\tau$, and then picking a random $v\in \mathcal{D}_k(\emptyset,n-t)$ to be the restriction of $w$ to the last $n-t$ indices. Repeating what we observed before, we get a permutation $w\in \mathcal{D}_k(I,n)$ iff $w(t)<w(t+1)$. So the number of permutations we get this way is
\[\binom{n}{t}f_k(n-t)\mathbb{P}\left(w(t)<w(t+1)\right).\]
Our next goal is to understand $\mathbb{P}(w(t)<w(t+1))$. First off, we essentially know the distribution of $w(t+1)$, as we know the distribution of the first element (Theorem~\ref{thm:fmnasy}) of the restriction to the last $n-t$ indices, and the potential shift by at most $t$ -- depending on the choice of the $t$-element subset of $[n]$ -- is asymptotically small (namely, $O(1/n)$). We will now show that the distribution of $w(t)$ is also simple. The idea is that as $n\to \infty$ but $t$ stays constant, picking $t$ elements of $[n]$ is essentially equivalent to picking $t$ uniform $[0,1]$ random variables (and multiplying each by $n$, and rounding appropriately). We really only care about the value of the $s$th smallest of these. In the case of $t$ uniform $[0,1]$ random variables, the $s$th smallest has the following well-known distribution.
\begin{proposition} Let $U_1,U_2,\ldots,U_t$ be independent uniform $[0,1]$ random variables. The $s$th smallest of these is a random variable which we denote $U^t_{(s)}$ and call the \emph{$s$th order statistic}. At $y\in [0,1]$, the probability density function of $U^t_{(s)}$ is \[\Phi^t_s(y)=\frac{t!}{(s-1)!(t-s)!}y^{s-1}(1-y)^{t-s}.\] \end{proposition}
The next lemma says that in the discrete case, i.e. picking a $t$-element subset of $[n]$, the $s$th smallest has distribution close to $\Phi^t_s(y)$ (rescaled appropriately).
\begin{lemma}\label{lem:os} Let $t\geq 3$ and $1\leq s\leq t$ be fixed, and $n\in \mathbb{Z}^+$. Let $\mathcal{Y}$ be a uniform random $t$-element subset of $[n]$, and let $Y_{(s)}^t$ be the $s$th smallest element of $\mathcal{Y}$. Then \[\mathbb{P}\left(Y_{(s)}^t=\ell\right)=\frac{1}{n}\Phi_s^t\left(\frac{\ell}{n}\right)+O(n^{-1.5}).\] \end{lemma} \begin{proof} Expanding the definition of $\Phi_s^t$, we see that it suffices to prove the following. \[\mathbb{P}\left(Y_{(s)}^t=\ell\right)=\frac{1}{n}\frac{t!}{(s-1)!(t-s)!}\left(\left(\frac{\ell}{n}\right)^{s-1}\left(1-\frac{\ell}{n}\right)^{t-s}+O(n^{-0.5})\right).\] Let's do some counting. If the $s$th smallest element is $\ell$, then there are a total of $\binom{\ell-1}{s-1}$ choices for the bottom $s-1$ elements and $\binom{n-\ell}{t-s}$ choices for the top $t-s$ elements. Hence, \[\mathbb{P}\left(Y_{(s)}^t=\ell\right)=\frac{\binom{\ell-1}{s-1}\binom{n-\ell}{t-s}}{\binom{n}{t}}\]\[=\frac{1}{n}\frac{t!}{(s-1)!(t-s)!}\frac{(\ell-1)(\ell-2)\cdots(\ell-s+1)(n-\ell)(n-\ell-1)\cdots(n-\ell-t+s+1)}{(n-1)(n-2)\cdots(n-t+1)}.\]
We note the identical prefactor in the lemma and the above expression, and we proceed to compare the last terms of the two products. Namely, it remains to show that \[\frac{(\ell-1)(\ell-2)\cdots(\ell-s+1)(n-\ell)(n-\ell-1)\cdots(n-\ell-t+s+1)}{(n-1)(n-2)\cdots(n-t+1)}\]\[=\left(\frac{\ell}{n}\right)^{s-1}\left(1-\frac{\ell}{n}\right)^{t-s}+O(n^{-0.5}).\] If $\ell\leq \sqrt{n}$ and $s\geq 2$, we are automatically done since $\frac{\ell-1}{n-1}$ on the left-hand side and $\frac{\ell}{n}$ on the right-hand side already imply that both terms are $O(n^{-0.5})$. If $\ell\leq \sqrt{n}$ and $s=1$, then $\frac{n-\ell-i}{n-j}=\left(1-\frac{\ell}{n}\right)\left(1+O(n^{-1})\right)$, and the product of finitely many such terms still gives a multiplicative error term of $(1+O(n^{-1}))$, which is better than the desired bound. By symmetry, the case $\ell\geq n-\sqrt{n}$ is also covered now.
As for the case $\sqrt{n}< \ell<n-\sqrt{n}$, we then have $\frac{\ell-i}{n-j}=\frac{\ell}{n}(1+O(n^{-0.5}))$, and similarly for $\frac{n-\ell-i}{n-j}$. The finitely many $(1+O(n^{-0.5}))$ terms still multiply to a $(1+O(n^{-0.5}))$ term. This finishes the case check and the proof of the lemma. \end{proof}
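The estimate in Lemma~\ref{lem:os} is easy to observe numerically: the scaled worst-case error $n^{1.5}\max_\ell\left|\mathbb{P}\left(Y^t_{(s)}=\ell\right)-\frac{1}{n}\Phi^t_s\left(\frac{\ell}{n}\right)\right|$ stays bounded as $n$ grows. The following Python sketch (an illustration only; the parameters $t=5$, $s=2$ are arbitrary) prints this quantity for a few values of $n$.
\begin{verbatim}
from math import comb

def Phi(s, t, y):
    # density of the s-th smallest of t independent uniform [0,1] variables
    return s * comb(t, s) * y ** (s - 1) * (1 - y) ** (t - s)

t, s = 5, 2
for n in (10**3, 10**4, 10**5):
    worst = max(abs(comb(l - 1, s - 1) * comb(n - l, t - s) / comb(n, t)
                    - Phi(s, t, l / n) / n)
                for l in range(1, n + 1))
    print(n, worst * n**1.5)
\end{verbatim}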
We note that with the notation from before, i.e. $\tau$ being the fixed relative order for the first $t$ indices, and $v$ being the $k$-descent-avoiding restriction to the last $n-t$ indices, $v(1)\leq w(t+1)\leq v(1)+t$, so \[w(t)\leq v(1)\implies w(t)<w(t+1)\implies w(t)\leq v(1)+t,\] from which \[\mathbb{P}(w(t)\leq v(1))\leq \mathbb{P}(w(t)<w(t+1))\leq \mathbb{P}(w(t)\leq v(1)+t).\] This is useful as it lets us deal with the (otherwise) inconvenient detail that we can understand $w(t)$ and $v(1)$, but we wish to compare $w(t)$ and $w(t+1)$, and $w(t+1)$ need not equal $v(1)$ (instead, $w(t+1)$ can be anything in the range $v(1),v(1)+1,\ldots, v(1)+t$). We now finally get to use our machinery for $w(t)$ and $v(1)$, i.e. Lemma~\ref{lem:os} and Theorem~\ref{thm:fmnasy}. We start from \[\mathbb{P}\left(w(t)\leq v(1)\right)=\sum_{m=1}^{n-t}\frac{f_k(m,n-t)}{f_k(n-t)}\sum_{\ell=1}^{m}\mathbb{P}\left(Y^t_{(s)}=\ell\right).\] Applying Lemma~\ref{lem:os}, we get \[\mathbb{P}\left(w(t)\leq v(1)\right)=\sum_{m=1}^{n-t}\frac{f_k(m,n-t)}{f_k(n-t)}\sum_{\ell=1}^{m}\frac{1}{n}\Phi_s^t\left(\frac{\ell}{n}\right)+O(n^{-1.5}).\] We sum the $O(n^{-1.5})$ terms up into an $O(n^{-0.5})$ and note that the remaining inner sum is a Riemann sum for the integral $\int_0^{\frac{m}{n}}\Phi_s^t(y)$. Since $\Phi_s^t(y)$ is a polynomial, it is continuously differentiable on $[0,1]$, and by compactness of $[0,1]$ its derivative is bounded, so the difference between our integral and our Riemann sum is $n\cdot O(1/n^2)=O(1/n)$. Hence, we get \[\mathbb{P}\left(w(t)\leq v(1)\right)=O(n^{-0.5})+\sum_{m=1}^{n-t}\frac{f_k(m,n-t)}{f_k(n-t)}\int_0^{\frac{m}{n}}\Phi_s^t(y).\]
We now use Theorem~\ref{thm:fmnasy}, getting \[\mathbb{P}\left(w(t)\leq v(1)\right)=O(n^{-0.5})+\sum_{m=1}^{n-t}\frac{1}{n-t}\varphi_k\left(\frac{m}{n-t}\right)\left(1+O\left(n^{-0.49}\right)\right)\int_0^{\frac{m}{n}}\Phi_s^t(y).\] We again pull out a total additive error term of $(n-t)\cdot\frac{1}{n-t}\cdot O\left(n^{-0.49}\right)=O\left(n^{-0.49}\right)$, which we can do since compactness of $[0,1]$ implies that $\varphi_k(\frac{m}{n})\int_0^{\frac{m}{n}}\Phi_s^t(y)$ is bounded. We then again have a Riemann sum, this time for the outer integral in $\int_0^1\varphi_k(x)\int_{0}^{x\frac{n-t}{n}} \Phi_s^t(y)$. Since $\Phi_s^t$ is continuous and bounded (by compactness of $[0,1]$) and $\varphi_k$ is continuously differentiable and hence has bounded derivative, the Riemann summed function $\varphi_k(x)\int_{0}^{x\frac{n-t}{n}} \Phi_s^t(y)$ is differentiable and has bounded derivative on $[0,1]$. So the difference between our Riemann sum and our integral is $O(1/n)$ as before. Hence,
\[\mathbb{P}\left(w(t)\leq v(1)\right)=O(n^{-0.49})+\int_0^1\varphi_k(x)\int_{0}^{x\frac{n-t}{n}} \Phi_s^t(y).\]
Finally, by boundedness of $\varphi_k(x)\Phi_s^t(y)$ in the compact $[0,1]^2$, the integral over a measure $O(1/n)$ subset of $[0,1]^2$ is itself $O(1/n)$, so \[\mathbb{P}\left(w(t)\leq v(1)\right)=O(n^{-0.49})+\int_0^1\varphi_k(x)\int_{0}^{x}\Phi_s^t(y).\]
By an analogous argument, we can also get \[\mathbb{P}(w(t)\leq v(1)+t)=O(n^{-0.49})+\int_0^1\varphi_k(x)\int_{0}^{x}\Phi_s^t(y).\] Combining these two, we get \[O(n^{-0.49})+\int_0^1\varphi_k(x)\int_{0}^{x}\Phi_s^t(y)\leq \mathbb{P}(w(t)<w(t+1))\leq O(n^{-0.49})+\int_0^1\varphi_k(x)\int_{0}^{x}\Phi_s^t(y).\] Hence, \[\mathbb{P}(w(t)<w(t+1))=O(n^{-0.49})+\int_0^1\varphi_k(x)\int_{0}^{x}\Phi_s^t(y).\] This integral is a positive constant (it is nonzero since $\varphi_k(x)$ is bounded below and $\Phi^t_s(y)$ integrates to $1$), so we can make the error term multiplicative: \[\mathbb{P}(w(t)<w(t+1))=\left(1+O(n^{-0.49})\right)\int_0^1\varphi_k(x)\int_{0}^{x}\Phi_s^t(y).\]
Finally coming back to what we promised a long time ago, we sum over all $\tau\in \mathcal{D}_k(I,t)$, and arrive at the following theorem, which is an integral formula for $d_k(I,n)$.
\begin{theorem}\label{thm:precdasy} For fixed $k\geq 3$, fixed finite $I\subseteq \mathbb{Z}^+$, and asymptotically in $n\in \mathbb{Z}^+$, \[d_k(I,n)=\left(1+O\left(n^{-0.49}\right)\right)\binom{n}{t}f_k(n-t)\int_0^1\varphi_k(x)\int_{0}^{x}\sum_{\tau\in \mathcal{D}_k(I,t)}\Phi_{\tau(t)}^t(y)\hspace{0.5mm}\mathrm{d}y\hspace{0.5mm}\mathrm{d}x.\] \end{theorem}
From Theorem~\ref{thm:precdasy} and Theorem~\ref{thm:nasy}, proving Theorem~\ref{thm:dasy} is easy algebra.
\begin{proof}[Proof of Theorem~\ref{thm:dasy}] We start with \[d_k(I,n)=\left(1+O\left(n^{-0.49}\right)\right)\binom{n}{t}f_k(n-t)\int_0^1\varphi_k(x)\int_{0}^{x}\sum_{\tau\in \mathcal{D}_k(I,t)}\Phi_{\tau(t)}^t(y)\hspace{0.5mm}\mathrm{d}y\hspace{0.5mm}\mathrm{d}x.\] Plugging in $\binom{n}{t}=(1+O(1/n))\frac{n^t}{t!}$ and Theorem~\ref{thm:nasy}, we get \[d_k(I,n)=\left(1+O\left(n^{-0.49}\right)\right)\left(1+O(1/n)\right)\frac{n^t}{t!}c_k r_k^{n-t}(n-t)!\left(1+O(\gamma_k^n)\right)\int_0^1\varphi_k(x)\int_{0}^{x}\sum_{\tau\in \mathcal{D}_k(I,t)}\Phi_{\tau(t)}^t(y)\hspace{0.5mm}\mathrm{d}y\hspace{0.5mm}\mathrm{d}x.\] Only keeping the dominating error term and then using $n!=(n-t)!n^t\left(1+O(1/n)\right)$, and then again keeping the dominating error term, we further get \[d_k(I,n)=\left(1+O\left(n^{-0.49}\right)\right)c_k n!r_k^{n}\frac{1}{t! r_k^t}\int_0^1\varphi_k(x)\int_{0}^{x}\sum_{\tau\in \mathcal{D}_k(I,t)}\Phi_{\tau(t)}^t(y)\hspace{0.5mm}\mathrm{d}y\hspace{0.5mm}\mathrm{d}x.\] Using Theorem~\ref{thm:nasy} and then only keeping the dominating error term once more, we arrive at \[d_k(I,n)=\left(1+O\left(n^{-0.49}\right)\right)f_k(n)\frac{1}{t! r_k^t}\int_0^1\varphi_k(x)\int_{0}^{x}\sum_{\tau\in \mathcal{D}_k(I,t)}\Phi_{\tau(t)}^t(y)\hspace{0.5mm}\mathrm{d}y\hspace{0.5mm}\mathrm{d}x.\]
This is what we sought to prove, with \[c_{I,k}=\frac{1}{t! r_k^t}\int_0^1\varphi_k(x)\int_{0}^{x}\sum_{\tau\in \mathcal{D}_k(I,t)}\Phi_{\tau(t)}^t(y)\hspace{0.5mm}\mathrm{d}y\hspace{0.5mm}\mathrm{d}x.\]
\end{proof}
\subsection{A somewhat simpler formula for $c_{I,k}$} We can write down something simpler for $c_{I,k}$. Namely, we will prove the following. \begin{proposition}\label{prop:niceform} Let $k\geq 3$, let $I\subseteq \mathbb{Z}^+$ be a finite set, let $t=\max(I)+k-1$, and let $r_k\in \mathbb{R}$ be as given in Theorem~\ref{thm:nasy}. Then \[c_{I,k}=\frac{1}{t! r_k^t}\int_0^1\varphi_k(x)\int_{0}^{x}\sum_{s=1}^t d_k(r_t(I),t+1-s,t)\Phi_{s}^t(y)\hspace{0.5mm}\mathrm{d}y\hspace{0.5mm}\mathrm{d}x.\] \end{proposition}
This might appear more complicated than the previous expression for $c_{I,k}$, but we have replaced a sum of possibly up to $t!$ terms with a sum of just $t$ terms which can be understood reasonably well (more on this after the proof).
\begin{proof} Consider \[\sum_{\tau\in \mathcal{D}_k(I,t)}\Phi_{\tau(t)}^t(y).\] Let us pick some $s$ and count the number of terms with $\tau(t)=s$. In other words, we are counting the number of permutations in $\mathcal{D}_k(I,t)$ that end with $s$. Under reverse-complementation, $\mathcal{D}_k(I,t)$ bijects with $\mathcal{D}_k(r_t(I),t)$ (see Subsection~\ref{subsec:general} for a reminder on this notation). Under the same map, the subset of $\mathcal{D}_k(I,t)$ of permutations ending with $s$ bijects with $\mathcal{D}_k(r_t(I),t+1-s,t)$. This gives the coefficients in our regrouped sum: \[\sum_{\tau\in \mathcal{D}_k(I,t)}\Phi_{\tau(t)}^t(y)=\sum_{s=1}^t d_k(r_t(I),t+1-s,t)\Phi_{s}^t(y).\] Making this replacement in \[c_{I,k}=\frac{1}{t! r_k^t}\int_0^1\varphi_k(x)\int_{0}^{x}\sum_{\tau\in \mathcal{D}_k(I,t)}\Phi_{\tau(t)}^t(y)\hspace{0.5mm}\mathrm{d}y\hspace{0.5mm}\mathrm{d}x\] gives us the desired result. \end{proof}
We finish this subsection by mentioning that the coefficients $d_k(r_t(I),t+1-s,t)$ can be found using a dynamic programming approach similar to that in Subsection~\ref{subsec:genfast}. This makes $c_{I,k}$ efficiently computable, assuming one can efficiently evaluate the numerical integrals involved.
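To illustrate, here is a crude Python sketch that evaluates $c_{I,3}$ directly from the formula of Theorem~\ref{thm:precdasy}, enumerating $\mathcal{D}_3(I,t)$ by brute force (reading $\mathcal{D}_k(I,t)$ as the set of permutations whose set of $k$-descent starting indices is exactly $I$) and approximating the double integral by a midpoint rule; the step count is an arbitrary choice. The printed ratio $c_{\{1\},3}/c_{\{2\},3}$ should come out close to the first limit quoted in the next subsection.
\begin{verbatim}
from itertools import permutations
from math import comb, exp, factorial, sin, sqrt, pi

r3 = 3 * sqrt(3) / (2 * pi)

def phi3(x):
    return (4 * pi / 9) * exp(-pi * x / (3 * sqrt(3))) * sin((x + 1) * pi / 3)

def Phi(s, t, y):
    return s * comb(t, s) * y ** (s - 1) * (1 - y) ** (t - s)

def kdescent_starts(w, k=3):
    return {i + 1 for i in range(len(w) - k + 1)
            if all(w[i + j] > w[i + j + 1] for j in range(k - 1))}

def c(I, k=3, steps=4000):
    t = max(I) + k - 1
    taus = [w for w in permutations(range(1, t + 1))
            if kdescent_starts(w, k) == set(I)]
    h = 1.0 / steps
    total = cdf = 0.0      # cdf approximates int_0^x sum_tau Phi(tau(t), t, y) dy
    for i in range(steps):
        x = (i + 0.5) * h
        cdf += sum(Phi(tau[-1], t, x) for tau in taus) * h
        total += phi3(x) * cdf * h
    return total / (factorial(t) * r3 ** t)

print(c({1}) / c({2}))     # approximately 1.132
\end{verbatim}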
\subsection{Finitely many cases of asymptotic down-up-down-up}
Zhu \cite{zhu2019enumerating} made the following conjecture.
\begin{conjecture}[Down-Up-Down-Up Conjecture \cite{zhu2019enumerating}]\label{conj:dudu}
For any $n\in \mathbb{Z}^+$, \[d_3(\{1\},n)>d_3(\{2\},n)<d_3(\{3\},n)>d_3(\{4\},n)<\cdots,\] where the sequence goes up to $\lceil n/2\rceil$. \end{conjecture}
By bounding $c_{\{1\},3}, c_{\{2\},3}, \ldots$ using numerical integration, we can prove the following partial result towards the Down-Up-Down-Up Conjecture. It is partial in the sense that it only proves the conjectured inequality between $d_3(\{i\},n)$ and $d_3(\{i+1\},n)$ for a finite set of $i$, and also in the sense that our result is only for large enough $n$.
\begin{theorem} There is $N\in \mathbb{Z}^+$ such that for all $n\geq N$, $d_3(\{1\},n)>d_3(\{2\},n)<d_3(\{3\},n)>d_3(\{4\},n)$. \end{theorem} \begin{proof} Using numerical integration software to evaluate the expression for $c_{I,k}$ in Proposition~\ref{prop:niceform}, we found that \[\lim_{n\to \infty} \frac{d_3(\{1\},n)}{d_3(\{2\},n)}\approx 1.132101, \hspace{5mm} \lim_{n\to \infty} \frac{d_3(\{2\},n)}{d_3(\{3\},n)}\approx 0.826993, \hspace{5mm} \lim_{n\to \infty}\frac{d_3(\{3\},n)}{d_3(\{4\},n)}\approx 1.043244.\] By Theorem~\ref{thm:dasy}, these limits exist and equal the ratios $c_{\{1\},3}/c_{\{2\},3}$, $c_{\{2\},3}/c_{\{3\},3}$, and $c_{\{3\},3}/c_{\{4\},3}$, respectively. Since the first and third are greater than $1$ and the second is less than $1$, the three claimed inequalities hold for all sufficiently large $n$. \end{proof}
\section{Asymptotic independence of the first and last element of a $k$-descent-avoiding permutation}\label{sec:joint} Theorem~\ref{thm:fmnasy} gives the asymptotic distribution of the first element of $k$-descent-avoiding permutations. Namely, this distribution is given by $\varphi_k(x)$. Upon reverse-complementing, we also get that the distribution of the last element is given by $\varphi_k(1-x)$. However, to show equidistribution (Theorem~\ref{thm:equistrong}) in the next section, we will want to make use of the joint distribution of the first and last element. It is clear that the first and last element cannot both be $m$. Other than that, the first and last element turn out to be asymptotically independent. In this section, we state and prove this -- the rigorous statement is Theorem~\ref{thm:joint}. The structure and content of this section closely resemble those of Section~\ref{sec:mnasy}. We start with a definition.
\begin{definition} For $k,n,m_1,m_2\in \mathbb{Z}^+$ with $1\leq m_1,m_2\leq n$, we let $f_k(m_1,m_2,n)$ be the number of $k$-descent-avoiding permutations $w\in S_n$ such that $w(1)=m_1$ and $w(n)=m_2$. \end{definition}
\subsection{A proposition on $f_k(m_1,m_2,n)$} We will now state a proposition which is an analog of Proposition~\ref{prop:fmn}, except instead of conditioning on just the value of the first element, we now simultaneously condition on values for the first and last element, $m_1$ and $m_2$. Instead of an equality, we now have to settle for two inequalities, but the lower and upper bound will be relatively close.
\begin{proposition}\label{prop:fm1m2} For $k\geq 3$, $n,m_1,m_2\in \mathbb{Z}^+$ with $1\leq m_1\leq m_2\leq n$, we have the following inequalities
\[ \sum_{\substack{\ell\\1\leq \ell \leq m_1\\ \ell\equiv 1 \pmod{k}}}\binom{m_1-2}{\ell-1}f_k\left(\min(n-\ell,n+1-m_2),n-\ell\right)\]\[ -\sum_{\substack{\ell\\k\leq \ell \leq m_1\\ \ell\equiv 0 \pmod{k}}} \binom{m_1-1}{\ell-1}f_k\left(\max(1,n-\ell+1-m_2),n-\ell\right)\] \[\leq f_k(m_1,m_2,n)\leq\] \[ \sum_{\substack{\ell\\1\leq \ell \leq m_1\\ \ell\equiv 1 \pmod{k}}}\binom{m_1-1}{\ell-1}f_k\left(\max(1,n-\ell+1-m_2),n-\ell\right)\]\[-\sum_{\substack{\ell\\k\leq \ell \leq m_1\\ \ell\equiv 0 \pmod{k}}}\binom{m_1-2}{\ell-1}f_k\left(\min(n-\ell,n+1-m_2),n-\ell\right)\] \end{proposition}
\begin{proof} We let $h_k(m_1,m_2,\ell,n)$ be the number of permutations $w\in S_n$ such that $w(1)=m_1$, $w(n)=m_2$, $w(1)>w(2)>\cdots>w(\ell)$, and the restriction of $w$ to the last $n-\ell$ indices contains no $k$-descents. We use the convention that for $\ell>n$, $h_k(m_1,m_2,\ell,n)=0$. Then by an argument essentially identical to that in the proof of Proposition~\ref{prop:fmn}, \[f_k(m_1,m_2,n)=\sum_{\substack{\ell\\1\leq \ell \leq m_1\\ \ell\equiv 1 \pmod{k}}} h_k\left(m_1,m_2,\ell,n\right)-\sum_{\substack{\ell\\k\leq \ell \leq m_1\\ \ell\equiv 0 \pmod{k}}} h_k\left(m_1,m_2,\ell,n\right)\]
We proceed to bound the terms. We claim that \[\binom{m_1-2}{\ell-1}f_k\left(\min(n-\ell,n+1-m_2),n-\ell\right)\leq h_k\left(m_1,m_2,\ell,n\right)\]\[\leq \binom{m_1-1}{\ell-1}f_k\left(\max(1,n-\ell+1-m_2),n-\ell\right).\] We first prove the lower bound. There are at least $\binom{m_1-2}{\ell-1}$ options for the initial $\ell$-element decreasing sequence (we let this be any choice of $\ell-1$ positive integers strictly less than $m_1$ and not equal to $m_2$), after which there are $f_k\left(n-\ell+1-p,n-\ell\right)$ options for the $k$-descent-avoiding restriction to the last $n-\ell$ elements, where $p$ is the value of the last element $m_2$ in the relative ordering of the last $n-\ell$ elements ($p$ depends on the choice of an $\ell$-element decreasing sequence). We have that $\max(1,m_2-\ell)\leq p\leq \min(n-\ell,m_2)$. Using this and the fact that $f_k(m,n)$ is non-increasing in $m$ (for constant $n$), we get \[f_k(\min(n-\ell,n+1-m_2),n-\ell)\leq f_k\left(n-\ell+1-p,n-\ell\right)\leq f_k\left(\max(1,n-\ell+1-m_2),n-\ell\right).\] Multiplying the lower bound on the number of $\ell$-element initial sequences with the lower bound on the number of options for the relative ordering of the last $n-\ell$ elements gives the desired lower bound on $h_k\left(m_1,m_2,\ell,n\right)$. The proof of the upper bound is similar, except now there are at most $\binom{m_1-1}{\ell-1}$ options for the initial decreasing sequence, and each leaves at most $f_k\left(\max(1,n-\ell+1-m_2),n-\ell\right)$ options for the relative ordering of the last $n-\ell$ elements.
The statement of the proposition follows from plugging in our bounds on $h_k(m_1,m_2,\ell,n)$ into our expression for $f_k(m_1,m_2,n)$. \end{proof}
\subsection{The asymptotic distribution of $f_k(m_1,m_2,n)$} We move on to stating the main asymptotic independence result.
\begin{theorem}\label{thm:joint} Fix $k\geq 3$. For $n,m_1,m_2\in \mathbb{Z}^+$ with $1\leq m_1, m_2\leq n$ and $m_1\neq m_2$, \[\frac{n^2 f_k(m_1,m_2,n)}{f_k(n)}=\varphi_k\left(\frac{m_1}{n}\right)\varphi_k\left(1-\frac{m_2}{n}\right)\left(1+O_k\left(n^{-0.49}\right)\right).\] \end{theorem}
The reason we are calling this an asymptotic independence theorem is that it follows from this and Theorem~\ref{thm:fmnasy} that for $m_1\neq m_2$ and $w\in \mathcal{D}_k(\emptyset,n)$ chosen uniformly at random, \[\mathbb{P}(w(1)=m_1,w(n)=m_2)\sim \mathbb{P}\left(w(1)=m_1\right)\cdot\mathbb{P}\left(w(n)=m_2\right).\]
The proof of Theorem~\ref{thm:joint} closely resembles the proof of Theorem~\ref{thm:fmnasy}, with Proposition~\ref{prop:fm1m2} in place of Proposition~\ref{prop:fmn} and Theorem~\ref{thm:fmnasy} complementing Theorem~\ref{thm:nasy}.
\begin{proof}[Proof of Theorem~\ref{thm:joint}] It suffices to show that $\frac{n^2 f_k(m_1,m_2,n)}{f_k(n)}$ is both lower and upper bounded by $\varphi_k\left(\frac{m_1}{n}\right)\varphi_k\left(1-\frac{m_2}{n}\right)\left(1+O_k\left(n^{-0.49}\right)\right)$. We start with the lower bound given by Proposition~\ref{prop:fm1m2},
\[\frac{n^2f_k(m_1,m_2,n)}{f_k(n)}\geq \]
\begin{align*}
&\frac{n^2}{f_k(n)}\sum_{\substack{\ell\\1\leq \ell \leq m_1\\ \ell\equiv 1 \pmod{k}}}\binom{m_1-2}{\ell-1}f_k\left(\min(n-\ell,n+1-m_2),n-\ell\right)\\
&-\frac{n^2}{f_k(n)}\sum_{\substack{\ell\\k\leq \ell \leq m_1\\ \ell\equiv 0 \pmod{k}}} \binom{m_1-1}{\ell-1}f_k\left(\max(1,n-\ell+1-m_2),n-\ell\right)=(*) \end{align*}
Using Theorem~\ref{thm:fmnasy}, we get
\begin{align*}
(*)=&\frac{n^2}{f_k(n)}\sum_{\substack{\ell\\1\leq \ell \leq m_1\\ \ell\equiv 1 \pmod{k}}}\binom{m_1-2}{\ell-1}\varphi_k\left(\frac{\min(n-\ell,n+1-m_2)}{n-\ell}\right)\frac{f_k(n-\ell)}{n-\ell}\left(1+O\left(n^{-0.49}\right)\right)\\ &-\frac{n^2}{f_k(n)}\sum_{\substack{\ell\\k\leq \ell \leq m_1\\ \ell\equiv 0 \pmod{k}}} \binom{m_1-1}{\ell-1}\varphi_k\left(\frac{\max(1,n-\ell+1-m_2)}{n-\ell}\right)\frac{f_k(n-\ell)}{n-\ell}\left(1+O\left(n^{-0.49}\right)\right). \end{align*}
Using Theorem~\ref{thm:nasy}, we then get \[=\sum_{\substack{\ell\\1\leq \ell \leq m_1\\ \ell\equiv 1 \pmod{k}}}\binom{m_1-2}{\ell-1}\varphi_k\left(\frac{\min(n-\ell,n+1-m_2)}{n-\ell}\right)\frac{n}{(n-1)(n-2)\cdots (n-\ell)r_k^{\ell}}\left(1+O\left(n^{-0.49}\right)\right)\]\[ - \sum_{\substack{\ell\\k\leq \ell \leq m_1\\ \ell\equiv 0 \pmod{k}}} \binom{m_1-1}{\ell-1}\varphi_k\left(\frac{\max(1,n-\ell+1-m_2)}{n-\ell}\right)\frac{n}{(n-1)(n-2)\cdots (n-\ell)r_k^{\ell}}\left(1+O\left(n^{-0.49}\right)\right).\] Now, by an argument like in the proof of Theorem~\ref{thm:fmnasy}, the terms with $\ell\geq \log n$ only contribute $O(1/n)$. We first consider the case $m_1\geq \sqrt{n}$, and focus on terms with $\ell\leq \log n$. We repeat some estimates from the proof of Theorem~\ref{thm:fmnasy}: \[\binom{m_1-1}{\ell-1}=\frac{m_1^{\ell-1}}{(\ell-1)!}\left(1+O\left(\frac{\ell^2}{m_1}\right)\right),\] \[\binom{m_1-2}{\ell-1}=\frac{m_1^{\ell-1}}{(\ell-1)!}\left(1+O\left(\frac{\ell^2}{m_1}\right)\right),\text{ and}\] \[\frac{n}{(n-1)(n-2)\cdots(n-\ell)}=n^{-\ell+1}\left(1+O\left(\frac{\ell^2}{n}\right)\right).\] Plugging these into the current expression for our lower bound, we get \begin{align*}
(*)=&O(1/n)+\frac{1}{r_k}\sum_{\substack{\ell\\1\leq \ell \leq \log n\\ \ell\equiv 1 \pmod{k}}}\varphi_k\left(\frac{\min(n-\ell,n+1-m_2)}{n-\ell}\right)\frac{\left(\frac{m_1}{n}\frac{1}{r_k}\right)^{\ell-1}}{(\ell-1)!}\left(1+O\left(n^{-0.49}\right)\right)\\ &- \frac{1}{r_k}\sum_{\substack{\ell\\k\leq \ell \leq \log n\\ \ell\equiv 0 \pmod{k}}} \varphi_k\left(\frac{\max(1,n-\ell+1-m_2)}{n-\ell}\right)\frac{\left(\frac{m_1}{n}\frac{1}{r_k}\right)^{\ell-1}}{(\ell-1)!}\left(1+O\left(n^{-0.49}\right)\right). \end{align*}
In $[0,1]$, $\varphi_k$ is bounded below by a constant greater than $0$, and the derivative of $\varphi_k$ is bounded as well, so $\varphi_k\left(\frac{\min(n-\ell,n+1-m_2)}{n-\ell}\right)=\varphi_k\left(1-\frac{m_2}{n}\right)\left(1+O\left(\frac{\ell}{n}\right)\right)$. We plug this in as well, getting \[(*)=O(1/n)+\frac{1}{r_k}\sum_{\substack{\ell\\1\leq \ell \leq \log n\\ \ell\equiv 1 \pmod{k}}}\varphi_k\left(1-\frac{m_2}{n}\right)\frac{\left(\frac{m_1}{n}\frac{1}{r_k}\right)^{\ell-1}}{(\ell-1)!}\left(1+O\left(n^{-0.49}\right)\right)\]\[ - \frac{1}{r_k}\sum_{\substack{\ell\\k\leq \ell \leq \log n\\ \ell\equiv 0 \pmod{k}}} \varphi_k\left(1-\frac{m_2}{n}\right)\frac{\left(\frac{m_1}{n}\frac{1}{r_k}\right)^{\ell-1}}{(\ell-1)!}\left(1+O\left(n^{-0.49}\right)\right).\]
This can now be approximated by a corresponding infinite sum, just like in the proof of Theorem~\ref{thm:fmnasy}. Skipping the identical steps, we get that the expression for our lower bound becomes \[(*)=O\left(n^{-0.49}\right)+\varphi_k\left(\frac{m_1}{n}\right)\varphi_k\left(1-\frac{m_2}{n}\right).\] Now, we can get this to our desired form by noting that $\varphi_k$ is lower bounded by a positive constant, \[(*)=\varphi_k\left(\frac{m_1}{n}\right)\varphi_k\left(1-\frac{m_2}{n}\right)\left(1+O\left(n^{-0.49}\right)\right).\] So far, we are only done proving the desired lower bound for $m_1\geq \sqrt{n}$. We now prove the desired lower bound for $m_1\leq \sqrt{n}$. We come back to the lower bound \[\sum_{\substack{\ell\\1\leq \ell \leq m_1\\ \ell\equiv 1 \pmod{k}}}\binom{m_1-2}{\ell-1}\varphi_k\left(\frac{\min(n-\ell,n+1-m_2)}{n-\ell}\right)\frac{n}{(n-1)(n-2)\cdots (n-\ell)r_k^{\ell}}\left(1+O\left(n^{-0.49}\right)\right)\]\[ - \sum_{\substack{\ell\\k\leq \ell \leq m_1\\ \ell\equiv 0 \pmod{k}}} \binom{m_1-1}{\ell-1}\varphi_k\left(\frac{\max(1,n-\ell+1-m_2)}{n-\ell}\right)\frac{n}{(n-1)(n-2)\cdots (n-\ell)r_k^{\ell}}\left(1+O\left(n^{-0.49}\right)\right).\] Consider just the first sum; it can be rewritten as \[\sum_{\substack{\ell\\1\leq \ell \leq m_1\\ \ell\equiv 1 \pmod{k}}}\frac{1}{(\ell-1)!}\varphi_k\left(\frac{\min(n-\ell,n+1-m_2)}{n-\ell}\right)\frac{(m_1-2)(m_1-3)\ldots(m_1-\ell)}{(n-2)\cdots (n-\ell)r_k^{\ell}}\left(1+O\left(n^{-0.49}\right)\right).\] Since $\frac{m_1-i}{n-i}\leq \frac{m_1}{n}\leq n^{-0.5}$, all terms after the first one can be upper bounded by a geometric series with first term $O(n^{-k/2})$ and ratio $O(n^{-k/2})$. The entire second sum can be upper bounded by a similar geometric series. All in all, this gives that the contribution of all terms other than the $\ell=1$ term is $O(1/n)$. Skipping a few steps again, we arrive at a lower bound of \[\frac{1}{r_k}\varphi_k\left(1-\frac{m_2}{n}\right)+O\left(n^{-0.49}\right).\] Since $\varphi_k$ has bounded derivative and $\varphi_k(0)=\frac{1}{r_k}$, this is \[\varphi_k\left(\frac{m_1}{n}\right)\varphi_k\left(1-\frac{m_2}{n}\right)+O\left(n^{-0.49}\right)+O\left(n^{-0.5}\right).\] As before, the desired lower bound follows using the fact that there is a positive lower bound on $\varphi_k$: \[\varphi_k\left(\frac{m_1}{n}\right)\varphi_k\left(1-\frac{m_2}{n}\right)+O\left(n^{-0.49}\right)=\varphi_k\left(\frac{m_1}{n}\right)\varphi_k\left(1-\frac{m_2}{n}\right)\left(1+O\left(n^{-0.49}\right)\right).\]
The argument for the upper bound is completely analogous, and will be skipped. This completes the proof. \end{proof}
\section{Asymptotic equidistribution}\label{sec:equi} \subsection{Concentration for discrete order statistics: a tale of more than one Chebyshev}
In this subsection, we prove a concentration result for certain discrete order statistics. Recall that according to Lemma~\ref{lem:os}, if we are choosing a subset of $t$ points from $[n]$, and $t$ stays constant while $n\to \infty$, then the $s$th smallest point behaves just like a uniform $[0,1]$ $s$th order statistic. The main takeaway from the next proposition is that if $n\to \infty$ and $t(n)\to \infty$ as well, then the $s$th order statistic is quite close to being constant (that is, it is concentrated).
\begin{proposition}\label{prop:conc} Let $s,t,n\in \mathbb{Z}^+$ with $1\leq s\leq t\leq n$. Let $\mathcal{Y}$ be a $t$-element subset of $[n]$ chosen uniformly from all $t$-element subsets of $[n]$. Let $Y_s$ be the $s$th smallest element of $\mathcal{Y}$. Then we have the following: \begin{enumerate}[label=(\arabic*)]
\item $\mathbb{E}[Y_s]=\frac{s}{t+1}(n+1)$.
\item $\mathrm{Var}(Y_s)\leq \frac{n^2}{t}$.
\item $\mathbb{P}(|Y_s-\frac{s}{t+1}(n+1)|\leq \frac{n}{t^{1/3}})\geq 1-t^{-1/3}$. \end{enumerate} \end{proposition}
Our proof of the first part of Proposition~\ref{prop:conc} is inspired by the following argument which I learned in MIT's Fall 2017 Putnam seminar (18.A34), taught by Yufei Zhao. Let $Z_1, Z_2,\ldots, Z_t$ be independent random variables, each chosen uniformly from the (continuous) interval $[0,1]$. Defining $Z=\min(Z_1,\ldots, Z_t)$, our goal is to show that $\mathbb{E}[Z]=\frac{1}{t+1}$. We let $Z'_0,Z'_1,\ldots, Z'_t$ be independent random variables, each chosen uniformly on a circle of length $1$. Note that if we cut the circle at $Z_0'$, i.e. making it a segment $[0,1]$ starting at $Z_0'$ and oriented according to some chosen orientation on the circle, the (joint) distribution of $Z_1', \ldots, Z_t'$ on this segment is the same as the distribution of $Z_1,Z_2,\ldots,Z_t$. Under this identification, the distance between $Z_0'$ and the next $Z_i'$ (in the direction given by this orientation on the circle) is equal to $Z$. For $0\leq i\leq t$, we define the random variable $D_i$ to be the distance between $Z_i'$ and the next $Z_j'$ (according to the same chosen orientation on the circle). Note that since the setup is symmetric under relabeling variables, $D_i$ and $D_j$ are identically distributed, so in particular, $\mathbb{E}[D_i]=\mathbb{E}[D_j]$. Going around the circle from some point $Z_i'$, we note that the segment from each $Z_j'$ to the next point is traversed exactly once, so the total distance is $1=D_0+D_1+\cdots+D_t$. Hence, $1=\mathbb{E}[D_0+\cdots+D_t]$. Together with linearity of expectation and our previous observation that expectations of any $D_i$ and $D_j$ are equal, this implies that $1=(t+1)\mathbb{E}[D_0]$, giving that $\mathbb{E}[D_0]=\frac{1}{t+1}$. One can extend this to the expectation of the $s$th smallest $Z_i$ being $\frac{s}{t+1}$ by replacing the $D_i$ in this argument with the distance between the $\ell$th and $(\ell+1)$th $Z_j'$ coming after $Z_i'$ on the circle, and summing over $\ell=0,\ldots,s-1$ at the end.
In our case, we can start with $n+1$ points on a circle, choose a subset of $t+1$ of these, and argue exactly as in the continuous case, getting that the expected size of any gap is $\frac{n+1}{t+1}$. For the sake of variety, we give a different short argument along similar lines.
\begin{proof}[Proof of Proposition~\ref{prop:conc} (1)] With $\mathcal{Y}$ as in the proposition statement, we let $Y_1, \ldots, Y_t$ be the elements of $\mathcal{Y}$ in order, and we define the random variables $D_1=Y_1, D_2=Y_2-Y_1, \ldots, D_{t}=Y_t-Y_{t-1}, D_{t+1}=(n+1)-Y_t$. Note that for any $D_1,\ldots, D_{t+1}$ with all $D_i\geq 1$ and $D_1+\cdots+D_{t+1}=n+1$, there is a unique set $\mathcal{Y}$, namely the one given by $Y_1=D_1, Y_2=D_1+D_2,\ldots, Y_t=D_1+\cdots+D_t$. So we get the same distribution for $\mathcal{Y}$ if we choose it by picking a uniformly random sequence of $(t+1)$ positive integers summing to $n+1$, $D_1,\ldots,D_{t+1}$, and then taking the corresponding $\mathcal{Y}$. But in this latter formulation in terms of $D_1,\ldots, D_{t+1}$, it is clear that any $D_i$ and $D_j$ are identically distributed. So $n+1=\mathbb{E}[D_1+\cdots+D_{t+1}]=(t+1)\mathbb{E}[D_i]$, from where $\mathbb{E}[D_i]=\frac{n+1}{t+1}$. Hence, $\mathbb{E}[Y_s]=\mathbb{E}[D_1+\cdots+D_s]=s\frac{n+1}{t+1}$, which is what we wanted to show. \end{proof}
For the second part, we use a method for finding certain binomial coefficient sums, along with some other tricks. Here is one of the other tricks, which will be useful for proving negative correlation of $D_i,D_j$.
\begin{lemma}[A partial Chebyshev's sum inequality]\label{lem:cheb} Suppose we have a weakly increasing sequence $a_1\leq \cdots \leq a_n$ and two weakly decreasing sequences $b_1 \geq \cdots \geq b_n\geq 0$ and $c_1\geq \cdots\geq c_n$. Then \[\left(\sum_i a_i b_i c_i\right)\left(\sum_i b_i\right)\leq \left(\sum_i a_i b_i\right)\left(\sum_i b_i c_i \right).\] Or equivalently, \[\sum_i a_i b_i c_i \leq \sum_i a_i b_i \frac{\sum_i b_i c_i}{\sum_i b_i}.\] \end{lemma} Let us first briefly explain why we are calling this a partial Chebyshev's sum inequality. For the sequences $a_1\leq \cdots \leq a_n$ and $b_1 c_1 \geq \cdots \geq b_n c_n$, Chebyshev's sum inequality gives the following: \[\sum_i a_i b_i c_i \leq \sum_i a_i \frac{\sum_i b_i c_i}{n}.\] In words, the weakly increasing sequence $a_i$ summed against the non-increasing sequence $b_i c_i$ is less or equal to $a_i$ summed against the average of the sequence $b_i c_i$. We can think of the second version of the inequality in Lemma~\ref{lem:cheb} as saying that $a_i$ summed against $b_i c_i$ is less or equal to $a_i$ summed against $b_i \frac{\sum_i b_i c_i}{\sum_i b_i}$, which is the unique sequence $b_i c$ with the same sum as $b_i c_i$. In other words, there is a sort of partial averaging (only $c_i$ is averaged out) of the sequence $b_i c_i$.
With this in mind, the most intuitive proof might be an inductive mass redistribution argument, but we instead give a short algebraic proof in the spirit of algebraic proofs of the rearrangement inequality and Chebyshev's sum inequality.
\begin{proof}[Proof of Lemma~\ref{lem:cheb}] Note that for any $i,j\in [n]$, we have $0\leq b_i b_j (a_j-a_i)(c_i-c_j)$. Expanding out, we get \[a_i b_i c_i b_j+a_j b_j c_j b_i\leq a_i b_i b_j c_j +a_j b_j b_i c_i.\] Summing up over all pairs $1\leq i\leq j\leq n$, we get the desired inequality.
\end{proof}
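A quick randomized check of Lemma~\ref{lem:cheb} in Python (purely illustrative; the value ranges and the number of trials are arbitrary):
\begin{verbatim}
import random

random.seed(0)
for _ in range(10000):
    n = random.randint(1, 6)
    a = sorted(random.uniform(-5, 5) for _ in range(n))
    b = sorted((random.uniform(0, 5) for _ in range(n)), reverse=True)
    c = sorted((random.uniform(-5, 5) for _ in range(n)), reverse=True)
    lhs = sum(x * y * z for x, y, z in zip(a, b, c)) * sum(b)
    rhs = sum(x * y for x, y in zip(a, b)) * sum(y * z for y, z in zip(b, c))
    assert lhs <= rhs + 1e-9
print("No counterexample found.")
\end{verbatim}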
As another remark, note that the special case $b_i=1$ is Chebyshev's sum inequality. We proceed to give the proof of part (2) of our concentration result.
\begin{proof}[Proof of Proposition~\ref{prop:conc} (2)] \[\mathrm{Var}(Y_s)=\mathrm{Var}(D_1+\cdots+D_s)=\sum_{1\leq i\leq s}\mathrm{Var}(D_i)+2\sum_{i<j\leq s}\mathrm{Cov}(D_i,D_j).\] Since the joint distribution of $D_i$ is symmetric in $i$, we can rewrite the above as \[\mathrm{Var}(Y_s)=s\mathrm{Var}(D_1)+s(s-1)\mathrm{Cov}(D_1,D_2).\]
We first consider $\mathrm{Var}(D_1)=\mathbb{E}[D_1^2]-\mathbb{E}[D_1]^2.$ From the proof of part (1), we have that \[\mathbb{E}[D_1]^2=\left(\frac{n+1}{t+1}\right)^2.\] As for $\mathbb{E}[D_1^2]$, we begin by writing down an explicit expression: \[\mathbb{E}[D_1^2]=\sum_{i=1}^{n}i^2\mathbb{P}(D_1=i)=\sum_{i=1}^n i^2 \frac{\binom{n-i}{t-1}}{\binom{n}{t}}=\frac{1}{\binom{n}{t}}\sum_{i=1}^n i^2\binom{n-i}{t-1}.\] We now write $i^2$ in terms of a suitable ``basis'' to replace the above sum with some simpler binomial coefficient sums. Namely, we use the ``basis'' of parameters that are not summed over, as well as terms of the form $(n-i+1)(n-i+2)\cdots (n-i+\ell)$, motivated by the fact that products of such terms and binomial coefficients are nice. We write $i^2=(n-i+1)(n-i+2)-(2n+3)(n-i+1)+(n+1)^2$. Plugging this into the sum, we get \[\mathbb{E}[D_1^2]=\frac{1}{\binom{n}{t}}\left(t(t+1)\sum_{i=1}^n \binom{n-i+2}{t+1}-(2n+3)t\sum_{i=1}^n\binom{n-i+1}{t}+(n+1)^2\sum_{i=1}^n\binom{n-i}{t-1}\right).\] We now use the hockey-stick identity $\sum_{i=r}^\ell \binom{i}{r}=\binom{\ell+1}{r+1}$ to find each sum. \[\mathbb{E}[D_1^2]=\frac{1}{\binom{n}{t}}\left(t(t+1)\binom{n+2}{t+2}-(2n+3)t\binom{n+1}{t+1}+(n+1)^2\binom{n}{t}\right).\] Expanding the binomial coefficients and canceling terms, \[\mathbb{E}[D_1^2]=(n+2)(n+1)\frac{t}{t+2}-(2n+3)(n+1)\frac{t}{t+1}+(n+1)^2.\] With some algebra, we get \[\mathbb{E}[D_1^2]=\frac{(n+1)(2n-t+2)}{(t+1)(t+2)}.\]
From our expressions for $\mathbb{E}[D_1]^2$ and $\mathbb{E}[D_1^2]$, some more algebra gives \[\mathrm{Var}(D_1)=\frac{t(n+1)(n-t)}{(t+1)^2(t+2)}\leq \frac{n^2}{t^2}.\]
We will next show that $D_1,D_2$ are negatively correlated, i.e. that $\mathrm{Cov}(D_1,D_2)\leq 0$. One could do this using the method for finding binomial coefficient sums outlined above, but we instead opt to give two other proofs.
The first proof of $\mathrm{Cov}(D_1,D_2)\leq 0$ uses Lemma~\ref{lem:cheb}. As $\mathrm{Cov}(D_1,D_2)=\mathbb{E}[D_1 D_2]-\mathbb{E}[D_1]\mathbb{E}[D_2]$, it is equivalent to show that $\mathbb{E}[D_1 D_2]\leq \mathbb{E}[D_1]\mathbb{E}[D_2]$. We have
\[\mathbb{E}[D_1 D_2]=\sum_{i=1}^n i\cdot \mathbb{P}(D_1=i)\cdot \mathbb{E}[D_2|D_1=i]\] and \[\mathbb{E}[D_1]\mathbb{E}[D_2]=\sum_{i=1}^n i \cdot \mathbb{P}[D_1=i]\cdot \mathbb{E}[D_2].\]
We start by noting that for $i=1,\ldots,n$, the sequence $a_i:=i$ is weakly increasing, the sequence $b_i:=\mathbb{P}(D_1=i)=\binom{n-i}{t-1}/\binom{n}{t}$ is weakly decreasing, and the sequence $c_i=\mathbb{E}[D_2|D_1=i]=\frac{n-i+1}{t}$ is also weakly decreasing.
Next, note that
\[\sum_i \mathbb{P}(D_1=i)\cdot\mathbb{E}[D_2|D_1=i]=\mathbb{E}[\mathbb{E}[D_2|D_1]]=\mathbb{E}[D_2]=\sum_{i}\mathbb{P}(D_1=i)\cdot\mathbb{E}[D_2],\]
so
\[\mathbb{E}[D_2]=\frac{\sum_i \mathbb{P}(D_1=i)\cdot\mathbb{E}[D_2|D_1=i]}{\sum_{i}\mathbb{P}(D_1=i)}.\] Translating this into $a_i,b_i,c_i$, we get \[\mathbb{E}[D_2]=\frac{\sum_i b_i c_i}{\sum_i b_i}.\] We can now express both $\mathbb{E}[D_1 D_2]$ and $\mathbb{E}[D_1]\mathbb{E}[D_2]$ in terms of $a_i,b_i,c_i$. Namely, the inequality we wish to prove is \[\sum_i a_i b_i c_i\leq \sum_i a_i b_i \frac{\sum_i b_i c_i}{\sum_i b_i}.\] But this inequality is precisely given by Lemma~\ref{lem:cheb}.
The second proof of $\mathrm{Cov}(D_1,D_2)\leq 0$ uses the following trick. Note that $D_1+\cdots+D_{t+1}=n+1$ is a constant, so \[0=\mathrm{Var}(D_1+\ldots+D_{t+1})=(t+1)\mathrm{Var}(D_1)+t(t+1)\mathrm{Cov}(D_1,D_2),\] from which \[\mathrm{Cov}(D_1,D_2)=\frac{-\mathrm{Var}(D_1)}{t}\leq 0.\]
Having shown that $\mathrm{Cov}(D_1,D_2)\leq 0$, we now finally come back to \[\mathrm{Var}(Y_s)=s\mathrm{Var}(D_1)+s(s-1)\mathrm{Cov}(D_1,D_2)\leq s\mathrm{Var}(D_1)\leq t\mathrm{Var}(D_1)\leq t\frac{n^2}{t^2}=\frac{n^2}{t}.\] This is what we wanted to show. \end{proof}
As a side remark, we note that using the second approach, one can also write down a precise equation for the covariance of $D_1$ and $D_2$, and hence the variance of $Y_s$: \[\mathrm{Cov}(D_1,D_2)=\frac{-\mathrm{Var}(D_1)}{t}=-\frac{(n+1)(n-t)}{(t+1)^2(t+2)},\] and hence
\begin{align*}
\mathrm{Var}(Y_s)&=s\mathrm{Var}(D_1)+s(s-1)\mathrm{Cov}(D_1,D_2)=s\left(1-\frac{s-1}{t}\right)\mathrm{Var}(D_1)\\
&=\frac{s(t-s+1)(n+1)(n-t)}{(t+1)^2(t+2)}. \end{align*}
Note that $\mathrm{Var}(Y_s)$ is maximal at $s=\lfloor (t+1)/2 \rfloor$ and $s=\lceil (t+1)/2\rceil$ (the middle order statistics are the least concentrated), which is what one might intuitively expect before making any calculations.
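Both the expectation and this variance formula are easy to confirm by exhaustive enumeration for small parameters; the following Python sketch (a check only; the parameters $n=12$, $t=5$, $s=3$ are arbitrary) does so with exact rational arithmetic.
\begin{verbatim}
from itertools import combinations
from fractions import Fraction

n, t, s = 12, 5, 3
values = [S[s - 1] for S in combinations(range(1, n + 1), t)]  # s-th smallest
count = len(values)
mean = Fraction(sum(values), count)
var = Fraction(sum(v * v for v in values), count) - mean * mean
assert mean == Fraction(s * (n + 1), t + 1)
assert var == Fraction(s * (t - s + 1) * (n + 1) * (n - t),
                       (t + 1) ** 2 * (t + 2))
print(mean, var)
\end{verbatim}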
The third part of Proposition~\ref{prop:conc} follows from the previous two parts using a more famous Chebyshev's inequality, which says that a variance bound implies concentration.
\begin{theorem}[Chebyshev's inequality] Let $X$ be a bounded real-valued random variable. Then for any real number $q>0$,
\[\mathbb{P}\left(|X-\mathbb{E}[X]|\geq q\sqrt{\mathrm{Var}(X)}\right)\leq q^{-2}.\] \end{theorem}
\begin{proof}[Proof of Proposition~\ref{prop:conc} (3)] This is just Chebyshev's inequality applied to $X=Y_s$ with $q=t^{1/6}$, followed by the fact that $\sqrt{\mathrm{Var}(Y_s)}\leq \frac{n}{\sqrt{t}}$ by part (2). \end{proof}
\subsection{The equidistribution theorem} In this subsection, we prove the following equidistribution theorem.
\begin{theorem}\label{thm:equistrong} Fix $k\geq 3$ and $a\in \mathbb{Z}^+$. There is a real constant $C_{k,a}>0$ such that for $n\in \mathbb{Z}^+$ and $I=\{i_1,i_2,\ldots, i_a\}\subseteq [n]$ with $i_1\geq \sqrt{n}$, $i_{j+1}-i_j\geq \sqrt{n}$ for all $j$ with $1\leq j\leq a-1$, and $n-i_a\geq \sqrt{n}$, we have \[d_k(I,n)=C_{k,a} f_k(n)\left(1+O_{k,a}\left(n^{-1/6}\right)\right).\] \end{theorem}
Calling this an equidistribution theorem is motivated by the fact that the constant $C_{k,a}$ only depends on $k$ and $a$ (and not on $I$, as long as $|I|=a$). In particular, we have the following immediate corollary. \begin{theorem}\label{thm:equi}
Fix $k\geq 3$ and $a\in \mathbb{Z}^+$. Let $n\in \mathbb{Z}^+$, $I_1,I_2\subseteq [n]$ with $|I_1|=|I_2|=a$, and no two elements of $I_1$ being closer to each other or $1$ or $n$ than $\sqrt{n}$, and similarly for $I_2$. Then \[\frac{d_k(I_1,n)}{d_k(I_2,n)}=1+O_{k,a}\left(n^{-1/6}\right).\] \end{theorem}
The proof of Theorem~\ref{thm:equistrong} uses a similar random permutation framework as Section~\ref{sec:dasy}. \begin{proof}[Proof of Theorem~\ref{thm:equistrong}]
Consider some $w\in \mathcal{D}_k(I,n)$. We define $v_1$ to be the restriction of $w$ to the first $t_1:=i_1-1$ indices. Then $v_1\in \mathcal{D}_k(\emptyset,t_1)$. The next $k$ indices carry the $k$-descent starting at $i_1$. We define $v_2$ to be the restriction of $w$ to the indices after that and before the next $k$-descent at $i_2$, i.e. $v_2$ is the restriction of $w$ to the $t_2:=i_2-i_1-k$ indices $i_1+k,i_1+k+1,\ldots,i_2-1$. In general, for any $j$ with $2\leq j\leq a$, we define $v_j$ to be the restriction of $w$ to the indices between the $(j-1)$th and $j$th $k$-descent -- these are the $t_j:=i_j-i_{j-1}-k$ indices $i_{j-1}+k,i_{j-1}+k+1,\ldots, i_j-1$. We let $v_{a+1}$ be the restriction of $w$ to the indices after the $a$th (and final) $k$-descent -- these are the $t_{a+1}:=n-i_a-k+1$ indices $i_a+k,i_a+k+1,\ldots, n$. Note that for all $i\in [a+1]$, we have $v_i\in \mathcal{D}_k(\emptyset,t_i)$.
We now switch gears to constructing a random permutation $w\in S_n$. We determine a permutation $w$ by randomly choosing various relative orderings and values. Specifically, the random process is the following. \begin{enumerate}[label=\arabic*.]
\item Deterministically fix the relative orderings of the prescribed $k$-descents. That is, for any $j\in [a]$, we fix the relative ordering of the elements $i_j,i_j+1,\ldots,i_j+k-1$ to be $k,k-1,\ldots,1$.
\item For each $i\in [a+1]$, independently pick a uniformly random $v_i\in \mathcal{D}_k(\emptyset,t_i)$. These will be the relative orderings of each of the $a+1$ blocks formed of indices not involved in a prescribed $k$-descent. That is, $v_1$ will be the restriction of $w$ to the indices $1,\ldots, i_1-1$, $v_2$ will be the restriction of $w$ to the indices $i_1+k, i_1+k+1,\ldots,i_2-1$, and so on, until $v_{a+1}$, which will be the restriction of $w$ to the indices $i_a+k, i_{a}+k+1,\ldots, n$.
\item Pick a uniformly random partition of $[n-a k]$ into parts of sizes $t_1,t_2,\ldots, t_{a+1}$. These will be the sets of values of each of our $a+1$ blocks in the relative ordering of the union of their indices. (At this point, we have determined the restriction of $w$ to the union of the $a+1$ $k$-descent-avoiding blocks.)
\item (1) Pick a $k$-element subset of $[n-(a-1)k]$. These will be the values of the first $k$-descent in the relative ordering on the union of the $[n-a k]$ indices from the previous part and the $k$ indices $i_1,i_1+1,\ldots,i_1+k-1$. Then (2) pick a $k$-element subset of $[n-(a-2)k]$ to be the values of the second prescribed $k$-descent in the relative ordering where the indices $i_2,i_2+1,\ldots,i_2+k-1$ are unioned in as well. Continue in this way up to and including ($a$): pick a $k$-element subset of $[n]$ to be the values of the last prescribed $k$-descent at $i_a, i_a+1,\ldots, i_a+k-1$ in the relative ordering of all $[n]$ indices. Having finished this, we have determined the permutation $w$. \end{enumerate} Figure~\ref{fig:blocks} shows the $k$-descents, $k$-descent-avoiding blocks, and relative orderings in an example $w$. \begin{figure}
\caption{$k=4$, $n=47$, and $w\in S_{47}$ is a permutation with $k$-descents starting at $i_1=9$, $i_2=27$, $i_3=39$. The number of $k$-descents is $a=3$, and the $3+1=4$ blocks avoiding $k$-descents are shown in red.}
\label{fig:blocks}
\end{figure}
By what we observed in the first paragraph of the proof, each permutation $w\in \mathcal{D}_k(I,n)$ can be generated by this process (in a unique way). However, not every permutation generated by this process is in $\mathcal{D}_k(I,n)$. In fact, a $w$ generated like this will be in $\mathcal{D}_k(I,n)$ if and only if each of the pre-determined $k$-descents is preceded and followed by an ascent. That is, for such $w$, \[w\in \mathcal{D}_k(I,n)\iff \forall j\in [a],\ w(i_j-1)<w(i_j)\text{ and }w(i_j+k-1)<w(i_j+k).\] Let us call this event $\mathcal{E}_{I,n}$. Then, by counting the total number of permutations that can be constructed with the described procedure, we get \[d_k(I,n)=\left(\prod_{j=1}^{a+1}f_k(t_j)\right)\frac{n!}{t_1!t_2!\cdots t_{a+1}!\left(k!\right)^a}\mathbb{P}(\mathcal{E}_{I,n}).\]
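As an aside (and purely as an illustration added here, not part of the argument), both the ``if and only if'' characterization and this counting identity can be checked by brute force for very small parameters. The Python sketch below enumerates $S_n$ for one small choice of $k$, $n$, and $I$; it computes $d_k(I,n)$ directly from the definition, counts the permutations producible by the four-step procedure together with those satisfying the ascent conditions, and compares the former count with $\left(\prod_{j}f_k(t_j)\right)\frac{n!}{t_1!\cdots t_{a+1}!\left(k!\right)^a}$.
\begin{verbatim}
# Brute-force check of the counting identity for small parameters.
# D_k(w) is the set of starting indices of k-descents of w,
# d_k(I,n) = |{w in S_n : D_k(w) = I}|, and f_k(m) = d_k(emptyset, m).
from itertools import permutations
from math import factorial, prod

def k_descent_starts(w, k):
    return {i + 1 for i in range(len(w) - k + 1)
            if all(w[i + j] > w[i + j + 1] for j in range(k - 1))}

def f(k, m):
    return sum(1 for w in permutations(range(1, m + 1))
               if not k_descent_starts(w, k))

k, n, I = 3, 7, [3]             # one prescribed 3-descent, at i_1 = 3
a = len(I)
t = [I[0] - 1] + [I[j] - I[j - 1] - k for j in range(1, a)] + [n - I[-1] - k + 1]

d_I_n = 0      # d_k(I,n), computed directly from the definition
N_total = 0    # permutations producible by the four-step procedure
N_good = 0     # ... that also satisfy the ascent conditions (event E_{I,n})
for w in permutations(range(1, n + 1)):
    if k_descent_starts(w, k) == set(I):
        d_I_n += 1
    descents_ok = all(all(w[s + j - 1] > w[s + j] for j in range(k - 1)) for s in I)
    blocks_ok, start = True, 1
    for s in I + [n + 1]:
        if k_descent_starts(w[start - 1:s - 1], k):
            blocks_ok = False
        start = s + k
    if descents_ok and blocks_ok:
        N_total += 1
        if all(w[s - 2] < w[s - 1] and w[s + k - 2] < w[s + k - 1] for s in I):
            N_good += 1

formula_total = (prod(f(k, tj) for tj in t) * factorial(n)
                 // (prod(factorial(tj) for tj in t) * factorial(k) ** a))
print(d_I_n, N_good)           # equal, by the "if and only if" above
print(N_total, formula_total)  # equal, by the counting of the procedure
\end{verbatim}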
We claim that it now suffices to show that \[\mathbb{P}(\mathcal{E}_{I,n})=C_{k,a}\left(1+O\left(n^{-1/6}\right)\right),\] where $C_{k,a}$ is a constant only depending on $a$ and $k$ (and not on $I$). Indeed, if this is given, then using this and Theorem~\ref{thm:nasy} on the previous expression for $d_k(I,n)$ we get:
\begin{align*}
\frac{d_k(I,n)}{f_k(n)}&=\frac{1}{f_k(n)}\left(\prod_{j=1}^{a+1}f_k(t_j)\right)\frac{n!}{t_1!t_2!\cdots t_{a+1}!\left(k!\right)^a}\mathbb{P}(\mathcal{E}_{I,n})\\
&=\frac{c_k^a}{k!r_k^{ak}}C_{k,a}\left(1+O\left(n^{-1/6}\right)\right) \end{align*}
We proceed to show that $\mathbb{P}(\mathcal{E}_{I,n})=C_{k,a}\left(1+O\left(n^{-1/6}\right)\right)$. Let us consider the state after completing step 2 of the above procedure. We claim that the joint distribution of the first and last elements in each relative ordering is asymptotically known. Namely, Theorem~\ref{thm:joint} together with independence of different blocks gives that for any sequence of integers \[m_{1,1},m_{2,1},m_{1,2},m_{2,2},m_{1,3},m_{2,3},\ldots, m_{1,a+1},m_{2,a+1}\] such that for all $j\in [a+1]$, \[m_{1,j}\neq m_{2,j} \hspace{3mm}\text{and}\hspace{3mm}1\leq m_{1,j},m_{2,j}\leq t_j,\] we have \[\mathbb{P}\left(v_1(1)=m_{1,1}, v_1(t_1)=m_{2,1},\ldots, v_{a+1}(1)=m_{1,a+1},v_{a+1}(t_{a+1})=m_{2,a+1}\right)=\] \[=\frac{\varphi_k\left(\frac{m_{1,1}}{t_1}\right)\varphi_k\left(1-\frac{m_{2,1}}{t_1}\right)\cdots\varphi_k\left(\frac{m_{1,a+1}}{t_{a+1}}\right)\varphi_k\left(1-\frac{m_{2,a+1}}{t_{a+1}}\right)}{t_1^2\cdots t_{a+1}^2}\left(1+O\left(n^{-0.49}\right)\right).\]
We now fix $m_{1,j},m_{2,j}$ for all $j$ and consider step 3. We call the values assigned to the beginning and end of each block in step 3 respectively $\ell_{1,1},\ell_{2,1}, \ldots, \ell_{1,a+1},\ell_{2,a+1}$. By our concentration result Proposition~\ref{prop:conc} (3), the probability that $\frac{\ell_{1,1}}{n}$ is within $n^{-1/6}$ of $\frac{m_{1,1}}{t_1}$ is at least $1-n^{-1/6}$. By union-bounding, the probability that the analogous statement holds for all $\ell_{1,j}$ and $\ell_{2,j}$ simultaneously is at least $1-O_{k,a}\left(n^{-1/6}\right)$. The other case has probability at most $O\left(n^{-1/6}\right)$, which will be included in the error term later.
We now claim that there is a continuous function $\theta_k(x,y)\colon [0,1]^2\to [0,1]$, continuously differentiable on $[0,1]^2$, such that the following holds. We fix any sequence $\ell_{1,1},\ell_{2,1}, \ldots, \ell_{1,a+1},\ell_{2,a+1}$ to be the values of first and last elements of blocks assigned in step 3. Then the probability that we get the desired ordering in step 4.(1), i.e. there is an ascent before and after the $k$-descent at $i_1$, is $\theta_k\left(\frac{\ell_{2,1}}{n},\frac{\ell_{1,2}}{n}\right)+O\left(n^{-0.5}\right)$.
The proof of this claim boils down to estimating binomial coefficients, and is similar to the proof of Lemma~\ref{lem:os}. Note that we are choosing $k$ elements from $[n-(a-1)k]$, for which there are a total of $\binom{n-(a-1)k}{k}$ options. We wish to count the number of options for which the largest of these $k$ elements is greater than the $\ell_{2,1}$th of the other $n-ak$ elements, which happens iff the largest is at least $\ell_{2,1}+k$; and simultaneously the smallest of these $k$ elements is at most the value of the $\ell_{1,2}$th of the other $n-ak$ elements, which happens iff the smallest is less than $\ell_{1,2}$. Motivated by this, let us now count the number of ways to pick a $k$-element subset of $[n]$ such that the largest element is greater than $\ell_2$ and the smallest element is at most $\ell_1$. The total number of choices of $k$ elements out of $[n]$ is $\binom{n}{k}$, out of which $\binom{\ell_2}{k}$ have largest element at most $\ell_2$, and $\binom{n-\ell_1}{k}$ have smallest element greater than $\ell_1$, with either $\binom{\ell_2-\ell_1}{k}$ or $0$ choices having both, depending on whether $\ell_2>\ell_1$. Hence, by inclusion-exclusion, the number of ways to pick a $k$-element subset for which the largest element is greater than $\ell_2$ and the smallest element is at most $\ell_1$ is $\binom{n}{k}-\binom{\ell_2}{k}-\binom{n-\ell_1}{k}+\mathbbm{1}_{\ell_2>\ell_1}\binom{\ell_2-\ell_1}{k}$. So the probability of such a choice is \[1-\frac{\binom{\ell_2}{k}}{\binom{n}{k}}-\frac{\binom{n-\ell_1}{k}}{\binom{n}{k}}+\mathbbm{1}_{\ell_2>\ell_1}\frac{\binom{\ell_2-\ell_1}{k}}{\binom{n}{k}}\] \[=1-\left(\frac{\ell_2}{n}\right)^k-\left(\frac{n-\ell_1}{n}\right)^k+\mathbbm{1}_{\ell_2>\ell_1}\left(\frac{\ell_2-\ell_1}{n}\right)^k+O\left(n^{-0.5}\right),\] where the binomial coefficients are estimated like in the proof of Lemma~\ref{lem:os} by splitting into cases according to whether we are choosing at most $\sqrt{n}$ or more than $\sqrt{n}$ elements. Now we note that the last expression is $\theta_k\left(\frac{\ell_2}{n}, \frac{\ell_1}{n}\right)+O\left(n^{-0.5}\right)$, where \[\theta_k(x,y)\colon [0,1]^2\to [0,1], \hspace{5mm} (x,y)\mapsto 1-x^k-(1-y)^k+\mathbbm{1}_{x>y}(x-y)^k.\]
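As a quick numerical check of this calculation (added here as an illustration only), one can compare the exact probability with its limit $\theta_k$:
\begin{verbatim}
# Compare the exact probability that a uniform k-subset of [n] has largest
# element > l2 and smallest element <= l1 with theta_k(l2/n, l1/n).
from math import comb

def exact_prob(n, k, l2, l1):
    bad_max = comb(l2, k)                  # largest element at most l2
    bad_min = comb(n - l1, k)              # smallest element greater than l1
    both = comb(l2 - l1, k) if l2 > l1 else 0
    return 1 - (bad_max + bad_min - both) / comb(n, k)

def theta(k, x, y):
    return 1 - x**k - (1 - y)**k + ((x - y)**k if x > y else 0.0)

n, k = 10**6, 4
for l2, l1 in [(700000, 300000), (200000, 900000), (500000, 500000)]:
    print(exact_prob(n, k, l2, l1), theta(k, l2 / n, l1 / n))
\end{verbatim}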
Note that $\theta_k$ is continuously differentiable on $[0,1]^2$, so there is a bound on its derivative (over all points in $[0,1]^2$ and all directions). When calculating this probability, the total number of elements was $n$ instead of $n-ak$ as in the initial setup, $\ell_{2,1}+k$ was replaced with $\ell_2+1$, and $\ell_{1,2}$ was replaced with $\ell_1-1$. However, all of these are $O_{k,a}\left(\frac{1}{n}\right)$ changes to an input of $\theta_k$, so since $\theta_k$ has bounded derivative, these contribute an $O\left(\frac{1}{n}\right)$ change to the value of $\theta_k$. Hence, the probability that we get the desired ordering in step 4.(1), i.e., there is an ascent before and after the $k$-descent at $i_1$, is $\theta_k\left(\frac{\ell_{2,1}}{n},\frac{\ell_{1,2}}{n}\right)+O\left(n^{-0.5}\right)$. The existence of such $\theta_k$ is what we wanted to show. We now further claim that conditioned on any choice in 4.(1), the probability that we get the desired ascents in step 4.(2), i.e., that there is an ascent before and after the $k$-descent at $i_2$, is also $\theta_k\left(\frac{\ell_{2,2}}{n},\frac{\ell_{1,3}}{n}\right)+O_{k,a}\left(n^{-0.5}\right)$, and so on (conditioning on any previous choices) until 4.($a$). The proof of this further claim is almost exactly the same, with the only additional observations being that the total number of elements is at most $ak$ away from $n$ at any step, and that an index which has value $\ell_{u,j}$ after step 3 of the process will have value at least $\ell_{u,j}$ and at most $\ell_{u,j}+ak$ when we get to deciding the values of the $k$-descent adjacent to it in step 4. But there is a uniform $O_{k,a}\left(1/n\right)$ bound on these changes to inputs, so again since $\theta_k$ has bounded derivative, (conditional on any sequence of choices in steps 4.(1), $\ldots$, 4.($j-1$),) the probability of getting the desired orderings in step 4.($j$) is $\theta_k\left(\frac{\ell_{2,j}}{n},\frac{\ell_{1,j+1}}{n}\right)+O_{k,a}\left(n^{-0.5}\right)$. The probability that we get the desired ordering in all steps $4.(1)$ up to $4.(a)$ is then \[\theta_k\left(\frac{\ell_{2,1}}{n},\frac{\ell_{1,2}}{n}\right)\cdot\theta_k\left(\frac{\ell_{2,2}}{n},\frac{\ell_{1,3}}{n}\right)\cdot \ldots \cdot \theta_k\left(\frac{\ell_{2,a}}{n},\frac{\ell_{1,a+1}}{n}\right)+O_{k,a}\left(n^{-0.5}\right).\]
As noted before, there is a probability of $1-O_{k,a}\left(n^{-1/6}\right)$ that in step 3, all $\frac{\ell_{u,j}}{n}$ are at most $n^{-1/6}$ away from $\frac{m_{u,j}}{t_j}$. Hence, with probability $1-O_{k,a}\left(n^{-1/6}\right)$, after choosing $m_{u,j}$, the probability that we get the desired ordering in all steps 4.(1) up to 4.($a$) is \[\theta_k\left(\frac{m_{2,1}}{t_1},\frac{m_{1,2}}{t_2}\right)\cdot\theta_k\left(\frac{m_{2,2}}{t_2},\frac{m_{1,3}}{t_3}\right)\cdot \ldots \cdot \theta_k\left(\frac{m_{2,a}}{t_a},\frac{m_{1,a+1}}{t_{a+1}}\right)+O_{k,a}\left(n^{-1/6}\right),\]
where we have again used the fact that $\theta_k$ has bounded derivative (together with the fact that any input is changed by at most $n^{-1/6}$). To find the overall probability, it remains to sum over sequences \[m_{1,1},m_{2,1},m_{1,2},m_{2,2},m_{1,3},m_{2,3},\ldots, m_{1,a+1},m_{2,a+1}\] such that for all $j\in [a+1]$, \[m_{1,j}\neq m_{2,j} \hspace{3mm}\text{and}\hspace{3mm}1\leq m_{1,j},m_{2,j}\leq t_j.\]
Namely, $\mathbb{P}\left(\mathcal{E}_{I,n}\right)$ is the sum over all such sequences of the probability that we get this sequence in step 2 times the probability of getting the right ordering in step $4$ conditional on having this sequence in step 2. We have already found both of these probabilities. Writing it out, we get \[\mathbb{P}\left(\mathcal{E}_{I,n}\right)=\]\[\sum_{\substack{(m_{u,j})_{(u,j)\in [2]\times [a+1]}\\ \forall j, m_{1,j}\neq m_{2,j}\\ \forall j, 1\leq m_{1,j},m_{2,j}\leq t_j}}\frac{\varphi_k\left(\frac{m_{1,1}}{t_1}\right)\varphi_k\left(1-\frac{m_{2,1}}{t_1}\right)\ldots\varphi_k\left(\frac{m_{1,a+1}}{t_{a+1}}\right)\varphi_k\left(1-\frac{m_{2,a+1}}{t_{a+1}}\right)}{t_1^2\cdots t_{a+1}^2}\left(1+O\left(n^{-0.49}\right)\right)\] \[\times \left(O_{k,a}\left(n^{-1/6}\right)+\left(1-O_{k,a}\left(n^{-1/6}\right)\right)\left(\theta_k\left(\frac{m_{2,1}}{t_1},\frac{m_{1,2}}{t_2}\right)\cdot \ldots \cdot \theta_k\left(\frac{m_{2,a}}{t_a},\frac{m_{1,a+1}}{t_{a+1}}\right)+O_{k,a}\left(n^{-1/6}\right)\right)\right).\]
The first $O_{k,a}\left(n^{-1/6}\right)$ term on the second line of this equation is the contribution of the choices in step $3$ where the gap between some $\frac{\ell_{u,j}}{n}$ and $\frac{m_{u,j}}{t_j}$ is more than $n^{-1/6}$. The $\left(1-O_{k,a}\left(n^{-1/6}\right)\right)$ term is the probability that in step $3$ all gaps satisfy $\left\lvert\frac{\ell_{u,j}}{n}-\frac{m_{u,j}}{t_j}\right\rvert\leq n^{-1/6}$.
We can now collect all error terms into one additive error term of $O_{k,a}\left(n^{-1/6}\right)$ at the front, getting \[\mathbb{P}\left(\mathcal{E}_{I,n}\right)=O_{k,a}\left(n^{-1/6}\right)\]\[+\sum_{\substack{(m_{u,j})_{(u,j)\in [2]\times [a+1]}\\ \forall j, m_{1,j}\neq m_{2,j}\\ \forall j, 1\leq m_{1,j},m_{2,j}\leq t_j}}\frac{\varphi_k\left(\frac{m_{1,1}}{t_1}\right)\varphi_k\left(1-\frac{m_{2,1}}{t_1}\right)\cdots\varphi_k\left(\frac{m_{1,a+1}}{t_{a+1}}\right)\varphi_k\left(1-\frac{m_{2,a+1}}{t_{a+1}}\right)}{t_1^2\cdots t_{a+1}^2}\] \[\times\theta_k\left(\frac{m_{2,1}}{t_1},\frac{m_{1,2}}{t_2}\right)\cdot \ldots \cdot \theta_k\left(\frac{m_{2,a}}{t_a},\frac{m_{1,a+1}}{t_{a+1}}\right).\]
Now consider the following integral: \[\int_{[0,1]^{2(a+1)}}\varphi_k(x_1)\varphi_k(1-y_1)\theta_k(y_1,x_2)\varphi_k(x_2)\varphi_k(1-y_2)\theta_k(y_2,x_3)\cdots\varphi_k(1-y_{a+1}).\] Note that the sum appearing in the expression for $\mathbb{P}\left(\mathcal{E}_{I,n}\right)$ is a Riemann sum for this integral with cells of shape $\frac{1}{t_1}\times \frac{1}{t_1}\times \frac{1}{t_2}\times \frac{1}{t_2}\times\cdots \times \frac{1}{t_{a+1}}\times\frac{1}{t_{a+1}}$ minus the terms coming from cells corresponding to $m_{1,j}=m_{2,j}$. The integrand is continuous on the compact domain $[0,1]^{2(a+1)}$, so it is bounded. Since all $t_j\geq \sqrt{n}$, the missing cells have total volume at most $(a+1)\frac{1}{\sqrt{n}}=O_{k,a}\left(n^{-0.5}\right)$. The last two sentences together imply that the contribution of missing terms is at most $O_{k,a}\left(n^{-0.5}\right)$. As for the difference between the Riemann sum and the integral, since the derivative of the integrand (at any point and in any direction) is bounded, the error from each cell is at most its volume times $O_{k,a}\left(n^{-0.5}\right)$, for a total error of $O_{k,a}\left(n^{-0.5}\right)$. Hence, the difference between the sum in the expression for $\mathbb{P}\left(\mathcal{E}_{I,n}\right)$ and the integral is at most $O_{k,a}\left(n^{-0.5}\right)+O_{k,a}\left(n^{-0.5}\right)=O_{k,a}\left(n^{-0.5}\right)$. Plugging this integral into the expression for $\mathbb{P}\left(\mathcal{E}_{I,n}\right)$, we arrive at
\[\mathbb{P}(\mathcal{E}_{I,n})=O_{k,a}\left(n^{-1/6}\right)+\int_{[0,1]^{2(a+1)}}\varphi_k(x_1)\varphi_k(1-y_1)\theta_k(y_1,x_2)\varphi_k(x_2)\varphi_k(1-y_2)\theta_k(y_2,x_3)\cdots\varphi_k(1-y_{a+1}).\]
Crucially, this last integral only depends on $k$ and $a$ (and not on $I$ or $n$); we take it to be the constant $C_{k,a}$. We can make the error term multiplicative by noting that this constant $C_{k,a}$ is greater than $0$. This was what remained to be proven. \end{proof}
\section{Open problems} One open problem is whether Conjecture~\ref{conj:dudu} can be proven for any $i$ (but asymptotically in $n$) with our approach. This would probably require a better understanding of the constants $c_{I,k}$. Another direction would be to prove versions of Theorem~\ref{thm:fmnasy}, Theorem~\ref{thm:dasy}, Theorem~\ref{thm:joint}, Theorem~\ref{thm:equistrong} with smaller error bounds. For Theorem~\ref{thm:equistrong}, one can also investigate the range of gap sizes between descents (in place of $\sqrt{n}$) for which an analog of the result holds, as well as try to find a good dependence of the error term on the gap size. Two more open problems are whether the discussion in Subsection~\ref{subsec:heuristic} can be made rigorous, and whether there is a way to simplify or explicitly understand the expression for $T_k(x,y)$ in Proposition~\ref{prop:genf}.
It also remains open whether this method can be generalized to other consecutive patterns in place of $k, k-1, \ldots, 1$. We expect that with an approach like the one in this paper, the main difficulty is in obtaining a nice expression for the analog of $\varphi_k(x)$, as we doubt that something as nice as Proposition~\ref{prop:fmn} will be available. However, we think that analogs of Theorem~\ref{thm:dasy} and Theorem~\ref{thm:equi} should still hold for other consecutive patterns. Before stating these as conjectures, we define some notation for other consecutive patterns. For $\pi\in S_k$ and $w\in S_n$, we let $P_\pi(w)$ be the set of starting indices of consecutive patterns $\pi$ in $w$.
\begin{definition} For $n\in \mathbb{Z}^+$ and $I\subseteq \mathbb{Z}^+$ a finite set, we let
\[\mathcal{P}_{\pi}(I,n)=\{w\in S_n\colon P_\pi(w)=I\}\hspace{5mm}\text{and}\hspace{5mm} p_\pi(I,n)=|\mathcal{P}_\pi(I,n)|.\] We call $\mathcal{P}_\pi(I,n)$ the consecutive-$\pi$-function. Furthermore, we let \[p_\pi(n):=p_{\pi}(\emptyset,n).\] \end{definition}
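To illustrate the definition (this small Python sketch is included only as an example), $P_\pi(w)$ can be computed by comparing the relative order of each length-$k$ window of $w$ with that of $\pi$:
\begin{verbatim}
# P_pi(w): starting indices of consecutive occurrences of the pattern pi in w.
def relative_order(seq):
    return tuple(sorted(range(len(seq)), key=lambda i: seq[i]))

def P(pi, w):
    k = len(pi)
    target = relative_order(pi)
    return {i + 1 for i in range(len(w) - k + 1)
            if relative_order(w[i:i + k]) == target}

# D_k(w) = P_{k,k-1,...,1}(w); for example:
print(P((3, 2, 1), (2, 5, 4, 3, 6, 1)))   # {2}: the only consecutive 321 starts at index 2
\end{verbatim}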
To connect this up with familiar notation, note that $D_k(w)=P_{k,\ldots,1}(w)$, $\mathcal{D}_k(I,n)=\mathcal{P}_{k,\ldots,1}(I,n)$, and $f_k(n)=p_{k,\ldots,1}(n)$. We conjecture the following analogue of Theorem~\ref{thm:dasy}.
\begin{conjecture} For any $k\geq 3$, any $\pi\in S_k$, and any finite $I\subseteq \mathbb{Z}^+$, there are constants $c_{\pi,I},\alpha\in \mathbb{R}$ with $\alpha>0$ such that \[p_{\pi}(I,n)=c_{\pi,I}p_\pi(n)\left(1+O\left(n^{-\alpha}\right)\right).\] \end{conjecture}
We conjecture the following analogue of Theorem~\ref{thm:equistrong}.
\begin{conjecture} Fix $k\geq 3$, $\pi\in S_k$, and $a\in \mathbb{Z}^+$. There are real constants $C_{\pi,a}>0$ and $\alpha>0$ such that for $n\in \mathbb{Z}^+$ and $I=\{i_1,i_2,\ldots, i_a\}\subseteq [n]$ with $i_1\geq \sqrt{n}$, $i_{j+1}-i_j\geq \sqrt{n}$ for all $j$ with $1\leq j\leq a-1$, and $n-i_a\geq \sqrt{n}$, we have \[p_\pi(I,n)=C_{\pi,a} p_\pi(n)\left(1+O_{\pi,a}\left(n^{-\alpha}\right)\right).\] \end{conjecture}
The following random process also seems interesting. We construct a random permutation that avoids $k$-descents by appending a new last element one at a time (with value falling between two previous values, chosen uniformly among the options that avoid $k$-descents). In this language, Theorem~\ref{thm:fmnasy} says that the distribution of the last element is given by $\varphi_k$, and Theorem~\ref{thm:joint} says that the correlation between the values of the first and last elements is asymptotically small. One can ask about other properties of this random process, such as whether any two points are asymptotically uncorrelated, and also investigate the formation of other patterns.
\section{Acknowledgments} This research was carried out under the MIT Math Department 2020 UROP+ summer research program. My mentor was Pakawut Jiradilok, whom I would like to thank for excellent mentorship and very many great ideas, as well as for his patience and understanding. I would also like to thank Wijit Yangjit for a helpful conversation on asymptotics of power series coefficients; Slava Gerovitch, David Jerison, and Ankur Moitra for organizing the UROP+ program; and the Meryl and Stewart Robertson UROP Fund for funding this project.
\end{document}
\begin{document}
\thispagestyle{empty} \begin{center} \textbf{\Large Identification of the diffusion parameter in\\
nonlocal steady diffusion problems}\footnote{This work was supported in part by the US National Science Foundation grant DMS-1315259.}
M. D'Elia\footnote{Currently at Sandia National Laboratories, NM. Email: {\tt [email protected]}. Sandia National Laboratories is a multi program laboratory managed and operated by Sandia Corporation, a wholly owned subsidiary of Lockheed Martin Corporation, for the U.S. Department of Energy's National Nuclear Security Administration under contract DE-AC04-94AL85000.} and M. Gunzburger
Florida State University, 400 Dirac Science Library, Tallahassee, FL, 32306, USA \end{center}
\begin{abstract} The problem of identifying the diffusion parameter appearing in a nonlocal steady diffusion equation is considered. The identification problem is formulated as an optimal control problem having a matching functional as the objective of the control and the parameter function as the control variable. The analysis makes use of a nonlocal vector calculus that allows one to define a variational formulation of the nonlocal problem. In a manner analogous to the local partial differential equations counterpart, we demonstrate, for certain kernel functions, the existence of at least one optimal solution in the space of admissible parameters. We introduce a Galerkin finite element discretization of the optimal control problem and derive a priori error estimates for the approximate state and control variables. Using one-dimensional numerical experiments, we illustrate the theoretical results and show that by using nonlocal models it is possible to estimate non-smooth and discontinuous diffusion parameters. \end{abstract}
\noindent{\it Keywords}: Nonlocal diffusion, nonlocal operator, fractional operator, vector calculus, control theory, optimization, parameter estimation, finite element methods.
\section{Introduction} Nonlocal models are currently used in a large variety of applications, including continuum mechanics \cite{chgu:11,sill:00}, graph theory \cite{lova:06}, nonlocal wave equations \cite{weab:05}, and jump processes \cite{bbck:09,bakk:10,burc:11}. Our interest is in nonlocal diffusion operators that arise in many fields where the phenomena cannot be modeled accurately by the standard classical diffusion equation. Among those applications we have image analyses \cite{bucm:10,gilboa:595,gilboa:1005,lzob:10}, machine learning \cite{robd:10}, nonlocal Dirichlet forms \cite{appl:04}, kinetic equations \cite{bass:84,limi:10}, phase transitions \cite{bach:99,fife:03}, and nonlocal heat conduction \cite{bodu:09}.
The principal difference between nonlocal models and the classical (local) partial differential equations (PDEs) is in how two or more domains interact. In the local case, interactions only occur due to contact, whereas in the nonlocal case they can occur at a distance. Consider an open bounded domain ${\Omega}$ in ${\mathbb{R}^n}$. For $u(\xb)\colon \Omega \to \mathbb{R}$, the action of the nonlocal diffusion operator $\mathcal{L}$ on the function $u(\xb)$ is defined as \begin{equation}\label{nldo}
\mathcal{L} u(\xb) := 2\int_{\mathbb{R}^n} \big(u(\yb)-u(\xb)\big) \, \vartheta(\xb,\yb)\,\gamma (\xb, \yb )\,d\yb \qquad \forall\,\xb \in {\Omega} \subseteq {\mathbb{R}^n}, \end{equation} where the kernel $\gamma (\xb, \yb )\colon\Omega\times\Omega\to\mathbb{R}$ is a non-negative symmetric mapping (i.e., $\gamma (\xb, \yb )=\gamma (\yb, \xb )$), and the diffusion parameter $\vartheta(\xb, \yb )\colon\Omega\times\Omega\to\mathbb{R}$ is a positive function. We are interested in the nonlocal steady-state diffusion equation \begin{equation}\label{nlde} \left\{\begin{array}{ll} -\mathcal{L} u = f &\qquad \mbox{on ${\Omega}$} \\ u = g &\qquad \mbox{on ${\Omega_{\mathcal I}}$}, \end{array}\right. \end{equation} where the equality constraint (the nonlocal counterpart of a Dirichlet boundary condition for PDEs) acts on an interaction volume ${\Omega_{\mathcal I}}$ that is disjoint from ${\Omega}$. Nonlocal diffusion problems such as \eqref{nlde} have been analyzed in the recent works \cite{akme:10,dglz:11,Du10,gule:10}, and techniques for an accurate numerical solution have been developed and applied to diverse applications \cite{chgu:11,Aksoylu,bule:11a,bule:12,Du11,spgl09,zhdu:10}. However, these mathematical models are not exact; parameters such as volume constraint data, diffusivity coefficients, and source terms are often unknown or subject to uncertainty. This fact affects the quality of the computational results and their reliability, making necessary the development of techniques for the identification of model parameters. In the classical local case, i.e., for PDEs, a widely used approach is to formulate the problem as an optimal control problem having the unknown parameters acting as control variables; here, we follow the same approach for nonlocal problems. Control problems for nonlocal diffusion equations have already been addressed in \cite{degu:13} where, for the same nonlocal model \eqref{nlde}, the control variable is the source term $f$; this results in a {\it linear} optimization problem constrained by the nonlocal equation. The case treated in this work is more complex (as its local counterpart is) because of its {\it nonlinearity} and thus requires more sophisticated techniques for analyzing the well-posedness and designing finite dimensional approximations. The estimation problem must rely on some additional a priori information, e.g., a target or reference function for the function of interest $u$. In our context, we formulate the parameter identification as the problem of finding $u$ and $\vartheta$ such that $u$ is as close as possible to a target function $\widehat{u}$ under the constraint that $u$ and $\vartheta$ satisfy the nonlocal problem \eqref{nlde}.
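To make the action of $\mathcal{L}$ concrete, the following minimal Python sketch (added here only as an illustration; the kernel is a bounded, compactly supported choice made for readability and is not one of the kernels analyzed below) approximates \eqref{nldo} in one spatial dimension by a simple quadrature rule on a uniform grid.
\begin{verbatim}
# Quadrature approximation of the 1D nonlocal operator
#   (L u)(x) = 2 * integral of (u(y) - u(x)) * theta(x,y) * gamma(x,y) dy,
# with a bounded kernel supported on |y - x| < eps (illustrative choice).
import numpy as np

eps = 0.1

def gamma(x, y):                 # bounded kernel supported on |y - x| < eps
    return np.where(np.abs(y - x) < eps, 3.0 / (2.0 * eps**3), 0.0)

def theta(x, y):                 # diffusion parameter (constant 1 in this example)
    return np.ones_like(x)

def apply_L(u, grid):
    h = grid[1] - grid[0]
    X, Y = np.meshgrid(grid, grid, indexing="ij")
    U_x, U_y = np.meshgrid(u, u, indexing="ij")
    integrand = (U_y - U_x) * theta(X, Y) * gamma(X, Y)
    return 2.0 * h * integrand.sum(axis=1)   # quadrature in y, one value per x

grid = np.linspace(-0.2, 1.2, 281)
u = grid**2
# for this kernel normalization, (L u)(x) is close to u''(x) = 2 away from
# the ends of the grid, up to quadrature and truncation error
print(apply_L(u, grid)[100:105])
\end{verbatim}
The normalization $3/(2\varepsilon^3)$ is chosen only so that, for smooth $u$, the computed values are easy to interpret (they are close to $u''$); the structure of the computation is what matters here.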
The main contribution of this work is to show that we can mimic the approaches of the classical control theory for PDEs and to show that by using nonlocal models one can obtain accurate estimates of non-smooth and discontinuous diffusion parameters, a case that often arises in practice.
Our analysis is based on the nonlocal vector calculus introduced in \cite{Du10} which is exploited to define a variational formulation of the nonlocal equation and for studying the well-posedness of the control problem. The use of the nonlocal calculus is crucial, as is the classical calculus in the PDE setting, to the analyses. In fact, it helps one avoid more cumbersome direct approaches to deriving the results we present in this work.
Relevant aspects of the nonlocal calculus are reviewed in Section \ref{nlvc}. In Section \ref{sec:control} we formulate the identification problem as a control problem and we demonstrate the existence of at least one optimal solution. In Section \ref{fin_dim_approx} we study finite dimensional approximations of the optimal state and control and, for finite element approximations, we derive a priori error estimates. In Section \ref{num_tests} we present numerical tests conducted on one-dimensional problems and illustrate the theoretical results of Section \ref{fin_dim_approx}. These numerical results serve as a base for two- and three-dimensional simulations and show that by using nonlocal models it is possible to accurately approximate non-smooth parameter functions. In Section \ref{conclusion} we provide a few concluding remarks.
\section{A nonlocal vector calculus and volume-constrained problems}\label{nlvc} For the sake of completeness, in this section we present the basic notions of the nonlocal vector calculus, recall some theoretical results that are useful to us in this work, and introduce nonlocal diffusion problems.
\subsection{Notation} A detailed introduction to the nonlocal vector calculus can be found in \cite{Du10}. Here, we limit the discussion to the tools that we use throughout the paper. For the vector mappings $\nub(\xb,\yb), \alphab(\xb,\yb) \colon {\mathbb{R}^n}\times{\mathbb{R}^n}\to {\mathbb{R}^n}$, with $\alphab$ antisymmetric (i.e., $\alpha (\xb, \yb )=-\alpha (\yb, \xb )$), the action of the nonlocal divergence operator $\mathcal{D}\colon {\mathbb{R}^n} \to \mathbb{R}$ on $\nub$ is defined as \begin{subequations}\label{ndivgrad} \begin{equation}\label{ndiv}
\mathcal{D}\big(\nub\big)(\xb) := \int_{{\mathbb{R}^n}} \big(\nub(\xb,\yb)+\nub(\yb,\xb)\big)\cdot\alphab(\xb,\yb)\,d\yb\qquad
\mbox{for $\xb\in{\mathbb{R}^n}$}. \end{equation}
The action of the adjoint operator ${\mathcal{D}^\ast}\colon {\mathbb{R}^n}\times{\mathbb{R}^n}\to{\mathbb{R}^n}$ on a mapping $u(\xb)\colon {\mathbb{R}^n}$ $\to\mathbb{R}$ is given by \begin{equation}\label{ngra} {\mathcal{D}^\ast}\big(u\big)(\xb,\yb) = -\big(u(\yb)-u(\xb)\big) \alphab(\xb,\yb) \qquad\mbox{for $\xb,\yb\in{\mathbb{R}^n}$}. \end{equation} \end{subequations} Thus, $-{\mathcal{D}^\ast}$ defines a nonlocal gradient operator. We define the nonlocal diffusion operator $\mathcal{L}\colon {\mathbb{R}^n} \to \mathbb{R}$ as the composition of the nonlocal divergence and gradient operators, i.e. $\mathcal{L} u := -\mathcal{D}\big(\vartheta \,{\mathcal{D}^\ast} u)$, where the diffusion parameter $\vartheta(\xb,\yb)$ is a positive symmetric function that maps ${\mathbb{R}^n}\times{\mathbb{R}^n}$ into $\mathbb{R}$. Then, \begin{equation} \hspace{-2cm}\mathcal{L} u (\xb) := -\mathcal{D}\big(\vartheta \,{\mathcal{D}^\ast} u)(\xb)= 2\int_{{\mathbb{R}^n}}\big(u(\yb)-u(\xb)\big)\,\vartheta(\xb,\yb) \,\gamma(\xb,\yb) \,d\yb\quad \hbox{for}\;\;\xb\in{\mathbb{R}^n} \end{equation} which is exactly the operator we introduced in \eqref{nldo}. Thus, the nonlocal calculus allows us to express the nonlocal diffusion operator \eqref{nldo} as a composition of a nonlocal divergence operator and a nonlocal gradient operator. Here the symmetric kernel $\gamma$ is such that $\gamma(\xb,\yb):=\alphab(\xb,\yb)\cdot \alphab(\xb,\yb)$\footnote{In \cite{dglz:11} the diffusivity is defined as a second order symmetric positive definite tensor $\bthe$ and the kernel is defined as $\gamma:=\alphab \cdot \left(\bthe\alphab\right)$. Here for simplifying the analysis of the identification problem we consider a scalar diffusivity and we do not include it in the definition of the kernel.}.
Given an open subset ${\Omega}\subset{\mathbb{R}^n}$, we define the interaction domain corresponding to ${\Omega}$ as $$
{\Omega_{\mathcal I}} := \{ \yb\in{\mathbb{R}^n}\setminus{\Omega} \quad\mbox{such that}\quad \alphab(\xb,\yb)\ne{\bf 0}\quad \mbox{for $\xb\in{\Omega}$}\}. $$ Thus, ${\Omega_{\mathcal I}}$ consists of those points outside of ${\Omega}$ that interact with points in ${\Omega}$.
\subsection{The kernel}\label{sec:kernel} We assume that the domains ${\Omega}$, ${\Omega_{\mathcal I}}$, and ${\omgs\cup\omgc}$ are bounded with piecewise smooth boundary and satisfy the interior cone condition. We also assume that the symmetric kernel satisfies \begin{equation}\label{gamma-conds} \left\{\begin{array}{ll}
\gamma(\xb,\yb) \geq 0 \quad &\forall\, \yb\in B_\varepsilon(\xb)\\[2mm]
\gamma(\xb,\yb) \ge \gamma_0 > 0 \quad &\forall\, \yb\in B_{\varepsilon/2}(\xb)\\[2mm]
\gamma(\xb,\yb) = 0 \quad &\forall\, \yb\in ({\omgs\cup\omgc}) \setminus B_\varepsilon(\xb) \end{array}\right. \end{equation}
for all $\xb\in{\omgs\cup\omgc}$, where $\gamma_0$ and $\varepsilon$ are given positive constants and $B_\varepsilon({\xb}) := \{ \yb \in{\omgs\cup\omgc} \colon |\yb-\xb|\le \varepsilon \}$; thus, nonlocal interactions are limited to a ball of radius $\varepsilon$ which is referred to as the interaction radius. This implies that \begin{equation}\label{omgie}
{\Omega_{\mathcal I}} = \{ \yb\in \mathbb{R}^n\setminus{\Omega} \,\,\,\colon\,\,\, |\yb-\xb|<\varepsilon \mbox{ for $\xb\in{\Omega}$}\}. \end{equation}
In \cite{dglz:11} (and also in \cite{akme:10, akpa:11,amrt:10}) several choices for the kernel $\gamma$ are considered and analyzed. Here, for the sake of brevity, we limit ourselves to two kernel classes. The results presented in this paper can be generalized to the other kernel functions considered in the above cited papers.
\noindent{\bf Case 1}. We further assume that there exist $s\in (0,1)$ and positive constants $\gamma_1$ and $\gamma_2$ such that, for all $\xb\in{\omgs\cup\omgc}$, \begin{equation}\label{case1}
\frac{\gamma_1}{|\yb-\xb|^{n+2s}} \leq \gamma(\xb,\yb) \leq \frac{\gamma_2}{|\yb-\xb|^{n+2s}} \qquad \mbox{for $\yb\in B_\varepsilon({\xb})$}. \end{equation} An example is given by $$
\gamma(\xb,\yb) = \frac{\sigma(\xb,\yb)}{|\yb-\xb|^{n+2s}} $$ with $\sigma(\xb,\yb)$ symmetric and bounded from above and below by positive constants.
\noindent{\bf Case 2}. In addition to \eqref{gamma-conds}, we assume that there exist positive constants $\gamma_3$ and $\gamma_4$ such that, for all $\xb\in{\omgs\cup\omgc}$, \begin{equation}\label{case3}
\frac{\gamma_3}{|\yb-\xb|^{n}} \leq \gamma(\xb,\yb) \leq \frac{\gamma_4}{|\yb-\xb|^{n}} \qquad \mbox{for $\yb\in B_\varepsilon({\xb})$}. \end{equation} An example for this case is given by $$
\gamma(\xb,\yb) = \frac{\xi(\xb,\yb)}{|\yb-\xb|^{n}} $$ with $\xi(\xb,\yb)$ symmetric and bounded from above and below by positive constants.
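For concreteness, in one dimension ($n=1$) the two kernel classes can be realized, for instance, as follows (a small Python sketch added for illustration; the choices $\sigma\equiv1$, $\xi\equiv1$ and the specific values of $\varepsilon$ and $s$ are assumptions of the example, not choices made in this paper).
\begin{verbatim}
# Illustrative 1D kernels for the two cases, supported on |y - x| < eps.
import numpy as np

eps, s = 0.1, 0.5        # interaction radius and fractional order (example values)

def gamma_case1(x, y):   # Case 1: gamma ~ |y - x|^{-(1 + 2s)}, with sigma = 1
    d = np.abs(y - x)
    return np.where(d < eps, np.maximum(d, 1e-15) ** (-(1.0 + 2.0 * s)), 0.0)

def gamma_case2(x, y):   # Case 2: gamma ~ |y - x|^{-1}, with xi = 1
    d = np.abs(y - x)
    return np.where(d < eps, 1.0 / np.maximum(d, 1e-15), 0.0)
\end{verbatim}
The small cutoff in \texttt{np.maximum} only guards against division by zero at $y=x$, where both kernels are singular.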
\subsection{Equivalence of spaces}\label{sec:equivsp} We define the nonlocal energy semi-norm, nonlocal energy space, and nonlocal volume-constrained energy space by \begin{subequations} \begin{equation}
|||v|||^2 := \int_{\omgs\cup\omgc}\int_{{\omgs\cup\omgc}}{\mathcal{D}^\ast}(v)(\xb,\yb )\cdot{\mathcal{D}^\ast}(v)(\xb,\yb )\,d\yb \, d\xb \label{energynorm} \end{equation} \begin{equation}
V({\omgs\cup\omgc}) := \left\{ v \in L^2({\omgs\cup\omgc}) \,\,:\,\, |||v||| < \infty \right\}\qquad\quad\; \label{vspace} \end{equation} \begin{equation} V_c({\omgs\cup\omgc}) := \left\{v\in V({\omgs\cup\omgc}) \,\,:\,\, v=0\;{\rm on}\;{\Omega_{\mathcal I}}\right\}.\qquad\;\;\label{vcspace} \end{equation} \end{subequations} In \cite{dglz:11}, it is shown that, for kernels satisfying \eqref{gamma-conds} and \eqref{case1}, the nonlocal energy space $V({\omgs\cup\omgc})$ is equivalent to the fractional-order Sobolev space $H^s({\omgs\cup\omgc})$\footnote{For $s\in(0,1)$ and for a general domain ${\widetilde\Omega}\in\mathbb{R}^n$, let $$
|v|_{H^s({\widetilde\Omega})}^2 :=
\int_{\widetilde\Omega}\int_{\widetilde\Omega}\frac{\big(v(\yb)-v(\xb)\big)^2}{|\yb-\xb|^{n+2s}}\,d\yb d\xb. $$ Then, the space $H^s({\widetilde\Omega})$ is defined by \cite{Adams} $
H^s({\widetilde\Omega}) := \left\{v\in L^2({\widetilde\Omega}) : \|v\|_{L^2({\widetilde\Omega})} +
|v|_{H^s({\widetilde\Omega})}<\infty\right\}. $
}. This implies that $V_c({\omgs\cup\omgc})$ is a Hilbert space equipped with the norm $|||\cdot|||$. In particular, we have \begin{equation}\label{equivalence}
C_1\|v\|_{H^s({\omgs\cup\omgc})}\leq |||v||| \leq C_2 \|v\|_{H^s({\omgs\cup\omgc})} \;\;\forall\,v\in V_c({\omgs\cup\omgc}) \end{equation}
for some positive constants $C_1$ and $C_2$. As a consequence, any result obtained below involving the energy norm $|||\cdot|||$ can be reinterpreted as a result involving the norm $\|\cdot\|_{H^s({\omgs\cup\omgc})}$. The energy space associated with kernels satisfying \eqref{gamma-conds} and \eqref{case3} is not equivalent to any Sobolev space; however, it is a separable Hilbert space and is a subset of $L^2({\omgs\cup\omgc})$. In both cases, the energy norm satisfies the nonlocal Poincar\'e inequality \begin{equation}
\|v\|_{L^2({\omgs\cup\omgc})}\leq C_p \, |||v||| \quad \forall\,v\in V_c({\omgs\cup\omgc}) \end{equation} where $C_p$ is the Poincar\'e constant.
We denote by $V_c^\prime({\Omega})$ the dual space of $V_c({\omgs\cup\omgc})$ with respect to the standard $L^2({\Omega})$ duality pairing; we define the norm on $V_c^\prime({\Omega})$ as $$
\|f\|_{V_c^\prime({\Omega})}:= \sup_{v\in V_c({\omgs\cup\omgc}),\,\,v\ne0}\,\,\frac {\int_{\Omega} fv\,d\xb }{ |||v|||_{V_c({\omgs\cup\omgc})}}. $$
Note that for Cases 1 and 2 we have that $V_c^\prime({\Omega})\subseteq L^2({\Omega})$ so that $\|f\|_{V_c^\prime({\Omega})}\le \|f\|_{L^2({\Omega})}$. In particular, for Case 1 we have that $V_c^\prime({\Omega})$ is equivalent to $H^{-s}({\Omega})$. We also define the volume ``trace'' space $\widetilde V({\Omega_{\mathcal I}}) := \left\{v|_{\Omega_{\mathcal I}} : v\in V({\omgs\cup\omgc})\right\}$ and an associated norm \begin{equation}\label{tracenorm}
\|g\|_{\widetilde V({\Omega_{\mathcal I}})} := \inf_{v\in V({\omgs\cup\omgc}),\,\,v|_{\Omega_{\mathcal I}}=g} |||v|||. \end{equation}
\subsection{Nonlocal volume constrained problems} We consider the nonlocal volume constrained problem \begin{equation} \label{eq:fwd_diffusion} \left\{ \begin{array}{ll}
\mathcal{D} (\vartheta {\mathcal{D}^\ast} u) = f & \qquad \forall \, \xb\in {\Omega}\\[2mm]
u = g & \qquad \forall \, \xb\in {\Omega_{\mathcal I}} \end{array}\right. \end{equation} where we assume that $u\in V_c({\omgs\cup\omgc})$, $f\in V'_c({\omgs\cup\omgc})$, $g\in \widetilde{V}({\Omega_{\mathcal I}})$, and $\vartheta\in \mathcal{C}$, where\footnote{The reason of this choice will be made clear in the following section.} \begin{equation} \hspace{-2cm}\mathcal{C} :=\{\vartheta\in W^{1,\infty}({\omgs\cup\omgc}\times{\omgs\cup\omgc}):\;\; 0<\vartheta_0\leq\vartheta\leq\vartheta_1<\infty,
\; \|\vartheta\|_{1,\infty}\leq C<\infty\}. \end{equation} Using the nonlocal vector calculus we can define the weak formulation of the problem \eqref{eq:fwd_diffusion} as \begin{equation}\label{weakf} \begin{array}{ll}
&\mbox{\em given $f\in V_c^\prime({\Omega})$, $g\in \widetilde V({\Omega_{\mathcal I}})$, and $\vartheta\in\mathcal{C}$, seek $u\in V({\omgs\cup\omgc})$ }\\[1mm]
&\mbox{\em such that $u=g$ for $\xb\in{\Omega_{\mathcal I}}$ and} \\[2mm]
&\displaystyle\int_{\omgs\cup\omgc}\int_{\omgs\cup\omgc} \vartheta \;{\mathcal{D}^\ast} u\cdot{\mathcal{D}^\ast} v\,d\yb d\xb = \int_{\Omega} fv\,d\xb
\qquad\forall\,v\in V_c({\omgs\cup\omgc}). \end{array} \end{equation} The problem \eqref{weakf} has a unique solution that depends continuously on the data \cite{dglz:11}.
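Before turning to the control problem, we illustrate how \eqref{weakf} can be solved numerically in one dimension. The sketch below is added only as an illustration; it is not the finite element discretization introduced and analyzed in Section \ref{fin_dim_approx}. It uses discontinuous piecewise-constant trial functions (admissible for the bounded kernel chosen here), a midpoint rule on pairs of cells to approximate the double integral, and the homogeneous volume constraint $g=0$.
\begin{verbatim}
# Piecewise-constant Galerkin sketch for the 1D state equation
#   B(u, v) = int_Omega f v,  u = 0 on the interaction domain,
# with a bounded illustrative kernel and a constant diffusion parameter.
import numpy as np

eps, h = 0.1, 0.01
mids = np.arange(-eps + h / 2, 1.0 + eps, h)   # cell midpoints covering Omega and Omega_I
interior = (mids > 0.0) & (mids < 1.0)         # cells in Omega = (0, 1)

def gamma(x, y):
    return np.where(np.abs(x - y) < eps, 3.0 / (2.0 * eps**3), 0.0)

def theta(x, y):
    return np.ones_like(x)

X, Y = np.meshgrid(mids, mids, indexing="ij")
G = theta(X, Y) * gamma(X, Y) * h * h          # cell-pair quadrature of theta * gamma
K = -2.0 * G                                   # K[i,j] ~ B(chi_j, chi_i) for i != j
np.fill_diagonal(K, 0.0)
row_sums = K.sum(axis=1)
np.fill_diagonal(K, -row_sums)                 # K[i,i] = 2 * sum_{j != i} G[i,j]

A = K[np.ix_(interior, interior)]              # volume constraint: u = 0 outside Omega
b = h * np.ones(interior.sum())                # right-hand side for f = 1
u = np.linalg.solve(A, b)
print(u.max())   # close to 1/8, the maximum of the corresponding local solution x(1-x)/2
\end{verbatim}
Here $\chi_i$ denotes the indicator function of the cell $C_i$; the matrix entries follow from expanding $\big(\chi_j(\yb)-\chi_j(\xb)\big)\big(\chi_i(\yb)-\chi_i(\xb)\big)$ in the bilinear form of \eqref{weakf}, which gives off-diagonal entries $-2\int_{C_i}\int_{C_j}\vartheta\gamma\,d\yb d\xb$ and diagonal entries $2\int_{C_i}\int_{({\omgs\cup\omgc})\setminus C_i}\vartheta\gamma\,d\yb d\xb$.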
\section{The optimal control problem}\label{sec:control} In this section we define the identification problem as a control problem for the nonlocal diffusion equation \eqref{eq:fwd_diffusion}. In a way similar to the local counterpart we demonstrate the existence of at least one solution\footnote{The non-uniqueness of the solution is not due to the nonlocality, the same result holds for the corresponding local PDE control problem.}.
The {\it state} and {\it control} variables are $u$ and the diffusivity $\vartheta$, respectively, which are related by the {\it state equation} \eqref{eq:fwd_diffusion}; the goal of the control problem is to minimize a cost functional which depends on the state and the control subject to the state equation being satisfied. We define the cost functional as \begin{equation}\label{eq:cost_func} J(u,\vartheta):=\displaystyle\frac{1}{2}\int_{{\Omega}} \big(u-\widehat u \big)^2\,d\xb, \end{equation} where $\widehat{u}\in L^2({\Omega})$ is a given function. Thus, we want to match as well as possible, in an $L^2({\Omega})$ sense, the target function $\widehat{u}$. Formally, we define the control problem as \begin{equation}\label{eq:min} \hspace{-2cm}\begin{minipage}{4.8in} {\em given $g\in \widetilde V({\Omega_{\mathcal I}})$, $f\in V_c'({\omgs\cup\omgc})$, and $\widehat u\in L^2({\Omega})$, seek an optimal control $\vartheta^\ast\in \mathcal{C}$ and an optimal state $u^\ast\in V({\omgs\cup\omgc})$ such that $J(u,\vartheta)$ given by \eqref{eq:cost_func} is minimized, subject to $u$ and $\vartheta$ satisfying \eqref{weakf}.} \end{minipage} \end{equation}
\subsection{Existence of an optimal control} We show that the optimization problem \eqref{eq:min} has at least one solution in the set of admissible parameters; {\color{blue}in this context, the nonlocal Poincar\'e inequality plays a fundamental role.} For simplicity, we consider $g=0$.
{\color{blue}In the proof of the main result we use the following lemma.
\begin{lemma}\label{infty-continuity} There exists a positive constant $C$, depending on $\vartheta_0$ and $f$, such that, for any solutions $u(\vartheta_i)$ to \begin{equation} \left\{\begin{array}{ll}
\mathcal{D}(\vartheta_i\mathcal{D}^*u) = f & \;\;\forall\; \xb\in{\Omega} \\[1mm]
u=0 & \;\;\forall\; \xb\in{\Omega_{\mathcal I}} \end{array}\right. \end{equation} for $i=a,b$, the following estimate holds: \begin{equation}\label{nl-homo}
|||u(\vartheta_a)-u(\vartheta_b)|||\leq C \|\vartheta_a - \vartheta_b\|_\infty. \end{equation} \end{lemma}
\noindent{\it Proof.} We first note that \begin{displaymath} \mathcal{D}\big(\vartheta_a\mathcal{D}^*(u(\vartheta_a)-u(\vartheta_b))\big) = \mathcal{D}\big((\vartheta_b-\vartheta_a)\mathcal{D}^*u(\vartheta_b)\big); \end{displaymath} multiplying both sides by $(u(\vartheta_a)-u(\vartheta_b))$ and integrating over ${\Omega}$ we have \begin{displaymath}
\displaystyle\int_{\Omega} \mathcal{D}\big(\vartheta_a\mathcal{D}^*(u(\vartheta_a)-u(\vartheta_b))\big) (u(\vartheta_a)-u(\vartheta_b)) \,d\xb = \int_{\Omega} \mathcal{D}\big((\vartheta_b-\vartheta_a)\mathcal{D}^*u(\vartheta_b)\big) (u(\vartheta_a)-u(\vartheta_b)) \,d\xb. \end{displaymath} The nonlocal Green's identity \cite{dglz:11} implies \begin{displaymath}
\displaystyle\int_{\omgs\cup\omgc}\int_{\omgs\cup\omgc} \vartheta_a\big(\mathcal{D}^*(u(\vartheta_a)-u(\vartheta_b))\big)^2 \,d\yb\,d\xb = \int_{\omgs\cup\omgc}\int_{\omgs\cup\omgc} (\vartheta_b-\vartheta_a) \mathcal{D}^*u(\vartheta_b)\cdot \mathcal{D}^*(u(\vartheta_a)-u(\vartheta_b)) \,d\yb\,d\xb. \end{displaymath} Thus, \begin{displaymath}
\vartheta_0 |||u(\vartheta_a)-u(\vartheta_b) |||^2 \leq \|\vartheta_a-\vartheta_b\|_\infty |||u(\vartheta_b) ||| \; |||u(\vartheta_a)-u(\vartheta_b) |||; \end{displaymath}
then, because the solution of \eqref{nl-homo} depends continuously upon the data, dividing both sides by $|||u(\vartheta_a)-u(\vartheta_b) |||$ we have \begin{displaymath}
|||u(\vartheta_a)-u(\vartheta_b) ||| \leq \displaystyle\frac{\|f\|_{V'_c({\Omega})}}{\vartheta_0}\|\vartheta_a-\vartheta_b\|_\infty. \end{displaymath} $\boxempty$}
\begin{thm} There exists at least one solution of the optimization problem \eqref{eq:min}. \end{thm}
\noindent{\it Proof.} The steps of the proof have been inspired by \cite{Banks} (chapter 6) and \cite{Jin}. We drop the explicit reference to the domain ${\omgs\cup\omgc}$ and denote $\int_{{\omgs\cup\omgc}}\int_{{\omgs\cup\omgc}}$ by $\int\int_{{\omgs\cup\omgc}}$.
We note that $\inf_{\vartheta\in\mathcal{C}} J(u(\vartheta),\vartheta)$ is bounded over the set $\mathcal{C}$ and, thus, there exists a minimizing sequence $\{\vartheta^n\}\subset \mathcal{C}$ such that $$ J(u(\vartheta^n),\vartheta^n)\rightarrow\inf\limits_{\vartheta\in\mathcal{C}} J(u(\vartheta),\vartheta) \quad {\rm in} \; \mathbb{R}, $$ where $u(\vartheta^n)$ denotes the solution of \eqref{weakf} with $\vartheta=\vartheta^n$. Because $W^{1,\infty}$ is compactly embedded in $L^\infty$, there exist $\vartheta^*\in \mathcal{C}$ and a subsequence, which we still denote by $\{\vartheta^n\}$, such that (see, e.g., \cite{Rudin}) \begin{equation}\label{eq:Lstrong} \vartheta^n \rightarrow\vartheta^* \quad {\rm in} \; L^\infty. \end{equation} Now, let $u ^n:=u (\vartheta^n)$; then, by definition \begin{equation}\label{eq:thetan_qn} \int\int_{{\omgs\cup\omgc}} \vartheta^n {\mathcal{D}^\ast} u ^n\cdot{\mathcal{D}^\ast} v \,d\yb d\xb = \int_{{\Omega}} f\,v \, d\xb \quad \forall \,v \in V_c. \end{equation} The well-posedness of problem \eqref{weakf} implies that there exists $u^\ast\in V_c$ and a subsequence, which we still denote by $\{u ^n\}$, such that \begin{equation}\label{eq:weak} u ^n\xrightarrow{w}u^\ast \quad {\rm in} \;V_c, \end{equation} where by $\xrightarrow{w}$ we mean weak convergence (see, e.g., \cite{Rudin}). Now we show that $u^\ast = u (\vartheta^*)$. For all $v\in V_c$ and for all $n$, \eqref{eq:thetan_qn} is equivalent to \begin{equation}\label{eq:thetan_qn_equivalent} \hspace{-2cm}\begin{array}{l} \displaystyle\int\int_{{\omgs\cup\omgc}} (\vartheta^n-\vartheta^*) {\mathcal{D}^\ast} u ^n\cdot{\mathcal{D}^\ast} v \,d\yb d\xb + \displaystyle\int\int_{{\omgs\cup\omgc}} \vartheta^* {\mathcal{D}^\ast} (u ^n-u^\ast)\cdot{\mathcal{D}^\ast} v \,d\yb d\xb \\[3mm] +\displaystyle\int\int_{{\omgs\cup\omgc}} \vartheta^* {\mathcal{D}^\ast} u^\ast\cdot {\mathcal{D}^\ast} v \,d\yb d\xb = \int_{{\Omega}} f\,v \, d\xb. \end{array} \end{equation} Property \eqref{eq:weak} implies that \begin{equation} \int\int_{{\omgs\cup\omgc}} \vartheta^* {\mathcal{D}^\ast} (u ^n-u^\ast)\cdot{\mathcal{D}^\ast} v \,d\yb d\xb \rightarrow 0 \end{equation} as $n\rightarrow \infty$. Furthermore, \eqref{eq:Lstrong} implies that \begin{equation}
\displaystyle\left|\int\int_{{\omgs\cup\omgc}} (\vartheta^n-\vartheta^*) {\mathcal{D}^\ast} u ^n\cdot{\mathcal{D}^\ast} v \,d\yb d\xb\right| \leq
\displaystyle\|\vartheta^n-\vartheta^*\|_\infty \int\int_{{\omgs\cup\omgc}} \left|{\mathcal{D}^\ast} u ^n\cdot{\mathcal{D}^\ast} v\right| \,d\yb d\xb \;\rightarrow 0 \end{equation} as $n\rightarrow\infty$. Hence, taking the limit in \eqref{eq:thetan_qn_equivalent} as $n\rightarrow\infty$ we have \begin{equation} \int\int_{{\omgs\cup\omgc}} \vartheta^* {\mathcal{D}^\ast} u^\ast \cdot{\mathcal{D}^\ast} v \,d\yb d\xb = \int_{{\Omega}} f\,v \, d\xb \end{equation} which gives, by definition, $u^\ast = u(\vartheta^*)$. {\color{blue}Next, we note that by choosing $\vartheta_a=\vartheta^n$ and $\vartheta_b=\vartheta^*$ in Lemma \ref{infty-continuity} we have that $\vartheta^n\to\vartheta^*$ in $L^\infty$ as $n\to\infty$ implies $u^n\to u^\ast$ in $V_c$; furthermore, the nonlocal Poincar\'e inequality implies $u^n\to u^\ast$ in $L^2$ as $n\to\infty$.} Thus, \begin{equation}\label{eq:proof} \begin{array}{ll} J(u^\ast,\vartheta^*) & = \displaystyle\frac{1}{2} \int_{{\Omega}} \left(u^\ast-\widehat u \right)^2\,d\xb =
\lim\limits_{n\rightarrow\infty}\frac{1}{2} \int_{{\Omega}} \left(u^n-\widehat u \right)^2\,d\xb \\[4mm]
& = \lim\limits_{n\rightarrow\infty} J(u^n,\vartheta^n) = \inf\limits_{\vartheta\in\mathcal{C}} J(u(\vartheta),\vartheta). \end{array} \end{equation} $\boxempty$
Next, for the optimal control problem \eqref{eq:min}, we give a necessary condition for the optimality of a solution. The Lagrangian functional for the problem \eqref{eq:min} is defined as \begin{equation}\label{eq:lagr} \hspace{-2cm}\begin{array}{l}
\displaystyle L(u ,w,\vartheta,\mu_0,\mu_1) = J(u,\vartheta) + \int_{{\Omega}} \big(-\mathcal{D} (\vartheta{\mathcal{D}^\ast} u ) + f \big) w\,d\xb +\\
\hspace{2cm}\displaystyle \int_{{\omgs\cup\omgc}}\int_{{\omgs\cup\omgc}}(\vartheta-\vartheta_0)\mu_0 \,d\yb d\xb +
\int_{{\omgs\cup\omgc}}\int_{{\omgs\cup\omgc}}(\vartheta_1-\vartheta)\mu_1 \,d\yb d\xb, \end{array} \end{equation} where the {\it adjoint} variable $w\in V_c({\omgs\cup\omgc})$ and $\mu_0,\,\mu_1\in \mathcal{C}$ are the Lagrangian multipliers. If $(u^\ast,\vartheta^*)$ is an optimal solution of \eqref{eq:min}, then, $u^\ast,\,w^*,\,\vartheta^*,\,\mu_0^*,$ and $\mu_1^*$ satisfy, respectively, the state, adjoint, optimality, and complementary equations \cite{itku:08} \begin{subequations} \begin{equation} \int_{{\omgs\cup\omgc}}\int_{{\omgs\cup\omgc}} \vartheta {\mathcal{D}^\ast} u^\ast \cdot{\mathcal{D}^\ast} v \,d\yb d\xb - \int_{{\Omega}} f\,v \, d\xb=0 \;\;\;\; \forall \,v \in V_c({\omgs\cup\omgc})\label{eq:weak_state}\qquad\qquad\qquad\qquad\\ \end{equation} \begin{equation} \int_{{\omgs\cup\omgc}}\int_{{\omgs\cup\omgc}} \vartheta {\mathcal{D}^\ast} w^* \cdot{\mathcal{D}^\ast}\psi \,d\yb d\xb - \int_{{\Omega}} (u^\ast-\widehat u)\,\psi \, d\xb=0 \;\;\;\; \forall \,\psi \in V_c({\omgs\cup\omgc})\label{eq:weak_adjoint}\qquad\qquad\\ \end{equation} \begin{equation} \int_{{\omgs\cup\omgc}}\int_{{\omgs\cup\omgc}} \left({\mathcal{D}^\ast} u^\ast \cdot{\mathcal{D}^\ast} w^*+ \mu_0^* - \mu_1^*\right)\varphi \,d\yb d\xb =0 \;\;\;\; \nonumber\\ \forall \varphi \in W^{1,\infty}({\omgs\cup\omgc}\times{\omgs\cup\omgc}) \label{eq:weak_opt}\\ \end{equation} \begin{equation} \int_{{\omgs\cup\omgc}}\int_{{\omgs\cup\omgc}} \mu_0^*\left(\vartheta^* - \vartheta_0\right) \,d\yb d\xb = 0 \label{eq:compl1}\\
\end{equation} \begin{equation} \int_{{\omgs\cup\omgc}}\int_{{\omgs\cup\omgc}} \mu_1^*\left(\vartheta_1 - \vartheta^*\right) \,d\yb d\xb = 0. \label{eq:compl} \end{equation} \end{subequations}
\section{Finite dimensional approximation}\label{fin_dim_approx} In this section we consider the convergence of solutions of finite dimensional discretizations of the optimal control problem. For finite element discretizations we also derive a priori error estimates for the state and control variables. We limit ourselves to Case 1 and we analyze the homogeneous Dirichlet problem.
We choose the families of finite dimensional subspaces \begin{equation}\label{conf} V^N({\omgs\cup\omgc}) \subset V({\omgs\cup\omgc}), \qquad W^M({\omgs\cup\omgc})\subset W^{1,\infty}({\omgs\cup\omgc}\times{\omgs\cup\omgc}) \end{equation} parametrized by integers $N,\,M\to\infty$, and then define the constrained finite dimensional subspace \begin{equation}\label{confc} V^N_c({\omgs\cup\omgc}) := V^N({\omgs\cup\omgc})\cap V_c({\omgs\cup\omgc})= \left\{v\in V^N({\omgs\cup\omgc}) \,\,:\,\, v=0\;{\rm on}\;{\Omega_{\mathcal I}}\right\}. \end{equation}
The usual choice for $N$ and $M$ is the dimension of the subspaces. We assume that, for any function $v\in V({\omgs\cup\omgc})$ and any function $\sigma\in W^{1,\infty}({\omgs\cup\omgc}\times{\omgs\cup\omgc})$, the sequence of best approximations with respect to the energy norm $|||\cdot|||$ and the $W^{1,\infty}$ norm, respectively, converges, i.e., \begin{equation}\label{baerror}
\lim\limits_{N\to\infty} \, \inf\limits_{v_N\in V^N} ||| v - v_N||| = 0
\qquad\forall\, v \in V({\omgs\cup\omgc}) \end{equation} and \begin{equation}\label{baerrorW}
\lim\limits_{M\rightarrow\infty} \, \inf\limits_{\sigma_M\in W^M}\|\sigma_M-\sigma\|_{1,\infty} = 0 \qquad\forall\, \sigma \in W^{1,\infty}({\omgs\cup\omgc}\times{\omgs\cup\omgc}). \end{equation} The admissible parameter sets are specified as \begin{equation}
\hspace{-2cm}\mathcal{C}^M=\{\vartheta_M\in W^M({\omgs\cup\omgc})\; {\rm s.t.} \;\; 0<\vartheta_0\leq\vartheta_M\leq\vartheta_1<\infty, \;\; \|\vartheta_M\|_{1,\infty}<C<\infty\}. \end{equation} We seek the Ritz-Galerkin approximations $u_N \in V^N_c({\omgs\cup\omgc})$ and $\vartheta_M\in\mathcal{C}^M$ determined by posing \eqref{eq:min} on $V^N_c({\omgs\cup\omgc})$ and $W^M({\omgs\cup\omgc})$. The finite dimensional state equation in a weak form is given by \begin{equation}\label{eq:weak_finite} \hspace{-2cm}\int_{{\omgs\cup\omgc}}\int_{{\omgs\cup\omgc}} \vartheta_M {\mathcal{D}^\ast} u_N \cdot{\mathcal{D}^\ast} v_N \,d\yb d\xb = \int_{{\Omega}} f\,v_N \, d\xb \qquad \forall \,v_N \in V^N_c({\omgs\cup\omgc}). \end{equation} As for the infinite dimensional case, by the Lax-Milgram theorem, for all $N$ and $M$, \eqref{eq:weak_finite} has a unique solution $u_N \in V_c^N({\omgs\cup\omgc})$ for all $\vartheta_M\in\mathcal{C}^M$. Then, the finite dimensional control problem is formulated as \begin{equation} \label{eq:min_finite} \hspace{-2cm} \min\limits_{\vartheta_M\in\mathcal{C}^M} J(u_N,\vartheta_M) :=\displaystyle\frac{1}{2}\int_{{\Omega}} \big(u_N - \widehat u \big)^2\,d\xb
\quad\mbox{such that \eqref{eq:weak_finite} is satisfied}. \end{equation} Using the same arguments as for the infinite dimensional problem it is possible to show that problem \eqref{eq:min_finite} has at least one solution.
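To give a sense of what such a finite dimensional control problem looks like in practice, the following self-contained Python sketch recovers a few piecewise-constant parameter values from synthetic data $\widehat u$. It is an illustration we add here and is deliberately much cruder than the discretization analyzed below: it uses a bounded kernel, piecewise-constant state unknowns, a diffusion parameter depending only on $(x+y)/2$, and a generic bound-constrained quasi-Newton method with numerical gradients.
\begin{verbatim}
# Minimal identification loop: minimize J(theta) = 1/2 ||u(theta) - u_hat||^2
# subject to the discrete state equation, over box constraints on theta.
import numpy as np
from scipy.optimize import minimize

eps, h = 0.1, 0.01
mids = np.arange(-eps + h / 2, 1.0 + eps, h)
interior = (mids > 0.0) & (mids < 1.0)
X, Y = np.meshgrid(mids, mids, indexing="ij")
GAM = np.where(np.abs(X - Y) < eps, 3.0 / (2.0 * eps**3), 0.0) * h * h
M = 4                                            # number of parameter cells on [0, 1]
cell = np.clip(((X + Y) / 2 * M).astype(int), 0, M - 1)

def solve_state(theta_vec):                      # piecewise-constant Galerkin solve
    K = -2.0 * theta_vec[cell] * GAM
    np.fill_diagonal(K, 0.0)
    row_sums = K.sum(axis=1)
    np.fill_diagonal(K, -row_sums)
    A = K[np.ix_(interior, interior)]
    return np.linalg.solve(A, h * np.ones(interior.sum()))

theta_true = np.array([1.0, 2.0, 0.5, 1.5])      # synthetic "true" parameter
u_hat = solve_state(theta_true)                  # target generated from it

def J(theta_vec):
    r = solve_state(theta_vec) - u_hat
    return 0.5 * h * np.dot(r, r)

res = minimize(J, x0=np.ones(M), method="L-BFGS-B",
               bounds=[(0.1, 10.0)] * M)         # theta_0 <= theta <= theta_1
print(res.x)                                     # ideally close to theta_true
\end{verbatim}
In this toy setting the target is attainable, since $J$ vanishes at the true parameter, so the recovered values should be close to \texttt{theta\_true}; in the numerical tests of Section \ref{num_tests} the unknown is a function rather than a handful of constants, and the discretization is the finite element one analyzed in this section.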
Next, we consider a finite element approximation for the case that both ${\omgs\cup\omgc}$ and ${\Omega}$ are polyhedral domains. We partition ${\omgs\cup\omgc}$ and ${\omgs\cup\omgc}\times{\omgs\cup\omgc}$ into finite elements for the discretization of $u$ and $\vartheta$ respectively and we denote by $h_u$ and $h_\vartheta$ the diameter of the largest element in each partition. We assume that the partitions are shape-regular and quasiuniform \cite{brsc:08} as the grid sizes $h_u,\,h_\vartheta\to 0$, i.e., as $N,\,M\to\infty$. We choose $V_{c}^N({\omgs\cup\omgc})$ and $W^M({\omgs\cup\omgc})$ to consist of piecewise polynomials of degree no more than $m$ and $l$, respectively, defined with respect to each grid. For some real constants $K_1,\,K_2$, and $K_3$, the following assumptions are made for the finite dimensional subspaces $V_c^N({\omgs\cup\omgc})$ and $W^M({\omgs\cup\omgc})$. For all $v\in V_c({\omgs\cup\omgc})\cap H^{m+t}({\omgs\cup\omgc})$, $s\in (0,\,1)$ and $t\in[s,\,1]$, there exists $\Pi^Vv\in V^N_c({\omgs\cup\omgc})$ such that \begin{equation}\label{eq:piV}
\|v-\Pi^Vv\|_{H^s({\omgs\cup\omgc})}\leq K_1 N^{-(m+t-s)}\|v\|_{H^{m+t}({\omgs\cup\omgc})}.\\ \end{equation} For all $\sigma\in W^{l+1,\infty}({\omgs\cup\omgc}\times{\omgs\cup\omgc})$ there exists $\Pi^W\sigma\in W^M({\omgs\cup\omgc})$ such that \begin{equation}\label{eq:piW}
\|\sigma-\Pi^W\sigma\|_{L^{\infty}({\omgs\cup\omgc}\times{\omgs\cup\omgc})}\leq K_2 M^{-(l+1)}\|\sigma\|_{l+1,\infty({\omgs\cup\omgc})}.\\ \end{equation} Also, we assume that for all $v_N\in V_c^N({\omgs\cup\omgc})$ the inverse inequality \begin{equation}\label{eq:inverse_ineq}
\|v_N\|_{H^s({\omgs\cup\omgc})}\leq K_3\,N^s\,\|v_N\|_{L^2({\omgs\cup\omgc})} \end{equation} holds. These properties are satisfied for wide classes of finite element spaces; see \cite[p. 121]{ciarlet} for \eqref{eq:piW} and \cite{ciarlet, graham} for \eqref{eq:inverse_ineq}. Throughout this section we let $K$ denote a generic constant, independent on $N$ and $M$, and we suppress explicit reference to the domain ${\omgs\cup\omgc}$. The proofs of lemmas and theorems are the nonlocal equivalent of \cite[Theorem VI.3.1]{Banks}.
First, we consider the following two lemmas.
\begin{lemma}\label{lemma1} Let $\vartheta^*\in\mathcal{C}$ be a solution of \eqref{eq:min} and let $u^\ast$ be the corresponding solution of \eqref{weakf}. Then, \begin{equation}\label{eq:lemma1}
\hspace{-2cm}|||u(\Pi^W \vartheta^*)-\Pi^Vu^\ast |||\leq \frac{C_2 C_u}{\vartheta_0} \|\Pi^W\vartheta^*-\vartheta^*\|_{L^\infty({\omgs\cup\omgc})} + \frac{\vartheta_1}{\vartheta_0} |||\Pi^Vu^\ast-u^\ast |||, \end{equation}
where $C_2$ is the equivalence constant in \eqref{equivalence} and $C_u$ is such that $\|u^*\|_{H^s({\omgs\cup\omgc})}\leq C_u$. \end{lemma}
{\it Proof.} Let $\widetilde\vartheta_M=\Pi^W\vartheta^*$ and $\widetilde{u}_N=u(\widetilde{\vartheta}_M)$. Observe that \begin{equation} \int\int_{{\omgs\cup\omgc}}\widetilde{\vartheta}_M{\mathcal{D}^\ast} \widetilde{u}_N\cdot{\mathcal{D}^\ast} v_N \,d\yb d\xb= \int_{{\Omega}} f\,v_N \,d\xb=\displaystyle\int\int_{{\omgs\cup\omgc}}\vartheta^*{\mathcal{D}^\ast} u^\ast\cdot{\mathcal{D}^\ast} v_N \,d\yb d\xb \end{equation} for all $v_N\in V_c^N$. Then, \begin{equation} \begin{array}{ll}
& \displaystyle\int\int_{{\omgs\cup\omgc}}\widetilde{\vartheta}_M{\mathcal{D}^\ast} (\widetilde{u}_N-\Pi^Vu^\ast)\cdot{\mathcal{D}^\ast} v_N \,d\yb d\xb \\[3mm]
= & \displaystyle\int_{{\Omega}} f\,v_N \,d\xb- \int\int_{{\omgs\cup\omgc}}\widetilde{\vartheta}_M{\mathcal{D}^\ast} (\Pi^Vu^\ast)\cdot{\mathcal{D}^\ast} v_N \,d\yb d\xb \\[3mm]
= & \displaystyle\int\int_{{\omgs\cup\omgc}}\vartheta^*{\mathcal{D}^\ast} u^\ast\cdot{\mathcal{D}^\ast} v_N \,d\yb d\xb -
\int\int_{{\omgs\cup\omgc}}\widetilde{\vartheta}_M{\mathcal{D}^\ast} (\Pi^Vu^\ast)\cdot{\mathcal{D}^\ast} v_N \,d\yb d\xb \\[3mm]
= & \displaystyle\int\int_{{\omgs\cup\omgc}}(\vartheta^*-\widetilde{\vartheta}_M){\mathcal{D}^\ast} u^\ast\cdot{\mathcal{D}^\ast} v_N \,d\yb d\xb -
\int\int_{{\omgs\cup\omgc}}\widetilde{\vartheta}_M{\mathcal{D}^\ast} (u^\ast-\Pi^Vu^\ast)\cdot{\mathcal{D}^\ast} v_N \,d\yb d\xb. \end{array} \end{equation} Now, we choose $v_N=\widetilde{u}_N-\Pi^Vu^\ast$, thus \begin{equation} \begin{array}{ll}
& \vartheta_0\,|||\widetilde{u}_N-\Pi^Vu^\ast |||^2 \\[2mm]
\leq & \left|\displaystyle\int\int_{{\omgs\cup\omgc}}\widetilde{\vartheta}_M{\mathcal{D}^\ast} (\widetilde{u}_N-\Pi^Vu^\ast)\cdot{\mathcal{D}^\ast} (\widetilde{u}_N-\Pi^Vu^\ast) \,d\yb d\xb \right| \\[3mm]
\leq & \displaystyle\left|\int\int_{{\omgs\cup\omgc}}(\vartheta^*-\widetilde{\vartheta}_M){\mathcal{D}^\ast} u^\ast\cdot{\mathcal{D}^\ast} (\widetilde{u}_N-\Pi^Vu^\ast) \,d\yb d\xb \right| \\[3mm]
+ & \displaystyle\left|\int\int_{{\omgs\cup\omgc}}\widetilde{\vartheta}_M{\mathcal{D}^\ast} (u^\ast-\Pi^Vu^\ast)\cdot{\mathcal{D}^\ast} (\widetilde{u}_N-\Pi^Vu^\ast) \,d\yb d\xb\right| \\[3mm]
\leq & \displaystyle\|\widetilde{\vartheta}_M-\vartheta^*\|_{L^\infty} \,|||u^\ast |||\,|||\widetilde{u}_N-\Pi^Vu^\ast ||| + \|\widetilde{\vartheta}_M\|_{L^\infty}
\,|||u^\ast-\Pi^Vu^\ast |||\,|||\widetilde{u}_N-\Pi^Vu^\ast ||| \\[2mm]
\leq & \displaystyle C_2 C_u \|\widetilde{\vartheta}_M-\vartheta^*\|_{L^\infty} |||\widetilde{u}_N-\Pi^Vu^\ast ||| + \vartheta_1 |||u^\ast-\Pi^Vu^\ast |||\,|||\widetilde{u}_N-\Pi^Vu^\ast |||. \end{array} \end{equation}
Dividing both sides by $|||\widetilde{u}_N-\Pi^Vu^\ast |||$ we obtain \eqref{eq:lemma1}. $\boxempty$
\begin{lemma}\label{lemma2} Let $m$ and $l$ be non-negative integers and let $s\in(0,\,1)$ and $t\in[s,\,1]$. Then, there exists a constant $K$, independent of $N$ and $M$, such that, for sufficiently large $N$ and $M$ and for sufficiently smooth $u^*$ and $\vartheta^*$ (defined as in Lemma \ref{lemma1}) \begin{equation}\label{lemma2res}
\hspace{-2cm}\inf\limits_{\vartheta_M\in W^M} \|u^\ast-u_N(\vartheta_M)\|_{L^2} \leq
K \left(M^{-(l+1)}\|\vartheta^*\|_{l+1,\infty}+ N^{-(m+t-s)} \|u^\ast\|_{H^{m+t}}\right). \end{equation} \end{lemma}
\noindent{\it Proof.} Let $\widehat u =\widetilde{u}_N$ (defined as in Lemma \ref{lemma1}); then the optimal adjoint variable $w^*$, the solution of \eqref{eq:weak_adjoint}, satisfies \cite{dglz:11} \begin{equation}\label{eq:fwd_ineq}
\|w^*\|_{H^s}\leq K \|\widetilde{u}_N-u^\ast\|_{L^2}. \end{equation} We consider equation \eqref{eq:weak_adjoint} and choose the test function $\widetilde{u}_N-u^\ast$; we then have $$ \begin{array}{ll}
& \|\widetilde{u}_N-u^\ast\|^2_{L^2}= \displaystyle\int_{{\Omega}} (\widetilde{u}_N-u^\ast)(\widetilde{u}_N-u^\ast)\,d\xb \\ [3mm]
= & \displaystyle \int\int_{{\omgs\cup\omgc}}\vartheta^* {\mathcal{D}^\ast} w^*\cdot{\mathcal{D}^\ast} (\widetilde{u}_N-u^\ast)\,d\yb d\xb \\[3mm]
= & \displaystyle\int\int_{{\omgs\cup\omgc}}(\vartheta^*-\widetilde{\vartheta}_M) {\mathcal{D}^\ast} w^*\cdot{\mathcal{D}^\ast} (\widetilde{u}_N-u^\ast)\,d\yb d\xb \\[3mm]
+ & \displaystyle\int\int_{{\omgs\cup\omgc}}\widetilde{\vartheta}_M {\mathcal{D}^\ast} w^*\cdot{\mathcal{D}^\ast} (\widetilde{u}_N-u^\ast)\,d\yb d\xb \\[3mm]
\leq & \displaystyle\|\vartheta^*-\widetilde{\vartheta}_M\|_{L^\infty}\,|||w^*|||\,|||\widetilde{u}_N-u^\ast||| + \|\widetilde{\vartheta}_M\|_{L^\infty}\,|||w^*|||\,|||\widetilde{u}_N-u^\ast||| \\[2mm]
\leq & \displaystyle K \left(\|\vartheta^*-\widetilde{\vartheta}_M\|_{L^\infty}\|\widetilde{u}_N-u^\ast\|_{L^2}|||\widetilde{u}_N-u^\ast|||
+ \vartheta_1\,\|\widetilde{u}_N-u^\ast\|_{L^2}|||\widetilde{u}_N-u^\ast|||\right), \end{array} $$
where we used \eqref{eq:fwd_ineq}. Dividing both sides by $\|\widetilde{u}_N-u^\ast\|_{L^2}$, we have $$ \begin{array}{ll}
\|\widetilde{u}_N-u^\ast\|_{L^2} & \leq K |||\widetilde{u}_N-u^\ast ||| \left(\|\vartheta^*-\widetilde{\vartheta}_M\|_{L^\infty} + \,\vartheta_1 \right)\\[2mm]
& \leq K\left(|||\widetilde{u}_N - \Pi^Vu^\ast||| + ||| \Pi^Vu^\ast-u^\ast||| \right) \left(\|\vartheta^*-\widetilde{\vartheta}_M\|_{L^\infty} + \,\vartheta_1 \right). \end{array} $$ Using Lemma \ref{lemma1}, \eqref{eq:piV}, and \eqref{eq:piW}, we obtain $$ \begin{array}{l}
\|\widetilde{u}_N-u^\ast\|_{L^2}\leq \left(\displaystyle\frac{C_2 C_u}{\vartheta_0} \|\widetilde{\vartheta}_M-\vartheta^*\|_{L^\infty}
+ \left(1+\frac{\vartheta_1}{\vartheta_0}\right) |||\Pi^Vu^\ast-u^\ast ||| \right) \\[2mm]
\qquad\left(\|\vartheta^*-\widetilde{\vartheta}_M\|_{L^\infty} + \,\vartheta_1 \right) \\[5mm]
\leq K\left( \|\widetilde{\vartheta}_M-\vartheta^*\|^2_{L^\infty} + \|\widetilde{\vartheta}_M-\vartheta^*\|_{L^\infty}\right. \\[1mm]
\qquad\left. + \|\widetilde{\vartheta}_M-\vartheta^*\|_{L^\infty} |||\Pi^Vu^\ast-u^\ast ||| + |||\Pi^Vu^\ast-u^\ast ||| \right) \\[5mm]
\leq K \left(K^2_2 M^{-2(l+1)}\|\vartheta^*\|^2_{l+1,\infty}+ K_2 M^{-(l+1)}\|\vartheta^*\|_{l+1,\infty} \right. \\[2mm]
\qquad\left. + K_1 K_2 M^{-(l+1)}N^{-(m+t-s)}\|\vartheta^*\|_{l+1,\infty} \|u^\ast\|_{H^{m+t}} +
K_1 N^{-(m+t-s)}\|u^\ast\|_{H^{m+t}} \right) \\[5mm]
\leq K \left(M^{-(l+1)}\|\vartheta^*\|_{l+1,\infty}+ N^{-(m+t-s)} \|u^\ast\|_{H^{m+t}}\right). \end{array} $$
Now, because $\inf_{\vartheta_M\in W^M} \|u^\ast-u_N(\vartheta_M)\|_{L^2}\leq \|\widetilde{u}_N-u^\ast\|_{L^2}$, \eqref{lemma2res} follows. $\boxempty$
Finally, we can state the main theorem, which provides an estimate of the approximation error for the control variable $\vartheta$.
\begin{thm}\label{th:param_apriori} Let $m$ and $l$ be non-negative integers and let $s\in(0,\,1)$ and $t\in[s,\,1]$. Assume that for $f\in H^{m+t}({\Omega})$ and $\widehat u \in L^2({\Omega})$, $\vartheta^*\in W^{l+1,\infty}({\omgs\cup\omgc}\times{\omgs\cup\omgc})$ is a solution of \eqref{eq:min} and $u(\vartheta^*) = u^\ast \in V_c({\omgs\cup\omgc}) \cap H^{m+t}({\omgs\cup\omgc})$ is the corresponding state. Then, there exists a constant $K$, independent of $N$ and $M$, such that, for every solution ${\vartheta}_M^*$ of \eqref{eq:min_finite} \begin{equation}\label{main_thm} \begin{array}{ll}
\qquad\displaystyle\int\int_{{\omgs\cup\omgc}}\left|\vartheta^*-{\vartheta}_M^*\right|{\mathcal{D}^\ast} u^\ast\cdot{\mathcal{D}^\ast} u^\ast\,d\yb d\xb &
\leq \;K \left(N^s dist(\widehat u ,\mathcal{V}) \right.\\[2mm]
& +\; N^{-(m+t-2s)} \|u^\ast\|_{H^{m+t}({\omgs\cup\omgc})}\\[2mm]
& +\;N^s M^{-(l+1)} \|\vartheta^*\|_{l+1,\infty} \\[2mm]
& + \left. N^{-(m+t-s)} \|f\|_{H^{m+t}({\omgs\cup\omgc})}\right), \end{array} \end{equation} where $\mathcal{V} = \{u(\vartheta)\,:\, \vartheta\in\mathcal{C}\}$. Furthermore, if $\widehat u \in \mathcal{V}$, then \begin{equation} \begin{array}{ll}
\displaystyle\int\int_{{\omgs\cup\omgc}}\left|\vartheta^*-{\vartheta}_M^*\right|{\mathcal{D}^\ast} u^\ast\cdot{\mathcal{D}^\ast} u^\ast\,d\yb d\xb
& \leq \;K \left(N^{-(m+t-2s)} \|u^\ast\|_{H^{m+t}({\omgs\cup\omgc})} \right. \\[2mm]
& + \;N^s M^{-(l+1)} \|\vartheta^*\|_{l+1,\infty} \\[2mm]
& + \left. N^{-(m+t-s)} \|f\|_{H^{m+t}({\omgs\cup\omgc})}\right). \end{array} \end{equation} \end{thm}
\noindent{\it Proof.} Let $R_1 := \{(\xb,\,\yb) : \vartheta^* - {\vartheta}_M^* \geq 0 \}$ and $R_2 := ({\omgs\cup\omgc}\times{\omgs\cup\omgc})\backslash R_1$ and define the function $\chi:{\omgs\cup\omgc}\times{\omgs\cup\omgc}\rightarrow\mathbb{R}$ by $$ \chi(\xb,\yb) := \left\{ \begin{array}{rl} 1 & (\xb,\,\yb)\in R_1\\ -1 & (\xb,\,\yb)\in R_2. \end{array}\right. $$ Recall that $$ \mathcal{D}(\vartheta^* {\mathcal{D}^\ast}u^\ast) = f \quad \hbox{and} \quad \mathcal{D}({\vartheta}_M^* {\mathcal{D}^\ast} u^*_N) = \Pi^N f, $$ where by $\Pi^N$ we denote the orthogonal $L^2$-projection onto $V_c^N({\omgs\cup\omgc})$. Therefore $$ \mathcal{D}((\vartheta^*-{\vartheta}_M^*) {\mathcal{D}^\ast}u^\ast) = \mathcal{D}({\vartheta}_M^* {\mathcal{D}^\ast} (u_N^*-u^\ast)) + f-\Pi^N f. $$ Taking the inner product with the function $u^\ast\chi$, we have $$ \begin{array}{l}
\displaystyle-\int_{{\Omega}}\mathcal{D}(|\vartheta^*-{\vartheta}_M^*| {\mathcal{D}^\ast} u^\ast)u^\ast\,d\yb d\xb\\[3mm]
= \displaystyle \int_{{\Omega}}\mathcal{D}({\vartheta}_M^* {\mathcal{D}^\ast}(u_N^*-u^\ast))u^\ast\chi \,d\yb d\xb + \int_{{\Omega}}(f-\Pi^N f)u^\ast\chi\,d\xb. \end{array} $$ Thus, we write \begin{equation}\label{eq:first_ineq} \begin{array}{ll}
& \displaystyle\int\int_{{\omgs\cup\omgc}}|\vartheta^*-{\vartheta}_M^*|{\mathcal{D}^\ast} u^\ast \cdot{\mathcal{D}^\ast} u^\ast\,d\yb d\xb \\[3mm]
\leq &\left|\displaystyle\int\int_{{\omgs\cup\omgc}}{\vartheta}_M^* {\mathcal{D}^\ast} (u_N^*-u^\ast)\cdot{\mathcal{D}^\ast} (u^\ast\chi) \,d\yb d\xb\right|
+ \left|\displaystyle\int_{{\Omega}}(f-\Pi^N f)u^\ast\chi\,d\xb\right| \\[3mm]
\leq & \|{\vartheta}_M^*\|_{L^{\infty}}|||u_N^*-u^\ast |||\,|||u^\ast\chi||| + \|f-\Pi^N f\|_{L^2}\|u^\ast\chi\|_{L^2} \\[2mm]
\leq & \vartheta_1 C_2\|u^\ast\|_{H^s} |||u_N^*-u^\ast||| + \|f-\Pi^N f\|_{L^2}\|u^\ast\|_{H^s} \\[2mm]
\leq & \vartheta_1C_2 C_u |||u_N^*-u^\ast||| + C_u \|f-\Pi^N f\|_{H^s} \\[2mm]
\leq & \vartheta_1 C^2_2 C_u \|u_N^*-u^\ast\|_{H^s} + C_u K_1 N^{-(m+t-s)} \|f\|_{H^{m+t}}, \end{array} \end{equation}
where we used \eqref{eq:piV}. Next, we find a bound for $\|u_N^*-u^\ast\|_{H^s}$: $$ \begin{array}{ll}
\|u_N^*-u^\ast\|_{H^s} & \leq \|u^\ast-\Pi^Vu^\ast\|_{H^s} +\|u_N^*-\Pi^Vu^\ast\|_{H^s} \\[2mm]
& \leq K_1 N^{-m-t+s} \|u^\ast\|_{H^{m+t}} + K_3 N^s \|u_N^*-\Pi^Vu^\ast\|_{L^2} \\[2mm]
& \leq K_1 N^{-m-t+s} \|u^\ast\|_{H^{m+t}} + K_3 N^s \left(\|u^\ast-\Pi^Vu^\ast\|_{L^2} + \|u^\ast-u_N^*\|_{L^2}\right) \\[2mm]
& \leq \left(K_1 N^{-m-t+s} + K_1 K_3 N^{-(m+t-2s)}\right) \|u^\ast\|_{H^{m+t}} + K_3 N^s\|u^\ast-u_N^*\|_{L^2}, \end{array} $$
where we used \eqref{eq:piV} and \eqref{eq:inverse_ineq}. Then, we find a bound for $\|u^\ast-u_N^*\|_{L^2}$: \begin{equation}\label{eq:l2_error_norm} \begin{array}{ll}
\|u^\ast-u_N^*\|_{L^2} & \leq \|u^\ast-\widehat u \|_{L^2} +\|\widehat u -u_N^*\|_{L^2} \\
& \leq dist(\widehat u ,\mathcal{V}) +\inf\limits_{W^M}\|\widehat u -u_N(\vartheta_M)\|_{L^2} \\
& \leq dist(\widehat u ,\mathcal{V})+ \inf\limits_{W^M}\left(\|\widehat u -u^\ast\|_{L^2}+ \|u^\ast-u_N(\vartheta_M)\|_{L^2}\right) \\
& \leq 2dist(\widehat u ,\mathcal{V}) + \inf\limits_{W^M} \|u^\ast-u_N(\vartheta_M)\|_{L^2} \\
& \leq 2dist(\widehat u ,\mathcal{V})+ K M^{-(l+1)}\|\vartheta^*\|_{l+1,\infty} + K N^{-(m+t-s)} \|u^\ast\|_{H^{m+t}({\omgs\cup\omgc})}, \end{array} \end{equation} where we applied Lemma \ref{lemma2}. Thus, \begin{equation}\label{eq:Hs_estimate} \begin{array}{ll}
\|u_N^*-u^\ast\|_{H^s} & \leq 2\,K_3\,dist(\widehat u ,\mathcal{V})+K N^sM^{-(l+1)}\|\vartheta^*\|_{l+1,\infty} \\[2mm]
& +\left( K_1 N^{-(m+t-s)}+ K_1 K_3 N^{-(m+t-2s)} +K K_3 N^{-(m+t-2s)}\right)\|u^\ast\|_{H^{m+t}({\omgs\cup\omgc})}. \end{array} \end{equation} Combining \eqref{eq:Hs_estimate} and \eqref{eq:first_ineq} we obtain \eqref{main_thm}. $\boxempty$
Note that the proof of Theorem \ref{th:param_apriori} also provides an estimate for the $L^2$ norm of the approximation error for the state variable: from equation \eqref{eq:l2_error_norm}, we have \begin{equation}\label{eq:l2_error}
\hspace{-2cm}\|u_N^*-u^\ast\|_{L^2}\leq K\left( dist(\widehat u ,\mathcal{V})+ M^{-(l+1)}\|\vartheta^*\|_{l+1,\infty} + N^{-(m+t-s)} \|u^\ast\|_{H^{m+t}({\omgs\cup\omgc})}\right). \end{equation}
\section{Numerical tests}\label{num_tests} In this section we present the results of computational experiments for finite element discretizations of one-dimensional problems. These preliminary results illustrate the theoretical results in Section \ref{fin_dim_approx} and provide the basis for extensions to two- and three-dimensional experiments.
We conduct two convergence analyses. First, for both Case 1 and Case 2 we consider the convergence of approximate optimal solutions to fine-grid surrogates for the analytic optimal solutions; here, we do not assume any knowledge of the optimal solution in ${\Omega_{\mathcal I}}$, where we prescribe homogeneous Dirichlet conditions. Then, for Case 2, we analyze the convergence to optimal analytic solutions prescribing exact volume constraints in ${\Omega_{\mathcal I}}$. We describe the problem settings used in our tests.
\paragraph{Case 1} We consider ${\Omega}=(-1,1)$, ${\Omega_{\mathcal I}}=(-1-\varepsilon,-1)\cup(1,1+\varepsilon)$, \begin{equation}\label{kernel1}
\gamma_1(x,y) = \dfrac{1}{|x-y|^{1+2s}}, \qquad s\in(0,1), \end{equation} for which $\mathcal{L}$ corresponds to a fractional differential operator \cite{mesi:11}, and the data set \begin{equation} {\rm A:} \left\{\begin{array}{l} f(x) = 1 \\[1mm] \widehat{u}(x) = u_{\rm A}(x) \\[1mm]
\left.u(x)\right|_{{\Omega_{\mathcal I}}} = 0, \\[1mm] s = 0.7. \end{array}\right. \end{equation} Here $u_{\rm A}(x)$ is a surrogate for an exact solution of \eqref{eq:fwd_diffusion}; it corresponds to the finite element approximation computed on a very fine grid using \begin{equation} \vartheta_{\rm A}(x,y) = 2+0.4(x+y-1)^2. \end{equation}
\paragraph{Case 2} We consider ${\Omega}=(0,1)$, ${\Omega_{\mathcal I}}=(-\varepsilon,0)\cup(1,1+\varepsilon)$, and \begin{equation}\label{kernel2}
\gamma_2(x,y) = \dfrac{1}{\varepsilon^2|x-y|}, \end{equation} which is often used in the literature, e.g. in a linearized model for continuum mechanics \cite{chgu:11}. We introduce the following data sets \begin{equation} \begin{array}{lll} {\rm B:}\left\{\begin{array}{l}
f(x) = \varepsilon^2 + 24 x^2 - 24 x +7.6 \\[1mm]
\widehat{u}(x) = 2.5\,x(1-x) \\[1mm]
\left.u(x)\right|_{{\Omega_{\mathcal I}}} = 2.5\,x(1-x) \end{array}\right. & \quad{\rm C:}\left\{\begin{array}{l}
f(x) = 5 \\[1mm]
\widehat{u}(x) = u_{\rm C}(x) \\[1mm]
\left.u(x)\right|_{{\Omega_{\mathcal I}}} = 0 \end{array}\right. & \quad{\rm D:}\left\{\begin{array}{l}
f(x) = 5 \\[1mm]
\widehat{u}(x) = u_{\rm D}(x) \\[1mm]
\left.u(x)\right|_{{\Omega_{\mathcal I}}} = 0. \end{array}\right. \end{array} \end{equation} In case B, $\widehat{u}$ is the solution of \eqref{eq:fwd_diffusion} for $\vartheta(x,y) = \vartheta_{\rm A}$; thus, $\widehat{u}$ and $\vartheta_{\rm A}$ are the optimal state and parameter. In cases C and D, $u_{\rm C}$ and $u_{\rm D}$ are surrogates for an exact solution of \eqref{eq:fwd_diffusion}; they are, in fact, the finite element solutions computed on a very fine grid using respectively $\vartheta(x,y) = \vartheta_{\rm C}(\frac{x+y}{2})$ and $\vartheta(x,y) = \vartheta_{\rm D}(\frac{x+y}{2})$, where \begin{equation}\label{eq:piecewiseT} \vartheta_{\rm C}(z) = \left\{\begin{array}{ll}
0.2+(z-0.625)^2 & \; z\in (0,0.625) \\[1mm]
z+1.25 & \; z\in (0.625,0.75) \\[1mm]
14.4(z-0.75)+2 & \; z\in(0.75,1) \end{array}\right.\qquad\quad \vartheta_{\rm D}(z) = \left\{\begin{array}{ll}
1 & \; z\in(0,0.2) \\[1mm]
0.1 & \; z\in(0.2,0.6) \\[1mm]
1 & \; z\in(0.6,1). \end{array}\right. \end{equation}
\paragraph{Implementation details} For the state variable we introduce a partition of $\overline{\omgs\cup\omgc} = \overline{(a-\varepsilon, b+ \varepsilon)}$ such that, for the positive constants $K_u$ and $J_u$ \begin{equation}\label{eq:partitionu} \begin{array}{ll}
& a - \varepsilon = x_{-K_u} < \cdots < x_{-1} < a = x_0 < x_1 < \cdots < x_{J_u-1}\\
& \qquad\qquad <x_{J_u} = b<x_{J_u+1} < \cdots < x_{J_u+K_u} = b + \varepsilon. \end{array} \end{equation}
Then, $h_u$ is defined as $\max_{j = -K_u,\ldots,K_u+J_u-1}|x_{j+1}-x_j|$. In the numerical experiments we let $V^N({\omgs\cup\omgc})$ be the finite element space of piece-wise linear polynomials.
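To make the grid construction concrete, the following sketch (our illustration, not the authors' code) builds a one-dimensional partition of the form \eqref{eq:partitionu} with numpy and evaluates the mesh size $h_u$; the values chosen for $a$, $b$, $\varepsilon$, $K_u$, and $J_u$ are arbitrary.
\begin{verbatim}
# Illustrative sketch: build a partition of (a - eps, b + eps) as in the text
# and compute the mesh size h_u = max_j |x_{j+1} - x_j|.
import numpy as np

def build_partition(a, b, eps, K_u, J_u):
    """Grid points x_{-K_u}, ..., x_{J_u + K_u} with x_0 = a and x_{J_u} = b."""
    left = np.linspace(a - eps, a, K_u + 1)        # x_{-K_u}, ..., x_0 = a
    interior = np.linspace(a, b, J_u + 1)[1:]      # x_1, ..., x_{J_u} = b
    right = np.linspace(b, b + eps, K_u + 1)[1:]   # x_{J_u+1}, ..., x_{J_u+K_u}
    return np.concatenate([left, interior, right])

a, b, eps = 0.0, 1.0, 2.0**-4
x = build_partition(a, b, eps, K_u=4, J_u=2**5)
h_u = np.max(np.diff(x))
print(len(x), h_u)
\end{verbatim}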
For the approximation of the parameter we consider $\vartheta(x,y)$ as a function of one variable only, i.e., as in \eqref{eq:piecewiseT}, $\vartheta(x,y) = \vartheta(\frac{x+y}{2})$, defined in ${\omgs\cup\omgc}$. Thus, for the positive constants $K_\vartheta$ and $J_\vartheta$ we introduce the partition \begin{equation}\label{eq:partitionvt} \begin{array}{ll}
& a - \varepsilon = x_{-K_\vartheta} < \cdots < x_{-1} < a = x_0 < x_1 < \cdots < x_{J_\vartheta-1}\\
& \qquad\qquad < x_{J_\vartheta}=b<x_{J_\vartheta+1} < \cdots < x_{J_\vartheta+K_\vartheta} = b + \varepsilon. \end{array} \end{equation}
Then, $h_\vartheta$ is defined as $\max_{j = -K_\vartheta,\ldots,K_\vartheta+J_\vartheta-1}|x_{j+1}-x_j|$. In choosing $W^M({\omgs\cup\omgc})$ one has to be careful; the most natural choice, for cases A, B, and C, is to let $W^M({\omgs\cup\omgc})$ be the space of continuous piece-wise linear polynomials defined over the partition. However, this makes the problem very ill-conditioned. To circumvent this problem we define $W^M({\omgs\cup\omgc})$ as the space of continuous piece-wise linear polynomials such that, for all $\sigma_M\in W^M({\omgs\cup\omgc})$, $\left.\sigma_M\right|_{\Omega_{\mathcal I}}$ is a linear extension of $\left.\sigma_M\right|_{\widetilde K}$, where $\widetilde K$ is the element of the partition adjacent to ${\Omega_{\mathcal I}}$. An example of a function belonging to $W^M({\omgs\cup\omgc})$ is displayed in Figure \ref{WM_example} for the domain configuration of Case 2. This empirical choice is not motivated by a theoretical result but by numerical evidence; in fact, we observe that projecting the analytic parameter onto $W^M({\omgs\cup\omgc})$ does not affect the accuracy of the numerical solutions of the state equation.
For case D we define $W^M({\omgs\cup\omgc})$ as the space of piece-wise constant functions such that, for all $\sigma_M\in W^M({\omgs\cup\omgc})$, $\left.\sigma_M\right|_{(a-\varepsilon,a)} = \left.\sigma_M\right|_{(a,a+h_\vartheta)}$ and $\left.\sigma_M\right|_{(b,b+\varepsilon)} = \left.\sigma_M\right|_{(b-h_\vartheta,b)}$.
\begin{figure}
\caption{Example of a function in $W^M({\omgs\cup\omgc})$ for the data sets B and C.}
\label{WM_example}
\end{figure}
The finite dimensional optimization problem is solved with the Broyden{-}Fletcher{-}Goldfarb{-}Shanno (BFGS) method \cite{nocedal99}, without enforcing the lower and upper bounds on the parameter. The reason for this choice is that in our simulations those bounds are never violated.
For the solution of local \cite{itku:08,gunz:02} and nonlocal \cite{degu:13} optimization problems a regularization term is usually added to the functional to prevent the ill-posedness and the ill-conditioning of the mathematical and numerical problems. In this case (as in its local counterpart), as shown in Section \ref{sec:control} and Section \ref{fin_dim_approx}, the problems \eqref{eq:min} and \eqref{eq:min_finite} admit solutions in the space of admissible parameters without regularization. However, a regularization term can be added in case of ill-conditioning; this is the case for the data set D, where we utilize the functional \begin{equation}\label{regularization} \overline{J}(u_N,\vartheta_M) = J(u_N,\vartheta_M) + \beta\sum\limits_{j=1}^{J_\vartheta -1} \big(\vartheta_M(\overline{x}_j)-\vartheta_M(\overline{x}_{j+1})\big)^2, \end{equation} where $\beta>0$, and $\overline{x}_j$ is any point inside the interval $[x_j,x_{j+1})$, for $j=1,\ldots, J_\vartheta -1$. The additional term in \eqref{regularization} has the effect of penalizing the jumps in $\vartheta_M$.
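To illustrate how the discrete problem with the regularized functional \eqref{regularization} can be driven by an off-the-shelf BFGS routine, we include the following sketch. It is not the authors' implementation: the forward solver below is a local finite-difference placeholder rather than the nonlocal finite element solver, and the names \texttt{solve\_state} and \texttt{J} are ours; only the overall structure (reduced functional, jump penalty, unconstrained BFGS) mirrors the description above.
\begin{verbatim}
# Hedged sketch: minimize a reduced functional (discrete misfit plus the
# jump-penalizing term of the regularized functional) with BFGS, without
# enforcing bounds on the parameter, as described in the text.
import numpy as np
from scipy.linalg import solve
from scipy.optimize import minimize

n_u, n_theta, beta = 64, 8, 5e-4
x_u = np.linspace(0.0, 1.0, n_u + 2)[1:-1]
u_hat = 2.5 * x_u * (1.0 - x_u)          # target state (data set B style)
f = np.full(n_u, 5.0)                    # source term

def solve_state(theta):
    """Placeholder LOCAL solver with piecewise-constant diffusivity theta."""
    coeff = np.repeat(theta, n_u // n_theta)[:n_u]
    h = 1.0 / (n_u + 1)
    A = (np.diag(2.0 * coeff) - np.diag(coeff[:-1], 1)
         - np.diag(coeff[1:], -1)) / h**2
    return solve(A, f)

def J(theta):
    misfit = solve_state(theta) - u_hat
    reg = beta * np.sum(np.diff(theta) ** 2)   # jump penalty, cf. the text
    return 0.5 * np.dot(misfit, misfit) + reg

theta0 = np.full(n_theta, 1.5)   # initial guess; the placeholder "true" value is 1
res = minimize(J, theta0, method="BFGS")
print(res.x, res.fun)
\end{verbatim}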
\subsection{Convergence of the finite element approximate optimal solutions} We analyze the convergence with respect to the grid sizes $N$ and $M$ of finite element approximations to the surrogates, for cases A, C, and D, and to the analytic solution for case B.
In Tables \ref{tab:fractional-kernel} and \ref{tab:Hconv_exact} we report the error and the corresponding rates for cases A and B respectively. Here, $e_{u,2} = \|u_N^*-\widehat u\|_{L^2({\Omega})}$ and $e^*_\vartheta = \|\vartheta_M^*-\vartheta_{\rm A}\|_*$, where $\|\cdot\|_*$ is the left-hand side of \eqref{main_thm}; $u_{\rm A}$ is generated using $N=2^{11}$. Note that, because $\widehat{u}\in\mathcal{V}$, the term $dist(\widehat{u},\mathcal{V})$ in the estimates \eqref{main_thm} and \eqref{eq:l2_error} is negligible. For case A, though the optimal analytic solution does not necessarily belong to $H^{1+t}({\omgs\cup\omgc})$, $t\in[s,1]$, our results are consistent with the estimate of Theorem \ref{th:param_apriori}; in fact, when the solution is regular enough, for $s=0.7$, we expect to observe a convergence rate $r_u\in(1,1.3)$ for the state and $r_\vartheta\in(0.3, 0.6)$ for the parameter.
For case B, where both $u^*$ and $\vartheta^*$ belong to $C^\infty({\omgs\cup\omgc})$, we observe quadratic convergence of the error norms of both the state and the parameter for different choices of the interaction radius. To the best of our knowledge, accurate theoretical results concerning the rates of convergence of the discretization error are not available for kernels as in Case 2; however, there is numerical evidence that for piecewise linear finite element approximations the numerical solutions of the state equation do converge to the analytic solutions in $L^2$ as $h\to 0$ with a quadratic convergence rate; see the extensive analysis conducted in \cite{chgu:11}. Furthermore, in \cite{degu:13}, numerical tests show that the same convergence rate is preserved for the approximate solutions of optimal control problems constrained by a nonlocal diffusion equation where the control parameter is the source term $f$. Plots of the approximate optimal solutions are not reported because the curves for the different grids are visually indistinguishable.
In case C, $u_{\rm C}$ is determined using $N=2^{12}$. Here, the parameter $\vartheta_{\rm C}$ has a discontinuous derivative; thus, we do not expect to observe the same convergence rates as in case B. In fact, though we have convergence, the rates do not show a specific trend and for this reason they are not reported. In Figure \ref{fig:tDNM_DG} we report the approximate optimal parameter for several values of $M$ and we observe a very good match with $\vartheta_{\rm C}$ as we refine the grid. In Figure \ref{fig:uREF_DG} (left) we report $u_{\rm C}$.
\begin{table}[t] \begin{center}
\begin{tabular}{|c|c|c|c|c|c|} \multicolumn{6}{c}{$\varepsilon = 2^{-4}$} \\ \hline $N$ & $M$ & $e_{u,2}$ & rate & $e^*_\vartheta$ & rate \\ \hline $2^4$ & $2^2$ & 2.44e-03 & - & 2.35e-02 & - \\ $2^5$ & $2^3$ & 2.29e-04 & 3.41 & 6.00e-03 & 1.97 \\ $2^6$ & $2^4$ & 1.04e-04 & 1.14 & 3.48e-03 & 0.63 \\ $2^7$ & $2^5$ & 5.09e-05 & 1.03 & 2.48e-03 & 0.63 \\ $2^8$ & $2^6$ & 2.54e-05 & 1.00 & 1.62e-03 & 0.61 \\ \hline \end{tabular} \caption{For the data set A, dependence on the grid sizes $M$ and $N$ of the errors and the rate of convergence of continuous piecewise linear approximations of the state and the parameter.} \label{tab:fractional-kernel} \end{center} \end{table}
\begin{table}[t] \begin{center}
\begin{tabular}{| l | l | c | c | c | c | } \multicolumn{6}{c}{$\varepsilon=2^{-9}$} \\ \hline $N$ & $M$ & $e_{u,2}$ & rate & $e_\vartheta^*$ & rate \\ \hline $2^4$ & $2^2$ & 1.41e-04 & - & 1.28e-02 & - \\ $2^5$ & $2^3$ & 3.24e-05 & 2.12 & 3.16e-03 & 2.02 \\ $2^6$ & $2^4$ & 7.66e-06 & 2.08 & 7.79e-04 & 2.02 \\ $2^7$ & $2^5$ & 1.90e-06 & 2.01 & 1.94e-04 & 2.01 \\ \hline \end{tabular} \hspace{.2cm}
\begin{tabular}{| l | l | c | c | c | c | } \multicolumn{6}{c}{$\varepsilon=2^{-4}$} \\ \hline $N$ & $M$ & $e_{u,2}$ & rate & $e_\vartheta^*$ & rate \\ \hline $2^4$ & $2^2$ & 1.66e-04 & - & 1.31e-02 & - \\ $2^5$ & $2^3$ & 3.55e-05 & 2.22 & 3.10e-03 & 2.08 \\ $2^6$ & $2^4$ & 8.01e-06 & 2.14 & 7.59e-04 & 2.03 \\ $2^7$ & $2^5$ & 1.92e-06 & 2.06 & 1.90e-04 & 2.00 \\ \hline \end{tabular} \caption{For the data set B, dependence on the grid sizes $M$ and $N$ of the errors and the rate of convergence of continuous piecewise linear approximations of the state and the parameter. Results are provided for two choices of interaction radius $\varepsilon$.} \label{tab:Hconv_exact} \end{center} \end{table}
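The rate columns in Tables \ref{tab:fractional-kernel} and \ref{tab:Hconv_exact} can be reproduced from the reported errors: since the grid size is halved at each refinement, the observed rate between two consecutive rows is $\log_2(e_{\rm coarse}/e_{\rm fine})$. A minimal sketch, using the $e_{u,2}$ column of Table \ref{tab:fractional-kernel}:
\begin{verbatim}
import numpy as np

errors = np.array([2.44e-03, 2.29e-04, 1.04e-04, 5.09e-05, 2.54e-05])
rates = np.log2(errors[:-1] / errors[1:])
print(np.round(rates, 2))   # [3.41 1.14 1.03 1.  ] as in the table
\end{verbatim}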
\begin{figure}
\caption{For the data set C and for different grid sizes $M$, $\vartheta_M^*$ and $\vartheta_{\rm C}$ for $\varepsilon=2^{-9}$ (left) and $2^{-4}$ (right).}
\label{fig:tDNM_DG}
\end{figure}
\begin{figure}
\caption{For $N=2^{12}$ and two values of $\varepsilon$, $u_{\rm C}$ on the left and $u_{\rm D}$ on the right.}
\label{fig:uREF_DG}
\end{figure}
For the data set D, $u_{\rm D}$ is determined using $N=2^{12}$ and $\beta=5 \cdot 10^{-4}$ in \eqref{regularization}. Here, $\vartheta_{\rm D}$ is discontinuous and for the same reasons as in case C the errors and the convergence rates are not reported. In Figure \ref{fig:tconst} we report $\vartheta_M^*$ and $\vartheta_{\rm D}$ for several values of $M$ and we observe the convergence of the approximate optimal parameters to $\vartheta_{\rm D}$ as the grid is refined. In Figure \ref{fig:uREF_DG} (right) we report $u_{\rm D}$.
\begin{figure}
\caption{For the data set D and for different grid sizes $M$, $\vartheta_M^*$ and $\vartheta_{\rm D}$ for $\varepsilon=2^{-9}$ (left) and $2^{-4}$ (right).}
\label{fig:tconst}
\end{figure}
\section{Conclusions}\label{conclusion} The estimation of input parameters of mathematical models is an important issue also in the field of nonlocal equations. In this paper we show that the identification problem for nonlocal diffusion equations can be treated in a way similar to the classical control theory for PDEs. Furthermore, we show that with nonlocal models we are able to accurately estimate non-smooth and discontinuous input parameters. Using the nonlocal vector calculus we provide an analytical and numerical framework for the identification problem; we formulate the estimation of the diffusion parameter as an optimal control problem for which we prove the existence of an optimal solution. Using approaches similar to those for classical, local, problems we provide a Galerkin finite dimensional formulation and derive a priori error estimates for the approximate state and control variables; these theoretical results are illustrated in one-dimensional numerical examples which provide the basis for realistic simulations. Our preliminary theoretical and numerical analysis is conducted on simplified problems; obvious extensions of this work include the generalization from the scalar to the tensor diffusivity and the simulation of two- and three-dimensional problems. The former consists of estimating the coefficient $\bthe$, a second-order symmetric positive definite tensor representing the diffusivity. In this case, the weak formulation of the state equation is \begin{displaymath} \hspace{-2cm} \begin{array}{ll}
&\mbox{\em given $f(\xb)$, $g(\xb)$ and $\bthe(\xb,\yb)$, seek $u(\xb)$ such that}
\\&\mbox{\em $u=g$ for $\xb\in{\Omega_{\mathcal I}}$ and}
\\& \displaystyle \int_{\omgs\cup\omgc}\int_{\omgs\cup\omgc} {\mathcal{D}^\ast} u\cdot(\bthe{\mathcal{D}^\ast} v)\,d\yb d\xb =
\int_{\Omega} fv\,d\xb
\qquad\forall\,v\in V_c({\omgs\cup\omgc}). \end{array} \end{displaymath} The latter involves more complex numerical schemes and time-consuming numerical experiments \cite{chgu:11}.
An important follow-up to our work is to consider parameters affected by uncertainty. Our approach to the estimation problem is purely deterministic; this is a limitation. In a stochastic approach to the solution of nonlocal problems we assume the input parameters to be random fields described by a probability distribution, i.e., $\vartheta = \vartheta(\xb,\yb, \omega)$, where $\xb$ and $\yb$ are (deterministic) points inside ${\Omega}$ and the random variable $\omega$ indicates that the value of the diffusion parameter at any point is drawn randomly. The same definition holds for $g(\xb,\omega)$ and $f(\xb,\omega)$; the solution of the nonlocal problem $u(\xb,\omega)$ is a random field itself. The nonlocal stochastic problem is then formulated as \begin{equation}\label{weakstoch} \hspace{-2cm}\begin{array}{ll}
&\mbox{\em given $f(\xb,\omega)$, $g(\xb,\omega)$, and $\vartheta(\xb,\yb,\omega)$, seek $u(\xb,\omega)$ such that}
\\&\mbox{\em $u=g$ for $\xb\in{\Omega_{\mathcal I}}$ and}\; \mathcal{D}(\vartheta\, {\mathcal{D}^\ast} u) = f. \end{array} \end{equation} However, in practice one usually does not know much about the statistics of the input variables; the only known quantities might be the maximum and minimum values or the mean and covariance. Thus, as for the local case, a fundamental issue in nonlocal stochastic equations is the estimation of the distributions of the input parameters. In the classical framework this problem is known as {\it model calibration}. We formulate the stochastic parameter estimation problem for a nonlocal diffusion equation as the standard estimation problem for PDEs. \begin{displaymath} \hspace{-2cm}\begin{minipage}{4.9in} {\em Given the random fields $f(\xb,\omega)$, $g(\xb,\omega)$, and the target function $\widehat u(\xb)$, seek an optimal control $\vartheta^\ast(\xb,\yb,\omega)$ and an optimal state $u^\ast(\xb,\omega)$ such that $J(u,\vartheta)$ is minimized, subject to $u$ and $\vartheta$ satisfying \eqref{weakstoch},} \end{minipage} \end{displaymath} where \begin{displaymath}
J(u,\vartheta) = \mathbb{E}[\|u(\xb,\omega)-\widehat{u}(\xb)\|^2_{L^2({\Omega})}]. \end{displaymath} Here, $\mathbb{E}$ represents the expected value. We might need to add a regularization term to prevent potential ill-posedness or ill-conditioning of the analytical or numerical problem.\\
\end{document}
\begin{document}
\setcounter{MaxMatrixCols}{10}
\title{STATIONARY STATES IN A POTENTIAL WELL}
\maketitle
\vskip -4 ex
{\sc
H.C. Rosu and J.L. Mor\'an-L\'opez
\footnotesize \vskip 1ex
Instituto Potosino de Investigaci\'on Cient\'{\i}fica y Tecnol\'ogica, SLP, Mexico
\noindent {\bf Keywords}: Stationary states, Bohr's atomic model, Schr\"odinger equation, Rutherford's planetary model, Franck-Hertz experiment, Infinite square well potential, Quantum harmonic oscillator, Wilson-Sommerfeld theory, Hydrogen atom \vskip 0.5cm
\noindent {\bf Contents}\\ 1. Introduction \\ 2. Stationary Orbits in Old Quantum Mechanics \\ 2.1 Quantized Planetary Atomic Model \\ 2.2 Bohr's Hypotheses and Quantized Circular Orbits \\ 2.3 From Quantized Circles to Elliptical Orbits \\ 2.4 Experimental Proof of the Existence of Atomic Stationary States \\ 3. Stationary States in Wave Mechanics \\ 3.1 The Schr\"odinger Equation \\ 3.2 The Dynamical Phase \\ 3.3 The Schr\"odinger Wave Stationarity \\ 3.4 Stationary Schr\"odinger States and Classical Orbits \\ 3.5 Stationary States as Sturm-Liouville Eigenfunctions \\ 4. The Infinite Square Well: The Stationary States Most Resembling the Standing Waves on a String \\ 5. 1D Parabolic Well: The Stationary States of the Quantum Harmonic Oscillator\\ 5.1 The Solution of the Schr\"odinger Equation \\ 5.2 The Normalization Constant \\ 5.3 Final Formulas for the HO Stationary States\\ 5.4 The Algebraic Approach: Creation and Annihilation operators $\hat{a}^{\dagger}$ and $\hat{a}$ \\ 5.5 HO Spectrum Obtained from Wilson-Sommerfeld Quantization Condition\\ 6. The 3D Coulomb Well: The Stationary States of the Hydrogen Atom \\ 6.1 Separation of Variables in Spherical Coordinates \\ 6.2 The Angular Separation Constants as Quantum Numbers \\ 6.2.1 The Azimuthal Solution and the Magnetic Quantum Number \\ 6.2.2 The Polar Solution and the Orbital Quantum Number \\ 6.2.3 Space Quantization \\ 6.3 Polar and Azimuthal Solutions Set Together \\ 6.4 The Radial Solution and the Principal Quantum Number \\ 6.5 Final Formulas for the Hydrogen Atom Stationary States \\ 6.6 Electronic Probability Density \\ 6.7 Other 3D Coordinate Systems Allowing Separation of Variables \\ 7. The 3D Parabolic Well: The Stationary States of the Isotropic Harmonic Oscillator \\ 8. Stationary Bound States in the Continuum \\ 9. Conclusions\\ Bibliography \\
Glossary
Bohr hypotheses: Set of hypotheses that Bohr introduced to explain the stability of the atom.
Coulomb potential: Potential that holds the electrons attached to the nucleus.
Creation and Annihilation operators: Mathematical objects that create or annihilate particles.
Dynamical phase: It is the factor that contains the time dependence of the wave function.
Hydrogen atom: The simplest atom in nature, consisting of one electron revolving around a proton.
Laplace operator: A partial differential operator that contains the second partial derivatives with respect to the space coordinates.
Magnetic quantum number: A quantum number associated with the projection of the angular momentum onto a chosen direction.
Orbital quantum number: The number associated with the quantization of the orbital motion.
Principal quantum number: The number associated with the radial solution of the Schr\"odinger equation; it defines the energy of the allowed orbits.
Quantized electron orbits: The electrons can move in the atom only in very specific orbits.
Quantum Harmonic oscillator: The quantum analogue of a system whose restoring force is proportional to the displacement.
Quantum Mechanics: Theory of the laws that rule the microscopic world, i.e., at atomic sizes and below.
Rutherford planetary atomic model: Model of the atom in which the nucleus is at the center and the electrons move around it.
Schr\"odinger equation: The differential equation that describes the movement of atomic particles.
Square well potential: A one-dimensional model of a potential with vertical walls.
Standing wave: A solution of the Schr\"odinger equation that is stationary.
Zeeman effect: The effect of splitting the electronic energy levels when an atom is immersed in a magnetic field.
\vspace*{10pt}
\section*{Summary}
\noindent In the early days of the 20th century a set of important observations in atomic and molecular physics could not be explained on the basis of the laws of classical physics. One of the main findings was the emission of light by excited atoms with very particular frequencies. To explain those findings a new development in physics was necessary, now known as quantum mechanics. In particular, the concept of stationary states was introduced by Niels Bohr, in 1913, in order to explain those observations and the stability of atoms. According to E.C. Kemble (1929), the existence of discrete atomic and molecular energy levels brought into mechanics a new kind of atomicity superposed on the atomicity of electrons and protons. We review here in a historical context the topic of stationary states in the quantum world, including the generalizations of the primary ideas. We also discuss the stationary states in one-dimensional parabolic wells and the three-dimensional Coulomb and parabolic cases.
\section{Introduction}
At the beginning of the 20th century, some experimental observations in atomic and molecular physics were impossible to explain on the basis of classical physics. It was necessary to introduce revolutionary concepts that led to the foundation of quantum mechanics. In this context the concept of stationary states played an essential role in the development of new ideas that started to explain the atomic world.
\noindent In 1908 J.R. Rydberg and W. Ritz studied in detail the spectra of the light emitted by excited atoms. They found that the spectra consisted of a set of well-defined lines of particular wavelengths. Furthermore, the set of spectroscopic lines depended only on the atom under study. Through the so-called combination principle they put the data in a most systematic form. Their principle states that the frequency of a particular spectral line can be expressed as a difference between members of a set of frequency terms.
\noindent These findings could not be explained by the atomic model accepted at that time, proposed by J.J. Thomson, which claimed that the electrons were embedded in a positively charged cloud whose extent was determined by the atomic radius. That model also could not explain the data obtained by H.W. Geiger and E. Marsden, who, under the supervision of Rutherford, were studying the interaction of charged $\alpha$-particles with gold foils \cite{Ruther11}. They observed that a considerable fraction of the $\alpha$-particles was deflected by large angles. This effect could not be attributed to the electrons since they are much less massive. Thus, they concluded that the source of deflection must be the positive charge concentrated in a volume much smaller than the one generated by the atomic radius.
\noindent Although Rutherford's planetary atomic model explained qualitatively well the deflection of $\alpha$-particles, it had two major deficiencies. First, it could not account for the radiation spectra of atoms, which are discrete rather than continuous. The other major problem was that, according to electrodynamics, an electron moving around the atom is under constant acceleration and must radiate energy. This would lead to a situation in which the electron loses energy continuously and eventually collapses into the nucleus.
\section{Stationary Orbits in Old Quantum Mechanics}
\subsection{Quantized Planetary Atomic Model}
In 1911, the two-and-a-half-thousand-year-old philosophical concept of the atom turned into a scientific matter when Rutherford's planetary atomic model emerged from the interpretation of the experimental data on the scattering of $\alpha$ particles \cite{Ruther11}. The curious fact noticed while these particles were shot at gold foils was that some of them bounced back as if they were colliding with very massive objects. To explain these findings Rutherford proposed that the atom was composed of a massive, positively charged central nucleus with the electrons revolving around it, i.e., very similar to a miniature solar system. However, this famous model was not electrodynamically viable. Atomic stability was simply not assured for Rutherford's semiempirical model, since accelerated charges radiate energy and the electrons moving around the nucleus would lose energy and eventually collapse into the nucleus.
Another important set of empirical data is that obtained from the emission of light by excited atoms. It was observed that the emitted light had very characteristic frequencies and was a fingerprint of each atom. These observations were put in a systematic form in 1908 through the so-called combination principle formulated by J.R. Rydberg and W. Ritz. Their principle says that the frequency of a spectral emission or absorption line can be expressed as a difference between the members of a set of well-defined frequency terms. Rutherford's model was completely silent on the dynamical origin of the spectral lines. It was the great merit of Bohr to formulate in 1913 the hypotheses, or postulates, that allowed the explanation of the atomic spectral lines on the basis of the planetary atomic structure.
\subsection{Bohr's Hypotheses and Quantized Circular Orbits}
The hypotheses that Bohr added to the Rutherford model in order to explain the spectroscopic information are the following \cite{Bohr1913}
\begin{quote}
\begin{enumerate}
\item An atom can exist only in special states with discrete values of energy. In other words, the electrons moving around an atom can be found only in certain special orbits that Bohr called {\em stationary states}.
\item When an atom makes a {\em transition} from one stationary state to another, it emits or absorbs radiation whose frequency $\nu$ is given by the frequency condition \begin{equation} h\nu =E_1-E_2~, \end{equation} where $E_1$ and $E_2$ are the energies of two stationary states.
\item In the stationary states, the electrons move according to the laws of classical theory. However, only those motions are performed for which the following {\em quantum condition} is fulfilled \begin{equation} \oint p\,dq=nh~, \qquad (n=1,2,3,...)~, \end{equation} where $p$ is the momentum of the electron and $q$ is its coordinate along the stationary orbit. The integration should be taken along the orbit over one period of the cyclic motion.
\end{enumerate}
\end{quote}
Bohr's theory claimed that those frequency terms, when multiplied by $h$, give distinct energy levels in which the electrons move around the nucleus. This meant that these were the only possible states in which the electrons in the atom could exist.
Let us assume that an electron in a Hydrogen atom is revolving around the nucleus on a circular orbit according to the Newtonian equations of motion. For a circular orbit, the absolute value of the momentum $p$ is constant and then the quantum hypothesis (3) leads to
\begin{equation} p\cdot 2\pi a=nh~, \qquad (n=1,2,3,...) \end{equation}
where $a$ is the radius of the orbit. The radius is then determined by the value of the momentum, which can be obtained from the balance between the centrifugal force and the Coulomb force, i.e.,
\begin{equation} \frac{p^2}{ma}=\frac{e^2}{4\pi \epsilon _0a^2} \ . \end{equation}
Combining the two equations, one obtains
\begin{equation} a_n=\frac{\epsilon _0h^2n^2}{\pi m e^2} \qquad (n=1,2,3,...)~. \end{equation}
The latter formula gives the radii of the quantized electron circles in the hydrogen atom. In particular, $a_1\equiv a_B=\frac{\epsilon _0h^2}{\pi m e^2}$, is known as the Bohr radius and is taken as an atomic length unit.
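As a quick numerical illustration (ours, not part of the original discussion), the formula above for $a_n$ can be evaluated with tabulated physical constants; for $n=1$ it reproduces the familiar value $a_B\approx 0.529$ \AA.
\begin{verbatim}
# a_n = eps_0 h^2 n^2 / (pi m e^2); n = 1 gives the Bohr radius ~ 5.29e-11 m.
from scipy.constants import epsilon_0, h, m_e, e, pi

def orbit_radius(n):
    return epsilon_0 * h**2 * n**2 / (pi * m_e * e**2)

for n in (1, 2, 3):
    print(n, orbit_radius(n))   # 5.29e-11, 2.12e-10, 4.76e-10 (meters)
\end{verbatim}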
\subsection{From Quantized Circles to Elliptical Orbits}
Wilson \cite{wilson15} and Sommerfeld \cite{sommer16} extended Bohr's ideas to a large variety of atomic systems between 1915 and 1916.
The main idea is that the only classical orbits that are allowed as stationary states are those for which the condition
\begin{equation}\label{ws1} \oint p_k dq_k=n_k h \qquad k=1,...,n~, \end{equation}
with $n_k$ a positive integer, is fulfilled. The weak theoretical point is that in general these integrals can be calculated only for {\em conditionally periodic systems}, because only in such cases a set of coordinates can be found, each of which goes through a cycle as a function of the time, independently of the others. Sometimes the coordinates can be chosen in different ways, in which case the shapes of the quantized orbits depend on the choice of the coordinate system, but the energy values do not.
In particular, when 3D polar coordinates are employed, Eq.~(\ref{ws1}) gives the Sommerfeld ellipses characterized by
\begin{equation}\label{ws2} \oint p_r dr =n_r h~, \quad \oint p_\theta d\theta =n_\theta h~, \quad \oint p_\phi d\phi =n_\phi h~. \end{equation}
Now, since $p_\phi$ is a constant, one gets immediately the `quantization' of the angular momentum of the ellipse along the $z$ axis
\begin{equation}\label{ws3} p_\phi=\frac{mh}{2\pi}~, \quad m=\pm 1, \pm 2, \cdot \cdot \cdot ~. \end{equation}
The quantum number $m$ was called the magnetic quantum number by Sommerfeld, who used it as a measure of the direction of the orbit with respect to the magnetic field, thus explaining the Zeeman effect, i.e., the splitting of the spectroscopic lines in a magnetic field. Except for the value $m=0$, which is considered unphysical, this `old' $m$ is practically equivalent to Schr\"odinger's $m$, which mathematically is the azimuthal separation constant but has a similar interpretation.
Interestingly, and this is sometimes a source of confusion, the `old' azimuthal quantum number is denoted by $k$ and is the sum of $n_\theta$ and $m$. It gives the shape of the elliptic orbit according to the relationship $\frac{a}{b}=\frac{n}{k}$, where $n=n_r+k$, established by Sommerfeld. Actually, this $k$ is equivalent to Schr\"odinger's orbital number $l$ plus 1, but again their mathematical origin is quite different.
\begin{figure}
\caption{Bohr-Sommerfeld electron orbits for $n=1$, $2$, and $3$, and the allowed values for $l$.}
\label{BS-elipses}
\end{figure}
\begin{figure}
\caption{Spatial quantization of Bohr-Sommerfeld orbits for azimuthal numbers $k$ = 1,2, and 3.}
\label{123}
\end{figure}
\subsection{Experimental Proof of the Existence of Atomic Stationary States}
The existence of discrete atomic energy levels was evidenced for the first time by J. Franck and G. Hertz in 1914 \cite{FH14}. They observed that when an electron collides with an atom (mercury in their case), a transfer of a particular amount of energy occurred. This energy transfer was recorded spectroscopically and confirmed Bohr's hypotheses that atoms can absorb energy only in quantum portions. Even today, the experiment is preferentially done either with mercury or neon tubes. From the spectroscopic evidence, it is known that the excited mercury vapor emits ultraviolet radiation whose wavelength is 2536 \AA, corresponding to a photon energy $h\nu$ equal to 4.89 eV.
The famous Franck-Hertz curves represent the electron current versus the accelerating potential, as shown in Fig.~\ref{FH-exp}. The current shows a series of equally spaced maxima (and minima) separated by $\sim$ 4.9 V. The first dip corresponds to electrons that lose all their kinetic energy after one inelastic collision with a mercury atom, which is then promoted to its first excited state. The second dip corresponds to those electrons that have twice that kinetic energy and lose it through two inelastic collisions with two mercury atoms, and so on. All these excited atoms emit the same radiation at $\sim$ 2536 \AA. But which is the `first' excited state of mercury? It is spectroscopically denoted by $^3P_1$ in Fig.~(\ref{Mercury}). Notice that the other two $P$ states cannot decay to the ground state $^1S_0$ because dipole emission is forbidden for them, and therefore they are termed metastable. More details, such as the fact that the observed peak separation depends on the geometry of the tube and the Hg vapor pressure, are explained in the readable paper of Hanne \cite{hanne}.
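The quoted numbers are easy to cross-check numerically (our illustration): a photon of wavelength 2536 \AA\ carries an energy $hc/\lambda\approx 4.89$ eV, which matches the $\sim 4.9$ V spacing of the Franck-Hertz minima.
\begin{verbatim}
# Photon energy of the 2536 angstrom mercury line in electron volts.
from scipy.constants import h, c, e

lam = 2536e-10                     # wavelength in meters
print(round(h * c / lam / e, 2))   # ~4.89 eV
\end{verbatim}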
\begin{figure}
\caption{\small (a) Schematic diagram of the Franck-Hertz experiment, where the tube is filled with a gas of Mercury ; (b) typical plot recorded in
a Franck-Hertz experiment with mercury, showing the periodic maxima.}
\label{FH-exp}
\end{figure}
\begin{figure}
\caption{\small Simplified Mercury's level diagram in the low-energy region in which the so-called hyperfine structure is neglected.
The numbers 6 and 7 are Bohr's `whole numbers' or Schr\"odinger's principal quantum numbers.}
\label{Mercury}
\end{figure}
\section{Stationary States in Wave Mechanics}
\subsection{The Schr\"odinger Equation}
According to L. Pauling and E. Bright Wilson Jr. \cite{paul-bright}, already in the years 1920-1925 a decline of the `old quantum theory' (as the Bohr-Sommerfeld atomic theory, based on the `whole number' quantization of cyclic orbits, is historically known) was patent; only very recently has there been some revival, especially in the molecular context \cite{revB}. But in 1925 a quantum mechanics based on the matrix calculus was developed by W. Heisenberg, M. Born, and P. Jordan, and the best was to come in 1926, when Schr\"odinger in a series of four papers developed the most employed form of quantum mechanics, known as {\em wave mechanics}. The advantage of his theory of atomic motion is that it is based on standard (partial) differential equations, more exactly on the Sturm-Liouville theory of self-adjoint linear differential operators. Schr\"odinger starts the first paper in the 1926 series with the following sentence \cite{S26a}:
\begin{quote} In this paper I wish to consider, first the simplest case of the hydrogen atom, and show that the customary quantum conditions can be replaced by another postulate, in which the notion of `whole numbers', merely as such, is not introduced. \end{quote}
Indeed, he could obtain the basic equation of motion in nonrelativistic quantum mechanics, the so-called Schr\"odinger equation for the wavefunctions $\Psi(x,t)$, and provided several analytic applications, among which was the hydrogen atom. The original derivation is based on the variational calculus within the Sturm-Liouville approach and was given eighty years ago. In his first paper of 1926, Schr\"odinger states that the wavefunctions $\Psi$ should be such as to make the `Hamilton integral'
\begin{equation} {\cal J}_{S}[\Psi]= \int \left(\hbar ^2 T(q, \partial \psi/\partial q) +\Psi ^2 V\right)d\tau \ , \end{equation}
stationary subject to the normalizing condition $\int \Psi ^2d\tau =1$ which can be incorporated through the Lagrange multipliers method. The Euler-Lagrange equation of the functional ${\cal J}_{S}[\Psi]$ is the time-dependent Schr\"odinger equation
\begin{equation} i\hbar \frac{\partial \Psi}{\partial t}= H\Psi~. \end{equation}
When the wave function of the time-dependent Schr\"odinger equation is written in the multiplicative form $\Psi(x,t)=\psi (x){\cal F}(t)$ one obtains a complete separation of the space and time behaviors of $\Psi$: on one side, one gets the stationary Schr\"odinger equation for $\psi (x)$,
\begin{equation} -\frac{\hbar^2}{2m}\frac{d^2\psi}{dx^2}+V(x)\psi=E\psi ~, \end{equation}
and on the other side, the simple time-dependent equation for the logarithmic derivative of ${\cal F}$
\begin{equation} i\hbar \frac{d\log {\cal F}}{dt}=E \end{equation}
This decoupling of space and time components is possible whenever the potential energy is independent of time.
The space component has the form of a standing-wave equation. Thus, it is correct to regard the time-independent Schr\"odinger equation as a wave equation from the point of view of the spatial phenomenology.
\subsection{The Dynamical Phase}
Furthermore, the time-dependence is multiplicative and reduces to a modulation of the phase of the spatial wave given by
\begin{equation} {\cal F}=e^{-iEt/\hbar}=\cos(Et/\hbar)-i\sin(Et/\hbar)~.
\end{equation}
The phase factor ${\cal F}=e^{-iEt/\hbar}$ is known as the {\em dynamical phase}. In recent times, other parametric phases have been recognized to occur, e.g., the Berry phase. The dynamical phase is a harmonic oscillation with angular frequency $\omega = E/\hbar$ and period $T=h/E$. In other words, a Schr\"odinger wavefunction is flickering from positive through imaginary to negative amplitudes with a frequency proportional to the energy. Although it is a wave of constant energy, it is not stationary because its phase is time dependent (periodic). However, a remarkable fact is that the product
$\Psi ^{*}\Psi$, i.e., the squared modulus $|\Psi|^2$ of the Schr\"odinger constant-energy waves, remains constant in time
\begin{equation} \Psi ^{*}\Psi = \psi ^{*}\psi \end{equation}
It is in the sense of their constant modulus that Schr\"odinger constant-energy waves are called stationary states.
\subsection{The Schr\"odinger Wave Stationarity}
Thus, non-relativistic quantum stationarity refers to waves of constant energy and constant modulus, but not of constant phase, which can occur as solutions of the Schr\"odinger equation for time-independent potentials. In the Schr\"odinger framework, the dynamical systems are usually assumed to exist in stationary states (or waves of this type). It is worth noting that the preferred terminology is that of states and not of waves. This is due to the fact that, being of constant energy, the Schr\"odinger stationary waves describe physical systems in configurations (or states) of constant energy, which can therefore be naturally associated with the traditional conservative Hamiltonian systems. Moreover, the localization of these waves can be achieved by imposing appropriate boundary conditions.
\subsection{Stationary Schr\"odinger States and Classical Orbits}
In the Schr\"odinger theory, a single stationary state does not correspond to a classical orbit. This is where the Schr\"odinger energy waves differ the most from Bohr's theory which is based on quantized classical cyclic trajectories. To build a wave entity closer to the concept of a classical orbit, one should use superpositions of many stationary states, including their time dependence, i.e., what is known as wave packets. Only monochromatic plane waves of angular frequency $\omega$ corresponds through the basic formula $E=\hbar \omega$ to a well-defined energy $E$ of the `classical' particle but unfortunately there is no relationship between the wavevector $k$ and the momentum $p$ of the corresponding particle since a plane wave means only the propagation at constant (phase) velocity of infinite planes of equal phase. In other words, a criterium for localization is required in order to define a classical particle by means of a wave approach.
In the one-dimensional case, a wave packet is constructed as follows
\begin{equation}\label{wp1} \psi(x,t)=\int _{-\infty}^{+\infty}f(k')e^{i(k' x-\omega ' t)} dk' \ , \end{equation}
with obvious generalization to more dimensions. If $f(k')$ is written in the polar form $F(k')e^{i\alpha}$ and $F$ is chosen with a pronounced peak in a wavenumber region of extension $\Delta k$ around the point $k'=k$, then the wave packet is localized in a spatial region of extension $\Delta x\approx \frac{1}{\Delta k}$ surrounding the ``center of the wavepacket". The latter is equivalent to the concept of material point in classical mechanics and travels uniformly with the group velocity $v_g=\frac{d\omega}{dk}$. This is the velocity that can be identified with the particle velocity $v=\frac{dE}{dp}$ in classical mechanics and which leads to the de Broglie formula $p=\hbar k=\frac{h}{\lambda}$.
\subsection{Stationary States as Sturm-Liouville Eigenfunctions}
The mathematical basis of Schr\"odinger wave mechanics is the Sturm-Liouville (SL) theory of self-adjoint linear differential operators established in the 19th century, more specifically the SL eigenvalue problem, which is to find solutions of a differential equation of the form \begin{equation}\label{sl1} {\cal L}u_n(x)\equiv \frac{d}{dx}\left[p(x)\frac{du_n(x)}{dx}\right]-q(x)u_n(x)=-\mu _n w(x)u_n(x) \end{equation} subject to specified boundary conditions at the beginning and end of some interval $(a,b)$ on the real line, e.g. \begin{equation}\label{sl2} Au(a)+Bu'(a)=0 \end{equation} \begin{equation}\label{sl3} Cu(b)+Du'(b)=0 \end{equation} where $A,B,C,D$ are given constants. The differential operator ${\cal L}$ in (\ref{sl1}) is rather general since any second order linear differential operator can be put in this form after multiplication by a suitable factor. The boundary conditions are also rather general including the well-known Dirichlet and Neumann boundary conditions as particular cases but other possibilities such as periodic boundary conditions
\begin{equation}\label{sl4} u(x)=u(x+b-a) \end{equation}
could be of interest in some cases, especially for angular variables.
The SL eigenvalue problem is an infinite dimensional generalization of the finite dimensional matrix eigenvalue problem
\begin{equation}\label{sl5} Mu=\mu u \end{equation}
with $M$ an $n\times n$ matrix and $u$ an $n$ dimensional column vector. As in the matrix case, the SL eigenvalue problem will have solutions only for certain values of the eigenvalue $\mu _n$. The solutions $u_n$ corresponding to these $\mu _n$ are the eigenvectors. For the finite dimensional case with an $n\times n$ matrix $M$ there can be at most $n$ linearly independent eigenvectors. For the SL case there will in general be an infinite set of eigenvalues $\mu _n$ with corresponding eigenfunctions $u_n(x)$.
The differential equations derived by separating variables are in general of the SL form, the separation constants being the eigenvalue parameters $\mu$. The boundary conditions (\ref{sl2},\ref{sl3}) are determined by the physical application under study.
The solutions $u_n(x),\, \mu _n$ of a SL eigenvalue problem have some general properties of basic importance in wave (quantum) mechanics.
If $u(x)$ and $v(x)$ are arbitrary twice differentiable functions, then integration by parts gives \begin{equation}\label{slosc10} \int _a^b dx[v{\cal L}u-u{\cal L}v]=
\begin{equation}\label{slosc10} \int _a^b dx[v{\cal L}u-u{\cal L}v]=
p\left(v\frac{du}{dx}-u\frac{dv}{dx}\right)\bigg |_a^b~. \end{equation}
An operator ${\cal L}$ which satisfies (\ref{slosc10}) is said to be self-adjoint. Any second order linear differential operator can be put in this self-adjoint form by multiplication by a suitable factor.
It is easy to show that for functions $u(x)$, $v(x)$ satisfying boundary conditions of the standard SL form or the periodic boundary condition (\ref{sl4}) the right hand side of (\ref{slosc10}) vanishes. In both of these cases we then have
\begin{equation}\label{sl11} \int _a^b dxv{\cal L}u=\int _a^b dxu{\cal L}v~. \end{equation}
Consider now two different eigenfunctions $u_n(x)$, $u_m(x)$ belonging to different eigenvalues $\mu _n\neq \mu _m$: \begin{eqnarray}\label{sl12} {\cal L}u_n(x)&=&-\mu _n w (x) u_n(x)~,\\ {\cal L}u_m(x)&=&-\mu _m w (x) u_m(x)~. \end{eqnarray}
Multiplying the first equation by $u_m$ and the second one by $u_n$, integrating and subtracting, we find: \begin{equation}\label{sl13} \int _a^b dx[u_m{\cal L}u_n-u_n{\cal L}u_m]=-(\mu _n-\mu _m)\int _a^b dx w(x)u_n(x)u_m(x)~. \end{equation}
The left hand side will vanish for either set of boundary conditions we consider here, so for either of these cases, we find the orthogonality condition
\begin{equation}\label{sl14} \int _a^b dx w(x)u_n(x)u_m(x)=0~, \qquad \mu _n \neq \mu _m~. \end{equation}
Two functions $u_n(x)$, $u_m(x)$ satisfying this condition are said to be orthogonal with weight $w(x)$. Moreover, if $w(x)$ is non-negative, one can introduce the SL normalization of the $u_n$ as follows:
\begin{equation}\label{sl15} \int _a^b dx w(x)[u_n(x)]^2=1~. \end{equation}
The most important property of the eigenfunctions of a SL problem is that they form a complete set. This means that an arbitrary function $\psi(x)$ can be expanded in an infinite series of the form
\begin{equation}\label{slcompl} \psi(x)=\sum _n a_nu_n(x)~. \end{equation}
The expansion coefficients $a_n$ are determined by multiplying (\ref{slcompl}) by $w(x)u_n(x)$, integrating term by term, and using the orthogonality relation \begin{equation}\label{slcompl-1} a_n=\frac{\int _a^b dx w(x) u_n(x)\psi(x)}{\int _a^bdx w(x)[u_n(x)]^2}~. \end{equation}
According to Courant and Hilbert \cite{ch53}, every piecewise continuous function defined in some domain with a square-integrable first derivative may be expanded in an eigenfunction series which converges absolutely and uniformly in all subdomains free of points of discontinuity; at the points of discontinuity it represents (like the Fourier series) the arithmetic mean of the right and left hand limits. (This theorem does not require that the functions expanded satisfy the boundary conditions.)
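As a simple numerical illustration of the expansion formula (\ref{slcompl-1}) (an illustrative Python sketch of our own, not part of the original treatment; the test function and the interval are arbitrary choices), one may expand $\psi(x)=x(1-x)$ in the Dirichlet eigenfunctions $u_n(x)=\sin (n\pi x)$ of $-u''=\mu u$ on $(0,1)$ with weight $w(x)=1$:
\begin{verbatim}
# Sketch: expansion of psi(x) = x(1-x) in the Dirichlet eigenfunctions
# u_n(x) = sin(n*pi*x) of -u'' = mu*u on (0,1), weight w(x) = 1, using
# the coefficient formula a_n = <u_n, psi> / <u_n, u_n>.
import numpy as np
from scipy.integrate import quad

def u(n, x):
    return np.sin(n * np.pi * x)

def psi(x):
    return x * (1.0 - x)

def a(n):
    num, _ = quad(lambda x: u(n, x) * psi(x), 0.0, 1.0)
    den, _ = quad(lambda x: u(n, x) ** 2, 0.0, 1.0)
    return num / den

x0 = 0.3
partial = sum(a(n) * u(n, x0) for n in range(1, 40))
print(partial, psi(x0))   # the two numbers should agree closely
\end{verbatim}
With a few dozen terms the partial sum should reproduce $\psi$ at interior points to several significant digits, in line with the convergence statement quoted above.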
\section{The Infinite Square Well: The Stationary States Most Resembling the Standing Waves on a String}
We have already commented that the time-independent Schr\"odinger equation has the form of a standing-wave equation. This is a very instructive analogy and allows one to obtain the correct energy values for the case of the infinite square well using only the de Broglie wave concept without even introducing the Schr\"odinger equation \cite{cerny86}.
We recall the treatment of the standing waves on a finite homogeneous string of total length $L$. Provided that the origin of the coordinate system is placed at its center and the $x$ direction is chosen parallel to it, the space dependent part of its standing waves is given by
\begin{equation}\label{l6} u(x) = A\cos kx +B \sin kx \end{equation}
where $x\in (-L/2, L/2)$ and $A$, $B$, and $k$ are constants. Imposing the usual (Dirichlet) boundary conditions
\begin{equation}\label{l7} u(-L/2)=u(L/2)=0 \ , \end{equation}
equation (\ref{l6}) takes the form
\begin{equation}\label{l8} u_n(x) = \left \{ \begin{array}{ll} A\cos k_nx~, & \mbox{if $n> 0$ and odd}\\ B\sin k_nx ~, & \mbox{if $n> 0$ and even} \end{array} \right. \end{equation}
where \begin{equation}\label{l9} x\in ( -L/2,L/2) \end{equation} and \begin{equation}\label{l10} k_n=\frac{n\pi}{L}~, \qquad (n=1,2,3,...)~. \end{equation}
These functions - the normal modes of the string under consideration - form a complete set with respect to physically reasonable functions defined within the interval (\ref{l9}) and satisfying equation (\ref{l7}), i.e., any such function $U(x)$ can be written as the Fourier series \begin{equation}\label{l11} U(x)=\sum _{n=1}^{\infty}c_n u_n(x)~. \end{equation}
We are ready now to state the analogy, which is based on the following steps.
\begin{itemize}
\item A standing de Broglie wave corresponds to a quantum particle strictly confined to the region $-L/2\leq x\leq L/2$, i.e., in the infinite square-well potential shown in Fig.~(\ref{fig:Iwell})
\begin{equation}\label{isw1} V(x) = \left \{ \begin{array}{ll}
0~, & \mbox{if $|x|\leq L/2$}\\
\infty ~, & \mbox{if $|x| > L/2~.$} \end{array} \right. \end{equation}
\begin{figure}
\caption{The infinite square well potential. This case is mathematically the most analogous to the
classical string standing waves.}
\label{fig:Iwell}
\end{figure}
Because of the form of this potential, it is assumed that there is no asymptotic tail of the wave functions in the regions outside the well, i.e., $\psi (x)=0$ for $|x|>L/2$. In physical terms, this means that the particle is completely localized within the well and the boundary conditions are of the Dirichlet type.
In the interior region, the Schr\"odinger equation is the simplest possible: \begin{equation}\label{isw0} -\frac{h^2}{2m}\frac{d^2\psi}{dx^2}-E\psi=0 \ , \end{equation}
whose solutions (energy eigenfunctions) are:
\begin{equation}\label{isw2} \phi _n(x) =\left \{ \begin{array}{lll} (2/L)^{1/2}\sin(n\pi x/L)~, & n \, \mbox{even}~, \quad
\mbox{$|x|\leq L/2$} \ , \\
(2/L)^{1/2}\cos(n\pi x/L)~, & n \, \mbox{odd}~, \quad \mbox{$|x|\leq L/2$} \ , \\
0 ~, & \quad \mbox{$|x| > L/2~,$} \end{array} \right. \end{equation}
and the energy eigenvalues are: \begin{equation}\label{isw3} E_n=\frac{(\hbar k_n)^2}{2m}=\frac{\hbar ^2 \pi ^2 n^2}{2mL^2}~. \end{equation}
\item The amplitude of the standing wave in the $n$th stationary state, measured from one of the walls by means of the shifted coordinate $z=x+L/2$, is proportional to $\sin (n\pi z/L)$. This corresponds strictly to the analogy with standing waves on a classical string.
\item The $n$th standing wave is presented as a superposition of two running waves. The wave travelling to the right, with wavelength $\lambda _n=2L/n$, has de Broglie momentum $\hbar \pi n/L$, and the left travelling wave has the opposite de Broglie momentum $-\hbar \pi n/L$. The resulting energy is then quantized and given by
\begin{equation} E_n=\frac{p_n^2}{2m}=\frac{\hbar ^2 \pi ^2 n^2}{2mL^2}~. \end{equation}
\end{itemize}
Because of the extremely strong confinement of the infinite square well it may seem that this case is only of academic interest. However, two-dimensional strong confinement of electrons by rings of adatoms (corrals) has been reported in the literature \cite{crommie93}.
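To get a feeling for the scale of the energies (\ref{isw3}), the following short Python sketch (our own illustration; the electron mass, $\hbar$ and the well width $L=1$ nm are standard illustrative values, not taken from the text) evaluates the first few levels:
\begin{verbatim}
# Sketch: E_n = hbar^2 pi^2 n^2 / (2 m L^2) for an electron in a well
# of width L = 1 nm (illustrative numbers).
import numpy as np

hbar = 1.054571817e-34    # J s
m_e  = 9.1093837015e-31   # kg
eV   = 1.602176634e-19    # J
L    = 1.0e-9             # m

def E(n):
    return (hbar * np.pi * n / L) ** 2 / (2.0 * m_e)

for n in (1, 2, 3):
    print(n, E(n) / eV, "eV")   # roughly 0.38, 1.5, 3.4 eV
\end{verbatim}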
\newcommand{\dd}{\dagger}
\section{1D Parabolic Well: The Stationary States of the Quantum Harmonic Oscillator}
\subsection{The Solution of the Schr\"odinger Equation}
The harmonic oscillator (HO) is one of the fundamental paradigms of Physics. Its utility resides in its simplicity which is manifest in many areas from classical physics to quantum electrodynamics and theories of gravitational collapse.\\ It is well known that within classical mechanics many complicated potentials can be well approximated close to their equilibrium positions $a_i$ by HO potentials as follows
\begin{equation} \label{ho1} V(x) \sim \frac{1}{2}V^{\prime\prime}(a_i)(x-a_i)^2~. \end{equation}
For this case, the classical Hamiltonian function of a particle of mass $m$ oscillating at the frequency $\omega$ has the following form:
\begin{equation}\label{ho2} H=\frac{p^2}{2m}+\frac{1}{2}m\omega^2x^2 \ , \end{equation}
and the quantum Hamiltonian corresponding to the configurational space is given by
\begin{equation}\label{ho4} \hat{H}=\frac{1}{2m}\left(-i\hbar\frac{d}{dx}\right)^2 +\frac{1}{2}m\omega^2x^2=-\frac{\hbar^2}{2m}\frac{d^2}{dx^2}+\frac{1}{2}m\omega^2x^2~. \end{equation}
\begin{figure}
\caption{The harmonic (parabolic) oscillator potential.}
\label{fig:armonic}
\end{figure}
Since we consider a time-independent potential, the eigenfunctions $\psi_n$ and the eigenvalues $E_n$ are obtained by means of the time-independent Schr\"odinger equation
\begin{equation}\label{ho5} \hat{H}\psi=E\psi~. \end{equation}
For the HO Hamiltonian, the Schr\"odinger equation is
\begin{equation}\label{ho6} \frac{d^2\psi}{dx^2}+\Bigg[\frac{2mE}{\hbar^2} -\frac{m^2\omega^2}{\hbar^2}x^2\Bigg]\psi=0~. \end{equation}
Defining the parameters
\begin{equation}\label{ho7} k^2=\frac{2mE}{\hbar^2}~, \qquad \lambda=\frac{m\omega}{\hbar}~, \end{equation}
\noindent the Schr\"odinger equation becomes
\begin{equation}\label{ho8} \frac{d^2\psi}{dx^2}+[k^2-\lambda^2x^2]\psi=0~, \end{equation}
\noindent which is known as Weber's differential equation in mathematics. To solve this equation one makes use of the following variable transformation
\begin{equation}\label{ho9} y=\lambda x^2~. \end{equation}
By changing the independent variable from $x$ to $y$, the differential operators $D_x$ and $D_x^2$ take the form
\begin{equation}\label{ho10} D_x\equiv\frac{d}{dx}=\left(\frac{dy}{dx}\right)D_y ~, \quad D_x^2\equiv \frac{d^2}{dx^2}=\frac{d}{dx}\left(\frac{dy}{dx}\frac{d}{dy}\right) =\left(\frac{d^2y}{dx^2}\right)D_y+\left(\frac{dy}{dx}\right)^2D_y^2~. \end{equation}
By applying these rules to the proposed transformation we obtain the following differential equation in the $y$ variable
\begin{equation}\label{ho11} y\frac{d^2\psi}{dy^2}+\frac{1}{2}\frac{d\psi}{dy}+\left[\frac{k^2}{4\lambda} -\frac{1}{4}y\right]\psi=0~, \end{equation}
\noindent and, by defining:
\begin{equation}\label{ho12} \kappa=\frac{k^2}{2\lambda} =\frac{E}{\hbar\omega}~, \end{equation}
we get after dividing by $y$ (i.e., $y\neq 0$)
\begin{equation}\label{ho14} \frac{d^2\psi}{dy^2}+\frac{1}{2y}\frac{d\psi}{dy} +\left[\frac{\kappa}{2y}-\frac{1}{4}\right]\psi=0~. \end{equation}
Let us try to solve this equation by first doing its asymptotic analysis in the limit $y\rightarrow\infty$ in which the equation behaves as follows
\begin{equation}\label{ho15} \frac{d^2\psi_{\infty}}{dy^2}-\frac{1}{4}\psi_{\infty}=0~. \end{equation}
This equation has as solution
\begin{equation}\label{ho16} \psi_{\infty}(y)=A\exp\left(\frac{y}{2}\right)+B\exp\left(-\frac{y}{2}\right)~. \end{equation}
The first term diverges in the limit $y\rightarrow\infty$. Thus we take $A=0$, and keep only the attenuated exponential. We can now suggest that $\psi$ has the following form
\begin{equation}\label{ho17} \psi(y)=\exp\left(-\frac{y}{2}\right)\varphi(y)~. \end{equation}
Plugging it into the differential equation in $y$ (Eq.~(\ref{ho14})), one gets:
\begin{equation}\label{ho18} y\frac{d^2\varphi}{dy^2} +\left(\frac{1}{2}-y\right)\frac{d\varphi}{dy}+\left(\frac{\kappa}{2}-\frac{1}{4}\right)\varphi=0~. \end{equation}
The latter equation is of the confluent (Kummer) hypergeometric form
\begin{equation}\label{ho19} z\frac{d^2f}{dz^2}+(c-z)\frac{df}{dz}-af=0~, \end{equation}
whose general solution is \begin{equation}\label{ho20} f(z)=A \hspace{.2cm} _1F_1(a,c;z)+ B \hspace{.2cm} z^{1-c} \hspace{.1cm} _1F_1(a-c+1,2-c;z)~, \end{equation}
where the confluent hypergeometric function is defined as follows
\begin{equation}\label{ho21} _1F_1(a,c;z)=\sum_{n=0}^{\infty}\frac{(a)_n z^n}{(c)_n n!}~. \end{equation} By direct comparison of Eqs.~(\ref{ho19}) and (\ref{ho18}), one can see that the general solution of the latter one is
\begin{equation}\label{ho22} \varphi(y)=A\hspace{.2cm} _1F_1\left(a,\frac{1}{2};y\right)+ B \hspace{.2cm} y^{\frac{1}{2}} \hspace{.2cm} _1F_1\left(a+\frac{1}{2},\frac{3}{2};y\right)~, \end{equation}
where
\begin{equation}\label{ho23} a=-\left(\frac{\kappa}{2}-\frac{1}{4}\right)~. \end{equation}
If we keep these solutions in their present form, the normalization condition is not satisfied by the wavefunction. Indeed, because the
$|y|\rightarrow \infty$ asymptotic behaviour of the confluent hypergeometric function is $_1F_1(a,c;y)\rightarrow \frac{\Gamma(c)}{\Gamma(c-a)}e^{-ia\pi}y^{-a} +\frac{\Gamma(c)}{\Gamma(a)}e^{y}y^{a-c}$, it follows from the dominant exponential behavior that:
\begin{equation}\label{ho24} \psi(y)=e^{(-\frac{y}{2})}\varphi(y)\rightarrow \hspace{.3cm}{\rm const.} \hspace{.2cm} e^{(\frac{y}{2})}y^{a-\frac{1}{2}}~. \end{equation}
This leads to a divergence in the normalization integral, which physically is not acceptable. What one does in this case is to impose the termination condition for the series. That is, the series is cut at a finite number $n$ of terms and therefore turns into a polynomial of order $n$. The truncation condition of the confluent hypergeometric series $_1F_1(a,c;z)$ is $a=-n$, where $n$ is a nonnegative integer (i.e., zero is included). \\ We thus notice that asking for a finite normalization constant (as already known, a necessary condition for the physical interpretation in terms of probabilities) leads us to the truncation of the series, which simultaneously generates the quantization of energy.\\ In the following we consider the two possible cases:
$1)\hspace{.4cm} a=-n \hspace{.3cm}$ and $ B=0$
\begin{equation}\label{ho25}
\frac{\kappa}{2}-\frac{1}{4}=n~. \end{equation}
The eigenfunctions are given by
\begin{equation}\label{ho26} \psi_n(x)={\cal N}_{n} \exp\left(\frac{-\lambda x^2}{2}\right) \hspace{.1cm} _1F_1\left(-n,\frac{1}{2};\lambda x^2\right) \end{equation}
and the energy is:
\begin{equation}\label{ho27} E_n=\hbar\omega\left(2n+\frac{1}{2}\right)~. \end{equation}
$2)\hspace{.4cm} a+\frac{1}{2}=-n \hspace{.3cm}$ and $A=0$
\begin{equation}\label{ho28} \frac{\kappa}{2}-\frac{1}{4}=n+\frac{1}{2}~. \end{equation}
The eigenfunctions are now
\begin{equation}\label{ho29} \psi_n(x)={\cal N}_{n}\exp\left(-\frac{\lambda x^2}{2}\right) \hspace{.2cm}x \hspace{.2cm}_1F_1\left(-n,\frac{3}{2};\lambda x^2\right)~, \end{equation}
whereas the stationary energies are
\begin{equation}\label{ho30} E_n=\hbar\omega\left[(2n+1)+\frac{1}{2}\right]~. \end{equation} The polynomials obtained by this truncation of the confluent hypergeometric series are called Hermite polynomials and in hypergeometric notation they are
\begin{eqnarray}\label{ho31} H_{2n}(\eta)&=&(-1)^n \frac{(2n)!}{n!} \hspace{.2cm} _1F_1\left(-n,\frac{1}{2};\eta^2\right) \ , \\ H_{2n+1}(\eta)&=&(-1)^n \frac{2(2n+1)!}{n!} \hspace{.2cm}\eta \hspace{.2cm} _1F_1\left(-n,\frac{3}{2};\eta^2\right)~. \end{eqnarray}
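The hypergeometric representation (\ref{ho31}) is easy to check numerically, e.g. with the following Python sketch based on scipy's \texttt{eval\_hermite} and \texttt{hyp1f1} (an illustrative aside; the chosen values of $n$ and $\eta$ are arbitrary):
\begin{verbatim}
# Sketch: numerical check of the hypergeometric form of the Hermite
# polynomials for a few n at an arbitrary argument eta.
from math import factorial
from scipy.special import eval_hermite, hyp1f1

eta = 0.7
for n in range(4):
    even = (-1)**n * factorial(2*n) / factorial(n) \
           * hyp1f1(-n, 0.5, eta**2)
    odd  = (-1)**n * 2 * factorial(2*n + 1) / factorial(n) * eta \
           * hyp1f1(-n, 1.5, eta**2)
    print(even - eval_hermite(2*n, eta),       # should be ~0
          odd  - eval_hermite(2*n + 1, eta))   # should be ~0
\end{verbatim}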
We can now combine the obtained results (because some of them give us the even cases and the others the odd ones) in a single expression for the oscillator eigenvalues and eigenfunctions
\begin{eqnarray}\label{ho33} \psi_n (x)&=&{\cal N}_{n} \exp\left( -\frac{\lambda x^2}{2}\right) H_n (\sqrt{\lambda}x)\\ E_n &=&\left(n+\frac{1}{2}\right)\hbar\omega \hspace{1cm}n=0,1,2~\ldots \end{eqnarray}
The HO energy spectrum is equidistant, i.e., there is the same energy difference $\hbar \omega$ between any consecutive neighbor levels. Another remark refers to the minimum value of the energy of the oscillator; somewhat surprisingly it is not zero. This is considered by many people to be a pure quantum result because it is zero when $\hbar\rightarrow 0$. $E_0=\frac{1}{2}\hbar \omega$ is known as the zero point energy and the fact that it is nonzero is the main characteristic of all confining potentials.\\
\subsection{The Normalization Constant}
The normalization constant is usually calculated in the following way. The Hermite generating function $e^{\lambda (-t^2+2tx)}$ is multiplied by a second copy of itself (with $t$ replaced by $s$) and then by $e^{-\lambda x^2}$:
\begin{equation}\label{normH1} e^{-\lambda x^2}e^{\lambda(-s^2+2sx)}e^{\lambda(-t^2+2tx)}=\sum _{m,n=0}^{\infty}e^{-\lambda x^2}H_m(\sqrt{\lambda}x)H_n(\sqrt{\lambda}x)\frac{\lambda ^{\frac{m+n}{2}}s^mt^n}{m!n!}~. \end{equation} Integrating over $x'=\sqrt{\lambda}x$ on the whole real line and writing $s'=\sqrt{\lambda}s$, $t'=\sqrt{\lambda}t$, the cross terms of the double sum drop out because of the orthogonality property
\begin{eqnarray}\label{normH2} \sum _{n=0}^{\infty}\frac{\lambda ^n(st)^n}{(n!)^2}\int _{-\infty}^{\infty}e^{-x'^2}[H_n(x')]^2dx'&=&\int _{-\infty}^{\infty}e^{-x'^2-s'^2+2s'x'-t'^2+2t'x'}dx'=\nonumber\\ & & \\ =\int _{-\infty}^{\infty}e^{-(x'-s'-t')^2}e^{2s't'}dx'&=&\pi ^{1/2}e^{2s't'}=\pi ^{1/2}\sum _{n=0}^{\infty}\frac{2^n\lambda ^n(st)^n}{n!}\nonumber \end{eqnarray} where the properties of the Euler gamma function have been used \begin{equation}\label{defGamma} \Gamma (z)=2\int _0^{\infty}e^{-t^2}t^{2z-1}dt~, \qquad {\rm Re}(z) >0 \end{equation} as well as the particular case $z=1/2$ when $\Gamma (1/2)=\sqrt{\pi}$.
By equating coefficients of like powers of $st$ in (\ref{normH2}), we obtain \begin{equation}\label{normHfinal} \int _{-\infty}^{\infty}e^{-x'^2}[H_n(x')]^2dx'=2^n\pi ^{1/2}n!~. \end{equation}
This leads to \begin{equation}\label{ho35} {\cal N}_{n} = \Bigg[ \sqrt{\frac{\lambda}{\pi}}\frac{1}{2^n n!}\Bigg]^{\frac{1}{2}}~. \end{equation}
\subsection{Final Formulas for the HO Stationary States}
Thus, one gets the following normalized eigenfunctions (stationary states) of the one-dimensional harmonic oscillator operator
\begin{equation} \label{ho36} \psi_n (x)= \Bigg[ \sqrt{\frac{\lambda}{\pi}}\frac{1}{2^n n!}\Bigg]^{\frac{1}{2}} \hspace{.2cm} \exp \left(\frac{-\lambda x^2}{2}\right) \hspace{.2cm} H_n( \sqrt{\lambda} x)~. \end{equation}
If the dynamical phase factor ${\cal F}$ is included, the harmonic oscillator eigenfunctions take the following final form
\begin{equation} \label{ho37t} \psi_n (x,t)= \Bigg[ \sqrt{\frac{\lambda}{\pi}}\frac{1}{2^n n!}\Bigg]^{\frac{1}{2}} \hspace{.2cm} \exp \left(-i\left(n+\frac{1}{2}\right)\omega t-\frac{\lambda x^2}{2}\right) \hspace{.2cm} H_n( \sqrt{\lambda} x)~. \end{equation}
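As an illustrative aside (not part of the original treatment), the orthonormality of the stationary states (\ref{ho36}) with the constant (\ref{ho35}) can be verified numerically; units with $\lambda=1$ are assumed for simplicity:
\begin{verbatim}
# Sketch: numerical orthonormality check of the HO stationary states,
# psi_n(x) = N_n exp(-lam x^2/2) H_n(sqrt(lam) x), with lam = 1.
import numpy as np
from math import factorial, pi, sqrt
from scipy.integrate import quad
from scipy.special import eval_hermite

lam = 1.0

def psi(n, x):
    N = (sqrt(lam / pi) / (2**n * factorial(n))) ** 0.5
    return N * np.exp(-lam * x**2 / 2) * eval_hermite(n, sqrt(lam) * x)

for m in range(3):
    for n in range(3):
        val, _ = quad(lambda x: psi(m, x) * psi(n, x), -np.inf, np.inf)
        print(m, n, round(val, 6))   # ~1 for m == n, ~0 otherwise
\end{verbatim}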
\subsection{The Algebraic Approach: Creation and Annihilation Operators $\hat{a}^{\dagger}$ and $\hat{a}$}
There is another approach to deal with the HO besides the conventional one of solving the Schr\"odinger equation. It is the algebraic method, also known as the method of creation and annihilation (ladder) operators. This is a very efficient procedure, which can be successfully applied to many quantum-mechanical problems, especially when dealing with discrete spectra.\\ Let us define two non-Hermitian operators $a$ and $a^{\dagger}$:
\begin{equation}\label{ho37} a=\sqrt{\frac{m\omega}{2\hbar}}\left(x+\frac{ip}{m\omega}\right) \ , \end{equation}
\begin{equation}\label{ho38} a^{\dagger}=\sqrt{\frac{m\omega}{2\hbar}}\left(x-\frac{ip}{m\omega}\right)~. \end{equation}
These operators are known as the annihilation operator and the creation operator, respectively (the reason for this terminology will become clear in what follows).\\ Let us calculate the commutator of these operators
\begin{equation}\label{ho39} [a,a^{\dagger}]=\frac{m\omega}{2\hbar}\left[x +\frac{ip}{m\omega},x-\frac{ip}{m\omega}\right]=\frac{1}{2\hbar}(-i[x,p]+i[p,x])=1~, \end{equation} where we have used the commutator $[x,p]=i\hbar$. Therefore the annihilation and creation operators do not commute, since we have $[a,a^{\dagger}]=1$. Let us also introduce the very important number operator $\hat{N}$:
\begin{equation}\label{ho42} \hat{N}=a^{\dd} a~. \end{equation}
This operator is Hermitian, as one can readily prove using $(AB)^{\dagger}=B^{\dagger}A^{\dagger}$:
\begin{equation}\label{ho43} \hat{N}^{\dagger}=(a^{\dd} a)^{\dagger}=a^{\dd} (a^{\dd})^{\dagger}=a^{\dd} a=\hat{N}~. \end{equation}
Considering now that
\begin{equation}\label{ho44} a^{\dd} a =\frac{m\omega}{2\hbar}\left(x^2+\frac{p^2}{m^2\omega^2}\right)+\frac{i}{2\hbar}[x,p]=\frac{\hat{H}}{\hbar\omega}-\frac{1}{2} \ , \end{equation}
\noindent we notice that the Hamiltonian can be written in a quite simple form as a linear function of the number operator
\begin{equation}\label{ho45} \hat{H}=\hbar\omega\left(\hat{N}+\frac{1}{2}\right)~. \end{equation}
The number operator bears this name because its eigenvalues are precisely the subindices of the eigenfunctions on which it acts
\begin{equation}\label{ho46}
\hat{N}|n\rangle=n|n\rangle~, \end{equation}
\noindent where we have used Dirac's ket notation $\psi _n =
|n\rangle$.
Applying the number-form of the HO Hamiltonian in (\ref{ho45}) to this ket, one gets
\begin{equation}\label{ho48}
\hat{H}|n\rangle=\hbar\omega\left(n+\frac{1}{2}\right)|n\rangle~, \end{equation}
which directly shows that the energy eigenvalues are given by
\begin{equation}\label{ho49} E_n=\hbar\omega\left(n+\frac{1}{2}\right)~. \end{equation}
Thus, this basic result is obtained through purely algebraic means.\\
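The algebraic structure can also be illustrated numerically. The following Python sketch (our own construction, with an arbitrary truncation dimension) builds finite matrix truncations of $a$, $a^{\dagger}$ and $\hat{N}$ in the number basis and recovers the spectrum $(n+\frac{1}{2})$ of $\hat{H}/\hbar\omega$ for the low-lying levels:
\begin{verbatim}
# Sketch: truncated matrices of the ladder operators in the number basis.
import numpy as np

d  = 12                                   # truncation dimension
a  = np.diag(np.sqrt(np.arange(1, d)), 1) # annihilation operator
ad = a.T                                  # creation operator
N  = ad @ a                               # number operator
H  = N + 0.5 * np.eye(d)                  # H / (hbar*omega)

print(np.diag(a @ ad - ad @ a)[:-1])      # ~1: [a, a^dagger] = 1
                                          # (truncation affects only the
                                          #  last diagonal entry)
print(np.linalg.eigvalsh(H)[:5])          # 0.5, 1.5, 2.5, 3.5, 4.5
\end{verbatim}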
It is possible to consider the ket $a^{\dd} |n\rangle$ as an eigenket of the number operator for which the eigenvalue is raised by one unit. In physical terms, this means that an energy quantum has been produced by the action of $a^{\dd}$ on the ket $|n\rangle$. This already explains the name of creation operator. A similar argument applies to the operator $a$, explaining the name of annihilation operator (an energy quantum is eliminated from the system when this operator is put into action).\\
Consequently, we have \begin{equation}\label{ho62}
a^{\dd} |n\rangle=\sqrt{n+1}| n+1\rangle~. \end{equation}
For the annihilation operator, following the same procedure one can get the following relation
\begin{equation}\label{ho63}
a|n\rangle=\sqrt{n}| n-1\rangle~. \end{equation}
Let us show now that the values of $n$ should be nonnegative integers. For this, we employ the positivity requirement for the norm of the state vector $a| n\rangle$. The latter condition tells us that the inner product of the vector with its adjoint $
(a|n\rangle)^\dagger$ (= $\langle n| a^{\dd}$) should always be nonnegative
\begin{equation}\label{ho64}
( \langle n| a^{\dd})\cdot(a| n\rangle)\geq 0~. \end{equation}
This relationship is in fact the expectation value of the number operator
\begin{equation}\label{ho65}
\langle n| a^{\dd} a| n\rangle=\langle n|\hat{N}| n\rangle=\langle n|a^{\dd}
\sqrt{n}|n-1\rangle= \langle n|\sqrt{n}\sqrt{n}|n\rangle=n \geq 0~. \end{equation} Thus, $n$ cannot be negative. Moreover, it must be a nonnegative integer since, if that were not the case, by applying the annihilation operator iteratively a sufficient number of times we would be led to negative eigenvalues, which would contradict the nonnegativity of the inner product.\\
It is possible to express the state $|n \rangle$ directly as a function of the ground state $| 0\rangle$ using the $n$th power of the creation operator as follows:
\begin{eqnarray}
|n\rangle=\left[ \frac{ (a^{\dd})^n}{\sqrt{n!}}\right]| 0\rangle ~, \end{eqnarray} which can be obtained by iterations.
One can also apply this method to get the eigenfunctions in the configuration space. To achieve this, we start with the ground state
\begin{equation}\label{ho66}
a|0\rangle=0~. \end{equation}
In the $x$-representation, we have
\begin{equation}\label{ho67} \hat{ a} \psi_0(x)=\sqrt{\frac{m\omega}{2\hbar}} \left(x+\frac{ip}{m\omega}\right) \psi_0(x)=0~. \end{equation}
Recalling the form of the momentum operator in this representation, we can obtain a differential equation for the wavefunction of the ground state. Moreover, introducing the oscillator length $x_0=\sqrt{\frac{\hbar}{m\omega}}=\frac{1}{\sqrt{\lambda}}$, we get
\begin{equation}\label{ho68} \left(x+x_0^2\frac{d}{dx}\right)\psi_0=0~. \end{equation}
This equation can be readily solved and, normalizing to unity the integral of the squared modulus of the solution over the whole real line, one obtains the physical wavefunction of the HO ground state
\begin{equation}\label{ho69} \psi_0(x)=\left(\frac{1}{\sqrt{ \sqrt{\pi}x_0}}\right)e^{ -\frac{1}{2}(\frac{x}{x_0})^2}~. \end{equation}
The rest of the eigenfunctions describing the HO excited states, can be obtained by employing iteratively the creation operator. The procedure is the following \begin{eqnarray} \psi_1&=&a^{\dd} \psi_0 =\left(\frac{1}{\sqrt{2}x_0}\right)\left(x-x_0^2\frac{d}{dx}\right)\psi_0 \ , \\ \psi_2&=&\frac{1}{\sqrt{2}}(a^{\dd})^2\psi_0=\frac{1}{\sqrt{2!}}\left(\frac{1} {\sqrt{2}x_0}\right)^2\left(x-x_0^2\frac{d}{dx}\right)^2\psi_0~. \end{eqnarray} By mathematical induction, one can show that
\begin{equation}\label{ho70} \psi_n(x)=\frac{1}{\sqrt{ \sqrt{\pi}2^nn!}}\hspace{.2cm} \frac{1}{x_0^{n+\frac{1}{2}}} \hspace{.2cm}\left(x-x_0^2\frac{d}{dx}\right)^n \hspace{.2cm}e^{-\frac{1}{2}(\frac{x}{x_0})^2}~. \end{equation}
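Equation (\ref{ho70}) can be checked symbolically for small $n$; the following sympy sketch (an illustrative aside, with $x_0=1$ assumed for simplicity) verifies that applying $(x-x_0^2\,d/dx)$ repeatedly to the Gaussian reproduces $H_n(x/x_0)$ times the Gaussian, i.e., Eq.~(\ref{ho70}) up to the normalization factor:
\begin{verbatim}
# Sketch: (x - d/dx)^n exp(-x^2/2) = H_n(x) exp(-x^2/2), with x0 = 1.
import sympy as sp

x = sp.symbols('x', real=True)
g = sp.exp(-x**2 / 2)                 # unnormalized ground state

def raised(n):                        # apply (x - d/dx) n times to g
    f = g
    for _ in range(n):
        f = sp.expand(x * f - sp.diff(f, x))
    return f

for n in range(5):
    diff = sp.simplify(raised(n) - sp.hermite(n, x) * g)
    print(n, diff)                    # should print 0 for every n
\end{verbatim}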
\subsection{HO Spectrum Obtained from Wilson-Sommerfeld Quantization Condition}
In the classical phase space, the equation $H=E$ for the harmonic oscillator when divided by $E$, i.e., \begin{equation}\label{ws1} \frac{p_x^2}{2mE}+\frac{m\omega _0^2x^2}{2E}=1 \ , \end{equation} turns into the equation for an ellipse \begin{equation}\label{ws2} \frac{x^2}{a^2}+\frac{p_x^2}{b^2}=1 \ , \end{equation} where $a=\frac{1}{\omega _0}\sqrt{\frac{2E}{m}}$ and $b=\sqrt{2mE}$. Therefore, applying the Bohr-Sommerfeld quantization rule for this case \begin{equation}\label{ws3} J=\oint p_xdx=\pi a b =\frac{2\pi E}{\omega _0}=2\pi \hbar n~, \end{equation} one obtains immediately the spectrum \begin{equation}\label{ws4} E_n=n\hbar\omega _0 \ , \end{equation} which is the quantum HO spectrum up to the zero-point energy.
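The phase-space integral in (\ref{ws3}) is also easy to check numerically; the following Python sketch (illustrative units $m=\omega_0=1$ and an arbitrary energy, our own choices) compares $\oint p_xdx$ with $2\pi E/\omega _0$:
\begin{verbatim}
# Sketch: the area of the phase-space ellipse H = E equals 2*pi*E/omega_0.
import numpy as np
from scipy.integrate import quad

E, m, w0 = 1.3, 1.0, 1.0
a = np.sqrt(2.0 * E / m) / w0
p = lambda x: np.sqrt(np.maximum(2.0 * m * E - (m * w0 * x) ** 2, 0.0))

area, _ = quad(p, -a, a)                  # upper half of the ellipse
print(2.0 * area, 2.0 * np.pi * E / w0)   # the two numbers should agree
\end{verbatim}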
\section{The 3D Coulomb Well: The Stationary States of the Hydrogen Atom}
The case of the Hydrogen atom corresponds in wave mechanics to an effective potential well that is the sum of the Coulomb well and the quantum centrifugal barrier as shown in Fig.~(\ref{fig:PCoul}). This result comes out from the technique of the separation of variables that is to be considered for any differential equation in more than one dimension. A very good introduction to this technique can be found in the textbook of Arfken and Weber \cite{aw6}. In general, for $d$ variables there are $d-1$ separation constants.
In spherical coordinates, the Schr\"odinger equation $(\nabla ^2_r+V(r))\psi({\bf r})=E\psi ({\bf r})$ reads
\begin{equation} \frac{1}{r^{2}} \frac{\partial}{\partial r}\left(r^{2} \frac{\partial \psi}{\partial r} \right) + \frac{1}{r^{2} \sin\theta} \frac{\partial}{\partial \theta} \left(\sin\theta \frac{\partial \psi}{\partial \theta}\right) + \frac{1}{r^{2}\sin^{2}\theta} \frac{\partial^{2} \psi}{\partial \phi^{2}} + \frac{2m}{\hbar^{2}}(E - V)\psi = 0 \ , \end{equation}
that can be also written in the form
\begin{equation} \sin^{2}\theta \frac{\partial}{\partial r}\left(r^{2} \frac{\partial \psi}{\partial r}\right) + \sin\theta \frac{\partial}{\partial \theta}\left(\sin\theta \frac{\partial \psi}{\partial \theta}\right) + \frac{\partial^{2} \psi}{\partial \phi^{2}} + \frac{2mr^{2}\sin^{2}\theta}{\hbar^{2}} \left(\frac{e^{2}}{4\pi \epsilon_{0}r} + E\right)\psi = 0~. \end{equation}
This equation is a partial differential equation for the electron wavefunction $\psi(r,\theta,\phi)$ `within' the atomic hydrogen. Together with the various conditions that the wavefunction $\psi(r,\theta,\phi)$ should fulfill [for example, $\psi(r,\theta,\phi)$ should have a unique value at any spatial point ($r,\theta,\phi$)], this equation specifies in a complete manner the stationary behavior of the hydrogen electron.\\
\begin{figure}
\caption{The effective potential well in the case of the hydrogen atom consisting of the electrostatic potential plus a quantized centrifugal barrier (see text).}
\label{fig:PCoul}
\end{figure}
\subsection{The Separation of Variables in Spherical Coordinates}
The real usefulness of writing the hydrogen Schr\"odinger equation in spherical coordinates consists in the easy way of achieving the separation procedure in three independent one-dimensional equations. The separation procedure is to seek the solutions for which the wavefunction $\psi(r,\theta,\phi)$ has the form of a product of three functions, each of one of the three spherical variables, namely $R(r)$ depending only on $r$, $\Theta(\theta)$ depending only on $\theta$, and $\Phi(\phi)$ that depends only on $\phi$. This is quite similar to the separation of the Laplace equation. Thus
\begin{equation} \psi(r,\theta,\phi) = R(r)\Theta(\theta)\Phi(\phi)~. \end{equation}
The $R(r)$ function describes the differential variation of the electron wavefunction $\psi$ along the vector radius coming out from the nucleus, with $\theta$ and $\phi$ assumed to be constant. The differential variation of $\psi$ with the polar angle $\theta$ along a meridian of an arbitrary sphere centered in the nucleus is described only by the function $\Theta(\theta)$ for constant $r$ and $\phi$. Finally, the function $\Phi(\phi)$ describes how $\psi$ varies with the azimuthal angle $\phi$ along a parallel of an arbitrary sphere centered at the nucleus, under the conditions that $r$ and $\theta$ are kept constant.
Using $\psi=R\Theta\Phi$, one can see that
\begin{equation} \frac{\partial \psi}{\partial r} = \Theta \Phi \frac{d R}{d r}~,\qquad \frac{\partial \psi}{\partial \theta} = R\Phi \frac{d \Theta}{d \theta}~,\qquad \frac{\partial \psi}{\partial \phi} = R\Theta \frac{d \Phi}{d\phi }~. \end{equation}
Then, one can obtain the following equations for the three factoring functions:
\begin{equation} \label{91} \frac{d^{2}\Phi}{d\phi^{2}} + m_{l}^{2}\Phi = 0~, \end{equation} \begin{equation} \label{92} \frac{1}{\sin\theta}\frac{d}{d\theta}\left(\sin\theta \frac{d\Theta}{d\theta}\right) + \left[l(l+1)-\frac{m_{l}^{2}}{\sin^{2}\theta}\right]\Theta = 0~, \end{equation} \begin{equation} \label{93} \frac{1}{r^{2}}\frac{d}{dr}\left(r^{2}\frac{dR}{dr}\right) + \left[\frac{2m}{\hbar^{2}}\left(\frac{e^{2}}{4\pi \epsilon_{0}r} + E\right) - \frac{l(l+1)}{r^{2}}\right]R = 0~. \end{equation}
Each of these equations is an ordinary differential equation for a function of a single variable. In this way, the Schr\"odinger equation for the hydrogen electron, which initially was a partial differential equation for a function $\psi$ of three variables, gets a simple form of three 1D ordinary differential equations for unknown functions of one variable. The reason why the separation constants have been chosen as $-m_l^2$ and $-l(l+1)$ will become clear in the following subsections.
\subsection{The Angular Separation Constants as Quantum Numbers}
\subsubsection{The Azimuthal Solution and the Magnetic Quantum Number}
The Eq.~(\ref{91}) is easily solved leading to the following solution
\begin{equation} \Phi(\phi) = {\cal N}_{\Phi}e^{im_{l}\phi}~, \end{equation}
where ${\cal N}_{\Phi}$ is the integration constant that will be used as a normalization constant for the azimuthal part. One of the conditions that any wavefunctions should fulfill is to have a unique value for any point in space. This applies to $\Phi$ as a component of the full wavefunction $\psi$. One should notice that\ $\phi$ and $\phi + 2\pi$ must be identical in the same meridional plane. Therefore, one should have $\Phi(\phi)= \Phi(\phi + 2\pi)$, i.e., ${\cal N}_{\Phi}e^{im_{l}\phi} = {\cal N}_{\Phi}e^{im_{l}(\phi + 2\pi)}$. This can be fulfilled only if $m_{l}$ is zero or a positive or negative integer $(\pm 1, \pm 2, \pm 3,...)$. The number $m_{l}$ is known as the magnetic quantum number of the atomic electron and is related to the direction of the projection of the orbital momentum $L_{z}$. It comes into play whenever the effects of axial magnetic fields on the electron may show up. There is also a deep connection between $m_{l}$ and the orbital quantum number $l$, which in turn determines the modulus of the orbital momentum of the electron.
The solution for $\Phi$ should also fulfill the normalization condition when integrating over a full period of the azimuthal angle,
\begin{equation} \int_{0}^{2\pi} \mid \Phi \mid^{2}d\phi = 1 \end{equation} and substituting $\Phi$, one gets \begin{equation} \int_{0}^{2\pi} {\cal N}_{\Phi}^{2}d\phi = 1~. \end{equation} It follows that ${\cal N}_{\Phi}=1/\sqrt{2\pi}$, and therefore the normalized $\Phi$ is \begin{equation} \Phi(\phi) = \frac{1}{\sqrt{2\pi}}e^{im_{l}\phi}~. \end{equation}
\subsubsection{The Polar Solution and the Orbital Quantum Number}
The solution of the $\Theta(\theta)$ equation is more complicated since it contains two separation constants which can be proved to be integer numbers. Things get easier if one recalls that the same Eq.~(\ref{92}) occurs also when the Helmholtz equation for the spatial amplitude profiles of the electromagnetic normal modes is separated in spherical coordinates. From this case we actually know that this equation is the associated Legendre equation for which the polynomial solutions are the associated Legendre polynomials
\begin{equation} \label{theta1} P_{l}^{m_{l}}(x) = (-1)^{m_{l}}(1-x^{2})^{m_{l}/2} \frac{d^{m_{l}}}{dx^{m_{l}}}P_{l}(x) = (-1)^{m_{l}}\frac{(1-x^{2})^{m_{l}/2}}{2^{l}l!}\frac{d^{m_{l} + l}}{dx^{{m_{l} + l}}}(x^{2} - 1)^{l}~. \end{equation}
The functions $\Theta(\theta)$ are a normalized form, $\Theta(\theta)={\cal N}_{\Theta}P_{l}^{m_l}$, of the associated Legendre polynomials
\begin{equation} \label{theta2} \Theta(\theta) = \sqrt{\frac{2l+1}{2}\frac{(l-m_{l})!}{(l+m_{l})!}} P_{l}^{m_{l}}(\cos\theta)~. \end{equation}
For the purposes here, the most important property of these functions is that they exist only when the constant $l$ is an integer number greater than or equal to $|m_{l}|$, the absolute value of $m_{l}$. This condition can be written in the form of the set of values available for $m_{l}$
\begin{equation} \label{theta3} m_{l} = 0,\pm 1, \pm 2,...,\pm l~. \end{equation}
The condition that $l$ should be a non-negative integer can be seen from the fact that for noninteger values, the solution of Eq.~(\ref{92}) diverges for $\cos \theta =\pm 1$, while for physical reasons we require finite solutions in these limits. The other condition $l\geq
|m_l|$ can be obtained from examining Eq.~(\ref{theta1}), where one can see a derivative of order $m_l+l$ applied to a polynomial of order $2l$. Thus $m_l+l$ cannot be greater than $2l$. On the other hand, derivatives of negative order are not defined, while that of zero order is interpreted as the identity operator. This leads to $m_l\geq -l$.
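The normalization used in Eq.~(\ref{theta2}) relies on the standard integral $\int_{-1}^{1}[P_{l}^{m_{l}}(x)]^2dx=\frac{2}{2l+1}\frac{(l+m_{l})!}{(l-m_{l})!}$, which can be checked numerically with scipy's associated Legendre function \texttt{lpmv} (an illustrative aside; the chosen pairs $(l,m_l)$ are arbitrary):
\begin{verbatim}
# Sketch: normalization integral of the associated Legendre polynomials.
from math import factorial
from scipy.integrate import quad
from scipy.special import lpmv

for l, m in [(1, 0), (2, 1), (3, 2)]:
    val, _ = quad(lambda x: lpmv(m, l, x) ** 2, -1.0, 1.0)
    exact = 2.0 / (2 * l + 1) * factorial(l + m) / factorial(l - m)
    print(l, m, val, exact)           # the two columns should agree
\end{verbatim}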
The interpretation of the orbital number $l$ presents some difficulties. Let us examine the equation corresponding to the radial wavefunction $R(r)$. This equation governs only the radial motion of the electron, i.e., the variation of its distance from the nucleus. However, the total energy of the electron $E$ is also present. This energy includes the kinetic energy of the electron in its orbital motion, which is not related to the radial motion. This apparent contradiction can be eliminated using the following argument. The kinetic energy $T$ has two parts: a pure radial one $T_{radial}$ and $T_{orbital}$, which is due to the closed orbital motion. The potential energy $V$ of the electron is the attractive electrostatic energy. Therefore, the electron total energy is
\begin{equation} E = T_{radial} + T_{orbital} - \frac{e^{2}}{4\pi \epsilon_{0}r}~. \end{equation} Substituting this expression of $E$ in Eq.~(\ref{93}) we get after some regrouping of the terms
\begin{equation} \frac{1}{r^{2}}\frac{d}{dr}\left(r^{2}\frac{dR}{dr}\right) + \frac{2m}{\hbar^{2}}\left[T_{radial} + T_{orbital} - \frac{\hbar^{2}l(l+1)}{2mr^{2}}\right]R=0~. \end{equation}
If the last two terms in parentheses compensate each other, we get a differential equation for the pure radial motion. Thus, we impose the condition \begin{equation} T_{orbital} = \frac{\hbar^{2}l(l+1)}{2mr^{2}}~. \end{equation}
However, the orbital kinetic energy of the electron is $T_{orbital} = \frac{1}{2}mv_{orbital}^{2}$ and since the orbital momentum of the electron is $L = mv_{orbital}r$, we can express the orbital kinetic energy in the form
\begin{equation} T_{orbital} = \frac{L^{2}}{2mr^{2}}~. \end{equation}
Therefore, we have \begin{equation} \frac{L^{2}}{2mr^{2}} = \frac{\hbar^{2}l(l+1)}{2mr^{2}} \ , \end{equation}
and consequently
\begin{equation}\label{42} L = \sqrt{l(l+1)}\hbar~. \end{equation}
The interpretation of this result is that since the orbital quantum number $l$ is constrained to take the values $l=0,1,2,...,(n-1)$, the electron can only have orbital momenta specified by means of Eq.~(\ref{42}). As in the case of the total energy $E$, the angular momentum is conserved and gets quantized. Its natural unit in quantum mechanics is $\hbar=h/2\pi=1.054 \times 10^{-34}$ J.s.
In the macroscopic planetary motion (putting aside the many-body features), the orbital quantum number is so large that any direct experimental detection of the quantum orbital momentum is impossible. For example, an electron with $l= 2$ has an angular momentum $L=2.6 \times 10^{-34}$ J.s., whereas the terrestrial angular momentum is $2.7 \times 10^{40}$ J.s.!
A common notation for the angular momentum states is by means of the letter $s$ for $l=0$, $p$ for $l=1$, $d$ for $l=2$, and so on. This alphabetic code comes from the empirical spectroscopic classification in terms of the so-called series, which was in use before the advent of wave mechanics.
On the other hand, for the interpretation of the magnetic quantum number, we must take into account that the orbital momentum is a vector operator and therefore one has to specify its direction, sense, and modulus. $L$, being a vector, is perpendicular on the plane of rotation. The geometric rules of the vectorial products still hold, in particular the rule of the right hand: its direction and sense are given by the right thumb whenever the other four fingers point at the direction of rotation.
\subsubsection{The Space Quantization}
We have already seen the spatial quantization of the Bohr-Sommerfeld electron trajectories. But what significance can be associated to a direction and sense in the limited volume of the atomic hydrogen in Schr\"odinger wave mechanics ? The answer may be quick if we think that the rotating electron is nothing but a one-electron loop current that considered as a magnetic dipole has a corresponding magnetic field. Consequently, an atomic electron will always interact with an applied magnetic field ${\bf H}$. The magnetic quantum number $m_{l}$ specifies the spatial direction of $L$, which is determined by the component of $L$ along the direction of the external magnetic field. This effect is commonly known as the quantization of the space in a magnetic field.
If we choose the direction of the magnetic field as the $z$ axis, the component of $L$ along this direction is
\begin{equation} L_{z} = m_{l}\hbar~. \end{equation}
The possible values of $m_{l}$ for a given value of $l$ go from $+l$ to $-l$, passing through zero, so that there are $2l+1$ possible orientations of the angular momentum $L$ in a magnetic field. When $l=0$, $L_{z}$ can be only zero; when $l=1$, $L_{z}$ can be $\hbar$, 0, or $-\hbar$; when $l=2$, $L_{z}$ takes only one of the values $2\hbar$, $\hbar$, 0, $-\hbar$, or $-2\hbar$, and so forth. It is worth mentioning that $L$ cannot be put exactly parallel or anti-parallel to ${\bf H}$, because $L_{z}$ is always smaller than the modulus $\sqrt{l(l+1)}\hbar$ of the total orbital momentum.
One should consider the atom/electron characterized by a given $m_{l}$ as having the orientation of its angular momentum $L$ determined relative to the external applied magnetic field.
In the absence of the external magnetic field, the direction of the $z$ axis is fully arbitrary. Therefore, the component of $L$ in any arbitrary chosen direction is $m_{l}\hbar$; the external magnetic field offers a preferred reference direction from the experimental viewpoint.
Why is only the component $L_{z}$ quantized? The answer is related to the fact that $L$ cannot be put along a direction in an arbitrary way. There is a special precessional motion in which its `vectorial arrow' always moves along a cone centered on the quantization axis such that its projection $L_{z}$ is $m_{l}\hbar$. The reason why this quantum precession occurs is different from the macroscopic planetary motion, as it is due to the uncertainty principle. If $L$ were fixed in space, in such a way that $L_{x}$, $L_{y}$ and $L_{z}$ had well-defined values, the electron would have to be confined to a well-defined plane. For example, if $L$ were fixed along the $z$ direction, the electron would tend to maintain itself in the plane $xy$.
This can only occur in the case in which the component $p_{z}$ of the electron momentum is `infinitely' uncertain. This is however impossible if the electron is part of the hydrogen atom. But since in reality just the component $L_{z}$ of $L$ together with $L^2$
have well defined values and $|L| > |L_{z}|$, the electron is not constrained to a single plane. If this would be the case, an uncertainty would exist in the coordinate $z$ of the electron. The direction of $L$ changes continuously so that the mean values of $L_{x}$ and $L_{y}$ are zero, although $L_{z}$ keeps all the time its value $m_{l}\hbar$. It is here where Heisenberg's uncertainty principle helps to make a clear difference between atomic wave motion and Bohr-Sommerfeld quantized ellipses.
\subsection{Polar and Azimuthal Solutions Set Together}
The solutions of the azimuthal and polar parts can be unified into the spherical harmonics, functions that depend on both $\phi$ and $\theta$. This simplifies the algebraic manipulations of the full wave functions $\psi(r,\theta,\phi)$. The spherical harmonics are given by
\begin{equation} Y_{l}^{m_{l}}(\theta,\phi) = (-1)^{m_{l}} \sqrt{\frac{2l+1}{4\pi} \frac{(l-m_{l})!}{(l+m_{l})!}} P_{l}^{m_{l}}(\cos\theta)e^{im_{l}\phi}~. \end{equation}
The factor $(-1)^{m_{l}}$ does not produce any problem because the Schr\"odinger equation is linear and homogeneous. This factor is added for the sake of convenience in angular momentum studies. It is known as the Condon-Shortley phase factor and its effect is to introduce the alternated sequence of the signs $\pm$ for the spherical harmonics of a given $l$.
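As a quick numerical cross-check (an illustrative aside; note that scipy's \texttt{sph\_harm} takes its angular arguments in the order (azimuthal, polar), opposite to the $(\theta,\phi)$ ordering used here, and already includes the Condon-Shortley factor), one can verify that the spherical harmonics are normalized to unity on the unit sphere:
\begin{verbatim}
# Sketch: int |Y_l^m|^2 sin(theta) dtheta dphi = 1 for an arbitrary (l, m).
import numpy as np
from scipy.special import sph_harm
from scipy.integrate import dblquad

l, m = 2, 1
val, _ = dblquad(
    lambda th, ph: np.abs(sph_harm(m, l, ph, th)) ** 2 * np.sin(th),
    0.0, 2.0 * np.pi,   # outer variable: azimuthal angle ph
    0.0, np.pi)         # inner variable: polar angle th
print(val)              # should be close to 1
\end{verbatim}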
\subsection{The Radial Solution and the Principal Quantum Number}
There is no energy parameter in the angular equations and that is why the angular motion does not make any contribution to the hydrogen spectrum. It is the radial motion that determines the energy eigenvalues. The solution for the radial part $R(r)$ of the wave function $\psi$ of the hydrogen atom is somewhat more complicated, although the presence of two separation constants, $E$ and $l$, points to some associated orthogonal polynomials. In the radial motion of the hydrogen electron significant differences with respect to the electrostatic Laplace equation do occur. The final result is expressed analytically in terms of the associated Laguerre polynomials (Schr\"odinger 1926). The radial equation can be solved exactly only when $E$ is positive or for one of the following negative values $E_{n}$ (in which case the electron is in a bound stationary state within atomic hydrogen)
\begin{equation} E_{n} = -{\rm Ry}\left(\frac{1}{n^{2}}\right)~, \end{equation}
where ${\rm Ry}=\frac{m e^{4}}{32\pi^{2}\epsilon_{0}^{2}\hbar^{2}}=13.606$ eV is the Rydberg atomic energy unit connected with the spectroscopic Rydberg constant $R_{\infty}$ through Ry $=hcR_{\infty}$, whereas $n$ is a positive integer number called the principal quantum number. It gives the quantization of the electron energy in the hydrogen atom. This discrete atomic spectrum has been first obtained in 1913 by Bohr using semi-empirical quantization methods and next by Pauli and Schr\"odinger almost simultaneously in 1926.
Another condition that should be satisfied in order to solve the radial equation is that $n$ must be strictly greater than $l$. Its lowest value is $l+1$ for a given $l$. Vice versa, the condition on $l$ is
\begin{equation} l = 0,1,2,...,(n-1) \ , \end{equation}
for given $n$.
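As a simple worked example (our own illustration; the numerical constants are standard textbook values, not taken from the text), the levels $E_n=-{\rm Ry}/n^2$ give for the $n=2\rightarrow 1$ (Lyman-$\alpha$) transition an energy of about $10.2$ eV and a wavelength of about $121.6$ nm:
\begin{verbatim}
# Sketch: hydrogen levels E_n = -Ry/n^2 and the Lyman-alpha wavelength.
Ry = 13.606              # eV
h  = 4.135667696e-15     # eV s
c  = 2.99792458e8        # m/s

def E(n):
    return -Ry / n**2

dE = E(2) - E(1)                    # about 10.2 eV
print(dE, h * c / dE * 1e9, "nm")   # about 121.6 nm
\end{verbatim}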
The radial equation can be written in the form
\begin{equation} r^{2}\frac{d^{2}R}{dr^{2}} + 2r\frac{dR}{dr} + \left[\frac{2m E}{\hbar^{2}}r^{2} + \frac{2me^{2}}{4\pi \epsilon_{0} \hbar^{2}}r - l(l+1)\right]R = 0~. \end{equation}
Dividing by $r^2$ and using the substitution $\chi (r) =rR$ to eliminate the first derivative $\frac{dR}{dr}$, one gets the standard form of the radial Schr\"odinger equation displaying the effective potential $U(r)=-{\rm const}/r + l(l+1)/r^2$ (actually, electrostatic potential plus quantized centrifugal barrier). These are necessary mathematical steps in order to discuss a new boundary condition, since the spectrum is obtained by means of the $R$ equation. The difference between a radial Schr\"odinger equation and a full-line one is that a supplementary boundary condition should be imposed at the origin ($r=0$). The Coulomb potential belongs to a class of potentials that are called weakly singular, for which ${\rm lim} _{r\rightarrow 0}\,U(r)r^2=0$. In these cases, one tries solutions of the type $\chi \propto r^{\nu}$, implying $\nu (\nu -1)=l(l+1)$, so that the solutions are $\nu _1 =l+1$ and $\nu _2=-l$, just as in electrostatics. The negative solution is eliminated for $l\neq 0$ because it leads to a divergent normalization constant; nor would it respect the delta-function normalization required for the continuous part of the spectrum. On the other hand, the particular case $\nu _2 =0$ is eliminated because the mean kinetic energy is not finite. The final conclusion is that $\chi (0)=0$ for any $l$.
Going back to the analysis of the radial equation for $R$, the first thing to do is to write it in nondimensional variables. This is performed by noticing that the only space and time scales that one can form on combining the three fundamental constants entering this problem, namely $e^2$, $m_{e}$ and $\hbar$ are the Bohr atomic radius $a_{B}=\hbar ^2/me^2=0.529\cdot 10 ^{-8}$ cm. and the atomic time $t_{B}=\hbar ^3/me^4=0.242 \cdot 10^{-16}$ sec., usually known as atomic units. Employing these units, one gets
\begin{equation} \frac{d^{2}R}{dr^{2}} + \frac{2}{r}\frac{dR}{dr} + \left[2 E + \frac{2}{r} - \frac{l(l+1)}{r^2}\right]R = 0~, \end{equation}
where we are especially interested in the discrete part of the spectrum ($E<0$). The notations $n=1/\sqrt{-E}$ and $\rho=2r/n$ lead us to
\begin{equation} \frac{d^{2}R}{d\rho ^{2}} + \frac{2}{\rho}\frac{dR}{d\rho} + \left[\frac{n}{\rho}-\frac{1}{4} - \frac{l(l+1)}{\rho ^2}\right]R = 0~. \end{equation}
For $\rho \rightarrow \infty$, this equation reduces to $\frac{d^{2}R}{d\rho ^{2}}=\frac{R}{4}$, having solutions $R\propto e^{\pm\rho /2}$. Because of the normalization condition only the decaying exponential is acceptable. On the other hand, the asymptotic at zero, as we already commented on, should be $R\propto \rho ^{l}$. Therefore, we can write $R$ as a product of three radial functions $R=\rho ^{l}e^{-\rho /2}F(\rho)$, of which the first two give the asymptotic behaviors, whereas the third is the radial function in the intermediate region. The latter function is of most interest because its features determine the energy spectrum. The equation for $F$ is
\begin{equation} \rho\frac{d^{2}F}{d\rho ^{2}} + (2l+2-\rho)\frac{dF}{d\rho} + (n-l-1)F = 0~. \end{equation}
This is a particular case of confluent hypergeometric equation. It can be identified as the equation for the associated Laguerre polynomials $L_{n+l}^{2l+1}(\rho)$. Thus, the normalized form of $R$ is
\begin{equation} R_{nl}(\rho) = -\frac{2}{n^2}\sqrt{\frac{(n-l-1)!}{2n[(n+l)!]^{3}}} e^{-\rho /2}\rho^{l} L_{n+l}^{2l+1}(\rho)~, \end{equation}
where the following Laguerre normalization condition has been used
\begin{equation} \int_{0}^{\infty}e^{-\rho}\rho^{2l+1}[L_{n+l}^{2l+1}(\rho)]^{2}d\rho = \frac{2n[(n+l)!]^{3}}{(n-l-1)!}~. \end{equation}
\subsection{Final Formulas for the Hydrogen Atom Stationary States}
We have now the solutions of all the equations depending on a single variable and therefore we can build the stationary wave functions for any electronic state of the hydrogen atom. They have the following analytic form
\begin{equation} \psi(r,\theta,\phi)={\cal N}_{H}(\alpha _n r)^{l} e^{-\alpha _n r/2} L_{n+l}^{2l+1}(\alpha _n r) P_{l}^{m_{l}}(\cos\theta)e^{im_{l}\phi}~, \end{equation}
where
\begin{equation}\label{normH} {\cal N}_{H}=-\frac{2}{n^2} \sqrt{\frac{2l+1}{4\pi}\frac{(l-m_{l})!}{(l+m_{l})!} \frac{(n-l-1)!}{[(n+l)!]^{3}}}, \qquad \alpha _n=2/na_{B}~. \end{equation}
Using the spherical harmonics, the solution is written as follows \begin{equation} \psi(r,\theta,\phi)=-\frac{2}{n^2}\sqrt{\frac{(n-l-1)!}{[(n+l)!]^{3}}} (\alpha _n r)^{l} e^{-\alpha _n r/2} L_{n+l}^{2l+1}(\alpha _n r)Y_{l}^{m_{l}}(\theta,\phi)~. \end{equation}
If the dynamical factor ${\cal F}$ is included, we get
\begin{equation}\label{final2} \psi(r,\theta,\phi;t)=-\frac{2}{n^2}\sqrt{\frac{(n-l-1)!}{[(n+l)!]^{3}}} (\alpha _n r)^{l} e^{\left(-i\left(\frac{Ry}{\hbar}\right)\frac{t}{n^2}- \frac{r}{na_B}\right)} L_{n+l}^{2l+1}(\alpha _n r)Y_{l}^{m_{l}}(\theta,\phi)~. \end{equation}
The latter formulas may be considered as the final result for the Schr\"odinger solution of the hydrogen atom for any stationary electron state. Indeed, one can see explicitly both the asymptotic dependence and the two orthogonal and complete sets of functions, i.e., the associated Laguerre polynomials and the spherical harmonics, which correspond to this particular case of a linear second-order partial differential equation. For the algebraic approach to the hydrogen atom problem we recommend the paper of Kirchberg and collaborators \cite{K-03} and the references therein. Finally, the stationary hydrogen eigenfunctions are characterized by a high degeneracy since they depend on three quantum numbers whereas the energy spectrum is only $n$-dependent. The degree of degeneracy is easily calculated if one notices that there are $2l+1$ values of $m_l$ for a given $l$, which in turn takes values from $0$ to $n-1$ for a given $n$. Thus, there are $\sum _{l=0}^{n-1}(2l+1)=n^2$ states of given energy. The presence of many degrees of freedom is not the only cause of the strong degeneracy. In the case of the hydrogen atom the existence of the conserved Runge-Lenz vector (a commuting operator) ${\bf K}=(2me^2)^{-1}[{\bf L}\times {\bf P}-{\bf P}\times {\bf L}]+{\bf r}/r$ introduces more symmetry into the problem and enhances the degeneracy. On the other hand, the one-dimensional quantum wavefunctions are not degenerate because they are characterized by a single discrete index. The general problem of degeneracies is nicely presented in a paper by Shea and Aravind \cite{sa96}.
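As an illustrative numerical aside (not part of the original treatment), the normalization of the radial functions and the $n^2$ degeneracy count can be checked in Python. Note that scipy's \texttt{genlaguerre} uses the modern convention $L^{(2l+1)}_{n-l-1}$, which differs from the older indexing $L^{2l+1}_{n+l}$ employed above, so the sketch assumes the correspondingly normalized radial function (with $a_B=1$):
\begin{verbatim}
# Sketch: radial normalization int_0^inf R_nl^2 r^2 dr = 1 (a_B = 1),
# with scipy's (modern) associated Laguerre convention, and the
# degeneracy count sum_{l=0}^{n-1} (2l+1) = n^2.
import numpy as np
from math import factorial
from scipy.integrate import quad
from scipy.special import genlaguerre

def R(n, l, r):
    rho = 2.0 * r / n
    norm = np.sqrt((2.0 / n) ** 3 * factorial(n - l - 1)
                   / (2.0 * n * factorial(n + l)))
    return norm * np.exp(-rho / 2) * rho**l \
           * genlaguerre(n - l - 1, 2 * l + 1)(rho)

for n, l in [(1, 0), (2, 0), (2, 1), (3, 2)]:
    val, _ = quad(lambda r: R(n, l, r) ** 2 * r ** 2, 0.0, np.inf)
    print(n, l, val)                              # each close to 1

print([sum(2*l + 1 for l in range(n)) for n in range(1, 5)])  # 1 4 9 16
\end{verbatim}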
\subsection{Electronic Probability Density}
In the Bohr model of the hydrogen atom, the electron rotates around the nucleus on circular or elliptic trajectories. It is possible to think of appropriate experiments allowing one to ``see'' that the electron moves within experimental errors at the predicted radii $r_n=n^{2}a_{B}$
in the equatorial plane $\theta=90^{o}$, whereas the azimuthal angle may vary according to the specific experimental conditions.
It is in this case that the more general wave mechanics changes the conclusions of the Bohr model in at least two important aspects:
\noindent $\bullet$ First, one cannot speak about exact values of $r,\theta,\phi$ (and therefore of planetary trajectories), but only of relative probabilities to find the electron within a given infinitesimal region of space. This feature is a consequence of the wave nature of the electron.
\noindent $\bullet$ Secondly, the electron does not move around the nucleus in the classical conventional way because the probability density $|\psi|^{2}$ does not depend on time but can vary substantially as a function of the relative position of the infinitesimal region.
The hydrogen electron wave function $\psi$ is $\psi=R\Theta\Phi$, where $R=R_{nl}(r)$ describes the way $\psi$ changes with $r$ when the principal and orbital quantum numbers have the values $n$ and $l$, respectively. $\Theta=\Theta_{lm_{l}}(\theta)$ describes in turn how $\psi$ varies with $\theta$ when the orbital and magnetic quantum numbers have the values $l$ and $m_{l}$, respectively. Finally, $\Phi=\Phi_{m_{l}}(\phi)$ gives the change of $\psi$ with $\phi$ when the magnetic quantum number has the value $m_{l}$. The probability density $\mid \psi \mid^{2}$ can be written
\begin{equation} \mid \psi \mid^{2} = \mid R \mid^{2} \mid \Theta \mid^{2} \mid \Phi \mid^{2}~. \end{equation}
Notice that the probability density $|\Phi|^{2}$, which measures the probability of finding the electron at a given azimuthal angle $\phi$, is a constant (it does not depend on $\phi$). Therefore, the electronic probability density is symmetric with respect to the $z$ axis and independent of the magnetic substates (at least until an external magnetic field is applied). Consequently, the electron has an equal probability to be found in any azimuthal direction. The radial part $R$ of the wave function, contrary to $\Phi$, not only varies with $r$, but does so differently for each different combination of quantum numbers $n$ and $l$. Figure (\ref{hydrogen3}) shows plots of $R$ as a function of $r$ for the states $1s$, $2s$, and $2p$. $R$ is maximum at the center of the nucleus ($r=0$) for all the $s$ states, whereas it is zero at $r=0$ for all the states of nonzero angular momentum.
\begin{figure}
\caption{Approximate plots of the radial functions $R_{1s}$, $R_{2s}$, $R_{2p}$; ($a_B=0.53$ \AA ).}
\label{hydrogen3}
\end{figure}
\begin{figure}
\caption{Probability density of finding the hydrogen electron between $r$ and $r+dr$ with respect to the nucleus for the states $1s$ (blue), $2s$ (red), $2p$ (green).}
\label{hydrogen4}
\end{figure}
The electron probability density at the point $r,\theta,\phi$ is proportional to $|\psi|^{2}$, but the real probability in the infinitesimal volume element $dV$ is $\mid \psi \mid^{2}dV$. In spherical coordinates $dV=r^{2}\sin\theta dr d\theta d\phi$ and since $\Theta$ and $\Phi$ are normalized functions, the real numerical probability $P(r)dr$ to find the electron at a relative distance with respect to the nucleus between $r$ and $r+dr$ is
\begin{eqnarray} P(r)dr & = & r^{2}\mid R \mid^{2}dr \int_{0}^{\pi} \mid\ \Theta \mid^{2} \sin\theta d\theta \int_{0}^{2\pi} \mid\ \Phi \mid^{2}d\phi \nonumber\\ & = & r^{2}\mid R \mid^{2}dr \ . \end{eqnarray}
The function $P(r)$ is displayed in Fig. (\ref{hydrogen4}) for the same states for which the radial function $R$ is displayed in Fig. (\ref{hydrogen3}). The curves are quite different. We immediately see that $P(r)$ is not maximal at the nucleus for the $s$ states, as happens for $R$. Instead, their maxima are encountered at a finite distance from the nucleus. The most probable value of $r$ for a $1s$ electron is exactly $a_{B}$, the Bohr radius. However, the mean value of $r$ for a $1s$ electron is $1.5a_{B}$. At first sight this might look strange, because the energy levels are the same both in quantum mechanics and in Bohr's model. This apparent discrepancy is eliminated if one takes into account that the electron energy depends on $1/r$ and not on $r$, and the mean value of $1/r$ for a $1s$ electron is exactly $1/a_{B}$.
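These statements about the $1s$ state are easy to verify numerically; the following Python sketch (an illustrative aside with $a_B=1$, using $R_{10}=2e^{-r}$) locates the maximum of $P(r)$ and computes the mean values of $r$ and $1/r$:
\begin{verbatim}
# Sketch: P(r) = r^2 |R_10|^2 with R_10 = 2 exp(-r) (a_B = 1): maximum
# at r = 1, mean r = 1.5, mean 1/r = 1.
import numpy as np
from scipy.integrate import quad
from scipy.optimize import minimize_scalar

P = lambda r: (2.0 * np.exp(-r)) ** 2 * r ** 2

r_max = minimize_scalar(lambda r: -P(r), bounds=(0.0, 5.0),
                        method='bounded').x
mean_r, _ = quad(lambda r: r * P(r), 0.0, np.inf)
mean_inv_r, _ = quad(lambda r: P(r) / r, 0.0, np.inf)
print(r_max, mean_r, mean_inv_r)    # about 1.0, 1.5, 1.0
\end{verbatim}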
The function $\Theta$ varies with the polar angle $\theta$ for all the quantum numbers $l$ and $m_{l}$, unless $l=m_{l}=0$, which are the $s$ states. The probability density $| \Theta |^{2}$ for an $s$
state is a constant (1/2). This means that since $|\Phi|^{2}$ is also a constant, the electronic probability density $| \psi |^{2}$ has the same value for a given value of $r$, not depending on the direction. In other states, the electrons present an angular behavior that in many cases may be quite complicated.
Because $|\psi |^{2}$ is independent of $\phi$, a three-dimensional representation of $|\psi|^{2}$ can be obtained by rotating a particular representation around a vertical axis. This shows visually that the probability densities for the $s$ states have spherical symmetry, while all the other states do not possess it. In this way, one can get more or less pronounced lobes of characteristic forms depending on the atomic state. These lobes are quite important in chemistry for specifying the atomic interaction in the molecular bulk.
\subsection{Other 3D Coordinate Systems Allowing Separation of Variables}
A complete discussion of the 3D coordinate systems allowing the separation of variables for the Schr\"odinger equation has been provided by Cook and Fowler \cite{cf}. Here, we briefly review these coordinate systems:
\indent {\em Parabolic}.
\noindent The parabolic coordinates given by
\begin{eqnarray}\label{parab} x&=&\sqrt{\xi \eta}\cos \phi~,\nonumber \\ y&=&\sqrt{\xi \eta}\sin \phi~, \\ z&=&\frac{\xi -\eta}{2}~,\nonumber
\end{eqnarray}
where $\xi \in [0,\infty )$, $\eta \in [0 , \infty )$, and $\phi \in [0, 2\pi )$, are another coordinate system in which the Schr\"odinger hydrogen equation is separable, as first shown by Schr\"odinger \cite{parabolic}. The final solution in this case is expressed as the product of factors of asymptotic nature, azimuthal harmonics, and two sets of associated Laguerre polynomials in the variables $\xi$ and $\eta$, respectively. The energy spectrum ($-1/n^2$) and the degeneracy ($n^2$) of course do not depend on the coordinate system. Parabolic coordinates are usually employed in the study of the Stark effect, as first shown by Epstein \cite{epstein}.
\indent {\em Spheroidal}.
\noindent Spheroidal coordinates also can be treated by the separation technique with the $z$ component of angular momentum remaining as a constant of the motion. There are two types of spheroidal coordinates.
The oblate spheroidal coordinates are given by: \begin{eqnarray}\label{oblate} x&=&r\cosh \xi \cos \eta \cos \phi ~,\nonumber \\ y&=&r\cosh \xi \cos \eta \sin \phi ~, \\ z&=&r\sinh \xi \sin \eta ~, \nonumber \end{eqnarray} where $\xi \in [0,\infty )$, $\eta \in [-\pi /2 , \pi /2]$, and $\phi \in [0, 2\pi )$.
The prolate spheroidal coordinates are complementary to the oblate ones in the variables $\xi$ and $\eta$: \begin{eqnarray}\label{prolate} x&=&r\sinh \xi \sin \eta \cos \phi ~,\nonumber \\ y&=&r\sinh \xi \sin \eta \sin \phi ~, \\ z&=&r\cosh \xi \cos \eta ~, \nonumber \end{eqnarray} where $\xi \in [0,\infty )$, $\eta \in [0 , \pi ]$, and $\phi \in [0, 2\pi )$.
\indent {\em Spheroconal}.
\noindent The spheroconal system is a quite unfamiliar coordinate separation system of the H-atom Schr\"odinger equation. In this case $\hat{L}^2$ is retained as a constant of the motion but $\hat{L}_{z}^{2}$ is replaced by another separation operator $\hat{B}$. The relationship with the Cartesian system is given through \cite{cf} \begin{eqnarray}\label{sconal1} x&=& \frac{r}{bc}\,\theta \lambda \ , \nonumber \\ y&=& \frac{r}{bc}\left(-\frac{(b^2-\theta ^2)(b^2-\lambda ^2)}{1-\frac{b^2}{c^2}}\right)^{1/2} \ , \\ z&=& \frac{r}{bc}\left(-\frac{(c^2-\theta ^2)(c^2-\lambda ^2)}{1-\frac{b^2}{c^2}}\right)^{1/2}~. \nonumber \end{eqnarray} One of the separation constants is $l(l+1)$, the eigenvalue of the separation operator $\hat{L}^2$. This fact is of considerable help in dealing with the unfamiliar equations resulting from the separation of the $\theta$ and $\lambda$ equations. The separation operator $\hat{B}$ can be transformed into a Cartesian form,
\begin{equation}\label{23} \hat{B}=b^2\hat{L}_y^2+ c^2\hat{L}_z^2~, \end{equation}
where $\hat{L}_y$ and $\hat{L}_z$ are the usual Cartesian components of the angular momentum operator. Thus, in the spheroconal system, a linear combination of the squares of the $y$ and $z$ components of the angular momentum effects the separation. The linear combination coefficients are simply the squares of the limits of the spheroconal coordinate ranges.\\
In general, the stationary states in any of these coordinate systems can be written as linear combinations of degenerate eigenfunctions of the other systems, and the ground state (the vacuum) is the same in all of them.
\section{\Large The 3D Parabolic Well: The Stationary States of the Isotropic Harmonic Oscillator}
We commented on the importance of the HO in physics at the beginning of our analysis of the 1D quantum HO. If we consider a 3D analog, we are led to study a Taylor expansion of the potential in all three variables, retaining the terms up to second order; the most general such expansion is the quadratic form
\begin{equation}\label{3ho1} V(x,y,z)=ax^2+by^2+cz^2+dxy+exz+fyz~. \end{equation}
There are, however, many systems that are spherically symmetric or for which this symmetry is a sufficiently accurate approximation. Then the potential takes the much simpler form
\begin{equation}\label{3ho2}
V(x,y,z)=K(x^2+y^2+z^2)~. \end{equation}
\noindent This is equivalent to assuming that the unmixed second partial spatial derivatives of the potential all have the same value (equal to $2K$ in the notation of (\ref{3ho2})). We add that this is a good approximation whenever the mixed second partial derivatives are small in comparison to the unmixed ones. When these conditions are satisfied and the potential is given by (\ref{3ho2}), we say that the system is a 3D spherically symmetric HO.\\ The Hamiltonian in this case is of the form
\begin{equation}\label{3ho3} \hat{H}=\frac{-\hbar^2}{2m}\bigtriangledown^2 + \frac{m\omega^2}{2}r^2~, \end{equation}
\noindent where the Laplace operator is given in spherical coordinates and $r$ is the spherical radial coordinate. Equivalently, the problem can be considered in Cartesian coordinates but it is a trivial generalization of the one-dimensional case.\\ Since the potential is time independent the energy is conserved. In addition, because of the spherical symmetry the orbital momentum is also conserved. Having two conserved quantities, we associate to each of them a corresponding quantum number. As a matter of fact, as we have seen in the case of the hydrogen atom, the spherical symmetry leads to three quantum numbers, but the third one, the magnetic number, is related to the `space quantization' and not to the geometrical features of the motion.
Thus, the eigenfunctions depend effectively only on two quantum numbers. The eigenvalue problem of interest is then
\begin{equation}\label{3ho4} \hat{H}\Psi_{nl}=E_{nl}\Psi_{nl}~. \end{equation}
The Laplace operator in spherical coordinates reads
\begin{equation}\label{3ho5} \bigtriangledown^2 =\frac{\partial^2}{\partial r^2}+\frac{2}{r}\frac{\partial}{\partial r} -\frac{\hat{L}^2}{\hbar^2r^2} \ , \end{equation}
where the angular operator $\hat{L}^2$ is the usual spherical one
\begin{equation}\label{3ho6} \hat{L}^2=-\hbar^2\left[ \frac{1}{\sin{\theta}} \frac{\partial}{\partial\theta} \left( \sin{\theta}\frac{\partial}{\partial\theta}\right) +\frac{1}{\sin^2{\theta}}\frac{\partial^2}{\partial\varphi^2}\right]~. \end{equation}
The eigenfunctions of $\hat{L}^2$ are the spherical harmonics, i.e.
\begin{equation}\label{3ho7} \hat{L}^2Y_{lm_{l}}(\theta,\varphi)=\hbar^2l(l+1)Y_{lm_{l}}(\theta,\varphi) \ . \end{equation}
In order to achieve the separation of the variables and functions, the following substitution is proposed
\begin{equation}\label{3ho8} \Psi_{nlm_{l}}(r, \theta,\varphi)=\frac{R_{nl}(r)}{r} Y_{lm_{l}}(\theta,\varphi)~. \end{equation}
Once this is plugged into the Schr\"odinger equation, the radial and the angular parts separate from one another. The equation for the radial part has the form
\begin{equation}\label{3ho9}
R_{nl}^{\prime\prime}+\left(\frac{2mE_{nl}}{\hbar^2} -\frac{m^2\omega^2}{\hbar^2}r^2-\frac{l(l+1)}{r^2}\right)R_{nl}(r)=0~. \end{equation}
Using the oscillator parameters $k_{nl}^{2}=\frac{2mE_{nl}}{\hbar ^2}$ and $\lambda=\frac{m\omega}{\hbar}$, the previous equation is precisely of the one-dimensional quantum oscillator form but in the radial variable and with an additional angular momentum barrier term,
\begin{equation}\label{3ho10} R_{nl}^{\prime\prime}+\left(k_{nl}^{2}-\lambda^2r^2-\frac{l(l+1)}{r^2}\right)R_{nl}=0~. \end{equation}
\noindent To solve this equation, we start with its asymptotic analysis. Examining first the limit $r\rightarrow\infty$, we notice that both the energy term and the orbital momentum barrier are negligible compared to $\lambda^2r^2$, and therefore in this limit the asymptotic behavior is similar to that of the one-dimensional oscillator, i.e., a Gaussian tail
\begin{equation}\label{3ho11} R_{nl}(r)\sim\exp\left(-\frac{\lambda r^2}{2}\right)\hspace{2cm}\mbox{for} \hspace{.3cm}r\rightarrow\infty~. \end{equation}
If now we consider the behavior near the origin, we can see that the dominant term is that of the orbital momentum, i.e., the differential equation (\ref{3ho10}) in this limit turns into
\begin{equation}\label{3ho12}
R_{nl}^{\prime\prime}-\frac{l(l+1)}{r^2}R_{nl}=0~.
\end{equation}
This is a differential equation of the Euler type \[a_nr^n y^{(n)}(r)+a_{n-1}r^{n-1} y^{(n-1)}(r)+\cdots+a_1r y^{\prime}(r)+a_0y(r)=0\] for the case $n=2$ with the first-derivative term absent. For such equations the solutions are sought in the form $y=r^{\alpha}$, which plugged into the equation leads to a simple polynomial (indicial) equation in $\alpha$; here it reads $\alpha(\alpha-1)=l(l+1)$, with the two independent solutions $\alpha=l+1$ and $\alpha=-l$. Thus, one gets
\begin{equation}\label{3ho13} R_{nl}(r)\sim \hspace{.2cm}r^{l+1}\hspace{.2cm}\mbox{or} \hspace{.4cm}r^{-l}\hspace{2cm}\mbox{for}\hspace{.4cm}r \rightarrow 0~. \end{equation}
The previous arguments lead to proposing the substitution
\begin{equation}\label{3ho14} R_{nl}(r)=r^{l+1}\exp{\left(-\frac{\lambda r^2}{2}\right)}u(r)~. \end{equation}
The second possible substitution \begin{equation}\label{3ho15} R_{nl}(r)=r^{-l}\exp{\left(-\frac{\lambda r^2}{2}\right)}v(r)~, \end{equation} leads to an equation for $v$ of the same (confluent hypergeometric) type as the one obtained from (\ref{3ho14}).
Substituting (\ref{3ho14}) in (\ref{3ho10}), the following differential equation for $u$ is obtained
\begin{equation}\label{3ho16} u^{\prime\prime}+2\left(\frac{l+1}{r}-\lambda r\right)u^{\prime} -[(2l+3)\lambda-k_{nl}^{2}]u=0~. \end{equation}
By using now the change of variable $w=\lambda r^2$, one gets
\begin{equation}\label{3ho17} wu^{\prime\prime}+\left(l+\frac{3}{2}-w\right)u^{\prime}-\left[ \frac{1}{2}\left(l+ \frac{3}{2}\right)-\frac{\kappa _{nl}}{2}\right]u=0~, \end{equation}
where $\kappa _{nl}=\frac {k_{nl}^{2}}{2\lambda}=\frac{E_{nl}}{\hbar\omega}$ is the same dimensionless energy parameter as in the one-dimensional case, but now with two subindices. We thus again encounter a differential equation of the confluent hypergeometric type, whose general solution is
$$ u(r)=A\hspace{.2cm}_1F_1\left[\frac{1}{2}\left(l+\frac{3}{2}-\kappa _{nl}\right),l+\frac{3}{2}; \lambda r^2\right] $$ \begin{equation}\label{3ho18} +B\hspace{.2cm}r^{-(2l+1)} \hspace{.3cm}_1F_1\left[\frac{1}{2}\left(-l+\frac{1}{2}-\kappa _{nl}\right),-l+\frac{1}{2}; \lambda r^2\right]~. \end{equation}
The second particular solution cannot be normalized because it diverges strongly for $r\rightarrow 0$. Thus one takes $B=0$ which leads to
\begin{equation}\label{3ho19} u_{B=0}(r)=A\hspace{.2cm}_1F_1\left[\frac{1}{2}\left(l+\frac{3}{2}-\kappa _{nl}\right),l+\frac{3}{2}; \lambda r^2\right]~. \end{equation}
Using the same arguments on the asymptotic behavior as in the one-dimensional HO case, that is, imposing a regular (normalizable) solution at infinity, leads to the truncation of the confluent hypergeometric series, which implies the quantization of the energy. The truncation condition reads explicitly
\begin{equation}\label{3ho20} \frac{1}{2}\left(l+\frac{3}{2}-\kappa _{nl}\right)=-n~, \end{equation}
\noindent and substituting $\kappa _{nl}$, we get the energy spectrum
\begin{equation}\label{3ho21} E_{nl}=\hbar\omega\left(2n+l+\frac{3}{2}\right)=\hbar\omega\left(N+\frac{3}{2}\right) ~. \end{equation}
One can notice that for the three-dimensional spherically symmetric HO there is a zero point energy of $\frac{3}{2}\hbar\omega$, three times bigger than in the one-dimensional case.\\ The unnormalized eigenfunctions of the three-dimensional harmonic oscillator are
\begin{equation}\label{3ho22} \psi_{nlm_l}(r,\theta,\varphi) =r^{l}e^{\left(-\frac{\lambda r^2}{2}\right)}\hspace{.2cm}_1F_1\left(-n,l +\frac{3}{2};\lambda r^2\right)\hspace{.1cm}Y_{lm_l}(\theta,\varphi)~. \end{equation} Since this is a radial problem with a centrifugal barrier, we expect the solutions to be expressible in terms of associated Laguerre polynomials. This can be seen by using the condition (\ref{3ho20}) in (\ref{3ho17}), which becomes
\begin{equation}\label{3ho23} wu^{\prime\prime}+\left(l+\frac{3}{2}-w\right)u^{\prime}+nu=0~. \end{equation}
The latter equation has the form of the associated Laguerre equation $wu^{\prime\prime}+\left(p+1-w\right)u^{\prime}+nu=0$ for $p=l+1/2$ with the polynomial solutions $L_{n}^{l+1/2}$. Thus, the normalized solutions can be written
\begin{equation}\label{3ho24} \psi_{nlm_l}(r,\theta,\varphi) ={\cal N}_{nl}r^{l}e^{\left(-\frac{\lambda r^2}{2}\right)}\hspace{.2cm}L_{n}^{l+1/2}(\lambda r^2)\hspace{.1cm}Y_{lm_l}(\theta,\varphi)~, \end{equation} where the normalization integral can be calculated as in the 1D oscillator case using the Laguerre generating function
\begin{equation}\label{Laggen} \frac{e^{-zt/(1-t)}}{(1-t)^{k+1}}=\sum _{p=0}^{\infty}\frac{t^p}{(p+k)!}L_p^k(z)~. \end{equation}
The final result is
\begin{equation}\label{normlagosc} {\cal N}_{nl}= \bigg[\sqrt{\frac{\lambda}{\pi}}\frac{n!(n+l)!}{2^{-(2n+2l+2)}(2n+2l+1)!}\bigg]^{\frac{1}{2}}~. \end{equation}
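The Laguerre representation used in (\ref{3ho24}) can be compared directly with the hypergeometric form (\ref{3ho22}). The following small Python check, a sketch of ours relying on the standard identity $L_n^{(a)}(w)=\binom{n+a}{n}\,{}_1F_1(-n,a+1;w)$ with $a=l+1/2$ (and on SciPy's convention for the generalized Laguerre polynomials), confirms numerically that the two expressions agree up to a constant factor.
\begin{verbatim}
# Illustrative check: 1F1(-n, l+3/2; w) equals L_n^{l+1/2}(w) divided by
# the binomial coefficient C(n+l+1/2, n), for several n, l and w.
import numpy as np
from scipy.special import hyp1f1, eval_genlaguerre, binom

w = np.linspace(0.0, 10.0, 11)
for n in range(4):
    for l in range(4):
        a = l + 0.5
        lhs = eval_genlaguerre(n, a, w)
        rhs = binom(n + a, n) * hyp1f1(-n, a + 1.0, w)
        assert np.allclose(lhs, rhs), (n, l)
print("Laguerre and confluent hypergeometric forms agree")
\end{verbatim}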
If the dynamical phase factor ${\cal F}$ is included, the stationary wavefunctions of the 3D spherically symmetric oscillator take the following final form
\begin{equation} \label{osc3wfin} \psi_{n,l,m_l} (r,\theta, \phi, t)={\cal N}_{nl} \exp \left(-i\left(2n+l+\frac{3}{2}\right)\omega t-\frac{\lambda r^2}{2}\right) r^{l}\,L_{n}^{l+1/2}(\lambda r^2)\hspace{.1cm}Y_{lm_l}(\theta,\varphi)~. \end{equation}
For the algebraic (factorization) method applied to the radial oscillator we refer the reader to detailed studies \cite{mota03}. The degeneracy of the radial oscillator is easier to calculate by counting the Cartesian eigenstates at a given energy, which gives $(N+2)!/(N!\,2!)$ \cite{sa96}.
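As a small illustration of this counting argument (our own addition), one can enumerate the Cartesian triples $(n_x,n_y,n_z)$ with $n_x+n_y+n_z=N$ directly and compare with the quoted formula.
\begin{verbatim}
# Illustrative check: the number of Cartesian oscillator states at level N
# equals (N+2)!/(N! 2!) = (N+1)(N+2)/2.
from math import factorial

for N in range(8):
    # n_z = N - n_x - n_y is fixed once n_x and n_y are chosen
    count = sum(1 for nx in range(N + 1) for ny in range(N + 1 - nx))
    assert count == factorial(N + 2) // (factorial(N) * 2)
    print(N, count)
\end{verbatim}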
\section{Stationary Bound States in the Continuum}
After all these examples, it seems that potential wells are necessary for the existence of stationary states in wave mechanics. However, this is not so! All of the quantum bound states considered so far have the property that the total energy of the state is less than the value of the potential energy at infinity, which is similar to the bound states in classical mechanics. The boundedness of the quantum system is due to the lack of sufficient energy to dissociate. However, in wave mechanics it is possible to have bound states that do not possess this property, and which therefore have no classical analog.
Let us choose the zero of energy so that the potential energy function vanishes at infinity. The usual energy spectrum for such a potential would be a positive energy continuum of unbound states, with the bound states, if any, occurring at discrete negative energies. However, Stillinger and Herrick (1975) \cite{ball1}, following an earlier suggestion by von Neumann and Wigner \cite{ball2}, have constructed potentials that have discrete bound states embedded in the positive energy continuum. Bound states are represented by those solutions of the equation $(-\frac{1}{2}\nabla^2+V)\Psi =E\Psi$ for which the normalization integral $\int |\Psi|^2d^3x$ is finite. (We adopt units such that $\hbar =1$ and $m=1$.)
We can formally solve for the potential, \begin{equation}\label{ball-1} V(r;E)=E+\frac{1}{2}\left(\frac{\nabla ^2 \Psi}{\Psi}\right)~. \end{equation} For the potential to be nonsingular, the nodes of $\Psi$ must be matched by zeros of $\nabla ^2 \Psi$. The free particle zero-angular-momentum function $\Psi _0({\bf x})=\sin(kr)/kr$ satisfies (\ref{ball-1}) with energy eigenvalue $E_0=\frac{1}{2}k^2$
and with $V$ identically equal to zero, but it is unacceptable because the integral of $|\Psi _0|^2$ is not convergent. However, by taking
\begin{equation}\label{ball-2} \Psi({\bf x})=\Psi _0({\bf x}) f(r) \ , \end{equation}
and requiring that $f(r)$ go to zero more rapidly than $r^{-1/2}$ as $r\rightarrow \infty$, one can get a convergent integral for $|\Psi({\bf x})|^2$. Substituting (\ref{ball-2}) into (\ref{ball-1}), we obtain
\begin{equation}\label{ball-3} V(r;E)=E-\frac{1}{2}k^2+k\cot(kr)\frac{f'(r)}{f(r)}+\frac{1}{2}\frac{f''(r)}{f(r)}~. \end{equation}
For $V$ to remain bounded, $f'(r)/f(r)$ must vanish at the poles of $\cot (kr)$; that is, at the zeros of $\sin (kr)$. This can be achieved in different ways but in all known procedures the modulation factor $f(r)$ has the form
\begin{equation}\label{ball-5} f(r)=[\lambda +s(r)]^{-1}~, \end{equation}
where $\lambda$ is a positive constant, although Stillinger and Herrick mentioned a wider class of possible $f(r)$. They chose the modulation variable
\begin{equation}\label{ball-4} s_{sh}(r)=8k^2\int _0^r r'[\sin(kr')]^2dr'=\frac{1}{2}(2kr)^2-2kr\sin(2kr)-\cos(2kr) +1~. \end{equation}
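As a numerical aside of ours (with an arbitrary value of $k$), the closed form in (\ref{ball-4}) can be confirmed by comparing it with a direct quadrature of the defining integral.
\begin{verbatim}
# Illustrative check of (ball-4): compare 8k^2 * integral_0^r r' sin^2(kr') dr'
# with the closed form 2(kr)^2 - 2kr*sin(2kr) - cos(2kr) + 1.
import numpy as np
from scipy.integrate import quad

k = 1.3  # arbitrary wave number for the test
for r in [0.5, 1.0, 2.7, 5.0, 10.0]:
    integral, _ = quad(lambda rp: 8.0 * k**2 * rp * np.sin(k * rp)**2, 0.0, r)
    closed = 0.5 * (2*k*r)**2 - 2*k*r*np.sin(2*k*r) - np.cos(2*k*r) + 1.0
    assert abs(integral - closed) < 1e-5 * max(1.0, abs(closed))
print("closed form of s_sh(r) confirmed")
\end{verbatim}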
The principles guiding the choice of $s(r)$ are: that the integrand must be nonnegative, so that $s(r)$ will be a monotonic function of $r$; and that the integrand must be proportional to $\sin (kr)$, so that $ds(r)/dr$ will vanish at the zeros of $\sin(kr)$.
For $s_{sh}$, $\Psi$ decreases like $r^{-3}$ as $r\rightarrow \infty$, which ensures its square integrability. The potential (\ref{ball-3}) then becomes (for $E_0$)
\begin{equation}\label{ball-6} V_{sh}(r;E_0)=\frac{64k^4r^2[\sin(kr)]^4}{[\lambda +s_{sh}(r)]^2}-\frac{4k^2\{\,[\sin (kr)]^2+2kr\sin(2kr)\}}{\lambda +s_{sh}(r)}~. \end{equation}
The energy of the bound state produced by the potential $V(r;E_0)$ is $E_0=\frac{1}{2}k^2$ as for the free particle, i.e., it is independent of $\lambda$ and the modulation factor $f(r)$. Therefore, the main idea for getting bound states in the continuum is to build isospectral potentials of the free particle and more generally for any type of scattering state. A more consistent procedure to obtain isospectral potentials is given by the formalism of supersymmetric quantum mechanics \cite{psp93} which is based on the Darboux transformations \cite{d1882}. For the supersymmetric case the modulation variable is
\begin{equation}\label{ball-8} s_{d}(r)=\int _0^r u_0^2 (r')dr'=\int _0^r [\sin(kr')]^2dr'=\frac{1}{2}r-\frac{1}{4k}\sin(2kr) \end{equation}
and the isospectral potential has the form
\begin{equation}\label{ball-9} V_d(r;E_0)=\frac{2[\sin(kr)]^4}{[\lambda +s_{d}(r)]^2}-\frac{2k\,\sin(2kr)}{\lambda +s_{d}(r)}~. \end{equation}
Finally, in the amplitude modulation method of von Neumann and Wigner the modulation variable is given by
\begin{equation}\label{ball-10} s_{vnw}(r)=(4ks_d)^2=[2kr-\sin(2kr)]^2 \end{equation}
and the isospectral free particle potential is
\begin{equation}\label{ball-11} V_{vnw}(r;E_0)=-\frac{64k^2\lambda [\sin(kr)]^4}{[\lambda +s_{vnw}(r)]^2}+\frac{48k^2[\sin (kr)]^4-8k^2s_{vnw}^{1/2}\sin(2kr)}{\lambda +s_{vnw}(r)}~. \end{equation}
Interestingly, all these potentials have the same behavior at large $r$
\begin{equation}\label{ball-7} V_{sh}(r;E_0)\sim V_{d}(r;E_0)\sim V_{vnw}(r;E_0)\approx -\frac{4k \sin(2kr)}{r}=-8k^2{\rm sinc}(2kr)=-8k^2j_0(2kr)~, \end{equation}
where the sinc is the cardinal sine function which is identical to the spherical Bessel function of the first kind $j_0$. On the other hand, these potentials display different power-law behavior near the origin.
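To make the common tail (\ref{ball-7}) concrete, here is a brief numerical illustration of ours using the Darboux potential $V_d$ with the arbitrary choices $k=1$ and $\lambda=1$; at large $r$ the difference between $V_d$ and $-4k\sin(2kr)/r$ is small compared with the amplitude $4k/r$ of the tail itself.
\begin{verbatim}
# Illustrative check of the large-r behavior (ball-7) for V_d:
import numpy as np

k, lam = 1.0, 1.0                     # arbitrary illustrative parameters
r = np.linspace(400.0, 600.0, 2001)
s_d = 0.5 * r - np.sin(2*k*r) / (4*k)
V_d = 2*np.sin(k*r)**4 / (lam + s_d)**2 - 2*k*np.sin(2*k*r) / (lam + s_d)
tail = -4*k*np.sin(2*k*r) / r
# max deviation from the asymptotic form versus the size of the tail itself
print(np.max(np.abs(V_d - tail)), np.max(np.abs(tail)))
\end{verbatim}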
Moreover, the asymptotic sinc form, which is typical in diffraction, suggests that the existence of bound states in the continuum can be understood by using the analogy of wave propagation to describe the dynamics of quantum states. It seems that the mechanism which prevents the bound state from dispersing like ordinary positive energy states is the destructive interference of the waves reflected from the oscillations of $V(r;E)$. According to Stillinger and Herrick no other $f(r)$ that produces a single particle bound state in the continuum will lead to a potential that decays more rapidly than (\ref{ball-7}). However, they present further details in their paper which suggest that nonseparable multiparticle systems, such as two-electron atoms, may possess bound states in the continuum without such a contrived form of potential as (\ref{ball-6}).
The bound (or localized) states in the continuum are not only of academic interest. Such a type of electronic stationary quantum state was demonstrated in 1992 by Capasso and collaborators \cite{cap92} through infrared absorption measurements on a semiconductor superlattice grown by molecular beam epitaxy in such a way that one thick quantum well is surrounded on both sides by several GaInAs-AlInAs well/barrier layers constructed to act as $\lambda/4$ Bragg reflectors. There is currently much interest in such states in solid state physics \cite{ssp}.
\section{Conclusion}
We have reviewed the concepts that gave rise to quantum mechanics. The stationary states were first introduced by Bohr to explain the stability of atoms and the experimental findings on the variation of the electronic current when electrons collide with mercury atoms. The generalization of those ideas was discussed and the Schr\"odinger equation was introduced. The stationary localized solutions of that equation for various potentials, with closed and open boundary conditions, were worked out in detail, and their physical meaning was stressed.
There are other cases of interest, such as the solution of the Schr\"odinger equation for electrons moving in a solid, that are treated in detail in other chapters. Our interest here is to stress the historical development of quantum mechanics and to show the importance of the stationary state concept.
\end{document}
\begin{document}
\title{External branch lengths of $\Lambda$-coalescents\\ without a dust component} \author{\noindent Christina S. Diehl\thanks{Institut f\"ur Mathematik, Goethe-Universit\"at, 60054 Frankfurt am Main, Germany \newline [email protected], [email protected] \newline Work partially supported by the DFG Priority Programme SPP 1590 ``Probabilistic Structures in Evolution''} $\ $ and G\"otz Kersting$^*$}
\maketitle
\begin{abstract} \noindent $\Lambda$-coalescents model genealogies of samples of individuals from a large population by means of a family tree whose branches have lengths. The tree's leaves represent the individuals, and the lengths of the adjacent edges indicate the individuals' time durations up to some common ancestor. These edges are called external branches. We consider typical external branches under the broad assumption that the coalescent has no dust component, and maximal external branches under further regularity assumptions. As it transpires, the crucial characteristic is the coalescent's rate of decrease $\mu(b)$, $b\geq 2$. The magnitude of a typical external branch is asymptotically given by $n/\mu(n)$, where $n$ denotes the sample size. This result, as well as the asymptotic independence of several typical external lengths, holds in full generality, while convergence in distribution of the scaled external lengths requires that $\mu(n)$ be regularly varying at infinity. For the maximal lengths, we distinguish two cases. Firstly, we analyze a class of $\Lambda$-coalescents coming down from infinity and with regularly varying $\mu$. Here the scaled external lengths behave as the maximal values of $n$ i.i.d. random variables, and their limit is captured by a Poisson point process on the positive real line. Secondly, we turn to the Bolthausen-Sznitman coalescent, where the picture changes. Now the limiting behavior of the normalized external lengths is given by a Cox point process, which can be expressed by a randomly shifted Poisson point process.
\noindent\emph{AMS 2010 subject classification: 60J75 (primary), 60F05, 60J27, 92D25$^{\color{white} \big|}$}\\ \noindent\emph{Keywords:} $\Lambda$-coalescent, dustless coalescent, Bolthausen-Sznitman coalescent, Beta-coales\-cent, Kingman's coalescent, external branch lengths, Poisson point process, Cox point process, weak limit law \end{abstract}
\section{Introduction and main results}
In population genetics, family trees stemming from a sample out of a big population are modeled by coalescents. The prominent Kingman coalescent \cite{King82} found widespread applications in biology. More recently, the Bolthausen-Sznitman coalescent, originating from statistical mechanics \cite{BS98}, has gained in importance in analyzing genealogies of populations undergoing selection \cite{BDMM07,DWF13,NH13,Schw17}. Unlike Kingman's coalescent, the Bolthausen-Sznitman coalescent allows multiple mergers. The larger class of Beta-coalescents has found increasing interest, e.g., in the study of marine species \cite{SBB13, NNY16}. All these instances are covered by the notion of $\Lambda$-coalescents as introduced by Pitman \cite{Pit99} and Sagitov \cite{Sag99} in 1999. Today, general properties of this extensive class have become more transparent \cite{KSW18,DK18}. \\ In this paper, we deal with the lengths of external branches of $\Lambda$-coalescents under the broad assumption that the coalescent has no dust component, which applies to all the cases mentioned above. We shall treat external branches of typical and, under additional regularity assumptions, of maximal length. For the total external length, see the publications \cite{Moe10,JK11,DKW14,KPS-J14,DY15}.
$\Lambda$-coalescents are Markov processes $(\Pi(t),\,t\geq 0)$ taking values in the set of partitions of $\N$, where $\Lambda$ denotes a non-vanishing finite measure on the unit interval $[0,1]$. Its restrictions $(\Pi_n(t),\,t\geq 0)$ to the sets $\{1,\ldots,n\}$ are called $n$-coalescents. They are continuous-time Markov chains characterized by the following dynamics: Given the event that $\Pi_n(t)$ is a partition consisting of $b\geq 2$ blocks, $k$ specified blocks merge at rate \[\lambda_{b,k}\ :=\ \int_{[0,1]}p^k(1-p)^{b-k}\frac{\Lambda(dp)}{p^2}, \qquad 2\leq k\leq b, \] to a single one. In this paper, the crucial characteristic of $\Lambda$-coalescents is the sequence $\mu=(\mu(b))_{b\geq 2}$ defined as \[\mu(b) \ :=\ \sum_{k=2}^b(k-1)\binom{b}{k}\lambda_{b,k}, \qquad b\geq 2.\] We call this quantity the rate of decrease as it is the rate at which the number of blocks is decreasing on average. Note that a merger of $k$ blocks corresponds to a decline of $k-1$ blocks.
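For concreteness we add a small numerical illustration of our own, which is not needed in the sequel. For the Bolthausen-Sznitman coalescent, $\Lambda$ is the uniform distribution on $[0,1]$, so that $\lambda_{b,k}=\int_0^1 p^{k-2}(1-p)^{b-k}dp=B(k-1,b-k+1)$, and the rate of decrease can be computed directly from its definition; its growth is of order $n\log n$ (cf. part (iii) of the example following Corollary \ref{cor} below).
\begin{verbatim}
# Illustration: rate of decrease mu(b) of the Bolthausen-Sznitman coalescent,
# computed from lambda_{b,k} = Beta(k-1, b-k+1), compared with b*log(b).
from math import lgamma, log, exp

def term(b, k):
    # (k-1) * binom(b,k) * lambda_{b,k}, evaluated in log space; the factor
    # lgamma(b-k+1) of binom(b,k) cancels against the Beta function
    return exp(log(k - 1) + lgamma(b + 1) - lgamma(k + 1)
               + lgamma(k - 1) - lgamma(b))

def mu(b):
    return sum(term(b, k) for k in range(2, b + 1))

for n in [10, 100, 1000, 10000]:
    m = mu(n)
    print(n, round(m, 2), round(m / (n * log(n)), 3))
# the last column increases slowly towards 1, in line with mu(n) ~ n*log(n)
\end{verbatim}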
The importance of $\mu$ also became apparent from other publications \cite{Schw00,LS06,DK18}. In particular, the assumption of absence of a dust component may be expressed in this term. Originally characterized by the condition \[\int_{[0,1]}\frac{\Lambda(dp)}{p} \eq \infty,\] (see \cite{Pit99}),
\begin{samepage}\enlargethispage{2\baselineskip} it can be equivalently specified by the requirement \[\frac{\mu(n)}{n} \ \rightarrow \ \infty\] as $n\to\infty$ (see Lemma 1 (iii) of \cite{DK18}).
\end{samepage}
An $n$-coalescent can be thought of as a random rooted tree with $n$ labeled leaves representing the individuals of a sample. Its branches specify ancestral lineages of the individuals or their ancestors. The branch lengths give the time spans until the occurrence of new common ancestors. Branches ending in a leaf are called external branches. If mutations under the infinite sites model \cite{Kim69} are added to these considerations, the importance of external branches is revealed. This is due to the fact that mutations on external branches only affect a single individual of the sample. Longer external branches thereby result in an excess of singleton polymorphisms \cite{WNL-CA01} and are known to be characteristic of trees with multiple mergers \cite{EBBF15}; e.g., external branch lengths have been used to discriminate between different coalescents in the context of HIV trees \cite{WBWF16} (see also \cite{VLBRS18}). Of course such considerations have rather theoretical value as long as singleton polymorphisms cannot be distinguished from sequencing errors.
Now we turn to the main results of this paper. For $1\leq i\leq n$, the length of the external branch ending in leaf $i$ within an $n$-coalescent is defined as \[T_i^n\ :=\ \inf{\left\{t\geq 0:\;\left\{i\right\}\notin\Pi_{n}(t)\right\}}.\] In the first theorem, we consider the length $T^n$ of a randomly chosen external branch. Based on the exchangeability, $T^n$ is equal in distribution to $T_i^n$ for $1\leq i\leq n$. The result clarifies the magnitude of $T^n$ in full generality.
\begin{theorem} \label{dustless} For a $\Lambda$-coalescent without a dust component, we have for $t\geq 0$, \[e^{-2t}+\oo(1)\ \leq \ \PP\left(\frac{\mu(n)}{n}\ T^{n}> t\right)\ \leq \ \frac{1}{1+t}+\oo(1)\] as $n\to\infty$. \end{theorem}
Among other things, this theorem excludes the possibility that $T^n$ converges to a positive constant in probability. In \cite{KSW14} the order of $T^n$ was interpreted as the duration of a generation, namely the time at which a specific lineage, out of the $n$ present ones, takes part in a merging event. In that paper, only Beta$(2-\al,\al)$-coalescents with $1<\al<2$ were considered, and the duration was given as $n^{1-\al}$. Our theorem shows that for this quantity the term $n/\mu(n)$ is a suitable measure for $\Lambda$-coalescents without a dust component.
Asymptotic independence of the external branch lengths holds as well in full generality for dustless coalescents. In light of the waiting times, which the different external branches have in common, this may be an unexpected result. However, this dependence vanishes in the limit. Then it becomes crucial whether two external branches end in the same merger. Such an event is asymptotically negligible only in the dustless case. This heuristic motivates the following result.
\begin{theorem} \label{indep} A $\Lambda$-coalescent has no dust component if and only if for fixed $k\in\N $ and for any sequence of numbers $t_1^n,\, \ldots,\, t_k^n\geq 0$, $n\geq 2$, we have \[\PP\left(T_1^n\,\leq\, t_1^n,\,\ldots,\,T_k^n\,\leq\, t_k^n\right)\eq \PP\left(T_1^n\,\leq\, t_1^n\right)\,\cdots\,\PP\left(T_k^n\,\leq\, t_k^n\right)\,+\,\oo(1)\] as $n\to\infty$. \end{theorem}
In the dustless case, one has $T_i^n \to 0$ in probability for $1\leq i\leq k$; hence, one reasonably restricts to the case $t_i^n\to 0$ as $n\to\infty$. The statement that the asymptotic independence fails for coalescents with a dust component goes back to M{\"o}hle (see equation (10) of \cite{Moe10}).
In order to achieve convergence in distribution of the scaled lengths, stronger assumptions are required on the rate of decrease, namely that $\mu$ is a regularly varying sequence. A characterization of this property is given in Proposition \ref{reg_coa} below. Let $\delta_0$ denote the Dirac measure at zero.
\begin{theorem} \label{iff} For a $\Lambda$-coalescent without a dust component, there is a sequence $(\gamma_n)_{n\in\N}$ such that $\gamma_n\,T^n$ converges in distribution to a probability measure unequal to $\delta_0$ as $n\to\infty$ if and only if $\mu$ is regularly varying at infinity. Then its exponent $\alpha$ of regular variation fulfills $1\leq\alpha\leq 2$ and we have \begin{enumerate} \item for $1<\alpha\leq 2$, \begin{align*} \PP\left(\frac{\mu(n)}{n}\ T^n> t\right) \ \longrightarrow \ \frac{1}{\left(1+\left(\alpha-1\right)t\right)^{\frac{\alpha}{\alpha-1}}} \,, \qquad t\geq 0, \end{align*} \item for $\alpha=1$, \begin{align*} \PP\left(\frac{\mu(n)}{n}\ T^n> t\right) \ \longrightarrow \ e^{-t}, \qquad t\geq 0, \end{align*} \end{enumerate} as $n\to\infty$. \end{theorem}
In particular, this theorem includes the special cases known from the literature. Blum and Fran\c{c}ois \cite{BF05}, as well as Caliebe et al. \cite{CNKR07}, studied Kingman's coalescent. For the Bolthausen-Sznitman coalescent, Freund and M{\"o}hle \cite{FM09} showed asymptotic exponentiality of the external branch length. This result was generalized by Yuan \cite{Y14}. A class of coalescents containing the Beta$(2-\alpha,\alpha)$-coalescent with $1<\alpha<2$ was analyzed by Dhersin et al. \cite{DFS-JY13}.
Combining Theorem \ref{indep} and \ref{iff} yields the following corollary:
\begin{samepage}\enlargethispage{2\baselineskip} \begin{cor} \label{cor} Suppose that the $\Lambda$-coalescent lacks a dust component and has regularly varying rate of decrease $\mu$ with exponent $\alpha\in\left[1,2\right]$. Then for fixed $k\in\N $, we have \[\frac{\mu(n)}{n}\,\left(T_1^n,\,\ldots,\,T_k^n\right)\ \stackrel{d}{\longrightarrow} \ \left(T_1,\,\ldots,\,T_k\right)\] as $n\to\infty$, where $T_1,\,\ldots,\,T_k$ are i.i.d. random variables each having the density \begin{align} \label{dens} f(t)\,dt \eq \frac{\alpha}{\left(1+\left(\alpha-1\right)t\right)^{1+\frac{\alpha}{\alpha-1}}}\ dt \,, \quad t\geq 0, \end{align} for $1<\alpha\leq 2$ and a standard exponential distribution for $\alpha=1$. \end{cor}
\end{samepage}
\begin{ex} For $k\in\N$, let $T_1,\,\ldots,\,T_k$ be the i.i.d. random variables from Corollary \ref{cor}. \begin{enumerate} \item If $\Lambda\left(\left\{0\right\}\right)=2$, then $\mu(n)\sim n^2$ and consequently \[n\left(T_1^n,\ldots,T_k^n\right)\ \stackrel{d}{\longrightarrow} \ \left(T_1,\ldots,T_k\right)\] as $n\to\infty$.
This statement covers (after scaling) the Kingman case. Note that $\Lambda|_{(0,1]}$ does not affect the limit. \item If $\Lambda(dp)= c_a\,p^{a-1}(1-p)^{b-1}dp$ for $0<a<1$, $b>0$ and $c_a:=(1-a)(2-a)/\Gamma(a)$, then $\mu(n)\sim n^{2-a}$ and therefore \[n^{1-a}\left(T_1^n,\ldots,T_k^n\right)\ \stackrel{d}{\longrightarrow} \ \left(T_1,\ldots,T_k\right)\] as $n\to\infty$. After scaling, this includes the Beta$(2-\alpha,\alpha)$-coalescent with $1<\alpha<2$ (see Theorem 1.1 of Siri-J{\'e}gousse and Yuan \cite{SY16}). Note that the constant $b$ does not appear in the limit. \item If $\Lambda(dp)=(1-p)^{b-1}dp$ with $b>0$, then we have $\mu(n)\sim n\log{n}$ implying \begin{align}\label{ex} \log{n}\left(T_1^n,\ldots,T_k^n\right)\ \stackrel{d}{\longrightarrow} \ \left(T_1,\ldots,T_k\right) \end{align} as $n\to\infty$. This contains the Bolthausen-Sznitman coalescent (see Corollary 1.7 of Dhersin and M{\"o}hle \cite{DM13}). Again the constant $b$ does not show up in the limit. \end{enumerate} \end{ex}
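To illustrate case (i) of the example, we add a small Monte Carlo sketch of our own (with arbitrary simulation parameters). For $\Lambda(\{0\})=2$ every pair of lineages merges at rate $2$, the external branch of a fixed individual ends at the first merger involving its lineage, and $\mu(n)=n(n-1)$, hence $\mu(n)/n=n-1$; the empirical tail of $(n-1)T_1^n$ at $t=1$ should then be close to the limiting value $(1+t)^{-2}=0.25$ from Theorem \ref{iff}.
\begin{verbatim}
# Monte Carlo sketch for Example (i): Lambda({0}) = 2, i.e. pair merger rate 2.
import random

def external_length(n, rng):
    b, t = n, 0.0
    while b >= 2:
        t += rng.expovariate(b * (b - 1))   # total merger rate is 2*C(b,2)
        if rng.random() < 2.0 / b:          # the fixed lineage is in the pair
            return t
        b -= 1                              # otherwise just one block is lost
    return t

rng = random.Random(1)
n, reps = 200, 20000
tail = sum((n - 1) * external_length(n, rng) > 1.0 for _ in range(reps)) / reps
print(tail)   # should be close to 0.25 (up to Monte Carlo and finite-n error)
\end{verbatim}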
In the second part of this paper, we change perspective and examine the external branch lengths ordered decreasingly, starting from the maximal value. In this context, an approach via a point process description is appropriate. Here we consider $\Lambda$-coalescents having a regularly varying rate of decrease $\mu$, in addition to the absence of a dust component. It turns out that one has to distinguish between two cases.
First, we treat the case of $\mu$ being regularly varying with exponent $\al\in(1,2]$ (implying that the coalescent comes down from infinity). We introduce the sequence $(s_n)_{n\geq 2}$ given by \begin{equation}\label{new2} \mu(s_n) \eq \frac{\mu(n)}{n}\,. \end{equation} Note that $\mu(n)/n$ is a strictly increasing and, in the dustless case, diverging sequence (see Lemma~\ref{prop}~(ii) and (iv) below), which directly transfers to the sequence $(s_n)_{n\geq 2}$. Also note in view of Lemma \ref{prop} (ii) below that
\begin{samepage}\enlargethispage{2\baselineskip} \begin{align}\label{s_n} s_n \eq \oo(n) \end{align} as $n\to\infty$.
\end{samepage}
\begin{ex} \begin{enumerate} \item If $\mu(n)\sim n^\alpha$ with $\al\in(1,2]$, then we have $s_n\sim n^{(\al-1)/\al}$ as $n\to\infty$. \item If $\mu$ is regularly varying with exponent $\al\in(1,2]$, then the sequence $s_n$ is regularly varying with exponent $(\al-1)/\al$. \end{enumerate} \end{ex}
We define point processes $\Phi^{\,n}$ on $\left(0,\infty\right)$ via \[\Phi^{\,n}(B)\ :=\ \#\left\{i\leq n: \;\frac{\mu(n)}{ns_n}\ T_i^{n}\in B\right\}\] for Borel sets $B\subset\left(0,\infty\right)$.
\begin{theorem} \label{reg_var} Assume that the $\Lambda$-coalescent has a regularly varying rate of decrease $\mu$ with exponent $\al\in(1,2]$. Then, as $n\rightarrow\infty$, the point process $\Phi^{\,n}$ converges in distribution to a Poisson point process $\Phi$ on $(0,\infty)$ with intensity measure \[\phi(dx) \eq \frac{\alpha }{\left(\left(\alpha-1\right)x\right)^{1+\frac{\al}{\al-1}}}\ dx.\] \end{theorem}
Note that $\int_0^1\phi(x)dx=\infty$, which means that the points from the limit $\Phi$ accumulate at the origin. On the other hand, we have $\int_1^\infty\phi(x)dx<\infty$ saying that the points can be arranged in decreasing order. Thus, the theorem focuses on the maximal external lengths showing that the longest external branches differ from a typical one by the factor $s_n$ in order of magnitude (see Corollary \ref{cor}). For Kingman's coalescent, this result was obtained by Janson and Kersting \cite{JK11} using a different method.
In particular, letting $T_{\left\langle 1\right\rangle}^n$ be the maximal length of the external branches, we obtain for $x>0$, \[\PP\left(\frac{\mu(n)}{ns_n}T_{\left\langle 1\right\rangle}^n \loe x\right) \ \to \ e^{-((\alpha-1)x)^{-\frac{\alpha}{\alpha-1}}}\] as $n\to\infty$, i.e., the properly scaled $T_{\left\langle 1\right\rangle}^n$ is asymptotically Fréchet-distributed.
Corollary \ref{cor} shows that the external branch lengths behave for large $n$ as i.i.d. random variables. This observation is emphasized by Theorem \ref{reg_var} because the maximal values of $n$ i.i.d. random variables, with the densities stated in Corollary \ref{cor}, have exactly the limiting behavior given in Theorem \ref{reg_var} (including the scaling constants $s_n$).
This heuristic fails for the Bolthausen-Sznitman coalescent, which we now address. For $n\in\N$, define the quantity \[t_n\ :=\ \log\log{n}-\log\log\log{n}+\frac{\log\log\log{n}}{\log\log{n}},\] where we put $t_n:=0$ if the right-hand side is negative or not well-defined. Here we consider the point processes $\Psi^{\,n}$ on the whole real line given by \[\Psi^{\,n}(B)\ :=\ \#\left\{i\leq n:\, \log{\log{(n)}}(T_i^n-t_n)\in B\right\}\] for Borel sets $B\subset\R $. As before, we focus on the maximal values of $\Psi^n$.
\begin{theorem}\label{bs} For the Bolthausen-Sznitman coalescent, the point process $\Psi^{\, n}$ converges in distribution as $n\to\infty$ to a Cox point process $\Psi$ on $\R $ directed by the random measure \[\psi\left(dx\right) \eq E\,e^{-x} dx,\] where $E$ denotes a standard exponential random variable. \end{theorem}
Observe that this random density may be rewritten as \[e^{-x+\log{E}}dx.\] This means that the limiting point process can also be considered as a Poisson point process with intensity measure $e^{-x}dx$ shifted by the independent amount $\log{E}$. This alternative representation will be used in the theorem's proof (see Theorem \ref{bs v2} below). Recall that $G:=-\log{E}$ has a standard Gumbel distribution.
In particular, letting again $T_{\left\langle 1\right\rangle}^n$ be the maximum of $T_1^n, \ldots, T_n^n$, we obtain \begin{equation}\label{new} \PP\left(\log{\log{(n)}}(T_{\left\langle 1\right\rangle}^n -t_n)\loe x\right) \ \longrightarrow \ \int_0^\infty e^{-ye^{-x}}e^{-y}\; dy \eq \frac{1}{1+e^{-x}} \end{equation} as $n\to\infty$. Notably, we arrive at a limit that is non-standard in the extreme value theory of i.i.d. random variables, namely the so-called logistic distribution.
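The integral identity behind \eqref{new} is elementary, since $\int_0^\infty e^{-y(1+e^{-x})}dy=(1+e^{-x})^{-1}$; as a minimal numerical confirmation (our own addition), it can also be checked by quadrature.
\begin{verbatim}
# Illustrative check of the identity used in (new).
import numpy as np
from scipy.integrate import quad

for x in [-2.0, 0.0, 1.5, 4.0]:
    val, _ = quad(lambda y: np.exp(-y * np.exp(-x) - y), 0.0, np.inf)
    assert abs(val - 1.0 / (1.0 + np.exp(-x))) < 1e-8
print("limit in (new) equals the logistic distribution function")
\end{verbatim}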
We point out that the limiting point process $\Psi$ no longer coincides with the limiting Poisson point process as obtained for the maximal values of $n$ independent exponential random variables. The same turns out to be true for the scaling sequences. In order to explain these findings, note that \eqref{new} implies \[\frac{T^n_{\left\langle 1\right\rangle}}{\log\log{n}} \eq 1 +\oo_p(1)\] as $n\to\infty$, where $\oo_p(1)$ denotes a sequence of random variables converging to $0$ in probability. In particular, $T^n_{\left\langle 1\right\rangle}\to\infty$ in probability. Hence, we pass with this theorem to the situation where very large mergers affect the maximal external lengths. Then circumstances change and new techniques are required. For this reason, we have to confine ourselves to the Bolthausen-Sznitman coalescent in the case of regularly varying $\mu$ with exponent $\al=1$.
It is interesting to note that an asymptotic shift by a Gumbel distributed variable also shows up in the absorption time $\widetilde{\tau}_n$ (the moment of the most recent common ancestor) of the Bolthausen-Sznitman coalescent: \[\widetilde{\tau}_n-\log{\log{n}} \ \stackrel{d}{\longrightarrow}\ G\] as $n\to\infty$ (see Goldschmidt and Martin \cite{GM05}). However, this shift remains unscaled. Apparently, these two Gumbel distributed variables under consideration build up within different parts of the coalescent tree.
Before closing this introduction, we provide some hints concerning the proofs. For the first three theorems, we make use of an asymptotic representation for the tail probabilities of the external branch lengths. Remarkably, this representation involves, solely, the rate of decrease $\mu$, though in a somewhat implicit, twofold manner. The proofs of the three theorems consist in working out the consequences of these circumstances.
The representation is given in Theorem \ref{thm:int} and relies largely on different approximation formulae derived in \cite{DK18}. We recall the required statements in Section \ref{SLOLN}.
The proofs of the last two theorems incorporate Corollary \ref{cor} as one ingredient. The idea is to implement stopping times $\widetilde{\rho}_{c,n}$ with the property that, at that moment, the number of still extant external branches is positive and of order $1$ uniformly in $n$.
To these remaining branches, the results of Corollary \ref{cor} are applied taking the strong Markov property into account. More precisely, let \[N_n \eq \left(N_n(t), t\geq 0\right) \] be the block counting process of the $n$-coalescent, where \[N_n(t) \ := \ \#\Pi_n(t)\] states the number of lineages present at time $t\geq 0$. For definiteness, we put $N_n(t)=1$ for $t>\widetilde{\tau}_n$. In the case of regularly varying $\mu$ with exponent $1<\al\leq 2$, we will show that \[\widetilde{\rho}_{c,n}\ :=\ \inf\left\{t\geq 0:\,N_n(t)\leq c s_n\right\}\] with arbitrary $c>0$ is the right choice. Next, we split the external lengths $T^n_{i}$ into the times $\widecheck{T}^{n}_{i}$ up to the moment $\widetilde{\rho}_{c,n}$ and the residual times $\widehat{T}^{n}_{i}$. Formally, we have \[\widecheck{T}^{n}_{i}\ :=\ T^n_{i}\wedge\widetilde{\rho}_{c,n} \qquad \text{ and }\qquad \widehat{T}^{n}_i\ :=\ T^n_{i}-\widecheck{T}^{n}_{i}.\] We shall see that $\widecheck{T}^{n}_{i}$ is of negligible size compared to $\widehat{T}^{n}_{i}$ for large values of $c$. On the other hand, with increasing $c$, also the number of extant external branches tends to infinity uniformly in $n$. Corollary~\ref{cor} tells us that the $\widehat{T}^{n}_{i}$ behave approximately like i.i.d. random variables. Therefore, one expects that the classical extreme value theory applies in our context. These are the ingredients of the proof of Theorem~ \ref{reg_var}.
\begin{figure}
\caption{The stopping time $\widetilde{\rho}_{c,n}$ subdividing the external branch ending in leaf $i$ into two parts of length $\widecheck{T}_i^n$ and $\widehat{T}_i^n$, respectively.}
\end{figure}
The approach for the Bolthausen-Sznitman coalescent is essentially the same. However, new obstacles appear. In contrast to the previous case $\al>1$, the lengths of the maximal branches now diverge in probability. As a consequence, in the case $\alpha=1$, we have in general no longer control over the stopping times $\widetilde{\rho}_{c,n}$ as defined above. Fortunately, for the Bolthausen-Sznitman coalescent, Möhle \cite{Moe15} provides a precise asymptotic description of the block counting process $N_n$ by means of the Mittag-Leffler process, which applies also in the large time regime. Adapted to this result, the role of $\widetilde{\rho}_{c,n}$ is taken by $t_{c,n}\wedge\widetilde{\tau}_n$, where \[t_{c,n}\ :=\ t_n-\frac{\log{c}}{\log\log{n}}\] for some $c>1$. Thus, for the Bolthausen-Sznitman coalescent, the external lengths $T^n_{i}$ are split into \[\widecheck{T}^{n}_{i}\ :=\ T^n_{i}\wedge t_{c,n} \qquad \text{ and }\qquad \widehat{T}^{n}_i\ :=\ T^n_{i}-\widecheck{T}^{n}_{i}.\]
In contrast to the case $\al>1$, the part $\widecheck{T}^{n}_{i}$ does not disappear for $c\to\infty$ but is asymptotically Gumbel-distributed and shows up in the above mentioned independent shift.
The paper is organized as follows: In Section \ref{SLOLN} we recapitulate some laws of large numbers from \cite{DK18}. Section \ref{sec_prop} summarizes several properties of the rate of decrease. The fundamental asymptotic expression for the tail probabilities of the external branch lengths is developed in Section~\ref{sec_rand}. Sections \ref{sec_proofs} and \ref{sec_proof} contain the proofs of Theorems \ref{dustless} to \ref{iff}. In Section \ref{sec_mom} we prepare the proofs of the remaining theorems by establishing a formula for factorial moments of the number of external branches. Sections \ref{sec_proof_beta} and \ref{sec_proof_bs} include the proofs of Theorems \ref{reg_var} and \ref{bs}.
\section{Some laws of large numbers}\label{SLOLN}
In this section we report on some laws of large numbers from the recent publication \cite{DK18}, which are a main tool in the subsequent proofs. Let $X=(X_j)_{j\in\N_0}$ denote the Markov chain embedded in the block-counting process $N_n$, i.e., $X_j$ denotes the number of branches after $j$ merging events. (For convenience, we suppress $n$ in the notation of $X$.) Also, let \[ \rho_r :=\min \{ j\ge 0: X_j \le r\} \]
for numbers $r>0$. We are dealing with laws of large numbers for functionals of the form \[ \sum_{j=0}^{\rho_{r_n}-1} f(X_j) \] with some suitable positive function $f$ and some sequence $(r_n)_{n \ge 1}$ of positive numbers. These laws of large numbers build on two approximation steps. First, letting \[ \Delta X_{j+1}:= X_{j}-X_{j+1} \quad \text{and}\quad \nu(b):= \mathbb E[\Delta X_{j+1} \mid X_{j}=b] \] for $j \ge 0$, we notice that for large $n$, \[ \sum_{j=0}^{\rho_{r}-1}f(X_j) \approx \sum_{j=0}^{\rho_{r}-1} f(X_j)\frac {\Delta X_{j+1}}{\nu(X_j)} .\] The rationale of this approximation consists in the observation that the difference of both sums stems from the martingale difference sequence $f(X_j)(1-\Delta X_{j+1}/\nu(X_j))$, $j \ge 0$, and, thus, is of a comparatively negligible order. Second, we remark that \[ \sum_{j=0}^{\rho_{r}-1}\frac { f(X_j)}{\nu(X_j)}\Delta X_{j+1} \approx \int_{r}^n \frac {f(x)}{\nu(x)}\, dx,\] with $\nu(x)$ extending the numbers $\nu(b)$ to real numbers $x \ge 2$. Here, we regard the left-hand sum as a Riemann approximation of the right-hand integral and take $X_{\rho_{r}} \approx r$ into account. Altogether, \[\sum_{j=0}^{\rho_{r}-1} f(X_j) \approx \int_{r}^n \frac {f(x)}{\nu(x)}\, dx.\]
In order to estimate the errors and, in particular, the martingale's quadratic variation, different assumptions are required. For details we refer to \cite{DK18} and deal here only with the two cases that we use later in our proofs.
The first case concerns the time \[\widetilde \rho_{r}:= \inf\{ t\ge 0: N_n(t)\loe r\},\] when the block-counting process drops below $r$. Letting $W_j$ be the period of stay of $N_n$ at state $X_j$ (again suppressing $n$ in the notation), we have \[ \widetilde \rho_{r} = \sum_{j=0}^{\rho_{r}-1} W_j \approx \sum_{j=0}^{\rho_r-1}\mathbb E[W_j \mid N_n]=\sum_{j=0}^{\rho_{r}-1} \frac 1{\lambda(X_j)},\] where $\lambda(b):= \sum_{2\le k \le b} \lambda_{b,k}$ is the jump rate of the block counting process. Also, $\nu(b)= \mu(b)/\lambda(b)$. Therefore, putting $f(x)=\lambda(x)^{-1}$, we are led to the approximation formula \[ \widetilde \rho_{r} \approx \int_r^n \frac{dx}{\mu(x)} \ . \] More precisely, we have the following law of large numbers.
\begin{prop} \label{ErgLem1} Assume that the $\Lambda$-coalescent is dustless. Let $\gamma < 1$ and let $2 \le r_n \le \gamma n$, $n \ge 1$, be numbers such that \[ \int_{r_n}^n \frac{dx}{\mu(x)} \to 0 \] as $n \to \infty$. Then \[ \widetilde \rho_{r_n} = (1+ \oo_P(1)) \int_{r_n}^n \frac{dx}{\mu(x)} \] as $n\to \infty$. \end{prop}
The role of the assumptions is easily understood: The condition $\int_{r_n}^n \frac{dx}{\mu(x)} \to 0$ implies that $\widetilde \rho_{r_n}\to 0$ in probability, i.e., we are in the small time regime. This is required to avoid very large jumps $\Delta X_{j+1}$ of order $X_{j+1}$, which would ruin the above Riemann approximation. The condition $r_n \le \gamma n$ guarantees that $\widetilde \rho_{r_n}$ is sufficiently large to allow for a law of large numbers.
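For orientation, we add an elementary illustration of our own in the Kingman-type case $\Lambda=2\delta_0$: there $\mu(x)=x(x-1)$, every merger removes exactly one block, and $\mathbb E[\widetilde\rho_r]=\sum_{b=r+1}^n 1/(b(b-1))$, which can be compared directly with the integral $\int_r^n dx/\mu(x)$ appearing in Proposition \ref{ErgLem1}.
\begin{verbatim}
# Illustration for Lambda = 2*delta_0: expected value of rho~_r versus the
# integral of 1/mu(x) = 1/(x(x-1)) from r to n.
from math import log

def expected_time(n, r):
    return sum(1.0 / (b * (b - 1)) for b in range(r + 1, n + 1))

def integral(n, r):
    # antiderivative of 1/(x(x-1)) is log((x-1)/x)
    return log((n - 1) / n) - log((r - 1) / r)

for n, r in [(10**3, 10), (10**5, 100), (10**6, 1000)]:
    print(n, r, expected_time(n, r), integral(n, r))
# the two columns agree up to terms of smaller order as r (and n) grow
\end{verbatim}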
Secondly, we turn to the case $f(x)=x^{-1}$. Here we point out that as $x \to \infty$, \[ \frac 1{\nu(x)} \sim x \frac d{dx} \log \frac{\mu(x)}x, \] which follows from \cite[Lemma 1 (ii)]{DK18}. Therefore, \[ \int_r^n \frac{dx}{x\nu(x)} \approx \log \Big(\frac{\mu(n) }{n}\frac r{\mu(r)}\Big)\,, \] and we have the following law of large numbers.
\begin{prop}\label{ErgLem2} Under the assumptions of the previous proposition, we have \[ \sum_{j=0}^{\rho_{r_n}-1} \frac 1{X_j} = (1+ \oo_P(1))\log \Big(\frac{\mu(n) }{n}\frac {r_n}{\mu(r_n)}\Big) \quad \text{and} \quad \sum_{j=0}^{\rho_{r_n}-1} \frac 1{X_j} = \log \Big(\frac{\mu(n) }{n}\frac {r_n}{\mu(r_n)}\Big) + \oo_P(1) \] as $n \to \infty$. \end{prop}
For the proofs of these propositions, see \cite[Section 3]{DK18}.
\section{Properties of the rate of decrease}\label{sec_prop} We now have a closer look at the rate of decrease $\mu$ introduced in the first section. Defining \begin{align}\label{mu_def} \mu(x) \ :=\ \int_{[0,1]}\left(xp-1+(1-p)^x\right)\frac{\Lambda(dp)}{p^2}, \end{align} we extend $\mu$ to all real values $x\geq 1$, where the integrand's value at $p=0$ is understood to be $x(x-1)/2$.
The next lemma summarizes some required properties of $\mu$.
\begin{lem} \label{prop} The rate of decrease and its derivatives have the following properties: \begin{enumerate} \item $\mu(x)$ has derivatives of any order with finite values, also at $x=1$. Moreover, $\mu$ and $\mu'$ are both non-negative and strictly increasing, while $\mu''$ is a non-negative and decreasing function. \item For $1<x\leq y$, \begin{align*} \frac{x(x-1)}{y(y-1)}\loe\frac{\mu(x)}{\mu(y)}\loe \frac{x}{y}\,. \end{align*} \item For $x> 1$, \begin{align*} \mu'(1) \loe \frac{\mu(x)}{x-1} \loe \mu'(x)\qquad \text{ and }\qquad \mu''(x) \loe \frac{\mu'(x)}{x-1}. \end{align*} \item In the dustless case, \[\frac{\mu(x)}{x} \ \rightarrow \ \infty \] as $x\to\infty$. \end{enumerate} \end{lem}
\begin{proof} (i) Let \[\mu_2(x) \ := \ \int_{[0,1]}(1-p)^x\log^2{(1-p)}\frac{\Lambda(dp)}{p^2},\] which is a $\mathcal{C}^\infty$-function for $x>0$. Set \begin{align*} \mu_1(x)\ := \ & \int_1^x \mu_2(y)dy + \int_{[0,1]}\left(p+(1-p)\log{(1-p)}\right)\frac{\Lambda(dp)}{p^2}\\ \eq & \int_{[0,1]}\left((1-p)^x\log{(1-p)}+p\right)\frac{\Lambda(dp)}{p^2}\,. \end{align*} Note that the second integral in the first line is finite and non-negative just as its integrand. Then we have \[\mu(x) \eq \int_{1}^x\mu_1(y)dy.\] Thus, $\mu_1(x)=\mu'(x)$ and $\mu_2(x)=\mu''(x)$ for $x\geq 1$. From these formulae our claim follows.
(ii) The inequalities are equivalent to the fact that $\mu(x)/x$ is increasing and $\mu(x)/(x(x-1))$ is decreasing as follows from formulae (7) and (8) of \cite{DK18}.
(iii) The monotonicity properties from (i) and $\mu(1)=0$ yield for $x\geq 1$, \[\mu'(1)(x-1) \loe \mu(1) + \int_1^x\mu'(y)dy \loe \mu'(x)(x-1).\] Similarly, we get $\mu''(x)(x-1)\leq\mu'(x)$ because $\mu'(1)\geq 0$.
(iv) See Lemma 1 (iii) of \cite{DK18}. \end{proof}
In order to characterize regular variation of $\mu$, we introduce the function \[H(u)\ :=\ \frac{\Lambda{\left(\left\{0\right\}\right)}}{2}+\int_0^uh(z)dz\,, \quad 0\leq u\leq 1,\] where \[h(z)\ :=\ \int_z^1\int_{\left(y,1\right]}\frac{\Lambda\left(dp\right)}{p^2}dy\,, \quad 0\leq z\leq 1.\] Note that $H$ is a finite function because we have \begin{align}\label{H(1)} H(1)\eq\frac{\Lambda{\left(\left\{0\right\}\right)}}{2}+\int_0^1 \int_0^p\int_0^y dz\,dy\,\frac{\Lambda(dp)}{p^2} \eq \frac{\Lambda\left(\left[0,1\right]\right)}{2}\ <\ \infty\,. \end{align}
\begin{prop}\label{reg_coa} For a $\Lambda$-coalescent without a dust component, the following statements hold: \begin{enumerate} \item $\mu(x)$ is regularly varying at infinity if and only if $H(u)$ is regularly varying at the origin. Then $\mu$ has an exponent $\alpha\in[1,2]$ and we have \begin{equation}\label{H} \mu(x)\ \sim\ \Gamma(3-\alpha)\,x^{2}\,H\left(x^{-1}\right) \end{equation} as $x\to\infty$. \item $\mu(x)$ is regularly varying at infinity with some exponent $\alpha\in(1,2)$ if and only if the function $\int_{(y,1]}p^{-2}\Lambda(dp)$ is regularly varying at the origin with an exponent $\alpha\in(1,2)$. Then we have \[\mu(x)\ \sim\ \frac{\Gamma(2-\alpha)}{\alpha-1} \int_{x^{-1}}^1\frac{\Lambda(dp)}{p^2}\] as $x\to\infty$. \end{enumerate} \end{prop}
The last statement brings the regular variation of $\mu$ together with the notion of regularly varying $\Lambda$-coalescents as introduced in \cite{DK18}.
For the proof of this proposition, we apply the following characterization of regular variation.
\begin{lem}\label{deriv} Let $V(z)$, $z>0$, be a positive function with an ultimately monotone derivative $v(z)$ and let $\eta\neq 0$.
Then $V$ is regularly varying at the origin with exponent $\eta$ if and only if $\left|v\right|$ is regularly varying at the origin with exponent $\eta-1$ and \begin{align*} z\,v(z)\ \sim \ \eta\, V(z) \end{align*} as $z\to 0^+$. \end{lem}
\begin{proof} For $\eta>0$, we have $V(0+)=0$ and, therefore, $V(z)=\int_0^zv(y)dy$. For $\eta<0$, we use the equation $V(z)=\int_z^1(-v(y))dy+V(1)$ instead: here it holds $V(0+)=\infty$. Now our claim follows from well known results for regularly varying functions at infinity (see \cite{Sen73} as well as Theorem~1~(a) and (b) in Section VIII.9 \cite{Fell71}). The proofs translate one-to-one to regularly varying functions at the origin. \end{proof}
\begin{proof}[Proof of Proposition \ref{reg_coa}] (i) From the definition \eqref{mu_def}, we obtain by double partial integration (see formula (8) of \cite{DK18}) that \begin{align}\label{mu} \frac{\mu(x)}{x(x-1)}\eq\frac{\Lambda(\left\{0\right\})}{2}+\int_0^1 (1-z)^{x-2}h(z) \, dz. \end{align}
If $\Lambda(\{0\})>0$, then our claim is obvious because the first term of the right-hand side of \eqref{mu} dominates the integral as $x\rightarrow\infty$ implying $\mu(x)/x^2\sim\Lambda(\{0\})/2=H(0)$ and, therefore, $\alpha=2$. Thus, let us assume that $\Lambda(\{0\})=0$. Let \begin{align*} \mathcal{L}(x)\ :=\ \int_0^1 e^{-zx} h(z)\, dz \end{align*} be the Laplace transform of $H$. In view of a Tauberian theorem (see Theorem~3 and Theorem~2 in Section XIII.5 of \cite{Fell71}), it is sufficient to prove that \begin{align}\label{int_sim} \mathcal{L}(x)\ \sim\ \frac{\mu(x)}{x^2} \end{align} as $x\rightarrow\infty$. For $\frac{1}{2}<\delta<1$, let us consider the decomposition \begin{align}\label{sum_int} \frac{\mu(x)}{x(x-1)} \eq \int_0^{x^{-\delta}} (1-z)^{x-2} h(z)\, dz+\int_{x^{-\delta}}^1 (1-z)^{x-2} h(z)\, dz. \end{align} Because of $\delta<1$ and \eqref{H(1)}, we have \begin{align} \int_{x^{-\delta}}^1 (1-z)^{x-2} h(z)\, dz \loe (1-x^{-\delta})^{x-2} \int_{x^{-\delta}}^1h(z)\, dz\loe e^{-x^{-\delta}(x-2)}\,H(1) \eq\oo\left(x^{-1}\right) \end{align} as $x\rightarrow\infty$. In particular, the second integral in the decomposition \eqref{sum_int} can be neglected in the limit $x\rightarrow\infty$ since $\mu(x)/(x(x-1))\geq \mu'(1)/x$ due to Lemma \ref{prop} (iii). As to the first integral in \eqref{sum_int}, observe for $\delta>\frac{1}{2}$ that \[-\log{\frac{(1-z)^{x-2}}{e^{-zx}}}\eq \OO(x^{1-2\delta})\ \longrightarrow \ 0\] uniformly for $z\in[0,x^{-\delta}]$ as $x\to\infty$ and, therefore, \begin{align}\label{int1} \int_0^{x^{-\delta}} (1-z)^{x-2} h(z)\, dz\ \sim\ \int_0^{x^{-\delta}} e^{-zx} h(z)\, dz. \end{align} Also note that \begin{align}\label{int2} \int_{x^{-\delta}}^1e^{-zx}h(z)dz\loe e^{-x^{1-\delta}}H(1) \eq\oo\left(x^{-1}\right) \end{align} as $x\to\infty$. Combining \eqref{sum_int} to \eqref{int2} entails \[\int_0^1 (1-z)^{x-2} h(z) dz\ \sim \ \mathcal{L}(x).\] Hence, along with formula \eqref{mu}, this proves the asymptotics in \eqref{int_sim}. Moreover, from Lemma~\ref{prop}~(ii) we get $1\leq\alpha\leq 2$.
(ii) If $1<\alpha<2$, then $\Lambda(\{0\})=0$. Lemma \ref{deriv} provides that for $\al<2$ the function $H(u)$ is regularly varying with exponent $2-\al$ iff $h(u)$ is regularly varying with exponent $1-\al$ and then \[(2-\alpha)H(u)\ \sim\ uh(u)\] as $u\to 0^+$. Applying Lemma \ref{deriv} once more for $\al>1$, $h(u)$ is regularly varying with exponent $1-\al$ iff $\int_{(u,1]}\frac{\Lambda(dp)}{p^2}$ is regularly varying with exponent $-\al$ and then \[(\alpha-1)h(u)\ \sim\ u\int_{(u,1]}\frac{\Lambda(dp)}{p^2}\] as $u\to 0^+$. Bringing both asymptotics together with statement (i) finishes the proof. \end{proof}
\section{The length of a random external branch} \label{sec_rand} Recall that $T^n$ denotes the length of an external branch picked at random. The following result on its distribution function does not only play a decisive role in the proofs of Theorem~\ref{dustless} and~\ref{indep} but is also of interest on its own. It shows that the distribution of $T^n$ is primarily determined by the rate function $\mu$.
\begin{theorem} \label{thm:int} For a $\Lambda$-coalescent without a dust component and a sequence $\left(r_n\right)_{n\in\N }$ satisfying $1< r_n\leq n$ for all $n\in\N $, we have \begin{align}\label{mu_int} \PP\left(T^{n}> \int_{r_n}^n\frac{dx}{\mu(x)}\right) \eq\frac{\mu(r_n)}{\mu(n)}+\oo(1) \end{align} as $n\to\infty$. Moreover, \begin{align}\label{ineq} \left(\frac{r_n}{n}\right)^2+\oo(1) \loe \PP\left(T^{n}> \int_{r_n}^n\frac{dx}{\mu(x)}\right) \loe \frac{r_n}{n}+\oo(1) \end{align} as $n\to\infty$. \end{theorem}
Observe that the integral $\int_{r_n}^n\frac{dx}{\mu(x)}$ is the asymptotic time needed to go from $n$ to $r_n$ lineages according to Proposition \ref{ErgLem1}.
For the proof, we recall our notations. $N_n=(N_n(t))_{t\ge 0}$ denotes the block counting process, with the embedded Markov chain $X=(X_j)_{j\in\N_0}$. In particular, we have $N_n(0)=X_0=n$ and we set $X_j=1$ for $j\geq\tau_n$, where $\tau_n$ is defined as the total number of merging events. The waiting time of the process $N_n$ in state $X_j$ is again referred to as $W_j$ for $0\leq j\leq \tau_n-1$. The number of merging events until the external branch ending in leaf $i\in\left\{1,\ldots,n\right\}$ coalesces is given by \[\zeta_i^n\ :=\ \max{\left\{j\geq 0: \ \left\{i\right\}\in\Pi_n(W_0+\cdots+W_{j-1})\right\}}.\] Similarly, $\zeta^n$ denotes the corresponding number of a random external branch with length $T^n$.
\begin{proof}[Proof of Theorem \ref{thm:int}] For later purposes, we show the stronger statement \begin{align}\label{cond}
\PP\left(T^{n} > \int_{r_n}^n\frac{dx}{\mu(x)}\,\bigg|\,N_n\right) \eq \frac{\mu(r_n)}{\mu(n)}+\oo_P(1) \end{align} as $n\to\infty$. It implies \eqref{mu_int} by taking expectations and using dominated convergence. The statement \eqref{ineq} is a direct consequence in view of Lemma \ref{prop} (ii).
In order to prove \eqref{cond}, note that, by the standard subsubsequence argument and the metrizability of the convergence in probability, we can assume that $r_n/n$ converges to some value $q$ with $0\leq q\leq 1$. We distinguish three different cases of asymptotic behavior of the sequence $r_n/n$:
(a) We begin with the case $r_n\sim qn$ as $n\to\infty$, where $0<q<1$. Then there exist $q_1,q_2\in\left(0,1\right)$ such that $q_1n\leq r_n\leq q_2n$ for all but finitely many $n\in\N $. \\ Let us first consider the discrete embedded setting and afterwards insert the time component. Since there are $\Delta X_{1}+1$ branches involved in the first merger, we have \[\PP\left(\zeta^n\ge 1\mid N_n\right) \eq 1-\frac{\Delta X_1+1}{X_0} \eq \frac{X_1-1}{X_0}\quad \text{a.s.}\] Iterating this formula, it follows that \[\PP\left(\zeta^n\ge k\mid N_n\right) \eq \prod_{j=0}^{k-1}\frac{X_{j+1}-1}{X_j}\eq \frac{X_k-1}{n-1}\prod_{j=0}^{k-1}\left(1-\frac{1}{X_j}\right)\quad \text{a.s.}\]
for $k\geq 1$. For a combinatorial treatment of this formula, see \cite[Lemma 4]{DK18}. Note that $\sum_{j=0}^{k-1}X_j^{-2}\leq\sum_{m=X_{k-1}}^\infty m^{-2}\leq 2\left(X_{k-1}\right)^{-1}$ to obtain via a Taylor expansion that \begin{align}\label{Mac}
\PP\left(\zeta^n\geq k\left.\right|N_n\right)\eq\frac{X_k-1}{n-1}\exp{\Bigg(-\sum_{j=0}^{k-1}\frac{1}{X_j}+\OO\left(X_{k-1}^{-1}\right)\Bigg)} \qquad \text{a.s.} \end{align} as $n\rightarrow\infty$.
We would like to evaluate this quantity at the stopping times
\[\rho_{r_n}\ :=\ \min\{j\geq 0:\, X_j\leq r_n\}.\]
From Lemma \ref{prop} (i) and (iii), we know that the function $\mu(x)$ is increasing in $x$ and that $x/\mu(x)$ converges in the dustless case to $0$ as $x\to\infty$. In view of $r_n\geq q_1n $, therefore, we have \[\int_{r_n}^n\frac{dx}{\mu(x)}\loe \frac{n-r_n}{\mu(r_n)} \loe \left(\frac{1}{q_1}-1\right)\frac{r_n}{\mu(r_n)} \eq \oo(1).\] Hence, we may apply Proposition \ref{ErgLem2} and obtain \[\sum_{j=0}^{\rho_{r_n}-1}\frac{1}{X_j}\eq\log{\left(\frac{\mu(n)}{n}\frac{X_{\rho_{r_n}}}{\mu(X_{\rho_{r_n}})}\right)}+\oo_P(1).\] Also, Lemma 3 of \cite{DK18} implies \[X_{\rho_{r_n}} \eq r_n +\OO_P\left(\Delta X_{\rho_{r_n}}\right)\eq r_n+\oo_P(X_{\rho_{r_n}}).\]
Inserting these two estimates into equation \eqref{Mac} and using Lemma \ref{prop} (ii), it follows \begin{align}\label{discrete}
\PP\left(\zeta^n\geq \rho_{r_n}\left.\right|N_n\right) \eq \frac{X_{\rho_{r_n}}-1}{n-1}\frac{\mu\left(X_{\rho_{r_n}}\right)}{X_{\rho_{r_n}}}\frac{n}{\mu\left(n\right)} (1+\oo_P(1))\eq \frac{\mu\left(r_n\right)}{\mu\left(n\right)}+\oo_P(1). \end{align}
In order to transfer this equality to the continuous-time setting, we first show that for each sufficiently small $\e\in(0,1)$ there is a $\delta>0$ such that \begin{align}\label{15} (1+\delta)\int_{(1+\e)r_n}^n\frac{dx}{\mu(x)}\ <\ \int_{r_n}^n\frac{dx}{\mu(x)}\ <\ (1-\delta)\int_{(1-\e)r_n}^n\frac{dx}{\mu(x)} \end{align} for large $n\in\N $. For the proof of the left-hand inequality, note that due to Lemma \ref{prop} (ii) we have \[\frac{1}{n-(1+\e)r_n}\int_{(1+\e)r_n}^n\frac{dx}{\mu(x)}\ \leq\ \frac{1}{n-r_n}\int_{r_n}^n\frac{dx}{\mu(x)}\] implying with $q_1n\leq r_n$ that \[\frac{1}{1-\e\frac{q_1}{1-q_1}}\int_{(1+\e)r_n}^n\frac{dx}{\mu(x)}\ \leq\ \frac{1}{1-\e\frac{r_n}{n-r_n}}\int_{(1+\e)r_n}^n\frac{dx}{\mu(x)}\ \leq\ \int_{r_n}^n\frac{dx}{\mu(x)}.\] These inequalities show how to choose $\delta>0$. The right-hand inequality in \eqref{15} follows along the same lines.\\ Now, recalling the notation \[\widetilde{\rho}_{r_n}\ :=\ \inf\{t\geq 0:\, N_n(t)\leq r_n\},\] Proposition \ref{ErgLem1} gives for sufficiently small $\e>0$ the formula \begin{align}\label{rho_tilde} \widetilde{\rho}_{r_n(1+\e)}\eq \int_{r_n(1+\e)}^n\frac{dx}{\mu\left(x\right)}\left(1+\oo_P(1)\right) \end{align} as $n\rightarrow\infty$. Combining \eqref{discrete}, \eqref{15} and \eqref{rho_tilde} yields \begin{align*}
\PP\Big(T^n>&\int_{r_n}^n\frac{dx}{\mu(x)}\ \bigg|\, N_n\Big)\\[1ex]
& \loe \PP\bigg(T^n\geq\left(1+\delta\right)\int_{r_n(1+\e)}^n\frac{dx}{\mu(x)}\ \bigg|\, N_n\bigg)\\[1ex]
& \loe \PP\left(\left.T^n\geq\widetilde{\rho}_{r_n(1+\e)}\right|N_n\right)+\PP\bigg(\left(1+\delta\right)\int_{r_n(1+\e)}^n\frac{dx}{\mu(x)}<\widetilde{\rho}_{r_n(1+\e)}\ \bigg|\, N_n\bigg)\\[1ex]
& \eq \ \PP\left(\left.\zeta^n\geq \rho_{r_n(1+\e)}\right|N_n\right)+\oo_P(1)\\[1ex] & \eq \ \frac{\mu(r_n(1+\e))}{\mu(n)}+\oo_P(1)\\[1ex] & \loe \ \frac{\mu(r_n)}{\mu(n)}\left(1+\e\right)^2+\oo_P(1), \end{align*} where we used Lemma \ref{prop} (ii) for the last inequality. With this estimate holding for all $\e>0$, we end up with \begin{align*}
\PP\left(\left.T^n>\int_{r_n}^n\frac{dx}{\mu(x)}\right|N_n\right)\loe \frac{\mu(r_n)}{\mu(n)}+\oo_P(1) \end{align*} as $n\to\infty$. The reverse inequality can be shown in the same way so that we obtain equation \eqref{cond}.
(b) Now we turn to the two remaining cases $r_n\sim n$ and $r_n=\oo(n)$. In view of Lemma \ref{prop} (ii), the asymptotic relation $r_n\sim n$ implies $\mu(r_n)\sim\mu(n)$, i.e., the right-hand side of \eqref{cond} converges to $1$. Furthermore, the sequence $(r_n')_{n\in\N }:=(qr_n)_{n\in\N }$, $0<q<1$, fulfills the requirements of part (a). Together with Lemma \ref{prop} (ii), part (a) therefore entails, for all $q\in(0,1)$, \begin{align*}
\PP\left(\left.T^n>\int_{r_n}^n\frac{dx}{\mu(x)}\right|N_n\right)\goe\PP\left(\left.T^n>\int_{r_n'}^n\frac{dx}{\mu(x)}\right|N_n\right)
\goe \frac{\mu(qn)}{\mu(n)} +\oo_P(1) \goe q^2 +\oo_P(1) \end{align*} as $n\to\infty$. Hence, the left-hand side of \eqref{cond} also converges to $1$ in probability. Similarly, the convergence of both sides of \eqref{cond} to $0$ can be shown for $r_n=\oo(n)$. \end{proof}
\section{Proofs of Theorems \texorpdfstring{\protect\ref{dustless} and \protect\ref{indep}}{1.1 and 1.2}}\label{sec_proofs}
\begin{proof}[Proof of Theorem \ref{dustless}] Let $r_n$ be as required in Theorem \ref{thm:int}. Applying Lemma \ref{prop} (ii), we obtain \[\int_{r_n}^n\frac{dx}{x} \loe \frac{\mu(n)}{n}\int_{r_n}^n\frac{dx}{\mu(x)} \loe \int_{r_n}^n\frac{n-1}{x(x-1)}dx.\] Observing \[\int_{r_n}^n\frac{dx}{x} \eq \log{\frac{n}{r_n}}\] and \[\int_{r_n}^n\frac{n-1}{x(x-1)}dx \eq (n-1)\log{\frac{r_n-nr_n}{n-nr_n}},\] Theorem \ref{thm:int} entails \begin{align}\label{estim_down} \PP\left(\frac{\mu(n)}{n}\,T^{n} > \log{\frac{n}{r_n}}\right)\goe \left(\frac{r_n}{n}\right)^2+\oo(1) \end{align} and \begin{align}\label{estim_up} \PP\left(\frac{\mu(n)}{n}\,T^{n} > (n-1)\log{\frac{r_n-nr_n}{n-nr_n}}\right) \loe \frac{r_n}{n}+\oo(1) \end{align} \enlargethispage{2\baselineskip} as $n\to\infty$, respectively. \\
Now let $t\geq 0$. Using equation \eqref{estim_down} for \[r_n \eq ne^{-t},\] while choosing \[r_n \eq \frac{ne^{t/(n-1)}}{1+n(e^{t/(n-1)}-1)}\] in \eqref{estim_up}, we arrive at \[e^{-2t}+\oo(1) \loe \PP\left(\frac{\mu(n)}{n}\,T^{n}> t\right) \loe \frac{e^{t/(n-1)}}{1+n(e^{t/(n-1)}-1)}+\oo(1) \eq \frac{1}{1+t}\left(1+\oo(1)\right) ,\] as required. \end{proof}
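The elementary algebra behind the two choices of $r_n$ above can be double-checked numerically. The following short Python sketch (illustrative values only) confirms that the first choice solves $\log(n/r_n)=t$, that the second solves $(n-1)\log\frac{r_n-nr_n}{n-nr_n}=t$, and that the resulting upper bound $r_n/n$ approaches $1/(1+t)$.

\begin{verbatim}
import math

def check(n, t):
    r1 = n * math.exp(-t)                       # first choice of r_n
    lhs1 = math.log(n / r1)                     # should equal t
    e = math.exp(t / (n - 1))
    r2 = n * e / (1 + n * (e - 1))              # second choice of r_n
    lhs2 = (n - 1) * math.log((r2 - n * r2) / (n - n * r2))   # should equal t
    return lhs1, lhs2, r2 / n, 1.0 / (1.0 + t)  # r2/n tends to 1/(1+t)

for n in (10**3, 10**6):
    print(check(n, t=2.0))
\end{verbatim}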
\begin{proof}[Proof of Theorem \ref{indep}] First, we treat the dustless case. Similar to the proof of Theorem \ref{thm:int}, we first consider the discrete version $\zeta_i^n$ of $T_i^n$ for $1\leq i\leq k$ to prove \begin{align}\label{discrete2}
\PP\left(\zeta_1^n\geq I_1^n,\,\ldots,\,\zeta_k^n\geq I_k^n\left.\right|N_n\right) \eq \PP\left(\zeta_1^n\geq I_1^n\left.\right|N_n\right)\,\cdots\,\PP\left(\zeta_k^n\geq I_k^n\left.\right|N_n\right)+\oo_P(1) \end{align} as $n\to\infty$, where $0=:I_0^n\leq I_1^n\leq\cdots\leq I_k^n$ are random variables measurable with respect to the $\sigma$-fields $\sigma\left(N_n\right)$. Denote by $\zeta_A$ the number of mergers until some external branch out of the set $A\subseteq\left\{1,\ldots,n\right\}$ coalesces and let $a:=\#A$. Given $\Delta X_j$, the $j$-th merging amounts to choosing $\Delta X_{j}+1$ branches uniformly at random out of the $X_j$ present ones implying \begin{align}\label{modul}
\PP\left(\zeta_A\geq m\left.\right|N_n\right)\eq\frac{(X_m-1)\cdots(X_m-a)}{(n-1)\cdots(n-a)}\prod_{j=0}^{m-1}\left(1-\frac{a}{X_j}\right) \qquad a.s. \end{align} for $m\geq 1$ (for details see (28) of \cite{DK18}). Let $\bar{\zeta}_{\left\{1,\ldots,k\right\}}:=\zeta_{\left\{1,\ldots,k\right\}}$ and $\bar{\zeta}_{\left\{i,\ldots,k\right\}}:=\zeta_{\left\{i,\ldots,k\right\}}-\zeta_{\left\{i-1,\ldots,k\right\}}$ for $2\leq i\leq k$. Moreover, let $\widebar{N}_{X_j}(t):=N_n(t+W_0+\cdots+W_{j-1})$, in particular, $\widebar{N}_{X_0}(t):=N_n(t)$. The Markov property and \eqref{modul} provide \enlargethispage{2\baselineskip} \begin{align*}
\PP\Big(\zeta_1^n\geq I_1^n,&\ldots,\zeta_k^n\geq I_k^n\Big|N_n\Big)\\[1.5ex]
& \eq \prod_{i=1}^{k}\PP\Big(\bar{\zeta}_{\left\{i,\ldots,k\right\}}\geq I_i^n-I_{i-1}^n\Big|\widebar{N}_{X_{I_{i-1}^n}}\Big)\\[1.5ex] & \eq \prod_{i=1}^{k}\left[\frac{(X_{I_i^n}-1)\cdots(X_{I_i^n}-k+i-1)}{(X_{I_{i-1}^n}-1)\cdots(X_{I_{i-1}^n}-k+i-1)}\prod_{j=I_{i-1}^n}^{I_i^n-1}\left(1-\frac{k-i+1}{X_j}\right)\right]\\[1.5ex] & \eq \prod_{i=1}^{k}\left[\frac{(X_{I_i^n}-k+i-1)}{(n-k+i-1)}\prod_{j=I_{i-1}^n}^{I_i^n-1}\left(1-\frac{k-i+1}{X_j}\right)\right] \qquad a.s. \end{align*} For $1\leq i\leq k$, note that \[\left(1-\frac{k-i+1}{X_j}\right)\eq\left(1-\frac{1}{X_j}\right)^{k-i+1}+\OO\left(X_j^{-1}\right)\] and \[\frac{X_{I_i^n}-k+i-1}{n-k+i-1}\eq\frac{X_{I_i^n}-1}{n-1}+\OO\left(n^{-1}\right)\] to obtain \begin{align*}
\PP\big(\zeta_1^n\geq I_1^n,&\ldots,\zeta_k^n\geq I_k^n\left.\right|N_n\big)\\[1.5ex]
& \eq \prod_{i=1}^{k}\left[\left(\frac{X_{I_i^n}-1}{n-1}+\OO\left(n^{-1}\right)\right)\left(\,\prod_{j=I_{i-1}^n}^{I_i^n-1}\left(1-\frac{1}{X_j}\right)^{k-i+1}+\OO\left(\left(X_{I_i^n}-1\right)^{-1}\right)\right)\right]\\[1.5ex]
& \eq \prod_{i=1}^{k}\left[\frac{X_{I_i^n}-1}{n-1}\,\prod_{j=I_{i-1}^n}^{I_i^n-1}\left(1-\frac{1}{X_j}\right)^{k-i+1}\right]+\oo_P(1)\\[1.5ex] &\eq \prod_{i=1}^{k}\left[\frac{X_{I_i^n}-1}{n-1}\prod_{j=0}^{I_i^n-1}\left(1-\frac{1}{X_j}\right)\right]+\oo_P(1) \end{align*} as $n\to\infty$, where the rightmost $\OO(\cdot)$-term in the first line stems from the fact that $X_{I_i^n}<X_j$ for all $ j< I_i^n$. Furthermore, from \eqref{modul} with $A=\{i\}$, we know that
\[\PP\left(\zeta_i^n\geq I_i^n\left|\right.N_n\right)\eq\frac{X_{I_i^n}-1}{n-1}\prod_{j=0}^{I_i^n-1}\left(1-\frac{1}{X_j}\right) \qquad a.s.\] so that we arrive at equation \eqref{discrete2}. \\ Now based on exchangeability, it is no loss to assume that $0\leq t_1^n\leq\cdots\leq t_k^n$. So inserting \[I_i^n\ :=\ \min\bigg\{k\geq 1:\ \sum_{j=0}^{k-1} W_j > t_i^n\bigg\}\wedge\tau_n\] in \eqref{discrete2} yields \enlargethispage{2\baselineskip} \begin{align*}
\PP\left(\left.T_1^n\,>\, t_1^n,\,\ldots,\,T_k^n\,>\, t_k^n\right|N_n\right) & \eq \PP\left(\zeta_1^n\geq I_1^n,\,\ldots,\,\zeta_k^n\geq I_k^n\left.\right|N_n\right)\\[.5ex]
& \eq \prod_{i=1}^k \PP\left(\zeta_i^n\geq I_i^n\left.\right|N_n\right) +\oo_P(1)\\
&\eq \prod_{i=1}^k \PP\left(\left.T_i^n\,>\, t_i^n\right|N_n\right) +\oo_P(1) \end{align*} as $n\to\infty$. For $1\leq i\leq k$, let $1<r_i^n\leq n$ be defined implicitly via \[t_i^n\eq \int_{r_i^n}^n\frac{dx}{\mu(x)}\,.\] From Lemma \ref{prop} (iii) we know that $\int_1^n\frac{dx}{\mu(x)}=\infty\,$; therefore, $r_i^n$ is well-defined. In the dustless case, consequently, we may apply formula \eqref{cond} to obtain \begin{align*}
\PP\left(\left.T_1^n\,>\, t_1^n,\,\ldots,\,T_k^n\,>\, t_k^n\right|N_n\right) & \eq \prod_{i=1}^k \PP\left(\left.T_i^n\,>\, t_i^n\right|N_n\right) +\oo_P(1) \\[.5ex] & \eq \prod_{i=1}^k\frac{\mu(r_i^n)}{\mu(n)}+\oo_P(1) \end{align*} as $n\to\infty$. Taking expectations in this equation yields, via dominated convergence, the theorem's claim for $\Lambda$-coalescents without a dust component.
For $\Lambda$-coalescents with dust, we use for $t>0$ the formula \[\lim_{n\to\infty}\PP\left(T_1^n>t,\ldots,T_k^n>t\right) \eq \E\left[S_t^k\right],\] with non-degenerate positive random variables $S_t$ (see (10) in \cite{Moe10}). In particular, $\lim_{n\to\infty}\PP\left(T_1^n>t\right)=\E\left[S_t\right]$. For $k\geq 2$, Jensen's inequality, which is strict here because $S_t$ is non-degenerate, implies \[\lim_{n\to\infty}\PP\left(T_1^n>t,\ldots,T_k^n>t\right) \eq \E\left[S_t^k\right]\ >\ \E\left[S_t\right]^k \eq \lim_{n\to\infty}\prod_{i=1}^k\PP\left(T_i^n>t\right),\] so that the external branch lengths fail to be asymptotically independent in the presence of dust. This finishes the proof.
\end{proof}
\section{Proof of Theorem \texorpdfstring{\protect\ref{iff}}{1.3}}\label{sec_proof}
(a) First suppose that $\mu(x)$ is regularly varying with exponent $\alpha\in[1,2]$, i.e., we have \begin{align}\label{varying} \mu(x)\eq x^\alpha L(x), \end{align} where $L$ is a slowly varying function. Let $r_n:=qn$ with $0<q\leq 1$. The statement of Theorem~\ref{thm:int} then boils down to \begin{align}\label{continuous2} \PP\left(\frac{\mu(n)}{n}T^{n}\,>\, \frac{1}{n} \int_{qn}^n\frac{\mu(n)}{\mu(x)}dx\right) \eq q^\alpha+\oo(1) \end{align} as $n\rightarrow\infty$. From \eqref{varying} we obtain \[n^{-1}\int_{qn}^n\frac{\mu\left(n\right)}{\mu\left(x\right)}dx \ \sim\ \begin{cases} \; -\log{q} \quad &\text{ for } \quad \alpha =1\\[1ex] \; \frac{1}{\alpha-1}\left(q^{-(\alpha-1)}-1\right) \quad &\text{ for } \quad 1<\alpha\leq 2 \end{cases}\] as $n\rightarrow\infty$. Thus, choosing, for given $t\geq 0$, \[q \eq \begin{cases} \; e^{-t} \quad &\text{ for } \quad \alpha=1\\[1ex] \; \left(1+\left(\alpha-1\right)t\right)^{-\frac{1}{\alpha-1}} \quad &\text{ for } \quad 1<\alpha\leq 2 \end{cases}\] in equation (\ref{continuous2}) yields the claim.
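Before turning to the converse direction, let us note that for the pure power case $\mu(x)=x^\alpha$ (i.e., $L\equiv 1$, an assumption made only for this illustration) the integral asymptotics and the choice of $q$ can be verified numerically, for instance as follows.

\begin{verbatim}
import math

def scaled_integral(n, q, alpha, steps=200_000):
    # midpoint-rule approximation of (1/n) * int_{qn}^{n} (n/x)^alpha dx
    a, b = q * n, float(n)
    h = (b - a) / steps
    return sum((n / (a + (i + 0.5) * h)) ** alpha * h for i in range(steps)) / n

def claimed_limit(q, alpha):
    # right-hand side from the proof (exact when L == 1)
    return -math.log(q) if alpha == 1 else (q ** (1 - alpha) - 1) / (alpha - 1)

t = 3.0
for alpha in (1, 1.5, 2):
    q = math.exp(-t) if alpha == 1 else (1 + (alpha - 1) * t) ** (-1 / (alpha - 1))
    print(alpha, scaled_integral(10**4, q, alpha), claimed_limit(q, alpha), t)
\end{verbatim}

For each $\alpha$ the three printed values agree, i.e., the chosen $q$ indeed turns the threshold in \eqref{continuous2} into $t$.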
(b) Now suppose that $\gamma_n\,T^n$ converges for some positive sequence $(\gamma_n)_{n\in\N }$ in distribution as $n\rightarrow\infty$ to a probability measure unequal to $\delta_0$ with cumulative distribution function $F=1-\widebar{F}$, i.e., \begin{align}\label{=>} \PP\left(\gamma_n\, T^n> t\right)\ \stackrel{n\to\infty}{\longrightarrow} \ \widebar{F}(t) \end{align} for $t\geq 0$, $t\notin D$, where $D$ denotes the set of discontinuities of $\widebar{F}$. Note that $0<\widebar{F}(t)<1$ for all $t>0$ due to Theorem \ref{dustless}. In order to prove that $\mu$ is regularly varying, we bring together the assumption \eqref{=>} with the statement of Theorem \ref{thm:int}, which requires several steps.
For this purpose we define, similarly as in the proof of Theorem \ref{indep}, the numbers $r_n(t)$ for $t\geq 0$ implicitly via \begin{align}\label{chi} t\eq \gamma_n\int_{r_n(t)}^n\frac{dx}{\mu(x)}\,. \end{align} Let us first solve this implicit equation. Applying formula \eqref{cond} and \eqref{=>}, we obtain \begin{align}\label{sim} \frac{\mu(r_n(t))}{\mu(n)} \eq \widebar{F}(t)+\oo(1) \end{align} for all $t\geq 0$, $t\notin D$, as $n\to\infty$. Differentiating both sides of \eqref{chi} with respect to $t$ and using Lemma \ref{prop} (i) yields
\[\left|\frac{\gamma_n\,r'_n(t)}{\mu(n)}\right|\eq\frac{\mu(r_n(t))}{\mu(n)} \loe 1.\] In conjunction with \eqref{sim}, it follows that \[\frac{\gamma_n\,r'_n(t) }{\mu(n)}\eq-\widebar{F}(t)+\oo(1)\] and, by dominated convergence, \begin{align}\label{r_n} r_n(t)\eq n-\frac{\mu(n)}{\gamma_n}\left(\int_0^t\widebar{F}(s)ds +\oo\left(1\right)\right) \end{align} as $n\to\infty$.
Next, we show that \(\gamma_n \sim \ c\mu'(n) \) for some $c>0$. From Theorem \ref{dustless} it follows that there exist $0<c_1\leq c_2<\infty$ with \begin{align}\label{mu_eta} c_1\,\frac{\mu(n)}{n}\loe\gamma_n\loe c_2\,\frac{\mu(n)}{n}, \qquad n\geq 2. \end{align}
Furthermore, from equation \eqref{r_n} and a Taylor expansion, we get \[\mu(r_n(t))\eq\mu(n)+\mu'(n)\left(r_n(t)-n\right)+\frac{1}{2}\mu''\left(\xi_n\right)\left(r_n(t)-n\right)^2,\] where $r_n(t)\leq\xi_n\leq n$. Dividing this equation by $\mu(n)$, using \eqref{sim} and \eqref{r_n}, as well as rearranging terms, we obtain
\[\bigg|1-\widebar{F}(t)+\oo(1)-\frac{\mu'(n)}{\gamma_n}\int_0^t\widebar{F}(s)ds\left(1+\oo(1)\right)\bigg| \eq \frac{\mu''(\xi_n)\mu(n)}{2\gamma_n^2}\left(\int_0^t\widebar{F}(s)ds\right)^2\left(1+\oo(1)\right)\] as $n\to\infty$. From Lemma \ref{prop} (iii) and (i), we get $\mu''(\xi_n)\leq\mu'(\xi_n)/(\xi_n-1)\leq \mu'(n)/(r_n(t)-1)$. Moreover, equation \eqref{r_n} with \eqref{mu_eta} yields $r_n(t)-1\geq n/2+\oo(n)$ for $t$ sufficiently small. Taking \eqref{mu_eta} once more into account, we obtain that for given $\e>0$ and $t$ sufficiently small, \begin{align*}
\bigg|1-\widebar{F}(t)+\oo(1)-\frac{\mu'(n)}{\gamma_n}\int_0^t\widebar{F}(s)ds\left(1+\oo(1)\right)\bigg| &\loe \frac{\mu'(n)}{c_1\gamma_n}\left(\int_0^t\widebar{F}(s)ds\right)^2\left(1+\oo(1)\right)\\[1ex] & \loe \e\,\frac{\mu'(n)}{\gamma_n}\left(\int_0^t\widebar{F}(s)ds\right)\left(1+\oo(1)\right) \end{align*} or equivalently, for $t>0$,
\[\bigg|\frac{\gamma_n}{\mu'(n)}\ -\ \frac{\int_0^t\widebar{F}(s)ds}{1-\widebar{F}(t)}\left(1+\oo(1)\right)\bigg|\loe \e\,\frac{\int_0^t\widebar{F}(s)ds}{1-\widebar{F}(t)}\left(1+\oo(1)\right).\] The right-hand quotient is finite and positive for all $t>0$, which implies our claim $\gamma_n\sim c\mu'(n)$ for some $c>0$.
We now remove $\gamma_n$ from our equations by setting $\gamma_n=\mu'(n)$, without loss of generality. With this choice \eqref{mu_eta} changes into \begin{align*} c_1\,\frac{\mu(n)}{n}\loe \mu'(n) \loe c_2\,\frac{\mu(n)}{n}, \qquad n\geq 2. \end{align*} Also, inserting \eqref{r_n} and \eqref{mu_eta} in \eqref{sim} yields \begin{align*} \mu(n)\widebar{F}(t)\left(1+\oo(1)\right)\eq\mu\left(r_n(t)\right)\eq\mu\left(n-\frac{\mu(n)}{\mu'(n)}\int_0^t\widebar{F}(s)ds+\oo(n)\right) \end{align*} as $n\to\infty$. Let us suitably remodel these formulae. In view of the monotonicity properties of $\mu$ and $\mu'$ due to Lemma \ref{prop} (i), we may proceed to \begin{align} \label{mu_eta1}c_3\frac{\mu(x)}{x}\loe\mu'(x)\loe c_4\frac{\mu(x)}{x},\quad x\geq 2, \end{align} for suitable $0< c_3\leq c_4<\infty$, as well as \begin{align}\nonumber \mu(x)\widebar{F}(t)& \eq \mu\left(x-\frac{\mu(x)}{\mu'(x)}\int_0^t\widebar{F}(s)ds+\oo(x)\right)\left(1+\oo(1)\right) \\[1ex] &\eq \mu\left(x-\frac{\mu(x)}{\mu'(x)}\int_0^t\widebar{F}(s)ds+\oo(x)\right) \label{mu_phi} \end{align} as $x\to\infty$, where we pushed the $(1+\oo(1))$-term into $\mu$ by means of Lemma \ref{prop} (ii). This equation suggests to pass to the inverse of $\mu$. From Lemma~\ref{prop}~(i) we know that $\mu(x)$ has an inverse $\nu(y)$. For this function, formula \eqref{mu_eta1} translates into \begin{align}\label{phi_d} \frac{\nu(y)}{c_4y}\loe\nu'(y)\loe\frac{\nu(y)}{c_3y}. \end{align} Also, applying $\nu$ to equation \eqref{mu_phi}, both inside and outside, we get \[\nu \left(y\widebar{F}(t)\right)\eq\nu(y)-y\,\nu'(y)\int_0^t\widebar{F}(s)ds+\oo(\nu(y)).\]
This equation allows us, in a next step, to further analyse $\widebar{F}$. With $0\leq u<v$, $u,v\notin D$, it follows that \begin{align}\label{diff_phi} \nu\left(\widebar{F}(u)y\right)-\nu\left(\widebar{F}(v)y\right)\eq y\,\nu'(y)\int_u^v\widebar{F}(s)ds \left(1+\oo(1)\right) \end{align} as $y\to\infty$. This equation immediately implies that $\widebar{F}(v)<\widebar{F}(u)$ for all $u<v$. It also shows that $\widebar{F}$ has no jump discontinuities, i.e., $D=\emptyset$. Indeed, by the mean value theorem and because $\nu'(y)=1/\mu'(\nu(y))$ is decreasing due to Lemma \ref{prop} (i), we have for $0\leq u<v$, \[\nu\left(\widebar{F}(u)y\right)-\nu\left(\widebar{F}(v)y\right)\goe\nu'(y\widebar{F}(u))y\left(\widebar{F}(u)-\widebar{F}(v)\right)\goe\nu'(y)y\left(\widebar{F}(u)-\widebar{F}(v)\right).\] Thus, also assuming $u,v\notin D$, \eqref{diff_phi} yields \[\widebar{F}(u)-\widebar{F}(v)\loe\int_u^v\widebar{F}(s)ds\loe v-u,\] which implies $D=\emptyset$.
Now, we are ready to show that $\nu$ and, therefore, $\mu$ is regularly varying. By a Taylor expansion, we get \[\nu(\widebar{F}(v)y)-\nu(\widebar{F}(u)y)\eq-\nu'(\widebar{F}(u)y)y(\widebar{F}(u)-\widebar{F}(v))+\frac{1}{2}\nu''(\xi_y)y^2(\widebar{F}(u)-\widebar{F}(v))^2,\] where $\widebar{F}(v)y\leq\xi_y\leq\widebar{F}(u)y$. Dividing this equation by $y\nu'(y)$, using formula \eqref{diff_phi} and rearranging terms, it follows that for $y\to\infty$,
\begin{align}
\label{new^2}\left|\int_u^v\widebar{F}(s)ds(1+\oo(1))\ -\ \frac{\nu'(\widebar{F}(u)y)}{\nu'(y)}\left(\widebar{F}(u)-\widebar{F}(v)\right)\right| \eq\frac{1}{2}\frac{\nu''(\xi_y)y}{\nu'(y)}(\widebar{F}(u)-\widebar{F}(v))^2. \end{align} Next, let us bound the right-hand term. Note that from Lemma \ref{prop} (iii) we have, for $y$ sufficiently large,
\[\left|\nu''(y)\right|\eq\nu'(y)^2\frac{\mu''(\nu(y))}{\mu'(\nu(y))}\loe\frac{\nu'(y)^2}{\nu(y)-1} \loe \frac{2\nu'(y)^2}{\nu(y)} \,.\]
Hence, using \eqref{phi_d} twice and $\widebar{F}(v)y\leq\xi_y\leq\widebar{F}(u)y$, it follows, for $y$ sufficiently large, \[\frac{1}{2}\nu''(\xi_y) \loe \frac{\nu'(\xi_y)^2}{\nu(\xi_y)} \loe \frac{1}{c^2_3} \frac{\nu(\xi_y)}{\xi_y^2}\loe \frac{1}{c_3^2}\frac{\nu(\widebar{F}(u)y)}{\widebar{F}(v)^2y^2} \loe \frac{c_4}{c_3^2}\frac{\nu'(\widebar{F}(u)y)\widebar{F}(u)}{\widebar{F}(v)^2y}.\] Now, for given $u>0$ and given $\e>0$, because of the continuity and strict monotonicity of $\widebar{F}$, we get
\[\frac{1}{2}\nu''(\xi_y) \loe \e \frac{\nu'(\widebar{F}(u)y)}{y(\widebar{F}(u)-\widebar{F}(v))}\] if only the (positive) difference $v-u$ is sufficiently small. Inserting into \eqref{new^2}, we get
\[\left|\int_u^v\widebar{F}(s)ds(1+\oo(1))\ -\ \frac{\nu'(\widebar{F}(u)y)}{\nu'(y)}\left(\widebar{F}(u)-\widebar{F}(v)\right)\right| \loe \e\,\frac{\nu'(\widebar{F}(u)y)}{\nu'(y)}(\widebar{F}(u)-\widebar{F}(v))\]
or equivalently, for $y\to\infty$,
\[\Big|\frac{\nu'(y)}{\nu'(\widebar{F}(u)y)}\ -\ \frac{\widebar{F}(u)-\widebar{F}(v)}{\int_u^v\widebar{F}(s)ds}(1+\oo(1))\Big| \loe \e\,\frac{\widebar{F}(u)-\widebar{F}(v)}{\int_u^v\widebar{F}(s)ds}(1+\oo(1)).\] Again, since the right-hand quotient is finite and positive for all $u<v$, this estimate implies that $\nu'(y)/\nu'(\widebar{F}(u)y)$ has a positive finite limit as $y\to\infty$. Because $\widebar{F}(u)$ takes all values between $0$ and $1$, $\nu'(y)$ is regularly varying. From the Lemma in Section VIII.9 of \cite{Fell71}, we then obtain the regular variation of $\nu$ with some exponent $\eta\geq 0$. It fulfills $\frac{1}{2}\leq\eta\leq 1$ as Lemma \ref{prop} (ii) yields \[a\sqrt{y}\loe \nu(y)\loe b y\] for some $a,\,b>0$. Hence, $\mu$, as the inverse function of $\nu$, is regularly varying with exponent $\alpha\in\left[1,2\right]$ (see Theorem 1.5.12 of \cite{BGT87}). \qed
\section{Moment calculations for external branches \texorpdfstring{of $\mathbf{\Lambda}$-coalescents}{}}\label{sec_mom}
In this section, we consider the number of external branches $Y_j$ after $j$ merging events: \[Y_j \ := \ \# \left\{1\leq i\leq n: \ \{i\}\in\Pi_n\left(W_0+\cdots+W_{j-1}\right)\right\}.\] In particular, we set $Y_0=n$ and $Y_j=0$ for $j>\tau_n$. (Again, we suppress $n$ in the notation, for convenience.) We provide a representation of the conditional moments of the number of external branches for general $\Lambda$-coalescents (also covering coalescents with a dust component). For this purpose, we use the notation $\left(x\right)_r:=x\left(x-1\right)\cdots\left(x-r+1\right)$ for falling factorials with $x\in\R $ and $r\in\N $. Recall that $\tau_n$ is the total number of merging events.
\begin{lem} \label{Lambda} Consider a general $\Lambda$-coalescent and let $\rho$ be a $\sigma(N_n)$-measurable random variable with $0\leq\rho\leq\tau_n$ a.s.
\begin{enumerate}
\item For a natural number $r$, the $r$-th factorial moment, given $N_n$, can be expressed as
\[\E\left[\left(Y_\rho\right)_r\left.\right|N_n\right] \eq \left(X_\rho\right)_r\,\prod_{j=1}^\rho \left(1-\frac{r}{X_j}\right) \eq \left(X_\rho-1\right)_r\,\frac{n}{n-r}\,\prod_{j=0}^{\rho-1} \left(1-\frac{r}{X_j}\right)\quad a.s.\]
\item For the conditional variance, the following inequality holds:
\[\V\left(Y_\rho\left|N_n\right.\right) \loe \E\left[Y_\rho\left|N_n\right.\right] \qquad a.s.\] \end{enumerate} \end{lem}
\begin{proof} (i) First, we recall a link between the external branches and the hypergeometric distribution based on the Markov property and exchangeability properties of the $\Lambda$-coalescent, as already described for Beta-coalescents in \cite{DKW14}:\\ Given $N_n$ and $Y_0,\ldots,Y_{\rho-1}$, the $\Delta X_\rho+1$ lineages coalescing at the $\rho$-th merging event are chosen uniformly at random among the $X_{\rho-1}$ present ones. For the external branches, this means that, given $N_n$ and $Y_0,\ldots,Y_{\rho-1}$, the decrement $\Delta Y_\rho :=Y_{\rho-1}-Y_\rho$ has a hypergeometric distribution with parameters $X_{\rho-1},\, Y_{\rho-1}$ and $\Delta X_{\rho}+1$. In view of the formula of the $i$-th factorial moment of a hypergeometric distributed random variable, we obtain \begin{align}\label{factmom}
\E\left[\left(\Delta Y_\rho\right)_i\left.\right|N_n,Y_0,\ldots,Y_{\rho-1}\right]\ =\ \left(\Delta X_\rho +1\right)_i\frac{\left(Y_{\rho-1}\right)_i}{\left(X_{\rho-1}\right)_i} \qquad a.s. \end{align}
Next, we look closer at the falling factorials. We have the following binomial identity \begin{align}\label{fallingfactorial} \left(a-b\right)_r \eq (a)_r\sum_{i=0}^r\binom{r}{i}\left(-1\right)^i\frac{\left(b\right)_i}{(a)_i} \end{align} for $a,b\in\R $ and $r\in\N $. It follows from the Chu–Vandermonde identity (formula 1.5.7 in \cite{BM04}) \[(x+y)_r=\sum_{i=0}^r\binom{r}{i}(x)_i(y)_{r-i}\] with $x,y\in\R$ and the calculation \begin{align*} (a-b)_r&\eq(-1)^r(b+r-1-a)_r\\[1ex] &\eq (-1)^r\sum_{i=0}^r\binom{r}{i}\left(b\right)_i(r-1-a)_{r-i}\\[1ex] &\eq (-1)^r\sum_{i=0}^r\binom{r}{i}\left(b\right)_i(-1)^{r-i}\frac{(a)_r}{(a)_i}\,. \end{align*} Returning to the number of external branches, we obtain from the identity \eqref{fallingfactorial} that \[\left(Y_\rho\right)_r \eq (Y_{\rho-1})_r\sum_{i=0}^{r}\binom{r}{i}\left(-1\right)^{i}\frac{\left(\Delta Y_\rho\right)_i}{\left(Y_{\rho-1}\right)_{i}}\;.\] With equation \eqref{factmom}, we arrive at \begin{align*}
\E\left[\left(Y_\rho\right)_r\left.\right|N_n,Y_0,\ldots,Y_{\rho-1}\right] \eq \left(Y_{\rho-1}\right)_r\sum_{i=0}^{r}\binom{r}{i}\left(-1\right)^i\frac{\left(\Delta X_\rho+1\right)_i}{\left(X_{\rho-1}\right)_i} \qquad a.s. \end{align*} Furthermore, combining the binomial identity \eqref{fallingfactorial} with the definition of $\Delta X_\rho$, we have \[\left(X_\rho-1\right)_r \eq (X_{\rho-1})_r\sum_{i=0}^{r}\binom{r}{i}\left(-1\right)^i\frac{\left(\Delta X_\rho+1\right)_i}{\left(X_{\rho-1}\right)_{i}}\;.\] Thus,
\[\E\left[\left(Y_\rho\right)_r\left.\right|N_n,Y_0,\ldots,Y_{\rho-1}\right] \eq \left(Y_{\rho-1}\right)_r\frac{\left(X_\rho-1\right)_r}{\left(X_{\rho-1}\right)_r} \qquad a.s.\] and, finally,
\[\frac{\E\left[\left(Y_\rho\right)_r\left.\right|N_n\right]}{\left(X_\rho\right)_r} \eq \frac{\E\left[\left(Y_{\rho-1}\right)_r\left.\right|N_n\right]}{\left(X_{\rho-1}\right)_r}\frac{\left(X_\rho-1\right)_r}{\left(X_\rho\right)_r} \eq \frac{\E\left[\left(Y_{\rho-1}\right)_r\left.\right|N_n\right]}{\left(X_{\rho-1}\right)_r}\left(1-\frac{r}{X_\rho}\right) \quad a.s.\]
The proof now finishes by iteration and taking $\E\left[Y_0\left|N_n\right.\right]=Y_0=X_0$ into account.
(ii) The inequality for the conditional variance follows from the representation in (i) with $r=1$ and $r=2$: \begin{align*}
\V\left(Y_\rho \left|N_n\right.\right)& \eq X_\rho \left(X_\rho -1\right)\prod_{j=1}^\rho \left(1-\frac{2}{X_j}\right)-X^2_\rho \prod_{j=1}^\rho \left(1-\frac{1}{X_j}\right)^2+X_\rho \prod_{j=1}^\rho \left(1-\frac{1}{X_j}\right)\\ & \loe X^2_\rho \prod_{j=1}^\rho \left(1-\frac{2}{X_j}\right)-X^2_\rho \prod_{j=1}^\rho \left(1-\frac{1}{X_j}\right)^2+X_\rho \prod_{j=1}^\rho \left(1-\frac{1}{X_j}\right)\\
& \loe X_\rho \prod_{j=1}^\rho \left(1-\frac{1}{X_j}\right)\eq \E\left[Y_\rho\left|N_n\right.\right] \qquad a.s. \end{align*} This finishes the proof. \end{proof}
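The one-step identity $\E[(Y_\rho)_r\,|\,N_n,Y_0,\ldots,Y_{\rho-1}]=(Y_{\rho-1})_r\,(X_\rho-1)_r/(X_{\rho-1})_r$ at the heart of the above proof can also be confirmed by exact enumeration over the hypergeometric distribution of $\Delta Y_\rho$. The following Python sketch does this for a few hypothetical parameter values ($a$ blocks, $b$ of them external, $m+1$ blocks merging).

\begin{verbatim}
from math import comb

def falling(x, r):
    out = 1
    for i in range(r):
        out *= (x - i)
    return out

def lhs(a, b, m, r):
    # E[(b - D)_r] with D hypergeometric(a, b, m + 1), by exact enumeration
    draws = m + 1
    return sum(comb(b, d) * comb(a - b, draws - d) * falling(b - d, r)
               for d in range(min(b, draws) + 1)) / comb(a, draws)

def rhs(a, b, m, r):
    # (Y_{rho-1})_r (X_rho - 1)_r / (X_{rho-1})_r with X_rho = a - m
    return falling(b, r) * falling(a - m - 1, r) / falling(a, r)

for (a, b, m, r) in [(10, 6, 2, 1), (10, 6, 2, 2), (25, 11, 4, 3), (8, 8, 3, 2)]:
    print((a, b, m, r), lhs(a, b, m, r), rhs(a, b, m, r))
\end{verbatim}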
\section{Proof of Theorem \texorpdfstring{\protect\ref{reg_var}}{1.5}} \label{sec_proof_beta}
In order to study $\Lambda$-coalescents having a regularly varying rate of decrease $\mu$ with exponent $\alpha\in(1,2]$, we define \[\kappa(x) \ := \ \frac{\mu(x)}{x}, \qquad x \geq 1,\] for convenience. For $k\in\N $ and for real-valued random variables $Z_1,\ldots,Z_k$, denote the reversed order statistics by \[Z_{\left\langle 1\right\rangle} \ \geq\ \cdots \ \geq\ Z_{\left\langle k\right\rangle}.\]
We now prove the following theorem that is equivalent to Theorem \ref{reg_var}. Recall the definition of $s_n$ in (\ref{new2}).
\begin{theorem}\label{reg_var v2} Suppose that the $\Lambda$-coalescent has a regularly varying rate $\mu$ with exponent $1<\alpha\leq 2$ and fix $\ell\in\N $. Then, as $n\to\infty$, the following convergence holds: \[\kappa(s_n)\left(T_{\left\langle 1\right\rangle}^n,\ldots,T_{\left\langle \ell\right\rangle}^n\right)\ \stackrel{d}{\longrightarrow}\ \left(U_1,\ldots,U_\ell\right),\] where $U_1>\cdots>U_\ell$ are the points in decreasing order of a Poisson point process $\Phi$ on $(0,\infty)$ with intensity measure $\phi(dx) \eq \alpha \left(\left(\alpha-1\right)x\right)^{-1-\al/(\al-1)}\;dx$. \end{theorem}
For the rest of this section, keep the stopping times \begin{align}\label{stopp1} \widetilde{\rho}_{c,n}\ :=\ \inf\left\{t\geq 0:\, N_n(t)\leq c s_n\right\} \end{align} in mind and define their discrete equivalents \begin{align}\label{stopp2} \rho_{c,n}\ :=\ \min\left\{j\geq 0:\, X_j\leq c s_n\right\} \end{align} for $c>0$.
Later, we shall apply Proposition \ref{ErgLem2} to the latter stopping times, in view of \eqref{s_n} and \begin{align} \label{tilde_0} \int_{cs_n}^n\frac{dx}{\mu(x)} \eq \OO\left(\int_{ cs_n}^n x^{-\alpha+\e}dx\right) \eq \OO\left(s_n^{1-\al+\e}\right) \eq \oo(1) \end{align} for $0<\e<\al-1$ because $\mu$ is regularly varying with exponent $\al$.\\
The next proposition deals with properties of the stopping times from \eqref{stopp1} and \eqref{stopp2}. It justifies the choice of $s_n$: it shows that $X_{\rho_{c,n}}$ diverges at the same rate as $s_n$ and that $Y_{\rho_{c,n}}$ stays stochastically bounded in $n$. In particular, it reveals that for large $c$ there are, with high probability, external branches still present up to the time $\widetilde{\rho}_{c,n}$.
\begin{prop}\label{EV} Assume that the $\Lambda$-coalescent has a regularly varying rate $\mu$ with exponent $\al\in(1,2]$. Then we have: \begin{enumerate} \item For each $\e>0$, there exists $c_\e>0$ such that for all $c\geq c_\e$, \[\lim_{n\to\infty}\PP\left(\kappa(s_n)\,\widetilde{\rho}_{c,n}\geq\e\right) \eq 0.\] \item For each $c>0$, as $n\to\infty$, \[X_{\rho_{c,n}} \eq cs_n +\oo_P(s_n).\] \item For each $\e>0$,
\[\limsup_{n\to\infty}\PP\left(\left|c^{-\al}\,Y_{\rho_{c,n}}-1\right|\geq\e\right)\ \stackrel{c\to\infty}{\longrightarrow}\ 0.\] \end{enumerate} \end{prop}
\begin{proof} (i) Because $\mu$ is regularly varying with exponent $\al>1$, we have \[\int_{cs_n}^\infty\frac{dx}{\mu(x)} \ \sim\ \frac{1}{\al-1}\frac{cs_{n}}{\mu(cs_n)}\ \sim \ \frac{1}{\al-1}c^{1-\al}\frac{1}{\kappa(s_n)}\] as $n\to\infty$.
Now Proposition \ref{ErgLem2} implies that \[\kappa(s_n)\,\widetilde{\rho}_{c,n} \loe \frac{1}{\al-1}c^{1-\al}(1+\oo_P(1)),\] which entails the claim.
(ii) Because of \eqref{tilde_0}, we may use Lemma 3 (ii) of \cite{DK18}. In conjunction with the definition of $\rho_{c,n}$, therefore, we obtain \begin{align*} \frac{X_{\rho_{c,n}}}{X_{\rho_{c,n}-1}} \eq 1 - \frac{\Delta X_{\rho_{c,n}}}{X_{\rho_{c,n}-1}} \eq 1+\oo_P(1) \end{align*} as $n\to\infty$. This implies the statement because of $X_{\rho_{c,n}}\leq cs_n<X_{\rho_{c,n}-1}$.
(iii) We first prove that \begin{align}\label{condExp}
\E\left[Y_{\rho_{c,n}}|N_n\right] \eq c^\alpha + \oo_P\left(1\right) \end{align} as $n\to\infty$. Lemma \ref{Lambda} (i), together with a Taylor expansion as in \eqref{Mac}, provides \begin{align*}
\E\left[Y_{\rho_{c,n}}\left.\right|N_n\right] & = (X_{\rho_{c,n}}-1) \exp{\left(-\sum_{j=0}^{\rho_{c,n}-1}\frac{1}{X_j}+\OO\left(X_{\rho_{c,n}-1}^{-1}\right)\right)} \end{align*} as $n\rightarrow\infty$. Furthermore, \eqref{s_n} and \eqref{tilde_0} allow us to apply Proposition \ref{ErgLem2} yielding \begin{align}\label{Prop.3} \sum_{j=0}^{\rho_{c,n}-1}\frac{1}{X_j} \eq \log{\left(\frac{\kappa(n)}{\kappa(X_{\rho_{c,n}})}\right)}+\oo_P(1) \end{align} as $n\to\infty$. Combining statement (ii) with Lemma \ref{prop} (ii), therefore, we arrive at
\[\E\left[Y_{\rho_{c,n}}\left.\right|N_n\right] \eq n\,\frac{\mu\big(X_{\rho_{c,n}}\big)}{\mu(n)}\left(1+\oo_P\left(1\right)\right) \eq n\,\frac{\mu(cs_n)}{\mu(n)}\left(1+\oo_P\left(1\right)\right) \]
\begin{samepage}\enlargethispage{3\baselineskip} so that the regular variation of $\mu$ and the definition of $s_n$ imply \eqref{condExp}. Thus, in the upper bound \begin{align*} \begin{split}
\PP\left(\left|Y_{\rho_{c,n}}-c^\al\right|\geq\e\, c^\al\right) & \ \loe \
\PP\left(\left|\E\left[Y_{\rho_{c,n}}\left|N_n\right.\right]-c^\al\right|\geq \frac{\e}{2}\, c^\al\right)\\[1ex]
&\hspace*{100pt} +\ \PP\left(\left|Y_{\rho_{c,n}}-\E\left[Y_{\rho_{c,n}}\left|N_n\right.\right]\right|\geq\frac{\e}{2}\,c^\alpha\right) \end{split} \end{align*} with $\e>0$, the first right-hand probability converges to $0$. For the second one, Chebyshev's inequality and Lemma \ref{Lambda} (ii) imply that
\end{samepage} \begin{align*}
\PP\big(\big|Y_{\rho_{c,n}}-\E\big[Y_{\rho_{c,n}}\big| N_n\big]\big|\geq\,\e\,c^\alpha\big) &\eq \E\left[\PP\left(\left|Y_{\rho_{c,n}}-\E\left[Y_{\rho_{c,n}}\left|N_n\right.\right]\right|\geq\e\,c^\alpha\left.\right|N_n\right) \right]\\[1ex]
& \loe \E\left[\frac{\V\left(Y_{\rho_{c,n}}\left|N_n\right.\right)}{\e^2c^{2\alpha}}\wedge 1\right]\\[1ex]
& \loe \E\left[\frac{\E\left[Y_{\rho_{c,n}}\left|\right.N_n\right]}{\e^{2}\,c^{2\alpha}}\wedge 1\right]. \end{align*}
From \eqref{condExp} and dominated convergence, we conclude
\[\PP\big(\big|Y_{\rho_{c,n}}-\E\big[Y_{\rho_{c,n}}\big| N_n\big]\big|\geq\,\e\,c^\alpha\big) \loe \e^{-2}c^{-\al}+\oo(1)\] as $n\rightarrow\infty$, which provides the claim. \end{proof}
For the following lemma, let us recall the subdivided external branch lengths
\[\widecheck{T}^{n}_{i}\ :=\ T^n_{i}\wedge \widetilde{\rho}_{c,n} \quad \text{ and }\quad \widehat{T}^{n}_i\ :=\ T^n_{i}-\widecheck{T}^{n}_{i}\]
for $1\leq i\leq n$ and let \[\beta\ := \ \frac{\alpha-1}{\alpha}.\]
\begin{lem}\label{lem:key} Suppose that the $\Lambda$-coalescent has a regularly varying rate $\mu$ with exponent $\al\in(1,2]$. Then, for $\ell,y\in\N$, there exist random variables $U_{1,y}\geq\ldots\geq U_{\ell,y}$ such that the following convergence results hold: \begin{enumerate} \item For any bounded continuous function $g:\R^\ell\to \R$ and for fixed $y\geq\ell$, as $n\to\infty$,
\[\E\left[g\left(\kappa(cs_n)\widehat{T}^{n}_{\left\langle 1\right\rangle},\ldots,\kappa(cs_n)\widehat{T}^{n}_{\left\langle \ell \right\rangle}\right)\, \big|\,Y_{\rho_{c,n}}=y,X_{\rho_{c,n}}\right]\ \longrightarrow\ \E\left[g \left(U_{1,y},\ldots,U_{\ell,y}\right)\right]\] in probability. \item For fixed $\ell\in\N$, as $y\to\infty$, \[y^{-\beta}\left(U_{1,y},\ldots,U_{\ell,y}\right)\ \stackrel{d}{\longrightarrow} \ \left(U_{1},\ldots,U_{\ell}\right),\] where $U_{1}>\cdots> U_{\ell}$ are the points of the Poisson point process of Theorem \ref{reg_var v2}. \end{enumerate} \end{lem}
\begin{proof} (i) Let
\[\widebar{g}_y(x,z) := \E\left[g\left(\kappa(z)\widehat{T}^{n}_{\left\langle 1\right\rangle},\ldots,\kappa(z)\widehat{T}^{n}_{\left\langle \ell \right\rangle}\right)\, \big|\,Y_{\rho_{c,n}}=y,X_{\rho_{c,n}}=x\right]\] for $x>y, z\ge 2$. Observe that due to the strong Markov property, given the events $X_{\rho_{c,n}}=x$ and $Y_{\rho_{c,n}}=y$, the $y$ remaining external branches evolve as $y$ ordinary external branches out of a sample of $x$ many individuals. From these $y$ external branches, we consider the $\ell$ largest ones. Hence, since $\kappa$ is regularly varying, Corollary \ref{cor} yields that \[\widebar{g}_y(x,z) \ \longrightarrow \ \E\left[g \left(U_{1,y},\ldots,U_{\ell,y}\right)\right]\] as $x\to\infty$ and $z/x\to 1$. Here, from established formulae for order statistics of i.i.d random variables, $(U_{1,y},\ldots,U_{\ell,y})$ has the density \begin{align} \label{jointdens} \ell!\binom{y}{\ell}F\left( u_\ell\right)^{y-\ell}\ \prod_{i=1}^{\ell}\,f\big(u_i\big) du_1\cdots du_\ell, \end{align} with $u_1 \geq \cdots \geq u_\ell\geq 0$, where $f$ is the density from formula \eqref{dens} and $F$ its cumulative distribution function.
Now, it follows from Skorohod's representation theorem that one can construct random variables $X_n'$ on a common probability space with the properties that $X_n'$ and $X_{\rho_{c,n}}$ have the same distribution for each $n \ge 1$ and that, in view of Proposition \ref{EV} (ii), the random variables $X_n'/cs_n$ converge to 1 a.s. It follows \[ \widebar g_y(X_n', cs_n) \to \E\left[g \left(U_{1,y},\ldots,U_{\ell,y}\right)\right] a.s. \] and, therefore, \[ \widebar g_y(X_{\rho_{c,n}}, cs_n) \to \E\left[g \left(U_{1,y},\ldots,U_{\ell,y}\right)\right] \] in probability, which is our claim.
(ii) Note that \[y^{\beta+1}f(y^\beta u) \eq y^{\beta+1}\al\left(1+(\al-1)uy^\beta\right)^{-1-1/\beta} \ \stackrel{y\to\infty}{\longrightarrow} \ \al\left(\left(\al-1\right)u\right)^{-1-1/\beta}\] and \[F(y^\beta u)^{y-\ell} \eq \left[1-\left(1+(\al-1)y^\beta u\right)^{-1/\beta}\right]^{y-\ell} \ \stackrel{y\to\infty}{\longrightarrow} \ \exp{\left(-\left((\alpha-1)\,u\right)^{-1/\beta}\right)}. \] Consequently, \[\ell!\binom{y}{\ell}F\left( y^\beta u_\ell\right)^{y-\ell}\ \prod_{i=1}^{\ell}\left[\,f\big(y^\beta u_i\big)y^\beta du_i\right],\] being the density of $y^{-\beta}\left(U_{1,y},\ldots,U_{\ell,y}\right)$, has the limit \[\exp{\left(-\left((\alpha-1)\,u_\ell\right)^{-1/\beta}\right)}\,\prod_{i=1}^{\ell}\, \alpha\left((\alpha-1)\,u_i\right)^{-1-1/\beta}du_1\cdots du_\ell\] as $y\to\infty$. Indeed, this is the joint density of the rightmost points $U_1>\cdots>U_\ell$ of the Poisson point process given in Theorem \ref{reg_var v2}. \end{proof}
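The two pointwise limits used in part (ii) above can be illustrated numerically; the following sketch evaluates $y^{\beta+1}f(y^\beta u)$ and $F(y^\beta u)^{y-\ell}$ for growing $y$ (reading $f$ and $F$ off the two displays above) and compares them with their limits, here for the illustrative values $\alpha=1.5$, $\ell=2$, $u=3$.

\begin{verbatim}
import math

alpha, ell, u = 1.5, 2, 3.0
beta = (alpha - 1) / alpha

def f(v):
    return alpha * (1 + (alpha - 1) * v) ** (-1 - 1 / beta)

def F(v):
    return 1 - (1 + (alpha - 1) * v) ** (-1 / beta)

lim_density = alpha * ((alpha - 1) * u) ** (-1 - 1 / beta)
lim_cdf_pow = math.exp(-((alpha - 1) * u) ** (-1 / beta))

for y in (10**2, 10**4, 10**6, 10**8):
    print(y,
          y ** (beta + 1) * f(y ** beta * u), lim_density,
          F(y ** beta * u) ** (y - ell), lim_cdf_pow)
\end{verbatim}

The printed values approach the limits slowly (the error decays like a power of $y^{-\beta}$), but the convergence is clearly visible.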
\begin{proof}[Proof of Theorem \ref{reg_var v2}] The proof consists of two parts. First, we consider $(\widehat{T}^n_{\left\langle 1 \right\rangle},\ldots,\widehat{T}^n_{\left\langle \ell \right\rangle})$ in the limits $n\to\infty$ and then $c\to\infty$, which gives already the limit of our theorem. Consequently, in the second step it remains to show that $(\widecheck{T}^n_{\left\langle 1 \right\rangle},\ldots,\widecheck{T}^n_{\left\langle \ell \right\rangle})$ can asymptotically be neglected.
In the first step, we normalize $\widehat{T}^n_{\left\langle j \right\rangle}$ not by $\kappa(s_n)$ but by the factor $Y_{\rho_{c,n}}^{-\beta}\kappa(c s_n)$, which is equivalent in the limit $c \to \infty$ because of Proposition \ref{EV} (iii). Thus, we set \[V_{c,n} \ :=\ \kappa(cs_n)\left(\widehat{T}^n_{\left\langle 1 \right\rangle},\ldots,\widehat{T}^n_{\left\langle \ell \right\rangle}\right).\]
Let $g:\R^\ell\rightarrow\R$ be a continuous function and assume that $\max{|g|}\leq 1$. For $c>0$, we obtain via the law of total expectation and Lemma \ref{lem:key} (i) that \begin{align*}
\Big|&\E\Big[g\Big(Y_{\rho_{c,n}}^{-\beta}\,V_{c,n}\Big)\,\Big|\,X_{\rho_{c,n}}\Big]\ -\ \E\left[g\left(U_{1},\ldots,U_{\ell}\right)\right]\Big|\\[2ex]
&\loe \sum_{c/2\leq y\leq 2c}\Big|\E\left[\left.g\left(y^{-\beta}\,V_{c,n}\right)\,\right|\,Y_{\rho_{c,n}}=y,\,X_{\rho_{c,n}}\right]\ -\ \E\left[g\left(U_{1},\ldots,U_{\ell}\right)\right]\Big|\hspace{-1pt}\cdot\hspace{-1pt}\PP\left(Y_{\rho_{c,n}}=y\,|\,X_{\rho_{c,n}}\right)\\[1ex]
& \qquad\qquad +\ 2\PP\left(\left|Y_{\rho_{c,n}}-c^\al\right|\geq c^\al/2\,|\,X_{\rho_{c,n}}\right)\\[1.5ex]
&\loe \max_{c/2\leq y\leq 2c}\Big|\E\left[\left.g\left(y^{-\beta}\,V_{c,n}\right)\,\right|\,Y_{\rho_{c,n}}=y,\,X_{\rho_{c,n}}\right]\ -\ \E\left[g\left(U_{1},\ldots,U_{\ell}\right)\right]\Big| \\[1ex]
& \qquad\qquad +\ 2\PP\left(\left|Y_{\rho_{c,n}}-c^\al\right|\geq c^\al/2\,|\,X_{\rho_{c,n}}\right) \\[1.5ex]
&\loe \max_{c/2\leq y\leq 2c}\Big|\E\left[g\left(y^{-\beta}U_{1,y},\ldots,y^{-\beta}U_{\ell,y}\right)\right]\ -\ \E\left[g\left(U_{1},\ldots,U_{\ell}\right)\right]\Big| +\oo_P(1)\\[1ex]
& \qquad\qquad +\ 2\PP\left(\left|Y_{\rho_{c,n}}-c^\al\right|\geq c^\al/2\,|\,X_{\rho_{c,n}}\right) \end{align*} as $n\to\infty$. Without loss of generality, we may assume that the $\oo_P(\cdot)$-term is bounded by 1. Hence, taking expectations, applying Jensen's inequality to the left-hand side and using dominated convergence, we obtain \begin{align*}
&\Big|\E\Big[g\Big(Y_{\rho_{c,n}}^{-\beta}\,V_{c,n}
\Big)\Big]\ -\ \E\left[g\left(U_{1},\ldots,U_{\ell}\right)\right]\Big|\\[1.5ex]
&\qquad\loe \ \max_{c/2\leq y\leq 2c}\Big|\E\left[g\left(y^{-\beta}U_{1,y},\ldots,y^{-\beta}U_{\ell,y}\right)\right]\ -\ \E\left[g\left(U_{1},\ldots,U_{\ell}\right)\right]\Big|\ +\ \oo(1)\\[1ex]
&\qquad\qquad \qquad +\ 2\PP\left(\left|Y_{\rho_{c,n}}-c^\al\right|\geq c^\al/2\right) \end{align*} as $n\to\infty$. Then Lemma \ref{lem:key} (ii) and Proposition \ref{EV} (iii) entail \begin{align}\label{key}
\limsup_{n\to\infty}\Big|\E\Big[g\Big(Y_{\rho_{c,n}}^{-\beta}\,V_{c,n}
\Big)\Big]-\E\left[g\left(U_{1},\ldots,U_{\ell}\right)\right]\Big|\ \stackrel{c\to\infty}{\longrightarrow}\ 0. \end{align}
This finishes the first part of our proof. For the second one, we additionally assume that $g$ is a Lipschitz continuous function with Lipschitz constant $1$ (in each coordinate) and prove that \begin{align}\label{proof} \E\left[g\left(\kappa(s_n)\,T^n_{\left\langle 1 \right\rangle},\ldots,\kappa(s_n)\,T^n_{\left\langle \ell \right\rangle}\right)\right]\ \stackrel{n\to\infty}{\longrightarrow} \ \E\left[g\left(U_{1},\ldots,U_{\ell}\right)\right], \end{align} which implies the theorem's statement. For $\e>0$, we have \begin{align*}
\big|\E&\big[g\big(\kappa(s_n)\,T^n_{\left\langle 1 \right\rangle},\ldots,\kappa(s_n)\,T^n_{\left\langle \ell \right\rangle}\big)\big]\ -\ \E\big[g\big(U_{1},\ldots,U_{\ell}\big)\big]\big|\\[1ex]
& \loe \left|\E\left[g\left(\kappa(s_n)\,\widehat{T}^n_{\left\langle 1 \right\rangle},\ldots,\kappa(s_n)\,\widehat{T}^n_{\left\langle \ell \right\rangle}\right)\right]\ -\ \E\left[g\left(U_{1},\ldots,U_{\ell}\right)\right]\right|+\sum_{i=1}^\ell\E\left[\kappa(s_n)\widecheck{T}^n_{\left\langle i\right\rangle}\wedge 2\right]\\[1ex]
&\loe \Big|\E\left[g\left(Y_{\rho_{c,n}}^{-\beta}\,V_{c,n}\right)\right]\ -\ \E\left[g\left(U_{1},\ldots,U_{\ell}\right)\right]\Big|\\[1ex] &\qquad \quad +\
\sum_{i=1}^\ell\E\left[\left|\left(Y_{\rho_{c,n}}^{-\beta}\kappa(cs_n)-\kappa(s_n)\right)\widehat{T}^n_{\left\langle i\right\rangle}\right|\wedge 2\right] +\ell\,\E\left[\kappa(s_n)\widecheck{T}^n_{\left\langle 1\right\rangle}\wedge 2\right]\\[1ex]
&\loe \left|\E\left[g\left(Y_{\rho_{c,n}}^{-\beta}\,V_{c,n}\right)\right]\ -\ \E\left[g\left(U_{1},\ldots,U_{\ell}\right)\right]\right|\\[1ex]
& \qquad\quad + \ \ell\, \E\left[\left(\e\kappa(cs_n)Y_{\rho_{c,n}}^{-\beta}\widehat{T}_{\left\langle 1\right\rangle}\right)\wedge 2\right]+\ 2\ell\,\PP\left(\left|Y_{\rho_{c,n}}^{-\beta}\kappa(cs_n)-\kappa(s_n)\right|\geq\e\kappa(cs_n)Y_{\rho_{c,n}}^{-\beta}\right)\\[1ex] & \qquad\quad \quad +\ \ell\e + 2\ell\,\PP\left(\kappa(s_n)\,\widecheck{T}_{\left\langle 1\right\rangle}\geq \e \right) \end{align*} and, consequently, \begin{align*}
&\limsup_{n\to\infty}\big|\E\big[g\big(\kappa(s_n)\,T^n_{\left\langle 1 \right\rangle},\ldots,\kappa(s_n)\,T^n_{\left\langle \ell \right\rangle}\big)\big]-\E\big[g\big(U_{1},\ldots,U_{\ell}\big)\big]\big|\\[1ex]
&\qquad\leq \ \limsup_{n\to\infty}\left|\E\left[g\left(Y_{\rho_{c,n}}^{-\beta}\,V_{c,n}\right)\right]-\E\left[g\left(U_{1},\ldots,U_{\ell}\right)\right]\right|\\[1ex]
&\qquad\qquad\quad +\ \ell\,\limsup_{n\to\infty} \left|\E\left[\left(\e\kappa(cs_n)Y_{\rho_{c,n}}^{-\beta}\widehat{T}_{\left\langle 1\right\rangle}\right)\wedge 2\right]-\E\left[\left(\e U_1\right)\wedge 2\right]\right|+\ell\,\E\left[\left(\e U_1\right)\wedge 2\right]\\[1ex]
&\qquad\qquad\quad\quad +\ 2\ell\limsup_{n\to\infty}\PP\left(\left|1-\frac{\kappa(s_n)}{\kappa(cs_n)}Y_{\rho_{c,n}}^\beta\right|\geq \e\right)\\[1ex] &\qquad\qquad\quad\quad\quad +\ \ell\e + 2\ell\,\limsup_{n\to\infty}\PP\left(\kappa(s_n)\,\widetilde{\rho}_{c,n}\geq \e\right). \end{align*} We now use \eqref{key} for the first two right-hand terms and Proposition \ref{EV} (iii) for the first probability taking $\kappa(cs_n)/\kappa(s_n)\sim c^{\al-1}=c^{\al\beta}$ also into account. To the other probability, we apply Proposition~\ref{EV}~(i). Hence, passing to the limit as $c\to\infty$ yields \begin{align*}
&\limsup_{n\to\infty}\big|\E\big[g\big(\kappa(s_n)\,T^n_{\left\langle 1 \right\rangle},\ldots,\kappa(s_n)\,T^n_{\left\langle \ell \right\rangle}\big)\big]-\E\big[g\big(U_{1},\ldots,U_{\ell}\big)\big]\big| \loe \ell\,\E\left[\left(\e U_1\right)\wedge 2\right] +\ell\e. \end{align*} \enlargethispage{2\baselineskip}
Finally, taking the limit $\e\to 0$ and using dominated convergence provides the claim. \end{proof}
\section{Proof of Theorem \texorpdfstring{\protect\ref{bs}}{1.6}} \label{sec_proof_bs}
Recall the notation of the reversed order statistics $Z_{\left\langle 1\right\rangle}\geq Z_{\left\langle 2\right\rangle}\geq \cdots$ of real-valued random variables as introduced in the previous section and the definition \[t_n\ :=\ \log\log{n}-\log\log\log{n}+\log\log\log{n}/\log\log{n}.\]
In this section, we prove the following equivalent version of Theorem \ref{bs}:
\begin{theorem} \label{bs v2} For the Bolthausen-Sznitman coalescent, the following convergence holds: For $\ell\in\N$, \[\log{\log{n}}\left(T_{\left\langle 1\right\rangle}^{n}-t_n,\,\ldots,T_{\left\langle \ell\right\rangle}^n-t_n\right)\ \overset{d}{\longrightarrow}\ \left(U_1-G,\ldots,U_\ell-G\right)\] as $n\to\infty$, where $U_1>\cdots> U_\ell$ are the $\ell$ maximal points in decreasing order of a Poisson point process on $\R $ with intensity measure $e^{-x}\;dx$ and $G$ is an independent standard Gumbel distributed random variable. \end{theorem}
Recall, for $c>1$, the notation \[t_{c,n}\ :=\ t_n-\frac{\log{c}}{\log\log{n}}.\]
\begin{lem}\label{N} Let $E$ be a standard exponential random variable. Then, as $n\to\infty$, we have for $c>1$, \[e^{-t_{c,n}}N_n(t_{c,n})\ \overset{d}{\longrightarrow}\ cE.\] \end{lem}
\begin{proof} We first consider $N_n(t)^{(r)}:=N_n(t)\left(N_n(t)+1\right)\cdots\left(N_n(t)+r-1\right)$ for $r\in\N $. For these ascending factorials, Lemma 3.1 of \cite{Moe15} provides \[\E\left[N_n(t)^{(r)}\right] \eq \frac{\Gamma\left(r+1\right)}{\Gamma\left(1+re^{-t}\right)}\frac{\Gamma\left(n+re^{-t}\right)}{\Gamma\left(n\right)}.\] The Stirling approximation with remainder term yields, uniformly in $t\geq 0$, \[\frac{\Gamma\left(n+re^{-t}\right)}{\Gamma\left(n\right)} \eq n^{re^{-t}}\left(1+\oo\left(1\right)\right)\] and, consequently, \[\E\left[N_n(t)^{(r)}\right] \eq \frac{\Gamma\left(r+1\right)}{\Gamma\left(1+re^{-t}\right)}\ n^{re^{-t}}\left(1+\oo\left(1\right)\right)\] uniformly in $t\geq 0$ as $n\rightarrow\infty$. Inserting $t_{c,n}$ in this equation entails \enlargethispage{\baselineskip} \begin{align*} n^{-re^{-t_{c,n}}}\E\left[N_n(t_{c,n})^{(r)}\right]\ \rightarrow\ r! \end{align*}
as $n\rightarrow\infty$.
Now observe \begin{align*} e^{-t_{c,n}}\log{n}&\eq \exp{\left(-\frac{\log\log\log{n}}{\log\log{n}}+\frac{\log{c}}{\log\log{n}}\right)}\log\log{n}\\[1ex] &\eq\log{\log{n}}-\log{\log{\log{n}}}+\log{c} +\oo\left(1\right)\\[1ex] &\eq t_{c,n}+\log{c}+\oo(1). \end{align*} Equivalently, \begin{align*} n^{e^{-t_{c,n}}}\eq ce^{t_{c,n}}\left(1+\oo\left(1\right)\right) \end{align*} and, therefore, \begin{align}\label{asc_fact} e^{-rt_{c,n}}\E\left[N_n(t_{c,n})^{(r)}\right]\ \rightarrow\ c^rr! \end{align} as $n\to\infty$.
Furthermore, because of \[N_n(t)^{r}\loe N_n(t)^{(r)}\loe N_n(t)^r+2^rr^rN_n(t)^{r-1}\loe N_n(t)^r+2^rr^rN_n(t)^{(r-1)},\] we have \[N_n(t)^{(r)}-2^rr^rN_n(t)^{(r-1)}\loe N_n(t)^r\loe N_n(t)^{(r)}.\] Thus, \eqref{asc_fact} transfers to \[e^{-rt_{c,n}}\E\left[N_n(t_{c,n})^{r}\right]\ \longrightarrow \ c^rr!\] as $n\to\infty$, and our claim follows by the method of moments. \end{proof}
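The elementary expansion of $e^{-t_{c,n}}\log n$ used in the proof can be checked numerically. In the sketch below, $n$ is specified through $k=\log\log n$ (so that the astronomically large $n$ itself never has to be formed), and the difference $e^{-t_{c,n}}\log n-(t_{c,n}+\log c)$ is printed; it tends to $0$, though only at a logarithmic rate.

\begin{verbatim}
import math

c = 2.0
for k in (50.0, 200.0, 700.0):        # k = log log n
    log_n = math.exp(k)               # log n
    lln, llln = k, math.log(k)        # log log n, log log log n
    t_cn = lln - llln + llln / lln - math.log(c) / lln
    print(k, math.exp(-t_cn) * log_n - (t_cn + math.log(c)))  # tends to 0 slowly
\end{verbatim}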
The following lemma provides the asymptotic behavior of the joint probability distribution of the lengths of the longest external branches starting at time $t_{c,n}$. Let \[M_n(t) \ := \ \#\left\{i\geq 1 :\;\{i\}\in\Pi_n(t)\right\},\qquad t\geq 0,\] which is the number of external branches at time $t$. Also recall \[\widehat{T}^n_{\left\langle i\right\rangle}:=(T_{\left\langle i\right\rangle}^n-t_{c,n})^+.\]
\begin{samepage}\enlargethispage{2\baselineskip} \begin{lem}\label{lem:key2} For $\ell,y\in\N$, there exist random variables $U_{1,y}\geq\cdots\geq U_{\ell,y}$ such that the following convergence results hold: \begin{enumerate} \item For any bounded continuous function $g:\R^\ell\to \R$ and for fixed natural numbers $\ell\leq y$, as $n\to\infty$, \begin{align*}
&\E\left[g\left(\log{\log{(n)}}\,\big(\widehat{T}^{n}_{\left\langle 1 \right\rangle},\ldots,\widehat{T}^{n}_{\left\langle \ell \right\rangle}\big)\right)\,\Big|\,N_n(t_{c,n}),M_n(t_{c,n})=y\right] \ \longrightarrow \ \E\left[g\left(U_{1,y},\ldots,U_{\ell,y}\right)\right] \end{align*} in probability. \item For fixed $\ell$, as $y\to\infty$, \[ \left(U_{1,y}-\log{y} ,\ldots,U_{\ell,y}-\log{y} \right) \ \stackrel{d}{\longrightarrow} \ \left(U_{1},\ldots,U_{\ell}\right),\] where $U_{1}>\cdots> U_{\ell}$ are the points of the Poisson point process of Theorem \ref{bs v2}. \end{enumerate} \end{lem} \end{samepage}
\begin{proof} (i) We proceed in the same vein as in the proof of Lemma \ref{lem:key} (i). The strong Markov property, Corollary \ref{cor} (see also formula \eqref{ex} in the first example) and Lemma \ref{N} yield that
\[\E\left[g\left(z\,\big(\widehat{T}^{n}_{\left\langle 1 \right\rangle},\ldots,\widehat{T}^{n}_{\left\langle \ell \right\rangle}\big)\right)\,\Big|\,N_n(t_{c,n})=x,M_n(t_{c,n})=y\right] \ \longrightarrow \ \E\left[g\left(U_{1,y},\ldots,U_{\ell,y}\right)\right]\] as $x\to\infty$ and $z/\log{x}\to 1$, where
$(U_{1,y},\ldots,U_{\ell,y})$ has the density \begin{align}\label{dens_bs} \ell!\binom{y}{\ell}\left(1-e^{-u_\ell}\right)^{y-\ell}\,\prod_{i=1}^{\ell}e^{-u_i} du_1\cdots du_\ell \end{align} for $u_1\geq\cdots\geq u_\ell$. Moreover, from Lemma \ref{N}, we obtain \begin{align*} \log{\left(N_n(t_{c,n})\right)} \eq t_{c,n} +\OO_P(1) \eq \log{\log{n}}+\oo_P\left(\log{\log{n}}\right) \end{align*} as $n\rightarrow\infty$. Thus, replacing $x$ and $z$ above by $N_n(t_{c,n})$ and $\log\log{n}$, respectively, and invoking Skorohod's representation theorem once more, our claim follows.
(ii) Shifting the distribution from \eqref{dens_bs} by $\log{y}$, we arrive at the densities \[\ell!\binom{y}{\ell}\left(1-\frac{e^{-u_\ell}}{y}\right)^{y-\ell}\,y^{-\ell}\prod_{i=1}^{\ell}e^{-u_i} du_1\cdots du_\ell\] and their limit \[e^{-e^{-u_\ell}}\prod_{i=1}^{\ell}e^{-u_i}du_i\] as $y\to\infty$, which is the joint density of $U_1,\ldots, U_\ell$. This finishes the proof. \end{proof}
Next, we introduce the notation \[\rho_{c,n} \ := \ \min{\bigg\{k\geq 1:\ \sum_{j=0}^{k-1} W_j>t_{c,n}\bigg\}}\wedge\tau_n.\]
It is important to note that, in the case of the Bolthausen-Sznitman coalescent, Proposition \ref{ErgLem2} is no longer helpful, so we cannot simply apply \eqref{Prop.3}. As a substitute, we shall use the following lemma.
\begin{lem}\label{sum_bs} As $n\to\infty$, \[\sum_{j=0}^{\rho_{c,n}-1}\frac{1}{X_j} \eq t_{c,n}+\oo_P(1).\] \end{lem}
\begin{proof} Let $\mathcal{F}_k:=\sigma\left(X,W_0,\ldots,W_{k-1}\right)$ and \[Z_k\ :=\ \sum_{j=0}^{k\wedge \tau_n-1}\left(W_j-\frac{1}{X_j-1}\right), \qquad k\geq 0.\] In particular, we have $Z_0=0$. Given $\mathcal{F}_j$ and $X_j=b$ with $b\geq 2$, the waiting time $W_j$ in the Bolthausen-Sznitman coalescent is exponential with rate parameter $b-1$ (see (47) in \cite{Pit99}). Thus, $(Z_k)_{k\in\N}$ is a martingale with respect to the filtration $(\mathcal{F}_k)_{k\in\N}$ with (predictable) quadratic variation
\[\langle Z\rangle_{k} \ :=\ \sum_{j=0}^{k\wedge \tau_n-1}\E\left[(Z_{j+1}-Z_j)^2\big|\mathcal{F}_j\right] \eq \sum_{j=0}^{k\wedge\tau_n-1} \frac{1}{(X_j-1)^2} \qquad a.s.\] Applying Doob's optional sampling theorem to the martingale $Z_{k}^2-\langle Z\rangle_{k}$ yields \begin{align} \label{3/X} \E\left[Z^2_{\rho_{c,n}}\right] \eq \E\left[\left\langle Z\right\rangle_{\rho_{c,n}}\right]\eq \E\left[\sum_{j=0}^{\rho_{c,n}-1}\frac{1}{(X_j-1)^2}\right]\loe \E\left[\sum_{k=X_{\rho_{c,n}-1}}^\infty \frac{1}{(k-1)^{2}}\right] \end{align} and, therefore, because of $X_{\rho_{c,n}-1}=N_n(t_{c,n})$ a.s., \[\E\left[Z^2_{\rho_{c,n}}\right]\loe \E\left[\frac{4}{N_n(t_{c,n})}\right].\] By Lemma \ref{N} and dominated convergence, the right-hand term converges to $0$ as $n\to\infty$, implying \[\sum_{j=0}^{\rho_{c,n}-1}\left(W_j-\frac{1}{X_j}\right)\eq Z_{\rho_{c,n}} +\OO_P\left(\frac{4}{X_{\rho_{c,n}-1}}\right)\eq\oo_P(1)\] as $n\to\infty$. \enlargethispage{2\baselineskip} Finally, the quantity \(\sum_{j=0}^{\rho_{c,n}-1}W_j-t_{c,n}\) is the residual time the process $N_n$ spends in the state $N_n(t_{c,n})$. By the memorylessness of the exponential distribution, this residual time is exponential with parameter $N_n(t_{c,n})-1$. Thus, in view of Lemma \ref{N}, the residual time converges to $0$ in probability. This finishes the proof. \end{proof}
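The rate parameter $b-1$ quoted from \cite{Pit99} can also be recovered directly from the merger rates: assuming the standard representation $\lambda_{b,k}=\int_0^1 x^{k-2}(1-x)^{b-k}\,dx=(k-2)!\,(b-k)!/(b-1)!$ for the Bolthausen-Sznitman coalescent (a background fact not restated here), the total jump rate of the block counting process from state $b$ is $\sum_{k=2}^b\binom{b}{k}\lambda_{b,k}=b-1$. A short exact-arithmetic check in Python:

\begin{verbatim}
from fractions import Fraction
from math import comb, factorial

def total_rate(b):
    # sum_{k=2}^{b} binom(b,k) * (k-2)!(b-k)!/(b-1)!
    return sum(Fraction(comb(b, k) * factorial(k - 2) * factorial(b - k),
                        factorial(b - 1))
               for k in range(2, b + 1))

for b in (2, 5, 20, 100):
    print(b, total_rate(b))    # prints b - 1 in each case
\end{verbatim}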
\begin{lem}\label{M} For the number of external branches at time $t_{c,n}$, we have the following results: \begin{enumerate} \item For $c>1$,
\[\E\left[M_n(t_{c,n})\left.\right|N_n\right]\ \overset{d}{\longrightarrow}\ c\,E\] as $n\to\infty$, where $E$ denotes a standard exponential random variable. \item For $\e>0$, as $c\to\infty,$
\[\limsup_{n\to\infty} \PP\left(\left|M_n(t_{c,n})\ -\ \E\left[M_n(t_{c,n})\left|N_n\right.\right]\right|>c^{1/2+\e}\right)\ \rightarrow \ 0 \] as well as \[\limsup_{n\to\infty}\PP\left(M_n(t_{c,n})> c^{1+\e}\right) \ \rightarrow \ 0 \qquad \text{ and } \qquad \limsup_{n\to\infty}\PP\left(M_n(t_{c,n})<c^{1-\e}\right)\ \rightarrow \ 0. \] \end{enumerate} \end{lem}
\begin{proof} (i) Using the representation from Lemma \ref{Lambda} (i) and a Taylor expansion as in \eqref{Mac}, we get \begin{align*}
\E\left[Y_{\rho_{c,n}-1}\left.\right|N_n\right] \eq X_{\rho_{c,n}-1} \exp{\left(-\sum_{j=1}^{\rho_{c,n}-1}\frac{1}{X_j}+\OO_P\left(X_{\rho_{c,n}-1}^{-1}\right)\right)} \end{align*} as $n\to\infty$. Recall that the definition of $\rho_{c,n}$ entails $N_n(t_{c,n})=X_{\rho_{c,n}-1}$ and $M_n(t_{c,n})=Y_{\rho_{c,n}-1}$ a.s. Thus, we obtain \begin{align}\label{EW_Y}
\E\left[M_n(t_{c,n})\left.\right|N_n\right] \eq N_n(t_{c,n}) \exp{\left(-\sum_{j=1}^{\rho_{c,n}-1}\frac{1}{X_j}+\OO_P\left(N_n(t_{c,n})^{-1}\right)\right)}. \end{align} From Lemma \ref{sum_bs} and Lemma \ref{N}, it follows \begin{align*}
\E\left[M_n(t_{c,n})\left.\right|N_n\right] \eq N_n(t_{c,n}) \exp{\left(-t_{c,n}+\oo_P\left(1\right)\right)}. \end{align*} Hence, Lemma \ref{N} implies our claim.
\enlargethispage{\baselineskip} (ii) Chebyshev's inequality and Lemma \ref{Lambda} (ii) provide \begin{align*}
&\PP\left(\left|M_n(t_{c,n})-\E\left[M_n(t_{c,n})\left|N_n\right.\right]\right|>c^{1/2+\e}\right)\\[1ex]
& \hspace{6pc}\eq \E\left[\PP\left(\left|M_n(t_{c,n})-\E\left[M_n(t_{c,n})\left|N_n\right.\right]\right|>c^{1/2+\e}\big|N_n\right)\right]\\[1ex]
&\hspace{6pc}\loe \E\left[\frac{\V\left(M_n(t_{c,n})\left.\right|N_n\right)}{c^{1+2\e}}\wedge 1\right]\\[1ex]
&\hspace{6pc}\loe \E\left[\frac{\E\left(M_n(t_{c,n})\left.\right|N_n\right)}{c^{1+2\e}}\wedge 1\right]. \end{align*} From statement (i) it follows that \begin{equation*}
\limsup_{n\to\infty}\PP\left(\left|M_n(t_{c,n})-\E\left[M_n(t_{c,n})\left|N_n\right.\right]\right|>c^{1/2+\e}\right) \loe \E\left[\frac{cE}{c^{1+2\e}}\wedge 1\right] \loe c^{-2\e}\,, \end{equation*} which entails the first claim.
Similarly, Markov's inequality yields \begin{align*}
\limsup_{n\to\infty}\PP\left(M_n(t_{c,n})> c^{1+\e}\right)\loe \limsup_{n\to\infty}\E\left[\frac{\E\left[M_n(t_{c,n})\left|N_n\right.\right]}{c^{1+\e}}\wedge 1\right] \loe c^{-\e} \end{align*} giving the second claim.
Furthermore, we have \begin{align*}
\PP\big(M_n(t_{c,n})< c^{1-\e}\big) \loe \ & \PP\left(\E\left[M_n(t_{c,n})\left|N_n\right.\right]<2 c^{1-\e} \right) \\[1ex]
& \hspace{6pc} + \ \PP\left(\left|M_n(t_{c,n})-\E\left[M_n(t_{c,n})\left|N_n\right.\right]\right|> c^{1-\e}\right) \end{align*} and, consequently, in view of part (i),
\[\limsup_{n\to\infty}\PP\big(M_n(t_{c,n})< c^{1-\e}\big) \;\leq\; \PP\left(E<2c^{-\e} \right) +\, \limsup_{n\to\infty}\PP\left(\left|M_n(t_{c,n})-\E\left[M_n(t_{c,n})\left|N_n\right.\right]\right|> c^{1-\e}\right).\] The first right-hand term converges to $0$ as $c\to\infty$. Also, as we may assume $\e<1/2$, the second term goes to $0$ in view of the first claim of part (ii). \end{proof}
With these preparations, we now turn to the proof of Theorem \ref{bs v2}.
\begin{proof}[Proof of Theorem \ref{bs v2}] The strategy of this proof resembles that of Theorem \ref{reg_var v2}. However, additional care is required to separate the impact of the parts $\widecheck{T}_i^n$ and $\widehat{T}_i^n$. For this purpose, we consider the functions \[g(x_1,\ldots,x_\ell)\ :=\ \exp{\left(i\left(\theta_1 x_1+\cdots+\theta_\ell x_\ell\right)\right)} \qquad \text{ and } \qquad h(x)\ :=\ \exp{\left(i\left(\theta_1+\cdots+\theta_\ell\right)x\right)},\] where $\theta_i\in\R$ for $1\leq i\leq n$. It is sufficient to prove \[\E\left[g\left(\log{\log{(n)}}\left(T_{\left\langle 1\right\rangle}^n-t_n\right),\ldots,\log{\log{(n)}}\left(T_{\left\langle \ell\right\rangle}^n-t_n\right)\right)\right]\ \longrightarrow \ \E\left[g\left(U_1-G,\ldots,U_\ell-G\right)\right]\] as $n\to\infty$. We bound the difference of the terms on both sides. Recalling \[t_n \eq t_{c,n}+\frac{\log{c}}{\log{\log{n}}},\] we see that, on the event $\left\{M_n(t_{c,n})\geq\ell\right\}$, it holds $T_{\left\langle i\right\rangle}^n\eq\widehat{T}_{\left\langle i\right\rangle}^n+t_{c,n}$ and, therefore, \begin{align}\label{newnew} \log\log{(n)}\big(T^n_{\left\langle j \right\rangle}-t_n\big)\eq \left(\log\log{(n)}\,\widehat{T}^n_{\left\langle j \right\rangle}-\log{M_n(t_{c,n})}\right)+\log{\frac{M_n(t_{c,n})}{c}} \end{align} for $1\leq j\leq\ell$. In conjunction with the independence of $\left(U_1,\ldots,U_\ell\right)$ and the Gumbel random variable $G$, it follows that \begin{align}\label{split}\nonumber
&\left|\E\left[g\left(\log{\log{(n)}}\left(T_{\left\langle 1\right\rangle}^n-t_n\right),\ldots,\log{\log{(n)}}\left(T_{\left\langle \ell\right\rangle}^n-t_n\right)\right)\right]\, -\, \E\left[g\left(U_1-G,\ldots,U_\ell-G\right)\right]\right| \\[2ex] &\qquad\loe
\left|\E\left[g\left(V_{c,n}\right)h\left(\log{\frac{M_n(t_{c,n})}{c}}\right)\right]\, -\, \E\left[g\left(U_1,\ldots,U_\ell\right)\right]\E\left[h\left(-G\right)\right]\right|\\[1ex]\nonumber &\qquad\quad\qquad + \ 2\,\PP\left(M_n(t_{c,n})<\ell\right), \end{align} where, in view of \eqref{newnew}, we now set \[V_{c,n}\ :=\ \left(\log\log{(n)}\, \widehat{T}^n_{\left\langle 1 \right\rangle}-\log{M_n(t_{c,n})},\ldots,\log\log{(n)}\, \widehat{T}^n_{\left\langle \ell \right\rangle}-\log{M_n(t_{c,n})}\right).\] Let us estimate the first term on the right-hand side of \eqref{split}.
We have \begin{align*}
\Big|\E\Big[g&\left(V_{c,n}\right)\,h\left(\log{\frac{M_n(t_{c,n})}{c}}\right)\Big]\ -\ \E\left[g\left(U_1,\ldots,U_\ell\right)\right]\E\left[h\left(-G\right)\right]\Big|\\[2ex]
\loe &\ \left|\E\left[g\left(V_{c,n}\right)h\left(\log{\frac{M_n(t_{c,n})}{c}}\right)\right]\ -\ \E\left[g\left(U_1,\ldots,U_\ell\right)\right]\E\left[h\left(\log{\frac{M_n(t_{c,n})}{c}}\right)\right]\right|\\[2ex]
&\qquad +\ \left|\E\left[ h\left(\log{\frac{M_n(t_{c,n})}{c}}\right)\right]\ -\ \E\left[ h\left(\log{\frac{\E\left[ M_n(t_{c,n})\left|N_n\right.\right]}{c}}\right)\right] \right|\\[2ex]
&\qquad +\ \left|\E\left[ h\left( \log{\frac{\E\left[M_n(t_{c,n})\left|N_n\right.\right]}{c}}\right)\right] \ -\ \E\left[ h\left(-G\right)\right] \right|\\[2ex] & =: \Delta'_{c,n}+\Delta''_{c,n}+\Delta'''_{c,n} \qquad \text{(say).} \end{align*}
We bound $\Delta'_{c,n}, \Delta''_{c,n}$ and $\Delta'''_{c,n}$ separately. For $\Delta'_{c,n}$, we first consider conditional expectations. For $c>1$, we have, by means of Lemma \ref{lem:key2} (i) in the last step,
\begin{align*}
&\bigg|\E\bigg[g\left(V_{c,n}\right)h\left(\log{\frac{M_n(t_{c,n})}{c}}\right)\,\bigg|\,N_n(t_{c,n})\bigg]\ -\ \E\left[g\left(U_1,\ldots,U_\ell\right)\right]\E\left[h\left(\log{\frac{M_n(t_{c,n})}{c}}\right)\,\bigg|\,N_n(t_{c,n})\right]\bigg|\\[2.5ex]
& \leq \sum_{\sqrt{c}\leq y\leq c^2}\left|\Big(\E\left[\left.g\left(V_{c,n}\right)\,\right|\,N_n(t_{c,n}),\,M_n(t_{c,n})=y\right] \ -\ \E\left[g\left(U_1,\ldots,U_\ell\right)\right]\Big)h\left(\log{\frac{y}{c}}\right)\right|\\
& \quad\qquad\qquad \cdot\PP\left(M_n(t_{c,n})=y\,\big|\,N_n(t_{c,n})\right) \\[1.5ex]
&\quad\qquad +\ 2\,\PP\left(M_n(t_{c,n})<\sqrt{c}\,\big|\,N_n(t_{c,n})\right) +\ 2\,\PP\left(M_n(t_{c,n})>c^2\,\big|\,N_n(t_{c,n})\right)\\[2ex]
& \leq \max_{\sqrt{c}\leq y\leq c^2}\big|\E\left[\left.g\left(V_{c,n}\right)\,\right|\,N_n(t_{c,n}),\,M_n(t_{c,n})=y\right]\ -\ \E\left[g\left(U_1,\ldots,U_\ell\right)\right]\big|\\[1.5ex]
& \quad \qquad +\ 2\,\PP\left(M_n(t_{c,n})<\sqrt{c}\,|\,N_n(t_{c,n})\right)\ +\ 2\,\PP\left(M_n(t_{c,n})>c^2\,|\,N_n(t_{c,n})\right)\\[2ex]
& \leq \max_{\sqrt{c}\leq y\leq c^2}\big|\E\left[g\left(U_{1,y}-\log{y},\ldots,U_{\ell,y}-\log{y}\right)\right]\ -\ \E\left[g\left(U_1,\ldots,U_\ell\right)\right]\big|+\oo_P(1)\\[1.5ex]
& \quad \qquad +\ 2\,\PP\left(M_n(t_{c,n})<\sqrt{c}\,|\,N_n(t_{c,n})\right)\ +\ 2\,\PP\left(M_n(t_{c,n})>c^2\,|\,N_n(t_{c,n})\right) \end{align*} as $n\to\infty$. Without loss of generality, we may assume that the right-hand $\oo_P(\cdot)$-term is bounded by 1. Hence, taking expectations, we obtain via dominated convergence \begin{align} \nonumber
\Delta'_{c,n}& \loe \max_{\sqrt{c}\leq y\leq c^2}\big|\E\left[g\left(U_{1,y}-\log{y},\ldots,U_{\ell,y}-\log{y}\right)\right]\ -\ \E\left[g\left(U_1,\ldots,U_\ell\right)\right]\big|\ +\ \oo(1)\\[1.5ex]\nonumber & \quad\qquad \qquad +\ 2\,\PP\left(M_n(t_{c,n})<\sqrt{c}\right)\ +\ 2\,\PP\left(M_n(t_{c,n})>c^2\right). \end{align}
Second, observe that the function $h(\log x)$ is Lipschitz on the interval $[c^{-1/4},\infty)$ with Lipschitz constant $|\theta_1+\cdots+\theta_\ell|c^{1/4}$. Thus, \begin{align}\label{Lipschitz} \nonumber
\Delta''_{c,n}
\loe &\ \bigg|\E\bigg[h\left(\log{\frac{M_n(t_{c,n})}{c}}\right)\ -\ h\left(\log{\frac{\E\left[ M_n(t_{c,n})\left|N_n\right.\right] }{c}}\right)\,;\,M_n(t_{c,n})\wedge\E\left[ M_n(t_{c,n})\left|N_n\right.\right]\geq c^{3/4}\bigg] \bigg|\\[1.5ex]\nonumber
& \qquad +\ 2\,\PP\left(M_n(t_{c,n})< c^{3/4} \right)\ +\ 2\,\PP\left(\E\left[ M_n(t_{c,n})\left|N_n\right.\right] < c^{3/4} \right)\\[2ex]
\loe &\ 2\,\PP\left(\left|M_n(t_{c,n})\ -\ \E\left[\left.M_n(t_{c,n})\right|N_n\right]\right|> c^{2/3} \right)\ +\ \left|\theta_1+\cdots+\theta_\ell\right|c^{1/4-1/3} \\[2ex]\nonumber
&\qquad +\ 2\,\PP\left(M_n(t_{c,n}) < c^{3/4} \right)\ +\ 2\,\PP\left(\E\left[ M_n(t_{c,n})\left|N_n\right.\right] < c^{3/4} \right). \end{align} Last, Lemma \ref{M} (i) provides the convergence of $\Delta'''_{c,n}$ to $0$ as $n\to\infty$. Consequently, combining equations \eqref{split} to \eqref{Lipschitz}, using Lemma \ref{M} and grouping terms yields \begin{align*}
&\limsup_{n\to\infty}\bigg|\E\bigg[g\left(V_{c,n}\right)h\left(\log{\frac{M_n(t_{c,n})}{c}}\right)\bigg]\ -\ \E\left[g\left(U_1,\ldots,U_\ell\right)\right]\E\left[h\left(-G\right)\right]\bigg|\\[1.5ex]
& \leq \max_{\sqrt{c}\leq y\leq c^2}\big|\E\left[g\left(U_{1,y}-\log{y},\ldots,U_{\ell,y}-\log{y}\right)\right]\ -\ \E\left[g\left(U_1,\ldots,U_\ell\right)\right]\big|\ \\[1.5ex] &\qquad + 2\,\limsup_{n\to\infty}\PP\left(M_n(t_{c,n})<\ell\right) + 2\,\limsup_{n\to\infty}\PP\left(M_n(t_{c,n})<\sqrt{c}\right) + 2\,\limsup_{n\to\infty}\PP\left(M_n(t_{c,n})<c^{3/4}\right) \\[1.5ex]
& \qquad \qquad + 2\,\limsup_{n\to\infty}\PP\left(M_n(t_{c,n})>c^2\right) + 2 \limsup_{n\to\infty} \PP\left(\left|M_n(t_{c,n}) - \E\left[\left.M_n(t_{c,n})\right|N_n\right]\right|> c^{2/3}\right) \\[1.5ex]
&\qquad \qquad\qquad + 2\left(1-e^{-c^{-1/4}}\right) + \left|\theta_1+\cdots+\theta_\ell\right|c^{-1/12}. \end{align*}
Finally, taking the limit $c\to\infty$, the right-hand terms converge to $0$ in view of Lemma \ref{lem:key2} (ii) and Lemma \ref{M}. This finishes the proof. \end{proof}
\phantomsection
\textbf{Acknowledgments.} We are grateful to the anonymous referees for their insightful comments, which allowed us to improve the paper's presentation.
\end{document}
\begin{document}
\title[]{The dyadic fractional diffusion kernel as a central limit}
\author[]{Hugo Aimar} \email{[email protected]}
\author[]{Ivana G\'{o}mez} \email{[email protected]}
\author[]{Federico Morana} \email{[email protected]}
\thanks{The research was supported by CONICET, ANPCyT (MINCyT) and UNL}
\subjclass[2010]{Primary 60F05,60G52, 35R11}
\keywords{central limit theorem; dyadic diffusion; fractional diffusion; stable processes; wavelet analysis}
\begin{abstract} In this paper we obtain the fundamental solution kernel of dyadic diffusions in $\mathbb{R}^+$ as a Central Limit of dyadic mollification of iterations of stable Markov kernels. The main tool is provided by the substitution of classical Fourier analysis by Haar wavelet analysis. \end{abstract}
\maketitle
\section{Introduction} The analysis of solutions of nonlocal problems in PDE has received new impetus after the remarkable results obtained by Caffarelli and Silvestre \cite{CaSi07}. For a probabilistic view of these problems see \cite{Val09}, \cite{Valdinocibook16}. Recently, in \cite{AcAimFCAA},\cite{AcAimCzech},\cite{AiBoGo13}, a dyadic version of the fractional derivative was introduced and an associated diffusion was solved.
The classical diffusion process, described by the heat equation $\tfrac{\partial u}{\partial t}=\Delta u$, where $\Delta$ denotes the space Laplacian, has as a fundamental solution the Weierstrass kernel $W_t(x)= (4\pi t)^{-d/2}e^{-\abs{x}^2/4t}$, which is the central limit distribution, for $n\to\infty$, of $\sqrt{n}^{-1}\sum_{j=1}^{n}X_j$, where the $X_j$'s are identically distributed independent random variables with finite variance $t$ and vanishing mean value.
For our later analysis it is convenient to write the convergence in distribution of $n^{-1/2}\sum_{j=1}^n X_j$ to $W_t$ in terms of the common distribution of the random variables $X_j$, $j\in \mathbb{N}$. For the sake of simplicity let us assume that this distribution is given by the density $g$ in $\mathbb{R}^d$. In other words, $\mathscr{P}(\{X_j\in B\})=\int_Bg(x)dx$ where $B$ is a Borel set in $\mathbb{R}^d$. Hence, since the random variables $X_j$ are independent, the distribution of $S_n=\sum_{j=1}^nX_j$ is given by the convolution $g^n$ of $g$ $n$-times. Precisely, with $g^n=g\ast\cdots\ast g$ $n$-times, we have that $\mathscr{P}(\{S_n\in B\})=\int_Bg^n(x)dx$. On the other hand, $\mathscr{P}(\{n^{-1/2}\sum_{j=1}^nX_j\in B\})=\mathscr{P}(\{S_n\in\sqrt{n}B\})=\int_B(g^n)_{\sqrt{n}}(x)dx$, with $(g^n)_{\sqrt{n}}$ the mollification of $g^n$ by $\sqrt{n}$ in $\mathbb{R}^d$. Precisely, $(g^n)_{\sqrt{n}}(x)=n^{d/2}g^n(\sqrt{n}x)$. These observations allow us to read the CLT as a vague or Schwartz weak convergence of $(g^n)_{\sqrt{n}}(x)$ to $W_t(x)$ when $n\to\infty$. For every $f$ continuous and compactly supported in $\mathbb{R}^d$, we have that $\int_{\mathbb{R}^d}(g^n)_{\sqrt{n}}(x)f(x) dx\to\int_{\mathbb{R}^d}W_t(x)f(x) dx$ as $n\to\infty$. Since we shall be working in a non-translation invariant setting, to get the complete analogy we still rewrite the CLT as the weak convergence of the sequence of Markov kernels $K^n_{\sqrt{n}}(x,y)=(g^n)_{\sqrt{n}}(x-y)$ to the Markov Weierstrass kernel $W_t(x-y)$. The kernel $K^n_{\sqrt{n}}(x,y)=\idotsint_{(\mathbb{R}^{d})^{n-1}} g_{\sqrt{n}}(x-x_1)g_{\sqrt{n}}(x_1-x_2)\cdots g_{\sqrt{n}}(x_{n-1}-y)dx_1dx_2\cdots dx_{n-1}$ corresponds to the kernel of the $n$-th iteration of the operator $T_{\sqrt{n}}f(x)=\int_{\mathbb{R}^d}g_{\sqrt{n}}(x-y)f(y) dy$. The difference in the rhythms of the upper index $n$ of the iteration and the lower index $\sqrt{n}$ of mollification is related to the property of finite variance of $g$. In the problems considered here the Markov kernels involved have heavy tails and the central equilibrium takes place for different proportions between iteration and mollification. There are many books where the classical CLT and some of its extensions are masterfully exposed. Let us refer to \cite{Chungbook} as one of them.
In this paper we shall be concerned with diffusions of fractional type associated with dyadic differentiation in the space. The basic setting for our diffusions is $\mathbb{R}^+=\{x\in \mathbb{R}: x>0\}$. In \cite{AcAimCzech} it is proved that the function $u(x,t)$ defined for $x\in \mathbb{R}^+$ and $t>0$, given by \begin{equation*} u(x,t)=\sum_{h\in\mathscr{H}}e^{-t\abs{I(h)}^{-s}}\proin{u_0}{h}h(x), \end{equation*} with $\mathscr{H}$ the standard Haar system in $L^2(\mathbb{R}^+)$, $I(h)$ the support of $h$ and $\proin{u_0}{h}=\int_{\mathbb{R}^+}u_0(x)h(x) dx$, solves the problem \begin{equation*} \left \{\begin{array}{ll} \frac{\partial u}{\partial t}=D^{s} u,\, & x\in\mathbb{R}^{+}, t>0;\\
u(x,0)=u_0(x),\, & x\in \mathbb{R}^+. \end{array} \right. \end{equation*} with \begin{equation}\label{eq:derivativefractionalDs} D^{s}g(x)=\int_{y\in \mathbb{R}^+}\frac{g(x)-g(y)}{\delta (x,y)^{1+s}} dy \end{equation} for $0<s<1$ and $\delta(x,y)$ the dyadic distance in $\mathbb{R}^{+}$ (see Section~\ref{sec:dyadycAnalysis} for definitions). The main point in the proof of the above statement is provided by the spectral analysis for $D^s$ in terms of Haar functions. In fact, $D^s h=\abs{I(h)}^{-s}h$. When $0<s<1$, since $h$ is a Lipschitz function with respect to $\delta$, the integral in \eqref{eq:derivativefractionalDs} defining $D^sh$ is absolutely convergent. For the case $s=1$ this integral is generally not convergent; nevertheless, the operator $D^1$ is still well defined on the Sobolev type space of those functions in $L^2(\mathbb{R}^+)$ such that the Haar coefficients $\proin{f}{h}$ satisfy the summability condition $\sum_{h\in\mathscr{H}}\tfrac{\abs{\proin{f}{h}}^2}{\abs{I(h)}^2}<\infty$. For those functions $f$ the first order nonlocal derivative is given by $D^1 f=\sum_{h\in\mathscr{H}}\tfrac{\proin{f}{h}}{\abs{I(h)}}h$. Moreover, with $u_0\in L^2(\mathbb{R}^+)$, the function \begin{equation*} u(x,t)=\int_{\mathbb{R}^+}K(x,y;t)u_0(y) dy, \end{equation*} with \begin{equation}\label{eq:NucleoHaarDifusiones} K(x,y;t)=\sum_{h\in\mathscr{H}}e^{-t\abs{I(h)}^{-1}}h(x)h(y), \end{equation} solves \begin{equation*} (P) \left \{\begin{array}{ll} \frac{\partial u}{\partial t}=D^{1} u,\, & x\in\mathbb{R}^{+}, t>0;\\
u(x,0)=u_0(x),\, & x\in \mathbb{R}^+. \end{array} \right. \end{equation*} Notice that for each $t>0$ the function of $x\in \mathbb{R}^+$, $u(x,t)$ is in the dyadic Sobolev space and its $D^1$ space derivative belongs to $L^2(\mathbb{R}^+)$.
The kernel $K(\cdot,\cdot;t)$ for fixed $t>0$ is not a convolution kernel. Nevertheless it can be regarded as a Markov transition kernel which, as we shall prove, depends only on $\delta(x,y)$.
In this note we prove that the Markov kernel family $K(\cdot,\cdot;t)$ is the central limit of adequate simultaneous iteration and mollification of elementary dyadic stable Markov kernels. We shall precisely define stability later, but heuristically it means that the kernel behaves at infinity like a power law of the dyadic distance. The main result is contained in Theorem~\ref{thm:mainresult} in Section~\ref{sec:mainresult}. The basic tool for the proof of our results is the Fourier Haar analysis induced on $\mathbb{R}^+$ by the orthonormal basis of Haar wavelets.
The paper is organized as follows. In Section~\ref{sec:dyadycAnalysis} we introduce the basic facts from dyadic analysis on $\mathbb{R}^+$, in particular the Haar system as an orthonormal basis for $L^2(\mathbb{R}^+)$ and as an unconditional basis for $L^p(\mathbb{R}^+)$, $1<p<\infty$. Section~\ref{sec:Markovdyadickernels} is devoted to introducing the Markov type dyadic kernels. The spectral analysis of the integral operators generated by Markov type dyadic kernels is considered in \S~\ref{sec:spectralanalysis}. Section~\ref{sec:stability} is devoted to introducing the concept of stability and to proving that the kernel in \eqref{eq:NucleoHaarDifusiones} is $1$-stable with parameter $\tfrac{2}{3}t$. The iteration and mollification operators and their relation with stability are studied in Section~\ref{sec:iterationmollification}. Finally, in Section~\ref{sec:mainresult} we state and prove our main result: spectral and $L^p(\mathbb{R}^+)$ ($1<p<\infty$) convergence to the solution of (P).
\section{Some basic dyadic analysis}\label{sec:dyadycAnalysis} Let $\mathbb{R}^+$ denote the set of nonnegative real numbers. A dyadic interval is a subset of $\mathbb{R}^+$ that can be written as $I=I^j_k=[k2^{-j},(k+1)2^{-j})$ for some integer $j$ and some nonnegative integer $k$. The family $\mathcal{D}$ of all dyadic intervals can be organized by levels of resolution as follows; $\mathcal{D}=\cup_{j\in \mathbb{Z}}\mathcal{D}^j$, where $\mathcal{D}^j= \set{I^j_k: k=0,1,2,\ldots}$. The dyadic distance induced on $\mathbb{R}^+$ by $\mathcal{D}$ and the Lebesgue measure is defined by $\delta(x,y)=\inf\set{\abs{I}: I\in \mathcal{D}, x\in I, y\in I}$ where $\abs{E}$ denotes the one dimensional Lebesgue measure of $E$. It is easy to check that $\delta$ is a distance (ultra-metric) on $\mathbb{R}^+$
and that, since $\abs{x-y}=\inf\{\abs{J}: x\in J, y\in J, J=[a,b), 0\leq a<b<\infty\}$, $\abs{x-y}\leq\delta(x,y)$. Of course, the two distances are not equivalent. Pointwise the function $\delta(x,y)$ is larger than the usual distance $d(x,y)=\abs{x-y}$. Set $B_\delta(x,r)=\{y\in \mathbb{R}^+: \delta(x,y)<r\}$ to denote the $\delta$-ball centered at $x$ with positive radius $r$. Then $B_\delta(x,r)$ is the largest dyadic interval containing $x$ with Lebesgue measure less than $r$. For $r>0$, let $j\in \mathbb{Z}$ be such that $2^j<r\leq 2^{j+1}$. Then, for $x\in \mathbb{R}^+$, $B_\delta(x,r)=I$ with $x\in I\in\mathcal{D}$, $2^j=\abs{I}<r\leq 2^{j+1}$. So that $\tfrac{r}{2}\leq\abs{B_\delta(x,r)}<r$. This normality property of $(\mathbb{R}^+,\delta)$ equipped with Lebesgue measure shows that the $\delta$-Hausdorff dimension of intervals in $\mathbb{R}^+$ is one. In particular, the integral singularities that negative powers of $\delta$ and $d$ produce have the same orders. Precisely, for fixed $x\in \mathbb{R}^+$ the functions of $y\in \mathbb{R}^+$ defined by $\delta^{\alpha}(x,y)$ and $\abs{x-y}^\alpha$ have the same local and global integrability properties for $\alpha\in \mathbb{R}$.
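As a concrete illustration of this metric, the following minimal Python sketch (an added example, not part of the original text) computes $\delta(x,y)$ by refining a dyadic interval that contains both points; the resolution cut-off \texttt{max\_refine} is an assumption of the sketch.
\begin{verbatim}
import math

def dyadic_distance(x, y, max_refine=60):
    """delta(x, y): Lebesgue measure of the smallest dyadic interval
    [k 2^{-j}, (k+1) 2^{-j}) containing both x, y >= 0.
    Floating-point sketch, exact down to resolution 2**(-max_refine)."""
    if x == y:
        return 0.0
    # a dyadic interval [0, 2^m) that certainly contains both points
    length = 2.0 ** math.ceil(math.log2(max(x, y) + 1.0))
    for _ in range(max_refine):
        half = length / 2.0
        if math.floor(x / half) != math.floor(y / half):
            return length          # halving would separate x and y
        length = half
    return length

# delta dominates |x - y| and is not translation invariant:
print(dyadic_distance(0.5 - 1e-3, 0.5 + 1e-3))   # 1.0
print(dyadic_distance(1.0 - 1e-3, 1.0 + 1e-3))   # 2.0
\end{verbatim}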
\begin{lemma}\label{lemma:deltaintegrability} \quad \begin{enumerate}[(a)] \item The level sets $L(\lambda)=\{(x,y):\delta(x,y)=\lambda\}$ are empty if $\lambda$ is not an integer power of two. On the other hand $L(2^j)=\cup_{I\in\mathcal{D}^j}(I_l\times I_r)\cup (I_r\times I_l)$ with $I_l$ and $I_r$, the left and right halves of $I\in\mathcal{D}^j$. Hence, $\delta(x,y)=\sum_{j\in \mathbb{Z}}2^j\chi_{L(2^j)}(x,y)$.
\item For $x\in \mathbb{R}^+$ and $r>0$ we have, \begin{enumerate}[b-i)]
\item $\frac{c(\alpha)}{2^{1+\alpha}}r^{1+\alpha}\leq \int_{y\in B_{\delta}(x,r)} \delta^{\alpha}(x,y) dy \leq c(\alpha)r^{1+\alpha}$ for $\alpha>-1$ with $c(\alpha)=2^{-1}(1-2^{-(1+\alpha)})^{-1}$;
\item $\int_{B_{\delta}(x,r)} \delta^{\alpha}(x,y) dy= +\infty$ for $\alpha\leq -1$;
\item $\tilde{c}(\alpha)r^{1+\alpha}\leq\int_{\{y: \delta(x,y)\geq r\}} \delta^{\alpha}(x,y) dy\leq\frac{\tilde{c}(\alpha)}{2^{1+\alpha}} r^{1+\alpha}$ for $\alpha < -1$ with $\tilde{c}(\alpha)=2^{-1}(1-2^{1+\alpha})^{-1}$;
\item $\int_{\{y: \delta(x,y)\geq r\}} \delta^{\alpha}(x,y) dy= +\infty$ for $\alpha\geq -1$. \end{enumerate} \end{enumerate} \end{lemma} \begin{proof}[Proof of (a)] Let $j\in \mathbb{Z}$ fixed. Then $\delta(x,y)=2^j$ if and only if $x$ and $y$ belong to the same $I\in\mathcal{D}^j$, but they do not belong to the same half of $I$. In other words, $(x,y)\in I_l\times I_r$ or $(x,y)\in I_r\times I_l$.
\noindent\textit{Proof of (b).} Fix $x\in \mathbb{R}^+$. Take $0<a<b<\infty$. Then, from \textit{(a)}, \begin{align*} \int_{\{y\in B_\delta(x,b)\setminus B_\delta(x,a)\}}\delta^\alpha(x,y)dy &=\int_{\{y: a\leq \delta(x,y)<b\}}\delta^\alpha(x,y)dy\\ &= \sum_{\{j\in \mathbb{Z}: a\leq 2^j<b\}}\int_{\{y:\delta(x,y)=2^j\}}2^{\alpha j}dy\\ &=\frac{1}{2}\sum_{\{j\in \mathbb{Z}: a\leq 2^j<b\}}2^{(1+\alpha)j}\\ &=\frac{1}{2}S(\alpha;a,b). \end{align*} When $\alpha\geq -1$, then $S(\alpha;a,b)\to +\infty$ for $b\to\infty$, for every $a$. This proves \textit{(iv)}. When $\alpha\leq -1$, then $S(\alpha;a,b)\to+\infty$ for $a\to 0$, for every $b$. This proves \textit{(ii)}. For $\alpha>-1$, we have with $2^{j_0}\leq r<2^{j_0+1}$ that \begin{equation*} \int_{B_\delta(x,r)}\delta^\alpha(x,y)dy =\frac{1}{2}\lim_{a\to 0}S(\alpha;a,r)=\frac{1}{2}\sum_{j\leq j_0(r)}2^{(1+\alpha)j} =\frac{1}{2}\frac{1}{1-2^{-(1+\alpha)}}2^{(1+\alpha)j_0}=c(\alpha)2^{(1+\alpha)j_0}. \end{equation*} Hence \begin{equation*} \frac{c(\alpha)}{2^{1+\alpha}}r^{1+\alpha}\leq \int_{y\in B_{\delta}(x,r)} \delta^{\alpha}(x,y) dy \leq c(\alpha)r^{1+\alpha}. \end{equation*} For $\alpha<-1$ we have, with $2^{j_0}\leq r<2^{j_0+1}$, that \begin{equation*} \int_{\delta(x,y)\geq r}\delta^{\alpha}(x,y)dy=\frac{1}{2}\lim_{b\to\infty}S(\alpha;r,b)=\frac{1}{2}\sum_{j\geq j_0(r)}2^{(1+\alpha)j}= \frac{1}{2}\frac{1}{1-2^{1+\alpha}}2^{(1+\alpha)j_0}=\tilde{c}(\alpha)2^{(1+\alpha)j_0}, \end{equation*} so that \begin{equation*} \frac{\tilde{c}(\alpha)}{2^{1+\alpha}} r^{1+\alpha}\geq\int_{\{y: \delta(x,y)\geq r\}} \delta^{\alpha}(x,y) dy\geq\tilde{c}(\alpha)r^{1+\alpha}. \end{equation*} \end{proof}
The distance $\delta$ is not translation invariant. In fact, while for small positive $\varepsilon$, $\delta(\tfrac{1}{2}-\varepsilon,\tfrac{1}{2}+\varepsilon)=1$, $\delta(\tfrac{1}{2}+\tfrac{1}{2}-\varepsilon,\tfrac{1}{2}+\tfrac{1}{2}+\varepsilon)=2$. Neither is $\delta$ positively homogeneous. Nevertheless the next statement contains a useful property of dyadic homogeneity.
\begin{lemma}\label{lemma:deltahomogeneity} Let $j\in \mathbb{Z}$ be given. Then, for $x$ and $y$ in $\mathbb{R}^+$, $\delta(2^jx,2^jy)=2^j\delta(x,y)$. \end{lemma} \begin{proof} Notice first that since $x=y$ is equivalent to $2^jx=2^jy$, we may assume $x\neq y$. Since for $x$ and $y$ in $I\in \mathcal{D}$ we certainly have that $2^jx$ and $2^jy$ belong to $2^jI$, and the measure of $2^jI$ is $2^j$ times the measure of $I$, in order to prove the dyadic homogeneity of $\delta$, we only have to observe that the multiplication by $2^j$ as an operation on $\mathcal{D}$ preserves the order provided by inclusion. In particular $x$ and $y$ belong to $I$ but $x$ and $y$ do not belong to the same half $I_l$ or $I_r$ of $I$, if and only if $2^jx$ and $2^jy$ belong to $2^jI$ but $2^jx$ and $2^jy$ do not belong to the same half of $2^jI$. \end{proof}
As in the classical case of the Central Limit Theorem, Fourier Analysis will play an important role in our further development. The basic difference is that in our context the trigonometric expansions are substituted by the most elementary wavelet analysis, the one associated to the Haar system. Let us introduce the basic notation. Set $h^0_0(x)=\chi_{[0,1/2)}(x)-\chi_{[1/2,1)}(x)$ and, for $j\in \mathbb{Z}$ and $k=0,1,2,3,\ldots$, $h^j_k(x)=2^{j/2}h^0_0(2^jx-k)$. Notice that $h^j_k$ has $L^2$-norm equal to one for every $j$ and $k$. Moreover, $h^j_k$ is supported in $I=I^j_k\in \mathcal{D}^j$. Write $\mathscr{H}$ to denote the sequence of all those Haar wavelets. For $h\in\mathscr{H}$ we shall use the notation $I(h)$ to denote the interval $I$ in $\mathcal{D}$ for which $\supp h = I$. Also $j(h)$ is the only resolution level $j\in \mathbb{Z}$ such that $I(h)\in \mathcal{D}^j$.
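For later numerical illustrations it is convenient to have the Haar wavelets available in code; the following added Python sketch evaluates $h^j_k$ directly from the definition, and the midpoint-rule line at the end is only a sanity check of the normalisation.
\begin{verbatim}
def haar(j, k, x):
    """h^j_k(x) = 2^{j/2} h^0_0(2^j x - k), h^0_0 = chi_[0,1/2) - chi_[1/2,1).
    Supported on I^j_k = [k 2^{-j}, (k+1) 2^{-j}), L^2-normalised."""
    s = 2.0 ** j * x - k
    if 0.0 <= s < 0.5:
        return 2.0 ** (0.5 * j)
    if 0.5 <= s < 1.0:
        return -2.0 ** (0.5 * j)
    return 0.0

# midpoint-rule check that ||h^2_3||_{L^2}^2 = 1 on its support [3/4, 1):
n = 1 << 12
print(sum(haar(2, 3, 0.75 + (i + 0.5) * 0.25 / n) ** 2 for i in range(n)) * 0.25 / n)
\end{verbatim}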
The basic analytic property of the system $\mathscr{H}$ is that it is a basis: in fact, $\mathscr{H}$ is an orthonormal basis for $L^2(\mathbb{R}^+)$. In particular, for every $f\in L^2(\mathbb{R}^+)$ we have that in the $L^2$-sense $f=\sum_{h\in\mathscr{H}}\proin{f}{h}h$, where, as usual, for real valued $f$, $\proin{f}{h}=\int_{\mathbb{R}^+}f(x)h(x) dx$.
One of the most significant analytic properties of wavelets is its ability to characterize function spaces. For our purposes it will be useful to have in mind the characterization of all $L^p(\mathbb{R}^+)$ spaces for $1<p<\infty$.
\begin{theorem}[Wojtaszczyk \cite{Wojtasbook}]\label{thm:characterizationLp} For $1<p<\infty$ and some constants $C_1$ and $C_2$ we have \begin{equation} C_1\norm{f}_p\leq \norm{\left(\sum_{h\in\mathscr{H}}\abs{\proin{f}{h}}^2\abs{I(h)}^{-1}\chi_{I(h)}\right)^{1/2}}_p\leq C_2\norm{f}_p \end{equation} \end{theorem}
\section{Markov dyadic kernels defined in $\mathbb{R}^+$}\label{sec:Markovdyadickernels}
A real function $K$ defined in $\mathbb{R}^+\times \mathbb{R}^+$ is said to be a symmetric Markov kernel if $K$ is nonnegative, $K(x,y)=K(y,x)$ for every $x\in \mathbb{R}^+$ and $y\in \mathbb{R}^+$ and $\int_{\mathbb{R}^+} K(x,y) dy=1$ for every $x\in \mathbb{R}^+$. We are interested in kernels $K$ as above such that $K(x,y)$ depends only on the dyadic distance $\delta(x,y)$ between the points $x$ and $y$ in $\mathbb{R}^+$. The next lemma contains three ways of writing such kernels $K$. The first is just a restatement of the dependence on $\delta$ and the other two shall be used frequently in our further analysis. The lemma also includes relations between the coefficients and their basic properties.
\begin{lemma}\label{lemma:kerneldelta1} Let $K$ be a real function defined on $\mathbb{R}^+\times \mathbb{R}^+$. Assume that $K$ is nonnegative and depends only on $\delta$, i.e., $\delta(x,y)=\delta(x',y')$ implies $K(x,y)=K(x',y')$, with $\int_{\mathbb{R}^+} K(x_0,y)dy=1$ for some $x_0\in \mathbb{R}^+$. Then, with the notation introduced in Lemma~1~(a) for the level sets of $\delta$, we have \begin{enumerate}[(1)] \item $K=\sum_{j\in \mathbb{Z}}k_j\chi_{L(2^j)}$, $k_j\geq 0$, $\sum_{j\in \mathbb{Z}}k_j2^{j-1}=1$ and $K$ is a symmetric Markov kernel. \item The sequence $\overline{\alpha}=(\alpha_l=2^{-l}(k_{-l}-k_{-l+1}):l\in \mathbb{Z})$ belongs to $l^1(\mathbb{Z})$, $\sum_{l\in \mathbb{Z}}\alpha_l=1$ and the function $\varphi(s)=\sum_{l\in \mathbb{Z}}\alpha_l\varphi_l(s)$ with $\varphi_l(s)=2^{l}\chi_{(0,2^{-l}]}(s)$, provides a representation of $K$ in the sense that $\varphi(\delta(x,y))=K(x,y)$. Moreover, $\int_{\mathbb{R}^+}\abs{\varphi(s)}ds<\infty$ and $\int_{\mathbb{R}^+}\varphi(s)ds=1$. \item The function $\varphi(s)$ can also be written as $\varphi(s)=\sum_{j\in \mathbb{Z}}\Lambda_j(\varphi_{j+1}(s)-\varphi_j(s))$. \item\label{item:formulaerelated} The coefficients $\overline{k}=(k_j:j\in \mathbb{Z})$ in (1), $\overline{\alpha}=(\alpha_j:j\in \mathbb{Z})$ in (2) and $\overline{\Lambda}=(\Lambda_j:j\in \mathbb{Z})$ in (3) are related by the formulae \begin{enumerate}[(\ref{item:formulaerelated}.a)] \item $\alpha_j = \frac{k_{-j}-k_{-j+1}}{2^j}$
\item $k_j = \sum_{i=j}^{\infty}2^{-i}\alpha_{-i}$
\item $\Lambda_j = \sum_{l>j}\alpha_l $
\item $\alpha_j = \Lambda_{j-1}-\Lambda_j $
\item $\Lambda_j = \tfrac{1}{2}\left(-k_{-j}2^{-j}+\sum_{l<-j}k_l2^l\right) $
\item $k_j = -2^{-j}\Lambda_{-j}+\sum_{i\geq j+1}2^{-i}\Lambda_{-i}$. \end{enumerate}
\item\label{item:propertiessequences} Some relevant properties of the sequences $\overline{k}$, $\overline{\alpha}$ and $\overline{\Lambda}$ are the following. \begin{enumerate}[(\ref{item:propertiessequences}.a)] \item $\overline{\alpha}\in l^1(\mathbb{Z})$; \item $\sum_{l\leq j}\alpha_l2^l\geq 0$ for every $j\in \mathbb{Z}$; \item $\abs{\alpha_l}\leq 2$ for every $l\in \mathbb{Z}$; \item $\lim_{j\to-\infty}\Lambda_j=1$; \item $\lim_{j\to+\infty}\Lambda_j=0$; \item $\sum_{l\leq j-1}\Lambda_l2^l\geq\Lambda_j2^j$ for every $j\in \mathbb{Z}$; \item $\sup_j\Lambda_j=1$; \item $\inf_j\Lambda_j\geq -1$; \item if $\overline{k}$ is decreasing then also $\overline{\Lambda}$ is decreasing. \end{enumerate} \end{enumerate} \end{lemma} \begin{proof}[Proof of (1)]
Since $K$ depends only on $\delta$, then the level sets for $\delta$ are level sets for $K$. Hence $K$ is constant, say $k_j\geq 0$, in $L(2^j)$ for each $j\in \mathbb{Z}$. Notice that the section of $L(2^j)$ at any $x\in \mathbb{R}^+$ has measure $2^{j-1}$, no matter what is $x$. In fact, $\left. L(2^j)\right|_{x}=\{y\in \mathbb{R}^+:(x,y)\in L(2^j)\}=\{y\in \mathbb{R}^+:\delta(x,y)=2^j\}=I$, where $I\in\mathcal{D}$ is the brother of the dyadic interval $J$ of level $j-1$ such that $x\in J$. Hence $\abs{\left. L(2^j)\right|_{x}}=2^{j-1}$. With the above considerations, since $\int_{\mathbb{R}^+}K(x_0,y)dy=1$, we see that \begin{align*} 1&=\int_{\mathbb{R}^+}K(x_0,y)dy=\sum_{j\in \mathbb{Z}}k_j\int_{\mathbb{R}^+}\chi_{L(2^j)}(x_0,y)dy\\
&=\sum_{j\in \mathbb{Z}}k_j\abs{\left. L(2^j)\right|_{x_0}}=\sum_{j\in \mathbb{Z}}k_j 2^{j-1}\\
&=\sum_{j\in \mathbb{Z}}k_j\abs{\left. L(2^j)\right|_{x}}=\int_{\mathbb{R}^+}K(x,y)dy. \end{align*} Then $K$ is a Markov kernel and the series $\sum_{j\in \mathbb{Z}}k_j2^{j-1}$ converges to $1$. The symmetry of $K$ is clear.
\textit{Proof of (2).} Since $\abs{\alpha_l}\leq 2^{-l}k_{-l}+2^{-l}k_{-l+1}$, the fact that $\overline{\alpha}$ belongs to $l^1(\mathbb{Z})$ follows from the fact that $\sum_{j\in \mathbb{Z}}k_j2^j=2$ proved in (1). On the other hand, \begin{equation*} \sum_{l\in \mathbb{Z}}\alpha_l=\sum_{l\in \mathbb{Z}}k_{-l}2^{-l}-\sum_{l\in \mathbb{Z}}k_{-l+1}2^{-l}=2-1=1. \end{equation*} Let us now check that $\varphi(\delta(x,y))=K(x,y)$. Since $\delta(x,y)$ is an integer power of two and $k_j\to 0$ as $j\to\infty$, we have \begin{align*} \varphi(\delta(x,y)) &=\sum_{l\in \mathbb{Z}}\alpha_l\varphi_l(\delta(x,y))\\ &= \sum_{l\in \mathbb{Z}}\alpha_l 2^l\chi_{(0,2^{-l}]}(\delta(x,y))\\ &=\sum_{l\leq\log_2\tfrac{1}{\delta(x,y)}}2^{-l}(k_{-l}-k_{-l+1})2^l\\ &= \sum_{j\geq\log_2\delta(x,y)}(k_j-k_{j+1})\\ &= k_{\log_2\delta(x,y)}=K(x,y). \end{align*} Now, the absolute integrability of $\varphi$ and the value of its integral follow from the formula $\varphi(s)=\sum_{l\in \mathbb{Z}}\alpha_l\varphi_l(s)$ since $\overline{\alpha}\in l^1(\mathbb{Z})$, $\sum_{l\in \mathbb{Z}}\alpha_l=1$ and $\int_{\mathbb{R}^+}\varphi_l(s)ds=1$.
\textit{Proof of (3).} Fix a positive $s$ and proceed to sum by parts the series defining $\varphi(s)=\sum_{l\in \mathbb{Z}}\alpha_l\varphi_l(s)$. Set $\Lambda_j=\sum_{l>j}\alpha_l$. Since $\alpha_l=\Lambda_{l-1}-\Lambda_l$, we have that \begin{equation*} \varphi(s) = \sum_{l\in \mathbb{Z}}(\Lambda_{l-1}-\Lambda_l)\varphi_l(s) =\sum_{l\in \mathbb{Z}}\Lambda_{l-1}\varphi_l(s)-\sum_{l\in \mathbb{Z}}\Lambda_{l}\varphi_l(s) = \sum_{l\in \mathbb{Z}}\Lambda_{l}(\varphi_{l+1}(s)-\varphi_l(s)), \end{equation*} as desired. Notice, by the way, that $\varphi_{l+1}(s)-\varphi_l(s)$ can be written in terms of Haar functions as $\varphi_{l+1}(s)-\varphi_l(s)=2^{\tfrac{l}{2}}h^l_0(s)$.
\textit{Proof of (4).} Follows from the definitions of $\overline{\alpha}$ and $\overline{\Lambda}$.
\textit{Proof of (5).} Notice first that (5.a) was proved in (2). The nonnegativity of $K$ and (4.b) show (5.b). Properties (5.d) and (5.e) of the sequence $\overline{\Lambda}$ follow from (4.c) and the fact that $\sum_{l\in \mathbb{Z}}\alpha_l=1$ proved in (2). Inequality (5.f) follows from the positivity of $K$ and (4.f).
We will prove (5.g). From (5.d) and (5.e) we have that $\overline{\Lambda}\in l^{\infty}(\mathbb{Z})$. In fact, there exist $j_1<j_2$ in $\mathbb{Z}$ such that $\Lambda_j<2$ for $j<j_1$ and $\Lambda_j>-1$ for $j>j_2$. Since the set $\{\Lambda_{j_1},\Lambda_{j_1+1},\ldots,\Lambda_{j_2}\}$ is finite, we get the boundedness of $\overline{\Lambda}$. On the other hand, since from (5.d) $\lim_{j\to-\infty}\Lambda_j=1$ we have that $\sup_j\Lambda_j\geq 1$. Assume that $\sup_j\Lambda_j> 1$. Then there exists $j_0\in \mathbb{Z}$ such that $\Lambda_{j_0}>1$. Hence, again from (5.d) and (5.e) we must have that for $j<j_3$, $\Lambda_j<\Lambda_{j_0}$ and for $j>j_4$, $\Lambda_j<1<\Lambda_{j_0}$ for some integers $j_3<j_4$. So that there exists $j_5\in \mathbb{Z}$ such that $\Lambda_{j_5}\geq\Lambda_j$ for every $j\in \mathbb{Z}$ and $\Lambda_{j_5}>1$. Now \begin{equation*} 2^{j_5}\Lambda_{j_5}=\sum_{l\leq j_5 -1}\Lambda_{j_5}2^l>\sum_{l\leq j_5-1}\Lambda_l2^l \end{equation*} which contradicts (5.f) with $j=j_5$.
To prove (5.h), assume that $\inf_j\Lambda_j<-1$. Choose $j_0\in \mathbb{Z}$ such that $\Lambda_{j_0}<-1$. Then from (5.f) \begin{equation*} \Lambda_{j_0+1}\leq 2^{-(j_0+1)}\sum_{l\leq j_0}\Lambda_l2^l =\sum_{l\leq j_0}\Lambda_l2^{l-(j_0+1)} =\frac{1}{2}\left(\Lambda_{j_0}+\sum_{l< j_0}\Lambda_l2^{l-j_0}\right) \leq\frac{1}{2}(\Lambda_{j_0}+1). \end{equation*} In the last inequality we used (5.g). Let us prove, inductively, that $\Lambda_{j_0+m}\leq\tfrac{1}{2}(\Lambda_{j_0}+1)$ for every $m\in \mathbb{N}$. Assume that the above inequality holds for $1\leq m\leq m_0$ and let us prove it for $m_0+1$. \begin{align*} \Lambda_{j_0+(m_0+1)}&\leq \sum_{l<j_0+m_0+1}2^{l-(j_0+m_0+1)}\Lambda_l\\ &=2^{-m_0-1}\left(\sum_{l=j_0}^{j_0+m_0}2^{l-j_0}\Lambda_l+\sum_{l<j_0}2^{l-j_0}\Lambda_l\right)\\ &=2^{-m_0-1}\left(\sum_{l=1}^{m_0}2^{l}\Lambda_{j_0+l}+\Lambda_{j_0}+\sum_{l<j_0}2^{l-j_0}\Lambda_l\right)\\ &\leq 2^{-m_0-1}\left(\sum_{l=1}^{m_0}2^{l-1}(\Lambda_{j_0}+1)+\Lambda_{j_0}+\sum_{l<j_0}2^{l-j_0}\right)\\ &=2^{-m_0-1}((2^{m_0}-1)(\Lambda_{j_0}+1)+\Lambda_{j_0}+1)\\ &=\frac{1}{2}(\Lambda_{j_0}+1). \end{align*} Hence $\Lambda_{j_0+m}\leq\tfrac{1}{2}(\Lambda_{j_0}+1)<0$ for every $m\in \mathbb{N}$, which contradicts (5.e).
Property (5.c) for the sequence $\overline{\alpha}$ follows from (4.d), (5.g) and (5.h). Item (5.i) follows from (4.a) and (4.d). \end{proof}
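To make the bookkeeping of the lemma concrete, the following added Python sketch builds $\overline{\alpha}$ and $\overline{\Lambda}$ from a hypothetical admissible profile $\overline{k}$ (chosen only for illustration) through (4.a) and (4.c), and checks the normalisations together with (4.d), (5.d) and (5.e) numerically.
\begin{verbatim}
J = 80                     # truncation range for the numerical sums

def k(j):                  # hypothetical profile: k_j = 2/3 for j <= 0, (2/3) 4^{-j} for j >= 1
    return 2.0 / 3.0 if j <= 0 else (2.0 / 3.0) * 4.0 ** (-j)

def alpha(l):              # (4.a): alpha_l = 2^{-l} (k_{-l} - k_{-l+1})
    return 2.0 ** (-l) * (k(-l) - k(-l + 1))

def Lam(j):                # (4.c): Lambda_j = sum_{l > j} alpha_l, truncated at J
    return sum(alpha(l) for l in range(j + 1, J))

print(sum(k(j) * 2.0 ** (j - 1) for j in range(-J, J)))    # ~ 1, Markov normalisation in (1)
print(sum(alpha(l) for l in range(-J, J)))                 # ~ 1, as in (2)
print(max(abs(alpha(j) - (Lam(j - 1) - Lam(j))) for j in range(-20, 20)))   # ~ 0, (4.d)
print(Lam(-40), Lam(40))                                   # ~ 1 and 0, cf. (5.d) and (5.e)
\end{verbatim}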
In the sequel we shall write $\mathscr{K}$ to denote the set of all nonnegative kernels defined on $\mathbb{R}^+\times \mathbb{R}^+$ that depend only on $\delta$ and satisfy, for some $x_0\in \mathbb{R}^+$, $\int_{\mathbb{R}^+}K(x_0,y)dy=1$.
Let us finish this section by proving a lemma that shall be used later.
\begin{lemma}\label{lemma:basiccharacterizationK} Let $\overline{\Lambda}=(\Lambda_j:j\in \mathbb{Z})$ be a decreasing sequence of real numbers satisfying (5.d) and (5.e). Then there exists a unique $K\in\mathscr{K}$ such that the sequence that (3) of Lemma~\ref{lemma:kerneldelta1} associates to $K$ is the given $\overline{\Lambda}$. \end{lemma}
\begin{proof} Define $K(x,y)=\sum_{j\in \mathbb{Z}}(\Lambda_{j-1}-\Lambda_j)\varphi_j(\delta(x,y))$. Since $\overline{\Lambda}$ is decreasing the coefficients in the above series are all nonnegative. On the other hand, from (5.d) and (5.e) we have that $\sum_{j\in \mathbb{Z}}(\Lambda_{j-1}-\Lambda_j)=1$. Hence, for every $x\in \mathbb{R}^+$ we have \begin{equation*} \int_{y\in \mathbb{R}^+}K(x,y)dy = \sum_{j\in \mathbb{Z}}(\Lambda_{j-1}-\Lambda_j)\int_{y\in \mathbb{R}^+}\varphi_j(\delta(x,y))dy = \sum_{j\in \mathbb{Z}}(\Lambda_{j-1}-\Lambda_j)=1 \end{equation*} So that $K\in\mathscr{K}$. \end{proof}
\section{The spectral analysis of the operators induced by kernels in $\mathscr{K}$}\label{sec:spectralanalysis} For $K\in\mathscr{K}$ and $f$ continuous with bounded support in $\mathbb{R}^+$ the integral $\int_{\mathbb{R}^+}K(x,y)f(y)dy$ is well defined and finite for each $x\in \mathbb{R}^+$. Actually each $K\in\mathscr{K}$ determines an operator which is well defined and bounded on each $L^p(\mathbb{R}^+)$ for $1\leq p\leq\infty$.
\begin{lemma} Let $K\in\mathscr{K}$ be given. Then for $f\in L^p(\mathbb{R}^+)$ the integral $\int_{\mathbb{R}^+}K(x,y)f(y)dy$ is absolutely convergent for almost every $x\in \mathbb{R}^+$. Moreover, \begin{equation*} Tf(x)=\int_{\mathbb{R}^+}K(x,y)f(y) dy \end{equation*} defines a bounded (non-expansive) operator on each $L^p(\mathbb{R}^+)$, $1\leq p\leq\infty$. Precisely, $\norm{Tf}_p\leq\norm{f}_p$ for $f\in L^p(\mathbb{R}^+)$. \end{lemma} \begin{proof} Notice first that the function $K(x,y)f(y)=\varphi(\delta(x,y))f(y)$ is measurable as a function defined on $\mathbb{R}^+ \times\mathbb{R}^+$, for every measurable $f$ defined on $\mathbb{R}^+$. The case $p=\infty$ follows directly from the facts that $K$ is a Markov kernel and that $K(x,y)\abs{f(y)}\leq K(x,y)\norm{f}_\infty$. For $p=1$ using Tonelli's theorem we get \begin{equation*} \int_{x\in \mathbb{R}^+}\left(\int_{y\in \mathbb{R}^+}K(x,y)\abs{f(y)}dy\right)dx= \int_{y\in \mathbb{R}^+}\abs{f(y)}\left(\int_{x\in \mathbb{R}^+}K(x,y)dx\right)dy=\norm{f}_1. \end{equation*} Hence $\int_{\mathbb{R}^+}K(x,y)f(y) dy$ is absolutely convergent for almost every $x$ and $\norm{Tf}_1\leq\norm{f}_1$. Assume that $1<p<\infty$ and take $f\in L^p(\mathbb{R}^+)$. Then \begin{align*} \abs{Tf(x)}^p &\leq \left(\int_{\mathbb{R}^+}K(x,y)\abs{f(y)} dy\right)^p=\left(\int_{\mathbb{R}^+}K(x,y)^{\tfrac{1}{p'}}K(x,y)^{\tfrac{1}{p}}\abs{f(y)} dy\right)^p\\ &\leq \left(\int_{\mathbb{R}^+}K(x,y) dy\right)^{\tfrac{p}{p'}}\left(\int_{\mathbb{R}^+}K(x,y)\abs{f(y)}^p dy\right)\\ &=\int_{\mathbb{R}^+}K(x,y)\abs{f(y)}^p dy. \end{align*} Hence $\norm{Tf}^p_p=\int_{\mathbb{R}^+}\abs{Tf(x)}^p dx\leq \int_{y\in\mathbb{R}^+}\left(\int_{x\in\mathbb{R}^+}K(x,y) dx\right)\abs{f(y)}^p dy=\norm{f}^p_p$. \end{proof}
The spectral analysis of the operators $T$ defined by kernels in $\mathscr{K}$ is given in the next result.
\begin{theorem}\label{thm:autovalores} Let $K\in\mathscr{K}$ and let $T$ be the operator in $L^2(\mathbb{R}^+)$ defined by $Tf(x)=\int_{\mathbb{R}^+}K(x,y)f(y) dy$. Then the Haar functions are eigenfunctions for $T$ and the eigenvalues are given by the sequence $\overline{\Lambda}$ introduced in Lemma~\ref{lemma:kerneldelta1}. Precisely, for each $h\in\mathscr{H}$ \begin{equation*} Th=\Lambda_{j(h)}h:=\lambda(h) h, \end{equation*} where $j(h)$ is the level of the support of $h$, i.e. $\supp h\in\mathcal{D}^{j(h)}$. \end{theorem} \begin{proof} Since the sequence $(\alpha_l:l\in \mathbb{Z})$ belongs to $\ell^1(\mathbb{Z})$, we can interchange the orders of integration and summation in order to compute $Th$. In fact, \begin{equation*} T h(x) = \int_{y\in\mathbb{R}^+} \varphi(\delta(x,y))h(y) dy =\int_{y\in\mathbb{R}^+}\left(\sum_{l\in \mathbb{Z}}\alpha_l\varphi_l(\delta(x,y))\right) h(y) dy = \sum_{l\in \mathbb{Z}}\alpha_l\left(2^{l}\int_{\{y: \delta(x,y)\leq 2^{-l}\}}h(y)dy\right). \end{equation*} Let us prove that \begin{equation*} \psi(x,l)=2^{l}\int_{\{y: \delta(x,y)\leq 2^{-l}\}}h(y) dy=\chi_{\{l>j(h)\}}(l) h(x). \end{equation*} If $x\notin I(h)$, since $\{y:\delta(x,y)\leq 2^{-l}\}$ is the only dyadic interval $I_l^x$ containing $x$ of length $2^{-l}$, only two situations are possible, $I_l^x\cap I(h)=\emptyset$ or $I_l^x\supset I(h)$; in both cases the integral vanishes and $\psi(x,l)=0=\chi_{\{l>j(h)\}}(l) h(x)$. Take now $x\in I(h)$. Assume first that $x\in I_l(h)$ (the left half of $I(h)$). So that $\psi(x,l)=2^{l}\int_{I_l^x}h(y) dy=0$ if $l\leq j(h)$, since $I_l^x\supset I(h)$. When $l>j(h)$ we have that $h\equiv \abs{I(h)}^{-1/2}$ on $I_l^x$, hence $\psi(x,l)=2^{l}\abs{I(h)}^{-1/2}\abs{I_l^x}=\abs{I(h)}^{-1/2}=h(x)$. In a similar way, for $x\in I_r(h)$, we get $\psi(x,l)=-\abs{I(h)}^{-1/2}=h(x)$. \end{proof}
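As an added numerical illustration of the theorem (not part of the original proof), take the simplest admissible profile $\alpha_l=1$ for $l=l_0$ and $\alpha_l=0$ otherwise, so that $K(x,y)=2^{l_0}$ when $\delta(x,y)\leq 2^{-l_0}$; then $Tf(x)$ is the average of $f$ over the dyadic interval of length $2^{-l_0}$ containing $x$, and $\Lambda_j=1$ for $j<l_0$ while $\Lambda_j=0$ for $j\geq l_0$. Reusing the \texttt{haar} sketch above:
\begin{verbatim}
import math

def dyadic_average(f, x, l0, n=1 << 12):
    """T f(x) for alpha_l = 1{l = l0}: the average of f over the
    dyadic interval of length 2^{-l0} containing x (midpoint rule)."""
    length = 2.0 ** (-l0)
    left = math.floor(x / length) * length
    return sum(f(left + (i + 0.5) * length / n) for i in range(n)) / n

l0 = 3
for (j, k, x) in [(1, 1, 0.55), (4, 8, 0.53)]:
    T_h = dyadic_average(lambda y: haar(j, k, y), x, l0)
    lam = 1.0 if j < l0 else 0.0          # Lambda_{j(h)} for this kernel
    print(j, round(T_h, 12), lam * haar(j, k, x))   # the two values agree
\end{verbatim}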
Notice that the eigenvalues $\lambda(h)$ tend to zero when the resolution $j(h)$ tends to infinity. Moreover, this convergence is monotonic when all the $\alpha_l$ are nonnegative. Notice also that the eigenvalues depend only on the resolution level of $h$, but not on the position $k$ of its support. Sometimes we shall write $\lambda_j$, $j\in \mathbb{Z}$, instead of $\lambda(h)$ when $j$ is the scale of the support of $h$. With the above result, and using the fact that the Haar system $\mathscr{H}$ is an orthonormal basis for $L^2(\mathbb{R}^+)$, we see that the action of $T$ on $L^2(\mathbb{R}^+)$ can be regarded as a multiplier operator on the scales.
\begin{lemma} Let $K$ and $T$ as in Theorem~\ref{thm:autovalores}. The diagram
\begin{center} \begin{tikzpicture}
\matrix (m) [matrix of math nodes,row sep=3em,column sep=4em,minimum width=2em]
{
L^2(\mathbb{R}^+) & \ell^2(\mathbb{Z}) \\
L^2(\mathbb{R}^+) & \ell^2(\mathbb{Z}) \\};
\path[-stealth]
(m-1-1) edge node [left] {$T$} (m-2-1)
edge node [below] {$H$} (m-1-2)
(m-2-1.east|-m-2-2) edge node [below] {$H$}
node [above] {} (m-2-2)
(m-1-2) edge node [right] {$M$} (m-2-2);
\end{tikzpicture} \end{center}
commutes, where $H(f)=(\proin{f}{h}: h\in\mathscr{H})$ and $M(a_h:h\in\mathscr{H})=(\lambda(h)a_h:h\in\mathscr{H})$. In particular, $\norm{Tf}^2_2=\sum_{h\in\mathscr{H}} \lambda^2(h)\abs{\proin{f}{h}}^2$. \end{lemma} The characterization of the space $L^p(\mathbb{R}^+)$ ($1<p<\infty$), Theorem~\ref{thm:characterizationLp} above, provides a similar result for the whole scale of Lebesgue spaces, $1<p<\infty$ with the only caveat that when $p\neq 2$ the norms are only equivalent. The next statement contains this observation. \begin{theorem}\label{thm:op.Lp.haar} With $K$ and $T$ as before and $1<p<\infty$ we have that \begin{equation*} \norm{Tf}_p\simeq\norm{\biggl(\sum_{h\in\mathscr{H}}(\lambda(h))^2\abs{\proin{f}{h}}^2\abs{I(h)}^{-1}\chi_{I(h)}\biggr)^{\tfrac{1}{2}}}_p \end{equation*} with constants which do not depend on $f$. \end{theorem}
\begin{corollary}\label{coro:representationK} For every $K\in\mathscr{K}$ and $(\lambda(h):h\in\mathscr{H})$ as in Theorem~\ref{thm:autovalores} we have the representation \begin{equation*} K(x,y)=\sum_{h\in\mathscr{H}}\lambda(h)h(x)h(y). \end{equation*} \end{corollary} \begin{proof} For $f=\sum_{h\in\mathscr{H}}\proin{f}{h}h$ with $\proin{f}{h}\neq 0$ only for finitely many Haar functions $h\in\mathscr{H}$, we have that \begin{align*} \int_{\mathbb{R}^+}K(x,y)f(y)dy=Tf(x)&=\sum_{h\in\mathscr{H}}\proin{f}{h}Th(x)\\ &=\sum_{h\in\mathscr{H}}\left(\int_{y\in\mathbb{R}^+}f(y)h(y)dy\right)\lambda(h)h(x)\\ &=\int_{y\in\mathbb{R}^+}\left(\sum_{h\in\mathscr{H}}\lambda(h)h(y)h(x)\right)f(y)dy. \end{align*} Since the space of such functions $f$ is dense in $L^2(\mathbb{R}^+)$ we have that $K(x,y)=\sum_h\lambda(h)h(x)h(y)$. \end{proof}
\section{Stability of Markov kernels}\label{sec:stability}
In the case of the classical CLT, the key properties of the distribution of the independent random variables $X_j$ are contained in the Gaussian central limit itself. Precisely, $(4\pi t)^{-1/2}e^{-\abs{x}^2/4t}$ is the distribution limit of $n^{-1/2}\sum_{j=1}^n X_j$ when $X_j$ are independent and are equi-distributed with variance $t$ and mean zero. Our ``gaussian'' is the Markov kernel $K_t(x,y)$ defined in $\mathbb{R}^+\times \mathbb{R}^+$ by applying Lemma~\ref{lemma:basiccharacterizationK} to the sequence $\Lambda_j=e^{-t2^{j}}$, $j\in \mathbb{Z}$, for fixed $t$. We may also use the Haar representation of $K_t(x,y)$ given by Corollary~\ref{coro:representationK} in \S~\ref{sec:spectralanalysis}. In this way we can write this family of kernels as $K_t(x,y)=\sum_{h\in\mathscr{H}}e^{-t2^{j(h)}}h(x)h(y)$. As we shall see, after obtaining estimates for the behavior of $K$ for large $\delta(x,y)$, this kernel has heavy tails. In particular, the analogue of the variance given by $\int_{y\in \mathbb{R}^+}K_t(x,y)\delta^2(x,y)dy$ is not finite. This kernel looks more like a dyadic version of Cauchy type distributions than of Gauss type distributions. This is in agreement with the fact that $K_t$ solves a fractional differential equation and the natural processes are of Lévy type instead of Wiener--Brownian type. As a consequence, the classic moment conditions have to be substituted by stability type behavior at infinity.
\begin{lemma}\label{lemma:gaussianPsistability23} Set for $r>0$ \begin{equation*} \psi(r)=\frac{1}{r}\left(\sum_{j\geq 1}2^{-j}e^{-(2^jr)^{-1}}-e^{-r^{-1}}\right). \end{equation*} Then $\psi$ is well defined on $\mathbb{R}^+$ with values in $\mathbb{R}^+$. And \begin{equation*} r^{2}\psi(r)\to \frac{2}{3} \textrm{\quad as \quad} r\to\infty. \end{equation*} \end{lemma} \begin{proof} Since $e^{-(2^jr)^{-1}}$ is bounded above we see that $\psi(r)$ is finite for every $r>0$. On the other hand since $\psi(r)=\tfrac{1}{r}\sum_{j\geq 1}2^{-j}[e^{-(2^jr)^{-1}}-e^{-r^{-1}}]$ and terms in brackets are positive we see that $\psi(r)>0$ for every $r>0$. Let us check the behavior of $\psi$ at infinity \begin{equation*} r^{2}\psi(r)=\sum_{j\geq 1}\frac{2^{-j}[e^{-(2^jr)^{-1}}-e^{-r^{-1}}]}{r^{-1}}\to \sum_{j\geq 1}2^{-j}(1-2^{-j})=\frac{2}{3}. \end{equation*} \end{proof}
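A quick numerical check of this limit (an added sketch; the truncation of the series at 200 terms is an assumption, harmless since the tail is geometric):
\begin{verbatim}
import math

def psi(r, terms=200):
    """psi(r) = (1/r) ( sum_{j>=1} 2^{-j} exp(-1/(2^j r)) - exp(-1/r) )."""
    s = sum(2.0 ** (-j) * math.exp(-1.0 / (2.0 ** j * r)) for j in range(1, terms))
    return (s - math.exp(-1.0 / r)) / r

for r in (1e1, 1e2, 1e3, 1e4):
    print(r, r * r * psi(r))        # approaches 2/3 = 0.666...
\end{verbatim}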
\begin{lemma}\label{lemma:stability23} Let $t>0$ be given. Set $\Lambda^{(t)}_j=e^{-t2^{j}}$, $j\in \mathbb{Z}$. Let $K_t(x,y)$ be the kernel that Lemma~\ref{lemma:basiccharacterizationK} associates to $\overline{\Lambda^{(t)}}$. Then $K_t\in\mathscr{K}$ and, since $K_t(x,y)=\tfrac{1}{t}\psi(\tfrac{\delta(x,y)}{t})$, with $\psi$ as in Lemma~\ref{lemma:gaussianPsistability23}, we have \begin{equation}\label{eq:propertystabilityone} \delta(x,y)^{2}K_t(x,y)\to \frac{2}{3}\,t \end{equation} for $\delta(x,y)\to+\infty$. \end{lemma} \begin{proof} Since $\Lambda^{(t)}_{j+1}<\Lambda^{(t)}_{j}$, for every $j\in \mathbb{Z}$, $\lim_{j\to-\infty}\Lambda^{(t)}_{j}=1$ and $\lim_{j\to +\infty}\Lambda^{(t)}_{j}=0$, we can use Lemma~\ref{lemma:basiccharacterizationK} in order to obtain the kernel $K_t(x,y)$. Now from Corollary~\ref{coro:representationK} we have that $K_t(x,y)=\sum_{h\in\mathscr{H}}e^{-t2^{j(h)}}h(x)h(y)$. Let us check, following the lines of \cite{AcAimFCAA}, that $K_t(x,y)=\tfrac{1}{t}\psi(\tfrac{\delta(x,y)}{t})$, with $\psi$ as in Lemma~\ref{lemma:gaussianPsistability23}. In fact, since $K_t(x,y)=\sum_{h\in\mathscr{H}}e^{-t\abs{I(h)}^{-1}}h(x)h(y)$, then a Haar function $h\in\mathscr{H}$ contributes to the sum when $x$ and $y$ both belong to $I(h)$. The smallest of such intervals, say $I_0=I(h^{(0)})$, is precisely the dyadic interval that determines $\delta(x,y)$. Precisely $\abs{I_0}=\delta(x,y)$. Let $h^{(1)}$ and $I_1=I(h^{(1)})$ be the wavelet and its dyadic support corresponding to one level less of resolution than that of $I_0$ itself. In more familiar terms, $I_0$ is one of the two sons of $I_1$. In general, for each resolution level less than that of $I_0$ we find one and only one $I_i=I(h^{(i)})$ with $I_0\subset I_1\subset\ldots\subset I_i\subset\ldots$ and $\abs{I_i}=2^i\abs{I_0}$. We have to observe that, except for $I_0$, where $x$ and $y$ must belong to different halves $I_{0,r}$ or $I_{0,l}$ of $I_0$ because of the minimality of $I_0$, for all the other $I_i$, $x$ and $y$ must belong to the same half $I_{i,l}$ or $I_{i,r}$ of $I_i$, because they are all dyadic intervals. These properties also show that $h^{(0)}(x)h^{(0)}(y)=-\abs{I_0}^{-1}=-\delta^{-1}(x,y)$ and, for $i\geq 1$, $h^{(i)}(x)h^{(i)}(y)=2^{-i}\abs{I_0}^{-1}=(2^i\delta(x,y))^{-1}$. Hence \begin{align*} K_t(x,y) &= -\frac{e^{-\tfrac{t}{\delta(x,y)}}}{\delta(x,y)}+\sum_{i\geq 1}e^{-\tfrac{t2^{-i}}{\delta(x,y)}}\frac{2^{-i}}{\delta(x,y)}\\ &= \frac{1}{\delta(x,y)}\left[\sum_{i\geq 1}2^{-i}e^{-\tfrac{t}{\delta(x,y)}2^{-i}}-e^{-\tfrac{t}{\delta(x,y)}}\right]\\ &= \frac{1}{t}\psi\left(\frac{\delta(x,y)}{t}\right). \end{align*} So that \begin{equation*} \delta(x,y)^{2}K_t(x,y)=\delta(x,y)^{2}\frac{1}{t}\psi\left(\frac{\delta(x,y)}{t}\right) =t\left(\frac{\delta(x,y)}{t}\right)^{2}\psi\left(\frac{\delta(x,y)}{t}\right) \end{equation*} which from the result of Lemma~\ref{lemma:gaussianPsistability23} tends to $\tfrac{2}{3}\,t$ when $\delta(x,y)\to +\infty$. \end{proof}
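The identity $K_t(x,y)=\tfrac{1}{t}\psi(\tfrac{\delta(x,y)}{t})$ can also be cross-checked numerically against a truncated Haar series (an added sketch that assumes the \texttt{haar}, \texttt{dyadic\_distance} and \texttt{psi} sketches above are in scope; the truncation levels are arbitrary):
\begin{verbatim}
import math

def K_series(x, y, t, jmin=-40, jmax=40):
    """Truncated Haar expansion K_t(x,y) = sum_h exp(-t 2^{j(h)}) h(x) h(y);
    at each level only the interval containing x can contribute."""
    total = 0.0
    for j in range(jmin, jmax):
        k = math.floor(2.0 ** j * x)      # I^j_k is the level-j interval containing x
        total += math.exp(-t * 2.0 ** j) * haar(j, k, x) * haar(j, k, y)
    return total

t, x, y = 1.0, 0.3, 0.8
print(K_series(x, y, t))
print(psi(dyadic_distance(x, y) / t) / t)    # closed form; the two values agree
\end{verbatim}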
Notice that from Lemma~\ref{lemma:deltaintegrability}-\textit{b.iv)} and the behavior at infinity of $K_t(x,y)$ provided in the previous result, we have \begin{equation*} \int_{\mathbb{R}^+}K_t(x,y)\delta^2(x,y)dy=+\infty \end{equation*} for every $x\in \mathbb{R}^+$. Moreover, $\int_{\mathbb{R}^+}K_t(x,y)\delta(x,y)dy=+\infty$. The adequate substitute for the property of finiteness of moments is provided by the stability involved in property \eqref{eq:propertystabilityone} in Lemma~\ref{lemma:stability23}. Since this property is going to be crucial in our main result, we introduce formally the concept of stability. We say that a kernel $K$ in $\mathscr{K}$ is \textbf{\boldmath{$1$}-stable with parameter \boldmath{$\sigma>0$}} if \begin{equation*} \delta(x,y)^2 K(x,y)\to \sigma \end{equation*} for $\delta(x,y)\to\infty$. In the above limit, since the dimension of $\mathbb{R}^+$ with the metric $\delta$ equals one, we think of $\delta^2$ as $\delta^{1+1}$, one for the dimension and the other for the order of stability.
Since for $K\in\mathscr{K}$ we have $K(x,y)=\varphi(\delta(x,y))$, the property of $1$-stability can be written as a condition for the behavior at infinity of the profile $\varphi$. In particular, with the notation of Lemma~\ref{lemma:kerneldelta1}, the stability is equivalent to $4^jk_j\to\sigma$ as $j\to\infty$.
\section{Iteration and mollification in $\mathscr{K}$}\label{sec:iterationmollification} As we have already observed in the introduction, the two basic operations on the identically distributed independent random variables $X_i$ in order to obtain the means that converge in distribution to the Central Limit, translate into iterated convolution and mollification. In this section, we shall be concerned with two operations, iteration and mollification on $\mathscr{K}$ and on the subfamily $\mathscr{K}^1$ of $1$-stable kernels in $\mathscr{K}$.
In the sequel, given a kernel $K$ in $\mathscr{K}$, $\bar{\Lambda}$, $\bar{\alpha}$ and $\bar{k}$ are the sequences defined in Lemma~\ref{lemma:kerneldelta1} associated to $K$. When a family of kernels in $\mathscr{K}$ is described by an index associated to $K$, say $K_i$, the corresponding sequences are denoted by $\bar{\Lambda}^i$, $\bar{\alpha}^i$ and $\bar{k}^i$. \begin{lemma} \begin{enumerate}[(a)] \item For $K_1$ and $K_2\in\mathscr{K}$, the kernel $$K_3(x,y)=(K_1\ast K_2)(x,y)=\int_{z\in \mathbb{R}^+}K_1(x,z)K_2(z,y)dz$$ is well defined; $K_3\in\mathscr{K}$ with \begin{equation*} \alpha^3_j=\alpha^1_j\lambda^2_j+\alpha^2_j\lambda^1_j+\alpha^1_j\alpha^2_j \end{equation*} for every $j\in \mathbb{Z}$; \item $(\mathscr{K},\ast)$ and $(\mathscr{K}^1,\ast)$ are semigroups; \item $\lambda^3_j=\lambda^1_j\lambda^2_j$ for every $j\in \mathbb{Z}$. \end{enumerate} \end{lemma} \begin{proof}[Proof of (a)] Let $K_i(x,y)=\varphi^i(\delta(x,y))$, $i=1,2$, with $\varphi^i(s)=\sum_{j\in \mathbb{Z}}\alpha^i_j\varphi_j(s)$, $\sum_{j\in \mathbb{Z}}\alpha^i_j=1$, $\sum_{j\in \mathbb{Z}}\abs{\alpha^i_j}<\infty$. Then, for $x\neq y$ both in $\mathbb{R}^+$, set $I^*$ to denote the smallest dyadic interval containing $x$ and $y$. Then $\abs{I^*}=\delta(x,y)$ and $x$ and $y$ belong to different halves of $I^*$. From the above properties of the sequences $\bar{\alpha}^i$, $i=1,2$, we can interchange the orders of summation and integration in order to obtain \begin{align*} K_3(x,y) &= \int_{z\in \mathbb{R}^+}K_1(x,z)K_2(z,y)dz\\ &=\sum_{j\in \mathbb{Z}}\sum_{l\in \mathbb{Z}}2^j\alpha^1_j2^l\alpha^2_l\int_{z\in \mathbb{R}^+}\chi_{(0,2^{-j}]}(\delta(x,z))\chi_{(0,2^{-l}]}(\delta(z,y))dz\\ &=\sum_{j\in \mathbb{Z}}2^j\alpha^1_j\sum_{l\in \mathbb{Z}}2^l\alpha^2_l\abs{I^j_{k(x)}\cap I^l_{k(y)}} \end{align*} where $I^j_{k(x)}$ is the only dyadic interval in $\mathcal{D}^j$ such that $x\in I^j_{k(x)}$. Notice that the intersection of $I^j_{k(x)}$ and $I^l_{k(y)}$ is empty when $j$ and $l$ are both larger than the level $j^*$ of $I^*$. On the other hand, when $j$ or $l$ is smaller than or equal to $j^*$, the intersection is the smallest one. Say, if $j\leq j^*$ and $l>j$, $I^j_{k(x)}\cap I^l_{k(y)}=I^l_{k(y)}$.
With the above considerations we are now in position to compute $K_3(x,y)$ in terms of the sequences $\bar{\alpha}^i$ and $\bar{\lambda}^i$ as follows, with $c(j^*)=\{(j,l)\in \mathbb{Z}^2:j>j^* \textrm{ and } l>j^*\}$, \begin{align*} K_3(x,y) &= \sum\sum_{(j,l)\in \mathbb{Z}^2}2^{j+l}\alpha^1_j\alpha^2_l\abs{I^j_{k(x)}\cap I^l_{k(y)}}\\ &= \sum\sum_{\mathbb{Z}^2\setminus c(j^*)}2^{j+l}\alpha^1_j\alpha^2_l\abs{I^j_{k(x)}\cap I^l_{k(y)}}\\ &= \sum_{j\leq j^*}2^j\alpha^1_j\sum_{l>j}2^l\alpha^2_l\abs{I^l_{k(y)}} + \sum_{l\leq j^*}2^l\alpha^2_l\sum_{j>l}2^j\alpha^1_j\abs{I^j_{k(x)}} + \sum_{l\leq j^*}2^l\alpha^2_l2^l\alpha^1_l\abs{I^l_{k(y)}}\\ &= \sum_{j\leq j^*}2^j\alpha^1_j\lambda^2_j+\sum_{l\leq j^*}2^l\alpha^2_l\lambda^1_l+\sum_{l\leq j^*}2^l\alpha^1_l\alpha^2_l\\ &= \sum_{j\leq j^*}\left[\alpha^1_j\lambda^2_j+\alpha^2_j\lambda^1_j+\alpha^1_j\alpha^2_j\right]2^j\\ &= \sum_{j\in \mathbb{Z}}\left[\alpha^1_j\lambda^2_j+\alpha^2_j\lambda^1_j+\alpha^1_j\alpha^2_j\right]\varphi_j(\delta(x,y)). \end{align*} In other words, $K_3(x,y)=\varphi^3(\delta(x,y))$ with $\varphi^3(s)=\sum_{j\in \mathbb{Z}}\alpha^3_j\varphi_j(S)$ and $\alpha^3_j=\alpha^1_j\lambda^2_j+\alpha^2_j\lambda^1_j+\alpha^1_j\alpha^2_j$. Since, as it is easy to check by Tonelli's theorem $\int_{\mathbb{R}^+} K_3(x,y)dy=1$, we have that $K_3\in\mathscr{K}$.
\textit{Proof of (b).} We only have to show that if $K_1$ and $K_2$ are $1$-stable kernels in $\mathscr{K}$, then $K_3=K_1\ast K_2$ is also $1$-stable. As we observed at the end of Section~\ref{sec:stability}, for $K_i$ $(i=1,2)$ we have $4^jk^i_j\to\sigma_i$ when $j\to+\infty$. We have to prove that $4^jk^3_j\to \sigma_1+\sigma_2$ when $j\to+\infty$. By Lemma~\ref{lemma:kerneldelta1}, item (4.b), we can write \begin{align*} 4^jk^3_j &= 4^j\sum_{i\geq j}2^{-i}\alpha^3_{-i}\\ &= 4^j\sum_{i\geq j}2^{-i}[\alpha^1_{-i}\lambda^2_{-i}+\alpha^2_{-i}\lambda^1_{-i}+\alpha^1_{-i}\alpha^2_{-i}]\\ &= 4^j\sum_{i\geq j}(2^{-i}\alpha^1_{-i})\lambda^2_{-i}+4^j\sum_{i\geq j}(2^{-i}\alpha^2_{-i})\lambda^1_{-i}+ 4^j\sum_{i\geq j}2^{-i}\alpha^1_{-i}\alpha^2_{-i}\\ &= I(j)+II(j)+III(j). \end{align*} We claim that $I(j)\to\sigma_1$, $II(j)\to\sigma_2$ and $III(j)\to 0$ when $j\to+\infty$. Let us prove that $I(j)\to\sigma_1$, $j\to +\infty$. Since \begin{equation*} \abs{I(j)-\sigma_1}\leq \abs{4^j\sum_{i\geq j}2^{-i}\alpha^1_{-i}(\lambda^2_{-i}-1)}+\abs{4^jk^1_j-\sigma_1} \end{equation*} from the fact that $K_1\in\mathscr{K}^1$ with parameter $\sigma_1$ and because of (5.d) in Lemma~\ref{lemma:kerneldelta1} we have that $I(j)\to\sigma_1$ as $j\to\infty$. The fact that $II(j)\to\sigma_2$ follows the same pattern. Let us finally estimate $III(j)$. Notice that from (4.a) in Lemma~\ref{lemma:kerneldelta1} we have \begin{align*} \abs{III(j)}&\leq 4^j\sum_{i\geq j}2^{-i}\abs{\alpha^1_{-i}}\abs{\alpha^2_{-i}}\\ &\leq 4^j\left(\sup_{i\geq j}2^{-i}\abs{\alpha^1_{-i}}\right)\left(\sum_{l\geq j}\abs{\alpha^2_{-l}}\right)\\ &= 4^j\left(\sup_{i\geq j}2^{-i}\abs{\frac{k^1_i-k^1_{i+1}}{2^{-i}}}\right)\left(\sum_{l\geq j}\abs{\alpha^2_{-l}}\right)\\ &\leq 2\,4^j\sup_{i\geq j}k^1_i\left(\sum_{l\geq j}\abs{\alpha^2_{-l}}\right)\\ &= 2\,4^j k^1_{i(j)}\left(\sum_{l\geq j}\abs{\alpha^2_{-l}}\right), \end{align*} where, since $k^1_i\to 0$ when $i\to \infty$, $i(j)\geq j$ is the necessarily attained supremum of the $k_i$'s for $i\geq j$. So that $4^jk^1_{i(j)}=4^{j-i(j)}4^{i(j)}k^1_{i(j)}$ is bounded above because $K_1\in\mathscr{K}^1$. On the other hand, since $\bar{\alpha}^2\in l^1(\mathbb{Z})$ the tail $\sum_{l\geq j}\abs{\alpha^2_{-l}}$ tends to zero as $j\to\infty$.
\textit{Proof of (c).} Since each $K_i$, $i=1,2$, can be regarded as the kernel of the operator $T_if(x)=\int_{y\in \mathbb{R}^+}K_i(x,y)f(y)dy$ and $K_3$ is the kernel of the composition of $T_1$ and $T_2$, we have that \begin{equation*} T_3h=(T_2\circ T_1)h=T_2(T_1h)=T_2(\lambda^1(h)h)=\lambda^1(h)T_2h=\lambda^1(h)\lambda^2(h)h. \end{equation*} Since $\lambda^1$ and $\lambda^2$ depend only on the scale $j$ of $h$, so does $\lambda^3=\lambda^1\lambda^2$. \end{proof}
\begin{corollary}\label{coro:compositonKntimes} Let $K\in\mathscr{K}^1$ with parameter $\sigma$. Then, for $n$ a positive integer, the kernel $K^{(n)}$ obtained as the composition of $K$ $n$ times, i.e., \begin{equation*} K^{(n)}(x,y)=\idotsint_{(\mathbb{R}^+)^{n-1}}K(x,y_1)\cdots K(y_{n-1},y)dy_1\cdots dy_{n-1} \end{equation*} belongs to $\mathscr{K}^1$ with parameter $n\sigma$ and eigenvalues $\lambda^{(n)}_j=(\lambda_j)^n$, $j\in \mathbb{Z}$, with $\lambda_j$ the eigenvalues of $K$. \end{corollary}
Trying to keep the analogy with the classical CLT, the mollification operator, that we have to define, is expected to preserve $\mathscr{K}^1$ producing a contraction of the parameter $\sigma$ in order to counteract the dilation provided by the iteration procedure.
The first caveat that we have in our search for dilations is that, even though $\mathbb{R}^+$ is closed under (positive) dilations, the dyadic system is not. This means that usually $K(cx,cy)$ does not even belong to $\mathscr{K}$ when $K\in\mathscr{K}$ and $c>0$. Nevertheless, Lemma~\ref{lemma:deltahomogeneity} in \S~\ref{sec:dyadycAnalysis} gives the answer. If $K(x,y)=\varphi(\delta(x,y))$ then $K_j(x,y)=2^jK(2^jx,2^jy)=2^j\varphi(\delta(2^jx,2^jy))=2^j\varphi(2^j\delta(x,y))$ for every $j\in \mathbb{Z}$. Hence $K_j$ depends only on $\delta$. In the next lemma we summarize the elementary properties of this mollification operator.
\begin{lemma}\label{lemma:propertiesmollificationsK} Let $K\in\mathscr{K}^1$ with parameter $\sigma$ be given. Then $K_j(x,y)=2^jK(2^jx,2^jy)$ belongs to $\mathscr{K}^1$ with parameter $2^{-j}\sigma$. Moreover, denoting with $\varphi^{(j)}$, $\bar{\alpha}^{j}=(\alpha^j_i: i\in \mathbb{Z})$ and $\bar{\lambda}^j=(\lambda^j_i: i\in \mathbb{Z})$ the corresponding functions and sequences for each $K_j$ we have that; \begin{enumerate}[(a)] \item $\varphi^{(j)}(s)=2^j\varphi(2^js)$, $j\in \mathbb{Z}$, $s>0$; \item $\alpha^j_l=\alpha_{l-j}$, $j\in \mathbb{Z}$, $l\in \mathbb{Z}$; \item $\lambda^j_l=\lambda_{l-j}$, $j\in \mathbb{Z}$, $l\in \mathbb{Z}$. \end{enumerate} \end{lemma}
\begin{proof} From the considerations above, it is clear that $K_j\in\mathscr{K}$. Now, for $j\in \mathbb{Z}$ fixed, \begin{equation*} \delta(x,y)^2K_j(x,y)=\delta(x,y)^2 2^j K(2^jx,2^jy)=2^{-j}\delta(2^jx,2^jy)^2K(2^jx,2^jy) \end{equation*} which tends to $2^{-j}\sigma$ when $\delta(x,y)\to\infty$. Property (a) is clear. Property (b) follows from (a); \begin{align*} \varphi^{(j)}(s)=2^j\varphi(2^js)=2^j\sum_{l\in \mathbb{Z}}\alpha_l\varphi_l(2^js)=\sum_{l\in \mathbb{Z}}\alpha_l\varphi_{l+j}(s)=\sum_{l\in \mathbb{Z}}\alpha_{l-j}\varphi_l(s). \end{align*} Hence $\alpha^j_l=\alpha_{l-j}$. Finally (c) follows from (b) and (4.c) in Lemma~\ref{lemma:kerneldelta1}. \end{proof}
Corollary~\ref{coro:compositonKntimes} and Lemma~\ref{lemma:propertiesmollificationsK} show that for $K\in\mathscr{K}^1$ with parameter $\sigma$, if we iterate $K$ $2^i$ times ($i$ a positive integer) to obtain $K^{(2^i)}$ and then we mollify this kernel by a scale $2^i$, the new kernel $M^i$ belongs to $\mathscr{K}^1$ with parameter $\sigma$. Notice also that iteration and mollification commute, so that $M^i$ can also be seen as the $2^i$-th iteration of the $2^i$-mollification of $K$. Let us gather in the next statement the basic properties of $M^i$ that shall be used later and that follow from Corollary~\ref{coro:compositonKntimes} and Lemma~\ref{lemma:propertiesmollificationsK}.
\begin{lemma}\label{lemma:propertiesMi} Let $K\in\mathscr{K}^1$ with parameter $\sigma$ and let $i$ be a positive integer. Then the kernel $M^i$ belongs to $\mathscr{K}^1$ with parameter $\sigma$ and its eigenvalues are given by $\lambda^i_j=\lambda^{2^i}_{j-i}$, $j\in\mathbb{Z}$. \end{lemma}
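A minimal sketch of the proof: by Corollary~\ref{coro:compositonKntimes}, the kernel $K^{(2^i)}$ belongs to $\mathscr{K}^1$ with parameter $2^i\sigma$ and its eigenvalue at scale $l$ is $(\lambda_l)^{2^i}$; by Lemma~\ref{lemma:propertiesmollificationsK} applied with $j=i$, mollification by the scale $2^i$ divides the parameter by $2^i$ and shifts the scale index by $i$. Hence
\begin{equation*}
M^i\in\mathscr{K}^1 \textrm{ with parameter } 2^{-i}\bigl(2^i\sigma\bigr)=\sigma \qquad\textrm{and}\qquad \lambda^i_j=\bigl(\lambda_{j-i}\bigr)^{2^i}=\lambda^{2^i}_{j-i},\quad j\in\mathbb{Z}.
\end{equation*}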
\section{The main result}\label{sec:mainresult} We are now in a position to state and prove the main result of this paper. In order to avoid notational overload in the next statement, we keep the notation introduced in the previous sections.
\begin{theorem}\label{thm:mainresult} Let $K$ be in $\mathscr{K}^1$ with parameter $\tfrac{2}{3}t>0$. Then \begin{enumerate}[(a)] \item the eigenvalues of $M^i$ converge to the eigenvalues of the kernel in \eqref{eq:NucleoHaarDifusiones} when $i\to+\infty$; precisely, \begin{equation*} \lambda^{2^i}_{j-i}\to e^{-t2^j} \quad\textrm{when } i\to\infty; \end{equation*} \item for $1<p<\infty$ and $u_0\in L^p(\mathbb{R}^+)$, the functions $v_i(x)=\int_{\mathbb{R}^+}M^i(x,y)u_0(y)\,dy$ converge in the $L^p(\mathbb{R}^+)$ sense to the solution $u(x,t)$ of the problem \begin{equation*} (P) \left \{\begin{array}{ll} \frac{\partial u}{\partial t}=D^{1} u,\, & x\in\mathbb{R}^{+}, t>0;\\
u(x,0)=u_0(x),\, & x\in \mathbb{R}^+, \end{array} \right. \end{equation*} where $t$ is the precise value for which the initial kernel $K$ is $1$-stable with parameter $\tfrac{2}{3}t$. \end{enumerate} \end{theorem} \begin{proof}[Proof of (a)] Since $K\in\mathscr{K}^1$ with parameter $\tfrac{2}{3}t>0$, which means that $k_m4^m\to\tfrac{2}{3}t$ as $m$ tends to infinity, we have both that $k_m2^m\to 0$ when $m\to\infty$ and that $\sum_{l<m}k_l2^{l-1}<1$ for every positive integer $m$. Since, on the other hand, $\sum_{l\in \mathbb{Z}}k_l2^{l-1}=1$, we have for $j\in \mathbb{Z}$ fixed and $i$ a large nonnegative integer that \begin{equation*} 0<\sum_{l<i-j}k_l2^{l-1}- \frac{k_{i-j}2^{i-j}}{2}<1. \end{equation*} Hence, from Lemma~\ref{lemma:propertiesmollificationsK} and Lemma~\ref{lemma:kerneldelta1}, the $j$-th scale eigenvalue of the operator induced by the kernel $M^i$ is given by \begin{align*} \lambda^{2^i}_{j-i}&=\left[\frac{1}{2}\left(\sum_{l<i-j}k_l2^l-k_{i-j}2^{i-j}\right)\right]^{2^i}\\ &=\left[\sum_{l<i-j}k_l2^{l-1}-k_{i-j}\frac{2^{i-j}}{2}\right]^{2^i}\\ &=\left[1-\left(\sum_{l\geq i-j}k_l2^{l-1}+\frac{k_{i-j}4^{i-j}}{2}\frac{2^j}{2^i}\right)\right]^{2^i}\\ &= \left[1-\gamma(i,j)\frac{2^j}{2^i}\right]^{2^i}, \end{align*} with $\gamma(i,j)=2^{i-j}\sum_{l\geq i-j}k_l2^{l-1}+\frac{k_{i-j}4^{i-j}}{2}$. Notice that \begin{equation*} \gamma(i,j)=2^{i-j}\sum_{l\geq i-j}2^{-l-1}(k_l4^l)+\frac{k_{i-j}4^{i-j}}{2}=\sum_{m=0}^{\infty}2^{-m-1}(k_{i+m-j}4^{i+m-j})+\frac{k_{i-j}4^{i-j}}{2}, \end{equation*} which tends to $\tfrac{2}{3}t+\tfrac{1}{3}t=t>0$ when $i\to\infty$. With these remarks we can write \begin{equation*} \lambda^{2^i}_{j-i}=\left(\left[1-\frac{\gamma(i,j)2^j}{2^i}\right]^{\tfrac{2^i}{\gamma(i,j)2^j}}\right)^{\gamma(i,j)2^j}, \end{equation*} which tends to $e^{-t2^j}$ when $i$ tends to infinity.
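An equivalent way to obtain this limit, for $i$ large enough that $\theta_i:=\gamma(i,j)2^{j-i}\in(0,1)$ (which is guaranteed by the inequality displayed above), is to take logarithms:
\begin{equation*}
\log\lambda^{2^i}_{j-i}=2^i\log(1-\theta_i)=-2^i\bigl(\theta_i+O(\theta_i^2)\bigr)=-\gamma(i,j)2^j+O\bigl(\gamma(i,j)^2 4^j 2^{-i}\bigr)\longrightarrow -t2^j,
\end{equation*}
since $\gamma(i,j)\to t$; exponentiating gives $\lambda^{2^i}_{j-i}\to e^{-t2^j}$.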
\textit{Proof of (b).} The function $v_i(x)-u(x,t)$ can be seen as the difference of the two operators $T_i$ and $T^t_{\infty}$ applied to the initial condition, namely \begin{equation*} v_i(x)=T_iu_0(x)=\int_{y\in \mathbb{R}^+}M^i(x,y)u_0(y)\,dy \end{equation*} and \begin{equation*} u(x,t)=T^t_{\infty}u_0(x)=\int_{y\in \mathbb{R}^+}K(x,y;t)u_0(y)\,dy. \end{equation*} Since the eigenvalues of $T_i-T^t_\infty$ are given by $\lambda^{2^i}_{j(h)-i}-e^{-t2^{j(h)}}$, for each $h\in\mathscr{H}$, from Theorem~\ref{thm:op.Lp.haar} in Section~\ref{sec:spectralanalysis} we have \begin{equation*}
\norm{v_i-u(\cdot,t)}_{L^p(\mathbb{R}^+)}\leq C_1\biggl\|\biggl(\sum_{h\in\mathscr{H}}\abs{\lambda^{2^i}_{j(h)-i}-e^{-t2^{j(h)}}}^2\abs{\proin{u_0}{h}}^2
\abs{I(h)}^{-1}\chi_{I(h)}(\cdot)\biggr)^{1/2}\biggr\|_{L^p(\mathbb{R}^+)}. \end{equation*} From (5.g) and (5.h) in Lemma~\ref{lemma:kerneldelta1} we have that the family $\lambda^{2^i}_{j(h)-i}$ is uniformly bounded. On the other hand, since $\norm{\bigl(\sum_{h\in\mathscr{H}}\abs{\proin{u_0}{h}}^2 {\abs{I(h)}}^{-1}\chi_{I(h)}(\cdot)\bigr)^{1/2}}_{L^p(\mathbb{R}^+)}\leq C_2\norm{u_0}_{L^p(\mathbb{R}^+)}<\infty$, we can take the limit as $i\to+\infty$ inside the $L^p$-norm and the series, obtaining $\norm{v_i-u(\cdot,t)}_{L^p(\mathbb{R}^+)}\to 0$ when $i\to+\infty$. \end{proof}
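Regarding the last step in the proof of (b), the interchange of the limit with the series and the $L^p$-norm can be justified as follows (a sketch using only part (a) and the uniform bound above, the value of which we denote here by $C$). For each $h\in\mathscr{H}$ set $a_i(h):=\abs{\lambda^{2^i}_{j(h)-i}-e^{-t2^{j(h)}}}$, so that $a_i(h)\leq C$ for every $i$ and $h$, while $a_i(h)\to 0$ as $i\to+\infty$ by (a). Then, pointwise,
\begin{equation*}
g_i:=\biggl(\sum_{h\in\mathscr{H}}a_i(h)^2\abs{\proin{u_0}{h}}^2\abs{I(h)}^{-1}\chi_{I(h)}\biggr)^{1/2}\leq C\biggl(\sum_{h\in\mathscr{H}}\abs{\proin{u_0}{h}}^2\abs{I(h)}^{-1}\chi_{I(h)}\biggr)^{1/2}=:Cg,
\end{equation*}
with $g\in L^p(\mathbb{R}^+)$ and hence finite almost everywhere. Dominated convergence applied to the series (its terms are dominated by the terms of $C^2g^2$, whose sum is finite a.e.) gives $g_i\to 0$ a.e., and dominated convergence in $L^p$, with dominating function $Cg$, gives $\norm{g_i}_{L^p(\mathbb{R}^+)}\to 0$; hence $\norm{v_i-u(\cdot,t)}_{L^p(\mathbb{R}^+)}\to 0$.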
\noindent{\footnotesize \textsc{Instituto de Matem\'atica Aplicada del Litoral, UNL, CONICET}
\noindent\textmd{CCT CONICET Santa Fe, Predio ``Dr. Alberto Cassano'', Colectora Ruta Nac.~168 km 0, Paraje El Pozo, S3007ABA Santa Fe, Argentina.} }
\end{document}