diff --git a/.gitattributes b/.gitattributes index b2d592944f0e1825d712a9b6b8cd6ae53c7ba186..9a96fdaa275e7613bd418723ea337c52bd4da9f2 100644 --- a/.gitattributes +++ b/.gitattributes @@ -219,3 +219,4 @@ data_all_eng_slimpj/shuffled/split/split_finalac/part-05.finalac filter=lfs diff data_all_eng_slimpj/shuffled/split/split_finalac/part-10.finalac filter=lfs diff=lfs merge=lfs -text data_all_eng_slimpj/shuffled/split/split_finalac/part-01.finalac filter=lfs diff=lfs merge=lfs -text data_all_eng_slimpj/shuffled/split/split_finalac/part-06.finalac filter=lfs diff=lfs merge=lfs -text +data_all_eng_slimpj/shuffled/split/split_finalac/part-09.finalac filter=lfs diff=lfs merge=lfs -text diff --git a/data_all_eng_slimpj/shuffled/split/split_finalac/part-09.finalac b/data_all_eng_slimpj/shuffled/split/split_finalac/part-09.finalac new file mode 100644 index 0000000000000000000000000000000000000000..cc159dc052499f92fe7868f1d57ea9aacb5260a5 --- /dev/null +++ b/data_all_eng_slimpj/shuffled/split/split_finalac/part-09.finalac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:829a2cd7962144cdbe0fa20a806443e2630c419e4a7fd48fd24cd385a08fd4e8 +size 12576636638 diff --git a/data_all_eng_slimpj/shuffled/split2/finalzkah b/data_all_eng_slimpj/shuffled/split2/finalzkah new file mode 100644 index 0000000000000000000000000000000000000000..dd161a00dce4926b31853aa383530a45ff59f27c --- /dev/null +++ b/data_all_eng_slimpj/shuffled/split2/finalzkah @@ -0,0 +1,5 @@ +{"text":"\\subsection*{Acknowledgements}}\n\\newcommand{\\thismonth}{\\ifcase\\month\\or\n January\\or February\\or March\\or April\\or May\\or June\\or\n July\\or August\\or September\\or October\\or November\\or December\\fi\n \\space\\number\\year}\n\\newcommand{\\sideremark}[1]{\\marginpar{\\small #1}}\n\\DeclareSymbolFont{script}{U}{eus}{m}{n}\n\\DeclareSymbolFontAlphabet{\\mathscr}{script}\n\\DeclareMathSymbol{\\EuWedge}{0}{script}{\"5E}\n\\DeclareMathAlphabet{\\mathrmsl}{OT1}{cmr}{m}{sl}\n\\newcommand{\\symb}[2]{\\newcommand{#1}{{\\mathit{#2}}}}\n\\newcommand{\\rssymb}[2]{\\newcommand{#1}{{\\mathrmsl{#2}}}}\n\\newcommand{\\calsymb}[2]{\\newcommand{#1}{{\\mathcal{#2}}}}\n\\newcommand{\\bbsymb}[2]{\\newcommand{#1}{{\\mathbb{#2}}}}\n\\newcommand{\\liealg}[2]{\\newcommand{#1}{{\\mathfrak{#2}}}}\n\\newcommand{\\liealr}[2]{\\renewcommand{#1}{{\\mathfrak{#2}}}}\n\\newcommand{\\lieoper}[2]{\\newcommand{#1}{\\mathop\n {\\mathfrak{#2}\\null}\\nolimits}}\n\\newcommand{\\oper}[3][n]{\\newcommand{#2}{\\mathop\n {\\mathrm{#3}\\null}\\ifx n#1\\nolimits\\else\\limits\\fi}}\n\\newcommand{\\rsoper}[3][n]{\\newcommand{#2}{\\mathop\n {\\mathrmsl{#3}\\null}\\ifx n#1\\nolimits\\else\\limits\\fi}}\n\\bbsymb\\C{C} \\bbsymb\\F{F} \\bbsymb\\HQ{H}\\bbsymb\\I{I} \\bbsymb\\N{N} \\bbsymb\\OC{O}\n\\bbsymb\\Q{Q} \\bbsymb\\R{R} \\bbsymb\\U{U} \\bbsymb\\V{V} \\bbsymb\\W{W} \\bbsymb\\Z{Z}\n\\calsymb\\cA{A} \\calsymb\\cB{B} \\calsymb\\cC{C} \\calsymb\\cD{D} \\calsymb\\cE{E}\n\\calsymb\\cF{F} \\calsymb\\cG{G} \\calsymb\\cH{H} \\calsymb\\cI{I} \\calsymb\\cJ{J}\n\\calsymb\\cK{K} \\calsymb\\cL{L} \\calsymb\\cM{M} \\calsymb\\cN{N} \\calsymb\\cO{O}\n\\calsymb\\cP{P} \\calsymb\\cQ{Q} \\calsymb\\cR{R} \\calsymb\\cS{S} \\calsymb\\cT{T}\n\\calsymb\\cU{U} \\calsymb\\cV{V} \\calsymb\\cW{W} \\calsymb\\cX{X} \\calsymb\\cY{Y}\n\\calsymb\\cZ{Z}\n\\newcommand{\\varepsilon}{\\varepsilon}\n\\newcommand{\\gamma} \\newcommand{\\Gam}{{\\mathrmsl\\Gamma}}{\\gamma} 
\\newcommand{\\Gam}{{\\mathrmsl\\Gamma}}\n\\newcommand{\\lambda}\\newcommand{\\Lam}{{\\mathrmsl\\Lambda}}{\\lambda}\\newcommand{\\Lam}{{\\mathrmsl\\Lambda}}\n\\newcommand{\\sigma} \\newcommand{\\Sig}{{\\mathrmsl\\Sigma}}{\\sigma} \\newcommand{\\Sig}{{\\mathrmsl\\Sigma}}\n\\renewcommand{\\geq}{\\geqslant} \\renewcommand{\\leq}{\\leqslant}\n\\rsoper\\End{End} \\rsoper\\Hom{Hom} \n\\rsoper\\Sym{Sym} \\rsoper\\Skew{Skew}\n\\rsoper\\gr{gr} \\rsoper{\\spn}{span}\n\\rsoper\\Aut{Aut} \\rsoper\\Stab{Stab} \n\\rsoper\\GL{GL}\\rsoper\\SL{SL}\\rsoper\\PGL{PGL}\\rsoper\\PSL{PSL}\\rsoper\\Symp{Sp}\n\\rsoper\\CO{CO}\\rsoper\\On{O} \\rsoper\\SO{SO} \\rsoper\\Pin{Pin}\\rsoper\\Spin{Spin}\n\\rsoper\\CU{CU}\\rsoper\\Un{U} \\rsoper\\SU{SU}\n\\rsoper\\Diff{Diff} \\rsoper\\SDiff{SDiff}\n\\lieoper\\der{der} \\lieoper\\stab{stab} \n\\lieoper\\gl{gl} \\lieoper\\sgl{sl}\\lieoper\\symp{sp}\n\\lieoper\\co{co} \\lieoper\\so{so} \\lieoper\\spin{spin}\n\\lieoper\\cu{cu} \\lieoper\\un{u} \\lieoper\\su{su}\n\\rsoper\\Vect{Vect} \\rsoper\\Ham{Ham}\n\\oper\\real{Re} \n\\oper\\imag{Im} \n\\newcommand{\\ip}[1]{\\langle#1\\rangle}\n\\newcommand{\\Ip}[1]{\\bigl\\langle#1\\bigr\\rangle}\n\\newcommand{\\ipp}[1]{\\langle\\mkern-3mu\\langle#1\\rangle\\mkern-3mu\\rangle}\n\\newcommand{\\Ipp}[1]{\\bigl\\langle\\mkern-5mu\\bigl\\langle\n #1\\bigr\\rangle\\mkern-5mu\\bigr\\rangle}\n\\newcommand{\\norm}[2][]{|\\mkern-2mu|#2|\\mkern-2mu|\n _{\\lower1pt\\hbox{${}_{#1}$}}}\n\\newcommand{\\Norm}[2][]{\\bigl|\\mkern-3mu\\bigr|#2\\bigr|\\mkern-3mu\\bigr|\n _{\\lower1pt\\hbox{${}_{#1}$}}}\n\\newcommand{\\lie}[1]{{\\mathfrak{#1}}}\n\\newcommand{\\alg}[1]{{\\mathbb{#1}}}\n\\newcommand{\\punc}[1]{\\smallsetminus\\{#1\\}}\n\\newcommand{\\restr}[1]{|_{#1}^{\\vphantom{y}}}\n\\newcommand{\\Restr}[1]{\\Big|_{#1}}\n\\newcommand{\\setof}[1]{\\lbrace#1\\rbrace}\n\\newcommand{\\Setof}[1]{\\bigl\\lbrace#1\\bigr\\rbrace}\n\\newcommand{\\rto}[1][]{\\xrightarrow{#1}}\n\\newcommand{\\lto}[1][]{\\xleftarrow{#1}}\n\\newcommand{\\liebrack}[1]{[#1]}\n\\newcommand{\\Liebrack}[1]{\\bigl[#1\\bigr]}\n\\newcommand{\\abrack}[1]{[\\mkern-3mu[#1]\\mkern-3mu]}\n\\newcommand{\\Abrack}[1]{\\bigl[\\mkern-5mu\\bigl[#1\\bigr]\\mkern-5mu\\bigr]}\n\\newcommand{\\mathbin{{\\times}\\!}}{\\mathbin{{\\times}\\!}}\n\\newcommand{^{*\\!}}{^{*\\!}}\n\\newcommand{^{\\scriptscriptstyle\\times}}{^{\\scriptscriptstyle\\times}}\n\\newcommand{^{\\scriptscriptstyle\\mathrm T\\!}}{^{\\scriptscriptstyle\\mathrm T\\!}}\n\\newcommand{\\mathinner{\\raise2pt\\hbox{$\\centerdot$}}}{\\mathinner{\\raise2pt\\hbox{$\\centerdot$}}}\n\\newcommand{\\vsum}{\\mathinner\\oplus} \n\\newcommand{\\dsum}{\\oplus} \n\\newcommand{\\Dsum}{\\bigoplus} \n\\newcommand{\\vtens}{\\mathinner\\tens} \n\\newcommand{\\tens}{\\otimes} \n\\newcommand{\\Tens}{\\bigotimes} \n\\newcommand{\\vcartan}{\\mathinner\\odot} \n\\newcommand{\\cartan}{\\odot} \n\\newcommand{\\Cartan}{\\bigodot} \n\\newcommand{\\vwedge}{\\mathinner\\wedge} \n\\newcommand{\\Wedge}{\\EuWedge} \n\\newcommand{\\skwend}{\\mathinner{\\scriptstyle\\vartriangle}\n\\newcommand{\\idealin}{\\trianglelefteq} \n\\newcommand{\\subnormal}{\\ltimes} \n\\newcommand{\\normalsub}{\\rtimes} \n\\newcommand{\\intersect}{\\mathinner\\cap} \n\\newcommand{\\propl}{\\sim} \n\\newcommand{\\setdif}{\\smallsetminus\n\\newcommand{\\from}{\\colon} \n\\newcommand{\\into}{\\hookrightarrow} \n\\newcommand{\\connect}{\\#} \n\\newcommand{\\isom}{\\cong} \n\\newcommand{\\grad}{\\nabla} \n\\newcommand{\\Lie}{\\cL} \n\\newcommand{\\bdy}{\\partial} \n\\newcommand{\\del}{\\partial} 
\n\\newcommand{\\dbar}{\\overline\\partial} \n\\newcommand{\\dbyd}[1]{\\del\/\\del{#1}} \n\\newcommand{\\Proj}{\\mathrmsl{P}} \n\\newcommand{\\Gr}{\\mathrmsl{Gr}} \n\\newcommand{\\RP}[1]{\\R\\Proj^{#1}} \n\\newcommand{\\CP}[1]{\\C\\Proj^{#1}} \n\\newcommand{\\HP}[1]{\\HQ\\Proj^{#1}} \n\\newcommand{\\OP}[1]{\\OC\\Proj^{#1}} \n\\newcommand{\\half}{\\tfrac12} \n\\newcommand{\\Cinf}{\\mathrm{C}^\\infty} \n\\newcommand{\\const}{\\mathrm{const.\\ }} \n\\newcommand{\\st}{\\mathrel{|}} \n\\newcommand{\\ie}{\\textit{i.e.}} \n\\newcommand{\\empt}{\\varnothing} \n\\newcommand{\\sub}{\\subseteq} \n\\renewcommand{\\d}{{\\mathrmsl{d}}} \n\\rsoper\\dimn{dim} \n\\rsoper\\rank{rank} \n\\rsoper\\degree{deg} \n\\rsoper\\kernel{ker}\\rsoper\\image{im} \n\\rsoper\\alt{alt} \\rsoper\\sym{sym} \n\\rsoper\\Ad{Ad} \\rsoper\\ad{ad} \n\\rsoper\\CoAd{CoAd} \\rsoper\\coad{coad} \n\\rsoper\\trace{tr} \\rsoper\\trfree{tf} \n\\rsoper\\detm{det} \n\\rsoper\\Vol{Vol} \n\\rssymb\\vol{vol} \n\\rssymb\\iden{id} \n\\liealg{\\f}{f} \\liealg{\\g}{g} \\liealg{\\h}{h} \\liealg{\\n}{n} \\liealg{\\m}{m}\n\\liealg{\\p}{p} \\liealg{\\q}{q} \\liealr{\\t}{t} \\liealg{\\z}{z}\n\\newcommand{\\ccon}{\\theta\n\\newcommand{\\trl}{\\zeta\n\\newcommand{\\mathbin{\\raise1pt\\hbox{$\\scriptstyle\\bigcirc$}}}{\\mathbin{\\raise1pt\\hbox{$\\scriptstyle\\bigcirc$}}}\n\\newcommand{\\mathscr D}{\\mathscr D}\n\\newcommand{{\\boldsymbol x}}{{\\boldsymbol x}}\n\\newcommand{\\ell}{\\ell}\n\\newcommand{\\tabv}[2]{$\\vcenter{\\hbox{\\strut$#1$}\\hbox{\\strut{$#2$}}}$}\n\\newcommand{\\tabvvv}[3]{$\\vcenter{\\hbox{\\strut$#1$}\\hbox{\\strut{$#2$}}%\n\\hbox{\\strut{$#3$}}}$}\n\\begin{document}\n\\title[Subriemannian metrics and the metrizability of parabolic\ngeometries]{Subriemannian metrics\\\\\nand the metrizability of parabolic geometries}\n\\date{\\today}\n\\author{David M.J. Calderbank}\n\\address{Mathematical Sciences\\\\ University of Bath\\\\\nBath BA2 7AY\\\\ UK.}\n\\email{D.M.J.Calderbank@bath.ac.uk}\n\\author{Jan Slov\\smash{\\'a}k}\n\\address{Department of Mathematics and Statistics\\\\\nMasaryk University\\\\ Kotl\\'a\\v rsk\\'a 2\\\\ 611 37 Brno\\\\ Czech Republic.}\n\\email{slovak@math.muni.cz}\n\\author{Vladim\\smash{\\'\\i}r Sou\\smash{\\v c}ek}\n\\address{Mathematical Institute\\\\ Charles University\\\\ Sokolovsk\\'a 83\\\\\n186 75 Praha 8\\\\ Czech Republic.}\n\\email{soucek@karlin.mff.cuni.cz}\n\\begin{abstract}\nWe present the linearized metrizability problem in the context of parabolic\ngeometries and subriemannian geometry, generalizing the metrizability problem\nin projective geometry studied by R. Liouville in 1889. We give a general\nmethod for linearizability and a classification of all cases with irreducible\ndefining distribution where this method applies. These tools lead to natural\nsubriemannian metrics on generic distributions of interest in geometric control\ntheory.\n\\end{abstract}\n\\thanks{The authors thank the Czech Grant Agency, grant nr. P201\/12\/G028,\nfor financial support.}\n\\maketitle\n\n\n\\section{Introduction}\n\nMany areas of geometric analysis and control theory deal with distributions on\nsmooth manifolds, i.e., smooth subbundles of the tangent bundle. Let $\\cH\\leq\nTM$ be such a distribution of rank $n$ on a smooth $m$-dimensional manifold\n$M$. A smooth curve $c\\colon [a,b] \\to M$ ($a\\leq b\\in\\R$) is called\n\\emph{horizontal} if it is tangent to $\\cH$ at every point, i.e., for every\n$t\\in [a,b]$, the tangent vector $\\dot c(t)$ to $c$ at $c(t)\\in M$ belongs to\n$\\cH$. 
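A simple example may help fix ideas: on $\\R^3$ the rank two distribution $\\cH=\\spn\\{\\del_x,\\,\\del_y+x\\,\\del_z\\}$ contains the velocity of the curve $c(t)=(t,\\,t,\\,\\tfrac12 t^2)$, since $\\dot c(t)=\\del_x+\\del_y+t\\,\\del_z$ is the value of $\\del_x+(\\del_y+x\\,\\del_z)$ along $c$; hence $c$ is horizontal.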
It is well known that, at least locally, any two points $x,y\\in\nM$ can be connected by a horizontal curve $c$ if and only if $\\cH$ is\n\\emph{bracket-generating} in the sense that any tangent vector can be obtained\nfrom iterated Lie brackets of sections of $\\cH$.\n\nThis paper is concerned with bracket-generating distributions arising in\n\\emph{parabolic geometries}~\\cite{CS}, which are Cartan--Tanaka geometries\nmodelled on homogeneous spaces $G\/P$ where $G$ is a semisimple Lie group and\n$P\\leq G$ a parabolic subgroup. On a manifold $M$ equipped with such a\nparabolic geometry, each tangent space is modelled on the $P$-module $\\g\/\\p$,\nand the socle $\\h$ of this $P$-module (the sum of its minimal nonzero\n$P$-submodules) induces a bracket-generating distribution $\\cH$ on $M$. Simple\nand well-known examples include projective geometry and (Levi-nondegenerate)\nhypersurface CR geometry: in the former case, $\\g\/\\p$ is irreducible and so\n$\\cH=TM$, but in the latter case $\\cH$ is the corank one contact distribution\nof the hypersurface CR structure.\n\nA more prototypical example for this paper is when $\\cH\\leq TM$ is generic of\nrank $n$ and corank $\\frac12 n(n-1)$, i.e., $m=\\frac12 n(n+1) = n+\\frac12\nn(n-1)$, and $[\\Gam(\\cH),\\Gam(\\cH)]=\\Gam(TM)$. In this case the Lie bracket on\nsections of $\\cH$ induces an isomorphism $\\Wedge^2\\cH\\cong TM\/\\cH$ and the\ndistribution is said to be \\emph{free}. Any such manifold is a parabolic\ngeometry where $G=\\SO(V)$ with $\\dim V=2n+1$ and $P$ is the stabilizer of a\nmaximal ($n$-dimensional) isotropic subspace $U$ of $V$~\\cite{DS}. Then\n$\\g\/\\p$ has socle $\\h\\cong U^*\\otimes (U^\\perp\/U)$ with quotient isomorphic to\n$\\Wedge^2 \\h$, and $\\h\\leq \\g\/\\p$ induces the distribution $\\cH\\leq TM$ on\n$M$.\n\nWhile parabolic geometry is the main tool for the present work, our motivation\nis subriemannian geometry, which concerns the following notion~\\cite{Mont}.\n\n\\begin{defn} Consider an $m$-dimensional manifold $M$ with a given smooth\ndistribution $\\cH\\leq TM$ of constant rank $n$. A\n(pseudo-)Riemannian metric $g$ on $\\cH$ is called a \\emph{horizontal}\nor \\emph{subriemannian metric on $M$}.\n\\end{defn}\nHorizontal metrics are important in both geometric analysis and control\ntheory. Among the horizontal curves joining two points, it may be important to\nfind those which are optimal in some sense, for example those of shortest\nlength with respect to a horizontal metric. Horizontal metrics also allow for\nthe definition of a hypo-elliptic sublaplacian~\\cite{JL}, allowing methods of\nharmonic analysis to be applied. However, this raises the question: what is a\ngood choice of horizontal metric?\n\nFor the distribution $\\cH$ on a parabolic geometry, there is a natural\ncompatibility condition that can be imposed. 
Indeed, one of the key features\nof such a geometry is that it admits a canonical class of connections,\ncalled \\emph{Weyl connections}, which form an affine space modelled on\nthe space of $1$-forms.\n\n\\begin{defn} A horizontal metric on the distribution $\\cH\\leq TM$ induced\nfrom a parabolic geometry $M$ is \\emph{compatible} if it is covariantly\nconstant in horizontal directions with respect to some Weyl connection on $M$.\nWe say $M$ is \\emph{\\textup(locally\\textup) metrizable} if there exists\n(locally) a compatible horizontal metric.\n\\end{defn}\n\nThe metrizability problem has been studied for several classes of parabolic\ngeometry with $\\cH=TM$, in particular, the case of real projective. These\nexamples exhibit several interesting features, which we seek to generalize to\nall parabolic geometries---in particular to those with $\\cH\\neq TM$.\n\nFirst, whereas the metrizability condition appears to be highly nonlinear, it\nlinearizes when viewed as a condition on the inverse metric on $\\cH^*$\nmultiplied by a suitable power of the horizontal volume form. Secondly, this\nlinear equation is highly overdetermined, with a finite dimensional solution\nspace. Hence parabolic geometries admitting such horizontal metrics are rather\nspecial. This has been used to extract detailed information about the\nstructure of the geometry~\\cite{BDE,CEMN,DM,EM,Frost,Liouville,Sinjukov}.\n\nIf $\\h$ is the socle of $\\g\/\\p$, it is not generally the case that $S^2\\h$ is\nirreducible---indeed $\\h$ itself need not be irreducible. In order to\ngeneralize the studied examples, we introduce a condition on $P$-submodules\n$B\\leq S^2 \\h$ containing nondegenerate elements, which we call the\n\\emph{algebraic linearization condition} (ALC). Our first main result\n(Theorem~\\ref{alt}) justifies this terminology by showing that for parabolic\ngeometries and $P$-submodules $B\\leq S^2\\h$ satisfying the ALC, there is a\nbijection between compatible horizontal metrics and nondegenerate solutions of\nan overdetermined first order \\emph{linear} differential equation. (In fact,\nif $\\h$ is not irreducible we need a technical extra condition, which we call the\n\\emph{strong} ALC.)\n\nOur second main result (Theorem~\\ref{main}) is a complete classification of\nall parabolic geometries and all $P$-submodules $B\\leq S^2\\h$ such that $\\h$\nis irreducible and $B$ satisfies the ALC. The classification exhibits two\nnicely counterbalancing features. On the one hand, among parabolic geometries\nwith irreducible socle, those admitting $P$-submodules $B\\leq S^2\\h$\nsatisfying the ALC are rare. On the other hand, the list of examples is quite\nlong: we state the classification using three tables containing 14 infinite\nfamilies and 6 exceptional cases. Many of these examples invite further study\n(see e.g.~\\cite{P}).\n\nThe structure of the paper is as follows. In section~\\ref{s:bg} we briefly\noutline the main notions and tools of parabolic geometry, referring\nto~\\cite{CS} for details, but concentrating on examples. We also establish the\nlocal metrizability of the homogeneous model. In section~\\ref{s:mlp}, we\ndescribe the linearization principle and prove Theorem~\\ref{alt}. We give\nexamples, and in particular show how explicit formulae can be obtained not\nonly for the homogeneous model, but also for so-called \\emph{normal\n solutions}. Section~\\ref{s:class} is devoted to the main classification\nresult. 
We conclude by giving examples (Theorem~\\ref{more}) where the socle is\nnot irreducible.\n\n\n\\section{Background and motivating examples}\\label{s:bg}\n\nWe work throughout with real smooth manifolds $M$, real Lie groups $P$ and\nreal Lie algebras $\\p$ (e.g., we view $\\GL(n,\\C)$ as a real Lie group and\n$\\gl(n,\\C)$ as a real Lie algebra).\n\nA (real or complex) \\emph{$P$-module} $W$ is a finite dimensional (real or\ncomplex) vector space carrying a representation $\\rho_W\\colon P\\to \\GL(W)$;\n$W$ is then also a $\\p$-module, where $\\p$ is the Lie algebra of $P$, i.e., it\ncarries a representation $\\tilde\\rho_W \\colon\\p\\to\\gl(W)$. We write $\\xi\\mathinner{\\raise2pt\\hbox{$\\centerdot$}}\nw$ for $\\tilde\\rho_W(\\xi)(w)$. The \\emph{nilpotent radical} of $\\p$ is the\nintersection $\\n$ of the kernels of all simple $\\p$-modules. It is an ideal in\n$\\p$ and the quotient $\\p_0:=\\p\/\\n$ is reductive. We let $P_0:=P\/\\exp\\n$ be\nthe corresponding quotient group with Lie algebra $\\p_0$. Any $P$-module $W$\nhas a filtration\n\\begin{equation}\\label{eq:alg-filt}\n0=W^{(0)}\\noroot{}\\edyn\\;\\;\n\\dyn \\noroot{}\\llink<\\root{}\\link\\root{}\\dots\\root{1}\\edyn$&\n$\\so(2\\ell+1,\\C)\\;\\; \\ell\\geq 2$ &$2k, 2k+k(k-1)$\\\\\n\\hline\n$G_2^{h}$&$\\dyn \\noroot{}\\lllink<\\root{1\\strut}\\edyn\\;\\;\n\\dyn \\root{1}\\lllink>\\noroot{}\\edyn$& $G_2^\\C$&$4,6,10$\\\\\n\\hline\n\\end{tabular}\n\\smallbreak\n\\caption{Complex geometries with hermitian $B$}\\label{t:hermitian}\n\\end{table}\n\\begin{table}[!ht]\n\\begin{tabular}{|l|l|l|l|l|}\n\\hline\nCase& Diagram $\\Delta_\\ell$ for $\\p,B$ & Real simple $\\g$ & Growth \\\\\n\\hline\n$A_\\ell^{1,1}$&$\\dyn \\noroot{}\\link\\root{}\\dots\\root{2\\strut}\\edyn$&\n$\\sgl(\\ell+1,\\R)\\;\\; \\ell\\geq 2$ & $\\ell$\\\\\n\\hline\n$A_\\ell^{1,2}$&$\\dyn\\root{}\\link\\noroot{}\\link\\root{}\n\\dots\\root{}\\link\\root{1\\strut}\\link\\root{}\\edyn$& \n\\tabv{\\sgl(\\ell+1,\\R),\\,\\sgl(p+1,\\HQ)}\n{\\ell=2p+1 ,\\, p\\geq2}&$4p$\\\\\n\\hline\n$B_\\ell^{1,k}$&$\\dyn\\root{2\\strut}\\link\\root{}\n\\dots\\root{}\\link\\nodroot{k\\geq2}\\link\\root{}\\dots\\root{}\\llink>\\root{}\\edyn$&\n\\tabv{\\so(p,q),\\;k\\leq p\\leq q}{p+q=2\\ell+1}&\\tabv{d=k(2\\ell-2k+1),}\n{n=d+\\frac12 k(k-1)}\\\\\n\\hline\n$B_{\\ell}^{1,\\ell}$&$\\dyn \\root{2\\strut}\\link\\root{}\\dots\\root{}\\llink>\\noroot{}\\edyn$&\n$\\so(\\ell,\\ell+1)\\; \\ell\\geq 2$&$k,k+\\frac12 k(k-1)$\\\\\n\\hline\n$C_4^{1,2}$&$\\dyn \\root{}\\link\\noroot{}\\link\\root{}\\llink<\\root{1\\strut}\\edyn$&\n\\tabv{\\symp(8,\\R)}{\\symp(2,2)\\;\\;\\symp(1,3)}&$8,11$\\\\\n\\hline \n$C_\\ell^{1,k}$&$\\dyn\\root{}\\link\\root{1\\strut}\\dots\n\\root{}\\link\\nodroot{k=2j\\geq 4}\\link\\root{}\\dots\\root{}\\llink<\\root{}\\edyn$&\n\\tabv{\\symp(2\\ell,\\R)\\;\\;\\;\\symp(p,q)}{\\quad\\;\\ell=p+q,\\; k \\leq p\\leq q}\n&\\tabv{d=k(2\\ell-2k),}{n=d+\\frac12 k(k+1)}\\\\\n\\hline\n$D_\\ell^{1,k}$&$\\dyn \\root{2\\strut}\\link\\root{}\n\\dots\\root{}\\link\\nodroot{k\\geq 2}\\link\\root{}\n\\dots\\root{}\\rootupright{}\\rootdownright{}\\edyn$&\n\\tabv{\\so(p,q)\\qquad\\quad \\so^*(2\\ell)}\n{\\begin{matrix}2\\ell=p+q\\\\\nk\\leq p\\leq q\\end{matrix}\\quad \\begin{matrix}k=2j\\\\ k\\leq \\ell-2\\end{matrix}}\n&\\tabv{d=k(2\\ell-2k),}{n=d+\\frac12 k(k-1)}\\\\\n\\hline\n$E_6^{1,1}$& $\\dyn\\noroot{}\\link\\root{}\\link\\root{}\\rootdown{}\n\\link\\root{}\\link\\root{1\\strut}\\edyn$& $E_{6(6)}$, $E_{6(-26)}$ &$16$\\\\\n\\hline\n$G_2^{1,1}$&$\\dyn \\noroot{}\\lllink<\\root{2\\strut}\\edyn$& 
$G_{2(2)}$&$2,3,5$\\\\\n\\hline\n\\end{tabular}\n\\smallbreak\n\\caption{Real geometries with absolutely irreducible $\\h$}\n\\label{t:absirred}\n\\end{table}\n\n\\begin{table}\n\\begin{tabular}{|l|l|l|l|l|}\n\\hline\nCase& Diagram $\\Delta_\\ell$ for $\\p,B$ & Real simple $\\g$ & Growth \\\\\n\\hline\n$A_3^{2,1}$&$\\dyn \\noroot{}\\link\\root{2\\strut}\\link\\noroot{}\\edyn$&\n$\\su(1,3),\\;\\su(2,2)$&$4,5$\\\\\n\\hline\n$A_\\ell^{2,k}$&$\\dyn\\root{1\\strut}\\dots\\root{}\\link\\nodroot{k\\geq 2}\\link\\root{}\n\\dots\\root{}\\link\\nodroot{\\ell-k}\\link\\root{}\\dots\\root{1\\strut}\\edyn$&\n\\tabv{\\su(p,q),\\;k\\leq p \\leq q}{\\ell=p+q-1\\geq 4}\n&\\tabv{d=2k(\\ell-2k+1),}{n=d+k^2}\\\\\n\\hline\n$A_\\ell^{2,h}$&\n$\\begin{matrix}\n\\dyn\\root{}\\link\\noroot{}\\link\\root{}\\link\\root{\\strut1}\n\t\\dots\\root{}\\link\\noroot{}\\link\\root{}\\edyn \\\\\n\t\\oplus\\\\\n\t\\dyn\\root{}\\link\\noroot{}\\link\\root{}\\dots\\root{1}\n\t\\link\\root{}\\link\\noroot{}\\link\\root{}\n\t\\edyn\n\\end{matrix}$&\n\\tabv{\\su(p,q),\\;2\\leq p\\leq q}{\\ell=p+q-1\\geq 6}&$4(\\ell-3),4(\\ell-2)$\\\\\n\\hline\n$A_{2k+1}^{2,s}$&\n$\\begin{matrix}\n\\dyn\\root{}\\link\\root{\\strut1}\\dots\\root{}\\link\\noroot{}\\link\\root{}\n\\link\\noroot{}\\link\\root{}\\dots\\root{}\\link\\root{}\\edyn\\\\\n \\oplus\\\\\n\\dyn\\root{}\\link\\root{}\\dots\\root{}\\link\\noroot{}\\link\\root{}\n\\link\\noroot{}\\link\\root{}\\dots\\root{1}\\link\\root{}\\edyn \n\\end{matrix}$&\n\\tabvvv{\\su(k,k+2),}{\\su(k+1,k+1)}{\\ell=2k+1\\geq 7}\n &$4k,4k+k^2$\n\\\\\n\\hline\n$A_{2k}^{2,s}$&$\\begin{matrix}{\\dyn\\root{2\\strut}\\link\\root{}\\dots\\root{}\\link\n\\noroot{}\\link\\noroot{}\\link\\root{}\\dots\\root{}\\link\\root{}\\edyn}\\\\\n\\oplus\\\\\n{\\dyn \\root{}\\link\\root{}\\dots\\root{}\\link\\noroot{}\\link\\noroot{}\n\\link\\root{}\\dots\\root{}\\link\\root{\\smash{2}}\\edyn}\\end{matrix}$\n& \\tabv{\\su(k,k+1)}{\\ell=2k\\geq 4}&$2k,2k+k^2$\\\\\n\\hline\n$D_\\ell^{2,s}$&$\\dyn \\root{2}\\link\\root{}\\dots\\root{}\n\\norootupright{}\\norootdownright{}\\edyn$&\n\\tabv{\\so(\\ell-1,\\ell+1)}{\\so^*(2\\ell),\\;\\ell=2j+1}&\\tabv{d=2(\\ell-1),}\n{d+\\frac12(\\ell-1)(\\ell-2)}\\\\\n\\hline\n$D_\\ell^{2,h}$&$\\dyn \\root{}\\link\\root{1}\\dots\\root{}\n\\norootupright{}\\norootdownright{}\\edyn$&\n\\tabv{\\so(\\ell-1,\\ell+1)}{\\so^*(2\\ell),\\; \\ell=2j+1}&\\tabv{d=2(\\ell-1),}\n{d+\\frac12(\\ell-1)(\\ell-2)}\\\\\n\\hline\n$E_6^{2,h\\vphantom{{}^2}}$&$\\dyn\\noroot{}\\link\\root{}\\link\\root{}\\rootdown{1\\strut}\n\\link\\root{}\\link\\noroot{}\\edyn$ & $E_{6(2)}$&$16,24$\\\\\n\\hline\n\\end{tabular}\n\\smallbreak\n\\caption{Real geometries with $\\h$ not absolutely irreducible}\n\\label{t:red}\n\\end{table}\n\\end{thm}\n\n\\begin{proof}[Outline of Proof]\nIn the gradings of the complex algebras $\\g$ corresponding to parabolic\ngeometries, the number of irreducible components of $\\h^*$ is equal to the\nnumber of crosses in the Dynkin diagram describing the chosen parabolic\nsubalgebra. However, in the real forms of $\\g$, there might be complex or\nquaternionic components giving rise to two components in the\ncomplexification. These two complex components have to be either conjugate (in\nthe complex case) or isomorphic (in the quaternionic case).\n\nThe latter observation reduces our quest to diagrams with two crosses placed\nin a symmetric way. 
Indeed, more than two crosses cannot result in one\ncomponent, while asymmetric positions of the crosses inevitably yield two\ncomplex components which are neither conjugate nor isomorphic. Moreover,\nhaving two components in the complexified $\\h$, we may ignore the symmetric\nproducts of the individual parts in $S^2\\h$, because there cannot be any\nnondegenerate metrics there.\n\nWe first dispense with the case that $\\g$ is complex but $B$ is not, so that\n$B\\otimes\\C$ is irreducible in $\\g\\otimes\\C\\cong\\g\\oplus\\g$ and the diagram\nfor $(\\p,B)$ is invariant under the automorphism exchanging the two components\nof the Dynkin diagram. Thus $B\\otimes\\C=\\h_\\alpha\\otimes\\h_\\beta$ where\n$\\h\\otimes\\C=\\h_\\alpha\\oplus\\h_\\beta$. Now the ALC is satisfied provided\n$\\h_\\alpha\\otimes \\h_\\alpha^*$ (and hence also $\\h_\\beta\\otimes\\h_\\beta^*$)\nhas precisely two irreducible components as a representation of a component of\n$\\p_0\\otimes\\C$. Only the (dual) defining representations in type A have this\nproperty, and so $\\g$ must have type $A,B$ or $G$, where the nodes crossed\nin $\\g\\otimes\\C$ are end nodes corresponding to short simple roots. The\npossibilities are listed in Table~\\ref{t:hermitian}, covering the following\nthree cases:\n\n\\begin{case}[$A_{\\ell}^h$] The c-projective geometries may be equipped with\ndistinguished hermitian metrics.\n\\end{case}\n\\begin{case}[$B_{\\ell}^h$] The almost complex version of a free distribution of\nrank $k$, may be equipped with distinguished hermitian metrics.\n\\end{case}\n\\begin{case}[$G_2^h$] The almost complex version of the\n$(2,3,5)$-distributions may be equipped with distinguished hermitian metrics. \n\\end{case}\n\nWe analyse the remaining real cases with irreducible $\\h$ by\nthe Dynkin type of $\\g$ in the following sections.\n\\end{proof}\n\n\\subsection{Proof of Theorem~\\ref{main} when $\\g$ has type $A_\\ell$}\n\nThe case $\\ell=1$ is trivial, so we assume $\\ell\\geq 2$, and first consider the\ncase of a single crossed node. If the crossed node is one of the ends of the\nDynkin diagram, the only real $\\g$ is the split form, $\\h$ and $S^2\\h$ are\nirreducible, and $B=S^2\\h$ satisfies the ALC: when $\\ell=2$,\n\\[\nB\\simeq \\dyn \\noroot{}\\link\\root{2}\\edyn \\qquad \\h^*\\otimes B \\simeq\n\\dyn\\noroot{}\\link\\root{3}\\edyn\\oplus \\dyn\\noroot{}\\link\\root{1}\\edyn\n\\]\nand when $\\ell\\geq 3$,\n\\begin{equation*}\nB\\simeq \\dyn \\noroot{}\\link\\root{}\\dots\\root{2}\\edyn\\qquad\n\\h^*\\otimes B \\simeq \\dyn\\noroot{}\\link\\root{1}\\dots\\root{2}\\edyn\\oplus \n\\dyn\\noroot{}\\link\\root{}\\dots\\root{1}\\edyn.\n\\end{equation*}\nThese examples can be summarized in the following statement.\n\n\\begin{case}[$A^{1,1}_\\ell$] Here $\\g=\\sgl(\\ell+1,\\R)$, $\\ell\\geq2$, $\\h\\cong\\R^\\ell$\nand $B=S^2\\h$. This is the most classical case of projective structures on\n$\\ell$-dimensional manifolds $M$, and nondegenerate sections of $\\cB$ are\ninverse to arbitrary pseudo-Riemannian metrics on $M$.\n\\end{case}\n\nSuppose next that the cross is adjacent to one end of the diagram, with\n$\\ell\\geq 3$. 
We then have $S^2\\h= B\\oplus B'$, where\n\\begin{gather*}\n\\h\\simeq\\dyn\\root{1}\\link\\noroot{}\\link\\root{}\\dots\\root{1}\\edyn \\qquad\n\\h^*\\simeq \\dyn\\root{1}\\link\\noroot{}\\link\\root{1}\\dots\\root{}\\edyn\\\\\nB\\simeq\\dyn\\root{}\\link\\noroot{}\\link\\root{}\\dots\\root{1}\\link\\root{}\\edyn\n\\;(\\ell\\geq 4)\\qquad\nB'\\simeq\\dyn\\root{2}\\link\\noroot{}\\link\\root{}\\dots\\root{2}\\edyn\n\\end{gather*}\nand $B$ is trivial for $\\ell=3$ (when $\\h\\cong \\h^*$). The tensor product\n$\\h^*\\otimes B'$ decomposes into four irreducible components, except for the\nreal form $\\su(2,2)$ when $\\ell=3$, in which case there are only three\ncomponents. In any case, $B'$ does not satisfy the ALC.\n\nIn order for $B$ to have nondegenerate elements, $\\ell$ must be odd, and for\n$\\ell=2p+1\\geq 5$, $\\h^*\\otimes B\\simeq\n\\dyn\\root{1}\\link\\noroot{}\\link\\root{1}\\link\\root{}\n\\dots\\root{1}\\link\\root{}\\edyn \\oplus\n\\dyn\\root{1}\\link\\noroot{}\\link\\root{}\\dots\\root{1}\\edyn$; thus the ALC holds\nfor $B$.\n\n\\begin{case}[$A^{1,2}_\\ell$] For each $\\ell=2p+1\\geq 5$, there are two real forms.\nWhen $\\g\\simeq \\sgl(2p+2,\\R)$, the geometries are the almost grassmannian\nstructures on manifolds $M$ of dimension $4p$, modelled on the grassmannian of\n$2$-planes in $\\R^{2p}$. The tangent bundle $TM$ is identified with a tensor\nproduct $E\\otimes F$, where $\\rank E=2$, $\\rank F=2p$, and the nondegenerate\nmetrics in $\\cB$ are tensor products of area forms on $E$ and symplectic forms\non $F$. When $\\g\\simeq \\sgl(p,\\HQ)$, the geometries are almost quaternionic\ngeometries, where $TM$ is a quaternionic vector bundle, and the nondegenerate\nmetrics in $\\cB$ are the (real parts of) quaternionic hermitian forms.\n\\end{case}\n\nWhen the cross is further from the ends of the diagram, we have $S^2\\h=\nB\\oplus B'$ with\n\\begin{gather*}\nB\\simeq\\dyn\\root{}\\link\\root{1}\\dots\\root{}\\link\\noroot{}\\link\\root{}\n\\dots\\root{1}\\link\\root{}\\edyn\\qquad\nB'\\simeq\\dyn\\root{2}\\dots\\root{}\\link\\noroot{}\\link\\root{}\\dots\\root{2}\\edyn.\n\\end{gather*} \nand there are too many components in both $\\h^*\\otimes B$ and $\\h^*\\otimes\nB'$ to satisfy the ALC.\n\nWe now turn to cases with two crossed nodes, related by the diagram\nautomorphism of $A_\\ell$. First suppose the crossed nodes are the endpoints.\nIn order to have nontrivial $B$ we must have $\\ell\\geq3$, in which case\n$S^2\\h= B\\oplus B'\\oplus B''$ where\n\\begin{gather*}\n\\h\\simeq \\dyn\\noroot{}\\link\\root{1}\\dots\\root{}\\link\\noroot{}\\edyn\\oplus\n\\dyn\\noroot{}\\link\\root{}\\dots\\root{1}\\link\\noroot{}\\edyn \\simeq \\h^*\\\\\nB\\simeq \\dyn\\noroot{}\\link\\root{2}\\link\\noroot{}\\edyn \\text{ or }\n\\dyn\\noroot{}\\link\\root{1}\\link\\root{}\n\\dots\\root{}\\link\\root{1}\\link\\noroot{}\\edyn\\qquad\nB'\\simeq \\dyn\\noroot{}\\link\\root{2}\\dots\\root{}\\link\\noroot{}\\edyn\n\\oplus\\dyn\\noroot{}\\link\\root{}\\dots\\root{2}\\link\\noroot{}\\edyn\n\\end{gather*}\nand $B''$ is trivial. Clearly $\\h^*\\otimes B'$ has too many irreducible\ncomponents to satisfy the ALC, no matter which real form we consider.\n\nIt remains to consider $B$, first in the case $\\ell=3$, where the possible\nreal forms (with $\\h$ irreducible) are $\\su(2,2)$ and $\\su(1,3)$. 
Then\n\\[\n\\h^*\\otimes B\\simeq \\bigl(\\,\\dyn\\noroot{}\\link\\root{3}\\link\\noroot{}\\edyn\n\\oplus \\dyn\\noroot{}\\link\\root{3}\\link\\noroot{}\\edyn\\,\\bigr)\n\\oplus\n\\bigl(\\,\\dyn\\noroot{}\\link\\root{1}\\link\\noroot{}\\edyn\n\\oplus \\dyn\\noroot{}\\link\\root{1}\\link\\noroot{}\\edyn\\,\\bigr)\n\\]\nand the ALC is satisfied, since these are complexifications of two complex\ncomponents for the real form in question. However, for $\\ell\\geq 4$, we find\nthat the product $\\h^*\\otimes B$ leads to complexifications with three\ncomplex components, so the ALC is not satisfied. \n \n\\begin{case}[$A^{2,1}_3$] Here $\\g$ is\n\t$\\su(2,2)$ or $\\su(1,3)$, and $M$ has a CR structure, i.e., a contact distribution\n$\\cH$ equipped with a complex structure. The Levi form induces the\nclass of trivial parallel hermitian metrics (the Weyl connections\ncorresponding to the contact forms leave parallel both the complex structure\nand the symplectic form, thus also the associated metric, and the\nmetrizability problem is trivial as in the conformal case). However, we now\nsee that there may also be interesting compatible subriemannian metrics on\n$\\cH\\leq TM$ which are hermitian and tracefree with respect to the\nLevi form.\n\\end{case}\n\nNow suppose the crosses are not placed at the ends, say the left one at the\n$k$-th position, $2\\leq k$. Thus we consider the real forms $\\su(p,q)$ with\n$k\\leq p\\leq q$. We have\n\\begin{gather*}\n\\h \\simeq \n\\dyn\\root{1}\\dots\\root{}\\link\\noroot{}\\link\\root{}\n\\dots\\root{1}\\link\\noroot{}\\link\\root{}\\dots\\root{}\\edyn \\oplus\n\\dyn\\root{}\\dots\\root{}\\link\\noroot{}\\link\\root{1}\n\\dots\\root{}\\link\\noroot{}\\link\\root{}\\dots\\root{1}\\edyn \n\\\\\n\\h^* \\simeq \n\\dyn\\root{}\\dots\\root{1}\\link\\noroot{}\\link\\root{1}\n\\dots\\root{}\\link\\noroot{}\\link\\root{}\\dots\\root{}\\edyn \\oplus\n\\dyn\\root{}\\dots\\root{}\\link\\noroot{}\\link\\root{}\n\\dots\\root{1}\\link\\noroot{}\\link\\root{1}\\dots\\root{}\\edyn \n\\end{gather*}\nfor $\\ell>2k$ and\n\\begin{gather*}\n\\h \\simeq \n\\dyn\\root{1}\\dots\\root{}\\link\\noroot{}\\link\\noroot{}\n\\link\\root{}\\dots\\root{}\\edyn \\oplus\n\\dyn\\root{}\\dots\\root{}\\link\\noroot{}\\link\\noroot{}\n\\link\\root{}\\dots\\root{1}\\edyn \n\\\\\n\\h^* \\simeq\n\\dyn\\root{}\\dots\\root{1}\\link\\noroot{}\\link\\noroot{}\n\\link\\root{}\\dots\\root{}\\edyn \\oplus\n\\dyn\\root{}\\dots\\root{}\\link\\noroot{}\\link\\noroot{}\n\\link\\root{1}\\dots\\root{}\\edyn \n\\end{gather*}\nfor $\\ell=2k$. 
In particular, we have $S^2\\h\\supset B$ where\n\\begin{equation*}\nB\\simeq \\dyn\\root{1}\\dots\\root{}\\link\\noroot{}\\link\\root{}\n\\dots\\root{}\\link\\noroot{}\\link\\root{}\\dots\\root{1}\\edyn,\n\\end{equation*}\nwhich admits nondegenerate metrics and satisfies the ALC, with\n\\begin{align*}\n\\h^*\\otimes B&\\simeq\n\\bigl(\\,\\dyn\\root{1}\\dots\\root{1}\\link\\noroot{}\\link\\root{1}\n\\dots\\root{}\\link\\noroot{}\\link\\root{}\\dots\\root{1}\\edyn \\oplus\n\\dyn\\root{1}\\dots\\root{}\\link\\noroot{}\\link\\root{}\n\\dots\\root{1}\\link\\noroot{}\\link\\root{1}\\dots\\root{1}\\edyn\\,\\bigr)\\\\\n&\\;{}\\oplus\\bigl(\\,\\dyn\\root{}\\dots\\root{}\\link\\noroot{}\\link\\root{1}\n\\dots\\root{}\\link\\noroot{}\\link\\root{}\\dots\\root{1}\\edyn\\oplus\n\\dyn\\root{1}\\dots\\root{}\\link\\noroot{}\\link\\root{}\n\\dots\\root{1}\\link\\noroot{}\\link\\root{}\\dots\\root{}\\edyn\\, \\bigr)\\\\\n\\text{or}\\qquad\n\\h^*\\otimes B &\\simeq \\bigl(\\,\\dyn\\root{1}\\dots\\root{1}\\link\\noroot{}\\link\n\\noroot{}\\link\\root{}\\dots\\root{1}\\edyn \\oplus\n\\dyn\\root{1}\\dots\\root{}\\link\\noroot{}\\link\n\\noroot{}\\link\\root{1}\\dots\\root{1}\\edyn\\,\\bigr)\n\\\\\n&\\;{}\\oplus\\bigl(\\,\\dyn\\root{}\\dots\\root{}\\link\\noroot{}\\link\n\\noroot{}\\link\\root{}\\dots\\root{1}\\edyn\\oplus\n\\dyn\\root{1}\\dots\\root{}\\link\\noroot{}\\link\n\\noroot{}\\link\\root{}\\dots\\root{}\\edyn\\, \\bigr).\n\\end{align*}\n\n\\begin{case}[$A^{2,k}_\\ell$] Here $\\g\\simeq \\su(p,q)$ with nodes $k$\nand $\\ell+1-k$ crossed, where $2\\leq k\\leq p \\leq q, p+q=\\ell+1$. In these\ngeometries, $\\cH\\cong E\\otimes F$, where $E$ is a complex vector bundle of\nrank $k$, and the rank $(\\ell-2k+1)$ complex vector bundle $F$ comes with a\nhermitian form of signature $(p-k,q-k)$. The corank of $\\cH\\leq TM$ is $k^2$,\nand the metrics on $\\cH$ are the products of hermitian metrics on $E$ with the\ngiven ones on $F$. When $\\ell=2k$ (i.e., $F$ has rank $1$), $\\g=\\su(k,k+1)$\nwith the nodes $k,k+1$ are crossed. These are the free CR geometries with\ncomplex structure on $\\cH$ studied in \\cite{SchmalzS} (where it is also\nexplained how complex structure arises on $\\cH$).\n\\end{case}\n\nThe remaining components of $S^2\\h$ do not satisfy the ALC, except in special\ncases $k=2$, $2k=\\ell$ and $2k+1=\\ell$. In particular, when $k=2$,\n\\begin{equation*}\nB'\\simeq \\dyn\\root{}\\link\\noroot{}\\link\\root{}\\link\\root{1}\n\\dots\\root{}\\link\\noroot{}\\link\\root{}\\edyn \n\\oplus\n\\dyn\\root{}\\link\\noroot{}\\link\\root{}\\dots\\root{1}\n\\link\\root{}\\link\\noroot{}\\link\\root{}\n\\edyn\n\\end{equation*}\nsatisfies the ALC (and is nontrivial for $\\ell\\geq 6$).\n\n\\begin{case}[$A^{2,h}_\\ell$] Here $\\g\\simeq \\su(p,q)$ with nodes $2$\nand $\\ell-1$ crossed, where $2\\leq p\\leq q$ and $\\ell=p+q-1\\geq 6$. In this\ngeometry, $\\cH\\cong E\\otimes F$, where $E$ is a complex vector bundle of\nrank $2$, and $F$ is a complex vector bundle of rank $\\ell-3$. The\ncorank of $\\cH\\leq TM$ is $4$. 
The eligible metrics are the\ncomplex symmetric bilinear forms of the form of tensor product of two exterior\nforms.\n\\end{case}\t\n\nWhen $2k=\\ell$, we obtain $S^2\\h= B\\oplus B'$ where\n\\begin{gather*}\nB'= \\begin{matrix}\n\\dyn\\root{2}\\dots\\root{}\\link\\noroot{}\\link\\noroot{}\\link\\root{}\n\\dots\\root{}\\edyn\\oplus{}\\\\\n\\dyn\\root{}\\dots\\root{}\\link\\noroot{}\\link\\noroot{}\\link\\root{}\n\\dots\\root{2}\\edyn\\quad\\end{matrix}\n\\end{gather*}\nwhich admits nondegenerate metrics, and satisfies the ALC, with\n\\begin{gather*}\n\\h^*\\otimes B' \\simeq \\bigl(\\,\\dyn\\root{2}\\dots\\root{1}\\link\\noroot{}\\link\n\\noroot{}\\link\\root{}\\dots\\root{}\\edyn \\oplus\n\\dyn\\root{}\\dots\\root{}\\link\\noroot{}\\link\n\\noroot{}\\link\\root{1}\\dots\\root{2}\\edyn\\,\\bigr)\\oplus{}\\\\\n\\qquad\\qquad\\quad \\bigl(\\,\\dyn\\root{2}\\dots\\root{}\\link\\noroot{}\\link\n\\noroot{}\\link\\root{1}\\dots\\root{}\\edyn \\oplus\n\\dyn\\root{}\\dots\\root{1}\\link\\noroot{}\\link\n\\noroot{}\\link\\root{}\\dots\\root{2}\\edyn\\,\\bigr)\\oplus{}\\\\\n\\qquad\\qquad\\;\\,\\bigl(\\,\\dyn\\root{1}\\dots\\root{}\\link\\noroot{}\\link\n\\noroot{}\\link\\root{}\\dots\\root{}\\edyn\\oplus\n\\dyn\\root{}\\dots\\root{}\\link\\noroot{}\\link\n\\noroot{}\\link\\root{}\\dots\\root{1}\\edyn\\, \\bigr).\n\\end{gather*}\n\\begin{case}[$A^{2,s}_{2k}$] This case is again the free CR geometry,\nwith $\\g=\\su(k,k+1)$, but the eligible metrics are the complex bilinear\nmetrics on $\\cH$.\n\\end{case}\nSimilarly, when $\\ell=2k+1$ with the $k$-th and $(k+2)$-nd nodes crossed,\n\\begin{equation*}\nB'\\simeq \\dyn\\root{}\\link\\root{1}\\dots\\root{}\\link\\noroot{}\\link\\root{}\n\\link\\noroot{}\\link\\root{}\\dots\\root{}\\edyn \\oplus\n\\dyn\\root{}\\dots\\root{}\\link\\noroot{}\\link\\root{}\n\\link\\noroot{}\\link\\root{}\\dots\\root{1}\\link\\root{}\\edyn \n\\end{equation*}\nsatisfies the ALC.\t\n\\begin{case}[$A^{2,s}_{2k+1}$] Here $\\ell=2k+1,$ $\\g$ is $\\su(k,k+2),$\nor $\\su(k+1,k+1),$ with nodes $k$ and $k+2$ crossed. In this geometry,\n$\\cH\\cong E\\otimes F$, where $E$ is a complex vector bundle of\nrank $k$, and $F$ is a complex vector bundle of rank $2$. The\ncodimension of $\\cH\\leq TM$ is $k^2$. The eligible metrics are the\ncomplex symmetric bilinear forms of the form of tensor product of two exterior\nforms.\n\\end{case}\nWe have now exhausted all possibilities, completing the proof in type A.\n\n\\subsection{Proof of Theorem~\\ref{main} when $\\g$ has type $B_\\ell$}\n\nIn the type $B$ case, there are no complex or quaternionic modules to\nconsider, so the irreducible cases have one cross only. The unique grading of\nlength one is odd dimensional conformal geometry. In dimension three we then\nhave\n\\[\n\\h^* \\simeq \\dyn\\noroot{}\\llink>\\root{2}\\edyn \\simeq \\h\\qquad\nS^2\\h\\simeq \\dyn\\noroot{}\\llink>\\root{4}\\edyn\\oplus \n\\dyn\\noroot{}\\llink>\\root{}\\edyn.\n\\] \nThe trivial representation in $S^2\\h$ corresponds to the trivial case of\nmetrics in the conformal class, which are excluded from our classification,\nand choosing $B$ to be the other component leads to three components in\n$B\\otimes \\h^*$, so the ALC fails. 
Similarly, for conformal geometries of\ndimensions $2\\ell-1\\geq 5$ we obtain\n\\[\n\\h^* \\simeq \\dyn\\noroot{}\\link\\root{1}\\dots\\root{}\\llink>\\root{}\\edyn \n\\simeq \\h\\qquad\nS^2\\h\\simeq \\dyn\\noroot{}\\link\\root{2}\\dots\\root{}\\llink>\\root{}\\edyn\n\\oplus \\dyn\\noroot{}\\link\\root{}\\dots\\root{}\\llink>\\root{}\\edyn.\n\\] \nAs before, the trivial summand is excluded, and the other component fails the\nALC.\n\nWe turn now to Lie contact geometries, with the second node crossed. For\n$B_3$,\n\\[\n\\h^* \\simeq \\dyn\\root{1}\\link\\noroot{}\\llink>\\root{2}\\edyn \n\\simeq \\h\\qquad\nS^2\\h= B\\oplus B'\\oplus B''\\simeq \n\\dyn\\root{2}\\link\\noroot{}\\llink>\\root{}\\edyn\n\\oplus\\dyn\\root{}\\link\\noroot{}\\llink>\\root{2}\\edyn\n\\oplus\\dyn\\root{2}\\link\\noroot{}\\llink>\\root{4}\\edyn.\n\\] \nHere, $B\\otimes \\h^* = \\dyn\\root{3}\\link\\noroot{}\\llink>\\root{2}\\edyn\\oplus\n\\dyn\\root{1}\\link\\noroot{}\\llink>\\root{2}\\edyn$ and satisfies the ALC. The\nother choices lead to too many components. For $B_\\ell$ with $\\ell\\geq 4$, we\nhave instead\n\\begin{gather*}\n\\h^* \\simeq\n\\dyn\\root{1}\\link\\noroot{}\\link\\root{1}\\dots\\root{}\\llink>\\root{}\\edyn \n\\simeq \\h \\qquad S^2\\h= B\\oplus B'\\oplus B'' \\\\ \nB\\simeq\n\\dyn\\root{2}\\link\\noroot{}\\link\\root{}\\dots\\root{}\\llink>\\root{}\\edyn\\qquad\nB'\\simeq \\dyn\\root{2}\\link\\noroot{}\\link\\root{2}\\dots\\root{}\\llink>\n\\root{}\\edyn\\qquad\nB''\\simeq \\dyn\\root{}\\link\\noroot{}\\link\\root{}\n\\link\\root{1}\\dots\\root{}\\llink>\\root{}\\edyn,\n\\end{gather*}\nexcept that when $\\ell=4$, $B''=\\dyn\\root{}\\link\\noroot{}\\link\\root{}\n\\llink>\\root{2}\\edyn$. Now we check that $B'\\otimes \\h^*$ has six components,\n$B''\\otimes\\h^*$ has three components, but the ALC is again satisfied by $B$.\nLie contact geometries exist for $\\g=\\so(p,q)$ with $2\\leq p\\leq q$; $\\h$ is the\ntensor product of defining representations $\\R^2$ of $\\sgl(2,\\R)$ and\n$\\R^{p+q-4}$ of $\\so(p-2,q-2)$, and $B$ is the tensor product of a symmetric\nform on $\\R^2$ and the defining inner product of signature $(p-2,q-2)$ on\n$\\R^{p+q-4}$. See \\cite[\\S4.2.5]{CS} for more details on these\ngeometries.\n\nNext we consider $B_\\ell$ with the cross on $k$-th position, $3\\leq k\\leq\n\\ell-1$; the outcome is quite similar to the Lie contact case. For $k\\neq\n\\ell-1$, $S^2\\h= B \\oplus B'\\oplus B''$, where\n\\begin{gather*}\n\\h^*\\simeq\\dyn\\root{}\\dots\\root{1}\\link\\noroot{}\\link\\root{1}\\dots\n\\root{}\\llink>\\root{}\\edyn \\qquad\n\\h\\simeq\\dyn\\root{1}\\dots\\root{}\\link\\noroot{}\\link\\root{1}\\dots\n\\root{}\\llink>\\root{}\\edyn\\\\\nB\\simeq\\dyn\\root{2}\\dots\\root{}\\link\\noroot{}\\link\\root{}\\dots\n\\root{}\\llink>\\root{}\\edyn\\\\\nB'\\simeq\\dyn\\root{2}\\dots\\root{}\\link\\noroot{}\\link\\root{2}\\dots\n\\root{}\\llink>\\root{}\\edyn\\qquad\nB''\\simeq\n\\dyn\\root{}\\link\\root{1}\\dots\\root{}\\link\\noroot{}\\link\\root{}\\link\\root{1}\n\\dots\\root{}\\llink>\\root{}\\edyn\\\\\n\\h^*\\otimes B\\simeq \\dyn\\root{2}\\dots\\root{1}\\link\\noroot{}\\link\\root{1}\\dots\n\\root{}\\llink>\\root{}\\edyn \\oplus\n\\dyn\\root{1}\\dots\\root{}\\link\\noroot{}\\link\\root{1}\\dots\n\\root{}\\llink>\\root{}\\edyn,\n\\end{gather*}\nso $B$ satisfies the ALC, but $B'$ and $B''$ do not. 
If $k=\\ell-1$,\n$S^2\\h=B\\oplus B'\\oplus B''$ with\n\\begin{gather*}\n\\h^* \\simeq \\dyn\\root{}\\dots\\root{1}\\link\\noroot{}\\llink>\\root{2}\\edyn\n\\qquad \\h \\simeq \\dyn\\root{1}\\dots\\root{}\\link\\noroot{}\\llink>\\root{2}\\edyn\\\\\nB\\simeq \\dyn\\root{2}\\dots\\root{}\\link\\noroot{}\\llink>\\root{}\\edyn\\qquad\nB'\\simeq \\dyn\\root{2}\\dots\\root{}\\link\\noroot{}\\llink>\\root{4}\\edyn\\qquad\nB''\\simeq \\dyn\\root{}\\link\\root{1}\n\\dots\\root{}\\link\\noroot{}\\llink>\\root{2}\\edyn\n\\end{gather*}\nand again, $B$ satisfies the ALC, but $B'$ and $B''$ do not. These\n$|2|$-graded geometries are modelled on the flag variety of isotropic\n$k$-planes and exist for the real forms $\\so(p,q)$ with $k\\leq p\\leq q$. We have\n$\\h\\cong \\R^k\\otimes \\R^{p+q-k}$ and $B$ corresponds to the tensor product of\na symmetric form on $\\R^k$ with the defining inner product on $\\R^{p+q-k}$.\n\\begin{case}[$B^{1,k}_\\ell$] Here $\\g\\simeq \\so(p,q)$ with $k\\leq p\\leq q$\nand $p+q=2\\ell+1$, and the geometries come equipped with the identification of\nthe horizontal distribution $\\cH\\leq TM$ with the tensor product\n$E\\otimes F$, where $E$ has rank $k$ and $F$ carries a metric of signature\n$(p-k,q-k)$. The corank of $\\cH\\leq TM$ is $\\frac12 k(k-1)$.\nThe metrics in $B$ are the tensor products of symmetric nondegenerate forms on\n$E$ and the given metric on $F$.\n\\end{case}\n\nFinally, we arrive at the cross at the very end. For $B_\\ell$ with $\\ell\\geq\n2$, we have\n\\begin{gather*}\n\\h^*\\simeq\\dyn\\root{}\\dots\\root{1}\\llink>\\noroot{}\\edyn\\qquad\n\\h\\simeq\\dyn\\root{1}\\dots\\root{}\\llink>\\noroot{}\\edyn\\qquad\nB= S^2\\h \\simeq \\dyn\\root{2}\\dots\\root{}\\llink>\\noroot{}\\edyn\\\\\n\\h^*\\otimes B \\simeq \\dyn\\root{3}\\llink>\\noroot{}\\edyn\\oplus\n\\dyn\\root{1}\\llink>\\noroot{}\\edyn (\\ell=2)\\qquad\n\\h^*\\otimes B \\simeq\n\\dyn\\root{2}\\dots\\root{1}\\llink>\\noroot{}\\edyn\n\\oplus \n\\dyn\\root{1}\\dots\\root{}\\llink>\\noroot{}\\edyn (\\ell\\geq 3),\n\\end{gather*}\nand the ALC is satisfied.\n\\begin{case}[$B^{1,\\ell}_\\ell$] Here $\\g$ is the split form $\\so(\\ell,\\ell+1)$.\nThe geometries are the well known free distributions, cf.~\\cite{DS}, with rank\n$\\ell$ horizontal distribution $\\cH\\leq TM$ of corank\n$\\frac12\\ell(\\ell-1)$. The metrics in $B$ are all nondegenerate metrics on\n$\\cH$.\n\\end{case}\n\n\\subsection{Proof of Theorem~\\ref{main} when $\\g$ has type $C_\\ell$}\n\nAs with type $B_\\ell$, we only have to consider cases with a single crossed\nnode. We begin with the first node crossed, corresponding to the well known\ncontact projective structures, with\n\\[\n\\h^*\\simeq\\dyn \\noroot{}\\link\\root{1}\\dots\\root{}\\llink<\\root{}\\edyn\\simeq\\h\\;;\n\\]\nwe have discussed the lowest dimension three already as the $B_2$ case, which\ncoincides with the free distribution of rank two. 
For $\\ell\\geq 3$, the picture\nchanges since\n\\begin{gather*}\nS^2\\h\\simeq \\dyn\n\\noroot{}\\link\\root{2}\\dots\\root{}\\llink<\\root{}\\edyn\\simeq B\\\\\nB\\otimes \\h^* \\simeq\n\\dyn\\noroot{}\\link\\root{3}\\dots\\root{}\\llink<\\root{}\\edyn\n\\oplus \\dyn \\noroot{}\\link\\root{2}\\link\\root{1}\\dots\\root{}\\llink<\\root{}\\edyn\n\\oplus \\dyn \\noroot{}\\link\\root{1}\\dots\\root{}\\llink<\\root{}\\edyn\n\\end{gather*}\nand thus the ALC fails.\n\nMoving on to the second node, we obtain another well known family of examples:\nthe quaternionic contact geometries (for $\\g\\cong\\symp(p,\\ell-p)$, $1\\leq p\\leq\n\\ell\/2$) or their split analogues (for $\\g\\cong\\symp(2\\ell,\\R)$)---see\n\\cite[\\S4.3.3]{CS}. For $\\ell=3$, we have\n\\begin{gather*}\n\\h^*\\simeq\\dyn \\root{1}\\link\\noroot{}\\llink<\\root{1}\\edyn\\simeq\\h\\qquad \nS^2\\h=B' \\oplus B''\\quad\\text{with}\\quad\nB'\\simeq \\dyn \\root{2}\\link\\noroot{}\\llink<\\root{2}\\edyn\n\\end{gather*}\nand $B''$ trivial, while for $\\ell\\geq 4$, we have\n\\begin{gather*}\n\\h^*\\simeq\\dyn \\root{1}\\link\\noroot{}\\link\\root{1}\\dots\n\\root{}\\llink<\\root{}\\edyn\\simeq \\h \\qquad S^2\\h=B \\oplus B'\\oplus B''\\\\\nB\\simeq \\dyn \\root{}\\link\\noroot{}\\link\\root{}\\llink<\\root{1}\\edyn \\quad\n\\text{or}\\quad \\dyn\\root{0}\\link\\noroot{}\\link\\root{}\\link\\root{1}\\dots\n\\root{}\\llink<\\root{}\\edyn\n\\qquad B'\\simeq\\dyn \\root{2}\\link\\noroot{}\\link\\root{2}\n\\dots\\root{}\\llink<\\root{}\\edyn\n\\end{gather*}\nand $B''$ trivial. Since $\\h^*\\otimes B'$ decomposes into four components,\nthere are only nontrivial possibilities for $\\ell\\geq 4$. For $\\ell=4$,\n\\begin{gather*}\n\\h^*\\otimes B \\simeq\n\\dyn \\root{1}\\link\\noroot{}\\link\\root{1}\\llink<\\root{1}\\edyn \n\\oplus\n\\dyn \\root{1}\\link\\noroot{}\\link\\root{1}\\llink<\\root{}\\edyn\n\\end{gather*}\nand so the ALC holds for $B$, but for $\\ell\\geq 5$, $ \\h^*\\otimes B$ has three\nirreducible components, and the ALC is not satisfied.\n\n\\begin{case}[$C^{1,2}_4$] Here the possible real Lie algebras\nare $\\symp(8,\\R)$, $ \\symp(2,2)$, or $\\symp(1,3)$, with the second node\ncrossed. In the first case, the geometries come equipped with the\nidentification of the horizontal distribution $\\cH\\leq TM$ with the\ntensor product $E\\otimes F$, where $E$ is rank $2$ and the rank $4$ vector\nbundle $F$ comes with a symplectic form. The eligible metrics in $B$ are the\ntensor products of a area form on $E$ and the given symplectic form on $F$. 
In\nthe quaternionic cases, $\\cH$ is quaternionic and the eligible metrics in $B$\nare quaternionic hermitian forms.\n\\end{case}\n\nLet us next suppose that the $k$-th node is crossed for $3\\leq k\\leq \\ell-2$.\nThen\n\\begin{gather*}\n\\h^*\\simeq\\dyn\\root{}\\dots\\root{1}\\link\\noroot{}\\link\\root{1}\\dots\\root{}\n\\llink<\\root{}\\edyn\n\\qquad \\h\\simeq\n\\dyn\\root{1}\\dots\\root{}\\link\\noroot{}\\link\\root{1}\\dots\\root{}\n\\llink<\\root{}\\edyn\\\\\nS^2\\h\\simeq B \\oplus B'\\oplus B''\\qquad\nB\\simeq \\dyn\\root{}\\link\\root{1}\\dots\\root{}\\link\\noroot{}\n\\link\\root{}\\dots\\root{}\\llink<\\root{}\\edyn\\\\\nB'\\simeq\\dyn\\root{2}\\dots\\root{}\\link\\noroot{}\\link\\root{2}\\dots\\root{}\n\\llink<\\root{}\\edyn\\qquad\nB''\\simeq \\dyn\\root{}\\link\\root{1}\\dots\\root{}\\link\\noroot{}\n\\link\\root{}\\link\\root{1}\\dots\\root{}\\llink<\\root{}\\edyn\\\\\n\\h^*\\otimes B \\simeq\n\\dyn\\root{}\\link\\root{1}\\dots\\root{1}\\link\\noroot{}\\link\\root{1}\\dots\n\\root{}\\llink<\\root{}\\edyn \\oplus \\dyn\\root{1}\\dots\n\\root{}\\link\\noroot{}\\link\\root{1}\\dots\\root{} \\llink<\\root{}\\edyn\n\\end{gather*}\nand so $B$ satisfies the ALC, but the other components do not. The relevant\nmetrics are again tensor products of an exterior form on the rank $k$\nauxiliary bundle $E$ and the given symplectic form on $F$ (where the\nhorizontal distribution is identified with $E\\otimes F$). These geometries\nare available for the split form $\\symp(2\\ell,\\R)$ and, if $k$ is even then\nalso for the real forms $\\symp(p,q)$, $k\\leq p$ 16.5 \\\\\n\\hline\n & 2 msec & 4 msec & 10 msec & 20 msec & 40 msec \\\\\n\\hline\nHigh Loop & R$_{int}$ $<$ 10 & 10 $<$ R$_{int}$$<$12 & 12 $<$ R\n$_{int}$$<$14 & 14 $<$ R$_{int}$$<$16.5 & R $>$ 16.5 \\\\\n\\hline\n & 2 msec & 4 msec & 10 msec & 20 msec & 40 msec \\\\\n\\hline\n\\end{tabular}\n\\caption{\\footnotesize In this table are listed the \nintegration times used for the two WFS with respect to the integrated \nmagnitude of the both references asterism. The values are tuned to the \nstatistical characteristics of the conjugated planes. }\\label{table:3}\n\\end{center}\n\\end{table}\n\n\n\n\\begin{table}[h]\n\\begin{center}\n\\begin{tabular}{|l|l|l|l|}\n\\hline\nLayer ID & Layers Altitude $[$m$]$ & Cn2 fraction & Wind $[$$^{m}$\/\n$_{s}$$]$ \\\\\n\\hline\n1 & 0 & 0.65 & 6.6 \\\\\n\\hline\n2 & 1800 & 0.08 & 12.4 \\\\\n\\hline\n3 & 3200 & 0.12 & 8.0 \\\\\n\\hline\n4 & 5800 & 0.03 & 33.7 \\\\\n\\hline\n5 & 7400 & 0.03 & 23.2 \\\\\n\\hline\n6 & 13100 & 0.08 & 22.2 \\\\\n\\hline\n7 & 15800 & 0.01 & 8.0 \\\\\n\\hline\n\\end{tabular}\n\\caption{\\footnotesize Here are listed the atmospheric \nparameters used in the simulations. For each layer an outer-scale of 20 \nm has been considered. The isoplanatic angle for the overall atmosphere \nis about 15 arcsec at the 2.2$\\mu m$ pass band. This model is not the most \nrecent one where there is a bit more turbulence power in the ground \nlayer (67\\% instead of 65\\%). In this study a 0\".73 seeing in V Band and \n0\".66 seeing in R band were considered.}\\label{table:4}\n\\end{center}\n\\end{table}\n\n\\subsection{Optimization test}\nThe integration times used for both loops were set according to the \nintegrated magnitude of the asterism in the 6 arcmin annular FoV and in \nthe central 2 arcmin FoV, respectively for the Ground and the High WFS \n(see Table~\\ref{table:3}). This solution to set this important \ncouple of parameters is correct only for a first order approach. 
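As a minimal illustration of the magnitude-to-integration-time lookup just described, the rule of Table~\\ref{table:3} can be sketched as follows (the thresholds reproduced here are those of the High Loop row; the function name and structure are ours and are not part of the LOST code):\n\\begin{verbatim}\ndef integration_time_ms(r_int_mag):\n    # Integrated magnitude of the reference asterism -> WFS integration time,\n    # following the High Loop row of Table 3 (the ground loop uses the same\n    # set of integration times with its own magnitude thresholds).\n    if r_int_mag < 10.0:\n        return 2\n    elif r_int_mag < 12.0:\n        return 4\n    elif r_int_mag < 14.0:\n        return 10\n    elif r_int_mag < 16.5:\n        return 20\n    else:\n        return 40\n\n# Example: an asterism with integrated magnitude R_int = 13.2 is sampled at 10 msec.\n\\end{verbatim}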
In fact, this first-order prescription neglects, for example, the effect of the different illumination of the sub-apertures in the High WFS due to the positions and the different brightnesses of the references. A finer analysis should include a fine-tuning of the frame rates of the two WFSs. However, an optimization procedure applied to all the asterisms considered is not feasible, because the overall number of simulations to be performed would increase too much. In other cases, described elsewhere\\cite{LNPDRLOSDA,MADLOSDA}, we considered a grid of possible values for the two frame rates in order to take into account the different combinations of the two integration times. Here we considered a small $3\\times3$ grid of values around the ones specified in Table~\\ref{table:3} (which depend on the integrated magnitude), ranging from 25$\\%$ below to 25$\\%$ above the two values already adopted in the simulations performed. We optimized the integration times only for a small set (20) of the asterisms used in the three 1$\\times$1 square degree fields. Comparing this optimization with the non-optimized data allows us to extrapolate the optimized SR values for all the simulated cases. \n\n\\section{Data analysis}\n\\subsection{CPU time and Workstation}\nWe found in the catalogue 40000 useful stars over the 3 sky-fields considered. We analysed 3072 regions, running 2000 simulations on the asterisms found. Each simulation took 3-6 hours of CPU time, depending on the CPU clock (at most we used 2000 MHz). \n\nThe overall CPU time used was 330 days, most of it spent on the Arcetri Beowulf cluster with 16 nodes, each equipped with two CPUs. The LOST code is not parallelized yet, but we ran different simulations ``in parallel\" on different nodes at the same time.\n\n\\begin{figure}\n\\centerline{\\includegraphics[width=2.8in]{SPIE5490-941}}\n\\caption{\\footnotesize{In this plot the ``+\" signs mark the directions where the SR is computed in all the simulated cases; the step of this square grid is 24 arcsec. The two circles represent the 1 arcmin and the 2 arcmin fields. The correction was applied in the 2 arcmin FoV. In addition to these $6\\times6$ directions, the SR data were also computed at the Natural Guide Star positions.}}\\label{fig:2}\n\\end{figure}\n\n\\begin{figure}\n\\centerline{\\includegraphics[width=4.0in]{SPIE5490-942}}\n\\caption{\\footnotesize{This picture shows a map of the analytical SR found for the Galactic Plane case. Each square covers a 101$\\times$101 arcsec region. If there is a lack of data or no good asterism to drive the adaptive system, the square is black. The color bar on the right indicates the value of the SR. For example, on the left of this map there is a very bright star that saturates all the plates used for the catalogue preparation: accordingly, a circular hole without stars appears in our analysis. The analytical SR gives only an indication of the achievable SR because it takes into account neither the distribution of the stars in the 6 and 2 arcmin FoV nor the turbulence profile.}}\\label{fig:3}\n\\end{figure}\n\nWe developed an IDL procedure to manage the different steps described in the section above. 
Moreover, this procedure found the best asterism for each pair of 2 arcmin and 6 arcmin fields and the best integration times for the two loops, and it computed an analytical SR over the three 1$\\times$1 deg$^{2}$ fields considering the asterism and integration times found before.\n\n\\subsection{Data analysis}\nFor each simulation LOST computed the SR values over the 2 arcmin FoV. A 6$\\times$6 grid of SR evolution data was retrieved for each of the 32$\\times$32 positions in the three 1-degree fields (see Figure~\\ref{fig:2}). Using these data, the average SR was computed over the 1 arcmin circle and the 2 arcmin FoV. Moreover, the on-axis SR was taken into account by averaging the 4 ``probe\" stars closest to the centre of the field (see Figure~\\ref{fig:2}). \n\nWe optimized the MCAO system parameters for loop closure and robustness of the correction, not in order to achieve a high SR (more than 60\\%). In fact, considering only the effect of the spatial sampling of the metapupils for the ground loop (8$\\times$8) and for the high loop ($7\\times7$), the best achievable SR is between 55\\% and 60\\% with the atmospheric parameters considered here.\n\nEach simulation has an iteration step of 2 msec for a total of 250 iterations, which gives an overall time of 0.5 seconds. The length of these simulations is not enough to estimate a representative long-exposure SR because of the effect of the bootstrap, so we took the maximum SR achieved during the run as the reference for our analysis (the SR values we consider also include the tip-tilt residual). Analyzing the SR data of the 3 different 1 square degree fields considered, we drew different SR maps according to the 3 FoV sizes we assumed. These cases can be seen as representative of different instruments:\n\\begin{itemize}\n\\item A camera with a FoV of a few arcsec;\\\\\n\\item An instrument with a 1 arcmin FoV;\\\\\n\\item An instrument, or several instruments mounted on the same system, covering the corrected 2 arcmin field.\n\\end{itemize}\n\n\\subsubsection{Few arcsec FoV Case}\\label{section:a3b2c1}\nWe assumed a camera with a small FoV of a few arcsec, centered on the optical axis direction, mounted at the focus of the telescope. In this case the SR must be uniform because the FoV is smaller than the isoplanatic patch size, so we took as representative for this case the SR obtained for the 4 ``probe\" stars closest to the centre of the FoV (see Figure~\\ref{fig:2}). Figure~\\ref{fig:4} presents the results in terms of sky coverage versus threshold SR:\n\\begin{figure}\n\\centerline{\\includegraphics[width=3.6in]{SPIE5490-943}}\n\\caption{\\footnotesize{This picture shows the sky coverage results for the three galactic latitude cases taken into account, relative to the on-axis direction only. The curves plotted here represent the percentage of the simulated cases in which at least the SR shown on the abscissa was achieved. The dotted line represents the North Galactic Pole, the dashed one the South Galactic Pole and the solid one the Galactic Anti-centre. The percentage is relative to the 32$\\times$32 directions considered for each galactic field.}}\\label{fig:4}\n\\end{figure}\n\\begin{figure}\n\\centerline{\\includegraphics[width=3.6in]{SPIE5490-944}}\n\\caption{\\footnotesize{This picture shows the results for the central 1 arcmin FoV case for the three galactic latitudes taken into account. 
\begin{figure}\n\centerline{\includegraphics[width=3.6in]{SPIE5490-944}}\n\caption{\footnotesize{This picture shows the results for the central 1 arcmin FoV case for the three galactic latitudes taken into account. The functions plotted here represent the percentage of cases where at least the SR shown on the abscissa was achieved. The dotted line represents the North Galactic Pole; the dashed one refers to the South Galactic Pole and the solid one to the Galactic Anti-centre.}}\label{fig:5}\n\end{figure}\n\nFor the Galactic Anti-centre, in 98$\%$ of the cases taken into account the SR on axis was higher \nthan 10$\%$, while percentages of 48$\%$ and 25$\%$ were retrieved \nrespectively for the North and the South Galactic Poles. For the low \nGalactic latitudes, in half of the cases considered the SR on axis was \nhigher than 40\%. \n\n\subsubsection{1 arcmin FoV case}\label{section:a3b2c2}\nNow we describe the sky coverage analysis for an instrument with a 1 arcmin \nFoV centred on the axis direction (the same axis to which the \ncorrection applied by the adaptive system refers). For this case the \nrepresentative SR is the average SR over the central one arcmin computed \nby the simulations. Figure~\ref{fig:5} shows the results \nrelative to this field size: for the low galactic latitude (the Galactic \nAnticentre) in 98\% of the directions considered the average SR was \nhigher than 10\%, while the same values for the North and South Poles \nwere 38\% and 17\% respectively.\n\n\subsubsection{2 arcmin FoV case}\label{section:a3b2c3}\nIn this last case we supposed several instruments (or a single large \ncamera) observing the whole region corrected by the MCAO system (we \nconsidered a corrected FoV of 2 arcmin). As in the 1 arcmin case we took \ninto account the average SR, but now over the 2 arcmin FoV. \n\nIn Figure~\ref{fig:6} the results are presented: considering a 10\% threshold for the \nSR as the condition to define the coverage, we found a sky coverage of 99\% for the \nGalactic plane, and of 25\% and 13\% respectively for the North and South Galactic poles. \n\nWe want to stress that the sky-coverage value changes by a factor $\sim 2$ \nacross the 3 FoV sizes for the galactic poles, while it is unchanged \nfor the Galactic anticentre (see Figure~\ref{fig:7}). This \ndifferent behaviour depends on the different star density of the two \ngalactic regions. The poles are poor in stars with respect to the low \ngalactic latitudes: this translates into a smaller number of reference stars \nfor the galactic poles and thus a lower uniformity of the correction with \nrespect to the galactic plane, where it is quite easy to cover \nhomogeneously the corrected 2 arcmin FoV with natural guide stars. \n\n\begin{figure}\n\centerline{\includegraphics[width=3.6in]{SPIE5490-945}}\n\caption{\footnotesize{This picture shows the results in the overall corrected 2 arcmin FoV for the three galactic latitude cases analyzed. The functions plotted here represent the percentage of cases where at least the SR shown on the abscissa was achieved. The dotted line represents the North Galactic Pole; the dashed one refers to the South Galactic Pole and the solid one to the Galactic Anti-centre. The SR considered here is the average SR over the corrected field of 2 arcmin.}}\label{fig:6}\n\end{figure}\n\n\subsubsection{Dealing with a different definition}\nNow we want to analyse the results presented in the previous sections \n(\ref{section:a3b2c1}, \ref{section:a3b2c2} and \n\ref{section:a3b2c3}) according to a different definition of sky \ncoverage, using, for example, the one given in the reference\cite{2003SPIE.4839..566M}, which \nwe also discussed above (section~\ref{section:_Ref76797586}). 
We assume \nas reference for the infinite-SNR SR the maximum SR achieved in each of \nthe 3 Field of View cases considered, that is: $\sim$0.6 for the few \narcsec field of view case, $\sim$0.5 for the 1 arcmin case and $\sim$0.4 for the \n2 arcmin FoV. Using these values we found the SR$_{50}$ thresholds \n(50\% of the SR relative to the infinite SNR case) for each of these \ncases: 0.3, 0.25 and 0.2 respectively. Applying these thresholds instead \nof the 10\% one, we found a coverage of 90\% for the low galactic \nlatitudes case and between 7\% and 15\% for the galactic poles. Even if they \nrefer to different wavelengths, these values agree with the ones \ngiven in the references\cite{mfov,2003SPIE.4839..566M} (here we considered correction in the K \nband, while it was the R band in the reference\cite{2003SPIE.4839..566M}).\n\begin{figure}\n\centerline{\includegraphics[width=3.6in]{SPIE5490-946}}\n\caption{\footnotesize{This figure shows the percentage of sky-coverage with respect to a threshold SR. The dashed line represents the coverage with respect to the on-axis SR; the dotted one refers to the average SR over the 1 arcmin FoV and the solid line to the SR averaged over the 2 arcmin corrected field. All 3 curves refer to the analysis performed on the 1$\times$1 square degree field centred on the North Galactic Pole.}}\label{fig:7}\n\end{figure}\n\begin{figure}\n\centerline{\includegraphics[width=4.0in,height=3.2in]{SPIE5490-947}}\n\caption{\footnotesize{This figure shows the results for the Galactic Anti-Centre in the on-axis case. Different colors indicate different SR values.}}\label{fig:8}\n\end{figure}\n\n\begin{table}[h]\n\begin{center}\n\begin{tabular}{|l|l|l|l|}\n\hline\n & North Galactic Pole & South Galactic Pole & Galactic Anticentre \\\\\n\hline\nOn Axis (Few arcsec FoV) & 48 \% & 25 \% & 99 \% \\\\\n\hline\n1 arcmin FoV & 38 \% & 17 \% & 97 \% \\\\\n\hline\n2 arcmin FoV & 25 \% & 13 \% & 99 \% \\\\\n\hline\n\end{tabular}\n\caption{This table summarizes the results \nfor the different cases analyzed. A limit SR of 10\% was assumed.}\label{table:5}\n\end{center}\n\end{table}\n\n\section{Example of SCIENCE-COVERAGE: clusters of galaxies at high \nred-shift}\n\n\nWhat we said and stressed about the different sky-coverage for different FoVs \nbecomes important when we consider the possible astronomical \napplications. As an example we took clusters of galaxies as a possible \nastronomical target. Their apparent dimension changes \nwith their distance but, because of the structure and evolution of the \nuniverse, it also depends on the cosmological parameters. \n\nThe angular size of clusters is related to the red-shift ($z$), so for \nthese objects the sky-coverage is a function of $z$. We plotted the \nangular size, taken from the reference\cite{2004A&A...417...13E}, with respect to the redshift in \nFigure~\ref{fig:9}.\n\n\begin{figure}\n\centerline{\includegraphics[width=4.0in]{ettori.eps}}\n\caption{\footnotesize{This figure shows the angular size with respect to the red-shift according to the data in the reference\cite{2004A&A...417...13E}. We plotted a dashed line at a radius of 1 arcmin, corresponding to the 2 arcmin FoV case we considered in the sky-coverage analysis. Following our results, clusters at $z \sim0.9$ have a sky coverage of 25\% (at the North Galactic Pole).}}\label{fig:9}\n\end{figure}\n\n\n\n\n\nThis plot shows that high-$z$ clusters have a higher sky-coverage. 
If the \ncluster has a dimension bigger than 2~arcmin, then several \ncontiguous asterisms are needed to cover its entire extent and, in this case, the sky \ncoverage decreases. \n\n\section{Conclusions}\n\n\nWe analysed the sky coverage problem in the case of a specific Layer \nOriented Multiple Field of View system. We showed the basic relationship \nbetween sky coverage and the field dimension to be studied, and presented a \nscientific case. We analysed the definition of sky coverage and we \nstressed that it must be related to the performance requested of the \nadaptive system and to the class of objects to be studied with the \nscientific instrument to be used. In particular we set a reasonable \nthreshold of 10\% in SR as the condition to define whether there is sky \ncoverage or not. We showed that for low galactic latitudes the \ncorrection is feasible almost everywhere, while at the galactic poles the \ncoverage decreases, but only down to values (20\%-40\%) that are still reasonable enough to justify \nthe use of this natural guide star technique also for high galactic \nlatitude targets.\n\n \n\n\section*{ACKNOWLEDGEMENTS}\n\n\n\nThanks to M. Le Louarn for the atmosphere profile of Cerro Paranal \n(Chile), and to A. Puglisi as ``problem-solver\" regarding the Beowulf \ncluster.\n\n\n\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section{Introduction}\nQuasinormal ringing is the principal gravitational-wave signature of\nthe final black hole after a binary merger. This is described by a\nspectrum of complex quasinormal frequencies $\omega_{lmn}$, which are\nuniquely specified in linear perturbation theory by the mass and spin\nof the Kerr background~\cite{KokkotasSchmidt1999,Nollert:1999ji,Berti2009}. Precise measurement of these frequencies\ntherefore characterizes the background~\cite{Echeverria89} and moreover constrains\ndeviations from general relativity (with more than one mode, or when\ncombined with other measurements)~\cite{Dreyer:2003bv,Berti:2005ys,Brito:2018rfr,LIGOScientific:2021sio}. Although data today already hint at\nmodes beyond the fundamental~\cite{Isi:2019aib,Cotesta:2022pci,Finch:2022ynt,Capano:2021etf}, future observations with sensitive\ndetectors are sure to enable detailed spectroscopy~\cite{Berti:2005ys,Bhagwat:2021kwv,Ota:2019bzl}.\n\nTo interpret future observations, however, it will be necessary to\nunderstand quasinormal mode interactions. The ringdown follows a highly\nnonlinear phase (the merger) and although numerical calculations indicate that a sum of\nmodes may be sufficient to represent the gravitational-wave emission~\cite{Giesler:2019uxc,Mourier:2020mwa,Chen:2022dxt},\nit is not clear that this corresponds to a full nonlinear\ndescription. Indeed, nonlinear ringdown effects have been identified in numerical simulations of binary mergers~\cite{Mitman:2022qdl,Cheung:2022rbm} as well as in anti-de Sitter black holes~\cite{Bantilan:2012vu,Sberna:2021eui}.\nIn other contexts (e.g., perturbations of large\nanti-de Sitter black holes) quasinormal modes can interact and even\nbecome turbulent~\cite{Green:2013zba,Adams:2013vsa}. The point of this paper is to introduce some tools that may be helpful when \ndeveloping a theory of quasinormal mode interactions. \n\nCompared to normal modes, quasinormal modes do not, in general, form a \ncomplete ``basis'' of solutions to the linearized field equations in any \nstraightforward sense. 
In fact, black\nhole perturbations are only described by quasinormal modes for an\nintermediate time period in their evolution; at early times they are\ndescribed by a free propagation piece, and at\nlate times by a power law tail~\\cite{Price1972a,Leaver1986b,Ching:1995tj}. \nThe spatial wavefunction of a (decaying) quasinormal mode also\n\\emph{diverges} at the bifurcation surface and at spatial\ninfinity. This makes it hard to write down canonical (conserved) $L^2$-type inner products based on the usual Cauchy-surfaces\nof Kerr.\\footnote{Note however that one may choose hyperboloidal slices \\cite{Zenginoglu:2011jz,PanossoMacedo:2019npm,Ripley:2022ypi,gajic2021quasinormal}; see the conclusions for a discussion of this alternative in connection with our approach.}.\nWithout an inner product, it is not \nclear how to project onto quasinormal modes to study nonlinear\nmode mixing.\n\nThe main goal of this paper is to point out an unconventional bilinear form which may take\nthe place (for some purposes) of an inner product on quasinormal modes of Kerr. Before \nwe introduce this notion, we develop a general theory for conserved -- under time evolution --\nbilinear forms for Weyl scalars or metric perturbations. Similar to \\cite{carter1977killing}, the key idea is \nto start with a ``Klein-Gordon'' type current for Weyl scalars or metric perturbations and \nto apply symmetry operators to the entries of this bilinear expression. As we show, in Kerr spacetimes, \nsuch symmetry operators include, besides the obvious ones descending from the \nKilling symmetry, also an infinite tower of operators built from Carter's Killing tensor. \n(For the Weyl scalars, the symmetry operator of lowest differential order has two derivatives; \nfor metric perturbations, it has six derivatives). \nIn particular, using a combination of such operators we find an infinite set of new conserved, local, gauge invariant current associated with Carter's constant \\cite{Carter:1968ks} in Kerr.\\footnote{\n\\label{footnote1}\nFor an explanation of the relation with previous works \\cite{carter1977killing, carter1979generalized, grant2020class, grant2020conserved, andersson2015spin,aksteiner2019symmetries}, see section \\ref{sec:Symmetry}.}\n\nThe bilinear form of main interest for this paper is, however, not obtained from such differential symmetry operators but rather the symmetry operator associated with the discrete $t$--$\\phi$ reflection. We show that gravitational quasinormal modes\nwith different frequencies are orthogonal with respect to this\nbilinear form. For the reader interested in the main result, the bilinear form is presented explicitly for quasinormal modes in \\eqref{eq:mode-bilinear}. We show furthermore that the quasinormal mode excitation coefficients of a solution are given precisely by the projection of data onto the corresponding modes via the bilinear form.\n\n\nThe plan of this paper is as follows. In section \\ref{sec:Bilinear} we recall the standard recipe for \nconstructing conserved bilinear forms for partial differential operators. 
In section \\ref{sec:Symmetry}\nwe introduce symmetry operators (including symmetry operators related to the Killing tensor, see also footnote \\ref{footnote1}) \nto construct further conserved bilinear forms, and currents.\n In section \\ref{sec:bilinear_tphi} we construct the bilinear form $\\langle\\langle \\cdot , \\cdot \\rangle\\rangle$ \n using the $t$--$\\phi$ reflection symmetry, which gives orthogonality of quasinormal modes in section \\ref{sec:Ortho}.\nFinally, in section \\ref{sec:Lap transform} we explain the \nrelation with excitation coefficients. Some technical aspects of this paper are deferred to various appendices. \n\n\n\n\\section{Bilinear form -- basic construction}\n\\label{sec:Bilinear}\n\nConsider a partial differential operator $\\mathcal X$ acting on sections of some vector bundle, $E$, \nover a manifold $M$. We assume that $M$ is equipped with a volume form, \n$\\epsilon_{a_1 \\dots a_n}$; later we will always have a metric $g_{ab}$, so the volume form \nis chosen as the one compatible with the metric. Let $\\tilde E$ be the dual vector bundle, i.e., \neach fibre is given by the ${\\mathbb C}$-linear maps of the corresponding fiber of $E$. If $\\psi$ is a \nsection of $E$ and $\\tilde \\psi$ is a section of $\\tilde E$, we can pointwise form the scalar \n$\\tilde \\psi \\psi \\in {\\mathbb C}$. The formal adjoint is the unique differential operator $\\mathcal X^\\dagger$\ndefined by the formula \n\\begin{equation}\n(\\mathcal X^\\dagger \\tilde \\psi) \\psi - \\tilde \\psi \\mathcal X \\psi = \\nabla_a x^a[\\tilde \\psi, \\psi], \n\\end{equation}\nwhere $x^a[\\tilde \\psi, \\psi]$ is local, i.e., at any point built from finitely many \nderivatives of the fields at that point. The divergence operator on the right is defined by our \nvolume form and if it comes from a metric, as we assume from now, it \nis equal to the usual covariant derivative operator. Said differently, $\\mathcal X^\\dagger \\tilde \\psi$ is \nobtained by the usual ``partial integration'' procedure dropping surface terms as if the above \nequation were placed under an integral sign. Note that, by contrast to quantum mechanics, \n$\\dagger$ as defined above is ${\\mathbb C}$-linear, rather than anti-linear.\n\nNow let $(\\tilde \\psi, \\psi)$ be a pair of solutions to $\\mathcal X \\psi = 0 = \\mathcal X^\\dagger \\tilde \\psi$, and let \n$\\Sigma$ be a codimension 1 submanifold of $M$ (later to be chosen as a constant $t$ slice of Kerr). \nThen, by Gauss' theorem, if $\\tilde \\psi$, $\\psi$ have sufficient decay on $\\Sigma$ for the following integral to be suitably convergent (e.g., if they are compactly supported), \nthen the bilinear form \n\\begin{equation}\n\\label{Xdef}\nX[\\tilde \\psi, \\psi] := \\int_\\Sigma x^a [\\tilde \\psi, \\psi] \\, {\\rm d} \\Sigma_a \\equiv \\int_\\Sigma (\\star x) [\\tilde \\psi, \\psi]\n\\end{equation}\nis unchanged under local deformations of $\\Sigma$, and we say that it is ``conserved''.\n(Here $\\star$ denotes the Hodge dual.) \nAs a simple example, consider $\\mathcal X = \\nabla^a \\nabla_a - m^2$, the Klein-Gordon operator acting on real-valued functions $\\psi$, \nso $E = \\tilde E = {\\mathbb R}$ is the trivial bundle. Then $\\mathcal X^\\dagger = \\mathcal X$ and $x^a = - \\tilde \\psi \\nabla^a \\psi + \\psi \\nabla^a \\tilde \\psi$ is the Klein-Gordon (symplectic) current, \nwhich is of course conserved for any pair of solutions. \nThe bilinear form in this case is just the symplectic form for Klein-Gordon theory. 
\nThe Klein-Gordon form is anti-symmetric \nunder $\tilde \psi \leftrightarrow \psi$, but note that in the general case we cannot say that about the bilinear form, since the bundles $E$\nand $\tilde E$ cannot usually be identified in a natural way. \n\nAs a second example, let $\mathcal{E}$ be the linearized Einstein operator on a Ricci-flat spacetime. It acts on symmetric covariant rank-2\ntensors $h_{ab}$, so $E$ is equal to ${\rm Sym}(T^*M \otimes T^*M)$ in this case, and the dual bundle $\tilde E$ corresponds to symmetric contravariant \nrank-2 tensors, ${\rm Sym}(TM \otimes TM)$. The formula is\n\begin{align}\label{eq:linearE}\n \mathcal{E}_{ab}(h) \equiv \frac{1}{2}\big[ &-\nabla^c\nabla_c h_{ab} - \nabla_a\nabla_bh + 2 \nabla^c\nabla_{(a} h_{b)c} \nonumber\\\n & + g_{ab}(\nabla^c \nabla_c h - \nabla^c\nabla^d h_{cd}) \big],\n\end{align}\nand under the identification of $E$ with $\tilde E$ (by using the metric $g^{ab}$ to raise indices), we have $\mathcal{E}^\dagger = \mathcal{E}$.\nAs in the Klein-Gordon case, this last relation follows because the linearized \nEinstein equation arises from an action principle. By explicit calculation, the boundary term $w^a \equiv x^a[\tilde h, h]$\nis given by \cite{iyer1994some}\n\begin{equation}\n\label{wadef}\nw^a = \np^{abcdef}\left( h_{bc} \nabla_d \tilde h_{ef} - \tilde h_{bc} \nabla_d h_{ef}\right),\n\end{equation}\nwhere\n\begin{align}\n p^{abcdef} = &g^{ae}g^{fb}g^{cd} - \frac{1}{2}g^{ad}g^{be}g^{fc} - \frac{1}{2}g^{ab}g^{cd}g^{ef}\nonumber\\ \n &- \frac{1}{2}g^{bc}g^{ae}g^{fd} + \frac{1}{2}g^{bc}g^{ad}g^{ef}.\n\end{align}\nThe bilinear form \n\begin{equation}\n\label{Wdef}\nW[\tilde h, h] = \int_\Sigma\np^{abcdef}\left( h_{bc} \nabla_d \tilde h_{ef} - \tilde h_{bc} \nabla_d h_{ef}\right) {\rm d} \Sigma_a ,\n\end{equation}\nis the symplectic form of General Relativity \cite{iyer1994some}.\n\nOur third, and most important, example concerns the Teukolsky operator(s) for the perturbed Weyl scalars of the Kerr spacetime $(M,g_{ab})$, \nto which we will restrict attention from now on. For this, we shall employ the GHP\nformalism~\cite{Geroch:1973am,Bini:2002jx,Aksteiner:2010rh,Toth:2018ybm} in the following, and we now briefly review the essential portions of this formalism, which \nsimplifies and also conceptualizes many calculations in the\nKerr (or more generally, Petrov type D) geometry. $l^a$ and $n^a$ are taken to be the repeated\nprincipal null directions which are completed to a null tetrad by defining a smooth pair of\ncomplex null rays $(m^a, \bar m^a)$ that span the remaining\ndimensions. We choose the normalization $l_an^a=1$ and\n$m_a\bar m^a=-1$, corresponding to the $-2$ signature. The metric\nthen takes the form\n\begin{equation}\label{eq:NP met}\ng_{ab} = 2l_{(a}n_{b)}-2m_{(a}\bar m_{b)}.\n\end{equation}\nThe basic idea is to contract any tensor field on $M$ into\nthe legs of the Newman-Penrose (NP) tetrad $(l^a, n^a, m^a, \bar m^a)$\nin all possible ways\footnote{We do not require tensor fields to be\n \emph{fully} contracted with the tetrad, so in general we refer to\n NP \emph{tensors}, not just scalars. In other words, there can\n remain tensor indices after contraction.} and to represent the\naction of the covariant derivative operator $\nabla_a$ in terms of\nthese tetrad components, in a way that preserves a natural grading by\nspin and boost weights.\n
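\nA simple example to keep in mind (included here only for orientation, anticipating the weight bookkeeping set up next): under a boost $l^a \to \Lambda l^a$, $n^a \to \Lambda^{-1} n^a$, combined with a rotation $m^a \to e^{i\Gamma} m^a$, the Weyl scalar $\psi_0 = C_{abcd}\, l^a m^b l^c m^d$ picks up the factor $\Lambda^2 e^{2i\Gamma}$; it is precisely this kind of rescaling that the spin and boost weights introduced below keep track of.\n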
\nFields $\eta$ obtained by contracting with the tetrad are\nclassified according to their spin and boost weights as follows. Under\na local rotation that preserves the real null pair, the tetrad\ntransforms as $(l^a, n^a, e^{i\Gamma} m^a, e^{-i\Gamma} \bar m^a)$,\nwhereas under a local boost that preserves the directions of the real\nnull pair, it transforms as\n$(\Lambda l^a, \Lambda^{-1}n^a, m^a, \bar m^a)$, where $\Lambda$,\n$\Gamma$ are smooth real-valued functions. If we combine these\nfunctions into the complex function $\lambda^2 = \Lambda e^{i\Gamma}$,\nthen $\eta$ is said to possess (real) GHP weights $(p,q)$ if under\nthe above combined local rotation and boost of the tetrad, it\ntransforms as\n\begin{equation}\label{trafo}\n\eta \to \lambda^p \bar \lambda^q \eta. \n\end{equation}\nWe write $\eta \ensuremath{\circeq} (p,q)$ if this is the case. In the GHP\nformalism, only quantities with the same weight may be added, whereas\nweights behave additively under multiplication.\n\nFrom the mathematical viewpoint, the GHP formalism can be understood in terms of principal fibre bundles and\ntheir associated vector bundles, as follows. Consider the set of oriented null frames aligned with the given null directions. On each such frame, we may pointwise \nperform a boost\/rotation, which as we described can be combined into a nonzero complex number $\lambda \in {\mathbb C}_\times$. Thus, we have a multiplicative action of \n${\mathbb C}_\times$ on the set of frames which gives this set the structure of a principal $G$-bundle: A principal bundle is abstractly a bundle $P$ over $M$ such that a group $G$\ncan act by right multiplication $X \to X \cdot g$ in the fibre -- in our case $X$ is an NP frame aligned with the principal null directions and $g \leftrightarrow \lambda$. Given a principal $G$-bundle and a representation $R$ of $G$ on some vector space $V$, there is a canonical construction of an ``associated'' vector bundle. The sections of this bundle correspond physically to quantities defined on $M$ that ``transform in the representation $R$''.\nMore precisely, the elements in this associated bundle are the equivalence classes of pairs $(X,v)$ where $X \in P$ \nand $v \in V$, where $(X,v)$ is declared to be equivalent to \n$(X \cdot g, R(g)v)$. In the present example, $R_{p,q}(\lambda)v = \lambda^p \bar \lambda^q v$ and $V = {\mathbb C}$, which corresponds precisely to the ``transformation law'' \eqref{trafo}. The associated vector bundle is denoted in general by $P \ltimes_R V$ and its fibres are isomorphic to $V$. In our case, we get 1-dimensional complex (``line'') bundles $L_{p,q}=P \ltimes_{p,q} {\mathbb C}$ over $M$ labelled by the GHP weights $(p,q)$. The number $s=\frac{1}{2}(p-q)$ is commonly referred to as the spin. Of course, we could tensor $L_{p,q}$ with the usual tensor bundles $T^{(r,s)} M$ to host objects that have GHP weights and tensor indices at the same time, such as $l^a$ or $R_{abcd} m^a m^d$.\n\nThe advantage of the above invariant viewpoint involving associated vector bundles is that we can naturally see what quantities are defined in a frame independent manner, which quantities can naturally be added, etc. 
This provides not only an extremely useful guiding principle in the -- usually very complicated -- calculations related to Kerr, but also means that one is always intrinsically dealing with objects that behave in a well-defined manner under a change of frame. To make the formalism really useful, one needs covariant derivative operators on the bundles $L_{p,q}$. These are given by\n\\begin{equation}\n\\Theta_a = \\nabla_a - \\tfrac{1}{2} (p-q) \\bar m^b \\nabla_a m_b -\\tfrac{1}{2} (p+q) n^b \\nabla_a l_b.\n\\end{equation}\nThe Teukolsky operators also feature the ``gravito-magnetic potential'' which is given by\n\\begin{equation}\n\\label{Bdef}\nB^a \\equiv -(\\rho n^a - \\tau \\bar m^a) \\ensuremath{\\circeq} (0,0), \n\\end{equation}\nwhere $\\rho, \\tau$ are related to spin-coefficients~\\cite{Geroch:1973am,Bini:2002jx,Aksteiner:2010rh,Toth:2018ybm}; see appendix \\ref{app:D}.\nThe Teukolsky operator acts on GHP-scalars of the same weight\\footnote{For the definition of $\\mathcal O$ and \n $\\mathcal{O}^\\dagger$ for general GHP weights see appendix \\ref{app:B}.} as the perturbed Weyl scalar $\\psi_0$, \ni.e., $(p,q) = (4,0)$ and is given by \n\\begin{equation}\n \\mathcal{O} = g^{ab}(\\Theta_a + 4 B_a)(\\Theta_b + 4 B_b) - 16 \\Psi_2 \n\\end{equation}\n with $\\Psi_2$ a background Weyl-scalar. So $E=L_{4,0}$ now. Since the dual vector bundle to $L_{p,q}$ is $L_{-p,-q}$, the adjoint \n Teukolsky operator $\\mathcal{O}^\\dagger$ acts on GHP scalars of weight $(-4,0)$. It is given by\n \\begin{equation}\\label{eq:Odagger}\n\\mathcal{O}^\\dagger = g^{ab}(\\Theta_a - 4 B_a)(\\Theta_b - 4 B_b) - 16 \\Psi_2 .\n\\end{equation} \n It follows that the boundary term $x^a[\\tilde \\Upsilon, \\Upsilon] \\equiv \\pi^a$ (with $\\tilde \\Upsilon \\ensuremath{\\circeq} (4,0), \\Upsilon \\ensuremath{\\circeq} (-4,0)$) \n is given in the case of the Teukolsky operator by \n \\begin{equation}\n \\label{pidef}\n \\pi^a = \\tilde \\Upsilon(\\Theta^a - 4B^a)\\Upsilon - \\Upsilon (\\Theta^a + 4 B^a) \\tilde \\Upsilon \n \\end{equation}\nWe denote the corresponding bilinear form -- formally similar to the Klein-Gordon inner product of a charged scalar field -- by \n\\begin{equation}\n\\label{Pidef}\n\\Pi[\\tilde \\Upsilon, \\Upsilon] = \\int_\\Sigma \\left[ \\tilde \\Upsilon(\\Theta^a - 4B^a)\\Upsilon - \\Upsilon (\\Theta^a + 4 B^a) \\tilde \\Upsilon \\right] {\\rm d} \\Sigma_a.\n\\end{equation}\n\nThe Teukolsky equation\/operator and the linearized Einstein equation\/operator are well-known to be related and this implies that the \nbilinear forms $W$ and $\\Pi$ as in \\eqref{Wdef} and \\eqref{Pidef} are related, too. \\cite{Prabhu:2018jvy} have shown that for $\\Upsilon$ a\nsmooth solution to $\\mathcal O^\\dagger \\Upsilon=0$ arising from compact\nsupport data and $h_{ab}$ a smooth solution to\n$\\mathcal E h_{ab} = 0$, an identity of the following form holds\n\\begin{equation}\\label{eq:intertwine_inf}\nw^a[h, \\mathcal S^\\dagger \\Upsilon] = - \\pi^a[\\mathcal T h, \\Upsilon] + \\nabla_b H^{ab}[\\Upsilon, h], \n\\end{equation}\nwhere $H^{ab}$ is a skew symmetric local tensor. 
Furthermore~\\cite{Aksteiner:2014thesis,Araneda:2016iwr}\n\\begin{subequations}\n \\begin{align}\n \\mathcal{S}(T) &= Z^{bcda} (\\Theta_a + 4 B_a) \\Theta_b T_{cd},\\\\\n \\mathcal{T}(h) &= - \\frac{1}{2}Z^{bcda} \\Theta_a \\Theta_b h_{cd},\n \\end{align}\n\\end{subequations}\nwhere $Z^{abcd} \\equiv Z^{ab}Z^{cd}$, and $Z^{ab} \\equiv 2l^{[a}m^{b]}$, are operators such that the Teukolsky-Wald identity holds:\n\\begin{equation}\\label{SEOT}\n\\mathcal S \\mathcal E = \\mathcal O \\mathcal T.\n\\end{equation}\nThis equation encodes that the action of $\\mathcal T h$ on a metric perturbation $h_{ab}$ (which equals the perturbed Weyl scalar $\\psi_0$)\ngives a solution to Teukolsky's equation $\\mathcal{O} \\psi_0 = 0$. Conversely, taking an adjoint of \\eqref{SEOT}, i.e., \n$\\mathcal{E} \\mathcal{S}^\\dagger = \\mathcal{T}^\\dagger \\mathcal{O}^\\dagger$, shows that any solution $\\mathcal{O}^\\dagger \\Upsilon = 0$ of \nGHP weight $(-4,0)$ (``Hertz potential'') is such that $h_{ab} = \\Re \\mathcal{S}^\\dagger_{ab} \\Upsilon$ is a solution to the linearized Einstein equations. \n\nRef.~\\cite{Prabhu:2018jvy} did not derive the explicit form for $H^{ab}$ but \nargued for the above equation \\eqref{eq:intertwine_inf} to hold on general grounds based on \\eqref{SEOT}. The main use of the above identity \n\\eqref{eq:intertwine_inf} is to \nrelate the corresponding bilinear forms $W[h, \\mathcal S^\\dagger \\Upsilon]$ and $\\Pi[\\mathcal T h, \\Upsilon]$ for a Cauchy surface $\\Sigma$\nof the exterior of Kerr. This identity is obtained by simply integrating the above identity over $\\Sigma$. If all \nfields are falling off rapidly at the horizon and spatial infinity, then the boundary term arising from $H^{ab}$ will not contribute; \nin other cases, $H^{ab}$ will contribute surface terms. Their computation is fairly long and non-trivial and therefore deferred to appendix \\ref{app:A}. \nIf $\\Sigma$ is a co-dimension one surface with boundary $\\partial \\Sigma$,\n$\\Upsilon$ is a\nsmooth solution to $\\mathcal O^\\dagger \\Upsilon=0$ and $h_{ab}$ a smooth solution to\n$\\mathcal E h_{ab} = 0$, then we have\n\\begin{equation}\\label{eq:intertwine}\n W[h, \\mathcal S^\\dagger \\Upsilon] = - \\Pi[\\mathcal T h, \\Upsilon] + B[h,\\Upsilon] \n\\end{equation}\nwhere $B = \\int_{\\partial \\Sigma} H^{ab} {\\rm d} \\Sigma_{ab}$. When $\\Sigma$ is \na slice of constant $t$ in Boyer-Lindquist coordinates, $\\partial \\Sigma$ would correspond to the bifurcation surface at $r=r_+$ and the sphere at $r=\\infty$.\nUsing this formula, the reader can readily transfer results on \nbilinear forms in this paper between the metric perturbation and Teukolsky variables. \n\n\\section{Bilinear forms from infinitesimal symmetry operators}\n\\label{sec:Symmetry}\n\nConsider again a general partial differential operator $\\mathcal X$ acting on sections of some vector bundle, $E$, \nover a manifold $M$. We have the corresponding conserved bilinear form $X[\\tilde \\psi, \\psi]$ defined \nby \\eqref{Xdef}. Now suppose $\\mathcal{C}$ is a partial differential operator acting on $E$ mapping \nsolutions to $\\mathcal X \\psi = 0$ to solutions -- this is equivalent to the statement that there is a partial differential operator $\\mathcal{D}$\nsuch that $\\mathcal X \\mathcal{C} = {\\mathcal D} \\mathcal X$. Such an operator is called a ``symmetry operator''. 
The symmetry operators form an algebra which is trivial for a generic operator $\\mathcal X$.\nIf we have a symmetry operator, then \n$X[\\tilde \\psi, \\mathcal{C} \\psi]$ is also a conserved \nbilinear form, i.e., invariant under local changes of the surface $\\Sigma$ in \\eqref{Xdef}, see e.g. \\cite{carter1977killing, carter1979generalized} for a similar observation.\n\nLet us apply this recipe to the linearized Einstein operator $\\mathcal{E}$ on the Kerr spacetime. The Kerr spacetime\nhas two Killing vector fields, $t^a, \\phi^a$ corresponding to asymptotic time translations and rotations. The Lie\nderivatives $\\mathcal{L}_t, \\mathcal{L}_\\phi$ evidently commute with $\\mathcal{E}$ and thus provide two conserved quadratic forms:\n\\begin{equation}\n\\label{canen}\nE[h] = W[h,\\mathcal{L}_t h], \\quad J[h] = W[h,\\mathcal{L}_\\phi h].\n\\end{equation}\nThey correspond to the canonical energy and canonical angular momentum of the perturbation $h_{ab}$ when $\\Sigma$ is a Cauchy surface\nstretching between the bifurcation surface and spatial infinity \\cite{hollands2013stability}. \n\nIf we want to repeat a similar construction for the Teukolsky operator $\\mathcal{O}$ and the corresponding bilinear form $\\Pi$ \nwe face the problem that the Lie-derivative \nin general is not well-defined on an arbitrary vector bundle (though it is on the usual bundles of tensors over $M$). \nIn the GHP formalism, the vector bundles $L_{q,p}$ in question \nare defined relative to an NP tetrad, and in such a case we can still give a definition of the Lie derivative along a Killing vector\nfield, though not an arbitrary vector field, as we now describe. The point is that if $g_{ab}$ has an isometry $\\varphi$ that\n preserves the globally defined null directions, then this \n constitutes an intrinsically\n defined action on GHP tensors $\\eta \\ensuremath{\\circeq} (p,q)$. More\n explicitly, if $\\varphi$ preserves the null directions, then it must\n be the case that it acts on a given null frame as\n $\\varphi_* l^a = \\Lambda l^a$, $\\varphi_* n^a = \\Lambda^{-1} n^a$,\n and $\\varphi_* m^a = e^{i\\Gamma} m^a$, for some real functions $\\Lambda$,\n $\\Gamma$ on $M$ that depend on the chosen frame and\n $\\varphi$. The action of $\\varphi$ on $\\eta$ is then invariantly\n defined since GHP tensors are functionals of the\n null tetrads giving rise to the prescribed pair of null\n directions. In the given null frame, this action amounts to\n $\\varphi^{\\text{GHP}}_*\\eta \\equiv \\lambda^{-p} \\bar \\lambda^{-q}\n \\varphi_*\\eta$, where $\\lambda^2 = \\Lambda e^{i\\Gamma}$ and\n $\\varphi_*$ is the standard pushforward on functions \n (or tensors). In particular, the\n tetrad vectors are invariant under $\\varphi_*^{\\text{GHP}}$.\n\n Infinitesimally, if $\\varphi_t$ is a 1-parameter group of transformations generated by\n a Killing field $\\chi^a$ with corresponding\n $\\lambda_t$, then the corresponding ``Lie'' transport of $\\eta \\ensuremath{\\circeq} (p,q)$ is given\n by~\\cite{edgar2000integration}\n \\begin{IEEEeqnarray}{rClCl}\\label{eq:GHPLie}\n \\text{\\L}_\\chi \\eta &=& \\lim_{t\\to0}\\frac{(\\varphi_{-t})^{\\text{GHP}}_\\ast \\eta - \\eta}{t} && \\nonumber\\\\\n &=& (\\mathcal{L}_\\chi - p w - q \\bar w) \\eta &\\ensuremath{\\circeq}& (p,q),\n \\end{IEEEeqnarray}\n in the given frame. 
Here, $\\mathcal{L}$ denotes the standard Lie derivative, and \n \\begin{align}\n w &= \\frac{d}{dt} \\log \\lambda_t \\bigg|_{t=0}\\\\\n &= \\frac{1}{2} \\left(n_a\\mathcal{L}_\\chi l^a - \\bar m_a \\mathcal{L}_\\chi m^a \\right).\n \\end{align}\n If we introduce the bivector \n $Y \\equiv n \\wedge l - \\bar m \\wedge m$ (for further details on the\n bivector calculus see, e.g.\n \\cite{fayos1990electromagnetic,Aksteiner:2014thesis}) and use\n the fact that $\\chi^a$ is a Killing field, so\n $\\nabla_{(a}\\chi_{b)} = 0$, then~\\eqref{eq:GHPLie} can be manipulated to obtain\n \\begin{equation}\\label{eq:GHPLie-simplified}\n \\text{\\L}_\\chi \\eta = \\left[ \\mathcal{L}^\\Theta_\\chi\n - \\frac{p}{4} Y^{ab}\\Theta_a\\chi_b\n - \\frac{q}{4} \\left( Y^{ab} \\Theta_a\\chi_b \\right)^\\ast \\right] \\eta,\n \\end{equation}\n where $\\mathcal{L}^\\Theta$ is the standard Lie derivative with $\\nabla_a$\n derivatives replaced by $\\Theta_a$ derivatives. In this notation, the GHP Lie derivative \n is also defined for GHP-tensors, i.e., \n sections in a bundle $L_{p,q}$ tensored with \n $TM$ or $T^*M$. In any case, the GHP Lie\n derivative defined here is manifestly GHP covariant, and it can be\n checked that it satisfies the Leibniz rule. The expression for \n $\\text{\\L}_\\chi$ \n in a chosen NP tetrad will depend on that choice. \n For the Kinnersley tetrad \\eqref{eq:Kintet}, $w=0$, but\n $w$ can be different from zero for other choices of the frame.\n\n With these definitions, it then follows that $\\text{\\L}_\\chi$ for $\\chi^a$ either $t^a$ or $\\phi^a$ \n commutes with the covariant derivative $\\Theta_a$ and annihlates $g_{ab}, n^a, l^a, m^a, \\bar m^a, B_a$. Therefore, \n $\\text{\\L}_\\chi$ also commutes with the Teukolsky operators, \n \\begin{equation}\n [\\text{\\L}_\\chi, \\mathcal{O}] = 0 = [\\text{\\L}_\\chi, \\mathcal{O}^\\dagger], \\quad \\chi^a = t^a , \\phi^a, \n \\end{equation}\n and it thus defines a symmetry operator. \n \n There exist other symmetry operators in the Kerr (and more generally, Petrov type D-) spacetimes\n related to the Killing tensor $K_{ab}$ that exists in those spacetimes. The construction of those operators for spin $s=0, \\tfrac{1}{2}$ in the Teukolsky equation goes back to \n \\cite{carter1977killing, carter1979generalized}; here we present the corresponding symmetry operator for arbitrary GHP-weights $(p,q)$. Similar operators have appeared also in \n \\cite{grant2020class, grant2020conserved}, eq. III.3, for spin $s=1, 2$, though not in the GHP covariant form presented here which makes manifest the relationship with the Killing tensor. This tensor \n is given by \n \\begin{equation}\n \\label{Kabdef}\n K^{ab} = - \\dfrac{1}{4} \\left( \\zeta - \\bar{\\zeta} \\right)^2 l^{(a} n^{b)} + \\dfrac{1}{4} \\left( \\zeta + \\bar{\\zeta} \\right)^2 m^{(a} \\bar{m}^{b)}\n \\end{equation} \n where we use the shorthand\n\\begin{equation}\n\\label{zetadef}\n\\zeta = - \\Psi^{-\\tfrac{1}{6}}_2 \\bar{\\Psi}^{-\\tfrac{1}{6}}_2 \\rho^{-\\tfrac{1}{2}} \\bar{\\rho}^{\\tfrac{1}{2}} \\circeq \\GHPw{0}{0}\n\\end{equation}\nwith \n$\\rho$ one of the spin coefficients in the GHP formalism. 
The desired symmetry operator $\mathcal{K}$\nacting on GHP scalars of weights $(p,q)$ is defined as\n\begin{align}\label{eq:Koperatordef}\n\mathcal{K} \eta =&\left( \Theta_a + p B'_a + q \bar{B}'_a \right) K^{ab} \left( \Theta_b + p B'_b + q \bar{B}'_b \right) \eta \nonumber\\\n&+ 2 (p \gamma + q \bar{\gamma}) \text{\L}_\xi \eta\n\end{align}\nwhere \n\begin{equation}\n\label{xidef}\n\xi_a = \zeta \left( B_a - B'_a \right), \n\end{equation}\nis proportional to a Killing vector field, and $\gamma = ( \zeta^2 - \bar{\zeta}^2 )\/(8 \zeta)$. Here and in the following, a prime as in $B'_a$ means the GHP priming operation $n^a \leftrightarrow l^a, m^a \leftrightarrow \bar m^a$.\nIn Boyer-Lindquist coordinates and the Kinnersley frame (see appendix \ref{app:D}), $\xi^a = M^{-1\/3} t^a$, $\gamma = M^{- 1\/3} \frac{- i a \cos \theta}{2(r - i a \cos\theta)}$ and $\text{\L}_\xi \eta = M^{-1\/3} \partial_t \eta$.\n$\mathcal{K}$ is called a symmetry operator because one can show that \n\begin{equation}\n\label{commutator}\n[\mathcal{K}, \mathcal{O}] = 0 = [\mathcal{K}, \mathcal{O}^\dagger] \n\end{equation}\nwhen acting on GHP quantities of weight $(4,0)$ or $(-4,0)$, respectively. The proof of this statement is rather \nnontrivial and deferred to appendix \ref{app:B}, where we also prove the commutation property for arbitrary $(p,q)$. It follows from the properties of the GHP Lie derivative that $[\text{\L}_\chi, \mathcal{K}]=0$ for any Killing vector field $\chi^a$, so we have:\n\n\begin{theorem}\n$\text{\L}_t, \text{\L}_\phi, \mathcal{K}$ generate a commutative, infinite-dimensional algebra of symmetry operators for Teukolsky's operator $\mathcal O$ for any GHP weights $(p,q)$.\n\end{theorem}\n\nHence, by the general scheme, if we have \nsolutions to $\mathcal{O} \tilde \Upsilon = 0 = \mathcal{O}^\dagger \Upsilon$,\nand symmetry operators $\mathcal{A}, \mathcal{B}$, then the bilinear \nform $\Pi[\mathcal{A}\tilde \Upsilon, \mathcal{B} \Upsilon]$, with $\Pi$ as in \eqref{Pidef}, is conserved, i.e., unchanged under local deformations of the Cauchy surface $\Sigma$. We caution the reader that such bilinear forms can be trivial, i.e., be equivalent to forms that are conserved identically; see appendix \ref{sec:trivial} for some discussion. \n \nIt is possible to derive symmetry operators also for the linearized Einstein tensor $\mathcal{E}$ (and for the Maxwell equations) on Kerr or, more generally, a Petrov type D spacetime. Let $n=0,1,2,\dots$ and set\n\begin{equation}\n\label{Cndef}\n\mathcal{C}_n = \mathcal{S}^\dagger \mathcal{K}^n \zeta^{2s} \mathcal{T}',\n\end{equation}\nas well as\n\begin{equation}\n\mathcal{D}_n = \mathcal{T}^\dagger \mathcal{K}^n \zeta^{2s} \mathcal{S}',\n\end{equation}\nwhere for the spin-2 case considered here we should take $s=2$, and where we use the GHP priming operation. 
\nThen\n\\begin{equation}\n\\begin{split}\n\\mathcal{E} \\mathcal{C}_n =& \\mathcal{E} \\mathcal{S}^\\dagger \\mathcal{K}^n \\zeta^{2s} \\mathcal{T}' \\\\\n=& \\mathcal{T}^\\dagger \\mathcal{O}^\\dagger \\mathcal{K}^n \\zeta^{2s} \\mathcal{T}'\\\\ \n=& \\mathcal{T}^\\dagger \\mathcal{K}^n \\mathcal{O}^\\dagger \\zeta^{2s} \\mathcal{T}' \\\\\n=& \\mathcal{T}^\\dagger \\mathcal{K}^n \\zeta^{2s} \\mathcal{O}' \\mathcal{T}'\\\\\n=& \\mathcal{T}^\\dagger \\mathcal{K}^n \\zeta^{2s} \\mathcal{S}' \\mathcal{E}\\\\\n=& \\mathcal{D}_n \\mathcal{E}\n\\end{split}\n\\end{equation}\nwhere we used twice the Teukolsky-Wald identity \\eqref{SEOT}, the commutation $[\\mathcal{O}^\\dagger, \\mathcal{K}]=0$, as well as the intertwining \nrelation $\\mathcal{O}^\\dagger \\zeta^{2s} = \n\\zeta^{2s} \\mathcal{O}'$.\nWhen acting on a perturbation $h_{ab}$, $\\mathcal{T}'(h)$ gives the perturbed Weyl scalar $\\psi_4$, which is gauge invariant. Therefore, we see that $\\mathcal{C}_n(h)=0$ for any gauge perturbation $h_{ab} = \\mathcal{L}_\\xi g_{ab}$. \n\nBy the results of appendix \\ref{app:B} another symmetry operator for $\\mathcal E$ would be $\\mathcal{C}_n = \\mathcal{S}^\\dagger \\mathcal{G}^n \\zeta^{2s} \\mathcal{T}'$, with $\\mathcal{D}_n = \\mathcal{T}^\\dagger \\mathcal{G}^{\\dagger n} \\zeta^{2s} \\mathcal{S}'$ (with similar proof, see appendix \\ref{app:B} for the definition of $\\mathcal G$), and further symmetry operators are obtained by the GHP prime- and overbar operations applied to these $\\mathcal{C}_n$'s. Finally, by putting $s=1$ in the above expressions, and defining $\\mathcal{T}, \\mathcal{S}$ so that the analog of the Teukolsky-Wald identity \\eqref{SEOT} holds for electromagnetic perturbations, where $({\\mathcal E}A)_a = \\nabla^b \\nabla_{[a} A_{b]}$, we get similar operators in the electromagnetic case.\n\nAs a consequence, in all cases, $\\mathcal{C}_n$ give symmetry operators for $\\mathcal{E}$ of order $4+2n$ for spin-2 and of order $2+2n$\nfor spin-1. Regarding our operator $\\mathcal{C}_0$ for spin-2, we remark that a very similar looking operator has been considered by \\cite{grant2020class}, Eq.~III.14. \nRegarding our operator $\\mathcal{C}_1$, \na similar looking operator has been considered in \n\\cite{grant2020class}, Eq.~III.47 and also in \n\\cite{aksteiner2019symmetries}, Thm.~16. However, closer inspection of the operator\\footnote{\\cite{grant2020class}, Eq. III.14 on the other hand is manifestly local.} in \\cite{grant2020class}, Eq. III.47 shows that it is non-local, while our operators are all local and also manifestly GHP covariant. The relation of our operators $\\mathcal{C}_1$ to the order 6 symmetry operator asserted in \\cite{aksteiner2019symmetries} is not completely clear to us and the same goes for our other operators $\\mathcal{C}_1'$, etc. For spin-1, symmetry operators of orders 2 and 4 have been discussed in \\cite{grant2020conserved,andersson2015spin}, and the comparison to ours is qualitatively similar.\n\nBy the general theory, for example ($n=0,1,2, \\dots$)\n \\begin{equation}\n\\chi_{(n)}[h] = W[\\overline{\\mathcal{C}_0 h}, \\mathcal{C}_n h]\n\\end{equation}\nwith $W$ as in \\eqref{Wdef} are conserved for all solutions $h_{ab}$ to the linearized Einstein equations, i.e.~unchanged under local deformations of the Cauchy surface $\\Sigma$. The corresponding conserved currents are \n \\begin{equation}\n \\label{jdef}\nj^a_{(n)} = w^a[\\overline{\\mathcal{C}_0 h}, \\mathcal{C}_n h]\n\\end{equation}\nwith $w^a$ as in \\eqref{wadef}. 
Note that each $j^a_{(n)}$ is \nlocal and gauge invariant from the properties of $\\mathcal{C}_n$. The concrete expressions of $j^a_{(n)}$ \nare very long and contain $2n+9$ derivatives of $h_{ab}$.\nFor the reason explained below, we call $j^a_{(n)}$ the ``Carter current(s)''.\n\nTo gain some insight into the meaning of the conserved quantities $\\chi_{(n)}$, we make a WKB (high frequency) analysis similar to \\cite{green2016superradiant}, see also \\cite{grant2020class}. If the momentum of the \nsharply collimated WKB wave packet $h_{ab}$ is $p_a$\nand its amplitudes defined with respect to a suitable basis of polarization tensors are $A_{+,\\times}$, the result is\n\\begin{equation}\n\\begin{split}\n\\label{chinint}\n\\chi_{(n)}[h] = & \\int_\\Sigma j^a_{(n)} {\\rm d} \\Sigma_a \\\\\n\\sim & \\, \\, -i (-1)^n \\int_\\Sigma p^a \n{\\rm Im}(A_+ \\bar A_\\times) \\times \\\\\n& \\qquad \\times Q(p)^{n+4} \\, {\\rm d} \\Sigma_a\n\\end{split}\n\\end{equation}\nwhere $K^{ab} p_a p_b = Q(p)$ denotes the Carter constant. See appendix \\ref{app:WKB} for more \ndetail on the derivation of this formula \nand on the \nprecise definitions of the WKB wave functions, polarizations, etc. \n\n\nWe can obviously form alternative conserved quantities by other combinations of the various symmetry operators of the linearized Einstein operator described above giving e.g., the GHP primed version of our Carter currents $j^{a \\prime}_{(n)}$. We note also that such currents could have alternatively been constructed from $\\pi^a$ \\eqref{pidef}, taking $\\Upsilon = \\zeta^4 \\psi_4$ and $\\tilde \\Upsilon = \\psi_0$ and acting on those with various symmetry operators for the Weyl scalars, as described above. \n\nWe finally remark that conserved currents for metric perturbations related to Carter's constant have also been considered in \\cite{grant2020class}, Eqs. IV.14-16. Eq. IV.14 is very similar to our $j_{(0)}^a$ but their currents Eqs. IV.15-16 are different from our Carter currents $j^a_{(n)}$ or their GHP primes because unlike ours, they are based on non-local currents requiring a mode decomposition of the solutions.\n\n\n \n\\section{Bilinear form from $t$--$\\phi$ reflection}\\label{sec:bilinear_tphi}\n\nIn the previous section, we combined the basic conserved bilinear form \\eqref{Pidef} with symmetry operators, which arise in particular \nfrom the Killing vector fields of Kerr. One naturally expects that a similar construction should be possible for the discrete isometry of\nKerr, namely the $t$--$\\phi$ reflection map $J: (t,\\phi) \\to (-t,-\\phi)$ where here and in the following we refer to Boyer-Lindquist coordinates.\nHowever, just as for Killing vectors, some care has to be taken when defining the action of $J$ on GHP scalars with nontrivial weights \n$(p,q)$. So we first turn to this issue.\n\nThe map $J$ swaps the null directions $l^a$ and $n^a$ and changes the\n orientation on the orthogonal complement of these null directions\n spanned by $m^a$, $\\bar m^a$. \n There must thus be $\\Lambda$, $\\Gamma$ depending on the null tetrad\n such that $J_* l^a = -\\Lambda n^a$, $J_* n^a = -\\Lambda^{-1} l^a$, and\n $J_* m^a = e^{i\\Gamma} \\bar m^a$, where we have defined $J$ to act on\n tensors by the push-forward. 
\n By analogy with the previous\n case of isometries which are continuously deformable to the identity, \n it is then natural to define for $\\eta \\ensuremath{\\circeq} (p,q)$ a GHP\n reflection\n \\begin{equation}\n \\label{Jdef}\n \\mathcal J \\eta \\equiv i^{p+q} \\lambda^{-p} \\bar \\lambda^{-q} \\eta \\circ J \\ensuremath{\\circeq} (-p,-q) \n \\end{equation}\n in the given frame. \n \n The operator $\\mathcal J$ is evidently a GHP priming operation combined with $t \\to -t, \\phi \\to -\\phi$, and\n is therefore easily seen to be GHP covariant (i.e.~defined intrinsically as a map from sections in $L_{p,q}$ to sections in $L_{-p,-q}$, irrespective of the chosen frame), \n but, by contrast to the ``pull-back'' arising from isometries continuously connected to the \n identity as considered above, it changes the GHP weights. In this sense it is similar to the CPT operator arising in quantum field theory. \n It is clear that $\\mathcal J^2 = 1$ and one can relatively easily show the ``anti-commutation'' relations \n $\\text{\\L}_{t} \\mathcal J = - \\mathcal J \\text{\\L}_{t}$, $\\text{\\L}_{\\varphi} \\mathcal J = - \\mathcal J \\text{\\L}_{\\varphi}$\n with the GHP Lie-derivative defined above.\n We also note an important intertwining property\nof the $t$--$\\phi$ reflection operator $\\mathcal J$ with the Teukolsky operator and its adjoint, namely,\n\\begin{align}\\label{eq:OJ}\n \\mathcal O \\Psi_2^{4\/3} \\mathcal J = \\Psi_2^{4\/3} \\mathcal J \\mathcal O^\\dagger,\n\\end{align}\nwhere we used basic properties of gravito-magnetic field $B_a$ and its GHP prime $B'_a$, as well as the relation\n\\begin{equation}\\label{eq:gradPsi}\n \\Theta_a \\Psi_2 = -3 (B_a + B'_a) \\Psi_2.\n\\end{equation}\n In the Kinnersley frame and Boyer-Lindquist coordinates (see appendix \\ref{app:D}), the $\\mathcal J$ operator corresponds to sending $t \\to -t, \\phi \\to -\\phi$\n and multiplication according to \\eqref{Jdef} by appropriate powers of $\\lambda, \\bar \\lambda$, where $\\lambda$ is given in this case \n explicitly by \n\\begin{equation}\\label{eq:boostParams}\n \\lambda \n = \\sqrt{2} (r-ia \\cos \\theta) \\Delta(r)^{-1\/2}.\n\\end{equation}\n\nWe are now in a position to define the bilinear form. For simplicity, we restrict at first to entries having compact support on the \nCauchy surface $\\Sigma$ in order to avoid any convergence problems.\n\n\\begin{definition}[Bilinear form for compact support]\n Let $\\Upsilon_1, \\Upsilon_2 \\ensuremath{\\circeq} (-4,0)$ be smooth GHP scalars\n of compact support on $\\Sigma$ in the kernel of\n $\\mathcal O^\\dagger$. 
Then we set\n \\begin{equation}\\label{bilinear}\n \\langle\\langle \\Upsilon_1, \\Upsilon_2 \\rangle\\rangle \\equiv \\Pi_\\Sigma[\\Psi_2^{4\/3} \\mathcal J \\Upsilon_1, \\Upsilon_2]\n \\end{equation}\n with $\\Pi$ as in \\eqref{Pidef}.\n\\end{definition}\n\n\\begin{lemma}\\label{lemma:compactsupport}\n Under the conditions of the definition, we have\n \\begin{enumerate}[label=(\\roman*), start=1]\n \\item $\\langle\\langle \\Upsilon_1, \\Upsilon_2 \\rangle\\rangle$ is ${\\mathbb C}$-linear in both entries.\n \\item\n $\\langle\\langle \\Upsilon_1, \\Upsilon_2 \\rangle\\rangle=\\langle\\langle \\Upsilon_2,\n \\Upsilon_1 \\rangle\\rangle$,\n \\item\n $\\langle\\langle \\text{\\L}_t\\Upsilon_1, \\Upsilon_2 \\rangle\\rangle=\\langle\\langle\n \\Upsilon_1, \\text{\\L}_t \\Upsilon_2 \\rangle\\rangle$ for $t^a$ the time\n translation Killing field, and\n \\item $\\langle\\langle \\Upsilon_1, \\Upsilon_2 \\rangle\\rangle$ is independent of\n the chosen Cauchy surface $\\Sigma$.\n \\end{enumerate}\n\\end{lemma}\nBefore we prove this lemma, we remark that, e.g. by \\eqref{eq:Kinnersley-bilinear}, the bilinear form may be viewed as defined on the initial data of the Teukolsky equation on \nthe Cauchy surface $\\Sigma$. On an initial data set $\\text{\\L}_t$ corresponds to the action of a suitably definined Hamiltonian operator $\\mathcal{H}$. \nThen item (iii) corresponds to the statement that \n\\begin{equation}\n \\langle\\langle \\Upsilon_1, \\mathcal{H} \\Upsilon_2 \\rangle\\rangle = \\langle\\langle \\mathcal{H} \\Upsilon_1, \\Upsilon_2 \\rangle\\rangle, \n\\end{equation}\ni.e.~to the fact that the Hamiltonian operator is symmetric with respect to our bilinear form. We refer the interested reader to appendix \\ref{sec:Lagrangian-Hamiltonian} for details \non the Hamiltonian formulation of the Teukolsky equation. \n\nWe also note that although we defined our bilinear form on $s=-2$\nGHP scalars (i.e., solutions to the adjoint Teukolsky equation), we\ncould also define a bilinear form on $s=+2$ solutions to the original\nTeukolsky equation. In this case, we set\n$\\langle\\langle\\tilde\\Upsilon_1, \\tilde\\Upsilon_2\\rangle\\rangle \\equiv\n\\Pi_\\Sigma[\\tilde\\Upsilon_1, \\Psi_2^{-4\/3} \\mathcal J\n\\tilde\\Upsilon_2]$. It can be shown that the $s=+2$ bilinear form\nsatisfies all the same properties as the $s=-2$ form. \n\\begin{proof}\n \\begin{enumerate}[label=(\\roman*),start=1]\n \\item This is obvious from the definition.\n \\item By explicit calculation, we have with $\\pi_{abc} = \\epsilon_{abcd} \\pi^d$ and $\\pi^a$ as in \\eqref{pidef},\n \\begin{widetext}\n \\begin{align}\n \\pi_{abc}(\\Psi_2^{4\/3}\\mathcal J \\Upsilon_1, \\Upsilon_2) &= \\epsilon_{dabc} \\left[ (\\Psi_2^{4\/3} \\mathcal J \\Upsilon_1) (\\Theta^d - 4 B^d) \\Upsilon_2 - \\Upsilon_2 (\\Theta^d + 4 B^d) (\\Psi_2^{4\/3} \\mathcal J \\Upsilon_1 )\\right] \\nonumber \\\\\n &= \\mathcal J \\epsilon_{dabc} \\left[ \\Psi_2^{4\/3} \\Upsilon_1 (\\Theta^d - 4 B^{\\prime d}) (\\mathcal J \\Upsilon_2 ) - (\\mathcal{J} \\Upsilon_2) (\\Theta^d + 4 B^{\\prime d}) (\\Psi_2^{4\/3} \\Upsilon_1) \\right] \\nonumber\\\\\n &= \\mathcal J \\epsilon_{dabc} \\left[ \\Upsilon_1 (\\Theta^d + 4 B^d) (\\Psi_2^{4\/3} \\mathcal J \\Upsilon_2) - (\\Psi_2^{4\/3} \\mathcal J \\Upsilon_2) (\\Theta^d - 4 B^d) \\Upsilon_1 \\right] \\nonumber\\\\\n &= - \\mathcal J \\pi_{abc}(\\Psi_2^{4\/3}\\mathcal J \\Upsilon_2, \\Upsilon_1),\n \\end{align}\n \\end{widetext}\n using $\\mathcal J^2 = 1$ and~\\eqref{eq:gradPsi}. Now integrate over $\\Sigma$. 
Since\n $\\mathcal J$ reverses the orientation of $\\Sigma$, the claim\n follows.\n \\item \n We first remark that, by Cartan's magic formula, we have that on solutions (where $\\pi = \\pi_{abc} {\\rm d} x^a \\wedge {\\rm d} x^b \\wedge {\\rm d} x^c$),\n \\begin{equation}\n \\mathcal{L}_t \\pi = {\\rm d} ( t \\cdot \\pi),\n \\end{equation}\n if ${\\rm d} \\pi=0$. Integrating over $\\Sigma$ and using Stokes's theorem,\n \\begin{equation}\\label{eq:pi-cartan}\n \\int_\\Sigma \\mathcal{L}_t \\pi = \\int_{\\partial \\Sigma} t \\cdot \\pi = 0.\n \\end{equation}\n as, for compact support data, the contribution on $\\partial \\Sigma$ evaluates to zero. \n In our case,\n $\\Upsilon_1 \\in \\ker \\mathcal{O}^\\dagger$, therefore\n $\\Psi_2^{4\/3} \\mathcal J \\Upsilon_1 \\in \\ker \\mathcal O$, thus\n $\\pi(\\Psi_2^{4\/3}\\mathcal J \\Upsilon_1, \\Upsilon_2)$ is indeed closed, ${\\rm d} \\pi=0$.\n On the other hand, we have, since background quantities are all\n GHP-Lie-derived by $t^a = M^{1\/3} \\xi^a$, and since\n $\\mathcal J \\text{\\L}_t = - \\text{\\L}_t \\mathcal J$, that\n \\begin{align}\\label{eq:lemmaiiib}\n & \\mathcal{L}_t \\pi(\\Psi_2^{4\/3}\\mathcal J \\Upsilon_1, \\Upsilon_2)\\nonumber \\\\\n &\\quad= \\pi( \\Psi_2^{4\/3} \\text{\\L}_t \\mathcal J \\Upsilon_1, \\Upsilon_2) + \\pi(\\Psi_2^{4\/3} \\mathcal J \\Upsilon_1, \\text{\\L}_t \\Upsilon_2) \\nonumber \\\\\n &\\quad= - \\pi(\\Psi_2^{4\/3} \\mathcal{J} \\text{\\L}_t \\Upsilon_1, \\Upsilon_2) + \\pi(\\Psi_2^{4\/3} \\mathcal J \\Upsilon_1, \\text{\\L}_t \\Upsilon_2).\n \\end{align}\n Inserting this into the left hand side of \\eqref{eq:pi-cartan} evaluated on the solutions $\\Psi_2^{4\/3}\\mathcal J \\Upsilon_1$ and $\\Upsilon_2$ immediately yields the claim.\n\n \\item Holds by Gauss's theorem because $\\pi$ is closed on solutions,\n and $\\Psi_2^{4\/3} \\mathcal J$ takes $\\ker \\mathcal O^\\dagger$ into\n $\\ker \\mathcal O$.\n \\end{enumerate}\n\\end{proof}\n\nWe end this section with an explicit expression of our bilinear form in Boyer-Lindquist coordinates and the Kinnersley frame:\n\\begin{widetext}\n\\begin{align}\\label{eq:Kinnersley-bilinear}\n \\langle\\langle \\Upsilon_1, \\Upsilon_2 \\rangle\\rangle \n = 4 M^{4\/3}\n \\int_\\Sigma {\\rm d} r \\, {\\rm d}\\theta {\\rm d}\\phi\\, \\frac{\\sin\\theta}{\\Delta^2} \\Bigg[\n & \n \\Upsilon_1\\Big|_{\\substack{t\\to-t \\\\ \\phi\\to-\\phi}} \\left( \\frac{\\Lambda}{\\Delta}\\partial_t + \\frac{2Mra}{\\Delta}\\partial_\\phi + 2 \\left[ -r - ia\\cos\\theta + \\frac{M}{\\Delta}(r^2 - a^2)\\right] \\right) \\Upsilon_2\n \\nonumber\\\\\n & \n + \\Upsilon_2 \\left[\\left( \\frac{\\Lambda}{\\Delta}\\partial_t + \\frac{2Mra}{\\Delta}\\partial_\\phi + 2 \\left[ -r - ia\\cos\\theta + \\frac{M}{\\Delta}(r^2 - a^2)\\right] \\right) \\Upsilon_1\\right]_{\\substack{t\\to-t \\\\ \\phi\\to-\\phi}}\n \\Bigg],\n \\end{align}\n\\end{widetext} \nwhere we refer to appendix \\ref{app:D} for the definitions of \n$\\Sigma, \\Delta$, and $\\Lambda$. \n\n\\section{Quasinormal mode orthogonality}\n\\label{sec:Ortho}\n\\subsection{Quasinormal modes}\nConsider modes of the form\n\\begin{equation}\\label{eq:modes}\n {}_s\\Upsilon_{\\ell m\\omega} = e^{-i\\omega t + i m \\phi} \\ensuremath{ {}_s R_{\\ell m \\omega }}(r) \\ensuremath{{}_s S_{\\ell m \\omega}}(\\theta),\n\\end{equation}\nwith $m \\in \\mathbb Z$ and $\\omega \\in \\mathbb C$, in the Kinnersley\nframe. 
This form leads to separation of the spin-$s$\nTeukolsky equation~\\cite{Teukolsky:1973ha}, $\\mathcal O \\Upsilon = 0$ (for any integer spin $s$), into an angular equation,\n\\begin{widetext}\n\\begin{align}\\label{eq:Sph eq}\n \\left[\\frac{1}{\\sin \\theta} \\frac{ {\\rm d}}{{\\rm d} \\theta}\\left(\\sin \\theta \\frac{{\\rm d} \\,}{{\\rm d} \\theta} \\right) \\right. \\left. + \\left( K - \\frac{m^2+s^2+2 m s \\cos \\theta}{\\sin^2 \\theta} - a^2 \\omega^2 \\sin^2 \\theta -2 a \\omega s \\cos \\theta \\right) \\right] \\ensuremath{{}_s S_{\\ell m \\omega}}(\\theta) = 0,\n\\end{align}\nand a radial equation,\n\\begin{align}\\label{eq:radial}\n \\left[ \\Delta^{-s} \\frac{{\\rm d}}{{\\rm d} r} \\left( \\Delta^{s+1} \\frac{{\\rm d}}{{\\rm d} r} \\right) \\right. \\left. + \\left( \\frac{H^2 - 2 i s (r-M)H}{\\Delta} + 4 i s \\omega r+2 a m \\omega - K +s(s+1) \\right) \\right] \\ensuremath{ {}_s R_{\\ell m \\omega }}(r) = 0,\n\\end{align}\n\\end{widetext}\nwith $H \\equiv (r^2+a^2)\\omega - a m$. Here $K$ is a separation\nconstant. Imposing regularity at the poles $\\theta=0,\\pi$, the angular\nequation leads to a discrete set of modes $\\ensuremath{{}_s S_{\\ell m \\omega}}$ and separation\nconstants $\\ensuremath{ {}_s K_{\\ell m \\omega}}$, both of which are indexed by\n$\\ell \\in \\mathbb Z^{\\ge \\max(|m|, |s|)}$. The functions\n$\\ensuremath{{}_s S_{\\ell m \\omega}}(\\theta)e^{im\\phi} $ are known as spin-weighted spheroidal\nharmonics~\\cite{Teukolsky:1973ha}. For $\\omega \\in \\mathbb R$, the\nangular problem reduces to a Sturm-Liouville eigenvalue problem. \nModes with the same $s$,\n$m$, and real $\\omega$, but different $\\ell$ are orthogonal, and\nwe normalize them such that\n\\begin{equation}\\label{eq:theta-orthogonality}\n \\int_0^\\pi {\\rm d}\\theta\\, \\sin\\theta \\, {}_sS_{\\ell m\\omega}(\\theta) {}_sS_{\\ell'm\\omega}(\\theta) = \\delta_{\\ell\\ell'}.\n\\end{equation}\nOrthogonality can be checked by verifying that the angular operator is\nsymmetric with respect to this product. \n\nTo discuss boundary conditions of the radial equation it is convenient to\nintroduce a ``tortoise'' coordinate ${\\rm d} r_*= (r^2+a^2)\/\\Delta {\\rm d} r$, see\n\\eqref{eq:rstar}. \nFor fixed $s,l,m,\\omega$ one considers the solutions\n$R^{\\rm in}$ and $R^{\\rm up}$ ``defined'' by the ``boundary conditions''\n\\begin{subequations}\\label{eq:R bcs}\n \\begin{align}\n &R^{\\rm in} \\sim \\frac{e^{-ikr_*}}{\\Delta^{s}}, \\qquad r_*\\to-\\infty,\\\\\n &R^{\\rm up} \\sim \\frac{e^{i\\omega r_*}}{r^{2s+1}}, \\qquad r_*\\to\\infty,\n \\end{align}\n\\end{subequations}\nwhere \n$k \\equiv \\omega-m\\Omega_H$, where $\\Omega_H$ is the angular frequency of the outer horizon\n$\\Omega_H=a\/(2Mr_+)$, and where the radii of the inner- and outer horizons (roots of $\\Delta$) are denoted by $r_\\pm$, respectively. \n\nThe conditions \\eqref{eq:R bcs} correspond physically to the\nabsence of incoming radiation from the past horizon and past null\ninfinity, respectively. As stated \\eqref{eq:R bcs} do not really pick out uniquely a solution in the case ${\\rm Im} \\omega<0$ because we may always add a multiple of the subdominant solution as $|r_*| \\to \\infty$ without affecting the asymptotic behavior. 
\nMore precisely, mode solutions may be obtained via series expansions \cite{Leaver1986}, involving three-term recurrence relations for the coefficients.\n Selecting the so-called ``minimal solution'' \cite{gautschi1967computational} of the recurrence relations ensures that the series representation converges at the horizon (in) or infinity (up).\footnote{This definition is satisfied by a radial solution of the form\n\begin{equation}\n R(r) = e^{i\omega r}(r-r_-)^{-1-s+i\omega+i\sigma_+}(r-r_+)^{-s-i\sigma_+} f(r) ,\n\end{equation}\nwhere $\sigma_+=(\omega r_+-am)\/(r_+-r_-)$ and $f(r)=\sum_{n=0}^{\infty}d_n\left(\frac{r-r_+}{r-r_-}\right)^n$ with $d_n$ coefficients that are a minimal solution to a three-term recursion relation~\cite{Leaver1985}, so that the series is uniformly absolutely convergent as $r\rightarrow\infty$.}\n Imposing both of these conditions simultaneously\n %\n\footnote{The problem is made\ncomplicated, however, because $\omega$ and $K$ appear in both the\nangular and radial equations, $\omega$ nonlinearly. One must jointly\nsolve both equations to obtain a self-consistent solution of this\nnonlinear eigenvalue problem. Using Hamiltonian methods (see appendix \ref{sec:Lagrangian-Hamiltonian})\none can recast this as the eigenvalue problem $\mathcal{H} \Upsilon = i\omega \Upsilon$,\ni.e., the problem is linear in $\omega$, but the angular and radial\nproblems remain coupled.}\ngives rise to a discrete set of quasinormal modes $\omega_n \in \mathbb{C}$, where $n=0,1,2,\ldots$ are the so-called ``overtone'' numbers. We restrict to frequencies with ${\rm Im}\, \omega \le 0$, as modes growing exponentially in time are not in the spectrum of Kerr \cite{Whiting:1988vc}.\n\n\begin{figure*}\n\centering\n\includegraphics[trim={0.cm 4.cm 0.cm 2.cm},clip,width=0.49\linewidth]{contour_r.pdf}\n\includegraphics[trim={0.cm 4.cm 0.cm 2.cm},clip,width=0.49\linewidth]{contour_rstar.pdf}\n\caption{{\it Left:} Sketch of the complex $r$ contour $C_*$ defining the bilinear form on quasinormal modes. The contour cannot be pulled back to the real axis because the integrand crosses (an infinite number of) different sheets associated with the branch points $r_-$ and $r_+$. {\it Right:} Same contour, but in the complex $r_*$ plane.\nNote that this contour cannot be pulled back to the real axis due to the presence of Stokes lines along which the integrand of the bilinear form would diverge.}\n\label{fig:r_contour}\n\end{figure*}\n\n\subsection{Bilinear form on quasinormal modes}\n\nWe would now like to extend our definition of the bilinear form $\langle\langle \cdot , \cdot \rangle\rangle$, originally defined \nonly for compactly supported solutions\/data on the Cauchy surface $\Sigma$, to quasinormal modes. \nThe immediate problem is that, according to the boundary conditions on the corresponding solutions \nto the radial equation, these blow up both at the horizon $r=r_+$ and at infinity $r \to \infty$. \nIn this subsection, inspired by the work of~\cite{LeungModes94}, we show that the Kerr\nbilinear form can be defined for quasinormal mode data by a suitable\ndeformation of the radial integration into the complex plane.\footnote{\nIn the quantum mechanics literature, this method is also known as (exterior)\ncomplex scaling \cite{Aguilar:1971ve}. 
\nComplex scaling and complex integration contours have already been used in the context of black hole quasinormal modes, see for instance~\\cite{Bony2007,Dyatlov:2011jd} and~\\cite{Glampedakis:2003dn,Leaver1986b}. } \n\nConsider the bilinear form acting on two quasinormal modes with\nquasinormal frequencies $\\omega_1$ and $\\omega_2$. The \nintegrand in the bilinear\nform~\\eqref{eq:mode-bilinear} goes as\n$\\sim e^{\\pm i(\\omega_1+\\omega_2)r_\\ast}$ as $r_\\ast \\to \\pm\\infty$,\nand therefore diverges exponentially for\n$\\Im(\\omega_1 + \\omega_2) < 0$, which is the case for all modes that decay\nin time. Therefore, we clearly see that the bilinear form as defined for compact support data~\\eqref{eq:Kinnersley-bilinear} is divergent.\n\nWe can obtain a finite bilinear form by analytic continuation in $r$.\nThe radial mode functions $R^{\\rm in\/up}(r)$ are analytic with branch points at $r=r_\\pm$ \\cite{Leaver1986}, and we take the branch cut as the wiggly line in Fig.~\\ref{fig:r_contour} going from $r_+$ to $r_-$. We take the branch cut for the tortoise coordinate\n\\eqref{eq:rstar} $r_*(r)$ to be identical, so that we can think of both the radial functions $R^{\\rm in\/up}$ and $r_*$ as \ndefined on the same multisheeted covering of the twice cut complex $r$-plane. The integrand of the bilinear form, given by the 3-form \n$\\pi_{abc} = \\epsilon_{abcd} \\pi^d$ [see \\eqref{pidef}] evaluated on two mode \nfunctions as in \\eqref{bilinear} or equivalently \\eqref{eq:Kinnersley-bilinear}, therefore has an analytic continuation on the multi-sheeted complex $r$-plane. \n\nIn \\eqref{bilinear} or equivalently \\eqref{eq:Kinnersley-bilinear}, we now define an integration contour going into \nthis complex $r$-plane as shown qualitatively \nin fig. \\ref{fig:r_contour}. In terms of $r_*(r)$, \nwhich is a function on the same multi-sheeted complex $r$-plane, the contour is defined in such a way that $0 < \\arg((\\omega_1 + \\omega_2) r_\\ast) < \\pi$\non the right limit, and\n$-\\pi < \\arg((\\omega_1 + \\omega_2) r_\\ast) < 0$ on the left, then as\n$|r_\\ast| \\to \\infty$, \nthe volume integral will converge exponentially with $|r_\\ast|$.\n\nTo achieve this for any $\\Im(\\omega_1 + \\omega_2) < 0$ me may take a snake\nshaped contour $\\lambda \\mapsto r_*(\\lambda,\\epsilon)$ of the radial\ncoordinate in the complex $r_*$ plane, with the properties\n\\begin{equation}\\label{eq:contour}\n \\begin{cases}\n r_*(\\lambda,\\epsilon)=\\lambda\n &\\text{for $\\lambda_1 < \\lambda < \\lambda_2$}\\\\\n \\arg r_*(\\lambda,\\epsilon) \\to +\\pi - \\epsilon & \\text{for $r_* \\to \\infty$}\\\\\n \\arg r_*(\\lambda,\\epsilon) \\to 0 + \\epsilon &\\text{for $r_* \\to -\\infty$,}\n \\end{cases}\n\\end{equation}\nwhere $\\lambda_1<0$, $\\lambda_2>0$ can in principle be chosen arbitrarily. We give a sketch of this contour, $C_*$, which corresponds \nto one in terms of $r$, in the right panel of Fig.~\\ref{fig:r_contour}. 
The corresponding 3-dimensional\n submanifold (depending on $\\epsilon > 0$ and on $t \\in {\\mathbb R}$) of the analytically continued Kerr manifold $M_{{\\mathbb C}}$ is denoted by\n$\\Sigma_{{\\mathbb C}} = \\{ (t,r_*(\\lambda,\\epsilon),\\theta,\\phi) \\mid \\lambda \\in {\\mathbb R} \\}$.\nIn practice, the angle $\\epsilon>0$ is chosen sufficiently small such that the integral in the following definition of the bilinear form converges, \n\\begin{align}\n \\langle\\langle \\Upsilon_1, \\Upsilon_2 \\rangle\\rangle &= \\Pi_{\\Sigma_{{\\mathbb C}}}[\\Psi_2^{4\/3}\\mathcal J \\Upsilon_1, \\Upsilon_2]. \\\\\n \\nonumber\n\\end{align}\nReplacing $\\Sigma$ with the contour $\\Sigma_{\\mathbb{C}}$ as described in section~\\ref{sec:bilinear_tphi}, \nthanks to the analyticity of the integrand and its fall off on $\\partial \\Sigma_{\\mathbb{C}}$, all properties of the bilinear form of of lemma \\ref{lemma:compactsupport} continue to hold on quasinormal modes. In particular, from item (iii) of lemma \\ref{lemma:compactsupport}, we get $(\\omega_1-\\omega_2) \\langle\\langle \\Upsilon_1 , \\Upsilon_2 \\rangle\\rangle=0$ for a pair of quasinormal modes with complex frequencies $\\omega_1, \\omega_2$. Furthermore, by (iv), the value of the bilinear form is independent of the precise choice of $t$, details of the complex integration contour such as the asymptotic angle $\\epsilon$ against the real half-axes and\/or $\\lambda_1, \\lambda_2$, as long as the integrand is exponentially decaying. \n\n\\begin{corollary}[Orthogonality of quasinormal modes]\n Let $\\Upsilon_1$ and $\\Upsilon_2$ be quasinormal modes for the $s=2$\n Teukolsky equation\n with frequencies $\\omega_1$ and $\\omega_2$. Then either\n $\\langle\\langle \\Upsilon_1, \\Upsilon_2 \\rangle\\rangle = 0$ or\n $\\omega_1 = \\omega_2$.\n\\end{corollary}\n\n\nOur bilinear form takes the following form on quasinormal mode solutions~\\eqref{eq:modes}. After plugging two $s=-2$ mode solutions in separated form\ninto~\\eqref{eq:Kinnersley-bilinear}, we can carry out the $\\phi$\nintegration to obtain \n\\begin{widetext}\n\\begin{eqnarray}\\label{eq:mode-bilinear}\n &&\\langle\\langle \\Upsilon_{\\ell_1m_1\\omega_1}, \\Upsilon_{\\ell_2m_2\\omega_2} \\rangle\\rangle \\nonumber\\\\&= &8\\pi M^{4\/3} \\delta_{m_1m_2} e^{-i(\\omega_2-\\omega_1)t}\n \\int_{C_*} {\\rm d} r_* \\int_0^\\pi {\\rm d}\\theta\\, \\frac{ (r^2+a^2)\\sin\\theta}{\\Delta} S_1(\\theta) S_2(\\theta) R_1(r) R_2(r) \n\n \\bigg( - \\frac{i\\Lambda}{\\Delta}(\\omega_1+\\omega_2) \\nonumber\\\\\n &&\\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad + \\frac{2iMra}{\\Delta}(m_1+m_2) + 2 \\left[ -r - ia\\cos\\theta + \\frac{M}{\\Delta}(r^2 - a^2)\\right] \\bigg)\n\\end{eqnarray}\n\\end{widetext}\nwith $C_*$ the contour for the $r_*$-integration described above and the Kerr quantities $\\Delta, \\Sigma, \\Lambda$ as given in \nappendix \\ref{app:D}.\n\nThe integrands depend on $\\theta$ and $r$ in a nonfactorizable way, so\nthis expression is the best that can be achieved in general: for Kerr,\nthe orthogonality relation expressed by the previous corollary \n(vanishing of the above inner product for $\\omega_1 \\neq \\omega_2$) \nis fundamentally two-dimensional. 
This has\nto do with the fact that the orthogonality\nrelation~\\eqref{eq:theta-orthogonality} for spin-weighted spheroidal\nharmonics occurs between modes of different $\\ell$ but the \\emph{same}\n$m$ and $\\omega$; if $\\omega_1 \\ne \\omega_2$, then no such relation\nexists, and one cannot expect to be able to perform the $\\theta$\nintegration to obtain a $\\delta_{\\ell_1\\ell_2}$ factor. \n\nIn the $a\\to0$ Schwarzschild limit, however, the integral \\emph{does}\nfactorize: the $\\theta$ dependence of the integrand reduces to the\n$\\sin\\theta$ volume factor on the sphere, the spheroidal harmonics\nreduce to spherical harmonics (independent of $\\omega$), and the\n$\\theta$ integral is proportional to $\\delta_{\\ell_1\\ell_2}$. One is left\nwith a radial integration, which must vanish for\n$\\omega_1 \\ne \\omega_2$. \n\nAs we can see from the following figure \\ref{fig:ortho}, the contour integral in the bilinear form converges quite well, \nwhich is useful in practice when using it to extract excitation coefficients, as we describe in the next section. \nFurthermore, since orthogonality is an exact result for quasinormal modes, it can be used potentially as a benchmark check \nfor approximations. For example, we have considered approximations to quasinormal modes based on a matched asymptotic \nexpansion for near-extremal black holes, and have found that the orthogonality relation is typically satisfied to a very high accuracy. \n\\begin{figure}\n\\centering\n\\includegraphics[width=1.\\linewidth,trim={.cm .1cm .1cm .1cm},clip]{Ortho_Kerr_l_n.pdf}\n\\caption{Numerical check of the orthogonality between two Kerr quasinormal modes with the same $l=m=2$ and different $n=0$, 1 (upper panel) and modes with the same $n=0$, $m=2$ and different $l=2$, 3 (lower panel). We show the result of the numerical evaluation of the bilinear form~\\eqref{eq:mode-bilinear} along the most convergent contour (black points) $r_* \\to r_{*,\\rm lower\/upper}+ \\lambda e^{-i \\arg(\\omega_1+\\omega_2)+i\\theta}$, $\\theta=\\pi\/2$, integrating up to $\\lambda_{\\rm upper\/lower}$. We use the mode solutions provided by the Black Hole Perturbation Toolkit~\\cite{BHPToolkit}.\nFor the overtone orthogonality, we also show an exponential fit converging to zero as $\\lambda_{\\rm lower}\\to-\\infty$ (red line). Because of the presence of the branch cut, the lower integration limit sets the overall accuracy of the bilinear form. In this example, we set $M=1$, $a\\simeq0.7$, $r_{*,\\rm upper}=4$ and $r_{*,\\rm lower}=-6$.\n}\n\\label{fig:ortho}\n\\end{figure}\n\nWe remark that\nour ``norm'' on quasinormal modes \nhas some similarities with the ``norm'' of resonant state \nwave functions in quantum mechanics defined by~\\cite{Zeldovich:1961theory}. 
Rather than taking the integral of $|\\psi|^2$, the ``norm''\nused by \\cite{Zeldovich:1961theory} also involves $\\psi^2$, whereas our bilinear is complex linear in both arguments as opposed to an inner product (anti-linear in the first argument, complex linear in the second).\nOur regularization procedure differs from that proposed by~\\cite{Zeldovich:1961theory} but was rather inspired by the investigations \\cite{LeungModes94} in the context of leaky optical\none-dimensional cavities, and on Schwarzschild black holes in~\\cite{Ching:1993gt}.\nIn \\cite{leung1997twoa,leung1997twob} it was recognized that phase space was the natural setting for the bilinear form.\\footnote{A\nvariational method for computing quasinormal frequencies of ``dirty''\nSchwarzschild black holes was developed in~\\cite{Leung:1999rh,Leung:1999iq}.}\nIn fact, in several ways our work was inspired by some of these papers: we work\nwithin the Teukolsky formalism, we arrive at the bilinear form\nstarting from the symplectic form, and we recognize the fundamental\nimportance of the $t$--$\\phi$ reflection symmetry. \n\n\\medskip\n\n\\section{Excitation coefficients}\\label{sec:Lap transform} \n\nIf $\\langle\\langle \\cdot , \\cdot \\rangle\\rangle$ were an honest to God scalar product in a Hilbert space and $\\{ {}_s\\Upsilon_{\\ell mn} \\}$ an orthonormal basis, then an arbitrary \nwave function $\\Upsilon_s$ could evidently be expanded as\n\\begin{align}\n\\label{excited}\n\\Upsilon_s = \\sum_{\\ell mn} c_{\\ell mn} \\, {}_s\\Upsilon_{\\ell mn},\n\\end{align}\nwhere the excitation coefficients are\n\\begin{align}\nc_{\\ell mn} =\\frac{\\langle\\langle {}_s\\Upsilon_{\\ell mn} , \\Upsilon_s \\rangle\\rangle }{\\langle\\langle {}_s\\Upsilon_{\\ell mn} , {}_s\\Upsilon_{\\ell mn} \\rangle\\rangle }. \\label{eq:excitation coeff}\n\\end{align}\nHere $\\sum_{\\ell m n}$ denotes $\\sum_{\\ell = \\vert s\\vert}^\\infty\\sum_{m=-\\ell}^\\ell \\sum_{n=0}^\\infty$. In the present context, $\\langle\\langle \\cdot,\\cdot\\rangle\\rangle$ is of course \nonly a symmetric bilinear form on solutions to the spin $s$ Teukolsky equation (for the case of interest in this paper, $s=-2$). It is neither positive definite, nor is the set of quasi-normal modes, \nwhile being orthogonal, in any obvious mathematical sense a complete basis for a reasonable function space in as far as we can see. \n\nInspired by~\\cite{LeungModes94}, we will nevertheless show in this section \nthat for solutions $\\Upsilon_s$ to the adjoint Teukolsky equation with compact support on a Cauchy surface $\\Sigma$, \nthe above expansion can formally be ``derived'' in the Laplace transform formalism \\cite{Leaver1986b,Nollert:1999ji} for the retarded propagator if we deform the frequency \nintegration contours into the complex plane and collect only contributions from the quasinormal mode frequencies. Thus, \\eqref{eq:excitation coeff}, while not an exact \nequality, is expected to capture the transient behavior of the solution $\\Upsilon_s$.\n\n\\subsection{Laplace transform}\\label{eq:Laplace}\n\nThe Laplace transform $\\hat f(\\omega) = L f(t)$ of a function $f(t)$ is given by\n\\begin{equation}\\label{eq:Lap def}\n\\hat f(\\omega) = \\int_0^\\infty e^{i\\omega t} f(t) {\\rm d} t,\n\\end{equation}\n where $\\Im \\omega >0$. The Laplace transform is related to the Fourier transform $\\mathcal F \\!f = \\int_{-\\infty}^{\\infty} e^{i \\omega t} f(t) {\\rm d} t$ by sending $f(t) \\to f(t) \\theta (t)$, where $\\theta(t)$ is the Heaviside distribution. 
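\nAs a simple worked example (added here for orientation; it is not needed for the argument), consider a single damped oscillation $f(t) = e^{-i\omega_n t}$ with $\Im \omega_n < 0$. The definition \eqref{eq:Lap def} gives\n\begin{equation}\n\hat f(\omega) = \int_0^\infty e^{i(\omega-\omega_n) t}\, {\rm d} t = \frac{i}{\omega - \omega_n}, \qquad \Im \omega > \Im \omega_n,\n\end{equation}\nso that, upon analytic continuation, a quasinormal-mode-like signal shows up as a simple pole of $\hat f(\omega)$ in the lower half of the complex $\omega$ plane. This is the mechanism exploited below, where the inversion contour is closed and residues at the zeros of the Wronskian are collected.\n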
Sufficient conditions for the existence of $\hat f(\omega)$ are that the function $f(t)$ be Riemann integrable (continuous except on sets of measure zero) on every closed sub-interval of the path of integration and that it be of exponential order; i.e., one can find constants $a$ and $N$ such that $\vert e^{-at} f(t) \vert < N$ for all sufficiently large $t$. If the Laplace integral exists for some value of $\omega=\omega_0$, then it also exists for all $\omega$ with $ \Im \omega>\Im \omega_0$. The lowermost $\Im \omega_0$ where convergence occurs is called the abscissa of convergence, and the region above this line is called the convergence region. The function $\hat f(\omega)$ is analytic in the convergence region. \n\nThe Laplace transform formalism is naturally adapted to the study of causal dynamics of linear second-order systems, as it trades time derivatives for factors of $-i\omega$ together with field values at the initial time, thereby incorporating the initial data into a source term:\n\begin{align}\label{eq:lap derivs}\nLf'(t) = &-i\omega \hat f(\omega)-f(0), \\\nLf''(t) = &-\omega^2 \hat f(\omega)+i\omega f(0) - f'(0).\n\end{align}\nThe Laplace transform $\ensuremath{\hat \Upsilon_{s}}$ of the spin-$s$ master function $\ensuremath{\Upsilon_{s}}$ is given by\n\begin{equation}\label{eq:tilde U}\n\ensuremath{\hat \Upsilon_{s}}(\omega,r,\theta,\phi) = \int_{0}^{\infty} e^{i \omega t} \ensuremath{\Upsilon_{s}}(t,r,\theta,\phi) {\rm d} t.\n\end{equation}\nThis is decomposed into modes in the usual way,\n\begin{equation}\label{eq:tilde Us mode}\n\ensuremath{\hat \Upsilon_{s}} = \n\sum_{\ell m} \ensuremath{{}_s S_{\ell m \omega}}(\theta) \, \ensuremath{ {}_s R_{\ell m \omega }}(r) e^{i m \phi}.\n\end{equation}\nThe inverse transform is given by\n\begin{equation}\label{eq:inverseLap}\n\Upsilon_s(t,r,\theta,\phi)= \frac{1}{2\pi}\int_{-\infty+ic}^{\infty+ic} e^{-i\omega t} \ensuremath{\hat \Upsilon_{s}}(\omega,r,\theta,\phi)\, {\rm d} \omega,\n\end{equation}\nwhere $c>0$ is chosen such that the integration contour lies within the convergence region.\n\nTo formulate the initial data problem within the mode decomposition, we take the Laplace transform of the Teukolsky master equation \nand substitute \eqref{eq:tilde Us mode}. We then collect the terms in the master equation with transformed time derivatives on the right-hand side and project onto the angular mode function. \nThis yields a sourced equation for the radial function,\n\begin{equation}\label{eq:radeq formal}\n\mathcal{L} \ensuremath{ {}_s R_{\ell m \omega }} = \ensuremath{ {}_s I_{\ell m \omega}}.\n\end{equation}\nHere, $\mathcal{L}$ is given in \eqref{eq:radial} and the source $\ensuremath{ {}_s I_{\ell m \omega}}=\ensuremath{ {}_s I_{\ell m \omega}}(r)$ consists of $(\ell,m)$-projected initial data~\cite{Campanelli:1997un}:\n\begin{widetext}\n\begin{align}\label{eq:I source modes}\n \ensuremath{ {}_s I_{\ell m \omega}}= \int_0^{2\pi}\! \! 
\\int_0^\\pi \\bigg[ \n\\frac{\\Lambda}{\\Delta} \\left( \\ensuremath{\\partial}_t \\ensuremath{\\Upsilon_{s}} - i \\omega \\ensuremath{\\Upsilon_{s}} \\right) -2s \\Big( \\frac{M(r^2-a^2)}{\\Delta} -r -ia\\cos\\theta \\Big) \\ensuremath{\\Upsilon_{s}} + \\frac{4 M ar}{\\Delta} \\ensuremath{\\partial}_\\phi \\ensuremath{\\Upsilon_{s}}\\bigg]_{t=0}\\ensuremath{{}_s S_{\\ell m \\omega}}(\\theta)\\, e^{-i m \\phi} \\sin \\theta\\, {\\rm d} \\theta \\, {\\rm d} \\phi\n\\end{align}\n\\end{widetext}\nwhich we take to be of compact support.\n Imposing the outgoing boundary conditions \\eqref{eq:R bcs} fixes the freedom of homogeneous solutions to \\eqref{eq:radeq formal} and therefore defines a radial Green's function\n${}_s g_{\\ell m\\omega}(r,r') = \\ensuremath{ {}_s R_{\\ell m \\omega }}^{\\rm in}(r_<)\\ensuremath{ {}_s R_{\\ell m \\omega }}^{\\rm up}(r_>)\/\\mathcal W $\nwhere $r_< \\,\\,(r_>)$ is the lesser (greater) of $r$ and $r'$. Here, for any two solutions of the radial equation at fixed\n$s, m, \\ell, \\omega$, the (``$\\Delta$-scaled'') Wronskian\n\\begin{equation}\\label{eq:Wronskian}\n\\mathcal W[R_1,R_2] = \\Delta^{1+s}\\left[ R_1 \\frac{{\\rm d} R_2}{{\\rm d} r} - R_2 \\frac{{\\rm d} R_1}{{\\rm d} r} \\right]\n\\end{equation}\nhas been defined which is independent of $r$. If $R_1$ and $R_2$ are linearly dependent, then\nthe Wronskian vanishes. Thus, if we take $R_1 \\to R^{\\text{in}}$,\n$R_2 \\to R^{\\text{up}}$, the Wronskian vanishes when $\\omega$ attains\na quasinormal frequency.\n\nThe quasinormal mode contribution to $\\Upsilon_s$ can be found by closing the contour of the Laplace integral in the lower-half complex $\\omega$ plane, and can be expressed \nas a discrete sum over the residues of the radial Green's function arising at the points in the complex frequency plane where the Wronskian vanishes:\n\\begin{align}\\label{eq:oops n}\n\\Upsilon_{s} =& -i \\sum_{n\\ell m} e^{-i\\omega_n t+im\\phi} {}_sS_{\\ell m n}(\\theta) \\nonumber\\\\\n&\\times \\int_{r_+}^{\\infty} \\frac{\n\\Rq{n}^{\\rm in}(r_<) \\Rq{n}^{\\rm up}(r_>)}{{\\rm d}\\mathcal W\/ {\\rm d}\\omega\\vert_{\\omega_n}} {}_s I_{\\ell m n}(r')\\Delta^s(r') {\\rm d} r',\n\\end{align}\nwhere ${}_s I_{\\ell m n}={}_s I_{\\ell m \\omega}\\vert_{\\omega=\\omega_n}$. 
In considering only the poles when closing the contour, we are effectively ignoring \nthe early-time ``direct'' contribution from the large-$\\omega$ arc and the late-time ``tail'' contribution resulting from the branch point at zero frequency.\nThus, the $=$ sign in the above equation is not actually justified and should be understood as meaning this approximation.\n\nOn a quasinormal mode,\n $R^{\\rm in}$ is a constant multiple of $R^{\\rm up}$, and either may be moved outside the radial integral to write the field as\n\\begin{equation}\\label{eq:oops nice}\n\\Upsilon_{s} = \\sum_{n \\ell m} c_{\\ell mn} \\,{}_s\\Upsilon_{lmn},\n\\end{equation}\nwhere we have isolated the familiar form of the excitation coefficient\n\\begin{align}\\label{eq:qnm exc}\nc_{n\\ell m} &= -\\frac{i}{{\\rm d} \\mathcal W\/{\\rm d}\\omega\\vert_{\\omega_n}}\\int_{r_+}^{\\infty}\n {}_s I_{\\ell m n}(r') \\, {}_s R_{\\ell mn}(r') \\Delta^s(r') \\, {\\rm d} r'.\n\\end{align}\n\n\\subsection{Equivalence between \n\\eqref{eq:qnm exc} and \\eqref{eq:excitation coeff}} \n\n To begin, for a given\nradial function $R$, and $\\ell, m, \\omega$, we can define a $s=-2$ GHP\nscalar $\\Upsilon_{\\ell m \\omega}$ in separated form~\\eqref{eq:modes}\nby appending a spin-weighted spheroidal harmonic and $e^{-i\\omega t}$\ntime-dependence. For the time being, we do not require $R$ to satisfy\nany equation. We have the following lemma relating the Wronskian to\n$t\\cdot\\pi$ integrated over the 2-sphere.\n\n\\begin{lemma}\\label{lemma:wronskian-boundary}\n Let $\\Upsilon_1, \\Upsilon_2 \\ensuremath{\\circeq} (-4,0)$ be two GHP scalars in\n separated form~\\eqref{eq:modes}, with the same $m, \\ell, \\omega$,\n where $S_1$, $S_2$ are normalized spin-weighted spheroidal harmonics\n solving the angular equation, but where $R_1, R_2$ are not\n necessarily solutions to the radial equation. Then\n \\begin{equation}\n 8\\pi M^{4\/3} \\mathcal{W}[R_1,R_2] = \\int_{S^2(t,r)} t \\cdot \\pi(\\Psi_2^{4\/3}\\mathcal J \\Upsilon_1, \\Upsilon_2),\n \\end{equation}\n where $S^2(t,r)$ is a sphere of constant $t$ and $r$, $\\pi_{abc} = \\epsilon_{abcd} \\pi^d$ and $\\pi^a$ as in \\eqref{pidef}.\n\\end{lemma}\n\\begin{proof}\nConsider the Cauchy surface $\\Sigma(t) = \\{t={\\rm const.}\\}$ in Boyer-Lindquist coordinates. The future directed normal \nto $\\Sigma$ and induced area element on $S^2(t,r)$ are given by, respectively\n\\begin{align}\n \\nu^a =& \\left( \\sqrt{\\frac{\\Lambda}{\\Delta\\Sigma}}, 0, 0, \\frac{2Mar}{\\sqrt{\\Delta\\Sigma\\Lambda}}\\right), \\\\\n {\\rm d} A =& \\sqrt{\\Sigma\\Lambda} \\sin\\theta \\, {\\rm d} \\theta {\\rm d} \\phi. \n\\end{align}\nFrom the first relation on can read off the lapse function $N$ of $t^a$ from $\\nu^a = (t^a - N^a)\/N$. 
\nThe action of the reflection reverses $\\nu^a$ and from this fact and the formula for $\\pi^a$, see \\eqref{pidef}, one can deduce that \n\\begin{widetext}\n \\begin{align}\\label{eq:int-tdotpi}\n \\int_{S^2(t,r)} t \\cdot \\pi(\\Psi_2^{4\/3}\\mathcal J \\Upsilon_1, \\Upsilon_2) = \\int_{S^2(t,r)} N \\Psi_2^{4\/3}\\left\\{ (\\mathcal J\\Upsilon_1) r^a(\\Theta_a - 4 B_a) \\Upsilon_2 - \\Upsilon_2 \\mathcal J [r^a (\\Theta_a - 4 B_a) \\Upsilon_1 ]\\right\\} {\\rm d} A\n \\end{align}\n\\end{widetext}\nwhere $r^a$ is the normal to $S^2(r,t)$ inside $\\Sigma(t)$.\nAn explicit calculation shows that in the Kinnersley frame,\n \\begin{equation}\n r^a(\\Theta_a - 4B_a)\\Upsilon = \\sqrt{\\frac{\\Delta}{\\Sigma}}\\partial_r\\Upsilon - 2\\frac{(r-M)}{\\sqrt{\\Delta\\Sigma}}\\Upsilon.\n \\end{equation}\n Using this, as well as expressions for $N$, ${\\rm d} A$, and\n $\\mathcal J$ [using~\\eqref{eq:boostParams}], we obtain\n \\begin{widetext}\n \\begin{equation}\n \\int_{S^2(t,r)} t \\cdot \\pi(\\Psi_2^{4\/3}\\mathcal J \\Upsilon_1, \\Upsilon_2)\n = \\frac{4M^{4\/3}}{\\Delta(r)} \\left( R_1\\frac{{\\rm d} R_2}{{\\rm d} r} - R_2 \\frac{{\\rm d} R_1}{{\\rm d} r}\\right) \\int_0^{\\pi}\\int_0^{2\\pi} \\ S_1(\\theta) S_2(\\theta) \\, \\sin\\theta {\\rm d} \\theta {\\rm d} \\phi .\n \\end{equation}\n\\end{widetext}\nFinally, perfoming the integration and using the normalization~\\eqref{eq:theta-orthogonality} for the angular\n functions, we obtain the result.\n\\end{proof}\n\nNext, we take $R_1$ and $R_2$ to be solutions ingoing at the horizon\nand outgoing at infinity. Considered as a function of $\\omega$, the\nWronskian vanishes at quasinormal frequencies $\\omega_n$, because at\nthese frequencies the two solutions become linearly dependent. The\nfirst derivative with respect to $\\omega$, however, is proportional to\nthe ``norm'' of the quasinormal mode.\n\\begin{lemma}\\label{lemma:Wronskian-derivative}\n Let $R_\\omega^{\\mathrm{in}}, R_{\\omega}^{\\mathrm{up}}$ be solutions to\n the radial equation for fixed $s=-2,\\ell,m$, and allowing $\\omega$\n to vary, that are ingoing at the horizon and outgoing at infinity,\n respectively, as in~\\eqref{eq:R bcs}. Construct\n $\\Upsilon^{\\mathrm{in}}_\\omega, \\Upsilon^{\\mathrm{up}}_\\omega \\ensuremath{\\circeq}\n (-4,0)$ as mode solutions based on the radial functions. Then the\n derivative of the Wronskian at a quasinormal frequency $\\omega_n$\n can be written\n \\begin{equation}\n \\left. \\frac{{\\rm d}}{{\\rm d} \\omega}\\mathcal{W}[R^{\\mathrm{in}}_\\omega, R^{\\mathrm{up}}_\\omega] \\right|_{\\omega = \\omega_n} = \\frac{-i}{8\\pi M^{4\/3}} \\langle\\langle \\Upsilon^{\\mathrm{in}}_{\\omega_n}, \\Upsilon^{\\mathrm{up}}_{\\omega_n} \\rangle\\rangle.\n \\end{equation}\n\\end{lemma}\n\\begin{proof}\n Consider the current\n $\\pi\\left(\\Psi_2^{4\/3} \\mathcal J \\Upsilon^{\\text{in}}_{\\omega_n},\n \\Upsilon^{\\text{up}}_\\omega\\right)$ evaluated at a generic\n frequency $\\omega$ and a quasinormal frequency $\\omega_n$. 
By\n Cartan's magic formula and the fact that $\\pi$ is closed on\n solutions,\n \\begin{align}\n &{\\rm d}\\left(t \\cdot \\pi\\left(\\Psi_2^{4\/3} \\mathcal J \\Upsilon^{\\text{in}}_{\\omega_n}, \\Upsilon^{\\text{up}}_\\omega\\right)\\right) \\nonumber \\\\\n &\\quad = \\mathcal{L}_t \\pi\\left(\\Psi_2^{4\/3} \\mathcal J \\Upsilon^{\\text{in}}_{\\omega_n}, \\Upsilon^{\\text{up}}_\\omega\\right) \n \\nonumber\\\\\n &\\quad = -i(\\omega - \\omega_n)\\pi\\left(\\Psi_2^{4\/3} \\mathcal J \\Upsilon^{\\text{in}}_{\\omega_n}, \\Upsilon^{\\text{up}}_\\omega\\right) ,\n \\end{align}\n where in contrast to the previous lemma the right side does not\n vanish on account of the different frequencies. We integrate over a partial Cauchy surface $S$, and\n apply Stokes's theorem,\n \\begin{align}\\label{eq:balance}\n &\\int_{\\partial S} t \\cdot \\pi(\\Psi_2^{4\/3} \\mathcal J \\Upsilon^{\\text{in}}_{\\omega_n}, \\Upsilon^{\\text{up}}_\\omega)\\nonumber\\\\\n &\\quad= -i (\\omega - \\omega_n) \\int_S \\pi(\\Psi_2^{4\/3} \\mathcal J \\Upsilon^{\\text{in}}_{\\omega_n}, \\Upsilon^{\\text{up}}_\\omega).\n \\end{align}\n Next, we\n differentiate this equation with respect to $\\omega$ and take the\n limit $\\omega\\to \\omega_n$. On the right side, we trivially get\n \\begin{equation}\n \\left.\\frac{{\\rm d}}{{\\rm d}\\omega}\\right|_{\\omega=\\omega_n} \\text{r.h.s. of \\eqref{eq:balance}} \n = -i \\int_S \\pi(\\Psi_2^{4\/3} \\mathcal J \\Upsilon^{\\text{in}}_{\\omega_n}, \\Upsilon^{\\text{up}}_{\\omega_n}).\n \\end{equation}\n The left side can be expressed as three terms, namely \n \\begin{align}\n &\\left.\\frac{{\\rm d}}{{\\rm d}\\omega}\\right|_{\\omega=\\omega_n} \\text{l.h.s. of \\eqref{eq:balance}} \\nonumber\\\\\n &\\quad= \\int_{\\partial S_+} t\\cdot\\pi\\left(\\Psi_2^{4\/3} \\mathcal J \\Upsilon^{\\text{in}}_{\\omega_n}, \\left.\\frac{{\\rm d}}{{\\rm d}\\omega}\\right|_{\\omega=\\omega_n} \\Upsilon^{\\text{up}}_\\omega\\right) \\nonumber\\\\\n &\\qquad- \\left.\\frac{{\\rm d}}{{\\rm d}\\omega}\\right|_{\\omega=\\omega_n} \\int_{\\partial S_-} t\\cdot\\pi\\left(\\Psi_2^{4\/3} \\mathcal J \\Upsilon^{\\text{in}}_\\omega, \\Upsilon^{\\text{up}}_\\omega\\right) \\nonumber\\\\\n &\\qquad + \\int_{\\partial S_-} t\\cdot \\pi\\left( \\left.\\frac{{\\rm d}}{{\\rm d}\\omega}\\right|_{\\omega=\\omega_n} \\Psi_2^{4\/3} \\mathcal{J} \\Upsilon^{\\text{in}}_\\omega, \\Upsilon^{\\text{up}}_{\\omega_n} \\right).\n \\end{align}\n By lemma~\\ref{lemma:wronskian-boundary} we can write the second of\n these terms as the derivative of the Wronskian,\n \\begin{align}\n &\\left.\\frac{{\\rm d}}{{\\rm d}\\omega}\\right|_{\\omega=\\omega_n} \\int_{\\partial S_-} t\\cdot\\pi\\left(\\Psi_2^{4\/3} \\mathcal J \\Upsilon^{\\text{in}}_\\omega, \\Upsilon^{\\text{up}}_\\omega\\right) \\nonumber\\\\\n &\\quad= 8 \\pi M^{4\/3} \\left.\\frac{{\\rm d}}{{\\rm d}\\omega} \\mathcal W[R^{\\text{in}}_\\omega, R^{\\text{up}}_\\omega]\\right|_{\\omega = \\omega_n}.\n \\end{align}\n\n Summarizing our results so far, we have shown that\n \\begin{align}\\label{eq:balance2}\n &8 \\pi M^{4\/3} \\left.\\frac{{\\rm d}}{{\\rm d}\\omega} \\mathcal W[R^{\\text{in}}_\\omega, R^{\\text{up}}_\\omega]\\right|_{\\omega = \\omega_n} \\nonumber\\\\\n &\\quad = -i \\int_S \\pi(\\Psi_2^{4\/3} \\mathcal J \\Upsilon^{\\text{in}}_{\\omega_n}, \\Upsilon^{\\text{up}}_{\\omega_n}) \\nonumber\\\\\n &\\qquad - \\int_{\\partial S_-} t\\cdot \\pi\\left( \\left.\\frac{{\\rm d}}{{\\rm d}\\omega}\\right|_{\\omega=\\omega_n} \\Psi_2^{4\/3} \\mathcal{J} 
\\Upsilon^{\\text{in}}_\\omega, \\Upsilon^{\\text{up}}_{\\omega_n} \\right)\n \\nonumber\\\\\n &\\qquad - \\int_{\\partial S_+} t\\cdot\\pi\\left(\\Psi_2^{4\/3} \\mathcal J \\Upsilon^{\\text{in}}_{\\omega_n}, \\left. \\frac{{\\rm d}}{{\\rm d}\\omega} \\right|_{\\omega=\\omega_n} \\Upsilon^{\\text{up}}_\\omega\\right).\n \\end{align}\nAs $S \\to \\Sigma_\\mathbb{C}$, the boundary integrals vanish exponentially, so the right hand side\nof~\\eqref{eq:balance2} reduces to $-i \\langle\\langle \\Upsilon^{\\text{in}}_{\\omega_n}, \\Upsilon^{\\text{up}}_{\\omega_n} \\rangle\\rangle$. \n\\end{proof}\n\nThe desired equivalence between \\eqref{eq:qnm exc} and \\eqref{eq:excitation coeff} can be seen immediately by\nsubstituting \\eqref{eq:I source modes} for ${}_s I_{\\ell m n}$, comparing with \\eqref{eq:Kinnersley-bilinear}, and applying lemma~\\ref{lemma:Wronskian-derivative}.\n\n\\section{Concluding remarks}\n\nWe end this paper with some potential applications and alternatives to our formalism. \n\nThe main motivation of this work is to provide some tools needed to study the black hole ringdown beyond linear order in perturbation theory. Higher orders are already needed to interpret high-precision numerical relativity simulations of binary mergers~\\cite{Mitman:2022qdl,Cheung:2022rbm}, and could be needed to analyse gravitational wave observations by future detectors. \nRoughly speaking, we wish to make an ansatz \nfor the solution of the non-linear system as a linear combination of quasinormal modes with \\emph{time dependent} excitation coefficients similar to \\eqref{excited}. The idea is that the bilinear form will help us writing down a dynamical system for these coefficients by analogy with wave equations on compact spaces, where the normal modes would be used instead to compute the overlap integrals required for terms in this dynamical system that are non-linear in the modes.\n\nAs extremality is approached, it is well known that a family of quasinormal modes becomes arbitrarily long-lived, with\n$\\Re \\omega \\approx m\\Omega_H$\n\\cite{PressTeukolsky1973,Detweiler1977,Leaver1985,Hod:2008zz,Yang:2012pj,Cook:2014cta}. With\na commensurate frequency spectrum and arbitrarily slow decay, these\nmodes have been conjectured to become \\emph{turbulent} as\n$a\\to M$~\\cite{Yang:2014tla}. By taking the extremal limit of the nonlinear excitation coefficients using the approach detailed above, we hope to establish (or rule out) the emergence of turbolent behavior. \n\nApplications along similar lines could include mode\nmixing in clouds of ultralight scalar fields that could form outside\nKerr black holes~\\cite{Arvanitaki:2010sy}. Here, once these clouds grow via\nthe superradiant instability, nonlinear interactions between the modes have been conjectured to give rise to a coherent\nemission of gravitational waves or a bosenova~\\cite{Yoshino:2013ofa}, see~\\cite{Baumann:2018vus,Baumann:2022pkl} for recent proposals based on heuristic methods. It would be interesting to see whether our methods could be used to conceptualize or shed more light on the theoretical basis of such proposals. \n\nIn \\cite{gajic2021quasinormal}, a different approach is taken to quasinormal modes. Their essential idea is to consider, instead of a time $t$\nCauchy surface intersecting the bifurcation surface and spatial infinity, a ``hyperboloidal'' slice intersecting the future event horizon and future null infinity. 
On such a slice, they define a certain space of ``almost analytic'' functions (a ``Gevrey space'') encoding somehow the ``boundary conditions'' \\eqref{eq:R bcs}. Their space is in fact a genuine Hilbert space, and the time evolution is represented on this space by a semigroup whose generator is essentially the Hamiltonian, $\\mathcal{H}$ (see appendix \\ref{sec:Lagrangian-Hamiltonian}). Their inner product is non-canonical -- and is not conserved -- and the generator of the semi-group is correspondingly not symmetric, as is also not physically expected due to the ``dissipative'' nature of quasinormal modes. Nevertheless, their analysis shows that quasinormal modes are genuine eigenfunctions $\\mathcal{H} \\Upsilon = i\\omega \\Upsilon$ in this space -- crucially, by contrast to their restriction to a constant $t$ surface, they do not blow up on the hyperboloidal slice as the horizon or scri are approached. \nWhile the quasinormal modes are not orthogonal in their inner product, the definition of our bilinear form with a hyperboloidal slice is also clearly possible and it would be interesting to see whether quasinormal modes, as defined in the \nsetting of \\cite{gajic2021quasinormal} (see also~\\cite{Zenginoglu:2011jz,PanossoMacedo:2019npm,Ripley:2022ypi}) are still orthogonal in this bilinear form, as we conjecture. This would provide an alternative to our regularization procedure involving complex contours. \n\nFinally, it will be interesting to explore the relation between our bilinear form and the adjoint-spheroidal functions introduced in Ref.~\\cite{London:2020uva}.\n\n\n\\medskip\n{\\bf Acknowledgements:} We thank M. Casals, E. Flanagan, D. Gajic and E. Flanagan for comments and discussions related to this work. SH thanks the Max-Planck Society for supporting the collaboration between MPI-MiS and Leipzig U., Grant Proj. Bez. M.FE.A.MATN0003. VT is grateful to the International Max Planck Research School, MPI-MiS for support through a studentship. This work makes use of the Black Hole Perturbation Toolkit.\n\n\n\n\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section{Introduction and Result}\nLet $\\Omega \\subset \\mathbb{R}^n$ be a bounded domain with $C^1-$boundary. The normal vectors $n(x), n(y)$ of two elements of the boundary, $x,y \\in \\partial \\Omega$,will point roughly in the same direction which is roughly orthogonal to $y-x$ if $x$ and $y$ are close. In regions of\nlarge curvature the normal vector changes quickly but convex domains whose boundary has regions with large curvature are `flatter' in\nother regions and it might all average out in the end. We prove a quantitative version of this notion.\n\n\\begin{thm} There exists $c_n > 0$ so that for any bounded $\\Omega \\subset \\mathbb{R}^n$ with $C^1-$boundary\n$$ \\int_{\\partial \\Omega \\times \\partial \\Omega} \\frac{\\left|\\left\\langle n(x), y - x \\right\\rangle \\left\\langle y - x, n(y) \\right\\rangle \\right| }{\\|x -y\\|^{n+1}}~d \\sigma(x) d\\sigma(y) \\geq c_n |\\partial \\Omega|$$\nwith equality if and only if the domain $\\Omega$ is convex.\n\\end{thm}\nIntegration is carried out with respect to the $(n-1)-$dimensional Hausdorff measure\nand the size of the boundary $ |\\partial \\Omega|$ is measured the same way. Somewhat to our surprise,\nwe were unable to find this statement in the literature. It can be interpreted as a global conservation law\nfor convex domains or as a geometric functional with an extremely large set of minimizers (all convex domains). 
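\n\nAs an elementary numerical sanity check (ours, and not part of the argument), the equality case is easy to test for a convex planar domain such as an ellipse, using the two-dimensional constant $c_2 = 2$ computed below. A minimal NumPy sketch, with arbitrarily chosen semi-axes and discretization, reads as follows.\n\begin{verbatim}\nimport numpy as np\n\n# Boundary of the ellipse x(t) = (a cos t, b sin t), midpoint rule in t.\na, b, N = 2.0, 1.0, 800\nt = (np.arange(N) + 0.5) * 2 * np.pi \/ N\nx = np.stack([a * np.cos(t), b * np.sin(t)], axis=1)\ntang = np.stack([-a * np.sin(t), b * np.cos(t)], axis=1)\nspeed = np.linalg.norm(tang, axis=1)               # |x'(t)| = ds\/dt\nnrm = np.stack([b * np.cos(t), a * np.sin(t)], axis=1)\nnrm \/= np.linalg.norm(nrm, axis=1, keepdims=True)  # outward unit normals\n\nd = x[None, :, :] - x[:, None, :]                  # y - x\nr = np.linalg.norm(d, axis=2)\nnp.fill_diagonal(r, np.inf)    # exclude i = j (integrand is bounded there)\nnum = np.abs(np.einsum('ik,ijk->ij', nrm, d)\n             * np.einsum('ijk,jk->ij', d, nrm))\nw = np.outer(speed, speed) * (2 * np.pi \/ N) ** 2  # ds(x) ds(y)\nlhs = np.sum(num \/ r ** 3 * w)\nperimeter = np.sum(speed) * 2 * np.pi \/ N\nprint(lhs, 2 * perimeter)  # should agree up to discretization error\n\end{verbatim}\nFor a convex boundary the two printed values should agree up to discretization error, whereas denting the boundary makes the double integral strictly larger than $c_2 |\partial \Omega|$, in accordance with the Theorem.\n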
The requirement of the boundary $\\partial \\Omega$ being $C^1$\ncan presumably be somewhat relaxed.\n\n\\begin{figure}[h!]\n\\begin{center}\n\\begin{tikzpicture}[scale=0.8]\n\\draw[thick] (0,0) ellipse (2.5cm and 1cm);\n\\filldraw (-2,0.6) circle (0.06cm);\n\\filldraw (2.37,0.3) circle (0.06cm);\n\\draw (-2, 0.6) -- (2.37, 0.3);\n\\draw [thick, ->] (-2, 0.6) -- (-2.28, 1.05);\n\\draw [thick, ->] (2.37, 0.3) -- (2.8, 0.55);\n\\node at (-1.9, 0.3) {$x$};\n\\node at (2.15, 0.05) {$y$};\n\\node at (-2.9, 1) {$n(x)$};\n\\node at (3.4, 0.5) {$n(y)$};\n\\end{tikzpicture}\n\\end{center}\n\\caption{If $x$ and $y$ are close, then $n(x)$ and $n(y)$ are nearly orthogonal to $x-y$ unless $x$ and $y$ are far apart.}\n\\end{figure}\n\n If $\\Omega$ is the unit ball in $\\mathbb{R}^n$, then\n$\\partial \\Omega = \\mathbb{S}^{n-1}$ and for any $x, y \\in \\mathbb{S}^{n-1}$, we have $n(x) = x$ and\n$ \\|x-y\\|^2 = 2 - 2\\left\\langle x, y \\right\\rangle$. This simplifies the expression since\n$$ \\left|\\left\\langle n(x), y - x \\right\\rangle \\left\\langle y - x, n(y) \\right\\rangle \\right| = (1 - \\left\\langle x,y \\right\\rangle)^2.$$\n Moreover, using rotational symmetry and\n $w = (1,0,0,\\dots,0)$ for the north pole,\n\\begin{align*}\n\\int_{\\mathbb{S}^{n-1} \\times \\mathbb{S}^{n-1}} \\frac{ (1 - \\left\\langle x, y \\right\\rangle)^2 }{\\|y - x\\|^{n+1}}~d \\sigma(x) d\\sigma(y) &=\n \\int_{\\mathbb{S}^{n-1} \\times \\mathbb{S}^{n-1}} \\frac{ (1 - \\left\\langle x, y \\right\\rangle)^2 }{(2-2\\left\\langle x, y\\right\\rangle)^{\\frac{n+1}{2}}}~d \\sigma(x) d\\sigma(y) \\\\\n &= \\frac{|\\mathbb{S}^{n-1}|}{2^{\\frac{n+1}{2}}} \\int_{\\mathbb{S}^{n-1} } (1-\\left\\langle x, w\\right\\rangle)^{-\\frac{n-3}{2}}~d \\sigma(x) \n\\end{align*}\nwhich implies\n\\begin{align*}\n c_n = \\frac{1}{2^{\\frac{n+1}{2}}} \\int_{\\mathbb{S}^{n-1} } (1-x_1)^{-\\frac{n-3}{2}}~d \\sigma(x)= \\frac{1}{2} \\int_{\\mathbb{S}^{n-1}} \\left| x_1 \\right| d\\sigma(x).\n \\end{align*}\nThe constant has a simple form in low dimensions where $c_2 = 2$ and $c_3 = \\pi$. \nOur proof can be best described as an application of Integral Geometry; we formulate and use a bilinear version of the Crofton formula. \nThe proof tells us a little bit more: if the domain $\\Omega$ is convex, stronger statements can be made.\n\\begin{corollary} For any convex, bounded $\\Omega \\subset \\mathbb{R}^n$ with $C^1-$boundary and all $x \\in \\partial \\Omega$\n$$ \\int_{\\partial \\Omega} \\frac{\\left|\\left\\langle n(x), y - x \\right\\rangle \\left\\langle y - x, n(y) \\right\\rangle \\right| }{\\|x -y\\|^{n+1}}~d\\sigma(y) = c_n.$$\nIf $x \\in \\Omega \\setminus \\partial \\Omega$ and $w \\in \\mathbb{S}^{n-1}$ is an arbitrary unit vector, then\n$$ \\int_{\\partial \\Omega} \\frac{\\left|\\left\\langle w, y - x \\right\\rangle \\left\\langle y - x, n(y) \\right\\rangle \\right| }{\\|x -y\\|^{n+1}}~d\\sigma(y) = 2 \\cdot c_n.$$\n\\end{corollary}\nWe note that $c_n$ is the exact same constant as above (which can be seen by integrating the first equation over $\\partial \\Omega$ with respect to $d \\sigma(x)$). Some of the conditions can presumably be relaxed a little. The Crofton formula is known to hold in a very general setting (see Santal\\'o \\cite{santa2}). 
It is an interesting question whether any of these results could be generalized to more abstract settings.\n\n\\section{Proof of the Theorem}\n The Crofton formula in $\\mathbb{R}^n$ (see, for example, Santal\\'o \\cite{santa}) states that for rectifiable $S$ of co-dimension 1 one has\n$$ |S| = \\alpha_n \\int_{L} n_{\\ell}(S) d\\mu(\\ell),$$\nwhere the integral runs over the space of all oriented lines in $\\mathbb{R}^n$ with respect to the kinematic measure $\\mu$ (which is invariant under all\nrigid motions of $\\mathbb{R}^n$) and $n_{\\ell}(S)$ is the number of times the line $\\ell$ intersects the surface $S$. The constant\n$\\alpha_n$ can be computed by picking $S = \\mathbb{S}^{n-1}$ but will not be needed for our argument.\n\n\n\\begin{lemma}\nLet $\\Omega \\subset \\mathbb{R}^n$ be a bounded domain with $C^1-$boundary. Almost all lines $\\ell$ (with respect to the kinematic measure) intersect the boundary $\\partial \\Omega$ either never or in exactly two points if and only if $\\Omega$ is convex. \\end{lemma}\n\\begin{proof}\nIf $\\Omega$ is convex, the result is immediate. Suppose now $\\Omega$ is not convex; then there exists a boundary point $x \\in \\partial \\Omega$\nsuch that the supporting hyperplane does not contain all of the domain on one side (note that because the boundary of $\\Omega$ is $C^1$, the\nsupporting hyperplane is unique).\nIn particular, there exists $y \\in \\Omega$ that is on the other side of the supporting hyperplane. The line $\\ell$ that goes through $x$ and $y$ satisfies\n$n_{\\ell}(\\partial \\Omega) \\geq 4$, moreover, this is stable under some perturbations of the line (and thus a set of kinematic measure larger than 0) because $\\partial \\Omega$ is $C^1$. \\end{proof}\n\n\\begin{proof}[Proof of the Theorem]\nWe first note for almost all lines $\\ell$ (with respect to the kinematic measure) the number of intersections $n_{\\ell}(\\partial \\Omega)$ is either 0 or at least 2: if a line enters the domain, it also has to exit the domain (lines that are tangential to the boundary are a set of measure 0). This implies\n$$ |\\partial \\Omega| = \\alpha_n \\int_{L} n_{\\ell}(\\partial \\Omega) ~d \\mu(\\ell) \\leq \\frac{\\alpha_n}{2} \\int_{L} n_{\\ell}(\\partial \\Omega)^2 ~d \\mu(\\ell)$$\nwith equality if and only if $\\Omega$ is convex.\nAt this point we pick a small $\\varepsilon > 0$ and decompose the boundary\n$ \\partial \\Omega = \\bigcup_i \\partial \\Omega_i$ into small disjoint regions that have diameter $\\leq \\varepsilon \\ll 1$ (and $\\varepsilon$ will later tend to 0).\nNaturally,\n$$ n_{\\ell}(\\partial \\Omega)^2 = \\left[ n_{\\ell}\\left( \\bigcup_i \\partial \\Omega_i\\right) \\right]^2 = \\left[ \\sum_i n_{\\ell}\\left( \\partial \\Omega_i\\right) \\right]^2= \\sum_{i,j} n_{\\ell}( \\partial \\Omega_i) n_{\\ell}(\\partial \\Omega_j)$$\nWe will now evaluate the integral over such a product. The diagonal terms $i=j$ behave\na little bit differently than the non-diagonal terms and we start with those. 
As $\\varepsilon \\rightarrow 0$, the fact that the \nboundary is $C^1$ implies that a `random' line will hit any such infinitesimal segment at most once and thus the Crofton formula implies\n\\begin{align*}\n \\frac{\\alpha_n}{2} \\int_{L} \\sum_i n_{\\ell}( \\partial \\Omega_i)^2 ~d \\mu(\\ell) &= (1+o(1)) \\frac{\\alpha_n}{2} \\int_{L} \\sum_i n_{\\ell}( \\partial \\Omega_i) ~d \\mu(\\ell)\\\\ \n &= (1+o(1)) \\frac{\\alpha_n}{2} \\int_{L} n_{\\ell}( \\partial \\Omega) ~d \\mu(\\ell) = (1+o(1)) \\frac{|\\partial \\Omega|}{2}\n \\end{align*}\n which is nicely behaved as $\\varepsilon \\rightarrow 0$ (the error could be made quantitative in terms of the modulus of continuity of the normal vector). It remains to analyze the off-diagonal terms.\n Let us assume that $\\partial \\Omega_x \\subset \\partial \\Omega$ is a small\nsegment centered around $x \\in \\partial \\Omega$ and $\\partial \\Omega_y \\subset \\partial \\Omega$ is a small\nsegment centered around $y \\in \\partial \\Omega$ and that both are scaled to have surface area $0 < \\varepsilon \\ll 1$. We can\nalso assume, because the surface is $C^1$ and we are allowed to take $\\varepsilon$ arbitrarily small, that they are approximately given by hyperplanes (and, as above, the error is a lower order term coming from curvature). \nThe quantity to be evaluated, \n$$ \\int_{L} n_{\\ell}( \\partial \\Omega_x) n_{\\ell}(\\partial \\Omega_y) d\\mu(\\ell), \\qquad \\mbox{can be seen in probabilistic terms}$$\nas the likelihood that a `random' line (random as induced by the kinematic measure $\\mu$) intersects\nboth $\\partial \\Omega_x$ and $\\partial \\Omega_y$. Appealing to the law of total probability\n$$ \\mathbb{P}\\left( n_{\\ell}( \\partial \\Omega_x) n_{\\ell}(\\partial \\Omega_y) = 1\\right) = \\mathbb{P}( n_{\\ell}(\\partial \\Omega_y) = 1 \\big| n_{\\ell}( \\partial \\Omega_x) =1) \\cdot \\mathbb{P}( n_{\\ell}( \\partial \\Omega_x) =1).$$\nThe last quantity is easy to evaluate: by Crofton's formula\n$$ \\mathbb{P}( n_{\\ell}( \\partial \\Omega_x) =1) = \\frac{1}{\\alpha_n} | \\partial \\Omega_x| = \\frac{\\varepsilon}{\\alpha_n}.$$\nIt remains to compute the second term: the likelihood of a `random' line hitting $\\partial \\Omega_y$ provided that it has already hit $\\partial \\Omega_x$. For this purpose, we first consider what we can say about random lines that have hit $\\partial \\Omega_x$. The distribution of $\\partial \\Omega_x \\cap \\ell$, provided it is not empty, is, to leading order, uniformly distributed over $\\partial \\Omega_x$ because $\\partial \\Omega_x$ is, to leading order, part of a hyperplane and the kinematic measure is translation-invariant. In contrast, the direction $\\phi$ of intersection (identified with unit vectors on $\\mathbb{S}^{n-1}$) is not uniformly distributed: the likelihood is proportional to the size of the projection of $\\Omega_x$ in direction of $\\phi$ which is proportional to the inner product of $\\phi$ with the normal vector $n(x)$. Hence the probability distribution of the direction of intersection $\\phi$ of lines conditioned on hitting $\\partial \\Omega_x$ is given by\n$$ \\Psi(\\phi) = \\frac{2 \\left\\langle n(x), \\phi \\right\\rangle } {\\int_{\\mathbb{S}^{n-1}} \\left|\\left\\langle w, n(x)\\right\\rangle \\right| d\\sigma(w) }, $$\nwhere the factor $2$ comes from the fact that each line creates two directions of intersections. 
\nThis allows us to perform a change of measure: we may assume that the lines are oriented uniformly at random provided that we later weigh the end result by $\\Psi$.\nIf the lines are oriented in all directions uniformly, then it is easy to see the likelihood of hitting $\\partial \\Omega_y$ provided one has already hit $\\partial \\Omega_x$: it is simply proportional to the size of the projection of $\\partial \\Omega_y$ onto the sphere of radius $\\|x-y\\|$ centered at $x$. The projection shrinks\nthe area by a factor of $ \\left| \\left\\langle n(y), (x-y)\/\\|x-y\\| \\right\\rangle \\right|$. The relative likelihood is then proprtional to\n$$P= \\Psi\\left( \\frac{x-y}{\\|x-y\\|}\\right) \\left| \\left\\langle n(y), \\frac{x-y}{\\|x-y\\|} \\right\\rangle \\right| \\frac{\\varepsilon}{\\|x-y\\|^{n-1}}.$$\nPlugging in the definition of $\\Psi$ this simplifies to\n$$ P = 2\\left( \\int_{\\mathbb{S}^{n-1}} \\left|\\left\\langle w, n(x)\\right\\rangle\\right| d\\sigma(w)\\right)^{-1} \\frac{\\left| \\left\\langle n(x), x-y\\right\\rangle \\left\\langle x-y, n(y) \\right\\rangle \\right|}{\\|x-y\\|^{n+1}} \\varepsilon,$$\nwhere we note that, by rotational symmetry of the sphere, the first integral is actually independent of the direction in which $n(x)$ is pointing.\nAltogether,\n\\begin{align*}\n |\\partial \\Omega|& = \\alpha_n \\int_{L} n_{\\ell}(\\partial \\Omega) ~d \\mu(\\ell) \\leq \\frac{\\alpha_n}{2} \\int_{L} n_{\\ell}(\\partial \\Omega)^2 ~d \\mu(\\ell) \\\\\n &= \\frac{\\alpha_n}{2} \\int_{L} \\sum_i n_{\\ell}(\\partial \\Omega_i) ~d \\mu(\\ell) + \\frac{\\alpha_n}{2} \\int_{L} \\sum_{i \\neq j} n_{\\ell}(\\partial \\Omega_i) n_{\\ell}(\\partial \\Omega_j) ~d \\mu(\\ell).\n\\end{align*}\nThe inequality is an equation if and only if $\\Omega$ is convex. As already discussed above, the first term tends to $|\\Omega|\/2$ as $\\varepsilon \\rightarrow 0$. Thus, for arbitrary $a \\in \\mathbb{S}^{n-1}$,\n\\begin{align*}\n\\frac{ |\\partial \\Omega|}{2} &\\leq \\lim_{\\varepsilon \\rightarrow 0} \\frac{\\alpha_n}{2} \\int_{L} \\sum_{i \\neq j} n_{\\ell}(\\partial \\Omega_i) n_{\\ell}(\\partial \\Omega_j) ~d \\mu(\\ell) \\\\\n&=\\left( \\int_{\\mathbb{S}^{n-1}} \\left|\\left\\langle w, a\\right\\rangle\\right| d\\sigma(w)\\right)^{-1} \\int_{\\partial \\Omega \\times \\partial \\Omega} \\frac{\\left|\\left\\langle n(x), y - x \\right\\rangle \\left\\langle y - x, n(y) \\right\\rangle \\right| }{\\|y - x\\|^{n+1}}~d \\sigma(x) d\\sigma(y).\n\\end{align*}\nThis establishes the inequality with constant\n$$ c_n =\\frac{1}{2} \\int_{\\mathbb{S}^{n-1}} \\left|\\left\\langle w, n\\right\\rangle\\right| d\\sigma(w) = \\frac{1}{2} \\int_{\\mathbb{S}^{n-1}} \\left| w_1 \\right| d\\sigma(w).$$\n\\end{proof}\n\n\n\\section{Proof of the Corollary}\n\\begin{proof} The proof of the Corollary is using the same computation as the proof of the Theorem in two additional settings leading to the two identities. Let $\\Omega$ be convex and let $x \\in \\partial \\Omega$. We start by considering an infinitesimal hyperplane segment $\\partial \\Omega_x$ centered around $x$. By convexity of $\\Omega$, almost all lines intersecting $\\partial \\Omega_x$ will intersect $\\partial \\Omega$ in exactly one other point. 
This implies, as the size of $\\partial \\Omega_x$ tends to 0, that\n$$ \\int_{L} n_{\\ell}( \\partial \\Omega_x) n_{\\ell}(\\partial \\Omega \\setminus \\partial \\Omega_x) d\\mu(\\ell) = (1+o(1)) \\cdot \\mu(\\partial \\Omega_x).$$\nAt the same time, by Crofton's formula, the likelihood of a line hitting $\\partial \\Omega_x$ is only a function of the surface area of $\\partial \\Omega_x$ and independent of everything else. Finally, using linearity, we can decompose $\\partial \\Omega \\setminus \\partial \\Omega_x$ into small hyperplane segments and use the computation above to deduce that\n$$ \\int_{\\partial \\Omega} \\frac{\\left|\\left\\langle n(x), y - x \\right\\rangle \\left\\langle y - x, n(y) \\right\\rangle \\right| }{\\|x -y\\|^{n+1}}~d\\sigma(y) = \\mbox{const}.$$\nIntegrating once more and applying the Theorem immediately implies that the constant has to be $c_n$. As for the second part, we can consider an infinitesimal hyperplane segment $H_x$ centered at $x \\in \\Omega \\setminus \\partial \\Omega$ with normal direction given by $w \\in \\mathbb{S}^{n-1}$. Every line hitting $H_x$ intersects $\\partial \\Omega$ in exactly two points and thus\n$$ \\int_{L} n_{\\ell}( H_x) n_{\\ell}(\\partial \\Omega) d\\mu(\\ell) = 2 \\cdot \\mu(H_x).$$\nBy Crofton's formula, the right-hand side does not depend on the shape or location of $H_x$ and is only a function of the surface area of the infinitesimal segment. As for the left-hand side, using the computation done in the proof of the Theorem shows\n $$ \\int_{L} n_{\\ell}( H_x) n_{\\ell}(\\partial \\Omega) d\\mu(\\ell) = (1+o(1)) \\int_{\\partial \\Omega} \\frac{\\left|\\left\\langle w, y - x \\right\\rangle \\left\\langle y - x, n(y) \\right\\rangle \\right| }{\\|x -y\\|^{n+1}}~d\\sigma(y)$$\nwhere the error term is with respect to the diameter of $H_x$ shrinking to 0.\n\\end{proof}\n\n\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section{Introduction}\n\nAcquiring comprehensive data from human speech is a challenging task\nthat, however, is crucial for understanding and modelling speech\nproduction as well as developing speech signal processing algorithms.\nThe possible approaches can be divided into \\emph{direct} and\n\\emph{indirect methods}. Direct methods concern measurements carried\nout on test subjects either by audio recordings, acquisition of\npressure, flow velocity, or even electrical signals (such as takes\nplace in electroglottography), or using different methods of medical\nimaging during speech. Indirect methods concern simulations using\ncomputational models (such as described in \\cite{A-A-M-M-V:MLBVFVTOPI}\nand the references therein) or measurements from \\emph{physical\n models}\\footnote{Physical models are understood as artefacts or\n replicas of parts of the speech anatomy in the context of this\n article.}. Typically, computational and physical models are created\nand evaluated based on data that has first been acquired by direct\nmethods. The main advantage of indirect methods is the absence of the\nhuman component that leads to experimental restrictions and unwanted\nvariation in data quality.\n\nThe purpose of this article is to describe an experimental\narrangement, its validation, and some experiments on one type of\nphysical model for vowel production: \\emph{acoustic resonators}\ncorresponding to vocal tract (VT) configurations during prolonged\nvowel utterance. 
The anatomic geometry for such resonators has been\nimaged by Magnetic Resonance Imaging (MRI) with simultaneous speech\nrecordings as described in\n\\cite{A-A-H-J-K-K-L-M-M-P-S-V:LSDASMRIS,K-M-O:PPSRDMRI}. The MRI voxel\ndata has been processed to surface models as explained in\n\\cite{O-M:ASUAMRIVTGE} and then printed in ABS plastic by Rapid\nPrototyping as explained below in Section~\\ref{ProcessingSubSec}. In\nitself, the idea of using 3D printed VT models in speech research is\nby no means new: see, e.g.,\n\\cite{T-K-E-S-W:EEIFPSKGFTC,E-S-W:NIMARVTDP,T-M-K:AAVTDVPFDTDM}.\n\nJust creating physical models of the VT is not enough for model\nexperiments: also a suitable acoustic signal source is required with\ncustom instrumentation and software associated to it. As these\nexperiments involve a niche area in speech research, directly\napplicable commercial solutions do not exists and constructing a\ncustom measurement suite looks an attractive option. Thus, we propose\nan \\emph{acoustic glottal source} design shown in\nFig.~\\ref{TractrixHornFig} that resembles the loudspeaker-horn\nconstructions shown in \\cite[Fig.~1]{T-K-E-S-W:EEIFPSKGFTC},\n\\cite[Fig.~3]{E-S-W:NIMARVTDP}, \\cite[Fig.~2a]{Wolfe:AIS:2000} . \n\nAll such source\/horn constructions can\nbe regarded as variants of \\emph{compression drivers} used as high\nimpedance sources for horn loudspeakers. Unfortunately, most\ncommercially available compression drivers are designed for\nfrequencies over $500 \\, \\mathrm{Hz}$ whereas a construction based on\na loudspeaker unit can easily be scaled down to lower frequencies\nrequired in speech research. We point out that high quality acoustic\nmeasurements on VT physical models can be carried out using a\nmeasurement arrangement not based an impedance matching horn or a\ncompression driver of some other kind; see\n\\cite[Fig.~3]{T-M-K:AAVTDVPFDTDM} where the sound pressure is fed into\nthe model through the mouth opening, and the measurements are carried\nout using a microphone at the vocal folds position. However, excitation\nfrom the glottal position is desirable because the face and the\nexterior space acoustics are issues as well.\n\n\n\n\\begin{figure}[t]\n\\begin{center}\n\\includegraphics[width=0.21\\textwidth]{GS_pic_9.jpg}\\hspace{0.31cm}\n\\includegraphics[width=0.43\\textwidth]{GS_pic_6.jpg}\\hspace{0.31cm}\n\\includegraphics[width=0.29\\textwidth]{dummyload.jpg}\n\\end{center}\n \\caption{\\label{TractrixHornFig} Left: Measurement arrangement for\n the frequency response of vowel [\\textipa{\\textscripta}] from a 3D\n printed VT geometry. Middle: The tractrix horn and the loudspeaker\n unit assembly separated. Right: The dummy load used for\n calibration measurements as explained in\n Section~\\ref{CalibrationSec}. }\n\\end{figure}\n\n\nThe general principle of operation of the acoustic glottal source is\nfairly simple. The source consists of a loudspeaker unit and an\nimpedance matching horn as shown disassembled in\nFig.~\\ref{TractrixHornFig} (middle panel). The purpose of the horn is\nto concentrate the acoustic power from the low-impedance loudspeaker\nto an opening of diameter $6\\, \\mathrm{mm}$, the high-impedance output\nof the source. There is, however, a number of conflicting design\nobjectives that need be taken into account in a satisfactory way. 
For\nexample, the instrument should not be impractically large, and it\nshould be usable for acoustic measurements of physical models of human\nVTs in the frequency range of interest, specified as $80\n\\ldots 7350 \\, \\mathrm{Hz}$ in this article. To achieve these goals\nin a meaningful manner, we use a design methodology involving\n\\textrm{(i)} heuristic reasoning based on mathematical acoustics,\ntogether with \\textrm{(ii)} numerical acoustics modelling of the main\ncomponents and their interactions. Numerical modelling of all details\nis not necessary for a successful outcome. Optimising the source\nperformance using only the method of trial and error and extensive\nlaboratory measurements would be overly time consuming as well.\n\nThe design and construction process was incremental, and it consisted\nof the following steps that were repeated when necessary:\n\\begin{enumerate}\n\\item[(i)] Choice of the acoustic design and the main components,\n based on general principles of acoustics, horn design, and\n feasibility,\n\\item[(ii)] Finite element (FEM) based modelling of the horn acoustics\n to check overall validity of the approach, to detect and then\n correct the expected problems in construction,\n\\item[(iii)] the construction of the horn and the loudspeaker\n assembly together with the required instrumentation,\n\\item[(iv)] a cycle of measurements and modifications, such as\n placement of acoustically soft material and silicone sealings in\n various parts based on, e.g., the FEM modelling,\n\\item[(v)] development of MATLAB software for producing properly\n weighted measurement signals for sweep experiments that compensate\n most of the remaining nonidealities, and\n\\item[(vi)] development of MATLAB software for reproducing the\n Liljencrants--Fant (LF) glottal waveform excitation at the glottal\n position of the physical models.\n\\end{enumerate}\n\n\nFinally, the source is used for measuring the frequency responses of\nphysical models of VT during the utterance of Finnish vowels\n[\\textipa{\\textscripta, i, u}], obtained from a 26-year-old male (in\nfact, one of the authors of this article).\nThe measured amplitude frequency responses are compared with the\nspectral envelope data from vowel samples shown in\nFig.~\\ref{VTResponseFig}, recorded in anechoic chamber from the same\ntest subject. In addition to these responses, vowel signal is produced\nby acoustically exciting the physical models by a glottal pulse\nwaveform of LF type, reconstructed at the output of the source. The\nproduced signals for vowels [\\textipa{\\textscripta, i, u}] have good\naudible resolution from each other, yet they have the distinct\n``robotic'' sound quality that is typical of most synthetically\nproduced speech.\n\nResonant frequencies extracted from the measured frequency responses\nare used for development and validation of acoustic and phonation\nmodels such as the one introduced in \\cite{A-A-M-M-V:MLBVFVTOPI}. The\nsynthetic vowel signals are intended for benchmarking Glottal Inverse\nFiltering (GIF) algorithms as was done in\n\\cite{Alku:EstVoiceSrc:2006,Alku:IFReview:2011}. Large amounts of\nmeasurement data are required for these applications which imposes\nrequirements to the measurement arrangement.\n\nSo as to physical dimension of the measured signals, this article\nrestricts to sound pressure measurements using microphones. If\nacoustic impedances are to be measured instead, some form of acoustic\n(perturbation) velocity measurement need be carried out. 
The velocity\nmeasurement can be carried out, e.g., by hot wire anemometers\n\\cite{Kob:MMV:2002}, impedance heads consisting of several microphones\n\\cite{Wolfe:EEI:2013}, or even by a single microphone using a\nresistive calibration load coupled to a high impedance source\n\\cite{Singh:AIM:1978}; see \\cite[Table~1]{Wolfe:IPM:2006} for various\napproaches. In general, carrying out velocity measurements is much\nmore difficult and expensive that measuring just sound pressure.\nDetermining pressure-to-pressure -responses of VT physical models is,\nhowever, sufficient for the purposes of this article since\n(\\textrm{i}) resonant frequencies can be determined from pressures,\nand (\\textrm{ii}) the GIF algorithm can be configured to run on\npressure data.\n\n\\section{\\label{BackgroundSec} Background}\n\nWe review relevant aspects from mathematical acoustics, horn design,\nsignal processing, and MRI data acquisition.\n\n\\subsection{Acoustic equations for horns}\n\nAcoustic horns are impedance matching devices that can be described as\nsurfaces of revolution in a three-dimensional space. Thus, they are\ndefined by strictly nonnegative continuous functions $r = R(x)$ where\n$x \\in [0, \\ell]$, $\\ell > 0$ being the length of the horn, and $r$\ndenoting the radius of horn at $x$. The end $x = 0$ ($x = \\ell$) is\nthe \\emph{input end} (respectively, the \\emph{output end}) of the\nhorn. It is typical, though not necessary, that the function\n$R(\\cdot)$ is either increasing or decreasing.\n\n\nThere exists a wide literature on the design of acoustic (tractrix)\nhorns for loudspeakers; see, e.g.,\n\\cite{Dinsdale:1974,Edgar:1981,Delgado:2000,U-W-B:OVMAH}. As a\ngeneral rule, the matching impedance at an end of the horn is\ninversely proportional to the opening area. For uniform diameter\nwaveguides, the matching impedance coincides with the characteristic\nimpedance given by $Z_0 = \\rho c\/A_0$ where $A_0$ is the\nintersectional area. The constant $c$ denotes the speed of sound and\n$\\rho$ is the density of the medium.\n\nTo describe the acoustics of an air column in a cavity such as a horn,\nwe use two (partial) differential equations. The three dimensional\nacoustics is described by the lossless Helmholtz equation in terms of\nthe velocity potential\n\\begin{equation} \\label{HelmHoltzEq}\n \\lambda^2 \\phi_\\lambda = c^2 \\Delta \\phi_\\lambda \\text{ on } \\Omega \\quad\n \\text{ and } \\quad \n \\frac{\\partial \\phi_\\lambda}{\\partial \\nu}({\\bf r}) = 0 \\text{ on }\n \\partial \\Omega \\setminus \\Gamma_0\n\\end{equation}\nwhere the acoustic domain is denoted by $\\Omega \\subset {\\mathbb{R}}^3$ with\nboundary $\\partial \\Omega$. A part of the boundary, denoted by\n$\\Gamma_0$, is singled out as an interface to the exterior space. In\nhorn designs of Section~\\ref{HelmholtzCavitySec}, the interface\n$\\Gamma_0$ is the opening at the narrow output end of the horn. In\nSection~\\ref{CompValSec}, the symbol $\\Gamma_0$ denotes a spherical\ninterface around the mouth opening. 
For now, we use the Dirichlet\nboundary condition on $\\Gamma_0$\n\\begin{equation} \\label{DirichletBndry}\n \\phi_\\lambda({\\bf r}) = 0 \\text{ on } \\Gamma_0.\n\\end{equation}\nEqs.~\\eqref{HelmHoltzEq}-- \\eqref{DirichletBndry} have a countably\ninfinite number of solutions $(\\lambda_j, \\phi_j) = (\\lambda,\n\\phi_\\lambda) \\in \\mathbb{C} \\times H^1(\\Omega) \\setminus \\{ 0\\}$ for $j = 1,\n2, \\ldots$, and each of the solutions is associated to a\n\\emph{Helmholtz resonant frequency} $f_j$ of $\\Omega$ by $f_j =\n\\mathrm{Im}{\\lambda_j}\/2 \\pi$.\n\nIn addition to acoustic resonances, the acoustic transmission\nimpedance of the source is important. Because it is more practical to\ndeal with scalar impedances, we use the lossless Webster's resonance\nmodel for defining it, again in terms of Webster's velocity\npotential. It is given for any $s \\in \\mathbb{C}$ by\n\\begin{equation} \\label{WebsterModel}\n\\begin{aligned}\n s^2 \\psi_s & = \\frac{c^2}{A(x)} \\frac{\\partial}{\\partial x} \\left (A(x) \\frac{\\partial \\psi_s}{\\partial x} \\right ) \\text{ on } [0,\\ell], \\\\\n - A(0) \\frac{\\partial \\psi_s}{\\partial x}(0) & = \\hat i(s), \\text{ and } R_L A(\\ell)\n \\frac{\\partial \\psi_s}{\\partial x}(\\ell) = \\rho s \\phi_s(\\ell)\n\\end{aligned}\n\\end{equation}\nwhere $A(x) = \\pi R(x)^2$ is the intersectional area of the horn,\n$\\rho$ is the density of air, and $R_L \\geq 0$ is the termination\nresistance at the output end $x = \\ell$ \\footnote{Because the external\n termination resistance $R_L$ is the only loss term in\n Eq.~\\eqref{WebsterModel}, we call the model lossless.}. Again, the\nfrequencies and Laplace transform domain $s$ variables are related by\n$f = \\mathrm{Im} s\/ 2\\pi$. The function $\\hat i(s)$ is the Laplace\ntransform of the (perturbation) volume velocity used to drive the\nhorn, and the output is similarly given as the Laplace transform of\nthe sound pressure given by $\\hat p(s) = \\rho s \\phi_s(\\ell)$. Now,\nthe transmission impedance of the horn, terminated to the resistance\n$R_L > 0$, is given by\n\\begin{equation} \\label{TransmissionImpedance}\n Z_{R_L}(s) = \\hat p(s)\/\\hat i(s) \\text{ for all } s \\in \\mathbb{C}_+.\n\\end{equation}\nNote that when solving Eq.~\\eqref{WebsterModel} for a fixed $s$, we may\nby linearity choose $\\hat i(s) = 1$ when plainly $Z_{R_L}(s) = \\rho s\n\\phi_s(\\ell)$. Further, as an impedance of a passive system, the\ntransmission impedance satisfies the positive real condition\n\\begin{equation}\n \\mathop{Re} {Z_{R_L}(s)} \\geq 0 \\text{ for all } s \\in \\mathbb{C}^{+} := \\{ s\n \\in \\mathbb{C}: \\mathop{Re}{s} > 0 \\}.\n\\end{equation}\n\n\\begin{figure}[t]\n\\begin{center}\n\\includegraphics[scale=0.35]{testTractrix.pdf} \\hspace{0.5cm}\n\\includegraphics[width=0.20\\textwidth]{tract_blend_3d.png} \\hspace{0.5cm}\n\\includegraphics[width=0.33\\textwidth]{tract_a_rend.png} \n\\end{center}\n \\caption{\\label{DesignOfSourceFig} Left: Wave propagation in a\n tractrix horn. The spherical wave front progressing along the\n centreline meets the horn surface at right angles. Middle: A 3D\n illustration of the impedance matching cavity within the\n source. 
Right: The geometry of the VT corresponding to vowel\n [\\textipa{\\textscripta}], equipped with a spherical boundary\n condition interface at the mouth opening.}\n\\end{figure}\n\n\n\\subsection{\\label{SupressionSubSec} Suppression of transversal modes in horns}\n\nBy transversal modes we refer to the resonant standing wave patterns\nin a horn where significant pressure variation is perpendicular to the\nhorn axis, as opposed to purely longitudinal modes. The purpose of\nthis section is to argue why transversal modes in horn geometries are\nundesirable from the point of view of the this article.\n\nAs a well-known special case, consider a wave\\-guide of length $\\ell$\nthat has a constant diameter, i.e., $A(x) = A_0$. Then the\ntransmission impedance given by\nEqs.~\\eqref{WebsterModel}--\\eqref{TransmissionImpedance} can be given\nthe explicit formula\n\\begin{equation} \\label{TransmissionLineImpedance}\n Z_{R_L}(s) = \\frac{Z_0 R_L}{Z_0 \\cosh{\\frac{s \\ell}{c}} + R_L \\sinh{\\frac{s \\ell}{c}}}\n\\end{equation}\nwhere $Z_0 := \\rho c\/A_0$ is called \\emph{characteristic\n impedance}. Because both $\\cosh$ and $\\sinh$ are entire functions,\nit is impossible to have $Z_{R_L}(s) = 0$ for any $s \\in \\mathbb{C}$. If the\ntermination resistance $R_L$ equals the characteristic impedance of\nthe wave\\-guide, the wave\\-guide becomes nonresonant, and we get the\npure delay $Z_{R_L}(s) = Z_0 e^{- s \\ell \/ c}$ of duration $T = \\ell \/\nc$ as expected.\n\nIt can be shown by analysing the Webster's model that the transmission\nimpedance $Z_{R_L}(s)$ given by Eq.~\\eqref{TransmissionImpedance} has\nno zeroes for $s \\in \\mathbb{C}$; i.e., it is an all-pole transmission impedance\nfor any finite value of termination resistance\n$R_L > 0$.\\footnote{This follows from Holmgren's uniqueness theorem\n for real analytic area functions $A(\\cdot)$.} The salient, desirable\nfeature of any all-pole impedance is that also the admittance\n$A_{R_L}(s) := Z_{R_L}(s)^{-1}$ is analytic and even\n$\\mathop{Re}{A_{R_L}(s)} > 0$ in $s \\in \\mathbb{C}^{+}$. This makes it easy to\nprecompensate the lack of flatness in the frequency response of\n$Z_{R_L}(s)$ by a causal, passive, rational filter whose transfer\nfunction approximates $A_{R_L}(s)$.\n\nOn the other hand, it has been shown in\n\\cite[Theorem~5.1]{L-M:PEEWEWP} that the time-dependent Webster's\nmodel describes accurately the transversal averages of a 3D wavefront\nin an acoustic wave\\-guide if the wavefront itself is constant on the\ntransversal sections of the wave\\-guide interior. Conversely,\nWebster's equation models only the longitudinal dynamics of the\nwave\\-guide acoustics by its very definition as can be understood\nfrom, e.g., \\cite{L-M:WECAD}. If the transversal modes in a\nwave\\-guide have been significantly excited, then Webster's equation\nbecomes a poor approximation, and all hopes of regarding the measured\ntransmission impedance of the wave\\-guide as an all-pole SISO system\nare lost. A more intuitive way of seeing why transversal acoustic\nmodes are expected to introduce zeroes to $Z_{R_L}(\\cdot)$ is by\nreasoning by analogy with Helmholtz resonators: the resonant side\nbranches of the wave\\-guide (eliciting transversal modes at desired\nfrequencies) can be used to eliminate frequencies from response.\n\nWe have now connected, via Webster's horn model, the appearance of\ntransversal modes in a horn to zeroes of the transmission impedance\n$Z_{R_L}(\\cdot)$. 
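\n\nAs a simple numerical illustration of this dichotomy (and not a part of the\nmeasurement software described later), the explicit formula\n\\eqref{TransmissionLineImpedance} can be evaluated directly, e.g., in MATLAB;\nall parameter values in the following sketch are illustrative only:\n\\begin{verbatim}\n% Sketch: |Z_RL| of a uniform waveguide for matched and mismatched\n% terminations; all parameter values are examples only.\nrho = 1.2; c = 343;              % assumed density of air and speed of sound\nA0  = pi*(3e-3)^2;               % area of a 6 mm diameter opening\nZ0  = rho*c\/A0;                  % characteristic impedance\nell = 0.173;                     % example waveguide length in metres\nf   = linspace(80, 7350, 2000);  % frequency range of interest in Hz\ns   = 1i*2*pi*f;\nfor RL = [0.2*Z0, Z0, 5*Z0]\n  Z = Z0*RL .\/ (Z0*cosh(s*ell\/c) + RL*sinh(s*ell\/c));\n  semilogy(f, abs(Z)); hold on;  % flat (a pure delay) only when RL = Z0\nend\n\\end{verbatim}\nThe magnitude response is flat precisely for the matched termination\n$R_L = Z_0$, and no choice of $R_L > 0$ introduces zeroes into the\nresponse, in accordance with the all-pole property discussed above.\n\n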
Because these zeroes are undesirable features in\ngood horn designs, we need to identify and suppress the transversal\nmodes as well as is feasible.\n\n\\subsection{Minimisation of transmission loss}\n\\label{TLMinSec}\n\nWhen a horn is excited from its input end, some of the excitation\nenergy is reflected back to the source with some delays. For horns of\nfinite length $\\ell$, there are two kinds of backward\nreflections. Firstly, the geometry of the horn may cause distributed\nbackward reflections over the length of the horn. Secondly, there\nmay be backward reflections at the output end of the horn, depending\non the acoustic impedance seen by the horn at the termination point $x\n= \\ell$ in Eq.~\\eqref{WebsterModel}. We next consider only the\nbackward reflections of the first kind since only they can be affected\nby the horn design.\n\nBecause the acoustics of the horn described by\nEqs.~\\eqref{HelmHoltzEq}--\\eqref{WebsterModel} is internally\nlossless, minimising the transmission loss (TL) amounts to minimising\nthe backward reflections that take place inside the horn. This is a classical shape\noptimisation problem in designing acoustical horns, and modern\napproaches are based on numerical topology optimisation techniques as\npresented in, e.g., \\cite{U-W-B:OVMAH,Y-W-B:LOTSHMIFFDPAH} where\nother design objectives (typical of loudspeaker horn design) are\ntaken into account as well.\n\nWe take another approach, and use analytic geometry and physical\nsimplifications of wave propagation for designing the function $r =\nR(x)$ on $[0, \\ell]$ following Paul~G.~A.~H.~Voigt, who proposed a\nfamily of tractrix horns in his patent ``Improvements in Horns for\nAcoustic Instruments'' in 1926; see \\cite{PV:IHAI}. His invention was\nto use the surface of revolution of the tractrix curve given by\n\\begin{equation} \\label{TractrixEq}\n x = a \\ln{\\frac{a + \\sqrt{a^2 - r^2}}{r}} - \\sqrt{a^2 - r^2}, \\quad r \\in [0, a]\n\\end{equation}\nwhere $a > 0$ is a parameter specifying the radius of the wide (input)\nend. Obviously, Eq.~\\eqref{TractrixEq} defines a decreasing function\n$x \\mapsto R(x) = r$ mapping $R:[0,\\infty) \\to (0,a]$ with $R(0) = a$\nand $\\lim_{x \\to \\infty}{R(x)} = 0$ which defines the \\emph{tractrix\n horn}. The required finite length $\\ell > 0$ of the horn is solved\nfrom $R(\\ell) = b$ where $0 < b < a$ is the required radius of the\n(narrow) output end.\n\nThe tractrix horn is known as the \\emph{pseudosphere} of constant\nnegative Gaussian curvature in differential geometry. That it acts as\na spherical wave horn is based on Huygens' principle and a geometric\nproperty of Eq.~\\eqref{TractrixEq}. More precisely, it can be seen\nfrom Fig.~\\ref{DesignOfSourceFig} (left panel) that a spherical wave\nfront of curvature radius $a$, propagating along the centreline of the\nhorn, always meets the tractrix horn surface at right\nangles. Disregarding, e.g., the viscosity effects in the boundary\nlayer at the horn surface, the right angle property is expected to\nproduce minimal backward reflections for spherical waves, in the same\nway as a planar wavefront behaves in a constant diameter wave\\-guide far\naway from the wave\\-guide walls.\n\n\\subsection{\\label{DeConvSec} Regularised deconvolution}\n\nA desired sound waveform target pattern will be reconstructed at the\nsource output by compensating the source dynamics in\nSection~\\ref{ImpulseSubSec}. 
Our approach is based on the idea of\n\\emph{constrained least squares filtering} used in digital image\nprocessing \\cite{Hunt:DLS:1972,Phillips:TNS:1962}.\n\nSuppose that a linear, time-invariant system has the real-valued\nimpulse response $h(t) = h_0(t) + h_e(t)$ that is expected to contain\nsome measurement error $h_e(t)$. When the input signal $u = u(t)$ is\nfed to the system, the measured output is obtained from\n\\begin{equation} \\label{ConvolutionEq}\ny(t) = (h_0*u)(t) + v(t) \\quad \\text{ with } \\quad v = h_e * u + w\n\\quad \\text{ for } \\quad t \\in [0, T].\n\\end{equation}\nAs usual, the convolution is defined by $(h_0*u)(t) = \\int_{-\\infty}^t\n{h_0(t - \\tau) u(\\tau) \\, d \\tau}$, and our task is to estimate $u$\nfrom Eq.~\\eqref{ConvolutionEq} given $y$ and some incomplete\ninformation about the output noise $v$. We assume $u, v \\in L^2(0,T)$\nand that $h_0$ is a continuous function. We define the noise level\nparameter by $\\epsilon = \\Vert v \\Vert_{L^2(0,T)} \/ \\Vert y \\Vert_{L^2(0,T)}$ and require that $0\n< \\epsilon < 1$ holds.\n\nUnfortunately, Eq.~\\eqref{ConvolutionEq} is not typically solvable for\nsmooth $y$ since the noise $v$ is not generally even continuous\nwhereas the convolution operator $h_0*$ is smoothing. Instead of\nsolving Eq.~\\eqref{ConvolutionEq}, we solve an estimate $\\v u$ for $u$\nfrom the regularised version of Eq.~\\eqref{ConvolutionEq}, given for\n$y \\in L^2(0,T)$ by\n\\begin{equation} \\label{RegularisedEq}\n \\begin{aligned}\n \\mathrm{Arg \\, min} & \\left ( \\kappa \\Vert \\v u \\Vert_{L^2(0,T)}^2 + \\Vert\n \\v u'' \\Vert_{L^2(0,T)}^2 \\right ) \\\\\n & \\text{ with the constraint } \\Vert\n y - h_0* \\v u \\Vert_{L^2(0,T)} = \\epsilon \\Vert y \\Vert_{L^2(0,T)}.\n \\end{aligned}\n\\end{equation}\nHere $T> 0$ is the sample length, $\\kappa > 0$ is a regularisation\nparameter, and $\\epsilon$ is the noise level introduced above in the\nview of $v$ in Eq.~\\eqref{ConvolutionEq}. Obviously, it is not\ngenerally possible to choose $\\epsilon = 0$ in\nEq.~\\eqref{RegularisedEq} without rendering $y = h_0* \\v u$\ninsolvable in $L^2(0,T)$.\n\nUsing Lagrange multipliers, the Lagrangian function takes the form\n\\begin{equation*}\n L_\\epsilon(\\v u, \\mu) = \\kappa \\Vert \\v u \\Vert_{L^2(0,T)}^2 \n + \\Vert \\v u'' \\Vert^2_{L^2(0,T)} \n -\\mu \\left (\\Vert y - h_0* \\v u \\Vert_{L^2(0,T)}^2 - \\epsilon^2 \\Vert y \\Vert^2_{L^2(0,T)} \\right ).\n\\end{equation*}\nUsing the variation $\\tilde{u}_\\eta = \\v u+\\eta w$ with $\\eta\n\\in {\\mathbb{R}}$, we get\n\\begin{equation*}\n\\begin{aligned}\n & \\frac{d}{d\\eta} L_\\epsilon (\\tilde{u}_{\\eta}, \\mu)\n \\bigg|_{\\eta=0} \\\\\n= & 2\\mathrm{Re} \\left( \\kappa \\langle w , \\v u \\rangle_{L^2(0,T)} + \\langle w'' , \\v u'' \\rangle_{L^2(0,T)}\n -\\mu \\langle h_0* w, y-h_0 *\\v u \\rangle_{L^2(0,T)}\n \\right) = 0\\,\n\\end{aligned}\n\\end{equation*}\nfor all test functions $w \\in \\mathcal{D}([0,T])$. 
Thus\n\\begin{equation*}\n \\kappa \\langle w, \\v u \\rangle + \\langle w'', \\v u'' \\rangle - \\mu \\langle h_0*w, y-h_0*\\v u \\rangle=0\n\\end{equation*}\nwhich, after partial integration and adjoining the convolution\noperator $h_0*$, gives\n\\begin{equation*}\n\\kappa \\v u + \\v u^{(4)} -\\mu (h_0*)^* \\left( y- h_0*\\v u \\right)\n = 0,\n\\end{equation*}\nleading to the normal equation\n\\begin{equation} \\label{NormalEq}\n\\v u = \\left[\\gamma \\left ( \\kappa + \\frac{d^4}{dt^4} \\right) +\n (h_0*)^*(h_0*) \\right]^{-1}(h_0*)^*y\n\\end{equation}\ntogether with the constraint $\\Vert y - h_0* \\v u \\Vert_{L^2(0,T)} =\n\\epsilon \\Vert y \\Vert_{L^2(0,T)}$ where $\\gamma= \\gamma(y,\\epsilon) \\in {\\mathbb{R}}$ satisfies $\\gamma\n= 1\/ \\mu$ (a constant independent of $t$). By a direct computation\nusing commutativity, we get for the residual\n\\begin{equation} \\label{ResidualEq}\n v_{\\kappa, \\mu} = y - h_0 * \\v u = \\left (\\kappa + \\frac{d^4}{dt^4}\n + \\mu (h_0*)^*(h_0*) \\right )^{-1} \\left (\\kappa y + y^{(4)} \\right ).\n\\end{equation}\nBecause $\\gamma, \\kappa > 0$, the inverses in\nEqs.~\\eqref{NormalEq}--\\eqref{ResidualEq} exist by positivity of the\noperators.\n\nSo, the possible noise components $v$ in Eq.~\\eqref{ConvolutionEq},\nconsistent with Eq.~\\eqref{NormalEq}, are the two parameter family $v\n= v_{\\kappa, \\mu}$ given in Eq.~\\eqref{ResidualEq} where $\\kappa, \\mu\n> 0$. For each $\\kappa$, we have\n\\begin{equation*}\n \\Vert v_{\\kappa, 0} \\Vert_{L^2(0,T)} \n = \\Vert y \\Vert_{L^2(0,T)} \n \\text{ and } \\lim_{\\mu \\to \\infty} { \\Vert v_{\\kappa, \\mu} \\Vert_{L^2(0,T)}} = 0.\n\\end{equation*}\nBy continuity and the inequality $0 < \\epsilon < 1$, there exists a\n$\\mu_0 = \\mu_0(\\epsilon, \\kappa)$ such that $\\Vert v_{\\kappa, \\mu_0}\n\\Vert_{L^2(0,T)} = \\epsilon \\Vert y \\Vert_{L^2(0,T)}$ as required. We\nconclude that $\\v u$ given by Eq.~\\eqref{NormalEq} with $\\gamma =\n1\/\\mu_0$ is a solution of the optimisation problem\n\\eqref{RegularisedEq}, and, hence, the regularised solution of\nEq.~\\eqref{ConvolutionEq} depending on parameters $\\epsilon, \\kappa >\n0$. In practice, the values of these regularising parameters must be\nchosen based on the original problem data $y$ and $v$.\n\nIn frequency plane, Eqs.~\\eqref{NormalEq}--\\eqref{ResidualEq} take the\nform\n\\begin{equation*}\n \\hat u(\\xi) = \\frac{\\overline{H(i \\xi)} \\hat y (\\xi)}\n {\\gamma \\left ( \\kappa + \\xi^4 \\right) + \\abs{H(i \\xi)}^2} \n\\end{equation*}\nwhere $H(s) = \\int_0^\\infty {e^{-st}h_0(t) \\, dt} $ is the transfer\nfunction corresponding to $h_0(t)$ and\n\\begin{equation}\\label{remainderEq}\n \\hat v_{\\kappa,\\mu}(\\xi) = G_{\\kappa,\\mu}(i \\xi) \\hat y (\\xi) \\quad \\text{ where } \\quad\n G_{\\kappa,\\mu}(s) = \\left (1 + \\frac{\\mu \\abs{H(s)}^2}{ \\kappa + s^4} \\right )^{-1}.\n\\end{equation}\nNote that $\\abs{G_{\\kappa,\\mu}(i \\xi)} < 1$, and the last equation\nindicates that the high frequency components of $y$ and\n$v_{\\kappa,\\mu}$ are essentially identical. 
By Parseval's identity,\nthe value of $\\gamma = 1\/\\mu_0$ is solved from $\\frac{1}{2\n \\pi}\\int_{-\\infty}^\\infty {\\abs{\\hat v_{\\kappa,\\mu}(\\xi)}^2\n \\, d \\xi } = \\epsilon^2 \\Vert y \\Vert^2_{L^2(0,T)} $.\n\n\n\n\\subsection{\\label{ProcessingSubSec} Processing of VT anatomic data and sound}\n\nThree-dimensional anatomic data of the VT is used for\ncomputational validations of the sound source as well as for carrying\nout measurements using physical models.\n\nVT anatomic geometries were obtained from a (then)\n26-year-old male (in fact, one of the authors of this article) using\n3D MRI during the utterance of Finnish vowels [\\textipa{\\textscripta,\n i, u}] as explained in \\cite{A-A-H-J-K-K-L-M-M-P-S-V:LSDASMRIS}. A\nspeech sample was recorded during the MRI, and it was processed for\nformant analysis by the algorithm described in \\cite{K-M-O:PPSRDMRI}.\nThe formant extraction for Section~\\ref{CompValSec} was carried out\nusing Praat \\cite{Praat:2016}. Three of the MR images\ncorresponding to Finnish quantal vowels [\\textipa{\\textscripta, i, u}]\nwere processed into 3D surface models (i.e., STL files) as explained\nin \\cite{O-M:ASUAMRIVTGE}. A spherical boundary condition interface\nwas attached at the mouth opening of the geometry corresponding to\n[\\textipa{\\textscripta}] in order to produce the computational geometries\nshown in Fig.~\\ref{CoupledSystemRes}.\n\n\\begin{figure}[t]\n\\begin{center}\n\\includegraphics[width=0.28\\textwidth]{a_print.jpg}\\hspace{0.2cm}\n\\includegraphics[width=0.277\\textwidth]{i_print.jpg}\\hspace{0.2cm}\n\\includegraphics[width=0.345\\textwidth]{u_print.jpg} \\hspace{0.2cm}\n\\end{center}\n \\caption{\\label{VTPrints} Physical VT models of\n articulation geometries corresponding to [\\textipa{\\textscripta,\n i, u}]. Adaptor sleeves have been glued to the glottis ends\n for coupling to the sound source.}\n\\end{figure}\n\nA Stratasys uPrint SE Plus 3D printer was used to produce the physical\nmodels in ABS plastic from the STL files, shown in\nFig.~\\ref{VTPrints}. The printed models are in natural scale with a\nwall thickness of $2 \\, \\mathrm{mm}$; they extend from the glottal\nposition to the lips, and they were equipped with an adapter (visible\nin Fig.~\\ref{VTPrints}) for coupling them to the acoustic sound source\nshown in Fig.~\\ref{TractrixHornFig} (left panel).\n\n\\section{Design and construction}\n\nBased on the considerations of Section~\\ref{BackgroundSec}, we\nconclude that the following three design objectives should be met for\na successful design:\n\\begin{enumerate}\n\\item[(i)] \\label{Req1} The transmission loss (TL) from\n the input to the output should be as low as possible.\n\\item[(ii)] \\label{Req2} There should be no strong transversal\n resonant modes inside the impedance matching cavity of the device.\n\\item[(iii)] \\label{Req3} The frequency response\n $\\omega \\mapsto \\abs{Z_{R_L}(i \\omega)}$ of the transmission\n impedance should be as flat as possible for relevant termination\n resistances.\n\\end{enumerate}\nIt is difficult --- if not impossible --- to optimise\nall these characteristics in the same device. 
Fortunately, DSP techniques can\nbe used to cancel out some undesirable features, and instead of\nrequirement \\textrm{(iii)} it is more practical to pursue a more\nmodest goal:\n\\begin{enumerate}\n\\item[(iii')] \\label{Req3weaker} The frequency response $\\omega \\mapsto\n \\abs{Z_{R_L}(i \\omega)}$ should be such that its lack of flatness can be\n accurately precompensated by causal, rational filters.\n\\end{enumerate}\nWe next discuss each of these design objectives and their solutions in\nthe light of Section~\\ref{BackgroundSec}.\n\nThe tractrix horn geometry was chosen so as to minimise the TL as\nexplained in Section~\\ref{TLMinSec}. In the design proposed in this\narticle, we use $a = 50.0 \\, \\mathrm{mm}$, $b = 2.2 \\, \\mathrm{mm}$,\nand $\\ell = 153.0 \\, \\mathrm{mm}$ as nominal values in\nEq.~\\eqref{TractrixEq}. The physical size was decided based on reasons\nof practicality and the availability of suitable loudspeaker units.\n\n\nContrary to horn loudspeakers or gramophone horns having essentially\npoint sources at the narrow input end of the horn, the sound source is\nnow located at the wide end of the horn. Hence, it would be desirable\nto generate the acoustic field by a spherical surface source of\ncurvature radius $a$ whose centrepoint lies at the centre of the\nopening of the wider input end. This goal is impossible to precisely\nattain using commonly available loudspeaker units, but a reasonable\noutcome can be obtained just by placing the loudspeaker (with a\nconical diaphragm) at an optimal distance from the tractrix horn as\nshown in Fig.~\\ref{DesignOfSourceFig} (middle panel). This results in\na design where the \\emph{impedance matching cavity} of the source is a\nhorn as well, consisting of the tractrix horn that has been extended\nat its wide end by a cylinder of diameter $2 a = 100.0 \\, \\mathrm{mm}$\nand height $h = 20.0 \\, \\mathrm{mm}$. Thus, the total longitudinal\ndimension of the impedance matching cavity inside the sound source is\n$\\ell_{tot} = \\ell + h = 173.0 \\, \\mathrm{mm}$ as shown in\nFig.~\\ref{DesignOfSourceFig} (middle panel). This dimension\ncorresponds to the quarter wavelength resonant frequency at $f_{low} =\n1648 \\, \\mathrm{Hz}$, obtained by solving the eigenvalue problem\nEq.~\\eqref{HelmHoltzEq} by finite element method (FEM) shown in\nFig.~\\ref{HornSystemRes} (left panel). For frequencies much under\n$f_{low}$, the impedance matching cavity need not be considered as a\nwave\\-guide but just as a delay line.\n\nSince the geometry of impedance matching cavity has already been\nspecified, there is no \\emph{geometric} degrees of freedom left for\nimproving anything. Thus, it is unavoidable to relax design\nrequirement \\textrm{(iii)} in favour of the weaker requirement\n\\textrm{(iii')}. As discussed in Section~\\ref{SupressionSubSec},\nrequirement \\textrm{(iii')} can, however, be satisfactorily achieved\nif overly strong transversal modes of the impedance matching cavity\ncan be avoided, i.e., the design requirement \\textrm{(ii)} is\nsufficiently well met.\n\n\n\n\n\n\n\n\\subsection{\\label{HelmholtzCavitySec} Modal analysis of the impedance matching cavity}\n\nThe first step in treating transversal modes of the impedance matching\ncavity is to detect and classify them. Understanding the modal\nbehaviour helps the optimal placement of attenuating material. 
For\nthis purpose, the Helmholtz equation \\eqref{HelmHoltzEq} was solved by\nFEM in the geometry of the impedance matching cavity, producing\nresonances up to $8 \\, \\mathrm{kHz}$. Some of the modal pressure\ndistributions are shown in Fig.~\\ref{HornSystemRes}. As explained in\nSection~\\ref{CompValSec} below, also the acoustic resonances of the VT\ngeometry shown Fig.~\\ref{DesignOfSourceFig} (right panel) were\ncomputed in a similar manner, and their perturbations were evaluated\nwhen coupled to the impedance matching cavity as shown in\nFig.~\\ref{CoupledSystemRes}.\n\nThe triangulated surface mesh of the impedance matching cavity was\ncreated by generating a profile curve of the tractrix horn in MATLAB,\nfrom which a surface of revolution was created in Comsol where the\ncylindrical space and the loudspeaker profile were included.\nSimilarly, the surface mesh of the VT during phonation of the Finnish\nvowel \\textipa{[\\textscripta]} was extracted from MRI\ndata~\\cite{O-M:ASUAMRIVTGE}. This surface mesh was then attached to\nthe surface mesh of the spherical interface $\\Gamma{_0}$ shown in\nFig.~\\ref{DesignOfSourceFig} (right panel). For computations required\nin Section~\\ref{CompValSec}, the two surface meshes (i.e., the cavity\nand the VT) were joined together at the output end of the tractrix\nhorn and the glottis, respectively. Finally, tetrahedral volume meshes\nfor FEM computations were generated using GMSH~\\cite{gmsh} of all of\nthe three geometries with details given in Table~\\ref{MeshTable}.\n\n\\begin{table}[h]\n \\centerline{\n \\begin{tabular}{|l|c|c|c|c|}\n\\hline\n & tetrahedrons & d.o.f. \\\\\n\\hline\n\\textbf{Impedance matching cavity} & 71525 & 15246 \\\\\n\\textbf{Cavity joined with VT} & 175946 & 38020 \\\\\n\\textbf{VT} & 97847 & 21745 \\\\\n\\hline\n\\end{tabular}}\n\\caption{\\label{MeshTable} The number of tetrahedrons of the three FEM\n meshes used for resonance computations in\n Sections~\\ref{HelmholtzCavitySec}~and~\\ref{CompValSec}. The degrees\n of freedom indicates the size of resulting system of linear\n equations for eigenvalue computations. }\n\\end{table}\n\nThe Helmholtz equation \\eqref{HelmHoltzEq} with the Dirichlet boundary\ncondition \\eqref{DirichletBndry} at the output interface $\\Gamma_0$ is\nsolved by FEM using piecewise linear elements. In this case, the\nproblem reduces to a linear eigenvalue problem whose lowest\neigenvalues give the resonant frequencies and modal pressure\ndistributions of interest. Some of these are shown in\nFig.~\\ref{HornSystemRes}.\n\n\n\n\n\n\\begin{figure}[t]\n\\begin{center}\n\\includegraphics[width=0.22\\textwidth]{first_long.png}\\hspace{0.2cm}\n\\includegraphics[width=0.22\\textwidth]{first_cylinder.png}\\hspace{0.2cm}\n\\includegraphics[width=0.22\\textwidth]{first_uncertain.png} \\hspace{0.2cm}\n\\includegraphics[width=0.22\\textwidth]{first_certain.png}\n\\end{center}\n \\caption{\\label{HornSystemRes} Pressure distributions of some\n resonance modes of the impedance matching cavity above the loudspeaker\n unit. The lowest mode is at $1648 \\, \\textrm{Hz}$, and it is\n purely longitudinal. The lowest transversal mode is at $1994 \\,\n \\textrm{Hz}$, and it is due to the cylindrical part joining the\n loudspeaker unit to the tractrix horn. At $4218 \\, \\textrm{Hz}$,\n a transversal mode appears where strong excitation exists between\n the cylindrical part and the horn. 
The lowest transversal mode\n that is solely due to the tractrix horn geometry is found at $5229\n \\, \\textrm{Hz}$.}\n\\end{figure}\n\n\n\n\n\nThe purely longitudinal acoustic modes were found at frequencies\n$1648 \\, \\mathrm{Hz}$, $2540 \\, \\mathrm{Hz}$, $3350 \\, \\mathrm{Hz}$,\n$3771 \\, \\mathrm{Hz}$, $4499 \\, \\mathrm{Hz}$, $5061 \\, \\mathrm{Hz}$,\n$5745 \\, \\mathrm{Hz}$, $6671 \\, \\mathrm{Hz}$, $7088 \\, \\mathrm{Hz}$,\n$7246 \\, \\mathrm{Hz}$, and $7737 \\, \\mathrm{Hz}$. All of these\nlongitudinal modes have multiplicity $1$. Transversal modes divide\ninto three classes: \\textrm{(i)} those where excitation is mainly in\nthe cylindrical part of the impedance matching cavity, \\textrm{(ii)}\nthose where the excitation is mainly in the tractrix horn, and\n\\textrm{(iii)} those where both parts of the impedance matching cavity\nare excited to equal extent. Resonances due to the cylindrical part\nappear at frequencies $1994 \\, \\mathrm{Hz}$, $3094 \\, \\mathrm{Hz}$,\n$4150 \\, \\mathrm{Hz}$, $6063 \\, \\mathrm{Hz}$, $6262 \\, \\mathrm{Hz}$,\n$6872 \\, \\mathrm{Hz}$, $6942 \\, \\mathrm{Hz}$, $7334 \\, \\mathrm{Hz}$,\nand $7865 \\, \\mathrm{Hz}$, and they all have multiplicity $2$ except\nthe resonance at $4150 \\, \\mathrm{Hz}$ that is simple. (Note that\nthere is a longitudinal resonance at $4150 \\, \\mathrm{Hz}$ as well.)\nThere are only four frequencies corresponding to the transversal modes\n(all with multiplicity $2$) in the tractrix horn: namely,\n$5229 \\, \\mathrm{Hz}$, $5697 \\, \\mathrm{Hz}$, $6764 \\, \\mathrm{Hz}$,\nand $6781 \\, \\mathrm{Hz}$. The peculiar mixed modes of the third kind\nwere observed at $4218 \\, \\mathrm{Hz} \\, (2)$,\nand $5200 \\, \\mathrm{Hz} \\, (3)$ where\nthe number in the parenthesis denotes the multiplicity.\n\nBased on these observations, the acoustic design of the impedance\nmatching cavity was deemed satisfactory as the transversal dynamics of\nthe tractrix horn shows up only above $5.2 \\, \\mathrm{kHz}$. The\nlower resonant frequencies of the wide end of the cavity are treated\nby placement of attenuating material as described in\nSection~\\ref{ConstructionDetailsSec}.\n\n\n\\subsection{\\label{ConstructionDetailsSec} Details of the construction}\n\n\nThe tractrix horn geometry was produced using the parametric Tractrix\nHorn Generator OpenSCAD script \\cite{TractrixGenerator:2014}. The horn\nwas 3D printed by Ultimaker Original in PLA plastic with wall thickness\nof $2 \\, \\mathrm{mm}$ and fill density of $100 $\\%. The inside surface\nof the print was coated by several layers of polyurethane lacquer,\nafter which it was polished. The horn was installed inside a cardboard\ntube, and the space between the horn and the tube was filled with\n$\\approx 1.2 \\, \\textrm{kg}$ of \\emph{plaster of Paris} in order to\nsuppress the resonant behaviour of the horn shell itself and to\nattenuate acoustic leakage through the horn walls.\n\nThe walls of the cylindrical part of the impedance matching cavity\nwere covered by felt in order to control the standing waves in the\ncylindrical part of the cavity. Acoustically soft material, i.e.,\npolyester fibre, was placed inside the source (partly including the\nvolume of the tractrix horn) by the method trial and improvement,\nbased on iterated frequency response measurements as explained in\nSection~\\ref{CompensationSubSec} and heuristic reasoning based on\nFig.~\\ref{HornSystemRes}. 
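\n\nA rough analytic cross-check of the modal computations behind this\nreasoning is obtained by comparing the lowest transversal resonance of\nthe cylindrical part with the first non-planar cutoff of a rigid\ncircular duct of radius $a$. A minimal MATLAB check, assuming the speed\nof sound $c = 343 \\, \\mathrm{m\/s}$, reads:\n\\begin{verbatim}\n% Cross-check: lowest transversal mode of the cylindrical part of the\n% impedance matching cavity (c = 343 m\/s assumed).\nc       = 343;       % speed of sound\na       = 0.050;     % radius of the cylindrical part\nalpha11 = 1.8412;    % first zero of J_1', lowest non-planar duct mode\nf_cut   = c*alpha11\/(2*pi*a)   % approximately 2.0 kHz\n\\end{verbatim}\nThe estimate of approximately $2.0 \\, \\mathrm{kHz}$ agrees with the FEM\nvalue $1994 \\, \\mathrm{Hz}$ to within about one per cent, and the\nmultiplicity $2$ found by FEM matches the twofold degeneracy of the\nfirst azimuthal duct mode.\n\n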
The main purpose of this work was to\nsuppress overly strong transversal modes shown in\nFig.~\\ref{HornSystemRes} in the impedance matching cavity shown in\nFig.~\\ref{DesignOfSourceFig} (middle panel). As a secondary\neffect, also the purely longitudinal modes got suppressed. Adding\nsound soft material resulted in the attenuation of unwanted resonances\nat the cost of high but tolerable increase in the TL of the source.\n\nThe loudspeaker unit of the source is contained in the hardwood box\nshown in Fig.~\\ref{TractrixHornFig}, and its wall thickness $40 \\,\n\\textrm{mm}$. The box is sealed air tight by applying silicone mass to\nall joints from inside in order to reduce acoustic leakage. Its exterior\ndimensions are $215 \\, \\textrm{mm} \\times 215 \\, \\textrm{mm} \\times\n145 \\, \\textrm{mm}$, and it fits tightly to the horn assembly\ndescribed above. The horn assembly and the space of the loudspeaker\nunit above the loudspeaker cone form the impedance matching cavity of\nthe source shown in Fig.~\\ref{DesignOfSourceFig}. There is another\nacoustic cavity under the loudspeaker unit whose dimension are $135.0\n\\, \\mathrm{mm} \\times 135.0 \\mathrm{mm} \\times 70.0 \\mathrm{mm}$. Also\nthis cavity was tightly filled with acoustically soft material to\nreduce resonances.\n\n\\subsection{Electronics and software for measurements}\n\nWe use a $4''$ two-way loudspeaker unit (of generic brand) whose\ndiameter determines the opening of the tractrix horn. Its nominal\nmaximum output power is $30 \\, \\mathrm{W \\, (RMS)}$ when coupled to a\n$4 \\, \\Omega$ source. The loudspeaker is driven by a power amplifier\nbased on TBA810S IC. There is a decouplable mA-meter in the\nloudspeaker circuit that is used for setting the output level of the\namplifier to a fixed reference value at $1 \\, \\mathrm{kHz}$ before\nmeasurements. The power amplifier is fed by one of the output channels\nof the sound interface ``Babyface'' by RME, connected to a laptop\ncomputer via USB interface.\n\nThe acoustic source contains an electret \\emph{reference microphone}\n(of generic brand, $\\oslash \\, 9 \\, \\mathrm{mm}$, biased at $5 \\,\n\\mathrm{V}$) at the output end of the horn. The reference microphone\nis embedded in the wave\\-guide wall, and there is an aperture of\n$\\oslash \\, 1 \\, \\mathrm{mm}$ in the wall through which the microphone\ndetects the sound pressure. The narrow aperture is required so as not\nto overdrive the microphone by the very high level of sound at the\noutput end of the horn, and it is positioned about $13.5 \\,\n\\mathrm{mm}$ below the position where vocal folds would be in the 3D\nprinted VT model (depending on the anatomy).\n\nThe measurements near the mouth position of 3D-printed VTs are carried\nout by a \\emph{signal microphone}. As a signal microphone, we use\neither a similar electret microphone unit as the reference microphone\nor Br\\\"uel \\& Kj\\ae{}ll measurement microphone model 4191 with the\ncapsule model 2669 (as shown in Fig.~\\ref{TractrixHornFig} (left\npanel)) and preamplifier Nexus 2691. The B\\&K unit has over $20 \\,\n\\mathrm{dB}$ lower noise floor compared to electret units which,\nhowever, has no significance when measuring, e.g., the resonant\nfrequencies of an acoustic load in a noisy environment. Measurements\nin the anechoic chamber yield much cleaner data when the B\\&K unit is\nused, and this is advisable when studying acoustic loads with higher\nTL and lower signal levels. 
Then, extra attention has to be paid to\nall other aspects of the experiments so as to achieve the full\npotential of the high-quality signal microphone.\n\nThe reference and the signal electret microphone units were picked\nfrom a set of $10$ units to ensure that their frequency responses\nwithin $80 \\, \\mathrm{Hz} \\ldots 8 \\ \\textrm{kHz}$ are practically\nidentical. It was observed that there are very little differences in\nthe frequency and phase response of any two such microphone\nunits. Furthermore, these microphones are practically\nindistinguishable from the Panasonic WM-62 units (with nominal\nsensitivity $-45 \\pm 4$ dB re $1$ V\/Pa at $1$ kHz) that were used in\nthe instrumentation for MRI\/speech data acquisition reported in\n\\cite{A-A-H-J-K-K-L-M-M-P-S-V:LSDASMRIS}.\n\nFinal results given in Section~\\ref{MeasSigSec} were measured using\nthe Br\\\"uel \\& Kj\\ae{}ll model 4191 at the mouth position. The results\nshown in \\cite[Fig.~5]{K-M-O:PPSRDMRI} were measured using the\nelectret unit matched with the similar reference microphone, embedded\nto the source at the glottal position. In this article, the electret\nmicrophone measurements at the mouth position were only used for\ncomparison purposes.\n\nBiases for both the electret microphones are produced by a custom\npreamplifier having two identical channels based on LM741 operational\namplifiers. The amplifier has nonadjustable $40 \\, \\mathrm{dB}$\nvoltage gain in its passband that is restricted to $40 \\, \\textrm{Hz}\n\\ldots 12 \\, \\textrm{kHz}$. Particular attention is paid to reducing\nthe ripple in the microphone bias as well as the cross-talk between\nthe channels. The input impedance $2.2 \\, \\mathrm{k \\Omega}$ of the\npreamplifier is a typical value of electret microphones,\nand the output is matched to $300 \\, \\Omega$ for the two input\nchannels of the Babyface unit.\n\nSignal waveforms and sweeps are produced numerically as explained in\nSections~\\ref{CalibrationSec} for all experiments. Frequency response\nequalisation and other kinds of time and frequency domain\nprecompensations are a part of this process. All computations are done\nin MATLAB (R2016b) running on Lenovo Thinkpad T440s, equipped with 3.3\nGHz Intel Core i7-4600U processor and Linux operating system. The\nexperiments are run using MATLAB scripts, and access from MATLAB to\nthe Babyface is arranged through Playrec (a MATLAB\nutility,~\\cite{Playrec}).\n\n\n\n\\subsection{Measurement arrangement}\n\n\\begin{figure}[t]\n\\begin{center}\n\\includegraphics[width=\\textwidth]{meas_setup.png}\\hspace{0.2cm}\\end{center}\n \\caption{\\label{SystemGraph} An illustration of the\n measurement system.}\n\\end{figure}\n\nAn outline of the measurement arrangement for sweeping a VT print is\nshown in Fig.~\\ref{SystemGraph}. Both the amplifiers, the digital\nanalogue converter (DAC), and the computer are located outside the\nanechoic chamber. The arrangement inside the anechoic chamber\ncontains two microphones: the reference unit at the glottal position\ninside the source, and the external microphone in front of the mouth\nopening. 
The position of the external microphone must be kept the same in\nall measurements to ensure reproducibility.\n\nBecause of the quite high transmission loss of the VT print (in\nparticular, in the VT configuration corresponding to [\\textipa{i}]) and\nthe relatively low sound pressure level produced by the source at the\nglottal position (compared to the sound pressure produced by human\nvocal folds), one may have to carry out measurements using an acoustic\nsignal level only about $20 \\ldots 30 \\, \\mathrm{dB}$ above the\nhearing threshold. The laboratory facilities require using\nwell-shielded coaxial microphone cables of length $10 \\, \\mathrm{m}$\nin order to prevent excessive hum. Another significant source of\ndisturbance is the acoustic leakage from the source directly to the\nexternal microphone. This leakage was reduced by $\\approx 6 \\,\n\\mathrm{dB}$ by enclosing the sound source in a box made of\ninsulating material, and by preventing sound conduction through\nstructures by placing the source on silicone cushions resting on a\nheavy stone block (not shown in\nFigs.~\\ref{TractrixHornFig}~and~\\ref{SystemGraph}).\n\n\n\\section{\\label{CompValSec} Computational validation using a VT load}\n\nWhen an acoustic load is coupled to a sound source containing an\nimpedance matching cavity, the measurements carried out using the\nsource necessarily concern the joint acoustics of the source and the\nload. Hence, precautions must be taken to ensure that the\ncharacteristics of the acoustic load truly are the main component in\nthe measurement results. In the case of the proposed design, the small\nintersectional area of the opening at the source output leads to a high\nacoustic output impedance, which is consistent with a reasonably good\nacoustic \\emph{current} source. Also, the narrow glottal position of\nthe VT helps in isolating the two acoustic spaces from each other.\n\nWe proceed to evaluate this isolation by computing the Helmholtz\nresonance structures of the joint system shown in\nFig.~\\ref{CoupledSystemRes} and comparing them with \\textrm{(i)} the\nformant frequencies measured from the same test subject during the MR\nimaging, and \\textrm{(ii)} the Helmholtz resonances of the VT geometry\nshown in Fig.~\\ref{DesignOfSourceFig} (right panel). The VT part of\nboth the computational geometries is the same, and it corresponds to\nthe vowel [\\textipa{\\textscripta}]. The vowel [\\textipa{\\textscripta}]\nout of [\\textipa{\\textscripta, i, u}] was chosen because its three\nlowest formants are most evenly distributed in the voice band of\nnatural speech. \n\n\n\n\\begin{table}[h]\n \\centerline{\n \\begin{tabular}{|l|c|c|c|}\n\\hline\n & $F_1$ & $F_2$ & $F_3$ \\\\\n\\hline\n\\textbf{VT resonances} & $519$ & $1130$ & $2297$ \\\\\n\\textbf{VT + source resonances} & $594$ & $1136$ & $2290$ \\\\\n\\textbf{Formant frequencies} & $683$ & $1111$ & $2417$ \\\\\n\\hline\n\\end{tabular}}\n\\caption{\\label{FormantCouplingTable} Vowel formants and Helmholtz\n resonances (in $\\textrm{Hz}$) of a VT during the production\n of [\\textipa{\\textscripta}]. 
In the first two rows, only those\n resonances have been taken into account whose modal behaviour\n corresponds with the formants $F_1, F_2,$ and\n $F_3$.}\n\\end{table}\n\nIn numerical computations, the domain $\\Omega \\subset {\\mathbb{R}}^3$ for the\nHelmholtz equation~\\eqref{HelmHoltzEq} consists of the VT geometry of\n[\\textipa{\\textscripta}] either as such (leading to ``VT resonances''\nin Table~\\ref{FormantCouplingTable}) or manually joined to the\nimpedance matching cavity at the glottal position (leading to ``VT +\nsource resonances'' in Table~\\ref{FormantCouplingTable}). The FEM\nmeshes have been described in Section~\\ref{HelmholtzCavitySec}.\nThe acoustic modes and resonant frequencies have been computed from\nEq.~\\eqref{HelmHoltzEq}, and some of the resulting resonant\nfrequencies and modal pressure distributions are shown in\nFig.~\\ref{CoupledSystemRes}.\n\nIn contrast to Section~\\ref{HelmholtzCavitySec}, the symbol $\\Gamma_0$\nnow denotes the spherical mouth interface surface visible in\nFig.~\\ref{DesignOfSourceFig} (right panel), and instead of\nEq.~\\eqref{DirichletBndry} we use the boundary condition of Robin type\n\\begin{equation*}\n \\lambda \\phi_\\lambda({\\bf r}) + c\\frac{\\partial\n \\phi_\\lambda}{\\partial\\nu}({\\bf r}) = 0 \\text{ on } \\Gamma_0,\n\\end{equation*}\nmaking the interface absorbing. When computing VT resonances for\ncomparison values without the impedance matching cavity (the top row\nin Table~\\ref{FormantCouplingTable}), the interface at the glottal\nopening is considered as part of $\\Gamma_0$, too. The resulting\nquadratic eigenvalue problem was then solved by transforming it into a\nlarger, linear eigenvalue problem as explained in\n\\cite[Section~3]{Hannukainen:2007}.\nFor a similar kind of numerical experiment involving VT geometries but\nwithout a source, see \\cite{arnela:2013}.\n\n\nThe formant values given in Table~\\ref{FormantCouplingTable} have been\nextracted by Praat \\cite{Praat:2016} from post-processed speech\nrecordings during the acquisition of the MRI geometry as explained in\nSection~\\ref{ProcessingSubSec}. The extraction was carried out at $3.5\n\\, \\mathrm{s}$ from the start of the phonation, with a duration of $25 \\,\n\\textrm{ms}$.\n\nGiven in semitones, the discrepancies between the first two rows in\nTable~\\ref{FormantCouplingTable} are $-2.3$, $-0.1$, and\n$0.05$. Similarly, the discrepancies between the last two rows in\nTable~\\ref{FormantCouplingTable} are $-2.4$, $0.4$, and $-0.9$. The\nlargest discrepancy concerning the first formant $F_1$ is partly\nexplained by the challenges in formant extraction from the nonoptimal\nspeech sample pair of the MRI data used. In\n\\cite[Table~2]{A-A-H-J-K-K-L-M-M-P-S-V:LSDASMRIS}, the value for $F_1$\nfrom the same test subject was found to be $580 \\pm 23 \\, \\mathrm{Hz}$\nbased on averaging over ten speech samples during MRI and using a more\ncareful treatment for computing the spectral envelope, based on the MATLAB\nfunction \\verb|arburg|.\n\nWe conclude that for Helmholtz resonances corresponding to $F_2$ and\n$F_3$ of the physical model of [\\textipa{\\textscripta}], the\nperturbations due to acoustic coupling with the impedance matching\ncavity are small fractions of the comparable natural variation in\nspoken vowels. 
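\n\nThe semitone figures above are plain $12 \\log_2$ frequency ratios; as a\nminimal check, they can be reproduced from the values of\nTable~\\ref{FormantCouplingTable} by\n\\begin{verbatim}\n% Semitone discrepancies between the rows of the formant\/resonance table.\nvt      = [519 1130 2297];      % VT resonances in Hz\njoint   = [594 1136 2290];      % VT + source resonances in Hz\nformant = [683 1111 2417];      % measured formant frequencies in Hz\nd12 = 12*log2(vt.\/joint)        % approx. -2.3, -0.1, 0.05\nd23 = 12*log2(joint.\/formant)   % approx. -2.4, 0.4, -0.9\n\\end{verbatim}\n\n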
So as to the lowest formant $F_1$, it seems that the\nimpedance matching cavity actually represents a better approximation\nof the true subglottal acoustics contribution than the mere absorbing\nboundary condition imposed at the glottis position of a VT\ngeometry. We further observe that the three lowest resonant modes of\nthe VT (corresponding to formants $F_1, F_2, F_3$) appear where the\nimpedance matching cavity remains in ``ground state''; see\nFig.~\\ref{CoupledSystemRes}. This supports the desirable property that\nthe narrowing of the horn at the vocal folds position effectively\nkeeps the impedance matching cavity of the source and the VT load only\nweakly coupled.\n\n\n\n\\begin{figure}[]\n\\begin{center}\n\\includegraphics[width=0.22\\textwidth]{a2.png}\\hspace{0.2cm}\n\\includegraphics[width=0.22\\textwidth]{a3.png} \\hspace{0.2cm}\n\\includegraphics[width=0.22\\textwidth]{a4.png} \\vspace{0.5cm}\n\\includegraphics[width=0.22\\textwidth]{a5.png} \\\\ \\hspace{0.2cm}\n\\includegraphics[width=0.22\\textwidth]{a6.png}\\hspace{0.2cm}\n\\includegraphics[width=0.22\\textwidth]{a7.png} \\hspace{0.2cm}\n\\includegraphics[width=0.22\\textwidth]{a8.png} \\hspace{0.2cm}\n\\includegraphics[width=0.22\\textwidth]{a9.png}\n\\end{center}\n \\caption{\\label{CoupledSystemRes} Pressure distributions of some\n resonance modes of the impedance matching cavity of the source\n coupled to a VT geometry of [\\textipa{\\textscripta}]. The modes\n corresponding to longitudinal VT resonances are at\n frequencies $593 \\, \\textrm{Hz}$, $1137 \\, \\textrm{Hz}$, and $2287\n \\, \\textrm{Hz}$, corresponding to formants $F_1, F_2,$ and $F_3$.\n The remaining pressure modes under $2465 \\, \\mathrm{Hz}$ are\n excitations of the impedance matching cavity of the source.}\n\\end{figure}\n\n\n\n\\section{Calibration measurements and source compensation}\n\\label{CalibrationSec}\n\n\\subsection{\\label{CompensationSubSec} Measurement and compensation of the frequency response}\n\nIn this section, we describe the production of an \\emph{exponential\n frequency sweep}\\footnote{Also known as the logarithmic chirp.} with\nuniform sound pressure at the glottal position. The defining property\nof such sweeps is that each increase in frequency by a semitone takes\nan equal amount of time. In this work, the frequency interval of such\nsweeps is $80 \\ldots 7350 \\, \\mathrm{Hz}$ with duration of $10 \\,\n\\mathrm{s}$. All measurements leading to curves in\nFigs.~\\ref{EnvelopeResidualFig}--\\ref{LissajousFig} were carried out\nusing the \\emph{dummy load} shown in Fig.~\\ref{TractrixHornFig} (right\npanel) as the standardised acoustic reference load.\n\nIf one plainly introduces a constant voltage amplitude exponential\nsinusoidal sweep to the loudspeaker unit, the sound pressure at the\nsource output (as seen by the adjacent reference microphone) will vary\nover $20 \\, \\mathrm{dB}$ over the frequency range of the sweep as\nshown in Fig.~\\ref{EnvelopeResidualFig} (left panel). The key\nadvantage in producing a \\emph{constant amplitude sound pressure} at\nthe source output is that excessive external noise contamination of\nmeasured signals can be avoided on frequencies where the output power\nwould be low. Standardising the sound pressure at the output of the\nsource also makes the source acoustics less visible in the\nmeasurements of the load. 
This reduces the perturbation effect at\n$F_1$ that was computationally observed in Section~\\ref{CompValSec}.\n\nAn essentially flat sound pressure output shown in\nFig.~\\ref{EnvelopeResidualFig} (right panel) can be obtained from the\nsource by applying the frequency dependent amplitude weight\n$\\mathbf{w}$ shown in Fig.~\\ref{EnvelopeResidualFig} (middle panel) to\nthe voltage input to the loudspeaker unit. As is to be expected, both\nthe weighted and unweighted voltage sweeps have almost identical phase\nbehaviours as can be seen in Fig.~\\ref{LissajousFig} (left panel). In\ncontrast, the voltage sweep and the resulting sound pressure at the\nreference microphone are out of phase in a very complicated frequency\ndependent manner; see Fig.~\\ref{LissajousFig} (middle panel). Such\nphase behaviour cannot be explained by the relatively sparsely located\nacoustic resonances of the impedance matching cavity.\n\nAn iterative process requiring several sweep measurements was devised\nto obtain the weight shown in Fig.~\\ref{EnvelopeResidualFig} (middle\npanel), and it is outlined below as\nAlgorithm~\\ref{SweepAlgorithm}. Various parameters in the algorithm\nwere tuned by trial and error so as to produce convergence to a\nsatisfactory compensation weight. During the iteration, different\nversions of the measured sweeps have to be temporally aligned with\neach other. The required synchronisation is carried out by detecting a\n$1 \\, \\mathrm{kHz}$ cue of length $1 \\, \\mathrm{s}$, positioned before\nthe beginning of each sweep. This is necessary because there are\nwildly variable latency times in the DAC\/software combination used for\nthe measurements.\n\n\\begin{figure}[t]\n \\includegraphics[width=0.32\\textwidth]{orig_freq_res-crop.pdf}\n \\includegraphics[width=0.32\\textwidth]{compensation_weights.pdf}\n \\includegraphics[width=0.32\\textwidth]{final_freqres-crop.pdf}\n \\caption{\\label{EnvelopeResidualFig} Left panel: The pressure signal\n envelope of the measurement system at the glottal position when a\n constant amplitude exponential voltage sweep was used as the\n loudspeaker input. The first longitudinal resonance of the\n impedance matching cavity appears at $1648 \\, \\mathrm{Hz}$. The\n source was terminated to the dummy load shown in\n Fig.~\\ref{TractrixHornFig} (right panel). Middle panel: The\n inverse weights that are applied to the constant amplitude\n exponential sweep in order to get the output in the next panel.\n Right panel: The envelope of the weighted exponential sweep at the\n glottal position where the weight has been produced by\n Algorithm~\\ref{algorithm}. 
The produced sound pressure sweep at\n the source output has residual amplitude dynamics of approximately\n $0.5 \\,\\mathrm{dB}$.}\n\\end{figure}\n\n\\begin{algorithm}\n\\caption{Computation of the equalisation weight $\\mathbf{w}$}\\label{algorithm}\n\\begin{algorithmic}[1]\n\\Procedure{CalibrateCompensation}{$N$, $t$}\n\\State $\\mathbf{w}\\gets [1,1,\\ldots,1]$\n\\For {$k\\gets 0\\ldots N$}\n\\State $\\mathbf{x} \\gets \\mathbf{w}\\cdot{}$ExponentialChirp$(t)$\n\\State $\\mathbf{y} \\gets{}$Play$(\\mathbf{x})$\n\\State $H \\gets{}$ComputeEnvelope$(\\mathbf{y})$\n\\State $d \\gets{}$Dynamics$(H)$\n\\State $r \\gets{}$Regularisation$(d)$\n\\State $ \\mathbf{w} \\gets \\frac{1}{\\abs{H}+r}\\cdot \\mathbf{w}$\n\\EndFor\n\\State\\Return $\\mathbf{w}$\n\\EndProcedure\n\\end{algorithmic} \\label{SweepAlgorithm}\nWe consider the calibration successful if the measured dynamics at the\nfinal iteration stage is below $1 \\, \\mathrm{dB}$.\n\\end{algorithm}\n\n\nThe system comprising the power amplifier, the loudspeaker, and the\nacoustic load is somewhat nonlinear, which becomes evident over wide\nfrequency ranges and at large amplitude variations. Even though the curves\nin Fig.~\\ref{EnvelopeResidualFig} (left and middle panels) are\nobviously related, they do not sum up to a constant that would be\nindependent of the frequency. Not even the dynamical ranges of these\ncurves coincide as would happen in a linear and time-invariant\nsetting. In spite of the nonlinearity, it is possible to use a very\nslowly increasing sweep to produce an accurate voltage gain from the\noutput of the DAC to the output of the reference microphone preamplifier over\na very wide frequency range. One example of such a voltage gain\nfunction is shown in Fig.~\\ref{EnvelopeResidualFig} (left panel), but\nits inverse is not a good candidate for the compensation weight.\n\n\n\\begin{figure}[hb]\n \\centering\n \\includegraphics[width=0.3\\textwidth]{lissajous_original_to_compensated_3000hz.pdf} \\hspace{0.2cm}\n \\includegraphics[width=0.3\\textwidth]{lissajous_1540hz_orig_sweep_to_compensated_nose.pdf} \\hspace{0.2cm} \n \\includegraphics[width=0.33\\textwidth]{impulse_response_dummy.pdf}\n\\caption{\\label{LissajousFig} Left: Lissajous plot of the original,\n unweighted voltage sweep against the sweep near $3 \\, \\mathrm{kHz}$\n weighted by $\\mathbf{w}$ produced by\n Algorithm~\\ref{SweepAlgorithm}. Middle: Lissajous plot of the\n unweighted voltage sweep against the corresponding output as\n recorded by the reference microphone near $1540 \\, \\mathrm{Hz}$, where\n the phase difference varies around $\\pi\/2$. Right: The measured\n impulse response from the voltage input to the reference microphone\n output. In both measurements, the source was terminated to the\n dummy load shown in Fig.~\\ref{TractrixHornFig} (right panel).}\n\\end{figure}\n\n\n\\noindent The results of sweep measurements from physical models of the VT\nare given in Section~\\ref{SweepMeasSubSec}.\n\n\n\n\\subsection{Compensation of the source response for reference tracking}\n\\label{ImpulseSubSec}\n\nAnother important goal is to be able to reconstruct a desired waveform\nas the sound pressure output of the source as observed by the\nreference microphone. 
In the context of speech, a good candidate for a\ntarget waveform is the Liljencrants--Fant (LF) waveform \\cite{Fant:1985}\ndescribing the flow through the vibrating vocal folds; see\nFig.~\\ref{ReconstructedWaveformFig} (top row, left panel).\n\nBecause there is an acoustic transmission delay of $\\approx 0.5 \\,\n\\mathrm{ms}$ in the impedance matching cavity in addition to various,\nmuch larger latencies in the DAC\/computer instrumentation and software,\na simple feedback-based PID control strategy is not feasible for\nsolving any trajectory tracking problem. Instead, a \\emph{feedforward\n  control solution} is required where the response of the acoustic\nsource and the electronic instrumentation is cancelled out by\n\\emph{regularised deconvolution}, so as to obtain an input waveform\nthat produces the desired output. For this, we use the version of\nconstrained least squares filtering whose mathematical treatment in a\nsignal processing context is given in Section~\\ref{DeConvSec}.\n\nThe regularised deconvolution requires estimating the impulse response\nof the whole measurement system that corresponds to the convolution\nkernel $h_0$ in Eq.~\\eqref{ConvolutionEq}. This response is estimated\nusing the sinusoidal sweep excitation described in\n\\cite{Muller:TFM:2001}, and the result of the measurement can be seen\nin~Fig.~\\ref{LissajousFig} (right panel). Because the deconvolution\ncontains the regularisation parameters $\\gamma$ and $\\kappa$, it tolerates\nthe noise that is always present in the estimated impulse response.\n\nLet us proceed to describe how the mathematical treatment given in\nSection~\\ref{DeConvSec} can be turned into a workable signal\nprocessing algorithm in discrete time. All signals (including the\nestimated impulse response corresponding to kernel $h_0$) are\ndiscretised at the sampling rate $44 100 \\, \\mathrm{Hz}$ used in all\nsignal measurements.\n
We denote the number of samples of a discretised\nsignal, say $x[n]$, by $N = 44 100 \\, \\mathrm{Hz} \\cdot T$ where $T$\nis the temporal length of the original (continuous) signal $x(t)$, $t\n\\in [0, T]$, and sampling is carried out by setting, e.g., \n\\begin{equation*}\n  x[n] = \\frac{1}{T_s} \\int_{(n-1) T_s}^{n T_s}{ x(t) \\, dt} \n  \\quad \\text{ where } \\quad 1 \\leq n \\leq N \\quad\n  \\text{ and } \\quad T_s = \\mathrm{s}\/44 \\, 100 .\n\\end{equation*}\nThe measured (discrete) impulse response $h_0[n]$ is extended to match\nthe signal length $N$ by padding it with zeroes, if necessary.\n\nIn discrete time, the regularised deconvolution given in\nEqs.~\\eqref{NormalEq}--\\eqref{ResidualEq} takes the matrix\/vector form\n\\begin{equation} \\label{DiscretisedRegConv}\n\\begin{aligned}\n  \\v{\\mathbf{u}} & = \\left(\\gamma \\left (\\kappa I + R^{T}R \\right ) + H^{T}H \\right)^{-1}H^{T} \\mathbf{y} \\quad \\text{ and } \\\\\n  \\mathbf{v}_{\\gamma, \\kappa} & =\n\\left(\\kappa I + R^{T}R + \\gamma^{-1} H^{T}H \\right)^{-1} \\left ( \\kappa I + R^{T}R \\right ) \\mathbf{y}.\n\\end{aligned}\n\\end{equation}\nThe components of the $N \\times 1$ column vectors $\\v{\\mathbf{u}},\n\\mathbf{y}, \\mathbf{v}_{\\gamma, \\kappa}$ are plainly the discretised\nvalues $\\v{u}[n], y[n], {v}_{\\kappa, \\mu}[n]$ for $n = 1, \\ldots , N$\nof signals $\\v{u}, y, {v}_{\\kappa, \\mu}$, respectively, given in\nEqs.~\\eqref{NormalEq}--\\eqref{ResidualEq} where $\\mu = \\gamma^{-1}$.\nThe second order difference $N \\times N$ matrix $R$ is the symmetric\nmatrix whose top row is $\\left [ 2,-1,0,\\ldots,0 , -1 \\right ]$,\nmaking it circulant. The nonsymmetric $N \\times N$ matrix $H = \\left [\n  h_{j,k} \\right ]$ is constructed by setting $h_{jk}=h_0[(N + j -\n  k)\\,\\mathrm{mod} \\,N+1]$ for $1 \\leq j, k \\leq N$. Because all of\nthe matrices $R = R^{T}, H, H^{T}$ are now circulant, so is the\nsymmetric matrix $\\gamma \\left (\\kappa I + R^{T}R \\right ) + H^{T}H$\nin Eq.~\\eqref{DiscretisedRegConv}. Hence, the matrix\/vector products\nin Eq.~\\eqref{DiscretisedRegConv} can be understood as circular\ndiscrete convolutions that can be implemented in $N\\log(N)$ time using\nthe Fast Fourier Transform (FFT). This leads to a very efficient\nsolution for $ \\v{\\mathbf{u}}$ given $\\mathbf{y}$ even for long\nsignals.\n\nDefining the transfer functions $\\widehat R(z)$, $\\widehat H(z)$ and\nthe transforms $\\widehat{y}(z), \\widehat{v}_{\\gamma, \\kappa} (z)$ for\n$z = e^{i \\theta}$ as\n\\begin{equation*}\n\\begin{aligned}\n  & \\widehat R(z) = - z^{-N} - z^{-1} + 2 - z - z^{N}, \\quad \n  \\widehat H(z) = \\sum_{n = 0}^N {h_{n 0} z^{n}} + \\sum_{n = -N}^{-1} {h_{0 n} z^{n}}, \\\\\n  & \\widehat{y}(z) = \\sum_{n = 1}^N {y[n] z^{n}}, \\text{ and } \n  \\widehat{v}_{\\gamma, \\kappa} (z) = \\sum_{n = 1}^N {{v}_{\\gamma, \\kappa}[n] z^{n}},\n\\end{aligned}\n\\end{equation*}\nwe observe that the latter of Eqs.~\\eqref{DiscretisedRegConv} takes\nthe form of a Discrete Fourier Transform (DFT)\n\\begin{equation} \\label{DFTTransferFunctionEq}\n  \\frac{\\widehat{v}_{\\gamma, \\kappa} (z_k) }{\\widehat{y}(z_k)} = \\frac{\\kappa + \\abs{\\widehat R(z_k)}^2 }\n  {\\kappa + \\abs{\\widehat R(z_k)}^2 + \\gamma^{-1} \\abs{\\widehat H(z_k)}^2 },\n\\end{equation}\nrealised in MATLAB code, where $z_k = e^{2 \\pi i k\/N}$ and $k = 1,\n\\ldots , N$ enumerates the discrete frequencies.\n
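\n\nFor illustration, the frequency domain evaluation of\nEqs.~\\eqref{DiscretisedRegConv} described above can be written in a few\nlines of MATLAB. The following snippet is only a minimal sketch (not the\ncode used for the measurements); it assumes that \\texttt{h0} holds the\nzero-padded impulse response $h_0[n]$ and \\texttt{y} the target signal\n$y[n]$, both as column vectors of length $N$, and that the\nregularisation parameters \\texttt{gam} ($=\\gamma$) and \\texttt{kap}\n($=\\kappa$) have already been chosen:\n\\begin{verbatim}\nN   = numel(y);\nHf  = fft(h0, N);                  % eigenvalues of the circulant H\nr   = zeros(N, 1);                 % first column of the circulant R\nr(1) = 2;  r(2) = -1;  r(N) = -1;\nRf  = fft(r, N);                   % eigenvalues of R\nYf  = fft(y, N);\nden = gam*(kap + abs(Rf).^2) + abs(Hf).^2;\nu   = real(ifft(conj(Hf).*Yf .\/ den));           % deconvolved input\nv   = real(ifft((kap + abs(Rf).^2).*Yf ...\n      .\/ (kap + abs(Rf).^2 + abs(Hf).^2\/gam)));  % residual signal\n\\end{verbatim}\nAll operations act elementwise on length $N$ spectra, so the cost is\ndominated by the FFTs as discussed above.\n\n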
By Parseval's\nidentity, we interpret the residual equation \\eqref{remainderEq} in\ndiscretised form as\n\\begin{equation*}\n\\sum_{k = 1}^N {\\abs{\\widehat{v}_{\\gamma, \\kappa} (z_k)}^2} = \\epsilon^2 \\sum_{k = 1}^N {\\abs{\\widehat{y} (z_k)}^2}\n\\end{equation*}\nwhich, together with Eq.~\\eqref{DFTTransferFunctionEq}, gives an\nequation from which $\\gamma = \\gamma(\\epsilon, \\kappa)$ can be solved for each $0 < \\epsilon < 1$\nand $\\kappa \\geq 0$. This is done using MATLAB's \\texttt{fminbnd}\nfunction to ensure that $\\gamma>0$. The values for $\\epsilon, \\kappa$\nare chosen based on experiments.\n\n\n\n\\section{\\label{MeasSigSec} Results}\n\nTwo kinds of measurements on 3D printed VT physical models were\ncarried out. Firstly, the magnitude frequency response was measured to\ndetermine spectral characteristics (such as the lowest resonant\nfrequencies) of the VT geometry. Secondly, the classical LF\nsignal was fed into the VT physical model to simulate vowel acoustics\nin a spectrally correct manner.\n\n\\subsection{\\label{SweepMeasSubSec} Sweep measurements}\n\nThe power spectral density is obtained from VT physical models by\nsweep measurements. The sweep is constructed as described in\nSection~\\ref{CompensationSubSec} to obtain a constant sound pressure\nat the output of the source when terminated to the dummy load. The\nsignal from the measurement microphone at the mouth position of the\nphysical model is then transformed to an amplitude envelope (a similar\napproach can be found in~\\cite[Fig. 2]{Wolfe:EMS:2016}) by an envelope\ndetector (i.e., computing a moving average of the nonnegative signal\namplitude). Finally, this output envelope is divided by the\ncorresponding envelope from the reference microphone at the source output. The\nresulting amplitude envelopes are shown in the top curves of\nFig.~\\ref{VTResponseFig}, and the resonance data is given in\nTable~\\ref{sweepFormantTable}.\n\n\\begin{figure}[h]\n  \\centering\n  \\includegraphics[width=0.3\\textwidth]{a_sweep_anec.pdf}\n  \\includegraphics[width=0.3\\textwidth]{i_sweep_anec.pdf}\n  \\includegraphics[width=0.3\\textwidth]{u_sweep_anec.pdf}\n  \\caption{\\label{VTResponseFig} The measured frequency amplitude\n    response of physical models of VT anatomies corresponding to\n    vowels [\\textipa{\\textscripta, i, u}]. The spectral maxima up to\n    $7350\\,\\mathrm{Hz}$ have been marked with circles; the maxima were\n    selected so that two peaks are at least $100\\,\\mathrm{Hz}$ apart\n    from one another and have at least $4\\,\\mathrm{dB}$ peak\n    prominence. The lower curves are power spectral envelopes extracted\n    from the vowel utterances of the same test subject, recorded in\n    the anechoic chamber. }\n\\end{figure}\n\n\n\\begin{table}[h]\n  \\centerline{\n  \\begin{tabular}{|l|c|c|c|c|c|c|}\n\\hline\n & $P_1$ & $P_2$ & $P_3$ &$P_4$ &$P_5$&$P_6$ \\\\\n\\hline\n\\textbf{[a]} & 635 * & 1104 * & 2364 * & 3167 & 4038& X \\\\\n\\textbf{[i]} & 316 * & 658 & 984 & 2104 * & 2957 * & 5740 \\\\\n\\textbf{[u]} & 386 * &819 * &2132 * &3206 &4732&5736 \\\\\n\\hline\n\\end{tabular}}\n\\caption{\\label{sweepFormantTable} Peak frequency positions from sweep\n  measurements on 3D printed VT physical models.\n
The peaks\n  corresponding to the three lowest formants $F_1, F_2$, and $F_3$\n  are denoted by an asterisk.}\n\\end{table}\n\n\n\n\\subsection{\\label{GlottalPulseReconSec} Glottal pulse reconstruction}\n\nThe second goal is to reconstruct acoustically reasonable pressure\nwaveforms at the source output as observed by the reference\nmicrophone. Nonsinusoidal target signals are reproduced using the\ngeneral method described in\nSections~\\ref{DeConvSec}~and~\\ref{ImpulseSubSec}. We use the LF waveform shown in\nFig.~\\ref{fig:direct} (left panel) as the target signal since it\nmodels the action of the vocal folds during phonation. The regularised\ndeconvolution is successful in producing the desired tracking as can be\nseen in Fig.~\\ref{fig:deconv} (right panel). For the results shown in\nFig.~\\ref{ReconstructedWaveformFig}, the impulse response and all\nsignals have been measured with the source terminated to the vowel\ngeometry [\\textipa{\\textscripta}].\n\n\n\n\\section{Discussion}\n\nAfter many design cycles for improvements, the proposed acoustic\nglottal source appears well suited for its intended use. We now\nproceed to discuss remaining shortcomings and possible improvements\nfor the design and algorithms.\n\nThe three most serious shortcomings in the final design are\n\\textrm{(i)} high TL in the impedance matching cavity due to\nattenuation by polyester fibre, \\textrm{(ii)} acoustic leakage through\nthe source chassis, and \\textrm{(iii)} the usable low frequency limit\nat $\\approx 80 \\, \\mathrm{Hz}$. Since the proposed design is scalable,\nthe latter two deficiencies are most easily treated by increasing the\nphysical dimensions, chassis wall thickness, and, hence, the mass of\nthe source. Using a $6''$ or even $8''$ loudspeaker unit with lower bass\nresonant frequencies could be considered, equipped with separate\nconcentric tweeters for producing the higher frequencies. Overly\nincreasing the size of the source makes it, however, impractical for\ndemonstration purposes.\n\n\\begin{figure}[t]\n  \\begin{center}\n    \\begin{subfigure}[b]{0.485\\textwidth}\n      \\centering\n      \\includegraphics[width=0.49\\textwidth]{LF_target.pdf}\n      \\includegraphics[width=0.49\\textwidth]{h_star_x.pdf}\n      \\caption{The LF waveform input to the measurement system (left)\n        and the corresponding pressure output at the glottal position\n        (right).}\n      \\label{fig:direct}\n    \\end{subfigure}\n    \\hspace{0.05cm}\n    \\begin{subfigure}[b]{0.485\\textwidth}\n      \\centering\n      \\includegraphics[width=0.49\\textwidth]{input.pdf}\n      \\includegraphics[width=0.485\\textwidth]{deconv_x.pdf}\n      \\caption{The input waveform produced by regularised\n        deconvolution (left) and the corresponding output,\n        replicating the LF waveform (right).}\n      \\label{fig:deconv}\n    \\end{subfigure}\n    \\caption{Output waveform reconstruction at $180 \\, \\mathrm{Hz}$\n      using the measured impulse response and regularised\n      deconvolution.}\\label{ReconstructedWaveformFig}\n  \\end{center}\n\\end{figure}\n\n\nTransversal resonances were suppressed by adding polyester fibre to the\nwide parts of the impedance matching cavity, which results in a marked\nincrease in the TL of the source. Considering the amplitude response\ndynamics of $\\approx 35 \\, \\mathrm{dB}$ of the source shown in\nFig.~\\ref{EnvelopeResidualFig}, the output volume remains relatively\nlow in uniform amplitude sweeps that are produced as explained in\nSection~\\ref{CompensationSubSec}.\n
Even though the VT physical models\nhave additional TL of order $20 \\ldots 40 \\, \\mathrm{dB}$ depending on\nthe vowel and test subject, it is possible to carry out frequency\nresponse or formant position measurements without an anechoic chamber\nor a high quality measurement microphone at the mouth position, and\nthe results are quite satisfactory; see\n\\cite[Fig.~5]{K-M-O:PPSRDMRI}. To obtain the high quality frequency\nresponse data or to carry out the waveform reconstructions presented in\nSection~\\ref{MeasSigSec}, one has to do the utmost to reduce acoustic\nleakage, hum, and noise level, including using the Br\\\"uel \\&\nKj\\ae{}r measurement microphone in the anechoic chamber. Then\nsecondary error components emerge as can be observed, e.g., as\nroughness between the formant peaks in Fig.~\\ref{VTResponseFig}. We\npoint out that the quality of the microphone used at the mouth opening\ndoes not affect the measured frequencies of the formant\npeaks. However, the microphone position or the paraboloid concentrator\nshown in \\cite[Fig.~4]{K-M-O:PPSRDMRI} does have a small yet\nobservable effect, in particular, on the lowest resonance frequency of\nthe physical model.\n\nAn attractive way of getting a louder sound source is to use\n\\emph{Smith slits} \\cite{Smith:1953,Dodd:2009} for suppressing the transversal\nresonances within the wide part of the impedance matching cavity. The\nrequired design work is best carried out using computational design\noptimisation methods introduced in\n\\cite{U-W-B:OVMAH,Y-W-B:LOTSHMIFFDPAH}.\n\nThis article does not concern impedance measurements but rather the\nresponse between two acoustic pressures. For impedance measurements,\nthe perturbation velocity should be measured at the output of the\nsource for which a number of approaches, based on microphones, have\nbeen proposed \\cite{Wolfe:AIS:2000, Wolfe:IPM:2006,Wolfe:EEI:2013}. In\nthe current design, hot wire anemometry at the reference microphone\nposition would be most suitable; see \\cite{Pratt:MAI:1977,\n  Kob:MMV:2002}. Even the smallest Microflown unit (see\n\\cite{Eerden:EWN:1998,Bree:TMN:1996,Bree:TMF:1997}) commercially\navailable, placed in the middle of the source output channel of\ndiameter $6\\, \\mathrm{mm}$, would cause severe back reflections.\n\nWe have used two different response compensation techniques in\nSection~\\ref{CalibrationSec}: weighting for sweeps and regularised\ndeconvolution for more complicated signals. Using deconvolution for\nproducing sweeps tens of seconds long is not practical since the\ndimension of Eqs.~\\eqref{DiscretisedRegConv} would be too high. As\nopposed to weighted sweeps, regularised deconvolution takes into\naccount the phase response of the full measurement system. The\ndeconvolution is a linear operation whereas the measurement system\nshows signs of amplitude nonlinearity in\nFig.~\\ref{EnvelopeResidualFig}. This is one of the reasons why\ntracking more challenging targets than the LF waveform (e.g., the ramp\nsignal) will not give as good an outcome. The compensation weight\nreconstruction method in Section~\\ref{CompensationSubSec} does not\nrely on linearity at all, and its performance can be improved by\nincreasing the sweep length.\n\nOne of the challenging secondary objectives is to design dummy loads\nof \\emph{reasonable physical size} for the source that would present a\nconstant resistive load over a wide range of frequencies.\n
The dummy\nload shown in Fig.~\\ref{TractrixHornFig} (right panel) consists of a\ntractrix horn tightly filled with polyester fibre, and it has the\nproperty of not being resonant to an observable degree. Two\nparticularly inspiring examples of the construction of resistive\nacoustic loads are given in \\cite{Wolfe:AIS:2000} ($42 \\,\\mathrm{m}$\nof insulated steel pipe of inner diam. $7.8 \\, \\mathrm{mm}$) and\n\\cite{Wolfe:IPM:2006} ($97 \\,\\mathrm{m}$ of straight PVC pipe of inner\ndiam. $15 \\, \\mathrm{mm}$). The practical challenges in such\napproaches are considerable.\n\nWe conclude by discussing the numerical efficiency of the discretised\ndeconvolution proposed in Section~\\ref{ImpulseSubSec}. In order to\nobtain an $N \\log{N}$ algorithm, the $N \\times N$ matrices $R$ and $H$\nwere forced to be circulant. Another way to proceed is to allow $R$ to\nbe the usual tridiagonal, symmetric, second order difference matrix, \nand $H$ to be the upper triangular matrix obtained from the impulse\nresponse, both noncirculant Toeplitz matrices. Then the symmetric\nmatrix $\\gamma \\left (\\kappa I + R^{T}R \\right ) + H^{T}H$ in\nEq.~\\eqref{DiscretisedRegConv} is a slightly perturbed Toeplitz\nmatrix, and the required (approximate) solution of the linear system\ncan be carried out by Toeplitz-preconditioned Conjugate Gradients at\nsuperlinear convergence speed; see, e.g., \\cite{JM:PIT}. Again, an $N\n\\log{N}$ algorithm is obtained if the matrix\/vector products are\nimplemented by FFT.\n\n\\section{Conclusions}\n\nA sound source was proposed for acoustic measurements of vocal tract\nphysical models, produced by Fast Prototyping methods from Magnetic\nResonance Images. The source design requires only commonly available\ncomponents and instruments, and it can be scaled to different\nfrequency ranges. Heuristic and numerical methods were used to\nunderstand and to optimise the source design and performance. Two\nkinds of algorithms were proposed for compensating the source\nnonoptimality: (\\textrm{i}) an iterative process for producing uniform\namplitude sound pressure sweeps, and (\\textrm{ii}) a method based on\nregularised deconvolution for replicating target sound pressure\nwaveforms at the source output. The sound source together with the\ntwo compensation algorithms, written in MATLAB code, was deemed\nsuccessful based on measurements on the vocal tract geometry\ncorresponding to vowel [\\textipa{\\textscripta}] of a male speaker.\n\n\\section*{Acknowledgments}\n\n\n The authors wish to thank, for consultation and facilities, the\n Dept.~of\n
Signal Processing and Acoustics, Aalto University\n (Prof.~P.~Alku, Lab.~Eng.~I.~Huhtakallio, M.~Sc.~M.~Airaksinen) and\n Digital Design Laboratory, Aalto University (M.~Arch.~A.~Mohite).\n\n The authors have received financial support from Instrumentarium\n Science Foundation, Magnus Ehrnrooth Foundation, Niilo Helander\n Foundation, and Vilho, Yrj\\\"o and Kalle V\\\"ais\\\"al\\\"a Foundation.\n\n\n\n\n\\section*{References}\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} diff --git a/data_all_eng_slimpj/shuffled/split2/finalzyld b/data_all_eng_slimpj/shuffled/split2/finalzyld new file mode 100644 index 0000000000000000000000000000000000000000..33e8553cdb5d2de4487336ff4a07f8b246288047 --- /dev/null +++ b/data_all_eng_slimpj/shuffled/split2/finalzyld @@ -0,0 +1,5 @@ +{"text":"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\\section{Conclusions} \\label{SEC:Conclusion}\nIn this paper we have presented an approach that estimates frequency differences between the mixing oscillator in the HF transmitter, that moves the baseband signal to the carrier frequency, and the oscillator in the receiver that converts the radio signal back to baseband. It is based on tracking the pitch and its harmonics in the received baseband speech signal in an open range search method. Experiments on real data from \\gls{HF} transmissions show promising performance, both in terms of precision and computational complexity.\n\n\n\\balance\n\\renewcommand*{\\bibfont}{\\small}\n\\bibliographystyle{IEEEtran}\n\n\\section{Pitch tracking experiments} \\label{SEC:PitchEx}\n\\begin{figure}[b]\n\t\\centering\n\t\\input{images\/wizzard.tex}\n\t\\caption{Cumulative density function (CDF) of pitch estimation error per frame on PTDB-TUG database \\cite{PTDB-TUG2011}.}\n\t\\label{fig:wizzard}\n\\end{figure}\nTo evaluate the proposed approach w.r.t.\\ its capabilities of tracking the human pitch we used the PTDB-TUG database from \\cite{PTDB-TUG2011} and compared our approach to the YAAPT algorithm implementation \\cite{Zahorian08}. The results are given in Fig.~\\ref{fig:wizzard}. The Rake-PC pitch tracker achieves in more than 85\\% of all pitch containing speech segments a higher precision than the YAAPT algorithm. However, in the remaining 15\\% of the segments the performance stays way below YAAPT. This difference can be attributed to the sophisticated post-processing of YAAPT (multi pitch candidate selection process, non-linearities to restore missing pitches, application of temporal restrictions) which is missing in Rake-PC. If a Kalman filter is applied to the pitch trajectory estimated by Rake-PC, the performances difference can be compensated for to a great degree.\n\nFig.~\\ref{fig:wizzard} also shows the results of an oracle experiment, where it was allowed to multiply the pitch tracking results by a factor of $2$ or $0.5$ to compensate for mistakenly selecting a harmonic or subharmonic as the pitch frequency. Both algorithms benefit from the oracle, with Rake-PC achieving higher gains and even outperforming YAAPT. From this control experiment it can be concluded that the majority of the large errors in pitch estimation are caused by a wrong classification of pitch harmonics and sub-harmonics to be the pitch. This is a typical error to be handled by post-processing. But this misinterpretation has no impact on the task of carrier frequency difference estimation, because we are only interested in the sum of the PSD values at pitch and pitch harmonics. 
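\n\nFor reference, the octave-tolerant error used in the oracle comparison\nof Fig.~\\ref{fig:wizzard} can be stated compactly. The following lines\nare only an illustrative sketch with hypothetical variable names (they\nare not taken from the actual evaluation code); \\texttt{fEst} and\n\\texttt{fRef} are assumed to be row vectors holding the estimated and\nthe ground truth pitch of the voiced frames:\n\\begin{verbatim}\nePlain  = abs(fEst - fRef);   % raw per-frame pitch error\n% oracle: allow the factors 2 and 0.5 to compensate for\n% (sub-)harmonic confusions, as in the control experiment above\neOracle = min([ePlain; abs(2*fEst - fRef); abs(0.5*fEst - fRef)], [], 1);\n\\end{verbatim}\nThe cumulative distributions shown in Fig.~\\ref{fig:wizzard} can then be\nobtained from such per-frame errors.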
\n\n\n\\section{Experiments on HAM radio data} \\label{SEC:Experiments}\n\n\\begin{figure}[t]\t\t\n\t\\input{images\/cmp_FFT2.tex}\t\n\t\\caption{Difference between estimated and ground truth carrier frequency difference for three FFT sizes. Length of speech activity was ${\\geq}\\SI{10}{s}$. No crosstalkers present.} \t\\label{fig:cmp_FFT}\t\n\\end{figure}\n\n\n\\input{sections\/database}\n\n\\subsection{Carrier Frequency Difference Estimation}\nIn Fig.~\\ref{fig:cmp_FFT} the error between the estimated difference $\\widehat{f}_{D}$ and the ground truth difference is depicted. For this experiment the length of the speech segments was between $\\SI{10}{s}$ and $\\SI{27}{s}$. The system works reliably with errors below $\\pm \\SI{5}{Hz}$, an error that is not perceivable by humans \\cite{Clark2013}.\n\nFor shorter speech segments the error increases as can be seen in Fig.~\\ref{fig:ErrorClassAffiliation}, where we grouped the estimation errors into five classes. In that figure we compared our approach to the harmonic\/spectral envelope approach of \\cite{Ganapathy2013}, which we implemented on our own since no open source implementation was available.\nIt can be observed that the proposed approach achieves lower estimation errors, both on short and longer speech utterances.\n\n\\begin{figure}[t]\n\t\\centering\n\t\\resizebox{\\columnwidth}{!}{\n\t\t\\input{images\/ErrorClassAffiliationCMP2.tex}}\n\t\\caption{Dependency of error class affiliation on speech segment length. Implementation of the Harmonic\/Spectral Envelope approach following \\cite{Ganapathy2013}. FFT size set to $4096$.}\n\t\\label{fig:ErrorClassAffiliation}\n\\end{figure}\n\n\\begin{figure}[b]\n\t\\centering\n\t\\input{images\/crosstalker.tex}\n\t\\caption{Accumulated logarithmic energy in case of two parallel speakers. The crosstalker is visible as a secondary maximum together with its (sub-)harmonics.}\n\t\\label{fig:multipleparallelspeakerpsdtrack}\t\n\\end{figure}\n\n\\subsection{Parallel speakers and harmonic errors}\nA small number of our recordings include the special case of a concurrent speaker at a higher frequency. This is a challenging, yet likely, scenario to be encountered in practice.\nFig.~\\ref{fig:multipleparallelspeakerpsdtrack} depicts the accumulated logarithmic energy $\\widehat{\\Gamma}(f_D)$, Eq.~\\eqref{EQ:accumulatedLogEnergy}, versus the candidate frequency difference $f_D$. Both the speaker at $\\SI{100}{Hz}$ and the interfering crosstalker at $\\SI{1098}{Hz}$ are visible as maxima in the accumulated log-energy. Two secondary maxima are also visible next to the crosstalker, which we attribute to harmonics\/subharmonics and a possible non-linearity in the transmission system. Hence, extending Rake-PC towards concurrent speaker tracking and identification seems possible, similar to multi-speaker tracking in diarization \\cite{Hogg2019} or localization tasks \\cite{Gerlach2014}.\n\n\n\n\\subsection{Processing time}\nIn Fig.~\\ref{fig:realtime} the real time factors of the proposed approach, our implementation of \\cite{Ganapathy2013} and the reference implementation of the YAAPT algorithm from \\cite{Zahorian08} are given. The implementation following Fig.~\\ref{Fig:rakesimplified} (''Rake-PC (Single Core)'') improves the real time factor significantly compared to the direct implementation (''Rake (Single Core)''). The overall processing time can be further reduced by a straightforward parallel implementation (''Rake-PC (Multi Core)'').\n
For a \\gls{FFT} size of 2048 Rake-PC has a similar real time factor as \\cite{Ganapathy2013} and \\cite{Zahorian08}.\n\n\\begin{figure}[htb]\n\t\\centering\n\t\\input{images\/rtf.tex}\n\t\\caption{Real time factors for different approaches and \\gls{FFT} sizes, including single and multi core implementation for a block shift of \\SI{20}{ms}. (AMD Ryzen 5 3600, 6-Core, 32GB RAM, Matlab)}\n\t\\label{fig:realtime}\t\n\\end{figure}\n\n\n\n\\section{Deep Neural Network}\\label{SEC:DNN}\nThe proposed approach can be further improved by exchanging the two maximum operations of \\eqref{EQ:Max} and \\eqref{EQ:Argmax} by a \\gls{DNN} based detector directly working on the features $\\bm{\\Gamma}(t,\\bm{f}_{P},f_{S})$.\n\nFrom the \\gls{DNN} we expect that it learns the temporal dependencies ...\n\n\n\n\n\n\\section{Introduction}\nThe scenario at hand envisions a radio station listening on a fixed, pre-selected frequency, and seeking for \\gls{SSB} modulated \\gls{HF} signals. \nIf the receiver selects a different carrier frequency than the transmitter, the demodulated signal contains a frequency shifted version of the original speech signal originating from the carrier frequency difference \\cite{Suzuki1994}. This has a detrimental effect on the intelligibility of the transmitted speech signal \\cite{Baskent2007}. Fig.~\\ref{fig:spec} shows the spectrogram of a demodulated signal from a station operating at a carrier frequency difference of $\\SI{500}{Hz}$ compared to the transmitter. \n\nTo improve intelligibility, the carrier frequency difference should be estimated and the signal shifted in frequency to remove the difference. This contribution is concerned with the first task, the determination of the carrier frequency difference from the demodulated speech signal. The second task, the compensation, is rather straightforward and will not be considered here.\n\nA carrier frequency difference can be estimated by investigating the statistical properties of speech, e.g., the modulation symmetry \\cite{Clark2013} or the spectral envelope \\cite{Ganapathy2013}. \nThe contribution \\cite{Clark2013} utilizes a third-order modulation spectral analysis that, however, limits the analyzable spectrum to one-forth of its total width, and \\cite{Ganapathy2013} proposes a fundamental harmonic frequency detector that requires relatively long speech segments for reliable estimation in noisy conditions. \nTraining based frequency offset estimation has been proposed in \\cite{Xing2017}, where GMM-SVMs, i-Vectors and deep neural networks are employed. The main disadvantage here is the requirement of having a representative and large enough data set for training, as \\gls{HF} transmissions include a variety of distortions.\n\n\\begin{figure}[b]\n\t\\centering\n\t\\input{images\/spec.tex}\n\t\\caption{Spectrogram of a signal transmitted over HF and demodulated with \\SI{500}{Hz} carrier frequency difference. Marked in black are the carrier frequency difference (dashed) and the pitch traces including two harmonics.}\n\t\\label{fig:spec}\n\\end{figure}\n\nIn this paper we follow the idea of \\cite{Suzuki1994, Ganapathy2013, Xing2017}: By detecting the typical pitch structures in the spectrogram, a possible carrier frequency difference becomes apparent. \nFundamental frequency estimation, or pitch tracking, has been a research topic for years with applications in signal enhancement and speaker identification tasks. 
Various approaches are known from the literature, e.g., RAPT \\cite{Talkin2005ARA}, STRAIGHT \\cite{Kawahara02}, YIN \\cite{YIN02} and YAAPT \\cite{Zahorian08}.\nSince most approaches are based on correlation techniques, be it in the time \\cite{YIN02} or frequency domain \\cite{Kawahara02} or even in both domains \\cite{Zahorian08}, comparative studies show only small differences between the algorithms in terms of precision \\cite{8081482} as they all depend on similar features. Detecting candidates for periodic signals within the physical range of the vocal cord's oscillation frequencies is usually the first step, which is followed by a post-processing for candidate refinement and subsequent smoothing \\cite{Talkin2005ARA, Zahorian08}. Besides time and frequency domain, also cepstral domain estimators have been proposed \\cite{Gerkman2010}. \nClearly, all methods suffer from low \\gls{SNR} ratios \\cite{8081482} and robustness to distortions is an important aspect. Here, new approaches based on \\glspl{DNN} reported promising results \\cite{Han2014}.\n\nHowever, pitch tracking with the purpose of frequency difference estimation poses different constraints compared to the pure pitch tracking task, since in our scenario the pitch and its harmonics are shifted by an arbitrary frequency, requiring an open range search for all possible shifts. \n\nThe contributions of this paper are two-fold. First, we introduce and discuss our new approach to carrier frequency difference estimation named ''Rake''. It is based on accumulated log-energy values and \nenables the classification on significantly shorter segments of speech compared to existing approaches, e.g., \\cite{Ganapathy2013}. Second, an efficient implementation in the power cepstrum domain is proposed to reduce the computational demands of the approach. Finally, in the experiments we evaluate the proposed algorithm on real \\gls{SSB} \\gls{HF} recordings and also compare it to a state-of-the-art pitch tracking algorithm and a frequency difference estimation algorithm.\n\nThe paper is organized as follows: In Sec.~\\ref{SEC:Rake} our features for carrier frequency difference estimation are derived, followed by Sec.~\\ref{SEC:Implementation} where we discuss details of the implementation in the power cepstrum domain. In Sec.~\\ref{SEC:PitchEx} and Sec.~\\ref{SEC:Experiments} the experimental results are discussed. The paper ends by drawing some conclusions in Sec.~\\ref{SEC:Conclusion}.\n\n\n\\section{Implementation} \\label{SEC:Implementation}\nThe computationally expensive evaluation of the terms in \\eqref{EQ:GammaSum} can be interpreted as a correlation of the logarithmic \\gls{PSD} values $\\log\\{|X(t,f)|^2\\}$ with a filter function\n\\begin{align} \\label{EQ:FilterBank}\n\th(f,f_P) = \\sum_{\\tau=1}^{\\tau_{\\text{max}}} \\sum_{\\nu=-W}^{W} \\omega(\\tau,\\nu) \\cdot \\gamma(f-\\tau \\cdot f_P(t)-\\nu),\n\\end{align}\nalong the frequency axis, where $\\gamma(.)$ denotes the unit impulse. This interpretation is similar to the harmonic sieves proposed in \\cite{Gerlach2014}. The correlation, which has to be carried out for all $f_P(t) \\in [f_{P,\\min}, f_{P,\\max}]$, can be efficiently computed by applying a \\gls{FFT}, i.e., by moving to the power cepstral domain, and using the Overlap-Save-Method. \n\nFig.~\\ref{Fig:rakesimplified} shows a block diagram of the overall algorithm. 
This implementation is denoted as ''Rake-PC'' (Rake Power Cepstrum) in the following.\n\\begin{figure}[htb]\n\t\\centering \\footnotesize\n\t\\def.95\\columnwidth{.95\\columnwidth}\n\n\t\\import{images\/}{rake2.pdf_tex}\n\t\\caption{Rake-PC: Block diagram showing power cepstral domain correlation, carrier frequency difference and pitch trace estimation.}\n\t\\label{Fig:rakesimplified}\n\\end{figure}\nThe upper part of the figure illustrates the realization of the correlation of the log-PSD with the set of filters, where each filter corresponds to an assumed value of $f_P$, in the PSD domain. The resulting $\\Gamma(t,f_{P}(t),f_D)$ depends on time frame $t$, the assumed pitch frequency $f_P(t)$, and the carrier frequency shift $f_D$. Next, the optimal value of the pitch is determined according to Eq.~\\eqref{EQ:Max}, followed by a summation along the time axis, Eq.~\\eqref{EQ:accumulatedLogEnergy}. The resolution of the resulting $\\widehat{\\Gamma}(f_D)$ is limited by the \\gls{STFT} size, as the shift is given by its bin index. This can be overcome by interpolating the accumulated log-energy terms, e.g., by spline interpolation. The subsequent argmax operation from \\eqref{EQ:Argmax} yields the final estimate $\\hat{f}_D$, whose resolution is no longer limited by the FFT size.\n\nNote that the first maximum operation, as stated in \\eqref{EQ:Max}, is carried out independently for each time frame $t$. This is a clear shortcoming of the method, as it does not account for the inertia of the vocal cords, which results in smooth pitch trajectories. Introducing a-priori knowledge to account for the lowpass characteristics of the pitch trajectory, e.g., by using a simple first order Markov chain as proposed in \\cite{Gerkman2010}, would improve the pitch tracking precision, however, at the cost of a significantly increased computational complexity.\n\n\nThe values of $f_D$ are quantized by the FFT resolution and the maximum\nsearch from \\eqref{EQ:Argmax} is restricted to the frequency bins that belong to the frequency range \\SI{0}{Hz} to \\SI{3500}{Hz} for a signal sampled at $\\SI{8}{kHz}$.\nThe upper limit is motivated by the fact that a speech signal requires approximately a $\\SI{500}{Hz}$ bandwidth to be intelligible and that the regarded harmonics have to fit in the considered frequency range. Signals with negative offsets $f_D$ are not considered because they are characterized by a significant signal loss of the lower frequencies and remain unintelligible without signal reconstruction approaches.\n\n\n\nThe availability of pitch trace estimates $f'_P(t,f_D)$, $t=0,\\ldots , T-1$, offers the opportunity to discard maxima in $\\widehat{\\Gamma}(f_D)$ that are caused by narrow-band digital transmissions instead of speech. As human speech is characterized by a time-variant pitch, while digital transmissions operate with a fixed frequency, the two can be discerned by the variance of $f'_P(t,f_D)$. If it falls below a threshold, the detected signal is probably not speech and is consequently discarded.\n\n\n\\section{Rake Approach} \\label{SEC:Rake}\nWe are given a demodulated \\gls{SSB} \\gls{HF} signal, of which we assume that it has already been pre-processed by a speech activity detection unit, e.g., by the DNN-based approach from \\cite{Heitkaemper2020}, such that only segments with active speakers are regarded in the following.\nNote that these segments consist of voiced and unvoiced speech, as well as short pauses.\n
In the spectral domain the pitch and its harmonics are clearly visible in a spectrogram, if the \\gls{STOI} value \\cite{Taal2011STOI} is above $0.5$. However, many \\gls{SSB} transmissions have much worse \\gls{STOI} values and the pitch contours are occluded by noise or even completely erased, requiring noise robust algorithms and approaches (visit \\cite{Heitkaemper2020a} for example signals).\n\nIn the following we propose to estimate the carrier frequency difference by a method that is based on locating the pitch and its harmonics in the noisy speech spectrogram. To this end it uses a filterbank with adjustable and time-varying center frequencies, that correspond to the fundamental frequency and its harmonics. One can view the filtering operation as a rake that is pulled in time direction through the logarithmic \\gls{PSD} values $\\log\\{|X(t,f)|^2\\}$ of the signal's \\gls{STFT} $X(t,f)$, where $t$ denotes the frame index and $f$ the frequency bin index to collect the energy at the pitch frequency and its harmonics. The relevant frequency bin indices at the $t$-th frame for a hypothetical carrier frequency difference $f_D$, pitch $f_P(t)$ and the corresponding pitch harmonics are given by $f_D + \\tau \\cdot f_{P}(t)$, where $(\\tau \\in [1, \\tau_{\\text{max}}])$. To account for the limited frequency resolution of the \\gls{STFT} analysis, not only the frequency bin itself but also a small range around it is considered by introducing the frequency deviation parameter $\\nu$. So, the logarithmic \\gls{PSD} values of the pitch including the harmonics are given by \n\\begin{align} \\label{EQ:PSDTerms}\n&\\Psi_{\\nu}^\\tau(t,{f}_{P}(t),f_D) = \\log\\{|X(t,f_D + \\tau \\cdot f_{P}(t)+\\nu)|^2\\}.\n\\end{align}\nThese values are weighted by factors $\\omega(\\tau,\\nu)$, which depend on the harmonic index $\\tau$ and the distance $\\nu$ to the filter center, and are summed by\n\\begin{align} \\label{EQ:GammaSum}\n& \\Gamma(t,f_{P}(t),f_D) = \\sum_{\\tau=1}^{\\tau_{\\text{max}}} \\sum_{\\nu=-W}^{+W} \\omega(\\tau,\\nu) \\cdot \\Psi_{\\nu}^\\tau(t,f_{P}(t),f_D).\n\\end{align}\n\nFor each frequency difference $f_D$ a different sequence of pitch values $\\bm{f}_P = [f_{P}(0),\\ldots f_{P}(T-1)]$ is optimal in the sense that the summation of $\\Gamma(t,f_{P}(t),f_D)$ along $t$ reaches a maximum. Stated differently, the maximization of \\eqref{EQ:GammaSum} will yield an estimate of $f_D$. This is achieved by the following three steps. First, for each time instance $t$, the maximum across the possible pitch hypotheses $f_{P}(t) \\in [f_{P,\\min},f_{P,\\max}]$ is computed:\n\\begin{align} \\label{EQ:Max}\nf'_{P}(t, f_D) = \\underset{f_{P}(t)}{\\argmax} \\left\\{ \\Gamma(t,f_{P}(t),f_D)\\right\\} \\\\ \n\\Gamma'(t,f_D) = \\Gamma(t,f'_{P}(t, f_D),f_D).\n\\end{align}\n$\\Gamma'(t,f_D)$ is the maximum log-energy value for a given demodulation shift hypotheses $f_D$, and the corresponding pitch hypothesis is $f'_{P}(t, f_D)$. 
Here, $f_{P,\\min}$ and $f_{P,\\max}$ denote the frequency bins corresponding to the assumed minimum (\\SI{50}{Hz}) and maximum (\\SI{400}{Hz}) pitch frequencies.\n\nNext, a summation over time results in the accumulated log-energy per carrier frequency difference hypothesis $f_D$: \n\\begin{align} \\label{EQ:accumulatedLogEnergy}\n& \\widehat{\\Gamma}(f_D) = \\sum_{t=0}^{T-1} \\Gamma'\\left(t,f_D \\right).\n\\end{align}\nAssuming a single speaker scenario, the maximum of $\\widehat{\\Gamma}(f_D)$ is selected as the most likely hypotheses for the demodulation shift $\\widehat{f}_{D}$ with \n\\begin{align} \\label{EQ:Argmax}\n\\widehat{f}_{D} = \\underset{f_{D}\\in \\Omega_{f_D}}{\\argmax} \\left\\{ \\widehat{\\Gamma}(f_D)\\right\\} \n\\end{align}\nwith $\\Omega_{f_D}$ denoting the set of candidate frequency differences,\nand the corresponding pitch hypotheses sequence is given by\n\\begin{align}\n\\label{EQ:PitchHyp}\n\\widehat{\\bm{f}}_{P} = [f'_{P}(0, \\widehat{f}_{D}),\\ldots, f'_{P}(T-1, \\widehat{f}_{D})].\n\\end{align}\nAs reported in several publications, e.g., in \\cite{Zahorian08}, and also observed in our own experimental recordings, some audio segments have only a very weak or even no pitch at all, although the harmonics are clearly observable. To take account of this observation the weight of the pitch $\\omega(0,\\nu)$ is only half of $\\omega(1,\\nu)$, i.e., the weight of the first harmonic. Furthermore, all other harmonics are weighted with $\\omega(\\tau,\\nu) \\propto \\frac{1}{\\tau}$, whereby all filters are designed in a triangular shape.\n\nThe summation in \\eqref{EQ:GammaSum} gives a similar pitch detection feature as the \\gls{SHC} used in the YAAPT algorithm \\cite{Zahorian08}, whereby \\eqref{EQ:GammaSum} is defined as a sum of logarithmic \\gls{PSD} values and \\gls{SHC} is a sum over a product of magnitude spectral values. The log-spectral domain formulation causes a dynamic range reduction and improves the numerical stability. \n\nSince \\eqref{EQ:GammaSum} extends the pitch tracking problem towards an open range search by introducing the unknown parameter $f_D$, the computational complexity of the problem is increased by a factor proportional to the size $|\\Omega_{f_D}|$ of the set of candidate values for the frequency difference. Hence, reducing the computational complexity becomes an important task which in our case is handled by interpreting \\eqref{EQ:GammaSum} in the cepstral domain as shown in the next section.\n\n\n\\subsection{Ham Radio Data}\\label{SEC:dataset}\nWe have set up a transmission system between our amateur radio station in Paderborn and several other distant ham radio stations across Europe, transmitting utterances from the LibriSpeech corpus \\cite{libri20}. Kiwi-\\gls{SDR} devices \\cite{kiwi20} at the distant stations were utilized to demodulate the received \\gls{SSB} \\gls{HF} signals and send the recorded audio signals back to our servers via a websocket connection. Audio markers had been added to the signal to allow for an automated time alignment between the transmitted and received signals, easing the annotation and segmentation of the data \\cite{Heitkaemper2020a}.\n\nFor the transmissions a beacon, callsign DB0UPB, was used, which was supervised by a human to avoid interference with other ham radio stations. The \\gls{HF} signals are \\gls{SSB} modulated using the \\gls{LSB} with a bandwidth of $\\SI{2.7}{kHz}$ at carrier frequencies of $\\SI{7.06}{MHz}-\\SI{7.063}{MHz}$ and $\\SI{3.6}{MHz} -\\SI{3.62}{MHz}$. 
To simulate a carrier frequency difference the demodulation frequency of the transmitter and the receiver were selected to differ by values from the set $f_D = [0,100,300,500,1000]$. \nAlthough the original speech samples have a sampling rate of \\SI{16}{kHz}, and the Kiwi-\\gls{SDR} samples the data at \\SI{12.001}{Hz}, the finally emitted data is band-limited to $\\SI{2.7}{kHz}$ (\\gls{ITU} regulations) which introduces a loss of the upper frequencies in case of \\gls{LSB} transmission depending on the carrier frequency difference $f_D$. \nThe data set has a total size of 23:31 hours of which 3:28 hours contain speech activity.\n\n\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section{Introduction}\n\nThe journal \\textit{Monthly Notices of the Royal Astronomical Society} (MNRAS) encourages authors to prepare their papers using \\LaTeX.\nThe style file \\verb'mnras.cls' can be used to approximate the final appearance of the journal, and provides numerous features to simplify the preparation of papers.\nThis document, \\verb'mnras_guide.tex', provides guidance on using that style file and the features it enables.\n\nThis is not a general guide on how to use \\LaTeX, of which many excellent examples already exist.\nWe particularly recommend \\textit{Wikibooks \\LaTeX}\\footnote{\\url{https:\/\/en.wikibooks.org\/wiki\/LaTeX}}, a collaborative online textbook which is of use to both beginners and experts.\nAlternatively there are several other online resources, and most academic libraries also hold suitable beginner's guides.\n\nFor guidance on the contents of papers, journal style, and how to submit a paper, see the MNRAS Instructions to Authors\\footnote{\\label{foot:itas}\\url{http:\/\/www.oxfordjournals.org\/our_journals\/mnras\/for_authors\/}}.\nOnly technical issues with the \\LaTeX\\ class are considered here.\n\n\n\\section{Obtaining and installing the MNRAS package}\nSome \\LaTeX\\ distributions come with the MNRAS package by default.\nIf yours does not, you can either install it using your distribution's package manager, or download it from the Comprehensive \\TeX\\ Archive Network\\footnote{\\url{http:\/\/www.ctan.org\/tex-archive\/macros\/latex\/contrib\/mnras}} (CTAN).\n\nThe files can either be installed permanently by placing them in the appropriate directory (consult the documentation for your \\LaTeX\\ distribution), or used temporarily by placing them in the working directory for your paper.\n\nTo use the MNRAS package, simply specify \\verb'mnras' as the document class at the start of a \\verb'.tex' file:\n\n\\begin{verbatim}\n\\documentclass{mnras}\n\\end{verbatim}\nThen compile \\LaTeX\\ (and if necessary \\bibtex) in the usual way.\n\n\\section{Preparing and submitting a paper}\nWe recommend that you start with a copy of the \\texttt{mnras\\_template.tex} file.\nRename the file, update the information on the title page, and then work on the text of your paper.\nGuidelines for content, style etc. are given in the instructions to authors on the journal's website$^{\\ref{foot:itas}}$.\nNote that this document does not follow all the aspects of MNRAS journal style (e.g. 
it has a table of contents).\n\nIf a paper is accepted, it is professionally typeset and copyedited by the publishers.\nIt is therefore likely that minor changes to presentation will occur.\nFor this reason, we ask authors to ignore minor details such as slightly long lines, extra blank spaces, or misplaced figures, because these details will be dealt with during the production process.\n\nPapers must be submitted electronically via the online submission system; paper submissions are not permitted.\nFor full guidance on how to submit a paper, see the instructions to authors.\n\n\\section{Class options}\n\\label{sec:options}\nThere are several options which can be added to the document class line like this:\n\n\\begin{verbatim}\n\\documentclass[option1,option2]{mnras}\n\\end{verbatim}\nThe available options are:\n\\begin{itemize}\n\\item \\verb'letters' -- used for papers in the journal's Letters section.\n\\item \\verb'onecolumn' -- single column, instead of the default two columns. This should be used {\\it only} if necessary for the display of numerous very long equations.\n\\item \\verb'doublespacing' -- text has double line spacing. Please don't submit papers in this format.\n\\item \\verb'referee' -- \\textit{(deprecated)} single column, double spaced, larger text, bigger margins. Please don't submit papers in this format.\n\\item \\verb'galley' -- \\textit{(deprecated)} no running headers, no attempt to align the bottom of columns.\n\\item \\verb'landscape' -- \\textit{(deprecated)} sets the whole document on landscape paper.\n\\item \\verb\"usenatbib\" -- \\textit{(all papers should use this)} this uses Patrick Daly's \\verb\"natbib.sty\" package for citations.\n\\item \\verb\"usegraphicx\" -- \\textit{(most papers will need this)} includes the \\verb'graphicx' package, for inclusion of figures and images.\n\\item \\verb'useAMS' -- adds support for upright Greek characters \\verb'\\upi', \\verb'\\umu' and \\verb'\\upartial' ($\\upi$, $\\umu$ and $\\upartial$). Only these three are included, if you require other symbols you will need to include the \\verb'amsmath' or \\verb'amsymb' packages (see section~\\ref{sec:packages}).\n\\item \\verb\"usedcolumn\" -- includes the package \\verb\"dcolumn\", which includes two new types of column alignment for use in tables.\n\\end{itemize}\n\nSome of these options are deprecated and retained for backwards compatibility only.\nOthers are used in almost all papers, but again are retained as options to ensure that papers written decades ago will continue to compile without problems.\nIf you want to include any other packages, see section~\\ref{sec:packages}.\n\n\\section{Title page}\n\nIf you are using \\texttt{mnras\\_template.tex} the necessary code for generating the title page, headers and footers is already present.\nSimply edit the title, author list, institutions, abstract and keywords as described below.\n\n\\subsection{Title}\nThere are two forms of the title: the full version used on the first page, and a short version which is used in the header of other odd-numbered pages (the `running head').\nEnter them with \\verb'\\title[]{}' like this:\n\\begin{verbatim}\n\\title[Running head]{Full title of the paper}\n\\end{verbatim}\nThe full title can be multiple lines (use \\verb'\\\\' to start a new line) and may be as long as necessary, although we encourage authors to use concise titles. 
The running head must be $\\le~45$ characters on a single line.\n\nSee appendix~\\ref{sec:advanced} for more complicated examples.\n\n\\subsection{Authors and institutions}\n\nLike the title, there are two forms of author list: the full version which appears on the title page, and a short form which appears in the header of the even-numbered pages. Enter them using the \\verb'\\author[]{}' command.\n\nIf the author list is more than one line long, start a new line using \\verb'\\newauthor'. Use \\verb'\\\\' to start the institution list. Affiliations for each author should be indicated with a superscript number, and correspond to the list of institutions below the author list.\n\nFor example, if I were to write a paper with two coauthors at another institution, one of whom also works at a third location:\n\\begin{verbatim}\n\\author[K. T. Smith et al.]{\nKeith T. Smith,$^{1}$\nA. N. Other,$^{2}$\nand Third Author$^{2,3}$\n\\\\\n$^{1}$Affiliation 1\\\\\n$^{2}$Affiliation 2\\\\\n$^{3}$Affiliation 3}\n\\end{verbatim}\nAffiliations should be in the format `Department, Institution, Street Address, City and Postal Code, Country'.\n\nEmail addresses can be inserted with the \\verb'\\thanks{}' command which adds a title page footnote.\nIf you want to list more than one email, put them all in the same \\verb'\\thanks' and use \\verb'\\footnotemark[]' to refer to the same footnote multiple times.\nPresent addresses (if different to those where the work was performed) can also be added with a \\verb'\\thanks' command.\n\n\\subsection{Abstract and keywords}\n\nThe abstract is entered in an \\verb'abstract' environment:\n\\begin{verbatim}\n\\begin{abstract}\nThe abstract of the paper.\n\\end{abstract}\n\\end{verbatim}\n\\noindent Note that there is a word limit on the length of abstracts.\nFor the current word limit, see the journal instructions to authors$^{\\ref{foot:itas}}$.\n\nImmediately following the abstract, a set of keywords is entered in a \\verb'keywords' environment:\n\\begin{verbatim}\n\\begin{keywords}\nkeyword 1 -- keyword 2 -- keyword 3\n\\end{keywords}\n\\end{verbatim}\n\\noindent There is a list of permitted keywords, which is agreed between all the major astronomy journals and revised every few years.\nDo \\emph{not} make up new keywords!\nFor the current list of allowed keywords, see the journal's instructions to authors$^{\\ref{foot:itas}}$.\n\n\\section{Sections and lists}\n\nSections and lists are generally the same as in the standard \\LaTeX\\ classes.\n\n\\subsection{Sections}\n\\label{sec:sections}\nSections are entered in the usual way, using \\verb'\\section{}' and its variants. It is possible to nest up to four section levels:\n\\begin{verbatim}\n\\section{Main section}\n \\subsection{Subsection}\n \\subsubsection{Subsubsection}\n \\paragraph{Lowest level section}\n\\end{verbatim}\n\\noindent The other \\LaTeX\\ sectioning commands \\verb'\\part', \\verb'\\chapter' and \\verb'\\subparagraph{}' are deprecated and should not be used.\n\nSome sections are not numbered as part of journal style (e.g. 
the Acknowledgements).\nTo insert an unnumbered section use the `starred' version of the command: \\verb'\\section*{}'.\n\nSee appendix~\\ref{sec:advanced} for more complicated examples.\n\n\\subsection{Lists}\n\nTwo forms of lists can be used in MNRAS -- numbered and unnumbered.\n\nFor a numbered list, use the \\verb'enumerate' environment:\n\\begin{verbatim}\n\\begin{enumerate}\n \\item First item\n \\item Second item\n \\item etc.\n\\end{enumerate}\n\\end{verbatim}\n\\noindent which produces\n\\begin{enumerate}\n \\item First item\n \\item Second item\n \\item etc.\n\\end{enumerate}\nNote that the list uses lowercase Roman numerals, rather than the \\LaTeX\\ default Arabic numerals.\n\nFor an unnumbered list, use the \\verb'description' environment without the optional argument:\n\\begin{verbatim}\n\\begin{description}\n \\item First item\n \\item Second item\n \\item etc.\n\\end{description}\n\\end{verbatim}\n\\noindent which produces\n\\begin{description}\n \\item First item\n \\item Second item\n \\item etc.\n\\end{description}\n\nBulleted lists using the \\verb'itemize' environment should not be used in MNRAS; it is retained for backwards compatibility only.\n\n\\section{Mathematics and symbols}\n\nThe MNRAS class mostly adopts standard \\LaTeX\\ handling of mathematics, which is briefly summarised here.\nSee also section~\\ref{sec:packages} for packages that support more advanced mathematics.\n\nMathematics can be inserted into the running text using the syntax \\verb'$1+1=2$', which produces $1+1=2$.\nUse this only for short expressions or when referring to mathematical quantities; equations should be entered as described below.\n\n\\subsection{Equations}\nEquations should be entered using the \\verb'equation' environment, which automatically numbers them:\n\n\\begin{verbatim}\n\\begin{equation}\n a^2=b^2+c^2\n\\end{equation}\n\\end{verbatim}\n\\noindent which produces\n\\begin{equation}\n a^2=b^2+c^2\n\\end{equation}\n\nBy default, the equations are numbered sequentially throughout the whole paper. If a paper has a large number of equations, it may be better to number them by section (2.1, 2.2 etc.). To do this, add the command \\verb'\\numberwithin{equation}{section}' to the preamble.\n\nIt is also possible to produce un-numbered equations by using the \\LaTeX\\ built-in \\verb'\\['\\textellipsis\\verb'\\]' and \\verb'$$'\\textellipsis\\verb'$$' commands; however MNRAS requires that all equations are numbered, so these commands should be avoided.\n\n\\subsection{Special symbols}\n\n\n\\begin{table}\n \\caption{Additional commands for special symbols commonly used in astronomy. 
These can be used anywhere.}\n \\label{tab:anysymbols}\n \\begin{tabular}{lll}\n \\hline\n Command & Output & Meaning\\\\\n \\hline\n \\verb'\\sun' & \\sun & Sun, solar\\\\[2pt]\n \\verb'\\earth' & \\earth & Earth, terrestrial\\\\[2pt]\n \\verb'\\micron' & \\micron & microns\\\\[2pt]\n \\verb'\\degr' & \\degr & degrees\\\\[2pt]\n \\verb'\\arcmin' & \\arcmin & arcminutes\\\\[2pt]\n \\verb'\\arcsec' & \\arcsec & arcseconds\\\\[2pt]\n \\verb'\\fdg' & \\fdg & fraction of a degree\\\\[2pt]\n \\verb'\\farcm' & \\farcm & fraction of an arcminute\\\\[2pt]\n \\verb'\\farcs' & \\farcs & fraction of an arcsecond\\\\[2pt]\n \\verb'\\fd' & \\fd & fraction of a day\\\\[2pt]\n \\verb'\\fh' & \\fh & fraction of an hour\\\\[2pt]\n \\verb'\\fm' & \\fm & fraction of a minute\\\\[2pt]\n \\verb'\\fs' & \\fs & fraction of a second\\\\[2pt]\n \\verb'\\fp' & \\fp & fraction of a period\\\\[2pt]\n \\verb'\\diameter' & \\diameter & diameter\\\\[2pt]\n \\verb'\\sq' & \\sq & square, Q.E.D.\\\\[2pt]\n \\hline\n \\end{tabular}\n\\end{table}\n\n\\begin{table}\n \\caption{Additional commands for mathematical symbols. These can only be used in maths mode.}\n \\label{tab:mathssymbols}\n \\begin{tabular}{lll}\n \\hline\n Command & Output & Meaning\\\\\n \\hline\n \\verb'\\upi' & $\\upi$ & upright pi\\\\[2pt]\n \\verb'\\umu' & $\\umu$ & upright mu\\\\[2pt]\n \\verb'\\upartial' & $\\upartial$ & upright partial derivative\\\\[2pt]\n \\verb'\\lid' & $\\lid$ & less than or equal to\\\\[2pt]\n \\verb'\\gid' & $\\gid$ & greater than or equal to\\\\[2pt]\n \\verb'\\la' & $\\la$ & less than of order\\\\[2pt]\n \\verb'\\ga' & $\\ga$ & greater than of order\\\\[2pt]\n \\verb'\\loa' & $\\loa$ & less than approximately\\\\[2pt]\n \\verb'\\goa' & $\\goa$ & greater than approximately\\\\[2pt]\n \\verb'\\cor' & $\\cor$ & corresponds to\\\\[2pt]\n \\verb'\\sol' & $\\sol$ & similar to or less than\\\\[2pt]\n \\verb'\\sog' & $\\sog$ & similar to or greater than\\\\[2pt]\n \\verb'\\lse' & $\\lse$ & less than or homotopic to \\\\[2pt]\n \\verb'\\gse' & $\\gse$ & greater than or homotopic to\\\\[2pt]\n \\verb'\\getsto' & $\\getsto$ & from over to\\\\[2pt]\n \\verb'\\grole' & $\\grole$ & greater over less\\\\[2pt]\n \\verb'\\leogr' & $\\leogr$ & less over greater\\\\\n \\hline\n \\end{tabular}\n\\end{table}\n\nSome additional symbols of common use in astronomy have been added in the MNRAS class. These are shown in tables~\\ref{tab:anysymbols}--\\ref{tab:mathssymbols}. The command names are -- as far as possible -- the same as those used in other major astronomy journals.\n\nMany other mathematical symbols are also available, either built into \\LaTeX\\ or via additional packages. If you want to insert a specific symbol but don't know the \\LaTeX\\ command, we recommend using the Detexify website\\footnote{\\url{http:\/\/detexify.kirelabs.org}}.\n\nSometimes font or coding limitations mean a symbol may not get smaller when used in sub- or superscripts, and will therefore be displayed at the wrong size. There is no need to worry about this as it will be corrected by the typesetter during production.\n\nTo produce bold symbols in mathematics, use \\verb'\\bmath' for simple variables, and the \\verb'bm' package for more complex symbols (see section~\\ref{sec:packages}). Vectors are set in bold italic, using \\verb'\\mathbfit{}'.\n\nFor matrices, use \\verb'\\mathbfss{}' to produce a bold sans-serif font e.g. \\mathbfss{H}; this works even outside maths mode, but not all symbols are available (e.g. Greek). 
For $\\nabla$ (del, used in gradients, divergence etc.) use \\verb'$\\nabla$'.\n\n\\subsection{Ions}\n\nA new \\verb'\\ion{}{}' command has been added to the class file, for the correct typesetting of ionisation states.\nFor example, to typeset singly ionised calcium use \\verb'\\ion{Ca}{ii}', which produces \\ion{Ca}{ii}.\n\n\\section{Figures and tables}\n\\label{sec:fig_table}\nFigures and tables (collectively called `floats') are mostly the same as built into \\LaTeX.\n\n\\subsection{Basic examples}\n\\begin{figure}\n \\includegraphics[width=\\columnwidth]{example}\n \\caption{An example figure.}\n \\label{fig:example}\n\\end{figure}\nFigures are inserted in the usual way using a \\verb'figure' environment and \\verb'\\includegraphics'. The example Figure~\\ref{fig:example} was generated using the code:\n\\begin{verbatim}\n\\begin{figure}\n \\includegraphics[width=\\columnwidth]{example}\n \\caption{An example figure.}\n \\label{fig:example}\n\\end{figure}\n\\end{verbatim}\n\n\\begin{table}\n \\caption{An example table.}\n \\label{tab:example}\n \\begin{tabular}{lcc}\n \\hline\n Star & Mass & Luminosity\\\\\n & $M_{\\sun}$ & $L_{\\sun}$\\\\\n \\hline\n Sun & 1.00 & 1.00\\\\\n $\\alpha$~Cen~A & 1.10 & 1.52\\\\\n $\\epsilon$~Eri & 0.82 & 0.34\\\\\n \\hline\n \\end{tabular}\n\\end{table}\nThe example Table~\\ref{tab:example} was generated using the code:\n\\begin{verbatim}\n\\begin{table}\n \\caption{An example table.}\n \\label{tab:example}\n \\begin{tabular}{lcc}\n \\hline\n Star & Mass & Luminosity\\\\\n & $M_{\\sun}$ & $L_{\\sun}$\\\\\n \\hline\n Sun & 1.00 & 1.00\\\\\n $\\alpha$~Cen~A & 1.10 & 1.52\\\\\n $\\epsilon$~Eri & 0.82 & 0.34\\\\\n \\hline\n \\end{tabular}\n\\end{table}\n\\end{verbatim}\n\n\\subsection{Captions and placement}\nCaptions go \\emph{above} tables but \\emph{below} figures, as in the examples above.\n\nThe \\LaTeX\\ float placement commands \\verb'[htbp]' are intentionally disabled.\nLayout of figures and tables will be adjusted by the publisher during the production process, so authors should not concern themselves with placement. To avoid disappointment and wasted effort, simply place the \\LaTeX\\ code close to where the figure or table is first mentioned in the text and leave exact placement to the publishers.\n\nBy default a figure or table will occupy one column of the page.\nTo produce a wider version which covers both columns, use the \\verb'figure*' or \\verb'table*' environment.\n\nIf a figure or table is too long to fit on a single page it can be split into several parts.\nCreate an additional figure or table which uses \\verb'\\contcaption{}' instead of \\verb'\\caption{}'.\nThis will automatically correct the numbering and add `\\emph{continued}' at the start of the caption.\n\\begin{table}\n \\contcaption{A table continued from the previous one.}\n \\label{tab:continued}\n \\begin{tabular}{lcc}\n \\hline\n Star & Mass & Luminosity\\\\\n & $M_{\\sun}$ & $L_{\\sun}$\\\\\n \\hline\n $\\tau$~Cet & 0.78 & 0.52\\\\\n $\\delta$~Pav & 0.99 & 1.22\\\\\n $\\sigma$~Dra & 0.87 & 0.43\\\\\n \\hline\n \\end{tabular}\n\\end{table}\nTable~\\ref{tab:continued} was generated using the code:\n\n\\begin{verbatim}\n\\begin{table}\n \\contcaption{A table continued from the previous one.}\n \\label{tab:continued}\n \\begin{tabular}{lcc}\n \\hline\n Star & Mass & Luminosity\\\\\n & $M_{\\sun}$ & $L_{\\sun}$\\\\\n \\hline\n $\\tau$~Cet & 0.78 & 0.52\\\\\n $\\delta$~Pav & 0.99 & 1.22\\\\\n $\\sigma$~Dra & 0.87 & 0.43\\\\\n \\hline\n 
\\end{tabular}\n\\end{table}\n\\end{verbatim}\n\nTo produce a landscape figure or table, use the \\verb'pdflscape' package and the \\verb'landscape' environment.\nThe landscape Table~\\ref{tab:landscape} was produced using the code:\n\\begin{verbatim}\n\\begin{landscape}\n \\begin{table}\n \\caption{An example landscape table.}\n \\label{tab:landscape}\n \\begin{tabular}{cccccccccc}\n \\hline\n Header & Header & ...\\\\\n Unit & Unit & ...\\\\\n \\hline\n Data & Data & ...\\\\\n Data & Data & ...\\\\\n ...\\\\\n \\hline\n \\end{tabular}\n \\end{table}\n\\end{landscape}\n\\end{verbatim}\nUnfortunately this method will force a page break before the table appears.\nMore complicated solutions are possible, but authors shouldn't worry about this.\n\n\\begin{landscape}\n \\begin{table}\n \\caption{An example landscape table.}\n \\label{tab:landscape}\n \\begin{tabular}{cccccccccc}\n \\hline\n Header & Header & Header & Header & Header & Header & Header & Header & Header & Header\\\\\n Unit & Unit & Unit & Unit & Unit & Unit & Unit & Unit & Unit & Unit \\\\\n \\hline\n Data & Data & Data & Data & Data & Data & Data & Data & Data & Data\\\\\n Data & Data & Data & Data & Data & Data & Data & Data & Data & Data\\\\\n Data & Data & Data & Data & Data & Data & Data & Data & Data & Data\\\\\n Data & Data & Data & Data & Data & Data & Data & Data & Data & Data\\\\\n Data & Data & Data & Data & Data & Data & Data & Data & Data & Data\\\\\n Data & Data & Data & Data & Data & Data & Data & Data & Data & Data\\\\\n Data & Data & Data & Data & Data & Data & Data & Data & Data & Data\\\\\n Data & Data & Data & Data & Data & Data & Data & Data & Data & Data\\\\\n \\hline\n \\end{tabular}\n \\end{table}\n\\end{landscape}\n\n\\section{References and citations}\n\n\\subsection{Cross-referencing}\n\nThe usual \\LaTeX\\ commands \\verb'\\label{}' and \\verb'\\ref{}' can be used for cross-referencing within the same paper.\nWe recommend that you use these whenever relevant, rather than writing out the section or figure numbers explicitly.\nThis ensures that cross-references are updated whenever the numbering changes (e.g. during revision) and provides clickable links (if available in your compiler).\n\nIt is best to give each section, figure and table a logical label.\nFor example, Table~\\ref{tab:mathssymbols} has the label \\verb'tab:mathssymbols', whilst section~\\ref{sec:packages} has the label \\verb'sec:packages'.\nAdd the label \\emph{after} the section or caption command, as in the examples in sections~\\ref{sec:sections} and \\ref{sec:fig_table}.\nEnter the cross-reference with a non-breaking space between the type of object and the number, like this: \\verb'see Figure~\\ref{fig:example}'.\n\nThe \\verb'\\autoref{}' command can be used to automatically fill out the type of object, saving on typing.\nIt also causes the link to cover the whole phrase rather than just the number, but for that reason is only suitable for single cross-references rather than ranges.\nFor example, \\verb'\\autoref{tab:journal_abbr}' produces \\autoref{tab:journal_abbr}.\n\n\\subsection{Citations}\n\\label{sec:cite}\n\nMNRAS uses the Harvard -- author (year) -- citation style, e.g. 
\\citet{author2013}.\nThis is implemented in \\LaTeX\\ via the \\verb'natbib' package, which in turn is enabled via the \\verb'usenatbib' package option (see section~\\ref{sec:options}); this option should be used in all papers.\n\nEach entry in the reference list has a `key' (see section~\\ref{sec:ref_list}) which is used to generate citations.\nThere are two basic \\verb'natbib' commands:\n\\begin{description}\n \\item \\verb'\\citet{key}' produces an in-text citation: \\citet{author2013}\n \\item \\verb'\\citep{key}' produces a bracketed (parenthetical) citation: \\citep{author2013}\n\\end{description}\nCitations will include clickable links to the relevant entry in the reference list, if supported by your \\LaTeX\\ compiler.\n\n\\defcitealias{smith2014}{Paper~I}\n\\begin{table*}\n \\caption{Common citation commands, provided by the \\texttt{natbib} package.}\n \\label{tab:natbib}\n \\begin{tabular}{lll}\n \\hline\n Command & Output & Note\\\\\n \\hline\n \\verb'\\citet{key}' & \\citet{smith2014} & \\\\\n \\verb'\\citep{key}' & \\citep{smith2014} & \\\\\n \\verb'\\citep{key,key2}' & \\citep{smith2014,jones2015} & Multiple papers\\\\\n \\verb'\\citet[table 4]{key}' & \\citet[table 4]{smith2014} & \\\\\n \\verb'\\citep[see][figure 7]{key}' & \\citep[see][figure 7]{smith2014} & \\\\\n \\verb'\\citealt{key}' & \\citealt{smith2014} & For use with manual brackets\\\\\n \\verb'\\citeauthor{key}' & \\citeauthor{smith2014} & If already cited in close proximity\\\\\n \\verb'\\defcitealias{key}{Paper~I}' & & Define an alias (doesn't work in floats)\\\\\n \\verb'\\citetalias{key}' & \\citetalias{smith2014} & \\\\\n \\verb'\\citepalias{key}' & \\citepalias{smith2014} & \\\\\n \\hline\n \\end{tabular}\n\\end{table*}\n\nThere are a number of other \\verb'natbib' commands which can be used for more complicated citations.\nThe most commonly used ones are listed in Table~\\ref{tab:natbib}.\nFor full guidance on their use, consult the \\verb'natbib' documentation\\footnote{\\url{http:\/\/www.ctan.org\/pkg\/natbib}}.\n\nIf a reference has more than two authors, \\verb'natbib' will automatically use `et al.'. However, if a paper has exactly three authors, MNRAS style is to list all three on the first citation and use `et al.' thereafter. If you are using \\bibtex\\ (see section~\\ref{sec:ref_list}) then this is handled automatically. 
If not, the \\verb'\\citet*{}' and \\verb'\\citep*{}' commands can be used at the first citation to include all of the authors.\n\n\\subsection{The list of references}\n\\label{sec:ref_list}\n\nIt is possible to enter references manually using the usual \\LaTeX\\ commands, but we strongly encourage authors to use \\bibtex\\ instead.\n\\bibtex\\ ensures that the reference list is updated automatically as references are added or removed from the paper, puts them in the correct format, saves on typing, and the same reference file can be used for many different papers -- saving time hunting down reference details.\nAn MNRAS \\bibtex\\ style file, \\verb'mnras.bst', is distributed as part of this package.\nThe rest of this section will assume you are using \\bibtex.\n\nReferences are entered into a separate \\verb'.bib' file in standard \\bibtex\\ formatting.\nThis can be done manually, or there are several software packages which make editing the \\verb'.bib' file much easier.\nWe particularly recommend \\textsc{JabRef}\\footnote{\\url{http:\/\/jabref.sourceforge.net\/}}, which works on all major operating systems.\n\\bibtex\\ entries can be obtained from the NASA Astrophysics Data System\\footnote{\\label{foot:ads}\\url{http:\/\/adsabs.harvard.edu}} (ADS) by clicking on `Bibtex entry for this abstract' on any entry.\nSimply copy this into your \\verb'.bib' file or into the `BibTeX source' tab in \\textsc{JabRef}.\n\nEach entry in the \\verb'.bib' file must specify a unique `key' to identify the paper, the format of which is up to the author.\nSimply cite it in the usual way, as described in section~\\ref{sec:cite}, using the specified key.\nCompile the paper as usual, but add an extra step to run the \\texttt{bibtex} command.\nConsult the documentation for your compiler or latex distribution.\n\nCorrect formatting of the reference list will be handled by \\bibtex\\ in almost all cases, provided that the correct information was entered into the \\verb'.bib' file.\nNote that ADS entries are not always correct, particularly for older papers and conference proceedings, so may need to be edited.\nIf in doubt, or if you are producing the reference list manually, see the MNRAS instructions to authors$^{\\ref{foot:itas}}$ for the current guidelines on how to format the list of references.\n\n\\section{Appendices and online material}\n\nTo start an appendix, simply place the \\verb'\n\\section{Introduction}\n\nExoplanet surveys and demographic studies have revealed a previously unknown class of planets. These close-in, super-earth\/sub-neptunes have radii in the range 1-4~R$_\\oplus$ \\citep[e.g.][]{Bourucki2011,Thompson2018} and masses $\\lesssim 20~$M$_\\oplus$ \\citep[e.g.][]{Mayor2011,Wu2013,Marcy2014,Weiss2014,Hadden2014,JontofHutter2016}. With orbital periods of less than a few hundred days, these planets have been shown to be incredibly common, with most sun-like and later-type stars hosting at least one, if not many \\citep[e.g.][]{Fressin2013,Silburt2015,Mulders2018,Zink2019,Hsu2019}. \n\nCombined mass and radius measurements for individual planets revealed a vast spread in densities; from as high as $\\sim 10$~g~cm$^{-3}$ to as low as $\\sim 0.1~$g~cm$^{-3}$. The former is consistent with rocky bodies with Earth-like (iron\/silicate) compositions \\citep[e.g.][]{Dressing2015,Dorn2019} and the latter with solid cores surrounded by larger H\/He atmospheres \\citep[e.g.][]{JontofHutter2016}. 
At intermediate densities a plethora of compositions are possible, including some combination of iron, silicates, water and H\/He \\citep[e.g.][]{Rogers2010,Zeng2019}. \n\nH\/He envelopes on low-mass planets close to their host stars are vulnerable to mass-loss \\citep[e.g.][]{Lammer2003,Baraffe2005,MurrayClay2009,owen12}. Evolutionary models of solid cores surrounded by H\/He envelopes suggested that mass-loss would sculpt the planet population into two classes: those which completely lose their H\/He envelope, leaving behind a ``stripped core'', and those planets that retain about 1~\\% by mass in their H\/He envelope \\citep[e.g.][]{Owen2013,Lopez2013,Jin2014}. Early indications of such a dichotomy were found in planetary density measurements \\citep[e.g.][]{Weiss2014,Rogers2015}. However, it was not until accurately measured stellar properties allowed precise planetary radii to be determined that a bi-modal radius distribution was revealed \\citep[e.g.][]{Fulton2017,Fulton2018,VanEylen2018}. \n\nIncorporating these evolutionary clues into the compositional determination suggests most planets larger than about 1.8~R$_\\oplus$ consist of an Earth-like core surrounded by a H\/He envelope, where this envelope contains a few percent of the planet's mass \\citep{Wolfgang2015}. Further, mass-loss models suggest that the vast majority of those planets that do not possess H\/He envelopes today, formed with one which they then lost \\citep[e.g.][]{Owen2017,Wu2019,Rogers2020}. \n\nThe majority of exoplanet host stars are billions of years old. Recent work by \\citet{Berger2020} has shown that the median age of the {\\it Kepler} planet host stars is $\\sim 4$~Gyr and only $\\sim 3$\\%\nof the {\\it Kepler} host stars are $<1$~Gyr old. Therefore, most of our knowledge about the demographics of close-in exoplanets is restricted to old stars. The fact that many of these planets possess H\/He envelopes necessitates that they formed before gas-disc dispersal. These gas-discs have lifetimes of $1-10$~Myr \\citep[e.g.][]{haisch01,Mamajek2009}. { Thus, most exoplanet host stars are significantly older than the planetary formation timescale ($\\lesssim 10$~Myr). }\n\nCompositional uncertainty and lack of knowledge of the exoplanet demographics have led to considerable discussion and debate about how these planets form. In many cases the size of the H\/He envelopes implies that they must have accreted from a protoplanetary disc. The idea of accretion of a H\/He envelope by a solid core fits within the general framework of core-accretion \\citep[e.g.][]{Rafikov2006,Ikoma2006}. In the core-accretion model, once the planet is massive enough that its Bondi-radius resides outside its physical radius (crudely at a few tenths of a Lunar mass, e.g. \\citealt{Massol2016}) it can gravitationally bind nebula gas. As the planet's solid mass continues to grow it can bind ever larger quantities of gas; { eventually at around core masses of $\\sim$0.5~M$_\\oplus$ the H\/He envelope's cooling time becomes comparable to the disc's lifetime \\citep[e.g.][]{Lee2015}.} Thus, beyond solid core masses of $\\sim$ 0.5~M$_\\oplus$\\footnote{Obviously this precise value is uncertain and depends on details of the opacity, e.g. \\citealt{Venturini2016,LeeConnors2020}.} accretion of a H\/He envelope is dependent on how fast the gas currently in the planet's envelope evolves thermally: heat the envelope gas via solid accretion and gas flows from the envelope back to the disc; let it cool and contract and high-entropy nebula gas flows into the envelope. 
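\n\nFor reference, and only to make the criterion above explicit, the Bondi radius is, up to an order-unity factor that depends on the thermodynamics of the captured gas,\n\\begin{equation}\n R_{\\rm B}\\sim\\frac{GM_p}{c_s^2},\n\\end{equation}\nwhere $c_s$ is the sound speed of the surrounding nebula gas; gas can be bound once $R_{\\rm B}$ exceeds the planet's physical radius.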
\n\nThe expected thermal envelope structure typically consists of a convective zone surrounding the core, where heat generated by solid accretion or gravitational contraction of the atmosphere is rapidly convected outwards. { Eventually, as the temperature and density drop, a radiative zone forms, which connects the envelope to the disc}, where the lower entropy interior is connected to the higher entropy disc \\citep[e.g.][]{Rafikov2006}. Since opacity typically increases with pressure and temperature, it is the radiative-convective boundary that controls the thermal evolution of the envelope, and hence the rate of accretion. This makes gas accretion fairly insensitive to the nebula's properties (e.g. density and temperature). \n\nDespite the basic success of the core-accretion model in explaining that low-mass planets can acquire a H\/He envelope, it has failed to quantitatively explain the properties of observed sub-Neptunes \\citep[e.g.][]{Lee2014,Ogihara2020,Alessi2020}. In fact the issue is not that they have accreted H\/He atmospheres; it is that they have accreted only a few percent by mass, even after mass-loss processes like photoevaporation \\citep{Jankovic2019,Ogihara2020} and\/or core-powered mass-loss \\citep[e.g.][]{Ginzburg2018} have been taken into account. Specifically, \\citet{Rogers2020} has recently shown that standard core-accretion models over-predict the H\/He envelope mass by a factor of $\\sim$5 at the end of disc dispersal.\n\nThese problems have led to several proposed solutions: either slowing the accretion of H\/He \\citep[e.g.][]{Ormel2015,Lee2016,Ginzburg2017,Chen2020}, typically by increasing the entropy of the interior, or including extra, rapid mass-loss, most notably {\\it during} protoplanetary disc dispersal, decreasing the entropy of the interior \\citep[e.g.][]{OW2016,Ginzburg2016,Fossati2017,Kubyshkina2018_young} (see the discussion for a detailed description of these models). More fundamentally, these proposed solutions all modify the thermodynamic evolution of the H\/He envelope. Without any intervention one would expect the planet to have an initial cooling timescale (or Kelvin-Helmholtz contraction timescale) comparable to the time over which it has been forming, i.e. a few million years. Thus, in the first solution where accretion is slowed by increasing the entropy of the interior, the envelope's cooling time becomes shorter (probably less than a few million years). In the second solution, where rapid mass-loss during disc dispersal decreases the { planet's radius and entropy}, planets end up with much longer cooling times, closer to $\\sim 100$~Myr \\citep{OW2016}. This mechanism is known as ``boil-off''. \n\nAs planets age they cool; by the time they reach a Gyr old they have gone through many { initial} cooling times and have completely forgotten their initial thermodynamic properties. Therefore, our population of old planets is generally unable to tell us about the thermodynamic properties of forming or recently formed planets. \n\nWith the advent of wider searches for transiting exoplanets (e.g. K2 \\citealt{K2Mission}, TESS \\citealt{TESSmission} \\& NGTS \\citealt{NGTS}), discovering young ($\\lesssim 100$~Myr) close-in planets has become possible. 
Notable recent examples are the four planets in the V1298 Tau system at an age of $\\sim$ 24~Myr \\citep{David2019}, a $\\sim 45$~Myr old planet around DS Tuc A \\citep{Newton2019}, a $\\sim 23$~Myr old planet around AU Mic \\citep{Plavchan2020}, as well as other recent results from the {\\sc thyme} project \\citep[e.g.][]{THYMEII}. \n\n\nIn this work, we show how the combination of a mass and radius measurement for young planets can be used to place a constraint on their initial entropy. In Section~2 we demonstrate how young planets with measured masses and radii can be used to constrain their initial entropies, using simple models. In Section~3 we use numerical planetary evolution models to explore this further, investigating real systems in Section~4. In Section~5 we discuss the implications of our work and summarise in Section~6. \n\n\n\n\n\n\\section{A sketch of the idea}\nPresent day sub-neptunes with voluminous H\/He atmospheres cool over time, contracting as they release heat left over from formation (stored in both their atmospheres and solid cores).\nEvolutionary tracks \\citep[e.g.][]{Baraffe2005,Lopez2014} predict these planets were substantially larger when younger, even when mass-loss is not factored in. In Figure~\\ref{fig:basic_evol} we show tracks from planetary evolution models for planets that evolve into typical sub-neptunes after billions of years of evolution. These planets have radii in the range 4-15~R$_\\oplus$ at ages $\\lesssim 100$~Myr. \n\n\\begin{figure}\n \\centering\n \\includegraphics[width=\\columnwidth]{simple_evolve_2.pdf}\n \\caption{{ The radius (top) and total planet mass (bottom)} evolution of low-mass, close-in exoplanets with H\/He atmospheres. The solid and dashed lines show planets whose envelopes have initial Kelvin-Helmholtz contraction timescales of 500 and 5 Myr respectively. The thick and thin lines show planets that have initial envelope fractions ($M_{\\rm env}\/M_{\\rm c}$) of 0.1 and 0.04 respectively. The evolution calculations are computed using {\\sc mesa} as described in Section~\\ref{sec:mesa} and include thermal evolution and photoevaporation. The planet contains a core with a mass of $\\sim 5$~M$_\\oplus$ and is located at 0.1~AU around a sun-like star.}\n \\label{fig:basic_evol}\n\\end{figure} \n\nIn fact the upper envelope of the radius evolution for present-day sub-neptunes is expected to become $\\gtrsim 1~$R$_{\\rm Jup}$ at young ages. This is demonstrated in Figure~\\ref{fig:basic_evol}, indicating that at the youngest ages these planets could be conflated with a giant planet, as they have radii similar to present day hot jupiters. In fact, young hot jupiters are expected to have radii in excess of $\\sim$ 13~R$_\\oplus$ \\citep{Fortney2010}. This indicates that even those young planets that have measured radii $\\sim 10$~R$_\\oplus$ (e.g. V1298 Tau b, \\citealt{David2019}, and HIP 67522b, \\citealt{THYMEII}) are likely to be ``proto-sub-neptunes'' rather than young, jupiter-mass planets. \n\nUnlike stars, planets have no means of preventing gravitational collapse (other than Coulomb or degeneracy pressure); therefore, the observed size of a planet depends on the thermodynamic state of its interior, or how much entropy it currently possesses. As the planet's envelope contracts the interior entropy falls. 
Eventually, the interior will be supported either by degeneracy pressure at high masses or, at low masses, by Coulomb pressure \\citep{Seager2007}.\n\nIt is well known that, with measurements of the mass and radius of a single-composition self-gravitating sphere (e.g. a pure H\/He mixture), the thermodynamic state of the interior can be determined. However, both observational \\citep{WL12,JontofHutter2016,Fulton2017,VanEylen2018,Benneke2019,Benneke2019b}, and theoretical \\citep{Owen2017,Wu2019,Rogers2020} evidence suggests that present-day sub-neptunes are not of a single composition, but most likely composed of a solid core surrounded by a H\/He envelope\\footnote{Although alternative ideas do exist, e.g. \\citet{Zeng2019}.}. As we show below, this makes the envelope mass and its internal thermodynamic state degenerate. Specifically, a planet with a known mass and radius can have more mass in its core and less in its envelope if the envelope is hotter (and therefore has a larger scale height) or vice-versa, { as demonstrated by works where extra heating mechanisms are included \\citep[e.g.][]{Pu2017,Millholland2020}}. This is even before the degeneracy between the composition of the core and envelope mass is taken into account. \n\n\\subsection{Degeneracy between internal entropy and envelope mass fraction}\n\nThe degeneracy between entropy and envelope mass fraction is trivial to identify. Given a planet composed of a core of known density surrounded by a H\/He envelope, there are three parameters which specify its structure: core mass, envelope mass and internal entropy. Thus, with only measurements of a planet's mass and radius it is not possible to constrain these three structure parameters, and thus we cannot determine its internal entropy. \n\nThis can be seen explicitly if we build a simple model for the planet's internal structure. This simple model is based on the assumption that self-gravity can be neglected in the envelope\\footnote{It can also be seen by considering ``loaded'' polytropes \\citep{Huntley1975}, which include atmosphere self-gravity and smoothly tend to the mono-composition models as the envelope mass exceeds the core mass. However, such analysis is not necessary to elucidate the basic point.} \\citep[e.g.][]{Rafikov2006,Piso2014,Lee2015,Ginzburg2016}. As derived by \\citet{Owen2017} \\& \\citet{OCE2020}, the envelope mass, $M_{\\rm env}$, surrounding a core of mass $M_c$ and radius $R_c$, with an equation of state relating pressure, $P$, to density, $\\rho$, via $P\\propto \\rho^\\gamma$, is given approximately by:\n\\begin{equation}\n M_{\\rm env}\\approx 4\\pi R_{\\rm rcb}^3\\rho_{\\rm rcb}\\left(\\nabla_{\\rm ab}\\frac{GM_c}{c_s^2 R_{\\rm rcb}}\\right)^{1\/(\\gamma-1)} I_2\\left(R_c\/R_{\\rm rcb},\\gamma\\right) \\label{eqn:Menv}\n\\end{equation}\nwhere $R_{\\rm rcb}$ is the radius of the radiative-convective boundary, $\\nabla_{\\rm ab}$ is the adiabatic gradient, $c_s$ the isothermal sound-speed at the radiative-convective boundary and $I_n$ is a dimensionless integral of the form:\n\\begin{equation}\n I_n\\left(R_c\/R_{\\rm rcb},\\gamma\\right)=\\int_{R_c\/R_{\\rm rcb}}^1 x^n\\left(x^{-1}-1\\right)^{1\/(\\gamma-1)}{\\rm d}x\n\\end{equation}\nDue to the extreme irradiation level most close-in planets receive, the radiative-convective boundary occurs at optical depths much greater than that of the photosphere. 
The radiative-convective boundary sets the point at which the energy released by gravitational contraction in the interior is no longer transported by convection, but rather by radiation. Hence the luminosity of the planet (and therefore internal entropy) is related directly to the density at the radiative-convective boundary. Since luminosity and entropy are rather non-intuitive quantities and do not facilitate easy comparison across planet mass, age or formation models, we follow \\citet{Owen2017} and choose the atmosphere's Kelvin-Helmholtz contraction timescale $\\tau_{\\rm KH}$ (or cooling timescale) as our quantity to describe the entropy and thermodynamic state of the planetary interior. We choose this parameterisation of entropy as this cooling timescale can be directly compared to quantities like the protoplanetary disc lifetime. This allows us to write the luminosity as:\n\\begin{equation}\n L\\approx\\frac{1}{\\tau_{\\rm KH}}\\frac{GM_cM_{\\rm env}}{R_{\\rm rcb}}\\frac{I_1}{I_2}\n\\end{equation}\n\nFor clarity, we restrict ourselves to a constant opacity envelope, with opacity $\\kappa$ (while this is clearly incorrect, nothing we demonstrate below is invalidated by this). Thus, solving for the density at the radiative-convective boundary \\citep[e.g.][]{Owen2017} we find:\n\\begin{equation}\n \\rho_{\\rm rcb}\\approx\\left(\\frac{\\mu}{k_b}\\right)\\left[\\left(\\frac{I_2}{I_1}\\right)\\frac{64\\pi\\sigma T^3 R_{\\rm rcb} \\tau_{\\rm KH}}{3\\kappa M_{\\rm env}}\\right] \\label{eqn:rho_rcb}\n\\end{equation}\nwith $\\mu$ the mean molecular weight, $k_b$ Boltzmann's constant, $\\sigma$ Stefan-Boltzmann's constant and $T$ the equilibrium temperature of the planet. Finally, noting that the radiative layer is approximately isothermal (and therefore well described by an exponential density profile) and thus thin, we simply take $R_{\\rm rcb}\\approx R_p$. Combining Equations~\\ref{eqn:Menv} and \\ref{eqn:rho_rcb} we can derive a mass-radius relationship for our planets of the form:\n\n\\begin{equation}\n R_p^{(4\\gamma - 5)\/(\\gamma -1)}M_p^{1\/(\\gamma -1)}\\propto M_{\\rm env}^2 \\tau_{\\rm KH}^{-1}\\frac{I_1}{I_2^2} \\label{eqn:mass_radius}\n\\end{equation}\nwhere we have dropped unimportant variables. Equation~\\ref{eqn:mass_radius} clearly demonstrates that even with the measurement of a planet's mass and radius, the Kelvin-Helmholtz contraction timescale cannot be determined. Now, in the limit $M_{\\rm env}\\rightarrow M_p$ this degeneracy disappears\\footnote{However, one needs to include self-gravity in the analysis.}. For old planets this degeneracy is bypassed by the reasonable assumption that the planet has cooled sufficiently that its initial thermodynamic state has been forgotten and $\\tau_{\\rm KH}$ has simply tended towards the age of the planet ($T_{\\rm age}$). However, for young planets such statements cannot be made; all we can safely say is $\\tau_{\\rm KH}\\gtrsim T_{\\rm age}$. Thus, at young ages, measurements of a planet's mass and radius are not sufficient to determine either its internal composition (fraction of mass in the atmosphere compared to the core) or its internal thermodynamic state.\n\n The dependence on the core composition is encapsulated in the dimensionless integrals $I_1$ and $I_2$. In the limit $R_p\/R_c \\gg 1$, relevant for young planets, both integrals tend to a constant for $\\gamma > 3\/2$ and $I_2$ tends to a constant for $\\gamma>4\/3$. 
Inspection of our numerical models (Section~\\ref{sec:mesa}) indicates that planetary interiors span the full range of possible limits, with $\\gamma <4\/3$ close to the planetary cores when the interiors are high-entropy and $\\gamma > 3\/2$ closer to the planetary surface for low-entropy interiors. In order to assess whether core composition will affect our analysis, we calculate how the ratio $I_2^2\/I_1$ varies with core composition at different values of $\\gamma$; we do this for a 7~R$_\\oplus$, 5~M$_\\oplus$ planet. For an extreme variation in core composition of 1\/3 ice, 2\/3 rock to 1\/3 iron, 2\/3 rock we find a variation of 4\\% for $\\gamma=5\/3$ and a factor of two for $\\gamma=1.25$ (the lowest found at any point in the numerical models). These variations are much smaller than the order of magnitude changes in the Kelvin-Helmholtz timescale we are investigating, especially when considering that detailed fits to the exoplanet data suggest the spread in core composition is narrow \\citep[e.g.][]{Dorn2019,Rogers2020}. Specifically, for the spread in core composition inferred by \\citet{Rogers2020} the ratio $I_2^2\/I_1$ varies by a maximum of 15\\% for $\\gamma=1.25$ for a 7~R$_\\oplus$, 5~M$_\\oplus$ planet. Thus, we consider our results to be robust to uncertainties in the core composition, whose effect is certainly smaller than variations arising from the observational uncertainties on age, mass and radius\\footnote{In fact, retaining the dimensionless integrals and an arbitrary choice of $\\gamma$, we find the constraint on $\\tau_{\\rm KH}$ in Equation~\\ref{eqn:critera2} scales linearly with $I_2^2\/I_1$, compared to much higher powers of mass, radius and age, indicating that for typical 10-20\\% errors, the variation of the dimensionless integrals with core composition will have a small effect on our constraint on the Kelvin-Helmholtz timescale. }. Given that we have already adopted a constant opacity, we choose to adopt a constant value of $\\gamma=5\/3$ and ignore the variation of $I_2$ and $I_1$ for simplicity in the rest of the section, while noting that no single choice of $\\gamma$ is fully justifiable. We emphasise that this section is purely an illustrative demonstration of the method and these choices do not affect the general idea. In our numerical models in Section~\\ref{sec:mesa} the appropriate equation-of-state and opacities are used. \n\n\n\n\n\\subsection{Leveraging mass-loss}\n\nFortunately, for close-in planets we do have a way of constraining the mass in a planet's envelope. The high irradiation levels experienced by young planets cause them to lose envelope mass over time \\citep[e.g.][]{Baraffe2005,Lopez2013,Owen2013}. Specifically, given a planet with a known mass and radius there is a minimum envelope mass it could have retained given its age. Make the envelope less massive and it could not have survived mass-loss until its current age. Therefore, the envelope mass-loss timescale, $t_{\\dot{m}}$, must satisfy:\n\\begin{equation}\nt_{\\dot{m}}\\equiv\\frac{M_{\\rm env}}{\\dot{M}}\\gtrsim T_{\\rm age}\\label{eqn:tmdot1}\n\\end{equation}\nSince the mass-loss rate $\\dot{M}$ depends only on planet mass and radius (for externally driven loss processes), or planet mass, radius and Kelvin-Helmholtz contraction timescale (for internally driven loss processes such as core-powered mass-loss; \\citealt{Ginzburg2018,Gupta2019}), the inequality in Equation~\\ref{eqn:tmdot1} can be used to place a lower bound on the Kelvin-Helmholtz timescale of the planet. 
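\nAs an illustrative intermediate step (using the same constant-opacity, $\\gamma=5\/3$ toy model as above, and absorbing the dimensionless integrals and other dropped factors into the proportionality), Equation~\\ref{eqn:mass_radius} can be rearranged to give\n\\begin{equation}\n M_{\\rm env}\\propto R_p^{5\/4}M_p^{3\/4}\\tau_{\\rm KH}^{1\/2},\n\\end{equation}\nso that the survival requirement of Equation~\\ref{eqn:tmdot1} becomes\n\\begin{equation}\n R_p^{5\/4}M_p^{3\/4}\\tau_{\\rm KH}^{1\/2}\\gtrsim \\dot{M}T_{\\rm age}\n\\end{equation}\n(up to the absorbed constant factors). Setting $\\tau_{\\rm KH}\\sim T_{\\rm age}$ then yields a minimum planet mass, while retaining $\\tau_{\\rm KH}$ as a free parameter yields a lower bound on it; both routes are developed in the following two subsections.\n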
Working within the framework where the loss is driven by photoevaporation, we show that, with a planet radius alone, one can place a minimum value on the planet mass required to be consistent with the simplest (``vanilla'') picture of planetary formation via core accretion. Further, with a measured mass and radius we demonstrate how a lower bound on the Kelvin-Helmholtz timescale can be found. \n\\subsection{A minimum mass for the ``vanilla'' scenario} \n\\label{sec:min_mass}\nThe most na\\\"ive expectation is that planet formation is smooth and continuous, and that disc dispersal gently releases the planet into the circumstellar environment, wherein mass-loss can proceed. In this scenario, with no violent processes, the planet's Kelvin-Helmholtz contraction timescale should roughly track age. Therefore, at young ages we can follow what is done at old ages, where we accept that a planet is several cooling times old and set $\\tau_{\\rm KH}\\sim T_{\\rm age}$. Combining this ansatz with Equation~\\ref{eqn:mass_radius} and inequality \\ref{eqn:tmdot1}, we find:\n\n\\begin{equation}\n M_p^{3\/4}\\gtrsim A\\, \\dot{M}T_{\\rm age}^{1\/2}R_p^{-5\/4}\n\\end{equation}\nwhere the term $A$ incorporates all the terms we have dropped (e.g. temperature, opacity, mean molecular weight and fundamental constants). Setting the mass-loss rate to:\n\\begin{equation}\n \\dot{M}\\propto \\frac{F_{\\rm HE}\\pi R_p^3}{G M_p}\\propto R_p^3\/M_p\n\\end{equation} as found in the case of energy-limited photoevaporation ({ with $F_{\\rm HE}$ the high-energy flux received by the planet}), we find:\n\\begin{equation}\nM_p \\gtrsim A'\\, R_p T_{\\rm age}^{2\/7} \\label{eqn:criterion}\n\\end{equation}\nwith $A'\\equiv A^{4\/7}$. Put simply, for a young planet with a measured radius from a transit survey there is a minimum mass for it to be consistent with the na\\\"ivest expectation of planet formation. Put another way, if a planet is consistent with the criterion in Equation~\\ref{eqn:criterion}, then only limited constraints can be placed on its formation entropy and history. In the scenario above, where $T\\propto a^{-1\/2}$ (with $a$ the orbital separation) and $\\dot{M}\\propto a^{-2}$, we find $A' \\propto a^{-13\/7}$, validating the expectation that the higher irradiation levels closer to the star lead to higher mass-loss and therefore higher required planet masses. Finally (in the case of photoevaporation), the rapid drop in XUV flux when the star spins down \\citep[e.g.][]{Tu2015} means $T_{\\rm age}$ cannot be set arbitrarily long, but is rather constrained to be the saturation time of the XUV output of the star. Hence $T_{\\rm age}$ is effectively capped at the typically quoted saturation time of $\\sim 100$~Myr for sun-like stars \\citep[e.g.][]{Jackson2012}. \n\nWhile the above style of calculation is unlikely to provide interesting analysis for real planets, it could be useful for selecting which planets to target with radial velocity, transit-timing variation (TTV) or spectroscopic follow-up. \n\n\n\n\n\n\n\n\n\n\n\n\n\\subsection{Constraining entropy of formation}\n\\label{sec:min_tkh}\nDoing away with the ansatz that $\\tau_{\\rm KH}\\sim T_{\\rm age}$, we can now generalise to the possibility that at young ages $\\tau_{\\rm KH}\\gtrsim T_{\\rm age}$. Now we can follow a similar argument to that in the preceding section, and show that, with measurements of a planet's mass and radius, one can place a lower bound on the planet's Kelvin-Helmholtz contraction timescale. 
\n\nAgain combining Equation~\\ref{eqn:mass_radius} for the mass-radius relationship with the mass-loss criterion in Equation~\\ref{eqn:tmdot1}, we find:\n\\begin{equation}\n \\tau_{\\rm KH} \\gtrsim B\\, R_p^{7\/2} M_p^{-7\/2} T_{\\rm age}^2 \\label{eqn:critera2}\n\\end{equation}\nwhere, like the $A$ factor above, $B$ encapsulates all the terms and fundamental constants we have dropped from our analysis. The dependence of the inequality in Equation~\\ref{eqn:critera2} on its parameters is easy to understand. Larger and less massive planets which are older have experienced more mass-loss. The higher total mass-loss requires a more massive atmosphere to resist it, necessitating a lower entropy interior to give a planet with the same total mass and radius (Equation~\\ref{eqn:mass_radius}). Again, for the case where $T\\propto a^{-1\/2}$ and $\\dot{M}\\propto a^{-2}$, we find $B \\propto a^{-13\/4}$, indicating that it is those planets that are closest to their host stars (and experience more vigorous mass-loss) that are the most constraining. Now clearly, if one finds a constraint on the Kelvin-Helmholtz timescale that is shorter than the planet's age, one has not learnt anything other than that it is consistent with the ``vanilla'' scenario for core-accretion, and satisfies the constraint in Equation~\\ref{eqn:criterion}. \n\nWith a sample of young planets, with ages less than a few 100~Myr and measured masses and radii, it is possible to constrain their current Kelvin-Helmholtz contraction timescales and hence gain insights into their formation entropies and the physical processes that lead to their formation and early evolution. On the flip-side, if {\\it all} young planets appear to be consistent with $\\tau_{\\rm KH}\\sim T_{\\rm age}$ at young ages, we can also make inferences about their formation pathways.\n\n\nWe caution that in the previous sections we deliberately chose an incorrect opacity-law (a constant opacity) and a simple mass-loss model, in order that the powers in the previous expressions did not become large integer ratios, and as such these expressions should not be used for any quantitative analysis. Switching to more realistic opacity and mass-loss laws does not change the conclusions identified in Sections~\\ref{sec:min_mass} and \\ref{sec:min_tkh}. \n\n\\subsection{A slightly more sophisticated demonstration}\n\nBefore we switch to using full numerical solutions of planetary evolution, we can get a sense of the range of interesting planet properties by using the semi-analytic planet structure model developed by \\citet{Owen2017}, where all choices (opacity-law, mass-loss model, etc.) follow those in \\citet{OCE2020}. In all cases we assume an Earth-like core composition with a 1\/3 iron to 2\/3 rock mass-ratio, which is consistent with the current exoplanet demographics (but as mentioned above, such a choice does not strongly affect our results). \n\\subsubsection{Minimum masses}\nIn Figure~\\ref{fig:simple_rad} we show the minimum mass required for the ``vanilla'' scenario where $\\tau_{\\rm KH}\\sim T_{\\rm age}$ at all ages. \n\\begin{figure}\n \\centering\n \\includegraphics[width=\\columnwidth]{Min_mass_simple.pdf}\n \\caption{The minimum mass required to be consistent with a scenario where $\\tau_{\\rm KH}\\sim T_{\\rm age}$ at all ages. { This figure uses the more sophisticated calculation described in Section 2.5, rather than the simple inequality given in Equation~\\ref{eqn:criterion}}. The top panel shows a planet with a separation from a sun-like star of 0.05~AU and the bottom panel 0.1~AU. 
The sun-like star is assumed to have a saturated XUV flux of $10^{-3.5}~$L$_\\odot$ at all ages. { The white regions in the left of the plot, labelled ``no solution (evaporation valley)'', are regions of parameter space where a H\/He envelope would have undergone run-away loss; they are a manifestation of the evaporation valley}. The dotted lines show planetary radius evolution curves (with no mass-loss) that begin at 10~R$_\\oplus$ at 10~Myr and track $\\tau_{\\rm KH}=T_{\\rm age}$.}\n \\label{fig:simple_rad}\n\\end{figure}\nThe dotted lines on these figures show the radius evolution of planets (not undergoing mass-loss), which begin at 10~R$_\\oplus$ at 10~Myr. These evolutionary curves do not cross many minimum mass contours, indicating that there is no strong age preference in the range of 10 to 100~Myr for selecting planets for this kind of analysis (although we will investigate this more precisely in Section~\\ref{sec:mesa}). This is fairly easy to understand; as the planet cools and contracts the absorbed XUV flux falls, reducing the mass-loss rate. However, the total time to resist mass-loss increases. These two competing effects approximately balance, resulting in a minimum mass that does not change strongly with age. Once the XUV flux is no longer saturated and rapidly falls with time, the mass-loss rate drops precipitously and the minimum mass will also drop rapidly with age.\n\nThe difference between the two panels in Figure~\\ref{fig:simple_rad} indicates, as expected from the previous analysis, that close-in planets require higher masses. For those young planets discovered to date with radii in the range 5-10~R$_\\oplus$, minimum masses in the range of 5-15~M$_\\oplus$ are required.\n\\subsubsection{Constraining the initial Kelvin-Helmholtz timescale}\n\nWhile the minimum masses provide a useful guide, they do not provide much insight into planetary formation. Here, we elaborate on the much more interesting case of young planets with well-measured masses and radii. \n\nIn Figure~\\ref{fig:tkh_simple} we show how the mass-radius plane is partitioned into regions of parameter space that are consistent with $\\tau_{\\rm KH}\\sim T_{\\rm age}$ and those requiring $\\tau_{\\rm KH}\\gtrsim T_{\\rm age}$. This analysis is performed for a planet located at 0.1~AU around an XUV-saturated, 50~Myr old sun-like star. \n\\begin{figure}\n \\centering\n \\includegraphics[width=\\columnwidth]{Constrain_tkh_simple.pdf}\n \\caption{The planet mass and radius plane separated into those planets consistent with $\\tau_{\\rm KH}\\sim T_{\\rm age}$ and those which require longer initial Kelvin-Helmholtz timescales. { As in Figure~2, this figure uses the more sophisticated calculation described in Section 2.5, rather than the simple inequality given in Equation~\\ref{eqn:critera2}}. The diagram is shown for a planet located at 0.1~AU around an XUV-saturated (10$^{-3.5}$~L$_\\odot$), 50~Myr old sun-like star. Planets that sit in the white region would require a long Kelvin-Helmholtz contraction timescale at formation. The point shows a representative young planet with a measured radius of 7~R$_\\oplus$ and mass of 5~M$_\\oplus$, shown with indicative 10\\% error-bars. { The solid and dotted lines show curves of constant $\\tau_{\\rm KH}$, with values of 382~Myr and 50~Myr respectively}. 
Thus, this representative planet would require a current (and hence formation) contraction timescale of $\\gtrsim T_{\\rm age}$.}\n \\label{fig:tkh_simple}\n\\end{figure}\nPlacing a representative young planet with a measured radius of 7~R$_\\oplus$ and mass of 5~M$_\\oplus$ on this diagram indicates that it would require a longer Kelvin-Helmholtz contraction timescale (and hence lower entropy) than predicted by simple formation scenarios. Given that theoretical ideas, such as ``boil-off'', predict initial Kelvin-Helmholtz contraction timescales of $\\sim$ 100~Myr \\citep{OW2016}, this figure indicates that they could be detected with radii and masses measured at the $\\sim 10$\\% precision level. \n\nIt is important to emphasise (as demonstrated in Equation~\\ref{eqn:critera2}) that this type of analysis only provides a bound on the Kelvin-Helmholtz contraction timescale, where the equality holds when a planet is on the limit of stability due to mass-loss. Therefore, finding a planet in the region consistent with $\\tau_{\\rm KH}\\sim T_{\\rm age}$ does not imply that it does not have a longer contraction timescale; rather, it could just be very stable to envelope mass-loss. \n\n\\section{Numerical planetary evolution models}\n\\label{sec:mesa}\nIn the previous section we have used analytic tools to illustrate the basic physics; however, these models can only be pushed so far. For robust and quantitative results, full numerical models are required. This is for several reasons. Most importantly, many of the transiting planets discovered to date are large and thus may contain quite significant envelope mass-fractions ($\\gtrsim 10\\%$). While the self-gravity of such an envelope is small, it is not negligible, and it is not included in the previous analytic model. Additionally, in the previous section we assumed an ideal equation of state with a constant ratio of specific heats and a power-law opacity model. While these choices are acceptable for understanding demographic properties, these assumptions induce unnecessary errors in the analysis of individual systems. Finally, by characterising the full evolutionary history (rather than the instantaneous state) we are able to leverage even more power. This is because not all planetary structures that are consistent with a planet's current state are consistent with its evolutionary history { once mass-loss is taken into account}. The last point is demonstrated by the fact that \\citet{Owen2016} were able to provide an (albeit weak) constraint on the entropy of formation for the old planet {\\it Kepler-}36c. { Specifically, in the previous section we only asked whether $\\tau_{\\rm KH}\\gtrsim T_{\\rm age}$. In this section, by including the full evolutionary history, we are able to compare to the planet's {\\it initial} Kelvin-Helmholtz timescale, which we define to be the envelope's Kelvin-Helmholtz timescale at the end of disc dispersal. This comparison is more powerful, as it allows us to explore initial Kelvin-Helmholtz timescales which are shorter than the planet's current age.}\n\n\n\nTherefore, to overcome the above shortcomings, we solve for the full planetary evolution using {\\sc mesa} \\citep{Paxton2011,Paxton2013,Paxton2015}. The {\\sc mesa} models are identical to those used in \\citet{Owen2016} and \\citet{OwenLai2018}, and include the impact of stellar irradiation (which tracks the \\citealt{Baraffe1998} stellar evolution models) and photoevaporation using the \\citet{Owen2012} mass-loss rates. 
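\n\nFor reference, and purely as a restatement of the luminosity relation of Section~2 under the same approximations, the initial Kelvin-Helmholtz timescale used in the remainder of this work can be written as\n\\begin{equation}\n \\tau_{\\rm KH}\\approx\\frac{GM_cM_{\\rm env}}{R_{\\rm rcb}L}\\frac{I_1}{I_2},\n\\end{equation}\nevaluated at the end of disc dispersal.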
\n\n\\subsection{Example planet}\nHere we return to our example planet from Figure~\\ref{fig:tkh_simple}, a planet located at 0.1~AU around a 50~Myr old sun-like star. Nominally, we consider this planet to have a measured radius of 7~R$_\\oplus$ and measured mass of 5~M$_\\oplus$, but we will investigate how changes to the mass, as well as measurement precision, will affect constraints on the planet's initial Kelvin-Helmholtz timescale. \n\n\n\\subsubsection{Time undergoing photoevaporation}\n\nOne of the big uncertainties at young ages is how long the planet has been exposed to XUV irradiation, and hence photoevaporating. When embedded in the protoplanetary disc, the planet is protected from XUV photons. Thus, the age of the star only provides an upper bound on the time the planet has spent photoevaporating. Since disc lifetimes vary between $\\sim 1$ and $\\sim 10$~Myr, at young ages this is not a trivial uncertainty. We include this uncertainty in our analysis by deriving the probability distribution for the time a planet has spent photoevaporating after disc dispersal, $T_p$. We then marginalise over this probability when determining our lower bound on the planet's initial Kelvin-Helmholtz timescale. \n\nWe take the star to have a Gaussian age uncertainty with mean $t_*$ and error $\\sigma_*$. We further assume that after a time $t_d$ the disc fraction decays exponentially with the form \n\\begin{equation}\n \\propto \\exp\\left(-\\frac{T_d-t_d}{\\sigma_d}\\right)\n\\end{equation}\nwhere $T_d$ is the disc's lifetime and $\\sigma_d$ is the decay time for the disc fraction. Such a phenomenological form describes the evolution of the protoplanetary disc fraction \\citep[e.g.][]{Mamajek2009}. Now, given that a star's actual age ($T_*$) is the sum of the unknown disc lifetime and the unknown time the planet has been undergoing photoevaporation ($T_p$), we know that $T_*=T_p+T_d$.\nTherefore, the probability distribution for $T_p$ can be written as:\n\\begin{eqnarray}\n P(T_p) &=& \\frac{1}{2\\sigma_d}\\exp\\left[\\frac{\\sigma_*^2+2\\sigma_d\\left(T_p+t_d-t_*\\right)}{2\\sigma_d^2}\\right]\\nonumber \\\\&\\times&\\left\\{1-{\\rm erf}\\left[\\frac{\\sigma_*^2+\\sigma_d\\left(T_p+t_d-t_*\\right)}{\\sqrt{2}\\sigma_*\\sigma_d}\\right]\\right\\}\n\\end{eqnarray}\n\nIn this work we set $t_d=1$~Myr and $\\sigma_d=3$~Myr as this reproduces the fact that all (single) stars host discs at an age of 1~Myr, but that by 10~Myr the vast majority of stars have dispersed their discs. { Thus, we adopt an initial Kelvin-Helmholtz timescale of 10~Myr as the upper limit that can be reached in standard core-accretion theory.} \n\n\\subsubsection{Results}\n\n\\begin{figure*}\n\\centering\n\\includegraphics[width=\\textwidth]{Panel_plot.pdf}\n\\caption{The top left, top right and bottom left panels show joint probability distributions for the initial Kelvin-Helmholtz timescale, core mass and initial envelope mass fractions for a 50$\\pm5$~Myr old planet with a radius of $7\\pm0.7$~R$_\\oplus$ and mass of $5\\pm 0.5$~M$_\\oplus$. The bottom right panel shows the marginalised probability distribution for the initial Kelvin-Helmholtz timescale, with the point indicating the 99\\% lower limit at a value of 168~Myr. } \\label{fig:big_panel}\n\\end{figure*}\n\nIn Figure~\\ref{fig:big_panel} we show joint probability distributions for the initial Kelvin-Helmholtz timescale, initial envelope mass fraction and core mass, as well as the marginalised probability distribution for the initial Kelvin-Helmholtz timescale. 
This analysis has been performed assuming $10\\%$ Gaussian errors on stellar age, radius and mass. Similar to our analysis in the earlier section for our 7~R$_\\oplus$ and 5~M$_\\oplus$ 50~Myr old planet we find that it would require an initial Kelvin-Helmholtz timescale significantly longer than would be predicted by standard core-accretion theory. In this example, we would place a 99\\% lower limit on the initial Kelvin-Helmholtz timescale of $\\sim 170$~Myr. The joint probability distributions are also correlated as expected with our earlier analysis. Lower mass planets require longer initial Kelvin-Helmholtz timescales and higher initial envelope mass fractions. \n\nWe explore the role of planet mass in Figure~\\ref{fig:vary_mass} where we consider measured planet masses between 4 and 8 Earth masses (again for our 7~R$_\\oplus$, 50~Myr old planet with 10\\% measurement uncertainties). We note very few 4~M$_\\oplus$ models are consistent with the measured radius, as most have initial envelope mass fractions $\\sim 1$, making them extremely vulnerable to photoevaporation \\citep{Owen2019}. \n\\begin{figure}\n \\centering\n \\includegraphics[width=\\columnwidth]{Vary_mass.pdf}\n \\caption{Marginalised probability distributions for the initial Kelvin-Helmholtz timescale for a 50$\\pm5$~Myr old planet with a radius of $7\\pm0.7$~R$_\\oplus$. Different lines show different planet masses (with 10\\% uncertainty). For this particular planet a measured mass of $\\lesssim 7$~M$_\\oplus$ would be required to claim evidence of boil-off.}\n \\label{fig:vary_mass}\n\\end{figure}\nAs expected, as the planet mass increases, the bound on the initial Kelvin-Helmholtz timescale decreases (as the higher-mass core is able to hold onto a less massive, and thus higher entropy envelope). \n\nWhile $\\lesssim 10\\%$ measurement uncertainties on planet radius and stellar age\\footnote{Much of the uncertainty in the time a planet has spent photoevaporating is dominated by the uncertainty in the disc dispersal timescale at ages $\\lesssim 100$~Myr.} have been achieved for known young planets, stellar activity may mean obtaining radial-velocity mass measurements at a $\\sim 10$\\% precision is difficult. Therefore, in Figure~\\ref{fig:mass-error} we show how sensitive our constraints on the initial Kelvin-Helmholtz timescale are to mass uncertainties in the range of 5-25\\%. As you would naturally expect, increasing the uncertainty means higher mass planets become consistent with the measured mass, allowing shorter initial Kelvin-Helmholtz timescales. However, even with a tentative $\\sim 25\\%$ mass detection, for this example we would still be able to place a useful constraint on the initial Kelvin-Helmholtz timescale. \n\n\\begin{figure}\n \\centering\n \\includegraphics[width=\\columnwidth]{Vary_Error.pdf}\n \\caption{Marginalised probability distributions for the initial Kelvin-Helmholtz timescale for a 50$\\pm5$~Myr old planet with a radius of $7\\pm0.7$~R$_\\oplus$ and mass of $5$~M$_\\oplus$. Different lines show different uncertainties on the measured planet mass.}\n \\label{fig:mass-error}\n\\end{figure}\n\nThis gives us confidence that measured masses can place useful constraints on the entropy of formation of young transiting planets, even if those mass measurements are tentative. \n\n\\subsection{What age is best?}\n\nOne question that remains is what is the best system age to do this experiment for? 
Obviously, younger planets allow one to constrain shorter and shorter initial Kelvin-Helmholtz timescales, as they have had less chance to cool. Yet, at young ages there are two confounding effects. First, photoevaporation may not have had enough time to significantly control the planet's evolution. Second, at very young ages, the uncertainty in the time the planet has spent photoevaporating after disc dispersal is dominated not by the uncertainty in the age of the system, but rather by the unknown disc lifetime. For example, a 10~Myr old planet could have spent anywhere between 0 and $\\sim 9$~Myr photoevaporating. However, wait too long and the planet will have cooled sufficiently that knowledge of its initial thermodynamic state will have been lost, especially at ages $\\gtrsim 100$~Myr when photoevaporation no longer dominates. \n\nThus, we expect there to be an optimum range of stellar ages at which this experiment is most stringent. In order to assess this, we take the evolution of a planet with a 4.375~M$_\\oplus$ core, with an initial envelope mass fraction and Kelvin-Helmholtz timescale of $0.3$ and $500$~Myr respectively. This model roughly corresponds to our 5~M$_\\oplus$, 7~R$_\\oplus$, 50 Myr old planet studied earlier. We then use our method to constrain its initial Kelvin-Helmholtz timescale as a function of age, assuming 10\\% errors on planet mass, radius and stellar age. The minimum initial Kelvin-Helmholtz timescale (at the 99\\% confidence level) is shown as a function of age for this exercise in Figure~\\ref{fig:best_age}.\n\\begin{figure}\n \\centering\n \\includegraphics[width=\\columnwidth]{Best_age.pdf}\n \\caption{The constraint on the minimum inferred initial Kelvin-Helmholtz timescale as a function of stellar age. }\n \\label{fig:best_age}\n\\end{figure}\nThe system age that is most constraining lies around $\\sim 30$~Myr, where even with errors and a fairly robust confidence level we recover the actual initial Kelvin-Helmholtz timescale within a factor of 3. Ages in the range $\\sim 20-60$~Myr have a constraint that varies by less than $50\\%$ of the absolute maximum. We can clearly see that after 100~Myr the constraint on the initial Kelvin-Helmholtz timescale becomes uninformative. Therefore, for real systems our method should provide meaningful constraints on the initial Kelvin-Helmholtz timescale and hence formation entropy for planets around stars with ages in the range 20-60~Myr. \n\n\n\\section{Application to real planets}\nHaving shown that, by using the photoevaporation model, it is possible to constrain a young planet's entropy of formation, we turn our attention to detected young planets, and consider how their inferred entropy of formation varies as a function of possible measured mass. We choose to focus here on DS Tuc Ab and V1298 Tau c, out of the handful of known young planets, as these are the most strongly irradiated, and therefore most likely to result in strong constraints on their initial Kelvin-Helmholtz contraction timescales. \n\n\n\\subsection{DS Tuc Ab}\n\nDS Tuc Ab \\citep{Benatti2019,Newton2019} is a $5.70\\pm0.17$~R$_\\oplus$ planet\\footnote{We choose to use the stellar and planetary parameters from \\citet{Newton2019}.} discovered around a $45\\pm4$~Myr, 1.01~M$_\\odot$ star, orbiting with a period of 8.1~days. 
Using exactly the same formalism as applied in Section~\\ref{sec:mesa}, we consider the constraints on entropy of formation and initial Kelvin-Helmholtz timescale as a function of planet mass.\nWe find that a measured mass $\\lesssim 4.5$~M$_\\oplus$ would be inconsistent with the current properties of DS Tuc Ab. In Figure~\\ref{fig:limit_DS} we show how the inferred lower limit on the initial Kelvin-Helmholtz timescale varies with both the measured planet mass and the measurement uncertainty. Our results indicate that a measured mass $\\lesssim 7.5$~M$_\\oplus$ with an uncertainty of 10\\% (or $\\lesssim$ 6.5~M$_\\oplus$ with a 20\\% uncertainty) would require an initial Kelvin-Helmholtz timescale longer than na\\\"ively expected, and hence a ``boil-off'' phase. A mass of 7.5~M$_\\oplus$ corresponds to a radial velocity semi-amplitude of $\\sim 2.4$~m~s$^{-1}$, eminently detectable with current instrumentation, stellar noise notwithstanding. \n\n\\begin{figure}\n \\centering\n \\includegraphics[width=\\columnwidth]{DS_TUCAb_result.pdf}\n \\caption{The minimum initial Kelvin-Helmholtz timescale (at the 99\\% confidence limit) for DS Tuc Ab shown as a function of measured planet mass for a 10\\% and 20\\% measured mass uncertainty. A measured mass of $\\lesssim 7-8$M$_\\oplus$ would require a ``boil-off'' phase to explain. }\n \\label{fig:limit_DS}\n\\end{figure}\n\n\\subsection{V1298 Tau c}\n\nThe 23$\\pm4$~Myr old, 1.1~M$_\\odot$, V1298 Tau system contains four large transiting young planets \\citep{David2019}. All the planets have radii between 5 and 11~R$_\\oplus$ and orbit close to the star with periods $\\lesssim 100$~days, indicating the system is likely to be a precursor to the archetypal {\\it Kepler} multi-planet systems. Given that our analysis in Section~\\ref{sec:min_tkh} indicated that planets much closer to their star will provide the most stringent limits (due to more vigorous photoevaporation), we select planet c to investigate here. V1298 Tau c is a $5.59\\pm0.34$~R$_\\oplus$ planet with an orbital period of 8.2~days. Since the V1298 Tau system is a multi-planet system, dynamical arguments have already constrained the sum of planet c's and d's masses to be $7^{+21}_{-5}$~M$_\\oplus$. As for DS Tuc Ab above, we calculate the minimum initial Kelvin-Helmholtz timescale, taken to be the 99\\% lower limit, as a function of measured planet mass (with both 10\\% and 20\\% measurement uncertainties), which is shown in Figure~\\ref{fig:V1298_result}.\n\n\\begin{figure}\n \\centering\n \\includegraphics[width=\\columnwidth]{V1298_result.pdf}\n \\caption{The minimum initial Kelvin-Helmholtz timescale (at the 99\\% confidence limit) for V1298 Tau c shown as a function of measured planet mass for a 10\\% and 20\\% measured mass uncertainty. A measured mass of $\\lesssim 6-7$M$_\\oplus$ would require a ``boil-off'' phase to explain. }\n \\label{fig:V1298_result}\n\\end{figure}\nWe note that measured planet masses $\\lesssim 4$~M$_\\oplus$ are inconsistent with V1298 Tau c's current properties. A mass measurement of $\\lesssim 6.5$~M$_\\oplus$ with a 10\\% uncertainty (or $\\lesssim$5.5~M$_\\oplus$ with a 20\\% uncertainty) would require a boil-off phase to explain. This corresponds to an RV semi-amplitude of $\\sim 2$~m~s$^{-1}$, again within the realm of possibility for radial velocity characterisation (stellar noise notwithstanding). Further, since V1298 Tau is a multi-planet system, this permits the possibility of mass constraints through Transit Timing Variations. 
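\n\nFor reference, the semi-amplitudes quoted above follow from the standard circular-orbit relation (taking $\\sin i \\approx 1$ for a transiting planet),\n\\begin{equation}\n K = \\left(\\frac{2 \\pi G}{P}\\right)^{1\/3} \\frac{M_{\\rm p} \\sin i}{\\left(M_* + M_{\\rm p}\\right)^{2\/3}} \\approx 2.4~{\\rm m~s^{-1}} \\left(\\frac{M_{\\rm p}}{7.5~{\\rm M_\\oplus}}\\right) \\left(\\frac{M_*}{1.01~{\\rm M_\\odot}}\\right)^{-2\/3} \\left(\\frac{P}{8.1~{\\rm d}}\\right)^{-1\/3},\n\\end{equation}\nwhich evaluates to $\\sim 2$~m~s$^{-1}$ for a 6.5~M$_\\oplus$ planet orbiting the 1.1~M$_\\odot$ star V1298 Tau with a period of 8.2~days.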
\n\n\\section{Discussion}\n\nTypical sub-neptune and super-earth planets are expected to be much larger at early ages.\nIn this work, we have shown that with a combined mass and radius measurement of a proto-sub-neptune\/super-earth ($\\lesssim 100$~Myr old), a lower bound can be placed on its initial Kelvin-Helmholtz timescale. This lower bound provides valuable insight into the accretion and early evolution of its H\/He envelope. This lower bound is essentially found by determining how low-mass a H\/He envelope can survive on the planet given that it has been undergoing photoevaporation. For a fixed planet radius, a higher entropy envelope contains less mass and is therefore more vulnerable to mass-loss. In contrast, lower entropy envelopes need to be more massive and thus are able to resist mass-loss for longer. \n\nOne might expect that the younger the planet for which this experiment is done, the tighter the constraint that can be obtained. While this is generally true, at the youngest ages the fact that protoplanetary disc lifetimes vary means one cannot be certain how long a planet has been undergoing mass-loss. Thus, we find that the optimum ages for this experiment are around 20-60~Myr. \n\nFurther, more accurate measurements obviously result in tighter constraints. In this work, we showed that measurement precision in the range of 10-20\\% on radius, mass and age is required to perform this analysis. Current (and previous) transit surveys have already reached and exceeded this requirement for known young planets (e.g. the planets in the V1298 Tau system have radius uncertainties in the range 6-7\\%). Further, the already published young planets have age uncertainties at the 10\\% level. \n\nWhat is difficult to ascertain is whether mass measurements at $\\lesssim 20\\%$ precision are achievable. As discussed in Section~4, the problem is not RV precision. Rather, it is intrinsic stellar variability, particularly due to spots \\citep[e.g.][]{Huerta2008}, which are more prevalent on younger stars. Recent work using Gaussian Processes has shown that it is possible to model intrinsic stellar variability and obtain mass measurements for planets \\citep{Haywood2014,Grunblatt2015}. Using this technique, \\citet{Barragan2019} recently obtained an RV mass measurement for K2-100b, which orbits a moderately young star showing significant intrinsic RV variability. Alternatively, if the planets happen to reside in multi-transiting systems, then TTVs could be used. While we acknowledge the difficulty in obtaining the mass measurements we require, we advocate that the scientific value of constraints on planets' initial entropy is important enough to motivate the effort. \n\nSince we are using photoevaporation to constrain the entropy of formation, our results are sensitive to the accuracy of theoretical photoevaporation calculations. In this work we use the mass-loss rates of \\citet{Owen2012}, which are consistent with the location and slope of the ``evaporation valley'' \\citep{VanEylen2018}\\footnote{As does the core-powered mass-loss model \\citep{Gupta2019,Gupta2019b}.}, and are generally in good agreement with observed outflows \\citep{Owen2012}. Only more theoretical and observational work calibrating photoevaporation models can assess the impact that changing the mass-loss rates may have on entropy constraints. 
\n\n\n\\subsection{Links to planet formation theory}\n\nSince the discovery of sub-neptunes and super-earths, there has been much work on their origin \\citep[e.g.][]{Ikoma2006,Ikoma2012,Lee2014,Venturini2015,Venturini2016,Ginzburg2018}. It is clear that the only way to explain (at least some of) their current densities is to have a planetary core (made of some mixture of rock, iron and ices) surrounded by a H\/He envelope which contains $\\sim 1-10$\\% of the planet's total mass \\citep[e.g.][]{JontofHutter2016}. \n\nSuch a planetary composition would naturally arise through the core-accretion mechanism, whereby the growing solid core accretes a H\/He envelope over the disc's lifetime. In this standard picture, the accreting planetary envelope smoothly connects to the disc, but remains in quasi-hydrostatic and thermal equilibrium. As the envelope cools, it contracts and slowly accretes. This process happens on the envelope's Kelvin-Helmholtz timescale, which, without any strong internal heating sources, quickly equilibrates to roughly the envelope's age. If disc dispersal allows the envelope to remain in quasi-hydrostatic and thermal equilibrium, then a planet's ``initial Kelvin-Helmholtz timescale'' (which we define as the Kelvin-Helmholtz timescale after disc dispersal) is essentially the time it has spent forming, which is bounded by the protoplanetary disc lifetime (e.g. $\\lesssim 10$~Myr). \n\nWhile the basic picture appears to fit, there is growing evidence that the standard core accretion model significantly over-predicts the amount of H\/He a core of a given mass should accrete \\citep{Jankovic2019,Ogihara2020,Alessi2020,Rogers2020}. In some cases the problem is so acute that it is not clear why certain planets did not become giant planets \\citep[e.g.][]{Lee2014,Lee2019}. Several solutions have been proposed to solve this problem. \\cite{Chen2020} suggested that enhanced opacity from dust could slow the atmosphere's accretion. Using numerical simulations, \\citet{Ormel2015} and \\citet{Fung2015} suggested that the envelope was not in quasi-hydrostatic equilibrium with the disc, but rather that high-entropy disc material continually flowed into the envelope, preventing it from cooling. \n\n\\cite{Lee2016} hypothesised instead that these planets do not spend the entire disc lifetime accreting from the nebula, but rather formed rapidly (over a timescale of $10^5-10^6$~years) in the final ``transition'' disc stage of the protoplanetary disc. The much lower gas surface densities and the shorter lifetime of the transition disc phase gave rise to smaller accreted atmospheres. The above modifications to the ``vanilla'' core accretion model will typically result in higher entropy envelopes and therefore initial Kelvin-Helmholtz contraction timescales significantly shorter than the standard value of a few Myr.\n\nAn alternative solution to the over-accretion problem\\footnote{Although it does not prohibit the modifications to core-accretion theory described above.} is the introduction of additional mass-loss. While it does not seem energetically feasible to increase the rates of either photoevaporation or core-powered mass-loss (as they are already fairly efficient), the assumption that the envelope maintains some sort of dynamical equilibrium as the disc disperses seems unlikely. Protoplanetary discs are observed to live and evolve slowly over their 1-10~Myr lifetimes. 
However, the dispersal process is rapid, with a timescale of $\\sim 10^5$~years \\citep[e.g.][]{kenyon95,ercolano11,koepferl13,owenreview2016}. \n\nAs argued by \\citet{OW2016} and \\citet{Ginzburg2016}, this means accreted H\/He envelopes cannot maintain dynamical and thermal balance with the gas in the dispersing disc. As such, the envelopes become over-pressurised and expand hydrodynamically into the forming circumstellar vacuum. This ``boil-off'' process results in mass-loss (in extreme cases up to 90\\% of the initial envelope is lost), but also, importantly, cooling of the interior. This is because the bottleneck for cooling (the radiative-convective boundary) is replaced by an advective-convective boundary, and thermal energy is removed from the interior quickly by advection and mass-loss. Using simulations, \\citet{OW2016} found that after this boil-off process, the remaining envelopes had their entropies reduced. Their Kelvin-Helmholtz contraction timescales at the end of disc dispersal were $\\sim 100$~Myr. \n\nThus, any constraint on the initial Kelvin-Helmholtz contraction timescale of proto-sub-neptunes\/super-earths will be invaluable for constraining and testing our models for their origins. \n\n\\section{Summary}\n\nThe formation of sub-Neptunes and super-Earths is uncertain, and many formation models have been proposed to explain their origin. These formation models are essentially unconstrained by the old, evolved exoplanet population that has a typical age of 3~Gyr. However, various planet formation models predict vastly different entropies at the end of protoplanetary disc dispersal. Characterising the entropies at the end of disc dispersal in terms of initial Kelvin-Helmholtz contraction timescales, these predictions range from $\\lesssim 1$~Myr to $\\gtrsim 100$~Myr.\n\nA young proto-sub-neptune\/super-earth with a measured mass, radius and age can be used to place a lower bound on its initial Kelvin-Helmholtz contraction timescale. This requires the planet to be close enough to its host star that photoevaporation has had an impact on its evolution. This constraint is obtained by determining how low-mass a H\/He envelope can survive on the planet given the mass-loss it has experienced. For a fixed planet radius, a higher entropy envelope contains less mass and is therefore more vulnerable to mass-loss. In contrast, lower entropy envelopes need to be more massive and thus are able to resist mass-loss for longer.\n\nWe have shown that planets around host stars with ages 20-60~Myr are the optimum targets for this kind of analysis. Applying our proposed method to the detected young planets DS Tuc Ab and V1298 Tau c, we show that planet mass constraints (with $\\lesssim 20\\%$ precision) in the range 7-10~M$_\\oplus$ would be consistent with our standard picture of core-accretion. Mass measurements $\\lesssim 7$~M$_\\oplus$ would favour a ``boil-off'' process, where a planet loses mass and its interior cools significantly during dispersal. \n\nWhile precise mass measurements of low-mass planets orbiting young stars are likely to be challenging, the insights into planet formation that could be obtained warrant the effort. \n\n\n\\section*{Acknowledgements}\nJEO is supported by a Royal Society University Research Fellowship and a 2019 ERC starting grant (PEVAP).\n\n\\section*{Data Availability}\nThe code used to create the planet structure models in Section~2.5 is freely available at: \\url{https:\/\/github.com\/jo276\/EvapMass}. 
The custom {\\sc mesa} code used to calculate the planet evolution models in Sections~3 and 4 is freely available at: \\url{https:\/\/github.com\/jo276\/MESAplanet}.\nThe remaining data underlying this article will be shared on reasonable request to the corresponding author.\n\n\n\n\n\\bibliographystyle{mnras}\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section{Introduction}\n\t\\label{sec:introduction}\n\t\n\tModern-day edge devices, with their data acquisition and storage abilities, have pushed the need for distributed computing beyond the realms of data centers. Devices such as mobile phones, sensor systems in vehicles, wearable technology and smart homes, within their limited storage and processing capabilities, can constantly collect data and perform simple computations. However, due to data privacy concerns and limitations on network bandwidth and power, it becomes impractical to transmit all the collected data to a centralized server and conduct centralized training. \n\t\n\tThe nascent field of federated learning \\cite{konevcny2015federated,konevcny2016federated,brendan2017aguera,mohri2019agnostic,li2020federated} tries to address these concerns. As described in \\cite{konevcny2016federated}, federated learning is a machine learning setting where the goal is to train a high-quality centralized model with training data distributed over a large number of clients. Unlike data centers, the clients collect data samples independently but in a non-i.i.d. fashion. The clients may be highly unbalanced, i.e., the number of samples per client may vary significantly. The clients may also have hardware-related constraints. Although the number of clients could be quite large, each client is typically a simple device which has access to a very small number of data samples and can only conduct very basic computations due to limitations on its processing and power capabilities. Furthermore, since battery power is at a premium, the communication between the client and the centralized server acts as a major bottleneck. Due to these constraints, it is common to encounter straggling and faulty clients in the federated learning setting. \n\t\n\tIn this work, we study the problem of exact support recovery of sparse linear regression in federated learning. \\cite{wainwright2009info} provided an information theoretic lower bound for sparse linear regression. They showed that, in a completely centralized setting where all the data resides in a single server, $\\mathcal{O}(s \\log d)$ samples are necessary for exact support recovery of a $d$-dimensional parameter vector with $s$ non-zero entries. In our setting, none of the clients has access to the number of data samples necessary for exact support recovery or possesses the computational capability to run complex algorithms. Furthermore, we only allow for one-shot communication between the clients and the centralized server, i.e., clients can send information to the centralized server only once. We propose a novel yet simple algorithm for this setting and show that local clients can collaboratively recover the exact support of the sparse linear regression model with provable theoretical guarantees. \n\t\n\t\\paragraph{Related work.}\n\t\n\tDespite being a new research area, there has been a lot of interest in the field of federated learning. On the experimental side, \\cite{konevcny2015federated} were the first to formally define federated learning and proposed an algorithm with encouraging experimental results. 
\\cite{konevcny2016federated} developed strategies to improve the communication efficiency of federated learning. \\cite{brendan2017aguera} proposed a communication-efficient algorithm for deep networks. Similarly, \\cite{yurochkin2019bayesian} developed a novel framework for federated learning with neural networks and \\cite{wang2020federated} proposed a federated learning algorithm using matched averaging for neural networks. \\cite{bhagoji2019analyzing} empirically analyzed adversarial attacks on federated learning settings. They specifically studied the threat of model poisoning, where the adversary controls a small number of malicious clients (usually 1)\n\twith the aim of causing the global model to misclassify. \\cite{li2020fair} studied fair resource allocation in federated learning. On the theoretical side, \\cite{he2018cola} proposed a new decentralized training algorithm with convergence rate guarantees for linear classification and regression models. \\cite{smith2017cocoa} presented a communication-efficient decentralized framework which covers general non-strongly-convex regularizers, including problems like lasso, with convergence rate guarantees. They also describe a possible extension of their method to one-shot communication schemes. \\cite{smith2017federated} proposed a multi-task learning based approach for federated learning with convergence rate guarantees which is tolerant to client failure and can handle clients which lag in sending information to the centralized server (also known as straggling clients). \\cite{mohri2019agnostic} proposed a client-distribution-agnostic framework for federated learning. They also provided Rademacher-based generalization bounds for their proposed approach. \n\t\n\t\\paragraph{Our Contribution.}\n\tAll the works mentioned above are interesting in their own domains; however, our contribution is mostly theoretical. The existing theoretical works provide guarantees on convergence rates (which guarantee a small mean squared error on the training set given enough iterations) or generalization bounds (which guarantee a small mean squared error on the testing set given enough samples). However, the final solution may not exactly match the true parameter vector. In this work, we provide provable theoretical guarantees for exact recovery of the support of the true sparse parameter vector of linear regression in federated learning.\n\tSupport recovery, i.e., correctly detecting the zero and nonzero entries of the parameter vector, is arguably a challenging task. In particular, we show that for a $d$-dimensional $s$-sparse parameter vector, $\\mathcal{O}(\\log d)$ clients and $\\mathcal{O}(s^2 \\log s)$ data samples per client are sufficient to recover the exact support. If the predictor variables are mutually independent, then we can do exact support recovery with only $\\mathcal{O}(s)$ data samples per client. Notice that in this case the aggregate sample complexity is $\\mathcal{O}(s\\log d)$, which matches the optimal sample complexity of the centralized setting \\cite{wainwright2009info,wainwright2009sharp}. We propose a simple yet effective method for exact support recovery and prove that the method is \\emph{correct} and efficient in terms of \\emph{time} and \\emph{sample complexity}. Our method has the following key properties:\n\t\\begin{itemize}\n\t\t\\item \\textbf{Simplicity: } We do not solve any optimization problem at the client level. 
All the computations are simple and let us use our method in devices with low computational power.\n\t\t\\item \\textbf{One shot communication and privacy: } Our method is communication efficient. We only need one round communication of at most $d-$bits from the client to the centralized server. As the communication is kept to a minimum, very little information about the client is passed to the centralized server. \n\t\t\\item \\textbf{Fault tolerance and aversion to model poisoning and straggling: } Our method is naturally robust to client node failure and averse to rogue and straggling clients.\n\t\\end{itemize} \n\t\n\t\n\t\n\t\\section{Preliminaries}\n\t\\label{sec:preliminaries}\n\tIn this section, we collect the notation which we use throughout this paper. We also formally define the support recovery problem for sparse linear regression in federated learning.\n\t\n\t\\subsection{Notation and Problem Setup}\n\t\\label{subsec:notation and problem setup}\n\t\n\tLet $w^* \\in \\mathbb{R}^d$ be a $d-$dimensional parameter with sparsity $s$, i.e., only $s$ out of $d$ entries of $w^*$ are non-zero. We use $\\seq{r}$ as a shorthand notation to denote the set $\\{1,2,\\cdots,r\\}$. Let $S^*$ be the true support set, i.e., $S^* = \\{ r | w^*_r \\ne 0, r \\in \\seq{d} \\}$. We denote the corresponding complementary non-support set as $S^*_c = \\{ r | w^*_r = 0, r \\in \\seq{d} \\}$. Assume there are $g$ clients, each with $n_i$ independent samples, for $i \\in [g]$. Note that the data distribution across $g$ clients need not be identical. Each client $i \\in \\seq{g}$ contains each data sample in the format $(X_i, y_i)$ where $X_i \\in \\mathbb{R}^d$ are the predictor variables and $y_i \\in \\mathbb{R}$ is the response variable. The data generation process for each client $i \\in \\seq{g}$ is as follows:\n\t\\begin{align}\n\t\\label{eq:generative model}\n\t\\begin{split}\n\ty_i = X_i^\\intercal w^* + e_i \\; ,\n\t\\end{split}\n\t\\end{align} \n\twhere $e_i$ is a zero mean sub-Gaussian additive noise with variance proxy $\\eta_i^2$, where $\\eta_i > 0$. Note that all the clients share the same parameter vector $w^*$. The $j$-th entry of $X_i$ is denoted by $X_{ij}, \\forall i\\in \\seq{g}, j \\in \\seq{d}$. Each entry $X_{ij}$ of $X_i$ is a zero mean sub-Gaussian random variable with variance proxy $\\rho_i^2$, where $\\rho_i > 0$. We denote covariance matrix for $X_i$ as $\\Sigma^i \\in \\mathbb{R}^{d \\times d}$ with diagonal entries $\\Sigma^i_{jj} \\equiv {\\sigma^i_{jj}}^2, \\forall j \\in \\seq{d}$ and non-diagonal entries $\\Sigma^i_{jk} \\equiv \\sigma^i_{jk}, \\forall j,k \\in \\seq{d}, j \\ne k$. If predictor variables are mutually independent then $\\sigma^i_{jk} = 0, \\forall i \\in \\seq{g}, j,k \\in \\seq{d}, j \\ne k$. The $t$-th sample of the $i$-th client is denoted by $(X_i^t, y_i^t), \\forall i \\in \\seq{g}, t \\in \\seq{n_i}$. We note that $X_i^t \\in \\mathbb{R}^d$ and $y_i^t \\in \\mathbb{R}$ and denote $j$-th entry of $X_i^t$ as $X_{ij}^t$. Notice that the data distributions for $(X_i, y_i)$ can vary a lot across the clients by varying $\\rho_i$ and $\\eta_i$, as well as the specific sub-Gaussian probability distribution. The class of sub- Gaussian variates includes for instance Gaussian variables, any bounded random variable (e.g., Bernoulli, multinomial, uniform), any random variable with strictly log-concave density, and any finite mixture of sub-Gaussian variables. Similarly, data samples can be distributed unevenly across the clients by varying $n_i$. 
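\n\t\n\tTo make this generative process concrete, the short sketch below (in Python) simulates a handful of heterogeneous clients sharing the same sparse $w^*$. Gaussian predictors and noise are used purely as one admissible sub-Gaussian choice, and the per-client values of $(n_i, \\rho_i, \\eta_i)$ are illustrative assumptions rather than quantities used elsewhere in this paper.\n\t\\begin{verbatim}\nimport numpy as np\n\ndef make_client_data(n_i, w_star, rho_i, eta_i, rng):\n    # Draw n_i i.i.d. samples (X_i, y_i) for one client from\n    # y_i = X_i^T w* + e_i, using Gaussian predictors (variance\n    # proxy rho_i) and Gaussian noise (variance proxy eta_i).\n    d = w_star.shape[0]\n    X = rng.normal(scale=rho_i, size=(n_i, d))\n    e = rng.normal(scale=eta_i, size=n_i)\n    return X, X @ w_star + e\n\nrng = np.random.default_rng(0)\nd, s = 50, 3\nw_star = np.zeros(d)\nw_star[:s] = 1.0                 # shared s-sparse parameter vector\n\n# unbalanced, non-identically distributed clients\nclients = [make_client_data(n_i, w_star, rho_i, eta_i, rng)\n           for (n_i, rho_i, eta_i) in [(10, 1.0, 0.5),\n                                       (25, 2.0, 1.0),\n                                       (15, 0.5, 0.2)]]\n\\end{verbatim}\n\t\n\t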
In subsequent sections, we use $\\mathbb{P}(A)$ to denote the probability of the event $A$ and $\\mathbb{E}(A)$ to denote the expectation of the random variable $A$.\n\t\n\t\\subsection{Problem Statement}\n\t\\label{subsec: problem statement}\n\t\n\tFor our problem, we assume that $n_i < \\mathcal{O}(s \\log d), \\forall i \\in \\seq{g}$. Otherwise, the support can be trivially recovered by using compressed sensing methods in the client with $n_i = \\mathcal{O}(s \\log d)$, which is the order of the necessary and sufficient number of samples for exact support recovery in the linear regression setup \\cite{wainwright2009info,wainwright2009sharp}. Furthermore, we assume that each of our clients can only do very simple computations and can only do one-shot communication with the centralized server, i.e., each client can only send at most $d$-bits to the centralized server. Considering the above requirements, we are interested in answering the following question:\n\t\\begin{problem}[Exact Support Recovery]\n\t\t\\label{prob:exact support recovery}\n\t\tGiven that each client contains $n_i < \\mathcal{O}(s\\log d)$ data samples generated through the process described in equation \\eqref{eq:generative model}, is it possible to efficiently recover the true support of the $s$-sparse shared parameter vector $w^* \\in \\mathbb{R}^d$, with provable theoretical guarantees, by collecting $d$-bits of information from every client only once?\n\t\\end{problem}\n\tEfficiency in exact recovery means that the sample complexity per client should be strictly less than $\\mathcal{O}(s\\log d)$ and that our algorithm should have polynomial time complexity and should also be easy to implement. \n\t\n\t\\section{Our Method}\n\t\\label{sec:methodology}\n\t\n\tIn this section, we present a simple algorithm to solve Problem \\ref{prob:exact support recovery}. Our main idea is that the estimation at the client level can be incorrect for every client, but this information can still be aggregated in a careful manner to compute the true support. \n\t\n\t\\subsection{Client Level Computations}\n\t\\label{subsec:client level computations}\n\t\n\tEach client tries to estimate the support of $w^*$ using the $n_i$ independent samples available to it. As mentioned previously, $n_i, \\forall i \\in \\seq{g}$ is not sufficient to compute the correct support of $w^*$ using any possible method \\cite{wainwright2009info}. Let $\\hat{w}_i \\in \\mathbb{R}^d$ be the estimate of $w^*$ computed by each client $i$ and let $S_i$ be the support of $\\hat{w}_i$. Each client communicates its computed support (at most $d$ bits) to the centralized server, which then computes the final support for $w^*$. The centralized server receives $S_i$ from each client and computes the final support $S = f(S_1, S_2,\\cdots,S_g)$. Each client $i, \\forall i \\in \\seq{g}$ computes $\\hat{w}_i$ in the following way:\n\t\\begin{align}\n\t\\label{eq:what}\n\t\\begin{split}\n\t\\forall i \\in \\seq{g}, j \\in \\seq{d},\\quad \\hat{w}_{ij} = \\frac{1}{\\hat{\\sigma}_{ij}} \\text{sign}(\\hat{\\alpha}_{ij}) \\max (0, |\\hat{\\alpha}_{ij}| - \\lambda) \\; ,\n\t\\end{split}\n\t\\end{align} \n\twhere $\\hat{w}_{ij}$ is the $j$-th entry of $\\hat{w}_i$ and $\\lambda > 0$ is a regularization parameter. We present the exact procedure to compute a feasible $\\lambda$ in later sections. 
We also define $\\hat{\\sigma}_{ij}$ and $\\hat{\\alpha}_{ij}$ as follows: \n\t\\begin{align}\n\t\\label{eq:sigma_alpha}\n\t\\begin{split}\n\t\\hat{\\sigma}_{ij} \\triangleq \\frac{1}{n_i} \\sum_{t=1}^{n_i} (X_{ij}^t)^2,\\quad \\hat{\\alpha}_{ij} \\triangleq \\frac{1}{n_i} \\sum_{t=1}^{n_i} y_i^t X_{ij}^t\n\t\\end{split}\n\t\\end{align}\n\tNote that these are simple calculations and can be done in $\\mathcal{O}(d n_i)$ run time at each client. If $n_i$ can be kept small (which we will show later), this can be done even by a device with low computational ability. The choice of this exact form of $\\hat{w}_{ij}$ in equation \\eqref{eq:what} is not arbitrary. To get the intuition behind our choice, consider the following $\\ell_1$-regularized (sparse) linear regression problem at each client.\n\t\\begin{align}\n\t\\label{eq:serverlasso}\n\t\\begin{split}\n\t(\\forall i\\in\\seq{g}), \\quad \\hat{w}_i = \\arg\\min_w \\frac{1}{n_i}\\sum_{t=1}^{n_i} (w^\\intercal X_i^t - y_i^t)^2 + \\lambda \\| w \\|_1 \\; ,\n\t\\end{split}\n\t\\end{align}\n\twhere $\\| \\cdot \\|_1$ denotes the $\\ell_1$ norm of a vector. The construction of $\\hat{w}_i$ in equation \\eqref{eq:what} is the exact solution to optimization problem \\eqref{eq:serverlasso} if predictor variables, i.e., the entries in $X_{i}$, are assumed to be uncorrelated. Notice how the solution provided in \\eqref{eq:what} avoids any computation (or estimation) of the covariance matrix which, in any case, would be incorrect if each client has access to only a few samples. Each client $i$ sends the support $S_i = \\{ j | \\hat{w}_{ij} \\ne 0, j \\in \\seq{d} \\}$ of $\\hat{w}_i$ to the centralized server. Note that even in the worst case scenario, each client only sends $d$ bits to the centralized server.\n\t\n\t\\subsection{Information Aggregation and Constructing the Final Support}\n\t\\label{subsec:constructing final support S}\n\t\n\tWe aggregate supports $S_i, \\forall i \\in \\seq{g}$ from all the clients and construct the final support. Before we get to the construction of the final support, we define a random variable $R_{ij}, \\forall i \\in \\seq{g}, j \\in \\seq{d}$ which takes value $1$ if $j \\in S_i$ and $0$ otherwise.\n\n\tThus, random variable $R_{ij}$ indicates whether entry $j$ is in the support $S_i$ of client $i$. Using the random variables $R_{ij}$, we construct the final support $S$ by computing the median of $R_{ij}$ across $i \\in \\seq{g}$. If the median is $1$ then we conclude that $j$ is in the support otherwise we conclude that $j$ is not in the support. More formally, we define a random variable $R_j \\triangleq \\frac{1}{g}\\sum_{i=1}^g R_{ij}$ and if $R_j \\geq \\frac{1}{2}$, then we conclude that $j \\in S$. Otherwise, if $R_j < \\frac{1}{2}$, then we conclude that $j \\notin S$. 
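\n\t\n\tAs an illustration only, the following Python sketch mirrors the two steps above: each client forms the closed-form estimate of equation \\eqref{eq:what} from the statistics in \\eqref{eq:sigma_alpha} and transmits its $d$-bit support indicator, and the centralized server takes the entrywise majority (median) vote. The toy data, the value of $\\lambda$ and the use of Gaussian samples are illustrative assumptions, not part of our formal analysis.\n\t\\begin{verbatim}\nimport numpy as np\n\ndef client_support(X, y, lam):\n    # Client-side estimate w_hat_i built from the simple statistics\n    # sigma_hat_ij and alpha_hat_ij; only the d-bit indicator of its\n    # non-zero entries is sent to the centralized server.\n    sigma_hat = np.mean(X ** 2, axis=0)\n    alpha_hat = np.mean(y[:, None] * X, axis=0)\n    w_hat = np.sign(alpha_hat) * np.maximum(0.0, np.abs(alpha_hat) - lam) / sigma_hat\n    return (w_hat != 0).astype(int)\n\ndef aggregate_support(R):\n    # Server-side median (majority) vote over the g-by-d matrix R.\n    return [int(j) for j in np.where(np.mean(R, axis=0) >= 0.5)[0]]\n\nrng = np.random.default_rng(1)\nd, s, g, n_i, lam = 50, 3, 12, 30, 0.5\nw_star = np.zeros(d)\nw_star[:s] = 1.0\nR = []\nfor _ in range(g):\n    X = rng.normal(size=(n_i, d))\n    y = X @ w_star + 0.1 * rng.normal(size=n_i)\n    R.append(client_support(X, y, lam))\nprint(aggregate_support(np.array(R)))   # recovers [0, 1, 2] w.h.p.\n\\end{verbatim}\n\t\n\t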
The above procedure can be compactly written as the following algorithms running in clients and centralized server:\n\t\n\t\\begin{algorithm}[H]\n\t\t\\label{algo:getExactSupport}\n\t\t\\begin{minipage}{0.5\\textwidth}\n\t\t\t\\begin{algorithm}[H]\n\t\t\t\t\\SetKwInOut{Input}{Input}\n\t\t\t\t\\SetKwInOut{Output}{Output}\n\t\t\t\t\\tcp{Runs in client $i, \\forall i \\in \\seq{g}$} \n\t\t\t\t\\Input{Data samples $(X_i^t, y_i^t), \\forall t \\in \\seq{n_i}$ }\n\t\t\t\t\\Output{Estimated support for shared parameter $w^*$}\n\t\t\t\t$R_i \\leftarrow \\{0\\}^d$ \\;\n\t\t\t\t\\For{each $j \\in \\seq{d}$}{\n\t\t\t\t\tCompute $\\hat{w}_{ij}$ using equation \\eqref{eq:what} and \\eqref{eq:sigma_alpha} \\;\n\t\t\t\t\t\\If{$\\hat{w}_{ij} \\ne 0$}{ \n\t\t\t\t\t\t$R_{ij} \\leftarrow 1 $ \\;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tSend $R_i$ to centralized server \\;\n\t\t\t\\end{algorithm}\n\t\t\\end{minipage}%\n\t\n\t\t\\begin{minipage}{0.5\\textwidth}\n\t\t\t\\vspace{-1.8\\baselineskip}\n\t\t\t\\begin{algorithm}[H]\n\t\t\t\t\\SetKwInOut{Input}{Input}\n\t\t\t\t\\SetKwInOut{Output}{Output}\n\t\t\t\t\\tcp{Runs in centralized server}\n\t\t\t\t\\Input{$R_i, \\forall i \\in \\seq{g}$}\n\t\t\t\t\\Output{True support $S$ for shared parameter $w^*$}\n\t\t\t\t$S \\leftarrow \\{\\}$ \\;\n\t\t\t\t\\For{each $j \\in \\seq{d}$}{ \n\t\t\t\t\tCompute $R_j = \\frac{1}{g} \\sum_{i=1}^g R_{ij}$ \\;\n\t\t\t\t\t\\If{$R_j \\geq \\frac{1}{2}$}{\n\t\t\t\t\t\t$S \\leftarrow S \\cup \\{j\\}$ \\;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\\end{algorithm}\n\t\t\\end{minipage}\n\t\t\\caption{getExactSupport}\n\t\\end{algorithm}\n\n\t\n\t\\section{Main Results and Analysis}\n\t\\label{sec:analysis}\n\t\n\tIn this section, we describe and analyze our theoretical results. We present our results in two different settings. In the first setting, we assume that predictor variables are mutually independent. We tackle the more general case of correlated predictors in the second setting.\n\t\n\t\\subsection{Mutually Independent Predictors}\n\t\\label{subsec:mutually independent predictors}\n\t\n\tIn this setting, predictor variables are mutually independent of each other in all the clients, i.e., $\\forall i \\in \\seq{g}$, $\\mathbb{E}(X_{ij} X_{ik}) = 0, \\forall j, k \\in \\seq{d}, j \\ne k$. In this setting, we state the following result:\n\t\\begin{theorem}[Mutually Independent Predictors]\n\t\t\\label{thm:mutually independent predictors}\n\t\tFor the federated support learning for linear regression as described in Section \\ref{sec:preliminaries} where predictor variables are mutually independent of each other, if for some $0 < \\delta < 1$, each of the $g = \\mathcal{O}(\\log d)$ client has $n_i = \\mathcal{O}(\\frac{1}{\\delta^2})$ data samples and if for each $i \\in \\seq{g}$ and $j \\in S^*$,\n\t\t\\begin{align*}\n\t\t\\begin{split}\n\t\t8 \\delta \\rho_i^2 \\sqrt{\\sum_{k\\in S^*} w_k^2} + 8 |\\eta_i\\rho_i|\\delta < \\lambda < |w_j^* {\\sigma_{jj}^i}^2| - 8 |w_j^*| \\rho_i^2 \\delta - 8 \\rho_i^2 \\sqrt{\\sum_{k\\in S^*, k \\ne j} {w_k^*}^2} \\delta - 8 |\\eta_i\\rho_i| \\delta \\;,\n\t\t\\end{split}\n\t\t\\end{align*} \n\t\tthen Algorithm \\ref{algo:getExactSupport} recovers the exact support for the shared parameter vector $w^*$. 
\n\t\\end{theorem} \n\tBy taking $\\delta = \\mathcal{O}(\\frac{1}{\\sqrt{s}})$, we get the following corollary:\n\t\\begin{corollary}\n\t\t\\label{cor:mutually independent predictors}\n\t\tFor the federated support learning for linear regression as described in Section \\ref{sec:preliminaries} where predictor variables are mutually independent of each other, if for some constant $0 < K < \\sqrt{s}$, each of the $g = \\mathcal{O}(\\log d)$ client has $n_i = \\mathcal{O}(s)$ data samples and if for each $i \\in \\seq{g}$ and $j \\in S^*$,\n\t\t\\begin{align*}\n\t\t\\begin{split}\n\t\t&8 K \\rho_i^2 \\sqrt{\\frac{\\sum_{k\\in S^*} {w_k^*}^2}{s}} + 8 K \\frac{|\\eta_i\\rho_i|}{\\sqrt{s}} < \\lambda < |w_j^* {\\sigma_{jj}^i}^2| - 8 K \\frac{|w_j^*| \\rho_i^2}{\\sqrt{s}} - 8 K \\rho_i^2 \\sqrt{\\frac{\\sum_{k\\in S^*, k \\ne j} {w_k^*}^2}{s}} - \\\\\n\t\t& 8 K \\frac{|\\eta_i\\rho_i|}{\\sqrt{s}} \\;,\n\t\t\\end{split}\n\t\t\\end{align*} \n\t\tthen Algorithm \\ref{algo:getExactSupport} recovers the exact support for the shared parameter vector $w^*$.\n\t\\end{corollary}\n\tThe choice of such value of $\\delta$ is to subdue the growth of the $\\sqrt{\\sum_{k\\in S^*} {w_k^*}^2}$ term which approximately grows as $\\mathcal{O}(\\sqrt{s})$. Later on, we will empirically show that such a choice leads to a feasible range for $\\lambda$. Also observe that, the overall sample complexity for our algorithm is $\\mathcal{O}(s \\log d)$ which matches the optimal sample complexity for sparse linear regression\\cite{wainwright2009info,wainwright2009sharp}, i.e., even if we have access to all the samples in a centralized server, we can not have a better sample complexity guarantee for support recovery. \n\t\n\t\\subsubsection{Proof of Theorem \\ref{thm:mutually independent predictors}}\n\t\\label{subsubsec: proof of theorem mutually independent predictors}\n\t\n\t\\begin{proof}\n\t\t\\label{proof:theorem mutually independent predictors}\n\t\tRecall that $R_j = \\frac{1}{g} \\sum_{i=1}^g R_{ij}$ where $R_{ij}$ is defined in Section \\ref{subsec:constructing final support S}. We prove that, with high probability, $R_j \\geq \\frac{1}{2}, \\forall j \\in S^*$ and $R_j < \\frac{1}{2}, \\forall j \\in S^*_c$. We will provide the proof in two parts. First, we deal with entries $j$ which are in the support of $w^*$, i.e., $j \\in S^*$ and then we will deal with $j \\in S^*_c$.\n\t\t\n\t\t\\paragraph{For entries $j$ in support $S^*$.}\n\t\t\n\t\tWe begin our proof by first stating the following lemma.\n\t\t\\begin{lemma}\n\t\t\t\\label{lem:support mcdiarmid}\n\t\t\tFor $j \\in S^*$, let $\\mathbb{E}(R_j) > \\frac{1}{2}$, then $R_j \\geq \\frac{1}{2}$ with probability at least $1 - 2 \\exp( - 2g (-\\frac{1}{2} + \\mathbb{E}(R_j))^2)$.\n\t\t\\end{lemma}\n\t\t\n\t\tNext we show that for $j \\in S^*$, $\\mathbb{E}(R_j)$ is indeed greater than $\\frac{1}{2}$. To that end, we provide the result of the following lemma.\n\t\t\\begin{lemma}\n\t\t\t\\label{lem:support bound on E(Rj)}\n\t\t\tFor $j \\in S^*$ and some $0 < \\delta \\leq 1$, if predictors are mutually independent of each other and $ 0 < \\lambda < |w_j^* {\\sigma_{jj}^i}^2| - 8 |w_j^*| \\rho_i^2 \\delta - 8 \\rho_i^2 \\sqrt{\\sum_{k\\in S^*, k \\ne j} {w_k^*}^2} \\delta - 8 |\\eta_i\\rho_i| \\delta $\n\t\t\tthen $ \\mathbb{E}(R_j) \\geq 1 - \\frac{6}{g} \\sum_{i=1}^g \\exp(-n_i\\delta^2) $. 
Furthermore, for $n_i = \\mathcal{O}(\\frac{1}{\\delta^2})$, we have $\\mathbb{E}(R_j) > \\frac{1}{2}$.\n\t\t\\end{lemma}\n\t\t\n\t\t\\paragraph{For entries $j$ in non-support $S^*_c$.}\n\t\tSimilar to the entries in the support, we begin this part by stating the following result for entries in the non-support.\n\t\t\\begin{lemma}\n\t\t\t\\label{lem:nonsupport mcdiarmid}\n\t\t\tFor $j \\in S^*_c$, let $\\mathbb{E}(R_j) < \\frac{1}{2}$, then $R_j \\leq \\frac{1}{2}$ with probability at least $1 - 2 \\exp( - 2g (\\frac{1}{2} - \\mathbb{E}(R_j))^2)$.\n\t\t\\end{lemma}\n\t\tIt remains to show that for $j \\in S^*_c$, $\\mathbb{E}(R_j)$ is smaller than $\\frac{1}{2}$. In particular, we use the result from the following lemma.\n\t\t\\begin{lemma}\n\t\t\t\\label{lem:nonsupport bound E(Rj)}\n\t\t\tFor $j \\in S^*_c$ and $0 < \\delta \\leq 1$, if predictors are mutually independent of each other and if $ \\lambda > 8 \\delta \\rho_i^2 \\sqrt{\\sum_{k\\in S^*} w_k^2} + 8 |\\eta_i\\rho_i|\\delta $\n\t\t\tthen $\\mathbb{E}(R_j) \\leq \\frac{4}{g} \\sum_{i=1}^g \\exp(-n_i \\delta^2)$. Furthermore, for $n_i = \\mathcal{O}(\\frac{1}{\\delta^2})$, we have $\\mathbb{E}(R_j) < \\frac{1}{2}$.\n\t\t\\end{lemma} \n\t\tResults from Lemma \\ref{lem:support bound on E(Rj)} and \\ref{lem:nonsupport bound E(Rj)} make sure that Lemma \\ref{lem:support mcdiarmid} and \\ref{lem:nonsupport mcdiarmid} hold. We would like these results to hold across all $j \\in \\seq{d}$. This implies that we need a union bound across all the $d$ predictors. Thus, having $g = \\mathcal{O}(\\log d)$ ensures that our results hold for all entries in the support and non-support with high probability. \n\t\\end{proof}\n\t\n\t\\subsection{Correlated predictors}\n\t\\label{subsec:correlated predictors}\n\t\n\tNow that we have dealt with mutually independent predictors, we focus on correlated predictors in this section. As described previously, the covariance matrix for $X_i$ is denoted by $\\Sigma^i \\in \\mathbb{R}^{d \\times d}$ with diagonal entries $\\Sigma^i_{jj} \\equiv {\\sigma^i_{jj}}^2, \\forall j \\in \\seq{d}$ and non-diagonal entries $\\Sigma^i_{jk} \\equiv \\sigma^i_{jk}, \\forall j,k \\in \\seq{d}, j \\ne k$. Some of the results from the previous subsection can be used for this setting as well. However, correlation between predictors affects some results. Below, we state the main results for this setting before proving them formally. 
\n\t\\begin{theorem}[Correlated Predictors]\n\t\t\\label{thm:correlated predictors}\n\t\tFor the federated support learning for linear regression as described in Section \\ref{sec:preliminaries}, if for some $0 < \\delta < \\frac{1}{\\sqrt{2}}$, each of the $g = \\mathcal{O}(\\log d)$ client has $n_i = \\mathcal{O}(\\frac{1}{\\delta^2} \\log s)$ data samples and if for each $i \\in \\seq{g}$,\n\t\t\\begin{align*}\n\t\t\\begin{split}\n\t\t&(\\forall j \\in S^*_c) |\\sum_{k\\in S^*} w^*_k \\sigma^i_{jk}| + \\sum_{k\\in S^*} 8 \\sqrt{2} |w_k^*| (1 + 4 \\max_j \\frac{\\rho_i^2}{{\\sigma^i_{jj}}^2}) \\max_j {\\sigma^i_{jj}}^2 \\delta + 8 |\\eta_i \\rho_i| \\delta < \\lambda \\\\\n\t\t&< (\\forall j \\in S^*) |(w^*_j{\\sigma^i_{jj}}^2 + \\sum_{k\\in S^*, k\\ne j} w^*_k \\sigma^i_{jk})| - 8 |w_j^*| \\rho_i^2 \\delta - \\sum_{k\\in S^*, k\\ne j} 8 \\sqrt{2} |w_k^*| (1 + 4 \\max_j \\frac{\\rho_i^2}{{\\sigma^i_{jj}}^2}) \\\\\n\t\t&\\max_j {\\sigma^i_{jj}}^2 \\delta - 8 |\\eta_i \\rho_i| \\delta \\;,\n\t\t\\end{split}\n\t\t\\end{align*} \n\t\tthen Algorithm \\ref{algo:getExactSupport} recovers the exact support for the shared parameter vector $w^*$. \n\t\\end{theorem} \n\tBy taking $\\delta = \\mathcal{O}(\\frac{1}{s})$, we get the following corollary:\n\t\\begin{corollary}\n\t\t\\label{cor:correlated predictors}\n\t\tFor the federated support learning for linear regression as described in Section \\ref{sec:preliminaries}, if for some constant $0 < K < \\frac{s}{\\sqrt{2}}$, each of the $g = \\mathcal{O}(\\log d)$ client has $n_i = \\mathcal{O}(s^2 \\log s)$ data samples and if for each $i \\in \\seq{g}$,\n\t\t\\begin{align*}\n\t\n\t\t\\begin{split}\n\t\t&(\\forall j \\in S^*_c) |\\sum_{k\\in S^*} w^*_k \\sigma^i_{jk}| + \\sum_{k\\in S^*} 8 \\sqrt{2} |w_k^*| (1 + 4 \\max_j \\frac{\\rho_i^2}{{\\sigma^i_{jj}}^2}) \\max_j {\\sigma^i_{jj}}^2 \\frac{K}{s} + 8 |\\eta_i \\rho_i| \\frac{K}{s} < \\lambda \\\\\n\t\t&< (\\forall j \\in S^*) |(w^*_j{\\sigma^i_{jj}}^2 + \\sum_{k\\in S^*, k\\ne j} w^*_k \\sigma^i_{jk})| - 8 |w_j^*| \\rho_i^2 \\frac{K}{s} - \\sum_{k\\in S^*, k\\ne j} 8 \\sqrt{2} |w_k^*| (1 + 4 \\max_j \\frac{\\rho_i^2}{{\\sigma^i_{jj}}^2}) \\\\\n\t\t&\\max_j {\\sigma^i_{jj}}^2 \\delta - 8 |\\eta_i \\rho_i| \\frac{K}{s} \\;,\n\t\t\\end{split}\n\t\t\\end{align*} \n\t\tthen Algorithm \\ref{algo:getExactSupport} recovers the exact support for the shared parameter vector $w^*$.\n\t\\end{corollary}\n\t\n\tAs with the previous case, the choice of such a value of $\\delta$ is to subdue the growth of terms which grow as $\\mathcal{O}(s)$. In our experiments, this leads to a feasible range for $\\lambda$. In this case, the overall sample complexity for our algorithm is $\\mathcal{O}(s^2 \\log s \\log d)$ which only differs by a factor of $s\\log s$ from the optimal sample complexity for support recovery in sparse linear regression in the centralized setting where all the data resides in a single server\\cite{wainwright2009info,wainwright2009sharp}.\n\t\n\t\\subsubsection{Proof of Theorem \\ref{thm:correlated predictors}}\n\t\\label{subsubsec:proof of theorem correlared predictors}\n\t\n\t\\begin{proof}\n\t\t\\label{proof:theorem correlared predictors}\n\t\tRecall that $R_j = \\frac{1}{g} \\sum_{i=1}^g R_{ij}$ where $R_{ij}$ is defined in Section \\ref{subsec:constructing final support S}. We will again prove that, with high probability, $R_j \\geq \\frac{1}{2}, \\forall j \\in S^*$ and $R_j < \\frac{1}{2}, \\forall j \\in S^*_c$. 
Some of the results from the previous Section \\ref{proof:theorem mutually independent predictors} follow without any changes. We provide new results for the remaining parts. Like before first, we deal with entries $j$ which are in the support of $w^*$, i.e., $j \\in S^*$ and then we will deal with $j \\in S^*_c$. \n\t\t\n\t\t\\paragraph{For entries $j$ in support $S^*$.}\n\t\t\n\t\tWe observe that Lemma \\ref{lem:support mcdiarmid} holds even in this case. Thus, we start our proof by stating the following lemma.\n\t\t\\begin{lemma}\n\t\t\t\\label{lem:support bound on E(Rj) correlated}\n\t\t\tFor $j \\in S^*$ and some $0 < \\delta \\leq \\frac{1}{\\sqrt{2}}$, if $\\forall j \\in S^*$,\n\t\t\t$ 0 < \\lambda < |(w^*_j{\\sigma^i_{jj}}^2 + \\sum_{k\\in S^*, k\\ne j} w^*_k \\sigma^i_{jk})| - 8 |w_j^*| \\rho_i^2 \\delta - \\sum_{k\\in S^*, k\\ne j} 8 \\sqrt{2} |w_k^*| (1 + 4 \\max_j \\frac{\\rho_i^2}{{\\sigma^i_{jj}}^2}) \\max_j {\\sigma^i_{jj}}^2 \\delta - 8 |\\eta_i \\rho_i| \\delta $\n\t\t\tthen $ \\mathbb{E}(R_j) \\geq 1 - \\frac{4s}{g} \\sum_{i=1}^g \\exp(-n_i\\delta^2 ) $. Furthermore, for $n_i = \\mathcal{O}(\\frac{1}{\\delta^2}\\log s)$, we have $\\mathbb{E}(R_j) > \\frac{1}{2}$.\n\t\t\\end{lemma}\n\t\t\n\t\t\\paragraph{For entries $j$ in non-support $S^*_c$.}\n\t\tAgain, Lemma \\ref{lem:nonsupport bound E(Rj)} follows directly. Thus, we present the following lemma to show that for the entries in the non-support $\\mathbb{E}(R_j) < \\frac{1}{2}$.\n\t\t\\begin{lemma}\n\t\t\t\\label{lem:nonsupport bound E(Rj) correlated}\n\t\t\tFor $j \\in S^*_c$ and some $0 < \\delta \\leq \\frac{1}{\\sqrt{2}}$, if\n\t\t\t$ \\lambda > |\\sum_{k\\in S^*} w^*_k \\sigma^i_{jk}| + \\sum_{k\\in S^*} 8 \\sqrt{2} |w_k^*| (1 + 4 \\max_j \\frac{\\rho_i^2}{{\\sigma^i_{jj}}^2}) \\max_j {\\sigma^i_{jj}}^2 \\delta + 8 |\\eta_i \\rho_i| \\delta $ \n\t\t\tthen $\\mathbb{E}(R_j) \\leq \\frac{4s + 2}{g} \\sum_{i=1}^g \\exp(-n_i \\delta^2)$. Furthermore, for $n_i = \\mathcal{O}(\\frac{1}{\\delta^2} \\log s)$, we have $\\mathbb{E}(R_j) < \\frac{1}{2}$.\n\t\t\\end{lemma} \n\t\tResults from Lemmas \\ref{lem:support bound on E(Rj) correlated} and \\ref{lem:nonsupport bound E(Rj) correlated} ensure that Lemma \\ref{lem:support mcdiarmid} and \\ref{lem:nonsupport mcdiarmid} hold. Since we would like these results to hold across all $j \\in \\seq{d}$, we need a union bound across all the $d$ predictors. Thus, having $g = \\mathcal{O}(\\log d)$ makes sure that our results hold for all entries in the support and non-support with high probability. \t\n\t\\end{proof}\n\t\n\t\\subsection{Time Complexity}\n\t\\label{sub:time complexity}\n\t\n\tEach client does $\\mathcal{O}(dn_i)$ basic calculations. Thus, from the results of Corollaries \\ref{cor:mutually independent predictors} and \\ref{cor:correlated predictors}, the time complexity at each client is $\\mathcal{O}(sd)$ for mutually independent predictors and $\\mathcal{O}(s^2 d \\log s)$ for correlated predictors. The centralized server gathers $d$-bits of information from $g$ clients in $\\mathcal{O}(dg)$ time. \n\t\n\t\\section{Discussion on Robustness}\n\t\\label{sec:discussion on robustness}\n\t\n\tSince our method only relies on the correct calculation of the median, it is naturally robust to failure of few clients. To simulate the effect of model poisoning \\cite{bhagoji2019analyzing} and stragglers, we consider that $0 < \\beta < \\frac{1}{2}$ portion of clients have gone rogue (are straggling) and transmitting the wrong information to the centralized server. 
For the worst case scenario, we assume that they report the complement of the support, i.e., they always send a bit ``$1$'' for entries in the non-support and a bit ``$0$'' for entries in the support. To accommodate this change in the case of correlated predictors, we slightly change statements of Lemmas \\ref{lem:support bound on E(Rj) correlated} and \\ref{lem:nonsupport bound E(Rj) correlated}. Now we have, $ (\\forall j \\in S^*), \\quad \\mathbb{E}(R_j) \\geq (1 - \\beta) - \\frac{4s}{g} \\sum_{i=1}^{(1-\\beta)g} \\exp(-n_i \\delta^2) $ and $ (\\forall j \\in S^*_c), \\quad \\mathbb{E}(R_j) \\leq \\frac{4s + 2}{g} \\sum_{i=1}^{(1 - \\beta)g} \\exp(-n_i \\delta^2) + \\beta $.\n\tIt is easy to see that, as long as, we have $n_i > \\frac{1}{\\delta^2} \\log(\\frac{(8s + 4)(1 - \\beta)}{1 - 2 \\beta })$ data samples per client, then we still have $\\mathbb{E}(R_j) > \\frac{1}{2}, \\forall j \\in S^*$ and $\\mathbb{E}(R_j) < \\frac{1}{2}, \\forall j \\in S^*_c$ and all our results still hold. A similar analysis can be conducted for the case of mutually independent predictors and our results hold as long as we have $n_i > \\frac{1}{\\delta^2} \\log (\\frac{12(1 - \\beta)}{1 - 2 \\beta})$ data samples per client. \n\t\n\t\\section{Experimental Results}\n\t\\label{sec:experimental results}\n\t\n\t\\begin{figure}[!ht]\n\t\t\\centering\n\t\t\\begin{subfigure}{.45\\textwidth}\n\t\t\t\\centering\n\t\t\t\\includegraphics[width=\\linewidth]{exact_support_rec_num_samples}\n\t\t\t\\caption{Exact support recovery against numbers of samples per client}\n\t\t\t\\label{fig:recnumsample}\n\t\t\\end{subfigure}%\n\t\t\\begin{subfigure}{.45\\textwidth}\n\t\t\t\\centering\n\t\t\t\\includegraphics[width=\\linewidth]{exact_support_rec_num_clients}\t\n\t\t\t\\caption{Exact support recovery against numbers of clients}\n\t\t\t\\label{fig:recnumclient}\n\t\t\\end{subfigure}\n\t\t\\caption{Phase transition curves. Left: Exact support recovery averaged across $30$ runs against varying number of samples per client for $d = 500, 1000$, and $2000$, $s = 3$, $g = \\mathcal{O}(\\log d)$ clients. Right: Exact support recovery averaged across $30$ runs against varying number of clients for $s = 10, 20, 40$, and $50$, $d = 1000$, $n = \\max(30, \\mathcal{O}(s^2 \\log s))$ samples per server.}\n\t\t\\label{fig:recovery}\n\t\\end{figure} \n\t\n\t\n\tIn this section, we validate our theoretical results by conducting computational experiments. We provide the results for the experiments when predictors are correlated. Data in each client is generated by following generative process described in equation \\ref{eq:generative model}. Note that predictors and error term in different clients follow different sub-Gaussian distributions. To make it more general, we keep the correlation between entries in the support different than the correlation between one entry in the support and the other entry in the non-support and these further vary across clients. The regularization parameter $\\lambda$ is chosen such that condition in corollary \\ref{cor:correlated predictors} is satisfied for every client and for every entry in support and non-support. All the results reported here are averaged over 30 independent runs. 
We conduct two separate experiments to verify that $n_i = \\mathcal{O}(s^2\\log s)$ independent samples per client and a total of $g = \\mathcal{O}(\\log d)$ clients are sufficient to recover the true support.\n\t\n\t\\paragraph{Exact support recovery against number of samples per client.} \n\tThis experiment was conducted for a varying number of predictors ($d = 500, 1000$ and $2000$). For each of them, we fixed the number of clients to be $g = 2 \\log d$. The sparsity $s$ is kept fixed at $3$. The number of samples per client $n_i$ is varied with a control parameter $C$ as $10^C s^2 \\log s$. The performance of our method is measured by assigning a value of $1$ for exact recovery and $0$ otherwise. We can see in Figure \\ref{fig:recnumsample} that, initially, recovery remains at $0$ and then there is a sharp jump after which recovery becomes $1$. Notice how all three curves align perfectly. This validates the result of our theorem and shows that, given $g = \\mathcal{O}(\\log d)$ clients, $n_i = \\mathcal{O}(s^2 \\log s)$ samples per client are sufficient to recover the true support. \n\t\n\t\\paragraph{Exact support recovery against number of clients.}\n\tThe second experiment was conducted for a varying number of non-zero entries ($s=10, 20, 40$ and $50$) in the support of $w^*$. The experiments were run for a setup with $d=1000$ predictors. We fixed the number of samples per client ($n_i$) to be $\\max(30, \\mathcal{O}(s^2 \\log s))$. This ensures that a minimum of $30$ samples are available to each client, which is in line with our previous experiment, where exact recovery is achieved around $30$ samples per client. The number of clients $g$ is varied with a control parameter $C$ as $10^C \\log d$. As in the previous experiment, performance is measured by assigning a value of $1$ for exact recovery and $0$ otherwise. We can again see in Figure \\ref{fig:recnumclient} that, initially, recovery remains at $0$ and then goes to $1$ as we increase the number of clients. We also notice that all four curves align nicely. This validates that, given $n_i = \\mathcal{O}(s^2 \\log s)$ independent samples per client, $g = \\mathcal{O}(\\log d)$ clients are sufficient to recover the true support. \n\t%\n \n\t\n\t\\section{Concluding Remarks}\n\t\\label{sec:conclusion}\n\n\tIn this paper, we propose a simple and easy-to-implement method for learning the exact support of the parameter vector of a linear regression problem in a federated learning setup. We provide theoretical guarantees for the correctness of our method. We also show that our method runs in polynomial sample and time complexity. Furthermore, our method is robust to client failures, model poisoning and straggling clients. As a future direction, it would be interesting to analyze whether the bound on the sample complexity in the case of correlated predictors matches the corresponding information theoretic lower bound.\n\t\n\t\n\n\t","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section{Introduction}\n\nThis paper starts by introducing a mathematical framework for our solution. Then it describes a procedure to build a generic identifier of the factors' activations. Finally, it presents the results of the procedure for two particular statistical models of the measure grid's activity. 
This paper has been influenced by modern machine learning techniques, reviewed in \\cite{PCML-1}, especially algorithms that perform automated feature engineering, such as neural networks and deep learning \\cite{PCML-2}, as well as tree learning techniques \\cite{PCML-3} and their improvements \\cite{PCML-4} and \\cite{PCML-5}. Finally, modern signal processing techniques, which I was taught at the Ecole Polytechnique F\\'ed\\'erale de Lausanne and which are reviewed in \\cite{SP-1}, together with recent work in statistics in large dimensions, to which I was introduced during my stay at the Laboratoire d'Informatique Gaspard Monge and which is reviewed in \\cite{TAO-1}, have been decisive in the conception of this paper. To make the core subject of this report easier to follow, we introduce the following notation:\n\n \\begin{figure}[h]\n \\centering\n \\includegraphics[scale=0.25]{figures\/notations.png}\n \\caption{Measure grid model}\n \\label{fig:model_overview}\n\\end{figure}\n\n\n\\begin{itemize}\n\\item $n$ the size of the measure grid. \n\\item $\\mathcal{G}$ the measure grid, composed of bits, $\\mathcal{G} = \\lbrace b_1, \\ldots b_n \\rbrace$.\n\\item $S(\\mathcal{G})$ the set of all possible subsets of $\\mathcal{G}$, $\\vert S(\\mathcal{G})\\vert = 2^{n}$.\n\\item $S(\\mathcal{G}, l)$ the set of all subsets of $\\mathcal{G}$ of size $l$.\n\\item $K$ the number of latent factors.\n\\item $\\mathcal{F}$ the set of latent factors, $\\mathcal{F} = \\lbrace f_1, \\ldots f_K \\rbrace$.\n\\item $S(\\mathcal{F})$ the set of all possible subsets of $\\mathcal{F}$, $\\vert S(\\mathcal{F})\\vert = 2^{K}$.\n\\item $S(\\mathcal{F}, l)$ the set of all subsets of $\\mathcal{F}$ of size $l$.\n\\item $\\mathcal{G}(f)$ the set of bits activated by factor $f$, $\\mathcal{G}(f) \\in S(\\mathcal{G})$ and $f \\in \\mathcal{F}$. \n\\item $\\mathcal{G}^{-1}(b)$ the set of factors that activate the grid's bit $b$, $\\mathcal{G}^{-1}(b) \\in S(\\mathcal{F})$ and $b \\in \\mathcal{G}$.\n\\item $F(2)$ the field with elements $\\{0, 1\\}$, equipped with the logical XOR and logical AND respectively as the addition and multiplication.\n\\end{itemize}\n\n \n\\section{Definitions And Properties}\n\n\\subsection{Statistical definitions}\n\nIn this section we provide formalism for the statistical description of factors' activity and their signatures on the measure grid.\n\n\\subsubsection*{Activation of factors}\n\nEach factor takes a value in $\\{ 0, 1\\}$ at each instant of time. A factor with value 1 at some instant is active; otherwise it has value 0. At this stage of the paper we assume no particular statistical model for factors. Nevertheless, if we consider the set of all possible combinations of active and inactive factors ($F(2)^{K}$), we assume that there is a well-defined distribution $d'$ such that \n\n\\begin{align*}\nd' &: F(2)^{K} \\rightarrow [0,1]\\\\\nd'_x &= \\mathbb{P}\\left(f_1 = x_1, \\ldots, f_K = x_K \\right)\n\\end{align*}\n\nThe statistical signature of a factor on the measure grid describes how the factor is linked to the measure grid's bits. At this stage we simply assume that there is a well-defined probability measure so that for any $I \\in S(\\mathcal{G})$\n\n\\begin{align*}\n\\mathbb{P}(\\mathcal{G}(f) = I) &\\in [0, 1]\\\\\n\\sum_{I \\in S(\\mathcal{G})} \\mathbb{P}(\\mathcal{G}(f) = I) &=1\n\\end{align*}\n\nLatent factors' activations and signatures on the measure grid induce activations of the measure grid's bits. 
We refer to this distribution over all possible combinations of activations of bits as $d$, and define it as\n\n\\begin{align*}\nd &: F(2)^{n} \\rightarrow [0,1]\\\\\nd_x &= \\mathbb{P}\\left(b_1 = x_1, \\ldots, b_n = x_n \\right)\n\\end{align*}\n\nFinally, we can also model the connection between factors and a measure grid's bit as a signature of the grid's bit on the factor space. That is, for $I \\in S(\\mathcal{F})$, there is a well-defined probability measure so that \n\n\\begin{align*}\n\\mathbb{P}(\\mathcal{G}^{-1}(b) = I) &\\in [0, 1]\\\\\n\\sum_{I \\in S(\\mathcal{F})} \\mathbb{P}(\\mathcal{G}^{-1}(b) = I) &=1\n\\end{align*}\n\n\n\\subsubsection*{Characteristic polynomial}\n\nThe activity of factors and grid's bits may be modelled using sets of multivariate polynomials with values in $F(2)$, defined on $F(2)^{K}$ for sets of factors and on $F(2)^{n}$ for sets of measure grid's bits. The sets of polynomials associated with a set $I \\in S(\\mathcal{F})$ and a set $I' \\in S(\\mathcal{G})$ are denoted respectively $\\lbrace \\mathcal{P}_{I, l} \\rbrace_{l \\in \\mathbb{N}}$ and $\\lbrace \\mathcal{P}_{I', l} \\rbrace_{l \\in \\mathbb{N}}$. They represent a segmentation of the states of, respectively, the factors of $I$ and the measure grid's bits of $I'$.\n\n\n\\begin{align*}\n\\mathcal{P}_{I, l}&: F(2)^{K} \\rightarrow F(2)\\\\\n\\mathcal{P}_{I, l} [\\textbf{x}] &= \\begin{cases} \\sum_{\\pi \\in S(I, l)} x_{\\pi_1} \\cdot \\ldots \\cdot x_{\\pi_l}, & \\text{if }\\ l \\in \\lbrace 1, \\ldots, \\vert I \\vert \\rbrace \\\\ 0, & \\text{otherwise} \\end{cases}\n\\end{align*}\n\nand \n\n\\begin{align*}\n\\mathcal{P}_{I', l}&: F(2)^{n} \\rightarrow F(2)\\\\\n\\mathcal{P}_{I', l} [\\textbf{x}'] &= \\begin{cases} \\sum_{\\pi \\in S(I', l)} x'_{\\pi_1} \\cdot \\ldots \\cdot x'_{\\pi_l}, & \\text{if }\\ l \\in \\lbrace 1, \\ldots, \\vert I' \\vert \\rbrace \\\\ 0, & \\text{otherwise} \\end{cases}\n\\end{align*}\n\n\nHere $S(I, l)$ and $S(I', l)$ are the sets of all subsets of size $l$ of, respectively, $I$ and $I'$, $\\textbf{x} = [x_{f_1}, \\ldots, x_{f_K}]$ and $\\textbf{x}' = [x_{b_1}, \\ldots, x_{b_n}]$. Furthermore, we define the characteristic polynomial of a set $I \\in S(\\mathcal{F})$ and of a set $I' \\in S(\\mathcal{G})$ at level $l_0 \\in \\{0, \\ldots, \\vert I \\vert \\}$ and $l'_0 \\in \\{0, \\ldots, \\vert I' \\vert \\}$ as \n\n\\vspace{10px}\\noindent\\begin{minipage}{.5\\linewidth}\n\\begin{align*}\n\\mathcal{P}^{l_0}_{I} &: F(2)^{K} \\rightarrow F(2)\\\\\n\\mathcal{P}^{l_0}_{I} &= \\sum_{l = l_0}^{\\vert I \\vert} \\mathcal{P}_{I, l}\n\\end{align*}\n\\end{minipage}%\n\\noindent\\begin{minipage}{.5\\linewidth}\n\\begin{align*}\n\\mathcal{P}^{l'_0}_{I'} &: F(2)^{n} \\rightarrow F(2)\\\\\n\\mathcal{P}^{l'_0}_{I'} &= \\sum_{l = l'_0}^{\\vert I' \\vert} \\mathcal{P}_{I', l}\n\\end{align*}\n\\end{minipage}\\vspace{15px}\n\nSo far, the addition has been the logical XOR, following the definition of the field $F(2)$. However, in the rest of this report, we will use the symbols $+$ and $\\sum$ to represent the logical OR in $F(2)$. This notation considerably shortens the writing of complex polynomials. Denoting by $\\oplus$ the logical XOR and by $\\bar{x}$ the negation of $x$, one has\n\n\\begin{equation*}\nx + y = \\overline{\\bar{x} \\cdot \\bar{y}} = x \\oplus y \\oplus (x \\cdot y)\n\\end{equation*} \n\n\\subsubsection*{Operators on polynomials}\n\nIn order to qualify a set of factors and grid's bits, we define some basic operators. 
First, let $I_{\\mathcal{G}}$ be a subset of $S(\\mathcal{G})$; we denote by $F_2(I_{\\mathcal{G}}, \\mathcal{G})$ the operator that transforms $I_{\\mathcal{G}}$ into a set of $\\vert I_{\\mathcal{G}} \\vert$ vectors in $F(2)^{\\vert \\mathcal{G} \\vert}$. \n\n\\begin{equation*}\nF_2(I_{\\mathcal{G}}, \\mathcal{G}) : S(\\mathcal{G}) \\rightarrow F(2)^{\\vert \\mathcal{G} \\vert \\times \\vert I_{\\mathcal{G}} \\vert} \n\\end{equation*}\n\nFor each vector $X \\in F_2(\\{I\\}, \\mathcal{G})$ such that $I\\in I_{\\mathcal{G}}$, an entry takes value $1$ if the associated index belongs to $I$, and 0 otherwise. This operator is convenient for evaluating characteristic polynomials. As an example, let $(I, I') \\in S(\\mathcal{G})^{2}$ and $l_0 \\in \\{ 1, \\ldots, \\vert I \\vert \\}$; then \n\n\\begin{align*}\n\\sum_{x \\in F_2(\\lbrace I' \\rbrace, \\mathcal{G})} \\mathcal{P}_I^{l_0} \\left[ x \\right] &= \\begin{cases} 1, & \\text{if }\\ \\vert I'\\cap I \\vert \\geq l_0 \\\\ 0, & \\text{otherwise} \\end{cases}\n\\end{align*}\n\nFurthermore, given the distribution $d$ over measure grid bits' activations, we define the norm of a characteristic polynomial $\\mathcal{P}_I^{l_0}$ with respect to $d$ as\n\n\\begin{align*}\n\\Vert . \\Vert_{d} &: \\mathcal{P}_{F_2(S(\\mathcal{G}), \\mathcal{G})} \\rightarrow \\left[ 0, 1\\right] \\\\\n\\Vert \\mathcal{P}_I^{l_0} \\Vert_{d} &= \\sum_{x \\in F_2(S(\\mathcal{G}), \\mathcal{G})} \\mathcal{P}_I^{l_0}\\left[ x \\right] \\times d_x\n\\end{align*}\n\n\nwhere $\\mathcal{P}_{F_2(S(\\mathcal{G}), \\mathcal{G})}$ denotes the space of all polynomials with domain $F_2(S(\\mathcal{G}), \\mathcal{G})$ and $\\times$ is the usual multiplication in $\\mathbb{R}$. Finally, keeping the previous notation, let $\\lbrace \\mathcal{P}_{I_{i}}^{l_i} \\rbrace_{i=1, \\ldots, k}$ be a set of characteristic polynomials for some integer $k \\geq 2$; we define the product operator with respect to $d$ as \n\n\\begin{align*}\n\\langle ., \\ldots, . \\rangle_{d} &: \\mathcal{P}_{F_2(S(\\mathcal{G}), \\mathcal{G})}^{k} \\rightarrow \\left[ 0, 1\\right] \\\\\n\\langle \\mathcal{P}^{l_1}_{I_1}, \\ldots, \\mathcal{P}^{l_k}_{I_k} \\rangle_{d} &= \\sum_{x \\in F_2(S(\\mathcal{G}), \\mathcal{G})} \\left( \\mathcal{P}^{l_1}_{I_1} \\left[ x \\right] \\cdot \\ldots \\cdot \\mathcal{P}^{l_k}_{I_k} \\left[ x \\right] \\right) \\times d_x\n\\end{align*}\n\nwhere $\\cdot$ denotes the usual multiplication in $F(2)$ and $\\times$ the usual multiplication in $\\mathbb{R}$. Each operator specified above can also be defined in the factor space, using the characteristic polynomials in factor space and the distribution $d'$ over factors' activations.\n\n\n\\subsubsection*{Stochastic processes induced by factors' activations}\n\nFactors' activations are observed as strictly stationary stochastic processes. That is, to a couple $(I, l) \\in S(\\mathcal{F}) \\times \\{1, \\ldots, \\vert I \\vert\\}$, we associate a stochastic process $x_I^l[t]$ defined as \n\n\n\\begin{equation*}\nx_I^l[t] = \\begin{cases} 1, & \\text{with probability }\\ \\Vert \\mathcal{P}_I^l \\Vert_{d'}\\\\ 0, & \\text{otherwise} \\end{cases}\n\\end{equation*}\n\nwith $\\{ x_I^l[t] \\}_{t\\in \\mathbb{N}}$ pairwise independent. Factors' signatures and activations lead to bits' activations, which are also observed as strictly stationary stochastic processes. 
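\n\nTo make these definitions concrete, the following minimal sketch (an illustration with toy names, not the implementation used later in this report) evaluates a characteristic polynomial, computes its norm with respect to a distribution $d$ given as a dictionary of states, and samples the induced stationary process.\n\n\\begin{verbatim}\nimport numpy as np\n\ndef char_poly(x, I, l0):\n    # P_I^{l0}[x] = 1 iff at least l0 of the bits indexed by I are set in x\n    return int(sum(x[i] for i in I) >= l0)\n\ndef norm(I, l0, d):\n    # ||P_I^{l0}||_d = sum_x P_I^{l0}[x] * d_x, with d = {state: probability}\n    return sum(char_poly(x, I, l0) * p_x for x, p_x in d.items())\n\nd = {(1, 1, 0): 0.5, (0, 1, 1): 0.5}     # toy distribution over F(2)^3\np = norm(I=(0, 1), l0=2, d=d)            # = 0.5: only (1, 1, 0) fires P_I^2\nx_t = np.random.binomial(1, p, size=10)  # samples of the process x_I^{l0}[t]\n\\end{verbatim}\n\n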
Again, to a couple $(I, l) \\in S(\\mathcal{G}) \\times \\{1, \\ldots, \\vert I \\vert\\}$, we associate a stochastic process $x_I^l[t]$ defined as \n\n\n\\begin{equation*}\nx_I^l[t] = \\begin{cases} 1, & \\text{with probability }\\ \\Vert \\mathcal{P}_I^l \\Vert_{d} \\\\ 0, & \\text{otherwise} \\end{cases}\n\\end{equation*}\n\nwith $\\{ x_I^l[t] \\}_{t\\in \\mathbb{N}}$ pairwise independent.\n\n\\subsection{Firing Graph}\nThe firing graph is the main data structure used in our solution. In this section we give its definition, as well as basic tools to support its analysis.\n \n\\subsubsection*{Graph specification}\nThe algorithm presented in this report uses a particular data structure that we refer to as a firing graph and that we denote $G(V, D_w)$. \n\n\\begin{itemize}\n\\item $V$ is the set of vertices, $V = \\lbrace v_1, \\ldots, v_{\\vert V \\vert} \\rbrace$\n\\item $D_w$ is the weighted direct link matrix, $D_w \\in \\mathbb{N}^{\\vert V \\vert \\times \\vert V \\vert}$, where $\\left[ D_w \\right]_{i, j} = w$ indicates an edge of weight $w$ from vertex $v_i$ to vertex $v_j$ whenever $w > 0$ \n\\end{itemize}\n\n$G$ is a directed weighted graph whose vertices are organized in layers. A vertex $v$ of some layer $i \\in \\mathbb{N}$ must have at least one incoming edge from a vertex of layer $i-1$. It may also have incoming edges from any vertex of a layer $k \\in \\mathbb{N}, k < i$. Such a set of vertices will be referred to as the input domain of $v$. Vertices of layer $0$ have empty input domains; they correspond to bits of the measure grid $\\mathcal{G}$. Each vertex stores the tuple $(I, l_0)$\n\n\\begin{itemize}\n\\item $I$ the set of vertices at the tail of the incoming edges of the vertex, referred to as its input set\n\\item $l_0$ the lower bound on the number of active inputs required for the vertex to fire, referred to as its level, $l_0 \\in \\lbrace 1, \\ldots, \\vert I \\vert \\rbrace$ \n\\end{itemize}\n\n \\begin{figure}[H]\n \\centering\n \\includegraphics[scale=0.25]{figures\/firing_graph.png}\n \\caption{Firing graph}\n \\label{fig:firing_graph}\n\\end{figure}\n\n\\subsubsection*{Graph Polynomials}\n\nAs for bits of the measure grid and factors, a vertex $v(I, l_0)$ of a firing graph is associated with the set of polynomials $\\lbrace \\mathcal{P}_{I, l} \\rbrace_{l \\in \\{l_0, \\ldots, \\vert I \\vert \\}}$. Each polynomial is a segment of the vertex's characteristic polynomial $\\mathcal{P}_v$, which describes the activation of $v$ at instant $t$ given its input domain's activations at instant $t-1$. If we denote by $n$, $I$ and $l_0$ respectively the size of the input domain of $v$, the set of vertices that have an edge toward $v$ and the level of $v$, then \n\n\\vspace{10px}\\noindent\\begin{minipage}{.5\\linewidth}\n\\begin{align*}\n\\mathcal{P}_{v, l}&: F(2)^{n} \\rightarrow F(2)\\\\\n\\mathcal{P}_{v, l}[\\textbf{x}] &= \\begin{cases} \\sum_{\\pi \\in S(I, l)} x_{\\pi_1} \\cdot x_{\\pi_2} \\cdot \\ldots \\cdot x_{\\pi_l}, & \\text{if }\\ l \\in \\lbrace l_0, \\ldots, \\vert I \\vert \\rbrace \\\\ 0, & \\text{otherwise} \\end{cases}\n\\end{align*}\n\\end{minipage}%\n\\noindent\\begin{minipage}{.5\\linewidth}\n\\begin{align*}\n\\mathcal{P}_v &: F(2)^{n} \\rightarrow F(2)\\\\\n\\mathcal{P}_v[\\textbf{x}] &= \\sum_{l = l_0}^{\\vert I \\vert} \\mathcal{P}_{v, l}[\\textbf{x}]\n\\end{align*}\n\\end{minipage}\\vspace{15px}\n\nwhere $S(I, l)$ is the set of all subsets of size $l$ of $I$ and $\\textbf{x} \\in F_2(\\{ I \\}, D_v)$, with $D_v$ the input domain of $v$. Furthermore, all operators on polynomials defined previously remain applicable. 
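\n\nAs an illustration, here is a minimal sketch of this data structure, using a scipy sparse matrix for $D_w$ and a vector of levels; the tiny example graph and all names are ours and are only meant to make the vertex semantics concrete, the level being read as the minimum number of active inputs required for a vertex to fire.\n\n\\begin{verbatim}\nimport numpy as np\nfrom scipy.sparse import lil_matrix\n\nn_v = 5                                  # v_1, v_2, v_3 in layer 0\nD_w = lil_matrix((n_v, n_v), dtype=int)  # [D_w]_{i,j} = w > 0: edge v_i -> v_j\nlevel = np.array([0, 0, 0, 2, 1])        # level l_0 of each vertex\n\nD_w[0, 3] = D_w[1, 3] = 1                # v_4(I={v_1, v_2}, l_0=2), layer 1\nD_w[2, 4] = D_w[3, 4] = 1                # v_5(I={v_3, v_4}, l_0=1), layer 2\n\ndef fires(j, x_prev, D_w, level):\n    # P_v semantics: v_j is active at t iff at least level[j] of its\n    # input vertices were active at instant t - 1\n    inputs = D_w[:, j].toarray().ravel() > 0\n    return int(x_prev[inputs].sum() >= level[j])\n\nx_prev = np.array([1, 1, 0, 0, 0])       # layer-0 activations at t - 1\nprint(fires(3, x_prev, D_w, level))      # 1: both inputs of v_4 are active\n\\end{verbatim}\n\n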
Let $v, v_1, \\ldots, v_k$ be some vertices of the firing graph with the same input domain and $d$ a distribution over activations of their input domain's vertices. Then the norm and the product with respect to distribution $d$ are defined as\n\n\n\\vspace{10px}\\begin{minipage}{.3\\linewidth}\n\\begin{align*}\n\\Vert \\mathcal{P}_v \\Vert_{d} = \\sum_{x \\in F_2(S(\\mathcal{G}), \\mathcal{G})} \\mathcal{P}_v\\left[ x \\right] \\times d_x\n\\end{align*}\n\\end{minipage}%\n\\noindent\\begin{minipage}{.7\\linewidth}\n\\begin{align*}\n\\langle \\mathcal{P}_{v_1}, \\ldots, \\mathcal{P}_{v_k} \\rangle_{d} = \\sum_{x \\in F_2(S(\\mathcal{G}), \\mathcal{G})} \\left( \\mathcal{P}_{v_1} \\left[ x \\right] \\cdot \\ldots \\cdot \\mathcal{P}_{v_k} \\left[ x \\right] \\right) \\times d_x\n\\end{align*}\n\\end{minipage}\\vspace{15px}\n\n\nFinally, activations of vertices are observed as stochastic processus. Given a vertex $v(I, l)$ we define\n\n\\begin{equation*}\nx_v[t] = \\begin{cases} 1, & \\text{with probability }\\ \\Vert \\mathcal{P}_v \\Vert_{d}\\\\ 0, & \\text{Otherwise} \\end{cases}\n\\end{equation*}\n\nThe stochastic process that takes value 1 if the vertex $v$ actvivates and 0 otherwise, at each instant of time. If measure grid's bits compound layer 0 of the firing graph, then, from definition of bit's stochastic processus and linearity of state's propagations, $x_v[t]$ is strictly stationary.\n\n\n\\subsubsection*{Connection to grid's bit}\n\nThe firing graph is a convenient data structure to measure activity of a complex group of measure grid's bits. When the firing graph's layer 0 is composed of measure grid's bits, the characteristic polynome of each vertex can be represented as a characteristic polynome in the measure grid's space, without consideration of time and delay. Let $G$ be such a firing graph, then for any vertex of layer 1, $v(I, \\vert I \\vert)$, the characteristic polynome $v$ is equal to the characteristic polynome of the set of bits $I \\in \\mathcal{G}$ with level $\\vert I \\vert$.\n\n\\begin{align*}\n\\mathcal{P}_{v} &= \\mathcal{P}_{I, \\vert I \\vert}\\\\\nx_v[t] &= x_{I}^{\\vert I \\vert}[t - 1] \n\\end{align*}\n\nFurthermore if we set the level of $v$ to 1 its characteristic polynome become the logical $or$-sum of the characteristic polynome of each bits of $I$\n\n\\begin{align*}\n\\mathcal{P}_{v} &= \\sum_{b \\in I} \\mathcal{P}_{\\{ b \\}, 1}\\\\\nx_v[t] &= \\begin{cases} 1, & \\text{if }\\ \\sum_{b \\in I} x_{\\{ b \\}}^1[t - 1] > 0\\\\ 0, & \\text{Otherwise} \\end{cases}\n\\end{align*}\n\n\nBesides, one can design more complexe arrangements of vertices that enable to model activations of multiple sets of measure grid's bits. Let $G$ be a firing graph with its layer 0 composed of $\\mathcal{G}$, let $u(I, \\vert I \\vert)$ and $v(I', 1)$, such that $I \\cap I' = \\emptyset$, be vertices of layer 1 and $w(\\{ u, v \\}, 2)$ a vertex of layer 2. Then one can see that that characteristic polynome of $w$ verifies\n\n\\begin{align*}\n\\mathcal{P}_{w} &= \\sum_{b \\in I'} \\mathcal{P}_{I \\cup \\{ b \\}, \\vert I \\vert + 1}\\\\\nx_w[t] &= \\begin{cases} 1, & \\text{if }\\ \\sum_{b \\in I'} x_{I \\cup \\{ b \\}}^{\\vert I \\vert +1}[t - 2] > 0\\\\ 0, & \\text{Otherwise} \\end{cases}\n\\end{align*}\n\n\n\\subsection{Evaluation of measure grid's bits}\n\nA perfect indicator of the activation of a given factor $f$ can be used to evaluate the possibility of any set of bits to be part of $f$'s signature on the measure grid. 
\n\n\\subsubsection*{Factor's signature}\n\n\nOne way to describe the activity of a factor $f$ on the measure grid is to associate it to a polynome in the measure grid's space\n\n\\begin{align*}\n\\mathcal{P}_{\\mathcal{G}(f)} &: F(2)^n \\rightarrow F(2)\\\\\n \\mathcal{P}_{\\mathcal{G}(f)} &= \\mathcal{P}_{\\mathcal{G}(f), \\vert \\mathcal{G}(f) \\vert}\n\\end{align*}\n\n$\\mathcal{P}_{\\mathcal{G}(f)}$ is refered as the polynomial signature of $f$ on $\\mathcal{G}$. Anytime $f$ is active then its polynomial signature takes value 1. Yet under particular modelling of factor's links to measure grid, the polynomial signature of $f$ can take value 1 while $f$ is not active. More formally let $f \\in \\mathcal{F}$, $\\forall I \\in S(\\mathcal{F})$ such that $x \\in F_2(\\{I\\}, \\mathcal{F})$ and $x' \\in F_2(\\{\\cup_{f \\in I} \\mathcal{G}(f) \\}, \\mathcal{G})$\n\\begin{center}\n$\\mathcal{P}_f[x] = 1 \\Rightarrow \\mathcal{P}_{\\mathcal{G}(f)}[x'] = 1$\n\\end{center}\n\nFurthermore if $!\\exists J \\in S(\\mathcal{F} \\setminus \\{ f \\})$ such that $\\mathcal{G}(f) \\subset \\bigcup_{f' \\in J} \\mathcal{G}(f')$ then\n\n\\begin{center}\n$\\mathcal{P}_f[x] = 1 \\Leftrightarrow \\mathcal{P}_{\\mathcal{G}(f)}[x'] = 1$\n\\end{center}\n\n\\subsubsection*{basic metrics}\n\nLet $I \\in S(\\mathcal{G})$, $l \\in \\{ 1, \\ldots, \\vert I \\vert \\}$, $f \\in \\mathcal{F}$ and $e$ the event \"factor $f$ is active\". Then we define the recall coefficient of couple $(I, l)$ with respect to $f$ as\n\n\\begin{equation*}\n\\mu_{I, l, f} = \\langle \\mathcal{P}^{l}_{I}, \\mathcal{P}_{\\mathcal{G}(f)} \\rangle_{d \\vert e} + \\langle \\mathcal{P}^{l}_{I}, \\bar{\\mathcal{P}}_{\\mathcal{G}(f)} \\rangle_{d \\vert e}\n\\end{equation*}\n\nWhere $d \\vert e$ is the distribution over bit's activations given event $e$ and $\\bar{\\mathcal{P}}_{\\mathcal{G}(f)}$ is the complement of $\\mathcal{P}_{\\mathcal{G}(f)}$ in $F(2)$. Furthermore we define the precision coefficient of couple $(I, l)$ with respect to $f$ as\n\n\\begin{equation*}\n\\nu_{I, l, f} = \\langle \\mathcal{P}^{l}_{I}, \\mathcal{P}_{\\mathcal{G}(f)} \\rangle_{d \\vert \\bar{e}} + \\langle \\mathcal{P}^{l}_{I}, \\bar{\\mathcal{P}}_{\\mathcal{G}(f)} \\rangle_{d \\vert \\bar{e}}\n\\end{equation*}\n\nWhere $d \\vert \\bar{e}$ is the distribution over bit's activations given not event $e$. Finally we define the purity coefficient of couple $(I, l)$ with respect to $f$ as \n\n\\begin{equation*}\n\\omega_{I, l, f} = \\frac{\\nu_{I, l, f}}{\\mu_{I, l, f}}\n\\end{equation*}\n\n\nThe lower $\\omega_{I, l, f}$ is, the purer is the couple ($I$, $l$) with respect to $f$. The recall, precision and purity coefficient can be defined for any vertex $v$ of a firing graph where vertices of layer 0 are composed by measure grid's bit and are denoted respectively $\\mu_{v, f}$, $\\nu_{v, f}$ and $\\omega_{v, f}$. The latter are computed by using the representation of $\\mathcal{P}_v$ as a characteristic polynomial in the measure grid's space.\n\n\\subsubsection*{advanced metrics}\n\n \nLet $I \\in S(\\mathcal{G})$, $l \\in \\{ 1, \\ldots, \\vert I \\vert \\}$, $f \\in \\mathcal{F}$ and $e$ the event \"factor $f$ is active\". 
We define the precision of the couple $(I, l)$ with respect to factor $f$ as\n\n\\begin{equation*}\n\\phi_{I, l, f} = \\frac{\\Vert \\mathcal{P}^{l}_I \\Vert_{d, e}}{\\Vert \\mathcal{P}^{l}_I \\Vert_{d}}\n\\end{equation*}\n\nWe also define the recall of the couple $(I, l)$ with respect to factor $f$ as\n\n\\begin{equation*}\n\\psi_{I, l, f} = \\frac{\\Vert \\mathcal{P}^{l}_I \\Vert_{d, e}}{\\Vert \\mathcal{P}_{\\mathcal{G}(f)} \\Vert_{d, e}}\n\\end{equation*}\n\nWhere $d, e$, the distribution over the combination of activations of measure grid's bits that intetersect with event $e$. The precision and the recall are defined for any vertex $v$ of a firing graph where vertices of layer 0 are composed by measure grid's bit and are denoted respectively $\\phi_{v, f}$ and $\\psi_{v, f}$. Again, The latter are computed by using the representation of $\\mathcal{P}_v$ as a characteristic polynomial in the measure grid's space.\n\n\\subsubsection*{Advanced stochastic process induced by vertex}\n\nGiven a firing graph with its layer 0 composed of measure grid's bits, we have seen that the propagation of activations induces a stochastic process at each vertex. Here we introduce some more complex stochastic processus at each vertex of $G$. Given a vertex $v$ at layer $k \\geq 0$, its characteristic polynome $\\mathcal{P}_{v}$, a factor $f \\in \\mathcal{F}$ and e, the event \"factor $f$ is active\", we define the score process of $v$ with respect to factor $f$ as\n\n\\begin{equation*}\ns_{v,f}\\left[N, T, p, q \\right] = N + \\sum_{t=1}^{T} s_{v, p, q, t, f}\n\\end{equation*}\n\nWhere $(N, T, p, q) \\in \\mathbb{N}^4$ and $\\lbrace s_{v, p, q, t, f} \\rbrace_{t \\in \\mathbb{N}}$ a set of i.i.d random variable. $s_{v, p, q, t, f}$ takes value $q$ if the event e was true at instant $t - k $ and value $-p$ if it was false, given that $v$ activates at instant $t$. That is, $\\forall$ $t < k $, $s_{v, p, q,t, f} = 0$ and $\\forall$ $t \\geq k$\n\n\\begin{equation*}\ns_{v, p,q, t, f} = \\begin{cases} q, & \\text{with probability } q_s \\\\ -p, & \\text{with probability } 1 - q_s \\end{cases}\n\\end{equation*}\n\nWhere $q_s = \\frac{q_r}{q_r + q_p}$ with $q_r = \\Vert \\mathcal{P}_v \\Vert_{d,e} $ and $q_p = \\Vert \\mathcal{P}_v \\Vert_{d} - q_r$. $d, e$ is the distribution over measure grid's activations that intersect with the event e\n\n\\subsection{Properties}\n\nThis paragraph intend to deliver useful properties for the analysis of the algorithm. The proof of every properties can be found in the appendix A at the end of this paper.\n\n\\subsubsection*{Polynomial decomposition}\n\n\\underline{Partition}\\\\\n\nLet $v_1(I, l_0)$, $v_2(J, 0)$ and $v_3(K, 0)$, be three vertices at the layer 1 of some firing graph, with the same input domain $\\mathcal{G}$. 
If $I = J \\cup K$ and $J \\cap K = \\emptyset$, then, $\\forall x \\in F_2(S(\\mathcal{G}), \\mathcal{G})$\n\n\\begin{equation}\n\\label{prop:partition-1}\n\\mathcal{P}_I^{l_0}\\left[ x \\right] = \\sum_{l=l_0}^{\\vert I \\vert} \\sum_{j=0}^{\\vert J \\vert} \\mathcal{P}_{J, j}\\left[ x \\right] \\cdot \\mathcal{P}_{K, l - j}\\left[ x \\right] \n\\end{equation} \n\nIn particular, for $b \\in I$\n\n\\begin{equation}\n\\label{prop:partition-2}\n\\mathcal{P}_{I, l}\\left[ x \\right] = \\mathcal{P}_{I\\setminus \\{ b\\}, l}\\left[ x \\right] \\cdot \\mathcal{P}_{\\{ b\\}, 0}\\left[ x \\right] + \\mathcal{P}_{I\\setminus \\{ b\\}, l-1}\\left[ x \\right] \\cdot \\mathcal{P}_{\\{ b\\}, 1}\\left[ x \\right]\n\\end{equation} \n\n\\underline{Decomposition}\\\\\n\nLet $G$ be a firing graph with layer 0 composed of $\\mathcal{G}$. Let $u(I, l_u)$ and $v(I', l_v)$, with $I \\cap I' = \\emptyset$, be vertices of layer 1 and let $w(\\{ u, v \\}, 2)$ be a vertex of layer 2. Let $K \\in \\cup_{l \\in \\{l_v, \\ldots, \\vert I' \\vert \\}} S(I', l)$, $x \\in F_2(S(\\mathcal{G}), \\mathcal{G})$ and $x' = \\begin{bmatrix} \\mathcal{P}_{u}[x] &\\mathcal{P}_{v}[x] \\end{bmatrix}$; then \n\n\n\\begin{equation}\n\\label{prop:2layer-1}\n\\mathcal{P}_{K, \\vert K \\vert}\\left[ x \\right] \\cdot \\mathcal{P}_{\\{u, v\\}, 2}\\left[ x' \\right] = \\sum_{l=l_u}^{\\vert I \\vert} \\sum_{J \\in S(I, l)} \\mathcal{P}_{J \\cup K, l + \\vert K \\vert }\\left[ x \\right]\n\\end{equation}\n\nIn particular, if $l_u = \\vert I \\vert$ and $l_v = 1$, then for any vertex of layer 0, $b \\in I'$\n\n\\begin{equation}\n\\label{prop:2layer-2}\n \\mathcal{P}_{b, 1}\\left[ x \\right] \\cdot \\mathcal{P}_{\\{u, v\\}, 2} \\left[ x' \\right]= \\mathcal{P}_{I \\cup \\{b\\}, \\vert I \\vert + 1}\\left[ x \\right]\n\\end{equation}\n\n\n\\subsubsection*{Metrics}\n\nThroughout this section, we consider $G$ to be a firing graph whose layer 0 is composed of the measure grid's bits $\\mathcal{G}$, and we let $f \\in \\mathcal{F}$ denote some target factor that is linked to at least one bit of the measure grid. The distributions of activations of latent factors and of measure grid's bits will be denoted respectively $d'$ and $d$, and $e$ is the event \"factor $f$ is active\". 
Furthermore we use $v$ to denote some vertex of $G$ whose characteristic polynome respects $\\mathcal{P}_v = \\mathcal{P}_{I}^{l}$ with $(I, l) \\in S(\\mathcal{G}), \\{1, \\ldots, \\vert I \\vert\\}$ and $f \\in \\mathcal{F}$ some factor\\\\\n\n\\underline{Precision of vertex}\\\\\n\nThe precision of $v$ with respect to $f$ is\n\n\\begin{equation}\n\\label{prop:precision1}\n\\phi_{v, f} = \\frac{\\Vert \\mathcal{P}_{f} \\Vert_{d'}}{\\Vert \\mathcal{P}_{f} \\Vert_{d'} + (1 - \\Vert \\mathcal{P}_{f} \\Vert_{d'}) \\times \\omega_{I, l, f}}\n\\end{equation}\n\nFurthermore, if $\\mu_{v, f} = 1$ we have\n\n\\begin{equation}\n\\label{prop:precision2}\n\\phi_{v, f} \\leq \\frac{\\Vert \\mathcal{P}_{f} \\Vert_{d'}}{\\Vert \\mathcal{P}_{f} \\Vert_{d'} + (1 - \\Vert \\mathcal{P}_{f} \\Vert_{d'}) \\times \\omega_{\\mathcal{G}(f),\\vert \\mathcal{G}(f) \\vert, f}} \n\\end{equation}\n\n\\underline{Recall of vertex}\\\\\n\nThe recall of $v$ with respect to $f$ is\n\n\\begin{equation}\n\\label{prop:recall1}\n\\psi_{v, f} = \\mu_{I, l, f}\n\\end{equation}\n\nFurthermore, \n\n\\begin{equation}\n\\label{prop:recall2}\n0 \\leq \\phi_{v, f} \\leq 1\n\\end{equation}\n\nWhere right equality is reached whenever $v$ is connected to a set of measure grid's bit $I \\in \\mathcal{G}$, with level $l_0 = \\vert I \\vert$ such that $I \\subset \\mathcal{G}(f)$. \\\\\n\n\\underline{vertex's score process}\\\\\n\nIf $s_{v, f}[N, T, p, q]$ denotes the score process of $v$ with respect to $f$, with $N, T, p, q \\in \\mathbb{N}^4$, then\n\n\\begin{equation}\n\\label{prop:score_mean}\n\\mathbb{E} \\left[ s_{v, f}[N, T, p, q] \\right] = N + T \\times (\\phi_{I, l, f} \\times (p + q) - p)\n\\end{equation}\n\nFurthermore,\n\n\\begin{equation}\n\\label{prop:score_var}\n\\mathrm{Var} \\left[ s_{v, p, q, t, f} \\right] = (q + p)^{2} \\times \\phi_{I, l, f} \\times (1 - \\phi_{I, l, f})\n\\end{equation}\n\n\\section{Identification of Latent Factor}\nIn this section, we present a procedure to identify a latent factor's activation. The procedure consists of two steps:\n\n\\begin{itemize}\n\\item Sampling: Sample the measure grid and build a firing graph.\n\\item Draining: Drain the firing graph to exclude high purity coefficient's vertices.\n\\end{itemize}\n\nBoth processus will be described and the efficiency of the draining algorithm quantified.\n\n\\subsection{Sampling}\n\n\nSampling the measure grid consists in following a procedure to select some bits of it. This procedure is usually designed to be the most efficient in the fullfilment of specific quantitative objective. First, we assume that we have access to a determinist exact indicator of $f$'s activations with $f\\in \\mathcal{F}$. Then, the objective of sampling is to maximize the probability that we sample a bit whose purity coefficient with respect to $f$ is lower or equal to some positive constant $\\omega$. That is, if we denote $s$ the random variable of the outcome of a single sampling, the objective is to maximize\n\n\\begin{center}\n$\\mathbb{P}( \\omega_{\\{s\\}, 1, f} \\leq \\omega)$\n\\end{center} \n\nAgain, if we have a set $I \\subset \\mathcal{G}$ of bits, the objective of sampling is to maximize the probability of selecting a bit $b$, for which the purity of $I \\cup \\{b \\}$ at level $\\vert I \\vert + 1$ is lower to a given positive constant $\\omega$. 
That is, if we denote by $s$ the random variable of the outcome of a single sampling, the objective is to maximize\n\n\\begin{center}\n$\\mathbb{P}( \\omega_{I \\cup \\{ s\\}, \\vert I \\vert, f} \\leq \\omega)$\n\\end{center} \n\nWe propose a very intuitive sampling method based on the indicator of activation of target factor $f$. Given parameters $p_s \\in [0, 1]$ and $S_p$ respectively the probability of picking a bit and a set of pre-selected measure grid's bits, the sampling procedure writes\n\n\\begin{algorithm}[H]\n\\caption{Sampling}\n\\textbf{Input:} $p_{\\mathcal{S}}$, $S_{p}$\\\\\n\\textbf{Output:} $S$\n\\begin{algorithmic}\n\\State $S \\gets \\{ \\}$, $x_f \\gets nextFactorState()$, $X_{\\mathcal{G}} \\gets nextGridState()$\n\\While{$ S \\textit{ is empty}$}\n \\If {$x_f = 1$ and $\\forall b \\in S_p \\textit{ } X_{\\mathcal{G}}[b] = 1$}\n \\ForAll{$b \\in \\mathcal{G}\\setminus S \\cup S_p$}\n \\If {$X_{\\mathcal{G}}[b] = 1$}\n \\State $S \\gets S \\cup \\{ b \\} \\textit{ with probability } p_s$\n \\EndIf\t\n \\EndFor\t\n \\EndIf\n \\State $x_f \\gets nextFactorState()$\n \\State $X_{\\mathcal{G}} \\gets nextGridState()$\n\\EndWhile\n\\end{algorithmic}\n\\end{algorithm}\n\nWhere $x_f$ and $X_{\\mathcal{G}}$ are respectively a scalar that takes value 1 when factor $f$ is active, 0 otherwise, and a mapping with measure grid's bits as keys and their states as values (0 or 1). The second mean of the sampling procedure is to build a firing graph. The construction of the firing graph requires to set a parameter $N \\in \\mathbb{N}$ that corresponds to the initial weigth of edges that will be drained. In addition we set a mask matrix $G_{mask} \\in \\{0, 1\\}^{\\vert V \\vert}$ that controls which vertex is allowed to have their outcoming edges updated during draining. We consider two kind of firing graphs.\n\n\\begin{figure}[H]\n\\centering\n\\includegraphics[scale=0.35]{figures\/firing_graph_drainer_1.png}\n\\caption{Single sampled firing graph}\n\\label{fig:single_sampled_fg}\n\\end{figure}\n\nIn figure~\\ref{fig:single_sampled_fg}, sampled bits $\\{b_1, \\ldots, b_{n_s}\\}$ are used as vertices of the layer 0 of a firing graph $G$, $n_S = \\vert S \\vert$. Then vertex $v(\\{b_1, \\ldots, b_{n_s}\\}, 1)$ is added at the layer 1 of $G$. Furthermore, we set $G_{mask}$ so to allow only layer 0's outcoming edges to be updated through draining.\n\n\\begin{figure}[H]\n\\centering\n\\includegraphics[scale=0.35]{figures\/firing_graph_drainer_2.png}\n\\caption{Joint sampled firing graph}\n\\label{fig:joint_sampled_fg}\n\\end{figure}\n\nIn figure~\\ref{fig:joint_sampled_fg}, sampled bits $\\{b_1, \\ldots, b_{n_s}\\}$ and pre-selected bits $\\{b_1^{*}, \\ldots, b_k^{*} \\}$ for some $k \\in \\mathbb{N}^{*}$ compound the layer 0 of the firing graph $G$, $n_S = \\vert S \\vert$. Then, vertices $v(\\{b_1, \\ldots, b_{n_s}\\}, 1)$ and $u(\\{b_1, \\ldots, b_{k}\\}, k)$ are added at layer 1 of $G$ and vertex $w(\\{u, v\\}, 2)$ at layer 2 of $G$. Finally, we set $G_{mask}$ so that only $b_1, \\ldots, b_{n_s}$'s outcoming edges are allowed to be updated through draining.\n\n\\subsection{Draining}\n\nDraining the firing graph consists in iterating a forward propagation of bits's activations and a backward propagation of feedback generated by factor's activations through the firing graph. Feedback are meant to increment or decrement the weight of unmasked vertices's outcoming edges. 
Given that an edge with a null or negative weight vanishes, at the end of the routine the connections of the graph discriminate between vertices according to their purity. To ease understanding of the algorithm, we split vertices of the firing graph into input and core vertices, which are respectively vertices of layer 0 and vertices of layers $> 0$. Furthermore, we introduce a new type of vertex that can only have incoming edges from core vertices. We refer to those vertices as outputs. \n\n\\begin{figure}[H]\n\\centering\n\\includegraphics[scale=0.30]{figures\/firing_graph_drainer.png}\n\\caption{Draining diagram}\n\\label{fig:draining_diagram} \n\\end{figure}\n\nWe use $n_i$, $n_c$ and $n_o$ to denote the numbers of input, core and output vertices respectively. Furthermore, we define $I_w \\in \\mathbb{N}^{n_i \\times n_c}$, $C_w \\in \\mathbb{N}^{n_c \\times n_c}$ and $O_w \\in \\mathbb{N}^{n_c \\times n_o}$ as the weighted direct link matrices from input to core vertices, from core to core vertices and from core to output vertices respectively. We will also use $A = A_w > 0$, $A \\in \\{I, C, O \\}$, to denote the corresponding unweighted direct link matrices. Finally, in order to represent more conveniently the stochastic processes induced by the measure grid's activations, we define the following stochastic vectors\n\n\\begin{itemize}\n\\item $x_i^{(t)} \\in \\{0, 1\\}^{1 \\times n_i}$ the vector of activations of input vertices at instant $t$\n\\item $x_c^{(t)} \\in \\{0, 1\\}^{1 \\times n_c}$ the vector of activations of core vertices at instant $t$\n\\item $x_o^{(t)} \\in \\{0, 1\\}^{1 \\times n_o}$ the vector of activations of output vertices at instant $t$\n\n\\end{itemize}\n\nThe propagation of activations through the firing graph can be represented with two equations:\\\\\n\n\n\\vspace{20px}\\noindent\\begin{minipage}{.5\\linewidth}\n\\begin{center}\n\\underline{Forward transmitting (FT)}\n\\end{center}\n\\begin{align*}\n\\tilde{x}_c^{(t)} &= x_i^{(t-1)} \\cdot I + x_c^{(t-1)} \\cdot C \\\\\n\\tilde{x}_o^{(t)} &= x_c^{(t-1)} \\cdot O\n\\end{align*} \n\\end{minipage}%\n\\noindent\\begin{minipage}{.5\\linewidth}\n\\begin{center}\n\\underline{Forward processing (FP)}\n\\end{center}\n\\begin{align*}\n[x_c^{(t)}]_i &= \\begin{cases} 1, & \\text{if }\\ [\\tilde{x}_c^{(t)}]_i \\geq l_i \\\\ 0, & \\text{otherwise} \\end{cases}\\\\\n[x_o^{(t)}]_j &= \\begin{cases} 1, & \\text{if }\\ [\\tilde{x}_o^{(t)}]_j \\geq 1 \\\\ 0, & \\text{otherwise} \\end{cases} \n\\end{align*}\n\\end{minipage}\\vspace{20px}\n\n\nwhere $\\cdot$ is the usual matrix multiplication, $(i, j) \\in \\{1, \\ldots, n_c\\} \\times \\{1, \\ldots, n_o\\}$ and $l_i$ is the level of the $i^{th}$ core vertex. An output vertex of the firing graph is fed with the activation of a targeted factor, delayed in time by the number of layers minus 1. That is, for single and joint sampled firing graphs, the decay is respectively set to 1 and 2. The factor's activations generate a feedback at the output that is backpropagated through the firing graph. Supposing that we set the factor's decay to $d \\geq 1$, the feedback is defined as \n\n\\begin{equation*}\nx_{b,o}^{(t)} = x_o^{(t)} \\circ \\left( (p + q) \\times x_f^{(t-d)} - p \\right)\n\\end{equation*}\n\nwhere $\\circ$ denotes the Hadamard product, $x_f^{(t-d)}$ is the vector of states of factors at instant $t-d$ and $p, q$ are pre-defined positive integers. 
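\n\nAs a complement to these equations, the sketch below performs one forward step (FT then FP) and computes the output feedback; it uses small dense numpy arrays for readability, and all sizes, names and edge placements are arbitrary.\n\n\\begin{verbatim}\nimport numpy as np\n\nn_i, n_c, n_o = 4, 2, 1\nI = np.array([[1, 0], [1, 0], [1, 0], [0, 1]])   # input -> core adjacency\nC = np.array([[0, 1], [0, 0]])                   # core -> core adjacency\nO = np.array([[0], [1]])                         # core -> output adjacency\nlevels = np.array([3, 2])                        # level l_i of each core vertex\n\ndef forward(x_i, x_c):\n    x_c_tilde = x_i @ I + x_c @ C                # forward transmitting (FT)\n    x_o_tilde = x_c @ O\n    x_c_new = (x_c_tilde >= levels).astype(int)  # forward processing (FP)\n    x_o_new = (x_o_tilde >= 1).astype(int)\n    return x_c_new, x_o_new\n\ndef feedback(x_o, x_f_delayed, p, q):\n    # x_{b,o}^{(t)} = x_o o ((p + q) * x_f^{(t-d)} - p)\n    return x_o * ((p + q) * x_f_delayed - p)\n\nx_c, x_o = forward(np.array([1, 1, 1, 0]), np.zeros(n_c, dtype=int))\nprint(x_c, x_o, feedback(x_o, x_f_delayed=1, p=2, q=3))\n\\end{verbatim}\n\n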
A correct backpropagation of $x_{b,o}^{(t)}$ up to the input vertices is made possible by using time and space coherence of firing graph's forward states. We denote by $V_i$, $i\\in \\mathbb{N}$ the set of vertices that has a path, composed of $i$ vertices, toward an output vertex. Let $G$ be a firing graph with $k \\in \\mathbb{N}^{*}$ layers augmented with a layer of ouptut vertices. Let $V_o$ the set of output vertices, $\\forall v, o \\in V_0 \\times V_o$, $v$ is elligible to $o$'s feedback at instant $t$ if and only if \n\n\\begin{itemize}\n\\item $v$ was active at instant $t-1$\n\\item $v$ has an edge toward $o$\n\\end{itemize} \n\nThe same principle can be used to backpropagate the feedback from vetices of $V_0$ towards vertices of $V_1$ and so on. Generally speaking, the back propagation from vertices of $V_i$ towards $V_{i+1}$ respects $\\forall v, v' \\in V_{i} \\times V_{i-1}$, $v$ is elligible to feedback of $v'$ at instant $t$ if and only if \n\n\\begin{itemize}\n\\item $v$ was active at instant $t - (2 \\times i + 1)$\n\\item $v$ has an edge toward $v'$\n\\end{itemize} \n\nFinally we can encode the backpropagation equations as\n\n\\vspace{20px}\\noindent\\begin{minipage}{.5\\linewidth}\n\\begin{center}\n\\underline{Backward transmitting (BT)}\n\\end{center}\n\\begin{align*}\n\\tilde{X}_{b, c}^{(t)} &= (O \\cdot X_{b, o}^{(t-1)} + C \\cdot X_{b, c}^{(t-1)}) \\circ X_{m, c}^{(t)T}\\\\\nX_{b,i}^{(t)} &= (I \\cdot X_{b, c}^{(t-1)}) \\circ X_{m, i}^{(t)T}\n\\end{align*} \n\\end{minipage}%\n\\noindent\\begin{minipage}{.5\\linewidth}\n\\begin{center}\n\\underline{Backward processing (BP)}\n\\end{center}\n\\begin{align*}\nX_{b, o}^{(t)} &= \\begin{bmatrix} \\textbf{0}_{no \\times 1} & x_{b, o}^{(t)} & \\textbf{0}_{no \\times d_{max} - 2} \\end{bmatrix} \\\\\nX_{b, c}^{(t)} &= \\begin{bmatrix} \\textbf{0}_{n_c \\times 2} & [\\tilde{X}_{b, c}^{(t)}]_{(:n_c, :(d_{max}-2))} \\end{bmatrix} \n\\end{align*}\n\\end{minipage}\n\n\\begin{center}\n\\underline{Structure udpates (SU)}\n\\begin{align*}\nO_w &= O_w + O \\circ (X_{b, o}^{(t-1)} \\cdot X_{m, c}^{(t)})^{T}\\\\\nC_w &= C_w + C \\circ (X_{b, c}^{(t-1)} \\cdot X_{m, c}^{(t)})^{T}\\\\\nI_w &= I_w + I \\circ (X_{b, c}^{(t-1)} \\cdot X_{m, i}^{(t)})^{T}\n\\end{align*}\n\\end{center}\\vspace{20px}\n\n\n \nWhere $X_{m,c}^{(t)} = \\begin{bmatrix} x_c^{(t)} & \\ldots & x_c^{(t- d_{max})}\\end{bmatrix}^{T}$ and $X_{m, i}^{(t)} =\\begin{bmatrix} x_i^{(t)} & \\ldots & x_i^{(t- d_{max})}\\end{bmatrix}^{T}$, $X_{c, b}^{(t)} \\in \\{0, q, - p \\}^{n_c \\times d_{max}}$ for $t \\in \\mathbb{N}^{*}$ and $X_{c, b}^{(0)} = \\textbf{0}_{n_c \\times d_{max}}$. Furthermore $d_{max} \\geq (l -1) \\times 2 + 1$ where $l$ is the number of layers of the firing graph. Finally we provide a parameter $T \\in \\mathbb{N}$ to the draining algorithm. It controls the targeted number of feedback that an edge should receive before disabling its update. Maintaining update's permissions for each edge requires an operation similar to structure updates. Finally, the draining algorithm iterates forward and backward pass until either $G$ is composed of two distinct connexe components, no structure update is enabled or the maximum number of iterations $T_{max} \\in \\mathbb{N}$ has been reached. 
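\n\nThe complete procedure is summarized in the algorithm below. As a complement, here is a deliberately simplified draining sketch for the single sampled firing graph (one layer-1 vertex with level 1, decay 1), in which the weight of each input edge behaves as the score process $s_{v, f}[N, T, p, q]$ of the corresponding bit; it illustrates the update semantics only, not the buffered matrix equations above, and the alignment of grid and factor states stands in for the decay handling.\n\n\\begin{verbatim}\nimport numpy as np\n\ndef drain(X, y, N, T, p, q):\n    # X: (T_max, n_i) grid states, y: (T_max,) factor states already\n    # aligned with X (the decay is assumed to be handled upstream)\n    n_i = X.shape[1]\n    w = np.full(n_i, N)                # initial weight of input edges\n    counts = np.zeros(n_i, dtype=int)  # number of feedbacks per edge\n    for x_t, y_t in zip(X, y):\n        fired = (x_t > 0) & (w > 0) & (counts < T)\n        w[fired] += q if y_t else -p   # +q if the factor was active, -p else\n        counts[fired] += 1\n    return w                           # edges with w <= 0 have vanished\n\\end{verbatim}\n\n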
\n\n\\begin{algorithm}[H]\n\\caption{Draining}\n\\textbf{Input:} $G$, T, $T_{max}$, $p$, $q$, decay\\\\\n\\textbf{Output:} $G$ drained\n\\begin{algorithmic}\n\\State $i \\gets 0$\\Comment{Initialisation}\n\\State $X_{b,c}$, $x_{b, o}, x_i, x_c, X_{m, c}, X_{m, i}$ $\\gets$ InitSignals()\n\\While{$i < T_{max}$}\\Comment{Core loop}\n \\State $x_i \\gets nextGridState()$\n \\State $x_c, x_o \\gets \\textit{FT}(G, x_i, x_c)$\\Comment{Forward pass}\n \\State $X_{m, c}, X_{m, i}, x_c, x_o \\gets \\textit{FP}(x_c, x_o)$\n \\If {$i \\geq decay$}\n \\State $x_f \\gets nextFactoreState()$\n \\State $X_{b,c}, X_{b, o} \\gets \\textit{BP}(X_{b, c}, X_{b, o} x_f, p, q)$\\Comment{Backward pass}\n \\State $G' \\gets \\textit{SU}(T, G, X_{b, c}, X_{b, o}, X_{m, c}, X_{m, i})$\n \\State $X_{b, c}, X_{b, i} \\gets \\textit{BT}(G, X_{b, c}, X_{b, o}, X_{m, c}, X_{m, i}, )$\n \\State $G \\gets G'$\n \\EndIf\t\n \\If {$G.cc == 2$ or $\\textit{not } G_{mask}.any()$}\\Comment{Stop conditions}\n \\State $break$\n \\EndIf\t\n \\State $i \\gets i + 1$ \n\\EndWhile\n\\end{algorithmic}\n\\end{algorithm}\n\nClearly, the complexity of the algorithm is dominated by the backward transmit and structure updates operations. A standard worst case analysis of those operations gives $\\mathcal{O}(n^{4} \\times d_{max}^2)$, where $n$ is the total number of vertices in the firing graph. Yet this analysis relies on standard complexity time for dense matrix operations, and does not take into account neither the sparsity of signals and direct link matrices nor the distribution of input vertices's activations. In practice, we have found that the forward and backward propagation of bits and factors's activations is time consuming, especially when both $N$ and $T$ are large numbers. Thus, to reduce running time, batch\\_size successive bits and factors's states are forward and backward propagated with an efficient vectorization of the equation. The decrease in time complexity of this practical trick is impressive and worth the gain in space complexity of the algorithm. Finally this trick may requires to dynamically change the batch\\_size so that treshold for the number of updates at each edges is respected.\n\n\\subsection{Analysis of the algorithm}\n\n\\begin{theorem}\n\\label{th_stopping}\nGiven a set of sampled bits $S$, a set of pre-selected bits $I =\\{b_{1}^{*}, \\ldots, b_{i}^{*}\\}$ a target factor $f$ and $G$, the firing graph built after sampling algorithm. A 5-tuple $(\\omega, N, T, p, q)$ exists such that the probability of event E: \"no input vertices of $G$ have outcoming edges at the end of the draining\" is upper bounded. More specifically\n\n\\begin{equation*}\n\\mathbb{P} \\left( E \\right) \\leq \\sum_{j = 0}^{\\vert S \\vert} p_{-}^j \\times \\mathbb{P}_{\\mathcal{S}} \\left( \\vert \\lbrace s \\in S \\setminus \\omega_{I \\cup \\{s\\}, \\vert I \\vert + 1, f } < \\omega \\rbrace \\vert = j \\right) \n\\end{equation*}\n\nWhere $p_{-} = \\mathbb{P} \\left( s_{v, f}[N, T, p,q] < 0 \\vert \\omega_{I \\cup \\{ s \\}, i + 1, f} < \\omega \\right)$. Where $v(I \\cup \\{ s \\}, i+1)$, for any $s \\in S$, is a vertex of layer 1 of a firing graph $G$ of 2 layers. 
Furthermore\n\n\\begin{equation*}\n\\mathbb{P} \\left( s_{v, f}[N, T, p, q]< 0 \\vert \\omega_{I \\cup \\{ s \\}, i + 1, f} < \\omega \\right) \\leq C \\times \\max \\left( \\exp\\left(- T \\times \\left(\\frac{\\delta_{f} c}{\\sigma}\\right)^2 \\right), \\exp\\left(- T \\times \\delta_{f}c \\right) \\right)\n\\end{equation*}\n\nWith $\\delta_{f}$, $C$ and $c$ are postitive constants that depends on $\\omega$ and $i$ and $f$. $\\mathrm{Var}[s_{v, p, q, f, t}] = \\sigma^2$. \n\n\\end{theorem}\n\n\\textbf{Proof.} As a reminder, in the core of this proof, we refer to $d$ and $d'$ respectively to the distribution over bits's activations and factors's activations. Given the arrangement of vertices of graph $G$ and the forward equations of the draining algorithm, the activation of any vertices $b \\in S$ that will be propagated toward an output vertex, is modelled by the following characteristic polynomial\n\n\\begin{equation*}\n\\mathcal{P}_{\\{b\\}, 1} \\cdot \\mathcal{P}_{\\{u, v\\}, 2}\n\\end{equation*}\n\nWith $v(S, 1)$ and $u(\\{b_1^{*}, \\ldots, b_i^{*}\\}, i)$. Thus, using (\\ref{prop:2layer-2}), the activity of $b$ that is propagated to the ouptut vertex is the same than the activity of a vertex $v(\\{b_1^{*}, \\ldots, b_i^{*}, b\\}, i+1)$ at the layer 1 of a firing graph $G'$ where $b_1^{*}, \\ldots, b_i^{*}$ and $b$ compound its layer 0. Furtermore, given the time and space consistency of the backpropagation of the feedback from the output vertex, the weight of the outcoming edge of $b$, at the convergence of the draining algorithm, is either $0$ or equal to the score process of vertex $v$ in $G'$ with respect to $f$, $s_{v, f}[N, T, p, q]$. Then, the first inequality is obtained by developping\n\n\\begin{align*}\n\\mathbb{P} \\left( E \\right)&= \\sum_{j=0}^{\\vert S \\vert} p_{-}^j \\times p_{+}^{\\vert S \\vert -j} \\times\\mathbb{P}_{\\mathcal{S}} \\left( \\vert \\lbrace s \\in S \\setminus \\omega_{I \\cup \\{s\\}, i + 1, f } < \\omega \\rbrace \\vert = j \\right) \\\\\n&\\leq \\sum_{j=0}^{\\vert S \\vert} p_{-}^j \\times \\mathbb{P}_{\\mathcal{S}} \\left( \\vert \\lbrace s \\in S \\setminus \\omega_{I \\cup \\{s\\}, i + 1, f } < \\omega \\rbrace \\vert = j \\right)\n\\end{align*} \n\nWhere \n\\begin{itemize}\n\\item $I =\\{b_{1}^{*}, \\ldots, b_{i}^{*}\\}$\n\\item $p_{-} = \\mathbb{P} \\left( s_{v, f}(N, T, p, q) < 0 \\vert \\omega_{I\\cup\\{s \\}, i + 1, f} < \\omega \\right)$\n\\item $p_{+} = \\mathbb{P} \\left( s_{v, f}(N, T, p, q) < 0 \\vert \\omega_{I\\cup\\{s \\}, i + 1, f} \\geq \\omega \\right)$\n\\end{itemize}\n\nThen, we choose the value of the postive real $\\omega$ such that a measure grid's bit $b^{+}$ verifies\n\n\\begin{align*}\nb^{+} = &\\argmin_{b \\in \\mathcal{G}} \\vert \\omega - \\omega_{I \\cup \\{ b \\}, i +1, f} \\vert \\\\\n&\\textit{ such that } \\omega - \\omega_{I \\cup \\{ b \\}, i +1, f} > 0 \n\\end{align*}\n\nAnd we define the vertex $v^{+}(I \\cup \\{b^{+}\\}, i+1)$ and $\\delta^{+} = \\vert \\omega - \\omega_{v^{+}, f} \\vert$. 
If vertex $v$ is such that $\\omega_{v,f} < \\omega$ then using (\\ref{prop:precision1}) one gets\n\n\\begin{equation*}\n\\underbrace{\\frac{\\Vert \\mathcal{P}_{f} \\Vert_{d'}}{\\Vert \\mathcal{P}_{f} \\Vert_{d'} + (\\omega - \\delta) \\times (1 - \\Vert \\mathcal{P}_{f} \\Vert_{d'})}}_{\\phi_{v, f}} \\geq \\underbrace{\\frac{\\Vert \\mathcal{P}_{f} \\Vert_{d'}}{\\Vert \\mathcal{P}_{f} \\Vert_{d'} + (\\omega - \\delta^{+}) \\times (1 - \\Vert \\mathcal{P}_{f} \\Vert_{d'})}}_{\\phi_{v^{+}, f}} > \\underbrace{\\frac{\\Vert \\mathcal{P}_{f} \\Vert_{d'}}{\\Vert \\mathcal{P}_{f} \\Vert_{d'} + \\omega \\times (1 - \\Vert \\mathcal{P}_{f} \\Vert_{d'})}}_{\\phi}\n\\end{equation*}\n\nFor some real $\\delta \\geq \\delta^{+} > 0$. Then, We choose the 4-tuple $(N, T, p, q)$ as follow:\n\n\\begin{align*}\n(p, q) &\\in \\mathbb{N}^{2} \\textit{ such that } \\phi \\times (p + q) - p < 0\\\\\nN &= -T \\times (\\phi \\times (p+q) - p)\\\\\nT &\\in \\mathbb{N} \\textit{ such that } N \\textit{ large enough}\n\\end{align*}\n\nThus, given $\\omega_{v,f} < \\omega$ one can write\n\n\\begin{align*}\n\\mathbb{P} \\left( s_{v,f}[N, T, p, q] < 0 \\right) &= \\mathbb{P} \\left( N + \\sum_{t=1}^{T} s_{v, p, q, T, f} < 0 \\right) \\\\\n&= \\mathbb{P}\\left( \\sum_{t=1}^{T} s_{v, p, q, t, f} - T \\times \\mathbb{E}\\left[s_{v, p, q, 1, f}\\right] < -N - T \\times \\mathbb{E}\\left[s_{v, p, q, 1, f}\\right]\\right)\n\\end{align*}\n\nFurthermore from the definition of $\\phi$ and $\\phi_{v, f}$ we have\n\n\\begin{equation*}\n\\phi_{v, f} =\\phi + \\underbrace{\\delta \\times \\phi \\times \\phi_{v, f} \\times \\frac{1 - \\Vert \\mathcal{P}_{f} \\Vert_{d'}}{\\Vert \\mathcal{P}_{f} \\Vert_{d'}}}_{\\delta_{v, f}}\n\\end{equation*}\n\nYet using equation (\\ref{prop:score_mean}) one have\n\n\\begin{align*}\n\\mathbb{E}\\left[s_{v, p, q, 1, f}\\right] &= \\phi_{v, f} \\times (p + q) - p\\\\\n&= (\\phi + \\delta_{v, f}) \\times (p + q) - p\\\\\n\\end{align*}\n\nUsing $N = - T \\times \\left(\\phi \\times (p + q) - p \\right)$ and the definition of $\\phi$ one have\n\n\\begin{equation*}\n-N - T \\times \\mathbb{E}\\left[s_{v, p, q, 1, f}\\right] = -T \\times (p + q) \\times \\delta_{v, f} \n\\end{equation*}\n\nThus\n\n\\begin{align*}\n\\mathbb{P} \\left( s_{v,f}[N, T, p, q] < 0 \\right) &= \\mathbb{P}\\left( \\sum_{t=1}^{T} s_{v, p, q, t, f} - \\mathbb{E}\\left[s_{v, p, q, t, f}\\right] < -T \\times (p + q) \\times \\delta_{v, f} \\right)\\\\\n&\\leq \\mathbb{P}\\left(\\vert \\sum_{t=1}^{T} s_{v, p, q,t, f} - \\mathbb{E}\\left[s_{v, p, q, t, f}\\right] \\vert > T \\times (p + q) \\times \\delta_{v, f} \\right)\\\\\n&\\leq \\mathbb{P}\\left(\\vert \\sum_{t=1}^{T} s_{v, p, q, t, f} - \\mathbb{E}\\left[s_{v, p, q, t, f}\\right] \\vert > T \\times \\delta_{f} \\right)\n\\end{align*}\n\nWith $\\delta_{f} = (p + q) \\times \\delta^{+} \\times \\phi^{2} \\times \\frac{1 - \\Vert \\mathcal{P}_{f} \\Vert_{d'}}{\\Vert \\mathcal{P}_{f} \\Vert_{d'}}$. \\\\\n\nAt this point we have to notice that $\\lbrace s_{v, p, q, t, f} \\rbrace_{t=1, \\ldots, T}$ is a sequence of i.i.d random variables with mean $\\mu$ and variance $\\sigma^2$ that verifies $\\vert s_{v, p, q, t, f} \\vert \\leq max(p, q)$. Thus one can apply the Chernoff inequality as formulated in \\cite{TAO-1}. 
In particular, taking $\\lambda = \\sigma^{-1}\\delta_{f}$ we obtain\n\n\\begin{align*}\n\\mathbb{P}\\left(\\vert \\sum_{t=1}^{T} s_{v, p, q, tn f} - \\mathbb{E}\\left[s_{v, p, q, t, f}\\right] \\vert > T \\delta_{f} \\right) &= \\mathbb{P}\\left(\\vert \\sum_{t=1}^{T} s_{v, p, q, t} - \\mathbb{E}\\left[s_{v, p, q, t}\\right] \\vert > \\lambda\\sigma \\sqrt{T} \\right)\\\\\n&\\leq C \\times \\max \\left( \\exp\\left(- T \\times \\left(\\frac{\\delta_{f} c}{\\sigma}\\right)^2 \\right), \\exp\\left(- T \\times \\delta_{f}c \\right) \\right)\n\\end{align*}\n\n\nWith $C, c$ some positive constant and $\\mathrm{Var}[s_{v, p, q, t, f}] = \\sigma^2$ for $t \\in \\{1, \\ldots, T \\}$. Q.E.D.\n\n\\begin{center}\n\\rule[0pt]{100pt}{1pt} \n\\end{center}\n\n\\begin{theorem}\n\\label{th_precision}\nGiven a set of sampled bits $S$, a set of pre-selected bits $I =\\{b_{1}^{*}, \\ldots, b_{i}^{*}\\}$ a target factor $f$ and $G$ the firing graph built after sampling algorithm. A sequence of 5-tuple $(\\omega, N, T, p, q)$ exists such that for each input vertex $v$ of $G$, from which the output is reachable, we have \n\n\\begin{equation}\n\\mathbb{P} \\left( \\omega_{v, f} > \\omega \\right) \\leq C \\times \\max \\left( \\exp\\left(- T \\times \\left(\\frac{\\delta_{f} c}{\\sigma}\\right)^2 \\right), \\exp\\left(- T \\times \\delta_{f}c \\right) \\right)\n\\end{equation}\n\nWhere $v(I \\cup \\{ s \\}, i+1)$, for any $s \\in S$, is a vertex of layer 1 of a firing graph $G$ of 2 layers and $\\delta_{f}$, $C$ and $c$ are postitive constants that depends on $\\omega$ and $i$ and $\\mathrm{Var}[s_{v, p, q, f, t}] = \\sigma^2$. \n\\end{theorem}\n\n\\textbf{Proof.} \nAs in the proof of the previous theorem, using the arrangement of vertices of $G$, the property (\\ref{prop:2layer-2}) and the forward and backward equations of the draining algorithm, one can show that the weight of the outcoming edge of any vertices $b \\in S$ of $G$ is either equal to 0 or to the score process $s_{v, f}[N, T, p, q]$ where $v(\\{b_1^{*}, \\ldots, b_i^{*}, b\\}, i+1)$ is a vertex at the layer 1 of a firing graph $G'$ where $b_1^{*}, \\ldots, b_i^{*}$ and $b$ compound its layer 0. Furthermore, if sample $b$ still have outcoming edges after draining, then\n\n\\begin{equation*}\n\\mathbb{P} \\left( \\omega_{v, f} > \\omega \\right) = \\mathbb{P}_{\\mathcal{S}} \\left( \\omega_{I \\cup \\{b\\}, i + 1, f } > \\omega \\right) \\times \\mathbb{P} \\left( s_{v, f}(N, T, p, q) > 0 \\vert \\omega_{I \\cup \\{ b \\}, i + 1, f} > \\omega \\right)\n\\end{equation*}\n\nThen, we choose the value of the postive real $\\omega$ such that a bit $b^{-}$ verifies\n\n\\begin{align*}\nb^{-} = &\\argmin_{b \\in \\mathcal{G}} \\vert \\omega - \\omega_{I \\cup \\{ b \\}, f} \\vert \\\\\n&\\textit{ such that } \\omega - \\omega_{I \\cup \\{ b \\}, f} < 0 \n\\end{align*}\n\nAnd we define the vertex $v^{-}(I \\cup \\{b^{-}\\}, i+1)$ and $\\delta^{-} = \\vert \\omega - \\omega_{v^{-}, f} \\vert$. 
If $v$ is such that $\\omega_{v, f} < \\omega$ then using (\\ref{prop:precision1}) we have \n\n\\begin{equation*}\n\\underbrace{\\frac{\\Vert \\mathcal{P}_{f} \\Vert_{d'}}{\\Vert \\mathcal{P}_{f} \\Vert_{d'} + (\\omega + \\delta) \\times (1-\\Vert \\mathcal{P}_{f} \\Vert_{d'})}}_{\\phi_{v, f}} \\leq \\underbrace{\\frac{\\Vert \\mathcal{P}_{f} \\Vert_{d'}}{\\Vert \\mathcal{P}_{f} \\Vert_{d'} + (\\omega + \\delta^{-}) \\times (1 - \\Vert \\mathcal{P}_{f} \\Vert_{d'})}}_{\\phi_{v^{-}, f}} < \\underbrace{\\frac{\\Vert \\mathcal{P}_{f} \\Vert_{d'}}{\\Vert \\mathcal{P}_{f} \\Vert_{d'} + \\omega \\times (1 - \\Vert \\mathcal{P}_{f} \\Vert_{d'})}}_{\\phi}\n\\end{equation*}\n\nfor some $\\delta \\geq \\delta^{-} > 0$. Then defining the 4-tuple $(N, T, p, q)$ as\n\n\n\\begin{align*}\n(p, q) &\\in \\mathbb{N}^{2} \\textit{ such that } \\phi \\times (p + q) - p < 0\\\\\nN &= -T \\times (\\phi \\times (p+q) - p)\\\\\nT &\\in \\mathbb{N} \\textit{ such that } N \\textit{ large enough}\n\\end{align*}\n\nThen, reproducing the same development as it was done in the proof of previous theorem, one can derive a convenient form to easily apply the Chernoff inequality.\n\n\\begin{equation*}\n\\mathbb{P} \\left( s_{v, f}(N, T, p, q) > 0 \\vert \\omega_{v, f} > \\omega \\right) \\leq \\mathbb{P}\\left(\\vert \\sum_{t=0}^{T_i - 1} s_{v, p, q, t, f} - \\mathbb{E}\\left[s_{v, p, q, t, f}\\right] \\vert > T \\times \\delta_{f} \\right)\n\\end{equation*}\n\nWith $\\delta_{f} = (p + q) \\times \\delta^{-} \\times \\phi^{2} \\times \\frac{1 - \\Vert \\mathcal{P}_{f} \\Vert_{d'}}{\\Vert \\mathcal{P}_{f} \\Vert_{d'}}$. Then using the Chernoff inequality as written in \\cite{TAO-1} using $\\lambda = \\sigma^{-1} \\delta_{f}$ we obtain\n\n\n\\begin{align*}\n\\mathbb{P}\\left(\\vert \\sum_{t=0}^{T - 1} s_{v, p, q, t, f} - \\mathbb{E}\\left[s_{v, p, q, t, f}\\right] \\vert > T \\times \\delta_{f} \\right) &= \\mathbb{P}\\left(\\vert \\sum_{t=0}^{T - 1} s_{v, p, q, t, f} - \\mathbb{E}\\left[s_{v, p, q, t, f}\\right] \\vert > \\lambda \\sigma \\sqrt{T} \\right) \\\\\n&\\leq C \\times \\max \\left( \\exp\\left(- T \\times \\left(\\frac{\\delta_{f} c}{\\sigma}\\right)^2 \\right), \\exp\\left(- T \\times \\delta_{f}c \\right) \\right)\n\\end{align*}\n\nWith $C, c$ some positive constant and $\\mathrm{Var}[s_{v, p, q, t, f}] = \\sigma^2$. Q.E.D\n\n\\begin{center}\n\\rule[0pt]{100pt}{1pt} \n\\end{center}\n\n\\subsection{Limit of the generic case}\n\nThe combination of theorems shows that the association of sampling and draining with the right choice of 5-tuple $(\\omega, N, T, p, q)$ gives a convenient tool to select measure grid's bits with purity coefficient lower than a target $\\omega$. Furthermore, when $T \\rightarrow +\\infty$, the correct selction is almost certain, which highlights the trade-off between efficiency and complexity of the algorithm that is embedded in the choice of $\\omega$ and $T$, on which depends $N, p$ and $q$.\nThis generic procedure and its analysis deliver a strong framework that eases the derivation of more specific results that may be obtained under specific modelling of latent factors's activations and measure grid signatures. 
Nevertheless, it leaves two fundamental points clueless \n\n\\begin{itemize}\n\\item No possibility to quantify further the effectiveness of the sampling strategy \n\\item No specific procedure or heuristics to choose positive real value $\\omega$\n\\end{itemize}\n\nIn the rest of this paper, we present two particular cases of factor's and measure grid's modelling that enables a better quantification of the sampling strategy and stronger heuristics for the choice of $\\omega$. \n\n\\section{Case of signal plus noise}\n\nThis particular case is designed to be easy to analyze. We first define the statistical modelling of factors and bits's activations. Then, we quantify the sampling strategy and justify a choice for the 5-tuple $(\\omega, T, N, p, q)$. Finally, we present simulations and provide discussion of the results obtained with this special case.\n\n\\subsection{Statistical modelling}\n\nIn this particular case, we assume that the target factor $f$ is linked to some $\\vert \\mathcal{G}(f)\\vert = k$ measure grid's bits and activates with probability $p_f$. We also assume that bits of the measure grid are identically and independently subject to a noisy activation with probability $p_N$. We may see noisy activations as the result of $n$ noisy latent factors, linked to exactly 1 bit of the measure grid, that is $K=n + 1$. Under this model, the probability for a bit $b\\in \\mathcal{G}$ to activate is defined as \n\n\\begin{equation*}\n\\mathbb{P} \\left( \\textit{\"b active\"} \\right) = \\begin{cases} p_{f} + p_N \\times (1-p_{f}), & \\text{if }\\ b \\in \\mathcal{G}(f^{*}) \\\\ p_N, & \\text{Otherwise} \\end{cases}\n\\end{equation*}\n\nAs a consequence, for any $I \\in S(\\mathcal{G})$ such that $\\vert I \\cap \\mathcal{G}(f) \\vert = i$ and $j = \\vert I \\vert -i$ if we set $x \\in F_2 (\\{I\\} , \\mathcal{G})$, the distribution over measure grid bits's activations is defined as\n\n\\begin{equation*}\nd_{x} = \\begin{cases} p_N^{i+j} \\times (1-p_N)^{n - i - j} \\times (1- p_{f}) + p_N^{j} \\times (1-p_N)^{n - k - j} \\times p_{f}, & \\text{if }\\ i=k \\\\ p_N^{i+j} \\times (1-p_N)^{n - i - j} \\times (1- p_{f}), & \\text{Otherwise} \\end{cases}\n\\end{equation*}\n\nIn the rest of the section, we will always refer to this distribution as $d$.\n\n\\subsection{Evaluation of bits}\n\nLet $G$ be a firing graph with a layer 0 composed of measure grid's bits. Then, the precision of a vertex $v(I, \\vert I \\vert)$ of layer 1 of $G$, with respect to $f$, depends only on $\\vert I \\vert$ and $\\vert I \\cap \\mathcal{G}(f) \\vert$. Indeed, if $\\vert I \\cap \\mathcal{G}(f) \\vert = i$ then\n\n\\begin{equation*}\n\\phi_{v,f} = \\frac{p_{f}}{p_{f} + (1 - p_{f})\\times p_N^{i}}\n\\end{equation*}\n\nWith identification of terms using (\\ref{prop:precision1}) we have $\\omega_{v, f} = p_N^{i}$ and using previously defined distribution, one finds that $\\mu_{v, f} = p_N^{\\vert I \\vert - i}$, $\\nu_{v, f} = p_N^{\\vert I \\vert}$. 
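\n\nA quick Monte Carlo check of this closed form (ours, with smaller sizes than the simulations presented later) can be written as follows; it estimates the precision of a vertex $v(I, \\vert I \\vert)$ with $\\vert I \\cap \\mathcal{G}(f) \\vert = 2$ under the signal plus noise model and compares it with the formula above for $i = 2$.\n\n\\begin{verbatim}\nimport numpy as np\n\nrng = np.random.default_rng(1)\nn, k, p_f, p_N, T = 200, 20, 0.3, 0.5, 100_000\nf_bits = rng.choice(n, size=k, replace=False)\nother = np.setdiff1d(np.arange(n), f_bits)\nI = np.r_[f_bits[:2], rng.choice(other, 1)]   # |I| = 3, i = 2\n\nfactor = rng.random(T) < p_f                  # activations of f\ngrid = rng.random((T, n)) < p_N               # noisy activations\ngrid[:, f_bits] |= factor[:, None]            # bits of G(f) also fire with f\n\nv_fires = grid[:, I].all(axis=1)              # vertex v(I, |I|)\nphi_hat = factor[v_fires].mean()\nphi_theory = p_f / (p_f + (1 - p_f) * p_N**2)\nprint(round(phi_hat, 3), round(phi_theory, 3))  # both close to 0.632\n\\end{verbatim}\n\n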
Besides, given a set of bits $I$ such that $I \\subset \\mathcal{G}(f)$, if $b \\in \\mathcal{G}(f) \\setminus I$, the precision of vertex $v(I \\cup \\{ b \\}, \\vert I \\vert +1)$ with respect to $f$ is \n\n\\begin{equation*}\n\\phi_{v} = \\frac{p_{f}}{p_{f} + (1 - p_{f})\\times p_N^{\\vert I \\vert + 1}}\n\\end{equation*}\n\nif $b \\notin \\mathcal{G}(f)$\n\n\\begin{equation*}\n\\phi_{v} = \\frac{p_{f}}{p_{f} + (1 - p_{f})\\times p_N^{\\vert I \\vert}}\n\\end{equation*}\n\n\n\\subsection{Sampling Strategy}\n\nIn this particular case we follow the generic sampling procedure $\\mathcal{S}$ with parameter $p_{\\mathcal{S}}$. Thus, using the previously defined statistical distribution of bits's activations, if we denote $S$, the set of sampled bits using $\\mathcal{S}$, the distribution of the cardinal of $S$ is \n\n\\begin{align*}\n\\mathbb{P}\\left(\\vert S \\vert = s\\right) = \\begin{cases} \\begin{pmatrix} n - k \\\\ s - k \\end{pmatrix} \\times p_N^{s - k } \\times (1 - p_N)^{n-k-s} \\times p_{\\mathcal{S}} , & \\text{if }\\ s \\geq k \\\\ 0, & \\text{otherwise} \\end{cases}\n\\end{align*}\n\nThus its expected size is $\\mathbb{E}\\left[ \\vert S \\vert \\right] = k + (n - k) \\times p_N \\times p_{\\mathcal{S}} $. Furthermore if $I= \\{b_1^{*}, \\ldots, b_i^{*} \\}\\in S(\\mathcal{G})$ is some set of pre-selected bits and $S$ is a set of bits sampled using $\\mathcal{S}$, a positive real $\\omega_i$ exists such that\n\n\\begin{align*}\n\\mathbb{P}_{\\mathcal{S}} \\left( \\vert \\lbrace s \\in S \\setminus \\omega_{I \\cup \\{s\\}, \\vert I \\vert + 1, f } < \\omega_i \\rbrace \\vert = j \\right) &= \\mathbb{P}_{\\mathcal{S}} \\left( \\vert \\lbrace s \\in S \\setminus s\\in \\mathcal{G}(f) \\vert = j \\right)\\\\\n&= \\begin{pmatrix} \\vert \\mathcal{G}(f) \\vert -i \\\\ j \\end{pmatrix} \\times p_{\\mathcal{S}}^{j} \\times (1 - p_{\\mathcal{S}})^{\\vert \\mathcal{G}(f) \\vert -i - j}\n\\end{align*}\n\n\\subsection{Identification of factors}\n\nFirst, in the case of a single sampled firing graph, one can see that bits's purity coefficients take only two values with respect to $f$\n\n\\begin{align*}\n\\omega_{\\{b\\}, 1, f} = \\begin{cases} p_N , & \\text{if }\\ b \\in \\mathcal{G}(f) \\\\ 1, & \\text{otherwise} \\end{cases}\n\\end{align*}\n\nThus if we choose \n\n\\begin{equation*}\n\\omega_0 = \\frac{(1 + p_N)}{2}\n\\end{equation*}\n\nIt maximizes the purity margin defined as\n \n\\begin{align*}\n\\delta_0 &= \\frac{(\\omega_0 - \\omega_{\\{ b \\}, 1, f}) + (\\omega_{\\{ b' \\}, 1, f} - \\omega_0)}{2} = \\frac{(1-p_N)}{2}\n\\end{align*}\n\nWhere $b \\in \\mathcal{G}(f)$ and $b' \\notin \\mathcal{G}(f)$. In the case of a joint sampled firing graph in which a set $I = \\{b_1^{*}, \\ldots, b_i^{*}\\}$ of $i \\in \\mathbb{N}^{*}$ pre-selected bits that verify $\\forall b \\in I$, $b \\in \\mathcal{G}(f)$, remaining bit's purity coefficients with respect to $f$ can take again two values\n\n\\begin{align*}\n\\omega_{I \\cup \\{b\\}, i+1, f} = \\begin{cases} p_N^{i+1} , & \\text{if }\\ b \\in \\mathcal{G}(f) \\\\ p_N^{i}, & \\text{otherwise} \\end{cases}\n\\end{align*}\n\nThus if we choose\n\n\\begin{equation*}\n\\omega_i = \\frac{(1 + p_N) \\times p_N^{i}}{2}\n\\end{equation*}\n\nit maximizes the purity margin defined as\n \n\\begin{align*}\n\\delta_i &= \\frac{(\\omega_i - \\omega_{I \\cup \\{ b \\}, i+1, f}) + (\\omega_{I \\cup \\{ b' \\}, i+1, f} - \\omega_i)}{2} = \\frac{(1-p_N) \\times p_N^{i}}{2}\n\\end{align*}\n\nWhere $b \\in \\mathcal{G}(f)$ and $b' \\notin \\mathcal{G}(f)$. 
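\n\nFor later reference, these two quantities are straightforward to evaluate numerically; the small helper below (ours) reproduces in particular the value $\\omega_5 \\simeq 0.062$ used in the joint sampled simulation presented below, where $p_N = 0.6$ and $i = 5$.\n\n\\begin{verbatim}\ndef purity_threshold(p_N, i):\n    omega_i = (1 + p_N) * p_N**i / 2   # threshold maximizing the margin\n    delta_i = (1 - p_N) * p_N**i / 2   # purity margin\n    return omega_i, delta_i\n\nprint(purity_threshold(0.6, 5))        # (0.0622..., 0.0155...)\n\\end{verbatim}\n\n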
Finally we define the 5-tuple $(N_i, T_i, p, q)$ as \n\n\\begin{align*}\n(p, q) &\\in \\mathbb{N}^{2} \\textit{ such that } \\phi_i \\times (p + q) - p \\leq 0 \\textit{ and } \\phi_i' \\times (p + q) - p > 0 \\\\\nN &= -T \\times (\\phi \\times (p+q) - p)\\\\\nT &\\in \\mathbb{N} \\textit{ such that } N \\textit{ large enough}\n\\end{align*}\n\nWhere $t \\in \\mathbb{N}$, $\\phi_i = \\frac{p_{f}}{p_{f} + \\omega_i \\times (1 - p_f)}$ and $\\phi_i' = \\frac{p_{f}}{p_{f} + (\\omega_i - \\delta_i) \\times (1 - p_f)}$.\n\n\\subsection{Simulation}\n\nThe signal plus noise model is implemented in python and mainly uses standard numpy and scipy modules to generate random signal that fit its probabilistic model. More details about the implementation can be found in appendix B. We generate $n=1000$ bits that randomly activate with probability $p_N$ and we choose randomly $\\vert \\mathcal{G}(f)\\vert = 50$ bits that are linked to a latent factor that activates with probability $p_f=0.3$. Finally we build the single sampled firing graph using $p_{\\mathcal{S}} = 1$.\n\n\\begin{figure}[H]\n\\subfloat[$p_N = 0.3$, $(p,q)=(1,1)$]{\\includegraphics[scale=0.165]{figures\/signalplusnoise_111.png}} \n\\subfloat[$p_N = 0.5$, $(p,q)=(2,3)$]{\\includegraphics[scale=0.165]{figures\/signalplusnoise_112.png}}\\\\\n\\subfloat[$p_N = 0.7$, $(p,q)=(3, 5)$]{\\includegraphics[scale=0.165]{figures\/signalplusnoise_121.png}}\n\\subfloat[$p_N = 0.9$, $(p,q)=(5,11)$]{\\includegraphics[scale=0.165]{figures\/signalplusnoise_122.png}} \n\\caption{Observation of the score process for different SNR models $T=500$}\n\\label{fig:sim_spn_1}\n\\end{figure}\n\nEach subplot of figure~\\ref{fig:sim_spn_1} shows the weight of outcoming edges of sampled vertices. Blue lines show the weight of edges outcoming from sampled bit $b \\in \\mathcal{G}(f)$ and red lines correspond to the weight of edges outcoming from sampled bit $b \\notin \\mathcal{G}(f)$. Finally the black horizontal line represents the theoretical mean value of $s_{v, f}[N, T, p, q]$ of a vertex with characteristic polynome $\\mathcal{P}_v = \\mathcal{P}_{\\{b\\}, 1}$, with $b \\in \\mathcal{G}(f)$. As theory suggests, we can see two distinct phenomenons, blues lines converge around theoretical mean for process of bits linked to the target factor and red lines converge to 0. However, the higher is $p_N$, the less noticeable is the distinction between each process. This is explained by the fact that the higher is $p_N$, the closer are the precision of bits linked to target factor $f$ and the precision of noisy bits. Futhermore the later observation induces a high value of $p+q$ which result in a more volatile score process. For the second simulation we use $n=1000$, $\\vert \\mathcal{G}(f)\\vert = 50$, $p_f=0.3$, $T=200$ and $p_{\\mathcal{S}}= 0.5$. Yet at the end of the draining we choose all the input vertices of the firing graph that still have an outcoming edge and use their combined activation as an estimator of the target factor's activation. 
We then measure their precision and recall over $100$ repetitions for each SNR ratio\n\n\\begin{table}[H]\n\\begin{tabular}{|l|l|l|l|l|l|}\n\\hline\n$p_N$ & Mean $\\phi$ & Standard deviation $\\phi$ & Mean $\\psi$ & Standard deviation $\\psi$ & Number of fails \\\\ \\hline\n0.3 & 1.0 & 0.00 & 0.87 & 0.30 & 0 \\\\ \\hline\n0.5 & 1.0 & 0.03 & 0.66 & 0.42 & 0 \\\\ \\hline\n0.7 & 0.97 & 0.13 & 0.43 & 0.46 & 4 \\\\ \\hline\n0.9 & 0.75 & 0.14 & 0.13 & 0.25 & 19 \\\\ \\hline\n\\end{tabular}\n\\caption{Evaluation of the naive estimation of the factor's activation, $T=200$, $100$ repetitions}\n\\label{tab:sim_spn_1}\n\\end{table}\n\nTable~\\ref{tab:sim_spn_1} shows quality indicators of the estimator for different SNR ratios. The first two columns give respectively the mean and standard deviation of the precision of the estimator. The following two columns give respectively the mean and standard deviation of its recall. Finally, the last column is the number of experiments that ended without any input vertex having a path towards the output, so that no estimator could be constructed. Again, we see that the quality of the estimator drops as the theoretical precisions of noisy bits and of the factor's bits get close to each other. Yet it shows that, for a reasonable SNR ratio, this naive estimator is still efficient at predicting the activation of the target latent factor. Finally, we simulate the signal plus noise model in the setting of a joint sampled firing graph. We use a measure grid of $n=1000$ bits from which we randomly sample $\\vert \\mathcal{G}(f) \\vert = 50$ bits linked to the target factor $f$, which activates with probability $p_f = 0.3$, and we set $p_N=0.6$. Finally we build the joint sampled firing graph by randomly pre-selecting 5 bits linked to the factor and running the sampling algorithm described previously with $p_{\\mathcal{S}} = 1$.\n\n\\begin{figure}[H]\n\\centering\n\\includegraphics[scale=0.30]{figures\/signalplusnoise_2.png}\n\\caption{Observation of the score process in a joint sampled firing graph with $T=500$ and $(p, q) = (7, 1)$}\n\\label{fig:sim_spn_2}\n\\end{figure}\n\nIn this case, we obtain $N=7$ and $\\omega_5 \\simeq 0.062$ when following the procedure described in the previous section. As for the first experiment, blue lines show the weight of edges outgoing from sampled bits $b \\in \\mathcal{G}(f)$ and red lines correspond to the weight of edges outgoing from sampled bits $b \\notin \\mathcal{G}(f)$. The black horizontal line represents the theoretical mean value of $s_{v, f}[N, T, p, q]$, where $v$ has characteristic polynomial $\\mathcal{P}_v = \\mathcal{P}_{\\{b_1, \\ldots, b_5, b\\}, 5}$, with $\\{b_1, \\ldots, b_5\\}$ the set of pre-selected bits and $b \\in \\mathcal{G}(f)$. The simulation validates the theoretical expectation, and the high value of $p+q$ explains the high volatility of the score process.\n\n\\section{Case of sparse measure grid}\n\nThis particular case is more complex than the previous one. We first define the statistical signature of the factors' and bits' activations. Then we quantify the sampling strategy and justify a choice for the 5-tuple $(\\omega, T, N, p, q)$. Finally, we present simulations and provide a discussion of the results obtained in this particular case.\n\n\\subsection{Statistical modelling}\n\n\n\\subsubsection*{Latent factor activation}\n\nWe assume that each of the $K$ latent factors activates independently with probability $p_f$. 
As a consequence, for any $I \\in S(\\mathcal{F})$, if we define $x$ such that $x \\in F_2 (\\{I\\}, \\mathcal{F})$, we can define the distribution of the factors' activations as \n\n\\begin{equation*}\nd'_x = \\begin{pmatrix} K \\\\ \\vert I \\vert \\end{pmatrix} \\times p_f^{\\vert I \\vert} \\times (1-p_f)^{K-\\vert I \\vert} \n\\end{equation*}\n\n\n\\subsubsection*{Measure grid activation}\n\nWe make two major assumptions about the activations of the measure grid's bits.\n\n\\begin{itemize}\n\\item For each factor $f \\in \\mathcal{F}$, each bit $b \\in \\mathcal{G}$ has the same probability $p_g$ of belonging to $\\mathcal{G}(f)$.\n\\item For each factor $f \\in \\mathcal{F}$ and each pair $(b_1, b_2) \\in \\mathcal{G}^2$, the events \"$b_1 \\in \\mathcal{G}(f)$\" and \"$b_2 \\in \\mathcal{G}(f)$\" are independent.\n\\end{itemize} \n\nAs a consequence, the probability for a bit $b$ to activate, given that every factor of some set $\\{f_1, \\ldots, f_k\\} \\subset \\mathcal{F}$ is active, reads\n\n\\begin{align*}\n\\mathbb{P}\\left(\\textit{b active } \\vert f_1, \\ldots, f_k \\textit{ active}\\right) &= \\sum_{i=1}^{k} \\begin{pmatrix} k \\\\ i \\end{pmatrix} \\times p^i_g \\times (1 - p_g)^{k - i}\\\\\n &= 1 - (1 - p_g)^{ k}\n\\end{align*}\n\nThe above quantity depends only on the number of active latent factors. Thus, for any $I \\in S(\\mathcal{G})$, if we define $x \\in F_2 (S(\\mathcal{G}) , \\mathcal{G})$, we can define the distribution of the bits' activations as \n\n\\begin{equation*}\nd_x = \\sum_{k = 1}^{K} \\left[ \\begin{pmatrix} K \\\\ k \\end{pmatrix} \\times p_f^{k} \\times (1-p_f)^{K-k}\\right] \\times \\left[ p^i_{g \\vert k} \\times (1 - p_{g\\vert k})^{n - i} \\right]\n\\end{equation*}\n\nwith $i = \\vert I \\vert$ and $p_{g \\vert k} = \\mathbb{P}\\left(\\textit{b active } \\vert f_1, \\ldots, f_k \\textit{ active}\\right)$.\n\n\\subsection{Evaluation of bits}\n\nLet $G$ be a firing graph whose layer 0 is composed of the measure grid's bits. Given a target factor $f$, the precision with respect to $f$ of a vertex $v(\\{ b \\}, 1)$ of the layer 1 of $G$ depends on whether $b \\in \\mathcal{G}(f)$ and on $\\vert \\mathcal{G}^{-1}(b) \\vert$. Indeed, if $b \\in \\mathcal{G}(f)$ and $\\vert \\mathcal{G}^{-1}(b) \\vert = l$, $l \\in \\{1, \\ldots, K\\}$, it is said to have a purity rank of $l$ and its precision with respect to $f$ reads\n\n\\begin{equation*}\n\\phi_{v, f} = \\frac{p_f}{p_f + (1 - p_f)\\times \\omega_{l}}\n\\end{equation*}\n\nwhere $\\omega_l = 1 - (1-p_f)^{l-1}$. If $b' \\notin \\mathcal{G}(f)$, the precision of $v'(\\{ b' \\}, 1)$ with respect to $f$ reads\n\n\\begin{equation*}\n\\phi_{v', f} = p_f\n\\end{equation*}\n\nFurthermore, if we have a vertex $v(I, \\vert I \\vert)$ such that $\\forall b \\in I$, $b \\in \\mathcal{G}(f)$ and $\\min_{b \\in I} \\vert \\mathcal{G}^{-1}(b) \\vert = l$, then the precision of $v$ with respect to $f$ verifies\n\n\\begin{equation*}\n\\phi_{v, f} \\leq \\frac{p_f}{p_f + \\omega^{-}_l \\times (1 - p_f)}\n\\end{equation*}\n\nwith \n\n\\begin{equation*}\n\\omega^{-}_l = \\sum_{k=K-l-1}^{K} \\begin{pmatrix} K \\\\ k \\end{pmatrix} p_f^{k} \\times (1-p_f)^{K - k}\n\\end{equation*}\n\nthe minimum purity coefficient one can obtain with bits that verify $b \\in \\mathcal{G}(f)$ and $\\vert \\mathcal{G}^{-1}(b) \\vert = l$, that is, the case of a vertex $v(I, \\vert I \\vert)$ with $I$ composed of all $\\begin{pmatrix} K \\\\ l \\end{pmatrix}$ possible such bits.
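\n\nSince these quantities depend only on the purity rank, they are straightforward to tabulate. The short sketch below, given for illustration only, computes $\\omega_l$ and the corresponding layer 1 precision for every possible rank, with the parameter values used in the simulations of the next subsections.\n\n\\begin{verbatim}\nimport numpy as np\n\np_f, K = 0.3, 10\nl = np.arange(1, K + 1)\n\n# Purity coefficient and precision of a vertex v({b}, 1) whose bit b\n# belongs to G(f) and has purity rank l\nomega_l = 1.0 - (1.0 - p_f) ** (l - 1)\nphi_l = p_f / (p_f + (1.0 - p_f) * omega_l)\n\\end{verbatim}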
\n\n\n\\subsection{Sampling Strategy}\n\nWe follow the generic sampling procedure $\\mathcal{S}$ with parameter $p_{\\mathcal{S}}$. Although it is not hard to derive key quantities such as $\\mathbb{E}\\left[ \\vert S \\vert \\right]$ or the probability of sampling bits linked to a target factor $f$ under this modelling, the generic formulas are not elegant and are of little interest for this simulation.\n \n\\subsection{Identification of factors}\n\nFirst, in the case of a single sampled firing graph, for any grid bit linked to the factor $f$, there are only $K$ possible purity coefficients. Thus we may set $\\omega$ to $\\omega_l$, with $l$ reasonably small, to differentiate samples of lower purity rank from those of greater purity rank. In the case of a joint sampled graph, where a set $I=\\{b_1^{*}, \\ldots, b_i^{*}\\}$ of bits was pre-selected, the choice of $\\omega$ is not trivial and is hard to derive efficiently and generically. Let $\\omega_{I, \\vert I \\vert, f}$ be the purity coefficient of the pre-selected set of bits; we set $\\omega = \\omega_{I, \\vert I \\vert, f} - \\delta$, where $\\delta \\in \\mathbb{R}$ should be chosen with caution. Finally we choose the 5-tuple $(\\omega, N, T, p, q)$ as \n\n\\begin{align*}\n(p, q) &\\in \\mathbb{N}^{2} \\textit{ such that } \\phi \\times (p + q) - p < 0 \\\\\nN &= -T \\times (\\phi \\times (p+q) - p)\\\\\nT &\\in \\mathbb{N} \\textit{ such that } N \\textit{ is large enough}\n\\end{align*}\n\n\nwhere $\\phi = \\frac{p_f}{p_f + \\omega \\times (1-p_f)}$. \n\n\\subsection{Simulation}\n\nThe sparse measure grid model is implemented in Python and uses the standard numpy and scipy modules to generate random signals that fit its probabilistic model. In our case we generate $n=1000$ bits with $K=10$ latent factors that activate with probability $p_f = 0.3$ and we link measure grid bits to factors independently with probability $p_g=0.3$. We build the single sampled firing graph by running the sampling algorithm described previously with $p_{\\mathcal{S}} = 1$. Finally, we set $\\omega= \\omega_{10}$, the highest purity coefficient for bits linked to the target factor $f$.\n\n\\begin{figure}[H]\n\\centering\n\\includegraphics[scale=0.33]{figures\/sparse_1.png}\n\\caption{Observation of the score process in a single sampled firing graph with $T=1000$ and $(p, q) = (1, 1)$}\n\\label{fig:sim_si_1}\n\\end{figure}\n\nWe clearly see a rapid differentiation of the score processes according to their purity rank. We can also observe that, at the end of the draining, the higher the purity coefficient is, the closer the weights of the corresponding edges are. Finally, the behaviour of the score processes validates the efficiency of the draining algorithm at blindly ranking bits of the measure grid, in an attempt to identify latent factors. The second experiment with the sparse measure grid model aims to give intuition on the choice of $\\delta$ used for draining a joint sampled firing graph. As for the previous simulation, we generate $n=1000$ bits with $K=10$ latent factors that activate with probability $p_f = 0.3$ and we link measure grid bits to factors independently with probability $p_g=0.3$. Then we randomly choose $i=5$ bits with purity rank $4$ with respect to the target factor $f$, denoted by $I=\\{b_1^{*}, \\ldots, b_i^{*}\\}$. 
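\n\nIn practice, the purity coefficient of the pre-selected set can be estimated by a simple Monte Carlo procedure. The sketch below is our own illustration rather than the reference implementation: it draws sparse measure grid activations and, assuming as in the signal plus noise case that the purity coefficient of a set of bits is the probability that all of them activate while the target factor is inactive, estimates it by its empirical frequency.\n\n\\begin{verbatim}\nimport numpy as np\n\nrng = np.random.default_rng(0)\nn, K, p_f, p_g = 1000, 10, 0.3, 0.3\n\n# Random factor-to-bit links: bit b belongs to G(f_k) with probability p_g\nlinks = rng.random((K, n)) < p_g\n\ndef draw_grid():\n    # One observation: a bit activates if at least one of the factors\n    # it is linked to is active\n    factors = rng.random(K) < p_f\n    return factors, links[factors].any(axis=0)\n\ndef estimate_purity(bit_idx, target, n_samples=1000):\n    # Frequency with which all bits of bit_idx fire while the target\n    # factor is inactive\n    fires, off = 0, 0\n    while off < n_samples:\n        factors, grid = draw_grid()\n        if not factors[target]:\n            off += 1\n            fires += grid[bit_idx].all()\n    return fires / n_samples\n\n# Bits linked to factor 0 with purity rank 4 (linked to exactly 4 factors)\nrank4 = np.where(links[0] & (links.sum(axis=0) == 4))[0]\nprint(estimate_purity(rank4[:5], target=0))\n\\end{verbatim}\n\n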
Finally we sample and build the joint sampled firing graph using $p_{\\mathcal{S}} = 1$ and the set of pre-selected bits $I$. The procedure described in the previous section to choose the target purity coefficient consists in estimating the purity coefficient $\\hat{\\omega}_{I, \\vert I \\vert, f}$ and setting $\\delta$ so that $\\omega = \\hat{\\omega}_{I, \\vert I \\vert, f} - \\delta$.\\\\\n\n\\begin{figure}[H]\n\\subfloat[$\\delta = 0.$]{\\includegraphics[scale=0.165]{figures\/sparse_211.png}} \n\\subfloat[$\\delta = 10^{-2}$]{\\includegraphics[scale=0.165]{figures\/sparse_212.png}}\\\\\n\\subfloat[$\\delta = 5 \\times 10^{-2}$]{\\includegraphics[scale=0.165]{figures\/sparse_221.png}}\n\\subfloat[$\\delta = 10^{-1}$]{\\includegraphics[scale=0.165]{figures\/sparse_222.png}} \n\\caption{Observation of sampled bits' score processes in a joint sampled firing graph, $i=5$, $T=500$ and $(p, q) = (1, 1)$}\n\\label{fig:sim_si_2}\n\\end{figure}\n\nIn each simulation, $\\hat{\\omega}_{I, \\vert I \\vert, f}$ has been estimated using $1000$ samples and we use $T=500$. Furthermore, each subfigure corresponds to a different value of $\\delta$, which induces a different value of $\\omega$, set as $\\omega = \\hat{\\omega}_{I, \\vert I \\vert, f} - \\delta$. As for the first experiment, the different colored lines in each subfigure show the weight of edges outgoing from sampled bits with different purity ranks. As expected, we can see that the higher $\\delta$ is, the more discriminative the draining procedure is. If $\\delta$ is set to 0, then every sampled bit will remain connected in the firing graph after draining, which is not of great interest. Yet, if $\\delta$ is set too high, we may end up with two connected components, which is not desirable either. Thus, the experiment confirms the difficulties that we may face in choosing the right value of $\\delta$.\n\n\\section{Discussion}\n\nThis paper has presented an algorithm that consists in a generic optimisation of a firing graph, in an attempt to solve the abstract task of identifying latent factors' activations. Furthermore, it has provided theoretical guarantees on the effectiveness of the procedure. However, the iterative optimisation method, associated with the diversity and flexibility of the architecture of a firing graph, opens the door to further applications, notably in the field of inverse problems and in the very active field of machine learning. Indeed, in supervised classification, we are given a dataset composed of features, which may be numerical or categorical descriptions of samples, and targets, which specify the class of each sample. If we assume that the activation of a target is a combination of latent factors' activations and that we operate the minimal transformation of features so that they take the form of a measure grid, a light layer of procedures could turn our solution into a supervised classifier. The specificity of such a learner would give it an interesting position in the supervised learning landscape. Indeed, its iterative optimisation and flexible architecture could make it an adaptive learner that scales to large datasets, with minimal processing work on raw data, in the manner of a neural network. Yet, unlike a neural network, the algorithm handles categorical or sparse feature spaces very efficiently. Furthermore, compared to the most advanced tree-based classifiers, its flexible architecture is more suitable for learning updates and for on-the-fly evaluation or addition of new features. 
Finally, given the attention granted to the field of machine learning nowadays, both in the scientific community and in civil society, it makes sense to orient this piece of research toward this field. \n\n\n\\newpage\n\\begin{center}\n\\LARGE \\textbf{Appendices}\n\\end{center}\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section*{Abstract}{\\small\n\nBased on $uvby\\beta$ photometry we study the structure of several Galactic\nstar-forming fields. Lac OB1 is a compact association at $520\\pm20$ pc\nspatially correlated with a region of intense H{\\sc ii} emission in\nSh2-126. Lod\\'{e}n 112 is a compact OB group at $1630\\pm82$ pc, probably connected to\nan extended feature of OB stars located toward the Carina tangent.\nThe field toward Car OB1 is complex and \nlikely contains apparent concentrations representing parts of long segments\nof the Carina arm projected along the line of sight. Within the classical Mon\nOB2 association we separate a relatively compact group\nat 1.26 kpc that is spatially correlated with the Monoceros Loop SN remnant.\n\n\\normalsize}\n\\end{minipage}\n\\section{Introduction \\label{intro}}\nThe Galactic OB-associations offer a unique opportunity to study the\ninfluence of massive stars on the interstellar matter. \nA reconstruction of the star-formation history of many Galactic fields should be\npossible once the spatial distribution of the young stars is reliably\ndetermined. Despite the extensive efforts to improve and unify the distances\nto the young stellar groups in the Milky Way (MW), discrepancies still remain\nin the published studies for a large number of fields. Many of the present\ndistance estimates are based to a large extent on preliminary distance\ncalibrations, broad-band photometry, or absolute magnitudes ($M_V$) obtained\nvia spectral and luminosity type (MK classification). On the other hand, the\n$uvby\\beta$ photometric system provides $M_V$ and colour\nexcess determinations for early-type stars in excellent agreement\nwith the $Hipparcos$ parallaxes \\cite{kaltcheva98}.\n\\begin{figure}\n\\center\n\\includegraphics[scale=0.27]{Kaltcheva_N_Fig1a.pdf} ~\n\\includegraphics[scale=0.27]{Kaltcheva_N_Fig1b.pdf} \n\\caption{\\label{fig1} Comparisons of $uvby\\beta$ $M_V$ (left) and\nMK-based $M_V$ (right) to the $Hipparcos$-based $M_V$.}\n\\end{figure}\n\nFigure~\\ref{fig1} presents the comparison of $uvby\\beta$ $M_V$ and\nMK-based $M_V$ to the $Hipparcos$-based $M_V$, pointing to a possible\nover-estimation of stellar distances when relying on an MK-based determination.\nSince the distances to the Galactic OB associations are based mainly on\nindividual stellar distances and rarely on main-sequence fitting, applying\n$uvby\\beta$ photometry should lead to a significant improvement for many OB\ngroups in the MW.\n\nOur derivation of $uvby\\beta$ photometric distances utilizes the\nintrinsic colour calibrations of Crawford \\cite{crawford78} and Kilkenny \\& Whittet \\cite{kilkenny85} and\nthe luminosity calibration of Balona \\& Shobbrook \\cite{balona84} and takes into account possible\nstellar emission and mis-classification \\cite{kaltcheva00}. The expected\nerrors for one star are about 12 \\% for luminosity classes III-IV and about\n18-20~\\% for luminosity classes I and II. Photometric $uvby\\beta$ distances\nderived in this way provide the same impression of the star-forming field's\nstructure as the improved $Hipparcos$ parallaxes (cf. \\cite{kaltcheva07}). 
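\n\nFor reference, the individual photometric distances discussed below follow from the reddening-corrected (true) distance modulus in the usual way,\n\\begin{equation*}\nV_{0} - M_V = 5\\log_{10} d - 5, \\qquad d\\,[\\mathrm{pc}] = 10^{\\,0.2\\,(V_{0} - M_V) + 1},\n\\end{equation*}\nwhere $V_{0}$ is the de-reddened apparent magnitude; a true modulus of 11.06 mag, as derived below for Lod\\'{e}n 112, thus corresponds to a distance of about 1630 pc.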
\n\nIn this contribution we present improved distance estimates for four Galactic\nOB associations.\n\n\n\\section{Lac OB1}\n\nLac OB1 (Lac~OB1b, \\cite{blaauw58}) is a nearby notable clustering of\nearly-type stars near 10 Lacertae that initially gained attention because of\nthe expanding motion of its members. Based on the derived $uvby\\beta$\nphotometric distances used in conjunction with the photometric diagrams we\nidentify Lac OB1 as a compact group of 12 low-reddened main-sequence stars\nlocated at a distance of $520\\pm20$ pc in the direction $l=96.4^\\circ, b=-16.6^\\circ$ (see \\cite{kaltcheva09} for details). The available radial velocity and\nproper motion measurements support the impression that this is a real\ngroup. For these 12 stars, the recalculated $Hipparcos$ parallaxes\n\\cite{vanleeuwen07} are in excellent agreement with the photometric\n$uvby\\beta$ parallaxes. The photometric distance of the O9V star 10~Lac (HD\n214680) is estimated to be $715^{+107}_{-92}$ pc. Although this estimate is\nconsiderably larger than the one based on $Hipparcos$, the agreement for this\nstar is better with the recomputed $Hipparcos$ parallax \\cite{vanleeuwen07},\nwhich yields a distance of $529^{+70}_{-50}$ pc in comparison to the original\n$Hipparcos$ estimate of $325^{+82}_{-55}$ pc.\n\nFigure~\\ref{fig2} presents the distribution of H{\\sc ii} intensity in units of\nRayleighs and the brightness temperature distribution of H{\\sc i} at velocity\nchannel $-15.5$ km s$^{-1}$ toward Lac OB1, with the Lac OB1 stars\nsuperimposed. Here and in all further figures the H{\\sc ii} data are taken from \\cite{finkbeiner03} via the $SkyView$ interface \\cite{McGlynn98}, and the H{\\sc i} data are taken from the Leiden\/Argentine\/Bonn (LAB) Survey of Galactic HI \\cite{kalberla05}.\nA correlation of the stars' location with the regions of\nintense H{\\sc ii} emission in Sh2-126 \\cite{sharpless59} is noticeable. On the\nother hand, the distribution of neutral hydrogen shows a deficiency (most obvious in the selected velocity channel), also\ncorrelating with the location of the stars (see also \\cite{cappadenicolau90}).\n\n\\begin{figure}\n\\center\n\\includegraphics[scale=0.5]{Kaltcheva_N_Fig2a.pdf} ~\n\\includegraphics[scale=0.5]{Kaltcheva_N_Fig2b.pdf} \n\\caption{\\label{fig2} The stars of Lac OB1 overplotted on the distribution of\n the H{\\sc ii} emission in Sh2-126 (left) and H{\\sc i} (right). 10 Lac is shown with a filled symbol.}\n\\end{figure}\n\n\\section{The field of Lod\\'{e}n 112}\n\nLod\\'{e}n 112 is identified as a poor but compact cluster candidate. Based on $uvby\\beta$ photometry we obtained a true DM = 11.06$\\pm$0.12(s.e.) and an average colour excess $E(b-y)$ = 0.5$\\pm$0.03(s.e.) \\cite{kaltcheva11}. This corresponds to a distance of 1630$\\pm$82 pc, which is\nsignificantly smaller than the presently adopted 2500 pc (WEBDA). In our\n$uvby\\beta$ sample there are several other early B stars located at that exact\ndistance. The photometric distances and available proper motions allowed us to\nidentify a group of about 10 early B stars that could represent a new OB\nassociation at coordinates $282^\\circ\\!