@article{11432,
  abstract     = {This paper proposes a method for simulating liquids in large bodies of water by coupling together a water surface wave simulator with a 3D Navier-Stokes simulator. The surface wave simulation uses the equivalent sources method (ESM) to efficiently animate large bodies of water with precisely controllable wave propagation behavior. The 3D liquid simulator animates complex non-linear fluid behaviors like splashes and breaking waves using off-the-shelf simulators using FLIP or the level set method with semi-Lagrangian advection.
We combine the two approaches by using the 3D solver to animate localized non-linear behaviors, and the 2D wave solver to animate larger regions with linear surface physics. We use the surface motion from the 3D solver as boundary conditions for the 2D surface wave simulator, and we use the velocity and surface heights from the 2D surface wave simulator as boundary conditions for the 3D fluid simulation. We also introduce a novel technique for removing visual artifacts caused by numerical errors in 3D fluid solvers: we use experimental data to estimate the artificial dispersion caused by the 3D solver and we then carefully tune the wave speeds of the 2D solver to match it, effectively eliminating any differences in wave behavior across the boundary. To the best of our knowledge, this is the first time such an empirically driven error compensation approach has been used to remove coupling errors from a physics simulator.
Our coupled simulation approach leverages the strengths of each simulation technique, animating large environments with seamless transitions between 2D and 3D physics.},
  author       = {Schreck, Camille and Wojtan, Christopher J},
  issn         = {1467-8659},
  journal      = {Computer Graphics Forum},
  number       = {2},
  pages        = {343--353},
  publisher    = {Wiley},
  title        = {{Coupling 3D liquid simulation with 2D wave propagation for large scale water surface animation using the equivalent sources method}},
  doi          = {10.1111/cgf.14478},
  volume       = {41},
  year         = {2022},
}

@article{11435,
  abstract     = {We introduce a new variant of quantitative Helly-type theorems: the minimal homothetic distance of the intersection of a family of convex sets to the intersection of a subfamily of a fixed size. As an application, we establish the following quantitative Helly-type result for the diameter. If $K$ is the intersection of finitely many convex bodies in $\mathbb{R}^d$, then one can select $2d$ of these bodies whose intersection is of diameter at most $(2d)^3 \mathrm{diam}(K)$. The best previously known estimate, due to Brazitikos [Bull. Hellenic Math. Soc., 62 (2018), pp. 19--25], is $c d^{11/2}$. Moreover, we confirm that the multiplicative factor $c d^{1/2}$ conjectured by Bárány, Katchalski, and Pach [Proc. Amer. Math. Soc., 86 (1982), pp. 109--114] cannot be improved. The bounds above follow from our key result that concerns sparse approximation of a convex polytope by the convex hull of a well-chosen subset of its vertices: Assume that $Q \subset {\mathbb R}^d$ is a polytope whose centroid is the origin. Then there exist at most $2d$ vertices of $Q$ whose convex hull $Q^{\prime \prime}$ satisfies $Q \subset - 8d^3 Q^{\prime \prime}.$},
  author       = {Ivanov, Grigory and Naszodi, Marton},
  issn         = {0895-4801},
  journal      = {SIAM Journal on Discrete Mathematics},
  number       = {2},
  pages        = {951--957},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{A quantitative Helly-type theorem: Containment in a homothet}},
  doi          = {10.1137/21M1403308},
  volume       = {36},
  year         = {2022},
}

@article{11438,
  abstract     = {Lasers with well-controlled relative frequencies are indispensable for many applications in science and technology. We present a frequency-offset locking method for lasers based on beat-frequency discrimination utilizing hybrid electronic LC filters. The method is specifically designed for decoupling the tightness of the lock from the broadness of its capture range. The presented demonstration locks two free-running diode lasers at 780 nm with a 5.5-GHz offset. It displays an offset frequency instability below 55 Hz for time scales in excess of 1000 s and a minimum of 12 Hz at 10-s averaging. The performance is complemented with a 190-MHz lock-capture range, a tuning range of up to 1 GHz, and a frequency ramp agility of 200kHz/μs.},
  author       = {Li, Vyacheslav and Diorico, Fritz R and Hosten, Onur},
  issn         = {2331-7019},
  journal      = {Physical Review Applied},
  keywords     = {General Physics and Astronomy},
  number       = {5},
  publisher    = {American Physical Society},
  title        = {{Laser frequency-offset locking at 10-Hz-level instability using hybrid electronic filters}},
  doi          = {10.1103/physrevapplied.17.054031},
  volume       = {17},
  year         = {2022},
}

@inbook{11440,
  abstract     = {To compute the persistent homology of a grayscale digital image one needs to build a simplicial or cubical complex from it. For cubical complexes, the two commonly used constructions (corresponding to direct and indirect digital adjacencies) can give different results for the same image. The two constructions are almost dual to each other, and we use this relationship to extend and modify the cubical complexes to become dual filtered cell complexes. We derive a general relationship between the persistent homology of two dual filtered cell complexes, and also establish how various modifications to a filtered complex change the persistence diagram. Applying these results to images, we derive a method to transform the persistence diagram computed using one type of cubical complex into a persistence diagram for the other construction. This means software for computing persistent homology from images can now be easily adapted to produce results for either of the two cubical complex constructions without additional low-level code implementation.},
  author       = {Bleile, Bea and Garin, Adélie and Heiss, Teresa and Maggs, Kelly and Robins, Vanessa},
  booktitle    = {Research in Computational Topology 2},
  editor       = {Gasparovic, Ellen and Robins, Vanessa and Turner, Katharine},
  isbn         = {9783030955182},
  pages        = {1--26},
  publisher    = {Springer Nature},
  title        = {{The persistent homology of dual digital image constructions}},
  doi          = {10.1007/978-3-030-95519-9_1},
  volume       = {30},
  year         = {2022},
}

@article{11442,
  abstract     = {Enabling additive manufacturing to employ a wide range of novel, functional materials can be a major boost to this technology. However, making such materials printable requires painstaking trial-and-error by an expert operator,
as they typically tend to exhibit peculiar rheological or hysteresis properties. Even in the case of successfully finding the process parameters, there is no guarantee of print-to-print consistency due to material differences between batches. These challenges make closed-loop feedback an attractive option where the process parameters are adjusted on-the-fly. There are several challenges for designing an efficient controller: the deposition parameters are complex and highly coupled, artifacts occur after long time horizons, simulating the deposition is computationally costly, and learning on hardware is intractable. In this work, we demonstrate the feasibility of learning a closed-loop control policy for additive manufacturing using reinforcement learning. We show that approximate, but efficient, numerical simulation is
sufficient as long as it allows learning the behavioral patterns of deposition that translate to real-world experiences. In combination with reinforcement learning, our model can be used to discover control policies that outperform
baseline controllers. Furthermore, the recovered policies have a minimal sim-to-real gap. We showcase this by applying our control policy in-vivo on a single-layer, direct ink writing printer. },
  author       = {Piovarci, Michael and Foshey, Michael and Xu, Jie and Erps, Timothy and Babaei, Vahid and Didyk, Piotr and Rusinkiewicz, Szymon and Matusik, Wojciech and Bickel, Bernd},
  issn         = {1557-7368},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {{Closed-loop control of direct ink writing via reinforcement learning}},
  doi          = {10.1145/3528223.3530144},
  volume       = {41},
  year         = {2022},
}

@article{11443,
  abstract     = {Sometimes, it is possible to represent a complicated polytope as a projection of a much simpler polytope. To quantify this phenomenon, the extension complexity of a polytope P is defined to be the minimum number of facets of a (possibly higher-dimensional) polytope from which P can be obtained as a (linear) projection. This notion is motivated by its relevance to combinatorial optimisation, and has been studied intensively for various specific polytopes associated with important optimisation problems. In this paper we study extension complexity as a parameter of general polytopes, more specifically considering various families of low-dimensional polytopes. First, we prove that for a fixed dimension d, the extension complexity of a random d-dimensional polytope (obtained as the convex hull of random points in a ball or on a sphere) is typically on the order of the square root of its number of vertices. Second, we prove that any cyclic n-vertex polygon (whose vertices lie on a circle) has extension complexity at most $24\sqrt{n}$. This bound is tight up to the constant factor 24. Finally, we show that there exists an $n^{o(1)}$-dimensional polytope with at most n vertices and extension complexity $n^{1-o(1)}$. Our theorems are proved with a range of different techniques, which we hope will be of further interest.},
  author       = {Kwan, Matthew Alan and Sauermann, Lisa and Zhao, Yufei},
  issn         = {1088-6850},
  journal      = {Transactions of the American Mathematical Society},
  number       = {6},
  pages        = {4209--4250},
  publisher    = {American Mathematical Society},
  title        = {{Extension complexity of low-dimensional polytopes}},
  doi          = {10.1090/tran/8614},
  volume       = {375},
  year         = {2022},
}

@article{11444,
  abstract     = {This article investigates library-related documents written by Gerard van Swieten (1700–72) during his tenure as Library Prefect in the Imperial Library of Vienna (1745–72). Van Swieten’s time as Library Prefect is considered through a textual analysis. Handwritten letters were deconstructed in terms of their appearance, layout, and tone in order to mine them for meaning. Furthermore, the contents were examined for library matters such as censorship, catalogues, and collection development. The Imperial Court Library held a prominent role as a repository for rare and valuable works, later becoming the National Library of Austria.
Gerard van Swieten’s work as a librarian tends to be overlooked, perhaps because he is better known as the private physician of Maria Theresia, as well as a medical reformer. Nevertheless, he was a hard-working chief librarian deeply involved in all aspects of librarianship. Van Swieten endorsed modern scientific works, which were otherwise banned officially by the censorship commission, for the use of scholars in the library, expanded the collection by acquiring books through his network of scholars and publishers, and reissued library catalogues. He also provided for the comfort of users in the library reading room, at a time when such considerations were unusual. In conclusion, a proposal is made that van Swieten viewed his role as librarian with some importance and pride.},
  author       = {Chlebak, Clara A and Reid, Peter H.},
  issn         = {1758-3497},
  journal      = {Library and Information History},
  number       = {1},
  pages        = {23--41},
  publisher    = {Edinburgh University Press},
  title        = {{From the prefect’s desk: Gerard van Swieten’s library correspondence}},
  doi          = {10.3366/lih.2022.0097},
  volume       = {38},
  year         = {2022},
}

@article{11447,
  abstract     = {Empirical essays of fitness landscapes suggest that they may be rugged, that is having multiple fitness peaks. Such fitness landscapes, those that have multiple peaks, necessarily have special local structures, called reciprocal sign epistasis (Poelwijk et al. in J Theor Biol 272:141–144, 2011). Here, we investigate the quantitative relationship between the number of fitness peaks and the number of reciprocal sign epistatic interactions. Previously, it has been shown (Poelwijk et al. in J Theor Biol 272:141–144, 2011) that pairwise reciprocal sign epistasis is a necessary but not sufficient condition for the existence of multiple peaks. Applying discrete Morse theory, which to our knowledge has never been used in this context, we extend this result by giving the minimal number of reciprocal sign epistatic interactions required to create a given number of peaks.},
  author       = {Saona Urmeneta, Raimundo J and Kondrashov, Fyodor and Khudiakova, Kseniia},
  issn         = {1522-9602},
  journal      = {Bulletin of Mathematical Biology},
  keywords     = {Computational Theory and Mathematics, General Agricultural and Biological Sciences, Pharmacology, General Environmental Science, General Biochemistry, Genetics and Molecular Biology, General Mathematics, Immunology, General Neuroscience},
  number       = {8},
  publisher    = {Springer Nature},
  title        = {{Relation between the number of peaks and the number of reciprocal sign epistatic interactions}},
  doi          = {10.1007/s11538-022-01029-z},
  volume       = {84},
  year         = {2022},
}

@article{11448,
  abstract     = {Studies of protein fitness landscapes reveal biophysical constraints guiding protein evolution and empower prediction of functional proteins. However, generalisation of these findings is limited due to scarceness of systematic data on fitness landscapes of proteins with a defined evolutionary relationship. We characterized the fitness peaks of four orthologous fluorescent proteins with a broad range of sequence divergence. While two of the four studied fitness peaks were sharp, the other two were considerably flatter, being almost entirely free of epistatic interactions. Mutationally robust proteins, characterized by a flat fitness peak, were not optimal templates for machine-learning-driven protein design – instead, predictions were more accurate for fragile proteins with epistatic landscapes. Our work paves insights for practical application of fitness landscape heterogeneity in protein engineering.},
  author       = {Gonzalez Somermeyer, Louisa and Fleiss, Aubin and Mishin, Alexander S and Bozhanova, Nina G and Igolkina, Anna A and Meiler, Jens and Alaball Pujol, Maria-Elisenda and Putintseva, Ekaterina V and Sarkisyan, Karen S and Kondrashov, Fyodor},
  issn         = {2050-084X},
  journal      = {eLife},
  keywords     = {General Immunology and Microbiology, General Biochemistry, Genetics and Molecular Biology, General Medicine, General Neuroscience},
  publisher    = {eLife Sciences Publications},
  title        = {{Heterogeneity of the GFP fitness landscape and data-driven protein design}},
  doi          = {10.7554/elife.75842},
  volume       = {11},
  year         = {2022},
}

@article{11449,
  abstract     = {Mutations are acquired frequently, such that each cell's genome inscribes its history of cell divisions. Common genomic alterations involve loss of heterozygosity (LOH). LOH accumulates throughout the genome, offering large encoding capacity for inferring cell lineage. Using only single-cell RNA sequencing (scRNA-seq) of mouse brain cells, we found that LOH events spanning multiple genes are revealed as tracts of monoallelically expressed, constitutionally heterozygous single-nucleotide variants (SNVs). We simultaneously inferred cell lineage and marked developmental time points based on X chromosome inactivation and the total number of LOH events while identifying cell types from gene expression patterns. Our results are consistent with progenitor cells giving rise to multiple cortical cell types through stereotyped expansion and distinct waves of neurogenesis. This type of retrospective analysis could be incorporated into scRNA-seq pipelines and, compared with experimental approaches for determining lineage in model organisms, is applicable where genetic engineering is prohibited, such as humans.},
  author       = {Anderson, Donovan J. and Pauler, Florian and Mckenna, Aaron and Shendure, Jay and Hippenmeyer, Simon and Horwitz, Marshall S.},
  issn         = {2405-4720},
  journal      = {Cell Systems},
  number       = {6},
  pages        = {438--453.e5},
  publisher    = {Elsevier},
  title        = {{Simultaneous brain cell type and lineage determined by scRNA-seq reveals stereotyped cortical development}},
  doi          = {10.1016/j.cels.2022.03.006},
  volume       = {13},
  year         = {2022},
}

@article{11451,
  abstract     = {The precursor conversion chemistry and surface chemistry of Cu₃N and Cu₃PdN nanocrystals are unknown or contested. Here, we first obtain phase-pure, colloidally stable nanocubes. Second, we elucidate the pathway by which copper(II) nitrate and oleylamine form Cu₃N. We find that oleylamine is both a reductant and a nitrogen source. Oleylamine is oxidized by nitrate to a primary aldimine, which reacts further with excess oleylamine to a secondary aldimine, eliminating ammonia. Ammonia reacts with CuI to form Cu₃N. Third, we investigated the surface chemistry and find a mixed ligand shell of aliphatic amines and carboxylates (formed in situ). While the carboxylates appear tightly bound, the amines are easily desorbed from the surface. Finally, we show that doping with palladium decreases the band gap and the material becomes semi-metallic. These results bring insight into the chemistry of metal nitrides and might help the development of other metal nitride nanocrystals.},
  author       = {Parvizian, Mahsa and Duràn Balsa, Alejandra and Pokratath, Rohan and Kalha, Curran and Lee, Seungho and Van Den Eynden, Dietger and Ibáñez, Maria and Regoutz, Anna and De Roo, Jonathan},
  issn         = {1521-3773},
  journal      = {Angewandte Chemie - International Edition},
  number       = {31},
  publisher    = {Wiley},
  title        = {{The chemistry of Cu₃N and Cu₃PdN nanocrystals}},
  doi          = {10.1002/anie.202207013},
  volume       = {61},
  year         = {2022},
}

@inbook{11456,
  abstract     = {The proteomes of specialized structures, and the interactomes of proteins of interest, provide entry points to elucidate the functions of molecular machines. Here, we review a proximity-labeling strategy that uses the improved E. coli biotin ligase TurboID to characterize C. elegans protein complexes. Although the focus is on C. elegans neurons, the method is applicable regardless of cell type. We describe detailed extraction procedures that solubilize the bulk of C. elegans proteins and highlight the importance of tagging endogenous genes, to ensure physiological expression levels. We review issues associated with non-specific background noise and the importance of appropriate controls. As proof of principle, we review our analysis of the interactome of a presynaptic active zone protein, ELKS-1. Our aim is to provide a detailed protocol for TurboID-based proximity labeling in C. elegans and to highlight its potential and its limitations to characterize protein complexes and subcellular compartments in this animal.},
  author       = {Artan, Murat and de Bono, Mario},
  booktitle    = {Behavioral Neurogenetics},
  editor       = {Yamamoto, Daisuke},
  isbn         = {9781071623206},
  issn         = {1940-6045},
  pages        = {277--294},
  publisher    = {Springer Nature},
  title        = {{Proteomic analysis of C. elegans neurons using TurboID-based proximity labeling}},
  doi          = {10.1007/978-1-0716-2321-3_15},
  volume       = {181},
  year         = {2022},
}

@inproceedings{11459,
  abstract     = {We present a novel approach to differential cost analysis that, given a program revision, attempts to statically bound the difference in resource usage, or cost, between the two program versions. Differential cost analysis is particularly interesting because of the many compelling applications for it, such as detecting resource-use regressions at code-review time or proving the absence of certain side-channel vulnerabilities. One prior approach to differential cost analysis is to apply relational reasoning that conceptually constructs a product program on which one can over-approximate the difference in costs between the two program versions. However, a significant challenge in any relational approach is effectively aligning the program versions to get precise results. In this paper, our key insight is that we can avoid the need for and the limitations of program alignment if, instead, we bound the difference of two cost-bound summaries rather than directly bounding the concrete cost difference. In particular, our method computes a threshold value for the maximal difference in cost between two program versions simultaneously using two kinds of cost-bound summaries---a potential function that evaluates to an upper bound for the cost incurred in the first program and an anti-potential function that evaluates to a lower bound for the cost incurred in the second. Our method has a number of desirable properties: it can be fully automated, it allows optimizing the threshold value on relative cost, it is suitable for programs that are not syntactically similar, and it supports non-determinism. We have evaluated an implementation of our approach on a number of program pairs collected from the literature, and we find that our method computes tight threshold values on relative cost in most examples.},
  author       = {Zikelic, Dorde and Chang, Bor-Yuh Evan and Bolignano, Pauline and Raimondi, Franco},
  booktitle    = {Proceedings of the 43rd ACM SIGPLAN International Conference on Programming Language Design and Implementation},
  isbn         = {9781450392655},
  location     = {San Diego, CA, United States},
  pages        = {442--457},
  publisher    = {Association for Computing Machinery},
  title        = {{Differential cost analysis with simultaneous potentials and anti-potentials}},
  doi          = {10.1145/3519939.3523435},
  year         = {2022},
}

@article{11460,
  abstract     = {Background: Proper cerebral cortical development depends on the tightly orchestrated migration of newly born neurons from the inner ventricular and subventricular zones to the outer cortical plate. Any disturbance in this process during prenatal stages may lead to neuronal migration disorders (NMDs), which can vary in extent from focal to global. Furthermore, NMDs show a substantial comorbidity with other neurodevelopmental disorders, notably autism spectrum disorders (ASDs). Our previous work demonstrated focal neuronal migration defects in mice carrying loss-of-function alleles of the recognized autism risk gene WDFY3. However, the cellular origins of these defects in Wdfy3 mutant mice remain elusive and uncovering it will provide critical insight into WDFY3-dependent disease pathology.
Methods: Here, in an effort to untangle the origins of NMDs in Wdfy3$^{lacZ}$ mice, we employed mosaic analysis with double markers (MADM). MADM technology enabled us to genetically distinctly track and phenotypically analyze mutant and wild-type cells concomitantly in vivo using immunofluorescent techniques.
Results: We revealed a cell autonomous requirement of WDFY3 for accurate laminar positioning of cortical projection neurons and elimination of mispositioned cells during early postnatal life. In addition, we identified significant deviations in dendritic arborization, as well as synaptic density and morphology between wild type, heterozygous, and homozygous Wdfy3 mutant neurons in Wdfy3-MADM reporter mice at postnatal stages.
Limitations: While Wdfy3 mutant mice have provided valuable insight into prenatal aspects of ASD pathology that remain inaccessible to investigation in humans, like most animal models, they do not a perfectly replicate all aspects of human ASD biology. The lack of human data makes it indeterminate whether morphological deviations described here apply to ASD patients or some of the other neurodevelopmental conditions associated with WDFY3 mutation.
Conclusions: Our genetic approach revealed several cell autonomous requirements of WDFY3 in neuronal development that could underlie the pathogenic mechanisms of WDFY3-related neurodevelopmental conditions. The results are also consistent with findings in other ASD animal models and patients and suggest an important role for WDFY3 in regulating neuronal function and interconnectivity in postnatal life.},
  author       = {Schaaf, Zachary A. and Tat, Lyvin and Cannizzaro, Noemi and Green, Ralph and Rülicke, Thomas and Hippenmeyer, Simon and Zarbalis, Konstantinos S.},
  issn         = {2040-2392},
  journal      = {Molecular Autism},
  keywords     = {Psychiatry and Mental health, Developmental Biology, Developmental Neuroscience, Molecular Biology},
  publisher    = {Springer Nature},
  title        = {{WDFY3 mutation alters laminar position and morphology of cortical neurons}},
  doi          = {10.1186/s13229-022-00508-3},
  volume       = {13},
  year         = {2022},
}

@article{11462,
  abstract     = {Nanobodies (VHH) from camelid antibody libraries hold great promise as therapeutic agents and components of immunoassay systems. Synthetic antibody libraries that could be designed and generated once and for various applications could yield binders to virtually any targets, even for non-immunogenic or toxic ones, in a short term. One of the most difficult tasks is to obtain antibodies with a high affinity and specificity to polyglycosylated proteins. It requires antibody libraries with extremely high functional diversity and the use of sophisticated selection techniques. Here we report a development of a novel sandwich immunoassay involving a combination of the synthetic library-derived VHH-Fc fusion protein as a capture antibody and the immune single-chain fragment variable (scFv) as a tracer for the detection of pregnancy-associated glycoprotein (PAG) of cattle (Bos taurus). We succeeded in the generation of a number of specific scFv antibodies against PAG from the mouse immune library. Subsequent selection using the immobilized scFv-Fc capture antibody allowed to isolate 1.9 nM VHH binder from the diverse synthetic library without any overlapping with the capture antibody binding site. The prototype sandwich ELISA based on the synthetic VHH and the immune scFv was established. This is the first successful example of the combination of synthetic and immune antibody libraries in a single sandwich immunoassay. Thus, our approach could be used for the express isolation of antibody pairs and the development of sandwich immunoassays for challenging antigens.},
  author       = {Dormeshkin, Dmitri and Shapira, Michail and Karputs, Alena and Kavaleuski, Anton and Kuzminski, Ivan and Stepanova, Elena and Gilep, Andrei},
  issn         = {1432-0614},
  journal      = {Applied Microbiology and Biotechnology},
  pages        = {5093--5103},
  publisher    = {Springer Nature},
  title        = {{Combining of synthetic VHH and immune scFv libraries for pregnancy-associated glycoproteins ELISA development}},
  doi          = {10.1007/s00253-022-12022-w},
  volume       = {106},
  year         = {2022},
}

@article{11469,
  abstract     = {Thermalizing and localized many-body quantum systems present two distinct dynamical phases of matter. Recently the fate of a localized system coupled to a thermalizing system viewed as a quantum bath received significant theoretical and experimental attention. In this work, we study a mobile impurity, representing a small quantum bath, that interacts locally with an Anderson insulator with a finite density of localized particles. Using static Hartree approximation to obtain an effective disorder strength, we formulate an analytic criterion for the perturbative stability of the localization. Next, we use an approximate dynamical Hartree method and the quasi-exact time-evolved block decimation (TEBD) algorithm to study the dynamics of the system. We find that the dynamical Hartree approach which completely ignores entanglement between the impurity and localized particles predicts the delocalization of the system. In contrast, the full numerical simulation of the unitary dynamics with TEBD suggests the stability of localization on numerically accessible timescales. Finally, using an extension of the density matrix renormalization group algorithm to excited states (DMRG-X), we approximate the highly excited eigenstates of the system. We find that the impurity remains localized in the eigenstates and entanglement is enhanced in a finite region around the position of the impurity, confirming the dynamical predictions. Dynamics and the DMRG-X results provide compelling evidence for the stability of localization.},
  author       = {Brighi, Pietro and Michailidis, Alexios and Kirova, Kristina and Abanin, Dmitry A. and Serbyn, Maksym},
  issn         = {2469-9969},
  journal      = {Physical Review B},
  number       = {22},
  publisher    = {American Physical Society},
  title        = {{Localization of a mobile impurity interacting with an Anderson insulator}},
  doi          = {10.1103/physrevb.105.224208},
  volume       = {105},
  year         = {2022},
}

@article{11470,
  abstract     = {Many-body localization (MBL) is an example of a dynamical phase of matter that avoids thermalization. While the MBL phase is robust to weak local perturbations, the fate of an MBL system coupled to a thermalizing quantum system that represents a “heat bath” is an open question that is actively investigated theoretically and experimentally. In this work, we consider the stability of an Anderson insulator with a finite density of particles interacting with a single mobile impurity—a small quantum bath. We give perturbative arguments that support the stability of localization in the strong interaction regime. Large-scale tensor network simulations of dynamics are employed to corroborate the presence of the localized phase and give quantitative predictions in the thermodynamic limit. We develop a phenomenological description of the dynamics in the strong interaction regime, and we demonstrate that the impurity effectively turns the Anderson insulator into an MBL phase, giving rise to nontrivial entanglement dynamics well captured by our phenomenology.},
  author       = {Brighi, Pietro and Michailidis, Alexios A. and Abanin, Dmitry A. and Serbyn, Maksym},
  issn         = {2469-9969},
  journal      = {Physical Review B},
  number       = {22},
  publisher    = {American Physical Society},
  title        = {{Propagation of many-body localization in an Anderson insulator}},
  doi          = {10.1103/physrevb.105.l220203},
  volume       = {105},
  year         = {2022},
}

@article{11471,
  abstract     = {Variational quantum algorithms are promising algorithms for achieving quantum advantage on near-term devices. The quantum hardware is used to implement a variational wave function and measure observables, whereas the classical computer is used to store and update the variational parameters. The optimization landscape of expressive variational ansätze is however dominated by large regions in parameter space, known as barren plateaus, with vanishing gradients, which prevents efficient optimization. In this work we propose a general algorithm to avoid barren plateaus in the initialization and throughout the optimization. To this end we define a notion of weak barren plateaus (WBPs) based on the entropies of local reduced density matrices. The presence of WBPs can be efficiently quantified using recently introduced shadow tomography of the quantum state with a classical computer. We demonstrate that avoidance of WBPs suffices to ensure sizable gradients in the initialization. In addition, we demonstrate that decreasing the gradient step size, guided by the entropies allows WBPs to be avoided during the optimization process. This paves the way for efficient barren plateau-free optimization on near-term devices.},
  author       = {Sack, Stefan and Medina Ramos, Raimel A. and Michailidis, Alexios and Kueng, Richard and Serbyn, Maksym},
  issn         = {2691-3399},
  journal      = {PRX Quantum},
  number       = {2},
  pages        = {020365},
  publisher    = {American Physical Society},
  title        = {{Avoiding barren plateaus using classical shadows}},
  doi          = {10.1103/prxquantum.3.020365},
  volume       = {3},
  year         = {2022},
}

@phdthesis{11473,
  abstract     = {The polaron model is a basic model of quantum field theory describing a single particle
interacting with a bosonic field. It arises in many physical contexts. We are mostly concerned
with models applicable in the context of an impurity atom in a Bose-Einstein condensate as
well as the problem of electrons moving in polar crystals.
The model has a simple structure in which the interaction of the particle with the field is given
by a term linear in the field’s creation and annihilation operators. In this work, we investigate
the properties of this model by providing rigorous estimates on various energies relevant to the
problem. The estimates are obtained, for the most part, by suitable operator techniques which
constitute the principal mathematical substance of the thesis.
The first application of these techniques is to derive the polaron model rigorously from first
principles, i.e., from a full microscopic quantum-mechanical many-body problem involving an
impurity in an otherwise homogeneous system. We accomplish this for the N + 1 Bose gas
in the mean-field regime by showing that a suitable polaron-type Hamiltonian arises at weak
interactions as a low-energy effective theory for this problem.
In the second part, we investigate rigorously the ground state of the model at fixed momentum
and for large values of the coupling constant. Qualitatively, the system is expected to display
a transition from the quasi-particle behavior at small momenta, where the dispersion relation
is parabolic and the particle moves through the medium dragging along a cloud of phonons, to
the radiative behavior at larger momenta where the polaron decelerates and emits free phonons.
At the same time, in the strong coupling regime, the bosonic field is expected to behave purely
classically. Accordingly, the effective mass of the polaron at strong coupling is conjectured to
be asymptotically equal to the one obtained from the semiclassical counterpart of the problem,
first studied by Landau and Pekar in the 1940s. For polaron models with regularized form
factors and phonon dispersion relations of superfluid type, i.e., bounded below by a linear
function of the wavenumbers for all phonon momenta as in the interacting Bose gas, we prove
that for a large window of momenta below the radiation threshold, the energy-momentum
relation at strong coupling is indeed essentially a parabola with semi-latus rectum equal to the
Landau–Pekar effective mass, as expected.
For the Fröhlich polaron describing electrons in polar crystals where the dispersion relation is
of the optical type and the form factor is formally UV–singular due to the nature of the point
charge-dipole interaction, we are able to give the corresponding upper bound. In contrast to
the regular case, this requires the inclusion of the quantum fluctuations of the phonon field,
which makes the problem considerably more difficult.
The results are supplemented by studies on the absolute ground-state energy at strong coupling,
a proof of the divergence of the effective mass with the coupling constant for a wide class of
polaron models, as well as the discussion of the apparent UV singularity of the Fröhlich model
and the application of the techniques used for its removal for the energy estimates.
},
  author       = {Mysliwy, Krzysztof},
  issn         = {2663-337X},
  pages        = {138},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Polarons in Bose gases and polar crystals: Some rigorous energy estimates}},
  doi          = {10.15479/at:ista:11473},
  year         = {2022},
}

@inproceedings{11476,
  abstract     = {Messaging platforms like Signal are widely deployed and provide strong security in an asynchronous setting. It is a challenging problem to construct a protocol with similar security guarantees that can efficiently scale to large groups. A major bottleneck are the frequent key rotations users need to perform to achieve post compromise forward security.

In current proposals – most notably in TreeKEM (which is part of the IETF’s Messaging Layer Security (MLS) protocol draft) – for users in a group of size n to rotate their keys, they must each craft a message of size log(n) to be broadcast to the group using an (untrusted) delivery server.

In larger groups, having users sequentially rotate their keys requires too much bandwidth (or takes too long), so variants allowing any T≤n users to simultaneously rotate their keys in just 2 communication rounds have been suggested (e.g. “Propose and Commit” by MLS). Unfortunately, 2-round concurrent updates are either damaging or expensive (or both); i.e. they either result in future operations being more costly (e.g. via “blanking” or “tainting”) or are costly themselves requiring Ω(T) communication for each user [Bienstock et al., TCC’20].

In this paper we propose CoCoA; a new scheme that allows for T concurrent updates that are neither damaging nor costly. That is, they add no cost to future operations yet they only require Ω(log²(n)) communication per user. To circumvent the [Bienstock et al.] lower bound, CoCoA increases the number of rounds needed to complete all updates from 2 up to (at most) log(n); though typically fewer rounds are needed.

The key insight of our protocol is the following: in the (non-concurrent version of) TreeKEM, a delivery server which gets T concurrent update requests will approve one and reject the remaining T−1. In contrast, our server attempts to apply all of them. If more than one user requests to rotate the same key during a round, the server arbitrarily picks a winner. Surprisingly, we prove that regardless of how the server chooses the winners, all previously compromised users will recover after at most log(n) such update rounds.

To keep the communication complexity low, CoCoA is a server-aided CGKA. That is, the delivery server no longer blindly forwards packets, but instead actively computes individualized packets tailored to each user. As the server is untrusted, this change requires us to develop new mechanisms ensuring robustness of the protocol.},
  author       = {Alwen, Joël and Auerbach, Benedikt and Cueto Noval, Miguel and Klein, Karen and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z and Walter, Michael},
  booktitle    = {Advances in Cryptology – EUROCRYPT 2022},
  isbn         = {9783031070846},
  issn         = {1611-3349},
  location     = {Trondheim, Norway},
  pages        = {815--844},
  publisher    = {Springer Nature},
  title        = {{CoCoA: Concurrent continuous group key agreement}},
  doi          = {10.1007/978-3-031-07085-3_28},
  series       = {Lecture Notes in Computer Science},
  volume       = {13276},
  year         = {2022},
}

