% PhD thesis, ISTA 2021. Fix: classic BibTeX's @phdthesis type requires a
% `school` field (author/title/school/year); with only `publisher` present,
% standard styles emit an "empty school" warning and omit the institution.
% `school` added; `publisher` kept for styles and exports that read it.
@phdthesis{10030,
  abstract     = {This PhD thesis is primarily focused on the study of discrete transport problems, introduced for the first time in the seminal works of Maas [Maa11] and Mielke [Mie11] on finite state Markov chains and reaction-diffusion equations, respectively. More in detail, my research focuses on the study of transport costs on graphs, in particular the convergence and the stability of such problems in the discrete-to-continuum limit. This thesis also includes some results concerning
non-commutative optimal transport. The first chapter of this thesis consists of a general introduction to the optimal transport problems, both in the discrete, the continuous, and the non-commutative setting. Chapters 2 and 3 present the content of two works, obtained in collaboration with Peter Gladbach, Eva Kopfer, and Jan Maas, where we have been able to show the convergence of discrete transport costs on periodic graphs to suitable continuous ones, which can be described by means of a homogenisation result. We first focus on the particular case of quadratic costs on the real line and then extending the result to more general costs in arbitrary dimension. Our results are the first complete characterisation of limits of transport costs on periodic graphs in arbitrary dimension which do not rely on any additional symmetry. In Chapter 4 we turn our attention to one of the intriguing connection between evolution equations and optimal transport, represented by the theory of gradient flows. We show that discrete gradient flow structures associated to a finite volume approximation of a certain class of diffusive equations (Fokker–Planck) is stable in the limit of vanishing meshes, reproving the convergence of the scheme via the method of evolutionary Γ-convergence and exploiting a more variational point of view on the problem. This is based on a collaboration with Dominik Forkert and Jan Maas. Chapter 5 represents a change of perspective, moving away from the discrete world and reaching the non-commutative one. As in the discrete case, we discuss how classical tools coming from the commutative optimal transport can be translated into the setting of density matrices. 
In particular, in this final chapter we present a non-commutative version of the Schrödinger problem (or entropic regularised optimal transport problem) and discuss existence and characterisation of minimisers, a duality result, and present a non-commutative version of the well-known Sinkhorn algorithm to compute the above mentioned optimisers. This is based on a joint work with Dario Feliciangeli and Augusto Gerolin. Finally, Appendix A and B contain some additional material and discussions, with particular attention to Harnack inequalities and the regularity of flows on discrete spaces.},
  author       = {Portinale, Lorenzo},
  issn         = {2663-337X},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{Discrete-to-continuum limits of transport problems and gradient flows in the space of measures}},
  doi          = {10.15479/at:ista:10030},
  year         = {2021},
}

% Journal article, Advances in Mathematics vol. 392 (2021); has DOI, ISSN,
% keywords. NOTE(review): no pages/eid field — the DOI suffix suggests
% article number 107992; confirm against the publisher record and add.
@article{10033,
  abstract     = {The ⊗*-monoidal structure on the category of sheaves on the Ran space is not pro-nilpotent in the sense of [3]. However, under some connectivity assumptions, we prove that Koszul duality induces an equivalence of categories and that this equivalence behaves nicely with respect to Verdier duality on the Ran space and integrating along the Ran space, i.e. taking factorization homology. Based on ideas sketched in [4], we show that these results also offer a simpler alternative to one of the two main steps in the proof of the Atiyah-Bott formula given in [7] and [5].},
  author       = {Ho, Quoc P},
  issn         = {1090-2082},
  journal      = {Advances in Mathematics},
  keywords     = {Chiral algebras, Chiral homology, Factorization algebras, Koszul duality, Ran space},
  publisher    = {Elsevier},
  title        = {{The Atiyah-Bott formula and connectivity in chiral Koszul duality}},
  doi          = {10.1016/j.aim.2021.107992},
  volume       = {392},
  year         = {2021},
}

% PhD thesis, ISTA 2021. Fixes: (1) @phdthesis requires a `school` field —
% added (publisher kept for compatibility); (2) the abstract contained a raw
% ampersand in "S&P ’21", which is a LaTeX special character and breaks
% compilation of the bibliography — escaped as \&.
@phdthesis{10035,
  abstract     = {Many security definitions come in two flavors: a stronger “adaptive” flavor, where the adversary can arbitrarily make various choices during the course of the attack, and a weaker “selective” flavor where the adversary must commit to some or all of their choices a-priori. For example, in the context of identity-based encryption, selective security requires the adversary to decide on the identity of the attacked party at the very beginning of the game whereas adaptive security allows the attacker to first see the master public key and some secret keys before making this choice. Often, it appears to be much easier to achieve selective security than it is to achieve adaptive security. A series of several recent works shows how to cleverly achieve adaptive security in several such scenarios including generalized selective decryption [Pan07][FJP15], constrained PRFs [FKPR14], and Yao’s garbled circuits [JW16]. Although the above works expressed vague intuition that they share a common technique, the connection was never made precise. In this work we present a new framework (published at Crypto ’17 [JKK+17a]) that connects all of these works and allows us to present them in a unified and simplified fashion. Having the framework in place, we show how to achieve adaptive security for proxy re-encryption schemes (published at PKC ’19 [FKKP19]) and provide the first adaptive security proofs for continuous group key agreement protocols (published at S\&P ’21 [KPW+21]). Questioning optimality of our framework, we then show that currently used proof techniques cannot lead to significantly better security guarantees for "graph-building" games (published at TCC ’21 [KKPW21a]). These games cover generalized selective decryption, as well as the security of prominent constructions for constrained PRFs, continuous group key agreement, and proxy re-encryption. 
Finally, we revisit the adaptive security of Yao’s garbled circuits and extend the analysis of Jafargholi and Wichs in two directions: While they prove adaptive security only for a modified construction with increased online complexity, we provide the first positive results for the original construction by Yao (published at TCC ’21 [KKP21a]). On the negative side, we prove that the results of Jafargholi and Wichs are essentially optimal by showing that no black-box reduction can provide a significantly better security bound (published at Crypto ’21 [KKPW21c]).},
  author       = {Klein, Karen},
  issn         = {2663-337X},
  pages        = {276},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{On the adaptive security of graph-based games}},
  doi          = {10.15479/at:ista:10035},
  year         = {2021},
}

% Conference paper, CRYPTO 2021 (Springer). Fixes: (1) trailing space inside
% the braced booktitle value ("…Part II ") — BibTeX preserves inner
% whitespace, so it leaked into the rendered reference; (2) ISSN 1611-3349
% is the Lecture Notes in Computer Science series ISSN, so a `series` field
% is added to pair with volume 12826 — NOTE(review): confirm 12826 is the
% LNCS volume number.
@inproceedings{10041,
  abstract     = {Yao’s garbling scheme is one of the most fundamental cryptographic constructions. Lindell and Pinkas (Journal of Cryptograhy 2009) gave a formal proof of security in the selective setting where the adversary chooses the challenge inputs before seeing the garbled circuit assuming secure symmetric-key encryption (and hence one-way functions). This was followed by results, both positive and negative, concerning its security in the, stronger, adaptive setting. Applebaum et al. (Crypto 2013) showed that it cannot satisfy adaptive security as is, due to a simple incompressibility argument. Jafargholi and Wichs (TCC 2017) considered a natural adaptation of Yao’s scheme (where the output mapping is sent in the online phase, together with the garbled input) that circumvents this negative result, and proved that it is adaptively secure, at least for shallow circuits. In particular, they showed that for the class of circuits of depth   δ , the loss in security is at most exponential in   δ . The above results all concern the simulation-based notion of security. In this work, we show that the upper bound of Jafargholi and Wichs is basically optimal in a strong sense. As our main result, we show that there exists a family of Boolean circuits, one for each depth  δ∈N , such that any black-box reduction proving the adaptive indistinguishability of the natural adaptation of Yao’s scheme from any symmetric-key encryption has to lose a factor that is exponential in   δ√ . Since indistinguishability is a weaker notion than simulation, our bound also applies to adaptive simulation. To establish our results, we build on the recent approach of Kamath et al. (Eprint 2021), which uses pebbling lower bounds in conjunction with oracle separations to prove fine-grained lower bounds on loss in cryptographic security.},
  author       = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Wichs, Daniel},
  booktitle    = {41st Annual International Cryptology Conference, Part II},
  isbn         = {978-3-030-84244-4},
  issn         = {1611-3349},
  location     = {Virtual},
  pages        = {486--515},
  publisher    = {Springer Nature},
  series       = {Lecture Notes in Computer Science},
  title        = {{Limits on the Adaptive Security of Yao’s Garbling}},
  doi          = {10.1007/978-3-030-84245-1_17},
  volume       = {12826},
  year         = {2021},
}

% Conference paper, TCC 2021 (IACR). NOTE(review): entry has no doi, pages,
% volume, or series — add them once the Springer/LNCS proceedings metadata
% is available; cf. entry 10041 in this file for the field set used for the
% authors' Crypto 2021 paper.
@inproceedings{10044,
  abstract     = {We show that Yao’s garbling scheme is adaptively indistinguishable for the class of Boolean circuits of size S and treewidth w with only a S^O(w) loss in security. For instance, circuits with constant treewidth are as a result adaptively indistinguishable with only a polynomial loss. This (partially) complements a negative result of Applebaum et al. (Crypto 2013), which showed (assuming one-way functions) that Yao’s garbling scheme cannot be adaptively simulatable. As main technical contributions, we introduce a new pebble game that abstracts out our security reduction and then present a pebbling strategy for this game where the number of pebbles used is roughly O(d w log(S)), d being the fan-out of the circuit. The design of the strategy relies on separators, a graph-theoretic notion with connections to circuit complexity.},
  author       = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z},
  booktitle    = {19th Theory of Cryptography Conference 2021},
  location     = {Raleigh, NC, United States},
  publisher    = {International Association for Cryptologic Research},
  title        = {{On treewidth, separators and Yao's garbling}},
  year         = {2021},
}

% arXiv preprint. Fixes: (1) @unpublished requires a `note` field in classic
% BibTeX — added; (2) the arXiv identifier (taken from the DOI suffix) is
% recorded in the standard `eprint`/`archiveprefix` fields instead of being
% recoverable only from the DOI; (3) stray trailing newline inside the
% abstract value removed. `booktitle = {arXiv}` kept — it matches this
% file's convention for preprints (cf. entry 10066) and is ignored by
% @unpublished styles.
@unpublished{10045,
  abstract     = {Given a fixed finite metric space (V,μ), the {\em minimum 0-extension problem}, denoted as 0-Ext[μ], is equivalent to the following optimization problem: minimize function of the form minx∈Vn∑ifi(xi)+∑ijcijμ(xi,xj) where cij,cvi are given nonnegative costs and fi:V→R are functions given by fi(xi)=∑v∈Vcviμ(xi,v). The computational complexity of 0-Ext[μ] has been recently established by Karzanov and by Hirai: if metric μ is {\em orientable modular} then 0-Ext[μ] can be solved in polynomial time, otherwise 0-Ext[μ] is NP-hard. To prove the tractability part, Hirai developed a theory of discrete convex functions on orientable modular graphs generalizing several known classes of functions in discrete convex analysis, such as L♮-convex functions. We consider a more general version of the problem in which unary functions fi(xi) can additionally have terms of the form cuv;iμ(xi,{u,v}) for {u,v}∈F, where set F⊆(V2) is fixed. We extend the complexity classification above by providing an explicit condition on (μ,F) for the problem to be tractable. In order to prove the tractability part, we generalize Hirai's theory and define a larger class of discrete convex functions. It covers, in particular, another well-known class of functions, namely submodular functions on an integer lattice. Finally, we improve the complexity of Hirai's algorithm for solving 0-Ext on orientable modular graphs.},
  archiveprefix = {arXiv},
  author       = {Dvorak, Martin and Kolmogorov, Vladimir},
  booktitle    = {arXiv},
  eprint       = {2109.10203},
  keywords     = {minimum 0-extension problem, metric labeling problem, discrete metric spaces, metric extensions, computational complexity, valued constraint satisfaction problems, discrete convex analysis, L-convex functions},
  note         = {Preprint},
  title        = {{Generalized minimum 0-extension problem and discrete convexity}},
  doi          = {10.48550/arXiv.2109.10203},
  year         = {2021},
}

% Conference paper, TCC 2021 (IACR). NOTE(review): entry has no doi, pages,
% volume, or series — add them once the Springer/LNCS proceedings metadata
% is available; cf. entry 10041 in this file for the field set used for the
% authors' Crypto 2021 paper. The abstract value intentionally spans
% multiple physical lines; BibTeX treats the newlines as spaces.
@inproceedings{10048,
  abstract     = {The security of cryptographic primitives and protocols against adversaries that are allowed to make adaptive choices (e.g., which parties to corrupt or which queries to make) is notoriously difficult to establish. A broad theoretical
framework was introduced by Jafargholi et al. [Crypto’17] for this purpose. In this paper we initiate the study of lower bounds on loss in adaptive security for certain cryptographic protocols considered in the framework. We prove lower
bounds that almost match the upper bounds (proven using the framework) for proxy re-encryption, prefix-constrained PRFs and generalized selective decryption, a security game that captures the security of certain group messaging and
broadcast encryption schemes. Those primitives have in common that their security game involves an underlying graph that can be adaptively built by the adversary. Some of our lower bounds only apply to a restricted class of black-box reductions which we term “oblivious” (the existing upper bounds are of this restricted type), some apply to the broader but still restricted class of non-rewinding reductions, while our lower bound for proxy re-encryption applies to all black-box reductions. The fact that some of our lower bounds seem to crucially rely on obliviousness or at least a non-rewinding reduction hints to the exciting possibility that the existing upper bounds can be improved by using more sophisticated reductions. Our main conceptual contribution is a two-player multi-stage game called the Builder-Pebbler Game. We can translate bounds on the winning probabilities for various instantiations of this game into cryptographic lower bounds for the above-mentioned primitives using oracle separation techniques.
},
  author       = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Walter, Michael},
  booktitle    = {19th Theory of Cryptography Conference 2021},
  location     = {Raleigh, NC, United States},
  publisher    = {International Association for Cryptologic Research},
  title        = {{The cost of adaptivity in security games on graphs}},
  year         = {2021},
}

% Conference paper, IEEE S&P 2021. Fix: trailing space inside the braced
% booktitle value ("…Privacy ") — BibTeX preserves whitespace inside braces,
% so the stray space leaked into the rendered reference (e.g. before a
% style-inserted comma).
@inproceedings{10049,
  abstract     = {While messaging systems with strong security guarantees are widely used in practice, designing a protocol that scales efficiently to large groups and enjoys similar security guarantees remains largely open. The two existing proposals to date are ART (Cohn-Gordon et al., CCS18) and TreeKEM (IETF, The Messaging Layer Security Protocol, draft). TreeKEM is the currently considered candidate by the IETF MLS working group, but dynamic group operations (i.e. adding and removing users) can cause efficiency issues. In this paper we formalize and analyze a variant of TreeKEM which we term Tainted TreeKEM (TTKEM for short). The basic idea underlying TTKEM was suggested by Millican (MLS mailing list, February 2018). This version is more efficient than TreeKEM for some natural distributions of group operations, we quantify this through simulations.Our second contribution is two security proofs for TTKEM which establish post compromise and forward secrecy even against adaptive attackers. The security loss (to the underlying PKE) in the Random Oracle Model is a polynomial factor, and a quasipolynomial one in the Standard Model. Our proofs can be adapted to TreeKEM as well. Before our work no security proof for any TreeKEM-like protocol establishing tight security against an adversary who can adaptively choose the sequence of operations was known. We also are the first to prove (or even formalize) active security where the server can arbitrarily deviate from the protocol specification. Proving fully active security – where also the users can arbitrarily deviate – remains open.},
  author       = {Klein, Karen and Pascual Perez, Guillermo and Walter, Michael and Kamath Hosdurg, Chethan and Capretto, Margarita and Cueto Noval, Miguel and Markov, Ilia and Yeo, Michelle X and Alwen, Joel F and Pietrzak, Krzysztof Z},
  booktitle    = {2021 IEEE Symposium on Security and Privacy},
  location     = {San Francisco, CA, United States},
  pages        = {268--284},
  publisher    = {IEEE},
  title        = {{Keep the dirt: tainted TreeKEM, adaptively and actively secure continuous group key agreement}},
  doi          = {10.1109/sp40001.2021.00035},
  year         = {2021},
}

% Journal article, J. Neurosci. 41(37), 2021. Fix: the title contained HTML
% markup "Ca<sup>21</sup>" — HTML tags do not render in LaTeX, and "21" is a
% garbled superscript "2+" (the abstract consistently writes "Ca2+ channels",
% CaV). Rewritten as math-mode {$^{2+}$}, braced so style recasing cannot
% mangle it.
@article{10051,
  abstract     = {Rab-interacting molecule (RIM)-binding protein 2 (BP2) is a multidomain protein of the presynaptic active zone (AZ). By binding to RIM, bassoon (Bsn), and voltage-gated Ca2+ channels (CaV), it is considered to be a central organizer of the topography of CaV and release sites of synaptic vesicles (SVs) at the AZ. Here, we used RIM-BP2 knock-out (KO) mice and their wild-type (WT) littermates of either sex to investigate the role of RIM-BP2 at the endbulb of Held synapse of auditory nerve fibers (ANFs) with bushy cells (BCs) of the cochlear nucleus, a fast relay of the auditory pathway with high release probability. Disruption of RIM-BP2 lowered release probability altering short-term plasticity and reduced evoked EPSCs. Analysis of SV pool dynamics during high-frequency train stimulation indicated a reduction of SVs with high release probability but an overall normal size of the readily releasable SV pool (RRP). The Ca2+-dependent fast component of SV replenishment after RRP depletion was slowed. Ultrastructural analysis by superresolution light and electron microscopy revealed an impaired topography of presynaptic CaV and a reduction of docked and membrane-proximal SVs at the AZ. We conclude that RIM-BP2 organizes the topography of CaV, and promotes SV tethering and docking. This way RIM-BP2 is critical for establishing a high initial release probability as required to reliably signal sound onset information that we found to be degraded in BCs of RIM-BP2-deficient mice in vivo. SIGNIFICANCE STATEMENT: Rab-interacting molecule (RIM)-binding proteins (BPs) are key organizers of the active zone (AZ). Using a multidisciplinary approach to the calyceal endbulb of Held synapse that transmits auditory information at rates of up to hundreds of Hertz with submillisecond precision we demonstrate a requirement for RIM-BP2 for normal auditory signaling. 
Endbulb synapses lacking RIM-BP2 show a reduced release probability despite normal whole-terminal Ca2+ influx and abundance of the key priming protein Munc13-1, a reduced rate of SV replenishment, as well as an altered topography of voltage-gated (CaV)2.1 Ca2+ channels, and fewer docked and membrane proximal synaptic vesicles (SVs). This hampers transmission of sound onset information likely affecting downstream neural computations such as of sound localization.},
  author       = {Butola, Tanvi and Alvanos, Theocharis and Hintze, Anika and Koppensteiner, Peter and Kleindienst, David and Shigemoto, Ryuichi and Wichmann, Carolin and Moser, Tobias},
  issn         = {1529-2401},
  journal      = {Journal of Neuroscience},
  number       = {37},
  pages        = {7742--7767},
  publisher    = {Society for Neuroscience},
  title        = {{RIM-binding protein 2 organizes Ca{$^{2+}$} channel topography and regulates release probability and vesicle replenishment at a fast central synapse}},
  doi          = {10.1523/JNEUROSCI.0586-21.2021},
  volume       = {41},
  year         = {2021},
}

% Conference paper, CONCUR 2021, LIPIcs vol. 203 (Schloss Dagstuhl). Fix:
% ISBN was mis-hyphenated as 978-3-9597-7203-7; the Dagstuhl registrant
% prefix is 95977 (cf. entry 10054 in this file: 978-3-95977-195-5), so the
% correct grouping is 978-3-95977-203-7 (same digits, corrected hyphens).
@inproceedings{10052,
  abstract     = {A deterministic finite automaton (DFA) 𝒜 is composite if its language L(𝒜) can be decomposed into an intersection ⋂_{i = 1}^k L(𝒜_i) of languages of smaller DFAs. Otherwise, 𝒜 is prime. This notion of primality was introduced by Kupferman and Mosheiff in 2013, and while they proved that we can decide whether a DFA is composite, the precise complexity of this problem is still open, with a doubly-exponential gap between the upper and lower bounds. In this work, we focus on permutation DFAs, i.e., those for which the transition monoid is a group. We provide an NP algorithm to decide whether a permutation DFA is composite, and show that the difficulty of this problem comes from the number of non-accepting states of the instance: we give a fixed-parameter tractable algorithm with the number of rejecting states as the parameter. Moreover, we investigate the class of commutative permutation DFAs. Their structural properties allow us to decide compositionality in NL, and even in LOGSPACE if the alphabet size is fixed. Despite this low complexity, we show that complex behaviors still arise in this class: we provide a family of composite DFAs each requiring polynomially many factors with respect to its size. We also consider the variant of the problem that asks whether a DFA is k-factor composite, that is, decomposable into k smaller DFAs, for some given integer k ∈ ℕ. We show that, for commutative permutation DFAs, restricting the number of factors makes the decision computationally harder, and yields a problem with tight bounds: it is NP-complete. Finally, we show that in general, this problem is in PSPACE, and it is in LOGSPACE for DFAs with a singleton alphabet.},
  author       = {Jecker, Ismael R and Mazzocchi, Nicolas and Wolf, Petra},
  booktitle    = {32nd International Conference on Concurrency Theory},
  isbn         = {978-3-95977-203-7},
  issn         = {1868-8969},
  location     = {Paris, France},
  publisher    = {Schloss Dagstuhl - Leibniz Zentrum für Informatik},
  title        = {{Decomposing permutation automata}},
  doi          = {10.4230/LIPIcs.CONCUR.2021.18},
  volume       = {203},
  year         = {2021},
}

% Conference paper, IEEE ISIT 2021; complete entry (DOI, ISBN/ISSN, pages,
% location, publisher). No volume field — consistent with the other IEEE
% proceedings entry in this file (10049).
@inproceedings{10053,
  abstract     = {This paper characterizes the latency of the simplified successive-cancellation (SSC) decoding scheme for polar codes under hardware resource constraints. In particular, when the number of processing elements P that can perform SSC decoding operations in parallel is limited, as is the case in practice, the latency of SSC decoding is O(N1−1 μ+NPlog2log2NP), where N is the block length of the code and μ is the scaling exponent of polar codes for the channel. Three direct consequences of this bound are presented. First, in a fully-parallel implementation where P=N2 , the latency of SSC decoding is O(N1−1/μ) , which is sublinear in the block length. This recovers a result from an earlier work. Second, in a fully-serial implementation where P=1 , the latency of SSC decoding scales as O(Nlog2log2N) . The multiplicative constant is also calculated: we show that the latency of SSC decoding when P=1 is given by (2+o(1))Nlog2log2N . Third, in a semi-parallel implementation, the smallest P that gives the same latency as that of the fully-parallel implementation is P=N1/μ . The tightness of our bound on SSC decoding latency and the applicability of the foregoing results is validated through extensive simulations.},
  author       = {Hashemi, Seyyed Ali and Mondelli, Marco and Fazeli, Arman and Vardy, Alexander and Cioffi, John and Goldsmith, Andrea},
  booktitle    = {2021 IEEE International Symposium on Information Theory},
  isbn         = {978-1-5386-8210-4},
  issn         = {2157-8095},
  location     = {Melbourne, Australia},
  pages        = {2369--2374},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{Parallelism versus latency in simplified successive-cancellation decoding of polar codes}},
  doi          = {10.1109/ISIT45174.2021.9518153},
  year         = {2021},
}

% Conference paper, ICALP 2021, LIPIcs vol. 198 (Schloss Dagstuhl).
% NOTE(review): no pages field — LIPIcs items carry an article number (the
% DOI suffix suggests article 124); confirm and add.
@inproceedings{10054,
  abstract     = {Graphs and games on graphs are fundamental models for the analysis of reactive systems, in particular, for model-checking and the synthesis of reactive systems. The class of ω-regular languages provides a robust specification formalism for the desired properties of reactive systems. In the classical infinitary formulation of the liveness part of an ω-regular specification, a "good" event must happen eventually without any bound between the good events. A stronger notion of liveness is bounded liveness, which requires that good events happen within d transitions. Given a graph or a game graph with n vertices, m edges, and a bounded liveness objective, the previous best-known algorithmic bounds are as follows: (i) O(dm) for graphs, which in the worst-case is O(n³); and (ii) O(n² d²) for games on graphs. Our main contributions improve these long-standing algorithmic bounds. For graphs we present: (i) a randomized algorithm with one-sided error with running time O(n^{2.5} log n) for the bounded liveness objectives; and (ii) a deterministic linear-time algorithm for the complement of bounded liveness objectives. For games on graphs, we present an O(n² d) time algorithm for the bounded liveness objectives.},
  author       = {Chatterjee, Krishnendu and Henzinger, Monika H and Kale, Sagar Sudhir and Svozil, Alexander},
  booktitle    = {48th International Colloquium on Automata, Languages, and Programming},
  isbn         = {978-3-95977-195-5},
  issn         = {1868-8969},
  location     = {Glasgow, Scotland},
  publisher    = {Schloss Dagstuhl - Leibniz Zentrum für Informatik},
  title        = {{Faster algorithms for bounded liveness in graphs and game graphs}},
  doi          = {10.4230/LIPIcs.ICALP.2021.124},
  volume       = {198},
  year         = {2021},
}

% Conference paper, STACS 2021, LIPIcs vol. 187 (Schloss Dagstuhl). Fix:
% ISBN was mis-hyphenated as 978-3-9597-7180-1; the Dagstuhl registrant
% prefix is 95977 (cf. entry 10054 in this file: 978-3-95977-195-5), so the
% correct grouping is 978-3-95977-180-1 (same digits, corrected hyphens).
@inproceedings{10055,
  abstract     = {Repeated idempotent elements are commonly used to characterise iterable behaviours in abstract models of computation. Therefore, given a monoid M, it is natural to ask how long a sequence of elements of M needs to be to ensure the presence of consecutive idempotent factors. This question is formalised through the notion of the Ramsey function R_M associated to M, obtained by mapping every k ∈ ℕ to the minimal integer R_M(k) such that every word u ∈ M^* of length R_M(k) contains k consecutive non-empty factors that correspond to the same idempotent element of M. In this work, we study the behaviour of the Ramsey function R_M by investigating the regular 𝒟-length of M, defined as the largest size L(M) of a submonoid of M isomorphic to the set of natural numbers {1,2, …, L(M)} equipped with the max operation. We show that the regular 𝒟-length of M determines the degree of R_M, by proving that k^L(M) ≤ R_M(k) ≤ (k|M|⁴)^L(M). To allow applications of this result, we provide the value of the regular 𝒟-length of diverse monoids. In particular, we prove that the full monoid of n × n Boolean matrices, which is used to express transition monoids of non-deterministic automata, has a regular 𝒟-length of (n²+n+2)/2.},
  author       = {Jecker, Ismael R},
  booktitle    = {38th International Symposium on Theoretical Aspects of Computer Science},
  isbn         = {978-3-95977-180-1},
  issn         = {1868-8969},
  location     = {Saarbrücken, Germany},
  publisher    = {Schloss Dagstuhl - Leibniz Zentrum für Informatik},
  title        = {{A Ramsey theorem for finite monoids}},
  doi          = {10.4230/LIPIcs.STACS.2021.44},
  volume       = {187},
  year         = {2021},
}

% PhD thesis, ISTA 2021. Fix: classic BibTeX's @phdthesis type requires a
% `school` field; with only `publisher` present, standard styles warn and
% omit the institution. `school` added; `publisher` kept for styles and
% exports that read it.
@phdthesis{10058,
  abstract     = {Quantum information and computation has become a vast field paved with opportunities for researchers and investors. As large multinational companies and international funds are heavily investing in quantum technologies it is still a question which platform is best suited for the task of realizing a scalable quantum processor. In this work we investigate hole spins in Ge quantum wells. These hold great promise as they possess several favorable properties: a small effective mass, a strong spin-orbit coupling, long relaxation time and an inherent immunity to hyperfine noise. All these characteristics helped Ge hole spin qubits to evolve from a single qubit to a fully entangled four qubit processor in only 3 years. Here, we investigated a qubit approach leveraging the large out-of-plane g-factors of heavy hole states in Ge quantum dots. We found this qubit to be reproducibly operable at extremely low magnetic field and at large speeds while maintaining coherence. This was possible because large differences of g-factors in adjacent dots can be achieved in the out-of-plane direction. In the in-plane direction the small g-factors, on the other hand, can be altered very effectively by the confinement potentials. Here, we found that this can even lead to a sign change of the g-factors. The resulting g-factor difference alters the dynamics of the system drastically and produces effects typically attributed to a spin-orbit induced spin-flip term.  The investigations carried out in this thesis give further insights into the possibilities of holes in Ge and reveal new physical properties that need to be considered when designing future spin qubit experiments.},
  author       = {Jirovec, Daniel},
  issn         = {2663-337X},
  keywords     = {qubits, quantum computing, holes},
  pages        = {151},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{Singlet-Triplet qubits and spin-orbit interaction in 2-dimensional Ge hole gases}},
  doi          = {10.15479/at:ista:10058},
  year         = {2021},
}

% arXiv preprint. Fixes: (1) @unpublished requires a `note` field in classic
% BibTeX — added; (2) arXiv identifier (from the DOI suffix) recorded in the
% standard `eprint`/`archiveprefix` fields; (3) "Kruijf, M. de" placed the
% "de" particle in the given-name slot — BibTeX's "von Last, First" form
% requires "de Kruijf, M." for the particle to be treated as part of the
% surname. `booktitle = {arXiv}` kept per this file's preprint convention
% (cf. entry 10045).
@unpublished{10066,
  abstract     = {The potential of Si and SiGe-based devices for the scaling of quantum circuits is tainted by device variability. Each device needs to be tuned to operation conditions. We give a key step towards tackling this variability with an algorithm that, without modification, is capable of tuning a 4-gate Si FinFET, a 5-gate GeSi nanowire and a 7-gate SiGe heterostructure double quantum dot device from scratch. We achieve tuning times of 30, 10, and 92 minutes, respectively. The algorithm also provides insight into the parameter space landscape for each of these devices. These results show that overarching solutions for the tuning of quantum devices are enabled by machine learning.},
  archiveprefix = {arXiv},
  author       = {Severin, B. and Lennon, D. T. and Camenzind, L. C. and Vigneau, F. and Fedele, F. and Jirovec, Daniel and Ballabio, A. and Chrastina, D. and Isella, G. and de Kruijf, M. and Carballido, M. J. and Svab, S. and Kuhlmann, A. V. and Braakman, F. R. and Geyer, S. and Froning, F. N. M. and Moon, H. and Osborne, M. A. and Sejdinovic, D. and Katsaros, Georgios and Zumbühl, D. M. and Briggs, G. A. D. and Ares, N.},
  booktitle    = {arXiv},
  eprint       = {2107.12975},
  note         = {Preprint},
  title        = {{Cross-architecture tuning of silicon and SiGe-based quantum devices using machine learning}},
  doi          = {10.48550/arXiv.2107.12975},
  year         = {2021},
}

@article{10067,
  abstract     = {The search for novel entangled phases of matter has lead to the recent discovery of a new class of “entanglement transitions,” exemplified by random tensor networks and monitored quantum circuits. Most known examples can be understood as some classical ordering transitions in an underlying statistical mechanics model, where entanglement maps onto the free-energy cost of inserting a domain wall. In this paper we study the possibility of entanglement transitions driven by physics beyond such statistical mechanics mappings. Motivated by recent applications of neural-network-inspired variational Ansätze, we investigate under what conditions on the variational parameters these Ansätze can capture an entanglement transition. We study the entanglement scaling of short-range restricted Boltzmann machine (RBM) quantum states with random phases. For uncorrelated random phases, we analytically demonstrate the absence of an entanglement transition and reveal subtle finite-size effects in finite-size numerical simulations. Introducing phases with correlations decaying as 1/r^α in real space, we observe three regions with a different scaling of entanglement entropy depending on the exponent α. We study the nature of the transition between these regions, finding numerical evidence for critical behavior. Our work establishes the presence of long-range correlated phases in RBM-based wave functions as a required ingredient for entanglement transitions.},
  author       = {Medina Ramos, Raimel A and Vasseur, Romain and Serbyn, Maksym},
  issn         = {2469-9969},
  journal      = {Physical Review B},
  number       = {10},
  pages        = {104205},
  publisher    = {American Physical Society},
  title        = {{Entanglement transitions from restricted Boltzmann machines}},
  doi          = {10.1103/physrevb.104.104205},
  volume       = {104},
  year         = {2021},
}

@article{10069,
  author       = {Robinson, Matthew Richard and Patxot, Marion and Stojanov, Miloš and Blum, Sabine and Baud, David},
  title        = {{Postpartum hemorrhage risk is driven by changes in blood composition through pregnancy}},
  journal      = {Scientific Reports},
  volume       = {11},
  publisher    = {Springer Nature},
  issn         = {2045-2322},
  doi          = {10.1038/s41598-021-98411-z},
  year         = {2021},
  abstract     = {The extent to which women differ in the course of blood cell counts throughout pregnancy, and the importance of these changes to pregnancy outcomes has not been well defined. Here, we develop a series of statistical analyses of repeated measures data to reveal the degree to which women differ in the course of pregnancy, predict the changes that occur, and determine the importance of these changes for post-partum hemorrhage (PPH) which is one of the leading causes of maternal mortality. We present a prospective cohort of 4082 births recorded at the University Hospital, Lausanne, Switzerland between 2009 and 2014 where full labour records could be obtained, along with complete blood count data taken at hospital admission. We find significant differences, at a [Formula: see text] level, among women in how blood count values change through pregnancy for mean corpuscular hemoglobin, mean corpuscular volume, mean platelet volume, platelet count and red cell distribution width. We find evidence that almost all complete blood count values show trimester-specific associations with PPH. For example, high platelet count (OR 1.20, 95% CI 1.01-1.53), high mean platelet volume (OR 1.58, 95% CI 1.04-2.08), and high erythrocyte levels (OR 1.36, 95% CI 1.01-1.57) in trimester 1 increased PPH, but high values in trimester 3 decreased PPH risk (OR 0.85, 0.79, 0.67 respectively). We show that differences among women in the course of blood cell counts throughout pregnancy have an important role in shaping pregnancy outcome and tracking blood count value changes through pregnancy improves identification of women at increased risk of postpartum hemorrhage. This study provides greater understanding of the complex changes in blood count values that occur through pregnancy and provides indicators to guide the stratification of patients into risk groups.},
}

@article{10070,
  abstract     = {We extensively discuss the Rademacher and Sobolev-to-Lipschitz properties for generalized intrinsic distances on strongly local Dirichlet spaces possibly without square field operator. We present many non-smooth and infinite-dimensional examples. As an application, we prove the integral Varadhan short-time asymptotic with respect to a given distance function for a large class of strongly local Dirichlet forms.},
  author       = {Dello Schiavo, Lorenzo and Suzuki, Kohei},
  issn         = {1096-0783},
  journal      = {Journal of Functional Analysis},
  number       = {11},
  pages        = {109234},
  publisher    = {Elsevier},
  title        = {{Rademacher-type theorems and Sobolev-to-Lipschitz properties for strongly local Dirichlet spaces}},
  doi          = {10.1016/j.jfa.2021.109234},
  volume       = {281},
  year         = {2021},
}

@article{10071,
  author       = {Adams, Henry and Kourimska, Hana and Heiss, Teresa and Percival, Sarah and Ziegelmeier, Lori},
  title        = {{How to tutorial-a-thon}},
  journal      = {Notices of the American Mathematical Society},
  volume       = {68},
  number       = {9},
  pages        = {1511--1514},
  publisher    = {American Mathematical Society},
  issn         = {1088-9477},
  doi          = {10.1090/noti2349},
  year         = {2021},
}

@inproceedings{10072,
  abstract     = {The Lovász Local Lemma (LLL) is a powerful tool in probabilistic combinatorics which can be used to establish the existence of objects that satisfy certain properties. The breakthrough paper of Moser and Tardos and follow-up works revealed that the LLL has intimate connections with a class of stochastic local search algorithms for finding such desirable objects. In particular, it can be seen as a sufficient condition for this type of algorithms to converge fast. Besides conditions for existence of and fast convergence to desirable objects, one may naturally ask further questions regarding properties of these algorithms. For instance, "are they parallelizable?", "how many solutions can they output?", "what is the expected "weight" of a solution?", etc. These questions and more have been answered for a class of LLL-inspired algorithms called commutative. In this paper we introduce a new, very natural and more general notion of commutativity (essentially matrix commutativity) which allows us to show a number of new refined properties of LLL-inspired local search algorithms with significantly simpler proofs.},
  author       = {Harris, David G. and Iliopoulos, Fotis and Kolmogorov, Vladimir},
  booktitle    = {Approximation, Randomization, and Combinatorial Optimization. Algorithms and Techniques},
  isbn         = {978-3-95977-207-5},
  issn         = {1868-8969},
  location     = {Virtual},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  series       = {Leibniz International Proceedings in Informatics (LIPIcs)},
  title        = {{A new notion of commutativity for the algorithmic Lovász Local Lemma}},
  doi          = {10.4230/LIPIcs.APPROX/RANDOM.2021.31},
  volume       = {207},
  year         = {2021},
}

