@phdthesis{68,
  abstract     = {The most common assumption made in statistical learning theory is the assumption of the independent and identically distributed (i.i.d.) data. While being very convenient mathematically, it is often very clearly violated in practice. This disparity between the machine learning theory and applications underlies a growing demand in the development of algorithms that learn from dependent data and theory that can provide generalization guarantees similar to the independent situations. This thesis is dedicated to two variants of dependencies that can arise in practice. One is a dependence on the level of samples in a single learning task. Another dependency type arises in the multi-task setting when the tasks are dependent on each other even though the data for them can be i.i.d. In both cases we model the data (samples or tasks) as stochastic processes and introduce new algorithms for both settings that take into account and exploit the resulting dependencies. We prove the theoretical guarantees on the performance of the introduced algorithms under different evaluation criteria and, in addition, we complement the theoretical study by the empirical one, where we evaluate some of the algorithms on two real world datasets to highlight their practical applicability.},
  author       = {Zimin, Alexander},
  issn         = {2663-337X},
  pages        = {92},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Learning from dependent data}},
  doi          = {10.15479/AT:ISTA:TH1048},
  year         = {2018},
}

@phdthesis{69,
  abstract     = {A qubit, a unit of quantum information, is essentially any quantum mechanical two-level system which can be coherently controlled. Still, to be used for computation, it has to fulfill criteria. Qubits, regardless of the system in which they are realized, suffer from decoherence. This leads to loss of the information stored in the qubit. The upper bound of the time scale on which decoherence happens is set by the spin relaxation time. In this thesis I studied a two-level system consisting of a Zeeman-split hole spin confined in a quantum dot formed in a Ge hut wire. Such Ge hut wires have emerged as a promising material system for the realization of spin qubits, due to the combination of two significant properties: long spin coherence time as expected for group IV semiconductors due to the low hyperfine interaction and a strong valence band spin-orbit coupling. Here, I present how to fabricate quantum dot devices suitable for electrical transport measurements. Coupled quantum dot devices allowed the realization of a charge sensor, which is electrostatically and tunnel coupled to a quantum dot. By integrating the charge sensor into a radio-frequency reflectometry setup, I performed for the first time single-shot readout measurements of hole spins and extracted the hole spin relaxation times in Ge hut wires.},
  author       = {Vukušić, Lada},
  issn         = {2663-337X},
  pages        = {103},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Charge sensing and spin relaxation times of holes in Ge hut wires}},
  doi          = {10.15479/AT:ISTA:TH_1047},
  year         = {2018},
}

@article{690,
  abstract     = {We consider spectral properties and the edge universality of sparse random matrices, the class of random matrices that includes the adjacency matrices of the Erdős–Rényi graph model G(N, p). We prove a local law for the eigenvalue density up to the spectral edges. Under a suitable condition on the sparsity, we also prove that the rescaled extremal eigenvalues exhibit GOE Tracy–Widom fluctuations if a deterministic shift of the spectral edge due to the sparsity is included. For the adjacency matrix of the Erdős–Rényi graph this establishes the Tracy–Widom fluctuations of the second largest eigenvalue when p is much larger than $N^{-2/3}$ with a deterministic shift of order $(Np)^{-1}$.},
  author       = {Lee, Ji Oon and Schnelli, Kevin},
  journal      = {Probability Theory and Related Fields},
  number       = {1-2},
  publisher    = {Springer},
  title        = {{Local law and Tracy–Widom limit for sparse random matrices}},
  doi          = {10.1007/s00440-017-0787-8},
  volume       = {171},
  year         = {2018},
}

@article{691,
  abstract     = {Background: Transport protein particle (TRAPP) is a multisubunit complex that regulates membrane trafficking through the Golgi apparatus. The clinical phenotype associated with mutations in various TRAPP subunits has allowed elucidation of their functions in specific tissues. The role of some subunits in human disease, however, has not been fully established, and their functions remain uncertain.

Objective: We aimed to expand the range of neurodevelopmental disorders associated with mutations in TRAPP subunits by exome sequencing of consanguineous families.

Methods: Linkage and homozygosity mapping and candidate gene analysis were used to identify homozygous mutations in families. Patient fibroblasts were used to study splicing defect and zebrafish to model the disease.

Results: We identified six individuals from three unrelated families with a founder homozygous splice mutation in TRAPPC6B, encoding a core subunit of the complex TRAPP I. Patients manifested a neurodevelopmental disorder characterised by microcephaly, epilepsy and autistic features, and showed splicing defect. Zebrafish trappc6b morphants replicated the human phenotype, displaying decreased head size and neuronal hyperexcitability, leading to a lower seizure threshold.

Conclusion: This study provides clinical and functional evidence of the role of TRAPPC6B in brain development and function.},
  author       = {Marin Valencia, Isaac and Novarino, Gaia and Johansen, Anide and Rosti, Başak and Issa, Mahmoud and Musaev, Damir and Bhat, Gifty and Scott, Eric and Silhavy, Jennifer and Stanley, Valentina and Rosti, Rasim and Gleeson, Jeremy and Imam, Farhad and Zaki, Maha and Gleeson, Joseph},
  issn         = {0022-2593},
  journal      = {Journal of Medical Genetics},
  number       = {1},
  pages        = {48--54},
  publisher    = {BMJ Publishing Group},
  title        = {{A homozygous founder mutation in TRAPPC6B associates with a neurodevelopmental disorder characterised by microcephaly epilepsy and autistic features}},
  doi          = {10.1136/jmedgenet-2017-104627},
  volume       = {55},
  year         = {2018},
}

@article{692,
  abstract     = {We consider families of confocal conics and two pencils of Apollonian circles having the same foci. We will show that these families of curves generate trivial 3-webs and find the exact formulas describing them.},
  author       = {Akopyan, Arseniy},
  journal      = {Geometriae Dedicata},
  number       = {1},
  pages        = {55--64},
  publisher    = {Springer},
  title        = {{3-Webs generated by confocal conics and circles}},
  doi          = {10.1007/s10711-017-0265-6},
  volume       = {194},
  year         = {2018},
}

@inproceedings{6941,
  abstract     = {Bitcoin has become the most successful cryptocurrency ever deployed, and its most distinctive feature is that it is decentralized. Its underlying protocol (Nakamoto consensus) achieves this by using proof of work, which has the drawback that it causes the consumption of vast amounts of energy to maintain the ledger. Moreover, Bitcoin mining dynamics have become less distributed over time.

Towards addressing these issues, we propose SpaceMint, a cryptocurrency based on proofs of space instead of proofs of work. Miners in SpaceMint dedicate disk space rather than computation. We argue that SpaceMint’s design solves or alleviates several of Bitcoin’s issues: most notably, its large energy consumption. SpaceMint also rewards smaller miners fairly according to their contribution to the network, thus incentivizing more distributed participation.

This paper adapts proof of space to enable its use in cryptocurrency, studies the attacks that can arise against a Bitcoin-like blockchain that uses proof of space, and proposes a new blockchain format and transaction types to address these attacks. Our prototype shows that initializing 1 TB for mining takes about a day (a one-off setup cost), and miners spend on average just a fraction of a second per block mined. Finally, we provide a game-theoretic analysis modeling SpaceMint as an extensive game (the canonical game-theoretic notion for games that take place over time) and show that this stylized game satisfies a strong equilibrium notion, thereby arguing for SpaceMint ’s stability and consensus.},
  author       = {Park, Sunoo and Kwon, Albert and Fuchsbauer, Georg and Gazi, Peter and Alwen, Joel F and Pietrzak, Krzysztof Z},
  booktitle    = {22nd International Conference on Financial Cryptography and Data Security},
  isbn         = {9783662583869},
  issn         = {1611-3349},
  location     = {Nieuwpoort, Curaçao},
  pages        = {480--499},
  publisher    = {Springer Nature},
  title        = {{SpaceMint: A cryptocurrency based on proofs of space}},
  doi          = {10.1007/978-3-662-58387-6_26},
  volume       = {10957},
  year         = {2018},
}

@article{7,
  abstract     = {Animal social networks are shaped by multiple selection pressures, including the need to ensure efficient communication and functioning while simultaneously limiting disease transmission. Social animals could potentially further reduce epidemic risk by altering their social networks in the presence of pathogens, yet there is currently no evidence for such pathogen-triggered responses. We tested this hypothesis experimentally in the ant Lasius niger using a combination of automated tracking, controlled pathogen exposure, transmission quantification, and temporally explicit simulations. Pathogen exposure induced behavioral changes in both exposed ants and their nestmates, which helped contain the disease by reinforcing key transmission-inhibitory properties of the colony's contact network. This suggests that social network plasticity in response to pathogens is an effective strategy for mitigating the effects of disease in social groups.},
  author       = {Stroeymeyt, Nathalie and Grasse, Anna V and Crespi, Alessandro and Mersch, Danielle and Cremer, Sylvia and Keller, Laurent},
  issn         = {1095-9203},
  journal      = {Science},
  number       = {6417},
  pages        = {941--945},
  publisher    = {AAAS},
  title        = {{Social network plasticity decreases disease transmission in a eusocial insect}},
  doi          = {10.1126/science.aat4793},
  volume       = {362},
  year         = {2018},
}

@article{70,
  abstract     = {We consider the totally asymmetric simple exclusion process in a critical scaling parametrized by $a \geq 0$, which creates a shock in the particle density of order $aT^{-1/3}$, T the observation time. When starting from step initial data, we provide bounds on the limiting law which in particular imply that in the double limit $\lim_{a\to\infty}\lim_{T\to\infty}$ one recovers the product limit law and the degeneration of the correlation length observed at shocks of order 1. This result is shown to apply to a general last-passage percolation model. We also obtain bounds on the two-point functions of several Airy processes.},
  author       = {Nejjar, Peter},
  issn         = {1980-0436},
  journal      = {Latin American Journal of Probability and Mathematical Statistics},
  number       = {2},
  pages        = {1311--1334},
  publisher    = {Instituto Nacional de Matematica Pura e Aplicada},
  title        = {{Transition to shocks in TASEP and decoupling of last passage times}},
  doi          = {10.30757/ALEA.v15-49},
  volume       = {15},
  year         = {2018},
}

@article{703,
  abstract     = {We consider the NP-hard problem of MAP-inference for undirected discrete graphical models. We propose a polynomial time and practically efficient algorithm for finding a part of its optimal solution. Specifically, our algorithm marks some labels of the considered graphical model either as (i) optimal, meaning that they belong to all optimal solutions of the inference problem; (ii) non-optimal if they provably do not belong to any solution. With access to an exact solver of a linear programming relaxation to the MAP-inference problem, our algorithm marks the maximal possible (in a specified sense) number of labels. We also present a version of the algorithm, which has access to a suboptimal dual solver only and still can ensure the (non-)optimality for the marked labels, although the overall number of the marked labels may decrease. We propose an efficient implementation, which runs in time comparable to a single run of a suboptimal dual solver. Our method is well-scalable and shows state-of-the-art results on computational benchmarks from machine learning and computer vision.},
  author       = {Shekhovtsov, Alexander and Swoboda, Paul and Savchynskyy, Bogdan},
  issn         = {0162-8828},
  journal      = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  number       = {7},
  pages        = {1668--1682},
  publisher    = {IEEE},
  title        = {{Maximum persistency via iterative relaxed inference with graphical models}},
  doi          = {10.1109/TPAMI.2017.2730884},
  volume       = {40},
  year         = {2018},
}

@article{705,
  abstract     = {Although dopamine receptors D1 and D2 play key roles in hippocampal function, their synaptic localization within the hippocampus has not been fully elucidated. In order to understand precise functions of pre- or postsynaptic dopamine receptors (DRs), the development of protocols to differentiate pre- and postsynaptic DRs is essential. So far, most studies on determination and quantification of DRs did not discriminate between subsynaptic localization. Therefore, the aim of the study was to generate a robust workflow for the localization of DRs. This work provides the basis for future work on hippocampal DRs, in light that DRs may have different functions at pre- or postsynaptic sites. Synaptosomes from rat hippocampi isolated by a sucrose gradient protocol were prepared for super-resolution direct stochastic optical reconstruction microscopy (dSTORM) using Bassoon as a presynaptic zone and Homer1 as postsynaptic density marker. Direct labeling of primary validated antibodies against dopamine receptors D1 (D1R) and D2 (D2R) with Alexa Fluor 594 enabled unequivocal assignment of D1R and D2R to both, pre- and postsynaptic sites. D1R immunoreactivity clusters were observed within the presynaptic active zone as well as at perisynaptic sites at the edge of the presynaptic active zone. The results may be useful for the interpretation of previous studies and the design of future work on DRs in the hippocampus. Moreover, the reduction of the complexity of brain tissue by the use of synaptosomal preparations and dSTORM technology may represent a useful tool for synaptic localization of brain proteins.},
  author       = {Miklosi, Andras and Del Favero, Giorgia and Bulat, Tanja and Höger, Harald and Shigemoto, Ryuichi and Marko, Doris and Lubec, Gert},
  journal      = {Molecular Neurobiology},
  number       = {6},
  pages        = {4857--4869},
  publisher    = {Springer},
  title        = {{Super resolution microscopical localization of dopamine receptors 1 and 2 in rat hippocampal synaptosomes}},
  doi          = {10.1007/s12035-017-0688-y},
  volume       = {55},
  year         = {2018},
}

@inproceedings{7116,
  abstract     = {Training deep learning models has received tremendous research interest recently. In particular, there has been intensive research on reducing the communication cost of training when using multiple computational devices, through reducing the precision of the underlying data representation. Naturally, such methods induce system trade-offs—lowering communication precision could de-crease communication overheads and improve scalability; but, on the other hand, it can also reduce the accuracy of training. In this paper, we study this trade-off space, and ask:Can low-precision communication consistently improve the end-to-end performance of training modern neural networks, with no accuracy loss?From the performance point of view, the answer to this question may appear deceptively easy: compressing communication through low precision should help when the ratio between communication and computation is high. However, this answer is less straightforward when we try to generalize this principle across various neural network architectures (e.g., AlexNet vs. ResNet),number of GPUs (e.g., 2 vs. 8 GPUs), machine configurations(e.g., EC2 instances vs. NVIDIA DGX-1), communication primitives (e.g., MPI vs. NCCL), and even different GPU architectures(e.g., Kepler vs. Pascal). Currently, it is not clear how a realistic realization of all these factors maps to the speed up provided by low-precision communication. In this paper, we conduct an empirical study to answer this question and report the insights.},
  author       = {Grubic, Demjan and Tam, Leo and Alistarh, Dan-Adrian and Zhang, Ce},
  booktitle    = {Proceedings of the 21st International Conference on Extending Database Technology},
  isbn         = {9783893180783},
  issn         = {2367-2005},
  location     = {Vienna, Austria},
  pages        = {145--156},
  publisher    = {OpenProceedings},
  title        = {{Synchronous multi-GPU training for deep learning with low-precision communications: An empirical study}},
  doi          = {10.5441/002/EDBT.2018.14},
  year         = {2018},
}

@inproceedings{7123,
  abstract     = {Population protocols are a popular model of distributed computing, in which n agents with limited local state interact randomly, and cooperate to collectively compute global predicates. Inspired by recent developments in DNA programming, an extensive series of papers, across different communities, has examined the computability and complexity characteristics of this model. Majority, or consensus, is a central task in this model, in which agents need to collectively reach a decision as to which one of two states A or B had a higher initial count. Two metrics are important: the time that a protocol requires to stabilize to an output decision, and the state space size that each agent requires to do so. It is known that majority requires Ω(log log n) states per agent to allow for fast (poly-logarithmic time) stabilization, and that O(log2 n) states are sufficient. Thus, there is an exponential gap between the space upper and lower bounds for this problem. This paper addresses this question.

On the negative side, we provide a new lower bound of Ω(log n) states for any protocol which stabilizes in O(n1–c) expected time, for any constant c > 0. This result is conditional on monotonicity and output assumptions, satisfied by all known protocols. Technically, it represents a departure from previous lower bounds, in that it does not rely on the existence of dense configurations. Instead, we introduce a new generalized surgery technique to prove the existence of incorrect executions for any algorithm which would contradict the lower bound. Subsequently, our lower bound also applies to general initial configurations, including ones with a leader. On the positive side, we give a new algorithm for majority which uses O(log n) states, and stabilizes in O(log2 n) expected time. Central to the algorithm is a new leaderless phase clock technique, which allows agents to synchronize in phases of Θ(n log n) consecutive interactions using O(log n) states per agent, exploiting a new connection between population protocols and power-of-two-choices load balancing mechanisms. We also employ our phase clock to build a leader election algorithm with a state space of size O(log n), which stabilizes in O(log2 n) expected time.},
  author       = {Alistarh, Dan-Adrian and Aspnes, James and Gelashvili, Rati},
  booktitle    = {Proceedings of the 29th Annual ACM-SIAM Symposium on Discrete Algorithms},
  isbn         = {9781611975031},
  location     = {New Orleans, LA, United States},
  pages        = {2221--2239},
  publisher    = {ACM},
  title        = {{Space-optimal majority in population protocols}},
  doi          = {10.1137/1.9781611975031.144},
  year         = {2018},
}

@article{723,
  abstract     = {Escaping local optima is one of the major obstacles to function optimisation. Using the metaphor of a fitness landscape, local optima correspond to hills separated by fitness valleys that have to be overcome. We define a class of fitness valleys of tunable difficulty by considering their length, representing the Hamming path between the two optima and their depth, the drop in fitness. For this function class we present a runtime comparison between stochastic search algorithms using different search strategies. The (1+1) EA is a simple and well-studied evolutionary algorithm that has to jump across the valley to a point of higher fitness because it does not accept worsening moves (elitism). In contrast, the Metropolis algorithm and the Strong Selection Weak Mutation (SSWM) algorithm, a famous process in population genetics, are both able to cross the fitness valley by accepting worsening moves. We show that the runtime of the (1+1) EA depends critically on the length of the valley while the runtimes of the non-elitist algorithms depend crucially on the depth of the valley. Moreover, we show that both SSWM and Metropolis can also efficiently optimise a rugged function consisting of consecutive valleys.},
  author       = {Oliveto, Pietro and Paixao, Tiago and Pérez Heredia, Jorge and Sudholt, Dirk and Trubenova, Barbora},
  journal      = {Algorithmica},
  number       = {5},
  pages        = {1604--1633},
  publisher    = {Springer},
  title        = {{How to escape local optima in black box optimisation when non elitism outperforms elitism}},
  doi          = {10.1007/s00453-017-0369-2},
  volume       = {80},
  year         = {2018},
}

@article{738,
  abstract     = {This paper is devoted to automatic competitive analysis of real-time scheduling algorithms for firm-deadline tasksets, where only completed tasks contribute some utility to the system. Given such a taskset T, the competitive ratio of an on-line scheduling algorithm A for T is the worst-case utility ratio of A over the utility achieved by a clairvoyant algorithm. We leverage the theory of quantitative graph games to address the competitive analysis and competitive synthesis problems. For the competitive analysis case, given any taskset T and any finite-memory on-line scheduling algorithm A, we show that the competitive ratio of A in T can be computed in polynomial time in the size of the state space of A. Our approach is flexible as it also provides ways to model meaningful constraints on the released task sequences that determine the competitive ratio. We provide an experimental study of many well-known on-line scheduling algorithms, which demonstrates the feasibility of our competitive analysis approach that effectively replaces human ingenuity (required for finding worst-case scenarios) by computing power. For the competitive synthesis case, we are just given a taskset T, and the goal is to automatically synthesize an optimal on-line scheduling algorithm A, i.e., one that guarantees the largest competitive ratio possible for T. We show how the competitive synthesis problem can be reduced to a two-player graph game with partial information, and establish that the computational complexity of solving this game is NP-complete. The competitive synthesis problem is hence in NP in the size of the state space of the non-deterministic labeled transition system encoding the taskset. Overall, the proposed framework assists in the selection of suitable scheduling algorithms for a given taskset, which is in fact the most common situation in real-time systems design.},
  author       = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Kößler, Alexander and Schmid, Ulrich},
  journal      = {Real-Time Systems},
  number       = {1},
  pages        = {166--207},
  publisher    = {Springer},
  title        = {{Automated competitive analysis of real time scheduling with graph games}},
  doi          = {10.1007/s11241-017-9293-4},
  volume       = {54},
  year         = {2018},
}

@inproceedings{7407,
  abstract     = {Proofs of space (PoS) [Dziembowski et al., CRYPTO'15] are proof systems where a prover can convince a verifier that he "wastes" disk space. PoS were introduced as a more ecological and economical replacement for proofs of work which are currently used to secure blockchains like Bitcoin. In this work we investigate extensions of PoS which allow the prover to embed useful data into the dedicated space, which later can be recovered. Our first contribution is a security proof for the original PoS from CRYPTO'15 in the random oracle model (the original proof only applied to a restricted class of adversaries which can store a subset of the data an honest prover would store). When this PoS is instantiated with recent constructions of maximally depth robust graphs, our proof implies basically optimal security. As a second contribution we show three different extensions of this PoS where useful data can be embedded into the space required by the prover. Our security proof for the PoS extends (non-trivially) to these constructions. We discuss how some of these variants can be used as proofs of catalytic space (PoCS), a notion we put forward in this work, and which basically is a PoS where most of the space required by the prover can be used to backup useful data. Finally we discuss how one of the extensions is a candidate construction for a proof of replication (PoR), a proof system recently suggested in the Filecoin whitepaper. },
  author       = {Pietrzak, Krzysztof Z},
  booktitle    = {10th Innovations in Theoretical Computer Science  Conference (ITCS 2019)},
  isbn         = {978-3-95977-095-8},
  issn         = {1868-8969},
  location     = {San Diego, CA, United States},
  pages        = {59:1--59:25},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Proofs of catalytic space}},
  doi          = {10.4230/LIPICS.ITCS.2019.59},
  volume       = {124},
  year         = {2018},
}

@article{742,
  abstract     = {We give a detailed and easily accessible proof of Gromov’s Topological Overlap Theorem. Let X be a finite simplicial complex or, more generally, a finite polyhedral cell complex of dimension d. Informally, the theorem states that if X has sufficiently strong higher-dimensional expansion properties (which generalize edge expansion of graphs and are defined in terms of cellular cochains of X) then X has the following topological overlap property: for every continuous map (Formula presented.) there exists a point (Formula presented.) that is contained in the images of a positive fraction (Formula presented.) of the d-cells of X. More generally, the conclusion holds if (Formula presented.) is replaced by any d-dimensional piecewise-linear manifold M, with a constant (Formula presented.) that depends only on d and on the expansion properties of X, but not on M.},
  author       = {Dotterrer, Dominic and Kaufman, Tali and Wagner, Uli},
  journal      = {Geometriae Dedicata},
  number       = {1},
  pages        = {307--317},
  publisher    = {Springer},
  title        = {{On expansion and topological overlap}},
  doi          = {10.1007/s10711-017-0291-4},
  volume       = {195},
  year         = {2018},
}

@unpublished{75,
  abstract     = {We prove that any convex body in the plane can be partitioned into m convex parts of equal areas and perimeters for any integer m≥2; this result was previously known for prime powers m=pk. We also give a higher-dimensional generalization.},
  author       = {Akopyan, Arseniy and Avvakumov, Sergey and Karasev, Roman},
  publisher    = {arXiv},
  title        = {{Convex fair partitions into arbitrary number of pieces}},
  doi          = {10.48550/arXiv.1804.03057},
  note         = {arXiv preprint arXiv:1804.03057},
  year         = {2018},
}

@article{76,
  abstract     = {Consider a fully-connected synchronous distributed system consisting of n nodes, where up to f nodes may be faulty and every node starts in an arbitrary initial state. In the synchronous C-counting problem, all nodes need to eventually agree on a counter that is increased by one modulo C in each round for given C&gt;1. In the self-stabilising firing squad problem, the task is to eventually guarantee that all non-faulty nodes have simultaneous responses to external inputs: if a subset of the correct nodes receive an external “go” signal as input, then all correct nodes should agree on a round (in the not-too-distant future) in which to jointly output a “fire” signal. Moreover, no node should generate a “fire” signal without some correct node having previously received a “go” signal as input. We present a framework reducing both tasks to binary consensus at very small cost. For example, we obtain a deterministic algorithm for self-stabilising Byzantine firing squads with optimal resilience f&lt;n/3, asymptotically optimal stabilisation and response time O(f), and message size O(log f). As our framework does not restrict the type of consensus routines used, we also obtain efficient randomised solutions.},
  author       = {Lenzen, Christoph and Rybicki, Joel},
  journal      = {Distributed Computing},
  publisher    = {Springer},
  title        = {{Near-optimal self-stabilising counting and firing squads}},
  doi          = {10.1007/s00446-018-0342-6},
  year         = {2018},
}

@article{77,
  abstract     = {Holes confined in quantum dots have gained considerable interest in the past few years due to their potential as spin qubits. Here we demonstrate two-axis control of a spin 3/2 qubit in natural Ge. The qubit is formed in a hut wire double quantum dot device. The Pauli spin blockade principle allowed us to demonstrate electric dipole spin resonance by applying a radio frequency electric field to one of the electrodes defining the double quantum dot. Coherent hole spin oscillations with Rabi frequencies reaching 140 MHz are demonstrated and dephasing times of 130 ns are measured. The reported results emphasize the potential of Ge as a platform for fast and electrically tunable hole spin qubit devices.},
  author       = {Watzinger, Hannes and Kukučka, Josip and Vukušić, Lada and Gao, Fei and Wang, Ting and Schäffler, Friedrich and Zhang, Jian and Katsaros, Georgios},
  journal      = {Nature Communications},
  number       = {3902},
  publisher    = {Nature Publishing Group},
  title        = {{A germanium hole spin qubit}},
  doi          = {10.1038/s41467-018-06418-4},
  volume       = {9},
  year         = {2018},
}

@article{456,
  abstract     = {Inhibition of the endoplasmic reticulum stress pathway may hold the key to Zika virus-associated microcephaly treatment. },
  author       = {Novarino, Gaia},
  journal      = {Science Translational Medicine},
  number       = {423},
  publisher    = {American Association for the Advancement of Science},
  title        = {{Zika-associated microcephaly: Reduce the stress and race for the treatment}},
  doi          = {10.1126/scitranslmed.aar7514},
  volume       = {10},
  year         = {2018},
}

