@article{58,
  abstract     = {Inside a two-dimensional region (``cake''), there are m nonoverlapping tiles of a certain kind (``toppings''). We want to expand the toppings while keeping them nonoverlapping, and possibly add some blank pieces of the same ``certain kind,'' such that the entire cake is covered. How many blanks must we add? We study this question in several cases: (1) The cake and toppings are general polygons. (2) The cake and toppings are convex figures. (3) The cake and toppings are axis-parallel rectangles. (4) The cake is an axis-parallel rectilinear polygon and the toppings are axis-parallel rectangles. In all four cases, we provide tight bounds on the number of blanks.},
  author       = {Akopyan, Arseniy and Segal-Halevi, Erel},
  journal      = {SIAM Journal on Discrete Mathematics},
  number       = {3},
  pages        = {2242--2257},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Counting blanks in polygonal arrangements}},
  doi          = {10.1137/16M110407X},
  volume       = {32},
  year         = {2018},
}

@article{5816,
  abstract     = {Solid-state qubit manipulation and read-out fidelities are reaching fault-tolerance, but quantum error correction requires millions of physical qubits and therefore a scalable quantum computer architecture. To solve signal-line bandwidth and fan-out problems, microwave sources required for qubit manipulation might be embedded close to the qubit chip, typically operating at temperatures below 4 K. Here, we perform the first low temperature measurements of a 130 nm BiCMOS based SiGe voltage controlled oscillator at cryogenic temperature. We determined the frequency and output power dependence on temperature and magnetic field up to 5 T and measured the temperature influence on its noise performance. The device maintains its full functionality from 300 K to 4 K. The carrier frequency at 4 K increases by 3% with respect to the carrier frequency at 300 K, and the output power at 4 K increases by 10 dB relative to the output power at 300 K. The frequency tuning range of approximately 20% remains unchanged between 300 K and 4 K. In an in-plane magnetic field of 5 T, the carrier frequency shifts by only 0.02% compared to the frequency at zero magnetic field.},
  author       = {Hollmann, Arne and Jirovec, Daniel and Kucharski, Maciej and Kissinger, Dietmar and Fischer, Gunter and Schreiber, Lars R.},
  issn         = {0034-6748},
  journal      = {Review of Scientific Instruments},
  number       = {11},
  publisher    = {AIP Publishing},
  title        = {{30 GHz-voltage controlled oscillator operating at 4 K}},
  doi          = {10.1063/1.5038258},
  volume       = {89},
  year         = {2018},
}

@article{5830,
  abstract     = {CLE peptides have been implicated in various developmental processes of plants and mediate their responses to environmental stimuli. However, the biological relevance of most CLE genes remains to be functionally characterized. Here, we report that CLE9, which is expressed in stomata, acts as an essential regulator in the induction of stomatal closure. Exogenous application of CLE9 peptides or overexpression of CLE9 effectively led to stomatal closure and enhanced drought tolerance, whereas CLE9 loss-of-function mutants were sensitive to drought stress. CLE9-induced stomatal closure was impaired in abscisic acid (ABA)-deficient mutants, indicating that ABA is required for CLE9-mediated guard cell signalling. We further deciphered that two guard cell ABA-signalling components, OST1 and SLAC1, were responsible for CLE9-induced stomatal closure. MPK3 and MPK6 were activated by the CLE9 peptide, and CLE9 peptides failed to close stomata in mpk3 and mpk6 mutants. In addition, CLE9 peptides stimulated the induction of hydrogen peroxide (H2O2) and nitric oxide (NO) synthesis associated with stomatal closure, which was abolished in the NADPH oxidase-deficient mutants or nitric reductase mutants, respectively. Collectively, our results reveal a novel ABA-dependent function of CLE9 in the regulation of stomatal apertures, thereby suggesting a potential role of CLE9 in the stress acclimatization of plants.},
  author       = {Zhang, Luosha and Shi, Xiong and Zhang, Yutao and Wang, Jiajing and Yang, Jingwei and Ishida, Takashi and Jiang, Wenqian and Han, Xiangyu and Kang, Jingke and Wang, Xuening and Pan, Lixia and Lv, Shuo and Cao, Bing and Zhang, Yonghong and Wu, Jinbin and Han, Huibin and Hu, Zhubing and Cui, Langjun and Sawa, Shinichiro and He, Junmin and Wang, Guodong},
  issn         = {0140-7791},
  journal      = {Plant, Cell \& Environment},
  publisher    = {Wiley},
  title        = {{CLE9 peptide-induced stomatal closure is mediated by abscisic acid, hydrogen peroxide, and nitric oxide in Arabidopsis thaliana}},
  doi          = {10.1111/pce.13475},
  year         = {2018},
}

@article{5858,
  abstract     = {Spatial patterns are ubiquitous on the subcellular, cellular and tissue level, and can be studied using imaging techniques such as light and fluorescence microscopy. Imaging data provide quantitative information about biological systems; however, mechanisms causing spatial patterning often remain elusive. In recent years, spatio-temporal mathematical modelling has helped to overcome this problem. Yet, outliers and structured noise limit modelling of whole imaging data, and models often consider spatial summary statistics. Here, we introduce an integrated data-driven modelling approach that can cope with measurement artefacts and whole imaging data. Our approach combines mechanistic models of the biological processes with robust statistical models of the measurement process. The parameters of the integrated model are calibrated using a maximum-likelihood approach. We used this integrated modelling approach to study in vivo gradients of the chemokine (C-C motif) ligand 21 (CCL21). CCL21 gradients guide dendritic cells and are important in the adaptive immune response. Using artificial data, we verified that the integrated modelling approach provides reliable parameter estimates in the presence of measurement noise and that bias and variance of these estimates are reduced compared to conventional approaches. The application to experimental data allowed the parametrization and subsequent refinement of the model using additional mechanisms. Among other results, model-based hypothesis testing predicted lymphatic vessel-dependent concentration of heparan sulfate, the binding partner of CCL21. The selected model provided an accurate description of the experimental data and was partially validated using published data. Our findings demonstrate that integrated statistical modelling of whole imaging data is computationally feasible and can provide novel biological insights.},
  author       = {Hross, Sabrina and Theis, Fabian J. and Sixt, Michael K and Hasenauer, Jan},
  issn         = {1742-5689},
  journal      = {Journal of the Royal Society Interface},
  number       = {149},
  publisher    = {Royal Society Publishing},
  title        = {{Mechanistic description of spatial processes using integrative modelling of noise-corrupted imaging data}},
  doi          = {10.1098/rsif.2018.0600},
  volume       = {15},
  year         = {2018},
}

@article{5859,
  abstract     = {The emergence of syntax during childhood is a remarkable example of how complex correlations unfold in nonlinear ways through development. In particular, rapid transitions seem to occur as children reach the age of two, which seems to separate a two-word, tree-like network of syntactic relations among words from the scale-free graphs associated with the adult, complex grammar. Here, we explore the evolution of syntax networks through language acquisition using the chromatic number, which captures the transition and provides a natural link to standard theories on syntactic structures. The data analysis is compared to a null model of network growth dynamics which is shown to display non-trivial and sensible differences. At a more general level, we observe that the chromatic classes define independent regions of the graph, and thus, can be interpreted as the footprints of incompatibility relations, somewhat as opposed to modularity considerations.},
  author       = {Corominas-Murtra, Bernat and Fibla, Martí Sànchez and Valverde, Sergi and Solé, Ricard},
  issn         = {2054-5703},
  journal      = {Royal Society Open Science},
  number       = {12},
  publisher    = {The Royal Society},
  title        = {{Chromatic transitions in the emergence of syntax networks}},
  doi          = {10.1098/rsos.181286},
  volume       = {5},
  year         = {2018},
}

@article{5860,
  abstract     = {A major problem for evolutionary theory is understanding the so-called open-ended nature of evolutionary change, from its definition to its origins. Open-ended evolution (OEE) refers to the unbounded increase in complexity that seems to characterize evolution on multiple scales. This property seems to be a characteristic feature of biological and technological evolution and is strongly tied to the generative potential associated with combinatorics, which allows the system to grow and expand their available state spaces. Interestingly, many complex systems presumably displaying OEE, from language to proteins, share a common statistical property: the presence of Zipf's Law. Given an inventory of basic items (such as words or protein domains) required to build more complex structures (sentences or proteins) Zipf's Law tells us that most of these elements are rare whereas a few of them are extremely common. Using algorithmic information theory, in this paper we provide a fundamental definition for open-endedness, which can be understood as postulates. Its statistical counterpart, based on standard Shannon information theory, has the structure of a variational problem which is shown to lead to Zipf's Law as the expected consequence of an evolutionary process displaying OEE. We further explore the problem of information conservation through an OEE process and we conclude that statistical information (standard Shannon information) is not conserved, resulting in the paradoxical situation in which the increase of information content has the effect of erasing itself. We prove that this paradox is solved if we consider non-statistical forms of information. This last result implies that standard information theory may not be a suitable theoretical framework to explore the persistence and increase of the information content in OEE systems.},
  author       = {Corominas-Murtra, Bernat and Seoane, Luís F. and Solé, Ricard},
  issn         = {1742-5689},
  journal      = {Journal of the Royal Society Interface},
  number       = {149},
  publisher    = {Royal Society Publishing},
  title        = {{Zipf's Law, unbounded complexity and open-ended evolution}},
  doi          = {10.1098/rsif.2018.0395},
  volume       = {15},
  year         = {2018},
}

@article{5861,
  abstract     = {In zebrafish larvae, it is the cell type that determines how the cell responds to a chemokine signal.},
  author       = {Alanko, Jonna H and Sixt, Michael K},
  issn         = {2050-084X},
  journal      = {eLife},
  publisher    = {eLife Sciences Publications},
  title        = {{The cell sets the tone}},
  doi          = {10.7554/eLife.37888},
  volume       = {7},
  year         = {2018},
}

@article{5888,
  abstract     = {Despite the remarkable number of scientific breakthroughs of the last 100 years, the treatment of neurodevelopmental
disorders (e.g., autism spectrum disorder, intellectual disability) remains a great challenge. Recent advancements in
genomics, such as whole-exome or whole-genome sequencing, have enabled scientists to identify numerous
mutations underlying neurodevelopmental disorders. Given the few hundred risk genes that have been discovered,
the etiological variability and the heterogeneous clinical presentation, the need for genotype — along with phenotype-
based diagnosis of individual patients has become a requisite. In this review we look at recent advancements in
genomic analysis and their translation into clinical practice.},
  author       = {Tarlungeanu, Dora-Clara and Novarino, Gaia},
  issn         = {2092-6413},
  journal      = {Experimental \& Molecular Medicine},
  number       = {8},
  publisher    = {Springer Nature},
  title        = {{Genomics in neurodevelopmental disorders: an avenue to personalized medicine}},
  doi          = {10.1038/s12276-018-0129-7},
  volume       = {50},
  year         = {2018},
}

@incollection{59,
  abstract     = {Graph-based games are an important tool in computer science. They have applications in synthesis, verification, refinement, and far beyond. We review graph-based games with objectives on infinite plays. We give definitions and algorithms to solve the games and to give a winning strategy. The objectives we consider are mostly Boolean, but we also look at quantitative graph-based games and their objectives. Synthesis aims to turn temporal logic specifications into correct reactive systems. We explain the reduction of synthesis to graph-based games (or equivalently tree automata) using synthesis of LTL specifications as an example. We treat the classical approach that uses determinization of parity automata and more modern approaches.},
  author       = {Bloem, Roderick and Chatterjee, Krishnendu and Jobstmann, Barbara},
  booktitle    = {Handbook of Model Checking},
  editor       = {Henzinger, Thomas A and Clarke, Edmund M. and Veith, Helmut and Bloem, Roderick},
  isbn         = {978-3-319-10574-1},
  pages        = {921--962},
  publisher    = {Springer},
  title        = {{Graph games and reactive synthesis}},
  doi          = {10.1007/978-3-319-10575-8_27},
  year         = {2018},
}

@article{5914,
  abstract     = {With the advent of optogenetics, it became possible to change the activity of a targeted population of neurons in a temporally controlled manner. To combine the advantages of 60-channel in vivo tetrode recording and laser-based optogenetics, we have developed a closed-loop recording system that allows for the actual electrophysiological signal to be used as a trigger for the laser light mediating the optogenetic intervention. We have optimized the weight, size, and shape of the corresponding implant to make it compatible with the size, force, and movements of a behaving mouse, and we have shown that the system can efficiently block sharp wave ripple (SWR) events using those events themselves as a trigger. To demonstrate the full potential of the optogenetic recording system we present a pilot study addressing the contribution of SWR events to learning in a complex behavioral task.},
  author       = {Rangel Guerrero, Dámaris K and Donnett, James G. and Csicsvari, Jozsef L and Kovács, Krisztián},
  journal      = {eNeuro},
  number       = {4},
  publisher    = {Society for Neuroscience},
  title        = {{Tetrode recording from the hippocampus of behaving mice coupled with four-point-irradiation closed-loop optogenetics: A technique to study the contribution of Hippocampal SWR events to learning}},
  doi          = {10.1523/ENEURO.0087-18.2018},
  volume       = {5},
  year         = {2018},
}

@inproceedings{5959,
  abstract     = {Formalizing properties of systems with continuous dynamics is a challenging task. In this paper, we propose a formal framework for specifying and monitoring rich temporal properties of real-valued signals. We introduce signal first-order logic (SFO) as a specification language that combines first-order logic with linear-real arithmetic and unary function symbols interpreted as piecewise-linear signals. We first show that while the satisfiability problem for SFO is undecidable, its membership and monitoring problems are decidable. We develop an offline monitoring procedure for SFO that has polynomial complexity in the size of the input trace and the specification, for a fixed number of quantifiers and function symbols. We show that the algorithm has computation time linear in the size of the input trace for the important fragment of bounded-response specifications interpreted over input traces with finite variability. We can use our results to extend signal temporal logic with first-order quantifiers over time and value parameters, while preserving its efficient monitoring. We finally demonstrate the practical appeal of our logic through a case study in the micro-electronics domain.},
  author       = {Bakhirkin, Alexey and Ferrere, Thomas and Henzinger, Thomas A and Nickovic, Dejan},
  booktitle    = {2018 International Conference on Embedded Software},
  isbn         = {9781538655603},
  location     = {Turin, Italy},
  pages        = {1--10},
  publisher    = {IEEE},
  title        = {{Keynote: The first-order logic of signals}},
  doi          = {10.1109/emsoft.2018.8537203},
  year         = {2018},
}

@article{5960,
  abstract     = {In this paper we present a reliable method to verify the existence of loops along the uncertain trajectory of a robot, based on proprioceptive measurements only, within a bounded-error context. The loop closure detection is one of the key points in simultaneous localization and mapping (SLAM) methods, especially in homogeneous environments with difficult scenes recognitions. The proposed approach is generic and could be coupled with conventional SLAM algorithms to reliably reduce their computing burden, thus improving the localization and mapping processes in the most challenging environments such as unexplored underwater extents. To prove that a robot performed a loop whatever the uncertainties in its evolution, we employ the notion of topological degree that originates in the field of differential topology. We show that a verification tool based on the topological degree is an optimal method for proving robot loops. This is demonstrated both on datasets from real missions involving autonomous underwater vehicles and by a mathematical discussion.},
  author       = {Rohou, Simon and Franek, Peter and Aubry, Clément and Jaulin, Luc},
  issn         = {1741-3176},
  journal      = {The International Journal of Robotics Research},
  number       = {12},
  pages        = {1500--1516},
  publisher    = {SAGE Publications},
  title        = {{Proving the existence of loops in robot trajectories}},
  doi          = {10.1177/0278364918808367},
  volume       = {37},
  year         = {2018},
}

@inproceedings{5961,
  abstract     = {The area of machine learning has made considerable progress over the past decade, enabled by the widespread availability of large datasets, as well as by improved algorithms and models. Given the large computational demands of machine learning workloads, parallelism, implemented either through single-node concurrency or through multi-node distribution, has been a third key ingredient to advances in machine learning.
The goal of this tutorial is to provide the audience with an overview of standard distribution techniques in machine learning, with an eye towards the intriguing trade-offs between synchronization and communication costs of distributed machine learning algorithms, on the one hand, and their convergence, on the other.The tutorial will focus on parallelization strategies for the fundamental stochastic gradient descent (SGD) algorithm, which is a key tool when training machine learning models, from classical instances such as linear regression, to state-of-the-art neural network architectures.
The tutorial will describe the guarantees provided by this algorithm in the sequential case, and then move on to cover both shared-memory and message-passing parallelization strategies, together with the guarantees they provide, and corresponding trade-offs. The presentation will conclude with a broad overview of ongoing research in distributed and concurrent machine learning. The tutorial will assume no prior knowledge beyond familiarity with basic concepts in algebra and analysis.
},
  author       = {Alistarh, Dan-Adrian},
  booktitle    = {Proceedings of the 2018 ACM Symposium on Principles of Distributed Computing - PODC '18},
  isbn         = {9781450357951},
  location     = {Egham, United Kingdom},
  pages        = {487--488},
  publisher    = {ACM Press},
  title        = {{A brief tutorial on distributed and concurrent machine learning}},
  doi          = {10.1145/3212734.3212798},
  year         = {2018},
}

@inproceedings{5962,
  abstract     = {Stochastic Gradient Descent (SGD) is a fundamental algorithm in machine learning, representing the optimization backbone for training several classic models, from regression to neural networks. Given the recent practical focus on distributed machine learning, significant work has been dedicated to the convergence properties of this algorithm under the inconsistent and noisy updates arising from execution in a distributed environment. However, surprisingly, the convergence properties of this classic algorithm in the standard shared-memory model are still not well-understood. In this work, we address this gap, and provide new convergence bounds for lock-free concurrent stochastic gradient descent, executing in the classic asynchronous shared memory model, against a strong adaptive adversary. Our results give improved upper and lower bounds on the ``price of asynchrony'' when executing the fundamental SGD algorithm in a concurrent setting. They show that this classic optimization tool can converge faster and with a wider range of parameters than previously known under asynchronous iterations. At the same time, we exhibit a fundamental trade-off between the maximum delay in the system and the rate at which SGD can converge, which governs the set of parameters under which this algorithm can still work efficiently.},
  author       = {Alistarh, Dan-Adrian and De Sa, Christopher and Konstantinov, Nikola H},
  booktitle    = {Proceedings of the 2018 ACM Symposium on Principles of Distributed Computing - PODC '18},
  isbn         = {9781450357951},
  location     = {Egham, United Kingdom},
  pages        = {169--178},
  publisher    = {ACM Press},
  title        = {{The convergence of stochastic gradient descent in asynchronous shared memory}},
  doi          = {10.1145/3212734.3212763},
  year         = {2018},
}

@inproceedings{5963,
  abstract     = {There has been significant progress in understanding the parallelism inherent to iterative sequential algorithms: for many classic algorithms, the depth of the dependence structure is now well understood, and scheduling techniques have been developed to exploit this shallow dependence structure for efficient parallel implementations. A related, applied research strand has studied methods by which certain iterative task-based algorithms can be efficiently parallelized via relaxed concurrent priority schedulers. These allow for high concurrency when inserting and removing tasks, at the cost of executing superfluous work due to the relaxed semantics of the scheduler. In this work, we take a step towards unifying these two research directions, by showing that there exists a family of relaxed priority schedulers that can efficiently and deterministically execute classic iterative algorithms such as greedy maximal independent set (MIS) and matching. Our primary result shows that, given a randomized scheduler with an expected relaxation factor of k in terms of the maximum allowed priority inversions on a task, and any graph on n vertices, the scheduler is able to execute greedy MIS with only an additive factor of poly(k) expected additional iterations compared to an exact (but not scalable) scheduler. This counter-intuitive result demonstrates that the overhead of relaxation when computing MIS is not dependent on the input size or structure of the input graph. Experimental results show that this overhead can be clearly offset by the gain in performance due to the highly scalable scheduler. In sum, we present an efficient method to deterministically parallelize iterative sequential algorithms, with provable runtime guarantees in terms of the number of executed tasks to completion.},
  author       = {Alistarh, Dan-Adrian and Brown, Trevor A and Kopinsky, Justin and Nadiradze, Giorgi},
  booktitle    = {Proceedings of the 2018 ACM Symposium on Principles of Distributed Computing - PODC '18},
  isbn         = {9781450357951},
  location     = {Egham, United Kingdom},
  pages        = {377--386},
  publisher    = {ACM Press},
  title        = {{Relaxed schedulers can efficiently parallelize iterative algorithms}},
  doi          = {10.1145/3212734.3212756},
  year         = {2018},
}

@inproceedings{5964,
  abstract     = {A standard design pattern found in many concurrent data structures, such as hash tables or ordered containers, is an alternation of parallelizable sections that incur no data conflicts and critical sections that must run sequentially and are protected with locks. A lock can be viewed as a queue that arbitrates the order in which the critical sections are executed, and a natural question is whether we can use stochastic analysis to predict the resulting throughput. As a preliminary evidence to the affirmative, we describe a simple model that can be used to predict the throughput of coarse-grained lock-based algorithms. We show that our model works well for CLH lock, and we expect it to work for other popular lock designs such as TTAS, MCS, etc.},
  author       = {Aksenov, Vitaly and Alistarh, Dan-Adrian and Kuznetsov, Petr},
  booktitle    = {Proceedings of the 2018 ACM Symposium on Principles of Distributed Computing - PODC '18},
  isbn         = {9781450357951},
  location     = {Egham, United Kingdom},
  pages        = {411--413},
  publisher    = {ACM Press},
  title        = {{Brief Announcement: Performance prediction for coarse-grained locking}},
  doi          = {10.1145/3212734.3212785},
  year         = {2018},
}

@inproceedings{5965,
  abstract     = {Relaxed concurrent data structures have become increasingly popular, due to their scalability in graph processing and machine learning applications (\cite{Nguyen13, gonzalez2012powergraph}). Despite considerable interest, there exist families of natural, high performing randomized relaxed concurrent data structures, such as the popular MultiQueue~\cite{MQ} pattern for implementing relaxed priority queue data structures, for which no guarantees are known in the concurrent setting~\cite{AKLN17}. Our main contribution is in showing for the first time that, under a set of analytic assumptions, a family of relaxed concurrent data structures, including variants of MultiQueues, but also a new approximate counting algorithm we call the MultiCounter, provides strong probabilistic guarantees on the degree of relaxation with respect to the sequential specification, in arbitrary concurrent executions. We formalize these guarantees via a new correctness condition called distributional linearizability, tailored to concurrent implementations with randomized relaxations. Our result is based on a new analysis of an asynchronous variant of the classic power-of-two-choices load balancing algorithm, in which placement choices can be based on inconsistent, outdated information (this result may be of independent interest). We validate our results empirically, showing that the MultiCounter algorithm can implement scalable relaxed timestamps.},
  author       = {Alistarh, Dan-Adrian and Brown, Trevor A and Kopinsky, Justin and Li, Jerry Z. and Nadiradze, Giorgi},
  booktitle    = {Proceedings of the 30th on Symposium on Parallelism in Algorithms and Architectures - SPAA '18},
  isbn         = {9781450357999},
  location     = {Vienna, Austria},
  pages        = {133--142},
  publisher    = {ACM Press},
  title        = {{Distributionally linearizable data structures}},
  doi          = {10.1145/3210377.3210411},
  year         = {2018},
}

@inproceedings{5966,
  abstract     = {The transactional conflict problem arises in transactional systems whenever two or more concurrent transactions clash on a data item. While the standard solution to such conflicts is to immediately abort one of the transactions, some practical systems consider the alternative of delaying conflict resolution for a short interval, which may allow one of the transactions to commit. The challenge in the transactional conflict problem is to choose the optimal length of this delay interval so as to minimize the overall running time penalty for the conflicting transactions. In this paper, we propose a family of optimal online algorithms for the transactional conflict problem. Specifically, we consider variants of this problem which arise in different implementations of transactional systems, namely ``requestor wins'' and ``requestor aborts'' implementations: in the former, the recipient of a coherence request is aborted, whereas in the latter, it is the requestor which has to abort. Both strategies are implemented by real systems. We show that the requestor aborts case can be reduced to a classic instance of the ski rental problem, while the requestor wins case leads to a new version of this classical problem, for which we derive optimal deterministic and randomized algorithms. Moreover, we prove that, under a simplified adversarial model, our algorithms are constant-competitive with the offline optimum in terms of throughput. We validate our algorithmic results empirically through a hardware simulation of hardware transactional memory (HTM), showing that our algorithms can lead to non-trivial performance improvements for classic concurrent data structures.},
  author       = {Alistarh, Dan-Adrian and Haider, Syed Kamran and Kübler, Raphael and Nadiradze, Giorgi},
  booktitle    = {Proceedings of the 30th on Symposium on Parallelism in Algorithms and Architectures - SPAA '18},
  isbn         = {9781450357999},
  location     = {Vienna, Austria},
  pages        = {383--392},
  publisher    = {ACM Press},
  title        = {{The transactional conflict problem}},
  doi          = {10.1145/3210377.3210406},
  year         = {2018},
}

@inproceedings{5967,
  abstract     = {The Big Match is a multi-stage two-player game. In each stage Player 1 hides one or two pebbles in his hand, and his opponent has to guess that number; Player 1 loses a point if Player 2 is correct, and otherwise he wins a point. As soon as Player 1 hides one pebble, the players cannot change their choices in any future stage.
Blackwell and Ferguson (1968) give an ε-optimal strategy for Player 1 that hides, in each stage, one pebble with a probability that depends on the entire past history. Any strategy that depends just on the clock or on a finite memory is worthless. The long-standing natural open problem has been whether every strategy that depends just on the clock and a finite memory is worthless. We prove that there is such a strategy that is ε-optimal. In fact, we show that just two states of memory are sufficient.
},
  author       = {Hansen, Kristoffer Arnsfelt and Ibsen-Jensen, Rasmus and Neyman, Abraham},
  booktitle    = {Proceedings of the 2018 ACM Conference on Economics and Computation - EC '18},
  isbn         = {9781450358293},
  location     = {Ithaca, NY, United States},
  pages        = {149--150},
  publisher    = {ACM Press},
  title        = {{The Big Match with a clock and a bit of memory}},
  doi          = {10.1145/3219166.3219198},
  year         = {2018},
}

@article{5971,
  abstract     = {We consider a Wigner-type ensemble, i.e. large hermitian $N\times N$ random matrices $H=H^*$ with centered independent entries and with a general matrix of variances $S_{xy}=\mathbb{E}|H_{xy}|^2$. The norm of $H$ is asymptotically given by the maximum of the support of the self-consistent density of states. We establish a bound on this maximum in terms of norms of powers of $S$ that substantially improves the earlier bound $2\|S\|_\infty^{1/2}$ given in [O. Ajanki, L. Erdős and T. Krüger, Universality for general Wigner-type matrices, Prob. Theor. Rel. Fields 169 (2017) 667--727]. The key element of the proof is an effective Markov chain approximation for the contributions of the weighted Dyck paths appearing in the iterative solution of the corresponding Dyson equation.},
  author       = {Erdős, László and Mühlbacher, Peter},
  issn         = {2010-3271},
  journal      = {Random Matrices: Theory and Applications},
  publisher    = {World Scientific Publishing},
  title        = {{Bounds on the norm of Wigner-type random matrices}},
  doi          = {10.1142/s2010326319500096},
  year         = {2018},
}

