@article{11581,
  abstract     = {Using wide-field narrow-band surveys, we provide a new measurement of the z = 6.6 Lymanα emitter (LAE) luminosity function (LF), which constrains the bright end for the first time. We use a combination of archival narrow-band NB921 data in UDS and new NB921 measurements in SA22 and COSMOS/UltraVISTA, all observed with the Subaru telescope, with a total area of ∼5 deg2. We exclude lower redshift interlopers by using broad-band optical and near-infrared photometry and also exclude three supernovae with data split over multiple epochs. Combining the UDS and COSMOS samples, we find no evolution of the bright end of the Lyα LF between z = 5.7 and 6.6, which is supported by spectroscopic follow-up, and conclude that sources with Himiko-like luminosity are not as rare as previously thought, with number densities of ∼1.5 × 10−5 Mpc−3. Combined with our wide-field SA22 measurements, our results indicate a non-Schechter-like bright end of the LF at z = 6.6 and a different evolution of observed faint and bright LAEs, overcoming cosmic variance. This differential evolution is also seen in the spectroscopic follow-up of UV-selected galaxies and is now also confirmed for LAEs, and we argue that it may be an effect of reionization. Using a toy model, we show that such differential evolution of the LF is expected, since brighter sources are able to ionize their surroundings earlier, such that Lyα photons are able to escape. Our targets are excellent candidates for detailed follow-up studies and provide the possibility to give a unique view on the earliest stages in the formation of galaxies and reionization process.},
  author       = {Matthee, Jorryt J and Sobral, David and Santos, Sérgio and Röttgering, Huub and Darvish, Behnam and Mobasher, Bahram},
  issn         = {1365-2966},
  journal      = {Monthly Notices of the Royal Astronomical Society},
  keywords     = {Space and Planetary Science, Astronomy and Astrophysics},
  number       = {1},
  pages        = {400--417},
  publisher    = {Oxford University Press},
  title        = {{Identification of the brightest Lyα emitters at z = 6.6: implications for the evolution of the luminosity function in the reionization era}},
  doi          = {10.1093/mnras/stv947},
  volume       = {451},
  year         = {2015},
}

@article{11668,
  author       = {Colini-Baldeschi, Riccardo and Leonardi, Stefano and Henzinger, Monika H and Starnberger, Martin},
  title        = {{On multiple keyword sponsored search auctions with budgets}},
  journal      = {ACM Transactions on Economics and Computation},
  volume       = {4},
  number       = {1},
  year         = {2015},
  publisher    = {Association for Computing Machinery},
  issn         = {2167-8383},
  doi          = {10.1145/2818357},
  keywords     = {Algorithms, Economics, Clinching ascending auction, auctions with budgets, Sponsored search auctions},
  abstract     = {We study multiple keyword sponsored search auctions with budgets. Each keyword has multiple ad slots with a click-through rate. The bidders have additive valuations, which are linear in the click-through rates, and budgets, which are restricting their overall payments. Additionally, the number of slots per keyword assigned to a bidder is bounded.

We show the following results: (1) We give the first mechanism for multiple keywords, where click-through rates differ among slots. Our mechanism is incentive compatible in expectation, individually rational in expectation, and Pareto optimal. (2) We study the combinatorial setting, where each bidder is only interested in a subset of the keywords. We give an incentive compatible, individually rational, Pareto-optimal, and deterministic mechanism for identical click-through rates. (3) We give an impossibility result for incentive compatible, individually rational, Pareto-optimal, and deterministic mechanisms for bidders with diminishing marginal valuations.},
}

@article{11669,
  author       = {Dütting, Paul and Henzinger, Monika H and Starnberger, Martin},
  title        = {{Auctions for heterogeneous items and budget limits}},
  journal      = {ACM Transactions on Economics and Computation},
  volume       = {4},
  number       = {1},
  year         = {2015},
  publisher    = {Association for Computing Machinery},
  issn         = {2167-8383},
  doi          = {10.1145/2818351},
  keywords     = {Algorithmic game theory, auction theory, Clinching auction, Pareto optimality, Budget limits},
  abstract     = {We study individual rational, Pareto-optimal, and incentive compatible mechanisms for auctions with heterogeneous items and budget limits. We consider settings with multiunit demand and additive valuations. For single-dimensional valuations we prove a positive result for randomized mechanisms, and a negative result for deterministic mechanisms. While the positive result allows for private budgets, the negative result is for public budgets. For multidimensional valuations and public budgets we prove an impossibility result that applies to deterministic and randomized mechanisms. Taken together this shows the power of randomization in certain settings with heterogeneous items, but it also shows its limitations.},
}

@article{11670,
  author       = {Dütting, Paul and Henzinger, Monika H and Weber, Ingmar},
  title        = {{An expressive mechanism for auctions on the web}},
  journal      = {ACM Transactions on Economics and Computation},
  volume       = {4},
  number       = {1},
  year         = {2015},
  publisher    = {Association for Computing Machinery},
  issn         = {2167-8383},
  doi          = {10.1145/2716312},
  keywords     = {Computational Mathematics, Marketing, Economics and Econometrics, Statistics and Probability, Computer Science (miscellaneous)},
  abstract     = {Auctions are widely used on the Web. Applications range from sponsored search to platforms such as eBay. In these and in many other applications the auctions in use are single-/multi-item auctions with unit demand. The main drawback of standard mechanisms for this type of auctions, such as VCG and GSP, is the limited expressiveness that they offer to the bidders. The General Auction Mechanism (GAM) of Aggarwal et al. [2009] takes a first step toward addressing the problem of limited expressiveness by computing a bidder optimal, envy-free outcome for linear utility functions with identical slopes and a single discontinuity per bidder-item pair. We show that in many practical situations this does not suffice to adequately model the preferences of the bidders, and we overcome this problem by presenting the first mechanism for piecewise linear utility functions with nonidentical slopes and multiple discontinuities. Our mechanism runs in polynomial time. Like GAM it is incentive compatible for inputs that fulfill a certain nondegeneracy assumption, but our requirement is more general than the requirement of GAM. For discontinuous utility functions that are nondegenerate as well as for continuous utility functions the outcome of our mechanism is a competitive equilibrium. We also show how our mechanism can be used to compute approximately bidder optimal, envy-free outcomes for a general class of continuous utility functions via piecewise linear approximation. Finally, we prove hardness results for even more expressive settings.},
}

@inproceedings{11773,
  abstract     = {Ad exchanges are an emerging platform for trading advertisement slots on the web with billions of dollars revenue per year. Every time a user visits a web page, the publisher of that web page can ask an ad exchange to auction off the ad slots on this page to determine which advertisements are shown at which price. Due to the high volume of traffic, ad networks typically act as mediators for individual advertisers at ad exchanges. If multiple advertisers in an ad network are interested in the ad slots of the same auction, the ad network might use a “local” auction to resell the obtained ad slots among its advertisers.

In this work we want to deepen the theoretical understanding of these new markets by analyzing them from the viewpoint of combinatorial auctions. Prior work studied mostly single-item auctions, while we allow the advertisers to express richer preferences over multiple items. We develop a game-theoretic model for the entanglement of the central auction at the ad exchange with the local auctions at the ad networks. We consider the incentives of all three involved parties and suggest a three-party competitive equilibrium, an extension of the Walrasian equilibrium that ensures envy-freeness for all participants. We show the existence of a three-party competitive equilibrium and a polynomial-time algorithm to find one for gross-substitute bidder valuations.},
  author       = {Ben-Zwi, Oren and Henzinger, Monika H and Loitzenbauer, Veronika},
  booktitle    = {11th International Conference on Web and Internet Economics},
  isbn         = {9783662489949},
  issn         = {0302-9743},
  location     = {Amsterdam, Netherlands},
  pages        = {104--117},
  publisher    = {Springer Nature},
  title        = {{Ad exchange: Envy-free auctions with mediators}},
  doi          = {10.1007/978-3-662-48995-6_8},
  volume       = {9470},
  year         = {2015},
}

@inproceedings{11774,
  abstract     = {Combinatorial auctions (CA) are a well-studied area in algorithmic mechanism design. However, contrary to the standard model, empirical studies suggest that a bidder’s valuation often does not depend solely on the goods assigned to him. For instance, in adwords auctions an advertiser might not want his ads to be displayed next to his competitors’ ads. In this paper, we propose and analyze several natural graph-theoretic models that incorporate such negative externalities, in which bidders form a directed conflict graph with maximum out-degree Δ. We design algorithms and truthful mechanisms for social welfare maximization that attain approximation ratios depending on Δ.

For CA, our results are twofold: (1) A lottery that eliminates conflicts by discarding bidders/items independent of the bids. It allows to apply any truthful 𝛼-approximation mechanism for conflict-free valuations and yields an 𝒪(𝛼Δ)-approximation mechanism. (2) For fractionally sub-additive valuations, we design a rounding algorithm via a novel combination of a semi-definite program and a linear program, resulting in a cone program; the approximation ratio is 𝒪((ΔloglogΔ)/logΔ). The ratios are almost optimal given existing hardness results.

For adwords auctions, we present several algorithms for the most relevant scenario when the number of items is small. In particular, we design a truthful mechanism with approximation ratio 𝑜(Δ) when the number of items is only logarithmic in the number of bidders.},
  author       = {Cheung, Yun Kuen and Henzinger, Monika H and Hoefer, Martin and Starnberger, Martin},
  booktitle    = {11th International Conference on Web and Internet Economics},
  isbn         = {9783662489949},
  issn         = {0302-9743},
  location     = {Amsterdam, Netherlands},
  pages        = {230--243},
  publisher    = {Springer Nature},
  title        = {{Combinatorial auctions with conflict-based externalities}},
  doi          = {10.1007/978-3-662-48995-6_17},
  volume       = {9470},
  year         = {2015},
}

@inproceedings{11785,
  abstract     = {Recently we presented the first algorithm for maintaining the set of nodes reachable from a source node in a directed graph that is modified by edge deletions with 𝑜(𝑚𝑛) total update time, where 𝑚 is the number of edges and 𝑛 is the number of nodes in the graph [Henzinger et al. STOC 2014]. The algorithm is a combination of several different algorithms, each for a different 𝑚 vs. 𝑛 trade-off. For the case of 𝑚=Θ(𝑛1.5) the running time is 𝑂(𝑛2.47), just barely below 𝑚𝑛=Θ(𝑛2.5). In this paper we simplify the previous algorithm using new algorithmic ideas and achieve an improved running time of 𝑂̃ (min(𝑚7/6𝑛2/3,𝑚3/4𝑛5/4+𝑜(1),𝑚2/3𝑛4/3+𝑜(1)+𝑚3/7𝑛12/7+𝑜(1))). This gives, e.g., 𝑂(𝑛2.36) for the notorious case 𝑚=Θ(𝑛1.5). We obtain the same upper bounds for the problem of maintaining the strongly connected components of a directed graph undergoing edge deletions. Our algorithms are correct with high probability against an oblivious adversary.},
  author       = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon},
  booktitle    = {42nd International Colloquium on Automata, Languages and Programming},
  isbn         = {9783662476710},
  issn         = {0302-9743},
  location     = {Kyoto, Japan},
  pages        = {725--736},
  publisher    = {Springer Nature},
  title        = {{Improved algorithms for decremental single-source reachability on directed graphs}},
  doi          = {10.1007/978-3-662-47672-7_59},
  volume       = {9134},
  year         = {2015},
}

@inproceedings{11786,
  abstract     = {In this paper, we develop a dynamic version of the primal-dual method for optimization problems, and apply it to obtain the following results. (1) For the dynamic set-cover problem, we maintain an 𝑂(𝑓2)-approximately optimal solution in 𝑂(𝑓⋅log(𝑚+𝑛)) amortized update time, where 𝑓 is the maximum “frequency” of an element, 𝑛 is the number of sets, and 𝑚 is the maximum number of elements in the universe at any point in time. (2) For the dynamic 𝑏-matching problem, we maintain an 𝑂(1)-approximately optimal solution in 𝑂(log3𝑛) amortized update time, where 𝑛 is the number of nodes in the graph.},
  author       = {Bhattacharya, Sayan and Henzinger, Monika H and Italiano, Giuseppe F.},
  booktitle    = {42nd International Colloquium on Automata, Languages and Programming},
  isbn         = {9783662476710},
  issn         = {0302-9743},
  location     = {Kyoto, Japan},
  pages        = {206--218},
  publisher    = {Springer Nature},
  title        = {{Design of dynamic algorithms via primal-dual method}},
  doi          = {10.1007/978-3-662-47672-7_17},
  volume       = {9134},
  year         = {2015},
}

@inproceedings{11787,
  abstract     = {We present faster algorithms for computing the 2-edge and 2-vertex strongly connected components of a directed graph. While in undirected graphs the 2-edge and 2-vertex connected components can be found in linear time, in directed graphs with m edges and n vertices only rather simple O(m n)-time algorithms were known. We use a hierarchical sparsification technique to obtain algorithms that run in time 𝑂(𝑛2). For 2-edge strongly connected components our algorithm gives the first running time improvement in 20 years. Additionally we present an 𝑂(𝑚2/log𝑛)-time algorithm for 2-edge strongly connected components, and thus improve over the O(m n) running time also when 𝑚=𝑂(𝑛). Our approach extends to k-edge and k-vertex strongly connected components for any constant k with a running time of 𝑂(𝑛2log𝑛) for k-edge-connectivity and 𝑂(𝑛3) for k-vertex-connectivity.},
  author       = {Henzinger, Monika H and Krinninger, Sebastian and Loitzenbauer, Veronika},
  booktitle    = {42nd International Colloquium on Automata, Languages and Programming},
  isbn         = {9783662476710},
  issn         = {0302-9743},
  location     = {Kyoto, Japan},
  pages        = {713--724},
  publisher    = {Springer Nature},
  title        = {{Finding 2-edge and 2-vertex strongly connected components in quadratic time}},
  doi          = {10.1007/978-3-662-47672-7_58},
  volume       = {9134},
  year         = {2015},
}

@inproceedings{11788,
  abstract     = {Ad exchanges are becoming an increasingly popular way to sell advertisement slots on the internet. An ad exchange is basically a spot market for ad impressions. A publisher who has already signed contracts reserving advertisement impressions on his pages can choose between assigning a new ad impression for a new page view to a contracted advertiser or to sell it at an ad exchange. This leads to an online revenue maximization problem for the publisher. Given a new impression to sell decide whether (a) to assign it to a contracted advertiser and if so to which one or (b) to sell it at the ad exchange and if so at which reserve price. We make no assumptions about the distribution of the advertiser valuations that participate in the ad exchange and show that there exists a simple primal-dual based online algorithm, whose lower bound for the revenue converges to 𝑅𝐴𝐷𝑋+𝑅𝐴(1−1/𝑒), where 𝑅𝐴𝐷𝑋 is the revenue that the optimum algorithm achieves from the ad exchange and 𝑅𝐴 is the revenue that the optimum algorithm achieves from the contracted advertisers.},
  author       = {Dvořák, Wolfgang and Henzinger, Monika H},
  booktitle    = {12th International Workshop on Approximation and Online Algorithms},
  issn         = {0302-9743},
  location     = {Wroclaw, Poland},
  pages        = {156--167},
  publisher    = {Springer Nature},
  title        = {{Online ad assignment with an ad exchange}},
  doi          = {10.1007/978-3-319-18263-6_14},
  volume       = {8952},
  year         = {2015},
}

@article{7765,
  author       = {Goodrich, Carl Peter and Liu, Andrea J. and Nagel, Sidney R.},
  title        = {{The principle of independent bond-level response: Tuning by pruning to exploit disorder for global behavior}},
  journal      = {Physical Review Letters},
  volume       = {114},
  number       = {22},
  year         = {2015},
  publisher    = {American Physical Society},
  issn         = {0031-9007},
  doi          = {10.1103/physrevlett.114.225501},
  abstract     = {We introduce a principle unique to disordered solids wherein the contribution of any bond to one global perturbation is uncorrelated with its contribution to another. Coupled with sufficient variability in the contributions of different bonds, this “independent bond-level response” paves the way for the design of real materials with unusual and exquisitely tuned properties. To illustrate this, we choose two global perturbations: compression and shear. By applying a bond removal procedure that is both simple and experimentally relevant to remove a very small fraction of bonds, we can drive disordered spring networks to both the incompressible and completely auxetic limits of mechanical behavior.},
}

@article{7766,
  author       = {Sussman, Daniel M. and Goodrich, Carl Peter and Liu, Andrea J. and Nagel, Sidney R.},
  title        = {{Disordered surface vibrations in jammed sphere packings}},
  journal      = {Soft Matter},
  volume       = {11},
  number       = {14},
  pages        = {2745--2751},
  year         = {2015},
  publisher    = {Royal Society of Chemistry},
  issn         = {1744-683X},
  doi          = {10.1039/c4sm02905d},
  abstract     = {We study the vibrational properties near a free surface of disordered spring networks derived from jammed sphere packings. In bulk systems, without surfaces, it is well understood that such systems have a plateau in the density of vibrational modes extending down to a frequency scale ω*. This frequency is controlled by ΔZ = 〈Z〉 − 2d, the difference between the average coordination of the spheres and twice the spatial dimension, d, of the system, which vanishes at the jamming transition. In the presence of a free surface we find that there is a density of disordered vibrational modes associated with the surface that extends far below ω*. The total number of these low-frequency surface modes is controlled by ΔZ, and the profile of their decay into the bulk has two characteristic length scales, which diverge as ΔZ−1/2 and ΔZ−1 as the jamming transition is approached.},
}

@article{7767,
  author       = {van Drongelen, Ruben and Pal, Anshuman and Goodrich, Carl Peter and Idema, Timon},
  title        = {{Collective dynamics of soft active particles}},
  journal      = {Physical Review E},
  volume       = {91},
  number       = {3},
  year         = {2015},
  publisher    = {American Physical Society},
  issn         = {1539-3755},
  doi          = {10.1103/physreve.91.032706},
  abstract     = {We present a model of soft active particles that leads to a rich array of collective behavior found also in dense biological swarms of bacteria and other unicellular organisms. Our model uses only local interactions, such as Vicsek-type nearest-neighbor alignment, short-range repulsion, and a local boundary term. Changing the relative strength of these interactions leads to migrating swarms, rotating swarms, and jammed swarms, as well as swarms that exhibit run-and-tumble motion, alternating between migration and either rotating or jammed states. Interestingly, although a migrating swarm moves slower than an individual particle, the diffusion constant can be up to three orders of magnitude larger, suggesting that collective motion can be highly advantageous, for example, when searching for food.},
}

@inproceedings{777,
  abstract     = {In many applications, the data is of rich structure that can be represented by a hypergraph, where the data items are represented by vertices and the associations among items are represented by hyperedges. Equivalently, we are given an input bipartite graph with two types of vertices: items, and associations (which we refer to as topics). We consider the problem of partitioning the set of items into a given number of components such that the maximum number of topics covered by a component is minimized. This is a clustering problem with various applications, e.g. partitioning of a set of information objects such as documents, images, and videos, and load balancing in the context of modern computation platforms. In this paper, we focus on the streaming computation model for this problem, in which items arrive online one at a time and each item must be assigned irrevocably to a component at its arrival time. Motivated by scalability requirements, we focus on the class of streaming computation algorithms with memory limited to be at most linear in the number of components. We show that a greedy assignment strategy is able to recover a hidden co-clustering of items under a natural set of recovery conditions. We also report results of an extensive empirical evaluation, which demonstrate that this greedy strategy yields superior performance when compared with alternative approaches.},
  author       = {Alistarh, Dan-Adrian and Iglesias, Jennifer and Vojnović, Milan},
  booktitle    = {Advances in Neural Information Processing Systems},
  pages        = {1900--1908},
  publisher    = {Neural Information Processing Systems},
  title        = {{Streaming min-max hypergraph partitioning}},
  volume       = {2015-January},
  year         = {2015},
}

@unpublished{7779,
  abstract     = {The fact that a disordered material is not constrained in its properties in
the same way as a crystal presents significant and yet largely untapped
potential for novel material design. However, unlike their crystalline
counterparts, disordered solids are not well understood. One of the primary
obstacles is the lack of a theoretical framework for thinking about disorder
and its relation to mechanical properties. To this end, we study an idealized
system of frictionless athermal soft spheres that, when compressed, undergoes a
jamming phase transition with diverging length scales and clean power-law
signatures. This critical point is the cornerstone of a much larger "jamming
scenario" that has the potential to provide the essential theoretical
foundation necessary for a unified understanding of the mechanics of disordered
solids. We begin by showing that jammed sphere packings have a valid linear
regime despite the presence of "contact nonlinearities." We then investigate
the critical nature of the transition, focusing on diverging length scales and
finite-size effects. Next, we argue that jamming plays the same role for
disordered solids as the perfect crystal plays for crystalline solids. Not only
can it be considered an idealized starting point for understanding disordered
materials, but it can even influence systems that have a relatively high amount
of crystalline order. The behavior of solids can thus be thought of as existing
on a spectrum, with the perfect crystal and the jamming transition at opposing
ends. Finally, we introduce a new principle wherein the contribution of an
individual bond to one global property is independent of its contribution to
another. This principle allows the different global responses of a disordered
system to be manipulated independently and provides a great deal of flexibility
in designing materials with unique, textured and tunable properties.},
  author       = {Goodrich, Carl Peter},
  note         = {arXiv:1510.08820},
  pages        = {242},
  title        = {{Unearthing the anticrystal: Criticality in the linear response of disordered solids}},
  year         = {2015},
}

@inproceedings{778,
  abstract     = {Several Hybrid Transactional Memory (HyTM) schemes have recently been proposed to complement the fast, but best-effort nature of Hardware Transactional Memory (HTM) with a slow, reliable software backup. However, the costs of providing concurrency between hardware and software transactions in HyTM are still not well understood. In this paper, we propose a general model for HyTM implementations, which captures the ability of hardware transactions to buffer memory accesses. The model allows us to formally quantify and analyze the amount of overhead (instrumentation) caused by the potential presence of software transactions.We prove that (1) it is impossible to build a strictly serializable HyTM implementation that has both uninstrumented reads and writes, even for very weak progress guarantees, and (2) the instrumentation cost incurred by a hardware transaction in any progressive opaque HyTM is linear in the size of the transaction’s data set.We further describe two implementations which exhibit optimal instrumentation costs for two different progress conditions. In sum, this paper proposes the first formal HyTM model and captures for the first time the trade-off between the degree of hardware-software TM concurrency and the amount of instrumentation overhead.},
  author       = {Alistarh, Dan-Adrian and Kopinsky, Justin and Kuznetsov, Petr and Ravi, Srivatsan and Shavit, Nir},
  booktitle    = {29th International Symposium on Distributed Computing},
  pages        = {185--199},
  publisher    = {Springer},
  title        = {{Inherent limitations of hybrid transactional memory}},
  doi          = {10.1007/978-3-662-48653-5_13},
  volume       = {9363},
  year         = {2015},
}

@inproceedings{779,
  abstract     = {The concurrent memory reclamation problem is that of devising a way for a deallocating thread to verify that no other concurrent threads hold references to a memory block being deallocated. To date, in the absence of automatic garbage collection, there is no satisfactory solution to this problem; existing tracking methods like hazard pointers, reference counters, or epoch-based techniques like RCU, are either prohibitively expensive or require significant programming expertise, to the extent that implementing them efficiently can be worthy of a publication. None of the existing techniques are automatic or even semi-automated. In this paper, we take a new approach to concurrent memory reclamation: instead of manually tracking access to memory locations as done in techniques like hazard pointers, or restricting shared accesses to specific epoch boundaries as in RCU, our algorithm, called ThreadScan, leverages operating system signaling to automatically detect which memory locations are being accessed by concurrent threads. Initial empirical evidence shows that ThreadScan scales surprisingly well and requires negligible programming effort beyond the standard use of Malloc and Free.},
  author       = {Alistarh, Dan-Adrian and Matveev, Alexander and Leiserson, William and Shavit, Nir},
  booktitle    = {27th ACM Symposium on Parallelism in Algorithms and Architectures},
  pages        = {123--132},
  publisher    = {ACM},
  title        = {{ThreadScan: Automatic and scalable memory reclamation}},
  doi          = {10.1145/2755573.2755600},
  volume       = {2015-June},
  year         = {2015},
}

@inproceedings{780,
  abstract     = {Population protocols are networks of finite-state agents, interacting randomly, and updating their states using simple rules. Despite their extreme simplicity, these systems have been shown to cooperatively perform complex computational tasks, such as simulating register machines to compute standard arithmetic functions. The election of a unique leader agent is a key requirement in such computational constructions. Yet, the fastest currently known population protocol for electing a leader only has linear convergence time, and it has recently been shown that no population protocol using a constant number of states per node may overcome this linear bound. In this paper, we give the first population protocol for leader election with polylogarithmic convergence time, using polylogarithmic memory states per node. The protocol structure is quite simple: each node has an associated value, and is either a leader (still in contention) or a minion (following some leader). A leader keeps incrementing its value and “defeats” other leaders in one-to-one interactions, and will drop from contention and become a minion if it meets a leader with higher value. Importantly, a leader also drops out if it meets a minion with higher absolute value. While these rules are quite simple, the proof that this algorithm achieves polylogarithmic convergence time is non-trivial. In particular, the argument combines careful use of concentration inequalities with anti-concentration bounds, showing that the leaders’ values become spread apart as the execution progresses, which in turn implies that straggling leaders get quickly eliminated. We complement our analysis with empirical results, showing that our protocol converges extremely fast, even for large network sizes.},
  author       = {Alistarh, Dan-Adrian and Gelashvili, Rati},
  booktitle    = {42nd International Colloquium on Automata, Languages and Programming},
  pages        = {479--491},
  publisher    = {Springer},
  title        = {{Polylogarithmic-time leader election in population protocols}},
  doi          = {10.1007/978-3-662-47666-6_38},
  volume       = {9135},
  year         = {2015},
}

@inproceedings{781,
  abstract     = {Population protocols, roughly defined as systems consisting of large numbers of simple identical agents, interacting at random and updating their state following simple rules, are an important research topic at the intersection of distributed computing and biology. One of the fundamental tasks that a population protocol may solve is majority: each node starts in one of two states; the goal is for all nodes to reach a correct consensus on which of the two states was initially the majority. Despite considerable research effort, known protocols for this problem are either exact but slow (taking linear parallel time to converge), or fast but approximate (with non-zero probability of error). In this paper, we show that this trade-off between precision and speed is not inherent. We present a new protocol called Average and Conquer (AVC) that solves majority exactly in expected parallel convergence time O(log n/(sε) + log n log s), where n is the number of nodes, εn is the initial node advantage of the majority state, and s = Ω(log n log log n) is the number of states the protocol employs. This shows that the majority problem can be solved exactly in time poly-logarithmic in n, provided that the memory per node is s = Ω(1/ε + log n log 1/ε). On the negative side, we establish a lower bound of Ω(1/ε) on the expected parallel convergence time for the case of four memory states per node, and a lower bound of Ω(log n) parallel time for protocols using any number of memory states per node.},
  author       = {Alistarh, Dan-Adrian and Gelashvili, Rati and Vojnović, Milan},
  booktitle    = {2015 ACM Symposium on Principles of Distributed Computing},
  pages        = {47--56},
  publisher    = {ACM},
  title        = {{Fast and exact majority in population protocols}},
  doi          = {10.1145/2767386.2767429},
  volume       = {2015-July},
  year         = {2015},
}

@inproceedings{782,
  abstract     = {In this work, we consider the following random process, motivated by the analysis of lock-free concurrent algorithms under high memory contention. In each round, a new scheduling step is allocated to one of n threads, according to a distribution p = (p1, p2, ..., pn), where thread i is scheduled with probability pi. When some thread first reaches a set threshold of executed steps, it registers a win, completing its current operation, and resets its step count to 1. At the same time, threads whose step count was close to the threshold also get reset because of the win, but to 0 steps, being penalized for almost winning. We are interested in two questions: how often does some thread complete an operation (system latency), and how often does a specific thread complete an operation (individual latency)? We provide asymptotically tight bounds for the system and individual latency of this general concurrency pattern, for arbitrary scheduling distributions p. Surprisingly, a simple characterization exists: in expectation, the system will complete a new operation every Θ(1/p 2) steps, while thread i will complete a new operation every Θ(1/2=p i ) steps. The proof is interesting in its own right, as it requires a careful analysis of how the higher norms of the vector p influence the thread step counts and latencies in this random process. Our result offers a simple connection between the scheduling distribution and the average performance of concurrent algorithms, which has several applications.},
  author       = {Alistarh, Dan-Adrian and Sauerwald, Thomas and Vojnović, Milan},
  booktitle    = {2015 ACM Symposium on Principles of Distributed Computing},
  pages        = {251--260},
  publisher    = {ACM},
  title        = {{Lock-free algorithms under stochastic schedulers}},
  doi          = {10.1145/2767386.2767430},
  volume       = {2015-July},
  year         = {2015},
}

