@article{1655,
  abstract     = {Quantifying behaviors of robots which were generated autonomously from task-independent objective functions is an important prerequisite for objective comparisons of algorithms and movements of animals. The temporal sequence of such a behavior can be considered as a time series and hence complexity measures developed for time series are natural candidates for its quantification. The predictive information and the excess entropy are such complexity measures. They measure the amount of information the past contains about the future and thus quantify the nonrandom structure in the temporal sequence. However, when using these measures for systems with continuous states one has to deal with the fact that their values will depend on the resolution with which the systems states are observed. For deterministic systems both measures will diverge with increasing resolution. We therefore propose a new decomposition of the excess entropy in resolution dependent and resolution independent parts and discuss how they depend on the dimensionality of the dynamics, correlations and the noise level. For the practical estimation we propose to use estimates based on the correlation integral instead of the direct estimation of the mutual information based on next neighbor statistics because the latter allows less control of the scale dependencies. Using our algorithm we are able to show how autonomous learning generates behavior of increasing complexity with increasing learning duration.},
  author       = {Martius, Georg S and Olbrich, Eckehard},
  journal      = {Entropy},
  number       = {10},
  pages        = {7266--7297},
  publisher    = {MDPI},
  title        = {Quantifying emergent behavior of autonomous robots},
  doi          = {10.3390/e17107266},
  volume       = {17},
  year         = {2015},
}

@inproceedings{1656,
  abstract     = {Recently there has been a significant effort to handle quantitative properties in formal verification and synthesis. While weighted automata over finite and infinite words provide a natural and flexible framework to express quantitative properties, perhaps surprisingly, some basic system properties such as average response time cannot be expressed using weighted automata, nor in any other know decidable formalism. In this work, we introduce nested weighted automata as a natural extension of weighted automata which makes it possible to express important quantitative properties such as average response time. In nested weighted automata, a master automaton spins off and collects results from weighted slave automata, each of which computes a quantity along a finite portion of an infinite word. Nested weighted automata can be viewed as the quantitative analogue of monitor automata, which are used in run-time verification. We establish an almost complete decidability picture for the basic decision problems about nested weighted automata, and illustrate their applicability in several domains. In particular, nested weighted automata can be used to decide average response time properties.},
  author       = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan},
  booktitle    = {Proceedings - Symposium on Logic in Computer Science},
  location     = {Kyoto, Japan},
  publisher    = {IEEE},
  title        = {Nested weighted automata},
  doi          = {10.1109/LICS.2015.72},
  volume       = {2015-July},
  year         = {2015},
}

@inproceedings{1657,
  abstract     = {We consider Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) objectives. There exist two different views: (i) ~the expectation semantics, where the goal is to optimize the expected mean-payoff objective, and (ii) ~the satisfaction semantics, where the goal is to maximize the probability of runs such that the mean-payoff value stays above a given vector. We consider optimization with respect to both objectives at once, thus unifying the existing semantics. Precisely, the goal is to optimize the expectation while ensuring the satisfaction constraint. Our problem captures the notion of optimization with respect to strategies that are risk-averse (i.e., Ensure certain probabilistic guarantee). Our main results are as follows: First, we present algorithms for the decision problems, which are always polynomial in the size of the MDP. We also show that an approximation of the Pareto curve can be computed in time polynomial in the size of the MDP, and the approximation factor, but exponential in the number of dimensions. Second, we present a complete characterization of the strategy complexity (in terms of memory bounds and randomization) required to solve our problem. },
  author       = {Chatterjee, Krishnendu and Komárková, Zuzana and Kretinsky, Jan},
  booktitle    = {Proceedings - Symposium on Logic in Computer Science},
  location     = {Kyoto, Japan},
  pages        = {244--256},
  publisher    = {IEEE},
  title        = {Unifying two views on multiple mean-payoff objectives in {Markov} decision processes},
  doi          = {10.1109/LICS.2015.32},
  year         = {2015},
}

@inproceedings{1658,
  abstract     = {Continuous-time Markov chain (CTMC) models have become a central tool for understanding the dynamics of complex reaction networks and the importance of stochasticity in the underlying biochemical processes. When such models are employed to answer questions in applications, in order to ensure that the model provides a sufficiently accurate representation of the real system, it is of vital importance that the model parameters are inferred from real measured data. This, however, is often a formidable task and all of the existing methods fail in one case or the other, usually because the underlying CTMC model is high-dimensional and computationally difficult to analyze. The parameter inference methods that tend to scale best in the dimension of the CTMC are based on so-called moment closure approximations. However, there exists a large number of different moment closure approximations and it is typically hard to say a priori which of the approximations is the most suitable for the inference procedure. Here, we propose a moment-based parameter inference method that automatically chooses the most appropriate moment closure method. Accordingly, contrary to existing methods, the user is not required to be experienced in moment closure techniques. In addition to that, our method adaptively changes the approximation during the parameter inference to ensure that always the best approximation is used, even in cases where different approximations are best in different regions of the parameter space.},
  author       = {Bogomolov, Sergiy and Henzinger, Thomas A and Podelski, Andreas and Ruess, Jakob and Schilling, Christian},
  location     = {Nantes, France},
  pages        = {77--89},
  publisher    = {Springer},
  title        = {Adaptive moment closure for parameter inference of biochemical reaction networks},
  doi          = {10.1007/978-3-319-23401-4_8},
  volume       = {9308},
  year         = {2015},
}

@inproceedings{1659,
  abstract     = {The target discounted-sum problem is the following: Given a rational discount factor 0 &lt; λ &lt; 1 and three rational values a, b, and t, does there exist a finite or an infinite sequence w ε(a, b)∗ or w ε(a, b)w, such that Σ|w| i=0 w(i)λi equals t? The problem turns out to relate to many fields of mathematics and computer science, and its decidability question is surprisingly hard to solve. We solve the finite version of the problem, and show the hardness of the infinite version, linking it to various areas and open problems in mathematics and computer science: β-expansions, discounted-sum automata, piecewise affine maps, and generalizations of the Cantor set. We provide some partial results to the infinite version, among which are solutions to its restriction to eventually-periodic sequences and to the cases that λ λ 1/2 or λ = 1/n, for every n ε N. We use our results for solving some open problems on discounted-sum automata, among which are the exact-value problem for nondeterministic automata over finite words and the universality and inclusion problems for functional automata.},
  author       = {Boker, Udi and Henzinger, Thomas A and Otop, Jan},
  booktitle    = {Proceedings - Symposium on Logic in Computer Science},
  issn         = {1043-6871},
  location     = {Kyoto, Japan},
  pages        = {750--761},
  publisher    = {IEEE},
  title        = {The target discounted-sum problem},
  doi          = {10.1109/LICS.2015.74},
  year         = {2015},
}

@inproceedings{1660,
  abstract     = {We study the pattern frequency vector for runs in probabilistic Vector Addition Systems with States (pVASS). Intuitively, each configuration of a given pVASS is assigned one of finitely many patterns, and every run can thus be seen as an infinite sequence of these patterns. The pattern frequency vector assigns to each run the limit of pattern frequencies computed for longer and longer prefixes of the run. If the limit does not exist, then the vector is undefined. We show that for one-counter pVASS, the pattern frequency vector is defined and takes one of finitely many values for almost all runs. Further, these values and their associated probabilities can be approximated up to an arbitrarily small relative error in polynomial time. For stable two-counter pVASS, we show the same result, but we do not provide any upper complexity bound. As a byproduct of our study, we discover counterexamples falsifying some classical results about stochastic Petri nets published in the 80s.},
  author       = {Brázdil, Tomáš and Kiefer, Stefan and Kučera, Antonín and Novotny, Petr},
  booktitle    = {Proceedings - Symposium on Logic in Computer Science},
  location     = {Kyoto, Japan},
  pages        = {44--55},
  publisher    = {IEEE},
  title        = {Long-run average behaviour of probabilistic vector addition systems},
  doi          = {10.1109/LICS.2015.15},
  year         = {2015},
}

@inproceedings{1661,
  abstract     = {The computation of the winning set for one-pair Streett objectives and for k-pair Streett objectives in (standard) graphs as well as in game graphs are central problems in computer-aided verification, with application to the verification of closed systems with strong fairness conditions, the verification of open systems, checking interface compatibility, well-formed ness of specifications, and the synthesis of reactive systems. We give faster algorithms for the computation of the winning set for (1) one-pair Streett objectives (aka parity-3 problem) in game graphs and (2) for k-pair Streett objectives in graphs. For both problems this represents the first improvement in asymptotic running time in 15 years.},
  author       = {Chatterjee, Krishnendu and Henzinger, Monika H and Loitzenbauer, Veronika},
  booktitle    = {Proceedings - Symposium on Logic in Computer Science},
  location     = {Kyoto, Japan},
  publisher    = {IEEE},
  title        = {Improved algorithms for one-pair and k-pair {Streett} objectives},
  doi          = {10.1109/LICS.2015.34},
  volume       = {2015-July},
  year         = {2015},
}

@article{1663,
  abstract     = {CREB-binding protein (CBP) and p300 are transcriptional coactivators involved in numerous biological processes that affect cell growth, transformation, differentiation, and development. In this study, we provide evidence of the involvement of homeodomain-interacting protein kinase 2 (HIPK2) in the regulation of CBP activity. We show that HIPK2 interacts with and phosphorylates several regions of CBP. We demonstrate that serines 2361, 2363, 2371, 2376, and 2381 are responsible for the HIPK2-induced mobility shift of CBP C-terminal activation domain. Moreover, we show that HIPK2 strongly potentiates the transcriptional activity of CBP. However, our data suggest that HIPK2 activates CBP mainly by counteracting the repressive action of cell cycle regulatory domain 1 (CRD1), located between amino acids 977 and 1076, independently of CBP phosphorylation. Our findings thus highlight a complex regulation of CBP activity by HIPK2, which might be relevant for the control of specific sets of target genes involved in cellular proliferation, differentiation and apoptosis.},
  author       = {Kovács, Krisztián and Steinmann, Myriam and Halfon, Olivier and Magistretti, Pierre and Cardinaux, Jean},
  journal      = {Cellular Signalling},
  number       = {11},
  pages        = {2252--2260},
  publisher    = {Elsevier},
  title        = {Complex regulation of {CREB}-binding protein by homeodomain-interacting protein kinase 2},
  doi          = {10.1016/j.cellsig.2015.08.001},
  volume       = {27},
  year         = {2015},
}

@article{1664,
  abstract     = {Over a century of research into the origin of turbulence in wall-bounded shear flows has resulted in a puzzling picture in which turbulence appears in a variety of different states competing with laminar background flow. At moderate flow speeds, turbulence is confined to localized patches; it is only at higher speeds that the entire flow becomes turbulent. The origin of the different states encountered during this transition, the front dynamics of the turbulent regions and the transformation to full turbulence have yet to be explained. By combining experiments, theory and computer simulations, here we uncover a bifurcation scenario that explains the transformation to fully turbulent pipe flow and describe the front dynamics of the different states encountered in the process. Key to resolving this problem is the interpretation of the flow as a bistable system with nonlinear propagation (advection) of turbulent fronts. These findings bridge the gap between our understanding of the onset of turbulence and fully turbulent flows.},
  author       = {Barkley, Dwight and Song, Baofang and Vasudevan, Mukund and Lemoult, Grégoire M and Avila, Marc and Hof, Björn},
  journal      = {Nature},
  number       = {7574},
  pages        = {550--553},
  publisher    = {Nature Publishing Group},
  title        = {The rise of fully turbulent flow},
  doi          = {10.1038/nature15701},
  volume       = {526},
  year         = {2015},
}

@article{1665,
  abstract     = {Which genetic alterations drive tumorigenesis and how they evolve over the course of disease and therapy are central questions in cancer biology. Here we identify 44 recurrently mutated genes and 11 recurrent somatic copy number variations through whole-exome sequencing of 538 chronic lymphocytic leukaemia (CLL) and matched germline DNA samples, 278 of which were collected in a prospective clinical trial. These include previously unrecognized putative cancer drivers (RPS15, IKZF3), and collectively identify RNA processing and export, MYC activity, and MAPK signalling as central pathways involved in CLL. Clonality analysis of this large data set further enabled reconstruction of temporal relationships between driver events. Direct comparison between matched pre-treatment and relapse samples from 59 patients demonstrated highly frequent clonal evolution. Thus, large sequencing data sets of clinically informative samples enable the discovery of novel genes associated with cancer, the network of relationships between the driver events, and their impact on disease relapse and clinical outcome.},
  author       = {Landau, Dan and Tausch, Eugen and Taylor Weiner, Amaro and Stewart, Chip and Reiter, Johannes and Bahlo, Jasmin and Kluth, Sandra and Božić, Ivana and Lawrence, Michael and Böttcher, Sebastian and Carter, Scott and Cibulskis, Kristian and Mertens, Daniel and Sougnez, Carrie and Rosenberg, Mara and Hess, Julian and Edelmann, Jennifer and Kless, Sabrina and Kneba, Michael and Ritgen, Matthias and Fink, Anna and Fischer, Kirsten and Gabriel, Stacey and Lander, Eric and Nowak, Martin and Döhner, Hartmut and Hallek, Michael and Neuberg, Donna and Getz, Gad and Stilgenbauer, Stephan and Wu, Catherine},
  journal      = {Nature},
  number       = {7574},
  pages        = {525--530},
  publisher    = {Nature Publishing Group},
  title        = {Mutations driving {CLL} and their evolution in progression and relapse},
  doi          = {10.1038/nature15395},
  volume       = {526},
  year         = {2015},
}

@article{1666,
  abstract     = {Evolution of gene regulation is crucial for our understanding of the phenotypic differences between species, populations and individuals. Sequence-specific binding of transcription factors to the regulatory regions on the DNA is a key regulatory mechanism that determines gene expression and hence heritable phenotypic variation. We use a biophysical model for directional selection on gene expression to estimate the rates of gain and loss of transcription factor binding sites (TFBS) in finite populations under both point and insertion/deletion mutations. Our results show that these rates are typically slow for a single TFBS in an isolated DNA region, unless the selection is extremely strong. These rates decrease drastically with increasing TFBS length or increasingly specific protein-DNA interactions, making the evolution of sites longer than ∼ 10 bp unlikely on typical eukaryotic speciation timescales. Similarly, evolution converges to the stationary distribution of binding sequences very slowly, making the equilibrium assumption questionable. The availability of longer regulatory sequences in which multiple binding sites can evolve simultaneously, the presence of “pre-sites” or partially decayed old sites in the initial sequence, and biophysical cooperativity between transcription factors, can all facilitate gain of TFBS and reconcile theoretical calculations with timescales inferred from comparative genomics.},
  author       = {Tugrul, Murat and Paixao, Tiago and Barton, Nicholas H and Tkacik, Gasper},
  journal      = {PLoS Genetics},
  number       = {11},
  publisher    = {Public Library of Science},
  title        = {Dynamics of transcription factor binding site evolution},
  doi          = {10.1371/journal.pgen.1005639},
  volume       = {11},
  year         = {2015},
}

@inproceedings{1667,
  abstract     = {We consider parametric version of fixed-delay continuoustime Markov chains (or equivalently deterministic and stochastic Petri nets, DSPN) where fixed-delay transitions are specified by parameters, rather than concrete values. Our goal is to synthesize values of these parameters that, for a given cost function, minimise expected total cost incurred before reaching a given set of target states. We show that under mild assumptions, optimal values of parameters can be effectively approximated using translation to a Markov decision process (MDP) whose actions correspond to discretized values of these parameters. To this end we identify and overcome several interesting phenomena arising in systems with fixed delays.},
  author       = {Brázdil, Tomáš and Korenčiak, L'Uboš and Krčál, Jan and Novotny, Petr and Řehák, Vojtěch},
  location     = {Madrid, Spain},
  pages        = {141--159},
  publisher    = {Springer},
  title        = {Optimizing performance of continuous-time stochastic systems using timeout synthesis},
  doi          = {10.1007/978-3-319-22264-6_10},
  volume       = {9259},
  year         = {2015},
}

@inproceedings{1668,
  abstract     = {We revisit the security (as a pseudorandom permutation) of cascading-based constructions for block-cipher key-length extension. Previous works typically considered the extreme case where the adversary is given the entire codebook of the construction, the only complexity measure being the number qe of queries to the underlying ideal block cipher, representing adversary’s secret-key-independent computation. Here, we initiate a systematic study of the more natural case of an adversary restricted to adaptively learning a number qc of plaintext/ciphertext pairs that is less than the entire codebook. For any such qc, we aim to determine the highest number of block-cipher queries qe the adversary can issue without being able to successfully distinguish the construction (under a secret key) from a random permutation.
More concretely, we show the following results for key-length extension schemes using a block cipher with n-bit blocks and κ-bit keys:
Plain cascades of length ℓ=2r+1 are secure whenever qcqre≪2r(κ+n), qc≪2κ and qe≪22κ. The bound for r=1 also applies to two-key triple encryption (as used within Triple DES).
The r-round XOR-cascade is secure as long as qcqre≪2r(κ+n), matching an attack by Gaži (CRYPTO 2013).
We fully characterize the security of Gaži and Tessaro’s two-call },
  author       = {Gazi, Peter and Lee, Jooyoung and Seurin, Yannick and Steinberger, John and Tessaro, Stefano},
  location     = {Istanbul, Turkey},
  pages        = {319--341},
  publisher    = {Springer},
  title        = {Relaxing full-codebook security: A refined analysis of key-length extension schemes},
  doi          = {10.1007/978-3-662-48116-5_16},
  volume       = {9054},
  year         = {2015},
}

@inproceedings{1669,
  abstract     = {Computational notions of entropy (a.k.a. pseudoentropy) have found many applications, including leakage-resilient cryptography, deterministic encryption or memory delegation. The most important tools to argue about pseudoentropy are chain rules, which quantify by how much (in terms of quantity and quality) the pseudoentropy of a given random variable X decreases when conditioned on some other variable Z (think for example of X as a secret key and Z as information leaked by a side-channel). In this paper we give a very simple and modular proof of the chain rule for HILL pseudoentropy, improving best known parameters. Our version allows for increasing the acceptable length of leakage in applications up to a constant factor compared to the best previous bounds. As a contribution of independent interest, we provide a comprehensive study of all known versions of the chain rule, comparing their worst-case strength and limitations.},
  author       = {Pietrzak, Krzysztof Z and Skórski, Maciej},
  location     = {Guadalajara, Mexico},
  pages        = {81--98},
  publisher    = {Springer},
  title        = {The chain rule for {HILL} pseudoentropy, revisited},
  doi          = {10.1007/978-3-319-22174-8_5},
  volume       = {9230},
  year         = {2015},
}

@inproceedings{1670,
  abstract     = {Planning in hybrid domains poses a special challenge due to the involved mixed discrete-continuous dynamics. A recent solving approach for such domains is based on applying model checking techniques on a translation of PDDL+ planning problems to hybrid automata. However, the proposed translation is limited because must behavior is only overapproximated, and hence, processes and events are not reflected exactly. In this paper, we present the theoretical foundation of an exact PDDL+ translation. We propose a schema to convert a hybrid automaton with must transitions into an equivalent hybrid automaton featuring only may transitions.},
  author       = {Bogomolov, Sergiy and Magazzeni, Daniele and Minopoli, Stefano and Wehrle, Martin},
  location     = {Jerusalem, Israel},
  pages        = {42--46},
  publisher    = {AAAI Press},
  title        = {{PDDL+} planning with hybrid automata: Foundations of translating must behavior},
  year         = {2015},
}

@inproceedings{1671,
  abstract     = {This paper studies the concrete security of PRFs and MACs obtained by keying hash functions based on the sponge paradigm. One such hash function is KECCAK, selected as NIST’s new SHA-3 standard. In contrast to other approaches like HMAC, the exact security of keyed sponges is not well understood. Indeed, recent security analyses delivered concrete security bounds which are far from existing attacks. This paper aims to close this gap. We prove (nearly) exact bounds on the concrete PRF security of keyed sponges using a random permutation. These bounds are tight for the most relevant ranges of parameters, i.e., for messages of length (roughly) l ≤ min{2n/4, 2r} blocks, where n is the state size and r is the desired output length; and for l ≤ q queries (to the construction or the underlying permutation). Moreover, we also improve standard-model bounds. As an intermediate step of independent interest, we prove tight bounds on the PRF security of the truncated CBC-MAC construction, which operates as plain CBC-MAC, but only returns a prefix of the output.},
  author       = {Gazi, Peter and Pietrzak, Krzysztof Z and Tessaro, Stefano},
  location     = {Santa Barbara, CA, United States},
  pages        = {368--387},
  publisher    = {Springer},
  title        = {The exact {PRF} security of truncation: Tight bounds for keyed sponges and truncated {CBC}},
  doi          = {10.1007/978-3-662-47989-6_18},
  volume       = {9215},
  year         = {2015},
}

@inproceedings{1672,
  abstract     = {Composable notions of incoercibility aim to forbid a coercer from using anything beyond the coerced parties’ inputs and outputs to catch them when they try to deceive him. Existing definitions are restricted to weak coercion types, and/or are not universally composable. Furthermore, they often make too strong assumptions on the knowledge of coerced parties—e.g., they assume they known the identities and/or the strategies of other coerced parties, or those of corrupted parties— which makes them unsuitable for applications of incoercibility such as e-voting, where colluding adversarial parties may attempt to coerce honest voters, e.g., by offering them money for a promised vote, and use their own view to check that the voter keeps his end of the bargain. In this work we put forward the first universally composable notion of incoercible multi-party computation, which satisfies the above intuition and does not assume collusions among coerced parties or knowledge of the corrupted set. We define natural notions of UC incoercibility corresponding to standard coercion-types, i.e., receipt-freeness and resistance to full-active coercion. Importantly, our suggested notion has the unique property that it builds on top of the well studied UC framework by Canetti instead of modifying it. This guarantees backwards compatibility, and allows us to inherit results from the rich UC literature. We then present MPC protocols which realize our notions of UC incoercibility given access to an arguably minimal setup—namely honestly generate tamper-proof hardware performing a very simple cryptographic operation—e.g., a smart card. This is, to our knowledge, the first proposed construction of an MPC protocol (for more than two parties) that is incoercibly secure and universally composable, and therefore the first construction of a universally composable receipt-free e-voting protocol.},
  author       = {Alwen, Joel F and Ostrovsky, Rafail and Zhou, Hongsheng and Zikas, Vassilis},
  booktitle    = {Advances in Cryptology - CRYPTO 2015},
  isbn         = {978-3-662-47999-5},
  location     = {Santa Barbara, CA, United States},
  pages        = {763--780},
  publisher    = {Springer},
  title        = {Incoercible multi-party computation and universally composable receipt-free voting},
  doi          = {10.1007/978-3-662-48000-7_37},
  volume       = {9216},
  year         = {2015},
}

@article{1673,
  abstract     = {When a new mutant arises in a population, there is a probability it outcompetes the residents and fixes. The structure of the population can affect this fixation probability. Suppressing population structures reduce the difference between two competing variants, while amplifying population structures enhance the difference. Suppressors are ubiquitous and easy to construct, but amplifiers for the large population limit are more elusive and only a few examples have been discovered. Whether or not a population structure is an amplifier of selection depends on the probability distribution for the placement of the invading mutant. First, we prove that there exist only bounded amplifiers for adversarial placement-that is, for arbitrary initial conditions. Next, we show that the Star population structure, which is known to amplify for mutants placed uniformly at random, does not amplify for mutants that arise through reproduction and are therefore placed proportional to the temperatures of the vertices. Finally, we construct population structures that amplify for all mutational events that arise through reproduction, uniformly at random, or through some combination of the two. },
  author       = {Adlam, Ben and Chatterjee, Krishnendu and Nowak, Martin},
  journal      = {Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences},
  number       = {2181},
  publisher    = {Royal Society of London},
  title        = {Amplifiers of selection},
  doi          = {10.1098/rspa.2015.0114},
  volume       = {471},
  year         = {2015},
}

@article{1674,
  abstract     = {We consider N × N random matrices of the form H = W + V where W is a real symmetric Wigner matrix and V a random or deterministic, real, diagonal matrix whose entries are independent of W. We assume subexponential decay for the matrix entries of W and we choose V so that the eigenvalues of W and V are typically of the same order. For a large class of diagonal matrices V, we show that the rescaled distribution of the extremal eigenvalues is given by the Tracy-Widom distribution F1 in the limit of large N. Our proofs also apply to the complex Hermitian setting, i.e. when W is a complex Hermitian Wigner matrix.},
  author       = {Lee, Ji Oon and Schnelli, Kevin},
  journal      = {Reviews in Mathematical Physics},
  number       = {8},
  publisher    = {World Scientific Publishing},
  title        = {Edge universality for deformed {Wigner} matrices},
  doi          = {10.1142/S0129055X1550018X},
  volume       = {27},
  year         = {2015},
}

@inproceedings{1675,
  abstract     = {Proofs of work (PoW) have been suggested by Dwork and Naor (Crypto’92) as protection to a shared resource. The basic idea is to ask the service requestor to dedicate some non-trivial amount of computational work to every request. The original applications included prevention of spam and protection against denial of service attacks. More recently, PoWs have been used to prevent double spending in the Bitcoin digital currency system. In this work, we put forward an alternative concept for PoWs - so-called proofs of space (PoS), where a service requestor must dedicate a significant amount of disk space as opposed to computation. We construct secure PoS schemes in the random oracle model (with one additional mild assumption required for the proof to go through), using graphs with high “pebbling complexity” and Merkle hash-trees. We discuss some applications, including follow-up work where a decentralized digital currency scheme called Spacecoin is constructed that uses PoS (instead of wasteful PoW like in Bitcoin) to prevent double spending. The main technical contribution of this work is the construction of (directed, loop-free) graphs on N vertices with in-degree O(log logN) such that even if one places Θ(N) pebbles on the nodes of the graph, there’s a constant fraction of nodes that needs Θ(N) steps to be pebbled (where in every step one can put a pebble on a node if all its parents have a pebble).},
  author       = {Dziembowski, Stefan and Faust, Sebastian and Kolmogorov, Vladimir and Pietrzak, Krzysztof Z},
  location     = {Santa Barbara, CA, United States},
  pages        = {585--605},
  publisher    = {Springer},
  title        = {Proofs of space},
  doi          = {10.1007/978-3-662-48000-7_29},
  volume       = {9216},
  year         = {2015},
}

