@inproceedings{635,
  abstract     = {Memory-hard functions (MHFs) are hash algorithms whose evaluation cost is dominated by memory cost. As memory, unlike computation, costs about the same across different platforms, MHFs cannot be evaluated at significantly lower cost on dedicated hardware like ASICs. MHFs have found widespread applications including password hashing, key derivation, and proofs-of-work. This paper focuses on scrypt, a simple candidate MHF designed by Percival, and described in RFC 7914. It has been used within a number of cryptocurrencies (e.g., Litecoin and Dogecoin) and has been an inspiration for Argon2d, one of the winners of the recent password-hashing competition. Despite its popularity, no rigorous lower bounds on its memory complexity are known. We prove that scrypt is optimally memory-hard, i.e., its cumulative memory complexity (cmc) in the parallel random oracle model is Ω(n^2 w), where w and n are the output length and number of invocations of the underlying hash function, respectively. High cmc is a strong security target for MHFs introduced by Alwen and Serbinenko (STOC’15) which implies high memory cost even for adversaries who can amortize the cost over many evaluations and evaluate the underlying hash functions many times in parallel. Our proof is the first showing optimal memory-hardness for any MHF. Our result improves both quantitatively and qualitatively upon the recent work by Alwen et al. (EUROCRYPT’16) who proved a weaker lower bound of Ω(n^2 w / log^2 n) for a restricted class of adversaries.},
  author       = {Alwen, Joel F and Chen, Binyi and Pietrzak, Krzysztof Z and Reyzin, Leonid and Tessaro, Stefano},
  editor       = {Coron, Jean-Sébastien and Buus Nielsen, Jesper},
  isbn         = {978-331956616-0},
  location     = {Paris, France},
  pages        = {33 -- 62},
  publisher    = {Springer},
  title        = {{Scrypt is maximally memory-hard}},
  doi          = {10.1007/978-3-319-56617-7_2},
  volume       = {10212},
  year         = {2017},
}
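
A note on the cost measure above: cumulative memory complexity charges for the memory held at every step of a (possibly parallel) evaluation, not just the peak. The toy Python sketch below (illustrative names only, not the paper's parallel-random-oracle formalism) makes that accounting concrete for a naive sequential scrypt-style schedule, which stores all n labels and then consumes them, giving cmc on the order of n^2 w.

# Toy cumulative-memory-complexity accounting: given a trace listing, per
# round, the set of hash labels an evaluator keeps in memory, cmc is the sum
# over rounds of (#stored labels) * (label length w in bits).
def cumulative_memory_complexity(trace, w):
    return sum(len(stored) * w for stored in trace)

# Naive sequential scrypt-like schedule: n rounds building up all n labels,
# then n rounds keeping the full table while consuming it -> Theta(n^2 w).
n, w = 8, 256
build = [set(range(i + 1)) for i in range(n)]
consume = [set(range(n)) for _ in range(n)]
print(cumulative_memory_complexity(build + consume, w))  # 25600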

@inproceedings{636,
  abstract     = {Signal regular expressions can specify sequential properties of real-valued signals based on threshold conditions, regular operations, and duration constraints. In this paper we endow them with a quantitative semantics which indicates how robustly a signal matches or does not match a given expression. First, we show that this semantics is a safe approximation of a distance between the signal and the language defined by the expression. Then, we consider the robust matching problem, that is, computing the quantitative semantics of every segment of a given signal relative to an expression. We present an algorithm that solves this problem for piecewise-constant and piecewise-linear signals and show that for such signals the robustness map is a piecewise-linear function. The availability of an indicator describing how robustly a signal segment matches some regular pattern provides a general framework for quantitative monitoring of cyber-physical systems.},
  author       = {Bakhirkin, Alexey and Ferrere, Thomas and Maler, Oded and Ulus, Dogan},
  editor       = {Abate, Alessandro and Geeraerts, Gilles},
  isbn         = {978-331965764-6},
  location     = {Berlin, Germany},
  pages        = {189 -- 206},
  publisher    = {Springer},
  title        = {{On the quantitative semantics of regular expressions over real-valued signals}},
  doi          = {10.1007/978-3-319-65765-3_11},
  volume       = {10419},
  year         = {2017},
}
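
For intuition about the quantitative semantics above: the robustness of an atomic threshold pattern over a segment measures how far the signal stays from violating the threshold, and Boolean/regular operations combine robustness values by min and max. The sketch below is a toy simplification for a sampled piecewise-constant signal, not the paper's matching algorithm.

# Robustness of the atomic pattern "x > c throughout [i, j)" for a signal
# sampled at unit steps: positive = robust match, negative = robust mismatch.
def robustness_above(x, c, i, j):
    return min(x[t] - c for t in range(i, j))

x = [0.2, 1.5, 1.1, 0.9, 2.0]
print(robustness_above(x, 1.0, 1, 3))  # 0.1: barely matches on [1, 3)
print(robustness_above(x, 1.0, 0, 3))  # -0.8: robustly fails on [0, 3)
# A disjunction of patterns would take the max of the robustness values,
# a conjunction the min.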

@inproceedings{637,
  abstract     = {For many cryptographic primitives, it is relatively easy to achieve selective security (where the adversary commits a-priori to some of the choices to be made later in the attack) but appears difficult to achieve the more natural notion of adaptive security (where the adversary can make all choices on the go as the attack progresses). A series of several recent works shows how to cleverly achieve adaptive security in several such scenarios including generalized selective decryption (Panjwani, TCC ’07 and Fuchsbauer et al., CRYPTO ’15), constrained PRFs (Fuchsbauer et al., ASIACRYPT ’14), and Yao garbled circuits (Jafargholi and Wichs, TCC ’16b). Although the above works expressed vague intuition that they share a common technique, the connection was never made precise. In this work we present a new framework that connects all of these works and allows us to present them in a unified and simplified fashion. Moreover, we use the framework to derive a new result for adaptively secure secret sharing over access structures defined via monotone circuits. We envision that further applications will follow in the future. Underlying our framework is the following simple idea. It is well known that selective security, where the adversary commits to n bits of information about his future choices, automatically implies adaptive security at the cost of amplifying the adversary’s advantage by a factor of up to 2^n. However, in some cases the proof of selective security proceeds via a sequence of hybrids, where each pair of adjacent hybrids locally only requires some smaller partial information consisting of m ≪ n bits. The partial information needed might be completely different between different pairs of hybrids, and if we look across all the hybrids we might rely on the entire n-bit commitment. Nevertheless, the above is sufficient to prove adaptive security, at the cost of amplifying the adversary’s advantage by a factor of only 2^m ≪ 2^n. In all of our examples using the above framework, the different hybrids are captured by some sort of a graph pebbling game and the amount of information that the adversary needs to commit to in each pair of hybrids is bounded by the maximum number of pebbles in play at any point in time. Therefore, coming up with better strategies for proving adaptive security translates to various pebbling strategies for different types of graphs.},
  author       = {Jafargholi, Zahra and Kamath Hosdurg, Chethan and Klein, Karen and Komargodski, Ilan and Pietrzak, Krzysztof Z and Wichs, Daniel},
  editor       = {Katz, Jonathan and Shacham, Hovav},
  isbn         = {978-331963687-0},
  location     = {Santa Barbara, CA, United States},
  pages        = {133 -- 163},
  publisher    = {Springer},
  title        = {{Be adaptive, avoid overcommitting}},
  doi          = {10.1007/978-3-319-63688-7_5},
  volume       = {10401},
  year         = {2017},
}
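
The 2^n versus 2^m amplification described above comes from the standard guessing reduction; schematically (a paraphrase of the abstract's argument, not a statement from the paper):

\[
\mathrm{Adv}_{\mathrm{adaptive}}(H_i, H_{i+1}) \;\le\; 2^{m}\cdot\mathrm{Adv}_{\mathrm{selective}}(H_i, H_{i+1})
\qquad\text{instead of}\qquad
\mathrm{Adv}_{\mathrm{adaptive}} \;\le\; 2^{n}\cdot\mathrm{Adv}_{\mathrm{selective}},
\]

because the reduction only needs to guess the m bits of partial information relevant to one pair of adjacent hybrids; summing over the hybrids then loses a factor of roughly (number of hybrids) times 2^m rather than 2^n.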

@inproceedings{639,
  abstract     = {We study the problem of developing efficient approaches for proving worst-case bounds of non-deterministic recursive programs. Ranking functions are sound and complete for proving termination and worst-case bounds of non-recursive programs. First, we apply ranking functions to recursion, resulting in measure functions, and show that they provide a sound and complete approach to prove worst-case bounds of non-deterministic recursive programs. Our second contribution is the synthesis of measure functions in non-polynomial forms. We show that non-polynomial measure functions with logarithm and exponentiation can be synthesized through abstraction of logarithmic or exponentiation terms, Farkas’ Lemma, and Handelman’s Theorem using linear programming. While previous methods obtain worst-case polynomial bounds, our approach can synthesize bounds of the form O(n log n) as well as O(n^r) where r is not an integer. We present experimental results to demonstrate that our approach can efficiently obtain worst-case bounds of classical recursive algorithms such as Merge-Sort, Closest-Pair, Karatsuba’s algorithm and Strassen’s algorithm.},
  author       = {Chatterjee, Krishnendu and Fu, Hongfei and Goharshady, Amir},
  editor       = {Majumdar, Rupak and Kunčak, Viktor},
  isbn         = {978-331963389-3},
  location     = {Heidelberg, Germany},
  pages        = {41 -- 63},
  publisher    = {Springer},
  title        = {{Non-polynomial worst case analysis of recursive programs}},
  doi          = {10.1007/978-3-319-63390-9_3},
  volume       = {10427},
  year         = {2017},
}
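
As a concrete instance of the O(n log n) bounds mentioned above, one can sanity-check numerically that f(n) = 2 n log2(n) dominates the Merge-Sort recurrence T(n) = T(⌈n/2⌉) + T(⌊n/2⌋) + n; this only checks the resulting bound, not the paper's Farkas/Handelman synthesis procedure.

# Verify f(ceil(m/2)) + f(floor(m/2)) + m <= f(m) for all 2 <= m <= n,
# i.e. that f(n) = 2*n*log2(n) is a valid measure-function-style upper bound
# for the Merge-Sort recurrence with T(1) = 0.
import math

def f(n):
    return 2 * n * math.log2(n) if n > 1 else 0.0

def check(n):
    return all(f(math.ceil(m / 2)) + f(m // 2) + m <= f(m) + 1e-9
               for m in range(2, n + 1))

print(check(10000))  # True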

@inproceedings{640,
  abstract     = {Data-independent Memory Hard Functions (iMHFs) are finding a growing number of applications in security, especially in the domain of password hashing. An important property of a concrete iMHF is specified by fixing a directed acyclic graph (DAG) G_n on n nodes. The quality of that iMHF is then captured by the following two pebbling complexities of G_n: – The parallel cumulative pebbling complexity Π^∥_cc(G_n) must be as high as possible (to ensure that the amortized cost of computing the function on dedicated hardware is dominated by the cost of memory). – The sequential space-time pebbling complexity Π_st(G_n) should be as close as possible to Π^∥_cc(G_n) (to ensure that using many cores in parallel and amortizing over many instances does not give much of an advantage). In this paper we construct a family of DAGs with best possible parameters in an asymptotic sense, i.e., where Π^∥_cc(G_n) = Ω(n^2/log(n)) (which matches a known upper bound) and Π_st(G_n) is within a constant factor of Π^∥_cc(G_n). Our analysis relies on a new connection between the pebbling complexity of a DAG and its depth-robustness (DR) – a well-studied combinatorial property. We show that high DR is sufficient for high Π^∥_cc. Alwen and Blocki (CRYPTO’16) showed that high DR is necessary and so, together, these results fully characterize DAGs with high Π^∥_cc in terms of DR. Complementing these results, we provide new upper and lower bounds on the Π^∥_cc of several important candidate iMHFs from the literature. We give the first lower bounds on the memory hardness of the Catena and Balloon Hashing functions in a parallel model of computation and we give the first lower bounds of any kind for (a version of) Argon2i. Finally we describe a new class of pebbling attacks improving on those of Alwen and Blocki (CRYPTO’16). By instantiating these attacks we upper bound the Π^∥_cc of the Password Hashing Competition winner Argon2i and one of the Balloon Hashing functions by O(n^1.71). We also show an upper bound of O(n^1.625) for the Catena functions and the two remaining Balloon Hashing functions.},
  author       = {Alwen, Joel F and Blocki, Jeremiah and Pietrzak, Krzysztof Z},
  editor       = {Coron, Jean-Sébastien and Buus Nielsen, Jesper},
  isbn         = {978-331956616-0},
  location     = {Paris, France},
  pages        = {3 -- 32},
  publisher    = {Springer},
  title        = {{Depth-robust graphs and their cumulative memory complexity}},
  doi          = {10.1007/978-3-319-56617-7_1},
  volume       = {10212},
  year         = {2017},
}
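
Recall the definition behind the abstract above: a DAG is (e, d)-depth-robust if removing any e nodes still leaves a directed path of length d. The brute-force checker below (exponential in e, for tiny examples only) just makes the definition concrete; it is unrelated to the paper's constructions.

# Brute-force (e,d)-depth-robustness check for tiny DAGs.
from itertools import combinations

def depth(n, edges, removed):
    # Longest path (counted in edges) avoiding `removed`; edges must be
    # listed in topological order (here u < v), nodes are 0..n-1.
    dist = [0] * n
    for u, v in edges:
        if u not in removed and v not in removed:
            dist[v] = max(dist[v], dist[u] + 1)
    return max((dist[v] for v in range(n) if v not in removed), default=0)

def is_depth_robust(n, edges, e, d):
    return all(depth(n, edges, set(s)) >= d
               for k in range(e + 1) for s in combinations(range(n), k))

path = [(i, i + 1) for i in range(7)]     # path on 8 nodes, depth 7
print(is_depth_robust(8, path, 1, 3))     # True
print(is_depth_robust(8, path, 1, 4))     # False: removing a middle node
                                          # caps the surviving depth at 3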

@article{642,
  abstract     = {Cauchy problems with SPDEs on the whole space are localized to Cauchy problems on a ball of radius R. This localization reduces various kinds of spatial approximation schemes to finite dimensional problems. The error is shown to be exponentially small. As an application, a numerical scheme is presented which combines the localization and the space and time discretization, and thus is fully implementable.},
  author       = {Gerencsér, Máté and Gyöngy, István},
  issn         = {00255718},
  journal      = {Mathematics of Computation},
  number       = {307},
  pages        = {2373 -- 2397},
  publisher    = {American Mathematical Society},
  title        = {{Localization errors in solving stochastic partial differential equations in the whole space}},
  doi          = {10.1090/mcom/3201},
  volume       = {86},
  year         = {2017},
}

@misc{6426,
  abstract     = {Synchronous programs are easy to specify because the side effects of an operation are finished by the time the invocation of the operation returns to the caller. Asynchronous programs, on the other hand, are difficult to specify because there are side effects due to pending computation scheduled as a result of the invocation of an operation. They are also difficult to verify because of the large number of possible interleavings of concurrent asynchronous computation threads. We show that specifications and correctness proofs for asynchronous programs can be structured by introducing the fiction, for proof purposes, that intermediate, non-quiescent states of asynchronous operations can be ignored. Then, the task of specification becomes relatively simple and the task of verification can be naturally decomposed into smaller sub-tasks. The sub-tasks iteratively summarize, guided by the structure of an asynchronous program, the atomic effect of non-atomic operations and the synchronous effect of asynchronous operations. This structuring of specifications and proofs corresponds to the introduction of multiple layers of stepwise refinement for asynchronous programs. We present the first proof rule, called synchronization, to reduce asynchronous invocations on a lower layer to synchronous invocations on a higher layer. We implemented our proof method in CIVL and evaluated it on a collection of benchmark programs.},
  author       = {Henzinger, Thomas A and Kragl, Bernhard and Qadeer, Shaz},
  issn         = {2664-1690},
  pages        = {28},
  publisher    = {IST Austria},
  title        = {{Synchronizing the asynchronous}},
  doi          = {10.15479/AT:IST-2018-853-v2-2},
  year         = {2017},
}

@article{644,
  abstract     = {An instance of the valued constraint satisfaction problem (VCSP) is given by a finite set of variables, a finite domain of labels, and a sum of functions, each function depending on a subset of the variables. Each function can take finite values specifying costs of assignments of labels to its variables or the infinite value, which indicates an infeasible assignment. The goal is to find an assignment of labels to the variables that minimizes the sum. We study, assuming that P ≠ NP, how the complexity of this very general problem depends on the set of functions allowed in the instances, the so-called constraint language. The case when all allowed functions take values in {0, ∞} corresponds to ordinary CSPs, where one deals only with the feasibility issue, and there is no optimization. This case is the subject of the algebraic CSP dichotomy conjecture predicting for which constraint languages CSPs are tractable (i.e., solvable in polynomial time) and for which they are NP-hard. The case when all allowed functions take only finite values corresponds to a finite-valued CSP, where the feasibility aspect is trivial and one deals only with the optimization issue. The complexity of finite-valued CSPs was fully classified by Thapper and Živný. An algebraic necessary condition for tractability of a general-valued CSP with a fixed constraint language was recently given by Kozik and Ochremiak. As our main result, we prove that if a constraint language satisfies this algebraic necessary condition, and the feasibility CSP (i.e., the problem of deciding whether a given instance has a feasible solution) corresponding to the VCSP with this language is tractable, then the VCSP is tractable. The algorithm is a simple combination of the assumed algorithm for the feasibility CSP and the standard LP relaxation. As a corollary, we obtain that a dichotomy for ordinary CSPs would imply a dichotomy for general-valued CSPs.},
  author       = {Kolmogorov, Vladimir and Krokhin, Andrei and Rolinek, Michal},
  journal      = {SIAM Journal on Computing},
  number       = {3},
  pages        = {1087 -- 1110},
  publisher    = {SIAM},
  title        = {{The complexity of general-valued CSPs}},
  doi          = {10.1137/16M1091836},
  volume       = {46},
  year         = {2017},
}

@inproceedings{645,
  abstract     = {Markov decision processes (MDPs) are standard models for probabilistic systems with non-deterministic behaviours. Long-run average rewards provide a mathematically elegant formalism for expressing long term performance. Value iteration (VI) is one of the simplest and most efficient algorithmic approaches to MDPs with other properties, such as reachability objectives. Unfortunately, a naive extension of VI does not work for MDPs with long-run average rewards, as there is no known stopping criterion. In this work our contributions are threefold. (1) We refute a conjecture related to stopping criteria for MDPs with long-run average rewards. (2) We present two practical algorithms for MDPs with long-run average rewards based on VI. First, we show that a combination of applying VI locally for each maximal end-component (MEC) and VI for reachability objectives can provide approximation guarantees. Second, extending the above approach with a simulation-guided on-demand variant of VI, we present an anytime algorithm that is able to deal with very large models. (3) Finally, we present experimental results showing that our methods significantly outperform the standard approaches on several benchmarks.},
  author       = {Ashok, Pranav and Chatterjee, Krishnendu and Daca, Przemyslaw and Kretinsky, Jan and Meggendorfer, Tobias},
  editor       = {Majumdar, Rupak and Kunčak, Viktor},
  isbn         = {978-331963386-2},
  location     = {Heidelberg, Germany},
  pages        = {201 -- 221},
  publisher    = {Springer},
  title        = {{Value iteration for long-run average reward in Markov decision processes}},
  doi          = {10.1007/978-3-319-63387-9_10},
  volume       = {10426},
  year         = {2017},
}
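
For readers unfamiliar with VI: it repeatedly applies a Bellman-style operator and stops when successive iterates are close. The minimal reachability sketch below (toy MDP, hypothetical encoding) uses the common residual-based stopping heuristic; the paper's starting point is that no sound stopping criterion is known for the naive VI extension to long-run average rewards.

# Value iteration for MAX reachability probability in an MDP.
# mdp[s] = list of actions; an action is a list of (probability, successor).
def value_iteration(mdp, target, eps=1e-8, max_iter=10**6):
    v = {s: 1.0 if s in target else 0.0 for s in mdp}
    for _ in range(max_iter):
        new = {s: 1.0 if s in target else
                  max(sum(p * v[t] for p, t in a) for a in mdp[s])
               for s in mdp}
        if max(abs(new[s] - v[s]) for s in mdp) < eps:  # residual heuristic
            return new
        v = new
    return v

# Toy MDP: in s0, one action reaches goal w.p. 0.5 (else stays), one loops.
mdp = {"s0": [[(0.5, "goal"), (0.5, "s0")], [(1.0, "s0")]],
       "goal": [[(1.0, "goal")]]}
print(value_iteration(mdp, {"goal"})["s0"])  # ~1.0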

@inproceedings{646,
  abstract     = {We present a novel convex relaxation and a corresponding inference algorithm for the non-binary discrete tomography problem, that is, reconstructing discrete-valued images from few linear measurements. In contrast to state-of-the-art approaches that split the problem into a continuous reconstruction problem for the linear measurement constraints and a discrete labeling problem to enforce discrete-valued reconstructions, we propose a joint formulation that addresses both problems simultaneously, resulting in a tighter convex relaxation. For this purpose a constrained graphical model is set up and evaluated using a novel relaxation optimized by dual decomposition. We evaluate our approach experimentally and show superior solutions both mathematically (tighter relaxation) and experimentally in comparison to previously proposed relaxations.},
  author       = {Kuske, Jan and Swoboda, Paul and Petra, Stefanie},
  editor       = {Lauze, François and Dong, Yiqiu and Bjorholm Dahl, Anders},
  isbn         = {978-331958770-7},
  location     = {Kolding, Denmark},
  pages        = {235 -- 246},
  publisher    = {Springer},
  title        = {{A novel convex relaxation for non-binary discrete tomography}},
  doi          = {10.1007/978-3-319-58771-4_19},
  volume       = {10302},
  year         = {2017},
}

@inproceedings{647,
  abstract     = {Despite researchers’ efforts in the last couple of decades, reachability analysis is still a challenging problem even for linear hybrid systems. Among the existing approaches, the most practical ones are mainly based on bounded-time reachable set over-approximations. For the purpose of unbounded-time analysis, one important strategy is to abstract the original system and find an invariant for the abstraction. In this paper, we propose an approach to constructing a new kind of abstraction called conic abstraction for affine hybrid systems, and to computing reachable sets based on this abstraction. The essential feature of a conic abstraction is that it partitions the state space of a system into a set of convex polyhedral cones which is derived from a uniform conic partition of the derivative space. Such a set of polyhedral cones is able to cut all trajectories of the system into almost straight segments so that every segment of a reach pipe in a polyhedral cone tends to be straight as well, and hence can be over-approximated tightly by polyhedra using similar techniques as HyTech or PHAVer. In particular, for diagonalizable affine systems, our approach can guarantee to find an invariant for unbounded reachable sets, which is beyond the capability of bounded-time reachability analysis tools. We implemented the approach in a tool and experiments on benchmarks show that our approach is more powerful than SpaceEx and PHAVer in dealing with diagonalizable systems.},
  author       = {Bogomolov, Sergiy and Giacobbe, Mirco and Henzinger, Thomas A and Kong, Hui},
  isbn         = {978-331965764-6},
  location     = {Berlin, Germany},
  pages        = {116 -- 132},
  publisher    = {Springer},
  title        = {{Conic abstractions for hybrid systems}},
  doi          = {10.1007/978-3-319-65765-3_7},
  volume       = {10419},
  year         = {2017},
}
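
To make the phrase "uniform conic partition of the derivative space" concrete: in the plane one can split directions into k equal angular sectors and assign a state x to the sector containing its derivative Ax; each sector then induces a cone of states whose trajectory segments are nearly straight. The 2-D sketch below is an illustration under these assumptions, not the tool's implementation.

# Assign a state to one of k uniform angular sectors of its derivative Ax.
import math

def cone_index(A, x, k):
    dx = (A[0][0] * x[0] + A[0][1] * x[1],
          A[1][0] * x[0] + A[1][1] * x[1])
    angle = math.atan2(dx[1], dx[0]) % (2 * math.pi)
    return int(angle / (2 * math.pi / k))

A = [[-0.5, 1.0], [-1.0, -0.5]]   # stable spiral: rotating derivative field
for x in [(1.0, 0.0), (0.0, 1.0), (-1.0, 0.0)]:
    print(x, "-> cone", cone_index(A, x, 8))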

@inproceedings{648,
  abstract     = {Pseudoentropy has found many important applications in cryptography and complexity theory. In this paper we focus on a foundational problem that has not been investigated so far: by how much does pseudoentropy (the amount seen by computationally bounded attackers) differ from its information-theoretic counterpart (seen by unbounded observers), given certain limits on the attacker’s computational power? We provide the following answer for HILL pseudoentropy, which exhibits a threshold behavior around the size exponential in the entropy amount: – If the attacker size (s) and advantage (ε) satisfy s (formula presented), where k is the claimed amount of pseudoentropy, then the pseudoentropy boils down to the information-theoretic smooth entropy. – If s (formula presented), then pseudoentropy can be arbitrarily bigger than the information-theoretic smooth entropy. Besides answering the posed question, we show an elegant application of our result to complexity theory, namely that it implies the classical result on the existence of functions hard to approximate (due to Pippenger). In our approach we utilize non-constructive techniques: the duality of linear programming and the probabilistic method.},
  author       = {Skórski, Maciej},
  editor       = {Jäger, Gerhard and Steila, Silvia},
  isbn         = {978-331955910-0},
  location     = {Bern, Switzerland},
  pages        = {600 -- 613},
  publisher    = {Springer},
  title        = {{On the complexity of breaking pseudoentropy}},
  doi          = {10.1007/978-3-319-55911-7_43},
  volume       = {10185},
  year         = {2017},
}

@inproceedings{650,
  abstract     = {In this work we present a short and unified proof for the Strong and Weak Regularity Lemma, based on the cryptographic technique called low-complexity approximations. In short, both problems reduce to the task of constructively finding an approximation for a certain target function under a class of distinguishers (test functions), where distinguishers are combinations of simple rectangle-indicators. In our case these approximations can be learned by a simple iterative procedure, which yields a unified and simple proof, achieving the claimed partition size for any graph with density d and any approximation parameter. The novelty in our proof is: (a) a simple approach which yields both the strong and the weak variant, and (b) improvements when d = o(1). At an abstract level, our proof can be seen as a refinement and simplification of the “analytic” proof given by Lovász and Szegedy.},
  author       = {Skórski, Maciej},
  editor       = {Jäger, Gerhard and Steila, Silvia},
  issn         = {03029743},
  location     = {Bern, Switzerland},
  pages        = {586 -- 599},
  publisher    = {Springer},
  title        = {{A cryptographic view of regularity lemmas: Simpler unified proofs and refined bounds}},
  doi          = {10.1007/978-3-319-55911-7_42},
  volume       = {10185},
  year         = {2017},
}

@inproceedings{6517,
  abstract     = {A (possibly degenerate) drawing of a graph G in the plane is approximable by an embedding if it can be turned into an embedding by an arbitrarily small perturbation. We show that testing, whether a drawing of a planar graph G in the plane is approximable by an embedding, can be carried out in polynomial time, if a desired embedding of G belongs to a fixed isotopy class, i.e., the rotation system (or equivalently the faces) of the embedding of G and the choice of outer face are fixed. In other words, we show that c-planarity with embedded pipes is tractable for graphs with fixed embeddings. To the best of our knowledge an analogous result was previously known essentially only when G is a cycle.},
  author       = {Fulek, Radoslav},
  location     = {Phuket, Thailand},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Embedding graphs into embedded graphs}},
  doi          = {10.4230/LIPICS.ISAAC.2017.34},
  volume       = {92},
  year         = {2017},
}

@inproceedings{6519,
  abstract     = {Graph games with omega-regular winning conditions provide a mathematical framework to analyze a wide range of problems in the analysis of reactive systems and programs (such as the synthesis of reactive systems, program repair, and the verification of branching time properties). Parity conditions are canonical forms to specify omega-regular winning conditions. Graph games with parity conditions are equivalent to mu-calculus model checking, and thus a very important algorithmic problem. Symbolic algorithms are of great significance because they provide scalable algorithms for the analysis of large finite-state systems, as well as algorithms for the analysis of infinite-state systems with finite quotient. A set-based symbolic algorithm uses the basic set operations and the one-step predecessor operators. We consider graph games with n vertices and parity conditions with c priorities (equivalently, a mu-calculus formula with c alternations of least and greatest fixed points). While many explicit algorithms exist for graph games with parity conditions, for set-based symbolic algorithms there are only two algorithms (notice that we use space to refer to the number of sets stored by a symbolic algorithm): (a) the basic algorithm that requires O(n^c) symbolic operations and linear space; and (b) an improved algorithm that requires O(n^{c/2+1}) symbolic operations but also O(n^{c/2+1}) space (i.e., exponential space). In this work we present two set-based symbolic algorithms for parity games: (a) our first algorithm requires O(n^{c/2+1}) symbolic operations and only requires linear space; and (b) developing on our first algorithm, we present an algorithm that requires O(n^{c/3+1}) symbolic operations and only linear space. We also present the first linear space set-based symbolic algorithm for parity games that requires at most a sub-exponential number of symbolic operations. },
  author       = {Chatterjee, Krishnendu and Dvorák, Wolfgang and Henzinger, Monika H and Loitzenbauer, Veronika},
  location     = {Stockholm, Sweden},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Improved set-based symbolic algorithms for parity games}},
  doi          = {10.4230/LIPICS.CSL.2017.18},
  volume       = {82},
  year         = {2017},
}
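
The "basic set operations and one-step predecessor operators" counted above are exactly what an attractor computation consumes; the sketch below uses explicit Python sets as stand-ins for symbolic (e.g. BDD-backed) sets, with a hypothetical game encoding.

# Attractor of `target` for `player` in a two-player graph game: the set of
# vertices from which `player` can force the play into `target`.
def attractor(vertices, edges, owner, player, target):
    attr = set(target)
    changed = True
    while changed:
        changed = False
        for v in vertices - attr:
            succs = {u for (w, u) in edges if w == v}
            if (owner[v] == player and succs & attr) or \
               (owner[v] != player and succs and succs <= attr):
                attr.add(v)
                changed = True
    return attr

V = {0, 1, 2}
E = {(0, 0), (0, 1), (1, 2), (2, 2)}
owner = {0: 0, 1: 1, 2: 0}                # vertex -> controlling player
print(attractor(V, E, owner, 0, {2}))     # {0, 1, 2}: from 0, player 0 moves
                                          # to 1, where player 1 must go to 2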

@inproceedings{6526,
  abstract     = {This paper studies the complexity of estimating Rényi divergences of discrete distributions: p observed from samples and the baseline distribution q known a priori. Extending the results of Acharya et al. (SODA'15) on estimating Rényi entropy, we present improved estimation techniques together with upper and lower bounds on the sample complexity. We show that, in contrast to estimating Rényi entropy where a sublinear (in the alphabet size) number of samples suffices, the sample complexity is heavily dependent on events that occur with small probability under q, and is unbounded in general (no matter what estimation technique is used). For any divergence of integer order bigger than 1, we provide upper and lower bounds on the number of samples dependent on probabilities of p and q (the lower bounds hold for non-integer orders as well). We conclude that the worst-case sample complexity is polynomial in the alphabet size if and only if the probabilities of q are non-negligible. This gives theoretical insights into heuristics used in the applied literature to handle numerical instability, which occurs for small probabilities of q. Our result shows that they should be handled with care not only because of numerical issues, but also because of a blow up in the sample complexity.},
  author       = {Skórski, Maciej},
  booktitle    = {2017 IEEE International Symposium on Information Theory (ISIT)},
  isbn         = {9781509040964},
  location     = {Aachen, Germany},
  publisher    = {IEEE},
  title        = {{On the complexity of estimating Rényi divergences}},
  doi          = {10.1109/isit.2017.8006529},
  year         = {2017},
}
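
To fix ideas on the estimation problem above: for integer order α > 1 and known baseline q, the target quantity is D_α(p‖q) = (1/(α−1)) log Σ_x p(x)^α q(x)^{1−α}, and even the naive plug-in estimator below (not the paper's improved technique) shows why tiny probabilities in q hurt: they enter with exponent 1−α < 0.

# Naive plug-in estimate of the Renyi divergence of integer order alpha > 1:
# replace p by the empirical distribution of the samples, keep q exact.
import math
from collections import Counter

def renyi_divergence_plugin(samples, q, alpha=2):
    n = len(samples)
    p_hat = {x: c / n for x, c in Counter(samples).items()}
    s = sum(p ** alpha * q[x] ** (1 - alpha) for x, p in p_hat.items())
    return math.log(s) / (alpha - 1)

q = {"a": 0.5, "b": 0.49, "c": 0.01}
samples = ["a"] * 480 + ["b"] * 490 + ["c"] * 30
print(renyi_divergence_plugin(samples, q))  # the rare symbol "c" enters with
                                            # weight 1/q("c") = 100, so small
                                            # q-probabilities dominate the error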

@inproceedings{6527,
  abstract     = {A memory-hard function (MHF) f_n with parameter n can be computed in sequential time and space n. Simultaneously, a high amortized parallel area-time complexity (aAT) is incurred per evaluation. In practice, MHFs are used to limit the rate at which an adversary (using a custom computational device) can evaluate a security-sensitive function that still occasionally needs to be evaluated by honest users (using an off-the-shelf general purpose device). The most prevalent examples of such sensitive functions are Key Derivation Functions (KDFs) and password hashing algorithms, where rate limits help mitigate off-line dictionary attacks. As the honest users' inputs to these functions are often (low-entropy) passwords, special attention is given to a class of side-channel resistant MHFs called iMHFs.

Essentially all iMHFs can be viewed as some mode of operation (making n calls to some round function) given by a directed acyclic graph (DAG) with very low indegree. Recently, a combinatorial property of a DAG has been identified (called "depth-robustness") which results in good provable security for an iMHF based on that DAG. Depth-robust DAGs have also proven useful in other cryptographic applications. Unfortunately, up till now, all known very depth-robust DAGs are impractically complicated and little is known about their exact (i.e. non-asymptotic) depth-robustness both in theory and in practice.

In this work we build and analyze (both formally and empirically) several exceedingly simple and efficient to navigate practical DAGs for use in iMHFs and other applications. For each DAG we:
*Prove that their depth-robustness is asymptotically maximal.
*Prove bounds of at least 3 orders of magnitude better on their exact depth-robustness compared to known bounds for other practical iMHFs.
*Implement and empirically evaluate their depth-robustness and aAT against a variety of state-of-the-art (and several new) depth-reduction and low aAT attacks.
We find that, against all attacks, the new DAGs perform significantly better in practice than Argon2i, the most widely deployed iMHF in practice.

Along the way we also improve the best known empirical attacks on the aAT of Argon2i by implementing and testing several heuristic versions of a (hitherto purely theoretical) depth-reduction attack. Finally, we demonstrate practicality of our constructions by modifying the Argon2i code base to use one of the new high aAT DAGs. Experimental benchmarks on a standard off-the-shelf CPU show that the new modifications do not adversely affect the impressive throughput of Argon2i (despite seemingly enjoying significantly higher aAT).
},
  author       = {Alwen, Joel F and Blocki, Jeremiah and Harsha, Ben},
  booktitle    = {Proceedings of the 2017 ACM SIGSAC Conference on Computer and Communications Security},
  isbn         = {9781450349468},
  location     = {Dallas, TX, USA},
  pages        = {1001--1017},
  publisher    = {ACM Press},
  title        = {{Practical graphs for optimal side-channel resistant memory-hard functions}},
  doi          = {10.1145/3133956.3134031},
  year         = {2017},
}

@article{653,
  abstract     = {The extent of heterogeneity among driver gene mutations present in naturally occurring metastases - that is, treatment-naive metastatic disease - is largely unknown. To address this issue, we carried out 60× whole-genome sequencing of 26 metastases from four patients with pancreatic cancer. We found that identical mutations in known driver genes were present in every metastatic lesion for each patient studied. Passenger gene mutations, which do not have known or predicted functional consequences, accounted for all intratumoral heterogeneity. Even with respect to these passenger mutations, our analysis suggests that the genetic similarity among the founding cells of metastases was higher than that expected for any two cells randomly taken from a normal tissue. The uniformity of known driver gene mutations among metastases in the same patient has critical and encouraging implications for the success of future targeted therapies in advanced-stage disease.},
  author       = {Makohon-Moore, Alvin and Zhang, Ming and Reiter, Johannes and Božić, Ivana and Allen, Benjamin and Kundu, Deepanjan and Chatterjee, Krishnendu and Wong, Fay and Jiao, Yuchen and Kohutek, Zachary and Hong, Jungeui and Attiyeh, Marc and Javier, Breanna and Wood, Laura and Hruban, Ralph and Nowak, Martin and Papadopoulos, Nickolas and Kinzler, Kenneth and Vogelstein, Bert and Iacobuzio-Donahue, Christine},
  issn         = {10614036},
  journal      = {Nature Genetics},
  number       = {3},
  pages        = {358 -- 366},
  publisher    = {Nature Publishing Group},
  title        = {{Limited heterogeneity of known driver gene mutations among the metastases of individual patients with pancreatic cancer}},
  doi          = {10.1038/ng.3764},
  volume       = {49},
  year         = {2017},
}

@article{654,
  abstract     = {In November 2016, developmental biologists, synthetic biologists and engineers gathered in Paris for a meeting called ‘Engineering the embryo’. The participants shared an interest in exploring how synthetic systems can reveal new principles of embryonic development, and how the in vitro manipulation and modeling of development using stem cells can be used to integrate ideas and expertise from physics, developmental biology and tissue engineering. As we review here, the conference pinpointed some of the challenges arising at the intersection of these fields, along with great enthusiasm for finding new approaches and collaborations.},
  author       = {Kicheva, Anna and Rivron, Nicolas},
  issn         = {09501991},
  journal      = {Development},
  number       = {5},
  pages        = {733 -- 736},
  publisher    = {Company of Biologists},
  title        = {{Creating to understand – developmental biology meets engineering in Paris}},
  doi          = {10.1242/dev.144915},
  volume       = {144},
  year         = {2017},
}

@article{655,
  abstract     = {The bacterial flagellum is a self-assembling nanomachine. The external flagellar filament, several times longer than a bacterial cell body, is made of a few tens of thousands subunits of a single protein: flagellin. A fundamental problem concerns the molecular mechanism of how the flagellum grows outside the cell, where no discernible energy source is available. Here, we monitored the dynamic assembly of individual flagella using in situ labelling and real-time immunostaining of elongating flagellar filaments. We report that the rate of flagellum growth, initially ~1,700 amino acids per second, decreases with length and that the previously proposed chain mechanism does not contribute to the filament elongation dynamics. Inhibition of the proton motive force-dependent export apparatus revealed a major contribution of substrate injection in driving filament elongation. The combination of experimental and mathematical evidence demonstrates that a simple, injection-diffusion mechanism controls bacterial flagella growth outside the cell.},
  author       = {Renault, Thibaud and Abraham, Anthony and Bergmiller, Tobias and Paradis, Guillaume and Rainville, Simon and Charpentier, Emmanuelle and Guet, Calin C and Tu, Yuhai and Namba, Keiichi and Keener, James and Minamino, Tohru and Erhardt, Marc},
  issn         = {2050084X},
  journal      = {eLife},
  publisher    = {eLife Sciences Publications},
  title        = {{Bacterial flagella grow through an injection diffusion mechanism}},
  doi          = {10.7554/eLife.23136},
  volume       = {6},
  year         = {2017},
}
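
For the quantitative reader: the fingerprint of an injection-diffusion mechanism is that elongation speed falls off inversely with filament length, so length grows like the square root of time. Schematically (an illustrative back-of-envelope consistent with the abstract, not the paper's full model):

\[
\frac{dL}{dt} \;\propto\; \frac{1}{L}
\qquad\Longrightarrow\qquad
L(t) \;\propto\; \sqrt{t},
\]

which matches the reported observation that the growth rate, initially very high, decreases as the filament gets longer.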

