@inproceedings{605,
  abstract     = {Position-based cryptography (PBC), proposed in the seminal work of Chandran, Goyal, Moriarty, and Ostrovsky (SIAM J. Computing, 2014), aims at constructing cryptographic schemes in which the identity of the user is his geographic position. Chandran et al. construct PBC schemes for secure positioning and position-based key agreement in the bounded-storage model (Maurer, J. Cryptology, 1992). Apart from bounded memory, their security proofs need a strong additional restriction on the power of the adversary: he cannot compute joint functions of his inputs. Removing this assumption is left as an open problem. We show that an answer to this question would resolve a long-standing open problem in multiparty communication complexity: finding a function that is hard to compute with low communication complexity in the simultaneous message model, but easy to compute in the fully adaptive model. On the positive side, we also show implications in the other direction: we prove that lower bounds on the communication complexity of certain multiparty problems imply the existence of PBC primitives. Using this result we then show two attractive ways to “bypass” our hardness result: the first uses the random oracle model, the second weakens the locality requirement in the bounded-storage model to online computability. The random oracle construction is arguably one of the simplest proposed so far in this area. Our results indicate that constructing improved provably secure protocols for PBC requires a better understanding of multiparty communication complexity. This is yet another example where negative results in one area (in our case: lower bounds in multiparty communication complexity) can be used to construct secure cryptographic schemes.},
  author       = {Brody, Joshua and Dziembowski, Stefan and Faust, Sebastian and Pietrzak, Krzysztof Z},
  booktitle    = {Theory of Cryptography},
  editor       = {Kalai, Yael and Reyzin, Leonid},
  isbn         = {978-331970499-9},
  location     = {Baltimore, MD, United States},
  pages        = {56 -- 81},
  publisher    = {Springer},
  title        = {{Position-based cryptography and multiparty communication complexity}},
  doi          = {10.1007/978-3-319-70500-2_3},
  volume       = {10677},
  year         = {2017},
}

@inproceedings{609,
  abstract     = {Several cryptographic schemes and applications are based on functions that are both reasonably efficient to compute and moderately hard to invert, including client puzzles for Denial-of-Service protection, password protection via salted hashes, or recent proof-of-work blockchain systems. Despite their wide use, a definition of this concept has not yet been distilled and formalized explicitly. Instead, either the applications are proven directly based on the assumptions underlying the function, or some property of the function is proven, but the security of the application is argued only informally. The goal of this work is to provide a (universal) definition that decouples the efforts of designing new moderately hard functions and of building protocols based on them, serving as an interface between the two. On a technical level, beyond the mentioned definitions, we instantiate the model for four different notions of hardness. We extend the work of Alwen and Serbinenko (STOC 2015) by providing a general tool for proving security for the first notion of memory-hard functions that allows for provably secure applications. The tool allows us to recover all of the graph-theoretic techniques developed for proving security under the older, non-composable, notion of security used by Alwen and Serbinenko. As an application of our definition of moderately hard functions, we prove the security of two different schemes for proofs of effort (PoE). We also formalize and instantiate the concept of a non-interactive proof of effort (niPoE), in which the proof is not bound to a particular communication context but rather any bit-string chosen by the prover.},
  author       = {Alwen, Joel F and Tackmann, Björn},
  booktitle    = {Theory of Cryptography},
  editor       = {Kalai, Yael and Reyzin, Leonid},
  isbn         = {978-331970499-9},
  location     = {Baltimore, MD, United States},
  pages        = {493 -- 526},
  publisher    = {Springer},
  title        = {{Moderately hard functions: Definition, instantiations, and applications}},
  doi          = {10.1007/978-3-319-70500-2_17},
  volume       = {10677},
  year         = {2017},
}

@article{6196,
  abstract     = {PMAC is a simple and parallel block-cipher mode of operation, which was introduced by Black and Rogaway at Eurocrypt 2002. If instantiated with a (pseudo)random permutation over n-bit strings, PMAC constitutes a provably secure variable input-length (pseudo)random function. For adversaries making q queries, each of length at most l (in n-bit blocks), and of total length σ ≤ ql, the original paper proves an upper bound on the distinguishing advantage of O(σ^2/2^n), while the currently best bound is O(qσ/2^n). In this work we show that this bound is tight by giving an attack with advantage Ω(q^2 l/2^n). In the PMAC construction one initially XORs a mask to every message block, where the mask for the i-th block is computed as τ_i := γ_i · L, where L is a (secret) random value, and γ_i is the i-th codeword of the Gray code. Our attack applies more generally to any sequence of γ_i's which contains a large coset of a subgroup of GF(2^n). We then investigate whether the security of PMAC can be further improved by using τ_i's that are k-wise independent, for k > 1 (the original distribution is only 1-wise independent). We observe that the security of PMAC will not increase in general, even if the masks are chosen from a 2-wise independent distribution, and then prove that the security increases to O(q^2/2^n) if the τ_i are 4-wise independent. Due to simple extension attacks, this is the best bound one can hope for, using any distribution on the masks. Whether 3-wise independence is already sufficient to get this level of security is left as an open problem.},
  author       = {Gazi, Peter and Pietrzak, Krzysztof Z and Rybar, Michal},
  issn         = {2519-173X},
  journal      = {IACR Transactions on Symmetric Cryptology},
  number       = {2},
  pages        = {145--161},
  publisher    = {Ruhr University Bochum},
  title        = {{The exact security of PMAC}},
  doi          = {10.13154/TOSC.V2016.I2.145-161},
  volume       = {2016},
  year         = {2017},
}
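
The mask derivation described in the abstract above is compact enough to sketch. A minimal Python illustration, assuming n = 128 and the common GF(2^128) reduction polynomial x^128 + x^7 + x^2 + x + 1 (the concrete polynomial is an assumption of this sketch, not taken from the paper):

    MOD = (1 << 128) | 0b10000111  # x^128 + x^7 + x^2 + x + 1 (assumed modulus)

    def gf_mul(a: int, b: int) -> int:
        # Shift-and-add multiplication in GF(2^128): XOR accumulates,
        # and a degree-128 overflow is reduced by XORing with MOD.
        r = 0
        while b:
            if b & 1:
                r ^= a
            b >>= 1
            a <<= 1
            if a >> 128:
                a ^= MOD
        return r

    def gray(i: int) -> int:
        # i-th codeword of the standard reflected Gray code.
        return i ^ (i >> 1)

    def mask(L: int, i: int) -> int:
        # Mask XORed into the i-th message block: tau_i = gamma_i * L.
        return gf_mul(gray(i), L)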

@inproceedings{635,
  abstract     = {Memory-hard functions (MHFs) are hash algorithms whose evaluation cost is dominated by memory cost. As memory, unlike computation, costs about the same across different platforms, MHFs cannot be evaluated at significantly lower cost on dedicated hardware like ASICs. MHFs have found widespread applications including password hashing, key derivation, and proofs-of-work. This paper focuses on scrypt, a simple candidate MHF designed by Percival, and described in RFC 7914. It has been used within a number of cryptocurrencies (e.g., Litecoin and Dogecoin) and has been an inspiration for Argon2d, one of the winners of the recent password-hashing competition. Despite its popularity, no rigorous lower bounds on its memory complexity are known. We prove that scrypt is optimally memory-hard, i.e., its cumulative memory complexity (cmc) in the parallel random oracle model is Ω(n^2·w), where w and n are the output length and number of invocations of the underlying hash function, respectively. High cmc is a strong security target for MHFs introduced by Alwen and Serbinenko (STOC’15) which implies high memory cost even for adversaries who can amortize the cost over many evaluations and evaluate the underlying hash functions many times in parallel. Our proof is the first showing optimal memory-hardness for any MHF. Our result improves both quantitatively and qualitatively upon the recent work by Alwen et al. (EUROCRYPT’16) who proved a weaker lower bound of Ω(n^2·w/log^2(n)) for a restricted class of adversaries.},
  author       = {Alwen, Joel F and Chen, Binyi and Pietrzak, Krzysztof Z and Reyzin, Leonid and Tessaro, Stefano},
  booktitle    = {Advances in Cryptology -- EUROCRYPT 2017},
  editor       = {Coron, Jean-Sébastien and Buus Nielsen, Jesper},
  isbn         = {978-331956616-0},
  location     = {Paris, France},
  pages        = {33 -- 62},
  publisher    = {Springer},
  title        = {{Scrypt is maximally memory-hard}},
  doi          = {10.1007/978-3-319-56617-7_2},
  volume       = {10212},
  year         = {2017},
}
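
The ROMix core of scrypt referenced above (RFC 7914) is easy to sketch. A toy Python version, with SHA-256 standing in for scrypt's BlockMix round function (an assumption made for brevity), showing the memory-filling phase and the data-dependent reads that drive the memory-hardness argument:

    import hashlib

    def H(x: bytes) -> bytes:
        # Stand-in round function; real scrypt uses BlockMix/Salsa20/8.
        return hashlib.sha256(x).digest()

    def romix(x: bytes, n: int) -> bytes:
        # Toy ROMix: n hash calls to fill memory, then n data-dependent reads.
        v = []
        for _ in range(n):          # phase 1: store the hash chain in memory
            v.append(x)
            x = H(x)
        for _ in range(n):          # phase 2: index depends on current state
            j = int.from_bytes(x, "big") % n
            x = H(bytes(a ^ b for a, b in zip(x, v[j])))
        return x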

@inproceedings{637,
  abstract     = {For many cryptographic primitives, it is relatively easy to achieve selective security (where the adversary commits a priori to some of the choices to be made later in the attack) but appears difficult to achieve the more natural notion of adaptive security (where the adversary can make all choices on the go as the attack progresses). A series of several recent works shows how to cleverly achieve adaptive security in several such scenarios including generalized selective decryption (Panjwani, TCC ’07 and Fuchsbauer et al., CRYPTO ’15), constrained PRFs (Fuchsbauer et al., ASIACRYPT ’14), and Yao garbled circuits (Jafargholi and Wichs, TCC ’16b). Although the above works expressed vague intuition that they share a common technique, the connection was never made precise. In this work we present a new framework that connects all of these works and allows us to present them in a unified and simplified fashion. Moreover, we use the framework to derive a new result for adaptively secure secret sharing over access structures defined via monotone circuits. We envision that further applications will follow in the future. Underlying our framework is the following simple idea. It is well known that selective security, where the adversary commits to n bits of information about his future choices, automatically implies adaptive security at the cost of amplifying the adversary’s advantage by a factor of up to 2^n. However, in some cases the proof of selective security proceeds via a sequence of hybrids, where each pair of adjacent hybrids locally only requires some smaller partial information consisting of m ≪ n bits. The partial information needed might be completely different between different pairs of hybrids, and if we look across all the hybrids we might rely on the entire n-bit commitment. Nevertheless, the above is sufficient to prove adaptive security, at the cost of amplifying the adversary’s advantage by a factor of only 2^m ≪ 2^n. In all of our examples using the above framework, the different hybrids are captured by some sort of a graph pebbling game and the amount of information that the adversary needs to commit to in each pair of hybrids is bounded by the maximum number of pebbles in play at any point in time. Therefore, coming up with better strategies for proving adaptive security translates to various pebbling strategies for different types of graphs.},
  author       = {Jafargholi, Zahra and Kamath Hosdurg, Chethan and Klein, Karen and Komargodski, Ilan and Pietrzak, Krzysztof Z and Wichs, Daniel},
  booktitle    = {Advances in Cryptology -- CRYPTO 2017},
  editor       = {Katz, Jonathan and Shacham, Hovav},
  isbn         = {978-331963687-0},
  location     = {Santa Barbara, CA, United States},
  pages        = {133 -- 163},
  publisher    = {Springer},
  title        = {{Be adaptive, avoid overcommitting}},
  doi          = {10.1007/978-3-319-63688-7_5},
  volume       = {10401},
  year         = {2017},
}

@inproceedings{640,
  abstract     = {Data-independent memory-hard functions (iMHFs) are finding a growing number of applications in security, especially in the domain of password hashing. An important property of a concrete iMHF is specified by fixing a directed acyclic graph (DAG) G_n on n nodes. The quality of that iMHF is then captured by the following two pebbling complexities of G_n: – The parallel cumulative pebbling complexity Π^∥_cc(G_n) must be as high as possible (to ensure that the amortized cost of computing the function on dedicated hardware is dominated by the cost of memory). – The sequential space-time pebbling complexity Π_st(G_n) should be as close as possible to Π^∥_cc(G_n) (to ensure that using many cores in parallel and amortizing over many instances does not give much of an advantage). In this paper we construct a family of DAGs with best possible parameters in an asymptotic sense, i.e., where Π^∥_cc(G_n) = Ω(n^2/log(n)) (which matches a known upper bound) and Π_st(G_n) is within a constant factor of Π^∥_cc(G_n). Our analysis relies on a new connection between the pebbling complexity of a DAG and its depth-robustness (DR), a well-studied combinatorial property. We show that high DR is sufficient for high Π^∥_cc. Alwen and Blocki (CRYPTO’16) showed that high DR is necessary and so, together, these results fully characterize DAGs with high Π^∥_cc in terms of DR. Complementing these results, we provide new upper and lower bounds on the Π^∥_cc of several important candidate iMHFs from the literature. We give the first lower bounds on the memory hardness of the Catena and Balloon Hashing functions in a parallel model of computation and we give the first lower bounds of any kind for (a version of) Argon2i. Finally we describe a new class of pebbling attacks improving on those of Alwen and Blocki (CRYPTO’16). By instantiating these attacks we upper-bound the Π^∥_cc of the Password Hashing Competition winner Argon2i and one of the Balloon Hashing functions by O(n^1.71). We also show an upper bound of O(n^1.625) for the Catena functions and the two remaining Balloon Hashing functions.},
  author       = {Alwen, Joel F and Blocki, Jeremiah and Pietrzak, Krzysztof Z},
  booktitle    = {Advances in Cryptology -- EUROCRYPT 2017},
  editor       = {Coron, Jean-Sébastien and Buus Nielsen, Jesper},
  isbn         = {978-331956616-0},
  location     = {Paris, France},
  pages        = {3 -- 32},
  publisher    = {Springer},
  title        = {{Depth-robust graphs and their cumulative memory complexity}},
  doi          = {10.1007/978-3-319-56617-7_1},
  volume       = {10212},
  year         = {2017},
}
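
Depth-robustness, the combinatorial property at the heart of the analysis above, is concrete enough to check by brute force on small graphs. A Python sketch of the definition (a DAG is (e, d)-depth-robust if removing any e nodes leaves a directed path of length d); the helper names are illustrative:

    from itertools import combinations

    def longest_path(n, edges, removed):
        # Longest path (in edges) in a DAG on nodes 0..n-1 avoiding `removed`;
        # edges (u, v) are assumed topologically ordered, i.e. u < v.
        best = [0] * n
        for u, v in sorted(edges, key=lambda e: e[1]):
            if u not in removed and v not in removed:
                best[v] = max(best[v], best[u] + 1)
        return max(best)

    def is_depth_robust(n, edges, e, d):
        # (e, d)-depth-robust: every e-subset of nodes leaves a path of length >= d.
        return all(longest_path(n, edges, set(s)) >= d
                   for s in combinations(range(n), e))

    path = [(i, i + 1) for i in range(4)]      # 0 -> 1 -> 2 -> 3 -> 4
    assert is_depth_robust(5, path, 1, 1)      # any single deletion leaves an edge
    assert not is_depth_robust(5, path, 1, 2)  # deleting node 2 kills all depth-2 paths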

@inproceedings{648,
  abstract     = {Pseudoentropy has found many important applications in cryptography and complexity theory. In this paper we focus on a foundational problem that has not been investigated so far, namely: by how much does pseudoentropy (the amount seen by computationally bounded attackers) differ from its information-theoretic counterpart (seen by unbounded observers), given certain limits on the attacker’s computational power? We provide the following answer for HILL pseudoentropy, which exhibits a threshold behavior around a size exponential in the entropy amount: – If the attacker size (s) and advantage (ε) satisfy s (formula presented), where k is the claimed amount of pseudoentropy, then the pseudoentropy boils down to the information-theoretic smooth entropy. – If s (formula presented), then pseudoentropy could be arbitrarily bigger than the information-theoretic smooth entropy. Besides answering the posed question, we show an elegant application of our result to complexity theory, namely that it implies the classical result on the existence of functions hard to approximate (due to Pippenger). In our approach we utilize non-constructive techniques: the duality of linear programming and the probabilistic method.},
  author       = {Skórski, Maciej},
  booktitle    = {Theory and Applications of Models of Computation},
  editor       = {Jäger, Gerhard and Steila, Silvia},
  isbn         = {978-331955910-0},
  location     = {Bern, Switzerland},
  pages        = {600 -- 613},
  publisher    = {Springer},
  title        = {{On the complexity of breaking pseudoentropy}},
  doi          = {10.1007/978-3-319-55911-7_43},
  volume       = {10185},
  year         = {2017},
}

@inproceedings{650,
  abstract     = {In this work we present a short and unified proof of the Strong and Weak Regularity Lemmas, based on the cryptographic technique called low-complexity approximations. In short, both problems reduce to the task of constructively finding an approximation of a certain target function under a class of distinguishers (test functions), where distinguishers are combinations of simple rectangle indicators. In our case these approximations can be learned by a simple iterative procedure, which yields a unified and simple proof, achieving explicit bounds on the partition size for any graph with density d and any approximation parameter. The novelty in our proof is: (a) a simple approach which yields both the strong and the weak variant, and (b) improvements when d = o(1). At an abstract level, our proof can be seen as a refinement and simplification of the “analytic” proof given by Lovász and Szegedy.},
  author       = {Skórski, Maciej},
  booktitle    = {Theory and Applications of Models of Computation},
  editor       = {Jäger, Gerhard and Steila, Silvia},
  issn         = {0302-9743},
  location     = {Bern, Switzerland},
  pages        = {586 -- 599},
  publisher    = {Springer},
  title        = {{A cryptographic view of regularity lemmas: Simpler unified proofs and refined bounds}},
  doi          = {10.1007/978-3-319-55911-7_42},
  volume       = {10185},
  year         = {2017},
}

@inproceedings{6526,
  abstract     = {This paper studies the complexity of estimating Rényi divergences of a discrete distribution p, observed from samples, from a baseline distribution q known a priori. Extending the results of Acharya et al. (SODA'15) on estimating Rényi entropy, we present improved estimation techniques together with upper and lower bounds on the sample complexity. We show that, contrary to estimating Rényi entropy, where a sublinear (in the alphabet size) number of samples suffices, the sample complexity is heavily dependent on events that are unlikely under q, and is unbounded in general (no matter what estimation technique is used). For any divergence of integer order bigger than 1, we provide upper and lower bounds on the number of samples dependent on the probabilities of p and q (the lower bounds hold for non-integer orders as well). We conclude that the worst-case sample complexity is polynomial in the alphabet size if and only if the probabilities of q are non-negligible. This gives theoretical insight into heuristics used in the applied literature to handle numerical instability, which occurs for small probabilities of q. Our result shows that they should be handled with care not only because of numerical issues, but also because of a blow-up in the sample complexity.},
  author       = {Skórski, Maciej},
  booktitle    = {2017 IEEE International Symposium on Information Theory (ISIT)},
  isbn         = {9781509040964},
  location     = {Aachen, Germany},
  publisher    = {IEEE},
  title        = {{On the complexity of estimating Rényi divergences}},
  doi          = {10.1109/isit.2017.8006529},
  year         = {2017},
}
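
For integer order α > 1, the divergence in question is D_α(p||q) = (1/(α−1))·log Σ_x p(x)^α·q(x)^(1−α). A Python sketch of the natural plug-in estimator with known baseline q (a simplified illustration, not the paper's improved estimator):

    from collections import Counter
    from math import log

    def renyi_divergence_plugin(samples, q, alpha):
        # Plug-in estimate of D_alpha(p || q) for integer alpha > 1:
        # replace p by the empirical distribution of the samples.
        # q maps each symbol to its known, nonzero baseline probability;
        # small q-probabilities blow up the estimate, mirroring the
        # sample-complexity phenomenon discussed in the abstract.
        assert alpha > 1
        m = len(samples)
        freq = Counter(samples)
        s = sum((c / m) ** alpha / q[x] ** (alpha - 1) for x, c in freq.items())
        return log(s) / (alpha - 1)

    # e.g. renyi_divergence_plugin("aabab", {"a": 0.5, "b": 0.5}, alpha=2)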

@inproceedings{6527,
  abstract     = {A memory-hard function (MHF) f_n with parameter n can be computed in sequential time and space n. Simultaneously, a high amortized parallel area-time complexity (aAT) is incurred per evaluation. In practice, MHFs are used to limit the rate at which an adversary (using a custom computational device) can evaluate a security-sensitive function that still occasionally needs to be evaluated by honest users (using an off-the-shelf general-purpose device). The most prevalent examples of such sensitive functions are Key Derivation Functions (KDFs) and password hashing algorithms, where rate limits help mitigate off-line dictionary attacks. As the honest users' inputs to these functions are often (low-entropy) passwords, special attention is given to a class of side-channel resistant MHFs called iMHFs.

Essentially all iMHFs can be viewed as some mode of operation (making n calls to some round function) given by a directed acyclic graph (DAG) with very low indegree. Recently, a combinatorial property of a DAG (called "depth-robustness") has been identified which results in good provable security for an iMHF based on that DAG. Depth-robust DAGs have also proven useful in other cryptographic applications. Unfortunately, until now, all known very depth-robust DAGs have been impractically complicated, and little is known about their exact (i.e. non-asymptotic) depth-robustness, both in theory and in practice.

In this work we build and analyze (both formally and empirically) several exceedingly simple and efficient-to-navigate practical DAGs for use in iMHFs and other applications. For each DAG we:
*Prove that their depth-robustness is asymptotically maximal.
*Prove bounds on their exact depth-robustness that are at least 3 orders of magnitude better than known bounds for other practical iMHFs.
*Implement and empirically evaluate their depth-robustness and aAT against a variety of state-of-the-art (and several new) depth-reduction and low-aAT attacks.
We find that, against all attacks, the new DAGs perform significantly better in practice than Argon2i, the most widely deployed iMHF.

Along the way we also improve the best known empirical attacks on the aAT of Argon2i by implementing and testing several heuristic versions of a (hitherto purely theoretical) depth-reduction attack. Finally, we demonstrate the practicality of our constructions by modifying the Argon2i code base to use one of the new high-aAT DAGs. Experimental benchmarks on a standard off-the-shelf CPU show that the new modifications do not adversely affect the impressive throughput of Argon2i (despite seemingly enjoying significantly higher aAT).
},
  author       = {Alwen, Joel F and Blocki, Jeremiah and Harsha, Ben},
  booktitle    = {Proceedings of the 2017 ACM SIGSAC Conference on Computer and Communications Security},
  isbn         = {9781450349468},
  location     = {Dallas, TX, USA},
  pages        = {1001--1017},
  publisher    = {ACM Press},
  title        = {{Practical graphs for optimal side-channel resistant memory-hard functions}},
  doi          = {10.1145/3133956.3134031},
  year         = {2017},
}

@article{1187,
  abstract     = {We construct efficient authentication protocols and message authentication codes (MACs) whose security can be reduced to the learning parity with noise (LPN) problem. Despite a large body of work—starting with the HB protocol of Hopper and Blum in 2001—until now it was not even known how to construct an efficient authentication protocol from LPN which is secure against man-in-the-middle attacks. A MAC implies such a (two-round) protocol.},
  author       = {Kiltz, Eike and Pietrzak, Krzysztof Z and Venturi, Daniele and Cash, David and Jain, Abhishek},
  journal      = {Journal of Cryptology},
  number       = {4},
  pages        = {1238 -- 1275},
  publisher    = {Springer},
  title        = {{Efficient authentication from hard learning problems}},
  doi          = {10.1007/s00145-016-9247-3},
  volume       = {30},
  year         = {2017},
}
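
The HB protocol mentioned above fits in a few lines: the prover answers random challenges with noisy inner products over GF(2). A toy Python sketch with illustrative parameters and acceptance threshold (as the paper stresses, such passively secure protocols fail against man-in-the-middle attacks, which its MACs address):

    import random

    def hb_round(secret, tau):
        # One HB round: random challenge a, noisy response <a, s> + e over GF(2).
        n = len(secret)
        a = [random.getrandbits(1) for _ in range(n)]
        e = 1 if random.random() < tau else 0
        z = (sum(ai & si for ai, si in zip(a, secret)) % 2) ^ e
        return a, z

    def verify(secret, transcript, tau):
        # Accept if the fraction of wrong answers is close to the noise rate tau.
        errs = sum((sum(ai & si for ai, si in zip(a, secret)) % 2) != z
                   for a, z in transcript)
        return errs <= (tau + 0.1) * len(transcript)  # illustrative threshold

    secret = [random.getrandbits(1) for _ in range(128)]
    transcript = [hb_round(secret, tau=0.125) for _ in range(256)]
    assert verify(secret, transcript, tau=0.125)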

@article{1177,
  abstract     = {Boldyreva, Palacio and Warinschi introduced a multiple forking game as an extension of general forking. The notion of (multiple) forking is a useful abstraction from the actual simulation of a cryptographic scheme to the adversary in a security reduction, and is achieved through the intermediary of a so-called wrapper algorithm. Multiple forking has turned out to be a useful tool in the security arguments of several cryptographic protocols. However, a reduction employing multiple forking incurs a significant security degradation of O(q^{2n}), where q denotes the upper bound on the underlying random oracle calls and n, the number of forkings. In this work we take a closer look at the reasons for the degradation with a tighter security bound in mind. We nail down the exact set of conditions for success in the multiple forking game. A careful analysis of the cryptographic schemes and the corresponding security reductions employing multiple forking leads to the formulation of ‘dependence’ and ‘independence’ conditions pertaining to the output of the wrapper in different rounds. Based on the (in)dependence conditions we propose a general framework of multiple forking and a General Multiple Forking Lemma. Leveraging (in)dependence to the full allows us to improve the degradation factor in the multiple forking game by a factor of O(q^n). By implication, the cost of a single forking involving two random oracles (augmented forking) matches that involving a single random oracle (elementary forking). Finally, we study the effect of these observations on the concrete security of existing schemes employing multiple forking. We conclude that by careful design of the protocol (and the wrapper in the security reduction) it is possible to harness our observations to the full extent.},
  author       = {Kamath Hosdurg, Chethan and Chatterjee, Sanjit},
  journal      = {Algorithmica},
  number       = {4},
  pages        = {1321 -- 1362},
  publisher    = {Springer},
  title        = {{A closer look at multiple-forking: Leveraging (in)dependence for a tighter bound}},
  doi          = {10.1007/s00453-015-9997-6},
  volume       = {74},
  year         = {2016},
}
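
The elementary forking that multiple forking generalizes can be sketched compactly: run the adversary, rewind to its critical random-oracle query, and rerun with fresh answers from that point on. A toy Python sketch; the adversary interface is an illustrative assumption:

    import random

    def fork(adversary, q):
        # Elementary general-forking wrapper (in the style of Bellare-Neven).
        # `adversary` is assumed to take (random tape, list of q oracle answers)
        # and return (i, out) where i is the index of its critical query,
        # or (None, _) on failure; this interface is illustrative only.
        rho = random.random()                          # adversary's random tape
        h1 = [random.getrandbits(128) for _ in range(q)]
        i, out1 = adversary(rho, h1)
        if i is None:
            return None
        h2 = h1[:i] + [random.getrandbits(128) for _ in range(q - i)]
        j, out2 = adversary(rho, h2)                   # the fork: same prefix
        if j == i and h1[i] != h2[i]:                  # both runs hit query i
            return out1, out2                          # e.g. two related outputs
        return None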

@inproceedings{1179,
  abstract     = {Computational notions of entropy have recently found many applications, including leakage-resilient cryptography, deterministic encryption or memory delegation. The two main types of results which make computational notions so useful are (1) chain rules, which quantify by how much the computational entropy of a variable decreases if conditioned on some other variable, and (2) transformations, which quantify to which extent one type of entropy implies another.

Such chain rules and transformations typically lose a significant amount in quality of the entropy, and are the reason why applying these results one gets rather weak quantitative security bounds. In this paper we for the first time prove lower bounds in this context, showing that existing results for transformations are, unfortunately, basically optimal for non-adaptive black-box reductions (and it is hard to imagine how non-black-box reductions or adaptivity could be useful here).

A variable X has k bits of HILL entropy of quality (ε, s) if there exists a variable Y with k bits of min-entropy which cannot be distinguished from X with advantage ε by distinguishing circuits of size s. A weaker notion is Metric entropy, where we switch quantifiers, and only require that for every distinguisher of size s such a Y exists.

We first describe our result concerning transformations. By definition, HILL implies Metric without any loss in quality. Metric entropy often comes up in applications, but must be transformed to HILL for meaningful security guarantees. The best known result states that if a variable X has k bits of Metric entropy of quality (ε, s), then it has k bits of HILL entropy with quality (2ε, s·ε^2). We show that this loss of a factor Ω(1/ε^2) in circuit size is necessary. In fact, we show the stronger result that this loss is already necessary when transforming so-called deterministic real-valued Metric entropy to randomized boolean Metric entropy (both these variants of Metric entropy are implied by HILL without loss in quality).

The chain rule for HILL entropy states that if X has k bits of HILL entropy of quality (ε, s), then for any variable Z of length m, X conditioned on Z has k−m bits of HILL entropy with quality (ε, s·ε^2/2^m). We show that a loss of Ω(2^m/ε) in circuit size is necessary here. Note that this still leaves a gap of ε between the known bound and our lower bound.},
  author       = {Pietrzak, Krzysztof Z and Skórski, Maciej},
  booktitle    = {Theory of Cryptography},
  location     = {Beijing, China},
  pages        = {183 -- 203},
  publisher    = {Springer},
  title        = {{Pseudoentropy: Lower-bounds for chain rules and transformations}},
  doi          = {10.1007/978-3-662-53641-4_8},
  volume       = {9985},
  year         = {2016},
}

@inproceedings{1653,
  abstract     = {A somewhere statistically binding (SSB) hash, introduced by Hubáček and Wichs (ITCS ’15), can be used to hash a long string x to a short digest y = H_hk(x) using a public hashing key hk. Furthermore, there is a way to set up the hash key hk to make it statistically binding on some arbitrary hidden position i, meaning that: (1) the digest y completely determines the i-th bit (or symbol) of x, so that all pre-images of y have the same value in the i-th position, (2) it is computationally infeasible to distinguish the position i on which hk is statistically binding from any other position i'. Lastly, the hash should have a local opening property analogous to Merkle-tree hashing, meaning that given x and y = H_hk(x) it should be possible to create a short proof π that certifies the value of the i-th bit (or symbol) of x without having to provide the entire input x. A similar primitive called a positional accumulator, introduced by Koppula, Lewko and Waters (STOC ’15), further supports dynamic updates of the hashed value. These tools, which are interesting in their own right, also serve as one of the main technical components in several recent works building advanced applications from indistinguishability obfuscation (iO).

The prior constructions of SSB hashing and positional accumulators required fully homomorphic encryption (FHE) and iO, respectively. In this work, we give new constructions of these tools based on well-studied number-theoretic assumptions such as DDH, Phi-Hiding and DCR, as well as a general construction from lossy/injective functions.},
  author       = {Okamoto, Tatsuaki and Pietrzak, Krzysztof Z and Waters, Brent and Wichs, Daniel},
  booktitle    = {Advances in Cryptology -- ASIACRYPT 2015},
  location     = {Auckland, New Zealand},
  pages        = {121 -- 145},
  publisher    = {Springer},
  title        = {{New realizations of somewhere statistically binding hashing and positional accumulators}},
  doi          = {10.1007/978-3-662-48797-6_6},
  volume       = {9452},
  year         = {2015},
}
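
The local-opening property referenced above is the Merkle-tree mechanism. A minimal Python sketch of hashing with local openings (the SSB key setup and the hidden binding position, the paper's actual subject, are not modeled here):

    import hashlib

    def H(a: bytes, b: bytes) -> bytes:
        return hashlib.sha256(a + b).digest()

    def build(leaves):
        # Merkle tree over 2^k leaves; returns all levels, root is levels[-1][0].
        levels = [[hashlib.sha256(x).digest() for x in leaves]]
        while len(levels[-1]) > 1:
            lvl = levels[-1]
            levels.append([H(lvl[i], lvl[i + 1]) for i in range(0, len(lvl), 2)])
        return levels

    def open_at(levels, i):
        # Local opening for position i: the sibling hash on each level.
        path = []
        for lvl in levels[:-1]:
            path.append(lvl[i ^ 1])
            i //= 2
        return path

    def verify(root, leaf, i, path):
        h = hashlib.sha256(leaf).digest()
        for sib in path:
            h = H(h, sib) if i % 2 == 0 else H(sib, h)
            i //= 2
        return h == root

    levels = build([b"a", b"b", b"c", b"d"])
    assert verify(levels[-1][0], b"c", 2, open_at(levels, 2))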

@article{1479,
  abstract     = {Most entropy notions H(·) like Shannon or min-entropy satisfy a chain rule stating that for random variables X, Z, and A we have H(X|Z,A) ≥ H(X|Z) − |A|. That is, by conditioning on A the entropy of X can decrease by at most the bitlength |A| of A. Such chain rules are known to hold for some computational entropy notions like Yao’s and unpredictability entropy. For HILL entropy, the computational analogue of min-entropy, the chain rule is of special interest and has found many applications, including leakage-resilient cryptography, deterministic encryption, and memory delegation. These applications rely on restricted special cases of the chain rule. Whether the chain rule for conditional HILL entropy holds in general was an open problem for which we give a strong negative answer: we construct joint distributions (X, Z, A), where A is a distribution over a single bit, such that the HILL entropy H^HILL(X|Z) is large but H^HILL(X|Z,A) is basically zero.

Our counterexample makes only the minimal assumption that NP ⊈ P/poly. Under the stronger assumption that injective one-way functions exist, we can make all the distributions efficiently samplable.

Finally, we show that some more sophisticated cryptographic objects like lossy functions can be used to sample a distribution constituting a counterexample to the chain rule, making only a single invocation to the underlying object.},
  author       = {Krenn, Stephan and Pietrzak, Krzysztof Z and Wadia, Akshay and Wichs, Daniel},
  journal      = {Computational Complexity},
  number       = {3},
  pages        = {567 -- 605},
  publisher    = {Springer},
  title        = {{A counterexample to the chain rule for conditional HILL entropy}},
  doi          = {10.1007/s00037-015-0120-9},
  volume       = {25},
  year         = {2016},
}

@article{1592,
  abstract     = {A modular approach to constructing cryptographic protocols leads to simple designs but often inefficient instantiations. On the other hand, ad hoc constructions may yield efficient protocols at the cost of losing conceptual simplicity. We suggest a new design paradigm, structure-preserving cryptography, that provides a way to construct modular protocols with reasonable efficiency while retaining conceptual simplicity. A cryptographic scheme over a bilinear group is called structure-preserving if its public inputs and outputs consist of elements from the bilinear groups and their consistency can be verified by evaluating pairing-product equations. As structure-preserving schemes smoothly interoperate with each other, they are useful as building blocks in modular design of cryptographic applications. This paper introduces structure-preserving commitment and signature schemes over bilinear groups with several desirable properties. The commitment schemes include homomorphic, trapdoor and length-reducing commitments to group elements, and the structure-preserving signature schemes are the first ones that yield constant-size signatures on multiple group elements. A structure-preserving signature scheme is called automorphic if the public keys lie in the message space, which cannot be achieved by compressing inputs via a cryptographic hash function, as this would destroy the mathematical structure we are trying to preserve. Automorphic signatures can be used for building certification chains underlying privacy-preserving protocols. Among a vast number of applications of structure-preserving protocols, we present an efficient round-optimal blind-signature scheme and a group signature scheme with an efficient and concurrently secure protocol for enrolling new members.},
  author       = {Abe, Masayuki and Fuchsbauer, Georg and Groth, Jens and Haralambiev, Kristiyan and Ohkubo, Miyako},
  journal      = {Journal of Cryptology},
  number       = {2},
  pages        = {363 -- 421},
  publisher    = {Springer},
  title        = {{Structure-preserving signatures and commitments to group elements}},
  doi          = {10.1007/s00145-014-9196-7},
  volume       = {29},
  year         = {2016},
}

@inproceedings{1365,
  abstract     = {A memory-hard function (MHF) f is equipped with a space cost σ and a time cost τ parameter such that repeatedly computing f_{σ,τ} on an application-specific integrated circuit (ASIC) is not economically advantageous relative to a general-purpose computer. Technically, we would like that any (generalized) circuit for evaluating an iMHF f_{σ,τ} has area × time (AT) complexity of Θ(σ^2·τ). A data-independent MHF (iMHF) has the added property that it can be computed with almost optimal memory and time complexity by an algorithm which accesses memory in a pattern independent of the input value. Such functions can be specified by fixing a directed acyclic graph (DAG) G on n = Θ(σ·τ) nodes representing its computation graph. In this work we develop new tools for analyzing iMHFs. First we define and motivate a new complexity measure capturing the amount of energy (i.e. electricity) required to compute a function. We argue that, in practice, this measure is at least as important as the more traditional AT-complexity. Next we describe an algorithm A for repeatedly evaluating an iMHF based on an arbitrary DAG G. We upper-bound both its energy and AT complexities per instance evaluated in terms of a certain combinatorial property of G. Next we instantiate our attack for several general classes of DAGs which include those underlying many of the most important iMHF candidates in the literature. In particular, we obtain the following results, which hold for all choices of parameters σ and τ (and thread-count) such that n = σ·τ. – The Catena-Dragonfly function of [FLW13] has AT and energy complexities O(n^1.67). – The Catena-Butterfly function of [FLW13] has complexities in O(n^1.67). – The Double-Buffer and the Linear functions of [CGBS16] both have complexities in O(n^1.67). – The Argon2i function of [BDK15] (winner of the Password Hashing Competition [PHC]) has complexities O(n^{7/4} log(n)). – The Single-Buffer function of [CGBS16] has complexities O(n^{7/4} log(n)). – Any iMHF can be computed by an algorithm with complexities O(n^2/log^{1−ε}(n)) for all ε > 0. In particular, when τ = 1 this shows that the goal of constructing an iMHF with AT-complexity Θ(σ^2·τ) is unachievable. Along the way we prove a lemma upper-bounding the depth-robustness of any DAG which may prove to be of independent interest.},
  author       = {Alwen, Joel F and Blocki, Jeremiah},
  booktitle    = {Advances in Cryptology -- CRYPTO 2016},
  location     = {Santa Barbara, CA, USA},
  pages        = {241 -- 271},
  publisher    = {Springer},
  title        = {{Efficiently computing data-independent memory-hard functions}},
  doi          = {10.1007/978-3-662-53008-5_9},
  volume       = {9815},
  year         = {2016},
}

@inproceedings{1366,
  abstract     = {We study the problem of devising provably secure PRNGs with input based on the sponge paradigm. Such constructions are very appealing, as efficient software/hardware implementations of SHA-3 can easily be translated into a PRNG in a nearly black-box way. The only existing sponge-based construction, proposed by Bertoni et al. (CHES 2010), fails to achieve the security notion of robustness recently considered by Dodis et al. (CCS 2013), for two reasons: (1) The construction is deterministic, and thus there are high-entropy input distributions on which the construction fails to extract random bits, and (2) The construction is not forward secure, and the solutions proposed for restoring forward security have not been rigorously analyzed. We propose a seeded variant of Bertoni et al.’s PRNG with input which we prove secure in the sense of robustness, delivering in particular concrete security bounds. On the way, we make what we believe to be an important conceptual contribution, developing a variant of the security framework of Dodis et al. tailored to the ideal permutation model that captures PRNG security in settings where the weakly random inputs are provided from a large class of possible adversarial samplers which are also allowed to query the random permutation. As a further application of our techniques, we also present an efficient sponge-based key-derivation function (which can be instantiated from SHA-3 in a black-box fashion), which we also prove secure when fed with samples from permutation-dependent distributions.},
  author       = {Gazi, Peter and Tessaro, Stefano},
  booktitle    = {Advances in Cryptology -- EUROCRYPT 2016},
  location     = {Vienna, Austria},
  pages        = {87 -- 116},
  publisher    = {Springer},
  title        = {{Provably robust sponge-based PRNGs and KDFs}},
  doi          = {10.1007/978-3-662-49890-3_4},
  volume       = {9665},
  year         = {2016},
}
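
The sponge-based PRNG paradigm studied above can be sketched in a few lines. A toy Python version with SHA-256 standing in for the public permutation and an overwrite-style state update as a rough stand-in for the forward-security mechanism (all parameters illustrative; the paper's seeded, robust construction differs in the details):

    import hashlib

    RATE, CAP = 16, 16   # toy 32-byte state: 16-byte rate, 16-byte capacity

    def perm(state: bytes) -> bytes:
        # Stand-in for the public permutation (e.g. Keccak-f); SHA-256 is
        # one-way rather than invertible, which is fine for illustration only.
        return hashlib.sha256(state).digest()

    class SpongePRNG:
        def __init__(self, seed: bytes):
            self.state = perm(seed.ljust(RATE + CAP, b"\0"))

        def absorb(self, inp: bytes):
            # Absorb weakly random input block-wise into the rate part.
            for i in range(0, len(inp), RATE):
                block = inp[i:i + RATE].ljust(RATE, b"\0")
                rate = bytes(a ^ b for a, b in zip(self.state[:RATE], block))
                self.state = perm(rate + self.state[RATE:])

        def next(self) -> bytes:
            # Squeeze one output block, then overwrite the rate with zeros
            # before permuting so past outputs cannot be recomputed from the
            # new state (a rough overwrite-style forward-security step).
            out = self.state[:RATE]
            self.state = perm(bytes(RATE) + self.state[RATE:])
            return out

    prng = SpongePRNG(b"initial seed")
    prng.absorb(b"fresh entropy from the environment")
    block = prng.next()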

@inproceedings{1225,
  abstract     = {At Crypto 2015 Fuchsbauer, Hanser and Slamanig (FHS) presented the first standard-model construction of efficient round-optimal blind signatures that does not require complexity leveraging. It is conceptually simple and builds on the primitive of structure-preserving signatures on equivalence classes (SPS-EQ). FHS prove the unforgeability of their scheme assuming EUF-CMA security of the SPS-EQ scheme and hardness of a version of the DH inversion problem. Blindness under adversarially chosen keys is proven under an interactive variant of the DDH assumption. We propose a variant of their scheme whose blindness can be proven under a non-interactive assumption, namely a variant of the bilinear DDH assumption. We moreover prove its unforgeability assuming only unforgeability of the underlying SPS-EQ scheme, without the additional assumptions needed for the FHS scheme.},
  author       = {Fuchsbauer, Georg and Hanser, Christian and Kamath Hosdurg, Chethan and Slamanig, Daniel},
  booktitle    = {Security and Cryptography for Networks},
  location     = {Amalfi, Italy},
  pages        = {391 -- 408},
  publisher    = {Springer},
  title        = {{Practical round-optimal blind signatures in the standard model from weaker assumptions}},
  doi          = {10.1007/978-3-319-44618-9_21},
  volume       = {9841},
  year         = {2016},
}

@inproceedings{1229,
  abstract     = {Witness encryption (WE) was introduced by Garg et al. [GGSW13]. A WE scheme is defined for some NP language L and lets a sender encrypt messages relative to instances x. A ciphertext for x can be decrypted using a witness w for x ∈ L, but hides the message if x ∉ L. Garg et al. construct WE from multilinear maps and give another construction [GGH+13b] using indistinguishability obfuscation (iO) for circuits. Due to the reliance on such heavy tools, WE can currently hardly be implemented on powerful hardware and is unlikely to be realizable on constrained devices like smart cards any time soon. We construct a WE scheme where encryption is done by simply computing a Naor-Yung ciphertext (two CPA encryptions and a NIZK proof). To achieve this, our scheme has a setup phase, which outputs public parameters containing an obfuscated circuit (only required for decryption), two encryption keys and a common reference string (used for encryption). This setup need only be run once, and the parameters can be used for arbitrarily many encryptions. Our scheme can also be turned into a functional WE scheme, where a message is encrypted w.r.t. a statement and a function f, and decryption with a witness w yields f(m, w). Our construction is inspired by the functional encryption scheme by Garg et al., and we prove (selective) security assuming iO and statistically simulation-sound NIZK. We give a construction of the latter in bilinear groups and, combining it with ElGamal encryption, our ciphertexts are of size 1.3 kB at a 128-bit security level and can be computed on a smart card.},
  author       = {Abusalah, Hamza M and Fuchsbauer, Georg and Pietrzak, Krzysztof Z},
  booktitle    = {Applied Cryptography and Network Security},
  location     = {Guildford, UK},
  pages        = {285 -- 303},
  publisher    = {Springer},
  title        = {{Offline witness encryption}},
  doi          = {10.1007/978-3-319-39555-5_16},
  volume       = {9696},
  year         = {2016},
}

