@article{1619,
  abstract     = {The emergence of drug resistant pathogens is a serious public health problem. It is a long-standing goal to predict rates of resistance evolution and design optimal treatment strategies accordingly. To this end, it is crucial to reveal the underlying causes of drug-specific differences in the evolutionary dynamics leading to resistance. However, it remains largely unknown why the rates of resistance evolution via spontaneous mutations and the diversity of mutational paths vary substantially between drugs. Here we comprehensively quantify the distribution of fitness effects (DFE) of mutations, a key determinant of evolutionary dynamics, in the presence of eight antibiotics representing the main modes of action. Using precise high-throughput fitness measurements for genome-wide Escherichia coli gene deletion strains, we find that the width of the DFE varies dramatically between antibiotics and, contrary to conventional wisdom, for some drugs the DFE width is lower than in the absence of stress. We show that this previously underappreciated divergence in DFE width among antibiotics is largely caused by their distinct drug-specific dose-response characteristics. Unlike the DFE, the magnitude of the changes in tolerated drug concentration resulting from genome-wide mutations is similar for most drugs but exceptionally small for the antibiotic nitrofurantoin, i.e., mutations generally have considerably smaller resistance effects for nitrofurantoin than for other drugs. A population genetics model predicts that resistance evolution for drugs with this property is severely limited and confined to reproducible mutational paths. We tested this prediction in laboratory evolution experiments using the “morbidostat”, a device for evolving bacteria in well-controlled drug environments. Nitrofurantoin resistance indeed evolved extremely slowly via reproducible mutations—an almost paradoxical behavior since this drug causes DNA damage and increases the mutation rate. Overall, we identified novel quantitative characteristics of the evolutionary landscape that provide the conceptual foundation for predicting the dynamics of drug resistance evolution.},
  author       = {Chevereau, Guillaume and Dravecka, Marta and Batur, Tugce and Guvenek, Aysegul and Ayhan, Dilay and Toprak, Erdal and Bollenbach, Mark Tobias},
  journal      = {PLoS Biology},
  number       = {11},
  publisher    = {Public Library of Science},
  title        = {{Quantifying the determinants of evolutionary dynamics leading to drug resistance}},
  doi          = {10.1371/journal.pbio.1002299},
  volume       = {13},
  year         = {2015},
}

@article{1623,
  abstract     = {Background: Photosynthetic cyanobacteria are attractive for a range of biotechnological applications including biofuel production. However, due to slow growth, screening of mutant libraries using microtiter plates is not feasible.
Results: We present a method for high-throughput, single-cell analysis and sorting of genetically engineered L-lactate-producing strains of Synechocystis sp. PCC6803. A microfluidic device is used to encapsulate single cells in picoliter droplets, assay the droplets for L-lactate production, and sort strains with high productivity. We demonstrate the separation of low- and high-producing reference strains, as well as enrichment of a more productive L-lactate-synthesizing population after UV-induced mutagenesis. The droplet platform also revealed population heterogeneity in photosynthetic growth and lactate production, as well as the presence of metabolically stalled cells.
Conclusions: The workflow will facilitate metabolic engineering and directed evolution studies and will be useful in studies of cyanobacteria biochemistry and physiology.},
  author       = {Hammar, Petter and Angermayr, Andreas and Sjostrom, Staffan and Van Der Meer, Josefin and Hellingwerf, Klaas and Hudson, Elton and Joensson, Haakan},
  journal      = {Biotechnology for Biofuels},
  number       = {1},
  publisher    = {BioMed Central},
  title        = {{Single-cell screening of photosynthetic growth and lactate production by cyanobacteria}},
  doi          = {10.1186/s13068-015-0380-2},
  volume       = {8},
  year         = {2015},
}

@article{1624,
  abstract     = {Population structure can facilitate evolution of cooperation. In a structured population, cooperators can form clusters which resist exploitation by defectors. Recently, it was observed that a shift update rule is an extremely strong amplifier of cooperation in a one-dimensional spatial model. For the shift update rule, an individual is chosen for reproduction proportional to fecundity; the offspring is placed next to the parent; a random individual dies. Subsequently, the population is rearranged (shifted) until all individual cells are again evenly spaced out. For large population size and a one-dimensional population structure, the shift update rule favors cooperation for any benefit-to-cost ratio greater than one. But every attempt to generalize shift updating to higher dimensions while maintaining its strong effect has failed. The reason is that in two dimensions the clusters are fragmented by the movements caused by rearranging the cells. Here we introduce the natural phenomenon of a repulsive force between cells of different types. After a birth and death event, the cells are rearranged so as to minimize the overall energy expenditure. If the repulsive force is sufficiently high, shift becomes a strong promoter of cooperation in two dimensions.},
  author       = {Pavlogiannis, Andreas and Chatterjee, Krishnendu and Adlam, Ben and Nowak, Martin},
  journal      = {Scientific Reports},
  publisher    = {Nature Publishing Group},
  title        = {{Cellular cooperation with shift updating and repulsion}},
  doi          = {10.1038/srep17147},
  volume       = {5},
  year         = {2015},
}

@article{1628,
  abstract     = {We propose a method for fabricating deformable objects with spatially varying elasticity using 3D printing. Using a single, relatively stiff printer material, our method designs an assembly of small-scale microstructures that have the effect of a softer material at the object scale, with properties depending on the microstructure used in each part of the object. We build on work in the area of metamaterials, using numerical optimization to design tiled microstructures with desired properties, but with the key difference that our method designs families of related structures that can be interpolated to smoothly vary the material properties over a wide range. To create an object with spatially varying elastic properties, we tile the object's interior with microstructures drawn from these families, generating a different microstructure for each cell using an efficient algorithm to select compatible structures for neighboring cells. We show results computed for both 2D and 3D objects, validating several 2D and 3D printed structures using standard material tests as well as demonstrating various example applications.},
  author       = {Schumacher, Christian and Bickel, Bernd and Rys, Jan and Marschner, Steve and Daraio, Chiara and Gross, Markus},
  journal      = {ACM Transactions on Graphics},
  location     = {Los Angeles, CA, USA},
  number       = {4},
  publisher    = {ACM},
  title        = {{Microstructures to control elasticity in 3D printing}},
  doi          = {10.1145/2766926},
  volume       = {34},
  year         = {2015},
}

@article{1630,
  abstract     = {We present a method to learn and propagate shape placements in 2D polygonal scenes from a few examples provided by a user. The placement of a shape is modeled as an oriented bounding box. Simple geometric relationships between this bounding box and nearby scene polygons define a feature set for the placement. The feature sets of all example placements are then used to learn a probabilistic model over all possible placements and scenes. With this model, we can generate a new set of placements with similar geometric relationships in any given scene. We introduce extensions that enable propagation and generation of shapes in 3D scenes, as well as the application of a learned modeling session to large scenes without additional user interaction. These concepts allow us to generate complex scenes with thousands of objects with relatively little user interaction.},
  author       = {Guerrero, Paul and Jeschke, Stefan and Wimmer, Michael and Wonka, Peter},
  journal      = {ACM Transactions on Graphics},
  location     = {Los Angeles, CA, USA},
  number       = {4},
  publisher    = {ACM},
  title        = {{Learning shape placements by example}},
  doi          = {10.1145/2766933},
  volume       = {34},
  year         = {2015},
}

@article{1632,
  abstract     = {This paper presents a liquid simulation technique that enforces the incompressibility condition using a stream function solve instead of a pressure projection. Previous methods have used stream function techniques for the simulation of detailed single-phase flows, but a formulation for liquid simulation has proved elusive in part due to the free surface boundary conditions. In this paper, we introduce a stream function approach to liquid simulations with novel boundary conditions for free surfaces, solid obstacles, and solid-fluid coupling.

Although our approach increases the dimension of the linear system necessary to enforce incompressibility, it provides interesting and surprising benefits. First, the resulting flow is guaranteed to be divergence-free regardless of the accuracy of the solve. Second, our free-surface boundary conditions guarantee divergence-free motion even in the un-simulated air phase, which enables two-phase flow simulation by only computing a single phase. We implemented this method using a variant of FLIP simulation which only samples particles within a narrow band of the liquid surface, and we illustrate the effectiveness of our method for detailed two-phase flow simulations with complex boundaries, detailed bubble interactions, and two-way solid-fluid coupling.},
  author       = {Ando, Ryoichi and Thuerey, Nils and Wojtan, Christopher J},
  journal      = {ACM Transactions on Graphics},
  location     = {Los Angeles, CA, USA},
  number       = {4},
  publisher    = {ACM},
  title        = {{A stream function solver for liquid simulations}},
  doi          = {10.1145/2766935},
  volume       = {34},
  year         = {2015},
}

@article{1633,
  abstract     = {We present a method for simulating brittle fracture under the assumptions of quasi-static linear elastic fracture mechanics (LEFM). Using the boundary element method (BEM) and Lagrangian crack-fronts, we produce highly detailed fracture surfaces. The computational cost of the BEM is alleviated by using a low-resolution mesh and interpolating the resulting stress intensity factors when propagating the high-resolution crack-front.

Our system produces physics-based fracture surfaces with high spatial and temporal resolution, taking spatial variation of material toughness and/or strength into account. It also allows for crack initiation to be handled separately from crack propagation, which is not only more reasonable from a physics perspective, but can also be used to control the simulation.

Separating the resolution of the crack-front from the resolution of the computational mesh increases the efficiency and therefore the amount of visual detail on the resulting fracture surfaces. The BEM also allows us to re-use previously computed blocks of the system matrix.},
  author       = {Hahn, David and Wojtan, Christopher J},
  journal      = {ACM Transactions on Graphics},
  location     = {Los Angeles, CA, USA},
  number       = {4},
  publisher    = {ACM},
  title        = {{High-resolution brittle fracture simulation with boundary elements}},
  doi          = {10.1145/2766896},
  volume       = {34},
  year         = {2015},
}

@article{1634,
  abstract     = {Simulating the delightful dynamics of soap films, bubbles, and foams has traditionally required the use of a fully three-dimensional many-phase Navier-Stokes solver, even though their visual appearance is completely dominated by the thin liquid surface. We depart from earlier work on soap bubbles and foams by noting that their dynamics are naturally described by a Lagrangian vortex sheet model in which circulation is the primary variable. This leads us to derive a novel circulation-preserving surface-only discretization of foam dynamics driven by surface tension on a non-manifold triangle mesh. We represent the surface using a mesh-based multimaterial surface tracker which supports complex bubble topology changes, and evolve the surface according to the ambient air flow induced by a scalar circulation field stored on the mesh. Surface tension forces give rise to a simple update rule for circulation, even at non-manifold Plateau borders, based on a discrete measure of signed scalar mean curvature. We further incorporate vertex constraints to enable the interaction of soap films with wires. The result is a method that is at once simple, robust, and efficient, yet able to capture an array of soap film behaviors including foam rearrangement, catenoid collapse, blowing bubbles, and double bubbles being pulled apart.},
  author       = {Da, Fang and Batty, Christopher and Wojtan, Christopher J and Grinspun, Eitan},
  journal      = {ACM Transactions on Graphics},
  location     = {Los Angeles, CA, USA},
  number       = {4},
  publisher    = {ACM},
  title        = {{Double bubbles sans toil and trouble: discrete circulation-preserving vortex sheets for soap films and foams}},
  doi          = {10.1145/2767003},
  volume       = {34},
  year         = {2015},
}

@article{1635,
  abstract     = {We calculate a Ricci curvature lower bound for some classical examples of random walks, namely, a chain on a slice of the n-dimensional discrete cube (the so-called Bernoulli-Laplace model) and the random transposition shuffle of the symmetric group of permutations on n letters.},
  author       = {Erbar, Matthias and Maas, Jan and Tetali, Prasad},
  journal      = {Annales de la faculté des sciences de Toulouse},
  number       = {4},
  pages        = {781 -- 800},
  publisher    = {Faculté des sciences de Toulouse},
  title        = {{Discrete Ricci curvature bounds for Bernoulli-Laplace and random transposition models}},
  doi          = {10.5802/afst.1464},
  volume       = {24},
  year         = {2015},
}

@inproceedings{1636,
  abstract     = {The Constraint Satisfaction Problem (CSP) is a fundamental algorithmic problem that appears in many areas of Computer Science. It can be equivalently stated as computing a homomorphism R → Γ between two relational structures, e.g. between two directed graphs. Analyzing its complexity has been a prominent research direction, especially for the fixed template CSPs where the right side Γ is fixed and the left side R is unconstrained.

Far fewer results are known for the hybrid setting that restricts both sides simultaneously. It assumes that R belongs to a certain class of relational structures (called a structural restriction in this paper). We study which structural restrictions are effective, i.e. those for which there exists a fixed template Γ (from a certain class of languages) such that the problem is tractable when R is restricted, and NP-hard otherwise. We provide a characterization for structural restrictions that are closed under inverse homomorphisms. The criterion is based on the chromatic number of a relational structure defined in this paper; it generalizes the standard chromatic number of a graph.

As our main tool, we use the algebraic machinery developed for fixed template CSPs. To apply it to our case, we introduce a new construction called a “lifted language”. We also give a characterization for structural restrictions corresponding to minor-closed families of graphs, extend results to certain Valued CSPs (namely conservative valued languages), and state implications for (valued) CSPs with ordered variables and for the maximum weight independent set problem on some restricted families of graphs.},
  author       = {Kolmogorov, Vladimir and Rolinek, Michal and Takhanov, Rustem},
  booktitle    = {26th International Symposium on Algorithms and Computation},
  isbn         = {978-3-662-48970-3},
  location     = {Nagoya, Japan},
  pages        = {566 -- 577},
  publisher    = {Springer Nature},
  title        = {{Effectiveness of structural restrictions for hybrid CSPs}},
  doi          = {10.1007/978-3-662-48971-0_48},
  volume       = {9472},
  year         = {2015},
}

@inproceedings{1637,
  abstract     = {An instance of the Valued Constraint Satisfaction Problem (VCSP) is given by a finite set of variables, a finite domain of labels, and a sum of functions, each function depending on a subset of the variables. Each function can take finite values specifying costs of assignments of labels to its variables or the infinite value, which indicates an infeasible assignment. The goal is to find an assignment of labels to the variables that minimizes the sum. We study, assuming that P ≠ NP, how the complexity of this very general problem depends on the set of functions allowed in the instances, the so-called constraint language. The case when all allowed functions take values in {0, ∞} corresponds to ordinary CSPs, where one deals only with the feasibility issue and there is no optimization. This case is the subject of the Algebraic CSP Dichotomy Conjecture predicting for which constraint languages CSPs are tractable (i.e. solvable in polynomial time) and for which NP-hard. The case when all allowed functions take only finite values corresponds to finite-valued CSP, where the feasibility aspect is trivial and one deals only with the optimization issue. The complexity of finite-valued CSPs was fully classified by Thapper and Živný. An algebraic necessary condition for tractability of a general-valued CSP with a fixed constraint language was recently given by Kozik and Ochremiak. As our main result, we prove that if a constraint language satisfies this algebraic necessary condition, and the feasibility CSP (i.e. the problem of deciding whether a given instance has a feasible solution) corresponding to the VCSP with this language is tractable, then the VCSP is tractable. The algorithm is a simple combination of the assumed algorithm for the feasibility CSP and the standard LP relaxation. As a corollary, we obtain that a dichotomy for ordinary CSPs would imply a dichotomy for general-valued CSPs.},
  author       = {Kolmogorov, Vladimir and Krokhin, Andrei and Rolinek, Michal},
  booktitle    = {IEEE 56th Annual Symposium on Foundations of Computer Science},
  location     = {Berkeley, CA, USA},
  pages        = {1246 -- 1258},
  publisher    = {IEEE},
  title        = {{The complexity of general-valued CSPs}},
  doi          = {10.1109/FOCS.2015.80},
  year         = {2015},
}

@article{1639,
  abstract     = {In this paper the optimal transport and the metamorphosis perspectives are combined. For a pair of given input images, geodesic paths in the space of images are defined as minimizers of a resulting path energy. To this end, the underlying Riemannian metric measures the rate of transport cost and the rate of viscous dissipation. Furthermore, the model is capable of dealing with strongly varying image contrast and explicitly allows for sources and sinks in the transport equations, which are incorporated in the metric related to the metamorphosis approach by Trouvé and Younes. In the non-viscous case with source term, the existence of geodesic paths is proven in the space of measures. The proposed model is explored on the range from merely optimal transport to strongly dissipative dynamics. For this model, a robust and effective variational time discretization of geodesic paths is proposed. This requires minimizing a discrete path energy consisting of a sum of consecutive image matching functionals. These functionals are defined on corresponding pairs of intensity functions and on associated pairwise matching deformations. Existence of time discrete geodesics is demonstrated. Furthermore, a finite element implementation is proposed and applied to instructive test cases and to real images. In the non-viscous case this is compared to the algorithm proposed by Benamou and Brenier, including a discretization of the source term. Finally, the model is generalized to define discrete weighted barycentres with applications to textures and objects.},
  author       = {Maas, Jan and Rumpf, Martin and Schönlieb, Carola and Simon, Stefan},
  journal      = {ESAIM: Mathematical Modelling and Numerical Analysis},
  number       = {6},
  pages        = {1745 -- 1769},
  publisher    = {EDP Sciences},
  title        = {{A generalized model for optimal transport of images including dissipation and density modulation}},
  doi          = {10.1051/m2an/2015043},
  volume       = {49},
  year         = {2015},
}

@article{1640,
  abstract     = {Auxin and cytokinin are key endogenous regulators of plant development. Although cytokinin-mediated modulation of auxin distribution is a developmentally crucial hormonal interaction, its molecular basis is largely unknown. Here we show a direct regulatory link between cytokinin signalling and the auxin transport machinery uncovering a mechanistic framework for cytokinin-auxin cross-talk. We show that the CYTOKININ RESPONSE FACTORS (CRFs), transcription factors downstream of cytokinin perception, transcriptionally control genes encoding PIN-FORMED (PIN) auxin transporters at a specific PIN CYTOKININ RESPONSE ELEMENT (PCRE) domain. Removal of this cis-regulatory element effectively uncouples PIN transcription from the CRF-mediated cytokinin regulation and attenuates plant cytokinin sensitivity. We propose that CRFs represent a missing cross-talk component that fine-tunes auxin transport capacity downstream of cytokinin signalling to control plant development.},
  author       = {Šimášková, Mária and O'Brien, José and Khan-Djamei, Mamoona and Van Noorden, Giel and Ötvös, Krisztina and Vieten, Anne and De Clercq, Inge and Van Haperen, Johanna and Cuesta, Candela and Hoyerová, Klára and Vanneste, Steffen and Marhavy, Peter and Wabnik, Krzysztof T and Van Breusegem, Frank and Nowack, Moritz and Murphy, Angus and Friml, Jiří and Weijers, Dolf and Beeckman, Tom and Benková, Eva},
  journal      = {Nature Communications},
  publisher    = {Nature Publishing Group},
  title        = {{Cytokinin response factors regulate PIN-FORMED auxin transporters}},
  doi          = {10.1038/ncomms9717},
  volume       = {6},
  year         = {2015},
}

@article{1642,
  abstract     = {The Hanani-Tutte theorem is a classical result, first proved in the 1930s, that characterizes planar graphs as graphs that admit a drawing in the plane in which every pair of edges not sharing a vertex crosses an even number of times. We generalize this result to clustered graphs with two disjoint clusters, and show that a straightforward extension to flat clustered graphs with three or more disjoint clusters is not possible. For general clustered graphs we show a variant of the Hanani-Tutte theorem in the case when each cluster induces a connected subgraph. Di Battista and Frati proved that clustered planarity of embedded clustered graphs whose every face is incident to at most five vertices can be tested in polynomial time. We give a new and short proof of this result, using the matroid intersection algorithm.},
  author       = {Fulek, Radoslav and Kynčl, Jan and Malinovič, Igor and Pálvölgyi, Dömötör},
  issn         = {1077-8926},
  journal      = {Electronic Journal of Combinatorics},
  number       = {4},
  publisher    = {Electronic Journal of Combinatorics},
  title        = {{Clustered planarity testing revisited}},
  doi          = {10.37236/5002},
  volume       = {22},
  year         = {2015},
}

@inproceedings{1644,
  abstract     = {Increasing the computational complexity of evaluating a hash function, both for the honest users as well as for an adversary, is a useful technique employed for example in password-based cryptographic schemes to impede brute-force attacks, and also in so-called proofs of work (used in protocols like Bitcoin) to show that a certain amount of computation was performed by a legitimate user. A natural approach to adjust the complexity of a hash function is to iterate it c times, for some parameter c, in the hope that any query to the scheme requires c evaluations of the underlying hash function. However, results by Dodis et al. (Crypto 2012) imply that plain iteration falls short of achieving this goal, and designing schemes which provably have such a desirable property remained an open problem. This paper formalizes explicitly what it means for a given scheme to amplify the query complexity of a hash function. In the random oracle model, the goal of a secure query-complexity amplifier (QCA) scheme is captured as transforming, in the sense of indifferentiability, a random oracle allowing R queries (for the adversary) into one provably allowing only r < R queries. Turned around, this means that making r queries to the scheme requires at least R queries to the actual random oracle. Second, a new scheme, called collision-free iteration, is proposed and proven to achieve c-fold QCA for both the honest parties and the adversary, for any fixed parameter c.},
  author       = {Demay, Grégory and Gazi, Peter and Maurer, Ueli and Tackmann, Björn},
  booktitle    = {8th International Conference on Information Theoretic Security},
  location     = {Lugano, Switzerland},
  pages        = {159 -- 180},
  publisher    = {Springer},
  title        = {{Query-complexity amplification for random oracles}},
  doi          = {10.1007/978-3-319-17470-9_10},
  volume       = {9063},
  year         = {2015},
}

@inproceedings{1646,
  abstract     = {A pseudorandom function (PRF) is a keyed function F : K × X → Y where, for a random key k ∈ K, the function F(k, ·) is indistinguishable from a uniformly random function, given black-box access. A key-homomorphic PRF has the additional feature that for any keys k, k' and any input x, we have F(k+k', x) = F(k, x) ⊕ F(k', x) for some group operations +, ⊕ on K and Y, respectively. A constrained PRF for a family of sets S ⊆ P(X) has the property that, given any key k and set S ∈ S, one can efficiently compute a “constrained” key k_S that enables evaluation of F(k, x) on all inputs x ∈ S, while the values F(k, x) for x ∉ S remain pseudorandom even given k_S. In this paper we construct PRFs that are simultaneously constrained and key homomorphic, where the homomorphic property holds even for constrained keys. We first show that the multilinear map-based bit-fixing and circuit-constrained PRFs of Boneh and Waters (Asiacrypt 2013) can be modified to also be key-homomorphic. We then show that the LWE-based key-homomorphic PRFs of Banerjee and Peikert (Crypto 2014) are essentially already prefix-constrained PRFs, using a (non-obvious) definition of constrained keys and associated group operation. Moreover, the constrained keys themselves are pseudorandom, and the constraining and evaluation functions can all be computed in low depth. As an application of key-homomorphic constrained PRFs, we construct a proxy re-encryption scheme with fine-grained access control. This scheme allows storing encrypted data on an untrusted server, where each file can be encrypted relative to some attributes, so that only parties whose constrained keys match the attributes can decrypt. Moreover, the server can re-key (arbitrary subsets of) the ciphertexts without learning anything about the plaintexts, thus permitting efficient and fine-grained revocation.},
  author       = {Banerjee, Abhishek and Fuchsbauer, Georg and Peikert, Chris and Pietrzak, Krzysztof Z and Stevens, Sophie},
  booktitle    = {12th Theory of Cryptography Conference},
  isbn         = {978-3-662-46496-0},
  location     = {Warsaw, Poland},
  pages        = {31 -- 60},
  publisher    = {Springer Nature},
  title        = {{Key-homomorphic constrained pseudorandom functions}},
  doi          = {10.1007/978-3-662-46497-7_2},
  volume       = {9015},
  year         = {2015},
}

@inproceedings{1647,
  abstract     = {Round-optimal blind signatures are notoriously hard to construct in the standard model, especially in the malicious-signer model, where blindness must hold under adversarially chosen keys. This is substantiated by several impossibility results. The only construction that can be termed theoretically efficient, by Garg and Gupta (Eurocrypt’14), requires complexity leveraging, inducing an exponential security loss. We present a construction of practically efficient round-optimal blind signatures in the standard model. It is conceptually simple and builds on the recent structure-preserving signatures on equivalence classes (SPS-EQ) from Asiacrypt’14. While the traditional notion of blindness follows from standard assumptions, we prove blindness under adversarially chosen keys under an interactive variant of DDH. However, we neither require non-uniform assumptions nor complexity leveraging. We then show how to extend our construction to partially blind signatures and to blind signatures on message vectors, which yield a construction of one-show anonymous credentials à la “anonymous credentials light” (CCS’13) in the standard model. Furthermore, we give the first SPS-EQ construction under non-interactive assumptions and show how SPS-EQ schemes imply conventional structure-preserving signatures, which allows us to apply optimality results for the latter to SPS-EQ.},
  author       = {Fuchsbauer, Georg and Hanser, Christian and Slamanig, Daniel},
  booktitle    = {35th Annual Cryptology Conference},
  location     = {Santa Barbara, CA, USA},
  pages        = {233 -- 253},
  publisher    = {Springer},
  title        = {{Practical round-optimal blind signatures in the standard model}},
  doi          = {10.1007/978-3-662-48000-7_12},
  volume       = {9216},
  year         = {2015},
}

@inproceedings{1648,
  abstract     = {Generalized Selective Decryption (GSD), introduced by Panjwani [TCC’07], is a game for a symmetric encryption scheme Enc that captures the difficulty of proving adaptive security of certain protocols, most notably the Logical Key Hierarchy (LKH) multicast encryption protocol. In the GSD game there are n keys k_1, ..., k_n, which the adversary may adaptively corrupt (learn); moreover, it can ask for encryptions Enc_{k_i}(k_j) of keys under other keys. The adversary’s task is to distinguish keys (which it cannot trivially compute) from random. Proving the hardness of GSD assuming only IND-CPA security of Enc is surprisingly hard. Using “complexity leveraging” loses a factor exponential in n, which makes the proof practically meaningless. We can think of the GSD game as building a graph on n vertices, where we add an edge i → j when the adversary asks for an encryption of k_j under k_i. If restricted to graphs of depth ℓ, Panjwani gave a reduction that loses only a factor exponential in ℓ (not n). To date, this is the only non-trivial result known for GSD. In this paper we give almost-polynomial reductions for large classes of graphs. Most importantly, we prove the security of the GSD game restricted to trees losing only a quasi-polynomial factor n^{3 log n + 5}. Trees are an important special case capturing real-world protocols like the LKH protocol. Our new bound improves upon Panjwani’s on some LKH variants proposed in the literature where the underlying tree is not balanced. Our proof builds on ideas from the “nested hybrids” technique recently introduced by Fuchsbauer et al. [Asiacrypt’14] for proving the adaptive security of constrained PRFs.},
  author       = {Fuchsbauer, Georg and Jafargholi, Zahra and Pietrzak, Krzysztof Z},
  booktitle    = {35th Annual Cryptology Conference},
  location     = {Santa Barbara, CA, USA},
  pages        = {601 -- 620},
  publisher    = {Springer},
  title        = {{A quasipolynomial reduction for generalized selective decryption on trees}},
  doi          = {10.1007/978-3-662-47989-6_29},
  volume       = {9215},
  year         = {2015},
}

@inproceedings{1649,
  abstract     = {We extend a commitment scheme based on the learning with errors over rings (RLWE) problem, and present efficient companion zero-knowledge proofs of knowledge.},
  author       = {Benhamouda, Fabrice and Krenn, Stephan and Lyubashevsky, Vadim and Pietrzak, Krzysztof Z},
  booktitle    = {20th European Symposium on Research in Computer Security},
  location     = {Vienna, Austria},
  pages        = {305 -- 325},
  publisher    = {Springer},
  title        = {{Efficient zero-knowledge proofs for commitments from learning with errors over rings}},
  doi          = {10.1007/978-3-319-24174-6_16},
  volume       = {9326},
  year         = {2015},
}

@inproceedings{1650,
  abstract     = {We consider the task of deriving a key with high HILL entropy (i.e., being computationally indistinguishable from a key with high min-entropy) from an unpredictable source.

Prior to this work, the only known way to transform unpredictability into a key that was ϵ-indistinguishable from having min-entropy was via pseudorandomness, for example by Goldreich-Levin (GL) hardcore bits. This approach has the inherent limitation that from a source with k bits of unpredictability entropy one can derive a key of length (and thus HILL entropy) at most k−2log(1/ϵ) bits. In many settings, e.g. when dealing with biometric data, such a 2log(1/ϵ) bit entropy loss is not an option. Our main technical contribution is a theorem that states that in the high entropy regime, unpredictability implies HILL entropy. Concretely, any variable K with |K|−d bits of unpredictability entropy has the same amount of so-called metric entropy (against real-valued, deterministic distinguishers), which is known to imply the same amount of HILL entropy. The loss in circuit size in this argument is exponential in the entropy gap d, and thus this result only applies for small d (i.e., where the size of distinguishers considered is exponential in d).

To overcome the above restriction, we investigate whether it is possible to first “condense” unpredictability entropy and make the entropy gap small. We show that any source with k bits of unpredictability can be condensed into a source of length k with k−3 bits of unpredictability entropy. Our condenser simply “abuses” the GL construction and derives a k-bit key from a source with k bits of unpredictability. The original GL theorem implies nothing when extracting that many bits, but we show that in this regime, GL still behaves like a “condenser” for unpredictability. This result comes with two caveats: (1) the loss in circuit size is exponential in k, and (2) we require that the source we start with has no HILL entropy (equivalently, one can efficiently check if a guess is correct). We leave it as an intriguing open problem to overcome these restrictions or to prove they are inherent.},
  author       = {Skórski, Maciej and Golovnev, Alexander and Pietrzak, Krzysztof Z},
  booktitle    = {42nd International Colloquium on Automata, Languages, and Programming},
  location     = {Kyoto, Japan},
  pages        = {1046 -- 1057},
  publisher    = {Springer},
  title        = {{Condensed unpredictability}},
  doi          = {10.1007/978-3-662-47672-7_85},
  volume       = {9134},
  year         = {2015},
}

