@article{626,
  abstract     = {Our focus here is on the infinitesimal model. In this model, one or several quantitative traits are described as the sum of a genetic and a non-genetic component, the first being distributed within families as a normal random variable centred at the average of the parental genetic components, and with a variance independent of the parental traits. Thus, the variance that segregates within families is not perturbed by selection, and can be predicted from the variance components. This does not necessarily imply that the trait distribution across the whole population should be Gaussian, and indeed selection or population structure may have a substantial effect on the overall trait distribution. One of our main aims is to identify some general conditions on the allelic effects for the infinitesimal model to be accurate. We first review the long history of the infinitesimal model in quantitative genetics. Then we formulate the model at the phenotypic level in terms of individual trait values and relationships between individuals, but including different evolutionary processes: genetic drift, recombination, selection, mutation, population structure, …. We give a range of examples of its application to evolutionary questions related to stabilising selection, assortative mating, effective population size and response to selection, habitat preference and speciation. We provide a mathematical justification of the model as the limit as the number M of underlying loci tends to infinity of a model with Mendelian inheritance, mutation and environmental noise, when the genetic component of the trait is purely additive. We also show how the model generalises to include epistatic effects. We prove in particular that, within each family, the genetic components of the individual trait values in the current generation are indeed normally distributed with a variance independent of ancestral traits, up to an error of order 1∕M. 
Simulations suggest that in some cases the convergence may be as fast as 1∕M.},
  author       = {Barton, Nicholas H and Etheridge, Alison and Véber, Amandine},
  issn         = {0040-5809},
  journal      = {Theoretical Population Biology},
  pages        = {50 -- 73},
  publisher    = {Academic Press},
  title        = {{The infinitesimal model: Definition, derivation, and implications}},
  doi          = {10.1016/j.tpb.2017.06.001},
  volume       = {118},
  year         = {2017},
}

@article{627,
  abstract     = {Beige adipocytes are a new type of recruitable brownish adipocytes, with highly mitochondrial membrane uncoupling protein 1 expression and thermogenesis. Beige adipocytes were found among white adipocytes, especially in subcutaneous white adipose tissue (sWAT). Therefore, beige adipocytes may be involved in the regulation of energy metabolism and fat deposition. Transient receptor potential melastatin 8 (TRPM8), a Ca2+-permeable non-selective cation channel, plays vital roles in the regulation of various cellular functions. It has been reported that TRPM8 activation enhanced the thermogenic function of brown adipocytes. However, the involvement of TRPM8 in the thermogenic function of WAT remains unexplored. Our data revealed that TRPM8 was expressed in mouse white adipocytes at mRNA, protein and functional levels. The mRNA expression of Trpm8 was significantly increased in the differentiated white adipocytes than pre-adipocytes. Moreover, activation of TRPM8 by menthol enhanced the expression of thermogenic genes in cultured white adipocytes. And menthol-induced increases of the thermogenic genes in white adipocytes was inhibited by either KT5720 (a protein kinase A inhibitor) or BAPTA-AM. In addition, high fat diet (HFD)-induced obesity in mice was significantly recovered by co-treatment with menthol. Dietary menthol enhanced WAT “browning” and improved glucose metabolism in HFD-induced obesity mice as well. Therefore, we concluded that TRPM8 might be involved in WAT “browning” by increasing the expression levels of genes related to thermogenesis and energy metabolism. And dietary menthol could be a novel approach for combating human obesity and related metabolic diseases.},
  author       = {Jiang, Changyu and Zhai, Ming-Zhu and Yan, Dong and Li, Da and Li, Chen and Zhang, Yonghong and Xiao, Lizu and Xiong, Donglin and Deng, Qiwen and Sun, Wuping},
  issn         = {1949-2553},
  journal      = {Oncotarget},
  number       = {43},
  pages        = {75114 -- 75126},
  publisher    = {Impact Journals},
  title        = {{Dietary menthol-induced TRPM8 activation enhances WAT “browning” and ameliorates diet-induced obesity}},
  doi          = {10.18632/oncotarget.20540},
  volume       = {8},
  year         = {2017},
}

@inproceedings{628,
  abstract     = {We consider the problem of developing automated techniques for solving recurrence relations to aid the expected-runtime analysis of programs. The motivation is that several classical textbook algorithms have quite efficient expected-runtime complexity, whereas the corresponding worst-case bounds are either inefficient (e.g., Quick-Sort), or completely ineffective (e.g., Coupon-Collector). Since the main focus of expected-runtime analysis is to obtain efficient bounds, we consider bounds that are either logarithmic, linear or almost-linear (O(log n), O(n), O(n · log n), respectively, where n represents the input size). Our main contribution is an efficient (simple linear-time algorithm) sound approach for deriving such expected-runtime bounds for the analysis of recurrence relations induced by randomized algorithms. The experimental results show that our approach can efficiently derive asymptotically optimal expected-runtime bounds for recurrences of classical randomized algorithms, including Randomized-Search, Quick-Sort, Quick-Select, Coupon-Collector, where the worst-case bounds are either inefficient (such as linear as compared to logarithmic expected-runtime complexity, or quadratic as compared to linear or almost-linear expected-runtime complexity), or ineffective.},
  author       = {Chatterjee, Krishnendu and Fu, Hongfei and Murhekar, Aniket},
  editor       = {Majumdar, Rupak and Kunčak, Viktor},
  booktitle    = {Computer Aided Verification},
  isbn         = {978-331963386-2},
  location     = {Heidelberg, Germany},
  pages        = {118 -- 139},
  publisher    = {Springer},
  title        = {{Automated recurrence analysis for almost linear expected runtime bounds}},
  doi          = {10.1007/978-3-319-63387-9_6},
  volume       = {10426},
  year         = {2017},
}

@phdthesis{6287,
  abstract     = {The main objects considered in the present work are simplicial and CW-complexes with vertices forming a random point cloud. In particular, we consider a Poisson point process in R^n and study Delaunay and Voronoi complexes of the first and higher orders and weighted Delaunay complexes obtained as sections of Delaunay complexes, as well as the Čech complex. Further, we examine the Delaunay complex of a Poisson point process on the sphere S^n, as well as of a uniform point cloud, which is equivalent to the convex hull, providing a connection to the theory of random polytopes. Each of the complexes in question can be endowed with a radius function, which maps its cells to the radii of appropriately chosen circumspheres, called the radius of the cell. Applying and developing discrete Morse theory for these functions, joining it together with probabilistic and sometimes analytic machinery, and developing several integral geometric tools, we aim at getting the distributions of circumradii of typical cells. For all considered complexes, we are able to generalize and obtain up to constants the distribution of radii of typical intervals of all types. In low dimensions the constants can be computed explicitly, thus providing the explicit expressions for the expected numbers of cells. In particular, it allows to find the expected density of simplices of every dimension for a Poisson point process in R^4, whereas the result for R^3 was known already in 1970's.},
  author       = {Nikitenko, Anton},
  issn         = {2663-337X},
  pages        = {86},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Discrete Morse theory for random complexes}},
  doi          = {10.15479/AT:ISTA:th_873},
  year         = {2017},
}

@inbook{629,
  abstract     = {Even simple cells like bacteria have precisely regulated cellular anatomies, which allow them to grow, divide and to respond to internal or external cues with high fidelity. How spatial and temporal intracellular organization in prokaryotic cells is achieved and maintained on the basis of locally interacting proteins still remains largely a mystery. Bulk biochemical assays with purified components and in vivo experiments help us to approach key cellular processes from two opposite ends, in terms of minimal and maximal complexity. However, to understand how cellular phenomena emerge, that are more than the sum of their parts, we have to assemble cellular subsystems step by step from the bottom up. Here, we review recent in vitro reconstitution experiments with proteins of the bacterial cell division machinery and illustrate how they help to shed light on fundamental cellular mechanisms that constitute spatiotemporal order and regulate cell division.},
  author       = {Loose, Martin and Zieske, Katja and Schwille, Petra},
  booktitle    = {Prokaryotic Cytoskeletons},
  pages        = {419 -- 444},
  publisher    = {Springer},
  title        = {{Reconstitution of protein dynamics involved in bacterial cell division}},
  doi          = {10.1007/978-3-319-53047-5_15},
  volume       = {84},
  year         = {2017},
}

@phdthesis{6291,
  abstract     = {Bacteria and their pathogens – phages – are the most abundant living entities on Earth. Throughout their coevolution, bacteria have evolved multiple immune systems to overcome the ubiquitous threat from the phages. Although the molecu- lar details of these immune systems’ functions are relatively well understood, their epidemiological consequences for the phage-bacterial communities have been largely neglected. In this thesis we employed both experimental and theoretical methods to explore whether herd and social immunity may arise in bacterial popu- lations. Using our experimental system consisting of Escherichia coli strains with a CRISPR based immunity to the T7 phage we show that herd immunity arises in phage-bacterial communities and that it is accentuated when the populations are spatially structured. By fitting a mathematical model, we inferred expressions for the herd immunity threshold and the velocity of spread of a phage epidemic in partially resistant bacterial populations, which both depend on the bacterial growth rate, phage burst size and phage latent period. We also investigated the poten- tial for social immunity in Streptococcus thermophilus and its phage 2972 using a bioinformatic analysis of potentially coding short open reading frames with a signalling signature, encoded within the CRISPR associated genes. Subsequently, we tested one identified potentially signalling peptide and found that its addition to a phage-challenged culture increases probability of survival of bacteria two fold, although the results were only marginally significant. Together, these results demonstrate that the ubiquitous arms races between bacteria and phages have further consequences at the level of the population.},
  author       = {Payne, Pavel},
  issn         = {2663-337X},
  pages        = {83},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Bacterial herd and social immunity to phages}},
  year         = {2017},
}

@inproceedings{630,
  abstract     = {Background: Standards have become available to share semantically encoded vital parameters from medical devices, as required for example by personal healthcare records. Standardised sharing of biosignal data largely remains open. Objectives: The goal of this work is to explore available biosignal file format and data exchange standards and profiles, and to conceptualise end-To-end solutions. Methods: The authors reviewed and discussed available biosignal file format standards with other members of international standards development organisations (SDOs). Results: A raw concept for standards based acquisition, storage, archiving and sharing of biosignals was developed. The GDF format may serve for storing biosignals. Signals can then be shared using FHIR resources and may be stored on FHIR servers or in DICOM archives, with DICOM waveforms as one possible format. Conclusion: Currently a group of international SDOs (e.g. HL7, IHE, DICOM, IEEE) is engaged in intensive discussions. This discussion extends existing work that already was adopted by large implementer communities. The concept presented here only reports the current status of the discussion in Austria. The discussion will continue internationally, with results to be expected over the coming years.},
  author       = {Sauermann, Stefan and David, Veronika and Schlögl, Alois and Egelkraut, Reinhard and Frohner, Matthias and Pohn, Birgit and Urbauer, Philipp and Mense, Alexander},
  isbn         = {978-161499758-0},
  location     = {Vienna, Austria},
  pages        = {356 -- 362},
  publisher    = {IOS Press},
  title        = {{Biosignals standards and FHIR: The way to go}},
  doi          = {10.3233/978-1-61499-759-7-356},
  volume       = {236},
  year         = {2017},
}

@inproceedings{631,
  abstract     = {Template polyhedra generalize intervals and octagons to polyhedra whose facets are orthogonal to a given set of arbitrary directions. They have been employed in the abstract interpretation of programs and, with particular success, in the reachability analysis of hybrid automata. While previously, the choice of directions has been left to the user or a heuristic, we present a method for the automatic discovery of directions that generalize and eliminate spurious counterexamples. We show that for the class of convex hybrid automata, i.e., hybrid automata with (possibly nonlinear) convex constraints on derivatives, such directions always exist and can be found using convex optimization. We embed our method inside a CEGAR loop, thus enabling the time-unbounded reachability analysis of an important and richer class of hybrid automata than was previously possible. We evaluate our method on several benchmarks, demonstrating also its superior efficiency for the special case of linear hybrid automata.},
  author       = {Bogomolov, Sergiy and Frehse, Goran and Giacobbe, Mirco and Henzinger, Thomas A},
  booktitle    = {Tools and Algorithms for the Construction and Analysis of Systems},
  isbn         = {978-366254576-8},
  location     = {Uppsala, Sweden},
  pages        = {589 -- 606},
  publisher    = {Springer},
  title        = {{Counterexample guided refinement of template polyhedra}},
  doi          = {10.1007/978-3-662-54577-5_34},
  volume       = {10205},
  year         = {2017},
}

@article{632,
  abstract     = {We consider a 2D quantum system of N bosons in a trapping potential |x|^s, interacting via a pair potential of the form N^{2β−1} w(N^β x). We show that for all 0 < β < (s + 1)/(s + 2), the leading order behavior of ground states of the many-body system is described in the large N limit by the corresponding cubic nonlinear Schrödinger energy functional. Our result covers the focusing case (w < 0) where even the stability of the many-body system is not obvious. This answers an open question mentioned by X. Chen and J. Holmer for harmonic traps (s = 2). Together with the BBGKY hierarchy approach used by these authors, our result implies the convergence of the many-body quantum dynamics to the focusing NLS equation with harmonic trap for all 0 < β < 3/4. },
  author       = {Lewin, Mathieu and Nam, Phan and Rougerie, Nicolas},
  journal      = {Proceedings of the American Mathematical Society},
  number       = {6},
  pages        = {2441 -- 2454},
  publisher    = {American Mathematical Society},
  title        = {{A note on 2D focusing many boson systems}},
  doi          = {10.1090/proc/13468},
  volume       = {145},
  year         = {2017},
}

@inproceedings{633,
  abstract     = {A Rapidly-exploring Random Tree (RRT) is an algorithm which can search a non-convex region of space by incrementally building a space-filling tree. The tree is constructed from random points drawn from system’s state space and is biased to grow towards large unexplored areas in the system. RRT can provide better coverage of a system’s possible behaviors compared with random simulations, but is more lightweight than full reachability analysis. In this paper, we explore some of the design decisions encountered while implementing a hybrid extension of the RRT algorithm, which have not been elaborated on before. In particular, we focus on handling non-determinism, which arises due to discrete transitions. We introduce the notion of important points to account for this phenomena. We showcase our ideas using heater and navigation benchmarks.},
  author       = {Bak, Stanley and Bogomolov, Sergiy and Henzinger, Thomas A and Kumar, Aviral},
  editor       = {Abate, Alessandro and Boldo, Sylvie},
  booktitle    = {Numerical Software Verification},
  isbn         = {978-331963500-2},
  location     = {Heidelberg, Germany},
  pages        = {83 -- 89},
  publisher    = {Springer},
  title        = {{Challenges and tool implementation of hybrid rapidly exploring random trees}},
  doi          = {10.1007/978-3-319-63501-9_6},
  volume       = {10381},
  year         = {2017},
}

@inbook{634,
  abstract     = {As autism spectrum disorder (ASD) is largely regarded as a neurodevelopmental condition, long-time consensus was that its hallmark features are irreversible. However, several studies from recent years using defined mouse models of ASD have provided clear evidence that in mice neurobiological and behavioural alterations can be ameliorated or even reversed by genetic restoration or pharmacological treatment either before or after symptom onset. Here, we review findings on genetic and pharmacological reversibility of phenotypes in mouse models of ASD. Our review should give a comprehensive overview on both aspects and encourage future studies to better understand the underlying molecular mechanisms that might be translatable from animals to humans.},
  author       = {Schroeder, Jan and Deliu, Elena and Novarino, Gaia and Schmeisser, Michael},
  booktitle    = {Translational Anatomy and Cell Biology of Autism Spectrum Disorder},
  editor       = {Schmeisser, Michael and Böckers, Tobias},
  pages        = {189 -- 211},
  publisher    = {Springer},
  title        = {{Genetic and pharmacological reversibility of phenotypes in mouse models of autism spectrum disorder}},
  doi          = {10.1007/978-3-319-52498-6_10},
  volume       = {224},
  year         = {2017},
}

@inproceedings{635,
  abstract     = {Memory-hard functions (MHFs) are hash algorithms whose evaluation cost is dominated by memory cost. As memory, unlike computation, costs about the same across different platforms, MHFs cannot be evaluated at significantly lower cost on dedicated hardware like ASICs. MHFs have found widespread applications including password hashing, key derivation, and proofs-of-work. This paper focuses on scrypt, a simple candidate MHF designed by Percival, and described in RFC 7914. It has been used within a number of cryptocurrencies (e.g., Litecoin and Dogecoin) and has been an inspiration for Argon2d, one of the winners of the recent password-hashing competition. Despite its popularity, no rigorous lower bounds on its memory complexity are known. We prove that scrypt is optimally memory-hard, i.e., its cumulative memory complexity (cmc) in the parallel random oracle model is Ω(n2w), where w and n are the output length and number of invocations of the underlying hash function, respectively. High cmc is a strong security target for MHFs introduced by Alwen and Serbinenko (STOC’15) which implies high memory cost even for adversaries who can amortize the cost over many evaluations and evaluate the underlying hash functions many times in parallel. Our proof is the first showing optimal memory-hardness for any MHF. Our result improves both quantitatively and qualitatively upon the recent work by Alwen et al. (EUROCRYPT’16) who proved a weaker lower bound of Ω(n2w/ log2 n) for a restricted class of adversaries.},
  author       = {Alwen, Joel F and Chen, Binchi and Pietrzak, Krzysztof Z and Reyzin, Leonid and Tessaro, Stefano},
  editor       = {Coron, Jean-Sébastien and Buus Nielsen, Jesper},
  booktitle    = {Advances in Cryptology -- EUROCRYPT 2017},
  isbn         = {978-331956616-0},
  location     = {Paris, France},
  pages        = {33 -- 62},
  publisher    = {Springer},
  title        = {{Scrypt is maximally memory hard}},
  doi          = {10.1007/978-3-319-56617-7_2},
  volume       = {10212},
  year         = {2017},
}

@inproceedings{636,
  abstract     = {Signal regular expressions can specify sequential properties of real-valued signals based on threshold conditions, regular operations, and duration constraints. In this paper we endow them with a quantitative semantics which indicates how robustly a signal matches or does not match a given expression. First, we show that this semantics is a safe approximation of a distance between the signal and the language defined by the expression. Then, we consider the robust matching problem, that is, computing the quantitative semantics of every segment of a given signal relative to an expression. We present an algorithm that solves this problem for piecewise-constant and piecewise-linear signals and show that for such signals the robustness map is a piecewise-linear function. The availability of an indicator describing how robustly a signal segment matches some regular pattern provides a general framework for quantitative monitoring of cyber-physical systems.},
  author       = {Bakhirkin, Alexey and Ferrere, Thomas and Maler, Oded and Ulus, Dogan},
  editor       = {Abate, Alessandro and Geeraerts, Gilles},
  booktitle    = {Formal Modeling and Analysis of Timed Systems},
  isbn         = {978-331965764-6},
  location     = {Berlin, Germany},
  pages        = {189 -- 206},
  publisher    = {Springer},
  title        = {{On the quantitative semantics of regular expressions over real-valued signals}},
  doi          = {10.1007/978-3-319-65765-3_11},
  volume       = {10419},
  year         = {2017},
}

@inproceedings{637,
  abstract     = {For many cryptographic primitives, it is relatively easy to achieve selective security (where the adversary commits a-priori to some of the choices to be made later in the attack) but appears difficult to achieve the more natural notion of adaptive security (where the adversary can make all choices on the go as the attack progresses). A series of several recent works shows how to cleverly achieve adaptive security in several such scenarios including generalized selective decryption (Panjwani, TCC ’07 and Fuchsbauer et al., CRYPTO ’15), constrained PRFs (Fuchsbauer et al., ASIACRYPT ’14), and Yao garbled circuits (Jafargholi and Wichs, TCC ’16b). Although the above works expressed vague intuition that they share a common technique, the connection was never made precise. In this work we present a new framework that connects all of these works and allows us to present them in a unified and simplified fashion. Moreover, we use the framework to derive a new result for adaptively secure secret sharing over access structures defined via monotone circuits. We envision that further applications will follow in the future. Underlying our framework is the following simple idea. It is well known that selective security, where the adversary commits to n-bits of information about his future choices, automatically implies adaptive security at the cost of amplifying the adversary’s advantage by a factor of up to 2n. However, in some cases the proof of selective security proceeds via a sequence of hybrids, where each pair of adjacent hybrids locally only requires some smaller partial information consisting of m ≪ n bits. The partial information needed might be completely different between different pairs of hybrids, and if we look across all the hybrids we might rely on the entire n-bit commitment. Nevertheless, the above is sufficient to prove adaptive security, at the cost of amplifying the adversary’s advantage by a factor of only 2m ≪ 2n. 
In all of our examples using the above framework, the different hybrids are captured by some sort of a graph pebbling game and the amount of information that the adversary needs to commit to in each pair of hybrids is bounded by the maximum number of pebbles in play at any point in time. Therefore, coming up with better strategies for proving adaptive security translates to various pebbling strategies for different types of graphs.},
  author       = {Jafargholi, Zahra and Kamath Hosdurg, Chethan and Klein, Karen and Komargodski, Ilan and Pietrzak, Krzysztof Z and Wichs, Daniel},
  editor       = {Katz, Jonathan and Shacham, Hovav},
  booktitle    = {Advances in Cryptology -- CRYPTO 2017},
  isbn         = {978-331963687-0},
  location     = {Santa Barbara, CA, United States},
  pages        = {133 -- 163},
  publisher    = {Springer},
  title        = {{Be adaptive avoid overcommitting}},
  doi          = {10.1007/978-3-319-63688-7_5},
  volume       = {10401},
  year         = {2017},
}

@proceedings{638,
  abstract     = {This book constitutes the refereed proceedings of the 9th International Workshop on Numerical Software Verification, NSV 2016, held in Toronto, ON, Canada in July 2016 - colocated with CAV 2016, the 28th International Conference on Computer Aided Verification.
The NSV workshop is dedicated to the development of logical and mathematical techniques for the reasoning about programmability and reliability.},
  editor       = {Bogomolov, Sergiy and Martel, Matthieu and Prabhakar, Pavithra},
  issn         = {0302-9743},
  location     = {Toronto, ON, Canada},
  publisher    = {Springer},
  title        = {{Numerical Software Verification}},
  doi          = {10.1007/978-3-319-54292-8},
  volume       = {10152},
  year         = {2017},
}

@inproceedings{639,
  abstract     = {We study the problem of developing efficient approaches for proving worst-case bounds of non-deterministic recursive programs. Ranking functions are sound and complete for proving termination and worst-case bounds of non-recursive programs. First, we apply ranking functions to recursion, resulting in measure functions, and show that they provide a sound and complete approach to prove worst-case bounds of non-deterministic recursive programs. Our second contribution is the synthesis of measure functions in non-polynomial forms. We show that non-polynomial measure functions with logarithm and exponentiation can be synthesized through abstraction of logarithmic or exponentiation terms, Farkas’ Lemma, and Handelman’s Theorem using linear programming. While previous methods obtain worst-case polynomial bounds, our approach can synthesize bounds of the form O(n log n) as well as O(nr) where r is not an integer. We present experimental results to demonstrate that our approach can efficiently obtain worst-case bounds of classical recursive algorithms such as Merge-Sort, Closest-Pair, Karatsuba’s algorithm and Strassen’s algorithm.},
  author       = {Chatterjee, Krishnendu and Fu, Hongfei and Goharshady, Amir},
  editor       = {Majumdar, Rupak and Kunčak, Viktor},
  booktitle    = {Computer Aided Verification},
  isbn         = {978-331963389-3},
  location     = {Heidelberg, Germany},
  pages        = {41 -- 63},
  publisher    = {Springer},
  title        = {{Non-polynomial worst case analysis of recursive programs}},
  doi          = {10.1007/978-3-319-63390-9_3},
  volume       = {10427},
  year         = {2017},
}

@inproceedings{640,
  abstract     = {Data-independent Memory Hard Functions (iMHFS) are finding a growing number of applications in security; especially in the domain of password hashing. An important property of a concrete iMHF is specified by fixing a directed acyclic graph (DAG) Gn on n nodes. The quality of that iMHF is then captured by the following two pebbling complexities of Gn: – The parallel cumulative pebbling complexity Π∥cc(Gn) must be as high as possible (to ensure that the amortized cost of computing the function on dedicated hardware is dominated by the cost of memory). – The sequential space-time pebbling complexity Πst(Gn) should be as close as possible to Π∥cc(Gn) (to ensure that using many cores in parallel and amortizing over many instances does not give much of an advantage). In this paper we construct a family of DAGs with best possible parameters in an asymptotic sense, i.e., where Π∥cc(Gn) = Ω(n2/ log(n)) (which matches a known upper bound) and Πst(Gn) is within a constant factor of Π∥cc(Gn). Our analysis relies on a new connection between the pebbling complexity of a DAG and its depth-robustness (DR) – a well studied combinatorial property. We show that high DR is sufficient for high Π∥cc. Alwen and Blocki (CRYPTO’16) showed that high DR is necessary and so, together, these results fully characterize DAGs with high Π∥cc in terms of DR. Complementing these results, we provide new upper and lower bounds on the Π∥cc of several important candidate iMHFs from the literature. We give the first lower bounds on the memory hardness of the Catena and Balloon Hashing functions in a parallel model of computation and we give the first lower bounds of any kind for (a version) of Argon2i. Finally we describe a new class of pebbling attacks improving on those of Alwen and Blocki (CRYPTO’16). By instantiating these attacks we upperbound the Π∥cc of the Password Hashing Competition winner Argon2i and one of the Balloon Hashing functions by O (n1.71). 
We also show an upper bound of O(n1.625) for the Catena functions and the two remaining Balloon Hashing functions.},
  author       = {Alwen, Joel F and Blocki, Jeremiah and Pietrzak, Krzysztof Z},
  editor       = {Coron, Jean-Sébastien and Buus Nielsen, Jesper},
  booktitle    = {Advances in Cryptology -- EUROCRYPT 2017},
  isbn         = {978-331956616-0},
  location     = {Paris, France},
  pages        = {3 -- 32},
  publisher    = {Springer},
  title        = {{Depth-robust graphs and their cumulative memory complexity}},
  doi          = {10.1007/978-3-319-56617-7_1},
  volume       = {10212},
  year         = {2017},
}

@inproceedings{641,
  abstract     = {We introduce two novel methods for learning parameters of graphical models for image labelling. The following two tasks underline both methods: (i) perturb model parameters based on given features and ground truth labelings, so as to exactly reproduce these labelings as optima of the local polytope relaxation of the labelling problem; (ii) train a predictor for the perturbed model parameters so that improved model parameters can be applied to the labelling of novel data. Our first method implements task (i) by inverse linear programming and task (ii) using a regressor e.g. a Gaussian process. Our second approach simultaneously solves tasks (i) and (ii) in a joint manner, while being restricted to linearly parameterised predictors. Experiments demonstrate the merits of both approaches.},
  author       = {Trajkovska, Vera and Swoboda, Paul and Åström, Freddie and Petra, Stefanie},
  editor       = {Lauze, François and Dong, Yiqiu and Bjorholm Dahl, Anders},
  booktitle    = {Scale Space and Variational Methods in Computer Vision},
  isbn         = {978-331958770-7},
  location     = {Kolding, Denmark},
  pages        = {323 -- 334},
  publisher    = {Springer},
  title        = {{Graphical model parameter learning by inverse linear programming}},
  doi          = {10.1007/978-3-319-58771-4_26},
  volume       = {10302},
  year         = {2017},
}

@article{642,
  abstract     = {Cauchy problems with SPDEs on the whole space are localized to Cauchy problems on a ball of radius R. This localization reduces various kinds of spatial approximation schemes to finite dimensional problems. The error is shown to be exponentially small. As an application, a numerical scheme is presented which combines the localization and the space and time discretization, and thus is fully implementable.},
  author       = {Gerencsér, Máté and Gyöngy, István},
  issn         = {0025-5718},
  journal      = {Mathematics of Computation},
  number       = {307},
  pages        = {2373 -- 2397},
  publisher    = {American Mathematical Society},
  title        = {{Localization errors in solving stochastic partial differential equations in the whole space}},
  doi          = {10.1090/mcom/3201},
  volume       = {86},
  year         = {2017},
}

@misc{6426,
  abstract     = {Synchronous programs are easy to specify because the side effects of an operation are finished by the time the invocation of the operation returns to the caller. Asynchronous programs, on the other hand, are difficult to specify because there are side effects due to pending computation scheduled as a result of the invocation of an operation. They are also difficult to verify because of the large number of possible interleavings of concurrent asynchronous computation threads. We show that specifications and correctness proofs for asynchronous programs can be structured by introducing the fiction, for proof purposes, that intermediate, non-quiescent states of asynchronous operations can be ignored. Then, the task of specification becomes relatively simple and the task of verification can be naturally decomposed into smaller sub-tasks. The sub-tasks iteratively summarize, guided by the structure of an asynchronous program, the atomic effect of non-atomic operations and the synchronous effect of asynchronous operations. This structuring of specifications and proofs corresponds to the introduction of multiple layers of stepwise refinement for asynchronous programs. We present the first proof rule, called synchronization, to reduce asynchronous invocations on a lower layer to synchronous invocations on a higher layer. We implemented our proof method in CIVL and evaluated it on a collection of benchmark programs.},
  author       = {Henzinger, Thomas A and Kragl, Bernhard and Qadeer, Shaz},
  issn         = {2664-1690},
  pages        = {28},
  publisher    = {IST Austria},
  title        = {{Synchronizing the asynchronous}},
  doi          = {10.15479/AT:IST-2018-853-v2-2},
  year         = {2017},
}

