@article{6650,
  author       = {Alderighi, Thomas and Malomo, Luigi and Giorgi, Daniela and Bickel, Bernd and Cignoni, Paolo and Pietroni, Nico},
  title        = {{Volume-aware design of composite molds}},
  abstract     = {We propose a novel technique for the automatic design of molds to cast highly complex shapes. The technique generates composite, two-piece molds. Each mold piece is made up of a hard plastic shell and a flexible silicone part. Thanks to the thin, soft, and smartly shaped silicone part, which is kept in place by a hard plastic shell, we can cast objects of unprecedented complexity. An innovative algorithm based on a volumetric analysis defines the layout of the internal cuts in the silicone mold part. Our approach can robustly handle thin protruding features and intertwined topologies that have caused previous methods to fail. We compare our results with state of the art techniques, and we demonstrate the casting of shapes with extremely complex geometry.},
  journal      = {ACM Transactions on Graphics},
  volume       = {38},
  number       = {4},
  publisher    = {ACM},
  issn         = {0730-0301},
  doi          = {10.1145/3306346.3322981},
  year         = {2019},
}

@article{6657,
  abstract     = {In this article a model is described how Open Access definitions can be formed on the basis of objective criteria. The common Open Access definitions such as "gold" and "green" are not exactly defined. This becomes a problem as soon as one begins to measure Open Access, for example if the development of the Open Access share should be monitored. This was discussed in the working group on Open Access Monitoring of the AT2OA project and the present model was developed, which is based on 5 critics with 4 characteristics: location, licence, version, embargo and conditions of the Open Access publication are taken into account. In the meantime, the model has also been tested in practice using R scripts, and the initial results are quite promising.},
  author       = {Danowski, Patrick},
  issn         = {1022-2588},
  journal      = {Mitteilungen der Vereinigung Österreichischer Bibliothekarinnen und Bibliothekare},
  number       = {1},
  pages        = {59--65},
  publisher    = {Vereinigung Österreichischer Bibliothekarinnen und Bibliothekare},
  title        = {{An Austrian proposal for the classification of Open Access Tuples (COAT) - distinguish different open access types beyond colors}},
  doi          = {10.31263/voebm.v72i1.2276},
  volume       = {72},
  year         = {2019},
}

@article{6658,
  abstract     = {New genes are a major source of novelties, and a disproportionate amount of them are known to show testis expression in later phases of male gametogenesis in different groups such as mammals and plants. Here, we propose that this enhanced expression is a consequence of haploid selection during the latter stages of male gametogenesis. Because emerging adaptive mutations will be fixed faster if their phenotypes are expressed by haploid rather than diploid genotypes, new genes with advantageous functions arising during this unique stage of development have a better chance to become fixed. To test this hypothesis, expression levels of genes of differing evolutionary age were examined at various stages of Drosophila spermatogenesis. We found, consistent with a model based on haploid selection, that new Drosophila genes are both expressed in later haploid phases of spermatogenesis and harbor a significant enrichment of adaptive mutations. Additionally, the observed overexpression of new genes in the latter phases of spermatogenesis was limited to the autosomes. Because all male cells exhibit hemizygous expression for X-linked genes (and therefore effectively haploid), there is no expectation that selection acting on late spermatogenesis will have a different effect on X-linked genes in comparison to initial diploid phases. Together, our proposed hypothesis and the analyzed data suggest that natural selection in haploid cells elucidates several aspects of the origin of new genes by explaining the general prevalence of their testis expression, and a parsimonious solution for new alleles to avoid being lost by genetic drift or pseudogenization.},
  author       = {Raices, Julia and Otto, Paulo and Vibranovski, Maria},
  journal      = {Genome Research},
  number       = {7},
  pages        = {1115--1122},
  publisher    = {CSH Press},
  title        = {{Haploid selection drives new gene male germline expression}},
  doi          = {10.1101/gr.238824.118},
  volume       = {29},
  year         = {2019},
}

@article{6659,
  abstract     = {Chemical labeling of proteins with synthetic molecular probes offers the possibility to probe the functions of proteins of interest in living cells. However, the methods for covalently labeling targeted proteins using complementary peptide tag-probe pairs are still limited, irrespective of the versatility of such pairs in biological research. Herein, we report the new CysHis tag-Ni(II) probe pair for the specific covalent labeling of proteins. A broad-range evaluation of the reactivity profiles of the probe and the CysHis peptide tag afforded a tag-probe pair with an optimized and high labeling selectivity and reactivity. In particular, the labeling specificity of this pair was notably improved compared to the previously reported one. This pair was successfully utilized for the fluorescence imaging of membrane proteins on the surfaces of living cells, demonstrating its potential utility in biological research.},
  author       = {Zenmyo, Naoki and Tokumaru, Hiroki and Uchinomiya, Shohei and Fuchida, Hirokazu and Tabata, Shigekazu and Hamachi, Itaru and Shigemoto, Ryuichi and Ojida, Akio},
  issn         = {0009-2673},
  journal      = {Bulletin of the Chemical Society of Japan},
  number       = {5},
  pages        = {995--1000},
  publisher    = {The Chemical Society of Japan},
  title        = {{Optimized reaction pair of the CysHis tag and Ni(II)-NTA probe for highly selective chemical labeling of membrane proteins}},
  doi          = {10.1246/bcsj.20190034},
  volume       = {92},
  year         = {2019},
}

@article{6660,
  author       = {Sumin, Denis and Weyrich, Tim and Rittig, Tobias and Babaei, Vahid and Nindel, Thomas and Wilkie, Alexander and Didyk, Piotr and Bickel, Bernd and Křivánek, Jaroslav and Myszkowski, Karol},
  title        = {{Geometry-aware scattering compensation for 3D printing}},
  abstract     = {Commercially available full-color 3D printing allows for detailed control of material deposition in a volume, but an exact reproduction of a target surface appearance is hampered by the strong subsurface scattering that causes nontrivial volumetric cross-talk at the print surface. Previous work showed how an iterative optimization scheme based on accumulating absorptive materials at the surface can be used to find a volumetric distribution of print materials that closely approximates a given target appearance.

In this work, we first revisit the assumption that pushing the absorptive materials to the surface results in minimal volumetric cross-talk. We design a full-fledged optimization on a small domain for this task and confirm this previously reported heuristic. Then, we extend the above approach that is critically limited to color reproduction on planar surfaces, to arbitrary 3D shapes. Our method enables high-fidelity color texture reproduction on 3D prints by effectively compensating for internal light scattering within arbitrarily shaped objects. In addition, we propose a content-aware gamut mapping that significantly improves color reproduction for the pathological case of thin geometric features. Using a wide range of sample objects with complex textures and geometries, we demonstrate color reproduction whose fidelity is superior to state-of-the-art drivers for color 3D printers.},
  journal      = {ACM Transactions on Graphics},
  volume       = {38},
  number       = {4},
  publisher    = {ACM},
  issn         = {0730-0301},
  doi          = {10.1145/3306346.3322992},
  year         = {2019},
}

@article{6671,
  abstract     = {In this paper we discuss three results. The first two concern general sets of positive reach: we first characterize the reach of a closed set by means of a bound on the metric distortion between the distance measured in the ambient Euclidean space and the shortest path distance measured in the set. Secondly, we prove that the intersection of a ball with radius less than the reach with the set is geodesically convex, meaning that the shortest path between any two points in the intersection lies itself in the intersection. For our third result we focus on manifolds with positive reach and give a bound on the angle between tangent spaces at two different points in terms of the reach and the distance between the two points.},
  author       = {Boissonnat, Jean-Daniel and Lieutier, André and Wintraecken, Mathijs},
  issn         = {2367-1734},
  journal      = {Journal of Applied and Computational Topology},
  number       = {1-2},
  pages        = {29--58},
  publisher    = {Springer Nature},
  title        = {{The reach, metric distortion, geodesic convexity and the variation of tangent spaces}},
  doi          = {10.1007/s41468-019-00029-8},
  volume       = {3},
  year         = {2019},
}

@inproceedings{6673,
  abstract     = {Several classic problems in graph processing and computational geometry are solved via incremental algorithms, which split computation into a series of small tasks acting on shared state, which gets updated progressively. While the sequential variant of such algorithms usually specifies a fixed (but sometimes random) order in which the tasks should be performed, a standard approach to parallelizing such algorithms is to relax this constraint to allow for out-of-order parallel execution. This is the case for parallel implementations of Dijkstra's single-source shortest-paths (SSSP) algorithm, and for parallel Delaunay mesh triangulation. While many software frameworks parallelize incremental computation in this way, it is still not well understood whether this relaxed ordering approach can still provide any complexity guarantees. In this paper, we address this problem, and analyze the efficiency guarantees provided by a range of incremental algorithms when parallelized via relaxed schedulers. We show that, for algorithms such as Delaunay mesh triangulation and sorting by insertion, schedulers with a maximum relaxation factor of k in terms of the maximum priority inversion allowed will introduce a maximum amount of wasted work of O(log n poly(k)), where n is the number of tasks to be executed. For SSSP, we show that the additional work is O(poly(k), dmax / wmin), where dmax is the maximum distance between two nodes, and wmin is the minimum such distance. In practical settings where n >> k, this suggests that the overheads of relaxation will be outweighed by the improved scalability of the relaxed scheduler. On the negative side, we provide lower bounds showing that certain algorithms will inherently incur a non-trivial amount of wasted work due to scheduler relaxation, even for relatively benign relaxed schedulers.},
  author       = {Alistarh, Dan-Adrian and Nadiradze, Giorgi and Koval, Nikita},
  booktitle    = {31st ACM Symposium on Parallelism in Algorithms and Architectures},
  isbn         = {9781450361842},
  location     = {Phoenix, AZ, United States},
  pages        = {145--154},
  publisher    = {ACM Press},
  title        = {{Efficiency guarantees for parallel incremental algorithms under relaxed schedulers}},
  doi          = {10.1145/3323165.3323201},
  year         = {2019},
}

@inproceedings{6676,
  author       = {Alistarh, Dan-Adrian and Aspnes, James and Ellen, Faith and Gelashvili, Rati and Zhu, Leqi},
  title        = {{Why extension-based proofs fail}},
  abstract     = {It is impossible to deterministically solve wait-free consensus in an asynchronous system. The classic proof uses a valency argument, which constructs an infinite execution by repeatedly extending a finite execution. We introduce extension-based proofs, a class of impossibility proofs that are modelled as an interaction between a prover and a protocol and that include valency arguments.

Using proofs based on combinatorial topology, it has been shown that it is impossible to deterministically solve k-set agreement among n > k ≥ 2 processes in a wait-free manner. However, it was unknown whether proofs based on simpler techniques were possible. We show that this impossibility result cannot be obtained by an extension-based proof and, hence, extension-based proofs are limited in power.},
  booktitle    = {Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing},
  pages        = {986--996},
  publisher    = {ACM Press},
  location     = {Phoenix, AZ, United States},
  isbn         = {9781450367059},
  doi          = {10.1145/3313276.3316407},
  year         = {2019},
}

@inproceedings{6677,
  abstract     = {The Fiat-Shamir heuristic transforms a public-coin interactive proof into a non-interactive argument, by replacing the verifier with a cryptographic hash function that is applied to the protocol’s transcript. Constructing hash functions for which this transformation is sound is a central and long-standing open question in cryptography.

We show that solving the END-OF-METERED-LINE problem is no easier than breaking the soundness of the Fiat-Shamir transformation when applied to the sumcheck protocol. In particular, if the transformed protocol is sound, then any hard problem in #P gives rise to a hard distribution in the class CLS, which is contained in PPAD. Our result opens up the possibility of sampling moderately-sized games for which it is hard to find a Nash equilibrium, by reducing the inversion of appropriately chosen one-way functions to #SAT.

Our main technical contribution is a stateful incrementally verifiable procedure that, given a SAT instance over n variables, counts the number of satisfying assignments. This is accomplished via an exponential sequence of small steps, each computable in time poly(n). Incremental verifiability means that each intermediate state includes a sumcheck-based proof of its correctness, and the proof can be updated and verified in time poly(n).},
  author       = {Choudhuri, Arka Rai and Hubáček, Pavel and Kamath Hosdurg, Chethan and Pietrzak, Krzysztof Z and Rosen, Alon and Rothblum, Guy N.},
  booktitle    = {Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing},
  isbn         = {9781450367059},
  location     = {Phoenix, AZ, United States},
  pages        = {1103--1114},
  publisher    = {ACM Press},
  title        = {{Finding a Nash equilibrium is no easier than breaking Fiat-Shamir}},
  doi          = {10.1145/3313276.3316400},
  year         = {2019},
}

@article{6680,
  abstract     = {This paper analyzes how partial selfing in a large source population influences its ability to colonize a new habitat via the introduction of a few founder individuals. Founders experience inbreeding depression due to partially recessive deleterious alleles as well as maladaptation to the new environment due to selection on a large number of additive loci. I first introduce a simplified version of the Inbreeding History Model (Kelly, 2007) in order to characterize mutation-selection balance in a large, partially selfing source population under selection involving multiple non-identical loci. I then use individual-based simulations to study the eco-evolutionary dynamics of founders establishing in the new habitat under a model of hard selection. The study explores how selfing rate shapes establishment probabilities of founders via effects on both inbreeding depression and adaptability to the new environment, and also distinguishes the effects of selfing on the initial fitness of founders from its effects on the long-term adaptive response of the populations they found. A high rate of (but not complete) selfing is found to aid establishment over a wide range of parameters, even in the absence of mate limitation. The sensitivity of the results to assumptions about the nature of polygenic selection are discussed.},
  author       = {Sachdeva, Himani},
  issn         = {1558-5646},
  journal      = {Evolution},
  number       = {9},
  pages        = {1729--1745},
  publisher    = {Wiley},
  title        = {{Effect of partial selfing and polygenic selection on establishment in a new habitat}},
  doi          = {10.1111/evo.13812},
  volume       = {73},
  year         = {2019},
}

@phdthesis{6681,
  abstract     = {The first part of the thesis considers the computational aspects of the homotopy groups πd(X) of a topological space X. It is well known that there is no algorithm to decide whether the fundamental group π1(X) of a given finite simplicial complex X is trivial. On the other hand, there are several algorithms that, given a finite simplicial complex X that is simply connected (i.e., with π1(X) trivial), compute the higher homotopy group πd(X) for any given d ≥ 2.
However, these algorithms come with a caveat: They compute the isomorphism type of πd(X), d ≥ 2 as an abstract finitely generated abelian group given by generators and relations, but they work with very implicit representations of the elements of πd(X). We present an algorithm that, given a simply connected space X, computes πd(X) and represents its elements as simplicial maps from suitable triangulations of the d-sphere Sd to X. For fixed d, the algorithm runs in time exponential in size(X), the number of simplices of X. Moreover, we prove that this is optimal: For every fixed d ≥ 2,
we construct a family of simply connected spaces X such that for any simplicial map representing a generator of πd(X), the size of the triangulation of Sd on which the map is defined, is exponential in size(X).
In the second part of the thesis, we prove that the following question is algorithmically undecidable for d < ⌊3(k+1)/2⌋, k ≥ 5 and (k, d) ≠ (5, 7), which covers essentially everything outside the meta-stable range: Given a finite simplicial complex K of dimension k, decide whether there exists a piecewise-linear (i.e., linear on an arbitrarily fine subdivision of K) embedding f : K ↪→ Rd of K into a d-dimensional Euclidean space.},
  author       = {Zhechev, Stephan Y},
  issn         = {2663-337X},
  pages        = {104},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Algorithmic aspects of homotopy theory and embeddability}},
  doi          = {10.15479/AT:ISTA:6681},
  year         = {2019},
}

@article{6710,
  abstract     = {Sexual dimorphism in morphology, physiology or life history traits is common in dioecious plants at reproductive maturity, but it is typically inconspicuous or absent in juveniles. Although plants of different sexes probably begin to diverge in gene expression both before their reproduction commences and before dimorphism becomes readily apparent, to our knowledge transcriptome-wide differential gene expression has yet to be demonstrated for any angiosperm species.},
  author       = {Cossard, Guillaume and Toups, Melissa A and Pannell, John},
  issn         = {1095-8290},
  journal      = {Annals of Botany},
  number       = {7},
  pages        = {1119--1131},
  publisher    = {Oxford University Press},
  title        = {{Sexual dimorphism and rapid turnover in gene expression in pre-reproductive seedlings of a dioecious herb}},
  doi          = {10.1093/aob/mcy183},
  volume       = {123},
  year         = {2019},
}

@article{6713,
  author       = {Castro, João Pl and Yancoskie, Michelle N. and Marchini, Marta and Belohlavy, Stefanie and Hiramatsu, Layla and Kučka, Marek and Beluch, William H. and Naumann, Ronald and Skuplik, Isabella and Cobb, John and Barton, Nicholas H and Rolian, Campbell and Chan, Yingguang Frank},
  title        = {{An integrative genomic analysis of the Longshanks selection experiment for longer limbs in mice}},
  abstract     = {Evolutionary studies are often limited by missing data that are critical to understanding the history of selection. Selection experiments, which reproduce rapid evolution under controlled conditions, are excellent tools to study how genomes evolve under selection. Here we present a genomic dissection of the Longshanks selection experiment, in which mice were selectively bred over 20 generations for longer tibiae relative to body mass, resulting in 13% longer tibiae in two replicates. We synthesized evolutionary theory, genome sequences and molecular genetics to understand the selection response and found that it involved both polygenic adaptation and discrete loci of major effect, with the strongest loci tending to be selected in parallel between replicates. We show that selection may favor de-repression of bone growth through inactivating two limb enhancers of an inhibitor, Nkx3-2. Our integrative genomic analyses thus show that it is possible to connect individual base-pair changes to the overall selection response.},
  journal      = {eLife},
  volume       = {8},
  publisher    = {eLife Sciences Publications},
  doi          = {10.7554/eLife.42014},
  year         = {2019},
}

@article{6717,
  author       = {Igler, Claudia and Abedon, Stephen T.},
  title        = {{Commentary: A host-produced quorum-sensing autoinducer controls a phage lysis-lysogeny decision}},
  abstract     = {With the recent publication by Silpe and Bassler (2019), considering phage detection of a bacterial quorum-sensing (QS) autoinducer, we now have as many as five examples of phage-associated intercellular communication (Table 1). Each potentially involves ecological inferences by phages as to concentrations of surrounding phage-infected or uninfected bacteria. While the utility of phage detection of bacterial QS molecules may at first glance appear to be straightforward, we suggest in this commentary that the underlying ecological explanation is unlikely to be simple.},
  journal      = {Frontiers in Microbiology},
  volume       = {10},
  publisher    = {Frontiers},
  doi          = {10.3389/fmicb.2019.01171},
  year         = {2019},
}

@inproceedings{6725,
  abstract     = {A Valued Constraint Satisfaction Problem (VCSP) provides a common framework that can express a wide range of discrete optimization problems. A VCSP instance is given by a finite set of variables, a finite domain of labels, and an objective function to be minimized. This function is represented as a sum of terms where each term depends on a subset of the variables. To obtain different classes of optimization problems, one can restrict all terms to come from a fixed set Γ of cost functions, called a language. 
Recent breakthrough results have established a complete complexity classification of such classes with respect to language Γ: if all cost functions in Γ satisfy a certain algebraic condition then all Γ-instances can be solved in polynomial time, otherwise the problem is NP-hard. Unfortunately, testing this condition for a given language Γ is known to be NP-hard. We thus study exponential algorithms for this meta-problem. We show that the tractability condition of a finite-valued language Γ can be tested in O(√3^(3|D|)⋅poly(size(Γ))) time, where D is the domain of Γ and poly(⋅) is some fixed polynomial. We also obtain a matching lower bound under the Strong Exponential Time Hypothesis (SETH). More precisely, we prove that for any constant δ<1 there is no O(√3^(3δ|D|)) algorithm, assuming that SETH holds.},
  author       = {Kolmogorov, Vladimir},
  booktitle    = {46th International Colloquium on Automata, Languages and Programming},
  isbn         = {978-3-95977-109-2},
  issn         = {1868-8969},
  location     = {Patras, Greece},
  pages        = {77:1--77:12},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Testing the complexity of a valued CSP language}},
  doi          = {10.4230/LIPICS.ICALP.2019.77},
  volume       = {132},
  year         = {2019},
}

@inbook{6726,
  abstract     = {Randomness is an essential part of any secure cryptosystem, but many constructions rely on distributions that are not uniform. This is particularly true for lattice based cryptosystems, which more often than not make use of discrete Gaussian distributions over the integers. For practical purposes it is crucial to evaluate the impact that approximation errors have on the security of a scheme to provide the best possible trade-off between security and performance. Recent years have seen surprising results allowing to use relatively low precision while maintaining high levels of security. A key insight in these results is that sampling a distribution with low relative error can provide very strong security guarantees. Since floating point numbers provide guarantees on the relative approximation error, they seem a suitable tool in this setting, but it is not obvious which sampling algorithms can actually profit from them. While previous works have shown that inversion sampling can be adapted to provide a low relative error (Pöppelmann et al., CHES 2014; Prest, ASIACRYPT 2017), other works have called into question if this is possible for other sampling techniques (Zheng et al., Eprint report 2018/309). In this work, we consider all sampling algorithms that are popular in the cryptographic setting and analyze the relationship of floating point precision and the resulting relative error. We show that all of the algorithms either natively achieve a low relative error or can be adapted to do so.},
  author       = {Walter, Michael},
  booktitle    = {Progress in Cryptology – AFRICACRYPT 2019},
  editor       = {Buchmann, J and Nitaj, A and Rachidi, T},
  isbn         = {978-3-030-23695-3},
  issn         = {0302-9743},
  location     = {Rabat, Morocco},
  pages        = {157--180},
  publisher    = {Springer Nature},
  title        = {{Sampling the integers with low relative error}},
  doi          = {10.1007/978-3-030-23696-0_9},
  volume       = {11627},
  year         = {2019},
}

@article{6750,
  abstract     = {Polar codes have gained extensive attention during the past few years and recently they have been selected for the next generation of wireless communications standards (5G). Successive-cancellation-based (SC-based) decoders, such as SC list (SCL) and SC flip (SCF), provide a reasonable error performance for polar codes at the cost of low decoding speed. Fast SC-based decoders, such as Fast-SSC, Fast-SSCL, and Fast-SSCF, identify the special constituent codes in a polar code graph off-line, produce a list of operations, store the list in memory, and feed the list to the decoder to decode the constituent codes in order efficiently, thus increasing the decoding speed. However, the list of operations is dependent on the code rate and as the rate changes, a new list is produced, making fast SC-based decoders not rate-flexible. In this paper, we propose a completely rate-flexible fast SC-based decoder by creating the list of operations directly in hardware, with low implementation complexity. We further propose a hardware architecture implementing the proposed method and show that the area occupation of the rate-flexible fast SC-based decoder in this paper is only 38% of the total area of the memory-based base-line decoder when 5G code rates are supported.},
  author       = {Hashemi, Seyyed Ali and Condo, Carlo and Mondelli, Marco and Gross, Warren J},
  issn         = {1053-587X},
  journal      = {IEEE Transactions on Signal Processing},
  number       = {22},
  publisher    = {IEEE},
  title        = {{Rate-flexible fast polar decoders}},
  doi          = {10.1109/TSP.2019.2944738},
  volume       = {67},
  year         = {2019},
}

@article{6752,
  abstract     = {Two-player games on graphs are widely studied in formal methods, as they model the interaction between a system and its environment. The game is played by moving a token throughout a graph to produce an infinite path. There are several common modes to determine how the players move the token through the graph; e.g., in turn-based games the players alternate turns in moving the token. We study the bidding mode of moving the token, which, to the best of our knowledge, has never been studied in infinite-duration games. The following bidding rule was previously defined and called Richman bidding. Both players have separate budgets, which sum up to 1. In each turn, a bidding takes place: Both players submit bids simultaneously, where a bid is legal if it does not exceed the available budget, and the higher bidder pays his bid to the other player and moves the token. The central question studied in bidding games is a necessary and sufficient initial budget for winning the game: a threshold budget in a vertex is a value t ∈ [0, 1] such that if Player 1’s budget exceeds t, he can win the game; and if Player 2’s budget exceeds 1 − t, he can win the game. Threshold budgets were previously shown to exist in every vertex of a reachability game, which have an interesting connection with random-turn games—a sub-class of simple stochastic games in which the player who moves is chosen randomly. We show the existence of threshold budgets for a qualitative class of infinite-duration games, namely parity games, and a quantitative class, namely mean-payoff games. The key component of the proof is a quantitative solution to strongly connected mean-payoff bidding games in which we extend the connection with random-turn games to these games, and construct explicit optimal strategies for both players.},
  author       = {Avni, Guy and Henzinger, Thomas A and Chonev, Ventsislav K},
  issn         = {1557-735X},
  journal      = {Journal of the ACM},
  number       = {4},
  publisher    = {ACM},
  title        = {{Infinite-duration bidding games}},
  doi          = {10.1145/3340295},
  volume       = {66},
  year         = {2019},
}

@article{6755,
  abstract     = {Differentiated sex chromosomes are accompanied by a difference in gene dose between X/Z-specific and autosomal genes. At the transcriptomic level, these sex-linked genes can lead to expression imbalance, or gene dosage can be compensated by epigenetic mechanisms and results into expression level equalization. Schistosoma mansoni has been previously described as a ZW species (i.e., female heterogamety, in opposition to XY male heterogametic species) with a partial dosage compensation, but underlying mechanisms are still unexplored. Here, we combine transcriptomic (RNA-Seq) and epigenetic data (ChIP-Seq against H3K4me3, H3K27me3,andH4K20me1histonemarks) in free larval cercariae and intravertebrate parasitic stages. For the first time, we describe differences in dosage compensation status in ZW females, depending on the parasitic status: free cercariae display global dosage compensation, whereas intravertebrate stages show a partial dosage compensation. We also highlight regional differences of gene expression along the Z chromosome in cercariae, but not in the intravertebrate stages. Finally, we feature a consistent permissive chromatin landscape of the Z chromosome in both sexes and stages. We argue that dosage compensation in schistosomes is characterized by chromatin remodeling mechanisms in the Z-specific region.},
  author       = {Picard, Marion A L and Vicoso, Beatriz and Roquis, David and Bulla, Ingo and Augusto, Ronaldo C. and Arancibia, Nathalie and Grunau, Christoph and Boissier, Jérôme and Cosseau, Céline},
  issn         = {1759-6653},
  journal      = {Genome biology and evolution},
  number       = {7},
  pages        = {1909--1922},
  publisher    = {Oxford University Press},
  title        = {{Dosage compensation throughout the Schistosoma mansoni lifecycle: Specific chromatin landscape of the Z chromosome}},
  doi          = {10.1093/gbe/evz133},
  volume       = {11},
  year         = {2019},
}

@article{6756,
  abstract     = {We study the topology generated by the temperature fluctuations of the cosmic microwave background (CMB) radiation, as quantified by the number of components and holes, formally given by the Betti numbers, in the growing excursion sets. We compare CMB maps observed by the Planck satellite with a thousand simulated maps generated according to the ΛCDM paradigm with Gaussian distributed fluctuations. The comparison is multi-scale, being performed on a sequence of degraded maps with mean pixel separation ranging from 0.05 to 7.33°. The survey of the CMB over 𝕊2 is incomplete due to obfuscation effects by bright point sources and other extended foreground objects like our own galaxy. To deal with such situations, where analysis in the presence of “masks” is of importance, we introduce the concept of relative homology. The parametric χ2-test shows differences between observations and simulations, yielding p-values at percent to less than permil levels roughly between 2 and 7°, with the difference in the number of components and holes peaking at more than 3σ sporadically at these scales. The highest observed deviation between the observations and simulations for b0 and b1 is approximately between 3σ and 4σ at scales of 3–7°. There are reports of mildly unusual behaviour of the Euler characteristic at 3.66° in the literature, computed from independent measurements of the CMB temperature fluctuations by Planck’s predecessor, the Wilkinson Microwave Anisotropy Probe (WMAP) satellite. The mildly anomalous behaviour of the Euler characteristic is phenomenologically related to the strongly anomalous behaviour of components and holes, or the zeroth and first Betti numbers, respectively. Further, since these topological descriptors show consistent anomalous behaviour over independent measurements of Planck and WMAP, instrumental and systematic errors may be an unlikely source.
These are also the scales at which the observed maps exhibit low variance compared to the simulations, and approximately the range of scales at which the power spectrum exhibits a dip with respect to the theoretical model. Non-parametric tests show even stronger differences at almost all scales. Crucially, Gaussian simulations based on power-spectrum matching the characteristics of the observed dipped power spectrum are not able to resolve the anomaly. Understanding the origin of the anomalies in the CMB, whether cosmological in nature or arising due to late-time effects, is an extremely challenging task. Regardless, beyond the trivial possibility that this may still be a manifestation of an extreme Gaussian case, these observations, along with the super-horizon scales involved, may motivate the study of primordial non-Gaussianity. Alternative scenarios worth exploring may be models with non-trivial topology, including topological defect models.},
  author       = {Pranav, Pratyush and Adler, Robert J. and Buchert, Thomas and Edelsbrunner, Herbert and Jones, Bernard J.T. and Schwartzman, Armin and Wagner, Hubert and Van De Weygaert, Rien},
  issn         = {1432-0746},
  journal      = {Astronomy and Astrophysics},
  publisher    = {EDP Sciences},
  title        = {{Unexpected topology of the temperature fluctuations in the cosmic microwave background}},
  doi          = {10.1051/0004-6361/201834916},
  volume       = {627},
  year         = {2019},
}

