@article{14001,
  abstract     = {Chiral molecules interact and react differently with other chiral objects, depending on their handedness. Therefore, it is essential to understand and ultimately control the evolution of molecular chirality during chemical reactions. Although highly sophisticated techniques for the controlled synthesis of chiral molecules have been developed, the observation of chirality on the natural femtosecond time scale of a chemical reaction has so far remained out of reach in the gas phase. Here, we demonstrate a general experimental technique, based on high-harmonic generation in tailored laser fields, and apply it to probe the time evolution of molecular chirality during the photodissociation of 2-iodobutane. These measurements show a change in sign and a pronounced increase in the magnitude of the chiral response over the first 100 fs, followed by its decay within less than 500 fs, revealing the photodissociation to achiral products. The observed time evolution is explained in terms of the variation of the electric and magnetic transition-dipole moments between the lowest electronic states of the cation as a function of the reaction coordinate. These results open the path to investigations of the chirality of molecular-reaction pathways, light-induced chirality in chemical processes, and the control of molecular chirality through tailored laser pulses.},
  author       = {Baykusheva, Denitsa Rangelova and Zindel, Daniel and Svoboda, Vít and Bommeli, Elias and Ochsner, Manuel and Tehlar, Andres and Wörner, Hans Jakob},
  issn         = {1091-6490},
  journal      = {Proceedings of the National Academy of Sciences},
  keywords     = {Multidisciplinary},
  number       = {48},
  pages        = {23923--23929},
  publisher    = {National Academy of Sciences},
  title        = {{Real-time probing of chirality during a chemical reaction}},
  doi          = {10.1073/pnas.1907189116},
  volume       = {116},
  year         = {2019},
}

@inproceedings{14184,
  abstract     = {Learning disentangled representations is considered a cornerstone problem in
representation learning. Recently, Locatello et al. (2019) demonstrated that
unsupervised disentanglement learning without inductive biases is theoretically
impossible and that existing inductive biases and unsupervised methods do not
allow to consistently learn disentangled representations. However, in many
practical settings, one might have access to a limited amount of supervision,
for example through manual labeling of (some) factors of variation in a few
training examples. In this paper, we investigate the impact of such supervision
on state-of-the-art disentanglement methods and perform a large scale study,
training over 52000 models under well-defined and reproducible experimental
conditions. We observe that a small number of labeled examples (0.01--0.5\% of
the data set), with potentially imprecise and incomplete labels, is sufficient
to perform model selection on state-of-the-art unsupervised models. Further, we
investigate the benefit of incorporating supervision into the training process.
Overall, we empirically validate that with little and imprecise supervision it
is possible to reliably learn disentangled representations.},
  author       = {Locatello, Francesco and Tschannen, Michael and Bauer, Stefan and Rätsch, Gunnar and Schölkopf, Bernhard and Bachem, Olivier},
  booktitle    = {8th International Conference on Learning Representations},
  location     = {Virtual},
  title        = {{Disentangling factors of variation using few labels}},
  year         = {2020},
}

@inproceedings{14189,
  abstract     = {We consider the problem of recovering a common latent source with independent
components from multiple views. This applies to settings in which a variable is
measured with multiple experimental modalities, and where the goal is to
synthesize the disparate measurements into a single unified representation. We
consider the case that the observed views are a nonlinear mixing of
component-wise corruptions of the sources. When the views are considered
separately, this reduces to nonlinear Independent Component Analysis (ICA) for
which it is provably impossible to undo the mixing. We present novel
identifiability proofs that this is possible when the multiple views are
considered jointly, showing that the mixing can theoretically be undone using
function approximators such as deep neural networks. In contrast to known
identifiability results for nonlinear ICA, we prove that independent latent
sources with arbitrary mixing can be recovered as long as multiple,
sufficiently different noisy views are available.},
  author       = {Gresele, Luigi and Rubenstein, Paul K. and Mehrjou, Arash and Locatello, Francesco and Schölkopf, Bernhard},
  booktitle    = {Proceedings of the 35th Conference on Uncertainty in Artificial Intelligence},
  location     = {Tel Aviv, Israel},
  pages        = {217--227},
  publisher    = {ML Research Press},
  title        = {{The incomplete Rosetta Stone problem: Identifiability results for multi-view nonlinear ICA}},
  volume       = {115},
  year         = {2019},
}

@inproceedings{14190,
  abstract     = {Learning meaningful and compact representations with disentangled semantic
aspects is considered to be of key importance in representation learning. Since
real-world data is notoriously costly to collect, many recent state-of-the-art
disentanglement models have heavily relied on synthetic toy data-sets. In this
paper, we propose a novel data-set which consists of over one million images of
physical 3D objects with seven factors of variation, such as object color,
shape, size and position. In order to be able to control all the factors of
variation precisely, we built an experimental platform where the objects are
being moved by a robotic arm. In addition, we provide two more datasets which
consist of simulations of the experimental setup. These datasets provide for
the first time the possibility to systematically investigate how well different
disentanglement methods perform on real data in comparison to simulation, and
how simulated data can be leveraged to build better representations of the real
world. We provide a first experimental study of these questions and our results
indicate that learned models transfer poorly, but that model and hyperparameter
selection is an effective means of transferring information to the real world.},
  author       = {Gondal, Muhammad Waleed and Wüthrich, Manuel and Miladinović, Đorđe and Locatello, Francesco and Breidt, Martin and Volchkov, Valentin and Akpo, Joel and Bachem, Olivier and Schölkopf, Bernhard and Bauer, Stefan},
  booktitle    = {Advances in Neural Information Processing Systems},
  isbn         = {9781713807933},
  location     = {Vancouver, Canada},
  title        = {{On the transfer of inductive bias from simulation to the real world: a new disentanglement dataset}},
  volume       = {32},
  year         = {2019},
}

@inproceedings{14191,
  abstract     = {A broad class of convex optimization problems can be formulated as a semidefinite program (SDP), minimization of a convex function over the positive-semidefinite cone subject to some affine constraints. The majority of classical SDP solvers are designed for the deterministic setting where problem data is readily available. In this setting, generalized conditional gradient methods (aka Frank-Wolfe-type methods) provide scalable solutions by leveraging the so-called linear minimization oracle instead of the projection onto the semidefinite cone. Most problems in machine learning and modern engineering applications, however, contain some degree of stochasticity. In this work, we propose the first conditional-gradient-type method for solving stochastic optimization problems under affine constraints. Our method guarantees $O(k^{-1/3})$ convergence rate in expectation on the objective residual and $O(k^{-5/12})$ on the feasibility gap.},
  author       = {Locatello, Francesco and Yurtsever, Alp and Fercoq, Olivier and Cevher, Volkan},
  booktitle    = {Advances in Neural Information Processing Systems},
  isbn         = {9781713807933},
  location     = {Vancouver, Canada},
  pages        = {14291--14301},
  title        = {{Stochastic Frank-Wolfe for composite convex minimization}},
  volume       = {32},
  year         = {2019},
}

@inproceedings{14193,
  abstract     = {A disentangled representation encodes information about the salient factors
of variation in the data independently. Although it is often argued that this
representational format is useful in learning to solve many real-world
down-stream tasks, there is little empirical evidence that supports this claim.
In this paper, we conduct a large-scale study that investigates whether
disentangled representations are more suitable for abstract reasoning tasks.
Using two new tasks similar to Raven's Progressive Matrices, we evaluate the
usefulness of the representations learned by 360 state-of-the-art unsupervised
disentanglement models. Based on these representations, we train 3600 abstract
reasoning models and observe that disentangled representations do in fact lead
to better down-stream performance. In particular, they enable quicker learning
using fewer samples.},
  author       = {van Steenkiste, Sjoerd and Locatello, Francesco and Schmidhuber, Jürgen and Bachem, Olivier},
  booktitle    = {Advances in Neural Information Processing Systems},
  isbn         = {9781713807933},
  location     = {Vancouver, Canada},
  title        = {{Are disentangled representations helpful for abstract visual reasoning?}},
  volume       = {32},
  year         = {2019},
}

@inproceedings{14197,
  abstract     = {Recently there has been a significant interest in learning disentangled
representations, as they promise increased interpretability, generalization to
unseen scenarios and faster learning on downstream tasks. In this paper, we
investigate the usefulness of different notions of disentanglement for
improving the fairness of downstream prediction tasks based on representations.
We consider the setting where the goal is to predict a target variable based on
the learned representation of high-dimensional observations (such as images)
that depend on both the target variable and an \emph{unobserved} sensitive
variable. We show that in this setting both the optimal and empirical
predictions can be unfair, even if the target variable and the sensitive
variable are independent. Analyzing the representations of more than
\num{12600} trained state-of-the-art disentangled models, we observe that
several disentanglement scores are consistently correlated with increased
fairness, suggesting that disentanglement may be a useful property to encourage
fairness when sensitive variables are not observed.},
  author       = {Locatello, Francesco and Abbati, Gabriele and Rainforth, Tom and Bauer, Stefan and Schölkopf, Bernhard and Bachem, Olivier},
  booktitle    = {Advances in Neural Information Processing Systems},
  isbn         = {9781713807933},
  location     = {Vancouver, Canada},
  pages        = {14611--14624},
  title        = {{On the fairness of disentangled representations}},
  volume       = {32},
  year         = {2019},
}

@inproceedings{14200,
  abstract     = {The key idea behind the unsupervised learning of disentangled representations
is that real-world data is generated by a few explanatory factors of variation
which can be recovered by unsupervised learning algorithms. In this paper, we
provide a sober look at recent progress in the field and challenge some common
assumptions. We first theoretically show that the unsupervised learning of
disentangled representations is fundamentally impossible without inductive
biases on both the models and the data. Then, we train more than 12000 models
covering most prominent methods and evaluation metrics in a reproducible
large-scale experimental study on seven different data sets. We observe that
while the different methods successfully enforce properties ``encouraged'' by
the corresponding losses, well-disentangled models seemingly cannot be
identified without supervision. Furthermore, increased disentanglement does not
seem to lead to a decreased sample complexity of learning for downstream tasks.
Our results suggest that future work on disentanglement learning should be
explicit about the role of inductive biases and (implicit) supervision,
investigate concrete benefits of enforcing disentanglement of the learned
representations, and consider a reproducible experimental setup covering
several data sets.},
  author       = {Locatello, Francesco and Bauer, Stefan and Lucic, Mario and Rätsch, Gunnar and Gelly, Sylvain and Schölkopf, Bernhard and Bachem, Olivier},
  booktitle    = {Proceedings of the 36th International Conference on Machine Learning},
  location     = {Long Beach, CA, United States},
  pages        = {4114--4124},
  publisher    = {ML Research Press},
  title        = {{Challenging common assumptions in the unsupervised learning of disentangled representations}},
  volume       = {97},
  year         = {2019},
}

@article{14299,
  abstract     = {DNA origami nano-objects are usually designed around generic single-stranded “scaffolds”. Many properties of the target object are determined by details of those generic scaffold sequences. Here, we enable designers to fully specify the target structure not only in terms of desired 3D shape but also in terms of the sequences used. To this end, we built design tools to construct scaffold sequences de novo based on strand diagrams, and we developed scalable production methods for creating design-specific scaffold strands with fully user-defined sequences. We used 17 custom scaffolds having different lengths and sequence properties to study the influence of sequence redundancy and sequence composition on multilayer DNA origami assembly and to realize efficient one-pot assembly of multiscaffold DNA origami objects. Furthermore, as examples for functionalized scaffolds, we created a scaffold that enables direct, covalent cross-linking of DNA origami via UV irradiation, and we built DNAzyme-containing scaffolds that allow postfolding DNA origami domain separation.},
  author       = {Engelhardt, F. A. S. and Praetorius, Florian M. and Wachauf, C. H. and Brüggenthies, G. and Kohler, F. and Kick, B. and Kadletz, K. L. and Pham, P. N. and Behler, K. L. and Gerling, T. and Dietz, H.},
  issn         = {1936-086X},
  journal      = {ACS Nano},
  number       = {5},
  pages        = {5015--5027},
  publisher    = {ACS Publications},
  title        = {{Custom-size, functional, and durable DNA origami with design-specific scaffolds}},
  doi          = {10.1021/acsnano.9b01025},
  volume       = {13},
  year         = {2019},
}

@article{9016,
  abstract     = {Inhibiting the histone H3–ASF1 (anti‐silencing function 1) protein–protein interaction (PPI) represents a potential approach for treating numerous cancers. As an α‐helix‐mediated PPI, constraining the key histone H3 helix (residues 118–135) is a strategy through which chemical probes might be elaborated to test this hypothesis. In this work, variant H3$_{118-135}$ peptides bearing pentenylglycine residues at the i and i+4 positions were constrained by olefin metathesis. Biophysical analyses revealed that promotion of a bioactive helical conformation depends on the position at which the constraint is introduced, but that the potency of binding towards ASF1 is unaffected by the constraint and instead that enthalpy–entropy compensation occurs.},
  author       = {Bakail, May M and Rodriguez-Marin, Silvia and Hegedüs, Zsófia and Perrin, Marie E. and Ochsenbein, Françoise and Wilson, Andrew J.},
  issn         = {1439-4227},
  journal      = {ChemBioChem},
  number       = {7},
  pages        = {891--895},
  publisher    = {Wiley},
  title        = {{Recognition of ASF1 by using hydrocarbon-constrained peptides}},
  doi          = {10.1002/cbic.201800633},
  volume       = {20},
  year         = {2019},
}

@article{9018,
  abstract     = {Anti-silencing function 1 (ASF1) is a conserved H3-H4 histone chaperone involved in histone dynamics during replication, transcription, and DNA repair. Overexpressed in proliferating tissues including many tumors, ASF1 has emerged as a promising therapeutic target. Here, we combine structural, computational, and biochemical approaches to design peptides that inhibit the ASF1-histone interaction. Starting from the structure of the human ASF1-histone complex, we developed a rational design strategy combining epitope tethering and optimization of interface contacts to identify a potent peptide inhibitor with a dissociation constant of 3 nM. When introduced into cultured cells, the inhibitors impair cell proliferation, perturb cell-cycle progression, and reduce cell migration and invasion in a manner commensurate with their affinity for ASF1. Finally, we find that direct injection of the most potent ASF1 peptide inhibitor in mouse allografts reduces tumor growth. Our results open new avenues to use ASF1 inhibitors as promising leads for cancer therapy.},
  author       = {Bakail, May M and Gaubert, Albane and Andreani, Jessica and Moal, Gwenaëlle and Pinna, Guillaume and Boyarchuk, Ekaterina and Gaillard, Marie-Cécile and Courbeyrette, Regis and Mann, Carl and Thuret, Jean-Yves and Guichard, Bérengère and Murciano, Brice and Richet, Nicolas and Poitou, Adeline and Frederic, Claire and Le Du, Marie-Hélène and Agez, Morgane and Roelants, Caroline and Gurard-Levin, Zachary A. and Almouzni, Geneviève and Cherradi, Nadia and Guerois, Raphael and Ochsenbein, Françoise},
  issn         = {2451-9456},
  journal      = {Cell Chemical Biology},
  keywords     = {Clinical Biochemistry, Molecular Medicine, Biochemistry, Molecular Biology, Pharmacology, Drug Discovery},
  number       = {11},
  pages        = {1573--1585.e10},
  publisher    = {Elsevier},
  title        = {{Design on a rational basis of high-affinity peptides inhibiting the histone chaperone ASF1}},
  doi          = {10.1016/j.chembiol.2019.09.002},
  volume       = {26},
  year         = {2019},
}

@article{9060,
  abstract     = {Molecular motors are essential to the living, generating fluctuations that boost transport and assist assembly. Active colloids, that consume energy to move, hold similar potential for man-made materials controlled by forces generated from within. Yet, their use as a powerhouse in materials science lacks. Here we show a massive acceleration of the annealing of a monolayer of passive beads by moderate addition of self-propelled microparticles. We rationalize our observations with a model of collisions that drive active fluctuations and activate the annealing. The experiment is quantitatively compared with Brownian dynamic simulations that further unveil a dynamical transition in the mechanism of annealing. Active dopants travel uniformly in the system or co-localize at the grain boundaries as a result of the persistence of their motion. Our findings uncover the potential of internal activity to control materials and lay the groundwork for the rise of materials science beyond equilibrium.},
  author       = {Ramananarivo, Sophie and Ducrot, Etienne and Palacci, Jérémie A},
  issn         = {2041-1723},
  journal      = {Nature Communications},
  keywords     = {General Biochemistry, Genetics and Molecular Biology, General Physics and Astronomy, General Chemistry},
  number       = {1},
  publisher    = {Springer Nature},
  title        = {{Activity-controlled annealing of colloidal monolayers}},
  doi          = {10.1038/s41467-019-11362-y},
  volume       = {10},
  year         = {2019},
}

@article{9460,
  abstract     = {Epigenetic reprogramming is required for proper regulation of gene expression in eukaryotic organisms. In Arabidopsis, active DNA demethylation is crucial for seed viability, pollen function, and successful reproduction. The DEMETER (DME) DNA glycosylase initiates localized DNA demethylation in vegetative and central cells, so-called companion cells that are adjacent to sperm and egg gametes, respectively. In rice, the central cell genome displays local DNA hypomethylation, suggesting that active DNA demethylation also occurs in rice; however, the enzyme responsible for this process is unknown. One candidate is the rice REPRESSOR OF SILENCING 1a (ROS1a) gene, which is related to DME and is essential for rice seed viability and pollen function. Here, we report genome-wide analyses of DNA methylation in wild-type and ros1a mutant sperm and vegetative cells. We find that the rice vegetative cell genome is locally hypomethylated compared with sperm by a process that requires ROS1a activity. We show that many ROS1a target sequences in the vegetative cell are hypomethylated in the rice central cell, suggesting that ROS1a also demethylates the central cell genome. Similar to Arabidopsis, we show that sperm non-CG methylation is indirectly promoted by DNA demethylation in the vegetative cell. These results reveal that DNA glycosylase-mediated DNA demethylation processes are conserved in Arabidopsis and rice, plant species that diverged 150 million years ago. Finally, although global non-CG methylation levels of sperm and egg differ, the maternal and paternal embryo genomes show similar non-CG methylation levels, suggesting that rice gamete genomes undergo dynamic DNA methylation reprogramming after cell fusion.},
  author       = {Kim, M. Yvonne and Ono, Akemi and Scholten, Stefan and Kinoshita, Tetsu and Zilberman, Daniel and Okamoto, Takashi and Fischer, Robert L.},
  issn         = {1091-6490},
  journal      = {Proceedings of the National Academy of Sciences},
  keywords     = {Multidisciplinary},
  number       = {19},
  pages        = {9652--9657},
  publisher    = {National Academy of Sciences},
  title        = {{DNA demethylation by ROS1a in rice vegetative cells promotes methylation in sperm}},
  doi          = {10.1073/pnas.1821435116},
  volume       = {116},
  year         = {2019},
}

@article{9530,
  abstract     = {Background
DNA methylation of active genes, also known as gene body methylation, is found in many animal and plant genomes. Despite this, the transcriptional and developmental role of such methylation remains poorly understood. Here, we explore the dynamic range of DNA methylation in honey bee, a model organism for gene body methylation.

Results
Our data show that CG methylation in gene bodies globally fluctuates during honey bee development. However, these changes cause no gene expression alterations. Intriguingly, despite the global alterations, tissue-specific CG methylation patterns of complete genes or exons are rare, implying robust maintenance of genic methylation during development. Additionally, we show that CG methylation maintenance fluctuates in somatic cells, while reaching maximum fidelity in sperm cells. Finally, unlike universally present CG methylation, we discovered non-CG methylation specifically in bee heads that resembles such methylation in mammalian brain tissue.

Conclusions
Based on these results, we propose that gene body CG methylation can oscillate during development if it is kept to a level adequate to preserve function. Additionally, our data suggest that heightened non-CG methylation is a conserved regulator of animal nervous systems.},
  author       = {Harris, Keith D. and Lloyd, James P. B. and Domb, Katherine and Zilberman, Daniel and Zemach, Assaf},
  issn         = {1756-8935},
  journal      = {Epigenetics and Chromatin},
  publisher    = {Springer Nature},
  title        = {{DNA methylation is maintained with high fidelity in the honey bee germline and exhibits global non-functional fluctuations during somatic development}},
  doi          = {10.1186/s13072-019-0307-4},
  volume       = {12},
  year         = {2019},
}

@article{9580,
  abstract     = {An r-cut of a k-uniform hypergraph H is a partition of the vertex set of H into r parts and the size of the cut is the number of edges which have a vertex in each part. A classical result of Edwards says that every m-edge graph has a 2-cut of size $m/2 + \Omega(\sqrt{m})$ and this is best possible. That is, there exist cuts which exceed the expected size of a random cut by some multiple of the standard deviation. We study analogues of this and related results in hypergraphs. First, we observe that similarly to graphs, every m-edge k-uniform hypergraph has an r-cut whose size is $\Omega(\sqrt{m})$ larger than the expected size of a random r-cut. Moreover, in the case where k = 3 and r = 2 this bound is best possible and is attained by Steiner triple systems. Surprisingly, for all other cases (that is, if k ≥ 4 or r ≥ 3), we show that every m-edge k-uniform hypergraph has an r-cut whose size is $\Omega(m^{5/9})$ larger than the expected size of a random r-cut. This is a significant difference in behaviour, since the amount by which the size of the largest cut exceeds the expected size of a random cut is now considerably larger than the standard deviation.},
  author       = {Conlon, David and Fox, Jacob and Kwan, Matthew Alan and Sudakov, Benny},
  issn         = {1565-8511},
  journal      = {Israel Journal of Mathematics},
  number       = {1},
  pages        = {67--111},
  publisher    = {Springer},
  title        = {{Hypergraph cuts above the average}},
  doi          = {10.1007/s11856-019-1897-z},
  volume       = {233},
  year         = {2019},
}

@article{9585,
  abstract     = {An n-vertex graph is called C-Ramsey if it has no clique or independent set of size C log n. All known constructions of Ramsey graphs involve randomness in an essential way, and there is an ongoing line of research towards showing that in fact all Ramsey graphs must obey certain “richness” properties characteristic of random graphs. More than 25 years ago, Erdős, Faudree and Sós conjectured that in any C-Ramsey graph there are $\Omega(n^{5/2})$ induced subgraphs, no pair of which have the same numbers of vertices and edges. Improving on earlier results of Alon, Balogh, Kostochka and Samotij, in this paper we prove this conjecture.},
  author       = {Kwan, Matthew Alan and Sudakov, Benny},
  issn         = {1088-6850},
  journal      = {Transactions of the American Mathematical Society},
  number       = {8},
  pages        = {5571--5594},
  publisher    = {American Mathematical Society},
  title        = {{Proof of a conjecture on induced subgraphs of Ramsey graphs}},
  doi          = {10.1090/tran/7729},
  volume       = {372},
  year         = {2019},
}

@article{9586,
  abstract     = {Consider integers $k, \ell$ such that $0 \leq \ell \leq \binom{k}{2}$. Given a large graph $G$, what is the fraction of $k$-vertex subsets of $G$ which span exactly $\ell$ edges? When $G$ is empty or complete, and $\ell$ is zero or $\binom{k}{2}$, this fraction can be exactly 1. On the other hand, if $\ell$ is far from these extreme values, one might expect that this fraction is substantially smaller than 1. This was recently proved by Alon, Hefetz, Krivelevich, and Tyomkyn who initiated the systematic study of this question and proposed several natural conjectures.
Let $\ell^* = \min\{\ell, \binom{k}{2} - \ell\}$. Our main result is that for any $k$ and $\ell$, the fraction of $k$-vertex subsets that span $\ell$ edges is at most $\log^{O(1)}(\ell^*/k)\sqrt{k/\ell^*}$, which is best-possible up to the logarithmic factor. This improves on multiple results of Alon, Hefetz, Krivelevich, and Tyomkyn, and resolves one of their conjectures. In addition, we also make some first steps towards some analogous questions for hypergraphs.
Our proofs involve some Ramsey-type arguments, and a number of different probabilistic tools, such as polynomial anticoncentration inequalities, hypercontractivity, and a coupling trick for random variables defined on a ‘slice’ of the Boolean hypercube.},
  author       = {Kwan, Matthew Alan and Sudakov, Benny and Tran, Tuan},
  issn         = {1469-7750},
  journal      = {Journal of the London Mathematical Society},
  number       = {3},
  pages        = {757--777},
  publisher    = {Wiley},
  title        = {{Anticoncentration for subgraph statistics}},
  doi          = {10.1112/jlms.12192},
  volume       = {99},
  year         = {2019},
}

@article{9677,
  abstract     = {Progress in the atomic-scale modeling of matter over the past decade has been tremendous. This progress has been brought about by improvements in methods for evaluating interatomic forces that work by either solving the electronic structure problem explicitly, or by computing accurate approximations of the solution and by the development of techniques that use the Born–Oppenheimer (BO) forces to move the atoms on the BO potential energy surface. As a consequence of these developments it is now possible to identify stable or metastable states, to sample configurations consistent with the appropriate thermodynamic ensemble, and to estimate the kinetics of reactions and phase transitions. All too often, however, progress is slowed down by the bottleneck associated with implementing new optimization algorithms and/or sampling techniques into the many existing electronic-structure and empirical-potential codes. To address this problem, we are thus releasing a new version of the i-PI software. This piece of software is an easily extensible framework for implementing advanced atomistic simulation techniques using interatomic potentials and forces calculated by an external driver code. While the original version of the code (Ceriotti et al., 2014) was developed with a focus on path integral molecular dynamics techniques, this second release of i-PI not only includes several new advanced path integral methods, but also offers other classes of algorithms. In other words, i-PI is moving towards becoming a universal force engine that is both modular and tightly coupled to the driver codes that evaluate the potential energy surface and its derivatives.},
  author       = {Kapil, Venkat and Rossi, Mariana and Marsalek, Ondrej and Petraglia, Riccardo and Litman, Yair and Spura, Thomas and Cheng, Bingqing and Cuzzocrea, Alice and Meißner, Robert H. and Wilkins, David M. and Helfrecht, Benjamin A. and Juda, Przemysław and Bienvenue, Sébastien P. and Fang, Wei and Kessler, Jan and Poltavsky, Igor and Vandenbrande, Steven and Wieme, Jelle and Corminboeuf, Clemence and Kühne, Thomas D. and Manolopoulos, David E. and Markland, Thomas E. and Richardson, Jeremy O. and Tkatchenko, Alexandre and Tribello, Gareth A. and Van Speybroeck, Veronique and Ceriotti, Michele},
  issn         = {0010-4655},
  journal      = {Computer Physics Communications},
  pages        = {214--223},
  publisher    = {Elsevier},
  title        = {{i-PI 2.0: A universal force engine for advanced molecular simulations}},
  doi          = {10.1016/j.cpc.2018.09.020},
  volume       = {236},
  year         = {2019},
}

@article{9680,
  abstract     = {Atomistic modeling of phase transitions, chemical reactions, or other rare events that involve overcoming high free energy barriers usually entails prohibitively long simulation times. Introducing a bias potential as a function of an appropriately chosen set of collective variables can significantly accelerate the exploration of phase space, albeit at the price of distorting the distribution of microstates. Efficient reweighting to recover the unbiased distribution can be nontrivial when employing adaptive sampling techniques such as metadynamics, variationally enhanced sampling, or parallel bias metadynamics, in which the system evolves in a quasi-equilibrium manner under a time-dependent bias. We introduce an iterative unbiasing scheme that makes efficient use of all the trajectory data and that does not require the distribution to be evaluated on a grid. The method can thus be used even when the bias has a high dimensionality. We benchmark this approach against some of the existing schemes on model systems with different complexity and dimensionality.},
  author       = {Giberti, F. and Cheng, Bingqing and Tribello, Gareth A. and Ceriotti, Michele},
  issn         = {1549-9626},
  journal      = {Journal of Chemical Theory and Computation},
  number       = {1},
  pages        = {100--107},
  publisher    = {American Chemical Society},
  title        = {{Iterative unbiasing of quasi-equilibrium sampling}},
  doi          = {10.1021/acs.jctc.9b00907},
  volume       = {16},
  year         = {2020},
}

@article{9689,
  abstract     = {A central goal of computational physics and chemistry is to predict material properties by using first-principles methods based on the fundamental laws of quantum mechanics. However, the high computational costs of these methods typically prevent rigorous predictions of macroscopic quantities at finite temperatures, such as heat capacity, density, and chemical potential. Here, we enable such predictions by marrying advanced free-energy methods with data-driven machine-learning interatomic potentials. We show that, for the ubiquitous and technologically essential system of water, a first-principles thermodynamic description not only leads to excellent agreement with experiments, but also reveals the crucial role of nuclear quantum fluctuations in modulating the thermodynamic stabilities of different phases of water.},
  author       = {Cheng, Bingqing and Engel, Edgar A. and Behler, Jörg and Dellago, Christoph and Ceriotti, Michele},
  issn         = {1091-6490},
  journal      = {Proceedings of the National Academy of Sciences},
  number       = {4},
  pages        = {1110--1115},
  publisher    = {National Academy of Sciences},
  title        = {{Ab initio thermodynamics of liquid and solid water}},
  doi          = {10.1073/pnas.1815117116},
  volume       = {116},
  year         = {2019},
}

