@article{13464,
  abstract     = {Massive binaries that merge as compact objects are the progenitors of gravitational-wave sources. Most of these binaries experience one or more phases of mass transfer, during which one of the stars loses all or part of its outer envelope and becomes a stripped-envelope star. The evolution of the size of these stripped stars is crucial in determining whether they experience further interactions and understanding their ultimate fate. We present new calculations of stripped-envelope stars based on binary evolution models computed with MESA. We use these to investigate their radius evolution as a function of mass and metallicity. We further discuss their pre-supernova observable characteristics and potential consequences of their evolution on the properties of supernovae from stripped stars. At high metallicity, we find that practically all of the hydrogen-rich envelope is removed, which is in agreement with earlier findings. Only progenitors with initial masses below 10 M⊙ expand to large radii (up to 100 R⊙), while more massive progenitors remain compact. At low metallicity, a substantial amount of hydrogen remains and the progenitors can, in principle, expand to giant sizes (> 400 R⊙) for all masses we consider. This implies that they can fill their Roche lobe anew. We show that the prescriptions commonly used in population synthesis models underestimate the stellar radii by up to two orders of magnitude. We expect that this has consequences for the predictions for gravitational-wave sources from double neutron star mergers, particularly with regard to their metallicity dependence.},
  author       = {Laplace, E. and Götberg, Ylva Louise Linsdotter and de Mink, S. E. and Justham, S. and Farmer, R.},
  issn         = {1432-0746},
  journal      = {Astronomy \& Astrophysics},
  keywords     = {Space and Planetary Science, Astronomy and Astrophysics},
  publisher    = {EDP Sciences},
  title        = {{The expansion of stripped-envelope stars: Consequences for supernovae and gravitational-wave progenitors}},
  doi          = {10.1051/0004-6361/201937300},
  volume       = {637},
  year         = {2020},
}

@article{13465,
  abstract     = {Gravitational-wave detections are now probing the black hole (BH) mass distribution, including the predicted pair-instability mass gap. These data require robust quantitative predictions, which are challenging to obtain. The most massive BH progenitors experience episodic mass ejections on time-scales shorter than the convective turnover time-scale. This invalidates the steady-state assumption on which the classic mixing length theory relies. We compare the final BH masses computed with two different versions of the stellar evolutionary code MESA: (i) using the default implementation of Paxton et al. (2018) and (ii) solving an additional equation accounting for the time-scale for convective deceleration. In the second grid, where stronger convection develops during the pulses and carries part of the energy, we find weaker pulses. This leads to lower amounts of mass being ejected and thus higher final BH masses of up to ∼5 M⊙. The differences are much smaller for the progenitors that determine the maximum mass of BHs below the gap. This prediction is robust at MBH,max ≃ 48 M⊙, at least within the idealized context of this study. This is an encouraging indication that current models are robust enough for comparison with the present-day gravitational-wave detections. However, the large differences between individual models emphasize the importance of improving the treatment of convection in stellar models, especially in the light of the data anticipated from the third generation of gravitational-wave detectors.},
  author       = {Renzo, M. and Farmer, R. J. and Justham, S. and de Mink, S. E. and Götberg, Ylva Louise Linsdotter and Marchant, P.},
  issn         = {1365-2966},
  journal      = {Monthly Notices of the Royal Astronomical Society},
  keywords     = {Space and Planetary Science, Astronomy and Astrophysics},
  number       = {3},
  pages        = {4333--4341},
  publisher    = {Oxford University Press},
  title        = {{Sensitivity of the lower edge of the pair-instability black hole mass gap to the treatment of time-dependent convection}},
  doi          = {10.1093/mnras/staa549},
  volume       = {493},
  year         = {2020},
}

@article{13466,
  abstract     = {Context. A majority of massive stars are part of binary systems, a large fraction of which will inevitably interact during their lives. Binary-interaction products (BiPs), that is, stars affected by such interaction, are expected to be commonly present in stellar populations. BiPs are thus a crucial ingredient in the understanding of stellar evolution.
Aims. We aim to identify and characterize a statistically significant sample of BiPs by studying clusters of 10–40 Myr, an age at which binary population models predict the abundance of BiPs to be highest. One example of such a cluster is NGC 330 in the Small Magellanic Cloud.
Methods. Using MUSE WFM-AO observations of NGC 330, we resolved the dense cluster core for the first time and were able to extract spectra of its entire massive star population. We developed an automated spectral classification scheme based on the equivalent widths of spectral lines in the red part of the spectrum.
Results. We characterize the massive star content of the core of NGC 330, which contains more than 200 B stars, 2 O stars, 6 A-type supergiants, and 11 red supergiants. We find a lower limit on the Be star fraction of 32 ± 3% in the whole sample. It increases to at least 46 ± 10% when we only consider stars brighter than V = 17 mag. We estimate an age of the cluster core between 35 and 40 Myr and a total cluster mass of $88^{+17}_{-18} \times 10^{3}$ M⊙.
Conclusions. We find that the population in the cluster core is different than the population in the outskirts: while the stellar content in the core appears to be older than the stars in the outskirts, the Be star fraction and the observed binary fraction are significantly higher. Furthermore, we detect several BiP candidates that will be subject of future studies.},
  author       = {Bodensteiner, J. and Sana, H. and Mahy, L. and Patrick, L. R. and de Koter, A. and de Mink, S. E. and Evans, C. J. and Götberg, Ylva Louise Linsdotter and Langer, N. and Lennon, D. J. and Schneider, F. R. N. and Tramper, F.},
  issn         = {1432-0746},
  journal      = {Astronomy \& Astrophysics},
  keywords     = {stars: massive / stars: emission-line / Be / binaries: spectroscopic / blue stragglers / Magellanic Clouds},
  publisher    = {EDP Sciences},
  title        = {{The young massive SMC cluster NGC 330 seen by MUSE}},
  doi          = {10.1051/0004-6361/201936743},
  volume       = {634},
  year         = {2020},
}

@article{13467,
  abstract     = {Massive stars are often found in binary systems, and it has been argued that binary products boost the ionizing radiation of stellar populations. Accurate predictions for binary products are needed to understand and quantify their contribution to cosmic reionization. We investigate the contribution of stars stripped in binaries because (1) they are, arguably, the best-understood products of binary evolution, (2) we recently produced the first radiative transfer calculations for the atmospheres of these stripped stars that predict their ionizing spectra, and (3) they are very promising sources because they boost the ionizing emission of stellar populations at late times. This allows stellar feedback to clear the surroundings such that a higher fraction of their photons can escape and ionize the intergalactic medium. Combining our detailed predictions for the ionizing spectra with a simple cosmic reionization model, we estimate that stripped stars contributed tens of percent of the photons that caused cosmic reionization of hydrogen, depending on the assumed escape fractions. More importantly, stripped stars harden the ionizing emission. We estimate that the spectral index for the ionizing part of the spectrum can increase to −1 compared to ≲ − 2 for single stars. At high redshift, stripped stars and massive single stars combined dominate the He II-ionizing emission, but we expect that active galactic nuclei drive cosmic helium reionization. Further observational consequences we expect are (1) high ionization states for the intergalactic gas surrounding stellar systems, such as C IV and Si IV, and (2) additional heating of the intergalactic medium of up to a few thousand Kelvin. Quantifying these warrants the inclusion of accurate models for stripped stars and other binary products in full cosmological simulations.},
  author       = {Götberg, Ylva Louise Linsdotter and de Mink, S. E. and McQuinn, M. and Zapartas, E. and Groh, J. H. and Norman, C.},
  issn         = {1432-0746},
  journal      = {Astronomy \& Astrophysics},
  keywords     = {Space and Planetary Science, Astronomy and Astrophysics},
  publisher    = {EDP Sciences},
  title        = {{Contribution from stars stripped in binaries to cosmic reionization of hydrogen and helium}},
  doi          = {10.1051/0004-6361/201936669},
  volume       = {634},
  year         = {2020},
}

@article{13998,
  abstract     = {The interaction of strong near-infrared (NIR) laser pulses with wide-bandgap dielectrics produces high harmonics in the extreme ultraviolet (XUV) wavelength range. These observations have opened up the possibility of attosecond metrology in solids, which would benefit from a precise measurement of the emission times of individual harmonics with respect to the NIR laser field. Here we show that, when high-harmonics are detected from the input surface of a magnesium oxide crystal, a bichromatic probing of the XUV emission shows a clear synchronization largely consistent with a semiclassical model of electron–hole recollisions in bulk solids. On the other hand, the bichromatic spectrogram of harmonics originating from the exit surface of the 200 μm-thick crystal is strongly modified, indicating the influence of laser field distortions during propagation. Our tracking of sub-cycle electron and hole re-collisions at XUV energies is relevant to the development of solid-state sources of attosecond pulses.},
  author       = {Vampa, Giulio and Lu, Jian and You, Yong Sing and Baykusheva, Denitsa Rangelova and Wu, Mengxi and Liu, Hanzhe and Schafer, Kenneth J. and Gaarde, Mette B. and Reis, David A. and Ghimire, Shambhu},
  issn         = {1361-6455},
  journal      = {Journal of Physics B: Atomic, Molecular and Optical Physics},
  keywords     = {Condensed Matter Physics, Atomic and Molecular Physics, and Optics},
  number       = {14},
  publisher    = {IOP Publishing},
  title        = {{Attosecond synchronization of extreme ultraviolet high harmonics from crystals}},
  doi          = {10.1088/1361-6455/ab8e56},
  volume       = {53},
  year         = {2020},
}

@unpublished{14028,
  abstract     = {The present review addresses the technical advances and the theoretical developments to realize and rationalize attosecond-science experiments that reveal a new dynamical time scale (10−15-10−18 s), with a particular emphasis on molecular systems and the implications of attosecond processes for chemical dynamics. After a brief outline of the theoretical framework for treating non-perturbative phenomena in Section 2, we introduce the physical mechanisms underlying high-harmonic generation and attosecond technology. The relevant technological developments and experimental schemes are covered in Section 3. Throughout the remainder of the chapter, we report on selected applications in molecular attosecond physics, thereby addressing specific phenomena mediated by purely electronic dynamics: charge localization in molecular hydrogen, charge migration in biorelevant molecules, high-harmonic spectroscopy, and delays in molecular photoionization.},
  author       = {Baykusheva, Denitsa Rangelova and Wörner, Hans Jakob},
  eprint       = {2002.02111},
  archiveprefix = {arXiv},
  note         = {Preprint, arXiv:2002.02111},
  title        = {{Attosecond molecular spectroscopy and dynamics}},
  doi          = {10.48550/arXiv.2002.02111},
  year         = {2020},
}

@unpublished{14095,
  abstract     = {The Habitable Exoplanet Observatory, or HabEx, has been designed to be the Great Observatory of the 2030s. For the first time in human history, technologies have matured sufficiently to enable an affordable space-based telescope mission capable of discovering and characterizing Earthlike planets orbiting nearby bright sunlike stars in order to search for signs of habitability and biosignatures. Such a mission can also be equipped with instrumentation that will enable broad and exciting general astrophysics and planetary science not possible from current or planned facilities. HabEx is a space telescope with unique imaging and multi-object spectroscopic capabilities at wavelengths ranging from ultraviolet (UV) to near-IR. These capabilities allow for a broad suite of compelling science that cuts across the entire NASA astrophysics portfolio. HabEx has three primary science goals: (1) Seek out nearby worlds and explore their habitability; (2) Map out nearby planetary systems and understand the diversity of the worlds they contain; (3) Enable new explorations of astrophysical systems from our own solar system to external galaxies by extending our reach in the UV through near-IR. This Great Observatory science will be selected through a competed GO program, and will account for about 50% of the HabEx primary mission. The preferred HabEx architecture is a 4m, monolithic, off-axis telescope that is diffraction-limited at 0.4 microns and is in an L2 orbit. HabEx employs two starlight suppression systems: a coronagraph and a starshade, each with their own dedicated instrument.},
  author       = {Gaudi, B. Scott and Seager, Sara and Mennesson, Bertrand and Kiessling, Alina and Warfield, Keith and Cahoy, Kerri and Clarke, John T. and Shawn Domagal-Goldman, Shawn Domagal-Goldman and Feinberg, Lee and Guyon, Olivier and Kasdin, Jeremy and Mawet, Dimitri and Plavchan, Peter and Robinson, Tyler and Rogers, Leslie and Scowen, Paul and Somerville, Rachel and Stapelfeldt, Karl and Stark, Christopher and Stern, Daniel and Turnbull, Margaret and Amini, Rashied and Kuan, Gary and Martin, Stefan and Morgan, Rhonda and Redding, David and Stahl, H. Philip and Webb, Ryan and Oscar Alvarez-Salazar, Oscar Alvarez-Salazar and Arnold, William L. and Arya, Manan and Balasubramanian, Bala and Baysinger, Mike and Bell, Ray and Below, Chris and Benson, Jonathan and Blais, Lindsey and Booth, Jeff and Bourgeois, Robert and Bradford, Case and Brewer, Alden and Brooks, Thomas and Cady, Eric and Caldwell, Mary and Calvet, Rob and Carr, Steven and Chan, Derek and Cormarkovic, Velibor and Coste, Keith and Cox, Charlie and Danner, Rolf and Davis, Jacqueline and Dewell, Larry and Dorsett, Lisa and Dunn, Daniel and East, Matthew and Effinger, Michael and Eng, Ron and Freebury, Greg and Garcia, Jay and Gaskin, Jonathan and Greene, Suzan and Hennessy, John and Hilgemann, Evan and Hood, Brad and Holota, Wolfgang and Howe, Scott and Huang, Pei and Hull, Tony and Hunt, Ron and Hurd, Kevin and Johnson, Sandra and Kissil, Andrew and Knight, Brent and Kolenz, Daniel and Kraus, Oliver and Krist, John and Li, Mary and Lisman, Doug and Mandic, Milan and Mann, John and Marchen, Luis and Colleen Marrese-Reading, Colleen Marrese-Reading and McCready, Jonathan and McGown, Jim and Missun, Jessica and Miyaguchi, Andrew and Moore, Bradley and Nemati, Bijan and Nikzad, Shouleh and Nissen, Joel and Novicki, Megan and Perrine, Todd and Pineda, Claudia and Polanco, Otto and Putnam, Dustin and Qureshi, Atif and Richards, Michael and Riggs, A. J. 
Eldorado and Rodgers, Michael and Rud, Mike and Saini, Navtej and Scalisi, Dan and Scharf, Dan and Schulz, Kevin and Serabyn, Gene and Sigrist, Norbert and Sikkia, Glory and Singleton, Andrew and Shaklan, Stuart and Smith, Scott and Southerd, Bart and Stahl, Mark and Steeves, John and Sturges, Brian and Sullivan, Chris and Tang, Hao and Taras, Neil and Tesch, Jonathan and Therrell, Melissa and Tseng, Howard and Valente, Marty and Buren, David Van and Villalvazo, Juan and Warwick, Steve and Webb, David and Westerhoff, Thomas and Wofford, Rush and Wu, Gordon and Woo, Jahning and Wood, Milana and Ziemer, John and Arney, Giada and Anderson, Jay and Jesús Maíz-Apellániz, Jesús Maíz-Apellániz and Bartlett, James and Belikov, Ruslan and Bendek, Eduardo and Cenko, Brad and Douglas, Ewan and Dulz, Shannon and Evans, Chris and Faramaz, Virginie and Feng, Y. Katherina and Ferguson, Harry and Follette, Kate and Ford, Saavik and García, Miriam and Geha, Marla and Gelino, Dawn and Götberg, Ylva Louise Linsdotter and Hildebrandt, Sergi and Hu, Renyu and Jahnke, Knud and Kennedy, Grant and Kreidberg, Laura and Isella, Andrea and Lopez, Eric and Marchis, Franck and Macri, Lucas and Marley, Mark and Matzko, William and Mazoyer, Johan and McCandliss, Stephan and Meshkat, Tiffany and Mordasini, Christoph and Morris, Patrick and Nielsen, Eric and Newman, Patrick and Petigura, Erik and Postman, Marc and Reines, Amy and Roberge, Aki and Roederer, Ian and Ruane, Garreth and Schwieterman, Edouard and Sirbu, Dan and Spalding, Christopher and Teplitz, Harry and Tumlinson, Jason and Turner, Neal and Werk, Jessica and Wofford, Aida and Wyatt, Mark and Young, Amber and Zellem, Rob},
  note         = {Preprint, arXiv:2001.06683},
  title        = {{The habitable exoplanet observatory (HabEx) mission concept study final report}},
  doi          = {10.48550/arXiv.2001.06683},
  year         = {2020},
}

@unpublished{14096,
  abstract     = {A binary neutron star merger has been observed in a multi-messenger detection of gravitational wave (GW) and electromagnetic (EM) radiation. Binary neutron stars that merge within a Hubble time, as well as many other compact binaries, are expected to form via common envelope evolution. Yet five decades of research on common envelope evolution have not yet resulted in a satisfactory understanding of the multi-spatial multi-timescale evolution for the systems that lead to compact binaries. In this paper, we report on the first successful simulations of common envelope ejection leading to binary neutron star formation in 3D hydrodynamics. We simulate the dynamical inspiral phase of the interaction between a 12M⊙ red supergiant and a 1.4M⊙ neutron star for different initial separations and initial conditions. For all of our simulations, we find complete envelope ejection and final orbital separations of af≈1.3-5.1R⊙ depending on the simulation and criterion, leading to binary neutron stars that can merge within a Hubble time. We find αCE-equivalent efficiencies of ≈0.1-2.7 depending on the simulation and criterion, but this may be specific for these extended progenitors. We fully resolve the core of the star to ≲0.005R⊙ and our 3D hydrodynamics simulations are informed by an adjusted 1D analytic energy formalism and a 2D kinematics study in order to overcome the prohibitive computational cost of simulating these systems. The framework we develop in this paper can be used to simulate a wide variety of interactions between stars, from stellar mergers to common envelope episodes leading to GW sources.},
  author       = {Law-Smith, Jamie A. P. and Everson, Rosa Wallace and Ramirez-Ruiz, Enrico and de Mink, Selma E. and van Son, Lieke A. C. and Götberg, Ylva Louise Linsdotter and Zellmann, Stefan and Vigna-Gómez, Alejandro and Renzo, Mathieu and Wu, Samantha and Schrøder, Sophie L. and Foley, Ryan J. and Hutchinson-Smith, Tenley},
  note         = {Preprint, arXiv:2011.06630},
  title        = {{Successful common envelope ejection and binary neutron star formation in 3D hydrodynamics}},
  doi          = {10.48550/arXiv.2011.06630},
  year         = {2020},
}

@article{14125,
  abstract     = {Motivation: Recent technological advances have led to an increase in the production and availability of single-cell data. The ability to integrate a set of multi-technology measurements would allow the identification of biologically or clinically meaningful observations through the unification of the perspectives afforded by each technology. In most cases, however, profiling technologies consume the used cells and thus pairwise correspondences between datasets are lost. Due to the sheer size single-cell datasets can acquire, scalable algorithms that are able to universally match single-cell measurements carried out in one cell to its corresponding sibling in another technology are needed.
Results: We propose Single-Cell data Integration via Matching (SCIM), a scalable approach to recover such correspondences in two or more technologies. SCIM assumes that cells share a common (low-dimensional) underlying structure and that the underlying cell distribution is approximately constant across technologies. It constructs a technology-invariant latent space using an autoencoder framework with an adversarial objective. Multi-modal datasets are integrated by pairing cells across technologies using a bipartite matching scheme that operates on the low-dimensional latent representations. We evaluate SCIM on a simulated cellular branching process and show that the cell-to-cell matches derived by SCIM reflect the same pseudotime on the simulated dataset. Moreover, we apply our method to two real-world scenarios, a melanoma tumor sample and a human bone marrow sample, where we pair cells from a scRNA dataset to their sibling cells in a CyTOF dataset achieving 90% and 78% cell-matching accuracy for each one of the samples, respectively.},
  author       = {Stark, Stefan G and Ficek, Joanna and Locatello, Francesco and Bonilla, Ximena and Chevrier, Stéphane and Singer, Franziska and Aebersold, Rudolf and Al-Quaddoomi, Faisal S and Albinus, Jonas and Alborelli, Ilaria and Andani, Sonali and Attinger, Per-Olof and Bacac, Marina and Baumhoer, Daniel and Beck-Schimmer, Beatrice and Beerenwinkel, Niko and Beisel, Christian and Bernasconi, Lara and Bertolini, Anne and Bodenmiller, Bernd and Bonilla, Ximena and Casanova, Ruben and Chevrier, Stéphane and Chicherova, Natalia and D'Costa, Maya and Danenberg, Esther and Davidson, Natalie and gan, Monica-Andreea Dră and Dummer, Reinhard and Engler, Stefanie and Erkens, Martin and Eschbach, Katja and Esposito, Cinzia and Fedier, André and Ferreira, Pedro and Ficek, Joanna and Frei, Anja L and Frey, Bruno and Goetze, Sandra and Grob, Linda and Gut, Gabriele and Günther, Detlef and Haberecker, Martina and Haeuptle, Pirmin and Heinzelmann-Schwarz, Viola and Herter, Sylvia and Holtackers, Rene and Huesser, Tamara and Irmisch, Anja and Jacob, Francis and Jacobs, Andrea and Jaeger, Tim M and Jahn, Katharina and James, Alva R and Jermann, Philip M and Kahles, André and Kahraman, Abdullah and Koelzer, Viktor H and Kuebler, Werner and Kuipers, Jack and Kunze, Christian P and Kurzeder, Christian and Lehmann, Kjong-Van and Levesque, Mitchell and Lugert, Sebastian and Maass, Gerd and Manz, Markus and Markolin, Philipp and Mena, Julien and Menzel, Ulrike and Metzler, Julian M and Miglino, Nicola and Milani, Emanuela S and Moch, Holger and Muenst, Simone and Murri, Riccardo and Ng, Charlotte KY and Nicolet, Stefan and Nowak, Marta and Pedrioli, Patrick GA and Pelkmans, Lucas and Piscuoglio, Salvatore and Prummer, Michael and Ritter, Mathilde and Rommel, Christian and Rosano-González, María L and Rätsch, Gunnar and Santacroce, Natascha and Castillo, Jacobo Sarabia del and Schlenker, Ramona and Schwalie, Petra C and Schwan, Severin and Schär, Tobias and Senti, Gabriela and 
Singer, Franziska and Sivapatham, Sujana and Snijder, Berend and Sobottka, Bettina and Sreedharan, Vipin T and Stark, Stefan and Stekhoven, Daniel J and Theocharides, Alexandre PA and Thomas, Tinu M and Tolnay, Markus and Tosevski, Vinko and Toussaint, Nora C and Tuncel, Mustafa A and Tusup, Marina and Drogen, Audrey Van and Vetter, Marcus and Vlajnic, Tatjana and Weber, Sandra and Weber, Walter P and Wegmann, Rebekka and Weller, Michael and Wendt, Fabian and Wey, Norbert and Wicki, Andreas and Wollscheid, Bernd and Yu, Shuqing and Ziegler, Johanna and Zimmermann, Marc and Zoche, Martin and Zuend, Gregor and Rätsch, Gunnar and Lehmann, Kjong-Van},
  issn         = {1367-4811},
  journal      = {Bioinformatics},
  keywords     = {Computational Mathematics, Computational Theory and Mathematics, Computer Science Applications, Molecular Biology, Biochemistry, Statistics and Probability},
  number       = {Supplement\_2},
  pages        = {i919--i927},
  publisher    = {Oxford University Press},
  title        = {{SCIM: Universal single-cell matching with unpaired feature sets}},
  doi          = {10.1093/bioinformatics/btaa843},
  volume       = {36},
  year         = {2020},
}

@inproceedings{14186,
  abstract     = {The goal of the unsupervised learning of disentangled representations is to
separate the independent explanatory factors of variation in the data without
access to supervision. In this paper, we summarize the results of Locatello et
al., 2019, and focus on their implications for practitioners. We discuss the
theoretical result showing that the unsupervised learning of disentangled
representations is fundamentally impossible without inductive biases and the
practical challenges it entails. Finally, we comment on our experimental
findings, highlighting the limitations of state-of-the-art approaches and
directions for future research.},
  author       = {Locatello, Francesco and Bauer, Stefan and Lucic, Mario and Rätsch, Gunnar and Gelly, Sylvain and Schölkopf, Bernhard and Bachem, Olivier},
  booktitle    = {The 34th AAAI Conference on Artificial Intelligence},
  isbn         = {9781577358350},
  issn         = {2374-3468},
  location     = {New York, NY, United States},
  number       = {9},
  pages        = {13681--13684},
  publisher    = {Association for the Advancement of Artificial Intelligence},
  title        = {{A commentary on the unsupervised learning of disentangled representations}},
  doi          = {10.1609/aaai.v34i09.7120},
  volume       = {34},
  year         = {2020},
}

@inproceedings{14187,
  abstract     = {We propose a novel Stochastic Frank-Wolfe (a.k.a. conditional gradient)
algorithm for constrained smooth finite-sum minimization with a generalized
linear prediction/structure. This class of problems includes empirical risk
minimization with sparse, low-rank, or other structured constraints. The
proposed method is simple to implement, does not require step-size tuning, and
has a constant per-iteration cost that is independent of the dataset size.
Furthermore, as a byproduct of the method we obtain a stochastic estimator of
the Frank-Wolfe gap that can be used as a stopping criterion. Depending on the
setting, the proposed method matches or improves on the best computational
guarantees for Stochastic Frank-Wolfe algorithms. Benchmarks on several
datasets highlight different regimes in which the proposed method exhibits a
faster empirical convergence than related methods. Finally, we provide an
implementation of all considered methods in an open-source package.},
  author       = {Négiar, Geoffrey and Dresdner, Gideon and Tsai, Alicia and Ghaoui, Laurent El and Locatello, Francesco and Freund, Robert M. and Pedregosa, Fabian},
  booktitle    = {Proceedings of the 37th International Conference on Machine Learning},
  location     = {Virtual},
  pages        = {7253--7262},
  title        = {{Stochastic Frank-Wolfe for constrained finite-sum minimization}},
  volume       = {119},
  year         = {2020},
}

@inproceedings{14188,
  abstract     = {Intelligent agents should be able to learn useful representations by
observing changes in their environment. We model such observations as pairs of
non-i.i.d. images sharing at least one of the underlying factors of variation.
First, we theoretically show that only knowing how many factors have changed,
but not which ones, is sufficient to learn disentangled representations.
Second, we provide practical algorithms that learn disentangled representations
from pairs of images without requiring annotation of groups, individual
factors, or the number of factors that have changed. Third, we perform a
large-scale empirical study and show that such pairs of observations are
sufficient to reliably learn disentangled representations on several benchmark
data sets. Finally, we evaluate our learned representations and find that they
are simultaneously useful on a diverse suite of tasks, including generalization
under covariate shifts, fairness, and abstract reasoning. Overall, our results
demonstrate that weak supervision enables learning of useful disentangled
representations in realistic scenarios.},
  author       = {Locatello, Francesco and Poole, Ben and Rätsch, Gunnar and Schölkopf, Bernhard and Bachem, Olivier and Tschannen, Michael},
  booktitle    = {Proceedings of the 37th International Conference on Machine Learning},
  location     = {Virtual},
  pages        = {6348--6359},
  title        = {{Weakly-supervised disentanglement without compromises}},
  volume       = {119},
  year         = {2020},
}

@article{14195,
  abstract     = {The idea behind the unsupervised learning of disentangled representations is that real-world data is generated by a few explanatory factors of variation which can be recovered by unsupervised learning algorithms. In this paper, we provide a sober look at recent progress in the field and challenge some common assumptions. We first theoretically show that the unsupervised learning of disentangled representations is fundamentally impossible without inductive biases on both the models and the data. Then, we train over 14000
 models covering most prominent methods and evaluation metrics in a reproducible large-scale experimental study on eight data sets. We observe that while the different methods successfully enforce properties “encouraged” by the corresponding losses, well-disentangled models seemingly cannot be identified without supervision. Furthermore, different evaluation metrics do not always agree on what should be considered “disentangled” and exhibit systematic differences in the estimation. Finally, increased disentanglement does not seem to necessarily lead to a decreased sample complexity of learning for downstream tasks. Our results suggest that future work on disentanglement learning should be explicit about the role of inductive biases and (implicit) supervision, investigate concrete benefits of enforcing disentanglement of the learned representations, and consider a reproducible experimental setup covering several data sets.},
  author       = {Locatello, Francesco and Bauer, Stefan and Lucic, Mario and Rätsch, Gunnar and Gelly, Sylvain and Schölkopf, Bernhard and Bachem, Olivier},
  journal      = {Journal of Machine Learning Research},
  publisher    = {MIT Press},
  title        = {{A sober look at the unsupervised learning of disentangled representations and their evaluation}},
  volume       = {21},
  year         = {2020},
}

% IEEE Radar Conference 2020 paper (quantum illumination at microwave frequencies).
% NOTE(review): per file-wide convention, 'location' holds the conference venue and
% 'doi' is placed after 'title'; author initials are unpunctuated ({Fink, Johannes M}),
% matching other entries in this file — confirm against the original export if normalizing.
@inproceedings{9001,
  abstract     = {Quantum illumination is a sensing technique that employs entangled signal-idler beams to improve the detection efficiency of low-reflectivity objects in environments with large thermal noise. The advantage over classical strategies is evident at low signal brightness, a feature which could make the protocol an ideal prototype for non-invasive scanning or low-power short-range radar. Here we experimentally investigate the concept of quantum illumination at microwave frequencies, by generating entangled fields using a Josephson parametric converter which are then amplified to illuminate a room-temperature object at a distance of 1 meter. Starting from experimental data, we simulate the case of perfect idler photon number detection, which results in a quantum advantage compared to the relative classical benchmark. Our results highlight the opportunities and challenges on the way towards a first room-temperature application of microwave quantum circuits.},
  author       = {Barzanjeh, Shabir and Pirandola, Stefano and Vitali, David and Fink, Johannes M},
  booktitle    = {IEEE National Radar Conference - Proceedings},
  isbn         = {9781728189420},
  issn         = {1097-5659},
  location     = {Florence, Italy},
  number       = {9},
  publisher    = {IEEE},
  title        = {{Microwave quantum illumination with a digital phase-conjugated receiver}},
  doi          = {10.1109/RadarConf2043947.2020.9266397},
  volume       = {2020},
  year         = {2020},
}

% Journal article in Commentarii Mathematici Helvetici 95(4).
% Fix: ISSN normalized to the standard hyphenated form (1420-8946), consistent
% with the hyphenated ISSNs used elsewhere in this file (e.g. 1432-0746, 2150-8097).
@article{9007,
  abstract     = {Motivated by a recent question of Peyre, we apply the Hardy–Littlewood circle method to count “sufficiently free” rational points of bounded height on arbitrary smooth projective hypersurfaces of low degree that are defined over the rationals.},
  author       = {Browning, Timothy D and Sawin, Will},
  issn         = {1420-8946},
  journal      = {Commentarii Mathematici Helvetici},
  number       = {4},
  pages        = {635--659},
  publisher    = {European Mathematical Society},
  title        = {{Free rational points on smooth hypersurfaces}},
  doi          = {10.4171/CMH/499},
  volume       = {95},
  year         = {2020},
}

% PVLDB 14(4) article (Calypso: auditable management of on-chain secrets).
% NOTE(review): the abstract field contains a blank line (paragraph break). BibTeX and
% Biber tolerate blank lines inside brace-delimited values, but some third-party parsers
% do not — verify against the toolchain in use before relying on it.
@article{9011,
  abstract     = {Distributed ledgers provide high availability and integrity, making them a key enabler for practical and secure computation of distributed workloads among mutually distrustful parties. Many practical applications also require strong confidentiality, however. This work enhances permissioned and permissionless blockchains with the ability to manage confidential data without forfeiting availability or decentralization. The proposed Calypso architecture addresses two orthogonal challenges confronting modern distributed ledgers: (a) enabling the auditable management of secrets and (b) protecting distributed computations against arbitrage attacks when their results depend on the ordering and secrecy of inputs.

Calypso introduces on-chain secrets, a novel abstraction that enforces atomic deposition of an auditable trace whenever users access confidential data. Calypso provides user-controlled consent management that ensures revocation atomicity and accountable anonymity. To enable permissionless deployment, we introduce an incentive scheme and provide users with the option to select their preferred trustees. We evaluated our Calypso prototype with a confidential document-sharing application and a decentralized lottery. Our benchmarks show that transaction-processing latency increases linearly in terms of security (number of trustees) and is in the range of 0.2 to 8 seconds for 16 to 128 trustees.},
  author       = {Kokoris Kogias, Eleftherios and Alp, Enis Ceyhun and Gasser, Linus and Jovanovic, Philipp and Syta, Ewa and Ford, Bryan},
  issn         = {2150-8097},
  journal      = {Proceedings of the VLDB Endowment},
  number       = {4},
  pages        = {586--599},
  publisher    = {Association for Computing Machinery},
  title        = {{CALYPSO: Private data management for decentralized ledgers}},
  doi          = {10.14778/3436905.3436917},
  volume       = {14},
  year         = {2020},
}

% SIAM Journal on Mathematical Analysis 52(6).
% Fixes: ISSN normalized to hyphenated form (1095-7154), consistent with the rest of
% the file; title now writes {Allen--Cahn} (en-dash) to match the spelling used in this
% entry's own abstract — the double hyphen renders as an en-dash in LaTeX.
@article{9039,
  abstract     = {We give a short and self-contained proof for rates of convergence of the Allen--Cahn equation towards mean curvature flow, assuming that a classical (smooth) solution to the latter exists and starting from well-prepared initial data. Our approach is based on a relative entropy technique. In particular, it does not require a stability analysis for the linearized Allen--Cahn operator. As our analysis also does not rely on the comparison principle, we expect it to be applicable to more complex equations and systems.},
  author       = {Fischer, Julian L and Laux, Tim and Simon, Theresa M.},
  issn         = {1095-7154},
  journal      = {SIAM Journal on Mathematical Analysis},
  number       = {6},
  pages        = {6222--6233},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Convergence rates of the Allen--Cahn equation to mean curvature flow: A short proof based on relative entropies}},
  doi          = {10.1137/20M1322182},
  volume       = {52},
  year         = {2020},
}

% FMCAD 2020 paper (controller synthesis combining deep RL and formal methods).
% NOTE(review): first author given name 'Par' in {Alamdari, Par Alizadeh} looks
% truncated (possibly 'Parand') — confirm against the FMCAD 2020 proceedings before
% changing. 'location = {Online Conference}' follows this file's venue-in-location convention.
@inproceedings{9040,
  abstract     = {Machine learning and formal methods have complimentary benefits and drawbacks. In this work, we address the controller-design problem with a combination of techniques from both fields. The use of black-box neural networks in deep reinforcement learning (deep RL) poses a challenge for such a combination. Instead of reasoning formally about the output of deep RL, which we call the wizard, we extract from it a decision-tree based model, which we refer to as the magic book. Using the extracted model as an intermediary, we are able to handle problems that are infeasible for either deep RL or formal methods by themselves. First, we suggest, for the first time, a synthesis procedure that is based on a magic book. We synthesize a stand-alone correct-by-design controller that enjoys the favorable performance of RL. Second, we incorporate a magic book in a bounded model checking (BMC) procedure. BMC allows us to find numerous traces of the plant under the control of the wizard, which a user can use to increase the trustworthiness of the wizard and direct further training.},
  author       = {Alamdari, Par Alizadeh and Avni, Guy and Henzinger, Thomas A and Lukina, Anna},
  booktitle    = {Proceedings of the 20th Conference on Formal Methods in Computer-Aided Design},
  isbn         = {9783854480426},
  issn         = {2708-7824},
  location     = {Online Conference},
  pages        = {138--147},
  publisher    = {TU Wien Academic Press},
  title        = {{Formal methods with a touch of magic}},
  doi          = {10.34727/2020/isbn.978-3-85448-042-6_21},
  year         = {2020},
}

% RSC Advances 10(47) article (magnetothermal properties of Gd5Si4 nanoparticles).
% Entry carries DOI, hyphenated ISSN, page range with '--', and keywords — complete
% and consistent with the file's conventions; no changes needed.
@article{9067,
  abstract     = {Gadolinium silicide (Gd5Si4) nanoparticles are an interesting class of materials due to their high magnetization, low Curie temperature, low toxicity in biological environments and their multifunctional properties. We report the magnetic and magnetothermal properties of gadolinium silicide (Gd5Si4) nanoparticles prepared by surfactant-assisted ball milling of arc melted bulk ingots of the compound. Using different milling times and speeds, a wide range of crystallite sizes (13–43 nm) could be produced and a reduction in Curie temperature (TC) from 340 K to 317 K was achieved, making these nanoparticles suitable for self-controlled magnetic hyperthermia applications. The magnetothermal effect was measured in applied AC magnetic fields of amplitude 164–239 Oe and frequencies 163–519 kHz. All particles showed magnetic heating with a strong dependence of the specific absorption rate (SAR) on the average crystallite size. The highest SAR of 3.7 W g−1 was measured for 43 nm sized nanoparticles of Gd5Si4. The high SAR and low TC, (within the therapeutic range for magnetothermal therapy) makes the Gd5Si4 behave like self-regulating heat switches that would be suitable for self-controlled magnetic hyperthermia applications after biocompatibility and cytotoxicity tests.},
  author       = {Nauman, Muhammad and Alnasir, Muhammad Hisham and Hamayun, Muhammad Asif and Wang, YiXu and Shatruk, Michael and Manzoor, Sadia},
  issn         = {2046-2069},
  journal      = {RSC Advances},
  keywords     = {General Chemistry, General Chemical Engineering},
  number       = {47},
  pages        = {28383--28389},
  publisher    = {Royal Society of Chemistry},
  title        = {{Size-dependent magnetic and magnetothermal properties of gadolinium silicide nanoparticles}},
  doi          = {10.1039/d0ra05394e},
  volume       = {10},
  year         = {2020},
}

% IEEE CDC 2020 paper (LRT-NG reachability toolset).
% Fixes: 'Jeju Islang' -> 'Jeju Island' (typo in location); 'Langrangian' ->
% 'Lagrangian' in the abstract (this entry's own title spells it correctly);
% ISSN normalized to hyphenated form (0743-1546), consistent with the rest of the file.
@inproceedings{9103,
  abstract     = {We introduce LRT-NG, a set of techniques and an associated toolset that computes a reachtube (an over-approximation of the set of reachable states over a given time horizon) of a nonlinear dynamical system. LRT-NG significantly advances the state-of-the-art Lagrangian Reachability and its associated tool LRT. From a theoretical perspective, LRT-NG is superior to LRT in three ways. First, it uses for the first time an analytically computed metric for the propagated ball which is proven to minimize the ball’s volume. We emphasize that the metric computation is the centerpiece of all bloating-based techniques. Secondly, it computes the next reachset as the intersection of two balls: one based on the Cartesian metric and the other on the new metric. While the two metrics were previously considered opposing approaches, their joint use considerably tightens the reachtubes. Thirdly, it avoids the "wrapping effect" associated with the validated integration of the center of the reachset, by optimally absorbing the interval approximation in the radius of the next ball. From a tool-development perspective, LRT-NG is superior to LRT in two ways. First, it is a standalone tool that no longer relies on CAPD. This required the implementation of the Lohner method and a Runge-Kutta time-propagation method. Secondly, it has an improved interface, allowing the input model and initial conditions to be provided as external input files. Our experiments on a comprehensive set of benchmarks, including two Neural ODEs, demonstrates LRT-NG’s superior performance compared to LRT, CAPD, and Flow*.},
  author       = {Gruenbacher, Sophie and Cyranka, Jacek and Lechner, Mathias and Islam, Md Ariful and Smolka, Scott A. and Grosu, Radu},
  booktitle    = {Proceedings of the 59th IEEE Conference on Decision and Control},
  isbn         = {9781728174471},
  issn         = {0743-1546},
  location     = {Jeju Island, Korea (South)},
  pages        = {1556--1563},
  publisher    = {IEEE},
  title        = {{Lagrangian reachtubes: The next generation}},
  doi          = {10.1109/CDC42340.2020.9304042},
  volume       = {2020},
  year         = {2020},
}

