@misc{14990,
  abstract     = {The software artefact for evaluating the implementation of the approximation of stationary distributions.},
  author       = {Meggendorfer, Tobias},
  publisher    = {Zenodo},
  title        = {{Artefact for: Correct Approximation of Stationary Distributions}},
  doi          = {10.5281/ZENODO.7548214},
  year         = {2023},
}

@misc{14991,
  abstract     = {This repository contains the data, scripts, WRF codes and files required to reproduce the results of the manuscript "Assessing Memory in Convection Schemes Using Idealized Tests" submitted to the Journal of Advances in Modeling Earth Systems (JAMES).},
  author       = {Hwong, Yi-Ling and Colin, Maxime and Aglas, Philipp and Muller, Caroline J and Sherwood, Steven C.},
  publisher    = {Zenodo},
  title        = {{Data - Assessing memory in convection schemes using idealized tests}},
  doi          = {10.5281/ZENODO.7757041},
  year         = {2023},
}

@inbook{14992,
  abstract     = {In this chapter we first review the Levy–Lieb functional, which gives the lowest kinetic and interaction energy that can be reached with all possible quantum states having a given density. We discuss two possible convex generalizations of this functional, corresponding to using mixed canonical and grand-canonical states, respectively. We present some recent works about the local density approximation, in which the functionals get replaced by purely local functionals constructed using the uniform electron gas energy per unit volume. We then review the known upper and lower bounds on the Levy–Lieb functionals. We start with the kinetic energy alone, then turn to the classical interaction alone, before we are able to put everything together. A later section is devoted to the Hohenberg–Kohn theorem and the role of many-body unique continuation in its proof.},
  author       = {Lewin, Mathieu and Lieb, Elliott H. and Seiringer, Robert},
  booktitle    = {Density Functional Theory},
  editor       = {Cances, Eric and Friesecke, Gero},
  isbn         = {9783031223396},
  issn         = {3005-0286},
  pages        = {115--182},
  publisher    = {Springer},
  title        = {{Universal Functionals in Density Functional Theory}},
  doi          = {10.1007/978-3-031-22340-2_3},
  year         = {2023},
}

@inproceedings{14993,
  abstract     = {Traditional top-down approaches for global health have historically failed to achieve social progress (Hoffman et al., 2015; Hoffman & Røttingen, 2015). Recently, however, a more holistic, multi-level approach termed One Health (OH) (Osterhaus et al., 2020) is being adopted. Several sets of challenges have been identified for the implementation of OH (dos S. Ribeiro et al., 2019), including policy and funding, education and training, and multi-actor, multi-domain, and multi-level collaborations. These exist despite the increasing accessibility to
knowledge and digital collaborative research tools through the internet. To address some of these challenges, we propose a general framework for grassroots community-based means of participatory research. Additionally, we present a specific roadmap to create a Machine Learning for Global Health community in Africa. The proposed framework aims to enable any small group of individuals with scarce resources to build and sustain an online community within approximately two years. We provide a discussion on the potential impact of the proposed framework for global health research collaborations.},
  author       = {Currin, Christopher and Asiedu, Mercy Nyamewaa and Fourie, Chris and Rosman, Benjamin and Turki, Houcemeddine and Lambebo Tonja, Atnafu and Abbott, Jade and Ajala, Marvellous and Adedayo, Sadiq Adewale and Emezue, Chris Chinenye and Machangara, Daphne},
  booktitle    = {1st Workshop on Machine Learning & Global Health},
  location     = {Kigali, Rwanda},
  publisher    = {OpenReview},
  title        = {{A framework for grassroots research collaboration in machine learning and global health}},
  year         = {2023},
}

@misc{14994,
  abstract     = {This resource contains the artifacts for reproducing the experimental results presented in the paper titled "A Flexible Toolchain for Symbolic Rabin Games under Fair and Stochastic Uncertainties", submitted to CAV 2023.},
  author       = {Majumdar, Rupak and Mallik, Kaushik and Rychlicki, Mateusz and Schmuck, Anne-Kathrin and Soudjani, Sadegh},
  publisher    = {Zenodo},
  title        = {{A flexible toolchain for symbolic Rabin games under fair and stochastic uncertainties}},
  doi          = {10.5281/ZENODO.7877790},
  year         = {2023},
}

@misc{14995,
  abstract     = {Lincheck is a new practical and user-friendly framework for testing concurrent data structures on the Java Virtual Machine (JVM). It provides a simple and declarative way to write concurrent tests. Instead of describing how to perform the test, users specify what to test by declaring all the operations to examine; the framework automatically handles the rest. As a result, tests written with Lincheck are concise and easy to understand. 
The artifact presents a collection of Lincheck tests that discover new bugs in popular libraries and implementations from the concurrency literature -- they are listed in Table 1, Section 3. To evaluate the performance of Lincheck analysis, the collection also includes tests that check correct data structures and, thus, always succeed. As in Table 2, Section 3, the experiments demonstrate that tests complete in reasonable time. Finally, Lincheck provides user-friendly output with an easy-to-follow trace to reproduce a detected error, significantly simplifying further investigation.},
  author       = {Koval, Nikita and Fedorov, Alexander and Sokolova, Maria and Tsitelov, Dmitry and Alistarh, Dan-Adrian},
  publisher    = {Zenodo},
  title        = {{Lincheck: A practical framework for testing concurrent data structures on JVM}},
  doi          = {10.5281/ZENODO.7877757},
  year         = {2023},
}

@inproceedings{15023,
  abstract     = {Reinforcement learning has shown promising results in learning neural network policies for complicated control tasks. However, the lack of formal guarantees about the behavior of such policies remains an impediment to their deployment. We propose a novel method for learning a composition of neural network policies in stochastic environments, along with a formal certificate which guarantees that a specification over the policy's behavior is satisfied with the desired probability. Unlike prior work on verifiable RL, our approach leverages the compositional nature of logical specifications provided in SpectRL, to learn over graphs of probabilistic reach-avoid specifications. The formal guarantees are provided by learning neural network policies together with reach-avoid supermartingales (RASM) for the graph’s sub-tasks and then composing them into a global policy. We also derive a tighter lower bound compared to previous work on the probability of reach-avoidance implied by a RASM, which is required to find a compositional policy with an acceptable probabilistic threshold for complex tasks with multiple edge policies. We implement a prototype of our approach and evaluate it on a Stochastic Nine Rooms environment.},
  author       = {Zikelic, Dorde and Lechner, Mathias and Verma, Abhinav and Chatterjee, Krishnendu and Henzinger, Thomas A},
  booktitle    = {37th Conference on Neural Information Processing Systems},
  location     = {New Orleans, LA, United States},
  title        = {{Compositional policy learning in stochastic control systems with formal guarantees}},
  year         = {2023},
}

@misc{15027,
  abstract     = {This data repository underpins the paper published in PNAS (DOI pending) and on bioRxiv (doi: https://doi.org/10.1101/2023.07.05.547777).},
  author       = {Curk, Samo},
  publisher    = {Figshare},
  title        = {{aggregation_data}},
  year         = {2023},
}

@misc{15035,
  abstract     = {This artifact aims to reproduce the experiments from the paper "Monitoring Hyperproperties With Prefix Transducers", accepted at RV'23, and to give further pointers to the implementation of prefix transducers.
It has two parts: a pre-compiled Docker image, and sources that one can use to compile the software (locally or in Docker) and run the experiments.},
  author       = {Chalupa, Marek and Henzinger, Thomas A},
  publisher    = {Zenodo},
  title        = {{Monitoring hyperproperties with prefix transducers}},
  doi          = {10.5281/ZENODO.8191723},
  year         = {2023},
}

@unpublished{15039,
  abstract     = {A crucial property for achieving secure, trustworthy and interpretable deep learning systems is their robustness: small changes to a system's inputs should not result in large changes to its outputs. Mathematically, this means one strives for networks with a small Lipschitz constant. Several recent works have focused on how to construct such Lipschitz networks, typically by imposing constraints on the weight matrices. In this work, we study an orthogonal aspect, namely the role of the activation function. We show that commonly used activation functions, such as MaxMin, as well as all piece-wise linear ones with two segments, unnecessarily restrict the class of representable functions, even in the simplest one-dimensional setting. We furthermore introduce the new N-activation function that is provably more expressive than currently popular activation functions. We provide code at this https URL.},
  author       = {Prach, Bernd and Lampert, Christoph},
  booktitle    = {arXiv},
  title        = {{1-Lipschitz neural networks are more expressive with N-activations}},
  doi          = {10.48550/ARXIV.2311.06103},
  year         = {2023},
}

@inproceedings{13053,
  abstract     = {Deep neural networks (DNNs) often have to be compressed, via pruning and/or quantization, before they can be deployed in practical settings. In this work we propose a new compression-aware minimizer dubbed CrAM that modifies the optimization step in a principled way, in order to produce models whose local loss behavior is stable under compression operations such as pruning. Thus, dense models trained via CrAM should be compressible post-training, in a single step, without significant accuracy loss. Experimental results on standard benchmarks, such as residual networks for ImageNet classification and BERT models for language modelling, show that CrAM produces dense models that can be more accurate than the standard SGD/Adam-based baselines, but which are stable under weight pruning: specifically, we can prune models in one-shot to 70-80% sparsity with almost no accuracy loss, and to 90% with reasonable (∼1%) accuracy loss, which is competitive with gradual compression methods. Additionally, CrAM can produce sparse models which perform well for transfer learning, and it also works for semi-structured 2:4 pruning patterns supported by GPU hardware. The code for reproducing the results is available at this https URL.},
  author       = {Peste, Elena-Alexandra and Vladu, Adrian and Kurtic, Eldar and Lampert, Christoph and Alistarh, Dan-Adrian},
  booktitle    = {11th International Conference on Learning Representations},
  location     = {Kigali, Rwanda},
  title        = {{CrAM: A Compression-Aware Minimizer}},
  year         = {2023},
}

@phdthesis{13074,
  abstract     = {Deep learning has become an integral part of a large number of important applications, and many of the recent breakthroughs have been enabled by the ability to train very large models, capable of capturing complex patterns and relationships from the data. At the same time, the massive sizes of modern deep learning models have made their deployment to smaller devices more challenging; this is particularly important, as in many applications the users rely on accurate deep learning predictions, but they only have access to devices with limited memory and compute power. One solution to this problem is to prune neural networks, by setting as many of their parameters as possible to zero, to obtain accurate sparse models with a lower memory footprint. Despite the great research progress in obtaining sparse models that preserve accuracy, while satisfying memory and computational constraints, there are still many challenges associated with efficiently training sparse models, as well as understanding their generalization properties.

The focus of this thesis is to investigate how the training process of sparse models can be made more efficient, and to understand the differences between sparse and dense models in terms of how well they can generalize to changes in the data distribution. We first study a method for co-training sparse and dense models, at a lower cost compared to regular training. With our method we can obtain very accurate sparse networks, and dense models that can recover the baseline accuracy. Furthermore, we are able to more easily analyze the differences, at prediction level, between the sparse-dense model pairs. Next, we investigate the generalization properties of sparse neural networks in more detail, by studying how well different sparse models trained on a larger task can adapt to smaller, more specialized tasks, in a transfer learning scenario. Our analysis across multiple pruning methods and sparsity levels reveals that sparse models provide features that can transfer similarly to or better than the dense baseline. However, the choice of the pruning method plays an important role, and can influence the results when the features are fixed (linear finetuning), or when they are allowed to adapt to the new task (full finetuning). Using sparse models with fixed masks for finetuning on new tasks has an important practical advantage, as it enables training neural networks on smaller devices. However, one drawback of current pruning methods is that the entire training cycle has to be repeated to obtain the initial sparse model, for every sparsity target; in consequence, the entire training process is costly and also multiple models need to be stored. In the last part of the thesis we propose a method that can train accurate dense models that are compressible in a single step, to multiple sparsity levels, without additional finetuning. Our method results in sparse models that can be competitive with existing pruning methods, and which can also successfully generalize to new tasks.},
  author       = {Peste, Elena-Alexandra},
  issn         = {2663-337X},
  pages        = {147},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Efficiency and generalization of sparse neural networks}},
  doi          = {10.15479/at:ista:13074},
  year         = {2023},
}

@phdthesis{13081,
  abstract     = {During development, tissues undergo changes in size and shape to form functional organs. Distinct cellular processes such as cell division and cell rearrangements underlie tissue morphogenesis. Yet how the distinct processes are controlled and coordinated, and how they contribute to morphogenesis is poorly understood. In our study, we addressed these questions using the developing mouse neural tube. This epithelial organ transforms from a flat epithelial sheet to an epithelial tube while increasing in size and undergoing morphogen-mediated patterning. The extent and mechanism of neural progenitor rearrangement within the developing mouse neuroepithelium is unknown. To investigate this, we performed high resolution lineage tracing analysis to quantify the extent of epithelial rearrangement at different stages of neural tube development. We quantitatively described the relationship between apical cell size and cell cycle-dependent interkinetic nuclear migrations (IKNM) and performed high cellular resolution live imaging of the neuroepithelium to study the dynamics of junctional remodeling. Furthermore, we developed a vertex model of the neuroepithelium to investigate the quantitative contribution of cell proliferation, cell differentiation and mechanical properties to the epithelial rearrangement dynamics and validated the model predictions through functional experiments. Our analysis revealed that at early developmental stages, the apical cell area kinetics driven by IKNM induce high levels of cell rearrangements in a regime of high junctional tension and contractility. After E9.5, there is a sharp decline in the extent of cell rearrangements, suggesting that the epithelium transitions from a fluid-like to a solid-like state. We found that this transition is regulated by the growth rate of the tissue, rather than by changes in cell-cell adhesion and contractile forces. Overall, our study provides a quantitative description of the relationship between tissue growth, cell cycle dynamics, epithelial rearrangements and the emergent tissue material properties, and novel insights into how epithelial cell dynamics influences tissue morphogenesis.},
  author       = {Bocanegra, Laura},
  issn         = {2663-337X},
  pages        = {93},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Epithelial dynamics during mouse neural tube development}},
  doi          = {10.15479/at:ista:13081},
  year         = {2023},
}

@article{13091,
  abstract     = {We use a function field version of the Hardy–Littlewood circle method to study the locus of free rational curves on an arbitrary smooth projective hypersurface of sufficiently low degree. On the one hand this allows us to bound the dimension of the singular locus of the moduli space of rational curves on such hypersurfaces and, on the other hand, it sheds light on Peyre’s reformulation of the Batyrev–Manin conjecture in terms of slopes with respect to the tangent bundle.},
  author       = {Browning, Timothy D and Sawin, Will},
  issn         = {1944-7833},
  journal      = {Algebra and Number Theory},
  number       = {3},
  pages        = {719--748},
  publisher    = {Mathematical Sciences Publishers},
  title        = {{Free rational curves on low degree hypersurfaces and the circle method}},
  doi          = {10.2140/ant.2023.17.719},
  volume       = {17},
  year         = {2023},
}

@article{13092,
  abstract     = {There is a need for the development of lead-free thermoelectric materials for medium-/high-temperature applications. Here, we report a thiol-free tin telluride (SnTe) precursor that can be thermally decomposed to produce SnTe crystals with sizes ranging from tens to several hundreds of nanometers. We further engineer SnTe–Cu2SnTe3 nanocomposites with a homogeneous phase distribution by decomposing the liquid SnTe precursor containing a dispersion of Cu1.5Te colloidal nanoparticles. The presence of Cu within the SnTe and the segregated semimetallic Cu2SnTe3 phase effectively improves the electrical conductivity of SnTe while simultaneously reducing the lattice thermal conductivity without compromising the Seebeck coefficient. Overall, power factors up to 3.63 mW m⁻¹ K⁻² and thermoelectric figures of merit up to 1.04 are obtained at 823 K, which represent a 167% enhancement compared with pristine SnTe.},
  author       = {Nan, Bingfei and Song, Xuan and Chang, Cheng and Xiao, Ke and Zhang, Yu and Yang, Linlin and Horta, Sharona and Li, Junshan and Lim, Khak Ho and Ibáñez, Maria and Cabot, Andreu},
  issn         = {1944-8252},
  journal      = {ACS Applied Materials and Interfaces},
  number       = {19},
  pages        = {23380--23389},
  publisher    = {American Chemical Society},
  title        = {{Bottom-up synthesis of SnTe-based thermoelectric composites}},
  doi          = {10.1021/acsami.3c00625},
  volume       = {15},
  year         = {2023},
}

@article{13093,
  abstract     = {The direct, solid state, and reversible conversion between heat and electricity using thermoelectric devices finds numerous potential uses, especially around room temperature. However, the relatively high material processing cost limits their real applications. Silver selenide (Ag2Se) is one of the very few n-type thermoelectric (TE) materials for room-temperature applications. Herein, we report a room temperature, fast, and aqueous-phase synthesis approach to produce Ag2Se, which can be extended to other metal chalcogenides. These materials reach TE figures of merit (zT) of up to 0.76 at 380 K. To improve these values, bismuth sulfide (Bi2S3) particles also prepared in an aqueous solution are incorporated into the Ag2Se matrix. In this way, a series of Ag2Se/Bi2S3 composites with Bi2S3 wt % of 0.5, 1.0, and 1.5 are prepared by solution blending and hot-press sintering. The presence of Bi2S3 significantly improves the Seebeck coefficient and power factor while at the same time decreasing the thermal conductivity with no apparent drop in electrical conductivity. Thus, a maximum zT value of 0.96 is achieved in the composites with 1.0 wt % Bi2S3 at 370 K. Furthermore, a high average zT value (zTave) of 0.93 in the 300–390 K range is demonstrated.},
  author       = {Nan, Bingfei and Li, Mengyao and Zhang, Yu and Xiao, Ke and Lim, Khak Ho and Chang, Cheng and Han, Xu and Zuo, Yong and Li, Junshan and Arbiol, Jordi and Llorca, Jordi and Ibáñez, Maria and Cabot, Andreu},
  issn         = {2637-6113},
  journal      = {ACS Applied Electronic Materials},
  publisher    = {American Chemical Society},
  title        = {{Engineering of thermoelectric composites based on silver selenide in aqueous solution and ambient temperature}},
  doi          = {10.1021/acsaelm.3c00055},
  year         = {2023},
}

@article{13094,
  abstract     = {Endocytosis is a key cellular process involved in the uptake of nutrients, pathogens, or the therapy of diseases. Most studies have focused on spherical objects, whereas biologically relevant shapes can be highly anisotropic. In this letter, we use an experimental model system based on Giant Unilamellar Vesicles (GUVs) and dumbbell-shaped colloidal particles to mimic and investigate the first stage of the passive endocytic process: engulfment of an anisotropic object by the membrane. Our model has specific ligand–receptor interactions realized by mobile receptors on the vesicles and immobile ligands on the particles. Through a series of experiments, theory, and molecular dynamics simulations, we quantify the wrapping process of anisotropic dumbbells by GUVs and identify distinct stages of the wrapping pathway. We find that the strong curvature variation in the neck of the dumbbell as well as membrane tension are crucial in determining both the speed of wrapping and the final states.},
  author       = {Azadbakht, Ali and Meadowcroft, Billie and Varkevisser, Thijs and Šarić, Anđela and Kraft, Daniela J.},
  issn         = {1530-6992},
  journal      = {Nano Letters},
  number       = {10},
  pages        = {4267--4273},
  publisher    = {American Chemical Society},
  title        = {{Wrapping pathways of anisotropic dumbbell particles by Giant Unilamellar Vesicles}},
  doi          = {10.1021/acs.nanolett.3c00375},
  volume       = {23},
  year         = {2023},
}

@article{13095,
  abstract     = {Disulfide bond formation is fundamentally important for protein structure and constitutes a key mechanism by which cells regulate the intracellular oxidation state. Peroxiredoxins (PRDXs) eliminate reactive oxygen species such as hydrogen peroxide through a catalytic cycle of Cys oxidation and reduction. Additionally, upon Cys oxidation PRDXs undergo extensive conformational rearrangements that may underlie their presently structurally poorly defined functions as molecular chaperones. Rearrangements include high molecular-weight oligomerization, the dynamics of which are, however, poorly understood, as is the impact of disulfide bond formation on these properties. Here we show that formation of disulfide bonds along the catalytic cycle induces extensive μs time scale dynamics, as monitored by magic-angle spinning NMR of the 216 kDa-large Tsa1 decameric assembly and solution-NMR of a designed dimeric mutant. We ascribe the conformational dynamics to structural frustration, resulting from conflicts between the disulfide-constrained reduction of mobility and the desire to fulfill other favorable contacts.},
  author       = {Troussicot, Laura and Vallet, Alicia and Molin, Mikael and Burmann, Björn M. and Schanda, Paul},
  issn         = {1520-5126},
  journal      = {Journal of the American Chemical Society},
  number       = {19},
  pages        = {10700--10711},
  publisher    = {American Chemical Society},
  title        = {{Disulfide-bond-induced structural frustration and dynamic disorder in a peroxiredoxin from MAS NMR}},
  doi          = {10.1021/jacs.3c01200},
  volume       = {145},
  year         = {2023},
}

@article{13096,
  abstract     = {Eukaryotic cells can undergo different forms of programmed cell death, many of which culminate in plasma membrane rupture as the defining terminal event. Plasma membrane rupture was long thought to be driven by osmotic pressure, but it has recently been shown to be in many cases an active process, mediated by the protein ninjurin-1 (NINJ1). Here we resolve the structure of NINJ1 and the mechanism by which it ruptures membranes. Super-resolution microscopy reveals that NINJ1 clusters into structurally diverse assemblies in the membranes of dying cells, in particular large, filamentous assemblies with branched morphology. A cryo-electron microscopy structure of NINJ1 filaments shows a tightly packed fence-like array of transmembrane α-helices. Filament directionality and stability is defined by two amphipathic α-helices that interlink adjacent filament subunits. The NINJ1 filament features a hydrophilic side and a hydrophobic side, and molecular dynamics simulations show that it can stably cap membrane edges. The function of the resulting supramolecular arrangement was validated by site-directed mutagenesis. Our data thus suggest that, during lytic cell death, the extracellular α-helices of NINJ1 insert into the plasma membrane to polymerize NINJ1 monomers into amphipathic filaments that rupture the plasma membrane. The membrane protein NINJ1 is therefore an interactive component of the eukaryotic cell membrane that functions as an in-built breaking point in response to activation of cell death.},
  author       = {Degen, Morris and Santos, José Carlos and Pluhackova, Kristyna and Cebrero, Gonzalo and Ramos, Saray and Jankevicius, Gytis and Hartenian, Ella and Guillerm, Undina and Mari, Stefania A. and Kohl, Bastian and Müller, Daniel J. and Schanda, Paul and Maier, Timm and Perez, Camilo and Sieben, Christian and Broz, Petr and Hiller, Sebastian},
  issn         = {1476-4687},
  journal      = {Nature},
  pages        = {1065--1071},
  publisher    = {Springer Nature},
  title        = {{Structural basis of NINJ1-mediated plasma membrane rupture in cell death}},
  doi          = {10.1038/s41586-023-05991-z},
  volume       = {618},
  year         = {2023},
}

@article{13097,
  abstract     = {Vertebrate movement is orchestrated by spinal inter- and motor neurons that, together with sensory and cognitive input, produce dynamic motor behaviors. These behaviors vary from the simple undulatory swimming of fish and larval aquatic species to the highly coordinated running, reaching and grasping of mice, humans and other mammals. This variation raises the fundamental question of how spinal circuits have changed in register with motor behavior. In simple, undulatory fish, exemplified by the lamprey, two broad classes of interneurons shape motor neuron output: ipsilateral-projecting excitatory neurons, and commissural-projecting inhibitory neurons. An additional class of ipsilateral inhibitory neurons is required to generate escape swim behavior in larval zebrafish and tadpoles. In limbed vertebrates, a more complex spinal neuron composition is observed. In this review, we provide evidence that movement elaboration correlates with an increase and specialization of these three basic interneuron types into molecularly, anatomically, and functionally distinct subpopulations. We summarize recent work linking neuron types to movement-pattern generation across fish, amphibians, reptiles, birds and mammals.},
  author       = {Wilson, Alexia C and Sweeney, Lora Beatrice Jaeger},
  issn         = {1662-5110},
  journal      = {Frontiers in Neural Circuits},
  publisher    = {Frontiers},
  title        = {{Spinal cords: Symphonies of interneurons across species}},
  doi          = {10.3389/fncir.2023.1146449},
  volume       = {17},
  year         = {2023},
}

