@article{8940,
  abstract     = {We quantise Whitney's construction to prove the existence of a triangulation for any $C^2$ manifold, so that we get an algorithm with explicit bounds. We also give a new elementary proof, which is completely geometric.},
  author       = {Boissonnat, Jean-Daniel and Kachanovich, Siargey and Wintraecken, Mathijs},
  issn         = {1432-0444},
  journal      = {Discrete \& Computational Geometry},
  keywords     = {Theoretical Computer Science, Computational Theory and Mathematics, Geometry and Topology, Discrete Mathematics and Combinatorics},
  number       = {1},
  pages        = {386--434},
  publisher    = {Springer Nature},
  title        = {Triangulating Submanifolds: An Elementary and Quantified Version of {Whitney}'s Method},
  doi          = {10.1007/s00454-020-00250-8},
  volume       = {66},
  year         = {2021},
}

@inproceedings{10002,
  abstract     = {We present a faster symbolic algorithm for the following central problem in probabilistic verification: Compute the maximal end-component (MEC) decomposition of Markov decision processes (MDPs). This problem generalizes the SCC decomposition problem of graphs and closed recurrent sets of Markov chains. The model of symbolic algorithms is widely used in formal verification and model-checking, where access to the input model is restricted to only symbolic operations (e.g., basic set operations and computation of one-step neighborhood). For an input MDP with $n$ vertices and $m$ edges, the classical symbolic algorithm from the 1990s for the MEC decomposition requires $O(n^2)$ symbolic operations and $O(1)$ symbolic space. The only other symbolic algorithm for the MEC decomposition requires $O(n \sqrt{m})$ symbolic operations and $O(\sqrt{m})$ symbolic space. A main open question is whether the worst-case $O(n^2)$ bound for symbolic operations can be beaten. We present a symbolic algorithm that requires $\widetilde{O}(n^{1.5})$ symbolic operations and $\widetilde{O}(\sqrt{n})$ symbolic space. Moreover, the parametrization of our algorithm provides a trade-off between symbolic operations and symbolic space: for all $0 < \epsilon \leq 1/2$ the symbolic algorithm requires $\widetilde{O}(n^{2-\epsilon})$ symbolic operations and $\widetilde{O}(n^{\epsilon})$ symbolic space ($\widetilde{O}$ hides poly-logarithmic factors). Using our techniques we present faster algorithms for computing the almost-sure winning regions of $\omega$-regular objectives for MDPs. We consider the canonical parity objectives for $\omega$-regular objectives, and for parity objectives with $d$ priorities we present an algorithm that computes the almost-sure winning region with $\widetilde{O}(n^{2-\epsilon})$ symbolic operations and $\widetilde{O}(n^{\epsilon})$ symbolic space, for all $0 < \epsilon \leq 1/2$.},
  author       = {Chatterjee, Krishnendu and Dvorak, Wolfgang and Henzinger, Monika H and Svozil, Alexander},
  booktitle    = {Proceedings of the 36th Annual ACM/IEEE Symposium on Logic in Computer Science},
  isbn         = {978-1-6654-4896-3},
  issn         = {1043-6871},
  keywords     = {Computer science, Computational modeling, Markov processes, Probabilistic logic, Formal verification, Game Theory},
  location     = {Rome, Italy},
  pages        = {1--13},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {Symbolic Time and Space Tradeoffs for Probabilistic Verification},
  doi          = {10.1109/LICS52264.2021.9470739},
  year         = {2021},
}

@inproceedings{10004,
  abstract     = {Markov chains are the de facto finite-state model for stochastic dynamical systems, and Markov decision processes (MDPs) extend Markov chains by incorporating non-deterministic behaviors. Given an MDP and rewards on states, a classical optimization criterion is the maximal expected total reward where the MDP stops after $T$ steps, which can be computed by a simple dynamic programming algorithm. We consider a natural generalization of the problem where the stopping times can be chosen according to a probability distribution, such that the expected stopping time is $T$, to optimize the expected total reward. Quite surprisingly we establish inter-reducibility of the expected stopping-time problem for Markov chains with the Positivity problem (which is related to the well-known Skolem problem), for which establishing either decidability or undecidability would be a major breakthrough. Given the hardness of the exact problem, we consider the approximate version of the problem: we show that it can be solved in exponential time for Markov chains and in exponential space for MDPs.},
  author       = {Chatterjee, Krishnendu and Doyen, Laurent},
  booktitle    = {Proceedings of the 36th Annual ACM/IEEE Symposium on Logic in Computer Science},
  isbn         = {978-1-6654-4896-3},
  issn         = {1043-6871},
  keywords     = {Computer science, Heuristic algorithms, Memory management, Automata, Markov processes, Probability distribution, Complexity theory},
  location     = {Rome, Italy},
  pages        = {1--13},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {Stochastic Processes with Expected Stopping Time},
  doi          = {10.1109/LICS52264.2021.9470595},
  year         = {2021},
}

@article{14125,
  abstract     = {Motivation: Recent technological advances have led to an increase in the production and availability of single-cell data. The ability to integrate a set of multi-technology measurements would allow the identification of biologically or clinically meaningful observations through the unification of the perspectives afforded by each technology. In most cases, however, profiling technologies consume the used cells and thus pairwise correspondences between datasets are lost. Due to the sheer size single-cell datasets can acquire, scalable algorithms that are able to universally match single-cell measurements carried out in one cell to its corresponding sibling in another technology are needed.
Results: We propose Single-Cell data Integration via Matching (SCIM), a scalable approach to recover such correspondences in two or more technologies. SCIM assumes that cells share a common (low-dimensional) underlying structure and that the underlying cell distribution is approximately constant across technologies. It constructs a technology-invariant latent space using an autoencoder framework with an adversarial objective. Multi-modal datasets are integrated by pairing cells across technologies using a bipartite matching scheme that operates on the low-dimensional latent representations. We evaluate SCIM on a simulated cellular branching process and show that the cell-to-cell matches derived by SCIM reflect the same pseudotime on the simulated dataset. Moreover, we apply our method to two real-world scenarios, a melanoma tumor sample and a human bone marrow sample, where we pair cells from a scRNA dataset to their sibling cells in a CyTOF dataset achieving 90% and 78% cell-matching accuracy for each one of the samples, respectively.},
  author       = {Stark, Stefan G and Ficek, Joanna and Locatello, Francesco and Bonilla, Ximena and Chevrier, Stéphane and Singer, Franziska and Aebersold, Rudolf and Al-Quaddoomi, Faisal S and Albinus, Jonas and Alborelli, Ilaria and Andani, Sonali and Attinger, Per-Olof and Bacac, Marina and Baumhoer, Daniel and Beck-Schimmer, Beatrice and Beerenwinkel, Niko and Beisel, Christian and Bernasconi, Lara and Bertolini, Anne and Bodenmiller, Bernd and Bonilla, Ximena and Casanova, Ruben and Chevrier, Stéphane and Chicherova, Natalia and D'Costa, Maya and Danenberg, Esther and Davidson, Natalie and Drăgan, Monica-Andreea and Dummer, Reinhard and Engler, Stefanie and Erkens, Martin and Eschbach, Katja and Esposito, Cinzia and Fedier, André and Ferreira, Pedro and Ficek, Joanna and Frei, Anja L and Frey, Bruno and Goetze, Sandra and Grob, Linda and Gut, Gabriele and Günther, Detlef and Haberecker, Martina and Haeuptle, Pirmin and Heinzelmann-Schwarz, Viola and Herter, Sylvia and Holtackers, Rene and Huesser, Tamara and Irmisch, Anja and Jacob, Francis and Jacobs, Andrea and Jaeger, Tim M and Jahn, Katharina and James, Alva R and Jermann, Philip M and Kahles, André and Kahraman, Abdullah and Koelzer, Viktor H and Kuebler, Werner and Kuipers, Jack and Kunze, Christian P and Kurzeder, Christian and Lehmann, Kjong-Van and Levesque, Mitchell and Lugert, Sebastian and Maass, Gerd and Manz, Markus and Markolin, Philipp and Mena, Julien and Menzel, Ulrike and Metzler, Julian M and Miglino, Nicola and Milani, Emanuela S and Moch, Holger and Muenst, Simone and Murri, Riccardo and Ng, Charlotte KY and Nicolet, Stefan and Nowak, Marta and Pedrioli, Patrick GA and Pelkmans, Lucas and Piscuoglio, Salvatore and Prummer, Michael and Ritter, Mathilde and Rommel, Christian and Rosano-González, María L and Rätsch, Gunnar and Santacroce, Natascha and Sarabia del Castillo, Jacobo and Schlenker, Ramona and Schwalie, Petra C and Schwan, Severin and Schär, Tobias and Senti, Gabriela and Singer, Franziska and Sivapatham, Sujana and Snijder, Berend and Sobottka, Bettina and Sreedharan, Vipin T and Stark, Stefan and Stekhoven, Daniel J and Theocharides, Alexandre PA and Thomas, Tinu M and Tolnay, Markus and Tosevski, Vinko and Toussaint, Nora C and Tuncel, Mustafa A and Tusup, Marina and van Drogen, Audrey and Vetter, Marcus and Vlajnic, Tatjana and Weber, Sandra and Weber, Walter P and Wegmann, Rebekka and Weller, Michael and Wendt, Fabian and Wey, Norbert and Wicki, Andreas and Wollscheid, Bernd and Yu, Shuqing and Ziegler, Johanna and Zimmermann, Marc and Zoche, Martin and Zuend, Gregor and Rätsch, Gunnar and Lehmann, Kjong-Van},
  issn         = {1367-4811},
  journal      = {Bioinformatics},
  keywords     = {Computational Mathematics, Computational Theory and Mathematics, Computer Science Applications, Molecular Biology, Biochemistry, Statistics and Probability},
  number       = {Supplement\_2},
  pages        = {i919--i927},
  publisher    = {Oxford University Press},
  title        = {{SCIM}: Universal Single-Cell Matching with Unpaired Feature Sets},
  doi          = {10.1093/bioinformatics/btaa843},
  volume       = {36},
  year         = {2020},
}

@article{11657,
  abstract     = {The minimum cut problem for an undirected edge-weighted graph asks us to divide its set of nodes into two blocks while minimizing the weight sum of the cut edges. Here, we introduce a linear-time algorithm to compute near-minimum cuts. Our algorithm is based on cluster contraction using label propagation and Padberg and Rinaldi's contraction heuristics [SIAM Review, 1991]. We give both sequential and shared-memory parallel implementations of our algorithm. Extensive experiments on both real-world and generated instances show that our algorithm finds the optimal cut on nearly all instances significantly faster than other state-of-the-art exact algorithms, and our error rate is lower than that of other heuristic algorithms. In addition, our parallel algorithm runs a factor $7.5\times$ faster on average when using 32 threads. To further speed up computations, we also give a version of our algorithm that performs random edge contractions as preprocessing. This version achieves a lower running time and better parallel scalability at the expense of a higher error rate.},
  author       = {Henzinger, Monika H and Noe, Alexander and Schulz, Christian and Strash, Darren},
  issn         = {1084-6654},
  journal      = {ACM Journal of Experimental Algorithmics},
  keywords     = {Theoretical Computer Science},
  pages        = {1--22},
  publisher    = {Association for Computing Machinery},
  title        = {Practical Minimum Cut Algorithms},
  doi          = {10.1145/3274662},
  volume       = {23},
  year         = {2018},
}

@article{11670,
  abstract     = {Auctions are widely used on the Web. Applications range from sponsored search to platforms such as eBay. In these and in many other applications the auctions in use are single-/multi-item auctions with unit demand. The main drawback of standard mechanisms for this type of auctions, such as VCG and GSP, is the limited expressiveness that they offer to the bidders. The General Auction Mechanism (GAM) of Aggarwal et al. [2009] takes a first step toward addressing the problem of limited expressiveness by computing a bidder optimal, envy-free outcome for linear utility functions with identical slopes and a single discontinuity per bidder-item pair. We show that in many practical situations this does not suffice to adequately model the preferences of the bidders, and we overcome this problem by presenting the first mechanism for piecewise linear utility functions with nonidentical slopes and multiple discontinuities. Our mechanism runs in polynomial time. Like GAM it is incentive compatible for inputs that fulfill a certain nondegeneracy assumption, but our requirement is more general than the requirement of GAM. For discontinuous utility functions that are nondegenerate as well as for continuous utility functions the outcome of our mechanism is a competitive equilibrium. We also show how our mechanism can be used to compute approximately bidder optimal, envy-free outcomes for a general class of continuous utility functions via piecewise linear approximation. Finally, we prove hardness results for even more expressive settings.},
  author       = {Dütting, Paul and Henzinger, Monika H and Weber, Ingmar},
  issn         = {2167-8383},
  journal      = {ACM Transactions on Economics and Computation},
  keywords     = {Computational Mathematics, Marketing, Economics and Econometrics, Statistics and Probability, Computer Science (miscellaneous)},
  number       = {1},
  publisher    = {Association for Computing Machinery},
  title        = {An Expressive Mechanism for Auctions on the {Web}},
  doi          = {10.1145/2716312},
  volume       = {4},
  year         = {2015},
}

@article{8459,
  abstract     = {Nuclear magnetic resonance (NMR) is a powerful tool for observing the motion of biomolecules at the atomic level. One technique, the analysis of relaxation dispersion phenomenon, is highly suited for studying the kinetics and thermodynamics of biological processes. Built on top of the relax computational environment for NMR dynamics is a new dispersion analysis designed to be comprehensive, accurate and easy-to-use. The software supports more models, both numeric and analytic, than current solutions. An automated protocol, available for scripting and driving the graphical user interface (GUI), is designed to simplify the analysis of dispersion data for NMR spectroscopists. Decreases in optimization time are granted by parallelization for running on computer clusters and by skipping an initial grid search by using parameters from one solution as the starting point for another---using analytic model results for the numeric models, taking advantage of model nesting, and using averaged non-clustered results for the clustered analysis.},
  author       = {Morin, Sébastien and Linnet, Troels E and Lescanne, Mathilde and Schanda, Paul and Thompson, Gary S and Tollinger, Martin and Teilum, Kaare and Gagné, Stéphane and Marion, Dominique and Griesinger, Christian and Blackledge, Martin and d'Auvergne, Edward J},
  issn         = {1367-4803},
  journal      = {Bioinformatics},
  keywords     = {Statistics and Probability, Computational Theory and Mathematics, Biochemistry, Molecular Biology, Computational Mathematics, Computer Science Applications},
  number       = {15},
  pages        = {2219--2220},
  publisher    = {Oxford University Press},
  title        = {{Relax}: The Analysis of Biomolecular Kinetics and Thermodynamics Using {NMR} Relaxation Dispersion Data},
  doi          = {10.1093/bioinformatics/btu166},
  volume       = {30},
  year         = {2014},
}

@article{9145,
  abstract     = {We have found a new way to express the solutions of the RSM (Reynolds Stress Model) equations that allows us to present the turbulent diffusivities for heat, salt and momentum in a way that is considerably simpler and thus easier to implement than in previous work. The RSM provides the dimensionless mixing efficiencies Γα (α stands for heat, salt and momentum). However, to compute the diffusivities, one needs additional information, specifically, the dissipation ε. Since a dynamic equation for the latter that includes the physical processes relevant to the ocean is still not available, one must resort to different sources of information outside the RSM to obtain a complete Mixing Scheme usable in OGCMs.
As for the RSM results, we show that the Γα's are functions of both Ri and Rρ (Richardson number and density ratio representing double diffusion, DD); the Γα are different for heat, salt and momentum; in the case of heat, the traditional value Γh = 0.2 is valid only in the presence of strong shear (when DD is inoperative) while when shear subsides, NATRE data show that Γh can be three times as large, a result that we reproduce. The salt Γs is given in terms of Γh. The momentum Γm has thus far been guessed with different prescriptions while the RSM provides a well defined expression for Γm(Ri, Rρ). Having tested Γh, we then test the momentum Γm by showing that the turbulent Prandtl number Γm/Γh vs. Ri reproduces the available data quite well.
As for the dissipation ε, we use different representations, one for the mixed layer (ML), one for the thermocline and one for the ocean's bottom. For the ML, we adopt a procedure analogous to the one successfully used in PB (planetary boundary layer) studies; for the thermocline, we employ an expression for the variable εN−2 from studies of the internal gravity waves spectra which includes a latitude dependence; for the ocean bottom, we adopt the enhanced bottom diffusivity expression used by previous authors but with a state of the art internal tidal energy formulation and replace the fixed Γα = 0.2 with the RSM result that brings into the problem the Ri, Rρ dependence of the Γα; the unresolved bottom drag, which has thus far been either ignored or modeled with heuristic relations, is modeled using a formalism we previously developed and tested in PBL studies.
We carried out several tests without an OGCM. Prandtl and flux Richardson numbers vs. Ri. The RSM model reproduces both types of data satisfactorily. DD and Mixing efficiency Γh(Ri, Rρ). The RSM model reproduces well the NATRE data. Bimodal ε-distribution. NATRE data show that ε(Ri < 1) ≈ 10ε(Ri > 1), which our model reproduces. Heat to salt flux ratio. In the Ri ≫ 1 regime, the RSM predictions reproduce the data satisfactorily. NATRE mass diffusivity. The z-profile of the mass diffusivity reproduces well the measurements at NATRE. The local form of the mixing scheme is algebraic with one cubic equation to solve.},
  author       = {Canuto, V. M. and Howard, A. M. and Cheng, Y. and Muller, Caroline J and Leboissetier, A. and Jayne, S. R.},
  issn         = {1463-5003},
  journal      = {Ocean Modelling},
  keywords     = {Computer Science (miscellaneous), Geotechnical Engineering and Engineering Geology, Atmospheric Science, Oceanography},
  number       = {3-4},
  pages        = {70--91},
  publisher    = {Elsevier},
  title        = {Ocean Turbulence, {III}: New {GISS} Vertical Mixing Scheme},
  doi          = {10.1016/j.ocemod.2010.04.006},
  volume       = {34},
  year         = {2010},
}

@article{8509,
  abstract     = {The goal of this paper is to present to nonspecialists what is perhaps the simplest possible geometrical picture explaining the mechanism of Arnold diffusion. We choose to speak of a specific model—that of geometric rays in a periodic optical medium. This model is equivalent to that of a particle in a periodic potential in ${\mathbb R}^{n}$ with energy prescribed and to the geodesic flow in a Riemannian metric on ${\mathbb R}^{n}$.},
  author       = {Kaloshin, Vadim and Levi, Mark},
  issn         = {0036-1445},
  journal      = {SIAM Review},
  keywords     = {Theoretical Computer Science, Applied Mathematics, Computational Mathematics},
  number       = {4},
  pages        = {702--720},
  publisher    = {Society for Industrial \& Applied Mathematics},
  title        = {Geometry of {Arnold} Diffusion},
  doi          = {10.1137/070703235},
  volume       = {50},
  year         = {2008},
}

