[{"year":"2023","acknowledgement":"We thank the Barton group for useful discussion and feedback during the writing of this article. Comments from Roger Butlin, Molly Schumer's Group, the tskit development team, editors and three reviewers greatly improved the manuscript. Funding was provided by SCAS (Natural Sciences Programme, Knut and Alice Wallenberg Foundation), an FWF Wittgenstein grant (PT1001Z211), an FWF standalone grant (grant P 32166), and an ERC Advanced Grant. YFC was supported by the Max Planck Society and an ERC Proof of Concept Grant #101069216 (HAPLOTAGGING).","_id":"12159","date_updated":"2023-08-16T08:18:47Z","abstract":[{"text":"The term “haplotype block” is commonly used in the developing field of haplotype-based inference methods. We argue that the term should be defined based on the structure of the Ancestral Recombination Graph (ARG), which contains complete information on the ancestry of a sample. We use simulated examples to demonstrate key features of the relationship between haplotype blocks and ancestral structure, emphasizing the stochasticity of the processes that generate them. Even the simplest cases of neutrality or of a “hard” selective sweep produce a rich structure, often missed by commonly used statistics. We highlight a number of novel methods for inferring haplotype structure, based on the full ARG, or on a sequence of trees, and illustrate how they can be used to define haplotype blocks using an empirical data set. While the advent of new, computationally efficient methods makes it possible to apply these concepts broadly, they (and additional new methods) could benefit from adding features to explore haplotype blocks, as we define them. 
Understanding and applying the concept of the haplotype block will be essential to fully exploit long and linked-read sequencing technologies.","lang":"eng"}],"month":"03","oa_version":"Published Version","type":"journal_article","page":"1441-1457","file_date_updated":"2023-08-16T08:15:41Z","date_created":"2023-01-12T12:09:17Z","volume":32,"status":"public","external_id":{"isi":["000900762000001"],"pmid":["36433653"]},"citation":{"apa":"Shipilina, D., Pal, A., Stankowski, S., Chan, Y. F., &#38; Barton, N. H. (2023). On the origin and structure of haplotype blocks. <i>Molecular Ecology</i>. Wiley. <a href=\"https://doi.org/10.1111/mec.16793\">https://doi.org/10.1111/mec.16793</a>","ista":"Shipilina D, Pal A, Stankowski S, Chan YF, Barton NH. 2023. On the origin and structure of haplotype blocks. Molecular Ecology. 32(6), 1441–1457.","mla":"Shipilina, Daria, et al. “On the Origin and Structure of Haplotype Blocks.” <i>Molecular Ecology</i>, vol. 32, no. 6, Wiley, 2023, pp. 1441–57, doi:<a href=\"https://doi.org/10.1111/mec.16793\">10.1111/mec.16793</a>.","ama":"Shipilina D, Pal A, Stankowski S, Chan YF, Barton NH. On the origin and structure of haplotype blocks. <i>Molecular Ecology</i>. 2023;32(6):1441-1457. doi:<a href=\"https://doi.org/10.1111/mec.16793\">10.1111/mec.16793</a>","short":"D. Shipilina, A. Pal, S. Stankowski, Y.F. Chan, N.H. Barton, Molecular Ecology 32 (2023) 1441–1457.","chicago":"Shipilina, Daria, Arka Pal, Sean Stankowski, Yingguang Frank Chan, and Nicholas H Barton. “On the Origin and Structure of Haplotype Blocks.” <i>Molecular Ecology</i>. Wiley, 2023. <a href=\"https://doi.org/10.1111/mec.16793\">https://doi.org/10.1111/mec.16793</a>.","ieee":"D. Shipilina, A. Pal, S. Stankowski, Y. F. Chan, and N. H. Barton, “On the origin and structure of haplotype blocks,” <i>Molecular Ecology</i>, vol. 32, no. 6. Wiley, pp. 
1441–1457, 2023."},"intvolume":"        32","has_accepted_license":"1","oa":1,"publication_status":"published","date_published":"2023-03-01T00:00:00Z","ddc":["570"],"pmid":1,"department":[{"_id":"NiBa"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","publisher":"Wiley","scopus_import":"1","article_processing_charge":"Yes (via OA deal)","article_type":"original","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"publication":"Molecular Ecology","file":[{"success":1,"file_name":"2023_MolecularEcology_Shipilina.pdf","relation":"main_file","content_type":"application/pdf","file_size":7144607,"creator":"dernst","date_updated":"2023-08-16T08:15:41Z","file_id":"14062","checksum":"b10e0f8fa3dc4d72aaf77a557200978a","date_created":"2023-08-16T08:15:41Z","access_level":"open_access"}],"day":"01","author":[{"full_name":"Shipilina, Daria","orcid":"0000-0002-1145-9226","id":"428A94B0-F248-11E8-B48F-1D18A9856A87","last_name":"Shipilina","first_name":"Daria"},{"first_name":"Arka","last_name":"Pal","orcid":"0000-0002-4530-8469","id":"6AAB2240-CA9A-11E9-9C1A-D9D1E5697425","full_name":"Pal, Arka"},{"full_name":"Stankowski, Sean","id":"43161670-5719-11EA-8025-FABC3DDC885E","first_name":"Sean","last_name":"Stankowski"},{"full_name":"Chan, Yingguang Frank","first_name":"Yingguang Frank","last_name":"Chan"},{"first_name":"Nicholas H","last_name":"Barton","full_name":"Barton, Nicholas H","id":"4880FE40-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-8548-5240"}],"title":"On the origin and structure of haplotype blocks","project":[{"grant_number":"P32166","_id":"05959E1C-7A3F-11EA-A408-12923DDC885E","name":"The maintenance of alternative adaptive peaks in snapdragons"},{"grant_number":"Z211","name":"The Wittgenstein 
Prize","_id":"25F42A32-B435-11E9-9278-68D0E5697425","call_identifier":"FWF"},{"_id":"bd6958e0-d553-11ed-ba76-86eba6a76c00","name":"Understanding the evolution of continuous genomes","grant_number":"101055327"}],"language":[{"iso":"eng"}],"issue":"6","isi":1,"keyword":["Genetics","Ecology","Evolution","Behavior and Systematics"],"publication_identifier":{"issn":["0962-1083"],"eissn":["1365-294X"]},"quality_controlled":"1","doi":"10.1111/mec.16793"},{"project":[{"call_identifier":"H2020","_id":"0599E47C-7A3F-11EA-A408-12923DDC885E","name":"Formal Methods for Stochastic Models: Algorithms and Applications","grant_number":"863818"},{"grant_number":"Z211","name":"The Wittgenstein Prize","_id":"25F42A32-B435-11E9-9278-68D0E5697425","call_identifier":"FWF"}],"language":[{"iso":"eng"}],"isi":1,"publication_identifier":{"eissn":["2041-1723"]},"quality_controlled":"1","doi":"10.1038/s41467-023-37817-x","pmid":1,"department":[{"_id":"KrCh"}],"publisher":"Springer Nature","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","ec_funded":1,"article_processing_charge":"No","scopus_import":"1","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"article_type":"original","publication":"Nature Communications","file":[{"success":1,"file_name":"2023_NatureComm_Schmid.pdf","content_type":"application/pdf","relation":"main_file","file_size":1786475,"creator":"dernst","date_updated":"2023-04-25T09:13:53Z","file_id":"12868","checksum":"a4b3b7b36fbef068cabf4fb99501fef6","date_created":"2023-04-25T09:13:53Z","access_level":"open_access"}],"day":"12","author":[{"last_name":"Schmid","first_name":"Laura","full_name":"Schmid, Laura","orcid":"0000-0002-6978-7329","id":"38B437DE-F248-11E8-B48F-1D18A9856A87"},{"last_name":"Ekbatani","first_name":"Farbod","full_name":"Ekbatani, 
Farbod"},{"id":"2FDF8F3C-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-5116-955X","full_name":"Hilbe, Christian","first_name":"Christian","last_name":"Hilbe"},{"first_name":"Krishnendu","last_name":"Chatterjee","full_name":"Chatterjee, Krishnendu","id":"2E5DCA20-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-4561-241X"}],"title":"Quantitative assessment can stabilize indirect reciprocity under imperfect information","article_number":"2086","status":"public","external_id":{"isi":["001003644100020"],"pmid":["37045828"]},"citation":{"short":"L. Schmid, F. Ekbatani, C. Hilbe, K. Chatterjee, Nature Communications 14 (2023).","chicago":"Schmid, Laura, Farbod Ekbatani, Christian Hilbe, and Krishnendu Chatterjee. “Quantitative Assessment Can Stabilize Indirect Reciprocity under Imperfect Information.” <i>Nature Communications</i>. Springer Nature, 2023. <a href=\"https://doi.org/10.1038/s41467-023-37817-x\">https://doi.org/10.1038/s41467-023-37817-x</a>.","ieee":"L. Schmid, F. Ekbatani, C. Hilbe, and K. Chatterjee, “Quantitative assessment can stabilize indirect reciprocity under imperfect information,” <i>Nature Communications</i>, vol. 14. Springer Nature, 2023.","apa":"Schmid, L., Ekbatani, F., Hilbe, C., &#38; Chatterjee, K. (2023). Quantitative assessment can stabilize indirect reciprocity under imperfect information. <i>Nature Communications</i>. Springer Nature. <a href=\"https://doi.org/10.1038/s41467-023-37817-x\">https://doi.org/10.1038/s41467-023-37817-x</a>","mla":"Schmid, Laura, et al. “Quantitative Assessment Can Stabilize Indirect Reciprocity under Imperfect Information.” <i>Nature Communications</i>, vol. 14, 2086, Springer Nature, 2023, doi:<a href=\"https://doi.org/10.1038/s41467-023-37817-x\">10.1038/s41467-023-37817-x</a>.","ista":"Schmid L, Ekbatani F, Hilbe C, Chatterjee K. 2023. Quantitative assessment can stabilize indirect reciprocity under imperfect information. Nature Communications. 
14, 2086.","ama":"Schmid L, Ekbatani F, Hilbe C, Chatterjee K. Quantitative assessment can stabilize indirect reciprocity under imperfect information. <i>Nature Communications</i>. 2023;14. doi:<a href=\"https://doi.org/10.1038/s41467-023-37817-x\">10.1038/s41467-023-37817-x</a>"},"intvolume":"        14","has_accepted_license":"1","oa":1,"publication_status":"published","date_published":"2023-04-12T00:00:00Z","ddc":["000"],"year":"2023","acknowledgement":"This work was supported by the European Research Council CoG 863818 (ForM-SMArt) (to K.C.) and the European Research Council Starting Grant 850529: E-DIRECT (to C.H.). L.S. received additional partial support by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award), and also thanks the support by the Stochastic Analysis and Application Research Center (SAARC) under National Research Foundation of Korea grant NRF-2019R1A5A1028324. The authors additionally thank Stefan Schmid for providing access to his lab infrastructure at the University of Vienna for the purpose of collecting simulation data.","_id":"12861","date_updated":"2025-07-14T09:09:52Z","abstract":[{"lang":"eng","text":"The field of indirect reciprocity investigates how social norms can foster cooperation when individuals continuously monitor and assess each other’s social interactions. By adhering to certain social norms, cooperating individuals can improve their reputation and, in turn, receive benefits from others. Eight social norms, known as the “leading eight,\" have been shown to effectively promote the evolution of cooperation as long as information is public and reliable. These norms categorize group members as either ’good’ or ’bad’. In this study, we examine a scenario where individuals instead assign nuanced reputation scores to each other, and only cooperate with those whose reputation exceeds a certain threshold. 
We find both analytically and through simulations that such quantitative assessments are error-correcting, thus facilitating cooperation in situations where information is private and unreliable. Moreover, our results identify four specific norms that are robust to such conditions, and may be relevant for helping to sustain cooperation in natural populations."}],"month":"04","oa_version":"Published Version","type":"journal_article","file_date_updated":"2023-04-25T09:13:53Z","date_created":"2023-04-23T22:01:03Z","volume":14},{"_id":"10774","year":"2022","acknowledgement":"This work was funded in part by the Wittgenstein Award Z211-N23 of the Austrian Science Fund (FWF) and by the FWF project W1255-N23.","date_created":"2022-02-20T23:01:34Z","volume":13182,"date_updated":"2022-08-05T09:02:56Z","abstract":[{"lang":"eng","text":"We study the problem of specifying sequential information-flow properties of systems. Information-flow properties are hyperproperties, as they compare different traces of a system. Sequential information-flow properties can express changes, over time, in the information-flow constraints. For example, information-flow constraints during an initialization phase of a system may be different from information-flow constraints that are required during the operation phase. We formalize several variants of interpreting sequential information-flow constraints, which arise from different assumptions about what can be observed of the system. For this purpose, we introduce a first-order logic, called Hypertrace Logic, with both trace and time quantifiers for specifying linear-time hyperproperties. We prove that HyperLTL, which corresponds to a fragment of Hypertrace Logic with restricted quantifier prefixes, cannot specify the majority of the studied variants of sequential information flow, including all variants in which the transition between sequential phases (such as initialization and operation) happens asynchronously. 
Our results rely on new equivalences between sets of traces that cannot be distinguished by certain classes of formulas from Hypertrace Logic. This presents a new approach to proving inexpressiveness results for HyperLTL."}],"type":"conference","oa_version":"Preprint","month":"01","page":"1-19","citation":{"chicago":"Bartocci, Ezio, Thomas Ferrere, Thomas A Henzinger, Dejan Nickovic, and Ana Oliveira Da Costa. “Flavors of Sequential Information Flow.” In <i>Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)</i>, 13182:1–19. Springer Nature, 2022. <a href=\"https://doi.org/10.1007/978-3-030-94583-1_1\">https://doi.org/10.1007/978-3-030-94583-1_1</a>.","ieee":"E. Bartocci, T. Ferrere, T. A. Henzinger, D. Nickovic, and A. O. Da Costa, “Flavors of sequential information flow,” in <i>Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)</i>, Philadelphia, PA, United States, 2022, vol. 13182, pp. 1–19.","short":"E. Bartocci, T. Ferrere, T.A. Henzinger, D. Nickovic, A.O. Da Costa, in:, Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), Springer Nature, 2022, pp. 1–19.","ama":"Bartocci E, Ferrere T, Henzinger TA, Nickovic D, Da Costa AO. Flavors of sequential information flow. In: <i>Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)</i>. Vol 13182. Springer Nature; 2022:1-19. doi:<a href=\"https://doi.org/10.1007/978-3-030-94583-1_1\">10.1007/978-3-030-94583-1_1</a>","apa":"Bartocci, E., Ferrere, T., Henzinger, T. A., Nickovic, D., &#38; Da Costa, A. O. (2022). Flavors of sequential information flow. In <i>Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)</i> (Vol. 
13182, pp. 1–19). Philadelphia, PA, United States: Springer Nature. <a href=\"https://doi.org/10.1007/978-3-030-94583-1_1\">https://doi.org/10.1007/978-3-030-94583-1_1</a>","ista":"Bartocci E, Ferrere T, Henzinger TA, Nickovic D, Da Costa AO. 2022. Flavors of sequential information flow. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics). VMCAI: Verification, Model Checking, and Abstract Interpretation, LNCS, vol. 13182, 1–19.","mla":"Bartocci, Ezio, et al. “Flavors of Sequential Information Flow.” <i>Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)</i>, vol. 13182, Springer Nature, 2022, pp. 1–19, doi:<a href=\"https://doi.org/10.1007/978-3-030-94583-1_1\">10.1007/978-3-030-94583-1_1</a>."},"intvolume":"     13182","external_id":{"arxiv":["2105.02013"]},"status":"public","alternative_title":["LNCS"],"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2105.02013","open_access":"1"}],"date_published":"2022-01-14T00:00:00Z","publication_status":"published","oa":1,"scopus_import":"1","article_processing_charge":"No","publication":"Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)","department":[{"_id":"ToHe"}],"publisher":"Springer Nature","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","arxiv":1,"title":"Flavors of sequential information flow","day":"14","author":[{"full_name":"Bartocci, Ezio","last_name":"Bartocci","first_name":"Ezio"},{"id":"40960E6E-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-5199-3143","full_name":"Ferrere, Thomas","first_name":"Thomas","last_name":"Ferrere"},{"orcid":"0000-0002-2985-7724","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","full_name":"Henzinger, Thomas A","first_name":"Thomas A","last_name":"Henzinger"},{"id":"41BCEE5C-F248-11E8-B48F-1D18A9856A87","full_name":"Nickovic, 
Dejan","last_name":"Nickovic","first_name":"Dejan"},{"full_name":"Da Costa, Ana Oliveira","first_name":"Ana Oliveira","last_name":"Da Costa"}],"language":[{"iso":"eng"}],"conference":{"name":"VMCAI: Verification, Model Checking, and Abstract Interpretation","location":"Philadelphia, PA, United States","start_date":"2022-01-16","end_date":"2022-01-18"},"project":[{"name":"The Wittgenstein Prize","_id":"25F42A32-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","grant_number":"Z211"}],"quality_controlled":"1","doi":"10.1007/978-3-030-94583-1_1","publication_identifier":{"issn":["03029743"],"eissn":["16113349"],"isbn":["9783030945824"]}},{"date_published":"2022-02-22T00:00:00Z","publication_status":"published","intvolume":"     13124","citation":{"ieee":"T. A. Henzinger, “Quantitative monitoring of software,” in <i>Software Verification</i>, New Haven, CT, United States, 2022, vol. 13124, pp. 3–6.","chicago":"Henzinger, Thomas A. “Quantitative Monitoring of Software.” In <i>Software Verification</i>, 13124:3–6. LNCS. Springer Nature, 2022. <a href=\"https://doi.org/10.1007/978-3-030-95561-8_1\">https://doi.org/10.1007/978-3-030-95561-8_1</a>.","short":"T.A. Henzinger, in:, Software Verification, Springer Nature, 2022, pp. 3–6.","ama":"Henzinger TA. Quantitative monitoring of software. In: <i>Software Verification</i>. Vol 13124. LNCS. Springer Nature; 2022:3-6. doi:<a href=\"https://doi.org/10.1007/978-3-030-95561-8_1\">10.1007/978-3-030-95561-8_1</a>","mla":"Henzinger, Thomas A. “Quantitative Monitoring of Software.” <i>Software Verification</i>, vol. 13124, Springer Nature, 2022, pp. 3–6, doi:<a href=\"https://doi.org/10.1007/978-3-030-95561-8_1\">10.1007/978-3-030-95561-8_1</a>.","ista":"Henzinger TA. 2022. Quantitative monitoring of software. Software Verification. NSV: Numerical Software Verification, LNCS, vol. 13124, 3–6.","apa":"Henzinger, T. A. (2022). Quantitative monitoring of software. In <i>Software Verification</i> (Vol. 13124, pp. 3–6). 
New Haven, CT, United States: Springer Nature. <a href=\"https://doi.org/10.1007/978-3-030-95561-8_1\">https://doi.org/10.1007/978-3-030-95561-8_1</a>"},"external_id":{"isi":["000771713200001"]},"status":"public","volume":13124,"date_created":"2022-03-20T23:01:40Z","page":"3-6","type":"conference","month":"02","oa_version":"None","date_updated":"2023-08-03T06:11:55Z","abstract":[{"lang":"eng","text":"We present a formal framework for the online black-box monitoring of software using monitors with quantitative verdict functions. Quantitative verdict functions have several advantages. First, quantitative monitors can be approximate, i.e., the value of the verdict function does not need to correspond exactly to the value of the property under observation. Second, quantitative monitors can be quantified universally, i.e., for every possible observed behavior, the monitor tries to make the best effort to estimate the value of the property under observation. Third, quantitative monitors can watch boolean as well as quantitative properties, such as average response time. Fourth, quantitative monitors can use non-finite-state resources, such as counters. As a consequence, quantitative monitors can be compared according to how many resources they use (e.g., the number of counters) and how precisely they approximate the property under observation. This allows for a rich spectrum of cost-precision trade-offs in monitoring software."}],"_id":"10891","acknowledgement":"The formal framework for quantitative monitoring which is presented in this invited talk was defined jointly with N. Ege Saraç at LICS 2021. 
This work was supported in part by the Wittgenstein Award Z211-N23 of the Austrian Science Fund.","year":"2022","doi":"10.1007/978-3-030-95561-8_1","quality_controlled":"1","publication_identifier":{"issn":["0302-9743"],"isbn":["9783030955601"],"eissn":["1611-3349"]},"conference":{"end_date":"2021-10-19","location":"New Haven, CT, United States","name":"NSV: Numerical Software Verification","start_date":"2021-10-18"},"isi":1,"series_title":"LNCS","language":[{"iso":"eng"}],"project":[{"grant_number":"Z211","name":"The Wittgenstein Prize","call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425"}],"title":"Quantitative monitoring of software","author":[{"full_name":"Henzinger, Thomas A","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-2985-7724","last_name":"Henzinger","first_name":"Thomas A"}],"day":"22","publication":"Software Verification","scopus_import":"1","article_processing_charge":"No","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","publisher":"Springer Nature","department":[{"_id":"ToHe"}]},{"related_material":{"record":[{"id":"11366","status":"public","relation":"part_of_dissertation"},{"relation":"part_of_dissertation","id":"7808","status":"public"},{"id":"10666","status":"public","relation":"part_of_dissertation"},{"id":"10665","status":"public","relation":"part_of_dissertation"},{"id":"10667","status":"public","relation":"part_of_dissertation"}]},"citation":{"apa":"Lechner, M. (2022). <i>Learning verifiable representations</i>. Institute of Science and Technology Austria. <a href=\"https://doi.org/10.15479/at:ista:11362\">https://doi.org/10.15479/at:ista:11362</a>","ista":"Lechner M. 2022. Learning verifiable representations. Institute of Science and Technology Austria.","mla":"Lechner, Mathias. <i>Learning Verifiable Representations</i>. Institute of Science and Technology Austria, 2022, doi:<a href=\"https://doi.org/10.15479/at:ista:11362\">10.15479/at:ista:11362</a>.","ama":"Lechner M. Learning verifiable representations. 
2022. doi:<a href=\"https://doi.org/10.15479/at:ista:11362\">10.15479/at:ista:11362</a>","short":"M. Lechner, Learning Verifiable Representations, Institute of Science and Technology Austria, 2022.","chicago":"Lechner, Mathias. “Learning Verifiable Representations.” Institute of Science and Technology Austria, 2022. <a href=\"https://doi.org/10.15479/at:ista:11362\">https://doi.org/10.15479/at:ista:11362</a>.","ieee":"M. Lechner, “Learning verifiable representations,” Institute of Science and Technology Austria, 2022."},"alternative_title":["ISTA Thesis"],"status":"public","date_published":"2022-05-12T00:00:00Z","ddc":["004"],"supervisor":[{"last_name":"Henzinger","first_name":"Thomas A","full_name":"Henzinger, Thomas A","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-2985-7724"}],"oa":1,"publication_status":"published","has_accepted_license":"1","_id":"11362","year":"2022","file_date_updated":"2022-05-17T15:19:39Z","date_created":"2022-05-12T07:14:01Z","page":"124","month":"05","type":"dissertation","oa_version":"Published Version","abstract":[{"lang":"eng","text":"Deep learning has enabled breakthroughs in challenging computing problems and has emerged as the standard problem-solving tool for computer vision and natural language processing tasks.\r\nOne exception to this trend is safety-critical tasks where robustness and resilience requirements contradict the black-box nature of neural networks. \r\nTo deploy deep learning methods for these tasks, it is vital to provide guarantees on neural network agents' safety and robustness criteria. 
\r\nThis can be achieved by developing formal verification methods to verify the safety and robustness properties of neural networks.\r\n\r\nOur goal is to design, develop and assess safety verification methods for neural networks to improve their reliability and trustworthiness in real-world applications.\r\nThis thesis establishes techniques for the verification of compressed and adversarially trained models as well as the design of novel neural networks for verifiably safe decision-making.\r\n\r\nFirst, we establish the problem of verifying quantized neural networks. Quantization is a technique that trades numerical precision for the computational efficiency of running a neural network and is widely adopted in industry.\r\nWe show that neglecting the reduced precision when verifying a neural network can lead to wrong conclusions about the robustness and safety of the network, highlighting that novel techniques for quantized network verification are necessary. We introduce several bit-exact verification methods explicitly designed for quantized neural networks and experimentally confirm on realistic networks that the network's robustness and other formal properties are affected by the quantization.\r\n\r\nFurthermore, we perform a case study providing evidence that adversarial training, a standard technique for making neural networks more robust, has detrimental effects on the network's performance. This robustness-accuracy tradeoff has been studied before regarding the accuracy obtained on classification datasets where each data point is independent of all other data points. 
On the other hand, we investigate the tradeoff empirically in robot learning settings where both a high accuracy and a high robustness are desirable.\r\nOur results suggest that the negative side-effects of adversarial training outweigh its robustness benefits in practice.\r\n\r\nFinally, we consider the problem of verifying safety when running a Bayesian neural network policy in a feedback loop with systems over the infinite time horizon. Bayesian neural networks are probabilistic models for learning uncertainties in the data and are therefore often used in robotic and healthcare applications where data is inherently stochastic.\r\nWe introduce a method for recalibrating Bayesian neural networks so that they yield probability distributions over safe decisions only.\r\nOur method learns a safety certificate that guarantees safety over the infinite time horizon to determine which decisions are safe in every possible state of the system.\r\nWe demonstrate the effectiveness of our approach on a series of reinforcement learning benchmarks."}],"date_updated":"2025-07-14T09:10:11Z","keyword":["neural networks","verification","machine learning"],"language":[{"iso":"eng"}],"project":[{"call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425","name":"The Wittgenstein Prize","grant_number":"Z211"},{"grant_number":"101020093","_id":"62781420-2b32-11ec-9570-8d9b63373d4d","call_identifier":"H2020","name":"Vigilant Algorithmic Monitoring of Software"}],"doi":"10.15479/at:ista:11362","publication_identifier":{"isbn":["978-3-99078-017-6"]},"tmp":{"short":"CC BY-ND (4.0)","name":"Creative Commons Attribution-NoDerivatives 4.0 International (CC BY-ND 4.0)","image":"/images/cc_by_nd.png","legal_code_url":"https://creativecommons.org/licenses/by-nd/4.0/legalcode"},"ec_funded":1,"article_processing_charge":"No","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","publisher":"Institute of Science and Technology 
Austria","department":[{"_id":"GradSch"},{"_id":"ToHe"}],"title":"Learning verifiable representations","degree_awarded":"PhD","author":[{"first_name":"Mathias","last_name":"Lechner","id":"3DC22916-F248-11E8-B48F-1D18A9856A87","full_name":"Lechner, Mathias"}],"file":[{"file_name":"src.zip","creator":"mlechner","file_size":13210143,"relation":"source_file","content_type":"application/zip","checksum":"8eefa9c7c10ca7e1a2ccdd731962a645","file_id":"11378","date_updated":"2022-05-13T12:49:00Z","access_level":"closed","date_created":"2022-05-13T12:33:26Z"},{"access_level":"open_access","date_created":"2022-05-16T08:02:28Z","checksum":"1b9e1e5a9a83ed9d89dad2f5133dc026","date_updated":"2022-05-17T15:19:39Z","file_id":"11382","creator":"mlechner","content_type":"application/pdf","relation":"main_file","file_size":2732536,"file_name":"thesis_main-a2.pdf"}],"license":"https://creativecommons.org/licenses/by-nd/4.0/","day":"12"},{"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2103.04909","open_access":"1"}],"date_published":"2022-07-12T00:00:00Z","oa":1,"publication_status":"published","citation":{"apa":"Brunnbauer, A., Berducci, L., Brandstatter, A., Lechner, M., Hasani, R., Rus, D., &#38; Grosu, R. (2022). Latent imagination facilitates zero-shot transfer in autonomous racing. In <i>2022 International Conference on Robotics and Automation</i> (pp. 7513–7520). Philadelphia, PA, United States: IEEE. <a href=\"https://doi.org/10.1109/ICRA46639.2022.9811650\">https://doi.org/10.1109/ICRA46639.2022.9811650</a>","ista":"Brunnbauer A, Berducci L, Brandstatter A, Lechner M, Hasani R, Rus D, Grosu R. 2022. Latent imagination facilitates zero-shot transfer in autonomous racing. 2022 International Conference on Robotics and Automation. ICRA: International Conference on Robotics and Automation, 7513–7520.","mla":"Brunnbauer, Axel, et al. 
“Latent Imagination Facilitates Zero-Shot Transfer in Autonomous Racing.” <i>2022 International Conference on Robotics and Automation</i>, IEEE, 2022, pp. 7513–20, doi:<a href=\"https://doi.org/10.1109/ICRA46639.2022.9811650\">10.1109/ICRA46639.2022.9811650</a>.","ama":"Brunnbauer A, Berducci L, Brandstatter A, et al. Latent imagination facilitates zero-shot transfer in autonomous racing. In: <i>2022 International Conference on Robotics and Automation</i>. IEEE; 2022:7513-7520. doi:<a href=\"https://doi.org/10.1109/ICRA46639.2022.9811650\">10.1109/ICRA46639.2022.9811650</a>","short":"A. Brunnbauer, L. Berducci, A. Brandstatter, M. Lechner, R. Hasani, D. Rus, R. Grosu, in:, 2022 International Conference on Robotics and Automation, IEEE, 2022, pp. 7513–7520.","chicago":"Brunnbauer, Axel, Luigi Berducci, Andreas Brandstatter, Mathias Lechner, Ramin Hasani, Daniela Rus, and Radu Grosu. “Latent Imagination Facilitates Zero-Shot Transfer in Autonomous Racing.” In <i>2022 International Conference on Robotics and Automation</i>, 7513–20. IEEE, 2022. <a href=\"https://doi.org/10.1109/ICRA46639.2022.9811650\">https://doi.org/10.1109/ICRA46639.2022.9811650</a>.","ieee":"A. Brunnbauer <i>et al.</i>, “Latent imagination facilitates zero-shot transfer in autonomous racing,” in <i>2022 International Conference on Robotics and Automation</i>, Philadelphia, PA, United States, 2022, pp. 7513–7520."},"external_id":{"arxiv":["2103.04909"]},"status":"public","date_created":"2022-09-04T22:02:02Z","oa_version":"Preprint","type":"conference","month":"07","abstract":[{"text":"World models learn behaviors in a latent imagination space to enhance the sample-efficiency of deep reinforcement learning (RL) algorithms. While learning world models for high-dimensional observations (e.g., pixel inputs) has become practicable on standard RL benchmarks and some games, their effectiveness in real-world robotics applications has not been explored. 
In this paper, we investigate how such agents generalize to real-world autonomous vehicle control tasks, where advanced model-free deep RL algorithms fail. In particular, we set up a series of time-lap tasks for an F1TENTH racing robot, equipped with a high-dimensional LiDAR sensor, on a set of test tracks with a gradual increase in their complexity. In this continuous-control setting, we show that model-based agents capable of learning in imagination substantially outperform model-free agents with respect to performance, sample efficiency, successful task completion, and generalization. Moreover, we show that the generalization ability of model-based agents strongly depends on the choice of their observation model. We provide extensive empirical evidence for the effectiveness of world models provided with long enough memory horizons in sim2real tasks.","lang":"eng"}],"date_updated":"2022-09-05T08:46:12Z","page":"7513-7520","_id":"12010","year":"2022","acknowledgement":"L.B. was supported by the Doctoral College Resilient Embedded Systems. M.L. was supported in part by the ERC2020-AdG 101020093 and the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award). R.H. and D.R. were supported by The Boeing Company and the Office of Naval Research (ONR) Grant N00014-18-1-2830. R.G. was partially supported by the Horizon-2020 ECSEL Project grant No. 783163 (iDev40) and A.B. 
by FFG Project ADEX.","quality_controlled":"1","doi":"10.1109/ICRA46639.2022.9811650","publication_identifier":{"issn":["1050-4729"],"isbn":["9781728196817"]},"language":[{"iso":"eng"}],"conference":{"start_date":"2022-05-23","name":"ICRA: International Conference on Robotics and Automation","location":"Philadelphia, PA, United States","end_date":"2022-05-27"},"project":[{"call_identifier":"H2020","_id":"62781420-2b32-11ec-9570-8d9b63373d4d","name":"Vigilant Algorithmic Monitoring of Software","grant_number":"101020093"},{"_id":"25F42A32-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","name":"The Wittgenstein Prize","grant_number":"Z211"}],"arxiv":1,"title":"Latent imagination facilitates zero-shot transfer in autonomous racing","day":"12","author":[{"full_name":"Brunnbauer, Axel","first_name":"Axel","last_name":"Brunnbauer"},{"first_name":"Luigi","last_name":"Berducci","full_name":"Berducci, Luigi"},{"full_name":"Brandstatter, Andreas","last_name":"Brandstatter","first_name":"Andreas"},{"id":"3DC22916-F248-11E8-B48F-1D18A9856A87","full_name":"Lechner, Mathias","last_name":"Lechner","first_name":"Mathias"},{"last_name":"Hasani","first_name":"Ramin","full_name":"Hasani, Ramin"},{"first_name":"Daniela","last_name":"Rus","full_name":"Rus, Daniela"},{"last_name":"Grosu","first_name":"Radu","full_name":"Grosu, Radu"}],"article_processing_charge":"No","scopus_import":"1","ec_funded":1,"publication":"2022 International Conference on Robotics and Automation","department":[{"_id":"ToHe"}],"publisher":"IEEE","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87"},{"title":"Closed-form continuous-time neural networks","arxiv":1,"author":[{"first_name":"Ramin","last_name":"Hasani","full_name":"Hasani, Ramin"},{"full_name":"Lechner, Mathias","id":"3DC22916-F248-11E8-B48F-1D18A9856A87","first_name":"Mathias","last_name":"Lechner"},{"last_name":"Amini","first_name":"Alexander","full_name":"Amini, Alexander"},{"full_name":"Liebenwein, 
Lucas","last_name":"Liebenwein","first_name":"Lucas"},{"full_name":"Ray, Aaron","first_name":"Aaron","last_name":"Ray"},{"last_name":"Tschaikowski","first_name":"Max","full_name":"Tschaikowski, Max"},{"first_name":"Gerald","last_name":"Teschl","full_name":"Teschl, Gerald"},{"first_name":"Daniela","last_name":"Rus","full_name":"Rus, Daniela"}],"day":"15","file":[{"file_size":3259553,"content_type":"application/pdf","relation":"main_file","creator":"dernst","file_name":"2022_NatureMachineIntelligence_Hasani.pdf","success":1,"date_created":"2023-01-24T09:49:44Z","access_level":"open_access","file_id":"12355","date_updated":"2023-01-24T09:49:44Z","checksum":"b4789122ce04bfb4ac042390f59aaa8b"}],"publication":"Nature Machine Intelligence","scopus_import":"1","article_processing_charge":"No","article_type":"original","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","publisher":"Springer Nature","department":[{"_id":"ToHe"}],"doi":"10.1038/s42256-022-00556-7","quality_controlled":"1","publication_identifier":{"issn":["2522-5839"]},"isi":1,"keyword":["Artificial Intelligence","Computer Networks and Communications","Computer Vision and Pattern Recognition","Human-Computer Interaction","Software"],"language":[{"iso":"eng"}],"issue":"11","project":[{"grant_number":"Z211","_id":"25F42A32-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","name":"The Wittgenstein Prize"}],"volume":4,"file_date_updated":"2023-01-24T09:49:44Z","date_created":"2023-01-12T12:07:21Z","page":"992-1003","abstract":[{"text":"Continuous-time neural networks are a class of machine learning systems that can tackle representation learning on spatiotemporal decision-making tasks. These models are typically represented by continuous differential equations. 
However, their expressive power when they are deployed on computers is bottlenecked by numerical differential equation solvers. This limitation has notably slowed down the scaling and understanding of numerous natural physical phenomena such as the dynamics of nervous systems. Ideally, we would circumvent this bottleneck by solving the given dynamical system in closed form. This is known to be intractable in general. Here, we show that it is possible to closely approximate the interaction between neurons and synapses—the building blocks of natural and artificial neural networks—constructed by liquid time-constant networks efficiently in closed form. To this end, we compute a tightly bounded approximation of the solution of an integral appearing in liquid time-constant dynamics that has had no known closed-form solution so far. This closed-form solution impacts the design of continuous-time and continuous-depth neural models. For instance, since time appears explicitly in closed form, the formulation relaxes the need for complex numerical solvers. Consequently, we obtain models that are between one and five orders of magnitude faster in training and inference compared with differential equation-based counterparts. More importantly, in contrast to ordinary differential equation-based continuous networks, closed-form networks can scale remarkably well compared with other deep learning instances. 
Lastly, as these models are derived from liquid networks, they show good performance in time-series modelling compared with advanced recurrent neural network models.","lang":"eng"}],"date_updated":"2023-08-04T09:00:10Z","oa_version":"Published Version","month":"11","type":"journal_article","_id":"12147","acknowledgement":"This research was supported in part by the AI2050 program at Schmidt Futures (grant G-22-63172), the Boeing Company, and the United States Air Force Research Laboratory and the United States Air Force Artificial Intelligence Accelerator and was accomplished under cooperative agreement number FA8750-19-2-1000. The views and conclusions contained in this document are those of the authors and should not be interpreted as representing the official policies, either expressed or implied, of the United States Air Force or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for Government purposes, notwithstanding any copyright notation herein. This work was further supported by The Boeing Company and Office of Naval Research grant N00014-18-1-2830. M.T. is supported by the Poul Due Jensen Foundation, grant 883901. M.L. was supported in part by the Austrian Science Fund under grant Z211-N23 (Wittgenstein Award). A.A. was supported by the National Science Foundation Graduate Research Fellowship Program. We thank T.-H. Wang, P. Kao, M. Chahine, W. Xiao, X. Li, L. Yin and Y. Ben for useful suggestions and for testing of CfC models to confirm the results across other domains.","year":"2022","ddc":["000"],"date_published":"2022-11-15T00:00:00Z","oa":1,"publication_status":"published","has_accepted_license":"1","intvolume":"         4","related_material":{"link":[{"relation":"erratum","url":"https://doi.org/10.1038/s42256-022-00597-y"}]},"citation":{"ieee":"R. Hasani <i>et al.</i>, “Closed-form continuous-time neural networks,” <i>Nature Machine Intelligence</i>, vol. 4, no. 11. Springer Nature, pp. 
992–1003, 2022.","chicago":"Hasani, Ramin, Mathias Lechner, Alexander Amini, Lucas Liebenwein, Aaron Ray, Max Tschaikowski, Gerald Teschl, and Daniela Rus. “Closed-Form Continuous-Time Neural Networks.” <i>Nature Machine Intelligence</i>. Springer Nature, 2022. <a href=\"https://doi.org/10.1038/s42256-022-00556-7\">https://doi.org/10.1038/s42256-022-00556-7</a>.","short":"R. Hasani, M. Lechner, A. Amini, L. Liebenwein, A. Ray, M. Tschaikowski, G. Teschl, D. Rus, Nature Machine Intelligence 4 (2022) 992–1003.","ama":"Hasani R, Lechner M, Amini A, et al. Closed-form continuous-time neural networks. <i>Nature Machine Intelligence</i>. 2022;4(11):992-1003. doi:<a href=\"https://doi.org/10.1038/s42256-022-00556-7\">10.1038/s42256-022-00556-7</a>","ista":"Hasani R, Lechner M, Amini A, Liebenwein L, Ray A, Tschaikowski M, Teschl G, Rus D. 2022. Closed-form continuous-time neural networks. Nature Machine Intelligence. 4(11), 992–1003.","mla":"Hasani, Ramin, et al. “Closed-Form Continuous-Time Neural Networks.” <i>Nature Machine Intelligence</i>, vol. 4, no. 11, Springer Nature, 2022, pp. 992–1003, doi:<a href=\"https://doi.org/10.1038/s42256-022-00556-7\">10.1038/s42256-022-00556-7</a>.","apa":"Hasani, R., Lechner, M., Amini, A., Liebenwein, L., Ray, A., Tschaikowski, M., … Rus, D. (2022). Closed-form continuous-time neural networks. <i>Nature Machine Intelligence</i>. Springer Nature. 
<a href=\"https://doi.org/10.1038/s42256-022-00556-7\">https://doi.org/10.1038/s42256-022-00556-7</a>"},"status":"public","external_id":{"arxiv":["2106.13898"],"isi":["000884215600003"]}},{"keyword":["General Medicine"],"language":[{"iso":"eng"}],"issue":"6","project":[{"name":"The Wittgenstein Prize","_id":"25F42A32-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","grant_number":"Z211"},{"name":"Vigilant Algorithmic Monitoring of Software","_id":"62781420-2b32-11ec-9570-8d9b63373d4d","call_identifier":"H2020","grant_number":"101020093"}],"doi":"10.1609/aaai.v36i6.20631","quality_controlled":"1","publication_identifier":{"eissn":["2374-3468"],"isbn":["9781577358350"],"issn":["2159-5399"]},"publication":"Proceedings of the AAAI Conference on Artificial Intelligence","ec_funded":1,"article_processing_charge":"No","scopus_import":"1","article_type":"original","publisher":"Association for the Advancement of Artificial Intelligence","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","department":[{"_id":"ToHe"}],"arxiv":1,"title":"GoTube: Scalable statistical verification of continuous-depth models","author":[{"full_name":"Gruenbacher, Sophie A.","first_name":"Sophie A.","last_name":"Gruenbacher"},{"full_name":"Lechner, Mathias","id":"3DC22916-F248-11E8-B48F-1D18A9856A87","first_name":"Mathias","last_name":"Lechner"},{"first_name":"Ramin","last_name":"Hasani","full_name":"Hasani, Ramin"},{"last_name":"Rus","first_name":"Daniela","full_name":"Rus, Daniela"},{"full_name":"Henzinger, Thomas A","orcid":"0000-0002-2985-7724","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","last_name":"Henzinger","first_name":"Thomas A"},{"full_name":"Smolka, Scott A.","first_name":"Scott A.","last_name":"Smolka"},{"last_name":"Grosu","first_name":"Radu","full_name":"Grosu, Radu"}],"day":"28","intvolume":"        36","citation":{"ama":"Gruenbacher SA, Lechner M, Hasani R, et al. GoTube: Scalable statistical verification of continuous-depth models. 
<i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. 2022;36(6):6755-6764. doi:<a href=\"https://doi.org/10.1609/aaai.v36i6.20631\">10.1609/aaai.v36i6.20631</a>","ista":"Gruenbacher SA, Lechner M, Hasani R, Rus D, Henzinger TA, Smolka SA, Grosu R. 2022. GoTube: Scalable statistical verification of continuous-depth models. Proceedings of the AAAI Conference on Artificial Intelligence. 36(6), 6755–6764.","mla":"Gruenbacher, Sophie A., et al. “GoTube: Scalable Statistical Verification of Continuous-Depth Models.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 36, no. 6, Association for the Advancement of Artificial Intelligence, 2022, pp. 6755–64, doi:<a href=\"https://doi.org/10.1609/aaai.v36i6.20631\">10.1609/aaai.v36i6.20631</a>.","apa":"Gruenbacher, S. A., Lechner, M., Hasani, R., Rus, D., Henzinger, T. A., Smolka, S. A., &#38; Grosu, R. (2022). GoTube: Scalable statistical verification of continuous-depth models. <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. Association for the Advancement of Artificial Intelligence. <a href=\"https://doi.org/10.1609/aaai.v36i6.20631\">https://doi.org/10.1609/aaai.v36i6.20631</a>","ieee":"S. A. Gruenbacher <i>et al.</i>, “GoTube: Scalable statistical verification of continuous-depth models,” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 36, no. 6. Association for the Advancement of Artificial Intelligence, pp. 6755–6764, 2022.","chicago":"Gruenbacher, Sophie A., Mathias Lechner, Ramin Hasani, Daniela Rus, Thomas A Henzinger, Scott A. Smolka, and Radu Grosu. “GoTube: Scalable Statistical Verification of Continuous-Depth Models.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. Association for the Advancement of Artificial Intelligence, 2022. <a href=\"https://doi.org/10.1609/aaai.v36i6.20631\">https://doi.org/10.1609/aaai.v36i6.20631</a>.","short":"S.A. Gruenbacher, M. Lechner, R. Hasani, D. Rus, T.A. 
Henzinger, S.A. Smolka, R. Grosu, Proceedings of the AAAI Conference on Artificial Intelligence 36 (2022) 6755–6764."},"status":"public","external_id":{"arxiv":["2107.08467"]},"date_published":"2022-06-28T00:00:00Z","main_file_link":[{"url":"https://arxiv.org/abs/2107.08467","open_access":"1"}],"oa":1,"publication_status":"published","_id":"12510","acknowledgement":"SG is funded by the Austrian Science Fund (FWF) project number W1255-N23. ML and TH are supported in part by FWF under grant Z211-N23 (Wittgenstein Award) and the ERC-2020-AdG 101020093. SS is supported by NSF awards DCL-2040599, CCF-1918225, and CPS-1446832. RH and DR are partially supported by Boeing. RG is partially supported by Horizon-2020 ECSEL Project grant No. 783163 (iDev40).","year":"2022","volume":36,"date_created":"2023-02-05T17:27:42Z","page":"6755-6764","abstract":[{"text":"We introduce a new statistical verification algorithm that formally quantifies the behavioral robustness of any time-continuous process formulated as a continuous-depth model. Our algorithm solves a set of global optimization (Go) problems over a given time horizon to construct a tight enclosure (Tube) of the set of all process executions starting from a ball of initial states. We call our algorithm GoTube. Through its construction, GoTube ensures that the bounding tube is conservative up to a desired probability and up to a desired tightness.\r\n GoTube is implemented in JAX and optimized to scale to complex continuous-depth neural network models. Compared to advanced reachability analysis tools for time-continuous neural networks, GoTube does not accumulate overapproximation errors between time steps and avoids the infamous wrapping effect inherent in symbolic techniques. 
We show that GoTube substantially outperforms state-of-the-art verification tools in terms of the size of the initial ball, speed, time-horizon, task completion, and scalability on a large set of experiments.\r\n GoTube is stable and sets the state-of-the-art in terms of its ability to scale to time horizons well beyond what has been previously possible.","lang":"eng"}],"date_updated":"2023-09-26T10:46:59Z","oa_version":"Preprint","type":"journal_article","month":"06"},{"publication_identifier":{"issn":["2159-5399"],"isbn":["978-1-57735-866-4"],"eissn":["2374-3468"]},"quality_controlled":"1","project":[{"name":"International IST Doctoral Program","_id":"2564DBCA-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"665385"},{"name":"The Wittgenstein Prize","_id":"25F42A32-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","grant_number":"Z211"},{"grant_number":"863818","_id":"0599E47C-7A3F-11EA-A408-12923DDC885E","call_identifier":"H2020","name":"Formal Methods for Stochastic Models: Algorithms and Applications"}],"conference":{"location":"Virtual","name":"AAAI: Association for the Advancement of Artificial Intelligence","start_date":"2021-02-02","end_date":"2021-02-09"},"language":[{"iso":"eng"}],"issue":"5A","author":[{"full_name":"Henzinger, Thomas A","orcid":"0000-0002-2985-7724","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","last_name":"Henzinger","first_name":"Thomas A"},{"first_name":"Mathias","last_name":"Lechner","full_name":"Lechner, Mathias","id":"3DC22916-F248-11E8-B48F-1D18A9856A87"},{"id":"294AA7A6-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-4681-1699","full_name":"Zikelic, 
Dorde","last_name":"Zikelic","first_name":"Dorde"}],"day":"28","file":[{"access_level":"open_access","date_created":"2022-01-26T07:41:16Z","checksum":"2bc8155b2526a70fba5b7301bc89dbd1","date_updated":"2022-01-26T07:41:16Z","file_id":"10684","creator":"mlechner","relation":"main_file","content_type":"application/pdf","file_size":137235,"success":1,"file_name":"16496-Article Text-19990-1-2-20210518 (1).pdf"}],"arxiv":1,"title":"Scalable verification of quantized neural networks","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","publisher":"AAAI Press","department":[{"_id":"GradSch"},{"_id":"ToHe"}],"publication":"Proceedings of the AAAI Conference on Artificial Intelligence","ec_funded":1,"article_processing_charge":"No","scopus_import":"1","publication_status":"published","oa":1,"has_accepted_license":"1","date_published":"2021-05-28T00:00:00Z","ddc":["000"],"main_file_link":[{"url":"https://ojs.aaai.org/index.php/AAAI/article/view/16496","open_access":"1"}],"alternative_title":["Technical Tracks"],"external_id":{"arxiv":["2012.08185"]},"status":"public","related_material":{"record":[{"id":"11362","status":"public","relation":"dissertation_contains"}]},"intvolume":"        35","citation":{"apa":"Henzinger, T. A., Lechner, M., &#38; Zikelic, D. (2021). Scalable verification of quantized neural networks. In <i>Proceedings of the AAAI Conference on Artificial Intelligence</i> (Vol. 35, pp. 3787–3795). Virtual: AAAI Press.","mla":"Henzinger, Thomas A., et al. “Scalable Verification of Quantized Neural Networks.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 35, no. 5A, AAAI Press, 2021, pp. 3787–95.","ista":"Henzinger TA, Lechner M, Zikelic D. 2021. Scalable verification of quantized neural networks. Proceedings of the AAAI Conference on Artificial Intelligence. AAAI: Association for the Advancement of Artificial Intelligence, Technical Tracks, vol. 35, 3787–3795.","ama":"Henzinger TA, Lechner M, Zikelic D. 
Scalable verification of quantized neural networks. In: <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. Vol 35. AAAI Press; 2021:3787-3795.","short":"T.A. Henzinger, M. Lechner, D. Zikelic, in:, Proceedings of the AAAI Conference on Artificial Intelligence, AAAI Press, 2021, pp. 3787–3795.","chicago":"Henzinger, Thomas A, Mathias Lechner, and Dorde Zikelic. “Scalable Verification of Quantized Neural Networks.” In <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, 35:3787–95. AAAI Press, 2021.","ieee":"T. A. Henzinger, M. Lechner, and D. Zikelic, “Scalable verification of quantized neural networks,” in <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, Virtual, 2021, vol. 35, no. 5A, pp. 3787–3795."},"page":"3787-3795","abstract":[{"lang":"eng","text":"Formal verification of neural networks is an active topic of research, and recent advances have significantly increased the size of the networks that verification tools can handle. However, most methods are designed for verification of an idealized model of the actual network which works over real arithmetic and ignores rounding imprecisions. This idealization is in stark contrast to network quantization, which is a technique that trades numerical precision for computational efficiency and is, therefore, often applied in practice. Neglecting rounding errors of such low-bit quantized neural networks has been shown to lead to wrong conclusions about the network’s correctness. Thus, the desired approach for verifying quantized neural networks would be one that takes these rounding errors\r\ninto account. In this paper, we show that verifying the bitexact implementation of quantized neural networks with bitvector specifications is PSPACE-hard, even though verifying idealized real-valued networks and satisfiability of bit-vector specifications alone are each in NP. 
Furthermore, we explore several practical heuristics toward closing the complexity gap between idealized and bit-exact verification. In particular, we propose three techniques for making SMT-based verification of quantized neural networks more scalable. Our experiments demonstrate that our proposed methods allow a speedup of up to three orders of magnitude over existing approaches."}],"date_updated":"2025-07-14T09:10:11Z","month":"05","oa_version":"Published Version","type":"conference","volume":35,"date_created":"2022-01-25T15:15:02Z","file_date_updated":"2022-01-26T07:41:16Z","acknowledgement":"This research was supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein\r\nAward), ERC CoG 863818 (FoRM-SMArt), and the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie Grant Agreement No. 665385.\r\n","year":"2021","_id":"10665"},{"date_published":"2021-01-01T00:00:00Z","ddc":["000"],"main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2103.08187"}],"publication_status":"published","oa":1,"has_accepted_license":"1","related_material":{"record":[{"id":"11362","status":"public","relation":"dissertation_contains"}]},"citation":{"ama":"Lechner M, Hasani R, Grosu R, Rus D, Henzinger TA. Adversarial training is not ready for robot learning. In: <i>2021 IEEE International Conference on Robotics and Automation</i>. ICRA. ; 2021:4140-4147. doi:<a href=\"https://doi.org/10.1109/ICRA48506.2021.9561036\">10.1109/ICRA48506.2021.9561036</a>","apa":"Lechner, M., Hasani, R., Grosu, R., Rus, D., &#38; Henzinger, T. A. (2021). Adversarial training is not ready for robot learning. In <i>2021 IEEE International Conference on Robotics and Automation</i> (pp. 4140–4147). Xi’an, China. <a href=\"https://doi.org/10.1109/ICRA48506.2021.9561036\">https://doi.org/10.1109/ICRA48506.2021.9561036</a>","mla":"Lechner, Mathias, et al. 
“Adversarial Training Is Not Ready for Robot Learning.” <i>2021 IEEE International Conference on Robotics and Automation</i>, 2021, pp. 4140–47, doi:<a href=\"https://doi.org/10.1109/ICRA48506.2021.9561036\">10.1109/ICRA48506.2021.9561036</a>.","ista":"Lechner M, Hasani R, Grosu R, Rus D, Henzinger TA. 2021. Adversarial training is not ready for robot learning. 2021 IEEE International Conference on Robotics and Automation. ICRA: International Conference on Robotics and Automation, ICRA, 4140–4147.","chicago":"Lechner, Mathias, Ramin Hasani, Radu Grosu, Daniela Rus, and Thomas A Henzinger. “Adversarial Training Is Not Ready for Robot Learning.” In <i>2021 IEEE International Conference on Robotics and Automation</i>, 4140–47. ICRA, 2021. <a href=\"https://doi.org/10.1109/ICRA48506.2021.9561036\">https://doi.org/10.1109/ICRA48506.2021.9561036</a>.","ieee":"M. Lechner, R. Hasani, R. Grosu, D. Rus, and T. A. Henzinger, “Adversarial training is not ready for robot learning,” in <i>2021 IEEE International Conference on Robotics and Automation</i>, Xi’an, China, 2021, pp. 4140–4147.","short":"M. Lechner, R. Hasani, R. Grosu, D. Rus, T.A. Henzinger, in:, 2021 IEEE International Conference on Robotics and Automation, 2021, pp. 4140–4147."},"external_id":{"arxiv":["2103.08187"],"isi":["000765738803040"]},"status":"public","date_created":"2022-01-25T15:44:54Z","page":"4140-4147","abstract":[{"text":"Adversarial training is an effective method to train deep learning models that are resilient to norm-bounded perturbations, with the cost of nominal performance drop. While adversarial training appears to enhance the robustness and safety of a deep model deployed in open-world decision-critical applications, counterintuitively, it induces undesired behaviors in robot learning settings. 
In this paper, we show theoretically and experimentally that neural controllers obtained via adversarial training are subjected to three types of defects, namely transient, systematic, and conditional errors. We first generalize adversarial training to a safety-domain optimization scheme allowing for more generic specifications. We then prove that such a learning process tends to cause certain error profiles. We support our theoretical results by a thorough experimental safety analysis in a robot-learning task. Our results suggest that adversarial training is not yet ready for robot learning.","lang":"eng"}],"date_updated":"2023-08-17T06:58:38Z","oa_version":"None","type":"conference","_id":"10666","acknowledgement":"M.L. and T.A.H. are supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award). R.H. and D.R. are supported by Boeing and R.G. by Horizon-2020 ECSEL Project grant no. 783163 (iDev40).","year":"2021","doi":"10.1109/ICRA48506.2021.9561036","quality_controlled":"1","publication_identifier":{"eisbn":["978-1-7281-9077-8"],"eissn":["2577-087X"],"isbn":["978-1-7281-9078-5"],"issn":["1050-4729"]},"isi":1,"series_title":"ICRA","conference":{"start_date":"2021-05-30","name":"ICRA: International Conference on Robotics and Automation","location":"Xi'an, China","end_date":"2021-06-05"},"language":[{"iso":"eng"}],"project":[{"grant_number":"Z211","name":"The Wittgenstein Prize","call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425"}],"arxiv":1,"title":"Adversarial training is not ready for robot learning","author":[{"id":"3DC22916-F248-11E8-B48F-1D18A9856A87","full_name":"Lechner, Mathias","first_name":"Mathias","last_name":"Lechner"},{"full_name":"Hasani, Ramin","last_name":"Hasani","first_name":"Ramin"},{"first_name":"Radu","last_name":"Grosu","full_name":"Grosu, Radu"},{"full_name":"Rus, Daniela","last_name":"Rus","first_name":"Daniela"},{"last_name":"Henzinger","first_name":"Thomas A","full_name":"Henzinger, 
Thomas A","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-2985-7724"}],"license":"https://creativecommons.org/licenses/by-nc-nd/3.0/","publication":"2021 IEEE International Conference on Robotics and Automation","article_processing_charge":"No","tmp":{"name":"Creative Commons Attribution-NonCommercial-NoDerivs 3.0 Unported (CC BY-NC-ND 3.0)","short":"CC BY-NC-ND (3.0)","legal_code_url":"https://creativecommons.org/licenses/by-nc-nd/3.0/legalcode","image":"/images/cc_by_nc_nd.png"},"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","department":[{"_id":"GradSch"},{"_id":"ToHe"}]},{"publication_status":"published","oa":1,"has_accepted_license":"1","ddc":["000"],"date_published":"2021-12-01T00:00:00Z","main_file_link":[{"url":"https://proceedings.neurips.cc/paper/2021/hash/544defa9fddff50c53b71c43e0da72be-Abstract.html","open_access":"1"}],"alternative_title":[" Advances in Neural Information Processing Systems"],"external_id":{"arxiv":["2111.03165"]},"status":"public","related_material":{"record":[{"id":"11362","status":"public","relation":"dissertation_contains"}]},"citation":{"short":"M. Lechner, Ð. Žikelić, K. Chatterjee, T.A. Henzinger, in:, 35th Conference on Neural Information Processing Systems, 2021.","ieee":"M. Lechner, Ð. Žikelić, K. Chatterjee, and T. A. Henzinger, “Infinite time horizon safety of Bayesian neural networks,” in <i>35th Conference on Neural Information Processing Systems</i>, Virtual, 2021.","chicago":"Lechner, Mathias, Ðorđe Žikelić, Krishnendu Chatterjee, and Thomas A Henzinger. “Infinite Time Horizon Safety of Bayesian Neural Networks.” In <i>35th Conference on Neural Information Processing Systems</i>, 2021. <a href=\"https://doi.org/10.48550/arXiv.2111.03165\">https://doi.org/10.48550/arXiv.2111.03165</a>.","mla":"Lechner, Mathias, et al. 
“Infinite Time Horizon Safety of Bayesian Neural Networks.” <i>35th Conference on Neural Information Processing Systems</i>, 2021, doi:<a href=\"https://doi.org/10.48550/arXiv.2111.03165\">10.48550/arXiv.2111.03165</a>.","ista":"Lechner M, Žikelić Ð, Chatterjee K, Henzinger TA. 2021. Infinite time horizon safety of Bayesian neural networks. 35th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems, Advances in Neural Information Processing Systems.","apa":"Lechner, M., Žikelić, Ð., Chatterjee, K., &#38; Henzinger, T. A. (2021). Infinite time horizon safety of Bayesian neural networks. In <i>35th Conference on Neural Information Processing Systems</i>. Virtual. <a href=\"https://doi.org/10.48550/arXiv.2111.03165\">https://doi.org/10.48550/arXiv.2111.03165</a>","ama":"Lechner M, Žikelić Ð, Chatterjee K, Henzinger TA. Infinite time horizon safety of Bayesian neural networks. In: <i>35th Conference on Neural Information Processing Systems</i>. ; 2021. doi:<a href=\"https://doi.org/10.48550/arXiv.2111.03165\">10.48550/arXiv.2111.03165</a>"},"oa_version":"Published Version","type":"conference","month":"12","date_updated":"2025-07-14T09:10:12Z","abstract":[{"text":"Bayesian neural networks (BNNs) place distributions over the weights of a neural network to model uncertainty in the data and the network's prediction. We consider the problem of verifying safety when running a Bayesian neural network policy in a feedback loop with infinite time horizon systems. Compared to the existing sampling-based approaches, which are inapplicable to the infinite time horizon setting, we train a separate deterministic neural network that serves as an infinite time horizon safety certificate. In particular, we show that the certificate network guarantees the safety of the system over a subset of the BNN weight posterior's support. 
Our method first computes a safe weight set and then alters the BNN's weight posterior to reject samples outside this set. Moreover, we show how to extend our approach to a safe-exploration reinforcement learning setting, in order to avoid unsafe trajectories during the training of the policy. We evaluate our approach on a series of reinforcement learning benchmarks, including non-Lyapunovian safety specifications.","lang":"eng"}],"file_date_updated":"2022-01-26T07:39:59Z","date_created":"2022-01-25T15:45:58Z","acknowledgement":"This research was supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award), ERC CoG 863818 (FoRM-SMArt), and the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie Grant Agreement No. 665385.","year":"2021","_id":"10667","doi":"10.48550/arXiv.2111.03165","quality_controlled":"1","project":[{"name":"International IST Doctoral Program","_id":"2564DBCA-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"665385"},{"grant_number":"863818","_id":"0599E47C-7A3F-11EA-A408-12923DDC885E","call_identifier":"H2020","name":"Formal Methods for Stochastic Models: Algorithms and Applications"},{"name":"The Wittgenstein Prize","call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425","grant_number":"Z211"}],"conference":{"end_date":"2021-12-10","name":"NeurIPS: Neural Information Processing Systems","location":"Virtual","start_date":"2021-12-06"},"language":[{"iso":"eng"}],"author":[{"id":"3DC22916-F248-11E8-B48F-1D18A9856A87","full_name":"Lechner, Mathias","first_name":"Mathias","last_name":"Lechner"},{"last_name":"Žikelić","first_name":"Ðorđe","full_name":"Žikelić, Ðorđe"},{"first_name":"Krishnendu","last_name":"Chatterjee","full_name":"Chatterjee, Krishnendu","orcid":"0000-0002-4561-241X","id":"2E5DCA20-F248-11E8-B48F-1D18A9856A87"},{"last_name":"Henzinger","first_name":"Thomas A","full_name":"Henzinger, Thomas 
A","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-2985-7724"}],"day":"01","file":[{"success":1,"file_name":"infinite_time_horizon_safety_o.pdf","content_type":"application/pdf","relation":"main_file","file_size":452492,"creator":"mlechner","date_updated":"2022-01-26T07:39:59Z","file_id":"10682","checksum":"0fc0f852525c10dda9cc9ffea07fb4e4","date_created":"2022-01-26T07:39:59Z","access_level":"open_access"}],"title":"Infinite time horizon safety of Bayesian neural networks","arxiv":1,"user_id":"2EBD1598-F248-11E8-B48F-1D18A9856A87","department":[{"_id":"GradSch"},{"_id":"ToHe"},{"_id":"KrCh"}],"publication":"35th Conference on Neural Information Processing Systems","tmp":{"name":"Creative Commons Attribution-NonCommercial-NoDerivs 3.0 Unported (CC BY-NC-ND 3.0)","short":"CC BY-NC-ND (3.0)","legal_code_url":"https://creativecommons.org/licenses/by-nc-nd/3.0/legalcode","image":"/images/cc_by_nc_nd.png"},"article_processing_charge":"No","ec_funded":1},{"publication":"Proceedings of the 38th International Conference on Machine Learning","article_processing_charge":"No","tmp":{"name":"Creative Commons Attribution-NonCommercial-NoDerivs 3.0 Unported (CC BY-NC-ND 3.0)","short":"CC BY-NC-ND (3.0)","legal_code_url":"https://creativecommons.org/licenses/by-nc-nd/3.0/legalcode","image":"/images/cc_by_nc_nd.png"},"publisher":"ML Research Press","user_id":"2EBD1598-F248-11E8-B48F-1D18A9856A87","department":[{"_id":"GradSch"},{"_id":"ToHe"}],"title":"On-off center-surround receptive fields for accurate and robust image classification","author":[{"full_name":"Babaiee, Zahra","first_name":"Zahra","last_name":"Babaiee"},{"full_name":"Hasani, Ramin","first_name":"Ramin","last_name":"Hasani"},{"id":"3DC22916-F248-11E8-B48F-1D18A9856A87","full_name":"Lechner, Mathias","first_name":"Mathias","last_name":"Lechner"},{"full_name":"Rus, Daniela","last_name":"Rus","first_name":"Daniela"},{"first_name":"Radu","last_name":"Grosu","full_name":"Grosu, 
Radu"}],"day":"01","file":[{"creator":"mlechner","file_size":4246561,"relation":"main_file","content_type":"application/pdf","file_name":"babaiee21a.pdf","success":1,"access_level":"open_access","date_created":"2022-01-26T07:38:32Z","checksum":"d30eae62561bb517d9f978437d7677db","file_id":"10681","date_updated":"2022-01-26T07:38:32Z"}],"conference":{"end_date":"2021-07-24","location":"Virtual","name":"ML: Machine Learning","start_date":"2021-07-18"},"language":[{"iso":"eng"}],"project":[{"grant_number":"Z211","name":"The Wittgenstein Prize","call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425"}],"quality_controlled":"1","publication_identifier":{"issn":["2640-3498"]},"_id":"10668","acknowledgement":"Z.B. is supported by the Doctoral College Resilient Embedded Systems, which is run jointly by the TU Wien’s Faculty of Informatics and the UAS Technikum Wien. R.G. is partially supported by the Horizon 2020 Era-Permed project Persorad, and ECSEL Project grant no. 783163 (iDev40). R.H and D.R were partially supported by Boeing and MIT. M.L. is supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award).","year":"2021","volume":139,"file_date_updated":"2022-01-26T07:38:32Z","date_created":"2022-01-25T15:46:33Z","page":"478-489","abstract":[{"text":"Robustness to variations in lighting conditions is a key objective for any deep vision system. To this end, our paper extends the receptive field of convolutional neural networks with two residual components, ubiquitous in the visual processing system of vertebrates: On-center and off-center pathways, with an excitatory center and inhibitory surround; OOCS for short. The On-center pathway is excited by the presence of a light stimulus in its center, but not in its surround, whereas the Off-center pathway is excited by the absence of a light stimulus in its center, but not in its surround. 
We design OOCS pathways via a difference of Gaussians, with their variance computed analytically from the size of the receptive fields. OOCS pathways complement each other in their response to light stimuli, ensuring this way a strong edge-detection capability, and as a result an accurate and robust inference under challenging lighting conditions. We provide extensive empirical evidence showing that networks supplied with OOCS pathways gain accuracy and illumination-robustness from the novel edge representation, compared to other baselines.","lang":"eng"}],"date_updated":"2022-05-04T15:02:27Z","month":"07","type":"conference","oa_version":"Published Version","intvolume":"       139","citation":{"ista":"Babaiee Z, Hasani R, Lechner M, Rus D, Grosu R. 2021. On-off center-surround receptive fields for accurate and robust image classification. Proceedings of the 38th International Conference on Machine Learning. ML: Machine Learning, PMLR, vol. 139, 478–489.","mla":"Babaiee, Zahra, et al. “On-off Center-Surround Receptive Fields for Accurate and Robust Image Classification.” <i>Proceedings of the 38th International Conference on Machine Learning</i>, vol. 139, ML Research Press, 2021, pp. 478–89.","apa":"Babaiee, Z., Hasani, R., Lechner, M., Rus, D., &#38; Grosu, R. (2021). On-off center-surround receptive fields for accurate and robust image classification. In <i>Proceedings of the 38th International Conference on Machine Learning</i> (Vol. 139, pp. 478–489). Virtual: ML Research Press.","ama":"Babaiee Z, Hasani R, Lechner M, Rus D, Grosu R. On-off center-surround receptive fields for accurate and robust image classification. In: <i>Proceedings of the 38th International Conference on Machine Learning</i>. Vol 139. ML Research Press; 2021:478-489.","short":"Z. Babaiee, R. Hasani, M. Lechner, D. Rus, R. Grosu, in:, Proceedings of the 38th International Conference on Machine Learning, ML Research Press, 2021, pp. 478–489.","ieee":"Z. Babaiee, R. Hasani, M. Lechner, D. 
Rus, and R. Grosu, “On-off center-surround receptive fields for accurate and robust image classification,” in <i>Proceedings of the 38th International Conference on Machine Learning</i>, Virtual, 2021, vol. 139, pp. 478–489.","chicago":"Babaiee, Zahra, Ramin Hasani, Mathias Lechner, Daniela Rus, and Radu Grosu. “On-off Center-Surround Receptive Fields for Accurate and Robust Image Classification.” In <i>Proceedings of the 38th International Conference on Machine Learning</i>, 139:478–89. ML Research Press, 2021."},"alternative_title":["PMLR"],"status":"public","date_published":"2021-07-01T00:00:00Z","ddc":["000"],"main_file_link":[{"url":"https://proceedings.mlr.press/v139/babaiee21a","open_access":"1"}],"oa":1,"publication_status":"published","has_accepted_license":"1"},{"date_published":"2021-05-28T00:00:00Z","ddc":["000"],"main_file_link":[{"url":"https://ojs.aaai.org/index.php/AAAI/article/view/17372","open_access":"1"}],"publication_status":"published","oa":1,"has_accepted_license":"1","intvolume":"        35","citation":{"apa":"Grunbacher, S., Hasani, R., Lechner, M., Cyranka, J., Smolka, S. A., &#38; Grosu, R. (2021). On the verification of neural ODEs with stochastic guarantees. In <i>Proceedings of the AAAI Conference on Artificial Intelligence</i> (Vol. 35, pp. 11525–11535). Virtual: AAAI Press.","ista":"Grunbacher S, Hasani R, Lechner M, Cyranka J, Smolka SA, Grosu R. 2021. On the verification of neural ODEs with stochastic guarantees. Proceedings of the AAAI Conference on Artificial Intelligence. AAAI: Association for the Advancement of Artificial Intelligence, Technical Tracks, vol. 35, 11525–11535.","mla":"Grunbacher, Sophie, et al. “On the Verification of Neural ODEs with Stochastic Guarantees.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 35, no. 13, AAAI Press, 2021, pp. 11525–35.","ama":"Grunbacher S, Hasani R, Lechner M, Cyranka J, Smolka SA, Grosu R. On the verification of neural ODEs with stochastic guarantees. 
In: <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. Vol 35. AAAI Press; 2021:11525-11535.","short":"S. Grunbacher, R. Hasani, M. Lechner, J. Cyranka, S.A. Smolka, R. Grosu, in:, Proceedings of the AAAI Conference on Artificial Intelligence, AAAI Press, 2021, pp. 11525–11535.","chicago":"Grunbacher, Sophie, Ramin Hasani, Mathias Lechner, Jacek Cyranka, Scott A Smolka, and Radu Grosu. “On the Verification of Neural ODEs with Stochastic Guarantees.” In <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, 35:11525–35. AAAI Press, 2021.","ieee":"S. Grunbacher, R. Hasani, M. Lechner, J. Cyranka, S. A. Smolka, and R. Grosu, “On the verification of neural ODEs with stochastic guarantees,” in <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, Virtual, 2021, vol. 35, no. 13, pp. 11525–11535."},"alternative_title":["Technical Tracks"],"status":"public","external_id":{"arxiv":["2012.08863"]},"volume":35,"date_created":"2022-01-25T15:47:20Z","file_date_updated":"2022-01-26T07:38:08Z","page":"11525-11535","oa_version":"Published Version","type":"conference","month":"05","date_updated":"2022-05-24T06:33:14Z","abstract":[{"lang":"eng","text":"We show that Neural ODEs, an emerging class of timecontinuous neural networks, can be verified by solving a set of global-optimization problems. For this purpose, we introduce Stochastic Lagrangian Reachability (SLR), an\r\nabstraction-based technique for constructing a tight Reachtube (an over-approximation of the set of reachable states\r\nover a given time-horizon), and provide stochastic guarantees in the form of confidence intervals for the Reachtube bounds. SLR inherently avoids the infamous wrapping effect (accumulation of over-approximation errors) by performing local optimization steps to expand safe regions instead of repeatedly forward-propagating them as is done by deterministic reachability methods. 
To enable fast local optimizations, we introduce a novel forward-mode adjoint sensitivity method to compute gradients without the need for backpropagation. Finally, we establish asymptotic and non-asymptotic convergence rates for SLR."}],"_id":"10669","acknowledgement":"The authors would like to thank the reviewers for their insightful comments. RH and RG were partially supported by\r\nHorizon-2020 ECSEL Project grant No. 783163 (iDev40). RH was partially supported by Boeing. ML was supported\r\nin part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award). SG was funded by FWF\r\nproject W1255-N23. JC was partially supported by NAWA Polish Returns grant PPN/PPO/2018/1/00029. SS was supported by NSF awards DCL-2040599, CCF-1918225, and CPS-1446832.\r\n","year":"2021","quality_controlled":"1","publication_identifier":{"issn":["2159-5399"],"isbn":["978-1-57735-866-4"],"eissn":["2374-3468"]},"conference":{"end_date":"2021-02-09","location":"Virtual","name":"AAAI: Association for the Advancement of Artificial Intelligence","start_date":"2021-02-02"},"issue":"13","language":[{"iso":"eng"}],"project":[{"grant_number":"Z211","name":"The Wittgenstein Prize","_id":"25F42A32-B435-11E9-9278-68D0E5697425","call_identifier":"FWF"}],"title":"On the verification of neural ODEs with stochastic guarantees","arxiv":1,"author":[{"full_name":"Grunbacher, Sophie","first_name":"Sophie","last_name":"Grunbacher"},{"full_name":"Hasani, Ramin","first_name":"Ramin","last_name":"Hasani"},{"first_name":"Mathias","last_name":"Lechner","full_name":"Lechner, Mathias","id":"3DC22916-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Jacek","last_name":"Cyranka","full_name":"Cyranka, Jacek"},{"full_name":"Smolka, Scott A","first_name":"Scott A","last_name":"Smolka"},{"first_name":"Radu","last_name":"Grosu","full_name":"Grosu, 
Radu"}],"file":[{"date_created":"2022-01-26T07:38:08Z","access_level":"open_access","date_updated":"2022-01-26T07:38:08Z","file_id":"10680","checksum":"468d07041e282a1d46ffdae92f709630","relation":"main_file","content_type":"application/pdf","file_size":286906,"creator":"mlechner","success":1,"file_name":"17372-Article Text-20866-1-2-20210518.pdf"}],"day":"28","publication":"Proceedings of the AAAI Conference on Artificial Intelligence","article_processing_charge":"No","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","publisher":"AAAI Press","department":[{"_id":"GradSch"},{"_id":"ToHe"}]},{"citation":{"chicago":"Vorbach, Charles J, Ramin Hasani, Alexander Amini, Mathias Lechner, and Daniela Rus. “Causal Navigation by Continuous-Time Neural Networks.” In <i>35th Conference on Neural Information Processing Systems</i>, 2021.","ieee":"C. J. Vorbach, R. Hasani, A. Amini, M. Lechner, and D. Rus, “Causal navigation by continuous-time neural networks,” in <i>35th Conference on Neural Information Processing Systems</i>, Virtual, 2021.","short":"C.J. Vorbach, R. Hasani, A. Amini, M. Lechner, D. Rus, in:, 35th Conference on Neural Information Processing Systems, 2021.","ama":"Vorbach CJ, Hasani R, Amini A, Lechner M, Rus D. Causal navigation by continuous-time neural networks. In: <i>35th Conference on Neural Information Processing Systems</i>. ; 2021.","apa":"Vorbach, C. J., Hasani, R., Amini, A., Lechner, M., &#38; Rus, D. (2021). Causal navigation by continuous-time neural networks. In <i>35th Conference on Neural Information Processing Systems</i>. Virtual.","ista":"Vorbach CJ, Hasani R, Amini A, Lechner M, Rus D. 2021. Causal navigation by continuous-time neural networks. 35th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems,  Advances in Neural Information Processing Systems, .","mla":"Vorbach, Charles J., et al. 
“Causal Navigation by Continuous-Time Neural Networks.” <i>35th Conference on Neural Information Processing Systems</i>, 2021."},"alternative_title":[" Advances in Neural Information Processing Systems"],"status":"public","external_id":{"arxiv":["2106.08314"]},"ddc":["000"],"date_published":"2021-12-01T00:00:00Z","main_file_link":[{"open_access":"1","url":"https://proceedings.neurips.cc/paper/2021/hash/67ba02d73c54f0b83c05507b7fb7267f-Abstract.html"}],"publication_status":"published","oa":1,"has_accepted_license":"1","_id":"10670","acknowledgement":"C.V., R.H. A.A. and D.R. are partially supported by Boeing and MIT. A.A. is supported by the National Science Foundation (NSF) Graduate Research Fellowship Program. M.L. is supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award). Research was sponsored by the United States Air Force Research Laboratory and the United States Air Force Artificial Intelligence Accelerator and was accomplished under Cooperative Agreement Number FA8750-19-2-1000. The views and conclusions contained in this document are those of the authors\r\nand should not be interpreted as representing the official policies, either expressed or implied, of the United States Air Force or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for Government purposes notwithstanding any copyright notation herein.\r\n","year":"2021","date_created":"2022-01-25T15:47:50Z","file_date_updated":"2022-01-26T07:37:24Z","month":"12","type":"conference","oa_version":"Published Version","abstract":[{"text":"Imitation learning enables high-fidelity, vision-based learning of policies within rich, photorealistic environments. However, such techniques often rely on traditional discrete-time neural models and face difficulties in generalizing to domain shifts by failing to account for the causal relationships between the agent and the environment. 
In this paper, we propose a theoretical and experimental framework for learning causal representations using continuous-time neural networks, specifically over their discrete-time counterparts. We evaluate our method in the context of visual-control learning of drones over a series of complex tasks, ranging from short- and long-term navigation, to chasing static and dynamic objects through photorealistic environments. Our results demonstrate that causal continuous-time\r\ndeep models can perform robust navigation tasks, where advanced recurrent models fail. These models learn complex causal control representations directly from raw visual inputs and scale to solve a variety of tasks using imitation learning.","lang":"eng"}],"date_updated":"2022-01-26T14:33:31Z","conference":{"end_date":"2021-12-10","name":"NeurIPS: Neural Information Processing Systems","location":"Virtual","start_date":"2021-12-06"},"language":[{"iso":"eng"}],"project":[{"grant_number":"Z211","name":"The Wittgenstein Prize","call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425"}],"quality_controlled":"1","publication":"35th Conference on Neural Information Processing Systems","tmp":{"name":"Creative Commons Attribution-NonCommercial-NoDerivs 3.0 Unported (CC BY-NC-ND 3.0)","short":"CC BY-NC-ND (3.0)","legal_code_url":"https://creativecommons.org/licenses/by-nc-nd/3.0/legalcode","image":"/images/cc_by_nc_nd.png"},"article_processing_charge":"No","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","department":[{"_id":"GradSch"},{"_id":"ToHe"}],"arxiv":1,"title":"Causal navigation by continuous-time neural networks","author":[{"last_name":"Vorbach","first_name":"Charles J","full_name":"Vorbach, Charles J"},{"full_name":"Hasani, Ramin","last_name":"Hasani","first_name":"Ramin"},{"first_name":"Alexander","last_name":"Amini","full_name":"Amini, Alexander"},{"id":"3DC22916-F248-11E8-B48F-1D18A9856A87","full_name":"Lechner, 
Mathias","first_name":"Mathias","last_name":"Lechner"},{"last_name":"Rus","first_name":"Daniela","full_name":"Rus, Daniela"}],"day":"01","file":[{"access_level":"open_access","date_created":"2022-01-26T07:37:24Z","checksum":"be81f0ade174a8c9b2d4fe09590b2021","file_id":"10679","date_updated":"2022-01-26T07:37:24Z","creator":"mlechner","file_size":6841228,"content_type":"application/pdf","relation":"main_file","file_name":"NeurIPS-2021-causal-navigation-by-continuous-time-neural-networks-Paper.pdf","success":1}]},{"alternative_title":["Technical Tracks"],"status":"public","external_id":{"arxiv":["2006.04439"]},"intvolume":"        35","citation":{"apa":"Hasani, R., Lechner, M., Amini, A., Rus, D., &#38; Grosu, R. (2021). Liquid time-constant networks. In <i>Proceedings of the AAAI Conference on Artificial Intelligence</i> (Vol. 35, pp. 7657–7666). Virtual: AAAI Press.","ista":"Hasani R, Lechner M, Amini A, Rus D, Grosu R. 2021. Liquid time-constant networks. Proceedings of the AAAI Conference on Artificial Intelligence. AAAI: Association for the Advancement of Artificial Intelligence, Technical Tracks, vol. 35, 7657–7666.","mla":"Hasani, Ramin, et al. “Liquid Time-Constant Networks.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 35, no. 9, AAAI Press, 2021, pp. 7657–66.","ama":"Hasani R, Lechner M, Amini A, Rus D, Grosu R. Liquid time-constant networks. In: <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. Vol 35. AAAI Press; 2021:7657-7666.","short":"R. Hasani, M. Lechner, A. Amini, D. Rus, R. Grosu, in:, Proceedings of the AAAI Conference on Artificial Intelligence, AAAI Press, 2021, pp. 7657–7666.","chicago":"Hasani, Ramin, Mathias Lechner, Alexander Amini, Daniela Rus, and Radu Grosu. “Liquid Time-Constant Networks.” In <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, 35:7657–66. AAAI Press, 2021.","ieee":"R. Hasani, M. Lechner, A. Amini, D. Rus, and R. 
Grosu, “Liquid time-constant networks,” in <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, Virtual, 2021, vol. 35, no. 9, pp. 7657–7666."},"oa":1,"publication_status":"published","has_accepted_license":"1","ddc":["000"],"date_published":"2021-05-28T00:00:00Z","main_file_link":[{"open_access":"1","url":"https://ojs.aaai.org/index.php/AAAI/article/view/16936"}],"acknowledgement":"R.H. and D.R. are partially supported by Boeing. R.H. and R.G. were partially supported by the Horizon-2020 ECSEL\r\nProject grant No. 783163 (iDev40). M.L. was supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award). A.A. is supported by the National Science Foundation (NSF) Graduate Research Fellowship Program. This research work is partially drawn from the PhD dissertation of R.H.","year":"2021","_id":"10671","page":"7657-7666","month":"05","type":"conference","oa_version":"Published Version","abstract":[{"lang":"eng","text":"We introduce a new class of time-continuous recurrent neural network models. Instead of declaring a learning system’s dynamics by implicit nonlinearities, we construct networks of linear first-order dynamical systems modulated via nonlinear interlinked gates. The resulting models represent dynamical systems with varying (i.e., liquid) time-constants coupled to their hidden state, with outputs being computed by numerical differential equation solvers. These neural networks exhibit stable and bounded behavior, yield superior expressivity within the family of neural ordinary differential equations, and give rise to improved performance on time-series prediction tasks. To demonstrate these properties, we first take a theoretical approach to find bounds over their dynamics, and compute their expressive power by the trajectory length measure in a latent trajectory space. 
We then conduct a series of time-series prediction experiments to manifest the approximation capability of Liquid Time-Constant Networks (LTCs) compared to classical and modern RNNs."}],"date_updated":"2022-05-24T06:36:54Z","volume":35,"file_date_updated":"2022-01-26T07:36:03Z","date_created":"2022-01-25T15:48:36Z","project":[{"call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425","name":"The Wittgenstein Prize","grant_number":"Z211"}],"conference":{"location":"Virtual","name":"AAAI: Association for the Advancement of Artificial Intelligence","start_date":"2021-02-02","end_date":"2021-02-09"},"issue":"9","language":[{"iso":"eng"}],"publication_identifier":{"issn":["2159-5399"],"eissn":["2374-3468"],"isbn":["978-1-57735-866-4"]},"quality_controlled":"1","publisher":"AAAI Press","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","department":[{"_id":"GradSch"},{"_id":"ToHe"}],"publication":"Proceedings of the AAAI Conference on Artificial Intelligence","article_processing_charge":"No","author":[{"full_name":"Hasani, Ramin","first_name":"Ramin","last_name":"Hasani"},{"full_name":"Lechner, Mathias","id":"3DC22916-F248-11E8-B48F-1D18A9856A87","last_name":"Lechner","first_name":"Mathias"},{"full_name":"Amini, Alexander","last_name":"Amini","first_name":"Alexander"},{"last_name":"Rus","first_name":"Daniela","full_name":"Rus, Daniela"},{"full_name":"Grosu, Radu","last_name":"Grosu","first_name":"Radu"}],"day":"28","file":[{"creator":"mlechner","file_size":4302669,"content_type":"application/pdf","relation":"main_file","file_name":"16936-Article Text-20430-1-2-20210518 (1).pdf","success":1,"access_level":"open_access","date_created":"2022-01-26T07:36:03Z","checksum":"0f06995fba06dbcfa7ed965fc66027ff","file_id":"10678","date_updated":"2022-01-26T07:36:03Z"}],"arxiv":1,"title":"Liquid time-constant networks"},{"project":[{"call_identifier":"FWF","_id":"264B3912-B435-11E9-9278-68D0E5697425","name":"Formal Methods meets Algorithmic Game 
Theory","grant_number":"M02369"},{"grant_number":"S11402-N23","name":"Rigorous Systems Engineering","_id":"25F2ACDE-B435-11E9-9278-68D0E5697425","call_identifier":"FWF"},{"grant_number":"Z211","name":"The Wittgenstein Prize","call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425"}],"language":[{"iso":"eng"}],"issue":"1","isi":1,"keyword":["computer science","computer science and game theory","logic in computer science"],"publication_identifier":{"eissn":["1860-5974"]},"quality_controlled":"1","doi":"10.23638/LMCS-17(1:10)2021","department":[{"_id":"ToHe"}],"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","publisher":"International Federation for Computational Logic","article_processing_charge":"No","scopus_import":"1","article_type":"original","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"publication":"Logical Methods in Computer Science","file":[{"creator":"alisjak","relation":"main_file","content_type":"application/pdf","file_size":819878,"success":1,"file_name":"2021_LMCS_AGHAJOHAR.pdf","access_level":"open_access","date_created":"2022-01-26T08:04:50Z","checksum":"b35586a50ed1ca8f44767de116d18d81","date_updated":"2022-01-26T08:04:50Z","file_id":"10690"}],"day":"03","author":[{"full_name":"Aghajohari, Milad","first_name":"Milad","last_name":"Aghajohari"},{"full_name":"Avni, Guy","orcid":"0000-0001-5588-8287","id":"463C8BC2-F248-11E8-B48F-1D18A9856A87","first_name":"Guy","last_name":"Avni"},{"id":"40876CD8-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-2985-7724","full_name":"Henzinger, Thomas A","last_name":"Henzinger","first_name":"Thomas A"}],"title":"Determinacy in discrete-bidding infinite-duration games","arxiv":1,"external_id":{"arxiv":["1905.03588"],"isi":["000658724600010"]},"status":"public","citation":{"short":"M. Aghajohari, G. Avni, T.A. 
Henzinger, Logical Methods in Computer Science 17 (2021) 10:1-10:23.","ieee":"M. Aghajohari, G. Avni, and T. A. Henzinger, “Determinacy in discrete-bidding infinite-duration games,” <i>Logical Methods in Computer Science</i>, vol. 17, no. 1. International Federation for Computational Logic, p. 10:1-10:23, 2021.","chicago":"Aghajohari, Milad, Guy Avni, and Thomas A Henzinger. “Determinacy in Discrete-Bidding Infinite-Duration Games.” <i>Logical Methods in Computer Science</i>. International Federation for Computational Logic, 2021. <a href=\"https://doi.org/10.23638/LMCS-17(1:10)2021\">https://doi.org/10.23638/LMCS-17(1:10)2021</a>.","mla":"Aghajohari, Milad, et al. “Determinacy in Discrete-Bidding Infinite-Duration Games.” <i>Logical Methods in Computer Science</i>, vol. 17, no. 1, International Federation for Computational Logic, 2021, p. 10:1-10:23, doi:<a href=\"https://doi.org/10.23638/LMCS-17(1:10)2021\">10.23638/LMCS-17(1:10)2021</a>.","ista":"Aghajohari M, Avni G, Henzinger TA. 2021. Determinacy in discrete-bidding infinite-duration games. Logical Methods in Computer Science. 17(1), 10:1-10:23.","apa":"Aghajohari, M., Avni, G., &#38; Henzinger, T. A. (2021). Determinacy in discrete-bidding infinite-duration games. <i>Logical Methods in Computer Science</i>. International Federation for Computational Logic. <a href=\"https://doi.org/10.23638/LMCS-17(1:10)2021\">https://doi.org/10.23638/LMCS-17(1:10)2021</a>","ama":"Aghajohari M, Avni G, Henzinger TA. Determinacy in discrete-bidding infinite-duration games. <i>Logical Methods in Computer Science</i>. 2021;17(1):10:1-10:23. 
doi:<a href=\"https://doi.org/10.23638/LMCS-17(1:10)2021\">10.23638/LMCS-17(1:10)2021</a>"},"intvolume":"        17","has_accepted_license":"1","oa":1,"publication_status":"published","date_published":"2021-02-03T00:00:00Z","ddc":["510"],"year":"2021","acknowledgement":"This research was supported in part by the Austrian Science Fund (FWF) under grants S11402-N23 (RiSE/SHiNE), Z211-N23 (Wittgenstein Award), and M 2369-N33 (Meitner fellowship).\r\n","_id":"10674","abstract":[{"lang":"eng","text":"In two-player games on graphs, the players move a token through a graph to produce an infinite path, which determines the winner of the game. Such games are central in formal methods since they model the interaction between a non-terminating system and its environment. In bidding games the players bid for the right to move the token: in each round, the players simultaneously submit bids, and the higher bidder moves the token and pays the other player. Bidding games are known to have a clean and elegant mathematical structure that relies on the ability of the players to submit arbitrarily small bids. Many applications, however, require a fixed granularity for the bids, which can represent, for example, the monetary value expressed in cents. We study, for the first time, the combination of discrete-bidding and infinite-duration games. Our most important result proves that these games form a large determined subclass of concurrent games, where determinacy is the strong property that there always exists exactly one player who can guarantee winning the game. In particular, we show that, in contrast to non-discrete bidding games, the mechanism with which tied bids are resolved plays an important role in discrete-bidding games. 
We study several natural tie-breaking mechanisms and show that, while some do not admit determinacy, most natural mechanisms imply determinacy for every pair of initial budgets."}],"date_updated":"2023-08-17T06:56:42Z","oa_version":"Published Version","month":"02","type":"journal_article","page":"10:1-10:23","date_created":"2022-01-25T16:32:13Z","file_date_updated":"2022-01-26T08:04:50Z","volume":17},{"_id":"10688","acknowledgement":"This research was performed while Bernhard Kragl was at IST Austria, supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award).","year":"2021","volume":2,"date_created":"2022-01-26T08:01:30Z","file_date_updated":"2022-01-26T08:04:29Z","page":"143–152","month":"10","type":"conference","oa_version":"Published Version","abstract":[{"lang":"eng","text":"Civl is a static verifier for concurrent programs designed around the conceptual framework of layered refinement,\r\nwhich views the task of verifying a program as a sequence of program simplification steps each justified by its own invariant. Civl verifies a layered concurrent program that compactly expresses all the programs in this sequence and the supporting invariants. This paper presents the design and implementation of the Civl verifier."}],"date_updated":"2022-01-26T08:20:41Z","intvolume":"         2","citation":{"ama":"Kragl B, Qadeer S. The Civl verifier. In: Ruzica P, Whalen MW, eds. <i>Proceedings of the 21st Conference on Formal Methods in Computer-Aided Design</i>. Vol 2. TU Wien Academic Press; 2021:143–152. doi:<a href=\"https://doi.org/10.34727/2021/isbn.978-3-85448-046-4_23\">10.34727/2021/isbn.978-3-85448-046-4_23</a>","ista":"Kragl B, Qadeer S. 2021. The Civl verifier. Proceedings of the 21st Conference on Formal Methods in Computer-Aided Design. FMCAD: Formal Methods in Computer-Aided Design, Conference Series, vol. 2, 143–152.","mla":"Kragl, Bernhard, and Shaz Qadeer. 
“The Civl Verifier.” <i>Proceedings of the 21st Conference on Formal Methods in Computer-Aided Design</i>, edited by Piskac Ruzica and Michael W. Whalen, vol. 2, TU Wien Academic Press, 2021, pp. 143–152, doi:<a href=\"https://doi.org/10.34727/2021/isbn.978-3-85448-046-4_23\">10.34727/2021/isbn.978-3-85448-046-4_23</a>.","apa":"Kragl, B., &#38; Qadeer, S. (2021). The Civl verifier. In P. Ruzica &#38; M. W. Whalen (Eds.), <i>Proceedings of the 21st Conference on Formal Methods in Computer-Aided Design</i> (Vol. 2, pp. 143–152). Virtual: TU Wien Academic Press. <a href=\"https://doi.org/10.34727/2021/isbn.978-3-85448-046-4_23\">https://doi.org/10.34727/2021/isbn.978-3-85448-046-4_23</a>","ieee":"B. Kragl and S. Qadeer, “The Civl verifier,” in <i>Proceedings of the 21st Conference on Formal Methods in Computer-Aided Design</i>, Virtual, 2021, vol. 2, pp. 143–152.","chicago":"Kragl, Bernhard, and Shaz Qadeer. “The Civl Verifier.” In <i>Proceedings of the 21st Conference on Formal Methods in Computer-Aided Design</i>, edited by Piskac Ruzica and Michael W. Whalen, 2:143–152. TU Wien Academic Press, 2021. <a href=\"https://doi.org/10.34727/2021/isbn.978-3-85448-046-4_23\">https://doi.org/10.34727/2021/isbn.978-3-85448-046-4_23</a>.","short":"B. Kragl, S. Qadeer, in:, P. Ruzica, M.W. Whalen (Eds.), Proceedings of the 21st Conference on Formal Methods in Computer-Aided Design, TU Wien Academic Press, 2021, pp. 
143–152."},"alternative_title":["Conference Series"],"status":"public","editor":[{"full_name":"Ruzica, Piskac","last_name":"Ruzica","first_name":"Piskac"},{"first_name":"Michael W.","last_name":"Whalen","full_name":"Whalen, Michael W."}],"date_published":"2021-10-01T00:00:00Z","ddc":["000"],"publication_status":"published","oa":1,"has_accepted_license":"1","publication":"Proceedings of the 21st Conference on Formal Methods in Computer-Aided Design","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"article_processing_charge":"No","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","publisher":"TU Wien Academic Press","department":[{"_id":"ToHe"}],"title":"The Civl verifier","author":[{"id":"320FC952-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-7745-9117","full_name":"Kragl, Bernhard","last_name":"Kragl","first_name":"Bernhard"},{"full_name":"Qadeer, Shaz","first_name":"Shaz","last_name":"Qadeer"}],"day":"01","file":[{"file_id":"10689","date_updated":"2022-01-26T08:04:29Z","checksum":"35438ac9f9750340b7f8ae4ae3220d9f","date_created":"2022-01-26T08:04:29Z","access_level":"open_access","file_name":"2021_FCAD2021_Kragl.pdf","success":1,"file_size":390555,"relation":"main_file","content_type":"application/pdf","creator":"cchlebak"}],"conference":{"end_date":"2021-10-22","start_date":"2021-10-20","name":"FMCAD: Formal Methods in Computer-Aided Design","location":"Virtual"},"language":[{"iso":"eng"}],"project":[{"name":"The Wittgenstein Prize","call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425","grant_number":"Z211"}],"doi":"10.34727/2021/isbn.978-3-85448-046-4_23","quality_controlled":"1","publication_identifier":{"isbn":["978-3-85448-046-4"]}},{"acknowledgement":"This research was supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award), ERC CoG 863818 
(FoRM-SMArt), and by the European Union's Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie Grant Agreement No. 665385.","year":"2021","_id":"10694","page":"617-636","abstract":[{"text":"In a two-player zero-sum graph game the players move a token throughout a graph to produce an infinite path, which determines the winner or payoff of the game. Traditionally, the players alternate turns in moving the token. In bidding games, however, the players have budgets, and in each turn, we hold an “auction” (bidding) to determine which player moves the token: both players simultaneously submit bids and the higher bidder moves the token. The bidding mechanisms differ in their payment schemes. Bidding games were largely studied with variants of first-price bidding in which only the higher bidder pays his bid. We focus on all-pay bidding, where both players pay their bids. Finite-duration all-pay bidding games were studied and shown to be technically more challenging than their first-price counterparts. We study for the first time, infinite-duration all-pay bidding games. Our most interesting results are for mean-payoff objectives: we portray a complete picture for games played on strongly-connected graphs. We study both pure (deterministic) and mixed (probabilistic) strategies and completely characterize the optimal and almost-sure (with probability 1) payoffs the players can respectively guarantee. We show that mean-payoff games under all-pay bidding exhibit the intriguing mathematical properties of their first-price counterparts; namely, an equivalence with random-turn games in which in each turn, the player who moves is selected according to a (biased) coin toss. 
The equivalences for all-pay bidding are more intricate and unexpected than for first-price bidding.","lang":"eng"}],"date_updated":"2025-07-14T09:10:12Z","type":"conference","oa_version":"Preprint","month":"01","date_created":"2022-01-27T12:11:23Z","editor":[{"last_name":"Marx","first_name":"Dániel","full_name":"Marx, Dániel"}],"status":"public","external_id":{"arxiv":["2005.06636"]},"citation":{"chicago":"Avni, Guy, Ismael R Jecker, and Dorde Zikelic. “Infinite-Duration All-Pay Bidding Games.” In <i>Proceedings of the 2021 ACM-SIAM Symposium on Discrete Algorithms</i>, edited by Dániel Marx, 617–36. Society for Industrial and Applied Mathematics, 2021. <a href=\"https://doi.org/10.1137/1.9781611976465.38\">https://doi.org/10.1137/1.9781611976465.38</a>.","ieee":"G. Avni, I. R. Jecker, and D. Zikelic, “Infinite-duration all-pay bidding games,” in <i>Proceedings of the 2021 ACM-SIAM Symposium on Discrete Algorithms</i>, Virtual, 2021, pp. 617–636.","short":"G. Avni, I.R. Jecker, D. Zikelic, in:, D. Marx (Ed.), Proceedings of the 2021 ACM-SIAM Symposium on Discrete Algorithms, Society for Industrial and Applied Mathematics, 2021, pp. 617–636.","ama":"Avni G, Jecker IR, Zikelic D. Infinite-duration all-pay bidding games. In: Marx D, ed. <i>Proceedings of the 2021 ACM-SIAM Symposium on Discrete Algorithms</i>. Society for Industrial and Applied Mathematics; 2021:617-636. doi:<a href=\"https://doi.org/10.1137/1.9781611976465.38\">10.1137/1.9781611976465.38</a>","apa":"Avni, G., Jecker, I. R., &#38; Zikelic, D. (2021). Infinite-duration all-pay bidding games. In D. Marx (Ed.), <i>Proceedings of the 2021 ACM-SIAM Symposium on Discrete Algorithms</i> (pp. 617–636). Virtual: Society for Industrial and Applied Mathematics. <a href=\"https://doi.org/10.1137/1.9781611976465.38\">https://doi.org/10.1137/1.9781611976465.38</a>","mla":"Avni, Guy, et al. 
“Infinite-Duration All-Pay Bidding Games.” <i>Proceedings of the 2021 ACM-SIAM Symposium on Discrete Algorithms</i>, edited by Dániel Marx, Society for Industrial and Applied Mathematics, 2021, pp. 617–36, doi:<a href=\"https://doi.org/10.1137/1.9781611976465.38\">10.1137/1.9781611976465.38</a>.","ista":"Avni G, Jecker IR, Zikelic D. 2021. Infinite-duration all-pay bidding games. Proceedings of the 2021 ACM-SIAM Symposium on Discrete Algorithms. SODA: Symposium on Discrete Algorithms, 617–636."},"publication_status":"published","oa":1,"date_published":"2021-01-01T00:00:00Z","main_file_link":[{"url":"https://arxiv.org/abs/2005.06636","open_access":"1"}],"user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","publisher":"Society for Industrial and Applied Mathematics","department":[{"_id":"GradSch"},{"_id":"KrCh"}],"publication":"Proceedings of the 2021 ACM-SIAM Symposium on Discrete Algorithms","ec_funded":1,"scopus_import":"1","article_processing_charge":"No","author":[{"last_name":"Avni","first_name":"Guy","full_name":"Avni, Guy","id":"463C8BC2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-5588-8287"},{"last_name":"Jecker","first_name":"Ismael R","id":"85D7C63E-7D5D-11E9-9C0F-98C4E5697425","full_name":"Jecker, Ismael R"},{"full_name":"Zikelic, Dorde","id":"294AA7A6-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-4681-1699","last_name":"Zikelic","first_name":"Dorde"}],"day":"01","arxiv":1,"title":"Infinite-duration all-pay bidding games","project":[{"grant_number":"Z211","name":"The Wittgenstein Prize","_id":"25F42A32-B435-11E9-9278-68D0E5697425","call_identifier":"FWF"},{"grant_number":"863818","call_identifier":"H2020","_id":"0599E47C-7A3F-11EA-A408-12923DDC885E","name":"Formal Methods for Stochastic Models: Algorithms and Applications"},{"grant_number":"665385","call_identifier":"H2020","_id":"2564DBCA-B435-11E9-9278-68D0E5697425","name":"International IST Doctoral Program"}],"conference":{"end_date":"2021-01-13","location":"Virtual","name":"SODA: Symposium on 
Discrete Algorithms","start_date":"2021-01-10"},"language":[{"iso":"eng"}],"publication_identifier":{"isbn":["978-1-61197-646-5"]},"doi":"10.1137/1.9781611976465.38","quality_controlled":"1"},{"publication_identifier":{"issn":["09574174"]},"doi":"10.1016/j.eswa.2020.114203","quality_controlled":"1","project":[{"_id":"25F42A32-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","name":"The Wittgenstein Prize","grant_number":"Z211"}],"isi":1,"issue":"4","language":[{"iso":"eng"}],"author":[{"id":"8C6B42F8-C8E6-11E9-A03A-F2DCE5697425","full_name":"Sarac, Naci E","first_name":"Naci E","last_name":"Sarac"},{"full_name":"Altun, Ömer Faruk","last_name":"Altun","first_name":"Ömer Faruk"},{"first_name":"Kamil Tolga","last_name":"Atam","full_name":"Atam, Kamil Tolga"},{"first_name":"Sertac","last_name":"Karahoda","full_name":"Karahoda, Sertac"},{"last_name":"Kaya","first_name":"Kamer","full_name":"Kaya, Kamer"},{"first_name":"Hüsnü","last_name":"Yenigün","full_name":"Yenigün, Hüsnü"}],"file":[{"checksum":"600c2f81bc898a725bcfa7cf26ff4fed","date_updated":"2020-12-02T13:33:51Z","file_id":"8913","access_level":"open_access","date_created":"2020-12-02T13:33:51Z","file_name":"synchroPaperRevised.pdf","creator":"esarac","relation":"main_file","content_type":"application/pdf","file_size":634967}],"day":"01","article_number":"114203","title":"Boosting expensive synchronizing heuristics","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","publisher":"Elsevier","department":[{"_id":"ToHe"}],"publication":"Expert Systems with Applications","article_type":"original","scopus_import":"1","article_processing_charge":"No","oa":1,"publication_status":"published","has_accepted_license":"1","ddc":["000"],"date_published":"2021-04-01T00:00:00Z","external_id":{"isi":["000640531100038"]},"status":"public","intvolume":"167","citation":{"ieee":"N. E. Sarac, Ö. F. Altun, K. T. Atam, S. Karahoda, K. Kaya, and H. 
Yenigün, “Boosting expensive synchronizing heuristics,” <i>Expert Systems with Applications</i>, vol. 167, no. 4. Elsevier, 2021.","chicago":"Sarac, Naci E, Ömer Faruk Altun, Kamil Tolga Atam, Sertac Karahoda, Kamer Kaya, and Hüsnü Yenigün. “Boosting Expensive Synchronizing Heuristics.” <i>Expert Systems with Applications</i>. Elsevier, 2021. <a href=\"https://doi.org/10.1016/j.eswa.2020.114203\">https://doi.org/10.1016/j.eswa.2020.114203</a>.","short":"N.E. Sarac, Ö.F. Altun, K.T. Atam, S. Karahoda, K. Kaya, H. Yenigün, Expert Systems with Applications 167 (2021).","ama":"Sarac NE, Altun ÖF, Atam KT, Karahoda S, Kaya K, Yenigün H. Boosting expensive synchronizing heuristics. <i>Expert Systems with Applications</i>. 2021;167(4). doi:<a href=\"https://doi.org/10.1016/j.eswa.2020.114203\">10.1016/j.eswa.2020.114203</a>","mla":"Sarac, Naci E., et al. “Boosting Expensive Synchronizing Heuristics.” <i>Expert Systems with Applications</i>, vol. 167, no. 4, 114203, Elsevier, 2021, doi:<a href=\"https://doi.org/10.1016/j.eswa.2020.114203\">10.1016/j.eswa.2020.114203</a>.","ista":"Sarac NE, Altun ÖF, Atam KT, Karahoda S, Kaya K, Yenigün H. 2021. Boosting expensive synchronizing heuristics. Expert Systems with Applications. 167(4), 114203.","apa":"Sarac, N. E., Altun, Ö. F., Atam, K. T., Karahoda, S., Kaya, K., &#38; Yenigün, H. (2021). Boosting expensive synchronizing heuristics. <i>Expert Systems with Applications</i>. Elsevier. <a href=\"https://doi.org/10.1016/j.eswa.2020.114203\">https://doi.org/10.1016/j.eswa.2020.114203</a>"},"oa_version":"Submitted Version","month":"04","type":"journal_article","date_updated":"2023-08-04T11:19:00Z","abstract":[{"text":"For automata, synchronization, the problem of bringing an automaton to a particular state regardless of its initial state, is important. It has several applications in practice and is related to a fifty-year-old conjecture on the length of the shortest synchronizing word. 
Although using shorter words increases the effectiveness in practice, finding a shortest one (which is not necessarily unique) is NP-hard. For this reason, there exist various heuristics in the literature. However, high-quality heuristics such as SynchroP producing relatively shorter sequences are very expensive and can take hours when the automaton has tens of thousands of states. The SynchroP heuristic has been frequently used as a benchmark to evaluate the performance of the new heuristics. In this work, we first improve the runtime of SynchroP and its variants by using algorithmic techniques. We then focus on adapting SynchroP for many-core architectures,\r\nand overall, we obtain more than 1000× speedup on GPUs compared to naive sequential implementation that has been frequently used as a benchmark to evaluate new heuristics in the literature. We also propose two SynchroP variants and evaluate their performance.","lang":"eng"}],"volume":167,"file_date_updated":"2020-12-02T13:33:51Z","date_created":"2020-12-02T13:34:25Z","acknowledgement":"This work was supported by The Scientific and Technological Research Council of Turkey (TUBITAK) [grant number 114E569]. This research was supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award). 
We would like to thank the authors of (Roman & Szykula, 2015) for providing their heuristics implementations, which we used to compare our SynchroP implementation as given in Table 11.","year":"2021","_id":"8912"},{"department":[{"_id":"ToHe"}],"publisher":"Association for Computing Machinery","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"article_processing_charge":"No","ec_funded":1,"scopus_import":"1","publication":"HSCC '21: Proceedings of the 24th International Conference on Hybrid Systems: Computation and Control","day":"01","file":[{"checksum":"4c1202c1abf71384c3ee6fea88c2f80e","file_id":"9424","date_updated":"2021-05-25T13:53:22Z","access_level":"open_access","date_created":"2021-05-25T13:53:22Z","file_name":"2021_HSCC_Soto.pdf","success":1,"creator":"kschuh","file_size":1474786,"content_type":"application/pdf","relation":"main_file"}],"author":[{"last_name":"Garcia Soto","first_name":"Miriam","full_name":"Garcia Soto, Miriam","id":"4B3207F6-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-2936-5719"},{"last_name":"Henzinger","first_name":"Thomas A","full_name":"Henzinger, Thomas A","orcid":"0000-0002-2985-7724","id":"40876CD8-F248-11E8-B48F-1D18A9856A87"},{"id":"3A2F4DCE-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3658-1065","full_name":"Schilling, Christian","first_name":"Christian","last_name":"Schilling"}],"arxiv":1,"title":"Synthesis of hybrid automata with affine dynamics from time-series data","project":[{"name":"The Wittgenstein Prize","_id":"25F42A32-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","grant_number":"Z211"},{"grant_number":"754411","name":"ISTplus - Postdoctoral 
Fellowships","call_identifier":"H2020","_id":"260C2330-B435-11E9-9278-68D0E5697425"}],"language":[{"iso":"eng"}],"conference":{"end_date":"2021-05-21","start_date":"2021-05-19","location":"Nashville, TN, United States","name":"HSCC: International Conference on Hybrid Systems Computation and Control"},"keyword":["hybrid automaton","membership","system identification"],"isi":1,"publication_identifier":{"isbn":["9781450383394"]},"quality_controlled":"1","doi":"10.1145/3447928.3456704","year":"2021","acknowledgement":"This research was supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award) and the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No. 754411.","_id":"9200","type":"conference","month":"05","oa_version":"Published Version","date_updated":"2023-08-07T13:49:33Z","abstract":[{"lang":"eng","text":"Formal design of embedded and cyber-physical systems relies on mathematical modeling. In this paper, we consider the model class of hybrid automata whose dynamics are defined by affine differential equations. Given a set of time-series data, we present an algorithmic approach to synthesize a hybrid automaton exhibiting behavior that is close to the data, up to a specified precision, and changes in synchrony with the data. A fundamental problem in our synthesis algorithm is to check membership of a time series in a hybrid automaton. Our solution integrates reachability and optimization techniques for affine dynamical systems to obtain both a sufficient and a necessary condition for membership, combined in a refinement framework. The algorithm processes one time series at a time and hence can be interrupted, provide an intermediate result, and be resumed. 
We report experimental results demonstrating the applicability of our synthesis approach."}],"page":"2102.12734","date_created":"2021-02-26T16:30:39Z","file_date_updated":"2021-05-25T13:53:22Z","status":"public","external_id":{"arxiv":["2102.12734"],"isi":["000932821700028"]},"citation":{"ama":"Garcia Soto M, Henzinger TA, Schilling C. Synthesis of hybrid automata with affine dynamics from time-series data. In: <i>HSCC ’21: Proceedings of the 24th International Conference on Hybrid Systems: Computation and Control</i>. Association for Computing Machinery; 2021:2102.12734. doi:<a href=\"https://doi.org/10.1145/3447928.3456704\">10.1145/3447928.3456704</a>","apa":"Garcia Soto, M., Henzinger, T. A., &#38; Schilling, C. (2021). Synthesis of hybrid automata with affine dynamics from time-series data. In <i>HSCC ’21: Proceedings of the 24th International Conference on Hybrid Systems: Computation and Control</i> (p. 2102.12734). Nashville, TN, United States: Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3447928.3456704\">https://doi.org/10.1145/3447928.3456704</a>","ista":"Garcia Soto M, Henzinger TA, Schilling C. 2021. Synthesis of hybrid automata with affine dynamics from time-series data. HSCC ’21: Proceedings of the 24th International Conference on Hybrid Systems: Computation and Control. HSCC: International Conference on Hybrid Systems Computation and Control, 2102.12734.","mla":"Garcia Soto, Miriam, et al. “Synthesis of Hybrid Automata with Affine Dynamics from Time-Series Data.” <i>HSCC ’21: Proceedings of the 24th International Conference on Hybrid Systems: Computation and Control</i>, Association for Computing Machinery, 2021, p. 2102.12734, doi:<a href=\"https://doi.org/10.1145/3447928.3456704\">10.1145/3447928.3456704</a>.","chicago":"Garcia Soto, Miriam, Thomas A Henzinger, and Christian Schilling. 
“Synthesis of Hybrid Automata with Affine Dynamics from Time-Series Data.” In <i>HSCC ’21: Proceedings of the 24th International Conference on Hybrid Systems: Computation and Control</i>, 2102.12734. Association for Computing Machinery, 2021. <a href=\"https://doi.org/10.1145/3447928.3456704\">https://doi.org/10.1145/3447928.3456704</a>.","ieee":"M. Garcia Soto, T. A. Henzinger, and C. Schilling, “Synthesis of hybrid automata with affine dynamics from time-series data,” in <i>HSCC ’21: Proceedings of the 24th International Conference on Hybrid Systems: Computation and Control</i>, Nashville, TN, United States, 2021, p. 2102.12734.","short":"M. Garcia Soto, T.A. Henzinger, C. Schilling, in:, HSCC ’21: Proceedings of the 24th International Conference on Hybrid Systems: Computation and Control, Association for Computing Machinery, 2021, p. 2102.12734."},"has_accepted_license":"1","oa":1,"publication_status":"published","ddc":["000"],"date_published":"2021-05-01T00:00:00Z"}]
