{"year":"2021","ddc":["000"],"project":[{"_id":"2564DBCA-B435-11E9-9278-68D0E5697425","name":"International IST Doctoral Program","grant_number":"665385","call_identifier":"H2020"},{"_id":"0599E47C-7A3F-11EA-A408-12923DDC885E","name":"Formal Methods for Stochastic Models: Algorithms and Applications","grant_number":"863818","call_identifier":"H2020"},{"call_identifier":"FWF","name":"The Wittgenstein Prize","_id":"25F42A32-B435-11E9-9278-68D0E5697425","grant_number":"Z211"}],"acknowledgement":"This research was supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award), ERC CoG 863818 (FoRM-SMArt), and the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie Grant Agreement No. 665385.","file_date_updated":"2022-01-26T07:39:59Z","external_id":{"arxiv":["2111.03165"]},"doi":"10.48550/arXiv.2111.03165","author":[{"id":"3DC22916-F248-11E8-B48F-1D18A9856A87","full_name":"Lechner, Mathias","first_name":"Mathias","last_name":"Lechner"},{"last_name":"Žikelić","first_name":"Ðorđe","full_name":"Žikelić, Ðorđe"},{"id":"2E5DCA20-F248-11E8-B48F-1D18A9856A87","full_name":"Chatterjee, Krishnendu","first_name":"Krishnendu","last_name":"Chatterjee","orcid":"0000-0002-4561-241X"},{"last_name":"Henzinger","first_name":"Thomas A","full_name":"Henzinger, Thomas A","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-2985-7724"}],"publication":"35th Conference on Neural Information Processing Systems","has_accepted_license":"1","language":[{"iso":"eng"}],"_id":"10667","ec_funded":1,"citation":{"ieee":"M. Lechner, Ð. Žikelić, K. Chatterjee, and T. A. Henzinger, “Infinite time horizon safety of Bayesian neural networks,” in 35th Conference on Neural Information Processing Systems, Virtual, 2021.","mla":"Lechner, Mathias, et al. “Infinite Time Horizon Safety of Bayesian Neural Networks.” 35th Conference on Neural Information Processing Systems, 2021, doi:10.48550/arXiv.2111.03165.","short":"M. Lechner, Ð. Žikelić, K. Chatterjee, T.A. Henzinger, in:, 35th Conference on Neural Information Processing Systems, 2021.","chicago":"Lechner, Mathias, Ðorđe Žikelić, Krishnendu Chatterjee, and Thomas A Henzinger. “Infinite Time Horizon Safety of Bayesian Neural Networks.” In 35th Conference on Neural Information Processing Systems, 2021. https://doi.org/10.48550/arXiv.2111.03165.","ista":"Lechner M, Žikelić Ð, Chatterjee K, Henzinger TA. 2021. Infinite time horizon safety of Bayesian neural networks. 35th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems, Advances in Neural Information Processing Systems.","apa":"Lechner, M., Žikelić, Ð., Chatterjee, K., & Henzinger, T. A. (2021). Infinite time horizon safety of Bayesian neural networks. In 35th Conference on Neural Information Processing Systems. Virtual. https://doi.org/10.48550/arXiv.2111.03165","ama":"Lechner M, Žikelić Ð, Chatterjee K, Henzinger TA. Infinite time horizon safety of Bayesian neural networks. In: 35th Conference on Neural Information Processing Systems. ; 2021. doi:10.48550/arXiv.2111.03165"},"conference":{"start_date":"2021-12-06","name":"NeurIPS: Neural Information Processing Systems","end_date":"2021-12-10","location":"Virtual"},"oa":1,"oa_version":"Published Version","date_created":"2022-01-25T15:45:58Z","user_id":"2EBD1598-F248-11E8-B48F-1D18A9856A87","alternative_title":["Advances in Neural Information Processing Systems"],"quality_controlled":"1","title":"Infinite time horizon safety of Bayesian neural networks","related_material":{"record":[{"status":"public","id":"11362","relation":"dissertation_contains"}]},"status":"public","month":"12","article_processing_charge":"No","tmp":{"short":"CC BY-NC-ND (3.0)","image":"/images/cc_by_nc_nd.png","name":"Creative Commons Attribution-NonCommercial-NoDerivs 3.0 Unported (CC BY-NC-ND 3.0)","legal_code_url":"https://creativecommons.org/licenses/by-nc-nd/3.0/legalcode"},"date_updated":"2023-06-23T07:01:11Z","file":[{"relation":"main_file","checksum":"0fc0f852525c10dda9cc9ffea07fb4e4","content_type":"application/pdf","file_name":"infinite_time_horizon_safety_o.pdf","access_level":"open_access","creator":"mlechner","date_updated":"2022-01-26T07:39:59Z","file_id":"10682","success":1,"date_created":"2022-01-26T07:39:59Z","file_size":452492}],"type":"conference","license":"https://creativecommons.org/licenses/by-nc-nd/3.0/","day":"01","department":[{"_id":"GradSch"},{"_id":"ToHe"},{"_id":"KrCh"}],"abstract":[{"lang":"eng","text":"Bayesian neural networks (BNNs) place distributions over the weights of a neural network to model uncertainty in the data and the network's prediction. We consider the problem of verifying safety when running a Bayesian neural network policy in a feedback loop with infinite time horizon systems. Compared to the existing sampling-based approaches, which are inapplicable to the infinite time horizon setting, we train a separate deterministic neural network that serves as an infinite time horizon safety certificate. In particular, we show that the certificate network guarantees the safety of the system over a subset of the BNN weight posterior's support. Our method first computes a safe weight set and then alters the BNN's weight posterior to reject samples outside this set. Moreover, we show how to extend our approach to a safe-exploration reinforcement learning setting, in order to avoid unsafe trajectories during the training of the policy. We evaluate our approach on a series of reinforcement learning benchmarks, including non-Lyapunovian safety specifications."}],"main_file_link":[{"open_access":"1","url":"https://proceedings.neurips.cc/paper/2021/hash/544defa9fddff50c53b71c43e0da72be-Abstract.html"}],"date_published":"2021-12-01T00:00:00Z","publication_status":"published"}