[{"date_created":"2020-10-19T13:46:06Z","department":[{"_id":"ToHe"}],"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","article_processing_charge":"No","intvolume":"2","month":"10","quality_controlled":"1","publication_identifier":{"eissn":["2522-5839"]},"isi":1,"volume":2,"article_type":"original","language":[{"iso":"eng"}],"doi":"10.1038/s42256-020-00237-3","publisher":"Springer Nature","year":"2020","type":"journal_article","author":[{"id":"3DC22916-F248-11E8-B48F-1D18A9856A87","last_name":"Lechner","full_name":"Lechner, Mathias","first_name":"Mathias"},{"last_name":"Hasani","full_name":"Hasani, Ramin","first_name":"Ramin"},{"first_name":"Alexander","full_name":"Amini, Alexander","last_name":"Amini"},{"first_name":"Thomas A","full_name":"Henzinger, Thomas A","orcid":"0000-0002-2985-7724","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","last_name":"Henzinger"},{"first_name":"Daniela","full_name":"Rus, Daniela","last_name":"Rus"},{"last_name":"Grosu","full_name":"Grosu, Radu","first_name":"Radu"}],"_id":"8679","date_published":"2020-10-01T00:00:00Z","project":[{"grant_number":"Z211","call_identifier":"FWF","name":"The Wittgenstein Prize","_id":"25F42A32-B435-11E9-9278-68D0E5697425"}],"citation":{"mla":"Lechner, Mathias, et al. “Neural Circuit Policies Enabling Auditable Autonomy.” <i>Nature Machine Intelligence</i>, vol. 2, Springer Nature, 2020, pp. 642–52, doi:<a href=\"https://doi.org/10.1038/s42256-020-00237-3\">10.1038/s42256-020-00237-3</a>.","apa":"Lechner, M., Hasani, R., Amini, A., Henzinger, T. A., Rus, D., &#38; Grosu, R. (2020). Neural circuit policies enabling auditable autonomy. <i>Nature Machine Intelligence</i>. Springer Nature. <a href=\"https://doi.org/10.1038/s42256-020-00237-3\">https://doi.org/10.1038/s42256-020-00237-3</a>","ista":"Lechner M, Hasani R, Amini A, Henzinger TA, Rus D, Grosu R. 2020. Neural circuit policies enabling auditable autonomy. Nature Machine Intelligence. 2, 642–652.","short":"M. Lechner, R. Hasani, A. Amini, T.A. Henzinger, D. Rus, R. Grosu, Nature Machine Intelligence 2 (2020) 642–652.","ama":"Lechner M, Hasani R, Amini A, Henzinger TA, Rus D, Grosu R. Neural circuit policies enabling auditable autonomy. <i>Nature Machine Intelligence</i>. 2020;2:642-652. doi:<a href=\"https://doi.org/10.1038/s42256-020-00237-3\">10.1038/s42256-020-00237-3</a>","chicago":"Lechner, Mathias, Ramin Hasani, Alexander Amini, Thomas A Henzinger, Daniela Rus, and Radu Grosu. “Neural Circuit Policies Enabling Auditable Autonomy.” <i>Nature Machine Intelligence</i>. Springer Nature, 2020. <a href=\"https://doi.org/10.1038/s42256-020-00237-3\">https://doi.org/10.1038/s42256-020-00237-3</a>.","ieee":"M. Lechner, R. Hasani, A. Amini, T. A. Henzinger, D. Rus, and R. Grosu, “Neural circuit policies enabling auditable autonomy,” <i>Nature Machine Intelligence</i>, vol. 2. Springer Nature, pp. 642–652, 2020."},"scopus_import":"1","publication":"Nature Machine Intelligence","external_id":{"isi":["000583337200011"]},"related_material":{"link":[{"url":"https://ist.ac.at/en/news/new-deep-learning-models/","relation":"press_release","description":"News on IST Homepage"}]},"title":"Neural circuit policies enabling auditable autonomy","date_updated":"2023-08-22T10:36:06Z","status":"public","publication_status":"published","oa_version":"None","page":"642-652","day":"01","abstract":[{"text":"A central goal of artificial intelligence in high-stakes decision-making applications is to design a single algorithm that simultaneously expresses generalizability by learning coherent representations of their world and interpretable explanations of its dynamics. Here, we combine brain-inspired neural computation principles and scalable deep learning architectures to design compact neural controllers for task-specific compartments of a full-stack autonomous vehicle control system. We discover that a single algorithm with 19 control neurons, connecting 32 encapsulated input features to outputs by 253 synapses, learns to map high-dimensional inputs into steering commands. This system shows superior generalizability, interpretability and robustness compared with orders-of-magnitude larger black-box learning systems. The obtained neural agents enable high-fidelity autonomy for task-specific parts of a complex autonomous system.","lang":"eng"}]}]
