[{"publication_identifier":{"issn":["0162-8828"],"eissn":["1939-3539"]},"doi":"10.1109/tpami.2023.3308391","quality_controlled":"1","keyword":["Applied Mathematics","Artificial Intelligence","Computational Theory and Mathematics","Computer Vision and Pattern Recognition","Software"],"language":[{"iso":"eng"}],"issue":"12","author":[{"first_name":"Dashti","last_name":"Ali","full_name":"Ali, Dashti"},{"full_name":"Asaad, Aras","first_name":"Aras","last_name":"Asaad"},{"full_name":"Jimenez, Maria-Jose","last_name":"Jimenez","first_name":"Maria-Jose"},{"first_name":"Vidit","last_name":"Nanda","full_name":"Nanda, Vidit"},{"full_name":"Paluzo-Hidalgo, Eduardo","first_name":"Eduardo","last_name":"Paluzo-Hidalgo"},{"first_name":"Manuel","last_name":"Soriano Trigueros","orcid":"0000-0003-2449-1433","id":"15ebd7cf-15bf-11ee-aebd-bb4bb5121ea8","full_name":"Soriano Trigueros, Manuel"}],"license":"https://creativecommons.org/licenses/by/4.0/","file":[{"access_level":"open_access","date_created":"2024-01-08T10:09:14Z","checksum":"465c28ef0b151b4b1fb47977ed5581ab","date_updated":"2024-01-08T10:09:14Z","file_id":"14740","creator":"dernst","relation":"main_file","content_type":"application/pdf","file_size":2370988,"success":1,"file_name":"2023_IEEEToP_Ali.pdf"}],"day":"01","title":"A survey of vectorization methods in topological data analysis","publisher":"IEEE","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","department":[{"_id":"HeEd"}],"publication":"IEEE Transactions on Pattern Analysis and Machine Intelligence","article_processing_charge":"Yes (in subscription journal)","article_type":"original","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"publication_status":"published","oa":1,"has_accepted_license":"1","ddc":["000"],"date_published":"2023-12-01T00:00:00Z","status":"public","intvolume":"45","citation":{"mla":"Ali, Dashti, et al. “A Survey of Vectorization Methods in Topological Data Analysis.” <i>IEEE Transactions on Pattern Analysis and Machine Intelligence</i>, vol. 45, no. 12, IEEE, 2023, pp. 14069–80, doi:<a href=\"https://doi.org/10.1109/tpami.2023.3308391\">10.1109/tpami.2023.3308391</a>.","ista":"Ali D, Asaad A, Jimenez M-J, Nanda V, Paluzo-Hidalgo E, Soriano Trigueros M. 2023. A survey of vectorization methods in topological data analysis. IEEE Transactions on Pattern Analysis and Machine Intelligence. 45(12), 14069–14080.","apa":"Ali, D., Asaad, A., Jimenez, M.-J., Nanda, V., Paluzo-Hidalgo, E., &#38; Soriano Trigueros, M. (2023). A survey of vectorization methods in topological data analysis. <i>IEEE Transactions on Pattern Analysis and Machine Intelligence</i>. IEEE. <a href=\"https://doi.org/10.1109/tpami.2023.3308391\">https://doi.org/10.1109/tpami.2023.3308391</a>","ama":"Ali D, Asaad A, Jimenez M-J, Nanda V, Paluzo-Hidalgo E, Soriano Trigueros M. A survey of vectorization methods in topological data analysis. <i>IEEE Transactions on Pattern Analysis and Machine Intelligence</i>. 2023;45(12):14069-14080. doi:<a href=\"https://doi.org/10.1109/tpami.2023.3308391\">10.1109/tpami.2023.3308391</a>","short":"D. Ali, A. Asaad, M.-J. Jimenez, V. Nanda, E. Paluzo-Hidalgo, M. Soriano Trigueros, IEEE Transactions on Pattern Analysis and Machine Intelligence 45 (2023) 14069–14080.","ieee":"D. Ali, A. Asaad, M.-J. Jimenez, V. Nanda, E. Paluzo-Hidalgo, and M. Soriano Trigueros, “A survey of vectorization methods in topological data analysis,” <i>IEEE Transactions on Pattern Analysis and Machine Intelligence</i>, vol. 45, no. 12. IEEE, pp. 14069–14080, 2023.","chicago":"Ali, Dashti, Aras Asaad, Maria-Jose Jimenez, Vidit Nanda, Eduardo Paluzo-Hidalgo, and Manuel Soriano Trigueros. “A Survey of Vectorization Methods in Topological Data Analysis.” <i>IEEE Transactions on Pattern Analysis and Machine Intelligence</i>. IEEE, 2023. <a href=\"https://doi.org/10.1109/tpami.2023.3308391\">https://doi.org/10.1109/tpami.2023.3308391</a>."},"page":"14069-14080","date_updated":"2024-01-08T10:11:46Z","abstract":[{"lang":"eng","text":"Attempts to incorporate topological information in supervised learning tasks have resulted in the creation of several techniques for vectorizing persistent homology barcodes. In this paper, we study thirteen such methods. Besides describing an organizational framework for these methods, we comprehensively benchmark them against three well-known classification tasks. Surprisingly, we discover that the best-performing method is a simple vectorization, which consists only of a few elementary summary statistics. Finally, we provide a convenient web application which has been designed to facilitate exploration and experimentation with various vectorization methods."}],"month":"12","type":"journal_article","oa_version":"Published Version","volume":45,"file_date_updated":"2024-01-08T10:09:14Z","date_created":"2024-01-08T09:59:46Z","acknowledgement":"The work of Maria-Jose Jimenez, Eduardo Paluzo-Hidalgo and Manuel Soriano-Trigueros was supported in part by the Spanish grant Ministerio de Ciencia e Innovacion under Grants TED2021-129438B-I00 and PID2019-107339GB-I00, and in part by REXASI-PRO H-EU project, call HORIZON-CL4-2021-HUMAN-01-01 under Grant 101070028. The work of\r\nMaria-Jose Jimenez was supported by a grant of Convocatoria de la Universidad de Sevilla para la recualificacion del sistema universitario español, 2021-23, funded by the European Union, NextGenerationEU. The work of Vidit Nanda was supported in part by EPSRC under Grant EP/R018472/1 and in part by US AFOSR under Grant FA9550-22-1-0462. \r\nWe are grateful to the team of GUDHI and TEASPOON developers, for their work and their support. We are also grateful to Streamlit for providing extra resources to deploy the web app\r\nonline on Streamlit community cloud. We thank the anonymous referees for their helpful suggestions.","year":"2023","_id":"14739"},{"year":"2022","acknowledgement":"This research was supported in part by the AI2050 program at Schmidt Futures (grant G-22-63172), the Boeing Company, and the United States Air Force Research Laboratory and the United States Air Force Artificial Intelligence Accelerator and was accomplished under cooperative agreement number FA8750-19-2-1000. The views and conclusions contained in this document are those of the authors and should not be interpreted as representing the official policies, either expressed or implied, of the United States Air Force or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for Government purposes, notwithstanding any copyright notation herein. This work was further supported by The Boeing Company and Office of Naval Research grant N00014-18-1-2830. M.T. is supported by the Poul Due Jensen Foundation, grant 883901. M.L. was supported in part by the Austrian Science Fund under grant Z211-N23 (Wittgenstein Award). A.A. was supported by the National Science Foundation Graduate Research Fellowship Program. We thank T.-H. Wang, P. Kao, M. Chahine, W. Xiao, X. Li, L. Yin and Y. Ben for useful suggestions and for testing of CfC models to confirm the results across other domains.","_id":"12147","abstract":[{"lang":"eng","text":"Continuous-time neural networks are a class of machine learning systems that can tackle representation learning on spatiotemporal decision-making tasks. These models are typically represented by continuous differential equations. However, their expressive power when they are deployed on computers is bottlenecked by numerical differential equation solvers. This limitation has notably slowed down the scaling and understanding of numerous natural physical phenomena such as the dynamics of nervous systems. Ideally, we would circumvent this bottleneck by solving the given dynamical system in closed form. This is known to be intractable in general. Here, we show that it is possible to closely approximate the interaction between neurons and synapses—the building blocks of natural and artificial neural networks—constructed by liquid time-constant networks efficiently in closed form. To this end, we compute a tightly bounded approximation of the solution of an integral appearing in liquid time-constant dynamics that has had no known closed-form solution so far. This closed-form solution impacts the design of continuous-time and continuous-depth neural models. For instance, since time appears explicitly in closed form, the formulation relaxes the need for complex numerical solvers. Consequently, we obtain models that are between one and five orders of magnitude faster in training and inference compared with differential equation-based counterparts. More importantly, in contrast to ordinary differential equation-based continuous networks, closed-form networks can scale remarkably well compared with other deep learning instances. Lastly, as these models are derived from liquid networks, they show good performance in time-series modelling compared with advanced recurrent neural network models."}],"date_updated":"2023-08-04T09:00:10Z","type":"journal_article","month":"11","oa_version":"Published Version","page":"992-1003","file_date_updated":"2023-01-24T09:49:44Z","date_created":"2023-01-12T12:07:21Z","volume":4,"external_id":{"isi":["000884215600003"],"arxiv":["2106.13898"]},"status":"public","citation":{"chicago":"Hasani, Ramin, Mathias Lechner, Alexander Amini, Lucas Liebenwein, Aaron Ray, Max Tschaikowski, Gerald Teschl, and Daniela Rus. “Closed-Form Continuous-Time Neural Networks.” <i>Nature Machine Intelligence</i>. Springer Nature, 2022. <a href=\"https://doi.org/10.1038/s42256-022-00556-7\">https://doi.org/10.1038/s42256-022-00556-7</a>.","ieee":"R. Hasani <i>et al.</i>, “Closed-form continuous-time neural networks,” <i>Nature Machine Intelligence</i>, vol. 4, no. 11. Springer Nature, pp. 992–1003, 2022.","short":"R. Hasani, M. Lechner, A. Amini, L. Liebenwein, A. Ray, M. Tschaikowski, G. Teschl, D. Rus, Nature Machine Intelligence 4 (2022) 992–1003.","ama":"Hasani R, Lechner M, Amini A, et al. Closed-form continuous-time neural networks. <i>Nature Machine Intelligence</i>. 2022;4(11):992-1003. doi:<a href=\"https://doi.org/10.1038/s42256-022-00556-7\">10.1038/s42256-022-00556-7</a>","apa":"Hasani, R., Lechner, M., Amini, A., Liebenwein, L., Ray, A., Tschaikowski, M., … Rus, D. (2022). Closed-form continuous-time neural networks. <i>Nature Machine Intelligence</i>. Springer Nature. <a href=\"https://doi.org/10.1038/s42256-022-00556-7\">https://doi.org/10.1038/s42256-022-00556-7</a>","ista":"Hasani R, Lechner M, Amini A, Liebenwein L, Ray A, Tschaikowski M, Teschl G, Rus D. 2022. Closed-form continuous-time neural networks. Nature Machine Intelligence. 4(11), 992–1003.","mla":"Hasani, Ramin, et al. “Closed-Form Continuous-Time Neural Networks.” <i>Nature Machine Intelligence</i>, vol. 4, no. 11, Springer Nature, 2022, pp. 992–1003, doi:<a href=\"https://doi.org/10.1038/s42256-022-00556-7\">10.1038/s42256-022-00556-7</a>."},"related_material":{"link":[{"url":"https://doi.org/10.1038/s42256-022-00597-y","relation":"erratum"}]},"intvolume":"4","has_accepted_license":"1","oa":1,"publication_status":"published","ddc":["000"],"date_published":"2022-11-15T00:00:00Z","department":[{"_id":"ToHe"}],"publisher":"Springer Nature","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","scopus_import":"1","article_processing_charge":"No","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"article_type":"original","publication":"Nature Machine Intelligence","day":"15","file":[{"date_updated":"2023-01-24T09:49:44Z","file_id":"12355","checksum":"b4789122ce04bfb4ac042390f59aaa8b","date_created":"2023-01-24T09:49:44Z","access_level":"open_access","success":1,"file_name":"2022_NatureMachineIntelligence_Hasani.pdf","content_type":"application/pdf","relation":"main_file","file_size":3259553,"creator":"dernst"}],"author":[{"last_name":"Hasani","first_name":"Ramin","full_name":"Hasani, Ramin"},{"first_name":"Mathias","last_name":"Lechner","id":"3DC22916-F248-11E8-B48F-1D18A9856A87","full_name":"Lechner, Mathias"},{"last_name":"Amini","first_name":"Alexander","full_name":"Amini, Alexander"},{"first_name":"Lucas","last_name":"Liebenwein","full_name":"Liebenwein, Lucas"},{"last_name":"Ray","first_name":"Aaron","full_name":"Ray, Aaron"},{"last_name":"Tschaikowski","first_name":"Max","full_name":"Tschaikowski, Max"},{"last_name":"Teschl","first_name":"Gerald","full_name":"Teschl, Gerald"},{"full_name":"Rus, Daniela","last_name":"Rus","first_name":"Daniela"}],"title":"Closed-form continuous-time neural networks","arxiv":1,"project":[{"name":"The Wittgenstein Prize","call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425","grant_number":"Z211"}],"language":[{"iso":"eng"}],"issue":"11","isi":1,"keyword":["Artificial Intelligence","Computer Networks and Communications","Computer Vision and Pattern Recognition","Human-Computer Interaction","Software"],"publication_identifier":{"issn":["2522-5839"]},"quality_controlled":"1","doi":"10.1038/s42256-022-00556-7"}]
