[{"article_type":"original","ec_funded":1,"author":[{"full_name":"Seiringer, Robert","first_name":"Robert","last_name":"Seiringer","orcid":"0000-0002-6781-0521","id":"4AFD0470-F248-11E8-B48F-1D18A9856A87"}],"oa_version":"Preprint","publisher":"World Scientific Publishing","oa":1,"issue":"01","type":"journal_article","user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","department":[{"_id":"RoSe"}],"external_id":{"isi":["000613313200013"],"arxiv":["1912.12509"]},"keyword":["Mathematical Physics","Statistical and Nonlinear Physics"],"intvolume":"        33","abstract":[{"text":"We review old and new results on the Fröhlich polaron model. The discussion includes the validity of the (classical) Pekar approximation in the strong coupling limit, quantum corrections to this limit, as well as the divergence of the effective polaron mass.","lang":"eng"}],"project":[{"_id":"25C6DC12-B435-11E9-9278-68D0E5697425","name":"Analysis of quantum many-body systems","grant_number":"694227","call_identifier":"H2020"}],"scopus_import":"1","acknowledgement":"This work was supported by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No. 694227).","citation":{"ieee":"R. Seiringer, “The polaron at strong coupling,” <i>Reviews in Mathematical Physics</i>, vol. 33, no. 01. World Scientific Publishing, 2021.","mla":"Seiringer, Robert. “The Polaron at Strong Coupling.” <i>Reviews in Mathematical Physics</i>, vol. 33, no. 01, 2060012, World Scientific Publishing, 2021, doi:<a href=\"https://doi.org/10.1142/s0129055x20600120\">10.1142/s0129055x20600120</a>.","ista":"Seiringer R. 2021. The polaron at strong coupling. Reviews in Mathematical Physics. 33(01), 2060012.","ama":"Seiringer R. The polaron at strong coupling. <i>Reviews in Mathematical Physics</i>. 2021;33(01). doi:<a href=\"https://doi.org/10.1142/s0129055x20600120\">10.1142/s0129055x20600120</a>","apa":"Seiringer, R. (2021). The polaron at strong coupling. 
<i>Reviews in Mathematical Physics</i>. World Scientific Publishing. <a href=\"https://doi.org/10.1142/s0129055x20600120\">https://doi.org/10.1142/s0129055x20600120</a>","short":"R. Seiringer, Reviews in Mathematical Physics 33 (2021).","chicago":"Seiringer, Robert. “The Polaron at Strong Coupling.” <i>Reviews in Mathematical Physics</i>. World Scientific Publishing, 2021. <a href=\"https://doi.org/10.1142/s0129055x20600120\">https://doi.org/10.1142/s0129055x20600120</a>."},"day":"01","volume":33,"status":"public","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/1912.12509"}],"month":"02","doi":"10.1142/s0129055x20600120","year":"2021","_id":"10852","publication":"Reviews in Mathematical Physics","title":"The polaron at strong coupling","date_updated":"2023-09-05T16:08:02Z","date_created":"2022-03-18T08:11:34Z","date_published":"2021-02-01T00:00:00Z","language":[{"iso":"eng"}],"quality_controlled":"1","arxiv":1,"publication_identifier":{"issn":["0129-055X"],"eissn":["1793-6659"]},"isi":1,"article_number":"2060012","publication_status":"published","article_processing_charge":"No"},{"month":"07","main_file_link":[{"url":"https://arxiv.org/abs/2105.08098","open_access":"1"}],"status":"public","day":"01","title":"A scalable concurrent algorithm for dynamic connectivity","year":"2021","_id":"10853","publication":"Proceedings of the 33rd ACM Symposium on Parallelism in Algorithms and Architectures","doi":"10.1145/3409964.3461810","conference":{"name":"SPAA: Symposium on Parallelism in Algorithms and Architectures","end_date":"2021-07-08","start_date":"2021-07-06","location":"Virtual, 
Online"},"quality_controlled":"1","arxiv":1,"language":[{"iso":"eng"}],"date_published":"2021-07-01T00:00:00Z","page":"208-220","date_updated":"2022-03-18T08:45:46Z","date_created":"2022-03-18T08:21:47Z","article_processing_charge":"No","publication_status":"published","publication_identifier":{"isbn":["9781450380706"]},"author":[{"first_name":"Alexander","full_name":"Fedorov, Alexander","last_name":"Fedorov"},{"first_name":"Nikita","full_name":"Koval, Nikita","last_name":"Koval"},{"first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","last_name":"Alistarh"}],"oa":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"conference","publisher":"Association for Computing Machinery","oa_version":"Preprint","abstract":[{"text":"Dynamic Connectivity is a fundamental algorithmic graph problem, motivated by a wide range of applications to social and communication networks and used as a building block in various other algorithms, such as the bi-connectivity and the dynamic minimal spanning tree problems. In brief, we wish to maintain the connected components of the graph under dynamic edge insertions and deletions. In the sequential case, the problem has been well-studied from both theoretical and practical perspectives. However, much less is known about efficient concurrent solutions to this problem. This is the gap we address in this paper. We start from one of the classic data structures used to solve this problem, the Euler Tour Tree. Our first contribution is a non-blocking single-writer implementation of it. We leverage this data structure to obtain the first truly concurrent generalization of dynamic connectivity, which preserves the time complexity of its sequential counterpart, but is also scalable in practice. To achieve this, we rely on three main techniques. The first is to ensure that connectivity queries, which usually dominate real-world workloads, are non-blocking. 
The second non-trivial technique expands the above idea by making all queries that do not change the connectivity structure non-blocking. The third ingredient is applying fine-grained locking for updating the connected components, which allows operations on disjoint components to occur in parallel. We evaluate the resulting algorithm on various workloads, executing on both real and synthetic graphs. The results show the efficiency of each of the proposed optimizations; the most efficient variant improves the performance of a coarse-grained based implementation on realistic scenarios up to 6x on average and up to 30x when connectivity queries dominate.","lang":"eng"}],"department":[{"_id":"DaAl"}],"external_id":{"arxiv":["2105.08098"]},"scopus_import":"1","citation":{"apa":"Fedorov, A., Koval, N., &#38; Alistarh, D.-A. (2021). A scalable concurrent algorithm for dynamic connectivity. In <i>Proceedings of the 33rd ACM Symposium on Parallelism in Algorithms and Architectures</i> (pp. 208–220). Virtual, Online: Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3409964.3461810\">https://doi.org/10.1145/3409964.3461810</a>","ama":"Fedorov A, Koval N, Alistarh D-A. A scalable concurrent algorithm for dynamic connectivity. In: <i>Proceedings of the 33rd ACM Symposium on Parallelism in Algorithms and Architectures</i>. Association for Computing Machinery; 2021:208-220. doi:<a href=\"https://doi.org/10.1145/3409964.3461810\">10.1145/3409964.3461810</a>","short":"A. Fedorov, N. Koval, D.-A. Alistarh, in:, Proceedings of the 33rd ACM Symposium on Parallelism in Algorithms and Architectures, Association for Computing Machinery, 2021, pp. 208–220.","chicago":"Fedorov, Alexander, Nikita Koval, and Dan-Adrian Alistarh. “A Scalable Concurrent Algorithm for Dynamic Connectivity.” In <i>Proceedings of the 33rd ACM Symposium on Parallelism in Algorithms and Architectures</i>, 208–20. Association for Computing Machinery, 2021. 
<a href=\"https://doi.org/10.1145/3409964.3461810\">https://doi.org/10.1145/3409964.3461810</a>.","mla":"Fedorov, Alexander, et al. “A Scalable Concurrent Algorithm for Dynamic Connectivity.” <i>Proceedings of the 33rd ACM Symposium on Parallelism in Algorithms and Architectures</i>, Association for Computing Machinery, 2021, pp. 208–20, doi:<a href=\"https://doi.org/10.1145/3409964.3461810\">10.1145/3409964.3461810</a>.","ieee":"A. Fedorov, N. Koval, and D.-A. Alistarh, “A scalable concurrent algorithm for dynamic connectivity,” in <i>Proceedings of the 33rd ACM Symposium on Parallelism in Algorithms and Architectures</i>, Virtual, Online, 2021, pp. 208–220.","ista":"Fedorov A, Koval N, Alistarh D-A. 2021. A scalable concurrent algorithm for dynamic connectivity. Proceedings of the 33rd ACM Symposium on Parallelism in Algorithms and Architectures. SPAA: Symposium on Parallelism in Algorithms and Architectures, 208–220."}},{"publication_status":"published","article_processing_charge":"No","publication_identifier":{"isbn":["9781450380720"]},"language":[{"iso":"eng"}],"arxiv":1,"quality_controlled":"1","date_updated":"2023-09-26T10:40:55Z","date_created":"2022-03-18T08:48:41Z","date_published":"2021-05-01T00:00:00Z","page":"71-72","year":"2021","_id":"10854","publication":"Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems","title":"Input-dynamic distributed algorithms for communication networks","conference":{"start_date":"2021-06-14","name":"SIGMETRICS: International Conference on Measurement and Modeling of Computer Systems","end_date":"2021-06-18","location":"Virtual, Online"},"doi":"10.1145/3410220.3453923","month":"05","day":"01","status":"public","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2005.07637"}],"project":[{"_id":"268A44D6-B435-11E9-9278-68D0E5697425","grant_number":"805223","call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine 
Learning"},{"_id":"26A5D39A-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"840605","name":"Coordination in constrained and natural distributed systems"}],"related_material":{"record":[{"relation":"extended_version","status":"public","id":"10855"}]},"scopus_import":"1","acknowledgement":"We thank Jukka Suomela for discussions. We also thank our shepherd Mohammad Hajiesmaili and the reviewers for their time and suggestions on how to improve the paper. This project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML), from the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska–Curie grant agreement No. 840605, from the Vienna Science and Technology Fund (WWTF) project WHATIF, ICT19-045, 2020-2024, and from the Austrian Science Fund (FWF) and netIDEE SCIENCE project P 33775-N.","citation":{"ista":"Foerster K-T, Korhonen J, Paz A, Rybicki J, Schmid S. 2021. Input-dynamic distributed algorithms for communication networks. Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems. SIGMETRICS: International Conference on Measurement and Modeling of Computer Systems, 71–72.","mla":"Foerster, Klaus-Tycho, et al. “Input-Dynamic Distributed Algorithms for Communication Networks.” <i>Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems</i>, Association for Computing Machinery, 2021, pp. 71–72, doi:<a href=\"https://doi.org/10.1145/3410220.3453923\">10.1145/3410220.3453923</a>.","ieee":"K.-T. Foerster, J. Korhonen, A. Paz, J. Rybicki, and S. 
Schmid, “Input-dynamic distributed algorithms for communication networks,” in <i>Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems</i>, Virtual, Online, 2021, pp. 71–72.","chicago":"Foerster, Klaus-Tycho, Janne Korhonen, Ami Paz, Joel Rybicki, and Stefan Schmid. “Input-Dynamic Distributed Algorithms for Communication Networks.” In <i>Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems</i>, 71–72. Association for Computing Machinery, 2021. <a href=\"https://doi.org/10.1145/3410220.3453923\">https://doi.org/10.1145/3410220.3453923</a>.","short":"K.-T. Foerster, J. Korhonen, A. Paz, J. Rybicki, S. Schmid, in:, Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems, Association for Computing Machinery, 2021, pp. 71–72.","apa":"Foerster, K.-T., Korhonen, J., Paz, A., Rybicki, J., &#38; Schmid, S. (2021). Input-dynamic distributed algorithms for communication networks. In <i>Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems</i> (pp. 71–72). Virtual, Online: Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3410220.3453923\">https://doi.org/10.1145/3410220.3453923</a>","ama":"Foerster K-T, Korhonen J, Paz A, Rybicki J, Schmid S. Input-dynamic distributed algorithms for communication networks. In: <i>Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems</i>. Association for Computing Machinery; 2021:71-72. doi:<a href=\"https://doi.org/10.1145/3410220.3453923\">10.1145/3410220.3453923</a>"},"abstract":[{"text":"Consider a distributed task where the communication network is fixed but the local inputs given to the nodes of the distributed system may change over time. 
In this work, we explore the following question: if some of the local inputs change, can an existing solution be updated efficiently, in a dynamic and distributed manner?\r\nTo address this question, we define the batch dynamic CONGEST model in which we are given a bandwidth-limited communication network and a dynamic edge labelling defines the problem input. The task is to maintain a solution to a graph problem on the labelled graph under batch changes. We investigate, when a batch of alpha edge label changes arrive, - how much time as a function of alpha we need to update an existing solution, and - how much information the nodes have to keep in local memory between batches in order to update the solution quickly.\r\nOur work lays the foundations for the theory of input-dynamic distributed network algorithms. We give a general picture of the complexity landscape in this model, design both universal algorithms and algorithms for concrete problems, and present a general framework for lower bounds. 
The diverse time complexity of our model spans from constant time, through time polynomial in alpha, and to alpha time, which we show to be enough for any task.","lang":"eng"}],"department":[{"_id":"DaAl"}],"external_id":{"arxiv":["2005.07637"]},"publisher":"Association for Computing Machinery","oa":1,"type":"conference","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Preprint","ec_funded":1,"author":[{"full_name":"Foerster, Klaus-Tycho","first_name":"Klaus-Tycho","last_name":"Foerster"},{"id":"C5402D42-15BC-11E9-A202-CA2BE6697425","last_name":"Korhonen","full_name":"Korhonen, Janne","first_name":"Janne"},{"last_name":"Paz","full_name":"Paz, Ami","first_name":"Ami"},{"first_name":"Joel","full_name":"Rybicki, Joel","last_name":"Rybicki","orcid":"0000-0002-6432-6646","id":"334EFD2E-F248-11E8-B48F-1D18A9856A87"},{"full_name":"Schmid, Stefan","first_name":"Stefan","last_name":"Schmid"}]},{"article_type":"original","author":[{"last_name":"Foerster","first_name":"Klaus-Tycho","full_name":"Foerster, Klaus-Tycho"},{"first_name":"Janne","full_name":"Korhonen, Janne","last_name":"Korhonen","id":"C5402D42-15BC-11E9-A202-CA2BE6697425"},{"last_name":"Paz","full_name":"Paz, Ami","first_name":"Ami"},{"first_name":"Joel","full_name":"Rybicki, Joel","id":"334EFD2E-F248-11E8-B48F-1D18A9856A87","last_name":"Rybicki","orcid":"0000-0002-6432-6646"},{"last_name":"Schmid","first_name":"Stefan","full_name":"Schmid, Stefan"}],"ec_funded":1,"oa_version":"Preprint","type":"journal_article","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"issue":"1","publisher":"Association for Computing Machinery","keyword":["Computer Networks and Communications","Hardware and Architecture","Safety","Risk","Reliability and Quality","Computer Science (miscellaneous)"],"external_id":{"arxiv":["2005.07637"]},"department":[{"_id":"DaAl"}],"abstract":[{"lang":"eng","text":"Consider a distributed task where the communication network is fixed but the local inputs given to the nodes of the 
distributed system may change over time. In this work, we explore the following question: if some of the local inputs change, can an existing solution be updated efficiently, in a dynamic and distributed manner? To address this question, we define the batch dynamic CONGEST model in which we are given a bandwidth-limited communication network and a dynamic edge labelling defines the problem input. The task is to maintain a solution to a graph problem on the labeled graph under batch changes. We investigate, when a batch of α edge label changes arrive, - how much time as a function of α we need to update an existing solution, and - how much information the nodes have to keep in local memory between batches in order to update the solution quickly. Our work lays the foundations for the theory of input-dynamic distributed network algorithms. We give a general picture of the complexity landscape in this model, design both universal algorithms and algorithms for concrete problems, and present a general framework for lower bounds. In particular, we derive non-trivial upper bounds for two selected, contrasting problems: maintaining a minimum spanning tree and detecting cliques."}],"intvolume":"         5","acknowledgement":"We thank Jukka Suomela for discussions. We also thank our shepherd Mohammad Hajiesmaili\r\nand the reviewers for their time and suggestions on how to improve the paper. This project\r\nhas received funding from the European Research Council (ERC) under the European Union’s\r\nHorizon 2020 research and innovation programme (grant agreement No 805223 ScaleML), from the European Union’s Horizon 2020 research and innovation programme under the Marie\r\nSkłodowska–Curie grant agreement No. 840605, from the Vienna Science and Technology Fund (WWTF) project WHATIF, ICT19-045, 2020-2024, and from the Austrian Science Fund (FWF) and netIDEE SCIENCE project P 33775-N.","citation":{"ieee":"K.-T. Foerster, J. Korhonen, A. 
Paz, J. Rybicki, and S. Schmid, “Input-dynamic distributed algorithms for communication networks,” <i>Proceedings of the ACM on Measurement and Analysis of Computing Systems</i>, vol. 5, no. 1. Association for Computing Machinery, pp. 1–33, 2021.","mla":"Foerster, Klaus-Tycho, et al. “Input-Dynamic Distributed Algorithms for Communication Networks.” <i>Proceedings of the ACM on Measurement and Analysis of Computing Systems</i>, vol. 5, no. 1, Association for Computing Machinery, 2021, pp. 1–33, doi:<a href=\"https://doi.org/10.1145/3447384\">10.1145/3447384</a>.","ista":"Foerster K-T, Korhonen J, Paz A, Rybicki J, Schmid S. 2021. Input-dynamic distributed algorithms for communication networks. Proceedings of the ACM on Measurement and Analysis of Computing Systems. 5(1), 1–33.","ama":"Foerster K-T, Korhonen J, Paz A, Rybicki J, Schmid S. Input-dynamic distributed algorithms for communication networks. <i>Proceedings of the ACM on Measurement and Analysis of Computing Systems</i>. 2021;5(1):1-33. doi:<a href=\"https://doi.org/10.1145/3447384\">10.1145/3447384</a>","apa":"Foerster, K.-T., Korhonen, J., Paz, A., Rybicki, J., &#38; Schmid, S. (2021). Input-dynamic distributed algorithms for communication networks. <i>Proceedings of the ACM on Measurement and Analysis of Computing Systems</i>. Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3447384\">https://doi.org/10.1145/3447384</a>","short":"K.-T. Foerster, J. Korhonen, A. Paz, J. Rybicki, S. Schmid, Proceedings of the ACM on Measurement and Analysis of Computing Systems 5 (2021) 1–33.","chicago":"Foerster, Klaus-Tycho, Janne Korhonen, Ami Paz, Joel Rybicki, and Stefan Schmid. “Input-Dynamic Distributed Algorithms for Communication Networks.” <i>Proceedings of the ACM on Measurement and Analysis of Computing Systems</i>. Association for Computing Machinery, 2021. 
<a href=\"https://doi.org/10.1145/3447384\">https://doi.org/10.1145/3447384</a>."},"scopus_import":"1","related_material":{"record":[{"relation":"shorter_version","id":"10854","status":"public"}]},"project":[{"grant_number":"840605","call_identifier":"H2020","name":"Coordination in constrained and natural distributed systems","_id":"26A5D39A-B435-11E9-9278-68D0E5697425"},{"_id":"268A44D6-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"805223","name":"Elastic Coordination for Scalable Machine Learning"}],"status":"public","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2005.07637"}],"day":"01","volume":5,"month":"03","doi":"10.1145/3447384","title":"Input-dynamic distributed algorithms for communication networks","publication":"Proceedings of the ACM on Measurement and Analysis of Computing Systems","year":"2021","_id":"10855","page":"1-33","date_published":"2021-03-01T00:00:00Z","date_updated":"2023-09-26T10:40:55Z","date_created":"2022-03-18T09:10:27Z","arxiv":1,"quality_controlled":"1","language":[{"iso":"eng"}],"publication_identifier":{"issn":["2476-1249"]},"article_processing_charge":"No","publication_status":"published"},{"intvolume":"         9","has_accepted_license":"1","abstract":[{"lang":"eng","text":"We study the properties of the maximal volume k-dimensional sections of the n-dimensional cube [−1, 1]n. We obtain a first order necessary condition for a k-dimensional subspace to be a local maximizer of the volume of such sections, which we formulate in a geometric way. We estimate the length of the projection of a vector of the standard basis of Rn onto a k-dimensional subspace that maximizes the volume of the intersection. 
We find the optimal upper bound on the volume of a planar section of the cube [−1, 1]n , n ≥ 2."}],"department":[{"_id":"UlWa"}],"external_id":{"arxiv":["2004.02674"],"isi":["000734286800001"]},"keyword":["Applied Mathematics","Geometry and Topology","Analysis"],"scopus_import":"1","acknowledgement":"The authors acknowledge the support of the grant of the Russian Government N 075-15-\r\n2019-1926. G.I. was supported also by the Swiss National Science Foundation grant 200021-179133. The authors are very grateful to the anonymous reviewer for valuable remarks.","citation":{"ama":"Ivanov G, Tsiutsiurupa I. On the volume of sections of the cube. <i>Analysis and Geometry in Metric Spaces</i>. 2021;9(1):1-18. doi:<a href=\"https://doi.org/10.1515/agms-2020-0103\">10.1515/agms-2020-0103</a>","apa":"Ivanov, G., &#38; Tsiutsiurupa, I. (2021). On the volume of sections of the cube. <i>Analysis and Geometry in Metric Spaces</i>. De Gruyter. <a href=\"https://doi.org/10.1515/agms-2020-0103\">https://doi.org/10.1515/agms-2020-0103</a>","short":"G. Ivanov, I. Tsiutsiurupa, Analysis and Geometry in Metric Spaces 9 (2021) 1–18.","chicago":"Ivanov, Grigory, and Igor Tsiutsiurupa. “On the Volume of Sections of the Cube.” <i>Analysis and Geometry in Metric Spaces</i>. De Gruyter, 2021. <a href=\"https://doi.org/10.1515/agms-2020-0103\">https://doi.org/10.1515/agms-2020-0103</a>.","ieee":"G. Ivanov and I. Tsiutsiurupa, “On the volume of sections of the cube,” <i>Analysis and Geometry in Metric Spaces</i>, vol. 9, no. 1. De Gruyter, pp. 1–18, 2021.","mla":"Ivanov, Grigory, and Igor Tsiutsiurupa. “On the Volume of Sections of the Cube.” <i>Analysis and Geometry in Metric Spaces</i>, vol. 9, no. 1, De Gruyter, 2021, pp. 1–18, doi:<a href=\"https://doi.org/10.1515/agms-2020-0103\">10.1515/agms-2020-0103</a>.","ista":"Ivanov G, Tsiutsiurupa I. 2021. On the volume of sections of the cube. Analysis and Geometry in Metric Spaces. 
9(1), 1–18."},"author":[{"id":"87744F66-5C6F-11EA-AFE0-D16B3DDC885E","last_name":"Ivanov","first_name":"Grigory","full_name":"Ivanov, Grigory"},{"first_name":"Igor","full_name":"Tsiutsiurupa, Igor","last_name":"Tsiutsiurupa"}],"file":[{"file_name":"2021_AnalysisMetricSpaces_Ivanov.pdf","file_id":"10857","creator":"dernst","success":1,"content_type":"application/pdf","checksum":"7e615ac8489f5eae580b6517debfdc53","file_size":789801,"date_updated":"2022-03-18T09:31:59Z","date_created":"2022-03-18T09:31:59Z","relation":"main_file","access_level":"open_access"}],"article_type":"original","publisher":"De Gruyter","issue":"1","oa":1,"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","type":"journal_article","oa_version":"Published Version","language":[{"iso":"eng"}],"quality_controlled":"1","arxiv":1,"date_updated":"2023-08-17T07:07:58Z","date_created":"2022-03-18T09:25:14Z","date_published":"2021-01-29T00:00:00Z","page":"1-18","isi":1,"publication_status":"published","article_processing_charge":"No","ddc":["510"],"tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"publication_identifier":{"issn":["2299-3274"]},"month":"01","day":"29","volume":9,"status":"public","_id":"10856","license":"https://creativecommons.org/licenses/by/4.0/","year":"2021","publication":"Analysis and Geometry in Metric Spaces","title":"On the volume of sections of the cube","file_date_updated":"2022-03-18T09:31:59Z","doi":"10.1515/agms-2020-0103"},{"status":"public","day":"14","volume":11,"month":"07","doi":"10.3390/nano11071827","title":"Enhanced thermoelectric performance of n-type Bi2Se3 nanosheets through Sn 
doping","file_date_updated":"2022-03-18T09:53:15Z","_id":"10858","year":"2021","publication":"Nanomaterials","date_published":"2021-07-14T00:00:00Z","date_updated":"2023-08-17T07:08:30Z","date_created":"2022-03-18T09:45:02Z","quality_controlled":"1","language":[{"iso":"eng"}],"publication_identifier":{"issn":["2079-4991"]},"ddc":["540"],"tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"article_processing_charge":"No","isi":1,"article_number":"1827","publication_status":"published","file":[{"file_size":4867547,"date_updated":"2022-03-18T09:53:15Z","relation":"main_file","access_level":"open_access","date_created":"2022-03-18T09:53:15Z","checksum":"f28a8b5cf80f5605828359bb398463b0","content_type":"application/pdf","creator":"dernst","success":1,"file_id":"10859","file_name":"2021_Nanomaterials_Li.pdf"}],"article_type":"original","author":[{"full_name":"Li, Mengyao","first_name":"Mengyao","last_name":"Li"},{"last_name":"Zhang","full_name":"Zhang, Yu","first_name":"Yu"},{"last_name":"Zhang","first_name":"Ting","full_name":"Zhang, Ting"},{"last_name":"Zuo","first_name":"Yong","full_name":"Zuo, Yong"},{"last_name":"Xiao","full_name":"Xiao, Ke","first_name":"Ke"},{"last_name":"Arbiol","first_name":"Jordi","full_name":"Arbiol, Jordi"},{"first_name":"Jordi","full_name":"Llorca, Jordi","last_name":"Llorca"},{"id":"2A70014E-F248-11E8-B48F-1D18A9856A87","last_name":"Liu","orcid":"0000-0001-7313-6740","first_name":"Yu","full_name":"Liu, Yu"},{"first_name":"Andreu","full_name":"Cabot, Andreu","last_name":"Cabot"}],"ec_funded":1,"oa_version":"Published Version","issue":"7","oa":1,"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","type":"journal_article","publisher":"MDPI","keyword":["General Materials Science","General Chemical 
Engineering"],"department":[{"_id":"MaIb"}],"external_id":{"isi":["000676570000001"]},"intvolume":"        11","has_accepted_license":"1","abstract":[{"text":"The cost-effective conversion of low-grade heat into electricity using thermoelectric devices requires developing alternative materials and material processing technologies able to reduce the currently high device manufacturing costs. In this direction, thermoelectric materials that do not rely on rare or toxic elements such as tellurium or lead need to be produced using high-throughput technologies not involving high temperatures and long processes. Bi2Se3 is an obvious possible Te-free alternative to Bi2Te3 for ambient temperature thermoelectric applications, but its performance is still low for practical applications, and additional efforts toward finding proper dopants are required. Here, we report a scalable method to produce Bi2Se3 nanosheets at low synthesis temperatures. We studied the influence of different dopants on the thermoelectric properties of this material. Among the elements tested, we demonstrated that Sn doping resulted in the best performance. Sn incorporation resulted in a significant improvement to the Bi2Se3 Seebeck coefficient and a reduction in the thermal conductivity in the direction of the hot-press axis, resulting in an overall 60% improvement in the thermoelectric figure of merit of Bi2Se3.","lang":"eng"}],"scopus_import":"1","citation":{"ama":"Li M, Zhang Y, Zhang T, et al. Enhanced thermoelectric performance of n-type Bi2Se3 nanosheets through Sn doping. <i>Nanomaterials</i>. 2021;11(7). doi:<a href=\"https://doi.org/10.3390/nano11071827\">10.3390/nano11071827</a>","apa":"Li, M., Zhang, Y., Zhang, T., Zuo, Y., Xiao, K., Arbiol, J., … Cabot, A. (2021). Enhanced thermoelectric performance of n-type Bi2Se3 nanosheets through Sn doping. <i>Nanomaterials</i>. MDPI. 
<a href=\"https://doi.org/10.3390/nano11071827\">https://doi.org/10.3390/nano11071827</a>","chicago":"Li, Mengyao, Yu Zhang, Ting Zhang, Yong Zuo, Ke Xiao, Jordi Arbiol, Jordi Llorca, Yu Liu, and Andreu Cabot. “Enhanced Thermoelectric Performance of N-Type Bi2Se3 Nanosheets through Sn Doping.” <i>Nanomaterials</i>. MDPI, 2021. <a href=\"https://doi.org/10.3390/nano11071827\">https://doi.org/10.3390/nano11071827</a>.","short":"M. Li, Y. Zhang, T. Zhang, Y. Zuo, K. Xiao, J. Arbiol, J. Llorca, Y. Liu, A. Cabot, Nanomaterials 11 (2021).","ieee":"M. Li <i>et al.</i>, “Enhanced thermoelectric performance of n-type Bi2Se3 nanosheets through Sn doping,” <i>Nanomaterials</i>, vol. 11, no. 7. MDPI, 2021.","mla":"Li, Mengyao, et al. “Enhanced Thermoelectric Performance of N-Type Bi2Se3 Nanosheets through Sn Doping.” <i>Nanomaterials</i>, vol. 11, no. 7, 1827, MDPI, 2021, doi:<a href=\"https://doi.org/10.3390/nano11071827\">10.3390/nano11071827</a>.","ista":"Li M, Zhang Y, Zhang T, Zuo Y, Xiao K, Arbiol J, Llorca J, Liu Y, Cabot A. 2021. Enhanced thermoelectric performance of n-type Bi2Se3 nanosheets through Sn doping. Nanomaterials. 11(7), 1827."},"acknowledgement":"M.L., Y.Z., T.Z. and K.X. thank the China Scholarship Council for their scholarship\r\nsupport. Y.L. acknowledges funding from the European Union’s Horizon 2020 research and\r\ninnovation program under the Marie Sklodowska-Curie grant agreement No. 754411. J.L. thanks the ICREA Academia program and projects MICINN/FEDER RTI2018-093996-B-C31 and G.C. 2017 SGR 128. 
ICN2 acknowledges funding from the Generalitat de Catalunya 2017 SGR 327 and the Spanish MINECO ENE2017-85087-C3.","project":[{"_id":"260C2330-B435-11E9-9278-68D0E5697425","name":"ISTplus - Postdoctoral Fellowships","grant_number":"754411","call_identifier":"H2020"}]},{"language":[{"iso":"eng"}],"arxiv":1,"quality_controlled":"1","date_updated":"2023-09-05T12:43:09Z","date_created":"2022-03-18T09:55:59Z","page":"942-963","date_published":"2021-12-18T00:00:00Z","publication_status":"published","isi":1,"article_processing_charge":"No","publication_identifier":{"issn":["0008-4395"],"eissn":["1496-4287"]},"month":"12","volume":64,"day":"18","main_file_link":[{"url":"https://arxiv.org/abs/1804.10055","open_access":"1"}],"status":"public","publication":"Canadian Mathematical Bulletin","_id":"10860","year":"2021","title":"Tight frames and related geometric problems","doi":"10.4153/s000843952000096x","abstract":[{"text":"A tight frame is the orthogonal projection of some orthonormal basis of Rn onto Rk. We show that a set of vectors is a tight frame if and only if the set of all cross products of these vectors is a tight frame. We reformulate a range of problems on the volume of projections (or sections) of regular polytopes in terms of tight frames and write a first-order necessary condition for local extrema of these problems. As applications, we prove new results for the problem of maximization of the volume of zonotopes.","lang":"eng"}],"intvolume":"        64","external_id":{"isi":["000730165300021"],"arxiv":["1804.10055"]},"department":[{"_id":"UlWa"}],"keyword":["General Mathematics","Tight frame","Grassmannian","zonotope"],"citation":{"apa":"Ivanov, G. (2021). Tight frames and related geometric problems. <i>Canadian Mathematical Bulletin</i>. Canadian Mathematical Society. <a href=\"https://doi.org/10.4153/s000843952000096x\">https://doi.org/10.4153/s000843952000096x</a>","ama":"Ivanov G. Tight frames and related geometric problems. 
<i>Canadian Mathematical Bulletin</i>. 2021;64(4):942-963. doi:<a href=\"https://doi.org/10.4153/s000843952000096x\">10.4153/s000843952000096x</a>","short":"G. Ivanov, Canadian Mathematical Bulletin 64 (2021) 942–963.","chicago":"Ivanov, Grigory. “Tight Frames and Related Geometric Problems.” <i>Canadian Mathematical Bulletin</i>. Canadian Mathematical Society, 2021. <a href=\"https://doi.org/10.4153/s000843952000096x\">https://doi.org/10.4153/s000843952000096x</a>.","ieee":"G. Ivanov, “Tight frames and related geometric problems,” <i>Canadian Mathematical Bulletin</i>, vol. 64, no. 4. Canadian Mathematical Society, pp. 942–963, 2021.","mla":"Ivanov, Grigory. “Tight Frames and Related Geometric Problems.” <i>Canadian Mathematical Bulletin</i>, vol. 64, no. 4, Canadian Mathematical Society, 2021, pp. 942–63, doi:<a href=\"https://doi.org/10.4153/s000843952000096x\">10.4153/s000843952000096x</a>.","ista":"Ivanov G. 2021. Tight frames and related geometric problems. Canadian Mathematical Bulletin. 64(4), 942–963."},"acknowledgement":"The author was supported by the Swiss National Science Foundation grant 200021_179133. The author acknowledges the financial support from the Ministry of Education and Science of the Russian Federation in the framework of MegaGrant no. 075-15-2019-1926.","scopus_import":"1","author":[{"first_name":"Grigory","full_name":"Ivanov, Grigory","id":"87744F66-5C6F-11EA-AFE0-D16B3DDC885E","last_name":"Ivanov"}],"article_type":"original","publisher":"Canadian Mathematical Society","user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","type":"journal_article","issue":"4","oa":1,"oa_version":"Preprint"},{"ddc":["570"],"citation":{"ista":"Lombardi F, Pepic S, Shriki O, Tkačik G, De Martino D. Quantifying the coexistence of neuronal oscillations and avalanches. <a href=\"https://doi.org/10.48550/ARXIV.2108.06686\">10.48550/ARXIV.2108.06686</a>.","ieee":"F. Lombardi, S. Pepic, O. Shriki, G. Tkačik, and D. 
De Martino, “Quantifying the coexistence of neuronal oscillations and avalanches.” arXiv.","mla":"Lombardi, Fabrizio, et al. <i>Quantifying the Coexistence of Neuronal Oscillations and Avalanches</i>. arXiv, doi:<a href=\"https://doi.org/10.48550/ARXIV.2108.06686\">10.48550/ARXIV.2108.06686</a>.","short":"F. Lombardi, S. Pepic, O. Shriki, G. Tkačik, D. De Martino, (n.d.).","chicago":"Lombardi, Fabrizio, Selver Pepic, Oren Shriki, Gašper Tkačik, and Daniele De Martino. “Quantifying the Coexistence of Neuronal Oscillations and Avalanches.” arXiv, n.d. <a href=\"https://doi.org/10.48550/ARXIV.2108.06686\">https://doi.org/10.48550/ARXIV.2108.06686</a>.","ama":"Lombardi F, Pepic S, Shriki O, Tkačik G, De Martino D. Quantifying the coexistence of neuronal oscillations and avalanches. doi:<a href=\"https://doi.org/10.48550/ARXIV.2108.06686\">10.48550/ARXIV.2108.06686</a>","apa":"Lombardi, F., Pepic, S., Shriki, O., Tkačik, G., &#38; De Martino, D. (n.d.). Quantifying the coexistence of neuronal oscillations and avalanches. arXiv. <a href=\"https://doi.org/10.48550/ARXIV.2108.06686\">https://doi.org/10.48550/ARXIV.2108.06686</a>"},"acknowledgement":"FL acknowledges support from the European Union’s Horizon 2020 research and innovation program under the Marie Sklodowska-Curie Grant Agreement No. 754411. GT\r\nacknowledges the support of the Austrian Science Fund (FWF) under Stand-Alone Grant\r\nNo. 
P34015.","article_processing_charge":"No","project":[{"_id":"260C2330-B435-11E9-9278-68D0E5697425","grant_number":"754411","call_identifier":"H2020","name":"ISTplus - Postdoctoral Fellowships"},{"grant_number":"P34015","name":"Efficient coding with biophysical realism","_id":"626c45b5-2b32-11ec-9570-e509828c1ba6"}],"publication_status":"submitted","date_published":"2021-08-17T00:00:00Z","page":"37","department":[{"_id":"GaTk"}],"date_created":"2022-03-21T11:41:28Z","date_updated":"2022-03-22T07:53:18Z","external_id":{"arxiv":["2108.06686"]},"arxiv":1,"abstract":[{"lang":"eng","text":"Brain dynamics display collective phenomena as diverse as neuronal oscillations and avalanches. Oscillations are rhythmic, with fluctuations occurring at a characteristic scale, whereas avalanches are scale-free cascades of neural activity. Here we show that such antithetic features can coexist in a very generic class of adaptive neural networks. In the most simple yet fully microscopic model from this class we make direct contact with human brain resting-state activity recordings via tractable inference of the model's two essential parameters. The inferred model quantitatively captures the dynamics over a broad range of scales, from single sensor fluctuations, collective behaviors of nearly-synchronous extreme events on multiple sensors, to neuronal avalanches unfolding over multiple sensors across multiple time-bins. 
Importantly, the inferred parameters correlate with model-independent signatures of \"closeness to criticality\", suggesting that the coexistence of scale-specific (neural oscillations) and scale-free (neuronal avalanches) dynamics in brain activity occurs close to a non-equilibrium critical point at the onset of self-sustained oscillations."}],"language":[{"iso":"eng"}],"oa_version":"Preprint","doi":"10.48550/ARXIV.2108.06686","oa":1,"title":"Quantifying the coexistence of neuronal oscillations and avalanches","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"preprint","year":"2021","_id":"10912","publisher":"arXiv","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2108.06686"}],"status":"public","day":"17","author":[{"full_name":"Lombardi, Fabrizio","first_name":"Fabrizio","orcid":"0000-0003-2623-5249","id":"A057D288-3E88-11E9-986D-0CF4E5697425","last_name":"Lombardi"},{"id":"F93245C4-C3CA-11E9-B4F0-C6F4E5697425","last_name":"Pepic","full_name":"Pepic, Selver","first_name":"Selver"},{"last_name":"Shriki","first_name":"Oren","full_name":"Shriki, Oren"},{"orcid":"0000-0002-6699-1455","id":"3D494DCA-F248-11E8-B48F-1D18A9856A87","last_name":"Tkačik","full_name":"Tkačik, Gašper","first_name":"Gašper"},{"last_name":"De Martino","full_name":"De Martino, Daniele","first_name":"Daniele"}],"month":"08","ec_funded":1},{"conference":{"location":"Virtual, Online","start_date":"2021-02-02","end_date":"2021-02-09","name":"AAAI: Conference on Artificial Intelligence"},"title":"Asynchronous optimization methods for efficient training of deep neural networks with guarantees","_id":"11436","year":"2021","publication":"35th AAAI Conference on Artificial Intelligence, AAAI 2021","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.1905.11845","open_access":"1"}],"status":"public","day":"18","volume":35,"month":"05","publication_identifier":{"issn":["2159-5399"],"eissn":["2374-3468"],"isbn":["9781713835974"]},"article_processing_charge":"No","publication_status":"published","date_published":"2021-05-18T00:00:00Z","page":"8209-8216","date_created":"2022-06-05T22:01:52Z","date_updated":"2022-06-07T06:53:36Z","arxiv":1,"quality_controlled":"1","language":[{"iso":"eng"}],"oa_version":"Preprint","oa":1,"issue":"9B","type":"conference","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","publisher":"AAAI Press","author":[{"first_name":"Vyacheslav","full_name":"Kungurtsev, Vyacheslav","last_name":"Kungurtsev"},{"last_name":"Egan","first_name":"Malcolm","full_name":"Egan, Malcolm"},{"last_name":"Chatterjee","id":"3C41A08A-F248-11E8-B48F-1D18A9856A87","first_name":"Bapi","full_name":"Chatterjee, Bapi"},{"full_name":"Alistarh, Dan-Adrian","first_name":"Dan-Adrian","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X"}],"ec_funded":1,"scopus_import":"1","acknowledgement":"Vyacheslav Kungurtsev was supported by the OP VVV project CZ.02.1.01/0.0/0.0/16 019/0000765 “Research Center for Informatics”. Bapi Chatterjee was supported by the European Union’s Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement No. 754411 (ISTPlus). Dan Alistarh has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML).","citation":{"ama":"Kungurtsev V, Egan M, Chatterjee B, Alistarh D-A. Asynchronous optimization methods for efficient training of deep neural networks with guarantees. In: <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i>. Vol 35. AAAI Press; 2021:8209-8216.","apa":"Kungurtsev, V., Egan, M., Chatterjee, B., &#38; Alistarh, D.-A. (2021). 
Asynchronous optimization methods for efficient training of deep neural networks with guarantees. In <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i> (Vol. 35, pp. 8209–8216). Virtual, Online: AAAI Press.","chicago":"Kungurtsev, Vyacheslav, Malcolm Egan, Bapi Chatterjee, and Dan-Adrian Alistarh. “Asynchronous Optimization Methods for Efficient Training of Deep Neural Networks with Guarantees.” In <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i>, 35:8209–16. AAAI Press, 2021.","short":"V. Kungurtsev, M. Egan, B. Chatterjee, D.-A. Alistarh, in:, 35th AAAI Conference on Artificial Intelligence, AAAI 2021, AAAI Press, 2021, pp. 8209–8216.","mla":"Kungurtsev, Vyacheslav, et al. “Asynchronous Optimization Methods for Efficient Training of Deep Neural Networks with Guarantees.” <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i>, vol. 35, no. 9B, AAAI Press, 2021, pp. 8209–16.","ieee":"V. Kungurtsev, M. Egan, B. Chatterjee, and D.-A. Alistarh, “Asynchronous optimization methods for efficient training of deep neural networks with guarantees,” in <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i>, Virtual, Online, 2021, vol. 35, no. 9B, pp. 8209–8216.","ista":"Kungurtsev V, Egan M, Chatterjee B, Alistarh D-A. 2021. Asynchronous optimization methods for efficient training of deep neural networks with guarantees. 35th AAAI Conference on Artificial Intelligence, AAAI 2021. AAAI: Conference on Artificial Intelligence vol. 
35, 8209–8216."},"project":[{"_id":"260C2330-B435-11E9-9278-68D0E5697425","name":"ISTplus - Postdoctoral Fellowships","call_identifier":"H2020","grant_number":"754411"},{"_id":"268A44D6-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"805223","name":"Elastic Coordination for Scalable Machine Learning"}],"department":[{"_id":"DaAl"}],"external_id":{"arxiv":["1905.11845"]},"intvolume":"        35","abstract":[{"text":"Asynchronous distributed algorithms are a popular way to reduce synchronization costs in large-scale optimization, and in particular for neural network training. However, for nonsmooth and nonconvex objectives, few convergence guarantees exist beyond cases where closed-form proximal operator solutions are available. As training most popular deep neural networks corresponds to optimizing nonsmooth and nonconvex objectives, there is a pressing need for such convergence guarantees. In this paper, we analyze for the first time the convergence of stochastic asynchronous optimization for this general class of objectives. In particular, we focus on stochastic subgradient methods allowing for block variable partitioning, where the shared model is asynchronously updated by concurrent processes. To this end, we use a probabilistic model which captures key features of real asynchronous scheduling between concurrent processes. Under this model, we establish convergence with probability one to an invariant set for stochastic subgradient methods with momentum. From a practical perspective, one issue with the family of algorithms that we consider is that they are not efficiently supported by machine learning frameworks, which mostly focus on distributed data-parallel strategies. To address this, we propose a new implementation strategy for shared-memory based training of deep neural networks for a partitioned but shared model in single- and multi-GPU settings. 
Based on this implementation, we achieve on average 1.2x speed-up in comparison to state-of-the-art training methods for popular image classification tasks, without compromising accuracy.","lang":"eng"}]},{"author":[{"last_name":"Alimisis","first_name":"Foivos","full_name":"Alimisis, Foivos"},{"id":"11396234-BB50-11E9-B24C-90FCE5697425","last_name":"Davies","orcid":"0000-0002-5646-9524","first_name":"Peter","full_name":"Davies, Peter"},{"last_name":"Vandereycken","first_name":"Bart","full_name":"Vandereycken, Bart"},{"first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh","orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87"}],"ec_funded":1,"type":"conference","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"publisher":"Neural Information Processing Systems Foundation","oa_version":"Published Version","abstract":[{"text":"We study efficient distributed algorithms for the fundamental problem of principal component analysis and leading eigenvector computation on the sphere, when the data are randomly distributed among a set of computational nodes. We propose a new quantized variant of Riemannian gradient descent to solve this problem, and prove that the algorithm converges with high probability under a set of necessary spherical-convexity properties. We give bounds on the number of bits transmitted by the algorithm under common initialization schemes, and investigate the dependency on the problem dimension in each case.","lang":"eng"}],"intvolume":"         4","external_id":{"arxiv":["2110.14391"]},"department":[{"_id":"DaAl"}],"acknowledgement":"We would like to thank the anonymous reviewers for helpful comments and suggestions. We also thank Aurelien Lucchi and Antonio Orvieto for fruitful discussions at an early stage of this work. FA is partially supported by the SNSF under research project No. 
192363 and conducted part of this work while at IST Austria under the European Union’s Horizon 2020 research and innovation programme (grant agreement No. 805223 ScaleML). PD partly conducted this work while at IST Austria and was supported by the European Union’s Horizon 2020 programme under the Marie Skłodowska-Curie grant agreement No. 754411.","citation":{"ista":"Alimisis F, Davies P, Vandereycken B, Alistarh D-A. 2021. Distributed principal component analysis with limited communication. Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 4, 2823–2834.","ieee":"F. Alimisis, P. Davies, B. Vandereycken, and D.-A. Alistarh, “Distributed principal component analysis with limited communication,” in <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i>, Virtual, Online, 2021, vol. 4, pp. 2823–2834.","mla":"Alimisis, Foivos, et al. “Distributed Principal Component Analysis with Limited Communication.” <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i>, vol. 4, Neural Information Processing Systems Foundation, 2021, pp. 2823–34.","short":"F. Alimisis, P. Davies, B. Vandereycken, D.-A. Alistarh, in:, Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2021, pp. 2823–2834.","chicago":"Alimisis, Foivos, Peter Davies, Bart Vandereycken, and Dan-Adrian Alistarh. “Distributed Principal Component Analysis with Limited Communication.” In <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i>, 4:2823–34. Neural Information Processing Systems Foundation, 2021.","apa":"Alimisis, F., Davies, P., Vandereycken, B., &#38; Alistarh, D.-A. (2021). 
Distributed principal component analysis with limited communication. In <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i> (Vol. 4, pp. 2823–2834). Virtual, Online: Neural Information Processing Systems Foundation.","ama":"Alimisis F, Davies P, Vandereycken B, Alistarh D-A. Distributed principal component analysis with limited communication. In: <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i>. Vol 4. Neural Information Processing Systems Foundation; 2021:2823-2834."},"scopus_import":"1","project":[{"call_identifier":"H2020","grant_number":"805223","name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425"},{"grant_number":"754411","call_identifier":"H2020","name":"ISTplus - Postdoctoral Fellowships","_id":"260C2330-B435-11E9-9278-68D0E5697425"}],"month":"12","main_file_link":[{"open_access":"1","url":"https://proceedings.neurips.cc/paper/2021/file/1680e9fa7b4dd5d62ece800239bb53bd-Paper.pdf"}],"status":"public","volume":4,"day":"01","title":"Distributed principal component analysis with limited communication","publication":"Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems","_id":"11452","year":"2021","conference":{"name":"NeurIPS: Neural Information Processing Systems","end_date":"2021-12-14","start_date":"2021-12-06","location":"Virtual, Online"},"arxiv":1,"quality_controlled":"1","language":[{"iso":"eng"}],"page":"2823-2834","date_published":"2021-12-01T00:00:00Z","date_updated":"2022-06-20T08:31:52Z","date_created":"2022-06-19T22:01:58Z","article_processing_charge":"No","publication_status":"published","publication_identifier":{"issn":["1049-5258"],"isbn":["9781713845393"]}},{"conference":{"name":"NeurIPS: Neural Information Processing Systems","start_date":"2021-12-06","end_date":"2021-12-14","location":"Virtual, 
Online"},"title":"Online learning of neural computations from sparse temporal feedback","_id":"11453","year":"2021","publication":"Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems","status":"public","main_file_link":[{"open_access":"1","url":"https://proceedings.neurips.cc/paper/2021/file/88e1ce84f9feef5a08d0df0334c53468-Paper.pdf"}],"volume":20,"day":"01","month":"12","publication_identifier":{"isbn":["9781713845393"],"issn":["1049-5258"]},"article_processing_charge":"No","publication_status":"published","date_published":"2021-12-01T00:00:00Z","page":"16437-16450","date_created":"2022-06-19T22:01:59Z","date_updated":"2022-06-20T07:12:58Z","quality_controlled":"1","language":[{"iso":"eng"}],"oa_version":"Published Version","oa":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"conference","publisher":"Neural Information Processing Systems Foundation","author":[{"last_name":"Braun","full_name":"Braun, Lukas","first_name":"Lukas"},{"full_name":"Vogels, Tim P","first_name":"Tim P","orcid":"0000-0003-3295-6181","id":"CB6FF8D2-008F-11EA-8E08-2637E6697425","last_name":"Vogels"}],"scopus_import":"1","acknowledgement":"We would like to thank Professor Dr. Henning Sprekeler for his valuable suggestions and Dr. Andrew Saxe, Milan Klöwer and Anna Wallis for their constructive feedback on the manuscript. Lukas Braun was supported by the Network of European Neuroscience Schools through their NENS Exchange Grant program, by the European Union through their European Community Action Scheme for the Mobility of University Students, the Woodward Scholarship awarded by Wadham College, Oxford and the Medical Research Council [MR/N013468/1]. Tim P. Vogels was supported by a Wellcome Trust Senior Research Fellowship [214316/Z/18/Z].","citation":{"ieee":"L. Braun and T. P. 
Vogels, “Online learning of neural computations from sparse temporal feedback,” in <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i>, Virtual, Online, 2021, vol. 20, pp. 16437–16450.","mla":"Braun, Lukas, and Tim P. Vogels. “Online Learning of Neural Computations from Sparse Temporal Feedback.” <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i>, vol. 20, Neural Information Processing Systems Foundation, 2021, pp. 16437–50.","ista":"Braun L, Vogels TP. 2021. Online learning of neural computations from sparse temporal feedback. Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 20, 16437–16450.","ama":"Braun L, Vogels TP. Online learning of neural computations from sparse temporal feedback. In: <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i>. Vol 20. Neural Information Processing Systems Foundation; 2021:16437-16450.","apa":"Braun, L., &#38; Vogels, T. P. (2021). Online learning of neural computations from sparse temporal feedback. In <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i> (Vol. 20, pp. 16437–16450). Virtual, Online: Neural Information Processing Systems Foundation.","short":"L. Braun, T.P. Vogels, in:, Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2021, pp. 16437–16450.","chicago":"Braun, Lukas, and Tim P Vogels. “Online Learning of Neural Computations from Sparse Temporal Feedback.” In <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i>, 20:16437–50. 
Neural Information Processing Systems Foundation, 2021."},"project":[{"_id":"c084a126-5a5b-11eb-8a69-d75314a70a87","grant_number":"214316/Z/18/Z","name":"What’s in a memory? Spatiotemporal dynamics in strongly coupled recurrent neuronal networks."}],"department":[{"_id":"TiVo"}],"intvolume":"        20","abstract":[{"text":"Neuronal computations depend on synaptic connectivity and intrinsic electrophysiological properties. Synaptic connectivity determines which inputs from presynaptic neurons are integrated, while cellular properties determine how inputs are filtered over time. Unlike their biological counterparts, most computational approaches to learning in simulated neural networks are limited to changes in synaptic connectivity. However, if intrinsic parameters change, neural computations are altered drastically. Here, we include the parameters that determine the intrinsic properties,\r\ne.g., time constants and reset potential, into the learning paradigm. Using sparse feedback signals that indicate target spike times, and gradient-based parameter updates, we show that the intrinsic parameters can be learned along with the synaptic weights to produce specific input-output functions. Specifically, we use a teacher-student paradigm in which a randomly initialised leaky integrate-and-fire or resonate-and-fire neuron must recover the parameters of a teacher neuron. We show that complex temporal functions can be learned online and without backpropagation through time, relying on event-based updates only. 
Our results are a step towards online learning of neural computations from ungraded and unsigned sparse feedback signals with a biologically inspired learning mechanism.","lang":"eng"}]},{"status":"public","main_file_link":[{"open_access":"1","url":"https://proceedings.neurips.cc/paper/2021/file/48000647b315f6f00f913caa757a70b3-Paper.pdf"}],"volume":34,"day":"6","month":"12","conference":{"end_date":"2021-12-14","name":"NeurIPS: Neural Information Processing Systems","start_date":"2021-12-06","location":"Virtual, Online"},"title":"AC/DC: Alternating Compressed/DeCompressed training of deep neural networks","publication":"35th Conference on Neural Information Processing Systems","year":"2021","_id":"11458","page":"8557-8570","date_published":"2021-12-06T00:00:00Z","date_updated":"2023-06-01T12:54:45Z","date_created":"2022-06-20T12:11:53Z","arxiv":1,"quality_controlled":"1","language":[{"iso":"eng"}],"publication_identifier":{"issn":["1049-5258"],"isbn":["9781713845393"]},"article_processing_charge":"No","publication_status":"published","author":[{"first_name":"Elena-Alexandra","full_name":"Peste, Elena-Alexandra","last_name":"Peste","id":"32D78294-F248-11E8-B48F-1D18A9856A87"},{"full_name":"Iofinova, Eugenia B","first_name":"Eugenia B","orcid":"0000-0002-7778-3221","last_name":"Iofinova","id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117"},{"full_name":"Vladu, Adrian","first_name":"Adrian","last_name":"Vladu"},{"first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh","orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87"}],"ec_funded":1,"oa_version":"Published Version","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"conference","oa":1,"publisher":"Curran Associates","external_id":{"arxiv":["2106.12379"]},"department":[{"_id":"GradSch"},{"_id":"DaAl"}],"abstract":[{"lang":"eng","text":"The increasing computational requirements of deep neural networks (DNNs) have led to significant interest in obtaining DNN models that 
are sparse, yet accurate. Recent work has investigated the even harder case of sparse training, where the DNN weights are, for as much as possible, already sparse to reduce computational costs during training. Existing sparse training methods are often empirical and can have lower accuracy relative to the dense baseline. In this paper, we present a general approach called Alternating Compressed/DeCompressed (AC/DC) training of DNNs, demonstrate convergence for a variant of the algorithm, and show that AC/DC outperforms existing sparse training methods in accuracy at similar computational budgets; at high sparsity levels, AC/DC even outperforms existing methods that rely on accurate pre-trained dense models. An important property of AC/DC is that it allows co-training of dense and sparse models, yielding accurate sparse–dense model pairs at the end of the training process. This is useful in practice, where compressed variants may be desirable for deployment in resource-constrained settings without re-doing the entire training flow, and also provides us with insights into the accuracy gap between dense and compressed models. The code is available at: https://github.com/IST-DASLab/ACDC."}],"intvolume":"        34","acknowledged_ssus":[{"_id":"ScienComp"}],"citation":{"ieee":"E.-A. Peste, E. B. Iofinova, A. Vladu, and D.-A. Alistarh, “AC/DC: Alternating Compressed/DeCompressed training of deep neural networks,” in <i>35th Conference on Neural Information Processing Systems</i>, Virtual, Online, 2021, vol. 34, pp. 8557–8570.","mla":"Peste, Elena-Alexandra, et al. “AC/DC: Alternating Compressed/DeCompressed Training of Deep Neural Networks.” <i>35th Conference on Neural Information Processing Systems</i>, vol. 34, Curran Associates, 2021, pp. 8557–70.","ista":"Peste E-A, Iofinova EB, Vladu A, Alistarh D-A. 2021. AC/DC: Alternating Compressed/DeCompressed training of deep neural networks. 35th Conference on Neural Information Processing Systems. 
NeurIPS: Neural Information Processing Systems vol. 34, 8557–8570.","apa":"Peste, E.-A., Iofinova, E. B., Vladu, A., &#38; Alistarh, D.-A. (2021). AC/DC: Alternating Compressed/DeCompressed training of deep neural networks. In <i>35th Conference on Neural Information Processing Systems</i> (Vol. 34, pp. 8557–8570). Virtual, Online: Curran Associates.","ama":"Peste E-A, Iofinova EB, Vladu A, Alistarh D-A. AC/DC: Alternating Compressed/DeCompressed training of deep neural networks. In: <i>35th Conference on Neural Information Processing Systems</i>. Vol 34. Curran Associates; 2021:8557-8570.","chicago":"Peste, Elena-Alexandra, Eugenia B Iofinova, Adrian Vladu, and Dan-Adrian Alistarh. “AC/DC: Alternating Compressed/DeCompressed Training of Deep Neural Networks.” In <i>35th Conference on Neural Information Processing Systems</i>, 34:8557–70. Curran Associates, 2021.","short":"E.-A. Peste, E.B. Iofinova, A. Vladu, D.-A. Alistarh, in:, 35th Conference on Neural Information Processing Systems, Curran Associates, 2021, pp. 8557–8570."},"acknowledgement":"This project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML), and a CNRS PEPS grant. This research was supported by the Scientific Service Units (SSU) of IST Austria through resources provided by Scientific Computing (SciComp). 
We would also like to thank Christoph Lampert for his feedback on an earlier version of this work, as well as for providing hardware for the Transformer-XL experiments.","scopus_import":"1","related_material":{"record":[{"id":"13074","status":"public","relation":"dissertation_contains"}]},"project":[{"_id":"268A44D6-B435-11E9-9278-68D0E5697425","grant_number":"805223","call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine Learning"}]},{"conference":{"name":"NeurIPS: Neural Information Processing Systems","start_date":"2021-12-06","end_date":"2021-12-14","location":"Virtual, Online"},"title":"M-FAC: Efficient matrix-free approximations of second-order information","publication":"35th Conference on Neural Information Processing Systems","_id":"11463","year":"2021","main_file_link":[{"url":"https://proceedings.neurips.cc/paper/2021/file/7cfd5df443b4eb0d69886a583b33de4c-Paper.pdf","open_access":"1"}],"status":"public","day":"06","volume":34,"month":"12","publication_identifier":{"isbn":["9781713845393"],"issn":["1049-5258"]},"article_processing_charge":"No","publication_status":"published","page":"14873-14886","date_published":"2021-12-06T00:00:00Z","date_updated":"2022-06-27T07:05:12Z","date_created":"2022-06-26T22:01:35Z","arxiv":1,"quality_controlled":"1","language":[{"iso":"eng"}],"oa_version":"Published Version","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"conference","oa":1,"publisher":"Curran Associates","author":[{"first_name":"Elias","full_name":"Frantar, Elias","last_name":"Frantar","id":"09a8f98d-ec99-11ea-ae11-c063a7b7fe5f"},{"full_name":"Kurtic, Eldar","first_name":"Eldar","id":"47beb3a5-07b5-11eb-9b87-b108ec578218","last_name":"Kurtic"},{"full_name":"Alistarh, Dan-Adrian","first_name":"Dan-Adrian","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X"}],"ec_funded":1,"citation":{"mla":"Frantar, Elias, et al. 
“M-FAC: Efficient Matrix-Free Approximations of Second-Order Information.” <i>35th Conference on Neural Information Processing Systems</i>, vol. 34, Curran Associates, 2021, pp. 14873–86.","ieee":"E. Frantar, E. Kurtic, and D.-A. Alistarh, “M-FAC: Efficient matrix-free approximations of second-order information,” in <i>35th Conference on Neural Information Processing Systems</i>, Virtual, Online, 2021, vol. 34, pp. 14873–14886.","ista":"Frantar E, Kurtic E, Alistarh D-A. 2021. M-FAC: Efficient matrix-free approximations of second-order information. 35th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 34, 14873–14886.","apa":"Frantar, E., Kurtic, E., &#38; Alistarh, D.-A. (2021). M-FAC: Efficient matrix-free approximations of second-order information. In <i>35th Conference on Neural Information Processing Systems</i> (Vol. 34, pp. 14873–14886). Virtual, Online: Curran Associates.","ama":"Frantar E, Kurtic E, Alistarh D-A. M-FAC: Efficient matrix-free approximations of second-order information. In: <i>35th Conference on Neural Information Processing Systems</i>. Vol 34. Curran Associates; 2021:14873-14886.","chicago":"Frantar, Elias, Eldar Kurtic, and Dan-Adrian Alistarh. “M-FAC: Efficient Matrix-Free Approximations of Second-Order Information.” In <i>35th Conference on Neural Information Processing Systems</i>, 34:14873–86. Curran Associates, 2021.","short":"E. Frantar, E. Kurtic, D.-A. Alistarh, in:, 35th Conference on Neural Information Processing Systems, Curran Associates, 2021, pp. 
14873–14886."},"acknowledgement":"We gratefully acknowledge funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML), as well as computational support from Amazon Web Services (AWS) EC2.","scopus_import":"1","project":[{"_id":"268A44D6-B435-11E9-9278-68D0E5697425","grant_number":"805223","call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine Learning"}],"external_id":{"arxiv":["2010.08222"]},"department":[{"_id":"DaAl"}],"abstract":[{"text":"Efficiently approximating local curvature information of the loss function is a key tool for optimization and compression of deep neural networks. Yet, most existing methods to approximate second-order information have high computational\r\nor storage costs, which limits their practicality. In this work, we investigate matrix-free, linear-time approaches for estimating Inverse-Hessian Vector Products (IHVPs) for the case when the Hessian can be approximated as a sum of rank-one matrices, as in the classic approximation of the Hessian by the empirical Fisher matrix. We propose two new algorithms: the first is tailored towards network compression and can compute the IHVP for dimension d, if the Hessian is given as a sum of m rank-one matrices, using O(dm2) precomputation, O(dm) cost for computing the IHVP, and query cost O(m) for any single element of the inverse Hessian. The second algorithm targets an optimization setting, where we wish to compute the product between the inverse Hessian, estimated over a sliding window of optimization steps, and a given gradient direction, as required for preconditioned SGD. We give an algorithm with cost O(dm + m2) for computing the IHVP and O(dm + m3) for adding or removing any gradient from the sliding window. These\r\ntwo algorithms yield state-of-the-art results for network pruning and optimization with lower computational overhead relative to existing second-order methods. 
Implementations are available at [9] and [17].","lang":"eng"}],"intvolume":"        34"},{"publication_status":"published","article_processing_charge":"No","publication_identifier":{"issn":["1049-5258"],"isbn":["9781713845393"]},"language":[{"iso":"eng"}],"quality_controlled":"1","arxiv":1,"date_created":"2022-06-26T22:01:35Z","date_updated":"2022-06-27T06:54:31Z","page":"7254-7266","date_published":"2021-12-06T00:00:00Z","publication":"35th Conference on Neural Information Processing Systems","_id":"11464","year":"2021","title":"Towards tight communication lower bounds for distributed optimisation","conference":{"end_date":"2021-12-14","start_date":"2021-12-06","name":"NeurIPS: Neural Information Processing Systems","location":"Virtual, Online"},"month":"12","volume":34,"day":"06","status":"public","main_file_link":[{"url":"https://proceedings.neurips.cc/paper/2021/file/3b92d18aa7a6176dd37d372bc2f1eb71-Paper.pdf","open_access":"1"}],"project":[{"name":"Elastic Coordination for Scalable Machine Learning","grant_number":"805223","call_identifier":"H2020","_id":"268A44D6-B435-11E9-9278-68D0E5697425"}],"citation":{"mla":"Alistarh, Dan-Adrian, and Janne Korhonen. “Towards Tight Communication Lower Bounds for Distributed Optimisation.” <i>35th Conference on Neural Information Processing Systems</i>, vol. 34, Curran Associates, 2021, pp. 7254–66.","ieee":"D.-A. Alistarh and J. Korhonen, “Towards tight communication lower bounds for distributed optimisation,” in <i>35th Conference on Neural Information Processing Systems</i>, Virtual, Online, 2021, vol. 34, pp. 7254–7266.","ista":"Alistarh D-A, Korhonen J. 2021. Towards tight communication lower bounds for distributed optimisation. 35th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 34, 7254–7266.","apa":"Alistarh, D.-A., &#38; Korhonen, J. (2021). Towards tight communication lower bounds for distributed optimisation. 
In <i>35th Conference on Neural Information Processing Systems</i> (Vol. 34, pp. 7254–7266). Virtual, Online: Curran Associates.","ama":"Alistarh D-A, Korhonen J. Towards tight communication lower bounds for distributed optimisation. In: <i>35th Conference on Neural Information Processing Systems</i>. Vol 34. Curran Associates; 2021:7254-7266.","chicago":"Alistarh, Dan-Adrian, and Janne Korhonen. “Towards Tight Communication Lower Bounds for Distributed Optimisation.” In <i>35th Conference on Neural Information Processing Systems</i>, 34:7254–66. Curran Associates, 2021.","short":"D.-A. Alistarh, J. Korhonen, in:, 35th Conference on Neural Information Processing Systems, Curran Associates, 2021, pp. 7254–7266."},"acknowledgement":"We thank the NeurIPS reviewers for insightful comments that helped us improve the positioning of our results, as well as for pointing out the subsampling approach for complementing the randomised lower bound. We also thank Foivos Alimisis and Peter Davies for useful discussions. This project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML).","scopus_import":"1","abstract":[{"text":"We consider a standard distributed optimisation setting where N machines, each holding a d-dimensional function\r\nfi, aim to jointly minimise the sum of the functions ∑Ni=1fi(x). This problem arises naturally in large-scale distributed optimisation, where a standard solution is to apply variants of (stochastic) gradient descent. We focus on the communication complexity of this problem: our main result provides the first fully unconditional bounds on total number of bits which need to be sent and received by the N machines to solve this problem under point-to-point communication, within a given error-tolerance. 
Specifically, we show that Ω(Ndlogd/Nε) total bits need to be communicated between the machines to find an additive ϵ-approximation to the minimum of ∑Ni=1fi(x). The result holds for both deterministic and randomised algorithms, and, importantly, requires no assumptions on the algorithm structure. The lower bound is tight under certain restrictions on parameter values, and is matched within constant factors for quadratic objectives by a new variant of quantised gradient descent, which we describe and analyse. Our results bring over tools from communication complexity to distributed optimisation, which has potential for further applications.","lang":"eng"}],"intvolume":"        34","external_id":{"arxiv":["2010.08222"]},"department":[{"_id":"DaAl"}],"publisher":"Curran Associates","type":"conference","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"oa_version":"Published Version","ec_funded":1,"author":[{"orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","last_name":"Alistarh","first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian"},{"first_name":"Janne","full_name":"Korhonen, Janne","id":"C5402D42-15BC-11E9-A202-CA2BE6697425","last_name":"Korhonen"}]},{"publication_identifier":{"issn":["17597684"],"eissn":["17597692"]},"ddc":["570"],"tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"article_processing_charge":"Yes (via OA deal)","isi":1,"publication_status":"published","article_number":"e383","date_published":"2021-04-15T00:00:00Z","date_updated":"2024-03-07T15:03:00Z","date_created":"2020-05-24T22:01:00Z","quality_controlled":"1","language":[{"iso":"eng"}],"pmid":1,"doi":"10.1002/wdev.383","title":"Regulation of size and scale in vertebrate spinal cord development","file_date_updated":"2020-11-24T13:11:39Z","year":"2021","_id":"7883","publication":"Wiley 
Interdisciplinary Reviews: Developmental Biology","status":"public","day":"15","month":"04","scopus_import":"1","related_material":{"record":[{"status":"public","id":"14323","relation":"dissertation_contains"}]},"citation":{"ieee":"K. Kuzmicz-Kowalska and A. Kicheva, “Regulation of size and scale in vertebrate spinal cord development,” <i>Wiley Interdisciplinary Reviews: Developmental Biology</i>. Wiley, 2021.","mla":"Kuzmicz-Kowalska, Katarzyna, and Anna Kicheva. “Regulation of Size and Scale in Vertebrate Spinal Cord Development.” <i>Wiley Interdisciplinary Reviews: Developmental Biology</i>, e383, Wiley, 2021, doi:<a href=\"https://doi.org/10.1002/wdev.383\">10.1002/wdev.383</a>.","ista":"Kuzmicz-Kowalska K, Kicheva A. 2021. Regulation of size and scale in vertebrate spinal cord development. Wiley Interdisciplinary Reviews: Developmental Biology., e383.","ama":"Kuzmicz-Kowalska K, Kicheva A. Regulation of size and scale in vertebrate spinal cord development. <i>Wiley Interdisciplinary Reviews: Developmental Biology</i>. 2021. doi:<a href=\"https://doi.org/10.1002/wdev.383\">10.1002/wdev.383</a>","apa":"Kuzmicz-Kowalska, K., &#38; Kicheva, A. (2021). Regulation of size and scale in vertebrate spinal cord development. <i>Wiley Interdisciplinary Reviews: Developmental Biology</i>. Wiley. <a href=\"https://doi.org/10.1002/wdev.383\">https://doi.org/10.1002/wdev.383</a>","chicago":"Kuzmicz-Kowalska, Katarzyna, and Anna Kicheva. “Regulation of Size and Scale in Vertebrate Spinal Cord Development.” <i>Wiley Interdisciplinary Reviews: Developmental Biology</i>. Wiley, 2021. <a href=\"https://doi.org/10.1002/wdev.383\">https://doi.org/10.1002/wdev.383</a>.","short":"K. Kuzmicz-Kowalska, A. 
Kicheva, Wiley Interdisciplinary Reviews: Developmental Biology (2021)."},"acknowledgement":"Austrian Academy of Sciences, Grant/Award Number: DOC fellowship for Katarzyna Kuzmicz-Kowalska; Austrian Science Fund, Grant/Award Number: F78 (Stem Cell Modulation); H2020 European Research Council, Grant/Award Number: 680037","project":[{"name":"Coordination of Patterning And Growth In the Spinal Cord","grant_number":"680037","call_identifier":"H2020","_id":"B6FC0238-B512-11E9-945C-1524E6697425"},{"name":"The role of morphogens in the regulation of neural tube growth","_id":"267AF0E4-B435-11E9-9278-68D0E5697425"},{"grant_number":"F07802","name":"Morphogen control of growth and pattern in the spinal cord","_id":"059DF620-7A3F-11EA-A408-12923DDC885E"}],"department":[{"_id":"AnKi"}],"external_id":{"isi":["000531419400001"],"pmid":["32391980"]},"has_accepted_license":"1","abstract":[{"lang":"eng","text":"All vertebrates have a spinal cord with dimensions and shape specific to their species. Yet how species‐specific organ size and shape are achieved is a fundamental unresolved question in biology. The formation and sculpting of organs begins during embryonic development. As it develops, the spinal cord extends in anterior–posterior direction in synchrony with the overall growth of the body. The dorsoventral (DV) and apicobasal lengths of the spinal cord neuroepithelium also change, while at the same time a characteristic pattern of neural progenitor subtypes along the DV axis is established and elaborated. At the basis of these changes in tissue size and shape are biophysical determinants, such as the change in cell number, cell size and shape, and anisotropic tissue growth. These processes are controlled by global tissue‐scale regulators, such as morphogen signaling gradients as well as mechanical forces. 
Current challenges in the field are to uncover how these tissue‐scale regulatory mechanisms are translated to the cellular and molecular level, and how regulation of distinct cellular processes gives rise to an overall defined size. Addressing these questions will help not only to achieve a better understanding of how size is controlled, but also of how tissue size is coordinated with the specification of pattern."}],"oa_version":"Published Version","oa":1,"user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","type":"journal_article","publisher":"Wiley","file":[{"checksum":"f0a7745d48afa09ea7025e876a0145a8","content_type":"application/pdf","file_size":2527276,"date_created":"2020-11-24T13:11:39Z","relation":"main_file","date_updated":"2020-11-24T13:11:39Z","access_level":"open_access","creator":"dernst","success":1,"file_id":"8800","file_name":"2020_WIREs_DevBio_KuzmiczKowalska.pdf"}],"article_type":"original","author":[{"id":"4CED352A-F248-11E8-B48F-1D18A9856A87","last_name":"Kuzmicz-Kowalska","full_name":"Kuzmicz-Kowalska, Katarzyna","first_name":"Katarzyna"},{"id":"3959A2A0-F248-11E8-B48F-1D18A9856A87","last_name":"Kicheva","orcid":"0000-0003-4509-4998","first_name":"Anna","full_name":"Kicheva, Anna"}],"ec_funded":1},{"publication":"Reviews in Mathematical Physics","year":"2021","_id":"7900","title":"Bosonic collective excitations in Fermi gases","doi":"10.1142/s0129055x20600090","month":"01","day":"01","volume":33,"main_file_link":[{"url":"https://arxiv.org/abs/1910.08190","open_access":"1"}],"status":"public","publication_status":"published","article_number":"2060009","isi":1,"article_processing_charge":"No","publication_identifier":{"issn":["0129-055X"],"eissn":["1793-6659"]},"language":[{"iso":"eng"}],"quality_controlled":"1","arxiv":1,"date_created":"2020-05-28T16:47:55Z","date_updated":"2023-09-05T16:07:40Z","date_published":"2021-01-01T00:00:00Z","publisher":"World 
Scientific","user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","type":"journal_article","issue":"1","oa":1,"oa_version":"Preprint","ec_funded":1,"author":[{"id":"3DE6C32A-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-1071-6091","last_name":"Benedikter","full_name":"Benedikter, Niels P","first_name":"Niels P"}],"article_type":"original","project":[{"_id":"25C6DC12-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"694227","name":"Analysis of quantum many-body systems"}],"citation":{"ieee":"N. P. Benedikter, “Bosonic collective excitations in Fermi gases,” <i>Reviews in Mathematical Physics</i>, vol. 33, no. 1. World Scientific, 2021.","mla":"Benedikter, Niels P. “Bosonic Collective Excitations in Fermi Gases.” <i>Reviews in Mathematical Physics</i>, vol. 33, no. 1, 2060009, World Scientific, 2021, doi:<a href=\"https://doi.org/10.1142/s0129055x20600090\">10.1142/s0129055x20600090</a>.","ista":"Benedikter NP. 2021. Bosonic collective excitations in Fermi gases. Reviews in Mathematical Physics. 33(1), 2060009.","apa":"Benedikter, N. P. (2021). Bosonic collective excitations in Fermi gases. <i>Reviews in Mathematical Physics</i>. World Scientific. <a href=\"https://doi.org/10.1142/s0129055x20600090\">https://doi.org/10.1142/s0129055x20600090</a>","ama":"Benedikter NP. Bosonic collective excitations in Fermi gases. <i>Reviews in Mathematical Physics</i>. 2021;33(1). doi:<a href=\"https://doi.org/10.1142/s0129055x20600090\">10.1142/s0129055x20600090</a>","short":"N.P. Benedikter, Reviews in Mathematical Physics 33 (2021).","chicago":"Benedikter, Niels P. “Bosonic Collective Excitations in Fermi Gases.” <i>Reviews in Mathematical Physics</i>. World Scientific, 2021. <a href=\"https://doi.org/10.1142/s0129055x20600090\">https://doi.org/10.1142/s0129055x20600090</a>."},"scopus_import":"1","abstract":[{"lang":"eng","text":"Hartree–Fock theory has been justified as a mean-field approximation for fermionic systems. 
However, it suffers from some defects in predicting physical properties, making necessary a theory of quantum correlations. Recently, bosonization of many-body correlations has been rigorously justified as an upper bound on the correlation energy at high density with weak interactions. We review the bosonic approximation, deriving an effective Hamiltonian. We then show that for systems with Coulomb interaction this effective theory predicts collective excitations (plasmons) in accordance with the random phase approximation of Bohm and Pines, and with experimental observation."}],"intvolume":"        33","external_id":{"isi":["000613313200010"],"arxiv":["1910.08190"]},"department":[{"_id":"RoSe"}]},{"abstract":[{"lang":"eng","text":"We derive rigorously the leading order of the correlation energy of a Fermi gas in a scaling regime of high density and weak interaction. The result verifies the prediction of the random-phase approximation. Our proof refines the method of collective bosonization in three dimensions. We approximately diagonalize an effective Hamiltonian describing approximately bosonic collective excitations around the Hartree–Fock state, while showing that gapless and non-collective excitations have only a negligible effect on the ground state energy."}],"has_accepted_license":"1","intvolume":"       225","external_id":{"arxiv":["2005.08933"],"isi":["000646573600001"]},"department":[{"_id":"RoSe"}],"project":[{"_id":"B67AFEDC-15C9-11EA-A837-991A96BB2854","name":"IST Austria Open Access Fund"},{"name":"Analysis of quantum many-body systems","grant_number":"694227","call_identifier":"H2020","_id":"25C6DC12-B435-11E9-9278-68D0E5697425"}],"citation":{"chicago":"Benedikter, Niels P, Phan Thành Nam, Marcello Porta, Benjamin Schlein, and Robert Seiringer. “Correlation Energy of a Weakly Interacting Fermi Gas.” <i>Inventiones Mathematicae</i>. Springer, 2021. 
<a href=\"https://doi.org/10.1007/s00222-021-01041-5\">https://doi.org/10.1007/s00222-021-01041-5</a>.","short":"N.P. Benedikter, P.T. Nam, M. Porta, B. Schlein, R. Seiringer, Inventiones Mathematicae 225 (2021) 885–979.","ama":"Benedikter NP, Nam PT, Porta M, Schlein B, Seiringer R. Correlation energy of a weakly interacting Fermi gas. <i>Inventiones Mathematicae</i>. 2021;225:885-979. doi:<a href=\"https://doi.org/10.1007/s00222-021-01041-5\">10.1007/s00222-021-01041-5</a>","apa":"Benedikter, N. P., Nam, P. T., Porta, M., Schlein, B., &#38; Seiringer, R. (2021). Correlation energy of a weakly interacting Fermi gas. <i>Inventiones Mathematicae</i>. Springer. <a href=\"https://doi.org/10.1007/s00222-021-01041-5\">https://doi.org/10.1007/s00222-021-01041-5</a>","ista":"Benedikter NP, Nam PT, Porta M, Schlein B, Seiringer R. 2021. Correlation energy of a weakly interacting Fermi gas. Inventiones Mathematicae. 225, 885–979.","ieee":"N. P. Benedikter, P. T. Nam, M. Porta, B. Schlein, and R. Seiringer, “Correlation energy of a weakly interacting Fermi gas,” <i>Inventiones Mathematicae</i>, vol. 225. Springer, pp. 885–979, 2021.","mla":"Benedikter, Niels P., et al. “Correlation Energy of a Weakly Interacting Fermi Gas.” <i>Inventiones Mathematicae</i>, vol. 225, Springer, 2021, pp. 885–979, doi:<a href=\"https://doi.org/10.1007/s00222-021-01041-5\">10.1007/s00222-021-01041-5</a>."},"acknowledgement":"We thank Christian Hainzl for helpful discussions and a referee for very careful reading of the paper and many helpful suggestions. NB and RS were supported by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No. 694227). Part of the research of NB was conducted on the RZD18 Nice–Milan–Vienna–Moscow. NB thanks Elliott H. Lieb and Peter Otte for explanations about the Luttinger model. 
PTN has received funding from the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under Germany’s Excellence Strategy (EXC-2111-390814868). MP acknowledges financial support from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (ERC StG MaMBoQ, grant agreement No. 802901). BS gratefully acknowledges financial support from the NCCR SwissMAP, from the Swiss National Science Foundation through the Grant “Dynamical and energetic properties of Bose-Einstein condensates” and from the European Research Council through the ERC-AdG CLaQS (grant agreement No. 834782). All authors acknowledge support for workshop participation from Mathematisches Forschungsinstitut Oberwolfach (Leibniz Association). NB, PTN, BS, and RS acknowledge support for workshop participation from Fondation des Treilles.","scopus_import":"1","ec_funded":1,"author":[{"first_name":"Niels P","full_name":"Benedikter, Niels P","id":"3DE6C32A-F248-11E8-B48F-1D18A9856A87","last_name":"Benedikter","orcid":"0000-0002-1071-6091"},{"first_name":"Phan Thành","full_name":"Nam, Phan Thành","last_name":"Nam"},{"full_name":"Porta, Marcello","first_name":"Marcello","last_name":"Porta"},{"full_name":"Schlein, Benjamin","first_name":"Benjamin","last_name":"Schlein"},{"first_name":"Robert","full_name":"Seiringer, Robert","orcid":"0000-0002-6781-0521","id":"4AFD0470-F248-11E8-B48F-1D18A9856A87","last_name":"Seiringer"}],"article_type":"original","file":[{"file_size":1089319,"access_level":"open_access","date_created":"2022-05-16T12:23:40Z","relation":"main_file","date_updated":"2022-05-16T12:23:40Z","content_type":"application/pdf","checksum":"f38c79dfd828cdc7f49a34b37b83d376","success":1,"creator":"dernst","file_id":"11386","file_name":"2021_InventMath_Benedikter.pdf"}],"publisher":"Springer","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","type":"journal_article","oa":1,"oa_version":"Published 
Version","language":[{"iso":"eng"}],"arxiv":1,"quality_controlled":"1","date_updated":"2023-08-21T06:30:30Z","date_created":"2020-05-28T16:48:20Z","page":"885-979","date_published":"2021-05-03T00:00:00Z","publication_status":"published","isi":1,"article_processing_charge":"Yes (via OA deal)","tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"ddc":["510"],"publication_identifier":{"issn":["0020-9910"],"eissn":["1432-1297"]},"month":"05","day":"03","volume":225,"status":"public","publication":"Inventiones Mathematicae","_id":"7901","year":"2021","file_date_updated":"2022-05-16T12:23:40Z","title":"Correlation energy of a weakly interacting Fermi gas","doi":"10.1007/s00222-021-01041-5"},{"status":"public","day":"01","volume":65,"month":"06","doi":"10.1007/s00454-020-00206-y","title":"Sheaf-theoretic stratification learning from geometric and topological perspectives","file_date_updated":"2020-11-25T09:06:41Z","_id":"7905","year":"2021","publication":"Discrete and Computational Geometry","date_published":"2021-06-01T00:00:00Z","page":"1166-1198","date_updated":"2024-03-07T15:01:58Z","date_created":"2020-05-30T10:26:04Z","arxiv":1,"quality_controlled":"1","language":[{"iso":"eng"}],"publication_identifier":{"eissn":["1432-0444"],"issn":["0179-5376"]},"ddc":["510"],"tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"article_processing_charge":"Yes (via OA 
deal)","isi":1,"publication_status":"published","file":[{"file_size":1013730,"date_updated":"2020-11-25T09:06:41Z","date_created":"2020-11-25T09:06:41Z","relation":"main_file","access_level":"open_access","checksum":"487a84ea5841b75f04f66d7ebd71b67e","content_type":"application/pdf","success":1,"creator":"dernst","file_id":"8803","file_name":"2020_DiscreteCompGeometry_Brown.pdf"}],"article_type":"original","author":[{"id":"70B7FDF6-608D-11E9-9333-8535E6697425","last_name":"Brown","full_name":"Brown, Adam","first_name":"Adam"},{"full_name":"Wang, Bei","first_name":"Bei","last_name":"Wang"}],"oa_version":"Published Version","oa":1,"type":"journal_article","user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","publisher":"Springer Nature","department":[{"_id":"HeEd"}],"external_id":{"arxiv":["1712.07734"],"isi":["000536324700001"]},"intvolume":"        65","has_accepted_license":"1","abstract":[{"lang":"eng","text":"We investigate a sheaf-theoretic interpretation of stratification learning from geometric and topological perspectives. Our main result is the construction of stratification learning algorithms framed in terms of a sheaf on a partially ordered set with the Alexandroff topology. We prove that the resulting decomposition is the unique minimal stratification for which the strata are homogeneous and the given sheaf is constructible. In particular, when we choose to work with the local homology sheaf, our algorithm gives an alternative to the local homology transfer algorithm given in Bendich et al. (Proceedings of the 23rd Annual ACM-SIAM Symposium on Discrete Algorithms, pp. 1355–1370, ACM, New York, 2012), and the cohomology stratification algorithm given in Nanda (Found. Comput. Math. 20(2), 195–222, 2020). Additionally, we give examples of stratifications based on the geometric techniques of Breiding et al. (Rev. Mat. Complut. 
31(3), 545–593, 2018), illustrating how the sheaf-theoretic approach can be used to study stratifications from both topological and geometric perspectives. This approach also points toward future applications of sheaf theory in the study of topological data analysis by illustrating the utility of the language of sheaf theory in generalizing existing algorithms."}],"scopus_import":"1","citation":{"ista":"Brown A, Wang B. 2021. Sheaf-theoretic stratification learning from geometric and topological perspectives. Discrete and Computational Geometry. 65, 1166–1198.","ieee":"A. Brown and B. Wang, “Sheaf-theoretic stratification learning from geometric and topological perspectives,” <i>Discrete and Computational Geometry</i>, vol. 65. Springer Nature, pp. 1166–1198, 2021.","mla":"Brown, Adam, and Bei Wang. “Sheaf-Theoretic Stratification Learning from Geometric and Topological Perspectives.” <i>Discrete and Computational Geometry</i>, vol. 65, Springer Nature, 2021, pp. 1166–98, doi:<a href=\"https://doi.org/10.1007/s00454-020-00206-y\">10.1007/s00454-020-00206-y</a>.","chicago":"Brown, Adam, and Bei Wang. “Sheaf-Theoretic Stratification Learning from Geometric and Topological Perspectives.” <i>Discrete and Computational Geometry</i>. Springer Nature, 2021. <a href=\"https://doi.org/10.1007/s00454-020-00206-y\">https://doi.org/10.1007/s00454-020-00206-y</a>.","short":"A. Brown, B. Wang, Discrete and Computational Geometry 65 (2021) 1166–1198.","apa":"Brown, A., &#38; Wang, B. (2021). Sheaf-theoretic stratification learning from geometric and topological perspectives. <i>Discrete and Computational Geometry</i>. Springer Nature. <a href=\"https://doi.org/10.1007/s00454-020-00206-y\">https://doi.org/10.1007/s00454-020-00206-y</a>","ama":"Brown A, Wang B. Sheaf-theoretic stratification learning from geometric and topological perspectives. <i>Discrete and Computational Geometry</i>. 2021;65:1166-1198. 
doi:<a href=\"https://doi.org/10.1007/s00454-020-00206-y\">10.1007/s00454-020-00206-y</a>"},"acknowledgement":"Open access funding provided by Institute of Science and Technology (IST Austria). This work was partially supported by NSF IIS-1513616 and NSF ABI-1661375. The authors would like to thank the anonymous referees for their insightful comments.","project":[{"name":"IST Austria Open Access Fund","_id":"B67AFEDC-15C9-11EA-A837-991A96BB2854"}]},{"oa_version":"Published Version","oa":1,"type":"journal_article","user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","publisher":"Springer Nature","file":[{"file_id":"15089","file_name":"2021_OptimizationLetters_Shehu.pdf","file_size":2148882,"relation":"main_file","access_level":"open_access","date_created":"2024-03-07T14:58:51Z","date_updated":"2024-03-07T14:58:51Z","content_type":"application/pdf","checksum":"63c5f31cd04626152a19f97a2476281b","success":1,"creator":"kschuh"}],"article_type":"original","author":[{"id":"3FC7CB58-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-9224-7139","last_name":"Shehu","full_name":"Shehu, Yekini","first_name":"Yekini"},{"first_name":"Aviv","full_name":"Gibali, Aviv","last_name":"Gibali"}],"ec_funded":1,"scopus_import":"1","citation":{"mla":"Shehu, Yekini, and Aviv Gibali. “New Inertial Relaxed Method for Solving Split Feasibilities.” <i>Optimization Letters</i>, vol. 15, Springer Nature, 2021, pp. 2109–26, doi:<a href=\"https://doi.org/10.1007/s11590-020-01603-1\">10.1007/s11590-020-01603-1</a>.","ieee":"Y. Shehu and A. Gibali, “New inertial relaxed method for solving split feasibilities,” <i>Optimization Letters</i>, vol. 15. Springer Nature, pp. 2109–2126, 2021.","ista":"Shehu Y, Gibali A. 2021. New inertial relaxed method for solving split feasibilities. Optimization Letters. 15, 2109–2126.","ama":"Shehu Y, Gibali A. New inertial relaxed method for solving split feasibilities. <i>Optimization Letters</i>. 2021;15:2109-2126. 
doi:<a href=\"https://doi.org/10.1007/s11590-020-01603-1\">10.1007/s11590-020-01603-1</a>","apa":"Shehu, Y., &#38; Gibali, A. (2021). New inertial relaxed method for solving split feasibilities. <i>Optimization Letters</i>. Springer Nature. <a href=\"https://doi.org/10.1007/s11590-020-01603-1\">https://doi.org/10.1007/s11590-020-01603-1</a>","short":"Y. Shehu, A. Gibali, Optimization Letters 15 (2021) 2109–2126.","chicago":"Shehu, Yekini, and Aviv Gibali. “New Inertial Relaxed Method for Solving Split Feasibilities.” <i>Optimization Letters</i>. Springer Nature, 2021. <a href=\"https://doi.org/10.1007/s11590-020-01603-1\">https://doi.org/10.1007/s11590-020-01603-1</a>."},"acknowledgement":"Open access funding provided by Institute of Science and Technology (IST Austria). The authors are grateful to the referees for their insightful comments which have improved the earlier version of the manuscript greatly. The first author has received funding from the European Research Council (ERC) under the European Union’s Seventh Framework Program (FP7-2007-2013) (Grant agreement No. 616160).","project":[{"_id":"25FBA906-B435-11E9-9278-68D0E5697425","name":"Discrete Optimization in Computer Vision: Theory and Practice","call_identifier":"FP7","grant_number":"616160"},{"_id":"B67AFEDC-15C9-11EA-A837-991A96BB2854","name":"IST Austria Open Access Fund"}],"department":[{"_id":"VlKo"}],"external_id":{"isi":["000537342300001"]},"intvolume":"        15","has_accepted_license":"1","abstract":[{"lang":"eng","text":"In this paper, we introduce a relaxed CQ method with alternated inertial step for solving split feasibility problems. We give convergence of the sequence generated by our method under some suitable assumptions. 
Some numerical implementations from sparse signal and image deblurring are reported to show the efficiency of our method."}],"doi":"10.1007/s11590-020-01603-1","title":"New inertial relaxed method for solving split feasibilities","file_date_updated":"2024-03-07T14:58:51Z","year":"2021","_id":"7925","publication":"Optimization Letters","status":"public","volume":15,"day":"01","month":"09","publication_identifier":{"issn":["1862-4472"],"eissn":["1862-4480"]},"ddc":["510"],"tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"article_processing_charge":"Yes (via OA deal)","isi":1,"publication_status":"published","date_published":"2021-09-01T00:00:00Z","page":"2109-2126","date_updated":"2024-03-07T15:00:43Z","date_created":"2020-06-04T11:28:33Z","quality_controlled":"1","language":[{"iso":"eng"}]},{"intvolume":"        34","abstract":[{"text":"We design fast deterministic algorithms for distance computation in the Congested Clique model. Our key contributions include:\r\n    A (2+ϵ)-approximation for all-pairs shortest paths in O(log²n/ϵ) rounds on unweighted undirected graphs. With a small additional additive factor, this also applies for weighted graphs. This is the first sub-polynomial constant-factor approximation for APSP in this model.\r\n    A (1+ϵ)-approximation for multi-source shortest paths from O(√n) sources in O(log²n/ϵ) rounds on weighted undirected graphs. This is the first sub-polynomial algorithm obtaining this approximation for a set of sources of polynomial size.\r\n\r\nOur main techniques are new distance tools that are obtained via improved algorithms for sparse matrix multiplication, which we leverage to construct efficient hopsets and shortest paths. 
Furthermore, our techniques extend to additional distance problems for which we improve upon the state-of-the-art, including diameter approximation, and an exact single-source shortest paths algorithm for weighted undirected graphs in Õ(n^{1/6}) rounds. ","lang":"eng"}],"department":[{"_id":"DaAl"}],"external_id":{"isi":["000556444600001"],"arxiv":["1903.05956"]},"project":[{"name":"IST Austria Open Access Fund","_id":"B67AFEDC-15C9-11EA-A837-991A96BB2854"}],"scopus_import":"1","related_material":{"record":[{"status":"public","id":"6933","relation":"earlier_version"}]},"acknowledgement":"Open access funding provided by Institute of Science and Technology (IST Austria). We thank Mohsen Ghaffari, Michael Elkin and Merav Parter for fruitful discussions. This project has received funding from the European Union’s Horizon 2020 Research And Innovation Program under Grant Agreement No. 755839.","citation":{"ama":"Censor-Hillel K, Dory M, Korhonen J, Leitersdorf D. Fast approximate shortest paths in the congested clique. <i>Distributed Computing</i>. 2021;34:463-487. doi:<a href=\"https://doi.org/10.1007/s00446-020-00380-5\">10.1007/s00446-020-00380-5</a>","apa":"Censor-Hillel, K., Dory, M., Korhonen, J., &#38; Leitersdorf, D. (2021). Fast approximate shortest paths in the congested clique. <i>Distributed Computing</i>. Springer Nature. <a href=\"https://doi.org/10.1007/s00446-020-00380-5\">https://doi.org/10.1007/s00446-020-00380-5</a>","chicago":"Censor-Hillel, Keren, Michal Dory, Janne Korhonen, and Dean Leitersdorf. “Fast Approximate Shortest Paths in the Congested Clique.” <i>Distributed Computing</i>. Springer Nature, 2021. <a href=\"https://doi.org/10.1007/s00446-020-00380-5\">https://doi.org/10.1007/s00446-020-00380-5</a>.","short":"K. Censor-Hillel, M. Dory, J. Korhonen, D. Leitersdorf, Distributed Computing 34 (2021) 463–487.","ieee":"K. Censor-Hillel, M. Dory, J. Korhonen, and D. 
Leitersdorf, “Fast approximate shortest paths in the congested clique,” <i>Distributed Computing</i>, vol. 34. Springer Nature, pp. 463–487, 2021.","mla":"Censor-Hillel, Keren, et al. “Fast Approximate Shortest Paths in the Congested Clique.” <i>Distributed Computing</i>, vol. 34, Springer Nature, 2021, pp. 463–87, doi:<a href=\"https://doi.org/10.1007/s00446-020-00380-5\">10.1007/s00446-020-00380-5</a>.","ista":"Censor-Hillel K, Dory M, Korhonen J, Leitersdorf D. 2021. Fast approximate shortest paths in the congested clique. Distributed Computing. 34, 463–487."},"author":[{"last_name":"Censor-Hillel","full_name":"Censor-Hillel, Keren","first_name":"Keren"},{"last_name":"Dory","full_name":"Dory, Michal","first_name":"Michal"},{"id":"C5402D42-15BC-11E9-A202-CA2BE6697425","last_name":"Korhonen","full_name":"Korhonen, Janne","first_name":"Janne"},{"full_name":"Leitersdorf, Dean","first_name":"Dean","last_name":"Leitersdorf"}],"article_type":"original","publisher":"Springer Nature","oa":1,"user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","type":"journal_article","oa_version":"Published Version","language":[{"iso":"eng"}],"quality_controlled":"1","arxiv":1,"date_updated":"2024-03-07T14:43:39Z","date_created":"2020-06-07T22:00:54Z","date_published":"2021-12-01T00:00:00Z","page":"463-487","isi":1,"publication_status":"published","article_processing_charge":"Yes (via OA deal)","publication_identifier":{"eissn":["1432-0452"],"issn":["0178-2770"]},"month":"12","day":"01","volume":34,"status":"public","main_file_link":[{"open_access":"1","url":"https://doi.org/10.1007/s00446-020-00380-5"}],"year":"2021","_id":"7939","publication":"Distributed Computing","title":"Fast approximate shortest paths in the congested clique","doi":"10.1007/s00446-020-00380-5"}]
