[{"year":"2022","citation":{"ama":"Konstantinov NH. Robustness and fairness in machine learning. 2022. doi:<a href=\"https://doi.org/10.15479/at:ista:10799\">10.15479/at:ista:10799</a>","apa":"Konstantinov, N. H. (2022). <i>Robustness and fairness in machine learning</i>. Institute of Science and Technology Austria. <a href=\"https://doi.org/10.15479/at:ista:10799\">https://doi.org/10.15479/at:ista:10799</a>","chicago":"Konstantinov, Nikola H. “Robustness and Fairness in Machine Learning.” Institute of Science and Technology Austria, 2022. <a href=\"https://doi.org/10.15479/at:ista:10799\">https://doi.org/10.15479/at:ista:10799</a>.","ieee":"N. H. Konstantinov, “Robustness and fairness in machine learning,” Institute of Science and Technology Austria, 2022.","mla":"Konstantinov, Nikola H. <i>Robustness and Fairness in Machine Learning</i>. Institute of Science and Technology Austria, 2022, doi:<a href=\"https://doi.org/10.15479/at:ista:10799\">10.15479/at:ista:10799</a>.","short":"N.H. Konstantinov, Robustness and Fairness in Machine Learning, Institute of Science and Technology Austria, 2022.","ista":"Konstantinov NH. 2022. Robustness and fairness in machine learning. Institute of Science and Technology Austria."},"date_updated":"2023-10-17T12:31:54Z","day":"08","degree_awarded":"PhD","doi":"10.15479/at:ista:10799","abstract":[{"lang":"eng","text":"Because of the increasing popularity of machine learning methods, it is becoming important to understand the impact of learned components on automated decision-making systems and to guarantee that their consequences are beneficial to society. In other words, it is necessary to ensure that machine learning is sufficiently trustworthy to be used in real-world applications. This thesis studies two properties of machine learning models that are highly desirable for the\r\nsake of reliability: robustness and fairness. In the first part of the thesis we study the robustness of learning algorithms to training data corruption. 
Previous work has shown that machine learning models are vulnerable to a range\r\nof training set issues, varying from label noise through systematic biases to worst-case data manipulations. This is an especially relevant problem from a present perspective, since modern machine learning methods are particularly data hungry and therefore practitioners often have to rely on data collected from various external sources, e.g. from the Internet, from app users or via crowdsourcing. Naturally, such sources vary greatly in the quality and reliability of the\r\ndata they provide. With these considerations in mind, we study the problem of designing machine learning algorithms that are robust to corruptions in data coming from multiple sources. We show that, in contrast to the case of a single dataset with outliers, successful learning within this model is possible both theoretically and practically, even under worst-case data corruptions. The second part of this thesis deals with fairness-aware machine learning. There are multiple areas where machine learning models have shown promising results, but where careful considerations are required, in order to avoid discriminative decisions taken by such learned components. Ensuring fairness can be particularly challenging, because real-world training datasets are expected to contain various forms of historical bias that may affect the learning process. In this thesis we show that data corruption can indeed render the problem of achieving fairness impossible, by tightly characterizing the theoretical limits of fair learning under worst-case data manipulations. 
However, assuming access to clean data, we also show how fairness-aware learning can be made practical in contexts beyond binary classification, in particular in the challenging learning to rank setting."}],"ddc":["000"],"_id":"10799","author":[{"full_name":"Konstantinov, Nikola H","last_name":"Konstantinov","first_name":"Nikola H","id":"4B9D76E4-F248-11E8-B48F-1D18A9856A87"}],"date_created":"2022-02-28T13:03:49Z","department":[{"_id":"GradSch"},{"_id":"ChLa"}],"article_processing_charge":"No","publication_status":"published","alternative_title":["ISTA Thesis"],"title":"Robustness and fairness in machine learning","ec_funded":1,"page":"176","file_date_updated":"2022-03-10T12:11:48Z","publisher":"Institute of Science and Technology Austria","type":"dissertation","date_published":"2022-03-08T00:00:00Z","publication_identifier":{"isbn":["978-3-99078-015-2"],"issn":["2663-337X"]},"oa":1,"supervisor":[{"first_name":"Christoph","last_name":"Lampert","orcid":"0000-0001-8622-7887","full_name":"Lampert, 
Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"}],"file":[{"date_created":"2022-03-06T11:42:54Z","file_size":4204905,"checksum":"626bc523ae8822d20e635d0e2d95182e","date_updated":"2022-03-06T11:42:54Z","file_name":"thesis.pdf","content_type":"application/pdf","relation":"main_file","access_level":"open_access","success":1,"file_id":"10823","creator":"nkonstan"},{"date_created":"2022-03-06T11:42:57Z","file_size":22841103,"checksum":"e2ca2b88350ac8ea1515b948885cbcb1","date_updated":"2022-03-10T12:11:48Z","content_type":"application/x-zip-compressed","file_name":"thesis.zip","relation":"source_file","access_level":"closed","file_id":"10824","creator":"nkonstan"}],"user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","related_material":{"record":[{"status":"public","id":"8724","relation":"part_of_dissertation"},{"relation":"part_of_dissertation","id":"10803","status":"public"},{"id":"10802","relation":"part_of_dissertation","status":"public"},{"status":"public","relation":"part_of_dissertation","id":"6590"}]},"status":"public","has_accepted_license":"1","project":[{"name":"International IST Doctoral Program","grant_number":"665385","call_identifier":"H2020","_id":"2564DBCA-B435-11E9-9278-68D0E5697425"}],"oa_version":"Published Version","month":"03","keyword":["robustness","fairness","machine learning","PAC learning","adversarial learning"],"language":[{"iso":"eng"}]},{"acknowledgement":"The authors thank Eugenia Iofinova and Bernd Prach for providing feedback on early versions of this paper. This publication was made possible by an ETH AI Center postdoctoral fellowship to Nikola Konstantinov.","volume":23,"ddc":["004"],"arxiv":1,"day":"01","abstract":[{"text":"Addressing fairness concerns about machine learning models is a crucial step towards their long-term adoption in real-world automated systems. While many approaches have been developed for training fair models from data, little is known about the robustness of these methods to data corruption. 
In this work we consider fairness-aware learning under worst-case data manipulations. We show that an adversary can in some situations force any learner to return an overly biased classifier, regardless of the sample size and with or without degrading\r\naccuracy, and that the strength of the excess bias increases for learning problems with underrepresented protected groups in the data. We also prove that our hardness results are tight up to constant factors. To this end, we study two natural learning algorithms that optimize for both accuracy and fairness and show that these algorithms enjoy guarantees that are order-optimal in terms of the corruption ratio and the protected groups frequencies in the large data\r\nlimit.","lang":"eng"}],"date_updated":"2023-09-26T10:44:37Z","year":"2022","citation":{"ista":"Konstantinov NH, Lampert C. 2022. Fairness-aware PAC learning from corrupted data. Journal of Machine Learning Research. 23, 1–60.","short":"N.H. Konstantinov, C. Lampert, Journal of Machine Learning Research 23 (2022) 1–60.","mla":"Konstantinov, Nikola H., and Christoph Lampert. “Fairness-Aware PAC Learning from Corrupted Data.” <i>Journal of Machine Learning Research</i>, vol. 23, ML Research Press, 2022, pp. 1–60.","chicago":"Konstantinov, Nikola H, and Christoph Lampert. “Fairness-Aware PAC Learning from Corrupted Data.” <i>Journal of Machine Learning Research</i>. ML Research Press, 2022.","ieee":"N. H. Konstantinov and C. Lampert, “Fairness-aware PAC learning from corrupted data,” <i>Journal of Machine Learning Research</i>, vol. 23. ML Research Press, pp. 1–60, 2022.","ama":"Konstantinov NH, Lampert C. Fairness-aware PAC learning from corrupted data. <i>Journal of Machine Learning Research</i>. 2022;23:1-60.","apa":"Konstantinov, N. H., &#38; Lampert, C. (2022). Fairness-aware PAC learning from corrupted data. <i>Journal of Machine Learning Research</i>. 
ML Research Press."},"external_id":{"arxiv":["2102.06004"]},"publisher":"ML Research Press","article_type":"original","page":"1-60","quality_controlled":"1","file_date_updated":"2022-07-12T15:08:28Z","publication_status":"published","date_created":"2022-02-28T14:05:42Z","department":[{"_id":"ChLa"}],"article_processing_charge":"No","title":"Fairness-aware PAC learning from corrupted data","intvolume":"        23","_id":"10802","scopus_import":"1","author":[{"full_name":"Konstantinov, Nikola H","last_name":"Konstantinov","first_name":"Nikola H","id":"4B9D76E4-F248-11E8-B48F-1D18A9856A87"},{"id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph","last_name":"Lampert","orcid":"0000-0002-4561-241X","full_name":"Lampert, Christoph"}],"file":[{"creator":"kschuh","file_id":"11570","success":1,"access_level":"open_access","relation":"main_file","file_name":"2022_JournalMachineLearningResearch_Konstantinov.pdf","content_type":"application/pdf","date_updated":"2022-07-12T15:08:28Z","checksum":"9cac897b54a0ddf3a553a2c33e88cfda","file_size":551862,"date_created":"2022-07-12T15:08:28Z"}],"status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","related_material":{"record":[{"status":"public","id":"10799","relation":"dissertation_contains"},{"status":"public","relation":"shorter_version","id":"13241"}]},"publication_identifier":{"issn":["1532-4435"],"eissn":["1533-7928"]},"oa":1,"tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"date_published":"2022-05-01T00:00:00Z","type":"journal_article","language":[{"iso":"eng"}],"keyword":["Fairness","robustness","data poisoning","trustworthy machine learning","PAC learning"],"oa_version":"Published Version","month":"05","publication":"Journal of Machine Learning Research","has_accepted_license":"1"},{"ddc":["004"],"citation":{"short":"M. 
Lechner, Learning Verifiable Representations, Institute of Science and Technology Austria, 2022.","mla":"Lechner, Mathias. <i>Learning Verifiable Representations</i>. Institute of Science and Technology Austria, 2022, doi:<a href=\"https://doi.org/10.15479/at:ista:11362\">10.15479/at:ista:11362</a>.","ista":"Lechner M. 2022. Learning verifiable representations. Institute of Science and Technology Austria.","apa":"Lechner, M. (2022). <i>Learning verifiable representations</i>. Institute of Science and Technology Austria. <a href=\"https://doi.org/10.15479/at:ista:11362\">https://doi.org/10.15479/at:ista:11362</a>","ama":"Lechner M. Learning verifiable representations. 2022. doi:<a href=\"https://doi.org/10.15479/at:ista:11362\">10.15479/at:ista:11362</a>","ieee":"M. Lechner, “Learning verifiable representations,” Institute of Science and Technology Austria, 2022.","chicago":"Lechner, Mathias. “Learning Verifiable Representations.” Institute of Science and Technology Austria, 2022. <a href=\"https://doi.org/10.15479/at:ista:11362\">https://doi.org/10.15479/at:ista:11362</a>."},"year":"2022","date_updated":"2025-07-14T09:10:11Z","abstract":[{"text":"Deep learning has enabled breakthroughs in challenging computing problems and has emerged as the standard problem-solving tool for computer vision and natural language processing tasks.\r\nOne exception to this trend is safety-critical tasks where robustness and resilience requirements contradict the black-box nature of neural networks. \r\nTo deploy deep learning methods for these tasks, it is vital to provide guarantees on neural network agents' safety and robustness criteria. 
\r\nThis can be achieved by developing formal verification methods to verify the safety and robustness properties of neural networks.\r\n\r\nOur goal is to design, develop and assess safety verification methods for neural networks to improve their reliability and trustworthiness in real-world applications.\r\nThis thesis establishes techniques for the verification of compressed and adversarially trained models as well as the design of novel neural networks for verifiably safe decision-making.\r\n\r\nFirst, we establish the problem of verifying quantized neural networks. Quantization is a technique that trades numerical precision for the computational efficiency of running a neural network and is widely adopted in industry.\r\nWe show that neglecting the reduced precision when verifying a neural network can lead to wrong conclusions about the robustness and safety of the network, highlighting that novel techniques for quantized network verification are necessary. We introduce several bit-exact verification methods explicitly designed for quantized neural networks and experimentally confirm on realistic networks that the network's robustness and other formal properties are affected by the quantization.\r\n\r\nFurthermore, we perform a case study providing evidence that adversarial training, a standard technique for making neural networks more robust, has detrimental effects on the network's performance. This robustness-accuracy tradeoff has been studied before regarding the accuracy obtained on classification datasets where each data point is independent of all other data points. 
On the other hand, we investigate the tradeoff empirically in robot learning settings where both a high accuracy and a high robustness are desirable.\r\nOur results suggest that the negative side-effects of adversarial training outweigh its robustness benefits in practice.\r\n\r\nFinally, we consider the problem of verifying safety when running a Bayesian neural network policy in a feedback loop with systems over the infinite time horizon. Bayesian neural networks are probabilistic models for learning uncertainties in the data and are therefore often used in robotic and healthcare applications where data is inherently stochastic.\r\nWe introduce a method for recalibrating Bayesian neural networks so that they yield probability distributions over safe decisions only.\r\nOur method learns a safety certificate that guarantees safety over the infinite time horizon to determine which decisions are safe in every possible state of the system.\r\nWe demonstrate the effectiveness of our approach on a series of reinforcement learning benchmarks.","lang":"eng"}],"day":"12","doi":"10.15479/at:ista:11362","degree_awarded":"PhD","file_date_updated":"2022-05-17T15:19:39Z","ec_funded":1,"page":"124","publisher":"Institute of Science and Technology Austria","author":[{"first_name":"Mathias","last_name":"Lechner","full_name":"Lechner, Mathias","id":"3DC22916-F248-11E8-B48F-1D18A9856A87"}],"_id":"11362","title":"Learning verifiable representations","alternative_title":["ISTA 
Thesis"],"article_processing_charge":"No","date_created":"2022-05-12T07:14:01Z","department":[{"_id":"GradSch"},{"_id":"ToHe"}],"publication_status":"published","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","related_material":{"record":[{"status":"public","relation":"part_of_dissertation","id":"11366"},{"relation":"part_of_dissertation","id":"7808","status":"public"},{"status":"public","id":"10666","relation":"part_of_dissertation"},{"relation":"part_of_dissertation","id":"10665","status":"public"},{"status":"public","id":"10667","relation":"part_of_dissertation"}]},"status":"public","file":[{"creator":"mlechner","file_id":"11378","access_level":"closed","relation":"source_file","file_name":"src.zip","content_type":"application/zip","date_updated":"2022-05-13T12:49:00Z","checksum":"8eefa9c7c10ca7e1a2ccdd731962a645","file_size":13210143,"date_created":"2022-05-13T12:33:26Z"},{"file_size":2732536,"checksum":"1b9e1e5a9a83ed9d89dad2f5133dc026","date_created":"2022-05-16T08:02:28Z","file_name":"thesis_main-a2.pdf","content_type":"application/pdf","date_updated":"2022-05-17T15:19:39Z","access_level":"open_access","relation":"main_file","creator":"mlechner","file_id":"11382"}],"type":"dissertation","date_published":"2022-05-12T00:00:00Z","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by-nd/4.0/legalcode","name":"Creative Commons Attribution-NoDerivatives 4.0 International (CC BY-ND 4.0)","image":"/image/cc_by_nd.png","short":"CC BY-ND (4.0)"},"oa":1,"supervisor":[{"id":"40876CD8-F248-11E8-B48F-1D18A9856A87","full_name":"Henzinger, Thomas A","orcid":"0000-0002-2985-7724","last_name":"Henzinger","first_name":"Thomas A"}],"publication_identifier":{"isbn":["978-3-99078-017-6"]},"keyword":["neural networks","verification","machine learning"],"language":[{"iso":"eng"}],"has_accepted_license":"1","month":"05","project":[{"name":"The Wittgenstein 
Prize","grant_number":"Z211","call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425"},{"call_identifier":"H2020","_id":"62781420-2b32-11ec-9570-8d9b63373d4d","name":"Vigilant Algorithmic Monitoring of Software","grant_number":"101020093"}],"oa_version":"Published Version"},{"oa":1,"abstract":[{"text":"For a solar-like star, the surface rotation evolves with time, allowing in principle to estimate the age of a star from its surface rotation period. Here we are interested in measuring surface rotation periods of solar-like stars observed by the NASA mission Kepler. Different methods have been developed to track rotation signals in Kepler photometric light curves: time-frequency analysis based on wavelet techniques, autocorrelation and composite spectrum. We use the learning abilities of random forest classifiers to take decisions during two crucial steps of the analysis. First, given some input parameters, we discriminate the considered Kepler targets between rotating MS stars, non-rotating MS stars, red giants, binaries and pulsators. We then use a second classifier only on the MS rotating targets to decide the best data analysis treatment.","lang":"eng"}],"day":"23","arxiv":1,"doi":"10.48550/arXiv.1906.09609","external_id":{"arxiv":["1906.09609"]},"type":"preprint","date_published":"2019-06-23T00:00:00Z","citation":{"short":"S.N. Breton, L.A. Bugnet, A.R.G. Santos, A.L. Saux, S. Mathur, P.L. Palle, R.A. Garcia, ArXiv (n.d.).","mla":"Breton, S. N., et al. “Determining Surface Rotation Periods of Solar-like Stars Observed by the Kepler Mission Using Machine Learning Techniques.” <i>ArXiv</i>, 1906.09609, doi:<a href=\"https://doi.org/10.48550/arXiv.1906.09609\">10.48550/arXiv.1906.09609</a>.","ista":"Breton SN, Bugnet LA, Santos ARG, Saux AL, Mathur S, Palle PL, Garcia RA. Determining surface rotation periods of solar-like stars observed by the Kepler mission using machine learning techniques. 
arXiv, 1906.09609.","ama":"Breton SN, Bugnet LA, Santos ARG, et al. Determining surface rotation periods of solar-like stars observed by the Kepler mission using machine learning techniques. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.1906.09609\">10.48550/arXiv.1906.09609</a>","apa":"Breton, S. N., Bugnet, L. A., Santos, A. R. G., Saux, A. L., Mathur, S., Palle, P. L., &#38; Garcia, R. A. (n.d.). Determining surface rotation periods of solar-like stars observed by the Kepler mission using machine learning techniques. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.1906.09609\">https://doi.org/10.48550/arXiv.1906.09609</a>","chicago":"Breton, S. N., Lisa Annabelle Bugnet, A. R. G. Santos, A. Le Saux, S. Mathur, P. L. Palle, and R. A. Garcia. “Determining Surface Rotation Periods of Solar-like Stars Observed by the Kepler Mission Using Machine Learning Techniques.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.1906.09609\">https://doi.org/10.48550/arXiv.1906.09609</a>.","ieee":"S. N. Breton <i>et al.</i>, “Determining surface rotation periods of solar-like stars observed by the Kepler mission using machine learning techniques,” <i>arXiv</i>. ."},"year":"2019","date_updated":"2022-08-22T08:16:53Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","status":"public","extern":"1","main_file_link":[{"url":"https://arxiv.org/abs/1906.09609","open_access":"1"}],"article_number":"1906.09609","title":"Determining surface rotation periods of solar-like stars observed by the Kepler mission using machine learning techniques","month":"06","date_created":"2022-07-20T11:18:53Z","article_processing_charge":"No","publication_status":"submitted","oa_version":"Preprint","author":[{"full_name":"Breton, S. N.","first_name":"S. N.","last_name":"Breton"},{"full_name":"Bugnet, Lisa Annabelle","orcid":"0000-0003-0142-4000","last_name":"Bugnet","first_name":"Lisa Annabelle","id":"d9edb345-f866-11ec-9b37-d119b5234501"},{"full_name":"Santos, A. R. 
G.","first_name":"A. R. G.","last_name":"Santos"},{"first_name":"A. Le","last_name":"Saux","full_name":"Saux, A. Le"},{"last_name":"Mathur","first_name":"S.","full_name":"Mathur, S."},{"last_name":"Palle","first_name":"P. L.","full_name":"Palle, P. L."},{"full_name":"Garcia, R. A.","first_name":"R. A.","last_name":"Garcia"}],"publication":"arXiv","_id":"11627","keyword":["asteroseismology","rotation","solar-like stars","kepler","machine learning","random forest"],"language":[{"iso":"eng"}]},{"extern":"1","status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.1906.09611","open_access":"1"}],"abstract":[{"text":"The second mission of NASA’s Kepler satellite, K2, has collected hundreds of thousands of lightcurves for stars close to the ecliptic plane. This new sample could increase the number of known pulsating stars and then improve our understanding of those stars. For the moment only a few stars have been properly classified and published. In this work, we present a method to automatically classify K2 pulsating stars using a Machine Learning technique called Random Forest. The objective is to sort out the stars in four classes: red giant (RG), main-sequence Solar-like stars (SL), classical pulsators (PULS) and Other. To do this we use the effective temperatures and the luminosities of the stars as well as the FliPer features, that measures the amount of power contained in the power spectral density. The classifier now retrieves the right classification for more than 80% of the stars.","lang":"eng"}],"oa":1,"doi":"10.48550/arXiv.1906.09611","arxiv":1,"day":"23","date_published":"2019-06-23T00:00:00Z","external_id":{"arxiv":["1906.09611"]},"type":"preprint","date_updated":"2022-08-22T08:20:29Z","citation":{"ista":"Saux AL, Bugnet LA, Mathur S, Breton SN, Garcia RA. Automatic classification of K2 pulsating stars using machine learning techniques. arXiv, 1906.09611.","mla":"Saux, A. Le, et al. 
“Automatic Classification of K2 Pulsating Stars Using Machine Learning Techniques.” <i>ArXiv</i>, 1906.09611, doi:<a href=\"https://doi.org/10.48550/arXiv.1906.09611\">10.48550/arXiv.1906.09611</a>.","short":"A.L. Saux, L.A. Bugnet, S. Mathur, S.N. Breton, R.A. Garcia, ArXiv (n.d.).","chicago":"Saux, A. Le, Lisa Annabelle Bugnet, S. Mathur, S. N. Breton, and R. A. Garcia. “Automatic Classification of K2 Pulsating Stars Using Machine Learning Techniques.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.1906.09611\">https://doi.org/10.48550/arXiv.1906.09611</a>.","ieee":"A. L. Saux, L. A. Bugnet, S. Mathur, S. N. Breton, and R. A. Garcia, “Automatic classification of K2 pulsating stars using machine learning techniques,” <i>arXiv</i>. .","apa":"Saux, A. L., Bugnet, L. A., Mathur, S., Breton, S. N., &#38; Garcia, R. A. (n.d.). Automatic classification of K2 pulsating stars using machine learning techniques. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.1906.09611\">https://doi.org/10.48550/arXiv.1906.09611</a>","ama":"Saux AL, Bugnet LA, Mathur S, Breton SN, Garcia RA. Automatic classification of K2 pulsating stars using machine learning techniques. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.1906.09611\">10.48550/arXiv.1906.09611</a>"},"year":"2019","language":[{"iso":"eng"}],"keyword":["asteroseismology - methods","data analysis - techniques","machine learning - stars","oscillations"],"title":"Automatic classification of K2 pulsating stars using machine learning techniques","month":"06","article_number":"1906.09611","oa_version":"Preprint","publication_status":"submitted","date_created":"2022-07-21T06:57:10Z","article_processing_charge":"No","author":[{"full_name":"Saux, A. Le","last_name":"Saux","first_name":"A. 
Le"},{"id":"d9edb345-f866-11ec-9b37-d119b5234501","orcid":"0000-0003-0142-4000","full_name":"Bugnet, Lisa Annabelle","first_name":"Lisa Annabelle","last_name":"Bugnet"},{"full_name":"Mathur, S.","last_name":"Mathur","first_name":"S."},{"full_name":"Breton, S. N.","last_name":"Breton","first_name":"S. N."},{"first_name":"R. A.","last_name":"Garcia","full_name":"Garcia, R. A."}],"_id":"11630","publication":"arXiv"}]
