[{"month":"08","quality_controlled":"1","publication_identifier":{"eisbn":["9798350301298"],"eissn":["2575-7075"]},"isi":1,"date_created":"2024-01-10T08:42:40Z","department":[{"_id":"DaAl"},{"_id":"ChLa"}],"acknowledgement":"The authors would like to sincerely thank Sara Hooker for her feedback during the development of this work. EI was supported in part by the FWF DK VGSCO, grant agreement number W1260-N35. AP and DA acknowledge generous ERC support, via Starting Grant 805223 ScaleML.","article_processing_charge":"No","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","conference":{"end_date":"2023-06-24","start_date":"2023-06-17","name":"CVPR: Conference on Computer Vision and Pattern Recognition","location":"Vancouver, BC, Canada"},"author":[{"orcid":"0000-0002-7778-3221","last_name":"Iofinova","id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","full_name":"Iofinova, Eugenia B","first_name":"Eugenia B"},{"last_name":"Peste","id":"32D78294-F248-11E8-B48F-1D18A9856A87","first_name":"Elena-Alexandra","full_name":"Peste, Elena-Alexandra"},{"orcid":"0000-0003-3650-940X","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","full_name":"Alistarh, Dan-Adrian","first_name":"Dan-Adrian"}],"oa":1,"_id":"14771","date_published":"2023-08-22T00:00:00Z","language":[{"iso":"eng"}],"doi":"10.1109/cvpr52729.2023.02334","publisher":"IEEE","year":"2023","type":"conference","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2304.12622"}],"publication":"2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition","related_material":{"link":[{"url":"https://github.com/IST-DASLab/pruned-vision-model-bias","relation":"software"}]},"external_id":{"arxiv":["2304.12622"],"isi":["001062531308068"]},"title":"Bias in pruned vision models: In-depth analysis and countermeasures","project":[{"grant_number":"W1260-N35","_id":"9B9290DE-BA93-11EA-9121-9846C619BF3A","name":"Vienna Graduate School on Computational Optimization"},{"name":"Elastic Coordination for 
Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"805223"}],"citation":{"chicago":"Iofinova, Eugenia B, Elena-Alexandra Peste, and Dan-Adrian Alistarh. “Bias in Pruned Vision Models: In-Depth Analysis and Countermeasures.” In <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, 24364–73. IEEE, 2023. <a href=\"https://doi.org/10.1109/cvpr52729.2023.02334\">https://doi.org/10.1109/cvpr52729.2023.02334</a>.","ieee":"E. B. Iofinova, E.-A. Peste, and D.-A. Alistarh, “Bias in pruned vision models: In-depth analysis and countermeasures,” in <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, Vancouver, BC, Canada, 2023, pp. 24364–24373.","short":"E.B. Iofinova, E.-A. Peste, D.-A. Alistarh, in:, 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition, IEEE, 2023, pp. 24364–24373.","apa":"Iofinova, E. B., Peste, E.-A., &#38; Alistarh, D.-A. (2023). Bias in pruned vision models: In-depth analysis and countermeasures. In <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i> (pp. 24364–24373). Vancouver, BC, Canada: IEEE. <a href=\"https://doi.org/10.1109/cvpr52729.2023.02334\">https://doi.org/10.1109/cvpr52729.2023.02334</a>","mla":"Iofinova, Eugenia B., et al. “Bias in Pruned Vision Models: In-Depth Analysis and Countermeasures.” <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, IEEE, 2023, pp. 24364–73, doi:<a href=\"https://doi.org/10.1109/cvpr52729.2023.02334\">10.1109/cvpr52729.2023.02334</a>.","ista":"Iofinova EB, Peste E-A, Alistarh D-A. 2023. Bias in pruned vision models: In-depth analysis and countermeasures. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVPR: Conference on Computer Vision and Pattern Recognition, 24364–24373.","ama":"Iofinova EB, Peste E-A, Alistarh D-A. Bias in pruned vision models: In-depth analysis and countermeasures. 
In: <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>. IEEE; 2023:24364-24373. doi:<a href=\"https://doi.org/10.1109/cvpr52729.2023.02334\">10.1109/cvpr52729.2023.02334</a>"},"arxiv":1,"page":"24364-24373","publication_status":"published","oa_version":"Preprint","day":"22","ec_funded":1,"abstract":[{"text":"Pruning—that is, setting a significant subset of the parameters of a neural network to zero—is one of the most popular methods of model compression. Yet, several recent works have raised the issue that pruning may induce or exacerbate bias in the output of the compressed model. Despite existing evidence for this phenomenon, the relationship between neural network pruning and induced bias is not well-understood. In this work, we systematically investigate and characterize this phenomenon in Convolutional Neural Networks for computer vision. First, we show that it is in fact possible to obtain highly-sparse models, e.g. with less than 10% remaining weights, which do not decrease in accuracy nor substantially increase in bias when compared to dense models. At the same time, we also find that, at higher sparsities, pruned models exhibit higher uncertainty in their outputs, as well as increased correlations, which we directly link to increased bias. We propose easy-to-use criteria which, based only on the uncompressed model, establish whether bias will increase with pruning, and identify the samples most susceptible to biased predictions post-compression. 
Our code can be found at https://github.com/IST-DASLab/pruned-vision-model-bias.","lang":"eng"}],"date_updated":"2024-01-10T08:59:26Z","status":"public"},{"type":"conference","year":"2022","publisher":"Institute of Electrical and Electronics Engineers","doi":"10.1109/cvpr52688.2022.01016","language":[{"iso":"eng"}],"date_published":"2022-07-01T00:00:00Z","_id":"14114","oa":1,"author":[{"first_name":"Dominik","full_name":"Zietlow, Dominik","last_name":"Zietlow"},{"full_name":"Lohaus, Michael","first_name":"Michael","last_name":"Lohaus"},{"last_name":"Balakrishnan","full_name":"Balakrishnan, Guha","first_name":"Guha"},{"last_name":"Kleindessner","first_name":"Matthaus","full_name":"Kleindessner, Matthaus"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","last_name":"Locatello","orcid":"0000-0002-4850-0683","first_name":"Francesco","full_name":"Locatello, Francesco"},{"full_name":"Scholkopf, Bernhard","first_name":"Bernhard","last_name":"Scholkopf"},{"last_name":"Russell","first_name":"Chris","full_name":"Russell, Chris"}],"conference":{"end_date":"2022-06-24","start_date":"2022-06-18","name":"CVPR: Conference on Computer Vision and Pattern Recognition","location":"New Orleans, LA, United States"},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","article_processing_charge":"No","extern":"1","department":[{"_id":"FrLo"}],"date_created":"2023-08-21T12:18:00Z","publication_identifier":{"isbn":["9781665469470"],"issn":["1063-6919"],"eissn":["2575-7075"]},"quality_controlled":"1","month":"07","status":"public","date_updated":"2023-09-11T09:19:14Z","abstract":[{"text":"Algorithmic fairness is frequently motivated in terms of a trade-off in which overall performance is decreased so as to improve performance on disadvantaged groups where the algorithm would otherwise be less accurate. 
Contrary to this, we find that applying existing fairness approaches to computer vision improve fairness by degrading the performance of classifiers across all groups (with increased degradation on the best performing groups). Extending the bias-variance decomposition for classification to fairness, we theoretically explain why the majority of fairness methods designed for low capacity models should not be used in settings involving high-capacity models, a scenario common to computer vision. We corroborate this analysis with extensive experimental support that shows that many of the fairness heuristics used in computer vision also degrade performance on the most disadvantaged groups. Building on these insights, we propose an adaptive augmentation strategy that, uniquely, of all methods tested, improves performance for the disadvantaged groups.","lang":"eng"}],"day":"01","page":"10400-10411","oa_version":"Preprint","publication_status":"published","scopus_import":"1","arxiv":1,"citation":{"apa":"Zietlow, D., Lohaus, M., Balakrishnan, G., Kleindessner, M., Locatello, F., Scholkopf, B., &#38; Russell, C. (2022). Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers. In <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i> (pp. 10400–10411). New Orleans, LA, United States: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">https://doi.org/10.1109/cvpr52688.2022.01016</a>","short":"D. Zietlow, M. Lohaus, G. Balakrishnan, M. Kleindessner, F. Locatello, B. Scholkopf, C. Russell, in:, 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition, Institute of Electrical and Electronics Engineers, 2022, pp. 10400–10411.","ista":"Zietlow D, Lohaus M, Balakrishnan G, Kleindessner M, Locatello F, Scholkopf B, Russell C. 2022. Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers. 
2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVPR: Conference on Computer Vision and Pattern Recognition, 10400–10411.","mla":"Zietlow, Dominik, et al. “Leveling down in Computer Vision: Pareto Inefficiencies in Fair Deep Classifiers.” <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, Institute of Electrical and Electronics Engineers, 2022, pp. 10400–11, doi:<a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">10.1109/cvpr52688.2022.01016</a>.","ama":"Zietlow D, Lohaus M, Balakrishnan G, et al. Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers. In: <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>. Institute of Electrical and Electronics Engineers; 2022:10400-10411. doi:<a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">10.1109/cvpr52688.2022.01016</a>","chicago":"Zietlow, Dominik, Michael Lohaus, Guha Balakrishnan, Matthaus Kleindessner, Francesco Locatello, Bernhard Scholkopf, and Chris Russell. “Leveling down in Computer Vision: Pareto Inefficiencies in Fair Deep Classifiers.” In <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, 10400–411. Institute of Electrical and Electronics Engineers, 2022. <a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">https://doi.org/10.1109/cvpr52688.2022.01016</a>.","ieee":"D. Zietlow <i>et al.</i>, “Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers,” in <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, New Orleans, LA, United States, 2022, pp. 
10400–10411."},"title":"Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers","external_id":{"arxiv":["2203.04913"]},"publication":"2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2203.04913"}]},{"status":"public","date_updated":"2023-08-04T10:33:28Z","publication_status":"published","page":"12256-12266","oa_version":"Preprint","day":"27","ec_funded":1,"abstract":[{"lang":"eng","text":"Transfer learning is a classic paradigm by which models pretrained on large “upstream” datasets are adapted to yield good results on “downstream” specialized datasets. Generally, more accurate models on the “upstream” dataset tend to provide better transfer accuracy “downstream”. In this work, we perform an in-depth investigation of this phenomenon in the context of convolutional neural networks (CNNs) trained on the ImageNet dataset, which have been pruned-that is, compressed by sparsifiying their connections. We consider transfer using unstructured pruned models obtained by applying several state-of-the-art pruning methods, including magnitude-based, second-order, regrowth, lottery-ticket, and regularization approaches, in the context of twelve standard transfer tasks. In a nutshell, our study shows that sparse models can match or even outperform the transfer performance of dense models, even at high sparsities, and, while doing so, can lead to significant inference and even training speedups. At the same time, we observe and analyze significant differences in the behaviour of different pruning methods. 
The code is available at: https://github.com/IST-DASLab/sparse-imagenet-transfer."}],"scopus_import":"1","project":[{"grant_number":"W1260-N35","name":"Vienna Graduate School on Computational Optimization","_id":"9B9290DE-BA93-11EA-9121-9846C619BF3A"},{"grant_number":"805223","call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425"}],"arxiv":1,"citation":{"ieee":"E. B. Iofinova, E.-A. Peste, M. Kurtz, and D.-A. Alistarh, “How well do sparse ImageNet models transfer?,” in <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, New Orleans, LA, United States, 2022, pp. 12256–12266.","chicago":"Iofinova, Eugenia B, Elena-Alexandra Peste, Mark Kurtz, and Dan-Adrian Alistarh. “How Well Do Sparse ImageNet Models Transfer?” In <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, 12256–66. Institute of Electrical and Electronics Engineers, 2022. <a href=\"https://doi.org/10.1109/cvpr52688.2022.01195\">https://doi.org/10.1109/cvpr52688.2022.01195</a>.","ama":"Iofinova EB, Peste E-A, Kurtz M, Alistarh D-A. How well do sparse ImageNet models transfer? In: <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>. Institute of Electrical and Electronics Engineers; 2022:12256-12266. doi:<a href=\"https://doi.org/10.1109/cvpr52688.2022.01195\">10.1109/cvpr52688.2022.01195</a>","ista":"Iofinova EB, Peste E-A, Kurtz M, Alistarh D-A. 2022. How well do sparse ImageNet models transfer? 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVPR: Computer Vision and Pattern Recognition, 12256–12266.","mla":"Iofinova, Eugenia B., et al. “How Well Do Sparse ImageNet Models Transfer?” <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, Institute of Electrical and Electronics Engineers, 2022, pp. 12256–66, doi:<a href=\"https://doi.org/10.1109/cvpr52688.2022.01195\">10.1109/cvpr52688.2022.01195</a>.","short":"E.B. 
Iofinova, E.-A. Peste, M. Kurtz, D.-A. Alistarh, in:, 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition, Institute of Electrical and Electronics Engineers, 2022, pp. 12256–12266.","apa":"Iofinova, E. B., Peste, E.-A., Kurtz, M., &#38; Alistarh, D.-A. (2022). How well do sparse ImageNet models transfer? In <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i> (pp. 12256–12266). New Orleans, LA, United States: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/cvpr52688.2022.01195\">https://doi.org/10.1109/cvpr52688.2022.01195</a>"},"publication":"2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition","related_material":{"record":[{"status":"public","id":"13074","relation":"dissertation_contains"}]},"external_id":{"isi":["000870759105034"],"arxiv":["2111.13445"]},"title":"How well do sparse ImageNet models transfer?","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2111.13445","open_access":"1"}],"type":"conference","language":[{"iso":"eng"}],"doi":"10.1109/cvpr52688.2022.01195","publisher":"Institute of Electrical and Electronics Engineers","year":"2022","date_published":"2022-09-27T00:00:00Z","author":[{"first_name":"Eugenia B","full_name":"Iofinova, Eugenia B","orcid":"0000-0002-7778-3221","id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","last_name":"Iofinova"},{"full_name":"Peste, Elena-Alexandra","first_name":"Elena-Alexandra","id":"32D78294-F248-11E8-B48F-1D18A9856A87","last_name":"Peste"},{"last_name":"Kurtz","first_name":"Mark","full_name":"Kurtz, Mark"},{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","last_name":"Alistarh","orcid":"0000-0003-3650-940X","first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian"}],"oa":1,"_id":"12299","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","article_processing_charge":"No","conference":{"start_date":"2022-06-18","end_date":"2022-06-24","name":"CVPR: Computer Vision and Pattern Recognition","location":"New Orleans, LA, United 
States"},"date_created":"2023-01-16T10:06:00Z","acknowledgement":"The authors would like to sincerely thank Christoph Lampert and Nir Shavit for fruitful discussions during the development of this work, and Eldar Kurtic for experimental support. EI was supported in part by the FWF DK VGSCO, grant agreement number W1260-N35, while AP and DA acknowledge generous support by the ERC, via Starting Grant 805223 ScaleML.","department":[{"_id":"DaAl"},{"_id":"ChLa"}],"isi":1,"month":"09","quality_controlled":"1","publication_identifier":{"eissn":["2575-7075"]}},{"date_created":"2020-07-31T16:53:49Z","department":[{"_id":"ChLa"}],"file_date_updated":"2020-07-31T16:57:12Z","conference":{"location":"Virtual","start_date":"2020-06-14","end_date":"2020-06-19","name":"CVPR: Conference on Computer Vision and Pattern Recognition"},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","article_processing_charge":"No","ddc":["004"],"month":"07","publication_identifier":{"eissn":["2575-7075"],"eisbn":["9781728171685"]},"quality_controlled":"1","doi":"10.1109/CVPR42600.2020.00752","language":[{"iso":"eng"}],"year":"2020","publisher":"IEEE","type":"conference","author":[{"id":"13C09E74-18D9-11E9-8878-32CFE5697425","last_name":"Henderson","orcid":"0000-0002-5198-7445","full_name":"Henderson, Paul M","first_name":"Paul M"},{"last_name":"Tsiminaki","full_name":"Tsiminaki, Vagia","first_name":"Vagia"},{"first_name":"Christoph","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","last_name":"Lampert"}],"_id":"8186","oa":1,"date_published":"2020-07-01T00:00:00Z","arxiv":1,"citation":{"chicago":"Henderson, Paul M, Vagia Tsiminaki, and Christoph Lampert. “Leveraging 2D Data to Learn Textured 3D Mesh Generation.” In <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, 7498–7507. IEEE, 2020. <a href=\"https://doi.org/10.1109/CVPR42600.2020.00752\">https://doi.org/10.1109/CVPR42600.2020.00752</a>.","ieee":"P. M. 
Henderson, V. Tsiminaki, and C. Lampert, “Leveraging 2D data to learn textured 3D mesh generation,” in <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, Virtual, 2020, pp. 7498–7507.","apa":"Henderson, P. M., Tsiminaki, V., &#38; Lampert, C. (2020). Leveraging 2D data to learn textured 3D mesh generation. In <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</i> (pp. 7498–7507). Virtual: IEEE. <a href=\"https://doi.org/10.1109/CVPR42600.2020.00752\">https://doi.org/10.1109/CVPR42600.2020.00752</a>","short":"P.M. Henderson, V. Tsiminaki, C. Lampert, in:, Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, IEEE, 2020, pp. 7498–7507.","ista":"Henderson PM, Tsiminaki V, Lampert C. 2020. Leveraging 2D data to learn textured 3D mesh generation. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVPR: Conference on Computer Vision and Pattern Recognition, 7498–7507.","mla":"Henderson, Paul M., et al. “Leveraging 2D Data to Learn Textured 3D Mesh Generation.” <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, IEEE, 2020, pp. 7498–507, doi:<a href=\"https://doi.org/10.1109/CVPR42600.2020.00752\">10.1109/CVPR42600.2020.00752</a>.","ama":"Henderson PM, Tsiminaki V, Lampert C. Leveraging 2D data to learn textured 3D mesh generation. In: <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>. IEEE; 2020:7498-7507. 
doi:<a href=\"https://doi.org/10.1109/CVPR42600.2020.00752\">10.1109/CVPR42600.2020.00752</a>"},"scopus_import":"1","has_accepted_license":"1","main_file_link":[{"url":"https://openaccess.thecvf.com/content_CVPR_2020/papers/Henderson_Leveraging_2D_Data_to_Learn_Textured_3D_Mesh_Generation_CVPR_2020_paper.pdf","open_access":"1"}],"publication":"Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition","file":[{"file_name":"paper.pdf","file_size":10262773,"date_created":"2020-07-31T16:57:12Z","file_id":"8187","success":1,"date_updated":"2020-07-31T16:57:12Z","creator":"phenders","access_level":"open_access","content_type":"application/pdf","relation":"main_file"}],"title":"Leveraging 2D data to learn textured 3D mesh generation","external_id":{"arxiv":["2004.04180"]},"date_updated":"2023-10-17T07:37:11Z","status":"public","day":"01","oa_version":"Submitted Version","page":"7498-7507","publication_status":"published","abstract":[{"lang":"eng","text":"Numerous methods have been proposed for probabilistic generative modelling of\r\n3D objects. However, none of these is able to produce textured objects, which\r\nrenders them of limited use for practical tasks. In this work, we present the\r\nfirst generative model of textured 3D meshes. Training such a model would\r\ntraditionally require a large dataset of textured meshes, but unfortunately,\r\nexisting datasets of meshes lack detailed textures. We instead propose a new\r\ntraining methodology that allows learning from collections of 2D images without\r\nany 3D information. To do so, we train our model to explain a distribution of\r\nimages by modelling each image as a 3D foreground object placed in front of a\r\n2D background. Thus, it learns to generate meshes that when rendered, produce\r\nimages similar to those in its training set.\r\n  A well-known problem when generating meshes with deep networks is the\r\nemergence of self-intersections, which are problematic for many use-cases. 
As a\r\nsecond contribution we therefore introduce a new generation process for 3D\r\nmeshes that guarantees no self-intersections arise, based on the physical\r\nintuition that faces should push one another out of the way as they move.\r\n  We conduct extensive experiments on our approach, reporting quantitative and\r\nqualitative results on both synthetic data and natural images. These show our\r\nmethod successfully learns to generate plausible and diverse textured 3D\r\nsamples for five challenging object classes."}]},{"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.1712.08087","open_access":"1"}],"title":"Learning intelligent dialogs for bounding box annotation","external_id":{"arxiv":["1712.08087"],"isi":["000457843609036"]},"publication":"2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition","arxiv":1,"citation":{"ama":"Uijlings J, Konyushkova K, Lampert C, Ferrari V. Learning intelligent dialogs for bounding box annotation. In: <i>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>. IEEE; 2018:9175-9184. doi:<a href=\"https://doi.org/10.1109/cvpr.2018.00956\">10.1109/cvpr.2018.00956</a>","ista":"Uijlings J, Konyushkova K, Lampert C, Ferrari V. 2018. Learning intelligent dialogs for bounding box annotation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVF: Conference on Computer Vision and Pattern Recognition, 9175–9184.","apa":"Uijlings, J., Konyushkova, K., Lampert, C., &#38; Ferrari, V. (2018). Learning intelligent dialogs for bounding box annotation. In <i>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i> (pp. 9175–9184). Salt Lake City, UT, United States: IEEE. <a href=\"https://doi.org/10.1109/cvpr.2018.00956\">https://doi.org/10.1109/cvpr.2018.00956</a>","mla":"Uijlings, Jasper, et al. “Learning Intelligent Dialogs for Bounding Box Annotation.” <i>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, IEEE, 2018, pp. 
9175–84, doi:<a href=\"https://doi.org/10.1109/cvpr.2018.00956\">10.1109/cvpr.2018.00956</a>.","short":"J. Uijlings, K. Konyushkova, C. Lampert, V. Ferrari, in:, 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, IEEE, 2018, pp. 9175–9184.","ieee":"J. Uijlings, K. Konyushkova, C. Lampert, and V. Ferrari, “Learning intelligent dialogs for bounding box annotation,” in <i>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, Salt Lake City, UT, United States, 2018, pp. 9175–9184.","chicago":"Uijlings, Jasper, Ksenia Konyushkova, Christoph Lampert, and Vittorio Ferrari. “Learning Intelligent Dialogs for Bounding Box Annotation.” In <i>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, 9175–84. IEEE, 2018. <a href=\"https://doi.org/10.1109/cvpr.2018.00956\">https://doi.org/10.1109/cvpr.2018.00956</a>."},"scopus_import":"1","abstract":[{"text":"We introduce Intelligent Annotation Dialogs for bounding box annotation. We train an agent to automatically choose a sequence of actions for a human annotator to produce a bounding box in a minimal amount of time. Specifically, we consider two actions: box verification [34], where the annotator verifies a box generated by an object detector, and manual box drawing. We explore two kinds of agents, one based on predicting the probability that a box will be positively verified, and the other based on reinforcement learning. 
We demonstrate that (1) our agents are able to learn efficient annotation strategies in several scenarios, automatically adapting to the image difficulty, the desired quality of the boxes, and the detector strength; (2) in all scenarios the resulting annotation dialogs speed up annotation compared to manual box drawing alone and box verification alone, while also outperforming any fixed combination of verification and drawing in most scenarios; (3) in a realistic scenario where the detector is iteratively re-trained, our agents evolve a series of strategies that reflect the shifting trade-off between verification and drawing as the detector grows stronger.","lang":"eng"}],"day":"17","publication_status":"published","page":"9175-9184","oa_version":"Preprint","date_updated":"2023-09-19T15:11:49Z","status":"public","publication_identifier":{"isbn":["9781538664209"],"eissn":["2575-7075"]},"quality_controlled":"1","month":"12","isi":1,"department":[{"_id":"ChLa"}],"date_created":"2022-03-18T12:45:09Z","conference":{"location":"Salt Lake City, UT, United States","name":"CVF: Conference on Computer Vision and Pattern Recognition","end_date":"2018-06-23","start_date":"2018-06-18"},"article_processing_charge":"No","user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","_id":"10882","oa":1,"author":[{"first_name":"Jasper","full_name":"Uijlings, Jasper","last_name":"Uijlings"},{"last_name":"Konyushkova","full_name":"Konyushkova, Ksenia","first_name":"Ksenia"},{"orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","last_name":"Lampert","first_name":"Christoph","full_name":"Lampert, Christoph"},{"full_name":"Ferrari, Vittorio","first_name":"Vittorio","last_name":"Ferrari"}],"date_published":"2018-12-17T00:00:00Z","year":"2018","publisher":"IEEE","doi":"10.1109/cvpr.2018.00956","language":[{"iso":"eng"}],"type":"conference"}]
