{"month":"01","language":[{"iso":"eng"}],"author":[{"orcid":"0000-0002-8407-0705","first_name":"Amélie","full_name":"Royer, Amélie","id":"3811D890-F248-11E8-B48F-1D18A9856A87","last_name":"Royer"},{"first_name":"Konstantinos","last_name":"Bousmalis","full_name":"Bousmalis, Konstantinos"},{"first_name":"Stephan","last_name":"Gouws","full_name":"Gouws, Stephan"},{"first_name":"Fred","last_name":"Bertsch","full_name":"Bertsch, Fred"},{"last_name":"Mosseri","full_name":"Mosseri, Inbar","first_name":"Inbar"},{"first_name":"Forrester","last_name":"Cole","full_name":"Cole, Forrester"},{"full_name":"Murphy, Kevin","last_name":"Murphy","first_name":"Kevin"}],"status":"public","date_published":"2020-01-08T00:00:00Z","oa_version":"Preprint","doi":"10.1007/978-3-030-30671-7_3","oa":1,"title":"XGAN: Unsupervised image-to-image translation for many-to-many mappings","editor":[{"first_name":"Richa","last_name":"Singh","full_name":"Singh, Richa"},{"first_name":"Mayank","last_name":"Vatsa","full_name":"Vatsa, Mayank"},{"last_name":"Patel","full_name":"Patel, Vishal M.","first_name":"Vishal M."},{"first_name":"Nalini","last_name":"Ratha","full_name":"Ratha, Nalini"}],"date_updated":"2023-09-07T13:16:18Z","article_processing_charge":"No","publication":"Domain Adaptation for Visual Understanding","quality_controlled":"1","_id":"8092","citation":{"chicago":"Royer, Amélie, Konstantinos Bousmalis, Stephan Gouws, Fred Bertsch, Inbar Mosseri, Forrester Cole, and Kevin Murphy. “XGAN: Unsupervised Image-to-Image Translation for Many-to-Many Mappings.” In Domain Adaptation for Visual Understanding, edited by Richa Singh, Mayank Vatsa, Vishal M. Patel, and Nalini Ratha, 33–49. Springer Nature, 2020. https://doi.org/10.1007/978-3-030-30671-7_3.","short":"A. Royer, K. Bousmalis, S. Gouws, F. Bertsch, I. Mosseri, F. Cole, K. Murphy, in:, R. Singh, M. Vatsa, V.M. Patel, N. Ratha (Eds.), Domain Adaptation for Visual Understanding, Springer Nature, 2020, pp. 33–49.","ista":"Royer A, Bousmalis K, Gouws S, Bertsch F, Mosseri I, Cole F, Murphy K. 2020.XGAN: Unsupervised image-to-image translation for many-to-many mappings. In: Domain Adaptation for Visual Understanding. , 33–49.","apa":"Royer, A., Bousmalis, K., Gouws, S., Bertsch, F., Mosseri, I., Cole, F., & Murphy, K. (2020). XGAN: Unsupervised image-to-image translation for many-to-many mappings. In R. Singh, M. Vatsa, V. M. Patel, & N. Ratha (Eds.), Domain Adaptation for Visual Understanding (pp. 33–49). Springer Nature. https://doi.org/10.1007/978-3-030-30671-7_3","mla":"Royer, Amélie, et al. “XGAN: Unsupervised Image-to-Image Translation for Many-to-Many Mappings.” Domain Adaptation for Visual Understanding, edited by Richa Singh et al., Springer Nature, 2020, pp. 33–49, doi:10.1007/978-3-030-30671-7_3.","ama":"Royer A, Bousmalis K, Gouws S, et al. XGAN: Unsupervised image-to-image translation for many-to-many mappings. In: Singh R, Vatsa M, Patel VM, Ratha N, eds. Domain Adaptation for Visual Understanding. Springer Nature; 2020:33-49. doi:10.1007/978-3-030-30671-7_3","ieee":"A. Royer et al., “XGAN: Unsupervised image-to-image translation for many-to-many mappings,” in Domain Adaptation for Visual Understanding, R. Singh, M. Vatsa, V. M. Patel, and N. Ratha, Eds. Springer Nature, 2020, pp. 
33–49."},"publication_status":"published","related_material":{"record":[{"id":"8331","status":"deleted","relation":"dissertation_contains"},{"id":"8390","status":"public","relation":"dissertation_contains"}]},"abstract":[{"lang":"eng","text":"Image translation refers to the task of mapping images from a visual domain to another. Given two unpaired collections of images, we aim to learn a mapping between the corpus-level style of each collection, while preserving semantic content shared across the two domains. We introduce xgan, a dual adversarial auto-encoder, which captures a shared representation of the common domain semantic content in an unsupervised way, while jointly learning the domain-to-domain image translations in both directions. We exploit ideas from the domain adaptation literature and define a semantic consistency loss which encourages the learned embedding to preserve semantics shared across domains. We report promising qualitative results for the task of face-to-cartoon translation. The cartoon dataset we collected for this purpose, “CartoonSet”, is also publicly available as a new benchmark for semantic style transfer at https://google.github.io/cartoonset/index.html."}],"scopus_import":"1","department":[{"_id":"ChLa"}],"date_created":"2020-07-05T22:00:46Z","type":"book_chapter","page":"33-49","year":"2020","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","publisher":"Springer Nature","main_file_link":[{"url":"https://arxiv.org/abs/1711.05139","open_access":"1"}],"publication_identifier":{"isbn":["9783030306717"]},"arxiv":1,"external_id":{"arxiv":["1711.05139"]},"day":"08"}