@inbook{8092,
  abstract     = {Image translation refers to the task of mapping images from a visual domain to another. Given two unpaired collections of images, we aim to learn a mapping between the corpus-level style of each collection, while preserving semantic content shared across the two domains. We introduce XGAN, a dual adversarial auto-encoder, which captures a shared representation of the common domain semantic content in an unsupervised way, while jointly learning the domain-to-domain image translations in both directions. We exploit ideas from the domain adaptation literature and define a semantic consistency loss which encourages the learned embedding to preserve semantics shared across domains. We report promising qualitative results for the task of face-to-cartoon translation. The cartoon dataset we collected for this purpose, “CartoonSet”, is also publicly available as a new benchmark for semantic style transfer at https://google.github.io/cartoonset/index.html.},
  author       = {Royer, Amélie and Bousmalis, Konstantinos and Gouws, Stephan and Bertsch, Fred and Mosseri, Inbar and Cole, Forrester and Murphy, Kevin},
  booktitle    = {Domain Adaptation for Visual Understanding},
  editor       = {Singh, Richa and Vatsa, Mayank and Patel, Vishal M. and Ratha, Nalini},
  isbn         = {9783030306717},
  pages        = {33--49},
  publisher    = {Springer Nature},
  title        = {{XGAN: Unsupervised Image-to-Image Translation for Many-to-Many Mappings}},
  doi          = {10.1007/978-3-030-30671-7_3},
  year         = {2020},
}