@inproceedings{12976,
  abstract     = {3D printing based on continuous deposition of materials, such as filament-based 3D printing, has seen widespread adoption thanks to its versatility in working with a wide range of materials. An important shortcoming of this type of technology is its limited multi-material capabilities. While there are simple hardware designs that enable multi-material printing in principle, the required software is heavily underdeveloped. A typical hardware design fuses together individual materials fed into a single chamber from multiple inlets before they are deposited. This design, however, introduces a time delay between the intended material mixture and its actual deposition. In this work, inspired by diverse path planning research in robotics, we show that this mechanical challenge can be addressed via improved printer control. We propose to formulate the search for optimal multi-material printing policies in a reinforcement learning setup. We put forward a simple numerical deposition model that takes into account the non-linear material mixing and delayed material deposition. To validate our system we focus on color fabrication, a problem known for its strict requirements for varying material mixtures at a high spatial frequency. We demonstrate that our learned control policy outperforms state-of-the-art hand-crafted algorithms.},
  author       = {Liao, Kang and Tricard, Thibault and Piovarci, Michael and Seidel, Hans-Peter and Babaei, Vahid},
  booktitle    = {2023 IEEE International Conference on Robotics and Automation},
  issn         = {1050-4729},
  keywords     = {reinforcement learning, deposition, control, color, multi-filament},
  location     = {London, United Kingdom},
  pages        = {12345--12352},
  publisher    = {IEEE},
  title        = {Learning Deposition Policies for Fused Multi-Material {3D} Printing},
  doi          = {10.1109/ICRA48891.2023.10160465},
  year         = {2023},
}

@inproceedings{12979,
  abstract     = {Color and gloss are fundamental aspects of surface appearance. State-of-the-art fabrication techniques can manipulate both properties of the printed 3D objects. However, in the context of appearance reproduction, perceptual aspects of color and gloss are usually handled separately, even though previous perceptual studies suggest their interaction. Our work is motivated by previous studies demonstrating a perceived color shift due to a change in the object's gloss, i.e., two samples with the same color but different surface gloss appear as they have different colors. In this paper, we conduct new experiments which support this observation and provide insights into the magnitude and direction of the perceived color change. We use the observations as guidance to design a new method that estimates and corrects the color shift enabling the fabrication of objects with the same perceived color but different surface gloss. We formulate the problem as an optimization procedure solved using differentiable rendering. We evaluate the effectiveness of our method in perceptual experiments with 3D objects fabricated using a multi-material 3D printer and demonstrate potential applications.},
  author       = {Condor, Jorge and Piovarci, Michael and Bickel, Bernd and Didyk, Piotr},
  booktitle    = {SIGGRAPH '23 Conference Proceedings},
  isbn         = {9798400701597},
  keywords     = {color, gloss, perception, color compensation, color management},
  location     = {Los Angeles, CA, United States},
  publisher    = {Association for Computing Machinery},
  title        = {Gloss-Aware Color Correction for {3D} Printing},
  doi          = {10.1145/3588432.3591546},
  year         = {2023},
}

@article{12984,
  abstract     = {Tattoos are a highly popular medium, with both artistic and medical applications. Although the mechanical process of tattoo application has evolved historically, the results are reliant on the artisanal skill of the artist. This can be especially challenging for some skin tones, or in cases where artists lack experience. We provide the first systematic overview of tattooing as a computational fabrication technique. We built an automated tattooing rig and a recipe for the creation of silicone sheets mimicking realistic skin tones, which allowed us to create an accurate model predicting tattoo appearance. This enables several exciting applications including tattoo previewing, color retargeting, novel ink spectra optimization, color-accurate prosthetics, and more.},
  author       = {Piovarci, Michael and Chapiro, Alexandre and Bickel, Bernd},
  issn         = {1557-7368},
  journal      = {ACM Transactions on Graphics},
  keywords     = {appearance, modeling, reproduction, tattoo, skin color, gamut mapping, ink-optimization, prosthetic},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {{Skin-Screen}: A Computational Fabrication Framework for Color Tattoos},
  doi          = {10.1145/3592432},
  volume       = {42},
  year         = {2023},
}

@inproceedings{9943,
  abstract     = {Segmentation is the process of partitioning digital images into meaningful regions. The analysis of biological high content images often requires segmentation as a first step. We propose ilastik as an easy-to-use tool which allows the user without expertise in image processing to perform segmentation and classification in a unified way. ilastik learns from labels provided by the user through a convenient mouse interface. Based on these labels, ilastik infers a problem specific segmentation. A random forest classifier is used in the learning step, in which each pixel's neighborhood is characterized by a set of generic (nonlinear) features. ilastik supports up to three spatial plus one spectral dimension and makes use of all dimensions in the feature calculation. ilastik provides realtime feedback that enables the user to interactively refine the segmentation result and hence further fine-tune the classifier. An uncertainty measure guides the user to ambiguous regions in the images. Real time performance is achieved by multi-threading which fully exploits the capabilities of modern multi-core machines. Once a classifier has been trained on a set of representative images, it can be exported and used to automatically process a very large number of images (e.g. using the CellProfiler pipeline). ilastik is an open source project and released under the BSD license at www.ilastik.org.},
  author       = {Sommer, Christoph M. and Straehle, Christoph and K{\"o}the, Ullrich and Hamprecht, Fred A.},
  booktitle    = {2011 IEEE International Symposium on Biomedical Imaging: from Nano to Micro},
  isbn         = {978-1-4244-4127-3},
  issn         = {1945-8452},
  keywords     = {image segmentation, biomedical imaging, three dimensional displays, neurons, retina, observers, image color analysis},
  location     = {Chicago, Illinois, USA},
  publisher    = {IEEE},
  title        = {{Ilastik}: Interactive Learning and Segmentation Toolkit},
  doi          = {10.1109/ISBI.2011.5872394},
  year         = {2011},
}

