@article{14488,
  abstract     = {Portrait viewpoint and illumination editing is an important problem with several applications in VR/AR, movies, and photography. Comprehensive knowledge of geometry and illumination is critical for obtaining photorealistic results. Current methods are unable to explicitly model in 3D while handling both viewpoint and illumination editing from a single image. In this paper, we propose VoRF, a novel approach that can take even a single portrait image as input and relight human heads under novel illuminations that can be viewed from arbitrary viewpoints. VoRF represents a human head as a continuous volumetric field and learns a prior model of human heads using a coordinate-based MLP with individual latent spaces for identity and illumination. The prior model is learned in an auto-decoder manner over a diverse class of head shapes and appearances, allowing VoRF to generalize to novel test identities from a single input image. Additionally, VoRF has a reflectance MLP that uses the intermediate features of the prior model for rendering One-Light-at-A-Time (OLAT) images under novel views. We synthesize novel illuminations by combining these OLAT images with target environment maps. Qualitative and quantitative evaluations demonstrate the effectiveness of VoRF for relighting and novel view synthesis, even when applied to unseen subjects under uncontrolled illumination. This work is an extension of Rao et al. (VoRF: Volumetric Relightable Faces 2022). We provide extensive evaluation and ablative studies of our model and also provide an application, where any face can be relighted using textual input.},
  author       = {Rao, Pramod and Mallikarjun, B. R. and Fox, Gereon and Weyrich, Tim and Bickel, Bernd and Pfister, Hanspeter and Matusik, Wojciech and Zhan, Fangneng and Tewari, Ayush and Theobalt, Christian and Elgharib, Mohamed},
  issn         = {1573-1405},
  journal      = {International Journal of Computer Vision},
  publisher    = {Springer Nature},
  title        = {{A deeper analysis of volumetric relightable faces}},
  doi          = {10.1007/s11263-023-01899-3},
  year         = {2023},
}

@article{14628,
  author       = {Makatura, Liane and Wang, Bohan and Chen, Yi-Lu and Deng, Bolei and Wojtan, Christopher J and Bickel, Bernd and Matusik, Wojciech},
  title        = {{Procedural metamaterials: A unified procedural graph for metamaterial design}},
  journal      = {ACM Transactions on Graphics},
  volume       = {42},
  number       = {5},
  publisher    = {Association for Computing Machinery},
  issn         = {0730-0301},
  keywords     = {Computer Graphics and Computer-Aided Design},
  doi          = {10.1145/3605389},
  year         = {2023},
  abstract     = {We introduce a compact, intuitive procedural graph representation for cellular metamaterials, which are small-scale, tileable structures that can be architected to exhibit many useful material properties. Because the structures’ “architectures” vary widely—with elements such as beams, thin shells, and solid bulks—it is difficult to explore them using existing representations. Generic approaches like voxel grids are versatile, but it is cumbersome to represent and edit individual structures; architecture-specific approaches address these issues, but are incompatible with one another. By contrast, our procedural graph succinctly represents the construction process for any structure using a simple skeleton annotated with spatially varying thickness. To express the highly constrained triply periodic minimal surfaces (TPMS) in this manner, we present the first fully automated version of the conjugate surface construction method, which allows novices to create complex TPMS from intuitive input. We demonstrate our representation’s expressiveness, accuracy, and compactness by constructing a wide range of established structures and hundreds of novel structures with diverse architectures and material properties. We also conduct a user study to verify our representation’s ease-of-use and ability to expand engineers’ capacity for exploration.},
}

@article{13188,
  author       = {Hafner, Christian and Bickel, Bernd},
  title        = {{The design space of Kirchhoff rods}},
  journal      = {ACM Transactions on Graphics},
  volume       = {42},
  number       = {5},
  publisher    = {Association for Computing Machinery},
  issn         = {1557-7368},
  keywords     = {Computer Graphics, Computational Design, Computational Geometry, Shape Modeling},
  doi          = {10.1145/3606033},
  year         = {2023},
  abstract     = {The Kirchhoff rod model describes the bending and twisting of slender elastic rods in three dimensions, and has been widely studied to enable the prediction of how a rod will deform, given its geometry and boundary conditions. In this work, we study a number of inverse problems with the goal of computing the geometry of a straight rod that will automatically deform to match a curved target shape after attaching its endpoints to a support structure. Our solution lets us finely control the static equilibrium state of a rod by varying the cross-sectional profiles along its length.
We also show that the set of physically realizable equilibrium states admits a concise geometric description in terms of linear line complexes, which leads to very efficient computational design algorithms. Implemented in an interactive software tool, they allow us to convert three-dimensional hand-drawn spline curves to elastic rods, and give feedback about the feasibility and practicality of a design in real time. We demonstrate the efficacy of our method by designing and manufacturing several physical prototypes with applications to interior design and soft robotics.},
}

@article{13267,
  abstract     = {Three-dimensional (3D) reconstruction of living brain tissue down to an individual synapse level would create opportunities for decoding the dynamics and structure–function relationships of the brain’s complex and dense information processing network; however, this has been hindered by insufficient 3D resolution, inadequate signal-to-noise ratio and prohibitive light burden in optical imaging, whereas electron microscopy is inherently static. Here we solved these challenges by developing an integrated optical/machine-learning technology, LIONESS (live information-optimized nanoscopy enabling saturated segmentation). This leverages optical modifications to stimulated emission depletion microscopy in comprehensively, extracellularly labeled tissue and previous information on sample structure via machine learning to simultaneously achieve isotropic super-resolution, high signal-to-noise ratio and compatibility with living tissue. This allows dense deep-learning-based instance segmentation and 3D reconstruction at a synapse level, incorporating molecular, activity and morphodynamic information. LIONESS opens up avenues for studying the dynamic functional (nano-)architecture of living brain tissue.},
  author       = {Velicky, Philipp and Miguel Villalba, Eder and Michalska, Julia M and Lyudchik, Julia and Wei, Donglai and Lin, Zudi and Watson, Jake and Troidl, Jakob and Beyer, Johanna and Ben Simon, Yoav and Sommer, Christoph M and Jahr, Wiebke and Cenameri, Alban and Broichhagen, Johannes and Grant, Seth G. N. and Jonas, Peter M and Novarino, Gaia and Pfister, Hanspeter and Bickel, Bernd and Danzl, Johann G},
  issn         = {1548-7105},
  journal      = {Nature Methods},
  pages        = {1256--1265},
  publisher    = {Springer Nature},
  title        = {{Dense 4D nanoscale reconstruction of living brain tissue}},
  doi          = {10.1038/s41592-023-01936-6},
  volume       = {20},
  year         = {2023},
}

@inproceedings{14241,
  author       = {Tojo, Kenji and Shamir, Ariel and Bickel, Bernd and Umetani, Nobuyuki},
  title        = {{Stealth shaper: Reflectivity optimization as surface stylization}},
  booktitle    = {SIGGRAPH 2023 Conference Proceedings},
  location     = {Los Angeles, CA, United States},
  publisher    = {Association for Computing Machinery},
  isbn         = {9798400701597},
  doi          = {10.1145/3588432.3591542},
  year         = {2023},
  abstract     = {We present a technique to optimize the reflectivity of a surface while preserving its overall shape. The naïve optimization of the mesh vertices using the gradients of reflectivity simulations results in undesirable distortion. In contrast, our robust formulation optimizes the surface normal as an independent variable that bridges the reflectivity term with differential rendering, and the regularization term with as-rigid-as-possible elastic energy. We further adaptively subdivide the input mesh to improve the convergence. Consequently, our method can minimize the retroreflectivity of a wide range of input shapes, resulting in sharply creased shapes ubiquitous among stealth aircraft and Sci-Fi vehicles. Furthermore, by changing the reward for the direction of the outgoing light directions, our method can be applied to other reflectivity design tasks, such as the optimization of architectural walls to concentrate light in a specific region. We have tested the proposed method using light-transport simulations and real-world 3D-printed objects.},
}

@article{12972,
  abstract     = {Embroidery is a long-standing and high-quality approach to making logos and images on textiles. Nowadays, it can also be performed via automated machines that weave threads with high spatial accuracy. A characteristic feature of the appearance of the threads is a high degree of anisotropy. The anisotropic behavior is caused by depositing thin but long strings of thread. As a result, the stitched patterns convey both color and direction. Artists leverage this anisotropic behavior to enhance pure color images with textures, illusions of motion, or depth cues. However, designing colorful embroidery patterns with prescribed directionality is a challenging task, one usually requiring an expert designer. In this work, we propose an interactive algorithm that generates machine-fabricable embroidery patterns from multi-chromatic images equipped with user-specified directionality fields. We cast the problem of finding a stitching pattern into vector theory. To find a suitable stitching pattern, we extract sources and sinks from the divergence field of the vector field extracted from the input and use them to trace streamlines. We further optimize the streamlines to guarantee a smooth and connected stitching pattern. The generated patterns approximate the color distribution constrained by the directionality field. To allow for further artistic control, the trade-off between color match and directionality match can be interactively explored via an intuitive slider. We showcase our approach by fabricating several embroidery paths.},
  author       = {Liu, Zhenyuan and Piovarci, Michael and Hafner, Christian and Charrondiere, Raphael and Bickel, Bernd},
  issn         = {1467-8659},
  journal      = {Computer Graphics Forum},
  keywords     = {embroidery, design, directionality, density, image},
  location     = {Saarbrucken, Germany},
  number       = {2},
  pages        = {397--409},
  publisher    = {Wiley},
  title        = {{Directionality-aware design of embroidery patterns}},
  doi          = {10.1111/cgf.14770},
  volume       = {42},
  year         = {2023},
}

@inproceedings{12979,
  abstract     = {Color and gloss are fundamental aspects of surface appearance. State-of-the-art fabrication techniques can manipulate both properties of the printed 3D objects. However, in the context of appearance reproduction, perceptual aspects of color and gloss are usually handled separately, even though previous perceptual studies suggest their interaction. Our work is motivated by previous studies demonstrating a perceived color shift due to a change in the object's gloss, i.e., two samples with the same color but different surface gloss appear as they have different colors. In this paper, we conduct new experiments which support this observation and provide insights into the magnitude and direction of the perceived color change. We use the observations as guidance to design a new method that estimates and corrects the color shift enabling the fabrication of objects with the same perceived color but different surface gloss. We formulate the problem as an optimization procedure solved using differentiable rendering. We evaluate the effectiveness of our method in perceptual experiments with 3D objects fabricated using a multi-material 3D printer and demonstrate potential applications.},
  author       = {Condor, Jorge and Piovarci, Michael and Bickel, Bernd and Didyk, Piotr},
  booktitle    = {SIGGRAPH 2023 Conference Proceedings},
  isbn         = {9798400701597},
  keywords     = {color, gloss, perception, color compensation, color management},
  location     = {Los Angeles, CA, United States},
  publisher    = {Association for Computing Machinery},
  title        = {{Gloss-aware color correction for 3D printing}},
  doi          = {10.1145/3588432.3591546},
  year         = {2023},
}

@article{12984,
  abstract     = {Tattoos are a highly popular medium, with both artistic and medical applications. Although the mechanical process of tattoo application has evolved historically, the results are reliant on the artisanal skill of the artist. This can be especially challenging for some skin tones, or in cases where artists lack experience. We provide the first systematic overview of tattooing as a computational fabrication technique. We built an automated tattooing rig and a recipe for the creation of silicone sheets mimicking realistic skin tones, which allowed us to create an accurate model predicting tattoo appearance. This enables several exciting applications including tattoo previewing, color retargeting, novel ink spectra optimization, color-accurate prosthetics, and more.},
  author       = {Piovarci, Michael and Chapiro, Alexandre and Bickel, Bernd},
  issn         = {1557-7368},
  journal      = {ACM Transactions on Graphics},
  keywords     = {appearance, modeling, reproduction, tattoo, skin color, gamut mapping, ink-optimization, prosthetic},
  location     = {Los Angeles, CA, United States},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {{Skin-Screen: A computational fabrication framework for color tattoos}},
  doi          = {10.1145/3592432},
  volume       = {42},
  year         = {2023},
}

@article{13049,
  abstract     = {We propose a computational design approach for covering a surface with individually addressable RGB LEDs, effectively forming a low-resolution surface screen. To achieve a low-cost and scalable approach, we propose creating designs from flat PCB panels bent in-place along the surface of a 3D printed core. Working with standard rigid PCBs enables the use of established PCB manufacturing services, allowing the fabrication of designs with several hundred LEDs. Our approach optimizes the PCB geometry for folding, and then jointly optimizes the LED packing, circuit and routing, solving a challenging layout problem under strict manufacturing requirements. Unlike paper, PCBs cannot bend beyond a certain point without breaking. Therefore, we introduce parametric cut patterns acting as hinges, designed to allow bending while remaining compact. To tackle the joint optimization of placement, circuit and routing, we propose a specialized algorithm that splits the global problem into one sub-problem per triangle, which is then individually solved. Our technique generates PCB blueprints in a completely automated way. After being fabricated by a PCB manufacturing service, the boards are bent and glued by the user onto the 3D printed support. We demonstrate our technique on a range of physical models and virtual examples, creating intricate surface light patterns from hundreds of LEDs.},
  author       = {Freire, Marco and Bhargava, Manas and Schreck, Camille and Hugron, Pierre-Alexandre and Bickel, Bernd and Lefebvre, Sylvain},
  issn         = {1557-7368},
  journal      = {ACM Transactions on Graphics},
  keywords     = {PCB design and layout, Mesh geometry models},
  location     = {Los Angeles, CA, United States},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {{PCBend: Light up your 3D shapes with foldable circuit boards}},
  doi          = {10.1145/3592411},
  volume       = {42},
  year         = {2023},
}

@article{10922,
  author       = {Liu, Zhenyuan and Hu, Jingyu and Xu, Hao and Song, Peng and Zhang, Ran and Bickel, Bernd and Fu, Chi-Wing},
  title        = {{Worst-case rigidity analysis and optimization for assemblies with mechanical joints}},
  journal      = {Computer Graphics Forum},
  volume       = {41},
  number       = {2},
  pages        = {507--519},
  publisher    = {Wiley},
  issn         = {1467-8659},
  doi          = {10.1111/cgf.14490},
  year         = {2022},
  abstract     = {We study structural rigidity for assemblies with mechanical joints. Existing methods identify whether an assembly is structurally rigid by assuming parts are perfectly rigid. Yet, an assembly identified as rigid may not be that “rigid” in practice, and existing methods cannot quantify how rigid an assembly is. We address this limitation by developing a new measure, worst-case rigidity, to quantify the rigidity of an assembly as the largest possible deformation that the assembly undergoes for arbitrary external loads of fixed magnitude. Computing worst-case rigidity is non-trivial due to non-rigid parts and different joint types. We thus formulate a new computational approach by encoding parts and their connections into a stiffness matrix, in which parts are modeled as deformable objects and joints as soft constraints. Based on this, we formulate worst-case rigidity analysis as an optimization that seeks the worst-case deformation of an assembly for arbitrary external loads, and solve the optimization problem via an eigenanalysis. Furthermore, we present methods to optimize the geometry and topology of various assemblies to enhance their rigidity, as guided by our rigidity measure. In the end, we validate our method on a variety of assembly structures with physical experiments and demonstrate its effectiveness by designing and fabricating several structurally rigid assemblies.},
}

@article{11442,
  author       = {Piovarci, Michael and Foshey, Michael and Xu, Jie and Erps, Timothy and Babaei, Vahid and Didyk, Piotr and Rusinkiewicz, Szymon and Matusik, Wojciech and Bickel, Bernd},
  title        = {{Closed-loop control of direct ink writing via reinforcement learning}},
  journal      = {ACM Transactions on Graphics},
  volume       = {41},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  issn         = {1557-7368},
  doi          = {10.1145/3528223.3530144},
  year         = {2022},
  abstract     = {Enabling additive manufacturing to employ a wide range of novel, functional materials can be a major boost to this technology. However, making such materials printable requires painstaking trial-and-error by an expert operator, as they typically tend to exhibit peculiar rheological or hysteresis properties. Even in the case of successfully finding the process parameters, there is no guarantee of print-to-print consistency due to material differences between batches. These challenges make closed-loop feedback an attractive option where the process parameters are adjusted on-the-fly. There are several challenges for designing an efficient controller: the deposition parameters are complex and highly coupled, artifacts occur after long time horizons, simulating the deposition is computationally costly, and learning on hardware is intractable. In this work, we demonstrate the feasibility of learning a closed-loop control policy for additive manufacturing using reinforcement learning. We show that approximate, but efficient, numerical simulation is sufficient as long as it allows learning the behavioral patterns of deposition that translate to real-world experiences. In combination with reinforcement learning, our model can be used to discover control policies that outperform baseline controllers. Furthermore, the recovered policies have a minimal sim-to-real gap. We showcase this by applying our control policy in-vivo on a single-layer, direct ink writing printer.},
}

@article{11735,
  author       = {Chen, Rulin and Wang, Ziqi and Song, Peng and Bickel, Bernd},
  title        = {{Computational design of high-level interlocking puzzles}},
  journal      = {ACM Transactions on Graphics},
  volume       = {41},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  issn         = {1557-7368},
  doi          = {10.1145/3528223.3530071},
  year         = {2022},
  abstract     = {Interlocking puzzles are intriguing geometric games where the puzzle pieces are held together based on their geometric arrangement, preventing the puzzle from falling apart. High-level-of-difficulty, or simply high-level, interlocking puzzles are a subclass of interlocking puzzles that require multiple moves to take out the first subassembly from the puzzle. Solving a high-level interlocking puzzle is a challenging task since one has to explore many different configurations of the puzzle pieces until reaching a configuration where the first subassembly can be taken out. Designing a high-level interlocking puzzle with a user-specified level of difficulty is even harder since the puzzle pieces have to be interlocking in all the configurations before the first subassembly is taken out. In this paper, we present a computational approach to design high-level interlocking puzzles. The core idea is to represent all possible configurations of an interlocking puzzle as well as transitions among these configurations using a rooted, undirected graph called a disassembly graph and leverage this graph to find a disassembly plan that requires a minimal number of moves to take out the first subassembly from the puzzle. At the design stage, our algorithm iteratively constructs the geometry of each puzzle piece to expand the disassembly graph incrementally, aiming to achieve a user-specified level of difficulty. We show that our approach allows efficient generation of high-level interlocking puzzles of various shape complexities, including new solutions not attainable by state-of-the-art approaches.},
}

@unpublished{11943,
  abstract     = {Complex wiring between neurons underlies the information-processing network enabling all brain functions, including cognition and memory. For understanding how the network is structured, processes information, and changes over time, comprehensive visualization of the architecture of living brain tissue with its cellular and molecular components would open up major opportunities. However, electron microscopy (EM) provides nanometre-scale resolution required for full \textit{in-silico} reconstruction\textsuperscript{1–5}, yet is limited to fixed specimens and static representations. Light microscopy allows live observation, with super-resolution approaches\textsuperscript{6–12} facilitating nanoscale visualization, but comprehensive 3D-reconstruction of living brain tissue has been hindered by tissue photo-burden, photobleaching, insufficient 3D-resolution, and inadequate signal-to-noise ratio (SNR). Here we demonstrate saturated reconstruction of living brain tissue. We developed an integrated imaging and analysis technology, adapting stimulated emission depletion (STED) microscopy\textsuperscript{6,13} in extracellularly labelled tissue\textsuperscript{14} for high SNR and near-isotropic resolution. Centrally, a two-stage deep-learning approach leveraged previously obtained information on sample structure to drastically reduce photo-burden and enable automated volumetric reconstruction down to single synapse level. Live reconstruction provides unbiased analysis of tissue architecture across time in relation to functional activity and targeted activation, and contextual understanding of molecular labelling. This adoptable technology will facilitate novel insights into the dynamic functional architecture of living brain tissue.},
  author       = {Velicky, Philipp and Miguel Villalba, Eder and Michalska, Julia M and Wei, Donglai and Lin, Zudi and Watson, Jake and Troidl, Jakob and Beyer, Johanna and Ben Simon, Yoav and Sommer, Christoph M and Jahr, Wiebke and Cenameri, Alban and Broichhagen, Johannes and Grant, Seth G. N. and Jonas, Peter M and Novarino, Gaia and Pfister, Hanspeter and Bickel, Bernd and Danzl, Johann G},
  booktitle    = {bioRxiv},
  note         = {bioRxiv preprint},
  publisher    = {Cold Spring Harbor Laboratory},
  title        = {{Saturated reconstruction of living brain tissue}},
  doi          = {10.1101/2022.03.16.484431},
  year         = {2022},
}

@article{11993,
  abstract     = {Moulding refers to a set of manufacturing techniques in which a mould, usually a cavity or a solid frame, is used to shape a liquid or pliable material into an object of the desired shape. The popularity of moulding comes from its effectiveness, scalability and versatility in terms of employed materials. Its relevance as a fabrication process is demonstrated by the extensive literature covering different aspects related to mould design, from material flow simulation to the automation of mould geometry design. In this state-of-the-art report, we provide an extensive review of the automatic methods for the design of moulds, focusing on contributions from a geometric perspective. We classify existing mould design methods based on their computational approach and the nature of their target moulding process. We summarize the relationships between computational approaches and moulding techniques, highlighting their strengths and limitations. Finally, we discuss potential future research directions.},
  author       = {Alderighi, Thomas and Malomo, Luigi and Auzinger, Thomas and Bickel, Bernd and Cignoni, Paolo and Pietroni, Nico},
  issn         = {1467-8659},
  journal      = {Computer Graphics Forum},
  keywords     = {Computer Graphics and Computer-Aided Design},
  number       = {6},
  pages        = {435--452},
  publisher    = {Wiley},
  title        = {{State of the art in computational mould design}},
  doi          = {10.1111/cgf.14581},
  volume       = {41},
  year         = {2022},
}

@inproceedings{12452,
  author       = {Rao, Pramod and B R, Mallikarjun and Fox, Gereon and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Tewari, Ayush and Theobalt, Christian and Elgharib, Mohamed},
  title        = {{VoRF: Volumetric Relightable Faces}},
  booktitle    = {33rd British Machine Vision Conference},
  location     = {London, United Kingdom},
  publisher    = {British Machine Vision Association and Society for Pattern Recognition},
  year         = {2022},
  abstract     = {Portrait viewpoint and illumination editing is an important problem with several applications in VR/AR, movies, and photography. Comprehensive knowledge of geometry and illumination is critical for obtaining photorealistic results. Current methods are unable to explicitly model in 3D while handing both viewpoint and illumination editing from a single image. In this paper, we propose VoRF, a novel approach that can take even a single portrait image as input and relight human heads under novel illuminations that can be viewed from arbitrary viewpoints. VoRF represents a human head as a continuous volumetric field and learns a prior model of human heads using a coordinate-based MLP with separate latent spaces for identity and illumination. The prior model is learnt in an auto-decoder manner over a diverse class of head shapes and appearances, allowing VoRF to generalize to novel test identities from a single input image. Additionally, VoRF has a reflectance MLP that uses the intermediate features of the prior model for rendering One-Light-at-A-Time (OLAT) images under novel views. We synthesize novel illuminations by combining these OLAT images with target environment maps. Qualitative and quantitative evaluations demonstrate the effectiveness of VoRF for relighting and novel view synthesis even when applied to unseen subjects under uncontrolled illuminations.},
}

@article{9241,
  author       = {Elek, Oskar and Zhang, Ran and Sumin, Denis and Myszkowski, Karol and Bickel, Bernd and Wilkie, Alexander and Křivánek, Jaroslav and Weyrich, Tim},
  title        = {{Robust and practical measurement of volume transport parameters in solid photo-polymer materials for 3D printing}},
  journal      = {Optics Express},
  volume       = {29},
  number       = {5},
  pages        = {7568--7588},
  publisher    = {The Optical Society},
  issn         = {1094-4087},
  doi          = {10.1364/OE.406095},
  year         = {2021},
  abstract     = {Volumetric light transport is a pervasive physical phenomenon, and therefore its accurate simulation is important for a broad array of disciplines. While suitable mathematical models for computing the transport are now available, obtaining the necessary material parameters needed to drive such simulations is a challenging task: direct measurements of these parameters from material samples are seldom possible. Building on the inverse scattering paradigm, we present a novel measurement approach which indirectly infers the transport parameters from extrinsic observations of multiple-scattered radiance. The novelty of the proposed approach lies in replacing structured illumination with a structured reflector bonded to the sample, and a robust fitting procedure that largely compensates for potential systematic errors in the calibration of the setup. We show the feasibility of our approach by validating simulations of complex 3D compositions of the measured materials against physical prints, using photo-polymer resins. As presented in this paper, our technique yields colorspace data suitable for accurate appearance reproduction in the area of 3D printing. Beyond that, and without fundamental changes to the basic measurement methodology, it could equally well be used to obtain spectral measurements that are useful for other application areas.},
}

@article{9376,
  author       = {Zhang, Ran and Auzinger, Thomas and Bickel, Bernd},
  title        = {{Computational design of planar multistable compliant structures}},
  journal      = {ACM Transactions on Graphics},
  volume       = {40},
  number       = {5},
  publisher    = {Association for Computing Machinery},
  issn         = {1557-7368},
  keywords     = {multistability, mechanism, computational design, rigidity},
  doi          = {10.1145/3453477},
  year         = {2021},
  abstract     = {This paper presents a method for designing planar multistable compliant structures. Given a sequence of desired stable states and the corresponding poses of the structure, we identify the topology and geometric realization of a mechanism—consisting of bars and joints—that is able to physically reproduce the desired multistable behavior. In order to solve this problem efficiently, we build on insights from minimally rigid graph theory to identify simple but effective topologies for the mechanism. We then optimize its geometric parameters, such as joint positions and bar lengths, to obtain correct transitions between the given poses. Simultaneously, we ensure adequate stability of each pose based on an effective approximate error metric related to the elastic energy Hessian of the bars in the mechanism. As demonstrated by our results, we obtain functional multistable mechanisms of manageable complexity that can be fabricated using 3D printing. Further, we evaluated the effectiveness of our method on a large number of examples in the simulation and fabricated several physical prototypes.},
}

@article{9408,
  abstract     = {We present a computational design system that assists users to model, optimize, and fabricate quad-robots with soft skins. Our system addresses the challenging task of predicting their physical behavior by fully integrating the multibody dynamics of the mechanical skeleton and the elastic behavior of the soft skin. The developed motion control strategy uses an alternating optimization scheme to avoid expensive full space time-optimization, interleaving space-time optimization for the skeleton, and frame-by-frame optimization for the full dynamics. The output are motor torques to drive the robot to achieve a user prescribed motion trajectory. We also provide a collection of convenient engineering tools and empirical manufacturing guidance to support the fabrication of the designed quad-robot. We validate the feasibility of designs generated with our system through physics simulations and with a physically-fabricated prototype.},
  author       = {Feng, Xudong and Liu, Jiafeng and Wang, Huamin and Yang, Yin and Bao, Hujun and Bickel, Bernd and Xu, Weiwei},
  issn         = {1077-2626},
  journal      = {IEEE Transactions on Visualization and Computer Graphics},
  number       = {6},
  publisher    = {IEEE},
  title        = {{Computational design of skinned Quad-Robots}},
  doi          = {10.1109/TVCG.2019.2957218},
  volume       = {27},
  year         = {2021},
}

@article{9547,
  abstract     = {With the wider availability of full-color 3D printers, color-accurate 3D-print preparation has received increased attention. A key challenge lies in the inherent translucency of commonly used print materials that blurs out details of the color texture. Previous work tries to compensate for these scattering effects through strategic assignment of colored primary materials to printer voxels. To date, the highest-quality approach uses iterative optimization that relies on computationally expensive Monte Carlo light transport simulation to predict the surface appearance from subsurface scattering within a given print material distribution; that optimization, however, takes in the order of days on a single machine. In our work, we dramatically speed up the process by replacing the light transport simulation with a data-driven approach. Leveraging a deep neural network to predict the scattering within a highly heterogeneous medium, our method performs around two orders of magnitude faster than Monte Carlo rendering while yielding optimization results of similar quality level. The network is based on an established method from atmospheric cloud rendering, adapted to our domain and extended by a physically motivated weight sharing scheme that substantially reduces the network size. We analyze its performance in an end-to-end print preparation pipeline and compare quality and runtime to alternative approaches, and demonstrate its generalization to unseen geometry and material values. This for the first time enables full heterogenous material optimization for 3D-print preparation within time frames in the order of the actual printing time.},
  author       = {Rittig, Tobias and Sumin, Denis and Babaei, Vahid and Didyk, Piotr and Voloboy, Alexey and Wilkie, Alexander and Bickel, Bernd and Myszkowski, Karol and Weyrich, Tim and Křivánek, Jaroslav},
  issn         = {1467-8659},
  journal      = {Computer Graphics Forum},
  number       = {2},
  pages        = {205--219},
  publisher    = {Wiley},
  title        = {{Neural acceleration of scattering-aware color 3D printing}},
  doi          = {10.1111/cgf.142626},
  volume       = {40},
  year         = {2021},
}

@inproceedings{10148,
  abstract     = {Tactile feedback of an object’s surface enables us to discern its material properties and affordances. This understanding is used in digital fabrication processes by creating objects with high-resolution surface variations to influence a user’s tactile perception. As the design of such surface haptics commonly relies on knowledge from real-life experiences, it is unclear how to adapt this information for digital design methods. In this work, we investigate replicating the haptics of real materials. Using an existing process for capturing an object’s microgeometry, we digitize and reproduce the stable surface information of a set of 15 fabric samples. In a psychophysical experiment, we evaluate the tactile qualities of our set of original samples and their replicas. From our results, we see that direct reproduction of surface variations is able to influence different psychophysical dimensions of the tactile perception of surface textures. While the fabrication process did not preserve all properties, our approach underlines that replication of surface microgeometries benefits fabrication methods in terms of haptic perception by covering a large range of tactile variations. Moreover, by changing the surface structure of a single fabricated material, its material perception can be influenced. We conclude by proposing strategies for capturing and reproducing digitized textures to better resemble the perceived haptics of the originals.},
  author       = {Degraen, Donald and Piovarci, Michael and Bickel, Bernd and Krüger, Antonio},
  booktitle    = {The 34th Annual ACM Symposium on User Interface Software and Technology},
  isbn         = {978-1-4503-8635-7},
  location     = {Virtual},
  pages        = {954--971},
  publisher    = {Association for Computing Machinery},
  title        = {{Capturing tactile properties of real surfaces for haptic reproduction}},
  doi          = {10.1145/3472749.3474798},
  year         = {2021},
}

