@article{9818,
abstract     = {Triangle mesh-based simulations are able to produce satisfying animations of knitted and woven cloth; however, they lack the rich geometric detail of yarn-level simulations. Naive texturing approaches do not consider yarn-level physics, while full yarn-level simulations may become prohibitively expensive for large garments. We propose a method to animate yarn-level cloth geometry on top of an underlying deforming mesh in a mechanics-aware fashion. Using triangle strains to interpolate precomputed yarn geometry, we are able to reproduce effects such as knit loops tightening under stretching. In combination with precomputed mesh animation or real-time mesh simulation, our method is able to animate yarn-level cloth in real time at large scales.},
  author       = {Sperl, Georg and Narain, Rahul and Wojtan, Christopher J.},
  issn         = {1557-7368},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {{Mechanics-aware deformation of yarn pattern geometry}},
  doi          = {10.1145/3450626.3459816},
  volume       = {40},
  year         = {2021},
}

@article{9819,
  abstract     = {Photorealistic editing of head portraits is a challenging task as humans are very sensitive to inconsistencies in faces. We present an approach for high-quality intuitive editing of the camera viewpoint and scene illumination (parameterised with an environment map) in a portrait image. This requires our method to capture and control the full reflectance field of the person in the image. Most editing approaches rely on supervised learning using training data captured with setups such as light and camera stages. Such datasets are expensive to acquire, not readily available and do not capture all the rich variations of in-the-wild portrait images. In addition, most supervised approaches focus only on relighting and do not allow camera viewpoint editing. Thus, they capture and control only a subset of the reflectance field. Recently, portrait editing has been demonstrated by operating in the generative model space of StyleGAN. While such approaches do not require direct supervision, there is a significant loss of quality when compared to the supervised approaches. In this paper, we present a method which learns from limited supervised training data. The training images include only people in a fixed neutral expression with eyes closed, with little variation in hair or background. Each person is captured under 150 one-light-at-a-time conditions and 8 camera poses. Instead of training directly in the image space, we design a supervised problem which learns transformations in the latent space of StyleGAN. This combines the best of supervised learning and generative adversarial modeling. We show that the StyleGAN prior allows for generalisation to different expressions, hairstyles and backgrounds. This produces high-quality photorealistic results for in-the-wild images and significantly outperforms existing methods. Our approach can edit the illumination and pose simultaneously, and runs at interactive rates.},
  author       = {Mallikarjun, B. R. and Tewari, Ayush and Dib, Abdallah and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Chevallier, Louis and Elgharib, Mohamed A. and Theobalt, Christian},
  issn         = {1557-7368},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {{PhotoApp: Photorealistic appearance editing of head portraits}},
  doi          = {10.1145/3450626.3459765},
  volume       = {40},
  year         = {2021},
}

@article{9820,
  abstract     = {Material appearance hinges not only on material reflectance properties but also on surface geometry and illumination. The unlimited number of potential combinations of these factors makes understanding and predicting material appearance a very challenging task. In this work, we collect a large-scale dataset of perceptual ratings of appearance attributes with more than 215,680 responses for 42,120 distinct combinations of material, shape, and illumination. The goal of this dataset is twofold. First, we analyze for the first time the effects of illumination and geometry on material perception across such a large collection of varied appearances. We connect our findings to those of the literature, discussing how previous knowledge generalizes across very diverse materials, shapes, and illuminations. Second, we use the collected dataset to train a deep learning architecture for predicting perceptual attributes that correlate with human judgments. We demonstrate the consistent and robust behavior of our predictor in various challenging scenarios, which, for the first time, enables estimating perceived material attributes from general 2D images. Since our predictor relies on the final appearance in an image, it can compare appearance properties across different geometries and illumination conditions. Finally, we demonstrate several applications that use our predictor, including appearance reproduction using 3D printing, BRDF editing by integrating our predictor into a differentiable renderer, illumination design, and material recommendations for scene design.},
  author       = {Serrano, Ana and Chen, Bin and Wang, Chao and Piovarči, Michal and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol},
  issn         = {1557-7368},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {{The effect of shape and illumination on material perception: Model and applications}},
  doi          = {10.1145/3450626.3459813},
  volume       = {40},
  year         = {2021},
}

@article{8384,
  abstract     = {Previous research on animations of soap bubbles, films, and foams largely focuses on the motion and geometric shape of the bubble surface. These works neglect the evolution of the bubble’s thickness, which is normally responsible for visual phenomena like surface vortices, Newton’s interference patterns, capillary waves, and deformation-dependent rupturing of films in a foam. In this paper, we model these natural phenomena by introducing the film thickness as a reduced degree of freedom in the Navier-Stokes equations and deriving their equations of motion. We discretize the equations on a non-manifold triangle mesh surface and couple them to an existing bubble solver. In doing so, we also introduce an incompressible fluid solver for 2.5D films and a novel advection algorithm for convecting fields across non-manifold surface junctions. Our simulations enhance state-of-the-art bubble solvers with additional effects caused by convection, rippling, draining, and evaporation of the thin film.},
  author       = {Ishida, Sadashige and Synak, Peter and Narita, Fumiya and Hachisuka, Toshiya and Wojtan, Christopher J.},
  issn         = {1557-7368},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {{A model for soap film dynamics with evolving thickness}},
  doi          = {10.1145/3386569.3392405},
  volume       = {39},
  year         = {2020},
}

@article{8385,
  abstract     = {We present a method for animating yarn-level cloth effects using a thin-shell solver. We accomplish this through numerical homogenization: we first use a large number of yarn-level simulations to build a model of the potential energy density of the cloth, and then use this energy density function to compute forces in a thin-shell simulator. We model several yarn-based materials, including both woven and knitted fabrics. Our model faithfully reproduces expected effects like the stiffness of woven fabrics, and the highly deformable nature and anisotropy of knitted fabrics. Our approach does not require any real-world experiments or measurements; because the method is based entirely on simulations, it can generate entirely new material models quickly, without the need for testing apparatuses or human intervention. We provide data-driven models of several woven and knitted fabrics, which can be used for efficient simulation with an off-the-shelf cloth solver.},
  author       = {Sperl, Georg and Narain, Rahul and Wojtan, Christopher J.},
  issn         = {1557-7368},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {{Homogenized yarn-level cloth}},
  doi          = {10.1145/3386569.3392412},
  volume       = {39},
  year         = {2020},
}

@article{8535,
  abstract     = {We propose a method to enhance the visual detail of a water surface simulation. Our method works as a post-processing step that takes a simulation as input and increases its apparent resolution by simulating many detailed Lagrangian water waves on top of it. We extend linear water wave theory to work in non-planar domains which deform over time, and we discretize the theory using Lagrangian wave packets attached to spline curves. The method is numerically stable and trivially parallelizable, and it produces high-frequency ripples with dispersive wave-like behaviors customized to the underlying fluid simulation.},
  author       = {Skřivan, Tomáš and Söderström, Andreas and Johansson, John and Sprenger, Christoph and Museth, Ken and Wojtan, Christopher J.},
  issn         = {1557-7368},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {{Wave curves: Simulating Lagrangian water waves on dynamically deforming surfaces}},
  doi          = {10.1145/3386569.3392466},
  volume       = {39},
  year         = {2020},
}
