@article{13129,
  author       = {Clozeau, Nicolas and Josien, Marc and Otto, Felix and Xu, Qiang},
  title        = {Bias in the Representative Volume Element Method: Periodize the Ensemble Instead of Its Realizations},
  journal      = {Foundations of Computational Mathematics},
  publisher    = {Springer Nature},
  issn         = {1615-3383},
  year         = {2023},
  doi          = {10.1007/s10208-023-09613-y},
  abstract     = {We study the representative volume element (RVE) method, which is a method to approximately infer the effective behavior $a_{\mathrm{hom}}$ of a stationary random medium. The latter is described by a coefficient field $a(x)$ generated from a given ensemble $\langle\cdot\rangle$ and the corresponding linear elliptic operator $-\nabla\cdot a\nabla$. In line with the theory of homogenization, the method proceeds by computing $d=3$ correctors ($d$ denoting the space dimension). To be numerically tractable, this computation has to be done on a finite domain: the so-called representative volume element, i.e., a large box with, say, periodic boundary conditions. The main message of this article is: Periodize the ensemble instead of its realizations. By this, we mean that it is better to sample from a suitably periodized ensemble than to periodically extend the restriction of a realization $a(x)$ from the whole-space ensemble $\langle\cdot\rangle$. We make this point by investigating the bias (or systematic error), i.e., the difference between $a_{\mathrm{hom}}$ and the expected value of the RVE method, in terms of its scaling w.r.t. the lateral size $L$ of the box. In case of periodizing $a(x)$, we heuristically argue that this error is generically $O(L^{-1})$. In case of a suitable periodization of $\langle\cdot\rangle$, we rigorously show that it is $O(L^{-d})$. In fact, we give a characterization of the leading-order error term for both strategies and argue that even in the isotropic case it is generically non-degenerate. We carry out the rigorous analysis in the convenient setting of ensembles $\langle\cdot\rangle$ of Gaussian type, which allow for a straightforward periodization, passing via the (integrable) covariance function. This setting has also the advantage of making the Price theorem and the Malliavin calculus available for optimal stochastic estimates of correctors. We actually need control of second-order correctors to capture the leading-order error term. This is due to inversion symmetry when applying the two-scale expansion to the Green function. As a bonus, we present a stream-lined strategy to estimate the error in a higher-order two-scale expansion of the Green function.},
}

@article{9649,
  author       = {Boissonnat, Jean-Daniel and Wintraecken, Mathijs},
  title        = {The Topological Correctness of {PL} Approximations of Isomanifolds},
  journal      = {Foundations of Computational Mathematics},
  publisher    = {Springer Nature},
  issn         = {1615-3383},
  volume       = {22},
  pages        = {967--1012},
  year         = {2022},
  doi          = {10.1007/s10208-021-09520-0},
  abstract     = {Isomanifolds are the generalization of isosurfaces to arbitrary dimension and codimension, i.e. manifolds defined as the zero set of some multivariate vector-valued smooth function $f : \mathbb{R}^d \to \mathbb{R}^{d-n}$. A natural (and efficient) way to approximate an isomanifold is to consider its Piecewise-Linear (PL) approximation based on a triangulation $T$ of the ambient space $\mathbb{R}^d$. In this paper, we give conditions under which the PL-approximation of an isomanifold is topologically equivalent to the isomanifold. The conditions are easy to satisfy in the sense that they can always be met by taking a sufficiently fine triangulation $T$. This contrasts with previous results on the triangulation of manifolds where, in arbitrary dimensions, delicate perturbations are needed to guarantee topological correctness, which leads to strong limitations in practice. We further give a bound on the Fr{\'e}chet distance between the original isomanifold and its PL-approximation. Finally we show analogous results for the PL-approximation of an isomanifold with boundary.},
}

@article{10211,
  author       = {Mondelli, Marco and Thrampoulidis, Christos and Venkataramanan, Ramji},
  title        = {Optimal Combination of Linear and Spectral Estimators for Generalized Linear Models},
  journal      = {Foundations of Computational Mathematics},
  publisher    = {Springer Nature},
  issn         = {1615-3383},
  year         = {2021},
  doi          = {10.1007/s10208-021-09531-x},
  keywords     = {Applied Mathematics, Computational Theory and Mathematics, Computational Mathematics, Analysis},
  abstract     = {We study the problem of recovering an unknown signal $x$ given measurements obtained from a generalized linear model with a Gaussian sensing matrix. Two popular solutions are based on a linear estimator $\hat{x}^{\mathrm{L}}$ and a spectral estimator $\hat{x}^{\mathrm{s}}$. The former is a data-dependent linear combination of the columns of the measurement matrix, and its analysis is quite simple. The latter is the principal eigenvector of a data-dependent matrix, and a recent line of work has studied its performance. In this paper, we show how to optimally combine $\hat{x}^{\mathrm{L}}$ and $\hat{x}^{\mathrm{s}}$. At the heart of our analysis is the exact characterization of the empirical joint distribution of $(x, \hat{x}^{\mathrm{L}}, \hat{x}^{\mathrm{s}})$ in the high-dimensional limit. This allows us to compute the Bayes-optimal combination of $\hat{x}^{\mathrm{L}}$ and $\hat{x}^{\mathrm{s}}$, given the limiting distribution of the signal $x$. When the distribution of the signal is Gaussian, then the Bayes-optimal combination has the form $\theta \hat{x}^{\mathrm{L}} + \hat{x}^{\mathrm{s}}$ and we derive the optimal combination coefficient. In order to establish the limiting distribution of $(x, \hat{x}^{\mathrm{L}}, \hat{x}^{\mathrm{s}})$, we design and analyze an approximate message passing algorithm whose iterates give $\hat{x}^{\mathrm{L}}$ and approach $\hat{x}^{\mathrm{s}}$. Numerical simulations demonstrate the improvement of the proposed combination with respect to the two methods considered separately.},
}

@article{6662,
  author       = {Mondelli, Marco and Montanari, Andrea},
  title        = {Fundamental Limits of Weak Recovery with Applications to Phase Retrieval},
  journal      = {Foundations of Computational Mathematics},
  publisher    = {Springer Nature},
  issn         = {1615-3383},
  volume       = {19},
  number       = {3},
  pages        = {703--773},
  year         = {2019},
  doi          = {10.1007/s10208-018-9395-y},
  abstract     = {In phase retrieval, we want to recover an unknown signal $x \in \mathbb{C}^d$ from $n$ quadratic measurements of the form $y_i = |\langle a_i, x\rangle|^2 + w_i$, where $a_i \in \mathbb{C}^d$ are known sensing vectors and $w_i$ is measurement noise. We ask the following weak recovery question: What is the minimum number of measurements $n$ needed to produce an estimator $\hat{x}(y)$ that is positively correlated with the signal $x$? We consider the case of Gaussian vectors $a_i$. We prove that---in the high-dimensional limit---a sharp phase transition takes place, and we locate the threshold in the regime of vanishingly small noise. For $n \le d - o(d)$, no estimator can do significantly better than random and achieve a strictly positive correlation. For $n \ge d + o(d)$, a simple spectral estimator achieves a positive correlation. Surprisingly, numerical simulations with the same spectral estimator demonstrate promising performance with realistic sensing matrices. Spectral methods are used to initialize non-convex optimization algorithms in phase retrieval, and our approach can boost the performance in this setting as well. Our impossibility result is based on classical information-theoretic arguments. The spectral algorithm computes the leading eigenvector of a weighted empirical covariance matrix. We obtain a sharp characterization of the spectral properties of this random matrix using tools from free probability and generalizing a recent result by Lu and Li. Both the upper bound and lower bound generalize beyond phase retrieval to measurements $y_i$ produced according to a generalized linear model. As a by-product of our analysis, we compare the threshold of the proposed spectral method with that of a message passing algorithm.},
}

