@inproceedings{10854,
  abstract     = {Consider a distributed task where the communication network is fixed but the local inputs given to the nodes of the distributed system may change over time. In this work, we explore the following question: if some of the local inputs change, can an existing solution be updated efficiently, in a dynamic and distributed manner?
To address this question, we define the batch dynamic CONGEST model, in which we are given a bandwidth-limited communication network and a dynamic edge labelling defines the problem input. The task is to maintain a solution to a graph problem on the labelled graph under batch changes. We investigate, when a batch of α edge label changes arrives, (1) how much time, as a function of α, we need to update an existing solution, and (2) how much information the nodes have to keep in local memory between batches in order to update the solution quickly.
Our work lays the foundations for the theory of input-dynamic distributed network algorithms. We give a general picture of the complexity landscape in this model, design both universal algorithms and algorithms for concrete problems, and present a general framework for lower bounds. The diverse time complexity of our model spans from constant time, through time polynomial in α, to α time, which we show to be sufficient for any task.},
  author       = {Foerster, Klaus-Tycho and Korhonen, Janne and Paz, Ami and Rybicki, Joel and Schmid, Stefan},
  booktitle    = {Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems},
  isbn         = {9781450380720},
  location     = {Virtual, Online},
  pages        = {71--72},
  publisher    = {Association for Computing Machinery},
  title        = {{Input-dynamic distributed algorithms for communication networks}},
  doi          = {10.1145/3410220.3453923},
  year         = {2021},
}

@article{10855,
  abstract     = {Consider a distributed task where the communication network is fixed but the local inputs given to the nodes of the distributed system may change over time. In this work, we explore the following question: if some of the local inputs change, can an existing solution be updated efficiently, in a dynamic and distributed manner? To address this question, we define the batch dynamic CONGEST model in which we are given a bandwidth-limited communication network and a dynamic edge labelling defines the problem input. The task is to maintain a solution to a graph problem on the labelled graph under batch changes. We investigate, when a batch of α edge label changes arrives, (1) how much time as a function of α we need to update an existing solution, and (2) how much information the nodes have to keep in local memory between batches in order to update the solution quickly. Our work lays the foundations for the theory of input-dynamic distributed network algorithms. We give a general picture of the complexity landscape in this model, design both universal algorithms and algorithms for concrete problems, and present a general framework for lower bounds. In particular, we derive non-trivial upper bounds for two selected, contrasting problems: maintaining a minimum spanning tree and detecting cliques.},
  author       = {Foerster, Klaus-Tycho and Korhonen, Janne and Paz, Ami and Rybicki, Joel and Schmid, Stefan},
  issn         = {2476-1249},
  journal      = {Proceedings of the ACM on Measurement and Analysis of Computing Systems},
  keywords     = {Computer Networks and Communications, Hardware and Architecture, Safety, Risk, Reliability and Quality, Computer Science (miscellaneous)},
  number       = {1},
  pages        = {1--33},
  publisher    = {Association for Computing Machinery},
  title        = {{Input-dynamic distributed algorithms for communication networks}},
  doi          = {10.1145/3447384},
  volume       = {5},
  year         = {2021},
}

@article{10856,
  abstract     = {We study the properties of the maximal volume k-dimensional sections of the n-dimensional cube [−1, 1]^n. We obtain a first order necessary condition for a k-dimensional subspace to be a local maximizer of the volume of such sections, which we formulate in a geometric way. We estimate the length of the projection of a vector of the standard basis of R^n onto a k-dimensional subspace that maximizes the volume of the intersection. We find the optimal upper bound on the volume of a planar section of the cube [−1, 1]^n, n ≥ 2.},
  author       = {Ivanov, Grigory and Tsiutsiurupa, Igor},
  issn         = {2299-3274},
  journal      = {Analysis and Geometry in Metric Spaces},
  keywords     = {Applied Mathematics, Geometry and Topology, Analysis},
  number       = {1},
  pages        = {1--18},
  publisher    = {De Gruyter},
  title        = {{On the volume of sections of the cube}},
  doi          = {10.1515/agms-2020-0103},
  volume       = {9},
  year         = {2021},
}

@article{10858,
  abstract     = {The cost-effective conversion of low-grade heat into electricity using thermoelectric devices requires developing alternative materials and material processing technologies able to reduce the currently high device manufacturing costs. In this direction, thermoelectric materials that do not rely on rare or toxic elements such as tellurium or lead need to be produced using high-throughput technologies not involving high temperatures and long processes. Bi2Se3 is an obvious possible Te-free alternative to Bi2Te3 for ambient temperature thermoelectric applications, but its performance is still low for practical applications, and additional efforts toward finding proper dopants are required. Here, we report a scalable method to produce Bi2Se3 nanosheets at low synthesis temperatures. We studied the influence of different dopants on the thermoelectric properties of this material. Among the elements tested, we demonstrated that Sn doping resulted in the best performance. Sn incorporation resulted in a significant improvement to the Bi2Se3 Seebeck coefficient and a reduction in the thermal conductivity in the direction of the hot-press axis, resulting in an overall 60% improvement in the thermoelectric figure of merit of Bi2Se3.},
  author       = {Li, Mengyao and Zhang, Yu and Zhang, Ting and Zuo, Yong and Xiao, Ke and Arbiol, Jordi and Llorca, Jordi and Liu, Yu and Cabot, Andreu},
  issn         = {2079-4991},
  journal      = {Nanomaterials},
  keywords     = {General Materials Science, General Chemical Engineering},
  number       = {7},
  publisher    = {MDPI},
  title        = {{Enhanced thermoelectric performance of n-type Bi2Se3 nanosheets through Sn doping}},
  doi          = {10.3390/nano11071827},
  volume       = {11},
  year         = {2021},
}
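
For reference, the thermoelectric figure of merit cited in the abstract above is the standard dimensionless quantity zT (textbook definition, not specific to this paper), where S is the Seebeck coefficient, σ the electrical conductivity, κ the thermal conductivity, and T the absolute temperature:

\[
  zT = \frac{S^2 \sigma T}{\kappa}
\]

A 60% improvement in zT can thus come from raising S, raising σ, or lowering κ; the abstract attributes the gain to a higher Seebeck coefficient together with lower thermal conductivity along the hot-press axis.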

@article{10860,
  abstract     = {A tight frame is the orthogonal projection of some orthonormal basis of R^n onto R^k. We show that a set of vectors is a tight frame if and only if the set of all cross products of these vectors is a tight frame. We reformulate a range of problems on the volume of projections (or sections) of regular polytopes in terms of tight frames and write a first-order necessary condition for local extrema of these problems. As applications, we prove new results for the problem of maximization of the volume of zonotopes.},
  author       = {Ivanov, Grigory},
  issn         = {1496-4287},
  journal      = {Canadian Mathematical Bulletin},
  keywords     = {General Mathematics, Tight frame, Grassmannian, zonotope},
  number       = {4},
  pages        = {942--963},
  publisher    = {Canadian Mathematical Society},
  title        = {{Tight frames and related geometric problems}},
  doi          = {10.4153/s000843952000096x},
  volume       = {64},
  year         = {2021},
}
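
The abstract's definition can be unpacked with a standard textbook identity (not specific to this paper): if P is a k × n matrix with orthonormal rows, so that x ↦ Px realizes the orthogonal projection onto a k-dimensional subspace identified with R^k, and v_i = P e_i are the projected basis vectors, then

\[
  \sum_{i=1}^{n} v_i v_i^{\mathsf{T}}
  = P \Big( \sum_{i=1}^{n} e_i e_i^{\mathsf{T}} \Big) P^{\mathsf{T}}
  = P \, I_n \, P^{\mathsf{T}} = I_k ,
\]

which is the usual tight (Parseval) frame condition on the vectors v_1, ..., v_n.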

@unpublished{10912,
  abstract     = {Brain dynamics display collective phenomena as diverse as neuronal oscillations and avalanches. Oscillations are rhythmic, with fluctuations occurring at a characteristic scale, whereas avalanches are scale-free cascades of neural activity. Here we show that such antithetic features can coexist in a very generic class of adaptive neural networks. In the most simple yet fully microscopic model from this class we make direct contact with human brain resting-state activity recordings via tractable inference of the model's two essential parameters. The inferred model quantitatively captures the dynamics over a broad range of scales, from single sensor fluctuations, collective behaviors of nearly-synchronous extreme events on multiple sensors, to neuronal avalanches unfolding over multiple sensors across multiple time-bins. Importantly, the inferred parameters correlate with model-independent signatures of "closeness to criticality", suggesting that the coexistence of scale-specific (neural oscillations) and scale-free (neuronal avalanches) dynamics in brain activity occurs close to a non-equilibrium critical point at the onset of self-sustained oscillations.},
  author       = {Lombardi, Fabrizio and Pepic, Selver and Shriki, Oren and Tkačik, Gašper and De Martino, Daniele},
  pages        = {37},
  publisher    = {arXiv},
  title        = {{Quantifying the coexistence of neuronal oscillations and avalanches}},
  doi          = {10.48550/ARXIV.2108.06686},
  year         = {2021},
}

@inproceedings{11436,
  abstract     = {Asynchronous distributed algorithms are a popular way to reduce synchronization costs in large-scale optimization, and in particular for neural network training. However, for nonsmooth and nonconvex objectives, few convergence guarantees exist beyond cases where closed-form proximal operator solutions are available. As training most popular deep neural networks corresponds to optimizing nonsmooth and nonconvex objectives, there is a pressing need for such convergence guarantees. In this paper, we analyze for the first time the convergence of stochastic asynchronous optimization for this general class of objectives. In particular, we focus on stochastic subgradient methods allowing for block variable partitioning, where the shared model is asynchronously updated by concurrent processes. To this end, we use a probabilistic model which captures key features of real asynchronous scheduling between concurrent processes. Under this model, we establish convergence with probability one to an invariant set for stochastic subgradient methods with momentum. From a practical perspective, one issue with the family of algorithms that we consider is that they are not efficiently supported by machine learning frameworks, which mostly focus on distributed data-parallel strategies. To address this, we propose a new implementation strategy for shared-memory based training of deep neural networks for a partitioned but shared model in single- and multi-GPU settings. Based on this implementation, we achieve on average a 1.2x speed-up in comparison to state-of-the-art training methods for popular image classification tasks, without compromising accuracy.},
  author       = {Kungurtsev, Vyacheslav and Egan, Malcolm and Chatterjee, Bapi and Alistarh, Dan-Adrian},
  booktitle    = {35th AAAI Conference on Artificial Intelligence, AAAI 2021},
  isbn         = {9781713835974},
  issn         = {2374-3468},
  location     = {Virtual, Online},
  number       = {9B},
  pages        = {8209--8216},
  publisher    = {AAAI Press},
  title        = {{Asynchronous optimization methods for efficient training of deep neural networks with guarantees}},
  volume       = {35},
  year         = {2021},
}
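
As a concrete illustration of the algorithm family analysed above (concurrent processes asynchronously updating disjoint blocks of a shared model with stochastic subgradients and momentum), here is a minimal Python sketch. The toy nonsmooth objective, hyperparameters, and thread-based scheduling are illustrative assumptions, not the paper's setup or implementation:

import threading
import numpy as np

d, n_rows, n_workers, steps = 8, 32, 4, 2000
rng = np.random.default_rng(0)
A = rng.normal(size=(n_rows, d))
b = rng.normal(size=n_rows)
x = np.zeros(d)               # shared model, updated without locks
velocity = np.zeros(d)        # shared momentum buffer

def worker(block, seed):
    lr, beta, batch = 1e-3, 0.9, 8
    local_rng = np.random.default_rng(seed)
    for _ in range(steps):
        idx = local_rng.integers(0, n_rows, size=batch)
        # Stochastic subgradient of 0.5*||Ax - b||^2 + ||x||_1 at a
        # possibly stale read of the shared iterate x.
        g = A[idx].T @ (A[idx] @ x - b[idx]) * (n_rows / batch) + np.sign(x)
        velocity[block] = beta * velocity[block] - lr * g[block]
        x[block] += velocity[block]          # asynchronous block update

blocks = np.array_split(np.arange(d), n_workers)
threads = [threading.Thread(target=worker, args=(blk, i))
           for i, blk in enumerate(blocks)]
for t in threads: t.start()
for t in threads: t.join()
print("objective:", 0.5 * np.sum((A @ x - b) ** 2) + np.abs(x).sum())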

@inproceedings{11452,
  abstract     = {We study efficient distributed algorithms for the fundamental problem of principal component analysis and leading eigenvector computation on the sphere, when the data are randomly distributed among a set of computational nodes. We propose a new quantized variant of Riemannian gradient descent to solve this problem, and prove that the algorithm converges with high probability under a set of necessary spherical-convexity properties. We give bounds on the number of bits transmitted by the algorithm under common initialization schemes, and investigate the dependency on the problem dimension in each case.},
  author       = {Alimisis, Foivos and Davies, Peter and Vandereycken, Bart and Alistarh, Dan-Adrian},
  booktitle    = {Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems},
  isbn         = {9781713845393},
  issn         = {1049-5258},
  location     = {Virtual, Online},
  pages        = {2823--2834},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{Distributed principal component analysis with limited communication}},
  volume       = {4},
  year         = {2021},
}
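
To make the algorithmic idea concrete, here is a minimal sketch of Riemannian gradient ascent on the sphere for the leading eigenvector, with stochastically quantized gradient messages. The quantizer, step size, planted-spike data, and single-coordinator topology are assumptions of this sketch, not the paper's scheme:

import numpy as np
rng = np.random.default_rng(1)

d, n_nodes = 10, 4
spike = rng.normal(size=d); spike /= np.linalg.norm(spike)
shards = [rng.normal(size=(50, d)) + 3.0 * np.outer(rng.normal(size=50), spike)
          for _ in range(n_nodes)]
covs = [S.T @ S / S.shape[0] for S in shards]   # local covariance per node

def quantize(v, levels=256):
    # Unbiased stochastic uniform quantization; the scale s is assumed to
    # be sent at full precision.
    s = np.abs(v).max() + 1e-12
    u = v / s * (levels // 2)
    low = np.floor(u)
    q = low + (rng.random(v.shape) < (u - low))  # round up w.p. frac(u)
    return q * s / (levels // 2)

x = rng.normal(size=d); x /= np.linalg.norm(x)
for _ in range(300):
    g = sum(quantize(C @ x) for C in covs) / n_nodes  # quantized messages
    rgrad = g - (x @ g) * x          # project onto the tangent space at x
    x += 0.1 * rgrad                 # ascent on the Rayleigh quotient
    x /= np.linalg.norm(x)           # retract back onto the sphere

C_avg = sum(covs) / n_nodes
print("Rayleigh quotient:", x @ C_avg @ x)
print("top eigenvalue:  ", np.linalg.eigvalsh(C_avg)[-1])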

@inproceedings{11453,
  abstract     = {Neuronal computations depend on synaptic connectivity and intrinsic electrophysiological properties. Synaptic connectivity determines which inputs from presynaptic neurons are integrated, while cellular properties determine how inputs are filtered over time. Unlike their biological counterparts, most computational approaches to learning in simulated neural networks are limited to changes in synaptic connectivity. However, if intrinsic parameters change, neural computations are altered drastically. Here, we include the parameters that determine the intrinsic properties, e.g., time constants and reset potential, into the learning paradigm. Using sparse feedback signals that indicate target spike times, and gradient-based parameter updates, we show that the intrinsic parameters can be learned along with the synaptic weights to produce specific input-output functions. Specifically, we use a teacher-student paradigm in which a randomly initialised leaky integrate-and-fire or resonate-and-fire neuron must recover the parameters of a teacher neuron. We show that complex temporal functions can be learned online and without backpropagation through time, relying on event-based updates only. Our results are a step towards online learning of neural computations from ungraded and unsigned sparse feedback signals with a biologically inspired learning mechanism.},
  author       = {Braun, Lukas and Vogels, Tim P},
  booktitle    = {Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems},
  isbn         = {9781713845393},
  issn         = {1049-5258},
  location     = {Virtual, Online},
  pages        = {16437--16450},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{Online learning of neural computations from sparse temporal feedback}},
  volume       = {20},
  year         = {2021},
}
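
The following toy sketch illustrates the core idea of treating an intrinsic parameter as learnable: a student leaky integrate-and-fire (LIF) neuron recovers a teacher's membrane time constant by gradient descent, with the gradient obtained from forward sensitivities dv/dτ. Note the simplifications: this sketch matches dense subthreshold voltage traces, whereas the paper learns from sparse spike-time feedback with event-based updates, and it learns only one parameter:

import numpy as np

dt, T = 1e-3, 1000
rng = np.random.default_rng(2)
I = 0.8 + 0.2 * rng.standard_normal(T)     # shared input current

def voltage_trace(tau):
    v = np.zeros(T)
    for t in range(T - 1):
        v[t + 1] = v[t] + dt * (-v[t] / tau + I[t])  # subthreshold LIF
    return v

v_teacher = voltage_trace(0.020)           # teacher tau = 20 ms
tau, lr = 0.050, 1e-4                      # student starts at 50 ms
for epoch in range(300):
    v = np.zeros(T); s = np.zeros(T)       # s[t] = dv[t]/dtau
    grad = 0.0
    for t in range(T - 1):
        v[t + 1] = v[t] + dt * (-v[t] / tau + I[t])
        s[t + 1] = s[t] + dt * (-s[t] / tau + v[t] / tau**2)
        grad += (v[t + 1] - v_teacher[t + 1]) * s[t + 1]
    tau -= lr * grad                       # gradient step on time constant
print("recovered tau:", tau)               # approaches 0.020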

@inproceedings{11458,
  abstract     = {The increasing computational requirements of deep neural networks (DNNs) have led to significant interest in obtaining DNN models that are sparse, yet accurate. Recent work has investigated the even harder case of sparse training, where the DNN weights are, for as much as possible, already sparse to reduce computational costs during training. Existing sparse training methods are often empirical and can have lower accuracy relative to the dense baseline. In this paper, we present a general approach called Alternating Compressed/DeCompressed (AC/DC) training of DNNs, demonstrate convergence for a variant of the algorithm, and show that AC/DC outperforms existing sparse training methods in accuracy at similar computational budgets; at high sparsity levels, AC/DC even outperforms existing methods that rely on accurate pre-trained dense models. An important property of AC/DC is that it allows co-training of dense and sparse models, yielding accurate sparse–dense model pairs at the end of the training process. This is useful in practice, where compressed variants may be desirable for deployment in resource-constrained settings without re-doing the entire training flow, and also provides us with insights into the accuracy gap between dense and compressed models. The code is available at: https://github.com/IST-DASLab/ACDC.},
  author       = {Peste, Elena-Alexandra and Iofinova, Eugenia B and Vladu, Adrian and Alistarh, Dan-Adrian},
  booktitle    = {35th Conference on Neural Information Processing Systems},
  isbn         = {9781713845393},
  issn         = {1049-5258},
  location     = {Virtual, Online},
  pages        = {8557--8570},
  publisher    = {Curran Associates},
  title        = {{AC/DC: Alternating Compressed/DeCompressed training of deep neural networks}},
  volume       = {34},
  year         = {2021},
}
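
A minimal sketch of the alternating compressed/decompressed idea on a toy sparse regression problem: dense SGD phases alternate with phases in which the smallest-magnitude weights are masked to zero. The schedule, sparsity level, and model are illustrative assumptions, not the paper's recipe:

import numpy as np

rng = np.random.default_rng(3)
d, n = 50, 400
w_true = np.zeros(d); w_true[:5] = rng.normal(size=5)   # sparse ground truth
X = rng.normal(size=(n, d)); y = X @ w_true

w = 0.1 * rng.normal(size=d)
sparsity = 0.9
for phase in range(8):                       # alternate dense/compressed
    compressed = phase % 2 == 1              # final phase is compressed
    mask = np.ones(d)
    if compressed:
        mask[np.argsort(np.abs(w))[:int(sparsity * d)]] = 0.0
        w *= mask                            # magnitude pruning
    for _ in range(400):
        i = rng.integers(n)
        g = (X[i] @ w - y[i]) * X[i]         # SGD on least squares
        w -= 0.01 * g
        if compressed:
            w *= mask                        # keep pruned weights at zero
print("nonzeros:", int((w != 0).sum()), "error:", np.linalg.norm(w - w_true))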

@inproceedings{11463,
  abstract     = {Efficiently approximating local curvature information of the loss function is a key tool for optimization and compression of deep neural networks. Yet, most existing methods to approximate second-order information have high computational or storage costs, which limits their practicality. In this work, we investigate matrix-free, linear-time approaches for estimating Inverse-Hessian Vector Products (IHVPs) for the case when the Hessian can be approximated as a sum of rank-one matrices, as in the classic approximation of the Hessian by the empirical Fisher matrix. We propose two new algorithms: the first is tailored towards network compression and can compute the IHVP for dimension d, if the Hessian is given as a sum of m rank-one matrices, using O(dm^2) precomputation, O(dm) cost for computing the IHVP, and query cost O(m) for any single element of the inverse Hessian. The second algorithm targets an optimization setting, where we wish to compute the product between the inverse Hessian, estimated over a sliding window of optimization steps, and a given gradient direction, as required for preconditioned SGD. We give an algorithm with cost O(dm + m^2) for computing the IHVP and O(dm + m^3) for adding or removing any gradient from the sliding window. These two algorithms yield state-of-the-art results for network pruning and optimization with lower computational overhead relative to existing second-order methods. Implementations are available at [9] and [17].},
  author       = {Frantar, Elias and Kurtic, Eldar and Alistarh, Dan-Adrian},
  booktitle    = {35th Conference on Neural Information Processing Systems},
  isbn         = {9781713845393},
  issn         = {1049-5258},
  location     = {Virtual, Online},
  pages        = {14873--14886},
  publisher    = {Curran Associates},
  title        = {{M-FAC: Efficient matrix-free approximations of second-order information}},
  volume       = {34},
  year         = {2021},
}
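
To make the rank-one structure concrete: when the Hessian is approximated as H = λI + Σ_i g_i g_iᵀ, IHVPs can be computed by unrolling the Sherman–Morrison formula, with O(dm^2) precomputation and O(dm) per query, matching the complexity profile stated in the abstract. This sketch is a straightforward instantiation of that classical recursion, not the paper's optimized algorithm (which also supports dynamic window updates):

import numpy as np

rng = np.random.default_rng(4)
d, m, lam = 30, 10, 1e-2
G = rng.normal(size=(m, d))        # the m rank-one gradient terms

def precompute(G, lam):
    m, d = G.shape
    P = np.empty((m, d))           # P[k] = (lam*I + sum_{j<k} g_j g_j^T)^{-1} g_k
    denom = np.empty(m)            # denom[k] = 1 + g_k^T P[k]
    for k in range(m):
        u = G[k] / lam
        for j in range(k):         # fold in earlier rank-one terms
            u -= (G[j] @ u) / denom[j] * P[j]
        P[k] = u
        denom[k] = 1.0 + G[k] @ u
    return P, denom

def ihvp(v, G, lam, P, denom):
    # Apply H^{-1} to v in O(d m) using the precomputed quantities.
    u = v / lam
    for j in range(len(denom)):
        u -= (G[j] @ u) / denom[j] * P[j]
    return u

P, denom = precompute(G, lam)
v = rng.normal(size=d)
H = lam * np.eye(d) + G.T @ G      # dense reference, for checking only
print(np.allclose(ihvp(v, G, lam, P, denom), np.linalg.solve(H, v)))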

@inproceedings{11464,
  abstract     = {We consider a standard distributed optimisation setting where N machines, each holding a d-dimensional function f_i, aim to jointly minimise the sum of the functions ∑_{i=1}^N f_i(x). This problem arises naturally in large-scale distributed optimisation, where a standard solution is to apply variants of (stochastic) gradient descent. We focus on the communication complexity of this problem: our main result provides the first fully unconditional bounds on the total number of bits which need to be sent and received by the N machines to solve this problem under point-to-point communication, within a given error-tolerance. Specifically, we show that Ω(Nd log d / Nε) total bits need to be communicated between the machines to find an additive ε-approximation to the minimum of ∑_{i=1}^N f_i(x). The result holds for both deterministic and randomised algorithms, and, importantly, requires no assumptions on the algorithm structure. The lower bound is tight under certain restrictions on parameter values, and is matched within constant factors for quadratic objectives by a new variant of quantised gradient descent, which we describe and analyse. Our results bring over tools from communication complexity to distributed optimisation, which has potential for further applications.},
  author       = {Alistarh, Dan-Adrian and Korhonen, Janne},
  booktitle    = {35th Conference on Neural Information Processing Systems},
  isbn         = {9781713845393},
  issn         = {1049-5258},
  location     = {Virtual, Online},
  pages        = {7254--7266},
  publisher    = {Curran Associates},
  title        = {{Towards tight communication lower bounds for distributed optimisation}},
  volume       = {34},
  year         = {2021},
}

@article{7883,
  abstract     = {All vertebrates have a spinal cord with dimensions and shape specific to their species. Yet how species‐specific organ size and shape are achieved is a fundamental unresolved question in biology. The formation and sculpting of organs begins during embryonic development. As it develops, the spinal cord extends in anterior–posterior direction in synchrony with the overall growth of the body. The dorsoventral (DV) and apicobasal lengths of the spinal cord neuroepithelium also change, while at the same time a characteristic pattern of neural progenitor subtypes along the DV axis is established and elaborated. At the basis of these changes in tissue size and shape are biophysical determinants, such as the change in cell number, cell size and shape, and anisotropic tissue growth. These processes are controlled by global tissue‐scale regulators, such as morphogen signaling gradients as well as mechanical forces. Current challenges in the field are to uncover how these tissue‐scale regulatory mechanisms are translated to the cellular and molecular level, and how regulation of distinct cellular processes gives rise to an overall defined size. Addressing these questions will help not only to achieve a better understanding of how size is controlled, but also of how tissue size is coordinated with the specification of pattern.},
  author       = {Kuzmicz-Kowalska, Katarzyna and Kicheva, Anna},
  issn         = {1759-7692},
  journal      = {Wiley Interdisciplinary Reviews: Developmental Biology},
  publisher    = {Wiley},
  title        = {{Regulation of size and scale in vertebrate spinal cord development}},
  doi          = {10.1002/wdev.383},
  year         = {2021},
}

@article{7900,
  abstract     = {Hartree–Fock theory has been justified as a mean-field approximation for fermionic systems. However, it suffers from some defects in predicting physical properties, making necessary a theory of quantum correlations. Recently, bosonization of many-body correlations has been rigorously justified as an upper bound on the correlation energy at high density with weak interactions. We review the bosonic approximation, deriving an effective Hamiltonian. We then show that for systems with Coulomb interaction this effective theory predicts collective excitations (plasmons) in accordance with the random phase approximation of Bohm and Pines, and with experimental observation.},
  author       = {Benedikter, Niels P},
  issn         = {1793-6659},
  journal      = {Reviews in Mathematical Physics},
  number       = {1},
  publisher    = {World Scientific},
  title        = {{Bosonic collective excitations in Fermi gases}},
  doi          = {10.1142/s0129055x20600090},
  volume       = {33},
  year         = {2021},
}

@article{7901,
  abstract     = {We derive rigorously the leading order of the correlation energy of a Fermi gas in a scaling regime of high density and weak interaction. The result verifies the prediction of the random-phase approximation. Our proof refines the method of collective bosonization in three dimensions. We approximately diagonalize an effective Hamiltonian describing approximately bosonic collective excitations around the Hartree–Fock state, while showing that gapless and non-collective excitations have only a negligible effect on the ground state energy.},
  author       = {Benedikter, Niels P and Nam, Phan Thành and Porta, Marcello and Schlein, Benjamin and Seiringer, Robert},
  issn         = {1432-1297},
  journal      = {Inventiones Mathematicae},
  pages        = {885--979},
  publisher    = {Springer},
  title        = {{Correlation energy of a weakly interacting Fermi gas}},
  doi          = {10.1007/s00222-021-01041-5},
  volume       = {225},
  year         = {2021},
}

@article{7905,
  abstract     = {We investigate a sheaf-theoretic interpretation of stratification learning from geometric and topological perspectives. Our main result is the construction of stratification learning algorithms framed in terms of a sheaf on a partially ordered set with the Alexandroff topology. We prove that the resulting decomposition is the unique minimal stratification for which the strata are homogeneous and the given sheaf is constructible. In particular, when we choose to work with the local homology sheaf, our algorithm gives an alternative to the local homology transfer algorithm given in Bendich et al. (Proceedings of the 23rd Annual ACM-SIAM Symposium on Discrete Algorithms, pp. 1355–1370, ACM, New York, 2012), and the cohomology stratification algorithm given in Nanda (Found. Comput. Math. 20(2), 195–222, 2020). Additionally, we give examples of stratifications based on the geometric techniques of Breiding et al. (Rev. Mat. Complut. 31(3), 545–593, 2018), illustrating how the sheaf-theoretic approach can be used to study stratifications from both topological and geometric perspectives. This approach also points toward future applications of sheaf theory in the study of topological data analysis by illustrating the utility of the language of sheaf theory in generalizing existing algorithms.},
  author       = {Brown, Adam and Wang, Bei},
  issn         = {1432-0444},
  journal      = {Discrete and Computational Geometry},
  pages        = {1166--1198},
  publisher    = {Springer Nature},
  title        = {{Sheaf-theoretic stratification learning from geometric and topological perspectives}},
  doi          = {10.1007/s00454-020-00206-y},
  volume       = {65},
  year         = {2021},
}

@article{7925,
  abstract     = {In this paper, we introduce a relaxed CQ method with alternated inertial step for solving split feasibility problems. We give convergence of the sequence generated by our method under some suitable assumptions. Some numerical implementations from sparse signal and image deblurring are reported to show the efficiency of our method.},
  author       = {Shehu, Yekini and Gibali, Aviv},
  issn         = {1862-4480},
  journal      = {Optimization Letters},
  pages        = {2109--2126},
  publisher    = {Springer Nature},
  title        = {{New inertial relaxed method for solving split feasibilities}},
  doi          = {10.1007/s11590-020-01603-1},
  volume       = {15},
  year         = {2021},
}
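
For orientation, here is a minimal sketch of a CQ-type iteration with an alternated inertial step for the split feasibility problem (find x in C with Ax in Q). Here C and Q are boxes with closed-form projections; the paper's relaxed variant projects onto half-space relaxations of level sets instead, and its step-size and inertial rules differ:

import numpy as np

rng = np.random.default_rng(5)
m, n = 15, 10
A = rng.normal(size=(m, n))

proj_C = lambda x: np.clip(x, -1.0, 1.0)       # C = [-1, 1]^n
proj_Q = lambda y: np.clip(y, -0.5, 0.5)       # Q = [-0.5, 0.5]^m

gamma = 1.0 / np.linalg.norm(A, 2) ** 2        # in (0, 2/||A||^2)
theta = 0.3                                    # inertial factor
x_prev = x = rng.normal(size=n)
for k in range(500):
    w = x + theta * (x - x_prev) if k % 2 == 0 else x  # alternated inertia
    Aw = A @ w
    x_prev, x = x, proj_C(w - gamma * A.T @ (Aw - proj_Q(Aw)))
print("||Ax - P_Q(Ax)||:", np.linalg.norm(A @ x - proj_Q(A @ x)))
print("||x - P_C(x)||:  ", np.linalg.norm(x - proj_C(x)))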

@article{7939,
  abstract     = {We design fast deterministic algorithms for distance computation in the Congested Clique model. Our key contributions include:
    A (2+ε)-approximation for all-pairs shortest paths in O(log^2 n / ε) rounds on unweighted undirected graphs. With a small additional additive factor, this also applies for weighted graphs. This is the first sub-polynomial constant-factor approximation for APSP in this model.
    A (1+ε)-approximation for multi-source shortest paths from O(√n) sources in O(log^2 n / ε) rounds on weighted undirected graphs. This is the first sub-polynomial algorithm obtaining this approximation for a set of sources of polynomial size.

Our main techniques are new distance tools that are obtained via improved algorithms for sparse matrix multiplication, which we leverage to construct efficient hopsets and shortest paths. Furthermore, our techniques extend to additional distance problems for which we improve upon the state-of-the-art, including diameter approximation, and an exact single-source shortest paths algorithm for weighted undirected graphs in Õ(n^{1/6}) rounds.},
  author       = {Censor-Hillel, Keren and Dory, Michal and Korhonen, Janne and Leitersdorf, Dean},
  issn         = {1432-0452},
  journal      = {Distributed Computing},
  pages        = {463--487},
  publisher    = {Springer Nature},
  title        = {{Fast approximate shortest paths in the congested clique}},
  doi          = {10.1007/s00446-020-00380-5},
  volume       = {34},
  year         = {2021},
}
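
The "distance tools via matrix multiplication" above build on the min-plus (distance) product, whose repeated squaring yields all-pairs distances; the paper's contribution is computing such products sparsely and in few Congested Clique rounds, which the following centralized toy sketch does not attempt to capture:

import numpy as np

INF = np.inf
# Weighted graph on 4 nodes as an adjacency matrix; INF = no edge.
W = np.array([[0, 2, INF, 7],
              [2, 0, 3, INF],
              [INF, 3, 0, 1],
              [7, INF, 1, 0]], dtype=float)

def min_plus(P, Q):
    # (P ⊗ Q)[i, j] = min_k P[i, k] + Q[k, j]
    return (P[:, :, None] + Q[None, :, :]).min(axis=1)

D = W.copy()
for _ in range(int(np.ceil(np.log2(len(W))))):
    D = min_plus(D, D)        # repeated squaring doubles the hop count
print(D)                      # all-pairs shortest-path distances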

@inbook{7941,
  abstract     = {Expansion microscopy is a recently developed super-resolution imaging technique, which provides an alternative to optics-based methods such as deterministic approaches (e.g. STED) or stochastic approaches (e.g. PALM/STORM). The idea behind expansion microscopy is to embed the biological sample in a swellable gel, and then to expand it isotropically, thereby increasing the distance between the fluorophores. This approach breaks the diffraction barrier by simply separating the emission point-spread-functions of the fluorophores. The resolution attainable in expansion microscopy is thus directly dependent on the separation that can be achieved, i.e. on the expansion factor. The original implementation of the technique achieved an expansion factor of fourfold, for a resolution of 70–80 nm. The subsequently developed X10 method achieves an expansion factor of 10-fold, for a resolution of 25–30 nm. This technique can be implemented with minimal technical requirements on any standard fluorescence microscope, and is more easily applied for multi-color imaging than either deterministic or stochastic super-resolution approaches. This renders X10 expansion microscopy a highly promising tool for new biological discoveries, as discussed here, and as demonstrated by several recent applications.},
  author       = {Truckenbrodt, Sven M and Rizzoli, Silvio O.},
  booktitle    = {Methods in Cell Biology},
  isbn         = {9780128208076},
  issn         = {0091-679X},
  pages        = {33--56},
  publisher    = {Elsevier},
  title        = {{Simple multi-color super-resolution by X10 microscopy}},
  doi          = {10.1016/bs.mcb.2020.04.016},
  volume       = {161},
  year         = {2021},
}

@article{8196,
  abstract     = {This paper aims to obtain a strong convergence result for a Douglas–Rachford splitting method with inertial extrapolation step for finding a zero of the sum of two set-valued maximal monotone operators without any further assumption of uniform monotonicity on any of the involved maximal monotone operators. Furthermore, our proposed method is easy to implement and the inertial factor in our proposed method is a natural choice. Our method of proof is of independent interest. Finally, some numerical implementations are given to confirm the theoretical analysis.},
  author       = {Shehu, Yekini and Dong, Qiao-Li and Liu, Lu-Lu and Yao, Jen-Chih},
  issn         = {1573-2924},
  journal      = {Optimization and Engineering},
  pages        = {2627--2653},
  publisher    = {Springer Nature},
  title        = {{New strong convergence method for the sum of two maximal monotone operators}},
  doi          = {10.1007/s11081-020-09544-5},
  volume       = {22},
  year         = {2021},
}
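
A hedged sketch of Douglas–Rachford splitting with an inertial extrapolation step, applied to min ||x||_1 + 0.5||x - b||^2, where both proximal operators are closed-form. The paper treats general maximal monotone operators and proves strong convergence; the operators and parameters here are illustrative assumptions only:

import numpy as np

b = np.array([3.0, -0.2, 1.5, 0.05])
gam = 1.0                                   # proximal step size
theta = 0.3                                 # inertial factor

prox_f = lambda y: np.sign(y) * np.maximum(np.abs(y) - gam, 0)  # soft-threshold
prox_g = lambda y: (y + gam * b) / (1 + gam)                    # quadratic term

y_prev = y = np.zeros_like(b)
for _ in range(100):
    w = y + theta * (y - y_prev)            # inertial extrapolation
    x = prox_f(w)
    z = prox_g(2 * x - w)
    y_prev, y = y, w + z - x                # Douglas-Rachford update
print("solution:", prox_f(y))               # converges to soft(b, 1)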

