@article{7577,
  abstract     = {Weak convergence of inertial iterative method for solving variational inequalities is the focus of this paper. The cost function is assumed to be non-Lipschitz and monotone. We propose a projection-type method with inertial terms and give weak convergence analysis under appropriate conditions. Some test results are performed and compared with relevant methods in the literature to show the efficiency and advantages given by our proposed methods.},
  author       = {Shehu, Yekini and Iyiola, Olaniyi S.},
  issn         = {1563-504X},
  journal      = {Applicable Analysis},
  number       = {1},
  pages        = {192--216},
  publisher    = {Taylor \& Francis},
  title        = {Weak convergence for variational inequalities with inertial-type method},
  doi          = {10.1080/00036811.2020.1736287},
  volume       = {101},
  year         = {2022},
}

@article{7925,
  abstract     = {In this paper, we introduce a relaxed CQ method with alternated inertial step for solving split feasibility problems. We give convergence of the sequence generated by our method under some suitable assumptions. Some numerical implementations from sparse signal and image deblurring are reported to show the efficiency of our method.},
  author       = {Shehu, Yekini and Gibali, Aviv},
  issn         = {1862-4480},
  journal      = {Optimization Letters},
  pages        = {2109--2126},
  publisher    = {Springer Nature},
  title        = {New inertial relaxed method for solving split feasibilities},
  doi          = {10.1007/s11590-020-01603-1},
  volume       = {15},
  year         = {2021},
}

@article{8196,
  abstract     = {This paper aims to obtain a strong convergence result for a Douglas–Rachford splitting method with inertial extrapolation step for finding a zero of the sum of two set-valued maximal monotone operators without any further assumption of uniform monotonicity on any of the involved maximal monotone operators. Furthermore, our proposed method is easy to implement and the inertial factor in our proposed method is a natural choice. Our method of proof is of independent interest. Finally, some numerical implementations are given to confirm the theoretical analysis.},
  author       = {Shehu, Yekini and Dong, Qiao-Li and Liu, Lu-Lu and Yao, Jen-Chih},
  issn         = {1573-2924},
  journal      = {Optimization and Engineering},
  pages        = {2627--2653},
  publisher    = {Springer Nature},
  title        = {New strong convergence method for the sum of two maximal monotone operators},
  doi          = {10.1007/s11081-020-09544-5},
  volume       = {22},
  year         = {2021},
}

@article{8817,
  abstract     = {The paper introduces an inertial extragradient subgradient method with self-adaptive step sizes for solving equilibrium problems in real Hilbert spaces. Weak convergence of the proposed method is obtained under the condition that the bifunction is pseudomonotone and Lipschitz continuous. Linear convergence is also given when the bifunction is strongly pseudomonotone and Lipschitz continuous. Numerical implementations and comparisons with other related inertial methods are given using test problems including a real-world application to Nash–Cournot oligopolistic electricity market equilibrium model.},
  author       = {Shehu, Yekini and Iyiola, Olaniyi S. and Thong, Duong Viet and Van, Nguyen Thi Cam},
  issn         = {1432-5217},
  journal      = {Mathematical Methods of Operations Research},
  number       = {2},
  pages        = {213--242},
  publisher    = {Springer Nature},
  title        = {An inertial subgradient extragradient algorithm extended to pseudomonotone equilibrium problems},
  doi          = {10.1007/s00186-020-00730-w},
  volume       = {93},
  year         = {2021},
}

@article{9234,
  abstract     = {In this paper, we present two new inertial projection-type methods for solving multivalued variational inequality problems in finite-dimensional spaces. We establish the convergence of the sequence generated by these methods when the multivalued mapping associated with the problem is only required to be locally bounded without any monotonicity assumption. Furthermore, the inertial techniques that we employ in this paper are quite different from the ones used in most papers. Moreover, based on the weaker assumptions on the inertial factor in our methods, we derive several special cases of our methods. Finally, we present some experimental results to illustrate the profits that we gain by introducing the inertial extrapolation steps.},
  author       = {Izuchukwu, Chinedu and Shehu, Yekini},
  issn         = {1572-9427},
  journal      = {Networks and Spatial Economics},
  keywords     = {Computer Networks and Communications, Software, Artificial Intelligence},
  number       = {2},
  pages        = {291--323},
  publisher    = {Springer Nature},
  title        = {New inertial projection methods for solving multivalued variational inequality problems beyond monotonicity},
  doi          = {10.1007/s11067-021-09517-w},
  volume       = {21},
  year         = {2021},
}

@article{9365,
  abstract     = {In this paper, we propose a new iterative method with alternated inertial step for solving split common null point problem in real Hilbert spaces. We obtain weak convergence of the proposed iterative algorithm. Furthermore, we introduce the notion of bounded linear regularity property for the split common null point problem and obtain the linear convergence property for the new algorithm under some mild assumptions. Finally, we provide some numerical examples to demonstrate the performance and efficiency of the proposed method.},
  author       = {Ogbuisi, Ferdinard U. and Shehu, Yekini and Yao, Jen-Chih},
  issn         = {1029-4945},
  journal      = {Optimization},
  publisher    = {Taylor \& Francis},
  title        = {Convergence analysis of new inertial method for the split common null point problem},
  doi          = {10.1080/02331934.2021.1914035},
  year         = {2021},
}

@article{9469,
  abstract     = {In this paper, we consider reflected three-operator splitting methods for monotone inclusion problems in real Hilbert spaces. To do this, we first obtain weak convergence analysis and nonasymptotic O(1/n) convergence rate of the reflected Krasnosel'skiĭ-Mann iteration for finding a fixed point of nonexpansive mapping in real Hilbert spaces under some seemingly easy to implement conditions on the iterative parameters. We then apply our results to three-operator splitting for the monotone inclusion problem and consequently obtain the corresponding convergence analysis. Furthermore, we derive reflected primal-dual algorithms for highly structured monotone inclusion problems. Some numerical implementations are drawn from splitting methods to support the theoretical analysis.},
  author       = {Iyiola, Olaniyi S. and Enyi, Cyril D. and Shehu, Yekini},
  issn         = {1029-4937},
  journal      = {Optimization Methods and Software},
  publisher    = {Taylor \& Francis},
  title        = {Reflected three-operator splitting method for monotone inclusion problem},
  doi          = {10.1080/10556788.2021.1924715},
  year         = {2021},
}

@inproceedings{10072,
  abstract     = {The Lovász Local Lemma (LLL) is a powerful tool in probabilistic combinatorics which can be used to establish the existence of objects that satisfy certain properties. The breakthrough paper of Moser and Tardos and follow-up works revealed that the LLL has intimate connections with a class of stochastic local search algorithms for finding such desirable objects. In particular, it can be seen as a sufficient condition for this type of algorithms to converge fast. Besides conditions for existence of and fast convergence to desirable objects, one may naturally ask further questions regarding properties of these algorithms. For instance, "are they parallelizable?", "how many solutions can they output?", "what is the expected "weight" of a solution?", etc. These questions and more have been answered for a class of LLL-inspired algorithms called commutative. In this paper we introduce a new, very natural and more general notion of commutativity (essentially matrix commutativity) which allows us to show a number of new refined properties of LLL-inspired local search algorithms with significantly simpler proofs.},
  author       = {Harris, David G. and Iliopoulos, Fotis and Kolmogorov, Vladimir},
  booktitle    = {Approximation, Randomization, and Combinatorial Optimization. Algorithms and Techniques},
  isbn         = {978-3-95977-207-5},
  issn         = {1868-8969},
  location     = {Virtual},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {A new notion of commutativity for the algorithmic {Lovász Local Lemma}},
  doi          = {10.4230/LIPIcs.APPROX/RANDOM.2021.31},
  volume       = {207},
  year         = {2021},
}

@inproceedings{10552,
  abstract     = {We study a class of convex-concave saddle-point problems of the form {$\min_x \max_y \langle Kx, y\rangle + f_P(x) - h^*(y)$} where {$K$} is a linear operator, {$f_P$} is the sum of a convex function {$f$} with a Lipschitz-continuous gradient and the indicator function of a bounded convex polytope {$P$}, and {$h^*$} is a convex (possibly nonsmooth) function. Such problem arises, for example, as a Lagrangian relaxation of various discrete optimization problems. Our main assumptions are the existence of an efficient linear minimization oracle (lmo) for {$f_P$} and an efficient proximal map for {$h^*$} which motivate the solution via a blend of proximal primal-dual algorithms and Frank-Wolfe algorithms. In case {$h^*$} is the indicator function of a linear constraint and function {$f$} is quadratic, we show a {$O(1/n^2)$} convergence rate on the dual objective, requiring {$O(n \log n)$} calls of lmo. If the problem comes from the constrained optimization problem {$\min_{x\in\mathbb{R}^d}\{f_P(x) \mid Ax - b = 0\}$} then we additionally get bound {$O(1/n^2)$} both on the primal gap and on the infeasibility gap. In the most general case, we show a {$O(1/n)$} convergence rate of the primal-dual gap again requiring {$O(n \log n)$} calls of lmo. To the best of our knowledge, this improves on the known convergence rates for the considered class of saddle-point problems. We show applications to labeling problems frequently appearing in machine learning and computer vision.},
  author       = {Kolmogorov, Vladimir and Pock, Thomas},
  booktitle    = {38th International Conference on Machine Learning},
  location     = {Virtual},
  title        = {One-sided {Frank-Wolfe} algorithms for saddle problems},
  year         = {2021},
}

@article{8077,
  abstract     = {The projection methods with vanilla inertial extrapolation step for variational inequalities have been of interest to many authors recently due to the improved convergence speed contributed by the presence of inertial extrapolation step. However, it is discovered that these projection methods with inertial steps lose the Fejér monotonicity of the iterates with respect to the solution, which is being enjoyed by their corresponding non-inertial projection methods for variational inequalities. This lack of Fejér monotonicity makes projection methods with vanilla inertial extrapolation step for variational inequalities not to converge faster than their corresponding non-inertial projection methods at times. Also, it has recently been proved that the projection methods with vanilla inertial extrapolation step may provide convergence rates that are worse than the classical projected gradient methods for strongly convex functions. In this paper, we introduce projection methods with alternated inertial extrapolation step for solving variational inequalities. We show that the sequence of iterates generated by our methods converges weakly to a solution of the variational inequality under some appropriate conditions. The Fejér monotonicity of even subsequence is recovered in these methods and linear rate of convergence is obtained. The numerical implementations of our methods compared with some other inertial projection methods show that our method is more efficient and outperforms some of these inertial projection methods.},
  author       = {Shehu, Yekini and Iyiola, Olaniyi S.},
  issn         = {0168-9274},
  journal      = {Applied Numerical Mathematics},
  pages        = {315--337},
  publisher    = {Elsevier},
  title        = {Projection methods with alternating inertial steps for variational inequalities: Weak and linear convergence},
  doi          = {10.1016/j.apnum.2020.06.009},
  volume       = {157},
  year         = {2020},
}

@article{7161,
  abstract     = {In this paper, we introduce an inertial projection-type method with different updating strategies for solving quasi-variational inequalities with strongly monotone and Lipschitz continuous operators in real Hilbert spaces. Under standard assumptions, we establish different strong convergence results for the proposed algorithm. Primary numerical experiments demonstrate the potential applicability of our scheme compared with some related methods in the literature.},
  author       = {Shehu, Yekini and Gibali, Aviv and Sagratella, Simone},
  issn         = {1573-2878},
  journal      = {Journal of Optimization Theory and Applications},
  pages        = {877--894},
  publisher    = {Springer Nature},
  title        = {Inertial projection-type methods for solving quasi-variational inequalities in real {Hilbert} spaces},
  doi          = {10.1007/s10957-019-01616-6},
  volume       = {184},
  year         = {2020},
}

@article{6593,
  abstract     = {We consider the monotone variational inequality problem in a Hilbert space and describe a projection-type method with inertial terms under the following properties: (a) The method generates a strongly convergent iteration sequence; (b) The method requires, at each iteration, only one projection onto the feasible set and two evaluations of the operator; (c) The method is designed for variational inequality for which the underline operator is monotone and uniformly continuous; (d) The method includes an inertial term. The latter is also shown to speed up the convergence in our numerical results. A comparison with some related methods is given and indicates that the new method is promising.},
  author       = {Shehu, Yekini and Li, Xiao-Huan and Dong, Qiao-Li},
  issn         = {1572-9265},
  journal      = {Numerical Algorithms},
  pages        = {365--388},
  publisher    = {Springer Nature},
  title        = {An efficient projection-type method for monotone variational inequalities in {Hilbert} spaces},
  doi          = {10.1007/s11075-019-00758-y},
  volume       = {84},
  year         = {2020},
}

@inproceedings{6725,
  abstract     = {A Valued Constraint Satisfaction Problem (VCSP) provides a common framework that can express a wide range of discrete optimization problems. A VCSP instance is given by a finite set of variables, a finite domain of labels, and an objective function to be minimized. This function is represented as a sum of terms where each term depends on a subset of the variables. To obtain different classes of optimization problems, one can restrict all terms to come from a fixed set Γ of cost functions, called a language. 
Recent breakthrough results have established a complete complexity classification of such classes with respect to language Γ: if all cost functions in Γ satisfy a certain algebraic condition then all Γ-instances can be solved in polynomial time, otherwise the problem is NP-hard. Unfortunately, testing this condition for a given language Γ is known to be NP-hard. We thus study exponential algorithms for this meta-problem. We show that the tractability condition of a finite-valued language Γ can be tested in {$O(\sqrt{3}^{\,3|D|} \cdot \mathrm{poly}(\mathrm{size}(\Gamma)))$} time, where D is the domain of Γ and {$\mathrm{poly}(\cdot)$} is some fixed polynomial. We also obtain a matching lower bound under the Strong Exponential Time Hypothesis (SETH). More precisely, we prove that for any constant δ<1 there is no {$O(\sqrt{3}^{\,3\delta|D|})$} algorithm, assuming that SETH holds.},
  author       = {Kolmogorov, Vladimir},
  booktitle    = {46th International Colloquium on Automata, Languages and Programming},
  isbn         = {978-3-95977-109-2},
  issn         = {1868-8969},
  location     = {Patras, Greece},
  pages        = {77:1--77:12},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {Testing the complexity of a valued {CSP} language},
  doi          = {10.4230/LIPICS.ICALP.2019.77},
  volume       = {132},
  year         = {2019},
}

@article{7000,
  abstract     = {The main contributions of this paper are the proposition and the convergence analysis of a class of inertial projection-type algorithm for solving variational inequality problems in real Hilbert spaces where the underline operator is monotone and uniformly continuous. We carry out a unified analysis of the proposed method under very mild assumptions. In particular, weak convergence of the generated sequence is established and nonasymptotic O(1/n) rate of convergence is established, where n denotes the iteration counter. We also present some experimental results to illustrate the profits gained by introducing the inertial extrapolation steps.},
  author       = {Shehu, Yekini and Iyiola, Olaniyi S. and Li, Xiao-Huan and Dong, Qiao-Li},
  issn         = {1807-0302},
  journal      = {Computational and Applied Mathematics},
  number       = {4},
  publisher    = {Springer Nature},
  title        = {Convergence analysis of projection method for variational inequalities},
  doi          = {10.1007/s40314-019-0955-9},
  volume       = {38},
  year         = {2019},
}

@article{7412,
  abstract     = {We develop a framework for the rigorous analysis of focused stochastic local search algorithms. These algorithms search a state space by repeatedly selecting some constraint that is violated in the current state and moving to a random nearby state that addresses the violation, while (we hope) not introducing many new violations. An important class of focused local search algorithms with provable performance guarantees has recently arisen from algorithmizations of the Lovász local lemma (LLL), a nonconstructive tool for proving the existence of satisfying states by introducing a background measure on the state space. While powerful, the state transitions of algorithms in this class must be, in a precise sense, perfectly compatible with the background measure. In many applications this is a very restrictive requirement, and one needs to step outside the class. Here we introduce the notion of measure distortion and develop a framework for analyzing arbitrary focused stochastic local search algorithms, recovering LLL algorithmizations as the special case of no distortion. Our framework takes as input an arbitrary algorithm of such type and an arbitrary probability measure and shows how to use the measure as a yardstick of algorithmic progress, even for algorithms designed independently of the measure.},
  author       = {Achlioptas, Dimitris and Iliopoulos, Fotis and Kolmogorov, Vladimir},
  issn         = {1095-7111},
  journal      = {SIAM Journal on Computing},
  number       = {5},
  pages        = {1583--1602},
  publisher    = {SIAM},
  title        = {A local lemma for focused stochastic algorithms},
  doi          = {10.1137/16m109332x},
  volume       = {48},
  year         = {2019},
}

@inproceedings{7468,
  abstract     = {We present a new proximal bundle method for Maximum-A-Posteriori (MAP) inference in structured energy minimization problems. The method optimizes a Lagrangean relaxation of the original energy minimization problem using a multi plane block-coordinate Frank-Wolfe method that takes advantage of the specific structure of the Lagrangean decomposition. We show empirically that our method outperforms state-of-the-art Lagrangean decomposition based algorithms on some challenging Markov Random Field, multi-label discrete tomography and graph matching problems.},
  author       = {Swoboda, Paul and Kolmogorov, Vladimir},
  booktitle    = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
  isbn         = {9781728132938},
  issn         = {1063-6919},
  location     = {Long Beach, CA, United States},
  publisher    = {IEEE},
  title        = {{MAP} inference via block-coordinate {Frank-Wolfe} algorithm},
  doi          = {10.1109/CVPR.2019.01140},
  volume       = {2019-June},
  year         = {2019},
}

@article{6596,
  abstract     = {It is well known that many problems in image recovery, signal processing, and machine learning can be modeled as finding zeros of the sum of maximal monotone and Lipschitz continuous monotone operators. Many papers have studied forward-backward splitting methods for finding zeros of the sum of two monotone operators in Hilbert spaces. Most of the proposed splitting methods in the literature have been proposed for the sum of maximal monotone and inverse-strongly monotone operators in Hilbert spaces. In this paper, we consider splitting methods for finding zeros of the sum of maximal monotone operators and Lipschitz continuous monotone operators in Banach spaces. We obtain weak and strong convergence results for the zeros of the sum of maximal monotone and Lipschitz continuous monotone operators in Banach spaces. Many already studied problems in the literature can be considered as special cases of this paper.},
  author       = {Shehu, Yekini},
  issn         = {1420-9012},
  journal      = {Results in Mathematics},
  number       = {4},
  publisher    = {Springer},
  title        = {Convergence results of forward-backward algorithms for sum of monotone operators in {Banach} spaces},
  doi          = {10.1007/s00025-019-1061-4},
  volume       = {74},
  year         = {2019},
}

@inproceedings{273,
  abstract     = {The accuracy of information retrieval systems is often measured using complex loss functions such as the average precision (AP) or the normalized discounted cumulative gain (NDCG). Given a set of positive and negative samples, the parameters of a retrieval system can be estimated by minimizing these loss functions. However, the non-differentiability and non-decomposability of these loss functions does not allow for simple gradient based optimization algorithms. This issue is generally circumvented by either optimizing a structured hinge-loss upper bound to the loss function or by using asymptotic methods like the direct-loss minimization framework. Yet, the high computational complexity of loss-augmented inference, which is necessary for both the frameworks, prohibits its use in large training data sets. To alleviate this deficiency, we present a novel quicksort flavored algorithm for a large class of non-decomposable loss functions. We provide a complete characterization of the loss functions that are amenable to our algorithm, and show that it includes both AP and NDCG based loss functions. Furthermore, we prove that no comparison based algorithm can improve upon the computational complexity of our approach asymptotically. We demonstrate the effectiveness of our approach in the context of optimizing the structured hinge loss upper bound of AP and NDCG loss for learning models for a variety of vision tasks. We show that our approach provides significantly better results than simpler decomposable loss functions, while requiring a comparable training time.},
  author       = {Mohapatra, Pritish and Rolinek, Michal and Jawahar, C. V. and Kolmogorov, Vladimir and Kumar, M. Pawan},
  booktitle    = {2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  isbn         = {9781538664209},
  location     = {Salt Lake City, UT, USA},
  pages        = {3693--3701},
  publisher    = {IEEE},
  title        = {Efficient optimization for rank-based loss functions},
  doi          = {10.1109/cvpr.2018.00389},
  year         = {2018},
}

@inproceedings{193,
  abstract     = {We show attacks on five data-independent memory-hard functions (iMHF) that were submitted to the password hashing competition (PHC). Informally, an MHF is a function which cannot be evaluated on dedicated hardware, like ASICs, at significantly lower hardware and/or energy cost than evaluating a single instance on a standard single-core architecture. Data-independent means the memory access pattern of the function is independent of the input; this makes iMHFs harder to construct than data-dependent ones, but the latter can be attacked by various side-channel attacks. Following [Alwen-Blocki'16], we capture the evaluation of an iMHF as a directed acyclic graph (DAG). The cumulative parallel pebbling complexity of this DAG is a measure for the hardware cost of evaluating the iMHF on an ASIC. Ideally, one would like the complexity of a DAG underlying an iMHF to be as close to quadratic in the number of nodes of the graph as possible. Instead, we show that (the DAGs underlying) the following iMHFs are far from this bound: Rig.v2, TwoCats and Gambit each having an exponent no more than 1.75. Moreover, we show that the complexity of the iMHF modes of the PHC finalists Pomelo and Lyra2 have exponents at most 1.83 and 1.67 respectively. To show this we investigate a combinatorial property of each underlying DAG (called its depth-robustness. By establishing upper bounds on this property we are then able to apply the general technique of [Alwen-Block'16] for analyzing the hardware costs of an iMHF.},
  author       = {Alwen, Joel F. and Gazi, Peter and Kamath Hosdurg, Chethan and Klein, Karen and Osang, Georg F. and Pietrzak, Krzysztof Z. and Reyzin, Leonid and Rolinek, Michal and Rybar, Michal},
  booktitle    = {Proceedings of the 2018 on Asia Conference on Computer and Communication Security},
  location     = {Incheon, Republic of Korea},
  pages        = {51--65},
  publisher    = {ACM},
  title        = {On the memory hardness of data independent password hashing functions},
  doi          = {10.1145/3196494.3196534},
  year         = {2018},
}

@article{5975,
  abstract     = {We consider the recent formulation of the algorithmic Lovász Local Lemma [N. Harvey and J. Vondrák, in Proceedings of FOCS, 2015, pp. 1327--1345; D. Achlioptas and F. Iliopoulos, in Proceedings of SODA, 2016, pp. 2024--2038; D. Achlioptas, F. Iliopoulos, and V. Kolmogorov, A Local Lemma for Focused Stochastic Algorithms, arXiv preprint, 2018] for finding objects that avoid "bad features," or "flaws." It extends the Moser–Tardos resampling algorithm [R. A. Moser and G. Tardos, J. ACM, 57 (2010), 11] to more general discrete spaces. At each step the method picks a flaw present in the current state and goes to a new state according to some prespecified probability distribution (which depends on the current state and the selected flaw). However, the recent formulation is less flexible than the Moser–Tardos method since it requires a specific flaw selection rule, whereas the algorithm of Moser and Tardos allows an arbitrary rule (and thus can potentially be implemented more efficiently). We formulate a new "commutativity" condition and prove that it is sufficient for an arbitrary rule to work. It also enables an efficient parallelization under an additional assumption. We then show that existing resampling oracles for perfect matchings and permutations do satisfy this condition.},
  author       = {Kolmogorov, Vladimir},
  issn         = {1095-7111},
  journal      = {SIAM Journal on Computing},
  number       = {6},
  pages        = {2029--2056},
  publisher    = {SIAM},
  title        = {Commutativity in the algorithmic {Lovász} local lemma},
  doi          = {10.1137/16m1093306},
  volume       = {47},
  year         = {2018},
}

