@inproceedings{Stich14sarp,
  author    = {Stich, Sebastian Urban},
  title     = {On Low Complexity Acceleration Techniques for Randomized Optimization},
  booktitle = {Parallel Problem Solving from Nature -- {PPSN} {XIII}},
  year      = {2014},
  editor    = {Bartz-Beielstein, Thomas and Branke, J{\"u}rgen and Filipi{\v{c}}, Bogdan and Smith, Jim},
  pages     = {130--140},
  address   = {Cham},
  publisher = {Springer International Publishing},
  abstract  = {Recently it was shown by Nesterov (2011) that techniques from convex optimization can be used to successfully accelerate simple derivative-free randomized optimization methods. The appeal of those schemes lies in their low complexity, which is only $\Theta(n)$ per iteration---compared to $\Theta(n^2)$ for algorithms storing second-order information or covariance matrices. From a high-level point of view, those accelerated schemes employ correlations between successive iterates---a concept looking similar to the evolution path used in Covariance Matrix Adaptation Evolution Strategies (CMA-ES). In this contribution, we (i) implement and empirically test a simple accelerated random search scheme (SARP). Our study is the first to provide numerical evidence that SARP can effectively be implemented with adaptive step size control and does not require access to gradient or advanced line search oracles. We (ii) try to empirically verify the supposed analogy between the evolution path and SARP. We propose an algorithm CMA-EP that uses only the evolution path to bias the search. This algorithm can be generalized to a family of low memory schemes, with complexity $\Theta(mn)$ per iteration, following a recent approach by Loshchilov (2014). The study shows that the performance of CMA-EP heavily depends on the spectra of the objective function and thus it cannot accelerate as consistently as SARP.},
  isbn      = {978-3-319-10762-2},
}