@article{92,
  author      = {Patra, Beno{\^\i}t},
  title       = {Convergence of Distributed Asynchronous Learning Vector Quantization Algorithms},
  year        = {2010},
  abstract    = {Motivated by the problem of effectively executing clustering algorithms on very large data sets, we address a model for large scale distributed clustering methods. To this end, we briefly recall some standards on the quantization problem and some results on the almost sure convergence of the Competitive Learning Vector Quantization (CLVQ) procedure. A general model for linear distributed asynchronous algorithms well adapted to several parallel computing architectures is also discussed. Our approach brings together this scalable model and the CLVQ algorithm, and we call the resulting technique the Distributed Asynchronous Learning Vector Quantization algorithm (DALVQ). An in-depth analysis of the almost sure convergence of the DALVQ algorithm is performed. A striking result is that we prove that the multiple versions of the quantizers distributed among the processors in the parallel architecture asymptotically reach a consensus almost surely. Furthermore, we also show that these versions converge almost surely towards the same nearly optimal value for the quantization criterion.},
  keywords    = {asynchronous, distributed, distributed consensus, k-means, scalability, stochastic optimization, vector quantization},
  attachments = {http://www.quantize.maths-fi.com/sites/default/files/AsyncStochGradient.pdf},
}