@inproceedings{1000,
  abstract     = {We study probabilistic models of natural images and extend the autoregressive family of PixelCNN models by incorporating latent variables. Subsequently, we describe two new generative image models that exploit different image transformations as latent variables: a quantized grayscale view of the image or a multi-resolution image pyramid. The proposed models tackle two known shortcomings of existing PixelCNN models: 1) their tendency to focus on low-level image details, while largely ignoring high-level image information, such as object shapes, and 2) their computationally costly procedure for image sampling. We experimentally demonstrate the benefits of our LatentPixelCNN models, in particular showing that they produce much more realistic-looking image samples than previous state-of-the-art probabilistic models.},
  author       = {Kolesnikov, Alexander and Lampert, Christoph},
  booktitle    = {Proceedings of the 34th International Conference on Machine Learning},
  isbn         = {978-151085514-4},
  location     = {Sydney, Australia},
  pages        = {1905--1914},
  publisher    = {PMLR},
  title        = {{PixelCNN models with auxiliary variables for natural image modeling}},
  volume       = {70},
  year         = {2017},
}
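
As an illustration of the latent-variable idea in the abstract above, here is a minimal sketch (not from the paper; grayscale_auxiliary is a hypothetical helper, and the 4-bit depth and standard luminance weights are assumptions) of how a quantized grayscale view X_hat of an image X can be computed, so that the model factorizes as p(X) = p(X_hat) * p(X | X_hat), with each factor modeled PixelCNN-style.

import numpy as np

def grayscale_auxiliary(image_rgb, bits=4):
    """Quantized grayscale view of an H x W x 3 uint8 RGB image.

    A sketch of the auxiliary/latent variable X_hat: the joint model
    then factorizes as p(X) = p(X_hat) * p(X | X_hat), each factor
    modeled autoregressively in the PixelCNN style.
    """
    weights = np.array([0.299, 0.587, 0.114], dtype=np.float32)  # assumed luminance weights
    gray = image_rgb.astype(np.float32).dot(weights)             # H x W luminance in [0, 255]
    levels = 2 ** bits                                           # e.g. 16 gray levels for bits=4
    q = np.floor(gray / 256.0 * levels)
    return np.clip(q, 0, levels - 1).astype(np.uint8)

Because X_hat is a deterministic, much coarser view of X, sampling can proceed in two cheaper stages: first draw the global structure X_hat, then fill in colors and details from p(X | X_hat).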

@inproceedings{432,
  abstract     = {Recently there has been significant interest in training machine-learning models at low precision: by reducing precision, one can reduce computation and communication by one order of magnitude. We examine training at reduced precision, both from a theoretical and practical perspective, and ask: is it possible to train models at end-to-end low precision with provable guarantees? Can this lead to consistent order-of-magnitude speedups? We mainly focus on linear models, for which the answer is yes. We develop a simple framework called ZipML, based on a novel strategy called double sampling. Our ZipML framework is able to execute training at low precision with no bias, guaranteeing convergence, whereas naive quantization would introduce significant bias. We validate our framework across a range of applications, and show that it enables an FPGA prototype that is up to 6.5× faster than an implementation using full 32-bit precision. We further develop a variance-optimal stochastic quantization strategy and show that it can make a significant difference in a variety of settings. When applied to linear models together with double sampling, we save up to another 1.7× in data movement compared with uniform quantization. When training deep networks with quantized models, we achieve higher accuracy than the state-of-the-art XNOR-Net.},
  author       = {Zhang, Hantian and Li, Jerry and Kara, Kaan and Alistarh, Dan-Adrian and Liu, Ji and Zhang, Ce},
  booktitle    = {Proceedings of the 34th International Conference on Machine Learning},
  isbn         = {978-151085514-4},
  location     = {Sydney, Australia},
  pages        = {4035--4043},
  publisher    = {PMLR},
  title        = {{ZipML: Training linear models with end-to-end low precision, and a little bit of deep learning}},
  volume       = {70},
  year         = {2017},
}
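
As an illustration of the double-sampling strategy in the abstract above, here is a minimal sketch (not from the paper; stoch_quantize and double_sampled_gradient are hypothetical helpers, and the uniform grid with 16 levels is an assumption) of why two independent quantized copies of a sample yield an unbiased least-squares gradient, whereas naive quantization with a single copy does not.

import numpy as np

def stoch_quantize(v, levels=16):
    """Unbiased stochastic rounding onto a uniform grid: E[Q(v)] = v."""
    lo, hi = float(v.min()), float(v.max())
    scale = (hi - lo) / (levels - 1) if hi > lo else 1.0
    t = (v - lo) / scale
    floor = np.floor(t)
    q = floor + (np.random.rand(*v.shape) < (t - floor))  # round up w.p. frac(t)
    return lo + q * scale

def double_sampled_gradient(a, b, x, levels=16):
    """Least-squares gradient (a^T x - b) a from quantized data.

    The gradient is quadratic in the sample a, so reusing ONE quantized
    copy twice leaves a biased E[Q(a) Q(a)^T] term. Drawing TWO
    independent copies instead gives
        E[(Q1(a)^T x) Q2(a) - b Q2(a)] = (a^T x - b) a,
    i.e. an unbiased low-precision gradient.
    """
    q1 = stoch_quantize(a, levels)
    q2 = stoch_quantize(a, levels)
    return (np.dot(q1, x) - b) * q2

Plugging this unbiased gradient into standard SGD is what preserves the convergence guarantees referred to in the abstract.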
