@article{8268,
  abstract     = {Modern scientific instruments produce vast amounts of data, which can overwhelm the processing ability of computer systems. Lossy compression of data is an intriguing solution, but comes with its own drawbacks, such as potential signal loss, and the need for careful optimization of the compression ratio. In this work, we focus on a setting where this problem is especially acute: compressive sensing frameworks for interferometry and medical imaging. We ask the following question: can the precision of the data representation be lowered for all inputs, with recovery guarantees and practical performance? Our first contribution is a theoretical analysis of the normalized Iterative Hard Thresholding (IHT) algorithm when all input data, meaning both the measurement matrix and the observation vector, are quantized aggressively. We present a variant of low precision normalized IHT that, under mild conditions, can still provide recovery guarantees. The second contribution is the application of our quantization framework to radio astronomy and magnetic resonance imaging. We show that lowering the precision of the data can significantly accelerate image recovery. We evaluate our approach on telescope data and samples of brain images using CPU and FPGA implementations, achieving up to a 9x speedup with negligible loss of recovery quality.},
  author       = {G{\"u}rel, Nezihe Merve and Kara, Kaan and Stojanov, Alen and Smith, Tyler and Lemmin, Thomas and Alistarh, Dan-Adrian and P{\"u}schel, Markus and Zhang, Ce},
  issn         = {1941-0476},
  journal      = {IEEE Transactions on Signal Processing},
  pages        = {4268--4282},
  publisher    = {IEEE},
  title        = {Compressive Sensing Using Iterative Hard Thresholding with Low Precision Data Representation: Theory and Applications},
  doi          = {10.1109/TSP.2020.3010355},
  volume       = {68},
  year         = {2020},
}

@article{6750,
  abstract     = {Polar codes have gained extensive attention during the past few years and recently they have been selected for the next generation of wireless communications standards (5G). Successive-cancellation-based (SC-based) decoders, such as SC list (SCL) and SC flip (SCF), provide a reasonable error performance for polar codes at the cost of low decoding speed. Fast SC-based decoders, such as Fast-SSC, Fast-SSCL, and Fast-SSCF, identify the special constituent codes in a polar code graph off-line, produce a list of operations, store the list in memory, and feed the list to the decoder to decode the constituent codes in order efficiently, thus increasing the decoding speed. However, the list of operations is dependent on the code rate and as the rate changes, a new list is produced, making fast SC-based decoders not rate-flexible. In this paper, we propose a completely rate-flexible fast SC-based decoder by creating the list of operations directly in hardware, with low implementation complexity. We further propose a hardware architecture implementing the proposed method and show that the area occupation of the rate-flexible fast SC-based decoder in this paper is only 38% of the total area of the memory-based baseline decoder when 5G code rates are supported.},
  author       = {Hashemi, Seyyed Ali and Condo, Carlo and Mondelli, Marco and Gross, Warren J.},
  issn         = {1053-587X},
  journal      = {IEEE Transactions on Signal Processing},
  number       = {22},
  pages        = {5689--5701},
  publisher    = {IEEE},
  title        = {Rate-Flexible Fast Polar Decoders},
  doi          = {10.1109/TSP.2019.2944738},
  volume       = {67},
  year         = {2019},
}

