[{"language":[{"iso":"eng"}],"conference":{"end_date":"2021-07-24","location":"Virtual","start_date":"2021-07-18","name":"ICML: International Conference on Machine Learning"},"publication":"Proceedings of the 38th International Conference on Machine Learning","oa_version":"Published Version","project":[{"_id":"059876FA-7A3F-11EA-A408-12923DDC885E","name":"Prix Lopez-Loretta 2019 - Marco Mondelli"}],"main_file_link":[{"url":"http://proceedings.mlr.press/v139/nguyen21g.html","open_access":"1"}],"user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","status":"public","date_published":"2021-01-01T00:00:00Z","type":"conference","oa":1,"page":"8119-8129","quality_controlled":"1","publisher":"ML Research Press","editor":[{"full_name":"Meila, Marina","last_name":"Meila","first_name":"Marina"},{"full_name":"Zhang, Tong","last_name":"Zhang","first_name":"Tong"}],"_id":"10595","author":[{"first_name":"Quynh","last_name":"Nguyen","full_name":"Nguyen, Quynh"},{"first_name":"Marco","last_name":"Mondelli","orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco","id":"27EB676C-8706-11E9-9510-7717E6697425"},{"last_name":"Montufar","first_name":"Guido F","full_name":"Montufar, Guido F"}],"publication_status":"published","department":[{"_id":"MaMo"}],"date_created":"2022-01-03T10:57:49Z","article_processing_charge":"No","title":"Tight bounds on the smallest eigenvalue of the neural tangent kernel for deep ReLU networks","alternative_title":["Proceedings of Machine Learning Research"],"intvolume":"       139","acknowledgement":"The authors would like to thank the anonymous reviewers for their helpful comments. MM was partially supported\r\nby the 2019 Lopez-Loreta Prize. QN and GM acknowledge support from the European Research Council (ERC) under\r\nthe European Union’s Horizon 2020 research and innovation programme (grant agreement no 757983).","volume":139,"date_updated":"2024-09-10T13:03:17Z","year":"2021","citation":{"short":"Q. Nguyen, M. Mondelli, G.F. Montufar, in:, M. Meila, T. 
Zhang (Eds.), Proceedings of the 38th International Conference on Machine Learning, ML Research Press, 2021, pp. 8119–8129.","mla":"Nguyen, Quynh, et al. “Tight Bounds on the Smallest Eigenvalue of the Neural Tangent Kernel for Deep ReLU Networks.” <i>Proceedings of the 38th International Conference on Machine Learning</i>, edited by Marina Meila and Tong Zhang, vol. 139, ML Research Press, 2021, pp. 8119–29.","ista":"Nguyen Q, Mondelli M, Montufar GF. 2021. Tight bounds on the smallest eigenvalue of the neural tangent kernel for deep ReLU networks. Proceedings of the 38th International Conference on Machine Learning. ICML: International Conference on Machine Learning, Proceedings of Machine Learning Research, vol. 139, 8119–8129.","ama":"Nguyen Q, Mondelli M, Montufar GF. Tight bounds on the smallest eigenvalue of the neural tangent kernel for deep ReLU networks. In: Meila M, Zhang T, eds. <i>Proceedings of the 38th International Conference on Machine Learning</i>. Vol 139. ML Research Press; 2021:8119-8129.","apa":"Nguyen, Q., Mondelli, M., &#38; Montufar, G. F. (2021). Tight bounds on the smallest eigenvalue of the neural tangent kernel for deep ReLU networks. In M. Meila &#38; T. Zhang (Eds.), <i>Proceedings of the 38th International Conference on Machine Learning</i> (Vol. 139, pp. 8119–8129). Virtual: ML Research Press.","ieee":"Q. Nguyen, M. Mondelli, and G. F. Montufar, “Tight bounds on the smallest eigenvalue of the neural tangent kernel for deep ReLU networks,” in <i>Proceedings of the 38th International Conference on Machine Learning</i>, Virtual, 2021, vol. 139, pp. 8119–8129.","chicago":"Nguyen, Quynh, Marco Mondelli, and Guido F Montufar. “Tight Bounds on the Smallest Eigenvalue of the Neural Tangent Kernel for Deep ReLU Networks.” In <i>Proceedings of the 38th International Conference on Machine Learning</i>, edited by Marina Meila and Tong Zhang, 139:8119–29. 
ML Research Press, 2021."},"external_id":{"arxiv":["2012.11654"]},"arxiv":1,"abstract":[{"text":"A recent line of work has analyzed the theoretical properties of deep neural networks via the Neural Tangent Kernel (NTK). In particular, the smallest eigenvalue of the NTK has been related to the memorization capacity, the global convergence of gradient descent algorithms and the generalization of deep nets. However, existing results either provide bounds in the two-layer setting or assume that the spectrum of the NTK matrices is bounded away from 0 for multi-layer networks. In this paper, we provide tight bounds on the smallest eigenvalue of NTK matrices for deep ReLU nets, both in the limiting case of infinite widths and for finite widths. In the finite-width setting, the network architectures we consider are fairly general: we require the existence of a wide layer with roughly order of $N$ neurons, $N$ being the number of data samples; and the scaling of the remaining layer widths is arbitrary (up to logarithmic factors). To obtain our results, we analyze various quantities of independent interest: we give lower bounds on the smallest singular value of hidden feature matrices, and upper bounds on the Lipschitz constant of input-output feature maps.","lang":"eng"}]},{"abstract":[{"text":"We thank Emmanuel Abbe and Min Ye for providing us the implementation of RPA decoding. D. Fathollahi and M. Mondelli are partially supported by the 2019 Lopez-Loreta Prize. N. Farsad is supported by Discovery Grant from the Natural Sciences and Engineering Research Council of Canada (NSERC) and Canada Foundation for Innovation (CFI), John R. Evans Leader Fund. S. A. Hashemi is supported by a Postdoctoral Fellowship from NSERC.","lang":"eng"}],"day":"01","doi":"10.1109/isit45174.2021.9517887","arxiv":1,"external_id":{"isi":["000701502201029"],"arxiv":["2011.12882"]},"isi":1,"citation":{"mla":"Fathollahi, Dorsa, et al. 
“Sparse Multi-Decoder Recursive Projection Aggregation for Reed-Muller Codes.” <i>2021 IEEE International Symposium on Information Theory</i>, Institute of Electrical and Electronics Engineers, 2021, pp. 1082–87, doi:<a href=\"https://doi.org/10.1109/isit45174.2021.9517887\">10.1109/isit45174.2021.9517887</a>.","short":"D. Fathollahi, N. Farsad, S.A. Hashemi, M. Mondelli, in:, 2021 IEEE International Symposium on Information Theory, Institute of Electrical and Electronics Engineers, 2021, pp. 1082–1087.","ista":"Fathollahi D, Farsad N, Hashemi SA, Mondelli M. 2021. Sparse multi-decoder recursive projection aggregation for Reed-Muller codes. 2021 IEEE International Symposium on Information Theory. ISIT: International Symposium on Information Theory, 1082–1087.","ama":"Fathollahi D, Farsad N, Hashemi SA, Mondelli M. Sparse multi-decoder recursive projection aggregation for Reed-Muller codes. In: <i>2021 IEEE International Symposium on Information Theory</i>. Institute of Electrical and Electronics Engineers; 2021:1082-1087. doi:<a href=\"https://doi.org/10.1109/isit45174.2021.9517887\">10.1109/isit45174.2021.9517887</a>","apa":"Fathollahi, D., Farsad, N., Hashemi, S. A., &#38; Mondelli, M. (2021). Sparse multi-decoder recursive projection aggregation for Reed-Muller codes. In <i>2021 IEEE International Symposium on Information Theory</i> (pp. 1082–1087). Virtual, Melbourne, Australia: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/isit45174.2021.9517887\">https://doi.org/10.1109/isit45174.2021.9517887</a>","ieee":"D. Fathollahi, N. Farsad, S. A. Hashemi, and M. Mondelli, “Sparse multi-decoder recursive projection aggregation for Reed-Muller codes,” in <i>2021 IEEE International Symposium on Information Theory</i>, Virtual, Melbourne, Australia, 2021, pp. 1082–1087.","chicago":"Fathollahi, Dorsa, Nariman Farsad, Seyyed Ali Hashemi, and Marco Mondelli. 
“Sparse Multi-Decoder Recursive Projection Aggregation for Reed-Muller Codes.” In <i>2021 IEEE International Symposium on Information Theory</i>, 1082–87. Institute of Electrical and Electronics Engineers, 2021. <a href=\"https://doi.org/10.1109/isit45174.2021.9517887\">https://doi.org/10.1109/isit45174.2021.9517887</a>."},"year":"2021","date_updated":"2024-09-10T13:03:18Z","title":"Sparse multi-decoder recursive projection aggregation for Reed-Muller codes","department":[{"_id":"MaMo"}],"date_created":"2022-01-03T11:31:26Z","article_processing_charge":"No","publication_status":"published","author":[{"full_name":"Fathollahi, Dorsa","last_name":"Fathollahi","first_name":"Dorsa"},{"full_name":"Farsad, Nariman","last_name":"Farsad","first_name":"Nariman"},{"full_name":"Hashemi, Seyyed Ali","first_name":"Seyyed Ali","last_name":"Hashemi"},{"orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco","first_name":"Marco","last_name":"Mondelli","id":"27EB676C-8706-11E9-9510-7717E6697425"}],"scopus_import":"1","_id":"10597","publisher":"Institute of Electrical and Electronics Engineers","quality_controlled":"1","page":"1082-1087","oa":1,"publication_identifier":{"isbn":["978-1-5386-8210-4"],"eisbn":["978-1-5386-8209-8"]},"type":"conference","date_published":"2021-09-01T00:00:00Z","status":"public","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","main_file_link":[{"url":"https://arxiv.org/abs/2011.12882","open_access":"1"}],"month":"09","project":[{"_id":"059876FA-7A3F-11EA-A408-12923DDC885E","name":"Prix Lopez-Loretta 2019 - Marco Mondelli"}],"oa_version":"Preprint","publication":"2021 IEEE International Symposium on Information Theory","conference":{"location":"Virtual, Melbourne, Australia","end_date":"2021-07-20","start_date":"2021-07-12","name":"ISIT: International Symposium on Information Theory"},"language":[{"iso":"eng"}]},{"publication":"Proceedings of The 24th International Conference on Artificial Intelligence and 
Statistics","month":"04","oa_version":"Preprint","project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"language":[{"iso":"eng"}],"conference":{"name":"AISTATS: Artificial Intelligence and Statistics","start_date":"2021-04-13","end_date":"2021-04-15","location":"Virtual, San Diego, CA, United States"},"date_published":"2021-04-01T00:00:00Z","type":"conference","oa":1,"publication_identifier":{"issn":["2640-3498"]},"status":"public","user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","related_material":{"record":[{"status":"public","relation":"later_version","id":"12480"}]},"main_file_link":[{"open_access":"1","url":"https://proceedings.mlr.press/v130/mondelli21a.html"}],"author":[{"id":"27EB676C-8706-11E9-9510-7717E6697425","first_name":"Marco","last_name":"Mondelli","orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco"},{"first_name":"Ramji","last_name":"Venkataramanan","full_name":"Venkataramanan, Ramji"}],"_id":"10598","scopus_import":"1","title":"Approximate message passing with spectral initialization for generalized linear models","alternative_title":["Proceedings of Machine Learning Research"],"intvolume":"       130","publication_status":"published","article_processing_charge":"Yes (via OA deal)","date_created":"2022-01-03T11:34:22Z","department":[{"_id":"MaMo"}],"page":"397-405","quality_controlled":"1","publisher":"ML Research Press","editor":[{"full_name":"Banerjee, Arindam","last_name":"Banerjee","first_name":"Arindam"},{"full_name":"Fukumizu, Kenji","first_name":"Kenji","last_name":"Fukumizu"}],"external_id":{"arxiv":["2010.03460"]},"date_updated":"2024-03-07T10:36:53Z","citation":{"ama":"Mondelli M, Venkataramanan R. Approximate message passing with spectral initialization for generalized linear models. In: Banerjee A, Fukumizu K, eds. <i>Proceedings of The 24th International Conference on Artificial Intelligence and Statistics</i>. Vol 130. 
ML Research Press; 2021:397-405.","apa":"Mondelli, M., &#38; Venkataramanan, R. (2021). Approximate message passing with spectral initialization for generalized linear models. In A. Banerjee &#38; K. Fukumizu (Eds.), <i>Proceedings of The 24th International Conference on Artificial Intelligence and Statistics</i> (Vol. 130, pp. 397–405). Virtual, San Diego, CA, United States: ML Research Press.","ieee":"M. Mondelli and R. Venkataramanan, “Approximate message passing with spectral initialization for generalized linear models,” in <i>Proceedings of The 24th International Conference on Artificial Intelligence and Statistics</i>, Virtual, San Diego, CA, United States, 2021, vol. 130, pp. 397–405.","chicago":"Mondelli, Marco, and Ramji Venkataramanan. “Approximate Message Passing with Spectral Initialization for Generalized Linear Models.” In <i>Proceedings of The 24th International Conference on Artificial Intelligence and Statistics</i>, edited by Arindam Banerjee and Kenji Fukumizu, 130:397–405. ML Research Press, 2021.","mla":"Mondelli, Marco, and Ramji Venkataramanan. “Approximate Message Passing with Spectral Initialization for Generalized Linear Models.” <i>Proceedings of The 24th International Conference on Artificial Intelligence and Statistics</i>, edited by Arindam Banerjee and Kenji Fukumizu, vol. 130, ML Research Press, 2021, pp. 397–405.","short":"M. Mondelli, R. Venkataramanan, in:, A. Banerjee, K. Fukumizu (Eds.), Proceedings of The 24th International Conference on Artificial Intelligence and Statistics, ML Research Press, 2021, pp. 397–405.","ista":"Mondelli M, Venkataramanan R. 2021. Approximate message passing with spectral initialization for generalized linear models. Proceedings of The 24th International Conference on Artificial Intelligence and Statistics. AISTATS: Artificial Intelligence and Statistics, Proceedings of Machine Learning Research, vol. 
130, 397–405."},"year":"2021","abstract":[{"text":" We consider the problem of estimating a signal from measurements obtained via a generalized linear model. We focus on estimators based on approximate message passing (AMP), a family of iterative algorithms with many appealing features: the performance of AMP in the high-dimensional limit can be succinctly characterized under suitable model assumptions; AMP can also be tailored to the empirical distribution of the signal entries, and for a wide class of estimation problems, AMP is conjectured to be optimal among all polynomial-time algorithms. However, a major issue of AMP is that in many models (such as phase retrieval), it requires an initialization correlated with the ground-truth signal and independent from the measurement matrix. Assuming that such an initialization is available is typically not realistic. In this paper, we solve this problem by proposing an AMP algorithm initialized with a spectral estimator. With such an initialization, the standard AMP analysis fails since the spectral estimator depends in a complicated way on the design matrix. Our main contribution is a rigorous characterization of the performance of AMP with spectral initialization in the high-dimensional limit. The key technical idea is to define and analyze a two-phase artificial AMP algorithm that first produces the spectral estimator, and then closely approximates the iterates of the true AMP. We also provide numerical results that demonstrate the validity of the proposed approach. ","lang":"eng"}],"arxiv":1,"day":"01","volume":130,"acknowledgement":"The authors would like to thank Andrea Montanari for helpful discussions. M. Mondelli was partially supported by the 2019 Lopez-Loreta Prize. R. 
Venkataramanan was partially supported by the Alan Turing Institute under the EPSRC grant EP/N510129/1."},{"publisher":"Institute of Electrical and Electronics Engineers","page":"943-947","quality_controlled":"1","publication_status":"published","department":[{"_id":"MaMo"}],"date_created":"2022-01-03T11:39:51Z","article_processing_charge":"No","title":"Successive syndrome-check decoding of polar codes","_id":"10599","scopus_import":"1","author":[{"full_name":"Hashemi, Seyyed Ali","first_name":"Seyyed Ali","last_name":"Hashemi"},{"id":"27EB676C-8706-11E9-9510-7717E6697425","orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco","first_name":"Marco","last_name":"Mondelli"},{"full_name":"Cioffi, John","first_name":"John","last_name":"Cioffi"},{"first_name":"Andrea","last_name":"Goldsmith","full_name":"Goldsmith, Andrea"}],"acknowledgement":"This work is supported in part by ONR grant N00014-18-1-2191. S. A. Hashemi was supported by a Postdoctoral Fellowship from the Natural Sciences and Engineering Research Council of Canada (NSERC) and by Huawei. M. Mondelli was partially supported by the 2019 Lopez-Loreta Prize.","volume":"2021-October","doi":"10.1109/IEEECONF53345.2021.9723394","arxiv":1,"day":"01","abstract":[{"lang":"eng","text":"A two-part successive syndrome-check decoding of polar codes is proposed with the first part successively refining the received codeword and the second part checking its syndrome. A new formulation of the successive-cancellation (SC) decoding algorithm is presented that allows for successively refining the received codeword by comparing the log-likelihood ratio value of a frozen bit with its predefined value. The syndrome of the refined received codeword is then checked for possible errors. In case there are no errors, the decoding process is terminated. Otherwise, the decoder continues to refine the received codeword. 
The proposed method is extended to the case of SC list (SCL) decoding by terminating the decoding process when the syndrome of the best candidate in the list indicates no errors. Simulation results show that the proposed method reduces the time-complexity of SC and SCL decoders and their fast variants, especially at high signal-to-noise ratios."}],"date_updated":"2024-09-10T13:03:17Z","year":"2021","citation":{"ieee":"S. A. Hashemi, M. Mondelli, J. Cioffi, and A. Goldsmith, “Successive syndrome-check decoding of polar codes,” in <i>Proceedings of the 55th Asilomar Conference on Signals, Systems, and Computers</i>, Virtual, Pacific Grove, CA, United States, 2021, vol. 2021–October, pp. 943–947.","chicago":"Hashemi, Seyyed Ali, Marco Mondelli, John Cioffi, and Andrea Goldsmith. “Successive Syndrome-Check Decoding of Polar Codes.” In <i>Proceedings of the 55th Asilomar Conference on Signals, Systems, and Computers</i>, 2021–October:943–47. Institute of Electrical and Electronics Engineers, 2021. <a href=\"https://doi.org/10.1109/IEEECONF53345.2021.9723394\">https://doi.org/10.1109/IEEECONF53345.2021.9723394</a>.","apa":"Hashemi, S. A., Mondelli, M., Cioffi, J., &#38; Goldsmith, A. (2021). Successive syndrome-check decoding of polar codes. In <i>Proceedings of the 55th Asilomar Conference on Signals, Systems, and Computers</i> (Vol. 2021–October, pp. 943–947). Virtual, Pacific Grove, CA, United States: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/IEEECONF53345.2021.9723394\">https://doi.org/10.1109/IEEECONF53345.2021.9723394</a>","ama":"Hashemi SA, Mondelli M, Cioffi J, Goldsmith A. Successive syndrome-check decoding of polar codes. In: <i>Proceedings of the 55th Asilomar Conference on Signals, Systems, and Computers</i>. Vol 2021-October. Institute of Electrical and Electronics Engineers; 2021:943-947. 
doi:<a href=\"https://doi.org/10.1109/IEEECONF53345.2021.9723394\">10.1109/IEEECONF53345.2021.9723394</a>","ista":"Hashemi SA, Mondelli M, Cioffi J, Goldsmith A. 2021. Successive syndrome-check decoding of polar codes. Proceedings of the 55th Asilomar Conference on Signals, Systems, and Computers. ACSSC: Asilomar Conference on Signals, Systems, and Computers vol. 2021–October, 943–947.","mla":"Hashemi, Seyyed Ali, et al. “Successive Syndrome-Check Decoding of Polar Codes.” <i>Proceedings of the 55th Asilomar Conference on Signals, Systems, and Computers</i>, vol. 2021–October, Institute of Electrical and Electronics Engineers, 2021, pp. 943–47, doi:<a href=\"https://doi.org/10.1109/IEEECONF53345.2021.9723394\">10.1109/IEEECONF53345.2021.9723394</a>.","short":"S.A. Hashemi, M. Mondelli, J. Cioffi, A. Goldsmith, in:, Proceedings of the 55th Asilomar Conference on Signals, Systems, and Computers, Institute of Electrical and Electronics Engineers, 2021, pp. 943–947."},"external_id":{"arxiv":["2112.00057"]},"conference":{"start_date":"2021-10-31","name":"ACSSC: Asilomar Conference on Signals, Systems, and Computers","location":"Virtual, Pacific Grove, CA, United States","end_date":"2021-11-03"},"language":[{"iso":"eng"}],"oa_version":"Preprint","project":[{"_id":"059876FA-7A3F-11EA-A408-12923DDC885E","name":"Prix Lopez-Loretta 2019 - Marco Mondelli"}],"month":"11","publication":"Proceedings of the 55th Asilomar Conference on Signals, Systems, and Computers","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2112.00057","open_access":"1"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","status":"public","publication_identifier":{"issn":["1058-6393"],"isbn":["9781665458283"]},"oa":1,"date_published":"2021-11-01T00:00:00Z","type":"conference"},{"volume":"2020-June","acknowledgement":"M. Mondelli was partially supported by grants NSF DMS-1613091, CCF-1714305, IIS-1741162 and ONR N00014-18-1-2729. S. A. 
Hashemi is supported by a Postdoctoral Fellowship from the Natural Sciences and Engineering Research Council of Canada (NSERC) and by Huawei.","abstract":[{"text":"This work analyzes the latency of the simplified successive cancellation (SSC) decoding scheme for polar codes proposed by Alamdar-Yazdi and Kschischang. It is shown that, unlike conventional successive cancellation decoding, where latency is linear in the block length, the latency of SSC decoding is sublinear. More specifically, the latency of SSC decoding is O(N^{1−1/µ}), where N is the block length and µ is the scaling exponent of the channel, which captures the speed of convergence of the rate to capacity. Numerical results demonstrate the tightness of the bound and show that most of the latency reduction arises from the parallel decoding of subcodes of rate 0 and 1.","lang":"eng"}],"day":"01","arxiv":1,"doi":"10.1109/ISIT44484.2020.9174141","external_id":{"arxiv":["1909.04892"]},"year":"2020","citation":{"mla":"Mondelli, Marco, et al. “Simplified Successive Cancellation Decoding of Polar Codes Has Sublinear Latency.” <i>IEEE International Symposium on Information Theory - Proceedings</i>, vol. 2020–June, 401–406, IEEE, 2020, doi:<a href=\"https://doi.org/10.1109/ISIT44484.2020.9174141\">10.1109/ISIT44484.2020.9174141</a>.","short":"M. Mondelli, S.A. Hashemi, J. Cioffi, A. Goldsmith, in:, IEEE International Symposium on Information Theory - Proceedings, IEEE, 2020.","ista":"Mondelli M, Hashemi SA, Cioffi J, Goldsmith A. 2020. Simplified successive cancellation decoding of polar codes has sublinear latency. IEEE International Symposium on Information Theory - Proceedings. ISIT: International Symposium on Information Theory vol. 2020–June, 401–406.","apa":"Mondelli, M., Hashemi, S. A., Cioffi, J., &#38; Goldsmith, A. (2020). Simplified successive cancellation decoding of polar codes has sublinear latency. In <i>IEEE International Symposium on Information Theory - Proceedings</i> (Vol. 2020–June). 
Los Angeles, CA, United States: IEEE. <a href=\"https://doi.org/10.1109/ISIT44484.2020.9174141\">https://doi.org/10.1109/ISIT44484.2020.9174141</a>","ama":"Mondelli M, Hashemi SA, Cioffi J, Goldsmith A. Simplified successive cancellation decoding of polar codes has sublinear latency. In: <i>IEEE International Symposium on Information Theory - Proceedings</i>. Vol 2020-June. IEEE; 2020. doi:<a href=\"https://doi.org/10.1109/ISIT44484.2020.9174141\">10.1109/ISIT44484.2020.9174141</a>","ieee":"M. Mondelli, S. A. Hashemi, J. Cioffi, and A. Goldsmith, “Simplified successive cancellation decoding of polar codes has sublinear latency,” in <i>IEEE International Symposium on Information Theory - Proceedings</i>, Los Angeles, CA, United States, 2020, vol. 2020–June.","chicago":"Mondelli, Marco, Seyyed Ali Hashemi, John Cioffi, and Andrea Goldsmith. “Simplified Successive Cancellation Decoding of Polar Codes Has Sublinear Latency.” In <i>IEEE International Symposium on Information Theory - Proceedings</i>, Vol. 2020–June. IEEE, 2020. 
<a href=\"https://doi.org/10.1109/ISIT44484.2020.9174141\">https://doi.org/10.1109/ISIT44484.2020.9174141</a>."},"date_updated":"2023-08-07T13:36:24Z","publisher":"IEEE","quality_controlled":"1","title":"Simplified successive cancellation decoding of polar codes has sublinear latency","date_created":"2020-09-20T22:01:37Z","department":[{"_id":"MaMo"}],"article_processing_charge":"No","publication_status":"published","author":[{"orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco","first_name":"Marco","last_name":"Mondelli","id":"27EB676C-8706-11E9-9510-7717E6697425"},{"full_name":"Hashemi, Seyyed Ali","first_name":"Seyyed Ali","last_name":"Hashemi"},{"last_name":"Cioffi","first_name":"John","full_name":"Cioffi, John"},{"last_name":"Goldsmith","first_name":"Andrea","full_name":"Goldsmith, Andrea"}],"scopus_import":"1","_id":"8536","status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","related_material":{"record":[{"relation":"later_version","id":"9047","status":"public"}]},"main_file_link":[{"url":"https://arxiv.org/abs/1909.04892","open_access":"1"}],"oa":1,"publication_identifier":{"isbn":["9781728164328"],"issn":["21578095"]},"type":"conference","date_published":"2020-06-01T00:00:00Z","conference":{"start_date":"2020-06-21","name":"ISIT: International Symposium on Information Theory","end_date":"2020-06-26","location":"Los Angeles, CA, United States"},"language":[{"iso":"eng"}],"article_number":"401-406","month":"06","oa_version":"Preprint","publication":"IEEE International Symposium on Information Theory - Proceedings"},{"quality_controlled":"1","page":"3619-3642","article_type":"original","publisher":"Institute of Mathematical Statistics","issue":"6","author":[{"last_name":"Javanmard","first_name":"Adel","full_name":"Javanmard, Adel"},{"orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco","first_name":"Marco","last_name":"Mondelli","id":"27EB676C-8706-11E9-9510-7717E6697425"},{"full_name":"Montanari, 
Andrea","first_name":"Andrea","last_name":"Montanari"}],"_id":"6748","intvolume":"        48","title":"Analysis of a two-layer neural network via displacement convexity","department":[{"_id":"MaMo"}],"article_processing_charge":"No","date_created":"2019-07-31T09:39:42Z","publication_status":"published","volume":48,"external_id":{"arxiv":["1901.01375"],"isi":["000598369200021"]},"isi":1,"citation":{"mla":"Javanmard, Adel, et al. “Analysis of a Two-Layer Neural Network via Displacement Convexity.” <i>Annals of Statistics</i>, vol. 48, no. 6, Institute of Mathematical Statistics, 2020, pp. 3619–42, doi:<a href=\"https://doi.org/10.1214/20-AOS1945\">10.1214/20-AOS1945</a>.","short":"A. Javanmard, M. Mondelli, A. Montanari, Annals of Statistics 48 (2020) 3619–3642.","ista":"Javanmard A, Mondelli M, Montanari A. 2020. Analysis of a two-layer neural network via displacement convexity. Annals of Statistics. 48(6), 3619–3642.","ama":"Javanmard A, Mondelli M, Montanari A. Analysis of a two-layer neural network via displacement convexity. <i>Annals of Statistics</i>. 2020;48(6):3619-3642. doi:<a href=\"https://doi.org/10.1214/20-AOS1945\">10.1214/20-AOS1945</a>","apa":"Javanmard, A., Mondelli, M., &#38; Montanari, A. (2020). Analysis of a two-layer neural network via displacement convexity. <i>Annals of Statistics</i>. Institute of Mathematical Statistics. <a href=\"https://doi.org/10.1214/20-AOS1945\">https://doi.org/10.1214/20-AOS1945</a>","chicago":"Javanmard, Adel, Marco Mondelli, and Andrea Montanari. “Analysis of a Two-Layer Neural Network via Displacement Convexity.” <i>Annals of Statistics</i>. Institute of Mathematical Statistics, 2020. <a href=\"https://doi.org/10.1214/20-AOS1945\">https://doi.org/10.1214/20-AOS1945</a>.","ieee":"A. Javanmard, M. Mondelli, and A. Montanari, “Analysis of a two-layer neural network via displacement convexity,” <i>Annals of Statistics</i>, vol. 48, no. 6. Institute of Mathematical Statistics, pp. 
3619–3642, 2020."},"year":"2020","date_updated":"2024-03-06T08:28:50Z","abstract":[{"text":"Fitting a function by using linear combinations of a large number N of `simple' components is one of the most fruitful ideas in statistical learning. This idea lies at the core of a variety of methods, from two-layer neural networks to kernel regression, to boosting. In general, the resulting risk minimization problem is non-convex and is solved by gradient descent or its variants. Unfortunately, little is known about global convergence properties of these approaches.\r\nHere we consider the problem of learning a concave function f on a compact convex domain Ω⊆ℝd, using linear combinations of `bump-like' components (neurons). The parameters to be fitted are the centers of N bumps, and the resulting empirical risk minimization problem is highly non-convex. We prove that, in the limit in which the number of neurons diverges, the evolution of gradient descent converges to a Wasserstein gradient flow in the space of probability distributions over Ω. Further, when the bump width δ tends to 0, this gradient flow has a limit which is a viscous porous medium equation. Remarkably, the cost function optimized by this gradient flow exhibits a special property known as displacement convexity, which implies exponential convergence rates for N→∞, δ→0. Surprisingly, this asymptotic theory appears to capture well the behavior for moderate values of δ,N. 
Explaining this phenomenon, and understanding the dependence on δ,N in a quantitative manner remains an outstanding challenge.","lang":"eng"}],"day":"11","doi":"10.1214/20-AOS1945","arxiv":1,"language":[{"iso":"eng"}],"publication":"Annals of Statistics","month":"12","oa_version":"Preprint","status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","main_file_link":[{"url":"https://arxiv.org/abs/1901.01375","open_access":"1"}],"type":"journal_article","date_published":"2020-12-11T00:00:00Z","oa":1,"publication_identifier":{"issn":["1932-6157"],"eissn":["1941-7330"]}},{"file":[{"file_id":"9217","creator":"dernst","relation":"main_file","access_level":"open_access","success":1,"date_updated":"2021-03-02T15:38:14Z","file_name":"2020_PMLR_Shevchenko.pdf","content_type":"application/pdf","date_created":"2021-03-02T15:38:14Z","file_size":5336380,"checksum":"f042c8d4316bd87c6361aa76f1fbdbbe"}],"status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_published":"2020-07-13T00:00:00Z","type":"conference","oa":1,"language":[{"iso":"eng"}],"publication":"Proceedings of the 37th International Conference on Machine Learning","has_accepted_license":"1","oa_version":"Published Version","project":[{"_id":"059876FA-7A3F-11EA-A408-12923DDC885E","name":"Prix Lopez-Loretta 2019 - Marco Mondelli"}],"month":"07","volume":119,"acknowledgement":"M. Mondelli was partially supported by the 2019 LopezLoreta Prize. The authors thank Phan-Minh Nguyen for helpful discussions and the IST Distributed Algorithms and Systems Lab for providing computational resources.","ddc":["000"],"date_updated":"2024-09-10T13:03:19Z","year":"2020","citation":{"chicago":"Shevchenko, Alexander, and Marco Mondelli. “Landscape Connectivity and Dropout Stability of SGD Solutions for Over-Parameterized Neural Networks.” In <i>Proceedings of the 37th International Conference on Machine Learning</i>, 119:8773–84. ML Research Press, 2020.","ieee":"A. Shevchenko and M. 
Mondelli, “Landscape connectivity and dropout stability of SGD solutions for over-parameterized neural networks,” in <i>Proceedings of the 37th International Conference on Machine Learning</i>, 2020, vol. 119, pp. 8773–8784.","apa":"Shevchenko, A., &#38; Mondelli, M. (2020). Landscape connectivity and dropout stability of SGD solutions for over-parameterized neural networks. In <i>Proceedings of the 37th International Conference on Machine Learning</i> (Vol. 119, pp. 8773–8784). ML Research Press.","ama":"Shevchenko A, Mondelli M. Landscape connectivity and dropout stability of SGD solutions for over-parameterized neural networks. In: <i>Proceedings of the 37th International Conference on Machine Learning</i>. Vol 119. ML Research Press; 2020:8773-8784.","ista":"Shevchenko A, Mondelli M. 2020. Landscape connectivity and dropout stability of SGD solutions for over-parameterized neural networks. Proceedings of the 37th International Conference on Machine Learning. vol. 119, 8773–8784.","short":"A. Shevchenko, M. Mondelli, in:, Proceedings of the 37th International Conference on Machine Learning, ML Research Press, 2020, pp. 8773–8784.","mla":"Shevchenko, Alexander, and Marco Mondelli. “Landscape Connectivity and Dropout Stability of SGD Solutions for Over-Parameterized Neural Networks.” <i>Proceedings of the 37th International Conference on Machine Learning</i>, vol. 119, ML Research Press, 2020, pp. 8773–84."},"external_id":{"arxiv":["1912.10095"]},"arxiv":1,"day":"13","abstract":[{"text":"The optimization of multilayer neural networks typically leads to a solution\r\nwith zero training error, yet the landscape can exhibit spurious local minima\r\nand the minima can be disconnected. In this paper, we shed light on this\r\nphenomenon: we show that the combination of stochastic gradient descent (SGD)\r\nand over-parameterization makes the landscape of multilayer neural networks\r\napproximately connected and thus more favorable to optimization. 
More\r\nspecifically, we prove that SGD solutions are connected via a piecewise linear\r\npath, and the increase in loss along this path vanishes as the number of\r\nneurons grows large. This result is a consequence of the fact that the\r\nparameters found by SGD are increasingly dropout stable as the network becomes\r\nwider. We show that, if we remove part of the neurons (and suitably rescale the\r\nremaining ones), the change in loss is independent of the total number of\r\nneurons, and it depends only on how many neurons are left. Our results exhibit\r\na mild dependence on the input dimension: they are dimension-free for two-layer\r\nnetworks and depend linearly on the dimension for multilayer networks. We\r\nvalidate our theoretical findings with numerical experiments for different\r\narchitectures and classification tasks.","lang":"eng"}],"page":"8773-8784","quality_controlled":"1","file_date_updated":"2021-03-02T15:38:14Z","publisher":"ML Research Press","_id":"9198","author":[{"full_name":"Shevchenko, Alexander","first_name":"Alexander","last_name":"Shevchenko"},{"id":"27EB676C-8706-11E9-9510-7717E6697425","full_name":"Mondelli, Marco","orcid":"0000-0002-3242-7020","last_name":"Mondelli","first_name":"Marco"}],"publication_status":"published","article_processing_charge":"No","department":[{"_id":"MaMo"}],"date_created":"2021-02-25T09:36:22Z","title":"Landscape connectivity and dropout stability of SGD solutions for over-parameterized neural networks","intvolume":"       119"},{"author":[{"full_name":"Nguyen, Quynh","last_name":"Nguyen","first_name":"Quynh"},{"orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco","first_name":"Marco","last_name":"Mondelli","id":"27EB676C-8706-11E9-9510-7717E6697425"}],"_id":"9221","intvolume":"        33","title":"Global convergence of deep networks with one wide layer followed by pyramidal 
topology","date_created":"2021-03-03T12:06:02Z","article_processing_charge":"No","department":[{"_id":"MaMo"}],"publication_status":"published","quality_controlled":"1","page":"11961–11972","publisher":"Curran Associates","external_id":{"arxiv":["2002.07867"]},"year":"2020","citation":{"short":"Q. Nguyen, M. Mondelli, in:, 34th Conference on Neural Information Processing Systems, Curran Associates, 2020, pp. 11961–11972.","mla":"Nguyen, Quynh, and Marco Mondelli. “Global Convergence of Deep Networks with One Wide Layer Followed by Pyramidal Topology.” <i>34th Conference on Neural Information Processing Systems</i>, vol. 33, Curran Associates, 2020, pp. 11961–11972.","ista":"Nguyen Q, Mondelli M. 2020. Global convergence of deep networks with one wide layer followed by pyramidal topology. 34th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 33, 11961–11972.","apa":"Nguyen, Q., &#38; Mondelli, M. (2020). Global convergence of deep networks with one wide layer followed by pyramidal topology. In <i>34th Conference on Neural Information Processing Systems</i> (Vol. 33, pp. 11961–11972). Vancouver, Canada: Curran Associates.","ama":"Nguyen Q, Mondelli M. Global convergence of deep networks with one wide layer followed by pyramidal topology. In: <i>34th Conference on Neural Information Processing Systems</i>. Vol 33. Curran Associates; 2020:11961–11972.","ieee":"Q. Nguyen and M. Mondelli, “Global convergence of deep networks with one wide layer followed by pyramidal topology,” in <i>34th Conference on Neural Information Processing Systems</i>, Vancouver, Canada, 2020, vol. 33, pp. 11961–11972.","chicago":"Nguyen, Quynh, and Marco Mondelli. “Global Convergence of Deep Networks with One Wide Layer Followed by Pyramidal Topology.” In <i>34th Conference on Neural Information Processing Systems</i>, 33:11961–11972. 
Curran Associates, 2020."},"date_updated":"2024-09-10T13:03:17Z","abstract":[{"text":"Recent works have shown that gradient descent can find a global minimum for over-parameterized neural networks where the widths of all the hidden layers scale polynomially with N (N being the number of training samples). In this paper, we prove that, for deep networks, a single layer of width N following the input layer suffices to ensure a similar guarantee. In particular, all the remaining layers are allowed to have constant widths, and form a pyramidal topology. We show an application of our result to the widely used LeCun’s initialization and obtain an over-parameterization requirement for the single wide layer of order N2.\r\n","lang":"eng"}],"day":"07","arxiv":1,"volume":33,"acknowledgement":"The authors would like to thank Jan Maas, Mahdi Soltanolkotabi, and Daniel Soudry for the helpful discussions, Marius Kloft, Matthias Hein and Quoc Dinh Tran for proofreading portions of a prior version of this paper, and James Martens for a clarification concerning LeCun’s initialization. M. Mondelli was partially supported by the 2019 Lopez-Loreta Prize. Q. 
Nguyen was partially supported by the German Research Foundation (DFG) award KL 2698/2-1.","publication":"34th Conference on Neural Information Processing Systems","month":"07","project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"oa_version":"Preprint","language":[{"iso":"eng"}],"conference":{"name":"NeurIPS: Neural Information Processing Systems","start_date":"2020-12-06","location":"Vancouver, Canada","end_date":"2020-12-12"},"type":"conference","date_published":"2020-07-07T00:00:00Z","oa":1,"user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","status":"public","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2002.07867"}]},{"oa":1,"publication_identifier":{"issn":["1053587X"]},"date_published":"2019-11-15T00:00:00Z","type":"journal_article","user_id":"D865714E-FA4E-11E9-B85B-F5C5E5697425","status":"public","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/1903.09203"}],"month":"11","article_number":"8854897","oa_version":"Preprint","publication":"IEEE Transactions on Signal Processing","language":[{"iso":"eng"}],"abstract":[{"lang":"eng","text":"Polar codes have gained extensive attention during the past few years and recently they have been selected for the next generation of wireless communications standards (5G). Successive-cancellation-based (SC-based) decoders, such as SC list (SCL) and SC flip (SCF), provide a reasonable error performance for polar codes at the cost of low decoding speed. Fast SC-based decoders, such as Fast-SSC, Fast-SSCL, and Fast-SSCF, identify the special constituent codes in a polar code graph off-line, produce a list of operations, store the list in memory, and feed the list to the decoder to decode the constituent codes in order efficiently, thus increasing the decoding speed. However, the list of operations is dependent on the code rate and as the rate changes, a new list is produced, making fast SC-based decoders not rate-flexible. 
In this paper, we propose a completely rate-flexible fast SC-based decoder by creating the list of operations directly in hardware, with low implementation complexity. We further propose a hardware architecture implementing the proposed method and show that the area occupation of the rate-flexible fast SC-based decoder in this paper is only 38% of the total area of the memory-based base-line decoder when 5G code rates are supported. "}],"doi":"10.1109/TSP.2019.2944738","arxiv":1,"day":"15","external_id":{"arxiv":["1903.09203"]},"date_updated":"2021-01-12T08:08:51Z","year":"2019","citation":{"chicago":"Hashemi, Seyyed Ali, Carlo Condo, Marco Mondelli, and Warren J Gross. “Rate-Flexible Fast Polar Decoders.” <i>IEEE Transactions on Signal Processing</i>. IEEE, 2019. <a href=\"https://doi.org/10.1109/TSP.2019.2944738\">https://doi.org/10.1109/TSP.2019.2944738</a>.","ieee":"S. A. Hashemi, C. Condo, M. Mondelli, and W. J. Gross, “Rate-flexible fast polar decoders,” <i>IEEE Transactions on Signal Processing</i>, vol. 67, no. 22. IEEE, 2019.","apa":"Hashemi, S. A., Condo, C., Mondelli, M., &#38; Gross, W. J. (2019). Rate-flexible fast polar decoders. <i>IEEE Transactions on Signal Processing</i>. IEEE. <a href=\"https://doi.org/10.1109/TSP.2019.2944738\">https://doi.org/10.1109/TSP.2019.2944738</a>","ama":"Hashemi SA, Condo C, Mondelli M, Gross WJ. Rate-flexible fast polar decoders. <i>IEEE Transactions on Signal Processing</i>. 2019;67(22). doi:<a href=\"https://doi.org/10.1109/TSP.2019.2944738\">10.1109/TSP.2019.2944738</a>","ista":"Hashemi SA, Condo C, Mondelli M, Gross WJ. 2019. Rate-flexible fast polar decoders. IEEE Transactions on Signal Processing. 67(22), 8854897.","short":"S.A. Hashemi, C. Condo, M. Mondelli, W.J. Gross, IEEE Transactions on Signal Processing 67 (2019).","mla":"Hashemi, Seyyed Ali, et al. “Rate-Flexible Fast Polar Decoders.” <i>IEEE Transactions on Signal Processing</i>, vol. 67, no. 
22, 8854897, IEEE, 2019, doi:<a href=\"https://doi.org/10.1109/TSP.2019.2944738\">10.1109/TSP.2019.2944738</a>."},"volume":67,"title":"Rate-flexible fast polar decoders","intvolume":"        67","publication_status":"published","article_processing_charge":"No","department":[{"_id":"MaMo"}],"date_created":"2019-07-31T09:51:14Z","author":[{"first_name":"Seyyed Ali","last_name":"Hashemi","full_name":"Hashemi, Seyyed Ali"},{"full_name":"Condo, Carlo","last_name":"Condo","first_name":"Carlo"},{"full_name":"Mondelli, Marco","orcid":"0000-0002-3242-7020","last_name":"Mondelli","first_name":"Marco","id":"27EB676C-8706-11E9-9510-7717E6697425"},{"last_name":"Gross","first_name":"Warren J","full_name":"Gross, Warren J"}],"issue":"22","_id":"6750","scopus_import":1,"article_type":"original","publisher":"IEEE","quality_controlled":"1"},{"oa_version":"Published Version","month":"10","article_number":"218","publication":"Algorithms","has_accepted_license":"1","language":[{"iso":"eng"}],"publication_identifier":{"issn":["1999-4893"]},"oa":1,"tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"date_published":"2019-10-18T00:00:00Z","type":"journal_article","file":[{"date_created":"2019-11-12T14:48:45Z","file_size":696791,"checksum":"267756d8f9db572f496cd1663c89d59a","date_updated":"2020-07-14T12:47:47Z","file_name":"2019_Algorithms_Mondelli.pdf","content_type":"application/pdf","access_level":"open_access","relation":"main_file","file_id":"7008","creator":"dernst"}],"status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","related_material":{"record":[{"status":"public","id":"6675","relation":"earlier_version"}]},"publication_status":"published","date_created":"2019-11-12T14:46:19Z","department":[{"_id":"MaMo"}],"title":"A new coding paradigm for the primitive relay channel","intvolume":"        
12","_id":"7007","scopus_import":1,"author":[{"id":"27EB676C-8706-11E9-9510-7717E6697425","full_name":"Mondelli, Marco","orcid":"0000-0002-3242-7020","last_name":"Mondelli","first_name":"Marco"},{"last_name":"Hassani","first_name":"S. Hamed","full_name":"Hassani, S. Hamed"},{"full_name":"Urbanke, Rüdiger","last_name":"Urbanke","first_name":"Rüdiger"}],"issue":"10","publisher":"MDPI","article_type":"original","quality_controlled":"1","file_date_updated":"2020-07-14T12:47:47Z","doi":"10.3390/a12100218","arxiv":1,"day":"18","abstract":[{"text":"We consider the primitive relay channel, where the source sends a message to the relay and to the destination, and the relay helps the communication by transmitting an additional message to the destination via a separate channel. Two well-known coding techniques have been introduced for this setting: decode-and-forward and compress-and-forward. In decode-and-forward, the relay completely decodes the message and sends some information to the destination; in compress-and-forward, the relay does not decode, and it sends a compressed version of the received signal to the destination using Wyner–Ziv coding. In this paper, we present a novel coding paradigm that provides an improved achievable rate for the primitive relay channel. The idea is to combine compress-and-forward and decode-and-forward via a chaining construction. We transmit over pairs of blocks: in the first block, we use compress-and-forward; and, in the second block, we use decode-and-forward. More specifically, in the first block, the relay does not decode, it compresses the received signal via Wyner–Ziv, and it sends only part of the compression to the destination. In the second block, the relay completely decodes the message, it sends some information to the destination, and it also sends the remaining part of the compression coming from the first block. By doing so, we are able to strictly outperform both compress-and-forward and decode-and-forward. 
Note that the proposed coding scheme can be implemented with polar codes. As such, it has the typical attractive properties of polar coding schemes, namely, quasi-linear encoding and decoding complexity, and error probability that decays at super-polynomial speed. As a running example, we take into account the special case of the erasure relay channel, and we provide a comparison between the rates achievable by our proposed scheme and the existing upper and lower bounds.","lang":"eng"}],"date_updated":"2023-02-23T12:49:28Z","citation":{"ista":"Mondelli M, Hassani SH, Urbanke R. 2019. A new coding paradigm for the primitive relay channel. Algorithms. 12(10), 218.","mla":"Mondelli, Marco, et al. “A New Coding Paradigm for the Primitive Relay Channel.” <i>Algorithms</i>, vol. 12, no. 10, 218, MDPI, 2019, doi:<a href=\"https://doi.org/10.3390/a12100218\">10.3390/a12100218</a>.","short":"M. Mondelli, S.H. Hassani, R. Urbanke, Algorithms 12 (2019).","chicago":"Mondelli, Marco, S. Hamed Hassani, and Rüdiger Urbanke. “A New Coding Paradigm for the Primitive Relay Channel.” <i>Algorithms</i>. MDPI, 2019. <a href=\"https://doi.org/10.3390/a12100218\">https://doi.org/10.3390/a12100218</a>.","ieee":"M. Mondelli, S. H. Hassani, and R. Urbanke, “A new coding paradigm for the primitive relay channel,” <i>Algorithms</i>, vol. 12, no. 10. MDPI, 2019.","apa":"Mondelli, M., Hassani, S. H., &#38; Urbanke, R. (2019). A new coding paradigm for the primitive relay channel. <i>Algorithms</i>. MDPI. <a href=\"https://doi.org/10.3390/a12100218\">https://doi.org/10.3390/a12100218</a>","ama":"Mondelli M, Hassani SH, Urbanke R. A new coding paradigm for the primitive relay channel. <i>Algorithms</i>. 2019;12(10). doi:<a href=\"https://doi.org/10.3390/a12100218\">10.3390/a12100218</a>"},"year":"2019","external_id":{"arxiv":["1801.03153"]},"volume":12,"ddc":["510"]}]
