[{"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","status":"public","volume":2022,"type":"conference","date_published":"2022-08-03T00:00:00Z","citation":{"short":"Y. Zhang, S. Vatedka, in:, 2022 IEEE International Symposium on Information Theory, IEEE, 2022, pp. 3085–3090.","mla":"Zhang, Yihan, and Shashank Vatedka. “Lower Bounds for Multiple Packing.” <i>2022 IEEE International Symposium on Information Theory</i>, vol. 2022, IEEE, 2022, pp. 3085–90, doi:<a href=\"https://doi.org/10.1109/ISIT50566.2022.9834443\">10.1109/ISIT50566.2022.9834443</a>.","ista":"Zhang Y, Vatedka S. 2022. Lower bounds for multiple packing. 2022 IEEE International Symposium on Information Theory. ISIT: Internation Symposium on Information Theory vol. 2022, 3085–3090.","ama":"Zhang Y, Vatedka S. Lower bounds for multiple packing. In: <i>2022 IEEE International Symposium on Information Theory</i>. Vol 2022. IEEE; 2022:3085-3090. doi:<a href=\"https://doi.org/10.1109/ISIT50566.2022.9834443\">10.1109/ISIT50566.2022.9834443</a>","apa":"Zhang, Y., &#38; Vatedka, S. (2022). Lower bounds for multiple packing. In <i>2022 IEEE International Symposium on Information Theory</i> (Vol. 2022, pp. 3085–3090). Espoo, Finland: IEEE. <a href=\"https://doi.org/10.1109/ISIT50566.2022.9834443\">https://doi.org/10.1109/ISIT50566.2022.9834443</a>","chicago":"Zhang, Yihan, and Shashank Vatedka. “Lower Bounds for Multiple Packing.” In <i>2022 IEEE International Symposium on Information Theory</i>, 2022:3085–90. IEEE, 2022. <a href=\"https://doi.org/10.1109/ISIT50566.2022.9834443\">https://doi.org/10.1109/ISIT50566.2022.9834443</a>.","ieee":"Y. Zhang and S. Vatedka, “Lower bounds for multiple packing,” in <i>2022 IEEE International Symposium on Information Theory</i>, Espoo, Finland, 2022, vol. 2022, pp. 3085–3090."},"year":"2022","date_updated":"2022-09-05T10:39:04Z","abstract":[{"lang":"eng","text":"We study the problem of high-dimensional multiple packing in Euclidean space. 
Multiple packing is a natural generalization of sphere packing and is defined as follows. Let P, N > 0 and L∈Z≥2. A multiple packing is a set C of points in Bn(0–,nP−−−√) such that any point in ℝ n lies in the intersection of at most L – 1 balls of radius nN−−−√ around points in C. 1 In this paper, we derive two lower bounds on the largest possible density of a multiple packing. These bounds are obtained through a stronger notion called average-radius multiple packing. Specifically, we exactly pin down the asymptotics of (expurgated) Gaussian codes and (expurgated) spherical codes under average-radius multiple packing. To this end, we apply tools from high-dimensional geometry and large deviation theory. The bound for spherical codes matches the previous best known bound which was obtained for the standard (weaker) notion of multiple packing through a curious connection with error exponents [Bli99], [ZV21]. The bound for Gaussian codes suggests that they are strictly inferior to spherical codes."}],"publication_identifier":{"isbn":["9781665421591"],"issn":["2157-8095"]},"day":"03","doi":"10.1109/ISIT50566.2022.9834443","language":[{"iso":"eng"}],"quality_controlled":"1","page":"3085-3090","conference":{"start_date":"2022-06-26","name":"ISIT: Internation Symposium on Information Theory","location":"Espoo, Finland","end_date":"2022-07-01"},"publisher":"IEEE","author":[{"first_name":"Yihan","last_name":"Zhang","full_name":"Zhang, Yihan","id":"2ce5da42-b2ea-11eb-bba5-9f264e9d002c"},{"full_name":"Vatedka, Shashank","last_name":"Vatedka","first_name":"Shashank"}],"scopus_import":"1","publication":"2022 IEEE International Symposium on Information Theory","_id":"12015","intvolume":"      2022","title":"Lower bounds for multiple packing","month":"08","date_created":"2022-09-04T22:02:05Z","department":[{"_id":"MaMo"}],"article_processing_charge":"No","publication_status":"published","oa_version":"None"},{"publication":"2022 IEEE International Symposium on Information 
Theory","month":"08","oa_version":"Preprint","project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"language":[{"iso":"eng"}],"conference":{"name":"ISIT: Internation Symposium on Information Theory","start_date":"2022-06-26","location":"Espoo, Finland","end_date":"2022-07-01"},"date_published":"2022-08-03T00:00:00Z","type":"conference","oa":1,"publication_identifier":{"issn":["2157-8095"],"isbn":["9781665421591"]},"status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2201.10082"}],"author":[{"first_name":"Dorsa","last_name":"Fathollahi","full_name":"Fathollahi, Dorsa"},{"id":"27EB676C-8706-11E9-9510-7717E6697425","full_name":"Mondelli, Marco","orcid":"0000-0002-3242-7020","last_name":"Mondelli","first_name":"Marco"}],"_id":"12016","scopus_import":"1","title":"Polar coded computing: The role of the scaling exponent","intvolume":"      2022","publication_status":"published","department":[{"_id":"MaMo"}],"date_created":"2022-09-04T22:02:05Z","article_processing_charge":"No","page":"2154-2159","quality_controlled":"1","publisher":"IEEE","external_id":{"arxiv":["2201.10082"]},"date_updated":"2024-09-10T13:03:17Z","year":"2022","citation":{"apa":"Fathollahi, D., &#38; Mondelli, M. (2022). Polar coded computing: The role of the scaling exponent. In <i>2022 IEEE International Symposium on Information Theory</i> (Vol. 2022, pp. 2154–2159). Espoo, Finland: IEEE. <a href=\"https://doi.org/10.1109/ISIT50566.2022.9834712\">https://doi.org/10.1109/ISIT50566.2022.9834712</a>","ama":"Fathollahi D, Mondelli M. Polar coded computing: The role of the scaling exponent. In: <i>2022 IEEE International Symposium on Information Theory</i>. Vol 2022. IEEE; 2022:2154-2159. doi:<a href=\"https://doi.org/10.1109/ISIT50566.2022.9834712\">10.1109/ISIT50566.2022.9834712</a>","chicago":"Fathollahi, Dorsa, and Marco Mondelli. 
“Polar Coded Computing: The Role of the Scaling Exponent.” In <i>2022 IEEE International Symposium on Information Theory</i>, 2022:2154–59. IEEE, 2022. <a href=\"https://doi.org/10.1109/ISIT50566.2022.9834712\">https://doi.org/10.1109/ISIT50566.2022.9834712</a>.","ieee":"D. Fathollahi and M. Mondelli, “Polar coded computing: The role of the scaling exponent,” in <i>2022 IEEE International Symposium on Information Theory</i>, Espoo, Finland, 2022, vol. 2022, pp. 2154–2159.","short":"D. Fathollahi, M. Mondelli, in:, 2022 IEEE International Symposium on Information Theory, IEEE, 2022, pp. 2154–2159.","mla":"Fathollahi, Dorsa, and Marco Mondelli. “Polar Coded Computing: The Role of the Scaling Exponent.” <i>2022 IEEE International Symposium on Information Theory</i>, vol. 2022, IEEE, 2022, pp. 2154–59, doi:<a href=\"https://doi.org/10.1109/ISIT50566.2022.9834712\">10.1109/ISIT50566.2022.9834712</a>.","ista":"Fathollahi D, Mondelli M. 2022. Polar coded computing: The role of the scaling exponent. 2022 IEEE International Symposium on Information Theory. ISIT: Internation Symposium on Information Theory vol. 2022, 2154–2159."},"abstract":[{"lang":"eng","text":"We consider the problem of coded distributed computing using polar codes. The average execution time of a coded computing system is related to the error probability for transmission over the binary erasure channel in recent work by Soleymani, Jamali and Mahdavifar, where the performance of binary linear codes is investigated. In this paper, we focus on polar codes and unveil a connection between the average execution time and the scaling exponent μ of the family of codes. In the finite-length characterization of polar codes, the scaling exponent is a key object capturing the speed of convergence to capacity. 
In particular, we show that (i) the gap between the normalized average execution time of polar codes and that of optimal MDS codes is O(n –1/μ ), and (ii) this upper bound can be improved to roughly O(n –1/2 ) by considering polar codes with large kernels. We conjecture that these bounds could be improved to O(n –2/μ ) and O(n –1 ), respectively, and provide a heuristic argument as well as numerical evidence supporting this view."}],"arxiv":1,"doi":"10.1109/ISIT50566.2022.9834712","day":"03","acknowledgement":"D. Fathollahi and M. Mondelli were partially supported by the 2019 Lopez-Loreta Prize. The authors thank Hamed Hassani and Hessam Mahdavifar for helpful discussions.","volume":2022},{"day":"03","publication_identifier":{"issn":["2157-8095"],"isbn":["9781665421591"]},"doi":"10.1109/ISIT50566.2022.9834632","abstract":[{"lang":"eng","text":"In the classic adversarial communication problem, two parties communicate over a noisy channel in the presence of a malicious jamming adversary. The arbitrarily varying channels (AVCs) offer an elegant framework to study a wide range of interesting adversary models. The optimal throughput or capacity over such AVCs is intimately tied to the underlying adversary model; in some cases, capacity is unknown and the problem is known to be notoriously hard. The omniscient adversary, one which knows the sender’s entire channel transmission a priori, is one of such classic models of interest; the capacity under such an adversary remains an exciting open problem. The myopic adversary is a generalization of that model where the adversary’s observation may be corrupted over a noisy discrete memoryless channel. 
Through the adversary’s myopicity, one can unify the slew of different adversary models, ranging from the omniscient adversary to one that is completely blind to the transmission (the latter is the well known oblivious model where the capacity is fully characterized).In this work, we present new results on the capacity under both the omniscient and myopic adversary models. We completely characterize the positive capacity threshold over general AVCs with omniscient adversaries. The characterization is in terms of two key combinatorial objects: the set of completely positive distributions and the CP-confusability set. For omniscient AVCs with positive capacity, we present non-trivial lower and upper bounds on the capacity; unlike some of the previous bounds, our bounds hold under fairly general input and jamming constraints. Our lower bound improves upon the generalized Gilbert-Varshamov bound for general AVCs while the upper bound generalizes the well known Elias-Bassalygo bound (known for binary and q-ary alphabets). For the myopic AVCs, we build on prior results known for the so-called sufficiently myopic model, and present new results on the positive rate communication threshold over the so-called insufficiently myopic regime (a completely insufficient myopic adversary specializes to an omniscient adversary). We present interesting examples for the widely studied models of adversarial bit-flip and bit-erasure channels. In fact, for the bit-flip AVC with additive adversarial noise as well as random noise, we completely characterize the omniscient model capacity when the random noise is sufficiently large vis-a-vis the adversary’s budget."}],"citation":{"ama":"Yadav AK, Alimohammadi M, Zhang Y, Budkuley AJ, Jaggi S. New results on AVCs with omniscient and myopic adversaries. In: <i>2022 IEEE International Symposium on Information Theory</i>. Vol 2022. Institute of Electrical and Electronics Engineers; 2022:2535-2540. 
doi:<a href=\"https://doi.org/10.1109/ISIT50566.2022.9834632\">10.1109/ISIT50566.2022.9834632</a>","apa":"Yadav, A. K., Alimohammadi, M., Zhang, Y., Budkuley, A. J., &#38; Jaggi, S. (2022). New results on AVCs with omniscient and myopic adversaries. In <i>2022 IEEE International Symposium on Information Theory</i> (Vol. 2022, pp. 2535–2540). Espoo, Finland: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/ISIT50566.2022.9834632\">https://doi.org/10.1109/ISIT50566.2022.9834632</a>","ieee":"A. K. Yadav, M. Alimohammadi, Y. Zhang, A. J. Budkuley, and S. Jaggi, “New results on AVCs with omniscient and myopic adversaries,” in <i>2022 IEEE International Symposium on Information Theory</i>, Espoo, Finland, 2022, vol. 2022, pp. 2535–2540.","chicago":"Yadav, Anuj Kumar, Mohammadreza Alimohammadi, Yihan Zhang, Amitalok J. Budkuley, and Sidharth Jaggi. “New Results on AVCs with Omniscient and Myopic Adversaries.” In <i>2022 IEEE International Symposium on Information Theory</i>, 2022:2535–40. Institute of Electrical and Electronics Engineers, 2022. <a href=\"https://doi.org/10.1109/ISIT50566.2022.9834632\">https://doi.org/10.1109/ISIT50566.2022.9834632</a>.","short":"A.K. Yadav, M. Alimohammadi, Y. Zhang, A.J. Budkuley, S. Jaggi, in:, 2022 IEEE International Symposium on Information Theory, Institute of Electrical and Electronics Engineers, 2022, pp. 2535–2540.","mla":"Yadav, Anuj Kumar, et al. “New Results on AVCs with Omniscient and Myopic Adversaries.” <i>2022 IEEE International Symposium on Information Theory</i>, vol. 2022, Institute of Electrical and Electronics Engineers, 2022, pp. 2535–40, doi:<a href=\"https://doi.org/10.1109/ISIT50566.2022.9834632\">10.1109/ISIT50566.2022.9834632</a>.","ista":"Yadav AK, Alimohammadi M, Zhang Y, Budkuley AJ, Jaggi S. 2022. New results on AVCs with omniscient and myopic adversaries. 2022 IEEE International Symposium on Information Theory. ISIT: Internation Symposium on Information Theory vol. 
2022, 2535–2540."},"year":"2022","date_updated":"2023-02-13T09:00:14Z","type":"conference","date_published":"2022-08-03T00:00:00Z","volume":2022,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","status":"public","article_processing_charge":"No","date_created":"2022-09-04T22:02:06Z","department":[{"_id":"MaMo"}],"oa_version":"None","publication_status":"published","intvolume":"2022","title":"New results on AVCs with omniscient and myopic adversaries","month":"08","scopus_import":"1","publication":"2022 IEEE International Symposium on Information Theory","_id":"12017","author":[{"full_name":"Yadav, Anuj Kumar","last_name":"Yadav","first_name":"Anuj Kumar"},{"full_name":"Alimohammadi, Mohammadreza","last_name":"Alimohammadi","first_name":"Mohammadreza"},{"id":"2ce5da42-b2ea-11eb-bba5-9f264e9d002c","full_name":"Zhang, Yihan","last_name":"Zhang","first_name":"Yihan"},{"first_name":"Amitalok J.","last_name":"Budkuley","full_name":"Budkuley, Amitalok J."},{"full_name":"Jaggi, Sidharth","first_name":"Sidharth","last_name":"Jaggi"}],"publisher":"Institute of Electrical and Electronics Engineers","conference":{"start_date":"2022-06-26","name":"ISIT: International Symposium on Information Theory","end_date":"2022-07-01","location":"Espoo, Finland"},"quality_controlled":"1","page":"2535-2540","language":[{"iso":"eng"}]},{"month":"08","title":"Lower bounds on list decoding capacity using error exponents","intvolume":"2022","oa_version":"None","publication_status":"published","department":[{"_id":"MaMo"}],"date_created":"2022-09-04T22:02:06Z","article_processing_charge":"No","author":[{"full_name":"Zhang, Yihan","first_name":"Yihan","last_name":"Zhang","id":"2ce5da42-b2ea-11eb-bba5-9f264e9d002c"},{"full_name":"Vatedka, Shashank","last_name":"Vatedka","first_name":"Shashank"}],"publication":"2022 IEEE International Symposium on Information Theory","_id":"12018","scopus_import":"1","conference":{"end_date":"2022-07-01","location":"Espoo, 
Finland","start_date":"2022-06-26","name":"ISIT: Internation Symposium on Information Theory"},"publisher":"Institute of Electrical and Electronics Engineers","language":[{"iso":"eng"}],"page":"1324-1329","quality_controlled":"1","abstract":[{"lang":"eng","text":"We study the problem of characterizing the maximal rates of list decoding in Euclidean spaces for finite list sizes. For any positive integer L ≥ 2 and real N > 0, we say that a subset C⊂Rn is an (N,L – 1)-multiple packing or an (N,L– 1)-list decodable code if every Euclidean ball of radius nN−−−√ in ℝ n contains no more than L − 1 points of C. We study this problem with and without ℓ 2 norm constraints on C, and derive the best-known lower bounds on the maximal rate for (N,L−1) multiple packing. Our bounds are obtained via error exponents for list decoding over Additive White Gaussian Noise (AWGN) channels. We establish a curious inequality which relates the error exponent, a quantity of average-case nature, to the list-decoding radius, a quantity of worst-case nature. We derive various bounds on the error exponent for list decoding in both bounded and unbounded settings which could be of independent interest beyond multiple packing."}],"doi":"10.1109/ISIT50566.2022.9834815","day":"03","publication_identifier":{"issn":["2157-8095"],"isbn":["9781665421591"]},"date_published":"2022-08-03T00:00:00Z","type":"conference","date_updated":"2023-02-13T09:02:06Z","citation":{"ieee":"Y. Zhang and S. Vatedka, “Lower bounds on list decoding capacity using error exponents,” in <i>2022 IEEE International Symposium on Information Theory</i>, Espoo, Finland, 2022, vol. 2022, pp. 1324–1329.","chicago":"Zhang, Yihan, and Shashank Vatedka. “Lower Bounds on List Decoding Capacity Using Error Exponents.” In <i>2022 IEEE International Symposium on Information Theory</i>, 2022:1324–29. Institute of Electrical and Electronics Engineers, 2022. 
<a href=\"https://doi.org/10.1109/ISIT50566.2022.9834815\">https://doi.org/10.1109/ISIT50566.2022.9834815</a>.","ama":"Zhang Y, Vatedka S. Lower bounds on list decoding capacity using error exponents. In: <i>2022 IEEE International Symposium on Information Theory</i>. Vol 2022. Institute of Electrical and Electronics Engineers; 2022:1324-1329. doi:<a href=\"https://doi.org/10.1109/ISIT50566.2022.9834815\">10.1109/ISIT50566.2022.9834815</a>","apa":"Zhang, Y., &#38; Vatedka, S. (2022). Lower bounds on list decoding capacity using error exponents. In <i>2022 IEEE International Symposium on Information Theory</i> (Vol. 2022, pp. 1324–1329). Espoo, Finland: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/ISIT50566.2022.9834815\">https://doi.org/10.1109/ISIT50566.2022.9834815</a>","ista":"Zhang Y, Vatedka S. 2022. Lower bounds on list decoding capacity using error exponents. 2022 IEEE International Symposium on Information Theory. ISIT: Internation Symposium on Information Theory vol. 2022, 1324–1329.","mla":"Zhang, Yihan, and Shashank Vatedka. “Lower Bounds on List Decoding Capacity Using Error Exponents.” <i>2022 IEEE International Symposium on Information Theory</i>, vol. 2022, Institute of Electrical and Electronics Engineers, 2022, pp. 1324–29, doi:<a href=\"https://doi.org/10.1109/ISIT50566.2022.9834815\">10.1109/ISIT50566.2022.9834815</a>.","short":"Y. Zhang, S. Vatedka, in:, 2022 IEEE International Symposium on Information Theory, Institute of Electrical and Electronics Engineers, 2022, pp. 
1324–1329."},"year":"2022","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","status":"public","volume":2022},{"conference":{"end_date":"2022-07-01","location":"Espoo, Finland","start_date":"2022-06-26","name":"ISIT: International Symposium on Information Theory"},"publisher":"Institute of Electrical and Electronics Engineers","language":[{"iso":"eng"}],"quality_controlled":"1","page":"2553-2558","intvolume":"2022","title":"List-decodable zero-rate codes for the Z-channel","month":"08","department":[{"_id":"MaMo"}],"date_created":"2022-09-04T22:02:07Z","article_processing_charge":"No","oa_version":"None","publication_status":"published","author":[{"first_name":"Nikita","last_name":"Polyanskii","full_name":"Polyanskii, Nikita"},{"id":"2ce5da42-b2ea-11eb-bba5-9f264e9d002c","full_name":"Zhang, Yihan","last_name":"Zhang","first_name":"Yihan"}],"scopus_import":"1","_id":"12019","publication":"2022 IEEE International Symposium on Information Theory","status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","volume":2022,"abstract":[{"text":"This paper studies combinatorial properties of codes for the Z-channel. A Z-channel with error fraction τ takes as input a length-n binary codeword and injects in an adversarial manner up to nτ asymmetric errors, i.e., errors that only zero out bits but do not flip 0’s to 1’s. It is known that the largest (L − 1)-list-decodable code for the Z-channel with error fraction τ has exponential (in n) size if τ is less than a critical value that we call the Plotkin point and has constant size if τ is larger than the threshold. The (L−1)-list-decoding Plotkin point is known to be L−1L−1−L−LL−1. 
In this paper, we show that the largest (L−1)-list-decodable code ε-above the Plotkin point has size Θ L (ε −3/2 ) for any L − 1 ≥ 1.","lang":"eng"}],"publication_identifier":{"issn":["2157-8095"],"isbn":["9781665421591"]},"day":"03","doi":"10.1109/ISIT50566.2022.9834829","type":"conference","date_published":"2022-08-03T00:00:00Z","citation":{"mla":"Polyanskii, Nikita, and Yihan Zhang. “List-Decodable Zero-Rate Codes for the Z-Channel.” <i>2022 IEEE International Symposium on Information Theory</i>, vol. 2022, Institute of Electrical and Electronics Engineers, 2022, pp. 2553–58, doi:<a href=\"https://doi.org/10.1109/ISIT50566.2022.9834829\">10.1109/ISIT50566.2022.9834829</a>.","short":"N. Polyanskii, Y. Zhang, in:, 2022 IEEE International Symposium on Information Theory, Institute of Electrical and Electronics Engineers, 2022, pp. 2553–2558.","ista":"Polyanskii N, Zhang Y. 2022. List-decodable zero-rate codes for the Z-channel. 2022 IEEE International Symposium on Information Theory. ISIT: Internation Symposium on Information Theory vol. 2022, 2553–2558.","apa":"Polyanskii, N., &#38; Zhang, Y. (2022). List-decodable zero-rate codes for the Z-channel. In <i>2022 IEEE International Symposium on Information Theory</i> (Vol. 2022, pp. 2553–2558). Espoo, Finland: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/ISIT50566.2022.9834829\">https://doi.org/10.1109/ISIT50566.2022.9834829</a>","ama":"Polyanskii N, Zhang Y. List-decodable zero-rate codes for the Z-channel. In: <i>2022 IEEE International Symposium on Information Theory</i>. Vol 2022. Institute of Electrical and Electronics Engineers; 2022:2553-2558. doi:<a href=\"https://doi.org/10.1109/ISIT50566.2022.9834829\">10.1109/ISIT50566.2022.9834829</a>","chicago":"Polyanskii, Nikita, and Yihan Zhang. “List-Decodable Zero-Rate Codes for the Z-Channel.” In <i>2022 IEEE International Symposium on Information Theory</i>, 2022:2553–58. 
Institute of Electrical and Electronics Engineers, 2022. <a href=\"https://doi.org/10.1109/ISIT50566.2022.9834829\">https://doi.org/10.1109/ISIT50566.2022.9834829</a>.","ieee":"N. Polyanskii and Y. Zhang, “List-decodable zero-rate codes for the Z-channel,” in <i>2022 IEEE International Symposium on Information Theory</i>, Espoo, Finland, 2022, vol. 2022, pp. 2553–2558."},"year":"2022","date_updated":"2023-02-13T09:02:18Z"},{"isi":1,"external_id":{"arxiv":["2109.02122"],"isi":["000937284600006"]},"date_updated":"2023-08-04T09:34:43Z","citation":{"ista":"Doan N, Hashemi SA, Mondelli M, Gross WJ. 2022. Decoding Reed-Muller codes with successive codeword permutations. IEEE Transactions on Communications. 70(11), 7134–7145.","short":"N. Doan, S.A. Hashemi, M. Mondelli, W.J. Gross, IEEE Transactions on Communications 70 (2022) 7134–7145.","mla":"Doan, Nghia, et al. “Decoding Reed-Muller Codes with Successive Codeword Permutations.” <i>IEEE Transactions on Communications</i>, vol. 70, no. 11, Institute of Electrical and Electronics Engineers, 2022, pp. 7134–45, doi:<a href=\"https://doi.org/10.1109/tcomm.2022.3211101\">10.1109/tcomm.2022.3211101</a>.","chicago":"Doan, Nghia, Seyyed Ali Hashemi, Marco Mondelli, and Warren J. Gross. “Decoding Reed-Muller Codes with Successive Codeword Permutations.” <i>IEEE Transactions on Communications</i>. Institute of Electrical and Electronics Engineers, 2022. <a href=\"https://doi.org/10.1109/tcomm.2022.3211101\">https://doi.org/10.1109/tcomm.2022.3211101</a>.","ieee":"N. Doan, S. A. Hashemi, M. Mondelli, and W. J. Gross, “Decoding Reed-Muller codes with successive codeword permutations,” <i>IEEE Transactions on Communications</i>, vol. 70, no. 11. Institute of Electrical and Electronics Engineers, pp. 7134–7145, 2022.","ama":"Doan N, Hashemi SA, Mondelli M, Gross WJ. Decoding Reed-Muller codes with successive codeword permutations. <i>IEEE Transactions on Communications</i>. 2022;70(11):7134-7145. 
doi:<a href=\"https://doi.org/10.1109/tcomm.2022.3211101\">10.1109/tcomm.2022.3211101</a>","apa":"Doan, N., Hashemi, S. A., Mondelli, M., &#38; Gross, W. J. (2022). Decoding Reed-Muller codes with successive codeword permutations. <i>IEEE Transactions on Communications</i>. Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/tcomm.2022.3211101\">https://doi.org/10.1109/tcomm.2022.3211101</a>"},"year":"2022","abstract":[{"text":"A novel recursive list decoding (RLD) algorithm for Reed-Muller (RM) codes based on successive permutations (SP) of the codeword is presented. A low-complexity SP scheme applied to a subset of the symmetry group of RM codes is first proposed to carefully select a good codeword permutation on the fly. Then, the proposed SP technique is integrated into an improved RLD algorithm that initializes different decoding paths with random codeword permutations, which are sampled from the full symmetry group of RM codes. Finally, efficient latency and complexity reduction schemes are introduced that virtually preserve the error-correction performance of the proposed decoder. 
Simulation results demonstrate that at the target frame error rate of 10−3 for the RM code of length 256 with 163 information bits, the proposed decoder reduces 6% of the computational complexity and 22% of the decoding latency of the state-of-the-art semi-parallel simplified successive-cancellation decoder with fast Hadamard transform (SSC-FHT) that uses 96 permutations from the full symmetry group of RM codes, while relatively maintaining the error-correction performance and memory consumption of the semi-parallel permuted SSC-FHT decoder.","lang":"eng"}],"doi":"10.1109/tcomm.2022.3211101","arxiv":1,"day":"01","volume":70,"author":[{"first_name":"Nghia","last_name":"Doan","full_name":"Doan, Nghia"},{"first_name":"Seyyed Ali","last_name":"Hashemi","full_name":"Hashemi, Seyyed Ali"},{"orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco","first_name":"Marco","last_name":"Mondelli","id":"27EB676C-8706-11E9-9510-7717E6697425"},{"full_name":"Gross, Warren J.","last_name":"Gross","first_name":"Warren J."}],"issue":"11","_id":"12233","scopus_import":"1","title":"Decoding Reed-Muller codes with successive codeword permutations","intvolume":"70","publication_status":"published","date_created":"2023-01-16T09:50:38Z","department":[{"_id":"MaMo"}],"article_processing_charge":"No","page":"7134-7145","quality_controlled":"1","article_type":"original","publisher":"Institute of Electrical and Electronics Engineers","date_published":"2022-11-01T00:00:00Z","type":"journal_article","oa":1,"publication_identifier":{"issn":["0090-6778"],"eissn":["1558-0857"]},"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","status":"public","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2109.02122"}],"publication":"IEEE Transactions on Communications","month":"11","oa_version":"Preprint","language":[{"iso":"eng"}]},{"quality_controlled":"1","page":"4901-4948","publisher":"Institute of Electrical and Electronics 
Engineers","article_type":"original","scopus_import":"1","_id":"12273","issue":"8","author":[{"id":"2ce5da42-b2ea-11eb-bba5-9f264e9d002c","last_name":"Zhang","first_name":"Yihan","full_name":"Zhang, Yihan"},{"full_name":"Vatedka, Shashank","last_name":"Vatedka","first_name":"Shashank"},{"full_name":"Jaggi, Sidharth","first_name":"Sidharth","last_name":"Jaggi"},{"full_name":"Sarwate, Anand D.","first_name":"Anand D.","last_name":"Sarwate"}],"article_processing_charge":"No","department":[{"_id":"MaMo"}],"date_created":"2023-01-16T10:01:19Z","publication_status":"published","intvolume":"68","title":"Quadratically constrained myopic adversarial channels","volume":68,"year":"2022","citation":{"ista":"Zhang Y, Vatedka S, Jaggi S, Sarwate AD. 2022. Quadratically constrained myopic adversarial channels. IEEE Transactions on Information Theory. 68(8), 4901–4948.","short":"Y. Zhang, S. Vatedka, S. Jaggi, A.D. Sarwate, IEEE Transactions on Information Theory 68 (2022) 4901–4948.","mla":"Zhang, Yihan, et al. “Quadratically Constrained Myopic Adversarial Channels.” <i>IEEE Transactions on Information Theory</i>, vol. 68, no. 8, Institute of Electrical and Electronics Engineers, 2022, pp. 4901–48, doi:<a href=\"https://doi.org/10.1109/tit.2022.3167554\">10.1109/tit.2022.3167554</a>.","ieee":"Y. Zhang, S. Vatedka, S. Jaggi, and A. D. Sarwate, “Quadratically constrained myopic adversarial channels,” <i>IEEE Transactions on Information Theory</i>, vol. 68, no. 8. Institute of Electrical and Electronics Engineers, pp. 4901–4948, 2022.","chicago":"Zhang, Yihan, Shashank Vatedka, Sidharth Jaggi, and Anand D. Sarwate. “Quadratically Constrained Myopic Adversarial Channels.” <i>IEEE Transactions on Information Theory</i>. Institute of Electrical and Electronics Engineers, 2022. <a href=\"https://doi.org/10.1109/tit.2022.3167554\">https://doi.org/10.1109/tit.2022.3167554</a>.","ama":"Zhang Y, Vatedka S, Jaggi S, Sarwate AD. Quadratically constrained myopic adversarial channels. 
<i>IEEE Transactions on Information Theory</i>. 2022;68(8):4901-4948. doi:<a href=\"https://doi.org/10.1109/tit.2022.3167554\">10.1109/tit.2022.3167554</a>","apa":"Zhang, Y., Vatedka, S., Jaggi, S., &#38; Sarwate, A. D. (2022). Quadratically constrained myopic adversarial channels. <i>IEEE Transactions on Information Theory</i>. Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/tit.2022.3167554\">https://doi.org/10.1109/tit.2022.3167554</a>"},"date_updated":"2023-08-04T10:08:49Z","external_id":{"arxiv":["1801.05951"],"isi":["000838527100004"]},"isi":1,"day":"01","doi":"10.1109/tit.2022.3167554","arxiv":1,"abstract":[{"text":"We study communication in the presence of a jamming adversary where quadratic power constraints are imposed on the transmitter and the jammer. The jamming signal is allowed to be a function of the codebook, and a noncausal but noisy observation of the transmitted codeword. For a certain range of the noise-to-signal ratios (NSRs) of the transmitter and the jammer, we are able to characterize the capacity of this channel under deterministic encoding or stochastic encoding, i.e., with no common randomness between the encoder/decoder pair. For the remaining NSR regimes, we determine the capacity under the assumption of a small amount of common randomness (at most 2log(n) bits in one sub-regime, and at most Ω(n) bits in the other sub-regime) available to the encoder-decoder pair. Our proof techniques involve a novel myopic list-decoding result for achievability, and a Plotkin-type push attack for the converse in a subregion of the NSRs, both of which may be of independent interest. 
We also give bounds on the strong secrecy capacity of this channel assuming that the jammer is simultaneously eavesdropping.","lang":"eng"}],"language":[{"iso":"eng"}],"publication":"IEEE Transactions on Information Theory","oa_version":"Preprint","month":"08","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.1801.05951"}],"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","status":"public","type":"journal_article","date_published":"2022-08-01T00:00:00Z","publication_identifier":{"eissn":["1557-9654"],"issn":["0018-9448"]},"oa":1},{"status":"public","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","related_material":{"record":[{"status":"public","id":"10598","relation":"earlier_version"}]},"file":[{"date_created":"2023-02-02T08:35:52Z","file_size":1729997,"checksum":"01411ffa76d3e380a0446baeb89b1ef7","date_updated":"2023-02-02T08:35:52Z","content_type":"application/pdf","file_name":"2022_JourStatisticalMechanics_Mondelli.pdf","relation":"main_file","success":1,"access_level":"open_access","file_id":"12481","creator":"dernst"}],"date_published":"2022-11-24T00:00:00Z","type":"journal_article","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"oa":1,"publication_identifier":{"issn":["1742-5468"]},"language":[{"iso":"eng"}],"keyword":["Statistics","Probability and Uncertainty","Statistics and Probability","Statistical and Nonlinear Physics"],"publication":"Journal of Statistical Mechanics: Theory and Experiment","has_accepted_license":"1","month":"11","article_number":"114003","oa_version":"Published Version","project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"ddc":["510","530"],"acknowledgement":"The authors would like to thank Andrea Montanari for helpful discussions.\r\nM Mondelli was partially supported by the 2019 
Lopez-Loreta Prize. R Venkataramanan was partially supported by the Alan Turing Institute under the EPSRC Grant\r\nEP/N510129/1.","volume":2022,"isi":1,"external_id":{"isi":["000889589900001"]},"date_updated":"2024-03-07T10:36:52Z","year":"2022","citation":{"apa":"Mondelli, M., &#38; Venkataramanan, R. (2022). Approximate message passing with spectral initialization for generalized linear models. <i>Journal of Statistical Mechanics: Theory and Experiment</i>. IOP Publishing. <a href=\"https://doi.org/10.1088/1742-5468/ac9828\">https://doi.org/10.1088/1742-5468/ac9828</a>","ama":"Mondelli M, Venkataramanan R. Approximate message passing with spectral initialization for generalized linear models. <i>Journal of Statistical Mechanics: Theory and Experiment</i>. 2022;2022(11). doi:<a href=\"https://doi.org/10.1088/1742-5468/ac9828\">10.1088/1742-5468/ac9828</a>","ieee":"M. Mondelli and R. Venkataramanan, “Approximate message passing with spectral initialization for generalized linear models,” <i>Journal of Statistical Mechanics: Theory and Experiment</i>, vol. 2022, no. 11. IOP Publishing, 2022.","chicago":"Mondelli, Marco, and Ramji Venkataramanan. “Approximate Message Passing with Spectral Initialization for Generalized Linear Models.” <i>Journal of Statistical Mechanics: Theory and Experiment</i>. IOP Publishing, 2022. <a href=\"https://doi.org/10.1088/1742-5468/ac9828\">https://doi.org/10.1088/1742-5468/ac9828</a>.","short":"M. Mondelli, R. Venkataramanan, Journal of Statistical Mechanics: Theory and Experiment 2022 (2022).","mla":"Mondelli, Marco, and Ramji Venkataramanan. “Approximate Message Passing with Spectral Initialization for Generalized Linear Models.” <i>Journal of Statistical Mechanics: Theory and Experiment</i>, vol. 2022, no. 11, 114003, IOP Publishing, 2022, doi:<a href=\"https://doi.org/10.1088/1742-5468/ac9828\">10.1088/1742-5468/ac9828</a>.","ista":"Mondelli M, Venkataramanan R. 2022. 
Approximate message passing with spectral initialization for generalized linear models. Journal of Statistical Mechanics: Theory and Experiment. 2022(11), 114003."},"abstract":[{"lang":"eng","text":"We consider the problem of estimating a signal from measurements obtained via a generalized linear model. We focus on estimators based on approximate message passing (AMP), a family of iterative algorithms with many appealing features: the performance of AMP in the high-dimensional limit can be succinctly characterized under suitable model assumptions; AMP can also be tailored to the empirical distribution of the signal entries, and for a wide class of estimation problems, AMP is conjectured to be optimal among all polynomial-time algorithms. However, a major issue of AMP is that in many models (such as phase retrieval), it requires an initialization correlated with the ground-truth signal and independent from the measurement matrix. Assuming that such an initialization is available is typically not realistic. In this paper, we solve this problem by proposing an AMP algorithm initialized with a spectral estimator. With such an initialization, the standard AMP analysis fails since the spectral estimator depends in a complicated way on the design matrix. Our main contribution is a rigorous characterization of the performance of AMP with spectral initialization in the high-dimensional limit. The key technical idea is to define and analyze a two-phase artificial AMP algorithm that first produces the spectral estimator, and then closely approximates the iterates of the true AMP. 
We also provide numerical results that demonstrate the validity of the proposed approach."}],"doi":"10.1088/1742-5468/ac9828","day":"24","file_date_updated":"2023-02-02T08:35:52Z","quality_controlled":"1","article_type":"original","publisher":"IOP Publishing","author":[{"id":"27EB676C-8706-11E9-9510-7717E6697425","full_name":"Mondelli, Marco","orcid":"0000-0002-3242-7020","last_name":"Mondelli","first_name":"Marco"},{"full_name":"Venkataramanan, Ramji","last_name":"Venkataramanan","first_name":"Ramji"}],"issue":"11","_id":"12480","scopus_import":"1","title":"Approximate message passing with spectral initialization for generalized linear models","intvolume":"      2022","publication_status":"published","date_created":"2023-02-02T08:31:57Z","article_processing_charge":"Yes (via OA deal)","department":[{"_id":"MaMo"}]},{"publication_status":"accepted","oa_version":"Preprint","department":[{"_id":"MaMo"}],"article_processing_charge":"No","date_created":"2023-02-10T13:45:41Z","month":"05","title":"The price of ignorance: How much does it cost to forget noise structure in low-rank matrix estimation?","article_number":"2205.10009","publication":"arXiv","_id":"12536","author":[{"last_name":"Barbier","first_name":"Jean","full_name":"Barbier, Jean"},{"full_name":"Hou, TianQi","first_name":"TianQi","last_name":"Hou"},{"id":"27EB676C-8706-11E9-9510-7717E6697425","full_name":"Mondelli, Marco","orcid":"0000-0002-3242-7020","last_name":"Mondelli","first_name":"Marco"},{"last_name":"Saenz","first_name":"Manuel","full_name":"Saenz, Manuel"}],"language":[{"iso":"eng"}],"arxiv":1,"doi":"10.48550/arXiv.2205.10009","day":"20","abstract":[{"lang":"eng","text":"We consider the problem of estimating a rank-1 signal corrupted by structured rotationally invariant noise, and address the following question: how well do inference algorithms perform when the noise statistics is unknown and hence Gaussian noise is assumed? 
While the matched Bayes-optimal setting with unstructured noise is well understood, the analysis of this mismatched problem is only at its premises. In this paper, we make a step towards understanding the effect of the strong source of mismatch which is the noise statistics. Our main technical contribution is the rigorous analysis of a Bayes estimator and of an approximate message passing (AMP) algorithm, both of which incorrectly assume a Gaussian setup. The first result exploits the theory of spherical integrals and of low-rank matrix perturbations; the idea behind the second one is to design and analyze an artificial AMP which, by taking advantage of the flexibility in the denoisers, is able to \"correct\" the mismatch. Armed with these sharp asymptotic characterizations, we unveil a rich and often unexpected phenomenology. For example, despite AMP is in principle designed to efficiently compute the Bayes estimator, the former is outperformed by the latter in terms of mean-square error. We show that this performance gap is due to an incorrect estimation of the signal norm. In fact, when the SNR is large enough, the overlaps of the AMP and the Bayes estimator coincide, and they even match those of optimal estimators taking into account the structure of the noise."}],"oa":1,"date_updated":"2023-02-16T09:41:25Z","citation":{"ama":"Barbier J, Hou T, Mondelli M, Saenz M. The price of ignorance: How much does it cost to forget noise structure in low-rank matrix estimation? <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.2205.10009\">10.48550/arXiv.2205.10009</a>","apa":"Barbier, J., Hou, T., Mondelli, M., &#38; Saenz, M. (n.d.). The price of ignorance: How much does it cost to forget noise structure in low-rank matrix estimation? <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2205.10009\">https://doi.org/10.48550/arXiv.2205.10009</a>","chicago":"Barbier, Jean, TianQi Hou, Marco Mondelli, and Manuel Saenz. 
“The Price of Ignorance: How Much Does It Cost to Forget Noise Structure in Low-Rank Matrix Estimation?” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.2205.10009\">https://doi.org/10.48550/arXiv.2205.10009</a>.","ieee":"J. Barbier, T. Hou, M. Mondelli, and M. Saenz, “The price of ignorance: How much does it cost to forget noise structure in low-rank matrix estimation?,” <i>arXiv</i>.","short":"J. Barbier, T. Hou, M. Mondelli, M. Saenz, ArXiv (n.d.).","mla":"Barbier, Jean, et al. “The Price of Ignorance: How Much Does It Cost to Forget Noise Structure in Low-Rank Matrix Estimation?” <i>ArXiv</i>, 2205.10009, doi:<a href=\"https://doi.org/10.48550/arXiv.2205.10009\">10.48550/arXiv.2205.10009</a>.","ista":"Barbier J, Hou T, Mondelli M, Saenz M. The price of ignorance: How much does it cost to forget noise structure in low-rank matrix estimation? arXiv, 2205.10009."},"year":"2022","date_published":"2022-05-20T00:00:00Z","type":"preprint","external_id":{"arxiv":["2205.10009"]},"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2205.10009","open_access":"1"}],"status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87"},{"type":"conference","date_published":"2022-07-24T00:00:00Z","oa":1,"publication_identifier":{"isbn":["9781713871088"]},"status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2205.10217"}],"publication":"36th Conference on Neural Information Processing Systems","month":"07","project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"oa_version":"Preprint","language":[{"iso":"eng"}],"external_id":{"arxiv":["2205.10217"]},"citation":{"mla":"Bombari, Simone, et al. “Memorization and Optimization in Deep Neural Networks with Minimum Over-Parameterization.” <i>36th Conference on Neural Information Processing Systems</i>, vol. 35, Curran Associates, 2022, pp. 7628–40.","short":"S. Bombari, M.H. 
Amani, M. Mondelli, in:, 36th Conference on Neural Information Processing Systems, Curran Associates, 2022, pp. 7628–7640.","ista":"Bombari S, Amani MH, Mondelli M. 2022. Memorization and optimization in deep neural networks with minimum over-parameterization. 36th Conference on Neural Information Processing Systems. vol. 35, 7628–7640.","apa":"Bombari, S., Amani, M. H., &#38; Mondelli, M. (2022). Memorization and optimization in deep neural networks with minimum over-parameterization. In <i>36th Conference on Neural Information Processing Systems</i> (Vol. 35, pp. 7628–7640). Curran Associates.","ama":"Bombari S, Amani MH, Mondelli M. Memorization and optimization in deep neural networks with minimum over-parameterization. In: <i>36th Conference on Neural Information Processing Systems</i>. Vol 35. Curran Associates; 2022:7628-7640.","chicago":"Bombari, Simone, Mohammad Hossein Amani, and Marco Mondelli. “Memorization and Optimization in Deep Neural Networks with Minimum Over-Parameterization.” In <i>36th Conference on Neural Information Processing Systems</i>, 35:7628–40. Curran Associates, 2022.","ieee":"S. Bombari, M. H. Amani, and M. Mondelli, “Memorization and optimization in deep neural networks with minimum over-parameterization,” in <i>36th Conference on Neural Information Processing Systems</i>, 2022, vol. 35, pp. 7628–7640."},"year":"2022","date_updated":"2024-09-10T13:03:19Z","abstract":[{"lang":"eng","text":"The Neural Tangent Kernel (NTK) has emerged as a powerful tool to provide memorization, optimization and generalization guarantees in deep neural networks. A line of work has studied the NTK spectrum for two-layer and deep networks with at least a layer with Ω(N) neurons, N being the number of training samples. Furthermore, there is increasing evidence suggesting that deep networks with sub-linear layer widths are powerful memorizers and optimizers, as long as the number of parameters exceeds the number of samples. 
Thus, a natural open question is whether the NTK is well conditioned in such a challenging sub-linear setup. In this paper, we answer this question in the affirmative. Our key technical contribution is a lower bound on the smallest NTK eigenvalue for deep networks with the minimum possible over-parameterization: the number of parameters is roughly Ω(N) and, hence, the number of neurons is as little as Ω(N−−√). To showcase the applicability of our NTK bounds, we provide two results concerning memorization capacity and optimization guarantees for gradient descent training."}],"day":"24","arxiv":1,"volume":35,"acknowledgement":"The authors were partially supported by the 2019 Lopez-Loreta prize, and they would like to thank\r\nQuynh Nguyen, Mahdi Soltanolkotabi and Adel Javanmard for helpful discussions.\r\n","author":[{"first_name":"Simone","last_name":"Bombari","full_name":"Bombari, Simone","id":"ca726dda-de17-11ea-bc14-f9da834f63aa"},{"full_name":"Amani, Mohammad Hossein","first_name":"Mohammad Hossein","last_name":"Amani"},{"full_name":"Mondelli, Marco","orcid":"0000-0002-3242-7020","last_name":"Mondelli","first_name":"Marco","id":"27EB676C-8706-11E9-9510-7717E6697425"}],"_id":"12537","intvolume":"        35","title":"Memorization and optimization in deep neural networks with minimum over-parameterization","date_created":"2023-02-10T13:46:37Z","department":[{"_id":"MaMo"}],"article_processing_charge":"No","publication_status":"published","quality_controlled":"1","page":"7628-7640","publisher":"Curran Associates"},{"language":[{"iso":"eng"}],"conference":{"start_date":"2022-11-01","name":"ITW: Information Theory Workshop","location":"Mumbai, India","end_date":"2022-11-09"},"publication":"IEEE Information Theory Workshop","oa_version":"Preprint","month":"11","main_file_link":[{"open_access":"1","url":" 
https://doi.org/10.48550/arXiv.2205.08199"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","status":"public","date_published":"2022-11-16T00:00:00Z","type":"journal_article","publication_identifier":{"isbn":["9781665483414"]},"oa":1,"page":"588-593","quality_controlled":"1","publisher":"IEEE","article_type":"original","_id":"12538","scopus_import":"1","author":[{"last_name":"Amani","first_name":"Mohammad Hossein","full_name":"Amani, Mohammad Hossein"},{"id":"ca726dda-de17-11ea-bc14-f9da834f63aa","full_name":"Bombari, Simone","first_name":"Simone","last_name":"Bombari"},{"id":"27EB676C-8706-11E9-9510-7717E6697425","last_name":"Mondelli","first_name":"Marco","full_name":"Mondelli, Marco","orcid":"0000-0002-3242-7020"},{"full_name":"Pukdee, Rattana","last_name":"Pukdee","first_name":"Rattana"},{"full_name":"Rini, Stefano","last_name":"Rini","first_name":"Stefano"}],"publication_status":"published","department":[{"_id":"MaMo"}],"date_created":"2023-02-10T13:47:56Z","article_processing_charge":"No","title":"Sharp asymptotics on the compression of two-layer neural networks","date_updated":"2023-12-18T11:31:47Z","citation":{"chicago":"Amani, Mohammad Hossein, Simone Bombari, Marco Mondelli, Rattana Pukdee, and Stefano Rini. “Sharp Asymptotics on the Compression of Two-Layer Neural Networks.” <i>IEEE Information Theory Workshop</i>. IEEE, 2022. <a href=\"https://doi.org/10.1109/ITW54588.2022.9965870\">https://doi.org/10.1109/ITW54588.2022.9965870</a>.","ieee":"M. H. Amani, S. Bombari, M. Mondelli, R. Pukdee, and S. Rini, “Sharp asymptotics on the compression of two-layer neural networks,” <i>IEEE Information Theory Workshop</i>. IEEE, pp. 588–593, 2022.","apa":"Amani, M. H., Bombari, S., Mondelli, M., Pukdee, R., &#38; Rini, S. (2022). Sharp asymptotics on the compression of two-layer neural networks. <i>IEEE Information Theory Workshop</i>. Mumbai, India: IEEE. 
<a href=\"https://doi.org/10.1109/ITW54588.2022.9965870\">https://doi.org/10.1109/ITW54588.2022.9965870</a>","ama":"Amani MH, Bombari S, Mondelli M, Pukdee R, Rini S. Sharp asymptotics on the compression of two-layer neural networks. <i>IEEE Information Theory Workshop</i>. 2022:588-593. doi:<a href=\"https://doi.org/10.1109/ITW54588.2022.9965870\">10.1109/ITW54588.2022.9965870</a>","ista":"Amani MH, Bombari S, Mondelli M, Pukdee R, Rini S. 2022. Sharp asymptotics on the compression of two-layer neural networks. IEEE Information Theory Workshop., 588–593.","mla":"Amani, Mohammad Hossein, et al. “Sharp Asymptotics on the Compression of Two-Layer Neural Networks.” <i>IEEE Information Theory Workshop</i>, IEEE, 2022, pp. 588–93, doi:<a href=\"https://doi.org/10.1109/ITW54588.2022.9965870\">10.1109/ITW54588.2022.9965870</a>.","short":"M.H. Amani, S. Bombari, M. Mondelli, R. Pukdee, S. Rini, IEEE Information Theory Workshop (2022) 588–593."},"year":"2022","external_id":{"arxiv":["2205.08199"]},"doi":"10.1109/ITW54588.2022.9965870","arxiv":1,"day":"16","abstract":[{"text":"In this paper, we study the compression of a target two-layer neural network with N nodes into a compressed network with M<N nodes. More precisely, we consider the setting in which the weights of the target network are i.i.d. sub-Gaussian, and we minimize the population L_2 loss between the outputs of the target and of the compressed network, under the assumption of Gaussian inputs. By using tools from high-dimensional probability, we show that this non-convex problem can be simplified when the target network is sufficiently over-parameterized, and provide the error rate of this approximation as a function of the input dimension and N. In this mean-field limit, the simplified objective, as well as the optimal weights of the compressed network, does not depend on the realization of the target network, but only on expected scaling factors. 
Furthermore, for networks with ReLU activation, we conjecture that the optimum of the simplified optimization problem is achieved by taking weights on the Equiangular Tight Frame (ETF), while the scaling of the weights and the orientation of the ETF depend on the parameters of the target network. Numerical evidence is provided to support this conjecture.","lang":"eng"}]},{"department":[{"_id":"MaMo"}],"article_processing_charge":"No","date_created":"2023-02-10T13:49:04Z","publication_status":"published","intvolume":"       162","title":"Estimation in rotationally invariant generalized linear models via approximate message passing","_id":"12540","author":[{"full_name":"Venkataramanan, Ramji","first_name":"Ramji","last_name":"Venkataramanan"},{"id":"94ec913c-dc85-11ea-9058-e5051ab2428b","full_name":"Kögler, Kevin","last_name":"Kögler","first_name":"Kevin"},{"last_name":"Mondelli","first_name":"Marco","full_name":"Mondelli, Marco","orcid":"0000-0002-3242-7020","id":"27EB676C-8706-11E9-9510-7717E6697425"}],"publisher":"ML Research Press","quality_controlled":"1","file_date_updated":"2023-02-13T10:53:11Z","abstract":[{"text":"We consider the problem of signal estimation in generalized linear models defined via rotationally invariant design matrices. Since these matrices can have an arbitrary spectral distribution, this model is well suited for capturing complex correlation structures which often arise in applications. We propose a novel family of approximate message passing (AMP) algorithms for signal estimation, and rigorously characterize their performance in the high-dimensional limit via a state evolution recursion. Our rotationally invariant AMP has complexity of the same order as the existing AMP derived under the restrictive assumption of a Gaussian design; our algorithm also recovers this existing AMP as a special case. 
Numerical results showcase a performance close to Vector AMP (which is conjectured to be Bayes-optimal in some settings), but obtained with a much lower complexity, as the proposed algorithm does not require a computationally expensive singular value decomposition.","lang":"eng"}],"year":"2022","citation":{"ieee":"R. Venkataramanan, K. Kögler, and M. Mondelli, “Estimation in rotationally invariant generalized linear models via approximate message passing,” in <i>Proceedings of the 39th International Conference on Machine Learning</i>, Baltimore, MD, United States, 2022, vol. 162.","chicago":"Venkataramanan, Ramji, Kevin Kögler, and Marco Mondelli. “Estimation in Rotationally Invariant Generalized Linear Models via Approximate Message Passing.” In <i>Proceedings of the 39th International Conference on Machine Learning</i>, Vol. 162. ML Research Press, 2022.","ama":"Venkataramanan R, Kögler K, Mondelli M. Estimation in rotationally invariant generalized linear models via approximate message passing. In: <i>Proceedings of the 39th International Conference on Machine Learning</i>. Vol 162. ML Research Press; 2022.","apa":"Venkataramanan, R., Kögler, K., &#38; Mondelli, M. (2022). Estimation in rotationally invariant generalized linear models via approximate message passing. In <i>Proceedings of the 39th International Conference on Machine Learning</i> (Vol. 162). Baltimore, MD, United States: ML Research Press.","ista":"Venkataramanan R, Kögler K, Mondelli M. 2022. Estimation in rotationally invariant generalized linear models via approximate message passing. Proceedings of the 39th International Conference on Machine Learning. ICML: International Conference on Machine Learning vol. 162, 22.","mla":"Venkataramanan, Ramji, et al. “Estimation in Rotationally Invariant Generalized Linear Models via Approximate Message Passing.” <i>Proceedings of the 39th International Conference on Machine Learning</i>, vol. 162, 22, ML Research Press, 2022.","short":"R. 
Venkataramanan, K. Kögler, M. Mondelli, in:, Proceedings of the 39th International Conference on Machine Learning, ML Research Press, 2022."},"date_updated":"2024-09-10T13:03:17Z","volume":162,"acknowledgement":"The authors would like to thank the anonymous reviewers for their helpful comments. KK and MM were partially supported by the 2019 Lopez-Loreta Prize.","ddc":["000"],"project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"oa_version":"Published Version","article_number":"22","has_accepted_license":"1","publication":"Proceedings of the 39th International Conference on Machine Learning","conference":{"start_date":"2022-07-17","name":"ICML: International Conference on Machine Learning","location":"Baltimore, MD, United States","end_date":"2022-07-23"},"language":[{"iso":"eng"}],"oa":1,"type":"conference","date_published":"2022-01-01T00:00:00Z","file":[{"relation":"main_file","success":1,"access_level":"open_access","creator":"dernst","file_id":"12547","file_size":2341343,"checksum":"67436eb0a660789514cdf9db79e84683","date_created":"2023-02-13T10:53:11Z","file_name":"2022_PMLR_Venkataramanan.pdf","content_type":"application/pdf","date_updated":"2023-02-13T10:53:11Z"}],"status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87"},{"day":"30","arxiv":1,"doi":"10.48550/arXiv.2203.16701","oa":1,"abstract":[{"lang":"eng","text":"Memorization of the relation between entities in a dataset can lead to privacy issues when using a trained model for question answering. We introduce Relational Memorization (RM) to understand, quantify and control this phenomenon. While bounding general memorization can have detrimental effects on the performance of a trained model, bounding RM does not prevent effective learning. 
The difference is most pronounced when the data distribution is long-tailed, with many queries having only few training examples: Impeding general memorization prevents effective learning, while impeding only relational memorization still allows learning general properties of the underlying concepts. We formalize the notion of Relational Privacy (RP) and, inspired by Differential Privacy (DP), we provide a possible definition of Differential Relational Privacy (DrP). These notions can be used to describe and compute bounds on the amount of RM in a trained model. We illustrate Relational Privacy concepts in experiments with large-scale models for Question Answering."}],"citation":{"ista":"Bombari S, Achille A, Wang Z, Wang Y-X, Xie Y, Singh KY, Appalaraju S, Mahadevan V, Soatto S. Towards differential relational privacy and its use in question answering. arXiv, 2203.16701.","mla":"Bombari, Simone, et al. “Towards Differential Relational Privacy and Its Use in Question Answering.” <i>ArXiv</i>, 2203.16701, doi:<a href=\"https://doi.org/10.48550/arXiv.2203.16701\">10.48550/arXiv.2203.16701</a>.","short":"S. Bombari, A. Achille, Z. Wang, Y.-X. Wang, Y. Xie, K.Y. Singh, S. Appalaraju, V. Mahadevan, S. Soatto, ArXiv (n.d.).","chicago":"Bombari, Simone, Alessandro Achille, Zijian Wang, Yu-Xiang Wang, Yusheng Xie, Kunwar Yashraj Singh, Srikar Appalaraju, Vijay Mahadevan, and Stefano Soatto. “Towards Differential Relational Privacy and Its Use in Question Answering.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.2203.16701\">https://doi.org/10.48550/arXiv.2203.16701</a>.","ieee":"S. Bombari <i>et al.</i>, “Towards differential relational privacy and its use in question answering,” <i>arXiv</i>.","ama":"Bombari S, Achille A, Wang Z, et al. Towards differential relational privacy and its use in question answering. <i>arXiv</i>. 
doi:<a href=\"https://doi.org/10.48550/arXiv.2203.16701\">10.48550/arXiv.2203.16701</a>","apa":"Bombari, S., Achille, A., Wang, Z., Wang, Y.-X., Xie, Y., Singh, K. Y., … Soatto, S. (n.d.). Towards differential relational privacy and its use in question answering. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2203.16701\">https://doi.org/10.48550/arXiv.2203.16701</a>"},"year":"2022","date_updated":"2023-04-25T07:34:49Z","type":"preprint","external_id":{"arxiv":["2203.16701"]},"date_published":"2022-03-30T00:00:00Z","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2203.16701","open_access":"1"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","status":"public","date_created":"2023-04-23T16:11:48Z","department":[{"_id":"GradSch"},{"_id":"MaMo"}],"article_processing_charge":"No","publication_status":"submitted","oa_version":"Preprint","article_number":"2203.16701","title":"Towards differential relational privacy and its use in question answering","month":"03","_id":"12860","publication":"arXiv","author":[{"id":"ca726dda-de17-11ea-bc14-f9da834f63aa","first_name":"Simone","last_name":"Bombari","full_name":"Bombari, Simone"},{"first_name":"Alessandro","last_name":"Achille","full_name":"Achille, Alessandro"},{"last_name":"Wang","first_name":"Zijian","full_name":"Wang, Zijian"},{"full_name":"Wang, Yu-Xiang","last_name":"Wang","first_name":"Yu-Xiang"},{"last_name":"Xie","first_name":"Yusheng","full_name":"Xie, Yusheng"},{"full_name":"Singh, Kunwar Yashraj","first_name":"Kunwar Yashraj","last_name":"Singh"},{"full_name":"Appalaraju, Srikar","first_name":"Srikar","last_name":"Appalaraju"},{"full_name":"Mahadevan, Vijay","last_name":"Mahadevan","first_name":"Vijay"},{"first_name":"Stefano","last_name":"Soatto","full_name":"Soatto, Stefano"}],"language":[{"iso":"eng"}]},{"publisher":"ML Research 
Press","quality_controlled":"1","page":"8119-8129","file_date_updated":"2023-06-19T10:49:12Z","date_created":"2023-06-18T22:00:48Z","department":[{"_id":"MaMo"}],"article_processing_charge":"No","publication_status":"published","intvolume":"       139","title":"Tight bounds on the smallest Eigenvalue of the neural tangent kernel for deep ReLU networks","scopus_import":"1","_id":"13146","author":[{"full_name":"Nguyen, Quynh","last_name":"Nguyen","first_name":"Quynh"},{"id":"27EB676C-8706-11E9-9510-7717E6697425","orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco","first_name":"Marco","last_name":"Mondelli"},{"first_name":"Guido","last_name":"Montufar","full_name":"Montufar, Guido"}],"volume":139,"acknowledgement":"The authors would like to thank the anonymous reviewers for their helpful comments. MM was partially supported by the 2019 Lopez-Loreta Prize. QN and GM acknowledge support from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement no 757983).","ddc":["000"],"day":"01","arxiv":1,"abstract":[{"text":"A recent line of work has analyzed the theoretical properties of deep neural networks via the Neural Tangent Kernel (NTK). In particular, the smallest eigenvalue of the NTK has been related to the memorization capacity, the global convergence of gradient descent algorithms and the generalization of deep nets. However, existing results either provide bounds in the two-layer setting or assume that the spectrum of the NTK matrices is bounded away from 0 for multi-layer networks. In this paper, we provide tight bounds on the smallest eigenvalue of NTK matrices for deep ReLU nets, both in the limiting case of infinite widths and for finite widths. 
In the finite-width setting, the network architectures we consider are fairly general: we require the existence of a wide layer with roughly order of N neurons, N being the number of data samples; and the scaling of the remaining layer widths is arbitrary (up to logarithmic factors). To obtain our results, we analyze various quantities of independent interest: we give lower bounds on the smallest singular value of hidden feature matrices, and upper bounds on the Lipschitz constant of input-output feature maps.","lang":"eng"}],"year":"2021","citation":{"ista":"Nguyen Q, Mondelli M, Montufar G. 2021. Tight bounds on the smallest Eigenvalue of the neural tangent kernel for deep ReLU networks. Proceedings of the 38th International Conference on Machine Learning. International Conference on Machine Learning vol. 139, 8119–8129.","short":"Q. Nguyen, M. Mondelli, G. Montufar, in:, Proceedings of the 38th International Conference on Machine Learning, ML Research Press, 2021, pp. 8119–8129.","mla":"Nguyen, Quynh, et al. “Tight Bounds on the Smallest Eigenvalue of the Neural Tangent Kernel for Deep ReLU Networks.” <i>Proceedings of the 38th International Conference on Machine Learning</i>, vol. 139, ML Research Press, 2021, pp. 8119–29.","chicago":"Nguyen, Quynh, Marco Mondelli, and Guido Montufar. “Tight Bounds on the Smallest Eigenvalue of the Neural Tangent Kernel for Deep ReLU Networks.” In <i>Proceedings of the 38th International Conference on Machine Learning</i>, 139:8119–29. ML Research Press, 2021.","ieee":"Q. Nguyen, M. Mondelli, and G. Montufar, “Tight bounds on the smallest Eigenvalue of the neural tangent kernel for deep ReLU networks,” in <i>Proceedings of the 38th International Conference on Machine Learning</i>, Virtual, 2021, vol. 139, pp. 8119–8129.","apa":"Nguyen, Q., Mondelli, M., &#38; Montufar, G. (2021). Tight bounds on the smallest Eigenvalue of the neural tangent kernel for deep ReLU networks. 
In <i>Proceedings of the 38th International Conference on Machine Learning</i> (Vol. 139, pp. 8119–8129). Virtual: ML Research Press.","ama":"Nguyen Q, Mondelli M, Montufar G. Tight bounds on the smallest Eigenvalue of the neural tangent kernel for deep ReLU networks. In: <i>Proceedings of the 38th International Conference on Machine Learning</i>. Vol 139. ML Research Press; 2021:8119-8129."},"date_updated":"2024-09-10T13:03:17Z","external_id":{"arxiv":["2012.11654"]},"conference":{"start_date":"2021-07-18","name":"International Conference on Machine Learning","location":"Virtual","end_date":"2021-07-24"},"language":[{"iso":"eng"}],"project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"oa_version":"Published Version","month":"07","has_accepted_license":"1","publication":"Proceedings of the 38th International Conference on Machine Learning","file":[{"file_name":"2021_PMLR_Nguyen.pdf","content_type":"application/pdf","date_updated":"2023-06-19T10:49:12Z","file_size":591332,"checksum":"19489cf5e16a0596b1f92e317d97c9b0","date_created":"2023-06-19T10:49:12Z","creator":"dernst","file_id":"13155","relation":"main_file","success":1,"access_level":"open_access"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","status":"public","publication_identifier":{"isbn":["9781713845065"],"eissn":["2640-3498"]},"oa":1,"tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"type":"conference","date_published":"2021-07-01T00:00:00Z"},{"abstract":[{"text":" We prove that, for the binary erasure channel (BEC), the polar-coding paradigm gives rise to codes that not only approach the Shannon limit but do so under the best possible scaling of their block length as a function of the gap to capacity. 
This result exhibits the first known family of binary codes that attain both optimal scaling and quasi-linear complexity of encoding and decoding. Our proof is based on the construction and analysis of binary polar codes with large kernels. When communicating reliably at rates within ε>0 of capacity, the code length n often scales as O(1/εμ), where the constant μ is called the scaling exponent. It is known that the optimal scaling exponent is μ=2, and it is achieved by random linear codes. The scaling exponent of conventional polar codes (based on the 2×2 kernel) on the BEC is μ=3.63. This falls far short of the optimal scaling guaranteed by random codes. Our main contribution is a rigorous proof of the following result: for the BEC, there exist ℓ×ℓ binary kernels, such that polar codes constructed from these kernels achieve scaling exponent μ(ℓ) that tends to the optimal value of 2 as ℓ grows. We furthermore characterize precisely how large ℓ needs to be as a function of the gap between μ(ℓ) and 2. The resulting binary codes maintain the recursive structure of conventional polar codes, and thereby achieve construction complexity O(n) and encoding/decoding complexity O(nlogn).","lang":"eng"}],"doi":"10.1109/TIT.2020.3038806","arxiv":1,"day":"01","external_id":{"arxiv":["1711.01339"]},"date_updated":"2024-03-07T12:18:50Z","citation":{"apa":"Fazeli, A., Hassani, H., Mondelli, M., &#38; Vardy, A. (2021). Binary linear codes with optimal scaling: Polar codes with large kernels. <i>IEEE Transactions on Information Theory</i>. IEEE. <a href=\"https://doi.org/10.1109/TIT.2020.3038806\">https://doi.org/10.1109/TIT.2020.3038806</a>","ama":"Fazeli A, Hassani H, Mondelli M, Vardy A. Binary linear codes with optimal scaling: Polar codes with large kernels. <i>IEEE Transactions on Information Theory</i>. 2021;67(9):5693-5710. doi:<a href=\"https://doi.org/10.1109/TIT.2020.3038806\">10.1109/TIT.2020.3038806</a>","ieee":"A. Fazeli, H. Hassani, M. Mondelli, and A. 
Vardy, “Binary linear codes with optimal scaling: Polar codes with large kernels,” <i>IEEE Transactions on Information Theory</i>, vol. 67, no. 9. IEEE, pp. 5693–5710, 2021.","chicago":"Fazeli, Arman, Hamed Hassani, Marco Mondelli, and Alexander Vardy. “Binary Linear Codes with Optimal Scaling: Polar Codes with Large Kernels.” <i>IEEE Transactions on Information Theory</i>. IEEE, 2021. <a href=\"https://doi.org/10.1109/TIT.2020.3038806\">https://doi.org/10.1109/TIT.2020.3038806</a>.","mla":"Fazeli, Arman, et al. “Binary Linear Codes with Optimal Scaling: Polar Codes with Large Kernels.” <i>IEEE Transactions on Information Theory</i>, vol. 67, no. 9, IEEE, 2021, pp. 5693–710, doi:<a href=\"https://doi.org/10.1109/TIT.2020.3038806\">10.1109/TIT.2020.3038806</a>.","short":"A. Fazeli, H. Hassani, M. Mondelli, A. Vardy, IEEE Transactions on Information Theory 67 (2021) 5693–5710.","ista":"Fazeli A, Hassani H, Mondelli M, Vardy A. 2021. Binary linear codes with optimal scaling: Polar codes with large kernels. IEEE Transactions on Information Theory. 
67(9), 5693–5710."},"year":"2021","volume":67,"title":"Binary linear codes with optimal scaling: Polar codes with large kernels","intvolume":"        67","publication_status":"published","department":[{"_id":"MaMo"}],"article_processing_charge":"No","date_created":"2021-01-10T23:01:18Z","author":[{"first_name":"Arman","last_name":"Fazeli","full_name":"Fazeli, Arman"},{"full_name":"Hassani, Hamed","last_name":"Hassani","first_name":"Hamed"},{"id":"27EB676C-8706-11E9-9510-7717E6697425","first_name":"Marco","last_name":"Mondelli","orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco"},{"first_name":"Alexander","last_name":"Vardy","full_name":"Vardy, Alexander"}],"issue":"9","_id":"9002","scopus_import":"1","article_type":"original","publisher":"IEEE","page":"5693-5710","quality_controlled":"1","publication_identifier":{"issn":["0018-9448"],"eissn":["1557-9654"]},"date_published":"2021-09-01T00:00:00Z","type":"journal_article","status":"public","related_material":{"record":[{"relation":"earlier_version","id":"6665","status":"public"}]},"user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","month":"09","oa_version":"Preprint","publication":"IEEE Transactions on Information Theory","language":[{"iso":"eng"}]},{"main_file_link":[{"url":"https://arxiv.org/abs/1909.04892","open_access":"1"}],"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","related_material":{"record":[{"status":"public","id":"8536","relation":"earlier_version"}]},"status":"public","publication_identifier":{"eissn":["15582248"],"issn":["15361276"]},"oa":1,"type":"journal_article","date_published":"2021-01-01T00:00:00Z","language":[{"iso":"eng"}],"oa_version":"Preprint","month":"01","publication":"IEEE Transactions on Wireless Communications","acknowledgement":"M. Mondelli was partially supported by grants NSF DMS-1613091, CCF-1714305, IIS-1741162, and ONR N00014-18-1-2729. S. A. 
Hashemi is supported by a Postdoctoral Fellowship from the Natural Sciences and Engineering Research Council of Canada (NSERC) and by Huawei. The authors would like to thank the anonymous reviewers for their comments that helped improving the quality of the manuscript.","volume":20,"day":"01","doi":"10.1109/TWC.2020.3022922","arxiv":1,"abstract":[{"lang":"eng","text":"This work analyzes the latency of the simplified successive cancellation (SSC) decoding scheme for polar codes proposed by Alamdar-Yazdi and Kschischang. It is shown that, unlike conventional successive cancellation decoding, where latency is linear in the block length, the latency of SSC decoding is sublinear. More specifically, the latency of SSC decoding is O(N^(1−1/μ)), where N is the block length and μ is the scaling exponent of the channel, which captures the speed of convergence of the rate to capacity. Numerical results demonstrate the tightness of the bound and show that most of the latency reduction arises from the parallel decoding of subcodes of rate 0 or 1."}],"year":"2021","citation":{"ista":"Mondelli M, Hashemi SA, Cioffi JM, Goldsmith A. 2021. Sublinear latency for simplified successive cancellation decoding of polar codes. IEEE Transactions on Wireless Communications. 20(1), 18–27.","mla":"Mondelli, Marco, et al. “Sublinear Latency for Simplified Successive Cancellation Decoding of Polar Codes.” <i>IEEE Transactions on Wireless Communications</i>, vol. 20, no. 1, IEEE, 2021, pp. 18–27, doi:<a href=\"https://doi.org/10.1109/TWC.2020.3022922\">10.1109/TWC.2020.3022922</a>.","short":"M. Mondelli, S.A. Hashemi, J.M. Cioffi, A. Goldsmith, IEEE Transactions on Wireless Communications 20 (2021) 18–27.","ieee":"M. Mondelli, S. A. Hashemi, J. M. Cioffi, and A. Goldsmith, “Sublinear latency for simplified successive cancellation decoding of polar codes,” <i>IEEE Transactions on Wireless Communications</i>, vol. 20, no. 1. IEEE, pp. 
18–27, 2021.","chicago":"Mondelli, Marco, Seyyed Ali Hashemi, John M. Cioffi, and Andrea Goldsmith. “Sublinear Latency for Simplified Successive Cancellation Decoding of Polar Codes.” <i>IEEE Transactions on Wireless Communications</i>. IEEE, 2021. <a href=\"https://doi.org/10.1109/TWC.2020.3022922\">https://doi.org/10.1109/TWC.2020.3022922</a>.","ama":"Mondelli M, Hashemi SA, Cioffi JM, Goldsmith A. Sublinear latency for simplified successive cancellation decoding of polar codes. <i>IEEE Transactions on Wireless Communications</i>. 2021;20(1):18-27. doi:<a href=\"https://doi.org/10.1109/TWC.2020.3022922\">10.1109/TWC.2020.3022922</a>","apa":"Mondelli, M., Hashemi, S. A., Cioffi, J. M., &#38; Goldsmith, A. (2021). Sublinear latency for simplified successive cancellation decoding of polar codes. <i>IEEE Transactions on Wireless Communications</i>. IEEE. <a href=\"https://doi.org/10.1109/TWC.2020.3022922\">https://doi.org/10.1109/TWC.2020.3022922</a>"},"date_updated":"2023-08-07T13:36:25Z","external_id":{"isi":["000607808800002"],"arxiv":["1909.04892"]},"isi":1,"publisher":"IEEE","article_type":"original","quality_controlled":"1","page":"18-27","date_created":"2021-01-31T23:01:21Z","department":[{"_id":"MaMo"}],"article_processing_charge":"No","publication_status":"published","intvolume":"        20","title":"Sublinear latency for simplified successive cancellation decoding of polar codes","scopus_import":"1","_id":"9047","issue":"1","author":[{"first_name":"Marco","last_name":"Mondelli","orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco","id":"27EB676C-8706-11E9-9510-7717E6697425"},{"full_name":"Hashemi, Seyyed Ali","last_name":"Hashemi","first_name":"Seyyed Ali"},{"full_name":"Cioffi, John M.","first_name":"John M.","last_name":"Cioffi"},{"full_name":"Goldsmith, Andrea","first_name":"Andrea","last_name":"Goldsmith"}]},{"month":"09","project":[{"_id":"059876FA-7A3F-11EA-A408-12923DDC885E","name":"Prix Lopez-Loretta 2019 - Marco 
Mondelli"}],"oa_version":"Preprint","publication":"2021 IEEE International Symposium on Information Theory","conference":{"end_date":"2021-07-20","location":"Melbourne, Australia","start_date":"2021-07-12","name":"ISIT: International Symposium on Information Theory"},"language":[{"iso":"eng"}],"oa":1,"publication_identifier":{"issn":["2157-8095"],"eisbn":["978-1-5386-8209-8"],"isbn":["978-1-5386-8210-4"]},"type":"conference","date_published":"2021-09-01T00:00:00Z","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","status":"public","related_material":{"record":[{"relation":"later_version","id":"10364","status":"public"}]},"main_file_link":[{"url":"https://arxiv.org/abs/2012.13378","open_access":"1"}],"title":"Parallelism versus latency in simplified successive-cancellation decoding of polar codes","article_processing_charge":"No","date_created":"2021-09-27T14:33:14Z","department":[{"_id":"MaMo"}],"publication_status":"published","author":[{"full_name":"Hashemi, Seyyed Ali","first_name":"Seyyed Ali","last_name":"Hashemi"},{"id":"27EB676C-8706-11E9-9510-7717E6697425","full_name":"Mondelli, Marco","orcid":"0000-0002-3242-7020","last_name":"Mondelli","first_name":"Marco"},{"full_name":"Fazeli, Arman","first_name":"Arman","last_name":"Fazeli"},{"full_name":"Vardy, Alexander","last_name":"Vardy","first_name":"Alexander"},{"first_name":"John","last_name":"Cioffi","full_name":"Cioffi, John"},{"full_name":"Goldsmith, Andrea","first_name":"Andrea","last_name":"Goldsmith"}],"scopus_import":"1","_id":"10053","publisher":"Institute of Electrical and Electronics Engineers","quality_controlled":"1","page":"2369-2374","abstract":[{"text":"This paper characterizes the latency of the simplified successive-cancellation (SSC) decoding scheme for polar codes under hardware resource constraints. 
In particular, when the number of processing elements P that can perform SSC decoding operations in parallel is limited, as is the case in practice, the latency of SSC decoding is O(N^(1−1/μ) + (N/P)log₂log₂(N/P)), where N is the block length of the code and μ is the scaling exponent of polar codes for the channel. Three direct consequences of this bound are presented. First, in a fully-parallel implementation where P=N/2, the latency of SSC decoding is O(N^(1−1/μ)), which is sublinear in the block length. This recovers a result from an earlier work. Second, in a fully-serial implementation where P=1, the latency of SSC decoding scales as O(N log₂log₂N). The multiplicative constant is also calculated: we show that the latency of SSC decoding when P=1 is given by (2+o(1))N log₂log₂N. Third, in a semi-parallel implementation, the smallest P that gives the same latency as that of the fully-parallel implementation is P=N^(1/μ). The tightness of our bound on SSC decoding latency and the applicability of the foregoing results is validated through extensive simulations.","lang":"eng"}],"day":"01","arxiv":1,"doi":"10.1109/ISIT45174.2021.9518153","external_id":{"isi":["000701502202078"],"arxiv":["2012.13378"]},"isi":1,"year":"2021","citation":{"ieee":"S. A. Hashemi, M. Mondelli, A. Fazeli, A. Vardy, J. Cioffi, and A. Goldsmith, “Parallelism versus latency in simplified successive-cancellation decoding of polar codes,” in <i>2021 IEEE International Symposium on Information Theory</i>, Melbourne, Australia, 2021, pp. 2369–2374.","chicago":"Hashemi, Seyyed Ali, Marco Mondelli, Arman Fazeli, Alexander Vardy, John Cioffi, and Andrea Goldsmith. “Parallelism versus Latency in Simplified Successive-Cancellation Decoding of Polar Codes.” In <i>2021 IEEE International Symposium on Information Theory</i>, 2369–74. Institute of Electrical and Electronics Engineers, 2021. <a href=\"https://doi.org/10.1109/ISIT45174.2021.9518153\">https://doi.org/10.1109/ISIT45174.2021.9518153</a>.","apa":"Hashemi, S. 
A., Mondelli, M., Fazeli, A., Vardy, A., Cioffi, J., &#38; Goldsmith, A. (2021). Parallelism versus latency in simplified successive-cancellation decoding of polar codes. In <i>2021 IEEE International Symposium on Information Theory</i> (pp. 2369–2374). Melbourne, Australia: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/ISIT45174.2021.9518153\">https://doi.org/10.1109/ISIT45174.2021.9518153</a>","ama":"Hashemi SA, Mondelli M, Fazeli A, Vardy A, Cioffi J, Goldsmith A. Parallelism versus latency in simplified successive-cancellation decoding of polar codes. In: <i>2021 IEEE International Symposium on Information Theory</i>. Institute of Electrical and Electronics Engineers; 2021:2369-2374. doi:<a href=\"https://doi.org/10.1109/ISIT45174.2021.9518153\">10.1109/ISIT45174.2021.9518153</a>","ista":"Hashemi SA, Mondelli M, Fazeli A, Vardy A, Cioffi J, Goldsmith A. 2021. Parallelism versus latency in simplified successive-cancellation decoding of polar codes. 2021 IEEE International Symposium on Information Theory. ISIT: International Symposium on Information Theory, 2369–2374.","mla":"Hashemi, Seyyed Ali, et al. “Parallelism versus Latency in Simplified Successive-Cancellation Decoding of Polar Codes.” <i>2021 IEEE International Symposium on Information Theory</i>, Institute of Electrical and Electronics Engineers, 2021, pp. 2369–74, doi:<a href=\"https://doi.org/10.1109/ISIT45174.2021.9518153\">10.1109/ISIT45174.2021.9518153</a>.","short":"S.A. Hashemi, M. Mondelli, A. Fazeli, A. Vardy, J. Cioffi, A. Goldsmith, in:, 2021 IEEE International Symposium on Information Theory, Institute of Electrical and Electronics Engineers, 2021, pp. 2369–2374."},"date_updated":"2024-09-10T13:03:18Z","acknowledgement":"S. A. Hashemi is supported by a Postdoctoral Fellowship from the Natural Sciences and Engineering Research Council\r\nof Canada (NSERC) and by Huawei. M. Mondelli is partially supported by the 2019 Lopez-Loreta Prize. A. Fazeli and A. 
Vardy were supported in part by the National Science Foundation under Grant CCF-1764104."},{"user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","status":"public","file":[{"date_updated":"2021-12-13T15:47:54Z","content_type":"application/pdf","file_name":"2021_Springer_Mondelli.pdf","date_created":"2021-12-13T15:47:54Z","checksum":"9ea12dd8045a0678000a3a59295221cb","file_size":2305731,"file_id":"10542","creator":"alisjak","access_level":"open_access","relation":"main_file","success":1}],"oa":1,"publication_identifier":{"eissn":["1615-3383"],"issn":["1615-3375"]},"type":"journal_article","date_published":"2021-08-17T00:00:00Z","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"keyword":["Applied Mathematics","Computational Theory and Mathematics","Computational Mathematics","Analysis"],"language":[{"iso":"eng"}],"month":"08","project":[{"name":"IST Austria Open Access Fund","_id":"B67AFEDC-15C9-11EA-A837-991A96BB2854"}],"oa_version":"Published Version","has_accepted_license":"1","publication":"Foundations of Computational Mathematics","ddc":["510"],"acknowledgement":"M. Mondelli would like to thank Andrea Montanari for helpful discussions. All the authors would like to thank the anonymous reviewers for their helpful comments.","abstract":[{"lang":"eng","text":"We study the problem of recovering an unknown signal 𝑥𝑥 given measurements obtained from a generalized linear model with a Gaussian sensing matrix. Two popular solutions are based on a linear estimator 𝑥𝑥^L and a spectral estimator 𝑥𝑥^s. The former is a data-dependent linear combination of the columns of the measurement matrix, and its analysis is quite simple. The latter is the principal eigenvector of a data-dependent matrix, and a recent line of work has studied its performance. In this paper, we show how to optimally combine 𝑥𝑥^L and 𝑥𝑥^s. 
At the heart of our analysis is the exact characterization of the empirical joint distribution of (𝑥𝑥,𝑥𝑥^L,𝑥𝑥^s) in the high-dimensional limit. This allows us to compute the Bayes-optimal combination of 𝑥𝑥^L and 𝑥𝑥^s, given the limiting distribution of the signal 𝑥𝑥. When the distribution of the signal is Gaussian, then the Bayes-optimal combination has the form 𝜃𝑥𝑥^L+𝑥𝑥^s and we derive the optimal combination coefficient. In order to establish the limiting distribution of (𝑥𝑥,𝑥𝑥^L,𝑥𝑥^s), we design and analyze an approximate message passing algorithm whose iterates give 𝑥𝑥^L and approach 𝑥𝑥^s. Numerical simulations demonstrate the improvement of the proposed combination with respect to the two methods considered separately."}],"day":"17","doi":"10.1007/s10208-021-09531-x","arxiv":1,"external_id":{"arxiv":["2008.03326"],"isi":["000685721000001"]},"isi":1,"citation":{"ista":"Mondelli M, Thrampoulidis C, Venkataramanan R. 2021. Optimal combination of linear and spectral estimators for generalized linear models. Foundations of Computational Mathematics.","mla":"Mondelli, Marco, et al. “Optimal Combination of Linear and Spectral Estimators for Generalized Linear Models.” <i>Foundations of Computational Mathematics</i>, Springer, 2021, doi:<a href=\"https://doi.org/10.1007/s10208-021-09531-x\">10.1007/s10208-021-09531-x</a>.","short":"M. Mondelli, C. Thrampoulidis, R. Venkataramanan, Foundations of Computational Mathematics (2021).","ieee":"M. Mondelli, C. Thrampoulidis, and R. Venkataramanan, “Optimal combination of linear and spectral estimators for generalized linear models,” <i>Foundations of Computational Mathematics</i>. Springer, 2021.","chicago":"Mondelli, Marco, Christos Thrampoulidis, and Ramji Venkataramanan. “Optimal Combination of Linear and Spectral Estimators for Generalized Linear Models.” <i>Foundations of Computational Mathematics</i>. Springer, 2021. 
<a href=\"https://doi.org/10.1007/s10208-021-09531-x\">https://doi.org/10.1007/s10208-021-09531-x</a>.","apa":"Mondelli, M., Thrampoulidis, C., &#38; Venkataramanan, R. (2021). Optimal combination of linear and spectral estimators for generalized linear models. <i>Foundations of Computational Mathematics</i>. Springer. <a href=\"https://doi.org/10.1007/s10208-021-09531-x\">https://doi.org/10.1007/s10208-021-09531-x</a>","ama":"Mondelli M, Thrampoulidis C, Venkataramanan R. Optimal combination of linear and spectral estimators for generalized linear models. <i>Foundations of Computational Mathematics</i>. 2021. doi:<a href=\"https://doi.org/10.1007/s10208-021-09531-x\">10.1007/s10208-021-09531-x</a>"},"year":"2021","date_updated":"2023-09-05T14:13:57Z","article_type":"original","publisher":"Springer","file_date_updated":"2021-12-13T15:47:54Z","quality_controlled":"1","title":"Optimal combination of linear and spectral estimators for generalized linear models","department":[{"_id":"MaMo"}],"article_processing_charge":"Yes (via OA deal)","date_created":"2021-11-03T10:59:08Z","publication_status":"published","author":[{"first_name":"Marco","last_name":"Mondelli","orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco","id":"27EB676C-8706-11E9-9510-7717E6697425"},{"first_name":"Christos","last_name":"Thrampoulidis","full_name":"Thrampoulidis, Christos"},{"full_name":"Venkataramanan, Ramji","last_name":"Venkataramanan","first_name":"Ramji"}],"scopus_import":"1","_id":"10211"},{"publication_identifier":{"isbn":["9781713845393"],"issn":["1049-5258"]},"oa":1,"type":"conference","date_published":"2021-12-01T00:00:00Z","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2106.02356"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","status":"public","project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"oa_version":"Preprint","month":"12","publication":"35th Conference on Neural Information Processing 
Systems","conference":{"location":"Virtual","end_date":"2021-12-14","name":"NeurIPS: Neural Information Processing Systems","start_date":"2021-12-06"},"language":[{"iso":"eng"}],"day":"01","arxiv":1,"abstract":[{"lang":"eng","text":"We study the problem of estimating a rank-$1$ signal in the presence of rotationally invariant noise-a class of perturbations more general than Gaussian noise. Principal Component Analysis (PCA) provides a natural estimator, and sharp results on its performance have been obtained in the high-dimensional regime. Recently, an Approximate Message Passing (AMP) algorithm has been proposed as an alternative estimator with the potential to improve the accuracy of PCA. However, the existing analysis of AMP requires an initialization that is both correlated with the signal and independent of the noise, which is often unrealistic in practice. In this work, we combine the two methods, and propose to initialize AMP with PCA. Our main result is a rigorous asymptotic characterization of the performance of this estimator. Both the AMP algorithm and its analysis differ from those previously derived in the Gaussian setting: at every iteration, our AMP algorithm requires a specific term to account for PCA initialization, while in the Gaussian case, PCA initialization affects only the first iteration of AMP. The proof is based on a two-phase artificial AMP that first approximates the PCA estimator and then mimics the true AMP. Our numerical simulations show an excellent agreement between AMP results and theoretical predictions, and suggest an interesting open direction on achieving Bayes-optimal performance."}],"year":"2021","citation":{"ama":"Mondelli M, Venkataramanan R. PCA initialization for approximate message passing in rotationally invariant models. In: <i>35th Conference on Neural Information Processing Systems</i>. Vol 35. Neural Information Processing Systems Foundation; 2021:29616-29629.","apa":"Mondelli, M., &#38; Venkataramanan, R. (2021). 
PCA initialization for approximate message passing in rotationally invariant models. In <i>35th Conference on Neural Information Processing Systems</i> (Vol. 35, pp. 29616–29629). Virtual: Neural Information Processing Systems Foundation.","ieee":"M. Mondelli and R. Venkataramanan, “PCA initialization for approximate message passing in rotationally invariant models,” in <i>35th Conference on Neural Information Processing Systems</i>, Virtual, 2021, vol. 35, pp. 29616–29629.","chicago":"Mondelli, Marco, and Ramji Venkataramanan. “PCA Initialization for Approximate Message Passing in Rotationally Invariant Models.” In <i>35th Conference on Neural Information Processing Systems</i>, 35:29616–29. Neural Information Processing Systems Foundation, 2021.","mla":"Mondelli, Marco, and Ramji Venkataramanan. “PCA Initialization for Approximate Message Passing in Rotationally Invariant Models.” <i>35th Conference on Neural Information Processing Systems</i>, vol. 35, Neural Information Processing Systems Foundation, 2021, pp. 29616–29.","short":"M. Mondelli, R. Venkataramanan, in:, 35th Conference on Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2021, pp. 29616–29629.","ista":"Mondelli M, Venkataramanan R. 2021. PCA initialization for approximate message passing in rotationally invariant models. 35th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 35, 29616–29629."},"date_updated":"2024-09-10T13:03:19Z","external_id":{"arxiv":["2106.02356"]},"acknowledgement":"M. Mondelli would like to thank László Erdős for helpful discussions. M. Mondelli was partially supported by the 2019 Lopez-Loreta Prize. R. 
Venkataramanan was partially supported by the Alan Turing Institute under the EPSRC grant EP/N510129/1.\r\n","volume":35,"date_created":"2022-01-03T10:50:02Z","department":[{"_id":"MaMo"}],"article_processing_charge":"No","publication_status":"published","intvolume":"        35","title":"PCA initialization for approximate message passing in rotationally invariant models","scopus_import":"1","_id":"10593","author":[{"full_name":"Mondelli, Marco","orcid":"0000-0002-3242-7020","last_name":"Mondelli","first_name":"Marco","id":"27EB676C-8706-11E9-9510-7717E6697425"},{"first_name":"Ramji","last_name":"Venkataramanan","full_name":"Venkataramanan, Ramji"}],"publisher":"Neural Information Processing Systems Foundation","quality_controlled":"1","page":"29616-29629"},{"oa":1,"publication_identifier":{"issn":["1049-5258"],"isbn":["9781713845393"]},"date_published":"2021-12-01T00:00:00Z","type":"conference","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","status":"public","main_file_link":[{"url":"https://arxiv.org/abs/2102.09671","open_access":"1"}],"month":"12","oa_version":"Preprint","project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"publication":"35th Conference on Neural Information Processing Systems","conference":{"name":"35th Conference on Neural Information Processing Systems","start_date":"2021-12-06","location":"Virtual","end_date":"2021-12-14"},"language":[{"iso":"eng"}],"abstract":[{"text":"The question of how and why the phenomenon of mode connectivity occurs in training deep neural networks has gained remarkable attention in the research community. From a theoretical perspective, two possible explanations have been proposed: (i) the loss function has connected sublevel sets, and (ii) the solutions found by stochastic gradient descent are dropout stable. While these explanations provide insights into the phenomenon, their assumptions are not always satisfied in practice. 
In particular, the first approach requires the network to have one layer with order of N neurons (N being the number of training samples), while the second one requires the loss to be almost invariant after removing half of the neurons at each layer (up to some rescaling of the remaining ones). In this work, we improve both conditions by exploiting the quality of the features at every intermediate layer together with a milder over-parameterization condition. More specifically, we show that: (i) under generic assumptions on the features of intermediate layers, it suffices that the last two hidden layers have order of √N neurons, and (ii) if subsets of features at each layer are linearly separable, then no over-parameterization is needed to show the connectivity. Our experiments confirm that the proposed condition ensures the connectivity of solutions found by stochastic gradient descent, even in settings where the previous requirements do not hold.","lang":"eng"}],"arxiv":1,"day":"01","external_id":{"arxiv":["2102.09671"]},"date_updated":"2024-09-10T13:03:19Z","year":"2021","citation":{"chicago":"Nguyen, Quynh, Pierre Bréchet, and Marco Mondelli. “When Are Solutions Connected in Deep Networks?” In <i>35th Conference on Neural Information Processing Systems</i>, Vol. 35. Neural Information Processing Systems Foundation, 2021.","ieee":"Q. Nguyen, P. Bréchet, and M. Mondelli, “When are solutions connected in deep networks?,” in <i>35th Conference on Neural Information Processing Systems</i>, Virtual, 2021, vol. 35.","apa":"Nguyen, Q., Bréchet, P., &#38; Mondelli, M. (2021). When are solutions connected in deep networks? In <i>35th Conference on Neural Information Processing Systems</i> (Vol. 35). Virtual: Neural Information Processing Systems Foundation.","ama":"Nguyen Q, Bréchet P, Mondelli M. When are solutions connected in deep networks? In: <i>35th Conference on Neural Information Processing Systems</i>. Vol 35. 
Neural Information Processing Systems Foundation; 2021.","ista":"Nguyen Q, Bréchet P, Mondelli M. 2021. When are solutions connected in deep networks? 35th Conference on Neural Information Processing Systems. 35th Conference on Neural Information Processing Systems vol. 35.","mla":"Nguyen, Quynh, et al. “When Are Solutions Connected in Deep Networks?” <i>35th Conference on Neural Information Processing Systems</i>, vol. 35, Neural Information Processing Systems Foundation, 2021.","short":"Q. Nguyen, P. Bréchet, M. Mondelli, in:, 35th Conference on Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2021."},"volume":35,"acknowledgement":"MM was partially supported by the 2019 Lopez-Loreta Prize. QN and PB acknowledge support from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement no 757983).","title":"When are solutions connected in deep networks?","intvolume":"        35","publication_status":"published","article_processing_charge":"No","department":[{"_id":"MaMo"}],"date_created":"2022-01-03T10:56:20Z","author":[{"full_name":"Nguyen, Quynh","last_name":"Nguyen","first_name":"Quynh"},{"first_name":"Pierre","last_name":"Bréchet","full_name":"Bréchet, Pierre"},{"id":"27EB676C-8706-11E9-9510-7717E6697425","full_name":"Mondelli, Marco","orcid":"0000-0002-3242-7020","last_name":"Mondelli","first_name":"Marco"}],"_id":"10594","publisher":"Neural Information Processing Systems Foundation","quality_controlled":"1"}]
