@inproceedings{1175,
  abstract     = {We study space complexity and time-space trade-offs with a focus not on peak memory usage but on overall memory consumption throughout the computation.  Such a cumulative space measure was introduced for the computational model of parallel black pebbling by [Alwen and Serbinenko ’15] as a tool for obtaining results in cryptography. We consider instead the non-deterministic black-white pebble game and prove optimal cumulative space lower bounds and trade-offs, where in order to minimize pebbling time the space has to remain large during a significant fraction of the pebbling. We also initiate the study of cumulative space in proof complexity, an area where other space complexity measures have been extensively studied during the last 10–15 years. Using and extending the connection between proof complexity and pebble games in [Ben-Sasson and Nordström ’08, ’11] we obtain several strong cumulative space results for (even parallel versions of) the resolution proof system, and outline some possible future directions of study of this, in our opinion, natural and interesting space measure.},
  author       = {Alwen, Joel F and De Rezende, Susanna and Nordström, Jakob and Vinyals, Marc},
  booktitle    = {8th Innovations in Theoretical Computer Science Conference (ITCS 2017)},
  editor       = {Papadimitriou, Christos},
  issn         = {1868-8969},
  location     = {Berkeley, CA, United States},
  pages        = {38:1--38:21},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Cumulative space in black-white pebbling and resolution}},
  doi          = {10.4230/LIPIcs.ITCS.2017.38},
  volume       = {67},
  year         = {2017},
}

@inproceedings{1176,
  abstract     = {The algorithm Argon2i-B of Biryukov, Dinu and Khovratovich is currently being considered by the IRTF (Internet Research Task Force) as a new de-facto standard for password hashing. An older version (Argon2i-A) of the same algorithm was chosen as the winner of the recent Password Hashing Competition. An important competitor to Argon2i-B is the recently introduced Balloon Hashing (BH) algorithm of Corrigan-Gibbs, Boneh and Schechter. A key security desiderata for any such algorithm is that evaluating it (even using a custom device) requires a large amount of memory amortized across multiple instances. Alwen and Blocki (CRYPTO 2016) introduced a class of theoretical attacks against Argon2i-A and BH. While these attacks yield large asymptotic reductions in the amount of memory, it was not, a priori, clear if (1) they can be extended to the newer Argon2i-B, (2) the attacks are effective on any algorithm for practical parameter ranges (e.g., 1GB of memory) and (3) if they can be effectively instantiated against any algorithm under realistic hardware constraints. In this work we answer all three of these questions in the affirmative for all three algorithms. This is also the first work to analyze the security of Argon2i-B. In more detail, we extend the theoretical attacks of Alwen and Blocki (CRYPTO 2016) to the recent Argon2i-B proposal demonstrating severe asymptotic deficiencies in its security. Next we introduce several novel heuristics for improving the attack's concrete memory efficiency even when on-chip memory bandwidth is bounded. We then simulate our attacks on randomly sampled Argon2i-A, Argon2i-B and BH instances and measure the resulting memory consumption for various practical parameter ranges and for a variety of upper bounds on the amount of parallelism available to the attacker. Finally we describe, implement, and test a new heuristic for applying the Alwen-Blocki attack to functions employing a technique developed by Corrigan-Gibbs et al. for improving concrete security of memory-hard functions. We analyze the collected data and show the effects various parameters have on the memory consumption of the attack. In particular, we can draw several interesting conclusions about the level of security provided by these functions. · For the Alwen-Blocki attack to fail against practical memory parameters, Argon2i-B must be instantiated with more than 10 passes on memory - beyond the "paranoid" parameter setting in the current IRTF proposal. · The technique of Corrigan-Gibbs for improving security can also be overcome by the Alwen-Blocki attack under realistic hardware constraints. · On a positive note, both the asymptotic and concrete security of Argon2i-B seem to improve on that of Argon2i-A.},
  author       = {Alwen, Joel F and Blocki, Jeremiah},
  booktitle    = {2017 IEEE European Symposium on Security and Privacy (EuroS\&P)},
  isbn         = {978-150905761-0},
  location     = {Paris, France},
  publisher    = {IEEE},
  title        = {{Towards practical attacks on Argon2i and balloon hashing}},
  doi          = {10.1109/EuroSP.2017.47},
  year         = {2017},
}

@inproceedings{11772,
  abstract     = {A dynamic graph algorithm is a data structure that supports operations on dynamically changing graphs.},
  author       = {Henzinger, Monika H},
  booktitle    = {44th International Conference on Current Trends in Theory and Practice of Computer Science},
  isbn         = {9783319731162},
  issn         = {0302-9743},
  location     = {Krems, Austria},
  pages        = {40--44},
  publisher    = {Springer Nature},
  title        = {{The state of the art in dynamic graph algorithms}},
  doi          = {10.1007/978-3-319-73117-9_3},
  volume       = {10706},
  year         = {2017},
}

@inproceedings{1178,
  abstract     = {For any pair (X, Z) of correlated random variables we can think of Z as a randomized function of X. If the domain of Z is small, one can make this function computationally efficient by allowing it to be only approximately correct. In folklore this problem is known as simulating auxiliary inputs. This idea of simulating auxiliary information turns out to be a very useful tool, finding applications in complexity theory, cryptography, pseudorandomness and zero-knowledge. In this paper we revisit this problem, achieving the following results: (a) We present a novel boosting algorithm for constructing the simulator. This boosting proof is of independent interest, as it shows how to handle “negative mass” issues when constructing probability measures by shifting distinguishers in descent algorithms. Our technique essentially fixes the flaw in the TCC’14 paper “How to Fake Auxiliary Inputs”. (b) The complexity of our simulator is better than in previous works, including results derived from the uniform min-max theorem due to Vadhan and Zheng. To achieve (s,ϵ)-indistinguishability we need the complexity O(s·2^{5ℓ}·ϵ^{-2}) in time/circuit size, which improves previous bounds by a factor of ϵ^{-2}. In particular, we get meaningful provable security for the EUROCRYPT’09 leakage-resilient stream cipher instantiated with a standard 256-bit block cipher.},
  author       = {Skórski, Maciej},
  booktitle    = {Theory of Cryptography},
  pages        = {159--179},
  publisher    = {Springer},
  title        = {{Simulating auxiliary inputs, revisited}},
  doi          = {10.1007/978-3-662-53641-4_7},
  volume       = {9985},
  year         = {2017},
}

@inproceedings{787,
  abstract     = {Population protocols are a popular model of distributed computing, in which randomly-interacting agents with little computational power cooperate to jointly perform computational tasks. Inspired by developments in molecular computation, and in particular DNA computing, recent algorithmic work has focused on the complexity of solving simple yet fundamental tasks in the population model, such as leader election (which requires convergence to a single agent in a special "leader" state), and majority (in which agents must converge to a decision as to which of two possible initial states had higher initial count). Known results point towards an inherent trade-off between the time complexity of such algorithms, and the space complexity, i.e. size of the memory available to each agent. In this paper, we explore this trade-off and provide new upper and lower bounds for majority and leader election. First, we prove a unified lower bound, which relates the space available per node with the time complexity achievable by a protocol: for instance, our result implies that any protocol solving either of these tasks for n agents using O(log log n) states must take Ω(n/polylog n) expected time. This is the first result to characterize time complexity for protocols which employ super-constant number of states per node, and proves that fast, poly-logarithmic running times require protocols to have relatively large space costs. On the positive side, we give algorithms showing that fast, poly-logarithmic convergence time can be achieved using O(log^2 n) space per node, in the case of both tasks. Overall, our results highlight a time complexity separation between O(log log n) and O(log^2 n) state space size for both majority and leader election in population protocols, and introduce new techniques, which should be applicable more broadly.},
  author       = {Alistarh, Dan-Adrian and Aspnes, James and Eisenstat, David and Rivest, Ronald and Gelashvili, Rati},
  booktitle    = {Proceedings of the 28th Annual ACM-SIAM Symposium on Discrete Algorithms},
  pages        = {2560--2579},
  publisher    = {SIAM},
  title        = {{Time-space trade-offs in population protocols}},
  doi          = {10.1137/1.9781611974782.169},
  year         = {2017},
}

@inproceedings{788,
  abstract     = {In contrast to electronic computation, chemical computation is noisy and susceptible to a variety of sources of error, which has prevented the construction of robust complex systems. To be effective, chemical algorithms must be designed with an appropriate error model in mind. Here we consider the model of chemical reaction networks that preserve molecular count (population protocols), and ask whether computation can be made robust to a natural model of unintended “leak” reactions. Our definition of leak is motivated by both the particular spurious behavior seen when implementing chemical reaction networks with DNA strand displacement cascades, as well as the unavoidable side reactions in any implementation due to the basic laws of chemistry. We develop a new “Robust Detection” algorithm for the problem of fast (logarithmic time) single molecule detection, and prove that it is robust to this general model of leaks. Besides potential applications in single molecule detection, the error-correction ideas developed here might enable a new class of robust-by-design chemical algorithms. Our analysis is based on a non-standard hybrid argument, combining ideas from discrete analysis of population protocols with classic Markov chain techniques.},
  author       = {Alistarh, Dan-Adrian and Dudek, Bartłomiej and Kosowski, Adrian and Soloveichik, David and Uznański, Przemysław},
  booktitle    = {DNA Computing and Molecular Programming},
  pages        = {155--171},
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  title        = {{Robust detection in leak-prone population protocols}},
  doi          = {10.1007/978-3-319-66799-7_11},
  volume       = {10467},
  year         = {2017},
}

@inproceedings{789,
  abstract     = {The problem of efficient concurrent memory reclamation in unmanaged languages such as C or C++ is one of the major challenges facing the parallelization of billions of lines of legacy code. Garbage collectors for C/C++ can be inefficient; thus, programmers are often forced to use finely-crafted concurrent memory reclamation techniques. These techniques can provide good performance, but require considerable programming effort to deploy, and have strict requirements, allowing the programmer very little room for error. In this work, we present Forkscan, a new conservative concurrent memory reclamation scheme which is fully automatic and surprisingly scalable. Forkscan's semantics place it between automatic garbage collectors (it requires the programmer to explicitly retire nodes before they can be reclaimed), and concurrent memory reclamation techniques (as it does not assume that nodes are completely unlinked from the data structure for correctness). Forkscan's implementation exploits these new semantics for efficiency: we leverage parallelism and optimized implementations of signaling and copy-on-write in modern operating systems to efficiently obtain and process consistent snapshots of memory that can be scanned concurrently with the normal program operation. Empirical evaluation on a range of classical concurrent data structure microbenchmarks shows that Forkscan can preserve the scalability of the original code, while maintaining an order of magnitude lower latency than automatic garbage collection, and demonstrating competitive performance with finely crafted memory reclamation techniques.},
  author       = {Alistarh, Dan-Adrian and Leiserson, William and Matveev, Alexander and Shavit, Nir},
  booktitle    = {Proceedings of the 12th European Conference on Computer Systems (EuroSys 2017)},
  pages        = {483--498},
  publisher    = {ACM},
  title        = {{Forkscan: Conservative memory reclamation for modern operating systems}},
  doi          = {10.1145/3064176.3064214},
  year         = {2017},
}

@inproceedings{790,
  abstract     = {Stochastic gradient descent (SGD) is a commonly used algorithm for training linear machine learning models. Based on vector algebra, it benefits from the inherent parallelism available in an FPGA. In this paper, we first present a single-precision floating-point SGD implementation on an FPGA that provides similar performance as a 10-core CPU. We then adapt the design to make it capable of processing low-precision data. The low-precision data is obtained from a novel compression scheme - called stochastic quantization, specifically designed for machine learning applications. We test both full-precision and low-precision designs on various regression and classification data sets. We achieve up to an order of magnitude training speedup when using low-precision data compared to a full-precision SGD on the same FPGA and a state-of-the-art multi-core solution, while maintaining the quality of training. We open source the designs presented in this paper.},
  author       = {Kara, Kaan and Alistarh, Dan-Adrian and Alonso, Gustavo and Mutlu, Onur and Zhang, Ce},
  booktitle    = {25th IEEE International Symposium on Field-Programmable Custom Computing Machines (FCCM 2017)},
  pages        = {160--167},
  publisher    = {IEEE},
  title        = {{FPGA-accelerated dense linear machine learning: A precision-convergence trade-off}},
  doi          = {10.1109/FCCM.2017.39},
  year         = {2017},
}

@inproceedings{791,
  abstract     = {Consider the following random process: we are given n queues, into which elements of increasing labels are inserted uniformly at random. To remove an element, we pick two queues at random, and remove the element of lower label (higher priority) among the two. The cost of a removal is the rank of the label removed, among labels still present in any of the queues, that is, the distance from the optimal choice at each step. Variants of this strategy are prevalent in state-of-the-art concurrent priority queue implementations. Nonetheless, it is not known whether such implementations provide any rank guarantees, even in a sequential model. We answer this question, showing that this strategy provides surprisingly strong guarantees: Although the single-choice process, where we always insert and remove from a single randomly chosen queue, has degrading cost, going to infinity as we increase the number of steps, in the two choice process, the expected rank of a removed element is O(n) while the expected worst-case cost is O(n log n). These bounds are tight, and hold irrespective of the number of steps for which we run the process. The argument is based on a new technical connection between "heavily loaded" balls-into-bins processes and priority scheduling. Our analytic results inspire a new concurrent priority queue implementation, which improves upon the state of the art in terms of practical performance.},
  author       = {Alistarh, Dan-Adrian and Kopinsky, Justin and Li, Jerry and Nadiradze, Giorgi},
  booktitle    = {Proceedings of the ACM Symposium on Principles of Distributed Computing},
  isbn         = {978-145034992-5},
  location     = {Washington, DC, USA},
  pages        = {283--292},
  publisher    = {ACM},
  title        = {{The power of choice in priority scheduling}},
  doi          = {10.1145/3087801.3087810},
  year         = {2017},
}

@article{792,
  abstract     = {The chaotic dynamics of low-dimensional systems, such as Lorenz or Rössler flows, is guided by the infinity of periodic orbits embedded in their strange attractors. Whether this is also the case for the infinite-dimensional dynamics of Navier–Stokes equations has long been speculated, and is a topic of ongoing study. Periodic and relative periodic solutions have been shown to be involved in transitions to turbulence. Their relevance to turbulent dynamics – specifically, whether periodic orbits play the same role in high-dimensional nonlinear systems like the Navier–Stokes equations as they do in lower-dimensional systems – is the focus of the present investigation. We perform here a detailed study of pipe flow relative periodic orbits with energies and mean dissipations close to turbulent values. We outline several approaches to reduction of the translational symmetry of the system. We study pipe flow in a minimal computational cell at Re = 2500, and report a library of invariant solutions found with the aid of the method of slices. Detailed study of the unstable manifolds of a sample of these solutions is consistent with the picture that relative periodic orbits are embedded in the chaotic saddle and that they guide the turbulent dynamics.},
  author       = {Budanur, Nazmi B and Short, Kimberly and Farazmand, Mohammad and Willis, Ashley and Cvitanović, Predrag},
  issn         = {0022-1120},
  journal      = {Journal of Fluid Mechanics},
  pages        = {274--301},
  publisher    = {Cambridge University Press},
  title        = {{Relative periodic orbits form the backbone of turbulent pipe flow}},
  doi          = {10.1017/jfm.2017.699},
  volume       = {833},
  year         = {2017},
}

@article{793,
  abstract     = {Let P be a finite point set in the plane. A c-ordinary triangle in P is a subset of P consisting of three non-collinear points such that each of the three lines determined by the three points contains at most c points of P. Motivated by a question of Erdős, and answering a question of de Zeeuw, we prove that there exists a constant c > 0 such that P contains a c-ordinary triangle, provided that P is not contained in the union of two lines. Furthermore, the number of c-ordinary triangles in P is Ω(|P|).},
  author       = {Fulek, Radoslav and Mojarrad, Hossein and Naszódi, Márton and Solymosi, József and Stich, Sebastian and Szedlák, May},
  issn         = {0925-7721},
  journal      = {Computational Geometry: Theory and Applications},
  pages        = {28--31},
  publisher    = {Elsevier},
  title        = {{On the existence of ordinary triangles}},
  doi          = {10.1016/j.comgeo.2017.07.002},
  volume       = {66},
  year         = {2017},
}

@article{794,
  abstract     = {We show that c-planarity is solvable in quadratic time for flat clustered graphs with three clusters if the combinatorial embedding of the underlying graph is fixed. In simpler graph-theoretical terms our result can be viewed as follows. Given a graph G with the vertex set partitioned into three parts embedded on a 2-sphere, our algorithm decides if we can augment G by adding edges without creating an edge-crossing so that in the resulting spherical graph the vertices of each part induce a connected sub-graph. We proceed by a reduction to the problem of testing the existence of a perfect matching in planar bipartite graphs. We formulate our result in a slightly more general setting of cyclic clustered graphs, i.e., the simple graph obtained by contracting each cluster, where we disregard loops and multi-edges, is a cycle.},
  author       = {Fulek, Radoslav},
  issn         = {0925-7721},
  journal      = {Computational Geometry: Theory and Applications},
  pages        = {1--13},
  publisher    = {Elsevier},
  title        = {{C-planarity of embedded cyclic c-graphs}},
  doi          = {10.1016/j.comgeo.2017.06.016},
  volume       = {66},
  year         = {2017},
}

@article{795,
  abstract     = {We introduce a common generalization of the strong Hanani–Tutte theorem and the weak Hanani–Tutte theorem: if a graph G has a drawing D in the plane where every pair of independent edges crosses an even number of times, then G has a planar drawing preserving the rotation of each vertex whose incident edges cross each other evenly in D. The theorem is implicit in the proof of the strong Hanani–Tutte theorem by Pelsmajer, Schaefer and Štefankovič. We give a new, somewhat simpler proof.},
  author       = {Fulek, Radoslav and Kynčl, Jan and Pálvölgyi, Dömötör},
  issn         = {1077-8926},
  journal      = {Electronic Journal of Combinatorics},
  number       = {3},
  publisher    = {International Press},
  title        = {{Unified Hanani--Tutte theorem}},
  doi          = {10.37236/6663},
  volume       = {24},
  year         = {2017},
}

@article{796,
  abstract     = {We present the fabrication and characterization of an aluminum transmon qubit on a silicon-on-insulator substrate. Key to the qubit fabrication is the use of an anhydrous hydrofluoric vapor process which selectively removes the lossy silicon oxide buried underneath the silicon device layer. For a 5.6 GHz qubit measured dispersively by a 7.1 GHz resonator, we find T1 = 3.5 μs and T2* = 2.2 μs. This process in principle permits the co-fabrication of silicon photonic and mechanical elements, providing a route towards chip-scale integration of electro-opto-mechanical transducers for quantum networking of superconducting microwave quantum circuits. The additional processing steps are compatible with established fabrication techniques for aluminum transmon qubits on silicon.},
  author       = {Keller, Andrew J and Dieterle, Paul and Fang, Michael and Berger, Brett and Fink, Johannes M and Painter, Oskar},
  issn         = {0003-6951},
  journal      = {Applied Physics Letters},
  number       = {4},
  publisher    = {American Institute of Physics},
  title        = {{Al transmon qubits on silicon on insulator for quantum device integration}},
  doi          = {10.1063/1.4994661},
  volume       = {111},
  year         = {2017},
}

@article{797,
  abstract     = {Phasenübergänge helfen beim Verständnis von Vielteilchensystemen in der Festkörperphysik und Fluiddynamik bis hin zur Teilchenphysik. Unserer internationalen Kollaboration ist es gelungen, einen neuartigen Phasenübergang in einem Quantensystem zu beobachten [1]. In einem Mikrowellenresonator konnte erstmals die spontane Zustandsänderung von undurchsichtig zu transparent nachgewiesen werden.},
  author       = {Fink, Johannes M},
  journal      = {Physik in unserer Zeit},
  number       = {3},
  pages        = {111--113},
  publisher    = {Wiley},
  title        = {{Photonenblockade aufgelöst}},
  doi          = {10.1002/piuz.201770305},
  volume       = {48},
  year         = {2017},
}

@article{798,
  abstract     = {Nonreciprocal circuit elements form an integral part of modern measurement and communication systems. Mathematically they require breaking of time-reversal symmetry, typically achieved using magnetic materials and more recently using the quantum Hall effect, parametric permittivity modulation or Josephson nonlinearities. Here we demonstrate an on-chip magnetic-free circulator based on reservoir-engineered electromechanical interactions. Directional circulation is achieved with controlled phase-sensitive interference of six distinct electro-mechanical signal conversion paths. The presented circulator is compact, its silicon-on-insulator platform is compatible with both superconducting qubits and silicon photonics, and its noise performance is close to the quantum limit. With a high dynamic range, a tunable bandwidth of up to 30 MHz and an in situ reconfigurability as beam splitter or wavelength converter, it could pave the way for superconducting qubit processors with multiplexed on-chip signal processing and readout.},
  author       = {Barzanjeh, Shabir and Wulf, Matthias and Peruzzo, Matilda and Kalaee, Mahmoud and Dieterle, Paul and Painter, Oskar and Fink, Johannes M},
  issn         = {2041-1723},
  journal      = {Nature Communications},
  number       = {1},
  publisher    = {Nature Publishing Group},
  title        = {{Mechanical on-chip microwave circulator}},
  doi          = {10.1038/s41467-017-01304-x},
  volume       = {8},
  year         = {2017},
}

@inbook{7980,
  abstract     = {In this part, the use of polysaccharides, either directly through composite approaches, or by carbonization will be described. In many cases, materials are obtained which are competitive in terms of capacitance and cycle lifetime. The following part will focus mainly on cellulosic composites with conductive polymers since cellulose is most abundant and therefore has attracted much more research interest in this field whereas in the second part also other polysaccharides, such as chitin, xylans, alginates, pectins, dextrans and carrageenans have been used in carbonization experiments.},
  author       = {Yee Liew, Soon and Thielemans, Wim and Freunberger, Stefan Alexander and Spirk, Stefan},
  booktitle    = {Polysaccharide Based Supercapacitors},
  editor       = {Yee Liew, Soon and Thielemans, Wim and Freunberger, Stefan Alexander and Spirk, Stefan},
  isbn         = {9783319507538},
  issn         = {2191-5407},
  pages        = {15--53},
  publisher    = {Springer Nature},
  title        = {{Polysaccharides in supercapacitors}},
  doi          = {10.1007/978-3-319-50754-5_2},
  year         = {2017},
}

@article{7981,
  abstract     = {Aprotische Natrium-O2-Batterien basieren auf der reversiblen Bildung und Auflösung von Natriumsuperoxid (NaO2) während des Zellbetriebs. Nebenreaktionen des Elektrolyten und der Elektrode mit dem stark nukleophilen und basischen NaO2 führen zu mangelhafter Zyklenstabilität. Seine Reaktivität allein kann die Nebenreaktionen und schlechte Reversibilität jedoch nicht schlüssig erklären. Hier wird gezeigt, dass Singulett-Sauerstoff (1O2) in allen Phasen des Betriebs entsteht und eine Hauptursache für Nebenreaktionen ist. 1O2 wurde in situ und ex situ mit einem 1O2-Fänger detektiert, der schnell und selektiv ein Addukt mit 1O2 bildet. Mechanistisch betrachtet entsteht 1O2 entweder durch protonenunterstützte Disproportionierung von Superoxid während des Entladens, Lagerns und Ladens unter ca. 3.3 V oder durch direkte elektrochemische 1O2-Entwicklung über ca. 3.3 V. Spuren von Wasser ermöglichen hohe Kapazitäten, beschleunigen aber auch Nebenreaktionen. Daher muss das hochreaktive 1O2 unbedingt kontrolliert werden, um die Zelle reversibel zu betreiben.},
  author       = {Schafzahl, Lukas and Mahne, Nika and Schafzahl, Bettina and Wilkening, Martin and Slugovc, Christian and Borisov, Sergey M. and Freunberger, Stefan Alexander},
  issn         = {0044-8249},
  journal      = {Angewandte Chemie},
  number       = {49},
  pages        = {15934--15938},
  publisher    = {Wiley},
  title        = {{Singulett-Sauerstoff in der aprotischen Natrium-O2-Batterie}},
  doi          = {10.1002/ange.201709351},
  volume       = {129},
  year         = {2017},
}

@article{7982,
  abstract     = {Beyond-intercalation batteries promise a step-change in energy storage compared to intercalation-based lithium-ion and sodium-ion batteries. However, only performance metrics that include all cell components and operation parameters can tell whether a true advance over intercalation batteries has been achieved.},
  author       = {Freunberger, Stefan Alexander},
  issn         = {2058-7546},
  journal      = {Nature Energy},
  number       = {7},
  pages        = {17091},
  publisher    = {Springer Nature},
  title        = {{True performance metrics in beyond-intercalation batteries}},
  doi          = {10.1038/nenergy.2017.91},
  volume       = {2},
  year         = {2017},
}

@article{7986,
  author       = {Mahne, Nika and Schafzahl, Bettina and Leypold, Christian and Leypold, Mario and Grumm, Sandra and Leitgeb, Anita and Strohmeier, Gernot A. and Wilkening, Martin and Fontaine, Olivier and Kramer, Denis and Slugovc, Christian and Borisov, Sergey M. and Freunberger, Stefan Alexander},
  issn         = {2058-7546},
  journal      = {Nature Energy},
  number       = {5},
  pages        = {17036},
  publisher    = {Springer Nature},
  title        = {{Singlet oxygen generation as a major cause for parasitic reactions during cycling of aprotic lithium–oxygen batteries}},
  doi          = {10.1038/nenergy.2017.36},
  volume       = {2},
  year         = {2017},
}

