@article{23,
  abstract     = {The strong atomistic spin–orbit coupling of holes makes single-shot spin readout measurements difficult because it reduces the spin lifetimes. By integrating the charge sensor into a high bandwidth radio frequency reflectometry setup, we were able to demonstrate single-shot readout of a germanium quantum dot hole spin and measure the spin lifetime. Hole spin relaxation times of about 90 μs at 500 mT are reported, with a total readout visibility of about 70%. By analyzing separately the spin-to-charge conversion and charge readout fidelities, we have obtained insight into the processes limiting the visibilities of hole spins. The analyses suggest that high hole visibilities are feasible at realistic experimental conditions, underlying the potential of hole spins for the realization of viable qubit devices.},
  author       = {Vukušić, Lada and Kukucka, Josip and Watzinger, Hannes and Milem, Joshua M and Schäffler, Friedrich and Katsaros, Georgios},
  issn         = {1530-6984},
  journal      = {Nano Letters},
  number       = {11},
  pages        = {7141--7145},
  publisher    = {American Chemical Society},
  title        = {Single-shot readout of hole spins in {Ge}},
  doi          = {10.1021/acs.nanolett.8b03217},
  volume       = {18},
  year         = {2018},
}

@inproceedings{24,
  abstract     = {Partially-observable Markov decision processes (POMDPs) with discounted-sum payoff are a standard framework to model a wide range of problems related to decision making under uncertainty. Traditionally, the goal has been to obtain policies that optimize the expectation of the discounted-sum payoff. A key drawback of the expectation measure is that even low probability events with extreme payoff can significantly affect the expectation, and thus the obtained policies are not necessarily risk-averse. An alternate approach is to optimize the probability that the payoff is above a certain threshold, which allows obtaining risk-averse policies, but ignores optimization of the expectation. We consider the expectation optimization with probabilistic guarantee (EOPG) problem, where the goal is to optimize the expectation ensuring that the payoff is above a given threshold with at least a specified probability. We present several results on the EOPG problem, including the first algorithm to solve it.},
  author       = {Chatterjee, Krishnendu and Elgyütt, Adrian and Novotny, Petr and Rouillé, Owen},
  booktitle    = {Proceedings of the 27th International Joint Conference on Artificial Intelligence},
  location     = {Stockholm, Sweden},
  pages        = {4692--4699},
  publisher    = {IJCAI},
  title        = {Expectation optimization with probabilistic guarantees in {POMDPs} with discounted-sum objectives},
  doi          = {10.24963/ijcai.2018/652},
  volume       = {2018},
  year         = {2018},
}

@inproceedings{6664,
  abstract     = {Reed-Muller (RM) and polar codes are a class of capacity-achieving channel coding schemes with the same factor graph representation. Low-complexity decoding algorithms fall short in providing a good error-correction performance for RM and polar codes. Using the symmetric group of RM and polar codes, the specific decoding algorithm can be carried out on multiple permutations of the factor graph to boost the error-correction performance. However, this approach results in high decoding complexity. In this paper, we first derive the total number of factor graph permutations on which the decoding can be performed. We further propose a successive permutation (SP) scheme which finds the permutations on the fly, thus the decoding always progresses on a single factor graph permutation. We show that SP can be used to improve the error-correction performance of RM and polar codes under successive-cancellation (SC) and SC list (SCL) decoding, while keeping the memory requirements of the decoders unaltered. Our results for RM and polar codes of length 128 and rate 0.5 show that when SP is used and at a target frame error rate of $10^{-4}$, up to 0.5 dB and 0.1 dB improvement can be achieved for RM and polar codes respectively.},
  author       = {Hashemi, Seyyed Ali and Doan, Nghia and Mondelli, Marco and Gross, Warren},
  booktitle    = {2018 IEEE 10th International Symposium on Turbo Codes \& Iterative Information Processing},
  location     = {Hong Kong, China},
  pages        = {1--5},
  publisher    = {IEEE},
  title        = {Decoding {Reed-Muller} and polar codes by successive factor graph permutations},
  doi          = {10.1109/istc.2018.8625281},
  year         = {2018},
}

@inproceedings{6665,
  abstract     = {We prove that, at least for the binary erasure channel, the polar-coding paradigm gives rise to codes that not only approach the Shannon limit but, in fact, do so under the best possible scaling of their block length as a function of the gap to capacity. This result exhibits the first known family of binary codes that attain both optimal scaling and quasi-linear complexity of encoding and decoding. Specifically, for any fixed δ > 0, we exhibit binary linear codes that ensure reliable communication at rates within ε > 0 of capacity with block length $n = O(1/\varepsilon^{2+\delta})$, construction complexity Θ(n), and encoding/decoding complexity Θ(n log n).},
  author       = {Fazeli, Arman and Hassani, Hamed and Mondelli, Marco and Vardy, Alexander},
  booktitle    = {2018 IEEE Information Theory Workshop},
  location     = {Guangzhou, China},
  pages        = {1--5},
  publisher    = {IEEE},
  title        = {Binary linear codes with optimal scaling: {Polar} codes with large kernels},
  doi          = {10.1109/itw.2018.8613428},
  year         = {2018},
}

@article{6674,
  abstract     = {Polar codes represent one of the major recent breakthroughs in coding theory and, because of their attractive features, they have been selected for the incoming 5G standard. As such, a lot of attention has been devoted to the development of decoding algorithms with good error performance and efficient hardware implementation. One of the leading candidates in this regard is represented by successive-cancellation list (SCL) decoding. However, its hardware implementation requires a large amount of memory. Recently, a partitioned SCL (PSCL) decoder has been proposed to significantly reduce the memory consumption. In this paper, we consider the paradigm of PSCL decoding from a practical standpoint, and we provide several improvements. First, by changing the target signal-to-noise ratio and consequently modifying the construction of the code, we are able to improve the performance at no additional computational, latency, or memory cost. Second, we bridge the performance gap between SCL and PSCL decoding by introducing a generalized PSCL decoder and a layered PSCL decoder. In this way, we obtain almost the same performance of the SCL decoder with a significantly lower memory requirement, as testified by hardware implementation results. Third, we present an optimal scheme to allocate cyclic redundancy checks. Finally, we provide a lower bound on the list size that guarantees optimal maximum a posteriori performance for the binary erasure channel.},
  author       = {Hashemi, Seyyed Ali and Mondelli, Marco and Hassani, S. Hamed and Condo, Carlo and Urbanke, Rudiger L. and Gross, Warren J.},
  issn         = {1558-0857},
  journal      = {IEEE Transactions on Communications},
  number       = {9},
  pages        = {3749--3759},
  publisher    = {IEEE},
  title        = {Decoder partitioning: Towards practical list decoding of polar codes},
  doi          = {10.1109/tcomm.2018.2832207},
  volume       = {66},
  year         = {2018},
}

@inproceedings{6675,
  abstract     = {We present a coding paradigm that provides a new achievable rate for the primitive relay channel by combining compress-and-forward and decode-and-forward with a chaining construction. In the primitive relay channel model, the source broadcasts a message to the relay and to the destination; and the relay facilitates this communication by sending an additional message to the destination through a separate channel. Two well-known coding approaches for this setting are decode-and-forward and compress-and-forward: in the former, the relay decodes the message and sends some of the information to the destination; in the latter, the relay does not attempt to decode, but it sends a compressed description of the received sequence to the destination via Wyner-Ziv coding. In our scheme, we transmit over pairs of blocks and we use compress-and-forward for the first block and decode-and-forward for the second. In particular, in the first block, the relay does not attempt to decode and it sends only a part of the compressed description of the received sequence; in the second block, the relay decodes the message and sends this information plus the remaining part of the compressed sequence relative to the first block. As a result, we strictly outperform both compress-and-forward and decode-and-forward. Furthermore, this paradigm can be implemented with a low-complexity polar coding scheme that has the typical attractive features of polar codes, i.e., quasi-linear encoding/decoding complexity and super-polynomial decay of the error probability. Throughout the paper we consider as a running example the special case of the erasure relay channel and we compare the rates achievable by our proposed scheme with the existing upper and lower bounds.},
  author       = {Mondelli, Marco and Hassani, Hamed and Urbanke, Rudiger},
  booktitle    = {2018 IEEE International Symposium on Information Theory},
  issn         = {2157-8117},
  location     = {Vail, CO, United States},
  pages        = {351--355},
  publisher    = {IEEE},
  title        = {A new coding paradigm for the primitive relay channel},
  doi          = {10.1109/isit.2018.8437479},
  year         = {2018},
}

@article{6678,
  abstract     = {We survey coding techniques that enable reliable transmission at rates that approach the capacity of an arbitrary discrete memoryless channel. In particular, we take the point of view of modern coding theory and discuss how recent advances in coding for symmetric channels help provide more efficient solutions for the asymmetric case. We consider, in more detail, three basic coding paradigms. The first one is Gallager's scheme that consists of concatenating a linear code with a non-linear mapping so that the input distribution can be appropriately shaped. We explicitly show that both polar codes and spatially coupled codes can be employed in this scenario. Furthermore, we derive a scaling law between the gap to capacity, the cardinality of the input and output alphabets, and the required size of the mapper. The second one is an integrated scheme in which the code is used both for source coding, in order to create codewords distributed according to the capacity-achieving input distribution, and for channel coding, in order to provide error protection. Such a technique has been recently introduced by Honda and Yamamoto in the context of polar codes, and we show how to apply it also to the design of sparse graph codes. The third paradigm is based on an idea of Böcherer and Mathar, and separates the two tasks of source coding and channel coding by a chaining construction that binds together several codewords. We present conditions for the source code and the channel code, and we describe how to combine any source code with any channel code that fulfill those conditions, in order to provide capacity-achieving schemes for asymmetric channels. In particular, we show that polar codes, spatially coupled codes, and homophonic codes are suitable as basic building blocks of the proposed coding strategy.
Rather than focusing on the exact details of the schemes, the purpose of this tutorial is to present different coding techniques that can then be implemented with many variants. There is no absolute winner and, in order to understand the most suitable technique for a specific application scenario, we provide a detailed comparison that takes into account several performance metrics.},
  author       = {Mondelli, Marco and Hassani, Hamed and Urbanke, Rudiger},
  issn         = {0018-9448},
  journal      = {IEEE Transactions on Information Theory},
  number       = {5},
  pages        = {3371--3393},
  publisher    = {IEEE},
  title        = {How to achieve the capacity of asymmetric channels},
  doi          = {10.1109/tit.2018.2789885},
  volume       = {64},
  year         = {2018},
}

@article{67,
  abstract     = {Gene regulatory networks evolve through rewiring of individual components—that is, through changes in regulatory connections. However, the mechanistic basis of regulatory rewiring is poorly understood. Using a canonical gene regulatory system, we quantify the properties of transcription factors that determine the evolutionary potential for rewiring of regulatory connections: robustness, tunability and evolvability. In vivo repression measurements of two repressors at mutated operator sites reveal their contrasting evolutionary potential: while robustness and evolvability were positively correlated, both were in trade-off with tunability. Epistatic interactions between adjacent operators alleviated this trade-off. A thermodynamic model explains how the differences in robustness, tunability and evolvability arise from biophysical characteristics of repressor–DNA binding. The model also uncovers that the energy matrix, which describes how mutations affect repressor–DNA binding, encodes crucial information about the evolutionary potential of a repressor. The biophysical determinants of evolutionary potential for regulatory rewiring constitute a mechanistic framework for understanding network evolution.},
  author       = {Igler, Claudia and Lagator, Mato and Tkacik, Gasper and Bollback, Jonathan P and Guet, Calin C},
  journal      = {Nature Ecology and Evolution},
  number       = {10},
  pages        = {1633--1643},
  publisher    = {Nature Publishing Group},
  title        = {Evolutionary potential of transcription factors for gene regulatory rewiring},
  doi          = {10.1038/s41559-018-0651-y},
  volume       = {2},
  year         = {2018},
}

@inproceedings{6728,
  abstract     = {Polar codes are a channel coding scheme for the next generation of wireless communications standard (5G). The belief propagation (BP) decoder allows for parallel decoding of polar codes, making it suitable for high throughput applications. However, the error-correction performance of polar codes under BP decoding is far from the requirements of 5G. It has been shown that the error-correction performance of BP can be improved if the decoding is performed on multiple permuted factor graphs of polar codes. However, a different BP decoding scheduling is required for each factor graph permutation which results in the design of a different decoder for each permutation. Moreover, the selection of the different factor graph permutations is at random, which prevents the decoder to achieve a desirable error correction performance with a small number of permutations. In this paper, we first show that the permutations on the factor graph can be mapped into suitable permutations on the codeword positions. As a result, we can make use of a single decoder for all the permutations. In addition, we introduce a method to construct a set of predetermined permutations which can provide the correct codeword if the decoding fails on the original permutation. We show that for the 5G polar code of length 1024, the error-correction performance of the proposed decoder is more than 0.25 dB better than that of the BP decoder with the same number of random permutations at the frame error rate of $10^{-4}$.},
  author       = {Doan, Nghia and Hashemi, Seyyed Ali and Mondelli, Marco and Gross, Warren J.},
  booktitle    = {2018 IEEE Global Communications Conference},
  isbn         = {9781538647271},
  location     = {Abu Dhabi, United Arab Emirates},
  publisher    = {IEEE},
  title        = {On the decoding of polar codes on permuted factor graphs},
  doi          = {10.1109/glocom.2018.8647308},
  year         = {2018},
}

@article{6774,
  abstract     = {A central problem of algebraic topology is to understand the homotopy groups $\pi_d(X)$ of a topological space X. For the computational version of the problem, it is well known that there is no algorithm to decide whether the fundamental group $\pi_1(X)$ of a given finite simplicial complex X is trivial. On the other hand, there are several algorithms that, given a finite simplicial complex X that is simply connected (i.e., with $\pi_1(X)$ trivial), compute the higher homotopy group $\pi_d(X)$ for any given $d \geq 2$. However, these algorithms come with a caveat: They compute the isomorphism type of $\pi_d(X)$, $d \geq 2$ as an abstract finitely generated abelian group given by generators and relations, but they work with very implicit representations of the elements of $\pi_d(X)$. Converting elements of this abstract group into explicit geometric maps from the d-dimensional sphere $S^d$ to X has been one of the main unsolved problems in the emerging field of computational homotopy theory. Here we present an algorithm that, given a simply connected space X, computes $\pi_d(X)$ and represents its elements as simplicial maps from a suitable triangulation of the d-sphere $S^d$ to X. For fixed d, the algorithm runs in time exponential in $\mathrm{size}(X)$, the number of simplices of X. Moreover, we prove that this is optimal: For every fixed $d \geq 2$, we construct a family of simply connected spaces X such that for any simplicial map representing a generator of $\pi_d(X)$, the size of the triangulation of $S^d$ on which the map is defined, is exponential in $\mathrm{size}(X)$.},
  author       = {Filakovský, Marek and Franek, Peter and Wagner, Uli and Zhechev, Stephan Y},
  issn         = {2367-1734},
  journal      = {Journal of Applied and Computational Topology},
  number       = {3--4},
  pages        = {177--231},
  publisher    = {Springer},
  title        = {Computing simplicial representatives of homotopy group elements},
  doi          = {10.1007/s41468-018-0021-5},
  volume       = {2},
  year         = {2018},
}

@phdthesis{68,
  abstract     = {The most common assumption made in statistical learning theory is the assumption of the independent and identically distributed (i.i.d.) data. While being very convenient mathematically, it is often very clearly violated in practice. This disparity between the machine learning theory and applications underlies a growing demand in the development of algorithms that learn from dependent data and theory that can provide generalization guarantees similar to the independent situations. This thesis is dedicated to two variants of dependencies that can arise in practice. One is a dependence on the level of samples in a single learning task. Another dependency type arises in the multi-task setting when the tasks are dependent on each other even though the data for them can be i.i.d. In both cases we model the data (samples or tasks) as stochastic processes and introduce new algorithms for both settings that take into account and exploit the resulting dependencies. We prove the theoretical guarantees on the performance of the introduced algorithms under different evaluation criteria and, in addition, we complement the theoretical study by the empirical one, where we evaluate some of the algorithms on two real world datasets to highlight their practical applicability.},
  author       = {Zimin, Alexander},
  issn         = {2663-337X},
  pages        = {92},
  publisher    = {Institute of Science and Technology Austria},
  title        = {Learning from dependent data},
  doi          = {10.15479/AT:ISTA:TH1048},
  year         = {2018},
}

@phdthesis{69,
  abstract     = {A qubit, a unit of quantum information, is essentially any quantum mechanical two-level system which can be coherently controlled. Still, to be used for computation, it has to fulfill criteria. Qubits, regardless of the system in which they are realized, suffer from decoherence. This leads to loss of the information stored in the qubit. The upper bound of the time scale on which decoherence happens is set by the spin relaxation time. In this thesis I studied a two-level system consisting of a Zeeman-split hole spin confined in a quantum dot formed in a Ge hut wire. Such Ge hut wires have emerged as a promising material system for the realization of spin qubits, due to the combination of two significant properties: long spin coherence time as expected for group IV semiconductors due to the low hyperfine interaction and a strong valence band spin-orbit coupling. Here, I present how to fabricate quantum dot devices suitable for electrical transport measurements. Coupled quantum dot devices allowed the realization of a charge sensor, which is electrostatically and tunnel coupled to a quantum dot. By integrating the charge sensor into a radio-frequency reflectometry setup, I performed for the first time single-shot readout measurements of hole spins and extracted the hole spin relaxation times in Ge hut wires.},
  author       = {Vukušić, Lada},
  issn         = {2663-337X},
  pages        = {103},
  publisher    = {Institute of Science and Technology Austria},
  title        = {Charge sensing and spin relaxation times of holes in {Ge} hut wires},
  doi          = {10.15479/AT:ISTA:TH_1047},
  year         = {2018},
}

@article{690,
  abstract     = {We consider spectral properties and the edge universality of sparse random matrices, the class of random matrices that includes the adjacency matrices of the Erdős–Rényi graph model G(N, p). We prove a local law for the eigenvalue density up to the spectral edges. Under a suitable condition on the sparsity, we also prove that the rescaled extremal eigenvalues exhibit GOE Tracy–Widom fluctuations if a deterministic shift of the spectral edge due to the sparsity is included. For the adjacency matrix of the Erdős–Rényi graph this establishes the Tracy–Widom fluctuations of the second largest eigenvalue when p is much larger than N−2/3 with a deterministic shift of order (Np)−1.},
  author       = {Lee, Ji Oon and Schnelli, Kevin},
  journal      = {Probability Theory and Related Fields},
  number       = {1-2},
  pages        = {543--616},
  publisher    = {Springer},
  title        = {Local law and {Tracy–Widom} limit for sparse random matrices},
  doi          = {10.1007/s00440-017-0787-8},
  volume       = {171},
  year         = {2018},
}

@article{691,
  abstract     = {Background: Transport protein particle (TRAPP) is a multisubunit complex that regulates membrane trafficking through the Golgi apparatus. The clinical phenotype associated with mutations in various TRAPP subunits has allowed elucidation of their functions in specific tissues. The role of some subunits in human disease, however, has not been fully established, and their functions remain uncertain.

Objective: We aimed to expand the range of neurodevelopmental disorders associated with mutations in TRAPP subunits by exome sequencing of consanguineous families.

Methods: Linkage and homozygosity mapping and candidate gene analysis were used to identify homozygous mutations in families. Patient fibroblasts were used to study splicing defect and zebrafish to model the disease.

Results: We identified six individuals from three unrelated families with a founder homozygous splice mutation in TRAPPC6B, encoding a core subunit of the complex TRAPP I. Patients manifested a neurodevelopmental disorder characterised by microcephaly, epilepsy and autistic features, and showed splicing defect. Zebrafish trappc6b morphants replicated the human phenotype, displaying decreased head size and neuronal hyperexcitability, leading to a lower seizure threshold.

Conclusion: This study provides clinical and functional evidence of the role of TRAPPC6B in brain development and function.},
  author       = {Marin Valencia, Isaac and Novarino, Gaia and Johansen, Anide and Rosti, Başak and Issa, Mahmoud and Musaev, Damir and Bhat, Gifty and Scott, Eric and Silhavy, Jennifer and Stanley, Valentina and Rosti, Rasim and Gleeson, Jeremy and Imam, Farhad and Zaki, Maha and Gleeson, Joseph},
  issn         = {0022-2593},
  journal      = {Journal of Medical Genetics},
  number       = {1},
  pages        = {48--54},
  publisher    = {BMJ Publishing Group},
  title        = {A homozygous founder mutation in {TRAPPC6B} associates with a neurodevelopmental disorder characterised by microcephaly, epilepsy and autistic features},
  doi          = {10.1136/jmedgenet-2017-104627},
  volume       = {55},
  year         = {2018},
}

@article{692,
  abstract     = {We consider families of confocal conics and two pencils of Apollonian circles having the same foci. We will show that these families of curves generate trivial 3-webs and find the exact formulas describing them.},
  author       = {Akopyan, Arseniy},
  journal      = {Geometriae Dedicata},
  number       = {1},
  pages        = {55--64},
  publisher    = {Springer},
  title        = {{3-Webs} generated by confocal conics and circles},
  doi          = {10.1007/s10711-017-0265-6},
  volume       = {194},
  year         = {2018},
}

@inproceedings{6941,
  abstract     = {Bitcoin has become the most successful cryptocurrency ever deployed, and its most distinctive feature is that it is decentralized. Its underlying protocol (Nakamoto consensus) achieves this by using proof of work, which has the drawback that it causes the consumption of vast amounts of energy to maintain the ledger. Moreover, Bitcoin mining dynamics have become less distributed over time.

Towards addressing these issues, we propose SpaceMint, a cryptocurrency based on proofs of space instead of proofs of work. Miners in SpaceMint dedicate disk space rather than computation. We argue that SpaceMint’s design solves or alleviates several of Bitcoin’s issues: most notably, its large energy consumption. SpaceMint also rewards smaller miners fairly according to their contribution to the network, thus incentivizing more distributed participation.

This paper adapts proof of space to enable its use in cryptocurrency, studies the attacks that can arise against a Bitcoin-like blockchain that uses proof of space, and proposes a new blockchain format and transaction types to address these attacks. Our prototype shows that initializing 1 TB for mining takes about a day (a one-off setup cost), and miners spend on average just a fraction of a second per block mined. Finally, we provide a game-theoretic analysis modeling SpaceMint as an extensive game (the canonical game-theoretic notion for games that take place over time) and show that this stylized game satisfies a strong equilibrium notion, thereby arguing for SpaceMint ’s stability and consensus.},
  author       = {Park, Sunoo and Kwon, Albert and Fuchsbauer, Georg and Gazi, Peter and Alwen, Joel F and Pietrzak, Krzysztof Z},
  booktitle    = {22nd International Conference on Financial Cryptography and Data Security},
  isbn         = {9783662583869},
  issn         = {1611-3349},
  location     = {Nieuwpoort, Curaçao},
  pages        = {480--499},
  publisher    = {Springer Nature},
  series       = {Lecture Notes in Computer Science},
  title        = {{SpaceMint}: A cryptocurrency based on proofs of space},
  doi          = {10.1007/978-3-662-58387-6_26},
  volume       = {10957},
  year         = {2018},
}

@article{7,
  abstract     = {Animal social networks are shaped by multiple selection pressures, including the need to ensure efficient communication and functioning while simultaneously limiting disease transmission. Social animals could potentially further reduce epidemic risk by altering their social networks in the presence of pathogens, yet there is currently no evidence for such pathogen-triggered responses. We tested this hypothesis experimentally in the ant Lasius niger using a combination of automated tracking, controlled pathogen exposure, transmission quantification, and temporally explicit simulations. Pathogen exposure induced behavioral changes in both exposed ants and their nestmates, which helped contain the disease by reinforcing key transmission-inhibitory properties of the colony's contact network. This suggests that social network plasticity in response to pathogens is an effective strategy for mitigating the effects of disease in social groups.},
  author       = {Stroeymeyt, Nathalie and Grasse, Anna V and Crespi, Alessandro and Mersch, Danielle and Cremer, Sylvia and Keller, Laurent},
  issn         = {1095-9203},
  journal      = {Science},
  number       = {6417},
  pages        = {941--945},
  publisher    = {AAAS},
  title        = {Social network plasticity decreases disease transmission in a eusocial insect},
  doi          = {10.1126/science.aat4793},
  volume       = {362},
  year         = {2018},
}

@article{70,
  abstract     = {We consider the totally asymmetric simple exclusion process in a critical scaling parametrized by a≥0, which creates a shock in the particle density of order aT−1/3, T the observation time. When starting from step initial data, we provide bounds on the limiting law which in particular imply that in the double limit lima→∞limT→∞ one recovers the product limit law and the degeneration of the correlation length observed at shocks of order 1. This result is shown to apply to a general last-passage percolation model. We also obtain bounds on the two-point functions of several airy processes.},
  author       = {Nejjar, Peter},
  issn         = {1980-0436},
  journal      = {Latin American Journal of Probability and Mathematical Statistics},
  number       = {2},
  pages        = {1311--1334},
  publisher    = {Instituto Nacional de Matematica Pura e Aplicada},
  title        = {Transition to shocks in {TASEP} and decoupling of last passage times},
  doi          = {10.30757/ALEA.v15-49},
  volume       = {15},
  year         = {2018},
}

@article{703,
  abstract     = {We consider the NP-hard problem of MAP-inference for undirected discrete graphical models. We propose a polynomial time and practically efficient algorithm for finding a part of its optimal solution. Specifically, our algorithm marks some labels of the considered graphical model either as (i) optimal, meaning that they belong to all optimal solutions of the inference problem; (ii) non-optimal if they provably do not belong to any solution. With access to an exact solver of a linear programming relaxation to the MAP-inference problem, our algorithm marks the maximal possible (in a specified sense) number of labels. We also present a version of the algorithm, which has access to a suboptimal dual solver only and still can ensure the (non-)optimality for the marked labels, although the overall number of the marked labels may decrease. We propose an efficient implementation, which runs in time comparable to a single run of a suboptimal dual solver. Our method is well-scalable and shows state-of-the-art results on computational benchmarks from machine learning and computer vision.},
  author       = {Shekhovtsov, Alexander and Swoboda, Paul and Savchynskyy, Bogdan},
  issn         = {0162-8828},
  journal      = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  number       = {7},
  pages        = {1668--1682},
  publisher    = {IEEE},
  title        = {Maximum persistency via iterative relaxed inference with graphical models},
  doi          = {10.1109/TPAMI.2017.2730884},
  volume       = {40},
  year         = {2018},
}

@article{705,
  abstract     = {Although dopamine receptors D1 and D2 play key roles in hippocampal function, their synaptic localization within the hippocampus has not been fully elucidated. In order to understand precise functions of pre- or postsynaptic dopamine receptors (DRs), the development of protocols to differentiate pre- and postsynaptic DRs is essential. So far, most studies on determination and quantification of DRs did not discriminate between subsynaptic localization. Therefore, the aim of the study was to generate a robust workflow for the localization of DRs. This work provides the basis for future work on hippocampal DRs, in light that DRs may have different functions at pre- or postsynaptic sites. Synaptosomes from rat hippocampi isolated by a sucrose gradient protocol were prepared for super-resolution direct stochastic optical reconstruction microscopy (dSTORM) using Bassoon as a presynaptic zone and Homer1 as postsynaptic density marker. Direct labeling of primary validated antibodies against dopamine receptors D1 (D1R) and D2 (D2R) with Alexa Fluor 594 enabled unequivocal assignment of D1R and D2R to both, pre- and postsynaptic sites. D1R immunoreactivity clusters were observed within the presynaptic active zone as well as at perisynaptic sites at the edge of the presynaptic active zone. The results may be useful for the interpretation of previous studies and the design of future work on DRs in the hippocampus. Moreover, the reduction of the complexity of brain tissue by the use of synaptosomal preparations and dSTORM technology may represent a useful tool for synaptic localization of brain proteins.},
  author       = {Miklosi, Andras and Del Favero, Giorgia and Bulat, Tanja and Höger, Harald and Shigemoto, Ryuichi and Marko, Doris and Lubec, Gert},
  journal      = {Molecular Neurobiology},
  number       = {6},
  pages        = {4857--4869},
  publisher    = {Springer},
  title        = {Super resolution microscopical localization of dopamine receptors 1 and 2 in rat hippocampal synaptosomes},
  doi          = {10.1007/s12035-017-0688-y},
  volume       = {55},
  year         = {2018},
}

