@article{2010,
  abstract     = {Many algorithms for inferring causality rely heavily on the faithfulness assumption. The main justification for imposing this assumption is that the set of unfaithful distributions has Lebesgue measure zero, since it can be seen as a collection of hypersurfaces in a hypercube. However, due to sampling error the faithfulness condition alone is not sufficient for statistical estimation, and strong-faithfulness has been proposed and assumed to achieve uniform or high-dimensional consistency. In contrast to the plain faithfulness assumption, the set of distributions that is not strong-faithful has nonzero Lebesgue measure and in fact, can be surprisingly large as we show in this paper. We study the strong-faithfulness condition from a geometric and combinatorial point of view and give upper and lower bounds on the Lebesgue measure of strong-faithful distributions for various classes of directed acyclic graphs. Our results imply fundamental limitations for the PC-algorithm and potentially also for other algorithms based on partial correlation testing in the Gaussian case.},
  author       = {Uhler, Caroline and Raskutti, Garvesh and Bühlmann, Peter and Yu, Bin},
  journal      = {The Annals of Statistics},
  number       = {2},
  pages        = {436--463},
  publisher    = {Institute of Mathematical Statistics},
  title        = {{Geometry of the faithfulness assumption in causal inference}},
  doi          = {10.1214/12-AOS1080},
  volume       = {41},
  year         = {2013},
}

@inproceedings{2181,
  abstract     = {There is a trade-off between performance and correctness in implementing concurrent data structures. Better performance may be achieved at the expense of relaxing correctness, by redefining the semantics of data structures. We address such a redefinition of data structure semantics and present a systematic and formal framework for obtaining new data structures by quantitatively relaxing existing ones. We view a data structure as a sequential specification S containing all ``legal'' sequences over an alphabet of method calls. Relaxing the data structure corresponds to defining a distance from any sequence over the alphabet to the sequential specification: the k-relaxed sequential specification contains all sequences over the alphabet within distance k from the original specification. In contrast to other existing work, our relaxations are semantic (distance in terms of data structure states). As an instantiation of our framework, we present two simple yet generic relaxation schemes, called out-of-order and stuttering relaxation, along with several ways of computing distances. We show that the out-of-order relaxation, when further instantiated to stacks, queues, and priority queues, amounts to tolerating bounded out-of-order behavior, which cannot be captured by a purely syntactic relaxation (distance in terms of sequence manipulation, e.g. edit distance). We give concurrent implementations of relaxed data structures and demonstrate that bounded relaxations provide the means for trading correctness for performance in a controlled way. The relaxations are monotonic which further highlights the trade-off: increasing k increases the number of permitted sequences, which as we demonstrate can lead to better performance. Finally, since a relaxed stack or queue also implements a pool, we actually have new concurrent pool implementations that outperform the state-of-the-art ones.},
  author       = {Henzinger, Thomas A and Kirsch, Christoph and Payer, Hannes and Sezgin, Ali and Sokolova, Ana},
  booktitle    = {Proceedings of the 40th annual ACM SIGPLAN-SIGACT symposium on Principles of programming languages},
  isbn         = {978-1-4503-1832-7},
  location     = {Rome, Italy},
  pages        = {317--328},
  publisher    = {ACM},
  title        = {{Quantitative relaxation of concurrent data structures}},
  doi          = {10.1145/2429069.2429109},
  year         = {2013},
}

@inproceedings{2182,
  abstract     = {We propose a general framework for abstraction with respect to quantitative properties, such as worst-case execution time, or power consumption. Our framework provides a systematic way for counter-example guided abstraction refinement for quantitative properties. The salient aspect of the framework is that it allows anytime verification, that is, verification algorithms that can be stopped at any time (for example, due to exhaustion of memory), and report approximations that improve monotonically when the algorithms are given more time. We instantiate the framework with a number of quantitative abstractions and refinement schemes, which differ in terms of how much quantitative information they keep from the original system. We introduce both state-based and trace-based quantitative abstractions, and we describe conditions that define classes of quantitative properties for which the abstractions provide over-approximations. We give algorithms for evaluating the quantitative properties on the abstract systems. We present algorithms for counter-example based refinements for quantitative properties for both state-based and segment-based abstractions. We perform a case study on worst-case execution time of executables to evaluate the anytime verification aspect and the quantitative abstractions we proposed.},
  author       = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
  booktitle    = {Proceedings of the 40th annual ACM SIGPLAN-SIGACT symposium on Principles of programming languages},
  location     = {Rome, Italy},
  pages        = {115--128},
  publisher    = {ACM},
  title        = {{Quantitative abstraction refinement}},
  doi          = {10.1145/2429069.2429085},
  year         = {2013},
}

@inproceedings{2209,
  abstract     = {A straight skeleton is a well-known geometric structure, and several algorithms exist to construct the straight skeleton for a given polygon or planar straight-line graph. In this paper, we ask the reverse question: Given the straight skeleton (in form of a planar straight-line graph, with some rays to infinity), can we reconstruct a planar straight-line graph for which this was the straight skeleton? We show how to reduce this problem to the problem of finding a line that intersects a set of convex polygons. We can find these convex polygons and all such lines in $O(n \log n)$ time in the Real RAM computer model, where $n$ denotes the number of edges of the input graph. We also explain how our approach can be used for recognizing Voronoi diagrams of points, thereby completing a partial solution provided by Ash and Bolker in 1985.},
  author       = {Biedl, Therese and Held, Martin and Huber, Stefan},
  booktitle    = {10th International Symposium on Voronoi Diagrams in Science and Engineering ({ISVD} 2013)},
  location     = {St. Petersburg, Russia},
  pages        = {37--46},
  publisher    = {IEEE},
  title        = {{Recognizing straight skeletons and Voronoi diagrams and reconstructing their input}},
  doi          = {10.1109/ISVD.2013.11},
  year         = {2013},
}

@inproceedings{2210,
  abstract     = {A straight skeleton is a well-known geometric structure, and several algorithms exist to construct the straight skeleton for a given polygon. In this paper, we ask the reverse question: Given the straight skeleton (in form of a tree with a drawing in the plane, but with the exact position of the leaves unspecified), can we reconstruct the polygon? We show that in most cases there exists at most one polygon; in the remaining case there is an infinite number of polygons determined by one angle that can range in an interval. We can find this (set of) polygon(s) in linear time in the Real RAM computer model.},
  author       = {Biedl, Therese and Held, Martin and Huber, Stefan},
  booktitle    = {29th European Workshop on Computational Geometry},
  location     = {Braunschweig, Germany},
  pages        = {95--98},
  publisher    = {TU Braunschweig},
  title        = {{Reconstructing polygons from embedded straight skeletons}},
  year         = {2013},
}

@inproceedings{2237,
  abstract     = {We describe new extensions of the Vampire theorem prover for computing tree interpolants. These extensions generalize Craig interpolation in Vampire, and can also be used to derive sequence interpolants. We evaluated our implementation on a large number of examples over the theory of linear integer arithmetic and integer-indexed arrays, with and without quantifiers. When compared to other methods, our experiments show that some examples could only be solved by our implementation.},
  author       = {Blanc, Régis and Gupta, Ashutosh and Kovács, Laura and Kragl, Bernhard},
  booktitle    = {Logic for Programming, Artificial Intelligence, and Reasoning ({LPAR}-19)},
  location     = {Stellenbosch, South Africa},
  pages        = {173--181},
  publisher    = {Springer},
  title        = {{Tree interpolation in Vampire}},
  doi          = {10.1007/978-3-642-45221-5_13},
  volume       = {8312},
  year         = {2013},
}

@inproceedings{2238,
  abstract     = {We study the problem of achieving a given value in Markov decision processes (MDPs) with several independent discounted reward objectives. We consider a generalised version of discounted reward objectives, in which the amount of discounting depends on the states visited and on the objective. This definition extends the usual definition of discounted reward, and allows to capture the systems in which the value of different commodities diminish at different and variable rates.

We establish results for two prominent subclasses of the problem, namely state-discount models where the discount factors are only dependent on the state of the MDP (and independent of the objective), and reward-discount models where they are only dependent on the objective (but not on the state of the MDP). For the state-discount models we use a straightforward reduction to expected total reward and show that the problem whether a value is achievable can be solved in polynomial time. For the reward-discount model we show that memory and randomisation of the strategies are required, but nevertheless that the problem is decidable and it is sufficient to consider strategies which after a certain number of steps behave in a memoryless way.

For the general case, we show that when restricted to graphs (i.e. MDPs with no randomisation), pure strategies and discount factors of the form 1/n where n is an integer, the problem is in PSPACE and finite memory suffices for achieving a given value. We also show that when the discount factors are not of the form 1/n, the memory required by a strategy can be infinite.
},
  author       = {Chatterjee, Krishnendu and Forejt, Vojtěch and Wojtczak, Dominik},
  booktitle    = {Logic for Programming, Artificial Intelligence, and Reasoning ({LPAR}-19)},
  location     = {Stellenbosch, South Africa},
  pages        = {228--242},
  publisher    = {Springer},
  title        = {{Multi-objective discounted reward verification in graphs and MDPs}},
  doi          = {10.1007/978-3-642-45221-5_17},
  volume       = {8312},
  year         = {2013},
}

@inproceedings{2243,
  abstract     = {We show that modal logic over universally first-order definable classes of transitive frames is decidable. More precisely, let K be an arbitrary class of transitive Kripke frames definable by a universal first-order sentence. We show that the global and finite global satisfiability problems of modal logic over K are decidable in NP, regardless of choice of K. We also show that the local satisfiability and the finite local satisfiability problems of modal logic over K are decidable in NEXPTIME.},
  author       = {Michaliszyn, Jakub and Otop, Jan},
  booktitle    = {Computer Science Logic ({CSL} 2013)},
  location     = {Torino, Italy},
  pages        = {563--577},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Elementary modal logics over transitive structures}},
  doi          = {10.4230/LIPIcs.CSL.2013.563},
  volume       = {23},
  year         = {2013},
}

@inproceedings{2244,
  abstract     = {We consider two systems (α1,...,αm) and (β1,...,βn) of curves drawn on a compact two-dimensional surface ℳ with boundary. Each αi and each βj is either an arc meeting the boundary of ℳ at its two endpoints, or a closed curve. The αi are pairwise disjoint except for possibly sharing endpoints, and similarly for the βj. We want to ``untangle'' the βj from the αi by a self-homeomorphism of ℳ; more precisely, we seek an homeomorphism φ: ℳ → ℳ fixing the boundary of ℳ pointwise such that the total number of crossings of the αi with the φ(βj) is as small as possible. This problem is motivated by an application in the algorithmic theory of embeddings and 3-manifolds. We prove that if ℳ is planar, i.e., a sphere with h ≥ 0 boundary components (``holes''), then O(mn) crossings can be achieved (independently of h), which is asymptotically tight, as an easy lower bound shows. In general, for an arbitrary (orientable or nonorientable) surface ℳ with h holes and of (orientable or nonorientable) genus g ≥ 0, we obtain an $O((m + n)^4)$ upper bound, again independent of h and g.},
  author       = {Matoušek, Jiří and Sedgwick, Eric and Tancer, Martin and Wagner, Uli},
  booktitle    = {21st International Symposium on Graph Drawing ({GD} 2013)},
  location     = {Bordeaux, France},
  pages        = {472--483},
  publisher    = {Springer},
  title        = {{Untangling two systems of noncrossing curves}},
  doi          = {10.1007/978-3-319-03841-4_41},
  volume       = {8242},
  year         = {2013},
}

@article{2247,
  abstract     = {Cooperative behavior, where one individual incurs a cost to help another, is a wide spread phenomenon. Here we study direct reciprocity in the context of the alternating Prisoner's Dilemma. We consider all strategies that can be implemented by one and two-state automata. We calculate the payoff matrix of all pairwise encounters in the presence of noise. We explore deterministic selection dynamics with and without mutation. Using different error rates and payoff values, we observe convergence to a small number of distinct equilibria. Two of them are uncooperative strict Nash equilibria representing always-defect (ALLD) and Grim. The third equilibrium is mixed and represents a cooperative alliance of several strategies, dominated by a strategy which we call Forgiver. Forgiver cooperates whenever the opponent has cooperated; it defects once when the opponent has defected, but subsequently Forgiver attempts to re-establish cooperation even if the opponent has defected again. Forgiver is not an evolutionarily stable strategy, but the alliance, which it rules, is asymptotically stable. For a wide range of parameter values the most commonly observed outcome is convergence to the mixed equilibrium, dominated by Forgiver. Our results show that although forgiving might incur a short-term loss it can lead to a long-term gain. Forgiveness facilitates stable cooperation in the presence of exploitation and noise.},
  author       = {Zagorsky, Benjamin and Reiter, Johannes and Chatterjee, Krishnendu and Nowak, Martin},
  journal      = {PLoS One},
  number       = {12},
  publisher    = {Public Library of Science},
  title        = {{Forgiver triumphs in alternating prisoner's dilemma}},
  doi          = {10.1371/journal.pone.0080814},
  volume       = {8},
  year         = {2013},
}

@article{2256,
  abstract     = {Linked (Open) Data - bibliographic data on the Semantic Web. Report of the Working Group on Linked Data to the plenary assembly of the Austrian Library Network (translation of the title). Linked Data stands for a certain approach to publishing data on the Web. The underlying idea is to harmonise heterogeneous data sources of different origin in order to improve their accessibility and interoperability, effectively making them queryable as a big distributed database. This report summarises relevant developments in Europe as well as the Linked Data Working Group‘s strategic and technical considerations regarding the publishing of the Austrian Library Network’s (OBV’s) bibliographic datasets. It concludes with the mutual agreement that the implementation of Linked Data principles within the OBV can only be taken into consideration accompanied by a discussion about the provision of the datasets under a free license.},
  author       = {Danowski, Patrick and Goldfarb, Doron and Schaffner, Verena and Seidler, Wolfram},
  journal      = {VÖB Mitteilungen},
  number       = {3/4},
  pages        = {559--587},
  publisher    = {Verein Österreichischer Bibliothekarinnen und Bibliothekare},
  title        = {{Linked (Open) Data - Bibliographische Daten im Semantic Web}},
  volume       = {66},
  year         = {2013},
}

@inproceedings{2258,
  abstract     = {In a digital signature scheme with message recovery, rather than transmitting the message m and its signature σ, a single enhanced signature τ is transmitted. The verifier is able to recover m from τ and at the same time verify its authenticity. The two most important parameters of such a scheme are its security and overhead |τ| − |m|. A simple argument shows that for any scheme with “n bits security” |τ| − |m| ≥ n, i.e., the overhead is lower bounded by the security parameter n. Currently, the best known constructions in the random oracle model are far from this lower bound requiring an overhead of n + logq h , where q h is the number of queries to the random oracle. In this paper we give a construction which basically matches the n bit lower bound. We propose a simple digital signature scheme with n + o(logq h ) bits overhead, where q h denotes the number of random oracle queries.

Our construction works in two steps. First, we propose a signature scheme with message recovery having optimal overhead in a new ideal model, the random invertible function model. Second, we show that a four-round Feistel network with random oracles as round functions is tightly “public-indifferentiable” from a random invertible function. At the core of our indifferentiability proof is an almost tight upper bound for the expected number of edges of the densest “small” subgraph of a random Cayley graph, which may be of independent interest.
},
  author       = {Kiltz, Eike and Pietrzak, Krzysztof Z and Szegedy, Mario},
  booktitle    = {Advances in Cryptology -- {CRYPTO} 2013},
  location     = {Santa Barbara, CA, United States},
  pages        = {571--588},
  publisher    = {Springer},
  title        = {{Digital signatures with minimal overhead from indifferentiable random invertible functions}},
  doi          = {10.1007/978-3-642-40041-4_31},
  volume       = {8042},
  year         = {2013},
}

@inproceedings{2259,
  abstract     = {The learning with rounding (LWR) problem, introduced by Banerjee, Peikert and Rosen at EUROCRYPT ’12, is a variant of learning with errors (LWE), where one replaces random errors with deterministic rounding. The LWR problem was shown to be as hard as LWE for a setting of parameters where the modulus and modulus-to-error ratio are super-polynomial. In this work we resolve the main open problem and give a new reduction that works for a larger range of parameters, allowing for a polynomial modulus and modulus-to-error ratio. In particular, a smaller modulus gives us greater efficiency, and a smaller modulus-to-error ratio gives us greater security, which now follows from the worst-case hardness of GapSVP with polynomial (rather than super-polynomial) approximation factors.

As a tool in the reduction, we show that there is a “lossy mode” for the LWR problem, in which LWR samples only reveal partial information about the secret. This property gives us several interesting new applications, including a proof that LWR remains secure with weakly random secrets of sufficient min-entropy, and very simple constructions of deterministic encryption, lossy trapdoor functions and reusable extractors.

Our approach is inspired by a technique of Goldwasser et al. from ICS ’10, which implicitly showed the existence of a “lossy mode” for LWE. By refining this technique, we also improve on the parameters of that work to only requiring a polynomial (instead of super-polynomial) modulus and modulus-to-error ratio.
},
  author       = {Alwen, Joel F and Krenn, Stephan and Pietrzak, Krzysztof Z and Wichs, Daniel},
  booktitle    = {Advances in Cryptology -- {CRYPTO} 2013},
  location     = {Santa Barbara, CA, United States},
  number       = {1},
  pages        = {57--74},
  publisher    = {Springer},
  title        = {{Learning with rounding, revisited: New reduction properties and applications}},
  doi          = {10.1007/978-3-642-40041-4_4},
  volume       = {8042},
  year         = {2013},
}

@inproceedings{2260,
  abstract     = {Direct Anonymous Attestation (DAA) is one of the most complex cryptographic protocols deployed in practice. It allows an embedded secure processor known as a Trusted Platform Module (TPM) to attest to the configuration of its host computer without violating the owner’s privacy. DAA has been standardized by the Trusted Computing Group and ISO/IEC.

The security of the DAA standard and all existing schemes is analyzed in the random-oracle model. We provide the first constructions of DAA in the standard model, that is, without relying on random oracles. Our constructions use new building blocks, including the first efficient signatures of knowledge in the standard model, which have many applications beyond DAA.
},
  author       = {Bernhard, David and Fuchsbauer, Georg and Ghadafi, Essam},
  booktitle    = {Applied Cryptography and Network Security ({ACNS} 2013)},
  location     = {Banff, AB, Canada},
  pages        = {518--533},
  publisher    = {Springer},
  title        = {{Efficient signatures of knowledge and DAA in the standard model}},
  doi          = {10.1007/978-3-642-38980-1_33},
  volume       = {7954},
  year         = {2013},
}

@article{2264,
  abstract     = {Faithful progression through the cell cycle is crucial to the maintenance and developmental potential of stem cells. Here, we demonstrate that neural stem cells (NSCs) and intermediate neural progenitor cells (NPCs) employ a zinc-finger transcription factor specificity protein 2 (Sp2) as a cell cycle regulator in two temporally and spatially distinct progenitor domains. Differential conditional deletion of Sp2 in early embryonic cerebral cortical progenitors, and perinatal olfactory bulb progenitors disrupted transitions through G1, G2 and M phases, whereas DNA synthesis appeared intact. Cell-autonomous function of Sp2 was identified by deletion of Sp2 using mosaic analysis with double markers, which clearly established that conditional Sp2-null NSCs and NPCs are M phase arrested in vivo. Importantly, conditional deletion of Sp2 led to a decline in the generation of NPCs and neurons in the developing and postnatal brains. Our findings implicate Sp2-dependent mechanisms as novel regulators of cell cycle progression, the absence of which disrupts neurogenesis in the embryonic and postnatal brain.},
  author       = {Liang, Huixuan and Xiao, Guanxi and Yin, Haifeng and Hippenmeyer, Simon and Horowitz, Jonathan and Ghashghaei, Troy},
  journal      = {Development},
  number       = {3},
  pages        = {552--561},
  publisher    = {Company of Biologists},
  title        = {{Neural development is dependent on the function of specificity protein 2 in cell cycle progression}},
  doi          = {10.1242/dev.085621},
  volume       = {140},
  year         = {2013},
}

@inproceedings{2270,
  abstract     = {Representation languages for coalitional games are a key research area in algorithmic game theory. There is an inherent tradeoff between how general a language is, allowing it to capture more elaborate games, and how hard it is computationally to optimize and solve such games. One prominent such language is the simple yet expressive Weighted Graph Games (WGGs) representation (Deng and Papadimitriou 1994), which maintains knowledge about synergies between agents in the form of an edge weighted graph. We consider the problem of finding the optimal coalition structure in WGGs. The agents in such games are vertices in a graph, and the value of a coalition is the sum of the weights of the edges present between coalition members. The optimal coalition structure is a partition of the agents to coalitions, that maximizes the sum of utilities obtained by the coalitions. We show that finding the optimal coalition structure is not only hard for general graphs, but is also intractable for restricted families such as planar graphs which are amenable for many other combinatorial problems. We then provide algorithms with constant factor approximations for planar, minor-free and bounded degree graphs.},
  author       = {Bachrach, Yoram and Kohli, Pushmeet and Kolmogorov, Vladimir and Zadimoghaddam, Morteza},
  booktitle    = {Proceedings of the 27th {AAAI} Conference on Artificial Intelligence},
  location     = {Bellevue, WA, United States},
  pages        = {81--87},
  publisher    = {AAAI Press},
  title        = {{Optimal Coalition Structures in Cooperative Graph Games}},
  year         = {2013},
}

@inproceedings{2272,
  abstract     = {We consider Conditional Random Fields (CRFs) with pattern-based potentials defined on a chain. In this model the energy of a string (labeling) x1...xn is the sum of terms over intervals [i,j] where each term is non-zero only if the substring xi...xj equals a prespecified pattern α. Such CRFs can be naturally applied to many sequence tagging problems.
We present efficient algorithms for the three standard inference tasks in a CRF, namely computing (i) the partition function, (ii) marginals, and (iii) computing the MAP. Their complexities are respectively O(nL), O(nLℓmax) and O(nLmin{|D|,log(ℓmax+1)}) where L is the combined length of input patterns, ℓmax is the maximum length of a pattern, and D is the input alphabet. This improves on the previous algorithms of (Ye et al., 2009) whose complexities are respectively O(nL|D|), O(n|Γ|L2ℓ2max) and O(nL|D|), where |Γ| is the number of input patterns.
In addition, we give an efficient algorithm for sampling. Finally, we consider the case of non-positive weights. (Komodakis &amp; Paragios, 2009) gave an O(nL) algorithm for computing the MAP. We present a modification that has the same worst-case complexity but can beat it in the best case. },
  author       = {Takhanov, Rustem and Kolmogorov, Vladimir},
  booktitle    = {Proceedings of the 30th International Conference on Machine Learning ({ICML} 2013)},
  location     = {Atlanta, GA, USA},
  number       = {3},
  pages        = {145--153},
  publisher    = {ML Research Press},
  title        = {{Inference algorithms for pattern-based CRFs on sequence data}},
  volume       = {28},
  year         = {2013},
}

@techreport{2273,
  abstract     = {We propose a new family of message passing techniques for MAP estimation in graphical models which we call Sequential Reweighted Message Passing (SRMP). Special cases include well-known techniques such as Min-Sum Diffusion (MSD) and a faster Sequential Tree-Reweighted Message Passing (TRW-S). Importantly, our derivation is simpler than the original derivation of TRW-S, and does not involve a decomposition into trees. This allows easy generalizations. We present such a generalization for the case of higher-order graphical models, and test it on several real-world problems with promising results.},
  author       = {Kolmogorov, Vladimir},
  publisher    = {IST Austria},
  title        = {{Reweighted message passing revisited}},
  year         = {2013},
}

@techreport{2274,
  abstract     = {Proofs of work (PoW) have been suggested by Dwork and Naor (Crypto'92) as protection to a shared resource. The basic idea is to ask the service requestor to dedicate some non-trivial amount of computational work to every request. The original applications included prevention of spam and protection against denial of service attacks. More recently, PoWs have been used to prevent double spending in the Bitcoin digital currency system.

In this work, we put forward an alternative concept for PoWs -- so-called proofs of space (PoS), where a service requestor must dedicate a significant amount of disk space as opposed to computation. We construct secure PoS schemes in the random oracle model, using graphs with high ``pebbling complexity'' and Merkle hash-trees.},
  author       = {Dziembowski, Stefan and Faust, Sebastian and Kolmogorov, Vladimir and Pietrzak, Krzysztof Z},
  publisher    = {IST Austria},
  title        = {{Proofs of Space}},
  year         = {2013},
}

@inproceedings{2276,
  abstract     = {The problem of minimizing the Potts energy function frequently occurs in computer vision applications. One way to tackle this NP-hard problem was proposed by Kovtun [19, 20]. It identifies a part of an optimal solution by running k maxflow computations, where k is the number of labels. The number of “labeled” pixels can be significant in some applications, e.g. 50-93% in our tests for stereo. We show how to reduce the runtime to O (log k) maxflow computations (or one parametric maxflow computation). Furthermore, the output of our algorithm allows to speed-up the subsequent alpha expansion for the unlabeled part, or can be used as it is for time-critical applications. To derive our technique, we generalize the algorithm of Felzenszwalb et al. [7] for Tree Metrics . We also show a connection to k-submodular functions from combinatorial optimization, and discuss k-submodular relaxations for general energy functions.},
  author       = {Gridchyn, Igor and Kolmogorov, Vladimir},
  booktitle    = {2013 {IEEE} International Conference on Computer Vision},
  location     = {Sydney, Australia},
  pages        = {2320--2327},
  publisher    = {IEEE},
  title        = {{Potts model, parametric maxflow and k-submodular functions}},
  doi          = {10.1109/ICCV.2013.288},
  year         = {2013},
}

