@article{3274,
  abstract     = {A boundary element model of a tunnel running through horizontally layered soil with anisotropic material properties is presented. Since there is no analytical fundamental solution for wave propagation inside a layered orthotropic medium in 3D, the fundamental displacements and stresses have to be calculated numerically. In our model this is done in the Fourier domain with respect to space and time. The assumption of a straight tunnel with infinite extension in the x direction makes it possible to decouple the system for every wave number $k_x$, leading to a 2.5D-problem, which is suited for parallel computation. The special form of the fundamental solution, resulting from our Fourier ansatz, and the fact that the calculation of the boundary integral equation is performed in the Fourier domain, enhances the stability and efficiency of the numerical calculations.},
  author       = {Rieckh, Georg and Kreuzer, Wolfgang and Waubke, Holger and Balazs, Peter},
  journal      = {Engineering Analysis with Boundary Elements},
  number       = {6},
  pages        = {960--967},
  publisher    = {Elsevier},
  title        = {{A 2.5D-Fourier-BEM model for vibrations in a tunnel running through layered anisotropic soil}},
  doi          = {10.1016/j.enganabound.2011.12.014},
  volume       = {36},
  year         = {2012},
}

@inbook{3277,
  abstract     = {The problem of the origin of metazoa is becoming more urgent in the context of astrobiology. By now it is clear that clues to the understanding of this crucial transition in the evolution of life can arise in a fourth pathway besides the three possibilities in the quest for simplicity outlined by Bonner in his classical book. In other words, solar system exploration seems to be one way in the long-term to elucidate the simplicity of evolutionary development. We place these ideas in the context of different inheritance systems, namely the genotypic and phenotypic replicators with limited or unlimited heredity, and ask which of these can support multicellular development, and to which degree of complexity. However, the quest for evidence on the evolution of biotas from planets around other stars does not seem to be feasible with present technology with direct visualization of living organisms on exoplanets. But this may be attempted on the Galilean moons of Jupiter where there is a possibility of detecting reliable biomarkers in the next decade with the Europa Jupiter System Mission, in view of recent progress by landing micropenetrators on planetary, or satellite surfaces. Mars is a second possibility in the inner Solar System, in spite of the multiple difficulties faced by the fleet of past, present and future missions. We discuss a series of preliminary ideas for elucidating the origin of metazoan analogues with available instrumentation in potential payloads of feasible space missions to the Galilean moons.},
  author       = {de Vladar, Harold and Chela Flores, Julian},
  booktitle    = {Life on Earth and other planetary bodies},
  pages        = {387--405},
  publisher    = {Springer},
  title        = {{Can the evolution of multicellularity be anticipated in the exploration of the solar system?}},
  doi          = {10.1007/978-94-007-4966-5_22},
  volume       = {24},
  year         = {2012},
}

@inproceedings{3279,
  abstract     = {We show a hardness-preserving construction of a PRF from any length doubling PRG which improves upon known constructions whenever we can put a non-trivial upper bound $q$ on the number of queries to the PRF. Our construction requires only $O(\log q)$ invocations to the underlying PRG with each query. In comparison, the number of invocations by the best previous hardness-preserving construction (GGM using Levin's trick) is logarithmic in the hardness of the PRG. For example, starting from an exponentially secure PRG $\{0,1\}^n \rightarrow \{0,1\}^{2n}$, we get a PRF which is exponentially secure if queried at most $q = \exp(\sqrt{n})$ times and where each invocation of the PRF requires $\Theta(\sqrt{n})$ queries to the underlying PRG. This is much less than the $\Theta(n)$ required by known constructions.},
  author       = {Jain, Abhishek and Pietrzak, Krzysztof Z and Tentes, Aris},
  location     = {Taormina, Sicily, Italy},
  pages        = {369--382},
  publisher    = {Springer},
  title        = {{Hardness preserving constructions of pseudorandom functions}},
  doi          = {10.1007/978-3-642-28914-9_21},
  volume       = {7194},
  year         = {2012},
}

@inproceedings{3280,
  abstract     = {The (decisional) learning with errors problem (LWE) asks to distinguish ``noisy'' inner products of a secret vector with random vectors from uniform. The learning parities with noise problem (LPN) is the special case where the elements of the vectors are bits. In recent years, the LWE and LPN problems have found many applications in cryptography. In this paper we introduce a (seemingly) much stronger adaptive assumption, called ``subspace LWE'' (SLWE), where the adversary can learn the inner product of the secret and random vectors after they were projected into an adaptively and adversarially chosen subspace. We prove that, surprisingly, the SLWE problem mapping into subspaces of dimension d is almost as hard as LWE using secrets of length d (the other direction is trivial.) This result immediately implies that several existing cryptosystems whose security is based on the hardness of the LWE/LPN problems are provably secure in a much stronger sense than anticipated. As an illustrative example we show that the standard way of using LPN for symmetric CPA secure encryption is even secure against a very powerful class of related key attacks.},
  author       = {Pietrzak, Krzysztof Z},
  location     = {Taormina, Sicily, Italy},
  pages        = {548--563},
  publisher    = {Springer},
  title        = {{Subspace LWE}},
  doi          = {10.1007/978-3-642-28914-9_31},
  volume       = {7194},
  year         = {2012},
}

@inproceedings{3281,
  abstract     = {We consider the problem of amplifying the ``lossiness'' of functions. We say that an oracle circuit $C^*: \{0,1\}^m \rightarrow \{0,1\}^*$ amplifies relative lossiness from $\ell/n$ to $L/m$ if for every function $f:\{0,1\}^n \rightarrow \{0,1\}^n$ it holds that (1) if $f$ is injective then so is $C^f$; (2) if $f$ has image size of at most $2^{n-\ell}$, then $C^f$ has image size at most $2^{m-L}$. The question is whether such $C^*$ exists for $L/m \gg \ell/n$. This problem arises naturally in the context of cryptographic ``lossy functions,'' where the relative lossiness is the key parameter. We show that for every circuit $C^*$ that makes at most $t$ queries to $f$, the relative lossiness of $C^f$ is at most $L/m \leq \ell/n + O(\log t)/n$. In particular, no black-box method making a polynomial $t = \mathrm{poly}(n)$ number of queries can amplify relative lossiness by more than an $O(\log n)/n$ additive term. We show that this is tight by giving a simple construction (cascading with some randomization) that achieves such amplification.},
  author       = {Pietrzak, Krzysztof Z and Rosen, Alon and Segev, Gil},
  location     = {Taormina, Sicily, Italy},
  pages        = {458--475},
  publisher    = {Springer},
  title        = {{Lossy functions do not amplify well}},
  doi          = {10.1007/978-3-642-28914-9_26},
  volume       = {7194},
  year         = {2012},
}

@inproceedings{3282,
  abstract     = {Traditionally, symmetric-key message authentication codes (MACs) are easily built from pseudorandom functions (PRFs). In this work we propose a wide variety of other approaches to building efficient MACs, without going through a PRF first. In particular, unlike deterministic PRF-based MACs, where each message has a unique valid tag, we give a number of probabilistic MAC constructions from various other primitives/assumptions. Our main results are summarized as follows: We show several new probabilistic MAC constructions from a variety of general assumptions, including CCA-secure encryption, Hash Proof Systems and key-homomorphic weak PRFs. By instantiating these frameworks under concrete number theoretic assumptions, we get several schemes which are more efficient than just using a state-of-the-art PRF instantiation under the corresponding assumption. For probabilistic MACs, unlike deterministic ones, unforgeability against a chosen message attack (uf-cma) alone does not imply security if the adversary can additionally make verification queries (uf-cmva). We give an efficient generic transformation from any uf-cma secure MAC which is ``message-hiding'' into a uf-cmva secure MAC. This resolves the main open problem of Kiltz et al. from Eurocrypt'11; By using our transformation on their constructions, we get the first efficient MACs from the LPN assumption. While all our new MAC constructions immediately give efficient actively secure, two-round symmetric-key identification schemes, we also show a very simple, three-round actively secure identification protocol from any weak PRF. In particular, the resulting protocol is much more efficient than the trivial approach of building a regular PRF from a weak PRF. © 2012 International Association for Cryptologic Research.},
  author       = {Dodis, Yevgeniy and Pietrzak, Krzysztof Z and Kiltz, Eike and Wichs, Daniel},
  location     = {Cambridge, UK},
  pages        = {355--374},
  publisher    = {Springer},
  title        = {{Message authentication, revisited}},
  doi          = {10.1007/978-3-642-29011-4_22},
  volume       = {7237},
  year         = {2012},
}

@inproceedings{3284,
  abstract     = {We study the complexity of valued constraint satisfaction problems (VCSP). A problem from VCSP is characterised by a constraint language, a fixed set of cost functions over a finite domain. An instance of the problem is specified by a sum of cost functions from the language and the goal is to minimise the sum. Under the unique games conjecture, the approximability of finite-valued VCSPs is well-understood, see Raghavendra [FOCS’08]. However, there is no characterisation of finite-valued VCSPs, let alone general-valued VCSPs, that can be solved exactly in polynomial time, thus giving insights from a combinatorial optimisation perspective.
We consider the case of languages containing all possible unary cost functions. In the case of languages consisting of only {0, ∞}-valued cost functions (i.e. relations), such languages have been called conservative and studied by Bulatov [LICS’03] and recently by Barto [LICS’11]. Since we study valued languages, we call a language conservative if it contains all finite-valued unary cost functions. The computational complexity of conservative valued languages has been studied by Cohen et al. [AIJ’06] for languages over Boolean domains, by Deineko et al. [JACM’08] for {0,1}-valued languages (a.k.a Max-CSP), and by Takhanov [STACS’10] for {0,∞}-valued languages containing all finite- valued unary cost functions (a.k.a. Min-Cost-Hom).
We prove a Schaefer-like dichotomy theorem for conservative valued languages: if all cost functions in the language satisfy a certain condition (specified by a complementary combination of STP and MJN multimorphisms), then any instance can be solved in polynomial time (via a new algorithm developed in this paper), otherwise the language is NP-hard. This is the first complete complexity classification of general-valued constraint languages over non-Boolean domains. It is a common phenomenon that complexity classifications of problems over non-Boolean domains is significantly harder than the Boolean case. The polynomial-time algorithm we present for the tractable cases is a generalisation of the submodular minimisation problem and a result of Cohen et al. [TCS’08].
Our results generalise previous results by Takhanov [STACS’10] and (a subset of results) by Cohen et al. [AIJ’06] and Deineko et al. [JACM’08]. Moreover, our results do not rely on any computer-assisted search as in Deineko et al. [JACM’08], and provide a powerful tool for proving hardness of finite-valued and general-valued languages.},
  author       = {Kolmogorov, Vladimir and Živný, Stanislav},
  pages        = {750--759},
  publisher    = {SIAM},
  title        = {{The complexity of conservative valued CSPs}},
  year         = {2012},
}

@article{3289,
  abstract     = {Viral manipulation of transduction pathways associated with key cellular functions such as survival, response to microbial infection, and cytoskeleton reorganization can provide the supportive milieu for a productive infection. Here, we demonstrate that vaccinia virus (VACV) infection leads to activation of the stress-activated protein kinase (SAPK)/extracellular signal-regulated kinase (ERK) 4/7 (MKK4/7)-c-Jun N-terminal protein kinase 1/2 (JNK1/2) pathway; further, the stimulation of this pathway requires postpenetration, prereplicative events in the viral replication cycle. Although the formation of intracellular mature virus (IMV) was not affected in MKK4/7- or JNK1/2-knockout (KO) cells, we did note an accentuated deregulation of microtubule and actin network organization in infected JNK1/2-KO cells. This was followed by deregulated viral trafficking to the periphery and enhanced enveloped particle release. Furthermore, VACV infection induced alterations in the cell contractility and morphology, and cell migration was reduced in the JNK-KO cells. In addition, phosphorylation of proteins implicated with early cell contractility and cell migration, such as microtubule-associated protein 1B and paxillin, respectively, was not detected in the VACV-infected KO cells. In sum, our findings uncover a regulatory role played by the MKK4/7-JNK1/2 pathway in cytoskeleton reorganization during VACV infection.
},
  author       = {Pereira, Anna and Leite, Flávia and Brasil, Bruno and Soares Martins, Jamaria and Torres, Alice and Pimenta, Paulo and Souto Padrón, Thais and Traktman, Paula and Ferreira, Paulo and Kroon, Erna and Bonjardim, Cláudio},
  journal      = {Journal of Virology},
  number       = {1},
  pages        = {172--184},
  publisher    = {ASM},
  title        = {{A vaccinia virus-driven interplay between the MKK4/7-JNK1/2 pathway and cytoskeleton reorganization}},
  doi          = {10.1128/JVI.05638-11},
  volume       = {86},
  year         = {2012},
}

@article{330,
  abstract     = {A procedure for the continuous production of Cu$_2$ZnSnS$_4$ (CZTS) nanoparticles with controlled composition is presented. CZTS nanoparticles were prepared through the reaction of the metals' amino complexes with elemental sulfur in a continuous-flow reactor at moderate temperatures (300--330 °C). High-resolution transmission electron microscopy and X-ray diffraction analysis showed the nanocrystals to have a crystallographic structure compatible with that of the kesterite. Chemical characterization of the materials showed the presence of the four elements in each individual nanocrystal. Composition control was achieved by adjusting the solution flow rate through the reactor and the proper choice of the nominal precursor concentration within the flowing solution. Single-particle analysis revealed a composition distribution within each sample, which was optimized at the highest synthesis temperatures used.},
  author       = {Shavel, Alexey and Cadavid, Doris and Ibáñez, Maria and Carrete, Alex and Cabot, Andreu},
  journal      = {Journal of the American Chemical Society},
  number       = {3},
  pages        = {1438--1441},
  publisher    = {ACS},
  title        = {{Continuous production of Cu$_2$ZnSnS$_4$ nanocrystals in a flow reactor}},
  doi          = {10.1021/ja209688a},
  volume       = {134},
  year         = {2012},
}

@article{3310,
  abstract     = {The theory of persistent homology opens up the possibility to reason about topological features of a space or a function quantitatively and in combinatorial terms. We refer to this new angle at a classical subject within algebraic topology as a point calculus, which we present for the family of interlevel sets of a real-valued function. Our account of the subject is expository, devoid of proofs, and written for non-experts in algebraic topology.},
  author       = {Bendich, Paul and Cabello, Sergio and Edelsbrunner, Herbert},
  journal      = {Pattern Recognition Letters},
  number       = {11},
  pages        = {1436--1444},
  publisher    = {Elsevier},
  title        = {{A point calculus for interlevel set homology}},
  doi          = {10.1016/j.patrec.2011.10.007},
  volume       = {33},
  year         = {2012},
}

@article{3314,
  abstract     = {We introduce two-level discounted and mean-payoff games played by two players on a perfect-information stochastic game graph. The upper level game is a discounted or mean-payoff game and the lower level game is a (undiscounted) reachability game. Two-level games model hierarchical and sequential decision making under uncertainty across different time scales. For both discounted and mean-payoff two-level games, we show the existence of pure memoryless optimal strategies for both players and an ordered field property. We show that if there is only one player (Markov decision processes), then the values can be computed in polynomial time. It follows that whether the value of a player is equal to a given rational constant in two-level discounted or mean-payoff games can be decided in NP ∩ coNP. We also give an alternate strategy improvement algorithm to compute the value. © 2012 World Scientific Publishing Company.},
  author       = {Chatterjee, Krishnendu and Majumdar, Ritankar},
  journal      = {International Journal of Foundations of Computer Science},
  number       = {3},
  pages        = {609--625},
  publisher    = {World Scientific Publishing},
  title        = {{Discounting and averaging in games across time scales}},
  doi          = {10.1142/S0129054112400308},
  volume       = {23},
  year         = {2012},
}

@article{3317,
  abstract     = {The physical distance between presynaptic Ca$^{2+}$ channels and the Ca$^{2+}$ sensors that trigger exocytosis of neurotransmitter-containing vesicles is a key determinant of the signalling properties of synapses in the nervous system. Recent functional analysis indicates that in some fast central synapses, transmitter release is triggered by a small number of Ca$^{2+}$ channels that are coupled to Ca$^{2+}$ sensors at the nanometre scale. Molecular analysis suggests that this tight coupling is generated by protein–protein interactions involving Ca$^{2+}$ channels, Ca$^{2+}$ sensors and various other synaptic proteins. Nanodomain coupling has several functional advantages, as it increases the efficacy, speed and energy efficiency of synaptic transmission.},
  author       = {Eggermann, Emmanuel and Bucurenciu, Iancu and Goswami, Sarit and Jonas, Peter M},
  journal      = {Nature Reviews Neuroscience},
  number       = {1},
  pages        = {7--21},
  publisher    = {Nature Publishing Group},
  title        = {{Nanodomain coupling between Ca$^{2+}$ channels and sensors of exocytosis at fast mammalian synapses}},
  doi          = {10.1038/nrn3125},
  volume       = {13},
  year         = {2012},
}

@article{3331,
  abstract     = {Computing the topology of an algebraic plane curve C means computing a combinatorial graph that is isotopic to C and thus represents its topology in $\mathbb{R}^2$. We prove that, for a polynomial of degree n with integer coefficients bounded by $2^{\rho}$, the topology of the induced curve can be computed with $\tilde{O}(n^{8}(n+\rho)^{2})$ bit operations ($\tilde{O}$ indicates that we omit logarithmic factors). Our analysis improves the previous best known complexity bounds by a factor of n2. The improvement is based on new techniques to compute and refine isolating intervals for the real roots of polynomials, and on the consequent amortized analysis of the critical fibers of the algebraic curve.},
  author       = {Kerber, Michael and Sagraloff, Michael},
  journal      = {Journal of Symbolic Computation},
  number       = {3},
  pages        = {239--258},
  publisher    = {Elsevier},
  title        = {{A worst case bound for topology computation of algebraic curves}},
  doi          = {10.1016/j.jsc.2011.11.001},
  volume       = {47},
  year         = {2012},
}

@inproceedings{3341,
  abstract     = {We consider two-player stochastic games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine a probability distribution over the successor states. We also consider the important special case of turn-based stochastic games where players make moves in turns, rather than concurrently. We study concurrent games with $\omega$-regular winning conditions specified as parity objectives. The value for player 1 for a parity objective is the maximal probability with which the player can guarantee the satisfaction of the objective against all strategies of the opponent. We study the problem of continuity and robustness of the value function in concurrent and turn-based stochastic parity games with respect to imprecision in the transition probabilities. We present quantitative bounds on the difference of the value function (in terms of the imprecision of the transition probabilities) and show the value continuity for structurally equivalent concurrent games (two games are structurally equivalent if the support of the transition function is same and the probabilities differ). We also show robustness of optimal strategies for structurally equivalent turn-based stochastic parity games. Finally we show that the value continuity property breaks without the structurally equivalent assumption (even for Markov chains) and show that our quantitative bound is asymptotically optimal. Hence our results are tight (the assumption is both necessary and sufficient) and optimal (our quantitative bound is asymptotically optimal).},
  author       = {Chatterjee, Krishnendu},
  location     = {Tallinn, Estonia},
  pages        = {270--285},
  publisher    = {Springer},
  title        = {{Robustness of structurally equivalent concurrent parity games}},
  doi          = {10.1007/978-3-642-28729-9_18},
  volume       = {7213},
  year         = {2012},
}

@inproceedings{10750,
  abstract     = {The goal of this work is to study the superconducting coherence length in the fluctuation regime in cuprate superconductors. In this work we present cantilever torque magnetometry measurements of micron-size BSCCO flakes patterned with arrays of nanometer scale rings or holes. Using ultrasensitive dynamic torque magnetometry, oscillations in magnetization are observed near Tc as a function of the applied magnetic flux threading the array. Special effort was made to detect the oscillations in magnetization at temperatures above Tc, where the Nernst effect and magnetization measurements suggest the possibility of pairing. To constrain the magnitude of the coherence length in the fluctuation regime, we will present the dependence of the amplitude of the h/2e period oscillations as a function of temperature and hole size.},
  author       = {Polshyn, Hryhoriy and Budakian, Raffi},
  booktitle    = {APS March Meeting 2012},
  issn         = {0003-0503},
  location     = {Boston, MA, United States},
  number       = {1},
  publisher    = {American Physical Society},
  title        = {{Cantilever torque magnetometry study of multiply connected BSCCO arrays near $T_c$}},
  volume       = {57},
  year         = {2012},
}

@inbook{10896,
  abstract     = {Under physiological conditions the brain, via the purine salvage pathway, reuses the preformed purine bases hypoxanthine, derived from ATP degradation, and adenine (Ade), derived from polyamine synthesis, to restore its ATP pool. However, the massive degradation of ATP during ischemia, although providing valuable neuroprotective adenosine, results in the accumulation and loss of diffusible purine metabolites and thereby leads to a protracted reduction in the post-ischemic ATP pool size. In vivo, this may both limit the ability to deploy ATP-dependent reparative mechanisms and reduce the subsequent availability of adenosine, whilst in brain slices results in tissue with substantially lower levels of ATP than in vivo. In the present review, we describe the mechanisms by which brain tissue replenishes its ATP, how this can be improved with the clinically tolerated chemicals D-ribose and adenine, and the functional, and potential therapeutic, implications of doing so.},
  author       = {zur Nedden, Stephanie and Doney, Alexander S. and Frenguelli, Bruno G.},
  booktitle    = {Adenosine},
  editor       = {Masino, Susan and Boison, Detlev},
  isbn         = {9781461439028},
  pages        = {109--129},
  publisher    = {Springer},
  title        = {{The double-edged sword: Gaining Adenosine at the expense of ATP. How to balance the books}},
  doi          = {10.1007/978-1-4614-3903-5_6},
  year         = {2012},
}

@inproceedings{10903,
  abstract     = {We propose a logic-based framework for automated reasoning about sequential programs manipulating singly-linked lists and arrays with unbounded data. We introduce the logic SLAD, which allows combining shape constraints, written in a fragment of Separation Logic, with data and size constraints. We address the problem of checking the entailment between SLAD formulas, which is crucial in performing pre-post condition reasoning. Although this problem is undecidable in general for SLAD, we propose a sound and powerful procedure that is able to solve this problem for a large class of formulas, beyond the capabilities of existing techniques and tools. We prove that this procedure is complete, i.e., it is actually a decision procedure for this problem, for an important fragment of SLAD including known decidable logics. We implemented this procedure and shown its preciseness and its efficiency on a significant benchmark of formulas.},
  author       = {Bouajjani, Ahmed and Dragoi, Cezara and Enea, Constantin and Sighireanu, Mihaela},
  booktitle    = {Automated Technology for Verification and Analysis},
  isbn         = {9783642333859},
  issn         = {1611-3349},
  location     = {Thiruvananthapuram, India},
  pages        = {167--182},
  publisher    = {Springer},
  title        = {{Accurate invariant checking for programs manipulating lists and arrays with infinite data}},
  doi          = {10.1007/978-3-642-33386-6_14},
  volume       = {7561},
  year         = {2012},
}

@inproceedings{10904,
  abstract     = {Multi-dimensional mean-payoff and energy games provide the mathematical foundation for the quantitative study of reactive systems, and play a central role in the emerging quantitative theory of verification and synthesis. In this work, we study the strategy synthesis problem for games with such multi-dimensional objectives along with a parity condition, a canonical way to express ω-regular conditions. While in general, the winning strategies in such games may require infinite memory, for synthesis the most relevant problem is the construction of a finite-memory winning strategy (if one exists). Our main contributions are as follows. First, we show a tight exponential bound (matching upper and lower bounds) on the memory required for finite-memory winning strategies in both multi-dimensional mean-payoff and energy games along with parity objectives. This significantly improves the triple exponential upper bound for multi energy games (without parity) that could be derived from results in literature for games on VASS (vector addition systems with states). Second, we present an optimal symbolic and incremental algorithm to compute a finite-memory winning strategy (if one exists) in such games. Finally, we give a complete characterization of when finite memory of strategies can be traded off for randomness. In particular, we show that for one-dimension mean-payoff parity games, randomized memoryless strategies are as powerful as their pure finite-memory counterparts.},
  author       = {Chatterjee, Krishnendu and Randour, Mickael and Raskin, Jean-François},
  booktitle    = {CONCUR 2012 - Concurrency Theory},
  editor       = {Koutny, Maciej and Ulidowski, Irek},
  isbn         = {9783642329395},
  issn         = {0302-9743},
  location     = {Newcastle upon Tyne, United Kingdom},
  pages        = {115--131},
  publisher    = {Springer},
  title        = {{Strategy synthesis for multi-dimensional quantitative objectives}},
  doi          = {10.1007/978-3-642-32940-1_10},
  volume       = {7454},
  year         = {2012},
}

@inproceedings{10905,
  abstract     = {Energy games belong to a class of turn-based two-player infinite-duration games played on a weighted directed graph. It is one of the rare and intriguing combinatorial problems that lie in NP ∩ co−NP, but are not known to be in P. While the existence of polynomial-time algorithms has been a major open problem for decades, there is no algorithm that solves any non-trivial subclass in polynomial time.
In this paper, we give several results based on the weight structures of the graph. First, we identify a notion of penalty and present a polynomial-time algorithm when the penalty is large. Our algorithm is the first polynomial-time algorithm on a large class of weighted graphs. It includes several counter examples that show that many previous algorithms, such as value iteration and random facet algorithms, require at least sub-exponential time. Our main technique is developing the first non-trivial approximation algorithm and showing how to convert it to an exact algorithm. Moreover, we show that in a practical case in verification where weights are clustered around a constant number of values, the energy game problem can be solved in polynomial time. We also show that the problem is still as hard as in general when the clique-width is bounded or the graph is strongly ergodic, suggesting that restricting graph structures need not help.},
  author       = {Chatterjee, Krishnendu and Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon},
  booktitle    = {Algorithms – ESA 2012},
  isbn         = {9783642330896},
  issn         = {1611-3349},
  location     = {Ljubljana, Slovenia},
  pages        = {301--312},
  publisher    = {Springer},
  title        = {{Polynomial-time algorithms for energy games with special weight structures}},
  doi          = {10.1007/978-3-642-33090-2_27},
  volume       = {7501},
  year         = {2012},
}

@inproceedings{10906,
  abstract     = {HSF(C) is a tool that automates verification of safety and liveness properties for C programs. This paper describes the verification approach taken by HSF(C) and provides instructions on how to install and use the tool.},
  author       = {Grebenshchikov, Sergey and Gupta, Ashutosh and Lopes, Nuno P. and Popeea, Corneliu and Rybalchenko, Andrey},
  booktitle    = {Tools and Algorithms for the Construction and Analysis of Systems},
  editor       = {Flanagan, Cormac and König, Barbara},
  isbn         = {9783642287558},
  issn         = {1611-3349},
  location     = {Tallinn, Estonia},
  pages        = {549--551},
  publisher    = {Springer},
  title        = {{HSF(C): A software verifier based on Horn clauses}},
  doi          = {10.1007/978-3-642-28756-5_46},
  volume       = {7214},
  year         = {2012},
}

