@article{1051,
  abstract     = {We demonstrate the temporal Talbot effect for trapped matter waves using ultracold atoms in an optical lattice. We investigate the phase evolution of an array of essentially non-interacting matter waves and observe matter-wave collapse and revival in the form of a Talbot interference pattern. By using long expansion times, we image momentum space with sub-recoil resolution, allowing us to observe fractional Talbot fringes up to tenth order.},
  author       = {Mark, Manfred and Haller, Elmar and Danzl, Johann G and Lauber, Katharina and Gustavsson, Mattias and Nägerl, Hanns},
  journal      = {New Journal of Physics},
  publisher    = {IOP Publishing Ltd.},
  title        = {{Demonstration of the temporal matter-wave Talbot effect for trapped matter waves}},
  doi          = {10.1088/1367-2630/13/8/085008},
  volume       = {13},
  year         = {2011},
}

@article{1052,
  abstract     = {The present paper aims at finding optimal parameters for trapping of Cs$_2$ molecules in optical lattices, with the perspective of creating a quantum degenerate gas of ground-state molecules. We have calculated dynamic polarizabilities of Cs$_2$ molecules subject to an oscillating electric field, using accurate potential curves and electronic transition dipole moments. We show that for some particular wavelengths of the optical lattice, called ``magic wavelengths'', the polarizability of the ground-state molecules is equal to the one of a Feshbach molecule. As the creation of the sample of ground-state molecules relies on an adiabatic population transfer from weakly-bound molecules created on a Feshbach resonance, such a coincidence ensures that both the initial and final states are favorably trapped by the lattice light, allowing optimized transfer in agreement with the experimental observation.},
  author       = {Vexiau, Romain and Bouloufa, Nadia and Aymar, Mireille and Danzl, Johann G and Mark, Manfred and Nägerl, Hanns and Dulieu, Olivier},
  journal      = {European Physical Journal D},
  number       = {1-2},
  pages        = {243--250},
  publisher    = {Springer},
  title        = {{Optimal trapping wavelengths of Cs$_2$ molecules in an optical lattice}},
  doi          = {10.1140/epjd/e2011-20085-4},
  volume       = {65},
  year         = {2011},
}

@article{1053,
  abstract     = {We perform precision measurements on a Mott-insulator quantum state of ultracold atoms with tunable interactions. We probe the dependence of the superfluid-to-Mott-insulator transition on the interaction strength and explore the limits of the standard Bose-Hubbard model description. By tuning the on-site interaction energies to values comparable to the interband separation, we are able to quantitatively measure number-dependent shifts in the excitation spectrum caused by effective multibody interactions.},
  author       = {Mark, Manfred and Haller, Elmar and Lauber, Katharina and Danzl, Johann G and Daley, Andrew and Nägerl, Hanns},
  journal      = {Physical Review Letters},
  number       = {17},
  publisher    = {American Physical Society},
  title        = {{Precision measurements on a tunable Mott insulator of ultracold atoms}},
  doi          = {10.1103/PhysRevLett.107.175301},
  volume       = {107},
  year         = {2011},
}

@article{1054,
  abstract     = {We investigate local three-body correlations for bosonic particles in three dimensions and one dimension as a function of the interaction strength. The three-body correlation function g(3) is determined by measuring the three-body recombination rate in an ultracold gas of Cs atoms. In three dimensions, we measure the dependence of g(3) on the gas parameter in a BEC, finding good agreement with the theoretical prediction accounting for beyond-mean-field effects. In one dimension, we observe a reduction of g(3) by several orders of magnitude upon increasing interactions from the weakly interacting BEC to the strongly interacting Tonks-Girardeau regime, in good agreement with predictions from the Lieb-Liniger model for all strengths of interaction.},
  author       = {Haller, Elmar and Rabie, Mahmoud and Mark, Manfred and Danzl, Johann G and Hart, Russell and Lauber, Katharina and Pupillo, Guido and Nägerl, Hanns},
  journal      = {Physical Review Letters},
  number       = {23},
  publisher    = {American Physical Society},
  title        = {{Three-body correlation functions and recombination rates for bosons in three dimensions and one dimension}},
  doi          = {10.1103/PhysRevLett.107.230404},
  volume       = {107},
  year         = {2011},
}

@inproceedings{11864,
  abstract     = {Auctions are widely used on the Web. Applications range from internet advertising to platforms such as eBay. In most of these applications the auctions in use are single/multi-item auctions with unit demand. The main drawback of standard mechanisms for this type of auctions, such as VCG and GSP, is the limited expressiveness that they offer to the bidders. The General Auction Mechanism (GAM) of [1] is taking a first step towards addressing the problem of limited expressiveness by computing a bidder optimal, envy free outcome for linear utility functions with identical slopes and a single discontinuity per bidder-item pair. We show that in many practical situations this does not suffice to adequately model the preferences of the bidders, and we overcome this problem by presenting the first mechanism for piece-wise linear utility functions with non-identical slopes and multiple discontinuities. Our mechanism runs in polynomial time. Like GAM it is incentive compatible for inputs that fulfill a certain non-degeneracy requirement, but our requirement is more general than the requirement of GAM. For discontinuous utility functions that are non-degenerate as well as for continuous utility functions the outcome of our mechanism is a competitive equilibrium. We also show how our mechanism can be used to compute approximately bidder optimal, envy free outcomes for a general class of continuous utility functions via piece-wise linear approximation. Finally, we prove hardness results for even more expressive settings.},
  author       = {Dütting, Paul and Henzinger, Monika H and Weber, Ingmar},
  booktitle    = {Proceedings of the 20th international conference on World wide web},
  isbn         = {978-1-4503-0632-4},
  location     = {Hyderabad, India},
  pages        = {127--136},
  publisher    = {Association for Computing Machinery},
  title        = {{An expressive mechanism for auctions on the web}},
  doi          = {10.1145/1963405.1963427},
  year         = {2011},
}

@article{12649,
  abstract     = {Physically based hydrological models describe natural processes more accurately than conceptual models but require extensive data sets to produce accurate results. To identify the value of different data sets for improving the performance of the distributed hydrological model TOPKAPI we combine a multivariable validation technique with Monte Carlo simulations. The study is carried out in the snow and ice-dominated Rhonegletscher basin, as these types of mountainous basins are generally the most critical with respect to data availability and sensitivity to climate fluctuations. Each observational data set is used individually and in combination with the other data sets to determine a subset of best parameter combinations out of 10,000 Monte Carlo runs performed with randomly generated parameter sets. We validate model results against discharge, glacier mass balance, and satellite snow cover images for a 14 year time period (1994–2007). While the use of all data sets combined provides the best overall model performance (defined by the concurrent best agreement of simulated discharge, snow cover and mass balance with their respective measurements), the use of one or two variables for constraining the model results in poorer performance. Using only one data set for constraining the model, glacier mass balance proved to be the most efficient observation leading to the best overall model performance. Our main result is that a combination of discharge and satellite snow cover images is best for improving model performance, since the volumetric information of discharge data and the spatial information of snow cover images are complementary.},
  author       = {Finger, David and Pellicciotti, Francesca and Konz, Markus and Rimkus, Stefan and Burlando, Paolo},
  issn         = {0043-1397},
  journal      = {Water Resources Research},
  number       = {7},
  publisher    = {American Geophysical Union},
  title        = {{The value of glacier mass balance, satellite snow cover images, and hourly discharge for improving the performance of a physically based distributed hydrological model}},
  doi          = {10.1029/2010wr009824},
  volume       = {47},
  year         = {2011},
}

@inbook{12650,
  abstract     = {Streamflow is a hydrological variable measured at a defined river cross-section; it spatially integrates the runoff generating processes in the contributing watershed, including precipitation and air temperature. Trends in streamflow are progressive changes in the time series of streamflow that can be detected with statistical methods and their statistical significance can be assessed. Mountainous regions are particularly vulnerable to streamflow change because of their high specific runoff and the sensitivity to the distribution of precipitation and air temperature, and the processes of snow accumulation and melt.},
  author       = {Molnar, Peter and Burlando, Paolo and Pellicciotti, Francesca},
  booktitle    = {Encyclopedia of Snow, Ice and Glaciers},
  editor       = {Singh, Vijay and Singh, Pratap and Haritashya, Umesh},
  isbn         = {978-90-481-2641-5},
  issn         = {1871-756X},
  pages        = {1084--1089},
  publisher    = {Springer Nature},
  title        = {{Streamflow Trends in Mountainous Regions}},
  doi          = {10.1007/978-90-481-2642-2_543},
  year         = {2011},
}

@article{12651,
  abstract     = {Temperature data from three Automatic Weather Stations and twelve Temperature Loggers are used to investigate the spatiotemporal variability of temperature over a glacier, its main atmospheric controls, the suitability of extrapolation techniques and their effect on melt modeling. We use data collected on Juncal Norte Glacier, central Chile, during one ablation season. We examine temporal and spatial variability in lapse rates (LRs), together with alternative statistical interpolation methods. The main control over the glacier thermal regime is the development of a katabatic boundary layer (KBL). Katabatic wind occurs at night and in the morning and is eroded in the afternoon. LRs reveal strong diurnal variability, with steeper LRs during the day when the katabatic wind weakens and shallower LRs during the night and morning. We suggest that temporally variable LRs should be used to account for the observed change. They tend to be steeper than equivalent constant LRs, and therefore result in a reduction in simulated melt compared to use of constant LRs when extrapolating from lower to higher elevations. In addition to the temporal variability, the temperature-elevation relationship varies also in space. Differences are evident between local LRs and including such variability in melt modeling affects melt simulations. Extrapolation methods based on the spatial variability of the observations after removal of the elevation trend, such as Inverse Distance Weighting or Kriging, do not seem necessary for simulations of gridded temperature data over a glacier.},
  author       = {Petersen, L. and Pellicciotti, Francesca},
  issn         = {0148-0227},
  journal      = {Journal of Geophysical Research: Atmospheres},
  keywords     = {Paleontology, Space and Planetary Science, Earth and Planetary Sciences (miscellaneous), Atmospheric Science, Earth-Surface Processes, Geochemistry and Petrology, Soil Science, Water Science and Technology, Ecology, Aquatic Science, Forestry, Oceanography, Geophysics},
  number       = {D23},
  publisher    = {American Geophysical Union},
  title        = {{Spatial and temporal variability of air temperature on a melting glacier: Atmospheric controls, extrapolation methods and their effect on melt modeling, Juncal Norte Glacier, Chile}},
  doi          = {10.1029/2011jd015842},
  volume       = {116},
  year         = {2011},
}

@article{12652,
  abstract     = {We explore the robustness and transferability of parameterizations of cloud radiative forcing used in glacier melt models at two sites in the Swiss Alps. We also look at the rationale behind some of the most commonly used approaches, and explore the relationship between cloud transmittance and several standard meteorological variables. The 2 m air-temperature diurnal range is the best predictor of variations in cloud transmittance. However, linear and exponential parameterizations can only explain 30–50% of the observed variance in computed cloud transmittance factors. We examine the impact of modelled cloud transmittance factors on both solar radiation and ablation rates computed with an enhanced temperature-index model. The melt model performance decreases when modelled radiation is used, the reduction being due to an underestimation of incoming solar radiation on clear-sky days. The model works well under overcast conditions. We also seek alternatives to the use of in situ ground data. However, outputs from an atmospheric model (2.2 km horizontal resolution) do not seem to provide an alternative to the parameterizations of cloud radiative forcing based on observations of air temperature at glacier automatic weather stations. Conversely, the correct definition of overcast conditions is important.},
  author       = {Pellicciotti, Francesca and Raschle, Thomas and Huerlimann, Thomas and Carenzo, Marco and Burlando, Paolo},
  issn         = {1727-5652},
  journal      = {Journal of Glaciology},
  number       = {202},
  pages        = {367--381},
  publisher    = {Cambridge University Press},
  title        = {{Transmission of solar radiation through clouds on melting glaciers: A comparison of parameterizations and their impact on melt modelling}},
  doi          = {10.3189/002214311796406013},
  volume       = {57},
  year         = {2011},
}

@article{1299,
  abstract     = {Recent experiments have shown that motion detection in Drosophila starts with splitting the visual input into two parallel channels encoding brightness increments (ON) or decrements (OFF). This suggests the existence of either two (ON-ON, OFF-OFF) or four (for all pairwise interactions) separate motion detectors. To decide between these possibilities, we stimulated flies using sequences of ON and OFF brightness pulses while recording from motion-sensitive tangential cells. We found direction-selective responses to sequences of same sign (ON-ON, OFF-OFF), but not of opposite sign (ON-OFF, OFF-ON), refuting the existence of four separate detectors. Based on further measurements, we propose a model that reproduces a variety of additional experimental data sets, including ones that were previously interpreted as support for four separate detectors. Our experiments and the derived model mark an important step in guiding further dissection of the fly motion detection circuit.},
  author       = {Eichner, Hubert and Jösch, Maximilian and Schnell, Bettina and Reiff, Dierk F and Borst, Alexander},
  journal      = {Neuron},
  number       = {6},
  pages        = {1155--1164},
  publisher    = {Elsevier},
  title        = {{Internal structure of the fly elementary motion detector}},
  doi          = {10.1016/j.neuron.2011.03.028},
  volume       = {70},
  year         = {2011},
}

@article{969,
  abstract     = {We investigate the isotope effect on the London penetration depth of a superconductor which measures $n_S/m^*$, the ratio of superfluid density to effective mass. We use a simplified model of electrons weakly coupled to a single phonon frequency $\omega_E$, but assume that the energy gap $\Delta$ does not have any isotope effect. Nevertheless, we find an isotope effect for $n_S/m^*$ which is significant if $\Delta$ is sufficiently large that it becomes comparable to $\omega_E$, a regime of interest to high-$T_c$ cuprate superconductors and possibly other families of unconventional superconductors with relatively high $T_c$. Our model is too simple to describe the cuprates and it gives the wrong sign of the isotope effect when compared with experiment, but it is a proof of principle that the isotope effect exists for $n_S/m^*$ in materials where the pairing gap and $T_c$ are not of phonon origin and have no isotope effect.},
  author       = {Serbyn, Maksym and Lee, Patrick},
  journal      = {Physical Review B - Condensed Matter and Materials Physics},
  number       = {2},
  publisher    = {American Physical Society},
  title        = {{Isotope effect on the superfluid density in conventional and high-temperature superconductors}},
  doi          = {10.1103/PhysRevB.83.024506},
  volume       = {83},
  year         = {2011},
}

@misc{9762,
  abstract     = {Defining population structure and genetic diversity levels is of the utmost importance for developing efficient conservation strategies. Overfishing has caused mean annual catches of the European spiny lobster (Palinurus elephas) to decrease alarmingly along its distribution area. In this context, there is a need for comprehensive studies to evaluate the genetic health of the exploited populations. The present work is based on a set of 10 nuclear markers amplified in 331 individuals from 10 different localities covering most of P. elephas distribution area. Samples from Atlantic and Mediterranean basins showed small but significant differences, indicating that P. elephas populations do not behave as a single panmictic unit but form two partially-overlapping groups. Despite intense overfishing, our dataset did not recover a recent bottleneck signal, and showed a large and stable historical effective size instead. This result could be accounted for by specific life history traits (reproduction and longevity) and the limitations of molecular markers in covering very recent timescales for non temporal samples. Our study emphasizes the necessity of integrating information on effective population sizes and life history parameters when evaluating population connectivity levels from genetic data.},
  author       = {Palero, Ferran and Abello, Pere and Macpherson, Enrique and Beaumont, Mark and Pascual, Marta},
  publisher    = {IST Austria},
  title        = {{Data from: Effect of oceanographic barriers and overfishing on the population genetic structure of the European spiny lobster (Palinurus elephas)}},
  doi          = {10.5061/dryad.299h8},
  year         = {2011},
}

@inproceedings{9943,
  abstract     = {Segmentation is the process of partitioning digital images into meaningful regions. The analysis of biological high content images often requires segmentation as a first step. We propose ilastik as an easy-to-use tool which allows the user without expertise in image processing to perform segmentation and classification in a unified way. ilastik learns from labels provided by the user through a convenient mouse interface. Based on these labels, ilastik infers a problem specific segmentation. A random forest classifier is used in the learning step, in which each pixel's neighborhood is characterized by a set of generic (nonlinear) features. ilastik supports up to three spatial plus one spectral dimension and makes use of all dimensions in the feature calculation. ilastik provides realtime feedback that enables the user to interactively refine the segmentation result and hence further fine-tune the classifier. An uncertainty measure guides the user to ambiguous regions in the images. Real time performance is achieved by multi-threading which fully exploits the capabilities of modern multi-core machines. Once a classifier has been trained on a set of representative images, it can be exported and used to automatically process a very large number of images (e.g. using the CellProfiler pipeline). ilastik is an open source project and released under the BSD license at www.ilastik.org.},
  author       = {Sommer, Christoph M and Straehle, Christoph and Köthe, Ullrich and Hamprecht, Fred A.},
  booktitle    = {2011 IEEE International Symposium on Biomedical Imaging: from Nano to Micro},
  isbn         = {978-1-4244-4127-3},
  issn         = {1945-8452},
  keywords     = {image segmentation, biomedical imaging, three dimensional displays, neurons, retina, observers, image color analysis},
  location     = {Chicago, Illinois, USA},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{Ilastik: Interactive learning and segmentation toolkit}},
  doi          = {10.1109/isbi.2011.5872394},
  year         = {2011},
}

@inproceedings{3345,
  abstract     = {We consider Markov Decision Processes (MDPs) with mean-payoff parity and energy parity objectives. In system design, the parity objective is used to encode ω-regular specifications, and the mean-payoff and energy objectives can be used to model quantitative resource constraints. The energy condition requires that the resource level never drops below 0, and the mean-payoff condition requires that the limit-average value of the resource consumption is within a threshold. While these two (energy and mean-payoff) classical conditions are equivalent for two-player games, we show that they differ for MDPs. We show that the problem of deciding whether a state is almost-sure winning (i.e., winning with probability 1) in energy parity MDPs is in NP ∩ coNP, while for mean-payoff parity MDPs, the problem is solvable in polynomial time, improving a recent PSPACE bound.},
  author       = {Chatterjee, Krishnendu and Doyen, Laurent},
  location     = {Warsaw, Poland},
  pages        = {206--218},
  publisher    = {Springer},
  title        = {{Energy and mean-payoff parity Markov Decision Processes}},
  doi          = {10.1007/978-3-642-22993-0_21},
  volume       = {6907},
  year         = {2011},
}

@inproceedings{3346,
  abstract     = {We study Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) functions. We consider two different objectives, namely, expectation and satisfaction objectives. Given an MDP with k reward functions, in the expectation objective the goal is to maximize the expected limit-average value, and in the satisfaction objective the goal is to maximize the probability of runs such that the limit-average value stays above a given vector. We show that under the expectation objective, in contrast to the single-objective case, both randomization and memory are necessary for strategies, and that finite-memory randomized strategies are sufficient. Under the satisfaction objective, in contrast to the single-objective case, infinite memory is necessary for strategies, and that randomized memoryless strategies are sufficient for epsilon-approximation, for all epsilon > 0. We further prove that the decision problems for both expectation and satisfaction objectives can be solved in polynomial time and the trade-off curve (Pareto curve) can be epsilon-approximated in time polynomial in the size of the MDP and 1/epsilon, and exponential in the number of reward functions, for all epsilon > 0. Our results also reveal flaws in previous work for MDPs with multiple mean-payoff functions under the expectation objective, correct the flaws and obtain improved results.},
  author       = {Brázdil, Tomáš and Brožek, Václav and Chatterjee, Krishnendu and Forejt, Vojtěch and Kučera, Antonín},
  location     = {Toronto, Canada},
  publisher    = {IEEE},
  title        = {{Two views on multiple mean payoff objectives in Markov Decision Processes}},
  doi          = {10.1109/LICS.2011.10},
  year         = {2011},
}

@inproceedings{3347,
  abstract     = {The class of ω-regular languages provides a robust specification language in verification. Every ω-regular condition can be decomposed into a safety part and a liveness part. The liveness part ensures that something good happens ``eventually''. Finitary liveness was proposed by Alur and Henzinger as a stronger formulation of liveness. It requires that there exists an unknown, fixed bound b such that something good happens within b transitions. In this work we consider automata with finitary acceptance conditions defined by finitary Büchi, parity and Streett languages. We study languages expressible by such automata: we give their topological complexity and present a regular-expression characterization. We compare the expressive power of finitary automata and give optimal algorithms for classical decisions questions. We show that the finitary languages are Σ2-complete; we present a complete picture of the expressive power of various classes of automata with finitary and infinitary acceptance conditions; we show that the languages defined by finitary parity automata exactly characterize the star-free fragment of ωB-regular languages; and we show that emptiness is NLOGSPACE-complete and universality as well as language inclusion are PSPACE-complete for finitary parity and Streett automata.},
  author       = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
  location     = {Tarragona, Spain},
  pages        = {216--226},
  publisher    = {Springer},
  title        = {{Finitary languages}},
  doi          = {10.1007/978-3-642-21254-3_16},
  volume       = {6638},
  year         = {2011},
}

@inproceedings{3348,
  abstract     = {We study synthesis of controllers for real-time systems, where the objective is to stay in a given safe set. The problem is solved by obtaining winning strategies in the setting of concurrent two-player timed automaton games with safety objectives. To prevent a player from winning by blocking time, we restrict each player to strategies that ensure that the player cannot be responsible for causing a zeno run. We construct winning strategies for the controller which require access only to (1) the system clocks (thus, controllers which require their own internal infinitely precise clocks are not necessary), and (2) a linear (in the number of clocks) number of memory bits. Precisely, we show that for safety objectives, a memory of size (3 · |C|+lg(|C|+1)) bits suffices for winning controller strategies, where C is the set of clocks of the timed automaton game, significantly improving the previous known exponential bound. We also settle the open question of whether winning region controller strategies require memory for safety objectives by showing with an example the necessity of memory for region strategies to win for safety objectives.},
  author       = {Chatterjee, Krishnendu and Prabhu, Vinayak},
  location     = {Chicago, USA},
  pages        = {221--230},
  publisher    = {Springer},
  title        = {{Synthesis of memory efficient real time controllers for safety objectives}},
  doi          = {10.1145/1967701.1967734},
  year         = {2011},
}

@inproceedings{3349,
  abstract     = {Games on graphs provide a natural model for reactive non-terminating systems. In such games, the interaction of two players on an arena results in an infinite path that describes a run of the system. Different settings are used to model various open systems in computer science, as for instance turn-based or concurrent moves, and deterministic or stochastic transitions. In this paper, we are interested in turn-based games, and specifically in deterministic parity games and stochastic reachability games (also known as simple stochastic games). We present a simple, direct and efficient reduction from deterministic parity games to simple stochastic games: it yields an arena whose size is linear up to a logarithmic factor in size of the original arena.},
  author       = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
  location     = {Minori, Italy},
  pages        = {74--86},
  publisher    = {EPTCS},
  title        = {{A reduction from parity games to simple stochastic games}},
  doi          = {10.4204/EPTCS.54.6},
  volume       = {54},
  year         = {2011},
}

@article{335,
  abstract     = {Recently reported synthetic routes for the production of hollow nanoparticles have stimulated significant interest for the possibilities this novel geometry offers. While advantageous properties have been found and innovative applications have been proposed, the development of the full potential of these new nanostructures is still strongly tied to the extent of control that can be accomplished over their characteristics (e.g., composition, size, shell thickness, and nanocrystalline structure). In the present work, we investigate the means and limits of control over these parameters that can be obtained by the Kirkendall effect synthetic route on cadmium chalcogenide nanocrystalline shells. We demonstrate that the selection of the reactants and oxidation conditions allows some extent of control of the nanocrystalline structure and thickness of the shell. However, the tuning range is limited by the intrinsic restrictions of the synthetic procedure and by the dependence of the particle geometry on the same reaction conditions. Thus, we further explore the range of control over the shell parameters that can be accomplished through post-synthesis processes, such as chemical etching and thermal annealing. },
  author       = {Ibáñez, Maria and Fan, Jiandong and Li, Wenhua and Cadavid, Doris and Nafria, Raquel and Carrete, Alex and Cabot, Andreu},
  journal      = {Chemistry of Materials},
  number       = {12},
  pages        = {3095--3104},
  publisher    = {American Chemical Society},
  title        = {{Means and limits of control of the shell parameters in hollow nanoparticles obtained by the Kirkendall effect}},
  doi          = {10.1021/cm2006633},
  volume       = {23},
  year         = {2011},
}

@inproceedings{3350,
  abstract     = {A controller for a discrete game with ω-regular objectives requires attention if, intuitively, it requires measuring the state and switching from the current control action. Minimum attention controllers are preferable in modern shared implementations of cyber-physical systems because they produce the least burden on system resources such as processor time or communication bandwidth. We give algorithms to compute minimum attention controllers for ω-regular objectives in imperfect information discrete two-player games. We show a polynomial-time reduction from minimum attention controller synthesis to synthesis of controllers for mean-payoff parity objectives in games of incomplete information. This gives an optimal EXPTIME-complete synthesis algorithm. We show that the minimum attention controller problem is decidable for infinite state systems with finite bisimulation quotients. In particular, the problem is decidable for timed and rectangular automata.},
  author       = {Chatterjee, Krishnendu and Majumdar, Rupak},
  editor       = {Fahrenberg, Uli and Tripakis, Stavros},
  location     = {Aalborg, Denmark},
  pages        = {145--159},
  publisher    = {Springer},
  title        = {{Minimum attention controller synthesis for omega regular objectives}},
  doi          = {10.1007/978-3-642-24310-3_11},
  volume       = {6919},
  year         = {2011},
}

