@techreport{12407,
  abstract     = {As the complexity and criticality of software increase every year, so does the importance of run-time monitoring. Third-party monitoring, with limited knowledge of the monitored software, and best-effort monitoring, which keeps pace with the monitored software, are especially valuable, yet underexplored areas of run-time monitoring. Most existing monitoring frameworks do not support their combination because they either require access to the monitored code for instrumentation purposes or the processing of all observed events, or both.

We present a middleware framework, VAMOS, for the run-time monitoring of software which is explicitly designed to support third-party and best-effort scenarios. The design goals of VAMOS are (i) efficiency (keeping pace at low overhead), (ii) flexibility (the ability to monitor black-box code through a variety of different event channels, and the connectability to monitors written in different specification languages), and (iii) ease-of-use. To achieve its goals, VAMOS combines aspects of event broker and event recognition systems with aspects of stream processing systems.

We implemented a prototype toolchain for VAMOS and conducted experiments including a case study of monitoring for data races. The results indicate that VAMOS enables writing useful yet efficient monitors, is compatible with a variety of event sources and monitor specifications, and simplifies key aspects of setting up a monitoring system from scratch.},
  author       = {Chalupa, Marek and Mühlböck, Fabian and Muroya Lei, Stefanie and Henzinger, Thomas A.},
  issn         = {2664-1690},
  keywords     = {runtime monitoring, best effort, third party},
  pages        = {38},
  institution  = {Institute of Science and Technology Austria},
  title        = {{VAMOS}: Middleware for Best-Effort Third-Party Monitoring},
  doi          = {10.15479/AT:ISTA:12407},
  year         = {2023},
}

@inproceedings{10206,
  abstract     = {Neural-network classifiers achieve high accuracy when predicting the class of an input that they were trained to identify. Maintaining this accuracy in dynamic environments, where inputs frequently fall outside the fixed set of initially known classes, remains a challenge. The typical approach is to detect inputs from novel classes and retrain the classifier on an augmented dataset. However, not only the classifier but also the detection mechanism needs to adapt in order to distinguish between newly learned and yet unknown input classes. To address this challenge, we introduce an algorithmic framework for active monitoring of a neural network. A monitor wrapped in our framework operates in parallel with the neural network and interacts with a human user via a series of interpretable labeling queries for incremental adaptation. In addition, we propose an adaptive quantitative monitor to improve precision. An experimental evaluation on a diverse set of benchmarks with varying numbers of classes confirms the benefits of our active monitoring framework in dynamic scenarios.},
  author       = {Lukina, Anna and Schilling, Christian and Henzinger, Thomas A.},
  booktitle    = {21st International Conference on Runtime Verification},
  isbn         = {978-3-030-88493-2},
  issn         = {1611-3349},
  keywords     = {monitoring, neural networks, novelty detection},
  location     = {Virtual},
  pages        = {42--61},
  publisher    = {Springer Nature},
  series       = {Lecture Notes in Computer Science},
  title        = {Into the Unknown: Active Monitoring of Neural Networks},
  doi          = {10.1007/978-3-030-88494-9_3},
  volume       = {12974},
  year         = {2021},
}

