@inproceedings{12171,
  abstract     = {We propose an algorithmic approach for synthesizing linear hybrid automata from time-series data. Unlike existing approaches, our approach provides a whole family of models with the same discrete structure but different dynamics. Each model in the family is guaranteed to capture the input data up to a precision error ε, in the following sense: For each time series, the model contains an execution that is ε-close to the data points. Our construction allows to effectively choose a model from this family with minimal precision error ε. We demonstrate the algorithm’s efficiency and its ability to find precise models in two case studies.},
  author       = {Garcia Soto, Miriam and Henzinger, Thomas A. and Schilling, Christian},
  booktitle    = {20th International Symposium on Automated Technology for Verification and Analysis},
  isbn         = {9783031199912},
  issn         = {1611-3349},
  location     = {Virtual},
  pages        = {337--353},
  publisher    = {Springer Nature},
  title        = {{Synthesis of parametric hybrid automata from time series}},
  doi          = {10.1007/978-3-031-19992-9_22},
  volume       = {13505},
  year         = {2022},
}

@inproceedings{12175,
  abstract     = {An automaton is history-deterministic (HD) if one can safely resolve its non-deterministic choices on the fly. In a recent paper, Henzinger, Lehtinen and Totzke studied this in the context of Timed Automata [9], where it was conjectured that the class of timed ω-languages recognised by HD-timed automata strictly extends that of deterministic ones. We provide a proof for this fact.},
  author       = {Bose, Sougata and Henzinger, Thomas A. and Lehtinen, Karoliina and Schewe, Sven and Totzke, Patrick},
  booktitle    = {16th International Conference on Reachability Problems},
  isbn         = {9783031191343},
  issn         = {1611-3349},
  location     = {Kaiserslautern, Germany},
  pages        = {67--76},
  publisher    = {Springer Nature},
  title        = {{History-deterministic timed automata are not determinizable}},
  doi          = {10.1007/978-3-031-19135-0_5},
  volume       = {13608},
  year         = {2022},
}

@inproceedings{12176,
  abstract     = {A proof of exponentiation (PoE) in a group G of unknown order allows a prover to convince a verifier that a tuple (x,q,T,y)∈G×N×N×G satisfies xqT=y. This primitive has recently found exciting applications in the constructions of verifiable delay functions and succinct arguments of knowledge. The most practical PoEs only achieve soundness either under computational assumptions, i.e., they are arguments (Wesolowski, Journal of Cryptology 2020), or in groups that come with the promise of not having any small subgroups (Pietrzak, ITCS 2019). The only statistically-sound PoE in general groups of unknown order is due to Block et al. (CRYPTO 2021), and can be seen as an elaborate parallel repetition of Pietrzak’s PoE: to achieve λ bits of security, say λ=80, the number of repetitions required (and thus the blow-up in communication) is as large as λ.

In this work, we propose a statistically-sound PoE for the case where the exponent q is the product of all primes up to some bound B. We show that, in this case, it suffices to run only λ/log(B) parallel instances of Pietrzak’s PoE, which reduces the concrete proof-size compared to Block et al. by an order of magnitude. Furthermore, we show that in the known applications where PoEs are used as a building block such structured exponents are viable. Finally, we also discuss batching of our PoE, showing that many proofs (for the same G and q but different x and T) can be batched by adding only a single element to the proof per additional statement.},
  author       = {Hoffmann, Charlotte and Hubáček, Pavel and Kamath, Chethan and Klein, Karen and Pietrzak, Krzysztof Z.},
  booktitle    = {Advances in Cryptology – CRYPTO 2022},
  isbn         = {9783031159787},
  issn         = {1611-3349},
  location     = {Santa Barbara, CA, United States},
  pages        = {370--399},
  publisher    = {Springer Nature},
  title        = {{Practical statistically-sound proofs of exponentiation in any group}},
  doi          = {10.1007/978-3-031-15979-4_13},
  volume       = {13508},
  year         = {2022},
}

@inproceedings{12298,
  abstract     = {Existing committee-based Byzantine state machine replication (SMR) protocols, typically deployed in production blockchains, face a clear trade-off: (1) they either achieve linear communication cost in the steady state, but sacrifice liveness during periods of asynchrony, or (2) they are robust (progress with probability one) but pay quadratic communication cost. We believe this trade-off is unwarranted since existing linear protocols still have asymptotic quadratic cost in the worst case. We design Ditto, a Byzantine SMR protocol that enjoys the best of both worlds: optimal communication on and off the steady state (linear and quadratic, respectively) and progress guarantee under asynchrony and DDoS attacks. We achieve this by replacing the view-synchronization of partially synchronous protocols with an asynchronous fallback mechanism at no extra asymptotic cost. Specifically, we start from HotStuff, a state-of-the-art linear protocol, and gradually build Ditto. As a separate contribution and an intermediate step, we design a 2-chain version of HotStuff, Jolteon, which leverages a quadratic view-change mechanism to reduce the latency of the standard 3-chain HotStuff. We implement and experimentally evaluate all our systems to prove that breaking the robustness-efficiency trade-off is in the realm of practicality.},
  author       = {Gelashvili, Rati and Kokoris Kogias, Eleftherios and Sonnino, Alberto and Spiegelman, Alexander and Xiang, Zhuolun},
  booktitle    = {Financial Cryptography and Data Security},
  isbn         = {9783031182822},
  issn         = {1611-3349},
  location     = {Radisson Grenada Beach Resort, Grenada},
  pages        = {296--315},
  publisher    = {Springer Nature},
  title        = {{Jolteon and Ditto: Network-adaptive efficient consensus with asynchronous fallback}},
  doi          = {10.1007/978-3-031-18283-9_14},
  volume       = {13411},
  year         = {2022},
}

@inproceedings{12302,
  abstract     = {We propose a novel algorithm to decide the language inclusion between (nondeterministic) Büchi automata, a PSPACE-complete problem. Our approach, like others before, leverages a notion of quasiorder to prune the search for a counterexample by discarding candidates which are subsumed by others for the quasiorder. Discarded candidates are guaranteed to not compromise the completeness of the algorithm. The novelty of our work lies in the quasiorder used to discard candidates. We introduce FORQs (family of right quasiorders) that we obtain by adapting the notion of family of right congruences put forward by Maler and Staiger in 1993. We define a FORQ-based inclusion algorithm which we prove correct and instantiate it for a specific FORQ, called the structural FORQ, induced by the Büchi automaton to the right of the inclusion sign. The resulting implementation, called FORKLIFT, scales up better than the state-of-the-art on a variety of benchmarks including benchmarks from program verification and theorem proving for word combinatorics. Artifact: https://doi.org/10.5281/zenodo.6552870},
  author       = {Doveri, Kyveli and Ganty, Pierre and Mazzocchi, Nicolas Adrien},
  booktitle    = {Computer Aided Verification},
  isbn         = {9783031131875},
  issn         = {1611-3349},
  location     = {Haifa, Israel},
  pages        = {109--129},
  publisher    = {Springer Nature},
  title        = {{FORQ-based language inclusion formal testing}},
  doi          = {10.1007/978-3-031-13188-2_6},
  volume       = {13372},
  year         = {2022},
}

@inproceedings{12516,
  abstract     = {The homogeneous continuous LWE (hCLWE) problem is to distinguish samples of a specific high-dimensional Gaussian mixture from standard normal samples. It was shown to be at least as hard as Learning with Errors, but no reduction in the other direction is currently known.
We present four new public-key encryption schemes based on the hardness of hCLWE, with varying tradeoffs between decryption and security errors, and different discretization techniques. Our schemes yield a polynomial-time algorithm for solving hCLWE using a Statistical Zero-Knowledge oracle.},
  author       = {Bogdanov, Andrej and Cueto Noval, Miguel and Hoffmann, Charlotte and Rosen, Alon},
  booktitle    = {Theory of Cryptography},
  isbn         = {9783031223648},
  issn         = {1611-3349},
  location     = {Chicago, IL, United States},
  pages        = {565--592},
  publisher    = {Springer Nature},
  title        = {{Public-Key Encryption from Homogeneous CLWE}},
  doi          = {10.1007/978-3-031-22365-5_20},
  volume       = {13748},
  year         = {2022},
}

@inproceedings{11771,
  abstract     = {Classic dynamic data structure problems maintain a data structure subject to a sequence S of updates and they answer queries using the latest version of the data structure, i.e., the data structure after processing the whole sequence. To handle operations that change the sequence S of updates, Demaine et al. [7] introduced retroactive data structures (RDS). A retroactive operation modifies the update sequence S in a given position t, called time, and either creates or cancels an update in S at time t. A fully retroactive data structure supports queries at any time t: a query at time t is answered using only the updates of S up to time t. While efficient RDS have been proposed for classic data structures, e.g., stack, priority queue and binary search tree, the retroactive version of graph problems are rarely studied.

In this paper we study retroactive graph problems including connectivity, minimum spanning forest (MSF), maximum degree, etc. We show that under the OMv conjecture (proposed by Henzinger et al. [15]), there does not exist fully RDS maintaining connectivity or MSF, or incremental fully RDS maintaining the maximum degree with 𝑂(𝑛1−𝜖) time per operation, for any constant 𝜖>0. Furthermore, We provide RDS with almost tight time per operation. We give fully RDS for maintaining the maximum degree, connectivity and MSF in 𝑂̃ (𝑛) time per operation. We also give an algorithm for the incremental (insertion-only) fully retroactive connectivity with 𝑂̃ (1) time per operation, showing that the lower bound cannot be extended to this setting.

We also study a restricted version of RDS, where the only change to S is the swap of neighboring updates and show that for this problem we can beat the above hardness result. This also implies the first non-trivial dynamic Reeb graph computation algorithm.},
  author       = {Henzinger, Monika H. and Wu, Xiaowei},
  booktitle    = {17th International Symposium on Algorithms and Data Structures},
  isbn         = {9783030835071},
  issn         = {1611-3349},
  location     = {Virtual},
  pages        = {471--484},
  publisher    = {Springer Nature},
  title        = {{Upper and lower bounds for fully retroactive graph problems}},
  doi          = {10.1007/978-3-030-83508-8_34},
  volume       = {12808},
  year         = {2021},
}

@inproceedings{9210,
  abstract     = {Modern neural networks can easily fit their training set perfectly. Surprisingly, despite being “overfit” in this way, they tend to generalize well to future data, thereby defying the classic bias–variance trade-off of machine learning theory. Of the many possible explanations, a prevalent one is that training by stochastic gradient descent (SGD) imposes an implicit bias that leads it to learn simple functions, and these simple functions generalize well. However, the specifics of this implicit bias are not well understood.
In this work, we explore the smoothness conjecture which states that SGD is implicitly biased towards learning functions that are smooth. We propose several measures to formalize the intuitive notion of smoothness, and we conduct experiments to determine whether SGD indeed implicitly optimizes for these measures. Our findings rule out the possibility that smoothness measures based on first-order derivatives are being implicitly enforced. They are supportive, though, of the smoothness conjecture for measures based on second-order derivatives.},
  author       = {Volhejn, Vaclav and Lampert, Christoph},
  booktitle    = {42nd German Conference on Pattern Recognition},
  isbn         = {9783030712778},
  issn         = {1611-3349},
  location     = {Tübingen, Germany},
  pages        = {246--259},
  publisher    = {Springer Nature},
  title        = {{Does SGD implicitly optimize for smoothness?}},
  doi          = {10.1007/978-3-030-71278-5_18},
  volume       = {12544},
  year         = {2021},
}

@inproceedings{9227,
  abstract     = {In the multiway cut problem we are given a weighted undirected graph   G=(V,E)  and a set   T⊆V  of k terminals. The goal is to find a minimum weight set of edges   E′⊆E  with the property that by removing   E′  from G all the terminals become disconnected. In this paper we present a simple local search approximation algorithm for the multiway cut problem with approximation ratio   2−2k . We present an experimental evaluation of the performance of our local search algorithm and show that it greatly outperforms the isolation heuristic of Dalhaus et al. and it has similar performance as the much more complex algorithms of Calinescu et al., Sharma and Vondrak, and Buchbinder et al. which have the currently best known approximation ratios for this problem.},
  author       = {Bloch-Hansen, Andrew and Samei, Nasim and Solis-Oba, Roberto},
  booktitle    = {Conference on Algorithms and Discrete Applied Mathematics},
  isbn         = {9783030678982},
  issn         = {1611-3349},
  location     = {Rupnagar, India},
  pages        = {346--358},
  publisher    = {Springer Nature},
  title        = {{Experimental evaluation of a local search approximation algorithm for the multiway cut problem}},
  doi          = {10.1007/978-3-030-67899-9_28},
  volume       = {12601},
  year         = {2021},
}

@inproceedings{9620,
  abstract     = {In this note, we introduce a distributed twist on the classic coupon collector problem: a set of m collectors wish to each obtain a set of n coupons; for this, they can each sample coupons uniformly at random, but can also meet in pairwise interactions, during which they can exchange coupons. By doing so, they hope to reduce the number of coupons that must be sampled by each collector in order to obtain a full set. This extension is natural when considering real-world manifestations of the coupon collector phenomenon, and has been remarked upon and studied empirically (Hayes and Hannigan 2006, Ahmad et al. 2014, Delmarcelle 2019).

We provide the first theoretical analysis for such a scenario. We find that “coupon collecting with friends” can indeed significantly reduce the number of coupons each collector must sample, and raises interesting connections to the more traditional variants of the problem. While our analysis is in most cases asymptotically tight, there are several open questions raised, regarding finer-grained analysis of both “coupon collecting with friends,” and of a long-studied variant of the original problem in which a collector requires multiple full sets of coupons.},
  author       = {Alistarh, Dan-Adrian and Davies, Peter},
  booktitle    = {Structural Information and Communication Complexity},
  isbn         = {9783030795269},
  issn         = {1611-3349},
  location     = {Wrocław, Poland},
  pages        = {3--12},
  publisher    = {Springer Nature},
  title        = {{Collecting coupons is faster with friends}},
  doi          = {10.1007/978-3-030-79527-6_1},
  volume       = {12810},
  year         = {2021},
}

@inproceedings{10041,
  abstract     = {Yao’s garbling scheme is one of the most fundamental cryptographic constructions. Lindell and Pinkas (Journal of Cryptology 2009) gave a formal proof of security in the selective setting where the adversary chooses the challenge inputs before seeing the garbled circuit assuming secure symmetric-key encryption (and hence one-way functions). This was followed by results, both positive and negative, concerning its security in the, stronger, adaptive setting. Applebaum et al. (Crypto 2013) showed that it cannot satisfy adaptive security as is, due to a simple incompressibility argument. Jafargholi and Wichs (TCC 2017) considered a natural adaptation of Yao’s scheme (where the output mapping is sent in the online phase, together with the garbled input) that circumvents this negative result, and proved that it is adaptively secure, at least for shallow circuits. In particular, they showed that for the class of circuits of depth   δ , the loss in security is at most exponential in   δ . The above results all concern the simulation-based notion of security. In this work, we show that the upper bound of Jafargholi and Wichs is basically optimal in a strong sense. As our main result, we show that there exists a family of Boolean circuits, one for each depth  δ∈N , such that any black-box reduction proving the adaptive indistinguishability of the natural adaptation of Yao’s scheme from any symmetric-key encryption has to lose a factor that is exponential in   δ√ . Since indistinguishability is a weaker notion than simulation, our bound also applies to adaptive simulation. To establish our results, we build on the recent approach of Kamath et al. (Eprint 2021), which uses pebbling lower bounds in conjunction with oracle separations to prove fine-grained lower bounds on loss in cryptographic security.},
  author       = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z. and Wichs, Daniel},
  booktitle    = {41st Annual International Cryptology Conference, Part II},
  isbn         = {9783030842444},
  issn         = {1611-3349},
  location     = {Virtual},
  pages        = {486--515},
  publisher    = {Springer Nature},
  title        = {{Limits on the Adaptive Security of Yao’s Garbling}},
  doi          = {10.1007/978-3-030-84245-1_17},
  volume       = {12826},
  year         = {2021},
}

@inproceedings{10076,
  abstract     = {We present a novel approach for blockchain asset owners to reclaim their funds in case of accidental private-key loss or transfer to a mistyped address. Our solution can be deployed upon failure or absence of proactively implemented backup mechanisms, such as secret sharing and cold storage. The main advantages against previous proposals is it does not require any prior action from users and works with both single-key and multi-sig accounts. We achieve this by a 3-phase   Commit()→Reveal()→Claim()−or−Challenge()  smart contract that enables accessing funds of addresses for which the spending key is not available. We provide an analysis of the threat and incentive models and formalize the concept of reactive KEy-Loss Protection (KELP).},
  author       = {Blackshear, Sam and Chalkias, Konstantinos and Chatzigiannis, Panagiotis and Faizullabhoy, Riyaz and Khaburzaniya, Irakliy and Kokoris Kogias, Eleftherios and Lind, Joshua and Wong, David and Zakian, Tim},
  booktitle    = {FC 2021 Workshops},
  isbn         = {9783662639573},
  issn         = {1611-3349},
  location     = {Virtual},
  pages        = {431--450},
  publisher    = {Springer Nature},
  title        = {{Reactive key-loss protection in blockchains}},
  doi          = {10.1007/978-3-662-63958-0_34},
  volume       = {12676},
  year         = {2021},
}

@inproceedings{10108,
  abstract     = {We argue that the time is ripe to investigate differential monitoring, in which the specification of a program's behavior is implicitly given by a second program implementing the same informal specification. Similar ideas have been proposed before, and are currently implemented in restricted form for testing and specialized run-time analyses, aspects of which we combine. We discuss the challenges of implementing differential monitoring as a general-purpose, black-box run-time monitoring framework, and present promising results of a preliminary implementation, showing low monitoring overheads for diverse programs.},
  author       = {Mühlböck, Fabian and Henzinger, Thomas A.},
  booktitle    = {International Conference on Runtime Verification},
  isbn         = {9783030884932},
  issn         = {1611-3349},
  keywords     = {run-time verification, software engineering, implicit specification},
  location     = {Virtual},
  pages        = {231--243},
  publisher    = {Springer Nature},
  title        = {{Differential monitoring}},
  doi          = {10.1007/978-3-030-88494-9_12},
  volume       = {12974},
  year         = {2021},
}

@inproceedings{10206,
  abstract     = {Neural-network classifiers achieve high accuracy when predicting the class of an input that they were trained to identify. Maintaining this accuracy in dynamic environments, where inputs frequently fall outside the fixed set of initially known classes, remains a challenge. The typical approach is to detect inputs from novel classes and retrain the classifier on an augmented dataset. However, not only the classifier but also the detection mechanism needs to adapt in order to distinguish between newly learned and yet unknown input classes. To address this challenge, we introduce an algorithmic framework for active monitoring of a neural network. A monitor wrapped in our framework operates in parallel with the neural network and interacts with a human user via a series of interpretable labeling queries for incremental adaptation. In addition, we propose an adaptive quantitative monitor to improve precision. An experimental evaluation on a diverse set of benchmarks with varying numbers of classes confirms the benefits of our active monitoring framework in dynamic scenarios.},
  author       = {Lukina, Anna and Schilling, Christian and Henzinger, Thomas A.},
  booktitle    = {21st International Conference on Runtime Verification},
  isbn         = {9783030884932},
  issn         = {1611-3349},
  keywords     = {monitoring, neural networks, novelty detection},
  location     = {Virtual},
  pages        = {42--61},
  publisher    = {Springer Nature},
  title        = {{Into the unknown: active monitoring of neural networks}},
  doi          = {10.1007/978-3-030-88494-9_3},
  volume       = {12974},
  year         = {2021},
}

@inproceedings{10324,
  abstract     = {Off-chain protocols (channels) are a promising solution to the scalability and privacy challenges of blockchain payments. Current proposals, however, require synchrony assumptions to preserve the safety of a channel, leaking to an adversary the exact amount of time needed to control the network for a successful attack. In this paper, we introduce Brick, the first payment channel that remains secure under network asynchrony and concurrently provides correct incentives. The core idea is to incorporate the conflict resolution process within the channel by introducing a rational committee of external parties, called wardens. Hence, if a party wants to close a channel unilaterally, it can only get the committee’s approval for the last valid state. Additionally, Brick provides sub-second latency because it does not employ heavy-weight consensus. Instead, Brick uses consistent broadcast to announce updates and close the channel, a light-weight abstraction that is powerful enough to preserve safety and liveness to any rational parties. We formally define and prove for Brick the properties a payment channel construction should fulfill. We also design incentives for Brick such that honest and rational behavior aligns. Finally, we provide a reference implementation of the smart contracts in Solidity.},
  author       = {Avarikioti, Zeta and Kokoris Kogias, Eleftherios and Wattenhofer, Roger and Zindros, Dionysis},
  booktitle    = {25th International Conference on Financial Cryptography and Data Security},
  isbn         = {9783662643303},
  issn         = {1611-3349},
  location     = {Virtual},
  pages        = {209--230},
  publisher    = {Springer Nature},
  title        = {{Brick: Asynchronous incentive-compatible payment channels}},
  doi          = {10.1007/978-3-662-64331-0_11},
  volume       = {12675},
  year         = {2021},
}

@inproceedings{10325,
  abstract     = {Since the inception of Bitcoin, a plethora of distributed ledgers differing in design and purpose has been created. While by design, blockchains provide no means to securely communicate with external systems, numerous attempts towards trustless cross-chain communication have been proposed over the years. Today, cross-chain communication (CCC) plays a fundamental role in cryptocurrency exchanges, scalability efforts via sharding, extension of existing systems through sidechains, and bootstrapping of new blockchains. Unfortunately, existing proposals are designed ad-hoc for specific use-cases, making it hard to gain confidence in their correctness and composability. We provide the first systematic exposition of cross-chain communication protocols. We formalize the underlying research problem and show that CCC is impossible without a trusted third party, contrary to common beliefs in the blockchain community. With this result in mind, we develop a framework to design new and evaluate existing CCC protocols, focusing on the inherent trust assumptions thereof, and derive a classification covering the field of cross-chain communication to date. We conclude by discussing open challenges for CCC research and the implications of interoperability on the security and privacy of blockchains.},
  author       = {Zamyatin, Alexei and Al-Bassam, Mustafa and Zindros, Dionysis and Kokoris Kogias, Eleftherios and Moreno-Sanchez, Pedro and Kiayias, Aggelos and Knottenbelt, William J.},
  booktitle    = {25th International Conference on Financial Cryptography and Data Security},
  isbn         = {9783662643303},
  issn         = {1611-3349},
  location     = {Virtual},
  pages        = {3--36},
  publisher    = {Springer Nature},
  title        = {{SoK: Communication across distributed ledgers}},
  doi          = {10.1007/978-3-662-64331-0_1},
  volume       = {12675},
  year         = {2021},
}

@inproceedings{10407,
  abstract     = {Digital hardware Trojans are integrated circuits whose implementation differ from the specification in an arbitrary and malicious way. For example, the circuit can differ from its specified input/output behavior after some fixed number of queries (known as “time bombs”) or on some particular input (known as “cheat codes”). To detect such Trojans, countermeasures using multiparty computation (MPC) or verifiable computation (VC) have been proposed. On a high level, to realize a circuit with specification   F  one has more sophisticated circuits   F⋄  manufactured (where   F⋄  specifies a MPC or VC of   F ), and then embeds these   F⋄ ’s into a master circuit which must be trusted but is relatively simple compared to   F . Those solutions impose a significant overhead as   F⋄  is much more complex than   F , also the master circuits are not exactly trivial. In this work, we show that in restricted settings, where   F  has no evolving state and is queried on independent inputs, we can achieve a relaxed security notion using very simple constructions. In particular, we do not change the specification of the circuit at all (i.e.,   F=F⋄ ). Moreover the master circuit basically just queries a subset of its manufactured circuits and checks if they’re all the same. The security we achieve guarantees that, if the manufactured circuits are initially tested on up to T inputs, the master circuit will catch Trojans that try to deviate on significantly more than a 1/T fraction of the inputs. This bound is optimal for the type of construction considered, and we provably achieve it using a construction where 12 instantiations of   F  need to be embedded into the master. We also discuss an extremely simple construction with just 2 instantiations for which we conjecture that it already achieves the optimal bound.},
  author       = {Chakraborty, Suvradip and Dziembowski, Stefan and Gałązka, Małgorzata and Lizurej, Tomasz and Pietrzak, Krzysztof Z. and Yeo, Michelle X.},
  booktitle    = {Theory of Cryptography},
  isbn         = {9783030904524},
  issn         = {1611-3349},
  location     = {Raleigh, NC, United States},
  pages        = {397--428},
  publisher    = {Springer Nature},
  title        = {{Trojan-resilience without cryptography}},
  doi          = {10.1007/978-3-030-90453-1_14},
  volume       = {13043},
  year         = {2021},
}

@inproceedings{10408,
  abstract     = {Key trees are often the best solution in terms of transmission cost and storage requirements for managing keys in a setting where a group needs to share a secret key, while being able to efficiently rotate the key material of users (in order to recover from a potential compromise, or to add or remove users). Applications include multicast encryption protocols like LKH (Logical Key Hierarchies) or group messaging like the current IETF proposal TreeKEM. A key tree is a (typically balanced) binary tree, where each node is identified with a key: leaf nodes hold users’ secret keys while the root is the shared group key. For a group of size N, each user just holds   log(N)  keys (the keys on the path from its leaf to the root) and its entire key material can be rotated by broadcasting   2log(N)  ciphertexts (encrypting each fresh key on the path under the keys of its parents). In this work we consider the natural setting where we have many groups with partially overlapping sets of users, and ask if we can find solutions where the cost of rotating a key is better than in the trivial one where we have a separate key tree for each group. We show that in an asymptotic setting (where the number m of groups is fixed while the number N of users grows) there exist more general key graphs whose cost converges to the cost of a single group, thus saving a factor linear in the number of groups over the trivial solution. As our asymptotic “solution” converges very slowly and performs poorly on concrete examples, we propose an algorithm that uses a natural heuristic to compute a key graph for any given group structure. Our algorithm combines two greedy algorithms, and is thus very efficient: it first converts the group structure into a “lattice graph”, which is then turned into a key graph by repeatedly applying the algorithm for constructing a Huffman code. 
To better understand how far our proposal is from an optimal solution, we prove lower bounds on the update cost of continuous group-key agreement and multicast encryption in a symbolic model admitting (asymmetric) encryption, pseudorandom generators, and secret sharing as building blocks.},
  author       = {Alwen, Joel F. and Auerbach, Benedikt and Baig, Mirza Ahad and Cueto Noval, Miguel and Klein, Karen and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z. and Walter, Michael},
  booktitle    = {Theory of Cryptography},
  isbn         = {9783030904555},
  issn         = {1611-3349},
  location     = {Raleigh, NC, United States},
  pages        = {222--253},
  publisher    = {Springer Nature},
  title        = {{Grafting key trees: Efficient key management for overlapping groups}},
  doi          = {10.1007/978-3-030-90456-2_8},
  volume       = {13044},
  year         = {2021},
}

@inproceedings{10409,
  abstract     = {We show that Yao’s garbling scheme is adaptively indistinguishable for the class of Boolean circuits of size   S  and treewidth   w  with only a   SO(w)  loss in security. For instance, circuits with constant treewidth are as a result adaptively indistinguishable with only a polynomial loss. This (partially) complements a negative result of Applebaum et al. (Crypto 2013), which showed (assuming one-way functions) that Yao’s garbling scheme cannot be adaptively simulatable. As main technical contributions, we introduce a new pebble game that abstracts out our security reduction and then present a pebbling strategy for this game where the number of pebbles used is roughly   O(δwlog(S)) ,   δ  being the fan-out of the circuit. The design of the strategy relies on separators, a graph-theoretic notion with connections to circuit complexity.},
  author       = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z},
  booktitle    = {19th International Conference on the Theory of Cryptography},
  isbn         = {9783030904524},
  issn         = {1611-3349},
  location     = {Raleigh, NC, United States},
  pages        = {486--517},
  publisher    = {Springer Nature},
  title        = {{On treewidth, separators and Yao’s garbling}},
  doi          = {10.1007/978-3-030-90453-1_17},
  volume       = {13043},
  year         = {2021},
}

@inproceedings{10410,
  abstract     = {The security of cryptographic primitives and protocols against adversaries that are allowed to make adaptive choices (e.g., which parties to corrupt or which queries to make) is notoriously difficult to establish. A broad theoretical framework was introduced by Jafargholi et al. [Crypto’17] for this purpose. In this paper we initiate the study of lower bounds on loss in adaptive security for certain cryptographic protocols considered in the framework. We prove lower bounds that almost match the upper bounds (proven using the framework) for proxy re-encryption, prefix-constrained PRFs and generalized selective decryption, a security game that captures the security of certain group messaging and broadcast encryption schemes. Those primitives have in common that their security game involves an underlying graph that can be adaptively built by the adversary. Some of our lower bounds only apply to a restricted class of black-box reductions which we term “oblivious” (the existing upper bounds are of this restricted type), some apply to the broader but still restricted class of non-rewinding reductions, while our lower bound for proxy re-encryption applies to all black-box reductions. The fact that some of our lower bounds seem to crucially rely on obliviousness or at least a non-rewinding reduction hints to the exciting possibility that the existing upper bounds can be improved by using more sophisticated reductions. Our main conceptual contribution is a two-player multi-stage game called the Builder-Pebbler Game. We can translate bounds on the winning probabilities for various instantiations of this game into cryptographic lower bounds for the above-mentioned primitives using oracle separation techniques.},
  author       = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Walter, Michael},
  booktitle    = {19th International Conference on the Theory of Cryptography},
  isbn         = {9783030904524},
  issn         = {1611-3349},
  location     = {Raleigh, NC, United States},
  pages        = {550--581},
  publisher    = {Springer Nature},
  title        = {{The cost of adaptivity in security games on graphs}},
  doi          = {10.1007/978-3-030-90453-1_19},
  volume       = {13043},
  year         = {2021},
}

