% kai-e-pubs.bib

@inproceedings{EGvdM2007:LFCS,
  author    = {Engelhardt, Kai and Gammie, Peter and van der Meyden, Ron},
  title     = {Model Checking Knowledge and Linear Time: {PSPACE} Cases},
  booktitle = {Logical Foundations of Computer Science, International
               Symposium, LFCS 2007, New York, NY, USA, June 4-7, 2007,
               Proceedings},
  pages     = {195--211},
  year      = {2007},
  editor    = {Art{\"e}mov, Sergei N. and Nerode, Anil},
  volume    = {4514},
  series    = {LNCS},
  month     = jun,
  publisher = {Springer-Verlag},
  abstract  = {We present a general algorithm scheme for model checking
               logics of knowledge, common knowledge and linear time,
               based on simulations to a class of structures that
               capture the way that agents update their knowledge. We
               show that the scheme leads to PSPACE implementations of
               model checking the logic of knowledge and linear time in
               several special cases: perfect recall systems with a
               single agent or in which all communication is by
               synchronous broadcast, and systems in which knowledge is
               interpreted using either the agents' current observation
               only or its current observation and clock value. In all
               these results, common knowledge operators may be
               included in the language. Matching lower bounds are
               provided, and it is shown that although the complexity
               bound matches the PSPACE complexity of the linear time
               temporal logic LTL, as a function of the model size the
               problems considered have a higher complexity than LTL.}
}
@inproceedings{E2002:Refine,
  author    = {Engelhardt, Kai},
  title     = {Towards a Refinement Theory that Supports Reasoning about
               Knowledge and Time for Multiple Agents (Work in Progress)},
  booktitle = {{REFINE '02} An {FME} sponsored Refinement Workshop in
               collaboration with {BCS FACS}},
  pages     = {23},
  year      = {2002},
  editor    = {Derrick, John and Boiten, Eerke and Woodcock, Jim and
               von Wright, Joakim},
  month     = jul,
  publisher = {Computing Laboratory, University of Kent at Canterbury, UK},
  note      = {Preliminary Proceedings.}
}
@phdthesis{E:PhD,
  author   = {Engelhardt, Kai},
  title    = {Model-Oriented Data Refinement},
  school   = {Institut f{\"u}r Informatik und Praktische Mathematik,
              Christian-Albrechts-Universit{\"a}t zu Kiel},
  pages    = {280},
  year     = {1997},
  month    = jul,
  abstract = {The goal of this thesis is to provide a comprehensive and
              systematic introduction to the important and highly
              applicable method of data refinement and proving
              simulation. We concentrate in the first part on the
              general principles needed to prove data refinement
              correct, and begin with an explanation of the fundamental
              notions, showing that data refinement proofs reduce to
              proving simulation. The topics of Hoare Logic and the
              Refinement Calculus are then introduced and a general
              theory of simulations is developed and related to them.
              Accessibility and comprehension are emphasised in order
              to guide newcomers to the area. The second part of this
              thesis contains a detailed survey of important methods in
              this area, such as VDM, and the methods due to Abadi \&
              Lamport, Hehner, Lynch and Reynolds, and Back's
              refinement calculus. All these methods are carefully
              analysed, and shown to be either incomplete, with
              counterexamples to their application, or to be always
              applicable whenever data refinement holds. This is shown
              by proving, for the first time, that all of them can be
              described and analysed in terms of two simple notions:
              forward and backward simulation.}
}
@mastersthesis{E:master93,
  author   = {Engelhardt, Kai},
  title    = {{Verallgemeinerungen der Methode von Abadi und Lamport um
              ein von A. Pnueli gestelltes Problem zu l{\"o}sen}},
  school   = {Institut f{\"u}r Informatik und Praktische Mathematik,
              Christian-Albrechts-Universit{\"a}t zu Kiel},
  year     = {1993},
  month    = jan,
  annote   = {German title but English text},
  abstract = {By adding a new technique and a simple proof strategy to
              Abadi \& Lamport's 1988 method \cite{AL88} for proving
              refinement between specifications of distributed programs
              correct, the inherent limitation of their method,
              occurring when the abstract level of specification
              features so-called infinite invisible nondeterminism or
              internal discontinuity, can be sometimes overcome. This
              technique is applied to the cruel last step of a three
              step correctness proof for an algorithm for communication
              between migrating processes within a finite network due
              to Kleinman, Moscowitz, Pnueli \& Shapiro \cite{KMPS91}.}
}
@incollection{EH2008,
  author    = {Engelhardt, Kai and Huuck, Ralf},
  title     = {Smaller Abstractions for $\forall${CTL}$^*$ without {Next}},
  booktitle = {Concurrency, Compositionality, and Correctness: Essays in
               Honor of Willem-Paul de Roever},
  pages     = {250--259},
  publisher = {Springer-Verlag},
  year      = {2010},
  editor    = {Dams, Dennis and Hannemann, Ulrich and Steffen, Martin},
  volume    = {5930},
  series    = {LNCS},
  abstract  = {The success of applying model-checking to large systems
               depends crucially on the choice of good abstractions. In
               this work we present an approach for constructing
               abstractions when checking next-free universal CTL$^*$
               properties. It is known that functional abstractions are
               safe and that next-free universal CTL$^*$ is insensitive
               to finite stuttering. We exploit these results by
               introducing a safe \emph{next-free abstraction} that is
               typically smaller than the usual functional one while at
               the same time more precise, i.e., it has less spurious
               counter-examples.},
  isbn      = {978-3-642-11511-0}
}
@inproceedings{EM2005a,
  author    = {Engelhardt, Kai and Moses, Yoram},
  title     = {Causing Communication Closure: Safe Program Composition
               with Non-{FIFO} Channels},
  booktitle = {{DISC 2005} 19$^\mathrm{th}$ International Symposium on
               Distributed Computing},
  pages     = {229--243},
  year      = {2005},
  editor    = {Fraigniaud, Pierre},
  volume    = {3724},
  series    = {LNCS},
  month     = sep # { 26--29},
  publisher = {Springer-Verlag},
  doi       = {10.1007/11561927_18},
  abstract  = {A semantic framework for analyzing safe composition of
               distributed programs is presented. Its applicability is
               illustrated by a study of program composition when
               communication is reliable but not necessarily FIFO\@. In
               this model, special care must be taken to ensure that
               messages do not accidentally overtake one another in the
               composed program. We show that barriers do not exist in
               this model. Indeed, no program that sends or receives
               messages can automatically be composed with arbitrary
               programs without jeopardizing their intended behavior.
               Safety of composition becomes context-sensitive and new
               tools are needed for ensuring it. A notion of
               \emph{sealing} is defined, where if a program $P$ is
               immediately followed by a program $Q$ that seals $P$
               then $P$ will be communication-closed---it will execute
               as if it runs in isolation. The investigation of sealing
               in this model reveals a novel connection between Lamport
               causality and safe composition. A characterization of
               sealable programs is given, as well as efficient
               algorithms for testing if $Q$ seals $P$ and for
               constructing a seal for a significant class of programs.
               It is shown that every sealable program that is open to
               interference on $O(n^2)$ channels can be sealed using
               $O(n)$ messages.}
}
@inproceedings{EM2005b,
  author    = {Engelhardt, Kai and Moses, Yoram},
  title     = {Safe Composition of Distributed Programs Communicating
               over Order-Preserving Imperfect Channels},
  booktitle = {7$^\mathrm{th}$ International Workshop on Distributed
               Computing {IWDC 2005}},
  pages     = {32--44},
  year      = {2005},
  editor    = {Pal, Ajit and Kshemkalyani, Ajay and Kumar, Rajeev and
               Gupta, Arobinda},
  volume    = {3741},
  series    = {LNCS},
  month     = dec # { 27--30},
  publisher = {Springer-Verlag},
  doi       = {10.1007/11603771_4},
  abstract  = {The fundamental question considered in this paper is
               when program $Q$, if executed immediately after program
               $P$, is guaranteed not to interfere with $P$ and be safe
               from interference by $P$. If a message sent by one of
               these programs is received by the other, it may affect
               and modify the other's execution. The notion of
               \emph{communication-closed layers (CCLs)} introduced by
               Elrad and Francez in 1982 is a useful tool for studying
               such interference. CCLs have been considered mainly in
               the context of reliable FIFO channels (without
               duplication), where one can design program layers that
               do not interfere with any other layer. When channels are
               less than perfect such programs are no longer feasible.
               The absence of interference between layers becomes
               context-dependent. In this paper we study the impact of
               message duplication and loss on the safety of layer
               composition. Using a communication phase operator, the
               \emph{fits after} relation among programs is defined. If
               program $Q$ fits after $P$ then $P$ and $Q$ will not
               interfere with each other in executions of $P*Q$. For
               programs $P$ and $Q$ in a natural class of programs we
               outline efficient algorithms for the following: (1)
               deciding whether $Q$ fits after $P$; (2) deciding
               whether $Q$ \emph{seals} $P$, meaning that $Q$ fits
               after $P$ and no following program can communicate with
               $P$; and (3) constructing a \emph{separator} $S$ that
               both fits after $P$ and satisfies that $Q$ fits after
               $P*S$.}
}
@inproceedings{EM2005c,
  author    = {Engelhardt, Kai and Moses, Yoram},
  title     = {Single-Bit Messages are Insufficient in the Presence of
               Duplication},
  booktitle = {7$^\mathrm{th}$ International Workshop on Distributed
               Computing {IWDC 2005}},
  pages     = {25--31},
  year      = {2005},
  editor    = {Pal, Ajit and Kshemkalyani, Ajay and Kumar, Rajeev and
               Gupta, Arobinda},
  volume    = {3741},
  series    = {LNCS},
  month     = dec # { 27--30},
  publisher = {Springer-Verlag},
  doi       = {10.1007/11603771_3},
  abstract  = {Ideal communication channels in asynchronous systems are
               reliable, deliver messages in FIFO order, and do not
               deliver spurious or duplicate messages. A message
               vocabulary of size two (i.e., single-bit messages)
               suffices to encode and transmit messages of arbitrary
               finite length over such channels. This note proves that
               single-bit messages are insufficient once channels
               potentially deliver duplicate messages. In particular,
               it is shown that no protocol allows the sender to notify
               the receiver which of three values it holds, over a
               bidirectional, reliable, FIFO channel that may duplicate
               messages. This implies that messages must encode some
               additional control information, e.g., in the form of
               headers or tags.}
}
@article{EM2008ipl,
  author   = {Engelhardt, Kai and Moses, Yoram},
  title    = {Single-bit messages are insufficient for data link over
              duplicating channels},
  journal  = {Information Processing Letters},
  year     = {2008},
  volume   = {107},
  number   = {6},
  pages    = {235--239},
  month    = aug,
  doi      = {10.1016/j.ipl.2008.03.010},
  abstract = {Ideal communication channels in asynchronous systems are
              reliable, deliver messages in FIFO order, and do not
              deliver spurious or duplicate messages. A message
              vocabulary of size two (i.e., single-bit messages)
              suffices to encode and transmit messages of arbitrary
              finite length over such channels. This note proves that
              single-bit messages are insufficient once channels
              potentially deliver duplicate messages. In particular, it
              is shown that no protocol allows the sender to notify the
              receiver which of three values it holds, over a
              bidirectional, reliable, FIFO channel that may duplicate
              messages. This implies that messages must encode some
              additional control information, e.g., in the form of
              headers or tags.}
}
@article{EM2009dc,
  author    = {Engelhardt, Kai and Moses, Yoram},
  title     = {Causing communication closure: safe program composition
               with reliable non-{FIFO} channels},
  journal   = {Distributed Computing},
  year      = {2009},
  volume    = {22},
  number    = {2},
  pages     = {73--91},
  month     = oct,
  publisher = {Springer-Verlag},
  doi       = {10.1007/s00446-009-0081-9},
  abstract  = {A rigorous framework for analyzing safe composition of
               distributed programs is presented. It facilitates
               specifying notions of safe sequential execution of
               distributed programs in various models of communication.
               A notion of \emph{sealing} is defined, where if a
               program $P$ is immediately followed by a program $Q$
               that seals $P$ then $P$ will be
               communication-closed---it will execute as if it runs in
               isolation. None of its send or receive actions will
               match or interact with actions outside $P$. The
               applicability of sealing is illustrated by a study of
               program composition when communication is reliable but
               not necessarily FIFO\@. In this model, special care must
               be taken to ensure that messages do not accidentally
               overtake one another in the composed program. In this
               model no program that sends or receives messages can be
               composed automatically with arbitrary programs without
               jeopardizing their intended behavior. Safety of
               composition becomes context-sensitive and new tools are
               needed for ensuring it. The investigation of sealing in
               this model reveals a novel connection between Lamport
               causality and safe composition. A characterization of
               sealable programs is given, as well as efficient
               algorithms for testing if $Q$ seals $P$ and for
               constructing a seal for a class of straight-line
               programs. It is shown that every sealable program can be
               sealed using $O(n)$ messages. In fact, $3n-4$ messages
               are necessary and sufficient in the worst case, despite
               the fact that a sealable program may be open to
               interference on $\Omega(n^2)$ channels.}
}
@book{EdR:cup98,
  author    = {de Roever, Willem-Paul and Engelhardt, Kai},
  title     = {Data Refinement: Model-Oriented Proof Methods and their
               Comparison},
  publisher = {Cambridge University Press},
  pages     = {430},
  year      = {1998},
  number    = {47},
  series    = {Cambridge Tracts in Theoretical Computer Science},
  annote    = {With the assistance of Jos Coenen, Karl-Heinz Buth, Paul
               Gardiner, Yassine Lakhnech, and Frank Stomp},
  opthttp   = {http://www.informatik.uni-kiel.de/~bkmail1/},
  note      = {Paperback re-issue 2009. Errata at
               \url{https://kai-e.github.io/pubs/dRE1998/errata.pdf}.}
}
@article{EdR:facs95,
  author    = {Engelhardt, Kai and de Roever, Willem-Paul},
  title     = {Towards a Practitioners' Approach to {Abadi} and
               {Lamport's} Method},
  journal   = {Formal Aspects of Computing},
  publisher = {Springer-Verlag},
  year      = {1995},
  volume    = {7},
  number    = {5},
  pages     = {550--575},
  abstract  = {Our own basic intuitions are presented when introducing
               the method developed by Abadi and Lamport in \cite{AL88}
               for proving refinement between specifications of
               nondeterministic programs correct to people unacquainted
               with it. The example we use to illustrate this method is
               a nontrivial communication protocol that provides a
               mechanism analogous to message passing between migrating
               processes within a fixed finite network of nodes due to
               Kleinman, Moscowitz, Pnueli, and Shapiro \cite{KMPS91}.
               Especially the cruel last step of a three step
               refinement proof of that protocol gives rise to a deeper
               understanding of, and some small enhancements to, Abadi
               and Lamport's 1988 method.}
}
@inproceedings{EdR:fme93,
  author    = {Engelhardt, Kai and de Roever, Willem-Paul},
  title     = {Generalizing {Abadi} \& {Lamport's} Method to Solve a
               Problem Posed by {A. Pnueli}},
  booktitle = {{FME} '93: Industrial-Strength Formal Methods},
  editor    = {Woodcock, Jim C. P. and Larsen, Peter Gorm},
  volume    = {670},
  series    = {LNCS},
  year      = {1993},
  publisher = {Springer-Verlag},
  month     = apr,
  pages     = {294--313},
  annote    = {Contains an error}
}
@incollection{EdR:liberPV,
  author    = {Engelhardt, Kai and de Roever, Willem-Paul},
  title     = {New Win[e/d] for Old Bags},
  booktitle = {A dynamic and quick intellect, {Paul Vit{\'a}nyi 25
               years @ CWI}},
  publisher = {CWI},
  year      = {1996},
  editor    = {Tromp, John},
  address   = {Amsterdam},
  month     = nov,
  pages     = {59--66},
  annote    = {A more descriptive title would be ``A pure, sound, and
               complete (in the sense of Cook) Hoare logic for a
               language with specification statements and recursion'',
               which is definitely too long}
}
@inproceedings{EdR:mfcs96,
  author    = {Engelhardt, Kai and de Roever, Willem-Paul},
  title     = {Simulation of Specification Statements in {Hoare} Logic},
  booktitle = {Mathematical Foundations of Computer Science 1996, 21st
               International Symposium, {MFCS '96}, Cracow, Poland,
               Proceedings},
  editor    = {Penczek, Wojciech and Sza{\l}as, Andrzej},
  volume    = {1113},
  series    = {LNCS},
  year      = {1996},
  publisher = {Springer-Verlag},
  month     = sep,
  pages     = {324--335},
  abstract  = {Data refinement is a powerful technique to derive
               implementations in terms of low-level data structures
               like bytes from specification in terms of high-level
               data structures like queues. The higher level operations
               need not be coded as ordinary programs; it is more
               convenient to introduce specification statements to the
               programming language and use them instead of actual
               code. Specification statements represent the maximal
               program satisfying a given Hoare-triple. Sound and
               (relatively) complete simulation techniques allow for
               proving data refinement by local arguments. A major
               challenge for simulation consists of expressing the
               weakest lower level specification simulating a given
               higher level specification w.r.t.\ a given relation
               between these two levels of abstraction. We present
               solutions to this challenge for upward and downward
               simulation in both partial and total correctness
               frameworks, thus reducing the task of proving data
               refinement to proving validity of certain
               Hoare-triples.}
}
@inproceedings{EvdM99:wsproc,
  author    = {Engelhardt, Kai and van der Meyden, Ron},
  title     = {Modal Logics with a Hierarchy of Local Propositional
               Quantifiers (Extended Abstract)},
  booktitle = {Proceedings of the {FLoC'99} Workshop
               Complexity-Theoretic and Recursion-Theoretic Methods in
               Databases and Artificial Intelligence},
  pages     = {81--90},
  year      = {1999},
  editor    = {Eiter, Thomas and Gottlob, Georg and Marek, Victor and
               Remmel, Jeffrey},
  month     = jul # { 6}
}
@inproceedings{EvdMM2000:FOSSACS,
  author    = {Engelhardt, Kai and van der Meyden, Ron and Moses, Yoram},
  title     = {A Program Refinement Framework Supporting Reasoning
               about Knowledge and Time},
  booktitle = {Foundations of Software Science and Computation
               Structures},
  pages     = {114--129},
  year      = {2000},
  editor    = {Tiuryn, Jerzy},
  volume    = {1784},
  series    = {LNCS},
  month     = mar,
  publisher = {Springer-Verlag},
  abstract  = {This paper develops a highly expressive semantic
               framework for program refinement that supports both
               temporal reasoning and reasoning about the knowledge of
               a single agent. The framework generalizes a previously
               developed temporal refinement framework by amalgamating
               it with a logic of quantified local propositions, a
               generalization of the logic of knowledge. The combined
               framework provides a formal setting for development of
               knowledge-based programs, and addresses two problems of
               existing theories of such programs: lack of
               compositionality and the fact that such programs often
               have only implementations of high computational
               complexity. Use of the framework is illustrated by a
               control theoretic example concerning a robot operating
               with an imprecise position sensor.}
}
@inproceedings{EvdMM2001:LPAR,
  author    = {Engelhardt, Kai and van der Meyden, Ron and Moses, Yoram},
  title     = {A Refinement Theory that Supports Reasoning about
               Knowledge and Time for Synchronous Agents},
  booktitle = {Proceedings {LPAR} 2001},
  pages     = {125--141},
  year      = {2001},
  editor    = {Nieuwenhuis, Robert and Voronkov, Andrei},
  volume    = {2250},
  series    = {LNAI},
  month     = dec,
  publisher = {Springer-Verlag},
  doi       = {10.1007/3-540-45653-8_9},
  abstract  = {An expressive semantic framework for program refinement
               that supports both temporal reasoning and reasoning
               about the knowledge of multiple agents is developed. The
               refinement calculus owes the cleanliness of its
               decomposition rules for all programming language
               constructs and the relative simplicity of its semantic
               model to a rigid synchrony assumption which requires all
               agents and the environment to proceed in lockstep. The
               new features of the calculus are illustrated in a
               derivation of the two-phase-commit protocol.}
}
@inproceedings{EvdMM98:TARK,
  author    = {Engelhardt, Kai and van der Meyden, Ron and Moses, Yoram},
  title     = {Knowledge and the Logic of Local Propositions},
  booktitle = {Theoretical Aspects of Rationality and Knowledge,
               Proceedings of the Seventh Conference (TARK 1998)},
  pages     = {29--41},
  year      = {1998},
  month     = jul,
  editor    = {Gilboa, Itzhak},
  publisher = {Morgan Kaufmann},
  abstract  = {An agent's limited view of the state of a distributed
               system may render globally different situations
               indistinguishable. A proposition is local for this agent
               whenever his view suffices to decide this proposition.
               Motivated by a framework for the development of
               distributed programs from knowledge-based
               specifications, we introduce a modal logic of local
               propositions, in which it is possible to quantify over
               such propositions. We show that this logic is able to
               represent a rich set of epistemic notions. Under the
               usual strong semantics, this logic is not recursively
               axiomatizable, however. We show that by weakening the
               semantics of quantification, it is possible to obtain a
               logic that is axiomatizable and is still able to express
               interesting epistemic notions.}
}
@inproceedings{EvdMS2002:AiML,
  author       = {Engelhardt, Kai and van der Meyden, Ron and Su, Kaile},
  title        = {Modal Logics with a Linear Hierarchy of Local
                  Propositional Quantifiers (Preliminary Version)},
  booktitle    = {Advances in Modal Logic 2002 (AiML)},
  pages        = {63--76},
  year         = {2002},
  editor       = {Suzuki, Nobu-Yuki and Wolter, Frank},
  month        = sep,
  organization = {Institut de recherche en informatique de Toulouse,
                  Universit{\'e} Paul Sabatier},
  optnote      = {Best paper award}
}
@incollection{EvdMS:AiMLbook,
  author    = {Engelhardt, Kai and van der Meyden, Ron and Su, Kaile},
  title     = {Modal Logics with a Linear Hierarchy of Local
               Propositional Quantifiers},
  booktitle = {Advances in Modal Logic},
  pages     = {9--30},
  publisher = {King's College London Publications},
  year      = {2003},
  editor    = {Balbiani, Philippe and Suzuki, Nobu-Yuki and Wolter,
               Frank and Zakharyaschev, Michael},
  volume    = {4},
  isbn      = {0-9543006-1-0 (Paperback), 0-9543006-2-9 (Hardback)},
  abstract  = {Local propositions arise in the context of the semantics
               for logics of knowledge in multi-agent systems. A
               proposition is local to an agent when it depends only on
               that agent's local state. We consider a logic, LLP, that
               extends S5, the modal logic of necessity (in which the
               modality refers to truth at all worlds) by adding a
               quantifier ranging over the set of all propositions and,
               for each agent, a propositional quantifier ranging over
               the agent's local propositions. LLP is able to express a
               large variety of epistemic modalities, including
               knowledge, common knowledge and distributed knowledge.
               However, this expressiveness comes at a cost: the logic
               is equivalent to second order predicate logic when two
               independent agents are present \cite{EvdMM98:TARK},
               hence undecidable and not axiomatizable. This paper
               identifies a class of multi-agent S5 structures,
               \emph{hierarchical structures}, in which the agents'
               information has the structure of a linear hierarchy. All
               systems with just a single agent are hierarchical. It is
               shown that LLP becomes decidable with respect to
               hierarchical systems. The main result of the paper is
               the completeness of an axiomatization for the
               hierarchical case.}
}
@inproceedings{Klein_EH_09,
  author    = {Klein, Gerwin and Elphinstone, Kevin and Heiser, Gernot
               and Andronick, June and Cock, David and Derrin, Philip
               and Elkaduwe, Dhammika and Engelhardt, Kai and
               Kolanski, Rafal and Norrish, Michael and Sewell, Thomas
               and Tuch, Harvey and Winwood, Simon},
  title     = {{seL4}: Formal Verification of an {OS} Kernel},
  booktitle = {Proceedings of the 22nd ACM Symposium on Operating
               Systems Principles (SOSP)},
  address   = {Big Sky, MT, USA},
  year      = {2009},
  month     = oct,
  publisher = {ACM},
  optnote   = {Best paper award},
  pages     = {207--220},
  pdf       = {http://www.sigops.org/sosp/sosp09/papers/klein-sosp09.pdf}
}
@article{Klein_EH_10:cacm,
  author   = {Klein, Gerwin and Andronick, June and Elphinstone, Kevin
              and Heiser, Gernot and Cock, David and Derrin, Philip
              and Elkaduwe, Dhammika and Engelhardt, Kai and Kolanski,
              Rafal and Norrish, Michael and Sewell, Thomas and Tuch,
              Harvey and Winwood, Simon},
  title    = {{seL4}: Formal Verification of an Operating-System
              Kernel},
  journal  = {Communications of the ACM},
  year     = {2010},
  volume   = {53},
  number   = {6},
  pages    = {107--115},
  month    = jun,
  doi      = {10.1145/1743546.1743574},
  abstract = {We report on the formal, machine-checked verification of
              the seL4 microkernel from an abstract specification down
              to its C implementation. We assume correctness of
              compiler, assembly code, hardware, and boot code. seL4 is
              a third-generation microkernel of L4 provenance,
              comprising 8,700 lines of C and 600 lines of assembler.
              Its performance is comparable to other high-performance
              L4 kernels. We prove that the implementation always
              strictly follows our high-level abstract specification of
              kernel behaviour. This encompasses traditional design and
              implementation safety properties such as that the kernel
              will never crash, and it will never perform an unsafe
              operation. It also implies much more: we can predict
              precisely how the kernel will behave in every possible
              situation.}
}
@unpublished{E2010:ARSPA_WITS,
  author   = {Kai Engelhardt},
  title    = {A Note on Noninterference in the Presence of Colluding
              Adversaries (Preliminary Report)},
  year     = 2010,
  month    = mar # { 27--28},
  note     = {Presented at Automated Reasoning for Security Protocol
              Analysis and Issues in the Theory of Security,
              {ARSPA-WITS'10}},
  abstract = {Whether adversaries can glean information from a
              distributed system in a formal sense hinges on the
              definition of such a system and what can be observed by
              those agents. In the presence of colluding adversaries,
              the standard definition of non-interference by Goguen
              and Meseguer and its many variants proposed in the
              literature fail in a very intuitive sense to capture a
              simple collusion attack. The crucial difference between
              what is modelled in those definitions and what we argue
              needs to be modelled is that \emph{teams can observe
              pomsets} as Plotkin and Pratt stated. In this note we
              expose what goes wrong in the known approaches and
              explain how to fix the problem.}
}
@inproceedings{EvdMZ2012a,
  author    = {Kai Engelhardt and Ron van der Meyden and Chenyi
               Zhang},
  title     = {Intransitive Noninterference in Nondeterministic
               Systems},
  booktitle = {19th {ACM} Conference on Computer and Communications
               Security},
  editor    = {Ting Yu and George Danezis and Virgil D. Gligor},
  publisher = {ACM},
  year      = 2012,
  month     = oct,
  abstract  = {This paper addresses the question of how TA-security,
               a semantics for intransitive information-flow policies
               in deterministic systems, can be generalized to
               nondeterministic systems. Various definitions are
               proposed, including definitions that state that the
               system enforces as much of the policy as possible in
               the context of attacks in which groups of agents
               collude by sharing information through channels that
               lie outside the system. Relationships between the
               various definitions proposed are characterized, and an
               unwinding-based proof technique is developed. Finally,
               it is shown that on a specific class of systems,
               access control systems with local non-determinism, the
               strongest definition can be verified by checking a
               simple static property.}
}
@inproceedings{GHE2015:PLDI,
  author = {Peter Gammie and Antony L. Hosking and Kai
                  Engelhardt},
  title = {Relaxing Safely: Verified On-the-Fly Garbage
                  Collection for x86-{TSO}},
  booktitle = {{PLDI} 2015, 36th annual {ACM SIGPLAN} conference on
                  Programming Language Design and Implementation},
  pages = {99--109},
  year = 2015,
  editor = {Steve Blackburn},
  address = {Portland, OR},
  month = may,
  publisher = {{ACM}},
  abstract = {We report on a machine-checked verification of
                  safety for a state-of-the-art, on-the-fly,
                  concurrent, mark-sweep garbage collector that is
                  designed for multi-core architectures with weak
                  memory consistency. The proof explicitly
                  incorporates the relaxed memory semantics of x86
                  multiprocessors. To our knowledge, this is the first
                  fully machine-checked proof of safety for such a
                  garbage collector. We couch the proof in a framework
                  that system implementers will find appealing, with
                  the fundamental components of the system specified
                  in a simple and intuitive programming language. The
                  abstract model is detailed enough for its
                  correspondence with an assembly language
                  implementation to be straightforward.}
}
@article{GHE2015:afp,
  author = {Peter Gammie and Antony L. Hosking and Kai Engelhardt},
  title = {Relaxing Safely: Verified On-the-Fly Garbage
                  Collection for x86-{TSO}},
  journal = {Archive of Formal Proofs},
  year = 2015,
  url = {https://www.isa-afp.org/entries/ConcurrentGC.html},
  abstract = {We use ConcurrentIMP to model Schism, a
                  state-of-the-art real-time garbage collection scheme
                  for weak memory, and show that it is safe on
                  x86-TSO.
                  This development accompanies the PLDI 2015
                  paper of the same name.}
}
@inproceedings{E2017qifca,
  author    = {Kai Engelhardt},
  title     = {A Better Composition Operator for Quantitative
               Information Flow Analyses},
  booktitle = {Computer Security -- ESORICS 2017, 22nd European
               Symposium on Research in Computer Security Oslo,
               Norway, September 11--15, 2017, Proceedings, Part I},
  editor    = {Simon N. Foley and Dieter Gollmann and Einar
               Snekkenes},
  series    = {LNCS},
  volume    = 10492,
  pages     = {446--463},
  publisher = {Springer-Verlag},
  address   = {Oslo, Norway},
  year      = 2017,
  month     = sep # { 11--13},
  doi       = {10.1007/978-3-319-66402-6_26},
  abstract  = {Given a description of the quantitative information
               flow (qif) for components, how can we determine the
               qif of a system composed from components? We explore
               this fundamental question mathematically and provide
               an answer based on a new composition operator. We
               investigate its properties and prove that it
               generalises existing composition operators. We
               illustrate the results with a fresh look on Chaum's
               dining cryptographers. We show that the new operator
               enjoys various convenient algebraic properties and
               that it is well-behaved under composition refinement.}
}
@inproceedings{MSE2018euroSnP,
  author = {Toby Murray and Robert Sison and Kai Engelhardt},
  title = {\textsc{Covern}: A Logic for Compositional
                  Verification of Information Flow Control},
  booktitle = {IEEE European Symposium on Security and Privacy
                  (EuroS\&P)},
  year = 2018,
  editor = {Frank Piessens and Matthew Smith},
  month = apr,
  publisher = {IEEE},
  abstract = {Shared memory concurrency is pervasive in modern
                  programming, including in systems that must protect
                  highly sensitive data. Recently, verification has
                  finally emerged as a practical tool for proving
                  interesting security properties of real programs,
                  particularly information flow control (IFC)
                  security. Yet there remain no general logics for
                  verifying IFC security of shared-memory concurrent
                  programs. In this paper we present the first such
                  logic, \textsc{Covern} (Compositional Verification
                  of Noninterference) and its proof of soundness via a
                  new generic framework for \emph{general}
                  rely-guarantee IFC reasoning. We apply
                  \textsc{Covern} to model and verify the
                  security-critical software functionality of the
                  Cross Domain Desktop Compositor, an embedded device
                  that facilitates simultaneous and intuitive user
                  interaction with multiple classified networks while
                  preventing leakage between them. To our knowledge
                  this is the first foundational, machine-checked
                  proof of IFC security for a non-trivial
                  shared-memory concurrent program in the literature.},
  url = {https://covern.org/papers/EuroSP18.pdf},
  doi = {10.1109/EuroSP.2018.00010}
}