@inproceedings{aagaard1998, author = {Mark D. Aagaard and Robert B. Jones and Carl-Johan H. Seger}, title = {Combining Theorem Proving and Trajectory Evaluation in an Industrial Environment}, pages = {538--541}, booktitle = {Proceedings of the 1998 Conference on Design Automation ({DAC}-98)}, month = jun, publisher = {ACM/IEEE}, address = {Los Alamitos, CA}, year = 1998 }

@article{abadi1994, author = {Martin Abadi and Joseph Y. Halpern}, title = {Decidability and expressiveness for first-order logics of probability}, journal = {Information and Computation}, volume = 112, number = 1, pages = {1--36}, year = 1994 }

@inproceedings{abal2012, author = {Abal, Iago and Cunha, Alcino and Hurd, Joe and Sousa Pinto, Jorge}, title = {Using Term Rewriting to Solve Bit-Vector Arithmetic Problems (Poster Presentation)}, pages = {493--495}, crossref = {sat2012}, url = {http://gilith.com/research/papers} }

@book{ackermann1954, author = {Wilhelm Ackermann}, title = {Solvable Cases of the Decision Problem}, publisher = {North-Holland}, series = {Studies in Logic and the Foundations of Mathematics}, address = {Amsterdam}, year = 1954 }

@inproceedings{adams2010, author = {Adams, Mark}, title = {Introducing {HOL} {Z}ero}, crossref = {icms2010}, pages = {142--143}, abstract = {Theorem provers are now playing an important role in two diverse fields: computer system verification and mathematics. In computer system verification, they are a key component in toolsets that rigorously establish the absence of errors in critical computer hardware and software, such as processor design and safety-critical software, where traditional testing techniques provide inadequate assurance. In mathematics, they are used to check the veracity of recent high-profile proofs, such as the Four Colour Theorem and the Kepler Conjecture, whose scale and complexity have pushed traditional peer review to its limits.} }

@incollection{ahrendt1998, author = {Wolfgang Ahrendt and Bernhard Beckert and Reiner H{\"a}hnle and Wolfram Menzel and Wolfgang Reif and Gerhard Schellhorn and Peter H. Schmitt}, title = {Integration of Automated and Interactive Theorem Proving}, publisher = {Kluwer}, booktitle = {Automated Deduction: A Basis for Applications}, editor = {W. Bibel and P. Schmitt}, year = 1998, volume = {II}, chapter = {4}, pages = {97--116}, url = {http://i11www.ira.uka.de/~ahrendt/papers/intDfgPaper.ps.gz}, abstract = {In this chapter we present a project to integrate interactive and automated theorem proving. Its aim is to combine the advantages of the two paradigms. We focus on one particular application domain, which is deduction for the purpose of software verification. Some of the reported facts may not be valid in other domains. We report on the integration concepts and on the experimental results with a prototype implementation.} }

@article{akbarpour2006, author = {Akbarpour, B. and Tahar, S.}, title = {An Approach for the Formal Verification of {DSP} Designs using Theorem Proving}, journal = {IEEE Transactions on CAD of Integrated Circuits and Systems}, volume = 25, number = 8, pages = {1441--1457}, year = 2006, url = {http://www.cl.cam.ac.uk/~ba265/papers/TCAD-DSP.pdf} }

@inproceedings{amtoft2008, author = {Torben Amtoft and John Hatcliff and Edwin Rodr{\'\i}guez and Robby and Jonathan Hoag and David Greve}, title = {Specification and Checking of Software Contracts for Conditional Information Flow}, crossref = {fm2008}, url = {http://people.cis.ksu.edu/~tamtoft/Papers/papers.html} }

@inproceedings{anderson2001, author = {Anderson, R.}, title = {Why Information Security is Hard - An Economic Perspective}, crossref = {acsac2001}, pages = {358--365}, url = {http://www.cl.cam.ac.uk/~rja14/Papers/econ.pdf}, abstract = {According to one common view, information security comes down to technical measures. Given better access control policy models, formal proofs of cryptographic protocols, approved firewalls, better ways of detecting intrusions and malicious code, and better tools for system evaluation and assurance, the problems can be solved. The author puts forward a contrary view: information insecurity is at least as much due to perverse incentives. Many of the problems can be explained more clearly and convincingly using the language of microeconomics: network externalities, asymmetric information, moral hazard, adverse selection, liability dumping and the tragedy of the commons.} }

@book{anderson2001a, title = {Security Engineering: A Guide to Building Dependable Distributed Systems}, author = {Anderson, Ross}, edition = 1, month = jan, year = 2001, publisher = {Wiley}, url = {http://www.cl.cam.ac.uk/~rja14/book.html} }

@manual{apol2008, title = {Apol Help Files}, organization = {Tresys Technology}, year = 2008, note = {Available from \url{http://oss.tresys.com/projects/setools/wiki/helpFiles/iflow_help}} }

@inproceedings{armando1998, author = {A. Armando and S. Ranise}, title = {From Integrated Reasoning Specialists to ``Plug-and-Play'' Reasoning Components}, pages = {42--54}, crossref = {aisc1998} }

@article{armando2003, author = {Alessandro Armando and Silvio Ranise and Michael Rusinowitch}, title = {A Rewriting Approach to Satisfiability Procedures}, journal = {Information and Computation}, volume = 183, number = 2, month = jun, year = 2003, pages = {140--164}, url = {http://www.loria.fr/~rusi/pub/longcsl01.ps} }

@inproceedings{arthan2000, author = {R. Arthan and P. Caseley and C. O'Halloran and A. Smith}, title = {{ClawZ}: Control Laws in {Z}}, pages = {169--176}, crossref = {icfem2000} }

@article{arthan2005, author = {R. D. Arthan and R. B. Jones}, title = {{Z} in {HOL} in {ProofPower}}, journal = {BCS FACS FACTS}, number = {2005-1}, year = 2005 }

@inproceedings{astrachan1992, author = {Owen L. Astrachan and Mark E. Stickel}, title = {Caching and Lemmaizing in Model Elimination Theorem Provers}, pages = {224--238}, crossref = {cade1992}, url = {http://www.cs.duke.edu/~ola/meteor.html} }

@article{astrachan1997, title = {The Use of Lemmas in the Model Elimination Procedure}, author = {Owen L. Astrachan and Donald W. Loveland}, journal = {Journal of Automated Reasoning}, pages = {117--141}, month = aug, year = 1997, volume = 19, number = 1, url = {http://www.cs.duke.edu/~ola/meteor.html} }

@book{baader1998, author = {Franz Baader and Tobias Nipkow}, title = {Term Rewriting and All That}, publisher = {Cambridge University Press}, year = 1998 }

@book{baker1984, author = {Alan Baker}, title = {A Concise Introduction to the Theory of Numbers}, publisher = {Cambridge University Press}, year = 1984 }

@inproceedings{baker1992, author = {Siani Baker and Andrew Ireland and Alan Smaill}, title = {On the Use of the Constructive Omega-Rule Within Automated Deduction}, pages = {214--225}, crossref = {lpar1992}, url = {http://www.dai.ed.ac.uk/papers/documents/rp560.html}, abstract = {The cut elimination theorem for predicate calculus states that every proof may be replaced by one which does not involve use of the cut rule. This theorem no longer holds when the system is extended with Peano's axioms to give a formalisation for arithmetic. The problem of generalisation results, since arbitrary formulae can be cut in. This makes theorem-proving very difficult - one solution is to embed arithmetic in a stronger system, where cut elimination holds. This paper describes a system based on the omega rule, and shows that certain statements are provable in this system which are not provable in Peano arithmetic without cut. Moreover, an important application is presented in the form of a new method of generalisation by means of "guiding proofs" in the stronger system, which sometimes succeeds in producing proofs in the original system when other methods fail. The implementation of such a system is also described.} }

@inproceedings{baldamus2001, author = {Michael Baldamus and Klaus Schneider and Michael Wenz and Roberto Ziller}, title = {Can {American} {Checkers} be Solved by Means of Symbolic Model Checking?}, booktitle = {Formal Methods Elsewhere (a Satellite Workshop of FORTE-PSTV-2000 devoted to applications of formal methods to areas other than communication protocols and software engineering)}, editor = {Howard Bowman}, month = may, year = 2001, publisher = {Elsevier}, series = {Electronic Notes in Theoretical Computer Science}, volume = 43, url = {http://rsg.informatik.uni-kl.de/publications/data/BSWZ01.pdf}, abstract = {Symbolic model checking has become a successful technique for verifying large finite state systems up to more than $10^{20}$ states. The key idea of this method is that extremely large sets can often be efficiently represented with propositional formulas. Most tools implement these formulas by means of binary decision diagrams (BDDs), which have therefore become a key data structure in modern VLSI CAD systems. Some board games like American checkers have a state space whose size is well within the range of state space sizes that have been tackled by symbolic model checking. Moreover, the question whether there is a winning strategy in these games can be reduced to two simple $\mu$-calculus formulas. Hence, the entire problem to solve such games can be reduced to simple $\mu$-calculus model checking problems. In this paper, we show how to model American checkers as a finite state system by means of BDDs. We moreover specify the existence of winning strategies for both players by simple $\mu$-calculus formulas. Further still, we report on our experimental results with our own model checking tool, and we describe some powerful improvements that we have found in trying to solve the game.} }

@inproceedings{ball2006, author = {T. Ball and E. Bounimova and B. Cook and V. Levin and J. Lichtenberg and C. McGarvey and B. Ondrusek and S. K. Rajamani and A. Ustuner}, title = {Thorough Static Analysis of Device Drivers}, pages = {73--85}, crossref = {eurosys2006} }

@inproceedings{ballarin2004, author = {Clemens Ballarin}, title = {Locales and locale expressions in {I}sabelle/{I}sar}, pages = {34--50}, crossref = {types2003} }

@article{bancerek1989, author = {Grzegorz Bancerek}, title = {The Fundamental Properties of Natural Numbers}, journal = {Journal of Formalized Mathematics}, volume = {1}, year = 1989 }

@book{bardell1987, author = {Paul H. Bardell and W. H. McAnney and J. Savir}, title = {Built In Test for {VLSI}: Pseudorandom Techniques}, publisher = {John Wiley}, month = oct, year = 1987, url = {http://www.wiley.com/Corporate/Website/Objects/Products/0,9049,65654,00.html}, abstract = {This handbook provides ready access to all of the major concepts, techniques, problems, and solutions in the emerging field of pseudorandom pattern testing. Until now, the literature in this area has been widely scattered, and published work, written by professionals in several disciplines, has treated notation and mathematics in ways that vary from source to source. This book opens with a clear description of the shortcomings of conventional testing as applied to complex digital circuits, reviewing by comparison the principles of design for testability of more advanced digital technology. Offers in-depth discussions of test sequence generation and response data compression, including pseudorandom sequence generators; the mathematics of shift-register sequences and their potential for built-in testing. Also details random and memory testing and the problems of assessing the efficiency of such tests, and the limitations and practical concerns of built-in testing.} }

@techreport{barras1997, title = {The {Coq} Proof Assistant Reference Manual: Version 6.1}, author = {Bruno Barras and Samuel Boutin and Cristina Cornes and Judicael Courant and Jean-Christophe Filliatre and Eduardo Gimenez and Hugo Herbelin and G{\'e}rard Huet and C{\'{e}}sar A. Mu{\~{n}}oz and Chetan Murthy and Catherine Parent and Christine Paulin-Mohring and Amokrane Saibi and Benjamin Werner}, type = {Technical Report}, institution = {INRIA (Institut National de Recherche en Informatique et en Automatique), France}, number = {RT-0203}, year = 1997 }

@inproceedings{barras2000, author = {Bruno Barras}, title = {Programming and Computing in {HOL}}, pages = {17--37}, crossref = {tphols2000} }

@article{bauer1998, author = {Andrej Bauer and Edmund M. Clarke and Xudong Zhao}, title = {Analytica - An Experiment in Combining Theorem Proving and Symbolic Computation}, journal = {Journal of Automated Reasoning}, volume = 21, number = 3, year = 1998, pages = {295--325}, url = {http://reports-archive.adm.cs.cmu.edu/anon/1992/CMUCS92147.ps} }

@article{beckert1995, author = {Bernhard Beckert and Joachim Posegga}, title = {{leanTAP}: Lean Tableau-based Deduction}, journal = {Journal of Automated Reasoning}, volume = 15, number = 3, pages = {339--358}, month = dec, year = 1995, url = {http://i12www.ira.uka.de/~beckert/pub/LeanTAP.ps.Z} }

@book{beeson1985, author = {Michael J. Beeson}, title = {Foundations of Constructive Mathematics}, publisher = {Springer}, year = 1985 }

@inproceedings{beeson1998, author = {Michael Beeson}, title = {Unification in {Lambda-Calculi} with if-then-else}, pages = {103--118}, crossref = {cade1998}, url = {http://link.springer-ny.com/link/service/series/0558/bibs/1421/14210103.htm}, abstract = {A new unification algorithm is introduced, which (unlike previous algorithms for unification in $\lambda$-calculus) shares the pleasant properties of first-order unification. Proofs of these properties are given, in particular uniqueness of the answer and the most-general-unifier property. This unification algorithm can be used to generalize first-order proof-search algorithms to second-order logic, making possible for example a straightforward treatment of McCarthy's circumscription schema.} }

@book{bell1930, author = {Eric T. Bell}, title = {Debunking Science}, publisher = {University of Washington Book Store}, year = 1930 }

@phdthesis{benzmuller1999, author = {C. Benzmuller}, title = {Equality and Extensionality in Higher-Order Theorem Proving}, school = {Saarland University}, month = apr, year = 1999, url = {http://www.ags.uni-sb.de/~chris/papers/diss.ps.gz} }

@inproceedings{berghofer2000, author = {Stefan Berghofer and Tobias Nipkow}, title = {Proof terms for simply typed higher order logic}, pages = {38--52}, crossref = {tphols2000}, url = {http://www4.informatik.tu-muenchen.de/~nipkow/pubs/tphols00.html}, abstract = {This paper presents proof terms for simply typed, intuitionistic higher order logic, a popular logical framework. Unification-based algorithms for the compression and reconstruction of proof terms are described and have been implemented in the theorem prover Isabelle. Experimental results confirm the effectiveness of the compression scheme.} }

@article{berlekamp1970, author = {E. R. Berlekamp}, title = {Factoring polynomials over large finite fields}, journal = {Math. Comput.}, volume = 24, year = 1970, pages = {713--735}, abstract = {This paper presents algorithms for root-finding and factorization, two problems in finite fields. The latter problem is reduced to the root-finding problem, for which a probabilistic algorithm is given. This paper is a precursor of Rabin (1980).} }

@article{bernerslee2001, author = {Tim Berners-Lee and James Hendler and Ora Lassila}, title = {The Semantic Web}, journal = {Scientific American}, month = may, year = 2001 }

@article{bessey2010, author = {Bessey, Al and Block, Ken and Chelf, Ben and Chou, Andy and Fulton, Bryan and Hallem, Seth and Gros, Charles H. and Kamsky, Asya and McPeak, Scott and Engler, Dawson}, title = {A few billion lines of code later: using static analysis to find bugs in the real world}, journal = {Communications of the ACM}, volume = 53, number = 2, pages = {66--75}, month = feb, year = 2010, publisher = {ACM}, address = {New York, NY, USA}, url = {http://dx.doi.org/10.1145/1646353.1646374} }

@inproceedings{bezem2000, author = {Marc Bezem and Dimitri Hendriks and Hans de Nivelle}, title = {Automated Proof Construction in Type Theory Using Resolution}, pages = {148--163}, crossref = {cade2000}, url = {http://www.phil.uu.nl/~hendriks/} }

@article{bialas1990, author = {J\'ozef Bia{\l}as}, title = {The $\sigma$-additive Measure Theory}, journal = {Journal of Formalized Mathematics}, year = {1990}, url = {http://mizar.uwb.edu.pl/JFM/Vol2/measure1.html} }

@article{bialas1992, author = {J\'ozef Bia{\l}as}, title = {Properties of {Caratheodor}'s Measure}, journal = {Journal of Formalized Mathematics}, year = {1992}, url = {http://mizar.uwb.edu.pl/JFM/Vol4/measure4.html} }

@book{bierce1911, author = {Ambrose Bierce}, title = {The Devil's Dictionary}, publisher = {Dover}, edition = {Dover thrift}, year = 1993 }

@inbook{biere2003, author = {Armin Biere and Alessandro Cimatti and Edmund M. Clarke and Ofer Strichman and Yunshan Zhu}, title = {Bounded Model Checking}, pages = {117--148}, series = {Advances in Computers}, volume = 58, publisher = {Elsevier}, month = aug, year = 2003, url = {http://www-2.cs.cmu.edu/~ofers/advances.pdf} }

@book{billingsley1986, author = {P. Billingsley}, title = {Probability and Measure}, edition = {2nd}, publisher = {John Wiley \& Sons, Inc.}, address = {New York}, year = 1986 }

@inproceedings{bingham2014, author = {Jesse Bingham and Joe Leslie-Hurd}, title = {Verifying Relative Error Bounds Using Symbolic Simulation}, pages = {277--292}, crossref = {cav2014}, url = {http://www.gilith.com/research/papers/}, abstract = {In this paper we consider the problem of formally verifying hardware that is specified to compute reciprocal, reciprocal square root, and power-of-two functions on floating point numbers to within a given relative error. Such specifications differ from the common case in which any given input is specified to have exactly one correct output. Our approach is based on symbolic simulation with binary decision diagrams, and involves two distinct steps. First, we prove a lemma that reduces the relative error specification to several inequalities that involve reasoning about natural numbers only. The most complex of these inequalities asserts that the product of several naturals is less-than/greater-than another natural. Second, we invoke one of several customized algorithms that decides the inequality, without performing the expensive symbolic multiplications directly. We demonstrate the effectiveness of our approach on a next-generation Intel processor design and report encouraging time and space metrics for these proofs.} }

@unpublished{bishop2004, author = {Steven Bishop and Matthew Fairbairn and Michael Norrish and Peter Sewell and Keith Wansbrough}, title = {The {TCP} Specification: A Quick Introduction}, note = {Available from Peter Sewell's web site}, month = mar, year = 2004, url = {http://www.cl.cam.ac.uk/~pes20/Netsem/quickstart.ps} }

@book{blake1999, author = {Ian Blake and Gadiel Seroussi and Nigel Smart}, title = {Elliptic Curves in Cryptography}, publisher = {Cambridge University Press}, year = 1999, volume = 265, series = {London Mathematical Society Lecture Note Series} }

@article{blazy2009, author = {Sandrine Blazy and Xavier Leroy}, title = {Mechanized Semantics for the {C}light Subset of the {C} Language}, journal = {Special Issue of the Journal of Automated Reasoning}, month = oct, year = 2009, volume = 43, number = 3, pages = {263--288}, url = {http://www.springerlink.com/content/l2422j427262l3h6/}, abstract = {This article presents the formal semantics of a large subset of the C language called Clight. Clight includes pointer arithmetic, struct and union types, C loops and structured switch statements. Clight is the source language of the CompCert verified compiler. The formal semantics of Clight is a big-step operational semantics that observes both terminating and diverging executions and produces traces of input/output events. The formal semantics of Clight is mechanized using the Coq proof assistant. In addition to the semantics of Clight, this article describes its integration in the CompCert verified compiler and several ways by which the semantics was validated.} }

@unpublished{bleicher2006, author = {Bleicher, E.}, year = 2006, note = {Available at the web page {http://www.k4it.de}}, title = {Freezer and {EGT}-query service on {N}alimov {DTM} {EGT}s} }

@techreport{boldo2006, title = {A High-Level Formalization of Floating-Point Numbers in {PVS}}, author = {Boldo, Sylvie and Mu{\~{n}}oz, C{\'e}sar}, institution = {National Aeronautics and Space Administration}, number = {NASA/CR-2006-214298}, month = oct, year = 2006, address = {Langley Research Center, Hampton VA 23681-2199, USA}, url = {http://shemesh.larc.nasa.gov/fm/papers/Boldo-CR-2006-214298-Floating-Point.pdf} }

@book{boole1847, author = {George Boole}, title = {The Mathematical Analysis of Logic, Being an Essay Toward a Calculus of Deductive Reasoning}, address = {Cambridge}, publisher = {Macmillan}, year = 1847, pages = 82 }

@article{bortin2006, author = {Bortin, Maksym and Broch~Johnsen, Einar and L\"{u}th, Christoph}, title = {Structured Formal Development in {I}sabelle}, journal = {Nordic Journal of Computing}, year = 2006, volume = 13, pages = {1--20}, url = {http://www.informatik.uni-bremen.de/~cxl/awe/papers.html}, abstract = {General purpose theorem provers provide advanced facilities for proving properties about specifications, and may therefore be a valuable tool in formal program development. However, these provers generally lack many of the useful structuring mechanisms found in functional programming or specification languages. This paper presents a constructive approach to adding theory morphisms and parametrisation to theorem provers, while preserving the proof support and consistency of the prover. The approach is implemented in Isabelle and illustrated by examples of an algorithm design rule and of the modular development of computational effects for imperative language features based on monads.} }

@article{boulton1993, author = {Richard J. Boulton}, title = {Lazy Techniques for Fully Expansive Theorem Proving}, journal = {Formal Methods in System Design}, year = 1993, volume = 3, number = {1/2}, pages = {25--47}, month = aug }

@inproceedings{boulton1995, author = {Richard J. Boulton}, title = {Combining Decision Procedures in the {HOL} System}, pages = {75--89}, crossref = {hol1995} }

@article{boulton2009, author = {Richard Boulton and Joe Hurd and Konrad Slind}, title = {Computer Assisted Reasoning: A {F}estschrift for {M}ichael {J.} {C.} {G}ordon}, journal = {Special Issue of the Journal of Automated Reasoning}, month = oct, year = 2009, volume = 43, number = 3, pages = {237--242}, url = {http://www.springerlink.com/content/5650666043651557/} }

@inproceedings{bourdoncle1993, author = {Fran{\c{c}}ois Bourdoncle}, title = {Efficient Chaotic Iteration Strategies with Widenings}, pages = {128--141}, crossref = {fmpa1993}, url = {http://web.me.com/fbourdoncle/page4/page12/page12.html}, abstract = {Abstract interpretation is a formal method that enables the static and automatic determination of run-time properties of programs. This method uses a characterization of program invariants as least and greatest fixed points of continuous functions over complete lattices of program properties. In this paper, we study precise and efficient chaotic iteration strategies for computing such fixed points when lattices are of infinite height and speedup techniques, known as widening and narrowing, have to be used. These strategies are based on a weak topological ordering of the dependency graph of the system of semantic equations associated with the program and minimize the loss in precision due to the use of widening operators. We discuss complexity and implementation issues and give precise upper bounds on the complexity of the intraprocedural and interprocedural abstract interpretation of higher-order programs based on the structure of their control flow graph.} }

@article{bourzutschky2005, author = {Bourzutschky, M. S. and Tamplin, J. A. and Haworth, G. McC.}, year = 2005, title = {Chess endgames: 6-man data and strategy}, journal = {Theoretical Computer Science}, volume = 349, number = 2, pages = {140--157}, annote = {ISSN 0304-3975} }

@article{boyer1984, author = {Robert S. Boyer and J Strother Moore}, title = {Proof checking the {RSA} public key encryption algorithm}, journal = {American Mathematical Monthly}, volume = 91, number = 3, pages = {181--189}, year = 1984, url = {http://www.cs.utexas.edu/users/boyer/rsa.ps.Z} }

@article{boyer1988, author = {Robert S. Boyer and J Strother Moore}, title = {Integrating Decision Procedures into Heuristic Theorem Provers: A Case Study of Linear Arithmetic}, journal = {Machine Intelligence}, volume = {11}, publisher = {Oxford University Press}, year = 1988, pages = {83--124} }

@inproceedings{boyer1994, author = {Robert S. Boyer and N. G. de Bruijn and G{\'e}rard Huet and Andrzej Trybulec}, title = {A mechanically proof-checked encyclopedia of mathematics: Should we build one? Can we?}, pages = {237--251}, crossref = {cade1994} }

@inproceedings{browning2012, author = {Browning, Sally A. and Hurd, Joe}, title = {Cryptol: The Language of Cryptanalysis (Invited Talk Abstract)}, pages = {57--59}, crossref = {sharcs2012}, url = {http://gilith.com/research/papers} }

@article{bryant1992, author = {Randal E. Bryant}, title = {Symbolic {Boolean} Manipulation with Ordered Binary-Decision Diagrams}, journal = {ACM Computing Surveys}, volume = 24, number = 3, pages = {293--318}, month = sep, year = 1992, url = {http://www.acm.org/pubs/toc/Abstracts/0360-0300/136043.html}, abstract = {Ordered Binary-Decision Diagrams (OBDDs) represent Boolean functions as directed acyclic graphs. They form a canonical representation, making testing of functional properties such as satisfiability and equivalence straightforward. A number of operations on Boolean functions can be implemented as graph algorithms on OBDD data structures. Using OBDDs, a wide variety of problems can be solved through {\em symbolic analysis}. First, the possible variations in system parameters and operating conditions are encoded with Boolean variables. Then the system is evaluated for all variations by a sequence of OBDD operations. Researchers have thus solved a number of problems in digital-system design, finite-state system analysis, artificial intelligence, and mathematical logic. This paper describes the OBDD data structure and surveys a number of applications that have been solved by OBDD-based symbolic analysis.} }

@unpublished{bundy1996, author = {Alan Bundy}, title = {Artificial Mathematicians}, note = {Available from the author's web site}, month = may, year = 1996, url = {http://www.dai.ed.ac.uk/homes/bundy/tmp/new-scientist.ps.gz} }

@inproceedings{debruijn1970, author = {N. de Bruijn}, title = {The Mathematical language {AUTOMATH}, its usage, and some of its extensions}, booktitle = {Symposium on Automatic Demonstration}, organization = {Lecture Notes in Mathematics, 125}, publisher = {Springer}, pages = {29--61}, year = 1970 }

@article{buffon1777, author = {G. Buffon}, title = {Essai d'arithm\'etique morale}, journal = {Suppl\'ement \`a l'Histoire Naturelle}, volume = 4, year = 1777 }

@book{bundy1983, author = {Alan Bundy}, title = {The Computer Modelling of Mathematical Reasoning}, publisher = {Academic Press}, year = 1983 }

@inproceedings{burke2010, author = {David Burke and Joe Hurd and John Launchbury and Aaron Tomb}, title = {Trust Relationship Modeling for Software Assurance}, url = {http://www.gilith.com/research/papers}, crossref = {fast2010} }

@article{burrows1990, author = {Michael Burrows and Mart{\'\i}n Abadi and Roger Needham}, title = {A Logic of Authentication}, journal = {ACM Transactions on Computer Systems}, volume = 8, year = 1990, pages = {18--36}, number = 1, month = feb, url = {http://www.acm.org/pubs/articles/journals/tocs/1990-8-1/p18-burrows/p18-burrows.pdf}, abstract = {Authentication protocols are the basis of security in many distributed systems, and it is therefore essential to ensure that these protocols function correctly. Unfortunately, their design has been extremely error prone. Most of the protocols found in the literature contain redundancies or security flaws. A simple logic has allowed us to describe the beliefs of trustworthy parties involved in authentication protocols and the evolution of these beliefs as a consequence of communication. We have been able to explain a variety of authentication protocols formally, to discover subtleties and errors in them, and to suggest improvements. In this paper we present the logic and then give the results of our analysis of four published protocols, chosen either because of their practical importance or because they serve to illustrate our method.} }

@article{burstall1977, author = {Rod Burstall and John Darlington}, title = {A transformation system for developing recursive programs}, journal = {Journal of the Association for Computing Machinery}, volume = 24, number = 1, pages = {44--67}, month = jan, year = 1977 }

@inproceedings{busch1994, author = {H. Busch}, title = {First-Order Automation for Higher-Order-Logic Theorem Proving}, crossref = {hol1994} }

@inproceedings{butler2010, author = {Butler, Ricky W. and Hagen, George and Maddalon, Jeffrey M. and Mu{\~{n}}oz, C{\'e}sar and Narkawicz, Anthony and Dowek, Gilles}, title = {How Formal Methods Impels Discovery: A Short History of an Air Traffic Management Project}, crossref = {nfm2010}, url = {http://shemesh.larc.nasa.gov/NFM2010/papers/nfm2010_34_46.pdf} }

@article{caprotti2001, author = {O. Caprotti and M. Oostdijk}, title = {Formal and Efficient Primality Proofs by use of Computer Algebra Oracles}, journal = {Journal of Symbolic Computation}, volume = 32, number = {1--2}, pages = {55--70}, year = 2001, editor = {T. Recio and M. Kerber}, note = {Special Issue on Computer Algebra and Mechanized Reasoning} }

@inproceedings{carreno1995, author = {Carre{\~{n}}o, V{\'{i}}ctor A. and Miner, Paul S.}, title = {Specification of the {IEEE}-854 Floating-Point Standard in {HOL} and {PVS}}, crossref = {hol1995a}, url = {http://shemesh.larc.nasa.gov/people/vac/publications/hug95.pdf} }

@article{celiku2004, author = {Orieta Celiku and Annabelle McIver}, title = {Cost-Based Analysis of Probabilistic Programs Mechanised in {HOL}}, journal = {Nordic Journal of Computing}, volume = 11, number = 2, pages = {102--128}, year = 2004, url = {http://www.cse.unsw.edu.au/~carrollm/probs/Papers/Celiku-04.pdf} }

@inproceedings{celiku2005, author = {Orieta Celiku and Annabelle McIver}, title = {Compositional Specification and Analysis of Cost-Based Properties in Probabilistic Programs}, pages = {107--122}, url = {http://rd.springer.com/chapter/10.1007\%2F11526841_9}, crossref = {fme2005} }

@phdthesis{celiku2006, author = {Orieta Celiku}, title = {Mechanized Reasoning for Dually-Nondeterministic and Probabilistic Programs}, year = 2006, school = {{\AA}bo Akademi University}, url = {http://tucs.fi/publications/view/?id=phdCeliku06a} }

@book{chang1973, author = {Chin-Liang Chang and Richard Char-Tung Lee}, title = {Symbolic Logic and Mechanical Theorem Proving}, publisher = {Academic Press}, address = {New York}, year = 1973 }

@article{church1940, author = {Alonzo Church}, title = {A Formulation of the Simple Theory of Types}, journal = {Journal of Symbolic Logic}, year = 1940, volume = 5, pages = {56--68} }

@article{claessen2000, author = {Koen Claessen and John Hughes}, title = {{QuickCheck}: a lightweight tool for random testing of {Haskell} programs}, journal = {ACM SIG{\-}PLAN Notices}, volume = {35}, number = {9}, pages = {268--279}, month = sep, year = {2000}, url = {http://www.md.chalmers.se/~rjmh/QuickCheck/} }

@book{clarke1999, author = {E. M. Clarke and O. Grumberg and D. A. Peled}, title = {{M}odel {C}hecking}, publisher = {The MIT Press}, year = {1999} }

@article{coe1996, author = {T. Coe}, title = {Inside the {P}entium {FDIV} Bug}, journal = {Dr.~Dobbs Journal}, year = 1996, month = apr }

@article{cohn1989, author = {Avra Cohn}, title = {The Notion of Proof in Hardware Verification}, journal = {Journal of Automated Reasoning}, volume = 5, number = 2, year = {1989}, pages = {127--139} }

@inproceedings{colwell2000, author = {Bob Colwell and Bob Brennan}, title = {Intel's Formal Verification Experience on the {Willamette} Development}, pages = {106--107}, crossref = {tphols2000} }

@phdthesis{compagnoni1995, author = {Adriana B. Compagnoni}, title = {Higher-Order Subtyping with Intersection Types}, year = 1995, month = jan, school = {Catholic University, Nijmegen} }

@inproceedings{cook1971, author = {Stephen A. Cook}, title = {The complexity of theorem-proving procedures}, pages = {151--158}, editor = {{ACM}}, booktitle = {Third annual {ACM} Symposium on Theory of Computing}, address = {Shaker Heights, Ohio, USA}, month = may, year = 1971, publisher = {ACM Press} }

@unpublished{cook2000, author = {Stephen A. Cook}, title = {The {P} Versus {NP} Problem}, month = apr, year = 2000, note = {Manuscript prepared for the Clay Mathematics Institute for the Millennium Prize Problems}, url = {http://www.claymath.org/prizeproblems/pvsnp.htm} }

@inproceedings{cook2006, author = {Byron Cook and Andreas Podelski and Andrey Rybalchenko}, title = {Terminator: Beyond Safety}, pages = {415--418}, crossref = {cav2006}, url = {http://citeseer.ist.psu.edu/cook06terminator.html}, abstract = {Previous symbolic software model checkers (i.e., program analysis tools based on predicate abstraction, pushdown model checking and iterative counterexample-guided abstraction refinement, etc.) are restricted to safety properties. Terminator is the first software model checker for termination. It is now being used to prove that device driver dispatch routines always return to their caller (or return counterexamples if they fail to terminate).} }

@book{cormen1990, author = {Thomas H. Cormen and Charles Eric Leiserson and Ronald L. Rivest}, title = {Introduction to Algorithms}, publisher = {MIT Press/McGraw-Hill}, address = {Cambridge, Massachusetts}, year = 1990 }

@inproceedings{coutts2008, author = {Coutts, Duncan and Potoczny-Jones, Isaac and Stewart, Don}, title = {Haskell: Batteries Included}, pages = {125--126}, crossref = {haskell2008}, url = {http://www.cse.unsw.edu.au/~dons/papers/CPJS08.html}, abstract = {The quality of a programming language itself is only one component in the ability of application writers to get the job done. Programming languages can succeed or fail based on the breadth and quality of their library collection. Over the last few years, the Haskell community has risen to the task of building the library infrastructure necessary for Haskell to succeed as a programming language suitable for writing real-world applications. This on-going work, the Cabal and Hackage effort, is built on the open source model of distributed development, and have resulted in a flowering of development in the language with more code produced and reused now than at any point in the community's history. It is easier to obtain and use Haskell code, in a wider range of environments, than ever before. This demonstration describes the infrastructure and process of Haskell development inside the Cabal/Hackage framework, including the build system, library dependency resolution, centralised publication, documentation and distribution, and how the code escapes outward into the wider software community. We survey the benefits and trade-offs in a distributed, collaborative development ecosystem and look at a proposed Haskell Platform that envisages a complete Haskell development environment, batteries included.} }

@techreport{craigen1991, author = {D. Craigen and S. Kromodimoeljo and I. Meisels and B. Pase and M. Saaltink}, title = {{EVES}: An Overview}, institution = {ORA Corporation}, number = {CP-91-5402-43}, year = 1991 }

@techreport{curzon1994, author = {Paul Curzon}, institution = {University of Cambridge Computer Laboratory}, month = mar, number = {328}, title = {The formal verification of the {Fairisle} {ATM} switching element: an overview}, year = 1994 }

@inproceedings{cyrluk1996, author = {David Cyrluk and Patrick Lincoln and Natarajan Shankar}, title = {On {Shostak}'s Decision Procedure for Combinations of Theories}, crossref = {cade1996} }

@inproceedings{dahn1997, author = {Ingo Dahn and Christoph Wernhard}, title = {First Order Proof Problems Extracted from an Article in the {MIZAR} Mathematical Library}, crossref = {ftp1997}, url = {http://www-irm.mathematik.hu-berlin.de/~ilf/papers/index.html} }

@inproceedings{dahn1998, title = {Interpretation of a {Mizar}-like logic in first-order logic}, author = {Ingo Dahn}, pages = {116--126}, crossref = {ftp1998} }

@article{daumas2009, author = {Daumas, Marc and Lester, David and Mu{\~{n}}oz, C{\'{e}}sar}, title = {Verified Real Number Calculations: A Library for Interval Arithmetic}, journal = {IEEE Transactions on Computers}, volume = 58, number = 2, pages = {226--237}, month = feb, year = 2009 }

@book{davenport1992, author = {Harold Davenport}, title = {The Higher Arithmetic}, publisher = {Cambridge University Press}, year = 1992, edition = {Sixth} }

@book{degroot1989, author = {Morris DeGroot}, title = {Probability and Statistics}, publisher = {Addison-Wesley}, edition = {2nd}, year = 1989 }

@incollection{deleeuw1955, author = {K. de Leeuw and E. F. Moore and C. E. Shannon and N. Shapiro}, title = {Computability by probabilistic machines}, pages = {183--212}, booktitle = {Automata Studies}, editor = {C. E. Shannon and J. McCarthy}, year = 1955, publisher = {Princeton University Press}, address = {Princeton, NJ} }

@article{demillo1979, author = {De Millo, Richard A. and Lipton, Richard J. and Perlis, Alan J.}, title = {Social processes and proofs of theorems and programs}, journal = {Communications of the ACM}, volume = 22, number = 5, year = 1979, pages = {271--280}, doi = {http://doi.acm.org/10.1145/359104.359106}, publisher = {ACM}, address = {New York, NY, USA}, abstract = {It is argued that formal verifications of programs, no matter how obtained, will not play the same key role in the development of computer science and software engineering as proofs do in mathematics. Furthermore the absence of continuity, the inevitability of change, and the complexity of specification of significantly many real programs make the formal verification process difficult to justify and manage. It is felt that ease of formal verification should not dominate program language design.} }

@inproceedings{divito2004, author = {Di Vito, Ben L.}, title = {Hypatheon: A Mathematical Database for {PVS} Users (Extended Abstract)}, crossref = {namkm2004}, url = {http://imps.mcmaster.ca/na-mkm-2004/proceedings/pdfs/divito.pdf} }

@inproceedings{dennis2000, author = {Louise A. Dennis and Graham Collins and Michael Norrish and Richard Boulton and Konrad Slind and Graham Robinson and Mike Gordon and Tom Melham}, title = {The {PROSPER} Toolkit}, pages = {78--92}, crossref = {tacas2000}, url = {http://www.comlab.ox.ac.uk/tom.melham/pub/Dennis-2000-PT.pdf} }

@techreport{denzinger1997, author = {J{\"o}rg Denzinger and Dirk Fuchs}, title = {Knowledge-based Cooperation between Theorem Provers by {TECHS}}, year = 1997, type = {SEKI-Report}, number = {SR-97-11}, institution = {University of Kaiserslautern}, url = {http://agent.informatik.uni-kl.de/seki/1997/Fuchs.SR-97-11.ps.gz} }

@article{dijkstra1972, title = {The humble programmer}, author = {Dijkstra, Edsger W.}, journal = {Communications of the ACM}, volume = {15}, number = 10, pages = {859--866}, year = 1972, month = oct, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/355604.361591} }

@book{dijkstra1976, author = {E. W. Dijkstra}, title = {A Discipline of Programming}, publisher = {Prentice Hall}, year = 1976 }

@inproceedings{dolstra2008, author = {Eelco Dolstra and Andres L{\"o}h}, title = {{NixOS}: A Purely Functional {Linux} Distribution}, pages = {367--378}, crossref = {icfp2008}, url = {http://doi.acm.org/10.1145/1411204.1411255} }

@inproceedings{duan2005, author = {Jianjun Duan and Joe Hurd and Guodong Li and Scott Owens and Konrad Slind and Junxing Zhang}, title = {Functional Correctness Proofs of Encryption Algorithms}, url = {http://www.gilith.com/research/papers}, crossref = {lpar2005}, pages = {519--533} }

@inproceedings{edelkamp2002, author = {Stefan Edelkamp}, title = {Symbolic Exploration in Two-Player Games: Preliminary Results}, pages = {40--48}, booktitle = {The International Conference on AI Planning \& Scheduling (AIPS), Workshop on Model Checking}, address = {Toulouse, France}, year = 2002, url = {http://ls5-www.cs.uni-dortmund.de/~edelkamp/publications/TwoBdd.pdf}, abstract = {In this paper symbolic exploration with binary decision diagrams (BDDs) is applied to two-player games to improve main memory consumption for reachability analysis and game-theoretical classification, since BDDs provide a compact representation for large set of game positions. A number of examples are evaluated: Tic-Tac-Toe, Nim, Hex, and Four Connect. In Chess we restrict the considerations to the creation of endgame databases. The results are preliminary, but the study puts forth the idea that BDDs are widely applicable in game playing and provides a universal tool for people interested in quickly solving practical problems.} }

@misc{euclid300bc, author = {Euclid}, title = {Elements}, note = {c.~300~B.C.}, url = {http://www-groups.dcs.st-andrews.ac.uk/~history/Mathematicians/Euclid.html} }

@book{euclid1956, author = {Euclid}, title = {Elements}, publisher = {Dover}, year = 1956, note = {Translated by Sir Thomas L. Heath} }

@article{farmer1993, author = {William M. Farmer and Joshua D. Guttman and F. Javier Thayer}, title = {{IMPS}: An Interactive Mathematical Proof System}, journal = {Journal of Automated Reasoning}, year = 1993, volume = 11, pages = {213--248}, url = {http://imps.mcmaster.ca/wmfarmer/publications.html}, abstract = {IMPS is an Interactive Mathematical Proof System intended as a general purpose tool for formulating and applying mathematics in a familiar fashion. The logic of IMPS is based on a version of simple type theory with partial functions and subtypes. Mathematical specification and inference are performed relative to axiomatic theories, which can be related to one another via inclusion and theory interpretation. IMPS provides relatively large primitive inference steps to facilitate human control of the deductive process and human comprehension of the resulting proofs. An initial theory library containing almost a thousand repeatable proofs covers significant portions of logic, algebra and analysis, and provides some support for modeling applications in computer science.} }

@inproceedings{farmer1994, author = {William M. Farmer}, title = {Theory Interpretation in Simple Type Theory}, pages = {96--123}, crossref = {hoa1993}, url = {http://imps.mcmaster.ca/doc/interpretations.pdf} }

@article{farmer2008, author = {William M. Farmer}, title = {The seven virtues of simple type theory}, journal = {Journal of Applied Logic}, year = 2008, volume = 6, pages = {267--286}, url = {http://imps.mcmaster.ca/wmfarmer/publications.html}, abstract = {Simple type theory, also known as higher-order logic, is a natural extension of first-order logic which is simple, elegant, highly expressive, and practical. This paper surveys the virtues of simple type theory and attempts to show that simple type theory is an attractive alternative to first-order logic for practical-minded scientists, engineers, and mathematicians. It recommends that simple type theory be incorporated into introductory logic courses offered by mathematics departments and into the undergraduate curricula for computer science and software engineering students.} }

@article{feldman1984, author = {V. A. Feldman and D. Harel}, title = {A probabilistic dynamic logic}, journal = {Journal of Computer and System Sciences}, volume = {28}, number = {2}, pages = {193--215}, year = 1984 }

@article{fetzer1988, author = {Fetzer, James H.}, title = {Program verification: the very idea}, journal = {Communications of the ACM}, volume = 31, number = 9, year = 1988, pages = {1048--1063}, doi = {http://doi.acm.org/10.1145/48529.48530}, publisher = {ACM}, address = {New York, NY, USA}, abstract = {The notion of program verification appears to trade upon an equivocation. Algorithms, as logical structures, are appropriate subjects for deductive verification. Programs, as causal models of those structures, are not. The success of program verification as a generally applicable and completely reliable method for guaranteeing program performance is not even a theoretical possibility.} }

@inbook{fide2004, author = {FIDE}, title = {The {FIDE} Handbook}, chapter = {E.I. The Laws of Chess}, publisher = {FIDE}, year = 2004, note = {Available for download from the FIDE website.}, url = {http://www.fide.com/official/handbook.asp} }

@article{fidge2003, author = {C. Fidge and C. Shankland}, title = {But what if {I} don't want to wait forever?}, journal = {Formal Aspects of Computing}, year = 2003, note = {To appear} }

@article{fierz2002, author = {Fierz, M. and Cash, M. and Gilbert, E.}, year = 2002, title = {The 2002 World Computer-Checkers Championship}, journal = {ICGA Journal}, volume = 25, number = 3, pages = {196--198} }

@book{fishman1996, author = {George S. Fishman}, title = {Monte Carlo: Concepts, Algorithms and Applications}, publisher = {Springer}, year = 1996 }

@book{fleuriot2001, author = {J. D. Fleuriot}, title = {A Combination of Geometry Theorem Proving and Nonstandard Analysis, with Application to Newton's Principia}, publisher = {Springer}, series = {Distinguished Dissertations}, year = {2001} }

@inproceedings{fox2003, author = {Anthony Fox}, pages = {25--40}, title = {Formal Specification and Verification of {ARM6}}, crossref = {tphols2003} }

@inproceedings{fox2005, author = {Anthony Fox}, pages = {157--174}, title = {An Algebraic Framework for Verifying the Correctness of Hardware with Input and Output: A Formalization in {HOL}}, crossref = {calco2005} }

@unpublished{fuchs2009, author = {Adam Fuchs and Avik Chaudhuri and Jeffrey Foster}, title = {{SCanDroid}: Automated Security Certification of {A}ndroid Applications}, note = {Available from the second author's website}, year = 2009, url = {http://www.cs.umd.edu/~avik/projects/scandroidascaa/}, abstract = {Android is a popular mobile-device platform developed by Google. Android's application model is designed to encourage applications to share their code and data with other applications. While such sharing can be tightly controlled with permissions, in general users cannot determine what applications will do with their data, and thereby cannot decide what permissions such applications should run with. In this paper we present SCanDroid, a tool for reasoning automatically about the security of Android applications. SCanDroid's analysis is modular to allow incremental checking of applications as they are installed on an Android device. It extracts security specifications from manifests that accompany such applications, and checks whether data flows through those applications are consistent with those specifications. To our knowledge, SCanDroid is the first program analysis tool for Android, and we expect it to be useful for automated security certification of Android applications.} }

@inproceedings{gabbay1992, author = {Dov M. Gabbay and Hans J{\"u}rgen Ohlbach}, title = {Quantifier elimination in second--order predicate logic}, booktitle = {Principles of Knowledge Representation and Reasoning (KR92)}, year = 1992, editor = {Bernhard Nebel and Charles Rich and William Swartout}, pages = {425--435}, publisher = {Morgan Kaufmann}, url = {http://www.pms.informatik.uni-muenchen.de/mitarbeiter/ohlbach/homepage/publications/PL/abstracts.shtml#scan}, abstract = {An algorithm is presented which eliminates second--order quantifiers over predicate variables in formulae of type exists P1,...,PnF where F is an arbitrary formula of first--order predicate logic. The resulting formula is equivalent to the original formula -- if the algorithm terminates. The algorithm can for example be applied to do interpolation, to eliminate the second--order quantifiers in circumscription, to compute the correlations between structures and power structures, to compute semantic properties corresponding to Hilbert axioms in non classical logics and to compute model theoretic semantics for new logics.} }

@misc{gathercole1997, author = {Chris Gathercole and Peter Ross}, title = {Small Populations over Many Generations can beat Large Populations over Few Generations in Genetic Programming}, url = {http://www.dai.ed.ac.uk/students/chrisg/}, year = 1997 }

@article{gill1977, author = {J. T. Gill}, title = {Computational complexity of probabilistic {Turing} machines}, journal = {SIAM Journal on Computing}, volume = 6, number = 4, pages = {675--695}, month = dec, year = 1977 }

@article{gilmore1960, author = {P. C. Gilmore}, title = {A proof method for quantification theory: Its justification and realization}, journal = {IBM Journal of Research and Development}, volume = 4, pages = {28--35}, year = 1960 }

@book{girard1989, author = {Jean-Yves Girard}, series = {Cambridge tracts in theoretical computer science}, subject = {Electronic digital computers--Programming Proof theory Type theory}, volume = {7}, camlib = {[Univ. Lib.] 348:8.b.95.262 South Front 4}, publisher = {Cambridge University Press}, pages = {xi,176p}, year = 1989, address = {Cambridge}, size = {26cm}, title = {Proofs and types} }

@article{godel1931, author = {K. G{\"o}del}, journal = {Monatshefte f{\"u}r Mathematik und Physik}, pages = {173--198}, title = {{\"U}ber formal unentscheidbare {S}{\"a}tze der {P}rincipia {M}athematica und verwandter {S}ysteme}, volume = 38, year = 1931 }

@book{godel1962, author = {K. G{\"o}del}, title = {On Formally Undecidable Propositions of Principia Mathematica and Related Systems}, year = 1962, publisher = {Oliver and Boyd}, note = {Translated by B. Meltzer}, address = {London} }

@book{goguen1996, author = {J. Goguen and G. Malcolm}, title = {Algebraic Semantics of Imperative Programs}, edition = {1st}, publisher = {MIT Press}, address = {Cambridge, Mass.}, year = 1996, isbn = {0-262-07172-X} }

@inproceedings{goldberg2002, author = {E. Goldberg and Y. Novikov}, title = {{BerkMin}: {A} Fast and Robust {SAT}-Solver}, booktitle = {Design, Automation, and Test in Europe (DATE '02)}, month = mar, year = 2002, pages = {142--149}, url = {http://www.ece.cmu.edu/~mvelev/goldberg_novikov_date02.pdf} }

@book{goldie1991, author = {Charles M. Goldie and Richard G. E. Pinch}, title = {Communication theory}, series = {LMS Student Texts}, volume = 20, publisher = {Cambridge University Press}, year = 1991 }

@unpublished{gonthier2004, author = {Georges Gonthier}, title = {A computer-checked proof of the Four Colour Theorem}, year = 2004, note = {Available for download at the author's website}, url = {http://research.microsoft.com/~gonthier/4colproof.pdf}, abstract = {This report gives an account of a successful formalization of the proof of the Four Colour Theorem, which was fully checked by the Coq v7.3.1 proof assistant. This proof is largely based on the mixed mathematics/computer proof of Robertson et al, but contains original contributions as well. This document is organized as follows: section 1 gives a historical introduction to the problem and positions our work in this setting; section 2 defines more precisely what was proved; section 3 explains the broad outline of the proof; section 4 explains how we exploited the features of the Coq assistant to conduct the proof, and gives a brief description of the tactic shell that we used to write our proof scripts; section 5 is a detailed account of the formal proof (for even more details the actual scripts can be consulted); section 6 is a chronological account of how the formal proof was developed; finally, we draw some general conclusions in section 7.} }

@inproceedings{gordon1978, author = {Gordon, M. and Milner, R. and Morris, L. and Newey, M. and Wadsworth, C.}, title = {A {M}etalanguage for interactive proof in {LCF}}, crossref = {popl1978}, pages = {119--130} }

@book{gordon1979, author = {M. Gordon and R. Milner and C. Wadsworth}, title = {Edinburgh {LCF}}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 78, year = 1979 }

@techreport{gordon1985, author = {Mike Gordon}, title = {Why {H}igher-{O}rder {L}ogic is a Good Formalism For Specifying and Verifying Hardware}, year = 1985, institution = {Computer Laboratory, The University of Cambridge}, number = 77 }

@book{gordon1988, author = {M. J. C. Gordon}, title = {Programming Language Theory and its Implementation}, publisher = {Prentice Hall}, year = 1988 }

@incollection{gordon1988a, author = {Michael J. C. Gordon}, title = {{HOL}: A proof generating system for higher-order logic}, booktitle = {{VLSI} Specification, Verification and Synthesis}, editor = {Graham Birtwistle and P. A. Subrahmanyam}, year = 1988, pages = {73--128}, publisher = {Kluwer Academic Publishers}, address = {Boston} }

@incollection{gordon1989, author = {M. J. C. Gordon}, title = {Mechanizing Programming Logics in Higher Order Logic}, booktitle = {Current Trends in Hardware Verification and Automated Theorem Proving}, editor = {G. Birtwistle and P. A. Subrahmanyam}, publisher = {Springer-Verlag}, year = 1989, pages = {387--439}, url = {ftp://ftp.cl.cam.ac.uk/hvg/papers/Banffpaper.dvi.gz} }

@book{gordon1993, editor = {M. J. C. Gordon and T. F. Melham}, title = {Introduction to HOL (A theorem-proving environment for higher order logic)}, publisher = {Cambridge University Press}, year = 1993 }

@techreport{gordon1994, author = {Mike Gordon}, title = {Merging {HOL} with Set Theory: preliminary experiments}, institution = {University of Cambridge Computer Laboratory}, number = 353, month = nov, year = 1994, url = {http://www.cl.cam.ac.uk/users/mjcg/papers/holst}, abstract = {Set theory is the standard foundation for mathematics, but the majority of general purpose mechanized proof assistants support versions of type theory (higher order logic). Examples include Alf, Automath, Coq, Ehdm, HOL, IMPS, Lambda, LEGO, Nuprl, PVS and Veritas. For many applications type theory works well and provides, for specification, the benefits of type-checking that are well-known in programming. However, there are areas where types get in the way or seem unmotivated. Furthermore, most people with a scientific or engineering background already know set theory, whereas type theory may appear inaccessable and so be an obstacle to the uptake of proof assistants based on it. This paper describes some experiments (using HOL) in combining set theory and type theory; the aim is to get the best of both worlds in a single system. Three approaches have been tried, all based on an axiomatically specified type V of ZF-like sets: (i) HOL is used without any additions besides V; (ii) an embedding of the HOL logic into V is provided; (iii) HOL axiomatic theories are automatically translated into set-theoretic definitional theories. These approaches are illustrated with two examples: the construction of lists and a simple lemma in group theory.} }

@unpublished{gordon1996, author = {M. J. C. Gordon}, title = {Notes on {PVS} from a {HOL} perspective}, year = 1996, note = {Available from the author's web page}, url = {http://www.cl.cam.ac.uk/~mjcg/PVS.html}, abstract = {During the first week of July 1995 I visited SRI Menlo Park to find out more about PVS (Prototype Verification System). The preceding week I attended LICS95, where I had several talks with Natarajan Shankar accompanied by demos of the system on his SparcBook laptop. This note consists of a somewhat rambling and selective report on some of the things I learned, together with a discussion of their implications for the evolution of the HOL system.} }

@inproceedings{gordon2000, author = {Michael J. C. Gordon}, title = {Reachability Programming in {HOL98} Using {BDD}s}, pages = {179--196}, crossref = {tphols2000}, url = {http://www.cl.cam.ac.uk/~mjcg/BDD/}, abstract = {Two methods of programming BDD-based symbolic algorithms in the Hol98 proof assistant are presented. The goal is to provide a platform for implementing intimate combinations of deduction and algorithmic verification, like model checking. The first programming method uses a small kernel of ML functions to convert between BDDs, terms and theorems. It is easy to use and is suitable for rapid prototying experiments. The second method requires lower-level programming but can support more efficient calculations. It is based on an LCF-like use of an abstract type to encapsulate rules for manipulating judgements r t -> b meaning ``logical term t is represented by BDD b with respect to variable order r''. The two methods are illustrated by showing how to perform the standard fixed-point calculation of the BDD of the set of reachable states of a finite state machine.} }

@inbook{gordon2000a, author = {Michael J. C. Gordon}, title = {Proof, Language, and Interaction: Essays in Honour of Robin Milner}, chapter = {6. From {LCF} to {HOL}: A Short History}, publisher = {MIT Press}, month = may, year = 2000 }

@article{gordon2002, author = {Michael J. C. Gordon}, title = {Programming combinations of deduction and {BDD}-based symbolic calculation}, journal = {LMS Journal of Computation and Mathematics}, volume = 5, pages = {56--76}, month = aug, year = 2002, url = {http://www.lms.ac.uk/jcm/5/lms2000-001}, abstract = {A generalisation of Milner's `LCF approach' is described. This allows algorithms based on binary decision diagrams (BDDs) to be programmed as derived proof rules in a calculus of representation judgements. The derivation of representation judgements becomes an LCF-style proof by defining an abstract type for judgements analogous to the LCF type of theorems. The primitive inference rules for representation judgements correspond to the operations provided by an efficient BDD package coded in C (BuDDy). Proof can combine traditional inference with steps inferring representation judgements. The resulting system provides a platform to support a tight and principled integration of theorem proving and model checking. The methods are illustrated by using them to solve all instances of a generalised Missionaries and Cannibals problem.} }

@inproceedings{gordon2002a, author = {Michael J. C. Gordon}, title = {{PuzzleTool}: An Example of Programming Computation and Deduction}, pages = {214--229}, crossref = {tphols2002} }

@inproceedings{gordon2003, author = {Mike Gordon and Joe Hurd and Konrad Slind}, title = {Executing the formal semantics of the {Accellera} {Property} {Specification} {Language} by mechanised theorem proving}, pages = {200--215}, crossref = {charme2003}, url = {http://www.gilith.com/research/papers}, abstract = {The Accellera Property Specification Language (PSL) is designed for the formal specification of hardware. The Reference Manual contains a formal semantics, which we previously encoded in a machine readable version of higher order logic. In this paper we describe how to `execute' the formal semantics using proof scripts coded in the HOL theorem prover's metalanguage ML. The goal is to see if it is feasible to implement useful tools that work directly from the official semantics by mechanised proof. Such tools will have a high assurance of conforming to the standard. We have implemented two experimental tools: an interpreter that evaluates whether a finite trace $w$, which may be generated by a simulator, satisfies a PSL formula $f$ (i.e.~$w \models f$), and a compiler that converts PSL formulas to checkers in an intermediate format suitable for translation to HDL for inclusion in simulation test-benches. Although our tools use logical deduction and are thus slower than hand-crafted implementations, they may be speedy enough for some applications. They can also provide a reference for more efficient implementations.} }

@inproceedings{gordon2005, author = {Mike Gordon and Juliano Iyoda and Scott Owens and Konrad Slind}, title = {A Proof-Producing Hardware Compiler for a Subset of Higher Order Logic}, pages = {59--75}, crossref = {tphols2005a}, url = {http://www.cl.cam.ac.uk/~mjcg/proposals/dev/} }

@misc{gordon2009, author = {M. J. C. Gordon}, title = {Specification and Verification {I}}, year = 2009, note = {Course notes available from \url{http://www.cl.cam.ac.uk/~mjcg/Teaching/SpecVer1/SpecVer1.html}} }

@misc{gordon2009a, author = {M. J. C. Gordon}, title = {Specification and Verification {II}}, year = 2009, note = {Course notes available from \url{http://www.cl.cam.ac.uk/~mjcg/Teaching/SpecVer2/SpecVer2.html}} }

@book{gorenstein1994, author = {Daniel Gorenstein and Richard Lyons and Ronald Solomon}, title = {The Classification of the Finite Simple Groups}, year = 1994, publisher = {AMS}, url = {http://www.ams.org/online_bks/surv40-1/} }

@inproceedings{graunke2008, author = {Paul Graunke}, title = {Verified Safety and Information Flow of a Block Device}, pages = {187--202}, url = {http://dx.doi.org/10.1016/j.entcs.2008.06.049}, crossref = {ssv2008}, abstract = {This work reports on the author's experience designing, implementing, and formally verifying a low-level piece of system software. The timing model and the adaptation of an existing information flow policy to a monadic framework are reasonably novel. Interactive compilation through equational rewriting worked well in practice. Finally, the project uncovered some potential areas for improving interactive theorem provers.} }

@book{grubb2003, author = {Penny Grubb and Armstrong A. Takang}, title = {Software Maintenance: Concepts and Practice}, publisher = {World Scientific Publishing Company}, edition = {2nd}, year = 2003, month = jul }

@techreport{gunter1989, author = {Elsa Gunter}, title = {Doing Algebra in Simple Type Theory}, year = 1989, institution = {Department of Computer and Information Science, University of Pennsylvania}, number = {MS-CIS-89-38, Logic \&\ Computation 09} }

@article{guttman2005, author = {Joshua D. Guttman and Amy L. Herzog and John D. Ramsdell and Clement W. Skorupka}, title = {Verifying Information Flow Goals in {S}ecurity-{E}nhanced {L}inux}, journal = {Journal of Computer Security}, volume = 13, number = 1, pages = {115--134}, year = 2005, url = {http://www.ccs.neu.edu/home/guttman/selinux.pdf} }

@inproceedings{haftmann2010, author = {Haftmann, Florian}, title = {From Higher-Order Logic to {Haskell}: There and Back Again}, pages = {155--158}, crossref = {pepm2010}, url = {http://www4.in.tum.de/~haftmann/pdf/from_hol_to_haskell_haftmann.pdf}, abstract = {We present two tools which together allow reasoning about (a substantial subset of) Haskell programs. One is the code generator of the proof assistant Isabelle, which turns specifications formulated in Isabelle's higher-order logic into executable Haskell source text; the other is Haskabelle, a tool to translate programs written in Haskell into Isabelle specifications. The translation from Isabelle to Haskell directly benefits from the rigorous correctness approach of a proof assistant: generated Haskell programs are always partially correct w.r.t. to the specification from which they are generated.} }

@book{hajek1998, author = {Petr H{\'a}jek}, title = {Metamathematics of Fuzzy Logic}, month = aug, year = 1998, series = {Trends in Logic}, volume = 4, publisher = {Kluwer Academic Publishers}, address = {Dordrecht}, url = {http://www.wkap.nl/prod/b/0-7923-5238-6}, abstract = {This book presents a systematic treatment of deductive aspects and structures of fuzzy logic understood as many valued logic sui generis. Some important systems of real-valued propositional and predicate calculus are defined and investigated. The aim is to show that fuzzy logic as a logic of imprecise (vague) propositions does have well-developed formal foundations and that most things usually named `fuzzy inference' can be naturally understood as logical deduction. There are two main groups of intended readers. First, logicians: they can see that fuzzy logic is indeed a branch of logic and may find several very interesting open problems. Second, equally important, researchers involved in fuzzy logic applications and soft computing. As a matter of fact, most of these are not professional logicians so that it can easily happen that an application, clever and successful as it may be, is presented in a way which is logically not entirely correct or may appear simple-minded. (Standard presentations of the logical aspects of fuzzy controllers are the most typical example.) This fact would not be very important if only the bon ton of logicians were harmed; but it is the opinion of the author (who is a mathematical logician) that a better understanding of the strictly logical basis of fuzzy logic (in the usual broad sense) is very useful for fuzzy logic appliers since if they know better what they are doing, they may hope to do it better. In addition, a better mutual understanding between (classical) logicians and researchers in fuzzy logic, promises to lead to deeper cooperation and new results. 
Audience: mathematicians, logicians, computer scientists, specialists in artificial intelligence and knowledge engineering, developers of fuzzy logic.} }

@inproceedings{hales2006, author = {Thomas C. Hales}, title = {Introduction to the {F}lyspeck Project}, crossref = {dagstuhl2006}, url = {http://drops.dagstuhl.de/opus/volltexte/2006/432/}, abstract = {This article gives an introduction to a long-term project called Flyspeck, whose purpose is to give a formal verification of the Kepler Conjecture. The Kepler Conjecture asserts that the density of a packing of equal radius balls in three dimensions cannot exceed $\pi/\sqrt{18}$. The original proof of the Kepler Conjecture, from 1998, relies extensively on computer calculations. Because the proof relies on relatively few external results, it is a natural choice for a formalization effort.} }

@article{halpern1990, author = {Joseph Y. Halpern}, title = {An analysis of first-order logics of probability}, journal = {Artificial Intelligence}, volume = 46, number = 3, pages = {311--350}, year = 1990, url = {http://www.cs.cornell.edu/home/halpern/abstract.html#journal25}, abstract = {We consider two approaches to giving semantics to first-order logics of probability. The first approach puts a probability on the domain, and is appropriate for giving semantics to formulas involving statistical information such as ``The probability that a randomly chosen bird flies is greater than .9.'' The second approach puts a probability on possible worlds, and is appropriate for giving semantics to formulas describing degrees of belief, such as ``The probability that Tweety (a particular bird) flies is greater than .9.'' We show that the two approaches can be easily combined, allowing us to reason in a straightforward way about statistical information and degrees of belief. We then consider axiomatizing these logics. In general, it can be shown that no complete axiomatization is possible. We provide axiom systems that are sound and complete in cases where a complete axiomatization is possible, showing that they do allow us to capture a great deal of interesting reasoning about probability.} }

@book{hankerson2003, author = {Darrel Hankerson and Alfred Menezes and Scott Vanstone}, title = {Guide to Elliptic Curve Cryptography}, year = 2003, publisher = {Springer}, url = {http://www.cacr.math.uwaterloo.ca/ecc/} }

@article{hansell1971, author = {R. W. Hansell}, title = {Borel Measurable Mappings for Nonseparable Metric Spaces}, journal = {Transactions of the American Mathematical Society}, year = 1971, volume = 161, pages = {145--169}, month = nov, url = {http://uk.jstor.org/cgi-bin/jstor/listjournal/00029947/di970179}, abstract = {"One of the key papers in non-separable theory. [To show that f IN measurable E U implies countable range] One may use Section 2, Corollary 7, with the observation that [0,1] satisfies the assumptions and, in your situation, the collection $f^{-1}(x)$, $x \in X$ is even completely additive-Borel"} }

@book{hardy1993, author = {G. H. Hardy}, title = {A Mathematician's Apology}, publisher = {Cambridge University Press}, year = 1993, note = {Reprinted with a foreword by C. P. Snow} }

@techreport{harrison1995, author = {John Harrison}, title = {Metatheory and Reflection in Theorem Proving: A Survey and Critique}, institution = {SRI Cambridge}, address = {Millers Yard, Cambridge, UK}, year = 1995, type = {Technical Report}, number = {CRC-053}, url = {http://www.cl.cam.ac.uk/users/jrh/papers/reflect.html}, abstract = {One way to ensure correctness of the inference performed by computer theorem provers is to force all proofs to be done step by step in a simple, more or less traditional, deductive system. Using techniques pioneered in Edinburgh LCF, this can be made palatable. However, some believe such an approach will never be efficient enough for large, complex proofs. One alternative, commonly called reflection, is to analyze proofs using a second layer of logic, a metalogic, and so justify abbreviating or simplifying proofs, making the kinds of shortcuts humans often do or appealing to specialized decision algorithms. In this paper we contrast the fully-expansive LCF approach with the use of reflection. We put forward arguments to suggest that the inadequacy of the LCF approach has not been adequately demonstrated, and neither has the practical utility of reflection (notwithstanding its undoubted intellectual interest). The LCF system with which we are most concerned is the HOL proof assistant. The plan of the paper is as follows. We examine ways of providing user extensibility for theorem provers, which naturally places the LCF and reflective approaches in opposition. A detailed introduction to LCF is provided, emphasizing ways in which it can be made efficient. Next, we present a short introduction to metatheory and its usefulness, and, starting from G{\"o}del's proofs and Feferman's transfinite progressions of theories, look at logical `reflection principles'. 
We show how to introduce computational `reflection principles' which do not extend the power of the logic, but may make deductions in it more efficient, and speculate about their practical usefulness. Applications or proposed applications of computational reflection in theorem proving are surveyed, following which we draw some conclusions. In an appendix, we attempt to clarify a couple of other notions of `reflection' often encountered in the literature. The paper questions the too-easy acceptance of reflection principles as a practical necessity. However I hope it also serves as an adequate introduction to the concepts involved in reflection and a survey of relevant work. To this end, a rather extensive bibliography is provided.} }

@article{harrison1995a, author = {John Harrison}, title = {Binary Decision Diagrams as a {HOL} Derived Rule}, journal = {The Computer Journal}, volume = 38, pages = {162--170}, year = 1995, url = {http://www.cl.cam.ac.uk/users/jrh/papers/bdd1.html}, abstract = {Binary Decision Diagrams (BDDs) are a representation for Boolean formulas which makes many operations, in particular tautology-checking, surprisingly efficient in important practical cases. In contrast to such custom decision procedures, the HOL theorem prover expands all proofs out to a sequence of extremely simple primitive inferences. In this paper we describe how the BDD algorithm may be adapted to comply with such strictures, helping us to understand the strengths and limitations of the HOL approach.} }

@inproceedings{harrison1996, author = {John Harrison}, title = {Optimizing Proof Search in Model Elimination}, crossref = {cade1996}, pages = {313--327}, url = {http://www.cl.cam.ac.uk/users/jrh/papers/me.html}, abstract = {Many implementations of model elimination perform proof search by iteratively increasing a bound on the total size of the proof. We propose an optimized version of this search mode using a simple divide-and-conquer refinement. Optimized and unoptimized modes are compared, together with depth-bounded and best-first search, over the entire TPTP problem library. The optimized size-bounded mode seems to be the overall winner, but for each strategy there are problems on which it performs best. Some attempt is made to analyze why. We emphasize that our optimization, and other implementation techniques like caching, are rather general: they are not dependent on the details of model elimination, or even that the search is concerned with theorem proving. As such, we believe that this study is a useful complement to research on extending the model elimination calculus.} }

@techreport{harrison1996b, author = {John Harrison}, title = {Formalized Mathematics}, institution = {Turku Centre for Computer Science (TUCS)}, address = {Lemmink{\"a}isenkatu 14 A, FIN-20520 Turku, Finland}, type = {Technical Report}, number = 36, year = 1996, url = {http://www.cl.cam.ac.uk/users/jrh/papers/form-math3.html}, abstract = {It is generally accepted that in principle it's possible to formalize completely almost all of present-day mathematics. The practicability of actually doing so is widely doubted, as is the value of the result. But in the computer age we believe that such formalization is possible and desirable. In contrast to the QED Manifesto however, we do not offer polemics in support of such a project. We merely try to place the formalization of mathematics in its historical perspective, as well as looking at existing praxis and identifying what we regard as the most interesting issues, theoretical and practical.} }

@inproceedings{harrison1996c, author = {John Harrison}, title = {A {Mizar} Mode for {HOL}}, crossref = {tphols1996}, pages = {203--220}, url = {http://www.cl.cam.ac.uk/users/jrh/papers/mizar.html}, abstract = {The HOL theorem prover is implemented in the LCF style, meaning that all inference is ultimately reduced to a collection of very simple (forward) primitive inference rules. By programming it is possible to build alternative means of proving theorems on top, while preserving security. Existing HOL proofs styles are, however, very different from those used in textbooks. Here we describe the addition of another proof style, inspired by Mizar. We believe the resulting system combines some of the best features of both HOL and Mizar.} }

@inproceedings{harrison1996d, author = {John Harrison}, title = {Proof Style}, crossref = {types1996}, pages = {154--172}, url = {http://www.cl.cam.ac.uk/users/jrh/papers/style.html}, abstract = {We are concerned with how computer theorem provers should expect users to communicate proofs to them. There are many stylistic choices that still allow the machine to generate a completely formal proof object. The most obvious choice is the amount of guidance required from the user, or from the machine perspective, the degree of automation provided. But another important consideration, which we consider particularly significant, is the bias towards a `procedural' or `declarative' proof style. We will explore this choice in depth, and discuss the strengths and weaknesses of declarative and procedural styles for proofs in pure mathematics and for verification applications. We conclude with a brief summary of our own experiments in trying to combine both approaches.} }

@inproceedings{harrison1996e, author = {John Harrison}, title = {{HOL} Light: A Tutorial Introduction}, crossref = {fmcad1996}, pages = {265--269}, url = {http://www.cl.cam.ac.uk/users/jrh/papers/demo.html}, abstract = {HOL Light is a new version of the HOL theorem prover. While retaining the reliability and programmability of earlier versions, it is more elegant, lightweight, powerful and automatic; it will be the basis for the Cambridge component of the HOL-2000 initiative to develop the next generation of HOL theorem provers. HOL Light is written in CAML Light, and so will run well even on small machines, e.g. PCs and Macintoshes with a few megabytes of RAM. This is in stark contrast to the resource-hungry systems which are the norm in this field, other versions of HOL included. Among the new features of this version are a powerful simplifier, effective first order automation, simple higher-order matching and very general support for inductive and recursive definitions. Many theorem provers, model checkers and other hardware verification tools are tied to a particular set of facilities and a particular style of interaction. However HOL Light offers a wide range of proof tools and proof styles to suit the needs of various applications. For work in high-level mathematical theories, one can use a more `declarative' textbook-like style of proof (inspired by Trybulec's Mizar system). For larger but more routine and mechanical applications, HOL Light includes more `procedural' automated tools for simplifying complicated expressions, proving large tautologies etc. We believe this unusual range makes HOL Light a particularly promising vehicle for floating point verification, which involves a mix of abstract mathematics and concrete low-level reasoning. We will aim to give a brief tutorial introduction to the system, illustrating some of its features by way of such floating point verification examples. 
In this paper we explain the system and its possible applications in a little more detail.} }

@techreport{harrison1997, author = {John Harrison}, title = {Floating point verification in {HOL} Light: the exponential function}, institution = {University of Cambridge Computer Laboratory}, number = 428, year = 1997, url = {http://www.cl.cam.ac.uk/users/jrh/papers/tang.html} }

@book{harrison1998, author = {John Harrison}, title = {Theorem Proving with the Real Numbers}, series = {Distinguished Dissertations}, publisher = {Springer}, year = 1998, url = {http://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-408.html} }

@manual{harrison1998b, author = {John Harrison}, title = {The {HOL} Light Manual (1.0)}, year = 1998, month = may, url = {http://www.cl.cam.ac.uk/users/jrh/hol-light/} }

@inproceedings{harrison1998c, author = {John Harrison}, title = {Formalizing {D}ijkstra}, crossref = {tphols1998}, pages = {171--188}, url = {http://www.cl.cam.ac.uk/users/jrh/papers/dijkstra.html} }

@inproceedings{harrison2005, author = {John Harrison}, title = {A {HOL} Theory of {E}uclidean Space}, crossref = {tphols2005}, pages = {114--129}, url = {http://www.cl.cam.ac.uk/users/jrh/papers/hol05.html} }

@inproceedings{harrison2006, author = {John Harrison}, title = {Floating-Point Verification using Theorem Proving}, crossref = {sfm2006}, pages = {211--242}, url = {http://www.cl.cam.ac.uk/~jrh13/papers/sfm.html} }

@inproceedings{harrison2007, author = {John Harrison}, title = {Verifying nonlinear real formulas via sums of squares}, crossref = {tphols2007}, pages = {102--118}, url = {http://www.cl.cam.ac.uk/~jrh13/papers/sos.html} }

@inproceedings{harrison2007a, author = {John Harrison}, title = {Formalizing Basic Complex Analysis}, crossref = {trybulec2007}, pages = {151--165}, url = {http://www.cl.cam.ac.uk/~jrh13/papers/trybulec.html} }

@article{harrison2009, author = {John Harrison}, title = {Formalizing an Analytic Proof of the Prime Number Theorem}, journal = {Journal of Automated Reasoning}, month = oct, year = 2009, volume = 43, number = 3, pages = {243--261}, note = {Special issue}, url = {http://www.springerlink.com/content/x35225442882h618/}, abstract = {We describe the computer formalization of a complex-analytic proof of the Prime Number Theorem (PNT), a classic result from number theory characterizing the asymptotic density of the primes. The formalization, conducted using the HOL Light theorem prover, proceeds from the most basic axioms for mathematics yet builds from that foundation to develop the necessary analytic machinery including Cauchy's integral formula, so that we are able to formalize a direct, modern and elegant proof instead of the more involved `elementary' Erd{\"o}s-Selberg argument. As well as setting the work in context and describing the highlights of the formalization, we analyze the relationship between the formal proof and its informal counterpart and so attempt to derive some general lessons about the formalization of mathematics.} }

@article{hart1983, author = {Sergiu Hart and Micha Sharir and Amir Pnueli}, title = {Termination of Probabilistic Concurrent Programs}, month = jul, year = 1983, journal = {ACM Transactions on Programming Languages and Systems (TOPLAS)}, volume = 5, number = 3, pages = {356--380} }

@article{hasan2007, author = {O. Hasan and S. Tahar}, title = {Formalization of the Standard Uniform Random Variable}, journal = {Theoretical Computer Science}, year = 2007, note = {To appear} }

@article{haworth2005, author = {G. McC. Haworth}, title = {6-Man Chess Solved}, journal = {ICGA Journal}, volume = 28, number = 3, pages = {153}, month = sep, year = 2005 }

@unpublished{haworth2006, author = {G. McC. Haworth}, title = {Freezer analysis of {K}{R}{P}(a2){K}b{B}{P}(a3)}, note = {Private communication}, year = 2006 }

@article{he1997, author = {Jifeng He and K. Seidel and A. McIver}, title = {Probabilistic models for the guarded command language}, month = apr, year = 1997, journal = {Science of Computer Programming}, volume = 28, number = {2--3}, pages = {171--192} }

@article{heinz1999, author = {E. A. Heinz}, title = {Endgame databases and efficient index schemes}, journal = {ICCA Journal}, volume = 22, number = 1, pages = {22--32}, month = mar, year = 1999, url = {http://supertech.lcs.mit.edu/~heinz/ps/edb_index.ps.gz}, abstract = {Endgame databases have become an integral part of modern chess programs during the past few years. Since the early 1990s two different kinds of endgame databases are publicly available, namely Edwards' so-called "tablebases" and Thompson's collection of 5-piece databases. Although Thompson's databases enjoy much wider international fame, most current chess programs use tablebases because they integrate far better with the rest of the search. For the benefit of all those enthusiasts who intend to incorporate endgame databases into their own chess programs, this article describes the index schemes of Edwards' tablebases and Thompson's databases in detail, explains their differences, and provides a comparative evaluation of both. In addition we introduce new indexing methods that improve on the published state-of-the-art and shrink the respective index ranges even further (especially for large databases and endgames with Pawns). This reduces the overall space requirements of the resulting databases substantially. We also propose several solutions for the problem of potential en-passant captures in endgame databases with at least one Pawn per each side. Shortly after the initial submission of our original text, Nalimov independently applied similar techniques as ours and even more advanced index schemes to his new tablebases which are now publicly available on the Internet, too. We briefly put his work into perspective as well.} }

@misc{heitkotter1998, author = {J{\"o}rg Heitk{\"o}tter and David Beasley}, title = {The Hitch-Hiker's Guide to Evolutionary Computation: A list of Frequently Asked Questions (FAQ)}, year = 1998, howpublished = {USENET: comp.ai.genetic}, url = {ftp://rtfm.mit.edu/pub/usenet/news.answers/ai-faq/genetic/} }

@inproceedings{herencia2011, author = {Herencia-Zapana, Heber and Hagen, George and Narkawicz, Anthony}, title = {Formalizing Probabilistic Safety Claims}, crossref = {nfm2011}, pages = {162--176} }

@article{herik1987, author = {van den Herik, H. J. and Herschberg, I. S. and Nakad, N.}, title = {A Six-Men-Endgame Database: {K}{R}{P}(a2){K}b{B}{P}(a3)}, journal = {ICCA Journal}, volume = 10, number = 4, pages = {163--180}, year = 1987 }

@article{herik1988, author = {van den Herik, H. J. and Herschberg, I. S. and Nakad, N.}, year = 1988, title = {A Reply to {R}. {S}attler's Remarks on the {K}{R}{P}(a2)-{K}b{B}{P}(a3) Database}, journal = {ICCA Journal}, volume = 11, number = {2/3}, pages = {88--91} }

@article{herman1990, author = {Ted Herman}, title = {Probabilistic self-stabilization}, day = 29, month = jun, year = 1990, journal = {Information Processing Letters}, volume = 35, number = 2, pages = {63--67} }

@inproceedings{hoang2003, author = {Thai Son Hoang and Z. Jin and K. Robinson and A. K. McIver and C. C. Morgan}, title = {Probabilistic invariants for probabilistic machines}, booktitle = {Proceedings of the 3rd International Conference of B and Z users}, pages = {240--259}, series = {Lecture Notes in Computer Science}, volume = 2651, year = 2003, publisher = {Springer} }

@unpublished{hoare2002, author = {Tony Hoare}, title = {The Verifying Compiler}, month = nov, year = 2002, note = {Sample submission for the Workshop on Grand Challenges for Computing Research}, url = {http://umbriel.dcs.gla.ac.uk/NeSC/general/esi/events/Grand_Challenges/verifying_compiler.pdf} }

@article{homeier1995, author = {Peter V. Homeier and David F. Martin}, title = {A Mechanically Verified Verification Condition Generator}, journal = {The Computer Journal}, volume = 38, number = 2, pages = {131--141}, month = jul, year = 1995, url = {http://www.cis.upenn.edu/~homeier/publications/vcg-bcj.pdf} }

@inproceedings{homeier2009, author = {Peter V. Homeier}, title = {The {HOL}-{O}mega Logic}, crossref = {tphols2009}, pages = {244--259}, url = {http://www.trustworthytools.com/holw/holw-lncs5674.pdf} }

@book{hooper1992, author = {David Hooper and Kenneth Whyld}, title = {The Oxford Companion to Chess}, publisher = {Oxford University Press}, month = sep, year = 1992, edition = {Second} }

@book{hopcroft2001, author = {John E. Hopcroft and Rajeev Motwani and Jeffrey D. Ullman}, title = {Introduction to Automata Theory, Languages, and Computation}, publisher = {Addison-Wesley}, year = 2001, edition = {Second} }

@techreport{huet1980, author = {G{\'e}rard Huet and Derek Oppen}, title = {Equations and Rewrite Rules: A Survey}, institution = {SRI International}, number = {CSL-111}, month = jan, year = 1980 }

@inproceedings{huffman2012, author = {Brian Huffman}, title = {Formal Verification of Monad Transformers}, url = {http://web.cecs.pdx.edu/~brianh/icfp2012.html}, crossref = {icfp2012} }

@article{hughes1989, author = {J. Hughes}, title = {Why Functional Programming Matters}, journal = {The Computer Journal}, volume = {32}, number = {2}, pages = {98--107}, year = 1989, url = {http://www.md.chalmers.se/~rjmh/Papers/whyfp.html} }

@phdthesis{huisman2001, author = {Marieke Huisman}, title = {Reasoning about {Java} Programs in higher order logic with {PVS} and {Isabelle}}, school = {University of Nijmegen, Holland}, month = feb, year = 2001 }

@manual{hurd1998, author = {Joe Hurd}, title = {The Real Number Theories in hol98}, note = {Part of the documentation for the hol98 theorem prover}, month = nov, year = 1998, abstract = {A description of the hol98 reals library, ported partly from the reals library in HOL90 and partly from the real library in hol-light.} }

@inproceedings{hurd1999, author = {Joe Hurd}, title = {Integrating {Gandalf} and {HOL}}, pages = {311--321}, crossref = {tphols1999}, url = {http://www.gilith.com/research/papers}, abstract = {Gandalf is a first-order resolution theorem-prover, optimized for speed and specializing in manipulations of large clauses. In this paper I describe GANDALF_TAC, a HOL tactic that proves goals by calling Gandalf and mirroring the resulting proofs in HOL. This call can occur over a network, and a Gandalf server may be set up servicing multiple HOL clients. In addition, the translation of the Gandalf proof into HOL fits in with the LCF model and guarantees logical consistency.} }

@techreport{hurd1999a, author = {Joe Hurd}, title = {Integrating {Gandalf} and {HOL}}, institution = {University of Cambridge Computer Laboratory}, number = 461, issn = {1476-2986}, month = may, year = 1999, note = {Second edition}, url = {http://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-461.html} }

@inproceedings{hurd2000, author = {Joe Hurd}, title = {Congruence Classes with Logic Variables}, crossref = {tphols2000a}, pages = {87--101} }

@inproceedings{hurd2000a, author = {Joe Hurd}, title = {Lightweight Probability Theory for Verification}, crossref = {tphols2000a}, pages = {103--113}, abstract = {There are many algorithms that make use of probabilistic choice, but a lack of tools available to specify and verify their operation. The primary contribution of this paper is a lightweight modelling of such algorithms in higher-order logic, together with some key properties that enable verification. The theory is applied to a uniform random number generator and some basic properties are established. As a secondary contribution, all the theory developed has been mechanized in the hol98 theorem-prover.} }

@manual{hurd2000b, author = {Joe Hurd}, title = {The Probability Theories in hol98}, note = {Part of the documentation for the hol98 theorem prover}, month = jun, year = 2000, abstract = {A description of the hol98 probability library.} }

@article{hurd2001, author = {Joe Hurd}, title = {Congruence Classes with Logic Variables}, journal = {Logic Journal of the {IGPL}}, volume = 9, number = 1, pages = {59--75}, month = jan, year = 2001, url = {http://www3.oup.co.uk/igpl/Volume_09/Issue_01/#Hurd}, abstract = {We are improving equality reasoning in automatic theorem-provers, and congruence classes provide an efficient storage mechanism for terms, as well as the congruence closure decision procedure. We describe the technical steps involved in integrating logic variables with congruence classes, and present an algorithm that can be proved to find all matches between classes (modulo certain equalities). An application of this algorithm makes possible a percolation algorithm for undirected rewriting in minimal space; this is described and an implementation in hol98 is examined in some detail.} }

@inproceedings{hurd2001a, author = {Joe Hurd}, title = {Predicate Subtyping with Predicate Sets}, crossref = {tphols2001}, pages = {265--280}, url = {http://www.gilith.com/research/papers}, abstract = {We show how PVS-style predicate subtyping can be simulated in HOL using predicate sets, and explain how to perform subtype checking using this model. We illustrate some applications of this to specification and verification in HOL, and also demonstrate some limits of the approach. Finally we show preliminary results of a subtype checker that has been integrated into a contextual rewriter.} }

@inproceedings{hurd2001b, author = {Joe Hurd}, title = {Verification of the {Miller-Rabin} Probabilistic Primality Test}, crossref = {tphols2001a}, pages = {223--238} }

@phdthesis{hurd2002, author = {Joe Hurd}, title = {Formal Verification of Probabilistic Algorithms}, year = 2002, school = {University of Cambridge}, url = {http://www.gilith.com/research/papers}, abstract = {This thesis shows how probabilistic algorithms can be formally verified using a mechanical theorem prover. We begin with an extensive foundational development of probability, creating a higher-order logic formalization of mathematical measure theory. This allows the definition of the probability space we use to model a random bit generator, which informally is a stream of coin-flips, or technically an infinite sequence of IID Bernoulli(1/2) random variables. Probabilistic programs are modelled using the state-transformer monad familiar from functional programming, where the random bit generator is passed around in the computation. Functions remove random bits from the generator to perform their calculation, and then pass back the changed random bit generator with the result. Our probability space modelling the random bit generator allows us to give precise probabilistic specifications of such programs, and then verify them in the theorem prover. We also develop technical support designed to expedite verification: probabilistic quantifiers; a compositional property subsuming measurability and independence; a probabilistic while loop together with a formal concept of termination with probability 1. We also introduce a technique for reducing properties of a probabilistic while loop to properties of programs that are guaranteed to terminate: these can then be established using induction and standard methods of program correctness. We demonstrate the formal framework with some example probabilistic programs: sampling algorithms for four probability distributions; some optimal procedures for generating dice rolls from coin flips; the symmetric simple random walk. 
In addition, we verify the Miller-Rabin primality test, a well-known and commercially used probabilistic algorithm. Our fundamental perspective allows us to define a version with strong properties, which we can execute in the logic to prove compositeness of numbers.} }

@inproceedings{hurd2002b, author = {Joe Hurd}, title = {A Formal Approach to Probabilistic Termination}, crossref = {tphols2002}, pages = {230--245}, url = {http://www.gilith.com/research/papers}, abstract = {We present a probabilistic version of the while loop, in the context of our mechanized framework for verifying probabilistic programs. The while loop preserves useful program properties of measurability and independence, provided a certain condition is met. This condition is naturally interpreted as ``from every starting state, the while loop will terminate with probability 1'', and we compare it to other probabilistic termination conditions in the literature. For illustration, we verify in HOL two example probabilistic algorithms that necessarily rely on probabilistic termination: an algorithm to sample the Bernoulli(p) distribution using coin-flips; and the symmetric simple random walk.} }

@inproceedings{hurd2002c, author = {Joe Hurd}, title = {{HOL} Theorem Prover Case Study: Verifying Probabilistic Programs}, crossref = {avocs2002}, pages = {83--92}, abstract = {The focus of this paper is the question: ``How suited is the HOL theorem prover to the verification of probabilistic programs?'' To answer this, we give a brief introduction to our model of probabilistic programs in HOL, and then compare this approach to other formal tools that have been used to verify probabilistic programs: the Prism model checker, the Coq theorem prover, and the B method.} }

@inproceedings{hurd2002d, author = {Joe Hurd}, title = {Fast Normalization in the {HOL} Theorem Prover}, crossref = {arw2002}, url = {http://www.gilith.com/research/papers}, note = {An extended abstract} }

@inproceedings{hurd2002e, author = {Joe Hurd}, title = {An {LCF}-Style Interface between {HOL} and First-Order Logic}, crossref = {cade2002}, pages = {134--138}, url = {http://www.gilith.com/research/papers} }

@article{hurd2003, author = {Joe Hurd}, title = {Verification of the {Miller-Rabin} Probabilistic Primality Test}, journal = {Journal of Logic and Algebraic Programming}, month = {May--August}, year = {2003}, volume = 50, number = {1--2}, pages = {3--21}, note = {Special issue on Probabilistic Techniques for the Design and Analysis of Systems}, url = {http://www.gilith.com/research/papers}, abstract = {Using the HOL theorem prover, we apply our formalization of probability theory to specify and verify the Miller-Rabin probabilistic primality test. The version of the test commonly found in algorithm textbooks implicitly accepts probabilistic termination, but our own verified implementation satisfies the stronger property of guaranteed termination. Completing the proof of correctness requires a significant body of group theory and computational number theory to be formalized in the theorem prover. Once verified, the primality test can either be executed in the logic (using rewriting) and used to prove the compositeness of numbers, or manually extracted to Standard ML and used to find highly probable primes.} }

@techreport{hurd2003a, author = {Joe Hurd}, title = {Using Inequalities as Term Ordering Constraints}, month = jun, year = 2003, institution = {University of Cambridge Computer Laboratory}, issn = {1476-2986}, number = 567, url = {http://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-567.html}, abstract = {In this paper we show how linear inequalities can be used to approximate Knuth-Bendix term ordering constraints, and how term operations such as substitution can be carried out on systems of inequalities. Using this representation allows an off-the-shelf linear arithmetic decision procedure to check the satisfiability of a set of ordering constraints. We present a formal description of a resolution calculus where systems of inequalities are used to constrain clauses, and implement this using the Omega test as a satisfiability checker. We give the results of an experiment over problems in the TPTP archive, comparing the practical performance of the resolution calculus with and without inherited inequality constraints.} }

@techreport{hurd2003c, author = {Joe Hurd}, title = {Formal Verification of Probabilistic Algorithms}, institution = {University of Cambridge Computer Laboratory}, issn = {1476-2986}, month = may, year = 2003, number = 566, url = {http://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-566.html} }

@inproceedings{hurd2003d, author = {Joe Hurd}, title = {First-Order Proof Tactics in Higher-Order Logic Theorem Provers}, pages = {56--68}, crossref = {strata2003}, url = {http://www.gilith.com/research/papers}, abstract = {In this paper we evaluate the effectiveness of first-order proof procedures when used as tactics for proving subgoals in a higher-order logic interactive theorem prover. We first motivate why such first-order proof tactics are useful, and then describe the core integrating technology: an `LCF-style' logical kernel for clausal first-order logic. This allows the choice of different logical mappings between higher-order logic and first-order logic to be used depending on the subgoal, and also enables several different first-order proof procedures to cooperate on constructing the proof. This work was carried out using the HOL4 theorem prover; we comment on the ease of transferring the technology to other higher-order logic theorem provers.} }

@inproceedings{hurd2004, author = {Joe Hurd and Annabelle McIver and Carroll Morgan}, title = {Probabilistic Guarded Commands Mechanized in {HOL}}, crossref = {qapl2004}, pages = {95--111} }

@inproceedings{hurd2004a, author = {Joe Hurd}, title = {Compiling {HOL4} to Native Code}, crossref = {tphols2004a}, url = {http://www.gilith.com/research/papers}, abstract = {We present a framework for extracting and compiling proof tools and theories from a higher order logic theorem prover, so that the theorem prover can be used as a platform for supporting reasoning in other applications. The framework is demonstrated on a small application that uses HOL4 to find proofs of arbitrary first order logic formulas.} }

@inproceedings{hurd2005, author = {Joe Hurd}, title = {Formal Verification of Chess Endgame Databases}, pages = {85--100}, crossref = {tphols2005a}, url = {http://www.gilith.com/research/papers}, abstract = {Chess endgame databases store the number of moves required to force checkmate for all winning positions: with such a database it is possible to play perfect chess. This paper describes a method to construct endgame databases that are formally verified to logically follow from the laws of chess. The method employs a theorem prover to model the laws of chess and ensure that the construction is correct, and also a BDD engine to compactly represent and calculate with large sets of chess positions. An implementation using the HOL4 theorem prover and the BuDDy BDD engine is able to solve all four piece pawnless endgames.} }

@unpublished{hurd2005a, author = {Joe Hurd}, title = {Formalizing Elliptic Curve Cryptography in Higher Order Logic}, month = oct, year = 2005, note = {Available from the author's website}, url = {http://www.gilith.com/research/papers}, abstract = {Formalizing a mathematical theory using a theorem prover is a necessary first step to proving the correctness of programs that refer to that theory in their specification. This report demonstrates how the mathematical theory of elliptic curves and their application to cryptography can be formalized in higher order logic. This formal development is mechanized using the HOL4 theorem prover, resulting in a collection of formally verified functional programs (expressed as higher order logic functions) that correctly implement the primitive operations of elliptic curve cryptography.} }

@article{hurd2005b, author = {Joe Hurd and Annabelle McIver and Carroll Morgan}, title = {Probabilistic Guarded Commands Mechanized in {HOL}}, journal = {Theoretical Computer Science}, volume = 346, number = 1, month = nov, year = 2005, pages = {96--112}, url = {http://www.gilith.com/research/papers}, abstract = {The probabilistic guarded-command language pGCL contains both demonic and probabilistic nondeterminism, which makes it suitable for reasoning about distributed random algorithms. Proofs are based on weakest precondition semantics, using an underlying logic of real- (rather than Boolean-) valued functions. We present a mechanization of the quantitative logic for pGCL using the HOL4 theorem prover, including a proof that all pGCL commands satisfy the new condition sublinearity, the quantitative generalization of conjunctivity for standard GCL. The mechanized theory also supports the creation of an automatic proof tool which takes as input an annotated pGCL program and its partial correctness specification, and derives from that a sufficient set of verification conditions. This is employed to verify the partial correctness of the probabilistic voting stage in Rabin's mutual-exclusion algorithm.} }

@inproceedings{hurd2005c, author = {Joe Hurd}, title = {First Order Proof for Higher Order Theorem Provers (abstract)}, pages = {1--3}, crossref = {eshol2005} }

@inproceedings{hurd2005d, author = {Joe Hurd}, title = {System Description: The {M}etis Proof Tactic}, pages = {103--104}, crossref = {eshol2005} }

@inproceedings{hurd2006a, author = {Joe Hurd and Mike Gordon and Anthony Fox}, title = {Formalized Elliptic Curve Cryptography}, crossref = {hcss2006}, url = {http://www.gilith.com/research/papers}, abstract = {Formalizing a mathematical theory is a necessary first step to proving the correctness of programs that refer to that theory in their specification. This paper demonstrates how the mathematical theory of elliptic curves and their application to cryptography can be formalized in higher order logic. This formal development is mechanized using the HOL4 theorem prover, resulting in a collection of higher order logic functions that correctly implement the primitive operations of elliptic curve cryptography.} }

@article{hurd2006b, author = {Joe Hurd}, title = {Book Review: Rippling: Meta-Level Guidance for Mathematical Reasoning by {A}. {B}undy, {D}. {B}asin, {D}. {H}utter and {A}. {I}reland}, journal = {Bulletin of Symbolic Logic}, volume = 12, number = 3, pages = {498--499}, year = 2006, url = {http://www.gilith.com/research/papers} }

@unpublished{hurd2007, author = {Joe Hurd}, title = {Embedding {C}ryptol in Higher Order Logic}, month = mar, year = 2007, url = {http://www.gilith.com/research/papers}, note = {Available from the author's website}, abstract = {This report surveys existing approaches to embedding Cryptol programs in higher order logic, and presents a new approach that aims to simplify as much as possible reasoning about the embedded programs.} }

@inproceedings{hurd2007a, author = {Joe Hurd}, title = {Proof Pearl: The Termination Analysis of {TERMINATOR}}, crossref = {tphols2007}, pages = {151--156}, url = {http://www.gilith.com/research/papers}, abstract = {TERMINATOR is a static analysis tool developed by Microsoft Research for proving termination of Windows device drivers written in C. This proof pearl describes a formalization in higher order logic of the program analysis employed by TERMINATOR, and verifies that if the analysis succeeds then program termination logically follows.} }

@inproceedings{hurd2007b, author = {Joe Hurd and Anthony Fox and Mike Gordon and Konrad Slind}, title = {{ARM} Verification (Abstract)}, crossref = {hcss2007}, url = {http://www.gilith.com/research/papers} }

@inproceedings{hurd2009, author = {Joe Hurd}, title = {{O}pen{T}heory: Package Management for Higher Order Logic Theories}, pages = {31--37}, crossref = {plmms2009}, url = {http://www.gilith.com/research/papers}, abstract = {Interactive theorem proving has grown from toy examples to major projects formalizing mathematics and verifying software, and there is now a critical need for theory engineering techniques to support these efforts. This paper introduces the OpenTheory project, which aims to provide an effective package management system for logical theories. The OpenTheory article format allows higher order logic theories to be exported from one theorem prover, compressed by a stand-alone tool, and imported into a different theorem prover. Articles naturally support theory interpretations, which is the mechanism by which theories can be cleanly transferred from one theorem prover context to another, and which also leads to more efficient developments of standard theories.} }

@inproceedings{hurd2009a, author = {Hurd, Joe and Carlsson, Magnus and Finne, Sigbjorn and Letner, Brett and Stanley, Joel and White, Peter}, title = {Policy {DSL}: High-level Specifications of Information Flows for Security Policies}, crossref = {hcss2009}, url = {http://www.gilith.com/research/papers}, abstract = {SELinux security policies are powerful tools to implement properties such as process confinement and least privilege. They can also be used to support MLS policies on SELinux. However, the policies are very complex, and creating them is a difficult and error-prone process. Furthermore, it is not possible to state explicit constraints on an SELinux policy such as ``information flowing to the network must be encrypted''. We present two related Domain Specific Languages (DSL) to make it much easier to specify SELinux security policies. The first DSL is called Lobster. Lobster allows the user to state an information flow policy at a very high level of abstraction, and then refine the policy into lower and lower levels until it can be translated by the Lobster compiler into an SELinux policy. The second DSL is called Symbion. Symbion allows the user to state policy constraints such as ``information flowing to the network must be encrypted''. Symbion and Lobster will then interface with tools that can check that the policy satisfies the constraints. We also present the analysis tool Shrimp, with which we are able to analyze and find errors in the SELinux Reference Policy. Shrimp is also the basis for our ongoing effort in reverse translating the Reference Policy into Lobster. Since Lobster and Symbion represent information flows and constraints on information flows, they are more broadly applicable than to just SELinux. We will point to some of the directions we wish to take Lobster and Symbion beyond SELinux.} }

@inproceedings{hurd2010a, author = {Joe Hurd and Guy Haworth}, title = {Data Assurance in Opaque Computations}, pages = {221--231}, crossref = {acg2009}, url = {http://gilith.com/research/papers}, abstract = {The chess endgame is increasingly being seen through the lens of, and therefore effectively defined by, a data `model' of itself. It is vital that such models are clearly faithful to the reality they purport to represent. This paper examines that issue and systems engineering responses to it, using the chess endgame as the exemplar scenario. A structured survey has been carried out of the intrinsic challenges and complexity of creating endgame data by reviewing the past pattern of errors during work in progress, surfacing in publications and occurring after the data was generated. Specific measures are proposed to counter observed classes of error-risk, including a preliminary survey of techniques for using state-of-the-art verification tools to generate EGTs that are correct by construction. The approach may be applied generically beyond the game domain.} }

@inproceedings{hurd2010b, author = {Joe Hurd}, title = {Composable Packages for Higher Order Logic Theories}, crossref = {verify2010}, url = {http://gilith.com/research/papers}, abstract = {Interactive theorem proving is tackling ever larger formalization and verification projects, and there is a critical need for theory engineering techniques to support these efforts. One such technique is effective package management, which has the potential to simplify the development of logical theories by precisely checking dependencies and promoting re-use. This paper introduces a domain-specific language for defining composable packages of higher order logic theories, which is designed to naturally handle the complex dependency structures that often arise in theory development. The package composition language functions as a module system for theories, and the paper presents a well-defined semantics for the supported operations. Preliminary tests of the package language and its toolset have been made by packaging the theories distributed with the HOL Light theorem prover. This experience is described, leading to some initial theory engineering discussion on the ideal properties of a reusable theory.} }

@inproceedings{hurd2010c, author = {Joe Hurd}, title = {Evaluation Opportunities in Mechanized Theories (Invited Talk Abstract)}, crossref = {emsqms2010}, url = {http://gilith.com/research/papers} }

@manual{hurd2010d, author = {Joe Hurd}, title = {{O}pen{T}heory Article Format}, month = aug, year = 2010, note = {Available for download at \url{http://gilith.com/research/opentheory/article.html}}, annote = {Version 5}, url = {http://www.gilith.com/research/opentheory/article.html} }

@inproceedings{hurd2011, author = {Joe Hurd}, title = {The {OpenTheory} Standard Theory Library}, pages = {177--191}, crossref = {nfm2011}, url = {http://gilith.com/research/papers}, abstract = {Interactive theorem proving is tackling ever larger formalization and verification projects, and there is a critical need for theory engineering techniques to support these efforts. One such technique is cross-prover package management, which has the potential to simplify the development of logical theories and effectively share theories between different theorem prover implementations. The OpenTheory project has developed standards for packaging theories of the higher order logic implemented by the HOL family of theorem provers. What is currently missing is a standard theory library that can serve as a published contract of interoperability and contain proofs of basic properties that would otherwise appear in many theory packages. The core contribution of this paper is the presentation of a standard theory library for higher order logic represented as an OpenTheory package. We identify the core theory set of the HOL family of theorem provers, and describe the process of instrumenting the HOL Light theorem prover to extract a standardized version of its core theory development. We profile the axioms and theorems of our standard theory library and investigate the performance cost of separating the standard theory library into coherent hierarchical theory packages.} }

@inproceedings{hurd2012, author = {Hurd, Joe}, title = {{FUSE}: Inter-Application Security for Android (abstract)}, pages = {53--54}, crossref = {hcss2012}, url = {http://www.gilith.com/research/papers} }

@inproceedings{huth2000, author = {Michael Huth}, title = {The Interval Domain: A Matchmaker for {aCTL} and {aPCTL}}, booktitle = {US - Brazil Joint Workshops on the Formal Foundations of Software Systems}, year = 2000, editor = {Rance Cleaveland and Michael Mislove and Philip Mulry}, series = {Electronic Notes in Theoretical Computer Science}, volume = 14, publisher = {Elsevier}, url = {http://www.elsevier.nl/locate/entcs/volume14.html} }

@techreport{hutton1996, author = {Graham Hutton and Erik Meijer}, title = {Monadic Parser Combinators}, institution = {Department of Computer Science, University of Nottingham}, number = {NOTTCS-TR-96-4}, year = 1996, url = {http://www.cs.nott.ac.uk/~gmh//bib.html#monparsing}, abstract = {In functional programming, a popular approach to building recursive descent parsers is to model parsers as functions, and to define higher-order functions (or combinators) that implement grammar constructions such as sequencing, choice, and repetition. Such parsers form an instance of a monad, an algebraic structure from mathematics that has proved useful for addressing a number of computational problems. The purpose of this report is to provide a step-by-step tutorial on the monadic approach to building functional parsers, and to explain some of the benefits that result from exploiting monads. No prior knowledge of parser combinators or of monads is assumed. Indeed, this report can also be viewed as a first introduction to the use of monads in programming.} }

@inproceedings{jackson1996, author = {Paul B. Jackson}, title = {Expressive Typing and Abstract Theories in {Nuprl} and {PVS} (tutorial)}, crossref = {tphols1996}, url = {http://www.dcs.ed.ac.uk/home/pbj/papers/tphols96.ps.gz}, abstract = {This 90min tutorial covered two of the more novel aspects of the Nuprl and PVS theorem provers: Expressive type systems. These make specifications more consise and accurate, but good automation is needed to solve proof obligations that arise during type-checking. Support for abstract theories. These have uses in supporting mechanical proof developments, and in the domains of program specification refinement and mathematics. Key issues are whether types can be formed for classes of abstract theories and the need for automatic inference of theory interpretations. The tutorial assumed some familiarity with HOL-like theorem provers, but did not assume familiarity with either PVS or Nuprl in particular. The issue of Nuprl's constructivity was orthogonal to the issues discussed and was therefore glossed over.} }

@article{jamnik1999, author = {Mateja Jamnik and Alan Bundy and Ian Green}, title = {On Automating Diagrammatic Proofs of Arithmetic Arguments}, journal = {Journal of Logic, Language and Information}, volume = 8, number = 3, pages = {297--321}, year = 1999, note = {Also available as Department of Artificial Intelligence Research Paper No. 910}, url = {http://www.cs.bham.ac.uk/~mxj/papers/pub910.jolli-camera.ps} }

@techreport{jensen1993, author = {Claus Skaanning Jensen and Augustine Kong and Uffe Kjaerulff}, title = {Blocking {Gibbs} Sampling in Very Large Probabilistic Expert Systems}, institution = {Aalborg University}, number = {R-93-2031}, month = oct, year = 1993 }

@incollection{johnson1990, author = {D. S. Johnson}, title = {A Catalog of Complexity Classes}, booktitle = {Handbook of Theoretical Computer Science, Volume A: Algorithms and Complexity}, publisher = {Elsevier and The MIT Press (co-publishers)}, year = 1990, editor = {J. van Leeuwen}, chapter = 9, pages = {67--161}, abstract = {"Johnson presents an extensive survey of computational complexity classes. Of particular interest here is his discussion of randomized, probabilistic, and stochastic complexity classes."} }

@book{johnstone1987, author = {Peter T. Johnstone}, title = {Notes on logic and set theory}, publisher = {Cambridge University Press}, year = 1987 }

@phdthesis{jones1990, author = {Claire Jones}, title = {Probabilistic Non-Determinism}, school = {University of Edinburgh}, year = 1990, url = {http://www.lfcs.informatics.ed.ac.uk/reports/90/ECS-LFCS-90-105/} }

@inproceedings{jones1997, author = {Michael D. Jones}, title = {Restricted Types for {HOL}}, crossref = {tphols1997a}, url = {http://www.cs.utah.edu/~mjones/my.papers.html} }

@article{kammuller1999, author = {F. Kamm{\"u}ller and L. C. Paulson}, title = {A Formal Proof of {Sylow's} First Theorem -- An Experiment in Abstract Algebra with {Isabelle} {HOL}}, year = 1999, journal = {Journal of Automated Reasoning}, volume = 23, number = {3--4}, pages = {235--264} }

@inproceedings{kammuller2000, author = {F. Kamm{\"u}ller}, title = {Modular Reasoning in {I}sabelle}, crossref = {cade2000} }

@inproceedings{karger1993, author = {David R. Karger}, title = {Global Min-cuts in {RNC}, and Other Ramifications of a Simple Min-Cut Algorithm}, pages = {21--30}, booktitle = {Proceedings of the 4th Annual {ACM}-{SIAM} Symposium on Discrete Algorithms ({SODA} '93)}, address = {Austin, TX, USA}, month = jan, year = 1993, publisher = {SIAM} }

@incollection{karp1976, author = {R. M. Karp}, title = {The Probabilistic Analysis of some Combinatorial Search Algorithms}, pages = {1--20}, crossref = {traub1976} }

@article{kaufmann1997, author = {Matt Kaufmann and J Strother Moore}, title = {An Industrial Strength Theorem Prover for a Logic Based on {Common} {Lisp}}, journal = {IEEE Transactions on Software Engineering}, volume = 23, number = 4, pages = {203--213}, month = apr, year = 1997, url = {http://www.cs.utexas.edu/users/moore/publications/acl2-papers.html#Overviews} }

@misc{kaufmann1998, author = {Matt Kaufmann and J Strother Moore}, title = {A Precise Description of the {ACL2} Logic}, year = 1998, url = {http://www.cs.utexas.edu/users/moore/publications/km97a.ps.Z} }

@book{kaufmann2000, author = {Matt Kaufmann and Panagiotis Manolios and J Strother Moore}, title = {Computer-Aided Reasoning: An Approach}, publisher = {Kluwer Academic Publishers}, month = jun, year = 2000 }

@book{kaufmann2000a, editor = {Matt Kaufmann and Panagiotis Manolios and J Strother Moore}, title = {Computer-Aided Reasoning: ACL2 Case Studies}, publisher = {Kluwer Academic Publishers}, month = jun, year = 2000 }

@incollection{kelly1996, author = {F. P. Kelly}, title = {Notes on effective bandwidths}, booktitle = {Stochastic Networks: Theory and Applications}, editor = {F.P. Kelly and S. Zachary and I.B. Ziedins}, pages = {141--168}, publisher = {Oxford University Press}, year = 1996, series = {Royal Statistical Society Lecture Notes Series}, number = 4 }

@incollection{keisler1985, author = {H. J. Keisler}, title = {Probability Quantifiers}, year = 1985, booktitle = {Model-Theoretic Logics}, editor = {J. Barwise and S. Feferman}, publisher = {Springer}, address = {New York}, pages = {509--556} }

@article{king1996, author = {D. J. King and R. D. Arthan}, title = {Development of Practical Verification Tools}, journal = {ICL Systems Journal}, volume = 11, number = 1, month = may, year = 1996, url = {http://www.lemma-one.com/ProofPower/papers/papers.html}, abstract = {Current best practice for high-assurance security and safety-critical software often requires the use of machine-checked rigorous mathematical techniques. Unfortunately, while there have been some notable successes, the provision of software tools that adequately support such techniques is a hard research problem, albeit one that is slowly being solved. This paper describes some contributions to this area of research and development in ICL in recent years. This research builds both on ICL's own experiences in building and using tools to support security applications and on work carried out by the Defence Research Agency into notations and methods for program verification.} }

@article{king2000, author = {Steve King and Jonathan Hammond and Rod Chapman and Andy Pryor}, title = {Is Proof More Cost-Effective Than Testing?}, journal = {IEEE Transactions on Software Engineering}, volume = 26, number = 8, month = aug, year = 2000, pages = {675--686}, abstract = {This paper describes the use of formal development methods on an industrial safety-critical application. The Z notation was used for documenting the system specification and part of the design, and the SPARK1 subset of Ada was used for coding. However, perhaps the most distinctive nature of the project lies in the amount of proof that was carried out: proofs were carried out both at the Z level-approximately 150 proofs in 500 pages-and at the SPARK code level-approximately 9,000 verification conditions generated and discharged. The project was carried out under UK Interim Defence Standards 00-55 and 00-56, which require the use of formal methods on safety-critical applications. It is believed to be the first to be completed against the rigorous demands of the 1991 version of these standards. The paper includes comparisons of proof with the various types of testing employed, in terms of their efficiency at finding faults. The most striking result is that the Z proof appears to be substantially more efficient at finding faults than the most efficient testing phase. Given the importance of early fault detection, we believe this helps to show the significant benefit and practicality of large-scale proof on projects of this kind.} }

@inproceedings{klein2009, title = {{seL4}: Formal Verification of an {OS} Kernel}, author = {Gerwin Klein and Kevin Elphinstone and Gernot Heiser and June Andronick and David Cock and Philip Derrin and Dhammika Elkaduwe and Kai Engelhardt and Rafal Kolanski and Michael Norrish and Thomas Sewell and Harvey Tuch and Simon Winwood}, pages = {207--220}, crossref = {sosp2009}, url = {http://ertos.nicta.com.au/publications/papers/Klein_EHACDEEKNSTW_09.abstract}, abstract = {Complete formal verification is the only way to guarantee that a system is free of programming errors. We present our experience in performing the formal, machine-checked verification of the seL4 microkernel from an abstract specification down to its C implementation. We assume correctness of compiler, assembly code, and hardware, and we used a unique design approach that fuses formal and operating systems techniques. To our knowledge, this is the first formal proof of functional correctness of a complete, general-purpose operating-system kernel. Functional correctness means here that the implementation always strictly follows our high-level abstract specification of kernel behaviour. This encompasses traditional design and implementation safety properties such as the kernel will never crash, and it will never perform an unsafe operation. It also proves much more: we can predict precisely how the kernel will behave in every possible situation. seL4, a third-generation microkernel of L4 provenance, comprises 8,700 lines of C code and 600 lines of assembler. Its performance is comparable to other high-performance L4 kernels.} }

@incollection{knuth1970, author = {D. E. Knuth and P. B. Bendix}, title = {Simple word problems in universal algebra}, booktitle = {Computational problems in abstract algebra}, editor = {J. Leech}, publisher = {Pergamon Press}, address = {Elmsford, N.Y.}, year = 1970, pages = {263--297} }

@incollection{knuth1976, author = {Donald E. Knuth and Andrew C. Yao}, title = {The Complexity of Nonuniform Random Number Generation}, crossref = {traub1976} }

@book{knuth1997, author = {Donald E. Knuth}, edition = {Third}, publisher = {Addison-Wesley}, title = {The Art of Computer Programming: Seminumerical Algorithms}, year = 1997 }

@book{koblitz1987, author = {Neal Koblitz}, title = {A Course in Number Theory and Cryptography}, publisher = {Springer}, number = 114, series = {Graduate Texts in Mathematics}, year = 1987 }

@book{kolmogorov1950, author = {Andrei N. Kolmogorov}, year = 1950, title = {Foundations of the Theory of Probability}, publisher = {Chelsea}, address = {New York} }

@article{komissarchik1974, author = {Komissarchik, E. A. and Futer, A. L.}, year = 1974, title = {Ob Analize Ferzevogo Endshpilia pri Pomoshchi {EVM}}, journal = {Problemy Kybernet}, volume = 29, pages = {211--220} }

@inproceedings{korovin2001, author = {Konstantin Korovin and Andrei Voronkov}, title = {{Knuth-Bendix} Constraint Solving Is {NP}-Complete}, crossref = {icalp2001}, pages = {979--992} }

@inproceedings{kozen1979, author = {Dexter Kozen}, title = {Semantics of Probabilistic Programs}, pages = {101--114}, booktitle = {20th Annual Symposium on Foundations of Computer Science}, month = oct, publisher = {IEEE Computer Society Press}, address = {Long Beach, Ca., USA}, year = 1979 }

@inproceedings{kozen1983, author = {Dexter Kozen}, title = {A probabilistic {PDL}}, booktitle = {Proceedings of the 15th ACM Symposium on Theory of Computing}, year = 1983 }

@techreport{kozen1998, author = {Dexter Kozen}, title = {Efficient Code Certification}, institution = {Cornell University}, number = {98-1661}, month = jan, year = 1998 }

@mastersthesis{kristensen2005, author = {Jesper Torp Kristensen}, title = {Generation and compression of endgame tables in chess with fast random access using {OBDDs}}, school = {University of Aarhus, Department of Computer Science}, month = feb, year = 2005, url = {http://www.daimi.au.dk/~doktoren/master_thesis/handin/} }

@inproceedings{kumar1991, author = {R. Kumar and T. Kropf and K. Schneider}, title = {Integrating a First-Order Automatic Prover in the {HOL} Environment}, pages = {170--176}, crossref = {hol1991} }

@inproceedings{kumar2012, author = {Ramana Kumar and Joe Hurd}, title = {Standalone Tactics Using {OpenTheory}}, pages = {405--411}, crossref = {itp2012}, url = {http://gilith.com/research/papers}, abstract = {Proof tools in interactive theorem provers are usually developed within and tied to a specific system, which leads to a duplication of effort to make the functionality available in different systems. Many verification projects would benefit from access to proof tools developed in other systems. Using OpenTheory as a language for communicating between systems, we show how to turn a proof tool implemented for one system into a standalone tactic available to many systems via the internet. This enables, for example, LCF-style proof reconstruction efforts to be shared by users of different interactive theorem provers and removes the need for each user to install the external tool being integrated.} }

@inproceedings{kumar2013,
  author   = {Ramana Kumar},
  title    = {Challenges in Using {O}pen{T}heory to Transport {H}arrison's {HOL} Model from {HOL} {L}ight to {HOL4}},
  pages    = {110--116},
  crossref = {pxtp2013},
  url      = {http://www.easychair.org/publications/?page=2131296333},
  abstract = {OpenTheory is being used for the first time (in work to be described at ITP 2013) as a tool in a larger project, as opposed to in an example demonstrating OpenTheory's capability. The tool works, demonstrating its viability. But it does not work completely smoothly, because the use case is somewhat at odds with OpenTheory's primary design goals. In this extended abstract, we explore the tensions between the goals that OpenTheory-like systems might have, and question the relative importance of various kinds of use. My hope is that describing issues arising from work in progress will stimulate fruitful discussion relevant to the development of proof exchange systems.}
}

@inproceedings{kushilevitz1992,
  author    = {Eyal Kushilevitz and Michael O. Rabin},
  title     = {Randomized Mutual Exclusion Algorithms Revisited},
  pages     = {275--283},
  editor    = {Maurice Herlihy},
  booktitle = {Proceedings of the 11th Annual Symposium on Principles of Distributed Computing},
  address   = {Vancouver, BC, Canada},
  month     = aug,
  year      = 1992,
  publisher = {ACM Press},
  annote    = {Contains a corrected version of Rabin's probabilistic mutual exclusion algorithm~\cite{rabin1982}}
}

@inproceedings{kwiatkowska1999,
  author   = {Marta Kwiatkowska and Gethin Norman and Roberto Segala and Jeremy Sproston},
  title    = {Automatic Verification of Real-Time Systems with Discrete Probability Distributions},
  pages    = {75--95},
  crossref = {arts1999},
  url      = {ftp://ftp.cs.bham.ac.uk/pub/authors/M.Z.Kwiatkowska/arts99.ps.gz},
  abstract = {We consider the timed automata model of [3], which allows the analysis of real-time systems expressed in terms of quantitative timing constraints. Traditional approaches to real-time system description express the model purely in terms of nondeterminism; however, we may wish to express the likelihood of the system making certain transitions. In this paper, we present a model for real-time systems augmented with discrete probability distributions. Furthermore, using the algorithm of [5] with fairness, we develop a model checking method for such models against temporal logic properties which can refer both to timing properties and probabilities, such as, ``with probability 0.6 or greater, the clock x remains below 5 until clock y exceeds 2''.}
}

@inproceedings{kwiatkowska2001,
  author    = {M. Kwiatkowska and G. Norman and D. Parker},
  title     = {{PRISM}: Probabilistic Symbolic Model Checker},
  booktitle = {Proceedings of PAPM/PROBMIV 2001 Tools Session},
  month     = sep,
  year      = 2001,
  url       = {http://www.cs.bham.ac.uk/~dxp/prism/papers/PROBMIV01Tool.ps.gz}
}

@article{lamacchia1991,
  author   = {B. A. LaMacchia and A. M. Odlyzko},
  title    = {Computation of discrete logarithms in prime fields},
  journal  = {Designs, Codes, and Cryptography},
  year     = 1991,
  volume   = 1,
  number   = 1,
  pages    = {46--62},
  month    = may,
  url      = {http://www.dtc.umn.edu/~odlyzko/doc/complete.html},
  abstract = {The presumed difficulty of computing discrete logarithms in finite fields is the basis of several popular public key cryptosystems. The secure identification option of the Sun Network File System, for example, uses discrete logarithms in a field GF(p) with p a prime of 192 bits. This paper describes an implementation of a discrete logarithm algorithm which shows that primes of under 200 bits, such as that in the Sun system, are very insecure. Some enhancements to this system are suggested.}
}

@misc{lamport1993,
  author   = {Leslie Lamport},
  title    = {How to Write a Proof},
  month    = feb,
  year     = 1993,
  url      = {http://www.research.compaq.com/SRC/personal/lamport/pubs/pubs.html#lamport-how-to-write},
  abstract = {A method of writing proofs is proposed that makes it much harder to prove things that are not true. The method, based on hierarchical structuring, is simple and practical.}
}

@book{lamport1994,
  author    = {Leslie Lamport},
  title     = {{\LaTeX}: A Document Preparation System},
  publisher = {Addison-Wesley},
  year      = 1994,
  edition   = {Second}
}

@article{lamport1999,
  author   = {Leslie Lamport and Lawrence C. Paulson},
  title    = {Should Your Specification Language Be Typed?},
  journal  = {ACM Transactions on Programming Languages and Systems},
  volume   = 21,
  number   = 3,
  pages    = {502--526},
  month    = may,
  year     = 1999,
  url      = {http://www.research.compaq.com/SRC/personal/lamport/pubs/pubs.html#lamport-types},
  abstract = {Most specification languages have a type system. Type systems are hard to get right, and getting them wrong can lead to inconsistencies. Set theory can serve as the basis for a specification language without types. This possibility, which has been widely overlooked, offers many advantages. Untyped set theory is simple and is more flexible than any simple typed formalism. Polymorphism, overloading, and subtyping can make a type system more powerful, but at the cost of increased complexity, and such refinements can never attain the flexibility of having no types at all. Typed formalisms have advantages too, stemming from the power of mechanical type checking. While types serve little purpose in hand proofs, they do help with mechanized proofs. In the absence of verification, type checking can catch errors in specifications. It may be possible to have the best of both worlds by adding typing annotations to an untyped specification language. We consider only specification languages, not programming languages.}
}

@book{landman2004,
  author    = {Bruce M. Landman and Aaron Robertson},
  title     = {Ramsey Theory on the Integers},
  publisher = {American Mathematical Society},
  month     = feb,
  year      = 2004
}

@inproceedings{launchbury1994,
  author    = {John Launchbury and Simon L. Peyton Jones},
  title     = {Lazy functional state threads},
  booktitle = {SIGPLAN Symposium on Programming Language Design and Implementation (PLDI'94), Orlando},
  month     = jun,
  year      = 1994,
  pages     = {24--35}
}

@misc{lawson1999,
  author = {Jeff Lawson},
  title  = {Operational Code Authentication},
  year   = 1999,
  url    = {http://www.distributed.net/source/specs/opcodeauth.html}
}

@article{lehman1980,
  author  = {Lehman, Meir M.},
  title   = {Programs, Life Cycles, and Laws of Software Evolution},
  journal = {Proceedings of the IEEE},
  volume  = 68,
  number  = 9,
  pages   = {1060--1076},
  year    = 1980,
  url     = {https://cs.uwaterloo.ca/~a78khan/cs446/additional-material/scribe/27-refactoring/Lehman-LawsOfSoftwareEvolution.pdf}
}

@article{lenat1995,
  author  = {Douglas B. Lenat},
  title   = {{CYC}: {A} Large-Scale Investment in Knowledge Infrastructure},
  journal = {Communications of the ACM},
  volume  = 38,
  number  = 11,
  pages   = {33--38},
  year    = 1995,
  url     = {http://citeseer.ist.psu.edu/lenat95cyc.html}
}

@article{lenstra1987,
  author  = {Lenstra, Jr., H. W.},
  title   = {Factoring integers with elliptic curves},
  journal = {Annals of Mathematics},
  year    = 1987,
  volume  = 126,
  pages   = {649--673}
}

@inproceedings{leroy2006,
  author   = {Xavier Leroy},
  title    = {Formal certification of a compiler back-end or: programming a compiler with a proof assistant},
  pages    = {42--54},
  crossref = {popl2006},
  url      = {http://pauillac.inria.fr/~xleroy/publi/compiler-certif.pdf}
}

@article{lesliehurd2013,
  author    = {Joe Leslie-Hurd and Guy Haworth},
  title     = {Computer Theorem Proving and {HoTT}},
  journal   = {ICGA Journal},
  volume    = 36,
  number    = 2,
  month     = jun,
  year      = 2013,
  pages     = {100--103},
  publisher = {International Computer Games Association},
  url       = {http://centaur.reading.ac.uk/33158/},
  abstract  = {Theorem-proving is a one-player game. The history of computer programs being the players goes back to 1956 and the `LT' Logic Theory Machine of Newell, Shaw and Simon. In game-playing terms, the `initial position' is the core set of axioms chosen for the particular logic and the `moves' are the rules of inference. Now, the Univalent Foundations Program at IAS Princeton and the resulting `HoTT' book on Homotopy Type Theory have demonstrated the success of a new kind of experimental mathematics using computer theorem proving.}
}

@inproceedings{lesliehurd2013a,
  author   = {Joe Leslie-Hurd},
  title    = {Maintaining Verified Software},
  pages    = {71--80},
  crossref = {haskell2013},
  url      = {http://www.gilith.com/research/papers/},
  abstract = {Maintaining software in the face of evolving dependencies is a challenging problem, and in addition to good release practices there is a need for automatic dependency analysis tools to avoid errors creeping in. Verified software reveals more semantic information in the form of mechanized proofs of functional specifications, and this can be used for dependency analysis. In this paper we present a scheme for automatic dependency analysis of verified software, which for each program checks that the collection of installed libraries is sufficient to guarantee its functional correctness. We illustrate the scheme with a case study of Haskell packages verified in higher order logic. The dependency analysis reduces the burden of maintaining verified Haskell packages by automatically computing version ranges for the packages they depend on, such that any combination provides the functionality required for correct operation.}
}

@book{lewis1918,
  author    = {Clarence Irving Lewis},
  title     = {A Survey of Symbolic Logic},
  year      = 1918,
  pages     = {vi+406},
  publisher = {University of California Press},
  address   = {Berkeley},
  note      = {Reprint of Chapters I--IV by Dover Publications, 1960, New York}
}

@inproceedings{lewis2003,
  author   = {Lewis, J. R. and Martin, B.},
  title    = {Cryptol: High assurance, retargetable crypto development and validation},
  pages    = {820--825},
  volume   = 2,
  crossref = {milcom2003},
  url      = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1290218},
  abstract = {As cryptography becomes more vital to the infrastructure of computing systems, it becomes increasingly vital to be able to rapidly and correctly produce new implementations of cryptographic algorithms. To address these challenges, we introduce a new, formal methods-based approach to the specification and implementation of cryptography, present a number of scenarios of use, an overview of the language, and present part of a specification of the advanced encryption standard.}
}

@unpublished{li2005,
  author = {Guodong Li and Konrad Slind},
  title  = {An Embedding of {C}ryptol in {HOL}-4},
  year   = 2005,
  note   = {Unpublished draft}
}

@article{loveland1968,
  author  = {Donald W. Loveland},
  title   = {Mechanical Theorem Proving by Model Elimination},
  journal = {Journal of the {ACM}},
  volume  = 15,
  number  = 2,
  pages   = {236--251},
  month   = apr,
  year    = 1968
}

@inproceedings{lynch2001,
  author   = {Christopher Lynch and Barbara Morawska},
  title    = {Goal-Directed {E}-Unification},
  pages    = {231--245},
  crossref = {rta2001},
  url      = {http://people.clarkson.edu/~clynch/PAPERS/goal_short_final.ps},
  abstract = {We give a general goal directed method for solving the E-unification problem. Our inference system is a generalization of the inference rules for Syntactic Theories, except that our inference system is proved complete for any equational theory. We also show how to easily modify our inference system into a more restricted inference system for Syntactic Theories, and show that our completeness techniques prove completeness there also.}
}

@inproceedings{lynch2001a,
  author        = {Christopher Lynch and Barbara Morawska},
  title         = {Goal-Directed {E}-Unification},
  pages         = {231--245},
  crossref      = {rta2001},
  url           = {http://people.clarkson.edu/~clynch/PAPERS/goal_short_final.ps},
  abstract      = {We give a general goal directed method for solving the E-unification problem. Our inference system is a generalization of the inference rules for Syntactic Theories, except that our inference system is proved complete for any equational theory. We also show how to easily modify our inference system into a more restricted inference system for Syntactic Theories, and show that our completeness techniques prove completeness there also.},
  internal-note = {Apparent duplicate of lynch2001 (same title, pages, crossref, and url) -- consider removing one key or aliasing it}
}

@book{mackenzie2001,
  author    = {MacKenzie, Donald},
  title     = {Mechanizing proof: computing, risk, and trust},
  year      = 2001,
  publisher = {MIT Press}
}

@inproceedings{macqueen1984,
  author   = {David MacQueen},
  title    = {Modules for {S}tandard {ML}},
  pages    = {198--207},
  crossref = {lfp1984},
  url      = {http://www.cs.cmu.edu/~rwh/courses/modules/papers/macqueen84/paper.pdf},
  abstract = {The functional programming language ML has been undergoing a thorough redesign during the past year, and the module facility described here has been proposed as part of the revised language, now called Standard ML. The design has three main goals: (1) to facilitate the structuring of large ML programs; (2) to support separate compilation and generic library units; and (3) to employ new ideas in the semantics of data types to extend the power of ML's polymorphic type system. It is based on concepts inherent in the structure of ML, primarily the notions of a declaration, its type signature, and the environment that it denotes.}
}

@techreport{maddalon2009,
  author      = {Maddalon, Jeffrey M. and Butler, Ricky W. and Mu{\~{n}}oz, C{\'e}sar},
  title       = {A Mathematical Basis for the Safety Analysis of Conflict Prevention Algorithms},
  institution = {National Aeronautics and Space Administration},
  number      = {NASA/TM-2009-215768},
  month       = jun,
  year        = 2009,
  address     = {Langley Research Center, Hampton VA 23681-2199, USA},
  url         = {http://shemesh.larc.nasa.gov/people/jmm/NASA-tm-2009-215768.pdf}
}

@inproceedings{mairson1990,
  author       = {Harry G. Mairson},
  title        = {Deciding {ML} Typability is Complete for Deterministic Exponential Time},
  booktitle    = {Conference Record of the Seventeenth Annual {ACM} Symposium on Principles of Programming Languages},
  year         = 1990,
  organization = {ACM SIGACT and SIGPLAN},
  publisher    = {ACM Press},
  pages        = {382--401}
}

@book{manin1977,
  author    = {Yu I. Manin},
  title     = {A Course in Mathematical Logic},
  publisher = {Springer},
  address   = {New York},
  year      = 1977
}

@unpublished{matthews2005,
  author = {John Matthews},
  title  = {f{C}ryptol Semantics},
  month  = jan,
  year   = 2005,
  note   = {Available from the author on request}
}

@book{mayer2007,
  author    = {Frank Mayer and Karl MacMillan and David Caplan},
  title     = {{SELinux} by Example},
  publisher = {Prentice Hall},
  series    = {Open Source Software Development Series},
  year      = 2007
}

@inproceedings{mcallester1992,
  author   = {David McAllester},
  title    = {Grammar Rewriting},
  year     = 1992,
  pages    = {124--138},
  crossref = {cade1992}
}

@book{mccarty2005,
  author    = {Bill McCarty},
  title     = {{SELinux}: {NSA}'s Open Source Security Enhanced {Linux}},
  publisher = {O'Reilly},
  year      = 2005
}

@book{mccorduck2004,
  author    = {Pamela McCorduck},
  title     = {Machines Who Think: A Personal Inquiry into the History and Prospects of Artificial Intelligence},
  publisher = {A. K. Peters},
  month     = mar,
  year      = 2004
}

@book{mcgraw2006,
  author    = {McGraw, Gary},
  title     = {Software Security: Building Security In},
  month     = feb,
  year      = 2006,
  publisher = {Addison-Wesley},
  series    = {Addison-Wesley Software Security Series},
  url       = {http://www.swsec.com/}
}

@techreport{mciver1996,
  author      = {Annabelle McIver and Carroll Morgan},
  title       = {Probabilistic predicate transformers: Part 2},
  institution = {Programming Research Group, Oxford University Computer Laboratory},
  year        = 1996,
  number      = {PRG-TR-5-96},
  month       = mar,
  url         = {http://web.comlab.ox.ac.uk/oucl/publications/tr/tr-5-96.html},
  abstract    = {Probabilistic predicate transformers guarantee standard (ordinary) predicate transformers to incorporate a notion of probabilistic choice in imperative programs. The basic theory of that, for finite state spaces, is set out in [5], together with a statement of their `healthiness conditions'. Here the earlier results are extended to infinite state spaces, and several more specialised topics are explored: the characterisation of standard and deterministic programs; and the structure of the extended space generated when `angelic choice' is added to the system.}
}

@inproceedings{mciver1998,
  author    = {Annabelle McIver and Carroll Morgan and Elena Troubitsyna},
  title     = {The probabilistic steam boiler: a case study in probabilistic data refinement},
  booktitle = {Proceedings of the International Refinement Workshop and Formal Methods Pacific},
  address   = {Canberra},
  year      = 1998,
  url       = {http://www.tucs.abo.fi/publications/techreports/TR173.html},
  abstract  = {Probabilistic choice and demonic nondeterminism have been combined in a model for sequential programs in which program refinement is defined by removing demonic nondeterminism. Here we study the more general topic of data refinement in the probabilistic setting, extending standard techniques to probabilistic programs. We use the method to obtain a quantitative assessment of safety of a (probabilistic) version of the steam boiler.}
}

@article{mciver2001,
  author  = {A. K. McIver and C. Morgan},
  title   = {Demonic, angelic and unbounded probabilistic choices in sequential programs},
  journal = {Acta Informatica},
  volume  = 37,
  number  = {4/5},
  pages   = {329--354},
  year    = 2001
}

@article{mciver2001b,
  author  = {A. K. McIver and C. C. Morgan},
  title   = {Partial correctness for probabilistic demonic programs},
  journal = {Theoretical Computer Science},
  volume  = 266,
  number  = {1--2},
  pages   = {513--541},
  year    = 2001
}

@book{mciver2004,
  author    = {Annabelle McIver and Carroll Morgan},
  title     = {Abstraction, Refinement and Proof for Probabilistic Systems},
  publisher = {Springer},
  year      = 2004,
  url       = {http://www.cse.unsw.edu.au/~carrollm/arp}
}

@phdthesis{melham1989,
  author = {Thomas Frederick Melham},
  title  = {Formalizing Abstraction Mechanisms for Hardware Verification in Higher Order Logic},
  school = {University of Cambridge},
  month  = aug,
  year   = 1989
}

@article{melham1994,
  author  = {T. F. Melham},
  title   = {A Mechanized Theory of the {$\pi$}-calculus in {HOL}},
  journal = {Nordic Journal of Computing},
  volume  = 1,
  number  = 1,
  year    = 1994,
  pages   = {50--76}
}

@article{meng2006,
  author    = {Jia Meng and Claire Quigley and Lawrence C. Paulson},
  title     = {Automation for interactive proof: first prototype},
  journal   = {Information and Computation},
  volume    = 204,
  number    = 10,
  year      = 2006,
  pages     = {1575--1596},
  publisher = {Academic Press, Inc.},
  address   = {Duluth, MN, USA}
}

@inproceedings{miller1975,
  author    = {Gary L. Miller},
  title     = {Riemann's Hypothesis and Tests for Primality},
  booktitle = {Conference Record of Seventh Annual {ACM} Symposium on Theory of Computation},
  year      = 1975,
  month     = may,
  pages     = {234--239},
  address   = {Albuquerque, New Mexico}
}

@article{miller2010,
  author    = {Miller, Steven P. and Whalen, Michael W. and Cofer, Darren D.},
  title     = {Software model checking takes off},
  journal   = {Communications of the ACM},
  volume    = 53,
  number    = 2,
  month     = feb,
  year      = 2010,
  pages     = {58--64},
  doi       = {10.1145/1646353.1646372},
  publisher = {ACM},
  address   = {New York, NY, USA},
  url       = {http://cacm.acm.org/magazines/2010/2/69362-software-model-checking-takes-off/}
}

@article{milner1978,
  author  = {R. Milner},
  title   = {A theory of type polymorphism in programming},
  journal = {Journal of Computer and System Sciences},
  volume  = 17,
  month   = dec,
  pages   = {348--375},
  year    = 1978
}

@book{milner1989,
  author    = {Robin Milner},
  title     = {Communication and Concurrency},
  series    = {International Series in Computer Science},
  publisher = {Prentice Hall},
  year      = 1989
}

@book{milner1997,
  author    = {Milner, Robin and Tofte, Mads and Harper, Robert and MacQueen, David},
  title     = {The Definition of Standard ML},
  year      = 1997,
  publisher = {MIT Press},
  address   = {Cambridge, MA, USA}
}

@inproceedings{mogul1987,
  author    = {J. C. Mogul and R. F. Rashid and M. J. Accetta},
  title     = {The Packet Filter: An Efficient Mechanism for User-level Network Code},
  booktitle = {ACM Symposium on Operating Systems Principles},
  month     = nov,
  year      = 1987
}

@inproceedings{mokkedem2000,
  author   = {Abdel Mokkedem and Tim Leonard},
  title    = {Formal Verification of the {Alpha} 21364 Network Protocol},
  pages    = {443--461},
  crossref = {tphols2000}
}

@inproceedings{monniaux2000,
  author    = {David Monniaux},
  title     = {Abstract Interpretation of Probabilistic Semantics},
  booktitle = {Seventh International Static Analysis Symposium (SAS'00)},
  series    = {Lecture Notes in Computer Science},
  year      = 2000,
  publisher = {Springer},
  volume    = {1824},
  url       = {http://www.di.ens.fr/~monniaux/biblio/David_Monniaux.html},
  abstract  = {Following earlier models, we lift standard deterministic and nondeterministic semantics of imperative programs to probabilistic semantics. This semantics allows for random external inputs of known or unknown probability and random number generators. We then propose a method of analysis of programs according to this semantics, in the general framework of abstract interpretation. This method lifts an ordinary abstract lattice, for non-probabilistic programs, to one suitable for probabilistic programs. Our construction is highly generic. We discuss the influence of certain parameters on the precision of the analysis, basing ourselves on experimental results.}
}

@inproceedings{monniaux2001,
  author       = {David Monniaux},
  title        = {An Abstract {Monte-Carlo} Method for the Analysis of Probabilistic Programs (extended abstract)},
  booktitle    = {28th Symposium on Principles of Programming Languages (POPL '01)},
  year         = 2001,
  organization = {Association for Computer Machinery},
  url          = {http://www.di.ens.fr/~monniaux/biblio/David_Monniaux.html},
  abstract     = {We introduce a new method, combination of random testing and abstract interpretation, for the analysis of programs featuring both probabilistic and non-probabilistic nondeterminism. After introducing ordinary testing, we show how to combine testing and abstract interpretation and give formulas linking the precision of the results to the number of iterations. We then discuss complexity and optimization issues and end with some experimental results.}
}

@techreport{morgan1995,
  author      = {Carroll Morgan and Annabelle McIver and Karen Seidel and J. W. Sanders},
  title       = {Probabilistic predicate transformers},
  institution = {Oxford University Computing Laboratory Programming Research Group},
  number      = {TR-4-95},
  month       = feb,
  year        = 1995,
  url         = {http://web.comlab.ox.ac.uk/oucl/publications/tr/tr-4-95.html},
  abstract    = {Predicate transformers facilitate reasoning about imperative programs, including those exhibiting demonic non-deterministic choice. Probabilistic predicate transformers extend that facility to programs containing probabilistic choice, so that one can in principle determine not only whether a program is guaranteed to establish a certain result, but also its probability of doing so. We bring together independent work of Claire Jones and Jifeng He, showing how their constructions can be made to correspond; from that link between a predicate-based and a relation-based view of probabilistic execution we are able to propose "probabilistic healthiness conditions", generalising those of Dijkstra for ordinary predicate transformers. The associated calculus seems suitable for exploring further the rigorous derivation of imperative probabilistic programs.}
}

@inproceedings{morgan1996,
  author    = {Carroll Morgan},
  title     = {Proof Rules for Probabilistic Loops},
  booktitle = {Proceedings of the BCS-FACS 7th Refinement Workshop},
  year      = 1996,
  editor    = {He, Jifeng and Cooke, John and Wallis, Peter},
  publisher = {Springer},
  series    = {Workshops in Computing},
  url       = {http://web.comlab.ox.ac.uk/oucl/publications/tr/tr-25-95.html}
}

@article{morgan1996a,
  author  = {Carroll Morgan and Annabelle McIver and Karen Seidel},
  title   = {Probabilistic predicate transformers},
  journal = {ACM Transactions on Programming Languages and Systems},
  year    = 1996,
  volume  = 18,
  number  = 3,
  pages   = {325--353},
  month   = may
}

@article{morgan1999,
  author  = {Carroll Morgan and Annabelle McIver},
  title   = {{pGCL}: formal reasoning for random algorithms},
  journal = {South African Computer Journal},
  year    = 1999,
  volume  = 22,
  pages   = {14--27},
  url     = {http://web.comlab.ox.ac.uk/oucl/research/areas/probs/pGCL.ps.gz}
}

@unpublished{moser1996,
  author   = {Max Moser and Christopher Lynch and Joachim Steinbach},
  title    = {Model Elimination with Basic Ordered Paramodulation},
  month    = jan,
  year     = 1996,
  note     = {Available from the authors' web sites},
  url      = {http://people.clarkson.edu/~clynch/PAPERS/me.ps.gz},
  abstract = {We present a new approach for goal-directed theorem proving with equality which integrates Basic Ordered Paramodulation into a Model Elimination framework. In order to be able to use orderings and to restrict the applications of equations to non-variable positions, the goal-directed tableau construction is combined with bottom-up completion where only positive literals are overlapped. The resulting calculus thus keeps the best properties of completion while giving up only part of the goal-directedness.}
}

@article{moser1997,
  author   = {Max Moser and Ortrun Ibens and Reinhold Letz and Joachim Steinbach and Christoph Goller and Johannes Schumann and Klaus Mayr},
  title    = {{SETHEO} and {E-SETHEO} -- The {CADE}-13 Systems},
  journal  = {Journal of Automated Reasoning},
  year     = 1997,
  volume   = 18,
  pages    = {237--246},
  url      = {http://wwwjessen.informatik.tu-muenchen.de/~goller/PAPERS/jar97.ps.gz},
  abstract = {The model elimination theorem prover SETHEO (version V3.3) and its equational extension E-SETHEO are presented. SETHEO employs sophisticated mechanisms of subgoal selection, elaborate iterative deepening techniques, and local failure caching methods. Its equational counterpart E-SETHEO transforms formulae containing equality (using a variant of Brand's modification method) and processes the output with the standard SETHEO system. The paper gives an overview of the theoretical background, the system architecture, and the performance of both systems.}
}

@book{motwani1995,
  author    = {Rajeev Motwani and Prabhakar Raghavan},
  title     = {Randomized Algorithms},
  publisher = {Cambridge University Press},
  address   = {Cambridge, England},
  month     = jun,
  year      = 1995
}

@inproceedings{murphy1999,
  author    = {Kevin P. Murphy and Yair Weiss and Michael I. Jordan},
  title     = {Loopy Belief Propagation for Approximate Inference: An Empirical Study},
  booktitle = {Proceedings of the Fifteenth Conference on Uncertainty in Artificial Intelligence},
  year      = 1999,
  editor    = {Laskey, Kathryn B. and Prade, Henri},
  address   = {San Francisco},
  publisher = {Morgan Kaufmann},
  url       = {http://citeseer.ist.psu.edu/murphy99loopy.html}
}

@article{nalimov2000,
  author   = {E. V. Nalimov and G. McC. Haworth and E. A. Heinz},
  title    = {Space-efficient indexing of chess endgame tables},
  journal  = {ICGA Journal},
  month    = sep,
  year     = 2000,
  volume   = 23,
  number   = 3,
  pages    = {148--162},
  url      = {http://supertech.lcs.mit.edu/~heinz/ps/NHH_ICGA.ps.gz},
  abstract = {Chess endgame tables should provide efficiently the value and depth of any required position during play. The indexing of an endgame's positions is crucial to meeting this objective. This paper updates Heinz' previous review of approaches to indexing and describes the latest approach by the first and third authors. Heinz' and Nalimov's endgame tables (EGTs) encompass the en passant rule and have the most compact index schemes to date. Nalimov's EGTs, to the Distance-to-Mate (DTM) metric, require only 30.6 x 10^9 elements in total for all the 3-to-5-man endgames and are individually more compact than previous tables. His new index scheme has proved itself while generating the tables and in the 1999 World Computer Chess Championship where many of the top programs used the new suite of EGTs.}
}

@techreport{narkawicz2010,
  author      = {Narkawicz, Anthony J. and Mu{\~{n}}oz, C{\'e}sar},
  title       = {Formal Verification of Air Traffic Conflict Prevention Bands Algorithms},
  institution = {National Aeronautics and Space Administration},
  number      = {NASA/TM-2010-216706},
  month       = jun,
  year        = 2010,
  address     = {Langley Research Center, Hampton VA 23681-2199, USA},
  url         = {http://shemesh.larc.nasa.gov/people/cam/publications/NASA-TM-2010-216706.pdf}
}

@techreport{necula1996,
  author      = {George C. Necula and Peter Lee},
  title       = {Proof-Carrying Code},
  institution = {Computer Science Department, Carnegie Mellon University},
  number      = {CMU-CS-96-165},
  month       = sep,
  year        = 1996
}

@book{nederpelt1994,
  editor    = {Nederpelt, R. P. and Geuvers, J. H. and de Vrijer, R. C.},
  title     = {Selected Papers on Automath},
  series    = {Studies in Logic and the Foundations of Mathematics},
  volume    = 133,
  publisher = {North-Holland},
  year      = 1994
}

@article{nedzusiak1989,
  author  = {Andrzej N\c{e}dzusiak},
  title   = {$\sigma$-Fields and Probability},
  journal = {Journal of Formalized Mathematics},
  year    = {1989},
  url     = {http://mizar.uwb.edu.pl/JFM/Vol1/prob_1.html}
}

@article{nedzusiak1990,
  author  = {Andrzej N\c{e}dzusiak},
  title   = {Probability},
  journal = {Journal of Formalized Mathematics},
  year    = {1990},
  url     = {http://mizar.uwb.edu.pl/JFM/Vol2/prob_2.html}
}

@article{nelson1979,
  author  = {Greg Nelson and Derek C. Oppen},
  title   = {Simplification by Cooperating Decision Procedures},
  journal = {ACM Transactions on Programming Languages and Systems},
  volume  = 1,
  number  = 2,
  pages   = {245--257},
  month   = oct,
  year    = 1979
}

@article{nelson1980,
  author  = {Greg Nelson and Derek C. Oppen},
  title   = {Fast Decision Procedures Based on Congruence Closure},
  journal = {Journal of the Association for Computing Machinery},
  year    = 1980,
  volume  = {27},
  number  = {2},
  pages   = {356--364},
  month   = apr
}

@inproceedings{nesi1993,
  author   = {M. Nesi},
  title    = {Value-Passing {CCS} in {HOL}},
  pages    = {352--365},
  crossref = {hol1993}
}

% NOTE(review): the year field below (1986) looks suspect -- the cited
% Cambridge tech report UCAM-CL-TR-411 dates from the late 1990s, and the
% related entry nesi1993 postdates this supposed thesis by author M. Nesi.
% Confirm the actual thesis year (and adjust the citation key to match)
% before relying on this entry.
@phdthesis{nesi1986, author = {Monica Nesi}, title = {Formalising Process Calculi in Higher Order Logic}, school = {University of Cambridge}, year = 1986, url = {http://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-411.html} }

@article{nieuwenhuis1995,
  author  = {Robert Nieuwenhuis and Albert Rubio},
  title   = {Theorem Proving with Ordering and Equality Constrained Clauses},
  journal = {Journal of Symbolic Computation},
  volume  = 19,
  number  = 4,
  pages   = {321--351},
  month   = apr,
  year    = 1995
}

@incollection{nieuwenhuis2001,
  author    = {R. Nieuwenhuis and A. Rubio},
  title     = {Paramodulation-Based Theorem Proving},
  booktitle = {Handbook of Automated Reasoning},
  chapter   = 7,
  publisher = {Elsevier Science},
  editor    = {A. Robinson and A. Voronkov},
  year      = 2001,
  volume    = {I},
  pages     = {371--443},
  crossref  = {robinson2001}
}

@article{nilsson1986,
  author  = {Nils J. Nilsson},
  title   = {Probabilistic Logic},
  journal = {Artificial Intelligence},
  year    = 1986,
  volume  = 28,
  number  = 1,
  pages   = {71--87}
}

@inproceedings{nipkow1993,
  author       = {Tobias Nipkow},
  title        = {Functional Unification of Higher-Order Patterns},
  pages        = {64--74},
  booktitle    = {Proceedings, Eighth Annual {IEEE} Symposium on Logic in Computer Science},
  year         = 1993,
  organization = {IEEE Computer Society Press},
  abstract     = {Higher-order patterns (HOPs) are a class of lambda-terms which behave almost like first-order terms w.r.t. unification: unification is decidable and unifiable terms have most general unifiers which are easy to compute. HOPs were first discovered by Dale Miller and subsequently developed and applied by Pfenning and Nipkow. This paper presents a stepwise development of a functional unification algorithm for HOPs. Both the usual representation of lambda-terms with alphabetic bound variables and de Bruijn's notation are treated. The appendix contains a complete listing of an implementation in Standard ML.}
}

@inproceedings{nipkow2002,
  author    = {Tobias Nipkow},
  title     = {Hoare Logics in {Isabelle/HOL}},
  booktitle = {Proof and System-Reliability},
  editor    = {H. Schwichtenberg and R. Steinbr\"uggen},
  publisher = {Kluwer},
  year      = 2002,
  pages     = {341--367},
  url       = {http://www4.informatik.tu-muenchen.de/~nipkow/pubs/MOD2001.html}
}

@book{nipkow2002a, author = {Tobias Nipkow and Lawrence C. Paulson and Markus Wenzel}, title = {Isabelle/HOL---A Proof Assistant for Higher-Order Logic}, publisher = {Springer}, series = {LNCS}, volume = 2283, year = 2002, url = {http://www4.informatik.tu-muenchen.de/~nipkow/LNCS2283/} }

@article{nipkow2009, author = {Tobias Nipkow}, title = {Social Choice Theory in {HOL}}, journal = {Journal of Automated Reasoning}, month = oct, year = 2009, volume = 43, number = 3, pages = {289--304}, url = {http://www.springerlink.com/content/gj8qx75506626830/}, abstract = {This article presents formalizations in higher-order logic of two proofs of Arrow's impossibility theorem due to Geanakoplos. The Gibbard-Satterthwaite theorem is derived as a corollary. Lacunae found in the literature are discussed.} }

@manual{nipkow2011, author = {Tobias Nipkow and Lawrence C. Paulson and Markus Wenzel}, title = {Isabelle/HOL---A Proof Assistant for Higher-Order Logic}, month = oct, year = 2011, note = {Available for download at \url{http://isabelle.in.tum.de/dist/Isabelle2011-1/doc/tutorial.pdf}}, url = {http://isabelle.in.tum.de/dist/Isabelle2011-1/doc/tutorial.pdf} }

@inproceedings{nivela1993, author = {Pilar Nivela and Robert Nieuwenhuis}, title = {Saturation of first-order (constrained) clauses with the {{\em Saturate}} system}, pages = {436--440}, editor = {Claude Kirchner}, booktitle = {Proceedings of the 5th International Conference on Rewriting Techniques and Applications ({RTA}-93)}, month = jun, year = 1993, series = {Lecture Notes in Computer Science}, volume = 690, publisher = {Springer} }

@phdthesis{norrish1998, author = {Michael Norrish}, title = {{C} formalised in {HOL}}, school = {University of Cambridge Computer Laboratory}, year = 1998 }

@article{norrish2002, author = {Michael Norrish and Konrad Slind}, title = {A Thread of {HOL} Development}, journal = {The Computer Journal}, volume = 45, number = 1, pages = {37--45}, year = 2002, abstract = {The HOL system is a mechanized proof assistant for higher-order logic that has been under continuous development since the mid-1980s, by an ever-changing group of developers and external contributors. We give a brief overview of various implementations of the HOL logic before focusing on the evolution of certain important features available in a recent implementation. We also illustrate how the module system of Standard ML provided security and modularity in the construction of the HOL kernel, as well as serving in a separate capacity as a useful representation medium for persistent, hierarchical logical theories.} }

@article{norrish2009, author = {Michael Norrish}, title = {Rewriting Conversions Implemented with Continuations}, journal = {Journal of Automated Reasoning}, month = oct, year = 2009, volume = 43, number = 3, pages = {305--336}, url = {http://www.springerlink.com/content/903vr1g4rn114816/}, abstract = {We give a continuation-based implementation of rewriting for systems in the LCF tradition. These systems must construct explicit proofs of equations when rewriting, and currently do so in a way that can be very space-inefficient. An explicit representation of continuations improves performance on large terms, and on long-running computations.} }

@book{northrup2006, author = {L. Northrup and P. Feiler and R. P. Gabriel and J. Goodenough and R. Linger and T. Longstaff and R. Kazman and M. Klein and D. Schmidt and K. Sullivan and K. Wallnau}, title = {Ultra-Large-Scale Systems: The Software Challenge of the Future}, publisher = {Software Engineering Institute, Carnegie Mellon}, year = 2006, note = {The 2006 report for a 12-month study of ultra-large-scale systems software, sponsored by the United States Department of Defense}, url = {http://www.sei.cmu.edu/library/abstracts/books/0978695607.cfm} }

@book{nunn1999, author = {John Nunn}, title = {Secrets of Rook Endings}, publisher = {Gambit Publications}, month = dec, year = 1999 }

@book{nunn2002, author = {John Nunn}, title = {Secrets of Pawnless Endings}, publisher = {Gambit Publications}, edition = {Expanded edition 2}, year = 2002 }

@inproceedings{obua2006, author = {Steven Obua and Sebastian Skalberg}, title = {Importing {HOL} into {I}sabelle/{HOL}}, pages = {298--302}, crossref = {ijcar2006}, url = {http://www4.in.tum.de/~obua/importer/}, abstract = {We developed an importer from both HOL 4 and HOL-light into Isabelle/HOL. The importer works by replaying proofs within Isabelle/HOL that have been recorded in HOL 4 or HOL-light and is therefore completely safe. Concepts in the source HOL system, that is types and constants, can be mapped to concepts in Isabelle/HOL; this facilitates a true integration of imported theorems and theorems that are already available in Isabelle/HOL. The importer is part of the standard Isabelle distribution.} }

@book{okasaki1998, author = {Chris Okasaki}, title = {Purely Functional Data Structures}, publisher = {Cambridge University Press}, year = 1998, address = {Cambridge, UK}, abstract = {Data structures and efficiency analysis for functional programming. Code in ML and Haskell. Many references.} }

@inproceedings{oleary2013, author = {O'Leary, John and Kaivola, Roope and Melham, Tom}, title = {Relational {STE} and theorem proving for formal verification of industrial circuit designs}, crossref = {fmcad2013}, pages = {97--104}, url = {http://www.cs.utexas.edu/users/hunt/FMCAD/FMCAD13/papers/80-Relational-STE-and-Theorem-Proving.pdf} }

@inproceedings{ongtang2009, author = {Machigar Ongtang and Stephen McLaughlin and William Enck and Patrick McDaniel}, title = {Semantically Rich Application-Centric Security in {A}ndroid}, crossref = {acsac2009}, pages = {340--349}, url = {http://www.patrickmcdaniel.org/papers-ct.html} }

@techreport{owre1997, author = {Sam Owre and Natarajan Shankar}, title = {The Formal Semantics of {PVS}}, number = {SRI-CSL-97-2}, month = aug, year = 1997, institution = {Computer Science Laboratory, SRI International}, address = {Menlo Park, CA}, url = {http://pvs.csl.sri.com/manuals.html} }

@manual{owre1999, author = {S. Owre and N. Shankar and J. M. Rushby and D. W. J. Stringer-Calvert}, title = {{PVS} System Guide}, month = sep, year = 1999, organization = {Computer Science Laboratory, SRI International}, address = {Menlo Park, CA} }

@techreport{owre2001, author = {Sam Owre and N. Shankar}, title = {Theory Interpretations in {PVS}}, institution = {SRI International}, number = {SRI-CSL-01-01}, month = apr, year = 2001, url = {http://www.csl.sri.com/~owre/papers/interpretations/interpretations.pdf} }

@article{parks2000, author = {Michael Parks}, title = {Number-Theoretic Test Generation for Directed Rounding}, journal = {IEEE Transactions on Computers}, volume = 49, number = 7, pages = {651--658}, year = 2000, url = {http://www.michaelparks754.com/publications/parks-dirnd-augmented.pdf}, abstract = {We present methods to generate systematically the hardest test cases for multiplication, division, and square root subject to directed rounding, essentially extending previous work on number-theoretic floating-point testing to rounding modes other than to-nearest. The algorithms focus upon the rounding boundaries of the modes truncate, to-minusinfinity, and to-infinity, and programs based on them require little beyond exact arithmetic in the working precision to create billions of edge cases. We will show that the amount of work required to calculate trial multiplicands pays off in the form of free extra tests due to an interconnection among the operations considered herein. Although these tests do not replace proofs of correctness, they can be used to gain a high degree of confidence that the accuracy requirements as mandated by IEEE Standard 754 have been satisfied.} }

@misc{paulin2002, author = {Christine Paulin and Philippe Audebaud and Richard Lassaigne}, title = {Randomized Algorithms in Type Theory}, howpublished = {Slides from a talk delivered at Dagstuhl seminar 01341: Dependent Type Theory meets Practical Programming}, month = aug, year = 2001, url = {http://www.lri.fr/~paulin/ALEA/dagstuhl.ps.gz} }

@article{paulson83, author = {Lawrence C. Paulson}, title = {A Higher-Order Implementation of Rewriting}, journal = {Science of Computer Programming}, volume = 3, pages = {119--149}, year = 1983, url = {http://www.cl.cam.ac.uk/~lp15/papers/Reports/TR035-lcp-rewriting.pdf}, abstract = {Many automatic theorem-provers rely on rewriting. Using theorems as rewrite rules helps to simplify the subgoals that arise during a proof. LCF is an interactive theorem-prover intended for reasoning about computation. Its implementation of rewriting is presented in detail. LCF provides a family of rewriting functions, and operators to combine them. A succession of functions is described, from pattern matching primitives to the rewriting tool that performs most inferences in LCF proofs. The design is highly modular. Each function performs a basic, specific task, such as recognizing a certain form of tautology. Each operator implements one method of building a rewriting function from sim- pler ones. These pieces can be put together in numerous ways, yielding a variety of rewriting strategies. The approach involves programming with higher-order functions. Rewriting functions are data val- ues, produced by computation on other rewriting functions. The code is in daily use at Cambridge, demon- strating the practical use of functional programming.} }

@article{paulson1993, author = {Lawrence C. Paulson}, title = {Set Theory for Verification: {I}. {From} Foundations to Functions}, journal = {Journal of Automated Reasoning}, volume = 11, number = 3, pages = {353--389}, year = 1993, url = {http://www.cl.cam.ac.uk/users/lcp/papers/Sets/set-I.pdf}, abstract = {A logic for specification and verification is derived from the axioms of Zermelo-Fraenkel set theory. The proofs are performed using the proof assistant Isabelle. Isabelle is generic, supporting several different logics. Isabelle has the flexibility to adapt to variants of set theory. Its higher-order syntax supports the definition of new binding operators. Unknowns in subgoals can be instantiated incrementally. The paper describes the derivation of rules for descriptions, relations and functions, and discusses interactive proofs of Cantor's Theorem, the Composition of Homomorphisms challenge [9], and Ramsey's Theorem [5]. A generic proof assistant can stand up against provers dedicated to particular logics.} }

@book{paulson1994, author = {Lawrence C. Paulson}, title = {{Isabelle}: A Generic Theorem Prover}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = {828}, note = {xvii + 321 pages}, year = 1994 }

@article{paulson1995, author = {Lawrence C. Paulson}, title = {Set Theory for Verification: {II}. {Induction} and Recursion}, journal = {Journal of Automated Reasoning}, volume = 15, number = 2, pages = {167--215}, year = 1995, url = {http://www.cl.cam.ac.uk/users/lcp/papers/Sets/set-II.pdf}, abstract = {A theory of recursive definitions has been mechanized in Isabelle's Zermelo-Fraenkel (ZF) set theory. The objective is to support the formalization of particular recursive definitions for use in verification, semantics proofs and other computational reasoning. Inductively defined sets are expressed as least fixedpoints, applying the Knaster-Tarski Theorem over a suitable set. Recursive functions are defined by well-founded recursion and its derivatives, such as transfinite recursion. Recursive data structures are expressed by applying the Knaster-Tarski Theorem to a set, such as $V_{\omega}$, that is closed under Cartesian product and disjoint sum. Worked examples include the transitive closure of a relation, lists, variable-branching trees and mutually recursive trees and forests. The Schr\"oder-Bernstein Theorem and the soundness of propositional logic are proved in Isabelle sessions.} }

@inproceedings{paulson1997, author = {Lawrence C. Paulson}, title = {Proving Properties of Security Protocols by Induction}, pages = {70--83}, crossref = {csfw1997}, url = {http://www.cl.cam.ac.uk/ftp/papers/reports/TR409-lcp-Proving-Properties-of-Security-Protocols-by-Induction.ps.gz} }

@article{paulson1997a, author = {Lawrence C. Paulson}, title = {Mechanizing Coinduction and Corecursion in Higher-Order Logic}, journal = {Journal of Logic and Computation}, year = 1997, volume = 7, number = 2, month = mar, pages = {175--204}, url = {http://www.cl.cam.ac.uk/Research/Reports/TR304-lcp-coinduction.ps.gz} }

@manual{paulson1998, author = {Lawrence C. Paulson}, title = {The {Isabelle} Reference Manual}, month = oct, year = 1998, note = {With contributions by Tobias Nipkow and Markus Wenzel} }

@article{paulson1999, author = {Lawrence C. Paulson}, title = {Inductive Analysis of the {Internet} Protocol {TLS}}, journal = {ACM Transactions on Information and System Security}, month = aug, year = 1999, volume = 2, number = 3, pages = {332--351} }

@article{paulson1999a, author = {L. C. Paulson}, title = {A Generic Tableau Prover and its Integration with {Isabelle}}, journal = {Journal of Universal Computer Science}, volume = 5, number = 3, month = mar, year = 1999, url = {http://www.jucs.org/jucs_5_3/a_generic_tableau_prover} }

@article{paulson2000, author = {Lawrence C. Paulson}, title = {Mechanizing {UNITY} in {Isabelle}}, journal = {ACM Transactions on Computational Logic}, year = 2000, note = {In press} }

@inproceedings{paulson2007, author = {Lawrence C. Paulson and Kong Woei Susanto}, title = {Source-level proof reconstruction for interactive theorem proving}, crossref = {tphols2007} }

@inproceedings{pfenning1999, author = {F. Pfenning and C. Sch{\"u}rmann}, title = {System Description: Twelf --- {A} Meta-Logical Framework for Deductive Systems}, crossref = {cade1999} }

@inproceedings{pike2006, author = {Lee Pike and Mark Shields and John Matthews}, title = {A Verifying Core for a Cryptographic Language Compiler}, pages = {1--10}, crossref = {acl2006}, url = {http://www.cse.ogi.edu/~johnm/papers/ACL2Workshop06.pdf}, abstract = {A verifying compiler is one that emits both object code and a proof of correspondence between object and source code. We report the use of ACL2 in building a verifying compiler for Cryptol, a stream-based language for encryption algorithm specification that targets Rockwell Collins' AAMP7 microprocessor (and is designed to compile efficiently to hardware, too). This paper reports on our success in verifying the "core" transformations of the compiler -- those transformations over the sub-language of Cryptol that begin after "higher-order" aspects of the language are compiled away, and finish just before hardware or software specific transformations are exercised. The core transformations are responsible for aggressive optimizations. We have written an ACL2 macro that automatically generates both the correspondence theorems and their proofs. The compiler also supplies measure functions that ACL2 uses to automatically prove termination of Cryptol programs, including programs with mutually-recursive cliques of streams. Our verifying compiler has proved the correctness of its core transformations for multiple algorithms, including TEA, RC6, and AES. Finally, we describe an ACL2 book of primitive operations for the general specification and verification of encryption algorithms.} }

@book{pirsig, author = {Robert M. Pirsig}, title = {Zen and the Art of Motorcycle Maintenance}, publisher = {Morrow}, month = may, year = 1974, edition = {10th}, url = {http://www.aoe.vt.edu/~ciochett/lit/zen.html} }

@book{plotkin2000, editor = {Gordon Plotkin and Colin Stirling and Mads Tofte}, title = {Proof, Language, and Interaction: Essays in Honour of Robin Milner}, publisher = {MIT Press}, month = may, year = 2000 }

@inproceedings{podelski2004, author = {Andreas Podelski and Andrey Rybalchenko}, title = {Transition Invariants}, pages = {32--41}, crossref = {lics2004} }

@inproceedings{podelski2004a, author = {Andreas Podelski and Andrey Rybalchenko}, title = {A Complete Method for the Synthesis of Linear Ranking Functions}, pages = {239--251}, crossref = {vmcai2004} }

@article{pollack1995, author = {R. Pollack}, title = {On Extensibility of Proof Checkers}, journal = {Lecture Notes in Computer Science}, volume = {996}, pages = {140--161}, year = 1995, url = {ftp://ftp.cs.chalmers.se/pub/users/pollack/extensibility.ps.gz} }

@book{polya1948, author = {George P\'olya}, title = {How to Solve It}, publisher = {Princeton University Press}, address = {Princeton}, year = 1948 }

@article{posthoff1986, author = {Posthoff, Chr. and Herschberg, I. S.}, title = {Computer Analysis of a Queen Endgame}, journal = {ICCA Journal}, volume = 9, number = 4, pages = {189--198}, year = 1986, note = {Translation of Komissarchik (1974).} }

@inproceedings{pucella2008, author = {Pucella, Riccardo and Tov, Jesse A.}, title = {{Haskell} session types with (almost) no class}, pages = {25--36}, crossref = {haskell2008}, abstract = {We describe an implementation of session types in Haskell. Session types statically enforce that client-server communication proceeds according to protocols. They have been added to several concurrent calculi, but few implementations of session types are available. Our embedding takes advantage of Haskell where appropriate, but we rely on no exotic features. Thus our approach translates with minimal modification to other polymorphic, typed languages such as ML and Java. Our implementation works with existing Haskell concurrency mechanisms, handles multiple communication channels and recursive session types, and infers protocols automatically. While our implementation uses unsafe operations in Haskell, it does not violate Haskell's safety guarantees. We formalize this claim in a concurrent calculus with unsafe communication primitives over which we layer our implementation of session types, and we prove that the session types layer is safe. In particular, it enforces that channel-based communication follows consistent protocols.} }

@article{pugh1992, author = {William Pugh}, title = {A Practical Algorithm for Exact Array Dependence Analysis}, journal = {Communications of the ACM}, volume = 35, number = 8, pages = {102--114}, month = aug, year = 1992, abstract = {The Omega test is an integer programming algorithm that can determine whether a dependence exists between two array references, and if so, under what conditions. Conventional wisdom holds that integer programming techniques are far too expensive to be used for dependence analysis, except as a method of last resort for situations that cannot be decided by simpler methods. We present evidence that suggests this wisdom is wrong, and that the Omega test is competitive with approximate algorithms used in practice and suitable for use in production compilers. The Omega test is based on an extension of Fourier-Motzkin variable elimination to integer programming, and has worst-case exponential time complexity. However, we show that for many situations in which other (polynomial) methods are accurate, the Omega test has low order polynomial time complexity. The Omega test can be used to simplify integer programming problems, rather than just deciding them. This has many applications, including accurately and efficiently computing dependence direction and distance vectors.} }

@article{quinn1983, author = {Quinn, K.}, title = {Ever had problems rounding off figures? The stock exchange has}, journal = {Wall Street Journal}, volume = 202, number = 91, month = nov, year = 1983, url = {http://www5.in.tum.de/~huckle/Vancouv.pdf} }

@phdthesis{rabe2008, author = {Florian Rabe}, title = {Representing Logics and Logic Translations}, school = {Jacobs University Bremen}, month = may, year = 2008, url = {http://kwarc.info/frabe/} }

@article{rabin1963, author = {M. O. Rabin}, title = {Probabilistic Automata}, journal = {Information and Control}, volume = 6, year = 1963, pages = {230--245}, abstract = {"This is a seminal paper on the theory of probabilistic automata. Rabin defined the notion of a language being accepted by a probabilistic automaton relative to a cutpoint lambda. One of his key results was to show that there exist finite state probabilistic automata that define non-regular languages."} }

@incollection{rabin1976, author = {M. O. Rabin}, title = {Probabilistic Algorithms}, pages = {21--39}, crossref = {traub1976} }

@article{rabin1982, author = {Michael O. Rabin}, title = {{$N$}-Process Mutual Exclusion with Bounded Waiting by {$4\log_2(N)$}-Valued Shared Variable}, pages = {66--75}, journal = {Journal of Computer and System Sciences}, volume = 25, number = 1, month = aug, year = 1982 }

@inproceedings{ramsey2002, author = {Norman Ramsey and Avi Pfeffer}, title = {Stochastic Lambda Calculus and Monads of Probability Distributions}, crossref = {popl2002}, url = {http://www.eecs.harvard.edu/~nr/pubs/pmonad-abstract.html}, abstract = {Probability distributions are useful for expressing the meanings of probabilistic languages, which support formal modeling of and reasoning about uncertainty. Probability distributions form a monad, and the monadic definition leads to a simple, natural semantics for a stochastic lambda calculus, as well as simple, clean implementations of common queries. But the monadic implementation of the expectation query can be much less efficient than current best practices in probabilistic modeling. We therefore present a language of measure terms, which can not only denote discrete probability distributions but can also support the best known modeling techniques. We give a translation of stochastic lambda calculus into measure terms. Whether one translates into the probability monad or into measure terms, the results of the translations denote the same probability distribution.} }

@article{ray1993, author = {T. S. Ray}, title = {An evolutionary approach to synthetic biology: {Zen} and the art of creating life}, journal = {Artificial Life}, year = 1993, volume = {1}, number = {1}, url = {ftp://tierra.slhs.udel.edu/doc/Zen.tex} }

@article{reiter1999, author = {M. K. Reiter and A. D. Rubin}, title = {Anonymous web transactions with Crowds}, journal = {Communications of the ACM}, year = 1999, volume = 42, number = 2, month = feb, pages = {32--38}, abstract = {The authors describe how the Crowds system works -- essentially, a group of users act as web forwarders for each other in a way that appears random to outsiders. They analyse the anonymity properties of the system and compare it with other systems. Crowds enables the retrieval of information over the web with only a small amount of private information leakage to other parties.} }

@inproceedings{richter2004, author = {Stefan Richter}, title = {Formalizing Integration Theory with an Application to Probabilistic Algorithms}, pages = {271--286}, crossref = {tphols2004} }

@article{robinson1965, author = {J. A. Robinson}, title = {A Machine-Oriented Logic Based on the Resolution Principle}, journal = {Journal of the ACM}, volume = 12, number = 1, pages = {23--49}, month = jan, year = 1965 }

@article{robinson1965a, author = {J. A. Robinson}, journal = {International Journal of Computer Mathematics}, pages = {227--234}, title = {Automatic Deduction with Hyper-Resolution}, volume = 1, year = 1965 }

@article{robinson1970, author = {J. A. Robinson}, title = {A note on mechanizing higher order logic}, journal = {Machine Intelligence}, year = {1970}, volume = {5}, pages = {121--135} }

@book{robinson1996, author = {Abraham Robinson}, title = {Non-standard Analysis}, publisher = {Princeton University Press}, year = 1996 }

@book{robinson2001, title = {Handbook of Automated Reasoning}, booktitle = {Handbook of Automated Reasoning}, editor = {A. Robinson and A. Voronkov}, year = 2001, publisher = {Elsevier Science} }

@article{roycroft1996, author = {Roycroft, A. J.}, year = 1996, title = {*{C}*}, journal = {EG}, volume = 7, number = 119, pages = {771} }

@article{roycroft1997, author = {Roycroft, A. J.}, year = 1997, title = {The Computer Section}, journal = {EG}, volume = 8, number = 123, pages = {47--48} }

@article{roycroft1998, author = {Roycroft, A. J.}, year = 1998, title = {*{C}*}, journal = {EG}, volume = 8, number = {130 Supplement}, pages = {428} }

@article{roycroft1999, author = {Roycroft, A. J.}, year = 1999, title = {{AJR}'s Snippets}, journal = {EG}, volume = 8, number = 131, pages = {476} }

@unpublished{rudnicki1992, author = {Piotr Rudnicki}, title = {An Overview of the {Mizar} Project}, note = {Notes to a talk at the workshop on Types for Proofs and Programs}, address = {B{\aa}stad, Sweden}, year = 1992, month = jun, url = {http://www.cs.ualberta.ca/~piotr/Mizar/index.html} }

@inproceedings{rushby2010, author = {John Rushby}, title = {Formalism in Safety Cases}, crossref = {sss2010}, pages = {3--17}, url = {http://www.csl.sri.com/users/rushby/abstracts/sss10} }

@book{russell1968, author = {Bertrand Russell}, title = {The Autobiography of Bertrand Russell}, publisher = {George Allen \& Unwin}, note = {3 volumes}, address = {London}, year = 1968 }

@article{russinoff1985, author = {David M. Russinoff}, title = {An Experiment with the {Boyer-Moore} Theorem Prover: A Proof of {Wilson's} Theorem}, journal = {Journal of Automated Reasoning}, year = 1985, pages = {121--139}, volume = 1 }

@inproceedings{saaltink1997, author = {Mark Saaltink}, title = {Domain Checking {Z} Specifications}, pages = {185--192}, booktitle = {LFM' 97: Fourth NASA Langley Formal Methods Workshop}, year = 1997, editor = {C. Michael Holloway and Kelly J. Hayhurst}, address = {Hampton, VA}, month = sep, url = {http://atb-www.larc.nasa.gov/Lfm97/proceedings} }

@article{sattler1988, author = {Sattler, R.}, year = 1988, title = {Further to the {K}{R}{P}(a2){K}b{B}{P}(a3) Database}, journal = {ICCA Journal}, volume = 11, number = {2/3}, pages = {82--87} }

@book{schaeffer1997, author = {Schaeffer, J.}, year = 1997, title = {One Jump Ahead: Challenging Human Supremacy in Checkers}, publisher = {Springer}, address = {New York}, isbn = {0-387-94930-5} }

@inproceedings{schaeffer2003, author = {Schaeffer, J. and Bjornsson, Y. and Burch, N. and Lake, R. and Lu, P. and Sutphen, S.}, title = {Building the Checkers 10-piece Endgame Databases}, crossref = {cg2003} }

@book{schneider1999, title = {Trust in Cyberspace}, author = {Schneider, Fred}, publisher = {National Academy Press}, year = 1999 }

@book{schneier1996, author = {Bruce Schneier}, title = {Applied Cryptography}, publisher = {Wiley}, year = 1996, edition = {Second} }

@inproceedings{schumann1994, author = {Johann Ph. Schumann}, title = {{DELTA} --- {A} bottom-up processor for top-down theorem provers (system abstract)}, crossref = {cade1994}, url = {http://wwwjessen.informatik.tu-muenchen.de/~schumann/delta-cade.html} }

@article{schumann1994a, author = {Johann Schumann}, journal = {Journal of Automated Reasoning}, pages = {409--421}, title = {Tableaux-based Theorem Provers: Systems and Implementations}, volume = 13, year = 1994, url = {http://wwwjessen.informatik.tu-muenchen.de/~schumann/tableau-jar.html}, abstract = {The following list of tableaux-based theorem provers was assembled in the Spring and Summer 1993 as the result of a wide-spread enquiry via e-mail. It is intended to provide a short overview of the field and existing implementations. For each system, a short description is given. Additionally, useful information about the system is presented in tabular form. This includes the type of logic which can be handled by the system (input), the implementation language, hardware and operating systems requirements (implementation). Most of the systems are available as binaries or as sources with documentation and can be obtained via anonymous ftp or upon request. The descriptions and further information have been submitted by the individuals whose names are given as contact address. The provers are ordered alphabetically by their name (or the author's name).} }

@article{schwartz1980, author = {J. T. Schwartz}, title = {Fast probabilistic algorithms for verification of polynomial identities}, journal = {Journal of the ACM}, volume = 27, year = 1980, pages = {701--717}, number = 4, month = oct, url = {http://www.acm.org/pubs/articles/journals/jacm/1980-27-4/p701-schwartz/p701-schwartz.pdf} }

@techreport{seger1992, author = {Carl-Johan Seger}, title = {Introduction to Formal Hardware Verification}, institution = {Department of Computer Science, University of British Columbia}, month = jun, year = 1992, number = {TR-92-13}, url = {http://www.cs.ubc.ca/cgi-bin/tr/1992/TR-92-13}, abstract = {Formal hardware verification has recently attracted considerable interest. The need for ``correct'' designs in safety-critical applications, coupled with the major cost associated with products delivered late, are two of the main factors behind this. In addition, as the complexity of the designs increase, an ever smaller percentage of the possible behaviors of the designs will be simulated. Hence, the confidence in the designs obtained by simulation is rapidly diminishing. This paper provides an introduction to the topic by describing three of the main approaches to formal hardware verification: theorem-proving, model checking and symbolic simulation. We outline the underlying theory behind each approach, we illustrate the approaches by applying them to simple examples and we discuss their strengths and weaknesses. We conclude the paper by describing current on-going work on combining the approaches to achieve multi-level verification approaches.} }

@techreport{seidel1996, author = {Karen Seidel and Carroll Morgan and Annabelle McIver}, title = {An introduction to probabilistic predicate transformers}, institution = {Programming Research Group, Oxford University}, number = {TR-6-96}, year = 1996 }

@inproceedings{seidel1997, author = {Karen Seidel and Carroll Morgan and Annabelle McIver}, title = {Probabilistic imperative programming: a rigorous approach}, crossref = {fmp1997} }

@inproceedings{shankar1999, author = {Natarajan Shankar and Sam Owre}, title = {Principles and Pragmatics of Subtyping in {PVS}}, booktitle = {Recent Trends in Algebraic Development Techniques, {WADT '99}}, editor = {D. Bert and C. Choppy and P. D. Mosses}, pages = {37--52}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 1827, month = sep, year = 1999, address = {Toulouse, France}, url = {http://www.csl.sri.com/reports/html/wadt99.html} }

@unpublished{shields2006, author = {Mark Shields}, title = {A language for symmetric-key cryptographic algorithms and its efficient implementation}, month = mar, year = 2006, note = {Available from the author's website}, url = {http://www.cartesianclosed.com/pub/mcryptol/}, abstract = {The development of cryptographic hardware for classified data is expensive and time consuming. We present a domain-specific language, microCryptol, and a corresponding compiler, mcc, to address these costs. microCryptol supports the capture of mathematically precise specifications of algorithms, while also allowing those specifications to be compiled to efficient imperative code able to execute on embedded microprocessors.} }

@inproceedings{shield2001, author = {Jamie Shield and Ian J. Hayes and David A. Carrington}, title = {Using Theory Interpretation to Mechanise the Reals in a Theorem Prover}, booktitle = {Electronic Notes in Theoretical Computer Science}, volume = 42, publisher = {Elsevier}, editor = {Colin Fidge}, year = 2001, url = {http://citeseer.nj.nec.com/shield98mechanising.html} }

@book{silverman1986, author = {J. H. Silverman}, title = {The Arithmetic of Elliptic Curves}, publisher = {Springer}, year = 1986, series = {Graduate Texts in Mathematics}, number = 106 }

@inproceedings{slaney2004, author = {John Slaney and Arnold Binas and David Price}, title = {Guiding a Theorem Prover with Soft Constraints}, crossref = {ecai2004}, pages = {221--225}, url = {http://www.cs.toronto.edu/~abinas/pubs/sos_ecai.pdf}, abstract = {Attempts to use finite models to guide the search for proofs by resolution and the like in forst order logic all suffer from the need to trade off the expense of generating and maintaining models against the improvement in quality of guidance as investment in the semantic aspect of reasoning is increased. Previous attempts to resolve this tradeoff have resulted either in poor selection of models, or in fragility as the search becomes over-sensitive to the order of clauses, or in extreme slowness. Here we present a fresh approach, whereby most of the clauses for which a model is sought are treated as soft constraints. The result is a partial model of all of the clauses rather than an exact model of only a subset of them. This allows our system to combine the speed of maintaining just a single model with the robustness previously requiring multiple models. We present experimental evidence of benefits over a range of first order problem domains.} }

@manual{slind1999, author = {Konrad Slind}, title = {{HOL98} Draft User's Manual, Athabasca Release, Version 2}, month = jan, year = 1999, note = {Part of the documentation included with the hol98 theorem-prover} }

@phdthesis{slind1999a, author = {Konrad Slind}, title = {Reasoning about Terminating Functional Programs}, school = {Technical University of Munich}, year = 1999 }

@manual{slind2001, author = {Konrad Slind and Michael Norrish}, title = {The {HOL} System Tutorial}, month = feb, year = 2001, note = {Part of the documentation included with the HOL4 theorem-prover}, url = {http://hol.sf.net/} }

@inproceedings{slind2003, author = {Konrad Slind and Joe Hurd}, title = {Applications of Polytypism in Theorem Proving}, pages = {103--119}, crossref = {tphols2003}, url = {http://www.gilith.com/research/papers}, abstract = {Polytypic functions have mainly been studied in the context of functional programming languages. In that setting, applications of polytypism include elegant treatments of polymorphic equality, prettyprinting, and the encoding and decoding of high-level datatypes to and from low-level binary formats. In this paper, we discuss how polytypism supports some aspects of theorem proving: automated termination proofs of recursive functions, incorporation of the results of metalanguage evaluation, and equivalence-preserving translation to a low-level format suitable for propositional methods. The approach is based on an interpretation of higher order logic types as terms, and easily deals with mutual and nested recursive types.} }

@article{solovay1977, author = {R. Solovay and V. Strassen}, title = {A Fast {Monte-Carlo} Test for Primality}, journal = {SIAM Journal on Computing}, volume = 6, number = 1, month = mar, year = 1977, pages = {84--85} }

@article{srivastava2008, author = {Saurabh Srivastava and Michael Hicks and Jeffrey S. Foster and Patrick Jenkins}, title = {Modular Information Hiding and Type-Safe Linking for {C}}, journal = {IEEE Transactions on Software Engineering}, year = 2008, volume = 34, number = 3, month = may # {/} # jun }

@inproceedings{stewart2012, author = {Daryl Stewart}, title = {Formal for everyone - {C}hallenges in achievable multicore design and verification}, pages = {186}, crossref = {fmcad2012}, url = {http://www.cs.utexas.edu/~hunt/FMCAD/FMCAD12/FormalForEveryone_DStewart_ARM.pdf} }

@article{stiller1989, author = {Stiller, L. B.}, year = 1989, title = {Parallel Analysis of Certain Endgames}, journal = {ICCA Journal}, volume = 12, number = 2, pages = {55--64} }

@misc{stirling1997, author = {Colin Stirling}, title = {Bisimulation, model checking and other games}, month = jun, year = 1997, note = {Notes for a Mathfit instructional meeting on games and computation, held in Edinburgh, Scotland}, url = {http://www.dcs.ed.ac.uk/home/cps/mathfit.ps} }

@book{stirzaker1994, author = {David Stirzaker}, title = {Elementary Probability}, year = 1994, publisher = {Cambridge University Press} }

@phdthesis{strohlein1970, author = {T. Str{\"o}hlein}, title = {Untersuchungen {\"u}ber kombinatorische Spiele}, school = {Technical University of Munich}, year = 1970 }

@book{sullivan2008, author = {O'Sullivan, Bryan and Goerzen, John and Stewart, Don}, title = {Real World Haskell}, year = 2008, edition = {First}, publisher = {O'Reilly Media, Inc.} }

@techreport{suttner1997, author = {Christian B. Suttner and Geoff Sutcliffe}, title = {The {TPTP} Problem Library --- v2.1.0}, institution = {Department of Computer Science, James Cook University}, number = {JCU-CS-97/8}, month = dec, year = 1997, url = {http://www.cs.jcu.edu.au/ftp/pub/techreports/97-8.ps.gz} }

@phdthesis{syme1998, author = {Donald Syme}, title = {Declarative Theorem Proving for Operational Semantics}, school = {University of Cambridge}, year = 1998 }

@manual{syme1998b, author = {Donald Syme}, title = {A Simplifier/Programmable Grinder for hol98}, month = jan, year = 1998, note = {Part of the documentation included with the hol98 theorem-prover} }

@inproceedings{tammet1996, author = {Tanel Tammet}, title = {A Resolution Theorem Prover for Intuitionistic Logic}, crossref = {cade1996} }

@manual{tammet1997, author = {Tanel Tammet}, title = {Gandalf version c-1.0c Reference Manual}, month = oct, year = 1997, url = {http://www.cs.chalmers.se/~tammet/gandalf/} }

@article{tammet1997a, author = {Tanel Tammet}, title = {Gandalf}, journal = {Journal of Automated Reasoning}, pages = {199--204}, month = apr, year = 1997, volume = 18, number = 2 }

@inproceedings{tammet1998, author = {Tanel Tammet}, title = {Towards Efficient Subsumption}, crossref = {cade1998} }

@unpublished{tamplin2006, author = {Tamplin, J.}, year = 2006, note = {Available from the web page {http://chess.jaet.org/endings/}}, title = {{EGT}-query service extending to 6-man pawnless endgame {EGT}s in {DTC}, {DTM}, {DTZ} and {DTZ}${}_{50}$ metrics} }

@unpublished{tay2006, author = {Aaron Tay}, title = {A guide to Endgames Tablebase}, year = 2006, note = {Available from the web page {http://www.aarontay.per.sg/Winboard/egtb.html}} }

@misc{thery1999, author = {Laurent Th{\'e}ry}, title = {A Quick Overview of {HOL} and {PVS}}, month = aug, year = 1999, note = {Lecture Notes from the Types Summer School '99: Theory and Practice of Formal Proofs, held in Giens, France}, url = {http://www-sop.inria.fr/types-project/lnotes/types99-lnotes.html} }

@article{thompson1986, author = {Thompson, K.}, year = 1986, title = {Retrograde Analysis of Certain Endgames}, journal = {ICCA Journal}, volume = 9, number = 3, pages = {131--139} }

@book{toulmin1958, author = {Stephen Toulmin}, title = {The Uses of Argument}, publisher = {Cambridge University Press}, year = 1958 }

@book{touretzky1990, author = {David S. Touretzky}, title = {Common Lisp: A Gentle Introduction to Symbolic Computation}, publisher = {Benjamin Cummings}, year = 1990, url = {http://www.cs.cmu.edu/~dst/LispBook/index.html} }

@book{traub1976, title = {Algorithms and Complexity: New Directions and Recent Results}, booktitle = {Algorithms and Complexity: New Directions and Recent Results}, year = 1976, editor = {J. F. Traub}, address = {New York}, publisher = {Academic Press}, abstract = {"This book provides a record of a conference, and contains many novel ideas for probabilistic algorithms."} }

@inproceedings{trybulec1985, author = {A. Trybulec and H. A. Blair}, title = {Computer Aided Reasoning}, pages = {406--412}, booktitle = {Proceedings of the Conference on Logic of Programs}, editor = {Rohit Parikh}, month = jun, year = 1985, address = {Brooklyn, NY}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 193 }

@inproceedings{trybulec1985a, author = {Andrzej Trybulec and Howard Blair}, title = {Computer Assisted Reasoning with {MIZAR}}, pages = {26--28}, booktitle = {Proceedings of the 9th International Joint Conference on Artificial Intelligence}, editor = {Aravind Joshi}, month = aug, year = 1985, address = {Los Angeles, CA}, publisher = {Morgan Kaufmann} }

@inproceedings{turing1949, author = {Alan M. Turing}, title = {Checking a Large Routine}, pages = {67--69}, booktitle = {Report of a Conference on High Speed Automatic Calculating Machines}, month = jun, year = 1949, address = {Cambridge, England}, publisher = {University Mathematical Laboratory} }

@article{turner1979, author = {D. A. Turner}, title = {A New Implementation Technique for Applicative Languages}, journal = {Software: Practice and Experience}, volume = 9, number = 1, pages = {31--49}, month = jan, year = 1979, abstract = {Combinators, a "curried" version of lambda calculus that eliminates the need for symbol binding. Combinators can be reduced (evaluated) locally and in parallel, so they make an interesting model of parallel computation. Combinator hackers: this paper introduces some new combinators, besides S-K-I, that help keep the translation from blowing up in space.} }

@article{tymoczko1979, author = {Thomas Tymoczko}, title = {The Four-Color Problem and Its Philosophical Significance}, journal = {Journal of Philosophy}, volume = {76}, number = {2}, pages = {57--83}, year = 1979 }

@book{vanroy2004, author = {Peter Van Roy and Seif Haridi}, title = {Concepts, Techniques, and Models of Computer Programming}, publisher = {MIT Press}, year = 2004, month = mar }

@techreport{vardi1997, author = {Moshe Y. Vardi}, title = {Why Is Modal Logic So Robustly Decidable?}, number = {TR97-274}, institution = {Rice University}, month = apr, year = 1997, abstract = {In the last 20 years modal logic has been applied to numerous areas of computer science, including artificial intelligence, program verification, hardware verification, database theory, and distributed computing. There are two main computational problems associated with modal logic. The first problem is checking if a given formula is true in a given state of a given structure. This problem is known as the model-checking problem. The second problem is checking if a given formula is true in all states of all structures. This problem is known as the validity problem. Both problems are decidable. The model-checking problem can be solved in linear time, while the validity problem is PSPACE-complete. This is rather surprising when one considers the fact that modal logic, in spite of its apparent propositional syntax, is essentially a first-order logic, since the necessity and possibility modalities quantify over the set of possible worlds, and model checking and validity for first-order logic are computationally hard problems. Why, then, is modal logic so robustly decidable? To answer this question, we have to take a close look at modal logic as a fragment of first-order logic. A careful examination reveals that propositional modal logic can in fact be viewed as a fragment of 2-variable first-order logic. It turns out that this fragment is computationally much more tractable than full first-order logic, which provides some explanation for the tractability of modal logic. Upon a deeper examination, however, we discover that this explanation is not too satisfactory. The tractability of modal logic is quite robust and cannot be explained by the relationship to two-variable first-order logic. 
We argue that the robust decidability of modal logic can be explained by the so-called tree-model property, and we show how the tree-model property leads to automata-based decision procedures.} }

@inproceedings{volker2007, author = {Norbert V{\"o}lker}, title = {{HOL2P} - {A} System of Classical Higher Order Logic with Second Order Polymorphism}, pages = {334--351}, crossref = {tphols2007}, url = {http://cswww.essex.ac.uk/staff/norbert/hol2p/} }

@incollection{vonneumann1963, author = {von Neumann, John}, title = {Various techniques for use in connection with random digits}, booktitle = {von Neumann's Collected Works}, volume = 5, publisher = {Pergamon}, year = 1963, pages = {768--770} }

@phdthesis{vos2000, author = {Tanja E. J. Vos}, year = 2000, school = {Utrecht University}, title = {{UNITY} in Diversity: {A} Stratified Approach to the Verification of Distributed Algorithms} }

@inproceedings{wadler1992, author = {Philip Wadler}, title = {The essence of functional programming}, booktitle = {19th Symposium on Principles of Programming Languages}, publisher = {ACM Press}, year = 1992, month = jan }

@article{wadler1992a, author = {Philip Wadler}, title = {Comprehending monads}, journal = {Mathematical Structures in Computer Science}, pages = {461--493}, year = 1992, volume = 2, url = {http://www.research.avayalabs.com/user/wadler/topics/monads.html}, abstract = {Category theorists invented \emph{monads} in the 1960's to concisely express certain aspects of universal algebra. Functional programmers invented \emph{list comprehensions} in the 1970's to concisely express certain programs involving lists. This paper shows how list comprehensions may be generalised to an arbitrary monad, and how the resulting programming feature can concisely express in a pure functional language some programs that manipulate state, handle exceptions, parse text, or invoke continuations. A new solution to the old problem of destructive array update is also presented. No knowledge of category theory is assumed.} }

@book{wagon1993, author = {Stan Wagon}, title = {The Banach-Tarski Paradox}, publisher = {Cambridge University Press}, year = 1993 }

@unpublished{wansbrough2001, author = {Keith Wansbrough and Michael Norrish and Peter Sewell and Andrei Serjantov}, title = {Timing {UDP}: mechanized semantics for sockets, threads and failures}, note = {Draft}, year = 2001, url = {http://www.cl.cam.ac.uk/users/pes20/Netsem/} }

@book{weaver1987, author = {Jefferson Hane Weaver}, title = {The World of Physics}, volume = {II}, publisher = {Simon and Schuster}, address = {New York}, year = 1987 }

@inproceedings{welinder1995, author = {Morten Welinder}, title = {Very Efficient Conversions}, crossref = {hol1995}, pages = {340--352}, url = {http://www.diku.dk/forskning/topps/bibliography/1995.html#D-248}, abstract = {Using program transformation techniques from the field of partial evaluation an automatic tool for generating very efficient conversions from equality-stating theorems has been implemented. In the situation where a Hol user would normally employ the built-in function GEN\_REWRITE\_CONV, a function that directly produces a conversion of the desired functionality, this article demonstrates how producing the conversion in the form of a program text instead of as a closure can lead to significant speed-ups. The Hol system uses a set of 31 simplifying equations on a very large number of intermediate terms derived, e.g., during backwards proofs. For this set the conversion generated by the two-step method is about twice as fast as the method currently used. When installing the new conversion, tests show that the overall running times of Hol proofs are reduced by about 10\%. Apart from the speed-up this is completely invisible to the user. With cooperation from the user further speed-up is possible.} }

@inproceedings{wenzel1999, author = {Markus Wenzel}, title = {Isar - {A} Generic Interpretative Approach to Readable Formal Proof Documents}, pages = {167--184}, crossref = {tphols1999} }

@inproceedings{westfold2007, author = {Stephen Westfold}, title = {Integrating {I}sabelle/{HOL} with {S}pecware}, crossref = {tphols2007a} }

@article{white1991, author = {Arthur White}, title = {Limerick}, pages = 91, journal = {Mathematics Magazine}, volume = 64, number = 2, month = apr, year = 1991 }

@book{whitehead1910, author = {Alfred North Whitehead and Bertrand Russell}, year = 1910, title = {Principia Mathematica}, publisher = {Cambridge University Press}, address = {Cambridge} }

@book{whitehead1911, author = {A. N. Whitehead}, year = 1911, title = {An Introduction to Mathematics}, publisher = {Williams and Northgate}, address = {London} }

@inproceedings{wiedijk2001, author = {Freek Wiedijk}, title = {{Mizar} Light for {HOL} Light}, crossref = {tphols2001} }

@inproceedings{wiedijk2007, author = {Freek Wiedijk}, title = {Mizar's Soft Type System}, pages = {383--399}, crossref = {tphols2007} }

@book{williams1991, author = {David Williams}, title = {Probability with Martingales}, publisher = {Cambridge University Press}, year = 1991 }

@mastersthesis{witty1995, author = {Carl Roger Witty}, title = {The {Ontic} Inference Language}, school = {MIT}, month = jun, year = 1995, url = {http://www.ai.mit.edu/~cwitty/cwitty.html}, abstract = {OIL, the Ontic Inference Language, is a simple bottom-up logic programming language with equality reasoning. Although intended for use in writing proof verification systems, OIL is an interesting general-purpose programming language. In some cases, very simple OIL programs can achieve an efficiency which requires a much more complicated algorithm in a traditional programming language. This thesis gives a formal semantics for OIL and some new results related to its efficient implementation. The main new result is a method of transforming bottomup logic programs with equality to bottom-up logic programs without equality. An introduction to OIL and several examples are also included.} }

@manual{wong1993, author = {Wai Wong}, title = {The {HOL} \texttt{res\_quan} library}, year = 1993, note = {HOL88 documentation}, url = {http://www.ftp.cl.cam.ac.uk/ftp/hvg/hol98/libraries/library-docs.html} }

@inproceedings{wong1995, author = {W. Wong}, title = {Recording and Checking {HOL} Proofs}, pages = {353--368}, crossref = {hol1995}, url = {http://www.cl.cam.ac.uk/research/hvg/proofchecker/#recordcheck}, abstract = {Formal proofs generated by mechanized theorem proving systems may consist of a large number of inferences. As these theorem proving systems are usually very complex, it is extremely difficult if not impossible to formally verify them. This calls for an independent means of ensuring the consistency of mechanically generated proofs. This paper describes a method of recording HOL proofs in terms of a sequence of applications of inference rules. The recorded proofs can then be checked by an independent proof checker. Also described in this paper is an efficient proof checker which is able to check a practical proof consisting of thousands of inference steps.} }

@techreport{wong1996, author = {Wai Wong}, title = {A Proof Checker for {HOL}}, institution = {University of Cambridge Computer Laboratory}, number = {389}, month = mar, year = 1996 }

@book{wos1992, author = {Larry Wos and Ross Overbeek and Ewing Lusk and Jim Boyle}, year = 1992, title = {Automated Reasoning: Introduction and Applications}, publisher = {McGraw-Hill}, address = {New York}, edition = {Second} }

@book{wos1999, author = {Wos, Larry and Pieper, Gail W.}, title = {A fascinating country in the world of computing: your guide to automated reasoning}, year = 1999, isbn = {981-02-3910-6}, publisher = {World Scientific Publishing Co., Inc.}, address = {River Edge, NJ, USA}, url = {http://books.google.com/books?id=mSHBkIVHXZUC&lpg=PP1&ots=9mc4xxcLgF&dq=larry%20wos&pg=PP1#v=onepage&q&f=false} }

@article{wu2001, author = {Wu, R. and Beal, D. F.}, year = 2001, title = {Solving Chinese Chess Endgames by Database Construction}, journal = {Information Sciences}, volume = 135, number = {3--4}, pages = {207--228} }

@phdthesis{xi1998, author = {Hongwei Xi}, title = {Dependent Types in Practical Programming}, school = {Carnegie Mellon University}, month = sep, year = 1998, url = {http://www.ececs.uc.edu/~hwxi/DML/DML.html} }

@article{zadeh1965, author = {Lotfi A. Zadeh}, journal = {Information and Control}, pages = {338--353}, title = {Fuzzy Sets}, volume = 8, year = 1965 }

@phdthesis{zammit1999, author = {Vincent Zammit}, title = {On the Readability of Machine Checkable Formal Proofs}, school = {University of Kent at Canterbury}, month = mar, year = 1999 }

@inproceedings{zhang2002, author = {Lintao Zhang and Sharad Malik}, title = {The Quest for Efficient Boolean Satisfiability Solvers}, pages = {295--313}, crossref = {cade2002}, url = {http://www.ee.princeton.edu/~chaff/publication/cade_cav_2002.pdf} }

@inproceedings{zhang2005, author = {Junxing Zhang and Konrad Slind}, title = {Verification of {E}uclid's Algorithm for Finding Multiplicative Inverses}, pages = {205--220}, crossref = {tphols2005a} }

@book{fips1994, key = {FIPS-186-1994}, author = {{FIPS}}, title = {Digital Signature Standard}, series = {Federal Information Processing Standards Publication 186}, publisher = {U.S. Department of Commerce/NIST National Technical Information Service}, month = may, year = 1994, url = {http://www.itl.nist.gov/fipspubs/fip186.htm} }

@manual{galois2008, key = {GALOIS-2008-CRYPTOL}, title = {Cryptol Programming Guide}, organization = {Galois, Inc.}, month = oct, year = 2008, note = {Available for download at \url{http://corp.galois.com/storage/files/downloads/Cryptol_ProgrammingGuide.pdf}}, url = {http://corp.galois.com/storage/files/downloads/Cryptol_ProgrammingGuide.pdf} }

@book{ieee1985, key = {IEEE-754-1985}, author = {{IEEE}}, title = {Standard for binary floating-point arithmetic}, series = {ANSI/IEEE Standard 754-1985}, year = 1985, publisher = {The Institute of Electrical and Electronic Engineers, Inc.}, address = {345 East 47th Street, New York, NY 10017, USA}, url = {http://ieeexplore.ieee.org/xpls/abs\_all.jsp?arnumber=30711} }

@book{ieee2001, key = {IEEE-1364-2001}, author = {{IEEE}}, title = {Standard for Verilog Hardware Description Language}, series = {IEEE Standard 1364-2001}, year = 2001, publisher = {The Institute of Electrical and Electronic Engineers, Inc.}, address = {345 East 47th Street, New York, NY 10017, USA}, abstract = {The Verilog Hardware Description Language (HDL) is defined in this standard. Verilog HDL is a formal notation intended for use in all phases of the creation of electronic systems. Because it is both machine readable and human readable, it supports the development, verification, synthesis, and testing of hardware designs; the communication of hardware design data; and the maintenance, modification, and procurement of hardware. The primary audiences for this standard are the implementors of tools supporting the language and advanced users of the language.} }

@book{ieee2001a, key = {IEEE-1003-1-2001}, author = {{IEEE}}, title = {Standard for Information Technology---Portable Operating System Interface (POSIX) Base Definitions}, series = {IEEE Standard 1003.1-2001}, year = 2001, publisher = {The Institute of Electrical and Electronic Engineers, Inc.}, address = {345 East 47th Street, New York, NY 10017, USA}, abstract = {This standard defines a standard operating system interface and environment, including a command interpreter (or shell), and common utility programs to support applications portability at the source code level. It is the single common revision to IEEE Std 1003.1-1996, IEEE Std 1003.2-1992, and the Base Specifications of The Open Group Single UNIX Specification, Version 2.} }

@book{mod1991, key = {MOD-00-55-1991}, author = {{MoD}}, title = {Requirements for the Procurement of Safety Critical Software in Defence Equipment}, series = {Interim Defence Standard 00-55}, publisher = {Ministry of Defence}, address = {Directorate of Standardization, Kentigern House, 65 Brown St, Glasgow G2 8EX, UK}, month = apr, year = 1991 }

@unpublished{nsa2005, key = {NSA-2005-SUITE-B}, author = {{National Security Agency}}, title = {Fact Sheet {NSA} {S}uite {B} Cryptography}, year = 2005, note = {Published on the web at \url{http://www.nsa.gov/ia/industry/crypto_suite_b.cfm}}, url = {http://www.nsa.gov/ia/industry/crypto_suite_b.cfm} }

@unpublished{nsa2009, key = {NSA-2009-ECC}, title = {The Case for Elliptic Curve Cryptography}, author = {{National Security Agency}}, month = jan, year = 2009, note = {Available for download at \url{http://www.nsa.gov/business/programs/elliptic_curve.shtml}}, url = {http://www.nsa.gov/business/programs/elliptic_curve.shtml} }

@unpublished{nsa2010, key = {NSA-2010-SUITE-B}, title = {{NSA} {S}uite {B} Cryptography}, author = {{National Security Agency}}, month = nov, year = 2010, note = {Available for download at \url{http://www.nsa.gov/ia/programs/suiteb_cryptography/}}, url = {http://www.nsa.gov/ia/programs/suiteb_cryptography/} }

@unpublished{nsa2010a, key = {NSA-2010-NIST}, title = {Mathematical routines for the {NIST} prime elliptic curves}, author = {{National Security Agency}}, month = apr, year = 2010, note = {Available for download at \url{http://www.nsa.gov/ia/_files/nist-routines.pdf}}, url = {http://www.nsa.gov/ia/_files/nist-routines.pdf} }

@inproceedings{qed1994, key = {QED-1994}, author = {Anonymous}, title = {The {QED} Manifesto}, crossref = {cade1994}, url = {http://www-unix.mcs.anl.gov/qed/} }

@proceedings{acg2009, key = {ACG-2009}, title = {Advances in Computer Games, 12th International Conference ({ACG}~2009)}, booktitle = {Advances in Computer Games, 12th International Conference ({ACG}~2009)}, editor = {Van den Herik, H. Jaap and Spronck, Pieter}, month = may, year = 2010, location = {Pamplona, Spain}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 6048, url = {http://www.springerlink.com/content/978-3-642-12992-6}, doi = {10.1007/978-3-642-12993-3} }

@proceedings{acl2006, key = {ACL-2006}, title = {Proceedings of the Sixth International Workshop on the {ACL2} Theorem Prover and its Applications}, booktitle = {Proceedings of the Sixth International Workshop on the {ACL2} Theorem Prover and its Applications}, editor = {Panagiotis Manolios and Matthew Wilding}, month = aug, year = 2006, location = {Seattle, Washington, USA}, publisher = {HappyJack Books} }

@proceedings{acsac2001, key = {ACSAC-2001}, title = {Proceedings of the 17th Annual Computer Security Applications Conference (ACSAC 2001)}, booktitle = {Proceedings of the 17th Annual Computer Security Applications Conference (ACSAC 2001)}, year = 2001 }

@proceedings{acsac2009, key = {ACSAC-2009}, title = {Proceedings of the 25th Annual Computer Security Applications Conference (ACSAC 2009)}, booktitle = {Proceedings of the 25th Annual Computer Security Applications Conference (ACSAC 2009)}, month = dec, year = 2009, location = {Honolulu, Hawaii} }

@proceedings{aisc1998, key = {AISC-1998}, title = {Artificial Intelligence and Symbolic Computation (AISC '98)}, booktitle = {Artificial Intelligence and Symbolic Computation (AISC '98)}, editor = {Jacques Calmet and Jan Plaza}, month = sep, year = 1998, address = {Plattsburgh, New York, USA}, publisher = {Springer}, series = {Lecture Notes in Artificial Intelligence}, volume = {1476}, url = {http://link.springer-ny.com/link/service/series/0558/tocs/t1476.htm} }

@proceedings{arw2002, key = {ARW-2002}, title = {Ninth Workshop on Automated Reasoning: Bridging the Gap between Theory and Practice}, booktitle = {Ninth Workshop on Automated Reasoning: Bridging the Gap between Theory and Practice}, editor = {Toby Walsh}, month = apr, year = 2002, location = {Imperial College, London, UK}, publisher = {The Society for the Study of Artificial Intelligence and Simulation of Behaviour}, isbn = {1902956264} }

@proceedings{arts1999, key = {ARTS-1999}, editor = {Joost-Pieter Katoen}, title = {Formal Methods for Real-Time and Probabilistic Systems}, series = {Lecture Notes in Computer Science}, volume = 1601, month = may, year = 1999, address = {Bamberg, Germany}, publisher = {Springer}, url = {http://link.springer-ny.com/link/service/series/0558/tocs/t1601.htm} }

@techreport{avocs2002, key = {AVOCS-2002}, title = {{AVoCS} 2002: Second Workshop on Automated Verification of Critical Systems}, booktitle = {{AVoCS} 2002: Second Workshop on Automated Verification of Critical Systems}, author = {Gethin Norman and Marta Kwiatkowska and Dimitar Guelev}, editor = {Gethin Norman and Marta Kwiatkowska and Dimitar Guelev}, month = apr, year = 2002, institution = {School of Computer Science, University of Birmingham}, series = {University of Birmingham Technical Report}, number = {CSR-02-6} }

@proceedings{csfw1997, key = {CSFW-1997}, title = {10th Computer Security Foundations Workshop}, booktitle = {10th Computer Security Foundations Workshop}, year = 1997, publisher = {IEEE Computer Society Press} }

@proceedings{cade1992, key = {CADE-1992}, title = {Proceedings of the 11th International Conference on Automated Deduction (CADE-11)}, booktitle = {Proceedings of the 11th International Conference on Automated Deduction (CADE-11)}, editor = {Deepak Kapur}, location = {Saratoga Springs, NY, USA}, month = jun, year = 1992, series = {Lecture Notes in Artificial Intelligence}, volume = 607, publisher = {Springer} }

@proceedings{cade1994, key = {CADE-1994}, title = {12th International Conference on Automated Deduction (CADE-12)}, booktitle = {12th International Conference on Automated Deduction (CADE-12)}, editor = {Alan Bundy}, month = jun, year = 1994, location = {Nancy, France}, publisher = {Springer}, series = {Lecture Notes in Artificial Intelligence}, volume = 814 }

@proceedings{cade1996, key = {CADE-1996}, title = {13th International Conference on Automated Deduction (CADE-13)}, booktitle = {13th International Conference on Automated Deduction (CADE-13)}, editor = {Michael A. McRobbie and John K. Slaney}, location = {New Brunswick, NJ, USA}, month = jul, year = 1996, publisher = {Springer}, series = {Lecture Notes in Artificial Intelligence}, volume = 1104 }

@proceedings{cade1998, key = {CADE-1998}, title = {Proceedings of the 15th International Conference on Automated Deduction (CADE-15)}, booktitle = {Proceedings of the 15th International Conference on Automated Deduction (CADE-15)}, editor = {Claude Kirchner and H{\'e}l{\`e}ne Kirchner}, month = jul, year = 1998, location = {Lindau, Germany}, series = {Lecture Notes in Artificial Intelligence}, volume = 1421, publisher = {Springer} }

@proceedings{cade1999, key = {CADE-1999}, title = {Proceedings of the 16th International Conference on Automated Deduction (CADE-16)}, booktitle = {Proceedings of the 16th International Conference on Automated Deduction (CADE-16)}, editor = {H. Ganzinger}, month = jul, year = 1999, location = {Trento, Italy}, series = {Lecture Notes in Artificial Intelligence}, volume = 1632, publisher = {Springer} }

@proceedings{cade2000, key = {CADE-2000}, title = {Proceedings of the 17th International Conference on Automated Deduction (CADE-17)}, booktitle = {Proceedings of the 17th International Conference on Automated Deduction (CADE-17)}, editor = {David A. McAllester}, month = jun, year = 2000, location = {Pittsburgh, PA, USA}, series = {Lecture Notes in Computer Science}, volume = 1831, publisher = {Springer} }

@proceedings{cade2002, key = {CADE-2002}, title = {Proceedings of the 18th International Conference on Automated Deduction (CADE-18)}, booktitle = {Proceedings of the 18th International Conference on Automated Deduction (CADE-18)}, editor = {Andrei Voronkov}, month = jul, year = 2002, location = {Copenhagen, Denmark}, series = {Lecture Notes in Artificial Intelligence}, volume = 2392, publisher = {Springer} }

@proceedings{calco2005, key = {CALCO-2005}, title = {{CALCO} 2005}, booktitle = {{CALCO} 2005}, editor = {Fiadeiro, J. L. and others}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = {3629}, year = {2005} }

@comment{CAV proceedings volumes (LNCS); location field records the conference venue.}
@proceedings{cav2006, key = {CAV-2006}, title = {Proceedings of the 18th International Conference on Computer Aided Verification (CAV 2006)}, booktitle = {Proceedings of the 18th International Conference on Computer Aided Verification (CAV 2006)}, editor = {Thomas Ball and Robert B. Jones}, month = aug, year = 2006, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 4144, location = {Seattle, WA, USA} }

@proceedings{cav2014, key = {CAV-2014}, title = {Proceedings of the 26th International Conference on Computer Aided Verification (CAV 2014)}, booktitle = {Proceedings of the 26th International Conference on Computer Aided Verification (CAV 2014)}, editor = {Armin Biere and Roderick Bloem}, month = jul, year = 2014, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 8559, location = {Vienna, Austria} }

@proceedings{cg2003, key = {CG-2003}, title = {Proceedings of the 10th Advances in Computer Games Conference}, booktitle = {Proceedings of the 10th Advances in Computer Games Conference}, editor = {van den Herik, H. J. and Iida, H. and Heinz, E. A.}, month = nov, year = 2003, publisher = {Kluwer}, isbn = {1-4020-7709-2} }

@comment{CHARME 2003 proceedings volume (LNCS 2860).}
@proceedings{charme2003, key = {CHARME-2003}, title = {Correct Hardware Design and Verification Methods (CHARME 2003)}, booktitle = {Correct Hardware Design and Verification Methods (CHARME 2003)}, month = oct, year = 2003, editor = {Daniel Geist and Enrico Tronci}, volume = 2860, series = {Lecture Notes in Computer Science}, publisher = {Springer} }

@proceedings{dagstuhl2006, key = {DAGSTUHL-2006}, title = {Mathematics, Algorithms, Proofs}, booktitle = {Mathematics, Algorithms, Proofs}, year = 2006, editor = {Thierry Coquand and Henri Lombardi and Marie-Fran{\c{c}}oise Roy}, number = {05021}, series = {Dagstuhl Seminar Proceedings}, issn = {1862-4405}, publisher = {Internationales Begegnungs- und Forschungszentrum f{\"u}r Informatik (IBFI), Schloss Dagstuhl, Germany} }

@comment{ECAI 2004 and EMS+QMS 2010 workshop proceedings.}
@proceedings{ecai2004, key = {ECAI-2004}, title = {16th European Conference on Artificial Intelligence (ECAI 2004)}, booktitle = {16th European Conference on Artificial Intelligence (ECAI 2004)}, month = aug, year = 2004, editor = {Ramon L{\'{o}}pez de M{\'{a}}ntaras and Lorenza Saitta}, publisher = {IOS Press} }

@proceedings{emsqms2010, key = {EMSQMS-2010}, title = {Proceedings of the Workshop on Evaluation Methods for Solvers and Quality Metrics for Solutions (EMS+QMS 2010)}, booktitle = {Proceedings of the Workshop on Evaluation Methods for Solvers and Quality Metrics for Solutions (EMS+QMS 2010)}, editor = {McGuinness, D. and Stump, A. and Sutcliffe, G. and Tinelli, C.}, month = jul, year = 2010, location = {Edinburgh, Scotland} }

@proceedings{eshol2005, key = {ESHOL-2005}, title = {Empirically Successful Automated Reasoning in Higher-Order Logic}, booktitle = {Empirically Successful Automated Reasoning in Higher-Order Logic}, editor = {Christoph Benzm{\"u}ller and John Harrison and Carsten Sch{\"u}rmann}, month = dec, year = 2005, location = {Montego Bay, Jamaica}, publisher = {arXiv.org}, url = {http://arxiv.org/abs/cs/0601042} }

@comment{EuroSys, FAST, FM, FMCAD, FME, FMP, and FMPA proceedings entries; some informal workshops intentionally carry no publisher.}
@proceedings{eurosys2006, key = {EUROSYS-2006}, title = {Proceedings of the EuroSys 2006 Conference}, booktitle = {Proceedings of the EuroSys 2006 Conference}, location = {Leuven, Belgium}, month = apr, year = 2006 }

@proceedings{fast2010, key = {FAST-2010}, title = {Proceedings of the 7th International Workshop on Formal Aspects of Security \& Trust (FAST 2010)}, booktitle = {Proceedings of the 7th International Workshop on Formal Aspects of Security \& Trust (FAST 2010)}, location = {Pisa, Italy}, month = sep, year = 2010 }

@proceedings{fm2008, key = {FM-2008}, title = {Proceedings of Formal Methods 2008}, booktitle = {Proceedings of Formal Methods 2008}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 5014, month = may, year = 2008, location = {Turku, Finland} }

@proceedings{fmcad1996, key = {FMCAD-1996}, title = {Proceedings of the First International Conference on Formal Methods in Computer-Aided Design ({FMCAD} '96)}, booktitle = {Proceedings of the First International Conference on Formal Methods in Computer-Aided Design ({FMCAD} '96)}, editor = {Mandayam Srivas and Albert Camilleri}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 1166, year = 1996 }

@proceedings{fmcad2012, key = {FMCAD-2012}, title = {Formal Methods in Computer-Aided Design ({FMCAD} 2012)}, booktitle = {Formal Methods in Computer-Aided Design ({FMCAD} 2012)}, editor = {Gianpiero Cabodi and Satnam Singh}, publisher = {IEEE}, month = oct, year = 2012, location = {Cambridge, UK} }

@proceedings{fmcad2013, key = {FMCAD-2013}, title = {Formal Methods in Computer-Aided Design ({FMCAD} 2013)}, booktitle = {Formal Methods in Computer-Aided Design ({FMCAD} 2013)}, editor = {Jobstmann, Barbara and Ray, Sandip}, publisher = {IEEE}, month = oct, year = 2013, location = {Portland, Oregon, USA} }

@proceedings{fme2005, key = {FME-2005}, title = {Proceedings of Formal Methods Europe 2005}, booktitle = {Proceedings of Formal Methods Europe 2005}, editor = {John Fitzgerald and Ian J. Hayes and Andrzej Tarlecki}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 3582, month = jul, year = 2005, location = {Newcastle, UK} }

@proceedings{fmp1997, key = {FMP-1997}, title = {Formal Methods Pacific '97}, booktitle = {Formal Methods Pacific '97}, editor = {Lindsay Groves and Steve Reeves}, month = jul, year = 1997, location = {Wellington, New Zealand}, publisher = {Springer}, url = {http://www.cs.auckland.ac.nz/CDMTCS/docs/fmp97.html} }

@proceedings{fmpa1993, key = {FMPA-1993}, title = {Proceedings of the International Conference on Formal Methods in Programming and Their Applications}, booktitle = {Proceedings of the International Conference on Formal Methods in Programming and Their Applications}, editor = {Dines Bj{\o}rner and Manfred Broy and Igor V. Pottosin}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 735, month = jun, year = 1993, location = {Novosibirsk, Russia} }

@proceedings{ftp1997, key = {FTP-1997}, title = {International Workshop on First-Order Theorem Proving (FTP '97)}, booktitle = {International Workshop on First-Order Theorem Proving (FTP '97)}, editor = {Maria Paola Bonacina and Ulrich Furbach}, month = oct, year = 1997, location = {Schloss Hagenberg, Austria}, series = {RISC-Linz Report Series}, number = {97-50}, organization = {Johannes Kepler Universit{\"a}t Linz}, url = {http://www.logic.at/ftp97} }

@proceedings{ftp1998, key = {FTP-1998}, title = {International Workshop on First-Order Theorem Proving (FTP '98)}, booktitle = {International Workshop on First-Order Theorem Proving (FTP '98)}, editor = {Ricardo Caferra and Gernot Salzer}, month = nov, year = 1998, series = {Technical Report E1852-GS-981}, organization = {Technische Universit{\"a}t Wien}, location = {Vienna, Austria}, url = {http://www.logic.at/ftp98} }

@comment{Haskell Symposium, HCSS, HOA, and early HOL workshop proceedings. Note hoa1993 and hol1991 were published the year after the meeting, so year deliberately differs from the workshop date in the title.}
@proceedings{haskell2008, key = {HASKELL-2008}, editor = {Andy Gill}, title = {Haskell '08: Proceedings of the first ACM SIGPLAN symposium on Haskell}, booktitle = {Haskell '08: Proceedings of the first ACM SIGPLAN symposium on Haskell}, month = sep, year = 2008, location = {Victoria, BC, Canada}, publisher = {ACM} }

@proceedings{haskell2013, key = {HASKELL-2013}, title = {Haskell '13: Proceedings of the 2013 ACM SIGPLAN symposium on Haskell}, booktitle = {Haskell '13: Proceedings of the 2013 ACM SIGPLAN symposium on Haskell}, editor = {Shan, Chung-chieh}, month = sep, year = 2013, location = {Boston, Massachusetts, USA}, publisher = {ACM} }

@proceedings{hcss2006, key = {HCSS-2006}, title = {High Confidence Software and Systems: HCSS 2006}, booktitle = {High Confidence Software and Systems: HCSS 2006}, month = apr, year = 2006 }

@proceedings{hcss2007, key = {HCSS-2007}, title = {High Confidence Software and Systems: HCSS 2007}, booktitle = {High Confidence Software and Systems: HCSS 2007}, month = may, year = 2007 }

@proceedings{hcss2009, key = {HCSS-2009}, title = {High Confidence Software and Systems: HCSS 2009}, booktitle = {High Confidence Software and Systems: HCSS 2009}, month = may, year = 2009 }

@proceedings{hcss2012, key = {HCSS-2012}, editor = {John Launchbury and Ray Richards}, title = {Proceedings of the Twelfth Annual High Confidence Software and Systems Conference (HCSS 2012)}, booktitle = {Proceedings of the Twelfth Annual High Confidence Software and Systems Conference (HCSS 2012)}, month = may, year = 2012, location = {Annapolis, MD, USA} }

@proceedings{hoa1993, key = {HOA-1993}, editor = {Jan Heering and Karl Meinke and Bernhard M{\"o}ller and Tobias Nipkow}, title = {Higher-Order Algebra, Logic, and Term Rewriting, First International Workshop (HOA '93)}, booktitle = {Higher-Order Algebra, Logic, and Term Rewriting, First International Workshop (HOA '93)}, location = {Amsterdam, The Netherlands}, year = 1994, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 816 }

@proceedings{hol1991, key = {HOL-1991}, title = {Proceedings of the 1991 International Workshop on the HOL Theorem Proving System and its Applications (HOL '91), August 1991}, booktitle = {Proceedings of the 1991 International Workshop on the HOL Theorem Proving System and its Applications (HOL '91), August 1991}, editor = {Myla Archer and Jeffrey J. Joyce and Karl N. Levitt and Phillip J. Windley}, location = {Davis, CA, USA}, publisher = {IEEE Computer Society Press}, year = 1992 }

@proceedings{hol1993, key = {HOL-1993}, title = {Higher Order Logic Theorem Proving and its Applications, 6th International Workshop}, booktitle = {Higher Order Logic Theorem Proving and its Applications, 6th International Workshop}, editor = {Jeffrey J. Joyce and Carl-Johan H. Seger}, month = aug, year = 1993, location = {Vancouver, Canada}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 780 }

@comment{HOL workshop, ICALP, and ICFEM proceedings; hol1995a is the separately issued supplemental volume of hol1995.}
@proceedings{hol1994, key = {HOL-1994}, title = {Higher Order Logic Theorem Proving and Its Applications, 7th International Workshop}, booktitle = {Higher Order Logic Theorem Proving and Its Applications, 7th International Workshop}, editor = {Tom Melham and Juanito Camilleri}, month = sep, year = 1994, location = {Valletta, Malta}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 859 }

@proceedings{hol1995, key = {HOL-1995}, title = {Proceedings of the 8th International Workshop on Higher Order Logic Theorem Proving and Its Applications (HOL '95)}, booktitle = {Proceedings of the 8th International Workshop on Higher Order Logic Theorem Proving and Its Applications (HOL '95)}, editor = {E. T. Schubert and P. J. Windley and J. Alves-Foss}, month = sep, year = 1995, series = {Lecture Notes in Computer Science}, volume = 971, publisher = {Springer}, location = {Aspen Grove, UT, USA} }

@proceedings{hol1995a, key = {HOL-1995a}, title = {Proceedings of the 8th International Workshop on Higher Order Logic Theorem Proving and Its Applications (HOL '95)}, booktitle = {Proceedings of the 8th International Workshop on Higher Order Logic Theorem Proving and Its Applications (HOL '95)}, month = sep, year = 1995, location = {Aspen Grove, UT, USA}, note = {Supplemental proceedings} }

@proceedings{icalp2001, key = {ICALP-2001}, title = {Automata, Languages and Programming}, booktitle = {Automata, Languages and Programming}, editor = {F. Orejas and P. G. Spirakis and J. van Leeuwen}, month = jul, year = 2001, series = {Lecture Notes in Computer Science}, volume = 2076, publisher = {Springer}, location = {Crete, Greece}, url = {http://link.springer-ny.com/link/service/series/0558/tocs/t2076.htm} }

@proceedings{icfem2000, key = {ICFEM-2000}, title = {Proceedings of the 3rd IEEE International Conference on Formal Engineering Methods (ICFEM 2000)}, booktitle = {Proceedings of the 3rd IEEE International Conference on Formal Engineering Methods (ICFEM 2000)}, month = sep, year = 2000, location = {York, England} }

@proceedings{icfp2008, key = {ICFP-2008}, editor = {James Hook and Peter Thiemann}, title = {Proceedings of the 13th ACM SIGPLAN International Conference on Functional Programming (ICFP 2008)}, booktitle = {Proceedings of the 13th ACM SIGPLAN International Conference on Functional Programming (ICFP 2008)}, publisher = {ACM}, month = sep, year = 2008, location = {Victoria, BC, Canada} }

@comment{ICFP 2012 proceedings.}
@proceedings{icfp2012, key = {ICFP-2012}, editor = {Peter Thiemann and Robby Bruce Findler}, title = {Proceedings of the 17th ACM SIGPLAN International Conference on Functional Programming (ICFP 2012)}, booktitle = {Proceedings of the 17th ACM SIGPLAN International Conference on Functional Programming (ICFP 2012)}, publisher = {ACM}, month = sep, year = 2012, location = {Copenhagen, Denmark} }

@proceedings{icms2010, key = {ICMS-2010}, title = {Proceedings of the Third International Congress on Mathematical Software (ICMS 2010)}, booktitle = {Proceedings of the Third International Congress on Mathematical Software (ICMS 2010)}, editor = {Fukuda, Komei and van der Hoeven, Joris and Joswig, Michael and Takayama, Nobuki}, month = sep, year = 2010, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = {6327}, location = {Kobe, Japan}, url = {http://dx.doi.org/10.1007/978-3-642-15582-6} }

@comment{IJCAR 2006 and ITP 2012 proceedings (LNCS).}
@proceedings{ijcar2006, key = {IJCAR-2006}, editor = {Ulrich Furbach and Natarajan Shankar}, title = {Automated Reasoning, Third International Joint Conference (IJCAR 2006)}, booktitle = {Automated Reasoning, Third International Joint Conference (IJCAR 2006)}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 4130, month = aug, year = 2006, location = {Seattle, WA, USA} }

@proceedings{itp2012, key = {ITP-2012}, title = {Third International Conference on Interactive Theorem Proving ({ITP}~2012)}, booktitle = {Third International Conference on Interactive Theorem Proving ({ITP}~2012)}, editor = {Beringer, Lennart and Felty, Amy}, month = aug, year = 2012, location = {Princeton, New Jersey, USA}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 7406 }

@proceedings{lfp1984, key = {LFP-1984}, editor = {Boyer, Robert S. and Schneider, Edward S. and Steele, Jr., Guy L.}, title = {LFP '84: Proceedings of the 1984 ACM Symposium on LISP and functional programming}, booktitle = {LFP '84: Proceedings of the 1984 ACM Symposium on LISP and functional programming}, month = aug, year = 1984, location = {Austin, Texas, United States}, publisher = {ACM} }

@comment{LICS 2004 proceedings.}
@proceedings{lics2004, key = {LICS-2004}, title = {19th IEEE Symposium on Logic in Computer Science (LICS 2004)}, booktitle = {19th IEEE Symposium on Logic in Computer Science (LICS 2004)}, month = jul, year = 2004, location = {Turku, Finland} }

@proceedings{lpar1992, key = {LPAR-1992}, title = {Proceedings of the International Conference on Logic Programming and Automated Reasoning (LPAR '92)}, booktitle = {Proceedings of the International Conference on Logic Programming and Automated Reasoning (LPAR '92)}, editor = {Andrei Voronkov}, location = {St. Petersburg, Russia}, month = jul, year = 1992, series = {Lecture Notes in Artificial Intelligence}, volume = 624, publisher = {Springer} }

@comment{LPAR 2005 and MILCOM 2003 proceedings.}
@proceedings{lpar2005, key = {LPAR-2005}, title = {Logic for Programming, Artificial Intelligence, and Reasoning, 12th International Conference (LPAR 2005)}, booktitle = {Logic for Programming, Artificial Intelligence, and Reasoning, 12th International Conference (LPAR 2005)}, editor = {Geoff Sutcliffe and Andrei Voronkov}, location = {Montego Bay, Jamaica}, month = dec, year = 2005, series = {Lecture Notes in Artificial Intelligence}, volume = 3835, publisher = {Springer} }

@proceedings{milcom2003, key = {MILCOM-2003}, title = {Military Communications Conference 2003}, booktitle = {Military Communications Conference 2003}, month = oct, year = 2003, publisher = {IEEE}, url = {http://ieeexplore.ieee.org/xpl/RecentCon.jsp?punumber=9057} }

@misc{namkm2004, key = {NA-MKM-2004}, title = {Proceedings of the Second {North American} Workshop on Mathematical Knowledge Management ({NA-MKM} 2004)}, booktitle = {Proceedings of the Second {North American} Workshop on Mathematical Knowledge Management ({NA-MKM} 2004)}, author = {Bernd Wegner}, editor = {Bernd Wegner}, month = jan, year = 2004, location = {Phoenix, Arizona, USA} }

@comment{NASA Formal Methods, PEPM, PLMMS, and POPL entries; the techreport entries duplicate the editor into author because classic BibTeX @techreport requires an author for labels.}
@techreport{nfm2010, key = {NFM-2010}, title = {{P}roceedings of the Second {NASA} Formal Methods Symposium ({NFM 2010})}, booktitle = {{P}roceedings of the Second {NASA} Formal Methods Symposium ({NFM 2010})}, author = {C{\'e}sar Mu{\~{n}}oz}, editor = {C{\'e}sar Mu{\~{n}}oz}, institution = {National Aeronautics and Space Administration}, number = {NASA/CP-2010-216215}, month = apr, year = 2010, address = {Langley Research Center, Hampton VA 23681-2199, USA} }

@proceedings{nfm2011, key = {NFM-2011}, title = {Third International Symposium on {NASA} Formal Methods ({NFM 2011})}, booktitle = {Third International Symposium on {NASA} Formal Methods ({NFM 2011})}, editor = {Mihaela Bobaru and Klaus Havelund and Gerard J. Holzmann and Rajeev Joshi}, month = apr, year = 2011, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 6617, location = {Pasadena, California, United States} }

@proceedings{pepm2010, key = {PEPM-2010}, title = {Proceedings of the ACM SIGPLAN workshop on Partial Evaluation and Program Manipulation (PEPM 2010)}, booktitle = {Proceedings of the ACM SIGPLAN workshop on Partial Evaluation and Program Manipulation (PEPM 2010)}, editor = {John P. Gallagher and Janis Voigtl{\"a}nder}, month = jan, year = 2010, publisher = {ACM}, location = {Madrid, Spain} }

@proceedings{plmms2009, key = {PLMMS-2009}, title = {PLMMS '09: Proceedings of the ACM SIGSAM 2009 International Workshop on Programming Languages for Mechanized Mathematics Systems}, booktitle = {PLMMS '09: Proceedings of the ACM SIGSAM 2009 International Workshop on Programming Languages for Mechanized Mathematics Systems}, editor = {Gabriel Dos Reis and Laurent Th{\'e}ry}, month = aug, year = 2009, location = {Munich, Germany}, publisher = {ACM} }

@proceedings{popl1978, key = {POPL-1978}, title = {POPL '78: Proceedings of the 5th ACM SIGACT-SIGPLAN symposium on Principles of programming languages}, booktitle = {POPL '78: Proceedings of the 5th ACM SIGACT-SIGPLAN symposium on Principles of programming languages}, year = 1978, location = {Tucson, Arizona}, publisher = {ACM}, address = {New York, NY, USA} }

@proceedings{popl2002, key = {POPL-2002}, title = {{POPL} 2002: The 29th {ACM} SIGPLAN-SIGACT Symposium on Principles of Programming Languages}, booktitle = {{POPL} 2002: The 29th {ACM} SIGPLAN-SIGACT Symposium on Principles of Programming Languages}, month = jan, year = 2002, location = {Portland, OR, USA}, publisher = {ACM Press} }

@proceedings{popl2006, key = {POPL-2006}, title = {Proceedings of the 33rd ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages (POPL 2006)}, booktitle = {Proceedings of the 33rd ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages (POPL 2006)}, editor = {J. Gregory Morrisett and Simon L. Peyton Jones}, publisher = {ACM}, month = jan, year = 2006, location = {Charleston, South Carolina, USA} }

@comment{PxTP 2013 proceedings (EasyChair EPiC Series).}
@proceedings{pxtp2013, key = {PXTP-2013}, title = {Proceedings of the Third International Workshop on Proof Exchange for Theorem Proving (PxTP 2013)}, booktitle = {Proceedings of the Third International Workshop on Proof Exchange for Theorem Proving (PxTP 2013)}, editor = {Jasmin Christian Blanchette and Josef Urban}, series = {EPiC Series}, volume = 14, month = jun, year = 2013, publisher = {EasyChair} }

@proceedings{qapl2004, key = {QAPL-2004}, title = {Proceedings of the 2nd Workshop on Quantitative Aspects of Programming Languages ({QAPL} 2004)}, booktitle = {Proceedings of the 2nd Workshop on Quantitative Aspects of Programming Languages ({QAPL} 2004)}, editor = {A. Cerone and A. Di Pierro}, month = jan, year = 2005, publisher = {Elsevier}, volume = 112, series = {Electronic Notes in Theoretical Computer Science (ENTCS)}, location = {Barcelona, Spain}, url = {http://www.sciencedirect.com/science/journal/15710661} }

@comment{RTA 2001 and SAT 2012 proceedings; sat2012 is the crossref parent of abal2012 and correctly appears after it in the file.}
@proceedings{rta2001, key = {RTA-2001}, title = {Rewriting Techniques and Applications (RTA 2001)}, booktitle = {Rewriting Techniques and Applications (RTA 2001)}, month = may, year = 2001, location = {Utrecht, The Netherlands}, series = {Lecture Notes in Computer Science}, volume = 2051, publisher = {Springer}, url = {http://link.springer-ny.com/link/service/series/0558/tocs/t2051.htm} }

@proceedings{sat2012, key = {SAT-2012}, title = {Proceedings of the 15th International Conference on the Theory and Applications of Satisfiability Testing (SAT 2012)}, booktitle = {Proceedings of the 15th International Conference on the Theory and Applications of Satisfiability Testing (SAT 2012)}, month = jun, year = 2012, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 7317, location = {Trento, Italy} }

@proceedings{scd2004, key = {SCD-2004}, title = {Summary of a Workshop on Software Certification and Dependability}, booktitle = {Summary of a Workshop on Software Certification and Dependability}, year = 2004, publisher = {The National Academies Press}, url = {http://www.nap.edu/catalog.php?record_id=11133}, abstract = {Certification of critical software systems (e.g., for safety and security) is important to help ensure their dependability. Today, certification relies as much on evaluation of the software development process as it does on the system's properties. While the latter are preferable, the complexity of these systems usually makes them extremely difficult to evaluate. To explore these and related issues, the National Coordination Office for Information Technology Research and Development asked the NRC to undertake a study to assess the current state of certification in dependable systems. The study is in two phases: the first to frame the problem and the second to assess it. This report presents a summary of a workshop held as part of the first phase. The report presents a summary of workshop participants' presentations and subsequent discussion. It covers, among other things, the strengths and limitations of process; new challenges and opportunities; experience to date; organization context; and cost-effectiveness of software engineering techniques. A consensus report will be issued upon completion of the second phase.} }

@proceedings{sfm2006, key = {SFM-2006}, title = {Formal Methods for Hardware Verification, 6th International School on Formal Methods for the Design of Computer, Communication, and Software Systems ({SFM} 2006)}, booktitle = {Formal Methods for Hardware Verification, 6th International School on Formal Methods for the Design of Computer, Communication, and Software Systems ({SFM} 2006)}, editor = {Marco Bernardo and Alessandro Cimatti}, month = may, year = 2006, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 3965, location = {Bertinoro, Italy} }

@proceedings{sharcs2012, key = {SHARCS-2012}, title = {Proceedings of the 5th Special-Purpose Hardware for Attacking Cryptographic Systems Workshop (SHARCS 2012)}, booktitle = {Proceedings of the 5th Special-Purpose Hardware for Attacking Cryptographic Systems Workshop (SHARCS 2012)}, editor = {Bernstein, Daniel J. and Gaj, Kris}, month = mar, year = 2012, location = {Washington DC, USA} }

@proceedings{software1968, key = {SOFTWARE-1968}, editor = {P. Naur and B. Randell}, title = {Software Engineering}, booktitle = {Software Engineering}, month = oct, year = 1968, publisher = {Scientific Affairs Division, NATO}, location = {Garmisch, Germany}, url = {http://homepages.cs.ncl.ac.uk/brian.randell/NATO/} }

@proceedings{sosp2009, key = {SOSP-2009}, editor = {Jeanna Neefe Matthews and Thomas E. Anderson}, title = {Proceedings of the 22nd ACM Symposium on Operating Systems Principles}, booktitle = {Proceedings of the 22nd ACM Symposium on Operating Systems Principles}, month = oct, year = 2009, publisher = {ACM}, location = {Big Sky, MT, USA} }

@proceedings{sss2010, key = {SSS-2010}, editor = {Chris Dale and Tom Anderson}, title = {Making Systems Safer: Proceedings of the Eighteenth Safety-Critical Systems Symposium}, booktitle = {Making Systems Safer: Proceedings of the Eighteenth Safety-Critical Systems Symposium}, month = feb, year = 2010, publisher = {Springer}, location = {Bristol, UK} }

@proceedings{ssv2008, key = {SSV-2008}, title = {Proceedings of the 3rd International Workshop on Systems Software Verification (SSV 2008)}, booktitle = {Proceedings of the 3rd International Workshop on Systems Software Verification (SSV 2008)}, month = jul, year = 2008, publisher = {Elsevier}, series = {Electronic Notes in Theoretical Computer Science}, volume = 217 }

@techreport{strata2003, key = {STRATA-2003}, title = {Design and Application of Strategies/Tactics in Higher Order Logics (STRATA 2003)}, booktitle = {Design and Application of Strategies/Tactics in Higher Order Logics (STRATA 2003)}, author = {Myla Archer and Ben Di Vito and C{\'{e}}sar Mu{\~{n}}oz}, editor = {Myla Archer and Ben Di Vito and C{\'{e}}sar Mu{\~{n}}oz}, month = sep, year = 2003, institution = {{NASA}}, series = {{NASA} Technical Reports}, number = {NASA/CP-2003-212448}, url = {http://techreports.larc.nasa.gov/ltrs/PDF/2003/cp/NASA-2003-cp212448.pdf} }

@proceedings{tacas2000, key = {TACAS-2000}, title = {Tools and Algorithms for the Construction and Analysis of Systems: 6th International Conference, {TACAS} 2000}, booktitle = {Tools and Algorithms for the Construction and Analysis of Systems: 6th International Conference, {TACAS} 2000}, editor = {Susanne Graf and Michael Schwartzbach}, series = {Lecture Notes in Computer Science}, volume = 1785, publisher = {Springer}, month = mar, year = 2000, location = {Berlin, Germany} }

@proceedings{tphols1996, key = {TPHOLS-1996}, title = {Theorem Proving in Higher Order Logics, 9th International Conference, TPHOLs '96}, booktitle = {Theorem Proving in Higher Order Logics, 9th International Conference, TPHOLs '96}, editor = {Joakim von Wright and Jim Grundy and John Harrison}, month = aug, year = 1996, series = {Lecture Notes in Computer Science}, volume = 1125, publisher = {Springer}, location = {Turku, Finland} }

@proceedings{tphols1997, key = {TPHOLS-1997}, title = {Theorem Proving in Higher Order Logics, 10th International Conference, TPHOLs '97}, booktitle = {Theorem Proving in Higher Order Logics, 10th International Conference, TPHOLs '97}, editor = {Elsa L. Gunter and Amy Felty}, month = aug, year = 1997, location = {Murray Hill, NJ, USA}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 1275 }

@proceedings{tphols1997a, key = {TPHOLS-1997a}, title = {Supplemental Proceedings of the 10th International Conference on Theorem Proving in Higher Order Logics, TPHOLs '97}, booktitle = {Supplemental Proceedings of the 10th International Conference on Theorem Proving in Higher Order Logics, TPHOLs '97}, editor = {Elsa L. Gunter}, month = aug, year = 1997, location = {Murray Hill, NJ, USA} }

@proceedings{tphols1998, key = {TPHOLS-1998}, title = {Theorem Proving in Higher Order Logics, 11th International Conference, {TPHOLs} '98}, booktitle = {Theorem Proving in Higher Order Logics, 11th International Conference, {TPHOLs} '98}, editor = {Jim Grundy and Malcolm Newey}, month = sep, year = 1998, series = {Lecture Notes in Computer Science}, volume = 1497, publisher = {Springer}, location = {Canberra, Australia} }

@proceedings{tphols1999, key = {TPHOLS-1999}, title = {Theorem Proving in Higher Order Logics, 12th International Conference, {TPHOLs} '99}, booktitle = {Theorem Proving in Higher Order Logics, 12th International Conference, {TPHOLs} '99}, editor = {Yves Bertot and Gilles Dowek and Andr\'e Hirschowitz and Christine Paulin and Laurent Th\'ery}, month = sep, year = 1999, series = {Lecture Notes in Computer Science}, volume = 1690, publisher = {Springer}, location = {Nice, France}, url = {http://link.springer.de/link/service/series/0558/tocs/t1690.htm} }

@proceedings{tphols2000, key = {TPHOLS-2000}, title = {Theorem Proving in Higher Order Logics, 13th International Conference: {TPHOLs} 2000}, booktitle = {Theorem Proving in Higher Order Logics, 13th International Conference: {TPHOLs} 2000}, editor = {Mark Aagaard and John Harrison}, month = aug, year = 2000, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 1869, location = {Portland, OR, USA} }

@techreport{tphols2000a, key = {TPHOLS-2000a}, title = {{TPHOLs} 2000: Supplemental Proceedings}, booktitle = {{TPHOLs} 2000: Supplemental Proceedings}, author = {Mark Aagaard and John Harrison and Tom Schubert}, editor = {Mark Aagaard and John Harrison and Tom Schubert}, month = aug, year = 2000, institution = {Oregon Graduate Institute}, series = {Oregon Graduate Institute Technical Reports}, number = {CSE-00-009} }

@proceedings{tphols2001, key = {TPHOLS-2001}, title = {14th International Conference on Theorem Proving in Higher Order Logics: {TPHOLs} 2001}, booktitle = {14th International Conference on Theorem Proving in Higher Order Logics: {TPHOLs} 2001}, year = 2001, editor = {Richard J. Boulton and Paul B. Jackson}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 2152, month = sep, location = {Edinburgh, Scotland}, url = {http://link.springer.de/link/service/series/0558/tocs/t2152.htm} }

@techreport{tphols2001a, key = {TPHOLS-2001a}, title = {{TPHOLs} 2001: Supplemental Proceedings}, booktitle = {{TPHOLs} 2001: Supplemental Proceedings}, author = {Richard J. Boulton and Paul B. Jackson}, editor = {Richard J. Boulton and Paul B. Jackson}, month = sep, year = 2001, institution = {Division of Informatics, University of Edinburgh}, series = {University of Edinburgh Informatics Report Series}, number = {EDI-INF-RR-0046}, url = {http://www.informatics.ed.ac.uk/publications/report/0046.html} }

@proceedings{tphols2002, key = {TPHOLS-2002}, title = {15th International Conference on Theorem Proving in Higher Order Logics: {TPHOLs} 2002}, booktitle = {15th International Conference on Theorem Proving in Higher Order Logics: {TPHOLs} 2002}, year = 2002, editor = {V{\'{i}}ctor A. Carre{\~{n}}o and C{\'{e}}sar A. Mu{\~{n}}oz and Sofi{\`{e}}ne Tahar}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 2410, month = aug, location = {Hampton, VA, USA}, url = {http://link.springer.de/link/service/series/0558/tocs/t2410.htm} }

@proceedings{tphols2003, key = {TPHOLS-2003}, title = {16th International Conference on Theorem Proving in Higher Order Logics: {TPHOLs} 2003}, booktitle = {16th International Conference on Theorem Proving in Higher Order Logics: {TPHOLs} 2003}, year = 2003, editor = {David Basin and Burkhart Wolff}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 2758, month = sep, location = {Rome, Italy}, url = {http://link.springer.de/link/service/series/0558/tocs/t2758.htm} }

@proceedings{tphols2004, key = {TPHOLS-2004}, title = {17th International Conference on Theorem Proving in Higher Order Logics: {TPHOLs} 2004}, booktitle = {17th International Conference on Theorem Proving in Higher Order Logics: {TPHOLs} 2004}, year = 2004, editor = {Konrad Slind and Annette Bunker and Ganesh Gopalakrishnan}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = 3223, month = sep, location = {Park City, Utah, USA} }

@techreport{tphols2004a, key = {TPHOLS-2004a}, title = {{TPHOLs} 2004: Emerging Trends}, booktitle = {{TPHOLs} 2004: Emerging Trends}, author = {Konrad Slind}, editor = {Konrad Slind}, month = sep, year = 2004, institution = {School of Computing, University of Utah}, series = {School of Computing, University of Utah}, number = {UUCS-04}, url = {http://www.cs.utah.edu/techreports} }

@proceedings{tphols2005,
  key       = {TPHOLS-2005},
  editor    = {Joe Hurd and Tom Melham},
  title     = {18th International Conference on Theorem Proving in Higher Order Logics: {TPHOLs} 2005},
  booktitle = {18th International Conference on Theorem Proving in Higher Order Logics: {TPHOLs} 2005},
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  volume    = 3603,
  location  = {Oxford, UK},
  month     = aug,
  year      = 2005,
  url       = {http://www.springeronline.com/3-540-28372-2}
}

@techreport{tphols2005a,
  key         = {TPHOLS-2005a},
  author      = {Joe Hurd and Edward Smith and Ashish Darbari},
  editor      = {Joe Hurd and Edward Smith and Ashish Darbari},
  title       = {Theorem Proving in Higher Order Logics: Emerging Trends Proceedings},
  booktitle   = {Theorem Proving in Higher Order Logics: Emerging Trends Proceedings},
  institution = {Oxford University Computing Laboratory},
  series      = {Oxford University Computing Laboratory Research Reports},
  number      = {PRG-RR-05-02},
  month       = aug,
  year        = 2005,
  url         = {http://www.gilith.com/research/papers}
}

@proceedings{tphols2007,
  key       = {TPHOLS-2007},
  editor    = {Klaus Schneider and Jens Brandt},
  title     = {20th International Conference on Theorem Proving in Higher Order Logics: {TPHOLs} 2007},
  booktitle = {20th International Conference on Theorem Proving in Higher Order Logics: {TPHOLs} 2007},
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  volume    = 4732,
  location  = {Kaiserslautern, Germany},
  month     = sep,
  year      = 2007,
  url       = {http://link.springer.de/link/service/series/0558/tocs/t4732.htm}
}

@techreport{tphols2007a,
  key         = {TPHOLS-2007a},
  author      = {Klaus Schneider and Jens Brandt},
  editor      = {Klaus Schneider and Jens Brandt},
  title       = {Theorem Proving in Higher Order Logics: Emerging Trends Proceedings},
  booktitle   = {Theorem Proving in Higher Order Logics: Emerging Trends Proceedings},
  institution = {Department of Computer Science, University of Kaiserslautern},
  series      = {Department of Computer Science, University of Kaiserslautern Technical Reports},
  number      = {364/07},
  month       = aug,
  year        = 2007,
  url         = {http://es.cs.uni-kl.de/TPHOLs-2007/proceedings.html}
}

Note (review): editor list previously separated the first two names with a comma
instead of " and " (BibTeX would mis-parse the name list), and used raw
non-ASCII accents in this otherwise classic-BibTeX file; spellings now match
the same editors in entry tphols2002.
@proceedings{tphols2008,
  key       = {TPHOLS-2008},
  editor    = {Otmane Ait Mohamed and C{\'{e}}sar A. Mu{\~{n}}oz and Sofi{\`{e}}ne Tahar},
  title     = {21st International Conference on Theorem Proving in Higher Order Logics: {TPHOLs} 2008},
  booktitle = {21st International Conference on Theorem Proving in Higher Order Logics: {TPHOLs} 2008},
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  volume    = 5170,
  location  = {Montreal, Canada},
  month     = aug,
  year      = 2008,
  url       = {http://link.springer.de/link/service/series/0558/tocs/t5170.htm}
}

@proceedings{tphols2009,
  key       = {TPHOLS-2009},
  editor    = {Stefan Berghofer and Tobias Nipkow and Christian Urban and Makarius Wenzel},
  title     = {Theorem Proving in Higher Order Logics, 22nd International Conference ({TPHOLs}~2009)},
  booktitle = {Theorem Proving in Higher Order Logics, 22nd International Conference ({TPHOLs}~2009)},
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  volume    = 5674,
  month     = aug,
  year      = 2009
}

@proceedings{trybulec2007,
  editor    = {Matuszewski, R. and Zalewska, A.},
  title     = {From Insight to Proof: Festschrift in Honour of Andrzej Trybulec},
  booktitle = {From Insight to Proof: Festschrift in Honour of Andrzej Trybulec},
  publisher = {University of Bia{\l}ystok},
  series    = {Studies in Logic, Grammar and Rhetoric},
  volume    = {10(23)},
  year      = 2007,
  url       = {http://mizar.org/trybulec65/}
}

Note (review): fixed editor surname typo "Gim{\'e}nex" -> "Gim{\'e}nez"
(Eduardo Gim{\'e}nez, LNCS 1512 co-editor) and the misplaced apostrophe/space
in the workshop name ("TYPES' 96" -> "TYPES '96").
@proceedings{types1996,
  key       = {TYPES-1996},
  editor    = {Eduardo Gim{\'e}nez and Christine Paulin-Mohring},
  title     = {Types for Proofs and Programs: International Workshop {TYPES '96}},
  booktitle = {Types for Proofs and Programs: International Workshop {TYPES '96}},
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  volume    = 1512,
  location  = {Aussois, France},
  month     = sep,
  year      = 1996
}

Note (review): braced the acronym {TYPES} in title/booktitle so
sentence-casing styles cannot downcase it, matching the types1996 entry.
@proceedings{types2003,
  key       = {TYPES-2003},
  editor    = {Stefano Berardi and Mario Coppo and Ferruccio Damiani},
  title     = {Types for Proofs and Programs: Third International Workshop, {TYPES} 2003},
  booktitle = {Types for Proofs and Programs: Third International Workshop, {TYPES} 2003},
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  volume    = 3085,
  location  = {Torino, Italy},
  month     = may,
  year      = 2004
}

@proceedings{verify2010,
  key       = {VERIFY-2010},
  editor    = {Aderhold, M. and Autexier, S. and Mantel, H.},
  title     = {Proceedings of the 6th International Verification Workshop (VERIFY 2010)},
  booktitle = {Proceedings of the 6th International Verification Workshop (VERIFY 2010)},
  location  = {Edinburgh, Scotland},
  month     = jul,
  year      = 2010
}

@proceedings{vmcai2004,
  key       = {VMCAI-2004},
  editor    = {Bernhard Steffen and Giorgio Levi},
  title     = {Proceedings of the 5th International Conference on Verification, Model Checking, and Abstract Interpretation (VMCAI 2004)},
  booktitle = {Proceedings of the 5th International Conference on Verification, Model Checking, and Abstract Interpretation (VMCAI 2004)},
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  volume    = 2937,
  location  = {Venice, Italy},
  month     = jan,
  year      = 2004
}

@comment{This file was generated by bibtex2html 1.97.}