From 04f43102705a3f722f3f902d5d71368e874f552a Mon Sep 17 00:00:00 2001
From: Tim Daly
Date: Sat, 30 Dec 2017 18:54:24 -0500
Subject: [PATCH] books/bookvol13 next draft
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Goal: Proving Axiom Correct
\index{Blakley, Bob}
\begin{chunk}{axiom.bib}
@inproceedings{Blak96,
author = "Blakley, Bob",
title = {{The Emperor's Old Armor}},
booktitle = "Proc. 1996 New Security Paradigms Workshop",
publisher = "ACM",
year = "1996",
paper = "Blak96.pdf"
}
\end{chunk}
\index{Brooks, Frederick P.}
\begin{chunk}{axiom.bib}
@article{Broo87,
author = "Brooks, Frederick P.",
title = {{No Silver Bullet: Essence and Accidents of Software Engineering}},
journal = "IEEE Computer",
volume = "20",
number = "4",
pages = "10--19",
year = "1987",
abstract =
"Fashioning complex conceptual constructs is the essence; accidental
tasks arise in representing the constructs in language. Past progress
has so reduced the accidental tasks that future progress now depends
upon addressing the essence.",
paper = "Broo87.pdf"
}
\end{chunk}
\index{Glass, Robert L.}
\begin{chunk}{axiom.bib}
@article{Glas94,
author = "Glass, Robert L.",
title = {{The Software-Research Crisis}},
journal = "IEEE Software",
volume = "11",
number = "6",
year = "1994",
abstract =
"With the advantage of more than 25 years' hindsight, this twenty-first
century author looks askance at the 'crisis' in software practice and
expresses deep concern for a crisis in software research.",
paper = "Glas94.pdf"
}
\end{chunk}
\index{Fenton, Norman}
\begin{chunk}{axiom.bib}
@article{Fent93,
author = "Fenton, Norman",
title = {{How Effective Are Software Engineering Methods}},
journal = "J. Systems Software",
volume = "22",
pages = "141--148",
year = "1993",
abstract =
"For 25 years, software engineers have sought methods which they hope
can provide a technological fix for the “software crisis.” Proponents
of specific methods claim that their use leads to significantly
improved quality and productivity. Such claims are rarely, if ever,
backed up by hard evidence. We show that often, where real empirical
evidence does exist, the results are counter to the views of the
so-called experts. We examine the impact on the software industry of
continuing to place our trust in unproven, and often revolutionary,
methods. The very poor state of the art of empirical assessment in
software engineering can be explained in part by inappropriate or
inadequate use of measurement. Numerous empirical studies are flawed
because of their poor experimental design and lack of adherence to
proper measurement principles.",
paper = "Fent93.pdf"
}
\end{chunk}
\index{Lindsay, Peter A.}
\begin{chunk}{axiom.bib}
@article{Lind88,
author = "Lindsay, Peter A.",
title = {{A Survey of Mechanical Support for Formal Reasoning}},
journal = "Software Engineering Journal",
volume = "3",
number = "1",
year = "1988",
pages = "3--27",
paper = "Lind88.pdf"
}
\end{chunk}
\index{Martin-L\"of, P.}
\begin{chunk}{axiom.bib}
@inproceedings{Mart79,
author = "Martin-L{\"o}f, P.",
title = {{Constructive Mathematics and Computer Programming}},
booktitle = "Proc. 6th Int. Congress for Logic, Methodology and
Philosophy of Science",
publisher = "North-Holland",
year = "1979",
pages = "153--179",
abstract =
"If programming is understood not as the writing of instructions for
this or that computing machine but as the design of methods of
computation that it is the computer's duty to execute (a difference
that Dijkstra has referred to as the difference between computer
science and computing science), then it no longer seems possible to
distinguish the discipline of programming from constructive
mathematics. This explains why the intuitionistic theory of types,
which was originally developed as a symbolism for the precise
codification of constructive mathematics, may equally well be viewed
as a programming language. As such it provides a precise notation not
only, like other programming languages, for the programs themselves
but also for the tasks that the programs are supposed to
perform. Moreover, the inference rules of the theory of types, which
are again completely formal, appear as rules of correct program
synthesis. Thus the correctness of a program written in the theory of
types is proved formally at the same time as it is being
synthesized.",
paper = "Mart79.pdf"
}
\end{chunk}
\index{Martin-L\"of, P.}
\begin{chunk}{axiom.bib}
@inproceedings{Mart73,
author = "Martin-L{\"o}f, P.",
title = {{An Intuitionistic Theory of Types}},
booktitle = "Logic Colloquium 1973",
editor = "Rose, H.E. and Shepherdson, J.G.",
publisher = "North-Holland",
pages = "73--118",
year = "1973",
abstract =
"The theory of types with which we shall be concerned is intended to
be a full scale system for formalizing intuitionistic mathematics as
developed, for example, in the book by Bishop 1967. The language of
the theory is richer than the language of first order predicate
logic. This makes it possible to strengthen the axioms for existence
and disjunction. In the case of existence, the possibility of
strengthening the usual elimination rule seems first to have been
indicated by Howard 1969, whose proposed axioms are special cases of
the existential elimination rule of the present theory. Furthermore,
there is a reflection principle which links the generation of objects
and types and plays somewhat the same role for the present theory as
does the replacement axiom for Zermelo-Fraenkel set theory.
An earlier, not yet conclusive, attempt at formulating a theory of
this kind was made by Scott 1970. Also related, although less closely,
are the type and logic free theories of constructions of Kreisel 1962
and 1965 and Goodman 1970.
In its first version, the present theory was based on the strongly
impredicative axiom that there is a type of all types whatsoever,
which is at the same time a type and an object of that type. This
axiom had to be abandoned, however, after it was shown to lead to a
contradiction by Jean-Yves Girard. I am very grateful to him for
showing me his paradox. The change that it necessitated is so drastic
that my theory no longer contains intuitionistic simple type theory as
it originally did. Instead, its proof theoretic strength should be
close to that of predicative analysis.",
paper = "Mart73.pdf"
}
\end{chunk}
\index{Bates, Joseph L.}
\index{Constable, Robert L.}
\begin{chunk}{axiom.bib}
@article{Bate85,
author = "Bates, Joseph L. and Constable, Robert L.",
title = {{Proofs as Programs}},
journal = "ACM TOPLAS",
volume = "7",
number = "1",
year = "1985",
abstract =
"The significant intellectual cost of programming is for problem
solving and explaining, not for coding. Yet programming systems offer
mechanical assistance for the coding process exclusively. We
illustrate the use of an implemented program development system,
called PRL ('pearl'), that provides automated assistance with the
difficult part. The problem and its explained solution are seen as
formal objects in a constructive logic of the data domains. These
formal explanations can be executed at various stages of
completion. The most incomplete explanations resemble applicative
programs, the most complete are formal proofs.",
paper = "Bate85.pdf"
}
\end{chunk}
\index{Coquand, Thierry}
\index{Huet, Gerard}
\begin{chunk}{axiom.bib}
@article{Coqu85,
author = "Coquand, Thierry and Huet, Gerard",
title = {{Constructions: A Higher Order Proof System for Mechanizing
Mathematics}},
journal = "LNCS",
volume = "203",
pages = "151--184",
year = "1985",
abstract =
"We present an extensive set of mathematical propositions and proofs
in order to demonstrate the power of expression of the theory of
constructions.",
paper = "Coqu85.pdf"
}
\end{chunk}
\index{Calude, C.S.}
\index{Calude, E.}
\index{Marcus, S.}
\begin{chunk}{axiom.bib}
@techreport{Calu07,
author = "Calude, C.S. and Calude, E. and Marcus, S.",
title = {{Proving and Programming}},
type = "technical report",
institution = "Centre for Discrete Mathematics and Theoretical Computer
Science",
number = "CDMTCS-309",
year = "2007",
abstract =
"There is a strong analogy between proving theorems in mathematics and
writing programs in computer science. This paper is devoted to an
analysis, from the perspective of this analogy, of proof in
mathematics. We will argue that while the Hilbertian notion of proof
has few chances to change, future proofs will be of various types,
will play different roles, and their truth will be checked
differently. Programming gives mathematics a new form of
understanding. The computer is the driving force behind these
changes.",
paper = "Calu07.pdf"
}
\end{chunk}
\index{Fass, Leona F.}
\begin{chunk}{axiom.bib}
@article{Fass04,
author = "Fass, Leona F.",
title = {{Approximations, Anomalies and ``The Proof of Correctness Wars''}},
journal = "ACM SIGSOFT Software Engineering Notes",
volume = "29",
number = "2",
year = "2004",
abstract =
"We discuss approaches to establishing 'correctness' and describe the
usefulness of logic-based model checkers for producing better
practical system designs. While we could develop techniques for
'constructing correctness' in our theoretical behavioral-modeling
research, when applied to Real World processes such as software
development only approximate correctness might be established and
anomalous behaviors subsequently found. This we view as a positive
outcome since resultant adaptation, or flaw detection and correction,
may lead to improved development and designs. We find researchers
employing model checking as a formal methods tool to develop empirical
techniques have reached similar conclusions. Thus we cite some
applications of model checking to generate tests and detect defects in
such Real World processes as aviation system development,
fault-detection systems, and security.",
paper = "Fass04.pdf"
}
\end{chunk}
\index{Thurston, William P.}
\begin{chunk}{axiom.bib}
@article{Thur94,
author = "Thurston, William P.",
title = {{On Proof and Progress in Mathematics}},
journal = "Bulletin of the American Mathematical Society",
volume = "30",
number = "2",
year = "1994",
paper = "Thur94.pdf"
}
\end{chunk}
\index{Hoare, C.A.R.}
\begin{chunk}{axiom.bib}
@article{Hoar96,
author = "Hoare, C.A.R.",
title = {{How did software get so reliable without proof?}},
journal = "LNCS",
volume = "1051",
year = "1996",
abstract =
"By surveying current software engineering practice, this paper
reveals that the techniques employed to achieve reliability are little
different from those which have proved effective in all other branches
of modern engineering: rigorous management of procedures for design
inspection and review; quality assurance based on a wide range of
targeted tests; continuous evolution by removal of errors from
products already in widespread use; and defensive programming, among
other forms of deliberate over-engineering. Formal methods and proof
play a small direct role in large scale programming; but they do
provide a conceptual framework and basic understanding to promote the
best of current practice, and point directions for future
improvement.",
paper = "Hoar96.pdf"
}
\end{chunk}
\index{Glass, Robert L.}
\begin{chunk}{axiom.bib}
@article{Glas02,
author = "Glass, Robert L.",
title = {{The Proof of Correctness Wars}},
journal = "Communications of the ACM",
volume = "45",
number = "8",
year = "2002",
pages = "19--21",
paper = "Glas02.pdf"
}
\end{chunk}
\index{Fetzer, James H.}
\begin{chunk}{axiom.bib}
@article{Fetz88,
author = "Fetzer, James H.",
title = {{Program Verification: The Very Idea}},
journal = "Communications of the ACM",
volume = "31",
number = "9",
pages = "1048--1063",
year = "1988",
abstract =
"The notion of program verification appears to trade upon an
equivocation. Algorithms, as logical structures, are appropriate
subjects for deductive verification. Programs, as causal models of
those structures, are not. The success of program verification as a
generally applicable and completely reliable method for guaranteeing
program performance is not even a theoretical possibility.",
paper = "Fetz88.pdf"
}
\end{chunk}
\index{Manna, Zohar}
\begin{chunk}{axiom.bib}
@book{Mann80,
author = "Manna, Zohar",
title = {{Lectures on the Logic of Computer Programming}},
publisher = "Society for Industrial and Applied Mathematics",
year = "1980",
paper = "Mann80.tgz"
}
\end{chunk}
\index{Manna, Zohar}
\index{Waldinger, Richard}
\begin{chunk}{axiom.bib}
@article{Mann78,
author = "Manna, Zohar and Waldinger, Richard",
title = {{The Logic of Computer Programming}},
journal = "IEEE Trans. on Software Engineering",
volume = "4",
number = "3",
year = "1978",
abstract =
"Techniques derived from mathematical logic promise to provide an
alternative to the conventional methodology for constructing,
debugging, and optimizing computer programs. Ultimately, these
techniques are intended to lead to the automation of many of the
facets of the programming process.
This paper provides a unified tutorial exposition of the logical
techniques, illustrating each with examples. The strengths and
limitations of each technique as a practical programming aid are
assessed and attempts to implement these methods in experimental
systems are discussed.",
paper = "Mann78.pdf"
}
\end{chunk}
\index{Hall, Anthony}
\begin{chunk}{axiom.bib}
@article{Hall90,
author = "Hall, Anthony",
title = {{7 Myths of Formal Methods}},
journal = "IEEE Software",
volume = "7",
number = "5",
pages = "11--19",
year = "1990",
abstract =
"Formal methods are difficult, expensive, and not widely useful,
detractors say. Using a case study and other realworld examples, this
article challenges such common myths.",
paper = "Hall90.pdf"
}
\end{chunk}
\index{Dijkstra, Edsger}
\begin{chunk}{axiom.bib}
@misc{Dijk83,
author = "Dijkstra, Edsger",
title = {{Fruits of Misunderstanding}},
year = "1983",
link = "\url{https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD854.html}",
paper = "Dijk83.txt"
}
\end{chunk}
\index{Bradley, Aaron R.}
\index{Manna, Zohar}
\begin{chunk}{axiom.bib}
@book{Brad07,
author = "Bradley, Aaron R. and Manna, Zohar",
title = {{The Calculus of Computation}},
publisher = "Springer",
year = "2007",
isbn = "978-3-540-74112-1"
}
\end{chunk}
\index{Reid, Alastair}
\begin{chunk}{axiom.bib}
@misc{Reid17,
author = "Reid, Alastair",
title = {{How can you trust formally verified software}},
link = "\url{https://media.ccc.de/v/34c3-8915-how_can_you_trust_formally_verified_software#t=12}",
year = "2017",
abstract =
"Formal verification of software has finally started to become viable:
we have examples of formally verified microkernels, realistic
compilers, hypervisors, etc. These are huge achievements and we can
expect to see even more impressive results in the future but the
correctness proofs depend on a number of assumptions about the Trusted
Computing Base that the software depends on. Two key questions to ask
are: Are the specifications of the Trusted Computing Base correct? And
do the implementations match the specifications? I will explore the
philosophical challenges and practical steps you can take in answering
that question for one of the major dependencies: the hardware your
software runs on. I will describe the combination of formal
verification and testing that ARM uses to verify the processor
specification and I will talk about our current challenge: getting the
specification down to zero bugs while the architecture continues to
evolve."
}
\end{chunk}
\index{Chlipala, Adam}
\begin{chunk}{axiom.bib}
@misc{Chli17a,
author = "Chlipala, Adam",
title = {{Coming Soon: Machine-Checked Mathematical Proofs in Everyday
Software and Hardware Development}},
link = "\url{https://media.ccc.de/v/34c3-9105-coming_soon_machine-checked_mathematical_proofs_in_everyday_software_and_hardware_development}",
year = "2017",
abstract =
"Most working engineers view machine-checked mathematical proofs as an
academic curiosity, if they have ever heard of the concept at all. In
contrast, activities like testing, debugging, and code review are
accepted as essential. They are woven into the lives of nearly all
developers. In this talk, I will explain how I see machine-checked
proofs enabling new everyday activities for developers of computer
software and hardware. These activities have the potential to lower
development effort dramatically, at the same time as they increase our
assurance that systems behave correctly and securely. I will give a
cosmological overview of this field, answering the FAQs that seem to
stand in the way of practicality; and I will illustrate the principles
with examples from projects that you can clone from GitHub today,
covering the computing stack from digital hardware design to
cryptographic software and applications.
Today's developers of computer software and hardware are tremendously
effective, compared to their predecessors. We have found very
effective ways of modularizing and validating our work. The talk is
about ammunition for these activities from a perhaps-unexpected
source.
Modularity involves breaking a complex system into a hierarchy of
simpler pieces, which may be written and understood
separately. Structured programming (e.g., using loops and conditionals
instead of gotos) helps us read and understand parts of a single
function in isolation, and data abstraction lets us encapsulate
important functionality in objects, with guarantees that other code
can only access the private data by calling public methods. That way,
we can convince ourselves that the encapsulated code upholds certain
essential properties, regardless of which other code it is linked
with. Systematic unit testing also helps enforce contracts for units
of modularity. Each of these techniques can be re-run automatically, to
catch regressions in evolving systems, and catch those regressions in
a way that accurately points the finger of responsibility to
particular modules.
Validation is an important part of development that encompasses
testing, debugging, code review, and anything else that we do to raise
our confidence that the system behaves as intended. Experienced
engineers know that validation tends to take up the majority of
engineering effort. Often that effort involves mentally taxing
activities that would not otherwise come up in coding. One example is
thinking about test-case coverage, and another is including
instrumentation that produces traces to consult during debugging.
It is not hard for working developers to imagine great productivity
gains from better ways to break systems into pieces or raise our
confidence in those pieces. The claim I will make in this talk is that
a key source of such insights has been neglected: machine-checked
mathematical proofs. Here the basic functionality is an ASCII language
for defining mathematical objects, stating theorems about them, and
giving proofs of theorems. Crucially, an algorithm checks that
purported proofs really do establish the theorems. By going about
these activities in the style of programming, we inherit usual
supporting tools like IDEs, version control, continuous integration,
and automated build processes. But how could so esoteric a task as
math proofs call for that kind of tooling, and what does it have to do
with building real computer systems?
I will explain a shared vision to that end, developed along with many
other members of my research community. Let me try to convince you
that all of the following goals are attainable in the next 10 years.
\begin{itemize}
\item We will have complete computer systems implementing moderately
complex network servers for popular protocols, proved to implement
those protocols correctly, from the level of digital circuits on
up. We will remove all deployed code (hardware or software) from the
trusted computing base, shifting our trust to much smaller
specifications and proof checkers.
\item Hobbyists will be able to design new embedded computing
platforms by mixing and matching open-source hardware and software
components, also mixing and matching the proofs of these components,
guaranteeing no bugs at the digital-abstraction level or higher, with
no need for debugging.
\item New styles of library design will be enabled by the chance to
attach a formal behavioral specification to each library. For
instance, rank-and-file programmers will be able to assemble their own
code for cryptographic protocols, with code that looks like reference
implementations in Python, but getting performance comparable to what
experts handcraft in assembly today. Yet that benefit would come with
no need to trust that library authors have avoided bugs or intentional
backdoors, perhaps even including automatic proofs of cryptographic
security properties.
\end{itemize}
Main technical topics to cover to explain my optimism:
\begin{itemize}
\item The basic functionality of proof assistants and why we should
trust their conclusions
\item How to think about system decomposition with specifications and
proofs, including why, for most components, we do not need to worry
about specification mistakes
\item The different modes of applying proof technology to check or
generate components
\item The engineering techniques behind cost-effective proof authoring
for realistic systems
\item A hardware case study: Kami, supporting component-based digital
hardware authoring with proofs
\item A software case study: Fiat Cryptography, supporting
correct-by-construction auto-generation of fast code for
elliptic-curve cryptography
\item Pointers to where to look next, if you would like to learn more
about this technology
\end{itemize}"
}
\end{chunk}
\index{Wilkes, Maurice}
\begin{chunk}{axiom.bib}
@book{Wilk85,
author = "Wilkes, Maurice",
title = {{Memoirs of a Computer Pioneer}},
publisher = "MIT Press",
year = "1985"
}
\end{chunk}
\index{Appel, Andrew W.}
\begin{chunk}{axiom.bib}
@book{Appe17,
author = "Appel, Andrew W.",
title = {{Verified Functional Algorithms}},
year = "2017",
publisher = "University of Pennsylvania",
link =
"\url{https://softwarefoundations.cis.upenn.edu/vfa-current/index.html}"
}
\end{chunk}

books/axiom.bib  500 +++++++++++++++++++++++++++++++
books/bookvol13.pamphlet  505 +++++++++++++++++++++++++++++
books/bookvolbib.pamphlet  599 +++++++++++++++++++++++++++++++++++++
changelog  2 +
patch  643 ++++++++++++++++++++++++++++++++++++
src/axiomwebsite/patches.html  2 +
6 files changed, 2124 insertions(+), 127 deletions(-)
diff --git a/books/axiom.bib b/books/axiom.bib
index 81a33a0..fd9b710 100644
--- a/books/axiom.bib
+++ b/books/axiom.bib
@@ -6947,6 +6947,15 @@ paper = "Brea89.pdf"
paper = "Yous04.pdf"
}
+@book{Appe17,
+ author = "Appel, Andrew W.",
+ title = {{Verified Functional Algorithms}},
+ year = "2017",
+ publisher = "University of Pennsylvania",
+ link =
+ "\url{https://softwarefoundations.cis.upenn.edu/vfa-current/index.html}"
+}
+
@misc{Atte15,
author = "van Atten, Mark and Sundholm, Goran",
title = {{L.E.J. Brouwer's 'Unreliability of the Logical Principles'
@@ -6970,6 +6979,187 @@ paper = "Brea89.pdf"
paper = "Atte15.pdf"
}
+@article{Bate85,
+ author = "Bates, Joseph L. and Constable, Robert L.",
+ title = {{Proofs as Programs}},
+ journal = "ACM TOPLAS",
+ volume = "7",
+ number = "1",
+ year = "1985",
+ abstract =
+ "The significant intellectual cost of programming is for problem
+ solving and explaining, not for coding. Yet programming systems offer
+ mechanical assistance for the coding process exclusively. We
+ illustrate the use of an implemented program development system,
+ called PRL ('pearl'), that provides automated assistance with the
+ difficult part. The problem and its explained solution are seen as
+ formal objects in a constructive logic of the data domains. These
+ formal explanations can be executed at various stages of
+ completion. The most incomplete explanations resemble applicative
+ programs, the most complete are formal proofs.",
+ paper = "Bate85.pdf"
+}
+
+@inproceedings{Blak96,
+ author = "Blakley, Bob",
+ title = {{The Emperor's Old Armor}},
+ booktitle = "Proc. 1996 New Security Paradigms Workshop",
+ publisher = "ACM",
+ year = "1996",
+ paper = "Blak96.pdf"
+}
+
+@article{Broo87,
+ author = "Brooks, Frederick P.",
+ title = {{No Silver Bullet: Essence and Accidents of Software Engineering}},
+ journal = "IEEE Computer",
+ volume = "20",
+ number = "4",
+ pages = "10--19",
+ year = "1987",
+ abstract =
+ "Fashioning complex conceptual constructs is the essence; accidental
+ tasks arise in representing the constructs in language. Past progress
+ has so reduced the accidental tasks that future progress now depends
+ upon addressing the essence.",
+ paper = "Broo87.pdf"
+}
+
+@techreport{Calu07,
+ author = "Calude, C.S. and Calude, E. and Marcus, S.",
+ title = {{Proving and Programming}},
+ type = "technical report",
+ institution = "Centre for Discrete Mathematics and Theoretical Computer
+ Science",
+ number = "CDMTCS-309",
+ year = "2007",
+ abstract =
+ "There is a strong analogy between proving theorems in mathematics and
+ writing programs in computer science. This paper is devoted to an
+ analysis, from the perspective of this analogy, of proof in
+ mathematics. We will argue that while the Hilbertian notion of proof
+ has few chances to change, future proofs will be of various types,
+ will play different roles, and their truth will be checked
+ differently. Programming gives mathematics a new form of
+ understanding. The computer is the driving force behind these
+ changes.",
+ paper = "Calu07.pdf"
+}
+
+@misc{Chli17a,
+ author = "Chlipala, Adam",
+ title = {{Coming Soon: Machine-Checked Mathematical Proofs in Everyday
+ Software and Hardware Development}},
+ link = "\url{https://media.ccc.de/v/34c3-9105-coming_soon_machine-checked_mathematical_proofs_in_everyday_software_and_hardware_development}",
+ year = "2017",
+ abstract =
+ "Most working engineers view machinechecked mathematical proofs as an
+ academic curiosity, if they have ever heard of the concept at all. In
+ contrast, activities like testing, debugging, and code review are
+ accepted as essential. They are woven into the lives of nearly all
+ developers. In this talk, I will explain how I see machinechecked
+ proofs enabling new everyday activities for developers of computer
+ software and hardware. These activities have the potential to lower
+ development effort dramatically, at the same time as they increase our
+ assurance that systems behave correctly and securely. I will give a
+ cosmological overview of this field, answering the FAQs that seem to
+ stand in the way of practicality; and I will illustrate the principles
+ with examples from projects that you can clone from GitHub today,
+ covering the computing stack from digital hardware design to
+ cryptographic software and applications.
+
+ Today's developers of computer software and hardware are tremendously
+ effective, compared to their predecessors. We have found very
+ effective ways of modularizing and validating our work. The talk is
+ about ammunition for these activities from a perhapsunexpected
+ source.
+
+ Modularity involves breaking a complex system into a hierarchy of
+ simpler pieces, which may be written and understood
+ separately. Structured programming (e.g., using loops and conditionals
+ instead of gotos) helps us read and understand parts of a single
+ function in isolation, and data abstraction lets us encapsulate
+ important functionality in objects, with guarantees that other code
+ can only access the private data by calling public methods. That way,
+ we can convince ourselves that the encapsulated code upholds certain
+ essential properties, regardless of which other code it is linked
+ with. Systematic unit testing also helps enforce contracts for units
+ of modularity. Each of these techniques can be rerun automatically, to
+ catch regressions in evolving systems, and catch those regressions in
+ a way that accurately points the finger of responsibility to
+ particular modules.
+
+ Validation is an important part of development that encompasses
+ testing, debugging, code review, and anything else that we do to raise
+ our confidence that the system behaves as intended. Experienced
+ engineers know that validation tends to take up the majority of
+ engineering effort. Often that effort involves mentally taxing
+ activities that would not otherwise come up in coding. One example is
+ thinking about testcase coverage, and another is including
+ instrumentation that produces traces to consult during debugging.
+
+ It is not hard for working developers to imagine great productivity
+ gains from better ways to break systems into pieces or raise our
+ confidence in those pieces. The claim I will make in this talk is that
+ a key source of such insights has been neglected: machinechecked
+ mathematical proofs. Here the basic functionality is an ASCII language
+ for defining mathematical objects, stating theorems about them, and
+ giving proofs of theorems. Crucially, an algorithm checks that
+ purported proofs really do establish the theorems. By going about
+ these activities in the style of programming, we inherit usual
+ supporting tools like IDEs, version control, continuous integration,
+ and automated build processes. But how could so esoteric a task as
+ math proofs call for that kind of tooling, and what does it have to do
+ with building real computer systems?
+
+ I will explain a shared vision to that end, developed along with many
+ other members of my research community. Let me try to convince you
+ that all of the following goals are attainable in the next 10 years.
+
+ \begin{itemize}
+ \item We will have complete computer systems implementing moderately
+ complex network servers for popular protocols, proved to implement
+ those protocols correctly, from the level of digital circuits on
+ up. We will remove all deployed code (hardware or software) from the
+ trusted computing base, shifting our trust to much smaller
+ specifications and proof checkers.
+ \item Hobbyists will be able to design new embedded computing
+ platforms by mixing and matching opensource hardware and software
+ components, also mixing and matching the proofs of these components,
+ guaranteeing no bugs at the digitalabstraction level or higher, with
+ no need for debugging.
+ \item New styles of library design will be enabled by the chance to
+ attach a formal behavioral specification to each library. For
+ instance, rankandfile programmers will able to assemble their own
+ code for cryptographic protocols, with code that looks like reference
+ implementations in Python, but getting performance comparable to what
+ experts handcraft in assembly today. Yet that benefit would come with
+ no need to trust that library authors have avoided bugs or intentional
+ backdoors, perhaps even including automatic proofs of cryptographic
+ security properties.
+ \end{itemize}
+
+ Main technical topics to cover to explain my optimism:
+ \begin{itemize}
+ \item The basic functionality of proof assistants and why we should
+ trust their conclusions
+ \item How to think about system decomposition with specifications and
+ proofs, including why, for most components, we do not need to worry
+ about specification mistakes
+ \item The different modes of applying proof technology to check or
+ generate components
+ \item The engineering techniques behind costeffective proof authoring
+ for realistic systems
+ \item A hardware case study: Kami, supporting componentbased digital
+ hardware authoring with proofs
+ \item A software case study: Fiat Cryptography, supporting
+ correctbyconstruction autogeneration of fast code for
+ ellipticcurve cryptography
+ \item Pointers to where to look next, if you would like to learn more
+ about this technology
+ \end{itemize}"
+}
+
@book{Chli17,
author = "Chlipala, Adam",
title = {{Formal Reasoning About Programs}},
@@ -6995,16 +7185,6 @@ paper = "Brea89.pdf"
paper = "Chli17.pdf"
}
@misc{Cyph17,
 author = "Cypherpunks",
 title = {{Chapter 4: Verification Techniques}},
 link = "\url{http://www.cypherpunks.to/~peter/04_verif_techniques.pdf}",
 year = "2017",
 abstract = "Wherein existing methods for building secure systems are
 examined and found wanting",
 paper = "Cyph17.pdf"
}

@article{Cons85a,
author = "Constable, R.L. and Knoblock, T.B. and Gates, J.L.",
title = {{Writing Programs that Construct Proofs}},
@@ 7031,6 +7211,98 @@ paper = "Brea89.pdf"
paper = "Cons85a.pdf"
}
+@article{Coqu85,
+ author = "Coquand, Thierry and Huet, G{\'e}rard",
+ title = {{Constructions: A Higher Order Proof System for Mechanizing
+ Mathematics}},
+ journal = "LNCS",
+ volume = "203",
+ pages = "151--184",
+ year = "1985",
+ abstract =
+ "We present an extensive set of mathematical propositions and proofs
+ in order to demonstrate the power of expression of the theory of
+ constructions.",
+ paper = "Coqu85.pdf"
+}
+
+@misc{Cyph17,
+ author = "Cypherpunks",
+ title = {{Chapter 4: Verification Techniques}},
+ link = "\url{http://www.cypherpunks.to/~peter/04_verif_techniques.pdf}",
+ year = "2017",
+ abstract = "Wherein existing methods for building secure systems are
+ examined and found wanting",
+ paper = "Cyph17.pdf"
+}
+
+@article{Fass04,
+ author = "Fass, Leona F.",
+ title = {{Approximations, Anomalies and ``The Proof of Correctness Wars''}},
+ journal = "ACM SIGSOFT Software Engineering Notes",
+ volume = "29",
+ number = "2",
+ year = "2004",
+ abstract =
+ "We discuss approaches to establishing 'correctness' and describe the
+ usefulness of logic-based model checkers for producing better
+ practical system designs. While we could develop techniques for
+ 'constructing correctness' in our theoretical behavioral-modeling
+ research, when applied to Real World processes such as software
+ development only approximate correctness might be established and
+ anomalous behaviors subsequently found. This we view as a positive
+ outcome since resultant adaptation, or flaw detection and correction,
+ may lead to improved development and designs. We find researchers
+ employing model checking as a formal methods tool to develop empirical
+ techniques have reached similar conclusions. Thus we cite some
+ applications of model checking to generate tests and detect defects in
+ such Real World processes as aviation system development,
+ fault-detection systems, and security.",
+ paper = "Fass04.pdf"
+}
+
+@article{Fent93,
+ author = "Fenton, Norman",
+ title = {{How Effective Are Software Engineering Methods}},
+ journal = "J. Systems Software",
+ volume = "22",
+ pages = "141--148",
+ year = "1993",
+ abstract =
+ "For 25 years, software engineers have sought methods which they hope
+ can provide a technological fix for the ``software crisis.'' Proponents
+ of specific methods claim that their use leads to significantly
+ improved quality and productivity. Such claims are rarely, if ever,
+ backed up by hard evidence. We show that often, where real empirical
+ evidence does exist, the results are counter to the views of the
+ so-called experts. We examine the impact on the software industry of
+ continuing to place our trust in unproven, and often revolutionary,
+ methods. The very poor state of the art of empirical assessment in
+ software engineering can be explained in part by inappropriate or
+ inadequate use of measurement. Numerous empirical studies are flawed
+ because of their poor experimental design and lack of adherence to
+ proper measurement principles.",
+ paper = "Fent93.pdf"
+}
+
+@article{Fetz88,
+ author = "Fetzer, James H.",
+ title = {{Program Verification: The Very Idea}},
+ journal = "Communications of the ACM",
+ volume = "31",
+ number = "9",
+ pages = "1048--1063",
+ year = "1988",
+ abstract =
+ "The notion of program verification appears to trade upon an
+ equivocation. Algorithms, as logical structures, are appropriate
+ subjects for deductive verification. Programs, as causal models of
+ those structures, are not. The success of program verification as a
+ generally applicable and completely reliable method for guaranteeing
+ program performance is not even a theoretical possibility.",
+ paper = "Fetz88.pdf"
+}
+
@book{Font16,
author = "Font, Josep Marie",
title = {{Abstract Algebraic Logic: An Introductory Textbook}},
@@ 7039,6 +7311,199 @@ paper = "Brea89.pdf"
isbn = "9781848902077"
}
+@article{Glas94,
+ author = "Glass, Robert L.",
+ title = {{The Software-Research Crisis}},
+ journal = "IEEE Software",
+ volume = "11",
+ number = "6",
+ year = "1994",
+ abstract =
+ "With the advantage of more than 25 years' hindsight, this twenty-first
+ century author looks askance at the 'crisis' in software practice and
+ expresses deep concern for a crisis in software research.",
+ paper = "Glas94.pdf"
+}
+
+@article{Glas02,
+ author = "Glass, Robert L.",
+ title = {{The Proof of Correctness Wars}},
+ journal = "Communications of the ACM",
+ volume = "45",
+ number = "8",
+ year = "2002",
+ pages = "19--21",
+ paper = "Glas02.pdf"
+}
+
+@article{Hall90,
+ author = "Hall, Anthony",
+ title = {{Seven Myths of Formal Methods}},
+ journal = "IEEE Software",
+ volume = "7",
+ number = "5",
+ pages = "11--19",
+ year = "1990",
+ abstract =
+ "Formal methods are difficult, expensive, and not widely useful,
+ detractors say. Using a case study and other real-world examples, this
+ article challenges such common myths.",
+ paper = "Hall90.pdf"
+}
+
+@article{Hoar96,
+ author = "Hoare, C.A.R.",
+ title = {{How did software get so reliable without proof?}},
+ journal = "LNCS",
+ volume = "1051",
+ year = "1996",
+ abstract =
+ "By surveying current software engineering practice, this paper
+ reveals that the techniques employed to achieve reliability are little
+ different from those which have proved effective in all other branches
+ of modern engineering: rigorous management of procedures for design
+ inspection and review; quality assurance based on a wide range of
+ targeted tests; continuous evolution by removal of errors from
+ products already in widespread use; and defensive programming, among
+ other forms of deliberate over-engineering. Formal methods and proof
+ play a small direct role in large scale programming; but they do
+ provide a conceptual framework and basic understanding to promote the
+ best of current practice, and point directions for future
+ improvement.",
+ paper = "Hoar96.pdf"
+}
+
+@article{Lind88,
+ author = "Lindsay, Peter A.",
+ title = {{A Survey of Mechanical Support for Formal Reasoning}},
+ journal = "Software Engineering Journal",
+ volume = "3",
+ number = "1",
+ year = "1988",
+ pages = "3--27",
+ paper = "Lind88.pdf"
+}
+
+@article{Mann78,
+ author = "Manna, Zohar and Waldinger, Richard",
+ title = {{The Logic of Computer Programming}},
+ journal = "IEEE Trans. on Software Engineering",
+ volume = "4",
+ number = "3",
+ year = "1978",
+ abstract =
+ "Techniques derived from mathematical logic promise to provide an
+ alternative to the conventional methodology for constructing,
+ debugging, and optimizing computer programs. Ultimately, these
+ techniques are intended to lead to the automation of many of the
+ facets of the programming process.
+
+ This paper provides a unified tutorial exposition of the logical
+ techniques, illustrating each with examples. The strengths and
+ limitations of each technique as a practical programming aid are
+ assessed and attempts to implement these methods in experimental
+ systems are discussed.",
+ paper = "Mann78.pdf"
+}
+
+@book{Mann80,
+ author = "Manna, Zohar",
+ title = {{Lectures on the Logic of Computer Programming}},
+ publisher = "Society for Industrial and Applied Mathematics",
+ year = "1980",
+ paper = "Mann80.tgz"
+}
+
+@inproceedings{Mart73,
+ author = "Martin-L{\"o}f, P.",
+ title = {{An Intuitionistic Theory of Types}},
+ booktitle = "Logic Colloquium 1973",
+ editor = "Rose, H.E. and Shepherdson, J.G.",
+ pages = "73--118",
+ year = "1973",
+ abstract =
+ "The theory of types with which we shall be concerned is intended to
+ be a full scale system for formalizing intuitionistic mathematics as
+ developed, for example, in the book by Bishop 1967. The language of
+ the theory is richer than the language of first order predicate
+ logic. This makes it possible to strengthen the axioms for existence
+ and disjunction. In the case of existence, the possibility of
+ strengthening the usual elimination rule seems first to have been
+ indicated by Howard 1969, whose proposed axioms are special cases of
+ the existential elimination rule of the present theory. Furthermore,
+ there is a reflection principle which links the generation of objects
+ and types and plays somewhat the same role for the present theory as
+ does the replacement axiom for Zermelo-Fraenkel set theory.
+
+ An earlier, not yet conclusive, attempt at formulating a theory of
+ this kind was made by Scott 1970. Also related, although less closely,
+ are the type and logic free theories of constructions of Kreisel 1962
+ and 1965 and Goodman 1970.
+
+ In its first version, the present theory was based on the strongly
+ impredicative axiom that there is a type of all types whatsoever,
+ which is at the same time a type and an object of that type. This
+ axiom had to be abandoned, however, after it was shown to lead to a
+ contradiction by Jean-Yves Girard. I am very grateful to him for
+ showing me his paradox. The change that it necessitated is so drastic
+ that my theory no longer contains intuitionistic simple type theory as
+ it originally did. Instead, its proof theoretic strength should be
+ close to that of predicative analysis.",
+ paper = "Mart73.pdf"
+}
+
+@inproceedings{Mart79,
+ author = "Martin-L{\"o}f, P.",
+ title = {{Constructive Mathematics and Computer Programming}},
+ booktitle = "Proc. 6th Int. Congress for Logic, Methodology and
+ Philosophy of Science",
+ publisher = "North-Holland",
+ year = "1979",
+ pages = "153--179",
+ abstract =
+ "If programming is understood not as the writing of instructions for
+ this or that computing machine but as the design of methods of
+ computation that it is the computer's duty to execute (a difference
+ that Dijkstra has referred to as the difference between computer
+ science and computing science), then it no longer seems possible to
+ distinguish the discipline of programming from constructive
+ mathematics. This explains why the intuitionistic theory of types,
+ which was originally developed as a symbolism for the precise
+ codification of constructive mathematics, may equally well be viewed
+ as a programming language. As such it provides a precise notation not
+ only, like other programming languages, for the programs themselves
+ but also for the tasks that the programs are supposed to
+ perform. Moreover, the inference rules of the theory of types, which
+ are again completely formal, appear as rules of correct program
+ synthesis. Thus the correctness of a program written in the theory of
+ types is proved formally at the same time as it is being
+ synthesized.",
+ paper = "Mart79.pdf"
+}
+
+@misc{Reid17,
+ author = "Reid, Alastair",
+ title = {{How can you trust formally verified software}},
+ link = "\url{https://media.ccc.de/v/34c3-8915-how_can_you_trust_formally_verified_software#t=12}",
+ year = "2017",
+ abstract =
+ "Formal verification of software has finally started to become viable:
+ we have examples of formally verified microkernels, realistic
+ compilers, hypervisors, etc. These are huge achievements and we can
+ expect to see even more impressive results in the future but the
+ correctness proofs depend on a number of assumptions about the Trusted
+ Computing Base that the software depends on. Two key questions to ask
+ are: Are the specifications of the Trusted Computing Base correct? And
+ do the implementations match the specifications? I will explore the
+ philosophical challenges and practical steps you can take in answering
+ that question for one of the major dependencies: the hardware your
+ software runs on. I will describe the combination of formal
+ verification and testing that ARM uses to verify the processor
+ specification and I will talk about our current challenge: getting the
+ specification down to zero bugs while the architecture continues to
+ evolve."
+}
+
@book{Rest00,
author = "Restall, Greg",
title = {{An Introduction to Substructural Logics}},
@@ 7058,6 +7523,13 @@ paper = "Brea89.pdf"
paper = "Tryb92.pdf"
}
+@book{Wilk85a,
+ author = "Wilkes, Maurice",
+ title = {{Memoirs of a Computer Pioneer}},
+ publisher = "MIT Press",
+ year = "1985"
+}
+
@incollection{Soze06,
author = "Sozeau, Matthieu",
title = {{Subset Coercions in Coq}},
@@ 9126,6 +9598,14 @@ paper = "Brea89.pdf"
isbn = "013215871X"
}
+@misc{Dijk83,
+ author = "Dijkstra, Edsger",
+ title = {{Fruits of Misunderstanding}},
+ year = "1983",
+ link = "\url{https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD854.html}",
+ paper = "Dijk83.txt"
+}
+
@misc{Dolz04a,
author = "Dolzmann, A. and Seidl, A. and Sturm, T.",
title = {{Redlog User Manual}},
diff git a/books/bookvol13.pamphlet b/books/bookvol13.pamphlet
index 8f83678..6d56c7e 100644
 a/books/bookvol13.pamphlet
+++ b/books/bookvol13.pamphlet
@@ 17,20 +17,83 @@ automating the process. Automated machine proofs are not possible
in general but will exist for known algorithms.
\begin{quote}
{\bf Writing is nature's way of letting you know how sloppy your
thinking is
}  Guindon\cite{Lamp02}
+{\bf Q: Why bother doing proofs about programming languages? They are almost
+always boring if the definitions are right.}
+
+{\bf A: The definitions are almost always wrong.}
\end{quote}
\begin{quote}
{\bf Mathematics is nature's way of letting you know how sloppy
your writing is.
}  Leslie Lamport\cite{Lamp02}
+{\bf If programming is understood not as the writing of instructions for
+this or that computing machine but as the design of methods of
+computation that it is the computer's duty to execute, then it no
+longer seems possible to distinguish the discipline of programming
+from constructive mathematics.}\\
+ Per Martin-L\"of \cite{Mart79}
+\end{quote}
+
+\begin{quote}
+{\bf Our basic premise is that the ability to construct and modify programs
+will not improve without a new and comprehensive look at the entire
+programming process. Past theoretical research, say, in the logic of
+programs, has tended to focus on methods for reasoning about
+individual programs; little has been done, it seems to us, to develop
+a sound understanding of the process of programming  the process by
+which programs evolve in concept and in practice. At present, we lack
+the means to describe the techniques of program construction and
+improvement in ways that properly link verification, documentation and
+adaptability.}\\
+ Scherlis and Scott (1983) in \cite{Maso86}
+\end{quote}
+
+\begin{quote}
+{\bf The intrinsically discrete nature of symbol processing makes
+programming such a tricky job that the application of formal
+techniques becomes a necessity.}\\
+ Edsger W. Dijkstra \cite{Dijk83}
+\end{quote}
+
+\begin{quote}
+{\bf The notion of a proof serves not only to organize information,
+but to direct the analysis of a problem and produce the necessary
+insights. It is as much an analytical tool as it is a final product.}\\
+ Bates and Constable \cite{Bate85}
+\end{quote}
+
+\begin{quote}
+{\bf By June 1949 people had begun to realize that it was not so easy to
+get programs right as at one time appeared. ... the realization came
+over me with full force that a good part of the remainder of my life
+was going to be spent in finding errors in my own programs.}\\
+ Maurice Wilkes \cite{Wilk85a}
\end{quote}
\begin{quote}
{\bf
The existence of the computer is giving impetus to the discovery of
+{\bf I believe the hard part of building software to be the
+specification, design, and testing of this conceptual construct, not
+the labor of representing it and testing the fidelity of the
+representation. If this is true, building software will always be
+hard. There is inherently no silver bullet.}\\
+ Frederick P. Brooks Jr. \cite{Broo87}
+\end{quote}
+
+\begin{quote}
+{\bf I hold the opinion that the construction of computer programs is
+a mathematical activity like the solution of differential equations,
+that programs can be derived from their specifications through
+mathematical insight, calculation, and proof, using algebraic laws as
+simple and elegant as those of elementary arithmetic.}\\
+ C. A. R. Hoare \cite{Fetz88}
+\end{quote}
+
+\begin{quote}
+{\bf It might be said that programs are conjectures, while executions are
+attempted refutations.}\\
+ James Fetzer \cite{Fetz88}
+\end{quote}
+
+\begin{quote}
+{\bf The existence of the computer is giving impetus to the discovery of
algorithms that generate proofs. I can still hear the echos of the
collective sigh of relief that greeted the announcement in 1970 that
there is no general algorithm to test for integer solutions to
@@ 38,7 +101,7 @@ polynomial Diophantine equations; Hilbert's tenth problem has no
solution. Yet, as I look at my own field, I see that creating
algorithms that generate proofs constitutes some of the most important
mathematics being done. The allpurpose proof machine may be dead, but
tightly targeted machines are thriving.}
+tightly targeted machines are thriving.}\\
 Dave Bressoud \cite{Bres93}
\end{quote}
@@ 53,26 +116,11 @@ many years, I expect that we will have interactive computer programs
that can help people compile significant chunks of formally complete
and correct mathematics (based on a few perhaps shaky but at least
explicit assumptions) and that they will become part of the standard
mathematician's working environment.}
+mathematician's working environment.}\\
 William P. Thurston \cite{Thur94}
\end{quote}
\begin{quote}
{\bf Our basic premise is that the ability to construct and modify programs
will not improve without a new and comprehensive look at the entire
programming process. Past theoretical research, say, in the logic of
programs, has tended to focus on methods for reasoning about
individual programs; little has been done, it seems to us, to develop
a sound understanding of the process of programming  the process by
which programs evolve in concept and in practice. At present, we lack
the means to describe the techniques of program construction and
improvement in ways that properly link verification, documentation and
adaptability.}

 Scherlis and Scott (1983) in \cite{Maso86}
\end{quote}

\begin{quote}
{\bf ...constructive mathematics provides a way of viewing the language of
logical propositions as a {\sl specification} language for
programs. An ongoing thrust of work in computer science has been to
@@ 90,27 +138,297 @@ isomorphic to a type expression, and the proof of a proposition would
suggest precisely how to construct an inhabitant of the type, which
would be a term in a functional programming language. The term that
inhabits the type corresponding to a proposition is often referred to as
the {\sl computational content} of the proposition.}

+the {\sl computational content} of the proposition.}\\
 Paul Bernard Jackson\cite{Jack95}

\end{quote}
\begin{quote}
Q: Why bother doing proofs about programming languages? They are almost
always boring if the definitions are right.
+{\bf Writing is nature's way of letting you know how sloppy your
+thinking is. }\\
+ Guindon \cite{Lamp02}
+\end{quote}
A: The definitions are almost always wrong.
+\begin{quote}
+{\bf Mathematics is nature's way of letting you know how sloppy
+your writing is.}\\
+ Leslie Lamport \cite{Lamp02}
\end{quote}
\begin{quote}
Type theory is nothing short of a grand unified theory of computation
+{\bf Type theory is nothing short of a grand unified theory of computation
unified with mathematics so ultimately there is no difference between
math and the code.

+math and the code.}\\
 Robert Harper\cite{Harp13}
\end{quote}
+\begin{quote}
+{\bf Informal proofs are algorithms. Formal proofs are code.}\\
+ Benjamin Pierce \cite{Pier17}
+\end{quote}
+
+\chapter{Why this effort will not succeed}
+
+This is an effort to prove Axiom correct. That is, we wish to prove
+that the algorithms provide correct, trustworthy answers.
+
+All prior attempts at combining a Computer Algebra system and a
+Proof system raise the issue that the CAS is untrustworthy.
+
+Axiom tries to encode mathematical algorithms. Unlike other systems it
+is built on the scaffold of group theory which provides a sound
+mathematical foundation. As such, it seems only reasonable that the
+algorithms in Axiom can be proven correct, hence the project to Prove
+Axiom Correct (PAC).
+
+The PAC project will not succeed. This is perfectly obvious from the
+outset. But, given the law of the excluded middle (that is,
+$A \lor ~A$) is not applicable in this case, the fact that the project
+"does not succeed" does not imply failure. Learning will occur.
+
+That said, we can list quite a few reasons why PAC will not succeed,
+most of which are explained in more detail in \cite{Cyph17}. We
+provide useful names for the likely criticisms presented there and
+paraphrase those criticisms applied to our context.
+\begin{enumerate}
+\item {\bf Leap of Faith} All of these efforts either require making a
+leap of faith to go from verified code to a realworld problem, or
+required the use of an artificially restricted system in order to
+function (the NewSpeak approach, create a language in which it is
+impossible to think bad thoughts), indicating that formal verification
+down to the binary code level is unlikely to be practical in any
+generally accepted formal methods sense.
+\item {\bf Tools for Toy Problems} The tools used to support formal
+methods arose from an academic research environment characterised by
+a small number of highly skilled users. The tools were tested on small
+problems (usually referred to somewhat disparagingly as ``toy
+problems'') which were targeted more at exercising the tools than
+exercising the problem.
+\item {\bf Opaque Specifications} Another factor which complicates the
+use of formal methods is that the mathematical methods available to
+software engineers are often very difficult to use and plagued by
+notation which is cumbersome and hard to read and understand.
+\item {\bf Too Large a Task} The national technology base for this
+level of task is essentially nonexistent. There do not appear to be
+even 20 people in the world that have undertaken the essential
+steps. ``In order to get a system with excellent system integrity, you
+must ensure that it is designed and built by geniuses. Geniuses are in
+short supply'' \cite{Blak96}.
+\item {\bf False Axioms} Problems occur when a proof has to be
+manually augmented with ``self-evident'' truths which sometimes turn
+out to be false and end up misguiding the prover.
+\item {\bf Intellectual Cost} Requiring a proof of an algorithm adds to the
+already outsized cost of creating the algorithm in Axiom, given the
+steep intellectual hill such a system already presents.
+\item {\bf Ripple Cost} Multiple layers of abstraction are required to
+go from a proof to its implementation and a change in any of the
+layers can result in a ripple effect as changes propagate. If a change
+manages to propagate its way into a formally proven section, portions
+of, or possibly all of, the proof might need to be redone.
+\item {\bf Informal Implementation Details} Current techniques rarely
+reach down any further than the high-level specification resulting in
+large amounts of time and energy being poured into producing a design
+specification which is void of any implementation details.
+\item {\bf Specification Mismatch} Formal proofs that a specification
+is correct don't show that the assumptions made in the proof are
+correct in the actual physical system. If the code which is supposed
+to implement the formal specification doesn't quite conform to it or
+the compiler can't produce an executable which quite matches what was
+intended in the code then no amount of formal proving will be able to
+guarantee the execution behaviour of the code.
+\item {\bf Natural / Formal Mismatch} Natural language descriptions of
+what is intended may mismatch the formal language descriptions. In
+many cases ``common sense'' assumptions come into play which are never
+formally stated.
+\item {\bf Specification Flaws} The code may implement a specification
+exactly as stated but the specification can be flawed.
+\item {\bf Misimplemented Specifications} The specification may be
+correct but the implementation does not match the specification.
+\item {\bf Specification Narrowing} The specification language may not
+be sufficient to state all of the required concepts so a ``narrowing''
+of the specification is made to approximate the intent.
+\item {\bf Specification Widening} The specification language may
+generalize the required concepts so a ``widening'' of the
+specification is made beyond the required intent.
+\item {\bf Specification Impedance Mismatch} The specification
+language does not cover the concepts in the domain of interest,
+requiring considerable effort to ``model'' the domain.
+\item {\bf Specification Blindness} The specification writers can't
+take into account every eventuality which will arise. As a result, an
+implementation of a specification doesn't just implement it, it alters
+it in order to fit realworld constraints which weren't foreseen by
+the original authors of the specification.
+\item {\bf Contradictory Proofs} If the system is inconsistent, it would
+be possible to find two contradictory proofs. It is likely that such
+an inconsistency will not be detected and only one of the proofs will
+be accepted since the other one is never generated.
+\item {\bf Likely to be Ignored} There are thousands of proofs published,
+most of which are ``write only'' and are never checked. This is likely
+to be the case with Axiom's proofs.
+\item {\bf Proven Programs are Wrong} It is possible to prove a
+program correct and still get wrong results. Testing is still
+required.
+\item {\bf Proofs are a Social Process} Axiom's ``social circle'' is
+vanishingly small.
+\item {\bf Chicken and Egg} Do proofs follow from programs or programs
+follow from proofs?
+\item {\bf Boiling the Ocean} The task is much too large.
+\item {\bf MetaTheory Cost} There is a cost to developing theories for
+new data types; these are unnecessary ``meta'' costs.
+\item {\bf Partial Functions} Not all of the functions are total so it
+is difficult to prove theorems about them.
+\item {\bf Undecidable Theories} Some theories are known to be
+undecidable so there will be problems with their proofs.
+\end{enumerate}
+
+Fenton \cite{Fent93} states that there is no hard evidence to show that
+\begin{enumerate}
+\item formal methods have been used cost effectively on a real
+safetycritical system development
+\item the use of formal methods can deliver reliability more cost
+effectively than traditional structured methods with enhanced testing
+\item sufficient numbers of either developers or users can ever be
+trained to make proper use of formal methods
+\end{enumerate}
+
+Calude, et al. \cite{Calu07} take a quite different attack on the idea
+of proving and programming, specifically
+\begin{enumerate}
+\item Theorems (in mathematics) correspond to algorithms and not
+programs (in computer science); algorithms are subject to mathematical
+proofs (for example correctness)
+\item The role of proof in mathematical modeling is very small:
+adequacy is the main issue
+\item Programs (in computer science) correspond to mathematical
+models. They are not subject to proofs, but to an adequacy and
+relevance analysis; in this type of analysis, some proofs may
+appear. Correctness proofs in computer science (if any) are not
+costeffective.
+\item Rigour in programming is superior to rigour in mathematical
+proofs.
+\item Programming gives mathematics a new form of understanding
+\item Although the Hilbertian notion of proof has few chances to
+change, future proofs will be of various types and will play different
+roles, and their truth will be checked differently.
+\item In general, correctness is undecidable
+\item for most nontrivial cases, correctness is a monumental task
+which gives an added confidence at a disproportionate cost.
+\end{enumerate}
+
+Still another attack comes from Hoare \cite{Hoar96} which states that
+"By surveying current software engineering practice, this paper
+reveals that the techniques employed to achieve reliability are little
+different from those which have proved effective in all other branches
+of modern engineering: rigorous management of procedures for design
+inspection and review; quality assurance based on a wide range of
+targeted tests; continuous evolution by removal of errors from
+products already in widespread use; and defensive programming, among
+other forms of deliberate overengineering. Formal methods and proof
+play a small direct role in large scale programming; but they do
+provide a conceptual framework and basic understanding to promote the
+best of current practice, and point directions for future
+improvement."
+
+Fetzer \cite{Fetz88} adds
+"The notion of program verification appears to trade upon an
+equivocation. Algorithms, as logical structures, are appropriate
+subjects for deductive verification. Programs, as causal models of
+those structures, are not. The success of program verification as a
+generally applicable and completely reliable method for guaranteeing
+program performance is not even a theoretical possibility."
+
+An unnamed letter writer to the CACM says "It is time somebody said it
+ loud and clear  the formal approach to software verification does
+not work now and probably never will." \cite{Glas02}
+
+DeMillo et al. \cite{Demi79} says
+"It is argued that formal verifications of programs, no matter how
+obtained, will not play the same key role in the development of
+computer science and software engineering as proofs do in mathematics.
+Furthermore the absence of continuity, the inevitability of change, and
+the complexity of specification of significantly many real programs
+make the formal verification process difficult to justify and manage.
+It is felt that ease of formal verification should not dominate program
+language design."
+
+DeMillo argues that proofs are social constructs. "Mathematical
+proofs increase our confidence in the truth of mathematical statements
+only after they have been subjected to the social mechanisms of the
+mathematical community. These same mechanisms doom the socalled
+proofs of software, the long formal verifications that correspond, not
+to the working mathematical proof, but to the imaginary logical
+structure that the mathematician conjures up to describe his feeling
+of belief. Verifications are not messages, a person who ran out into
+the hall to communicate his latest verification would rapidly find
+himself a social pariah. Verifications cannot really be read; a reader
+can flay himself through one of the shorter ones by dint of heroic
+effort, but that's not reading. Being unreadable and  literally 
+unspeakable, verifications cannot be internalized, transformed,
+generalized, used, connected to other disciplines, and eventually
+incorporated into a community consciousness. They cannot acquire
+credibility gradually, as a mathematical theorem does; one either
+believes them blindly, as a pure act of faith, or not at all."
+
+And still more with "There is a fundamental logical objection to
+verification, an objection on its own ground of formalistic
+rigor. Since the requirement for a program is informal and the program
+is formal, there must be a transition, and the transition itself must
+necessarily be informal."
+
+And "So, having for the moment suspended all rational disbelief, let
+us suppose that the programmer gets the message ``VERIFIED.'' And let
+us suppose further that the message does not result from a failure on
+the part of the verifying system. What does the programmer know? He
+knows that his program is formally, logically, provably, certifiably
+correct. He does not know, however, to what extent it is reliable,
+dependable, trustworthy, safe; he does not know within what limits it
+will work; he does not know what happens when it exceeds those
+limits. And yet he has that mystical stamp of approval ``VERIFIED.''"
+
+Manna and Waldinger \cite{Mann78} mentions
+\begin{enumerate}
+\item We can never be sure that the specifications are correct
+\item No verification system can verify every correct program
+\item We can never be certain that a verification system is correct
+\end{enumerate}
+
+Hall \cite{Hall90} presents 7 myths of formal methods.
+\begin{enumerate}
+\item Formal methods can guarantee that software is perfect.
+\item They work by proving that programs are correct.
+\item Only highly critical systems benefit from their use.
+\item They involve complex mathematics.
+\item They increase the cost of development.
+\item They are incomprehensible to clients.
+\item Nobody uses them for real projects.
+\end{enumerate}
+
+Bowen and Hinchey \cite{Bowe95} follow up with 7 more myths.
+\begin{enumerate}
+\item Formal methods delay the development process
+\item Formal methods lack tools
+\item Formal methods replace traditional engineering design methods
+\item Formal methods only apply to software
+\item Formal methods are unnecessary
+\item Formal methods are not supported
+\item Formal methods people always use formal methods
+\end{enumerate}
+
+\chapter{Progress Will Occur}
+
+At the very lowest level there have been some truly impressive steps
+toward formal verification of low-level implementation details.
+
+Reid \cite{Reid17} has created and verified a huge specification (over
+1/2 Million nodes) of the ARM processor. The specification passes
+almost every test in their huge (11k) test suite. Using the
+specification with an SMT Solver allows the ability to do things like
+ask what input will give the known output, for example.
+
+Chlipala has done both machinelevel \cite{Chli17a} and higherlevel
+\cite{Chli17} formal proofs. He claims that within 10 years it will be
+normal to have deployed computer systems with toptobottom correctness
+proofs which were checked algorithmically.
+
\chapter{Here is a problem}
Proving programs correct involves working with a second programming
@@ 3074,7 +3392,11 @@ Adams\cite{Adam01}
Ballarin\cite{Ball95}
Cardelli\cite{Card85} states that {\bf coercions} are a form of adhoc polymorphism.
+Berger\cite{Berg95} The Greatest Common Divisor: A Case Study for
+Program Extraction from Classical Proofs.
+
+Cardelli\cite{Card85} states that {\bf coercions} are a
+form of adhoc polymorphism.
Axiom allows MLstyle type inference in the interpreter. Declaring a function
\begin{verbatim}
f(n) == n + 1
@@ 3105,11 +3427,65 @@ Fr\"uhwirht\cite{Frue91} details optimistic type systems for logic programs.
Harrison\cite{Harr98}
+In most cases, the certificate is simply the answer. An exception is
+the GCD, where a slightly more elaborate certificate is better for our
+purposes. If we ask Maple to find the GCD of $x^2-1$ and $x^5+1$ using
+its gcd function, for example, it responds with $x+1$. How can this
+result be checked? It's certainly straightforward to check that this
+is {\sl a} common divisor. If we don't want to code polynomial
+division ourselves in HOL, we can call Maple's {\tt divide} function,
+and then simply verify the quotient as above. But how can we prove that
+$x+1$ is the {\sl greatest} common divisor? (Note that the use of
+`greatest' is a misnomer: in a general ring we say that $a$ is a GCD
+of $b$ and $c$ iff it is a common divisor, and any other common divisor
+of $b$ and $c$ divides $a$. For example, both 2 and $-2$ are GCDs of 8
+and 10 over $\mathbb{Z}$.) At first sight, there is no easy way, short
+of replicating something like the Euclidean algorithm inside the logic
+(though that isn't a really difficult prospect).
+
+However, a variant of Maple's GCD algorithm, called {\tt gcdex} will,
+given polynomials $p$ and $q$, produce not just the GCD $d$, but also
+two other polynomials $r$ and $s$ such that $d=pr+qs$. (Indeed, the
+coefficients in this sort of Bezout identity follow easily from the
+Euclidean GCD algorithm.) For example, applied to $x^2-1$ and $x^5+1$
+we get the following equation:
+\[(-x^3-x)(x^2-1)+1(x^5+1)=x+1\]
+This again can be checked easily, and from that, the fact that $x+1$
+is the {\sl greatest} common divisor follows by an easily proved
+theorem, since obviously any common factor of $x^2-1$ and $x^5+1$
+must, by the above equation, divide $x+1$ too. So here, given a
+certificate slightly more elaborate than simply the answer, easy and
+efficient checking is possible.
+
+
Jenks\cite{Jenk84b} Overview of Scratchpad.
Kifer\cite{Kife91} Typed Predicate Calculus giving declarative meaning to
logic programs with type declarations and type inference.
+Meshveliani\cite{Mesh16a}
+
+{\bf Prejudice 1}: ``Proof by contradiction is not possible in
+constructive mathematics''
+
+In fact: {\sl it is possible}  when the relation has a
+{\bf decision algorithm}.
+
+{\bf Example}: In most domains in computer algebra the equality
+relation has a decision algorithm \verb_=?_. Respectively, a program of
+the kind.
+\[{\rm case\ x\ } =? {\rm\ y\ of\ } \backslash{}
+\{ (yes x\approx y)\rightarrow\ldots;
+({\rm\ no\ }x\ne y)\rightarrow\ldots\}\]
+actually applies the {\sl excluded third law} to this relation.
+
+{\bf Prejudice 2}: ``Programs in the verified programming tools (like
+Coq, Agda) do not provide a proof itself, instead they provide an
+algorithm to build a proof witness for each concrete data''.
+
+I claim: {\sl they also provide a proof in its ordinary meaning}
+(this is so in Agda, and I expect, the same is with Coq).
+
Smolka\cite{Smol89a} details the foundations for relational logic programming
with polymorphically ordersorted data types.
@@ 3120,6 +3496,65 @@ ALGOL 68 has carefully defined rules for coercion, using dereferencing, deproced
widening, rowing, uniting, and voiding to transform values to the type required
for further computation.
+McAllester, D. and Arkoudas, K., \cite{Mcal96}
+"Primitive recursion is a well known syntactic restriction on
+recursion definitions which guarantees termination. Unfortunately
+many natural definitions, such as the most common definition of
+Euclid's GCD algorithm, are not primitive recursive. Walther has
+recently given a proof system for verifying termination of a
+broader class of definitions. Although Walther's system is highly
+automatable, the class of acceptable definitions remains only
+semi-decidable. Here we simplify Walther's calculus and give a
+syntactic criterion which generalizes primitive recursion and handles
+most of the examples given by Walther. We call the corresponding
+class of acceptable definitions ``Walther recursive''.",
+
+In Harrison\cite[p13]{Harr98} we find
+
+There are several examples of computer algebra results which may be
+checked relatively easily:
+\begin{itemize}
+\item factoring polynomials (or numbers)
+\item finding GCDs of polynomials (or numbers)
+\item solving equations (algebraic, simultaneous, differential,...)
+\item finding antiderivatives
+\item finding closed forms for summations
+\end{itemize}
+
+In most cases the certificate is simply the answer. An exception is
+the GCD, where a slightly more elaborate certificate is better for our
+purposes. If we ask to find the GCD of $x^2-1$ and $x^5+1$ using the
+gcd function, for example, the response is $x+1$. How can this result
+be checked? It's certainly straightforward to check that this is
+{\sl a} common divisor. If we don't want to code polynomial division
+ourselves in HOL, we can call the {\tt divide} function, and then
+simply verify the quotient as above. But how can we prove that $x+1$
+is the {\sl greatest} common divisor?\footnote{The use of
+``greatest'' is a misnomer: in a general ring we say that $a$ is the
+GCD of $b$ and $c$ iff it is a common divisor, and any other common
+divisor of $b$ and $c$ divides $a$. For example, both 2 and $-2$ are
+GCDs of 8 and 10 over $\mathbb{Z}$.} At first sight, there is no easy
+way, short of replicating something like the Euclidean algorithm
+inside the logic (although that isn't really a difficult prospect).
+
+However, a variant GCD algorithm, called {\sl gcdex} will, given
+polynomials $p$ and $q$, produce not just the GCD $d$, but also two
+other polynomials $r$ and $s$ such that $d=pr+qs$. (Indeed, the
+coefficients in this sort of Bezout identity follow easily from the
+Euclidean GCD algorithm.) For example, applied to $x^2-1$ and
+$x^5+1$ we get the following equation:
+\[(-x^3-x)(x^2-1)+1(x^5+1)=x+1\]
+This again can be checked easily, and from that, the fact that $x+1$
+is the {\sl greatest} common divisor follows by an easily proved
+theorem, since obviously any common factor of $x^21$ and $x^5+1$
+must, by the above equation, divide $x+1$ too. So here, given a
+certificate slightly more elaborate than simply the answer, easy and
+efficient checking is possible.
+
+\cite{Neup13} The case study was motivated by a master's thesis at RISC
+Linz, which implemented a CA algorithm for the greatest common divisor
+of multivariate polynomials in SML\cite{Mein13}.
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\cleardoublepage
\phantomsection
diff git a/books/bookvolbib.pamphlet b/books/bookvolbib.pamphlet
index 3bbe9ee..ca94390 100644
 a/books/bookvolbib.pamphlet
+++ b/books/bookvolbib.pamphlet
@@ 9448,6 +9448,19 @@ when shown in factored form.
\section{Proving Axiom Correct  The Project} %%%%%%%%%%%%%%%%%%%%
+\index{Appel, Andrew W.}
+\begin{chunk}{axiom.bib}
+@book{Appe17,
+ author = "Appel, Andrew W.",
+ title = {{Verified Functional Algorithms}},
+ year = "2017",
+ publisher = "University of Pennsylvania",
+ link =
+ "\url{https://softwarefoundations.cis.upenn.edu/vfacurrent/index.html}"
+}
+
+\end{chunk}
+
\index{van Atten, Mark}
\index{Sundholm, Goran}
\begin{chunk}{axiom.bib}
@@ 9476,6 +9489,210 @@ when shown in factored form.
\end{chunk}
+\index{Bates, Joseph L.}
+\index{Constable, Robert L.}
+\begin{chunk}{axiom.bib}
+@article{Bate85,
+ author = "Bates, Joseph L. and Constable, Robert L.",
+ title = {{Proofs as Programs}},
+ journal = "ACM TOPLAS",
+ volume = "7",
+ number = "1",
+ year = "1985",
+ abstract =
+ "The significant intellectual cost of programming is for problem
+ solving and explaining, not for coding. Yet programming systems offer
+ mechanical assistance for the coding process exclusively. We
+ illustrate the use of an implemented program development system,
+ called PRL ('pearl'), that provides automated assistance with the
+ difficult part. The problem and its explained solution are seen as
+ formal objects in a constructive logic of the data domains. These
+ formal explanations can be executed at various stages of
+ completion. The most incomplete explanations resemble applicative
+ programs, the most complete are formal proofs.",
+ paper = "Bate85.pdf"
+}
+
+\end{chunk}
+
+\index{Blakley, Bob}
+\begin{chunk}{axiom.bib}
+@inproceedings{Blak96,
+ author = "Blakley, Bob",
+ title = {{The Emperor's Old Armor}},
+ booktitle = "Proc. 1996 New Security Paradigms Workshop",
+ publisher = "ACM",
+ year = "1996",
+ paper = "Blak96.pdf"
+}
+
+\end{chunk}
+
+\index{Brooks, Frederick P.}
+\begin{chunk}{axiom.bib}
+@article{Broo87,
+ author = "Brooks, Frederick P.",
+ title = {{No Silver Bullet: Essence and Accidents of Software Engineering}},
+ journal = "IEEE Computer",
+ volume = "20",
+ number = "4",
+ pages = "10--19",
+ year = "1987",
+ abstract =
+ "Fashioning complex conceptual constructs is the essence; accidental
+ tasks arise in representing the constructs in language. Past progress
+ has so reduced the accidental tasks that future progress now depends
+ upon addressing the essence.",
+ paper = "Broo87.pdf"
+}
+
+\end{chunk}
+
+\index{Calude, C.S.}
+\index{Calude, E.}
+\index{Marcus, S.}
+\begin{chunk}{axiom.bib}
+@techreport{Calu07,
+ author = "Calude, C.S. and Calude, E. and Marcus, S.",
+ title = {{Proving and Programming}},
+ type = "technical report",
+ institution = "Centre for Discrete Mathematics and Theoretical Computer
+ Science",
+ number = "CDMTCS309",
+ year = "2007",
+ abstract =
+ "There is a strong analogy between proving theorems in mathematics and
+ writing programs in computer science. This paper is devoted to an
+ analysis, from the perspective of this analogy, of proof in
+ mathematics. We will argue that while the Hilbertian notion of proof
+ has few chances to change, future proofs will be of various types,
+ will play different roles, and their truth will be checked
+ differently. Programming gives mathematics a new form of
+ understanding. The computer is the driving force behind these
+ changes.",
+ paper = "Calu07.pdf"
+}
+
+\end{chunk}
+
+\index{Chlipala, Adam}
+\begin{chunk}{axiom.bib}
+@misc{Chli17a,
+ author = "Chlipala, Adam",
+ title = {{Coming Soon: MachineChecked Mathematical Proofs in Everyday
+ Software and Hardware Development}},
+ link = "\url{https://media.ccc.de/v/34c39105coming_soon_machinechecked_mathematical_proofs_in_everyday_software_and_hardware_development}",
+ year = "2017",
+ abstract =
+ "Most working engineers view machinechecked mathematical proofs as an
+ academic curiosity, if they have ever heard of the concept at all. In
+ contrast, activities like testing, debugging, and code review are
+ accepted as essential. They are woven into the lives of nearly all
+ developers. In this talk, I will explain how I see machinechecked
+ proofs enabling new everyday activities for developers of computer
+ software and hardware. These activities have the potential to lower
+ development effort dramatically, at the same time as they increase our
+ assurance that systems behave correctly and securely. I will give a
+ cosmological overview of this field, answering the FAQs that seem to
+ stand in the way of practicality; and I will illustrate the principles
+ with examples from projects that you can clone from GitHub today,
+ covering the computing stack from digital hardware design to
+ cryptographic software and applications.
+
+ Today's developers of computer software and hardware are tremendously
+ effective, compared to their predecessors. We have found very
+ effective ways of modularizing and validating our work. The talk is
+ about ammunition for these activities from a perhapsunexpected
+ source.
+
+ Modularity involves breaking a complex system into a hierarchy of
+ simpler pieces, which may be written and understood
+ separately. Structured programming (e.g., using loops and conditionals
+ instead of gotos) helps us read and understand parts of a single
+ function in isolation, and data abstraction lets us encapsulate
+ important functionality in objects, with guarantees that other code
+ can only access the private data by calling public methods. That way,
+ we can convince ourselves that the encapsulated code upholds certain
+ essential properties, regardless of which other code it is linked
+ with. Systematic unit testing also helps enforce contracts for units
+ of modularity. Each of these techniques can be rerun automatically, to
+ catch regressions in evolving systems, and catch those regressions in
+ a way that accurately points the finger of responsibility to
+ particular modules.
+
+ Validation is an important part of development that encompasses
+ testing, debugging, code review, and anything else that we do to raise
+ our confidence that the system behaves as intended. Experienced
+ engineers know that validation tends to take up the majority of
+ engineering effort. Often that effort involves mentally taxing
+ activities that would not otherwise come up in coding. One example is
+ thinking about testcase coverage, and another is including
+ instrumentation that produces traces to consult during debugging.
+
+ It is not hard for working developers to imagine great productivity
+ gains from better ways to break systems into pieces or raise our
+ confidence in those pieces. The claim I will make in this talk is that
+ a key source of such insights has been neglected: machinechecked
+ mathematical proofs. Here the basic functionality is an ASCII language
+ for defining mathematical objects, stating theorems about them, and
+ giving proofs of theorems. Crucially, an algorithm checks that
+ purported proofs really do establish the theorems. By going about
+ these activities in the style of programming, we inherit usual
+ supporting tools like IDEs, version control, continuous integration,
+ and automated build processes. But how could so esoteric a task as
+ math proofs call for that kind of tooling, and what does it have to do
+ with building real computer systems?
+
+ I will explain a shared vision to that end, developed along with many
+ other members of my research community. Let me try to convince you
+ that all of the following goals are attainable in the next 10 years.
+
+ \begin{itemize}
+ \item We will have complete computer systems implementing moderately
+ complex network servers for popular protocols, proved to implement
+ those protocols correctly, from the level of digital circuits on
+ up. We will remove all deployed code (hardware or software) from the
+ trusted computing base, shifting our trust to much smaller
+ specifications and proof checkers.
+ \item Hobbyists will be able to design new embedded computing
+ platforms by mixing and matching opensource hardware and software
+ components, also mixing and matching the proofs of these components,
+ guaranteeing no bugs at the digitalabstraction level or higher, with
+ no need for debugging.
+ \item New styles of library design will be enabled by the chance to
+ attach a formal behavioral specification to each library. For
+ instance, rankandfile programmers will able to assemble their own
+ code for cryptographic protocols, with code that looks like reference
+ implementations in Python, but getting performance comparable to what
+ experts handcraft in assembly today. Yet that benefit would come with
+ no need to trust that library authors have avoided bugs or intentional
+ backdoors, perhaps even including automatic proofs of cryptographic
+ security properties.
+ \end{itemize}
+
+ Main technical topics to cover to explain my optimism:
+ \begin{itemize}
+ \item The basic functionality of proof assistants and why we should
+ trust their conclusions
+ \item How to think about system decomposition with specifications and
+ proofs, including why, for most components, we do not need to worry
+ about specification mistakes
+ \item The different modes of applying proof technology to check or
+ generate components
+ \item The engineering techniques behind costeffective proof authoring
+ for realistic systems
+ \item A hardware case study: Kami, supporting componentbased digital
+ hardware authoring with proofs
+ \item A software case study: Fiat Cryptography, supporting
+ correctbyconstruction autogeneration of fast code for
+ ellipticcurve cryptography
+ \item Pointers to where to look next, if you would like to learn more
+ about this technology
+ \end{itemize}"
+}
+
+\end{chunk}
+
\index{Chlipala, Adam}
\begin{chunk}{axiom.bib}
@book{Chli17,
@@ 9505,19 +9722,6 @@ when shown in factored form.
\end{chunk}
\begin{chunk}{axiom.bib}
@misc{Cyph17,
 author = "Cypherpunks",
 title = {{Chapter 4: Verification Techniques}},
 link = "\url{http://www.cypherpunks.to/~peter/04_verif_techniques.pdf}",
 year = "2017",
 abstract = "Wherein existing methods for building secure systems are
 examined and found wanting",
 paper = "Cyph17.pdf"
}

\end{chunk}

\index{Constable, R.L.}
\index{Knoblock, T.B.}
\index{Gates, J.L.}
@@ 9550,6 +9754,118 @@ when shown in factored form.
\end{chunk}
+\index{Coquand, Thierry}
+\index{Huet, Gerard}
+\begin{chunk}{axiom.bib}
+@article{Coqu85,
+ author = "Coquand, Thierry and Huet, Gerard",
+ title = {{Constructions: A Higher Order Proof System for Mechanizing
+ Mathematics}},
+ journal = "LNCS",
+ volume = "203",
+ pages = "151--184",
+ year = "1985",
+ abstract =
+ "We present an extensive set of mathematical propositions and proofs
+ in order to demonstrate the power of expression of the theory of
+ constructions.",
+ paper = "Coqu85.pdf"
+}
+
+\end{chunk}
+
+\begin{chunk}{axiom.bib}
+@misc{Cyph17,
+ author = "Cypherpunks",
+ title = {{Chapter 4: Verification Techniques}},
+ link = "\url{http://www.cypherpunks.to/~peter/04_verif_techniques.pdf}",
+ year = "2017",
+ abstract = "Wherein existing methods for building secure systems are
+ examined and found wanting",
+ paper = "Cyph17.pdf"
+}
+
+\end{chunk}
+
+\index{Fass, Leona F.}
+\begin{chunk}{axiom.bib}
+@article{Fass04,
+ author = "Fass, Leona F.",
+ title = {{Approximations, Anomalies and ``The Proof of Correctness Wars''}},
+ journal = "ACM SIGSOFT Software Engineering Notes",
+ volume = "29",
+ number = "2",
+ year = "2004",
+ abstract =
+ "We discuss approaches to establishing 'correctness' and describe the
+ usefulness of logicbased model checkers for producing better
+ practical system designs. While we could develop techniques for
+ 'constructing correctness' in our theoretical behavioralmodeling
+ research, when applied to Real World processes such as software
+ development only approximate correctness might be established and
+ anomalous behaviors subsequently found. This we view as a positive
+ outcome since resultant adaptation, or flaw detection and correction,
+ may lead to improved development and designs. We find researchers
+ employing model checking as a formal methods tool to develop empirical
+ techniques have reached similar conclusions. Thus we cite some
+ applications of model checking to generate tests and detect defects in
+ such Real World processes as aviation system development,
+ faultdetection systems, and security.",
+ paper = "Fass04.pdf"
+}
+
+\end{chunk}
+
+\index{Fenton, Norman}
+\begin{chunk}{axiom.bib}
+@article{Fent93,
+ author = "Fenton, Norman",
+ title = {{How Effective Are Software Engineering Methods}},
+ journal = "J. Systems Software",
+ volume = "22",
+ pages = "141--148",
+ year = "1993",
+ abstract =
+ "For 25 years, software engineers have sought methods which they hope
+ can provide a technological fix for the “software crisis.” Proponents
+ of specific methods claim that their use leads to significantly
+ improved quality and productivity. Such claims are rarely, if ever,
+ backed up by hard evidence. We show that often, where real empirical
+ evidence does exist, the results are counter to the views of the
+ socalled experts. We examine the impact on the software industry of
+ continuing to place our trust in unproven, and often revolutionary,
+ methods. The very poor state of the art of empirical assessment in
+ software engineering can be explained in part by inappropriate or
+ inadequate use of measurement. Numerous empirical studies are flawed
+ because of their poor experimental design and lack of adherence to
+ proper measurement principles.",
+ paper = "Fent93.pdf"
+}
+
+\end{chunk}
+
+\index{Fetzer, James H.}
+\begin{chunk}{axiom.bib}
+@article{Fetz88,
+ author = "Fetzer, James H.",
+ title = {{Program Verification: The Very Idea}},
+ journal = "Communications of the ACM",
+ volume = "31",
+ number = "9",
+ pages = "1048--1063",
+ year = "1988",
+ abstract =
+ "The notion of program verification appears to trade upon an
+ equivocation. Algorithms, as logical structures, are appropriate
+ subjects for deductive verification. Programs, as causal models of
+ those structures, are not. The success of program verification as a
+ generally applicable and completely reliable method for guaranteeing
+ program performance is not even a theoretical possibility.",
+ paper = "Fetz88.pdf"
+}
+
+\end{chunk}
+
\index{Font, Josep Marie}
\begin{chunk}{axiom.bib}
@book{Font16,
@@ 9562,6 +9878,240 @@ when shown in factored form.
\end{chunk}
+\index{Glass, Robert L.}
+\begin{chunk}{axiom.bib}
+@article{Glas94,
+ author = "Glass, Robert L.",
+ title = {{The SoftwareResearch Crisis}},
+ journal = "IEEE Software",
+ volume = "11",
+ number = "6",
+ year = "1994",
+ abstract =
+ "With the advantage of more than 25 years' hindsight, this twenty-first
+ century author looks askance at the 'crisis' in software practice and
+ expresses deep concern for a crisis in software research.",
+ paper = "Glas94.pdf"
+}
+
+\end{chunk}
+
+\index{Glass, Robert L.}
+\begin{chunk}{axiom.bib}
+@article{Glas02,
+ author = "Glass, Robert L.",
+ title = {{The Proof of Correctness Wars}},
+ journal = "Communications of the ACM",
+ volume = "45",
+ number = "8",
+ year = "2002",
+ pages = "19--21",
+ paper = "Glas02.pdf"
+}
+
+\end{chunk}
+
+\index{Hall, Anthony}
+\begin{chunk}{axiom.bib}
+@article{Hall90,
+ author = "Hall, Anthony",
+ title = {{7 Myths of Formal Methods}},
+ journal = "IEEE Software",
+ volume = "7",
+ number = "5",
+ pages = "11--19",
+ year = "1990",
+ abstract =
+ "Formal methods are difficult, expensive, and not widely useful,
+ detractors say. Using a case study and other realworld examples, this
+ article challenges such common myths.",
+ paper = "Hall90.pdf"
+}
+
+\end{chunk}
+
+\index{Hoare, C.A.R}
+\begin{chunk}{axiom.bib}
+@article{Hoar96,
+ author = "Hoare, C.A.R",
+ title = {{How did software get so reliable without proof?}},
+ journal = "LNCS",
+ volume = "1051",
+ year = "1996",
+ abstract =
+ "By surveying current software engineering practice, this paper
+ reveals that the techniques employed to achieve reliability are little
+ different from those which have proved effective in all other branches
+ of modern engineering: rigorous management of procedures for design
+ inspection and review; quality assurance based on a wide range of
+ targeted tests; continuous evolution by removal of errors from
+ products already in widespread use; and defensive programming, among
+ other forms of deliberate overengineering. Formal methods and proof
+ play a small direct role in large scale programming; but they do
+ provide a conceptual framework and basic understanding to promote the
+ best of current practice, and point directions for future
+ improvement.",
+ paper = "Hoar96.pdf"
+}
+
+\end{chunk}
+
+\index{Lindsay, Peter A.}
+\begin{chunk}{axiom.bib}
+@article{Lind88,
+ author = "Lindsay, Peter A.",
+ title = {{A Survey of Mechanical Support for Formal Reasoning}},
+ journal = "Software Engineering Journal",
+ volume = "3",
+ number = "1",
+ year = "1988",
+ pages = "3--27",
+ paper = "Lind88.pdf"
+}
+
+\end{chunk}
+
+\index{Manna, Zohar}
+\index{Waldinger, Richard}
+\begin{chunk}{axiom.bib}
+@article{Mann78,
+ author = "Manna, Zohar and Waldinger, Richard",
+ title = {{The Logic of Computer Programming}},
+ journal = "IEEE Trans. on Software Engineering",
+ volume = "4",
+ number = "3",
+ year = "1978",
+ abstract =
+ "Techniques derived from mathematical logic promise to provide an
+ alternative to the conventional methodology for constructing,
+ debugging, and optimizing computer programs. Ultimately, these
+ techniques are intended to lead to the automation of many of the
+ facets of the programming process.
+
+ This paper provides a unified tutorial exposition of the logical
+ techniques, illustrating each with examples. The strengths and
+ limitations of each technique as a practical programming aid are
+ assessed and attempts to implement these methods in experimental
+ systems are discussed.",
+ paper = "Mann78.pdf"
+}
+
+\end{chunk}
+
+\index{Manna, Zohar}
+\begin{chunk}{axiom.bib}
+@misc{Mann80,
+ author = "Manna, Zohar",
+ title = {{Lectures on the Logic of Computer Programming}},
+ publisher = "Society for Industrial and Applied Mathematics",
+ year = "1980",
+ paper = "Mann80.tgz"
+}
+
+\end{chunk}
+
+\index{Martin-L\"of, P.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Mart73,
+ author = "Martin-L{\"o}f, P.",
+ title = {{An Intuitionistic Theory of Types}},
+ booktitle = "Logic Colloquium 1973",
+ publisher = "H.E. Rose and J.G. Shepherdson",
+ pages = "73--118",
+ year = "1973",
+ abstract =
+ "The theory of types with which we shall be concerned is intended to
+ be a full scale system for formalizing intuitionistic mathematics as
+ developed, for example, in the book by Bishop 1967. The language of
+ the theory is richer than the language of first order predicate
+ logic. This makes it possible to strengthen the axioms for existence
+ and disjunction. In the case of existence, the possibility of
+ strengthening the usual elimination rule seems first to have been
+ indicated by Howard 1969, whose proposed axioms are special cases of
+ the existential elimination rule of the present theory. Furthermore,
+ there is a reflection principle which links the generation of objects
+ and types and plays somewhat the same role for the present theory as
+ does the replacement axiom for Zermelo-Fraenkel set theory.
+
+ An earlier, not yet conclusive, attempt at formulating a theory of
+ this kind was made by Scott 1970. Also related, although less closely,
+ are the type and logic free theories of constructions of Kreisel 1962
+ and 1965 and Goodman 1970.
+
+ In its first version, the present theory was based on the strongly
+ impredicative axiom that there is a type of all types whatsoever,
+ which is at the same time a type and an object of that type. This
+ axiom had to be abandoned, however, after it was shown to lead to a
+ contradiction by Jean-Yves Girard. I am very grateful to him for
+ showing me his paradox. The change that it necessitated is so drastic
+ that my theory no longer contains intuitionistic simple type theory as
+ it originally did. Instead, its proof theoretic strength should be
+ close to that of predicative analysis.",
+ paper = "Mart73.pdf"
+}
+
+\end{chunk}
+
+\index{Martin-L\"of, P.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Mart79,
+ author = "Martin-L{\"o}f, P.",
+ title = {{Constructive Mathematics and Computer Programming}},
+ booktitle = "Proc. 6th Int. Congress for Logic, Methodology and
+ Philosophy of Science",
+ publisher = "NorthHolland",
+ year = "1979",
+ pages = "153--179",
+ abstract =
+ "If programming is understood not as the writing of instructions for
+ this or that computing machine but as the design of methods of
+ computation that it is the computer's duty to execute (a difference
+ that Dijkstra has referred to as the difference between computer
+ science and computing science), then it no longer seems possible to
+ distinguish the discipline of programming from constructive
+ mathematics. This explains why the intuitionistic theory of types,
+ which was originally developed as a symbolism for the precise
+ codification of constructive mathematics, may equally well be viewed
+ as a programming language. As such it provides a precise notation not
+ only, like other programming languages, for the programs themselves
+ but also for the tasks that the programs are supposed to
+ perform. Moreover, the inference rules of the theory of types, which
+ are again completely formal, appear as rules of correct program
+ synthesis. Thus the correctness of a program written in the theory of
+ types is proved formally at the same time as it is being
+ synthesized.",
+ paper = "Mart79.pdf"
+}
+
+\end{chunk}
+
+\index{Reid, Alastair}
+\begin{chunk}{axiom.bib}
+@misc{Reid17,
+ author = "Reid, Alastair",
+ title = {{How can you trust formally verified software}},
+ link = "\url{https://media.ccc.de/v/34c38915how_can_you_trust_formally_verified_software#t=12}",
+ year = "2017",
+ abstract =
+ "Formal verification of software has finally started to become viable:
+ we have examples of formally verified microkernels, realistic
+ compilers, hypervisors, etc. These are huge achievements and we can
+ expect to see even more impressive results in the future but the
+ correctness proofs depend on a number of assumptions about the Trusted
+ Computing Base that the software depends on. Two key questions to ask
+ are: Are the specifications of the Trusted Computing Base correct? And
+ do the implementations match the specifications? I will explore the
+ philosophical challenges and practical steps you can take in answering
+ that question for one of the major dependencies: the hardware your
+ software runs on. I will describe the combination of formal
+ verification and testing that ARM uses to verify the processor
+ specification and I will talk about our current challenge: getting the
+ specification down to zero bugs while the architecture continues to
+ evolve."
+}
+
+\end{chunk}
+
\index{Restall, Greg}
\begin{chunk}{axiom.bib}
@book{Rest00,
@@ 9590,6 +10140,17 @@ when shown in factored form.
\end{chunk}
+\index{Wilkes, Maurice}
+\begin{chunk}{axiom.bib}
+@book{Wilk85a,
+ author = "Wilkes, Maurice",
+ title = {{Memoirs of a Computer Pioneer}},
+ publisher = "MIT Press",
+ year = "1985"
+}
+
+\end{chunk}
+
\section{Proving Axiom Correct  Coercion in CASProof Systesms} %
\index{Sozeau, Matthieu}
@@ 12298,6 +12859,18 @@ when shown in factored form.
\end{chunk}
+\index{Dijkstra, Edsger}
+\begin{chunk}{axiom.bib}
+@misc{Dijk83,
+ author = "Dijkstra, Edsger",
+ title = {{Fruits of Misunderstanding}},
+ year = "1983",
+ link = "\url{https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD854.html}",
+ paper = "Dijk83.txt"
+}
+
+\end{chunk}
+
\index{Dolzmann, A.}
\index{Seidl, A.}
\index{Sturm, T.}
diff git a/changelog b/changelog
index 36360f7..c85c9df 100644
 a/changelog
+++ b/changelog
@@ 1,3 +1,5 @@
+20171230 tpd src/axiomwebsite/patches.html 20171230.01.tpd.patch
+20171230 tpd books/bookvol13 next draft
20171224 tpd src/axiomwebsite/patches.html 20171224.01.tpd.patch
20171224 tpd books/bookvolbib add references
20171217 tpd src/axiomwebsite/patches.html 20171217.01.tpd.patch
diff git a/patch b/patch
index e130d3d..336fea0 100644
 a/patch
+++ b/patch
@@ 1,98 +1,603 @@
books/bookvolbib adding references
+books/bookvol13 next draft
Goal: Proving Axiom Correct
\index{Pfenning, Frank}
+\index{Blakley, Bob}
\begin{chunk}{axiom.bib}
@inproceedings{Pfen88,
 author = "Pfenning, Frank",
 title = {{Partial Polymorphic Type Inference and HigherOrder Unification}},
 booktitle = "Proc 1988 ACM Conf. on Lisp and Functional Programming",
 pages = "153163",
 year = "1988",
+@inproceedings{Blak96,
+ author = "Blakley, Bob",
+ title = {{The Emperor's Old Armor}},
+ booktitle = "Proc. 1996 New Security Paradigms Workshop",
publisher = "ACM",
 isbn = "089791273X",
+ year = "1996",
+ paper = "Blak96.pdf"
+}
+
+\end{chunk}
+
+\index{Brooks, Frederick P.}
+\begin{chunk}{axiom.bib}
+@article{Broo87,
+ author = "Brooks, Frederick P.",
+ title = {{No Silver Bullet: Essence and Accidents of Software Engineering}},
+ journal = "IEEE Computer",
+ volume = "20",
+ number = "4",
+ pages = "1019",
+ year = "1987",
abstract =
 "We show that the problem of partial type inference in the $n$th
 order polymorphic $\lambda$calculus is equivalent to $n$th order
 unification. On the one hand, this means that partial type inference
 in polymorphic $\lambda$calculi of order 2 or higher is
 undecidable. On the other hand, higherorder unification is often
 tractable in practice, and our translation entails a very useful
 algorithm for partial type inference in the $omega$order polymorphic
 $\lambda$calculus. We present an implementation in $\lambda$Prolog in
 full.",
 paper = "Pfen88.pdf"
+ "Fashioning complex conceptual constructs is the essence; accidental
+ tasks arise in representing the constructs in language. Past progress
+ has so reduced the accidental tasks that future progress now depends
+ upon addressing the essence.",
+ paper = "Broo87.pdf"
+}
+
+\end{chunk}
+
+\index{Glass, Robert L.}
+\begin{chunk}{axiom.bib}
+@article{Glas94,
+ author = "Glass, Robert L.",
+ title = {{The SoftwareResearch Crisis}},
+ journal = "IEEE Software",
+ volume = "11",
+ number = "6",
+ year = "1994",
+ abstract =
+ "With the advantage of more than 25 years' hindsight, this twentyfirst
+ century author looks askance at the 'crisis' in software practice and
+ expresses deep concern for a crisis in software research.",
+ paper = "Glas94.pdf"
+}
+
+\end{chunk}
+
+\index{Fenton, Norman}
+\begin{chunk}{axiom.bib}
+@article{Fent93,
+ author = "Fenton, Norman",
+ title = {{How Effective Are Software Engineering Methods}},
+ journal = "J. Systems Software",
+ volume = "22",
+ pages = "141148",
+ year = "1993",
+ abstract =
+ "For 25 years, software engineers have sought methods which they hope
+ can provide a technological fix for the “software crisis.” Proponents
+ of specific methods claim that their use leads to significantly
+ improved quality and productivity. Such claims are rarely, if ever,
+ backed up by hard evidence. We show that often, where real empirical
+ evidence does exist, the results are counter to the views of the
+ socalled experts. We examine the impact on the software industry of
+ continuing to place our trust in unproven, and often revolutionary,
+ methods. The very poor state of the art of empirical assessment in
+ software engineering can be explained in part by inappropriate or
+ inadequate use of measurement. Numerous empirical studies are flawed
+ because of their poor experimental design and lack of adherence to
+ proper measurement principles.",
+ paper = "Fent93.pdf"
+}
+
+\end{chunk}
+
+\index{Lindsay, Peter A.}
+\begin{chunk}{axiom.bib}
+@article{Lind88,
+ author = "Lindsay, Peter A.",
+ title = {{A Survey of Mechanical Support for Formal Reasoning}},
+ journal = "Software Engineering Journal",
+ volume = "3",
+ number = "1",
+ year = "1988",
+ pages = "327",
+ paper = "Lind88.pdf"
+}
+
+\end{chunk}
+
+\index{MartinL\"of, P.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Mart79,
+ author = "Martin-L{\"o}f, P.",
+ title = {{Constructive Mathematics and Computer Programming}},
+ booktitle = "Proc. 6th Int. Congress for Logic, Methodology and
+ Philosophy of Science",
+ publisher = "NorthHolland",
+ year = "1979",
+ pages = "153179",
+ abstract =
+ "If programming is understood not as the writing of instructions for
+ this or that computing machine but as the design of methods of
+ computation that it is the computer's duty to execute (a difference
+ that Dijkstra has referred to as the difference between computer
+ science and computing science), then it no longer seems possible to
+ distinguish the discipline of programming from constructive
+ mathematics. This explains why the intuitionistic theory of types,
+ which was originally developed as a symbolism for the precise
+ codification of constructive mathematics, may equally well be viewed
+ as a programming language. As such it provides a precise notation not
+ only, like other programming languages, for the programs themselves
+ but also for the tasks that the programs are supposed to
+ perform. Moreover, the inference rules of the theory of types, which
+ are again completely formal, appear as rules of correct program
+ synthesis. Thus the correctness of a program written in the theory of
+ types is proved formally at the same time as it is being
+ synthesized.",
+ paper = "Mart79.pdf"
+}
+
+\end{chunk}
+
+\index{MartinL\"of, P.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Mart73,
+ author = "Martin-L{\"o}f, P.",
+ title = {{An Intuitionistic Theory of Types}},
+ booktitle = "Logic Colloquium 1973",
+ publisher = "H.E. Rose and J.G. Shepherdson",
+ pages = "73118",
+ year = "1973",
+ abstract =
+ "The theory of types with which we shall be concerned is intended to
+ be a full scale system for formalizing intuitionistic mathematics as
+ developed, for example, in the book by Bishop 1967. The language of
+ the theory is richer than the language of first order predicate
+ logic. This makes it possible to strengthen the axioms for existence
+ and disjunction. In the case of existence, the possibility of
+ strengthening the usual elimination rule seems first to have been
+ indicated by Howard 1969, whose proposed axioms are special cases of
+ the existential elimination rule of the present theory. Furthermore,
+ there is a reflection principle which links the generation of objects
+ and types and plays somewhat the same role for the present theory as
+ does the replacement axiom for Zermelo-Fraenkel set theory.
+
+ An earlier, not yet conclusive, attempt at formulating a theory of
+ this kind was made by Scott 1970. Also related, although less closely,
+ are the type and logic free theories of constructions of Kreisel 1962
+ and 1965 and Goodman 1970.
+
+ In its first version, the present theory was based on the strongly
+ impredicative axiom that there is a type of all types whatsoever,
+ which is at the same time a type and an object of that type. This
+ axiom had to be abandoned, however, after it was shown to lead to a
+ contradiction by Jean Yves Girard. I am very grateful to him for
+ showing me his paradox. The change that it necessitated is so drastic
+ that my theory no longer contains intuitionistic simple type theory as
+ it originally did. Instead, its proof theoretic strength should be
+ close to that of predicative analysis.",
+ paper = "Mart73.pdf"
+}
+
+\end{chunk}
+
+\index{Bates, Joseph L.}
+\index{Constable, Robert L.}
+\begin{chunk}{axiom.bib}
+@article{Bate85,
+ author = "Bates, Joseph L. and Constable, Robert L.",
+ title = {{Proofs as Programs}},
+ journal = "ACM TOPLAS",
+ volume = "7",
+ number = "1",
+ year = "1985",
+ abstract =
+ "The significant intellectual cost of programming is for problem
+ solving and explaining, not for coding. Yet programming systems offer
+ mechanical assistance for the coding process exclusively. We
+ illustrate the use of an implemented program development system,
+ called PRL ('pearl'), that provides automated assistance with the
+ difficult part. The problem and its explained solution are seen as
+ formal objects in a constructive logic of the data domains. These
+ formal explanations can be executed at various stages of
+ completion. The most incomplete explanations resemble applicative
+ programs, the most complete are formal proofs.",
+ paper = "Bate85.pdf"
+}
+
+\end{chunk}
+
+\index{Coquand, Thierry}
+\index{Huet, Gerard}
+\begin{chunk}{axiom.bib}
+@article{Coqu85,
+ author = "Coquand, Thierry and Huet, Gerard",
+ title = {{Constructions: A Higher Order Proof System for Mechanizing
+ Mathematics}},
+ journal = "LNCS",
+ volume = "203",
+ pages = "151184",
+ year = "1985",
+ abstract =
+ "We present an extensive set of mathematical propositions and proofs
+ in order to demonstrate the power of expression of the theory of
+ constructions.",
+ paper = "Coqu85.pdf"
+}
+
+\end{chunk}
+
+\index{Calude, C.S.}
+\index{Calude, E.}
+\index{Marcus, S.}
+\begin{chunk}{axiom.bib}
+@techreport{Calu07,
+ author = "Calude, C.S. and Calude, E. and Marcus, S.",
+ title = {{Proving and Programming}},
+ type = "technical report",
+ institution = "Centre for Discrete Mathematics and Theoretical Computer
+ Science",
+ number = "CDMTCS309",
+ year = "2007",
+ abstract =
+ "There is a strong analogy between proving theorems in mathematics and
+ writing programs in computer science. This paper is devoted to an
+ analysis, from the perspective of this analogy, of proof in
+ mathematics. We will argue that while the Hilbertian notion of proof
+ has few chances to change, future proofs will be of various types,
+ will play different roles, and their truth will be checked
+ differently. Programming gives mathematics a new form of
+ understanding. The computer is the driving force behind these
+ changes.",
+ paper = "Calu07.pdf"
+}
+
+\end{chunk}
+
+\index{Fass, Leona F.}
+\begin{chunk}{axiom.bib}
+@article{Fass04,
+ author = "Fass, Leona F.",
+ title = {{Approximations, Anomalies and ``The Proof of Correctness Wars''}},
+ journal = "ACM SIGSOFT Software Engineering Notes",
+ volume = "29",
+ number = "2",
+ year = "2004",
+ abstract =
+ "We discuss approaches to establishing 'correctness' and describe the
+ usefulness of logicbased model checkers for producing better
+ practical system designs. While we could develop techniques for
+ 'constructing correctness' in our theoretical behavioralmodeling
+ research, when applied to Real World processes such as software
+ development only approximate correctness might be established and
+ anomalous behaviors subsequently found. This we view as a positive
+ outcome since resultant adaptation, or flaw detection and correction,
+ may lead to improved development and designs. We find researchers
+ employing model checking as a formal methods tool to develop empirical
+ techniques have reached similar conclusions. Thus we cite some
+ applications of model checking to generate tests and detect defects in
+ such Real World processes as aviation system development,
+ faultdetection systems, and security.",
+ paper = "Fass04.pdf"
+}
+
+\end{chunk}
+
+\index{Thurston, William P.}
+\begin{chunk}{axiom.bib}
+@article{Thur94,
+ author = "Thurston, William P.",
+ title = {{On Proof and Progress in Mathematics}},
+ journal = "Bulletin of the American Mathematical Society",
+ volume = "30",
+ number = "2",
+ year = "1994",
+ paper = "Thur94.pdf"
+}
+
+\end{chunk}
+
+\index{Hoare, C.A.R.}
+\begin{chunk}{axiom.bib}
+@article{Hoar96,
+ author = "Hoare, C.A.R.",
+ title = {{How did software get so reliable without proof?}},
+ journal = "LNCS",
+ volume = "1051",
+ year = "1996",
+ abstract =
+ "By surveying current software engineering practice, this paper
+ reveals that the techniques employed to achieve reliability are little
+ different from those which have proved effective in all other branches
+ of modern engineering: rigorous management of procedures for design
+ inspection and review; quality assurance based on a wide range of
+ targeted tests; continuous evolution by removal of errors from
+ products already in widespread use; and defensive programming, among
+ other forms of deliberate overengineering. Formal methods and proof
+ play a small direct role in large scale programming; but they do
+ provide a conceptual framework and basic understanding to promote the
+ best of current practice, and point directions for future
+ improvement.",
+ paper = "Hoar96.pdf"
+}
+
+\end{chunk}
+
+\index{Glass, Robert L.}
+\begin{chunk}{axiom.bib}
+@article{Glas02,
+ author = "Glass, Robert L.",
+ title = {{The Proof of Correctness Wars}},
+ journal = "Communications of the ACM",
+ volume = "45",
+ number = "8",
+ year = "2002",
+ pages = "1921",
+ paper = "Glas02.pdf"
+}
+
+\end{chunk}
+
+\index{Fetzer, James H.}
+\begin{chunk}{axiom.bib}
+@article{Fetz88,
+ author = "Fetzer, James H.",
+ title = {{Program Verification: The Very Idea}},
+ journal = "Communications of the ACM",
+ volume = "31",
+ number = "9",
+ pages = "10481063",
+ year = "1988",
+ abstract =
+ "The notion of program verification appears to trade upon an
+ equivocation. Algorithms, as logical structures, are appropriate
+ subjects for deductive verification. Programs, as causal models of
+ those structures, are not. The success of program verification as a
+ generally applicable and completely reliable method for guaranteeing
+ program performance is not even a theoretical possibility.",
+ paper = "Fetz88.pdf"
+}
+
+\end{chunk}
+
+\index{Manna, Zohar}
+\begin{chunk}{axiom.bib}
+@misc{Mann80,
+ author = "Manna, Zohar",
+ title = {{Lectures on the Logic of Computer Programming}},
+ publisher = "Society for Industrial and Applied Mathematics",
+ year = "1980",
+ paper = "Mann80.tgz"
+}
+
+\end{chunk}
+
+\index{Manna, Zohar}
+\index{Waldinger, Richard}
+\begin{chunk}{axiom.bib}
+@article{Mann78,
+ author = "Manna, Zohar and Waldinger, Richard",
+ title = {{The Logic of Computer Programming}},
+ journal = "IEEE Trans. on Software Engineering",
+ volume = "4",
+ number = "3",
+ year = "1978",
+ abstract =
+ "Techniques derived from mathematical logic promise to provide an
+ alternative to the conventional methodology for constructing,
+ debugging, and optimizing computer programs. Ultimately, these
+ techniques are intended to lead to the automation of many of the
+ facets of the programming process.
+
+ This paper provides a unified tutorial exposition of the logical
+ techniques, illustrating each with examples. The strengths and
+ limitations of each technique as a practical programming aid are
+ assessed and attempts to implement these methods in experimental
+ systems are discussed.",
+ paper = "Mann78.pdf"
+}
+
+\end{chunk}
+
+\index{Hall, Anthony}
+\begin{chunk}{axiom.bib}
+@article{Hall90,
+ author = "Hall, Anthony",
+ title = {{7 Myths of Formal Methods}},
+ journal = "IEEE Software",
+ volume = "7",
+ number = "5",
+ pages = "1119",
+ year = "1990",
+ abstract =
+ "Formal methods are difficult, expensive, and not widely useful,
+ detractors say. Using a case study and other realworld examples, this
+ article challenges such common myths.",
+ paper = "Hall90.pdf"
+}
+
+\end{chunk}
+
+\index{Dijkstra, Edsger}
+\begin{chunk}{axiom.bib}
+@misc{Dijk83,
+ author = "Dijkstra, Edsger",
+ title = {{Fruits of Misunderstanding}},
+ year = "1983",
+ link = "\url{https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD854.html}",
+ paper = "Dijk83.txt"
+}
+
+\end{chunk}
+
+\index{Bradley, Aaron R.}
+\index{Manna, Zohar}
+\begin{chunk}{axiom.bib}
+@book{Brad07,
+ author = "Bradley, Aaron R. and Manna, Zohar",
+ title = {{The Calculus of Computation}},
+ publisher = "Springer",
+ year = "2007",
+ isbn = "978-3-540-74112-1"
+}
+
+\end{chunk}
+
+\index{Reid, Alastair}
+\begin{chunk}{axiom.bib}
+@misc{Reid17,
+ author = "Reid, Alastair",
+ title = {{How can you trust formally verified software}},
+ link = "\url{https://media.ccc.de/v/34c38915how_can_you_trust_formally_verified_software#t=12}",
+ year = "2017",
+ abstract =
+ "Formal verification of software has finally started to become viable:
+ we have examples of formally verified microkernels, realistic
+ compilers, hypervisors, etc. These are huge achievements and we can
+ expect to see even more impressive results in the future but the
+ correctness proofs depend on a number of assumptions about the Trusted
+ Computing Base that the software depends on. Two key questions to ask
+ are: Are the specifications of the Trusted Computing Base correct? And
+ do the implementations match the specifications? I will explore the
+ philosophical challenges and practical steps you can take in answering
+ that question for one of the major dependencies: the hardware your
+ software runs on. I will describe the combination of formal
+ verification and testing that ARM uses to verify the processor
+ specification and I will talk about our current challenge: getting the
+ specification down to zero bugs while the architecture continues to
+ evolve."
}
\end{chunk}
\index{Chlipala, Adam}
\begin{chunk}{axiom.bib}
@book{Chli17,
+@misc{Chli17a,
author = "Chlipala, Adam",
 title = {{Formal Reasoning About Programs}},
+ title = {{Coming Soon: MachineChecked Mathematical Proofs in Everyday
+ Software and Hardware Development}},
+ link = "\url{https://media.ccc.de/v/34c39105coming_soon_machinechecked_mathematical_proofs_in_everyday_software_and_hardware_development}",
year = "2017",
 publisher = "MIT",
 link = "\url{http://adam.chlipala.net/frap/frap_book.pdf}",
abstract =
 "Briefly, this book is about an approach to bringing software
 engineering up to speed with more traditional engineering disciplines,
 providing a mathematical foundation for rigorous analysis of realistic
 computer systems. As civil engineers apply their mathematical canon to
 reach high certainty that bridges will not fall down, the software
 engineer should apply a different canon to argue that programs behave
 properly. As other engineering disciplines have their computeraided
 design tools, computer science has proof assistants, IDEs for logical
 arguments. We will learn how to apply these tools to certify that
 programs behave as expected.
+ "Most working engineers view machinechecked mathematical proofs as an
+ academic curiosity, if they have ever heard of the concept at all. In
+ contrast, activities like testing, debugging, and code review are
+ accepted as essential. They are woven into the lives of nearly all
+ developers. In this talk, I will explain how I see machinechecked
+ proofs enabling new everyday activities for developers of computer
+ software and hardware. These activities have the potential to lower
+ development effort dramatically, at the same time as they increase our
+ assurance that systems behave correctly and securely. I will give a
+ cosmological overview of this field, answering the FAQs that seem to
+ stand in the way of practicality; and I will illustrate the principles
+ with examples from projects that you can clone from GitHub today,
+ covering the computing stack from digital hardware design to
+ cryptographic software and applications.
 More specifically: Introductions to two intertangled subjects: the Coq
 proof assistant, a tool for machinechecked mathematical theorem
 proving; and formal logical reasoning about the correctness of
 programs.",
 paper = "Chli17.pdf"
}
+ Today's developers of computer software and hardware are tremendously
+ effective, compared to their predecessors. We have found very
+ effective ways of modularizing and validating our work. The talk is
+ about ammunition for these activities from a perhapsunexpected
+ source.
+
+ Modularity involves breaking a complex system into a hierarchy of
+ simpler pieces, which may be written and understood
+ separately. Structured programming (e.g., using loops and conditionals
+ instead of gotos) helps us read and understand parts of a single
+ function in isolation, and data abstraction lets us encapsulate
+ important functionality in objects, with guarantees that other code
+ can only access the private data by calling public methods. That way,
+ we can convince ourselves that the encapsulated code upholds certain
+ essential properties, regardless of which other code it is linked
+ with. Systematic unit testing also helps enforce contracts for units
+ of modularity. Each of these techniques can be rerun automatically, to
+ catch regressions in evolving systems, and catch those regressions in
+ a way that accurately points the finger of responsibility to
+ particular modules.
+
+ Validation is an important part of development that encompasses
+ testing, debugging, code review, and anything else that we do to raise
+ our confidence that the system behaves as intended. Experienced
+ engineers know that validation tends to take up the majority of
+ engineering effort. Often that effort involves mentally taxing
+ activities that would not otherwise come up in coding. One example is
+ thinking about testcase coverage, and another is including
+ instrumentation that produces traces to consult during debugging.
+
+ It is not hard for working developers to imagine great productivity
+ gains from better ways to break systems into pieces or raise our
+ confidence in those pieces. The claim I will make in this talk is that
+ a key source of such insights has been neglected: machinechecked
+ mathematical proofs. Here the basic functionality is an ASCII language
+ for defining mathematical objects, stating theorems about them, and
+ giving proofs of theorems. Crucially, an algorithm checks that
+ purported proofs really do establish the theorems. By going about
+ these activities in the style of programming, we inherit usual
+ supporting tools like IDEs, version control, continuous integration,
+ and automated build processes. But how could so esoteric a task as
+ math proofs call for that kind of tooling, and what does it have to do
+ with building real computer systems?
+
+ I will explain a shared vision to that end, developed along with many
+ other members of my research community. Let me try to convince you
+ that all of the following goals are attainable in the next 10 years.
+
+ \begin{itemize}
+ \item We will have complete computer systems implementing moderately
+ complex network servers for popular protocols, proved to implement
+ those protocols correctly, from the level of digital circuits on
+ up. We will remove all deployed code (hardware or software) from the
+ trusted computing base, shifting our trust to much smaller
+ specifications and proof checkers.
+ \item Hobbyists will be able to design new embedded computing
+ platforms by mixing and matching opensource hardware and software
+ components, also mixing and matching the proofs of these components,
+ guaranteeing no bugs at the digitalabstraction level or higher, with
+ no need for debugging.
+ \item New styles of library design will be enabled by the chance to
+ attach a formal behavioral specification to each library. For
+ instance, rank-and-file programmers will be able to assemble their own
+ code for cryptographic protocols, with code that looks like reference
+ implementations in Python, but getting performance comparable to what
+ experts handcraft in assembly today. Yet that benefit would come with
+ no need to trust that library authors have avoided bugs or intentional
+ backdoors, perhaps even including automatic proofs of cryptographic
+ security properties.
+ \end{itemize}
+
+ Main technical topics to cover to explain my optimism:
+ \begin{itemize}
+ \item The basic functionality of proof assistants and why we should
+ trust their conclusions
+ \item How to think about system decomposition with specifications and
+ proofs, including why, for most components, we do not need to worry
+ about specification mistakes
+ \item The different modes of applying proof technology to check or
+ generate components
+ \item The engineering techniques behind costeffective proof authoring
+ for realistic systems
+ \item A hardware case study: Kami, supporting componentbased digital
+ hardware authoring with proofs
+ \item A software case study: Fiat Cryptography, supporting
+ correctbyconstruction autogeneration of fast code for
+ ellipticcurve cryptography
+ \item Pointers to where to look next, if you would like to learn more
+ about this technology
+ \end{itemize}"
+}
\end{chunk}
\index{van Atten, Mark}
\index{Sundholm, Goran}
+\index{Wilkes, Maurice}
\begin{chunk}{axiom.bib}
@misc{Atte15,
 author = "van Atten, Mark and Sundholm, Goran",
 title = {{L.E.J. Brouwer's 'Unrelability of the Logical Principles'
 translation}},
 year = "2015",
 link = "\url{https://arxiv.org/pdf/1511.01113.pdf}",
 abstract =
 "We present a new English translation of L.E.J. Brouwer's paper 'De
 onbetrouwbaarheid der logische principes' (The unreliability of the
 logical principles) of 1908, together with a philosophical and
 historical introduction. In this paper Brouwer for the first time
 objected to the idea that the Principle of the Excluded Middle is
 valid. We discuss the circumstances under which the manuscript was
 submitted and accepted, Brouwer's ideas on the principle of the
 excluded middle, and its consistency and partial validity, and his
 argument against the possibility of absolutely undecidable
 propositions. We not that principled objections to the general
 exculded middle similar to Brouwer's had been advanced in print by
 Jules Molk two years before. Finally, we discuss the influence on
 George Griss' negationless mathematics",
 paper = "Atte15.pdf"
+@book{Wilk85,
+ author = "Wilkes, Maurice",
+ title = {{Memoirs of a Computer Pioneer}},
+ publisher = "MIT Press",
+ year = "1985"
}
\end{chunk}
+\index{Appel, Andrew W.}
\begin{chunk}{axiom.bib}
@misc{Cyph17,
 author = "Cypherpunks",
 title = {{Chapter 4: Verification Techniques}},
 link = "\url{http://www.cypherpunks.to/~peter/04_verif_techniques.pdf}",
+@book{Appe17,
+ author = "Appel, Andrew W.",
+ title = {{Verified Functional Algorithms}},
year = "2017",
 abstract = "Wherein existing methods for building secure systems are
 examined and found wanting",
 paper = "Cyph17.pdf"
+ publisher = "University of Pennsylvania",
+ link =
+ "\url{https://softwarefoundations.cis.upenn.edu/vfacurrent/index.html}"
}
\end{chunk}
diff git a/src/axiomwebsite/patches.html b/src/axiomwebsite/patches.html
index 3d10bd1..df00da9 100644
 a/src/axiomwebsite/patches.html
+++ b/src/axiomwebsite/patches.html
@@ 5876,6 +5876,8 @@ src/interp/ioutput.lisp document CHARYBDIS
books/bookvol5 fix typos
20171224.01.tpd.patch
books/bookvolbib add references
+20171230.01.tpd.patch
+books/bookvol13 next draft

1.9.1